From d1f3a269d7420bb68148d5f8c82708987651e003 Mon Sep 17 00:00:00 2001 From: Timo Aaltonen Date: Mon, 8 Jan 2024 18:04:08 +0200 Subject: [PATCH] Import 389-ds-base_2.4.4+dfsg1.orig.tar.xz [dgit import orig 389-ds-base_2.4.4+dfsg1.orig.tar.xz] --- .cargo/config.in | 6 + .clang-format | 49 + .copr/Makefile | 22 + .cvsignore | 5 + .dockerignore | 3 + .github/ISSUE_TEMPLATE/bug_report.md | 32 + .github/ISSUE_TEMPLATE/feature_request.md | 20 + .github/daemon.json | 5 + .github/scripts/generate_matrix.py | 37 + .github/workflows/compile.yml | 78 + .github/workflows/lmdbpytest.yml | 120 + .github/workflows/npm.yml | 25 + .github/workflows/pytest.yml | 120 + .github/workflows/release.yml | 60 + .github/workflows/validate.yml | 27 + .gitignore | 238 + LICENSE | 28 + LICENSE.GPLv3+ | 674 + LICENSE.mit | 32 + LICENSE.openldap | 47 + LICENSE.openssl | 11 + Makefile.am | 2135 ++ README.md | 52 + VERSION.sh | 54 + autogen.sh | 103 + buildnum.py | 23 + configure.ac | 921 + dirsrvtests/README | 28 + dirsrvtests/__init__.py | 0 dirsrvtests/check_for_duplicate_ids.py | 46 + dirsrvtests/conftest.py | 156 + dirsrvtests/create_test.py | 340 + dirsrvtests/pytest.ini | 6 + dirsrvtests/report.py | 117 + dirsrvtests/requirements.txt | 3 + dirsrvtests/testimony.yaml | 22 + dirsrvtests/tests/__init__.py | 0 dirsrvtests/tests/data/README | 11 + dirsrvtests/tests/data/__init__.py | 0 dirsrvtests/tests/data/basic/__init__.py | 0 dirsrvtests/tests/data/basic/dse.ldif.broken | 95 + ...ocalhost-userRoot-2020_03_30_13_14_47.ldif | 233 + .../entryuuid/localhost-userRoot-invalid.ldif | 233 + ..._protect_long_test_reference_1.4.2.12.json | 405 + .../openldap_2_389/1/example_com.slapcat.ldif | 241 + .../openldap_2_389/1/example_net.slapcat.ldif | 241 + .../openldap_2_389/1/setup/example_com.ldif | 117 + .../openldap_2_389/1/setup/example_net.ldif | 117 + .../data/openldap_2_389/1/setup/slapd.ldif | 146 + .../openldap_2_389/1/slapd.d/cn=config.ldif | 14 + .../1/slapd.d/cn=config/cn=module{0}.ldif | 16 + 
.../1/slapd.d/cn=config/cn=schema.ldif | 12 + .../cn=config/cn=schema/cn={0}core.ldif | 249 + .../cn=config/cn=schema/cn={1}cosine.ldif | 178 + .../cn=schema/cn={2}inetorgperson.ldif | 49 + .../cn=config/cn=schema/cn={3}rfc2307bis.ldif | 155 + .../cn=config/cn=schema/cn={4}yast.ldif | 108 + .../cn=config/cn=schema/cn={5}test.ldif | 12 + .../cn=config/olcDatabase={-1}frontend.ldif | 15 + .../cn=config/olcDatabase={0}config.ldif | 20 + .../slapd.d/cn=config/olcDatabase={1}mdb.ldif | 19 + .../olcOverlay={0}memberof.ldif | 14 + .../olcOverlay={1}refint.ldif | 15 + .../olcOverlay={2}unique.ldif | 15 + .../slapd.d/cn=config/olcDatabase={2}mdb.ldif | 18 + .../olcOverlay={0}memberof.ldif | 14 + .../olcOverlay={1}unique.ldif | 15 + .../4539/slapd.d/cn=config.ldif | 42 + .../4539/slapd.d/cn=config/cn=module{0}.ldif | 13 + .../4539/slapd.d/cn=config/cn=schema.ldif | 634 + .../cn=config/cn=schema/cn={0}core.ldif | 247 + .../cn=config/cn=schema/cn={1}cosine.ldif | 178 + .../cn=schema/cn={2}inetorgperson.ldif | 49 + .../cn=config/cn=schema/cn={3}rfc2307bis.ldif | 155 + .../cn=config/cn=schema/cn={4}yast.ldif | 108 + .../cn=config/olcDatabase={-1}frontend.ldif | 25 + .../cn=config/olcDatabase={0}config.ldif | 20 + .../slapd.d/cn=config/olcDatabase={1}hdb.ldif | 36 + .../5323/slapd.d/cn=config.ldif | 16 + .../5323/slapd.d/cn=config/cn=module{0}.ldif | 15 + .../5323/slapd.d/cn=config/cn=schema.ldif | 12 + .../cn=config/cn=schema/cn={0}core.ldif | 249 + .../cn=config/cn=schema/cn={1}cosine.ldif | 178 + .../cn=config/cn=schema/cn={2}nis.ldif | 108 + .../cn=schema/cn={3}inetorgperson.ldif | 49 + .../cn=config/olcDatabase={-1}frontend.ldif | 18 + .../cn=config/olcDatabase={0}config.ldif | 15 + .../slapd.d/cn=config/olcDatabase={1}mdb.ldif | 28 + .../cn=config/olcDatabase={2}monitor.ldif | 15 + .../memberof/openldap_to_389ds-db.ldif | 73 + .../memberof/openldap_to_389ds-slapd.conf | 42 + .../memberof/slapd.d/cn=config.ldif | 43 + .../slapd.d/cn=config/cn=module{0}.ldif | 16 + 
.../memberof/slapd.d/cn=config/cn=schema.ldif | 893 + .../cn=config/cn=schema/cn={0}core.ldif | 244 + .../cn=config/cn=schema/cn={1}cosine.ldif | 177 + .../cn=schema/cn={2}inetorgperson.ldif | 48 + .../cn=config/cn=schema/cn={3}rfc2307bis.ldif | 153 + .../cn=config/cn=schema/cn={4}yast.ldif | 107 + .../cn=config/olcDatabase={-1}frontend.ldif | 26 + .../cn=config/olcDatabase={0}config.ldif | 21 + .../slapd.d/cn=config/olcDatabase={1}mdb.ldif | 33 + .../olcOverlay={0}memberof.ldif | 15 + .../olcOverlay={1}unique.ldif | 14 + .../olcOverlay={2}refint.ldif | 15 + .../saslauthd/slapd.d/cn=config.ldif | 39 + .../slapd.d/cn=config/cn=module{0}.ldif | 16 + .../slapd.d/cn=config/cn=schema.ldif | 669 + .../cn=config/cn=schema/cn={0}core.ldif | 247 + .../cn=config/cn=schema/cn={1}cosine.ldif | 178 + .../cn=schema/cn={2}inetorgperson.ldif | 49 + .../cn=config/cn=schema/cn={3}rfc2307bis.ldif | 155 + .../cn=config/cn=schema/cn={4}yast.ldif | 108 + .../cn=config/olcDatabase={-1}frontend.ldif | 25 + .../cn=config/olcDatabase={0}config.ldif | 20 + .../slapd.d/cn=config/olcDatabase={1}mdb.ldif | 31 + .../olcOverlay={0}memberof.ldif | 15 + .../olcOverlay={1}unique.ldif | 14 + .../olcOverlay={2}refint.ldif | 15 + .../data/openldap_2_389/saslauthd/suffix.ldif | 130 + .../tests/data/ticket47953/__init__.py | 0 .../tests/data/ticket47953/ticket47953.ldif | 27 + .../tests/data/ticket47988/__init__.py | 0 .../data/ticket47988/schema_ipa3.3.tar.gz | Bin 0 -> 98049 bytes .../data/ticket47988/schema_ipa4.1.tar.gz | Bin 0 -> 87335 bytes .../tests/data/ticket48212/__init__.py | 0 .../data/ticket48212/example1k_posix.ldif | 17017 ++++++++++++++++ .../tests/data/ticket49121/utf8str.txt | 1 + .../tests/data/ticket49441/binary.ldif | 858 + dirsrvtests/tests/data/tls/ca.crt | 20 + dirsrvtests/tests/data/tls/cert9.db | Bin 0 -> 36864 bytes dirsrvtests/tests/data/tls/int.crt | 20 + dirsrvtests/tests/data/tls/key4.db | Bin 0 -> 61440 bytes dirsrvtests/tests/data/tls/leaf.crt | 21 + 
dirsrvtests/tests/data/tls/pkcs11.txt | 5 + dirsrvtests/tests/data/tls/pwdfile.txt | 1 + dirsrvtests/tests/data/tls/server-export.p12 | Bin 0 -> 4573 bytes .../tests/data/tls/tls_import_ca_chain.pem | 40 + .../tests/data/tls/tls_import_crt_chain.pem | 61 + dirsrvtests/tests/data/tls/tls_import_key.pem | 32 + .../tests/data/tls/tls_import_key_chain.pem | 53 + .../longduration/automembers_long_test.py | 728 + .../longduration/db_protect_long_test.py | 372 + dirsrvtests/tests/perf/create_data.py | 289 + dirsrvtests/tests/perf/ltest.py | 75 + dirsrvtests/tests/perf/memberof_test.py | 405 + .../tests/perf/search_performance_test.py | 161 + dirsrvtests/tests/stress/README | 13 + dirsrvtests/tests/stress/__init__.py | 1 + .../stress/cos/cos_scale_template_test.py | 150 + .../tests/stress/reliabilty/__init__.py | 0 .../stress/reliabilty/reliab_7_5_test.py | 576 + .../stress/reliabilty/reliab_conn_test.py | 235 + .../replication/mmr_01_4m-2h-4c_test.py | 979 + .../stress/replication/mmr_01_4m_test.py | 582 + dirsrvtests/tests/stress/search/__init__.py | 0 dirsrvtests/tests/stress/search/simple.py | 55 + dirsrvtests/tests/suites/__init__.py | 0 dirsrvtests/tests/suites/acl/__init__.py | 3 + .../tests/suites/acl/aci_excl_filter_test.py | 154 + dirsrvtests/tests/suites/acl/acivattr_test.py | 254 + dirsrvtests/tests/suites/acl/acl_deny_test.py | 208 + dirsrvtests/tests/suites/acl/acl_test.py | 1286 ++ dirsrvtests/tests/suites/acl/conftest.py | 125 + .../acl/default_aci_allows_self_write_test.py | 133 + dirsrvtests/tests/suites/acl/deladd_test.py | 456 + .../suites/acl/enhanced_aci_modrnd_test.py | 121 + .../suites/acl/globalgroup_part2_test.py | 478 + .../tests/suites/acl/globalgroup_test.py | 438 + .../tests/suites/acl/keywords_part2_test.py | 425 + dirsrvtests/tests/suites/acl/keywords_test.py | 475 + dirsrvtests/tests/suites/acl/misc_test.py | 509 + dirsrvtests/tests/suites/acl/modify_test.py | 584 + dirsrvtests/tests/suites/acl/modrdn_test.py | 299 + 
.../suites/acl/repeated_ldap_add_test.py | 489 + dirsrvtests/tests/suites/acl/roledn_test.py | 274 + .../suites/acl/search_real_part2_test.py | 474 + .../suites/acl/search_real_part3_test.py | 480 + .../tests/suites/acl/search_real_test.py | 430 + .../suites/acl/selfdn_permissions_test.py | 353 + dirsrvtests/tests/suites/acl/syntax_test.py | 256 + dirsrvtests/tests/suites/acl/userattr_test.py | 298 + .../tests/suites/acl/valueacl_part2_test.py | 442 + dirsrvtests/tests/suites/acl/valueacl_test.py | 758 + .../tests/suites/attr_encryption/__init__.py | 3 + .../attr_encryption/attr_encryption_test.py | 459 + .../tests/suites/auth_token/__init__.py | 3 + .../suites/auth_token/basic_auth_test.py | 240 + .../suites/automember_plugin/__init__.py | 3 + .../automember_abort_test.py | 102 + .../automember_plugin/automember_mod_test.py | 170 + .../automember_plugin/automember_test.py | 308 + .../suites/automember_plugin/basic_test.py | 1182 ++ .../automember_plugin/configuration_test.py | 99 + dirsrvtests/tests/suites/backups/__init__.py | 3 + .../tests/suites/backups/backup_test.py | 114 + dirsrvtests/tests/suites/basic/__init__.py | 3 + dirsrvtests/tests/suites/basic/basic_test.py | 2480 +++ .../tests/suites/basic/ds_entrydn_test.py | 97 + .../tests/suites/basic/haproxy_test.py | 96 + dirsrvtests/tests/suites/basic/vlv.py | 148 + dirsrvtests/tests/suites/betxns/__init__.py | 3 + dirsrvtests/tests/suites/betxns/betxn_test.py | 364 + .../tests/suites/chaining_plugin/__init__.py | 3 + .../anonymous_access_denied_basic.py | 149 + .../chaining_plugin/paged_search_test.py | 91 + dirsrvtests/tests/suites/clu/__init__.py | 31 + .../tests/suites/clu/ca_cert_bundle_test.py | 166 + dirsrvtests/tests/suites/clu/clu_test.py | 95 + dirsrvtests/tests/suites/clu/dbgen_test.py | 789 + .../tests/suites/clu/dbgen_test_usan.py | 807 + dirsrvtests/tests/suites/clu/dbmon_test.py | 281 + dirsrvtests/tests/suites/clu/dbverify_test.py | 77 + .../suites/clu/dsconf_pta_add_url_test.py | 49 + 
.../tests/suites/clu/dsconf_tasks_test.py | 219 + dirsrvtests/tests/suites/clu/dsconf_test.py | 246 + .../tests/suites/clu/dsctl_acceptance_test.py | 63 + .../tests/suites/clu/dsctl_dblib_test.py | 119 + .../tests/suites/clu/dsctl_tls_test.py | 92 + .../tests/suites/clu/dsidm_account_test.py | 129 + .../suites/clu/dsidm_bulk_update_test.py | 92 + .../tests/suites/clu/dsidm_config_test.py | 218 + .../tests/suites/clu/dsidm_init_test.py | 86 + .../clu/dsidm_organizational_unit_test.py | 84 + .../tests/suites/clu/dsidm_services_test.py | 407 + .../tests/suites/clu/dsidm_user_test.py | 426 + dirsrvtests/tests/suites/clu/dsrc_test.py | 263 + dirsrvtests/tests/suites/clu/fixup_test.py | 102 + .../tests/suites/clu/repl_monitor_test.py | 278 + dirsrvtests/tests/suites/clu/schema_test.py | 144 + dirsrvtests/tests/suites/config/__init__.py | 3 + .../tests/suites/config/autotuning_test.py | 366 + .../tests/suites/config/compact_test.py | 184 + .../suites/config/config_delete_attr_test.py | 156 + .../tests/suites/config/config_test.py | 759 + .../tests/suites/config/regression_test.py | 114 + .../config/removed_config_49298_test.py | 90 + dirsrvtests/tests/suites/cos/__init__.py | 3 + dirsrvtests/tests/suites/cos/cos_test.py | 149 + .../tests/suites/cos/indirect_cos_test.py | 179 + .../tests/suites/disk_monitoring/__init__.py | 3 + .../disk_monitoring_divide_test.py | 104 + .../disk_monitoring/disk_monitoring_test.py | 773 + .../suites/disk_monitoring/disk_space_test.py | 47 + dirsrvtests/tests/suites/ds_logs/__init__.py | 3 + .../tests/suites/ds_logs/audit_log_test.py | 104 + .../tests/suites/ds_logs/ds_logs_test.py | 1713 ++ .../tests/suites/ds_logs/regression_test.py | 79 + dirsrvtests/tests/suites/ds_tools/__init__.py | 4 + .../tests/suites/ds_tools/logpipe_test.py | 78 + .../tests/suites/ds_tools/replcheck_test.py | 554 + .../tests/suites/dynamic_plugins/__init__.py | 3 + .../dynamic_plugins/dynamic_plugins_test.py | 445 + .../notice_for_restart_test.py | 51 + 
.../suites/dynamic_plugins/stress_tests.py | 131 + .../tests/suites/entryuuid/__init__.py | 3 + .../tests/suites/entryuuid/basic_test.py | 337 + .../tests/suites/entryuuid/replicated_test.py | 153 + dirsrvtests/tests/suites/export/__init__.py | 3 + .../tests/suites/export/export_test.py | 140 + dirsrvtests/tests/suites/filter/__init__.py | 3 + .../tests/suites/filter/basic_filter_test.py | 49 + .../tests/suites/filter/bitw_filter_test.py | 397 + .../suites/filter/complex_filters_test.py | 147 + .../tests/suites/filter/filter_cert_test.py | 70 + .../suites/filter/filter_index_match_test.py | 869 + .../suites/filter/filter_indexing_test.py | 170 + .../tests/suites/filter/filter_logic_test.py | 451 + .../tests/suites/filter/filter_match_test.py | 778 + .../suites/filter/filter_onelevel_aci_test.py | 49 + .../tests/suites/filter/filter_test.py | 311 + .../filter/filter_test_aci_with_optimiser.py | 120 + .../filter/filter_with_non_root_user_test.py | 391 + .../suites/filter/filterscanlimit_test.py | 406 + .../tests/suites/filter/large_filter_test.py | 165 + .../filter/rfc3673_all_oper_attrs_test.py | 190 + .../suites/filter/schema_validation_test.py | 264 + .../suites/filter/vfilter_attribute_test.py | 219 + .../suites/filter/vfilter_simple_test.py | 556 + .../tests/suites/fourwaymmr/__init__.py | 3 + .../suites/fourwaymmr/fourwaymmr_test.py | 529 + .../tests/suites/fractional/__init__.py | 3 + .../suites/fractional/fractional_test.py | 405 + .../suites/get_effective_rights/__init__.py | 3 + .../get_effective_rights/acceptance_test.py | 140 + dirsrvtests/tests/suites/gssapi/__init__.py | 3 + .../tests/suites/gssapi/simple_gssapi_test.py | 147 + .../tests/suites/gssapi_repl/__init__.py | 3 + .../suites/gssapi_repl/gssapi_repl_test.py | 193 + .../tests/suites/healthcheck/__init__.py | 3 + .../suites/healthcheck/health_config_test.py | 589 + .../suites/healthcheck/health_repl_test.py | 300 + .../healthcheck/health_security_test.py | 344 + 
.../suites/healthcheck/health_sync_test.py | 132 + .../suites/healthcheck/healthcheck_test.py | 498 + dirsrvtests/tests/suites/import/__init__.py | 3 + .../tests/suites/import/import_test.py | 634 + .../suites/import/import_warning_test.py | 123 + .../tests/suites/import/regression_test.py | 446 + dirsrvtests/tests/suites/indexes/__init__.py | 3 + .../tests/suites/indexes/entryrdn_test.py | 149 + .../tests/suites/indexes/huge_index_key.py | 144 + .../tests/suites/indexes/regression_test.py | 268 + dirsrvtests/tests/suites/ldapi/__init__.py | 3 + dirsrvtests/tests/suites/ldapi/ldapi_test.py | 162 + dirsrvtests/tests/suites/lib389/__init__.py | 3 + .../suites/lib389/config_compare_test.py | 49 + .../suites/lib389/dsldapobject/__init__.py | 0 .../lib389/dsldapobject/dn_construct_test.py | 236 + .../tests/suites/lib389/idm/__init__.py | 0 .../tests/suites/lib389/idm/account_test.py | 42 + .../suites/lib389/idm/user_compare_i2_test.py | 60 + .../lib389/idm/user_compare_m2Repl_test.py | 65 + .../suites/lib389/idm/user_compare_st_test.py | 86 + .../tests/suites/lib389/timeout_test.py | 60 + dirsrvtests/tests/suites/logging/__init__.py | 3 + .../logging/logging_compression_test.py | 125 + .../suites/logging/logging_config_test.py | 97 + .../suites/logging/security_basic_test.py | 418 + .../tests/suites/mapping_tree/__init__.py | 3 + .../suites/mapping_tree/acceptance_test.py | 67 + .../be_del_and_default_naming_attr_test.py | 101 + .../suites/mapping_tree/mt_cursed_test.py | 449 + .../referral_during_tot_init_test.py | 80 + .../suites/mapping_tree/regression_test.py | 129 + .../tests/suites/memberof_plugin/__init__.py | 3 + .../suites/memberof_plugin/fixup_test.py | 85 + .../memberof_include_scopes_test.py | 127 + .../suites/memberof_plugin/regression_test.py | 865 + .../memory_leaks/MMR_double_free_test.py | 166 + .../tests/suites/memory_leaks/__init__.py | 3 + .../suites/memory_leaks/allids_search_test.py | 70 + .../suites/memory_leaks/range_search_test.py | 71 + 
.../tests/suites/migration/__init__.py | 3 + .../suites/migration/export_data_test.py | 82 + .../suites/migration/import_data_test.py | 70 + dirsrvtests/tests/suites/monitor/__init__.py | 3 + .../suites/monitor/db_locks_monitor_test.py | 322 + .../tests/suites/monitor/monitor_test.py | 189 + .../tests/suites/openldap_2_389/__init__.py | 3 + .../suites/openldap_2_389/migrate_hdb_test.py | 49 + .../openldap_2_389/migrate_memberof_test.py | 64 + .../openldap_2_389/migrate_monitor_test.py | 57 + .../suites/openldap_2_389/migrate_test.py | 154 + .../openldap_2_389/password_migrate_test.py | 73 + .../tests/suites/paged_results/__init__.py | 3 + .../paged_results/paged_results_test.py | 1198 ++ dirsrvtests/tests/suites/password/__init__.py | 3 + .../password/password_TPR_policy_test.py | 521 + .../suites/password/password_policy_test.py | 1524 ++ .../tests/suites/password/password_test.py | 72 + .../password/pbkdf2_upgrade_plugin_test.py | 52 + .../suites/password/pw_expired_access_test.py | 70 + .../tests/suites/password/pwdAdmin_test.py | 443 + .../tests/suites/password/pwdModify_test.py | 282 + .../password/pwdPolicy_attribute_test.py | 312 + .../pwdPolicy_controls_sequence_test.py | 133 + .../password/pwdPolicy_controls_test.py | 300 + .../password/pwdPolicy_inherit_global_test.py | 212 + .../suites/password/pwdPolicy_logging_test.py | 160 + .../suites/password/pwdPolicy_syntax_test.py | 381 + .../password/pwdPolicy_temporary_password.py | 1154 ++ .../suites/password/pwdPolicy_token_test.py | 91 + .../suites/password/pwdPolicy_warning_test.py | 600 + .../tests/suites/password/pwd_algo_test.py | 190 + .../password/pwd_crypt_asterisk_test.py | 50 + .../password/pwd_lockout_bypass_test.py | 82 + .../tests/suites/password/pwd_log_test.py | 87 + .../password/pwd_upgrade_on_bind_test.py | 231 + .../tests/suites/password/pwp_gracel_test.py | 123 + .../tests/suites/password/pwp_history_test.py | 333 + dirsrvtests/tests/suites/password/pwp_test.py | 520 + 
.../password/regression_of_bugs_test.py | 622 + .../tests/suites/password/regression_test.py | 329 + dirsrvtests/tests/suites/plugins/__init__.py | 3 + .../tests/suites/plugins/acceptance_test.py | 1810 ++ .../accpol_check_all_state_attrs_test.py | 119 + .../tests/suites/plugins/accpol_test.py | 1306 ++ .../suites/plugins/alias_entries_test.py | 111 + .../plugins/attr_nsslapd-pluginarg_test.py | 211 + .../tests/suites/plugins/attruniq_test.py | 83 + dirsrvtests/tests/suites/plugins/cos_test.py | 220 + .../tests/suites/plugins/deref_aci_test.py | 149 + .../tests/suites/plugins/dna_interval_test.py | 194 + dirsrvtests/tests/suites/plugins/dna_test.py | 86 + .../tests/suites/plugins/entryusn_test.py | 245 + .../suites/plugins/managed_entry_test.py | 531 + .../tests/suites/plugins/memberof_test.py | 2831 +++ .../plugins/pluginpath_validation_test.py | 111 + .../tests/suites/plugins/referint_test.py | 149 + .../suites/plugins/rootdn_plugin_test.py | 744 + dirsrvtests/tests/suites/psearch/__init__.py | 3 + .../tests/suites/psearch/psearch_test.py | 75 + .../tests/suites/pwp_storage/__init__.py | 3 + .../tests/suites/pwp_storage/storage_test.py | 165 + .../tests/suites/referint_plugin/__init__.py | 3 + .../suites/referint_plugin/rename_test.py | 182 + .../tests/suites/replication/__init__.py | 21 + .../suites/replication/acceptance_test.py | 717 + .../suites/replication/cascading_test.py | 152 + .../replication/changelog_encryption_test.py | 91 + .../suites/replication/changelog_test.py | 783 + .../replication/changelog_trimming_test.py | 176 + .../cleanallruv_abort_certify_test.py | 136 + .../cleanallruv_abort_restart_test.py | 146 + .../replication/cleanallruv_abort_test.py | 123 + .../replication/cleanallruv_force_test.py | 187 + .../replication/cleanallruv_max_tasks_test.py | 72 + .../cleanallruv_multiple_force_test.py | 214 + .../replication/cleanallruv_restart_test.py | 161 + .../cleanallruv_shutdown_crash_test.py | 123 + .../replication/cleanallruv_stress_test.py | 
216 + .../suites/replication/cleanallruv_test.py | 149 + .../replication/conflict_resolve_test.py | 1093 + .../tests/suites/replication/conftest.py | 53 + .../suites/replication/encryption_cl5_test.py | 153 + .../replication/multiple_changelogs_test.py | 186 + .../suites/replication/promote_demote_test.py | 75 + .../suites/replication/regression_i2_test.py | 90 + .../suites/replication/regression_m2_test.py | 1050 + .../replication/regression_m2c2_test.py | 331 + .../suites/replication/regression_m3_test.py | 172 + .../replication/repl_agmt_bootstrap_test.py | 129 + .../suites/replication/replica_config_test.py | 308 + .../suites/replication/replica_roles_test.py | 125 + .../tests/suites/replication/ruvstore_test.py | 271 + .../tests/suites/replication/sasl_m2_test.py | 185 + .../replication/series_of_repl_bugs_test.py | 374 + .../suites/replication/single_master_test.py | 163 + .../replication/tls_client_auth_repl_test.py | 176 + .../replication/tombstone_fixup_test.py | 130 + .../replication/tombstone_repl_mods_test.py | 101 + .../suites/replication/tombstone_test.py | 63 + .../virtual_attribute_replication_test.py | 223 + .../wait_for_async_feature_test.py | 212 + .../tests/suites/resource_limits/__init__.py | 3 + .../suites/resource_limits/fdlimits_test.py | 129 + dirsrvtests/tests/suites/retrocl/__init__.py | 3 + .../tests/suites/retrocl/basic_test.py | 426 + .../suites/retrocl/retrocl_indexing_test.py | 76 + .../tests/suites/rewriters/__init__.py | 3 + .../tests/suites/rewriters/adfilter_test.py | 197 + .../tests/suites/rewriters/basic_test.py | 80 + dirsrvtests/tests/suites/roles/__init__.py | 3 + dirsrvtests/tests/suites/roles/basic_test.py | 774 + dirsrvtests/tests/suites/sasl/__init__.py | 3 + .../tests/suites/sasl/allowed_mechs_test.py | 211 + dirsrvtests/tests/suites/sasl/plain_test.py | 95 + .../tests/suites/sasl/regression_test.py | 181 + dirsrvtests/tests/suites/schema/__init__.py | 3 + .../tests/suites/schema/eduperson_test.py | 90 + 
.../tests/suites/schema/schema_reload_test.py | 293 + .../schema/schema_replication_origin_test.py | 235 + .../suites/schema/schema_replication_test.py | 712 + .../tests/suites/schema/schema_test.py | 635 + .../schema/x_attribute_descr_oid_test.py | 53 + dirsrvtests/tests/suites/setup_ds/__init__.py | 12 + .../tests/suites/setup_ds/db_home_test.py | 221 + .../tests/suites/setup_ds/dscreate_test.py | 601 + .../tests/suites/setup_ds/remove_test.py | 69 + .../slapi_memberof/basic_interface_test.py | 4423 ++++ dirsrvtests/tests/suites/snmp/__init__.py | 3 + dirsrvtests/tests/suites/state/__init__.py | 3 + .../tests/suites/state/mmt_state_test.py | 374 + .../tests/suites/subentries/__init__.py | 3 + .../suites/subentries/subentries_test.py | 169 + .../tests/suites/syncrepl_plugin/__init__.py | 282 + .../suites/syncrepl_plugin/basic_test.py | 734 + .../suites/syncrepl_plugin/openldap_test.py | 70 + dirsrvtests/tests/suites/syntax/__init__.py | 3 + .../tests/suites/syntax/acceptance_test.py | 248 + dirsrvtests/tests/suites/syntax/mr_test.py | 70 + dirsrvtests/tests/suites/tls/__init__.py | 3 + dirsrvtests/tests/suites/tls/cipher_test.py | 60 + dirsrvtests/tests/suites/tls/ecdsa_test.py | 214 + .../tests/suites/tls/ssl_version_test.py | 89 + .../suites/tls/tls_cert_namespace_test.py | 133 + .../tests/suites/tls/tls_check_crl_test.py | 54 + .../suites/tls/tls_import_ca_chain_test.py | 67 + .../tests/suites/tls/tls_ldaps_only_test.py | 46 + dirsrvtests/tests/suites/upgrade/__init__.py | 3 + .../upgrade/upgrade_repl_plugin_test.py | 103 + .../suites/upgrade/upgradednformat_test.py | 79 + dirsrvtests/tests/suites/vlv/__init__.py | 3 + .../tests/suites/vlv/regression_test.py | 253 + dirsrvtests/tests/suites/webui/README | 1 + dirsrvtests/tests/suites/webui/__init__.py | 218 + .../tests/suites/webui/backup/__init__.py | 3 + .../tests/suites/webui/backup/backup_test.py | 54 + .../tests/suites/webui/create/__init__.py | 3 + .../webui/create/create_instance_test.py | 223 + 
.../tests/suites/webui/database/__init__.py | 3 + .../suites/webui/database/database_test.py | 345 + .../suites/webui/ldap_browser/__init__.py | 3 + .../webui/ldap_browser/ldap_browser_test.py | 308 + .../tests/suites/webui/login/__init__.py | 3 + .../tests/suites/webui/login/login_test.py | 132 + .../tests/suites/webui/monitoring/__init__.py | 3 + .../webui/monitoring/monitoring_test.py | 270 + .../tests/suites/webui/plugins/__init__.py | 3 + .../suites/webui/plugins/plugins_test.py | 423 + .../suites/webui/replication/__init__.py | 3 + .../webui/replication/replication_test.py | 146 + .../tests/suites/webui/schema/__init__.py | 3 + .../tests/suites/webui/schema/schema_test.py | 64 + .../tests/suites/webui/server/__init__.py | 3 + .../tests/suites/webui/server/server_test.py | 351 + dirsrvtests/tests/tickets/__init__.py | 0 dirsrvtests/tests/tickets/ticket47560_test.py | 191 + dirsrvtests/tests/tickets/ticket47573_test.py | 235 + dirsrvtests/tests/tickets/ticket47619_test.py | 97 + dirsrvtests/tests/tickets/ticket47640_test.py | 82 + .../tests/tickets/ticket47653MMR_test.py | 348 + dirsrvtests/tests/tickets/ticket47676_test.py | 252 + dirsrvtests/tests/tickets/ticket47714_test.py | 213 + dirsrvtests/tests/tickets/ticket47721_test.py | 293 + dirsrvtests/tests/tickets/ticket47781_test.py | 104 + dirsrvtests/tests/tickets/ticket47787_test.py | 428 + dirsrvtests/tests/tickets/ticket47808_test.py | 101 + dirsrvtests/tests/tickets/ticket47815_test.py | 116 + dirsrvtests/tests/tickets/ticket47823_test.py | 965 + dirsrvtests/tests/tickets/ticket47828_test.py | 652 + dirsrvtests/tests/tickets/ticket47829_test.py | 629 + dirsrvtests/tests/tickets/ticket47833_test.py | 220 + .../tests/tickets/ticket47869MMR_test.py | 200 + dirsrvtests/tests/tickets/ticket47871_test.py | 108 + dirsrvtests/tests/tickets/ticket47900_test.py | 212 + dirsrvtests/tests/tickets/ticket47910_test.py | 166 + dirsrvtests/tests/tickets/ticket47920_test.py | 130 + 
dirsrvtests/tests/tickets/ticket47921_test.py | 88 + dirsrvtests/tests/tickets/ticket47927_test.py | 267 + dirsrvtests/tests/tickets/ticket47931_test.py | 179 + dirsrvtests/tests/tickets/ticket47953_test.py | 73 + dirsrvtests/tests/tickets/ticket47963_test.py | 152 + dirsrvtests/tests/tickets/ticket47970_test.py | 89 + dirsrvtests/tests/tickets/ticket47976_test.py | 168 + dirsrvtests/tests/tickets/ticket47980_test.py | 595 + dirsrvtests/tests/tickets/ticket47981_test.py | 228 + dirsrvtests/tests/tickets/ticket47988_test.py | 372 + dirsrvtests/tests/tickets/ticket48005_test.py | 365 + dirsrvtests/tests/tickets/ticket48013_test.py | 95 + dirsrvtests/tests/tickets/ticket48026_test.py | 121 + dirsrvtests/tests/tickets/ticket48109_test.py | 338 + dirsrvtests/tests/tickets/ticket48170_test.py | 43 + dirsrvtests/tests/tickets/ticket48194_test.py | 352 + dirsrvtests/tests/tickets/ticket48212_test.py | 142 + dirsrvtests/tests/tickets/ticket48214_test.py | 113 + dirsrvtests/tests/tickets/ticket48228_test.py | 274 + dirsrvtests/tests/tickets/ticket48233_test.py | 69 + dirsrvtests/tests/tickets/ticket48252_test.py | 120 + dirsrvtests/tests/tickets/ticket48265_test.py | 76 + dirsrvtests/tests/tickets/ticket48266_test.py | 288 + dirsrvtests/tests/tickets/ticket48270_test.py | 126 + dirsrvtests/tests/tickets/ticket48272_test.py | 144 + dirsrvtests/tests/tickets/ticket48294_test.py | 220 + dirsrvtests/tests/tickets/ticket48295_test.py | 144 + dirsrvtests/tests/tickets/ticket48312_test.py | 132 + dirsrvtests/tests/tickets/ticket48325_test.py | 140 + dirsrvtests/tests/tickets/ticket48342_test.py | 150 + dirsrvtests/tests/tickets/ticket48354_test.py | 65 + dirsrvtests/tests/tickets/ticket48362_test.py | 169 + dirsrvtests/tests/tickets/ticket48366_test.py | 148 + dirsrvtests/tests/tickets/ticket48370_test.py | 202 + dirsrvtests/tests/tickets/ticket48383_test.py | 103 + dirsrvtests/tests/tickets/ticket48497_test.py | 122 + dirsrvtests/tests/tickets/ticket48637_test.py | 158 + 
dirsrvtests/tests/tickets/ticket48665_test.py | 80 + dirsrvtests/tests/tickets/ticket48745_test.py | 136 + dirsrvtests/tests/tickets/ticket48746_test.py | 156 + dirsrvtests/tests/tickets/ticket48759_test.py | 227 + dirsrvtests/tests/tickets/ticket48784_test.py | 141 + dirsrvtests/tests/tickets/ticket48798_test.py | 73 + dirsrvtests/tests/tickets/ticket48799_test.py | 95 + dirsrvtests/tests/tickets/ticket48808_test.py | 311 + dirsrvtests/tests/tickets/ticket48844_test.py | 144 + dirsrvtests/tests/tickets/ticket48891_test.py | 102 + dirsrvtests/tests/tickets/ticket48893_test.py | 61 + dirsrvtests/tests/tickets/ticket48896_test.py | 139 + dirsrvtests/tests/tickets/ticket48906_test.py | 302 + dirsrvtests/tests/tickets/ticket48916_test.py | 143 + dirsrvtests/tests/tickets/ticket48944_test.py | 211 + dirsrvtests/tests/tickets/ticket48956_test.py | 136 + dirsrvtests/tests/tickets/ticket48973_test.py | 314 + dirsrvtests/tests/tickets/ticket49008_test.py | 133 + dirsrvtests/tests/tickets/ticket49020_test.py | 81 + dirsrvtests/tests/tickets/ticket49039_test.py | 127 + dirsrvtests/tests/tickets/ticket49072_test.py | 114 + dirsrvtests/tests/tickets/ticket49073_test.py | 158 + dirsrvtests/tests/tickets/ticket49076_test.py | 113 + dirsrvtests/tests/tickets/ticket49095_test.py | 95 + dirsrvtests/tests/tickets/ticket49104_test.py | 88 + dirsrvtests/tests/tickets/ticket49121_test.py | 206 + dirsrvtests/tests/tickets/ticket49122_test.py | 102 + dirsrvtests/tests/tickets/ticket49180_test.py | 124 + dirsrvtests/tests/tickets/ticket49184_test.py | 156 + dirsrvtests/tests/tickets/ticket49192_test.py | 187 + dirsrvtests/tests/tickets/ticket49227_test.py | 157 + dirsrvtests/tests/tickets/ticket49249_test.py | 150 + dirsrvtests/tests/tickets/ticket49273_test.py | 52 + dirsrvtests/tests/tickets/ticket49287_test.py | 347 + dirsrvtests/tests/tickets/ticket49290_test.py | 68 + dirsrvtests/tests/tickets/ticket49303_test.py | 113 + dirsrvtests/tests/tickets/ticket49386_test.py | 159 + 
dirsrvtests/tests/tickets/ticket49412_test.py | 75 + dirsrvtests/tests/tickets/ticket49441_test.py | 84 + dirsrvtests/tests/tickets/ticket49460_test.py | 125 + dirsrvtests/tests/tickets/ticket49463_test.py | 236 + dirsrvtests/tests/tickets/ticket49471_test.py | 89 + dirsrvtests/tests/tickets/ticket49540_test.py | 143 + .../tests/tickets/ticket49623_2_test.py | 68 + dirsrvtests/tests/tickets/ticket49658_test.py | 4276 ++++ dirsrvtests/tests/tickets/ticket49788_test.py | 96 + dirsrvtests/tests/tickets/ticket50078_test.py | 78 + dirsrvtests/tests/tickets/ticket50232_test.py | 165 + dirsrvtests/tests/tickets/ticket50234_test.py | 72 + dirsrvtests/tests/tickets/ticket548_test.py | 408 + dirsrvtests/tests/tmp/README | 10 + dirsrvtests/tests/tmp/__init__.py | 0 docker.mk | 6 + docker/389-ds-fedora/Dockerfile | 51 + docker/389-ds-suse/Dockerfile | 82 + docker/389-ds-suse/Dockerfile.release | 72 + docker/README.md | 61 + docs/custom.css | 1366 ++ docs/doc_header.html | 47 + docs/slapi.doxy.in | 2602 +++ include/base/crit.h | 211 + include/base/dbtbase.h | 227 + include/base/ereport.h | 56 + include/base/file.h | 95 + include/base/fsmutex.h | 98 + include/base/plist.h | 73 + include/base/pool.h | 92 + include/base/shexp.h | 118 + include/base/systems.h | 246 + include/base/systhr.h | 88 + include/base/util.h | 92 + include/i18n.h | 191 + include/ldaputil/cert.h | 31 + include/ldaputil/certmap.h | 280 + include/ldaputil/dbconf.h | 96 + include/ldaputil/encode.h | 33 + include/ldaputil/errors.h | 100 + include/ldaputil/init.h | 31 + include/ldaputil/ldapauth.h | 36 + include/ldaputil/ldaputil.h | 140 + include/libaccess/acl.h | 48 + include/libaccess/aclerror.h | 307 + include/libaccess/acleval.h | 44 + include/libaccess/aclglobal.h | 61 + include/libaccess/aclproto.h | 152 + include/libaccess/aclstruct.h | 284 + include/libaccess/attrec.h | 165 + include/libaccess/authdb.h | 33 + include/libaccess/dbtlibaccess.h | 172 + include/libaccess/dnfstruct.h | 61 + 
include/libaccess/ipfstruct.h | 92 + include/libaccess/las.h | 170 + include/libaccess/nsauth.h | 297 + include/libaccess/nsautherr.h | 104 + include/libaccess/nserror.h | 54 + include/libaccess/symbols.h | 99 + include/libaccess/userauth.h | 21 + include/libaccess/usi.h | 90 + include/libaccess/usrcache.h | 88 + include/libadmin/dbtlibadmin.h | 28 + include/libadmin/libadmin.h | 93 + include/netsite.h | 196 + include/public/base/systems.h | 101 + include/public/netsite.h | 29 + include/public/nsacl/aclapi.h | 373 + include/public/nsacl/acldef.h | 465 + include/public/nsacl/nserrdef.h | 113 + include/public/nsacl/plistdef.h | 70 + include/public/nsapi.h | 260 + ldap/admin/src/70-dirsrv.conf | 47 + ldap/admin/src/base-initconfig.in | 48 + ldap/admin/src/defaults.inf.in | 66 + ldap/admin/src/initconfig.in | 12 + ldap/admin/src/logconv.pl | 3032 +++ ldap/admin/src/scripts/ds-logpipe.py | 417 + ldap/admin/src/scripts/ds-replcheck | 1654 ++ ldap/admin/src/scripts/failedbinds.py | 185 + ldap/admin/src/scripts/logregex.py | 38 + ldap/admin/src/scripts/ns-slapd-gdb.py | 177 + ldap/admin/src/slapd.inf.in | 37 + ldap/admin/src/template-initconfig.in | 22 + ldap/include/avl.h | 85 + ldap/include/dblayer.h | 15 + ldap/include/disptmpl.h | 328 + ldap/include/ldaprot.h | 317 + ldap/include/portable.h | 336 + ldap/include/regex.h | 71 + ldap/include/srchpref.h | 121 + ldap/include/sysexits-compat.h | 115 + ldap/ldif/Ace.ldif | 2607 +++ ldap/ldif/European.ldif | 7592 +++++++ ldap/ldif/Eurosuffix.ldif | 15 + ldap/ldif/Example-roles.ldif | 2998 +++ ldap/ldif/Example-views.ldif | 3170 +++ ldap/ldif/Example.ldif | 2984 +++ ldap/ldif/template-baseacis.ldif.in | 5 + ldap/ldif/template-country.ldif.in | 4 + ldap/ldif/template-domain.ldif.in | 4 + ldap/ldif/template-dse-minimal.ldif.in | 628 + ldap/ldif/template-dse.ldif.in | 1270 ++ ldap/ldif/template-ldapi-autobind.ldif.in | 19 + ldap/ldif/template-ldapi-default.ldif.in | 8 + ldap/ldif/template-ldapi.ldif.in | 7 + 
ldap/ldif/template-locality.ldif.in | 4 + ldap/ldif/template-org.ldif.in | 4 + ldap/ldif/template-orgunit.ldif.in | 4 + ldap/ldif/template-sasl.ldif.in | 16 + ldap/ldif/template-state.ldif.in | 4 + ldap/ldif/template-suffix-db.ldif.in | 29 + ldap/ldif/template.ldif | 96 + ldap/libraries/libavl/avl.c | 779 + ldap/libraries/libavl/testavl.c | 127 + ldap/schema/00core.ldif | 893 + ldap/schema/01core389.ldif | 354 + ldap/schema/02common.ldif | 192 + ldap/schema/03entryuuid.ldif | 17 + ldap/schema/05rfc2927.ldif | 22 + ldap/schema/05rfc4523.ldif | 165 + ldap/schema/05rfc4524.ldif | 296 + ldap/schema/06inetorgperson.ldif | 144 + ldap/schema/10automember-plugin.ldif | 94 + ldap/schema/10dna-plugin.ldif | 219 + ldap/schema/10mep-plugin.ldif | 112 + ldap/schema/10presence.ldif | 28 + ldap/schema/10rfc2307.ldif | 55 + ldap/schema/10rfc2307bis.ldif | 348 + ldap/schema/10rfc2307compat.ldif | 287 + ldap/schema/20subscriber.ldif | 35 + ldap/schema/25java-object.ldif | 27 + ldap/schema/28pilot.ldif | 29 + ldap/schema/30ns-common.ldif | 84 + ldap/schema/50ns-admin.ldif | 45 + ldap/schema/50ns-certificate.ldif | 17 + ldap/schema/50ns-directory.ldif | 92 + ldap/schema/50ns-mail.ldif | 51 + ldap/schema/50ns-value.ldif | 27 + ldap/schema/50ns-web.ldif | 17 + ldap/schema/60acctpolicy.ldif | 60 + ldap/schema/60autofs.ldif | 57 + ldap/schema/60changelog.ldif | 105 + ldap/schema/60eduperson.ldif | 169 + ldap/schema/60inetmail.ldif | 104 + ldap/schema/60kerberos.ldif | 283 + ldap/schema/60krb5kdc.ldif | 159 + ldap/schema/60mozilla.ldif | 233 + ldap/schema/60nis.ldif | 146 + ldap/schema/60nss-ldap.ldif | 29 + ldap/schema/60pam-plugin.ldif | 23 + ldap/schema/60posix-winsync-plugin.ldif | 15 + ldap/schema/60pureftpd.ldif | 129 + ldap/schema/60qmail.ldif | 442 + ldap/schema/60radius.ldif | 559 + ldap/schema/60rfc2739.ldif | 123 + ldap/schema/60rfc3712.ldif | 486 + ldap/schema/60rfc4876.ldif | 198 + ldap/schema/60sabayon.ldif | 66 + ldap/schema/60samba.ldif | 205 + ldap/schema/60samba3.ldif | 
190 + ldap/schema/60sendmail.ldif | 54 + ldap/schema/60sudo.ldif | 121 + ldap/schema/60trust.ldif | 47 + ldap/schema/99user.ldif | 14 + ldap/schema/dsee.schema | 208 + ldap/schema/slapd-collations.conf | 263 + .../plugins/acct_usability/acct_usability.c | 428 + .../plugins/acct_usability/acct_usability.h | 35 + ldap/servers/plugins/acctpolicy/acct_config.c | 217 + ldap/servers/plugins/acctpolicy/acct_init.c | 298 + ldap/servers/plugins/acctpolicy/acct_plugin.c | 673 + ldap/servers/plugins/acctpolicy/acct_util.c | 289 + ldap/servers/plugins/acctpolicy/acctpolicy.h | 106 + .../plugins/acctpolicy/sampleconfig.ldif | 40 + .../plugins/acctpolicy/samplepolicy.ldif | 27 + ldap/servers/plugins/acl/ACL-Notes | 218 + ldap/servers/plugins/acl/acl.c | 4443 ++++ ldap/servers/plugins/acl/acl.h | 879 + ldap/servers/plugins/acl/acl_ext.c | 1124 + ldap/servers/plugins/acl/aclanom.c | 575 + ldap/servers/plugins/acl/acleffectiverights.c | 1047 + ldap/servers/plugins/acl/aclgroup.c | 472 + ldap/servers/plugins/acl/aclinit.c | 530 + ldap/servers/plugins/acl/acllas.c | 4546 +++++ ldap/servers/plugins/acl/acllist.c | 1004 + ldap/servers/plugins/acl/aclparse.c | 2391 +++ ldap/servers/plugins/acl/aclplugin.c | 375 + ldap/servers/plugins/acl/aclutil.c | 1508 ++ ldap/servers/plugins/addn/addn.c | 501 + ldap/servers/plugins/addn/addn.h | 26 + .../plugins/alias_entries/alias-entries.c | 145 + .../plugins/alias_entries/alias-entries.h | 29 + ldap/servers/plugins/automember/automember.c | 3154 +++ ldap/servers/plugins/automember/automember.h | 113 + ldap/servers/plugins/bitwise/bitwise.c | 196 + ldap/servers/plugins/chainingdb/cb.h | 476 + ldap/servers/plugins/chainingdb/cb_abandon.c | 60 + ldap/servers/plugins/chainingdb/cb_acl.c | 65 + ldap/servers/plugins/chainingdb/cb_add.c | 271 + ldap/servers/plugins/chainingdb/cb_bind.c | 313 + ldap/servers/plugins/chainingdb/cb_cleanup.c | 30 + ldap/servers/plugins/chainingdb/cb_close.c | 105 + ldap/servers/plugins/chainingdb/cb_compare.c | 250 + 
ldap/servers/plugins/chainingdb/cb_config.c | 642 + .../plugins/chainingdb/cb_conn_stateless.c | 1016 + ldap/servers/plugins/chainingdb/cb_controls.c | 301 + ldap/servers/plugins/chainingdb/cb_debug.c | 21 + ldap/servers/plugins/chainingdb/cb_delete.c | 241 + ldap/servers/plugins/chainingdb/cb_init.c | 124 + ldap/servers/plugins/chainingdb/cb_instance.c | 2107 ++ ldap/servers/plugins/chainingdb/cb_modify.c | 280 + ldap/servers/plugins/chainingdb/cb_modrdn.c | 258 + ldap/servers/plugins/chainingdb/cb_monitor.c | 245 + ldap/servers/plugins/chainingdb/cb_schema.c | 55 + ldap/servers/plugins/chainingdb/cb_search.c | 761 + ldap/servers/plugins/chainingdb/cb_start.c | 50 + ldap/servers/plugins/chainingdb/cb_temp.c | 25 + ldap/servers/plugins/chainingdb/cb_test.c | 82 + ldap/servers/plugins/chainingdb/cb_unbind.c | 32 + ldap/servers/plugins/chainingdb/cb_utils.c | 387 + ldap/servers/plugins/collation/collate.c | 509 + ldap/servers/plugins/collation/collate.h | 44 + ldap/servers/plugins/collation/config.c | 185 + ldap/servers/plugins/collation/config.h | 20 + ldap/servers/plugins/collation/debug.c | 12 + ldap/servers/plugins/collation/orfilter.c | 1058 + ldap/servers/plugins/collation/orfilter.h | 18 + ldap/servers/plugins/cos/cos.c | 275 + ldap/servers/plugins/cos/cos_cache.c | 3486 ++++ ldap/servers/plugins/cos/cos_cache.h | 26 + ldap/servers/plugins/deref/deref.c | 798 + ldap/servers/plugins/deref/deref.h | 30 + ldap/servers/plugins/distrib/Makefile | 60 + ldap/servers/plugins/distrib/Makefile.HPUX | 30 + ldap/servers/plugins/distrib/Makefile.HPUX64 | 30 + ldap/servers/plugins/distrib/Makefile.Linux | 33 + ldap/servers/plugins/distrib/Makefile.SOLARIS | 33 + .../plugins/distrib/Makefile.SOLARIS64 | 33 + .../plugins/distrib/Makefile.SOLARISx86 | 33 + ldap/servers/plugins/distrib/README | 23 + ldap/servers/plugins/distrib/distrib.c | 223 + ldap/servers/plugins/dna/addentries.sh | 2 + ldap/servers/plugins/dna/config.sh | 5 + ldap/servers/plugins/dna/del_test_entries.dns | 
6 + ldap/servers/plugins/dna/delentries.sh | 2 + ldap/servers/plugins/dna/dna.c | 4773 +++++ ldap/servers/plugins/dna/editentries.sh | 2 + ldap/servers/plugins/dna/oneentry.sh | 2 + ldap/servers/plugins/dna/posix.ldif | 38 + ldap/servers/plugins/dna/posix_one.ldif | 10 + ldap/servers/plugins/dna/posix_test.ldif | 58 + ldap/servers/plugins/dna/seeconfig.sh | 2 + ldap/servers/plugins/dna/seeentries.sh | 2 + ldap/servers/plugins/dna/subtest.ldif | 31 + ldap/servers/plugins/linkedattrs/fixup_task.c | 460 + .../plugins/linkedattrs/linked_attrs.c | 2185 ++ .../plugins/linkedattrs/linked_attrs.h | 114 + ldap/servers/plugins/memberof/memberof.c | 3436 ++++ ldap/servers/plugins/memberof/memberof.h | 109 + .../plugins/memberof/memberof_config.c | 1128 + ldap/servers/plugins/mep/mep.c | 2935 +++ ldap/servers/plugins/mep/mep.h | 102 + ldap/servers/plugins/pam_passthru/README | 208 + .../plugins/pam_passthru/pam_passthru.h | 142 + .../plugins/pam_passthru/pam_ptconfig.c | 853 + .../plugins/pam_passthru/pam_ptdebug.c | 21 + .../servers/plugins/pam_passthru/pam_ptimpl.c | 486 + .../plugins/pam_passthru/pam_ptpreop.c | 712 + ldap/servers/plugins/passthru/PT-Notes | 33 + ldap/servers/plugins/passthru/passthru.h | 141 + ldap/servers/plugins/passthru/ptbind.c | 144 + ldap/servers/plugins/passthru/ptconfig.c | 377 + ldap/servers/plugins/passthru/ptconn.c | 456 + ldap/servers/plugins/passthru/ptdebug.c | 22 + ldap/servers/plugins/passthru/ptpreop.c | 265 + ldap/servers/plugins/passthru/ptutil.c | 89 + ldap/servers/plugins/posix-winsync/README | 50 + .../plugins/posix-winsync/posix-group-func.c | 1031 + .../plugins/posix-winsync/posix-group-func.h | 27 + .../plugins/posix-winsync/posix-group-task.c | 473 + .../posix-winsync/posix-winsync-config.c | 304 + .../plugins/posix-winsync/posix-winsync.c | 2222 ++ .../plugins/posix-winsync/posix-wsp-ident.h | 58 + ldap/servers/plugins/pwdstorage/clear_pwd.c | 76 + ldap/servers/plugins/pwdstorage/crypt_pwd.c | 139 + 
.../plugins/pwdstorage/gost_yescrypt.c | 87 + ldap/servers/plugins/pwdstorage/md5.h | 71 + ldap/servers/plugins/pwdstorage/md5_pwd.c | 105 + ldap/servers/plugins/pwdstorage/md5c.c | 342 + .../plugins/pwdstorage/ns-mta-md5_pwd.bu | 408 + .../plugins/pwdstorage/ns-mta-md5_pwd.c | 87 + ldap/servers/plugins/pwdstorage/pbkdf2_pwd.c | 370 + ldap/servers/plugins/pwdstorage/pwd_init.c | 449 + ldap/servers/plugins/pwdstorage/pwd_util.c | 43 + ldap/servers/plugins/pwdstorage/pwdstorage.h | 102 + ldap/servers/plugins/pwdstorage/sha_pwd.c | 244 + ldap/servers/plugins/pwdstorage/smd5_pwd.c | 140 + ldap/servers/plugins/pwdstorage/ssha_pwd.c | 172 + ldap/servers/plugins/referint/referint.c | 1795 ++ ldap/servers/plugins/replication/cl5.h | 57 + ldap/servers/plugins/replication/cl5_api.c | 4501 ++++ ldap/servers/plugins/replication/cl5_api.h | 339 + .../servers/plugins/replication/cl5_clcache.c | 1179 ++ .../servers/plugins/replication/cl5_clcache.h | 31 + ldap/servers/plugins/replication/cl5_config.c | 513 + ldap/servers/plugins/replication/cl5_init.c | 219 + ldap/servers/plugins/replication/cl5_test.c | 766 + ldap/servers/plugins/replication/cl5_test.h | 27 + ldap/servers/plugins/replication/cl_crypt.c | 198 + ldap/servers/plugins/replication/cl_crypt.h | 25 + ldap/servers/plugins/replication/csnpl.c | 447 + ldap/servers/plugins/replication/csnpl.h | 36 + ldap/servers/plugins/replication/llist.c | 338 + ldap/servers/plugins/replication/llist.h | 33 + ldap/servers/plugins/replication/profile.c | 50 + .../plugins/replication/repl-session-plugin.h | 82 + ldap/servers/plugins/replication/repl5.h | 931 + ldap/servers/plugins/replication/repl5_agmt.c | 3519 ++++ .../plugins/replication/repl5_agmtlist.c | 865 + .../plugins/replication/repl5_backoff.c | 231 + .../plugins/replication/repl5_connection.c | 2058 ++ .../plugins/replication/repl5_inc_protocol.c | 2339 +++ ldap/servers/plugins/replication/repl5_init.c | 987 + .../plugins/replication/repl5_mtnode_ext.c | 209 + 
.../plugins/replication/repl5_plugins.c | 1468 ++ .../plugins/replication/repl5_prot_private.h | 88 + .../plugins/replication/repl5_protocol.c | 522 + .../plugins/replication/repl5_protocol_util.c | 728 + .../plugins/replication/repl5_replica.c | 4323 ++++ .../replication/repl5_replica_config.c | 1331 ++ .../replication/repl5_replica_dnhash.c | 194 + .../plugins/replication/repl5_replica_hash.c | 226 + .../plugins/replication/repl5_replsupplier.c | 160 + ldap/servers/plugins/replication/repl5_ruv.c | 2451 +++ ldap/servers/plugins/replication/repl5_ruv.h | 130 + .../plugins/replication/repl5_schedule.c | 698 + .../plugins/replication/repl5_tot_protocol.c | 929 + .../servers/plugins/replication/repl5_total.c | 815 + .../plugins/replication/repl5_updatedn_list.c | 374 + .../plugins/replication/repl_cleanallruv.c | 2509 +++ .../plugins/replication/repl_connext.c | 237 + .../plugins/replication/repl_controls.c | 302 + ldap/servers/plugins/replication/repl_ext.c | 119 + ldap/servers/plugins/replication/repl_extop.c | 1938 ++ .../plugins/replication/repl_globals.c | 154 + .../servers/plugins/replication/repl_helper.c | 92 + .../servers/plugins/replication/repl_helper.h | 77 + ldap/servers/plugins/replication/repl_opext.c | 88 + .../plugins/replication/repl_session_plugin.c | 164 + .../servers/plugins/replication/repl_shared.h | 123 + ldap/servers/plugins/replication/replutil.c | 1086 + .../replication/test_repl_session_plugin.c | 301 + .../plugins/replication/tests/dnp_sim.c | 1002 + .../plugins/replication/tests/dnp_sim2.c | 940 + .../plugins/replication/tests/dnp_sim3.c | 1399 ++ .../servers/plugins/replication/tests/makesim | 53 + ldap/servers/plugins/replication/urp.c | 2239 ++ ldap/servers/plugins/replication/urp.h | 61 + ldap/servers/plugins/replication/urp_glue.c | 241 + .../plugins/replication/urp_tombstone.c | 459 + .../plugins/replication/windows_connection.c | 1885 ++ .../replication/windows_inc_protocol.c | 1634 ++ .../plugins/replication/windows_private.c | 
2618 +++ .../replication/windows_prot_private.h | 53 + .../replication/windows_protocol_util.c | 5813 ++++++ .../replication/windows_tot_protocol.c | 443 + .../servers/plugins/replication/windowsrepl.h | 235 + .../plugins/replication/winsync-plugin.h | 252 + ldap/servers/plugins/retrocl/linktest.c | 25 + ldap/servers/plugins/retrocl/retrocl.c | 736 + ldap/servers/plugins/retrocl/retrocl.h | 150 + ldap/servers/plugins/retrocl/retrocl.txt | 111 + ldap/servers/plugins/retrocl/retrocl_cn.c | 423 + ldap/servers/plugins/retrocl/retrocl_create.c | 341 + ldap/servers/plugins/retrocl/retrocl_po.c | 709 + .../servers/plugins/retrocl/retrocl_rootdse.c | 72 + ldap/servers/plugins/retrocl/retrocl_trim.c | 502 + ldap/servers/plugins/rever/pbe.c | 575 + ldap/servers/plugins/rever/rever.c | 148 + ldap/servers/plugins/rever/rever.h | 43 + ldap/servers/plugins/roles/roles_cache.c | 2337 +++ ldap/servers/plugins/roles/roles_cache.h | 63 + ldap/servers/plugins/roles/roles_plugin.c | 357 + .../plugins/rootdn_access/rootdn_access.c | 768 + .../plugins/rootdn_access/rootdn_access.h | 27 + .../plugins/schema_reload/schema_reload.c | 283 + .../servers/plugins/statechange/statechange.c | 492 + ldap/servers/plugins/sync/README.md | 42 + ldap/servers/plugins/sync/sync.h | 237 + ldap/servers/plugins/sync/sync_init.c | 331 + ldap/servers/plugins/sync/sync_persist.c | 1110 + ldap/servers/plugins/sync/sync_refresh.c | 952 + ldap/servers/plugins/sync/sync_util.c | 991 + ldap/servers/plugins/syntaxes/bin.c | 370 + ldap/servers/plugins/syntaxes/bitstring.c | 235 + ldap/servers/plugins/syntaxes/ces.c | 445 + ldap/servers/plugins/syntaxes/cis.c | 1295 ++ ldap/servers/plugins/syntaxes/debug.c | 19 + .../servers/plugins/syntaxes/deliverymethod.c | 304 + ldap/servers/plugins/syntaxes/dn.c | 203 + ldap/servers/plugins/syntaxes/facsimile.c | 310 + ldap/servers/plugins/syntaxes/guide.c | 717 + ldap/servers/plugins/syntaxes/int.c | 285 + ldap/servers/plugins/syntaxes/nameoptuid.c | 279 + 
ldap/servers/plugins/syntaxes/numericstring.c | 285 + ldap/servers/plugins/syntaxes/phonetic.c | 650 + ldap/servers/plugins/syntaxes/sicis.c | 167 + ldap/servers/plugins/syntaxes/string.c | 956 + ldap/servers/plugins/syntaxes/syntax.h | 187 + ldap/servers/plugins/syntaxes/syntax_common.c | 88 + ldap/servers/plugins/syntaxes/tel.c | 294 + ldap/servers/plugins/syntaxes/teletex.c | 328 + ldap/servers/plugins/syntaxes/telex.c | 243 + ldap/servers/plugins/syntaxes/validate.c | 568 + ldap/servers/plugins/syntaxes/validate_task.c | 273 + ldap/servers/plugins/syntaxes/value.c | 390 + ldap/servers/plugins/uiduniq/7bit.c | 852 + ldap/servers/plugins/uiduniq/UID-Notes | 96 + ldap/servers/plugins/uiduniq/plugin-utils.h | 65 + ldap/servers/plugins/uiduniq/uid.c | 1651 ++ ldap/servers/plugins/uiduniq/utils.c | 224 + ldap/servers/plugins/usn/usn.c | 753 + ldap/servers/plugins/usn/usn.h | 28 + ldap/servers/plugins/usn/usn_cleanup.c | 371 + .../plugins/vattrsp_template/vattrsp.c | 384 + ldap/servers/plugins/views/views.c | 1720 ++ ldap/servers/plugins/whoami/whoami.c | 113 + ldap/servers/slapd/abandon.c | 160 + ldap/servers/slapd/add.c | 1097 + ldap/servers/slapd/agtmmap.c | 316 + ldap/servers/slapd/agtmmap.h | 193 + ldap/servers/slapd/apibroker.c | 296 + ldap/servers/slapd/attr.c | 1012 + ldap/servers/slapd/attrlist.c | 327 + ldap/servers/slapd/attrsyntax.c | 1664 ++ ldap/servers/slapd/auditlog.c | 494 + ldap/servers/slapd/auth.c | 532 + ldap/servers/slapd/auth.h | 23 + ldap/servers/slapd/ava.c | 69 + ldap/servers/slapd/back-ldbm/ancestorid.c | 396 + ldap/servers/slapd/back-ldbm/archive.c | 737 + ldap/servers/slapd/back-ldbm/attrcrypt.h | 41 + ldap/servers/slapd/back-ldbm/back-ldbm.h | 898 + ldap/servers/slapd/back-ldbm/backentry.c | 134 + ldap/servers/slapd/back-ldbm/cache.c | 2286 +++ ldap/servers/slapd/back-ldbm/cleanup.c | 75 + ldap/servers/slapd/back-ldbm/close.c | 52 + .../slapd/back-ldbm/db-bdb/bdb_config.c | 2522 +++ .../slapd/back-ldbm/db-bdb/bdb_import.c | 3449 ++++ 
.../back-ldbm/db-bdb/bdb_import_threads.c | 3987 ++++ .../back-ldbm/db-bdb/bdb_instance_config.c | 293 + .../slapd/back-ldbm/db-bdb/bdb_layer.c | 7239 +++++++ .../slapd/back-ldbm/db-bdb/bdb_layer.h | 289 + .../slapd/back-ldbm/db-bdb/bdb_ldif2db.c | 3329 +++ .../servers/slapd/back-ldbm/db-bdb/bdb_misc.c | 394 + .../slapd/back-ldbm/db-bdb/bdb_monitor.c | 310 + .../slapd/back-ldbm/db-bdb/bdb_perfctrs.c | 293 + .../slapd/back-ldbm/db-bdb/bdb_perfctrs.h | 61 + .../slapd/back-ldbm/db-bdb/bdb_upgrade.c | 343 + .../slapd/back-ldbm/db-bdb/bdb_verify.c | 231 + .../slapd/back-ldbm/db-bdb/bdb_version.c | 199 + .../slapd/back-ldbm/db-mdb/mdb_config.c | 1386 ++ .../slapd/back-ldbm/db-mdb/mdb_dbicmp.h | 5073 +++++ .../slapd/back-ldbm/db-mdb/mdb_debug.c | 553 + .../slapd/back-ldbm/db-mdb/mdb_debug.h | 101 + .../slapd/back-ldbm/db-mdb/mdb_import.c | 1533 ++ .../slapd/back-ldbm/db-mdb/mdb_import.h | 179 + .../back-ldbm/db-mdb/mdb_import_threads.c | 4254 ++++ .../slapd/back-ldbm/db-mdb/mdb_instance.c | 1638 ++ .../back-ldbm/db-mdb/mdb_instance_config.c | 94 + .../slapd/back-ldbm/db-mdb/mdb_layer.c | 2988 +++ .../slapd/back-ldbm/db-mdb/mdb_layer.h | 513 + .../slapd/back-ldbm/db-mdb/mdb_ldif2db.c | 1883 ++ .../servers/slapd/back-ldbm/db-mdb/mdb_misc.c | 152 + .../slapd/back-ldbm/db-mdb/mdb_monitor.c | 342 + .../slapd/back-ldbm/db-mdb/mdb_perfctrs.c | 268 + .../slapd/back-ldbm/db-mdb/mdb_perfctrs.h | 60 + ldap/servers/slapd/back-ldbm/db-mdb/mdb_txn.c | 247 + .../slapd/back-ldbm/db-mdb/mdb_upgrade.c | 32 + .../slapd/back-ldbm/db-mdb/mdb_verify.c | 21 + ldap/servers/slapd/back-ldbm/dbimpl.c | 521 + ldap/servers/slapd/back-ldbm/dbimpl.h | 173 + ldap/servers/slapd/back-ldbm/dblayer.c | 1426 ++ ldap/servers/slapd/back-ldbm/dblayer.h | 236 + ldap/servers/slapd/back-ldbm/dbsize.c | 33 + ldap/servers/slapd/back-ldbm/dbverify.c | 27 + ldap/servers/slapd/back-ldbm/dn2entry.c | 269 + ldap/servers/slapd/back-ldbm/entrystore.c | 20 + ldap/servers/slapd/back-ldbm/filterindex.c | 1147 ++ 
ldap/servers/slapd/back-ldbm/findentry.c | 393 + ldap/servers/slapd/back-ldbm/haschildren.c | 14 + ldap/servers/slapd/back-ldbm/id2entry.c | 481 + ldap/servers/slapd/back-ldbm/idl.c | 1595 ++ ldap/servers/slapd/back-ldbm/idl_common.c | 550 + ldap/servers/slapd/back-ldbm/idl_new.c | 996 + ldap/servers/slapd/back-ldbm/idl_set.c | 549 + ldap/servers/slapd/back-ldbm/idl_shim.c | 172 + ldap/servers/slapd/back-ldbm/import.c | 200 + ldap/servers/slapd/back-ldbm/import.h | 215 + ldap/servers/slapd/back-ldbm/index.c | 2480 +++ ldap/servers/slapd/back-ldbm/init.c | 166 + ldap/servers/slapd/back-ldbm/instance.c | 435 + ldap/servers/slapd/back-ldbm/ldbm_abandon.c | 23 + ldap/servers/slapd/back-ldbm/ldbm_add.c | 1511 ++ ldap/servers/slapd/back-ldbm/ldbm_attr.c | 1228 ++ ldap/servers/slapd/back-ldbm/ldbm_attrcrypt.c | 1726 ++ .../slapd/back-ldbm/ldbm_attrcrypt_config.c | 312 + ldap/servers/slapd/back-ldbm/ldbm_bind.c | 113 + ldap/servers/slapd/back-ldbm/ldbm_compare.c | 104 + ldap/servers/slapd/back-ldbm/ldbm_config.c | 1790 ++ ldap/servers/slapd/back-ldbm/ldbm_config.h | 172 + ldap/servers/slapd/back-ldbm/ldbm_delete.c | 1540 ++ ldap/servers/slapd/back-ldbm/ldbm_entryrdn.c | 3278 +++ .../slapd/back-ldbm/ldbm_index_config.c | 500 + .../slapd/back-ldbm/ldbm_instance_config.c | 1225 ++ ldap/servers/slapd/back-ldbm/ldbm_modify.c | 1246 ++ ldap/servers/slapd/back-ldbm/ldbm_modrdn.c | 2438 +++ ldap/servers/slapd/back-ldbm/ldbm_search.c | 2066 ++ ldap/servers/slapd/back-ldbm/ldbm_unbind.c | 22 + ldap/servers/slapd/back-ldbm/ldbm_usn.c | 202 + ldap/servers/slapd/back-ldbm/ldif2ldbm.c | 377 + ldap/servers/slapd/back-ldbm/matchrule.c | 153 + ldap/servers/slapd/back-ldbm/misc.c | 582 + ldap/servers/slapd/back-ldbm/nextid.c | 221 + ldap/servers/slapd/back-ldbm/parents.c | 192 + .../servers/slapd/back-ldbm/proto-back-ldbm.h | 648 + ldap/servers/slapd/back-ldbm/rmdb.c | 63 + ldap/servers/slapd/back-ldbm/seq.c | 252 + ldap/servers/slapd/back-ldbm/sort.c | 864 + 
ldap/servers/slapd/back-ldbm/start.c | 175 + ldap/servers/slapd/back-ldbm/uniqueid2entry.c | 80 + ldap/servers/slapd/back-ldbm/vlv.c | 2109 ++ ldap/servers/slapd/back-ldbm/vlv_key.c | 61 + ldap/servers/slapd/back-ldbm/vlv_key.h | 30 + ldap/servers/slapd/back-ldbm/vlv_srch.c | 840 + ldap/servers/slapd/back-ldbm/vlv_srch.h | 149 + ldap/servers/slapd/backend.c | 713 + ldap/servers/slapd/backend_manager.c | 459 + ldap/servers/slapd/bind.c | 969 + ldap/servers/slapd/bitset.c | 55 + ldap/servers/slapd/bulk_import.c | 145 + ldap/servers/slapd/ch_malloc.c | 351 + ldap/servers/slapd/charray.c | 524 + ldap/servers/slapd/compare.c | 200 + ldap/servers/slapd/computed.c | 325 + ldap/servers/slapd/config.c | 582 + ldap/servers/slapd/configdse.c | 647 + ldap/servers/slapd/connection.c | 2518 +++ ldap/servers/slapd/conntable.c | 764 + ldap/servers/slapd/control.c | 646 + ldap/servers/slapd/counters.c | 150 + ldap/servers/slapd/csn.c | 387 + ldap/servers/slapd/csngen.c | 898 + ldap/servers/slapd/csngen.h | 34 + ldap/servers/slapd/csnset.c | 321 + ldap/servers/slapd/daemon.c | 2719 +++ ldap/servers/slapd/defbackend.c | 206 + ldap/servers/slapd/delete.c | 390 + ldap/servers/slapd/detach.c | 315 + ldap/servers/slapd/disconnect_error_strings.h | 39 + ldap/servers/slapd/disconnect_errors.h | 40 + ldap/servers/slapd/dl.c | 232 + ldap/servers/slapd/dn.c | 3122 +++ ldap/servers/slapd/dse.c | 2956 +++ ldap/servers/slapd/dynalib.c | 143 + ldap/servers/slapd/entry.c | 4222 ++++ ldap/servers/slapd/entrywsi.c | 1532 ++ ldap/servers/slapd/errormap.c | 186 + ldap/servers/slapd/eventq-deprecated.c | 483 + ldap/servers/slapd/eventq.c | 519 + ldap/servers/slapd/extendop.c | 538 + ldap/servers/slapd/factory.c | 475 + ldap/servers/slapd/fe.h | 169 + ldap/servers/slapd/features.c | 53 + ldap/servers/slapd/fedse.c | 3076 +++ ldap/servers/slapd/fileio.c | 84 + ldap/servers/slapd/filter.c | 1841 ++ ldap/servers/slapd/filter.h | 45 + ldap/servers/slapd/filtercmp.c | 443 + ldap/servers/slapd/filterentry.c | 
1056 + ldap/servers/slapd/generation.c | 134 + ldap/servers/slapd/getfilelist.c | 291 + ldap/servers/slapd/getopt_ext.c | 229 + ldap/servers/slapd/getopt_ext.h | 106 + ldap/servers/slapd/getsocketpeer.c | 139 + ldap/servers/slapd/getsocketpeer.h | 16 + ldap/servers/slapd/globals.c | 82 + ldap/servers/slapd/haproxy.c | 392 + ldap/servers/slapd/haproxy.h | 76 + ldap/servers/slapd/house.c | 114 + ldap/servers/slapd/http.h | 50 + ldap/servers/slapd/index_subsystem.c | 1209 ++ ldap/servers/slapd/init.c | 52 + ldap/servers/slapd/intrinsics.h | 86 + ldap/servers/slapd/ldapi.c | 397 + ldap/servers/slapd/ldaputil.c | 1836 ++ ldap/servers/slapd/ldbmlinktest.c | 28 + ldap/servers/slapd/lenstr.c | 89 + ldap/servers/slapd/libglobs.c | 9879 +++++++++ ldap/servers/slapd/libmakefile | 130 + ldap/servers/slapd/listConfigAttrs.pl | 109 + ldap/servers/slapd/localhost.c | 238 + ldap/servers/slapd/lock.c | 75 + ldap/servers/slapd/log.c | 6768 ++++++ ldap/servers/slapd/log.h | 277 + ldap/servers/slapd/main.c | 3004 +++ ldap/servers/slapd/mapping_tree.c | 3774 ++++ ldap/servers/slapd/match.c | 330 + ldap/servers/slapd/mkDBErrStrs.py | 57 + ldap/servers/slapd/modify.c | 1472 ++ ldap/servers/slapd/modrdn.c | 727 + ldap/servers/slapd/modutil.c | 817 + ldap/servers/slapd/monitor.c | 179 + ldap/servers/slapd/object.c | 101 + ldap/servers/slapd/objset.c | 355 + ldap/servers/slapd/openldapber.h | 27 + ldap/servers/slapd/operation.c | 769 + ldap/servers/slapd/opshared.c | 1596 ++ ldap/servers/slapd/pagedresults.c | 1078 + ldap/servers/slapd/passwd_extop.c | 949 + ldap/servers/slapd/pblock.c | 4542 +++++ ldap/servers/slapd/pblock_v3.h | 225 + ldap/servers/slapd/plugin.c | 4549 +++++ ldap/servers/slapd/plugin_acl.c | 225 + ldap/servers/slapd/plugin_internal_op.c | 940 + ldap/servers/slapd/plugin_mmr.c | 71 + ldap/servers/slapd/plugin_mr.c | 777 + ldap/servers/slapd/plugin_role.c | 39 + ldap/servers/slapd/plugin_syntax.c | 1013 + ldap/servers/slapd/poll_using_select.c | 139 + 
ldap/servers/slapd/poll_using_select.h | 52 + ldap/servers/slapd/prerrstrs.h | 142 + ldap/servers/slapd/protect_db.c | 534 + ldap/servers/slapd/protect_db.h | 88 + ldap/servers/slapd/proto-slap.h | 1642 ++ ldap/servers/slapd/proxyauth.c | 227 + ldap/servers/slapd/psearch.c | 746 + ldap/servers/slapd/pw.c | 3630 ++++ ldap/servers/slapd/pw.h | 55 + ldap/servers/slapd/pw_mgmt.c | 338 + ldap/servers/slapd/pw_retry.c | 374 + ldap/servers/slapd/pw_verify.c | 183 + ldap/servers/slapd/pw_verify.h | 17 + ldap/servers/slapd/rdn.c | 1092 + ldap/servers/slapd/referral.c | 513 + ldap/servers/slapd/regex.c | 250 + ldap/servers/slapd/resourcelimit.c | 608 + ldap/servers/slapd/result.c | 2533 +++ ldap/servers/slapd/rewriters.c | 261 + ldap/servers/slapd/rootdse.c | 362 + ldap/servers/slapd/sasl_io.c | 778 + ldap/servers/slapd/sasl_map.c | 725 + ldap/servers/slapd/saslbind.c | 1252 ++ ldap/servers/slapd/schema.c | 6593 ++++++ ldap/servers/slapd/schemaparse.c | 284 + ldap/servers/slapd/search.c | 456 + ldap/servers/slapd/secerrstrs.h | 394 + ldap/servers/slapd/security_wrappers.c | 458 + ldap/servers/slapd/slap.h | 2901 +++ ldap/servers/slapd/slapd.lite.key | 11 + ldap/servers/slapd/slapd.normal.key | 12 + ldap/servers/slapd/slapd_plhash.c | 67 + ldap/servers/slapd/slapi-memberof.c | 1306 ++ ldap/servers/slapd/slapi-plugin-compat4.h | 176 + ldap/servers/slapd/slapi-plugin.h | 8503 ++++++++ ldap/servers/slapd/slapi-private.h | 1528 ++ ldap/servers/slapd/slapi2runtime.c | 341 + ldap/servers/slapd/slapi_counter.c | 398 + ldap/servers/slapd/slapi_pal.c | 417 + ldap/servers/slapd/slapi_pal.h | 55 + ldap/servers/slapd/snmp_collator.c | 1131 + ldap/servers/slapd/snmp_collator.h | 35 + ldap/servers/slapd/snoop.c | 48 + ldap/servers/slapd/sort.c | 106 + ldap/servers/slapd/ssl.c | 3067 +++ ldap/servers/slapd/sslerrstrs.h | 303 + ldap/servers/slapd/start_tls_extop.c | 441 + ldap/servers/slapd/statechange.h | 57 + ldap/servers/slapd/str2filter.c | 501 + ldap/servers/slapd/strdup.c | 34 + 
ldap/servers/slapd/stubrepl.c | 50 + ldap/servers/slapd/stubs.c | 38 + ldap/servers/slapd/subentries.c | 73 + ldap/servers/slapd/subentry.c | 65 + ldap/servers/slapd/task.c | 2880 +++ ldap/servers/slapd/tempnam.c | 53 + ldap/servers/slapd/test-plugins/Makefile | 52 + ldap/servers/slapd/test-plugins/Makefile.AIX | 39 + ldap/servers/slapd/test-plugins/Makefile.BSDI | 34 + ldap/servers/slapd/test-plugins/Makefile.HPUX | 28 + .../slapd/test-plugins/Makefile.HPUX64 | 27 + ldap/servers/slapd/test-plugins/Makefile.IRIX | 34 + .../servers/slapd/test-plugins/Makefile.Linux | 34 + ldap/servers/slapd/test-plugins/Makefile.OSF1 | 33 + .../slapd/test-plugins/Makefile.ReliantUNIX | 34 + .../slapd/test-plugins/Makefile.SOLARIS | 31 + .../slapd/test-plugins/Makefile.SOLARIS64 | 31 + .../slapd/test-plugins/Makefile.SOLARISx86 | 34 + .../slapd/test-plugins/Makefile.UnixWare | 34 + .../slapd/test-plugins/Makefile.UnixWareUDK | 34 + .../slapd/test-plugins/Makefile.server | 59 + ldap/servers/slapd/test-plugins/README | 190 + .../servers/slapd/test-plugins/clients/README | 48 + .../slapd/test-plugins/clients/ReqExtOp.java | 80 + .../slapd/test-plugins/clients/reqextop.c | 92 + ldap/servers/slapd/test-plugins/installDse.pl | 138 + ldap/servers/slapd/test-plugins/nicknames | 13 + ldap/servers/slapd/test-plugins/sampletask.c | 191 + .../slapd/test-plugins/test_slapi_memberof.c | 476 + ldap/servers/slapd/test-plugins/testbind.c | 256 + .../slapd/test-plugins/testdatainterop.c | 295 + .../slapd/test-plugins/testdbinterop.c | 177 + .../slapd/test-plugins/testdbinterop.h | 28 + ldap/servers/slapd/test-plugins/testentry.c | 135 + .../slapd/test-plugins/testextendedop.c | 192 + ldap/servers/slapd/test-plugins/testgetip.c | 142 + ldap/servers/slapd/test-plugins/testpostop.c | 372 + ldap/servers/slapd/test-plugins/testpreop.c | 243 + .../servers/slapd/test-plugins/testsaslbind.c | 144 + ldap/servers/slapd/thread_data.c | 209 + ldap/servers/slapd/time.c | 721 + ldap/servers/slapd/tools/dbscan.c | 
1536 ++ ldap/servers/slapd/tools/eggencode.c | 65 + ldap/servers/slapd/tools/ldaptool-sasl.c | 341 + ldap/servers/slapd/tools/ldaptool-sasl.h | 45 + ldap/servers/slapd/tools/ldaptool.h | 166 + ldap/servers/slapd/tools/ldclt/README | 1 + ldap/servers/slapd/tools/ldclt/data.c | 398 + .../slapd/tools/ldclt/examples/001/add.ksh | 26 + .../tools/ldclt/examples/001/add_incr.ksh | 29 + .../slapd/tools/ldclt/examples/001/config.ksh | 27 + .../slapd/tools/ldclt/examples/001/delete.ksh | 27 + .../slapd/tools/ldclt/examples/001/env.ksh | 22 + .../slapd/tools/ldclt/examples/001/search.ksh | 27 + .../slapd/tools/ldclt/examples/002/add.ksh | 30 + .../slapd/tools/ldclt/examples/002/config.ksh | 28 + .../slapd/tools/ldclt/examples/002/env.ksh | 22 + .../slapd/tools/ldclt/examples/002/ldif01.ksh | 31 + .../slapd/tools/ldclt/examples/002/ldif02.ksh | 31 + .../slapd/tools/ldclt/examples/002/ldif03.ksh | 31 + .../slapd/tools/ldclt/examples/002/ofile | 12 + .../servers/slapd/tools/ldclt/examples/README | 52 + ldap/servers/slapd/tools/ldclt/ldap-private.h | 306 + ldap/servers/slapd/tools/ldclt/ldapfct.c | 3653 ++++ ldap/servers/slapd/tools/ldclt/ldclt.c | 2876 +++ ldap/servers/slapd/tools/ldclt/ldclt.h | 620 + ldap/servers/slapd/tools/ldclt/ldclt.man | 754 + ldap/servers/slapd/tools/ldclt/ldclt.use | 87 + ldap/servers/slapd/tools/ldclt/ldcltU.c | 230 + ldap/servers/slapd/tools/ldclt/opCheck.c | 837 + ldap/servers/slapd/tools/ldclt/parser.c | 520 + ldap/servers/slapd/tools/ldclt/port.c | 85 + ldap/servers/slapd/tools/ldclt/port.h | 60 + ldap/servers/slapd/tools/ldclt/remote.h | 46 + ldap/servers/slapd/tools/ldclt/repcheck.c | 167 + ldap/servers/slapd/tools/ldclt/repworker.c | 331 + ldap/servers/slapd/tools/ldclt/scalab01.c | 950 + ldap/servers/slapd/tools/ldclt/scalab01.h | 98 + ldap/servers/slapd/tools/ldclt/srv.c | 106 + ldap/servers/slapd/tools/ldclt/threadMain.c | 1095 + ldap/servers/slapd/tools/ldclt/utils.c | 204 + ldap/servers/slapd/tools/ldclt/utils.h | 51 + 
ldap/servers/slapd/tools/ldclt/version.c | 1 + ldap/servers/slapd/tools/ldclt/workarounds.c | 89 + ldap/servers/slapd/tools/mkdep.c | 282 + ldap/servers/slapd/tools/pwenc.c | 440 + ldap/servers/slapd/unbind.c | 94 + ldap/servers/slapd/uniqueid.c | 279 + ldap/servers/slapd/uniqueidgen.c | 225 + ldap/servers/slapd/upgrade.c | 359 + ldap/servers/slapd/utf8.c | 412 + ldap/servers/slapd/utf8compare.c | 2286 +++ ldap/servers/slapd/util.c | 1797 ++ ldap/servers/slapd/uuid.c | 900 + ldap/servers/slapd/uuid.h | 119 + ldap/servers/slapd/value.c | 580 + ldap/servers/slapd/valueset.c | 1497 ++ ldap/servers/slapd/vattr.c | 2621 +++ ldap/servers/slapd/vattr_spi.h | 64 + ldap/servers/slapd/views.h | 37 + ldap/servers/snmp/ldap-agent.c | 764 + ldap/servers/snmp/ldap-agent.conf.in | 30 + ldap/servers/snmp/ldap-agent.h | 194 + ldap/servers/snmp/main.c | 557 + ldap/servers/snmp/redhat-directory.mib | 818 + ldap/systools/README | 31 + ldap/systools/getHPPatches.pl | 91 + ldap/systools/getSolPatches.pl | 72 + ldap/systools/hp_patches.c | 24 + ldap/systools/mergeSolPatches.pl | 65 + ldap/systools/pio.h | 37 + ldap/systools/sol_patches.c | 183 + ldap/systools/viewcore.c | 480 + lib/base/.cvsignore | 1 + lib/base/crit.cpp | 393 + lib/base/dnsdmain.cpp | 156 + lib/base/ereport.cpp | 68 + lib/base/file.cpp | 356 + lib/base/fsmutex.cpp | 155 + lib/base/lexer_pvt.h | 40 + lib/base/nscperror.c | 169 + lib/base/plist.cpp | 1163 ++ lib/base/plist_pvt.h | 133 + lib/base/pool.cpp | 628 + lib/base/shexp.cpp | 290 + lib/base/system.cpp | 244 + lib/base/systhr.cpp | 180 + lib/base/util.cpp | 475 + lib/ldaputil/.cvsignore | 1 + lib/ldaputil/cert.c | 504 + lib/ldaputil/certmap.c | 1612 ++ lib/ldaputil/certmap.conf | 51 + lib/ldaputil/dbconf.c | 686 + lib/ldaputil/encode.c | 151 + lib/ldaputil/errors.c | 210 + lib/ldaputil/examples/Makefile | 89 + lib/ldaputil/examples/README | 100 + lib/ldaputil/examples/init.c | 44 + lib/ldaputil/examples/plugin.c | 240 + lib/ldaputil/examples/plugin.h | 35 + 
lib/ldaputil/init.c | 138 + lib/ldaputil/ldapauth.c | 214 + lib/ldaputil/ldapu-changes.html | 406 + lib/ldaputil/ldaputili.h | 68 + lib/ldaputil/vtable.c | 210 + lib/libaccess/.cvsignore | 1 + lib/libaccess/access_plhash.cpp | 73 + lib/libaccess/access_plhash.h | 24 + lib/libaccess/acl.tab.cpp | 1703 ++ lib/libaccess/acl.tab.h | 52 + lib/libaccess/acl.yy.cpp | 2005 ++ lib/libaccess/aclcache.cpp | 492 + lib/libaccess/aclcache.h | 34 + lib/libaccess/aclerror.cpp | 254 + lib/libaccess/acleval.cpp | 564 + lib/libaccess/aclflush.cpp | 185 + lib/libaccess/aclpriv.h | 191 + lib/libaccess/aclscan.h | 33 + lib/libaccess/aclscan.l | 379 + lib/libaccess/aclspace.cpp | 45 + lib/libaccess/acltext.y | 928 + lib/libaccess/acltools.cpp | 1749 ++ lib/libaccess/aclutil.cpp | 234 + lib/libaccess/aclutil.h | 34 + lib/libaccess/authdb.cpp | 231 + lib/libaccess/las.h | 40 + lib/libaccess/lasdns.cpp | 382 + lib/libaccess/lasdns.h | 19 + lib/libaccess/lasgroup.cpp | 174 + lib/libaccess/lasip.cpp | 732 + lib/libaccess/lasip.h | 24 + lib/libaccess/lastod.cpp | 187 + lib/libaccess/lasuser.cpp | 162 + lib/libaccess/ldapauth.h | 42 + lib/libaccess/method.cpp | 171 + lib/libaccess/nsautherr.cpp | 134 + lib/libaccess/nseframe.cpp | 216 + lib/libaccess/oneeval.cpp | 1062 + lib/libaccess/oneeval.h | 24 + lib/libaccess/parse.h | 29 + lib/libaccess/permhash.h | 78 + lib/libaccess/register.cpp | 873 + lib/libaccess/symbols.cpp | 353 + lib/libaccess/usi.cpp | 379 + lib/libaccess/usrcache.cpp | 658 + lib/libaccess/yy-sed | 24 + lib/libadmin/.cvsignore | 1 + lib/libadmin/error.c | 107 + lib/libadmin/template.c | 70 + lib/libadmin/util.c | 190 + lib/libsi18n/getstrmem.h | 1156 ++ lib/libsi18n/getstrprop.c | 86 + lib/libsi18n/gsslapd.h | 33 + lib/libsi18n/makstrdb.c | 216 + lib/libsi18n/reshash.c | 292 + lib/libsi18n/reshash.h | 64 + lib/libsi18n/txtfile.c | 131 + lib/libsi18n/txtfile.h | 56 + m4/ax_compare_version.m4 | 177 + m4/db.m4 | 143 + m4/doxygen.m4 | 18 + m4/fhs.m4 | 40 + m4/netsnmp.m4 | 102 + 
m4/openldap.m4 | 166 + m4/selinux.m4 | 26 + m4/systemd.m4 | 138 + man/man1/dbscan.1 | 106 + man/man1/ds-logpipe.py.1 | 100 + man/man1/ds-replcheck.1 | 163 + man/man1/dsktune.1 | 64 + man/man1/infadd.1 | 82 + man/man1/ldap-agent.1 | 59 + man/man1/ldclt.1 | 278 + man/man1/ldif.1 | 55 + man/man1/logconv.pl.1 | 139 + man/man1/mmldif.1 | 61 + man/man1/pwdhash.1 | 62 + man/man1/rsearch.1 | 138 + man/man5/99user.ldif.5 | 54 + man/man5/certmap.conf.5 | 133 + man/man5/dirsrv.5 | 46 + man/man5/dirsrv.systemd.5 | 39 + man/man5/slapd-collations.conf.5 | 51 + man/man5/template-initconfig.5 | 62 + man/man8/ns-slapd.8 | 60 + profiling/stap/probe_do_search_detail.stp | 64 + profiling/stap/probe_log_access_detail.stp | 51 + profiling/stap/probe_op_shared_search.stp | 64 + rfcs/Makefile | 13 + rfcs/examples/template-bare-06.txt | 426 + rfcs/src/draft-wibrown-ldapssotoken-00.xml | 453 + rpm.mk | 143 + rpm/389-ds-base-devel.README | 4 + rpm/389-ds-base-git.sh | 16 + rpm/389-ds-base.spec.in | 802 + rpm/add_patches.sh | 55 + rpm/bundle-rust-downstream.py | 199 + rpm/rpmverrel.sh | 17 + src/Cargo.toml | 17 + src/README.md | 0 src/cockpit/389-console/.eslintignore | 2 + src/cockpit/389-console/.eslintrc.json | 48 + src/cockpit/389-console/.stylelintrc.json | 38 + src/cockpit/389-console/README.md | 176 + src/cockpit/389-console/audit-ci.json | 7 + src/cockpit/389-console/build.js | 136 + src/cockpit/389-console/buildAndRun.sh | 16 + .../cockpit_dist/index.css.LEGAL.txt | 0 .../389-console/cockpit_dist/index.css.gz | Bin 0 -> 141464 bytes .../389-console/cockpit_dist/index.html | 21 + .../cockpit_dist/index.js.LEGAL.txt | 72 + .../389-console/cockpit_dist/index.js.gz | Bin 0 -> 635039 bytes .../389-console/cockpit_dist/manifest.json | 13 + .../389-console/cockpit_dist/po.de.js.gz | Bin 0 -> 210 bytes .../389-console/cockpit_dist/po.ja.js.gz | Bin 0 -> 78082 bytes src/cockpit/389-console/mk_po.sh | 5 + .../org.port389.cockpit_console.metainfo.xml | 18 + 
src/cockpit/389-console/package-lock.json | 6899 +++++++ src/cockpit/389-console/package.json | 67 + src/cockpit/389-console/pkg/lib/README | 5 + .../pkg/lib/_global-variables.scss | 14 + .../lib/cockpit-components-context-menu.jsx | 124 + .../pkg/lib/cockpit-components-dialog.jsx | 362 + .../pkg/lib/cockpit-components-dialog.scss | 8 + .../lib/cockpit-components-empty-state.css | 3 + .../lib/cockpit-components-empty-state.jsx | 63 + .../cockpit-components-file-autocomplete.jsx | 210 + .../cockpit-components-firewalld-request.jsx | 167 + .../cockpit-components-firewalld-request.scss | 12 + .../lib/cockpit-components-form-helper.jsx | 43 + ...cockpit-components-inline-notification.css | 7 + ...cockpit-components-inline-notification.jsx | 96 + .../lib/cockpit-components-install-dialog.css | 43 + .../lib/cockpit-components-install-dialog.jsx | 211 + .../lib/cockpit-components-listing-panel.jsx | 87 + .../lib/cockpit-components-listing-panel.scss | 94 + .../pkg/lib/cockpit-components-logs-panel.jsx | 185 + .../lib/cockpit-components-logs-panel.scss | 15 + .../lib/cockpit-components-modifications.css | 28 + .../lib/cockpit-components-modifications.jsx | 191 + .../pkg/lib/cockpit-components-password.jsx | 140 + .../pkg/lib/cockpit-components-password.scss | 7 + .../pkg/lib/cockpit-components-plot.jsx | 513 + .../pkg/lib/cockpit-components-plot.scss | 119 + .../pkg/lib/cockpit-components-privileged.jsx | 77 + .../pkg/lib/cockpit-components-shutdown.jsx | 244 + .../pkg/lib/cockpit-components-shutdown.scss | 17 + .../pkg/lib/cockpit-components-table.jsx | 300 + .../pkg/lib/cockpit-components-table.scss | 106 + .../pkg/lib/cockpit-components-terminal.jsx | 346 + .../389-console/pkg/lib/cockpit-dark-theme.js | 70 + .../389-console/pkg/lib/cockpit-po-plugin.js | 144 + .../pkg/lib/cockpit-rsync-plugin.js | 49 + src/cockpit/389-console/pkg/lib/cockpit.js | 4451 ++++ src/cockpit/389-console/pkg/lib/console.css | 11 + .../389-console/pkg/lib/context-menu.scss | 20 + 
.../pkg/lib/credentials-ssh-private-keys.sh | 34 + .../pkg/lib/credentials-ssh-remove-key.sh | 10 + .../389-console/pkg/lib/credentials.js | 344 + src/cockpit/389-console/pkg/lib/ct-card.scss | 63 + src/cockpit/389-console/pkg/lib/dialogs.jsx | 118 + .../pkg/lib/esbuild-cleanup-plugin.js | 17 + .../389-console/pkg/lib/esbuild-common.js | 40 + .../pkg/lib/esbuild-compress-plugin.js | 50 + .../pkg/lib/esbuild-eslint-plugin.js | 32 + .../pkg/lib/esbuild-stylelint-plugin.js | 42 + .../pkg/lib/esbuild-test-html-plugin.js | 28 + .../pkg/lib/get-timesync-backend.py | 61 + src/cockpit/389-console/pkg/lib/hooks.js | 328 + src/cockpit/389-console/pkg/lib/html2po.js | 228 + src/cockpit/389-console/pkg/lib/inotify.py | 73 + src/cockpit/389-console/pkg/lib/journal.css | 161 + src/cockpit/389-console/pkg/lib/journal.js | 452 + .../pkg/lib/long-running-process.js | 166 + .../389-console/pkg/lib/machine-info.js | 259 + .../389-console/pkg/lib/manifest2po.js | 177 + .../pkg/lib/menu-select-widget.scss | 35 + .../389-console/pkg/lib/notifications.js | 167 + src/cockpit/389-console/pkg/lib/os-release.js | 38 + src/cockpit/389-console/pkg/lib/packagekit.js | 503 + src/cockpit/389-console/pkg/lib/page.scss | 197 + .../pkg/lib/patternfly/_fonts.scss | 38 + .../lib/patternfly/patternfly-5-cockpit.scss | 9 + .../patternfly/patternfly-5-overrides.scss | 577 + src/cockpit/389-console/pkg/lib/plot.js | 574 + src/cockpit/389-console/pkg/lib/polyfills.js | 25 + src/cockpit/389-console/pkg/lib/python.js | 30 + .../pkg/lib/qunit-template.html.in | 35 + .../389-console/pkg/lib/qunit-tests.js | 90 + src/cockpit/389-console/pkg/lib/serverTime.js | 787 + .../389-console/pkg/lib/serverTime.scss | 7 + src/cockpit/389-console/pkg/lib/service.js | 344 + src/cockpit/389-console/pkg/lib/superuser.js | 126 + src/cockpit/389-console/pkg/lib/table.css | 148 + src/cockpit/389-console/pkg/lib/timeformat.js | 66 + src/cockpit/389-console/pkg/lib/utils.jsx | 33 + src/cockpit/389-console/po/de.po | 39 + 
src/cockpit/389-console/po/ja.po | 14975 ++++++++++++++ src/cockpit/389-console/src/LDAPEditor.jsx | 1299 ++ src/cockpit/389-console/src/css/_fonts.scss | 37 + src/cockpit/389-console/src/css/branding.css | 3 + src/cockpit/389-console/src/css/ds.css | 617 + .../src/css/patternfly-4-cockpit.scss | 14 + src/cockpit/389-console/src/database.jsx | 1522 ++ src/cockpit/389-console/src/ds.jsx | 901 + src/cockpit/389-console/src/dsModals.jsx | 1202 ++ src/cockpit/389-console/src/index.html | 21 + src/cockpit/389-console/src/index.js | 11 + .../src/lib/database/attrEncryption.jsx | 259 + .../389-console/src/lib/database/backups.jsx | 1023 + .../389-console/src/lib/database/chaining.jsx | 1995 ++ .../src/lib/database/databaseConfig.jsx | 992 + .../src/lib/database/databaseModal.jsx | 568 + .../src/lib/database/databaseTables.jsx | 1286 ++ .../src/lib/database/globalPwp.jsx | 1665 ++ .../389-console/src/lib/database/indexes.jsx | 1289 ++ .../389-console/src/lib/database/localPwp.jsx | 3270 +++ .../src/lib/database/referrals.jsx | 502 + .../389-console/src/lib/database/suffix.jsx | 1086 + .../src/lib/database/suffixConfig.jsx | 230 + .../src/lib/database/vlvIndexes.jsx | 803 + .../src/lib/ldap_editor/lib/aciParser.jsx | 251 + .../lib/ldap_editor/lib/compactPagination.jsx | 66 + .../src/lib/ldap_editor/lib/constants.jsx | 90 + .../src/lib/ldap_editor/lib/editableTable.jsx | 890 + .../lib/ldap_editor/lib/genericPagination.jsx | 283 + .../src/lib/ldap_editor/lib/ldapNavigator.jsx | 558 + .../src/lib/ldap_editor/lib/options.jsx | 23 + .../src/lib/ldap_editor/lib/rootSuffix.jsx | 145 + .../src/lib/ldap_editor/lib/utils.jsx | 1239 ++ .../src/lib/ldap_editor/search.jsx | 1199 ++ .../src/lib/ldap_editor/tableView.jsx | 86 + .../src/lib/ldap_editor/treeView.jsx | 836 + .../src/lib/ldap_editor/wizards/aci.jsx | 573 + .../src/lib/ldap_editor/wizards/cos.jsx | 126 + .../ldap_editor/wizards/deleteOperation.jsx | 304 + .../lib/ldap_editor/wizards/genericWizard.jsx | 72 + 
.../src/lib/ldap_editor/wizards/newEntry.jsx | 198 + .../wizards/operations/aciBindRuleTable.jsx | 60 + .../ldap_editor/wizards/operations/aciNew.jsx | 1703 ++ .../wizards/operations/addCosDefinition.jsx | 1413 ++ .../wizards/operations/addCosTemplate.jsx | 1299 ++ .../wizards/operations/addGroup.jsx | 660 + .../wizards/operations/addLdapEntry.jsx | 970 + .../wizards/operations/addRole.jsx | 1033 + .../wizards/operations/addUser.jsx | 822 + .../wizards/operations/editGroup.jsx | 610 + .../wizards/operations/editLdapEntry.jsx | 1501 ++ .../wizards/operations/genericUpdate.jsx | 696 + .../wizards/operations/groupTable.jsx | 210 + .../wizards/operations/renameEntry.jsx | 489 + .../389-console/src/lib/monitor/accesslog.jsx | 189 + .../src/lib/monitor/auditfaillog.jsx | 184 + .../389-console/src/lib/monitor/auditlog.jsx | 182 + .../src/lib/monitor/chainingMonitor.jsx | 152 + .../389-console/src/lib/monitor/dbMonitor.jsx | 522 + .../389-console/src/lib/monitor/errorlog.jsx | 270 + .../src/lib/monitor/monitorModals.jsx | 1247 ++ .../src/lib/monitor/monitorTables.jsx | 2304 +++ .../src/lib/monitor/replMonAgmts.jsx | 98 + .../src/lib/monitor/replMonConflict.jsx | 545 + .../src/lib/monitor/replMonTasks.jsx | 142 + .../src/lib/monitor/replMonWinsync.jsx | 97 + .../src/lib/monitor/replMonitor.jsx | 1631 ++ .../src/lib/monitor/securitylog.jsx | 189 + .../src/lib/monitor/serverMonitor.jsx | 777 + .../src/lib/monitor/suffixMonitor.jsx | 633 + .../389-console/src/lib/notifications.jsx | 122 + .../src/lib/plugins/accountPolicy.jsx | 1078 + .../src/lib/plugins/attributeUniqueness.jsx | 851 + .../src/lib/plugins/autoMembership.jsx | 1411 ++ .../389-console/src/lib/plugins/dna.jsx | 1501 ++ .../src/lib/plugins/linkedAttributes.jsx | 621 + .../src/lib/plugins/managedEntries.jsx | 1602 ++ .../389-console/src/lib/plugins/memberOf.jsx | 1705 ++ .../src/lib/plugins/pamPassThru.jsx | 936 + .../lib/plugins/passthroughAuthentication.jsx | 663 + .../src/lib/plugins/pluginBasicConfig.jsx | 359 
+ .../src/lib/plugins/pluginTables.jsx | 1940 ++ .../src/lib/plugins/referentialIntegrity.jsx | 1228 ++ .../src/lib/plugins/retroChangelog.jsx | 534 + .../src/lib/plugins/rootDNAccessControl.jsx | 779 + .../389-console/src/lib/plugins/usn.jsx | 379 + .../389-console/src/lib/plugins/winsync.jsx | 477 + .../src/lib/replication/replAgmts.jsx | 1806 ++ .../src/lib/replication/replChangelog.jsx | 390 + .../src/lib/replication/replConfig.jsx | 906 + .../src/lib/replication/replModals.jsx | 2222 ++ .../src/lib/replication/replSuffix.jsx | 499 + .../src/lib/replication/replTables.jsx | 486 + .../src/lib/replication/replTasks.jsx | 500 + .../src/lib/replication/winsyncAgmts.jsx | 1392 ++ .../src/lib/schema/schemaModals.jsx | 707 + .../src/lib/schema/schemaTables.jsx | 850 + .../lib/security/certificateManagement.jsx | 1495 ++ .../389-console/src/lib/security/ciphers.jsx | 522 + .../src/lib/security/securityModals.jsx | 1133 + .../src/lib/security/securityTables.jsx | 786 + .../389-console/src/lib/server/accessLog.jsx | 902 + .../389-console/src/lib/server/auditLog.jsx | 894 + .../src/lib/server/auditfailLog.jsx | 748 + .../389-console/src/lib/server/errorLog.jsx | 908 + .../389-console/src/lib/server/ldapi.jsx | 438 + .../389-console/src/lib/server/sasl.jsx | 775 + .../src/lib/server/securityLog.jsx | 896 + .../src/lib/server/serverModals.jsx | 248 + .../src/lib/server/serverTables.jsx | 278 + .../389-console/src/lib/server/settings.jsx | 1681 ++ .../389-console/src/lib/server/tuning.jsx | 666 + src/cockpit/389-console/src/lib/tools.jsx | 322 + src/cockpit/389-console/src/manifest.json | 13 + src/cockpit/389-console/src/monitor.jsx | 1206 ++ src/cockpit/389-console/src/plugins.jsx | 731 + src/cockpit/389-console/src/replication.jsx | 1104 + src/cockpit/389-console/src/schema.jsx | 1589 ++ src/cockpit/389-console/src/security.jsx | 1340 ++ src/cockpit/389-console/src/server.jsx | 362 + src/cockpit/389-console/stats.json | 0 src/contrib/README.md | 6 + 
src/contrib/back-ldif/add.c | 198 + src/contrib/back-ldif/back-ldif.h | 91 + src/contrib/back-ldif/bind.c | 116 + src/contrib/back-ldif/close.c | 69 + src/contrib/back-ldif/compare.c | 89 + src/contrib/back-ldif/config.c | 206 + src/contrib/back-ldif/delete.c | 137 + src/contrib/back-ldif/init.c | 110 + src/contrib/back-ldif/modify.c | 564 + src/contrib/back-ldif/modrdn.c | 282 + src/contrib/back-ldif/monitor.c | 128 + src/contrib/back-ldif/search.c | 196 + src/contrib/back-ldif/start.c | 39 + src/contrib/back-ldif/unbind.c | 35 + src/lib389/.coveragerc | 2 + src/lib389/.gitignore | 10 + src/lib389/LICENSE | 675 + src/lib389/MANIFEST.in | 1 + src/lib389/README.md | 26 + src/lib389/cli/dsconf | 156 + src/lib389/cli/dscontainer | 481 + src/lib389/cli/dscreate | 111 + src/lib389/cli/dsctl | 175 + src/lib389/cli/dsidm | 163 + src/lib389/cli/dsrate | 149 + src/lib389/cli/openldap_to_ds | 264 + src/lib389/doc/Makefile | 230 + src/lib389/doc/source/accesscontrol.rst | 6 + src/lib389/doc/source/aci.rst | 66 + src/lib389/doc/source/agreement.rst | 27 + src/lib389/doc/source/backend.rst | 29 + src/lib389/doc/source/changelog.rst | 22 + src/lib389/doc/source/conf.py | 339 + src/lib389/doc/source/config.rst | 47 + src/lib389/doc/source/databases.rst | 7 + src/lib389/doc/source/dirsrv_log.rst | 30 + src/lib389/doc/source/domain.rst | 21 + src/lib389/doc/source/dseldif.rst | 30 + src/lib389/doc/source/group.rst | 44 + src/lib389/doc/source/guidelines.rst | 624 + src/lib389/doc/source/identitymanagement.rst | 10 + src/lib389/doc/source/index.rst | 52 + src/lib389/doc/source/indexes.rst | 53 + src/lib389/doc/source/ldclt.rst | 42 + src/lib389/doc/source/mappingtree.rst | 31 + src/lib389/doc/source/monitor.rst | 19 + src/lib389/doc/source/need_to_be_triaged.rst | 18 + src/lib389/doc/source/organizationalunit.rst | 41 + src/lib389/doc/source/passwd.rst | 34 + src/lib389/doc/source/paths.rst | 43 + src/lib389/doc/source/plugin.rst | 35 + src/lib389/doc/source/replica.rst | 87 + 
src/lib389/doc/source/replication.rst | 9 + src/lib389/doc/source/repltools.rst | 43 + src/lib389/doc/source/rootdse.rst | 25 + src/lib389/doc/source/schema.rst | 62 + src/lib389/doc/source/services.rst | 40 + src/lib389/doc/source/task.rst | 59 + src/lib389/doc/source/user.rst | 53 + src/lib389/doc/source/utils.rst | 23 + src/lib389/dsadmin.pylintrc | 236 + src/lib389/lib389/__init__.py | 3513 ++++ src/lib389/lib389/_constants.py | 377 + src/lib389/lib389/_controls.py | 240 + src/lib389/lib389/_entry.py | 645 + src/lib389/lib389/_ldifconn.py | 52 + src/lib389/lib389/_mapped_object.py | 1569 ++ src/lib389/lib389/_mapped_object_lint.py | 170 + src/lib389/lib389/_replication.py | 211 + src/lib389/lib389/aci.py | 237 + src/lib389/lib389/agreement.py | 1267 ++ src/lib389/lib389/backend.py | 1200 ++ src/lib389/lib389/chaining.py | 185 + src/lib389/lib389/cli_base/__init__.py | 475 + src/lib389/lib389/cli_base/dsrc.py | 217 + src/lib389/lib389/cli_conf/__init__.py | 170 + src/lib389/lib389/cli_conf/backend.py | 1197 ++ src/lib389/lib389/cli_conf/backup.py | 65 + src/lib389/lib389/cli_conf/chaining.py | 324 + src/lib389/lib389/cli_conf/config.py | 143 + src/lib389/lib389/cli_conf/conflicts.py | 127 + .../lib389/cli_conf/directory_manager.py | 33 + src/lib389/lib389/cli_conf/monitor.py | 326 + src/lib389/lib389/cli_conf/plugin.py | 148 + .../lib389/cli_conf/plugins/__init__.py | 0 .../lib389/cli_conf/plugins/accountpolicy.py | 135 + .../lib389/cli_conf/plugins/attruniq.py | 172 + .../lib389/cli_conf/plugins/automember.py | 289 + .../lib389/cli_conf/plugins/contentsync.py | 43 + src/lib389/lib389/cli_conf/plugins/dna.py | 243 + .../lib389/cli_conf/plugins/entryuuid.py | 65 + .../cli_conf/plugins/ldappassthrough.py | 157 + .../lib389/cli_conf/plugins/linkedattr.py | 145 + .../lib389/cli_conf/plugins/managedentries.py | 248 + .../lib389/cli_conf/plugins/memberof.py | 169 + .../lib389/cli_conf/plugins/pampassthrough.py | 133 + .../lib389/cli_conf/plugins/posix_winsync.py | 80 
+ .../lib389/cli_conf/plugins/referint.py | 118 + .../lib389/cli_conf/plugins/retrochangelog.py | 75 + .../lib389/cli_conf/plugins/rootdn_ac.py | 135 + src/lib389/lib389/cli_conf/plugins/usn.py | 76 + src/lib389/lib389/cli_conf/pwpolicy.py | 320 + src/lib389/lib389/cli_conf/replication.py | 1786 ++ src/lib389/lib389/cli_conf/saslmappings.py | 125 + src/lib389/lib389/cli_conf/schema.py | 413 + src/lib389/lib389/cli_conf/security.py | 667 + src/lib389/lib389/cli_ctl/__init__.py | 8 + src/lib389/lib389/cli_ctl/cockpit.py | 88 + src/lib389/lib389/cli_ctl/dbgen-FamilyNames | 13419 ++++++++++++ src/lib389/lib389/cli_ctl/dbgen-GivenNames | 8606 ++++++++ src/lib389/lib389/cli_ctl/dbgen-OrgUnits | 5 + src/lib389/lib389/cli_ctl/dbgen.py | 585 + src/lib389/lib389/cli_ctl/dblib.py | 528 + src/lib389/lib389/cli_ctl/dbtasks.py | 173 + src/lib389/lib389/cli_ctl/dsrc.py | 435 + src/lib389/lib389/cli_ctl/health.py | 175 + src/lib389/lib389/cli_ctl/instance.py | 325 + src/lib389/lib389/cli_ctl/nsstate.py | 64 + src/lib389/lib389/cli_ctl/tls.py | 182 + src/lib389/lib389/cli_idm/__init__.py | 188 + src/lib389/lib389/cli_idm/account.py | 279 + src/lib389/lib389/cli_idm/client_config.py | 330 + src/lib389/lib389/cli_idm/group.py | 151 + src/lib389/lib389/cli_idm/initialise.py | 22 + .../lib389/cli_idm/organizationalunit.py | 95 + src/lib389/lib389/cli_idm/posixgroup.py | 102 + src/lib389/lib389/cli_idm/role.py | 194 + src/lib389/lib389/cli_idm/service.py | 94 + src/lib389/lib389/cli_idm/uniquegroup.py | 147 + src/lib389/lib389/cli_idm/user.py | 104 + src/lib389/lib389/clitools/__init__.py | 77 + src/lib389/lib389/clitools/ds_aci_lint | 41 + src/lib389/lib389/clitools/ds_backend_getattr | 42 + src/lib389/lib389/clitools/ds_backend_list | 39 + src/lib389/lib389/clitools/ds_backend_setattr | 43 + .../lib389/clitools/ds_krb_create_keytab | 40 + .../lib389/clitools/ds_krb_create_principal | 38 + .../lib389/clitools/ds_krb_create_realm | 34 + .../lib389/clitools/ds_krb_destroy_realm | 34 + 
src/lib389/lib389/clitools/ds_monitor_backend | 39 + src/lib389/lib389/clitools/ds_monitor_server | 35 + .../clitools/ds_schema_attributetype_list | 33 + .../clitools/ds_schema_attributetype_query | 46 + src/lib389/lib389/clitools/ds_setup | 77 + src/lib389/lib389/config.py | 580 + src/lib389/lib389/configurations/__init__.py | 51 + src/lib389/lib389/configurations/config.py | 44 + .../lib389/configurations/config_001003006.py | 129 + .../lib389/configurations/config_001004000.py | 131 + .../lib389/configurations/config_001004002.py | 131 + .../lib389/configurations/config_002003000.py | 135 + src/lib389/lib389/configurations/sample.py | 145 + src/lib389/lib389/conflicts.py | 182 + src/lib389/lib389/cos.py | 205 + src/lib389/lib389/dbgen.py | 740 + src/lib389/lib389/dirsrv_log.py | 399 + src/lib389/lib389/ds_instance.py | 97 + src/lib389/lib389/dseldif.py | 487 + src/lib389/lib389/encrypted_attributes.py | 46 + src/lib389/lib389/exceptions.py | 43 + src/lib389/lib389/extended_operations.py | 48 + src/lib389/lib389/extensibleobject.py | 53 + src/lib389/lib389/idm/__init__.py | 0 src/lib389/lib389/idm/account.py | 399 + src/lib389/lib389/idm/country.py | 53 + src/lib389/lib389/idm/directorymanager.py | 53 + src/lib389/lib389/idm/domain.py | 31 + src/lib389/lib389/idm/group.py | 221 + src/lib389/lib389/idm/ipadomain.py | 36 + src/lib389/lib389/idm/nscontainer.py | 91 + src/lib389/lib389/idm/organization.py | 53 + src/lib389/lib389/idm/organizationalrole.py | 54 + src/lib389/lib389/idm/organizationalunit.py | 54 + src/lib389/lib389/idm/posixgroup.py | 91 + src/lib389/lib389/idm/role.py | 352 + src/lib389/lib389/idm/services.py | 88 + src/lib389/lib389/idm/user.py | 245 + src/lib389/lib389/index.py | 263 + src/lib389/lib389/instance/__init__.py | 7 + src/lib389/lib389/instance/options.py | 446 + src/lib389/lib389/instance/remove.py | 174 + src/lib389/lib389/instance/setup.py | 1226 ++ src/lib389/lib389/ldap_objs.py | 30 + src/lib389/lib389/ldapi.py | 70 + 
src/lib389/lib389/ldclt.py | 166 + src/lib389/lib389/lint.py | 513 + src/lib389/lib389/mappingTree.py | 467 + src/lib389/lib389/migrate/__init__.py | 0 src/lib389/lib389/migrate/ldif.py | 42 + .../lib389/migrate/openldap/__init__.py | 0 src/lib389/lib389/migrate/openldap/config.py | 328 + src/lib389/lib389/migrate/openldap/schema.py | 9 + src/lib389/lib389/migrate/plan.py | 900 + src/lib389/lib389/mit_krb5.py | 227 + src/lib389/lib389/monitor.py | 441 + src/lib389/lib389/ns-slapd.valgrind | 29 + src/lib389/lib389/nss_ssl.py | 1403 ++ src/lib389/lib389/passwd.py | 80 + src/lib389/lib389/password_plugins.py | 65 + src/lib389/lib389/paths.py | 311 + src/lib389/lib389/perftools.py | 568 + src/lib389/lib389/plugins.py | 2438 +++ src/lib389/lib389/properties.py | 505 + src/lib389/lib389/pwpolicy.py | 365 + src/lib389/lib389/referral.py | 35 + src/lib389/lib389/replica.py | 2761 +++ src/lib389/lib389/repltools.py | 306 + src/lib389/lib389/rewriters.py | 92 + src/lib389/lib389/rootdse.py | 51 + src/lib389/lib389/sasl.py | 36 + src/lib389/lib389/saslmap.py | 59 + src/lib389/lib389/schema.py | 827 + src/lib389/lib389/suffix.py | 96 + src/lib389/lib389/tasks.py | 1621 ++ src/lib389/lib389/tests/__init__.py | 7 + src/lib389/lib389/tests/aci_parse_test.py | 65 + src/lib389/lib389/tests/aci_test.py | 122 + src/lib389/lib389/tests/agreement_test.py | 362 + src/lib389/lib389/tests/backend_test.py | 365 + src/lib389/lib389/tests/cli/__init__.py | 111 + .../lib389/tests/cli/adm_instance_test.py | 30 + .../lib389/tests/cli/conf_backend_test.py | 513 + .../lib389/tests/cli/conf_backup_test.py | 50 + .../lib389/tests/cli/conf_chaining_test.py | 199 + .../lib389/tests/cli/conf_conflicts_test.py | 162 + .../tests/cli/conf_directory_manager_test.py | 22 + .../lib389/tests/cli/conf_plugin_test.py | 53 + .../lib389/tests/cli/conf_plugins/__init__.py | 0 .../tests/cli/conf_plugins/automember_test.py | 126 + .../tests/cli/conf_plugins/memberof_test.py | 490 + 
.../tests/cli/conf_plugins/referint_test.py | 119 + .../tests/cli/conf_plugins/rootdn_ac_test.py | 281 + .../lib389/tests/cli/conf_plugins/usn_test.py | 53 + .../lib389/tests/cli/conf_pwpolicy_test.py | 160 + .../lib389/tests/cli/conf_schema_test.py | 1 + .../lib389/tests/cli/ctl_dbtasks_test.py | 74 + src/lib389/lib389/tests/cli/dsrc_test.py | 180 + src/lib389/lib389/tests/cli/idm_group_test.py | 89 + .../lib389/tests/cli/idm_user_modify_test.py | 95 + src/lib389/lib389/tests/cli/idm_user_test.py | 93 + src/lib389/lib389/tests/config.py | 47 + src/lib389/lib389/tests/config_test.py | 87 + .../lib389/tests/configurations/__init__.py | 0 .../configurations/config_001003006_test.py | 47 + .../configurations/config_001004000_test.py | 231 + src/lib389/lib389/tests/conftest.py | 121 + src/lib389/lib389/tests/dereference_test.py | 99 + src/lib389/lib389/tests/dirsrv_log_test.py | 123 + src/lib389/lib389/tests/dirsrv_test.py | 215 + src/lib389/lib389/tests/dseldif_test.py | 130 + src/lib389/lib389/tests/dsversion_test.py | 12 + .../lib389/tests/effective_rights_test.py | 93 + src/lib389/lib389/tests/entry_test.py | 98 + src/lib389/lib389/tests/healthcheck_test.py | 61 + src/lib389/lib389/tests/idm/__init__.py | 0 src/lib389/lib389/tests/idm/account_test.py | 82 + src/lib389/lib389/tests/idm/services_test.py | 63 + .../lib389/tests/idm/user_and_group_test.py | 101 + src/lib389/lib389/tests/index_test.py | 69 + src/lib389/lib389/tests/ldclt_test.py | 64 + .../lib389/tests/mapped_object_lint_test.py | 86 + src/lib389/lib389/tests/mapped_object_test.py | 23 + src/lib389/lib389/tests/mappingtree_test.py | 64 + src/lib389/lib389/tests/nss_ssl_test.py | 95 + src/lib389/lib389/tests/paths_test.py | 36 + src/lib389/lib389/tests/plugin_test.py | 137 + src/lib389/lib389/tests/plugins/__init__.py | 0 .../lib389/tests/plugins/memberof_test.py | 354 + .../lib389/tests/plugins/referint_test.py | 83 + src/lib389/lib389/tests/plugins/usn_test.py | 240 + 
src/lib389/lib389/tests/plugins/utils.py | 124 + src/lib389/lib389/tests/referral_test.py | 36 + src/lib389/lib389/tests/replica_test.py | 375 + src/lib389/lib389/tests/schema_test.py | 121 + src/lib389/lib389/tests/suffix_test.py | 105 + src/lib389/lib389/tests/test_module_proxy.py | 79 + src/lib389/lib389/tests/tls_external_test.py | 82 + src/lib389/lib389/tests/utils_test.py | 192 + src/lib389/lib389/tombstone.py | 215 + src/lib389/lib389/tools.py | 752 + src/lib389/lib389/topologies.py | 542 + src/lib389/lib389/utils.py | 1986 ++ src/lib389/requirements.txt | 9 + src/lib389/setup.cfg | 9 + src/lib389/setup.py.in | 110 + src/lib389/tox.ini | 6 + src/librnsslapd/Cargo.toml | 26 + src/librnsslapd/README.md | 4 + src/librnsslapd/build.rs | 16 + src/librnsslapd/src/lib.rs | 68 + src/librslapd/Cargo.toml | 23 + src/librslapd/README.md | 3 + src/librslapd/build.rs | 16 + src/librslapd/src/cache.rs | 202 + src/librslapd/src/lib.rs | 55 + src/librslapd/src/sds/lib.rs | 30 + src/librslapd/src/sds/tqueue.rs | 132 + src/libsds/external/csiphash/csiphash.c | 141 + src/libsds/include/sds.h | 1429 ++ src/pkgconfig/dirsrv.pc.in | 11 + src/pkgconfig/libsds.pc.in | 12 + src/pkgconfig/svrcore.pc.in | 11 + src/plugins/entryuuid/Cargo.toml | 22 + src/plugins/entryuuid/src/lib.rs | 267 + src/plugins/entryuuid_syntax/Cargo.toml | 22 + src/plugins/entryuuid_syntax/src/lib.rs | 145 + src/plugins/pwdchan/Cargo.toml | 24 + src/plugins/pwdchan/src/lib.rs | 264 + src/plugins/pwdchan/src/pbkdf2.rs | 44 + src/plugins/pwdchan/src/pbkdf2_sha1.rs | 44 + src/plugins/pwdchan/src/pbkdf2_sha256.rs | 43 + src/plugins/pwdchan/src/pbkdf2_sha512.rs | 43 + src/rewriters/adfilter.c | 431 + src/slapd/Cargo.toml | 11 + src/slapd/src/error.rs | 6 + src/slapd/src/fernet.rs | 30 + src/slapd/src/lib.rs | 2 + src/slapi_r_plugin/Cargo.toml | 23 + src/slapi_r_plugin/README.md | 216 + src/slapi_r_plugin/build.rs | 9 + src/slapi_r_plugin/src/backend.rs | 72 + src/slapi_r_plugin/src/ber.rs | 92 + 
src/slapi_r_plugin/src/charray.rs | 32 + src/slapi_r_plugin/src/constants.rs | 212 + src/slapi_r_plugin/src/dn.rs | 108 + src/slapi_r_plugin/src/entry.rs | 100 + src/slapi_r_plugin/src/error.rs | 66 + src/slapi_r_plugin/src/init.c | 8 + src/slapi_r_plugin/src/lib.rs | 41 + src/slapi_r_plugin/src/log.rs | 87 + src/slapi_r_plugin/src/macros.rs | 940 + src/slapi_r_plugin/src/modify.rs | 117 + src/slapi_r_plugin/src/pblock.rs | 323 + src/slapi_r_plugin/src/plugin.rs | 133 + src/slapi_r_plugin/src/search.rs | 127 + src/slapi_r_plugin/src/syntax_plugin.rs | 142 + src/slapi_r_plugin/src/task.rs | 148 + src/slapi_r_plugin/src/value.rs | 247 + src/svrcore/AUTHORS | 3 + src/svrcore/COPYING | 2 + src/svrcore/ChangeLog | 0 src/svrcore/INSTALL | 370 + src/svrcore/INSTALL.win | 77 + src/svrcore/LICENSE | 27 + src/svrcore/Makefile.am | 54 + src/svrcore/NEWS | 31 + src/svrcore/README | 58 + src/svrcore/TODO | 3 + src/svrcore/autogen.sh | 92 + src/svrcore/configure.ac | 69 + src/svrcore/examples/svrcore_driver.c | 140 + src/svrcore/m4/nspr.m4 | 110 + src/svrcore/m4/nss.m4 | 114 + src/svrcore/m4/systemd.m4 | 41 + src/svrcore/src/Makefile.am | 30 + src/svrcore/src/Makefile.win | 77 + src/svrcore/src/alt.c | 107 + src/svrcore/src/cache.c | 227 + src/svrcore/src/errors.c | 28 + src/svrcore/src/file.c | 236 + src/svrcore/src/key.ico | Bin 0 -> 766 bytes src/svrcore/src/logo.ico | Bin 0 -> 766 bytes src/svrcore/src/manifest.mn | 29 + src/svrcore/src/ntgetpin.c | 146 + src/svrcore/src/ntgetpin.rc | 92 + src/svrcore/src/ntresource.h | 26 + src/svrcore/src/pin.c | 79 + src/svrcore/src/pk11.c | 296 + src/svrcore/src/std-systemd.c | 234 + src/svrcore/src/std.c | 167 + src/svrcore/src/svrcore.h | 308 + src/svrcore/src/systemd-ask-pass.c | 469 + src/svrcore/src/user.c | 167 + test/libslapd/counters/atomic.c | 69 + test/libslapd/filter/optimise.c | 83 + test/libslapd/haproxy/parse.c | 340 + test/libslapd/operation/v3_compat.c | 60 + test/libslapd/pblock/analytics.c | 35 + 
test/libslapd/pblock/pblock_accessors.txt | 315 + .../libslapd/pblock/pblock_accessors_freq.txt | 630 + test/libslapd/pblock/v3_compat.c | 220 + test/libslapd/schema/filter_validate.c | 124 + test/libslapd/spal/meminfo.c | 68 + test/libslapd/test.c | 42 + test/main.c | 20 + test/pblock_analyse.py | 148 + test/plugins/pwdstorage/pbkdf2.c | 80 + test/plugins/test.c | 31 + test/test_slapd.h | 74 + wrappers/ds_selinux_restorecon.sh.in | 34 + wrappers/ds_systemd_ask_password_acl.in | 34 + wrappers/initscript.in | 360 + wrappers/ldap-agent-initscript.in | 227 + wrappers/systemd-snmp.service.in | 28 + wrappers/systemd.group.in | 7 + .../systemd.template.service.custom.conf.in | 111 + wrappers/systemd.template.service.in | 36 + .../systemd.template.service.xsan.conf.in | 11 + 2141 files changed, 880466 insertions(+) create mode 100644 .cargo/config.in create mode 100644 .clang-format create mode 100644 .copr/Makefile create mode 100644 .cvsignore create mode 100644 .dockerignore create mode 100644 .github/ISSUE_TEMPLATE/bug_report.md create mode 100644 .github/ISSUE_TEMPLATE/feature_request.md create mode 100644 .github/daemon.json create mode 100644 .github/scripts/generate_matrix.py create mode 100644 .github/workflows/compile.yml create mode 100644 .github/workflows/lmdbpytest.yml create mode 100644 .github/workflows/npm.yml create mode 100644 .github/workflows/pytest.yml create mode 100644 .github/workflows/release.yml create mode 100644 .github/workflows/validate.yml create mode 100644 .gitignore create mode 100644 LICENSE create mode 100644 LICENSE.GPLv3+ create mode 100644 LICENSE.mit create mode 100644 LICENSE.openldap create mode 100644 LICENSE.openssl create mode 100644 Makefile.am create mode 100644 README.md create mode 100644 VERSION.sh create mode 100755 autogen.sh create mode 100755 buildnum.py create mode 100644 configure.ac create mode 100644 dirsrvtests/README create mode 100644 dirsrvtests/__init__.py create mode 100644 
dirsrvtests/check_for_duplicate_ids.py create mode 100644 dirsrvtests/conftest.py create mode 100755 dirsrvtests/create_test.py create mode 100644 dirsrvtests/pytest.ini create mode 100644 dirsrvtests/report.py create mode 100644 dirsrvtests/requirements.txt create mode 100644 dirsrvtests/testimony.yaml create mode 100644 dirsrvtests/tests/__init__.py create mode 100644 dirsrvtests/tests/data/README create mode 100644 dirsrvtests/tests/data/__init__.py create mode 100644 dirsrvtests/tests/data/basic/__init__.py create mode 100644 dirsrvtests/tests/data/basic/dse.ldif.broken create mode 100644 dirsrvtests/tests/data/entryuuid/localhost-userRoot-2020_03_30_13_14_47.ldif create mode 100644 dirsrvtests/tests/data/entryuuid/localhost-userRoot-invalid.ldif create mode 100644 dirsrvtests/tests/data/longduration/db_protect_long_test_reference_1.4.2.12.json create mode 100644 dirsrvtests/tests/data/openldap_2_389/1/example_com.slapcat.ldif create mode 100644 dirsrvtests/tests/data/openldap_2_389/1/example_net.slapcat.ldif create mode 100644 dirsrvtests/tests/data/openldap_2_389/1/setup/example_com.ldif create mode 100644 dirsrvtests/tests/data/openldap_2_389/1/setup/example_net.ldif create mode 100644 dirsrvtests/tests/data/openldap_2_389/1/setup/slapd.ldif create mode 100644 dirsrvtests/tests/data/openldap_2_389/1/slapd.d/cn=config.ldif create mode 100644 dirsrvtests/tests/data/openldap_2_389/1/slapd.d/cn=config/cn=module{0}.ldif create mode 100644 dirsrvtests/tests/data/openldap_2_389/1/slapd.d/cn=config/cn=schema.ldif create mode 100644 dirsrvtests/tests/data/openldap_2_389/1/slapd.d/cn=config/cn=schema/cn={0}core.ldif create mode 100644 dirsrvtests/tests/data/openldap_2_389/1/slapd.d/cn=config/cn=schema/cn={1}cosine.ldif create mode 100644 dirsrvtests/tests/data/openldap_2_389/1/slapd.d/cn=config/cn=schema/cn={2}inetorgperson.ldif create mode 100644 dirsrvtests/tests/data/openldap_2_389/1/slapd.d/cn=config/cn=schema/cn={3}rfc2307bis.ldif create mode 100644 
dirsrvtests/tests/data/openldap_2_389/1/slapd.d/cn=config/cn=schema/cn={4}yast.ldif create mode 100644 dirsrvtests/tests/data/openldap_2_389/1/slapd.d/cn=config/cn=schema/cn={5}test.ldif create mode 100644 dirsrvtests/tests/data/openldap_2_389/1/slapd.d/cn=config/olcDatabase={-1}frontend.ldif create mode 100644 dirsrvtests/tests/data/openldap_2_389/1/slapd.d/cn=config/olcDatabase={0}config.ldif create mode 100644 dirsrvtests/tests/data/openldap_2_389/1/slapd.d/cn=config/olcDatabase={1}mdb.ldif create mode 100644 dirsrvtests/tests/data/openldap_2_389/1/slapd.d/cn=config/olcDatabase={1}mdb/olcOverlay={0}memberof.ldif create mode 100644 dirsrvtests/tests/data/openldap_2_389/1/slapd.d/cn=config/olcDatabase={1}mdb/olcOverlay={1}refint.ldif create mode 100644 dirsrvtests/tests/data/openldap_2_389/1/slapd.d/cn=config/olcDatabase={1}mdb/olcOverlay={2}unique.ldif create mode 100644 dirsrvtests/tests/data/openldap_2_389/1/slapd.d/cn=config/olcDatabase={2}mdb.ldif create mode 100644 dirsrvtests/tests/data/openldap_2_389/1/slapd.d/cn=config/olcDatabase={2}mdb/olcOverlay={0}memberof.ldif create mode 100644 dirsrvtests/tests/data/openldap_2_389/1/slapd.d/cn=config/olcDatabase={2}mdb/olcOverlay={1}unique.ldif create mode 100644 dirsrvtests/tests/data/openldap_2_389/4539/slapd.d/cn=config.ldif create mode 100644 dirsrvtests/tests/data/openldap_2_389/4539/slapd.d/cn=config/cn=module{0}.ldif create mode 100644 dirsrvtests/tests/data/openldap_2_389/4539/slapd.d/cn=config/cn=schema.ldif create mode 100644 dirsrvtests/tests/data/openldap_2_389/4539/slapd.d/cn=config/cn=schema/cn={0}core.ldif create mode 100644 dirsrvtests/tests/data/openldap_2_389/4539/slapd.d/cn=config/cn=schema/cn={1}cosine.ldif create mode 100644 dirsrvtests/tests/data/openldap_2_389/4539/slapd.d/cn=config/cn=schema/cn={2}inetorgperson.ldif create mode 100644 dirsrvtests/tests/data/openldap_2_389/4539/slapd.d/cn=config/cn=schema/cn={3}rfc2307bis.ldif create mode 100644 
dirsrvtests/tests/data/openldap_2_389/4539/slapd.d/cn=config/cn=schema/cn={4}yast.ldif create mode 100644 dirsrvtests/tests/data/openldap_2_389/4539/slapd.d/cn=config/olcDatabase={-1}frontend.ldif create mode 100644 dirsrvtests/tests/data/openldap_2_389/4539/slapd.d/cn=config/olcDatabase={0}config.ldif create mode 100644 dirsrvtests/tests/data/openldap_2_389/4539/slapd.d/cn=config/olcDatabase={1}hdb.ldif create mode 100755 dirsrvtests/tests/data/openldap_2_389/5323/slapd.d/cn=config.ldif create mode 100755 dirsrvtests/tests/data/openldap_2_389/5323/slapd.d/cn=config/cn=module{0}.ldif create mode 100755 dirsrvtests/tests/data/openldap_2_389/5323/slapd.d/cn=config/cn=schema.ldif create mode 100755 dirsrvtests/tests/data/openldap_2_389/5323/slapd.d/cn=config/cn=schema/cn={0}core.ldif create mode 100755 dirsrvtests/tests/data/openldap_2_389/5323/slapd.d/cn=config/cn=schema/cn={1}cosine.ldif create mode 100755 dirsrvtests/tests/data/openldap_2_389/5323/slapd.d/cn=config/cn=schema/cn={2}nis.ldif create mode 100755 dirsrvtests/tests/data/openldap_2_389/5323/slapd.d/cn=config/cn=schema/cn={3}inetorgperson.ldif create mode 100755 dirsrvtests/tests/data/openldap_2_389/5323/slapd.d/cn=config/olcDatabase={-1}frontend.ldif create mode 100755 dirsrvtests/tests/data/openldap_2_389/5323/slapd.d/cn=config/olcDatabase={0}config.ldif create mode 100755 dirsrvtests/tests/data/openldap_2_389/5323/slapd.d/cn=config/olcDatabase={1}mdb.ldif create mode 100755 dirsrvtests/tests/data/openldap_2_389/5323/slapd.d/cn=config/olcDatabase={2}monitor.ldif create mode 100644 dirsrvtests/tests/data/openldap_2_389/memberof/openldap_to_389ds-db.ldif create mode 100644 dirsrvtests/tests/data/openldap_2_389/memberof/openldap_to_389ds-slapd.conf create mode 100755 dirsrvtests/tests/data/openldap_2_389/memberof/slapd.d/cn=config.ldif create mode 100755 dirsrvtests/tests/data/openldap_2_389/memberof/slapd.d/cn=config/cn=module{0}.ldif create mode 100755 
dirsrvtests/tests/data/openldap_2_389/memberof/slapd.d/cn=config/cn=schema.ldif create mode 100755 dirsrvtests/tests/data/openldap_2_389/memberof/slapd.d/cn=config/cn=schema/cn={0}core.ldif create mode 100755 dirsrvtests/tests/data/openldap_2_389/memberof/slapd.d/cn=config/cn=schema/cn={1}cosine.ldif create mode 100755 dirsrvtests/tests/data/openldap_2_389/memberof/slapd.d/cn=config/cn=schema/cn={2}inetorgperson.ldif create mode 100755 dirsrvtests/tests/data/openldap_2_389/memberof/slapd.d/cn=config/cn=schema/cn={3}rfc2307bis.ldif create mode 100755 dirsrvtests/tests/data/openldap_2_389/memberof/slapd.d/cn=config/cn=schema/cn={4}yast.ldif create mode 100755 dirsrvtests/tests/data/openldap_2_389/memberof/slapd.d/cn=config/olcDatabase={-1}frontend.ldif create mode 100755 dirsrvtests/tests/data/openldap_2_389/memberof/slapd.d/cn=config/olcDatabase={0}config.ldif create mode 100755 dirsrvtests/tests/data/openldap_2_389/memberof/slapd.d/cn=config/olcDatabase={1}mdb.ldif create mode 100755 dirsrvtests/tests/data/openldap_2_389/memberof/slapd.d/cn=config/olcDatabase={1}mdb/olcOverlay={0}memberof.ldif create mode 100755 dirsrvtests/tests/data/openldap_2_389/memberof/slapd.d/cn=config/olcDatabase={1}mdb/olcOverlay={1}unique.ldif create mode 100755 dirsrvtests/tests/data/openldap_2_389/memberof/slapd.d/cn=config/olcDatabase={1}mdb/olcOverlay={2}refint.ldif create mode 100644 dirsrvtests/tests/data/openldap_2_389/saslauthd/slapd.d/cn=config.ldif create mode 100644 dirsrvtests/tests/data/openldap_2_389/saslauthd/slapd.d/cn=config/cn=module{0}.ldif create mode 100644 dirsrvtests/tests/data/openldap_2_389/saslauthd/slapd.d/cn=config/cn=schema.ldif create mode 100644 dirsrvtests/tests/data/openldap_2_389/saslauthd/slapd.d/cn=config/cn=schema/cn={0}core.ldif create mode 100644 dirsrvtests/tests/data/openldap_2_389/saslauthd/slapd.d/cn=config/cn=schema/cn={1}cosine.ldif create mode 100644 
dirsrvtests/tests/data/openldap_2_389/saslauthd/slapd.d/cn=config/cn=schema/cn={2}inetorgperson.ldif create mode 100644 dirsrvtests/tests/data/openldap_2_389/saslauthd/slapd.d/cn=config/cn=schema/cn={3}rfc2307bis.ldif create mode 100644 dirsrvtests/tests/data/openldap_2_389/saslauthd/slapd.d/cn=config/cn=schema/cn={4}yast.ldif create mode 100644 dirsrvtests/tests/data/openldap_2_389/saslauthd/slapd.d/cn=config/olcDatabase={-1}frontend.ldif create mode 100644 dirsrvtests/tests/data/openldap_2_389/saslauthd/slapd.d/cn=config/olcDatabase={0}config.ldif create mode 100644 dirsrvtests/tests/data/openldap_2_389/saslauthd/slapd.d/cn=config/olcDatabase={1}mdb.ldif create mode 100644 dirsrvtests/tests/data/openldap_2_389/saslauthd/slapd.d/cn=config/olcDatabase={1}mdb/olcOverlay={0}memberof.ldif create mode 100644 dirsrvtests/tests/data/openldap_2_389/saslauthd/slapd.d/cn=config/olcDatabase={1}mdb/olcOverlay={1}unique.ldif create mode 100644 dirsrvtests/tests/data/openldap_2_389/saslauthd/slapd.d/cn=config/olcDatabase={1}mdb/olcOverlay={2}refint.ldif create mode 100644 dirsrvtests/tests/data/openldap_2_389/saslauthd/suffix.ldif create mode 100644 dirsrvtests/tests/data/ticket47953/__init__.py create mode 100644 dirsrvtests/tests/data/ticket47953/ticket47953.ldif create mode 100644 dirsrvtests/tests/data/ticket47988/__init__.py create mode 100644 dirsrvtests/tests/data/ticket47988/schema_ipa3.3.tar.gz create mode 100644 dirsrvtests/tests/data/ticket47988/schema_ipa4.1.tar.gz create mode 100644 dirsrvtests/tests/data/ticket48212/__init__.py create mode 100644 dirsrvtests/tests/data/ticket48212/example1k_posix.ldif create mode 100644 dirsrvtests/tests/data/ticket49121/utf8str.txt create mode 100644 dirsrvtests/tests/data/ticket49441/binary.ldif create mode 100644 dirsrvtests/tests/data/tls/ca.crt create mode 100644 dirsrvtests/tests/data/tls/cert9.db create mode 100644 dirsrvtests/tests/data/tls/int.crt create mode 100644 dirsrvtests/tests/data/tls/key4.db create mode 100644 
dirsrvtests/tests/data/tls/leaf.crt create mode 100644 dirsrvtests/tests/data/tls/pkcs11.txt create mode 100644 dirsrvtests/tests/data/tls/pwdfile.txt create mode 100644 dirsrvtests/tests/data/tls/server-export.p12 create mode 100644 dirsrvtests/tests/data/tls/tls_import_ca_chain.pem create mode 100644 dirsrvtests/tests/data/tls/tls_import_crt_chain.pem create mode 100644 dirsrvtests/tests/data/tls/tls_import_key.pem create mode 100644 dirsrvtests/tests/data/tls/tls_import_key_chain.pem create mode 100644 dirsrvtests/tests/longduration/automembers_long_test.py create mode 100644 dirsrvtests/tests/longduration/db_protect_long_test.py create mode 100755 dirsrvtests/tests/perf/create_data.py create mode 100755 dirsrvtests/tests/perf/ltest.py create mode 100755 dirsrvtests/tests/perf/memberof_test.py create mode 100644 dirsrvtests/tests/perf/search_performance_test.py create mode 100644 dirsrvtests/tests/stress/README create mode 100644 dirsrvtests/tests/stress/__init__.py create mode 100644 dirsrvtests/tests/stress/cos/cos_scale_template_test.py create mode 100644 dirsrvtests/tests/stress/reliabilty/__init__.py create mode 100644 dirsrvtests/tests/stress/reliabilty/reliab_7_5_test.py create mode 100644 dirsrvtests/tests/stress/reliabilty/reliab_conn_test.py create mode 100644 dirsrvtests/tests/stress/replication/mmr_01_4m-2h-4c_test.py create mode 100644 dirsrvtests/tests/stress/replication/mmr_01_4m_test.py create mode 100644 dirsrvtests/tests/stress/search/__init__.py create mode 100644 dirsrvtests/tests/stress/search/simple.py create mode 100644 dirsrvtests/tests/suites/__init__.py create mode 100644 dirsrvtests/tests/suites/acl/__init__.py create mode 100644 dirsrvtests/tests/suites/acl/aci_excl_filter_test.py create mode 100644 dirsrvtests/tests/suites/acl/acivattr_test.py create mode 100644 dirsrvtests/tests/suites/acl/acl_deny_test.py create mode 100644 dirsrvtests/tests/suites/acl/acl_test.py create mode 100644 dirsrvtests/tests/suites/acl/conftest.py create 
mode 100644 dirsrvtests/tests/suites/acl/default_aci_allows_self_write_test.py create mode 100644 dirsrvtests/tests/suites/acl/deladd_test.py create mode 100644 dirsrvtests/tests/suites/acl/enhanced_aci_modrnd_test.py create mode 100644 dirsrvtests/tests/suites/acl/globalgroup_part2_test.py create mode 100644 dirsrvtests/tests/suites/acl/globalgroup_test.py create mode 100644 dirsrvtests/tests/suites/acl/keywords_part2_test.py create mode 100644 dirsrvtests/tests/suites/acl/keywords_test.py create mode 100644 dirsrvtests/tests/suites/acl/misc_test.py create mode 100644 dirsrvtests/tests/suites/acl/modify_test.py create mode 100644 dirsrvtests/tests/suites/acl/modrdn_test.py create mode 100644 dirsrvtests/tests/suites/acl/repeated_ldap_add_test.py create mode 100644 dirsrvtests/tests/suites/acl/roledn_test.py create mode 100644 dirsrvtests/tests/suites/acl/search_real_part2_test.py create mode 100644 dirsrvtests/tests/suites/acl/search_real_part3_test.py create mode 100644 dirsrvtests/tests/suites/acl/search_real_test.py create mode 100644 dirsrvtests/tests/suites/acl/selfdn_permissions_test.py create mode 100644 dirsrvtests/tests/suites/acl/syntax_test.py create mode 100644 dirsrvtests/tests/suites/acl/userattr_test.py create mode 100644 dirsrvtests/tests/suites/acl/valueacl_part2_test.py create mode 100644 dirsrvtests/tests/suites/acl/valueacl_test.py create mode 100644 dirsrvtests/tests/suites/attr_encryption/__init__.py create mode 100644 dirsrvtests/tests/suites/attr_encryption/attr_encryption_test.py create mode 100644 dirsrvtests/tests/suites/auth_token/__init__.py create mode 100644 dirsrvtests/tests/suites/auth_token/basic_auth_test.py create mode 100644 dirsrvtests/tests/suites/automember_plugin/__init__.py create mode 100644 dirsrvtests/tests/suites/automember_plugin/automember_abort_test.py create mode 100644 dirsrvtests/tests/suites/automember_plugin/automember_mod_test.py create mode 100644 dirsrvtests/tests/suites/automember_plugin/automember_test.py 
create mode 100644 dirsrvtests/tests/suites/automember_plugin/basic_test.py create mode 100644 dirsrvtests/tests/suites/automember_plugin/configuration_test.py create mode 100644 dirsrvtests/tests/suites/backups/__init__.py create mode 100644 dirsrvtests/tests/suites/backups/backup_test.py create mode 100644 dirsrvtests/tests/suites/basic/__init__.py create mode 100644 dirsrvtests/tests/suites/basic/basic_test.py create mode 100644 dirsrvtests/tests/suites/basic/ds_entrydn_test.py create mode 100644 dirsrvtests/tests/suites/basic/haproxy_test.py create mode 100644 dirsrvtests/tests/suites/basic/vlv.py create mode 100644 dirsrvtests/tests/suites/betxns/__init__.py create mode 100644 dirsrvtests/tests/suites/betxns/betxn_test.py create mode 100644 dirsrvtests/tests/suites/chaining_plugin/__init__.py create mode 100644 dirsrvtests/tests/suites/chaining_plugin/anonymous_access_denied_basic.py create mode 100644 dirsrvtests/tests/suites/chaining_plugin/paged_search_test.py create mode 100644 dirsrvtests/tests/suites/clu/__init__.py create mode 100644 dirsrvtests/tests/suites/clu/ca_cert_bundle_test.py create mode 100644 dirsrvtests/tests/suites/clu/clu_test.py create mode 100644 dirsrvtests/tests/suites/clu/dbgen_test.py create mode 100644 dirsrvtests/tests/suites/clu/dbgen_test_usan.py create mode 100644 dirsrvtests/tests/suites/clu/dbmon_test.py create mode 100644 dirsrvtests/tests/suites/clu/dbverify_test.py create mode 100644 dirsrvtests/tests/suites/clu/dsconf_pta_add_url_test.py create mode 100644 dirsrvtests/tests/suites/clu/dsconf_tasks_test.py create mode 100644 dirsrvtests/tests/suites/clu/dsconf_test.py create mode 100644 dirsrvtests/tests/suites/clu/dsctl_acceptance_test.py create mode 100644 dirsrvtests/tests/suites/clu/dsctl_dblib_test.py create mode 100644 dirsrvtests/tests/suites/clu/dsctl_tls_test.py create mode 100644 dirsrvtests/tests/suites/clu/dsidm_account_test.py create mode 100644 dirsrvtests/tests/suites/clu/dsidm_bulk_update_test.py create mode 
100644 dirsrvtests/tests/suites/clu/dsidm_config_test.py create mode 100644 dirsrvtests/tests/suites/clu/dsidm_init_test.py create mode 100644 dirsrvtests/tests/suites/clu/dsidm_organizational_unit_test.py create mode 100644 dirsrvtests/tests/suites/clu/dsidm_services_test.py create mode 100644 dirsrvtests/tests/suites/clu/dsidm_user_test.py create mode 100644 dirsrvtests/tests/suites/clu/dsrc_test.py create mode 100644 dirsrvtests/tests/suites/clu/fixup_test.py create mode 100644 dirsrvtests/tests/suites/clu/repl_monitor_test.py create mode 100644 dirsrvtests/tests/suites/clu/schema_test.py create mode 100644 dirsrvtests/tests/suites/config/__init__.py create mode 100644 dirsrvtests/tests/suites/config/autotuning_test.py create mode 100644 dirsrvtests/tests/suites/config/compact_test.py create mode 100644 dirsrvtests/tests/suites/config/config_delete_attr_test.py create mode 100644 dirsrvtests/tests/suites/config/config_test.py create mode 100644 dirsrvtests/tests/suites/config/regression_test.py create mode 100644 dirsrvtests/tests/suites/config/removed_config_49298_test.py create mode 100644 dirsrvtests/tests/suites/cos/__init__.py create mode 100644 dirsrvtests/tests/suites/cos/cos_test.py create mode 100644 dirsrvtests/tests/suites/cos/indirect_cos_test.py create mode 100644 dirsrvtests/tests/suites/disk_monitoring/__init__.py create mode 100644 dirsrvtests/tests/suites/disk_monitoring/disk_monitoring_divide_test.py create mode 100644 dirsrvtests/tests/suites/disk_monitoring/disk_monitoring_test.py create mode 100644 dirsrvtests/tests/suites/disk_monitoring/disk_space_test.py create mode 100644 dirsrvtests/tests/suites/ds_logs/__init__.py create mode 100644 dirsrvtests/tests/suites/ds_logs/audit_log_test.py create mode 100644 dirsrvtests/tests/suites/ds_logs/ds_logs_test.py create mode 100644 dirsrvtests/tests/suites/ds_logs/regression_test.py create mode 100644 dirsrvtests/tests/suites/ds_tools/__init__.py create mode 100644 
dirsrvtests/tests/suites/ds_tools/logpipe_test.py create mode 100644 dirsrvtests/tests/suites/ds_tools/replcheck_test.py create mode 100644 dirsrvtests/tests/suites/dynamic_plugins/__init__.py create mode 100644 dirsrvtests/tests/suites/dynamic_plugins/dynamic_plugins_test.py create mode 100644 dirsrvtests/tests/suites/dynamic_plugins/notice_for_restart_test.py create mode 100644 dirsrvtests/tests/suites/dynamic_plugins/stress_tests.py create mode 100644 dirsrvtests/tests/suites/entryuuid/__init__.py create mode 100644 dirsrvtests/tests/suites/entryuuid/basic_test.py create mode 100644 dirsrvtests/tests/suites/entryuuid/replicated_test.py create mode 100644 dirsrvtests/tests/suites/export/__init__.py create mode 100644 dirsrvtests/tests/suites/export/export_test.py create mode 100644 dirsrvtests/tests/suites/filter/__init__.py create mode 100644 dirsrvtests/tests/suites/filter/basic_filter_test.py create mode 100644 dirsrvtests/tests/suites/filter/bitw_filter_test.py create mode 100644 dirsrvtests/tests/suites/filter/complex_filters_test.py create mode 100644 dirsrvtests/tests/suites/filter/filter_cert_test.py create mode 100644 dirsrvtests/tests/suites/filter/filter_index_match_test.py create mode 100644 dirsrvtests/tests/suites/filter/filter_indexing_test.py create mode 100644 dirsrvtests/tests/suites/filter/filter_logic_test.py create mode 100644 dirsrvtests/tests/suites/filter/filter_match_test.py create mode 100644 dirsrvtests/tests/suites/filter/filter_onelevel_aci_test.py create mode 100644 dirsrvtests/tests/suites/filter/filter_test.py create mode 100644 dirsrvtests/tests/suites/filter/filter_test_aci_with_optimiser.py create mode 100644 dirsrvtests/tests/suites/filter/filter_with_non_root_user_test.py create mode 100644 dirsrvtests/tests/suites/filter/filterscanlimit_test.py create mode 100644 dirsrvtests/tests/suites/filter/large_filter_test.py create mode 100644 dirsrvtests/tests/suites/filter/rfc3673_all_oper_attrs_test.py create mode 100644 
dirsrvtests/tests/suites/filter/schema_validation_test.py create mode 100644 dirsrvtests/tests/suites/filter/vfilter_attribute_test.py create mode 100644 dirsrvtests/tests/suites/filter/vfilter_simple_test.py create mode 100644 dirsrvtests/tests/suites/fourwaymmr/__init__.py create mode 100644 dirsrvtests/tests/suites/fourwaymmr/fourwaymmr_test.py create mode 100644 dirsrvtests/tests/suites/fractional/__init__.py create mode 100644 dirsrvtests/tests/suites/fractional/fractional_test.py create mode 100644 dirsrvtests/tests/suites/get_effective_rights/__init__.py create mode 100644 dirsrvtests/tests/suites/get_effective_rights/acceptance_test.py create mode 100644 dirsrvtests/tests/suites/gssapi/__init__.py create mode 100644 dirsrvtests/tests/suites/gssapi/simple_gssapi_test.py create mode 100644 dirsrvtests/tests/suites/gssapi_repl/__init__.py create mode 100644 dirsrvtests/tests/suites/gssapi_repl/gssapi_repl_test.py create mode 100644 dirsrvtests/tests/suites/healthcheck/__init__.py create mode 100644 dirsrvtests/tests/suites/healthcheck/health_config_test.py create mode 100644 dirsrvtests/tests/suites/healthcheck/health_repl_test.py create mode 100644 dirsrvtests/tests/suites/healthcheck/health_security_test.py create mode 100644 dirsrvtests/tests/suites/healthcheck/health_sync_test.py create mode 100644 dirsrvtests/tests/suites/healthcheck/healthcheck_test.py create mode 100644 dirsrvtests/tests/suites/import/__init__.py create mode 100644 dirsrvtests/tests/suites/import/import_test.py create mode 100644 dirsrvtests/tests/suites/import/import_warning_test.py create mode 100644 dirsrvtests/tests/suites/import/regression_test.py create mode 100644 dirsrvtests/tests/suites/indexes/__init__.py create mode 100644 dirsrvtests/tests/suites/indexes/entryrdn_test.py create mode 100644 dirsrvtests/tests/suites/indexes/huge_index_key.py create mode 100644 dirsrvtests/tests/suites/indexes/regression_test.py create mode 100644 dirsrvtests/tests/suites/ldapi/__init__.py 
create mode 100644 dirsrvtests/tests/suites/ldapi/ldapi_test.py create mode 100644 dirsrvtests/tests/suites/lib389/__init__.py create mode 100644 dirsrvtests/tests/suites/lib389/config_compare_test.py create mode 100644 dirsrvtests/tests/suites/lib389/dsldapobject/__init__.py create mode 100644 dirsrvtests/tests/suites/lib389/dsldapobject/dn_construct_test.py create mode 100644 dirsrvtests/tests/suites/lib389/idm/__init__.py create mode 100644 dirsrvtests/tests/suites/lib389/idm/account_test.py create mode 100644 dirsrvtests/tests/suites/lib389/idm/user_compare_i2_test.py create mode 100644 dirsrvtests/tests/suites/lib389/idm/user_compare_m2Repl_test.py create mode 100644 dirsrvtests/tests/suites/lib389/idm/user_compare_st_test.py create mode 100644 dirsrvtests/tests/suites/lib389/timeout_test.py create mode 100644 dirsrvtests/tests/suites/logging/__init__.py create mode 100644 dirsrvtests/tests/suites/logging/logging_compression_test.py create mode 100644 dirsrvtests/tests/suites/logging/logging_config_test.py create mode 100644 dirsrvtests/tests/suites/logging/security_basic_test.py create mode 100644 dirsrvtests/tests/suites/mapping_tree/__init__.py create mode 100644 dirsrvtests/tests/suites/mapping_tree/acceptance_test.py create mode 100644 dirsrvtests/tests/suites/mapping_tree/be_del_and_default_naming_attr_test.py create mode 100644 dirsrvtests/tests/suites/mapping_tree/mt_cursed_test.py create mode 100644 dirsrvtests/tests/suites/mapping_tree/referral_during_tot_init_test.py create mode 100644 dirsrvtests/tests/suites/mapping_tree/regression_test.py create mode 100644 dirsrvtests/tests/suites/memberof_plugin/__init__.py create mode 100644 dirsrvtests/tests/suites/memberof_plugin/fixup_test.py create mode 100644 dirsrvtests/tests/suites/memberof_plugin/memberof_include_scopes_test.py create mode 100644 dirsrvtests/tests/suites/memberof_plugin/regression_test.py create mode 100644 dirsrvtests/tests/suites/memory_leaks/MMR_double_free_test.py create mode 
100644 dirsrvtests/tests/suites/memory_leaks/__init__.py create mode 100644 dirsrvtests/tests/suites/memory_leaks/allids_search_test.py create mode 100644 dirsrvtests/tests/suites/memory_leaks/range_search_test.py create mode 100644 dirsrvtests/tests/suites/migration/__init__.py create mode 100644 dirsrvtests/tests/suites/migration/export_data_test.py create mode 100644 dirsrvtests/tests/suites/migration/import_data_test.py create mode 100644 dirsrvtests/tests/suites/monitor/__init__.py create mode 100644 dirsrvtests/tests/suites/monitor/db_locks_monitor_test.py create mode 100644 dirsrvtests/tests/suites/monitor/monitor_test.py create mode 100644 dirsrvtests/tests/suites/openldap_2_389/__init__.py create mode 100644 dirsrvtests/tests/suites/openldap_2_389/migrate_hdb_test.py create mode 100644 dirsrvtests/tests/suites/openldap_2_389/migrate_memberof_test.py create mode 100644 dirsrvtests/tests/suites/openldap_2_389/migrate_monitor_test.py create mode 100644 dirsrvtests/tests/suites/openldap_2_389/migrate_test.py create mode 100644 dirsrvtests/tests/suites/openldap_2_389/password_migrate_test.py create mode 100644 dirsrvtests/tests/suites/paged_results/__init__.py create mode 100644 dirsrvtests/tests/suites/paged_results/paged_results_test.py create mode 100644 dirsrvtests/tests/suites/password/__init__.py create mode 100644 dirsrvtests/tests/suites/password/password_TPR_policy_test.py create mode 100644 dirsrvtests/tests/suites/password/password_policy_test.py create mode 100644 dirsrvtests/tests/suites/password/password_test.py create mode 100644 dirsrvtests/tests/suites/password/pbkdf2_upgrade_plugin_test.py create mode 100644 dirsrvtests/tests/suites/password/pw_expired_access_test.py create mode 100644 dirsrvtests/tests/suites/password/pwdAdmin_test.py create mode 100644 dirsrvtests/tests/suites/password/pwdModify_test.py create mode 100644 dirsrvtests/tests/suites/password/pwdPolicy_attribute_test.py create mode 100644 
dirsrvtests/tests/suites/password/pwdPolicy_controls_sequence_test.py create mode 100644 dirsrvtests/tests/suites/password/pwdPolicy_controls_test.py create mode 100644 dirsrvtests/tests/suites/password/pwdPolicy_inherit_global_test.py create mode 100644 dirsrvtests/tests/suites/password/pwdPolicy_logging_test.py create mode 100644 dirsrvtests/tests/suites/password/pwdPolicy_syntax_test.py create mode 100644 dirsrvtests/tests/suites/password/pwdPolicy_temporary_password.py create mode 100644 dirsrvtests/tests/suites/password/pwdPolicy_token_test.py create mode 100644 dirsrvtests/tests/suites/password/pwdPolicy_warning_test.py create mode 100644 dirsrvtests/tests/suites/password/pwd_algo_test.py create mode 100644 dirsrvtests/tests/suites/password/pwd_crypt_asterisk_test.py create mode 100644 dirsrvtests/tests/suites/password/pwd_lockout_bypass_test.py create mode 100644 dirsrvtests/tests/suites/password/pwd_log_test.py create mode 100644 dirsrvtests/tests/suites/password/pwd_upgrade_on_bind_test.py create mode 100644 dirsrvtests/tests/suites/password/pwp_gracel_test.py create mode 100644 dirsrvtests/tests/suites/password/pwp_history_test.py create mode 100644 dirsrvtests/tests/suites/password/pwp_test.py create mode 100644 dirsrvtests/tests/suites/password/regression_of_bugs_test.py create mode 100644 dirsrvtests/tests/suites/password/regression_test.py create mode 100644 dirsrvtests/tests/suites/plugins/__init__.py create mode 100644 dirsrvtests/tests/suites/plugins/acceptance_test.py create mode 100644 dirsrvtests/tests/suites/plugins/accpol_check_all_state_attrs_test.py create mode 100644 dirsrvtests/tests/suites/plugins/accpol_test.py create mode 100644 dirsrvtests/tests/suites/plugins/alias_entries_test.py create mode 100644 dirsrvtests/tests/suites/plugins/attr_nsslapd-pluginarg_test.py create mode 100644 dirsrvtests/tests/suites/plugins/attruniq_test.py create mode 100644 dirsrvtests/tests/suites/plugins/cos_test.py create mode 100644 
dirsrvtests/tests/suites/plugins/deref_aci_test.py create mode 100644 dirsrvtests/tests/suites/plugins/dna_interval_test.py create mode 100644 dirsrvtests/tests/suites/plugins/dna_test.py create mode 100644 dirsrvtests/tests/suites/plugins/entryusn_test.py create mode 100644 dirsrvtests/tests/suites/plugins/managed_entry_test.py create mode 100644 dirsrvtests/tests/suites/plugins/memberof_test.py create mode 100644 dirsrvtests/tests/suites/plugins/pluginpath_validation_test.py create mode 100644 dirsrvtests/tests/suites/plugins/referint_test.py create mode 100644 dirsrvtests/tests/suites/plugins/rootdn_plugin_test.py create mode 100644 dirsrvtests/tests/suites/psearch/__init__.py create mode 100644 dirsrvtests/tests/suites/psearch/psearch_test.py create mode 100644 dirsrvtests/tests/suites/pwp_storage/__init__.py create mode 100644 dirsrvtests/tests/suites/pwp_storage/storage_test.py create mode 100644 dirsrvtests/tests/suites/referint_plugin/__init__.py create mode 100644 dirsrvtests/tests/suites/referint_plugin/rename_test.py create mode 100644 dirsrvtests/tests/suites/replication/__init__.py create mode 100644 dirsrvtests/tests/suites/replication/acceptance_test.py create mode 100644 dirsrvtests/tests/suites/replication/cascading_test.py create mode 100644 dirsrvtests/tests/suites/replication/changelog_encryption_test.py create mode 100644 dirsrvtests/tests/suites/replication/changelog_test.py create mode 100644 dirsrvtests/tests/suites/replication/changelog_trimming_test.py create mode 100644 dirsrvtests/tests/suites/replication/cleanallruv_abort_certify_test.py create mode 100644 dirsrvtests/tests/suites/replication/cleanallruv_abort_restart_test.py create mode 100644 dirsrvtests/tests/suites/replication/cleanallruv_abort_test.py create mode 100644 dirsrvtests/tests/suites/replication/cleanallruv_force_test.py create mode 100644 dirsrvtests/tests/suites/replication/cleanallruv_max_tasks_test.py create mode 100644 
dirsrvtests/tests/suites/replication/cleanallruv_multiple_force_test.py create mode 100644 dirsrvtests/tests/suites/replication/cleanallruv_restart_test.py create mode 100644 dirsrvtests/tests/suites/replication/cleanallruv_shutdown_crash_test.py create mode 100644 dirsrvtests/tests/suites/replication/cleanallruv_stress_test.py create mode 100644 dirsrvtests/tests/suites/replication/cleanallruv_test.py create mode 100644 dirsrvtests/tests/suites/replication/conflict_resolve_test.py create mode 100644 dirsrvtests/tests/suites/replication/conftest.py create mode 100644 dirsrvtests/tests/suites/replication/encryption_cl5_test.py create mode 100644 dirsrvtests/tests/suites/replication/multiple_changelogs_test.py create mode 100644 dirsrvtests/tests/suites/replication/promote_demote_test.py create mode 100644 dirsrvtests/tests/suites/replication/regression_i2_test.py create mode 100644 dirsrvtests/tests/suites/replication/regression_m2_test.py create mode 100644 dirsrvtests/tests/suites/replication/regression_m2c2_test.py create mode 100644 dirsrvtests/tests/suites/replication/regression_m3_test.py create mode 100644 dirsrvtests/tests/suites/replication/repl_agmt_bootstrap_test.py create mode 100644 dirsrvtests/tests/suites/replication/replica_config_test.py create mode 100644 dirsrvtests/tests/suites/replication/replica_roles_test.py create mode 100644 dirsrvtests/tests/suites/replication/ruvstore_test.py create mode 100644 dirsrvtests/tests/suites/replication/sasl_m2_test.py create mode 100644 dirsrvtests/tests/suites/replication/series_of_repl_bugs_test.py create mode 100644 dirsrvtests/tests/suites/replication/single_master_test.py create mode 100644 dirsrvtests/tests/suites/replication/tls_client_auth_repl_test.py create mode 100644 dirsrvtests/tests/suites/replication/tombstone_fixup_test.py create mode 100644 dirsrvtests/tests/suites/replication/tombstone_repl_mods_test.py create mode 100644 dirsrvtests/tests/suites/replication/tombstone_test.py create mode 
100644 dirsrvtests/tests/suites/replication/virtual_attribute_replication_test.py create mode 100644 dirsrvtests/tests/suites/replication/wait_for_async_feature_test.py create mode 100644 dirsrvtests/tests/suites/resource_limits/__init__.py create mode 100644 dirsrvtests/tests/suites/resource_limits/fdlimits_test.py create mode 100644 dirsrvtests/tests/suites/retrocl/__init__.py create mode 100644 dirsrvtests/tests/suites/retrocl/basic_test.py create mode 100644 dirsrvtests/tests/suites/retrocl/retrocl_indexing_test.py create mode 100644 dirsrvtests/tests/suites/rewriters/__init__.py create mode 100644 dirsrvtests/tests/suites/rewriters/adfilter_test.py create mode 100644 dirsrvtests/tests/suites/rewriters/basic_test.py create mode 100644 dirsrvtests/tests/suites/roles/__init__.py create mode 100644 dirsrvtests/tests/suites/roles/basic_test.py create mode 100644 dirsrvtests/tests/suites/sasl/__init__.py create mode 100644 dirsrvtests/tests/suites/sasl/allowed_mechs_test.py create mode 100644 dirsrvtests/tests/suites/sasl/plain_test.py create mode 100644 dirsrvtests/tests/suites/sasl/regression_test.py create mode 100644 dirsrvtests/tests/suites/schema/__init__.py create mode 100644 dirsrvtests/tests/suites/schema/eduperson_test.py create mode 100644 dirsrvtests/tests/suites/schema/schema_reload_test.py create mode 100644 dirsrvtests/tests/suites/schema/schema_replication_origin_test.py create mode 100644 dirsrvtests/tests/suites/schema/schema_replication_test.py create mode 100644 dirsrvtests/tests/suites/schema/schema_test.py create mode 100644 dirsrvtests/tests/suites/schema/x_attribute_descr_oid_test.py create mode 100644 dirsrvtests/tests/suites/setup_ds/__init__.py create mode 100755 dirsrvtests/tests/suites/setup_ds/db_home_test.py create mode 100644 dirsrvtests/tests/suites/setup_ds/dscreate_test.py create mode 100644 dirsrvtests/tests/suites/setup_ds/remove_test.py create mode 100644 dirsrvtests/tests/suites/slapi_memberof/basic_interface_test.py create 
mode 100644 dirsrvtests/tests/suites/snmp/__init__.py create mode 100644 dirsrvtests/tests/suites/state/__init__.py create mode 100644 dirsrvtests/tests/suites/state/mmt_state_test.py create mode 100644 dirsrvtests/tests/suites/subentries/__init__.py create mode 100644 dirsrvtests/tests/suites/subentries/subentries_test.py create mode 100644 dirsrvtests/tests/suites/syncrepl_plugin/__init__.py create mode 100644 dirsrvtests/tests/suites/syncrepl_plugin/basic_test.py create mode 100644 dirsrvtests/tests/suites/syncrepl_plugin/openldap_test.py create mode 100644 dirsrvtests/tests/suites/syntax/__init__.py create mode 100644 dirsrvtests/tests/suites/syntax/acceptance_test.py create mode 100644 dirsrvtests/tests/suites/syntax/mr_test.py create mode 100644 dirsrvtests/tests/suites/tls/__init__.py create mode 100644 dirsrvtests/tests/suites/tls/cipher_test.py create mode 100644 dirsrvtests/tests/suites/tls/ecdsa_test.py create mode 100644 dirsrvtests/tests/suites/tls/ssl_version_test.py create mode 100644 dirsrvtests/tests/suites/tls/tls_cert_namespace_test.py create mode 100644 dirsrvtests/tests/suites/tls/tls_check_crl_test.py create mode 100644 dirsrvtests/tests/suites/tls/tls_import_ca_chain_test.py create mode 100644 dirsrvtests/tests/suites/tls/tls_ldaps_only_test.py create mode 100644 dirsrvtests/tests/suites/upgrade/__init__.py create mode 100644 dirsrvtests/tests/suites/upgrade/upgrade_repl_plugin_test.py create mode 100644 dirsrvtests/tests/suites/upgrade/upgradednformat_test.py create mode 100644 dirsrvtests/tests/suites/vlv/__init__.py create mode 100644 dirsrvtests/tests/suites/vlv/regression_test.py create mode 100644 dirsrvtests/tests/suites/webui/README create mode 100644 dirsrvtests/tests/suites/webui/__init__.py create mode 100644 dirsrvtests/tests/suites/webui/backup/__init__.py create mode 100644 dirsrvtests/tests/suites/webui/backup/backup_test.py create mode 100644 dirsrvtests/tests/suites/webui/create/__init__.py create mode 100644 
dirsrvtests/tests/suites/webui/create/create_instance_test.py create mode 100644 dirsrvtests/tests/suites/webui/database/__init__.py create mode 100644 dirsrvtests/tests/suites/webui/database/database_test.py create mode 100644 dirsrvtests/tests/suites/webui/ldap_browser/__init__.py create mode 100644 dirsrvtests/tests/suites/webui/ldap_browser/ldap_browser_test.py create mode 100644 dirsrvtests/tests/suites/webui/login/__init__.py create mode 100644 dirsrvtests/tests/suites/webui/login/login_test.py create mode 100644 dirsrvtests/tests/suites/webui/monitoring/__init__.py create mode 100644 dirsrvtests/tests/suites/webui/monitoring/monitoring_test.py create mode 100644 dirsrvtests/tests/suites/webui/plugins/__init__.py create mode 100644 dirsrvtests/tests/suites/webui/plugins/plugins_test.py create mode 100644 dirsrvtests/tests/suites/webui/replication/__init__.py create mode 100644 dirsrvtests/tests/suites/webui/replication/replication_test.py create mode 100644 dirsrvtests/tests/suites/webui/schema/__init__.py create mode 100644 dirsrvtests/tests/suites/webui/schema/schema_test.py create mode 100644 dirsrvtests/tests/suites/webui/server/__init__.py create mode 100644 dirsrvtests/tests/suites/webui/server/server_test.py create mode 100644 dirsrvtests/tests/tickets/__init__.py create mode 100644 dirsrvtests/tests/tickets/ticket47560_test.py create mode 100644 dirsrvtests/tests/tickets/ticket47573_test.py create mode 100644 dirsrvtests/tests/tickets/ticket47619_test.py create mode 100644 dirsrvtests/tests/tickets/ticket47640_test.py create mode 100644 dirsrvtests/tests/tickets/ticket47653MMR_test.py create mode 100644 dirsrvtests/tests/tickets/ticket47676_test.py create mode 100644 dirsrvtests/tests/tickets/ticket47714_test.py create mode 100644 dirsrvtests/tests/tickets/ticket47721_test.py create mode 100644 dirsrvtests/tests/tickets/ticket47781_test.py create mode 100644 dirsrvtests/tests/tickets/ticket47787_test.py create mode 100644 
dirsrvtests/tests/tickets/ticket47808_test.py create mode 100644 dirsrvtests/tests/tickets/ticket47815_test.py create mode 100644 dirsrvtests/tests/tickets/ticket47823_test.py create mode 100644 dirsrvtests/tests/tickets/ticket47828_test.py create mode 100644 dirsrvtests/tests/tickets/ticket47829_test.py create mode 100644 dirsrvtests/tests/tickets/ticket47833_test.py create mode 100644 dirsrvtests/tests/tickets/ticket47869MMR_test.py create mode 100644 dirsrvtests/tests/tickets/ticket47871_test.py create mode 100644 dirsrvtests/tests/tickets/ticket47900_test.py create mode 100644 dirsrvtests/tests/tickets/ticket47910_test.py create mode 100644 dirsrvtests/tests/tickets/ticket47920_test.py create mode 100644 dirsrvtests/tests/tickets/ticket47921_test.py create mode 100644 dirsrvtests/tests/tickets/ticket47927_test.py create mode 100644 dirsrvtests/tests/tickets/ticket47931_test.py create mode 100644 dirsrvtests/tests/tickets/ticket47953_test.py create mode 100644 dirsrvtests/tests/tickets/ticket47963_test.py create mode 100644 dirsrvtests/tests/tickets/ticket47970_test.py create mode 100644 dirsrvtests/tests/tickets/ticket47976_test.py create mode 100644 dirsrvtests/tests/tickets/ticket47980_test.py create mode 100644 dirsrvtests/tests/tickets/ticket47981_test.py create mode 100644 dirsrvtests/tests/tickets/ticket47988_test.py create mode 100644 dirsrvtests/tests/tickets/ticket48005_test.py create mode 100644 dirsrvtests/tests/tickets/ticket48013_test.py create mode 100644 dirsrvtests/tests/tickets/ticket48026_test.py create mode 100644 dirsrvtests/tests/tickets/ticket48109_test.py create mode 100644 dirsrvtests/tests/tickets/ticket48170_test.py create mode 100644 dirsrvtests/tests/tickets/ticket48194_test.py create mode 100644 dirsrvtests/tests/tickets/ticket48212_test.py create mode 100644 dirsrvtests/tests/tickets/ticket48214_test.py create mode 100644 dirsrvtests/tests/tickets/ticket48228_test.py create mode 100644 dirsrvtests/tests/tickets/ticket48233_test.py 
create mode 100644 dirsrvtests/tests/tickets/ticket48252_test.py create mode 100644 dirsrvtests/tests/tickets/ticket48265_test.py create mode 100644 dirsrvtests/tests/tickets/ticket48266_test.py create mode 100644 dirsrvtests/tests/tickets/ticket48270_test.py create mode 100644 dirsrvtests/tests/tickets/ticket48272_test.py create mode 100644 dirsrvtests/tests/tickets/ticket48294_test.py create mode 100644 dirsrvtests/tests/tickets/ticket48295_test.py create mode 100644 dirsrvtests/tests/tickets/ticket48312_test.py create mode 100644 dirsrvtests/tests/tickets/ticket48325_test.py create mode 100644 dirsrvtests/tests/tickets/ticket48342_test.py create mode 100644 dirsrvtests/tests/tickets/ticket48354_test.py create mode 100644 dirsrvtests/tests/tickets/ticket48362_test.py create mode 100644 dirsrvtests/tests/tickets/ticket48366_test.py create mode 100644 dirsrvtests/tests/tickets/ticket48370_test.py create mode 100644 dirsrvtests/tests/tickets/ticket48383_test.py create mode 100644 dirsrvtests/tests/tickets/ticket48497_test.py create mode 100644 dirsrvtests/tests/tickets/ticket48637_test.py create mode 100644 dirsrvtests/tests/tickets/ticket48665_test.py create mode 100644 dirsrvtests/tests/tickets/ticket48745_test.py create mode 100644 dirsrvtests/tests/tickets/ticket48746_test.py create mode 100644 dirsrvtests/tests/tickets/ticket48759_test.py create mode 100644 dirsrvtests/tests/tickets/ticket48784_test.py create mode 100644 dirsrvtests/tests/tickets/ticket48798_test.py create mode 100644 dirsrvtests/tests/tickets/ticket48799_test.py create mode 100644 dirsrvtests/tests/tickets/ticket48808_test.py create mode 100644 dirsrvtests/tests/tickets/ticket48844_test.py create mode 100644 dirsrvtests/tests/tickets/ticket48891_test.py create mode 100644 dirsrvtests/tests/tickets/ticket48893_test.py create mode 100644 dirsrvtests/tests/tickets/ticket48896_test.py create mode 100644 dirsrvtests/tests/tickets/ticket48906_test.py create mode 100644 
dirsrvtests/tests/tickets/ticket48916_test.py create mode 100644 dirsrvtests/tests/tickets/ticket48944_test.py create mode 100644 dirsrvtests/tests/tickets/ticket48956_test.py create mode 100644 dirsrvtests/tests/tickets/ticket48973_test.py create mode 100644 dirsrvtests/tests/tickets/ticket49008_test.py create mode 100644 dirsrvtests/tests/tickets/ticket49020_test.py create mode 100644 dirsrvtests/tests/tickets/ticket49039_test.py create mode 100644 dirsrvtests/tests/tickets/ticket49072_test.py create mode 100644 dirsrvtests/tests/tickets/ticket49073_test.py create mode 100644 dirsrvtests/tests/tickets/ticket49076_test.py create mode 100644 dirsrvtests/tests/tickets/ticket49095_test.py create mode 100644 dirsrvtests/tests/tickets/ticket49104_test.py create mode 100644 dirsrvtests/tests/tickets/ticket49121_test.py create mode 100644 dirsrvtests/tests/tickets/ticket49122_test.py create mode 100644 dirsrvtests/tests/tickets/ticket49180_test.py create mode 100644 dirsrvtests/tests/tickets/ticket49184_test.py create mode 100644 dirsrvtests/tests/tickets/ticket49192_test.py create mode 100644 dirsrvtests/tests/tickets/ticket49227_test.py create mode 100644 dirsrvtests/tests/tickets/ticket49249_test.py create mode 100644 dirsrvtests/tests/tickets/ticket49273_test.py create mode 100644 dirsrvtests/tests/tickets/ticket49287_test.py create mode 100644 dirsrvtests/tests/tickets/ticket49290_test.py create mode 100644 dirsrvtests/tests/tickets/ticket49303_test.py create mode 100644 dirsrvtests/tests/tickets/ticket49386_test.py create mode 100644 dirsrvtests/tests/tickets/ticket49412_test.py create mode 100644 dirsrvtests/tests/tickets/ticket49441_test.py create mode 100644 dirsrvtests/tests/tickets/ticket49460_test.py create mode 100644 dirsrvtests/tests/tickets/ticket49463_test.py create mode 100644 dirsrvtests/tests/tickets/ticket49471_test.py create mode 100644 dirsrvtests/tests/tickets/ticket49540_test.py create mode 100644 dirsrvtests/tests/tickets/ticket49623_2_test.py 
create mode 100644 dirsrvtests/tests/tickets/ticket49658_test.py create mode 100644 dirsrvtests/tests/tickets/ticket49788_test.py create mode 100644 dirsrvtests/tests/tickets/ticket50078_test.py create mode 100644 dirsrvtests/tests/tickets/ticket50232_test.py create mode 100644 dirsrvtests/tests/tickets/ticket50234_test.py create mode 100644 dirsrvtests/tests/tickets/ticket548_test.py create mode 100644 dirsrvtests/tests/tmp/README create mode 100644 dirsrvtests/tests/tmp/__init__.py create mode 100644 docker.mk create mode 100644 docker/389-ds-fedora/Dockerfile create mode 100644 docker/389-ds-suse/Dockerfile create mode 100644 docker/389-ds-suse/Dockerfile.release create mode 100644 docker/README.md create mode 100644 docs/custom.css create mode 100644 docs/doc_header.html create mode 100644 docs/slapi.doxy.in create mode 100644 include/base/crit.h create mode 100644 include/base/dbtbase.h create mode 100644 include/base/ereport.h create mode 100644 include/base/file.h create mode 100644 include/base/fsmutex.h create mode 100644 include/base/plist.h create mode 100644 include/base/pool.h create mode 100644 include/base/shexp.h create mode 100644 include/base/systems.h create mode 100644 include/base/systhr.h create mode 100644 include/base/util.h create mode 100644 include/i18n.h create mode 100644 include/ldaputil/cert.h create mode 100644 include/ldaputil/certmap.h create mode 100644 include/ldaputil/dbconf.h create mode 100644 include/ldaputil/encode.h create mode 100644 include/ldaputil/errors.h create mode 100644 include/ldaputil/init.h create mode 100644 include/ldaputil/ldapauth.h create mode 100644 include/ldaputil/ldaputil.h create mode 100644 include/libaccess/acl.h create mode 100644 include/libaccess/aclerror.h create mode 100644 include/libaccess/acleval.h create mode 100644 include/libaccess/aclglobal.h create mode 100644 include/libaccess/aclproto.h create mode 100644 include/libaccess/aclstruct.h create mode 100644 include/libaccess/attrec.h 
create mode 100644 include/libaccess/authdb.h create mode 100644 include/libaccess/dbtlibaccess.h create mode 100644 include/libaccess/dnfstruct.h create mode 100644 include/libaccess/ipfstruct.h create mode 100644 include/libaccess/las.h create mode 100644 include/libaccess/nsauth.h create mode 100644 include/libaccess/nsautherr.h create mode 100644 include/libaccess/nserror.h create mode 100644 include/libaccess/symbols.h create mode 100644 include/libaccess/userauth.h create mode 100644 include/libaccess/usi.h create mode 100644 include/libaccess/usrcache.h create mode 100644 include/libadmin/dbtlibadmin.h create mode 100644 include/libadmin/libadmin.h create mode 100644 include/netsite.h create mode 100644 include/public/base/systems.h create mode 100644 include/public/netsite.h create mode 100644 include/public/nsacl/aclapi.h create mode 100644 include/public/nsacl/acldef.h create mode 100644 include/public/nsacl/nserrdef.h create mode 100644 include/public/nsacl/plistdef.h create mode 100644 include/public/nsapi.h create mode 100644 ldap/admin/src/70-dirsrv.conf create mode 100644 ldap/admin/src/base-initconfig.in create mode 100644 ldap/admin/src/defaults.inf.in create mode 100644 ldap/admin/src/initconfig.in create mode 100755 ldap/admin/src/logconv.pl create mode 100644 ldap/admin/src/scripts/ds-logpipe.py create mode 100755 ldap/admin/src/scripts/ds-replcheck create mode 100644 ldap/admin/src/scripts/failedbinds.py create mode 100644 ldap/admin/src/scripts/logregex.py create mode 100644 ldap/admin/src/scripts/ns-slapd-gdb.py create mode 100644 ldap/admin/src/slapd.inf.in create mode 100644 ldap/admin/src/template-initconfig.in create mode 100644 ldap/include/avl.h create mode 100644 ldap/include/dblayer.h create mode 100644 ldap/include/disptmpl.h create mode 100644 ldap/include/ldaprot.h create mode 100644 ldap/include/portable.h create mode 100644 ldap/include/regex.h create mode 100644 ldap/include/srchpref.h create mode 100644 
ldap/include/sysexits-compat.h create mode 100644 ldap/ldif/Ace.ldif create mode 100644 ldap/ldif/European.ldif create mode 100644 ldap/ldif/Eurosuffix.ldif create mode 100644 ldap/ldif/Example-roles.ldif create mode 100644 ldap/ldif/Example-views.ldif create mode 100644 ldap/ldif/Example.ldif create mode 100644 ldap/ldif/template-baseacis.ldif.in create mode 100644 ldap/ldif/template-country.ldif.in create mode 100644 ldap/ldif/template-domain.ldif.in create mode 100644 ldap/ldif/template-dse-minimal.ldif.in create mode 100644 ldap/ldif/template-dse.ldif.in create mode 100644 ldap/ldif/template-ldapi-autobind.ldif.in create mode 100644 ldap/ldif/template-ldapi-default.ldif.in create mode 100644 ldap/ldif/template-ldapi.ldif.in create mode 100644 ldap/ldif/template-locality.ldif.in create mode 100644 ldap/ldif/template-org.ldif.in create mode 100644 ldap/ldif/template-orgunit.ldif.in create mode 100644 ldap/ldif/template-sasl.ldif.in create mode 100644 ldap/ldif/template-state.ldif.in create mode 100644 ldap/ldif/template-suffix-db.ldif.in create mode 100644 ldap/ldif/template.ldif create mode 100644 ldap/libraries/libavl/avl.c create mode 100644 ldap/libraries/libavl/testavl.c create mode 100644 ldap/schema/00core.ldif create mode 100644 ldap/schema/01core389.ldif create mode 100644 ldap/schema/02common.ldif create mode 100644 ldap/schema/03entryuuid.ldif create mode 100644 ldap/schema/05rfc2927.ldif create mode 100644 ldap/schema/05rfc4523.ldif create mode 100644 ldap/schema/05rfc4524.ldif create mode 100644 ldap/schema/06inetorgperson.ldif create mode 100644 ldap/schema/10automember-plugin.ldif create mode 100644 ldap/schema/10dna-plugin.ldif create mode 100644 ldap/schema/10mep-plugin.ldif create mode 100644 ldap/schema/10presence.ldif create mode 100644 ldap/schema/10rfc2307.ldif create mode 100644 ldap/schema/10rfc2307bis.ldif create mode 100644 ldap/schema/10rfc2307compat.ldif create mode 100644 ldap/schema/20subscriber.ldif create mode 100644 
ldap/schema/25java-object.ldif create mode 100644 ldap/schema/28pilot.ldif create mode 100644 ldap/schema/30ns-common.ldif create mode 100644 ldap/schema/50ns-admin.ldif create mode 100644 ldap/schema/50ns-certificate.ldif create mode 100644 ldap/schema/50ns-directory.ldif create mode 100644 ldap/schema/50ns-mail.ldif create mode 100644 ldap/schema/50ns-value.ldif create mode 100644 ldap/schema/50ns-web.ldif create mode 100644 ldap/schema/60acctpolicy.ldif create mode 100644 ldap/schema/60autofs.ldif create mode 100644 ldap/schema/60changelog.ldif create mode 100644 ldap/schema/60eduperson.ldif create mode 100644 ldap/schema/60inetmail.ldif create mode 100644 ldap/schema/60kerberos.ldif create mode 100644 ldap/schema/60krb5kdc.ldif create mode 100644 ldap/schema/60mozilla.ldif create mode 100644 ldap/schema/60nis.ldif create mode 100644 ldap/schema/60nss-ldap.ldif create mode 100644 ldap/schema/60pam-plugin.ldif create mode 100644 ldap/schema/60posix-winsync-plugin.ldif create mode 100644 ldap/schema/60pureftpd.ldif create mode 100644 ldap/schema/60qmail.ldif create mode 100644 ldap/schema/60radius.ldif create mode 100644 ldap/schema/60rfc2739.ldif create mode 100644 ldap/schema/60rfc3712.ldif create mode 100644 ldap/schema/60rfc4876.ldif create mode 100644 ldap/schema/60sabayon.ldif create mode 100644 ldap/schema/60samba.ldif create mode 100644 ldap/schema/60samba3.ldif create mode 100644 ldap/schema/60sendmail.ldif create mode 100644 ldap/schema/60sudo.ldif create mode 100644 ldap/schema/60trust.ldif create mode 100644 ldap/schema/99user.ldif create mode 100644 ldap/schema/dsee.schema create mode 100644 ldap/schema/slapd-collations.conf create mode 100644 ldap/servers/plugins/acct_usability/acct_usability.c create mode 100644 ldap/servers/plugins/acct_usability/acct_usability.h create mode 100644 ldap/servers/plugins/acctpolicy/acct_config.c create mode 100644 ldap/servers/plugins/acctpolicy/acct_init.c create mode 100644 
ldap/servers/plugins/acctpolicy/acct_plugin.c create mode 100644 ldap/servers/plugins/acctpolicy/acct_util.c create mode 100644 ldap/servers/plugins/acctpolicy/acctpolicy.h create mode 100644 ldap/servers/plugins/acctpolicy/sampleconfig.ldif create mode 100644 ldap/servers/plugins/acctpolicy/samplepolicy.ldif create mode 100644 ldap/servers/plugins/acl/ACL-Notes create mode 100644 ldap/servers/plugins/acl/acl.c create mode 100644 ldap/servers/plugins/acl/acl.h create mode 100644 ldap/servers/plugins/acl/acl_ext.c create mode 100644 ldap/servers/plugins/acl/aclanom.c create mode 100644 ldap/servers/plugins/acl/acleffectiverights.c create mode 100644 ldap/servers/plugins/acl/aclgroup.c create mode 100644 ldap/servers/plugins/acl/aclinit.c create mode 100644 ldap/servers/plugins/acl/acllas.c create mode 100644 ldap/servers/plugins/acl/acllist.c create mode 100644 ldap/servers/plugins/acl/aclparse.c create mode 100644 ldap/servers/plugins/acl/aclplugin.c create mode 100644 ldap/servers/plugins/acl/aclutil.c create mode 100644 ldap/servers/plugins/addn/addn.c create mode 100644 ldap/servers/plugins/addn/addn.h create mode 100644 ldap/servers/plugins/alias_entries/alias-entries.c create mode 100644 ldap/servers/plugins/alias_entries/alias-entries.h create mode 100644 ldap/servers/plugins/automember/automember.c create mode 100644 ldap/servers/plugins/automember/automember.h create mode 100644 ldap/servers/plugins/bitwise/bitwise.c create mode 100644 ldap/servers/plugins/chainingdb/cb.h create mode 100644 ldap/servers/plugins/chainingdb/cb_abandon.c create mode 100644 ldap/servers/plugins/chainingdb/cb_acl.c create mode 100644 ldap/servers/plugins/chainingdb/cb_add.c create mode 100644 ldap/servers/plugins/chainingdb/cb_bind.c create mode 100644 ldap/servers/plugins/chainingdb/cb_cleanup.c create mode 100644 ldap/servers/plugins/chainingdb/cb_close.c create mode 100644 ldap/servers/plugins/chainingdb/cb_compare.c create mode 100644 
ldap/servers/plugins/chainingdb/cb_config.c create mode 100644 ldap/servers/plugins/chainingdb/cb_conn_stateless.c create mode 100644 ldap/servers/plugins/chainingdb/cb_controls.c create mode 100644 ldap/servers/plugins/chainingdb/cb_debug.c create mode 100644 ldap/servers/plugins/chainingdb/cb_delete.c create mode 100644 ldap/servers/plugins/chainingdb/cb_init.c create mode 100644 ldap/servers/plugins/chainingdb/cb_instance.c create mode 100644 ldap/servers/plugins/chainingdb/cb_modify.c create mode 100644 ldap/servers/plugins/chainingdb/cb_modrdn.c create mode 100644 ldap/servers/plugins/chainingdb/cb_monitor.c create mode 100644 ldap/servers/plugins/chainingdb/cb_schema.c create mode 100644 ldap/servers/plugins/chainingdb/cb_search.c create mode 100644 ldap/servers/plugins/chainingdb/cb_start.c create mode 100644 ldap/servers/plugins/chainingdb/cb_temp.c create mode 100644 ldap/servers/plugins/chainingdb/cb_test.c create mode 100644 ldap/servers/plugins/chainingdb/cb_unbind.c create mode 100644 ldap/servers/plugins/chainingdb/cb_utils.c create mode 100644 ldap/servers/plugins/collation/collate.c create mode 100644 ldap/servers/plugins/collation/collate.h create mode 100644 ldap/servers/plugins/collation/config.c create mode 100644 ldap/servers/plugins/collation/config.h create mode 100644 ldap/servers/plugins/collation/debug.c create mode 100644 ldap/servers/plugins/collation/orfilter.c create mode 100644 ldap/servers/plugins/collation/orfilter.h create mode 100644 ldap/servers/plugins/cos/cos.c create mode 100644 ldap/servers/plugins/cos/cos_cache.c create mode 100644 ldap/servers/plugins/cos/cos_cache.h create mode 100644 ldap/servers/plugins/deref/deref.c create mode 100644 ldap/servers/plugins/deref/deref.h create mode 100644 ldap/servers/plugins/distrib/Makefile create mode 100644 ldap/servers/plugins/distrib/Makefile.HPUX create mode 100644 ldap/servers/plugins/distrib/Makefile.HPUX64 create mode 100644 ldap/servers/plugins/distrib/Makefile.Linux create 
mode 100644 ldap/servers/plugins/distrib/Makefile.SOLARIS create mode 100644 ldap/servers/plugins/distrib/Makefile.SOLARIS64 create mode 100644 ldap/servers/plugins/distrib/Makefile.SOLARISx86 create mode 100644 ldap/servers/plugins/distrib/README create mode 100644 ldap/servers/plugins/distrib/distrib.c create mode 100755 ldap/servers/plugins/dna/addentries.sh create mode 100755 ldap/servers/plugins/dna/config.sh create mode 100644 ldap/servers/plugins/dna/del_test_entries.dns create mode 100755 ldap/servers/plugins/dna/delentries.sh create mode 100644 ldap/servers/plugins/dna/dna.c create mode 100755 ldap/servers/plugins/dna/editentries.sh create mode 100755 ldap/servers/plugins/dna/oneentry.sh create mode 100644 ldap/servers/plugins/dna/posix.ldif create mode 100644 ldap/servers/plugins/dna/posix_one.ldif create mode 100644 ldap/servers/plugins/dna/posix_test.ldif create mode 100755 ldap/servers/plugins/dna/seeconfig.sh create mode 100755 ldap/servers/plugins/dna/seeentries.sh create mode 100644 ldap/servers/plugins/dna/subtest.ldif create mode 100644 ldap/servers/plugins/linkedattrs/fixup_task.c create mode 100644 ldap/servers/plugins/linkedattrs/linked_attrs.c create mode 100644 ldap/servers/plugins/linkedattrs/linked_attrs.h create mode 100644 ldap/servers/plugins/memberof/memberof.c create mode 100644 ldap/servers/plugins/memberof/memberof.h create mode 100644 ldap/servers/plugins/memberof/memberof_config.c create mode 100644 ldap/servers/plugins/mep/mep.c create mode 100644 ldap/servers/plugins/mep/mep.h create mode 100644 ldap/servers/plugins/pam_passthru/README create mode 100644 ldap/servers/plugins/pam_passthru/pam_passthru.h create mode 100644 ldap/servers/plugins/pam_passthru/pam_ptconfig.c create mode 100644 ldap/servers/plugins/pam_passthru/pam_ptdebug.c create mode 100644 ldap/servers/plugins/pam_passthru/pam_ptimpl.c create mode 100644 ldap/servers/plugins/pam_passthru/pam_ptpreop.c create mode 100644 ldap/servers/plugins/passthru/PT-Notes create 
mode 100644 ldap/servers/plugins/passthru/passthru.h create mode 100644 ldap/servers/plugins/passthru/ptbind.c create mode 100644 ldap/servers/plugins/passthru/ptconfig.c create mode 100644 ldap/servers/plugins/passthru/ptconn.c create mode 100644 ldap/servers/plugins/passthru/ptdebug.c create mode 100644 ldap/servers/plugins/passthru/ptpreop.c create mode 100644 ldap/servers/plugins/passthru/ptutil.c create mode 100644 ldap/servers/plugins/posix-winsync/README create mode 100644 ldap/servers/plugins/posix-winsync/posix-group-func.c create mode 100644 ldap/servers/plugins/posix-winsync/posix-group-func.h create mode 100644 ldap/servers/plugins/posix-winsync/posix-group-task.c create mode 100644 ldap/servers/plugins/posix-winsync/posix-winsync-config.c create mode 100644 ldap/servers/plugins/posix-winsync/posix-winsync.c create mode 100644 ldap/servers/plugins/posix-winsync/posix-wsp-ident.h create mode 100644 ldap/servers/plugins/pwdstorage/clear_pwd.c create mode 100644 ldap/servers/plugins/pwdstorage/crypt_pwd.c create mode 100644 ldap/servers/plugins/pwdstorage/gost_yescrypt.c create mode 100644 ldap/servers/plugins/pwdstorage/md5.h create mode 100644 ldap/servers/plugins/pwdstorage/md5_pwd.c create mode 100644 ldap/servers/plugins/pwdstorage/md5c.c create mode 100644 ldap/servers/plugins/pwdstorage/ns-mta-md5_pwd.bu create mode 100644 ldap/servers/plugins/pwdstorage/ns-mta-md5_pwd.c create mode 100644 ldap/servers/plugins/pwdstorage/pbkdf2_pwd.c create mode 100644 ldap/servers/plugins/pwdstorage/pwd_init.c create mode 100644 ldap/servers/plugins/pwdstorage/pwd_util.c create mode 100644 ldap/servers/plugins/pwdstorage/pwdstorage.h create mode 100644 ldap/servers/plugins/pwdstorage/sha_pwd.c create mode 100644 ldap/servers/plugins/pwdstorage/smd5_pwd.c create mode 100644 ldap/servers/plugins/pwdstorage/ssha_pwd.c create mode 100644 ldap/servers/plugins/referint/referint.c create mode 100644 ldap/servers/plugins/replication/cl5.h create mode 100644 
ldap/servers/plugins/replication/cl5_api.c create mode 100644 ldap/servers/plugins/replication/cl5_api.h create mode 100644 ldap/servers/plugins/replication/cl5_clcache.c create mode 100644 ldap/servers/plugins/replication/cl5_clcache.h create mode 100644 ldap/servers/plugins/replication/cl5_config.c create mode 100644 ldap/servers/plugins/replication/cl5_init.c create mode 100644 ldap/servers/plugins/replication/cl5_test.c create mode 100644 ldap/servers/plugins/replication/cl5_test.h create mode 100644 ldap/servers/plugins/replication/cl_crypt.c create mode 100644 ldap/servers/plugins/replication/cl_crypt.h create mode 100644 ldap/servers/plugins/replication/csnpl.c create mode 100644 ldap/servers/plugins/replication/csnpl.h create mode 100644 ldap/servers/plugins/replication/llist.c create mode 100644 ldap/servers/plugins/replication/llist.h create mode 100644 ldap/servers/plugins/replication/profile.c create mode 100644 ldap/servers/plugins/replication/repl-session-plugin.h create mode 100644 ldap/servers/plugins/replication/repl5.h create mode 100644 ldap/servers/plugins/replication/repl5_agmt.c create mode 100644 ldap/servers/plugins/replication/repl5_agmtlist.c create mode 100644 ldap/servers/plugins/replication/repl5_backoff.c create mode 100644 ldap/servers/plugins/replication/repl5_connection.c create mode 100644 ldap/servers/plugins/replication/repl5_inc_protocol.c create mode 100644 ldap/servers/plugins/replication/repl5_init.c create mode 100644 ldap/servers/plugins/replication/repl5_mtnode_ext.c create mode 100644 ldap/servers/plugins/replication/repl5_plugins.c create mode 100644 ldap/servers/plugins/replication/repl5_prot_private.h create mode 100644 ldap/servers/plugins/replication/repl5_protocol.c create mode 100644 ldap/servers/plugins/replication/repl5_protocol_util.c create mode 100644 ldap/servers/plugins/replication/repl5_replica.c create mode 100644 ldap/servers/plugins/replication/repl5_replica_config.c create mode 100644 
ldap/servers/plugins/replication/repl5_replica_dnhash.c create mode 100644 ldap/servers/plugins/replication/repl5_replica_hash.c create mode 100644 ldap/servers/plugins/replication/repl5_replsupplier.c create mode 100644 ldap/servers/plugins/replication/repl5_ruv.c create mode 100644 ldap/servers/plugins/replication/repl5_ruv.h create mode 100644 ldap/servers/plugins/replication/repl5_schedule.c create mode 100644 ldap/servers/plugins/replication/repl5_tot_protocol.c create mode 100644 ldap/servers/plugins/replication/repl5_total.c create mode 100644 ldap/servers/plugins/replication/repl5_updatedn_list.c create mode 100644 ldap/servers/plugins/replication/repl_cleanallruv.c create mode 100644 ldap/servers/plugins/replication/repl_connext.c create mode 100644 ldap/servers/plugins/replication/repl_controls.c create mode 100644 ldap/servers/plugins/replication/repl_ext.c create mode 100644 ldap/servers/plugins/replication/repl_extop.c create mode 100644 ldap/servers/plugins/replication/repl_globals.c create mode 100644 ldap/servers/plugins/replication/repl_helper.c create mode 100644 ldap/servers/plugins/replication/repl_helper.h create mode 100644 ldap/servers/plugins/replication/repl_opext.c create mode 100644 ldap/servers/plugins/replication/repl_session_plugin.c create mode 100644 ldap/servers/plugins/replication/repl_shared.h create mode 100644 ldap/servers/plugins/replication/replutil.c create mode 100644 ldap/servers/plugins/replication/test_repl_session_plugin.c create mode 100644 ldap/servers/plugins/replication/tests/dnp_sim.c create mode 100644 ldap/servers/plugins/replication/tests/dnp_sim2.c create mode 100644 ldap/servers/plugins/replication/tests/dnp_sim3.c create mode 100755 ldap/servers/plugins/replication/tests/makesim create mode 100644 ldap/servers/plugins/replication/urp.c create mode 100644 ldap/servers/plugins/replication/urp.h create mode 100644 ldap/servers/plugins/replication/urp_glue.c create mode 100644 
ldap/servers/plugins/replication/urp_tombstone.c create mode 100644 ldap/servers/plugins/replication/windows_connection.c create mode 100644 ldap/servers/plugins/replication/windows_inc_protocol.c create mode 100644 ldap/servers/plugins/replication/windows_private.c create mode 100644 ldap/servers/plugins/replication/windows_prot_private.h create mode 100644 ldap/servers/plugins/replication/windows_protocol_util.c create mode 100644 ldap/servers/plugins/replication/windows_tot_protocol.c create mode 100644 ldap/servers/plugins/replication/windowsrepl.h create mode 100644 ldap/servers/plugins/replication/winsync-plugin.h create mode 100644 ldap/servers/plugins/retrocl/linktest.c create mode 100644 ldap/servers/plugins/retrocl/retrocl.c create mode 100644 ldap/servers/plugins/retrocl/retrocl.h create mode 100644 ldap/servers/plugins/retrocl/retrocl.txt create mode 100644 ldap/servers/plugins/retrocl/retrocl_cn.c create mode 100644 ldap/servers/plugins/retrocl/retrocl_create.c create mode 100644 ldap/servers/plugins/retrocl/retrocl_po.c create mode 100644 ldap/servers/plugins/retrocl/retrocl_rootdse.c create mode 100644 ldap/servers/plugins/retrocl/retrocl_trim.c create mode 100644 ldap/servers/plugins/rever/pbe.c create mode 100644 ldap/servers/plugins/rever/rever.c create mode 100644 ldap/servers/plugins/rever/rever.h create mode 100644 ldap/servers/plugins/roles/roles_cache.c create mode 100644 ldap/servers/plugins/roles/roles_cache.h create mode 100644 ldap/servers/plugins/roles/roles_plugin.c create mode 100644 ldap/servers/plugins/rootdn_access/rootdn_access.c create mode 100644 ldap/servers/plugins/rootdn_access/rootdn_access.h create mode 100644 ldap/servers/plugins/schema_reload/schema_reload.c create mode 100644 ldap/servers/plugins/statechange/statechange.c create mode 100644 ldap/servers/plugins/sync/README.md create mode 100644 ldap/servers/plugins/sync/sync.h create mode 100644 ldap/servers/plugins/sync/sync_init.c create mode 100644 
ldap/servers/plugins/sync/sync_persist.c create mode 100644 ldap/servers/plugins/sync/sync_refresh.c create mode 100644 ldap/servers/plugins/sync/sync_util.c create mode 100644 ldap/servers/plugins/syntaxes/bin.c create mode 100644 ldap/servers/plugins/syntaxes/bitstring.c create mode 100644 ldap/servers/plugins/syntaxes/ces.c create mode 100644 ldap/servers/plugins/syntaxes/cis.c create mode 100644 ldap/servers/plugins/syntaxes/debug.c create mode 100644 ldap/servers/plugins/syntaxes/deliverymethod.c create mode 100644 ldap/servers/plugins/syntaxes/dn.c create mode 100644 ldap/servers/plugins/syntaxes/facsimile.c create mode 100644 ldap/servers/plugins/syntaxes/guide.c create mode 100644 ldap/servers/plugins/syntaxes/int.c create mode 100644 ldap/servers/plugins/syntaxes/nameoptuid.c create mode 100644 ldap/servers/plugins/syntaxes/numericstring.c create mode 100644 ldap/servers/plugins/syntaxes/phonetic.c create mode 100644 ldap/servers/plugins/syntaxes/sicis.c create mode 100644 ldap/servers/plugins/syntaxes/string.c create mode 100644 ldap/servers/plugins/syntaxes/syntax.h create mode 100644 ldap/servers/plugins/syntaxes/syntax_common.c create mode 100644 ldap/servers/plugins/syntaxes/tel.c create mode 100644 ldap/servers/plugins/syntaxes/teletex.c create mode 100644 ldap/servers/plugins/syntaxes/telex.c create mode 100644 ldap/servers/plugins/syntaxes/validate.c create mode 100644 ldap/servers/plugins/syntaxes/validate_task.c create mode 100644 ldap/servers/plugins/syntaxes/value.c create mode 100644 ldap/servers/plugins/uiduniq/7bit.c create mode 100644 ldap/servers/plugins/uiduniq/UID-Notes create mode 100644 ldap/servers/plugins/uiduniq/plugin-utils.h create mode 100644 ldap/servers/plugins/uiduniq/uid.c create mode 100644 ldap/servers/plugins/uiduniq/utils.c create mode 100644 ldap/servers/plugins/usn/usn.c create mode 100644 ldap/servers/plugins/usn/usn.h create mode 100644 ldap/servers/plugins/usn/usn_cleanup.c create mode 100644 
ldap/servers/plugins/vattrsp_template/vattrsp.c create mode 100644 ldap/servers/plugins/views/views.c create mode 100644 ldap/servers/plugins/whoami/whoami.c create mode 100644 ldap/servers/slapd/abandon.c create mode 100644 ldap/servers/slapd/add.c create mode 100644 ldap/servers/slapd/agtmmap.c create mode 100644 ldap/servers/slapd/agtmmap.h create mode 100644 ldap/servers/slapd/apibroker.c create mode 100644 ldap/servers/slapd/attr.c create mode 100644 ldap/servers/slapd/attrlist.c create mode 100644 ldap/servers/slapd/attrsyntax.c create mode 100644 ldap/servers/slapd/auditlog.c create mode 100644 ldap/servers/slapd/auth.c create mode 100644 ldap/servers/slapd/auth.h create mode 100644 ldap/servers/slapd/ava.c create mode 100644 ldap/servers/slapd/back-ldbm/ancestorid.c create mode 100644 ldap/servers/slapd/back-ldbm/archive.c create mode 100644 ldap/servers/slapd/back-ldbm/attrcrypt.h create mode 100644 ldap/servers/slapd/back-ldbm/back-ldbm.h create mode 100644 ldap/servers/slapd/back-ldbm/backentry.c create mode 100644 ldap/servers/slapd/back-ldbm/cache.c create mode 100644 ldap/servers/slapd/back-ldbm/cleanup.c create mode 100644 ldap/servers/slapd/back-ldbm/close.c create mode 100644 ldap/servers/slapd/back-ldbm/db-bdb/bdb_config.c create mode 100644 ldap/servers/slapd/back-ldbm/db-bdb/bdb_import.c create mode 100644 ldap/servers/slapd/back-ldbm/db-bdb/bdb_import_threads.c create mode 100644 ldap/servers/slapd/back-ldbm/db-bdb/bdb_instance_config.c create mode 100644 ldap/servers/slapd/back-ldbm/db-bdb/bdb_layer.c create mode 100644 ldap/servers/slapd/back-ldbm/db-bdb/bdb_layer.h create mode 100644 ldap/servers/slapd/back-ldbm/db-bdb/bdb_ldif2db.c create mode 100644 ldap/servers/slapd/back-ldbm/db-bdb/bdb_misc.c create mode 100644 ldap/servers/slapd/back-ldbm/db-bdb/bdb_monitor.c create mode 100644 ldap/servers/slapd/back-ldbm/db-bdb/bdb_perfctrs.c create mode 100644 ldap/servers/slapd/back-ldbm/db-bdb/bdb_perfctrs.h create mode 100644 
ldap/servers/slapd/back-ldbm/db-bdb/bdb_upgrade.c create mode 100644 ldap/servers/slapd/back-ldbm/db-bdb/bdb_verify.c create mode 100644 ldap/servers/slapd/back-ldbm/db-bdb/bdb_version.c create mode 100644 ldap/servers/slapd/back-ldbm/db-mdb/mdb_config.c create mode 100644 ldap/servers/slapd/back-ldbm/db-mdb/mdb_dbicmp.h create mode 100644 ldap/servers/slapd/back-ldbm/db-mdb/mdb_debug.c create mode 100644 ldap/servers/slapd/back-ldbm/db-mdb/mdb_debug.h create mode 100644 ldap/servers/slapd/back-ldbm/db-mdb/mdb_import.c create mode 100644 ldap/servers/slapd/back-ldbm/db-mdb/mdb_import.h create mode 100644 ldap/servers/slapd/back-ldbm/db-mdb/mdb_import_threads.c create mode 100644 ldap/servers/slapd/back-ldbm/db-mdb/mdb_instance.c create mode 100644 ldap/servers/slapd/back-ldbm/db-mdb/mdb_instance_config.c create mode 100644 ldap/servers/slapd/back-ldbm/db-mdb/mdb_layer.c create mode 100644 ldap/servers/slapd/back-ldbm/db-mdb/mdb_layer.h create mode 100644 ldap/servers/slapd/back-ldbm/db-mdb/mdb_ldif2db.c create mode 100644 ldap/servers/slapd/back-ldbm/db-mdb/mdb_misc.c create mode 100644 ldap/servers/slapd/back-ldbm/db-mdb/mdb_monitor.c create mode 100644 ldap/servers/slapd/back-ldbm/db-mdb/mdb_perfctrs.c create mode 100644 ldap/servers/slapd/back-ldbm/db-mdb/mdb_perfctrs.h create mode 100644 ldap/servers/slapd/back-ldbm/db-mdb/mdb_txn.c create mode 100644 ldap/servers/slapd/back-ldbm/db-mdb/mdb_upgrade.c create mode 100644 ldap/servers/slapd/back-ldbm/db-mdb/mdb_verify.c create mode 100644 ldap/servers/slapd/back-ldbm/dbimpl.c create mode 100644 ldap/servers/slapd/back-ldbm/dbimpl.h create mode 100644 ldap/servers/slapd/back-ldbm/dblayer.c create mode 100644 ldap/servers/slapd/back-ldbm/dblayer.h create mode 100644 ldap/servers/slapd/back-ldbm/dbsize.c create mode 100644 ldap/servers/slapd/back-ldbm/dbverify.c create mode 100644 ldap/servers/slapd/back-ldbm/dn2entry.c create mode 100644 ldap/servers/slapd/back-ldbm/entrystore.c create mode 100644 
ldap/servers/slapd/back-ldbm/filterindex.c create mode 100644 ldap/servers/slapd/back-ldbm/findentry.c create mode 100644 ldap/servers/slapd/back-ldbm/haschildren.c create mode 100644 ldap/servers/slapd/back-ldbm/id2entry.c create mode 100644 ldap/servers/slapd/back-ldbm/idl.c create mode 100644 ldap/servers/slapd/back-ldbm/idl_common.c create mode 100644 ldap/servers/slapd/back-ldbm/idl_new.c create mode 100644 ldap/servers/slapd/back-ldbm/idl_set.c create mode 100644 ldap/servers/slapd/back-ldbm/idl_shim.c create mode 100644 ldap/servers/slapd/back-ldbm/import.c create mode 100644 ldap/servers/slapd/back-ldbm/import.h create mode 100644 ldap/servers/slapd/back-ldbm/index.c create mode 100644 ldap/servers/slapd/back-ldbm/init.c create mode 100644 ldap/servers/slapd/back-ldbm/instance.c create mode 100644 ldap/servers/slapd/back-ldbm/ldbm_abandon.c create mode 100644 ldap/servers/slapd/back-ldbm/ldbm_add.c create mode 100644 ldap/servers/slapd/back-ldbm/ldbm_attr.c create mode 100644 ldap/servers/slapd/back-ldbm/ldbm_attrcrypt.c create mode 100644 ldap/servers/slapd/back-ldbm/ldbm_attrcrypt_config.c create mode 100644 ldap/servers/slapd/back-ldbm/ldbm_bind.c create mode 100644 ldap/servers/slapd/back-ldbm/ldbm_compare.c create mode 100644 ldap/servers/slapd/back-ldbm/ldbm_config.c create mode 100644 ldap/servers/slapd/back-ldbm/ldbm_config.h create mode 100644 ldap/servers/slapd/back-ldbm/ldbm_delete.c create mode 100644 ldap/servers/slapd/back-ldbm/ldbm_entryrdn.c create mode 100644 ldap/servers/slapd/back-ldbm/ldbm_index_config.c create mode 100644 ldap/servers/slapd/back-ldbm/ldbm_instance_config.c create mode 100644 ldap/servers/slapd/back-ldbm/ldbm_modify.c create mode 100644 ldap/servers/slapd/back-ldbm/ldbm_modrdn.c create mode 100644 ldap/servers/slapd/back-ldbm/ldbm_search.c create mode 100644 ldap/servers/slapd/back-ldbm/ldbm_unbind.c create mode 100644 ldap/servers/slapd/back-ldbm/ldbm_usn.c create mode 100644 ldap/servers/slapd/back-ldbm/ldif2ldbm.c 
create mode 100644 ldap/servers/slapd/back-ldbm/matchrule.c create mode 100644 ldap/servers/slapd/back-ldbm/misc.c create mode 100644 ldap/servers/slapd/back-ldbm/nextid.c create mode 100644 ldap/servers/slapd/back-ldbm/parents.c create mode 100644 ldap/servers/slapd/back-ldbm/proto-back-ldbm.h create mode 100644 ldap/servers/slapd/back-ldbm/rmdb.c create mode 100644 ldap/servers/slapd/back-ldbm/seq.c create mode 100644 ldap/servers/slapd/back-ldbm/sort.c create mode 100644 ldap/servers/slapd/back-ldbm/start.c create mode 100644 ldap/servers/slapd/back-ldbm/uniqueid2entry.c create mode 100644 ldap/servers/slapd/back-ldbm/vlv.c create mode 100644 ldap/servers/slapd/back-ldbm/vlv_key.c create mode 100644 ldap/servers/slapd/back-ldbm/vlv_key.h create mode 100644 ldap/servers/slapd/back-ldbm/vlv_srch.c create mode 100644 ldap/servers/slapd/back-ldbm/vlv_srch.h create mode 100644 ldap/servers/slapd/backend.c create mode 100644 ldap/servers/slapd/backend_manager.c create mode 100644 ldap/servers/slapd/bind.c create mode 100644 ldap/servers/slapd/bitset.c create mode 100644 ldap/servers/slapd/bulk_import.c create mode 100644 ldap/servers/slapd/ch_malloc.c create mode 100644 ldap/servers/slapd/charray.c create mode 100644 ldap/servers/slapd/compare.c create mode 100644 ldap/servers/slapd/computed.c create mode 100644 ldap/servers/slapd/config.c create mode 100644 ldap/servers/slapd/configdse.c create mode 100644 ldap/servers/slapd/connection.c create mode 100644 ldap/servers/slapd/conntable.c create mode 100644 ldap/servers/slapd/control.c create mode 100644 ldap/servers/slapd/counters.c create mode 100644 ldap/servers/slapd/csn.c create mode 100644 ldap/servers/slapd/csngen.c create mode 100644 ldap/servers/slapd/csngen.h create mode 100644 ldap/servers/slapd/csnset.c create mode 100644 ldap/servers/slapd/daemon.c create mode 100644 ldap/servers/slapd/defbackend.c create mode 100644 ldap/servers/slapd/delete.c create mode 100644 ldap/servers/slapd/detach.c create mode 
100644 ldap/servers/slapd/disconnect_error_strings.h create mode 100644 ldap/servers/slapd/disconnect_errors.h create mode 100644 ldap/servers/slapd/dl.c create mode 100644 ldap/servers/slapd/dn.c create mode 100644 ldap/servers/slapd/dse.c create mode 100644 ldap/servers/slapd/dynalib.c create mode 100644 ldap/servers/slapd/entry.c create mode 100644 ldap/servers/slapd/entrywsi.c create mode 100644 ldap/servers/slapd/errormap.c create mode 100644 ldap/servers/slapd/eventq-deprecated.c create mode 100644 ldap/servers/slapd/eventq.c create mode 100644 ldap/servers/slapd/extendop.c create mode 100644 ldap/servers/slapd/factory.c create mode 100644 ldap/servers/slapd/fe.h create mode 100644 ldap/servers/slapd/features.c create mode 100644 ldap/servers/slapd/fedse.c create mode 100644 ldap/servers/slapd/fileio.c create mode 100644 ldap/servers/slapd/filter.c create mode 100644 ldap/servers/slapd/filter.h create mode 100644 ldap/servers/slapd/filtercmp.c create mode 100644 ldap/servers/slapd/filterentry.c create mode 100644 ldap/servers/slapd/generation.c create mode 100644 ldap/servers/slapd/getfilelist.c create mode 100644 ldap/servers/slapd/getopt_ext.c create mode 100644 ldap/servers/slapd/getopt_ext.h create mode 100644 ldap/servers/slapd/getsocketpeer.c create mode 100644 ldap/servers/slapd/getsocketpeer.h create mode 100644 ldap/servers/slapd/globals.c create mode 100644 ldap/servers/slapd/haproxy.c create mode 100644 ldap/servers/slapd/haproxy.h create mode 100644 ldap/servers/slapd/house.c create mode 100644 ldap/servers/slapd/http.h create mode 100644 ldap/servers/slapd/index_subsystem.c create mode 100644 ldap/servers/slapd/init.c create mode 100644 ldap/servers/slapd/intrinsics.h create mode 100644 ldap/servers/slapd/ldapi.c create mode 100644 ldap/servers/slapd/ldaputil.c create mode 100644 ldap/servers/slapd/ldbmlinktest.c create mode 100644 ldap/servers/slapd/lenstr.c create mode 100644 ldap/servers/slapd/libglobs.c create mode 100644 
ldap/servers/slapd/libmakefile create mode 100644 ldap/servers/slapd/listConfigAttrs.pl create mode 100644 ldap/servers/slapd/localhost.c create mode 100644 ldap/servers/slapd/lock.c create mode 100644 ldap/servers/slapd/log.c create mode 100644 ldap/servers/slapd/log.h create mode 100644 ldap/servers/slapd/main.c create mode 100644 ldap/servers/slapd/mapping_tree.c create mode 100644 ldap/servers/slapd/match.c create mode 100755 ldap/servers/slapd/mkDBErrStrs.py create mode 100644 ldap/servers/slapd/modify.c create mode 100644 ldap/servers/slapd/modrdn.c create mode 100644 ldap/servers/slapd/modutil.c create mode 100644 ldap/servers/slapd/monitor.c create mode 100644 ldap/servers/slapd/object.c create mode 100644 ldap/servers/slapd/objset.c create mode 100644 ldap/servers/slapd/openldapber.h create mode 100644 ldap/servers/slapd/operation.c create mode 100644 ldap/servers/slapd/opshared.c create mode 100644 ldap/servers/slapd/pagedresults.c create mode 100644 ldap/servers/slapd/passwd_extop.c create mode 100644 ldap/servers/slapd/pblock.c create mode 100644 ldap/servers/slapd/pblock_v3.h create mode 100644 ldap/servers/slapd/plugin.c create mode 100644 ldap/servers/slapd/plugin_acl.c create mode 100644 ldap/servers/slapd/plugin_internal_op.c create mode 100644 ldap/servers/slapd/plugin_mmr.c create mode 100644 ldap/servers/slapd/plugin_mr.c create mode 100644 ldap/servers/slapd/plugin_role.c create mode 100644 ldap/servers/slapd/plugin_syntax.c create mode 100644 ldap/servers/slapd/poll_using_select.c create mode 100644 ldap/servers/slapd/poll_using_select.h create mode 100644 ldap/servers/slapd/prerrstrs.h create mode 100644 ldap/servers/slapd/protect_db.c create mode 100644 ldap/servers/slapd/protect_db.h create mode 100644 ldap/servers/slapd/proto-slap.h create mode 100644 ldap/servers/slapd/proxyauth.c create mode 100644 ldap/servers/slapd/psearch.c create mode 100644 ldap/servers/slapd/pw.c create mode 100644 ldap/servers/slapd/pw.h create mode 100644 
ldap/servers/slapd/pw_mgmt.c create mode 100644 ldap/servers/slapd/pw_retry.c create mode 100644 ldap/servers/slapd/pw_verify.c create mode 100644 ldap/servers/slapd/pw_verify.h create mode 100644 ldap/servers/slapd/rdn.c create mode 100644 ldap/servers/slapd/referral.c create mode 100644 ldap/servers/slapd/regex.c create mode 100644 ldap/servers/slapd/resourcelimit.c create mode 100644 ldap/servers/slapd/result.c create mode 100644 ldap/servers/slapd/rewriters.c create mode 100644 ldap/servers/slapd/rootdse.c create mode 100644 ldap/servers/slapd/sasl_io.c create mode 100644 ldap/servers/slapd/sasl_map.c create mode 100644 ldap/servers/slapd/saslbind.c create mode 100644 ldap/servers/slapd/schema.c create mode 100644 ldap/servers/slapd/schemaparse.c create mode 100644 ldap/servers/slapd/search.c create mode 100644 ldap/servers/slapd/secerrstrs.h create mode 100644 ldap/servers/slapd/security_wrappers.c create mode 100644 ldap/servers/slapd/slap.h create mode 100644 ldap/servers/slapd/slapd.lite.key create mode 100644 ldap/servers/slapd/slapd.normal.key create mode 100644 ldap/servers/slapd/slapd_plhash.c create mode 100644 ldap/servers/slapd/slapi-memberof.c create mode 100644 ldap/servers/slapd/slapi-plugin-compat4.h create mode 100644 ldap/servers/slapd/slapi-plugin.h create mode 100644 ldap/servers/slapd/slapi-private.h create mode 100644 ldap/servers/slapd/slapi2runtime.c create mode 100644 ldap/servers/slapd/slapi_counter.c create mode 100644 ldap/servers/slapd/slapi_pal.c create mode 100644 ldap/servers/slapd/slapi_pal.h create mode 100644 ldap/servers/slapd/snmp_collator.c create mode 100644 ldap/servers/slapd/snmp_collator.h create mode 100644 ldap/servers/slapd/snoop.c create mode 100644 ldap/servers/slapd/sort.c create mode 100644 ldap/servers/slapd/ssl.c create mode 100644 ldap/servers/slapd/sslerrstrs.h create mode 100644 ldap/servers/slapd/start_tls_extop.c create mode 100644 ldap/servers/slapd/statechange.h create mode 100644 
ldap/servers/slapd/str2filter.c create mode 100644 ldap/servers/slapd/strdup.c create mode 100644 ldap/servers/slapd/stubrepl.c create mode 100644 ldap/servers/slapd/stubs.c create mode 100644 ldap/servers/slapd/subentries.c create mode 100644 ldap/servers/slapd/subentry.c create mode 100644 ldap/servers/slapd/task.c create mode 100644 ldap/servers/slapd/tempnam.c create mode 100644 ldap/servers/slapd/test-plugins/Makefile create mode 100644 ldap/servers/slapd/test-plugins/Makefile.AIX create mode 100644 ldap/servers/slapd/test-plugins/Makefile.BSDI create mode 100644 ldap/servers/slapd/test-plugins/Makefile.HPUX create mode 100644 ldap/servers/slapd/test-plugins/Makefile.HPUX64 create mode 100644 ldap/servers/slapd/test-plugins/Makefile.IRIX create mode 100644 ldap/servers/slapd/test-plugins/Makefile.Linux create mode 100644 ldap/servers/slapd/test-plugins/Makefile.OSF1 create mode 100644 ldap/servers/slapd/test-plugins/Makefile.ReliantUNIX create mode 100644 ldap/servers/slapd/test-plugins/Makefile.SOLARIS create mode 100644 ldap/servers/slapd/test-plugins/Makefile.SOLARIS64 create mode 100644 ldap/servers/slapd/test-plugins/Makefile.SOLARISx86 create mode 100644 ldap/servers/slapd/test-plugins/Makefile.UnixWare create mode 100644 ldap/servers/slapd/test-plugins/Makefile.UnixWareUDK create mode 100644 ldap/servers/slapd/test-plugins/Makefile.server create mode 100644 ldap/servers/slapd/test-plugins/README create mode 100644 ldap/servers/slapd/test-plugins/clients/README create mode 100644 ldap/servers/slapd/test-plugins/clients/ReqExtOp.java create mode 100644 ldap/servers/slapd/test-plugins/clients/reqextop.c create mode 100755 ldap/servers/slapd/test-plugins/installDse.pl create mode 100644 ldap/servers/slapd/test-plugins/nicknames create mode 100644 ldap/servers/slapd/test-plugins/sampletask.c create mode 100644 ldap/servers/slapd/test-plugins/test_slapi_memberof.c create mode 100644 ldap/servers/slapd/test-plugins/testbind.c create mode 100644 
ldap/servers/slapd/test-plugins/testdatainterop.c create mode 100644 ldap/servers/slapd/test-plugins/testdbinterop.c create mode 100644 ldap/servers/slapd/test-plugins/testdbinterop.h create mode 100644 ldap/servers/slapd/test-plugins/testentry.c create mode 100644 ldap/servers/slapd/test-plugins/testextendedop.c create mode 100644 ldap/servers/slapd/test-plugins/testgetip.c create mode 100644 ldap/servers/slapd/test-plugins/testpostop.c create mode 100644 ldap/servers/slapd/test-plugins/testpreop.c create mode 100644 ldap/servers/slapd/test-plugins/testsaslbind.c create mode 100644 ldap/servers/slapd/thread_data.c create mode 100644 ldap/servers/slapd/time.c create mode 100644 ldap/servers/slapd/tools/dbscan.c create mode 100644 ldap/servers/slapd/tools/eggencode.c create mode 100644 ldap/servers/slapd/tools/ldaptool-sasl.c create mode 100644 ldap/servers/slapd/tools/ldaptool-sasl.h create mode 100644 ldap/servers/slapd/tools/ldaptool.h create mode 100644 ldap/servers/slapd/tools/ldclt/README create mode 100644 ldap/servers/slapd/tools/ldclt/data.c create mode 100644 ldap/servers/slapd/tools/ldclt/examples/001/add.ksh create mode 100644 ldap/servers/slapd/tools/ldclt/examples/001/add_incr.ksh create mode 100644 ldap/servers/slapd/tools/ldclt/examples/001/config.ksh create mode 100644 ldap/servers/slapd/tools/ldclt/examples/001/delete.ksh create mode 100644 ldap/servers/slapd/tools/ldclt/examples/001/env.ksh create mode 100644 ldap/servers/slapd/tools/ldclt/examples/001/search.ksh create mode 100644 ldap/servers/slapd/tools/ldclt/examples/002/add.ksh create mode 100644 ldap/servers/slapd/tools/ldclt/examples/002/config.ksh create mode 100644 ldap/servers/slapd/tools/ldclt/examples/002/env.ksh create mode 100644 ldap/servers/slapd/tools/ldclt/examples/002/ldif01.ksh create mode 100644 ldap/servers/slapd/tools/ldclt/examples/002/ldif02.ksh create mode 100644 ldap/servers/slapd/tools/ldclt/examples/002/ldif03.ksh create mode 100644 
ldap/servers/slapd/tools/ldclt/examples/002/ofile create mode 100644 ldap/servers/slapd/tools/ldclt/examples/README create mode 100644 ldap/servers/slapd/tools/ldclt/ldap-private.h create mode 100644 ldap/servers/slapd/tools/ldclt/ldapfct.c create mode 100644 ldap/servers/slapd/tools/ldclt/ldclt.c create mode 100644 ldap/servers/slapd/tools/ldclt/ldclt.h create mode 100644 ldap/servers/slapd/tools/ldclt/ldclt.man create mode 100644 ldap/servers/slapd/tools/ldclt/ldclt.use create mode 100644 ldap/servers/slapd/tools/ldclt/ldcltU.c create mode 100644 ldap/servers/slapd/tools/ldclt/opCheck.c create mode 100644 ldap/servers/slapd/tools/ldclt/parser.c create mode 100644 ldap/servers/slapd/tools/ldclt/port.c create mode 100644 ldap/servers/slapd/tools/ldclt/port.h create mode 100644 ldap/servers/slapd/tools/ldclt/remote.h create mode 100644 ldap/servers/slapd/tools/ldclt/repcheck.c create mode 100644 ldap/servers/slapd/tools/ldclt/repworker.c create mode 100644 ldap/servers/slapd/tools/ldclt/scalab01.c create mode 100644 ldap/servers/slapd/tools/ldclt/scalab01.h create mode 100644 ldap/servers/slapd/tools/ldclt/srv.c create mode 100644 ldap/servers/slapd/tools/ldclt/threadMain.c create mode 100644 ldap/servers/slapd/tools/ldclt/utils.c create mode 100644 ldap/servers/slapd/tools/ldclt/utils.h create mode 100644 ldap/servers/slapd/tools/ldclt/version.c create mode 100644 ldap/servers/slapd/tools/ldclt/workarounds.c create mode 100644 ldap/servers/slapd/tools/mkdep.c create mode 100644 ldap/servers/slapd/tools/pwenc.c create mode 100644 ldap/servers/slapd/unbind.c create mode 100644 ldap/servers/slapd/uniqueid.c create mode 100644 ldap/servers/slapd/uniqueidgen.c create mode 100644 ldap/servers/slapd/upgrade.c create mode 100644 ldap/servers/slapd/utf8.c create mode 100644 ldap/servers/slapd/utf8compare.c create mode 100644 ldap/servers/slapd/util.c create mode 100644 ldap/servers/slapd/uuid.c create mode 100644 ldap/servers/slapd/uuid.h create mode 100644 
ldap/servers/slapd/value.c create mode 100644 ldap/servers/slapd/valueset.c create mode 100644 ldap/servers/slapd/vattr.c create mode 100644 ldap/servers/slapd/vattr_spi.h create mode 100644 ldap/servers/slapd/views.h create mode 100644 ldap/servers/snmp/ldap-agent.c create mode 100644 ldap/servers/snmp/ldap-agent.conf.in create mode 100644 ldap/servers/snmp/ldap-agent.h create mode 100644 ldap/servers/snmp/main.c create mode 100644 ldap/servers/snmp/redhat-directory.mib create mode 100644 ldap/systools/README create mode 100755 ldap/systools/getHPPatches.pl create mode 100755 ldap/systools/getSolPatches.pl create mode 100644 ldap/systools/hp_patches.c create mode 100755 ldap/systools/mergeSolPatches.pl create mode 100644 ldap/systools/pio.h create mode 100644 ldap/systools/sol_patches.c create mode 100644 ldap/systools/viewcore.c create mode 100644 lib/base/.cvsignore create mode 100644 lib/base/crit.cpp create mode 100644 lib/base/dnsdmain.cpp create mode 100644 lib/base/ereport.cpp create mode 100644 lib/base/file.cpp create mode 100644 lib/base/fsmutex.cpp create mode 100644 lib/base/lexer_pvt.h create mode 100644 lib/base/nscperror.c create mode 100644 lib/base/plist.cpp create mode 100644 lib/base/plist_pvt.h create mode 100644 lib/base/pool.cpp create mode 100644 lib/base/shexp.cpp create mode 100644 lib/base/system.cpp create mode 100644 lib/base/systhr.cpp create mode 100644 lib/base/util.cpp create mode 100644 lib/ldaputil/.cvsignore create mode 100644 lib/ldaputil/cert.c create mode 100644 lib/ldaputil/certmap.c create mode 100644 lib/ldaputil/certmap.conf create mode 100644 lib/ldaputil/dbconf.c create mode 100644 lib/ldaputil/encode.c create mode 100644 lib/ldaputil/errors.c create mode 100644 lib/ldaputil/examples/Makefile create mode 100644 lib/ldaputil/examples/README create mode 100644 lib/ldaputil/examples/init.c create mode 100644 lib/ldaputil/examples/plugin.c create mode 100644 lib/ldaputil/examples/plugin.h create mode 100644 
lib/ldaputil/init.c create mode 100644 lib/ldaputil/ldapauth.c create mode 100644 lib/ldaputil/ldapu-changes.html create mode 100644 lib/ldaputil/ldaputili.h create mode 100644 lib/ldaputil/vtable.c create mode 100644 lib/libaccess/.cvsignore create mode 100644 lib/libaccess/access_plhash.cpp create mode 100644 lib/libaccess/access_plhash.h create mode 100644 lib/libaccess/acl.tab.cpp create mode 100644 lib/libaccess/acl.tab.h create mode 100644 lib/libaccess/acl.yy.cpp create mode 100644 lib/libaccess/aclcache.cpp create mode 100644 lib/libaccess/aclcache.h create mode 100644 lib/libaccess/aclerror.cpp create mode 100644 lib/libaccess/acleval.cpp create mode 100644 lib/libaccess/aclflush.cpp create mode 100644 lib/libaccess/aclpriv.h create mode 100644 lib/libaccess/aclscan.h create mode 100644 lib/libaccess/aclscan.l create mode 100644 lib/libaccess/aclspace.cpp create mode 100644 lib/libaccess/acltext.y create mode 100644 lib/libaccess/acltools.cpp create mode 100644 lib/libaccess/aclutil.cpp create mode 100644 lib/libaccess/aclutil.h create mode 100644 lib/libaccess/authdb.cpp create mode 100644 lib/libaccess/las.h create mode 100644 lib/libaccess/lasdns.cpp create mode 100644 lib/libaccess/lasdns.h create mode 100644 lib/libaccess/lasgroup.cpp create mode 100644 lib/libaccess/lasip.cpp create mode 100644 lib/libaccess/lasip.h create mode 100644 lib/libaccess/lastod.cpp create mode 100644 lib/libaccess/lasuser.cpp create mode 100644 lib/libaccess/ldapauth.h create mode 100644 lib/libaccess/method.cpp create mode 100644 lib/libaccess/nsautherr.cpp create mode 100644 lib/libaccess/nseframe.cpp create mode 100644 lib/libaccess/oneeval.cpp create mode 100644 lib/libaccess/oneeval.h create mode 100644 lib/libaccess/parse.h create mode 100644 lib/libaccess/permhash.h create mode 100644 lib/libaccess/register.cpp create mode 100644 lib/libaccess/symbols.cpp create mode 100644 lib/libaccess/usi.cpp create mode 100644 lib/libaccess/usrcache.cpp create mode 100644 
lib/libaccess/yy-sed create mode 100644 lib/libadmin/.cvsignore create mode 100644 lib/libadmin/error.c create mode 100644 lib/libadmin/template.c create mode 100644 lib/libadmin/util.c create mode 100644 lib/libsi18n/getstrmem.h create mode 100644 lib/libsi18n/getstrprop.c create mode 100644 lib/libsi18n/gsslapd.h create mode 100644 lib/libsi18n/makstrdb.c create mode 100644 lib/libsi18n/reshash.c create mode 100644 lib/libsi18n/reshash.h create mode 100644 lib/libsi18n/txtfile.c create mode 100644 lib/libsi18n/txtfile.h create mode 100644 m4/ax_compare_version.m4 create mode 100644 m4/db.m4 create mode 100644 m4/doxygen.m4 create mode 100644 m4/fhs.m4 create mode 100644 m4/netsnmp.m4 create mode 100644 m4/openldap.m4 create mode 100644 m4/selinux.m4 create mode 100644 m4/systemd.m4 create mode 100644 man/man1/dbscan.1 create mode 100644 man/man1/ds-logpipe.py.1 create mode 100644 man/man1/ds-replcheck.1 create mode 100644 man/man1/dsktune.1 create mode 100644 man/man1/infadd.1 create mode 100644 man/man1/ldap-agent.1 create mode 100644 man/man1/ldclt.1 create mode 100644 man/man1/ldif.1 create mode 100644 man/man1/logconv.pl.1 create mode 100644 man/man1/mmldif.1 create mode 100644 man/man1/pwdhash.1 create mode 100644 man/man1/rsearch.1 create mode 100644 man/man5/99user.ldif.5 create mode 100644 man/man5/certmap.conf.5 create mode 100644 man/man5/dirsrv.5 create mode 100644 man/man5/dirsrv.systemd.5 create mode 100644 man/man5/slapd-collations.conf.5 create mode 100644 man/man5/template-initconfig.5 create mode 100644 man/man8/ns-slapd.8 create mode 100644 profiling/stap/probe_do_search_detail.stp create mode 100644 profiling/stap/probe_log_access_detail.stp create mode 100644 profiling/stap/probe_op_shared_search.stp create mode 100644 rfcs/Makefile create mode 100644 rfcs/examples/template-bare-06.txt create mode 100644 rfcs/src/draft-wibrown-ldapssotoken-00.xml create mode 100644 rpm.mk create mode 100644 rpm/389-ds-base-devel.README create mode 100644 
rpm/389-ds-base-git.sh create mode 100644 rpm/389-ds-base.spec.in create mode 100755 rpm/add_patches.sh create mode 100644 rpm/bundle-rust-downstream.py create mode 100755 rpm/rpmverrel.sh create mode 100644 src/Cargo.toml create mode 100644 src/README.md create mode 100644 src/cockpit/389-console/.eslintignore create mode 100644 src/cockpit/389-console/.eslintrc.json create mode 100644 src/cockpit/389-console/.stylelintrc.json create mode 100644 src/cockpit/389-console/README.md create mode 100644 src/cockpit/389-console/audit-ci.json create mode 100755 src/cockpit/389-console/build.js create mode 100755 src/cockpit/389-console/buildAndRun.sh create mode 100644 src/cockpit/389-console/cockpit_dist/index.css.LEGAL.txt create mode 100644 src/cockpit/389-console/cockpit_dist/index.css.gz create mode 100644 src/cockpit/389-console/cockpit_dist/index.html create mode 100644 src/cockpit/389-console/cockpit_dist/index.js.LEGAL.txt create mode 100644 src/cockpit/389-console/cockpit_dist/index.js.gz create mode 100644 src/cockpit/389-console/cockpit_dist/manifest.json create mode 100644 src/cockpit/389-console/cockpit_dist/po.de.js.gz create mode 100644 src/cockpit/389-console/cockpit_dist/po.ja.js.gz create mode 100644 src/cockpit/389-console/mk_po.sh create mode 100644 src/cockpit/389-console/org.port389.cockpit_console.metainfo.xml create mode 100644 src/cockpit/389-console/package-lock.json create mode 100644 src/cockpit/389-console/package.json create mode 100644 src/cockpit/389-console/pkg/lib/README create mode 100644 src/cockpit/389-console/pkg/lib/_global-variables.scss create mode 100644 src/cockpit/389-console/pkg/lib/cockpit-components-context-menu.jsx create mode 100644 src/cockpit/389-console/pkg/lib/cockpit-components-dialog.jsx create mode 100644 src/cockpit/389-console/pkg/lib/cockpit-components-dialog.scss create mode 100644 src/cockpit/389-console/pkg/lib/cockpit-components-empty-state.css create mode 100644 
src/cockpit/389-console/pkg/lib/cockpit-components-empty-state.jsx create mode 100644 src/cockpit/389-console/pkg/lib/cockpit-components-file-autocomplete.jsx create mode 100644 src/cockpit/389-console/pkg/lib/cockpit-components-firewalld-request.jsx create mode 100644 src/cockpit/389-console/pkg/lib/cockpit-components-firewalld-request.scss create mode 100644 src/cockpit/389-console/pkg/lib/cockpit-components-form-helper.jsx create mode 100644 src/cockpit/389-console/pkg/lib/cockpit-components-inline-notification.css create mode 100644 src/cockpit/389-console/pkg/lib/cockpit-components-inline-notification.jsx create mode 100644 src/cockpit/389-console/pkg/lib/cockpit-components-install-dialog.css create mode 100644 src/cockpit/389-console/pkg/lib/cockpit-components-install-dialog.jsx create mode 100644 src/cockpit/389-console/pkg/lib/cockpit-components-listing-panel.jsx create mode 100644 src/cockpit/389-console/pkg/lib/cockpit-components-listing-panel.scss create mode 100644 src/cockpit/389-console/pkg/lib/cockpit-components-logs-panel.jsx create mode 100644 src/cockpit/389-console/pkg/lib/cockpit-components-logs-panel.scss create mode 100644 src/cockpit/389-console/pkg/lib/cockpit-components-modifications.css create mode 100644 src/cockpit/389-console/pkg/lib/cockpit-components-modifications.jsx create mode 100644 src/cockpit/389-console/pkg/lib/cockpit-components-password.jsx create mode 100644 src/cockpit/389-console/pkg/lib/cockpit-components-password.scss create mode 100644 src/cockpit/389-console/pkg/lib/cockpit-components-plot.jsx create mode 100644 src/cockpit/389-console/pkg/lib/cockpit-components-plot.scss create mode 100644 src/cockpit/389-console/pkg/lib/cockpit-components-privileged.jsx create mode 100644 src/cockpit/389-console/pkg/lib/cockpit-components-shutdown.jsx create mode 100644 src/cockpit/389-console/pkg/lib/cockpit-components-shutdown.scss create mode 100644 src/cockpit/389-console/pkg/lib/cockpit-components-table.jsx create mode 100644 
src/cockpit/389-console/pkg/lib/cockpit-components-table.scss create mode 100644 src/cockpit/389-console/pkg/lib/cockpit-components-terminal.jsx create mode 100644 src/cockpit/389-console/pkg/lib/cockpit-dark-theme.js create mode 100644 src/cockpit/389-console/pkg/lib/cockpit-po-plugin.js create mode 100644 src/cockpit/389-console/pkg/lib/cockpit-rsync-plugin.js create mode 100644 src/cockpit/389-console/pkg/lib/cockpit.js create mode 100644 src/cockpit/389-console/pkg/lib/console.css create mode 100644 src/cockpit/389-console/pkg/lib/context-menu.scss create mode 100644 src/cockpit/389-console/pkg/lib/credentials-ssh-private-keys.sh create mode 100644 src/cockpit/389-console/pkg/lib/credentials-ssh-remove-key.sh create mode 100644 src/cockpit/389-console/pkg/lib/credentials.js create mode 100644 src/cockpit/389-console/pkg/lib/ct-card.scss create mode 100644 src/cockpit/389-console/pkg/lib/dialogs.jsx create mode 100644 src/cockpit/389-console/pkg/lib/esbuild-cleanup-plugin.js create mode 100644 src/cockpit/389-console/pkg/lib/esbuild-common.js create mode 100644 src/cockpit/389-console/pkg/lib/esbuild-compress-plugin.js create mode 100644 src/cockpit/389-console/pkg/lib/esbuild-eslint-plugin.js create mode 100644 src/cockpit/389-console/pkg/lib/esbuild-stylelint-plugin.js create mode 100644 src/cockpit/389-console/pkg/lib/esbuild-test-html-plugin.js create mode 100644 src/cockpit/389-console/pkg/lib/get-timesync-backend.py create mode 100644 src/cockpit/389-console/pkg/lib/hooks.js create mode 100755 src/cockpit/389-console/pkg/lib/html2po.js create mode 100644 src/cockpit/389-console/pkg/lib/inotify.py create mode 100644 src/cockpit/389-console/pkg/lib/journal.css create mode 100644 src/cockpit/389-console/pkg/lib/journal.js create mode 100644 src/cockpit/389-console/pkg/lib/long-running-process.js create mode 100644 src/cockpit/389-console/pkg/lib/machine-info.js create mode 100755 src/cockpit/389-console/pkg/lib/manifest2po.js create mode 100644 
src/cockpit/389-console/pkg/lib/menu-select-widget.scss create mode 100644 src/cockpit/389-console/pkg/lib/notifications.js create mode 100644 src/cockpit/389-console/pkg/lib/os-release.js create mode 100644 src/cockpit/389-console/pkg/lib/packagekit.js create mode 100644 src/cockpit/389-console/pkg/lib/page.scss create mode 100644 src/cockpit/389-console/pkg/lib/patternfly/_fonts.scss create mode 100644 src/cockpit/389-console/pkg/lib/patternfly/patternfly-5-cockpit.scss create mode 100644 src/cockpit/389-console/pkg/lib/patternfly/patternfly-5-overrides.scss create mode 100644 src/cockpit/389-console/pkg/lib/plot.js create mode 100644 src/cockpit/389-console/pkg/lib/polyfills.js create mode 100644 src/cockpit/389-console/pkg/lib/python.js create mode 100644 src/cockpit/389-console/pkg/lib/qunit-template.html.in create mode 100644 src/cockpit/389-console/pkg/lib/qunit-tests.js create mode 100644 src/cockpit/389-console/pkg/lib/serverTime.js create mode 100644 src/cockpit/389-console/pkg/lib/serverTime.scss create mode 100644 src/cockpit/389-console/pkg/lib/service.js create mode 100644 src/cockpit/389-console/pkg/lib/superuser.js create mode 100644 src/cockpit/389-console/pkg/lib/table.css create mode 100644 src/cockpit/389-console/pkg/lib/timeformat.js create mode 100644 src/cockpit/389-console/pkg/lib/utils.jsx create mode 100644 src/cockpit/389-console/po/de.po create mode 100644 src/cockpit/389-console/po/ja.po create mode 100644 src/cockpit/389-console/src/LDAPEditor.jsx create mode 100644 src/cockpit/389-console/src/css/_fonts.scss create mode 100644 src/cockpit/389-console/src/css/branding.css create mode 100644 src/cockpit/389-console/src/css/ds.css create mode 100644 src/cockpit/389-console/src/css/patternfly-4-cockpit.scss create mode 100644 src/cockpit/389-console/src/database.jsx create mode 100644 src/cockpit/389-console/src/ds.jsx create mode 100644 src/cockpit/389-console/src/dsModals.jsx create mode 100644 src/cockpit/389-console/src/index.html 
create mode 100644 src/cockpit/389-console/src/index.js create mode 100644 src/cockpit/389-console/src/lib/database/attrEncryption.jsx create mode 100644 src/cockpit/389-console/src/lib/database/backups.jsx create mode 100644 src/cockpit/389-console/src/lib/database/chaining.jsx create mode 100644 src/cockpit/389-console/src/lib/database/databaseConfig.jsx create mode 100644 src/cockpit/389-console/src/lib/database/databaseModal.jsx create mode 100644 src/cockpit/389-console/src/lib/database/databaseTables.jsx create mode 100644 src/cockpit/389-console/src/lib/database/globalPwp.jsx create mode 100644 src/cockpit/389-console/src/lib/database/indexes.jsx create mode 100644 src/cockpit/389-console/src/lib/database/localPwp.jsx create mode 100644 src/cockpit/389-console/src/lib/database/referrals.jsx create mode 100644 src/cockpit/389-console/src/lib/database/suffix.jsx create mode 100644 src/cockpit/389-console/src/lib/database/suffixConfig.jsx create mode 100644 src/cockpit/389-console/src/lib/database/vlvIndexes.jsx create mode 100644 src/cockpit/389-console/src/lib/ldap_editor/lib/aciParser.jsx create mode 100644 src/cockpit/389-console/src/lib/ldap_editor/lib/compactPagination.jsx create mode 100644 src/cockpit/389-console/src/lib/ldap_editor/lib/constants.jsx create mode 100644 src/cockpit/389-console/src/lib/ldap_editor/lib/editableTable.jsx create mode 100644 src/cockpit/389-console/src/lib/ldap_editor/lib/genericPagination.jsx create mode 100644 src/cockpit/389-console/src/lib/ldap_editor/lib/ldapNavigator.jsx create mode 100644 src/cockpit/389-console/src/lib/ldap_editor/lib/options.jsx create mode 100644 src/cockpit/389-console/src/lib/ldap_editor/lib/rootSuffix.jsx create mode 100644 src/cockpit/389-console/src/lib/ldap_editor/lib/utils.jsx create mode 100644 src/cockpit/389-console/src/lib/ldap_editor/search.jsx create mode 100644 src/cockpit/389-console/src/lib/ldap_editor/tableView.jsx create mode 100644 
src/cockpit/389-console/src/lib/ldap_editor/treeView.jsx create mode 100644 src/cockpit/389-console/src/lib/ldap_editor/wizards/aci.jsx create mode 100644 src/cockpit/389-console/src/lib/ldap_editor/wizards/cos.jsx create mode 100644 src/cockpit/389-console/src/lib/ldap_editor/wizards/deleteOperation.jsx create mode 100644 src/cockpit/389-console/src/lib/ldap_editor/wizards/genericWizard.jsx create mode 100644 src/cockpit/389-console/src/lib/ldap_editor/wizards/newEntry.jsx create mode 100644 src/cockpit/389-console/src/lib/ldap_editor/wizards/operations/aciBindRuleTable.jsx create mode 100644 src/cockpit/389-console/src/lib/ldap_editor/wizards/operations/aciNew.jsx create mode 100644 src/cockpit/389-console/src/lib/ldap_editor/wizards/operations/addCosDefinition.jsx create mode 100644 src/cockpit/389-console/src/lib/ldap_editor/wizards/operations/addCosTemplate.jsx create mode 100644 src/cockpit/389-console/src/lib/ldap_editor/wizards/operations/addGroup.jsx create mode 100644 src/cockpit/389-console/src/lib/ldap_editor/wizards/operations/addLdapEntry.jsx create mode 100644 src/cockpit/389-console/src/lib/ldap_editor/wizards/operations/addRole.jsx create mode 100644 src/cockpit/389-console/src/lib/ldap_editor/wizards/operations/addUser.jsx create mode 100644 src/cockpit/389-console/src/lib/ldap_editor/wizards/operations/editGroup.jsx create mode 100644 src/cockpit/389-console/src/lib/ldap_editor/wizards/operations/editLdapEntry.jsx create mode 100644 src/cockpit/389-console/src/lib/ldap_editor/wizards/operations/genericUpdate.jsx create mode 100644 src/cockpit/389-console/src/lib/ldap_editor/wizards/operations/groupTable.jsx create mode 100644 src/cockpit/389-console/src/lib/ldap_editor/wizards/operations/renameEntry.jsx create mode 100644 src/cockpit/389-console/src/lib/monitor/accesslog.jsx create mode 100644 src/cockpit/389-console/src/lib/monitor/auditfaillog.jsx create mode 100644 src/cockpit/389-console/src/lib/monitor/auditlog.jsx create mode 100644 
src/cockpit/389-console/src/lib/monitor/chainingMonitor.jsx create mode 100644 src/cockpit/389-console/src/lib/monitor/dbMonitor.jsx create mode 100644 src/cockpit/389-console/src/lib/monitor/errorlog.jsx create mode 100644 src/cockpit/389-console/src/lib/monitor/monitorModals.jsx create mode 100644 src/cockpit/389-console/src/lib/monitor/monitorTables.jsx create mode 100644 src/cockpit/389-console/src/lib/monitor/replMonAgmts.jsx create mode 100644 src/cockpit/389-console/src/lib/monitor/replMonConflict.jsx create mode 100644 src/cockpit/389-console/src/lib/monitor/replMonTasks.jsx create mode 100644 src/cockpit/389-console/src/lib/monitor/replMonWinsync.jsx create mode 100644 src/cockpit/389-console/src/lib/monitor/replMonitor.jsx create mode 100644 src/cockpit/389-console/src/lib/monitor/securitylog.jsx create mode 100644 src/cockpit/389-console/src/lib/monitor/serverMonitor.jsx create mode 100644 src/cockpit/389-console/src/lib/monitor/suffixMonitor.jsx create mode 100644 src/cockpit/389-console/src/lib/notifications.jsx create mode 100644 src/cockpit/389-console/src/lib/plugins/accountPolicy.jsx create mode 100644 src/cockpit/389-console/src/lib/plugins/attributeUniqueness.jsx create mode 100644 src/cockpit/389-console/src/lib/plugins/autoMembership.jsx create mode 100644 src/cockpit/389-console/src/lib/plugins/dna.jsx create mode 100644 src/cockpit/389-console/src/lib/plugins/linkedAttributes.jsx create mode 100644 src/cockpit/389-console/src/lib/plugins/managedEntries.jsx create mode 100644 src/cockpit/389-console/src/lib/plugins/memberOf.jsx create mode 100644 src/cockpit/389-console/src/lib/plugins/pamPassThru.jsx create mode 100644 src/cockpit/389-console/src/lib/plugins/passthroughAuthentication.jsx create mode 100644 src/cockpit/389-console/src/lib/plugins/pluginBasicConfig.jsx create mode 100644 src/cockpit/389-console/src/lib/plugins/pluginTables.jsx create mode 100644 src/cockpit/389-console/src/lib/plugins/referentialIntegrity.jsx create mode 100644 
src/cockpit/389-console/src/lib/plugins/retroChangelog.jsx create mode 100644 src/cockpit/389-console/src/lib/plugins/rootDNAccessControl.jsx create mode 100644 src/cockpit/389-console/src/lib/plugins/usn.jsx create mode 100644 src/cockpit/389-console/src/lib/plugins/winsync.jsx create mode 100644 src/cockpit/389-console/src/lib/replication/replAgmts.jsx create mode 100644 src/cockpit/389-console/src/lib/replication/replChangelog.jsx create mode 100644 src/cockpit/389-console/src/lib/replication/replConfig.jsx create mode 100644 src/cockpit/389-console/src/lib/replication/replModals.jsx create mode 100644 src/cockpit/389-console/src/lib/replication/replSuffix.jsx create mode 100644 src/cockpit/389-console/src/lib/replication/replTables.jsx create mode 100644 src/cockpit/389-console/src/lib/replication/replTasks.jsx create mode 100644 src/cockpit/389-console/src/lib/replication/winsyncAgmts.jsx create mode 100644 src/cockpit/389-console/src/lib/schema/schemaModals.jsx create mode 100644 src/cockpit/389-console/src/lib/schema/schemaTables.jsx create mode 100644 src/cockpit/389-console/src/lib/security/certificateManagement.jsx create mode 100644 src/cockpit/389-console/src/lib/security/ciphers.jsx create mode 100644 src/cockpit/389-console/src/lib/security/securityModals.jsx create mode 100644 src/cockpit/389-console/src/lib/security/securityTables.jsx create mode 100644 src/cockpit/389-console/src/lib/server/accessLog.jsx create mode 100644 src/cockpit/389-console/src/lib/server/auditLog.jsx create mode 100644 src/cockpit/389-console/src/lib/server/auditfailLog.jsx create mode 100644 src/cockpit/389-console/src/lib/server/errorLog.jsx create mode 100644 src/cockpit/389-console/src/lib/server/ldapi.jsx create mode 100644 src/cockpit/389-console/src/lib/server/sasl.jsx create mode 100644 src/cockpit/389-console/src/lib/server/securityLog.jsx create mode 100644 src/cockpit/389-console/src/lib/server/serverModals.jsx create mode 100644 
src/cockpit/389-console/src/lib/server/serverTables.jsx create mode 100644 src/cockpit/389-console/src/lib/server/settings.jsx create mode 100644 src/cockpit/389-console/src/lib/server/tuning.jsx create mode 100644 src/cockpit/389-console/src/lib/tools.jsx create mode 100644 src/cockpit/389-console/src/manifest.json create mode 100644 src/cockpit/389-console/src/monitor.jsx create mode 100644 src/cockpit/389-console/src/plugins.jsx create mode 100644 src/cockpit/389-console/src/replication.jsx create mode 100644 src/cockpit/389-console/src/schema.jsx create mode 100644 src/cockpit/389-console/src/security.jsx create mode 100644 src/cockpit/389-console/src/server.jsx create mode 100644 src/cockpit/389-console/stats.json create mode 100644 src/contrib/README.md create mode 100644 src/contrib/back-ldif/add.c create mode 100644 src/contrib/back-ldif/back-ldif.h create mode 100644 src/contrib/back-ldif/bind.c create mode 100644 src/contrib/back-ldif/close.c create mode 100644 src/contrib/back-ldif/compare.c create mode 100644 src/contrib/back-ldif/config.c create mode 100644 src/contrib/back-ldif/delete.c create mode 100644 src/contrib/back-ldif/init.c create mode 100644 src/contrib/back-ldif/modify.c create mode 100644 src/contrib/back-ldif/modrdn.c create mode 100644 src/contrib/back-ldif/monitor.c create mode 100644 src/contrib/back-ldif/search.c create mode 100644 src/contrib/back-ldif/start.c create mode 100644 src/contrib/back-ldif/unbind.c create mode 100644 src/lib389/.coveragerc create mode 100644 src/lib389/.gitignore create mode 100644 src/lib389/LICENSE create mode 100644 src/lib389/MANIFEST.in create mode 100644 src/lib389/README.md create mode 100755 src/lib389/cli/dsconf create mode 100755 src/lib389/cli/dscontainer create mode 100755 src/lib389/cli/dscreate create mode 100755 src/lib389/cli/dsctl create mode 100755 src/lib389/cli/dsidm create mode 100755 src/lib389/cli/dsrate create mode 100755 src/lib389/cli/openldap_to_ds create mode 100644 
src/lib389/doc/Makefile create mode 100644 src/lib389/doc/source/accesscontrol.rst create mode 100644 src/lib389/doc/source/aci.rst create mode 100644 src/lib389/doc/source/agreement.rst create mode 100644 src/lib389/doc/source/backend.rst create mode 100644 src/lib389/doc/source/changelog.rst create mode 100644 src/lib389/doc/source/conf.py create mode 100644 src/lib389/doc/source/config.rst create mode 100644 src/lib389/doc/source/databases.rst create mode 100644 src/lib389/doc/source/dirsrv_log.rst create mode 100644 src/lib389/doc/source/domain.rst create mode 100644 src/lib389/doc/source/dseldif.rst create mode 100644 src/lib389/doc/source/group.rst create mode 100644 src/lib389/doc/source/guidelines.rst create mode 100644 src/lib389/doc/source/identitymanagement.rst create mode 100644 src/lib389/doc/source/index.rst create mode 100644 src/lib389/doc/source/indexes.rst create mode 100644 src/lib389/doc/source/ldclt.rst create mode 100644 src/lib389/doc/source/mappingtree.rst create mode 100644 src/lib389/doc/source/monitor.rst create mode 100644 src/lib389/doc/source/need_to_be_triaged.rst create mode 100644 src/lib389/doc/source/organizationalunit.rst create mode 100644 src/lib389/doc/source/passwd.rst create mode 100644 src/lib389/doc/source/paths.rst create mode 100644 src/lib389/doc/source/plugin.rst create mode 100644 src/lib389/doc/source/replica.rst create mode 100644 src/lib389/doc/source/replication.rst create mode 100644 src/lib389/doc/source/repltools.rst create mode 100644 src/lib389/doc/source/rootdse.rst create mode 100644 src/lib389/doc/source/schema.rst create mode 100644 src/lib389/doc/source/services.rst create mode 100644 src/lib389/doc/source/task.rst create mode 100644 src/lib389/doc/source/user.rst create mode 100644 src/lib389/doc/source/utils.rst create mode 100644 src/lib389/dsadmin.pylintrc create mode 100644 src/lib389/lib389/__init__.py create mode 100644 src/lib389/lib389/_constants.py create mode 100644 
src/lib389/lib389/_controls.py create mode 100644 src/lib389/lib389/_entry.py create mode 100644 src/lib389/lib389/_ldifconn.py create mode 100644 src/lib389/lib389/_mapped_object.py create mode 100644 src/lib389/lib389/_mapped_object_lint.py create mode 100644 src/lib389/lib389/_replication.py create mode 100644 src/lib389/lib389/aci.py create mode 100644 src/lib389/lib389/agreement.py create mode 100644 src/lib389/lib389/backend.py create mode 100644 src/lib389/lib389/chaining.py create mode 100644 src/lib389/lib389/cli_base/__init__.py create mode 100644 src/lib389/lib389/cli_base/dsrc.py create mode 100644 src/lib389/lib389/cli_conf/__init__.py create mode 100644 src/lib389/lib389/cli_conf/backend.py create mode 100644 src/lib389/lib389/cli_conf/backup.py create mode 100644 src/lib389/lib389/cli_conf/chaining.py create mode 100644 src/lib389/lib389/cli_conf/config.py create mode 100644 src/lib389/lib389/cli_conf/conflicts.py create mode 100644 src/lib389/lib389/cli_conf/directory_manager.py create mode 100644 src/lib389/lib389/cli_conf/monitor.py create mode 100644 src/lib389/lib389/cli_conf/plugin.py create mode 100644 src/lib389/lib389/cli_conf/plugins/__init__.py create mode 100644 src/lib389/lib389/cli_conf/plugins/accountpolicy.py create mode 100644 src/lib389/lib389/cli_conf/plugins/attruniq.py create mode 100644 src/lib389/lib389/cli_conf/plugins/automember.py create mode 100644 src/lib389/lib389/cli_conf/plugins/contentsync.py create mode 100644 src/lib389/lib389/cli_conf/plugins/dna.py create mode 100644 src/lib389/lib389/cli_conf/plugins/entryuuid.py create mode 100644 src/lib389/lib389/cli_conf/plugins/ldappassthrough.py create mode 100644 src/lib389/lib389/cli_conf/plugins/linkedattr.py create mode 100644 src/lib389/lib389/cli_conf/plugins/managedentries.py create mode 100644 src/lib389/lib389/cli_conf/plugins/memberof.py create mode 100644 src/lib389/lib389/cli_conf/plugins/pampassthrough.py create mode 100644 
src/lib389/lib389/cli_conf/plugins/posix_winsync.py create mode 100644 src/lib389/lib389/cli_conf/plugins/referint.py create mode 100644 src/lib389/lib389/cli_conf/plugins/retrochangelog.py create mode 100644 src/lib389/lib389/cli_conf/plugins/rootdn_ac.py create mode 100644 src/lib389/lib389/cli_conf/plugins/usn.py create mode 100644 src/lib389/lib389/cli_conf/pwpolicy.py create mode 100644 src/lib389/lib389/cli_conf/replication.py create mode 100644 src/lib389/lib389/cli_conf/saslmappings.py create mode 100644 src/lib389/lib389/cli_conf/schema.py create mode 100644 src/lib389/lib389/cli_conf/security.py create mode 100644 src/lib389/lib389/cli_ctl/__init__.py create mode 100644 src/lib389/lib389/cli_ctl/cockpit.py create mode 100644 src/lib389/lib389/cli_ctl/dbgen-FamilyNames create mode 100644 src/lib389/lib389/cli_ctl/dbgen-GivenNames create mode 100644 src/lib389/lib389/cli_ctl/dbgen-OrgUnits create mode 100644 src/lib389/lib389/cli_ctl/dbgen.py create mode 100644 src/lib389/lib389/cli_ctl/dblib.py create mode 100644 src/lib389/lib389/cli_ctl/dbtasks.py create mode 100644 src/lib389/lib389/cli_ctl/dsrc.py create mode 100644 src/lib389/lib389/cli_ctl/health.py create mode 100644 src/lib389/lib389/cli_ctl/instance.py create mode 100644 src/lib389/lib389/cli_ctl/nsstate.py create mode 100644 src/lib389/lib389/cli_ctl/tls.py create mode 100644 src/lib389/lib389/cli_idm/__init__.py create mode 100644 src/lib389/lib389/cli_idm/account.py create mode 100644 src/lib389/lib389/cli_idm/client_config.py create mode 100644 src/lib389/lib389/cli_idm/group.py create mode 100644 src/lib389/lib389/cli_idm/initialise.py create mode 100644 src/lib389/lib389/cli_idm/organizationalunit.py create mode 100644 src/lib389/lib389/cli_idm/posixgroup.py create mode 100644 src/lib389/lib389/cli_idm/role.py create mode 100644 src/lib389/lib389/cli_idm/service.py create mode 100644 src/lib389/lib389/cli_idm/uniquegroup.py create mode 100644 src/lib389/lib389/cli_idm/user.py create mode 
100755 src/lib389/lib389/clitools/__init__.py create mode 100755 src/lib389/lib389/clitools/ds_aci_lint create mode 100755 src/lib389/lib389/clitools/ds_backend_getattr create mode 100755 src/lib389/lib389/clitools/ds_backend_list create mode 100755 src/lib389/lib389/clitools/ds_backend_setattr create mode 100755 src/lib389/lib389/clitools/ds_krb_create_keytab create mode 100755 src/lib389/lib389/clitools/ds_krb_create_principal create mode 100755 src/lib389/lib389/clitools/ds_krb_create_realm create mode 100755 src/lib389/lib389/clitools/ds_krb_destroy_realm create mode 100755 src/lib389/lib389/clitools/ds_monitor_backend create mode 100755 src/lib389/lib389/clitools/ds_monitor_server create mode 100755 src/lib389/lib389/clitools/ds_schema_attributetype_list create mode 100755 src/lib389/lib389/clitools/ds_schema_attributetype_query create mode 100755 src/lib389/lib389/clitools/ds_setup create mode 100644 src/lib389/lib389/config.py create mode 100644 src/lib389/lib389/configurations/__init__.py create mode 100644 src/lib389/lib389/configurations/config.py create mode 100644 src/lib389/lib389/configurations/config_001003006.py create mode 100644 src/lib389/lib389/configurations/config_001004000.py create mode 100644 src/lib389/lib389/configurations/config_001004002.py create mode 100644 src/lib389/lib389/configurations/config_002003000.py create mode 100644 src/lib389/lib389/configurations/sample.py create mode 100644 src/lib389/lib389/conflicts.py create mode 100644 src/lib389/lib389/cos.py create mode 100644 src/lib389/lib389/dbgen.py create mode 100644 src/lib389/lib389/dirsrv_log.py create mode 100644 src/lib389/lib389/ds_instance.py create mode 100644 src/lib389/lib389/dseldif.py create mode 100644 src/lib389/lib389/encrypted_attributes.py create mode 100644 src/lib389/lib389/exceptions.py create mode 100644 src/lib389/lib389/extended_operations.py create mode 100644 src/lib389/lib389/extensibleobject.py create mode 100644 src/lib389/lib389/idm/__init__.py 
create mode 100644 src/lib389/lib389/idm/account.py create mode 100644 src/lib389/lib389/idm/country.py create mode 100644 src/lib389/lib389/idm/directorymanager.py create mode 100644 src/lib389/lib389/idm/domain.py create mode 100644 src/lib389/lib389/idm/group.py create mode 100644 src/lib389/lib389/idm/ipadomain.py create mode 100644 src/lib389/lib389/idm/nscontainer.py create mode 100644 src/lib389/lib389/idm/organization.py create mode 100644 src/lib389/lib389/idm/organizationalrole.py create mode 100644 src/lib389/lib389/idm/organizationalunit.py create mode 100644 src/lib389/lib389/idm/posixgroup.py create mode 100644 src/lib389/lib389/idm/role.py create mode 100644 src/lib389/lib389/idm/services.py create mode 100644 src/lib389/lib389/idm/user.py create mode 100644 src/lib389/lib389/index.py create mode 100644 src/lib389/lib389/instance/__init__.py create mode 100644 src/lib389/lib389/instance/options.py create mode 100644 src/lib389/lib389/instance/remove.py create mode 100644 src/lib389/lib389/instance/setup.py create mode 100644 src/lib389/lib389/ldap_objs.py create mode 100644 src/lib389/lib389/ldapi.py create mode 100644 src/lib389/lib389/ldclt.py create mode 100644 src/lib389/lib389/lint.py create mode 100644 src/lib389/lib389/mappingTree.py create mode 100644 src/lib389/lib389/migrate/__init__.py create mode 100644 src/lib389/lib389/migrate/ldif.py create mode 100644 src/lib389/lib389/migrate/openldap/__init__.py create mode 100644 src/lib389/lib389/migrate/openldap/config.py create mode 100644 src/lib389/lib389/migrate/openldap/schema.py create mode 100644 src/lib389/lib389/migrate/plan.py create mode 100644 src/lib389/lib389/mit_krb5.py create mode 100644 src/lib389/lib389/monitor.py create mode 100755 src/lib389/lib389/ns-slapd.valgrind create mode 100644 src/lib389/lib389/nss_ssl.py create mode 100644 src/lib389/lib389/passwd.py create mode 100644 src/lib389/lib389/password_plugins.py create mode 100644 src/lib389/lib389/paths.py create mode 
100644 src/lib389/lib389/perftools.py create mode 100644 src/lib389/lib389/plugins.py create mode 100644 src/lib389/lib389/properties.py create mode 100644 src/lib389/lib389/pwpolicy.py create mode 100644 src/lib389/lib389/referral.py create mode 100644 src/lib389/lib389/replica.py create mode 100644 src/lib389/lib389/repltools.py create mode 100644 src/lib389/lib389/rewriters.py create mode 100644 src/lib389/lib389/rootdse.py create mode 100644 src/lib389/lib389/sasl.py create mode 100644 src/lib389/lib389/saslmap.py create mode 100755 src/lib389/lib389/schema.py create mode 100644 src/lib389/lib389/suffix.py create mode 100644 src/lib389/lib389/tasks.py create mode 100644 src/lib389/lib389/tests/__init__.py create mode 100644 src/lib389/lib389/tests/aci_parse_test.py create mode 100644 src/lib389/lib389/tests/aci_test.py create mode 100644 src/lib389/lib389/tests/agreement_test.py create mode 100644 src/lib389/lib389/tests/backend_test.py create mode 100644 src/lib389/lib389/tests/cli/__init__.py create mode 100644 src/lib389/lib389/tests/cli/adm_instance_test.py create mode 100644 src/lib389/lib389/tests/cli/conf_backend_test.py create mode 100644 src/lib389/lib389/tests/cli/conf_backup_test.py create mode 100644 src/lib389/lib389/tests/cli/conf_chaining_test.py create mode 100644 src/lib389/lib389/tests/cli/conf_conflicts_test.py create mode 100644 src/lib389/lib389/tests/cli/conf_directory_manager_test.py create mode 100644 src/lib389/lib389/tests/cli/conf_plugin_test.py create mode 100644 src/lib389/lib389/tests/cli/conf_plugins/__init__.py create mode 100644 src/lib389/lib389/tests/cli/conf_plugins/automember_test.py create mode 100644 src/lib389/lib389/tests/cli/conf_plugins/memberof_test.py create mode 100644 src/lib389/lib389/tests/cli/conf_plugins/referint_test.py create mode 100644 src/lib389/lib389/tests/cli/conf_plugins/rootdn_ac_test.py create mode 100644 src/lib389/lib389/tests/cli/conf_plugins/usn_test.py create mode 100644 
src/lib389/lib389/tests/cli/conf_pwpolicy_test.py create mode 100644 src/lib389/lib389/tests/cli/conf_schema_test.py create mode 100644 src/lib389/lib389/tests/cli/ctl_dbtasks_test.py create mode 100644 src/lib389/lib389/tests/cli/dsrc_test.py create mode 100644 src/lib389/lib389/tests/cli/idm_group_test.py create mode 100644 src/lib389/lib389/tests/cli/idm_user_modify_test.py create mode 100644 src/lib389/lib389/tests/cli/idm_user_test.py create mode 100644 src/lib389/lib389/tests/config.py create mode 100644 src/lib389/lib389/tests/config_test.py create mode 100644 src/lib389/lib389/tests/configurations/__init__.py create mode 100644 src/lib389/lib389/tests/configurations/config_001003006_test.py create mode 100644 src/lib389/lib389/tests/configurations/config_001004000_test.py create mode 100644 src/lib389/lib389/tests/conftest.py create mode 100644 src/lib389/lib389/tests/dereference_test.py create mode 100644 src/lib389/lib389/tests/dirsrv_log_test.py create mode 100644 src/lib389/lib389/tests/dirsrv_test.py create mode 100644 src/lib389/lib389/tests/dseldif_test.py create mode 100644 src/lib389/lib389/tests/dsversion_test.py create mode 100644 src/lib389/lib389/tests/effective_rights_test.py create mode 100644 src/lib389/lib389/tests/entry_test.py create mode 100644 src/lib389/lib389/tests/healthcheck_test.py create mode 100644 src/lib389/lib389/tests/idm/__init__.py create mode 100644 src/lib389/lib389/tests/idm/account_test.py create mode 100644 src/lib389/lib389/tests/idm/services_test.py create mode 100644 src/lib389/lib389/tests/idm/user_and_group_test.py create mode 100644 src/lib389/lib389/tests/index_test.py create mode 100644 src/lib389/lib389/tests/ldclt_test.py create mode 100644 src/lib389/lib389/tests/mapped_object_lint_test.py create mode 100644 src/lib389/lib389/tests/mapped_object_test.py create mode 100644 src/lib389/lib389/tests/mappingtree_test.py create mode 100644 src/lib389/lib389/tests/nss_ssl_test.py create mode 100644 
src/lib389/lib389/tests/paths_test.py create mode 100644 src/lib389/lib389/tests/plugin_test.py create mode 100644 src/lib389/lib389/tests/plugins/__init__.py create mode 100644 src/lib389/lib389/tests/plugins/memberof_test.py create mode 100644 src/lib389/lib389/tests/plugins/referint_test.py create mode 100644 src/lib389/lib389/tests/plugins/usn_test.py create mode 100644 src/lib389/lib389/tests/plugins/utils.py create mode 100644 src/lib389/lib389/tests/referral_test.py create mode 100644 src/lib389/lib389/tests/replica_test.py create mode 100644 src/lib389/lib389/tests/schema_test.py create mode 100644 src/lib389/lib389/tests/suffix_test.py create mode 100644 src/lib389/lib389/tests/test_module_proxy.py create mode 100644 src/lib389/lib389/tests/tls_external_test.py create mode 100644 src/lib389/lib389/tests/utils_test.py create mode 100644 src/lib389/lib389/tombstone.py create mode 100644 src/lib389/lib389/tools.py create mode 100644 src/lib389/lib389/topologies.py create mode 100644 src/lib389/lib389/utils.py create mode 100644 src/lib389/requirements.txt create mode 100644 src/lib389/setup.cfg create mode 100644 src/lib389/setup.py.in create mode 100644 src/lib389/tox.ini create mode 100644 src/librnsslapd/Cargo.toml create mode 100644 src/librnsslapd/README.md create mode 100644 src/librnsslapd/build.rs create mode 100644 src/librnsslapd/src/lib.rs create mode 100644 src/librslapd/Cargo.toml create mode 100644 src/librslapd/README.md create mode 100644 src/librslapd/build.rs create mode 100644 src/librslapd/src/cache.rs create mode 100644 src/librslapd/src/lib.rs create mode 100644 src/librslapd/src/sds/lib.rs create mode 100644 src/librslapd/src/sds/tqueue.rs create mode 100644 src/libsds/external/csiphash/csiphash.c create mode 100644 src/libsds/include/sds.h create mode 100644 src/pkgconfig/dirsrv.pc.in create mode 100644 src/pkgconfig/libsds.pc.in create mode 100644 src/pkgconfig/svrcore.pc.in create mode 100644 src/plugins/entryuuid/Cargo.toml create 
mode 100644 src/plugins/entryuuid/src/lib.rs create mode 100644 src/plugins/entryuuid_syntax/Cargo.toml create mode 100644 src/plugins/entryuuid_syntax/src/lib.rs create mode 100644 src/plugins/pwdchan/Cargo.toml create mode 100644 src/plugins/pwdchan/src/lib.rs create mode 100644 src/plugins/pwdchan/src/pbkdf2.rs create mode 100644 src/plugins/pwdchan/src/pbkdf2_sha1.rs create mode 100644 src/plugins/pwdchan/src/pbkdf2_sha256.rs create mode 100644 src/plugins/pwdchan/src/pbkdf2_sha512.rs create mode 100644 src/rewriters/adfilter.c create mode 100644 src/slapd/Cargo.toml create mode 100644 src/slapd/src/error.rs create mode 100644 src/slapd/src/fernet.rs create mode 100644 src/slapd/src/lib.rs create mode 100644 src/slapi_r_plugin/Cargo.toml create mode 100644 src/slapi_r_plugin/README.md create mode 100644 src/slapi_r_plugin/build.rs create mode 100644 src/slapi_r_plugin/src/backend.rs create mode 100644 src/slapi_r_plugin/src/ber.rs create mode 100644 src/slapi_r_plugin/src/charray.rs create mode 100644 src/slapi_r_plugin/src/constants.rs create mode 100644 src/slapi_r_plugin/src/dn.rs create mode 100644 src/slapi_r_plugin/src/entry.rs create mode 100644 src/slapi_r_plugin/src/error.rs create mode 100644 src/slapi_r_plugin/src/init.c create mode 100644 src/slapi_r_plugin/src/lib.rs create mode 100644 src/slapi_r_plugin/src/log.rs create mode 100644 src/slapi_r_plugin/src/macros.rs create mode 100644 src/slapi_r_plugin/src/modify.rs create mode 100644 src/slapi_r_plugin/src/pblock.rs create mode 100644 src/slapi_r_plugin/src/plugin.rs create mode 100644 src/slapi_r_plugin/src/search.rs create mode 100644 src/slapi_r_plugin/src/syntax_plugin.rs create mode 100644 src/slapi_r_plugin/src/task.rs create mode 100644 src/slapi_r_plugin/src/value.rs create mode 100644 src/svrcore/AUTHORS create mode 100644 src/svrcore/COPYING create mode 100644 src/svrcore/ChangeLog create mode 100644 src/svrcore/INSTALL create mode 100644 src/svrcore/INSTALL.win create mode 100644 
src/svrcore/LICENSE create mode 100644 src/svrcore/Makefile.am create mode 100644 src/svrcore/NEWS create mode 100644 src/svrcore/README create mode 100644 src/svrcore/TODO create mode 100755 src/svrcore/autogen.sh create mode 100644 src/svrcore/configure.ac create mode 100644 src/svrcore/examples/svrcore_driver.c create mode 100644 src/svrcore/m4/nspr.m4 create mode 100644 src/svrcore/m4/nss.m4 create mode 100644 src/svrcore/m4/systemd.m4 create mode 100644 src/svrcore/src/Makefile.am create mode 100644 src/svrcore/src/Makefile.win create mode 100644 src/svrcore/src/alt.c create mode 100644 src/svrcore/src/cache.c create mode 100644 src/svrcore/src/errors.c create mode 100644 src/svrcore/src/file.c create mode 100644 src/svrcore/src/key.ico create mode 100644 src/svrcore/src/logo.ico create mode 100644 src/svrcore/src/manifest.mn create mode 100644 src/svrcore/src/ntgetpin.c create mode 100644 src/svrcore/src/ntgetpin.rc create mode 100644 src/svrcore/src/ntresource.h create mode 100644 src/svrcore/src/pin.c create mode 100644 src/svrcore/src/pk11.c create mode 100644 src/svrcore/src/std-systemd.c create mode 100644 src/svrcore/src/std.c create mode 100644 src/svrcore/src/svrcore.h create mode 100644 src/svrcore/src/systemd-ask-pass.c create mode 100644 src/svrcore/src/user.c create mode 100644 test/libslapd/counters/atomic.c create mode 100644 test/libslapd/filter/optimise.c create mode 100644 test/libslapd/haproxy/parse.c create mode 100644 test/libslapd/operation/v3_compat.c create mode 100644 test/libslapd/pblock/analytics.c create mode 100644 test/libslapd/pblock/pblock_accessors.txt create mode 100644 test/libslapd/pblock/pblock_accessors_freq.txt create mode 100644 test/libslapd/pblock/v3_compat.c create mode 100644 test/libslapd/schema/filter_validate.c create mode 100644 test/libslapd/spal/meminfo.c create mode 100644 test/libslapd/test.c create mode 100644 test/main.c create mode 100644 test/pblock_analyse.py create mode 100644 
test/plugins/pwdstorage/pbkdf2.c create mode 100644 test/plugins/test.c create mode 100644 test/test_slapd.h create mode 100644 wrappers/ds_selinux_restorecon.sh.in create mode 100644 wrappers/ds_systemd_ask_password_acl.in create mode 100644 wrappers/initscript.in create mode 100644 wrappers/ldap-agent-initscript.in create mode 100644 wrappers/systemd-snmp.service.in create mode 100644 wrappers/systemd.group.in create mode 100644 wrappers/systemd.template.service.custom.conf.in create mode 100644 wrappers/systemd.template.service.in create mode 100644 wrappers/systemd.template.service.xsan.conf.in diff --git a/.cargo/config.in b/.cargo/config.in new file mode 100644 index 0000000..d7d8ff4 --- /dev/null +++ b/.cargo/config.in @@ -0,0 +1,6 @@ +[source.crates-io] +registry = "https://github.com/rust-lang/crates.io-index" +@rust_vendor_sources@ + +[source.vendored-sources] +directory = "./vendor" diff --git a/.clang-format b/.clang-format new file mode 100644 index 0000000..ca89d76 --- /dev/null +++ b/.clang-format @@ -0,0 +1,49 @@ +--- +# BasedOnStyle: Mozilla +AccessModifierOffset: 0 +# ConstructorInitializerIndentWidth: 4 +# AlignEscapedNewlinesLeft: true +# AlignTrailingComments: true +AllowAllParametersOfDeclarationOnNextLine: false +AllowShortIfStatementsOnASingleLine: false +AllowShortLoopsOnASingleLine: false +# AlwaysBreakTemplateDeclarations: false +# AlwaysBreakBeforeMultilineStrings: false +BreakBeforeBinaryOperators: false +BreakBeforeTernaryOperators: true +BreakConstructorInitializersBeforeComma: false +BinPackParameters: false +ColumnLimit: 0 +ConstructorInitializerAllOnOneLineOrOnePerLine: false +# DerivePointerBinding: true +# ExperimentalAutoDetectBinPacking: false +# IndentCaseLabels: true +MaxEmptyLinesToKeep: 2 +# NamespaceIndentation: None +# ObjCSpaceBeforeProtocolList: false +# PenaltyBreakBeforeFirstCallParameter: 19 +# PenaltyBreakComment: 60 +# PenaltyBreakString: 1000 +# PenaltyBreakFirstLessLess: 120 +# PenaltyExcessCharacter: 1000000 +# 
PenaltyReturnTypeOnItsOwnLine: 200 +# PointerBindsToType: true +SpacesBeforeTrailingComments: 2 +# Cpp11BracedListStyle: false +Standard: Cpp03 +IndentWidth: 4 +TabWidth: 4 +UseTab: Never +SpaceBeforeAssignmentOperators: true +BreakBeforeBraces: Mozilla +IndentFunctionDeclarationAfterType: false +SpacesInParentheses: false +SpacesInAngles: false +SpaceInEmptyParentheses: false +SpacesInCStyleCastParentheses: false +SpaceAfterControlStatementKeyword: true +ContinuationIndentWidth: 4 +SortIncludes: false +AlwaysBreakAfterReturnType: TopLevelDefinitions +... + diff --git a/.copr/Makefile b/.copr/Makefile new file mode 100644 index 0000000..d575880 --- /dev/null +++ b/.copr/Makefile @@ -0,0 +1,22 @@ +srpm: + # Install git in the buildroot to correctly generate commit hash + dnf install -y git + # Generate spec file + make -f rpm.mk rpmroot + # Install build dependencies + dnf install -y dnf-plugins-core + dnf builddep -y --skip-broken --spec rpmbuild/SPECS/389-ds-base.spec --best --allowerasing --setopt=install_weak_deps=False + # chown files in current working directory to root:root + # because when npm is run as root, scripts are always run + # with the effective uid and gid of the working directory owner. + # copr-rpmbuild runs mock with CAP_SETUID and CAP_SETGID + # capabilities dropped, and build commands are executed as root. + # So npm fails if current working directory is not owned by root. + chown -R root:root . 
+ # Generate srpm + SKIP_AUDIT_CI=1 make -f rpm.mk srpms + + if [[ "${outdir}" != "" ]]; then \ + mv dist/srpms/* ${outdir}; \ + fi + diff --git a/.cvsignore b/.cvsignore new file mode 100644 index 0000000..e50a4a3 --- /dev/null +++ b/.cvsignore @@ -0,0 +1,5 @@ +Linux +built +modules.mk +pumpkin.dat +.cvsignore diff --git a/.dockerignore b/.dockerignore new file mode 100644 index 0000000..1905eb5 --- /dev/null +++ b/.dockerignore @@ -0,0 +1,3 @@ +.git +.gitignore +./src/cockpit/389-console/node_modules diff --git a/.github/ISSUE_TEMPLATE/bug_report.md b/.github/ISSUE_TEMPLATE/bug_report.md new file mode 100644 index 0000000..4b0080e --- /dev/null +++ b/.github/ISSUE_TEMPLATE/bug_report.md @@ -0,0 +1,32 @@ +--- +name: Bug report +about: Create a report to help us improve +title: '' +labels: needs triage +assignees: '' + +--- + +**Issue Description** +A clear and concise description of what the bug is. + +**Package Version and Platform:** + - Platform: [e.g. Fedora] + - Package and version: [e.g. 389-ds-base-1.4.4.4-20200721git5d41dc5a4.fc32.x86_64] + - Browser [e.g. chrome, safari] + +**Steps to Reproduce** +Steps to reproduce the behavior: +1. Go to '...' +2. Click on '....' +3. Scroll down to '....' +4. See error + +**Expected results** +A clear and concise description of what you expected to happen. + +**Screenshots** +If applicable, add screenshots to help explain your problem. + +**Additional context** +Add any other context about the problem here. diff --git a/.github/ISSUE_TEMPLATE/feature_request.md b/.github/ISSUE_TEMPLATE/feature_request.md new file mode 100644 index 0000000..598ce00 --- /dev/null +++ b/.github/ISSUE_TEMPLATE/feature_request.md @@ -0,0 +1,20 @@ +--- +name: Feature request +about: Suggest an idea for this project +title: '' +labels: needs triage +assignees: '' + +--- + +**Is your feature request related to a problem? Please describe.** +A clear and concise description of what the problem is. Ex. I'm always frustrated when [...] 
+ +**Describe the solution you'd like** +A clear and concise description of what you want to happen. + +**Describe alternatives you've considered** +A clear and concise description of any alternative solutions or features you've considered. + +**Additional context** +Add any other context or screenshots about the feature request here. diff --git a/.github/daemon.json b/.github/daemon.json new file mode 100644 index 0000000..5de054f --- /dev/null +++ b/.github/daemon.json @@ -0,0 +1,5 @@ +{ + "ipv6": true, + "fixed-cidr-v6": "2001:db8:1::/64" +} + diff --git a/.github/scripts/generate_matrix.py b/.github/scripts/generate_matrix.py new file mode 100644 index 0000000..5843745 --- /dev/null +++ b/.github/scripts/generate_matrix.py @@ -0,0 +1,37 @@ +import os +import sys +import glob +import json + +# If we have arguments passed to the script, use them as the test names to run +if len(sys.argv) > 1: + suites = sys.argv[1:] + valid_suites = [] + # Validate if the path is a valid file or directory with files + for suite in suites: + test_path = os.path.join("dirsrvtests/tests/suites/", suite) + if os.path.exists(test_path) and not os.path.islink(test_path): + if os.path.isfile(test_path) and test_path.endswith(".py"): + valid_suites.append(suite) + elif os.path.isdir(test_path): + valid_suites.append(suite) + suites = valid_suites + +else: + # Use tests from the source + suites = next(os.walk('dirsrvtests/tests/suites/'))[1] + + # Filter out snmp as it is an empty directory: + suites.remove('snmp') + + # Run each replication test module separately to speed things up + suites.remove('replication') + repl_tests = glob.glob('dirsrvtests/tests/suites/replication/*_test.py') + suites += [repl_test.replace('dirsrvtests/tests/suites/', '') for repl_test in repl_tests] + suites.sort() + +suites_list = [{ "suite": suite} for suite in suites] +matrix = {"include": suites_list} + +print(json.dumps(matrix)) + diff --git a/.github/workflows/compile.yml b/.github/workflows/compile.yml 
new file mode 100644 index 0000000..06872c8 --- /dev/null +++ b/.github/workflows/compile.yml @@ -0,0 +1,78 @@ +name: Compile +on: + - pull_request + - push + +permissions: + actions: read + packages: read + contents: read + +jobs: + compile: + runs-on: ubuntu-latest + strategy: + fail-fast: false + matrix: + name: + - GCC + - GCC Strict + - GCC Static Analyzer + - Clang + - Clang -Weverything + + include: + - name: GCC + image: quay.io/389ds/ci-images:fedora + compiler: gcc + cpp-compiler: g++ + cflags: "-O2 -g" + + - name: GCC strict + image: quay.io/389ds/ci-images:fedora + compiler: gcc + cpp-compiler: g++ + cflags: "-O2 -g -Wall -Wextra -Wundef -Wpointer-arith -Wfloat-equal \ + -Wstrict-prototypes -Wstrict-overflow=5 -Wwrite-strings -Winit-self \ + -Wuninitialized -Wno-sign-compare -Wshadow -Wformat-security" + + - name: GCC Static Analyzer + image: quay.io/389ds/ci-images:fedora + compiler: gcc + cpp-compiler: g++ + cflags: "-O2 -g -fanalyzer" + + - name: Clang + image: quay.io/389ds/ci-images:fedora + compiler: clang + cpp-compiler: clang++ + cflags: "-O2 -g -Qunused-arguments" + + - name: Clang -Weverything + image: quay.io/389ds/ci-images:fedora + compiler: clang + cpp-compiler: clang++ + cflags: "-O2 -g -Weverything -Qunused-arguments" + + container: + image: ${{ matrix.image }} + + steps: + - uses: actions/checkout@v3 + - name: Checkout and configure + run: autoreconf -fvi && ./configure + env: + CC: ${{ matrix.compiler }} + CXX: ${{ matrix.cpp-compiler }} + CFLAGS: ${{ matrix.cflags || env.CFLAGS }} + CXXFLAGS: ${{ matrix.cxxflags || env.CXXFLAGS }} + LDFLAGS: ${{ matrix.ldflags || env.LDFLAGS }} + + - uses: ammaraskar/gcc-problem-matcher@master + - name: Build using ${{ matrix.compiler }} + run: bash -c "(make V=0 2> >(tee /dev/stderr)) > log.txt" + + - uses: actions/upload-artifact@v3 + with: + name: ${{ matrix.name }} + path: log.txt diff --git a/.github/workflows/lmdbpytest.yml b/.github/workflows/lmdbpytest.yml new file mode 100644 index 
0000000..77dd971 --- /dev/null +++ b/.github/workflows/lmdbpytest.yml @@ -0,0 +1,120 @@ +name: LMDB Test + +on: + push: + pull_request: + schedule: + - cron: '0 0 * * *' + workflow_dispatch: + inputs: + pytest_tests: + description: 'Run only specified suites or test modules delimited by space, for example "basic/basic_test.py replication"' + required: false + default: false + debug_enabled: + description: 'Set to "true" to enable debugging with tmate (https://github.com/marketplace/actions/debugging-with-tmate)' + required: false + default: false + +permissions: + actions: read + packages: read + contents: read + +jobs: + build: + name: Build + runs-on: ubuntu-22.04 + container: + image: quay.io/389ds/ci-images:test + outputs: + matrix: ${{ steps.set-matrix.outputs.matrix }} + steps: + - name: Checkout + uses: actions/checkout@v3 + + - name: Add GITHUB_WORKSPACE as a safe directory + run: git config --global --add safe.directory "$GITHUB_WORKSPACE" + + - name: Get a list of all test suites + id: set-matrix + run: echo "matrix=$(python3 .github/scripts/generate_matrix.py ${{ github.event.inputs.pytest_tests }})" >>$GITHUB_OUTPUT + + - name: Build RPMs + run: SKIP_AUDIT_CI=1 make -f rpm.mk dist-bz2 rpms + + - name: Tar build artifacts + run: tar -cvf dist.tar dist/ + + - name: Upload RPMs + uses: actions/upload-artifact@v3 + with: + name: rpms + path: dist.tar + + test: + name: LMDB Test + runs-on: ubuntu-22.04 + needs: build + strategy: + fail-fast: false + matrix: ${{ fromJson(needs.build.outputs.matrix) }} + + steps: + - name: Checkout + uses: actions/checkout@v3 + + - name: Setup tmate session + uses: mxschmitt/action-tmate@v3 + with: + limit-access-to-actor: true + if: ${{ github.event_name == 'workflow_dispatch' && github.event.inputs.debug_enabled }} + + - name: Install dependencies + run: | + sudo apt update -y + sudo apt install -y docker.io containerd runc + sudo cp .github/daemon.json /etc/docker/daemon.json + sudo systemctl unmask docker + sudo systemctl 
start docker + + - name: Download RPMs + uses: actions/download-artifact@master + with: + name: rpms + + - name: Extract RPMs + run: tar xvf dist.tar + + - name: Run pytest in a container + run: | + set -x + CID=$(sudo docker run -d -h server.example.com --ulimit core=-1 --cap-add=SYS_PTRACE --privileged --rm --shm-size=4gb -v ${PWD}:/workspace quay.io/389ds/ci-images:test) + sudo docker exec $CID sh -c "dnf install -y -v dist/rpms/*rpm" + export PASSWD=$(openssl rand -base64 32) + sudo docker exec $CID sh -c "echo \"${PASSWD}\" | passwd --stdin root" + sudo docker exec $CID sh -c "systemctl start dbus.service" + sudo docker exec $CID sh -c "systemctl enable --now cockpit.socket" + sudo docker exec $CID sh -c "mkdir -p /workspace/assets/cores && chmod 777 /workspace{,/assets{,/cores}}" + sudo docker exec $CID sh -c "echo '/workspace/assets/cores/core.%e.%P' > /proc/sys/kernel/core_pattern" + sudo docker exec -e WEBUI=1 -e NSSLAPD_DB_LIB=mdb -e DEBUG=pw:api -e PASSWD="${PASSWD}" $CID py.test --suppress-no-test-exit-code -m "not flaky" --junit-xml=pytest.xml --html=pytest.html --browser=firefox --browser=chromium -v dirsrvtests/tests/suites/${{ matrix.suite }} + + - name: Make the results file readable by all + if: always() + run: | + sudo chmod -f -v -R a+r pytest.*ml assets + sudo chmod -f -v a+x assets + - name: Sanitize filename + if: always() + run: echo "PYTEST_SUITE=$(echo ${{ matrix.suite }} | sed -e 's#\/#-#g')" >> $GITHUB_ENV + + - name: Upload pytest test results + if: always() + uses: actions/upload-artifact@v3 + with: + name: pytest-${{ env.PYTEST_SUITE }} + path: | + pytest.xml + pytest.html + assets + diff --git a/.github/workflows/npm.yml b/.github/workflows/npm.yml new file mode 100644 index 0000000..2064e3d --- /dev/null +++ b/.github/workflows/npm.yml @@ -0,0 +1,25 @@ +name: npm-audit-ci + +on: + push: + pull_request: + schedule: + - cron: '0 0 * * *' + +permissions: + actions: read + packages: read + contents: read + +jobs: + npm-audit-ci: + name: 
npm-audit-ci + runs-on: ubuntu-latest + container: + image: quay.io/389ds/ci-images:test + steps: + - name: Checkout + uses: actions/checkout@v3 + + - name: Run NPM Audit CI + run: cd $GITHUB_WORKSPACE/src/cockpit/389-console && npx audit-ci --config audit-ci.json diff --git a/.github/workflows/pytest.yml b/.github/workflows/pytest.yml new file mode 100644 index 0000000..be9db46 --- /dev/null +++ b/.github/workflows/pytest.yml @@ -0,0 +1,120 @@ +name: Test + +on: + push: + pull_request: + schedule: + - cron: '0 0 * * *' + workflow_dispatch: + inputs: + pytest_tests: + description: 'Run only specified suites or test modules delimited by space, for example "basic/basic_test.py replication"' + required: false + default: false + debug_enabled: + description: 'Set to "true" to enable debugging with tmate (https://github.com/marketplace/actions/debugging-with-tmate)' + required: false + default: false + +permissions: + actions: read + packages: read + contents: read + +jobs: + build: + name: Build + runs-on: ubuntu-22.04 + container: + image: quay.io/389ds/ci-images:test + outputs: + matrix: ${{ steps.set-matrix.outputs.matrix }} + steps: + - name: Checkout + uses: actions/checkout@v3 + + - name: Add GITHUB_WORKSPACE as a safe directory + run: git config --global --add safe.directory "$GITHUB_WORKSPACE" + + - name: Get a list of all test suites + id: set-matrix + run: echo "matrix=$(python3 .github/scripts/generate_matrix.py ${{ github.event.inputs.pytest_tests }})" >>$GITHUB_OUTPUT + + - name: Build RPMs + run: SKIP_AUDIT_CI=1 make -f rpm.mk dist-bz2 rpms + + - name: Tar build artifacts + run: tar -cvf dist.tar dist/ + + - name: Upload RPMs + uses: actions/upload-artifact@v3 + with: + name: rpms + path: dist.tar + + test: + name: Test + runs-on: ubuntu-22.04 + needs: build + strategy: + fail-fast: false + matrix: ${{ fromJson(needs.build.outputs.matrix) }} + + steps: + - name: Checkout + uses: actions/checkout@v3 + + - name: Setup tmate session + uses: 
mxschmitt/action-tmate@v3 + with: + limit-access-to-actor: true + if: ${{ github.event_name == 'workflow_dispatch' && github.event.inputs.debug_enabled }} + + - name: Install dependencies + run: | + sudo apt update -y + sudo apt install -y docker.io containerd runc + sudo cp .github/daemon.json /etc/docker/daemon.json + sudo systemctl unmask docker + sudo systemctl start docker + + - name: Download RPMs + uses: actions/download-artifact@master + with: + name: rpms + + - name: Extract RPMs + run: tar xvf dist.tar + + - name: Run pytest in a container + run: | + set -x + CID=$(sudo docker run -d -h server.example.com --ulimit core=-1 --cap-add=SYS_PTRACE --privileged --rm --shm-size=4gb -v ${PWD}:/workspace quay.io/389ds/ci-images:test) + sudo docker exec $CID sh -c "dnf install -y -v dist/rpms/*rpm" + export PASSWD=$(openssl rand -base64 32) + sudo docker exec $CID sh -c "echo \"${PASSWD}\" | passwd --stdin root" + sudo docker exec $CID sh -c "systemctl start dbus.service" + sudo docker exec $CID sh -c "systemctl enable --now cockpit.socket" + sudo docker exec $CID sh -c "mkdir -p /workspace/assets/cores && chmod 777 /workspace{,/assets{,/cores}}" + sudo docker exec $CID sh -c "echo '/workspace/assets/cores/core.%e.%P' > /proc/sys/kernel/core_pattern" + sudo docker exec -e WEBUI=1 -e DEBUG=pw:api -e PASSWD="${PASSWD}" $CID py.test --suppress-no-test-exit-code -m "not flaky" --junit-xml=pytest.xml --html=pytest.html --browser=firefox --browser=chromium -v dirsrvtests/tests/suites/${{ matrix.suite }} + + - name: Make the results file readable by all + if: always() + run: | + sudo chmod -f -v -R a+r pytest.*ml assets + sudo chmod -f -v a+x assets + - name: Sanitize filename + if: always() + run: echo "PYTEST_SUITE=$(echo ${{ matrix.suite }} | sed -e 's#\/#-#g')" >> $GITHUB_ENV + + - name: Upload pytest test results + if: always() + uses: actions/upload-artifact@v3 + with: + name: pytest-${{ env.PYTEST_SUITE }} + path: | + pytest.xml + pytest.html + assets + diff --git 
a/.github/workflows/release.yml b/.github/workflows/release.yml new file mode 100644 index 0000000..3a6b568 --- /dev/null +++ b/.github/workflows/release.yml @@ -0,0 +1,60 @@ +name: Release + +on: + push: + tags: + - "389-ds-base-*" + workflow_dispatch: + inputs: + version: + description: Specify tag to generate a tarball + required: true + skip-audit-ci: + description: Skip npm audit-ci + type: boolean + default: false + +permissions: + actions: read + packages: read + contents: write + +jobs: + build: + runs-on: ubuntu-latest + container: + image: quay.io/389ds/ci-images:test + steps: + - name: Get the version + id: get_version + run: | + echo "version=${VERSION}" >> $GITHUB_OUTPUT + env: + VERSION: ${{ github.event.inputs.version || github.ref_name }} + + - name: Checkout + uses: actions/checkout@v3 + with: + fetch-depth: 0 + ref: ${{ steps.get_version.outputs.version }} + + - name: Create tarball + run: | + git config --global --add safe.directory "$GITHUB_WORKSPACE" + if [ "${{ github.event.inputs.skip-audit-ci }}" = "true" ]; then + export SKIP_AUDIT_CI=1 + fi + TAG=${{ steps.get_version.outputs.version }} make -f rpm.mk dist-bz2 + + - name: Upload tarball + uses: actions/upload-artifact@v3 + with: + name: ${{ steps.get_version.outputs.version }}.tar.bz2 + path: ${{ steps.get_version.outputs.version }}.tar.bz2 + + - name: Release + uses: softprops/action-gh-release@v1 + with: + tag_name: ${{ steps.get_version.outputs.version }} + files: | + ${{ steps.get_version.outputs.version }}.tar.bz2 diff --git a/.github/workflows/validate.yml b/.github/workflows/validate.yml new file mode 100644 index 0000000..04b2c98 --- /dev/null +++ b/.github/workflows/validate.yml @@ -0,0 +1,27 @@ +name: Validate tests + +on: + push: + pull_request: + +permissions: + actions: read + packages: read + contents: read + +jobs: + validate: + runs-on: ubuntu-latest + container: + image: quay.io/389ds/ci-images:test + steps: + - name: Checkout + uses: actions/checkout@v3 + + - name: Run 
testimony + if: always() + run: testimony validate -c dirsrvtests/testimony.yaml dirsrvtests/tests/suites + + - name: Check for duplicate IDs + if: always() + run: python3 dirsrvtests/check_for_duplicate_ids.py dirsrvtests/tests/suites diff --git a/.gitignore b/.gitignore new file mode 100644 index 0000000..3b7c3ba --- /dev/null +++ b/.gitignore @@ -0,0 +1,238 @@ +autom4te.cache +m4/libtool.m4 +m4/ltoptions.m4 +m4/ltsugar.m4 +m4/ltversion.m4 +m4/lt~obsolete.m4 +Makefile.in +aclocal.m4 +ar-lib +compile +config.guess +config.h.in +config.h.in~ +config.sub +configure +depcomp +install-sh +ltmain.sh +missing +Makefile +config.h +config.log +config.status +libtool +stamp-h1 +*~ +*.patch +.DS_Store +.autotools +.cproject +.project +.settings +.cache +*.a +*.rsa +*.dirstamp +*.la +*.lo +*.o +*.rso +*.pyc +*.rej +__pycache__ +.libs +.deps +rpmbuild +rpm/389-ds-base.spec +Makefile +config.h +config.log +config.status +dberrstrs.h +dbscan +dirsrv.pc +dsktune +infadd +ldap-agent +ldclt +ldif +libtool +makstrdb +migratecred +mmldif +ns-slapd +ns-slapd.properties +pwdhash +rsearch +stamp-h1 +benchmark_par_sds +benchmark_sds +doxyfile.stamp +tags +test-driver +test-suite.log +test_libsds +test_libsds.log +test_libsds.trs +test_nuncstans +test_nuncstans.log +test_nuncstans.trs +test_nuncstans_stress_large +test_nuncstans_stress_small +test_nuncstans_stress_small.log +test_nuncstans_stress_small.trs +test_slapd +test_slapd.log +test_slapd.trs +ldap/admin/src/dirsrv +ldap/admin/src/defaults.inf +ldap/admin/src/scripts/80upgradednformat.pl +ldap/admin/src/scripts/DSCreate.pm +ldap/admin/src/scripts/DSMigration.pm +ldap/admin/src/scripts/DSSharedLib +ldap/admin/src/scripts/DSUpdate.pm +ldap/admin/src/scripts/DSUtil.pm +ldap/admin/src/scripts/DialogManager.pm +ldap/admin/src/scripts/Migration.pm +ldap/admin/src/scripts/Setup.pm +ldap/admin/src/scripts/SetupDialogs.pm +ldap/admin/src/scripts/bak2db +ldap/admin/src/scripts/bak2db.pl +ldap/admin/src/scripts/cleanallruv.pl 
+ldap/admin/src/scripts/db2bak +ldap/admin/src/scripts/db2bak.pl +ldap/admin/src/scripts/db2index +ldap/admin/src/scripts/db2index.pl +ldap/admin/src/scripts/db2ldif +ldap/admin/src/scripts/db2ldif.pl +ldap/admin/src/scripts/dbverify +ldap/admin/src/scripts/dn2rdn +ldap/admin/src/scripts/dscreate.map +ldap/admin/src/scripts/dsorgentries.map +ldap/admin/src/scripts/dsupdate.map +ldap/admin/src/scripts/fixup-linkedattrs.pl +ldap/admin/src/scripts/fixup-memberof.pl +ldap/admin/src/scripts/ldif2db +ldap/admin/src/scripts/ldif2db.pl +ldap/admin/src/scripts/ldif2ldap +ldap/admin/src/scripts/migrate-ds.pl +ldap/admin/src/scripts/monitor +ldap/admin/src/scripts/ns-accountstatus.pl +ldap/admin/src/scripts/ns-activate.pl +ldap/admin/src/scripts/ns-inactivate.pl +ldap/admin/src/scripts/ns-newpwpolicy.pl +ldap/admin/src/scripts/remove-ds.pl +ldap/admin/src/scripts/repl-monitor.pl +ldap/admin/src/scripts/restart-dirsrv +ldap/admin/src/scripts/restoreconfig +ldap/admin/src/scripts/saveconfig +ldap/admin/src/scripts/schema-reload.pl +ldap/admin/src/scripts/setup-ds.pl +ldap/admin/src/scripts/setup-ds.res +ldap/admin/src/scripts/start-dirsrv +ldap/admin/src/scripts/stop-dirsrv +ldap/admin/src/scripts/suffix2instance +ldap/admin/src/scripts/syntax-validate.pl +ldap/admin/src/scripts/template-bak2db +ldap/admin/src/scripts/template-bak2db.pl +ldap/admin/src/scripts/template-cleanallruv.pl +ldap/admin/src/scripts/template-db2bak +ldap/admin/src/scripts/template-db2bak.pl +ldap/admin/src/scripts/template-db2index +ldap/admin/src/scripts/template-db2index.pl +ldap/admin/src/scripts/template-db2ldif +ldap/admin/src/scripts/template-db2ldif.pl +ldap/admin/src/scripts/template-dbverify +ldap/admin/src/scripts/template-dn2rdn +ldap/admin/src/scripts/template-fixup-linkedattrs.pl +ldap/admin/src/scripts/template-fixup-memberof.pl +ldap/admin/src/scripts/template-fixup-memberuid.pl +ldap/admin/src/scripts/template-ldif2db +ldap/admin/src/scripts/template-ldif2db.pl 
+ldap/admin/src/scripts/template-ldif2ldap +ldap/admin/src/scripts/template-monitor +ldap/admin/src/scripts/template-ns-accountstatus.pl +ldap/admin/src/scripts/template-ns-activate.pl +ldap/admin/src/scripts/template-ns-inactivate.pl +ldap/admin/src/scripts/template-ns-newpwpolicy.pl +ldap/admin/src/scripts/template-restart-slapd +ldap/admin/src/scripts/template-restoreconfig +ldap/admin/src/scripts/template-saveconfig +ldap/admin/src/scripts/template-schema-reload.pl +ldap/admin/src/scripts/template-start-slapd +ldap/admin/src/scripts/template-stop-slapd +ldap/admin/src/scripts/template-suffix2instance +ldap/admin/src/scripts/template-syntax-validate.pl +ldap/admin/src/scripts/template-upgradednformat +ldap/admin/src/scripts/template-usn-tombstone-cleanup.pl +ldap/admin/src/scripts/template-verify-db.pl +ldap/admin/src/scripts/template-vlvindex +ldap/admin/src/scripts/upgradedb +ldap/admin/src/scripts/upgradednformat +ldap/admin/src/scripts/usn-tombstone-cleanup.pl +ldap/admin/src/scripts/verify-db.pl +ldap/admin/src/scripts/vlvindex +ldap/admin/src/scripts/91reindex.pl +ldap/admin/src/scripts/dbmon.sh +ldap/admin/src/scripts/ds_selinux_enabled +ldap/admin/src/scripts/ds_selinux_port_query +ldap/admin/src/scripts/readnsstate +ldap/admin/src/scripts/status-dirsrv +ldap/admin/src/slapd.inf +ldap/admin/src/template-initconfig +ldap/ldif/template-baseacis.ldif +ldap/ldif/template-bitwise.ldif +ldap/ldif/template-country.ldif +ldap/ldif/template-dnaplugin.ldif +ldap/ldif/template-domain.ldif +ldap/ldif/template-dse.ldif +ldap/ldif/template-ldapi-autobind.ldif +ldap/ldif/template-ldapi-default.ldif +ldap/ldif/template-ldapi.ldif +ldap/ldif/template-locality.ldif +ldap/ldif/template-org.ldif +ldap/ldif/template-orgunit.ldif +ldap/ldif/template-pampta.ldif +ldap/ldif/template-sasl.ldif +ldap/ldif/template-state.ldif +ldap/ldif/template-suffix-db.ldif +ldap/ldif/template-dse-minimal.ldif +ldap/servers/slapd/tools/rsearch/scripts/dbgen.pl +ldap/servers/snmp/ldap-agent.conf 
+src/pkgconfig/libsds.pc +src/pkgconfig/nunc-stans.pc +src/pkgconfig/svrcore.pc +wrappers/cl-dump +wrappers/dbscan +wrappers/dirsrv +wrappers/dirsrv-snmp +wrappers/dsktune +wrappers/infadd +wrappers/ldap-agent +wrappers/ldclt +wrappers/ldif +wrappers/migratecred +wrappers/mmldif +wrappers/pwdhash +wrappers/repl-monitor +wrappers/rsearch +wrappers/ds_systemd_ask_password_acl +docs/slapi.doxy +man/man3/ +html/ +.pytest_cache/ +src/lib389/dist/ +src/lib389/man/ +src/libsds/target/ +src/librslapd/target/ +dist +venv +.idea +src/cockpit/389-console/cockpit_dist/ +src/cockpit/389-console/node_modules/ +vendor +vendor.tar.gz +.history +.vscode/launch.json +.cargo/config diff --git a/LICENSE b/LICENSE new file mode 100644 index 0000000..395a348 --- /dev/null +++ b/LICENSE @@ -0,0 +1,28 @@ +Copyright (C) 2015 Red Hat +See files 'LICENSE.GPLv3+', 'LICENSE.openssl', and 'LICENSE.mit' for +more information. + +This program is free software; you can redistribute it and/or modify +it under the terms of the GNU General Public License as published by +the Free Software Foundation, either version 3 of the License, or +(at your option) any later version. + +This program is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. + +You should have received a copy of the GNU General Public License +along with this program. If not, see . + +Additional permission under GPLv3 section 7: + +If you modify this Program, or any covered work, by linking or +combining it with OpenSSL, or a modified version of OpenSSL licensed +under the OpenSSL license +(https://www.openssl.org/source/license.html), the licensors of this +Program grant you additional permission to convey the resulting +work. 
Corresponding Source for a non-source form of such a +combination shall include the source code for the parts that are +licensed under the OpenSSL license as well as that of the covered +work. diff --git a/LICENSE.GPLv3+ b/LICENSE.GPLv3+ new file mode 100644 index 0000000..94a9ed0 --- /dev/null +++ b/LICENSE.GPLv3+ @@ -0,0 +1,674 @@ + GNU GENERAL PUBLIC LICENSE + Version 3, 29 June 2007 + + Copyright (C) 2007 Free Software Foundation, Inc. + Everyone is permitted to copy and distribute verbatim copies + of this license document, but changing it is not allowed. + + Preamble + + The GNU General Public License is a free, copyleft license for +software and other kinds of works. + + The licenses for most software and other practical works are designed +to take away your freedom to share and change the works. By contrast, +the GNU General Public License is intended to guarantee your freedom to +share and change all versions of a program--to make sure it remains free +software for all its users. We, the Free Software Foundation, use the +GNU General Public License for most of our software; it applies also to +any other work released this way by its authors. You can apply it to +your programs, too. + + When we speak of free software, we are referring to freedom, not +price. Our General Public Licenses are designed to make sure that you +have the freedom to distribute copies of free software (and charge for +them if you wish), that you receive source code or can get it if you +want it, that you can change the software or use pieces of it in new +free programs, and that you know you can do these things. + + To protect your rights, we need to prevent others from denying you +these rights or asking you to surrender the rights. Therefore, you have +certain responsibilities if you distribute copies of the software, or if +you modify it: responsibilities to respect the freedom of others. 
+ + For example, if you distribute copies of such a program, whether +gratis or for a fee, you must pass on to the recipients the same +freedoms that you received. You must make sure that they, too, receive +or can get the source code. And you must show them these terms so they +know their rights. + + Developers that use the GNU GPL protect your rights with two steps: +(1) assert copyright on the software, and (2) offer you this License +giving you legal permission to copy, distribute and/or modify it. + + For the developers' and authors' protection, the GPL clearly explains +that there is no warranty for this free software. For both users' and +authors' sake, the GPL requires that modified versions be marked as +changed, so that their problems will not be attributed erroneously to +authors of previous versions. + + Some devices are designed to deny users access to install or run +modified versions of the software inside them, although the manufacturer +can do so. This is fundamentally incompatible with the aim of +protecting users' freedom to change the software. The systematic +pattern of such abuse occurs in the area of products for individuals to +use, which is precisely where it is most unacceptable. Therefore, we +have designed this version of the GPL to prohibit the practice for those +products. If such problems arise substantially in other domains, we +stand ready to extend this provision to those domains in future versions +of the GPL, as needed to protect the freedom of users. + + Finally, every program is threatened constantly by software patents. +States should not allow patents to restrict development and use of +software on general-purpose computers, but in those that do, we wish to +avoid the special danger that patents applied to a free program could +make it effectively proprietary. To prevent this, the GPL assures that +patents cannot be used to render the program non-free. 
+ + The precise terms and conditions for copying, distribution and +modification follow. + + TERMS AND CONDITIONS + + 0. Definitions. + + "This License" refers to version 3 of the GNU General Public License. + + "Copyright" also means copyright-like laws that apply to other kinds of +works, such as semiconductor masks. + + "The Program" refers to any copyrightable work licensed under this +License. Each licensee is addressed as "you". "Licensees" and +"recipients" may be individuals or organizations. + + To "modify" a work means to copy from or adapt all or part of the work +in a fashion requiring copyright permission, other than the making of an +exact copy. The resulting work is called a "modified version" of the +earlier work or a work "based on" the earlier work. + + A "covered work" means either the unmodified Program or a work based +on the Program. + + To "propagate" a work means to do anything with it that, without +permission, would make you directly or secondarily liable for +infringement under applicable copyright law, except executing it on a +computer or modifying a private copy. Propagation includes copying, +distribution (with or without modification), making available to the +public, and in some countries other activities as well. + + To "convey" a work means any kind of propagation that enables other +parties to make or receive copies. Mere interaction with a user through +a computer network, with no transfer of a copy, is not conveying. + + An interactive user interface displays "Appropriate Legal Notices" +to the extent that it includes a convenient and prominently visible +feature that (1) displays an appropriate copyright notice, and (2) +tells the user that there is no warranty for the work (except to the +extent that warranties are provided), that licensees may convey the +work under this License, and how to view a copy of this License. 
If +the interface presents a list of user commands or options, such as a +menu, a prominent item in the list meets this criterion. + + 1. Source Code. + + The "source code" for a work means the preferred form of the work +for making modifications to it. "Object code" means any non-source +form of a work. + + A "Standard Interface" means an interface that either is an official +standard defined by a recognized standards body, or, in the case of +interfaces specified for a particular programming language, one that +is widely used among developers working in that language. + + The "System Libraries" of an executable work include anything, other +than the work as a whole, that (a) is included in the normal form of +packaging a Major Component, but which is not part of that Major +Component, and (b) serves only to enable use of the work with that +Major Component, or to implement a Standard Interface for which an +implementation is available to the public in source code form. A +"Major Component", in this context, means a major essential component +(kernel, window system, and so on) of the specific operating system +(if any) on which the executable work runs, or a compiler used to +produce the work, or an object code interpreter used to run it. + + The "Corresponding Source" for a work in object code form means all +the source code needed to generate, install, and (for an executable +work) run the object code and to modify the work, including scripts to +control those activities. However, it does not include the work's +System Libraries, or general-purpose tools or generally available free +programs which are used unmodified in performing those activities but +which are not part of the work. 
For example, Corresponding Source +includes interface definition files associated with source files for +the work, and the source code for shared libraries and dynamically +linked subprograms that the work is specifically designed to require, +such as by intimate data communication or control flow between those +subprograms and other parts of the work. + + The Corresponding Source need not include anything that users +can regenerate automatically from other parts of the Corresponding +Source. + + The Corresponding Source for a work in source code form is that +same work. + + 2. Basic Permissions. + + All rights granted under this License are granted for the term of +copyright on the Program, and are irrevocable provided the stated +conditions are met. This License explicitly affirms your unlimited +permission to run the unmodified Program. The output from running a +covered work is covered by this License only if the output, given its +content, constitutes a covered work. This License acknowledges your +rights of fair use or other equivalent, as provided by copyright law. + + You may make, run and propagate covered works that you do not +convey, without conditions so long as your license otherwise remains +in force. You may convey covered works to others for the sole purpose +of having them make modifications exclusively for you, or provide you +with facilities for running those works, provided that you comply with +the terms of this License in conveying all material for which you do +not control copyright. Those thus making or running the covered works +for you must do so exclusively on your behalf, under your direction +and control, on terms that prohibit them from making any copies of +your copyrighted material outside their relationship with you. + + Conveying under any other circumstances is permitted solely under +the conditions stated below. Sublicensing is not allowed; section 10 +makes it unnecessary. + + 3. 
Protecting Users' Legal Rights From Anti-Circumvention Law. + + No covered work shall be deemed part of an effective technological +measure under any applicable law fulfilling obligations under article +11 of the WIPO copyright treaty adopted on 20 December 1996, or +similar laws prohibiting or restricting circumvention of such +measures. + + When you convey a covered work, you waive any legal power to forbid +circumvention of technological measures to the extent such circumvention +is effected by exercising rights under this License with respect to +the covered work, and you disclaim any intention to limit operation or +modification of the work as a means of enforcing, against the work's +users, your or third parties' legal rights to forbid circumvention of +technological measures. + + 4. Conveying Verbatim Copies. + + You may convey verbatim copies of the Program's source code as you +receive it, in any medium, provided that you conspicuously and +appropriately publish on each copy an appropriate copyright notice; +keep intact all notices stating that this License and any +non-permissive terms added in accord with section 7 apply to the code; +keep intact all notices of the absence of any warranty; and give all +recipients a copy of this License along with the Program. + + You may charge any price or no price for each copy that you convey, +and you may offer support or warranty protection for a fee. + + 5. Conveying Modified Source Versions. + + You may convey a work based on the Program, or the modifications to +produce it from the Program, in the form of source code under the +terms of section 4, provided that you also meet all of these conditions: + + a) The work must carry prominent notices stating that you modified + it, and giving a relevant date. + + b) The work must carry prominent notices stating that it is + released under this License and any conditions added under section + 7. 
This requirement modifies the requirement in section 4 to + "keep intact all notices". + + c) You must license the entire work, as a whole, under this + License to anyone who comes into possession of a copy. This + License will therefore apply, along with any applicable section 7 + additional terms, to the whole of the work, and all its parts, + regardless of how they are packaged. This License gives no + permission to license the work in any other way, but it does not + invalidate such permission if you have separately received it. + + d) If the work has interactive user interfaces, each must display + Appropriate Legal Notices; however, if the Program has interactive + interfaces that do not display Appropriate Legal Notices, your + work need not make them do so. + + A compilation of a covered work with other separate and independent +works, which are not by their nature extensions of the covered work, +and which are not combined with it such as to form a larger program, +in or on a volume of a storage or distribution medium, is called an +"aggregate" if the compilation and its resulting copyright are not +used to limit the access or legal rights of the compilation's users +beyond what the individual works permit. Inclusion of a covered work +in an aggregate does not cause this License to apply to the other +parts of the aggregate. + + 6. Conveying Non-Source Forms. + + You may convey a covered work in object code form under the terms +of sections 4 and 5, provided that you also convey the +machine-readable Corresponding Source under the terms of this License, +in one of these ways: + + a) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by the + Corresponding Source fixed on a durable physical medium + customarily used for software interchange. 
+ + b) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by a + written offer, valid for at least three years and valid for as + long as you offer spare parts or customer support for that product + model, to give anyone who possesses the object code either (1) a + copy of the Corresponding Source for all the software in the + product that is covered by this License, on a durable physical + medium customarily used for software interchange, for a price no + more than your reasonable cost of physically performing this + conveying of source, or (2) access to copy the + Corresponding Source from a network server at no charge. + + c) Convey individual copies of the object code with a copy of the + written offer to provide the Corresponding Source. This + alternative is allowed only occasionally and noncommercially, and + only if you received the object code with such an offer, in accord + with subsection 6b. + + d) Convey the object code by offering access from a designated + place (gratis or for a charge), and offer equivalent access to the + Corresponding Source in the same way through the same place at no + further charge. You need not require recipients to copy the + Corresponding Source along with the object code. If the place to + copy the object code is a network server, the Corresponding Source + may be on a different server (operated by you or a third party) + that supports equivalent copying facilities, provided you maintain + clear directions next to the object code saying where to find the + Corresponding Source. Regardless of what server hosts the + Corresponding Source, you remain obligated to ensure that it is + available for as long as needed to satisfy these requirements. 
+ + e) Convey the object code using peer-to-peer transmission, provided + you inform other peers where the object code and Corresponding + Source of the work are being offered to the general public at no + charge under subsection 6d. + + A separable portion of the object code, whose source code is excluded +from the Corresponding Source as a System Library, need not be +included in conveying the object code work. + + A "User Product" is either (1) a "consumer product", which means any +tangible personal property which is normally used for personal, family, +or household purposes, or (2) anything designed or sold for incorporation +into a dwelling. In determining whether a product is a consumer product, +doubtful cases shall be resolved in favor of coverage. For a particular +product received by a particular user, "normally used" refers to a +typical or common use of that class of product, regardless of the status +of the particular user or of the way in which the particular user +actually uses, or expects or is expected to use, the product. A product +is a consumer product regardless of whether the product has substantial +commercial, industrial or non-consumer uses, unless such uses represent +the only significant mode of use of the product. + + "Installation Information" for a User Product means any methods, +procedures, authorization keys, or other information required to install +and execute modified versions of a covered work in that User Product from +a modified version of its Corresponding Source. The information must +suffice to ensure that the continued functioning of the modified object +code is in no case prevented or interfered with solely because +modification has been made. 
+ + If you convey an object code work under this section in, or with, or +specifically for use in, a User Product, and the conveying occurs as +part of a transaction in which the right of possession and use of the +User Product is transferred to the recipient in perpetuity or for a +fixed term (regardless of how the transaction is characterized), the +Corresponding Source conveyed under this section must be accompanied +by the Installation Information. But this requirement does not apply +if neither you nor any third party retains the ability to install +modified object code on the User Product (for example, the work has +been installed in ROM). + + The requirement to provide Installation Information does not include a +requirement to continue to provide support service, warranty, or updates +for a work that has been modified or installed by the recipient, or for +the User Product in which it has been modified or installed. Access to a +network may be denied when the modification itself materially and +adversely affects the operation of the network or violates the rules and +protocols for communication across the network. + + Corresponding Source conveyed, and Installation Information provided, +in accord with this section must be in a format that is publicly +documented (and with an implementation available to the public in +source code form), and must require no special password or key for +unpacking, reading or copying. + + 7. Additional Terms. + + "Additional permissions" are terms that supplement the terms of this +License by making exceptions from one or more of its conditions. +Additional permissions that are applicable to the entire Program shall +be treated as though they were included in this License, to the extent +that they are valid under applicable law. 
If additional permissions +apply only to part of the Program, that part may be used separately +under those permissions, but the entire Program remains governed by +this License without regard to the additional permissions. + + When you convey a copy of a covered work, you may at your option +remove any additional permissions from that copy, or from any part of +it. (Additional permissions may be written to require their own +removal in certain cases when you modify the work.) You may place +additional permissions on material, added by you to a covered work, +for which you have or can give appropriate copyright permission. + + Notwithstanding any other provision of this License, for material you +add to a covered work, you may (if authorized by the copyright holders of +that material) supplement the terms of this License with terms: + + a) Disclaiming warranty or limiting liability differently from the + terms of sections 15 and 16 of this License; or + + b) Requiring preservation of specified reasonable legal notices or + author attributions in that material or in the Appropriate Legal + Notices displayed by works containing it; or + + c) Prohibiting misrepresentation of the origin of that material, or + requiring that modified versions of such material be marked in + reasonable ways as different from the original version; or + + d) Limiting the use for publicity purposes of names of licensors or + authors of the material; or + + e) Declining to grant rights under trademark law for use of some + trade names, trademarks, or service marks; or + + f) Requiring indemnification of licensors and authors of that + material by anyone who conveys the material (or modified versions of + it) with contractual assumptions of liability to the recipient, for + any liability that these contractual assumptions directly impose on + those licensors and authors. + + All other non-permissive additional terms are considered "further +restrictions" within the meaning of section 10. 
If the Program as you +received it, or any part of it, contains a notice stating that it is +governed by this License along with a term that is a further +restriction, you may remove that term. If a license document contains +a further restriction but permits relicensing or conveying under this +License, you may add to a covered work material governed by the terms +of that license document, provided that the further restriction does +not survive such relicensing or conveying. + + If you add terms to a covered work in accord with this section, you +must place, in the relevant source files, a statement of the +additional terms that apply to those files, or a notice indicating +where to find the applicable terms. + + Additional terms, permissive or non-permissive, may be stated in the +form of a separately written license, or stated as exceptions; +the above requirements apply either way. + + 8. Termination. + + You may not propagate or modify a covered work except as expressly +provided under this License. Any attempt otherwise to propagate or +modify it is void, and will automatically terminate your rights under +this License (including any patent licenses granted under the third +paragraph of section 11). + + However, if you cease all violation of this License, then your +license from a particular copyright holder is reinstated (a) +provisionally, unless and until the copyright holder explicitly and +finally terminates your license, and (b) permanently, if the copyright +holder fails to notify you of the violation by some reasonable means +prior to 60 days after the cessation. + + Moreover, your license from a particular copyright holder is +reinstated permanently if the copyright holder notifies you of the +violation by some reasonable means, this is the first time you have +received notice of violation of this License (for any work) from that +copyright holder, and you cure the violation prior to 30 days after +your receipt of the notice. 
+ + Termination of your rights under this section does not terminate the +licenses of parties who have received copies or rights from you under +this License. If your rights have been terminated and not permanently +reinstated, you do not qualify to receive new licenses for the same +material under section 10. + + 9. Acceptance Not Required for Having Copies. + + You are not required to accept this License in order to receive or +run a copy of the Program. Ancillary propagation of a covered work +occurring solely as a consequence of using peer-to-peer transmission +to receive a copy likewise does not require acceptance. However, +nothing other than this License grants you permission to propagate or +modify any covered work. These actions infringe copyright if you do +not accept this License. Therefore, by modifying or propagating a +covered work, you indicate your acceptance of this License to do so. + + 10. Automatic Licensing of Downstream Recipients. + + Each time you convey a covered work, the recipient automatically +receives a license from the original licensors, to run, modify and +propagate that work, subject to this License. You are not responsible +for enforcing compliance by third parties with this License. + + An "entity transaction" is a transaction transferring control of an +organization, or substantially all assets of one, or subdividing an +organization, or merging organizations. If propagation of a covered +work results from an entity transaction, each party to that +transaction who receives a copy of the work also receives whatever +licenses to the work the party's predecessor in interest had or could +give under the previous paragraph, plus a right to possession of the +Corresponding Source of the work from the predecessor in interest, if +the predecessor has it or can get it with reasonable efforts. + + You may not impose any further restrictions on the exercise of the +rights granted or affirmed under this License. 
For example, you may +not impose a license fee, royalty, or other charge for exercise of +rights granted under this License, and you may not initiate litigation +(including a cross-claim or counterclaim in a lawsuit) alleging that +any patent claim is infringed by making, using, selling, offering for +sale, or importing the Program or any portion of it. + + 11. Patents. + + A "contributor" is a copyright holder who authorizes use under this +License of the Program or a work on which the Program is based. The +work thus licensed is called the contributor's "contributor version". + + A contributor's "essential patent claims" are all patent claims +owned or controlled by the contributor, whether already acquired or +hereafter acquired, that would be infringed by some manner, permitted +by this License, of making, using, or selling its contributor version, +but do not include claims that would be infringed only as a +consequence of further modification of the contributor version. For +purposes of this definition, "control" includes the right to grant +patent sublicenses in a manner consistent with the requirements of +this License. + + Each contributor grants you a non-exclusive, worldwide, royalty-free +patent license under the contributor's essential patent claims, to +make, use, sell, offer for sale, import and otherwise run, modify and +propagate the contents of its contributor version. + + In the following three paragraphs, a "patent license" is any express +agreement or commitment, however denominated, not to enforce a patent +(such as an express permission to practice a patent or covenant not to +sue for patent infringement). To "grant" such a patent license to a +party means to make such an agreement or commitment not to enforce a +patent against the party. 
+ + If you convey a covered work, knowingly relying on a patent license, +and the Corresponding Source of the work is not available for anyone +to copy, free of charge and under the terms of this License, through a +publicly available network server or other readily accessible means, +then you must either (1) cause the Corresponding Source to be so +available, or (2) arrange to deprive yourself of the benefit of the +patent license for this particular work, or (3) arrange, in a manner +consistent with the requirements of this License, to extend the patent +license to downstream recipients. "Knowingly relying" means you have +actual knowledge that, but for the patent license, your conveying the +covered work in a country, or your recipient's use of the covered work +in a country, would infringe one or more identifiable patents in that +country that you have reason to believe are valid. + + If, pursuant to or in connection with a single transaction or +arrangement, you convey, or propagate by procuring conveyance of, a +covered work, and grant a patent license to some of the parties +receiving the covered work authorizing them to use, propagate, modify +or convey a specific copy of the covered work, then the patent license +you grant is automatically extended to all recipients of the covered +work and works based on it. + + A patent license is "discriminatory" if it does not include within +the scope of its coverage, prohibits the exercise of, or is +conditioned on the non-exercise of one or more of the rights that are +specifically granted under this License. 
You may not convey a covered +work if you are a party to an arrangement with a third party that is +in the business of distributing software, under which you make payment +to the third party based on the extent of your activity of conveying +the work, and under which the third party grants, to any of the +parties who would receive the covered work from you, a discriminatory +patent license (a) in connection with copies of the covered work +conveyed by you (or copies made from those copies), or (b) primarily +for and in connection with specific products or compilations that +contain the covered work, unless you entered into that arrangement, +or that patent license was granted, prior to 28 March 2007. + + Nothing in this License shall be construed as excluding or limiting +any implied license or other defenses to infringement that may +otherwise be available to you under applicable patent law. + + 12. No Surrender of Others' Freedom. + + If conditions are imposed on you (whether by court order, agreement or +otherwise) that contradict the conditions of this License, they do not +excuse you from the conditions of this License. If you cannot convey a +covered work so as to satisfy simultaneously your obligations under this +License and any other pertinent obligations, then as a consequence you may +not convey it at all. For example, if you agree to terms that obligate you +to collect a royalty for further conveying from those to whom you convey +the Program, the only way you could satisfy both those terms and this +License would be to refrain entirely from conveying the Program. + + 13. Use with the GNU Affero General Public License. + + Notwithstanding any other provision of this License, you have +permission to link or combine any covered work with a work licensed +under version 3 of the GNU Affero General Public License into a single +combined work, and to convey the resulting work. 
The terms of this +License will continue to apply to the part which is the covered work, +but the special requirements of the GNU Affero General Public License, +section 13, concerning interaction through a network will apply to the +combination as such. + + 14. Revised Versions of this License. + + The Free Software Foundation may publish revised and/or new versions of +the GNU General Public License from time to time. Such new versions will +be similar in spirit to the present version, but may differ in detail to +address new problems or concerns. + + Each version is given a distinguishing version number. If the +Program specifies that a certain numbered version of the GNU General +Public License "or any later version" applies to it, you have the +option of following the terms and conditions either of that numbered +version or of any later version published by the Free Software +Foundation. If the Program does not specify a version number of the +GNU General Public License, you may choose any version ever published +by the Free Software Foundation. + + If the Program specifies that a proxy can decide which future +versions of the GNU General Public License can be used, that proxy's +public statement of acceptance of a version permanently authorizes you +to choose that version for the Program. + + Later license versions may give you additional or different +permissions. However, no additional obligations are imposed on any +author or copyright holder as a result of your choosing to follow a +later version. + + 15. Disclaimer of Warranty. + + THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY +APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT +HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY +OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, +THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR +PURPOSE. 
THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM +IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF +ALL NECESSARY SERVICING, REPAIR OR CORRECTION. + + 16. Limitation of Liability. + + IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING +WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS +THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY +GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE +USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF +DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD +PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), +EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF +SUCH DAMAGES. + + 17. Interpretation of Sections 15 and 16. + + If the disclaimer of warranty and limitation of liability provided +above cannot be given local legal effect according to their terms, +reviewing courts shall apply local law that most closely approximates +an absolute waiver of all civil liability in connection with the +Program, unless a warranty or assumption of liability accompanies a +copy of the Program in return for a fee. + + END OF TERMS AND CONDITIONS + + How to Apply These Terms to Your New Programs + + If you develop a new program, and you want it to be of the greatest +possible use to the public, the best way to achieve this is to make it +free software which everyone can redistribute and change under these terms. + + To do so, attach the following notices to the program. It is safest +to attach them to the start of each source file to most effectively +state the exclusion of warranty; and each file should have at least +the "copyright" line and a pointer to where the full notice is found. 
+ + + Copyright (C) + + This program is free software: you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation, either version 3 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program. If not, see . + +Also add information on how to contact you by electronic and paper mail. + + If the program does terminal interaction, make it output a short +notice like this when it starts in an interactive mode: + + Copyright (C) + This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'. + This is free software, and you are welcome to redistribute it + under certain conditions; type `show c' for details. + +The hypothetical commands `show w' and `show c' should show the appropriate +parts of the General Public License. Of course, your program's commands +might be different; for a GUI interface, you would use an "about box". + + You should also get your employer (if you work as a programmer) or school, +if any, to sign a "copyright disclaimer" for the program, if necessary. +For more information on this, and how to apply and follow the GNU GPL, see +. + + The GNU General Public License does not permit incorporating your program +into proprietary programs. If your program is a subroutine library, you +may consider it more useful to permit linking proprietary applications with +the library. If this is what you want to do, use the GNU Lesser General +Public License instead of this License. But first, please read +. 
diff --git a/LICENSE.mit b/LICENSE.mit new file mode 100644 index 0000000..9c78d66 --- /dev/null +++ b/LICENSE.mit @@ -0,0 +1,32 @@ +/* + Copyright (c) 2013 Marek Majkowski + + Permission is hereby granted, free of charge, to any person obtaining a copy + of this software and associated documentation files (the "Software"), to deal + in the Software without restriction, including without limitation the rights + to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + copies of the Software, and to permit persons to whom the Software is + furnished to do so, subject to the following conditions: + + The above copyright notice and this permission notice shall be included in + all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + THE SOFTWARE. + + + Original location: + https://github.com/majek/csiphash/ + + Solution inspired by code from: + Samuel Neves (supercop/crypto_auth/siphash24/little) + djb (supercop/crypto_auth/siphash24/little2) + Jean-Philippe Aumasson (https://131002.net/siphash/siphash24.c) +*/ + + diff --git a/LICENSE.openldap b/LICENSE.openldap new file mode 100644 index 0000000..05ad757 --- /dev/null +++ b/LICENSE.openldap @@ -0,0 +1,47 @@ +The OpenLDAP Public License + Version 2.8, 17 August 2003 + +Redistribution and use of this software and associated documentation +("Software"), with or without modification, are permitted provided +that the following conditions are met: + +1. Redistributions in source form must retain copyright statements + and notices, + +2. 
Redistributions in binary form must reproduce applicable copyright + statements and notices, this list of conditions, and the following + disclaimer in the documentation and/or other materials provided + with the distribution, and + +3. Redistributions must contain a verbatim copy of this document. + +The OpenLDAP Foundation may revise this license from time to time. +Each revision is distinguished by a version number. You may use +this Software under terms of this license revision or under the +terms of any subsequent revision of the license. + +THIS SOFTWARE IS PROVIDED BY THE OPENLDAP FOUNDATION AND ITS +CONTRIBUTORS ``AS IS'' AND ANY EXPRESSED OR IMPLIED WARRANTIES, +INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY +AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT +SHALL THE OPENLDAP FOUNDATION, ITS CONTRIBUTORS, OR THE AUTHOR(S) +OR OWNER(S) OF THE SOFTWARE BE LIABLE FOR ANY DIRECT, INDIRECT, +INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, +BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT +LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN +ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +POSSIBILITY OF SUCH DAMAGE. + +The names of the authors and copyright holders must not be used in +advertising or otherwise to promote the sale, use or other dealing +in this Software without specific, written prior permission. Title +to copyright in this Software shall at all times remain with copyright +holders. + +OpenLDAP is a registered trademark of the OpenLDAP Foundation. + +Copyright 1999-2003 The OpenLDAP Foundation, Redwood City, +California, USA. All Rights Reserved. Permission to copy and +distribute verbatim copies of this document is granted. 
diff --git a/LICENSE.openssl b/LICENSE.openssl new file mode 100644 index 0000000..1625bce --- /dev/null +++ b/LICENSE.openssl @@ -0,0 +1,11 @@ +Additional permission under GPLv3 section 7: + +If you modify this Program, or any covered work, by linking or +combining it with OpenSSL, or a modified version of OpenSSL licensed +under the OpenSSL license +(https://www.openssl.org/source/license.html), the licensors of this +Program grant you additional permission to convey the resulting +work. Corresponding Source for a non-source form of such a +combination shall include the source code for the parts that are +licensed under the OpenSSL license as well as that of the covered +work. diff --git a/Makefile.am b/Makefile.am new file mode 100644 index 0000000..c448694 --- /dev/null +++ b/Makefile.am @@ -0,0 +1,2135 @@ +# look for included m4 files in the ./m4/ directory +ACLOCAL_AMFLAGS = -I m4 +NULLSTRING := +SPACE := $(NULLSTRING) # the space is between the ) and the # +COLON := $(NULLSTRING):# a colon +QUOTE := $(NULLSTRING)"# a double quote" + +#------------------------ +# Compiler Flags +#------------------------ +# +# First, we setup the definitions from configure.ac +# + +PYTHON := python3 +if DEBUG +# This allows sccache to work correctly with C files. +BUILDNUM := "\"0000.000.0000\"" +else +BUILDNUM := $(shell $(srcdir)/buildnum.py) +endif +NQBUILDNUM := $(subst \,,$(subst $(QUOTE),,$(BUILDNUM))) +DEBUG_DEFINES = @debug_defs@ +DEBUG_CFLAGS = @debug_cflags@ +DEBUG_CXXFLAGS = @debug_cxxflags@ +GCCSEC_CFLAGS = @gccsec_cflags@ +if CLANG_ENABLE +ASAN_CFLAGS = @asan_cflags@ +else +if enable_asan +ASAN_CFLAGS = @asan_cflags@ -lasan +else +ASAN_CFLAGS = @asan_cflags@ +endif +endif +MSAN_CFLAGS = @msan_cflags@ +TSAN_CFLAGS = @tsan_cflags@ +UBSAN_CFLAGS = @ubsan_cflags@ + +if CFI_ENABLE +# https://clang.llvm.org/docs/ControlFlowIntegrity.html#available-schemes +# vcall is "forward edge" cfi which is what gives a lot of benefit security wise. 
+CFI_CFLAGS = -flto=thin -fsanitize=cfi-cast-strict,cfi-vcall -fvisibility=hidden +# Settings we could use in the future +# -fsanitize=cfi-icall,cfi-nvcall,cfi-derived-cast,cfi-unrelated-cast,cfi-mfcall +else +CFI_CFLAGS = +endif + +SYSTEMD_DEFINES = @systemd_defs@ + +CMOCKA_INCLUDES = $(CMOCKA_CFLAGS) + +PROFILING_DEFINES = @profiling_defs@ +SYSTEMTAP_DEFINES = @systemtap_defs@ +NSPR_INCLUDES = $(NSPR_CFLAGS) + +# Rust inclusions. +CARGO_FLAGS = @cargo_defs@ + +if CLANG_ENABLE +RUSTC_FLAGS = @asan_rust_defs@ @msan_rust_defs@ @tsan_rust_defs@ @debug_rust_defs@ +RUSTC_LINK_FLAGS = -C link-arg=-fuse-ld=lld +RUST_LDFLAGS = -ldl -lpthread -lc -lm -lrt -lutil +else +RUSTC_FLAGS = @asan_rust_defs@ @msan_rust_defs@ @tsan_rust_defs@ @debug_rust_defs@ +RUSTC_LINK_FLAGS = +# This avoids issues with stderr being double provided with clang + asan. +RUST_LDFLAGS = -ldl -lpthread -lgcc_s -lc -lm -lrt -lutil +endif +RUST_DEFINES = -DRUST_ENABLE +if RUST_ENABLE_OFFLINE +RUST_OFFLINE = --locked --offline +else +RUST_OFFLINE = +endif + +if CLANG_ENABLE +CLANG_ON = 1 +CLANG_LDFLAGS = -latomic -fuse-ld=lld +EXPORT_LDFLAGS = +else +CLANG_ON = 0 +CLANG_LDFLAGS = +if DEBUG +EXPORT_LDFLAGS = -rdynamic +endif +endif + +REWRITERS_INCLUDES = -I$(srcdir)/src/rewriters/ + +SVRCORE_INCLUDES = -I$(srcdir)/src/svrcore/src/ + +if CLANG_ENABLE +# clang complains about the -U. +DS_DEFINES = -DBUILD_NUM=$(BUILDNUM) -DVENDOR="\"$(vendor)\"" -DBRAND="\"$(brand)\"" -DCAPBRAND="\"$(capbrand)\"" +else +# the -U undefines these symbols - should use the corresponding DS_ ones instead - see configure.ac +DS_DEFINES = -DBUILD_NUM=$(BUILDNUM) -DVENDOR="\"$(vendor)\"" -DBRAND="\"$(brand)\"" -DCAPBRAND="\"$(capbrand)\"" \ + -UPACKAGE_VERSION -UPACKAGE_TARNAME -UPACKAGE_STRING -UPACKAGE_BUGREPORT +endif +DS_INCLUDES = -I$(srcdir)/ldap/include -I$(srcdir)/ldap/servers/slapd -I$(srcdir)/include -I. 
+ + +if enable_asan +ASAN_ON = 1 +SANITIZER = ASAN +else +ASAN_ON = 0 +endif + +if enable_msan +MSAN_ON = 1 +SANITIZER = MSAN +else +MSAN_ON = 0 +endif + +if enable_tsan +TSAN_ON = 1 +SANITIZER = TSAN +else +TSAN_ON = 0 +endif + +if enable_ubsan +UBSAN_ON = 1 +SANITIZER = UBSAN +else +UBSAN_ON = 0 +endif + +if with_systemd +WITH_SYSTEMD = 1 +else +WITH_SYSTEMD = 0 +endif + +# these paths are dependent on the settings of prefix and exec_prefix which may be specified +# at make time. So we cannot use AC_DEFINE in the configure.ac because that would set the +# values prior to their being defined. Defining them here ensures that they are properly +# expanded before use. See create_instance.h for more details. The quoting ensures that +# the values are quoted for the shell command, and the value expands to a quoted string +# value in the header file e.g. +# #define LOCALSTATEDIR "/var" +# without the quotes, it would be +# #define LOCALSTATEDIR /var +# which would be an error +PATH_DEFINES = -DLOCALSTATEDIR="\"$(localstatedir)\"" -DSYSCONFDIR="\"$(sysconfdir)\"" \ + -DLIBDIR="\"$(libdir)\"" -DBINDIR="\"$(bindir)\"" \ + -DDATADIR="\"$(datadir)\"" -DDOCDIR="\"$(docdir)\"" \ + -DSBINDIR="\"$(sbindir)\"" -DPLUGINDIR="\"$(serverplugindir)\"" \ + -DTEMPLATEDIR="\"$(sampledatadir)\"" -DSYSTEMSCHEMADIR="\"$(systemschemadir)\"" \ + -DLOCALRUNDIR="\"$(localrundir)\"" +# Now that we have all our defines in place, setup the CPPFLAGS + +# These flags are the "must have" for all components +AM_CPPFLAGS = $(DEBUG_DEFINES) $(PROFILING_DEFINES) $(SYSTEMTAP_DEFINES) $(RUST_DEFINES) +AM_CFLAGS = $(DEBUG_CFLAGS) $(GCCSEC_CFLAGS) $(ASAN_CFLAGS) $(MSAN_CFLAGS) $(TSAN_CFLAGS) $(UBSAN_CFLAGS) +AM_CXXFLAGS = $(DEBUG_CXXFLAGS) $(GCCSEC_CFLAGS) $(ASAN_CFLAGS) $(MSAN_CFLAGS) $(TSAN_CFLAGS) $(UBSAN_CFLAGS) +# Flags for Directory Server +# WARNING: This needs a clean up, because slap.h is a horrible mess and is publically exposed! 
+DSPLUGIN_CPPFLAGS = $(DS_DEFINES) $(DS_INCLUDES) $(PATH_DEFINES) $(SYSTEMD_DEFINES) @openldap_inc@ $(NSS_CFLAGS) $(NSPR_INCLUDES) $(SYSTEMD_CFLAGS) +# This should give access to internal headers only for tests!!! +DSINTERNAL_CPPFLAGS = -I$(srcdir)/include/ldaputil +# Flags for Datastructure Library + +#------------------------ +# Linker Flags +#------------------------ +CMOCKA_LINKS = $(CMOCKA_LIBS) +PROFILING_LINKS = @profiling_links@ + +NSPR_LINK = $(NSPR_LIBS) +NSS_LINK = $(NSS_LIBS) + +# OpenLDAP 2.5 and newer versions don't have libldap_r shared library (only libldap) +# For the older versions we should compile with libldap_r +if WITH_LIBLDAP_R +LDAPSDK_LINK = @openldap_lib@ -lldap_r@ol_libver@ @ldap_lib_ldif@ -llber@ol_libver@ +else +LDAPSDK_LINK = @openldap_lib@ -lldap@ol_libver@ @ldap_lib_ldif@ -llber@ol_libver@ +endif +ldaplib = @ldaplib@ +ldaplib_defs = @ldaplib_defs@ + +DB_LINK = @db_lib@ -ldb-@db_libver@ -llmdb +DB_INC = @db_inc@ +DB_IMPL = libback-ldbm.la +SASL_LINK = $(SASL_LIBS) +NETSNMP_LINK = @netsnmp_lib@ @netsnmp_link@ +PAM_LINK = -lpam +EVENT_LINK = $(EVENT_LIBS) +PW_CRACK_LINK = -lcrack +ZLIB_LINK = -lz +JSON_C_LINK = -ljson-c + +LIBSOCKET=@LIBSOCKET@ +LIBNSL=@LIBNSL@ +LIBDL=@LIBDL@ +LIBCSTD=@LIBCSTD@ +LIBCRUN=@LIBCRUN@ +THREADLIB=@THREADLIB@ +LIBCRYPT=@LIBCRYPT@ + +# We need to make sure that libpthread is linked before libc on HP-UX. +if HPUX +AM_LDFLAGS = -lpthread +else +#AM_LDFLAGS = -Wl,-z,defs +AM_LDFLAGS = $(ZLIB_LINK) $(JSON_C_LINK) $(PW_CRACK_LINK) $(RUST_LDFLAGS) $(ASAN_CFLAGS) $(MSAN_CFLAGS) $(TSAN_CFLAGS) $(UBSAN_CFLAGS) $(PROFILING_LINKS) $(CLANG_LDFLAGS) $(EXPORT_LDFLAGS) +endif #end hpux + +# https://www.gnu.org/software/libtool/manual/html_node/Updating-version-info.html#Updating-version-info +# So, libtool library versions are described by three integers: +# +# current +# +# The most recent interface number that this library implements. +# revision +# +# The implementation number of the current interface. 
+# age +# +# The difference between the newest and oldest interfaces that this library implements. In other words, the library implements all the interface numbers in the range from number current - age to current. +# +# Here are a set of rules to help you update your library version information: +# +# Start with version information of ‘0:0:0’ for each libtool library. +# Update the version information only immediately before a public release of your software. More frequent updates are unnecessary, and only guarantee that the current interface number gets larger faster. +# If the library source code has changed at all since the last update, then increment revision (‘c:r:a’ becomes ‘c:r+1:a’). +# If any interfaces have been added, removed, or changed since the last update, increment current, and set revision to 0. +# If any interfaces have been added since the last public release, then increment age. +# If any interfaces have been removed or changed since the last public release, then set age to 0. 
+ +SLAPD_LDFLAGS = -version-info 1:0:1 + + +#------------------------ +# Generated Sources +#------------------------ +BUILT_SOURCES = dberrstrs.h rust-slapi-private.h rust-nsslapd-private.h \ + $(POLICY_FC) + +if enable_posix_winsync +LIBPOSIX_WINSYNC_PLUGIN = libposix-winsync-plugin.la +endif + +CLEANFILES = dberrstrs.h ns-slapd.properties \ + ldap/admin/src/template-initconfig \ + ldap/ldif/template-baseacis.ldif ldap/ldif/template-bitwise.ldif ldap/ldif/template-country.ldif \ + ldap/ldif/template-dnaplugin.ldif ldap/ldif/template-domain.ldif ldap/ldif/template-dse.ldif \ + ldap/ldif/template-dse-minimal.ldif \ + ldap/ldif/template-ldapi-autobind.ldif ldap/ldif/template-ldapi-default.ldif \ + ldap/ldif/template-ldapi.ldif ldap/ldif/template-locality.ldif ldap/ldif/template-org.ldif \ + ldap/ldif/template-orgunit.ldif ldap/ldif/template-pampta.ldif ldap/ldif/template-sasl.ldif \ + ldap/ldif/template-state.ldif ldap/ldif/template-suffix-db.ldif \ + doxyfile.stamp rust-slapi-private.h\ + $(NULL) + +clean-local: + -rm -rf dist + -rm -rf $(abs_top_builddir)/html + -rm -rf $(abs_top_builddir)/man/man3 + -rm -rf $(abs_top_builddir)/rs + +dberrstrs.h: Makefile $(srcdir)/ldap/servers/slapd/mkDBErrStrs.py $(srcdir)/ldap/servers/slapd/back-ldbm/dbimpl.h + $(srcdir)/ldap/servers/slapd/mkDBErrStrs.py -i $(srcdir)/ldap/servers/slapd/back-ldbm -o . 
+ + +#------------------------ +# Install Paths +#------------------------ +prefixdir = @prefixdir@ +configdir = $(sysconfdir)@configdir@ +sampledatadir = $(datadir)@sampledatadir@ +systemschemadir = $(datadir)@systemschemadir@ +propertydir = $(datadir)@propertydir@ +schemadir = $(sysconfdir)@schemadir@ +serverdir = $(libdir)/@serverdir@ +serverplugindir = $(libdir)@serverplugindir@ +taskdir = $(datadir)@scripttemplatedir@ +systemdsystemunitdir = @with_systemdsystemunitdir@ +systemdsystemunitdropindir = @with_systemdsystemunitdir@/$(PACKAGE_NAME)@.service.d +systemdsystemconfdir = @with_systemdsystemconfdir@ +systemdgroupname = @with_systemdgroupname@ +initdir = @initdir@ +initconfigdir = $(sysconfdir)@initconfigdir@ +instconfigdir = @instconfigdir@ +perldir = $(libdir)@perldir@ +pythondir = $(libdir)@pythondir@ +infdir = $(datadir)@infdir@ +mibdir = $(datadir)@mibdir@ +updatedir = $(datadir)@updatedir@ +pkgconfigdir = $(libdir)/pkgconfig +serverincdir = $(includedir)/@serverincdir@ +gdbautoloaddir = $(prefixdir)/share/gdb/auto-load$(sbindir) +cockpitdir = $(prefixdir)/share/cockpit@cockpitdir@ +metainfodir = $(prefixdir)/share/metainfo/389-console +tmpfiles_d = @tmpfiles_d@ + +# This has to be hardcoded to /lib - $libdir changes between lib/lib64, but +# sysctl.d is always in /lib. +sysctldir = @prefixdir@/lib/sysctl.d + +defaultuser=@defaultuser@ +defaultgroup=@defaultgroup@ + +#------------------------ +# Build Products +#------------------------ +sbin_PROGRAMS = ns-slapd ldap-agent + +bin_PROGRAMS = dbscan \ + ldclt \ + pwdhash + +# ---------------------------------------------------------------------------------------- +# This odd looking definition is to keep the libraries in ORDER that they are needed. rsds +# is needed by sds, which is needed by ns. 
So we have a blank LTLIB, then append in order +# based on defines +# ---------------------------------------------------------------------------------------- + +server_LTLIBRARIES = libslapd.la libldaputil.la libns-dshttpd.la librewriters.la + +lib_LTLIBRARIES = libsvrcore.la + +# this is how to add optional plugins +if enable_pam_passthru +LIBPAM_PASSTHRU_PLUGIN = libpam-passthru-plugin.la +enable_pam_passthru = 1 +endif +if enable_dna +LIBDNA_PLUGIN = libdna-plugin.la +enable_dna = 1 +endif + +if enable_bitwise +LIBBITWISE_PLUGIN = libbitwise-plugin.la +enable_bitwise = 1 +endif + +if enable_acctpolicy +LIBACCTPOLICY_PLUGIN = libacctpolicy-plugin.la +LIBACCTPOLICY_SCHEMA = $(srcdir)/ldap/schema/60acctpolicy.ldif +enable_acctpolicy = 1 +endif + +serverplugin_LTLIBRARIES = libacl-plugin.la \ + libaddn-plugin.la \ + libattr-unique-plugin.la \ + libautomember-plugin.la libback-ldbm.la libchainingdb-plugin.la \ + libcollation-plugin.la libcos-plugin.la libderef-plugin.la \ + libpbe-plugin.la libdistrib-plugin.la \ + liblinkedattrs-plugin.la libmanagedentries-plugin.la \ + libmemberof-plugin.la libpassthru-plugin.la libpwdstorage-plugin.la \ + libcontentsync-plugin.la \ + libreferint-plugin.la libreplication-plugin.la libretrocl-plugin.la \ + libroles-plugin.la libstatechange-plugin.la libsyntax-plugin.la \ + libviews-plugin.la libschemareload-plugin.la libusn-plugin.la \ + libacctusability-plugin.la librootdn-access-plugin.la \ + libwhoami-plugin.la libalias-entries-plugin.la $(LIBACCTPOLICY_PLUGIN) \ + $(LIBPAM_PASSTHRU_PLUGIN) $(LIBDNA_PLUGIN) \ + $(LIBBITWISE_PLUGIN) $(LIBPRESENCE_PLUGIN) $(LIBPOSIX_WINSYNC_PLUGIN) \ + libentryuuid-plugin.la libentryuuid-syntax-plugin.la libpwdchan-plugin.la + +noinst_LIBRARIES = libavl.a + +dist_noinst_HEADERS = \ + include/i18n.h \ + include/netsite.h \ + include/base/crit.h \ + include/base/dbtbase.h \ + include/base/ereport.h \ + include/base/file.h \ + include/base/fsmutex.h \ + include/base/plist.h \ + include/base/pool.h \ 
+ include/base/shexp.h \ + include/base/systems.h \ + include/base/systhr.h \ + include/base/util.h \ + include/ldaputil/cert.h \ + include/ldaputil/certmap.h \ + include/ldaputil/dbconf.h \ + include/ldaputil/encode.h \ + include/ldaputil/errors.h \ + include/ldaputil/init.h \ + include/ldaputil/ldapauth.h \ + include/ldaputil/ldaputil.h \ + include/libaccess/aclerror.h \ + include/libaccess/acleval.h \ + include/libaccess/aclglobal.h \ + include/libaccess/acl.h \ + include/libaccess/aclproto.h \ + include/libaccess/aclstruct.h \ + include/libaccess/attrec.h \ + include/libaccess/authdb.h \ + include/libaccess/dbtlibaccess.h \ + include/libaccess/dnfstruct.h \ + include/libaccess/ipfstruct.h \ + include/libaccess/las.h \ + include/libaccess/nsautherr.h \ + include/libaccess/nsauth.h \ + include/libaccess/nserror.h \ + include/libaccess/symbols.h \ + include/libaccess/userauth.h \ + include/libaccess/usi.h \ + include/libaccess/usrcache.h \ + include/libadmin/dbtlibadmin.h \ + include/libadmin/libadmin.h \ + include/public/netsite.h \ + include/public/nsapi.h \ + include/public/base/systems.h \ + include/public/nsacl/aclapi.h \ + include/public/nsacl/acldef.h \ + include/public/nsacl/nserrdef.h \ + include/public/nsacl/plistdef.h \ + ldap/include/avl.h \ + ldap/include/dblayer.h \ + ldap/include/disptmpl.h \ + ldap/include/ldaprot.h \ + ldap/include/portable.h \ + ldap/include/regex.h \ + ldap/include/srchpref.h \ + ldap/include/sysexits-compat.h \ + ldap/servers/plugins/addn/addn.h \ + ldap/servers/plugins/collation/config.h \ + ldap/servers/plugins/collation/collate.h \ + ldap/servers/plugins/collation/orfilter.h \ + ldap/servers/plugins/chainingdb/cb.h \ + ldap/servers/plugins/deref/deref.h \ + ldap/servers/plugins/acctpolicy/acctpolicy.h \ + ldap/servers/plugins/posix-winsync/posix-wsp-ident.h \ + ldap/servers/plugins/posix-winsync/posix-group-func.h \ + ldap/servers/plugins/roles/roles_cache.h \ + ldap/servers/plugins/usn/usn.h \ + 
ldap/servers/plugins/pwdstorage/pwdstorage.h \ + ldap/servers/plugins/pwdstorage/md5.h \ + ldap/servers/plugins/acl/acl.h \ + ldap/servers/plugins/linkedattrs/linked_attrs.h \ + ldap/servers/plugins/rootdn_access/rootdn_access.h \ + ldap/servers/plugins/acct_usability/acct_usability.h \ + ldap/servers/plugins/retrocl/retrocl.h \ + ldap/servers/plugins/uiduniq/plugin-utils.h \ + ldap/servers/plugins/memberof/memberof.h \ + ldap/servers/plugins/replication/cl5_api.h \ + ldap/servers/plugins/replication/llist.h \ + ldap/servers/plugins/replication/repl_shared.h \ + ldap/servers/plugins/replication/csnpl.h \ + ldap/servers/plugins/replication/cl5.h \ + ldap/servers/plugins/replication/repl-session-plugin.h \ + ldap/servers/plugins/replication/windows_prot_private.h \ + ldap/servers/plugins/replication/repl_helper.h \ + ldap/servers/plugins/replication/repl5.h \ + ldap/servers/plugins/replication/cl5_test.h \ + ldap/servers/plugins/replication/repl5_ruv.h \ + ldap/servers/plugins/replication/cl5_clcache.h \ + ldap/servers/plugins/replication/cl_crypt.h \ + ldap/servers/plugins/replication/urp.h \ + ldap/servers/plugins/replication/winsync-plugin.h \ + ldap/servers/plugins/replication/windowsrepl.h \ + ldap/servers/plugins/replication/repl5_prot_private.h \ + ldap/servers/plugins/pam_passthru/pam_passthru.h \ + ldap/servers/plugins/syntaxes/syntax.h \ + ldap/servers/plugins/cos/cos_cache.h \ + ldap/servers/plugins/sync/sync.h \ + ldap/servers/plugins/passthru/passthru.h \ + ldap/servers/plugins/rever/rever.h \ + ldap/servers/plugins/automember/automember.h \ + ldap/servers/plugins/alias_entries/alias-entries.h \ + ldap/servers/plugins/mep/mep.h \ + ldap/servers/slapd/agtmmap.h \ + ldap/servers/slapd/auth.h \ + ldap/servers/slapd/csngen.h \ + ldap/servers/slapd/disconnect_errors.h \ + ldap/servers/slapd/disconnect_error_strings.h \ + ldap/servers/slapd/fe.h \ + ldap/servers/slapd/filter.h \ + ldap/servers/slapd/getopt_ext.h \ + ldap/servers/slapd/getsocketpeer.h \ + 
ldap/servers/slapd/haproxy.h \ + ldap/servers/slapd/intrinsics.h \ + ldap/servers/slapd/log.h \ + ldap/servers/slapd/openldapber.h \ + ldap/servers/slapd/pblock_v3.h \ + ldap/servers/slapd/poll_using_select.h \ + ldap/servers/slapd/prerrstrs.h \ + ldap/servers/slapd/protect_db.h \ + ldap/servers/slapd/proto-slap.h \ + ldap/servers/slapd/pw.h \ + ldap/servers/slapd/pw_verify.h \ + ldap/servers/slapd/secerrstrs.h \ + ldap/servers/slapd/slap.h \ + ldap/servers/slapd/slapi_pal.h \ + ldap/servers/slapd/slapi-plugin-compat4.h \ + ldap/servers/slapd/slapi-plugin.h \ + ldap/servers/slapd/slapi-private.h \ + ldap/servers/slapd/snmp_collator.h \ + ldap/servers/slapd/sslerrstrs.h \ + ldap/servers/slapd/statechange.h \ + ldap/servers/slapd/uuid.h \ + ldap/servers/slapd/vattr_spi.h \ + ldap/servers/slapd/views.h \ + ldap/servers/slapd/back-ldbm/attrcrypt.h \ + ldap/servers/slapd/back-ldbm/back-ldbm.h \ + ldap/servers/slapd/back-ldbm/dbimpl.h \ + ldap/servers/slapd/back-ldbm/dblayer.h \ + ldap/servers/slapd/back-ldbm/import.h \ + ldap/servers/slapd/back-ldbm/ldbm_config.h \ + ldap/servers/slapd/back-ldbm/proto-back-ldbm.h \ + ldap/servers/slapd/back-ldbm/vlv_key.h \ + ldap/servers/slapd/back-ldbm/vlv_srch.h \ + ldap/servers/slapd/tools/ldaptool.h \ + ldap/servers/slapd/tools/ldaptool-sasl.h \ + ldap/servers/slapd/tools/ldclt/ldap-private.h \ + ldap/servers/slapd/tools/ldclt/ldclt.h \ + ldap/servers/slapd/tools/ldclt/port.h \ + ldap/servers/slapd/tools/ldclt/remote.h \ + ldap/servers/slapd/tools/ldclt/scalab01.h \ + ldap/servers/slapd/tools/ldclt/utils.h \ + ldap/servers/snmp/ldap-agent.h \ + ldap/systools/pio.h \ + lib/base/lexer_pvt.h \ + lib/base/plist_pvt.h \ + lib/ldaputil/ldaputili.h \ + lib/libaccess/access_plhash.h \ + lib/libaccess/aclcache.h \ + lib/libaccess/aclpriv.h \ + lib/libaccess/aclscan.h \ + lib/libaccess/acl.tab.h \ + lib/libaccess/aclutil.h \ + lib/libaccess/lasdns.h \ + lib/libaccess/las.h \ + lib/libaccess/lasip.h \ + lib/libaccess/ldapauth.h \ + 
lib/libaccess/oneeval.h \ + lib/libaccess/parse.h \ + lib/libaccess/permhash.h \ + lib/libsi18n/getstrmem.h \ + lib/libsi18n/gsslapd.h \ + lib/libsi18n/reshash.h \ + lib/libsi18n/txtfile.h + +if ENABLE_CMOCKA +dist_noinst_HEADERS += \ + test/test_slapd.h +endif + +dist_noinst_DATA = \ + $(srcdir)/buildnum.py \ + $(srcdir)/ldap/admin/src/*.in \ + $(srcdir)/ldap/admin/src/scripts/*.py \ + $(srcdir)/ldap/admin/src/scripts/ds-replcheck \ + $(srcdir)/ldap/ldif/*.in \ + $(srcdir)/ldap/ldif/*.ldif \ + $(srcdir)/ldap/schema/*.ldif \ + $(srcdir)/ldap/schema/slapd-collations.conf \ + $(srcdir)/ldap/servers/snmp/ldap-agent.conf \ + $(srcdir)/ldap/servers/snmp/redhat-directory.mib \ + $(srcdir)/ldap/servers/slapd/mkDBErrStrs.py \ + $(srcdir)/lib/ldaputil/certmap.conf \ + $(srcdir)/m4 \ + $(srcdir)/rpm/389-ds-base.spec.in \ + $(srcdir)/rpm/389-ds-base-devel.README \ + $(srcdir)/rpm/389-ds-base-git.sh \ + $(srcdir)/README.md \ + $(srcdir)/LICENSE \ + $(srcdir)/LICENSE.* \ + $(srcdir)/VERSION.sh \ + $(srcdir)/wrappers/*.in \ + $(srcdir)/dirsrvtests \ + $(srcdir)/src/lib389/setup.py.in \ + $(srcdir)/src/lib389 + + +#------------------------ +# Installed Files +#------------------------ +config_DATA = $(srcdir)/lib/ldaputil/certmap.conf \ + $(srcdir)/ldap/schema/slapd-collations.conf \ + ldap/servers/snmp/ldap-agent.conf + +# the schema files in this list are either not +# standard schema, not tested, or not compatible +# with the default schema e.g. 
there is +# considerable overlap of 60changelog.ldif and 01common.ldif +# and 60inetmail.ldif and 50ns-mail.ldif among others +sampledata_DATA = $(srcdir)/ldap/ldif/Ace.ldif \ + $(srcdir)/ldap/ldif/European.ldif \ + $(srcdir)/ldap/ldif/Eurosuffix.ldif \ + $(srcdir)/ldap/ldif/Example.ldif \ + $(srcdir)/ldap/ldif/Example-roles.ldif \ + $(srcdir)/ldap/ldif/Example-views.ldif \ + $(srcdir)/ldap/ldif/template.ldif \ + ldap/ldif/template-dse.ldif \ + ldap/ldif/template-dse-minimal.ldif \ + ldap/ldif/template-suffix-db.ldif \ + ldap/ldif/template-ldapi.ldif \ + ldap/ldif/template-ldapi-default.ldif \ + ldap/ldif/template-ldapi-autobind.ldif \ + ldap/ldif/template-org.ldif \ + ldap/ldif/template-domain.ldif \ + ldap/ldif/template-state.ldif \ + ldap/ldif/template-locality.ldif \ + ldap/ldif/template-country.ldif \ + ldap/ldif/template-orgunit.ldif \ + ldap/ldif/template-baseacis.ldif \ + ldap/ldif/template-sasl.ldif \ + $(srcdir)/ldap/schema/10rfc2307compat.ldif \ + $(srcdir)/ldap/schema/10rfc2307bis.ldif \ + $(srcdir)/ldap/schema/60changelog.ldif \ + $(srcdir)/ldap/schema/60inetmail.ldif \ + $(srcdir)/ldap/schema/60krb5kdc.ldif \ + $(srcdir)/ldap/schema/60kerberos.ldif \ + $(srcdir)/ldap/schema/60nis.ldif \ + $(srcdir)/ldap/schema/60qmail.ldif \ + $(srcdir)/ldap/schema/60radius.ldif \ + $(srcdir)/ldap/schema/60rfc4876.ldif \ + $(srcdir)/ldap/schema/60samba.ldif \ + $(srcdir)/ldap/schema/60sendmail.ldif \ + $(srcdir)/ldap/schema/dsee.schema \ + $(srcdir)/src/lib389/lib389/cli_ctl/dbgen-FamilyNames \ + $(srcdir)/src/lib389/lib389/cli_ctl/dbgen-GivenNames \ + $(srcdir)/src/lib389/lib389/cli_ctl/dbgen-OrgUnits \ + $(LIBPRESENCE_SCHEMA) + +systemschema_DATA = $(srcdir)/ldap/schema/00core.ldif \ + $(srcdir)/ldap/schema/01core389.ldif \ + $(srcdir)/ldap/schema/02common.ldif \ + $(srcdir)/ldap/schema/05rfc2927.ldif \ + $(srcdir)/ldap/schema/05rfc4523.ldif \ + $(srcdir)/ldap/schema/05rfc4524.ldif \ + $(srcdir)/ldap/schema/06inetorgperson.ldif \ + 
$(srcdir)/ldap/schema/10automember-plugin.ldif \ + $(srcdir)/ldap/schema/10dna-plugin.ldif \ + $(srcdir)/ldap/schema/10mep-plugin.ldif \ + $(srcdir)/ldap/schema/10rfc2307compat.ldif \ + $(srcdir)/ldap/schema/20subscriber.ldif \ + $(srcdir)/ldap/schema/25java-object.ldif \ + $(srcdir)/ldap/schema/28pilot.ldif \ + $(srcdir)/ldap/schema/30ns-common.ldif \ + $(srcdir)/ldap/schema/50ns-admin.ldif \ + $(srcdir)/ldap/schema/50ns-certificate.ldif \ + $(srcdir)/ldap/schema/50ns-directory.ldif \ + $(srcdir)/ldap/schema/50ns-mail.ldif \ + $(srcdir)/ldap/schema/50ns-value.ldif \ + $(srcdir)/ldap/schema/50ns-web.ldif \ + $(srcdir)/ldap/schema/60pam-plugin.ldif \ + $(srcdir)/ldap/schema/60posix-winsync-plugin.ldif \ + $(srcdir)/ldap/schema/60autofs.ldif \ + $(srcdir)/ldap/schema/60eduperson.ldif \ + $(srcdir)/ldap/schema/60mozilla.ldif \ + $(srcdir)/ldap/schema/60pureftpd.ldif \ + $(srcdir)/ldap/schema/60rfc2739.ldif \ + $(srcdir)/ldap/schema/60rfc3712.ldif \ + $(srcdir)/ldap/schema/60sabayon.ldif \ + $(srcdir)/ldap/schema/60samba3.ldif \ + $(srcdir)/ldap/schema/60sudo.ldif \ + $(srcdir)/ldap/schema/60trust.ldif \ + $(srcdir)/ldap/schema/60nss-ldap.ldif \ + $(srcdir)/ldap/schema/03entryuuid.ldif \ + $(LIBACCTPOLICY_SCHEMA) + +schema_DATA = $(srcdir)/ldap/schema/99user.ldif + +libexec_SCRIPTS = + +if SYSTEMD +libexec_SCRIPTS += wrappers/ds_systemd_ask_password_acl wrappers/ds_selinux_restorecon.sh +endif + +if ENABLE_COCKPIT +install-data-hook: + if [ "$(srcdir)" != "." ]; then cp -r $(srcdir)/src/cockpit src ; fi + mkdir -p src/cockpit/389-console/cockpit_dist/ + mkdir -p $(DESTDIR)$(cockpitdir) + rsync -rupE src/cockpit/389-console/cockpit_dist/ $(DESTDIR)$(cockpitdir) + mkdir -p $(DESTDIR)$(metainfodir) + rsync -up src/cockpit/389-console/org.port389.cockpit_console.metainfo.xml $(DESTDIR)$(metainfodir)/org.port389.cockpit_console.metainfo.xml +endif + +sbin_SCRIPTS = + +bin_SCRIPTS = + +# For scripts that are "as is". 
+dist_bin_SCRIPTS = ldap/admin/src/scripts/ds-replcheck \ + ldap/admin/src/scripts/ds-logpipe.py + +dist_bin_SCRIPTS += ldap/admin/src/logconv.pl + +python_DATA = ldap/admin/src/scripts/failedbinds.py \ + ldap/admin/src/scripts/logregex.py + +gdbautoload_DATA = ldap/admin/src/scripts/ns-slapd-gdb.py + +dist_sysctl_DATA = ldap/admin/src/70-dirsrv.conf + +if SYSTEMD +# yes, that is an @ in the filename . . . +systemdsystemunit_DATA = wrappers/$(PACKAGE_NAME)@.service \ + wrappers/$(systemdgroupname) \ + wrappers/$(PACKAGE_NAME)-snmp.service + +if with_sanitizer +systemdsystemunitdropin_DATA = wrappers/$(PACKAGE_NAME)@.service.d/xsan.conf +else +systemdsystemunitdropin_DATA = wrappers/$(PACKAGE_NAME)@.service.d/custom.conf +endif + +else +if INITDDIR +init_SCRIPTS = wrappers/$(PACKAGE_NAME) \ + wrappers/$(PACKAGE_NAME)-snmp +endif +endif + +if INITDDIR +initconfig_DATA = ldap/admin/src/$(PACKAGE_NAME) +endif + +inf_DATA = ldap/admin/src/slapd.inf \ + ldap/admin/src/defaults.inf + +mib_DATA = ldap/servers/snmp/redhat-directory.mib + +pkgconfig_DATA = src/pkgconfig/dirsrv.pc \ + src/pkgconfig/svrcore.pc + +#------------------------ +# header files +#------------------------ +serverinc_HEADERS = ldap/servers/plugins/replication/repl-session-plugin.h \ + ldap/servers/slapd/slapi_pal.h \ + ldap/servers/slapd/slapi-plugin.h \ + ldap/servers/plugins/replication/winsync-plugin.h + +include_HEADERS = src/svrcore/src/svrcore.h + +#------------------------ +# man pages +#------------------------ +dist_man_MANS = man/man1/dbscan.1 \ + man/man1/ds-logpipe.py.1 \ + man/man1/ds-replcheck.1 \ + man/man1/ldap-agent.1 \ + man/man1/ldclt.1 \ + man/man1/logconv.pl.1 \ + man/man1/pwdhash.1 \ + man/man5/99user.ldif.5 \ + man/man8/ns-slapd.8 \ + man/man5/certmap.conf.5 \ + man/man5/dirsrv.5 \ + man/man5/dirsrv.systemd.5 \ + man/man5/slapd-collations.conf.5 + +#//////////////////////////////////////////////////////////////// +# +# Static Server Libraries +# 
+#//////////////////////////////////////////////////////////////// +#------------------------ +# libavl +#------------------------ +libavl_a_SOURCES = ldap/libraries/libavl/avl.c +libavl_a_CPPFLAGS = $(AM_CPPFLAGS) $(DSPLUGIN_CPPFLAGS) + +#------------------------ +# libldaputil +#------------------------ +libldaputil_la_SOURCES = lib/ldaputil/cert.c \ + lib/ldaputil/certmap.c \ + lib/ldaputil/dbconf.c \ + lib/ldaputil/encode.c \ + lib/ldaputil/errors.c \ + lib/ldaputil/init.c \ + lib/ldaputil/ldapauth.c \ + lib/ldaputil/vtable.c + +libldaputil_la_CPPFLAGS = $(AM_CPPFLAGS) $(DSPLUGIN_CPPFLAGS) $(DSINTERNAL_CPPFLAGS) -I$(srcdir)/lib/ldaputil +libldaputil_la_LIBADD = libslapd.la $(NSS_LINK) $(NSPR_LINK) +libldaputil_la_LDFLAGS = $(AM_LDFLAGS) + +#//////////////////////////////////////////////////////////////// +# +# Dynamic Server Libraries +# +#//////////////////////////////////////////////////////////////// + +#------------------------ +# librewriters +#------------------------ +librewriters_la_SOURCES = \ + src/rewriters/adfilter.c + +librewriters_la_LDFLAGS = $(AM_LDFLAGS) +librewriters_la_CPPFLAGS = $(AM_CPPFLAGS) $(REWRITERS_INCLUDES) $(DSPLUGIN_CPPFLAGS) +librewriters_la_LIBADD = libslapd.la $(NSS_LINK) $(NSPR_LINK) + +#------------------------ +# libsvrcore +#------------------------ +libsvrcore_la_SOURCES = \ + src/svrcore/src/alt.c \ + src/svrcore/src/cache.c \ + src/svrcore/src/errors.c \ + src/svrcore/src/file.c \ + src/svrcore/src/ntgetpin.c \ + src/svrcore/src/ntresource.h \ + src/svrcore/src/pin.c \ + src/svrcore/src/pk11.c \ + src/svrcore/src/std.c \ + src/svrcore/src/systemd-ask-pass.c \ + src/svrcore/src/std-systemd.c \ + src/svrcore/src/user.c + +libsvrcore_la_LDFLAGS = $(AM_LDFLAGS) +libsvrcore_la_CPPFLAGS = $(AM_CPPFLAGS) $(SVRCORE_INCLUDES) $(DSPLUGIN_CPPFLAGS) +libsvrcore_la_LIBADD = $(NSS_LINK) $(NSPR_LINK) + +noinst_LTLIBRARIES = librslapd.la librnsslapd.la libentryuuid.la libentryuuid_syntax.la \ + libpwdchan.la + +### Why does this exist? 
+# +# Both cargo and autotools are really opinionated. It's really hard to make this work. :( +# +# https://people.gnome.org/~federico/blog/librsvg-build-infrastructure.html +# https://gitlab.gnome.org/GNOME/librsvg/blob/master/Makefile.am + +### Rust lib slapd components +RSLAPD_LIB = @abs_top_builddir@/rs/rslapd/@rust_target_dir@/librslapd.a + +librslapd_la_SOURCES = \ + src/librslapd/Cargo.toml \ + src/librslapd/build.rs \ + src/librslapd/src/cache.rs \ + src/librslapd/src/lib.rs + +librslapd_la_EXTRA = src/librslapd/Cargo.lock + +@abs_top_builddir@/rs/rslapd/@rust_target_dir@/librslapd.a: $(librslapd_la_SOURCES) + RUST_BACKTRACE=1 RUSTC_BOOTSTRAP=1 \ + CARGO_TARGET_DIR=$(abs_top_builddir)/rs/rslapd \ + SLAPD_DYLIB_DIR=$(abs_top_builddir)/ \ + SLAPD_HEADER_DIR=$(abs_top_builddir)/ \ + cargo rustc $(RUST_OFFLINE) --manifest-path=$(srcdir)/src/librslapd/Cargo.toml \ + $(CARGO_FLAGS) --verbose -- $(RUSTC_FLAGS) $(RUSTC_LINK_FLAGS) + +# The header needs the lib build first. +rust-slapi-private.h: @abs_top_builddir@/rs/rslapd/@rust_target_dir@/librslapd.a + +# Build rust ns-slapd components as a library. +RNSSLAPD_LIB = @abs_top_builddir@/rs/rnsslapd/@rust_target_dir@/librnsslapd.a + +librnsslapd_la_SOURCES = \ + src/librnsslapd/Cargo.toml \ + src/librnsslapd/build.rs \ + src/librnsslapd/src/lib.rs + +librnsslapd_la_EXTRA = src/librnsslapd/Cargo.lock + +@abs_top_builddir@/rs/rnsslapd/@rust_target_dir@/librnsslapd.a: $(librnsslapd_la_SOURCES) + RUST_BACKTRACE=1 RUSTC_BOOTSTRAP=1 \ + CARGO_TARGET_DIR=$(abs_top_builddir)/rs/rnsslapd \ + SLAPD_DYLIB_DIR=$(abs_top_builddir)/ \ + SLAPD_HEADER_DIR=$(abs_top_builddir)/ \ + cargo rustc $(RUST_OFFLINE) --manifest-path=$(srcdir)/src/librnsslapd/Cargo.toml \ + $(CARGO_FLAGS) --verbose -- $(RUSTC_FLAGS) $(RUSTC_LINK_FLAGS) + +# The header needs the lib build first. 
+rust-nsslapd-private.h: @abs_top_builddir@/rs/rnsslapd/@rust_target_dir@/librnsslapd.a
+
+# Shared source list for the slapi_r_plugin Rust bindings; each Rust plugin
+# below folds it into its own *_SOURCES, so a change here rebuilds all of them.
+libslapi_r_plugin_SOURCES = \
+	src/slapi_r_plugin/src/backend.rs \
+	src/slapi_r_plugin/src/ber.rs \
+	src/slapi_r_plugin/src/charray.rs \
+	src/slapi_r_plugin/src/constants.rs \
+	src/slapi_r_plugin/src/dn.rs \
+	src/slapi_r_plugin/src/entry.rs \
+	src/slapi_r_plugin/src/error.rs \
+	src/slapi_r_plugin/src/log.rs \
+	src/slapi_r_plugin/src/macros.rs \
+	src/slapi_r_plugin/src/pblock.rs \
+	src/slapi_r_plugin/src/plugin.rs \
+	src/slapi_r_plugin/src/search.rs \
+	src/slapi_r_plugin/src/syntax_plugin.rs \
+	src/slapi_r_plugin/src/task.rs \
+	src/slapi_r_plugin/src/value.rs \
+	src/slapi_r_plugin/src/lib.rs
+
+# Build rust ns-slapd components as a library.
+ENTRYUUID_LIB = @abs_top_builddir@/rs/entryuuid/@rust_target_dir@/libentryuuid.a
+
+libentryuuid_la_SOURCES = \
+	src/plugins/entryuuid/Cargo.toml \
+	src/plugins/entryuuid/src/lib.rs \
+	$(libslapi_r_plugin_SOURCES)
+
+# Path fixed: was src/plugin/... (missing "s"), which does not exist and
+# breaks shipping the lock file through EXTRA_DIST below.
+libentryuuid_la_EXTRA = src/plugins/entryuuid/Cargo.lock
+
+@abs_top_builddir@/rs/entryuuid/@rust_target_dir@/libentryuuid.a: $(libentryuuid_la_SOURCES) libslapd.la libentryuuid.la
+	RUST_BACKTRACE=1 RUSTC_BOOTSTRAP=1 \
+	CARGO_TARGET_DIR=$(abs_top_builddir)/rs/entryuuid \
+	SLAPD_DYLIB_DIR=$(abs_top_builddir)/ \
+	SLAPD_HEADER_DIR=$(abs_top_builddir)/ \
+	cargo rustc $(RUST_OFFLINE) --manifest-path=$(srcdir)/src/plugins/entryuuid/Cargo.toml \
+	$(CARGO_FLAGS) --verbose -- $(RUSTC_FLAGS) $(RUSTC_LINK_FLAGS)
+	cp $(ENTRYUUID_LIB) @abs_top_builddir@/.libs/libentryuuid.a
+
+ENTRYUUID_SYNTAX_LIB = @abs_top_builddir@/rs/entryuuid_syntax/@rust_target_dir@/libentryuuid_syntax.a
+
+libentryuuid_syntax_la_SOURCES = \
+	src/plugins/entryuuid_syntax/Cargo.toml \
+	src/plugins/entryuuid_syntax/src/lib.rs \
+	$(libslapi_r_plugin_SOURCES)
+
+# Path fixed: was src/plugin/... (missing "s"); see libentryuuid_la_EXTRA.
+libentryuuid_syntax_la_EXTRA = src/plugins/entryuuid_syntax/Cargo.lock
+
+@abs_top_builddir@/rs/entryuuid_syntax/@rust_target_dir@/libentryuuid_syntax.a: $(libentryuuid_syntax_la_SOURCES) libslapd.la libentryuuid_syntax.la
+	RUST_BACKTRACE=1 RUSTC_BOOTSTRAP=1 \
+	CARGO_TARGET_DIR=$(abs_top_builddir)/rs/entryuuid_syntax \
+	SLAPD_DYLIB_DIR=$(abs_top_builddir)/ \
+	SLAPD_HEADER_DIR=$(abs_top_builddir)/ \
+	cargo rustc $(RUST_OFFLINE) --manifest-path=$(srcdir)/src/plugins/entryuuid_syntax/Cargo.toml \
+	$(CARGO_FLAGS) --verbose -- $(RUSTC_FLAGS) $(RUSTC_LINK_FLAGS)
+	cp $(ENTRYUUID_SYNTAX_LIB) @abs_top_builddir@/.libs/libentryuuid_syntax.a
+
+# == pwdchan
+
+PWDCHAN_LIB = @abs_top_builddir@/rs/pwdchan/@rust_target_dir@/libpwdchan.a
+
+libpwdchan_la_SOURCES = \
+	src/plugins/pwdchan/Cargo.toml \
+	src/plugins/pwdchan/src/lib.rs \
+	$(libslapi_r_plugin_SOURCES)
+
+# Path fixed: was src/plugin/... (missing "s"); see libentryuuid_la_EXTRA.
+libpwdchan_la_EXTRA = src/plugins/pwdchan/Cargo.lock
+
+@abs_top_builddir@/rs/pwdchan/@rust_target_dir@/libpwdchan.a: $(libpwdchan_la_SOURCES) libslapd.la libpwdchan.la
+	RUST_BACKTRACE=1 RUSTC_BOOTSTRAP=1 \
+	CARGO_TARGET_DIR=$(abs_top_builddir)/rs/pwdchan \
+	SLAPD_DYLIB_DIR=$(abs_top_builddir)/ \
+	SLAPD_HEADER_DIR=$(abs_top_builddir)/ \
+	cargo rustc $(RUST_OFFLINE) --manifest-path=$(srcdir)/src/plugins/pwdchan/Cargo.toml \
+	$(CARGO_FLAGS) --verbose -- $(RUSTC_FLAGS) $(RUSTC_LINK_FLAGS)
+	cp $(PWDCHAN_LIB) @abs_top_builddir@/.libs/libpwdchan.a
+
+# == pwdchan
+
+# Ship the Rust plugin sources and Cargo.lock files in the dist tarball.
+EXTRA_DIST = $(librslapd_la_SOURCES) $(librslapd_la_EXTRA) \
+	$(libentryuuid_la_SOURCES) $(libentryuuid_la_EXTRA) \
+	$(libentryuuid_syntax_la_SOURCES) $(libentryuuid_syntax_la_EXTRA) \
+	$(libpwdchan_la_SOURCES) $(libpwdchan_la_EXTRA) \
+	$(librnsslapd_la_SOURCES) $(librnsslapd_la_EXTRA)
+
+## Run rust tests
+# cargo does not support offline tests :(
+if RUST_ENABLE_OFFLINE
+else
+if enable_asan
+# Distro rust tends not to have proper asan support w/ clang
+else
+check-local:
+	for thing in "librslapd" "librnsslapd" ; do \
+		echo \
+		LD_LIBRARY_PATH=$(abs_top_builddir)/.libs \
+		RUSTFLAGS="$(RUSTC_FLAGS)" \
+		RUST_BACKTRACE=1 RUSTC_BOOTSTRAP=1 \
+		
CARGO_TARGET_DIR=$(abs_top_builddir)/rs/test/$${thing} \ + SLAPD_DYLIB_DIR=$(abs_top_builddir)/ \ + SLAPD_HEADER_DIR=$(abs_top_builddir)/ \ + cargo test $(RUST_OFFLINE) \ + --manifest-path=$(srcdir)/src/$${thing}/Cargo.toml -- --nocapture ; \ + LD_LIBRARY_PATH=$(abs_top_builddir)/.libs \ + RUSTFLAGS="$(RUSTC_FLAGS)" \ + RUST_BACKTRACE=1 RUSTC_BOOTSTRAP=1 \ + CARGO_TARGET_DIR=$(abs_top_builddir)/rs/test/$${thing} \ + SLAPD_DYLIB_DIR=$(abs_top_builddir)/ \ + SLAPD_HEADER_DIR=$(abs_top_builddir)/ \ + cargo test $(RUST_OFFLINE) \ + --manifest-path=$(srcdir)/src/$${thing}/Cargo.toml -- --nocapture ; \ + done +# Plugin tests are a little different + for thing in "plugins/pwdchan" ; do \ + echo \ + LD_LIBRARY_PATH=$(abs_top_builddir)/.libs \ + RUSTFLAGS="$(RUSTC_FLAGS)" \ + RUST_BACKTRACE=1 RUSTC_BOOTSTRAP=1 \ + CARGO_TARGET_DIR=$(abs_top_builddir)/rs/test/$${thing} \ + SLAPD_DYLIB_DIR=$(abs_top_builddir)/ \ + SLAPD_HEADER_DIR=$(abs_top_builddir)/ \ + cargo test $(RUST_OFFLINE) --features=slapi_r_plugin/test_log_direct \ + --manifest-path=$(srcdir)/src/$${thing}/Cargo.toml -- --nocapture ; \ + LD_LIBRARY_PATH=$(abs_top_builddir)/.libs \ + RUSTFLAGS="$(RUSTC_FLAGS)" \ + RUST_BACKTRACE=1 RUSTC_BOOTSTRAP=1 \ + CARGO_TARGET_DIR=$(abs_top_builddir)/rs/test/$${thing} \ + SLAPD_DYLIB_DIR=$(abs_top_builddir)/ \ + SLAPD_HEADER_DIR=$(abs_top_builddir)/ \ + cargo test $(RUST_OFFLINE) --features=slapi_r_plugin/test_log_direct \ + --manifest-path=$(srcdir)/src/$${thing}/Cargo.toml -- --nocapture ; \ + done +endif +endif + +#------------------------ +# libns-dshttpd +#------------------------ +libns_dshttpd_la_SOURCES = lib/libaccess/access_plhash.cpp \ + lib/libaccess/acl.tab.cpp \ + lib/libaccess/acl.yy.cpp \ + lib/libaccess/aclcache.cpp \ + lib/libaccess/aclerror.cpp \ + lib/libaccess/acleval.cpp \ + lib/libaccess/aclflush.cpp \ + lib/libaccess/aclspace.cpp \ + lib/libaccess/acltools.cpp \ + lib/libaccess/aclutil.cpp \ + lib/libaccess/authdb.cpp \ + lib/libaccess/lasdns.cpp \ + 
lib/libaccess/lasgroup.cpp \ + lib/libaccess/lasip.cpp \ + lib/libaccess/lastod.cpp \ + lib/libaccess/lasuser.cpp \ + lib/libaccess/method.cpp \ + lib/libaccess/nseframe.cpp \ + lib/libaccess/nsautherr.cpp \ + lib/libaccess/oneeval.cpp \ + lib/libaccess/register.cpp \ + lib/libaccess/symbols.cpp \ + lib/libaccess/usi.cpp \ + lib/libaccess/usrcache.cpp \ + lib/libadmin/error.c \ + lib/libadmin/template.c \ + lib/libadmin/util.c \ + lib/base/crit.cpp \ + lib/base/dnsdmain.cpp \ + lib/base/ereport.cpp \ + lib/base/file.cpp \ + lib/base/fsmutex.cpp \ + lib/base/nscperror.c \ + lib/base/plist.cpp \ + lib/base/pool.cpp \ + lib/base/shexp.cpp \ + lib/base/system.cpp \ + lib/base/systhr.cpp \ + lib/base/util.cpp \ + lib/libsi18n/getstrprop.c \ + lib/libsi18n/reshash.c \ + lib/libsi18n/txtfile.c + +libns_dshttpd_la_CPPFLAGS = -I$(srcdir)/include/base $(AM_CPPFLAGS) $(DSPLUGIN_CPPFLAGS) -I$(srcdir)/lib/ldaputil +libns_dshttpd_la_LIBADD = libslapd.la libldaputil.la $(LDAPSDK_LINK) $(SASL_LINK) $(NSS_LINK) $(NSPR_LINK) +if CLANG_ENABLE +# This avoids issues with stderr being double provided with clang + asan. 
+libns_dshttpd_la_LDFLAGS = $(AM_LDFLAGS) -static-libgcc +else +libns_dshttpd_la_LDFLAGS = $(AM_LDFLAGS) +endif + +#------------------------ +# libslapd +#------------------------ +libslapd_la_SOURCES = ldap/servers/slapd/add.c \ + ldap/servers/slapd/agtmmap.c \ + ldap/servers/slapd/apibroker.c \ + ldap/servers/slapd/attr.c \ + ldap/servers/slapd/attrlist.c \ + ldap/servers/slapd/attrsyntax.c \ + ldap/servers/slapd/auditlog.c \ + ldap/servers/slapd/ava.c \ + ldap/servers/slapd/backend.c \ + ldap/servers/slapd/backend_manager.c \ + ldap/servers/slapd/bitset.c \ + ldap/servers/slapd/bulk_import.c \ + ldap/servers/slapd/charray.c \ + ldap/servers/slapd/ch_malloc.c \ + ldap/servers/slapd/computed.c \ + ldap/servers/slapd/control.c \ + ldap/servers/slapd/configdse.c \ + ldap/servers/slapd/counters.c \ + ldap/servers/slapd/csn.c \ + ldap/servers/slapd/csngen.c \ + ldap/servers/slapd/csnset.c \ + ldap/servers/slapd/defbackend.c \ + ldap/servers/slapd/delete.c \ + ldap/servers/slapd/dl.c \ + ldap/servers/slapd/dn.c \ + ldap/servers/slapd/dse.c \ + ldap/servers/slapd/dynalib.c \ + ldap/servers/slapd/entry.c \ + ldap/servers/slapd/entrywsi.c \ + ldap/servers/slapd/errormap.c \ + ldap/servers/slapd/eventq.c \ + ldap/servers/slapd/eventq-deprecated.c \ + ldap/servers/slapd/factory.c \ + ldap/servers/slapd/features.c \ + ldap/servers/slapd/fileio.c \ + ldap/servers/slapd/filter.c \ + ldap/servers/slapd/filtercmp.c \ + ldap/servers/slapd/filterentry.c \ + ldap/servers/slapd/generation.c \ + ldap/servers/slapd/getfilelist.c \ + ldap/servers/slapd/haproxy.c \ + ldap/servers/slapd/ldapi.c \ + ldap/servers/slapd/ldaputil.c \ + ldap/servers/slapd/lenstr.c \ + ldap/servers/slapd/libglobs.c \ + ldap/servers/slapd/localhost.c \ + ldap/servers/slapd/log.c \ + ldap/servers/slapd/mapping_tree.c \ + ldap/servers/slapd/match.c \ + ldap/servers/slapd/modify.c \ + ldap/servers/slapd/modrdn.c \ + ldap/servers/slapd/modutil.c \ + ldap/servers/slapd/object.c \ + ldap/servers/slapd/objset.c \ + 
ldap/servers/slapd/operation.c \ + ldap/servers/slapd/opshared.c \ + ldap/servers/slapd/pagedresults.c \ + ldap/servers/slapd/pblock.c \ + ldap/servers/slapd/plugin.c \ + ldap/servers/slapd/plugin_acl.c \ + ldap/servers/slapd/plugin_mmr.c \ + ldap/servers/slapd/plugin_internal_op.c \ + ldap/servers/slapd/plugin_mr.c \ + ldap/servers/slapd/plugin_role.c \ + ldap/servers/slapd/plugin_syntax.c \ + ldap/servers/slapd/protect_db.c \ + ldap/servers/slapd/proxyauth.c \ + ldap/servers/slapd/pw.c \ + ldap/servers/slapd/pw_retry.c \ + ldap/servers/slapd/rdn.c \ + ldap/servers/slapd/referral.c \ + ldap/servers/slapd/regex.c \ + ldap/servers/slapd/resourcelimit.c \ + ldap/servers/slapd/result.c \ + ldap/servers/slapd/rewriters.c \ + ldap/servers/slapd/sasl_map.c \ + ldap/servers/slapd/schema.c \ + ldap/servers/slapd/schemaparse.c \ + ldap/servers/slapd/security_wrappers.c \ + ldap/servers/slapd/slapd_plhash.c \ + ldap/servers/slapd/slapi_counter.c \ + ldap/servers/slapd/slapi-memberof.c \ + ldap/servers/slapd/slapi2runtime.c \ + ldap/servers/slapd/snmp_collator.c \ + ldap/servers/slapd/sort.c \ + ldap/servers/slapd/ssl.c \ + ldap/servers/slapd/str2filter.c \ + ldap/servers/slapd/subentry.c \ + ldap/servers/slapd/task.c \ + ldap/servers/slapd/time.c \ + ldap/servers/slapd/thread_data.c \ + ldap/servers/slapd/uniqueid.c \ + ldap/servers/slapd/uniqueidgen.c \ + ldap/servers/slapd/upgrade.c \ + ldap/servers/slapd/utf8.c \ + ldap/servers/slapd/utf8compare.c \ + ldap/servers/slapd/util.c \ + ldap/servers/slapd/uuid.c \ + ldap/servers/slapd/value.c \ + ldap/servers/slapd/valueset.c \ + ldap/servers/slapd/vattr.c \ + ldap/servers/slapd/slapi_pal.c \ + src/libsds/external/csiphash/csiphash.c \ + $(GETSOCKETPEER) \ + $(libavl_a_SOURCES) + +libslapd_la_CPPFLAGS = $(AM_CPPFLAGS) $(DSPLUGIN_CPPFLAGS) $(SASL_CFLAGS) $(DB_INC) $(KERBEROS_CFLAGS) $(PCRE_CFLAGS) $(SVRCORE_INCLUDES) +libslapd_la_LIBADD = $(LDAPSDK_LINK) $(SASL_LINK) $(NSS_LINK) $(NSPR_LINK) $(KERBEROS_LIBS) $(PCRE_LIBS) 
$(THREADLIB) $(SYSTEMD_LIBS) libsvrcore.la $(RSLAPD_LIB) $(OPENSSL_LIBS) +# If asan is enabled, it creates special libcrypt interceptors. However, they are +# detected by the first load of libasan at runtime, and what is in the linked lib +# so we need libcrypt to be present as soon as libasan is loaded for the interceptors +# to function. Since ns-slapd links libslapd, this is pulled at startup, which allows +# pwdstorage to be asan checked with libcrypt. +if enable_asan +libslapd_la_LIBADD += $(LIBCRYPT) +endif +libslapd_la_LDFLAGS = $(AM_LDFLAGS) $(SLAPD_LDFLAGS) + +#//////////////////////////////////////////////////////////////// +# +# Plugins +# +#//////////////////////////////////////////////////////////////// +#------------------------ +# libback-ldbm +#------------------------ +libback_ldbm_la_SOURCES = ldap/servers/slapd/back-ldbm/ancestorid.c \ + ldap/servers/slapd/back-ldbm/archive.c \ + ldap/servers/slapd/back-ldbm/backentry.c \ + ldap/servers/slapd/back-ldbm/cache.c \ + ldap/servers/slapd/back-ldbm/cleanup.c \ + ldap/servers/slapd/back-ldbm/close.c \ + ldap/servers/slapd/back-ldbm/dbimpl.c \ + ldap/servers/slapd/back-ldbm/dblayer.c \ + ldap/servers/slapd/back-ldbm/dbsize.c \ + ldap/servers/slapd/back-ldbm/dn2entry.c \ + ldap/servers/slapd/back-ldbm/entrystore.c \ + ldap/servers/slapd/back-ldbm/filterindex.c \ + ldap/servers/slapd/back-ldbm/findentry.c \ + ldap/servers/slapd/back-ldbm/haschildren.c \ + ldap/servers/slapd/back-ldbm/id2entry.c \ + ldap/servers/slapd/back-ldbm/idl.c \ + ldap/servers/slapd/back-ldbm/idl_shim.c \ + ldap/servers/slapd/back-ldbm/idl_new.c \ + ldap/servers/slapd/back-ldbm/idl_set.c \ + ldap/servers/slapd/back-ldbm/idl_common.c \ + ldap/servers/slapd/back-ldbm/import.c \ + ldap/servers/slapd/back-ldbm/index.c \ + ldap/servers/slapd/back-ldbm/init.c \ + ldap/servers/slapd/back-ldbm/instance.c \ + ldap/servers/slapd/back-ldbm/ldbm_abandon.c \ + ldap/servers/slapd/back-ldbm/ldbm_add.c \ + ldap/servers/slapd/back-ldbm/ldbm_attr.c \ 
+ ldap/servers/slapd/back-ldbm/ldbm_attrcrypt.c \ + ldap/servers/slapd/back-ldbm/ldbm_attrcrypt_config.c \ + ldap/servers/slapd/back-ldbm/ldbm_bind.c \ + ldap/servers/slapd/back-ldbm/ldbm_compare.c \ + ldap/servers/slapd/back-ldbm/ldbm_config.c \ + ldap/servers/slapd/back-ldbm/ldbm_delete.c \ + ldap/servers/slapd/back-ldbm/ldbm_entryrdn.c \ + ldap/servers/slapd/back-ldbm/ldbm_index_config.c \ + ldap/servers/slapd/back-ldbm/ldbm_instance_config.c \ + ldap/servers/slapd/back-ldbm/ldbm_modify.c \ + ldap/servers/slapd/back-ldbm/ldbm_modrdn.c \ + ldap/servers/slapd/back-ldbm/ldbm_search.c \ + ldap/servers/slapd/back-ldbm/ldbm_unbind.c \ + ldap/servers/slapd/back-ldbm/ldbm_usn.c \ + ldap/servers/slapd/back-ldbm/ldif2ldbm.c \ + ldap/servers/slapd/back-ldbm/dbverify.c \ + ldap/servers/slapd/back-ldbm/matchrule.c \ + ldap/servers/slapd/back-ldbm/misc.c \ + ldap/servers/slapd/back-ldbm/nextid.c \ + ldap/servers/slapd/back-ldbm/parents.c \ + ldap/servers/slapd/back-ldbm/rmdb.c \ + ldap/servers/slapd/back-ldbm/seq.c \ + ldap/servers/slapd/back-ldbm/sort.c \ + ldap/servers/slapd/back-ldbm/start.c \ + ldap/servers/slapd/back-ldbm/uniqueid2entry.c \ + ldap/servers/slapd/back-ldbm/vlv.c \ + ldap/servers/slapd/back-ldbm/vlv_key.c \ + ldap/servers/slapd/back-ldbm/vlv_srch.c \ + ldap/servers/slapd/back-ldbm/db-bdb/bdb_config.c \ + ldap/servers/slapd/back-ldbm/db-bdb/bdb_instance_config.c \ + ldap/servers/slapd/back-ldbm/db-bdb/bdb_verify.c \ + ldap/servers/slapd/back-ldbm/db-bdb/bdb_layer.c \ + ldap/servers/slapd/back-ldbm/db-bdb/bdb_misc.c \ + ldap/servers/slapd/back-ldbm/db-bdb/bdb_perfctrs.c \ + ldap/servers/slapd/back-ldbm/db-bdb/bdb_upgrade.c \ + ldap/servers/slapd/back-ldbm/db-bdb/bdb_version.c \ + ldap/servers/slapd/back-ldbm/db-bdb/bdb_monitor.c \ + ldap/servers/slapd/back-ldbm/db-bdb/bdb_ldif2db.c \ + ldap/servers/slapd/back-ldbm/db-bdb/bdb_import.c \ + ldap/servers/slapd/back-ldbm/db-bdb/bdb_import_threads.c \ + ldap/servers/slapd/back-ldbm/db-mdb/mdb_config.c \ + 
ldap/servers/slapd/back-ldbm/db-mdb/mdb_debug.c \ + ldap/servers/slapd/back-ldbm/db-mdb/mdb_instance.c \ + ldap/servers/slapd/back-ldbm/db-mdb/mdb_instance_config.c \ + ldap/servers/slapd/back-ldbm/db-mdb/mdb_verify.c \ + ldap/servers/slapd/back-ldbm/db-mdb/mdb_txn.c \ + ldap/servers/slapd/back-ldbm/db-mdb/mdb_layer.c \ + ldap/servers/slapd/back-ldbm/db-mdb/mdb_misc.c \ + ldap/servers/slapd/back-ldbm/db-mdb/mdb_perfctrs.c \ + ldap/servers/slapd/back-ldbm/db-mdb/mdb_upgrade.c \ + ldap/servers/slapd/back-ldbm/db-mdb/mdb_monitor.c \ + ldap/servers/slapd/back-ldbm/db-mdb/mdb_ldif2db.c \ + ldap/servers/slapd/back-ldbm/db-mdb/mdb_import.c \ + ldap/servers/slapd/back-ldbm/db-mdb/mdb_import_threads.c + + + +libback_ldbm_la_CPPFLAGS = $(AM_CPPFLAGS) $(DSPLUGIN_CPPFLAGS) $(DB_INC) +libback_ldbm_la_DEPENDENCIES = libslapd.la +libback_ldbm_la_LIBADD = libslapd.la $(DB_LINK) $(LDAPSDK_LINK) $(NSPR_LINK) +libback_ldbm_la_LDFLAGS = -avoid-version + +#------------------------ +# libacctpolicy-plugin +#------------------------ +libacctpolicy_plugin_la_SOURCES = ldap/servers/plugins/acctpolicy/acct_config.c \ + ldap/servers/plugins/acctpolicy/acct_init.c \ + ldap/servers/plugins/acctpolicy/acct_plugin.c \ + ldap/servers/plugins/acctpolicy/acct_util.c + +libacctpolicy_plugin_la_CPPFLAGS = $(AM_CPPFLAGS) $(DSPLUGIN_CPPFLAGS) +libacctpolicy_plugin_la_LIBADD = libslapd.la $(NSPR_LINK) +libacctpolicy_plugin_la_DEPENDENCIES = libslapd.la +libacctpolicy_plugin_la_LDFLAGS = -avoid-version + +#------------------------ +# libacctusability-plugin +#------------------------ +libacctusability_plugin_la_SOURCES = ldap/servers/plugins/acct_usability/acct_usability.c + +libacctusability_plugin_la_CPPFLAGS = $(AM_CPPFLAGS) $(DSPLUGIN_CPPFLAGS) +libacctusability_plugin_la_LIBADD = libslapd.la $(NSPR_LINK) +libacctusability_plugin_la_DEPENDENCIES = libslapd.la +libacctusability_plugin_la_LDFLAGS = -avoid-version + +#------------------------ +# libacl-plugin +#------------------------ 
+libacl_plugin_la_SOURCES = ldap/servers/plugins/acl/acl.c \ + ldap/servers/plugins/acl/acl_ext.c \ + ldap/servers/plugins/acl/aclanom.c \ + ldap/servers/plugins/acl/acleffectiverights.c \ + ldap/servers/plugins/acl/aclgroup.c \ + ldap/servers/plugins/acl/aclinit.c \ + ldap/servers/plugins/acl/acllas.c \ + ldap/servers/plugins/acl/acllist.c \ + ldap/servers/plugins/acl/aclparse.c \ + ldap/servers/plugins/acl/aclplugin.c \ + ldap/servers/plugins/acl/aclutil.c + +libacl_plugin_la_CPPFLAGS = -I$(srcdir)/include/libaccess $(AM_CPPFLAGS) $(DSPLUGIN_CPPFLAGS) +libacl_plugin_la_DEPENDENCIES = libslapd.la libns-dshttpd.la +libacl_plugin_la_LIBADD = libslapd.la libns-dshttpd.la $(LDAPSDK_LINK) $(NSPR_LINK) $(LIBCSTD) $(LIBCRUN) +libacl_plugin_la_LDFLAGS = -avoid-version +# libacl_plugin_la_LINK = $(CXXLINK) -avoid-version + +#------------------------ +# libalias-entries-plugin +#------------------------ +libalias_entries_plugin_la_SOURCES = ldap/servers/plugins/alias_entries/alias-entries.c + +libalias_entries_plugin_la_CPPFLAGS = $(AM_CPPFLAGS) $(DSPLUGIN_CPPFLAGS) +libalias_entries_plugin_la_LIBADD = libslapd.la $(NSPR_LINK) +libalias_entries_plugin_la_DEPENDENCIES = libslapd.la +libalias_entries_plugin_la_LDFLAGS = -avoid-version + +#------------------------ +# libaddn-plugin +#------------------------ +libaddn_plugin_la_SOURCES = ldap/servers/plugins/addn/addn.c + +libaddn_plugin_la_CPPFLAGS = $(AM_CPPFLAGS) $(DSPLUGIN_CPPFLAGS) +libaddn_plugin_la_LIBADD = libslapd.la $(NSPR_LINK) +libaddn_plugin_la_DEPENDENCIES = libslapd.la +libaddn_plugin_la_LDFLAGS = -avoid-version + +#------------------------ +# librootdn-access-plugin +#------------------------ +# +librootdn_access_plugin_la_SOURCES = ldap/servers/plugins/rootdn_access/rootdn_access.c + +librootdn_access_plugin_la_CPPFLAGS = $(AM_CPPFLAGS) $(DSPLUGIN_CPPFLAGS) +librootdn_access_plugin_la_LIBADD = libslapd.la $(NSPR_LINK) +librootdn_access_plugin_la_DEPENDENCIES = libslapd.la +librootdn_access_plugin_la_LDFLAGS = 
-avoid-version + + +#------------------------ +# libautomember-plugin +#------------------------ +libautomember_plugin_la_SOURCES = ldap/servers/plugins/automember/automember.c + +libautomember_plugin_la_CPPFLAGS = $(AM_CPPFLAGS) $(DSPLUGIN_CPPFLAGS) +libautomember_plugin_la_LIBADD = libslapd.la $(NSPR_LINK) +libautomember_plugin_la_DEPENDENCIES = libslapd.la +libautomember_plugin_la_LDFLAGS = -avoid-version + +#------------------------ +# libattr-unique-plugin +#------------------------ +libattr_unique_plugin_la_SOURCES = ldap/servers/plugins/uiduniq/7bit.c \ + ldap/servers/plugins/uiduniq/uid.c \ + ldap/servers/plugins/uiduniq/utils.c + +libattr_unique_plugin_la_CPPFLAGS = $(AM_CPPFLAGS) $(DSPLUGIN_CPPFLAGS) +libattr_unique_plugin_la_LIBADD = libslapd.la $(NSPR_LINK) +libattr_unique_plugin_la_DEPENDENCIES = libslapd.la +libattr_unique_plugin_la_LDFLAGS = -avoid-version + +#------------------------ +# libbitwise-plugin +#------------------------ +libbitwise_plugin_la_SOURCES = ldap/servers/plugins/bitwise/bitwise.c + +libbitwise_plugin_la_CPPFLAGS = $(AM_CPPFLAGS) $(DSPLUGIN_CPPFLAGS) +libbitwise_plugin_la_LIBADD = libslapd.la +libbitwise_plugin_la_DEPENDENCIES = libslapd.la +libbitwise_plugin_la_LDFLAGS = -avoid-version + +#------------------------ +# libchainingdb-plugin +#------------------------ +libchainingdb_plugin_la_SOURCES = ldap/servers/plugins/chainingdb/cb_abandon.c \ + ldap/servers/plugins/chainingdb/cb_acl.c \ + ldap/servers/plugins/chainingdb/cb_add.c \ + ldap/servers/plugins/chainingdb/cb_bind.c \ + ldap/servers/plugins/chainingdb/cb_cleanup.c \ + ldap/servers/plugins/chainingdb/cb_close.c \ + ldap/servers/plugins/chainingdb/cb_compare.c \ + ldap/servers/plugins/chainingdb/cb_config.c \ + ldap/servers/plugins/chainingdb/cb_conn_stateless.c \ + ldap/servers/plugins/chainingdb/cb_controls.c \ + ldap/servers/plugins/chainingdb/cb_debug.c \ + ldap/servers/plugins/chainingdb/cb_delete.c \ + ldap/servers/plugins/chainingdb/cb_init.c \ + 
ldap/servers/plugins/chainingdb/cb_instance.c \ + ldap/servers/plugins/chainingdb/cb_modify.c \ + ldap/servers/plugins/chainingdb/cb_modrdn.c \ + ldap/servers/plugins/chainingdb/cb_monitor.c \ + ldap/servers/plugins/chainingdb/cb_schema.c \ + ldap/servers/plugins/chainingdb/cb_search.c \ + ldap/servers/plugins/chainingdb/cb_start.c \ + ldap/servers/plugins/chainingdb/cb_temp.c \ + ldap/servers/plugins/chainingdb/cb_test.c \ + ldap/servers/plugins/chainingdb/cb_unbind.c \ + ldap/servers/plugins/chainingdb/cb_utils.c + +libchainingdb_plugin_la_CPPFLAGS = $(AM_CPPFLAGS) $(DSPLUGIN_CPPFLAGS) +libchainingdb_plugin_la_LIBADD = libslapd.la $(LDAPSDK_LINK) $(NSPR_LINK) +libchainingdb_plugin_la_DEPENDENCIES = libslapd.la +libchainingdb_plugin_la_LDFLAGS = -avoid-version + +#------------------------ +# libcollation-plugin +#------------------------ +libcollation_plugin_la_SOURCES = ldap/servers/plugins/collation/collate.c \ + ldap/servers/plugins/collation/config.c \ + ldap/servers/plugins/collation/orfilter.c + +libcollation_plugin_la_CPPFLAGS = $(AM_CPPFLAGS) $(DSPLUGIN_CPPFLAGS) $(ICU_CFLAGS) +libcollation_plugin_la_LIBADD = libslapd.la $(LDAPSDK_LINK) $(NSPR_LINK) $(ICU_LIBS) $(LIBCSTD) $(LIBCRUN) +libcollation_plugin_la_DEPENDENCIES = libslapd.la +libcollation_plugin_la_LDFLAGS = -avoid-version +# libcollation_plugin_la_LINK = $(CXXLINK) -avoid-version + +#------------------------ +# libcos-plugin +#------------------------ +libcos_plugin_la_SOURCES = ldap/servers/plugins/cos/cos.c \ + ldap/servers/plugins/cos/cos_cache.c + +libcos_plugin_la_CPPFLAGS = $(AM_CPPFLAGS) $(DSPLUGIN_CPPFLAGS) +libcos_plugin_la_LIBADD = libslapd.la $(LDAPSDK_LINK) $(NSPR_LINK) +libcos_plugin_la_DEPENDENCIES = libslapd.la +libcos_plugin_la_LDFLAGS = -avoid-version + +#------------------------ +# libderef-plugin +#----------------------- +libderef_plugin_la_SOURCES = ldap/servers/plugins/deref/deref.c + +libderef_plugin_la_CPPFLAGS = $(AM_CPPFLAGS) $(DSPLUGIN_CPPFLAGS) 
+libderef_plugin_la_LIBADD = libslapd.la $(LDAPSDK_LINK) $(NSPR_LINK) +libderef_plugin_la_DEPENDENCIES = libslapd.la +libderef_plugin_la_LDFLAGS = -avoid-version + +#------------------------ +# libentryuuid-syntax-plugin +#----------------------- +libentryuuid_syntax_plugin_la_SOURCES = src/slapi_r_plugin/src/init.c +libentryuuid_syntax_plugin_la_LIBADD = libslapd.la $(LDAPSDK_LINK) $(NSPR_LINK) -lentryuuid_syntax +libentryuuid_syntax_plugin_la_DEPENDENCIES = libslapd.la $(ENTRYUUID_SYNTAX_LIB) +libentryuuid_syntax_plugin_la_LDFLAGS = -avoid-version + +#------------------------ +# libentryuuid-plugin +#----------------------- +libentryuuid_plugin_la_SOURCES = src/slapi_r_plugin/src/init.c +libentryuuid_plugin_la_LIBADD = libslapd.la $(LDAPSDK_LINK) $(NSPR_LINK) -lentryuuid +libentryuuid_plugin_la_DEPENDENCIES = libslapd.la $(ENTRYUUID_LIB) +libentryuuid_plugin_la_LDFLAGS = -avoid-version + +#------------------------ +# libpwdchan-plugin +#----------------------- +libpwdchan_plugin_la_SOURCES = src/slapi_r_plugin/src/init.c +libpwdchan_plugin_la_LIBADD = libslapd.la $(LDAPSDK_LINK) $(NSPR_LINK) -lpwdchan +libpwdchan_plugin_la_DEPENDENCIES = libslapd.la $(PWDCHAN_LIB) +libpwdchan_plugin_la_LDFLAGS = -avoid-version + +#------------------------ +# libpbe-plugin +#----------------------- +libpbe_plugin_la_SOURCES = ldap/servers/plugins/rever/pbe.c \ + ldap/servers/plugins/rever/rever.c + +libpbe_plugin_la_CPPFLAGS = $(AM_CPPFLAGS) $(DSPLUGIN_CPPFLAGS) $(SVRCORE_INCLUDES) +libpbe_plugin_la_LIBADD = libslapd.la libsvrcore.la $(NSS_LINK) +libpbe_plugin_la_DEPENDENCIES = libslapd.la +libpbe_plugin_la_LDFLAGS = -avoid-version + +#------------------------ +# libdistrib-plugin +#------------------------ +libdistrib_plugin_la_SOURCES = ldap/servers/plugins/distrib/distrib.c + +libdistrib_plugin_la_CPPFLAGS = $(AM_CPPFLAGS) $(DSPLUGIN_CPPFLAGS) +libdistrib_plugin_la_LIBADD = libslapd.la +libdistrib_plugin_la_DEPENDENCIES = libslapd.la +libdistrib_plugin_la_LDFLAGS = 
-avoid-version + +#------------------------ +# libdna-plugin +#------------------------ +libdna_plugin_la_SOURCES = ldap/servers/plugins/dna/dna.c + +libdna_plugin_la_CPPFLAGS = $(AM_CPPFLAGS) $(DSPLUGIN_CPPFLAGS) +libdna_plugin_la_LIBADD = libslapd.la $(LDAPSDK_LINK) $(NSPR_LINK) +libdna_plugin_la_DEPENDENCIES = libslapd.la +libdna_plugin_la_LDFLAGS = -avoid-version + +#------------------------ +# liblinkedattrs-plugin +#------------------------ +liblinkedattrs_plugin_la_SOURCES = ldap/servers/plugins/linkedattrs/fixup_task.c \ + ldap/servers/plugins/linkedattrs/linked_attrs.c + +liblinkedattrs_plugin_la_CPPFLAGS = $(AM_CPPFLAGS) $(DSPLUGIN_CPPFLAGS) +liblinkedattrs_plugin_la_LIBADD = libslapd.la $(NSPR_LINK) +liblinkedattrs_plugin_la_DEPENDENCIES = libslapd.la +liblinkedattrs_plugin_la_LDFLAGS = -avoid-version + +#------------------------ +# libmanagedentries-plugin +#------------------------ +libmanagedentries_plugin_la_SOURCES = ldap/servers/plugins/mep/mep.c + +libmanagedentries_plugin_la_CPPFLAGS = $(AM_CPPFLAGS) $(DSPLUGIN_CPPFLAGS) +libmanagedentries_plugin_la_LIBADD = libslapd.la $(NSPR_LINK) +libmanagedentries_plugin_la_DEPENDENCIES = libslapd.la +libmanagedentries_plugin_la_LDFLAGS = -avoid-version + +#------------------------ +# libmemberof-plugin +#------------------------ +libmemberof_plugin_la_SOURCES= ldap/servers/plugins/memberof/memberof.c \ + ldap/servers/plugins/memberof/memberof_config.c + +libmemberof_plugin_la_CPPFLAGS = $(AM_CPPFLAGS) $(DSPLUGIN_CPPFLAGS) +libmemberof_plugin_la_LIBADD = libslapd.la $(LDAPSDK_LINK) $(NSPR_LINK) +libmemberof_plugin_la_DEPENDENCIES = libslapd.la +libmemberof_plugin_la_LDFLAGS = -avoid-version + +#------------------------ +# libpam-passthru-plugin +#------------------------ +libpam_passthru_plugin_la_SOURCES = ldap/servers/plugins/pam_passthru/pam_ptconfig.c \ + ldap/servers/plugins/pam_passthru/pam_ptdebug.c \ + ldap/servers/plugins/pam_passthru/pam_ptimpl.c \ + ldap/servers/plugins/pam_passthru/pam_ptpreop.c + 
+libpam_passthru_plugin_la_CPPFLAGS = $(AM_CPPFLAGS) $(DSPLUGIN_CPPFLAGS) +libpam_passthru_plugin_la_LIBADD = libslapd.la $(LDAPSDK_LINK) $(NSPR_LINK) $(PAM_LINK) +libpam_passthru_plugin_la_DEPENDENCIES = libslapd.la +libpam_passthru_plugin_la_LDFLAGS = -avoid-version + +#------------------------ +# libpassthru-plugin +#------------------------ +libpassthru_plugin_la_SOURCES = ldap/servers/plugins/passthru/ptbind.c \ + ldap/servers/plugins/passthru/ptconfig.c \ + ldap/servers/plugins/passthru/ptconn.c \ + ldap/servers/plugins/passthru/ptdebug.c \ + ldap/servers/plugins/passthru/ptpreop.c \ + ldap/servers/plugins/passthru/ptutil.c + +libpassthru_plugin_la_CPPFLAGS = $(AM_CPPFLAGS) $(DSPLUGIN_CPPFLAGS) +libpassthru_plugin_la_LIBADD = libslapd.la $(LDAPSDK_LINK) $(NSPR_LINK) +libpassthru_plugin_la_DEPENDENCIES = libslapd.la +libpassthru_plugin_la_LDFLAGS = -avoid-version + +#------------------------ +# libposix-winsync-plugin +#------------------------ +libposix_winsync_plugin_la_SOURCES = ldap/servers/plugins/posix-winsync/posix-winsync.c \ + ldap/servers/plugins/posix-winsync/posix-group-func.c \ + ldap/servers/plugins/posix-winsync/posix-group-task.c \ + ldap/servers/plugins/posix-winsync/posix-winsync-config.c + +libposix_winsync_plugin_la_CPPFLAGS = $(AM_CPPFLAGS) $(DSPLUGIN_CPPFLAGS) -DWINSYNC_TEST_POSIX \ + -I$(srcdir)/ldap/servers/plugins/replication +libposix_winsync_plugin_la_LIBADD = libslapd.la $(LDAPSDK_LINK) $(NSPR_LINK) +libposix_winsync_plugin_la_DEPENDENCIES = libslapd.la +libposix_winsync_plugin_la_LDFLAGS = -avoid-version + +#------------------------ +# libpwdstorage-plugin +#------------------------ +libpwdstorage_plugin_la_SOURCES = ldap/servers/plugins/pwdstorage/clear_pwd.c \ + ldap/servers/plugins/pwdstorage/crypt_pwd.c \ + ldap/servers/plugins/pwdstorage/md5_pwd.c \ + ldap/servers/plugins/pwdstorage/md5c.c \ + ldap/servers/plugins/pwdstorage/ns-mta-md5_pwd.c \ + ldap/servers/plugins/pwdstorage/pwd_init.c \ + 
ldap/servers/plugins/pwdstorage/pwd_util.c \ + ldap/servers/plugins/pwdstorage/sha_pwd.c \ + ldap/servers/plugins/pwdstorage/smd5_pwd.c \ + ldap/servers/plugins/pwdstorage/ssha_pwd.c \ + ldap/servers/plugins/pwdstorage/pbkdf2_pwd.c \ + ldap/servers/plugins/pwdstorage/gost_yescrypt.c \ + $(NULLSTRING) + +libpwdstorage_plugin_la_CPPFLAGS = $(AM_CPPFLAGS) $(DSPLUGIN_CPPFLAGS) +libpwdstorage_plugin_la_LIBADD = libslapd.la $(NSS_LINK) $(NSPR_LINK) $(LIBCRYPT) +libpwdstorage_plugin_la_DEPENDENCIES = libslapd.la +libpwdstorage_plugin_la_LDFLAGS = -avoid-version + +#------------------------ +# libcontentsync-plugin +#------------------------ +libcontentsync_plugin_la_SOURCES = ldap/servers/plugins/sync/sync_init.c \ + ldap/servers/plugins/sync/sync_util.c \ + ldap/servers/plugins/sync/sync_refresh.c \ + ldap/servers/plugins/sync/sync_persist.c + +libcontentsync_plugin_la_CPPFLAGS = $(AM_CPPFLAGS) $(DSPLUGIN_CPPFLAGS) +libcontentsync_plugin_la_LIBADD = libslapd.la $(NSS_LINK) $(NSPR_LINK) $(LIBCRYPT) +libcontentsync_plugin_la_DEPENDENCIES = libslapd.la +libcontentsync_plugin_la_LDFLAGS = -avoid-version + +#------------------------ +# libreferint-plugin +#------------------------ +libreferint_plugin_la_SOURCES = ldap/servers/plugins/referint/referint.c + +libreferint_plugin_la_CPPFLAGS = $(AM_CPPFLAGS) $(DSPLUGIN_CPPFLAGS) +libreferint_plugin_la_LIBADD = libslapd.la $(LDAPSDK_LINK) $(NSPR_LINK) +libreferint_plugin_la_DEPENDENCIES = libslapd.la +libreferint_plugin_la_LDFLAGS = -avoid-version + +#------------------------ +# libreplication-plugin +#------------------------ +libreplication_plugin_la_SOURCES = ldap/servers/plugins/replication/cl5_api.c \ + ldap/servers/plugins/replication/cl5_clcache.c \ + ldap/servers/plugins/replication/cl5_config.c \ + ldap/servers/plugins/replication/cl5_init.c \ + ldap/servers/plugins/replication/cl_crypt.c \ + ldap/servers/plugins/replication/csnpl.c \ + ldap/servers/plugins/replication/llist.c \ + 
ldap/servers/plugins/replication/repl_connext.c \ + ldap/servers/plugins/replication/repl_controls.c \ + ldap/servers/plugins/replication/repl_ext.c \ + ldap/servers/plugins/replication/repl_extop.c \ + ldap/servers/plugins/replication/repl_globals.c \ + ldap/servers/plugins/replication/repl_opext.c \ + ldap/servers/plugins/replication/repl_session_plugin.c \ + ldap/servers/plugins/replication/repl_cleanallruv.c \ + ldap/servers/plugins/replication/repl5_agmt.c \ + ldap/servers/plugins/replication/repl5_agmtlist.c \ + ldap/servers/plugins/replication/repl5_backoff.c \ + ldap/servers/plugins/replication/repl5_connection.c \ + ldap/servers/plugins/replication/repl5_inc_protocol.c \ + ldap/servers/plugins/replication/repl5_init.c \ + ldap/servers/plugins/replication/repl5_mtnode_ext.c \ + ldap/servers/plugins/replication/repl5_plugins.c \ + ldap/servers/plugins/replication/repl5_protocol.c \ + ldap/servers/plugins/replication/repl5_protocol_util.c \ + ldap/servers/plugins/replication/repl5_replica.c \ + ldap/servers/plugins/replication/repl5_replica_config.c \ + ldap/servers/plugins/replication/repl5_replica_dnhash.c \ + ldap/servers/plugins/replication/repl5_replica_hash.c \ + ldap/servers/plugins/replication/repl5_ruv.c \ + ldap/servers/plugins/replication/repl5_schedule.c \ + ldap/servers/plugins/replication/repl5_tot_protocol.c \ + ldap/servers/plugins/replication/repl5_total.c \ + ldap/servers/plugins/replication/repl5_updatedn_list.c \ + ldap/servers/plugins/replication/replutil.c \ + ldap/servers/plugins/replication/urp.c \ + ldap/servers/plugins/replication/urp_glue.c \ + ldap/servers/plugins/replication/urp_tombstone.c \ + ldap/servers/plugins/replication/windows_connection.c \ + ldap/servers/plugins/replication/windows_inc_protocol.c \ + ldap/servers/plugins/replication/windows_private.c \ + ldap/servers/plugins/replication/windows_protocol_util.c \ + ldap/servers/plugins/replication/windows_tot_protocol.c + +libreplication_plugin_la_CPPFLAGS = 
$(AM_CPPFLAGS) $(DSPLUGIN_CPPFLAGS) $(ICU_CFLAGS) $(DB_INC) +libreplication_plugin_la_LIBADD = libslapd.la libback-ldbm.la $(LDAPSDK_LINK) $(NSS_LINK) $(NSPR_LINK) $(ICU_LIBS) $(DB_LINK) +libreplication_plugin_la_DEPENDENCIES = libslapd.la libback-ldbm.la +libreplication_plugin_la_LDFLAGS = -avoid-version + +#------------------------ +# libretrocl-plugin +#------------------------ +libretrocl_plugin_la_SOURCES = ldap/servers/plugins/retrocl/retrocl.c \ + ldap/servers/plugins/retrocl/retrocl_cn.c \ + ldap/servers/plugins/retrocl/retrocl_create.c \ + ldap/servers/plugins/retrocl/retrocl_po.c \ + ldap/servers/plugins/retrocl/retrocl_rootdse.c \ + ldap/servers/plugins/retrocl/retrocl_trim.c + +libretrocl_plugin_la_CPPFLAGS = $(AM_CPPFLAGS) $(DSPLUGIN_CPPFLAGS) +libretrocl_plugin_la_LIBADD = libslapd.la $(LDAPSDK_LINK) $(NSPR_LINK) +libretrocl_plugin_la_DEPENDENCIES = libslapd.la +libretrocl_plugin_la_LDFLAGS = -avoid-version + +#------------------------ +# libroles-plugin +#------------------------ +libroles_plugin_la_SOURCES = ldap/servers/plugins/roles/roles_cache.c \ + ldap/servers/plugins/roles/roles_plugin.c + +libroles_plugin_la_CPPFLAGS = $(AM_CPPFLAGS) $(DSPLUGIN_CPPFLAGS) +libroles_plugin_la_LIBADD = libslapd.la $(NSPR_LINK) +libroles_plugin_la_DEPENDENCIES = libslapd.la +libroles_plugin_la_LDFLAGS = -avoid-version + +#------------------------ +# libschemareload-plugin +#------------------------ +libschemareload_plugin_la_SOURCES = ldap/servers/plugins/schema_reload/schema_reload.c + +libschemareload_plugin_la_CPPFLAGS = $(AM_CPPFLAGS) $(DSPLUGIN_CPPFLAGS) +libschemareload_plugin_la_LIBADD = libslapd.la $(NSPR_LINK) +libschemareload_plugin_la_DEPENDENCIES = libslapd.la +libschemareload_plugin_la_LDFLAGS = -avoid-version + +#------------------------ +# libstatechange-plugin +#------------------------ +libstatechange_plugin_la_SOURCES = ldap/servers/plugins/statechange/statechange.c + +libstatechange_plugin_la_CPPFLAGS = $(AM_CPPFLAGS) $(DSPLUGIN_CPPFLAGS) 
+libstatechange_plugin_la_LIBADD = libslapd.la +libstatechange_plugin_la_DEPENDENCIES = libslapd.la +libstatechange_plugin_la_LDFLAGS = -avoid-version + +#------------------------ +# libsyntax-plugin +#------------------------ +libsyntax_plugin_la_SOURCES = ldap/servers/plugins/syntaxes/bin.c \ + ldap/servers/plugins/syntaxes/bitstring.c \ + ldap/servers/plugins/syntaxes/ces.c \ + ldap/servers/plugins/syntaxes/cis.c \ + ldap/servers/plugins/syntaxes/debug.c \ + ldap/servers/plugins/syntaxes/dn.c \ + ldap/servers/plugins/syntaxes/deliverymethod.c \ + ldap/servers/plugins/syntaxes/facsimile.c \ + ldap/servers/plugins/syntaxes/guide.c \ + ldap/servers/plugins/syntaxes/int.c \ + ldap/servers/plugins/syntaxes/nameoptuid.c \ + ldap/servers/plugins/syntaxes/numericstring.c \ + ldap/servers/plugins/syntaxes/phonetic.c \ + ldap/servers/plugins/syntaxes/sicis.c \ + ldap/servers/plugins/syntaxes/string.c \ + ldap/servers/plugins/syntaxes/syntax_common.c \ + ldap/servers/plugins/syntaxes/tel.c \ + ldap/servers/plugins/syntaxes/telex.c \ + ldap/servers/plugins/syntaxes/teletex.c \ + ldap/servers/plugins/syntaxes/validate.c \ + ldap/servers/plugins/syntaxes/validate_task.c \ + ldap/servers/plugins/syntaxes/value.c + +libsyntax_plugin_la_CPPFLAGS = $(AM_CPPFLAGS) $(DSPLUGIN_CPPFLAGS) +libsyntax_plugin_la_LIBADD = libslapd.la $(LDAPSDK_LINK) $(NSPR_LINK) +libsyntax_plugin_la_DEPENDENCIES = libslapd.la +libsyntax_plugin_la_LDFLAGS = -avoid-version + +#------------------------ +# libusn-plugin +#------------------------ +libusn_plugin_la_SOURCES = ldap/servers/plugins/usn/usn.c \ + ldap/servers/plugins/usn/usn_cleanup.c + +libusn_plugin_la_CPPFLAGS = $(AM_CPPFLAGS) $(DSPLUGIN_CPPFLAGS) +libusn_plugin_la_LIBADD = libslapd.la $(LDAPSDK_LINK) $(NSPR_LINK) +libusn_plugin_la_DEPENDENCIES = libslapd.la +libusn_plugin_la_LDFLAGS = -avoid-version + +#------------------------ +# libviews-plugin +#------------------------ +libviews_plugin_la_SOURCES = ldap/servers/plugins/views/views.c + 
+libviews_plugin_la_CPPFLAGS = $(AM_CPPFLAGS) $(DSPLUGIN_CPPFLAGS) +libviews_plugin_la_LIBADD = libslapd.la $(LDAPSDK_LINK) $(NSPR_LINK) +libviews_plugin_la_DEPENDENCIES = libslapd.la +libviews_plugin_la_LDFLAGS = -avoid-version + +#------------------------ +# libwhoami-plugin +#------------------------ +libwhoami_plugin_la_SOURCES = ldap/servers/plugins/whoami/whoami.c + +libwhoami_plugin_la_CPPFLAGS = $(AM_CPPFLAGS) $(DSPLUGIN_CPPFLAGS) +libwhoami_plugin_la_LIBADD = libslapd.la $(LDAPSDK_LINK) $(NSPR_LINK) +libwhoami_plugin_la_DEPENDENCIES = libslapd.la +libwhoami_plugin_la_LDFLAGS = -avoid-version + +#------------------------ +#//////////////////////////////////////////////////////////////// +# +# Programs +# +#//////////////////////////////////////////////////////////////// +#------------------------ +# dbscan +#------------------------ +dbscan_SOURCES = ldap/servers/slapd/tools/dbscan.c + +dbscan_CPPFLAGS = $(NSPR_INCLUDES) $(AM_CPPFLAGS) +dbscan_LDADD = $(NSPR_LINK) $(DB_IMPL) libslapd.la + +#------------------------ +# ldap-agent +#------------------------ +ldap_agent_SOURCES = ldap/servers/snmp/main.c \ + ldap/servers/snmp/ldap-agent.c \ + ldap/servers/slapd/agtmmap.c + +ldap_agent_CPPFLAGS = $(AM_CPPFLAGS) $(DSPLUGIN_CPPFLAGS) @netsnmp_inc@ +ldap_agent_LDADD = $(LDAPSDK_LINK) $(SASL_LINK) $(NSS_LINK) $(NSPR_LINK) $(NETSNMP_LINK) $(THREADLIB) + + +#------------------------ +# ldclt +#------------------------ +ldclt_SOURCES = ldap/servers/slapd/tools/ldaptool-sasl.c \ + ldap/servers/slapd/tools/ldclt/data.c \ + ldap/servers/slapd/tools/ldclt/ldapfct.c \ + ldap/servers/slapd/tools/ldclt/ldclt.c \ + ldap/servers/slapd/tools/ldclt/ldcltU.c \ + ldap/servers/slapd/tools/ldclt/parser.c \ + ldap/servers/slapd/tools/ldclt/port.c \ + ldap/servers/slapd/tools/ldclt/scalab01.c \ + ldap/servers/slapd/tools/ldclt/threadMain.c \ + ldap/servers/slapd/tools/ldclt/utils.c \ + ldap/servers/slapd/tools/ldclt/version.c \ + ldap/servers/slapd/tools/ldclt/workarounds.c + 
+ldclt_CPPFLAGS = $(AM_CPPFLAGS) -I$(srcdir)/ldap/servers/slapd/tools $(DSPLUGIN_CPPFLAGS) $(SASL_CFLAGS) +ldclt_LDADD = $(NSPR_LINK) $(NSS_LINK) $(LDAPSDK_LINK) $(SASL_LINK) $(LIBNSL) $(LIBSOCKET) $(LIBDL) $(THREADLIB) + +#------------------------ +# ns-slapd +#------------------------ +if enable_ldapi + GETSOCKETPEER=ldap/servers/slapd/getsocketpeer.c + enable_ldapi = 1 +endif +if enable_autobind + enable_autobind = 1 +endif +if enable_auto_dn_suffix + enable_auto_dn_suffix = 1 +endif + +ns_slapd_SOURCES = ldap/servers/slapd/abandon.c \ + ldap/servers/slapd/auth.c \ + ldap/servers/slapd/bind.c \ + ldap/servers/slapd/compare.c \ + ldap/servers/slapd/config.c \ + ldap/servers/slapd/connection.c \ + ldap/servers/slapd/conntable.c \ + ldap/servers/slapd/daemon.c \ + ldap/servers/slapd/detach.c \ + ldap/servers/slapd/extendop.c \ + ldap/servers/slapd/fedse.c \ + ldap/servers/slapd/fileio.c \ + ldap/servers/slapd/getopt_ext.c \ + ldap/servers/slapd/globals.c \ + ldap/servers/slapd/house.c \ + ldap/servers/slapd/init.c \ + ldap/servers/slapd/main.c \ + ldap/servers/slapd/monitor.c \ + ldap/servers/slapd/passwd_extop.c \ + ldap/servers/slapd/psearch.c \ + ldap/servers/slapd/pw_mgmt.c \ + ldap/servers/slapd/pw_verify.c \ + ldap/servers/slapd/rootdse.c \ + ldap/servers/slapd/sasl_io.c \ + ldap/servers/slapd/saslbind.c \ + ldap/servers/slapd/search.c \ + ldap/servers/slapd/start_tls_extop.c \ + ldap/servers/slapd/strdup.c \ + ldap/servers/slapd/stubs.c \ + ldap/servers/slapd/tempnam.c \ + ldap/servers/slapd/unbind.c \ + ldap/servers/slapd/subentries.c + +ns_slapd_CPPFLAGS = $(AM_CPPFLAGS) $(DSPLUGIN_CPPFLAGS) $(SASL_CFLAGS) $(SVRCORE_INCLUDES) $(CFI_CFLAGS) +# We need our libraries to come first, then our externals libraries second. 
+ns_slapd_LDADD = libslapd.la libldaputil.la libsvrcore.la $(RNSSLAPD_LIB) + +ns_slapd_LDADD += $(LDAPSDK_LINK) $(NSS_LINK) $(LIBADD_DL) $(OPENSSL_LIBS) \ + $(NSPR_LINK) $(SASL_LINK) $(LIBNSL) $(LIBSOCKET) $(THREADLIB) $(SYSTEMD_LIBS) $(EVENT_LINK) + +ns_slapd_DEPENDENCIES = libslapd.la libldaputil.la +ns_slapd_LINK = $(LINK) + + +#------------------------ +# pwdhash +#------------------------ +pwdhash_SOURCES = ldap/servers/slapd/tools/pwenc.c + +pwdhash_CPPFLAGS = $(AM_CPPFLAGS) $(DSPLUGIN_CPPFLAGS) +pwdhash_LDADD = libslapd.la libsvrcore.la $(NSPR_LINK) $(NSS_LINK) $(LDAPSDK_LINK) $(SASL_LINK) +pwdhash_DEPENDENCIES = libslapd.la + +#------------------------- +# CMOCKA TEST PROGRAMS +#------------------------- +if ENABLE_CMOCKA + +check_PROGRAMS = test_slapd +# Mark all check programs for testing +TESTS = test_slapd + +test_slapd_SOURCES = test/main.c \ + test/libslapd/test.c \ + test/libslapd/counters/atomic.c \ + test/libslapd/filter/optimise.c \ + test/libslapd/pblock/analytics.c \ + test/libslapd/pblock/v3_compat.c \ + test/libslapd/schema/filter_validate.c \ + test/libslapd/operation/v3_compat.c \ + test/libslapd/spal/meminfo.c \ + test/libslapd/haproxy/parse.c \ + test/plugins/test.c \ + test/plugins/pwdstorage/pbkdf2.c + +# We need to link a lot of plugins for this test. +test_slapd_LDADD = libslapd.la \ + libpwdstorage-plugin.la \ + $(NSS_LINK) $(NSPR_LINK) +test_slapd_LDFLAGS = $(AM_CPPFLAGS) $(CMOCKA_LINKS) +### WARNING: Slap.h needs cert.h, which requires the -I/lib/ldaputil!!! +### WARNING: Slap.h pulls ssl.h, which requires nss!!!! 
+# We need to pull in plugin header paths too: +test_slapd_CPPFLAGS = $(AM_CPPFLAGS) $(DSPLUGIN_CPPFLAGS) $(DSINTERNAL_CPPFLAGS) \ + -I$(srcdir)/ldap/servers/plugins/pwdstorage + +endif +#------------------------ +# end cmocka tests +#------------------------ + +# these are for the config files and scripts that we need to generate and replace +# the paths and other tokens with the real values set during configure/make +# note that we cannot just use AC_OUTPUT to do this for us, since it will do things like this: +# LD_LIBRARY_PATH = ${prefix}/lib/dirsrv +# i.e. it literally copies in '${prefix}' rather than expanding it out - we want this instead: +# LD_LIBRARY_PATH = /usr/lib/dirsrv +fixupcmd = sed \ + -e 's,@bindir\@,$(bindir),g' \ + -e 's,@sbindir\@,$(sbindir),g' \ + -e 's,@libdir\@,$(libdir),g' \ + -e 's,@libexecdir\@,$(libexecdir),g' \ + -e 's,@nss_libdir\@,$(nss_libdir),g' \ + -e 's,@ldaptool_bindir\@,$(ldaptool_bindir),g' \ + -e 's,@ldaptool_opts\@,$(ldaptool_opts),g' \ + -e 's,@plainldif_opts\@,$(plainldif_opts),g' \ + -e 's,@db_libdir\@,$(db_libdir),g' \ + -e 's,@db_bindir\@,$(db_bindir),g' \ + -e 's,@netsnmp_libdir\@,$(netsnmp_libdir),g' \ + -e 's,@propertydir\@,$(propertydir),g' \ + -e 's,@datadir\@,$(datadir),g' \ + -e 's,@schemadir\@,$(schemadir),g' \ + -e 's,@serverdir\@,$(serverdir),g' \ + -e 's,@serverincdir\@,$(serverincdir),g' \ + -e 's,@serverplugindir\@,$(serverplugindir),g' \ + -e 's,@taskdir\@,$(taskdir),g' \ + -e 's,@configdir\@,$(configdir),g' \ + -e 's,@sysconfdir\@,$(sysconfdir),g' \ + -e 's,@localstatedir\@,$(localstatedir),g' \ + -e 's,@localrundir\@,$(localrundir),g' \ + -e 's,@infdir\@,$(infdir),g' \ + -e 's,@mibdir\@,$(mibdir),g' \ + -e 's,@cockpitdir\@,$(cockpitdir),g' \ + -e 's,@templatedir\@,$(sampledatadir),g' \ + -e 's,@systemschemadir\@,$(systemschemadir),g' \ + -e 's,@package_name\@,$(PACKAGE_NAME),g' \ + -e 's,@instconfigdir\@,$(instconfigdir),g' \ + -e 's,@enable_ldapi\@,$(enable_ldapi),g' \ + -e 
's,@enable_pam_passthru\@,$(enable_pam_passthru),g' \ + -e 's,@enable_bitwise\@,$(enable_bitwise),g' \ + -e 's,@enable_dna\@,$(enable_dna),g' \ + -e 's,@enable_autobind\@,$(enable_autobind),g' \ + -e 's,@enable_auto_dn_suffix\@,$(enable_auto_dn_suffix),g' \ + -e 's,@enable_presence\@,$(enable_presence),g' \ + -e 's,@enable_asan\@,$(ASAN_ON),g' \ + -e 's,@enable_msan\@,$(MSAN_ON),g' \ + -e 's,@enable_tsan\@,$(TSAN_ON),g' \ + -e 's,@enable_ubsan\@,$(UBSAN_ON),g' \ + -e 's,@SANITIZER\@,$(SANITIZER),g' \ + -e 's,@ECHO_N\@,$(ECHO_N),g' \ + -e 's,@ECHO_C\@,$(ECHO_C),g' \ + -e 's,@brand\@,$(brand),g' \ + -e 's,@capbrand\@,$(capbrand),g' \ + -e 's,@vendor\@,$(vendor),g' \ + -e 's,@PACKAGE_NAME\@,$(PACKAGE_NAME),g' \ + -e 's,@PACKAGE_VERSION\@,$(PACKAGE_VERSION),g' \ + -e 's,@RPM_VERSION\@,$(RPM_VERSION),g' \ + -e 's,@PACKAGE_BASE_VERSION\@,$(PACKAGE_BASE_VERSION),g' \ + -e 's,@CONSOLE_VERSION\@,$(CONSOLE_VERSION),g' \ + -e 's,@BUILDNUM\@,$(BUILDNUM),g' \ + -e 's,@NQBUILD_NUM\@,$(NQBUILDNUM),g' \ + -e 's,@perlpath\@,$(perldir),g' \ + -e 's,@defaultuser\@,$(defaultuser),g' \ + -e 's,@defaultgroup\@,$(defaultgroup),g' \ + -e 's,@with_fhs_opt\@,@with_fhs_opt@,g' \ + -e 's,@with_selinux\@,@with_selinux@,g' \ + -e 's,@with_systemd\@,$(WITH_SYSTEMD),g' \ + -e 's,@tmpfiles_d\@,$(tmpfiles_d),g' \ + -e 's,@pythonexec\@,@pythonexec@,g' \ + -e 's,@sttyexec\@,@sttyexec@,g' \ + -e 's,@initconfigdir\@,$(initconfigdir),g' \ + -e 's,@updatedir\@,$(updatedir),g' \ + -e 's,@ldaplib\@,$(ldaplib),g' \ + -e 's,@ldaplib_defs\@,$(ldaplib_defs),g' \ + -e 's,@systemdsystemunitdir\@,$(systemdsystemunitdir),g' \ + -e 's,@systemdsystemconfdir\@,$(systemdsystemconfdir),g' \ + -e 's,@systemdgroupname\@,$(systemdgroupname),g' \ + -e 's,@prefixdir\@,$(prefixdir),g' + +%: %.in + mkdir -p $(dir $@) + $(fixupcmd) $^ > $@ + +%/$(PACKAGE_NAME): %/initscript.in + if [ ! -d $(dir $@) ] ; then mkdir -p $(dir $@) ; fi + $(fixupcmd) $^ > $@ + +%/$(PACKAGE_NAME): %/base-initconfig.in + if [ ! 
-d $(dir $@) ] ; then mkdir -p $(dir $@) ; fi +if SYSTEMD + $(fixupcmd) $^ | sed -e 's/@preamble@/# This file is in systemd EnvironmentFile format - see man systemd.exec/' > $@ +else + $(fixupcmd) $^ | sed -n -e 's/@preamble@//' -e '/^#/{p;d;}' -e '/^$$/{p;d;}' -e 's/^\([^=]*\)\(=.*\)$$/\1\2 ; export \1/ ; p' > $@ + $(fixupcmd) $(srcdir)/ldap/admin/src/initconfig.in >> $@ +endif + +%/template-initconfig: %/template-initconfig.in + if [ ! -d $(dir $@) ] ; then mkdir -p $(dir $@) ; fi +if SYSTEMD + $(fixupcmd) $^ | sed -e 's/@preamble@/# This file is in systemd EnvironmentFile format - see man systemd.exec/' > $@ +else + $(fixupcmd) $^ | sed -n -e 's/@preamble@//' -e '/^#/{p;d;}' -e '/^$$/{p;d;}' -e 's/^\([^=]*\)\(=.*\)$$/\1\2 ; export \1/ ; p' > $@ +endif + +%/$(PACKAGE_NAME)-snmp: %/ldap-agent-initscript.in + if [ ! -d $(dir $@) ] ; then mkdir -p $(dir $@) ; fi + $(fixupcmd) $^ > $@ + +# yes, that is an @ in the filename . . . +%/$(PACKAGE_NAME)@.service: %/systemd.template.service.in + if [ ! -d $(dir $@) ] ; then mkdir -p $(dir $@) ; fi + $(fixupcmd) $^ > $@ + +%/$(PACKAGE_NAME)@.service.d/custom.conf: %/systemd.template.service.custom.conf.in + if [ ! -d $(dir $@) ] ; then mkdir -p $(dir $@) ; fi + $(fixupcmd) $^ > $@ + +if with_sanitizer +%/$(PACKAGE_NAME)@.service.d/xsan.conf: %/systemd.template.service.xsan.conf.in + if [ ! -d $(dir $@) ] ; then mkdir -p $(dir $@) ; fi + $(fixupcmd) $^ > $@ +endif + +%/$(systemdgroupname): %/systemd.group.in + if [ ! -d $(dir $@) ] ; then mkdir -p $(dir $@) ; fi + $(fixupcmd) $^ > $@ + +%/$(PACKAGE_NAME)-snmp.service: %/systemd-snmp.service.in + if [ ! 
-d $(dir $@) ] ; then mkdir -p $(dir $@) ; fi + $(fixupcmd) $^ > $@ + +# if distdir is a git tag, use that for the git archive tag, else +# just assume a developer build and use HEAD +git-archive: + if [ -n "$(SRCDISTDIR)" -a -d "$(SRCDISTDIR)" ] ; then \ + srcdistdir=$(SRCDISTDIR) ; \ + else \ + srcdistdir=`pwd` ; \ + fi ; \ + cd $(srcdir) ; \ + if git show-ref --tags -q $(distdir) ; then \ + gittag=$(distdir) ; \ + else \ + gittag=HEAD ; \ + fi ; \ + git archive --prefix=$(distdir)/ $$gittag | bzip2 > $$srcdistdir/$(distdir).tar.bz2 + +# Python test tests +# How will we update this to python 3? + +lib389: src/lib389/setup.py + cd $(srcdir)/src/lib389; $(PYTHON) setup.py build ; $(PYTHON) setup.py build_manpages + +lib389-install: lib389 + cd $(srcdir)/src/lib389; $(PYTHON) setup.py install --skip-build --force + +if ENABLE_COCKPIT + +NODE_MODULES_TEST = src/cockpit/389-console/package-lock.json +WEBPACK_TEST = src/cockpit/389-console/cockpit_dist/index.html + +# Cockpit UI plugin - we install the dependancies and build the JS sources +# and then we use install-data-hook for copying the results on 'make install' +$(NODE_MODULES_TEST): + cd src/cockpit/389-console; npm install + +$(WEBPACK_TEST): $(NODE_MODULES_TEST) + cd src/cockpit/389-console; npx audit-ci; ./build.js + +389-console: $(WEBPACK_TEST) + +# This requires a built source tree and avoids having to install anything system-wide +389-console-devel-install: + cd $(srcdir)/src/cockpit/389-console; \ + rm ~/.local/share/cockpit/389-console; \ + mkdir -p ~/.local/share/cockpit/; \ + ln -s $$(pwd)/dist ~/.local/share/cockpit/389-console + +389-console-clean: + cd $(srcdir)/src/cockpit/389-console; \ + rm -rf dist; \ + rm -rf cockpit_dist +endif + +if HAVE_DOXYGEN + +# The rm in man3 is to remove files like: _home_william_development_389ds_libsds_src_.3 +# If there is a way to ignore this in doxygen I'm all ears ... 
+ +doxyfile.stamp: + cd $(srcdir); $(DOXYGEN) $(abs_top_builddir)/docs/slapi.doxy + rm -f $(abs_top_builddir)/man/man3/_* + touch doxyfile.stamp + +# Add the docs to make all. +all-local: doxyfile.stamp + +endif diff --git a/README.md b/README.md new file mode 100644 index 0000000..d26633a --- /dev/null +++ b/README.md @@ -0,0 +1,52 @@ +389 Directory Server +==================== + +[![Test](https://github.com/389ds/389-ds-base/actions/workflows/pytest.yml/badge.svg)](https://github.com/389ds/389-ds-base/actions/workflows/pytest.yml) +[![npm-audit-ci](https://github.com/389ds/389-ds-base/actions/workflows/npm.yml/badge.svg)](https://github.com/389ds/389-ds-base/actions/workflows/npm.yml) + +389 Directory Server is a highly usable, fully featured, reliable +and secure LDAP server implementation. It handles many of the +largest LDAP deployments in the world. + +All our code has been extensively tested with sanitisation tools. +As well as a rich feature set of fail-over and backup technologies +gives administrators confidence their accounts are safe. + +License +------- + +The 389 Directory Server is subject to the terms detailed in the +license agreement file called LICENSE. + +Late-breaking news and information on the 389 Directory Server is +available on our [wiki page](https://www.port389.org/) + +Building +-------- + + autoreconf -fiv + ./configure --enable-debug --with-openldap --enable-cmocka --enable-asan + make + make lib389 + sudo make install + sudo make lib389-install + +Note: **--enable-asan** is optional, and it should only be used for debugging/development purposes. + +See also full [building guide](https://www.port389.org/docs/389ds/development/building.html). 
+
+Testing
+-------
+
+    make check
+    sudo py.test -s 389-ds-base/dirsrvtests/tests/suites/basic/
+
+To debug the make check items, you'll need libtool to help:
+
+    libtool --mode=execute gdb /home/william/build/ds/test_slapd
+
+More information
+----------------
+
+Please see our [contributing guide](https://www.port389.org/docs/389ds/contributing.html).
+
diff --git a/VERSION.sh b/VERSION.sh
new file mode 100644
index 0000000..4b4d9a2
--- /dev/null
+++ b/VERSION.sh
@@ -0,0 +1,54 @@
+# brand is lower case - used for names that don't appear to end users
+# brand is used for file naming - should contain no spaces
+brand=389
+# capbrand is the properly capitalized brand name that appears to end users
+# may contain spaces
+capbrand=389
+# vendor is the properly formatted vendor/manufacturer name that appears to end users
+vendor="389 Project"
+
+# PACKAGE_VERSION is constructed from these
+VERSION_MAJOR=2
+VERSION_MINOR=4
+VERSION_MAINT=4
+# NOTE: VERSION_PREREL is automatically set for builds made out of a git tree
+VERSION_PREREL=
+VERSION_DATE=$(date -u +%Y%m%d%H%M)
+
+# Set the version and release numbers for local developer RPM builds. We
+# set these here because we do not want the git commit hash in the RPM
+# version since it can make RPM upgrades difficult. If we have a git
+# commit hash, we add it into the release number below.
+RPM_RELEASE=${VERSION_DATE} +RPM_VERSION=${VERSION_MAJOR}.${VERSION_MINOR}.${VERSION_MAINT} + +if $(git -C "$srcdir" rev-parse --is-inside-work-tree > /dev/null 2>&1); then +# Check if the source is from a git repo +# if this is not a git repo, git log will say +# fatal: Not a git repository +# to stderr and stdout will be empty +# this tells git to print the short commit hash from the last commit + COMMIT=$(git -C "$srcdir" log -1 --pretty=format:%h 2> /dev/null) + if test -n "$COMMIT" ; then + VERSION_PREREL=.${VERSION_DATE}git$COMMIT + RPM_RELEASE=${RPM_RELEASE}git$COMMIT + fi +fi + +# the real version used throughout configure and make +# NOTE: because of autoconf/automake harshness, we cannot override the settings +# below in C code - there is no way to override the default #defines +# for these set with AC_INIT - so configure.ac should AC_DEFINE +# DS_PACKAGE_VERSION DS_PACKAGE_TARNAME DS_PACKAGE_BUGREPORT +# for use in C code - other code (perl scripts, shell scripts, Makefiles) +# can use PACKAGE_VERSION et. al. +PACKAGE_VERSION=$VERSION_MAJOR.$VERSION_MINOR.${VERSION_MAINT}${VERSION_PREREL} +# the name of the source tarball - see make dist +PACKAGE_TARNAME=${brand}-ds-base +# url for bug reports +PACKAGE_BUGREPORT="${PACKAGE_BUGREPORT}enter_bug.cgi?product=$brand" +# PACKAGE_STRING="$PACKAGE_TARNAME $PACKAGE_VERSION" +# the version of the ds console package that this directory server +# is compatible with +# console .2 is still compatible with 389 .3 for now +CONSOLE_VERSION=$VERSION_MAJOR.2 diff --git a/autogen.sh b/autogen.sh new file mode 100755 index 0000000..058cec8 --- /dev/null +++ b/autogen.sh @@ -0,0 +1,103 @@ +#!/bin/sh + +# set required versions of tools here +# the version is dotted integers like X.Y.Z where +# X, Y, and Z are integers +# comparisons are done using shell -lt, -gt, etc. 
+# this works if the numbers are zero filled as well +# so 06 == 6 + +# autoconf version required +# need 2.69 or later +ac_need_maj=2 +ac_need_min=69 +# automake version required +# need 1.13.4 or later +am_need_maj=1 +am_need_min=13 +am_need_rev=4 +# libtool version required +# need 2.4.2 or later +lt_need_maj=2 +lt_need_min=4 +lt_need_rev=2 +# should never have to touch anything below this line unless there is a bug +########################################################################### + +# input +# arg1 - version string in the form "X.Y[.Z]" - the .Z is optional +# args remaining - the needed X, Y, and Z to match +# output +# return 0 - success - the version string is >= the required X.Y.Z +# return 1 - failure - the version string is < the required X.Y.Z +# NOTE: All input must be integers, otherwise you will see shell errors +checkvers() { + vers="$1"; shift + needmaj="$1"; shift + needmin="$1"; shift + if [ "$#" != "0" ]; then + needrev="$1"; shift + fi + verslist=`echo $vers | tr '.' ' '` + set $verslist + maj=$1; shift + min=$1; shift + if [ "$#" != "0" ]; then + rev=$1; shift + fi + if [ "$maj" -gt "$needmaj" ] ; then return 0; fi + if [ "$maj" -lt "$needmaj" ] ; then return 1; fi + # if we got here, maj == needmaj + if [ -z "$needmin" ] ; then return 0; fi + if [ "$min" -gt "$needmin" ] ; then return 0; fi + if [ "$min" -lt "$needmin" ] ; then return 1; fi + # if we got here, min == needmin + if [ -z "$needrev" ] ; then return 0; fi + if [ "$rev" -gt "$needrev" ] ; then return 0; fi + if [ "$rev" -lt "$needrev" ] ; then return 1; fi + # if we got here, rev == needrev + return 0 +} + +# We use GNU sed-isms, so if `gsed' exists, use that instead. 
+sed=sed +if command -v gsed >/dev/null +then + sed=gsed +fi + +# Check autoconf version +AC_VERSION=`autoconf --version | $sed '/^autoconf/ {s/^.* \([1-9][0-9.]*\)$/\1/; q}'` +if checkvers "$AC_VERSION" $ac_need_maj $ac_need_min ; then + echo Found valid autoconf version $AC_VERSION +else + echo "You must have autoconf version $ac_need_maj.$ac_need_min or later installed (found version $AC_VERSION)." + exit 1 +fi + +# Check automake version +AM_VERSION=`automake --version | $sed '/^automake/ {s/^.* \([1-9][0-9.]*\)$/\1/; q}'` +if checkvers "$AM_VERSION" $am_need_maj $am_need_min $am_need_rev ; then + echo Found valid automake version $AM_VERSION +else + echo "You must have automake version $am_need_maj.$am_need_min.$am_need_rev or later installed (found version $AM_VERSION)." + exit 1 +fi + +# Check libtool version +# NOTE: some libtool versions report a letter at the end e.g. on RHEL6 +# the version is 2.2.6b - for comparison purposes, just strip off the +# letter - note that the shell -lt and -gt comparisons will fail with +# test: 6b: integer expression expected if the number to compare +# contains a non-digit +LT_VERSION=`libtool --version | $sed '/GNU libtool/ {s/^.* \([1-9][0-9a-zA-Z.]*\)$/\1/; s/[a-zA-Z]//g; q}'` +if checkvers "$LT_VERSION" $lt_need_maj $lt_need_min $lt_need_rev ; then + echo Found valid libtool version $LT_VERSION +else + echo "You must have libtool version $lt_need_maj.$lt_need_min.$lt_need_rev or later installed (found version $LT_VERSION)." + exit 1 +fi + +# Run autoreconf +echo "Running autoreconf -fvi" +autoreconf -fvi diff --git a/buildnum.py b/buildnum.py new file mode 100755 index 0000000..eff4b44 --- /dev/null +++ b/buildnum.py @@ -0,0 +1,23 @@ +#!/usr/bin/env python3 +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2020 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. 
+# --- END COPYRIGHT BLOCK --- + +# Generate a build number in the format YYYY.DDD.HHMM + +import os +import time + +SDE = os.getenv('SOURCE_DATE_EPOCH') +if SDE is not None: + gmtime_obj = time.gmtime(int(SDE)) +else: + gmtime_obj = time.gmtime() + +# Print build number +buildnum = time.strftime("%Y.%j.%H%M", gmtime_obj) +print(f'\\"{buildnum}\\"', end = '') diff --git a/configure.ac b/configure.ac new file mode 100644 index 0000000..e27edbd --- /dev/null +++ b/configure.ac @@ -0,0 +1,921 @@ +# -*- Autoconf -*- +# Process this file with autoconf to produce a configure script. +AC_PREREQ([2.69]) +AC_INIT([dirsrv],[1.0],[http://bugzilla.redhat.com/]) +# AC_CONFIG_HEADER must be called right after AC_INIT. +AC_CONFIG_HEADERS([config.h]) +# include the version information +. $srcdir/VERSION.sh +AC_MSG_NOTICE(This is configure for $PACKAGE_TARNAME $PACKAGE_VERSION) +AM_INIT_AUTOMAKE([1.9 foreign subdir-objects dist-bzip2 no-dist-gzip no-define tar-pax]) +AC_SUBST([RPM_VERSION]) +AC_SUBST([RPM_RELEASE]) +AC_SUBST([VERSION_PREREL]) +AC_SUBST([CONSOLE_VERSION]) +AM_MAINTAINER_MODE +AC_CANONICAL_HOST + +AC_CONFIG_MACRO_DIRS([m4]) + +# Checks for programs. +: ${CXXFLAGS=""} +AC_PROG_CXX +: ${CFLAGS=""} +AC_PROG_CC +AM_PROG_CC_C_O +AM_PROG_AS +PKG_PROG_PKG_CONFIG + +# disable static libs by default - we only use a couple +AC_DISABLE_STATIC +LT_INIT + +# Checks for header files. +AC_HEADER_DIRENT +AC_HEADER_SYS_WAIT +AC_CHECK_HEADERS([arpa/inet.h errno.h fcntl.h malloc.h netdb.h netinet/in.h stdlib.h string.h strings.h sys/file.h sys/socket.h sys/time.h syslog.h unistd.h mntent.h sys/sysinfo.h sys/endian.h endian.h]) +# These are *required* headers without option. 
+AC_CHECK_HEADERS([inttypes.h], [], AC_MSG_ERROR([unable to locate required header inttypes.h])) +AC_CHECK_HEADERS([crack.h], [], AC_MSG_ERROR([unable to locate required header crack.h])) +AC_CHECK_HEADERS([lmdb.h], [], AC_MSG_ERROR([unable to locate required header lmdb.h])) +AC_CHECK_HEADERS([json-c/json.h], [], AC_MSG_ERROR([unable to locate required header json-c/json.h])) + +# Checks for typedefs, structures, and compiler characteristics. +AC_HEADER_STAT +AC_C_CONST +AC_HEADER_STDBOOL +AC_TYPE_UID_T +AC_TYPE_PID_T +AC_TYPE_SIZE_T +AC_STRUCT_TM + +# Checks for library functions. +AC_FUNC_CHOWN +AC_FUNC_CLOSEDIR_VOID +AC_FUNC_ERROR_AT_LINE +AC_FUNC_FORK +AC_FUNC_LSTAT +AC_FUNC_LSTAT_FOLLOWS_SLASHED_SYMLINK +AC_FUNC_MALLOC +AC_FUNC_MEMCMP +AC_FUNC_MMAP +AC_DIAGNOSE([obsolete],[your code may safely assume C89 semantics that RETSIGTYPE is void. +Remove this warning and the `AC_CACHE_CHECK' when you adjust the code.])dnl +AC_CACHE_CHECK([return type of signal handlers],[ac_cv_type_signal],[AC_COMPILE_IFELSE( +[AC_LANG_PROGRAM([#include +#include +], + [return *(signal (0, 0)) (0) == 1;])], + [ac_cv_type_signal=int], + [ac_cv_type_signal=void])]) +AC_DEFINE_UNQUOTED([RETSIGTYPE],[$ac_cv_type_signal],[Define as the return type of signal handlers + (`int' or `void').]) + +AC_FUNC_STAT +AC_FUNC_STRERROR_R +AC_FUNC_STRFTIME +AC_FUNC_VPRINTF +AC_CHECK_FUNCS([endpwent ftruncate getcwd getaddrinfo inet_pton inet_ntop localtime_r memmove memset mkdir munmap putenv rmdir setrlimit socket strcasecmp strchr strcspn strdup strerror strncasecmp strpbrk strrchr strstr strtol tzset]) + +# These functions are *required* without option. +AC_CHECK_FUNCS([clock_gettime], [], AC_MSG_ERROR([unable to locate required symbol clock_gettime])) + +# This will detect if we need to add the LIBADD_DL value for us. +LT_LIB_DLLOAD + +# Optional rust component support. 
+AC_MSG_CHECKING(for --enable-rust-offline) +AC_ARG_ENABLE(rust_offline, AS_HELP_STRING([--enable-rust-offline], [Enable rust building offline. you MUST have run vendor! (default: no)]), + [], [ enable_rust_offline=no ]) +AC_MSG_RESULT($enable_rust_offline) +AM_CONDITIONAL([RUST_ENABLE_OFFLINE],[test "$enable_rust_offline" = yes]) + +AS_IF([test "$enable_rust_offline" = yes], + [rust_vendor_sources="replace-with = \"vendored-sources\""], + [rust_vendor_sources=""]) +AC_SUBST([rust_vendor_sources]) +if test "$enable_rust_offline" = yes; then + AC_CHECK_PROG(CARGO, [cargo], [yes], [no]) + AC_CHECK_PROG(RUSTC, [rustc], [yes], [no]) + + AS_IF([test "$CARGO" != "yes" -o "$RUSTC" != "yes"], [ + AC_MSG_FAILURE("Rust based plugins cannot be built cargo=$CARGO rustc=$RUSTC") + ]) +fi + +# Optional cockpit support (enabled by default) +AC_MSG_CHECKING(for --enable-cockpit) +AC_ARG_ENABLE(cockpit, AS_HELP_STRING([--enable-cockpit], [Enable cockpit plugin (default: yes)]), + [], [ enable_cockpit=yes ]) +AC_MSG_RESULT($enable_cockpit) +AC_SUBST([enable_cockpit]) +AC_SUBST(ENABLE_COCKPIT) +AM_CONDITIONAL([ENABLE_COCKPIT],[test "$enable_cockpit" = yes]) + +AC_DEFINE_UNQUOTED([DS_PACKAGE_TARNAME], "$PACKAGE_TARNAME", [package tarball name]) +AC_DEFINE_UNQUOTED([DS_PACKAGE_BUGREPORT], "$PACKAGE_BUGREPORT", [package bug report url]) +# define these for automake distdir +PACKAGE=$PACKAGE_TARNAME +AC_DEFINE_UNQUOTED([PACKAGE], "$PACKAGE", [package tar name]) + +AC_MSG_CHECKING(for --enable-debug) +AC_ARG_ENABLE(debug, AS_HELP_STRING([--enable-debug], [Enable debug features (default: no)]), + [], [ enable_debug=no ]) +AC_MSG_RESULT($enable_debug) +if test "$enable_debug" = yes ; then + debug_defs="-DDEBUG -DMCC_DEBUG" + debug_cflags="-g3 -ggdb -gdwarf-5 -O0" + debug_cxxflags="-g3 -ggdb -gdwarf-5 -O0" + debug_rust_defs="-C debuginfo=2 -Z macro-backtrace" + cargo_defs="" + rust_target_dir="debug" + AC_DEFINE_UNQUOTED([DS_PACKAGE_VERSION], "$VERSION_MAJOR.$VERSION_MINOR.$VERSION_MAINT 
DEVELOPER BUILD", [package version]) + AC_DEFINE_UNQUOTED([DS_PACKAGE_STRING], "$PACKAGE_TARNAME DEVELOPER BUILD", [package string]) + # define these for automake distdir + VERSION="DEBUG" + AC_DEFINE_UNQUOTED([VERSION], "$VERSION", [package version]) +else + debug_defs="" + # set the default safe CFLAGS that would be set by AC_PROG_CC otherwise + debug_cflags="-g -O2" + debug_cxxflags="-g -O2" + debug_rust_defs="-C debuginfo=2" + cargo_defs="--release" + rust_target_dir="release" + AC_DEFINE_UNQUOTED([DS_PACKAGE_VERSION], "$PACKAGE_VERSION", [package version]) + AC_DEFINE_UNQUOTED([DS_PACKAGE_STRING], "$PACKAGE_TARNAME $PACKAGE_VERSION", [package string]) + # define these for automake distdir + VERSION=$PACKAGE_VERSION + AC_DEFINE_UNQUOTED([VERSION], "$VERSION", [package version]) +fi +AC_SUBST([debug_defs]) +AC_SUBST([debug_cflags]) +AC_SUBST([debug_cxxflags]) +AC_SUBST([debug_rust_defs]) +AC_SUBST([cargo_defs]) +AC_SUBST([rust_target_dir]) +AM_CONDITIONAL([DEBUG],[test "$enable_debug" = yes]) + +AC_MSG_CHECKING(for --enable-asan) +AC_ARG_ENABLE(asan, AS_HELP_STRING([--enable-asan], [Enable gcc/clang address sanitizer options (default: no)]), + [], [ enable_asan=no ]) +AC_MSG_RESULT($enable_asan) +if test "$enable_asan" = yes ; then + asan_cflags="-fsanitize=address -fno-omit-frame-pointer" + asan_rust_defs="-Z sanitizer=address" +else + asan_cflags="" + asan_rust_defs="" +fi +AC_SUBST([asan_cflags]) +AC_SUBST([asan_rust_defs]) +AM_CONDITIONAL(enable_asan,[test "$enable_asan" = yes]) + +AC_MSG_CHECKING(for --enable-msan) +AC_ARG_ENABLE(msan, AS_HELP_STRING([--enable-msan], [Enable gcc/clang memory sanitizer options (default: no)]), + [], [ enable_msan=no ]) +AC_MSG_RESULT($enable_msan) +if test "$enable_msan" = yes ; then + msan_cflags="-fsanitize=memory -fsanitize-memory-track-origins -fno-omit-frame-pointer" + msan_rust_defs="-Z sanitizer=memory" +else + msan_cflags="" + msan_rust_defs="" +fi +AC_SUBST([msan_cflags]) +AC_SUBST([msan_rust_defs]) 
+AM_CONDITIONAL(enable_msan,test "$enable_msan" = "yes")
+
+AC_MSG_CHECKING(for --enable-tsan)
+AC_ARG_ENABLE(tsan, AS_HELP_STRING([--enable-tsan], [Enable gcc/clang thread sanitizer options (default: no)]),
+    [], [ enable_tsan=no ])
+AC_MSG_RESULT($enable_tsan)
+if test "$enable_tsan" = yes ; then
+    tsan_cflags="-fsanitize=thread -fno-omit-frame-pointer"
+    tsan_rust_defs="-Z sanitizer=thread"
+else
+    tsan_cflags=""
+    tsan_rust_defs=""
+fi
+AC_SUBST([tsan_cflags])
+AC_SUBST([tsan_rust_defs])
+AM_CONDITIONAL(enable_tsan,test "$enable_tsan" = "yes")
+
+AC_MSG_CHECKING(for --enable-ubsan)
+AC_ARG_ENABLE(ubsan, AS_HELP_STRING([--enable-ubsan], [Enable gcc/clang undefined behaviour sanitizer options (default: no)]),
+    [], [ enable_ubsan=no ])
+AC_MSG_RESULT($enable_ubsan)
+if test "$enable_ubsan" = yes ; then
+    ubsan_cflags="-fsanitize=undefined -fno-omit-frame-pointer"
+    ubsan_rust_defs=""
+else
+    ubsan_cflags=""
+    ubsan_rust_defs=""
+fi
+AC_SUBST([ubsan_cflags])
+AC_SUBST([ubsan_rust_defs])
+AM_CONDITIONAL(enable_ubsan,test "$enable_ubsan" = "yes")
+
+AM_CONDITIONAL(with_sanitizer,test "$enable_asan" = "yes" -o "$enable_msan" = "yes" -o "$enable_tsan" = "yes" -o "$enable_ubsan" = "yes")
+
+AC_MSG_CHECKING(for --enable-clang)
+AC_ARG_ENABLE(clang, AS_HELP_STRING([--enable-clang], [Enable clang (default: no)]),
+    [], [ enable_clang=no ])
+AC_MSG_RESULT($enable_clang)
+AM_CONDITIONAL(CLANG_ENABLE,test "$enable_clang" = "yes")
+
+AC_MSG_CHECKING(for --enable-cfi)
+AC_ARG_ENABLE(cfi, AS_HELP_STRING([--enable-cfi], [Enable control flow integrity - requires --enable-clang (default: no)]),
+    [], [ enable_cfi=no ])
+AC_MSG_RESULT($enable_cfi)
+AM_CONDITIONAL(CFI_ENABLE,test "$enable_cfi" = "yes" -a "$enable_clang" = "yes")
+
+AM_CONDITIONAL([RPM_HARDEND_CC], [test -f /usr/lib/rpm/redhat/redhat-hardened-cc1])
+AC_MSG_CHECKING(for --enable-gcc-security)
+AC_ARG_ENABLE(gcc-security, AS_HELP_STRING([--enable-gcc-security], [Enable gcc secure compilation options (default: no)]),
+    
[], [ enable_gcc_security=no ])
+AC_MSG_RESULT($enable_gcc_security)
+if test "$enable_gcc_security" = yes ; then
+    gccsec_cflags="-Wall -Wp,-D_FORTIFY_SOURCE=2 -fexceptions -fstack-protector-strong --param=ssp-buffer-size=4 -grecord-gcc-switches -Werror=format-security"
+else
+    # Without this, -fPIC doesn't work on generic fedora builds, --disable-gcc-sec.
+    gccsec_cflags=""
+fi
+AM_COND_IF([RPM_HARDEND_CC],
+    [ gccsec_cflags="$gccsec_cflags -specs=/usr/lib/rpm/redhat/redhat-hardened-cc1" ],
+    [])
+AC_SUBST([gccsec_cflags])
+
+# Pull in profiling.
+AC_MSG_CHECKING(for --enable-profiling)
+AC_ARG_ENABLE(profiling, AS_HELP_STRING([--enable-profiling], [Enable gcov profiling features (default: no)]),
+    [], [ enable_profiling=no ])
+AC_MSG_RESULT($enable_profiling)
+if test "$enable_profiling" = yes ; then
+    profiling_defs="-fprofile-arcs -ftest-coverage -g3 -ggdb -gdwarf-5 -O0"
+    profiling_links="-lgcov --coverage"
+else
+    profiling_defs=""
+    profiling_links=""
+fi
+AC_SUBST([profiling_defs])
+AC_SUBST([profiling_links])
+
+AC_MSG_CHECKING(for --enable-systemtap)
+AC_ARG_ENABLE(systemtap, AS_HELP_STRING([--enable-systemtap], [Enable systemtap probe features (default: no)]),
+    [], [ enable_systemtap=no ])
+AC_MSG_RESULT($enable_systemtap)
+if test "$enable_systemtap" = yes ; then
+    systemtap_defs="-DSYSTEMTAP"
+else
+    systemtap_defs=""
+fi
+AC_SUBST([systemtap_defs])
+
+
+# these enables are for optional or experimental features
+AC_MSG_CHECKING(for --enable-pam-passthru)
+AC_ARG_ENABLE(pam-passthru,
+        AS_HELP_STRING([--enable-pam-passthru],
+                       [enable the PAM passthrough auth plugin (default: yes)]),
+        [], [ enable_pam_passthru=yes ])
+AC_MSG_RESULT($enable_pam_passthru)
+if test "$enable_pam_passthru" = yes ; then
+    # check for pam header file used by plugins/pass_passthru/pam_ptimpl.c
+    AC_CHECK_HEADER([security/pam_appl.h], [], [AC_MSG_ERROR([Missing header file security/pam_appl.h])])
+    AC_DEFINE([ENABLE_PAM_PASSTHRU], [1], [enable the pam passthru auth plugin])
+fi +AM_CONDITIONAL(enable_pam_passthru,test "$enable_pam_passthru" = "yes") + +if test -z "$enable_dna" ; then + enable_dna=yes # if not set on cmdline, set default +fi +AC_MSG_CHECKING(for --enable-dna) +AC_ARG_ENABLE(dna, + AS_HELP_STRING([--enable-dna], + [enable the Distributed Numeric Assignment (DNA) plugin (default: yes)])) +if test "$enable_dna" = yes ; then + AC_MSG_RESULT(yes) + AC_DEFINE([ENABLE_DNA], [1], [enable the dna plugin]) +else + AC_MSG_RESULT(no) +fi +AM_CONDITIONAL(enable_dna,test "$enable_dna" = "yes") + +if test -z "$enable_ldapi" ; then + enable_ldapi=yes # if not set on cmdline, set default +fi +AC_MSG_CHECKING(for --enable-ldapi) +AC_ARG_ENABLE(ldapi, + AS_HELP_STRING([--enable-ldapi], + [enable LDAP over unix domain socket (LDAPI) support (default: yes)])) +if test "$enable_ldapi" = yes ; then + AC_MSG_RESULT(yes) + AC_DEFINE([ENABLE_LDAPI], [1], [enable ldapi support in the server]) +else + AC_MSG_RESULT(no) +fi +AM_CONDITIONAL(enable_ldapi,test "$enable_ldapi" = "yes") + +if test -z "$enable_autobind" ; then + enable_autobind=yes # if not set on cmdline, set default +fi +AC_MSG_CHECKING(for --enable-autobind) +AC_ARG_ENABLE(autobind, + AS_HELP_STRING([--enable-autobind], + [enable auto bind over unix domain socket (LDAPI) support (default: no)])) +if test "$enable_ldapi" = yes -a "$enable_autobind" = yes ; then + AC_MSG_RESULT(yes) + AC_DEFINE([ENABLE_AUTOBIND], [1], [enable ldapi auto bind support in the server]) +else + AC_MSG_RESULT(no) +fi +AM_CONDITIONAL(enable_autobind,test "$enable_autobind" = "yes") + +if test -z "$enable_auto_dn_suffix" ; then + enable_auto_dn_suffix=no # if not set on cmdline, set default +fi +AC_MSG_CHECKING(for --enable-auto-dn-suffix) +AC_ARG_ENABLE(auto-dn-suffix, + AS_HELP_STRING([--enable-auto-dn-suffix], + [enable auto bind with auto dn suffix over unix domain socket (LDAPI) support (default: no)])) +if test "$enable_ldapi" = yes -a "$enable_autobind" = yes -a "$enable_auto_dn_suffix" = "yes"; then + 
AC_MSG_RESULT(yes) + AC_DEFINE([ENABLE_AUTO_DN_SUFFIX], [1], [enable ldapi auto bind with auto dn suffix support in the server]) +else + AC_MSG_RESULT(no) +fi +AM_CONDITIONAL(enable_auto_dn_suffix,test "$enable_auto_dn_suffix" = "yes") + +if test -z "$enable_bitwise" ; then + enable_bitwise=yes # if not set on cmdline, set default +fi +AC_MSG_CHECKING(for --enable-bitwise) +AC_ARG_ENABLE(bitwise, + AS_HELP_STRING([--enable-bitwise], + [enable the bitwise matching rule plugin (default: yes)])) +if test "$enable_bitwise" = yes ; then + AC_MSG_RESULT(yes) + AC_DEFINE([ENABLE_BITWISE], [1], [enable the bitwise plugin]) +else + AC_MSG_RESULT(no) +fi +AM_CONDITIONAL(enable_bitwise,test "$enable_bitwise" = "yes") + +# Can never be enabled. +AM_CONDITIONAL(enable_presence,test "$enable_presence" = "yes") + +if test -z "$enable_acctpolicy" ; then + enable_acctpolicy=yes # if not set on cmdline, set default +fi +AC_MSG_CHECKING(for --enable-acctpolicy) +AC_ARG_ENABLE(acctpolicy, + AS_HELP_STRING([--enable-acctpolicy], + [enable the account policy plugin (default: yes)])) +if test "$enable_acctpolicy" = yes ; then + AC_MSG_RESULT(yes) + AC_DEFINE([ENABLE_ACCTPOLICY], [1], [enable the account policy plugin]) +else + AC_MSG_RESULT(no) +fi +AM_CONDITIONAL(enable_acctpolicy,test "$enable_acctpolicy" = "yes") + +if test -z "$enable_posix_winsync" ; then + enable_posix_winsync=yes # if not set on cmdline, set default +fi +AC_MSG_CHECKING(for --enable-posix-winsync) +AC_ARG_ENABLE(posix_winsync, + AS_HELP_STRING([--enable-posix-winsync], + [enable support for POSIX user/group attributes in winsync (default: yes)])) +if test "$enable_posix_winsync" = yes ; then + AC_MSG_RESULT(yes) + AC_DEFINE([ENABLE_POSIX_WINSYNC], [1], [enable support for POSIX user/group attributes in winsync]) +else + AC_MSG_RESULT(no) +fi +AM_CONDITIONAL(enable_posix_winsync,test "$enable_posix_winsync" = "yes") + +# the default prefix - override with --prefix or --with-fhs 
+AC_PREFIX_DEFAULT([/opt/$PACKAGE_NAME]) + +# If we have no prefix specified, we need to fix the prefix variable. +# If we don't what happens is $prefixdir ends up as NONE, and then +# later configure changes $prefix to $ac_default_prefix underneath us. +if test "$prefix" = "NONE"; then + prefix=$ac_default_prefix +fi + +m4_include(m4/fhs.m4) + +# /run directory path +AC_ARG_WITH([localrundir], + AS_HELP_STRING([--with-localrundir=DIR], + [Runtime data directory]), + [localrundir=$with_localrundir], + [localrundir="/run"]) +AC_SUBST([localrundir]) + +cockpitdir=/389-console + +# installation paths - by default, we store everything +# under the prefix. The with-fhs option will use /usr, +# /etc, and /var. The with-fhs-opt option will use the +# prefix, but it's sysconfdir and localstatedir will be +# /etc/opt, and /var/opt. +if test "$with_fhs_opt" = "yes"; then + # Override sysconfdir and localstatedir if FHS optional + # package was requested. + prefixdir=$prefix + sysconfdir='/etc/opt' + localstatedir='/var/opt' + localrundir='/var/opt/run' + # relative to datadir + sampledatadir=/data + # relative to datadir + systemschemadir=/schema + # relative to datadir + scripttemplatedir=/script-templates + # relative to datadir + updatedir=/updates + # relative to libdir + serverdir= + # relative to includedir + serverincdir= + # relative to libdir + serverplugindir=/plugins + # relative to datadir + infdir=/inf + # relative to datadir + mibdir=/mibs + # location of property/resource files, relative to datadir + propertydir=/properties + # relative to libdir + perldir=/perl + # relative to libdir + pythondir=/python +else + if test "$with_fhs" = "yes"; then + ac_default_prefix=/usr + prefix=$ac_default_prefix + exec_prefix=$prefix + dnl as opposed to the default /usr/etc + sysconfdir='/etc' + dnl as opposed to the default /usr/var + localstatedir='/var' + localrundir='/run' + fi + prefixdir=$prefix + # relative to datadir + sampledatadir=/$PACKAGE_NAME/data + # relative 
to datadir + systemschemadir=/$PACKAGE_NAME/schema + # relative to datadir + scripttemplatedir=/$PACKAGE_NAME/script-templates + # relative to datadir + updatedir=/$PACKAGE_NAME/updates + # relative to libdir + serverdir=$PACKAGE_NAME + # relative to includedir + serverincdir=$PACKAGE_NAME + # relative to libdir + serverplugindir=/$PACKAGE_NAME/plugins + # relative to datadir + infdir=/$PACKAGE_NAME/inf + # relative to datadir + mibdir=/$PACKAGE_NAME/mibs + # location of property/resource files, relative to datadir + propertydir=/$PACKAGE_NAME/properties + # relative to libdir + perldir=/$PACKAGE_NAME/perl + # relative to libdir + pythondir=/$PACKAGE_NAME/python +fi + +# if mandir is the default value, override it +# otherwise, the user must have set it - just use it +if test X"$mandir" = X'${prefix}/man' ; then + mandir='$(datadir)/man' +fi + +# Shared paths for all layouts +# relative to sysconfdir +configdir=/$PACKAGE_NAME/config +# relative to sysconfdir +schemadir=/$PACKAGE_NAME/schema + +# default user, group +defaultuser=dirsrv +defaultgroup=dirsrv + +AC_MSG_CHECKING(for --with-perldir) +AC_ARG_WITH([perldir], + AS_HELP_STRING([--with-perldir=PATH], + [Directory for perl]) +) +if test -n "$with_perldir"; then + if test "$with_perldir" = yes ; then + AC_MSG_ERROR([You must specify --with-perldir=/full/path/to/perl]) + elif test "$with_perldir" = no ; then + with_perldir= + else + AC_MSG_RESULT([$with_perldir]) + fi +else + with_perldir= +fi + +AC_MSG_CHECKING(for --with-pythonexec) +AC_ARG_WITH([pythonexec], + AS_HELP_STRING([--with-pythonexec=PATH], + [Path to executable for python]) +) +if test -n "$with_pythonexec"; then + if test "$with_pythonexec" = yes ; then + AC_MSG_ERROR([You must specify --with-pythonexec=/full/path/to/python]) + elif test "$with_pythonexec" = no ; then + with_pythonexec=/usr/bin/python3 + else + AC_MSG_RESULT([$with_pythonexec]) + fi +else + with_pythonexec=/usr/bin/python3 +fi + +AC_SUBST(prefixdir) +AC_SUBST(configdir) 
+AC_SUBST(sampledatadir) +AC_SUBST(systemschemadir) +AC_SUBST(propertydir) +AC_SUBST(schemadir) +AC_SUBST(serverdir) +AC_SUBST(serverincdir) +AC_SUBST(serverplugindir) +AC_SUBST(scripttemplatedir) +AC_SUBST(perldir) +AC_SUBST(pythondir) +AC_SUBST(infdir) +AC_SUBST(mibdir) +AC_SUBST(mandir) +AC_SUBST(updatedir) +AC_SUBST(defaultuser) +AC_SUBST(defaultgroup) +AC_SUBST(cockpitdir) + +# check for --with-instconfigdir +AC_MSG_CHECKING(for --with-instconfigdir) +AC_ARG_WITH(instconfigdir, + AS_HELP_STRING([--with-instconfigdir=/path], + [Base directory for instance specific writable configuration directories (default $sysconfdir/$PACKAGE_NAME)]), +[ + if test $withval = yes ; then + AC_MSG_ERROR(Please specify a full path with --with-instconfigdir) + fi + instconfigdir="$withval" + AC_MSG_RESULT($withval) +], +[ + dnl this value is expanded out in Makefile.am + instconfigdir='$(sysconfdir)/$(PACKAGE_NAME)' + AC_MSG_RESULT(no) +]) +AC_SUBST(instconfigdir) + +# WINNT should be true if building on Windows system not using +# cygnus, mingw, or the like and using cmd.exe as the shell +AM_CONDITIONAL([WINNT], false) + +# Deal with platform dependent defines +# initdir is the location for the SysV init scripts - very heavily platform +# dependent and not specified in fhs or lsb +# and not used if systemd is used +initdir='$(sysconfdir)/rc.d' +AC_MSG_CHECKING(for --with-initddir) +AC_ARG_WITH(initddir, + AS_HELP_STRING([--with-initddir=/path], + [Absolute path (not relative like some of the other options) that should contain the SysV init scripts (default '$(sysconfdir)/rc.d')]), +[ + AC_MSG_RESULT($withval) +], +[ + AC_MSG_RESULT(no) +]) + +AM_CONDITIONAL([INITDDIR], [test -n "$with_initddir" -a "$with_initddir" != "no"]) + +# This will let us change over the python version easier in the future. +if test -n "$with_pythonexec"; then + pythonexec="$with_pythonexec" +else + pythonexec='/usr/bin/python3' +fi + +# Default to no atomic queue operations. 
+with_atomic_queue="no" + +# we use stty in perl scripts to disable password echo +# this doesn't work unless the full absolute path of the +# stty command is used e.g. system("stty -echo") does not +# work but system("/bin/stty -echo") does work +# since the path of stty may not be the same on all +# platforms, we set the default here to /bin/stty and +# allow that value to be overridden in the platform +# specific section below +sttyexec=/bin/stty +case $host in + *-*-linux*) + AC_DEFINE([LINUX], [1], [Linux]) + AC_DEFINE([_GNU_SOURCE], [1], [GNU Source]) + platform="linux" + initdir='$(sysconfdir)/rc.d/init.d' + # do arch specific linux stuff here + case $host in + i*86-*-linux*) + AC_DEFINE([CPU_x86], [], [cpu type x86]) + ;; + x86_64-*-linux*) + AC_DEFINE([CPU_x86_64], [1], [cpu type x86_64]) + + ;; + aarch64-*-linux*) + AC_DEFINE([CPU_arm], [], [cpu type arm]) + ;; + arm-*-linux*) + AC_DEFINE([CPU_arm], [], [cpu type arm]) + ;; + ppc64le-*-linux*) + ;; + ppc64-*-linux*) + ;; + ppc-*-linux*) + ;; + s390-*-linux*) + ;; + s390x-*-linux*) + ;; + esac + # some programs use the native thread library directly + THREADLIB=-lpthread + AC_SUBST([THREADLIB], [$THREADLIB]) + LIBCRYPT=-lcrypt + AC_SUBST([LIBCRYPT], [$LIBCRYPT]) + AC_DEFINE([USE_POSIX_RWLOCKS], [1], [POSIX rwlocks]) + ;; + *-*-freebsd*) + AC_DEFINE([FREEBSD], [1], [FreeBSD]) + platform="freebsd" + initdir='$(sysconfdir)/rc.d' + THREADLIB=-lthr + AC_SUBST([THREADLIB], [$THREADLIB]) + AC_DEFINE([USE_POSIX_RWLOCKS], [1], [POSIX rwlocks]) + LIBDL= + ;; + ia64-hp-hpux*) + AC_DEFINE([hpux], [1], [HP-UX]) + AC_DEFINE([HPUX], [1], [HP-UX]) + AC_DEFINE([HPUX11], [1], [HP-UX 11]) + AC_DEFINE([HPUX11_23], [1], [HP-UX 11.23]) + AC_DEFINE([CPU_ia64], [], [cpu type ia64]) + AC_DEFINE([OS_hpux], [1], [OS HP-UX]) + AC_DEFINE([_POSIX_C_SOURCE], [199506L], [POSIX revision]) + AC_DEFINE([_HPUX_SOURCE], [1], [Source namespace]) + AC_DEFINE([_INCLUDE_STDC__SOURCE_199901], [1], [to pick up all of the printf format macros in 
inttypes.h]) + # assume 64 bit + platform="hpux" + initconfigdir="/$PACKAGE_NAME/config" + # HPUX doesn't use /etc for this + initdir=/init.d + ;; + hppa*-hp-hpux*) + AC_DEFINE([hpux], [1], [HP-UX]) + AC_DEFINE([HPUX], [1], [HP-UX]) + AC_DEFINE([HPUX11], [1], [HP-UX 11]) + AC_DEFINE([HPUX11_11], [1], [HP-UX 11.11]) + AC_DEFINE([CPU_hppa], [], [cpu type pa-risc]) + AC_DEFINE([OS_hpux], [1], [OS HP-UX]) + AC_DEFINE([_POSIX_C_SOURCE], [199506L], [POSIX revision]) + AC_DEFINE([_HPUX_SOURCE], [1], [Source namespace]) + AC_DEFINE([_INCLUDE_STDC__SOURCE_199901], [1], [to pick up all of the printf format macros in inttypes.h]) + # assume 64 bit + initconfigdir="/$PACKAGE_NAME/config" + platform="hpux" + # HPUX doesn't use /etc for this + initdir=/init.d + ;; + *-*-solaris*) + AC_DEFINE([SVR4], [1], [SVR4]) + AC_DEFINE([__svr4], [1], [SVR4]) + AC_DEFINE([__svr4__], [1], [SVR4]) + AC_DEFINE([_SVID_GETTOD], [1], [SVID_GETTOD]) + AC_DEFINE([SOLARIS], [1], [SOLARIS]) + AC_DEFINE([OS_solaris], [1], [OS SOLARIS]) + AC_DEFINE([sunos5], [1], [SunOS5]) + AC_DEFINE([OSVERSION], [509], [OS version]) + AC_DEFINE([_REENTRANT], [1], [_REENTRANT]) + AC_DEFINE([NO_DOMAINNAME], [1], [no getdomainname]) +dnl socket nsl and dl are required to link several programs and libdb + LIBSOCKET=-lsocket + AC_SUBST([LIBSOCKET], [$LIBSOCKET]) + LIBNSL=-lnsl + AC_SUBST([LIBNSL], [$LIBNSL]) + LIBDL=-ldl + AC_SUBST([LIBDL], [$LIBDL]) +dnl Cstd and Crun are required to link any C++ related code + LIBCSTD=-lCstd + AC_SUBST([LIBCSTD], [$LIBCSTD]) + LIBCRUN=-lCrun + AC_SUBST([LIBCRUN], [$LIBCRUN]) + platform="solaris" + initdir='$(sysconfdir)/init.d' + case $host in + i?86-*-solaris2.1[[0-9]]*) + dnl I dont know why i386 need this explicit + AC_DEFINE([HAVE_GETPEERUCRED], [1], [have getpeerucred]) + ;; + sparc-*-solaris*) + dnl includes some assembler stuff in counter.o + AC_DEFINE([CPU_sparc], [], [cpu type sparc]) + TARGET='SPARC' + ;; + esac + ;; + *) + platform="" + ;; +esac + +### TO CHECK FOR SSE4.2!!! 
+# gcc -march=native -dM -E - < /dev/null | grep SSE +# We can just use the define in GCC instead! + +AC_MSG_CHECKING([for GCC provided 64-bit atomic operations]) +AC_LINK_IFELSE([AC_LANG_PROGRAM([[ + #include + ]], + [[ + uint64_t t_counter = 0; + uint64_t t_oldval = 0; + uint64_t t_newval = 1; + + __atomic_compare_exchange_8(&t_counter, &t_oldval, t_newval, 0, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST); + __atomic_add_fetch_8(&t_counter, t_newval, __ATOMIC_SEQ_CST); + __atomic_sub_fetch_8(&t_counter, t_newval, __ATOMIC_SEQ_CST); + __atomic_load(&t_counter, &t_oldval, __ATOMIC_SEQ_CST); + return 0; + ]])], + [ + AC_DEFINE([ATOMIC_64BIT_OPERATIONS], [1], [have 64-bit atomic operation functions provided by gcc]) + AC_MSG_RESULT([yes]) + ], + [ + AC_MSG_RESULT([no]) + ] +) + +# cmd line overrides default setting above +if test -n "$with_initddir" ; then + initdir="$with_initddir" +fi + +# sysv init scripts not used when systemd is used +AC_SUBST(initdir) +AC_SUBST(pythonexec) +AC_SUBST(sttyexec) + +# set default initconfigdir if not already set +# value will be set so as to be relative to $(sysconfdir) +if test -z "$initconfigdir" ; then + if test -d /etc/sysconfig ; then + initconfigdir=/sysconfig + elif test -d /etc/default ; then + initconfigdir=/default + else + initconfigdir="/$PACKAGE_NAME/config" + fi +fi +AC_SUBST(initconfigdir) + +# Conditionals for makefile.am +AM_CONDITIONAL([HPUX],[test "$platform" = "hpux"]) +AM_CONDITIONAL([SOLARIS],[test "$platform" = "solaris"]) +AM_CONDITIONAL([FREEBSD],[test "$platform" = "freebsd"]) +AM_CONDITIONAL([SPARC],[test "x$TARGET" = xSPARC]) + +# Check for library dependencies +if $PKG_CONFIG --exists nspr; then + PKG_CHECK_MODULES([NSPR], [nspr]) +else + PKG_CHECK_MODULES([NSPR], [dirsec-nspr]) +fi + +if $PKG_CONFIG --exists nss; then + PKG_CHECK_MODULES([NSS], [nss]) + nss_libdir=`$PKG_CONFIG --libs-only-L nss | sed -e s/-L// | sed -e s/\ .*$//` +else + PKG_CHECK_MODULES([NSS], [dirsec-nss]) + nss_libdir=`$PKG_CONFIG 
--libs-only-L dirsec-nss | sed -e s/-L// | sed -e s/\ .*$//` +fi +AC_SUBST(nss_libdir) + +PKG_CHECK_MODULES([OPENSSL], [openssl]) + +m4_include(m4/openldap.m4) +m4_include(m4/db.m4) + +PKG_CHECK_MODULES([SASL], [libsasl2]) + +PKG_CHECK_MODULES([ICU], [icu-i18n >= 60.2]) + +m4_include(m4/netsnmp.m4) + +PKG_CHECK_MODULES([KERBEROS], [krb5]) +krb5_vendor=`$PKG_CONFIG --variable=vendor krb5` +if test "$krb5_vendor" = "MIT"; then + AC_DEFINE(HAVE_KRB5, 1, [Define if you have Kerberos V]) + save_LIBS="$LIBS" + LIBS="$KERBEROS_LIBS" + AC_CHECK_FUNCS([krb5_cc_new_unique]) + LIBS="$save_LIBS" +fi + +PKG_CHECK_MODULES( + [PCRE], + [libpcre2-8], + [ + AC_DEFINE( + [PCRE2_CODE_UNIT_WIDTH], + 8, + [Define libpcre2 unit size] + ) + ] +) + +m4_include(m4/selinux.m4) +m4_include(m4/systemd.m4) + +AC_MSG_CHECKING(whether to enable cmocka unit tests) +AC_ARG_ENABLE(cmocka, AS_HELP_STRING([--enable-cmocka], [Enable cmocka unit tests (default: no)])) +if test "x$enable_cmocka" = "xyes"; then + AC_MSG_RESULT(yes) + PKG_CHECK_MODULES([CMOCKA], [cmocka]) + AC_DEFINE([ENABLE_CMOCKA], [1], [Enable cmocka unit tests]) +else + AC_MSG_RESULT(no) +fi +AM_CONDITIONAL([ENABLE_CMOCKA], [test "x$enable_cmocka" = "xyes"]) + +m4_include(m4/doxygen.m4) + +PACKAGE_BASE_VERSION=`echo $PACKAGE_VERSION | awk -F\. 
'{print $1"."$2}'` +AC_SUBST(PACKAGE_BASE_VERSION) + +AM_CONDITIONAL(OPENLDAP,test "$with_openldap" = "yes") + +# check for --with-libldap-r +AC_MSG_CHECKING(for --with-libldap-r) +AC_ARG_WITH(libldap-r, AS_HELP_STRING([--with-libldap-r],[Use lldap_r shared library (default: if OpenLDAP version is less than 2.5, then lldap_r will be used, else - lldap)]), +[ + if test "$withval" = "no"; then + AC_MSG_RESULT(no) + else + with_libldap_r=yes + AC_MSG_RESULT(yes) + AC_SUBST(with_libldap_r) + fi +], +OPENLDAP_VERSION=`ldapsearch -VV 2> >(sed -n '/ldapsearch/ s/.*ldapsearch \([[[0-9]]]\+\.[[[0-9]]]\+\.[[[0-9]]]\+\) .*/\1/p')` +AX_COMPARE_VERSION([$OPENLDAP_VERSION], [lt], [2.5], [ with_libldap_r=yes ], [ with_libldap_r=no ]) +AC_MSG_RESULT($with_libldap_r)) + +AM_CONDITIONAL([WITH_LIBLDAP_R],[test "$with_libldap_r" = yes]) + +# write out paths for binary components +AC_SUBST(ldaplib) +AC_SUBST(ldaplib_defs) +AC_SUBST(ldaptool_bindir) +AC_SUBST(ldaptool_opts) +AC_SUBST(plainldif_opts) + +AC_SUBST(brand) +AC_SUBST(capbrand) +AC_SUBST(vendor) + +# AC_DEFINE([USE_OLD_UNHASHED], [], [Use old unhashed code]) + +# Internally we use a macro function slapi_log_err() to call slapi_log_error() +# which gives us the option to do performance testing without the presence of +# logging. To remove the presence of error logging undefine LDAP_ERROR_LOGGING. +AC_DEFINE([LDAP_ERROR_LOGGING], [1], [LDAP error logging flag]) + +# Build our pkgconfig files +# This currently conflicts with %.in: rule in Makefile.am, which should be removed eventually. 
+
+# AC_CONFIG_FILES([ldap/admin/src/defaults.inf])
+
+AC_CONFIG_FILES([src/pkgconfig/dirsrv.pc src/pkgconfig/libsds.pc src/pkgconfig/svrcore.pc])
+
+AC_CONFIG_FILES([Makefile rpm/389-ds-base.spec ])
+
+AC_CONFIG_FILES([.cargo/config])
+
+AC_OUTPUT
+
diff --git a/dirsrvtests/README b/dirsrvtests/README
new file mode 100644
index 0000000..48b003f
--- /dev/null
+++ b/dirsrvtests/README
@@ -0,0 +1,28 @@
+389-ds-base-tests README
+=================================================
+
+Prerequisites:
+-------------------------------------------------
+Install the python-lib389 packages, or
+download the source (git clone ssh://git.fedorahosted.org/git/389/lib389.git) and set your PYTHONPATH accordingly
+
+
+Description:
+-------------------------------------------------
+This package includes python-lib389 based python scripts for testing the Directory Server. The following describes the various types of tests available:
+
+tickets - These scripts test individual bug fixes
+suites - These test functional areas of the server
+stress - These tests perform "stress" tests on the server
+
+There is also a "create_test.py" script available to construct a template test script for creating new tests.
+
+
+Documentation:
+-------------------------------------------------
+See http://www.port389.org for the latest information
+
+http://www.port389.org/docs/389ds/FAQ/upstream-test-framework.html
+http://www.port389.org/docs/389ds/howto/howto-write-lib389.html
+http://www.port389.org/docs/389ds/howto/howto-run-lib389-jenkins.html
+
diff --git a/dirsrvtests/__init__.py b/dirsrvtests/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/dirsrvtests/check_for_duplicate_ids.py b/dirsrvtests/check_for_duplicate_ids.py
new file mode 100755
index 0000000..55ae97b
--- /dev/null
+++ b/dirsrvtests/check_for_duplicate_ids.py
@@ -0,0 +1,46 @@
+# --- BEGIN COPYRIGHT BLOCK ---
+# Copyright (C) 2022 Red Hat, Inc.
+# All rights reserved.
#
# License: GPL (version 3 or any later version).
# See LICENSE for details.
# --- END COPYRIGHT BLOCK ---
#
import os
import subprocess
import sys
from collections import Counter


def check_for_duplicates(path):
    """Check for duplicate ``:id:`` tokens in tests.

    Recursively greps *path* for lines containing the ":id:" marker and
    returns the set of id values that occur more than once.

    :param path: directory tree to scan
    :returns: set of duplicated id strings (empty set when none)
    :raises subprocess.CalledProcessError: if grep itself fails
        (exit status > 1; exit status 1 only means "no matches")
    """
    prefix = ":id:"
    cmd = ["grep", "-rhi", prefix, path]
    # Do not use check=True here: grep exits 1 when there are simply no
    # matching lines, which is a perfectly valid "no duplicates" outcome
    # and must not abort the scan.
    p = subprocess.run(cmd, check=False, stdout=subprocess.PIPE)
    if p.returncode > 1:
        raise subprocess.CalledProcessError(p.returncode, cmd)
    ids = [line.replace(prefix, "").strip()
           for line in p.stdout.decode().splitlines()]
    # Counter gives O(n) duplicate detection instead of the O(n^2)
    # ids.count(x) per-element scan.
    return {value for value, count in Counter(ids).items() if count > 1}


def main():
    """CLI entry point: report duplicate test ids under the given path.

    Exits 0 when no duplicates are found, 1 on bad usage, a missing
    path, or when duplicates exist.
    """
    if len(sys.argv) < 2:
        print(f"Usage: {sys.argv[0]} path_to_tests")
        sys.exit(1)
    path = sys.argv[1]
    if not os.path.exists(path):
        print(f"Path {path} doesn't exist, exiting...")
        sys.exit(1)
    dups = check_for_duplicates(path)
    if dups:
        print("Found duplicate ids:")
        for dup in dups:
            print(dup)
        sys.exit(1)
    print("No duplicates found")
    sys.exit(0)


if __name__ == "__main__":
    main()
# --- END COPYRIGHT BLOCK ---
#
import subprocess
import logging
import pytest
import shutil
import glob
import ldap
import os
import gzip

from .report import getReport
from lib389.paths import Paths
from enum import Enum

# WebUI-only dependencies; imported only when a WebUI run is requested so
# regular runs do not require slugify/playwright to be installed.
if "WEBUI" in os.environ:
    from slugify import slugify
    from pathlib import Path


# Packages whose versions are reported in the pytest header and metadata.
pkgs = ['389-ds-base', 'nss', 'nspr', 'openldap', 'cyrus-sasl']
p = Paths()


class FIPSState(Enum):
    """Tri-state FIPS mode of the host kernel (see is_fips())."""
    ENABLED = 'enabled'
    DISABLED = 'disabled'
    NOT_AVAILABLE = 'not_available'

    def __unicode__(self):
        return self.value

    def __str__(self):
        return self.value


def get_rpm_version(pkg):
    """Return 'VERSION-RELEASE' of an installed rpm package.

    :param pkg: rpm package name to query
    :returns: version string, or "not installed" when the query fails
    """
    try:
        result = subprocess.check_output(['rpm', '-q', '--queryformat',
                                          '%{VERSION}-%{RELEASE}', pkg])
    except (OSError, subprocess.CalledProcessError):
        # Narrowed from a bare "except:": the rpm binary may be absent
        # (OSError/FileNotFoundError) or the package not installed
        # (CalledProcessError); any other exception is a real bug and
        # should propagate.
        result = b"not installed"

    return result.decode('utf-8')


def is_fips():
    """Report whether the kernel runs in FIPS mode.

    :returns: FIPSState.ENABLED / DISABLED, or NOT_AVAILABLE when the
        /proc/sys/crypto/fips_enabled knob does not exist on this host.
    """
    if not os.path.exists('/proc/sys/crypto/fips_enabled'):
        return FIPSState.NOT_AVAILABLE
    state = None
    with open('/proc/sys/crypto/fips_enabled', 'r') as f:
        state = f.readline().strip()
    if state == '1':
        return FIPSState.ENABLED
    else:
        return FIPSState.DISABLED


@pytest.fixture(autouse=True)
def _environment(request):
    """Record package versions and FIPS state in pytest-html metadata."""
    if "_metadata" in dir(request.config):
        for pkg in pkgs:
            request.config._metadata[pkg] = get_rpm_version(pkg)
        request.config._metadata['FIPS'] = is_fips()


def pytest_cmdline_main(config):
    """Force DEBUG-level logging for the whole test run."""
    logging.basicConfig(level=logging.DEBUG)


def pytest_report_header(config):
    """Add package versions and FIPS state to the pytest report header."""
    header = ""
    for pkg in pkgs:
        header += "%s: %s\n" % (pkg, get_rpm_version(pkg))
    header += "FIPS: %s" % is_fips()
    return header


@pytest.fixture(scope="function", autouse=True)
def log_test_name_to_journald(request):
    """Log the current test name to journald before and after each test.

    Only active when the server build is systemd-enabled.
    """
    if p.with_systemd:
        def log_current_test():
            subprocess.Popen("echo $PYTEST_CURRENT_TEST | systemd-cat -t pytest", stdin=subprocess.PIPE, shell=True)

        log_current_test()
        request.addfinalizer(log_current_test)
    return log_test_name_to_journald
@pytest.fixture(scope="function", autouse=True)
def rotate_xsan_logs(request):
    """Move stale ASAN/xsan logs aside so HTML reports only attach fresh ones."""
    # Do we have a pytest-html installed?
    pytest_html = request.config.pluginmanager.getplugin('html')
    if pytest_html is not None:
        # We have it installed, but let's check if we actually use it (--html=report.html)
        pytest_htmlpath = request.config.getoption('htmlpath')
        if p.asan_enabled and pytest_htmlpath is not None:
            # ASAN is enabled and an HTML report was requested,
            # rotate the ASAN logs so that only relevant logs are attached to the case in the report.
            xsan_logs_dir = f'{p.run_dir}/bak'
            if not os.path.exists(xsan_logs_dir):
                os.mkdir(xsan_logs_dir)
            else:
                for f in glob.glob(f'{p.run_dir}/ns-slapd-*san*'):
                    shutil.move(f, xsan_logs_dir)
    return rotate_xsan_logs


@pytest.hookimpl(hookwrapper=True)
def pytest_runtest_makereport(item, call):
    """Attach ASAN reports and server logs to the pytest-html report.

    Runs around every phase; only the 'call' phase with pytest-html active
    adds extras. Also snapshots the browser page on WebUI test failure.
    """
    pytest_html = item.config.pluginmanager.getplugin('html')
    outcome = yield
    report = outcome.get_result()
    extra = getattr(report, 'extra', [])
    if report.when == 'call' and pytest_html is not None:
        pytest_htmlpath = item.config.getoption('htmlpath')
        if pytest_htmlpath is not None:
            # Attach any ASAN sanitizer logs produced by ns-slapd.
            for f in glob.glob(f'{p.run_dir}/ns-slapd-*san*'):
                with open(f) as asan_report:
                    text = asan_report.read()
                    extra.append(pytest_html.extras.text(text, name=os.path.basename(f)))
            # Attach instance logs; the log_dir split strips the
            # per-instance "slapd-NAME" suffix to reach the parent dir.
            for f in glob.glob(f'{p.log_dir.split("/slapd",1)[0]}/*/*'):
                if f.endswith('gz'):
                    # Rotated logs are gzip-compressed.
                    with gzip.open(f, 'rb') as dirsrv_log:
                        text = dirsrv_log.read()
                        log_name = os.path.basename(f)
                        instance_name = os.path.basename(os.path.dirname(f)).split("slapd-",1)[1]
                        extra.append(pytest_html.extras.text(text, name=f"{instance_name}-{log_name}"))
                elif 'rotationinfo' not in f:
                    # Plain-text logs, skipping the rotationinfo bookkeeping file.
                    with open(f) as dirsrv_log:
                        text = dirsrv_log.read()
                        log_name = os.path.basename(f)
                        instance_name = os.path.basename(os.path.dirname(f)).split("slapd-",1)[1]
                        extra.append(pytest_html.extras.text(text, name=f"{instance_name}-{log_name}"))
            report.extra = extra

    # Make a screenshot if WebUI test fails
    if call.when == "call" and "WEBUI" in os.environ:
        if call.excinfo is not None and "page" in item.funcargs:
            page = item.funcargs["page"]
            screenshot_dir = Path(".playwright-screenshots")
            screenshot_dir.mkdir(exist_ok=True)
            page.screenshot(path=str(screenshot_dir / f"{slugify(item.nodeid)}.png"))


def pytest_exception_interact(node, call, report):
    """On an ldap.SERVER_DOWN failure, append instance diagnostics to the report."""
    if report.failed:
        # call.excinfo contains an ExceptionInfo instance
        if call.excinfo.type is ldap.SERVER_DOWN:
            report.sections.extend(getReport())
def write_finalizer():
    """Write the finalizer function - delete/stop each instance.

    Emits (into the global TEST file handle) a fin() callback that stops
    the instances when DEBUGGING, otherwise deletes them.
    """

    def writeInstanceOp(action):
        # One generated line applying *action* to every instance.
        TEST.write('            map(lambda inst: inst.{}(), topology.all_insts.values())\n'.format(action))

    TEST.write('\n    def fin():\n')
    TEST.write('        """If we are debugging just stop the instances, otherwise remove them"""\n\n')
    TEST.write('        if DEBUGGING:\n')
    writeInstanceOp('stop')
    TEST.write('        else:\n')
    writeInstanceOp('delete')
    TEST.write('\n    request.addfinalizer(fin)')
    TEST.write('\n\n')


def get_existing_topologies(inst, suppliers, hubs, consumers):
    """Check if the requested topology exists in lib389.topologies.

    :param inst: number of standalone instances
    :param suppliers: number of replication suppliers
    :param hubs: number of replication hubs
    :param consumers: number of replication consumers
    :returns: [exists, fixture_name, human_readable_setup_text] where
        *exists* is True when lib389.topologies already defines the fixture
    """
    parts = []  # human-readable description pieces, joined with ", "

    def describe(count, noun):
        # "Supplier Instance" for one, "N Supplier Instances" otherwise.
        if count == 1:
            parts.append('{} Instance'.format(noun))
        else:
            parts.append('{} {} Instances'.format(count, noun))

    if inst:
        # Single standalone is the special 'st' fixture name.
        i = 'st' if inst == 1 else 'i{}'.format(inst)
        describe(inst, 'Standalone')
    else:
        i = ''
    if suppliers:
        ms = 'm{}'.format(suppliers)
        describe(suppliers, 'Supplier')
    else:
        ms = ''
    if hubs:
        hs = 'h{}'.format(hubs)
        describe(hubs, 'Hub')
    else:
        hs = ''
    if consumers:
        cs = 'c{}'.format(consumers)
        describe(consumers, 'Consumer')
    else:
        cs = ''

    my_topology = 'topology_{}{}{}{}'.format(i, ms, hs, cs)
    setup_text = ", ".join(parts)

    # Returns True in the first element of the list if the topology was found
    return [my_topology in dir(topologies), my_topology, setup_text]


def check_id_uniqueness(id_value):
    """Checks if ID is already present in other tests.

    create_test.py script should exist in the directory
    with a 'tests' dir.

    :param id_value: the test-case id (usually a uuid4) to look for
    :returns: True when no existing test file contains *id_value*
    """
    target = str(id_value)
    tests_dir = os.path.join(os.getcwd(), 'tests')
    for root, dirs, files in os.walk(tests_dir):
        for name in files:
            if name.endswith('.py'):
                with open(os.path.join(root, name), "r") as cifile:
                    for line in cifile:
                        # Plain substring test: the id is literal text, not a
                        # pattern. The previous re.search(str(id_value), line)
                        # treated regex metacharacters in the id specially.
                        if target in line:
                            return False

    return True


def display_uuid():
    """Print a fresh, unused test case uuid and exit."""
    # '0' seeds the loop; it is expected to collide with existing tests
    # (forcing a real uuid4 to be drawn) — if no tests exist the seed
    # itself is printed, matching the historical behaviour.
    tc_uuid = '0'
    while not check_id_uniqueness(tc_uuid):
        tc_uuid = uuid.uuid4()
    print(str(tc_uuid))
    exit(0)
args.uuid: + display_uuid() + + if args.ticket is None and args.suite is None: + print('Missing required ticket number/suite name') + display_usage() + + if args.ticket and args.suite: + print('You must choose either "-t|--ticket" or "-s|--suite", ' + + 'but not both.') + display_usage() + + if int(args.suppliers) == 0: + if int(args.hubs) > 0 or int(args.consumers) > 0: + print('You must use "-m|--suppliers" if you want to have hubs ' + + 'and/or consumers') + display_usage() + + if not args.suppliers.isdigit() or \ + int(args.suppliers) > 99 or \ + int(args.suppliers) < 0: + print('Invalid value for "--suppliers", it must be a number and it can' + + ' not be greater than 99') + display_usage() + + if not args.hubs.isdigit() or int(args.hubs) > 99 or int(args.hubs) < 0: + print('Invalid value for "--hubs", it must be a number and it can ' + + 'not be greater than 99') + display_usage() + + if not args.consumers.isdigit() or \ + int(args.consumers) > 99 or \ + int(args.consumers) < 0: + print('Invalid value for "--consumers", it must be a number and it ' + + 'can not be greater than 99') + display_usage() + + if args.instances: + if not args.instances.isdigit() or \ + int(args.instances) > 99 or \ + int(args.instances) < 0: + print('Invalid value for "--instances", it must be a number ' + + 'greater than 0 and not greater than 99') + display_usage() + if int(args.instances) > 0: + if int(args.suppliers) > 0 or \ + int(args.hubs) > 0 or \ + int(args.consumers) > 0: + print('You can not mix "--instances" with replication.') + display_usage() + + # Extract usable values + ticket = args.ticket + suite = args.suite + + if args.instances == '0' and args.suppliers == '0' and args.hubs == '0' \ + and args.consumers == '0': + instances = 1 + my_topology = [True, 'topology_st', "Standalone Instance"] + else: + instances = int(args.instances) + suppliers = int(args.suppliers) + hubs = int(args.hubs) + consumers = int(args.consumers) + my_topology = 
get_existing_topologies(instances, suppliers, hubs, consumers) + filename = args.filename + setup_text = my_topology[2] + + # Create/open the new test script file + if not filename: + if ticket: + filename = 'ticket' + ticket + '_test.py' + else: + filename = suite + '_test.py' + + try: + TEST = open(filename, "w") + except IOError: + print("Can\'t open file:", filename) + exit(1) + + # Write the copyright section + if args.copyright: + today = datetime.date.today() + current_year = today.year + + TEST.write('# --- BEGIN COPYRIGHT BLOCK ---\n') + TEST.write('# Copyright (C) {} {}\n'.format(current_year, args.copyright)) + TEST.write('# All rights reserved.\n') + TEST.write('#\n') + TEST.write('# License: GPL (version 3 or any later version).\n') + TEST.write('# See LICENSE for details.\n') + TEST.write('# --- END COPYRIGHT BLOCK ---\n') + TEST.write('#\n') + + # Write the imports + if my_topology[0]: + topology_import = 'from lib389.topologies import {} as topo\n'.format(my_topology[1]) + else: + topology_import = 'from lib389.topologies import create_topology\n' + + TEST.write('import logging\nimport pytest\nimport os\n') + TEST.write('from lib389._constants import *\n') + TEST.write('{}\n'.format(topology_import)) + TEST.write('log = logging.getLogger(__name__)\n\n') + + # Add topology function for non existing (in lib389/topologies.py) topologies only + if not my_topology[0]: + # Write the replication or standalone classes + topologies_str = "" + if suppliers > 0: + topologies_str += " {} suppliers".format(suppliers) + if hubs > 0: + topologies_str += " {} hubs".format(hubs) + if consumers > 0: + topologies_str += " {} consumers".format(consumers) + if instances > 0: + topologies_str += " {} standalone instances".format(instances) + + # Write the 'topology function' + TEST.write('\n@pytest.fixture(scope="module")\n') + TEST.write('def topo(request):\n') + TEST.write(' """Create a topology with{}"""\n\n'.format(topologies_str)) + TEST.write(' topology = 
create_topology({\n') + if suppliers > 0: + TEST.write(' ReplicaRole.SUPPLIER: {},\n'.format(suppliers)) + if hubs > 0: + TEST.write(' ReplicaRole.HUB: {},\n'.format(hubs)) + if consumers > 0: + TEST.write(' ReplicaRole.CONSUMER: {},\n'.format(consumers)) + if instances > 0: + TEST.write(' ReplicaRole.STANDALONE: {},\n'.format(instances)) + TEST.write(' })\n') + + TEST.write(' # You can write replica test here. Just uncomment the block and choose instances\n') + TEST.write(' # replicas = Replicas(topology.ms["supplier1"])\n') + TEST.write(' # replicas.test(DEFAULT_SUFFIX, topology.cs["consumer1"])\n') + + write_finalizer() + TEST.write(' return topology\n\n') + + tc_id = '0' + while not check_id_uniqueness(tc_id): tc_id = uuid.uuid4() + + # Write the test function + if ticket: + TEST.write('\ndef test_ticket{}(topo):\n'.format(ticket)) + else: + TEST.write('\ndef test_something(topo):\n') + TEST.write(' """Specify a test case purpose or name here\n\n') + TEST.write(' :id: {}\n'.format(tc_id)) + TEST.write(' :setup: ' + setup_text + '\n') + TEST.write(' :steps:\n') + TEST.write(' 1. Fill in test case steps here\n') + TEST.write(' 2. And indent them like this (RST format requirement)\n') + TEST.write(' :expectedresults:\n') + TEST.write(' 1. Fill in the result that is expected\n') + TEST.write(' 2. 
# --- BEGIN COPYRIGHT BLOCK ---
# Copyright (C) 2021 Red Hat, Inc.
# All rights reserved.
#
# License: GPL (version 3 or any later version).
# See LICENSE for details.
# --- END COPYRIGHT BLOCK ---
#

"""Helper module to generate a pytest report.

Collects post-run diagnostics — instance status, core dumps, ASAN
reports, error-log extracts and ``dbscan -L`` output — as a list of
``(section_name, text)`` tuples suitable for embedding in a report.
"""

import glob
import ldap
import sys
import os
import subprocess
from lib389 import DirSrv
from lib389.utils import Paths

p = Paths()


def logErrors(path):
    """Return the relevant lines of the error log at *path* as one string.

    Keeps only lines that look like problems (containing CRIT, EMERG or
    ERR) plus the final "slapd stopped." INFO line, whose presence
    denotes a clean shutdown.
    """
    keywords = ("CRIT", "EMERG", "ERR")
    cleanstop = '- INFO - main - slapd stopped.'
    kept = []
    with open(path) as file:
        for line in file:
            # Keep error-ish lines; also keep the clean-stop marker so the
            # reader can tell whether the server shut down cleanly.
            # (Joined once at the end instead of quadratic str +=, and a
            # line matching both conditions is no longer appended twice.)
            if any(word in line for word in keywords) or cleanstop in line:
                kept.append(line)
    return "".join(kept)


def loglist(items):
    """Return the elements of *items* formatted one per line, indented.

    Renamed the parameter from ``list`` to avoid shadowing the builtin.
    """
    return "".join(f"    {item}\n" for item in items)


def logcorefiles():
    """Return ``coredumpctl info ns-slapd`` output (stdout and stderr)."""
    cmd = ["/usr/bin/coredumpctl", "info", "ns-slapd"]
    coreinfo = subprocess.run(cmd, capture_output=True, shell=False,
                              check=False, text=True)
    return f"{coreinfo.stdout}\n\ncoredumpctl STDERR\n{coreinfo.stderr}\n"


def logasanfiles():
    """Return a list of (filename, contents) tuples for ASAN report files.

    Looks for ``ns-slapd-*san*`` files in the run directory.
    """
    res = []
    for f in glob.glob(f'{p.run_dir}/ns-slapd-*san*'):
        with open(f) as asan_report:
            res.append((os.path.basename(f), asan_report.read()))
    return res


def logDbscan(inst):
    """Return ``dbscan -L`` output for *inst*.

    This may help to determine the mdb map size.  The db home prefix is
    stripped from stdout to keep the report compact.
    """
    dblib = inst.get_db_lib()
    dbhome = Paths(inst.getServerId()).db_home_dir
    cmd = ["dbscan", "-D", dblib, "-L", dbhome]
    dbscan = subprocess.run(cmd, capture_output=True, shell=False,
                            check=False, text=True)
    text = dbscan.stdout.replace(f'{dbhome}/', '')
    if dbscan.stderr:
        text += f"\n\ndbscan STDERR\n{dbscan.stderr}"
    text += "\n"
    return text


def getReport():
    """Capture data about local instances and return a report.

    Returns a list of ``(sectionName, text)`` tuples describing running
    and stopped instances, core files, ASAN reports and, for each
    stopped instance, an extract of its error log and its ``dbscan -L``
    output.
    """
    report = []

    def addSection(name, text):
        report.append((name, text))

    # Determine the list of instances, split by running state.
    instancesOK = []
    instancesKO = []
    for instdir in DirSrv().list(all=True):
        inst = DirSrv()
        inst.allocate(instdir)
        if inst.status():
            instancesOK.append(inst)
        else:
            instancesKO.append(inst)

    # Generate the report.
    addSection("Running instances",
               loglist([i.getServerId() for i in instancesOK]))
    addSection("Stopped instances",
               loglist([i.getServerId() for i in instancesKO]))

    # Core file information.
    addSection("Core files", logcorefiles())

    # ASAN report files (one section per file).
    report.extend(logasanfiles())

    # Error log information on stopped servers only.
    # By default we only log an extract of the error log:
    # Critical, Emergency and standard errors,
    # and the final "server stopped" info line (that denotes a clean stop).
    for inst in instancesKO:
        # Extract of the error log.
        path = inst.ds_paths.error_log.format(instance_name=inst.getServerId())
        addSection(f"Extract of instance {inst.getServerId()} error log",
                   logErrors(path))
        # And dbscan -L output.
        addSection(f"Database info for instance {inst.getServerId()}",
                   logDbscan(inst))

    return report
+ +Example: + + data_dir_path = topology.standalone.getDir(__file__, DATA_DIR) + + ldif_file = data_dir_path + "ticket44444/1000entries.ldif" + diff --git a/dirsrvtests/tests/data/__init__.py b/dirsrvtests/tests/data/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/dirsrvtests/tests/data/basic/__init__.py b/dirsrvtests/tests/data/basic/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/dirsrvtests/tests/data/basic/dse.ldif.broken b/dirsrvtests/tests/data/basic/dse.ldif.broken new file mode 100644 index 0000000..489b443 --- /dev/null +++ b/dirsrvtests/tests/data/basic/dse.ldif.broken @@ -0,0 +1,95 @@ +dn: +objectClass: top +aci: (targetattr != "aci")(version 3.0; aci "rootdse anon read access"; allow( + read,search,compare) userdn="ldap:///anyone";) +creatorsName: cn=server,cn=plugins,cn=config +modifiersName: cn=server,cn=plugins,cn=config +createTimestamp: 20150204165610Z +modifyTimestamp: 20150204165610Z + +dn: cn=config +cn: config +objectClass: top +objectClass: extensibleObject +objectClass: nsslapdConfig +nsslapd-schemadir: /etc/dirsrv/slapd-localhost/schema +nsslapd-lockdir: /var/lock/dirsrv/slapd-localhost +nsslapd-tmpdir: /tmp +nsslapd-certdir: /etc/dirsrv/slapd-localhost +nsslapd-ldifdir: /var/lib/dirsrv/slapd-localhost/ldif +nsslapd-bakdir: /var/lib/dirsrv/slapd-localhost/bak +nsslapd-rundir: /var/run/dirsrv +nsslapd-instancedir: /usr/lib64/dirsrv/slapd-localhost +nsslapd-accesslog-logging-enabled: on +nsslapd-accesslog-maxlogsperdir: 10 +nsslapd-accesslog-mode: 600 +nsslapd-accesslog-maxlogsize: 100 +nsslapd-accesslog-logrotationtime: 1 +nsslapd-accesslog-logrotationtimeunit: day +nsslapd-accesslog-logrotationsync-enabled: off +nsslapd-accesslog-logrotationsynchour: 0 +nsslapd-accesslog-logrotationsyncmin: 0 +nsslapd-accesslog: /var/log/dirsrv/slapd-localhost/access +nsslapd-enquote-sup-oc: off +nsslapd-localhost: localhost.localdomain +nsslapd-schemacheck: on +nsslapd-syntaxcheck: on +nsslapd-dn-validate-strict: 
off +nsslapd-rewrite-rfc1274: off +nsslapd-return-exact-case: on +nsslapd-ssl-check-hostname: on +nsslapd-validate-cert: warn +nsslapd-allow-unauthenticated-binds: off +nsslapd-require-secure-binds: off +nsslapd-allow-anonymous####-access: on +nsslapd-localssf: 71 +nsslapd-minssf: 0 +nsslapd-port: 389 +nsslapd-localuser: nobody +nsslapd-errorlog-logging-enabled: on +nsslapd-errorlog-mode: 600 +nsslapd-errorlog-maxlogsperdir: 2 +nsslapd-errorlog-maxlogsize: 100 +nsslapd-errorlog-logrotationtime: 1 +nsslapd-errorlog-logrotationtimeunit: week +nsslapd-errorlog-logrotationsync-enabled: off +nsslapd-errorlog-logrotationsynchour: 0 +nsslapd-errorlog-logrotationsyncmin: 0 +nsslapd-errorlog: /var/log/dirsrv/slapd-localhost/errors +nsslapd-auditlog: /var/log/dirsrv/slapd-localhost/audit +nsslapd-auditlog-mode: 600 +nsslapd-auditlog-maxlogsize: 100 +nsslapd-auditlog-logrotationtime: 1 +nsslapd-auditlog-logrotationtimeunit: day +nsslapd-rootdn: cn=dm +nsslapd-maxdescriptors: 1024 +nsslapd-max-filter-nest-level: 40 +nsslapd-ndn-cache-enabled: on +nsslapd-sasl-mapping-fallback: off +nsslapd-dynamic-plugins: off +nsslapd-allow-hashed-passwords: off +nsslapd-ldapifilepath: /var/run/slapd-localhost.socket +nsslapd-ldapilisten: off +nsslapd-ldapiautobind: off +nsslapd-ldapimaprootdn: cn=dm +nsslapd-ldapimaptoentries: off +nsslapd-ldapiuidnumbertype: uidNumber +nsslapd-ldapigidnumbertype: gidNumber +nsslapd-ldapientrysearchbase: dc=example,dc=com +nsslapd-defaultnamingcontext: dc=example,dc=com +aci: (targetattr="*")(version 3.0; acl "Configuration Administrators Group"; a + llow (all) groupdn="ldap:///cn=Configuration Administrators,ou=Groups,ou=Topo + logyManagement,o=NetscapeRoot";) +aci: (targetattr="*")(version 3.0; acl "Configuration Administrator"; allow (a + ll) userdn="ldap:///uid=admin,ou=Administrators,ou=TopologyManagement,o=Netsc + apeRoot";) +aci: (targetattr = "*")(version 3.0; acl "SIE Group"; allow (all) groupdn = "l + dap:///cn=slapd-localhost,cn=389 Directory 
Server,cn=Server Group,cn=localhos + t.localdomain,ou=example.com,o=NetscapeRoot";) +modifiersName: cn=dm +modifyTimestamp: 20150205195242Z +nsslapd-auditlog-logging-enabled: on +nsslapd-auditlog-logging-hide-unhashed-pw: off +nsslapd-rootpw: {SSHA}AQH9bTYZW4kfkfyHg1k+lG88H2dFOuwakzFEpw== +numSubordinates: 10 + diff --git a/dirsrvtests/tests/data/entryuuid/localhost-userRoot-2020_03_30_13_14_47.ldif b/dirsrvtests/tests/data/entryuuid/localhost-userRoot-2020_03_30_13_14_47.ldif new file mode 100644 index 0000000..b64090a --- /dev/null +++ b/dirsrvtests/tests/data/entryuuid/localhost-userRoot-2020_03_30_13_14_47.ldif @@ -0,0 +1,233 @@ +version: 1 + +# entry-id: 1 +dn: dc=example,dc=com +objectClass: top +objectClass: domain +dc: example +description: dc=example,dc=com +creatorsName: cn=Directory Manager +modifiersName: cn=Directory Manager +createTimestamp: 20200325015542Z +modifyTimestamp: 20200325015542Z +nsUniqueId: a2b33229-6e3b11ea-8de0c78c-83e27eda +aci: (targetattr="dc || description || objectClass")(targetfilter="(objectClas + s=domain)")(version 3.0; acl "Enable anyone domain read"; allow (read, search + , compare)(userdn="ldap:///anyone");) +aci: (targetattr="ou || objectClass")(targetfilter="(objectClass=organizationa + lUnit)")(version 3.0; acl "Enable anyone ou read"; allow (read, search, compa + re)(userdn="ldap:///anyone");) + +# entry-id: 2 +dn: cn=389_ds_system,dc=example,dc=com +objectClass: top +objectClass: nscontainer +objectClass: ldapsubentry +cn: 389_ds_system +creatorsName: cn=Directory Manager +modifiersName: cn=Directory Manager +createTimestamp: 20200325015542Z +modifyTimestamp: 20200325015542Z +nsUniqueId: a2b3322a-6e3b11ea-8de0c78c-83e27eda + +# entry-id: 3 +dn: ou=groups,dc=example,dc=com +objectClass: top +objectClass: organizationalunit +ou: groups +aci: (targetattr="cn || member || gidNumber || nsUniqueId || description || ob + jectClass")(targetfilter="(objectClass=groupOfNames)")(version 3.0; acl "Enab + le anyone group read"; 
allow (read, search, compare)(userdn="ldap:///anyone") + ;) +aci: (targetattr="member")(targetfilter="(objectClass=groupOfNames)")(version + 3.0; acl "Enable group_modify to alter members"; allow (write)(groupdn="ldap: + ///cn=group_modify,ou=permissions,dc=example,dc=com");) +aci: (targetattr="cn || member || gidNumber || description || objectClass")(ta + rgetfilter="(objectClass=groupOfNames)")(version 3.0; acl "Enable group_admin + to manage groups"; allow (write, add, delete)(groupdn="ldap:///cn=group_admi + n,ou=permissions,dc=example,dc=com");) +creatorsName: cn=Directory Manager +modifiersName: cn=Directory Manager +createTimestamp: 20200325015543Z +modifyTimestamp: 20200325015543Z +nsUniqueId: a2b3322b-6e3b11ea-8de0c78c-83e27eda + +# entry-id: 4 +dn: ou=people,dc=example,dc=com +objectClass: top +objectClass: organizationalunit +ou: people +aci: (targetattr="objectClass || description || nsUniqueId || uid || displayNa + me || loginShell || uidNumber || gidNumber || gecos || homeDirectory || cn || + memberOf || mail || nsSshPublicKey || nsAccountLock || userCertificate")(tar + getfilter="(objectClass=posixaccount)")(version 3.0; acl "Enable anyone user + read"; allow (read, search, compare)(userdn="ldap:///anyone");) +aci: (targetattr="displayName || legalName || userPassword || nsSshPublicKey") + (version 3.0; acl "Enable self partial modify"; allow (write)(userdn="ldap:// + /self");) +aci: (targetattr="legalName || telephoneNumber || mobile || sn")(targetfilter= + "(|(objectClass=nsPerson)(objectClass=inetOrgPerson))")(version 3.0; acl "Ena + ble self legalname read"; allow (read, search, compare)(userdn="ldap:///self" + );) +aci: (targetattr="legalName || telephoneNumber")(targetfilter="(objectClass=ns + Person)")(version 3.0; acl "Enable user legalname read"; allow (read, search, + compare)(groupdn="ldap:///cn=user_private_read,ou=permissions,dc=example,dc= + com");) +aci: (targetattr="uid || description || displayName || loginShell || uidNumber + || 
gidNumber || gecos || homeDirectory || cn || memberOf || mail || legalNam + e || telephoneNumber || mobile")(targetfilter="(&(objectClass=nsPerson)(objec + tClass=nsAccount))")(version 3.0; acl "Enable user admin create"; allow (writ + e, add, delete, read)(groupdn="ldap:///cn=user_admin,ou=permissions,dc=exampl + e,dc=com");) +aci: (targetattr="uid || description || displayName || loginShell || uidNumber + || gidNumber || gecos || homeDirectory || cn || memberOf || mail || legalNam + e || telephoneNumber || mobile")(targetfilter="(&(objectClass=nsPerson)(objec + tClass=nsAccount))")(version 3.0; acl "Enable user modify to change users"; a + llow (write, read)(groupdn="ldap:///cn=user_modify,ou=permissions,dc=example, + dc=com");) +aci: (targetattr="userPassword || nsAccountLock || userCertificate || nsSshPub + licKey")(targetfilter="(objectClass=nsAccount)")(version 3.0; acl "Enable use + r password reset"; allow (write, read)(groupdn="ldap:///cn=user_passwd_reset, + ou=permissions,dc=example,dc=com");) +creatorsName: cn=Directory Manager +modifiersName: cn=Directory Manager +createTimestamp: 20200325015543Z +modifyTimestamp: 20200325015543Z +nsUniqueId: a2b3322c-6e3b11ea-8de0c78c-83e27eda + +# entry-id: 5 +dn: ou=permissions,dc=example,dc=com +objectClass: top +objectClass: organizationalunit +ou: permissions +creatorsName: cn=Directory Manager +modifiersName: cn=Directory Manager +createTimestamp: 20200325015543Z +modifyTimestamp: 20200325015543Z +nsUniqueId: a2b3322d-6e3b11ea-8de0c78c-83e27eda + +# entry-id: 6 +dn: ou=services,dc=example,dc=com +objectClass: top +objectClass: organizationalunit +ou: services +aci: (targetattr="objectClass || description || nsUniqueId || cn || memberOf | + | nsAccountLock ")(targetfilter="(objectClass=netscapeServer)")(version 3.0; + acl "Enable anyone service account read"; allow (read, search, compare)(userd + n="ldap:///anyone");) +creatorsName: cn=Directory Manager +modifiersName: cn=Directory Manager +createTimestamp: 
20200325015544Z +modifyTimestamp: 20200325015544Z +nsUniqueId: a2b3322e-6e3b11ea-8de0c78c-83e27eda + +# entry-id: 7 +dn: uid=demo_user,ou=people,dc=example,dc=com +objectClass: top +objectClass: nsPerson +objectClass: nsAccount +objectClass: nsOrgPerson +objectClass: posixAccount +uid: demo_user +cn: Demo User +displayName: Demo User +legalName: Demo User Name +uidNumber: 99998 +gidNumber: 99998 +homeDirectory: /var/empty +loginShell: /bin/false +nsAccountLock: true +creatorsName: cn=Directory Manager +modifiersName: cn=Directory Manager +createTimestamp: 20200325015544Z +modifyTimestamp: 20200325061615Z +nsUniqueId: a2b3322f-6e3b11ea-8de0c78c-83e27eda +entryUUID: 973e1bbf-ba9c-45d4-b01b-ff7371fd9008 + +# entry-id: 8 +dn: cn=demo_group,ou=groups,dc=example,dc=com +objectClass: top +objectClass: groupOfNames +objectClass: posixGroup +objectClass: nsMemberOf +cn: demo_group +gidNumber: 99999 +creatorsName: cn=Directory Manager +modifiersName: cn=Directory Manager +createTimestamp: 20200325015544Z +modifyTimestamp: 20200325015544Z +nsUniqueId: a2b33230-6e3b11ea-8de0c78c-83e27eda +entryUUID: f6df8fe9-6b30-46aa-aa13-f0bf755371e8 + +# entry-id: 9 +dn: cn=group_admin,ou=permissions,dc=example,dc=com +objectClass: top +objectClass: groupOfNames +objectClass: nsMemberOf +cn: group_admin +creatorsName: cn=Directory Manager +modifiersName: cn=Directory Manager +createTimestamp: 20200325015545Z +modifyTimestamp: 20200325015545Z +nsUniqueId: a2b33231-6e3b11ea-8de0c78c-83e27eda + +# entry-id: 10 +dn: cn=group_modify,ou=permissions,dc=example,dc=com +objectClass: top +objectClass: groupOfNames +objectClass: nsMemberOf +cn: group_modify +creatorsName: cn=Directory Manager +modifiersName: cn=Directory Manager +createTimestamp: 20200325015545Z +modifyTimestamp: 20200325015545Z +nsUniqueId: a2b33232-6e3b11ea-8de0c78c-83e27eda + +# entry-id: 11 +dn: cn=user_admin,ou=permissions,dc=example,dc=com +objectClass: top +objectClass: groupOfNames +objectClass: nsMemberOf +cn: user_admin 
+creatorsName: cn=Directory Manager +modifiersName: cn=Directory Manager +createTimestamp: 20200325015545Z +modifyTimestamp: 20200325015545Z +nsUniqueId: a2b33233-6e3b11ea-8de0c78c-83e27eda + +# entry-id: 12 +dn: cn=user_modify,ou=permissions,dc=example,dc=com +objectClass: top +objectClass: groupOfNames +objectClass: nsMemberOf +cn: user_modify +creatorsName: cn=Directory Manager +modifiersName: cn=Directory Manager +createTimestamp: 20200325015546Z +modifyTimestamp: 20200325015546Z +nsUniqueId: a2b33234-6e3b11ea-8de0c78c-83e27eda + +# entry-id: 13 +dn: cn=user_passwd_reset,ou=permissions,dc=example,dc=com +objectClass: top +objectClass: groupOfNames +objectClass: nsMemberOf +cn: user_passwd_reset +creatorsName: cn=Directory Manager +modifiersName: cn=Directory Manager +createTimestamp: 20200325015546Z +modifyTimestamp: 20200325015546Z +nsUniqueId: a2b33235-6e3b11ea-8de0c78c-83e27eda + +# entry-id: 14 +dn: cn=user_private_read,ou=permissions,dc=example,dc=com +objectClass: top +objectClass: groupOfNames +objectClass: nsMemberOf +cn: user_private_read +creatorsName: cn=Directory Manager +modifiersName: cn=Directory Manager +createTimestamp: 20200325015547Z +modifyTimestamp: 20200325015547Z +nsUniqueId: a2b33236-6e3b11ea-8de0c78c-83e27eda + diff --git a/dirsrvtests/tests/data/entryuuid/localhost-userRoot-invalid.ldif b/dirsrvtests/tests/data/entryuuid/localhost-userRoot-invalid.ldif new file mode 100644 index 0000000..9703bab --- /dev/null +++ b/dirsrvtests/tests/data/entryuuid/localhost-userRoot-invalid.ldif @@ -0,0 +1,233 @@ +version: 1 + +# entry-id: 1 +dn: dc=example,dc=com +objectClass: top +objectClass: domain +dc: example +description: dc=example,dc=com +creatorsName: cn=Directory Manager +modifiersName: cn=Directory Manager +createTimestamp: 20200325015542Z +modifyTimestamp: 20200325015542Z +nsUniqueId: a2b33229-6e3b11ea-8de0c78c-83e27eda +aci: (targetattr="dc || description || objectClass")(targetfilter="(objectClas + s=domain)")(version 3.0; acl "Enable 
anyone domain read"; allow (read, search + , compare)(userdn="ldap:///anyone");) +aci: (targetattr="ou || objectClass")(targetfilter="(objectClass=organizationa + lUnit)")(version 3.0; acl "Enable anyone ou read"; allow (read, search, compa + re)(userdn="ldap:///anyone");) + +# entry-id: 2 +dn: cn=389_ds_system,dc=example,dc=com +objectClass: top +objectClass: nscontainer +objectClass: ldapsubentry +cn: 389_ds_system +creatorsName: cn=Directory Manager +modifiersName: cn=Directory Manager +createTimestamp: 20200325015542Z +modifyTimestamp: 20200325015542Z +nsUniqueId: a2b3322a-6e3b11ea-8de0c78c-83e27eda + +# entry-id: 3 +dn: ou=groups,dc=example,dc=com +objectClass: top +objectClass: organizationalunit +ou: groups +aci: (targetattr="cn || member || gidNumber || nsUniqueId || description || ob + jectClass")(targetfilter="(objectClass=groupOfNames)")(version 3.0; acl "Enab + le anyone group read"; allow (read, search, compare)(userdn="ldap:///anyone") + ;) +aci: (targetattr="member")(targetfilter="(objectClass=groupOfNames)")(version + 3.0; acl "Enable group_modify to alter members"; allow (write)(groupdn="ldap: + ///cn=group_modify,ou=permissions,dc=example,dc=com");) +aci: (targetattr="cn || member || gidNumber || description || objectClass")(ta + rgetfilter="(objectClass=groupOfNames)")(version 3.0; acl "Enable group_admin + to manage groups"; allow (write, add, delete)(groupdn="ldap:///cn=group_admi + n,ou=permissions,dc=example,dc=com");) +creatorsName: cn=Directory Manager +modifiersName: cn=Directory Manager +createTimestamp: 20200325015543Z +modifyTimestamp: 20200325015543Z +nsUniqueId: a2b3322b-6e3b11ea-8de0c78c-83e27eda + +# entry-id: 4 +dn: ou=people,dc=example,dc=com +objectClass: top +objectClass: organizationalunit +ou: people +aci: (targetattr="objectClass || description || nsUniqueId || uid || displayNa + me || loginShell || uidNumber || gidNumber || gecos || homeDirectory || cn || + memberOf || mail || nsSshPublicKey || nsAccountLock || 
userCertificate")(tar + getfilter="(objectClass=posixaccount)")(version 3.0; acl "Enable anyone user + read"; allow (read, search, compare)(userdn="ldap:///anyone");) +aci: (targetattr="displayName || legalName || userPassword || nsSshPublicKey") + (version 3.0; acl "Enable self partial modify"; allow (write)(userdn="ldap:// + /self");) +aci: (targetattr="legalName || telephoneNumber || mobile || sn")(targetfilter= + "(|(objectClass=nsPerson)(objectClass=inetOrgPerson))")(version 3.0; acl "Ena + ble self legalname read"; allow (read, search, compare)(userdn="ldap:///self" + );) +aci: (targetattr="legalName || telephoneNumber")(targetfilter="(objectClass=ns + Person)")(version 3.0; acl "Enable user legalname read"; allow (read, search, + compare)(groupdn="ldap:///cn=user_private_read,ou=permissions,dc=example,dc= + com");) +aci: (targetattr="uid || description || displayName || loginShell || uidNumber + || gidNumber || gecos || homeDirectory || cn || memberOf || mail || legalNam + e || telephoneNumber || mobile")(targetfilter="(&(objectClass=nsPerson)(objec + tClass=nsAccount))")(version 3.0; acl "Enable user admin create"; allow (writ + e, add, delete, read)(groupdn="ldap:///cn=user_admin,ou=permissions,dc=exampl + e,dc=com");) +aci: (targetattr="uid || description || displayName || loginShell || uidNumber + || gidNumber || gecos || homeDirectory || cn || memberOf || mail || legalNam + e || telephoneNumber || mobile")(targetfilter="(&(objectClass=nsPerson)(objec + tClass=nsAccount))")(version 3.0; acl "Enable user modify to change users"; a + llow (write, read)(groupdn="ldap:///cn=user_modify,ou=permissions,dc=example, + dc=com");) +aci: (targetattr="userPassword || nsAccountLock || userCertificate || nsSshPub + licKey")(targetfilter="(objectClass=nsAccount)")(version 3.0; acl "Enable use + r password reset"; allow (write, read)(groupdn="ldap:///cn=user_passwd_reset, + ou=permissions,dc=example,dc=com");) +creatorsName: cn=Directory Manager +modifiersName: 
cn=Directory Manager +createTimestamp: 20200325015543Z +modifyTimestamp: 20200325015543Z +nsUniqueId: a2b3322c-6e3b11ea-8de0c78c-83e27eda + +# entry-id: 5 +dn: ou=permissions,dc=example,dc=com +objectClass: top +objectClass: organizationalunit +ou: permissions +creatorsName: cn=Directory Manager +modifiersName: cn=Directory Manager +createTimestamp: 20200325015543Z +modifyTimestamp: 20200325015543Z +nsUniqueId: a2b3322d-6e3b11ea-8de0c78c-83e27eda + +# entry-id: 6 +dn: ou=services,dc=example,dc=com +objectClass: top +objectClass: organizationalunit +ou: services +aci: (targetattr="objectClass || description || nsUniqueId || cn || memberOf | + | nsAccountLock ")(targetfilter="(objectClass=netscapeServer)")(version 3.0; + acl "Enable anyone service account read"; allow (read, search, compare)(userd + n="ldap:///anyone");) +creatorsName: cn=Directory Manager +modifiersName: cn=Directory Manager +createTimestamp: 20200325015544Z +modifyTimestamp: 20200325015544Z +nsUniqueId: a2b3322e-6e3b11ea-8de0c78c-83e27eda + +# entry-id: 7 +dn: uid=demo_user,ou=people,dc=example,dc=com +objectClass: top +objectClass: nsPerson +objectClass: nsAccount +objectClass: nsOrgPerson +objectClass: posixAccount +uid: demo_user +cn: Demo User +displayName: Demo User +legalName: Demo User Name +uidNumber: 99998 +gidNumber: 99998 +homeDirectory: /var/empty +loginShell: /bin/false +nsAccountLock: true +creatorsName: cn=Directory Manager +modifiersName: cn=Directory Manager +createTimestamp: 20200325015544Z +modifyTimestamp: 20200325061615Z +nsUniqueId: a2b3322f-6e3b11ea-8de0c78c-83e27eda +entryUUID: INVALID_UUID + +# entry-id: 8 +dn: cn=demo_group,ou=groups,dc=example,dc=com +objectClass: top +objectClass: groupOfNames +objectClass: posixGroup +objectClass: nsMemberOf +cn: demo_group +gidNumber: 99999 +creatorsName: cn=Directory Manager +modifiersName: cn=Directory Manager +createTimestamp: 20200325015544Z +modifyTimestamp: 20200325015544Z +nsUniqueId: a2b33230-6e3b11ea-8de0c78c-83e27eda 
+entryUUID: f6df8fe9-6b30-46aa-aa13-f0bf755371e8 + +# entry-id: 9 +dn: cn=group_admin,ou=permissions,dc=example,dc=com +objectClass: top +objectClass: groupOfNames +objectClass: nsMemberOf +cn: group_admin +creatorsName: cn=Directory Manager +modifiersName: cn=Directory Manager +createTimestamp: 20200325015545Z +modifyTimestamp: 20200325015545Z +nsUniqueId: a2b33231-6e3b11ea-8de0c78c-83e27eda + +# entry-id: 10 +dn: cn=group_modify,ou=permissions,dc=example,dc=com +objectClass: top +objectClass: groupOfNames +objectClass: nsMemberOf +cn: group_modify +creatorsName: cn=Directory Manager +modifiersName: cn=Directory Manager +createTimestamp: 20200325015545Z +modifyTimestamp: 20200325015545Z +nsUniqueId: a2b33232-6e3b11ea-8de0c78c-83e27eda + +# entry-id: 11 +dn: cn=user_admin,ou=permissions,dc=example,dc=com +objectClass: top +objectClass: groupOfNames +objectClass: nsMemberOf +cn: user_admin +creatorsName: cn=Directory Manager +modifiersName: cn=Directory Manager +createTimestamp: 20200325015545Z +modifyTimestamp: 20200325015545Z +nsUniqueId: a2b33233-6e3b11ea-8de0c78c-83e27eda + +# entry-id: 12 +dn: cn=user_modify,ou=permissions,dc=example,dc=com +objectClass: top +objectClass: groupOfNames +objectClass: nsMemberOf +cn: user_modify +creatorsName: cn=Directory Manager +modifiersName: cn=Directory Manager +createTimestamp: 20200325015546Z +modifyTimestamp: 20200325015546Z +nsUniqueId: a2b33234-6e3b11ea-8de0c78c-83e27eda + +# entry-id: 13 +dn: cn=user_passwd_reset,ou=permissions,dc=example,dc=com +objectClass: top +objectClass: groupOfNames +objectClass: nsMemberOf +cn: user_passwd_reset +creatorsName: cn=Directory Manager +modifiersName: cn=Directory Manager +createTimestamp: 20200325015546Z +modifyTimestamp: 20200325015546Z +nsUniqueId: a2b33235-6e3b11ea-8de0c78c-83e27eda + +# entry-id: 14 +dn: cn=user_private_read,ou=permissions,dc=example,dc=com +objectClass: top +objectClass: groupOfNames +objectClass: nsMemberOf +cn: user_private_read +creatorsName: cn=Directory 
Manager +modifiersName: cn=Directory Manager +createTimestamp: 20200325015547Z +modifyTimestamp: 20200325015547Z +nsUniqueId: a2b33236-6e3b11ea-8de0c78c-83e27eda + diff --git a/dirsrvtests/tests/data/longduration/db_protect_long_test_reference_1.4.2.12.json b/dirsrvtests/tests/data/longduration/db_protect_long_test_reference_1.4.2.12.json new file mode 100644 index 0000000..45d0eb2 --- /dev/null +++ b/dirsrvtests/tests/data/longduration/db_protect_long_test_reference_1.4.2.12.json @@ -0,0 +1,405 @@ +{"Instance OFFLINE OFFLINE _job_nothing + OFFLINE _job_nothing": "OK + OK", + "Instance OFFLINE OFFLINE _job_nothing + OFFLINE _job_db2ldifSuffix1": "OK + OK", + "Instance OFFLINE OFFLINE _job_nothing + OFFLINE _job_db2ldifSuffix2": "OK + OK", + "Instance OFFLINE OFFLINE _job_nothing + OFFLINE _job_ldif2dbSuffix1": "OK + OK", + "Instance OFFLINE OFFLINE _job_nothing + OFFLINE _job_ldif2dbSuffix2": "OK + OK", + "Instance OFFLINE OFFLINE _job_nothing + OFFLINE _job_db2indexSuffix1": "OK + OK", + "Instance OFFLINE OFFLINE _job_nothing + OFFLINE _job_db2indexSuffix2": "OK + OK", + "Instance OFFLINE OFFLINE _job_nothing + OFFLINE _job_db2archive": "OK + OK", + "Instance OFFLINE OFFLINE _job_nothing + OFFLINE _job_archive2db": "OK + OK", + "Instance OFFLINE OFFLINE _job_db2ldifSuffix1 + OFFLINE _job_nothing": "OK + OK", + "Instance OFFLINE OFFLINE _job_db2ldifSuffix1 + OFFLINE _job_db2ldifSuffix1": "OK + OK", + "Instance OFFLINE OFFLINE _job_db2ldifSuffix1 + OFFLINE _job_db2ldifSuffix2": "OK + OK", + "Instance OFFLINE OFFLINE _job_db2ldifSuffix1 + OFFLINE _job_ldif2dbSuffix1": "OK + KO", + "Instance OFFLINE OFFLINE _job_db2ldifSuffix1 + OFFLINE _job_ldif2dbSuffix2": "OK + KO", + "Instance OFFLINE OFFLINE _job_db2ldifSuffix1 + OFFLINE _job_db2indexSuffix1": "KO + OK", + "Instance OFFLINE OFFLINE _job_db2ldifSuffix1 + OFFLINE _job_db2indexSuffix2": "KO + OK", + "Instance OFFLINE OFFLINE _job_db2ldifSuffix1 + OFFLINE _job_db2archive": "OK + OK", + "Instance OFFLINE OFFLINE 
_job_db2ldifSuffix1 + OFFLINE _job_archive2db": "OK + KO", + "Instance OFFLINE OFFLINE _job_db2ldifSuffix2 + OFFLINE _job_nothing": "OK + OK", + "Instance OFFLINE OFFLINE _job_db2ldifSuffix2 + OFFLINE _job_db2ldifSuffix1": "OK + OK", + "Instance OFFLINE OFFLINE _job_db2ldifSuffix2 + OFFLINE _job_db2ldifSuffix2": "OK + OK", + "Instance OFFLINE OFFLINE _job_db2ldifSuffix2 + OFFLINE _job_ldif2dbSuffix1": "OK + KO", + "Instance OFFLINE OFFLINE _job_db2ldifSuffix2 + OFFLINE _job_ldif2dbSuffix2": "OK + KO", + "Instance OFFLINE OFFLINE _job_db2ldifSuffix2 + OFFLINE _job_db2indexSuffix1": "KO + OK", + "Instance OFFLINE OFFLINE _job_db2ldifSuffix2 + OFFLINE _job_db2indexSuffix2": "KO + OK", + "Instance OFFLINE OFFLINE _job_db2ldifSuffix2 + OFFLINE _job_db2archive": "OK + OK", + "Instance OFFLINE OFFLINE _job_db2ldifSuffix2 + OFFLINE _job_archive2db": "OK + KO", + "Instance OFFLINE OFFLINE _job_ldif2dbSuffix1 + OFFLINE _job_nothing": "OK + OK", + "Instance OFFLINE OFFLINE _job_ldif2dbSuffix1 + OFFLINE _job_db2ldifSuffix1": "OK + KO", + "Instance OFFLINE OFFLINE _job_ldif2dbSuffix1 + OFFLINE _job_db2ldifSuffix2": "OK + KO", + "Instance OFFLINE OFFLINE _job_ldif2dbSuffix1 + OFFLINE _job_ldif2dbSuffix1": "OK + KO", + "Instance OFFLINE OFFLINE _job_ldif2dbSuffix1 + OFFLINE _job_ldif2dbSuffix2": "OK + KO", + "Instance OFFLINE OFFLINE _job_ldif2dbSuffix1 + OFFLINE _job_db2indexSuffix1": "KO + OK", + "Instance OFFLINE OFFLINE _job_ldif2dbSuffix1 + OFFLINE _job_db2indexSuffix2": "KO + OK", + "Instance OFFLINE OFFLINE _job_ldif2dbSuffix1 + OFFLINE _job_db2archive": "OK + KO", + "Instance OFFLINE OFFLINE _job_ldif2dbSuffix1 + OFFLINE _job_archive2db": "OK + KO", + "Instance OFFLINE OFFLINE _job_ldif2dbSuffix2 + OFFLINE _job_nothing": "OK + OK", + "Instance OFFLINE OFFLINE _job_ldif2dbSuffix2 + OFFLINE _job_db2ldifSuffix1": "OK + KO", + "Instance OFFLINE OFFLINE _job_ldif2dbSuffix2 + OFFLINE _job_db2ldifSuffix2": "OK + KO", + "Instance OFFLINE OFFLINE _job_ldif2dbSuffix2 + OFFLINE 
_job_ldif2dbSuffix1": "OK + KO", + "Instance OFFLINE OFFLINE _job_ldif2dbSuffix2 + OFFLINE _job_ldif2dbSuffix2": "OK + KO", + "Instance OFFLINE OFFLINE _job_ldif2dbSuffix2 + OFFLINE _job_db2indexSuffix1": "KO + OK", + "Instance OFFLINE OFFLINE _job_ldif2dbSuffix2 + OFFLINE _job_db2indexSuffix2": "KO + OK", + "Instance OFFLINE OFFLINE _job_ldif2dbSuffix2 + OFFLINE _job_db2archive": "OK + KO", + "Instance OFFLINE OFFLINE _job_ldif2dbSuffix2 + OFFLINE _job_archive2db": "OK + KO", + "Instance OFFLINE OFFLINE _job_db2indexSuffix1 + OFFLINE _job_nothing": "OK + OK", + "Instance OFFLINE OFFLINE _job_db2indexSuffix1 + OFFLINE _job_db2ldifSuffix1": "OK + KO", + "Instance OFFLINE OFFLINE _job_db2indexSuffix1 + OFFLINE _job_db2ldifSuffix2": "OK + KO", + "Instance OFFLINE OFFLINE _job_db2indexSuffix1 + OFFLINE _job_ldif2dbSuffix1": "OK + KO", + "Instance OFFLINE OFFLINE _job_db2indexSuffix1 + OFFLINE _job_ldif2dbSuffix2": "OK + KO", + "Instance OFFLINE OFFLINE _job_db2indexSuffix1 + OFFLINE _job_db2indexSuffix1": "OK + KO", + "Instance OFFLINE OFFLINE _job_db2indexSuffix1 + OFFLINE _job_db2indexSuffix2": "OK + KO", + "Instance OFFLINE OFFLINE _job_db2indexSuffix1 + OFFLINE _job_db2archive": "OK + KO", + "Instance OFFLINE OFFLINE _job_db2indexSuffix1 + OFFLINE _job_archive2db": "OK + KO", + "Instance OFFLINE OFFLINE _job_db2indexSuffix2 + OFFLINE _job_nothing": "OK + OK", + "Instance OFFLINE OFFLINE _job_db2indexSuffix2 + OFFLINE _job_db2ldifSuffix1": "OK + KO", + "Instance OFFLINE OFFLINE _job_db2indexSuffix2 + OFFLINE _job_db2ldifSuffix2": "OK + KO", + "Instance OFFLINE OFFLINE _job_db2indexSuffix2 + OFFLINE _job_ldif2dbSuffix1": "OK + KO", + "Instance OFFLINE OFFLINE _job_db2indexSuffix2 + OFFLINE _job_ldif2dbSuffix2": "OK + KO", + "Instance OFFLINE OFFLINE _job_db2indexSuffix2 + OFFLINE _job_db2indexSuffix1": "OK + KO", + "Instance OFFLINE OFFLINE _job_db2indexSuffix2 + OFFLINE _job_db2indexSuffix2": "OK + KO", + "Instance OFFLINE OFFLINE _job_db2indexSuffix2 + OFFLINE 
_job_db2archive": "OK + KO", + "Instance OFFLINE OFFLINE _job_db2indexSuffix2 + OFFLINE _job_archive2db": "OK + KO", + "Instance OFFLINE OFFLINE _job_db2archive + OFFLINE _job_nothing": "OK + OK", + "Instance OFFLINE OFFLINE _job_db2archive + OFFLINE _job_db2ldifSuffix1": "OK + OK", + "Instance OFFLINE OFFLINE _job_db2archive + OFFLINE _job_db2ldifSuffix2": "OK + OK", + "Instance OFFLINE OFFLINE _job_db2archive + OFFLINE _job_ldif2dbSuffix1": "OK + KO", + "Instance OFFLINE OFFLINE _job_db2archive + OFFLINE _job_ldif2dbSuffix2": "OK + KO", + "Instance OFFLINE OFFLINE _job_db2archive + OFFLINE _job_db2indexSuffix1": "KO + OK", + "Instance OFFLINE OFFLINE _job_db2archive + OFFLINE _job_db2indexSuffix2": "KO + OK", + "Instance OFFLINE OFFLINE _job_db2archive + OFFLINE _job_db2archive": "OK + OK", + "Instance OFFLINE OFFLINE _job_db2archive + OFFLINE _job_archive2db": "OK + KO", + "Instance OFFLINE OFFLINE _job_archive2db + OFFLINE _job_nothing": "OK + OK", + "Instance OFFLINE OFFLINE _job_archive2db + OFFLINE _job_db2ldifSuffix1": "OK + KO", + "Instance OFFLINE OFFLINE _job_archive2db + OFFLINE _job_db2ldifSuffix2": "OK + KO", + "Instance OFFLINE OFFLINE _job_archive2db + OFFLINE _job_ldif2dbSuffix1": "OK + KO", + "Instance OFFLINE OFFLINE _job_archive2db + OFFLINE _job_ldif2dbSuffix2": "OK + KO", + "Instance OFFLINE OFFLINE _job_archive2db + OFFLINE _job_db2indexSuffix1": "KO + OK", + "Instance OFFLINE OFFLINE _job_archive2db + OFFLINE _job_db2indexSuffix2": "KO + OK", + "Instance OFFLINE OFFLINE _job_archive2db + OFFLINE _job_db2archive": "OK + KO", + "Instance OFFLINE OFFLINE _job_archive2db + OFFLINE _job_archive2db": "OK + KO", + "Instance ONLINE OFFLINE _job_nothing + OFFLINE _job_nothing": "OK + OK", + "Instance ONLINE OFFLINE _job_nothing + OFFLINE _job_db2ldifSuffix1": "OK + KO", + "Instance ONLINE OFFLINE _job_nothing + OFFLINE _job_db2ldifSuffix2": "OK + KO", + "Instance ONLINE OFFLINE _job_nothing + OFFLINE _job_ldif2dbSuffix1": "OK + KO", + "Instance 
ONLINE OFFLINE _job_nothing + OFFLINE _job_ldif2dbSuffix2": "OK + KO", + "Instance ONLINE OFFLINE _job_nothing + OFFLINE _job_db2indexSuffix1": "OK + KO", + "Instance ONLINE OFFLINE _job_nothing + OFFLINE _job_db2indexSuffix2": "OK + KO", + "Instance ONLINE OFFLINE _job_nothing + OFFLINE _job_db2archive": "OK + KO", + "Instance ONLINE OFFLINE _job_nothing + OFFLINE _job_archive2db": "OK + KO", + "Instance ONLINE OFFLINE _job_db2ldifSuffix1 + OFFLINE _job_nothing": "KO + OK", + "Instance ONLINE OFFLINE _job_db2ldifSuffix1 + OFFLINE _job_db2ldifSuffix1": "KO + KO", + "Instance ONLINE OFFLINE _job_db2ldifSuffix1 + OFFLINE _job_db2ldifSuffix2": "KO + KO", + "Instance ONLINE OFFLINE _job_db2ldifSuffix1 + OFFLINE _job_ldif2dbSuffix1": "KO + KO", + "Instance ONLINE OFFLINE _job_db2ldifSuffix1 + OFFLINE _job_ldif2dbSuffix2": "KO + KO", + "Instance ONLINE OFFLINE _job_db2ldifSuffix1 + OFFLINE _job_db2indexSuffix1": "KO + KO", + "Instance ONLINE OFFLINE _job_db2ldifSuffix1 + OFFLINE _job_db2indexSuffix2": "KO + KO", + "Instance ONLINE OFFLINE _job_db2ldifSuffix1 + OFFLINE _job_db2archive": "KO + KO", + "Instance ONLINE OFFLINE _job_db2ldifSuffix1 + OFFLINE _job_archive2db": "KO + KO", + "Instance ONLINE OFFLINE _job_db2ldifSuffix2 + OFFLINE _job_nothing": "KO + OK", + "Instance ONLINE OFFLINE _job_db2ldifSuffix2 + OFFLINE _job_db2ldifSuffix1": "KO + KO", + "Instance ONLINE OFFLINE _job_db2ldifSuffix2 + OFFLINE _job_db2ldifSuffix2": "KO + KO", + "Instance ONLINE OFFLINE _job_db2ldifSuffix2 + OFFLINE _job_ldif2dbSuffix1": "KO + KO", + "Instance ONLINE OFFLINE _job_db2ldifSuffix2 + OFFLINE _job_ldif2dbSuffix2": "KO + KO", + "Instance ONLINE OFFLINE _job_db2ldifSuffix2 + OFFLINE _job_db2indexSuffix1": "KO + KO", + "Instance ONLINE OFFLINE _job_db2ldifSuffix2 + OFFLINE _job_db2indexSuffix2": "KO + KO", + "Instance ONLINE OFFLINE _job_db2ldifSuffix2 + OFFLINE _job_db2archive": "KO + KO", + "Instance ONLINE OFFLINE _job_db2ldifSuffix2 + OFFLINE _job_archive2db": "KO + KO", + 
"Instance ONLINE OFFLINE _job_ldif2dbSuffix1 + OFFLINE _job_nothing": "KO + OK", + "Instance ONLINE OFFLINE _job_ldif2dbSuffix1 + OFFLINE _job_db2ldifSuffix1": "KO + KO", + "Instance ONLINE OFFLINE _job_ldif2dbSuffix1 + OFFLINE _job_db2ldifSuffix2": "KO + KO", + "Instance ONLINE OFFLINE _job_ldif2dbSuffix1 + OFFLINE _job_ldif2dbSuffix1": "KO + KO", + "Instance ONLINE OFFLINE _job_ldif2dbSuffix1 + OFFLINE _job_ldif2dbSuffix2": "KO + KO", + "Instance ONLINE OFFLINE _job_ldif2dbSuffix1 + OFFLINE _job_db2indexSuffix1": "KO + KO", + "Instance ONLINE OFFLINE _job_ldif2dbSuffix1 + OFFLINE _job_db2indexSuffix2": "KO + KO", + "Instance ONLINE OFFLINE _job_ldif2dbSuffix1 + OFFLINE _job_db2archive": "KO + KO", + "Instance ONLINE OFFLINE _job_ldif2dbSuffix1 + OFFLINE _job_archive2db": "KO + KO", + "Instance ONLINE OFFLINE _job_ldif2dbSuffix2 + OFFLINE _job_nothing": "KO + OK", + "Instance ONLINE OFFLINE _job_ldif2dbSuffix2 + OFFLINE _job_db2ldifSuffix1": "KO + KO", + "Instance ONLINE OFFLINE _job_ldif2dbSuffix2 + OFFLINE _job_db2ldifSuffix2": "KO + KO", + "Instance ONLINE OFFLINE _job_ldif2dbSuffix2 + OFFLINE _job_ldif2dbSuffix1": "KO + KO", + "Instance ONLINE OFFLINE _job_ldif2dbSuffix2 + OFFLINE _job_ldif2dbSuffix2": "KO + KO", + "Instance ONLINE OFFLINE _job_ldif2dbSuffix2 + OFFLINE _job_db2indexSuffix1": "KO + KO", + "Instance ONLINE OFFLINE _job_ldif2dbSuffix2 + OFFLINE _job_db2indexSuffix2": "KO + KO", + "Instance ONLINE OFFLINE _job_ldif2dbSuffix2 + OFFLINE _job_db2archive": "KO + KO", + "Instance ONLINE OFFLINE _job_ldif2dbSuffix2 + OFFLINE _job_archive2db": "KO + KO", + "Instance ONLINE OFFLINE _job_db2indexSuffix1 + OFFLINE _job_nothing": "KO + OK", + "Instance ONLINE OFFLINE _job_db2indexSuffix1 + OFFLINE _job_db2ldifSuffix1": "KO + KO", + "Instance ONLINE OFFLINE _job_db2indexSuffix1 + OFFLINE _job_db2ldifSuffix2": "KO + KO", + "Instance ONLINE OFFLINE _job_db2indexSuffix1 + OFFLINE _job_ldif2dbSuffix1": "KO + KO", + "Instance ONLINE OFFLINE _job_db2indexSuffix1 + 
OFFLINE _job_ldif2dbSuffix2": "KO + KO", + "Instance ONLINE OFFLINE _job_db2indexSuffix1 + OFFLINE _job_db2indexSuffix1": "KO + KO", + "Instance ONLINE OFFLINE _job_db2indexSuffix1 + OFFLINE _job_db2indexSuffix2": "KO + KO", + "Instance ONLINE OFFLINE _job_db2indexSuffix1 + OFFLINE _job_db2archive": "KO + KO", + "Instance ONLINE OFFLINE _job_db2indexSuffix1 + OFFLINE _job_archive2db": "KO + KO", + "Instance ONLINE OFFLINE _job_db2indexSuffix2 + OFFLINE _job_nothing": "KO + OK", + "Instance ONLINE OFFLINE _job_db2indexSuffix2 + OFFLINE _job_db2ldifSuffix1": "KO + KO", + "Instance ONLINE OFFLINE _job_db2indexSuffix2 + OFFLINE _job_db2ldifSuffix2": "KO + KO", + "Instance ONLINE OFFLINE _job_db2indexSuffix2 + OFFLINE _job_ldif2dbSuffix1": "KO + KO", + "Instance ONLINE OFFLINE _job_db2indexSuffix2 + OFFLINE _job_ldif2dbSuffix2": "KO + KO", + "Instance ONLINE OFFLINE _job_db2indexSuffix2 + OFFLINE _job_db2indexSuffix1": "KO + KO", + "Instance ONLINE OFFLINE _job_db2indexSuffix2 + OFFLINE _job_db2indexSuffix2": "KO + KO", + "Instance ONLINE OFFLINE _job_db2indexSuffix2 + OFFLINE _job_db2archive": "KO + KO", + "Instance ONLINE OFFLINE _job_db2indexSuffix2 + OFFLINE _job_archive2db": "KO + KO", + "Instance ONLINE OFFLINE _job_db2archive + OFFLINE _job_nothing": "KO + OK", + "Instance ONLINE OFFLINE _job_db2archive + OFFLINE _job_db2ldifSuffix1": "KO + KO", + "Instance ONLINE OFFLINE _job_db2archive + OFFLINE _job_db2ldifSuffix2": "KO + KO", + "Instance ONLINE OFFLINE _job_db2archive + OFFLINE _job_ldif2dbSuffix1": "KO + KO", + "Instance ONLINE OFFLINE _job_db2archive + OFFLINE _job_ldif2dbSuffix2": "KO + KO", + "Instance ONLINE OFFLINE _job_db2archive + OFFLINE _job_db2indexSuffix1": "KO + KO", + "Instance ONLINE OFFLINE _job_db2archive + OFFLINE _job_db2indexSuffix2": "KO + KO", + "Instance ONLINE OFFLINE _job_db2archive + OFFLINE _job_db2archive": "KO + KO", + "Instance ONLINE OFFLINE _job_db2archive + OFFLINE _job_archive2db": "KO + KO", + "Instance ONLINE OFFLINE 
_job_archive2db + OFFLINE _job_nothing": "KO + OK", + "Instance ONLINE OFFLINE _job_archive2db + OFFLINE _job_db2ldifSuffix1": "KO + KO", + "Instance ONLINE OFFLINE _job_archive2db + OFFLINE _job_db2ldifSuffix2": "KO + KO", + "Instance ONLINE OFFLINE _job_archive2db + OFFLINE _job_ldif2dbSuffix1": "KO + KO", + "Instance ONLINE OFFLINE _job_archive2db + OFFLINE _job_ldif2dbSuffix2": "KO + KO", + "Instance ONLINE OFFLINE _job_archive2db + OFFLINE _job_db2indexSuffix1": "KO + KO", + "Instance ONLINE OFFLINE _job_archive2db + OFFLINE _job_db2indexSuffix2": "KO + KO", + "Instance ONLINE OFFLINE _job_archive2db + OFFLINE _job_db2archive": "KO + KO", + "Instance ONLINE OFFLINE _job_archive2db + OFFLINE _job_archive2db": "KO + KO", + "Instance ONLINE ONLINE _job_nothing + OFFLINE _job_nothing": "OK + OK", + "Instance ONLINE ONLINE _job_nothing + OFFLINE _job_db2ldifSuffix1": "OK + KO", + "Instance ONLINE ONLINE _job_nothing + OFFLINE _job_db2ldifSuffix2": "OK + KO", + "Instance ONLINE ONLINE _job_nothing + OFFLINE _job_ldif2dbSuffix1": "OK + KO", + "Instance ONLINE ONLINE _job_nothing + OFFLINE _job_ldif2dbSuffix2": "OK + KO", + "Instance ONLINE ONLINE _job_nothing + OFFLINE _job_db2indexSuffix1": "OK + KO", + "Instance ONLINE ONLINE _job_nothing + OFFLINE _job_db2indexSuffix2": "OK + KO", + "Instance ONLINE ONLINE _job_nothing + OFFLINE _job_db2archive": "OK + KO", + "Instance ONLINE ONLINE _job_nothing + OFFLINE _job_archive2db": "OK + KO", + "Instance ONLINE ONLINE _job_db2ldifSuffix1 + OFFLINE _job_nothing": "OK + OK", + "Instance ONLINE ONLINE _job_db2ldifSuffix1 + OFFLINE _job_db2ldifSuffix1": "OK + KO", + "Instance ONLINE ONLINE _job_db2ldifSuffix1 + OFFLINE _job_db2ldifSuffix2": "OK + KO", + "Instance ONLINE ONLINE _job_db2ldifSuffix1 + OFFLINE _job_ldif2dbSuffix1": "OK + KO", + "Instance ONLINE ONLINE _job_db2ldifSuffix1 + OFFLINE _job_ldif2dbSuffix2": "OK + KO", + "Instance ONLINE ONLINE _job_db2ldifSuffix1 + OFFLINE _job_db2indexSuffix1": "OK + KO", + "Instance 
ONLINE ONLINE _job_db2ldifSuffix1 + OFFLINE _job_db2indexSuffix2": "OK + KO", + "Instance ONLINE ONLINE _job_db2ldifSuffix1 + OFFLINE _job_db2archive": "OK + KO", + "Instance ONLINE ONLINE _job_db2ldifSuffix1 + OFFLINE _job_archive2db": "OK + KO", + "Instance ONLINE ONLINE _job_db2ldifSuffix2 + OFFLINE _job_nothing": "OK + OK", + "Instance ONLINE ONLINE _job_db2ldifSuffix2 + OFFLINE _job_db2ldifSuffix1": "OK + KO", + "Instance ONLINE ONLINE _job_db2ldifSuffix2 + OFFLINE _job_db2ldifSuffix2": "OK + KO", + "Instance ONLINE ONLINE _job_db2ldifSuffix2 + OFFLINE _job_ldif2dbSuffix1": "OK + KO", + "Instance ONLINE ONLINE _job_db2ldifSuffix2 + OFFLINE _job_ldif2dbSuffix2": "OK + KO", + "Instance ONLINE ONLINE _job_db2ldifSuffix2 + OFFLINE _job_db2indexSuffix1": "OK + KO", + "Instance ONLINE ONLINE _job_db2ldifSuffix2 + OFFLINE _job_db2indexSuffix2": "OK + KO", + "Instance ONLINE ONLINE _job_db2ldifSuffix2 + OFFLINE _job_db2archive": "OK + KO", + "Instance ONLINE ONLINE _job_db2ldifSuffix2 + OFFLINE _job_archive2db": "OK + KO", + "Instance ONLINE ONLINE _job_ldif2dbSuffix1 + OFFLINE _job_nothing": "OK + OK", + "Instance ONLINE ONLINE _job_ldif2dbSuffix1 + OFFLINE _job_db2ldifSuffix1": "OK + KO", + "Instance ONLINE ONLINE _job_ldif2dbSuffix1 + OFFLINE _job_db2ldifSuffix2": "OK + KO", + "Instance ONLINE ONLINE _job_ldif2dbSuffix1 + OFFLINE _job_ldif2dbSuffix1": "OK + KO", + "Instance ONLINE ONLINE _job_ldif2dbSuffix1 + OFFLINE _job_ldif2dbSuffix2": "OK + KO", + "Instance ONLINE ONLINE _job_ldif2dbSuffix1 + OFFLINE _job_db2indexSuffix1": "OK + KO", + "Instance ONLINE ONLINE _job_ldif2dbSuffix1 + OFFLINE _job_db2indexSuffix2": "OK + KO", + "Instance ONLINE ONLINE _job_ldif2dbSuffix1 + OFFLINE _job_db2archive": "OK + KO", + "Instance ONLINE ONLINE _job_ldif2dbSuffix1 + OFFLINE _job_archive2db": "OK + KO", + "Instance ONLINE ONLINE _job_ldif2dbSuffix2 + OFFLINE _job_nothing": "OK + OK", + "Instance ONLINE ONLINE _job_ldif2dbSuffix2 + OFFLINE _job_db2ldifSuffix1": "OK + KO", + 
"Instance ONLINE ONLINE _job_ldif2dbSuffix2 + OFFLINE _job_db2ldifSuffix2": "OK + KO", + "Instance ONLINE ONLINE _job_ldif2dbSuffix2 + OFFLINE _job_ldif2dbSuffix1": "OK + KO", + "Instance ONLINE ONLINE _job_ldif2dbSuffix2 + OFFLINE _job_ldif2dbSuffix2": "OK + KO", + "Instance ONLINE ONLINE _job_ldif2dbSuffix2 + OFFLINE _job_db2indexSuffix1": "OK + KO", + "Instance ONLINE ONLINE _job_ldif2dbSuffix2 + OFFLINE _job_db2indexSuffix2": "OK + KO", + "Instance ONLINE ONLINE _job_ldif2dbSuffix2 + OFFLINE _job_db2archive": "OK + KO", + "Instance ONLINE ONLINE _job_ldif2dbSuffix2 + OFFLINE _job_archive2db": "OK + KO", + "Instance ONLINE ONLINE _job_db2indexSuffix1 + OFFLINE _job_nothing": "OK + OK", + "Instance ONLINE ONLINE _job_db2indexSuffix1 + OFFLINE _job_db2ldifSuffix1": "OK + KO", + "Instance ONLINE ONLINE _job_db2indexSuffix1 + OFFLINE _job_db2ldifSuffix2": "OK + KO", + "Instance ONLINE ONLINE _job_db2indexSuffix1 + OFFLINE _job_ldif2dbSuffix1": "OK + KO", + "Instance ONLINE ONLINE _job_db2indexSuffix1 + OFFLINE _job_ldif2dbSuffix2": "OK + KO", + "Instance ONLINE ONLINE _job_db2indexSuffix1 + OFFLINE _job_db2indexSuffix1": "OK + KO", + "Instance ONLINE ONLINE _job_db2indexSuffix1 + OFFLINE _job_db2indexSuffix2": "OK + KO", + "Instance ONLINE ONLINE _job_db2indexSuffix1 + OFFLINE _job_db2archive": "OK + KO", + "Instance ONLINE ONLINE _job_db2indexSuffix1 + OFFLINE _job_archive2db": "OK + KO", + "Instance ONLINE ONLINE _job_db2indexSuffix2 + OFFLINE _job_nothing": "OK + OK", + "Instance ONLINE ONLINE _job_db2indexSuffix2 + OFFLINE _job_db2ldifSuffix1": "OK + KO", + "Instance ONLINE ONLINE _job_db2indexSuffix2 + OFFLINE _job_db2ldifSuffix2": "OK + KO", + "Instance ONLINE ONLINE _job_db2indexSuffix2 + OFFLINE _job_ldif2dbSuffix1": "OK + KO", + "Instance ONLINE ONLINE _job_db2indexSuffix2 + OFFLINE _job_ldif2dbSuffix2": "OK + KO", + "Instance ONLINE ONLINE _job_db2indexSuffix2 + OFFLINE _job_db2indexSuffix1": "OK + KO", + "Instance ONLINE ONLINE _job_db2indexSuffix2 + 
OFFLINE _job_db2indexSuffix2": "OK + KO", + "Instance ONLINE ONLINE _job_db2indexSuffix2 + OFFLINE _job_db2archive": "OK + KO", + "Instance ONLINE ONLINE _job_db2indexSuffix2 + OFFLINE _job_archive2db": "OK + KO", + "Instance ONLINE ONLINE _job_db2archive + OFFLINE _job_nothing": "OK + OK", + "Instance ONLINE ONLINE _job_db2archive + OFFLINE _job_db2ldifSuffix1": "OK + KO", + "Instance ONLINE ONLINE _job_db2archive + OFFLINE _job_db2ldifSuffix2": "OK + KO", + "Instance ONLINE ONLINE _job_db2archive + OFFLINE _job_ldif2dbSuffix1": "OK + KO", + "Instance ONLINE ONLINE _job_db2archive + OFFLINE _job_ldif2dbSuffix2": "OK + KO", + "Instance ONLINE ONLINE _job_db2archive + OFFLINE _job_db2indexSuffix1": "OK + KO", + "Instance ONLINE ONLINE _job_db2archive + OFFLINE _job_db2indexSuffix2": "OK + KO", + "Instance ONLINE ONLINE _job_db2archive + OFFLINE _job_db2archive": "KO + KO", + "Instance ONLINE ONLINE _job_db2archive + OFFLINE _job_archive2db": "KO + KO", + "Instance ONLINE ONLINE _job_archive2db + OFFLINE _job_nothing": "KO + OK", + "Instance ONLINE ONLINE _job_archive2db + OFFLINE _job_db2ldifSuffix1": "KO + KO", + "Instance ONLINE ONLINE _job_archive2db + OFFLINE _job_db2ldifSuffix2": "KO + KO", + "Instance ONLINE ONLINE _job_archive2db + OFFLINE _job_ldif2dbSuffix1": "KO + KO", + "Instance ONLINE ONLINE _job_archive2db + OFFLINE _job_ldif2dbSuffix2": "KO + KO", + "Instance ONLINE ONLINE _job_archive2db + OFFLINE _job_db2indexSuffix1": "KO + KO", + "Instance ONLINE ONLINE _job_archive2db + OFFLINE _job_db2indexSuffix2": "KO + KO", + "Instance ONLINE ONLINE _job_archive2db + OFFLINE _job_db2archive": "KO + KO", + "Instance ONLINE ONLINE _job_archive2db + OFFLINE _job_archive2db": "KO + KO", + "Instance ONLINE OFFLINE _job_nothing + ONLINE _job_nothing": "OK + OK", + "Instance ONLINE OFFLINE _job_nothing + ONLINE _job_db2ldifSuffix1": "OK + KO", + "Instance ONLINE OFFLINE _job_nothing + ONLINE _job_db2ldifSuffix2": "OK + KO", + "Instance ONLINE OFFLINE _job_nothing + 
ONLINE _job_ldif2dbSuffix1": "OK + KO", + "Instance ONLINE OFFLINE _job_nothing + ONLINE _job_ldif2dbSuffix2": "OK + KO", + "Instance ONLINE OFFLINE _job_nothing + ONLINE _job_db2indexSuffix1": "OK + KO", + "Instance ONLINE OFFLINE _job_nothing + ONLINE _job_db2indexSuffix2": "OK + KO", + "Instance ONLINE OFFLINE _job_nothing + ONLINE _job_db2archive": "OK + KO", + "Instance ONLINE OFFLINE _job_nothing + ONLINE _job_archive2db": "OK + KO", + "Instance ONLINE OFFLINE _job_db2ldifSuffix1 + ONLINE _job_nothing": "KO + OK", + "Instance ONLINE OFFLINE _job_db2ldifSuffix1 + ONLINE _job_db2ldifSuffix1": "KO + KO", + "Instance ONLINE OFFLINE _job_db2ldifSuffix1 + ONLINE _job_db2ldifSuffix2": "KO + KO", + "Instance ONLINE OFFLINE _job_db2ldifSuffix1 + ONLINE _job_ldif2dbSuffix1": "KO + KO", + "Instance ONLINE OFFLINE _job_db2ldifSuffix1 + ONLINE _job_ldif2dbSuffix2": "KO + KO", + "Instance ONLINE OFFLINE _job_db2ldifSuffix1 + ONLINE _job_db2indexSuffix1": "KO + KO", + "Instance ONLINE OFFLINE _job_db2ldifSuffix1 + ONLINE _job_db2indexSuffix2": "KO + KO", + "Instance ONLINE OFFLINE _job_db2ldifSuffix1 + ONLINE _job_db2archive": "KO + KO", + "Instance ONLINE OFFLINE _job_db2ldifSuffix1 + ONLINE _job_archive2db": "KO + KO", + "Instance ONLINE OFFLINE _job_db2ldifSuffix2 + ONLINE _job_nothing": "KO + OK", + "Instance ONLINE OFFLINE _job_db2ldifSuffix2 + ONLINE _job_db2ldifSuffix1": "KO + KO", + "Instance ONLINE OFFLINE _job_db2ldifSuffix2 + ONLINE _job_db2ldifSuffix2": "KO + KO", + "Instance ONLINE OFFLINE _job_db2ldifSuffix2 + ONLINE _job_ldif2dbSuffix1": "KO + KO", + "Instance ONLINE OFFLINE _job_db2ldifSuffix2 + ONLINE _job_ldif2dbSuffix2": "KO + KO", + "Instance ONLINE OFFLINE _job_db2ldifSuffix2 + ONLINE _job_db2indexSuffix1": "KO + KO", + "Instance ONLINE OFFLINE _job_db2ldifSuffix2 + ONLINE _job_db2indexSuffix2": "KO + KO", + "Instance ONLINE OFFLINE _job_db2ldifSuffix2 + ONLINE _job_db2archive": "KO + KO", + "Instance ONLINE OFFLINE _job_db2ldifSuffix2 + ONLINE 
_job_archive2db": "KO + KO", + "Instance ONLINE OFFLINE _job_ldif2dbSuffix1 + ONLINE _job_nothing": "KO + OK", + "Instance ONLINE OFFLINE _job_ldif2dbSuffix1 + ONLINE _job_db2ldifSuffix1": "KO + KO", + "Instance ONLINE OFFLINE _job_ldif2dbSuffix1 + ONLINE _job_db2ldifSuffix2": "KO + KO", + "Instance ONLINE OFFLINE _job_ldif2dbSuffix1 + ONLINE _job_ldif2dbSuffix1": "KO + KO", + "Instance ONLINE OFFLINE _job_ldif2dbSuffix1 + ONLINE _job_ldif2dbSuffix2": "KO + KO", + "Instance ONLINE OFFLINE _job_ldif2dbSuffix1 + ONLINE _job_db2indexSuffix1": "KO + KO", + "Instance ONLINE OFFLINE _job_ldif2dbSuffix1 + ONLINE _job_db2indexSuffix2": "KO + KO", + "Instance ONLINE OFFLINE _job_ldif2dbSuffix1 + ONLINE _job_db2archive": "KO + KO", + "Instance ONLINE OFFLINE _job_ldif2dbSuffix1 + ONLINE _job_archive2db": "KO + KO", + "Instance ONLINE OFFLINE _job_ldif2dbSuffix2 + ONLINE _job_nothing": "KO + OK", + "Instance ONLINE OFFLINE _job_ldif2dbSuffix2 + ONLINE _job_db2ldifSuffix1": "KO + KO", + "Instance ONLINE OFFLINE _job_ldif2dbSuffix2 + ONLINE _job_db2ldifSuffix2": "KO + KO", + "Instance ONLINE OFFLINE _job_ldif2dbSuffix2 + ONLINE _job_ldif2dbSuffix1": "KO + KO", + "Instance ONLINE OFFLINE _job_ldif2dbSuffix2 + ONLINE _job_ldif2dbSuffix2": "KO + KO", + "Instance ONLINE OFFLINE _job_ldif2dbSuffix2 + ONLINE _job_db2indexSuffix1": "KO + KO", + "Instance ONLINE OFFLINE _job_ldif2dbSuffix2 + ONLINE _job_db2indexSuffix2": "KO + KO", + "Instance ONLINE OFFLINE _job_ldif2dbSuffix2 + ONLINE _job_db2archive": "KO + KO", + "Instance ONLINE OFFLINE _job_ldif2dbSuffix2 + ONLINE _job_archive2db": "KO + KO", + "Instance ONLINE OFFLINE _job_db2indexSuffix1 + ONLINE _job_nothing": "KO + OK", + "Instance ONLINE OFFLINE _job_db2indexSuffix1 + ONLINE _job_db2ldifSuffix1": "KO + KO", + "Instance ONLINE OFFLINE _job_db2indexSuffix1 + ONLINE _job_db2ldifSuffix2": "KO + KO", + "Instance ONLINE OFFLINE _job_db2indexSuffix1 + ONLINE _job_ldif2dbSuffix1": "KO + KO", + "Instance ONLINE OFFLINE 
_job_db2indexSuffix1 + ONLINE _job_ldif2dbSuffix2": "KO + KO", + "Instance ONLINE OFFLINE _job_db2indexSuffix1 + ONLINE _job_db2indexSuffix1": "KO + KO", + "Instance ONLINE OFFLINE _job_db2indexSuffix1 + ONLINE _job_db2indexSuffix2": "KO + KO", + "Instance ONLINE OFFLINE _job_db2indexSuffix1 + ONLINE _job_db2archive": "KO + KO", + "Instance ONLINE OFFLINE _job_db2indexSuffix1 + ONLINE _job_archive2db": "KO + KO", + "Instance ONLINE OFFLINE _job_db2indexSuffix2 + ONLINE _job_nothing": "KO + OK", + "Instance ONLINE OFFLINE _job_db2indexSuffix2 + ONLINE _job_db2ldifSuffix1": "KO + KO", + "Instance ONLINE OFFLINE _job_db2indexSuffix2 + ONLINE _job_db2ldifSuffix2": "KO + KO", + "Instance ONLINE OFFLINE _job_db2indexSuffix2 + ONLINE _job_ldif2dbSuffix1": "KO + KO", + "Instance ONLINE OFFLINE _job_db2indexSuffix2 + ONLINE _job_ldif2dbSuffix2": "KO + KO", + "Instance ONLINE OFFLINE _job_db2indexSuffix2 + ONLINE _job_db2indexSuffix1": "KO + KO", + "Instance ONLINE OFFLINE _job_db2indexSuffix2 + ONLINE _job_db2indexSuffix2": "KO + KO", + "Instance ONLINE OFFLINE _job_db2indexSuffix2 + ONLINE _job_db2archive": "KO + KO", + "Instance ONLINE OFFLINE _job_db2indexSuffix2 + ONLINE _job_archive2db": "KO + KO", + "Instance ONLINE OFFLINE _job_db2archive + ONLINE _job_nothing": "KO + OK", + "Instance ONLINE OFFLINE _job_db2archive + ONLINE _job_db2ldifSuffix1": "KO + KO", + "Instance ONLINE OFFLINE _job_db2archive + ONLINE _job_db2ldifSuffix2": "KO + KO", + "Instance ONLINE OFFLINE _job_db2archive + ONLINE _job_ldif2dbSuffix1": "KO + KO", + "Instance ONLINE OFFLINE _job_db2archive + ONLINE _job_ldif2dbSuffix2": "KO + KO", + "Instance ONLINE OFFLINE _job_db2archive + ONLINE _job_db2indexSuffix1": "KO + KO", + "Instance ONLINE OFFLINE _job_db2archive + ONLINE _job_db2indexSuffix2": "KO + KO", + "Instance ONLINE OFFLINE _job_db2archive + ONLINE _job_db2archive": "KO + KO", + "Instance ONLINE OFFLINE _job_db2archive + ONLINE _job_archive2db": "KO + KO", + "Instance ONLINE OFFLINE 
_job_archive2db + ONLINE _job_nothing": "KO + OK", + "Instance ONLINE OFFLINE _job_archive2db + ONLINE _job_db2ldifSuffix1": "KO + KO", + "Instance ONLINE OFFLINE _job_archive2db + ONLINE _job_db2ldifSuffix2": "KO + KO", + "Instance ONLINE OFFLINE _job_archive2db + ONLINE _job_ldif2dbSuffix1": "KO + KO", + "Instance ONLINE OFFLINE _job_archive2db + ONLINE _job_ldif2dbSuffix2": "KO + KO", + "Instance ONLINE OFFLINE _job_archive2db + ONLINE _job_db2indexSuffix1": "KO + KO", + "Instance ONLINE OFFLINE _job_archive2db + ONLINE _job_db2indexSuffix2": "KO + KO", + "Instance ONLINE OFFLINE _job_archive2db + ONLINE _job_db2archive": "KO + KO", + "Instance ONLINE OFFLINE _job_archive2db + ONLINE _job_archive2db": "KO + KO", + "Instance ONLINE ONLINE _job_nothing + ONLINE _job_nothing": "OK + OK", + "Instance ONLINE ONLINE _job_nothing + ONLINE _job_db2ldifSuffix1": "OK + KO", + "Instance ONLINE ONLINE _job_nothing + ONLINE _job_db2ldifSuffix2": "OK + KO", + "Instance ONLINE ONLINE _job_nothing + ONLINE _job_ldif2dbSuffix1": "OK + KO", + "Instance ONLINE ONLINE _job_nothing + ONLINE _job_ldif2dbSuffix2": "OK + KO", + "Instance ONLINE ONLINE _job_nothing + ONLINE _job_db2indexSuffix1": "OK + KO", + "Instance ONLINE ONLINE _job_nothing + ONLINE _job_db2indexSuffix2": "OK + KO", + "Instance ONLINE ONLINE _job_nothing + ONLINE _job_db2archive": "OK + KO", + "Instance ONLINE ONLINE _job_nothing + ONLINE _job_archive2db": "OK + KO", + "Instance ONLINE ONLINE _job_db2ldifSuffix1 + ONLINE _job_nothing": "KO + OK", + "Instance ONLINE ONLINE _job_db2ldifSuffix1 + ONLINE _job_db2ldifSuffix1": "KO + KO", + "Instance ONLINE ONLINE _job_db2ldifSuffix1 + ONLINE _job_db2ldifSuffix2": "KO + KO", + "Instance ONLINE ONLINE _job_db2ldifSuffix1 + ONLINE _job_ldif2dbSuffix1": "KO + KO", + "Instance ONLINE ONLINE _job_db2ldifSuffix1 + ONLINE _job_ldif2dbSuffix2": "KO + KO", + "Instance ONLINE ONLINE _job_db2ldifSuffix1 + ONLINE _job_db2indexSuffix1": "KO + KO", + "Instance ONLINE ONLINE 
_job_db2ldifSuffix1 + ONLINE _job_db2indexSuffix2": "KO + KO", + "Instance ONLINE ONLINE _job_db2ldifSuffix1 + ONLINE _job_db2archive": "KO + KO", + "Instance ONLINE ONLINE _job_db2ldifSuffix1 + ONLINE _job_archive2db": "KO + KO", + "Instance ONLINE ONLINE _job_db2ldifSuffix2 + ONLINE _job_nothing": "KO + OK", + "Instance ONLINE ONLINE _job_db2ldifSuffix2 + ONLINE _job_db2ldifSuffix1": "KO + KO", + "Instance ONLINE ONLINE _job_db2ldifSuffix2 + ONLINE _job_db2ldifSuffix2": "KO + KO", + "Instance ONLINE ONLINE _job_db2ldifSuffix2 + ONLINE _job_ldif2dbSuffix1": "KO + KO", + "Instance ONLINE ONLINE _job_db2ldifSuffix2 + ONLINE _job_ldif2dbSuffix2": "KO + KO", + "Instance ONLINE ONLINE _job_db2ldifSuffix2 + ONLINE _job_db2indexSuffix1": "KO + KO", + "Instance ONLINE ONLINE _job_db2ldifSuffix2 + ONLINE _job_db2indexSuffix2": "KO + KO", + "Instance ONLINE ONLINE _job_db2ldifSuffix2 + ONLINE _job_db2archive": "KO + KO", + "Instance ONLINE ONLINE _job_db2ldifSuffix2 + ONLINE _job_archive2db": "KO + KO", + "Instance ONLINE ONLINE _job_ldif2dbSuffix1 + ONLINE _job_nothing": "KO + OK", + "Instance ONLINE ONLINE _job_ldif2dbSuffix1 + ONLINE _job_db2ldifSuffix1": "KO + KO", + "Instance ONLINE ONLINE _job_ldif2dbSuffix1 + ONLINE _job_db2ldifSuffix2": "KO + KO", + "Instance ONLINE ONLINE _job_ldif2dbSuffix1 + ONLINE _job_ldif2dbSuffix1": "KO + KO", + "Instance ONLINE ONLINE _job_ldif2dbSuffix1 + ONLINE _job_ldif2dbSuffix2": "KO + KO", + "Instance ONLINE ONLINE _job_ldif2dbSuffix1 + ONLINE _job_db2indexSuffix1": "KO + KO", + "Instance ONLINE ONLINE _job_ldif2dbSuffix1 + ONLINE _job_db2indexSuffix2": "KO + KO", + "Instance ONLINE ONLINE _job_ldif2dbSuffix1 + ONLINE _job_db2archive": "KO + KO", + "Instance ONLINE ONLINE _job_ldif2dbSuffix1 + ONLINE _job_archive2db": "KO + KO", + "Instance ONLINE ONLINE _job_ldif2dbSuffix2 + ONLINE _job_nothing": "KO + OK", + "Instance ONLINE ONLINE _job_ldif2dbSuffix2 + ONLINE _job_db2ldifSuffix1": "KO + KO", + "Instance ONLINE ONLINE 
_job_ldif2dbSuffix2 + ONLINE _job_db2ldifSuffix2": "KO + KO", + "Instance ONLINE ONLINE _job_ldif2dbSuffix2 + ONLINE _job_ldif2dbSuffix1": "KO + KO", + "Instance ONLINE ONLINE _job_ldif2dbSuffix2 + ONLINE _job_ldif2dbSuffix2": "KO + KO", + "Instance ONLINE ONLINE _job_ldif2dbSuffix2 + ONLINE _job_db2indexSuffix1": "KO + KO", + "Instance ONLINE ONLINE _job_ldif2dbSuffix2 + ONLINE _job_db2indexSuffix2": "KO + KO", + "Instance ONLINE ONLINE _job_ldif2dbSuffix2 + ONLINE _job_db2archive": "KO + KO", + "Instance ONLINE ONLINE _job_ldif2dbSuffix2 + ONLINE _job_archive2db": "KO + KO", + "Instance ONLINE ONLINE _job_db2indexSuffix1 + ONLINE _job_nothing": "KO + OK", + "Instance ONLINE ONLINE _job_db2indexSuffix1 + ONLINE _job_db2ldifSuffix1": "KO + KO", + "Instance ONLINE ONLINE _job_db2indexSuffix1 + ONLINE _job_db2ldifSuffix2": "KO + KO", + "Instance ONLINE ONLINE _job_db2indexSuffix1 + ONLINE _job_ldif2dbSuffix1": "KO + KO", + "Instance ONLINE ONLINE _job_db2indexSuffix1 + ONLINE _job_ldif2dbSuffix2": "KO + KO", + "Instance ONLINE ONLINE _job_db2indexSuffix1 + ONLINE _job_db2indexSuffix1": "KO + KO", + "Instance ONLINE ONLINE _job_db2indexSuffix1 + ONLINE _job_db2indexSuffix2": "KO + KO", + "Instance ONLINE ONLINE _job_db2indexSuffix1 + ONLINE _job_db2archive": "KO + KO", + "Instance ONLINE ONLINE _job_db2indexSuffix1 + ONLINE _job_archive2db": "KO + KO", + "Instance ONLINE ONLINE _job_db2indexSuffix2 + ONLINE _job_nothing": "KO + OK", + "Instance ONLINE ONLINE _job_db2indexSuffix2 + ONLINE _job_db2ldifSuffix1": "KO + KO", + "Instance ONLINE ONLINE _job_db2indexSuffix2 + ONLINE _job_db2ldifSuffix2": "KO + KO", + "Instance ONLINE ONLINE _job_db2indexSuffix2 + ONLINE _job_ldif2dbSuffix1": "KO + KO", + "Instance ONLINE ONLINE _job_db2indexSuffix2 + ONLINE _job_ldif2dbSuffix2": "KO + KO", + "Instance ONLINE ONLINE _job_db2indexSuffix2 + ONLINE _job_db2indexSuffix1": "KO + KO", + "Instance ONLINE ONLINE _job_db2indexSuffix2 + ONLINE _job_db2indexSuffix2": "KO + KO", + 
"Instance ONLINE ONLINE _job_db2indexSuffix2 + ONLINE _job_db2archive": "KO + KO", + "Instance ONLINE ONLINE _job_db2indexSuffix2 + ONLINE _job_archive2db": "KO + KO", + "Instance ONLINE ONLINE _job_db2archive + ONLINE _job_nothing": "KO + OK", + "Instance ONLINE ONLINE _job_db2archive + ONLINE _job_db2ldifSuffix1": "KO + KO", + "Instance ONLINE ONLINE _job_db2archive + ONLINE _job_db2ldifSuffix2": "KO + KO", + "Instance ONLINE ONLINE _job_db2archive + ONLINE _job_ldif2dbSuffix1": "KO + KO", + "Instance ONLINE ONLINE _job_db2archive + ONLINE _job_ldif2dbSuffix2": "KO + KO", + "Instance ONLINE ONLINE _job_db2archive + ONLINE _job_db2indexSuffix1": "KO + KO", + "Instance ONLINE ONLINE _job_db2archive + ONLINE _job_db2indexSuffix2": "KO + KO", + "Instance ONLINE ONLINE _job_db2archive + ONLINE _job_db2archive": "KO + KO", + "Instance ONLINE ONLINE _job_db2archive + ONLINE _job_archive2db": "KO + KO", + "Instance ONLINE ONLINE _job_archive2db + ONLINE _job_nothing": "KO + OK", + "Instance ONLINE ONLINE _job_archive2db + ONLINE _job_db2ldifSuffix1": "KO + KO", + "Instance ONLINE ONLINE _job_archive2db + ONLINE _job_db2ldifSuffix2": "KO + KO", + "Instance ONLINE ONLINE _job_archive2db + ONLINE _job_ldif2dbSuffix1": "KO + KO", + "Instance ONLINE ONLINE _job_archive2db + ONLINE _job_ldif2dbSuffix2": "KO + KO", + "Instance ONLINE ONLINE _job_archive2db + ONLINE _job_db2indexSuffix1": "KO + KO", + "Instance ONLINE ONLINE _job_archive2db + ONLINE _job_db2indexSuffix2": "KO + KO", + "Instance ONLINE ONLINE _job_archive2db + ONLINE _job_db2archive": "KO + KO", + "Instance ONLINE ONLINE _job_archive2db + ONLINE _job_archive2db": "KO + KO"} diff --git a/dirsrvtests/tests/data/openldap_2_389/1/example_com.slapcat.ldif b/dirsrvtests/tests/data/openldap_2_389/1/example_com.slapcat.ldif new file mode 100644 index 0000000..c7a5089 --- /dev/null +++ b/dirsrvtests/tests/data/openldap_2_389/1/example_com.slapcat.ldif @@ -0,0 +1,241 @@ +dn: dc=example,dc=com +objectClass: dcObject 
+objectClass: organization +o: Example Company +dc: example +structuralObjectClass: organization +entryUUID: 67c6a9b8-eafa-1039-882d-152569770969 +creatorsName: cn=Manager,dc=example,dc=com +createTimestamp: 20200224023755Z +entryCSN: 20200224023755.130368Z#000000#000#000000 +modifiersName: cn=Manager,dc=example,dc=com +modifyTimestamp: 20200224023755Z + +dn: cn=Manager,dc=example,dc=com +objectClass: organizationalRole +cn: Manager +structuralObjectClass: organizationalRole +entryUUID: 67c8c932-eafa-1039-882e-152569770969 +creatorsName: cn=Manager,dc=example,dc=com +createTimestamp: 20200224023755Z +entryCSN: 20200224023755.144283Z#000000#000#000000 +modifiersName: cn=Manager,dc=example,dc=com +modifyTimestamp: 20200224023755Z + +dn: ou=People,dc=example,dc=com +objectClass: organizationalUnit +ou: People +structuralObjectClass: organizationalUnit +entryUUID: 67ca92a8-eafa-1039-882f-152569770969 +creatorsName: cn=Manager,dc=example,dc=com +createTimestamp: 20200224023755Z +entryCSN: 20200224023755.155994Z#000000#000#000000 +modifiersName: cn=Manager,dc=example,dc=com +modifyTimestamp: 20200224023755Z + +dn: ou=Groups,dc=example,dc=com +objectClass: organizationalUnit +ou: Groups +structuralObjectClass: organizationalUnit +entryUUID: 67cc2212-eafa-1039-8830-152569770969 +creatorsName: cn=Manager,dc=example,dc=com +createTimestamp: 20200224023755Z +entryCSN: 20200224023755.166219Z#000000#000#000000 +modifiersName: cn=Manager,dc=example,dc=com +modifyTimestamp: 20200224023755Z + +dn: cn=user0,ou=People,dc=example,dc=com +objectClass: account +objectClass: posixAccount +cn: user0 +uid: user0 +uidNumber: 80000 +gidNumber: 80000 +homeDirectory: /home/user0 +structuralObjectClass: account +entryUUID: 67cdfcea-eafa-1039-8831-152569770969 +creatorsName: cn=Manager,dc=example,dc=com +createTimestamp: 20200224023755Z +entryCSN: 20200224023755.178373Z#000000#000#000000 +modifyTimestamp: 20200224023755Z +memberOf: cn=group0,ou=groups,dc=example,dc=com +memberOf: 
cn=group1,ou=groups,dc=example,dc=com +memberOf: cn=group2,ou=groups,dc=example,dc=com +memberOf: cn=group3,ou=groups,dc=example,dc=com +memberOf: cn=group4,ou=groups,dc=example,dc=com +modifiersName: cn=Manager,dc=example,dc=com + +dn: cn=user1,ou=People,dc=example,dc=com +objectClass: account +objectClass: posixAccount +cn: user1 +uid: user1 +uidNumber: 80001 +gidNumber: 80001 +homeDirectory: /home/user1 +structuralObjectClass: account +entryUUID: 67d05080-eafa-1039-8832-152569770969 +creatorsName: cn=Manager,dc=example,dc=com +createTimestamp: 20200224023755Z +entryCSN: 20200224023755.193620Z#000000#000#000000 +modifyTimestamp: 20200224023755Z +memberOf: cn=group0,ou=groups,dc=example,dc=com +memberOf: cn=group1,ou=groups,dc=example,dc=com +memberOf: cn=group2,ou=groups,dc=example,dc=com +memberOf: cn=group3,ou=groups,dc=example,dc=com +memberOf: cn=group4,ou=groups,dc=example,dc=com +modifiersName: cn=Manager,dc=example,dc=com + +dn: cn=user2,ou=People,dc=example,dc=com +objectClass: account +objectClass: posixAccount +cn: user2 +uid: user2 +uidNumber: 80002 +gidNumber: 80002 +homeDirectory: /home/user2 +structuralObjectClass: account +entryUUID: 67d26172-eafa-1039-8833-152569770969 +creatorsName: cn=Manager,dc=example,dc=com +createTimestamp: 20200224023755Z +entryCSN: 20200224023755.207161Z#000000#000#000000 +modifyTimestamp: 20200224023755Z +memberOf: cn=group0,ou=groups,dc=example,dc=com +memberOf: cn=group1,ou=groups,dc=example,dc=com +memberOf: cn=group2,ou=groups,dc=example,dc=com +memberOf: cn=group3,ou=groups,dc=example,dc=com +memberOf: cn=group4,ou=groups,dc=example,dc=com +modifiersName: cn=Manager,dc=example,dc=com + +dn: cn=user3,ou=People,dc=example,dc=com +objectClass: account +objectClass: posixAccount +cn: user3 +uid: user3 +uidNumber: 80003 +gidNumber: 80003 +homeDirectory: /home/user3 +structuralObjectClass: account +entryUUID: 67d460bc-eafa-1039-8834-152569770969 +creatorsName: cn=Manager,dc=example,dc=com +createTimestamp: 20200224023755Z 
+entryCSN: 20200224023755.220249Z#000000#000#000000 +modifyTimestamp: 20200224023755Z +memberOf: cn=group0,ou=groups,dc=example,dc=com +memberOf: cn=group1,ou=groups,dc=example,dc=com +memberOf: cn=group2,ou=groups,dc=example,dc=com +memberOf: cn=group3,ou=groups,dc=example,dc=com +memberOf: cn=group4,ou=groups,dc=example,dc=com +modifiersName: cn=Manager,dc=example,dc=com + +dn: cn=user4,ou=People,dc=example,dc=com +objectClass: account +objectClass: posixAccount +cn: user4 +uid: user4 +uidNumber: 80004 +gidNumber: 80004 +homeDirectory: /home/user4 +structuralObjectClass: account +entryUUID: 67d5d2a8-eafa-1039-8835-152569770969 +creatorsName: cn=Manager,dc=example,dc=com +createTimestamp: 20200224023755Z +entryCSN: 20200224023755.229723Z#000000#000#000000 +modifyTimestamp: 20200224023755Z +memberOf: cn=group0,ou=groups,dc=example,dc=com +memberOf: cn=group1,ou=groups,dc=example,dc=com +memberOf: cn=group2,ou=groups,dc=example,dc=com +memberOf: cn=group3,ou=groups,dc=example,dc=com +memberOf: cn=group4,ou=groups,dc=example,dc=com +modifiersName: cn=Manager,dc=example,dc=com + +dn: cn=group0,ou=Groups,dc=example,dc=com +objectClass: groupOfNames +objectClass: posixGroup +cn: group0 +member: cn=user0,ou=people,dc=example,dc=com +member: cn=user1,ou=people,dc=example,dc=com +member: cn=user2,ou=people,dc=example,dc=com +member: cn=user3,ou=people,dc=example,dc=com +member: cn=user4,ou=people,dc=example,dc=com +gidNumber: 90000 +structuralObjectClass: groupOfNames +entryUUID: 67d6f796-eafa-1039-8836-152569770969 +creatorsName: cn=Manager,dc=example,dc=com +createTimestamp: 20200224023755Z +entryCSN: 20200224023755.237225Z#000000#000#000000 +modifiersName: cn=Manager,dc=example,dc=com +modifyTimestamp: 20200224023755Z + +dn: cn=group1,ou=Groups,dc=example,dc=com +objectClass: groupOfNames +objectClass: posixGroup +cn: group1 +member: cn=user0,ou=people,dc=example,dc=com +member: cn=user1,ou=people,dc=example,dc=com +member: cn=user2,ou=people,dc=example,dc=com +member: 
cn=user3,ou=people,dc=example,dc=com +member: cn=user4,ou=people,dc=example,dc=com +gidNumber: 90001 +structuralObjectClass: groupOfNames +entryUUID: 67da9d2e-eafa-1039-8837-152569770969 +creatorsName: cn=Manager,dc=example,dc=com +createTimestamp: 20200224023755Z +entryCSN: 20200224023755.261127Z#000000#000#000000 +modifiersName: cn=Manager,dc=example,dc=com +modifyTimestamp: 20200224023755Z + +dn: cn=group2,ou=Groups,dc=example,dc=com +objectClass: groupOfNames +objectClass: posixGroup +cn: group2 +member: cn=user0,ou=people,dc=example,dc=com +member: cn=user1,ou=people,dc=example,dc=com +member: cn=user2,ou=people,dc=example,dc=com +member: cn=user3,ou=people,dc=example,dc=com +member: cn=user4,ou=people,dc=example,dc=com +gidNumber: 90002 +structuralObjectClass: groupOfNames +entryUUID: 67de2822-eafa-1039-8838-152569770969 +creatorsName: cn=Manager,dc=example,dc=com +createTimestamp: 20200224023755Z +entryCSN: 20200224023755.284346Z#000000#000#000000 +modifiersName: cn=Manager,dc=example,dc=com +modifyTimestamp: 20200224023755Z + +dn: cn=group3,ou=Groups,dc=example,dc=com +objectClass: groupOfNames +objectClass: posixGroup +cn: group3 +member: cn=user0,ou=people,dc=example,dc=com +member: cn=user1,ou=people,dc=example,dc=com +member: cn=user2,ou=people,dc=example,dc=com +member: cn=user3,ou=people,dc=example,dc=com +member: cn=user4,ou=people,dc=example,dc=com +gidNumber: 90003 +structuralObjectClass: groupOfNames +entryUUID: 67e1a6aa-eafa-1039-8839-152569770969 +creatorsName: cn=Manager,dc=example,dc=com +createTimestamp: 20200224023755Z +entryCSN: 20200224023755.307244Z#000000#000#000000 +modifiersName: cn=Manager,dc=example,dc=com +modifyTimestamp: 20200224023755Z + +dn: cn=group4,ou=Groups,dc=example,dc=com +objectClass: groupOfNames +objectClass: posixGroup +cn: group4 +member: cn=user0,ou=people,dc=example,dc=com +member: cn=user1,ou=people,dc=example,dc=com +member: cn=user2,ou=people,dc=example,dc=com +member: cn=user3,ou=people,dc=example,dc=com 
+member: cn=user4,ou=people,dc=example,dc=com +gidNumber: 90004 +structuralObjectClass: groupOfNames +entryUUID: 67e5a50c-eafa-1039-883a-152569770969 +creatorsName: cn=Manager,dc=example,dc=com +createTimestamp: 20200224023755Z +entryCSN: 20200224023755.333416Z#000000#000#000000 +modifiersName: cn=Manager,dc=example,dc=com +modifyTimestamp: 20200224023755Z + diff --git a/dirsrvtests/tests/data/openldap_2_389/1/example_net.slapcat.ldif b/dirsrvtests/tests/data/openldap_2_389/1/example_net.slapcat.ldif new file mode 100644 index 0000000..e7a34a6 --- /dev/null +++ b/dirsrvtests/tests/data/openldap_2_389/1/example_net.slapcat.ldif @@ -0,0 +1,241 @@ +dn: dc=example,dc=net +objectClass: dcObject +objectClass: organization +o: Example Company +dc: example +structuralObjectClass: organization +entryUUID: 5df457fe-eafb-1039-8857-152569770969 +creatorsName: cn=Manager,dc=example,dc=net +createTimestamp: 20200224024448Z +entryCSN: 20200224024448.149265Z#000000#000#000000 +modifiersName: cn=Manager,dc=example,dc=net +modifyTimestamp: 20200224024448Z + +dn: cn=Manager,dc=example,dc=net +objectClass: organizationalRole +cn: Manager +structuralObjectClass: organizationalRole +entryUUID: 5df55cf8-eafb-1039-8858-152569770969 +creatorsName: cn=Manager,dc=example,dc=net +createTimestamp: 20200224024448Z +entryCSN: 20200224024448.155945Z#000000#000#000000 +modifiersName: cn=Manager,dc=example,dc=net +modifyTimestamp: 20200224024448Z + +dn: ou=People,dc=example,dc=net +objectClass: organizationalUnit +ou: People +structuralObjectClass: organizationalUnit +entryUUID: 5df60342-eafb-1039-8859-152569770969 +creatorsName: cn=Manager,dc=example,dc=net +createTimestamp: 20200224024448Z +entryCSN: 20200224024448.160202Z#000000#000#000000 +modifiersName: cn=Manager,dc=example,dc=net +modifyTimestamp: 20200224024448Z + +dn: ou=Groups,dc=example,dc=net +objectClass: organizationalUnit +ou: Groups +structuralObjectClass: organizationalUnit +entryUUID: 5df6a57c-eafb-1039-885a-152569770969 
+creatorsName: cn=Manager,dc=example,dc=net +createTimestamp: 20200224024448Z +entryCSN: 20200224024448.164355Z#000000#000#000000 +modifiersName: cn=Manager,dc=example,dc=net +modifyTimestamp: 20200224024448Z + +dn: cn=user0,ou=People,dc=example,dc=net +objectClass: account +objectClass: posixAccount +cn: user0 +uid: user0 +uidNumber: 80000 +gidNumber: 80000 +homeDirectory: /home/user0 +structuralObjectClass: account +entryUUID: 5df7521a-eafb-1039-885b-152569770969 +creatorsName: cn=Manager,dc=example,dc=net +createTimestamp: 20200224024448Z +entryCSN: 20200224024448.168774Z#000000#000#000000 +modifyTimestamp: 20200224024448Z +memberOf: cn=group0,ou=groups,dc=example,dc=net +memberOf: cn=group1,ou=groups,dc=example,dc=net +memberOf: cn=group2,ou=groups,dc=example,dc=net +memberOf: cn=group3,ou=groups,dc=example,dc=net +memberOf: cn=group4,ou=groups,dc=example,dc=net +modifiersName: cn=Manager,dc=example,dc=net + +dn: cn=user1,ou=People,dc=example,dc=net +objectClass: account +objectClass: posixAccount +cn: user1 +uid: user1 +uidNumber: 80001 +gidNumber: 80001 +homeDirectory: /home/user1 +structuralObjectClass: account +entryUUID: 5df80f66-eafb-1039-885c-152569770969 +creatorsName: cn=Manager,dc=example,dc=net +createTimestamp: 20200224024448Z +entryCSN: 20200224024448.173619Z#000000#000#000000 +modifyTimestamp: 20200224024448Z +memberOf: cn=group0,ou=groups,dc=example,dc=net +memberOf: cn=group1,ou=groups,dc=example,dc=net +memberOf: cn=group2,ou=groups,dc=example,dc=net +memberOf: cn=group3,ou=groups,dc=example,dc=net +memberOf: cn=group4,ou=groups,dc=example,dc=net +modifiersName: cn=Manager,dc=example,dc=net + +dn: cn=user2,ou=People,dc=example,dc=net +objectClass: account +objectClass: posixAccount +cn: user2 +uid: user2 +uidNumber: 80002 +gidNumber: 80002 +homeDirectory: /home/user2 +structuralObjectClass: account +entryUUID: 5df8e710-eafb-1039-885d-152569770969 +creatorsName: cn=Manager,dc=example,dc=net +createTimestamp: 20200224024448Z +entryCSN: 
20200224024448.179140Z#000000#000#000000 +modifyTimestamp: 20200224024448Z +memberOf: cn=group0,ou=groups,dc=example,dc=net +memberOf: cn=group1,ou=groups,dc=example,dc=net +memberOf: cn=group2,ou=groups,dc=example,dc=net +memberOf: cn=group3,ou=groups,dc=example,dc=net +memberOf: cn=group4,ou=groups,dc=example,dc=net +modifiersName: cn=Manager,dc=example,dc=net + +dn: cn=user3,ou=People,dc=example,dc=net +objectClass: account +objectClass: posixAccount +cn: user3 +uid: user3 +uidNumber: 80003 +gidNumber: 80003 +homeDirectory: /home/user3 +structuralObjectClass: account +entryUUID: 5df9c356-eafb-1039-885e-152569770969 +creatorsName: cn=Manager,dc=example,dc=net +createTimestamp: 20200224024448Z +entryCSN: 20200224024448.184778Z#000000#000#000000 +modifyTimestamp: 20200224024448Z +memberOf: cn=group0,ou=groups,dc=example,dc=net +memberOf: cn=group1,ou=groups,dc=example,dc=net +memberOf: cn=group2,ou=groups,dc=example,dc=net +memberOf: cn=group3,ou=groups,dc=example,dc=net +memberOf: cn=group4,ou=groups,dc=example,dc=net +modifiersName: cn=Manager,dc=example,dc=net + +dn: cn=user4,ou=People,dc=example,dc=net +objectClass: account +objectClass: posixAccount +cn: user4 +uid: user4 +uidNumber: 80004 +gidNumber: 80004 +homeDirectory: /home/user4 +structuralObjectClass: account +entryUUID: 5dfaecc2-eafb-1039-885f-152569770969 +creatorsName: cn=Manager,dc=example,dc=net +createTimestamp: 20200224024448Z +entryCSN: 20200224024448.192376Z#000000#000#000000 +modifyTimestamp: 20200224024448Z +memberOf: cn=group0,ou=groups,dc=example,dc=net +memberOf: cn=group1,ou=groups,dc=example,dc=net +memberOf: cn=group2,ou=groups,dc=example,dc=net +memberOf: cn=group3,ou=groups,dc=example,dc=net +memberOf: cn=group4,ou=groups,dc=example,dc=net +modifiersName: cn=Manager,dc=example,dc=net + +dn: cn=group0,ou=Groups,dc=example,dc=net +objectClass: groupOfNames +objectClass: posixGroup +cn: group0 +member: cn=user0,ou=people,dc=example,dc=net +member: cn=user1,ou=people,dc=example,dc=net 
+member: cn=user2,ou=people,dc=example,dc=net +member: cn=user3,ou=people,dc=example,dc=net +member: cn=user4,ou=people,dc=example,dc=net +gidNumber: 90000 +structuralObjectClass: groupOfNames +entryUUID: 5dfc02c4-eafb-1039-8860-152569770969 +creatorsName: cn=Manager,dc=example,dc=net +createTimestamp: 20200224024448Z +entryCSN: 20200224024448.199510Z#000000#000#000000 +modifiersName: cn=Manager,dc=example,dc=net +modifyTimestamp: 20200224024448Z + +dn: cn=group1,ou=Groups,dc=example,dc=net +objectClass: groupOfNames +objectClass: posixGroup +cn: group1 +member: cn=user0,ou=people,dc=example,dc=net +member: cn=user1,ou=people,dc=example,dc=net +member: cn=user2,ou=people,dc=example,dc=net +member: cn=user3,ou=people,dc=example,dc=net +member: cn=user4,ou=people,dc=example,dc=net +gidNumber: 90001 +structuralObjectClass: groupOfNames +entryUUID: 5e01038c-eafb-1039-8861-152569770969 +creatorsName: cn=Manager,dc=example,dc=net +createTimestamp: 20200224024448Z +entryCSN: 20200224024448.232297Z#000000#000#000000 +modifiersName: cn=Manager,dc=example,dc=net +modifyTimestamp: 20200224024448Z + +dn: cn=group2,ou=Groups,dc=example,dc=net +objectClass: groupOfNames +objectClass: posixGroup +cn: group2 +member: cn=user0,ou=people,dc=example,dc=net +member: cn=user1,ou=people,dc=example,dc=net +member: cn=user2,ou=people,dc=example,dc=net +member: cn=user3,ou=people,dc=example,dc=net +member: cn=user4,ou=people,dc=example,dc=net +gidNumber: 90002 +structuralObjectClass: groupOfNames +entryUUID: 5e06b610-eafb-1039-8862-152569770969 +creatorsName: cn=Manager,dc=example,dc=net +createTimestamp: 20200224024448Z +entryCSN: 20200224024448.269635Z#000000#000#000000 +modifiersName: cn=Manager,dc=example,dc=net +modifyTimestamp: 20200224024448Z + +dn: cn=group3,ou=Groups,dc=example,dc=net +objectClass: groupOfNames +objectClass: posixGroup +cn: group3 +member: cn=user0,ou=people,dc=example,dc=net +member: cn=user1,ou=people,dc=example,dc=net +member: 
cn=user2,ou=people,dc=example,dc=net +member: cn=user3,ou=people,dc=example,dc=net +member: cn=user4,ou=people,dc=example,dc=net +gidNumber: 90003 +structuralObjectClass: groupOfNames +entryUUID: 5e0aec76-eafb-1039-8863-152569770969 +creatorsName: cn=Manager,dc=example,dc=net +createTimestamp: 20200224024448Z +entryCSN: 20200224024448.297242Z#000000#000#000000 +modifiersName: cn=Manager,dc=example,dc=net +modifyTimestamp: 20200224024448Z + +dn: cn=group4,ou=Groups,dc=example,dc=net +objectClass: groupOfNames +objectClass: posixGroup +cn: group4 +member: cn=user0,ou=people,dc=example,dc=net +member: cn=user1,ou=people,dc=example,dc=net +member: cn=user2,ou=people,dc=example,dc=net +member: cn=user3,ou=people,dc=example,dc=net +member: cn=user4,ou=people,dc=example,dc=net +gidNumber: 90004 +structuralObjectClass: groupOfNames +entryUUID: 5e0f0900-eafb-1039-8864-152569770969 +creatorsName: cn=Manager,dc=example,dc=net +createTimestamp: 20200224024448Z +entryCSN: 20200224024448.324187Z#000000#000#000000 +modifiersName: cn=Manager,dc=example,dc=net +modifyTimestamp: 20200224024448Z + diff --git a/dirsrvtests/tests/data/openldap_2_389/1/setup/example_com.ldif b/dirsrvtests/tests/data/openldap_2_389/1/setup/example_com.ldif new file mode 100644 index 0000000..4e6330c --- /dev/null +++ b/dirsrvtests/tests/data/openldap_2_389/1/setup/example_com.ldif @@ -0,0 +1,117 @@ +dn: dc=example,dc=com +objectclass: dcObject +objectclass: organization +o: Example Company +dc: example + +dn: cn=Manager,dc=example,dc=com +objectclass: organizationalRole +cn: Manager + +dn: ou=People,dc=example,dc=com +objectClass: organizationalUnit +ou: People + +dn: ou=Groups,dc=example,dc=com +objectClass: organizationalUnit +ou: Groups + +dn: cn=user0,ou=people,dc=example,dc=com +objectClass: Account +objectClass: posixAccount +cn: user0 +uid: user0 +uidnumber: 80000 +gidnumber: 80000 +homeDirectory: /home/user0 + +dn: cn=user1,ou=people,dc=example,dc=com +objectClass: Account +objectClass: 
posixAccount +cn: user1 +uid: user1 +uidnumber: 80001 +gidnumber: 80001 +homeDirectory: /home/user1 + +dn: cn=user2,ou=people,dc=example,dc=com +objectClass: Account +objectClass: posixAccount +cn: user2 +uid: user2 +uidnumber: 80002 +gidnumber: 80002 +homeDirectory: /home/user2 + +dn: cn=user3,ou=people,dc=example,dc=com +objectClass: Account +objectClass: posixAccount +cn: user3 +uid: user3 +uidnumber: 80003 +gidnumber: 80003 +homeDirectory: /home/user3 + +dn: cn=user4,ou=people,dc=example,dc=com +objectClass: Account +objectClass: posixAccount +cn: user4 +uid: user4 +uidnumber: 80004 +gidnumber: 80004 +homeDirectory: /home/user4 + +dn: cn=group0,ou=groups,dc=example,dc=com +objectClass: groupOfNames +objectClass: posixGroup +cn: group0 +member: cn=user0,ou=people,dc=example,dc=com +member: cn=user1,ou=people,dc=example,dc=com +member: cn=user2,ou=people,dc=example,dc=com +member: cn=user3,ou=people,dc=example,dc=com +member: cn=user4,ou=people,dc=example,dc=com +gidNumber: 90000 + +dn: cn=group1,ou=groups,dc=example,dc=com +objectClass: groupOfNames +objectClass: posixGroup +cn: group1 +member: cn=user0,ou=people,dc=example,dc=com +member: cn=user1,ou=people,dc=example,dc=com +member: cn=user2,ou=people,dc=example,dc=com +member: cn=user3,ou=people,dc=example,dc=com +member: cn=user4,ou=people,dc=example,dc=com +gidNumber: 90001 + +dn: cn=group2,ou=groups,dc=example,dc=com +objectClass: groupOfNames +objectClass: posixGroup +cn: group2 +member: cn=user0,ou=people,dc=example,dc=com +member: cn=user1,ou=people,dc=example,dc=com +member: cn=user2,ou=people,dc=example,dc=com +member: cn=user3,ou=people,dc=example,dc=com +member: cn=user4,ou=people,dc=example,dc=com +gidNumber: 90002 + +dn: cn=group3,ou=groups,dc=example,dc=com +objectClass: groupOfNames +objectClass: posixGroup +cn: group3 +member: cn=user0,ou=people,dc=example,dc=com +member: cn=user1,ou=people,dc=example,dc=com +member: cn=user2,ou=people,dc=example,dc=com +member: 
cn=user3,ou=people,dc=example,dc=com +member: cn=user4,ou=people,dc=example,dc=com +gidNumber: 90003 + +dn: cn=group4,ou=groups,dc=example,dc=com +objectClass: groupOfNames +objectClass: posixGroup +cn: group4 +member: cn=user0,ou=people,dc=example,dc=com +member: cn=user1,ou=people,dc=example,dc=com +member: cn=user2,ou=people,dc=example,dc=com +member: cn=user3,ou=people,dc=example,dc=com +member: cn=user4,ou=people,dc=example,dc=com +gidNumber: 90004 diff --git a/dirsrvtests/tests/data/openldap_2_389/1/setup/example_net.ldif b/dirsrvtests/tests/data/openldap_2_389/1/setup/example_net.ldif new file mode 100644 index 0000000..a4b70cc --- /dev/null +++ b/dirsrvtests/tests/data/openldap_2_389/1/setup/example_net.ldif @@ -0,0 +1,117 @@ +dn: dc=example,dc=net +objectclass: dcObject +objectclass: organization +o: Example Company +dc: example + +dn: cn=Manager,dc=example,dc=net +objectclass: organizationalRole +cn: Manager + +dn: ou=People,dc=example,dc=net +objectClass: organizationalUnit +ou: People + +dn: ou=Groups,dc=example,dc=net +objectClass: organizationalUnit +ou: Groups + +dn: cn=user0,ou=people,dc=example,dc=net +objectClass: Account +objectClass: posixAccount +cn: user0 +uid: user0 +uidnumber: 80000 +gidnumber: 80000 +homeDirectory: /home/user0 + +dn: cn=user1,ou=people,dc=example,dc=net +objectClass: Account +objectClass: posixAccount +cn: user1 +uid: user1 +uidnumber: 80001 +gidnumber: 80001 +homeDirectory: /home/user1 + +dn: cn=user2,ou=people,dc=example,dc=net +objectClass: Account +objectClass: posixAccount +cn: user2 +uid: user2 +uidnumber: 80002 +gidnumber: 80002 +homeDirectory: /home/user2 + +dn: cn=user3,ou=people,dc=example,dc=net +objectClass: Account +objectClass: posixAccount +cn: user3 +uid: user3 +uidnumber: 80003 +gidnumber: 80003 +homeDirectory: /home/user3 + +dn: cn=user4,ou=people,dc=example,dc=net +objectClass: Account +objectClass: posixAccount +cn: user4 +uid: user4 +uidnumber: 80004 +gidnumber: 80004 +homeDirectory: /home/user4 + +dn: 
cn=group0,ou=groups,dc=example,dc=net +objectClass: groupOfNames +objectClass: posixGroup +cn: group0 +member: cn=user0,ou=people,dc=example,dc=net +member: cn=user1,ou=people,dc=example,dc=net +member: cn=user2,ou=people,dc=example,dc=net +member: cn=user3,ou=people,dc=example,dc=net +member: cn=user4,ou=people,dc=example,dc=net +gidNumber: 90000 + +dn: cn=group1,ou=groups,dc=example,dc=net +objectClass: groupOfNames +objectClass: posixGroup +cn: group1 +member: cn=user0,ou=people,dc=example,dc=net +member: cn=user1,ou=people,dc=example,dc=net +member: cn=user2,ou=people,dc=example,dc=net +member: cn=user3,ou=people,dc=example,dc=net +member: cn=user4,ou=people,dc=example,dc=net +gidNumber: 90001 + +dn: cn=group2,ou=groups,dc=example,dc=net +objectClass: groupOfNames +objectClass: posixGroup +cn: group2 +member: cn=user0,ou=people,dc=example,dc=net +member: cn=user1,ou=people,dc=example,dc=net +member: cn=user2,ou=people,dc=example,dc=net +member: cn=user3,ou=people,dc=example,dc=net +member: cn=user4,ou=people,dc=example,dc=net +gidNumber: 90002 + +dn: cn=group3,ou=groups,dc=example,dc=net +objectClass: groupOfNames +objectClass: posixGroup +cn: group3 +member: cn=user0,ou=people,dc=example,dc=net +member: cn=user1,ou=people,dc=example,dc=net +member: cn=user2,ou=people,dc=example,dc=net +member: cn=user3,ou=people,dc=example,dc=net +member: cn=user4,ou=people,dc=example,dc=net +gidNumber: 90003 + +dn: cn=group4,ou=groups,dc=example,dc=net +objectClass: groupOfNames +objectClass: posixGroup +cn: group4 +member: cn=user0,ou=people,dc=example,dc=net +member: cn=user1,ou=people,dc=example,dc=net +member: cn=user2,ou=people,dc=example,dc=net +member: cn=user3,ou=people,dc=example,dc=net +member: cn=user4,ou=people,dc=example,dc=net +gidNumber: 90004 diff --git a/dirsrvtests/tests/data/openldap_2_389/1/setup/slapd.ldif b/dirsrvtests/tests/data/openldap_2_389/1/setup/slapd.ldif new file mode 100644 index 0000000..45a7b08 --- /dev/null +++ 
b/dirsrvtests/tests/data/openldap_2_389/1/setup/slapd.ldif @@ -0,0 +1,146 @@ +# +# See slapd-config(5) for details on configuration options. +# This file should NOT be world readable. +# +dn: cn=config +objectClass: olcGlobal +cn: config +# +# +# Define global ACLs to disable default read access. +# +olcArgsFile: /var/run/slapd.args +olcPidFile: /var/run/slapd.pid +# +# Do not enable referrals until AFTER you have a working directory +# service AND an understanding of referrals. +#olcReferral: ldap://root.openldap.org +# +# Sample security restrictions +# Require integrity protection (prevent hijacking) +# Require 112-bit (3DES or better) encryption for updates +# Require 64-bit encryption for simple bind +#olcSecurity: ssf=1 update_ssf=112 simple_bind=64 + + +# +# Load dynamic backend modules: +# +dn: cn=module,cn=config +objectClass: olcModuleList +cn: module +#olcModulepath: %MODULEDIR% +olcModuleload: back_mdb.la +olcModuleload: memberof.la +olcModuleload: refint.la +olcModuleload: unique.la +#olcModuleload: back_ldap.la +#olcModuleload: back_passwd.la +#olcModuleload: back_shell.la + + +dn: cn=schema,cn=config +objectClass: olcSchemaConfig +cn: schema + +include: file:///etc/openldap/schema/core.ldif +include: file:///etc/openldap/schema/cosine.ldif +include: file:///etc/openldap/schema/inetorgperson.ldif +include: file:///etc/openldap/schema/rfc2307bis.ldif +include: file:///etc/openldap/schema/yast.ldif + + +# Frontend settings +# +dn: olcDatabase=frontend,cn=config +objectClass: olcDatabaseConfig +objectClass: olcFrontendConfig +olcDatabase: frontend +# +# Sample global access control policy: +# Root DSE: allow anyone to read it +# Subschema (sub)entry DSE: allow anyone to read it +# Other DSEs: +# Allow self write access +# Allow authenticated users read access +# Allow anonymous users to authenticate +# +olcAccess: to dn.base="" by * read +olcAccess: to dn.base="cn=Subschema" by * read +#olcAccess: to * +# by self write +# by users read +# by anonymous 
auth +# +# if no access controls are present, the default policy +# allows anyone and everyone to read anything but restricts +# updates to rootdn. (e.g., "access to * by * read") +# +# rootdn can always read and write EVERYTHING! +# + + +####################################################################### +# LMDB database definitions +####################################################################### +# +dn: olcDatabase={1}mdb,cn=config +objectClass: olcDatabaseConfig +objectClass: olcMdbConfig +olcDatabase: mdb +olcSuffix: dc=example,dc=com +olcRootDN: cn=Manager,dc=example,dc=com +# Cleartext passwords, especially for the rootdn, should +# be avoided. See slappasswd(8) and slapd-config(5) for details. +# Use of strong authentication encouraged. +olcRootPW: secret +# The database directory MUST exist prior to running slapd AND +# should only be accessible by the slapd and slap tools. +# Mode 700 recommended. +olcDbDirectory: /var/lib/ldap/example_com +# Indices to maintain +olcDbIndex: objectClass eq + +dn: olcOverlay=memberof,olcDatabase={1}mdb,cn=config +objectClass: olcOverlayConfig +objectClass: olcMemberOf +olcOverlay: memberof +olcMemberOfRefint: TRUE + +dn: olcOverlay=refint,olcDatabase={1}mdb,cn=config +objectClass: olcOverlayConfig +objectClass: olcRefintConfig +olcOverlay: refint +olcRefintAttribute: member +olcRefintAttribute: memberOf + +dn: olcOverlay=unique,olcDatabase={1}mdb,cn=config +objectClass: olcOverlayConfig +objectClass: olcUniqueConfig +olcOverlay: unique +olcUniqueURI: ldap:///?mail?sub +olcUniqueURI: ldap:///?uid?sub + +dn: olcDatabase={2}mdb,cn=config +objectClass: olcDatabaseConfig +objectClass: olcMdbConfig +olcDatabase: mdb +olcSuffix: dc=example,dc=net +olcRootDN: cn=Manager,dc=example,dc=net +olcRootPW: secret +olcDbDirectory: /var/lib/ldap/example_net +olcDbIndex: objectClass eq + +dn: olcOverlay=memberof,olcDatabase={2}mdb,cn=config +objectClass: olcOverlayConfig +objectClass: olcMemberOf +olcOverlay: memberof 
+olcMemberOfRefint: TRUE + +dn: olcOverlay=unique,olcDatabase={2}mdb,cn=config +objectClass: olcOverlayConfig +objectClass: olcUniqueConfig +olcOverlay: unique +olcUniqueURI: ldap:///?mail?sub +olcUniqueURI: ldap:///?uid?sub + diff --git a/dirsrvtests/tests/data/openldap_2_389/1/slapd.d/cn=config.ldif b/dirsrvtests/tests/data/openldap_2_389/1/slapd.d/cn=config.ldif new file mode 100644 index 0000000..1ff7282 --- /dev/null +++ b/dirsrvtests/tests/data/openldap_2_389/1/slapd.d/cn=config.ldif @@ -0,0 +1,14 @@ +# AUTO-GENERATED FILE - DO NOT EDIT!! Use ldapmodify. +# CRC32 6905879f +dn: cn=config +objectClass: olcGlobal +cn: config +olcArgsFile: /var/run/slapd.args +olcPidFile: /var/run/slapd.pid +structuralObjectClass: olcGlobal +entryUUID: 4019c5a8-eaf5-1039-865e-dbfbf2f5e6dd +creatorsName: cn=config +createTimestamp: 20200224020101Z +entryCSN: 20200224020101.082506Z#000000#000#000000 +modifiersName: cn=config +modifyTimestamp: 20200224020101Z diff --git a/dirsrvtests/tests/data/openldap_2_389/1/slapd.d/cn=config/cn=module{0}.ldif b/dirsrvtests/tests/data/openldap_2_389/1/slapd.d/cn=config/cn=module{0}.ldif new file mode 100644 index 0000000..0e635ff --- /dev/null +++ b/dirsrvtests/tests/data/openldap_2_389/1/slapd.d/cn=config/cn=module{0}.ldif @@ -0,0 +1,16 @@ +# AUTO-GENERATED FILE - DO NOT EDIT!! Use ldapmodify. 
+# CRC32 9b38b059 +dn: cn=module{0} +objectClass: olcModuleList +cn: module{0} +olcModuleLoad: {0}back_mdb.la +olcModuleLoad: {1}memberof.la +olcModuleLoad: {2}refint.la +olcModuleLoad: {3}unique.la +structuralObjectClass: olcModuleList +entryUUID: 4019cc88-eaf5-1039-865f-dbfbf2f5e6dd +creatorsName: cn=config +createTimestamp: 20200224020101Z +entryCSN: 20200224020101.082706Z#000000#000#000000 +modifiersName: cn=config +modifyTimestamp: 20200224020101Z diff --git a/dirsrvtests/tests/data/openldap_2_389/1/slapd.d/cn=config/cn=schema.ldif b/dirsrvtests/tests/data/openldap_2_389/1/slapd.d/cn=config/cn=schema.ldif new file mode 100644 index 0000000..7840625 --- /dev/null +++ b/dirsrvtests/tests/data/openldap_2_389/1/slapd.d/cn=config/cn=schema.ldif @@ -0,0 +1,12 @@ +# AUTO-GENERATED FILE - DO NOT EDIT!! Use ldapmodify. +# CRC32 095fcaec +dn: cn=schema +objectClass: olcSchemaConfig +cn: schema +structuralObjectClass: olcSchemaConfig +entryUUID: 4019e6aa-eaf5-1039-8660-dbfbf2f5e6dd +creatorsName: cn=config +createTimestamp: 20200224020101Z +entryCSN: 20200224020101.083375Z#000000#000#000000 +modifiersName: cn=config +modifyTimestamp: 20200224020101Z diff --git a/dirsrvtests/tests/data/openldap_2_389/1/slapd.d/cn=config/cn=schema/cn={0}core.ldif b/dirsrvtests/tests/data/openldap_2_389/1/slapd.d/cn=config/cn=schema/cn={0}core.ldif new file mode 100644 index 0000000..abdfaa7 --- /dev/null +++ b/dirsrvtests/tests/data/openldap_2_389/1/slapd.d/cn=config/cn=schema/cn={0}core.ldif @@ -0,0 +1,249 @@ +# AUTO-GENERATED FILE - DO NOT EDIT!! Use ldapmodify. +# CRC32 31e6d4be +dn: cn={0}core +objectClass: olcSchemaConfig +cn: {0}core +olcAttributeTypes: {0}( 2.5.4.2 NAME 'knowledgeInformation' DESC 'RFC2256: k + nowledge information' EQUALITY caseIgnoreMatch SYNTAX 1.3.6.1.4.1.1466.115. 
+ 121.1.15{32768} ) +olcAttributeTypes: {1}( 2.5.4.4 NAME ( 'sn' 'surname' ) DESC 'RFC2256: last + (family) name(s) for which the entity is known by' SUP name ) +olcAttributeTypes: {2}( 2.5.4.5 NAME 'serialNumber' DESC 'RFC2256: serial nu + mber of the entity' EQUALITY caseIgnoreMatch SUBSTR caseIgnoreSubstringsMat + ch SYNTAX 1.3.6.1.4.1.1466.115.121.1.44{64} ) +olcAttributeTypes: {3}( 2.5.4.6 NAME ( 'c' 'countryName' ) DESC 'RFC4519: tw + o-letter ISO-3166 country code' SUP name SYNTAX 1.3.6.1.4.1.1466.115.121.1. + 11 SINGLE-VALUE ) +olcAttributeTypes: {4}( 2.5.4.7 NAME ( 'l' 'localityName' ) DESC 'RFC2256: l + ocality which this object resides in' SUP name ) +olcAttributeTypes: {5}( 2.5.4.8 NAME ( 'st' 'stateOrProvinceName' ) DESC 'RF + C2256: state or province which this object resides in' SUP name ) +olcAttributeTypes: {6}( 2.5.4.9 NAME ( 'street' 'streetAddress' ) DESC 'RFC2 + 256: street address of this object' EQUALITY caseIgnoreMatch SUBSTR caseIgn + oreSubstringsMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.15{128} ) +olcAttributeTypes: {7}( 2.5.4.10 NAME ( 'o' 'organizationName' ) DESC 'RFC22 + 56: organization this object belongs to' SUP name ) +olcAttributeTypes: {8}( 2.5.4.11 NAME ( 'ou' 'organizationalUnitName' ) DESC + 'RFC2256: organizational unit this object belongs to' SUP name ) +olcAttributeTypes: {9}( 2.5.4.12 NAME 'title' DESC 'RFC2256: title associate + d with the entity' SUP name ) +olcAttributeTypes: {10}( 2.5.4.14 NAME 'searchGuide' DESC 'RFC2256: search g + uide, deprecated by enhancedSearchGuide' SYNTAX 1.3.6.1.4.1.1466.115.121.1. 
+ 25 ) +olcAttributeTypes: {11}( 2.5.4.15 NAME 'businessCategory' DESC 'RFC2256: bus + iness category' EQUALITY caseIgnoreMatch SUBSTR caseIgnoreSubstringsMatch S + YNTAX 1.3.6.1.4.1.1466.115.121.1.15{128} ) +olcAttributeTypes: {12}( 2.5.4.16 NAME 'postalAddress' DESC 'RFC2256: postal + address' EQUALITY caseIgnoreListMatch SUBSTR caseIgnoreListSubstringsMatch + SYNTAX 1.3.6.1.4.1.1466.115.121.1.41 ) +olcAttributeTypes: {13}( 2.5.4.17 NAME 'postalCode' DESC 'RFC2256: postal co + de' EQUALITY caseIgnoreMatch SUBSTR caseIgnoreSubstringsMatch SYNTAX 1.3.6. + 1.4.1.1466.115.121.1.15{40} ) +olcAttributeTypes: {14}( 2.5.4.18 NAME 'postOfficeBox' DESC 'RFC2256: Post O + ffice Box' EQUALITY caseIgnoreMatch SUBSTR caseIgnoreSubstringsMatch SYNTAX + 1.3.6.1.4.1.1466.115.121.1.15{40} ) +olcAttributeTypes: {15}( 2.5.4.19 NAME 'physicalDeliveryOfficeName' DESC 'RF + C2256: Physical Delivery Office Name' EQUALITY caseIgnoreMatch SUBSTR caseI + gnoreSubstringsMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.15{128} ) +olcAttributeTypes: {16}( 2.5.4.20 NAME 'telephoneNumber' DESC 'RFC2256: Tele + phone Number' EQUALITY telephoneNumberMatch SUBSTR telephoneNumberSubstring + sMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.50{32} ) +olcAttributeTypes: {17}( 2.5.4.21 NAME 'telexNumber' DESC 'RFC2256: Telex Nu + mber' SYNTAX 1.3.6.1.4.1.1466.115.121.1.52 ) +olcAttributeTypes: {18}( 2.5.4.22 NAME 'teletexTerminalIdentifier' DESC 'RFC + 2256: Teletex Terminal Identifier' SYNTAX 1.3.6.1.4.1.1466.115.121.1.51 ) +olcAttributeTypes: {19}( 2.5.4.23 NAME ( 'facsimileTelephoneNumber' 'fax' ) + DESC 'RFC2256: Facsimile (Fax) Telephone Number' SYNTAX 1.3.6.1.4.1.1466.11 + 5.121.1.22 ) +olcAttributeTypes: {20}( 2.5.4.24 NAME 'x121Address' DESC 'RFC2256: X.121 Ad + dress' EQUALITY numericStringMatch SUBSTR numericStringSubstringsMatch SYNT + AX 1.3.6.1.4.1.1466.115.121.1.36{15} ) +olcAttributeTypes: {21}( 2.5.4.25 NAME 'internationaliSDNNumber' DESC 'RFC22 + 56: international ISDN number' EQUALITY 
numericStringMatch SUBSTR numericSt + ringSubstringsMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.36{16} ) +olcAttributeTypes: {22}( 2.5.4.26 NAME 'registeredAddress' DESC 'RFC2256: re + gistered postal address' SUP postalAddress SYNTAX 1.3.6.1.4.1.1466.115.121. + 1.41 ) +olcAttributeTypes: {23}( 2.5.4.27 NAME 'destinationIndicator' DESC 'RFC2256: + destination indicator' EQUALITY caseIgnoreMatch SUBSTR caseIgnoreSubstring + sMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.44{128} ) +olcAttributeTypes: {24}( 2.5.4.28 NAME 'preferredDeliveryMethod' DESC 'RFC22 + 56: preferred delivery method' SYNTAX 1.3.6.1.4.1.1466.115.121.1.14 SINGLE- + VALUE ) +olcAttributeTypes: {25}( 2.5.4.29 NAME 'presentationAddress' DESC 'RFC2256: + presentation address' EQUALITY presentationAddressMatch SYNTAX 1.3.6.1.4.1. + 1466.115.121.1.43 SINGLE-VALUE ) +olcAttributeTypes: {26}( 2.5.4.30 NAME 'supportedApplicationContext' DESC 'R + FC2256: supported application context' EQUALITY objectIdentifierMatch SYNTA + X 1.3.6.1.4.1.1466.115.121.1.38 ) +olcAttributeTypes: {27}( 2.5.4.31 NAME 'member' DESC 'RFC2256: member of a g + roup' SUP distinguishedName ) +olcAttributeTypes: {28}( 2.5.4.32 NAME 'owner' DESC 'RFC2256: owner (of the + object)' SUP distinguishedName ) +olcAttributeTypes: {29}( 2.5.4.33 NAME 'roleOccupant' DESC 'RFC2256: occupan + t of role' SUP distinguishedName ) +olcAttributeTypes: {30}( 2.5.4.36 NAME 'userCertificate' DESC 'RFC2256: X.50 + 9 user certificate, use ;binary' EQUALITY certificateExactMatch SYNTAX 1.3. + 6.1.4.1.1466.115.121.1.8 ) +olcAttributeTypes: {31}( 2.5.4.37 NAME 'cACertificate' DESC 'RFC2256: X.509 + CA certificate, use ;binary' EQUALITY certificateExactMatch SYNTAX 1.3.6.1. 
+ 4.1.1466.115.121.1.8 ) +olcAttributeTypes: {32}( 2.5.4.38 NAME 'authorityRevocationList' DESC 'RFC22 + 56: X.509 authority revocation list, use ;binary' SYNTAX 1.3.6.1.4.1.1466.1 + 15.121.1.9 ) +olcAttributeTypes: {33}( 2.5.4.39 NAME 'certificateRevocationList' DESC 'RFC + 2256: X.509 certificate revocation list, use ;binary' SYNTAX 1.3.6.1.4.1.14 + 66.115.121.1.9 ) +olcAttributeTypes: {34}( 2.5.4.40 NAME 'crossCertificatePair' DESC 'RFC2256: + X.509 cross certificate pair, use ;binary' SYNTAX 1.3.6.1.4.1.1466.115.121 + .1.10 ) +olcAttributeTypes: {35}( 2.5.4.42 NAME ( 'givenName' 'gn' ) DESC 'RFC2256: f + irst name(s) for which the entity is known by' SUP name ) +olcAttributeTypes: {36}( 2.5.4.43 NAME 'initials' DESC 'RFC2256: initials of + some or all of names, but not the surname(s).' SUP name ) +olcAttributeTypes: {37}( 2.5.4.44 NAME 'generationQualifier' DESC 'RFC2256: + name qualifier indicating a generation' SUP name ) +olcAttributeTypes: {38}( 2.5.4.45 NAME 'x500UniqueIdentifier' DESC 'RFC2256: + X.500 unique identifier' EQUALITY bitStringMatch SYNTAX 1.3.6.1.4.1.1466.1 + 15.121.1.6 ) +olcAttributeTypes: {39}( 2.5.4.46 NAME 'dnQualifier' DESC 'RFC2256: DN quali + fier' EQUALITY caseIgnoreMatch ORDERING caseIgnoreOrderingMatch SUBSTR case + IgnoreSubstringsMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.44 ) +olcAttributeTypes: {40}( 2.5.4.47 NAME 'enhancedSearchGuide' DESC 'RFC2256: + enhanced search guide' SYNTAX 1.3.6.1.4.1.1466.115.121.1.21 ) +olcAttributeTypes: {41}( 2.5.4.48 NAME 'protocolInformation' DESC 'RFC2256: + protocol information' EQUALITY protocolInformationMatch SYNTAX 1.3.6.1.4.1. 
+ 1466.115.121.1.42 ) +olcAttributeTypes: {42}( 2.5.4.50 NAME 'uniqueMember' DESC 'RFC2256: unique + member of a group' EQUALITY uniqueMemberMatch SYNTAX 1.3.6.1.4.1.1466.115.1 + 21.1.34 ) +olcAttributeTypes: {43}( 2.5.4.51 NAME 'houseIdentifier' DESC 'RFC2256: hous + e identifier' EQUALITY caseIgnoreMatch SUBSTR caseIgnoreSubstringsMatch SYN + TAX 1.3.6.1.4.1.1466.115.121.1.15{32768} ) +olcAttributeTypes: {44}( 2.5.4.52 NAME 'supportedAlgorithms' DESC 'RFC2256: + supported algorithms' SYNTAX 1.3.6.1.4.1.1466.115.121.1.49 ) +olcAttributeTypes: {45}( 2.5.4.53 NAME 'deltaRevocationList' DESC 'RFC2256: + delta revocation list; use ;binary' SYNTAX 1.3.6.1.4.1.1466.115.121.1.9 ) +olcAttributeTypes: {46}( 2.5.4.54 NAME 'dmdName' DESC 'RFC2256: name of DMD' + SUP name ) +olcAttributeTypes: {47}( 2.5.4.65 NAME 'pseudonym' DESC 'X.520(4th): pseudon + ym for the object' SUP name ) +olcAttributeTypes: {48}( 0.9.2342.19200300.100.1.3 NAME ( 'mail' 'rfc822Mail + box' ) DESC 'RFC1274: RFC822 Mailbox' EQUALITY caseIgnoreIA5Match SUBST + R caseIgnoreIA5SubstringsMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.26{256} + ) +olcAttributeTypes: {49}( 0.9.2342.19200300.100.1.25 NAME ( 'dc' 'domainCompo + nent' ) DESC 'RFC1274/2247: domain component' EQUALITY caseIgnoreIA5Match S + UBSTR caseIgnoreIA5SubstringsMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.26 SIN + GLE-VALUE ) +olcAttributeTypes: {50}( 0.9.2342.19200300.100.1.37 NAME 'associatedDomain' + DESC 'RFC1274: domain associated with object' EQUALITY caseIgnoreIA5Match S + UBSTR caseIgnoreIA5SubstringsMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.26 ) +olcAttributeTypes: {51}( 1.2.840.113549.1.9.1 NAME ( 'email' 'emailAddress' + 'pkcs9email' ) DESC 'RFC3280: legacy attribute for email addresses in DNs' + EQUALITY caseIgnoreIA5Match SUBSTR caseIgnoreIA5SubstringsMatch SYNTAX 1.3. 
+ 6.1.4.1.1466.115.121.1.26{128} ) +olcObjectClasses: {0}( 2.5.6.2 NAME 'country' DESC 'RFC2256: a country' SUP + top STRUCTURAL MUST c MAY ( searchGuide $ description ) ) +olcObjectClasses: {1}( 2.5.6.3 NAME 'locality' DESC 'RFC2256: a locality' SU + P top STRUCTURAL MAY ( street $ seeAlso $ searchGuide $ st $ l $ descriptio + n ) ) +olcObjectClasses: {2}( 2.5.6.4 NAME 'organization' DESC 'RFC2256: an organiz + ation' SUP top STRUCTURAL MUST o MAY ( userPassword $ searchGuide $ seeAlso + $ businessCategory $ x121Address $ registeredAddress $ destinationIndicato + r $ preferredDeliveryMethod $ telexNumber $ teletexTerminalIdentifier $ tel + ephoneNumber $ internationaliSDNNumber $ facsimileTelephoneNumber $ street + $ postOfficeBox $ postalCode $ postalAddress $ physicalDeliveryOfficeName + $ st $ l $ description ) ) +olcObjectClasses: {3}( 2.5.6.5 NAME 'organizationalUnit' DESC 'RFC2256: an o + rganizational unit' SUP top STRUCTURAL MUST ou MAY ( userPassword $ searchG + uide $ seeAlso $ businessCategory $ x121Address $ registeredAddress $ desti + nationIndicator $ preferredDeliveryMethod $ telexNumber $ teletexTerminalId + entifier $ telephoneNumber $ internationaliSDNNumber $ facsimileTelephoneNu + mber $ street $ postOfficeBox $ postalCode $ postalAddress $ physicalDelive + ryOfficeName $ st $ l $ description ) ) +olcObjectClasses: {4}( 2.5.6.6 NAME 'person' DESC 'RFC2256: a person' SUP to + p STRUCTURAL MUST ( sn $ cn ) MAY ( userPassword $ telephoneNumber $ seeAls + o $ description ) ) +olcObjectClasses: {5}( 2.5.6.7 NAME 'organizationalPerson' DESC 'RFC2256: an + organizational person' SUP person STRUCTURAL MAY ( title $ x121Address $ r + egisteredAddress $ destinationIndicator $ preferredDeliveryMethod $ telexNu + mber $ teletexTerminalIdentifier $ telephoneNumber $ internationaliSDNNumbe + r $ facsimileTelephoneNumber $ street $ postOfficeBox $ postalCode $ posta + lAddress $ physicalDeliveryOfficeName $ ou $ st $ l ) ) +olcObjectClasses: {6}( 2.5.6.8 NAME 
'organizationalRole' DESC 'RFC2256: an o + rganizational role' SUP top STRUCTURAL MUST cn MAY ( x121Address $ register + edAddress $ destinationIndicator $ preferredDeliveryMethod $ telexNumber $ + teletexTerminalIdentifier $ telephoneNumber $ internationaliSDNNumber $ fac + simileTelephoneNumber $ seeAlso $ roleOccupant $ preferredDeliveryMethod $ + street $ postOfficeBox $ postalCode $ postalAddress $ physicalDeliveryOffic + eName $ ou $ st $ l $ description ) ) +olcObjectClasses: {7}( 2.5.6.9 NAME 'groupOfNames' DESC 'RFC2256: a group of + names (DNs)' SUP top STRUCTURAL MUST ( member $ cn ) MAY ( businessCategor + y $ seeAlso $ owner $ ou $ o $ description ) ) +olcObjectClasses: {8}( 2.5.6.10 NAME 'residentialPerson' DESC 'RFC2256: an r + esidential person' SUP person STRUCTURAL MUST l MAY ( businessCategory $ x1 + 21Address $ registeredAddress $ destinationIndicator $ preferredDeliveryMet + hod $ telexNumber $ teletexTerminalIdentifier $ telephoneNumber $ internati + onaliSDNNumber $ facsimileTelephoneNumber $ preferredDeliveryMethod $ stree + t $ postOfficeBox $ postalCode $ postalAddress $ physicalDeliveryOfficeName + $ st $ l ) ) +olcObjectClasses: {9}( 2.5.6.11 NAME 'applicationProcess' DESC 'RFC2256: an + application process' SUP top STRUCTURAL MUST cn MAY ( seeAlso $ ou $ l $ de + scription ) ) +olcObjectClasses: {10}( 2.5.6.12 NAME 'applicationEntity' DESC 'RFC2256: an + application entity' SUP top STRUCTURAL MUST ( presentationAddress $ cn ) MA + Y ( supportedApplicationContext $ seeAlso $ ou $ o $ l $ description ) ) +olcObjectClasses: {11}( 2.5.6.13 NAME 'dSA' DESC 'RFC2256: a directory syste + m agent (a server)' SUP applicationEntity STRUCTURAL MAY knowledgeInformati + on ) +olcObjectClasses: {12}( 2.5.6.14 NAME 'device' DESC 'RFC2256: a device' SUP + top STRUCTURAL MUST cn MAY ( serialNumber $ seeAlso $ owner $ ou $ o $ l $ + description ) ) +olcObjectClasses: {13}( 2.5.6.15 NAME 'strongAuthenticationUser' DESC 'RFC22 + 56: a strong authentication 
user' SUP top AUXILIARY MUST userCertificate ) +olcObjectClasses: {14}( 2.5.6.16 NAME 'certificationAuthority' DESC 'RFC2256 + : a certificate authority' SUP top AUXILIARY MUST ( authorityRevocationList + $ certificateRevocationList $ cACertificate ) MAY crossCertificatePair ) +olcObjectClasses: {15}( 2.5.6.17 NAME 'groupOfUniqueNames' DESC 'RFC2256: a + group of unique names (DN and Unique Identifier)' SUP top STRUCTURAL MUST ( + uniqueMember $ cn ) MAY ( businessCategory $ seeAlso $ owner $ ou $ o $ de + scription ) ) +olcObjectClasses: {16}( 2.5.6.18 NAME 'userSecurityInformation' DESC 'RFC225 + 6: a user security information' SUP top AUXILIARY MAY ( supportedAlgorithms + ) ) +olcObjectClasses: {17}( 2.5.6.16.2 NAME 'certificationAuthority-V2' SUP cert + ificationAuthority AUXILIARY MAY ( deltaRevocationList ) ) +olcObjectClasses: {18}( 2.5.6.19 NAME 'cRLDistributionPoint' SUP top STRUCTU + RAL MUST ( cn ) MAY ( certificateRevocationList $ authorityRevocationList $ + deltaRevocationList ) ) +olcObjectClasses: {19}( 2.5.6.20 NAME 'dmd' SUP top STRUCTURAL MUST ( dmdNam + e ) MAY ( userPassword $ searchGuide $ seeAlso $ businessCategory $ x121Add + ress $ registeredAddress $ destinationIndicator $ preferredDeliveryMethod $ + telexNumber $ teletexTerminalIdentifier $ telephoneNumber $ internationali + SDNNumber $ facsimileTelephoneNumber $ street $ postOfficeBox $ postalCode + $ postalAddress $ physicalDeliveryOfficeName $ st $ l $ description ) ) +olcObjectClasses: {20}( 2.5.6.21 NAME 'pkiUser' DESC 'RFC2587: a PKI user' S + UP top AUXILIARY MAY userCertificate ) +olcObjectClasses: {21}( 2.5.6.22 NAME 'pkiCA' DESC 'RFC2587: PKI certificate + authority' SUP top AUXILIARY MAY ( authorityRevocationList $ certificateRe + vocationList $ cACertificate $ crossCertificatePair ) ) +olcObjectClasses: {22}( 2.5.6.23 NAME 'deltaCRL' DESC 'RFC2587: PKI user' SU + P top AUXILIARY MAY deltaRevocationList ) +olcObjectClasses: {23}( 1.3.6.1.4.1.250.3.15 NAME 'labeledURIObject' DESC 
'R + FC2079: object that contains the URI attribute type' MAY ( labeledURI ) SUP + top AUXILIARY ) +olcObjectClasses: {24}( 0.9.2342.19200300.100.4.19 NAME 'simpleSecurityObjec + t' DESC 'RFC1274: simple security object' SUP top AUXILIARY MUST userPasswo + rd ) +olcObjectClasses: {25}( 1.3.6.1.4.1.1466.344 NAME 'dcObject' DESC 'RFC2247: + domain component object' SUP top AUXILIARY MUST dc ) +olcObjectClasses: {26}( 1.3.6.1.1.3.1 NAME 'uidObject' DESC 'RFC2377: uid ob + ject' SUP top AUXILIARY MUST uid ) +structuralObjectClass: olcSchemaConfig +entryUUID: 4019f348-eaf5-1039-8661-dbfbf2f5e6dd +creatorsName: cn=config +createTimestamp: 20200224020101Z +entryCSN: 20200224020101.083690Z#000000#000#000000 +modifiersName: cn=config +modifyTimestamp: 20200224020101Z diff --git a/dirsrvtests/tests/data/openldap_2_389/1/slapd.d/cn=config/cn=schema/cn={1}cosine.ldif b/dirsrvtests/tests/data/openldap_2_389/1/slapd.d/cn=config/cn=schema/cn={1}cosine.ldif new file mode 100644 index 0000000..bc17164 --- /dev/null +++ b/dirsrvtests/tests/data/openldap_2_389/1/slapd.d/cn=config/cn=schema/cn={1}cosine.ldif @@ -0,0 +1,178 @@ +# AUTO-GENERATED FILE - DO NOT EDIT!! Use ldapmodify. +# CRC32 4e3862ab +dn: cn={1}cosine +objectClass: olcSchemaConfig +cn: {1}cosine +olcAttributeTypes: {0}( 0.9.2342.19200300.100.1.2 NAME 'textEncodedORAddress + ' EQUALITY caseIgnoreMatch SUBSTR caseIgnoreSubstringsMatch SYNTAX 1.3.6.1. 
+ 4.1.1466.115.121.1.15{256} ) +olcAttributeTypes: {1}( 0.9.2342.19200300.100.1.4 NAME 'info' DESC 'RFC1274: + general information' EQUALITY caseIgnoreMatch SUBSTR caseIgnoreSubstringsM + atch SYNTAX 1.3.6.1.4.1.1466.115.121.1.15{2048} ) +olcAttributeTypes: {2}( 0.9.2342.19200300.100.1.5 NAME ( 'drink' 'favouriteD + rink' ) DESC 'RFC1274: favorite drink' EQUALITY caseIgnoreMatch SUBSTR case + IgnoreSubstringsMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.15{256} ) +olcAttributeTypes: {3}( 0.9.2342.19200300.100.1.6 NAME 'roomNumber' DESC 'RF + C1274: room number' EQUALITY caseIgnoreMatch SUBSTR caseIgnoreSubstringsMat + ch SYNTAX 1.3.6.1.4.1.1466.115.121.1.15{256} ) +olcAttributeTypes: {4}( 0.9.2342.19200300.100.1.7 NAME 'photo' DESC 'RFC1274 + : photo (G3 fax)' SYNTAX 1.3.6.1.4.1.1466.115.121.1.23{25000} ) +olcAttributeTypes: {5}( 0.9.2342.19200300.100.1.8 NAME 'userClass' DESC 'RFC + 1274: category of user' EQUALITY caseIgnoreMatch SUBSTR caseIgnoreSubstring + sMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.15{256} ) +olcAttributeTypes: {6}( 0.9.2342.19200300.100.1.9 NAME 'host' DESC 'RFC1274: + host computer' EQUALITY caseIgnoreMatch SUBSTR caseIgnoreSubstringsMatch S + YNTAX 1.3.6.1.4.1.1466.115.121.1.15{256} ) +olcAttributeTypes: {7}( 0.9.2342.19200300.100.1.10 NAME 'manager' DESC 'RFC1 + 274: DN of manager' EQUALITY distinguishedNameMatch SYNTAX 1.3.6.1.4.1.1466 + .115.121.1.12 ) +olcAttributeTypes: {8}( 0.9.2342.19200300.100.1.11 NAME 'documentIdentifier' + DESC 'RFC1274: unique identifier of document' EQUALITY caseIgnoreMatch SUB + STR caseIgnoreSubstringsMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.15{256} ) +olcAttributeTypes: {9}( 0.9.2342.19200300.100.1.12 NAME 'documentTitle' DESC + 'RFC1274: title of document' EQUALITY caseIgnoreMatch SUBSTR caseIgnoreSub + stringsMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.15{256} ) +olcAttributeTypes: {10}( 0.9.2342.19200300.100.1.13 NAME 'documentVersion' D + ESC 'RFC1274: version of document' EQUALITY caseIgnoreMatch SUBSTR caseIgno + 
reSubstringsMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.15{256} ) +olcAttributeTypes: {11}( 0.9.2342.19200300.100.1.14 NAME 'documentAuthor' DE + SC 'RFC1274: DN of author of document' EQUALITY distinguishedNameMatch SYNT + AX 1.3.6.1.4.1.1466.115.121.1.12 ) +olcAttributeTypes: {12}( 0.9.2342.19200300.100.1.15 NAME 'documentLocation' + DESC 'RFC1274: location of document original' EQUALITY caseIgnoreMatch SUBS + TR caseIgnoreSubstringsMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.15{256} ) +olcAttributeTypes: {13}( 0.9.2342.19200300.100.1.20 NAME ( 'homePhone' 'home + TelephoneNumber' ) DESC 'RFC1274: home telephone number' EQUALITY telephone + NumberMatch SUBSTR telephoneNumberSubstringsMatch SYNTAX 1.3.6.1.4.1.1466.1 + 15.121.1.50 ) +olcAttributeTypes: {14}( 0.9.2342.19200300.100.1.21 NAME 'secretary' DESC 'R + FC1274: DN of secretary' EQUALITY distinguishedNameMatch SYNTAX 1.3.6.1.4.1 + .1466.115.121.1.12 ) +olcAttributeTypes: {15}( 0.9.2342.19200300.100.1.22 NAME 'otherMailbox' SYNT + AX 1.3.6.1.4.1.1466.115.121.1.39 ) +olcAttributeTypes: {16}( 0.9.2342.19200300.100.1.26 NAME 'aRecord' EQUALITY + caseIgnoreIA5Match SYNTAX 1.3.6.1.4.1.1466.115.121.1.26 ) +olcAttributeTypes: {17}( 0.9.2342.19200300.100.1.27 NAME 'mDRecord' EQUALITY + caseIgnoreIA5Match SYNTAX 1.3.6.1.4.1.1466.115.121.1.26 ) +olcAttributeTypes: {18}( 0.9.2342.19200300.100.1.28 NAME 'mXRecord' EQUALITY + caseIgnoreIA5Match SYNTAX 1.3.6.1.4.1.1466.115.121.1.26 ) +olcAttributeTypes: {19}( 0.9.2342.19200300.100.1.29 NAME 'nSRecord' EQUALITY + caseIgnoreIA5Match SYNTAX 1.3.6.1.4.1.1466.115.121.1.26 ) +olcAttributeTypes: {20}( 0.9.2342.19200300.100.1.30 NAME 'sOARecord' EQUALIT + Y caseIgnoreIA5Match SYNTAX 1.3.6.1.4.1.1466.115.121.1.26 ) +olcAttributeTypes: {21}( 0.9.2342.19200300.100.1.31 NAME 'cNAMERecord' EQUAL + ITY caseIgnoreIA5Match SYNTAX 1.3.6.1.4.1.1466.115.121.1.26 ) +olcAttributeTypes: {22}( 0.9.2342.19200300.100.1.38 NAME 'associatedName' DE + SC 'RFC1274: DN of entry associated with domain' EQUALITY 
distinguishedName + Match SYNTAX 1.3.6.1.4.1.1466.115.121.1.12 ) +olcAttributeTypes: {23}( 0.9.2342.19200300.100.1.39 NAME 'homePostalAddress' + DESC 'RFC1274: home postal address' EQUALITY caseIgnoreListMatch SUBSTR ca + seIgnoreListSubstringsMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.41 ) +olcAttributeTypes: {24}( 0.9.2342.19200300.100.1.40 NAME 'personalTitle' DES + C 'RFC1274: personal title' EQUALITY caseIgnoreMatch SUBSTR caseIgnoreSubst + ringsMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.15{256} ) +olcAttributeTypes: {25}( 0.9.2342.19200300.100.1.41 NAME ( 'mobile' 'mobileT + elephoneNumber' ) DESC 'RFC1274: mobile telephone number' EQUALITY telephon + eNumberMatch SUBSTR telephoneNumberSubstringsMatch SYNTAX 1.3.6.1.4.1.1466. + 115.121.1.50 ) +olcAttributeTypes: {26}( 0.9.2342.19200300.100.1.42 NAME ( 'pager' 'pagerTel + ephoneNumber' ) DESC 'RFC1274: pager telephone number' EQUALITY telephoneNu + mberMatch SUBSTR telephoneNumberSubstringsMatch SYNTAX 1.3.6.1.4.1.1466.115 + .121.1.50 ) +olcAttributeTypes: {27}( 0.9.2342.19200300.100.1.43 NAME ( 'co' 'friendlyCou + ntryName' ) DESC 'RFC1274: friendly country name' EQUALITY caseIgnoreMatch + SUBSTR caseIgnoreSubstringsMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.15 ) +olcAttributeTypes: {28}( 0.9.2342.19200300.100.1.44 NAME 'uniqueIdentifier' + DESC 'RFC1274: unique identifer' EQUALITY caseIgnoreMatch SYNTAX 1.3.6.1.4. 
+ 1.1466.115.121.1.15{256} ) +olcAttributeTypes: {29}( 0.9.2342.19200300.100.1.45 NAME 'organizationalStat + us' DESC 'RFC1274: organizational status' EQUALITY caseIgnoreMatch SUBSTR c + aseIgnoreSubstringsMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.15{256} ) +olcAttributeTypes: {30}( 0.9.2342.19200300.100.1.46 NAME 'janetMailbox' DESC + 'RFC1274: Janet mailbox' EQUALITY caseIgnoreIA5Match SUBSTR caseIgnoreIA5S + ubstringsMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.26{256} ) +olcAttributeTypes: {31}( 0.9.2342.19200300.100.1.47 NAME 'mailPreferenceOpti + on' DESC 'RFC1274: mail preference option' SYNTAX 1.3.6.1.4.1.1466.115.121. + 1.27 ) +olcAttributeTypes: {32}( 0.9.2342.19200300.100.1.48 NAME 'buildingName' DESC + 'RFC1274: name of building' EQUALITY caseIgnoreMatch SUBSTR caseIgnoreSubs + tringsMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.15{256} ) +olcAttributeTypes: {33}( 0.9.2342.19200300.100.1.49 NAME 'dSAQuality' DESC ' + RFC1274: DSA Quality' SYNTAX 1.3.6.1.4.1.1466.115.121.1.19 SINGLE-VALUE ) +olcAttributeTypes: {34}( 0.9.2342.19200300.100.1.50 NAME 'singleLevelQuality + ' DESC 'RFC1274: Single Level Quality' SYNTAX 1.3.6.1.4.1.1466.115.121.1.13 + SINGLE-VALUE ) +olcAttributeTypes: {35}( 0.9.2342.19200300.100.1.51 NAME 'subtreeMinimumQual + ity' DESC 'RFC1274: Subtree Mininum Quality' SYNTAX 1.3.6.1.4.1.1466.115.12 + 1.1.13 SINGLE-VALUE ) +olcAttributeTypes: {36}( 0.9.2342.19200300.100.1.52 NAME 'subtreeMaximumQual + ity' DESC 'RFC1274: Subtree Maximun Quality' SYNTAX 1.3.6.1.4.1.1466.115.12 + 1.1.13 SINGLE-VALUE ) +olcAttributeTypes: {37}( 0.9.2342.19200300.100.1.53 NAME 'personalSignature' + DESC 'RFC1274: Personal Signature (G3 fax)' SYNTAX 1.3.6.1.4.1.1466.115.12 + 1.1.23 ) +olcAttributeTypes: {38}( 0.9.2342.19200300.100.1.54 NAME 'dITRedirect' DESC + 'RFC1274: DIT Redirect' EQUALITY distinguishedNameMatch SYNTAX 1.3.6.1.4.1. 
+ 1466.115.121.1.12 ) +olcAttributeTypes: {39}( 0.9.2342.19200300.100.1.55 NAME 'audio' DESC 'RFC12 + 74: audio (u-law)' SYNTAX 1.3.6.1.4.1.1466.115.121.1.4{25000} ) +olcAttributeTypes: {40}( 0.9.2342.19200300.100.1.56 NAME 'documentPublisher' + DESC 'RFC1274: publisher of document' EQUALITY caseIgnoreMatch SUBSTR case + IgnoreSubstringsMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.15 ) +olcObjectClasses: {0}( 0.9.2342.19200300.100.4.4 NAME ( 'pilotPerson' 'newPi + lotPerson' ) SUP person STRUCTURAL MAY ( userid $ textEncodedORAddress $ rf + c822Mailbox $ favouriteDrink $ roomNumber $ userClass $ homeTelephoneNumber + $ homePostalAddress $ secretary $ personalTitle $ preferredDeliveryMethod + $ businessCategory $ janetMailbox $ otherMailbox $ mobileTelephoneNumber $ + pagerTelephoneNumber $ organizationalStatus $ mailPreferenceOption $ person + alSignature ) ) +olcObjectClasses: {1}( 0.9.2342.19200300.100.4.5 NAME 'account' SUP top STRU + CTURAL MUST userid MAY ( description $ seeAlso $ localityName $ organizatio + nName $ organizationalUnitName $ host ) ) +olcObjectClasses: {2}( 0.9.2342.19200300.100.4.6 NAME 'document' SUP top STR + UCTURAL MUST documentIdentifier MAY ( commonName $ description $ seeAlso $ + localityName $ organizationName $ organizationalUnitName $ documentTitle $ + documentVersion $ documentAuthor $ documentLocation $ documentPublisher ) ) +olcObjectClasses: {3}( 0.9.2342.19200300.100.4.7 NAME 'room' SUP top STRUCTU + RAL MUST commonName MAY ( roomNumber $ description $ seeAlso $ telephoneNum + ber ) ) +olcObjectClasses: {4}( 0.9.2342.19200300.100.4.9 NAME 'documentSeries' SUP t + op STRUCTURAL MUST commonName MAY ( description $ seeAlso $ telephonenumber + $ localityName $ organizationName $ organizationalUnitName ) ) +olcObjectClasses: {5}( 0.9.2342.19200300.100.4.13 NAME 'domain' SUP top STRU + CTURAL MUST domainComponent MAY ( associatedName $ organizationName $ descr + iption $ businessCategory $ seeAlso $ searchGuide $ userPassword $ locality + 
Name $ stateOrProvinceName $ streetAddress $ physicalDeliveryOfficeName $ p + ostalAddress $ postalCode $ postOfficeBox $ streetAddress $ facsimileTeleph + oneNumber $ internationalISDNNumber $ telephoneNumber $ teletexTerminalIden + tifier $ telexNumber $ preferredDeliveryMethod $ destinationIndicator $ reg + isteredAddress $ x121Address ) ) +olcObjectClasses: {6}( 0.9.2342.19200300.100.4.14 NAME 'RFC822localPart' SUP + domain STRUCTURAL MAY ( commonName $ surname $ description $ seeAlso $ tel + ephoneNumber $ physicalDeliveryOfficeName $ postalAddress $ postalCode $ po + stOfficeBox $ streetAddress $ facsimileTelephoneNumber $ internationalISDNN + umber $ telephoneNumber $ teletexTerminalIdentifier $ telexNumber $ preferr + edDeliveryMethod $ destinationIndicator $ registeredAddress $ x121Address ) + ) +olcObjectClasses: {7}( 0.9.2342.19200300.100.4.15 NAME 'dNSDomain' SUP domai + n STRUCTURAL MAY ( ARecord $ MDRecord $ MXRecord $ NSRecord $ SOARecord $ C + NAMERecord ) ) +olcObjectClasses: {8}( 0.9.2342.19200300.100.4.17 NAME 'domainRelatedObject' + DESC 'RFC1274: an object related to an domain' SUP top AUXILIARY MUST asso + ciatedDomain ) +olcObjectClasses: {9}( 0.9.2342.19200300.100.4.18 NAME 'friendlyCountry' SUP + country STRUCTURAL MUST friendlyCountryName ) +olcObjectClasses: {10}( 0.9.2342.19200300.100.4.20 NAME 'pilotOrganization' + SUP ( organization $ organizationalUnit ) STRUCTURAL MAY buildingName ) +olcObjectClasses: {11}( 0.9.2342.19200300.100.4.21 NAME 'pilotDSA' SUP dsa S + TRUCTURAL MAY dSAQuality ) +olcObjectClasses: {12}( 0.9.2342.19200300.100.4.22 NAME 'qualityLabelledData + ' SUP top AUXILIARY MUST dsaQuality MAY ( subtreeMinimumQuality $ subtreeMa + ximumQuality ) ) +structuralObjectClass: olcSchemaConfig +entryUUID: 401a0f9a-eaf5-1039-8662-dbfbf2f5e6dd +creatorsName: cn=config +createTimestamp: 20200224020101Z +entryCSN: 20200224020101.084423Z#000000#000#000000 +modifiersName: cn=config +modifyTimestamp: 20200224020101Z diff --git 
a/dirsrvtests/tests/data/openldap_2_389/1/slapd.d/cn=config/cn=schema/cn={2}inetorgperson.ldif b/dirsrvtests/tests/data/openldap_2_389/1/slapd.d/cn=config/cn=schema/cn={2}inetorgperson.ldif new file mode 100644 index 0000000..ecc4a56 --- /dev/null +++ b/dirsrvtests/tests/data/openldap_2_389/1/slapd.d/cn=config/cn=schema/cn={2}inetorgperson.ldif @@ -0,0 +1,49 @@ +# AUTO-GENERATED FILE - DO NOT EDIT!! Use ldapmodify. +# CRC32 2dfdddb6 +dn: cn={2}inetorgperson +objectClass: olcSchemaConfig +cn: {2}inetorgperson +olcAttributeTypes: {0}( 2.16.840.1.113730.3.1.1 NAME 'carLicense' DESC 'RFC2 + 798: vehicle license or registration plate' EQUALITY caseIgnoreMatch SUBSTR + caseIgnoreSubstringsMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.15 ) +olcAttributeTypes: {1}( 2.16.840.1.113730.3.1.2 NAME 'departmentNumber' DESC + 'RFC2798: identifies a department within an organization' EQUALITY caseIgn + oreMatch SUBSTR caseIgnoreSubstringsMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1 + .15 ) +olcAttributeTypes: {2}( 2.16.840.1.113730.3.1.241 NAME 'displayName' DESC 'R + FC2798: preferred name to be used when displaying entries' EQUALITY caseIgn + oreMatch SUBSTR caseIgnoreSubstringsMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1 + .15 SINGLE-VALUE ) +olcAttributeTypes: {3}( 2.16.840.1.113730.3.1.3 NAME 'employeeNumber' DESC ' + RFC2798: numerically identifies an employee within an organization' EQUALIT + Y caseIgnoreMatch SUBSTR caseIgnoreSubstringsMatch SYNTAX 1.3.6.1.4.1.1466. 
+ 115.121.1.15 SINGLE-VALUE ) +olcAttributeTypes: {4}( 2.16.840.1.113730.3.1.4 NAME 'employeeType' DESC 'RF + C2798: type of employment for a person' EQUALITY caseIgnoreMatch SUBSTR cas + eIgnoreSubstringsMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.15 ) +olcAttributeTypes: {5}( 0.9.2342.19200300.100.1.60 NAME 'jpegPhoto' DESC 'RF + C2798: a JPEG image' SYNTAX 1.3.6.1.4.1.1466.115.121.1.28 ) +olcAttributeTypes: {6}( 2.16.840.1.113730.3.1.39 NAME 'preferredLanguage' DE + SC 'RFC2798: preferred written or spoken language for a person' EQUALITY ca + seIgnoreMatch SUBSTR caseIgnoreSubstringsMatch SYNTAX 1.3.6.1.4.1.1466.115. + 121.1.15 SINGLE-VALUE ) +olcAttributeTypes: {7}( 2.16.840.1.113730.3.1.40 NAME 'userSMIMECertificate' + DESC 'RFC2798: PKCS#7 SignedData used to support S/MIME' SYNTAX 1.3.6.1.4. + 1.1466.115.121.1.5 ) +olcAttributeTypes: {8}( 2.16.840.1.113730.3.1.216 NAME 'userPKCS12' DESC 'RF + C2798: personal identity information, a PKCS #12 PFX' SYNTAX 1.3.6.1.4.1.14 + 66.115.121.1.5 ) +olcObjectClasses: {0}( 2.16.840.1.113730.3.2.2 NAME 'inetOrgPerson' DESC 'RF + C2798: Internet Organizational Person' SUP organizationalPerson STRUCTURAL + MAY ( audio $ businessCategory $ carLicense $ departmentNumber $ displayNam + e $ employeeNumber $ employeeType $ givenName $ homePhone $ homePostalAddre + ss $ initials $ jpegPhoto $ labeledURI $ mail $ manager $ mobile $ o $ page + r $ photo $ roomNumber $ secretary $ uid $ userCertificate $ x500uniqueIden + tifier $ preferredLanguage $ userSMIMECertificate $ userPKCS12 ) ) +structuralObjectClass: olcSchemaConfig +entryUUID: 401a225a-eaf5-1039-8663-dbfbf2f5e6dd +creatorsName: cn=config +createTimestamp: 20200224020101Z +entryCSN: 20200224020101.084903Z#000000#000#000000 +modifiersName: cn=config +modifyTimestamp: 20200224020101Z diff --git a/dirsrvtests/tests/data/openldap_2_389/1/slapd.d/cn=config/cn=schema/cn={3}rfc2307bis.ldif b/dirsrvtests/tests/data/openldap_2_389/1/slapd.d/cn=config/cn=schema/cn={3}rfc2307bis.ldif new 
file mode 100644 index 0000000..57c8246 --- /dev/null +++ b/dirsrvtests/tests/data/openldap_2_389/1/slapd.d/cn=config/cn=schema/cn={3}rfc2307bis.ldif @@ -0,0 +1,155 @@ +# AUTO-GENERATED FILE - DO NOT EDIT!! Use ldapmodify. +# CRC32 dd0a742e +dn: cn={3}rfc2307bis +objectClass: olcSchemaConfig +cn: {3}rfc2307bis +olcAttributeTypes: {0}( 1.3.6.1.1.1.1.2 NAME 'gecos' DESC 'The GECOS field; + the common name' EQUALITY caseIgnoreIA5Match SUBSTR caseIgnoreIA5Substrings + Match SYNTAX 1.3.6.1.4.1.1466.115.121.1.26 SINGLE-VALUE ) +olcAttributeTypes: {1}( 1.3.6.1.1.1.1.3 NAME 'homeDirectory' DESC 'The absol + ute path to the home directory' EQUALITY caseExactIA5Match SYNTAX 1.3.6.1.4 + .1.1466.115.121.1.26 SINGLE-VALUE ) +olcAttributeTypes: {2}( 1.3.6.1.1.1.1.4 NAME 'loginShell' DESC 'The path to + the login shell' EQUALITY caseExactIA5Match SYNTAX 1.3.6.1.4.1.1466.115.121 + .1.26 SINGLE-VALUE ) +olcAttributeTypes: {3}( 1.3.6.1.1.1.1.5 NAME 'shadowLastChange' EQUALITY int + egerMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.27 SINGLE-VALUE ) +olcAttributeTypes: {4}( 1.3.6.1.1.1.1.6 NAME 'shadowMin' EQUALITY integerMat + ch SYNTAX 1.3.6.1.4.1.1466.115.121.1.27 SINGLE-VALUE ) +olcAttributeTypes: {5}( 1.3.6.1.1.1.1.7 NAME 'shadowMax' EQUALITY integerMat + ch SYNTAX 1.3.6.1.4.1.1466.115.121.1.27 SINGLE-VALUE ) +olcAttributeTypes: {6}( 1.3.6.1.1.1.1.8 NAME 'shadowWarning' EQUALITY intege + rMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.27 SINGLE-VALUE ) +olcAttributeTypes: {7}( 1.3.6.1.1.1.1.9 NAME 'shadowInactive' EQUALITY integ + erMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.27 SINGLE-VALUE ) +olcAttributeTypes: {8}( 1.3.6.1.1.1.1.10 NAME 'shadowExpire' EQUALITY intege + rMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.27 SINGLE-VALUE ) +olcAttributeTypes: {9}( 1.3.6.1.1.1.1.11 NAME 'shadowFlag' EQUALITY integerM + atch SYNTAX 1.3.6.1.4.1.1466.115.121.1.27 SINGLE-VALUE ) +olcAttributeTypes: {10}( 1.3.6.1.1.1.1.12 NAME 'memberUid' EQUALITY caseExac + tIA5Match SYNTAX 1.3.6.1.4.1.1466.115.121.1.26 ) 
+olcAttributeTypes: {11}( 1.3.6.1.1.1.1.13 NAME 'memberNisNetgroup' EQUALITY + caseExactIA5Match SUBSTR caseExactIA5SubstringsMatch SYNTAX 1.3.6.1.4.1.146 + 6.115.121.1.26 ) +olcAttributeTypes: {12}( 1.3.6.1.1.1.1.14 NAME 'nisNetgroupTriple' DESC 'Net + group triple' EQUALITY caseIgnoreIA5Match SYNTAX 1.3.6.1.4.1.1466.115.121.1 + .26 ) +olcAttributeTypes: {13}( 1.3.6.1.1.1.1.15 NAME 'ipServicePort' DESC 'Service + port number' EQUALITY integerMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.27 SI + NGLE-VALUE ) +olcAttributeTypes: {14}( 1.3.6.1.1.1.1.16 NAME 'ipServiceProtocol' DESC 'Ser + vice protocol name' SUP name ) +olcAttributeTypes: {15}( 1.3.6.1.1.1.1.17 NAME 'ipProtocolNumber' DESC 'IP p + rotocol number' EQUALITY integerMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.27 + SINGLE-VALUE ) +olcAttributeTypes: {16}( 1.3.6.1.1.1.1.18 NAME 'oncRpcNumber' DESC 'ONC RPC + number' EQUALITY integerMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.27 SINGLE-V + ALUE ) +olcAttributeTypes: {17}( 1.3.6.1.1.1.1.19 NAME 'ipHostNumber' DESC 'IPv4 add + resses as a dotted decimal omitting leading zeros or IPv6 addresses + as defined in RFC2373' SUP name ) +olcAttributeTypes: {18}( 1.3.6.1.1.1.1.20 NAME 'ipNetworkNumber' DESC 'IP ne + twork as a dotted decimal, eg. 192.168, omitting leading zeros' SUP + name SINGLE-VALUE ) +olcAttributeTypes: {19}( 1.3.6.1.1.1.1.21 NAME 'ipNetmaskNumber' DESC 'IP ne + tmask as a dotted decimal, eg. 255.255.255.0, omitting leading zeros + ' EQUALITY caseIgnoreIA5Match SYNTAX 1.3.6.1.4.1.1466.115.121.1.26 SINGLE-V + ALUE ) +olcAttributeTypes: {20}( 1.3.6.1.1.1.1.22 NAME 'macAddress' DESC 'MAC addres + s in maximal, colon separated hex notation, eg. 
00:00:92:90:ee:e2' E + QUALITY caseIgnoreIA5Match SYNTAX 1.3.6.1.4.1.1466.115.121.1.26 ) +olcAttributeTypes: {21}( 1.3.6.1.1.1.1.23 NAME 'bootParameter' DESC 'rpc.boo + tparamd parameter' EQUALITY caseExactIA5Match SYNTAX 1.3.6.1.4.1.1466.115.1 + 21.1.26 ) +olcAttributeTypes: {22}( 1.3.6.1.1.1.1.24 NAME 'bootFile' DESC 'Boot image n + ame' EQUALITY caseExactIA5Match SYNTAX 1.3.6.1.4.1.1466.115.121.1.26 ) +olcAttributeTypes: {23}( 1.3.6.1.1.1.1.26 NAME 'nisMapName' DESC 'Name of a + A generic NIS map' SUP name ) +olcAttributeTypes: {24}( 1.3.6.1.1.1.1.27 NAME 'nisMapEntry' DESC 'A generic + NIS entry' EQUALITY caseExactIA5Match SUBSTR caseExactIA5SubstringsMatch S + YNTAX 1.3.6.1.4.1.1466.115.121.1.26 SINGLE-VALUE ) +olcAttributeTypes: {25}( 1.3.6.1.1.1.1.28 NAME 'nisPublicKey' DESC 'NIS publ + ic key' EQUALITY octetStringMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.40 SING + LE-VALUE ) +olcAttributeTypes: {26}( 1.3.6.1.1.1.1.29 NAME 'nisSecretKey' DESC 'NIS secr + et key' EQUALITY octetStringMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.40 SING + LE-VALUE ) +olcAttributeTypes: {27}( 1.3.6.1.1.1.1.30 NAME 'nisDomain' DESC 'NIS domain' + EQUALITY caseIgnoreIA5Match SYNTAX 1.3.6.1.4.1.1466.115.121.1.26 ) +olcAttributeTypes: {28}( 1.3.6.1.1.1.1.31 NAME 'automountMapName' DESC 'auto + mount Map Name' EQUALITY caseExactIA5Match SUBSTR caseExactIA5SubstringsMat + ch SYNTAX 1.3.6.1.4.1.1466.115.121.1.26 SINGLE-VALUE ) +olcAttributeTypes: {29}( 1.3.6.1.1.1.1.32 NAME 'automountKey' DESC 'Automoun + t Key value' EQUALITY caseExactIA5Match SUBSTR caseExactIA5SubstringsMatch + SYNTAX 1.3.6.1.4.1.1466.115.121.1.26 SINGLE-VALUE ) +olcAttributeTypes: {30}( 1.3.6.1.1.1.1.33 NAME 'automountInformation' DESC ' + Automount information' EQUALITY caseExactIA5Match SUBSTR caseExactIA5Substr + ingsMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.26 SINGLE-VALUE ) +olcObjectClasses: {0}( 1.3.6.1.1.1.2.0 NAME 'posixAccount' DESC 'Abstraction + of an account with POSIX attributes' SUP top AUXILIARY MUST ( cn $ 
uid $ u + idNumber $ gidNumber $ homeDirectory ) MAY ( userPassword $ loginShell $ ge + cos $ description ) ) +olcObjectClasses: {1}( 1.3.6.1.1.1.2.1 NAME 'shadowAccount' DESC 'Additional + attributes for shadow passwords' SUP top AUXILIARY MUST uid MAY ( userPass + word $ description $ shadowLastChange $ shadowMin $ shadowMax $ shadowWarni + ng $ shadowInactive $ shadowExpire $ shadowFlag ) ) +olcObjectClasses: {2}( 1.3.6.1.1.1.2.2 NAME 'posixGroup' DESC 'Abstraction o + f a group of accounts' SUP top AUXILIARY MUST gidNumber MAY ( userPassword + $ memberUid $ description ) ) +olcObjectClasses: {3}( 1.3.6.1.1.1.2.3 NAME 'ipService' DESC 'Abstraction an + Internet Protocol service. Maps an IP port and protocol (such as tc + p or udp) to one or more names; the distinguished value of th + e cn attribute denotes the services canonical name' SUP top STRUCTUR + AL MUST ( cn $ ipServicePort $ ipServiceProtocol ) MAY description ) +olcObjectClasses: {4}( 1.3.6.1.1.1.2.4 NAME 'ipProtocol' DESC 'Abstraction o + f an IP protocol. Maps a protocol number to one or more names. The d + istinguished value of the cn attribute denotes the protocols canonic + al name' SUP top STRUCTURAL MUST ( cn $ ipProtocolNumber ) MAY description + ) +olcObjectClasses: {5}( 1.3.6.1.1.1.2.5 NAME 'oncRpc' DESC 'Abstraction of an + Open Network Computing (ONC) [RFC1057] Remote Procedure Call (RPC) b + inding. This class maps an ONC RPC number to a name. The distin + guished value of the cn attribute denotes the RPC services canonical + name' SUP top STRUCTURAL MUST ( cn $ oncRpcNumber ) MAY description ) +olcObjectClasses: {6}( 1.3.6.1.1.1.2.6 NAME 'ipHost' DESC 'Abstraction of a + host, an IP device. The distinguished value of the cn attribute deno + tes the hosts canonical name. 
Device SHOULD be used as a structural + class' SUP top AUXILIARY MUST ( cn $ ipHostNumber ) MAY ( userPassword $ l + $ description $ manager ) ) +olcObjectClasses: {7}( 1.3.6.1.1.1.2.7 NAME 'ipNetwork' DESC 'Abstraction of + a network. The distinguished value of the cn attribute denotes the + networks canonical name' SUP top STRUCTURAL MUST ipNetworkNumber MAY ( cn $ + ipNetmaskNumber $ l $ description $ manager ) ) +olcObjectClasses: {8}( 1.3.6.1.1.1.2.8 NAME 'nisNetgroup' DESC 'Abstraction + of a netgroup. May refer to other netgroups' SUP top STRUCTURAL MUST cn MAY + ( nisNetgroupTriple $ memberNisNetgroup $ description ) ) +olcObjectClasses: {9}( 1.3.6.1.1.1.2.9 NAME 'nisMap' DESC 'A generic abstrac + tion of a NIS map' SUP top STRUCTURAL MUST nisMapName MAY description ) +olcObjectClasses: {10}( 1.3.6.1.1.1.2.10 NAME 'nisObject' DESC 'An entry in + a NIS map' SUP top STRUCTURAL MUST ( cn $ nisMapEntry $ nisMapName ) MAY de + scription ) +olcObjectClasses: {11}( 1.3.6.1.1.1.2.11 NAME 'ieee802Device' DESC 'A device + with a MAC address; device SHOULD be used as a structural class' SU + P top AUXILIARY MAY macAddress ) +olcObjectClasses: {12}( 1.3.6.1.1.1.2.12 NAME 'bootableDevice' DESC 'A devic + e with boot parameters; device SHOULD be used as a structural class' + SUP top AUXILIARY MAY ( bootFile $ bootParameter ) ) +olcObjectClasses: {13}( 1.3.6.1.1.1.2.14 NAME 'nisKeyObject' DESC 'An object + with a public and secret key' SUP top AUXILIARY MUST ( cn $ nisPublicKey $ + nisSecretKey ) MAY ( uidNumber $ description ) ) +olcObjectClasses: {14}( 1.3.6.1.1.1.2.15 NAME 'nisDomainObject' DESC 'Associ + ates a NIS domain with a naming context' SUP top AUXILIARY MUST nisDomain ) +olcObjectClasses: {15}( 1.3.6.1.1.1.2.16 NAME 'automountMap' SUP top STRUCTU + RAL MUST automountMapName MAY description ) +olcObjectClasses: {16}( 1.3.6.1.1.1.2.17 NAME 'automount' DESC 'Automount in + formation' SUP top STRUCTURAL MUST ( automountKey $ automountInformation ) + MAY 
description ) +olcObjectClasses: {17}( 1.3.6.1.4.1.5322.13.1.1 NAME 'namedObject' SUP top S + TRUCTURAL MAY cn ) +structuralObjectClass: olcSchemaConfig +entryUUID: 401a2e6c-eaf5-1039-8664-dbfbf2f5e6dd +creatorsName: cn=config +createTimestamp: 20200224020101Z +entryCSN: 20200224020101.085186Z#000000#000#000000 +modifiersName: cn=config +modifyTimestamp: 20200224020101Z diff --git a/dirsrvtests/tests/data/openldap_2_389/1/slapd.d/cn=config/cn=schema/cn={4}yast.ldif b/dirsrvtests/tests/data/openldap_2_389/1/slapd.d/cn=config/cn=schema/cn={4}yast.ldif new file mode 100644 index 0000000..0fe0f06 --- /dev/null +++ b/dirsrvtests/tests/data/openldap_2_389/1/slapd.d/cn=config/cn=schema/cn={4}yast.ldif @@ -0,0 +1,108 @@ +# AUTO-GENERATED FILE - DO NOT EDIT!! Use ldapmodify. +# CRC32 442e4b40 +dn: cn={4}yast +objectClass: olcSchemaConfig +cn: {4}yast +olcObjectIdentifier: {0}SUSE 1.3.6.1.4.1.7057 +olcObjectIdentifier: {1}SUSE.YaST SUSE:10.1 +olcObjectIdentifier: {2}SUSE.YaST.ModuleConfig SUSE:10.1.2 +olcObjectIdentifier: {3}SUSE.YaST.ModuleConfig.OC SUSE.YaST.ModuleConfig:1 +olcObjectIdentifier: {4}SUSE.YaST.ModuleConfig.Attr SUSE.YaST.ModuleConfig:2 +olcAttributeTypes: {0}( SUSE.YaST.ModuleConfig.Attr:2 NAME 'suseDefaultBase' + DESC 'Base DN where new Objects should be created by default' EQUALITY dis + tinguishedNameMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.12 SINGLE-VALUE ) +olcAttributeTypes: {1}( SUSE.YaST.ModuleConfig.Attr:3 NAME 'suseNextUniqueId + ' DESC 'Next unused unique ID, can be used to generate directory wide uniqe + IDs' EQUALITY integerMatch ORDERING integerOrderingMatch SYNTAX 1.3.6.1.4. 
+ 1.1466.115.121.1.27 SINGLE-VALUE ) +olcAttributeTypes: {2}( SUSE.YaST.ModuleConfig.Attr:4 NAME 'suseMinUniqueId' + DESC 'lower Border for Unique IDs' EQUALITY integerMatch ORDERING integerO + rderingMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.27 SINGLE-VALUE ) +olcAttributeTypes: {3}( SUSE.YaST.ModuleConfig.Attr:5 NAME 'suseMaxUniqueId' + DESC 'upper Border for Unique IDs' EQUALITY integerMatch ORDERING integerO + rderingMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.27 SINGLE-VALUE ) +olcAttributeTypes: {4}( SUSE.YaST.ModuleConfig.Attr:6 NAME 'suseDefaultTempl + ate' DESC 'The DN of a template that should be used by default' EQUALITY di + stinguishedNameMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.12 SINGLE-VALUE ) +olcAttributeTypes: {5}( SUSE.YaST.ModuleConfig.Attr:7 NAME 'suseSearchFilter + ' DESC 'Search filter to localize Objects' SYNTAX 1.3.6.1.4.1.1466.115.121. + 1.15 SINGLE-VALUE ) +olcAttributeTypes: {6}( SUSE.YaST.ModuleConfig.Attr:11 NAME 'suseDefaultValu + e' DESC 'an Attribute-Value-Assertions to define defaults for specific Attr + ibutes' EQUALITY caseIgnoreMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.15 ) +olcAttributeTypes: {7}( SUSE.YaST.ModuleConfig.Attr:12 NAME 'suseNamingAttri + bute' DESC 'AttributeType that should be used as the RDN' EQUALITY caseIgno + reIA5Match SYNTAX 1.3.6.1.4.1.1466.115.121.1.26 SINGLE-VALUE ) +olcAttributeTypes: {8}( SUSE.YaST.ModuleConfig.Attr:15 NAME 'suseSecondaryGr + oup' DESC 'seconday group DN' EQUALITY distinguishedNameMatch SYNTAX 1.3.6. 
+ 1.4.1.1466.115.121.1.12 ) +olcAttributeTypes: {9}( SUSE.YaST.ModuleConfig.Attr:16 NAME 'suseMinPassword + Length' DESC 'minimum Password length for new users' EQUALITY integerMatch + ORDERING integerOrderingMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.27 SINGLE-V + ALUE ) +olcAttributeTypes: {10}( SUSE.YaST.ModuleConfig.Attr:17 NAME 'suseMaxPasswor + dLength' DESC 'maximum Password length for new users' EQUALITY integerMatch + ORDERING integerOrderingMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.27 SINGLE- + VALUE ) +olcAttributeTypes: {11}( SUSE.YaST.ModuleConfig.Attr:18 NAME 'susePasswordHa + sh' DESC 'Hash method to use for new users' EQUALITY caseIgnoreIA5Match SYN + TAX 1.3.6.1.4.1.1466.115.121.1.26 SINGLE-VALUE ) +olcAttributeTypes: {12}( SUSE.YaST.ModuleConfig.Attr:19 NAME 'suseSkelDir' D + ESC '' EQUALITY caseExactIA5Match SYNTAX 1.3.6.1.4.1.1466.115.121.1.26 ) +olcAttributeTypes: {13}( SUSE.YaST.ModuleConfig.Attr:20 NAME 'susePlugin' DE + SC 'plugin to use upon user/ group creation' EQUALITY caseIgnoreMatch SYNTA + X 1.3.6.1.4.1.1466.115.121.1.15 ) +olcAttributeTypes: {14}( SUSE.YaST.ModuleConfig.Attr:21 NAME 'suseMapAttribu + te' DESC '' EQUALITY caseIgnoreMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.15 ) +olcAttributeTypes: {15}( SUSE.YaST.ModuleConfig.Attr:22 NAME 'suseImapServer + ' DESC '' EQUALITY caseIgnoreMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.15 SIN + GLE-VALUE ) +olcAttributeTypes: {16}( SUSE.YaST.ModuleConfig.Attr:23 NAME 'suseImapAdmin' + DESC '' EQUALITY caseIgnoreMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.15 SING + LE-VALUE ) +olcAttributeTypes: {17}( SUSE.YaST.ModuleConfig.Attr:24 NAME 'suseImapDefaul + tQuota' DESC '' EQUALITY integerMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.27 + SINGLE-VALUE ) +olcAttributeTypes: {18}( SUSE.YaST.ModuleConfig.Attr:25 NAME 'suseImapUseSsl + ' DESC '' EQUALITY booleanMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.7 SINGLE- + VALUE ) +olcObjectClasses: {0}( SUSE.YaST.ModuleConfig.OC:2 NAME 'suseModuleConfigura + tion' DESC 'Contains 
configuration of Management Modules' SUP top STRUCTURA + L MUST cn MAY suseDefaultBase ) +olcObjectClasses: {1}( SUSE.YaST.ModuleConfig.OC:3 NAME 'suseUserConfigurati + on' DESC 'Configuration of user management tools' SUP suseModuleConfigurati + on STRUCTURAL MAY ( suseMinPasswordLength $ suseMaxPasswordLength $ susePas + swordHash $ suseSkelDir $ suseNextUniqueId $ suseMinUniqueId $ suseMaxUniqu + eId $ suseDefaultTemplate $ suseSearchFilter $ suseMapAttribute ) ) +olcObjectClasses: {2}( SUSE.YaST.ModuleConfig.OC:4 NAME 'suseObjectTemplate' + DESC 'Base Class for Object-Templates' SUP top STRUCTURAL MUST cn MAY ( su + sePlugin $ suseDefaultValue $ suseNamingAttribute ) ) +olcObjectClasses: {3}( SUSE.YaST.ModuleConfig.OC:5 NAME 'suseUserTemplate' D + ESC 'User object template' SUP suseObjectTemplate STRUCTURAL MUST cn MAY su + seSecondaryGroup ) +olcObjectClasses: {4}( SUSE.YaST.ModuleConfig.OC:6 NAME 'suseGroupTemplate' + DESC 'Group object template' SUP suseObjectTemplate STRUCTURAL MUST cn ) +olcObjectClasses: {5}( SUSE.YaST.ModuleConfig.OC:7 NAME 'suseGroupConfigurat + ion' DESC 'Configuration of user management tools' SUP suseModuleConfigurat + ion STRUCTURAL MAY ( suseNextUniqueId $ suseMinUniqueId $ suseMaxUniqueId $ + suseDefaultTemplate $ suseSearchFilter $ suseMapAttribute ) ) +olcObjectClasses: {6}( SUSE.YaST.ModuleConfig.OC:8 NAME 'suseCaConfiguration + ' DESC 'Configuration of CA management tools' SUP suseModuleConfiguration S + TRUCTURAL ) +olcObjectClasses: {7}( SUSE.YaST.ModuleConfig.OC:9 NAME 'suseDnsConfiguratio + n' DESC 'Configuration of mail server management tools' SUP suseModuleConfi + guration STRUCTURAL ) +olcObjectClasses: {8}( SUSE.YaST.ModuleConfig.OC:10 NAME 'suseDhcpConfigurat + ion' DESC 'Configuration of DHCP server management tools' SUP suseModuleCon + figuration STRUCTURAL ) +olcObjectClasses: {9}( SUSE.YaST.ModuleConfig.OC:11 NAME 'suseMailConfigurat + ion' DESC 'Configuration of IMAP user management tools' SUP suseModuleConfi + 
guration STRUCTURAL MUST ( suseImapServer $ suseImapAdmin $ suseImapDefault + Quota $ suseImapUseSsl ) ) +structuralObjectClass: olcSchemaConfig +entryUUID: 401a3f38-eaf5-1039-8665-dbfbf2f5e6dd +creatorsName: cn=config +createTimestamp: 20200224020101Z +entryCSN: 20200224020101.085642Z#000000#000#000000 +modifiersName: cn=config +modifyTimestamp: 20200224020101Z diff --git a/dirsrvtests/tests/data/openldap_2_389/1/slapd.d/cn=config/cn=schema/cn={5}test.ldif b/dirsrvtests/tests/data/openldap_2_389/1/slapd.d/cn=config/cn=schema/cn={5}test.ldif new file mode 100644 index 0000000..2f82bef --- /dev/null +++ b/dirsrvtests/tests/data/openldap_2_389/1/slapd.d/cn=config/cn=schema/cn={5}test.ldif @@ -0,0 +1,12 @@ +dn: cn={5}test +objectClass: olcSchemaConfig +cn: {5}test +olcAttributeTypes: {0}( x-attribute NAME 'x-attribute' DESC 'desc' EQUALITY distinguishedNameMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.12 SINGLE-VALUE ) +olcObjectClasses: {0}( x-object-oid NAME 'x-object' DESC 'desc' SUP top STRUCTURAL MUST x-attribute ) +structuralObjectClass: olcSchemaConfig +entryUUID: 86660309-e157-4ebb-be06-a5d7e3c877bc +creatorsName: cn=config +createTimestamp: 20200224020101Z +entryCSN: 20200224020101.085642Z#000000#000#000000 +modifiersName: cn=config +modifyTimestamp: 20200224020101Z diff --git a/dirsrvtests/tests/data/openldap_2_389/1/slapd.d/cn=config/olcDatabase={-1}frontend.ldif b/dirsrvtests/tests/data/openldap_2_389/1/slapd.d/cn=config/olcDatabase={-1}frontend.ldif new file mode 100644 index 0000000..87336b5 --- /dev/null +++ b/dirsrvtests/tests/data/openldap_2_389/1/slapd.d/cn=config/olcDatabase={-1}frontend.ldif @@ -0,0 +1,15 @@ +# AUTO-GENERATED FILE - DO NOT EDIT!! Use ldapmodify. 
+# CRC32 ebfceba5 +dn: olcDatabase={-1}frontend +objectClass: olcDatabaseConfig +objectClass: olcFrontendConfig +olcDatabase: {-1}frontend +olcAccess: {0}to dn.base="" by * read +olcAccess: {1}to dn.base="cn=Subschema" by * read +structuralObjectClass: olcDatabaseConfig +entryUUID: 401a4c6c-eaf5-1039-8666-dbfbf2f5e6dd +creatorsName: cn=config +createTimestamp: 20200224020101Z +entryCSN: 20200224020101.085980Z#000000#000#000000 +modifiersName: cn=config +modifyTimestamp: 20200224020101Z diff --git a/dirsrvtests/tests/data/openldap_2_389/1/slapd.d/cn=config/olcDatabase={0}config.ldif b/dirsrvtests/tests/data/openldap_2_389/1/slapd.d/cn=config/olcDatabase={0}config.ldif new file mode 100644 index 0000000..fa4e56c --- /dev/null +++ b/dirsrvtests/tests/data/openldap_2_389/1/slapd.d/cn=config/olcDatabase={0}config.ldif @@ -0,0 +1,20 @@ +# AUTO-GENERATED FILE - DO NOT EDIT!! Use ldapmodify. +# CRC32 e112c647 +dn: olcDatabase={0}config +objectClass: olcDatabaseConfig +olcDatabase: {0}config +olcAccess: {0}to * by * none +olcAddContentAcl: TRUE +olcLastMod: TRUE +olcMaxDerefDepth: 15 +olcReadOnly: FALSE +olcRootDN: cn=config +olcSyncUseSubentry: FALSE +olcMonitoring: FALSE +structuralObjectClass: olcDatabaseConfig +entryUUID: 401a534c-eaf5-1039-8668-dbfbf2f5e6dd +creatorsName: cn=config +createTimestamp: 20200224020101Z +entryCSN: 20200224020101.086158Z#000000#000#000000 +modifiersName: cn=config +modifyTimestamp: 20200224020101Z diff --git a/dirsrvtests/tests/data/openldap_2_389/1/slapd.d/cn=config/olcDatabase={1}mdb.ldif b/dirsrvtests/tests/data/openldap_2_389/1/slapd.d/cn=config/olcDatabase={1}mdb.ldif new file mode 100644 index 0000000..f7ef7fd --- /dev/null +++ b/dirsrvtests/tests/data/openldap_2_389/1/slapd.d/cn=config/olcDatabase={1}mdb.ldif @@ -0,0 +1,19 @@ +# AUTO-GENERATED FILE - DO NOT EDIT!! Use ldapmodify. 
+# CRC32 c1d2cbb7 +dn: olcDatabase={1}mdb +objectClass: olcDatabaseConfig +objectClass: olcMdbConfig +olcDatabase: mdb +olcDbDirectory: /var/lib/ldap/example_com +olcSuffix: dc=example,dc=com +olcRootDN: cn=Manager,dc=example,dc=com +olcRootPW:: c2VjcmV0 +olcDbIndex: objectClass eq +olcDbIndex: uid eq,pres,sub +structuralObjectClass: olcMdbConfig +entryUUID: 401a528e-eaf5-1039-8667-dbfbf2f5e6dd +creatorsName: cn=config +createTimestamp: 20200224020101Z +entryCSN: 20200224020101.086134Z#000000#000#000000 +modifiersName: cn=config +modifyTimestamp: 20200224020101Z diff --git a/dirsrvtests/tests/data/openldap_2_389/1/slapd.d/cn=config/olcDatabase={1}mdb/olcOverlay={0}memberof.ldif b/dirsrvtests/tests/data/openldap_2_389/1/slapd.d/cn=config/olcDatabase={1}mdb/olcOverlay={0}memberof.ldif new file mode 100644 index 0000000..9053f5b --- /dev/null +++ b/dirsrvtests/tests/data/openldap_2_389/1/slapd.d/cn=config/olcDatabase={1}mdb/olcOverlay={0}memberof.ldif @@ -0,0 +1,14 @@ +# AUTO-GENERATED FILE - DO NOT EDIT!! Use ldapmodify. +# CRC32 82bb3fb3 +dn: olcOverlay={0}memberof +objectClass: olcOverlayConfig +objectClass: olcMemberOf +olcOverlay: {0}memberof +olcMemberOfRefInt: TRUE +structuralObjectClass: olcMemberOf +entryUUID: 401a5f7c-eaf5-1039-8669-dbfbf2f5e6dd +creatorsName: cn=config +createTimestamp: 20200224020101Z +entryCSN: 20200224020101.086468Z#000000#000#000000 +modifiersName: cn=config +modifyTimestamp: 20200224020101Z diff --git a/dirsrvtests/tests/data/openldap_2_389/1/slapd.d/cn=config/olcDatabase={1}mdb/olcOverlay={1}refint.ldif b/dirsrvtests/tests/data/openldap_2_389/1/slapd.d/cn=config/olcDatabase={1}mdb/olcOverlay={1}refint.ldif new file mode 100644 index 0000000..f990632 --- /dev/null +++ b/dirsrvtests/tests/data/openldap_2_389/1/slapd.d/cn=config/olcDatabase={1}mdb/olcOverlay={1}refint.ldif @@ -0,0 +1,15 @@ +# AUTO-GENERATED FILE - DO NOT EDIT!! Use ldapmodify. 
+# CRC32 28d25ae6 +dn: olcOverlay={1}refint +objectClass: olcOverlayConfig +objectClass: olcRefintConfig +olcOverlay: {1}refint +olcRefintAttribute: member +olcRefintAttribute: memberOf +structuralObjectClass: olcRefintConfig +entryUUID: 401a66fc-eaf5-1039-866a-dbfbf2f5e6dd +creatorsName: cn=config +createTimestamp: 20200224020101Z +entryCSN: 20200224020101.086660Z#000000#000#000000 +modifiersName: cn=config +modifyTimestamp: 20200224020101Z diff --git a/dirsrvtests/tests/data/openldap_2_389/1/slapd.d/cn=config/olcDatabase={1}mdb/olcOverlay={2}unique.ldif b/dirsrvtests/tests/data/openldap_2_389/1/slapd.d/cn=config/olcDatabase={1}mdb/olcOverlay={2}unique.ldif new file mode 100644 index 0000000..2a0e6b3 --- /dev/null +++ b/dirsrvtests/tests/data/openldap_2_389/1/slapd.d/cn=config/olcDatabase={1}mdb/olcOverlay={2}unique.ldif @@ -0,0 +1,15 @@ +# AUTO-GENERATED FILE - DO NOT EDIT!! Use ldapmodify. +# CRC32 e3a2aeac +dn: olcOverlay={2}unique +objectClass: olcOverlayConfig +objectClass: olcUniqueConfig +olcOverlay: {2}unique +olcUniqueURI: ldap:///?mail?sub +olcUniqueURI: ldap:///?uid?sub +structuralObjectClass: olcUniqueConfig +entryUUID: 401a6b02-eaf5-1039-866b-dbfbf2f5e6dd +creatorsName: cn=config +createTimestamp: 20200224020101Z +entryCSN: 20200224020101.086763Z#000000#000#000000 +modifiersName: cn=config +modifyTimestamp: 20200224020101Z diff --git a/dirsrvtests/tests/data/openldap_2_389/1/slapd.d/cn=config/olcDatabase={2}mdb.ldif b/dirsrvtests/tests/data/openldap_2_389/1/slapd.d/cn=config/olcDatabase={2}mdb.ldif new file mode 100644 index 0000000..27a21d1 --- /dev/null +++ b/dirsrvtests/tests/data/openldap_2_389/1/slapd.d/cn=config/olcDatabase={2}mdb.ldif @@ -0,0 +1,18 @@ +# AUTO-GENERATED FILE - DO NOT EDIT!! Use ldapmodify. 
+# CRC32 1bb1ab28 +dn: olcDatabase={2}mdb +objectClass: olcDatabaseConfig +objectClass: olcMdbConfig +olcDatabase: mdb +olcDbDirectory: /var/lib/ldap/example_net +olcSuffix: dc=example,dc=net +olcRootDN: cn=Manager,dc=example,dc=net +olcRootPW:: c2VjcmV0 +olcDbIndex: objectClass eq +structuralObjectClass: olcMdbConfig +entryUUID: 401a7084-eaf5-1039-866c-dbfbf2f5e6dd +creatorsName: cn=config +createTimestamp: 20200224020101Z +entryCSN: 20200224020101.086905Z#000000#000#000000 +modifiersName: cn=config +modifyTimestamp: 20200224020101Z diff --git a/dirsrvtests/tests/data/openldap_2_389/1/slapd.d/cn=config/olcDatabase={2}mdb/olcOverlay={0}memberof.ldif b/dirsrvtests/tests/data/openldap_2_389/1/slapd.d/cn=config/olcDatabase={2}mdb/olcOverlay={0}memberof.ldif new file mode 100644 index 0000000..d04bb79 --- /dev/null +++ b/dirsrvtests/tests/data/openldap_2_389/1/slapd.d/cn=config/olcDatabase={2}mdb/olcOverlay={0}memberof.ldif @@ -0,0 +1,14 @@ +# AUTO-GENERATED FILE - DO NOT EDIT!! Use ldapmodify. +# CRC32 6b48531c +dn: olcOverlay={0}memberof +objectClass: olcOverlayConfig +objectClass: olcMemberOf +olcOverlay: {0}memberof +olcMemberOfRefInt: TRUE +structuralObjectClass: olcMemberOf +entryUUID: 401a7890-eaf5-1039-866d-dbfbf2f5e6dd +creatorsName: cn=config +createTimestamp: 20200224020101Z +entryCSN: 20200224020101.087110Z#000000#000#000000 +modifiersName: cn=config +modifyTimestamp: 20200224020101Z diff --git a/dirsrvtests/tests/data/openldap_2_389/1/slapd.d/cn=config/olcDatabase={2}mdb/olcOverlay={1}unique.ldif b/dirsrvtests/tests/data/openldap_2_389/1/slapd.d/cn=config/olcDatabase={2}mdb/olcOverlay={1}unique.ldif new file mode 100644 index 0000000..4ff7720 --- /dev/null +++ b/dirsrvtests/tests/data/openldap_2_389/1/slapd.d/cn=config/olcDatabase={2}mdb/olcOverlay={1}unique.ldif @@ -0,0 +1,15 @@ +# AUTO-GENERATED FILE - DO NOT EDIT!! Use ldapmodify. 
+# CRC32 027478a0 +dn: olcOverlay={1}unique +objectClass: olcOverlayConfig +objectClass: olcUniqueConfig +olcOverlay: {1}unique +olcUniqueURI: ldap:///?mail?sub +olcUniqueURI: ldap:///?uid?sub +structuralObjectClass: olcUniqueConfig +entryUUID: 401a7f20-eaf5-1039-866e-dbfbf2f5e6dd +creatorsName: cn=config +createTimestamp: 20200224020101Z +entryCSN: 20200224020101.087278Z#000000#000#000000 +modifiersName: cn=config +modifyTimestamp: 20200224020101Z diff --git a/dirsrvtests/tests/data/openldap_2_389/4539/slapd.d/cn=config.ldif b/dirsrvtests/tests/data/openldap_2_389/4539/slapd.d/cn=config.ldif new file mode 100644 index 0000000..34d9e38 --- /dev/null +++ b/dirsrvtests/tests/data/openldap_2_389/4539/slapd.d/cn=config.ldif @@ -0,0 +1,42 @@ +# AUTO-GENERATED FILE - DO NOT EDIT!! Use ldapmodify. +# CRC32 652b4ad6 +dn: cn=config +objectClass: olcGlobal +cn: config +olcConfigFile: slapd.conf +olcConfigDir: ./slapd.d +olcAttributeOptions: lang- +olcAuthzPolicy: none +olcConcurrency: 0 +olcConnMaxPending: 100 +olcConnMaxPendingAuth: 1000 +olcGentleHUP: FALSE +olcIdleTimeout: 0 +olcIndexSubstrIfMaxLen: 4 +olcIndexSubstrIfMinLen: 2 +olcIndexSubstrAnyLen: 4 +olcIndexSubstrAnyStep: 2 +olcIndexIntLen: 4 +olcListenerThreads: 1 +olcLocalSSF: 71 +olcLogLevel: 0 +olcReadOnly: FALSE +olcSaslSecProps: noplain,noanonymous +olcSockbufMaxIncoming: 262143 +olcSockbufMaxIncomingAuth: 16777215 +olcThreads: 16 +olcTLSCACertificateFile: /tmp/ldap-sssdtest.cacrt +olcTLSCertificateFile: /tmp/ldap-sssdtest.crt +olcTLSCertificateKeyFile: /tmp/ldap-sssdtest.key +olcTLSCRLCheck: none +olcTLSVerifyClient: never +olcTLSProtocolMin: 0.0 +olcToolThreads: 1 +olcWriteTimeout: 0 +structuralObjectClass: olcGlobal +entryUUID: 12127e74-e5e6-103a-973c-d731be523aab +creatorsName: cn=config +createTimestamp: 20210108101443Z +entryCSN: 20210108101443.265809Z#000000#000#000000 +modifiersName: cn=config +modifyTimestamp: 20210108101443Z diff --git 
a/dirsrvtests/tests/data/openldap_2_389/4539/slapd.d/cn=config/cn=module{0}.ldif b/dirsrvtests/tests/data/openldap_2_389/4539/slapd.d/cn=config/cn=module{0}.ldif new file mode 100644 index 0000000..da0f3cf --- /dev/null +++ b/dirsrvtests/tests/data/openldap_2_389/4539/slapd.d/cn=config/cn=module{0}.ldif @@ -0,0 +1,13 @@ +# AUTO-GENERATED FILE - DO NOT EDIT!! Use ldapmodify. +# CRC32 453c66fb +dn: cn=module{0} +objectClass: olcModuleList +cn: module{0} +olcModuleLoad: {0}back_hdb.la +structuralObjectClass: olcModuleList +entryUUID: 1212848c-e5e6-103a-973d-d731be523aab +creatorsName: cn=config +createTimestamp: 20210108101443Z +entryCSN: 20210108101443.265809Z#000000#000#000000 +modifiersName: cn=config +modifyTimestamp: 20210108101443Z diff --git a/dirsrvtests/tests/data/openldap_2_389/4539/slapd.d/cn=config/cn=schema.ldif b/dirsrvtests/tests/data/openldap_2_389/4539/slapd.d/cn=config/cn=schema.ldif new file mode 100644 index 0000000..1a0a87a --- /dev/null +++ b/dirsrvtests/tests/data/openldap_2_389/4539/slapd.d/cn=config/cn=schema.ldif @@ -0,0 +1,634 @@ +# AUTO-GENERATED FILE - DO NOT EDIT!! Use ldapmodify. 
+# CRC32 8c9a2f9c +dn: cn=schema +objectClass: olcSchemaConfig +cn: schema +olcObjectIdentifier: OLcfg 1.3.6.1.4.1.4203.1.12.2 +olcObjectIdentifier: OLcfgAt OLcfg:3 +olcObjectIdentifier: OLcfgGlAt OLcfgAt:0 +olcObjectIdentifier: OLcfgBkAt OLcfgAt:1 +olcObjectIdentifier: OLcfgDbAt OLcfgAt:2 +olcObjectIdentifier: OLcfgOvAt OLcfgAt:3 +olcObjectIdentifier: OLcfgCtAt OLcfgAt:4 +olcObjectIdentifier: OLcfgOc OLcfg:4 +olcObjectIdentifier: OLcfgGlOc OLcfgOc:0 +olcObjectIdentifier: OLcfgBkOc OLcfgOc:1 +olcObjectIdentifier: OLcfgDbOc OLcfgOc:2 +olcObjectIdentifier: OLcfgOvOc OLcfgOc:3 +olcObjectIdentifier: OLcfgCtOc OLcfgOc:4 +olcObjectIdentifier: OMsyn 1.3.6.1.4.1.1466.115.121.1 +olcObjectIdentifier: OMsBoolean OMsyn:7 +olcObjectIdentifier: OMsDN OMsyn:12 +olcObjectIdentifier: OMsDirectoryString OMsyn:15 +olcObjectIdentifier: OMsIA5String OMsyn:26 +olcObjectIdentifier: OMsInteger OMsyn:27 +olcObjectIdentifier: OMsOID OMsyn:38 +olcObjectIdentifier: OMsOctetString OMsyn:40 +olcLdapSyntaxes: ( 1.3.6.1.4.1.1466.115.121.1.1 DESC 'ACI Item' X-BINARY-TRA + NSFER-REQUIRED 'TRUE' X-NOT-HUMAN-READABLE 'TRUE' ) +olcLdapSyntaxes: ( 1.3.6.1.4.1.1466.115.121.1.2 DESC 'Access Point' X-NOT-HU + MAN-READABLE 'TRUE' ) +olcLdapSyntaxes: ( 1.3.6.1.4.1.1466.115.121.1.3 DESC 'Attribute Type Descrip + tion' ) +olcLdapSyntaxes: ( 1.3.6.1.4.1.1466.115.121.1.4 DESC 'Audio' X-NOT-HUMAN-REA + DABLE 'TRUE' ) +olcLdapSyntaxes: ( 1.3.6.1.4.1.1466.115.121.1.5 DESC 'Binary' X-NOT-HUMAN-RE + ADABLE 'TRUE' ) +olcLdapSyntaxes: ( 1.3.6.1.4.1.1466.115.121.1.6 DESC 'Bit String' ) +olcLdapSyntaxes: ( 1.3.6.1.4.1.1466.115.121.1.7 DESC 'Boolean' ) +olcLdapSyntaxes: ( 1.3.6.1.4.1.1466.115.121.1.8 DESC 'Certificate' X-BINARY- + TRANSFER-REQUIRED 'TRUE' X-NOT-HUMAN-READABLE 'TRUE' ) +olcLdapSyntaxes: ( 1.3.6.1.4.1.1466.115.121.1.9 DESC 'Certificate List' X-BI + NARY-TRANSFER-REQUIRED 'TRUE' X-NOT-HUMAN-READABLE 'TRUE' ) +olcLdapSyntaxes: ( 1.3.6.1.4.1.1466.115.121.1.10 DESC 'Certificate Pair' X-B + 
INARY-TRANSFER-REQUIRED 'TRUE' X-NOT-HUMAN-READABLE 'TRUE' ) +olcLdapSyntaxes: ( 1.3.6.1.4.1.4203.666.11.10.2.1 DESC 'X.509 AttributeCerti + ficate' X-BINARY-TRANSFER-REQUIRED 'TRUE' X-NOT-HUMAN-READABLE 'TRUE' ) +olcLdapSyntaxes: ( 1.3.6.1.4.1.1466.115.121.1.12 DESC 'Distinguished Name' ) +olcLdapSyntaxes: ( 1.2.36.79672281.1.5.0 DESC 'RDN' ) +olcLdapSyntaxes: ( 1.3.6.1.4.1.1466.115.121.1.13 DESC 'Data Quality' ) +olcLdapSyntaxes: ( 1.3.6.1.4.1.1466.115.121.1.14 DESC 'Delivery Method' ) +olcLdapSyntaxes: ( 1.3.6.1.4.1.1466.115.121.1.15 DESC 'Directory String' ) +olcLdapSyntaxes: ( 1.3.6.1.4.1.1466.115.121.1.16 DESC 'DIT Content Rule Desc + ription' ) +olcLdapSyntaxes: ( 1.3.6.1.4.1.1466.115.121.1.17 DESC 'DIT Structure Rule De + scription' ) +olcLdapSyntaxes: ( 1.3.6.1.4.1.1466.115.121.1.19 DESC 'DSA Quality' ) +olcLdapSyntaxes: ( 1.3.6.1.4.1.1466.115.121.1.20 DESC 'DSE Type' ) +olcLdapSyntaxes: ( 1.3.6.1.4.1.1466.115.121.1.21 DESC 'Enhanced Guide' ) +olcLdapSyntaxes: ( 1.3.6.1.4.1.1466.115.121.1.22 DESC 'Facsimile Telephone N + umber' ) +olcLdapSyntaxes: ( 1.3.6.1.4.1.1466.115.121.1.23 DESC 'Fax' X-NOT-HUMAN-READ + ABLE 'TRUE' ) +olcLdapSyntaxes: ( 1.3.6.1.4.1.1466.115.121.1.24 DESC 'Generalized Time' ) +olcLdapSyntaxes: ( 1.3.6.1.4.1.1466.115.121.1.25 DESC 'Guide' ) +olcLdapSyntaxes: ( 1.3.6.1.4.1.1466.115.121.1.26 DESC 'IA5 String' ) +olcLdapSyntaxes: ( 1.3.6.1.4.1.1466.115.121.1.27 DESC 'Integer' ) +olcLdapSyntaxes: ( 1.3.6.1.4.1.1466.115.121.1.28 DESC 'JPEG' X-NOT-HUMAN-REA + DABLE 'TRUE' ) +olcLdapSyntaxes: ( 1.3.6.1.4.1.1466.115.121.1.29 DESC 'Supplier And Shadow Acc + ess Points' ) +olcLdapSyntaxes: ( 1.3.6.1.4.1.1466.115.121.1.30 DESC 'Matching Rule Descrip + tion' ) +olcLdapSyntaxes: ( 1.3.6.1.4.1.1466.115.121.1.31 DESC 'Matching Rule Use Des + cription' ) +olcLdapSyntaxes: ( 1.3.6.1.4.1.1466.115.121.1.32 DESC 'Mail Preference' ) +olcLdapSyntaxes: ( 1.3.6.1.4.1.1466.115.121.1.33 DESC 'MHS OR Address' ) +olcLdapSyntaxes: ( 1.3.6.1.4.1.1466.115.121.1.34 
DESC 'Name And Optional UID + ' ) +olcLdapSyntaxes: ( 1.3.6.1.4.1.1466.115.121.1.35 DESC 'Name Form Description + ' ) +olcLdapSyntaxes: ( 1.3.6.1.4.1.1466.115.121.1.36 DESC 'Numeric String' ) +olcLdapSyntaxes: ( 1.3.6.1.4.1.1466.115.121.1.37 DESC 'Object Class Descript + ion' ) +olcLdapSyntaxes: ( 1.3.6.1.4.1.1466.115.121.1.38 DESC 'OID' ) +olcLdapSyntaxes: ( 1.3.6.1.4.1.1466.115.121.1.39 DESC 'Other Mailbox' ) +olcLdapSyntaxes: ( 1.3.6.1.4.1.1466.115.121.1.40 DESC 'Octet String' ) +olcLdapSyntaxes: ( 1.3.6.1.4.1.1466.115.121.1.41 DESC 'Postal Address' ) +olcLdapSyntaxes: ( 1.3.6.1.4.1.1466.115.121.1.42 DESC 'Protocol Information' + ) +olcLdapSyntaxes: ( 1.3.6.1.4.1.1466.115.121.1.43 DESC 'Presentation Address' + ) +olcLdapSyntaxes: ( 1.3.6.1.4.1.1466.115.121.1.44 DESC 'Printable String' ) +olcLdapSyntaxes: ( 1.3.6.1.4.1.1466.115.121.1.11 DESC 'Country String' ) +olcLdapSyntaxes: ( 1.3.6.1.4.1.1466.115.121.1.45 DESC 'SubtreeSpecification' + ) +olcLdapSyntaxes: ( 1.3.6.1.4.1.1466.115.121.1.49 DESC 'Supported Algorithm' + X-BINARY-TRANSFER-REQUIRED 'TRUE' X-NOT-HUMAN-READABLE 'TRUE' ) +olcLdapSyntaxes: ( 1.3.6.1.4.1.1466.115.121.1.50 DESC 'Telephone Number' ) +olcLdapSyntaxes: ( 1.3.6.1.4.1.1466.115.121.1.51 DESC 'Teletex Terminal Iden + tifier' ) +olcLdapSyntaxes: ( 1.3.6.1.4.1.1466.115.121.1.52 DESC 'Telex Number' ) +olcLdapSyntaxes: ( 1.3.6.1.4.1.1466.115.121.1.54 DESC 'LDAP Syntax Descripti + on' ) +olcLdapSyntaxes: ( 1.3.6.1.4.1.1466.115.121.1.55 DESC 'Modify Rights' ) +olcLdapSyntaxes: ( 1.3.6.1.4.1.1466.115.121.1.56 DESC 'LDAP Schema Definitio + n' ) +olcLdapSyntaxes: ( 1.3.6.1.4.1.1466.115.121.1.57 DESC 'LDAP Schema Descripti + on' ) +olcLdapSyntaxes: ( 1.3.6.1.4.1.1466.115.121.1.58 DESC 'Substring Assertion' + ) +olcLdapSyntaxes: ( 1.3.6.1.1.1.0.0 DESC 'RFC2307 NIS Netgroup Triple' ) +olcLdapSyntaxes: ( 1.3.6.1.1.1.0.1 DESC 'RFC2307 Boot Parameter' ) +olcLdapSyntaxes: ( 1.3.6.1.1.15.1 DESC 'Certificate Exact Assertion' ) +olcLdapSyntaxes: ( 1.3.6.1.1.15.2 
DESC 'Certificate Assertion' ) +olcLdapSyntaxes: ( 1.3.6.1.1.15.3 DESC 'Certificate Pair Exact Assertion' ) +olcLdapSyntaxes: ( 1.3.6.1.1.15.4 DESC 'Certificate Pair Assertion' ) +olcLdapSyntaxes: ( 1.3.6.1.1.15.5 DESC 'Certificate List Exact Assertion' ) +olcLdapSyntaxes: ( 1.3.6.1.1.15.6 DESC 'Certificate List Assertion' ) +olcLdapSyntaxes: ( 1.3.6.1.1.15.7 DESC 'Algorithm Identifier' ) +olcLdapSyntaxes: ( 1.3.6.1.4.1.4203.666.11.10.2.2 DESC 'AttributeCertificate + Exact Assertion' ) +olcLdapSyntaxes: ( 1.3.6.1.4.1.4203.666.11.10.2.3 DESC 'AttributeCertificate + Assertion' ) +olcLdapSyntaxes: ( 1.3.6.1.1.16.1 DESC 'UUID' ) +olcLdapSyntaxes: ( 1.3.6.1.4.1.4203.666.11.2.1 DESC 'CSN' ) +olcLdapSyntaxes: ( 1.3.6.1.4.1.4203.666.11.2.4 DESC 'CSN SID' ) +olcLdapSyntaxes: ( 1.3.6.1.4.1.4203.1.1.1 DESC 'OpenLDAP void' ) +olcLdapSyntaxes: ( 1.3.6.1.4.1.4203.666.2.7 DESC 'OpenLDAP authz' ) +olcLdapSyntaxes: ( 1.3.6.1.4.1.4203.666.2.1 DESC 'OpenLDAP Experimental ACI' + ) +olcAttributeTypes: ( 2.5.4.0 NAME 'objectClass' DESC 'RFC4512: object classe + s of the entity' EQUALITY objectIdentifierMatch SYNTAX 1.3.6.1.4.1.1466.115 + .121.1.38 ) +olcAttributeTypes: ( 2.5.21.9 NAME 'structuralObjectClass' DESC 'RFC4512: st + ructural object class of entry' EQUALITY objectIdentifierMatch SYNTAX 1.3.6 + .1.4.1.1466.115.121.1.38 SINGLE-VALUE NO-USER-MODIFICATION USAGE directoryO + peration ) +olcAttributeTypes: ( 2.5.18.1 NAME 'createTimestamp' DESC 'RFC4512: time whi + ch object was created' EQUALITY generalizedTimeMatch ORDERING generalizedTi + meOrderingMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.24 SINGLE-VALUE NO-USER-M + ODIFICATION USAGE directoryOperation ) +olcAttributeTypes: ( 2.5.18.2 NAME 'modifyTimestamp' DESC 'RFC4512: time whi + ch object was last modified' EQUALITY generalizedTimeMatch ORDERING general + izedTimeOrderingMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.24 SINGLE-VALUE NO- + USER-MODIFICATION USAGE directoryOperation ) +olcAttributeTypes: ( 2.5.18.3 NAME 'creatorsName' 
DESC 'RFC4512: name of cre + ator' EQUALITY distinguishedNameMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.12 + SINGLE-VALUE NO-USER-MODIFICATION USAGE directoryOperation ) +olcAttributeTypes: ( 2.5.18.4 NAME 'modifiersName' DESC 'RFC4512: name of la + st modifier' EQUALITY distinguishedNameMatch SYNTAX 1.3.6.1.4.1.1466.115.12 + 1.1.12 SINGLE-VALUE NO-USER-MODIFICATION USAGE directoryOperation ) +olcAttributeTypes: ( 2.5.18.9 NAME 'hasSubordinates' DESC 'X.501: entry has + children' EQUALITY booleanMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.7 SINGLE- + VALUE NO-USER-MODIFICATION USAGE directoryOperation ) +olcAttributeTypes: ( 2.5.18.10 NAME 'subschemaSubentry' DESC 'RFC4512: name + of controlling subschema entry' EQUALITY distinguishedNameMatch SYNTAX 1.3. + 6.1.4.1.1466.115.121.1.12 SINGLE-VALUE NO-USER-MODIFICATION USAGE directory + Operation ) +olcAttributeTypes: ( 2.5.18.12 NAME 'collectiveAttributeSubentries' DESC 'RF + C3671: collective attribute subentries' EQUALITY distinguishedNameMatch SYN + TAX 1.3.6.1.4.1.1466.115.121.1.12 NO-USER-MODIFICATION USAGE directoryOpera + tion ) +olcAttributeTypes: ( 2.5.18.7 NAME 'collectiveExclusions' DESC 'RFC3671: col + lective attribute exclusions' EQUALITY objectIdentifierMatch SYNTAX 1.3.6.1 + .4.1.1466.115.121.1.38 USAGE directoryOperation ) +olcAttributeTypes: ( 1.3.6.1.1.20 NAME 'entryDN' DESC 'DN of the entry' EQUA + LITY distinguishedNameMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.12 SINGLE-VAL + UE NO-USER-MODIFICATION USAGE directoryOperation ) +olcAttributeTypes: ( 1.3.6.1.1.16.4 NAME 'entryUUID' DESC 'UUID of the entry + ' EQUALITY UUIDMatch ORDERING UUIDOrderingMatch SYNTAX 1.3.6.1.1.16.1 SINGL + E-VALUE NO-USER-MODIFICATION USAGE directoryOperation ) +olcAttributeTypes: ( 1.3.6.1.4.1.4203.666.1.7 NAME 'entryCSN' DESC 'change s + equence number of the entry content' EQUALITY CSNMatch ORDERING CSNOrdering + Match SYNTAX 1.3.6.1.4.1.4203.666.11.2.1{64} SINGLE-VALUE NO-USER-MODIFICAT + ION USAGE directoryOperation ) 
+olcAttributeTypes: ( 1.3.6.1.4.1.4203.666.1.13 NAME 'namingCSN' DESC 'change + sequence number of the entry naming (RDN)' EQUALITY CSNMatch ORDERING CSNO + rderingMatch SYNTAX 1.3.6.1.4.1.4203.666.11.2.1{64} SINGLE-VALUE NO-USER-MO + DIFICATION USAGE directoryOperation ) +olcAttributeTypes: ( 1.3.6.1.4.1.4203.666.1.23 NAME 'syncreplCookie' DESC 's + yncrepl Cookie for shadow copy' EQUALITY octetStringMatch ORDERING octetStr + ingOrderingMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.40 SINGLE-VALUE NO-USER- + MODIFICATION USAGE dSAOperation ) +olcAttributeTypes: ( 1.3.6.1.4.1.4203.666.1.25 NAME 'contextCSN' DESC 'the l + argest committed CSN of a context' EQUALITY CSNMatch ORDERING CSNOrderingMa + tch SYNTAX 1.3.6.1.4.1.4203.666.11.2.1{64} NO-USER-MODIFICATION USAGE dSAOp + eration ) +olcAttributeTypes: ( 1.3.6.1.4.1.1466.101.120.6 NAME 'altServer' DESC 'RFC45 + 12: alternative servers' SYNTAX 1.3.6.1.4.1.1466.115.121.1.26 USAGE dSAOper + ation ) +olcAttributeTypes: ( 1.3.6.1.4.1.1466.101.120.5 NAME 'namingContexts' DESC ' + RFC4512: naming contexts' SYNTAX 1.3.6.1.4.1.1466.115.121.1.12 USAGE dSAOpe + ration ) +olcAttributeTypes: ( 1.3.6.1.4.1.1466.101.120.13 NAME 'supportedControl' DES + C 'RFC4512: supported controls' SYNTAX 1.3.6.1.4.1.1466.115.121.1.38 USAGE + dSAOperation ) +olcAttributeTypes: ( 1.3.6.1.4.1.1466.101.120.7 NAME 'supportedExtension' DE + SC 'RFC4512: supported extended operations' SYNTAX 1.3.6.1.4.1.1466.115.121 + .1.38 USAGE dSAOperation ) +olcAttributeTypes: ( 1.3.6.1.4.1.1466.101.120.15 NAME 'supportedLDAPVersion' + DESC 'RFC4512: supported LDAP versions' SYNTAX 1.3.6.1.4.1.1466.115.121.1. 
+ 27 USAGE dSAOperation ) +olcAttributeTypes: ( 1.3.6.1.4.1.1466.101.120.14 NAME 'supportedSASLMechanis + ms' DESC 'RFC4512: supported SASL mechanisms' SYNTAX 1.3.6.1.4.1.1466.115.1 + 21.1.15 USAGE dSAOperation ) +olcAttributeTypes: ( 1.3.6.1.4.1.4203.1.3.5 NAME 'supportedFeatures' DESC 'R + FC4512: features supported by the server' EQUALITY objectIdentifierMatch SY + NTAX 1.3.6.1.4.1.1466.115.121.1.38 USAGE dSAOperation ) +olcAttributeTypes: ( 1.3.6.1.4.1.4203.666.1.10 NAME 'monitorContext' DESC 'm + onitor context' EQUALITY distinguishedNameMatch SYNTAX 1.3.6.1.4.1.1466.115 + .121.1.12 SINGLE-VALUE NO-USER-MODIFICATION USAGE dSAOperation ) +olcAttributeTypes: ( 1.3.6.1.4.1.4203.1.12.2.1 NAME 'configContext' DESC 'co + nfig context' EQUALITY distinguishedNameMatch SYNTAX 1.3.6.1.4.1.1466.115.1 + 21.1.12 SINGLE-VALUE NO-USER-MODIFICATION USAGE dSAOperation ) +olcAttributeTypes: ( 1.3.6.1.1.4 NAME 'vendorName' DESC 'RFC3045: name of im + plementation vendor' EQUALITY caseExactMatch SYNTAX 1.3.6.1.4.1.1466.115.12 + 1.1.15 SINGLE-VALUE NO-USER-MODIFICATION USAGE dSAOperation ) +olcAttributeTypes: ( 1.3.6.1.1.5 NAME 'vendorVersion' DESC 'RFC3045: version + of implementation' EQUALITY caseExactMatch SYNTAX 1.3.6.1.4.1.1466.115.121 + .1.15 SINGLE-VALUE NO-USER-MODIFICATION USAGE dSAOperation ) +olcAttributeTypes: ( 2.5.18.5 NAME 'administrativeRole' DESC 'RFC3672: admin + istrative role' EQUALITY objectIdentifierMatch SYNTAX 1.3.6.1.4.1.1466.115. + 121.1.38 USAGE directoryOperation ) +olcAttributeTypes: ( 2.5.18.6 NAME 'subtreeSpecification' DESC 'RFC3672: sub + tree specification' SYNTAX 1.3.6.1.4.1.1466.115.121.1.45 SINGLE-VALUE USAGE + directoryOperation ) +olcAttributeTypes: ( 2.5.21.1 NAME 'dITStructureRules' DESC 'RFC4512: DIT st + ructure rules' EQUALITY integerFirstComponentMatch SYNTAX 1.3.6.1.4.1.1466. 
+ 115.121.1.17 USAGE directoryOperation ) +olcAttributeTypes: ( 2.5.21.2 NAME 'dITContentRules' DESC 'RFC4512: DIT cont + ent rules' EQUALITY objectIdentifierFirstComponentMatch SYNTAX 1.3.6.1.4.1. + 1466.115.121.1.16 USAGE directoryOperation ) +olcAttributeTypes: ( 2.5.21.4 NAME 'matchingRules' DESC 'RFC4512: matching r + ules' EQUALITY objectIdentifierFirstComponentMatch SYNTAX 1.3.6.1.4.1.1466. + 115.121.1.30 USAGE directoryOperation ) +olcAttributeTypes: ( 2.5.21.5 NAME 'attributeTypes' DESC 'RFC4512: attribute + types' EQUALITY objectIdentifierFirstComponentMatch SYNTAX 1.3.6.1.4.1.146 + 6.115.121.1.3 USAGE directoryOperation ) +olcAttributeTypes: ( 2.5.21.6 NAME 'objectClasses' DESC 'RFC4512: object cla + sses' EQUALITY objectIdentifierFirstComponentMatch SYNTAX 1.3.6.1.4.1.1466. + 115.121.1.37 USAGE directoryOperation ) +olcAttributeTypes: ( 2.5.21.7 NAME 'nameForms' DESC 'RFC4512: name forms ' E + QUALITY objectIdentifierFirstComponentMatch SYNTAX 1.3.6.1.4.1.1466.115.121 + .1.35 USAGE directoryOperation ) +olcAttributeTypes: ( 2.5.21.8 NAME 'matchingRuleUse' DESC 'RFC4512: matching + rule uses' EQUALITY objectIdentifierFirstComponentMatch SYNTAX 1.3.6.1.4.1 + .1466.115.121.1.31 USAGE directoryOperation ) +olcAttributeTypes: ( 1.3.6.1.4.1.1466.101.120.16 NAME 'ldapSyntaxes' DESC 'R + FC4512: LDAP syntaxes' EQUALITY objectIdentifierFirstComponentMatch SYNTAX + 1.3.6.1.4.1.1466.115.121.1.54 USAGE directoryOperation ) +olcAttributeTypes: ( 2.5.4.1 NAME ( 'aliasedObjectName' 'aliasedEntryName' ) + DESC 'RFC4512: name of aliased object' EQUALITY distinguishedNameMatch SYN + TAX 1.3.6.1.4.1.1466.115.121.1.12 SINGLE-VALUE ) +olcAttributeTypes: ( 2.16.840.1.113730.3.1.34 NAME 'ref' DESC 'RFC3296: subo + rdinate referral URL' EQUALITY caseExactMatch SYNTAX 1.3.6.1.4.1.1466.115.1 + 21.1.15 USAGE distributedOperation ) +olcAttributeTypes: ( 1.3.6.1.4.1.4203.1.3.1 NAME 'entry' DESC 'OpenLDAP ACL + entry pseudo-attribute' SYNTAX 1.3.6.1.4.1.4203.1.1.1 SINGLE-VALUE 
NO-USER- + MODIFICATION USAGE dSAOperation ) +olcAttributeTypes: ( 1.3.6.1.4.1.4203.1.3.2 NAME 'children' DESC 'OpenLDAP A + CL children pseudo-attribute' SYNTAX 1.3.6.1.4.1.4203.1.1.1 SINGLE-VALUE NO + -USER-MODIFICATION USAGE dSAOperation ) +olcAttributeTypes: ( 1.3.6.1.4.1.4203.666.1.8 NAME ( 'authzTo' 'saslAuthzTo' + ) DESC 'proxy authorization targets' EQUALITY authzMatch SYNTAX 1.3.6.1.4. + 1.4203.666.2.7 USAGE distributedOperation X-ORDERED 'VALUES' ) +olcAttributeTypes: ( 1.3.6.1.4.1.4203.666.1.9 NAME ( 'authzFrom' 'saslAuthzF + rom' ) DESC 'proxy authorization sources' EQUALITY authzMatch SYNTAX 1.3.6. + 1.4.1.4203.666.2.7 USAGE distributedOperation X-ORDERED 'VALUES' ) +olcAttributeTypes: ( 1.3.6.1.4.1.1466.101.119.3 NAME 'entryTtl' DESC 'RFC258 + 9: entry time-to-live' SYNTAX 1.3.6.1.4.1.1466.115.121.1.27 SINGLE-VALUE NO + -USER-MODIFICATION USAGE dSAOperation ) +olcAttributeTypes: ( 1.3.6.1.4.1.1466.101.119.4 NAME 'dynamicSubtrees' DESC + 'RFC2589: dynamic subtrees' SYNTAX 1.3.6.1.4.1.1466.115.121.1.12 NO-USER-MO + DIFICATION USAGE dSAOperation ) +olcAttributeTypes: ( 2.5.4.49 NAME 'distinguishedName' DESC 'RFC4519: common + supertype of DN attributes' EQUALITY distinguishedNameMatch SYNTAX 1.3.6.1 + .4.1.1466.115.121.1.12 ) +olcAttributeTypes: ( 2.5.4.41 NAME 'name' DESC 'RFC4519: common supertype of + name attributes' EQUALITY caseIgnoreMatch SUBSTR caseIgnoreSubstringsMatch + SYNTAX 1.3.6.1.4.1.1466.115.121.1.15{32768} ) +olcAttributeTypes: ( 2.5.4.3 NAME ( 'cn' 'commonName' ) DESC 'RFC4519: commo + n name(s) for which the entity is known by' SUP name ) +olcAttributeTypes: ( 0.9.2342.19200300.100.1.1 NAME ( 'uid' 'userid' ) DESC + 'RFC4519: user identifier' EQUALITY caseIgnoreMatch SUBSTR caseIgnoreSubstr + ingsMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.15{256} ) +olcAttributeTypes: ( 1.3.6.1.1.1.1.0 NAME 'uidNumber' DESC 'RFC2307: An inte + ger uniquely identifying a user in an administrative domain' EQUALITY integ + erMatch ORDERING 
integerOrderingMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.27 + SINGLE-VALUE ) +olcAttributeTypes: ( 1.3.6.1.1.1.1.1 NAME 'gidNumber' DESC 'RFC2307: An inte + ger uniquely identifying a group in an administrative domain' EQUALITY inte + gerMatch ORDERING integerOrderingMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.27 + SINGLE-VALUE ) +olcAttributeTypes: ( 2.5.4.35 NAME 'userPassword' DESC 'RFC4519/2307: passwo + rd of user' EQUALITY octetStringMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.40{ + 128} ) +olcAttributeTypes: ( 1.3.6.1.4.1.250.1.57 NAME 'labeledURI' DESC 'RFC2079: U + niform Resource Identifier with optional label' EQUALITY caseExactMatch SYN + TAX 1.3.6.1.4.1.1466.115.121.1.15 ) +olcAttributeTypes: ( 2.5.4.13 NAME 'description' DESC 'RFC4519: descriptive + information' EQUALITY caseIgnoreMatch SUBSTR caseIgnoreSubstringsMatch SYNT + AX 1.3.6.1.4.1.1466.115.121.1.15{1024} ) +olcAttributeTypes: ( 2.5.4.34 NAME 'seeAlso' DESC 'RFC4519: DN of related ob + ject' SUP distinguishedName ) +olcAttributeTypes: ( OLcfgGlAt:78 NAME 'olcConfigFile' DESC 'File for slapd + configuration directives' EQUALITY caseIgnoreMatch SYNTAX OMsDirectoryStrin + g SINGLE-VALUE ) +olcAttributeTypes: ( OLcfgGlAt:79 NAME 'olcConfigDir' DESC 'Directory for sl + apd configuration backend' EQUALITY caseIgnoreMatch SYNTAX OMsDirectoryStri + ng SINGLE-VALUE ) +olcAttributeTypes: ( OLcfgGlAt:1 NAME 'olcAccess' DESC 'Access Control List' + EQUALITY caseIgnoreMatch SYNTAX OMsDirectoryString X-ORDERED 'VALUES' ) +olcAttributeTypes: ( OLcfgGlAt:86 NAME 'olcAddContentAcl' DESC 'Check ACLs a + gainst content of Add ops' SYNTAX OMsBoolean SINGLE-VALUE ) +olcAttributeTypes: ( OLcfgGlAt:2 NAME 'olcAllows' DESC 'Allowed set of depre + cated features' EQUALITY caseIgnoreMatch SYNTAX OMsDirectoryString ) +olcAttributeTypes: ( OLcfgGlAt:3 NAME 'olcArgsFile' DESC 'File for slapd com + mand line options' EQUALITY caseIgnoreMatch SYNTAX OMsDirectoryString SINGL + E-VALUE ) +olcAttributeTypes: ( OLcfgGlAt:5 NAME 
'olcAttributeOptions' EQUALITY caseIgn + oreMatch SYNTAX OMsDirectoryString ) +olcAttributeTypes: ( OLcfgGlAt:4 NAME 'olcAttributeTypes' DESC 'OpenLDAP att + ributeTypes' EQUALITY caseIgnoreMatch SUBSTR caseIgnoreSubstringsMatch SYNT + AX OMsDirectoryString X-ORDERED 'VALUES' ) +olcAttributeTypes: ( OLcfgGlAt:6 NAME 'olcAuthIDRewrite' EQUALITY caseIgnore + Match SYNTAX OMsDirectoryString X-ORDERED 'VALUES' ) +olcAttributeTypes: ( OLcfgGlAt:7 NAME 'olcAuthzPolicy' EQUALITY caseIgnoreMa + tch SYNTAX OMsDirectoryString SINGLE-VALUE ) +olcAttributeTypes: ( OLcfgGlAt:8 NAME 'olcAuthzRegexp' EQUALITY caseIgnoreMa + tch SYNTAX OMsDirectoryString X-ORDERED 'VALUES' ) +olcAttributeTypes: ( OLcfgGlAt:9 NAME 'olcBackend' DESC 'A type of backend' + EQUALITY caseIgnoreMatch SYNTAX OMsDirectoryString SINGLE-VALUE X-ORDERED ' + SIBLINGS' ) +olcAttributeTypes: ( OLcfgGlAt:10 NAME 'olcConcurrency' SYNTAX OMsInteger SI + NGLE-VALUE ) +olcAttributeTypes: ( OLcfgGlAt:11 NAME 'olcConnMaxPending' SYNTAX OMsInteger + SINGLE-VALUE ) +olcAttributeTypes: ( OLcfgGlAt:12 NAME 'olcConnMaxPendingAuth' SYNTAX OMsInt + eger SINGLE-VALUE ) +olcAttributeTypes: ( OLcfgGlAt:13 NAME 'olcDatabase' DESC 'The backend type + for a database instance' SUP olcBackend SINGLE-VALUE X-ORDERED 'SIBLINGS' ) +olcAttributeTypes: ( OLcfgGlAt:14 NAME 'olcDefaultSearchBase' SYNTAX OMsDN S + INGLE-VALUE ) +olcAttributeTypes: ( OLcfgGlAt:15 NAME 'olcDisallows' EQUALITY caseIgnoreMat + ch SYNTAX OMsDirectoryString ) +olcAttributeTypes: ( OLcfgGlAt:16 NAME 'olcDitContentRules' DESC 'OpenLDAP D + IT content rules' EQUALITY caseIgnoreMatch SUBSTR caseIgnoreSubstringsMatch + SYNTAX OMsDirectoryString X-ORDERED 'VALUES' ) +olcAttributeTypes: ( OLcfgDbAt:0.20 NAME 'olcExtraAttrs' EQUALITY caseIgnore + Match SYNTAX OMsDirectoryString ) +olcAttributeTypes: ( OLcfgGlAt:17 NAME 'olcGentleHUP' SYNTAX OMsBoolean SING + LE-VALUE ) +olcAttributeTypes: ( OLcfgDbAt:0.17 NAME 'olcHidden' SYNTAX OMsBoolean SINGL + E-VALUE ) 
+olcAttributeTypes: ( OLcfgGlAt:18 NAME 'olcIdleTimeout' SYNTAX OMsInteger SI + NGLE-VALUE ) +olcAttributeTypes: ( OLcfgGlAt:19 NAME 'olcInclude' SUP labeledURI ) +olcAttributeTypes: ( OLcfgGlAt:20 NAME 'olcIndexSubstrIfMinLen' SYNTAX OMsIn + teger SINGLE-VALUE ) +olcAttributeTypes: ( OLcfgGlAt:21 NAME 'olcIndexSubstrIfMaxLen' SYNTAX OMsIn + teger SINGLE-VALUE ) +olcAttributeTypes: ( OLcfgGlAt:22 NAME 'olcIndexSubstrAnyLen' SYNTAX OMsInte + ger SINGLE-VALUE ) +olcAttributeTypes: ( OLcfgGlAt:23 NAME 'olcIndexSubstrAnyStep' SYNTAX OMsInt + eger SINGLE-VALUE ) +olcAttributeTypes: ( OLcfgGlAt:84 NAME 'olcIndexIntLen' SYNTAX OMsInteger SI + NGLE-VALUE ) +olcAttributeTypes: ( OLcfgDbAt:0.4 NAME 'olcLastMod' SYNTAX OMsBoolean SINGL + E-VALUE ) +olcAttributeTypes: ( OLcfgGlAt:85 NAME 'olcLdapSyntaxes' DESC 'OpenLDAP ldap + Syntax' EQUALITY caseIgnoreMatch SUBSTR caseIgnoreSubstringsMatch SYNTAX OM + sDirectoryString X-ORDERED 'VALUES' ) +olcAttributeTypes: ( OLcfgDbAt:0.5 NAME 'olcLimits' EQUALITY caseIgnoreMatch + SYNTAX OMsDirectoryString X-ORDERED 'VALUES' ) +olcAttributeTypes: ( OLcfgGlAt:93 NAME 'olcListenerThreads' SYNTAX OMsIntege + r SINGLE-VALUE ) +olcAttributeTypes: ( OLcfgGlAt:26 NAME 'olcLocalSSF' SYNTAX OMsInteger SINGL + E-VALUE ) +olcAttributeTypes: ( OLcfgGlAt:27 NAME 'olcLogFile' SYNTAX OMsDirectoryStrin + g SINGLE-VALUE ) +olcAttributeTypes: ( OLcfgGlAt:28 NAME 'olcLogLevel' EQUALITY caseIgnoreMatc + h SYNTAX OMsDirectoryString ) +olcAttributeTypes: ( OLcfgDbAt:0.6 NAME 'olcMaxDerefDepth' SYNTAX OMsInteger + SINGLE-VALUE ) +olcAttributeTypes: ( OLcfgDbAt:0.16 NAME 'olcMirrorMode' SYNTAX OMsBoolean S + INGLE-VALUE ) +olcAttributeTypes: ( OLcfgGlAt:30 NAME 'olcModuleLoad' EQUALITY caseIgnoreMa + tch SYNTAX OMsDirectoryString X-ORDERED 'VALUES' ) +olcAttributeTypes: ( OLcfgGlAt:31 NAME 'olcModulePath' SYNTAX OMsDirectorySt + ring SINGLE-VALUE ) +olcAttributeTypes: ( OLcfgDbAt:0.18 NAME 'olcMonitoring' SYNTAX OMsBoolean S + INGLE-VALUE ) +olcAttributeTypes: ( 
OLcfgGlAt:32 NAME 'olcObjectClasses' DESC 'OpenLDAP obj + ect classes' EQUALITY caseIgnoreMatch SUBSTR caseIgnoreSubstringsMatch SYNT + AX OMsDirectoryString X-ORDERED 'VALUES' ) +olcAttributeTypes: ( OLcfgGlAt:33 NAME 'olcObjectIdentifier' EQUALITY caseIg + noreMatch SUBSTR caseIgnoreSubstringsMatch SYNTAX OMsDirectoryString X-ORDE + RED 'VALUES' ) +olcAttributeTypes: ( OLcfgGlAt:34 NAME 'olcOverlay' SUP olcDatabase SINGLE-V + ALUE X-ORDERED 'SIBLINGS' ) +olcAttributeTypes: ( OLcfgGlAt:35 NAME 'olcPasswordCryptSaltFormat' SYNTAX O + MsDirectoryString SINGLE-VALUE ) +olcAttributeTypes: ( OLcfgGlAt:36 NAME 'olcPasswordHash' EQUALITY caseIgnore + Match SYNTAX OMsDirectoryString ) +olcAttributeTypes: ( OLcfgGlAt:37 NAME 'olcPidFile' SYNTAX OMsDirectoryStrin + g SINGLE-VALUE ) +olcAttributeTypes: ( OLcfgGlAt:38 NAME 'olcPlugin' EQUALITY caseIgnoreMatch + SYNTAX OMsDirectoryString ) +olcAttributeTypes: ( OLcfgGlAt:39 NAME 'olcPluginLogFile' SYNTAX OMsDirector + yString SINGLE-VALUE ) +olcAttributeTypes: ( OLcfgGlAt:40 NAME 'olcReadOnly' SYNTAX OMsBoolean SINGL + E-VALUE ) +olcAttributeTypes: ( OLcfgGlAt:41 NAME 'olcReferral' SUP labeledURI SINGLE-V + ALUE ) +olcAttributeTypes: ( OLcfgDbAt:0.7 NAME 'olcReplica' SUP labeledURI EQUALITY + caseIgnoreMatch X-ORDERED 'VALUES' ) +olcAttributeTypes: ( OLcfgGlAt:43 NAME 'olcReplicaArgsFile' SYNTAX OMsDirect + oryString SINGLE-VALUE ) +olcAttributeTypes: ( OLcfgGlAt:44 NAME 'olcReplicaPidFile' SYNTAX OMsDirecto + ryString SINGLE-VALUE ) +olcAttributeTypes: ( OLcfgGlAt:45 NAME 'olcReplicationInterval' SYNTAX OMsIn + teger SINGLE-VALUE ) +olcAttributeTypes: ( OLcfgGlAt:46 NAME 'olcReplogFile' SYNTAX OMsDirectorySt + ring SINGLE-VALUE ) +olcAttributeTypes: ( OLcfgGlAt:47 NAME 'olcRequires' EQUALITY caseIgnoreMatc + h SYNTAX OMsDirectoryString ) +olcAttributeTypes: ( OLcfgGlAt:48 NAME 'olcRestrict' EQUALITY caseIgnoreMatc + h SYNTAX OMsDirectoryString ) +olcAttributeTypes: ( OLcfgGlAt:49 NAME 'olcReverseLookup' SYNTAX OMsBoolean + 
SINGLE-VALUE ) +olcAttributeTypes: ( OLcfgDbAt:0.8 NAME 'olcRootDN' EQUALITY distinguishedNa + meMatch SYNTAX OMsDN SINGLE-VALUE ) +olcAttributeTypes: ( OLcfgGlAt:51 NAME 'olcRootDSE' EQUALITY caseIgnoreMatch + SYNTAX OMsDirectoryString ) +olcAttributeTypes: ( OLcfgDbAt:0.9 NAME 'olcRootPW' SYNTAX OMsDirectoryStrin + g SINGLE-VALUE ) +olcAttributeTypes: ( OLcfgGlAt:89 NAME 'olcSaslAuxprops' SYNTAX OMsDirectory + String SINGLE-VALUE ) +olcAttributeTypes: ( OLcfgGlAt:53 NAME 'olcSaslHost' SYNTAX OMsDirectoryStri + ng SINGLE-VALUE ) +olcAttributeTypes: ( OLcfgGlAt:54 NAME 'olcSaslRealm' SYNTAX OMsDirectoryStr + ing SINGLE-VALUE ) +olcAttributeTypes: ( OLcfgGlAt:56 NAME 'olcSaslSecProps' SYNTAX OMsDirectory + String SINGLE-VALUE ) +olcAttributeTypes: ( OLcfgGlAt:58 NAME 'olcSchemaDN' EQUALITY distinguishedN + ameMatch SYNTAX OMsDN SINGLE-VALUE ) +olcAttributeTypes: ( OLcfgGlAt:59 NAME 'olcSecurity' EQUALITY caseIgnoreMatc + h SYNTAX OMsDirectoryString ) +olcAttributeTypes: ( OLcfgGlAt:81 NAME 'olcServerID' EQUALITY caseIgnoreMatc + h SYNTAX OMsDirectoryString ) +olcAttributeTypes: ( OLcfgGlAt:60 NAME 'olcSizeLimit' SYNTAX OMsDirectoryStr + ing SINGLE-VALUE ) +olcAttributeTypes: ( OLcfgGlAt:61 NAME 'olcSockbufMaxIncoming' SYNTAX OMsInt + eger SINGLE-VALUE ) +olcAttributeTypes: ( OLcfgGlAt:62 NAME 'olcSockbufMaxIncomingAuth' SYNTAX OM + sInteger SINGLE-VALUE ) +olcAttributeTypes: ( OLcfgGlAt:83 NAME 'olcSortVals' DESC 'Attributes whose + values will always be sorted' EQUALITY caseIgnoreMatch SYNTAX OMsDirectoryS + tring ) +olcAttributeTypes: ( OLcfgDbAt:0.15 NAME 'olcSubordinate' SYNTAX OMsDirector + yString SINGLE-VALUE ) +olcAttributeTypes: ( OLcfgDbAt:0.10 NAME 'olcSuffix' EQUALITY distinguishedN + ameMatch SYNTAX OMsDN ) +olcAttributeTypes: ( OLcfgDbAt:0.19 NAME 'olcSyncUseSubentry' DESC 'Store sy + nc context in a subentry' SYNTAX OMsBoolean SINGLE-VALUE ) +olcAttributeTypes: ( OLcfgDbAt:0.11 NAME 'olcSyncrepl' EQUALITY caseIgnoreMa + tch SYNTAX OMsDirectoryString 
X-ORDERED 'VALUES' ) +olcAttributeTypes: ( OLcfgGlAt:90 NAME 'olcTCPBuffer' DESC 'Custom TCP buffe + r size' SYNTAX OMsDirectoryString ) +olcAttributeTypes: ( OLcfgGlAt:66 NAME 'olcThreads' SYNTAX OMsInteger SINGLE + -VALUE ) +olcAttributeTypes: ( OLcfgGlAt:67 NAME 'olcTimeLimit' SYNTAX OMsDirectoryStr + ing ) +olcAttributeTypes: ( OLcfgGlAt:68 NAME 'olcTLSCACertificateFile' SYNTAX OMsD + irectoryString SINGLE-VALUE ) +olcAttributeTypes: ( OLcfgGlAt:69 NAME 'olcTLSCACertificatePath' SYNTAX OMsD + irectoryString SINGLE-VALUE ) +olcAttributeTypes: ( OLcfgGlAt:70 NAME 'olcTLSCertificateFile' SYNTAX OMsDir + ectoryString SINGLE-VALUE ) +olcAttributeTypes: ( OLcfgGlAt:71 NAME 'olcTLSCertificateKeyFile' SYNTAX OMs + DirectoryString SINGLE-VALUE ) +olcAttributeTypes: ( OLcfgGlAt:72 NAME 'olcTLSCipherSuite' SYNTAX OMsDirecto + ryString SINGLE-VALUE ) +olcAttributeTypes: ( OLcfgGlAt:73 NAME 'olcTLSCRLCheck' SYNTAX OMsDirectoryS + tring SINGLE-VALUE ) +olcAttributeTypes: ( OLcfgGlAt:82 NAME 'olcTLSCRLFile' SYNTAX OMsDirectorySt + ring SINGLE-VALUE ) +olcAttributeTypes: ( OLcfgGlAt:74 NAME 'olcTLSRandFile' SYNTAX OMsDirectoryS + tring SINGLE-VALUE ) +olcAttributeTypes: ( OLcfgGlAt:75 NAME 'olcTLSVerifyClient' SYNTAX OMsDirect + oryString SINGLE-VALUE ) +olcAttributeTypes: ( OLcfgGlAt:77 NAME 'olcTLSDHParamFile' SYNTAX OMsDirecto + ryString SINGLE-VALUE ) +olcAttributeTypes: ( OLcfgGlAt:87 NAME 'olcTLSProtocolMin' SYNTAX OMsDirecto + ryString SINGLE-VALUE ) +olcAttributeTypes: ( OLcfgGlAt:80 NAME 'olcToolThreads' SYNTAX OMsInteger SI + NGLE-VALUE ) +olcAttributeTypes: ( OLcfgDbAt:0.12 NAME 'olcUpdateDN' SYNTAX OMsDN SINGLE-V + ALUE ) +olcAttributeTypes: ( OLcfgDbAt:0.13 NAME 'olcUpdateRef' SUP labeledURI EQUAL + ITY caseIgnoreMatch ) +olcAttributeTypes: ( OLcfgGlAt:88 NAME 'olcWriteTimeout' SYNTAX OMsInteger S + INGLE-VALUE ) +olcAttributeTypes: ( OLcfgDbAt:0.1 NAME 'olcDbDirectory' DESC 'Directory for + database content' EQUALITY caseIgnoreMatch SYNTAX OMsDirectoryString 
SINGL + E-VALUE ) +olcAttributeTypes: ( 1.3.6.1.4.1.4203.666.1.5 NAME 'OpenLDAPaci' DESC 'OpenL + DAP access control information (experimental)' EQUALITY OpenLDAPaciMatch SY + NTAX 1.3.6.1.4.1.4203.666.2.1 USAGE directoryOperation ) +olcAttributeTypes: ( OLcfgDbAt:1.11 NAME 'olcDbCacheFree' DESC 'Number of ex + tra entries to free when max is reached' SYNTAX OMsInteger SINGLE-VALUE ) +olcAttributeTypes: ( OLcfgDbAt:1.1 NAME 'olcDbCacheSize' DESC 'Entry cache s + ize in entries' SYNTAX OMsInteger SINGLE-VALUE ) +olcAttributeTypes: ( OLcfgDbAt:1.2 NAME 'olcDbCheckpoint' DESC 'Database che + ckpoint interval in kbytes and minutes' SYNTAX OMsDirectoryString SINGLE-VA + LUE ) +olcAttributeTypes: ( OLcfgDbAt:1.16 NAME 'olcDbChecksum' DESC 'Enable databa + se checksum validation' SYNTAX OMsBoolean SINGLE-VALUE ) +olcAttributeTypes: ( OLcfgDbAt:1.13 NAME 'olcDbCryptFile' DESC 'Pathname of + file containing the DB encryption key' SYNTAX OMsDirectoryString SINGLE-VAL + UE ) +olcAttributeTypes: ( OLcfgDbAt:1.14 NAME 'olcDbCryptKey' DESC 'DB encryption + key' SYNTAX OMsOctetString SINGLE-VALUE ) +olcAttributeTypes: ( OLcfgDbAt:1.3 NAME 'olcDbConfig' DESC 'BerkeleyDB DB_CO + NFIG configuration directives' SYNTAX OMsIA5String X-ORDERED 'VALUES' ) +olcAttributeTypes: ( OLcfgDbAt:1.4 NAME 'olcDbNoSync' DESC 'Disable synchron + ous database writes' SYNTAX OMsBoolean SINGLE-VALUE ) +olcAttributeTypes: ( OLcfgDbAt:1.15 NAME 'olcDbPageSize' DESC 'Page size of + specified DB, in Kbytes' EQUALITY caseExactMatch SYNTAX OMsDirectoryString + ) +olcAttributeTypes: ( OLcfgDbAt:1.5 NAME 'olcDbDirtyRead' DESC 'Allow reads o + f uncommitted data' SYNTAX OMsBoolean SINGLE-VALUE ) +olcAttributeTypes: ( OLcfgDbAt:1.12 NAME 'olcDbDNcacheSize' DESC 'DN cache s + ize' SYNTAX OMsInteger SINGLE-VALUE ) +olcAttributeTypes: ( OLcfgDbAt:1.6 NAME 'olcDbIDLcacheSize' DESC 'IDL cache + size in IDLs' SYNTAX OMsInteger SINGLE-VALUE ) +olcAttributeTypes: ( OLcfgDbAt:0.2 NAME 'olcDbIndex' DESC 'Attribute index p 
+ arameters' EQUALITY caseIgnoreMatch SYNTAX OMsDirectoryString ) +olcAttributeTypes: ( OLcfgDbAt:1.7 NAME 'olcDbLinearIndex' DESC 'Index attri + butes one at a time' SYNTAX OMsBoolean SINGLE-VALUE ) +olcAttributeTypes: ( OLcfgDbAt:1.8 NAME 'olcDbLockDetect' DESC 'Deadlock det + ection algorithm' SYNTAX OMsDirectoryString SINGLE-VALUE ) +olcAttributeTypes: ( OLcfgDbAt:0.3 NAME 'olcDbMode' DESC 'Unix permissions o + f database files' SYNTAX OMsDirectoryString SINGLE-VALUE ) +olcAttributeTypes: ( OLcfgDbAt:1.9 NAME 'olcDbSearchStack' DESC 'Depth of se + arch stack in IDLs' SYNTAX OMsInteger SINGLE-VALUE ) +olcAttributeTypes: ( OLcfgDbAt:1.10 NAME 'olcDbShmKey' DESC 'Key for shared + memory region' SYNTAX OMsInteger SINGLE-VALUE ) +olcObjectClasses: ( 2.5.6.0 NAME 'top' DESC 'top of the superclass chain' AB + STRACT MUST objectClass ) +olcObjectClasses: ( 1.3.6.1.4.1.1466.101.120.111 NAME 'extensibleObject' DES + C 'RFC4512: extensible object' SUP top AUXILIARY ) +olcObjectClasses: ( 2.5.6.1 NAME 'alias' DESC 'RFC4512: an alias' SUP top ST + RUCTURAL MUST aliasedObjectName ) +olcObjectClasses: ( 2.16.840.1.113730.3.2.6 NAME 'referral' DESC 'namedref: + named subordinate referral' SUP top STRUCTURAL MUST ref ) +olcObjectClasses: ( 1.3.6.1.4.1.4203.1.4.1 NAME ( 'OpenLDAProotDSE' 'LDAProo + tDSE' ) DESC 'OpenLDAP Root DSE object' SUP top STRUCTURAL MAY cn ) +olcObjectClasses: ( 2.5.17.0 NAME 'subentry' DESC 'RFC3672: subentry' SUP to + p STRUCTURAL MUST ( cn $ subtreeSpecification ) ) +olcObjectClasses: ( 2.5.20.1 NAME 'subschema' DESC 'RFC4512: controlling sub + schema (sub)entry' AUXILIARY MAY ( dITStructureRules $ nameForms $ dITConte + ntRules $ objectClasses $ attributeTypes $ matchingRules $ matchingRuleUse + ) ) +olcObjectClasses: ( 2.5.17.2 NAME 'collectiveAttributeSubentry' DESC 'RFC367 + 1: collective attribute subentry' AUXILIARY ) +olcObjectClasses: ( 1.3.6.1.4.1.1466.101.119.2 NAME 'dynamicObject' DESC 'RF + C2589: Dynamic Object' SUP top AUXILIARY ) 
+olcObjectClasses: ( 1.3.6.1.4.1.4203.666.3.4 NAME 'glue' DESC 'Glue Entry' S + UP top STRUCTURAL ) +olcObjectClasses: ( 1.3.6.1.4.1.4203.666.3.5 NAME 'syncConsumerSubentry' DES + C 'Persistent Info for SyncRepl Consumer' AUXILIARY MAY syncreplCookie ) +olcObjectClasses: ( 1.3.6.1.4.1.4203.666.3.6 NAME 'syncProviderSubentry' DES + C 'Persistent Info for SyncRepl Producer' AUXILIARY MAY contextCSN ) +olcObjectClasses: ( OLcfgGlOc:0 NAME 'olcConfig' DESC 'OpenLDAP configuratio + n object' SUP top ABSTRACT ) +olcObjectClasses: ( OLcfgGlOc:1 NAME 'olcGlobal' DESC 'OpenLDAP Global confi + guration options' SUP olcConfig STRUCTURAL MAY ( cn $ olcConfigFile $ olcCo + nfigDir $ olcAllows $ olcArgsFile $ olcAttributeOptions $ olcAuthIDRewrite + $ olcAuthzPolicy $ olcAuthzRegexp $ olcConcurrency $ olcConnMaxPending $ ol + cConnMaxPendingAuth $ olcDisallows $ olcGentleHUP $ olcIdleTimeout $ olcInd + exSubstrIfMaxLen $ olcIndexSubstrIfMinLen $ olcIndexSubstrAnyLen $ olcIndex + SubstrAnyStep $ olcIndexIntLen $ olcListenerThreads $ olcLocalSSF $ olcLogF + ile $ olcLogLevel $ olcPasswordCryptSaltFormat $ olcPasswordHash $ olcPidFi + le $ olcPluginLogFile $ olcReadOnly $ olcReferral $ olcReplogFile $ olcRequ + ires $ olcRestrict $ olcReverseLookup $ olcRootDSE $ olcSaslAuxprops $ olcS + aslHost $ olcSaslRealm $ olcSaslSecProps $ olcSecurity $ olcServerID $ olcS + izeLimit $ olcSockbufMaxIncoming $ olcSockbufMaxIncomingAuth $ olcTCPBuffer + $ olcThreads $ olcTimeLimit $ olcTLSCACertificateFile $ olcTLSCACertificat + ePath $ olcTLSCertificateFile $ olcTLSCertificateKeyFile $ olcTLSCipherSuit + e $ olcTLSCRLCheck $ olcTLSRandFile $ olcTLSVerifyClient $ olcTLSDHParamFil + e $ olcTLSCRLFile $ olcTLSProtocolMin $ olcToolThreads $ olcWriteTimeout $ + olcObjectIdentifier $ olcAttributeTypes $ olcObjectClasses $ olcDitContentR + ules $ olcLdapSyntaxes ) ) +olcObjectClasses: ( OLcfgGlOc:2 NAME 'olcSchemaConfig' DESC 'OpenLDAP schema + object' SUP olcConfig STRUCTURAL MAY ( cn $ 
olcObjectIdentifier $ olcLdapS + yntaxes $ olcAttributeTypes $ olcObjectClasses $ olcDitContentRules ) ) +olcObjectClasses: ( OLcfgGlOc:3 NAME 'olcBackendConfig' DESC 'OpenLDAP Backe + nd-specific options' SUP olcConfig STRUCTURAL MUST olcBackend ) +olcObjectClasses: ( OLcfgGlOc:4 NAME 'olcDatabaseConfig' DESC 'OpenLDAP Data + base-specific options' SUP olcConfig STRUCTURAL MUST olcDatabase MAY ( olcH + idden $ olcSuffix $ olcSubordinate $ olcAccess $ olcAddContentAcl $ olcLast + Mod $ olcLimits $ olcMaxDerefDepth $ olcPlugin $ olcReadOnly $ olcReplica $ + olcReplicaArgsFile $ olcReplicaPidFile $ olcReplicationInterval $ olcReplo + gFile $ olcRequires $ olcRestrict $ olcRootDN $ olcRootPW $ olcSchemaDN $ o + lcSecurity $ olcSizeLimit $ olcSyncUseSubentry $ olcSyncrepl $ olcTimeLimit + $ olcUpdateDN $ olcUpdateRef $ olcMirrorMode $ olcMonitoring $ olcExtraAtt + rs ) ) +olcObjectClasses: ( OLcfgGlOc:5 NAME 'olcOverlayConfig' DESC 'OpenLDAP Overl + ay-specific options' SUP olcConfig STRUCTURAL MUST olcOverlay ) +olcObjectClasses: ( OLcfgGlOc:6 NAME 'olcIncludeFile' DESC 'OpenLDAP configu + ration include file' SUP olcConfig STRUCTURAL MUST olcInclude MAY ( cn $ ol + cRootDSE ) ) +olcObjectClasses: ( OLcfgGlOc:7 NAME 'olcFrontendConfig' DESC 'OpenLDAP fron + tend configuration' AUXILIARY MAY ( olcDefaultSearchBase $ olcPasswordHash + $ olcSortVals ) ) +olcObjectClasses: ( OLcfgGlOc:8 NAME 'olcModuleList' DESC 'OpenLDAP dynamic + module info' SUP olcConfig STRUCTURAL MAY ( cn $ olcModulePath $ olcModuleL + oad ) ) +olcObjectClasses: ( OLcfgDbOc:2.1 NAME 'olcLdifConfig' DESC 'LDIF backend co + nfiguration' SUP olcDatabaseConfig STRUCTURAL MUST olcDbDirectory ) +olcObjectClasses: ( OLcfgDbOc:1.2 NAME 'olcHdbConfig' DESC 'HDB backend conf + iguration' SUP olcDatabaseConfig STRUCTURAL MUST olcDbDirectory MAY ( olcDb + CacheSize $ olcDbCheckpoint $ olcDbChecksum $ olcDbConfig $ olcDbCryptFile + $ olcDbCryptKey $ olcDbNoSync $ olcDbDirtyRead $ olcDbIDLcacheSize $ olcDbI + 
ndex $ olcDbLinearIndex $ olcDbLockDetect $ olcDbMode $ olcDbSearchStack $ + olcDbShmKey $ olcDbCacheFree $ olcDbDNcacheSize $ olcDbPageSize ) ) +structuralObjectClass: olcSchemaConfig +entryUUID: 12129616-e5e6-103a-973e-d731be523aab +creatorsName: cn=config +createTimestamp: 20210108101443Z +entryCSN: 20210108101443.265809Z#000000#000#000000 +modifiersName: cn=config +modifyTimestamp: 20210108101443Z diff --git a/dirsrvtests/tests/data/openldap_2_389/4539/slapd.d/cn=config/cn=schema/cn={0}core.ldif b/dirsrvtests/tests/data/openldap_2_389/4539/slapd.d/cn=config/cn=schema/cn={0}core.ldif new file mode 100644 index 0000000..479c5aa --- /dev/null +++ b/dirsrvtests/tests/data/openldap_2_389/4539/slapd.d/cn=config/cn=schema/cn={0}core.ldif @@ -0,0 +1,247 @@ +# AUTO-GENERATED FILE - DO NOT EDIT!! Use ldapmodify. +# CRC32 db8103c5 +dn: cn={0}core +objectClass: olcSchemaConfig +cn: {0}core +olcAttributeTypes: {0}( 2.5.4.2 NAME 'knowledgeInformation' DESC 'RFC2256: k + nowledge information' EQUALITY caseIgnoreMatch SYNTAX 1.3.6.1.4.1.1466.115. + 121.1.15{32768} ) +olcAttributeTypes: {1}( 2.5.4.4 NAME ( 'sn' 'surname' ) DESC 'RFC2256: last + (family) name(s) for which the entity is known by' SUP name ) +olcAttributeTypes: {2}( 2.5.4.5 NAME 'serialNumber' DESC 'RFC2256: serial nu + mber of the entity' EQUALITY caseIgnoreMatch SUBSTR caseIgnoreSubstringsMat + ch SYNTAX 1.3.6.1.4.1.1466.115.121.1.44{64} ) +olcAttributeTypes: {3}( 2.5.4.6 NAME ( 'c' 'countryName' ) DESC 'RFC4519: tw + o-letter ISO-3166 country code' SUP name SYNTAX 1.3.6.1.4.1.1466.115.121.1. 
+ 11 SINGLE-VALUE ) +olcAttributeTypes: {4}( 2.5.4.7 NAME ( 'l' 'localityName' ) DESC 'RFC2256: l + ocality which this object resides in' SUP name ) +olcAttributeTypes: {5}( 2.5.4.8 NAME ( 'st' 'stateOrProvinceName' ) DESC 'RF + C2256: state or province which this object resides in' SUP name ) +olcAttributeTypes: {6}( 2.5.4.9 NAME ( 'street' 'streetAddress' ) DESC 'RFC2 + 256: street address of this object' EQUALITY caseIgnoreMatch SUBSTR caseIgn + oreSubstringsMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.15{128} ) +olcAttributeTypes: {7}( 2.5.4.10 NAME ( 'o' 'organizationName' ) DESC 'RFC22 + 56: organization this object belongs to' SUP name ) +olcAttributeTypes: {8}( 2.5.4.11 NAME ( 'ou' 'organizationalUnitName' ) DESC + 'RFC2256: organizational unit this object belongs to' SUP name ) +olcAttributeTypes: {9}( 2.5.4.12 NAME 'title' DESC 'RFC2256: title associate + d with the entity' SUP name ) +olcAttributeTypes: {10}( 2.5.4.14 NAME 'searchGuide' DESC 'RFC2256: search g + uide, deprecated by enhancedSearchGuide' SYNTAX 1.3.6.1.4.1.1466.115.121.1. + 25 ) +olcAttributeTypes: {11}( 2.5.4.15 NAME 'businessCategory' DESC 'RFC2256: bus + iness category' EQUALITY caseIgnoreMatch SUBSTR caseIgnoreSubstringsMatch S + YNTAX 1.3.6.1.4.1.1466.115.121.1.15{128} ) +olcAttributeTypes: {12}( 2.5.4.16 NAME 'postalAddress' DESC 'RFC2256: postal + address' EQUALITY caseIgnoreListMatch SUBSTR caseIgnoreListSubstringsMatch + SYNTAX 1.3.6.1.4.1.1466.115.121.1.41 ) +olcAttributeTypes: {13}( 2.5.4.17 NAME 'postalCode' DESC 'RFC2256: postal co + de' EQUALITY caseIgnoreMatch SUBSTR caseIgnoreSubstringsMatch SYNTAX 1.3.6. 
+ 1.4.1.1466.115.121.1.15{40} ) +olcAttributeTypes: {14}( 2.5.4.18 NAME 'postOfficeBox' DESC 'RFC2256: Post O + ffice Box' EQUALITY caseIgnoreMatch SUBSTR caseIgnoreSubstringsMatch SYNTAX + 1.3.6.1.4.1.1466.115.121.1.15{40} ) +olcAttributeTypes: {15}( 2.5.4.19 NAME 'physicalDeliveryOfficeName' DESC 'RF + C2256: Physical Delivery Office Name' EQUALITY caseIgnoreMatch SUBSTR caseI + gnoreSubstringsMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.15{128} ) +olcAttributeTypes: {16}( 2.5.4.20 NAME 'telephoneNumber' DESC 'RFC2256: Tele + phone Number' EQUALITY telephoneNumberMatch SUBSTR telephoneNumberSubstring + sMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.50{32} ) +olcAttributeTypes: {17}( 2.5.4.21 NAME 'telexNumber' DESC 'RFC2256: Telex Nu + mber' SYNTAX 1.3.6.1.4.1.1466.115.121.1.52 ) +olcAttributeTypes: {18}( 2.5.4.22 NAME 'teletexTerminalIdentifier' DESC 'RFC + 2256: Teletex Terminal Identifier' SYNTAX 1.3.6.1.4.1.1466.115.121.1.51 ) +olcAttributeTypes: {19}( 2.5.4.23 NAME ( 'facsimileTelephoneNumber' 'fax' ) + DESC 'RFC2256: Facsimile (Fax) Telephone Number' SYNTAX 1.3.6.1.4.1.1466.11 + 5.121.1.22 ) +olcAttributeTypes: {20}( 2.5.4.24 NAME 'x121Address' DESC 'RFC2256: X.121 Ad + dress' EQUALITY numericStringMatch SUBSTR numericStringSubstringsMatch SYNT + AX 1.3.6.1.4.1.1466.115.121.1.36{15} ) +olcAttributeTypes: {21}( 2.5.4.25 NAME 'internationaliSDNNumber' DESC 'RFC22 + 56: international ISDN number' EQUALITY numericStringMatch SUBSTR numericSt + ringSubstringsMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.36{16} ) +olcAttributeTypes: {22}( 2.5.4.26 NAME 'registeredAddress' DESC 'RFC2256: re + gistered postal address' SUP postalAddress SYNTAX 1.3.6.1.4.1.1466.115.121. 
+ 1.41 ) +olcAttributeTypes: {23}( 2.5.4.27 NAME 'destinationIndicator' DESC 'RFC2256: + destination indicator' EQUALITY caseIgnoreMatch SUBSTR caseIgnoreSubstring + sMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.44{128} ) +olcAttributeTypes: {24}( 2.5.4.28 NAME 'preferredDeliveryMethod' DESC 'RFC22 + 56: preferred delivery method' SYNTAX 1.3.6.1.4.1.1466.115.121.1.14 SINGLE- + VALUE ) +olcAttributeTypes: {25}( 2.5.4.29 NAME 'presentationAddress' DESC 'RFC2256: + presentation address' EQUALITY presentationAddressMatch SYNTAX 1.3.6.1.4.1. + 1466.115.121.1.43 SINGLE-VALUE ) +olcAttributeTypes: {26}( 2.5.4.30 NAME 'supportedApplicationContext' DESC 'R + FC2256: supported application context' EQUALITY objectIdentifierMatch SYNTA + X 1.3.6.1.4.1.1466.115.121.1.38 ) +olcAttributeTypes: {27}( 2.5.4.31 NAME 'member' DESC 'RFC2256: member of a g + roup' SUP distinguishedName ) +olcAttributeTypes: {28}( 2.5.4.32 NAME 'owner' DESC 'RFC2256: owner (of the + object)' SUP distinguishedName ) +olcAttributeTypes: {29}( 2.5.4.33 NAME 'roleOccupant' DESC 'RFC2256: occupan + t of role' SUP distinguishedName ) +olcAttributeTypes: {30}( 2.5.4.36 NAME 'userCertificate' DESC 'RFC2256: X.50 + 9 user certificate, use ;binary' EQUALITY certificateExactMatch SYNTAX 1.3. + 6.1.4.1.1466.115.121.1.8 ) +olcAttributeTypes: {31}( 2.5.4.37 NAME 'cACertificate' DESC 'RFC2256: X.509 + CA certificate, use ;binary' EQUALITY certificateExactMatch SYNTAX 1.3.6.1. 
+ 4.1.1466.115.121.1.8 ) +olcAttributeTypes: {32}( 2.5.4.38 NAME 'authorityRevocationList' DESC 'RFC22 + 56: X.509 authority revocation list, use ;binary' SYNTAX 1.3.6.1.4.1.1466.1 + 15.121.1.9 ) +olcAttributeTypes: {33}( 2.5.4.39 NAME 'certificateRevocationList' DESC 'RFC + 2256: X.509 certificate revocation list, use ;binary' SYNTAX 1.3.6.1.4.1.14 + 66.115.121.1.9 ) +olcAttributeTypes: {34}( 2.5.4.40 NAME 'crossCertificatePair' DESC 'RFC2256: + X.509 cross certificate pair, use ;binary' SYNTAX 1.3.6.1.4.1.1466.115.121 + .1.10 ) +olcAttributeTypes: {35}( 2.5.4.42 NAME ( 'givenName' 'gn' ) DESC 'RFC2256: f + irst name(s) for which the entity is known by' SUP name ) +olcAttributeTypes: {36}( 2.5.4.43 NAME 'initials' DESC 'RFC2256: initials of + some or all of names, but not the surname(s).' SUP name ) +olcAttributeTypes: {37}( 2.5.4.44 NAME 'generationQualifier' DESC 'RFC2256: + name qualifier indicating a generation' SUP name ) +olcAttributeTypes: {38}( 2.5.4.45 NAME 'x500UniqueIdentifier' DESC 'RFC2256: + X.500 unique identifier' EQUALITY bitStringMatch SYNTAX 1.3.6.1.4.1.1466.1 + 15.121.1.6 ) +olcAttributeTypes: {39}( 2.5.4.46 NAME 'dnQualifier' DESC 'RFC2256: DN quali + fier' EQUALITY caseIgnoreMatch ORDERING caseIgnoreOrderingMatch SUBSTR case + IgnoreSubstringsMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.44 ) +olcAttributeTypes: {40}( 2.5.4.47 NAME 'enhancedSearchGuide' DESC 'RFC2256: + enhanced search guide' SYNTAX 1.3.6.1.4.1.1466.115.121.1.21 ) +olcAttributeTypes: {41}( 2.5.4.48 NAME 'protocolInformation' DESC 'RFC2256: + protocol information' EQUALITY protocolInformationMatch SYNTAX 1.3.6.1.4.1. 
+ 1466.115.121.1.42 ) +olcAttributeTypes: {42}( 2.5.4.50 NAME 'uniqueMember' DESC 'RFC2256: unique + member of a group' EQUALITY uniqueMemberMatch SYNTAX 1.3.6.1.4.1.1466.115.1 + 21.1.34 ) +olcAttributeTypes: {43}( 2.5.4.51 NAME 'houseIdentifier' DESC 'RFC2256: hous + e identifier' EQUALITY caseIgnoreMatch SUBSTR caseIgnoreSubstringsMatch SYN + TAX 1.3.6.1.4.1.1466.115.121.1.15{32768} ) +olcAttributeTypes: {44}( 2.5.4.52 NAME 'supportedAlgorithms' DESC 'RFC2256: + supported algorithms' SYNTAX 1.3.6.1.4.1.1466.115.121.1.49 ) +olcAttributeTypes: {45}( 2.5.4.53 NAME 'deltaRevocationList' DESC 'RFC2256: + delta revocation list; use ;binary' SYNTAX 1.3.6.1.4.1.1466.115.121.1.9 ) +olcAttributeTypes: {46}( 2.5.4.54 NAME 'dmdName' DESC 'RFC2256: name of DMD' + SUP name ) +olcAttributeTypes: {47}( 2.5.4.65 NAME 'pseudonym' DESC 'X.520(4th): pseudon + ym for the object' SUP name ) +olcAttributeTypes: {48}( 0.9.2342.19200300.100.1.3 NAME ( 'mail' 'rfc822Mail + box' ) DESC 'RFC1274: RFC822 Mailbox' EQUALITY caseIgnoreIA5Match SUBSTR ca + seIgnoreIA5SubstringsMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.26{256} ) +olcAttributeTypes: {49}( 0.9.2342.19200300.100.1.25 NAME ( 'dc' 'domainCompo + nent' ) DESC 'RFC1274/2247: domain component' EQUALITY caseIgnoreIA5Match S + UBSTR caseIgnoreIA5SubstringsMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.26 SIN + GLE-VALUE ) +olcAttributeTypes: {50}( 0.9.2342.19200300.100.1.37 NAME 'associatedDomain' + DESC 'RFC1274: domain associated with object' EQUALITY caseIgnoreIA5Match S + UBSTR caseIgnoreIA5SubstringsMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.26 ) +olcAttributeTypes: {51}( 1.2.840.113549.1.9.1 NAME ( 'email' 'emailAddress' + 'pkcs9email' ) DESC 'RFC3280: legacy attribute for email addresses in DNs' + EQUALITY caseIgnoreIA5Match SUBSTR caseIgnoreIA5SubstringsMatch SYNTAX 1.3. 
+ 6.1.4.1.1466.115.121.1.26{128} ) +olcObjectClasses: {0}( 2.5.6.2 NAME 'country' DESC 'RFC2256: a country' SUP + top STRUCTURAL MUST c MAY ( searchGuide $ description ) ) +olcObjectClasses: {1}( 2.5.6.3 NAME 'locality' DESC 'RFC2256: a locality' SU + P top STRUCTURAL MAY ( street $ seeAlso $ searchGuide $ st $ l $ descriptio + n ) ) +olcObjectClasses: {2}( 2.5.6.4 NAME 'organization' DESC 'RFC2256: an organiz + ation' SUP top STRUCTURAL MUST o MAY ( userPassword $ searchGuide $ seeAlso + $ businessCategory $ x121Address $ registeredAddress $ destinationIndicato + r $ preferredDeliveryMethod $ telexNumber $ teletexTerminalIdentifier $ tel + ephoneNumber $ internationaliSDNNumber $ facsimileTelephoneNumber $ street + $ postOfficeBox $ postalCode $ postalAddress $ physicalDeliveryOfficeName $ + st $ l $ description ) ) +olcObjectClasses: {3}( 2.5.6.5 NAME 'organizationalUnit' DESC 'RFC2256: an o + rganizational unit' SUP top STRUCTURAL MUST ou MAY ( userPassword $ searchG + uide $ seeAlso $ businessCategory $ x121Address $ registeredAddress $ desti + nationIndicator $ preferredDeliveryMethod $ telexNumber $ teletexTerminalId + entifier $ telephoneNumber $ internationaliSDNNumber $ facsimileTelephoneNu + mber $ street $ postOfficeBox $ postalCode $ postalAddress $ physicalDelive + ryOfficeName $ st $ l $ description ) ) +olcObjectClasses: {4}( 2.5.6.6 NAME 'person' DESC 'RFC2256: a person' SUP to + p STRUCTURAL MUST ( sn $ cn ) MAY ( userPassword $ telephoneNumber $ seeAls + o $ description ) ) +olcObjectClasses: {5}( 2.5.6.7 NAME 'organizationalPerson' DESC 'RFC2256: an + organizational person' SUP person STRUCTURAL MAY ( title $ x121Address $ r + egisteredAddress $ destinationIndicator $ preferredDeliveryMethod $ telexNu + mber $ teletexTerminalIdentifier $ telephoneNumber $ internationaliSDNNumbe + r $ facsimileTelephoneNumber $ street $ postOfficeBox $ postalCode $ postal + Address $ physicalDeliveryOfficeName $ ou $ st $ l ) ) +olcObjectClasses: {6}( 2.5.6.8 NAME 
'organizationalRole' DESC 'RFC2256: an o + rganizational role' SUP top STRUCTURAL MUST cn MAY ( x121Address $ register + edAddress $ destinationIndicator $ preferredDeliveryMethod $ telexNumber $ + teletexTerminalIdentifier $ telephoneNumber $ internationaliSDNNumber $ fac + simileTelephoneNumber $ seeAlso $ roleOccupant $ preferredDeliveryMethod $ + street $ postOfficeBox $ postalCode $ postalAddress $ physicalDeliveryOffic + eName $ ou $ st $ l $ description ) ) +olcObjectClasses: {7}( 2.5.6.9 NAME 'groupOfNames' DESC 'RFC2256: a group of + names (DNs)' SUP top STRUCTURAL MUST ( member $ cn ) MAY ( businessCategor + y $ seeAlso $ owner $ ou $ o $ description ) ) +olcObjectClasses: {8}( 2.5.6.10 NAME 'residentialPerson' DESC 'RFC2256: an r + esidential person' SUP person STRUCTURAL MUST l MAY ( businessCategory $ x1 + 21Address $ registeredAddress $ destinationIndicator $ preferredDeliveryMet + hod $ telexNumber $ teletexTerminalIdentifier $ telephoneNumber $ internati + onaliSDNNumber $ facsimileTelephoneNumber $ preferredDeliveryMethod $ stree + t $ postOfficeBox $ postalCode $ postalAddress $ physicalDeliveryOfficeName + $ st $ l ) ) +olcObjectClasses: {9}( 2.5.6.11 NAME 'applicationProcess' DESC 'RFC2256: an + application process' SUP top STRUCTURAL MUST cn MAY ( seeAlso $ ou $ l $ de + scription ) ) +olcObjectClasses: {10}( 2.5.6.12 NAME 'applicationEntity' DESC 'RFC2256: an + application entity' SUP top STRUCTURAL MUST ( presentationAddress $ cn ) MA + Y ( supportedApplicationContext $ seeAlso $ ou $ o $ l $ description ) ) +olcObjectClasses: {11}( 2.5.6.13 NAME 'dSA' DESC 'RFC2256: a directory syste + m agent (a server)' SUP applicationEntity STRUCTURAL MAY knowledgeInformati + on ) +olcObjectClasses: {12}( 2.5.6.14 NAME 'device' DESC 'RFC2256: a device' SUP + top STRUCTURAL MUST cn MAY ( serialNumber $ seeAlso $ owner $ ou $ o $ l $ + description ) ) +olcObjectClasses: {13}( 2.5.6.15 NAME 'strongAuthenticationUser' DESC 'RFC22 + 56: a strong authentication 
user' SUP top AUXILIARY MUST userCertificate ) +olcObjectClasses: {14}( 2.5.6.16 NAME 'certificationAuthority' DESC 'RFC2256 + : a certificate authority' SUP top AUXILIARY MUST ( authorityRevocationList + $ certificateRevocationList $ cACertificate ) MAY crossCertificatePair ) +olcObjectClasses: {15}( 2.5.6.17 NAME 'groupOfUniqueNames' DESC 'RFC2256: a + group of unique names (DN and Unique Identifier)' SUP top STRUCTURAL MUST ( + uniqueMember $ cn ) MAY ( businessCategory $ seeAlso $ owner $ ou $ o $ de + scription ) ) +olcObjectClasses: {16}( 2.5.6.18 NAME 'userSecurityInformation' DESC 'RFC225 + 6: a user security information' SUP top AUXILIARY MAY supportedAlgorithms ) +olcObjectClasses: {17}( 2.5.6.16.2 NAME 'certificationAuthority-V2' SUP cert + ificationAuthority AUXILIARY MAY deltaRevocationList ) +olcObjectClasses: {18}( 2.5.6.19 NAME 'cRLDistributionPoint' SUP top STRUCTU + RAL MUST cn MAY ( certificateRevocationList $ authorityRevocationList $ del + taRevocationList ) ) +olcObjectClasses: {19}( 2.5.6.20 NAME 'dmd' SUP top STRUCTURAL MUST dmdName + MAY ( userPassword $ searchGuide $ seeAlso $ businessCategory $ x121Address + $ registeredAddress $ destinationIndicator $ preferredDeliveryMethod $ tel + exNumber $ teletexTerminalIdentifier $ telephoneNumber $ internationaliSDNN + umber $ facsimileTelephoneNumber $ street $ postOfficeBox $ postalCode $ po + stalAddress $ physicalDeliveryOfficeName $ st $ l $ description ) ) +olcObjectClasses: {20}( 2.5.6.21 NAME 'pkiUser' DESC 'RFC2587: a PKI user' S + UP top AUXILIARY MAY userCertificate ) +olcObjectClasses: {21}( 2.5.6.22 NAME 'pkiCA' DESC 'RFC2587: PKI certificate + authority' SUP top AUXILIARY MAY ( authorityRevocationList $ certificateRe + vocationList $ cACertificate $ crossCertificatePair ) ) +olcObjectClasses: {22}( 2.5.6.23 NAME 'deltaCRL' DESC 'RFC2587: PKI user' SU + P top AUXILIARY MAY deltaRevocationList ) +olcObjectClasses: {23}( 1.3.6.1.4.1.250.3.15 NAME 'labeledURIObject' DESC 'R + FC2079: 
object that contains the URI attribute type' SUP top AUXILIARY MAY + labeledURI ) +olcObjectClasses: {24}( 0.9.2342.19200300.100.4.19 NAME 'simpleSecurityObjec + t' DESC 'RFC1274: simple security object' SUP top AUXILIARY MUST userPasswo + rd ) +olcObjectClasses: {25}( 1.3.6.1.4.1.1466.344 NAME 'dcObject' DESC 'RFC2247: + domain component object' SUP top AUXILIARY MUST dc ) +olcObjectClasses: {26}( 1.3.6.1.1.3.1 NAME 'uidObject' DESC 'RFC2377: uid ob + ject' SUP top AUXILIARY MUST uid ) +structuralObjectClass: olcSchemaConfig +entryUUID: 1212a836-e5e6-103a-973f-d731be523aab +creatorsName: cn=config +createTimestamp: 20210108101443Z +entryCSN: 20210108101443.265809Z#000000#000#000000 +modifiersName: cn=config +modifyTimestamp: 20210108101443Z diff --git a/dirsrvtests/tests/data/openldap_2_389/4539/slapd.d/cn=config/cn=schema/cn={1}cosine.ldif b/dirsrvtests/tests/data/openldap_2_389/4539/slapd.d/cn=config/cn=schema/cn={1}cosine.ldif new file mode 100644 index 0000000..db714ed --- /dev/null +++ b/dirsrvtests/tests/data/openldap_2_389/4539/slapd.d/cn=config/cn=schema/cn={1}cosine.ldif @@ -0,0 +1,178 @@ +# AUTO-GENERATED FILE - DO NOT EDIT!! Use ldapmodify. +# CRC32 aac8272d +dn: cn={1}cosine +objectClass: olcSchemaConfig +cn: {1}cosine +olcAttributeTypes: {0}( 0.9.2342.19200300.100.1.2 NAME 'textEncodedORAddress + ' EQUALITY caseIgnoreMatch SUBSTR caseIgnoreSubstringsMatch SYNTAX 1.3.6.1. 
+ 4.1.1466.115.121.1.15{256} ) +olcAttributeTypes: {1}( 0.9.2342.19200300.100.1.4 NAME 'info' DESC 'RFC1274: + general information' EQUALITY caseIgnoreMatch SUBSTR caseIgnoreSubstringsM + atch SYNTAX 1.3.6.1.4.1.1466.115.121.1.15{2048} ) +olcAttributeTypes: {2}( 0.9.2342.19200300.100.1.5 NAME ( 'drink' 'favouriteD + rink' ) DESC 'RFC1274: favorite drink' EQUALITY caseIgnoreMatch SUBSTR case + IgnoreSubstringsMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.15{256} ) +olcAttributeTypes: {3}( 0.9.2342.19200300.100.1.6 NAME 'roomNumber' DESC 'RF + C1274: room number' EQUALITY caseIgnoreMatch SUBSTR caseIgnoreSubstringsMat + ch SYNTAX 1.3.6.1.4.1.1466.115.121.1.15{256} ) +olcAttributeTypes: {4}( 0.9.2342.19200300.100.1.7 NAME 'photo' DESC 'RFC1274 + : photo (G3 fax)' SYNTAX 1.3.6.1.4.1.1466.115.121.1.23{25000} ) +olcAttributeTypes: {5}( 0.9.2342.19200300.100.1.8 NAME 'userClass' DESC 'RFC + 1274: category of user' EQUALITY caseIgnoreMatch SUBSTR caseIgnoreSubstring + sMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.15{256} ) +olcAttributeTypes: {6}( 0.9.2342.19200300.100.1.9 NAME 'host' DESC 'RFC1274: + host computer' EQUALITY caseIgnoreMatch SUBSTR caseIgnoreSubstringsMatch S + YNTAX 1.3.6.1.4.1.1466.115.121.1.15{256} ) +olcAttributeTypes: {7}( 0.9.2342.19200300.100.1.10 NAME 'manager' DESC 'RFC1 + 274: DN of manager' EQUALITY distinguishedNameMatch SYNTAX 1.3.6.1.4.1.1466 + .115.121.1.12 ) +olcAttributeTypes: {8}( 0.9.2342.19200300.100.1.11 NAME 'documentIdentifier' + DESC 'RFC1274: unique identifier of document' EQUALITY caseIgnoreMatch SUB + STR caseIgnoreSubstringsMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.15{256} ) +olcAttributeTypes: {9}( 0.9.2342.19200300.100.1.12 NAME 'documentTitle' DESC + 'RFC1274: title of document' EQUALITY caseIgnoreMatch SUBSTR caseIgnoreSub + stringsMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.15{256} ) +olcAttributeTypes: {10}( 0.9.2342.19200300.100.1.13 NAME 'documentVersion' D + ESC 'RFC1274: version of document' EQUALITY caseIgnoreMatch SUBSTR caseIgno + 
reSubstringsMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.15{256} ) +olcAttributeTypes: {11}( 0.9.2342.19200300.100.1.14 NAME 'documentAuthor' DE + SC 'RFC1274: DN of author of document' EQUALITY distinguishedNameMatch SYNT + AX 1.3.6.1.4.1.1466.115.121.1.12 ) +olcAttributeTypes: {12}( 0.9.2342.19200300.100.1.15 NAME 'documentLocation' + DESC 'RFC1274: location of document original' EQUALITY caseIgnoreMatch SUBS + TR caseIgnoreSubstringsMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.15{256} ) +olcAttributeTypes: {13}( 0.9.2342.19200300.100.1.20 NAME ( 'homePhone' 'home + TelephoneNumber' ) DESC 'RFC1274: home telephone number' EQUALITY telephone + NumberMatch SUBSTR telephoneNumberSubstringsMatch SYNTAX 1.3.6.1.4.1.1466.1 + 15.121.1.50 ) +olcAttributeTypes: {14}( 0.9.2342.19200300.100.1.21 NAME 'secretary' DESC 'R + FC1274: DN of secretary' EQUALITY distinguishedNameMatch SYNTAX 1.3.6.1.4.1 + .1466.115.121.1.12 ) +olcAttributeTypes: {15}( 0.9.2342.19200300.100.1.22 NAME 'otherMailbox' SYNT + AX 1.3.6.1.4.1.1466.115.121.1.39 ) +olcAttributeTypes: {16}( 0.9.2342.19200300.100.1.26 NAME 'aRecord' EQUALITY + caseIgnoreIA5Match SYNTAX 1.3.6.1.4.1.1466.115.121.1.26 ) +olcAttributeTypes: {17}( 0.9.2342.19200300.100.1.27 NAME 'mDRecord' EQUALITY + caseIgnoreIA5Match SYNTAX 1.3.6.1.4.1.1466.115.121.1.26 ) +olcAttributeTypes: {18}( 0.9.2342.19200300.100.1.28 NAME 'mXRecord' EQUALITY + caseIgnoreIA5Match SYNTAX 1.3.6.1.4.1.1466.115.121.1.26 ) +olcAttributeTypes: {19}( 0.9.2342.19200300.100.1.29 NAME 'nSRecord' EQUALITY + caseIgnoreIA5Match SYNTAX 1.3.6.1.4.1.1466.115.121.1.26 ) +olcAttributeTypes: {20}( 0.9.2342.19200300.100.1.30 NAME 'sOARecord' EQUALIT + Y caseIgnoreIA5Match SYNTAX 1.3.6.1.4.1.1466.115.121.1.26 ) +olcAttributeTypes: {21}( 0.9.2342.19200300.100.1.31 NAME 'cNAMERecord' EQUAL + ITY caseIgnoreIA5Match SYNTAX 1.3.6.1.4.1.1466.115.121.1.26 ) +olcAttributeTypes: {22}( 0.9.2342.19200300.100.1.38 NAME 'associatedName' DE + SC 'RFC1274: DN of entry associated with domain' EQUALITY 
distinguishedName + Match SYNTAX 1.3.6.1.4.1.1466.115.121.1.12 ) +olcAttributeTypes: {23}( 0.9.2342.19200300.100.1.39 NAME 'homePostalAddress' + DESC 'RFC1274: home postal address' EQUALITY caseIgnoreListMatch SUBSTR ca + seIgnoreListSubstringsMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.41 ) +olcAttributeTypes: {24}( 0.9.2342.19200300.100.1.40 NAME 'personalTitle' DES + C 'RFC1274: personal title' EQUALITY caseIgnoreMatch SUBSTR caseIgnoreSubst + ringsMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.15{256} ) +olcAttributeTypes: {25}( 0.9.2342.19200300.100.1.41 NAME ( 'mobile' 'mobileT + elephoneNumber' ) DESC 'RFC1274: mobile telephone number' EQUALITY telephon + eNumberMatch SUBSTR telephoneNumberSubstringsMatch SYNTAX 1.3.6.1.4.1.1466. + 115.121.1.50 ) +olcAttributeTypes: {26}( 0.9.2342.19200300.100.1.42 NAME ( 'pager' 'pagerTel + ephoneNumber' ) DESC 'RFC1274: pager telephone number' EQUALITY telephoneNu + mberMatch SUBSTR telephoneNumberSubstringsMatch SYNTAX 1.3.6.1.4.1.1466.115 + .121.1.50 ) +olcAttributeTypes: {27}( 0.9.2342.19200300.100.1.43 NAME ( 'co' 'friendlyCou + ntryName' ) DESC 'RFC1274: friendly country name' EQUALITY caseIgnoreMatch + SUBSTR caseIgnoreSubstringsMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.15 ) +olcAttributeTypes: {28}( 0.9.2342.19200300.100.1.44 NAME 'uniqueIdentifier' + DESC 'RFC1274: unique identifer' EQUALITY caseIgnoreMatch SYNTAX 1.3.6.1.4. 
+ 1.1466.115.121.1.15{256} ) +olcAttributeTypes: {29}( 0.9.2342.19200300.100.1.45 NAME 'organizationalStat + us' DESC 'RFC1274: organizational status' EQUALITY caseIgnoreMatch SUBSTR c + aseIgnoreSubstringsMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.15{256} ) +olcAttributeTypes: {30}( 0.9.2342.19200300.100.1.46 NAME 'janetMailbox' DESC + 'RFC1274: Janet mailbox' EQUALITY caseIgnoreIA5Match SUBSTR caseIgnoreIA5S + ubstringsMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.26{256} ) +olcAttributeTypes: {31}( 0.9.2342.19200300.100.1.47 NAME 'mailPreferenceOpti + on' DESC 'RFC1274: mail preference option' SYNTAX 1.3.6.1.4.1.1466.115.121. + 1.27 ) +olcAttributeTypes: {32}( 0.9.2342.19200300.100.1.48 NAME 'buildingName' DESC + 'RFC1274: name of building' EQUALITY caseIgnoreMatch SUBSTR caseIgnoreSubs + tringsMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.15{256} ) +olcAttributeTypes: {33}( 0.9.2342.19200300.100.1.49 NAME 'dSAQuality' DESC ' + RFC1274: DSA Quality' SYNTAX 1.3.6.1.4.1.1466.115.121.1.19 SINGLE-VALUE ) +olcAttributeTypes: {34}( 0.9.2342.19200300.100.1.50 NAME 'singleLevelQuality + ' DESC 'RFC1274: Single Level Quality' SYNTAX 1.3.6.1.4.1.1466.115.121.1.13 + SINGLE-VALUE ) +olcAttributeTypes: {35}( 0.9.2342.19200300.100.1.51 NAME 'subtreeMinimumQual + ity' DESC 'RFC1274: Subtree Mininum Quality' SYNTAX 1.3.6.1.4.1.1466.115.12 + 1.1.13 SINGLE-VALUE ) +olcAttributeTypes: {36}( 0.9.2342.19200300.100.1.52 NAME 'subtreeMaximumQual + ity' DESC 'RFC1274: Subtree Maximun Quality' SYNTAX 1.3.6.1.4.1.1466.115.12 + 1.1.13 SINGLE-VALUE ) +olcAttributeTypes: {37}( 0.9.2342.19200300.100.1.53 NAME 'personalSignature' + DESC 'RFC1274: Personal Signature (G3 fax)' SYNTAX 1.3.6.1.4.1.1466.115.12 + 1.1.23 ) +olcAttributeTypes: {38}( 0.9.2342.19200300.100.1.54 NAME 'dITRedirect' DESC + 'RFC1274: DIT Redirect' EQUALITY distinguishedNameMatch SYNTAX 1.3.6.1.4.1. 
+ 1466.115.121.1.12 ) +olcAttributeTypes: {39}( 0.9.2342.19200300.100.1.55 NAME 'audio' DESC 'RFC12 + 74: audio (u-law)' SYNTAX 1.3.6.1.4.1.1466.115.121.1.4{25000} ) +olcAttributeTypes: {40}( 0.9.2342.19200300.100.1.56 NAME 'documentPublisher' + DESC 'RFC1274: publisher of document' EQUALITY caseIgnoreMatch SUBSTR case + IgnoreSubstringsMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.15 ) +olcObjectClasses: {0}( 0.9.2342.19200300.100.4.4 NAME ( 'pilotPerson' 'newPi + lotPerson' ) SUP person STRUCTURAL MAY ( userid $ textEncodedORAddress $ rf + c822Mailbox $ favouriteDrink $ roomNumber $ userClass $ homeTelephoneNumber + $ homePostalAddress $ secretary $ personalTitle $ preferredDeliveryMethod + $ businessCategory $ janetMailbox $ otherMailbox $ mobileTelephoneNumber $ + pagerTelephoneNumber $ organizationalStatus $ mailPreferenceOption $ person + alSignature ) ) +olcObjectClasses: {1}( 0.9.2342.19200300.100.4.5 NAME 'account' SUP top STRU + CTURAL MUST userid MAY ( description $ seeAlso $ localityName $ organizatio + nName $ organizationalUnitName $ host ) ) +olcObjectClasses: {2}( 0.9.2342.19200300.100.4.6 NAME 'document' SUP top STR + UCTURAL MUST documentIdentifier MAY ( commonName $ description $ seeAlso $ + localityName $ organizationName $ organizationalUnitName $ documentTitle $ + documentVersion $ documentAuthor $ documentLocation $ documentPublisher ) ) +olcObjectClasses: {3}( 0.9.2342.19200300.100.4.7 NAME 'room' SUP top STRUCTU + RAL MUST commonName MAY ( roomNumber $ description $ seeAlso $ telephoneNum + ber ) ) +olcObjectClasses: {4}( 0.9.2342.19200300.100.4.9 NAME 'documentSeries' SUP t + op STRUCTURAL MUST commonName MAY ( description $ seeAlso $ telephonenumber + $ localityName $ organizationName $ organizationalUnitName ) ) +olcObjectClasses: {5}( 0.9.2342.19200300.100.4.13 NAME 'domain' SUP top STRU + CTURAL MUST domainComponent MAY ( associatedName $ organizationName $ descr + iption $ businessCategory $ seeAlso $ searchGuide $ userPassword $ locality + 
Name $ stateOrProvinceName $ streetAddress $ physicalDeliveryOfficeName $ p + ostalAddress $ postalCode $ postOfficeBox $ streetAddress $ facsimileTeleph + oneNumber $ internationalISDNNumber $ telephoneNumber $ teletexTerminalIden + tifier $ telexNumber $ preferredDeliveryMethod $ destinationIndicator $ reg + isteredAddress $ x121Address ) ) +olcObjectClasses: {6}( 0.9.2342.19200300.100.4.14 NAME 'RFC822localPart' SUP + domain STRUCTURAL MAY ( commonName $ surname $ description $ seeAlso $ tel + ephoneNumber $ physicalDeliveryOfficeName $ postalAddress $ postalCode $ po + stOfficeBox $ streetAddress $ facsimileTelephoneNumber $ internationalISDNN + umber $ telephoneNumber $ teletexTerminalIdentifier $ telexNumber $ preferr + edDeliveryMethod $ destinationIndicator $ registeredAddress $ x121Address ) + ) +olcObjectClasses: {7}( 0.9.2342.19200300.100.4.15 NAME 'dNSDomain' SUP domai + n STRUCTURAL MAY ( ARecord $ MDRecord $ MXRecord $ NSRecord $ SOARecord $ C + NAMERecord ) ) +olcObjectClasses: {8}( 0.9.2342.19200300.100.4.17 NAME 'domainRelatedObject' + DESC 'RFC1274: an object related to an domain' SUP top AUXILIARY MUST asso + ciatedDomain ) +olcObjectClasses: {9}( 0.9.2342.19200300.100.4.18 NAME 'friendlyCountry' SUP + country STRUCTURAL MUST friendlyCountryName ) +olcObjectClasses: {10}( 0.9.2342.19200300.100.4.20 NAME 'pilotOrganization' + SUP ( organization $ organizationalUnit ) STRUCTURAL MAY buildingName ) +olcObjectClasses: {11}( 0.9.2342.19200300.100.4.21 NAME 'pilotDSA' SUP dsa S + TRUCTURAL MAY dSAQuality ) +olcObjectClasses: {12}( 0.9.2342.19200300.100.4.22 NAME 'qualityLabelledData + ' SUP top AUXILIARY MUST dsaQuality MAY ( subtreeMinimumQuality $ subtreeMa + ximumQuality ) ) +structuralObjectClass: olcSchemaConfig +entryUUID: 1212b38a-e5e6-103a-9740-d731be523aab +creatorsName: cn=config +createTimestamp: 20210108101443Z +entryCSN: 20210108101443.265809Z#000000#000#000000 +modifiersName: cn=config +modifyTimestamp: 20210108101443Z diff --git 
a/dirsrvtests/tests/data/openldap_2_389/4539/slapd.d/cn=config/cn=schema/cn={2}inetorgperson.ldif b/dirsrvtests/tests/data/openldap_2_389/4539/slapd.d/cn=config/cn=schema/cn={2}inetorgperson.ldif new file mode 100644 index 0000000..2932429 --- /dev/null +++ b/dirsrvtests/tests/data/openldap_2_389/4539/slapd.d/cn=config/cn=schema/cn={2}inetorgperson.ldif @@ -0,0 +1,49 @@ +# AUTO-GENERATED FILE - DO NOT EDIT!! Use ldapmodify. +# CRC32 7fd3c455 +dn: cn={2}inetorgperson +objectClass: olcSchemaConfig +cn: {2}inetorgperson +olcAttributeTypes: {0}( 2.16.840.1.113730.3.1.1 NAME 'carLicense' DESC 'RFC2 + 798: vehicle license or registration plate' EQUALITY caseIgnoreMatch SUBSTR + caseIgnoreSubstringsMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.15 ) +olcAttributeTypes: {1}( 2.16.840.1.113730.3.1.2 NAME 'departmentNumber' DESC + 'RFC2798: identifies a department within an organization' EQUALITY caseIgn + oreMatch SUBSTR caseIgnoreSubstringsMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1 + .15 ) +olcAttributeTypes: {2}( 2.16.840.1.113730.3.1.241 NAME 'displayName' DESC 'R + FC2798: preferred name to be used when displaying entries' EQUALITY caseIgn + oreMatch SUBSTR caseIgnoreSubstringsMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1 + .15 SINGLE-VALUE ) +olcAttributeTypes: {3}( 2.16.840.1.113730.3.1.3 NAME 'employeeNumber' DESC ' + RFC2798: numerically identifies an employee within an organization' EQUALIT + Y caseIgnoreMatch SUBSTR caseIgnoreSubstringsMatch SYNTAX 1.3.6.1.4.1.1466. 
+ 115.121.1.15 SINGLE-VALUE ) +olcAttributeTypes: {4}( 2.16.840.1.113730.3.1.4 NAME 'employeeType' DESC 'RF + C2798: type of employment for a person' EQUALITY caseIgnoreMatch SUBSTR cas + eIgnoreSubstringsMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.15 ) +olcAttributeTypes: {5}( 0.9.2342.19200300.100.1.60 NAME 'jpegPhoto' DESC 'RF + C2798: a JPEG image' SYNTAX 1.3.6.1.4.1.1466.115.121.1.28 ) +olcAttributeTypes: {6}( 2.16.840.1.113730.3.1.39 NAME 'preferredLanguage' DE + SC 'RFC2798: preferred written or spoken language for a person' EQUALITY ca + seIgnoreMatch SUBSTR caseIgnoreSubstringsMatch SYNTAX 1.3.6.1.4.1.1466.115. + 121.1.15 SINGLE-VALUE ) +olcAttributeTypes: {7}( 2.16.840.1.113730.3.1.40 NAME 'userSMIMECertificate' + DESC 'RFC2798: PKCS#7 SignedData used to support S/MIME' SYNTAX 1.3.6.1.4. + 1.1466.115.121.1.5 ) +olcAttributeTypes: {8}( 2.16.840.1.113730.3.1.216 NAME 'userPKCS12' DESC 'RF + C2798: personal identity information, a PKCS #12 PFX' SYNTAX 1.3.6.1.4.1.14 + 66.115.121.1.5 ) +olcObjectClasses: {0}( 2.16.840.1.113730.3.2.2 NAME 'inetOrgPerson' DESC 'RF + C2798: Internet Organizational Person' SUP organizationalPerson STRUCTURAL + MAY ( audio $ businessCategory $ carLicense $ departmentNumber $ displayNam + e $ employeeNumber $ employeeType $ givenName $ homePhone $ homePostalAddre + ss $ initials $ jpegPhoto $ labeledURI $ mail $ manager $ mobile $ o $ page + r $ photo $ roomNumber $ secretary $ uid $ userCertificate $ x500uniqueIden + tifier $ preferredLanguage $ userSMIMECertificate $ userPKCS12 ) ) +structuralObjectClass: olcSchemaConfig +entryUUID: 1212b95c-e5e6-103a-9741-d731be523aab +creatorsName: cn=config +createTimestamp: 20210108101443Z +entryCSN: 20210108101443.265809Z#000000#000#000000 +modifiersName: cn=config +modifyTimestamp: 20210108101443Z diff --git a/dirsrvtests/tests/data/openldap_2_389/4539/slapd.d/cn=config/cn=schema/cn={3}rfc2307bis.ldif b/dirsrvtests/tests/data/openldap_2_389/4539/slapd.d/cn=config/cn=schema/cn={3}rfc2307bis.ldif 
new file mode 100644 index 0000000..81fd3b9 --- /dev/null +++ b/dirsrvtests/tests/data/openldap_2_389/4539/slapd.d/cn=config/cn=schema/cn={3}rfc2307bis.ldif @@ -0,0 +1,155 @@ +# AUTO-GENERATED FILE - DO NOT EDIT!! Use ldapmodify. +# CRC32 e0f5e515 +dn: cn={3}rfc2307bis +objectClass: olcSchemaConfig +cn: {3}rfc2307bis +olcAttributeTypes: {0}( 1.3.6.1.1.1.1.2 NAME 'gecos' DESC 'The GECOS field; + the common name' EQUALITY caseIgnoreIA5Match SUBSTR caseIgnoreIA5Substrings + Match SYNTAX 1.3.6.1.4.1.1466.115.121.1.26 SINGLE-VALUE ) +olcAttributeTypes: {1}( 1.3.6.1.1.1.1.3 NAME 'homeDirectory' DESC 'The absol + ute path to the home directory' EQUALITY caseExactIA5Match SYNTAX 1.3.6.1.4 + .1.1466.115.121.1.26 SINGLE-VALUE ) +olcAttributeTypes: {2}( 1.3.6.1.1.1.1.4 NAME 'loginShell' DESC 'The path to + the login shell' EQUALITY caseExactIA5Match SYNTAX 1.3.6.1.4.1.1466.115.121 + .1.26 SINGLE-VALUE ) +olcAttributeTypes: {3}( 1.3.6.1.1.1.1.5 NAME 'shadowLastChange' EQUALITY int + egerMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.27 SINGLE-VALUE ) +olcAttributeTypes: {4}( 1.3.6.1.1.1.1.6 NAME 'shadowMin' EQUALITY integerMat + ch SYNTAX 1.3.6.1.4.1.1466.115.121.1.27 SINGLE-VALUE ) +olcAttributeTypes: {5}( 1.3.6.1.1.1.1.7 NAME 'shadowMax' EQUALITY integerMat + ch SYNTAX 1.3.6.1.4.1.1466.115.121.1.27 SINGLE-VALUE ) +olcAttributeTypes: {6}( 1.3.6.1.1.1.1.8 NAME 'shadowWarning' EQUALITY intege + rMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.27 SINGLE-VALUE ) +olcAttributeTypes: {7}( 1.3.6.1.1.1.1.9 NAME 'shadowInactive' EQUALITY integ + erMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.27 SINGLE-VALUE ) +olcAttributeTypes: {8}( 1.3.6.1.1.1.1.10 NAME 'shadowExpire' EQUALITY intege + rMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.27 SINGLE-VALUE ) +olcAttributeTypes: {9}( 1.3.6.1.1.1.1.11 NAME 'shadowFlag' EQUALITY integerM + atch SYNTAX 1.3.6.1.4.1.1466.115.121.1.27 SINGLE-VALUE ) +olcAttributeTypes: {10}( 1.3.6.1.1.1.1.12 NAME 'memberUid' EQUALITY caseExac + tIA5Match SYNTAX 1.3.6.1.4.1.1466.115.121.1.26 
) +olcAttributeTypes: {11}( 1.3.6.1.1.1.1.13 NAME 'memberNisNetgroup' EQUALITY + caseExactIA5Match SUBSTR caseExactIA5SubstringsMatch SYNTAX 1.3.6.1.4.1.146 + 6.115.121.1.26 ) +olcAttributeTypes: {12}( 1.3.6.1.1.1.1.14 NAME 'nisNetgroupTriple' DESC 'Net + group triple' EQUALITY caseIgnoreIA5Match SYNTAX 1.3.6.1.4.1.1466.115.121.1 + .26 ) +olcAttributeTypes: {13}( 1.3.6.1.1.1.1.15 NAME 'ipServicePort' DESC 'Service + port number' EQUALITY integerMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.27 SI + NGLE-VALUE ) +olcAttributeTypes: {14}( 1.3.6.1.1.1.1.16 NAME 'ipServiceProtocol' DESC 'Ser + vice protocol name' SUP name ) +olcAttributeTypes: {15}( 1.3.6.1.1.1.1.17 NAME 'ipProtocolNumber' DESC 'IP p + rotocol number' EQUALITY integerMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.27 + SINGLE-VALUE ) +olcAttributeTypes: {16}( 1.3.6.1.1.1.1.18 NAME 'oncRpcNumber' DESC 'ONC RPC + number' EQUALITY integerMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.27 SINGLE-V + ALUE ) +olcAttributeTypes: {17}( 1.3.6.1.1.1.1.19 NAME 'ipHostNumber' DESC 'IPv4 add + resses as a dotted decimal omitting leading zeros or IPv6 addresses + as defined in RFC2373' SUP name ) +olcAttributeTypes: {18}( 1.3.6.1.1.1.1.20 NAME 'ipNetworkNumber' DESC 'IP ne + twork as a dotted decimal, eg. 192.168, omitting leading zeros' SUP + name SINGLE-VALUE ) +olcAttributeTypes: {19}( 1.3.6.1.1.1.1.21 NAME 'ipNetmaskNumber' DESC 'IP ne + tmask as a dotted decimal, eg. 255.255.255.0, omitting leading zeros + ' EQUALITY caseIgnoreIA5Match SYNTAX 1.3.6.1.4.1.1466.115.121.1.26 SINGLE-V + ALUE ) +olcAttributeTypes: {20}( 1.3.6.1.1.1.1.22 NAME 'macAddress' DESC 'MAC addres + s in maximal, colon separated hex notation, eg. 
00:00:92:90:ee:e2' E + QUALITY caseIgnoreIA5Match SYNTAX 1.3.6.1.4.1.1466.115.121.1.26 ) +olcAttributeTypes: {21}( 1.3.6.1.1.1.1.23 NAME 'bootParameter' DESC 'rpc.boo + tparamd parameter' EQUALITY caseExactIA5Match SYNTAX 1.3.6.1.4.1.1466.115.1 + 21.1.26 ) +olcAttributeTypes: {22}( 1.3.6.1.1.1.1.24 NAME 'bootFile' DESC 'Boot image n + ame' EQUALITY caseExactIA5Match SYNTAX 1.3.6.1.4.1.1466.115.121.1.26 ) +olcAttributeTypes: {23}( 1.3.6.1.1.1.1.26 NAME 'nisMapName' DESC 'Name of a + A generic NIS map' SUP name ) +olcAttributeTypes: {24}( 1.3.6.1.1.1.1.27 NAME 'nisMapEntry' DESC 'A generic + NIS entry' EQUALITY caseExactIA5Match SUBSTR caseExactIA5SubstringsMatch S + YNTAX 1.3.6.1.4.1.1466.115.121.1.26 SINGLE-VALUE ) +olcAttributeTypes: {25}( 1.3.6.1.1.1.1.28 NAME 'nisPublicKey' DESC 'NIS publ + ic key' EQUALITY octetStringMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.40 SING + LE-VALUE ) +olcAttributeTypes: {26}( 1.3.6.1.1.1.1.29 NAME 'nisSecretKey' DESC 'NIS secr + et key' EQUALITY octetStringMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.40 SING + LE-VALUE ) +olcAttributeTypes: {27}( 1.3.6.1.1.1.1.30 NAME 'nisDomain' DESC 'NIS domain' + EQUALITY caseIgnoreIA5Match SYNTAX 1.3.6.1.4.1.1466.115.121.1.26 ) +olcAttributeTypes: {28}( 1.3.6.1.1.1.1.31 NAME 'automountMapName' DESC 'auto + mount Map Name' EQUALITY caseExactIA5Match SUBSTR caseExactIA5SubstringsMat + ch SYNTAX 1.3.6.1.4.1.1466.115.121.1.26 SINGLE-VALUE ) +olcAttributeTypes: {29}( 1.3.6.1.1.1.1.32 NAME 'automountKey' DESC 'Automoun + t Key value' EQUALITY caseExactIA5Match SUBSTR caseExactIA5SubstringsMatch + SYNTAX 1.3.6.1.4.1.1466.115.121.1.26 SINGLE-VALUE ) +olcAttributeTypes: {30}( 1.3.6.1.1.1.1.33 NAME 'automountInformation' DESC ' + Automount information' EQUALITY caseExactIA5Match SUBSTR caseExactIA5Substr + ingsMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.26 SINGLE-VALUE ) +olcObjectClasses: {0}( 1.3.6.1.1.1.2.0 NAME 'posixAccount' DESC 'Abstraction + of an account with POSIX attributes' SUP top AUXILIARY MUST ( cn $ 
uid $ u + idNumber $ gidNumber $ homeDirectory ) MAY ( userPassword $ loginShell $ ge + cos $ description ) ) +olcObjectClasses: {1}( 1.3.6.1.1.1.2.1 NAME 'shadowAccount' DESC 'Additional + attributes for shadow passwords' SUP top AUXILIARY MUST uid MAY ( userPass + word $ description $ shadowLastChange $ shadowMin $ shadowMax $ shadowWarni + ng $ shadowInactive $ shadowExpire $ shadowFlag ) ) +olcObjectClasses: {2}( 1.3.6.1.1.1.2.2 NAME 'posixGroup' DESC 'Abstraction o + f a group of accounts' SUP top AUXILIARY MUST gidNumber MAY ( userPassword + $ memberUid $ description ) ) +olcObjectClasses: {3}( 1.3.6.1.1.1.2.3 NAME 'ipService' DESC 'Abstraction an + Internet Protocol service. Maps an IP port and protocol (such as tc + p or udp) to one or more names; the distinguished value of th + e cn attribute denotes the services canonical name' SUP top STRUCTUR + AL MUST ( cn $ ipServicePort $ ipServiceProtocol ) MAY description ) +olcObjectClasses: {4}( 1.3.6.1.1.1.2.4 NAME 'ipProtocol' DESC 'Abstraction o + f an IP protocol. Maps a protocol number to one or more names. The d + istinguished value of the cn attribute denotes the protocols canonic + al name' SUP top STRUCTURAL MUST ( cn $ ipProtocolNumber ) MAY description + ) +olcObjectClasses: {5}( 1.3.6.1.1.1.2.5 NAME 'oncRpc' DESC 'Abstraction of an + Open Network Computing (ONC) [RFC1057] Remote Procedure Call (RPC) b + inding. This class maps an ONC RPC number to a name. The distin + guished value of the cn attribute denotes the RPC services canonical + name' SUP top STRUCTURAL MUST ( cn $ oncRpcNumber ) MAY description ) +olcObjectClasses: {6}( 1.3.6.1.1.1.2.6 NAME 'ipHost' DESC 'Abstraction of a + host, an IP device. The distinguished value of the cn attribute deno + tes the hosts canonical name. 
Device SHOULD be used as a structural + class' SUP top AUXILIARY MUST ( cn $ ipHostNumber ) MAY ( userPassword $ l + $ description $ manager ) ) +olcObjectClasses: {7}( 1.3.6.1.1.1.2.7 NAME 'ipNetwork' DESC 'Abstraction of + a network. The distinguished value of the cn attribute denotes the + networks canonical name' SUP top STRUCTURAL MUST ipNetworkNumber MAY ( cn $ + ipNetmaskNumber $ l $ description $ manager ) ) +olcObjectClasses: {8}( 1.3.6.1.1.1.2.8 NAME 'nisNetgroup' DESC 'Abstraction + of a netgroup. May refer to other netgroups' SUP top STRUCTURAL MUST cn MAY + ( nisNetgroupTriple $ memberNisNetgroup $ description ) ) +olcObjectClasses: {9}( 1.3.6.1.1.1.2.9 NAME 'nisMap' DESC 'A generic abstrac + tion of a NIS map' SUP top STRUCTURAL MUST nisMapName MAY description ) +olcObjectClasses: {10}( 1.3.6.1.1.1.2.10 NAME 'nisObject' DESC 'An entry in + a NIS map' SUP top STRUCTURAL MUST ( cn $ nisMapEntry $ nisMapName ) MAY de + scription ) +olcObjectClasses: {11}( 1.3.6.1.1.1.2.11 NAME 'ieee802Device' DESC 'A device + with a MAC address; device SHOULD be used as a structural class' SU + P top AUXILIARY MAY macAddress ) +olcObjectClasses: {12}( 1.3.6.1.1.1.2.12 NAME 'bootableDevice' DESC 'A devic + e with boot parameters; device SHOULD be used as a structural class' + SUP top AUXILIARY MAY ( bootFile $ bootParameter ) ) +olcObjectClasses: {13}( 1.3.6.1.1.1.2.14 NAME 'nisKeyObject' DESC 'An object + with a public and secret key' SUP top AUXILIARY MUST ( cn $ nisPublicKey $ + nisSecretKey ) MAY ( uidNumber $ description ) ) +olcObjectClasses: {14}( 1.3.6.1.1.1.2.15 NAME 'nisDomainObject' DESC 'Associ + ates a NIS domain with a naming context' SUP top AUXILIARY MUST nisDomain ) +olcObjectClasses: {15}( 1.3.6.1.1.1.2.16 NAME 'automountMap' SUP top STRUCTU + RAL MUST automountMapName MAY description ) +olcObjectClasses: {16}( 1.3.6.1.1.1.2.17 NAME 'automount' DESC 'Automount in + formation' SUP top STRUCTURAL MUST ( automountKey $ automountInformation ) + MAY 
description ) +olcObjectClasses: {17}( 1.3.6.1.4.1.5322.13.1.1 NAME 'namedObject' SUP top S + TRUCTURAL MAY cn ) +structuralObjectClass: olcSchemaConfig +entryUUID: 1212c37a-e5e6-103a-9742-d731be523aab +creatorsName: cn=config +createTimestamp: 20210108101443Z +entryCSN: 20210108101443.265809Z#000000#000#000000 +modifiersName: cn=config +modifyTimestamp: 20210108101443Z diff --git a/dirsrvtests/tests/data/openldap_2_389/4539/slapd.d/cn=config/cn=schema/cn={4}yast.ldif b/dirsrvtests/tests/data/openldap_2_389/4539/slapd.d/cn=config/cn=schema/cn={4}yast.ldif new file mode 100644 index 0000000..8531cd7 --- /dev/null +++ b/dirsrvtests/tests/data/openldap_2_389/4539/slapd.d/cn=config/cn=schema/cn={4}yast.ldif @@ -0,0 +1,108 @@ +# AUTO-GENERATED FILE - DO NOT EDIT!! Use ldapmodify. +# CRC32 79fdfade +dn: cn={4}yast +objectClass: olcSchemaConfig +cn: {4}yast +olcObjectIdentifier: {0}SUSE 1.3.6.1.4.1.7057 +olcObjectIdentifier: {1}SUSE.YaST SUSE:10.1 +olcObjectIdentifier: {2}SUSE.YaST.ModuleConfig SUSE:10.1.2 +olcObjectIdentifier: {3}SUSE.YaST.ModuleConfig.OC SUSE.YaST.ModuleConfig:1 +olcObjectIdentifier: {4}SUSE.YaST.ModuleConfig.Attr SUSE.YaST.ModuleConfig:2 +olcAttributeTypes: {0}( SUSE.YaST.ModuleConfig.Attr:2 NAME 'suseDefaultBase' + DESC 'Base DN where new Objects should be created by default' EQUALITY dis + tinguishedNameMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.12 SINGLE-VALUE ) +olcAttributeTypes: {1}( SUSE.YaST.ModuleConfig.Attr:3 NAME 'suseNextUniqueId + ' DESC 'Next unused unique ID, can be used to generate directory wide uniqe + IDs' EQUALITY integerMatch ORDERING integerOrderingMatch SYNTAX 1.3.6.1.4. 
+ 1.1466.115.121.1.27 SINGLE-VALUE ) +olcAttributeTypes: {2}( SUSE.YaST.ModuleConfig.Attr:4 NAME 'suseMinUniqueId' + DESC 'lower Border for Unique IDs' EQUALITY integerMatch ORDERING integerO + rderingMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.27 SINGLE-VALUE ) +olcAttributeTypes: {3}( SUSE.YaST.ModuleConfig.Attr:5 NAME 'suseMaxUniqueId' + DESC 'upper Border for Unique IDs' EQUALITY integerMatch ORDERING integerO + rderingMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.27 SINGLE-VALUE ) +olcAttributeTypes: {4}( SUSE.YaST.ModuleConfig.Attr:6 NAME 'suseDefaultTempl + ate' DESC 'The DN of a template that should be used by default' EQUALITY di + stinguishedNameMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.12 SINGLE-VALUE ) +olcAttributeTypes: {5}( SUSE.YaST.ModuleConfig.Attr:7 NAME 'suseSearchFilter + ' DESC 'Search filter to localize Objects' SYNTAX 1.3.6.1.4.1.1466.115.121. + 1.15 SINGLE-VALUE ) +olcAttributeTypes: {6}( SUSE.YaST.ModuleConfig.Attr:11 NAME 'suseDefaultValu + e' DESC 'an Attribute-Value-Assertions to define defaults for specific Attr + ibutes' EQUALITY caseIgnoreMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.15 ) +olcAttributeTypes: {7}( SUSE.YaST.ModuleConfig.Attr:12 NAME 'suseNamingAttri + bute' DESC 'AttributeType that should be used as the RDN' EQUALITY caseIgno + reIA5Match SYNTAX 1.3.6.1.4.1.1466.115.121.1.26 SINGLE-VALUE ) +olcAttributeTypes: {8}( SUSE.YaST.ModuleConfig.Attr:15 NAME 'suseSecondaryGr + oup' DESC 'seconday group DN' EQUALITY distinguishedNameMatch SYNTAX 1.3.6. 
+ 1.4.1.1466.115.121.1.12 ) +olcAttributeTypes: {9}( SUSE.YaST.ModuleConfig.Attr:16 NAME 'suseMinPassword + Length' DESC 'minimum Password length for new users' EQUALITY integerMatch + ORDERING integerOrderingMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.27 SINGLE-V + ALUE ) +olcAttributeTypes: {10}( SUSE.YaST.ModuleConfig.Attr:17 NAME 'suseMaxPasswor + dLength' DESC 'maximum Password length for new users' EQUALITY integerMatch + ORDERING integerOrderingMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.27 SINGLE- + VALUE ) +olcAttributeTypes: {11}( SUSE.YaST.ModuleConfig.Attr:18 NAME 'susePasswordHa + sh' DESC 'Hash method to use for new users' EQUALITY caseIgnoreIA5Match SYN + TAX 1.3.6.1.4.1.1466.115.121.1.26 SINGLE-VALUE ) +olcAttributeTypes: {12}( SUSE.YaST.ModuleConfig.Attr:19 NAME 'suseSkelDir' D + ESC '' EQUALITY caseExactIA5Match SYNTAX 1.3.6.1.4.1.1466.115.121.1.26 ) +olcAttributeTypes: {13}( SUSE.YaST.ModuleConfig.Attr:20 NAME 'susePlugin' DE + SC 'plugin to use upon user/ group creation' EQUALITY caseIgnoreMatch SYNTA + X 1.3.6.1.4.1.1466.115.121.1.15 ) +olcAttributeTypes: {14}( SUSE.YaST.ModuleConfig.Attr:21 NAME 'suseMapAttribu + te' DESC '' EQUALITY caseIgnoreMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.15 ) +olcAttributeTypes: {15}( SUSE.YaST.ModuleConfig.Attr:22 NAME 'suseImapServer + ' DESC '' EQUALITY caseIgnoreMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.15 SIN + GLE-VALUE ) +olcAttributeTypes: {16}( SUSE.YaST.ModuleConfig.Attr:23 NAME 'suseImapAdmin' + DESC '' EQUALITY caseIgnoreMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.15 SING + LE-VALUE ) +olcAttributeTypes: {17}( SUSE.YaST.ModuleConfig.Attr:24 NAME 'suseImapDefaul + tQuota' DESC '' EQUALITY integerMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.27 + SINGLE-VALUE ) +olcAttributeTypes: {18}( SUSE.YaST.ModuleConfig.Attr:25 NAME 'suseImapUseSsl + ' DESC '' EQUALITY booleanMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.7 SINGLE- + VALUE ) +olcObjectClasses: {0}( SUSE.YaST.ModuleConfig.OC:2 NAME 'suseModuleConfigura + tion' DESC 'Contains 
configuration of Management Modules' SUP top STRUCTURA + L MUST cn MAY suseDefaultBase ) +olcObjectClasses: {1}( SUSE.YaST.ModuleConfig.OC:3 NAME 'suseUserConfigurati + on' DESC 'Configuration of user management tools' SUP suseModuleConfigurati + on STRUCTURAL MAY ( suseMinPasswordLength $ suseMaxPasswordLength $ susePas + swordHash $ suseSkelDir $ suseNextUniqueId $ suseMinUniqueId $ suseMaxUniqu + eId $ suseDefaultTemplate $ suseSearchFilter $ suseMapAttribute ) ) +olcObjectClasses: {2}( SUSE.YaST.ModuleConfig.OC:4 NAME 'suseObjectTemplate' + DESC 'Base Class for Object-Templates' SUP top STRUCTURAL MUST cn MAY ( su + sePlugin $ suseDefaultValue $ suseNamingAttribute ) ) +olcObjectClasses: {3}( SUSE.YaST.ModuleConfig.OC:5 NAME 'suseUserTemplate' D + ESC 'User object template' SUP suseObjectTemplate STRUCTURAL MUST cn MAY su + seSecondaryGroup ) +olcObjectClasses: {4}( SUSE.YaST.ModuleConfig.OC:6 NAME 'suseGroupTemplate' + DESC 'Group object template' SUP suseObjectTemplate STRUCTURAL MUST cn ) +olcObjectClasses: {5}( SUSE.YaST.ModuleConfig.OC:7 NAME 'suseGroupConfigurat + ion' DESC 'Configuration of user management tools' SUP suseModuleConfigurat + ion STRUCTURAL MAY ( suseNextUniqueId $ suseMinUniqueId $ suseMaxUniqueId $ + suseDefaultTemplate $ suseSearchFilter $ suseMapAttribute ) ) +olcObjectClasses: {6}( SUSE.YaST.ModuleConfig.OC:8 NAME 'suseCaConfiguration + ' DESC 'Configuration of CA management tools' SUP suseModuleConfiguration S + TRUCTURAL ) +olcObjectClasses: {7}( SUSE.YaST.ModuleConfig.OC:9 NAME 'suseDnsConfiguratio + n' DESC 'Configuration of mail server management tools' SUP suseModuleConfi + guration STRUCTURAL ) +olcObjectClasses: {8}( SUSE.YaST.ModuleConfig.OC:10 NAME 'suseDhcpConfigurat + ion' DESC 'Configuration of DHCP server management tools' SUP suseModuleCon + figuration STRUCTURAL ) +olcObjectClasses: {9}( SUSE.YaST.ModuleConfig.OC:11 NAME 'suseMailConfigurat + ion' DESC 'Configuration of IMAP user management tools' SUP suseModuleConfi + 
guration STRUCTURAL MUST ( suseImapServer $ suseImapAdmin $ suseImapDefault + Quota $ suseImapUseSsl ) ) +structuralObjectClass: olcSchemaConfig +entryUUID: 1212cabe-e5e6-103a-9743-d731be523aab +creatorsName: cn=config +createTimestamp: 20210108101443Z +entryCSN: 20210108101443.265809Z#000000#000#000000 +modifiersName: cn=config +modifyTimestamp: 20210108101443Z diff --git a/dirsrvtests/tests/data/openldap_2_389/4539/slapd.d/cn=config/olcDatabase={-1}frontend.ldif b/dirsrvtests/tests/data/openldap_2_389/4539/slapd.d/cn=config/olcDatabase={-1}frontend.ldif new file mode 100644 index 0000000..c33e291 --- /dev/null +++ b/dirsrvtests/tests/data/openldap_2_389/4539/slapd.d/cn=config/olcDatabase={-1}frontend.ldif @@ -0,0 +1,25 @@ +# AUTO-GENERATED FILE - DO NOT EDIT!! Use ldapmodify. +# CRC32 adeada7d +dn: olcDatabase={-1}frontend +objectClass: olcDatabaseConfig +objectClass: olcFrontendConfig +olcDatabase: {-1}frontend +olcAccess: {0}to dn.base="" by * read +olcAccess: {1}to dn.base="cn=subschema" by * read +olcAccess: {2}to attrs=userPassword,userPKCS12 by self write by * auth +olcAccess: {3}to attrs=shadowLastChange by self write by * read +olcAccess: {4}to * by * read +olcAddContentAcl: FALSE +olcLastMod: TRUE +olcMaxDerefDepth: 0 +olcReadOnly: FALSE +olcSchemaDN: cn=Subschema +olcSyncUseSubentry: FALSE +olcMonitoring: FALSE +structuralObjectClass: olcDatabaseConfig +entryUUID: 1212d054-e5e6-103a-9744-d731be523aab +creatorsName: cn=config +createTimestamp: 20210108101443Z +entryCSN: 20210108101443.265809Z#000000#000#000000 +modifiersName: cn=config +modifyTimestamp: 20210108101443Z diff --git a/dirsrvtests/tests/data/openldap_2_389/4539/slapd.d/cn=config/olcDatabase={0}config.ldif b/dirsrvtests/tests/data/openldap_2_389/4539/slapd.d/cn=config/olcDatabase={0}config.ldif new file mode 100644 index 0000000..2a99e0b --- /dev/null +++ b/dirsrvtests/tests/data/openldap_2_389/4539/slapd.d/cn=config/olcDatabase={0}config.ldif @@ -0,0 +1,20 @@ +# AUTO-GENERATED FILE - DO NOT 
EDIT!! Use ldapmodify. +# CRC32 70bea9f6 +dn: olcDatabase={0}config +objectClass: olcDatabaseConfig +olcDatabase: {0}config +olcAccess: {0}to * by * none +olcAddContentAcl: TRUE +olcLastMod: TRUE +olcMaxDerefDepth: 15 +olcReadOnly: FALSE +olcRootDN: cn=config +olcSyncUseSubentry: FALSE +olcMonitoring: FALSE +structuralObjectClass: olcDatabaseConfig +entryUUID: 1212d3e2-e5e6-103a-9745-d731be523aab +creatorsName: cn=config +createTimestamp: 20210108101443Z +entryCSN: 20210108101443.265809Z#000000#000#000000 +modifiersName: cn=config +modifyTimestamp: 20210108101443Z diff --git a/dirsrvtests/tests/data/openldap_2_389/4539/slapd.d/cn=config/olcDatabase={1}hdb.ldif b/dirsrvtests/tests/data/openldap_2_389/4539/slapd.d/cn=config/olcDatabase={1}hdb.ldif new file mode 100644 index 0000000..93fa20f --- /dev/null +++ b/dirsrvtests/tests/data/openldap_2_389/4539/slapd.d/cn=config/olcDatabase={1}hdb.ldif @@ -0,0 +1,36 @@ +# AUTO-GENERATED FILE - DO NOT EDIT!! Use ldapmodify. +# CRC32 4e8f1f23 +dn: olcDatabase={1}hdb +objectClass: olcDatabaseConfig +objectClass: olcHdbConfig +olcDatabase: {1}hdb +olcSuffix: dc=ldapdom,dc=net +olcAddContentAcl: FALSE +olcLastMod: TRUE +olcMaxDerefDepth: 15 +olcReadOnly: FALSE +olcRootDN: cn=root,dc=ldapdom,dc=net +olcRootPW:: cGFzcw== +olcSyncUseSubentry: FALSE +olcMonitoring: FALSE +olcDbDirectory: /tmp/ldap-sssdtest +olcDbCacheSize: 10000 +olcDbCheckpoint: 1024 5 +olcDbChecksum: FALSE +olcDbNoSync: FALSE +olcDbDirtyRead: FALSE +olcDbIDLcacheSize: 0 +olcDbIndex: objectClass eq +olcDbLinearIndex: FALSE +olcDbMode: 0600 +olcDbSearchStack: 16 +olcDbShmKey: 0 +olcDbCacheFree: 1 +olcDbDNcacheSize: 0 +structuralObjectClass: olcHdbConfig +entryUUID: 1212d82e-e5e6-103a-9746-d731be523aab +creatorsName: cn=config +createTimestamp: 20210108101443Z +entryCSN: 20210108101443.265809Z#000000#000#000000 +modifiersName: cn=config +modifyTimestamp: 20210108101443Z diff --git a/dirsrvtests/tests/data/openldap_2_389/5323/slapd.d/cn=config.ldif 
b/dirsrvtests/tests/data/openldap_2_389/5323/slapd.d/cn=config.ldif new file mode 100755 index 0000000..732d26c --- /dev/null +++ b/dirsrvtests/tests/data/openldap_2_389/5323/slapd.d/cn=config.ldif @@ -0,0 +1,16 @@ +# AUTO-GENERATED FILE - DO NOT EDIT!! Use ldapmodify. +# CRC32 cd0d3c4a +dn: cn=config +objectClass: olcGlobal +cn: config +olcArgsFile: /var/run/slapd/slapd.args +olcLogLevel: none +olcPidFile: /var/run/slapd/slapd.pid +olcToolThreads: 1 +structuralObjectClass: olcGlobal +entryUUID: 304cee42-7b77-103c-99fd-2368e9146c75 +creatorsName: cn=config +createTimestamp: 20220608130351Z +entryCSN: 20220608130351.931183Z#000000#000#000000 +modifiersName: cn=config +modifyTimestamp: 20220608130351Z diff --git a/dirsrvtests/tests/data/openldap_2_389/5323/slapd.d/cn=config/cn=module{0}.ldif b/dirsrvtests/tests/data/openldap_2_389/5323/slapd.d/cn=config/cn=module{0}.ldif new file mode 100755 index 0000000..f88b8ac --- /dev/null +++ b/dirsrvtests/tests/data/openldap_2_389/5323/slapd.d/cn=config/cn=module{0}.ldif @@ -0,0 +1,15 @@ +# AUTO-GENERATED FILE - DO NOT EDIT!! Use ldapmodify. +# CRC32 9ecf1928 +dn: cn=module{0} +objectClass: olcModuleList +cn: module{0} +olcModulePath: /usr/lib/ldap +olcModuleLoad: {0}back_mdb +olcModuleLoad: {1}back_monitor +structuralObjectClass: olcModuleList +entryUUID: 304d3fc8-7b77-103c-9a05-2368e9146c75 +creatorsName: cn=admin,cn=config +createTimestamp: 20220608130351Z +entryCSN: 20220608131231.272094Z#000000#000#000000 +modifiersName: gidNumber=0+uidNumber=0,cn=peercred,cn=external,cn=auth +modifyTimestamp: 20220608131231Z diff --git a/dirsrvtests/tests/data/openldap_2_389/5323/slapd.d/cn=config/cn=schema.ldif b/dirsrvtests/tests/data/openldap_2_389/5323/slapd.d/cn=config/cn=schema.ldif new file mode 100755 index 0000000..3a79dfb --- /dev/null +++ b/dirsrvtests/tests/data/openldap_2_389/5323/slapd.d/cn=config/cn=schema.ldif @@ -0,0 +1,12 @@ +# AUTO-GENERATED FILE - DO NOT EDIT!! Use ldapmodify. 
+# CRC32 8bcba6e2 +dn: cn=schema +objectClass: olcSchemaConfig +cn: schema +structuralObjectClass: olcSchemaConfig +entryUUID: 304cfa86-7b77-103c-9a00-2368e9146c75 +creatorsName: cn=admin,cn=config +createTimestamp: 20220608130351Z +entryCSN: 20220608130351.931526Z#000000#000#000000 +modifiersName: cn=admin,cn=config +modifyTimestamp: 20220608130351Z diff --git a/dirsrvtests/tests/data/openldap_2_389/5323/slapd.d/cn=config/cn=schema/cn={0}core.ldif b/dirsrvtests/tests/data/openldap_2_389/5323/slapd.d/cn=config/cn=schema/cn={0}core.ldif new file mode 100755 index 0000000..8e26a78 --- /dev/null +++ b/dirsrvtests/tests/data/openldap_2_389/5323/slapd.d/cn=config/cn=schema/cn={0}core.ldif @@ -0,0 +1,249 @@ +# AUTO-GENERATED FILE - DO NOT EDIT!! Use ldapmodify. +# CRC32 87be63e1 +dn: cn={0}core +objectClass: olcSchemaConfig +cn: {0}core +olcAttributeTypes: {0}( 2.5.4.2 NAME 'knowledgeInformation' DESC 'RFC2256: k + nowledge information' EQUALITY caseIgnoreMatch SYNTAX 1.3.6.1.4.1.1466.115. + 121.1.15{32768} ) +olcAttributeTypes: {1}( 2.5.4.4 NAME ( 'sn' 'surname' ) DESC 'RFC2256: last + (family) name(s) for which the entity is known by' SUP name ) +olcAttributeTypes: {2}( 2.5.4.5 NAME 'serialNumber' DESC 'RFC2256: serial nu + mber of the entity' EQUALITY caseIgnoreMatch SUBSTR caseIgnoreSubstringsMat + ch SYNTAX 1.3.6.1.4.1.1466.115.121.1.44{64} ) +olcAttributeTypes: {3}( 2.5.4.6 NAME ( 'c' 'countryName' ) DESC 'RFC4519: tw + o-letter ISO-3166 country code' SUP name SYNTAX 1.3.6.1.4.1.1466.115.121.1. 
+ 11 SINGLE-VALUE ) +olcAttributeTypes: {4}( 2.5.4.7 NAME ( 'l' 'localityName' ) DESC 'RFC2256: l + ocality which this object resides in' SUP name ) +olcAttributeTypes: {5}( 2.5.4.8 NAME ( 'st' 'stateOrProvinceName' ) DESC 'RF + C2256: state or province which this object resides in' SUP name ) +olcAttributeTypes: {6}( 2.5.4.9 NAME ( 'street' 'streetAddress' ) DESC 'RFC2 + 256: street address of this object' EQUALITY caseIgnoreMatch SUBSTR caseIgn + oreSubstringsMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.15{128} ) +olcAttributeTypes: {7}( 2.5.4.10 NAME ( 'o' 'organizationName' ) DESC 'RFC22 + 56: organization this object belongs to' SUP name ) +olcAttributeTypes: {8}( 2.5.4.11 NAME ( 'ou' 'organizationalUnitName' ) DESC + 'RFC2256: organizational unit this object belongs to' SUP name ) +olcAttributeTypes: {9}( 2.5.4.12 NAME 'title' DESC 'RFC2256: title associate + d with the entity' SUP name ) +olcAttributeTypes: {10}( 2.5.4.14 NAME 'searchGuide' DESC 'RFC2256: search g + uide, deprecated by enhancedSearchGuide' SYNTAX 1.3.6.1.4.1.1466.115.121.1. + 25 ) +olcAttributeTypes: {11}( 2.5.4.15 NAME 'businessCategory' DESC 'RFC2256: bus + iness category' EQUALITY caseIgnoreMatch SUBSTR caseIgnoreSubstringsMatch S + YNTAX 1.3.6.1.4.1.1466.115.121.1.15{128} ) +olcAttributeTypes: {12}( 2.5.4.16 NAME 'postalAddress' DESC 'RFC2256: postal + address' EQUALITY caseIgnoreListMatch SUBSTR caseIgnoreListSubstringsMatch + SYNTAX 1.3.6.1.4.1.1466.115.121.1.41 ) +olcAttributeTypes: {13}( 2.5.4.17 NAME 'postalCode' DESC 'RFC2256: postal co + de' EQUALITY caseIgnoreMatch SUBSTR caseIgnoreSubstringsMatch SYNTAX 1.3.6. 
+ 1.4.1.1466.115.121.1.15{40} ) +olcAttributeTypes: {14}( 2.5.4.18 NAME 'postOfficeBox' DESC 'RFC2256: Post O + ffice Box' EQUALITY caseIgnoreMatch SUBSTR caseIgnoreSubstringsMatch SYNTAX + 1.3.6.1.4.1.1466.115.121.1.15{40} ) +olcAttributeTypes: {15}( 2.5.4.19 NAME 'physicalDeliveryOfficeName' DESC 'RF + C2256: Physical Delivery Office Name' EQUALITY caseIgnoreMatch SUBSTR caseI + gnoreSubstringsMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.15{128} ) +olcAttributeTypes: {16}( 2.5.4.20 NAME 'telephoneNumber' DESC 'RFC2256: Tele + phone Number' EQUALITY telephoneNumberMatch SUBSTR telephoneNumberSubstring + sMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.50{32} ) +olcAttributeTypes: {17}( 2.5.4.21 NAME 'telexNumber' DESC 'RFC2256: Telex Nu + mber' SYNTAX 1.3.6.1.4.1.1466.115.121.1.52 ) +olcAttributeTypes: {18}( 2.5.4.22 NAME 'teletexTerminalIdentifier' DESC 'RFC + 2256: Teletex Terminal Identifier' SYNTAX 1.3.6.1.4.1.1466.115.121.1.51 ) +olcAttributeTypes: {19}( 2.5.4.23 NAME ( 'facsimileTelephoneNumber' 'fax' ) + DESC 'RFC2256: Facsimile (Fax) Telephone Number' SYNTAX 1.3.6.1.4.1.1466.11 + 5.121.1.22 ) +olcAttributeTypes: {20}( 2.5.4.24 NAME 'x121Address' DESC 'RFC2256: X.121 Ad + dress' EQUALITY numericStringMatch SUBSTR numericStringSubstringsMatch SYNT + AX 1.3.6.1.4.1.1466.115.121.1.36{15} ) +olcAttributeTypes: {21}( 2.5.4.25 NAME 'internationaliSDNNumber' DESC 'RFC22 + 56: international ISDN number' EQUALITY numericStringMatch SUBSTR numericSt + ringSubstringsMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.36{16} ) +olcAttributeTypes: {22}( 2.5.4.26 NAME 'registeredAddress' DESC 'RFC2256: re + gistered postal address' SUP postalAddress SYNTAX 1.3.6.1.4.1.1466.115.121. 
+ 1.41 ) +olcAttributeTypes: {23}( 2.5.4.27 NAME 'destinationIndicator' DESC 'RFC2256: + destination indicator' EQUALITY caseIgnoreMatch SUBSTR caseIgnoreSubstring + sMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.44{128} ) +olcAttributeTypes: {24}( 2.5.4.28 NAME 'preferredDeliveryMethod' DESC 'RFC22 + 56: preferred delivery method' SYNTAX 1.3.6.1.4.1.1466.115.121.1.14 SINGLE- + VALUE ) +olcAttributeTypes: {25}( 2.5.4.29 NAME 'presentationAddress' DESC 'RFC2256: + presentation address' EQUALITY presentationAddressMatch SYNTAX 1.3.6.1.4.1. + 1466.115.121.1.43 SINGLE-VALUE ) +olcAttributeTypes: {26}( 2.5.4.30 NAME 'supportedApplicationContext' DESC 'R + FC2256: supported application context' EQUALITY objectIdentifierMatch SYNTA + X 1.3.6.1.4.1.1466.115.121.1.38 ) +olcAttributeTypes: {27}( 2.5.4.31 NAME 'member' DESC 'RFC2256: member of a g + roup' SUP distinguishedName ) +olcAttributeTypes: {28}( 2.5.4.32 NAME 'owner' DESC 'RFC2256: owner (of the + object)' SUP distinguishedName ) +olcAttributeTypes: {29}( 2.5.4.33 NAME 'roleOccupant' DESC 'RFC2256: occupan + t of role' SUP distinguishedName ) +olcAttributeTypes: {30}( 2.5.4.36 NAME 'userCertificate' DESC 'RFC2256: X.50 + 9 user certificate, use ;binary' EQUALITY certificateExactMatch SYNTAX 1.3. + 6.1.4.1.1466.115.121.1.8 ) +olcAttributeTypes: {31}( 2.5.4.37 NAME 'cACertificate' DESC 'RFC2256: X.509 + CA certificate, use ;binary' EQUALITY certificateExactMatch SYNTAX 1.3.6.1. 
+ 4.1.1466.115.121.1.8 ) +olcAttributeTypes: {32}( 2.5.4.38 NAME 'authorityRevocationList' DESC 'RFC22 + 56: X.509 authority revocation list, use ;binary' SYNTAX 1.3.6.1.4.1.1466.1 + 15.121.1.9 ) +olcAttributeTypes: {33}( 2.5.4.39 NAME 'certificateRevocationList' DESC 'RFC + 2256: X.509 certificate revocation list, use ;binary' SYNTAX 1.3.6.1.4.1.14 + 66.115.121.1.9 ) +olcAttributeTypes: {34}( 2.5.4.40 NAME 'crossCertificatePair' DESC 'RFC2256: + X.509 cross certificate pair, use ;binary' SYNTAX 1.3.6.1.4.1.1466.115.121 + .1.10 ) +olcAttributeTypes: {35}( 2.5.4.42 NAME ( 'givenName' 'gn' ) DESC 'RFC2256: f + irst name(s) for which the entity is known by' SUP name ) +olcAttributeTypes: {36}( 2.5.4.43 NAME 'initials' DESC 'RFC2256: initials of + some or all of names, but not the surname(s).' SUP name ) +olcAttributeTypes: {37}( 2.5.4.44 NAME 'generationQualifier' DESC 'RFC2256: + name qualifier indicating a generation' SUP name ) +olcAttributeTypes: {38}( 2.5.4.45 NAME 'x500UniqueIdentifier' DESC 'RFC2256: + X.500 unique identifier' EQUALITY bitStringMatch SYNTAX 1.3.6.1.4.1.1466.1 + 15.121.1.6 ) +olcAttributeTypes: {39}( 2.5.4.46 NAME 'dnQualifier' DESC 'RFC2256: DN quali + fier' EQUALITY caseIgnoreMatch ORDERING caseIgnoreOrderingMatch SUBSTR case + IgnoreSubstringsMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.44 ) +olcAttributeTypes: {40}( 2.5.4.47 NAME 'enhancedSearchGuide' DESC 'RFC2256: + enhanced search guide' SYNTAX 1.3.6.1.4.1.1466.115.121.1.21 ) +olcAttributeTypes: {41}( 2.5.4.48 NAME 'protocolInformation' DESC 'RFC2256: + protocol information' EQUALITY protocolInformationMatch SYNTAX 1.3.6.1.4.1. 
+ 1466.115.121.1.42 ) +olcAttributeTypes: {42}( 2.5.4.50 NAME 'uniqueMember' DESC 'RFC2256: unique + member of a group' EQUALITY uniqueMemberMatch SYNTAX 1.3.6.1.4.1.1466.115.1 + 21.1.34 ) +olcAttributeTypes: {43}( 2.5.4.51 NAME 'houseIdentifier' DESC 'RFC2256: hous + e identifier' EQUALITY caseIgnoreMatch SUBSTR caseIgnoreSubstringsMatch SYN + TAX 1.3.6.1.4.1.1466.115.121.1.15{32768} ) +olcAttributeTypes: {44}( 2.5.4.52 NAME 'supportedAlgorithms' DESC 'RFC2256: + supported algorithms' SYNTAX 1.3.6.1.4.1.1466.115.121.1.49 ) +olcAttributeTypes: {45}( 2.5.4.53 NAME 'deltaRevocationList' DESC 'RFC2256: + delta revocation list; use ;binary' SYNTAX 1.3.6.1.4.1.1466.115.121.1.9 ) +olcAttributeTypes: {46}( 2.5.4.54 NAME 'dmdName' DESC 'RFC2256: name of DMD' + SUP name ) +olcAttributeTypes: {47}( 2.5.4.65 NAME 'pseudonym' DESC 'X.520(4th): pseudon + ym for the object' SUP name ) +olcAttributeTypes: {48}( 0.9.2342.19200300.100.1.3 NAME ( 'mail' 'rfc822Mail + box' ) DESC 'RFC1274: RFC822 Mailbox' EQUALITY caseIgnoreIA5Match SUBST + R caseIgnoreIA5SubstringsMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.26{256} + ) +olcAttributeTypes: {49}( 0.9.2342.19200300.100.1.25 NAME ( 'dc' 'domainCompo + nent' ) DESC 'RFC1274/2247: domain component' EQUALITY caseIgnoreIA5Match S + UBSTR caseIgnoreIA5SubstringsMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.26 SIN + GLE-VALUE ) +olcAttributeTypes: {50}( 0.9.2342.19200300.100.1.37 NAME 'associatedDomain' + DESC 'RFC1274: domain associated with object' EQUALITY caseIgnoreIA5Match S + UBSTR caseIgnoreIA5SubstringsMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.26 ) +olcAttributeTypes: {51}( 1.2.840.113549.1.9.1 NAME ( 'email' 'emailAddress' + 'pkcs9email' ) DESC 'RFC3280: legacy attribute for email addresses in DNs' + EQUALITY caseIgnoreIA5Match SUBSTR caseIgnoreIA5SubstringsMatch SYNTAX 1.3. 
+ 6.1.4.1.1466.115.121.1.26{128} ) +olcObjectClasses: {0}( 2.5.6.2 NAME 'country' DESC 'RFC2256: a country' SUP + top STRUCTURAL MUST c MAY ( searchGuide $ description ) ) +olcObjectClasses: {1}( 2.5.6.3 NAME 'locality' DESC 'RFC2256: a locality' SU + P top STRUCTURAL MAY ( street $ seeAlso $ searchGuide $ st $ l $ descriptio + n ) ) +olcObjectClasses: {2}( 2.5.6.4 NAME 'organization' DESC 'RFC2256: an organiz + ation' SUP top STRUCTURAL MUST o MAY ( userPassword $ searchGuide $ seeAlso + $ businessCategory $ x121Address $ registeredAddress $ destinationIndicato + r $ preferredDeliveryMethod $ telexNumber $ teletexTerminalIdentifier $ tel + ephoneNumber $ internationaliSDNNumber $ facsimileTelephoneNumber $ street + $ postOfficeBox $ postalCode $ postalAddress $ physicalDeliveryOfficeName + $ st $ l $ description ) ) +olcObjectClasses: {3}( 2.5.6.5 NAME 'organizationalUnit' DESC 'RFC2256: an o + rganizational unit' SUP top STRUCTURAL MUST ou MAY ( userPassword $ searchG + uide $ seeAlso $ businessCategory $ x121Address $ registeredAddress $ desti + nationIndicator $ preferredDeliveryMethod $ telexNumber $ teletexTerminalId + entifier $ telephoneNumber $ internationaliSDNNumber $ facsimileTelephoneNu + mber $ street $ postOfficeBox $ postalCode $ postalAddress $ physicalDelive + ryOfficeName $ st $ l $ description ) ) +olcObjectClasses: {4}( 2.5.6.6 NAME 'person' DESC 'RFC2256: a person' SUP to + p STRUCTURAL MUST ( sn $ cn ) MAY ( userPassword $ telephoneNumber $ seeAls + o $ description ) ) +olcObjectClasses: {5}( 2.5.6.7 NAME 'organizationalPerson' DESC 'RFC2256: an + organizational person' SUP person STRUCTURAL MAY ( title $ x121Address $ r + egisteredAddress $ destinationIndicator $ preferredDeliveryMethod $ telexNu + mber $ teletexTerminalIdentifier $ telephoneNumber $ internationaliSDNNumbe + r $ facsimileTelephoneNumber $ street $ postOfficeBox $ postalCode $ posta + lAddress $ physicalDeliveryOfficeName $ ou $ st $ l ) ) +olcObjectClasses: {6}( 2.5.6.8 NAME 
'organizationalRole' DESC 'RFC2256: an o + rganizational role' SUP top STRUCTURAL MUST cn MAY ( x121Address $ register + edAddress $ destinationIndicator $ preferredDeliveryMethod $ telexNumber $ + teletexTerminalIdentifier $ telephoneNumber $ internationaliSDNNumber $ fac + simileTelephoneNumber $ seeAlso $ roleOccupant $ preferredDeliveryMethod $ + street $ postOfficeBox $ postalCode $ postalAddress $ physicalDeliveryOffic + eName $ ou $ st $ l $ description ) ) +olcObjectClasses: {7}( 2.5.6.9 NAME 'groupOfNames' DESC 'RFC2256: a group of + names (DNs)' SUP top STRUCTURAL MUST ( member $ cn ) MAY ( businessCategor + y $ seeAlso $ owner $ ou $ o $ description ) ) +olcObjectClasses: {8}( 2.5.6.10 NAME 'residentialPerson' DESC 'RFC2256: an r + esidential person' SUP person STRUCTURAL MUST l MAY ( businessCategory $ x1 + 21Address $ registeredAddress $ destinationIndicator $ preferredDeliveryMet + hod $ telexNumber $ teletexTerminalIdentifier $ telephoneNumber $ internati + onaliSDNNumber $ facsimileTelephoneNumber $ preferredDeliveryMethod $ stree + t $ postOfficeBox $ postalCode $ postalAddress $ physicalDeliveryOfficeName + $ st $ l ) ) +olcObjectClasses: {9}( 2.5.6.11 NAME 'applicationProcess' DESC 'RFC2256: an + application process' SUP top STRUCTURAL MUST cn MAY ( seeAlso $ ou $ l $ de + scription ) ) +olcObjectClasses: {10}( 2.5.6.12 NAME 'applicationEntity' DESC 'RFC2256: an + application entity' SUP top STRUCTURAL MUST ( presentationAddress $ cn ) MA + Y ( supportedApplicationContext $ seeAlso $ ou $ o $ l $ description ) ) +olcObjectClasses: {11}( 2.5.6.13 NAME 'dSA' DESC 'RFC2256: a directory syste + m agent (a server)' SUP applicationEntity STRUCTURAL MAY knowledgeInformati + on ) +olcObjectClasses: {12}( 2.5.6.14 NAME 'device' DESC 'RFC2256: a device' SUP + top STRUCTURAL MUST cn MAY ( serialNumber $ seeAlso $ owner $ ou $ o $ l $ + description ) ) +olcObjectClasses: {13}( 2.5.6.15 NAME 'strongAuthenticationUser' DESC 'RFC22 + 56: a strong authentication 
user' SUP top AUXILIARY MUST userCertificate ) +olcObjectClasses: {14}( 2.5.6.16 NAME 'certificationAuthority' DESC 'RFC2256 + : a certificate authority' SUP top AUXILIARY MUST ( authorityRevocationList + $ certificateRevocationList $ cACertificate ) MAY crossCertificatePair ) +olcObjectClasses: {15}( 2.5.6.17 NAME 'groupOfUniqueNames' DESC 'RFC2256: a + group of unique names (DN and Unique Identifier)' SUP top STRUCTURAL MUST ( + uniqueMember $ cn ) MAY ( businessCategory $ seeAlso $ owner $ ou $ o $ de + scription ) ) +olcObjectClasses: {16}( 2.5.6.18 NAME 'userSecurityInformation' DESC 'RFC225 + 6: a user security information' SUP top AUXILIARY MAY ( supportedAlgorithms + ) ) +olcObjectClasses: {17}( 2.5.6.16.2 NAME 'certificationAuthority-V2' SUP cert + ificationAuthority AUXILIARY MAY ( deltaRevocationList ) ) +olcObjectClasses: {18}( 2.5.6.19 NAME 'cRLDistributionPoint' SUP top STRUCTU + RAL MUST ( cn ) MAY ( certificateRevocationList $ authorityRevocationList $ + deltaRevocationList ) ) +olcObjectClasses: {19}( 2.5.6.20 NAME 'dmd' SUP top STRUCTURAL MUST ( dmdNam + e ) MAY ( userPassword $ searchGuide $ seeAlso $ businessCategory $ x121Add + ress $ registeredAddress $ destinationIndicator $ preferredDeliveryMethod $ + telexNumber $ teletexTerminalIdentifier $ telephoneNumber $ internationali + SDNNumber $ facsimileTelephoneNumber $ street $ postOfficeBox $ postalCode + $ postalAddress $ physicalDeliveryOfficeName $ st $ l $ description ) ) +olcObjectClasses: {20}( 2.5.6.21 NAME 'pkiUser' DESC 'RFC2587: a PKI user' S + UP top AUXILIARY MAY userCertificate ) +olcObjectClasses: {21}( 2.5.6.22 NAME 'pkiCA' DESC 'RFC2587: PKI certificate + authority' SUP top AUXILIARY MAY ( authorityRevocationList $ certificateRe + vocationList $ cACertificate $ crossCertificatePair ) ) +olcObjectClasses: {22}( 2.5.6.23 NAME 'deltaCRL' DESC 'RFC2587: PKI user' SU + P top AUXILIARY MAY deltaRevocationList ) +olcObjectClasses: {23}( 1.3.6.1.4.1.250.3.15 NAME 'labeledURIObject' DESC 
'R + FC2079: object that contains the URI attribute type' MAY ( labeledURI ) SUP + top AUXILIARY ) +olcObjectClasses: {24}( 0.9.2342.19200300.100.4.19 NAME 'simpleSecurityObjec + t' DESC 'RFC1274: simple security object' SUP top AUXILIARY MUST userPasswo + rd ) +olcObjectClasses: {25}( 1.3.6.1.4.1.1466.344 NAME 'dcObject' DESC 'RFC2247: + domain component object' SUP top AUXILIARY MUST dc ) +olcObjectClasses: {26}( 1.3.6.1.1.3.1 NAME 'uidObject' DESC 'RFC2377: uid ob + ject' SUP top AUXILIARY MUST uid ) +structuralObjectClass: olcSchemaConfig +entryUUID: 304d0256-7b77-103c-9a01-2368e9146c75 +creatorsName: cn=admin,cn=config +createTimestamp: 20220608130351Z +entryCSN: 20220608130351.931725Z#000000#000#000000 +modifiersName: cn=admin,cn=config +modifyTimestamp: 20220608130351Z diff --git a/dirsrvtests/tests/data/openldap_2_389/5323/slapd.d/cn=config/cn=schema/cn={1}cosine.ldif b/dirsrvtests/tests/data/openldap_2_389/5323/slapd.d/cn=config/cn=schema/cn={1}cosine.ldif new file mode 100755 index 0000000..eafa563 --- /dev/null +++ b/dirsrvtests/tests/data/openldap_2_389/5323/slapd.d/cn=config/cn=schema/cn={1}cosine.ldif @@ -0,0 +1,178 @@ +# AUTO-GENERATED FILE - DO NOT EDIT!! Use ldapmodify. +# CRC32 3b084acc +dn: cn={1}cosine +objectClass: olcSchemaConfig +cn: {1}cosine +olcAttributeTypes: {0}( 0.9.2342.19200300.100.1.2 NAME 'textEncodedORAddress + ' EQUALITY caseIgnoreMatch SUBSTR caseIgnoreSubstringsMatch SYNTAX 1.3.6.1. 
+ 4.1.1466.115.121.1.15{256} ) +olcAttributeTypes: {1}( 0.9.2342.19200300.100.1.4 NAME 'info' DESC 'RFC1274: + general information' EQUALITY caseIgnoreMatch SUBSTR caseIgnoreSubstringsM + atch SYNTAX 1.3.6.1.4.1.1466.115.121.1.15{2048} ) +olcAttributeTypes: {2}( 0.9.2342.19200300.100.1.5 NAME ( 'drink' 'favouriteD + rink' ) DESC 'RFC1274: favorite drink' EQUALITY caseIgnoreMatch SUBSTR case + IgnoreSubstringsMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.15{256} ) +olcAttributeTypes: {3}( 0.9.2342.19200300.100.1.6 NAME 'roomNumber' DESC 'RF + C1274: room number' EQUALITY caseIgnoreMatch SUBSTR caseIgnoreSubstringsMat + ch SYNTAX 1.3.6.1.4.1.1466.115.121.1.15{256} ) +olcAttributeTypes: {4}( 0.9.2342.19200300.100.1.7 NAME 'photo' DESC 'RFC1274 + : photo (G3 fax)' SYNTAX 1.3.6.1.4.1.1466.115.121.1.23{25000} ) +olcAttributeTypes: {5}( 0.9.2342.19200300.100.1.8 NAME 'userClass' DESC 'RFC + 1274: category of user' EQUALITY caseIgnoreMatch SUBSTR caseIgnoreSubstring + sMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.15{256} ) +olcAttributeTypes: {6}( 0.9.2342.19200300.100.1.9 NAME 'host' DESC 'RFC1274: + host computer' EQUALITY caseIgnoreMatch SUBSTR caseIgnoreSubstringsMatch S + YNTAX 1.3.6.1.4.1.1466.115.121.1.15{256} ) +olcAttributeTypes: {7}( 0.9.2342.19200300.100.1.10 NAME 'manager' DESC 'RFC1 + 274: DN of manager' EQUALITY distinguishedNameMatch SYNTAX 1.3.6.1.4.1.1466 + .115.121.1.12 ) +olcAttributeTypes: {8}( 0.9.2342.19200300.100.1.11 NAME 'documentIdentifier' + DESC 'RFC1274: unique identifier of document' EQUALITY caseIgnoreMatch SUB + STR caseIgnoreSubstringsMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.15{256} ) +olcAttributeTypes: {9}( 0.9.2342.19200300.100.1.12 NAME 'documentTitle' DESC + 'RFC1274: title of document' EQUALITY caseIgnoreMatch SUBSTR caseIgnoreSub + stringsMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.15{256} ) +olcAttributeTypes: {10}( 0.9.2342.19200300.100.1.13 NAME 'documentVersion' D + ESC 'RFC1274: version of document' EQUALITY caseIgnoreMatch SUBSTR caseIgno + 
reSubstringsMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.15{256} ) +olcAttributeTypes: {11}( 0.9.2342.19200300.100.1.14 NAME 'documentAuthor' DE + SC 'RFC1274: DN of author of document' EQUALITY distinguishedNameMatch SYNT + AX 1.3.6.1.4.1.1466.115.121.1.12 ) +olcAttributeTypes: {12}( 0.9.2342.19200300.100.1.15 NAME 'documentLocation' + DESC 'RFC1274: location of document original' EQUALITY caseIgnoreMatch SUBS + TR caseIgnoreSubstringsMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.15{256} ) +olcAttributeTypes: {13}( 0.9.2342.19200300.100.1.20 NAME ( 'homePhone' 'home + TelephoneNumber' ) DESC 'RFC1274: home telephone number' EQUALITY telephone + NumberMatch SUBSTR telephoneNumberSubstringsMatch SYNTAX 1.3.6.1.4.1.1466.1 + 15.121.1.50 ) +olcAttributeTypes: {14}( 0.9.2342.19200300.100.1.21 NAME 'secretary' DESC 'R + FC1274: DN of secretary' EQUALITY distinguishedNameMatch SYNTAX 1.3.6.1.4.1 + .1466.115.121.1.12 ) +olcAttributeTypes: {15}( 0.9.2342.19200300.100.1.22 NAME 'otherMailbox' SYNT + AX 1.3.6.1.4.1.1466.115.121.1.39 ) +olcAttributeTypes: {16}( 0.9.2342.19200300.100.1.26 NAME 'aRecord' EQUALITY + caseIgnoreIA5Match SYNTAX 1.3.6.1.4.1.1466.115.121.1.26 ) +olcAttributeTypes: {17}( 0.9.2342.19200300.100.1.27 NAME 'mDRecord' EQUALITY + caseIgnoreIA5Match SYNTAX 1.3.6.1.4.1.1466.115.121.1.26 ) +olcAttributeTypes: {18}( 0.9.2342.19200300.100.1.28 NAME 'mXRecord' EQUALITY + caseIgnoreIA5Match SYNTAX 1.3.6.1.4.1.1466.115.121.1.26 ) +olcAttributeTypes: {19}( 0.9.2342.19200300.100.1.29 NAME 'nSRecord' EQUALITY + caseIgnoreIA5Match SYNTAX 1.3.6.1.4.1.1466.115.121.1.26 ) +olcAttributeTypes: {20}( 0.9.2342.19200300.100.1.30 NAME 'sOARecord' EQUALIT + Y caseIgnoreIA5Match SYNTAX 1.3.6.1.4.1.1466.115.121.1.26 ) +olcAttributeTypes: {21}( 0.9.2342.19200300.100.1.31 NAME 'cNAMERecord' EQUAL + ITY caseIgnoreIA5Match SYNTAX 1.3.6.1.4.1.1466.115.121.1.26 ) +olcAttributeTypes: {22}( 0.9.2342.19200300.100.1.38 NAME 'associatedName' DE + SC 'RFC1274: DN of entry associated with domain' EQUALITY 
distinguishedName + Match SYNTAX 1.3.6.1.4.1.1466.115.121.1.12 ) +olcAttributeTypes: {23}( 0.9.2342.19200300.100.1.39 NAME 'homePostalAddress' + DESC 'RFC1274: home postal address' EQUALITY caseIgnoreListMatch SUBSTR ca + seIgnoreListSubstringsMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.41 ) +olcAttributeTypes: {24}( 0.9.2342.19200300.100.1.40 NAME 'personalTitle' DES + C 'RFC1274: personal title' EQUALITY caseIgnoreMatch SUBSTR caseIgnoreSubst + ringsMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.15{256} ) +olcAttributeTypes: {25}( 0.9.2342.19200300.100.1.41 NAME ( 'mobile' 'mobileT + elephoneNumber' ) DESC 'RFC1274: mobile telephone number' EQUALITY telephon + eNumberMatch SUBSTR telephoneNumberSubstringsMatch SYNTAX 1.3.6.1.4.1.1466. + 115.121.1.50 ) +olcAttributeTypes: {26}( 0.9.2342.19200300.100.1.42 NAME ( 'pager' 'pagerTel + ephoneNumber' ) DESC 'RFC1274: pager telephone number' EQUALITY telephoneNu + mberMatch SUBSTR telephoneNumberSubstringsMatch SYNTAX 1.3.6.1.4.1.1466.115 + .121.1.50 ) +olcAttributeTypes: {27}( 0.9.2342.19200300.100.1.43 NAME ( 'co' 'friendlyCou + ntryName' ) DESC 'RFC1274: friendly country name' EQUALITY caseIgnoreMatch + SUBSTR caseIgnoreSubstringsMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.15 ) +olcAttributeTypes: {28}( 0.9.2342.19200300.100.1.44 NAME 'uniqueIdentifier' + DESC 'RFC1274: unique identifer' EQUALITY caseIgnoreMatch SYNTAX 1.3.6.1.4. 
+ 1.1466.115.121.1.15{256} ) +olcAttributeTypes: {29}( 0.9.2342.19200300.100.1.45 NAME 'organizationalStat + us' DESC 'RFC1274: organizational status' EQUALITY caseIgnoreMatch SUBSTR c + aseIgnoreSubstringsMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.15{256} ) +olcAttributeTypes: {30}( 0.9.2342.19200300.100.1.46 NAME 'janetMailbox' DESC + 'RFC1274: Janet mailbox' EQUALITY caseIgnoreIA5Match SUBSTR caseIgnoreIA5S + ubstringsMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.26{256} ) +olcAttributeTypes: {31}( 0.9.2342.19200300.100.1.47 NAME 'mailPreferenceOpti + on' DESC 'RFC1274: mail preference option' SYNTAX 1.3.6.1.4.1.1466.115.121. + 1.27 ) +olcAttributeTypes: {32}( 0.9.2342.19200300.100.1.48 NAME 'buildingName' DESC + 'RFC1274: name of building' EQUALITY caseIgnoreMatch SUBSTR caseIgnoreSubs + tringsMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.15{256} ) +olcAttributeTypes: {33}( 0.9.2342.19200300.100.1.49 NAME 'dSAQuality' DESC ' + RFC1274: DSA Quality' SYNTAX 1.3.6.1.4.1.1466.115.121.1.19 SINGLE-VALUE ) +olcAttributeTypes: {34}( 0.9.2342.19200300.100.1.50 NAME 'singleLevelQuality + ' DESC 'RFC1274: Single Level Quality' SYNTAX 1.3.6.1.4.1.1466.115.121.1.13 + SINGLE-VALUE ) +olcAttributeTypes: {35}( 0.9.2342.19200300.100.1.51 NAME 'subtreeMinimumQual + ity' DESC 'RFC1274: Subtree Mininum Quality' SYNTAX 1.3.6.1.4.1.1466.115.12 + 1.1.13 SINGLE-VALUE ) +olcAttributeTypes: {36}( 0.9.2342.19200300.100.1.52 NAME 'subtreeMaximumQual + ity' DESC 'RFC1274: Subtree Maximun Quality' SYNTAX 1.3.6.1.4.1.1466.115.12 + 1.1.13 SINGLE-VALUE ) +olcAttributeTypes: {37}( 0.9.2342.19200300.100.1.53 NAME 'personalSignature' + DESC 'RFC1274: Personal Signature (G3 fax)' SYNTAX 1.3.6.1.4.1.1466.115.12 + 1.1.23 ) +olcAttributeTypes: {38}( 0.9.2342.19200300.100.1.54 NAME 'dITRedirect' DESC + 'RFC1274: DIT Redirect' EQUALITY distinguishedNameMatch SYNTAX 1.3.6.1.4.1. 
+ 1466.115.121.1.12 ) +olcAttributeTypes: {39}( 0.9.2342.19200300.100.1.55 NAME 'audio' DESC 'RFC12 + 74: audio (u-law)' SYNTAX 1.3.6.1.4.1.1466.115.121.1.4{25000} ) +olcAttributeTypes: {40}( 0.9.2342.19200300.100.1.56 NAME 'documentPublisher' + DESC 'RFC1274: publisher of document' EQUALITY caseIgnoreMatch SUBSTR case + IgnoreSubstringsMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.15 ) +olcObjectClasses: {0}( 0.9.2342.19200300.100.4.4 NAME ( 'pilotPerson' 'newPi + lotPerson' ) SUP person STRUCTURAL MAY ( userid $ textEncodedORAddress $ rf + c822Mailbox $ favouriteDrink $ roomNumber $ userClass $ homeTelephoneNumber + $ homePostalAddress $ secretary $ personalTitle $ preferredDeliveryMethod + $ businessCategory $ janetMailbox $ otherMailbox $ mobileTelephoneNumber $ + pagerTelephoneNumber $ organizationalStatus $ mailPreferenceOption $ person + alSignature ) ) +olcObjectClasses: {1}( 0.9.2342.19200300.100.4.5 NAME 'account' SUP top STRU + CTURAL MUST userid MAY ( description $ seeAlso $ localityName $ organizatio + nName $ organizationalUnitName $ host ) ) +olcObjectClasses: {2}( 0.9.2342.19200300.100.4.6 NAME 'document' SUP top STR + UCTURAL MUST documentIdentifier MAY ( commonName $ description $ seeAlso $ + localityName $ organizationName $ organizationalUnitName $ documentTitle $ + documentVersion $ documentAuthor $ documentLocation $ documentPublisher ) ) +olcObjectClasses: {3}( 0.9.2342.19200300.100.4.7 NAME 'room' SUP top STRUCTU + RAL MUST commonName MAY ( roomNumber $ description $ seeAlso $ telephoneNum + ber ) ) +olcObjectClasses: {4}( 0.9.2342.19200300.100.4.9 NAME 'documentSeries' SUP t + op STRUCTURAL MUST commonName MAY ( description $ seeAlso $ telephonenumber + $ localityName $ organizationName $ organizationalUnitName ) ) +olcObjectClasses: {5}( 0.9.2342.19200300.100.4.13 NAME 'domain' SUP top STRU + CTURAL MUST domainComponent MAY ( associatedName $ organizationName $ descr + iption $ businessCategory $ seeAlso $ searchGuide $ userPassword $ locality + 
Name $ stateOrProvinceName $ streetAddress $ physicalDeliveryOfficeName $ p + ostalAddress $ postalCode $ postOfficeBox $ streetAddress $ facsimileTeleph + oneNumber $ internationalISDNNumber $ telephoneNumber $ teletexTerminalIden + tifier $ telexNumber $ preferredDeliveryMethod $ destinationIndicator $ reg + isteredAddress $ x121Address ) ) +olcObjectClasses: {6}( 0.9.2342.19200300.100.4.14 NAME 'RFC822localPart' SUP + domain STRUCTURAL MAY ( commonName $ surname $ description $ seeAlso $ tel + ephoneNumber $ physicalDeliveryOfficeName $ postalAddress $ postalCode $ po + stOfficeBox $ streetAddress $ facsimileTelephoneNumber $ internationalISDNN + umber $ telephoneNumber $ teletexTerminalIdentifier $ telexNumber $ preferr + edDeliveryMethod $ destinationIndicator $ registeredAddress $ x121Address ) + ) +olcObjectClasses: {7}( 0.9.2342.19200300.100.4.15 NAME 'dNSDomain' SUP domai + n STRUCTURAL MAY ( ARecord $ MDRecord $ MXRecord $ NSRecord $ SOARecord $ C + NAMERecord ) ) +olcObjectClasses: {8}( 0.9.2342.19200300.100.4.17 NAME 'domainRelatedObject' + DESC 'RFC1274: an object related to an domain' SUP top AUXILIARY MUST asso + ciatedDomain ) +olcObjectClasses: {9}( 0.9.2342.19200300.100.4.18 NAME 'friendlyCountry' SUP + country STRUCTURAL MUST friendlyCountryName ) +olcObjectClasses: {10}( 0.9.2342.19200300.100.4.20 NAME 'pilotOrganization' + SUP ( organization $ organizationalUnit ) STRUCTURAL MAY buildingName ) +olcObjectClasses: {11}( 0.9.2342.19200300.100.4.21 NAME 'pilotDSA' SUP dsa S + TRUCTURAL MAY dSAQuality ) +olcObjectClasses: {12}( 0.9.2342.19200300.100.4.22 NAME 'qualityLabelledData + ' SUP top AUXILIARY MUST dsaQuality MAY ( subtreeMinimumQuality $ subtreeMa + ximumQuality ) ) +structuralObjectClass: olcSchemaConfig +entryUUID: 304d1d72-7b77-103c-9a02-2368e9146c75 +creatorsName: cn=admin,cn=config +createTimestamp: 20220608130351Z +entryCSN: 20220608130351.932419Z#000000#000#000000 +modifiersName: cn=admin,cn=config +modifyTimestamp: 20220608130351Z 
diff --git a/dirsrvtests/tests/data/openldap_2_389/5323/slapd.d/cn=config/cn=schema/cn={2}nis.ldif b/dirsrvtests/tests/data/openldap_2_389/5323/slapd.d/cn=config/cn=schema/cn={2}nis.ldif new file mode 100755 index 0000000..e45149f --- /dev/null +++ b/dirsrvtests/tests/data/openldap_2_389/5323/slapd.d/cn=config/cn=schema/cn={2}nis.ldif @@ -0,0 +1,108 @@ +# AUTO-GENERATED FILE - DO NOT EDIT!! Use ldapmodify. +# CRC32 f2c20e36 +dn: cn={2}nis +objectClass: olcSchemaConfig +cn: {2}nis +olcAttributeTypes: {0}( 1.3.6.1.1.1.1.2 NAME 'gecos' DESC 'The GECOS field; + the common name' EQUALITY caseIgnoreIA5Match SUBSTR caseIgnoreIA5Substrings + Match SYNTAX 1.3.6.1.4.1.1466.115.121.1.26 SINGLE-VALUE ) +olcAttributeTypes: {1}( 1.3.6.1.1.1.1.3 NAME 'homeDirectory' DESC 'The absol + ute path to the home directory' EQUALITY caseExactIA5Match SYNTAX 1.3.6.1.4 + .1.1466.115.121.1.26 SINGLE-VALUE ) +olcAttributeTypes: {2}( 1.3.6.1.1.1.1.4 NAME 'loginShell' DESC 'The path to + the login shell' EQUALITY caseExactIA5Match SYNTAX 1.3.6.1.4.1.1466.115.121 + .1.26 SINGLE-VALUE ) +olcAttributeTypes: {3}( 1.3.6.1.1.1.1.5 NAME 'shadowLastChange' EQUALITY int + egerMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.27 SINGLE-VALUE ) +olcAttributeTypes: {4}( 1.3.6.1.1.1.1.6 NAME 'shadowMin' EQUALITY integerMat + ch SYNTAX 1.3.6.1.4.1.1466.115.121.1.27 SINGLE-VALUE ) +olcAttributeTypes: {5}( 1.3.6.1.1.1.1.7 NAME 'shadowMax' EQUALITY integerMat + ch SYNTAX 1.3.6.1.4.1.1466.115.121.1.27 SINGLE-VALUE ) +olcAttributeTypes: {6}( 1.3.6.1.1.1.1.8 NAME 'shadowWarning' EQUALITY intege + rMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.27 SINGLE-VALUE ) +olcAttributeTypes: {7}( 1.3.6.1.1.1.1.9 NAME 'shadowInactive' EQUALITY integ + erMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.27 SINGLE-VALUE ) +olcAttributeTypes: {8}( 1.3.6.1.1.1.1.10 NAME 'shadowExpire' EQUALITY intege + rMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.27 SINGLE-VALUE ) +olcAttributeTypes: {9}( 1.3.6.1.1.1.1.11 NAME 'shadowFlag' EQUALITY integerM + atch SYNTAX 
1.3.6.1.4.1.1466.115.121.1.27 SINGLE-VALUE ) +olcAttributeTypes: {10}( 1.3.6.1.1.1.1.12 NAME 'memberUid' EQUALITY caseExac + tIA5Match SUBSTR caseExactIA5SubstringsMatch SYNTAX 1.3.6.1.4.1.1466.115.12 + 1.1.26 ) +olcAttributeTypes: {11}( 1.3.6.1.1.1.1.13 NAME 'memberNisNetgroup' EQUALITY + caseExactIA5Match SUBSTR caseExactIA5SubstringsMatch SYNTAX 1.3.6.1.4.1.146 + 6.115.121.1.26 ) +olcAttributeTypes: {12}( 1.3.6.1.1.1.1.14 NAME 'nisNetgroupTriple' DESC 'Net + group triple' SYNTAX 1.3.6.1.1.1.0.0 ) +olcAttributeTypes: {13}( 1.3.6.1.1.1.1.15 NAME 'ipServicePort' EQUALITY inte + gerMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.27 SINGLE-VALUE ) +olcAttributeTypes: {14}( 1.3.6.1.1.1.1.16 NAME 'ipServiceProtocol' SUP name + ) +olcAttributeTypes: {15}( 1.3.6.1.1.1.1.17 NAME 'ipProtocolNumber' EQUALITY i + ntegerMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.27 SINGLE-VALUE ) +olcAttributeTypes: {16}( 1.3.6.1.1.1.1.18 NAME 'oncRpcNumber' EQUALITY integ + erMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.27 SINGLE-VALUE ) +olcAttributeTypes: {17}( 1.3.6.1.1.1.1.19 NAME 'ipHostNumber' DESC 'IP addre + ss' EQUALITY caseIgnoreIA5Match SYNTAX 1.3.6.1.4.1.1466.115.121.1.26{128} ) +olcAttributeTypes: {18}( 1.3.6.1.1.1.1.20 NAME 'ipNetworkNumber' DESC 'IP ne + twork' EQUALITY caseIgnoreIA5Match SYNTAX 1.3.6.1.4.1.1466.115.121.1.26{128 + } SINGLE-VALUE ) +olcAttributeTypes: {19}( 1.3.6.1.1.1.1.21 NAME 'ipNetmaskNumber' DESC 'IP ne + tmask' EQUALITY caseIgnoreIA5Match SYNTAX 1.3.6.1.4.1.1466.115.121.1.26{128 + } SINGLE-VALUE ) +olcAttributeTypes: {20}( 1.3.6.1.1.1.1.22 NAME 'macAddress' DESC 'MAC addres + s' EQUALITY caseIgnoreIA5Match SYNTAX 1.3.6.1.4.1.1466.115.121.1.26{128} ) +olcAttributeTypes: {21}( 1.3.6.1.1.1.1.23 NAME 'bootParameter' DESC 'rpc.boo + tparamd parameter' SYNTAX 1.3.6.1.1.1.0.1 ) +olcAttributeTypes: {22}( 1.3.6.1.1.1.1.24 NAME 'bootFile' DESC 'Boot image n + ame' EQUALITY caseExactIA5Match SYNTAX 1.3.6.1.4.1.1466.115.121.1.26 ) +olcAttributeTypes: {23}( 1.3.6.1.1.1.1.26 NAME 
'nisMapName' SUP name ) +olcAttributeTypes: {24}( 1.3.6.1.1.1.1.27 NAME 'nisMapEntry' EQUALITY caseEx + actIA5Match SUBSTR caseExactIA5SubstringsMatch SYNTAX 1.3.6.1.4.1.1466.115. + 121.1.26{1024} SINGLE-VALUE ) +olcObjectClasses: {0}( 1.3.6.1.1.1.2.0 NAME 'posixAccount' DESC 'Abstraction + of an account with POSIX attributes' SUP top AUXILIARY MUST ( cn $ uid $ u + idNumber $ gidNumber $ homeDirectory ) MAY ( userPassword $ loginShell $ ge + cos $ description ) ) +olcObjectClasses: {1}( 1.3.6.1.1.1.2.1 NAME 'shadowAccount' DESC 'Additional + attributes for shadow passwords' SUP top AUXILIARY MUST uid MAY ( userPass + word $ shadowLastChange $ shadowMin $ shadowMax $ shadowWarning $ shadowIna + ctive $ shadowExpire $ shadowFlag $ description ) ) +olcObjectClasses: {2}( 1.3.6.1.1.1.2.2 NAME 'posixGroup' DESC 'Abstraction o + f a group of accounts' SUP top STRUCTURAL MUST ( cn $ gidNumber ) MAY ( use + rPassword $ memberUid $ description ) ) +olcObjectClasses: {3}( 1.3.6.1.1.1.2.3 NAME 'ipService' DESC 'Abstraction an + Internet Protocol service' SUP top STRUCTURAL MUST ( cn $ ipServicePort $ + ipServiceProtocol ) MAY description ) +olcObjectClasses: {4}( 1.3.6.1.1.1.2.4 NAME 'ipProtocol' DESC 'Abstraction o + f an IP protocol' SUP top STRUCTURAL MUST ( cn $ ipProtocolNumber $ descrip + tion ) MAY description ) +olcObjectClasses: {5}( 1.3.6.1.1.1.2.5 NAME 'oncRpc' DESC 'Abstraction of an + ONC/RPC binding' SUP top STRUCTURAL MUST ( cn $ oncRpcNumber $ description + ) MAY description ) +olcObjectClasses: {6}( 1.3.6.1.1.1.2.6 NAME 'ipHost' DESC 'Abstraction of a + host, an IP device' SUP top AUXILIARY MUST ( cn $ ipHostNumber ) MAY ( l $ + description $ manager ) ) +olcObjectClasses: {7}( 1.3.6.1.1.1.2.7 NAME 'ipNetwork' DESC 'Abstraction of + an IP network' SUP top STRUCTURAL MUST ( cn $ ipNetworkNumber ) MAY ( ipNe + tmaskNumber $ l $ description $ manager ) ) +olcObjectClasses: {8}( 1.3.6.1.1.1.2.8 NAME 'nisNetgroup' DESC 'Abstraction + of a netgroup' SUP top 
STRUCTURAL MUST cn MAY ( nisNetgroupTriple $ memberN + isNetgroup $ description ) ) +olcObjectClasses: {9}( 1.3.6.1.1.1.2.9 NAME 'nisMap' DESC 'A generic abstrac + tion of a NIS map' SUP top STRUCTURAL MUST nisMapName MAY description ) +olcObjectClasses: {10}( 1.3.6.1.1.1.2.10 NAME 'nisObject' DESC 'An entry in + a NIS map' SUP top STRUCTURAL MUST ( cn $ nisMapEntry $ nisMapName ) MAY de + scription ) +olcObjectClasses: {11}( 1.3.6.1.1.1.2.11 NAME 'ieee802Device' DESC 'A device + with a MAC address' SUP top AUXILIARY MAY macAddress ) +olcObjectClasses: {12}( 1.3.6.1.1.1.2.12 NAME 'bootableDevice' DESC 'A devic + e with boot parameters' SUP top AUXILIARY MAY ( bootFile $ bootParameter ) + ) +structuralObjectClass: olcSchemaConfig +entryUUID: 304d2f38-7b77-103c-9a03-2368e9146c75 +creatorsName: cn=admin,cn=config +createTimestamp: 20220608130351Z +entryCSN: 20220608130351.932874Z#000000#000#000000 +modifiersName: cn=admin,cn=config +modifyTimestamp: 20220608130351Z diff --git a/dirsrvtests/tests/data/openldap_2_389/5323/slapd.d/cn=config/cn=schema/cn={3}inetorgperson.ldif b/dirsrvtests/tests/data/openldap_2_389/5323/slapd.d/cn=config/cn=schema/cn={3}inetorgperson.ldif new file mode 100755 index 0000000..51500c9 --- /dev/null +++ b/dirsrvtests/tests/data/openldap_2_389/5323/slapd.d/cn=config/cn=schema/cn={3}inetorgperson.ldif @@ -0,0 +1,49 @@ +# AUTO-GENERATED FILE - DO NOT EDIT!! Use ldapmodify. 
+# CRC32 6762f492 +dn: cn={3}inetorgperson +objectClass: olcSchemaConfig +cn: {3}inetorgperson +olcAttributeTypes: {0}( 2.16.840.1.113730.3.1.1 NAME 'carLicense' DESC 'RFC2 + 798: vehicle license or registration plate' EQUALITY caseIgnoreMatch SUBSTR + caseIgnoreSubstringsMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.15 ) +olcAttributeTypes: {1}( 2.16.840.1.113730.3.1.2 NAME 'departmentNumber' DESC + 'RFC2798: identifies a department within an organization' EQUALITY caseIgn + oreMatch SUBSTR caseIgnoreSubstringsMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1 + .15 ) +olcAttributeTypes: {2}( 2.16.840.1.113730.3.1.241 NAME 'displayName' DESC 'R + FC2798: preferred name to be used when displaying entries' EQUALITY caseIgn + oreMatch SUBSTR caseIgnoreSubstringsMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1 + .15 SINGLE-VALUE ) +olcAttributeTypes: {3}( 2.16.840.1.113730.3.1.3 NAME 'employeeNumber' DESC ' + RFC2798: numerically identifies an employee within an organization' EQUALIT + Y caseIgnoreMatch SUBSTR caseIgnoreSubstringsMatch SYNTAX 1.3.6.1.4.1.1466. + 115.121.1.15 SINGLE-VALUE ) +olcAttributeTypes: {4}( 2.16.840.1.113730.3.1.4 NAME 'employeeType' DESC 'RF + C2798: type of employment for a person' EQUALITY caseIgnoreMatch SUBSTR cas + eIgnoreSubstringsMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.15 ) +olcAttributeTypes: {5}( 0.9.2342.19200300.100.1.60 NAME 'jpegPhoto' DESC 'RF + C2798: a JPEG image' SYNTAX 1.3.6.1.4.1.1466.115.121.1.28 ) +olcAttributeTypes: {6}( 2.16.840.1.113730.3.1.39 NAME 'preferredLanguage' DE + SC 'RFC2798: preferred written or spoken language for a person' EQUALITY ca + seIgnoreMatch SUBSTR caseIgnoreSubstringsMatch SYNTAX 1.3.6.1.4.1.1466.115. + 121.1.15 SINGLE-VALUE ) +olcAttributeTypes: {7}( 2.16.840.1.113730.3.1.40 NAME 'userSMIMECertificate' + DESC 'RFC2798: PKCS#7 SignedData used to support S/MIME' SYNTAX 1.3.6.1.4. 
+ 1.1466.115.121.1.5 ) +olcAttributeTypes: {8}( 2.16.840.1.113730.3.1.216 NAME 'userPKCS12' DESC 'RF + C2798: personal identity information, a PKCS #12 PFX' SYNTAX 1.3.6.1.4.1.14 + 66.115.121.1.5 ) +olcObjectClasses: {0}( 2.16.840.1.113730.3.2.2 NAME 'inetOrgPerson' DESC 'RF + C2798: Internet Organizational Person' SUP organizationalPerson STRUCTURAL + MAY ( audio $ businessCategory $ carLicense $ departmentNumber $ displayNam + e $ employeeNumber $ employeeType $ givenName $ homePhone $ homePostalAddre + ss $ initials $ jpegPhoto $ labeledURI $ mail $ manager $ mobile $ o $ page + r $ photo $ roomNumber $ secretary $ uid $ userCertificate $ x500uniqueIden + tifier $ preferredLanguage $ userSMIMECertificate $ userPKCS12 ) ) +structuralObjectClass: olcSchemaConfig +entryUUID: 304d3a82-7b77-103c-9a04-2368e9146c75 +creatorsName: cn=admin,cn=config +createTimestamp: 20220608130351Z +entryCSN: 20220608130351.933163Z#000000#000#000000 +modifiersName: cn=admin,cn=config +modifyTimestamp: 20220608130351Z diff --git a/dirsrvtests/tests/data/openldap_2_389/5323/slapd.d/cn=config/olcDatabase={-1}frontend.ldif b/dirsrvtests/tests/data/openldap_2_389/5323/slapd.d/cn=config/olcDatabase={-1}frontend.ldif new file mode 100755 index 0000000..1a94385 --- /dev/null +++ b/dirsrvtests/tests/data/openldap_2_389/5323/slapd.d/cn=config/olcDatabase={-1}frontend.ldif @@ -0,0 +1,18 @@ +# AUTO-GENERATED FILE - DO NOT EDIT!! Use ldapmodify. 
+# CRC32 5c9e3034 +dn: olcDatabase={-1}frontend +objectClass: olcDatabaseConfig +objectClass: olcFrontendConfig +olcDatabase: {-1}frontend +olcAccess: {0}to * by dn.exact=gidNumber=0+uidNumber=0,cn=peercred,cn=extern + al,cn=auth manage by * break +olcAccess: {1}to dn.exact="" by * read +olcAccess: {2}to dn.base="cn=Subschema" by * read +olcSizeLimit: 500 +structuralObjectClass: olcDatabaseConfig +entryUUID: 304cf158-7b77-103c-99fe-2368e9146c75 +creatorsName: cn=config +createTimestamp: 20220608130351Z +entryCSN: 20220608130351.931290Z#000000#000#000000 +modifiersName: cn=config +modifyTimestamp: 20220608130351Z diff --git a/dirsrvtests/tests/data/openldap_2_389/5323/slapd.d/cn=config/olcDatabase={0}config.ldif b/dirsrvtests/tests/data/openldap_2_389/5323/slapd.d/cn=config/olcDatabase={0}config.ldif new file mode 100755 index 0000000..1899dae --- /dev/null +++ b/dirsrvtests/tests/data/openldap_2_389/5323/slapd.d/cn=config/olcDatabase={0}config.ldif @@ -0,0 +1,15 @@ +# AUTO-GENERATED FILE - DO NOT EDIT!! Use ldapmodify. +# CRC32 ca6beecd +dn: olcDatabase={0}config +objectClass: olcDatabaseConfig +olcDatabase: {0}config +olcAccess: {0}to * by dn.exact=gidNumber=0+uidNumber=0,cn=peercred,cn=extern + al,cn=auth manage by * break +olcRootDN: cn=admin,cn=config +structuralObjectClass: olcDatabaseConfig +entryUUID: 304cf810-7b77-103c-99ff-2368e9146c75 +creatorsName: cn=config +createTimestamp: 20220608130351Z +entryCSN: 20220608130351.931462Z#000000#000#000000 +modifiersName: cn=config +modifyTimestamp: 20220608130351Z diff --git a/dirsrvtests/tests/data/openldap_2_389/5323/slapd.d/cn=config/olcDatabase={1}mdb.ldif b/dirsrvtests/tests/data/openldap_2_389/5323/slapd.d/cn=config/olcDatabase={1}mdb.ldif new file mode 100755 index 0000000..31eff2e --- /dev/null +++ b/dirsrvtests/tests/data/openldap_2_389/5323/slapd.d/cn=config/olcDatabase={1}mdb.ldif @@ -0,0 +1,28 @@ +# AUTO-GENERATED FILE - DO NOT EDIT!! Use ldapmodify. 
+# CRC32 8b164b58 +dn: olcDatabase={1}mdb +objectClass: olcDatabaseConfig +objectClass: olcMdbConfig +olcDatabase: {1}mdb +olcDbDirectory: /var/lib/ldap +olcSuffix: dc=example,dc=com +olcAccess: {0}to attrs=userPassword by self write by anonymous auth by * non + e +olcAccess: {1}to attrs=shadowLastChange by self write by * read +olcAccess: {2}to * by * read +olcLastMod: TRUE +olcRootDN: cn=admin,dc=example,dc=com +olcRootPW:: e1NTSEF9eFNxNk9BNjdZcWJocGJqaGduODBVU0szcGtDayt3UXM= +olcDbCheckpoint: 512 30 +olcDbIndex: objectClass eq +olcDbIndex: cn,uid eq +olcDbIndex: uidNumber,gidNumber eq +olcDbIndex: member,memberUid eq +olcDbMaxSize: 1073741824 +structuralObjectClass: olcMdbConfig +entryUUID: 304d4c66-7b77-103c-9a06-2368e9146c75 +creatorsName: cn=admin,cn=config +createTimestamp: 20220608130351Z +entryCSN: 20220608130351.933621Z#000000#000#000000 +modifiersName: cn=admin,cn=config +modifyTimestamp: 20220608130351Z diff --git a/dirsrvtests/tests/data/openldap_2_389/5323/slapd.d/cn=config/olcDatabase={2}monitor.ldif b/dirsrvtests/tests/data/openldap_2_389/5323/slapd.d/cn=config/olcDatabase={2}monitor.ldif new file mode 100755 index 0000000..592dd07 --- /dev/null +++ b/dirsrvtests/tests/data/openldap_2_389/5323/slapd.d/cn=config/olcDatabase={2}monitor.ldif @@ -0,0 +1,15 @@ +# AUTO-GENERATED FILE - DO NOT EDIT!! Use ldapmodify. 
+# CRC32 6e42bd61 +dn: olcDatabase={2}monitor +objectClass: olcDatabaseConfig +objectClass: olcMonitorConfig +olcDatabase: {2}Monitor +olcAccess: {0}to dn.subtree="cn=Monitor" by dn.base="cn=monitor,dc=example,d + c=com" read by * none +structuralObjectClass: olcMonitorConfig +entryUUID: 6e2ad2d2-7b78-103c-8d72-9b11d7595a60 +creatorsName: gidNumber=0+uidNumber=0,cn=peercred,cn=external,cn=auth +createTimestamp: 20220608131245Z +entryCSN: 20220608131245.223147Z#000000#000#000000 +modifiersName: gidNumber=0+uidNumber=0,cn=peercred,cn=external,cn=auth +modifyTimestamp: 20220608131245Z diff --git a/dirsrvtests/tests/data/openldap_2_389/memberof/openldap_to_389ds-db.ldif b/dirsrvtests/tests/data/openldap_2_389/memberof/openldap_to_389ds-db.ldif new file mode 100644 index 0000000..e9d670d --- /dev/null +++ b/dirsrvtests/tests/data/openldap_2_389/memberof/openldap_to_389ds-db.ldif @@ -0,0 +1,73 @@ +# base +dn: dc=ldapdom,dc=net +dc: ldapdom +objectClass: top +objectClass: domain + +dn: ou=UnixUser,dc=ldapdom,dc=net +ou: People +objectClass: top +objectClass: organizationalUnit + +dn: ou=UnixGroup,dc=ldapdom,dc=net +ou: Group +objectClass: top +objectClass: organizationalUnit + +# users +dn: uid=testuser1,ou=UnixUser,dc=ldapdom,dc=net +objectClass: account +objectClass: posixAccount +objectClass: top +objectClass: shadowAccount +uid: testuser1 +cn: testuser1 +userPassword: {crypt}$6$7syqq.EQ$68iOWF0BTWC24aKE0rJ8cUtPd2Cs7HkruwjEikcJAD5dNNEgMMJ5Jk7w2sC2hYUwN2s65srTQTU83ADt2.t4l0 +loginShell: /bin/bash +uidNumber: 9000 +gidNumber: 8000 +homeDirectory: /tmp + +dn: uid=testuser2,ou=UnixUser,dc=ldapdom,dc=net +objectClass: account +objectClass: posixAccount +objectClass: top +objectClass: shadowAccount +uid: testuser2 +cn: testuser2 +userPassword: {crypt}$6$7syqq.EQ$68iOWF0BTWC24aKE0rJ8cUtPd2Cs7HkruwjEikcJAD5dNNEgMMJ5Jk7w2sC2hYUwN2s65srTQTU83ADt2.t4l0 +loginShell: /bin/bash +uidNumber: 9001 +gidNumber: 8000 +homeDirectory: /tmp + +# groups +dn: 
cn=group1,ou=UnixGroup,dc=ldapdom,dc=net +objectClass: groupOfNames +objectClass: posixGroup +objectClass: top +cn: group1 +gidNumber: 8000 +member: uid=testuser1,ou=UnixUser,dc=ldapdom,dc=net +memberUid: 9000 +member: uid=testuser2,ou=UnixUser,dc=ldapdom,dc=net +memberUid: 9001 + +dn: cn=group2,ou=UnixGroup,dc=ldapdom,dc=net +objectClass: groupOfNames +objectClass: posixGroup +objectClass: top +cn: group2 +gidNumber: 8001 +member: uid=testuser1,ou=UnixUser,dc=ldapdom,dc=net +memberUid: 9000 + +dn: cn=group3,ou=UnixGroup,dc=ldapdom,dc=net +objectClass: groupOfNames +objectClass: posixGroup +objectClass: top +cn: group3 +gidNumber: 8002 +member: uid=testuser2,ou=UnixUser,dc=ldapdom,dc=net +memberUid: 9001 + diff --git a/dirsrvtests/tests/data/openldap_2_389/memberof/openldap_to_389ds-slapd.conf b/dirsrvtests/tests/data/openldap_2_389/memberof/openldap_to_389ds-slapd.conf new file mode 100644 index 0000000..f5b0fb1 --- /dev/null +++ b/dirsrvtests/tests/data/openldap_2_389/memberof/openldap_to_389ds-slapd.conf @@ -0,0 +1,42 @@ +include /etc/openldap/schema/core.schema +include /etc/openldap/schema/cosine.schema +include /etc/openldap/schema/inetorgperson.schema +include /etc/openldap/schema/rfc2307bis.schema +include /etc/openldap/schema/yast.schema + +access to dn.base="" + by * read + +access to dn.base="cn=Subschema" + by * read + +access to attrs=userPassword,userPKCS12 + by self write + by * auth + +access to attrs=shadowLastChange + by self write + by * read + +access to * + by * read + +moduleload back_mdb.la +moduleload memberof.la +moduleload refint.la +moduleload unique.la + +database mdb +suffix "dc=ldapdom,dc=net" +checkpoint 1024 5 +rootdn "cn=root,dc=ldapdom,dc=net" +rootpw pass +directory /tmp/ldap-sssdtest +index objectClass eq + +overlay memberof +overlay unique +unique_uri ldap:///?mail?sub? 
+overlay refint +refint_attributes member +refint_nothing "cn=admin,dc=example,dc=com" diff --git a/dirsrvtests/tests/data/openldap_2_389/memberof/slapd.d/cn=config.ldif b/dirsrvtests/tests/data/openldap_2_389/memberof/slapd.d/cn=config.ldif new file mode 100755 index 0000000..cf47b14 --- /dev/null +++ b/dirsrvtests/tests/data/openldap_2_389/memberof/slapd.d/cn=config.ldif @@ -0,0 +1,43 @@ +# AUTO-GENERATED FILE - DO NOT EDIT!! Use ldapmodify. +# CRC32 fc127e60 +dn: cn=config +objectClass: olcGlobal +cn: config +olcConfigFile: ./openldap_to_389ds-slapd.conf +olcConfigDir: slapd.d +olcAttributeOptions: lang- +olcAuthzPolicy: none +olcConcurrency: 0 +olcConnMaxPending: 100 +olcConnMaxPendingAuth: 1000 +olcGentleHUP: FALSE +olcIdleTimeout: 0 +olcIndexSubstrIfMaxLen: 4 +olcIndexSubstrIfMinLen: 2 +olcIndexSubstrAnyLen: 4 +olcIndexSubstrAnyStep: 2 +olcIndexHash64: FALSE +olcIndexIntLen: 4 +olcListenerThreads: 1 +olcLocalSSF: 71 +olcLogLevel: 0 +olcMaxFilterDepth: 1000 +olcReadOnly: FALSE +olcSaslAuxpropsDontUseCopyIgnore: FALSE +olcSaslSecProps: noplain,noanonymous +olcSockbufMaxIncoming: 262143 +olcSockbufMaxIncomingAuth: 16777215 +olcThreads: 16 +olcThreadQueues: 1 +olcTLSCRLCheck: none +olcTLSVerifyClient: never +olcTLSProtocolMin: 0.0 +olcToolThreads: 1 +olcWriteTimeout: 0 +structuralObjectClass: olcGlobal +entryUUID: bae01acc-4e5c-103c-8e3b-83bde8bbc3e8 +creatorsName: cn=config +createTimestamp: 20220412033105Z +entryCSN: 20220412033105.684579Z#000000#000#000000 +modifiersName: cn=config +modifyTimestamp: 20220412033105Z diff --git a/dirsrvtests/tests/data/openldap_2_389/memberof/slapd.d/cn=config/cn=module{0}.ldif b/dirsrvtests/tests/data/openldap_2_389/memberof/slapd.d/cn=config/cn=module{0}.ldif new file mode 100755 index 0000000..f25ded4 --- /dev/null +++ b/dirsrvtests/tests/data/openldap_2_389/memberof/slapd.d/cn=config/cn=module{0}.ldif @@ -0,0 +1,16 @@ +# AUTO-GENERATED FILE - DO NOT EDIT!! Use ldapmodify. 
+# CRC32 ae81751d +dn: cn=module{0} +objectClass: olcModuleList +cn: module{0} +olcModuleLoad: {0}back_mdb.la +olcModuleLoad: {1}memberof.la +olcModuleLoad: {2}refint.la +olcModuleLoad: {3}unique.la +structuralObjectClass: olcModuleList +entryUUID: bae020f8-4e5c-103c-8e3c-83bde8bbc3e8 +creatorsName: cn=config +createTimestamp: 20220412033105Z +entryCSN: 20220412033105.684579Z#000000#000#000000 +modifiersName: cn=config +modifyTimestamp: 20220412033105Z diff --git a/dirsrvtests/tests/data/openldap_2_389/memberof/slapd.d/cn=config/cn=schema.ldif b/dirsrvtests/tests/data/openldap_2_389/memberof/slapd.d/cn=config/cn=schema.ldif new file mode 100755 index 0000000..63bdfa3 --- /dev/null +++ b/dirsrvtests/tests/data/openldap_2_389/memberof/slapd.d/cn=config/cn=schema.ldif @@ -0,0 +1,893 @@ +# AUTO-GENERATED FILE - DO NOT EDIT!! Use ldapmodify. +# CRC32 af042693 +dn: cn=schema +objectClass: olcSchemaConfig +cn: schema +olcObjectIdentifier: OLcfg 1.3.6.1.4.1.4203.1.12.2 +olcObjectIdentifier: OLcfgAt OLcfg:3 +olcObjectIdentifier: OLcfgGlAt OLcfgAt:0 +olcObjectIdentifier: OLcfgBkAt OLcfgAt:1 +olcObjectIdentifier: OLcfgDbAt OLcfgAt:2 +olcObjectIdentifier: OLcfgOvAt OLcfgAt:3 +olcObjectIdentifier: OLcfgCtAt OLcfgAt:4 +olcObjectIdentifier: OLcfgOc OLcfg:4 +olcObjectIdentifier: OLcfgGlOc OLcfgOc:0 +olcObjectIdentifier: OLcfgBkOc OLcfgOc:1 +olcObjectIdentifier: OLcfgDbOc OLcfgOc:2 +olcObjectIdentifier: OLcfgOvOc OLcfgOc:3 +olcObjectIdentifier: OLcfgCtOc OLcfgOc:4 +olcObjectIdentifier: OMsyn 1.3.6.1.4.1.1466.115.121.1 +olcObjectIdentifier: OMsBoolean OMsyn:7 +olcObjectIdentifier: OMsDN OMsyn:12 +olcObjectIdentifier: OMsDirectoryString OMsyn:15 +olcObjectIdentifier: OMsIA5String OMsyn:26 +olcObjectIdentifier: OMsInteger OMsyn:27 +olcObjectIdentifier: OMsOID OMsyn:38 +olcObjectIdentifier: OMsOctetString OMsyn:40 +olcObjectIdentifier: olmAttributes 1.3.6.1.4.1.4203.666.1.55 +olcObjectIdentifier: olmSubSystemAttributes olmAttributes:0 +olcObjectIdentifier: olmGenericAttributes 
olmSubSystemAttributes:0 +olcObjectIdentifier: olmDatabaseAttributes olmSubSystemAttributes:1 +olcObjectIdentifier: olmOverlayAttributes olmSubSystemAttributes:2 +olcObjectIdentifier: olmModuleAttributes olmSubSystemAttributes:3 +olcObjectIdentifier: olmObjectClasses 1.3.6.1.4.1.4203.666.3.16 +olcObjectIdentifier: olmSubSystemObjectClasses olmObjectClasses:0 +olcObjectIdentifier: olmGenericObjectClasses olmSubSystemObjectClasses:0 +olcObjectIdentifier: olmDatabaseObjectClasses olmSubSystemObjectClasses:1 +olcObjectIdentifier: olmOverlayObjectClasses olmSubSystemObjectClasses:2 +olcObjectIdentifier: olmModuleObjectClasses olmSubSystemObjectClasses:3 +olcObjectIdentifier: olmSyncReplAttributes olmOverlayAttributes:1 +olcObjectIdentifier: olmSyncReplObjectClasses olmOverlayObjectClasses:1 +olcObjectIdentifier: olmMDBAttributes olmDatabaseAttributes:1 +olcObjectIdentifier: olmMDBObjectClasses olmDatabaseObjectClasses:1 +olcLdapSyntaxes: ( 1.3.6.1.4.1.1466.115.121.1.1 DESC 'ACI Item' X-BINARY-TRANS + FER-REQUIRED 'TRUE' X-NOT-HUMAN-READABLE 'TRUE' ) +olcLdapSyntaxes: ( 1.3.6.1.4.1.1466.115.121.1.2 DESC 'Access Point' X-NOT-HUMA + N-READABLE 'TRUE' ) +olcLdapSyntaxes: ( 1.3.6.1.4.1.1466.115.121.1.3 DESC 'Attribute Type Descripti + on' ) +olcLdapSyntaxes: ( 1.3.6.1.4.1.1466.115.121.1.4 DESC 'Audio' X-NOT-HUMAN-READA + BLE 'TRUE' ) +olcLdapSyntaxes: ( 1.3.6.1.4.1.1466.115.121.1.5 DESC 'Binary' X-NOT-HUMAN-READ + ABLE 'TRUE' ) +olcLdapSyntaxes: ( 1.3.6.1.4.1.1466.115.121.1.6 DESC 'Bit String' ) +olcLdapSyntaxes: ( 1.3.6.1.4.1.1466.115.121.1.7 DESC 'Boolean' ) +olcLdapSyntaxes: ( 1.3.6.1.4.1.1466.115.121.1.8 DESC 'Certificate' X-BINARY-TR + ANSFER-REQUIRED 'TRUE' X-NOT-HUMAN-READABLE 'TRUE' ) +olcLdapSyntaxes: ( 1.3.6.1.4.1.1466.115.121.1.9 DESC 'Certificate List' X-BINA + RY-TRANSFER-REQUIRED 'TRUE' X-NOT-HUMAN-READABLE 'TRUE' ) +olcLdapSyntaxes: ( 1.3.6.1.4.1.1466.115.121.1.10 DESC 'Certificate Pair' X-BIN + ARY-TRANSFER-REQUIRED 'TRUE' X-NOT-HUMAN-READABLE 'TRUE' ) 
+olcLdapSyntaxes: ( 1.3.6.1.4.1.4203.666.11.10.2.1 DESC 'X.509 AttributeCertifi + cate' X-BINARY-TRANSFER-REQUIRED 'TRUE' X-NOT-HUMAN-READABLE 'TRUE' ) +olcLdapSyntaxes: ( 1.3.6.1.4.1.1466.115.121.1.12 DESC 'Distinguished Name' ) +olcLdapSyntaxes: ( 1.2.36.79672281.1.5.0 DESC 'RDN' ) +olcLdapSyntaxes: ( 1.3.6.1.4.1.1466.115.121.1.13 DESC 'Data Quality' ) +olcLdapSyntaxes: ( 1.3.6.1.4.1.1466.115.121.1.14 DESC 'Delivery Method' ) +olcLdapSyntaxes: ( 1.3.6.1.4.1.1466.115.121.1.15 DESC 'Directory String' ) +olcLdapSyntaxes: ( 1.3.6.1.4.1.1466.115.121.1.16 DESC 'DIT Content Rule Descri + ption' ) +olcLdapSyntaxes: ( 1.3.6.1.4.1.1466.115.121.1.17 DESC 'DIT Structure Rule Desc + ription' ) +olcLdapSyntaxes: ( 1.3.6.1.4.1.1466.115.121.1.19 DESC 'DSA Quality' ) +olcLdapSyntaxes: ( 1.3.6.1.4.1.1466.115.121.1.20 DESC 'DSE Type' ) +olcLdapSyntaxes: ( 1.3.6.1.4.1.1466.115.121.1.21 DESC 'Enhanced Guide' ) +olcLdapSyntaxes: ( 1.3.6.1.4.1.1466.115.121.1.22 DESC 'Facsimile Telephone Num + ber' ) +olcLdapSyntaxes: ( 1.3.6.1.4.1.1466.115.121.1.23 DESC 'Fax' X-NOT-HUMAN-READAB + LE 'TRUE' ) +olcLdapSyntaxes: ( 1.3.6.1.4.1.1466.115.121.1.24 DESC 'Generalized Time' ) +olcLdapSyntaxes: ( 1.3.6.1.4.1.1466.115.121.1.25 DESC 'Guide' ) +olcLdapSyntaxes: ( 1.3.6.1.4.1.1466.115.121.1.26 DESC 'IA5 String' ) +olcLdapSyntaxes: ( 1.3.6.1.4.1.1466.115.121.1.27 DESC 'Integer' ) +olcLdapSyntaxes: ( 1.3.6.1.4.1.1466.115.121.1.28 DESC 'JPEG' X-NOT-HUMAN-READA + BLE 'TRUE' ) +olcLdapSyntaxes: ( 1.3.6.1.4.1.1466.115.121.1.29 DESC 'Master And Shadow Acces + s Points' ) +olcLdapSyntaxes: ( 1.3.6.1.4.1.1466.115.121.1.30 DESC 'Matching Rule Descripti + on' ) +olcLdapSyntaxes: ( 1.3.6.1.4.1.1466.115.121.1.31 DESC 'Matching Rule Use Descr + iption' ) +olcLdapSyntaxes: ( 1.3.6.1.4.1.1466.115.121.1.32 DESC 'Mail Preference' ) +olcLdapSyntaxes: ( 1.3.6.1.4.1.1466.115.121.1.33 DESC 'MHS OR Address' ) +olcLdapSyntaxes: ( 1.3.6.1.4.1.1466.115.121.1.34 DESC 'Name And Optional UID' + ) +olcLdapSyntaxes: ( 
1.3.6.1.4.1.1466.115.121.1.35 DESC 'Name Form Description' + ) +olcLdapSyntaxes: ( 1.3.6.1.4.1.1466.115.121.1.36 DESC 'Numeric String' ) +olcLdapSyntaxes: ( 1.3.6.1.4.1.1466.115.121.1.37 DESC 'Object Class Descriptio + n' ) +olcLdapSyntaxes: ( 1.3.6.1.4.1.1466.115.121.1.38 DESC 'OID' ) +olcLdapSyntaxes: ( 1.3.6.1.4.1.1466.115.121.1.39 DESC 'Other Mailbox' ) +olcLdapSyntaxes: ( 1.3.6.1.4.1.1466.115.121.1.40 DESC 'Octet String' ) +olcLdapSyntaxes: ( 1.3.6.1.4.1.1466.115.121.1.41 DESC 'Postal Address' ) +olcLdapSyntaxes: ( 1.3.6.1.4.1.1466.115.121.1.42 DESC 'Protocol Information' ) +olcLdapSyntaxes: ( 1.3.6.1.4.1.1466.115.121.1.43 DESC 'Presentation Address' ) +olcLdapSyntaxes: ( 1.3.6.1.4.1.1466.115.121.1.44 DESC 'Printable String' ) +olcLdapSyntaxes: ( 1.3.6.1.4.1.1466.115.121.1.11 DESC 'Country String' ) +olcLdapSyntaxes: ( 1.3.6.1.4.1.1466.115.121.1.45 DESC 'SubtreeSpecification' ) +olcLdapSyntaxes: ( 1.3.6.1.4.1.1466.115.121.1.49 DESC 'Supported Algorithm' X- + BINARY-TRANSFER-REQUIRED 'TRUE' X-NOT-HUMAN-READABLE 'TRUE' ) +olcLdapSyntaxes: ( 1.3.6.1.4.1.1466.115.121.1.50 DESC 'Telephone Number' ) +olcLdapSyntaxes: ( 1.3.6.1.4.1.1466.115.121.1.51 DESC 'Teletex Terminal Identi + fier' ) +olcLdapSyntaxes: ( 1.3.6.1.4.1.1466.115.121.1.52 DESC 'Telex Number' ) +olcLdapSyntaxes: ( 1.3.6.1.4.1.1466.115.121.1.54 DESC 'LDAP Syntax Description + ' ) +olcLdapSyntaxes: ( 1.3.6.1.4.1.1466.115.121.1.55 DESC 'Modify Rights' ) +olcLdapSyntaxes: ( 1.3.6.1.4.1.1466.115.121.1.56 DESC 'LDAP Schema Definition' + ) +olcLdapSyntaxes: ( 1.3.6.1.4.1.1466.115.121.1.57 DESC 'LDAP Schema Description + ' ) +olcLdapSyntaxes: ( 1.3.6.1.4.1.1466.115.121.1.58 DESC 'Substring Assertion' ) +olcLdapSyntaxes: ( 1.3.6.1.1.1.0.0 DESC 'RFC2307 NIS Netgroup Triple' ) +olcLdapSyntaxes: ( 1.3.6.1.1.1.0.1 DESC 'RFC2307 Boot Parameter' ) +olcLdapSyntaxes: ( 1.3.6.1.1.15.1 DESC 'Certificate Exact Assertion' ) +olcLdapSyntaxes: ( 1.3.6.1.1.15.2 DESC 'Certificate Assertion' ) +olcLdapSyntaxes: ( 1.3.6.1.1.15.3 
DESC 'Certificate Pair Exact Assertion' ) +olcLdapSyntaxes: ( 1.3.6.1.1.15.4 DESC 'Certificate Pair Assertion' ) +olcLdapSyntaxes: ( 1.3.6.1.1.15.5 DESC 'Certificate List Exact Assertion' ) +olcLdapSyntaxes: ( 1.3.6.1.1.15.6 DESC 'Certificate List Assertion' ) +olcLdapSyntaxes: ( 1.3.6.1.1.15.7 DESC 'Algorithm Identifier' ) +olcLdapSyntaxes: ( 1.3.6.1.4.1.4203.666.11.10.2.2 DESC 'AttributeCertificate E + xact Assertion' ) +olcLdapSyntaxes: ( 1.3.6.1.4.1.4203.666.11.10.2.3 DESC 'AttributeCertificate A + ssertion' ) +olcLdapSyntaxes: ( 1.3.6.1.1.16.1 DESC 'UUID' ) +olcLdapSyntaxes: ( 1.3.6.1.4.1.4203.666.11.2.1 DESC 'CSN' ) +olcLdapSyntaxes: ( 1.3.6.1.4.1.4203.666.11.2.4 DESC 'CSN SID' ) +olcLdapSyntaxes: ( 1.3.6.1.4.1.4203.1.1.1 DESC 'OpenLDAP void' ) +olcLdapSyntaxes: ( 1.3.6.1.4.1.4203.666.2.7 DESC 'OpenLDAP authz' ) +olcLdapSyntaxes: ( 1.2.840.113549.1.8.1.1 DESC 'PKCS#8 PrivateKeyInfo' ) +olcLdapSyntaxes: ( 1.3.6.1.4.1.4203.666.2.1 DESC 'OpenLDAP Experimental ACI' ) +olcAttributeTypes: ( 2.5.4.0 NAME 'objectClass' DESC 'RFC4512: object classes + of the entity' EQUALITY objectIdentifierMatch SYNTAX 1.3.6.1.4.1.1466.115.121 + .1.38 ) +olcAttributeTypes: ( 2.5.21.9 NAME 'structuralObjectClass' DESC 'RFC4512: stru + ctural object class of entry' EQUALITY objectIdentifierMatch SYNTAX 1.3.6.1.4 + .1.1466.115.121.1.38 SINGLE-VALUE NO-USER-MODIFICATION USAGE directoryOperati + on ) +olcAttributeTypes: ( 2.5.18.1 NAME 'createTimestamp' DESC 'RFC4512: time which + object was created' EQUALITY generalizedTimeMatch ORDERING generalizedTimeOr + deringMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.24 SINGLE-VALUE NO-USER-MODIFIC + ATION USAGE directoryOperation ) +olcAttributeTypes: ( 2.5.18.2 NAME 'modifyTimestamp' DESC 'RFC4512: time which + object was last modified' EQUALITY generalizedTimeMatch ORDERING generalized + TimeOrderingMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.24 SINGLE-VALUE NO-USER-M + ODIFICATION USAGE directoryOperation ) +olcAttributeTypes: ( 2.5.18.3 NAME 
'creatorsName' DESC 'RFC4512: name of creat + or' EQUALITY distinguishedNameMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.12 SING + LE-VALUE NO-USER-MODIFICATION USAGE directoryOperation ) +olcAttributeTypes: ( 2.5.18.4 NAME 'modifiersName' DESC 'RFC4512: name of last + modifier' EQUALITY distinguishedNameMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1. + 12 SINGLE-VALUE NO-USER-MODIFICATION USAGE directoryOperation ) +olcAttributeTypes: ( 2.5.18.9 NAME 'hasSubordinates' DESC 'X.501: entry has ch + ildren' EQUALITY booleanMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.7 SINGLE-VALU + E NO-USER-MODIFICATION USAGE directoryOperation ) +olcAttributeTypes: ( 2.5.18.10 NAME 'subschemaSubentry' DESC 'RFC4512: name of + controlling subschema entry' EQUALITY distinguishedNameMatch SYNTAX 1.3.6.1. + 4.1.1466.115.121.1.12 SINGLE-VALUE NO-USER-MODIFICATION USAGE directoryOperat + ion ) +olcAttributeTypes: ( 2.5.18.12 NAME 'collectiveAttributeSubentries' DESC 'RFC3 + 671: collective attribute subentries' EQUALITY distinguishedNameMatch SYNTAX + 1.3.6.1.4.1.1466.115.121.1.12 NO-USER-MODIFICATION USAGE directoryOperation ) +olcAttributeTypes: ( 2.5.18.7 NAME 'collectiveExclusions' DESC 'RFC3671: colle + ctive attribute exclusions' EQUALITY objectIdentifierMatch SYNTAX 1.3.6.1.4.1 + .1466.115.121.1.38 USAGE directoryOperation ) +olcAttributeTypes: ( 1.3.6.1.1.20 NAME 'entryDN' DESC 'DN of the entry' EQUALI + TY distinguishedNameMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.12 SINGLE-VALUE N + O-USER-MODIFICATION USAGE directoryOperation ) +olcAttributeTypes: ( 1.3.6.1.1.16.4 NAME 'entryUUID' DESC 'UUID of the entry' + EQUALITY UUIDMatch ORDERING UUIDOrderingMatch SYNTAX 1.3.6.1.1.16.1 SINGLE-VA + LUE NO-USER-MODIFICATION USAGE directoryOperation ) +olcAttributeTypes: ( 1.3.6.1.4.1.4203.666.1.7 NAME 'entryCSN' DESC 'change seq + uence number of the entry content' EQUALITY CSNMatch ORDERING CSNOrderingMatc + h SYNTAX 1.3.6.1.4.1.4203.666.11.2.1{64} SINGLE-VALUE NO-USER-MODIFICATION US + AGE 
directoryOperation ) +olcAttributeTypes: ( 1.3.6.1.4.1.4203.666.1.13 NAME 'namingCSN' DESC 'change s + equence number of the entry naming (RDN)' EQUALITY CSNMatch ORDERING CSNOrder + ingMatch SYNTAX 1.3.6.1.4.1.4203.666.11.2.1{64} SINGLE-VALUE NO-USER-MODIFICA + TION USAGE directoryOperation ) +olcAttributeTypes: ( 1.3.6.1.4.1.4203.666.1.23 NAME 'syncreplCookie' DESC 'syn + crepl Cookie for shadow copy' EQUALITY octetStringMatch ORDERING octetStringO + rderingMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.40 SINGLE-VALUE NO-USER-MODIFI + CATION USAGE dSAOperation ) +olcAttributeTypes: ( 1.3.6.1.4.1.4203.666.1.25 NAME 'contextCSN' DESC 'the lar + gest committed CSN of a context' EQUALITY CSNMatch ORDERING CSNOrderingMatch + SYNTAX 1.3.6.1.4.1.4203.666.11.2.1{64} NO-USER-MODIFICATION USAGE dSAOperatio + n ) +olcAttributeTypes: ( 1.3.6.1.4.1.1466.101.120.6 NAME 'altServer' DESC 'RFC4512 + : alternative servers' SYNTAX 1.3.6.1.4.1.1466.115.121.1.26 USAGE dSAOperatio + n ) +olcAttributeTypes: ( 1.3.6.1.4.1.1466.101.120.5 NAME 'namingContexts' DESC 'RF + C4512: naming contexts' EQUALITY distinguishedNameMatch SYNTAX 1.3.6.1.4.1.14 + 66.115.121.1.12 USAGE dSAOperation ) +olcAttributeTypes: ( 1.3.6.1.4.1.1466.101.120.13 NAME 'supportedControl' DESC + 'RFC4512: supported controls' SYNTAX 1.3.6.1.4.1.1466.115.121.1.38 USAGE dSAO + peration ) +olcAttributeTypes: ( 1.3.6.1.4.1.1466.101.120.7 NAME 'supportedExtension' DESC + 'RFC4512: supported extended operations' SYNTAX 1.3.6.1.4.1.1466.115.121.1.3 + 8 USAGE dSAOperation ) +olcAttributeTypes: ( 1.3.6.1.4.1.1466.101.120.15 NAME 'supportedLDAPVersion' D + ESC 'RFC4512: supported LDAP versions' SYNTAX 1.3.6.1.4.1.1466.115.121.1.27 U + SAGE dSAOperation ) +olcAttributeTypes: ( 1.3.6.1.4.1.1466.101.120.14 NAME 'supportedSASLMechanisms + ' DESC 'RFC4512: supported SASL mechanisms' SYNTAX 1.3.6.1.4.1.1466.115.121.1 + .15 USAGE dSAOperation ) +olcAttributeTypes: ( 1.3.6.1.4.1.4203.1.3.5 NAME 'supportedFeatures' DESC 'RFC + 4512: features 
supported by the server' EQUALITY objectIdentifierMatch SYNTAX + 1.3.6.1.4.1.1466.115.121.1.38 USAGE dSAOperation ) +olcAttributeTypes: ( 1.3.6.1.4.1.4203.666.1.10 NAME 'monitorContext' DESC 'mon + itor context' EQUALITY distinguishedNameMatch SYNTAX 1.3.6.1.4.1.1466.115.121 + .1.12 SINGLE-VALUE NO-USER-MODIFICATION USAGE dSAOperation ) +olcAttributeTypes: ( 1.3.6.1.4.1.4203.1.12.2.1 NAME 'configContext' DESC 'conf + ig context' EQUALITY distinguishedNameMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1 + .12 SINGLE-VALUE NO-USER-MODIFICATION USAGE dSAOperation ) +olcAttributeTypes: ( 1.3.6.1.1.4 NAME 'vendorName' DESC 'RFC3045: name of impl + ementation vendor' EQUALITY caseExactMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1. + 15 SINGLE-VALUE NO-USER-MODIFICATION USAGE dSAOperation ) +olcAttributeTypes: ( 1.3.6.1.1.5 NAME 'vendorVersion' DESC 'RFC3045: version o + f implementation' EQUALITY caseExactMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.1 + 5 SINGLE-VALUE NO-USER-MODIFICATION USAGE dSAOperation ) +olcAttributeTypes: ( 2.5.18.5 NAME 'administrativeRole' DESC 'RFC3672: adminis + trative role' EQUALITY objectIdentifierMatch SYNTAX 1.3.6.1.4.1.1466.115.121. + 1.38 USAGE directoryOperation ) +olcAttributeTypes: ( 2.5.18.6 NAME 'subtreeSpecification' DESC 'RFC3672: subtr + ee specification' SYNTAX 1.3.6.1.4.1.1466.115.121.1.45 SINGLE-VALUE USAGE dir + ectoryOperation ) +olcAttributeTypes: ( 2.5.21.1 NAME 'dITStructureRules' DESC 'RFC4512: DIT stru + cture rules' EQUALITY integerFirstComponentMatch SYNTAX 1.3.6.1.4.1.1466.115. + 121.1.17 USAGE directoryOperation ) +olcAttributeTypes: ( 2.5.21.2 NAME 'dITContentRules' DESC 'RFC4512: DIT conten + t rules' EQUALITY objectIdentifierFirstComponentMatch SYNTAX 1.3.6.1.4.1.1466 + .115.121.1.16 USAGE directoryOperation ) +olcAttributeTypes: ( 2.5.21.4 NAME 'matchingRules' DESC 'RFC4512: matching rul + es' EQUALITY objectIdentifierFirstComponentMatch SYNTAX 1.3.6.1.4.1.1466.115. 
+ 121.1.30 USAGE directoryOperation ) +olcAttributeTypes: ( 2.5.21.5 NAME 'attributeTypes' DESC 'RFC4512: attribute t + ypes' EQUALITY objectIdentifierFirstComponentMatch SYNTAX 1.3.6.1.4.1.1466.11 + 5.121.1.3 USAGE directoryOperation ) +olcAttributeTypes: ( 2.5.21.6 NAME 'objectClasses' DESC 'RFC4512: object class + es' EQUALITY objectIdentifierFirstComponentMatch SYNTAX 1.3.6.1.4.1.1466.115. + 121.1.37 USAGE directoryOperation ) +olcAttributeTypes: ( 2.5.21.7 NAME 'nameForms' DESC 'RFC4512: name forms ' EQU + ALITY objectIdentifierFirstComponentMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.3 + 5 USAGE directoryOperation ) +olcAttributeTypes: ( 2.5.21.8 NAME 'matchingRuleUse' DESC 'RFC4512: matching r + ule uses' EQUALITY objectIdentifierFirstComponentMatch SYNTAX 1.3.6.1.4.1.146 + 6.115.121.1.31 USAGE directoryOperation ) +olcAttributeTypes: ( 1.3.6.1.4.1.1466.101.120.16 NAME 'ldapSyntaxes' DESC 'RFC + 4512: LDAP syntaxes' EQUALITY objectIdentifierFirstComponentMatch SYNTAX 1.3. + 6.1.4.1.1466.115.121.1.54 USAGE directoryOperation ) +olcAttributeTypes: ( 2.5.4.1 NAME ( 'aliasedObjectName' 'aliasedEntryName' ) D + ESC 'RFC4512: name of aliased object' EQUALITY distinguishedNameMatch SYNTAX + 1.3.6.1.4.1.1466.115.121.1.12 SINGLE-VALUE ) +olcAttributeTypes: ( 2.16.840.1.113730.3.1.34 NAME 'ref' DESC 'RFC3296: subord + inate referral URL' EQUALITY caseExactMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1 + .15 USAGE distributedOperation ) +olcAttributeTypes: ( 1.3.6.1.4.1.4203.1.3.1 NAME 'entry' DESC 'OpenLDAP ACL en + try pseudo-attribute' SYNTAX 1.3.6.1.4.1.4203.1.1.1 SINGLE-VALUE NO-USER-MODI + FICATION USAGE dSAOperation ) +olcAttributeTypes: ( 1.3.6.1.4.1.4203.1.3.2 NAME 'children' DESC 'OpenLDAP ACL + children pseudo-attribute' SYNTAX 1.3.6.1.4.1.4203.1.1.1 SINGLE-VALUE NO-USE + R-MODIFICATION USAGE dSAOperation ) +olcAttributeTypes: ( 1.3.6.1.4.1.4203.666.1.8 NAME ( 'authzTo' 'saslAuthzTo' ) + DESC 'proxy authorization targets' EQUALITY authzMatch SYNTAX 1.3.6.1.4.1.42 + 
03.666.2.7 USAGE distributedOperation X-ORDERED 'VALUES' ) +olcAttributeTypes: ( 1.3.6.1.4.1.4203.666.1.9 NAME ( 'authzFrom' 'saslAuthzFro + m' ) DESC 'proxy authorization sources' EQUALITY authzMatch SYNTAX 1.3.6.1.4. + 1.4203.666.2.7 USAGE distributedOperation X-ORDERED 'VALUES' ) +olcAttributeTypes: ( 1.3.6.1.4.1.1466.101.119.3 NAME 'entryTtl' DESC 'RFC2589: + entry time-to-live' SYNTAX 1.3.6.1.4.1.1466.115.121.1.27 SINGLE-VALUE NO-USE + R-MODIFICATION USAGE dSAOperation ) +olcAttributeTypes: ( 1.3.6.1.4.1.1466.101.119.4 NAME 'dynamicSubtrees' DESC 'R + FC2589: dynamic subtrees' SYNTAX 1.3.6.1.4.1.1466.115.121.1.12 NO-USER-MODIFI + CATION USAGE dSAOperation ) +olcAttributeTypes: ( 2.5.4.49 NAME 'distinguishedName' DESC 'RFC4519: common s + upertype of DN attributes' EQUALITY distinguishedNameMatch SYNTAX 1.3.6.1.4.1 + .1466.115.121.1.12 ) +olcAttributeTypes: ( 2.5.4.41 NAME 'name' DESC 'RFC4519: common supertype of n + ame attributes' EQUALITY caseIgnoreMatch SUBSTR caseIgnoreSubstringsMatch SYN + TAX 1.3.6.1.4.1.1466.115.121.1.15{32768} ) +olcAttributeTypes: ( 2.5.4.3 NAME ( 'cn' 'commonName' ) DESC 'RFC4519: common + name(s) for which the entity is known by' SUP name ) +olcAttributeTypes: ( 0.9.2342.19200300.100.1.1 NAME ( 'uid' 'userid' ) DESC 'R + FC4519: user identifier' EQUALITY caseIgnoreMatch SUBSTR caseIgnoreSubstrings + Match SYNTAX 1.3.6.1.4.1.1466.115.121.1.15{256} ) +olcAttributeTypes: ( 1.3.6.1.1.1.1.0 NAME 'uidNumber' DESC 'RFC2307: An intege + r uniquely identifying a user in an administrative domain' EQUALITY integerMa + tch ORDERING integerOrderingMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.27 SINGLE + -VALUE ) +olcAttributeTypes: ( 1.3.6.1.1.1.1.1 NAME 'gidNumber' DESC 'RFC2307: An intege + r uniquely identifying a group in an administrative domain' EQUALITY integerM + atch ORDERING integerOrderingMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.27 SINGL + E-VALUE ) +olcAttributeTypes: ( 2.5.4.35 NAME 'userPassword' DESC 'RFC4519/2307: password + of user' 
EQUALITY octetStringMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.40{128} + ) +olcAttributeTypes: ( 1.3.6.1.4.1.250.1.57 NAME 'labeledURI' DESC 'RFC2079: Uni + form Resource Identifier with optional label' EQUALITY caseExactMatch SYNTAX + 1.3.6.1.4.1.1466.115.121.1.15 ) +olcAttributeTypes: ( 2.5.4.13 NAME 'description' DESC 'RFC4519: descriptive in + formation' EQUALITY caseIgnoreMatch SUBSTR caseIgnoreSubstringsMatch SYNTAX 1 + .3.6.1.4.1.1466.115.121.1.15{1024} ) +olcAttributeTypes: ( 2.5.4.34 NAME 'seeAlso' DESC 'RFC4519: DN of related obje + ct' SUP distinguishedName ) +olcAttributeTypes: ( 1.3.6.1.4.1.4203.666.1.60 NAME 'pKCS8PrivateKey' DESC 'PK + CS#8 PrivateKeyInfo, use ;binary' EQUALITY privateKeyMatch SYNTAX 1.2.840.113 + 549.1.8.1.1 ) +olcAttributeTypes: ( 1.3.6.1.4.1.42.2.27.8.1.29 NAME 'pwdLastSuccess' DESC 'Th + e timestamp of the last successful authentication' EQUALITY generalizedTimeMa + tch ORDERING generalizedTimeOrderingMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.2 + 4 SINGLE-VALUE NO-USER-MODIFICATION USAGE directoryOperation ) +olcAttributeTypes: ( OLcfgGlAt:78 NAME 'olcConfigFile' DESC 'File for slapd co + nfiguration directives' EQUALITY caseExactMatch SYNTAX OMsDirectoryString SIN + GLE-VALUE ) +olcAttributeTypes: ( OLcfgGlAt:79 NAME 'olcConfigDir' DESC 'Directory for slap + d configuration backend' EQUALITY caseExactMatch SYNTAX OMsDirectoryString SI + NGLE-VALUE ) +olcAttributeTypes: ( OLcfgGlAt:1 NAME 'olcAccess' DESC 'Access Control List' E + QUALITY caseIgnoreMatch SYNTAX OMsDirectoryString X-ORDERED 'VALUES' ) +olcAttributeTypes: ( OLcfgGlAt:86 NAME 'olcAddContentAcl' DESC 'Check ACLs aga + inst content of Add ops' EQUALITY booleanMatch SYNTAX OMsBoolean SINGLE-VALUE + ) +olcAttributeTypes: ( OLcfgGlAt:2 NAME 'olcAllows' DESC 'Allowed set of depreca + ted features' EQUALITY caseIgnoreMatch SYNTAX OMsDirectoryString ) +olcAttributeTypes: ( OLcfgGlAt:3 NAME 'olcArgsFile' DESC 'File for slapd comma + nd line options' EQUALITY caseExactMatch SYNTAX 
OMsDirectoryString SINGLE-VAL + UE ) +olcAttributeTypes: ( OLcfgGlAt:5 NAME 'olcAttributeOptions' EQUALITY caseIgnor + eMatch SYNTAX OMsDirectoryString ) +olcAttributeTypes: ( OLcfgGlAt:4 NAME 'olcAttributeTypes' DESC 'OpenLDAP attri + buteTypes' EQUALITY caseIgnoreMatch SUBSTR caseIgnoreSubstringsMatch SYNTAX O + MsDirectoryString X-ORDERED 'VALUES' ) +olcAttributeTypes: ( OLcfgGlAt:6 NAME 'olcAuthIDRewrite' EQUALITY caseIgnoreMa + tch SYNTAX OMsDirectoryString X-ORDERED 'VALUES' ) +olcAttributeTypes: ( OLcfgGlAt:7 NAME 'olcAuthzPolicy' EQUALITY caseIgnoreMatc + h SYNTAX OMsDirectoryString SINGLE-VALUE ) +olcAttributeTypes: ( OLcfgGlAt:8 NAME 'olcAuthzRegexp' EQUALITY caseIgnoreMatc + h SYNTAX OMsDirectoryString X-ORDERED 'VALUES' ) +olcAttributeTypes: ( OLcfgGlAt:9 NAME 'olcBackend' DESC 'A type of backend' EQ + UALITY caseIgnoreMatch SYNTAX OMsDirectoryString SINGLE-VALUE X-ORDERED 'SIBL + INGS' ) +olcAttributeTypes: ( OLcfgGlAt:10 NAME 'olcConcurrency' EQUALITY integerMatch + SYNTAX OMsInteger SINGLE-VALUE ) +olcAttributeTypes: ( OLcfgGlAt:11 NAME 'olcConnMaxPending' EQUALITY integerMat + ch SYNTAX OMsInteger SINGLE-VALUE ) +olcAttributeTypes: ( OLcfgGlAt:12 NAME 'olcConnMaxPendingAuth' EQUALITY intege + rMatch SYNTAX OMsInteger SINGLE-VALUE ) +olcAttributeTypes: ( OLcfgGlAt:13 NAME 'olcDatabase' DESC 'The backend type fo + r a database instance' SUP olcBackend SINGLE-VALUE X-ORDERED 'SIBLINGS' ) +olcAttributeTypes: ( OLcfgGlAt:14 NAME 'olcDefaultSearchBase' EQUALITY disting + uishedNameMatch SYNTAX OMsDN SINGLE-VALUE ) +olcAttributeTypes: ( OLcfgDbAt:0.21 NAME 'olcDisabled' EQUALITY booleanMatch S + YNTAX OMsBoolean SINGLE-VALUE ) +olcAttributeTypes: ( OLcfgGlAt:15 NAME 'olcDisallows' EQUALITY caseIgnoreMatch + SYNTAX OMsDirectoryString ) +olcAttributeTypes: ( OLcfgGlAt:16 NAME 'olcDitContentRules' DESC 'OpenLDAP DIT + content rules' EQUALITY caseIgnoreMatch SUBSTR caseIgnoreSubstringsMatch SYN + TAX OMsDirectoryString X-ORDERED 'VALUES' ) +olcAttributeTypes: 
( OLcfgDbAt:0.20 NAME 'olcExtraAttrs' EQUALITY caseIgnoreMa + tch SYNTAX OMsDirectoryString ) +olcAttributeTypes: ( OLcfgGlAt:17 NAME 'olcGentleHUP' EQUALITY booleanMatch SY + NTAX OMsBoolean SINGLE-VALUE ) +olcAttributeTypes: ( OLcfgDbAt:0.17 NAME 'olcHidden' EQUALITY booleanMatch SYN + TAX OMsBoolean SINGLE-VALUE ) +olcAttributeTypes: ( OLcfgGlAt:18 NAME 'olcIdleTimeout' EQUALITY integerMatch + SYNTAX OMsInteger SINGLE-VALUE ) +olcAttributeTypes: ( OLcfgGlAt:19 NAME 'olcInclude' SUP labeledURI ) +olcAttributeTypes: ( OLcfgGlAt:94 NAME 'olcIndexHash64' EQUALITY booleanMatch + SYNTAX OMsBoolean SINGLE-VALUE ) +olcAttributeTypes: ( OLcfgGlAt:20 NAME 'olcIndexSubstrIfMinLen' EQUALITY integ + erMatch SYNTAX OMsInteger SINGLE-VALUE ) +olcAttributeTypes: ( OLcfgGlAt:21 NAME 'olcIndexSubstrIfMaxLen' EQUALITY integ + erMatch SYNTAX OMsInteger SINGLE-VALUE ) +olcAttributeTypes: ( OLcfgGlAt:22 NAME 'olcIndexSubstrAnyLen' EQUALITY integer + Match SYNTAX OMsInteger SINGLE-VALUE ) +olcAttributeTypes: ( OLcfgGlAt:23 NAME 'olcIndexSubstrAnyStep' EQUALITY intege + rMatch SYNTAX OMsInteger SINGLE-VALUE ) +olcAttributeTypes: ( OLcfgGlAt:84 NAME 'olcIndexIntLen' EQUALITY integerMatch + SYNTAX OMsInteger SINGLE-VALUE ) +olcAttributeTypes: ( OLcfgDbAt:0.4 NAME 'olcLastMod' EQUALITY booleanMatch SYN + TAX OMsBoolean SINGLE-VALUE ) +olcAttributeTypes: ( OLcfgDbAt:0.22 NAME 'olcLastBind' EQUALITY booleanMatch S + YNTAX OMsBoolean SINGLE-VALUE ) +olcAttributeTypes: ( OLcfgGlAt:85 NAME 'olcLdapSyntaxes' DESC 'OpenLDAP ldapSy + ntax' EQUALITY caseIgnoreMatch SUBSTR caseIgnoreSubstringsMatch SYNTAX OMsDir + ectoryString X-ORDERED 'VALUES' ) +olcAttributeTypes: ( OLcfgDbAt:0.5 NAME 'olcLimits' EQUALITY caseIgnoreMatch S + YNTAX OMsDirectoryString X-ORDERED 'VALUES' ) +olcAttributeTypes: ( OLcfgGlAt:93 NAME 'olcListenerThreads' EQUALITY integerMa + tch SYNTAX OMsInteger SINGLE-VALUE ) +olcAttributeTypes: ( OLcfgGlAt:26 NAME 'olcLocalSSF' EQUALITY integerMatch SYN + TAX OMsInteger SINGLE-VALUE 
) +olcAttributeTypes: ( OLcfgGlAt:27 NAME 'olcLogFile' EQUALITY caseExactMatch SY + NTAX OMsDirectoryString SINGLE-VALUE ) +olcAttributeTypes: ( OLcfgGlAt:28 NAME 'olcLogLevel' EQUALITY caseIgnoreMatch + SYNTAX OMsDirectoryString ) +olcAttributeTypes: ( OLcfgDbAt:0.6 NAME 'olcMaxDerefDepth' EQUALITY integerMat + ch SYNTAX OMsInteger SINGLE-VALUE ) +olcAttributeTypes: ( OLcfgGlAt:101 NAME 'olcMaxFilterDepth' EQUALITY integerMa + tch SYNTAX OMsInteger SINGLE-VALUE ) +olcAttributeTypes: ( OLcfgDbAt:0.16 NAME ( 'olcMultiProvider' 'olcMirrorMode' + ) EQUALITY booleanMatch SYNTAX OMsBoolean SINGLE-VALUE ) +olcAttributeTypes: ( OLcfgGlAt:30 NAME 'olcModuleLoad' EQUALITY caseIgnoreMatc + h SYNTAX OMsDirectoryString X-ORDERED 'VALUES' ) +olcAttributeTypes: ( OLcfgGlAt:31 NAME 'olcModulePath' EQUALITY caseExactMatch + SYNTAX OMsDirectoryString SINGLE-VALUE ) +olcAttributeTypes: ( OLcfgDbAt:0.18 NAME 'olcMonitoring' EQUALITY booleanMatch + SYNTAX OMsBoolean SINGLE-VALUE ) +olcAttributeTypes: ( OLcfgGlAt:32 NAME 'olcObjectClasses' DESC 'OpenLDAP objec + t classes' EQUALITY caseIgnoreMatch SUBSTR caseIgnoreSubstringsMatch SYNTAX O + MsDirectoryString X-ORDERED 'VALUES' ) +olcAttributeTypes: ( OLcfgGlAt:33 NAME 'olcObjectIdentifier' EQUALITY caseIgno + reMatch SUBSTR caseIgnoreSubstringsMatch SYNTAX OMsDirectoryString X-ORDERED + 'VALUES' ) +olcAttributeTypes: ( OLcfgGlAt:34 NAME 'olcOverlay' SUP olcDatabase SINGLE-VAL + UE X-ORDERED 'SIBLINGS' ) +olcAttributeTypes: ( OLcfgGlAt:35 NAME 'olcPasswordCryptSaltFormat' EQUALITY c + aseIgnoreMatch SYNTAX OMsDirectoryString SINGLE-VALUE ) +olcAttributeTypes: ( OLcfgGlAt:36 NAME 'olcPasswordHash' EQUALITY caseIgnoreMa + tch SYNTAX OMsDirectoryString ) +olcAttributeTypes: ( OLcfgGlAt:37 NAME 'olcPidFile' EQUALITY caseExactMatch SY + NTAX OMsDirectoryString SINGLE-VALUE ) +olcAttributeTypes: ( OLcfgGlAt:38 NAME 'olcPlugin' EQUALITY caseIgnoreMatch SY + NTAX OMsDirectoryString X-ORDERED 'VALUES' ) +olcAttributeTypes: ( OLcfgGlAt:39 NAME 
'olcPluginLogFile' EQUALITY caseExactMa + tch SYNTAX OMsDirectoryString SINGLE-VALUE ) +olcAttributeTypes: ( OLcfgGlAt:40 NAME 'olcReadOnly' EQUALITY booleanMatch SYN + TAX OMsBoolean SINGLE-VALUE ) +olcAttributeTypes: ( OLcfgGlAt:41 NAME 'olcReferral' SUP labeledURI SINGLE-VAL + UE ) +olcAttributeTypes: ( OLcfgDbAt:0.7 NAME 'olcReplica' SUP labeledURI EQUALITY c + aseIgnoreMatch X-ORDERED 'VALUES' ) +olcAttributeTypes: ( OLcfgGlAt:43 NAME 'olcReplicaArgsFile' SYNTAX OMsDirector + yString SINGLE-VALUE ) +olcAttributeTypes: ( OLcfgGlAt:44 NAME 'olcReplicaPidFile' SYNTAX OMsDirectory + String SINGLE-VALUE ) +olcAttributeTypes: ( OLcfgGlAt:45 NAME 'olcReplicationInterval' SYNTAX OMsInte + ger SINGLE-VALUE ) +olcAttributeTypes: ( OLcfgGlAt:46 NAME 'olcReplogFile' SYNTAX OMsDirectoryStri + ng SINGLE-VALUE ) +olcAttributeTypes: ( OLcfgGlAt:47 NAME 'olcRequires' EQUALITY caseIgnoreMatch + SYNTAX OMsDirectoryString ) +olcAttributeTypes: ( OLcfgGlAt:48 NAME 'olcRestrict' EQUALITY caseIgnoreMatch + SYNTAX OMsDirectoryString ) +olcAttributeTypes: ( OLcfgGlAt:49 NAME 'olcReverseLookup' EQUALITY booleanMatc + h SYNTAX OMsBoolean SINGLE-VALUE ) +olcAttributeTypes: ( OLcfgDbAt:0.8 NAME 'olcRootDN' EQUALITY distinguishedName + Match SYNTAX OMsDN SINGLE-VALUE ) +olcAttributeTypes: ( OLcfgGlAt:51 NAME 'olcRootDSE' EQUALITY caseIgnoreMatch S + YNTAX OMsDirectoryString ) +olcAttributeTypes: ( OLcfgDbAt:0.9 NAME 'olcRootPW' EQUALITY octetStringMatch + SYNTAX OMsOctetString SINGLE-VALUE ) +olcAttributeTypes: ( OLcfgGlAt:89 NAME 'olcSaslAuxprops' EQUALITY caseIgnoreMa + tch SYNTAX OMsDirectoryString SINGLE-VALUE ) +olcAttributeTypes: ( OLcfgGlAt:91 NAME 'olcSaslAuxpropsDontUseCopy' EQUALITY c + aseIgnoreMatch SYNTAX OMsDirectoryString ) +olcAttributeTypes: ( OLcfgGlAt:92 NAME 'olcSaslAuxpropsDontUseCopyIgnore' EQUA + LITY booleanMatch SYNTAX OMsBoolean SINGLE-VALUE ) +olcAttributeTypes: ( OLcfgGlAt:100 NAME 'olcSaslCBinding' EQUALITY caseIgnoreM + atch SYNTAX OMsDirectoryString 
SINGLE-VALUE ) +olcAttributeTypes: ( OLcfgGlAt:53 NAME 'olcSaslHost' EQUALITY caseIgnoreMatch + SYNTAX OMsDirectoryString SINGLE-VALUE ) +olcAttributeTypes: ( OLcfgGlAt:54 NAME 'olcSaslRealm' EQUALITY caseExactMatch + SYNTAX OMsDirectoryString SINGLE-VALUE ) +olcAttributeTypes: ( OLcfgGlAt:56 NAME 'olcSaslSecProps' EQUALITY caseExactMat + ch SYNTAX OMsDirectoryString SINGLE-VALUE ) +olcAttributeTypes: ( OLcfgGlAt:58 NAME 'olcSchemaDN' EQUALITY distinguishedNam + eMatch SYNTAX OMsDN SINGLE-VALUE ) +olcAttributeTypes: ( OLcfgGlAt:59 NAME 'olcSecurity' EQUALITY caseIgnoreMatch + SYNTAX OMsDirectoryString ) +olcAttributeTypes: ( OLcfgGlAt:81 NAME 'olcServerID' EQUALITY caseIgnoreMatch + SYNTAX OMsDirectoryString ) +olcAttributeTypes: ( OLcfgGlAt:60 NAME 'olcSizeLimit' EQUALITY caseExactMatch + SYNTAX OMsDirectoryString SINGLE-VALUE ) +olcAttributeTypes: ( OLcfgGlAt:61 NAME 'olcSockbufMaxIncoming' EQUALITY intege + rMatch SYNTAX OMsInteger SINGLE-VALUE ) +olcAttributeTypes: ( OLcfgGlAt:62 NAME 'olcSockbufMaxIncomingAuth' EQUALITY in + tegerMatch SYNTAX OMsInteger SINGLE-VALUE ) +olcAttributeTypes: ( OLcfgGlAt:83 NAME 'olcSortVals' DESC 'Attributes whose va + lues will always be sorted' EQUALITY caseIgnoreMatch SYNTAX OMsDirectoryStrin + g ) +olcAttributeTypes: ( OLcfgDbAt:0.15 NAME 'olcSubordinate' EQUALITY caseExactMa + tch SYNTAX OMsDirectoryString SINGLE-VALUE ) +olcAttributeTypes: ( OLcfgDbAt:0.10 NAME 'olcSuffix' EQUALITY distinguishedNam + eMatch SYNTAX OMsDN ) +olcAttributeTypes: ( OLcfgDbAt:0.19 NAME 'olcSyncUseSubentry' DESC 'Store sync + context in a subentry' EQUALITY booleanMatch SYNTAX OMsBoolean SINGLE-VALUE + ) +olcAttributeTypes: ( OLcfgDbAt:0.11 NAME 'olcSyncrepl' EQUALITY caseIgnoreMatc + h SYNTAX OMsDirectoryString X-ORDERED 'VALUES' ) +olcAttributeTypes: ( OLcfgGlAt:90 NAME 'olcTCPBuffer' DESC 'Custom TCP buffer + size' EQUALITY caseExactMatch SYNTAX OMsDirectoryString ) +olcAttributeTypes: ( OLcfgGlAt:66 NAME 'olcThreads' EQUALITY integerMatch SYNT 
+ AX OMsInteger SINGLE-VALUE ) +olcAttributeTypes: ( OLcfgGlAt:95 NAME 'olcThreadQueues' EQUALITY integerMatch + SYNTAX OMsInteger SINGLE-VALUE ) +olcAttributeTypes: ( OLcfgGlAt:67 NAME 'olcTimeLimit' EQUALITY caseExactMatch + SYNTAX OMsDirectoryString SINGLE-VALUE ) +olcAttributeTypes: ( OLcfgGlAt:97 NAME 'olcTLSCACertificate' DESC 'X.509 certi + ficate, must use ;binary' EQUALITY certificateExactMatch SYNTAX 1.3.6.1.4.1.1 + 466.115.121.1.8 SINGLE-VALUE ) +olcAttributeTypes: ( OLcfgGlAt:68 NAME 'olcTLSCACertificateFile' EQUALITY case + ExactMatch SYNTAX OMsDirectoryString SINGLE-VALUE ) +olcAttributeTypes: ( OLcfgGlAt:69 NAME 'olcTLSCACertificatePath' EQUALITY case + ExactMatch SYNTAX OMsDirectoryString SINGLE-VALUE ) +olcAttributeTypes: ( OLcfgGlAt:98 NAME 'olcTLSCertificate' DESC 'X.509 certifi + cate, must use ;binary' EQUALITY certificateExactMatch SYNTAX 1.3.6.1.4.1.146 + 6.115.121.1.8 SINGLE-VALUE ) +olcAttributeTypes: ( OLcfgGlAt:70 NAME 'olcTLSCertificateFile' EQUALITY caseEx + actMatch SYNTAX OMsDirectoryString SINGLE-VALUE ) +olcAttributeTypes: ( OLcfgGlAt:99 NAME 'olcTLSCertificateKey' DESC 'X.509 priv + ateKey, must use ;binary' EQUALITY privateKeyMatch SYNTAX 1.2.840.113549.1.8. 
+ 1.1 SINGLE-VALUE ) +olcAttributeTypes: ( OLcfgGlAt:71 NAME 'olcTLSCertificateKeyFile' EQUALITY cas + eExactMatch SYNTAX OMsDirectoryString SINGLE-VALUE ) +olcAttributeTypes: ( OLcfgGlAt:72 NAME 'olcTLSCipherSuite' EQUALITY caseExactM + atch SYNTAX OMsDirectoryString SINGLE-VALUE ) +olcAttributeTypes: ( OLcfgGlAt:73 NAME 'olcTLSCRLCheck' EQUALITY caseExactMatc + h SYNTAX OMsDirectoryString SINGLE-VALUE ) +olcAttributeTypes: ( OLcfgGlAt:82 NAME 'olcTLSCRLFile' EQUALITY caseExactMatch + SYNTAX OMsDirectoryString SINGLE-VALUE ) +olcAttributeTypes: ( OLcfgGlAt:74 NAME 'olcTLSRandFile' EQUALITY caseExactMatc + h SYNTAX OMsDirectoryString SINGLE-VALUE ) +olcAttributeTypes: ( OLcfgGlAt:75 NAME 'olcTLSVerifyClient' EQUALITY caseExact + Match SYNTAX OMsDirectoryString SINGLE-VALUE ) +olcAttributeTypes: ( OLcfgGlAt:77 NAME 'olcTLSDHParamFile' EQUALITY caseExactM + atch SYNTAX OMsDirectoryString SINGLE-VALUE ) +olcAttributeTypes: ( OLcfgGlAt:96 NAME 'olcTLSECName' EQUALITY caseExactMatch + SYNTAX OMsDirectoryString SINGLE-VALUE ) +olcAttributeTypes: ( OLcfgGlAt:87 NAME 'olcTLSProtocolMin' EQUALITY caseExactM + atch SYNTAX OMsDirectoryString SINGLE-VALUE ) +olcAttributeTypes: ( OLcfgGlAt:80 NAME 'olcToolThreads' EQUALITY integerMatch + SYNTAX OMsInteger SINGLE-VALUE ) +olcAttributeTypes: ( OLcfgDbAt:0.12 NAME 'olcUpdateDN' EQUALITY distinguishedN + ameMatch SYNTAX OMsDN SINGLE-VALUE ) +olcAttributeTypes: ( OLcfgDbAt:0.13 NAME 'olcUpdateRef' SUP labeledURI EQUALIT + Y caseIgnoreMatch ) +olcAttributeTypes: ( OLcfgGlAt:88 NAME 'olcWriteTimeout' EQUALITY integerMatch + SYNTAX OMsInteger SINGLE-VALUE ) +olcAttributeTypes: ( OLcfgDbAt:0.1 NAME 'olcDbDirectory' DESC 'Directory for d + atabase content' EQUALITY caseIgnoreMatch SYNTAX OMsDirectoryString SINGLE-VA + LUE ) +olcAttributeTypes: ( 1.3.6.1.4.1.4203.666.1.55.1 NAME 'monitoredInfo' DESC 'mo + nitored info' EQUALITY caseIgnoreMatch SUBSTR caseIgnoreSubstringsMatch SYNTA + X 1.3.6.1.4.1.1466.115.121.1.15{32768} 
NO-USER-MODIFICATION USAGE dSAOperatio + n ) +olcAttributeTypes: ( 1.3.6.1.4.1.4203.666.1.55.2 NAME 'managedInfo' DESC 'moni + tor managed info' SUP name ) +olcAttributeTypes: ( 1.3.6.1.4.1.4203.666.1.55.3 NAME 'monitorCounter' DESC 'm + onitor counter' EQUALITY integerMatch ORDERING integerOrderingMatch SYNTAX 1. + 3.6.1.4.1.1466.115.121.1.27 NO-USER-MODIFICATION USAGE dSAOperation ) +olcAttributeTypes: ( 1.3.6.1.4.1.4203.666.1.55.4 NAME 'monitorOpCompleted' DES + C 'monitor completed operations' SUP monitorCounter NO-USER-MODIFICATION USAG + E dSAOperation ) +olcAttributeTypes: ( 1.3.6.1.4.1.4203.666.1.55.5 NAME 'monitorOpInitiated' DES + C 'monitor initiated operations' SUP monitorCounter NO-USER-MODIFICATION USAG + E dSAOperation ) +olcAttributeTypes: ( 1.3.6.1.4.1.4203.666.1.55.6 NAME 'monitorConnectionNumber + ' DESC 'monitor connection number' SUP monitorCounter NO-USER-MODIFICATION US + AGE dSAOperation ) +olcAttributeTypes: ( 1.3.6.1.4.1.4203.666.1.55.7 NAME 'monitorConnectionAuthzD + N' DESC 'monitor connection authorization DN' EQUALITY distinguishedNameMatch + SYNTAX 1.3.6.1.4.1.1466.115.121.1.12 NO-USER-MODIFICATION USAGE dSAOperation + ) +olcAttributeTypes: ( 1.3.6.1.4.1.4203.666.1.55.8 NAME 'monitorConnectionLocalA + ddress' DESC 'monitor connection local address' SUP monitoredInfo NO-USER-MOD + IFICATION USAGE dSAOperation ) +olcAttributeTypes: ( 1.3.6.1.4.1.4203.666.1.55.9 NAME 'monitorConnectionPeerAd + dress' DESC 'monitor connection peer address' SUP monitoredInfo NO-USER-MODIF + ICATION USAGE dSAOperation ) +olcAttributeTypes: ( 1.3.6.1.4.1.4203.666.1.55.10 NAME 'monitorTimestamp' DESC + 'monitor timestamp' EQUALITY generalizedTimeMatch ORDERING generalizedTimeOr + deringMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.24 SINGLE-VALUE NO-USER-MODIFIC + ATION USAGE dSAOperation ) +olcAttributeTypes: ( 1.3.6.1.4.1.4203.666.1.55.11 NAME 'monitorOverlay' DESC ' + name of overlays defined for a given database' SUP monitoredInfo NO-USER-MODI + FICATION USAGE 
dSAOperation ) +olcAttributeTypes: ( 1.3.6.1.4.1.4203.666.1.55.12 NAME 'readOnly' DESC 'read/w + rite status of a given database' EQUALITY booleanMatch SYNTAX 1.3.6.1.4.1.146 + 6.115.121.1.7 SINGLE-VALUE USAGE dSAOperation ) +olcAttributeTypes: ( 1.3.6.1.4.1.4203.666.1.55.13 NAME 'restrictedOperation' D + ESC 'name of restricted operation for a given database' SUP managedInfo ) +olcAttributeTypes: ( 1.3.6.1.4.1.4203.666.1.55.14 NAME 'monitorConnectionProto + col' DESC 'monitor connection protocol' SUP monitoredInfo NO-USER-MODIFICATIO + N USAGE dSAOperation ) +olcAttributeTypes: ( 1.3.6.1.4.1.4203.666.1.55.15 NAME 'monitorConnectionOpsRe + ceived' DESC 'monitor number of operations received by the connection' SUP mo + nitorCounter NO-USER-MODIFICATION USAGE dSAOperation ) +olcAttributeTypes: ( 1.3.6.1.4.1.4203.666.1.55.16 NAME 'monitorConnectionOpsEx + ecuting' DESC 'monitor number of operations in execution within the connectio + n' SUP monitorCounter NO-USER-MODIFICATION USAGE dSAOperation ) +olcAttributeTypes: ( 1.3.6.1.4.1.4203.666.1.55.17 NAME 'monitorConnectionOpsPe + nding' DESC 'monitor number of pending operations within the connection' SUP + monitorCounter NO-USER-MODIFICATION USAGE dSAOperation ) +olcAttributeTypes: ( 1.3.6.1.4.1.4203.666.1.55.18 NAME 'monitorConnectionOpsCo + mpleted' DESC 'monitor number of operations completed within the connection' + SUP monitorCounter NO-USER-MODIFICATION USAGE dSAOperation ) +olcAttributeTypes: ( 1.3.6.1.4.1.4203.666.1.55.19 NAME 'monitorConnectionGet' + DESC 'number of times connection_get() was called so far' SUP monitorCounter + NO-USER-MODIFICATION USAGE dSAOperation ) +olcAttributeTypes: ( 1.3.6.1.4.1.4203.666.1.55.20 NAME 'monitorConnectionRead' + DESC 'number of times connection_read() was called so far' SUP monitorCounte + r NO-USER-MODIFICATION USAGE dSAOperation ) +olcAttributeTypes: ( 1.3.6.1.4.1.4203.666.1.55.21 NAME 'monitorConnectionWrite + ' DESC 'number of times connection_write() was called so far' 
SUP monitorCoun + ter NO-USER-MODIFICATION USAGE dSAOperation ) +olcAttributeTypes: ( 1.3.6.1.4.1.4203.666.1.55.22 NAME 'monitorConnectionMask' + DESC 'monitor connection mask' SUP monitoredInfo NO-USER-MODIFICATION USAGE + dSAOperation ) +olcAttributeTypes: ( 1.3.6.1.4.1.4203.666.1.55.23 NAME 'monitorConnectionListe + ner' DESC 'monitor connection listener' SUP monitoredInfo NO-USER-MODIFICATIO + N USAGE dSAOperation ) +olcAttributeTypes: ( 1.3.6.1.4.1.4203.666.1.55.24 NAME 'monitorConnectionPeerD + omain' DESC 'monitor connection peer domain' SUP monitoredInfo NO-USER-MODIFI + CATION USAGE dSAOperation ) +olcAttributeTypes: ( 1.3.6.1.4.1.4203.666.1.55.25 NAME 'monitorConnectionStart + Time' DESC 'monitor connection start time' SUP monitorTimestamp SINGLE-VALUE + NO-USER-MODIFICATION USAGE dSAOperation ) +olcAttributeTypes: ( 1.3.6.1.4.1.4203.666.1.55.26 NAME 'monitorConnectionActiv + ityTime' DESC 'monitor connection activity time' SUP monitorTimestamp SINGLE- + VALUE NO-USER-MODIFICATION USAGE dSAOperation ) +olcAttributeTypes: ( 1.3.6.1.4.1.4203.666.1.55.27 NAME 'monitorIsShadow' DESC + 'TRUE if the database is shadow' EQUALITY booleanMatch SYNTAX 1.3.6.1.4.1.146 + 6.115.121.1.7 SINGLE-VALUE USAGE dSAOperation ) +olcAttributeTypes: ( 1.3.6.1.4.1.4203.666.1.55.28 NAME 'monitorUpdateRef' DESC + 'update referral for shadow databases' SUP monitoredInfo SINGLE-VALUE USAGE + dSAOperation ) +olcAttributeTypes: ( 1.3.6.1.4.1.4203.666.1.55.29 NAME 'monitorRuntimeConfig' + DESC 'TRUE if component allows runtime configuration' EQUALITY booleanMatch S + YNTAX 1.3.6.1.4.1.1466.115.121.1.7 SINGLE-VALUE USAGE dSAOperation ) +olcAttributeTypes: ( 1.3.6.1.4.1.4203.666.1.55.30 NAME 'monitorSuperiorDN' DES + C 'monitor superior DN' EQUALITY distinguishedNameMatch SYNTAX 1.3.6.1.4.1.14 + 66.115.121.1.12 NO-USER-MODIFICATION USAGE dSAOperation ) +olcAttributeTypes: ( 1.3.6.1.4.1.4203.666.1.55.31 NAME 'monitorConnectionOpsAs + ync' DESC 'monitor number of asynchronous operations in 
execution within the + connection' SUP monitorCounter NO-USER-MODIFICATION USAGE dSAOperation ) +olcAttributeTypes: ( olmSyncReplAttributes:1 NAME 'olmSRProviderURIList' DESC + 'List of provider URIs for this consumer instance' SUP monitoredInfo NO-USER- + MODIFICATION USAGE dSAOperation ) +olcAttributeTypes: ( olmSyncReplAttributes:2 NAME 'olmSRConnection' DESC 'Loca + l address:port of connection to provider' SUP monitoredInfo SINGLE-VALUE NO-U + SER-MODIFICATION USAGE dSAOperation ) +olcAttributeTypes: ( olmSyncReplAttributes:3 NAME 'olmSRSyncPhase' DESC 'Curre + nt syncrepl mode' SUP monitoredInfo SINGLE-VALUE NO-USER-MODIFICATION USAGE d + SAOperation ) +olcAttributeTypes: ( olmSyncReplAttributes:4 NAME 'olmSRNextConnect' DESC 'Sch + eduled time of next connection attempt' SUP monitorTimestamp SINGLE-VALUE NO- + USER-MODIFICATION USAGE dSAOperation ) +olcAttributeTypes: ( olmSyncReplAttributes:5 NAME 'olmSRLastConnect' DESC 'Tim + e last connected to provider' SUP monitorTimestamp SINGLE-VALUE NO-USER-MODIF + ICATION USAGE dSAOperation ) +olcAttributeTypes: ( olmSyncReplAttributes:6 NAME 'olmSRLastContact' DESC 'Tim + e last message received from provider' SUP monitorTimestamp SINGLE-VALUE NO-U + SER-MODIFICATION USAGE dSAOperation ) +olcAttributeTypes: ( olmSyncReplAttributes:7 NAME 'olmSRLastCookieRcvd' DESC ' + Last sync cookie received from provider' SUP monitoredInfo NO-USER-MODIFICATI + ON USAGE dSAOperation ) +olcAttributeTypes: ( olmSyncReplAttributes:8 NAME 'olmSRLastCookieSent' DESC ' + Last sync cookie sent to provider' SUP monitoredInfo NO-USER-MODIFICATION USA + GE dSAOperation ) +olcAttributeTypes: ( 1.3.6.1.4.1.4203.666.1.5 NAME 'OpenLDAPaci' DESC 'OpenLDA + P access control information (experimental)' EQUALITY OpenLDAPaciMatch SYNTAX + 1.3.6.1.4.1.4203.666.2.1 USAGE directoryOperation ) +olcAttributeTypes: ( OLcfgBkAt:12.1 NAME 'olcBkMdbIdlExp' DESC 'Power of 2 use + d to set IDL size' EQUALITY integerMatch SYNTAX OMsInteger SINGLE-VALUE ) 
+olcAttributeTypes: ( OLcfgDbAt:1.2 NAME 'olcDbCheckpoint' DESC 'Database check + point interval in kbytes and minutes' EQUALITY caseIgnoreMatch SYNTAX OMsDire + ctoryString SINGLE-VALUE ) +olcAttributeTypes: ( OLcfgDbAt:1.4 NAME 'olcDbNoSync' DESC 'Disable synchronou + s database writes' EQUALITY booleanMatch SYNTAX OMsBoolean SINGLE-VALUE ) +olcAttributeTypes: ( OLcfgDbAt:12.3 NAME 'olcDbEnvFlags' DESC 'Database enviro + nment flags' EQUALITY caseIgnoreMatch SYNTAX OMsDirectoryString ) +olcAttributeTypes: ( OLcfgDbAt:0.2 NAME 'olcDbIndex' DESC 'Attribute index par + ameters' EQUALITY caseIgnoreMatch SYNTAX OMsDirectoryString ) +olcAttributeTypes: ( OLcfgDbAt:12.4 NAME 'olcDbMaxEntrySize' DESC 'Maximum siz + e of an entry in bytes' EQUALITY integerMatch SYNTAX OMsInteger SINGLE-VALUE + ) +olcAttributeTypes: ( OLcfgDbAt:12.1 NAME 'olcDbMaxReaders' DESC 'Maximum numbe + r of threads that may access the DB concurrently' EQUALITY integerMatch SYNTA + X OMsInteger SINGLE-VALUE ) +olcAttributeTypes: ( OLcfgDbAt:12.2 NAME 'olcDbMaxSize' DESC 'Maximum size of + DB in bytes' EQUALITY integerMatch SYNTAX OMsInteger SINGLE-VALUE ) +olcAttributeTypes: ( OLcfgDbAt:0.3 NAME 'olcDbMode' DESC 'Unix permissions of + database files' EQUALITY caseIgnoreMatch SYNTAX OMsDirectoryString SINGLE-VAL + UE ) +olcAttributeTypes: ( OLcfgDbAt:12.6 NAME 'olcDbMultival' DESC 'Hi/Lo threshold + s for splitting multivalued attr out of main blob' EQUALITY caseIgnoreMatch S + YNTAX OMsDirectoryString ) +olcAttributeTypes: ( OLcfgDbAt:12.5 NAME 'olcDbRtxnSize' DESC 'Number of entri + es to process in one read transaction' EQUALITY integerMatch SYNTAX OMsIntege + r SINGLE-VALUE ) +olcAttributeTypes: ( OLcfgDbAt:1.9 NAME 'olcDbSearchStack' DESC 'Depth of sear + ch stack in IDLs' EQUALITY integerMatch SYNTAX OMsInteger SINGLE-VALUE ) +olcAttributeTypes: ( 1.2.840.113556.1.2.102 NAME 'memberOf' DESC 'Group that t + he entry belongs to' EQUALITY distinguishedNameMatch SYNTAX 1.3.6.1.4.1.1466. 
+ 115.121.1.12 NO-USER-MODIFICATION USAGE dSAOperation X-ORIGIN 'iPlanet Delega + ted Administrator' ) +olcAttributeTypes: ( OLcfgOvAt:18.0 NAME 'olcMemberOfDN' DESC 'DN to be used a + s modifiersName' EQUALITY distinguishedNameMatch SYNTAX OMsDN SINGLE-VALUE ) +olcAttributeTypes: ( OLcfgOvAt:18.1 NAME 'olcMemberOfDangling' DESC 'Behavior + with respect to dangling members, constrained to ignore, drop, error' EQUALIT + Y caseIgnoreMatch SYNTAX OMsDirectoryString SINGLE-VALUE ) +olcAttributeTypes: ( OLcfgOvAt:18.2 NAME 'olcMemberOfRefInt' DESC 'Take care o + f referential integrity' EQUALITY booleanMatch SYNTAX OMsBoolean SINGLE-VALUE + ) +olcAttributeTypes: ( OLcfgOvAt:18.3 NAME 'olcMemberOfGroupOC' DESC 'Group obje + ctClass' EQUALITY caseIgnoreMatch SYNTAX OMsDirectoryString SINGLE-VALUE ) +olcAttributeTypes: ( OLcfgOvAt:18.4 NAME 'olcMemberOfMemberAD' DESC 'member at + tribute' EQUALITY caseIgnoreMatch SYNTAX OMsDirectoryString SINGLE-VALUE ) +olcAttributeTypes: ( OLcfgOvAt:18.5 NAME 'olcMemberOfMemberOfAD' DESC 'memberO + f attribute' EQUALITY caseIgnoreMatch SYNTAX OMsDirectoryString SINGLE-VALUE + ) +olcAttributeTypes: ( OLcfgOvAt:18.7 NAME 'olcMemberOfDanglingError' DESC 'Erro + r code returned in case of dangling back reference' EQUALITY caseIgnoreMatch + SYNTAX OMsDirectoryString SINGLE-VALUE ) +olcAttributeTypes: ( OLcfgOvAt:11.1 NAME 'olcRefintAttribute' DESC 'Attributes + for referential integrity' EQUALITY caseIgnoreMatch SYNTAX OMsDirectoryStrin + g ) +olcAttributeTypes: ( OLcfgOvAt:11.2 NAME 'olcRefintNothing' DESC 'Replacement + DN to supply when needed' EQUALITY distinguishedNameMatch SYNTAX OMsDN SINGLE + -VALUE ) +olcAttributeTypes: ( OLcfgOvAt:11.3 NAME 'olcRefintModifiersName' DESC 'The DN + to use as modifiersName' EQUALITY distinguishedNameMatch SYNTAX OMsDN SINGLE + -VALUE ) +olcAttributeTypes: ( OLcfgOvAt:10.1 NAME 'olcUniqueBase' DESC 'Subtree for uni + queness searches' EQUALITY distinguishedNameMatch SYNTAX OMsDN SINGLE-VALUE ) 
+olcAttributeTypes: ( OLcfgOvAt:10.2 NAME 'olcUniqueIgnore' DESC 'Attributes fo + r which uniqueness shall not be enforced' EQUALITY caseIgnoreMatch ORDERING c + aseIgnoreOrderingMatch SUBSTR caseIgnoreSubstringsMatch SYNTAX OMsDirectorySt + ring ) +olcAttributeTypes: ( OLcfgOvAt:10.3 NAME 'olcUniqueAttribute' DESC 'Attributes + for which uniqueness shall be enforced' EQUALITY caseIgnoreMatch ORDERING ca + seIgnoreOrderingMatch SUBSTR caseIgnoreSubstringsMatch SYNTAX OMsDirectoryStr + ing ) +olcAttributeTypes: ( OLcfgOvAt:10.4 NAME 'olcUniqueStrict' DESC 'Enforce uniqu + eness of null values' EQUALITY booleanMatch SYNTAX OMsBoolean SINGLE-VALUE ) +olcAttributeTypes: ( OLcfgOvAt:10.5 NAME 'olcUniqueURI' DESC 'List of keywords + and LDAP URIs for a uniqueness domain' EQUALITY caseExactMatch ORDERING case + ExactOrderingMatch SUBSTR caseExactSubstringsMatch SYNTAX OMsDirectoryString + ) +olcAttributeTypes: ( olmDatabaseAttributes:1 NAME 'olmDbDirectory' DESC 'Path + name of the directory where the database environment resides' SUP monitoredIn + fo NO-USER-MODIFICATION USAGE dSAOperation ) +olcAttributeTypes: ( olmMDBAttributes:1 NAME 'olmMDBPagesMax' DESC 'Maximum nu + mber of pages' SUP monitorCounter NO-USER-MODIFICATION USAGE dSAOperation ) +olcAttributeTypes: ( olmMDBAttributes:2 NAME 'olmMDBPagesUsed' DESC 'Number of + pages in use' SUP monitorCounter NO-USER-MODIFICATION USAGE dSAOperation ) +olcAttributeTypes: ( olmMDBAttributes:3 NAME 'olmMDBPagesFree' DESC 'Number of + free pages' SUP monitorCounter NO-USER-MODIFICATION USAGE dSAOperation ) +olcAttributeTypes: ( olmMDBAttributes:4 NAME 'olmMDBReadersMax' DESC 'Maximum + number of readers' SUP monitorCounter NO-USER-MODIFICATION USAGE dSAOperation + ) +olcAttributeTypes: ( olmMDBAttributes:5 NAME 'olmMDBReadersUsed' DESC 'Number + of readers in use' SUP monitorCounter NO-USER-MODIFICATION USAGE dSAOperation + ) +olcAttributeTypes: ( olmMDBAttributes:6 NAME 'olmMDBEntries' DESC 'Number of e + ntries in DB' SUP 
monitorCounter NO-USER-MODIFICATION USAGE dSAOperation ) +olcObjectClasses: ( 2.5.6.0 NAME 'top' DESC 'top of the superclass chain' ABST + RACT MUST objectClass ) +olcObjectClasses: ( 1.3.6.1.4.1.1466.101.120.111 NAME 'extensibleObject' DESC + 'RFC4512: extensible object' SUP top AUXILIARY ) +olcObjectClasses: ( 2.5.6.1 NAME 'alias' DESC 'RFC4512: an alias' SUP top STRU + CTURAL MUST aliasedObjectName ) +olcObjectClasses: ( 2.16.840.1.113730.3.2.6 NAME 'referral' DESC 'namedref: na + med subordinate referral' SUP top STRUCTURAL MUST ref ) +olcObjectClasses: ( 1.3.6.1.4.1.4203.1.4.1 NAME ( 'OpenLDAProotDSE' 'LDAProotD + SE' ) DESC 'OpenLDAP Root DSE object' SUP top STRUCTURAL MAY cn ) +olcObjectClasses: ( 2.5.17.0 NAME 'subentry' DESC 'RFC3672: subentry' SUP top + STRUCTURAL MUST ( cn $ subtreeSpecification ) ) +olcObjectClasses: ( 2.5.20.1 NAME 'subschema' DESC 'RFC4512: controlling subsc + hema (sub)entry' AUXILIARY MAY ( dITStructureRules $ nameForms $ dITContentRu + les $ objectClasses $ attributeTypes $ matchingRules $ matchingRuleUse ) ) +olcObjectClasses: ( 2.5.17.2 NAME 'collectiveAttributeSubentry' DESC 'RFC3671: + collective attribute subentry' AUXILIARY ) +olcObjectClasses: ( 1.3.6.1.4.1.1466.101.119.2 NAME 'dynamicObject' DESC 'RFC2 + 589: Dynamic Object' SUP top AUXILIARY ) +olcObjectClasses: ( 1.3.6.1.4.1.4203.666.3.4 NAME 'glue' DESC 'Glue Entry' SUP + top STRUCTURAL ) +olcObjectClasses: ( 1.3.6.1.4.1.4203.666.3.5 NAME 'syncConsumerSubentry' DESC + 'Persistent Info for SyncRepl Consumer' AUXILIARY MAY syncreplCookie ) +olcObjectClasses: ( 1.3.6.1.4.1.4203.666.3.6 NAME 'syncProviderSubentry' DESC + 'Persistent Info for SyncRepl Producer' AUXILIARY MAY contextCSN ) +olcObjectClasses: ( OLcfgGlOc:0 NAME 'olcConfig' DESC 'OpenLDAP configuration + object' SUP top ABSTRACT ) +olcObjectClasses: ( OLcfgGlOc:1 NAME 'olcGlobal' DESC 'OpenLDAP Global configu + ration options' SUP olcConfig STRUCTURAL MAY ( cn $ olcConfigFile $ olcConfig + Dir $ olcAllows $ 
olcArgsFile $ olcAttributeOptions $ olcAuthIDRewrite $ olcA + uthzPolicy $ olcAuthzRegexp $ olcConcurrency $ olcConnMaxPending $ olcConnMax + PendingAuth $ olcDisallows $ olcGentleHUP $ olcIdleTimeout $ olcIndexSubstrIf + MaxLen $ olcIndexSubstrIfMinLen $ olcIndexSubstrAnyLen $ olcIndexSubstrAnySte + p $ olcIndexHash64 $ olcIndexIntLen $ olcListenerThreads $ olcLocalSSF $ olcL + ogFile $ olcLogLevel $ olcMaxFilterDepth $ olcPasswordCryptSaltFormat $ olcPa + sswordHash $ olcPidFile $ olcPluginLogFile $ olcReadOnly $ olcReferral $ olcR + eplogFile $ olcRequires $ olcRestrict $ olcReverseLookup $ olcRootDSE $ olcSa + slAuxprops $ olcSaslAuxpropsDontUseCopy $ olcSaslAuxpropsDontUseCopyIgnore $ + olcSaslCBinding $ olcSaslHost $ olcSaslRealm $ olcSaslSecProps $ olcSecurity + $ olcServerID $ olcSizeLimit $ olcSockbufMaxIncoming $ olcSockbufMaxIncomingA + uth $ olcTCPBuffer $ olcThreads $ olcThreadQueues $ olcTimeLimit $ olcTLSCACe + rtificateFile $ olcTLSCACertificatePath $ olcTLSCertificateFile $ olcTLSCerti + ficateKeyFile $ olcTLSCipherSuite $ olcTLSCRLCheck $ olcTLSCACertificate $ ol + cTLSCertificate $ olcTLSCertificateKey $ olcTLSRandFile $ olcTLSVerifyClient + $ olcTLSDHParamFile $ olcTLSECName $ olcTLSCRLFile $ olcTLSProtocolMin $ olcT + oolThreads $ olcWriteTimeout $ olcObjectIdentifier $ olcAttributeTypes $ olcO + bjectClasses $ olcDitContentRules $ olcLdapSyntaxes ) ) +olcObjectClasses: ( OLcfgGlOc:2 NAME 'olcSchemaConfig' DESC 'OpenLDAP schema o + bject' SUP olcConfig STRUCTURAL MAY ( cn $ olcObjectIdentifier $ olcLdapSynta + xes $ olcAttributeTypes $ olcObjectClasses $ olcDitContentRules ) ) +olcObjectClasses: ( OLcfgGlOc:3 NAME 'olcBackendConfig' DESC 'OpenLDAP Backend + -specific options' SUP olcConfig STRUCTURAL MUST olcBackend ) +olcObjectClasses: ( OLcfgGlOc:4 NAME 'olcDatabaseConfig' DESC 'OpenLDAP Databa + se-specific options' SUP olcConfig STRUCTURAL MUST olcDatabase MAY ( olcDisab + led $ olcHidden $ olcSuffix $ olcSubordinate $ olcAccess $ 
olcAddContentAcl $ + olcLastMod $ olcLastBind $ olcLimits $ olcMaxDerefDepth $ olcPlugin $ olcRea + dOnly $ olcReplica $ olcReplicaArgsFile $ olcReplicaPidFile $ olcReplicationI + nterval $ olcReplogFile $ olcRequires $ olcRestrict $ olcRootDN $ olcRootPW $ + olcSchemaDN $ olcSecurity $ olcSizeLimit $ olcSyncUseSubentry $ olcSyncrepl + $ olcTimeLimit $ olcUpdateDN $ olcUpdateRef $ olcMultiProvider $ olcMonitorin + g $ olcExtraAttrs ) ) +olcObjectClasses: ( OLcfgGlOc:5 NAME 'olcOverlayConfig' DESC 'OpenLDAP Overlay + -specific options' SUP olcConfig STRUCTURAL MUST olcOverlay MAY olcDisabled ) +olcObjectClasses: ( OLcfgGlOc:6 NAME 'olcIncludeFile' DESC 'OpenLDAP configura + tion include file' SUP olcConfig STRUCTURAL MUST olcInclude MAY ( cn $ olcRoo + tDSE ) ) +olcObjectClasses: ( OLcfgGlOc:7 NAME 'olcFrontendConfig' DESC 'OpenLDAP fronte + nd configuration' AUXILIARY MAY ( olcDefaultSearchBase $ olcPasswordHash $ ol + cSortVals ) ) +olcObjectClasses: ( OLcfgGlOc:8 NAME 'olcModuleList' DESC 'OpenLDAP dynamic mo + dule info' SUP olcConfig STRUCTURAL MAY ( cn $ olcModulePath $ olcModuleLoad + ) ) +olcObjectClasses: ( OLcfgDbOc:2.1 NAME 'olcLdifConfig' DESC 'LDIF backend conf + iguration' SUP olcDatabaseConfig STRUCTURAL MUST olcDbDirectory ) +olcObjectClasses: ( 1.3.6.1.4.1.4203.666.3.16.1 NAME 'monitor' DESC 'OpenLDAP + system monitoring' SUP top STRUCTURAL MUST cn MAY ( description $ seeAlso $ l + abeledURI $ monitoredInfo $ managedInfo $ monitorOverlay ) ) +olcObjectClasses: ( 1.3.6.1.4.1.4203.666.3.16.2 NAME 'monitorServer' DESC 'Ser + ver monitoring root entry' SUP monitor STRUCTURAL ) +olcObjectClasses: ( 1.3.6.1.4.1.4203.666.3.16.3 NAME 'monitorContainer' DESC ' + monitor container class' SUP monitor STRUCTURAL ) +olcObjectClasses: ( 1.3.6.1.4.1.4203.666.3.16.4 NAME 'monitorCounterObject' DE + SC 'monitor counter class' SUP monitor STRUCTURAL ) +olcObjectClasses: ( 1.3.6.1.4.1.4203.666.3.16.5 NAME 'monitorOperation' DESC ' + monitor operation class' SUP 
monitor STRUCTURAL ) +olcObjectClasses: ( 1.3.6.1.4.1.4203.666.3.16.6 NAME 'monitorConnection' DESC + 'monitor connection class' SUP monitor STRUCTURAL ) +olcObjectClasses: ( 1.3.6.1.4.1.4203.666.3.16.7 NAME 'managedObject' DESC 'mon + itor managed entity class' SUP monitor STRUCTURAL ) +olcObjectClasses: ( 1.3.6.1.4.1.4203.666.3.16.8 NAME 'monitoredObject' DESC 'm + onitor monitored entity class' SUP monitor STRUCTURAL ) +olcObjectClasses: ( OLcfgDbOc:4.1 NAME 'olcMonitorConfig' DESC 'Monitor backen + d configuration' SUP olcDatabaseConfig STRUCTURAL ) +olcObjectClasses: ( olmSyncReplObjectClasses:1 NAME 'olmSyncReplInstance' SUP + monitoredObject STRUCTURAL MAY ( olmSRProviderURIList $ olmSRConnection $ olm + SRSyncPhase $ olmSRNextConnect $ olmSRLastConnect $ olmSRLastContact $ olmSRL + astCookieRcvd $ olmSRLastCookieSent ) ) +olcObjectClasses: ( OLcfgBkOc:12.1 NAME 'olcMdbBkConfig' DESC 'MDB backend con + figuration' SUP olcBackendConfig STRUCTURAL MAY olcBkMdbIdlExp ) +olcObjectClasses: ( OLcfgDbOc:12.1 NAME 'olcMdbConfig' DESC 'MDB database conf + iguration' SUP olcDatabaseConfig STRUCTURAL MUST olcDbDirectory MAY ( olcDbCh + eckpoint $ olcDbEnvFlags $ olcDbNoSync $ olcDbIndex $ olcDbMaxReaders $ olcDb + MaxSize $ olcDbMode $ olcDbSearchStack $ olcDbMaxEntrySize $ olcDbRtxnSize $ + olcDbMultival ) ) +olcObjectClasses: ( OLcfgOvOc:18.1 NAME ( 'olcMemberOfConfig' 'olcMemberOf' ) + DESC 'Member-of configuration' SUP olcOverlayConfig STRUCTURAL MAY ( olcMembe + rOfDN $ olcMemberOfDangling $ olcMemberOfDanglingError $ olcMemberOfRefInt $ + olcMemberOfGroupOC $ olcMemberOfMemberAD $ olcMemberOfMemberOfAD ) ) +olcObjectClasses: ( OLcfgOvOc:11.1 NAME 'olcRefintConfig' DESC 'Referential in + tegrity configuration' SUP olcOverlayConfig STRUCTURAL MAY ( olcRefintAttribu + te $ olcRefintNothing $ olcRefintModifiersName ) ) +olcObjectClasses: ( OLcfgOvOc:10.1 NAME 'olcUniqueConfig' DESC 'Attribute valu + e uniqueness configuration' SUP olcOverlayConfig STRUCTURAL MAY ( 
olcUniqueBa + se $ olcUniqueIgnore $ olcUniqueAttribute $ olcUniqueStrict $ olcUniqueURI ) + ) +olcObjectClasses: ( olmMDBObjectClasses:2 NAME 'olmMDBDatabase' SUP top AUXILI + ARY MAY ( olmDbDirectory $ olmMDBPagesMax $ olmMDBPagesUsed $ olmMDBPagesFree + $ olmMDBReadersMax $ olmMDBReadersUsed $ olmMDBEntries ) ) +structuralObjectClass: olcSchemaConfig +entryUUID: bae035d4-4e5c-103c-8e3d-83bde8bbc3e8 +creatorsName: cn=config +createTimestamp: 20220412033105Z +entryCSN: 20220412033105.684579Z#000000#000#000000 +modifiersName: cn=config +modifyTimestamp: 20220412033105Z diff --git a/dirsrvtests/tests/data/openldap_2_389/memberof/slapd.d/cn=config/cn=schema/cn={0}core.ldif b/dirsrvtests/tests/data/openldap_2_389/memberof/slapd.d/cn=config/cn=schema/cn={0}core.ldif new file mode 100755 index 0000000..4a571f8 --- /dev/null +++ b/dirsrvtests/tests/data/openldap_2_389/memberof/slapd.d/cn=config/cn=schema/cn={0}core.ldif @@ -0,0 +1,244 @@ +# AUTO-GENERATED FILE - DO NOT EDIT!! Use ldapmodify. +# CRC32 0e84f447 +dn: cn={0}core +objectClass: olcSchemaConfig +cn: {0}core +olcAttributeTypes: {0}( 2.5.4.2 NAME 'knowledgeInformation' DESC 'RFC2256: kno + wledge information' EQUALITY caseIgnoreMatch SYNTAX 1.3.6.1.4.1.1466.115.121. 
+ 1.15{32768} ) +olcAttributeTypes: {1}( 2.5.4.4 NAME ( 'sn' 'surname' ) DESC 'RFC2256: last (f + amily) name(s) for which the entity is known by' SUP name ) +olcAttributeTypes: {2}( 2.5.4.5 NAME 'serialNumber' DESC 'RFC2256: serial numb + er of the entity' EQUALITY caseIgnoreMatch SUBSTR caseIgnoreSubstringsMatch S + YNTAX 1.3.6.1.4.1.1466.115.121.1.44{64} ) +olcAttributeTypes: {3}( 2.5.4.6 NAME ( 'c' 'countryName' ) DESC 'RFC4519: two- + letter ISO-3166 country code' SUP name SYNTAX 1.3.6.1.4.1.1466.115.121.1.11 S + INGLE-VALUE ) +olcAttributeTypes: {4}( 2.5.4.7 NAME ( 'l' 'localityName' ) DESC 'RFC2256: loc + ality which this object resides in' SUP name ) +olcAttributeTypes: {5}( 2.5.4.8 NAME ( 'st' 'stateOrProvinceName' ) DESC 'RFC2 + 256: state or province which this object resides in' SUP name ) +olcAttributeTypes: {6}( 2.5.4.9 NAME ( 'street' 'streetAddress' ) DESC 'RFC225 + 6: street address of this object' EQUALITY caseIgnoreMatch SUBSTR caseIgnoreS + ubstringsMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.15{128} ) +olcAttributeTypes: {7}( 2.5.4.10 NAME ( 'o' 'organizationName' ) DESC 'RFC2256 + : organization this object belongs to' SUP name ) +olcAttributeTypes: {8}( 2.5.4.11 NAME ( 'ou' 'organizationalUnitName' ) DESC ' + RFC2256: organizational unit this object belongs to' SUP name ) +olcAttributeTypes: {9}( 2.5.4.12 NAME 'title' DESC 'RFC2256: title associated + with the entity' SUP name ) +olcAttributeTypes: {10}( 2.5.4.14 NAME 'searchGuide' DESC 'RFC2256: search gui + de, deprecated by enhancedSearchGuide' SYNTAX 1.3.6.1.4.1.1466.115.121.1.25 ) +olcAttributeTypes: {11}( 2.5.4.15 NAME 'businessCategory' DESC 'RFC2256: busin + ess category' EQUALITY caseIgnoreMatch SUBSTR caseIgnoreSubstringsMatch SYNTA + X 1.3.6.1.4.1.1466.115.121.1.15{128} ) +olcAttributeTypes: {12}( 2.5.4.16 NAME 'postalAddress' DESC 'RFC2256: postal a + ddress' EQUALITY caseIgnoreListMatch SUBSTR caseIgnoreListSubstringsMatch SYN + TAX 1.3.6.1.4.1.1466.115.121.1.41 ) +olcAttributeTypes: 
{13}( 2.5.4.17 NAME 'postalCode' DESC 'RFC2256: postal code + ' EQUALITY caseIgnoreMatch SUBSTR caseIgnoreSubstringsMatch SYNTAX 1.3.6.1.4. + 1.1466.115.121.1.15{40} ) +olcAttributeTypes: {14}( 2.5.4.18 NAME 'postOfficeBox' DESC 'RFC2256: Post Off + ice Box' EQUALITY caseIgnoreMatch SUBSTR caseIgnoreSubstringsMatch SYNTAX 1.3 + .6.1.4.1.1466.115.121.1.15{40} ) +olcAttributeTypes: {15}( 2.5.4.19 NAME 'physicalDeliveryOfficeName' DESC 'RFC2 + 256: Physical Delivery Office Name' EQUALITY caseIgnoreMatch SUBSTR caseIgnor + eSubstringsMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.15{128} ) +olcAttributeTypes: {16}( 2.5.4.20 NAME 'telephoneNumber' DESC 'RFC2256: Teleph + one Number' EQUALITY telephoneNumberMatch SUBSTR telephoneNumberSubstringsMat + ch SYNTAX 1.3.6.1.4.1.1466.115.121.1.50{32} ) +olcAttributeTypes: {17}( 2.5.4.21 NAME 'telexNumber' DESC 'RFC2256: Telex Numb + er' SYNTAX 1.3.6.1.4.1.1466.115.121.1.52 ) +olcAttributeTypes: {18}( 2.5.4.22 NAME 'teletexTerminalIdentifier' DESC 'RFC22 + 56: Teletex Terminal Identifier' SYNTAX 1.3.6.1.4.1.1466.115.121.1.51 ) +olcAttributeTypes: {19}( 2.5.4.23 NAME ( 'facsimileTelephoneNumber' 'fax' ) DE + SC 'RFC2256: Facsimile (Fax) Telephone Number' SYNTAX 1.3.6.1.4.1.1466.115.12 + 1.1.22 ) +olcAttributeTypes: {20}( 2.5.4.24 NAME 'x121Address' DESC 'RFC2256: X.121 Addr + ess' EQUALITY numericStringMatch SUBSTR numericStringSubstringsMatch SYNTAX 1 + .3.6.1.4.1.1466.115.121.1.36{15} ) +olcAttributeTypes: {21}( 2.5.4.25 NAME 'internationaliSDNNumber' DESC 'RFC2256 + : international ISDN number' EQUALITY numericStringMatch SUBSTR numericString + SubstringsMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.36{16} ) +olcAttributeTypes: {22}( 2.5.4.26 NAME 'registeredAddress' DESC 'RFC2256: regi + stered postal address' SUP postalAddress SYNTAX 1.3.6.1.4.1.1466.115.121.1.41 + ) +olcAttributeTypes: {23}( 2.5.4.27 NAME 'destinationIndicator' DESC 'RFC2256: d + estination indicator' EQUALITY caseIgnoreMatch SUBSTR caseIgnoreSubstringsMat + ch SYNTAX 
1.3.6.1.4.1.1466.115.121.1.44{128} ) +olcAttributeTypes: {24}( 2.5.4.28 NAME 'preferredDeliveryMethod' DESC 'RFC2256 + : preferred delivery method' SYNTAX 1.3.6.1.4.1.1466.115.121.1.14 SINGLE-VALU + E ) +olcAttributeTypes: {25}( 2.5.4.29 NAME 'presentationAddress' DESC 'RFC2256: pr + esentation address' EQUALITY presentationAddressMatch SYNTAX 1.3.6.1.4.1.1466 + .115.121.1.43 SINGLE-VALUE ) +olcAttributeTypes: {26}( 2.5.4.30 NAME 'supportedApplicationContext' DESC 'RFC + 2256: supported application context' EQUALITY objectIdentifierMatch SYNTAX 1. + 3.6.1.4.1.1466.115.121.1.38 ) +olcAttributeTypes: {27}( 2.5.4.31 NAME 'member' DESC 'RFC2256: member of a gro + up' SUP distinguishedName ) +olcAttributeTypes: {28}( 2.5.4.32 NAME 'owner' DESC 'RFC2256: owner (of the ob + ject)' SUP distinguishedName ) +olcAttributeTypes: {29}( 2.5.4.33 NAME 'roleOccupant' DESC 'RFC2256: occupant + of role' SUP distinguishedName ) +olcAttributeTypes: {30}( 2.5.4.36 NAME 'userCertificate' DESC 'RFC2256: X.509 + user certificate, use ;binary' EQUALITY certificateExactMatch SYNTAX 1.3.6.1. + 4.1.1466.115.121.1.8 ) +olcAttributeTypes: {31}( 2.5.4.37 NAME 'cACertificate' DESC 'RFC2256: X.509 CA + certificate, use ;binary' EQUALITY certificateExactMatch SYNTAX 1.3.6.1.4.1. 
+ 1466.115.121.1.8 ) +olcAttributeTypes: {32}( 2.5.4.38 NAME 'authorityRevocationList' DESC 'RFC2256 + : X.509 authority revocation list, use ;binary' SYNTAX 1.3.6.1.4.1.1466.115.1 + 21.1.9 ) +olcAttributeTypes: {33}( 2.5.4.39 NAME 'certificateRevocationList' DESC 'RFC22 + 56: X.509 certificate revocation list, use ;binary' SYNTAX 1.3.6.1.4.1.1466.1 + 15.121.1.9 ) +olcAttributeTypes: {34}( 2.5.4.40 NAME 'crossCertificatePair' DESC 'RFC2256: X + .509 cross certificate pair, use ;binary' SYNTAX 1.3.6.1.4.1.1466.115.121.1.1 + 0 ) +olcAttributeTypes: {35}( 2.5.4.42 NAME ( 'givenName' 'gn' ) DESC 'RFC2256: fir + st name(s) for which the entity is known by' SUP name ) +olcAttributeTypes: {36}( 2.5.4.43 NAME 'initials' DESC 'RFC2256: initials of s + ome or all of names, but not the surname(s).' SUP name ) +olcAttributeTypes: {37}( 2.5.4.44 NAME 'generationQualifier' DESC 'RFC2256: na + me qualifier indicating a generation' SUP name ) +olcAttributeTypes: {38}( 2.5.4.45 NAME 'x500UniqueIdentifier' DESC 'RFC2256: X + .500 unique identifier' EQUALITY bitStringMatch SYNTAX 1.3.6.1.4.1.1466.115.1 + 21.1.6 ) +olcAttributeTypes: {39}( 2.5.4.46 NAME 'dnQualifier' DESC 'RFC2256: DN qualifi + er' EQUALITY caseIgnoreMatch ORDERING caseIgnoreOrderingMatch SUBSTR caseIgno + reSubstringsMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.44 ) +olcAttributeTypes: {40}( 2.5.4.47 NAME 'enhancedSearchGuide' DESC 'RFC2256: en + hanced search guide' SYNTAX 1.3.6.1.4.1.1466.115.121.1.21 ) +olcAttributeTypes: {41}( 2.5.4.48 NAME 'protocolInformation' DESC 'RFC2256: pr + otocol information' EQUALITY protocolInformationMatch SYNTAX 1.3.6.1.4.1.1466 + .115.121.1.42 ) +olcAttributeTypes: {42}( 2.5.4.50 NAME 'uniqueMember' DESC 'RFC2256: unique me + mber of a group' EQUALITY uniqueMemberMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1 + .34 ) +olcAttributeTypes: {43}( 2.5.4.51 NAME 'houseIdentifier' DESC 'RFC2256: house + identifier' EQUALITY caseIgnoreMatch SUBSTR caseIgnoreSubstringsMatch SYNTAX + 
1.3.6.1.4.1.1466.115.121.1.15{32768} ) +olcAttributeTypes: {44}( 2.5.4.52 NAME 'supportedAlgorithms' DESC 'RFC2256: su + pported algorithms' SYNTAX 1.3.6.1.4.1.1466.115.121.1.49 ) +olcAttributeTypes: {45}( 2.5.4.53 NAME 'deltaRevocationList' DESC 'RFC2256: de + lta revocation list; use ;binary' SYNTAX 1.3.6.1.4.1.1466.115.121.1.9 ) +olcAttributeTypes: {46}( 2.5.4.54 NAME 'dmdName' DESC 'RFC2256: name of DMD' S + UP name ) +olcAttributeTypes: {47}( 2.5.4.65 NAME 'pseudonym' DESC 'X.520(4th): pseudonym + for the object' SUP name ) +olcAttributeTypes: {48}( 0.9.2342.19200300.100.1.3 NAME ( 'mail' 'rfc822Mailbo + x' ) DESC 'RFC1274: RFC822 Mailbox' EQUALITY caseIgnoreIA5Match SUBSTR caseIg + noreIA5SubstringsMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.26{256} ) +olcAttributeTypes: {49}( 0.9.2342.19200300.100.1.25 NAME ( 'dc' 'domainCompone + nt' ) DESC 'RFC1274/2247: domain component' EQUALITY caseIgnoreIA5Match SUBST + R caseIgnoreIA5SubstringsMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.26 SINGLE-VA + LUE ) +olcAttributeTypes: {50}( 0.9.2342.19200300.100.1.37 NAME 'associatedDomain' DE + SC 'RFC1274: domain associated with object' EQUALITY caseIgnoreIA5Match SUBST + R caseIgnoreIA5SubstringsMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.26 ) +olcAttributeTypes: {51}( 1.2.840.113549.1.9.1 NAME ( 'email' 'emailAddress' 'p + kcs9email' ) DESC 'RFC3280: legacy attribute for email addresses in DNs' EQUA + LITY caseIgnoreIA5Match SUBSTR caseIgnoreIA5SubstringsMatch SYNTAX 1.3.6.1.4. 
+ 1.1466.115.121.1.26{128} ) +olcObjectClasses: {0}( 2.5.6.2 NAME 'country' DESC 'RFC2256: a country' SUP to + p STRUCTURAL MUST c MAY ( searchGuide $ description ) ) +olcObjectClasses: {1}( 2.5.6.3 NAME 'locality' DESC 'RFC2256: a locality' SUP + top STRUCTURAL MAY ( street $ seeAlso $ searchGuide $ st $ l $ description ) + ) +olcObjectClasses: {2}( 2.5.6.4 NAME 'organization' DESC 'RFC2256: an organizat + ion' SUP top STRUCTURAL MUST o MAY ( userPassword $ searchGuide $ seeAlso $ b + usinessCategory $ x121Address $ registeredAddress $ destinationIndicator $ pr + eferredDeliveryMethod $ telexNumber $ teletexTerminalIdentifier $ telephoneNu + mber $ internationaliSDNNumber $ facsimileTelephoneNumber $ street $ postOffi + ceBox $ postalCode $ postalAddress $ physicalDeliveryOfficeName $ st $ l $ de + scription ) ) +olcObjectClasses: {3}( 2.5.6.5 NAME 'organizationalUnit' DESC 'RFC2256: an org + anizational unit' SUP top STRUCTURAL MUST ou MAY ( userPassword $ searchGuide + $ seeAlso $ businessCategory $ x121Address $ registeredAddress $ destination + Indicator $ preferredDeliveryMethod $ telexNumber $ teletexTerminalIdentifier + $ telephoneNumber $ internationaliSDNNumber $ facsimileTelephoneNumber $ str + eet $ postOfficeBox $ postalCode $ postalAddress $ physicalDeliveryOfficeName + $ st $ l $ description ) ) +olcObjectClasses: {4}( 2.5.6.6 NAME 'person' DESC 'RFC2256: a person' SUP top + STRUCTURAL MUST ( sn $ cn ) MAY ( userPassword $ telephoneNumber $ seeAlso $ + description ) ) +olcObjectClasses: {5}( 2.5.6.7 NAME 'organizationalPerson' DESC 'RFC2256: an o + rganizational person' SUP person STRUCTURAL MAY ( title $ x121Address $ regis + teredAddress $ destinationIndicator $ preferredDeliveryMethod $ telexNumber $ + teletexTerminalIdentifier $ telephoneNumber $ internationaliSDNNumber $ facs + imileTelephoneNumber $ street $ postOfficeBox $ postalCode $ postalAddress $ + physicalDeliveryOfficeName $ ou $ st $ l ) ) +olcObjectClasses: {6}( 2.5.6.8 NAME 
'organizationalRole' DESC 'RFC2256: an org + anizational role' SUP top STRUCTURAL MUST cn MAY ( x121Address $ registeredAd + dress $ destinationIndicator $ preferredDeliveryMethod $ telexNumber $ telete + xTerminalIdentifier $ telephoneNumber $ internationaliSDNNumber $ facsimileTe + lephoneNumber $ seeAlso $ roleOccupant $ preferredDeliveryMethod $ street $ p + ostOfficeBox $ postalCode $ postalAddress $ physicalDeliveryOfficeName $ ou $ + st $ l $ description ) ) +olcObjectClasses: {7}( 2.5.6.9 NAME 'groupOfNames' DESC 'RFC2256: a group of n + ames (DNs)' SUP top STRUCTURAL MUST ( member $ cn ) MAY ( businessCategory $ + seeAlso $ owner $ ou $ o $ description ) ) +olcObjectClasses: {8}( 2.5.6.10 NAME 'residentialPerson' DESC 'RFC2256: an res + idential person' SUP person STRUCTURAL MUST l MAY ( businessCategory $ x121Ad + dress $ registeredAddress $ destinationIndicator $ preferredDeliveryMethod $ + telexNumber $ teletexTerminalIdentifier $ telephoneNumber $ internationaliSDN + Number $ facsimileTelephoneNumber $ preferredDeliveryMethod $ street $ postOf + ficeBox $ postalCode $ postalAddress $ physicalDeliveryOfficeName $ st $ l ) + ) +olcObjectClasses: {9}( 2.5.6.11 NAME 'applicationProcess' DESC 'RFC2256: an ap + plication process' SUP top STRUCTURAL MUST cn MAY ( seeAlso $ ou $ l $ descri + ption ) ) +olcObjectClasses: {10}( 2.5.6.12 NAME 'applicationEntity' DESC 'RFC2256: an ap + plication entity' SUP top STRUCTURAL MUST ( presentationAddress $ cn ) MAY ( + supportedApplicationContext $ seeAlso $ ou $ o $ l $ description ) ) +olcObjectClasses: {11}( 2.5.6.13 NAME 'dSA' DESC 'RFC2256: a directory system + agent (a server)' SUP applicationEntity STRUCTURAL MAY knowledgeInformation ) +olcObjectClasses: {12}( 2.5.6.14 NAME 'device' DESC 'RFC2256: a device' SUP to + p STRUCTURAL MUST cn MAY ( serialNumber $ seeAlso $ owner $ ou $ o $ l $ desc + ription ) ) +olcObjectClasses: {13}( 2.5.6.15 NAME 'strongAuthenticationUser' DESC 'RFC2256 + : a strong authentication 
user' SUP top AUXILIARY MUST userCertificate ) +olcObjectClasses: {14}( 2.5.6.16 NAME 'certificationAuthority' DESC 'RFC2256: + a certificate authority' SUP top AUXILIARY MUST ( authorityRevocationList $ c + ertificateRevocationList $ cACertificate ) MAY crossCertificatePair ) +olcObjectClasses: {15}( 2.5.6.17 NAME 'groupOfUniqueNames' DESC 'RFC2256: a gr + oup of unique names (DN and Unique Identifier)' SUP top STRUCTURAL MUST ( uni + queMember $ cn ) MAY ( businessCategory $ seeAlso $ owner $ ou $ o $ descript + ion ) ) +olcObjectClasses: {16}( 2.5.6.18 NAME 'userSecurityInformation' DESC 'RFC2256: + a user security information' SUP top AUXILIARY MAY supportedAlgorithms ) +olcObjectClasses: {17}( 2.5.6.16.2 NAME 'certificationAuthority-V2' SUP certif + icationAuthority AUXILIARY MAY deltaRevocationList ) +olcObjectClasses: {18}( 2.5.6.19 NAME 'cRLDistributionPoint' SUP top STRUCTURA + L MUST cn MAY ( certificateRevocationList $ authorityRevocationList $ deltaRe + vocationList ) ) +olcObjectClasses: {19}( 2.5.6.20 NAME 'dmd' SUP top STRUCTURAL MUST dmdName MA + Y ( userPassword $ searchGuide $ seeAlso $ businessCategory $ x121Address $ r + egisteredAddress $ destinationIndicator $ preferredDeliveryMethod $ telexNumb + er $ teletexTerminalIdentifier $ telephoneNumber $ internationaliSDNNumber $ + facsimileTelephoneNumber $ street $ postOfficeBox $ postalCode $ postalAddres + s $ physicalDeliveryOfficeName $ st $ l $ description ) ) +olcObjectClasses: {20}( 2.5.6.21 NAME 'pkiUser' DESC 'RFC2587: a PKI user' SUP + top AUXILIARY MAY userCertificate ) +olcObjectClasses: {21}( 2.5.6.22 NAME 'pkiCA' DESC 'RFC2587: PKI certificate a + uthority' SUP top AUXILIARY MAY ( authorityRevocationList $ certificateRevoca + tionList $ cACertificate $ crossCertificatePair ) ) +olcObjectClasses: {22}( 2.5.6.23 NAME 'deltaCRL' DESC 'RFC4523: X.509 delta CR + L' SUP top AUXILIARY MAY deltaRevocationList ) +olcObjectClasses: {23}( 1.3.6.1.4.1.250.3.15 NAME 'labeledURIObject' DESC 'RFC + 
2079: object that contains the URI attribute type' SUP top AUXILIARY MAY labe + ledURI ) +olcObjectClasses: {24}( 0.9.2342.19200300.100.4.19 NAME 'simpleSecurityObject' + DESC 'RFC1274: simple security object' SUP top AUXILIARY MUST userPassword ) +olcObjectClasses: {25}( 1.3.6.1.4.1.1466.344 NAME 'dcObject' DESC 'RFC2247: do + main component object' SUP top AUXILIARY MUST dc ) +olcObjectClasses: {26}( 1.3.6.1.1.3.1 NAME 'uidObject' DESC 'RFC2377: uid obje + ct' SUP top AUXILIARY MUST uid ) +structuralObjectClass: olcSchemaConfig +entryUUID: bae046e6-4e5c-103c-8e3e-83bde8bbc3e8 +creatorsName: cn=config +createTimestamp: 20220412033105Z +entryCSN: 20220412033105.684579Z#000000#000#000000 +modifiersName: cn=config +modifyTimestamp: 20220412033105Z diff --git a/dirsrvtests/tests/data/openldap_2_389/memberof/slapd.d/cn=config/cn=schema/cn={1}cosine.ldif b/dirsrvtests/tests/data/openldap_2_389/memberof/slapd.d/cn=config/cn=schema/cn={1}cosine.ldif new file mode 100755 index 0000000..9bff5cf --- /dev/null +++ b/dirsrvtests/tests/data/openldap_2_389/memberof/slapd.d/cn=config/cn=schema/cn={1}cosine.ldif @@ -0,0 +1,177 @@ +# AUTO-GENERATED FILE - DO NOT EDIT!! Use ldapmodify. +# CRC32 d2273202 +dn: cn={1}cosine +objectClass: olcSchemaConfig +cn: {1}cosine +olcAttributeTypes: {0}( 0.9.2342.19200300.100.1.2 NAME 'textEncodedORAddress' + EQUALITY caseIgnoreMatch SUBSTR caseIgnoreSubstringsMatch SYNTAX 1.3.6.1.4.1. 
+ 1466.115.121.1.15{256} ) +olcAttributeTypes: {1}( 0.9.2342.19200300.100.1.4 NAME 'info' DESC 'RFC1274: g + eneral information' EQUALITY caseIgnoreMatch SUBSTR caseIgnoreSubstringsMatch + SYNTAX 1.3.6.1.4.1.1466.115.121.1.15{2048} ) +olcAttributeTypes: {2}( 0.9.2342.19200300.100.1.5 NAME ( 'drink' 'favouriteDri + nk' ) DESC 'RFC1274: favorite drink' EQUALITY caseIgnoreMatch SUBSTR caseIgno + reSubstringsMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.15{256} ) +olcAttributeTypes: {3}( 0.9.2342.19200300.100.1.6 NAME 'roomNumber' DESC 'RFC1 + 274: room number' EQUALITY caseIgnoreMatch SUBSTR caseIgnoreSubstringsMatch S + YNTAX 1.3.6.1.4.1.1466.115.121.1.15{256} ) +olcAttributeTypes: {4}( 0.9.2342.19200300.100.1.7 NAME 'photo' DESC 'RFC1274: + photo (G3 fax)' SYNTAX 1.3.6.1.4.1.1466.115.121.1.23{25000} ) +olcAttributeTypes: {5}( 0.9.2342.19200300.100.1.8 NAME 'userClass' DESC 'RFC12 + 74: category of user' EQUALITY caseIgnoreMatch SUBSTR caseIgnoreSubstringsMat + ch SYNTAX 1.3.6.1.4.1.1466.115.121.1.15{256} ) +olcAttributeTypes: {6}( 0.9.2342.19200300.100.1.9 NAME 'host' DESC 'RFC1274: h + ost computer' EQUALITY caseIgnoreMatch SUBSTR caseIgnoreSubstringsMatch SYNTA + X 1.3.6.1.4.1.1466.115.121.1.15{256} ) +olcAttributeTypes: {7}( 0.9.2342.19200300.100.1.10 NAME 'manager' DESC 'RFC127 + 4: DN of manager' EQUALITY distinguishedNameMatch SYNTAX 1.3.6.1.4.1.1466.115 + .121.1.12 ) +olcAttributeTypes: {8}( 0.9.2342.19200300.100.1.11 NAME 'documentIdentifier' D + ESC 'RFC1274: unique identifier of document' EQUALITY caseIgnoreMatch SUBSTR + caseIgnoreSubstringsMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.15{256} ) +olcAttributeTypes: {9}( 0.9.2342.19200300.100.1.12 NAME 'documentTitle' DESC ' + RFC1274: title of document' EQUALITY caseIgnoreMatch SUBSTR caseIgnoreSubstri + ngsMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.15{256} ) +olcAttributeTypes: {10}( 0.9.2342.19200300.100.1.13 NAME 'documentVersion' DES + C 'RFC1274: version of document' EQUALITY caseIgnoreMatch SUBSTR caseIgnoreSu + 
bstringsMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.15{256} ) +olcAttributeTypes: {11}( 0.9.2342.19200300.100.1.14 NAME 'documentAuthor' DESC + 'RFC1274: DN of author of document' EQUALITY distinguishedNameMatch SYNTAX 1 + .3.6.1.4.1.1466.115.121.1.12 ) +olcAttributeTypes: {12}( 0.9.2342.19200300.100.1.15 NAME 'documentLocation' DE + SC 'RFC1274: location of document original' EQUALITY caseIgnoreMatch SUBSTR c + aseIgnoreSubstringsMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.15{256} ) +olcAttributeTypes: {13}( 0.9.2342.19200300.100.1.20 NAME ( 'homePhone' 'homeTe + lephoneNumber' ) DESC 'RFC1274: home telephone number' EQUALITY telephoneNumb + erMatch SUBSTR telephoneNumberSubstringsMatch SYNTAX 1.3.6.1.4.1.1466.115.121 + .1.50 ) +olcAttributeTypes: {14}( 0.9.2342.19200300.100.1.21 NAME 'secretary' DESC 'RFC + 1274: DN of secretary' EQUALITY distinguishedNameMatch SYNTAX 1.3.6.1.4.1.146 + 6.115.121.1.12 ) +olcAttributeTypes: {15}( 0.9.2342.19200300.100.1.22 NAME 'otherMailbox' SYNTAX + 1.3.6.1.4.1.1466.115.121.1.39 ) +olcAttributeTypes: {16}( 0.9.2342.19200300.100.1.26 NAME 'aRecord' EQUALITY ca + seIgnoreIA5Match SYNTAX 1.3.6.1.4.1.1466.115.121.1.26 ) +olcAttributeTypes: {17}( 0.9.2342.19200300.100.1.27 NAME 'mDRecord' EQUALITY c + aseIgnoreIA5Match SYNTAX 1.3.6.1.4.1.1466.115.121.1.26 ) +olcAttributeTypes: {18}( 0.9.2342.19200300.100.1.28 NAME 'mXRecord' EQUALITY c + aseIgnoreIA5Match SYNTAX 1.3.6.1.4.1.1466.115.121.1.26 ) +olcAttributeTypes: {19}( 0.9.2342.19200300.100.1.29 NAME 'nSRecord' EQUALITY c + aseIgnoreIA5Match SYNTAX 1.3.6.1.4.1.1466.115.121.1.26 ) +olcAttributeTypes: {20}( 0.9.2342.19200300.100.1.30 NAME 'sOARecord' EQUALITY + caseIgnoreIA5Match SYNTAX 1.3.6.1.4.1.1466.115.121.1.26 ) +olcAttributeTypes: {21}( 0.9.2342.19200300.100.1.31 NAME 'cNAMERecord' EQUALIT + Y caseIgnoreIA5Match SYNTAX 1.3.6.1.4.1.1466.115.121.1.26 ) +olcAttributeTypes: {22}( 0.9.2342.19200300.100.1.38 NAME 'associatedName' DESC + 'RFC1274: DN of entry associated with domain' EQUALITY 
distinguishedNameMatc + h SYNTAX 1.3.6.1.4.1.1466.115.121.1.12 ) +olcAttributeTypes: {23}( 0.9.2342.19200300.100.1.39 NAME 'homePostalAddress' D + ESC 'RFC1274: home postal address' EQUALITY caseIgnoreListMatch SUBSTR caseIg + noreListSubstringsMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.41 ) +olcAttributeTypes: {24}( 0.9.2342.19200300.100.1.40 NAME 'personalTitle' DESC + 'RFC1274: personal title' EQUALITY caseIgnoreMatch SUBSTR caseIgnoreSubstring + sMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.15{256} ) +olcAttributeTypes: {25}( 0.9.2342.19200300.100.1.41 NAME ( 'mobile' 'mobileTel + ephoneNumber' ) DESC 'RFC1274: mobile telephone number' EQUALITY telephoneNum + berMatch SUBSTR telephoneNumberSubstringsMatch SYNTAX 1.3.6.1.4.1.1466.115.12 + 1.1.50 ) +olcAttributeTypes: {26}( 0.9.2342.19200300.100.1.42 NAME ( 'pager' 'pagerTelep + honeNumber' ) DESC 'RFC1274: pager telephone number' EQUALITY telephoneNumber + Match SUBSTR telephoneNumberSubstringsMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1 + .50 ) +olcAttributeTypes: {27}( 0.9.2342.19200300.100.1.43 NAME ( 'co' 'friendlyCount + ryName' ) DESC 'RFC1274: friendly country name' EQUALITY caseIgnoreMatch SUBS + TR caseIgnoreSubstringsMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.15 ) +olcAttributeTypes: {28}( 0.9.2342.19200300.100.1.44 NAME 'uniqueIdentifier' DE + SC 'RFC1274: unique identifer' EQUALITY caseIgnoreMatch SYNTAX 1.3.6.1.4.1.14 + 66.115.121.1.15{256} ) +olcAttributeTypes: {29}( 0.9.2342.19200300.100.1.45 NAME 'organizationalStatus + ' DESC 'RFC1274: organizational status' EQUALITY caseIgnoreMatch SUBSTR caseI + gnoreSubstringsMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.15{256} ) +olcAttributeTypes: {30}( 0.9.2342.19200300.100.1.46 NAME 'janetMailbox' DESC ' + RFC1274: Janet mailbox' EQUALITY caseIgnoreIA5Match SUBSTR caseIgnoreIA5Subst + ringsMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.26{256} ) +olcAttributeTypes: {31}( 0.9.2342.19200300.100.1.47 NAME 'mailPreferenceOption + ' DESC 'RFC1274: mail preference option' SYNTAX 
1.3.6.1.4.1.1466.115.121.1.27 + ) +olcAttributeTypes: {32}( 0.9.2342.19200300.100.1.48 NAME 'buildingName' DESC ' + RFC1274: name of building' EQUALITY caseIgnoreMatch SUBSTR caseIgnoreSubstrin + gsMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.15{256} ) +olcAttributeTypes: {33}( 0.9.2342.19200300.100.1.49 NAME 'dSAQuality' DESC 'RF + C1274: DSA Quality' SYNTAX 1.3.6.1.4.1.1466.115.121.1.19 SINGLE-VALUE ) +olcAttributeTypes: {34}( 0.9.2342.19200300.100.1.50 NAME 'singleLevelQuality' + DESC 'RFC1274: Single Level Quality' SYNTAX 1.3.6.1.4.1.1466.115.121.1.13 SIN + GLE-VALUE ) +olcAttributeTypes: {35}( 0.9.2342.19200300.100.1.51 NAME 'subtreeMinimumQualit + y' DESC 'RFC1274: Subtree Minimum Quality' SYNTAX 1.3.6.1.4.1.1466.115.121.1. + 13 SINGLE-VALUE ) +olcAttributeTypes: {36}( 0.9.2342.19200300.100.1.52 NAME 'subtreeMaximumQualit + y' DESC 'RFC1274: Subtree Maximum Quality' SYNTAX 1.3.6.1.4.1.1466.115.121.1. + 13 SINGLE-VALUE ) +olcAttributeTypes: {37}( 0.9.2342.19200300.100.1.53 NAME 'personalSignature' D + ESC 'RFC1274: Personal Signature (G3 fax)' SYNTAX 1.3.6.1.4.1.1466.115.121.1. 
+ 23 ) +olcAttributeTypes: {38}( 0.9.2342.19200300.100.1.54 NAME 'dITRedirect' DESC 'R + FC1274: DIT Redirect' EQUALITY distinguishedNameMatch SYNTAX 1.3.6.1.4.1.1466 + .115.121.1.12 ) +olcAttributeTypes: {39}( 0.9.2342.19200300.100.1.55 NAME 'audio' DESC 'RFC1274 + : audio (u-law)' SYNTAX 1.3.6.1.4.1.1466.115.121.1.4{25000} ) +olcAttributeTypes: {40}( 0.9.2342.19200300.100.1.56 NAME 'documentPublisher' D + ESC 'RFC1274: publisher of document' EQUALITY caseIgnoreMatch SUBSTR caseIgno + reSubstringsMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.15 ) +olcObjectClasses: {0}( 0.9.2342.19200300.100.4.4 NAME ( 'pilotPerson' 'newPilo + tPerson' ) SUP person STRUCTURAL MAY ( userid $ textEncodedORAddress $ rfc822 + Mailbox $ favouriteDrink $ roomNumber $ userClass $ homeTelephoneNumber $ hom + ePostalAddress $ secretary $ personalTitle $ preferredDeliveryMethod $ busine + ssCategory $ janetMailbox $ otherMailbox $ mobileTelephoneNumber $ pagerTelep + honeNumber $ organizationalStatus $ mailPreferenceOption $ personalSignature + ) ) +olcObjectClasses: {1}( 0.9.2342.19200300.100.4.5 NAME 'account' SUP top STRUCT + URAL MUST userid MAY ( description $ seeAlso $ localityName $ organizationNam + e $ organizationalUnitName $ host ) ) +olcObjectClasses: {2}( 0.9.2342.19200300.100.4.6 NAME 'document' SUP top STRUC + TURAL MUST documentIdentifier MAY ( commonName $ description $ seeAlso $ loca + lityName $ organizationName $ organizationalUnitName $ documentTitle $ docume + ntVersion $ documentAuthor $ documentLocation $ documentPublisher ) ) +olcObjectClasses: {3}( 0.9.2342.19200300.100.4.7 NAME 'room' SUP top STRUCTURA + L MUST commonName MAY ( roomNumber $ description $ seeAlso $ telephoneNumber + ) ) +olcObjectClasses: {4}( 0.9.2342.19200300.100.4.9 NAME 'documentSeries' SUP top + STRUCTURAL MUST commonName MAY ( description $ seeAlso $ telephonenumber $ l + ocalityName $ organizationName $ organizationalUnitName ) ) +olcObjectClasses: {5}( 0.9.2342.19200300.100.4.13 NAME 'domain' SUP 
top STRUCT + URAL MUST domainComponent MAY ( associatedName $ organizationName $ descripti + on $ businessCategory $ seeAlso $ searchGuide $ userPassword $ localityName $ + stateOrProvinceName $ streetAddress $ physicalDeliveryOfficeName $ postalAdd + ress $ postalCode $ postOfficeBox $ streetAddress $ facsimileTelephoneNumber + $ internationalISDNNumber $ telephoneNumber $ teletexTerminalIdentifier $ tel + exNumber $ preferredDeliveryMethod $ destinationIndicator $ registeredAddress + $ x121Address ) ) +olcObjectClasses: {6}( 0.9.2342.19200300.100.4.14 NAME 'RFC822localPart' SUP d + omain STRUCTURAL MAY ( commonName $ surname $ description $ seeAlso $ telepho + neNumber $ physicalDeliveryOfficeName $ postalAddress $ postalCode $ postOffi + ceBox $ streetAddress $ facsimileTelephoneNumber $ internationalISDNNumber $ + telephoneNumber $ teletexTerminalIdentifier $ telexNumber $ preferredDelivery + Method $ destinationIndicator $ registeredAddress $ x121Address ) ) +olcObjectClasses: {7}( 0.9.2342.19200300.100.4.15 NAME 'dNSDomain' SUP domain + STRUCTURAL MAY ( ARecord $ MDRecord $ MXRecord $ NSRecord $ SOARecord $ CNAME + Record ) ) +olcObjectClasses: {8}( 0.9.2342.19200300.100.4.17 NAME 'domainRelatedObject' D + ESC 'RFC1274: an object related to an domain' SUP top AUXILIARY MUST associat + edDomain ) +olcObjectClasses: {9}( 0.9.2342.19200300.100.4.18 NAME 'friendlyCountry' SUP c + ountry STRUCTURAL MUST friendlyCountryName ) +olcObjectClasses: {10}( 0.9.2342.19200300.100.4.20 NAME 'pilotOrganization' SU + P ( organization $ organizationalUnit ) STRUCTURAL MAY buildingName ) +olcObjectClasses: {11}( 0.9.2342.19200300.100.4.21 NAME 'pilotDSA' SUP dsa STR + UCTURAL MAY dSAQuality ) +olcObjectClasses: {12}( 0.9.2342.19200300.100.4.22 NAME 'qualityLabelledData' + SUP top AUXILIARY MUST dsaQuality MAY ( subtreeMinimumQuality $ subtreeMaximu + mQuality ) ) +structuralObjectClass: olcSchemaConfig +entryUUID: bae04f1a-4e5c-103c-8e3f-83bde8bbc3e8 +creatorsName: cn=config 
+createTimestamp: 20220412033105Z +entryCSN: 20220412033105.684579Z#000000#000#000000 +modifiersName: cn=config +modifyTimestamp: 20220412033105Z diff --git a/dirsrvtests/tests/data/openldap_2_389/memberof/slapd.d/cn=config/cn=schema/cn={2}inetorgperson.ldif b/dirsrvtests/tests/data/openldap_2_389/memberof/slapd.d/cn=config/cn=schema/cn={2}inetorgperson.ldif new file mode 100755 index 0000000..8c4664a --- /dev/null +++ b/dirsrvtests/tests/data/openldap_2_389/memberof/slapd.d/cn=config/cn=schema/cn={2}inetorgperson.ldif @@ -0,0 +1,48 @@ +# AUTO-GENERATED FILE - DO NOT EDIT!! Use ldapmodify. +# CRC32 34e31df3 +dn: cn={2}inetorgperson +objectClass: olcSchemaConfig +cn: {2}inetorgperson +olcAttributeTypes: {0}( 2.16.840.1.113730.3.1.1 NAME 'carLicense' DESC 'RFC279 + 8: vehicle license or registration plate' EQUALITY caseIgnoreMatch SUBSTR cas + eIgnoreSubstringsMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.15 ) +olcAttributeTypes: {1}( 2.16.840.1.113730.3.1.2 NAME 'departmentNumber' DESC ' + RFC2798: identifies a department within an organization' EQUALITY caseIgnoreM + atch SUBSTR caseIgnoreSubstringsMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.15 ) +olcAttributeTypes: {2}( 2.16.840.1.113730.3.1.241 NAME 'displayName' DESC 'RFC + 2798: preferred name to be used when displaying entries' EQUALITY caseIgnoreM + atch SUBSTR caseIgnoreSubstringsMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.15 SI + NGLE-VALUE ) +olcAttributeTypes: {3}( 2.16.840.1.113730.3.1.3 NAME 'employeeNumber' DESC 'RF + C2798: numerically identifies an employee within an organization' EQUALITY ca + seIgnoreMatch SUBSTR caseIgnoreSubstringsMatch SYNTAX 1.3.6.1.4.1.1466.115.12 + 1.1.15 SINGLE-VALUE ) +olcAttributeTypes: {4}( 2.16.840.1.113730.3.1.4 NAME 'employeeType' DESC 'RFC2 + 798: type of employment for a person' EQUALITY caseIgnoreMatch SUBSTR caseIgn + oreSubstringsMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.15 ) +olcAttributeTypes: {5}( 0.9.2342.19200300.100.1.60 NAME 'jpegPhoto' DESC 'RFC2 + 798: a JPEG image' 
SYNTAX 1.3.6.1.4.1.1466.115.121.1.28 ) +olcAttributeTypes: {6}( 2.16.840.1.113730.3.1.39 NAME 'preferredLanguage' DESC + 'RFC2798: preferred written or spoken language for a person' EQUALITY caseIg + noreMatch SUBSTR caseIgnoreSubstringsMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1. + 15 SINGLE-VALUE ) +olcAttributeTypes: {7}( 2.16.840.1.113730.3.1.40 NAME 'userSMIMECertificate' D + ESC 'RFC2798: PKCS#7 SignedData used to support S/MIME' SYNTAX 1.3.6.1.4.1.14 + 66.115.121.1.5 ) +olcAttributeTypes: {8}( 2.16.840.1.113730.3.1.216 NAME 'userPKCS12' DESC 'RFC2 + 798: personal identity information, a PKCS #12 PFX' SYNTAX 1.3.6.1.4.1.1466.1 + 15.121.1.5 ) +olcObjectClasses: {0}( 2.16.840.1.113730.3.2.2 NAME 'inetOrgPerson' DESC 'RFC2 + 798: Internet Organizational Person' SUP organizationalPerson STRUCTURAL MAY + ( audio $ businessCategory $ carLicense $ departmentNumber $ displayName $ em + ployeeNumber $ employeeType $ givenName $ homePhone $ homePostalAddress $ ini + tials $ jpegPhoto $ labeledURI $ mail $ manager $ mobile $ o $ pager $ photo + $ roomNumber $ secretary $ uid $ userCertificate $ x500uniqueIdentifier $ pre + ferredLanguage $ userSMIMECertificate $ userPKCS12 ) ) +structuralObjectClass: olcSchemaConfig +entryUUID: bae0537a-4e5c-103c-8e40-83bde8bbc3e8 +creatorsName: cn=config +createTimestamp: 20220412033105Z +entryCSN: 20220412033105.684579Z#000000#000#000000 +modifiersName: cn=config +modifyTimestamp: 20220412033105Z diff --git a/dirsrvtests/tests/data/openldap_2_389/memberof/slapd.d/cn=config/cn=schema/cn={3}rfc2307bis.ldif b/dirsrvtests/tests/data/openldap_2_389/memberof/slapd.d/cn=config/cn=schema/cn={3}rfc2307bis.ldif new file mode 100755 index 0000000..e839145 --- /dev/null +++ b/dirsrvtests/tests/data/openldap_2_389/memberof/slapd.d/cn=config/cn=schema/cn={3}rfc2307bis.ldif @@ -0,0 +1,153 @@ +# AUTO-GENERATED FILE - DO NOT EDIT!! Use ldapmodify. 
+# CRC32 746f71bc +dn: cn={3}rfc2307bis +objectClass: olcSchemaConfig +cn: {3}rfc2307bis +olcAttributeTypes: {0}( 1.3.6.1.1.1.1.2 NAME 'gecos' DESC 'The GECOS field; th + e common name' EQUALITY caseIgnoreIA5Match SUBSTR caseIgnoreIA5SubstringsMatc + h SYNTAX 1.3.6.1.4.1.1466.115.121.1.26 SINGLE-VALUE ) +olcAttributeTypes: {1}( 1.3.6.1.1.1.1.3 NAME 'homeDirectory' DESC 'The absolut + e path to the home directory' EQUALITY caseExactIA5Match SYNTAX 1.3.6.1.4.1.1 + 466.115.121.1.26 SINGLE-VALUE ) +olcAttributeTypes: {2}( 1.3.6.1.1.1.1.4 NAME 'loginShell' DESC 'The path to th + e login shell' EQUALITY caseExactIA5Match SYNTAX 1.3.6.1.4.1.1466.115.121.1.2 + 6 SINGLE-VALUE ) +olcAttributeTypes: {3}( 1.3.6.1.1.1.1.5 NAME 'shadowLastChange' EQUALITY integ + erMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.27 SINGLE-VALUE ) +olcAttributeTypes: {4}( 1.3.6.1.1.1.1.6 NAME 'shadowMin' EQUALITY integerMatch + SYNTAX 1.3.6.1.4.1.1466.115.121.1.27 SINGLE-VALUE ) +olcAttributeTypes: {5}( 1.3.6.1.1.1.1.7 NAME 'shadowMax' EQUALITY integerMatch + SYNTAX 1.3.6.1.4.1.1466.115.121.1.27 SINGLE-VALUE ) +olcAttributeTypes: {6}( 1.3.6.1.1.1.1.8 NAME 'shadowWarning' EQUALITY integerM + atch SYNTAX 1.3.6.1.4.1.1466.115.121.1.27 SINGLE-VALUE ) +olcAttributeTypes: {7}( 1.3.6.1.1.1.1.9 NAME 'shadowInactive' EQUALITY integer + Match SYNTAX 1.3.6.1.4.1.1466.115.121.1.27 SINGLE-VALUE ) +olcAttributeTypes: {8}( 1.3.6.1.1.1.1.10 NAME 'shadowExpire' EQUALITY integerM + atch SYNTAX 1.3.6.1.4.1.1466.115.121.1.27 SINGLE-VALUE ) +olcAttributeTypes: {9}( 1.3.6.1.1.1.1.11 NAME 'shadowFlag' EQUALITY integerMat + ch SYNTAX 1.3.6.1.4.1.1466.115.121.1.27 SINGLE-VALUE ) +olcAttributeTypes: {10}( 1.3.6.1.1.1.1.12 NAME 'memberUid' EQUALITY caseExactI + A5Match SYNTAX 1.3.6.1.4.1.1466.115.121.1.26 ) +olcAttributeTypes: {11}( 1.3.6.1.1.1.1.13 NAME 'memberNisNetgroup' EQUALITY ca + seExactIA5Match SUBSTR caseExactIA5SubstringsMatch SYNTAX 1.3.6.1.4.1.1466.11 + 5.121.1.26 ) +olcAttributeTypes: {12}( 1.3.6.1.1.1.1.14 NAME 
'nisNetgroupTriple' DESC 'Netgr + oup triple' EQUALITY caseIgnoreIA5Match SYNTAX 1.3.6.1.4.1.1466.115.121.1.26 + ) +olcAttributeTypes: {13}( 1.3.6.1.1.1.1.15 NAME 'ipServicePort' DESC 'Service p + ort number' EQUALITY integerMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.27 SINGLE + -VALUE ) +olcAttributeTypes: {14}( 1.3.6.1.1.1.1.16 NAME 'ipServiceProtocol' DESC 'Servi + ce protocol name' SUP name ) +olcAttributeTypes: {15}( 1.3.6.1.1.1.1.17 NAME 'ipProtocolNumber' DESC 'IP pro + tocol number' EQUALITY integerMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.27 SING + LE-VALUE ) +olcAttributeTypes: {16}( 1.3.6.1.1.1.1.18 NAME 'oncRpcNumber' DESC 'ONC RPC nu + mber' EQUALITY integerMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.27 SINGLE-VALUE + ) +olcAttributeTypes: {17}( 1.3.6.1.1.1.1.19 NAME 'ipHostNumber' DESC 'IPv4 addre + sses as a dotted decimal omitting leading zeros or IPv6 addresses as d + efined in RFC2373' SUP name ) +olcAttributeTypes: {18}( 1.3.6.1.1.1.1.20 NAME 'ipNetworkNumber' DESC 'IP netw + ork as a dotted decimal, eg. 192.168, omitting leading zeros' SUP name + SINGLE-VALUE ) +olcAttributeTypes: {19}( 1.3.6.1.1.1.1.21 NAME 'ipNetmaskNumber' DESC 'IP netm + ask as a dotted decimal, eg. 255.255.255.0, omitting leading zeros' EQ + UALITY caseIgnoreIA5Match SYNTAX 1.3.6.1.4.1.1466.115.121.1.26 SINGLE-VALUE ) +olcAttributeTypes: {20}( 1.3.6.1.1.1.1.22 NAME 'macAddress' DESC 'MAC address + in maximal, colon separated hex notation, eg. 
00:00:92:90:ee:e2' EQUAL + ITY caseIgnoreIA5Match SYNTAX 1.3.6.1.4.1.1466.115.121.1.26 ) +olcAttributeTypes: {21}( 1.3.6.1.1.1.1.23 NAME 'bootParameter' DESC 'rpc.bootp + aramd parameter' EQUALITY caseExactIA5Match SYNTAX 1.3.6.1.4.1.1466.115.121.1 + .26 ) +olcAttributeTypes: {22}( 1.3.6.1.1.1.1.24 NAME 'bootFile' DESC 'Boot image nam + e' EQUALITY caseExactIA5Match SYNTAX 1.3.6.1.4.1.1466.115.121.1.26 ) +olcAttributeTypes: {23}( 1.3.6.1.1.1.1.26 NAME 'nisMapName' DESC 'Name of a A + generic NIS map' SUP name ) +olcAttributeTypes: {24}( 1.3.6.1.1.1.1.27 NAME 'nisMapEntry' DESC 'A generic N + IS entry' EQUALITY caseExactIA5Match SUBSTR caseExactIA5SubstringsMatch SYNTA + X 1.3.6.1.4.1.1466.115.121.1.26 SINGLE-VALUE ) +olcAttributeTypes: {25}( 1.3.6.1.1.1.1.28 NAME 'nisPublicKey' DESC 'NIS public + key' EQUALITY octetStringMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.40 SINGLE-V + ALUE ) +olcAttributeTypes: {26}( 1.3.6.1.1.1.1.29 NAME 'nisSecretKey' DESC 'NIS secret + key' EQUALITY octetStringMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.40 SINGLE-V + ALUE ) +olcAttributeTypes: {27}( 1.3.6.1.1.1.1.30 NAME 'nisDomain' DESC 'NIS domain' E + QUALITY caseIgnoreIA5Match SYNTAX 1.3.6.1.4.1.1466.115.121.1.26 ) +olcAttributeTypes: {28}( 1.3.6.1.1.1.1.31 NAME 'automountMapName' DESC 'automo + unt Map Name' EQUALITY caseExactIA5Match SUBSTR caseExactIA5SubstringsMatch S + YNTAX 1.3.6.1.4.1.1466.115.121.1.26 SINGLE-VALUE ) +olcAttributeTypes: {29}( 1.3.6.1.1.1.1.32 NAME 'automountKey' DESC 'Automount + Key value' EQUALITY caseExactIA5Match SUBSTR caseExactIA5SubstringsMatch SYNT + AX 1.3.6.1.4.1.1466.115.121.1.26 SINGLE-VALUE ) +olcAttributeTypes: {30}( 1.3.6.1.1.1.1.33 NAME 'automountInformation' DESC 'Au + tomount information' EQUALITY caseExactIA5Match SUBSTR caseExactIA5Substrings + Match SYNTAX 1.3.6.1.4.1.1466.115.121.1.26 SINGLE-VALUE ) +olcObjectClasses: {0}( 1.3.6.1.1.1.2.0 NAME 'posixAccount' DESC 'Abstraction o + f an account with POSIX attributes' SUP top AUXILIARY MUST ( cn $ 
uid $ uidNu + mber $ gidNumber $ homeDirectory ) MAY ( userPassword $ loginShell $ gecos $ + description ) ) +olcObjectClasses: {1}( 1.3.6.1.1.1.2.1 NAME 'shadowAccount' DESC 'Additional a + ttributes for shadow passwords' SUP top AUXILIARY MUST uid MAY ( userPassword + $ description $ shadowLastChange $ shadowMin $ shadowMax $ shadowWarning $ s + hadowInactive $ shadowExpire $ shadowFlag ) ) +olcObjectClasses: {2}( 1.3.6.1.1.1.2.2 NAME 'posixGroup' DESC 'Abstraction of + a group of accounts' SUP top AUXILIARY MUST gidNumber MAY ( userPassword $ me + mberUid $ description ) ) +olcObjectClasses: {3}( 1.3.6.1.1.1.2.3 NAME 'ipService' DESC 'Abstraction an I + nternet Protocol service. Maps an IP port and protocol (such as tcp or + udp) to one or more names; the distinguished value of the cn a + ttribute denotes the services canonical name' SUP top STRUCTURAL MUST + ( cn $ ipServicePort $ ipServiceProtocol ) MAY description ) +olcObjectClasses: {4}( 1.3.6.1.1.1.2.4 NAME 'ipProtocol' DESC 'Abstraction of + an IP protocol. Maps a protocol number to one or more names. The disti + nguished value of the cn attribute denotes the protocols canonical nam + e' SUP top STRUCTURAL MUST ( cn $ ipProtocolNumber ) MAY description ) +olcObjectClasses: {5}( 1.3.6.1.1.1.2.5 NAME 'oncRpc' DESC 'Abstraction of an O + pen Network Computing (ONC) [RFC1057] Remote Procedure Call (RPC) bindi + ng. This class maps an ONC RPC number to a name. The distinguishe + d value of the cn attribute denotes the RPC services canonical name' SU + P top STRUCTURAL MUST ( cn $ oncRpcNumber ) MAY description ) +olcObjectClasses: {6}( 1.3.6.1.1.1.2.6 NAME 'ipHost' DESC 'Abstraction of a ho + st, an IP device. The distinguished value of the cn attribute denotes + the hosts canonical name. 
Device SHOULD be used as a structural class' + SUP top AUXILIARY MUST ( cn $ ipHostNumber ) MAY ( userPassword $ l $ descri + ption $ manager ) ) +olcObjectClasses: {7}( 1.3.6.1.1.1.2.7 NAME 'ipNetwork' DESC 'Abstraction of a + network. The distinguished value of the cn attribute denotes the netw + orks canonical name' SUP top STRUCTURAL MUST ipNetworkNumber MAY ( cn $ ipNet + maskNumber $ l $ description $ manager ) ) +olcObjectClasses: {8}( 1.3.6.1.1.1.2.8 NAME 'nisNetgroup' DESC 'Abstraction of + a netgroup. May refer to other netgroups' SUP top STRUCTURAL MUST cn MAY ( n + isNetgroupTriple $ memberNisNetgroup $ description ) ) +olcObjectClasses: {9}( 1.3.6.1.1.1.2.9 NAME 'nisMap' DESC 'A generic abstracti + on of a NIS map' SUP top STRUCTURAL MUST nisMapName MAY description ) +olcObjectClasses: {10}( 1.3.6.1.1.1.2.10 NAME 'nisObject' DESC 'An entry in a + NIS map' SUP top STRUCTURAL MUST ( cn $ nisMapEntry $ nisMapName ) MAY descri + ption ) +olcObjectClasses: {11}( 1.3.6.1.1.1.2.11 NAME 'ieee802Device' DESC 'A device w + ith a MAC address; device SHOULD be used as a structural class' SUP to + p AUXILIARY MAY macAddress ) +olcObjectClasses: {12}( 1.3.6.1.1.1.2.12 NAME 'bootableDevice' DESC 'A device + with boot parameters; device SHOULD be used as a structural class' SUP + top AUXILIARY MAY ( bootFile $ bootParameter ) ) +olcObjectClasses: {13}( 1.3.6.1.1.1.2.14 NAME 'nisKeyObject' DESC 'An object w + ith a public and secret key' SUP top AUXILIARY MUST ( cn $ nisPublicKey $ nis + SecretKey ) MAY ( uidNumber $ description ) ) +olcObjectClasses: {14}( 1.3.6.1.1.1.2.15 NAME 'nisDomainObject' DESC 'Associat + es a NIS domain with a naming context' SUP top AUXILIARY MUST nisDomain ) +olcObjectClasses: {15}( 1.3.6.1.1.1.2.16 NAME 'automountMap' SUP top STRUCTURA + L MUST automountMapName MAY description ) +olcObjectClasses: {16}( 1.3.6.1.1.1.2.17 NAME 'automount' DESC 'Automount info + rmation' SUP top STRUCTURAL MUST ( automountKey $ automountInformation ) MAY + 
description ) +olcObjectClasses: {17}( 1.3.6.1.4.1.5322.13.1.1 NAME 'namedObject' SUP top STR + UCTURAL MAY cn ) +structuralObjectClass: olcSchemaConfig +entryUUID: bae05988-4e5c-103c-8e41-83bde8bbc3e8 +creatorsName: cn=config +createTimestamp: 20220412033105Z +entryCSN: 20220412033105.684579Z#000000#000#000000 +modifiersName: cn=config +modifyTimestamp: 20220412033105Z diff --git a/dirsrvtests/tests/data/openldap_2_389/memberof/slapd.d/cn=config/cn=schema/cn={4}yast.ldif b/dirsrvtests/tests/data/openldap_2_389/memberof/slapd.d/cn=config/cn=schema/cn={4}yast.ldif new file mode 100755 index 0000000..ccc110f --- /dev/null +++ b/dirsrvtests/tests/data/openldap_2_389/memberof/slapd.d/cn=config/cn=schema/cn={4}yast.ldif @@ -0,0 +1,107 @@ +# AUTO-GENERATED FILE - DO NOT EDIT!! Use ldapmodify. +# CRC32 02d5bb23 +dn: cn={4}yast +objectClass: olcSchemaConfig +cn: {4}yast +olcObjectIdentifier: {0}SUSE 1.3.6.1.4.1.7057 +olcObjectIdentifier: {1}SUSE.YaST SUSE:10.1 +olcObjectIdentifier: {2}SUSE.YaST.ModuleConfig SUSE:10.1.2 +olcObjectIdentifier: {3}SUSE.YaST.ModuleConfig.OC SUSE.YaST.ModuleConfig:1 +olcObjectIdentifier: {4}SUSE.YaST.ModuleConfig.Attr SUSE.YaST.ModuleConfig:2 +olcAttributeTypes: {0}( SUSE.YaST.ModuleConfig.Attr:2 NAME 'suseDefaultBase' D + ESC 'Base DN where new Objects should be created by default' EQUALITY disting + uishedNameMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.12 SINGLE-VALUE ) +olcAttributeTypes: {1}( SUSE.YaST.ModuleConfig.Attr:3 NAME 'suseNextUniqueId' + DESC 'Next unused unique ID, can be used to generate directory wide uniqe IDs + ' EQUALITY integerMatch ORDERING integerOrderingMatch SYNTAX 1.3.6.1.4.1.1466 + .115.121.1.27 SINGLE-VALUE ) +olcAttributeTypes: {2}( SUSE.YaST.ModuleConfig.Attr:4 NAME 'suseMinUniqueId' D + ESC 'lower Border for Unique IDs' EQUALITY integerMatch ORDERING integerOrder + ingMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.27 SINGLE-VALUE ) +olcAttributeTypes: {3}( SUSE.YaST.ModuleConfig.Attr:5 NAME 'suseMaxUniqueId' D + ESC 'upper 
Border for Unique IDs' EQUALITY integerMatch ORDERING integerOrder + ingMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.27 SINGLE-VALUE ) +olcAttributeTypes: {4}( SUSE.YaST.ModuleConfig.Attr:6 NAME 'suseDefaultTemplat + e' DESC 'The DN of a template that should be used by default' EQUALITY distin + guishedNameMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.12 SINGLE-VALUE ) +olcAttributeTypes: {5}( SUSE.YaST.ModuleConfig.Attr:7 NAME 'suseSearchFilter' + DESC 'Search filter to localize Objects' SYNTAX 1.3.6.1.4.1.1466.115.121.1.15 + SINGLE-VALUE ) +olcAttributeTypes: {6}( SUSE.YaST.ModuleConfig.Attr:11 NAME 'suseDefaultValue' + DESC 'an Attribute-Value-Assertions to define defaults for specific Attribut + es' EQUALITY caseIgnoreMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.15 ) +olcAttributeTypes: {7}( SUSE.YaST.ModuleConfig.Attr:12 NAME 'suseNamingAttribu + te' DESC 'AttributeType that should be used as the RDN' EQUALITY caseIgnoreIA + 5Match SYNTAX 1.3.6.1.4.1.1466.115.121.1.26 SINGLE-VALUE ) +olcAttributeTypes: {8}( SUSE.YaST.ModuleConfig.Attr:15 NAME 'suseSecondaryGrou + p' DESC 'seconday group DN' EQUALITY distinguishedNameMatch SYNTAX 1.3.6.1.4. 
+ 1.1466.115.121.1.12 ) +olcAttributeTypes: {9}( SUSE.YaST.ModuleConfig.Attr:16 NAME 'suseMinPasswordLe + ngth' DESC 'minimum Password length for new users' EQUALITY integerMatch ORDE + RING integerOrderingMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.27 SINGLE-VALUE ) +olcAttributeTypes: {10}( SUSE.YaST.ModuleConfig.Attr:17 NAME 'suseMaxPasswordL + ength' DESC 'maximum Password length for new users' EQUALITY integerMatch ORD + ERING integerOrderingMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.27 SINGLE-VALUE + ) +olcAttributeTypes: {11}( SUSE.YaST.ModuleConfig.Attr:18 NAME 'susePasswordHash + ' DESC 'Hash method to use for new users' EQUALITY caseIgnoreIA5Match SYNTAX + 1.3.6.1.4.1.1466.115.121.1.26 SINGLE-VALUE ) +olcAttributeTypes: {12}( SUSE.YaST.ModuleConfig.Attr:19 NAME 'suseSkelDir' DES + C '' EQUALITY caseExactIA5Match SYNTAX 1.3.6.1.4.1.1466.115.121.1.26 ) +olcAttributeTypes: {13}( SUSE.YaST.ModuleConfig.Attr:20 NAME 'susePlugin' DESC + 'plugin to use upon user/ group creation' EQUALITY caseIgnoreMatch SYNTAX 1. 
+ 3.6.1.4.1.1466.115.121.1.15 ) +olcAttributeTypes: {14}( SUSE.YaST.ModuleConfig.Attr:21 NAME 'suseMapAttribute + ' DESC '' EQUALITY caseIgnoreMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.15 ) +olcAttributeTypes: {15}( SUSE.YaST.ModuleConfig.Attr:22 NAME 'suseImapServer' + DESC '' EQUALITY caseIgnoreMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.15 SINGLE- + VALUE ) +olcAttributeTypes: {16}( SUSE.YaST.ModuleConfig.Attr:23 NAME 'suseImapAdmin' D + ESC '' EQUALITY caseIgnoreMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.15 SINGLE-V + ALUE ) +olcAttributeTypes: {17}( SUSE.YaST.ModuleConfig.Attr:24 NAME 'suseImapDefaultQ + uota' DESC '' EQUALITY integerMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.27 SING + LE-VALUE ) +olcAttributeTypes: {18}( SUSE.YaST.ModuleConfig.Attr:25 NAME 'suseImapUseSsl' + DESC '' EQUALITY booleanMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.7 SINGLE-VALU + E ) +olcObjectClasses: {0}( SUSE.YaST.ModuleConfig.OC:2 NAME 'suseModuleConfigurati + on' DESC 'Contains configuration of Management Modules' SUP top STRUCTURAL MU + ST cn MAY suseDefaultBase ) +olcObjectClasses: {1}( SUSE.YaST.ModuleConfig.OC:3 NAME 'suseUserConfiguration + ' DESC 'Configuration of user management tools' SUP suseModuleConfiguration S + TRUCTURAL MAY ( suseMinPasswordLength $ suseMaxPasswordLength $ susePasswordH + ash $ suseSkelDir $ suseNextUniqueId $ suseMinUniqueId $ suseMaxUniqueId $ su + seDefaultTemplate $ suseSearchFilter $ suseMapAttribute ) ) +olcObjectClasses: {2}( SUSE.YaST.ModuleConfig.OC:4 NAME 'suseObjectTemplate' D + ESC 'Base Class for Object-Templates' SUP top STRUCTURAL MUST cn MAY ( susePl + ugin $ suseDefaultValue $ suseNamingAttribute ) ) +olcObjectClasses: {3}( SUSE.YaST.ModuleConfig.OC:5 NAME 'suseUserTemplate' DES + C 'User object template' SUP suseObjectTemplate STRUCTURAL MUST cn MAY suseSe + condaryGroup ) +olcObjectClasses: {4}( SUSE.YaST.ModuleConfig.OC:6 NAME 'suseGroupTemplate' DE + SC 'Group object template' SUP suseObjectTemplate STRUCTURAL MUST cn ) +olcObjectClasses: 
{5}( SUSE.YaST.ModuleConfig.OC:7 NAME 'suseGroupConfiguratio + n' DESC 'Configuration of user management tools' SUP suseModuleConfiguration + STRUCTURAL MAY ( suseNextUniqueId $ suseMinUniqueId $ suseMaxUniqueId $ suseD + efaultTemplate $ suseSearchFilter $ suseMapAttribute ) ) +olcObjectClasses: {6}( SUSE.YaST.ModuleConfig.OC:8 NAME 'suseCaConfiguration' + DESC 'Configuration of CA management tools' SUP suseModuleConfiguration STRUC + TURAL ) +olcObjectClasses: {7}( SUSE.YaST.ModuleConfig.OC:9 NAME 'suseDnsConfiguration' + DESC 'Configuration of mail server management tools' SUP suseModuleConfigura + tion STRUCTURAL ) +olcObjectClasses: {8}( SUSE.YaST.ModuleConfig.OC:10 NAME 'suseDhcpConfiguratio + n' DESC 'Configuration of DHCP server management tools' SUP suseModuleConfigu + ration STRUCTURAL ) +olcObjectClasses: {9}( SUSE.YaST.ModuleConfig.OC:11 NAME 'suseMailConfiguratio + n' DESC 'Configuration of IMAP user management tools' SUP suseModuleConfigura + tion STRUCTURAL MUST ( suseImapServer $ suseImapAdmin $ suseImapDefaultQuota + $ suseImapUseSsl ) ) +structuralObjectClass: olcSchemaConfig +entryUUID: bae05f32-4e5c-103c-8e42-83bde8bbc3e8 +creatorsName: cn=config +createTimestamp: 20220412033105Z +entryCSN: 20220412033105.684579Z#000000#000#000000 +modifiersName: cn=config +modifyTimestamp: 20220412033105Z diff --git a/dirsrvtests/tests/data/openldap_2_389/memberof/slapd.d/cn=config/olcDatabase={-1}frontend.ldif b/dirsrvtests/tests/data/openldap_2_389/memberof/slapd.d/cn=config/olcDatabase={-1}frontend.ldif new file mode 100755 index 0000000..b204dda --- /dev/null +++ b/dirsrvtests/tests/data/openldap_2_389/memberof/slapd.d/cn=config/olcDatabase={-1}frontend.ldif @@ -0,0 +1,26 @@ +# AUTO-GENERATED FILE - DO NOT EDIT!! Use ldapmodify. 
+# CRC32 01978aef +dn: olcDatabase={-1}frontend +objectClass: olcDatabaseConfig +objectClass: olcFrontendConfig +olcDatabase: {-1}frontend +olcAccess: {0}to dn.base="" by * read +olcAccess: {1}to dn.base="cn=subschema" by * read +olcAccess: {2}to attrs=userPassword,userPKCS12 by self write by * auth +olcAccess: {3}to attrs=shadowLastChange by self write by * read +olcAccess: {4}to * by * read +olcAddContentAcl: FALSE +olcLastMod: TRUE +olcLastBind: TRUE +olcMaxDerefDepth: 0 +olcReadOnly: FALSE +olcSchemaDN: cn=Subschema +olcSyncUseSubentry: FALSE +olcMonitoring: FALSE +structuralObjectClass: olcDatabaseConfig +entryUUID: bae062e8-4e5c-103c-8e43-83bde8bbc3e8 +creatorsName: cn=config +createTimestamp: 20220412033105Z +entryCSN: 20220412033105.684579Z#000000#000#000000 +modifiersName: cn=config +modifyTimestamp: 20220412033105Z diff --git a/dirsrvtests/tests/data/openldap_2_389/memberof/slapd.d/cn=config/olcDatabase={0}config.ldif b/dirsrvtests/tests/data/openldap_2_389/memberof/slapd.d/cn=config/olcDatabase={0}config.ldif new file mode 100755 index 0000000..927f3e7 --- /dev/null +++ b/dirsrvtests/tests/data/openldap_2_389/memberof/slapd.d/cn=config/olcDatabase={0}config.ldif @@ -0,0 +1,21 @@ +# AUTO-GENERATED FILE - DO NOT EDIT!! Use ldapmodify. 
+# CRC32 bafe8b21 +dn: olcDatabase={0}config +objectClass: olcDatabaseConfig +olcDatabase: {0}config +olcAccess: {0}to * by * none +olcAddContentAcl: TRUE +olcLastMod: TRUE +olcLastBind: TRUE +olcMaxDerefDepth: 15 +olcReadOnly: FALSE +olcRootDN: cn=config +olcSyncUseSubentry: FALSE +olcMonitoring: FALSE +structuralObjectClass: olcDatabaseConfig +entryUUID: bae06540-4e5c-103c-8e44-83bde8bbc3e8 +creatorsName: cn=config +createTimestamp: 20220412033105Z +entryCSN: 20220412033105.684579Z#000000#000#000000 +modifiersName: cn=config +modifyTimestamp: 20220412033105Z diff --git a/dirsrvtests/tests/data/openldap_2_389/memberof/slapd.d/cn=config/olcDatabase={1}mdb.ldif b/dirsrvtests/tests/data/openldap_2_389/memberof/slapd.d/cn=config/olcDatabase={1}mdb.ldif new file mode 100755 index 0000000..77eb2c5 --- /dev/null +++ b/dirsrvtests/tests/data/openldap_2_389/memberof/slapd.d/cn=config/olcDatabase={1}mdb.ldif @@ -0,0 +1,33 @@ +# AUTO-GENERATED FILE - DO NOT EDIT!! Use ldapmodify. +# CRC32 397d9772 +dn: olcDatabase={1}mdb +objectClass: olcDatabaseConfig +objectClass: olcMdbConfig +olcDatabase: {1}mdb +olcSuffix: dc=ldapdom,dc=net +olcAddContentAcl: FALSE +olcLastMod: TRUE +olcLastBind: TRUE +olcMaxDerefDepth: 15 +olcReadOnly: FALSE +olcRootDN: cn=root,dc=ldapdom,dc=net +olcRootPW:: cGFzcw== +olcSyncUseSubentry: FALSE +olcMonitoring: TRUE +olcDbDirectory: /tmp/ldap-sssdtest +olcDbCheckpoint: 1024 5 +olcDbNoSync: FALSE +olcDbIndex: objectClass eq +olcDbMaxReaders: 0 +olcDbMaxSize: 10485760 +olcDbMode: 0600 +olcDbSearchStack: 16 +olcDbMaxEntrySize: 0 +olcDbRtxnSize: 10000 +structuralObjectClass: olcMdbConfig +entryUUID: bae06806-4e5c-103c-8e45-83bde8bbc3e8 +creatorsName: cn=config +createTimestamp: 20220412033105Z +entryCSN: 20220412033105.684579Z#000000#000#000000 +modifiersName: cn=config +modifyTimestamp: 20220412033105Z diff --git a/dirsrvtests/tests/data/openldap_2_389/memberof/slapd.d/cn=config/olcDatabase={1}mdb/olcOverlay={0}memberof.ldif 
b/dirsrvtests/tests/data/openldap_2_389/memberof/slapd.d/cn=config/olcDatabase={1}mdb/olcOverlay={0}memberof.ldif new file mode 100755 index 0000000..a5dc9c8 --- /dev/null +++ b/dirsrvtests/tests/data/openldap_2_389/memberof/slapd.d/cn=config/olcDatabase={1}mdb/olcOverlay={0}memberof.ldif @@ -0,0 +1,15 @@ +# AUTO-GENERATED FILE - DO NOT EDIT!! Use ldapmodify. +# CRC32 f9e64128 +dn: olcOverlay={0}memberof +objectClass: olcOverlayConfig +objectClass: olcMemberOfConfig +olcOverlay: {0}memberof +olcMemberOfDangling: ignore +olcMemberOfRefInt: FALSE +structuralObjectClass: olcMemberOfConfig +entryUUID: bae06a4a-4e5c-103c-8e46-83bde8bbc3e8 +creatorsName: cn=config +createTimestamp: 20220412033105Z +entryCSN: 20220412033105.684579Z#000000#000#000000 +modifiersName: cn=config +modifyTimestamp: 20220412033105Z diff --git a/dirsrvtests/tests/data/openldap_2_389/memberof/slapd.d/cn=config/olcDatabase={1}mdb/olcOverlay={1}unique.ldif b/dirsrvtests/tests/data/openldap_2_389/memberof/slapd.d/cn=config/olcDatabase={1}mdb/olcOverlay={1}unique.ldif new file mode 100755 index 0000000..bbb99bf --- /dev/null +++ b/dirsrvtests/tests/data/openldap_2_389/memberof/slapd.d/cn=config/olcDatabase={1}mdb/olcOverlay={1}unique.ldif @@ -0,0 +1,14 @@ +# AUTO-GENERATED FILE - DO NOT EDIT!! Use ldapmodify. +# CRC32 0ab75d66 +dn: olcOverlay={1}unique +objectClass: olcOverlayConfig +objectClass: olcUniqueConfig +olcOverlay: {1}unique +olcUniqueURI: ldap:///?mail?sub? 
+structuralObjectClass: olcUniqueConfig +entryUUID: bae06c84-4e5c-103c-8e47-83bde8bbc3e8 +creatorsName: cn=config +createTimestamp: 20220412033105Z +entryCSN: 20220412033105.684579Z#000000#000#000000 +modifiersName: cn=config +modifyTimestamp: 20220412033105Z diff --git a/dirsrvtests/tests/data/openldap_2_389/memberof/slapd.d/cn=config/olcDatabase={1}mdb/olcOverlay={2}refint.ldif b/dirsrvtests/tests/data/openldap_2_389/memberof/slapd.d/cn=config/olcDatabase={1}mdb/olcOverlay={2}refint.ldif new file mode 100755 index 0000000..7d6f66b --- /dev/null +++ b/dirsrvtests/tests/data/openldap_2_389/memberof/slapd.d/cn=config/olcDatabase={1}mdb/olcOverlay={2}refint.ldif @@ -0,0 +1,15 @@ +# AUTO-GENERATED FILE - DO NOT EDIT!! Use ldapmodify. +# CRC32 9d344a40 +dn: olcOverlay={2}refint +objectClass: olcOverlayConfig +objectClass: olcRefintConfig +olcOverlay: {2}refint +olcRefintAttribute: member +olcRefintNothing: cn=admin,dc=example,dc=com +structuralObjectClass: olcRefintConfig +entryUUID: bae06e5a-4e5c-103c-8e48-83bde8bbc3e8 +creatorsName: cn=config +createTimestamp: 20220412033105Z +entryCSN: 20220412033105.684579Z#000000#000#000000 +modifiersName: cn=config +modifyTimestamp: 20220412033105Z diff --git a/dirsrvtests/tests/data/openldap_2_389/saslauthd/slapd.d/cn=config.ldif b/dirsrvtests/tests/data/openldap_2_389/saslauthd/slapd.d/cn=config.ldif new file mode 100644 index 0000000..e9b19f1 --- /dev/null +++ b/dirsrvtests/tests/data/openldap_2_389/saslauthd/slapd.d/cn=config.ldif @@ -0,0 +1,39 @@ +# AUTO-GENERATED FILE - DO NOT EDIT!! Use ldapmodify. 
+# CRC32 7edbd1a7 +dn: cn=config +objectClass: olcGlobal +cn: config +olcConfigFile: /etc/openldap/slapd.conf +olcConfigDir: /root/slapd.d +olcAttributeOptions: lang- +olcAuthzPolicy: none +olcConcurrency: 0 +olcConnMaxPending: 100 +olcConnMaxPendingAuth: 1000 +olcGentleHUP: FALSE +olcIdleTimeout: 0 +olcIndexSubstrIfMaxLen: 4 +olcIndexSubstrIfMinLen: 2 +olcIndexSubstrAnyLen: 4 +olcIndexSubstrAnyStep: 2 +olcIndexIntLen: 4 +olcListenerThreads: 1 +olcLocalSSF: 71 +olcLogLevel: 0 +olcReadOnly: FALSE +olcSaslSecProps: noplain,noanonymous +olcSockbufMaxIncoming: 262143 +olcSockbufMaxIncomingAuth: 16777215 +olcThreads: 16 +olcTLSCRLCheck: none +olcTLSVerifyClient: never +olcTLSProtocolMin: 0.0 +olcToolThreads: 1 +olcWriteTimeout: 0 +structuralObjectClass: olcGlobal +entryUUID: 7ed3eeea-f8e9-103c-8ebf-e51fa20c07f9 +creatorsName: cn=config +createTimestamp: 20221115042701Z +entryCSN: 20221115042701.652581Z#000000#000#000000 +modifiersName: cn=config +modifyTimestamp: 20221115042701Z diff --git a/dirsrvtests/tests/data/openldap_2_389/saslauthd/slapd.d/cn=config/cn=module{0}.ldif b/dirsrvtests/tests/data/openldap_2_389/saslauthd/slapd.d/cn=config/cn=module{0}.ldif new file mode 100644 index 0000000..b7cfd86 --- /dev/null +++ b/dirsrvtests/tests/data/openldap_2_389/saslauthd/slapd.d/cn=config/cn=module{0}.ldif @@ -0,0 +1,16 @@ +# AUTO-GENERATED FILE - DO NOT EDIT!! Use ldapmodify. 
+# CRC32 5a12264a +dn: cn=module{0} +objectClass: olcModuleList +cn: module{0} +olcModuleLoad: {0}back_mdb.la +olcModuleLoad: {1}memberof.la +olcModuleLoad: {2}refint.la +olcModuleLoad: {3}unique.la +structuralObjectClass: olcModuleList +entryUUID: 7ed3fc32-f8e9-103c-8ec0-e51fa20c07f9 +creatorsName: cn=config +createTimestamp: 20221115042701Z +entryCSN: 20221115042701.652581Z#000000#000#000000 +modifiersName: cn=config +modifyTimestamp: 20221115042701Z diff --git a/dirsrvtests/tests/data/openldap_2_389/saslauthd/slapd.d/cn=config/cn=schema.ldif b/dirsrvtests/tests/data/openldap_2_389/saslauthd/slapd.d/cn=config/cn=schema.ldif new file mode 100644 index 0000000..8c54f94 --- /dev/null +++ b/dirsrvtests/tests/data/openldap_2_389/saslauthd/slapd.d/cn=config/cn=schema.ldif @@ -0,0 +1,669 @@ +# AUTO-GENERATED FILE - DO NOT EDIT!! Use ldapmodify. +# CRC32 84ab7aa0 +dn: cn=schema +objectClass: olcSchemaConfig +cn: schema +olcObjectIdentifier: OLcfg 1.3.6.1.4.1.4203.1.12.2 +olcObjectIdentifier: OLcfgAt OLcfg:3 +olcObjectIdentifier: OLcfgGlAt OLcfgAt:0 +olcObjectIdentifier: OLcfgBkAt OLcfgAt:1 +olcObjectIdentifier: OLcfgDbAt OLcfgAt:2 +olcObjectIdentifier: OLcfgOvAt OLcfgAt:3 +olcObjectIdentifier: OLcfgCtAt OLcfgAt:4 +olcObjectIdentifier: OLcfgOc OLcfg:4 +olcObjectIdentifier: OLcfgGlOc OLcfgOc:0 +olcObjectIdentifier: OLcfgBkOc OLcfgOc:1 +olcObjectIdentifier: OLcfgDbOc OLcfgOc:2 +olcObjectIdentifier: OLcfgOvOc OLcfgOc:3 +olcObjectIdentifier: OLcfgCtOc OLcfgOc:4 +olcObjectIdentifier: OMsyn 1.3.6.1.4.1.1466.115.121.1 +olcObjectIdentifier: OMsBoolean OMsyn:7 +olcObjectIdentifier: OMsDN OMsyn:12 +olcObjectIdentifier: OMsDirectoryString OMsyn:15 +olcObjectIdentifier: OMsIA5String OMsyn:26 +olcObjectIdentifier: OMsInteger OMsyn:27 +olcObjectIdentifier: OMsOID OMsyn:38 +olcObjectIdentifier: OMsOctetString OMsyn:40 +olcLdapSyntaxes: ( 1.3.6.1.4.1.1466.115.121.1.1 DESC 'ACI Item' X-BINARY-TRA + NSFER-REQUIRED 'TRUE' X-NOT-HUMAN-READABLE 'TRUE' ) +olcLdapSyntaxes: ( 
1.3.6.1.4.1.1466.115.121.1.2 DESC 'Access Point' X-NOT-HU + MAN-READABLE 'TRUE' ) +olcLdapSyntaxes: ( 1.3.6.1.4.1.1466.115.121.1.3 DESC 'Attribute Type Descrip + tion' ) +olcLdapSyntaxes: ( 1.3.6.1.4.1.1466.115.121.1.4 DESC 'Audio' X-NOT-HUMAN-REA + DABLE 'TRUE' ) +olcLdapSyntaxes: ( 1.3.6.1.4.1.1466.115.121.1.5 DESC 'Binary' X-NOT-HUMAN-RE + ADABLE 'TRUE' ) +olcLdapSyntaxes: ( 1.3.6.1.4.1.1466.115.121.1.6 DESC 'Bit String' ) +olcLdapSyntaxes: ( 1.3.6.1.4.1.1466.115.121.1.7 DESC 'Boolean' ) +olcLdapSyntaxes: ( 1.3.6.1.4.1.1466.115.121.1.8 DESC 'Certificate' X-BINARY- + TRANSFER-REQUIRED 'TRUE' X-NOT-HUMAN-READABLE 'TRUE' ) +olcLdapSyntaxes: ( 1.3.6.1.4.1.1466.115.121.1.9 DESC 'Certificate List' X-BI + NARY-TRANSFER-REQUIRED 'TRUE' X-NOT-HUMAN-READABLE 'TRUE' ) +olcLdapSyntaxes: ( 1.3.6.1.4.1.1466.115.121.1.10 DESC 'Certificate Pair' X-B + INARY-TRANSFER-REQUIRED 'TRUE' X-NOT-HUMAN-READABLE 'TRUE' ) +olcLdapSyntaxes: ( 1.3.6.1.4.1.4203.666.11.10.2.1 DESC 'X.509 AttributeCerti + ficate' X-BINARY-TRANSFER-REQUIRED 'TRUE' X-NOT-HUMAN-READABLE 'TRUE' ) +olcLdapSyntaxes: ( 1.3.6.1.4.1.1466.115.121.1.12 DESC 'Distinguished Name' ) +olcLdapSyntaxes: ( 1.2.36.79672281.1.5.0 DESC 'RDN' ) +olcLdapSyntaxes: ( 1.3.6.1.4.1.1466.115.121.1.13 DESC 'Data Quality' ) +olcLdapSyntaxes: ( 1.3.6.1.4.1.1466.115.121.1.14 DESC 'Delivery Method' ) +olcLdapSyntaxes: ( 1.3.6.1.4.1.1466.115.121.1.15 DESC 'Directory String' ) +olcLdapSyntaxes: ( 1.3.6.1.4.1.1466.115.121.1.16 DESC 'DIT Content Rule Desc + ription' ) +olcLdapSyntaxes: ( 1.3.6.1.4.1.1466.115.121.1.17 DESC 'DIT Structure Rule De + scription' ) +olcLdapSyntaxes: ( 1.3.6.1.4.1.1466.115.121.1.19 DESC 'DSA Quality' ) +olcLdapSyntaxes: ( 1.3.6.1.4.1.1466.115.121.1.20 DESC 'DSE Type' ) +olcLdapSyntaxes: ( 1.3.6.1.4.1.1466.115.121.1.21 DESC 'Enhanced Guide' ) +olcLdapSyntaxes: ( 1.3.6.1.4.1.1466.115.121.1.22 DESC 'Facsimile Telephone N + umber' ) +olcLdapSyntaxes: ( 1.3.6.1.4.1.1466.115.121.1.23 DESC 'Fax' X-NOT-HUMAN-READ + ABLE 'TRUE' ) 
+olcLdapSyntaxes: ( 1.3.6.1.4.1.1466.115.121.1.24 DESC 'Generalized Time' ) +olcLdapSyntaxes: ( 1.3.6.1.4.1.1466.115.121.1.25 DESC 'Guide' ) +olcLdapSyntaxes: ( 1.3.6.1.4.1.1466.115.121.1.26 DESC 'IA5 String' ) +olcLdapSyntaxes: ( 1.3.6.1.4.1.1466.115.121.1.27 DESC 'Integer' ) +olcLdapSyntaxes: ( 1.3.6.1.4.1.1466.115.121.1.28 DESC 'JPEG' X-NOT-HUMAN-REA + DABLE 'TRUE' ) +olcLdapSyntaxes: ( 1.3.6.1.4.1.1466.115.121.1.29 DESC 'Master And Shadow Acc + ess Points' ) +olcLdapSyntaxes: ( 1.3.6.1.4.1.1466.115.121.1.30 DESC 'Matching Rule Descrip + tion' ) +olcLdapSyntaxes: ( 1.3.6.1.4.1.1466.115.121.1.31 DESC 'Matching Rule Use Des + cription' ) +olcLdapSyntaxes: ( 1.3.6.1.4.1.1466.115.121.1.32 DESC 'Mail Preference' ) +olcLdapSyntaxes: ( 1.3.6.1.4.1.1466.115.121.1.33 DESC 'MHS OR Address' ) +olcLdapSyntaxes: ( 1.3.6.1.4.1.1466.115.121.1.34 DESC 'Name And Optional UID + ' ) +olcLdapSyntaxes: ( 1.3.6.1.4.1.1466.115.121.1.35 DESC 'Name Form Description + ' ) +olcLdapSyntaxes: ( 1.3.6.1.4.1.1466.115.121.1.36 DESC 'Numeric String' ) +olcLdapSyntaxes: ( 1.3.6.1.4.1.1466.115.121.1.37 DESC 'Object Class Descript + ion' ) +olcLdapSyntaxes: ( 1.3.6.1.4.1.1466.115.121.1.38 DESC 'OID' ) +olcLdapSyntaxes: ( 1.3.6.1.4.1.1466.115.121.1.39 DESC 'Other Mailbox' ) +olcLdapSyntaxes: ( 1.3.6.1.4.1.1466.115.121.1.40 DESC 'Octet String' ) +olcLdapSyntaxes: ( 1.3.6.1.4.1.1466.115.121.1.41 DESC 'Postal Address' ) +olcLdapSyntaxes: ( 1.3.6.1.4.1.1466.115.121.1.42 DESC 'Protocol Information' + ) +olcLdapSyntaxes: ( 1.3.6.1.4.1.1466.115.121.1.43 DESC 'Presentation Address' + ) +olcLdapSyntaxes: ( 1.3.6.1.4.1.1466.115.121.1.44 DESC 'Printable String' ) +olcLdapSyntaxes: ( 1.3.6.1.4.1.1466.115.121.1.11 DESC 'Country String' ) +olcLdapSyntaxes: ( 1.3.6.1.4.1.1466.115.121.1.45 DESC 'SubtreeSpecification' + ) +olcLdapSyntaxes: ( 1.3.6.1.4.1.1466.115.121.1.49 DESC 'Supported Algorithm' + X-BINARY-TRANSFER-REQUIRED 'TRUE' X-NOT-HUMAN-READABLE 'TRUE' ) +olcLdapSyntaxes: ( 1.3.6.1.4.1.1466.115.121.1.50 
DESC 'Telephone Number' ) +olcLdapSyntaxes: ( 1.3.6.1.4.1.1466.115.121.1.51 DESC 'Teletex Terminal Iden + tifier' ) +olcLdapSyntaxes: ( 1.3.6.1.4.1.1466.115.121.1.52 DESC 'Telex Number' ) +olcLdapSyntaxes: ( 1.3.6.1.4.1.1466.115.121.1.54 DESC 'LDAP Syntax Descripti + on' ) +olcLdapSyntaxes: ( 1.3.6.1.4.1.1466.115.121.1.55 DESC 'Modify Rights' ) +olcLdapSyntaxes: ( 1.3.6.1.4.1.1466.115.121.1.56 DESC 'LDAP Schema Definitio + n' ) +olcLdapSyntaxes: ( 1.3.6.1.4.1.1466.115.121.1.57 DESC 'LDAP Schema Descripti + on' ) +olcLdapSyntaxes: ( 1.3.6.1.4.1.1466.115.121.1.58 DESC 'Substring Assertion' + ) +olcLdapSyntaxes: ( 1.3.6.1.1.1.0.0 DESC 'RFC2307 NIS Netgroup Triple' ) +olcLdapSyntaxes: ( 1.3.6.1.1.1.0.1 DESC 'RFC2307 Boot Parameter' ) +olcLdapSyntaxes: ( 1.3.6.1.1.15.1 DESC 'Certificate Exact Assertion' ) +olcLdapSyntaxes: ( 1.3.6.1.1.15.2 DESC 'Certificate Assertion' ) +olcLdapSyntaxes: ( 1.3.6.1.1.15.3 DESC 'Certificate Pair Exact Assertion' ) +olcLdapSyntaxes: ( 1.3.6.1.1.15.4 DESC 'Certificate Pair Assertion' ) +olcLdapSyntaxes: ( 1.3.6.1.1.15.5 DESC 'Certificate List Exact Assertion' ) +olcLdapSyntaxes: ( 1.3.6.1.1.15.6 DESC 'Certificate List Assertion' ) +olcLdapSyntaxes: ( 1.3.6.1.1.15.7 DESC 'Algorithm Identifier' ) +olcLdapSyntaxes: ( 1.3.6.1.4.1.4203.666.11.10.2.2 DESC 'AttributeCertificate + Exact Assertion' ) +olcLdapSyntaxes: ( 1.3.6.1.4.1.4203.666.11.10.2.3 DESC 'AttributeCertificate + Assertion' ) +olcLdapSyntaxes: ( 1.3.6.1.1.16.1 DESC 'UUID' ) +olcLdapSyntaxes: ( 1.3.6.1.4.1.4203.666.11.2.1 DESC 'CSN' ) +olcLdapSyntaxes: ( 1.3.6.1.4.1.4203.666.11.2.4 DESC 'CSN SID' ) +olcLdapSyntaxes: ( 1.3.6.1.4.1.4203.1.1.1 DESC 'OpenLDAP void' ) +olcLdapSyntaxes: ( 1.3.6.1.4.1.4203.666.2.7 DESC 'OpenLDAP authz' ) +olcLdapSyntaxes: ( 1.3.6.1.4.1.4203.666.2.1 DESC 'OpenLDAP Experimental ACI' + ) +olcAttributeTypes: ( 2.5.4.0 NAME 'objectClass' DESC 'RFC4512: object classe + s of the entity' EQUALITY objectIdentifierMatch SYNTAX 1.3.6.1.4.1.1466.115 + .121.1.38 ) 
+olcAttributeTypes: ( 2.5.21.9 NAME 'structuralObjectClass' DESC 'RFC4512: st + ructural object class of entry' EQUALITY objectIdentifierMatch SYNTAX 1.3.6 + .1.4.1.1466.115.121.1.38 SINGLE-VALUE NO-USER-MODIFICATION USAGE directoryO + peration ) +olcAttributeTypes: ( 2.5.18.1 NAME 'createTimestamp' DESC 'RFC4512: time whi + ch object was created' EQUALITY generalizedTimeMatch ORDERING generalizedTi + meOrderingMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.24 SINGLE-VALUE NO-USER-M + ODIFICATION USAGE directoryOperation ) +olcAttributeTypes: ( 2.5.18.2 NAME 'modifyTimestamp' DESC 'RFC4512: time whi + ch object was last modified' EQUALITY generalizedTimeMatch ORDERING general + izedTimeOrderingMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.24 SINGLE-VALUE NO- + USER-MODIFICATION USAGE directoryOperation ) +olcAttributeTypes: ( 2.5.18.3 NAME 'creatorsName' DESC 'RFC4512: name of cre + ator' EQUALITY distinguishedNameMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.12 + SINGLE-VALUE NO-USER-MODIFICATION USAGE directoryOperation ) +olcAttributeTypes: ( 2.5.18.4 NAME 'modifiersName' DESC 'RFC4512: name of la + st modifier' EQUALITY distinguishedNameMatch SYNTAX 1.3.6.1.4.1.1466.115.12 + 1.1.12 SINGLE-VALUE NO-USER-MODIFICATION USAGE directoryOperation ) +olcAttributeTypes: ( 2.5.18.9 NAME 'hasSubordinates' DESC 'X.501: entry has + children' EQUALITY booleanMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.7 SINGLE- + VALUE NO-USER-MODIFICATION USAGE directoryOperation ) +olcAttributeTypes: ( 2.5.18.10 NAME 'subschemaSubentry' DESC 'RFC4512: name + of controlling subschema entry' EQUALITY distinguishedNameMatch SYNTAX 1.3. 
+ 6.1.4.1.1466.115.121.1.12 SINGLE-VALUE NO-USER-MODIFICATION USAGE directory + Operation ) +olcAttributeTypes: ( 2.5.18.12 NAME 'collectiveAttributeSubentries' DESC 'RF + C3671: collective attribute subentries' EQUALITY distinguishedNameMatch SYN + TAX 1.3.6.1.4.1.1466.115.121.1.12 NO-USER-MODIFICATION USAGE directoryOpera + tion ) +olcAttributeTypes: ( 2.5.18.7 NAME 'collectiveExclusions' DESC 'RFC3671: col + lective attribute exclusions' EQUALITY objectIdentifierMatch SYNTAX 1.3.6.1 + .4.1.1466.115.121.1.38 USAGE directoryOperation ) +olcAttributeTypes: ( 1.3.6.1.1.20 NAME 'entryDN' DESC 'DN of the entry' EQUA + LITY distinguishedNameMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.12 SINGLE-VAL + UE NO-USER-MODIFICATION USAGE directoryOperation ) +olcAttributeTypes: ( 1.3.6.1.1.16.4 NAME 'entryUUID' DESC 'UUID of the entry + ' EQUALITY UUIDMatch ORDERING UUIDOrderingMatch SYNTAX 1.3.6.1.1.16.1 SINGL + E-VALUE NO-USER-MODIFICATION USAGE directoryOperation ) +olcAttributeTypes: ( 1.3.6.1.4.1.4203.666.1.7 NAME 'entryCSN' DESC 'change s + equence number of the entry content' EQUALITY CSNMatch ORDERING CSNOrdering + Match SYNTAX 1.3.6.1.4.1.4203.666.11.2.1{64} SINGLE-VALUE NO-USER-MODIFICAT + ION USAGE directoryOperation ) +olcAttributeTypes: ( 1.3.6.1.4.1.4203.666.1.13 NAME 'namingCSN' DESC 'change + sequence number of the entry naming (RDN)' EQUALITY CSNMatch ORDERING CSNO + rderingMatch SYNTAX 1.3.6.1.4.1.4203.666.11.2.1{64} SINGLE-VALUE NO-USER-MO + DIFICATION USAGE directoryOperation ) +olcAttributeTypes: ( 1.3.6.1.4.1.4203.666.1.23 NAME 'syncreplCookie' DESC 's + yncrepl Cookie for shadow copy' EQUALITY octetStringMatch ORDERING octetStr + ingOrderingMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.40 SINGLE-VALUE NO-USER- + MODIFICATION USAGE dSAOperation ) +olcAttributeTypes: ( 1.3.6.1.4.1.4203.666.1.25 NAME 'contextCSN' DESC 'the l + argest committed CSN of a context' EQUALITY CSNMatch ORDERING CSNOrderingMa + tch SYNTAX 1.3.6.1.4.1.4203.666.11.2.1{64} NO-USER-MODIFICATION 
USAGE dSAOp + eration ) +olcAttributeTypes: ( 1.3.6.1.4.1.1466.101.120.6 NAME 'altServer' DESC 'RFC45 + 12: alternative servers' SYNTAX 1.3.6.1.4.1.1466.115.121.1.26 USAGE dSAOper + ation ) +olcAttributeTypes: ( 1.3.6.1.4.1.1466.101.120.5 NAME 'namingContexts' DESC ' + RFC4512: naming contexts' SYNTAX 1.3.6.1.4.1.1466.115.121.1.12 USAGE dSAOpe + ration ) +olcAttributeTypes: ( 1.3.6.1.4.1.1466.101.120.13 NAME 'supportedControl' DES + C 'RFC4512: supported controls' SYNTAX 1.3.6.1.4.1.1466.115.121.1.38 USAGE + dSAOperation ) +olcAttributeTypes: ( 1.3.6.1.4.1.1466.101.120.7 NAME 'supportedExtension' DE + SC 'RFC4512: supported extended operations' SYNTAX 1.3.6.1.4.1.1466.115.121 + .1.38 USAGE dSAOperation ) +olcAttributeTypes: ( 1.3.6.1.4.1.1466.101.120.15 NAME 'supportedLDAPVersion' + DESC 'RFC4512: supported LDAP versions' SYNTAX 1.3.6.1.4.1.1466.115.121.1. + 27 USAGE dSAOperation ) +olcAttributeTypes: ( 1.3.6.1.4.1.1466.101.120.14 NAME 'supportedSASLMechanis + ms' DESC 'RFC4512: supported SASL mechanisms' SYNTAX 1.3.6.1.4.1.1466.115.1 + 21.1.15 USAGE dSAOperation ) +olcAttributeTypes: ( 1.3.6.1.4.1.4203.1.3.5 NAME 'supportedFeatures' DESC 'R + FC4512: features supported by the server' EQUALITY objectIdentifierMatch SY + NTAX 1.3.6.1.4.1.1466.115.121.1.38 USAGE dSAOperation ) +olcAttributeTypes: ( 1.3.6.1.4.1.4203.666.1.10 NAME 'monitorContext' DESC 'm + onitor context' EQUALITY distinguishedNameMatch SYNTAX 1.3.6.1.4.1.1466.115 + .121.1.12 SINGLE-VALUE NO-USER-MODIFICATION USAGE dSAOperation ) +olcAttributeTypes: ( 1.3.6.1.4.1.4203.1.12.2.1 NAME 'configContext' DESC 'co + nfig context' EQUALITY distinguishedNameMatch SYNTAX 1.3.6.1.4.1.1466.115.1 + 21.1.12 SINGLE-VALUE NO-USER-MODIFICATION USAGE dSAOperation ) +olcAttributeTypes: ( 1.3.6.1.1.4 NAME 'vendorName' DESC 'RFC3045: name of im + plementation vendor' EQUALITY caseExactMatch SYNTAX 1.3.6.1.4.1.1466.115.12 + 1.1.15 SINGLE-VALUE NO-USER-MODIFICATION USAGE dSAOperation ) +olcAttributeTypes: ( 1.3.6.1.1.5 NAME 
'vendorVersion' DESC 'RFC3045: version + of implementation' EQUALITY caseExactMatch SYNTAX 1.3.6.1.4.1.1466.115.121 + .1.15 SINGLE-VALUE NO-USER-MODIFICATION USAGE dSAOperation ) +olcAttributeTypes: ( 2.5.18.5 NAME 'administrativeRole' DESC 'RFC3672: admin + istrative role' EQUALITY objectIdentifierMatch SYNTAX 1.3.6.1.4.1.1466.115. + 121.1.38 USAGE directoryOperation ) +olcAttributeTypes: ( 2.5.18.6 NAME 'subtreeSpecification' DESC 'RFC3672: sub + tree specification' SYNTAX 1.3.6.1.4.1.1466.115.121.1.45 SINGLE-VALUE USAGE + directoryOperation ) +olcAttributeTypes: ( 2.5.21.1 NAME 'dITStructureRules' DESC 'RFC4512: DIT st + ructure rules' EQUALITY integerFirstComponentMatch SYNTAX 1.3.6.1.4.1.1466. + 115.121.1.17 USAGE directoryOperation ) +olcAttributeTypes: ( 2.5.21.2 NAME 'dITContentRules' DESC 'RFC4512: DIT cont + ent rules' EQUALITY objectIdentifierFirstComponentMatch SYNTAX 1.3.6.1.4.1. + 1466.115.121.1.16 USAGE directoryOperation ) +olcAttributeTypes: ( 2.5.21.4 NAME 'matchingRules' DESC 'RFC4512: matching r + ules' EQUALITY objectIdentifierFirstComponentMatch SYNTAX 1.3.6.1.4.1.1466. + 115.121.1.30 USAGE directoryOperation ) +olcAttributeTypes: ( 2.5.21.5 NAME 'attributeTypes' DESC 'RFC4512: attribute + types' EQUALITY objectIdentifierFirstComponentMatch SYNTAX 1.3.6.1.4.1.146 + 6.115.121.1.3 USAGE directoryOperation ) +olcAttributeTypes: ( 2.5.21.6 NAME 'objectClasses' DESC 'RFC4512: object cla + sses' EQUALITY objectIdentifierFirstComponentMatch SYNTAX 1.3.6.1.4.1.1466. 
+ 115.121.1.37 USAGE directoryOperation ) +olcAttributeTypes: ( 2.5.21.7 NAME 'nameForms' DESC 'RFC4512: name forms ' E + QUALITY objectIdentifierFirstComponentMatch SYNTAX 1.3.6.1.4.1.1466.115.121 + .1.35 USAGE directoryOperation ) +olcAttributeTypes: ( 2.5.21.8 NAME 'matchingRuleUse' DESC 'RFC4512: matching + rule uses' EQUALITY objectIdentifierFirstComponentMatch SYNTAX 1.3.6.1.4.1 + .1466.115.121.1.31 USAGE directoryOperation ) +olcAttributeTypes: ( 1.3.6.1.4.1.1466.101.120.16 NAME 'ldapSyntaxes' DESC 'R + FC4512: LDAP syntaxes' EQUALITY objectIdentifierFirstComponentMatch SYNTAX + 1.3.6.1.4.1.1466.115.121.1.54 USAGE directoryOperation ) +olcAttributeTypes: ( 2.5.4.1 NAME ( 'aliasedObjectName' 'aliasedEntryName' ) + DESC 'RFC4512: name of aliased object' EQUALITY distinguishedNameMatch SYN + TAX 1.3.6.1.4.1.1466.115.121.1.12 SINGLE-VALUE ) +olcAttributeTypes: ( 2.16.840.1.113730.3.1.34 NAME 'ref' DESC 'RFC3296: subo + rdinate referral URL' EQUALITY caseExactMatch SYNTAX 1.3.6.1.4.1.1466.115.1 + 21.1.15 USAGE distributedOperation ) +olcAttributeTypes: ( 1.3.6.1.4.1.4203.1.3.1 NAME 'entry' DESC 'OpenLDAP ACL + entry pseudo-attribute' SYNTAX 1.3.6.1.4.1.4203.1.1.1 SINGLE-VALUE NO-USER- + MODIFICATION USAGE dSAOperation ) +olcAttributeTypes: ( 1.3.6.1.4.1.4203.1.3.2 NAME 'children' DESC 'OpenLDAP A + CL children pseudo-attribute' SYNTAX 1.3.6.1.4.1.4203.1.1.1 SINGLE-VALUE NO + -USER-MODIFICATION USAGE dSAOperation ) +olcAttributeTypes: ( 1.3.6.1.4.1.4203.666.1.8 NAME ( 'authzTo' 'saslAuthzTo' + ) DESC 'proxy authorization targets' EQUALITY authzMatch SYNTAX 1.3.6.1.4. + 1.4203.666.2.7 USAGE distributedOperation X-ORDERED 'VALUES' ) +olcAttributeTypes: ( 1.3.6.1.4.1.4203.666.1.9 NAME ( 'authzFrom' 'saslAuthzF + rom' ) DESC 'proxy authorization sources' EQUALITY authzMatch SYNTAX 1.3.6. 
+ 1.4.1.4203.666.2.7 USAGE distributedOperation X-ORDERED 'VALUES' ) +olcAttributeTypes: ( 1.3.6.1.4.1.1466.101.119.3 NAME 'entryTtl' DESC 'RFC258 + 9: entry time-to-live' SYNTAX 1.3.6.1.4.1.1466.115.121.1.27 SINGLE-VALUE NO + -USER-MODIFICATION USAGE dSAOperation ) +olcAttributeTypes: ( 1.3.6.1.4.1.1466.101.119.4 NAME 'dynamicSubtrees' DESC + 'RFC2589: dynamic subtrees' SYNTAX 1.3.6.1.4.1.1466.115.121.1.12 NO-USER-MO + DIFICATION USAGE dSAOperation ) +olcAttributeTypes: ( 2.5.4.49 NAME 'distinguishedName' DESC 'RFC4519: common + supertype of DN attributes' EQUALITY distinguishedNameMatch SYNTAX 1.3.6.1 + .4.1.1466.115.121.1.12 ) +olcAttributeTypes: ( 2.5.4.41 NAME 'name' DESC 'RFC4519: common supertype of + name attributes' EQUALITY caseIgnoreMatch SUBSTR caseIgnoreSubstringsMatch + SYNTAX 1.3.6.1.4.1.1466.115.121.1.15{32768} ) +olcAttributeTypes: ( 2.5.4.3 NAME ( 'cn' 'commonName' ) DESC 'RFC4519: commo + n name(s) for which the entity is known by' SUP name ) +olcAttributeTypes: ( 0.9.2342.19200300.100.1.1 NAME ( 'uid' 'userid' ) DESC + 'RFC4519: user identifier' EQUALITY caseIgnoreMatch SUBSTR caseIgnoreSubstr + ingsMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.15{256} ) +olcAttributeTypes: ( 1.3.6.1.1.1.1.0 NAME 'uidNumber' DESC 'RFC2307: An inte + ger uniquely identifying a user in an administrative domain' EQUALITY integ + erMatch ORDERING integerOrderingMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.27 + SINGLE-VALUE ) +olcAttributeTypes: ( 1.3.6.1.1.1.1.1 NAME 'gidNumber' DESC 'RFC2307: An inte + ger uniquely identifying a group in an administrative domain' EQUALITY inte + gerMatch ORDERING integerOrderingMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.27 + SINGLE-VALUE ) +olcAttributeTypes: ( 2.5.4.35 NAME 'userPassword' DESC 'RFC4519/2307: passwo + rd of user' EQUALITY octetStringMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.40{ + 128} ) +olcAttributeTypes: ( 1.3.6.1.4.1.250.1.57 NAME 'labeledURI' DESC 'RFC2079: U + niform Resource Identifier with optional label' EQUALITY 
caseExactMatch SYN + TAX 1.3.6.1.4.1.1466.115.121.1.15 ) +olcAttributeTypes: ( 2.5.4.13 NAME 'description' DESC 'RFC4519: descriptive + information' EQUALITY caseIgnoreMatch SUBSTR caseIgnoreSubstringsMatch SYNT + AX 1.3.6.1.4.1.1466.115.121.1.15{1024} ) +olcAttributeTypes: ( 2.5.4.34 NAME 'seeAlso' DESC 'RFC4519: DN of related ob + ject' SUP distinguishedName ) +olcAttributeTypes: ( OLcfgGlAt:78 NAME 'olcConfigFile' DESC 'File for slapd + configuration directives' EQUALITY caseIgnoreMatch SYNTAX OMsDirectoryStrin + g SINGLE-VALUE ) +olcAttributeTypes: ( OLcfgGlAt:79 NAME 'olcConfigDir' DESC 'Directory for sl + apd configuration backend' EQUALITY caseIgnoreMatch SYNTAX OMsDirectoryStri + ng SINGLE-VALUE ) +olcAttributeTypes: ( OLcfgGlAt:1 NAME 'olcAccess' DESC 'Access Control List' + EQUALITY caseIgnoreMatch SYNTAX OMsDirectoryString X-ORDERED 'VALUES' ) +olcAttributeTypes: ( OLcfgGlAt:86 NAME 'olcAddContentAcl' DESC 'Check ACLs a + gainst content of Add ops' SYNTAX OMsBoolean SINGLE-VALUE ) +olcAttributeTypes: ( OLcfgGlAt:2 NAME 'olcAllows' DESC 'Allowed set of depre + cated features' EQUALITY caseIgnoreMatch SYNTAX OMsDirectoryString ) +olcAttributeTypes: ( OLcfgGlAt:3 NAME 'olcArgsFile' DESC 'File for slapd com + mand line options' EQUALITY caseIgnoreMatch SYNTAX OMsDirectoryString SINGL + E-VALUE ) +olcAttributeTypes: ( OLcfgGlAt:5 NAME 'olcAttributeOptions' EQUALITY caseIgn + oreMatch SYNTAX OMsDirectoryString ) +olcAttributeTypes: ( OLcfgGlAt:4 NAME 'olcAttributeTypes' DESC 'OpenLDAP att + ributeTypes' EQUALITY caseIgnoreMatch SUBSTR caseIgnoreSubstringsMatch SYNT + AX OMsDirectoryString X-ORDERED 'VALUES' ) +olcAttributeTypes: ( OLcfgGlAt:6 NAME 'olcAuthIDRewrite' EQUALITY caseIgnore + Match SYNTAX OMsDirectoryString X-ORDERED 'VALUES' ) +olcAttributeTypes: ( OLcfgGlAt:7 NAME 'olcAuthzPolicy' EQUALITY caseIgnoreMa + tch SYNTAX OMsDirectoryString SINGLE-VALUE ) +olcAttributeTypes: ( OLcfgGlAt:8 NAME 'olcAuthzRegexp' EQUALITY caseIgnoreMa + tch SYNTAX 
OMsDirectoryString X-ORDERED 'VALUES' ) +olcAttributeTypes: ( OLcfgGlAt:9 NAME 'olcBackend' DESC 'A type of backend' + EQUALITY caseIgnoreMatch SYNTAX OMsDirectoryString SINGLE-VALUE X-ORDERED ' + SIBLINGS' ) +olcAttributeTypes: ( OLcfgGlAt:10 NAME 'olcConcurrency' SYNTAX OMsInteger SI + NGLE-VALUE ) +olcAttributeTypes: ( OLcfgGlAt:11 NAME 'olcConnMaxPending' SYNTAX OMsInteger + SINGLE-VALUE ) +olcAttributeTypes: ( OLcfgGlAt:12 NAME 'olcConnMaxPendingAuth' SYNTAX OMsInt + eger SINGLE-VALUE ) +olcAttributeTypes: ( OLcfgGlAt:13 NAME 'olcDatabase' DESC 'The backend type + for a database instance' SUP olcBackend SINGLE-VALUE X-ORDERED 'SIBLINGS' ) +olcAttributeTypes: ( OLcfgGlAt:14 NAME 'olcDefaultSearchBase' SYNTAX OMsDN S + INGLE-VALUE ) +olcAttributeTypes: ( OLcfgGlAt:15 NAME 'olcDisallows' EQUALITY caseIgnoreMat + ch SYNTAX OMsDirectoryString ) +olcAttributeTypes: ( OLcfgGlAt:16 NAME 'olcDitContentRules' DESC 'OpenLDAP D + IT content rules' EQUALITY caseIgnoreMatch SUBSTR caseIgnoreSubstringsMatch + SYNTAX OMsDirectoryString X-ORDERED 'VALUES' ) +olcAttributeTypes: ( OLcfgDbAt:0.20 NAME 'olcExtraAttrs' EQUALITY caseIgnore + Match SYNTAX OMsDirectoryString ) +olcAttributeTypes: ( OLcfgGlAt:17 NAME 'olcGentleHUP' SYNTAX OMsBoolean SING + LE-VALUE ) +olcAttributeTypes: ( OLcfgDbAt:0.17 NAME 'olcHidden' SYNTAX OMsBoolean SINGL + E-VALUE ) +olcAttributeTypes: ( OLcfgGlAt:18 NAME 'olcIdleTimeout' SYNTAX OMsInteger SI + NGLE-VALUE ) +olcAttributeTypes: ( OLcfgGlAt:19 NAME 'olcInclude' SUP labeledURI ) +olcAttributeTypes: ( OLcfgGlAt:20 NAME 'olcIndexSubstrIfMinLen' SYNTAX OMsIn + teger SINGLE-VALUE ) +olcAttributeTypes: ( OLcfgGlAt:21 NAME 'olcIndexSubstrIfMaxLen' SYNTAX OMsIn + teger SINGLE-VALUE ) +olcAttributeTypes: ( OLcfgGlAt:22 NAME 'olcIndexSubstrAnyLen' SYNTAX OMsInte + ger SINGLE-VALUE ) +olcAttributeTypes: ( OLcfgGlAt:23 NAME 'olcIndexSubstrAnyStep' SYNTAX OMsInt + eger SINGLE-VALUE ) +olcAttributeTypes: ( OLcfgGlAt:84 NAME 'olcIndexIntLen' SYNTAX OMsInteger SI 
+ NGLE-VALUE ) +olcAttributeTypes: ( OLcfgDbAt:0.4 NAME 'olcLastMod' SYNTAX OMsBoolean SINGL + E-VALUE ) +olcAttributeTypes: ( OLcfgGlAt:85 NAME 'olcLdapSyntaxes' DESC 'OpenLDAP ldap + Syntax' EQUALITY caseIgnoreMatch SUBSTR caseIgnoreSubstringsMatch SYNTAX OM + sDirectoryString X-ORDERED 'VALUES' ) +olcAttributeTypes: ( OLcfgDbAt:0.5 NAME 'olcLimits' EQUALITY caseIgnoreMatch + SYNTAX OMsDirectoryString X-ORDERED 'VALUES' ) +olcAttributeTypes: ( OLcfgGlAt:93 NAME 'olcListenerThreads' SYNTAX OMsIntege + r SINGLE-VALUE ) +olcAttributeTypes: ( OLcfgGlAt:26 NAME 'olcLocalSSF' SYNTAX OMsInteger SINGL + E-VALUE ) +olcAttributeTypes: ( OLcfgGlAt:27 NAME 'olcLogFile' SYNTAX OMsDirectoryStrin + g SINGLE-VALUE ) +olcAttributeTypes: ( OLcfgGlAt:28 NAME 'olcLogLevel' EQUALITY caseIgnoreMatc + h SYNTAX OMsDirectoryString ) +olcAttributeTypes: ( OLcfgDbAt:0.6 NAME 'olcMaxDerefDepth' SYNTAX OMsInteger + SINGLE-VALUE ) +olcAttributeTypes: ( OLcfgDbAt:0.16 NAME 'olcMirrorMode' SYNTAX OMsBoolean S + INGLE-VALUE ) +olcAttributeTypes: ( OLcfgGlAt:30 NAME 'olcModuleLoad' EQUALITY caseIgnoreMa + tch SYNTAX OMsDirectoryString X-ORDERED 'VALUES' ) +olcAttributeTypes: ( OLcfgGlAt:31 NAME 'olcModulePath' SYNTAX OMsDirectorySt + ring SINGLE-VALUE ) +olcAttributeTypes: ( OLcfgDbAt:0.18 NAME 'olcMonitoring' SYNTAX OMsBoolean S + INGLE-VALUE ) +olcAttributeTypes: ( OLcfgGlAt:32 NAME 'olcObjectClasses' DESC 'OpenLDAP obj + ect classes' EQUALITY caseIgnoreMatch SUBSTR caseIgnoreSubstringsMatch SYNT + AX OMsDirectoryString X-ORDERED 'VALUES' ) +olcAttributeTypes: ( OLcfgGlAt:33 NAME 'olcObjectIdentifier' EQUALITY caseIg + noreMatch SUBSTR caseIgnoreSubstringsMatch SYNTAX OMsDirectoryString X-ORDE + RED 'VALUES' ) +olcAttributeTypes: ( OLcfgGlAt:34 NAME 'olcOverlay' SUP olcDatabase SINGLE-V + ALUE X-ORDERED 'SIBLINGS' ) +olcAttributeTypes: ( OLcfgGlAt:35 NAME 'olcPasswordCryptSaltFormat' SYNTAX O + MsDirectoryString SINGLE-VALUE ) +olcAttributeTypes: ( OLcfgGlAt:36 NAME 'olcPasswordHash' EQUALITY 
caseIgnore + Match SYNTAX OMsDirectoryString ) +olcAttributeTypes: ( OLcfgGlAt:37 NAME 'olcPidFile' SYNTAX OMsDirectoryStrin + g SINGLE-VALUE ) +olcAttributeTypes: ( OLcfgGlAt:38 NAME 'olcPlugin' EQUALITY caseIgnoreMatch + SYNTAX OMsDirectoryString ) +olcAttributeTypes: ( OLcfgGlAt:39 NAME 'olcPluginLogFile' SYNTAX OMsDirector + yString SINGLE-VALUE ) +olcAttributeTypes: ( OLcfgGlAt:40 NAME 'olcReadOnly' SYNTAX OMsBoolean SINGL + E-VALUE ) +olcAttributeTypes: ( OLcfgGlAt:41 NAME 'olcReferral' SUP labeledURI SINGLE-V + ALUE ) +olcAttributeTypes: ( OLcfgDbAt:0.7 NAME 'olcReplica' SUP labeledURI EQUALITY + caseIgnoreMatch X-ORDERED 'VALUES' ) +olcAttributeTypes: ( OLcfgGlAt:43 NAME 'olcReplicaArgsFile' SYNTAX OMsDirect + oryString SINGLE-VALUE ) +olcAttributeTypes: ( OLcfgGlAt:44 NAME 'olcReplicaPidFile' SYNTAX OMsDirecto + ryString SINGLE-VALUE ) +olcAttributeTypes: ( OLcfgGlAt:45 NAME 'olcReplicationInterval' SYNTAX OMsIn + teger SINGLE-VALUE ) +olcAttributeTypes: ( OLcfgGlAt:46 NAME 'olcReplogFile' SYNTAX OMsDirectorySt + ring SINGLE-VALUE ) +olcAttributeTypes: ( OLcfgGlAt:47 NAME 'olcRequires' EQUALITY caseIgnoreMatc + h SYNTAX OMsDirectoryString ) +olcAttributeTypes: ( OLcfgGlAt:48 NAME 'olcRestrict' EQUALITY caseIgnoreMatc + h SYNTAX OMsDirectoryString ) +olcAttributeTypes: ( OLcfgGlAt:49 NAME 'olcReverseLookup' SYNTAX OMsBoolean + SINGLE-VALUE ) +olcAttributeTypes: ( OLcfgDbAt:0.8 NAME 'olcRootDN' EQUALITY distinguishedNa + meMatch SYNTAX OMsDN SINGLE-VALUE ) +olcAttributeTypes: ( OLcfgGlAt:51 NAME 'olcRootDSE' EQUALITY caseIgnoreMatch + SYNTAX OMsDirectoryString ) +olcAttributeTypes: ( OLcfgDbAt:0.9 NAME 'olcRootPW' SYNTAX OMsDirectoryStrin + g SINGLE-VALUE ) +olcAttributeTypes: ( OLcfgGlAt:89 NAME 'olcSaslAuxprops' SYNTAX OMsDirectory + String SINGLE-VALUE ) +olcAttributeTypes: ( OLcfgGlAt:53 NAME 'olcSaslHost' SYNTAX OMsDirectoryStri + ng SINGLE-VALUE ) +olcAttributeTypes: ( OLcfgGlAt:54 NAME 'olcSaslRealm' SYNTAX OMsDirectoryStr + ing SINGLE-VALUE ) 
+olcAttributeTypes: ( OLcfgGlAt:56 NAME 'olcSaslSecProps' SYNTAX OMsDirectory + String SINGLE-VALUE ) +olcAttributeTypes: ( OLcfgGlAt:58 NAME 'olcSchemaDN' EQUALITY distinguishedN + ameMatch SYNTAX OMsDN SINGLE-VALUE ) +olcAttributeTypes: ( OLcfgGlAt:59 NAME 'olcSecurity' EQUALITY caseIgnoreMatc + h SYNTAX OMsDirectoryString ) +olcAttributeTypes: ( OLcfgGlAt:81 NAME 'olcServerID' EQUALITY caseIgnoreMatc + h SYNTAX OMsDirectoryString ) +olcAttributeTypes: ( OLcfgGlAt:60 NAME 'olcSizeLimit' SYNTAX OMsDirectoryStr + ing SINGLE-VALUE ) +olcAttributeTypes: ( OLcfgGlAt:61 NAME 'olcSockbufMaxIncoming' SYNTAX OMsInt + eger SINGLE-VALUE ) +olcAttributeTypes: ( OLcfgGlAt:62 NAME 'olcSockbufMaxIncomingAuth' SYNTAX OM + sInteger SINGLE-VALUE ) +olcAttributeTypes: ( OLcfgGlAt:83 NAME 'olcSortVals' DESC 'Attributes whose + values will always be sorted' EQUALITY caseIgnoreMatch SYNTAX OMsDirectoryS + tring ) +olcAttributeTypes: ( OLcfgDbAt:0.15 NAME 'olcSubordinate' SYNTAX OMsDirector + yString SINGLE-VALUE ) +olcAttributeTypes: ( OLcfgDbAt:0.10 NAME 'olcSuffix' EQUALITY distinguishedN + ameMatch SYNTAX OMsDN ) +olcAttributeTypes: ( OLcfgDbAt:0.19 NAME 'olcSyncUseSubentry' DESC 'Store sy + nc context in a subentry' SYNTAX OMsBoolean SINGLE-VALUE ) +olcAttributeTypes: ( OLcfgDbAt:0.11 NAME 'olcSyncrepl' EQUALITY caseIgnoreMa + tch SYNTAX OMsDirectoryString X-ORDERED 'VALUES' ) +olcAttributeTypes: ( OLcfgGlAt:90 NAME 'olcTCPBuffer' DESC 'Custom TCP buffe + r size' SYNTAX OMsDirectoryString ) +olcAttributeTypes: ( OLcfgGlAt:66 NAME 'olcThreads' SYNTAX OMsInteger SINGLE + -VALUE ) +olcAttributeTypes: ( OLcfgGlAt:67 NAME 'olcTimeLimit' SYNTAX OMsDirectoryStr + ing ) +olcAttributeTypes: ( OLcfgGlAt:68 NAME 'olcTLSCACertificateFile' SYNTAX OMsD + irectoryString SINGLE-VALUE ) +olcAttributeTypes: ( OLcfgGlAt:69 NAME 'olcTLSCACertificatePath' SYNTAX OMsD + irectoryString SINGLE-VALUE ) +olcAttributeTypes: ( OLcfgGlAt:70 NAME 'olcTLSCertificateFile' SYNTAX OMsDir + ectoryString 
SINGLE-VALUE ) +olcAttributeTypes: ( OLcfgGlAt:71 NAME 'olcTLSCertificateKeyFile' SYNTAX OMs + DirectoryString SINGLE-VALUE ) +olcAttributeTypes: ( OLcfgGlAt:72 NAME 'olcTLSCipherSuite' SYNTAX OMsDirecto + ryString SINGLE-VALUE ) +olcAttributeTypes: ( OLcfgGlAt:73 NAME 'olcTLSCRLCheck' SYNTAX OMsDirectoryS + tring SINGLE-VALUE ) +olcAttributeTypes: ( OLcfgGlAt:82 NAME 'olcTLSCRLFile' SYNTAX OMsDirectorySt + ring SINGLE-VALUE ) +olcAttributeTypes: ( OLcfgGlAt:74 NAME 'olcTLSRandFile' SYNTAX OMsDirectoryS + tring SINGLE-VALUE ) +olcAttributeTypes: ( OLcfgGlAt:75 NAME 'olcTLSVerifyClient' SYNTAX OMsDirect + oryString SINGLE-VALUE ) +olcAttributeTypes: ( OLcfgGlAt:77 NAME 'olcTLSDHParamFile' SYNTAX OMsDirecto + ryString SINGLE-VALUE ) +olcAttributeTypes: ( OLcfgGlAt:87 NAME 'olcTLSProtocolMin' SYNTAX OMsDirecto + ryString SINGLE-VALUE ) +olcAttributeTypes: ( OLcfgGlAt:80 NAME 'olcToolThreads' SYNTAX OMsInteger SI + NGLE-VALUE ) +olcAttributeTypes: ( OLcfgDbAt:0.12 NAME 'olcUpdateDN' SYNTAX OMsDN SINGLE-V + ALUE ) +olcAttributeTypes: ( OLcfgDbAt:0.13 NAME 'olcUpdateRef' SUP labeledURI EQUAL + ITY caseIgnoreMatch ) +olcAttributeTypes: ( OLcfgGlAt:88 NAME 'olcWriteTimeout' SYNTAX OMsInteger S + INGLE-VALUE ) +olcAttributeTypes: ( OLcfgDbAt:0.1 NAME 'olcDbDirectory' DESC 'Directory for + database content' EQUALITY caseIgnoreMatch SYNTAX OMsDirectoryString SINGL + E-VALUE ) +olcAttributeTypes: ( 1.3.6.1.4.1.4203.666.1.5 NAME 'OpenLDAPaci' DESC 'OpenL + DAP access control information (experimental)' EQUALITY OpenLDAPaciMatch SY + NTAX 1.3.6.1.4.1.4203.666.2.1 USAGE directoryOperation ) +olcAttributeTypes: ( OLcfgDbAt:1.2 NAME 'olcDbCheckpoint' DESC 'Database che + ckpoint interval in kbytes and minutes' SYNTAX OMsDirectoryString SINGLE-VA + LUE ) +olcAttributeTypes: ( OLcfgDbAt:1.4 NAME 'olcDbNoSync' DESC 'Disable synchron + ous database writes' SYNTAX OMsBoolean SINGLE-VALUE ) +olcAttributeTypes: ( OLcfgDbAt:12.3 NAME 'olcDbEnvFlags' DESC 'Database envi + ronment flags' 
EQUALITY caseIgnoreMatch SYNTAX OMsDirectoryString ) +olcAttributeTypes: ( OLcfgDbAt:0.2 NAME 'olcDbIndex' DESC 'Attribute index p + arameters' EQUALITY caseIgnoreMatch SYNTAX OMsDirectoryString ) +olcAttributeTypes: ( OLcfgDbAt:12.1 NAME 'olcDbMaxReaders' DESC 'Maximum num + ber of threads that may access the DB concurrently' SYNTAX OMsInteger SINGL + E-VALUE ) +olcAttributeTypes: ( OLcfgDbAt:12.2 NAME 'olcDbMaxSize' DESC 'Maximum size o + f DB in bytes' SYNTAX OMsInteger SINGLE-VALUE ) +olcAttributeTypes: ( OLcfgDbAt:0.3 NAME 'olcDbMode' DESC 'Unix permissions o + f database files' SYNTAX OMsDirectoryString SINGLE-VALUE ) +olcAttributeTypes: ( OLcfgDbAt:12.5 NAME 'olcDbRtxnSize' DESC 'Number of ent + ries to process in one read transaction' SYNTAX OMsInteger SINGLE-VALUE ) +olcAttributeTypes: ( OLcfgDbAt:1.9 NAME 'olcDbSearchStack' DESC 'Depth of se + arch stack in IDLs' SYNTAX OMsInteger SINGLE-VALUE ) +olcAttributeTypes: ( 1.2.840.113556.1.2.102 NAME 'memberOf' DESC 'Group that + the entry belongs to' EQUALITY distinguishedNameMatch SYNTAX 1.3.6.1.4.1.1 + 466.115.121.1.12 USAGE dSAOperation X-ORIGIN 'iPlanet Delegated Administrat + or' ) +olcAttributeTypes: ( OLcfgOvAt:18.0 NAME 'olcMemberOfDN' DESC 'DN to be used + as modifiersName' SYNTAX OMsDN SINGLE-VALUE ) +olcAttributeTypes: ( OLcfgOvAt:18.1 NAME 'olcMemberOfDangling' DESC 'Behavio + r with respect to dangling members, constrained to ignore, drop, error' SYN + TAX OMsDirectoryString SINGLE-VALUE ) +olcAttributeTypes: ( OLcfgOvAt:18.2 NAME 'olcMemberOfRefInt' DESC 'Take care + of referential integrity' SYNTAX OMsBoolean SINGLE-VALUE ) +olcAttributeTypes: ( OLcfgOvAt:18.3 NAME 'olcMemberOfGroupOC' DESC 'Group ob + jectClass' SYNTAX OMsDirectoryString SINGLE-VALUE ) +olcAttributeTypes: ( OLcfgOvAt:18.4 NAME 'olcMemberOfMemberAD' DESC 'member + attribute' SYNTAX OMsDirectoryString SINGLE-VALUE ) +olcAttributeTypes: ( OLcfgOvAt:18.5 NAME 'olcMemberOfMemberOfAD' DESC 'membe + rOf attribute' SYNTAX 
OMsDirectoryString SINGLE-VALUE ) +olcAttributeTypes: ( OLcfgOvAt:18.7 NAME 'olcMemberOfDanglingError' DESC 'Er + ror code returned in case of dangling back reference' SYNTAX OMsDirectorySt + ring SINGLE-VALUE ) +olcAttributeTypes: ( OLcfgOvAt:11.1 NAME 'olcRefintAttribute' DESC 'Attribut + es for referential integrity' EQUALITY caseIgnoreMatch SYNTAX OMsDirectoryS + tring ) +olcAttributeTypes: ( OLcfgOvAt:11.2 NAME 'olcRefintNothing' DESC 'Replacemen + t DN to supply when needed' SYNTAX OMsDN SINGLE-VALUE ) +olcAttributeTypes: ( OLcfgOvAt:11.3 NAME 'olcRefintModifiersName' DESC 'The + DN to use as modifiersName' SYNTAX OMsDN SINGLE-VALUE ) +olcAttributeTypes: ( OLcfgOvAt:10.1 NAME 'olcUniqueBase' DESC 'Subtree for u + niqueness searches' EQUALITY distinguishedNameMatch SYNTAX OMsDN SINGLE-VAL + UE ) +olcAttributeTypes: ( OLcfgOvAt:10.2 NAME 'olcUniqueIgnore' DESC 'Attributes + for which uniqueness shall not be enforced' EQUALITY caseIgnoreMatch ORDERI + NG caseIgnoreOrderingMatch SUBSTR caseIgnoreSubstringsMatch SYNTAX OMsDirec + toryString ) +olcAttributeTypes: ( OLcfgOvAt:10.3 NAME 'olcUniqueAttribute' DESC 'Attribut + es for which uniqueness shall be enforced' EQUALITY caseIgnoreMatch ORDERIN + G caseIgnoreOrderingMatch SUBSTR caseIgnoreSubstringsMatch SYNTAX OMsDirect + oryString ) +olcAttributeTypes: ( OLcfgOvAt:10.4 NAME 'olcUniqueStrict' DESC 'Enforce uni + queness of null values' EQUALITY booleanMatch SYNTAX OMsBoolean SINGLE-VALU + E ) +olcAttributeTypes: ( OLcfgOvAt:10.5 NAME 'olcUniqueURI' DESC 'List of keywor + ds and LDAP URIs for a uniqueness domain' EQUALITY caseExactMatch ORDERING + caseExactOrderingMatch SUBSTR caseExactSubstringsMatch SYNTAX OMsDirectoryS + tring ) +olcObjectClasses: ( 2.5.6.0 NAME 'top' DESC 'top of the superclass chain' AB + STRACT MUST objectClass ) +olcObjectClasses: ( 1.3.6.1.4.1.1466.101.120.111 NAME 'extensibleObject' DES + C 'RFC4512: extensible object' SUP top AUXILIARY ) +olcObjectClasses: ( 2.5.6.1 NAME 'alias' DESC 
'RFC4512: an alias' SUP top ST + RUCTURAL MUST aliasedObjectName ) +olcObjectClasses: ( 2.16.840.1.113730.3.2.6 NAME 'referral' DESC 'namedref: + named subordinate referral' SUP top STRUCTURAL MUST ref ) +olcObjectClasses: ( 1.3.6.1.4.1.4203.1.4.1 NAME ( 'OpenLDAProotDSE' 'LDAProo + tDSE' ) DESC 'OpenLDAP Root DSE object' SUP top STRUCTURAL MAY cn ) +olcObjectClasses: ( 2.5.17.0 NAME 'subentry' DESC 'RFC3672: subentry' SUP to + p STRUCTURAL MUST ( cn $ subtreeSpecification ) ) +olcObjectClasses: ( 2.5.20.1 NAME 'subschema' DESC 'RFC4512: controlling sub + schema (sub)entry' AUXILIARY MAY ( dITStructureRules $ nameForms $ dITConte + ntRules $ objectClasses $ attributeTypes $ matchingRules $ matchingRuleUse + ) ) +olcObjectClasses: ( 2.5.17.2 NAME 'collectiveAttributeSubentry' DESC 'RFC367 + 1: collective attribute subentry' AUXILIARY ) +olcObjectClasses: ( 1.3.6.1.4.1.1466.101.119.2 NAME 'dynamicObject' DESC 'RF + C2589: Dynamic Object' SUP top AUXILIARY ) +olcObjectClasses: ( 1.3.6.1.4.1.4203.666.3.4 NAME 'glue' DESC 'Glue Entry' S + UP top STRUCTURAL ) +olcObjectClasses: ( 1.3.6.1.4.1.4203.666.3.5 NAME 'syncConsumerSubentry' DES + C 'Persistent Info for SyncRepl Consumer' AUXILIARY MAY syncreplCookie ) +olcObjectClasses: ( 1.3.6.1.4.1.4203.666.3.6 NAME 'syncProviderSubentry' DES + C 'Persistent Info for SyncRepl Producer' AUXILIARY MAY contextCSN ) +olcObjectClasses: ( OLcfgGlOc:0 NAME 'olcConfig' DESC 'OpenLDAP configuratio + n object' SUP top ABSTRACT ) +olcObjectClasses: ( OLcfgGlOc:1 NAME 'olcGlobal' DESC 'OpenLDAP Global confi + guration options' SUP olcConfig STRUCTURAL MAY ( cn $ olcConfigFile $ olcCo + nfigDir $ olcAllows $ olcArgsFile $ olcAttributeOptions $ olcAuthIDRewrite + $ olcAuthzPolicy $ olcAuthzRegexp $ olcConcurrency $ olcConnMaxPending $ ol + cConnMaxPendingAuth $ olcDisallows $ olcGentleHUP $ olcIdleTimeout $ olcInd + exSubstrIfMaxLen $ olcIndexSubstrIfMinLen $ olcIndexSubstrAnyLen $ olcIndex + SubstrAnyStep $ olcIndexIntLen $ 
olcListenerThreads $ olcLocalSSF $ olcLogF + ile $ olcLogLevel $ olcPasswordCryptSaltFormat $ olcPasswordHash $ olcPidFi + le $ olcPluginLogFile $ olcReadOnly $ olcReferral $ olcReplogFile $ olcRequ + ires $ olcRestrict $ olcReverseLookup $ olcRootDSE $ olcSaslAuxprops $ olcS + aslHost $ olcSaslRealm $ olcSaslSecProps $ olcSecurity $ olcServerID $ olcS + izeLimit $ olcSockbufMaxIncoming $ olcSockbufMaxIncomingAuth $ olcTCPBuffer + $ olcThreads $ olcTimeLimit $ olcTLSCACertificateFile $ olcTLSCACertificat + ePath $ olcTLSCertificateFile $ olcTLSCertificateKeyFile $ olcTLSCipherSuit + e $ olcTLSCRLCheck $ olcTLSRandFile $ olcTLSVerifyClient $ olcTLSDHParamFil + e $ olcTLSCRLFile $ olcTLSProtocolMin $ olcToolThreads $ olcWriteTimeout $ + olcObjectIdentifier $ olcAttributeTypes $ olcObjectClasses $ olcDitContentR + ules $ olcLdapSyntaxes ) ) +olcObjectClasses: ( OLcfgGlOc:2 NAME 'olcSchemaConfig' DESC 'OpenLDAP schema + object' SUP olcConfig STRUCTURAL MAY ( cn $ olcObjectIdentifier $ olcLdapS + yntaxes $ olcAttributeTypes $ olcObjectClasses $ olcDitContentRules ) ) +olcObjectClasses: ( OLcfgGlOc:3 NAME 'olcBackendConfig' DESC 'OpenLDAP Backe + nd-specific options' SUP olcConfig STRUCTURAL MUST olcBackend ) +olcObjectClasses: ( OLcfgGlOc:4 NAME 'olcDatabaseConfig' DESC 'OpenLDAP Data + base-specific options' SUP olcConfig STRUCTURAL MUST olcDatabase MAY ( olcH + idden $ olcSuffix $ olcSubordinate $ olcAccess $ olcAddContentAcl $ olcLast + Mod $ olcLimits $ olcMaxDerefDepth $ olcPlugin $ olcReadOnly $ olcReplica $ + olcReplicaArgsFile $ olcReplicaPidFile $ olcReplicationInterval $ olcReplo + gFile $ olcRequires $ olcRestrict $ olcRootDN $ olcRootPW $ olcSchemaDN $ o + lcSecurity $ olcSizeLimit $ olcSyncUseSubentry $ olcSyncrepl $ olcTimeLimit + $ olcUpdateDN $ olcUpdateRef $ olcMirrorMode $ olcMonitoring $ olcExtraAtt + rs ) ) +olcObjectClasses: ( OLcfgGlOc:5 NAME 'olcOverlayConfig' DESC 'OpenLDAP Overl + ay-specific options' SUP olcConfig STRUCTURAL MUST olcOverlay ) 
+olcObjectClasses: ( OLcfgGlOc:6 NAME 'olcIncludeFile' DESC 'OpenLDAP configu + ration include file' SUP olcConfig STRUCTURAL MUST olcInclude MAY ( cn $ ol + cRootDSE ) ) +olcObjectClasses: ( OLcfgGlOc:7 NAME 'olcFrontendConfig' DESC 'OpenLDAP fron + tend configuration' AUXILIARY MAY ( olcDefaultSearchBase $ olcPasswordHash + $ olcSortVals ) ) +olcObjectClasses: ( OLcfgGlOc:8 NAME 'olcModuleList' DESC 'OpenLDAP dynamic + module info' SUP olcConfig STRUCTURAL MAY ( cn $ olcModulePath $ olcModuleL + oad ) ) +olcObjectClasses: ( OLcfgDbOc:2.1 NAME 'olcLdifConfig' DESC 'LDIF backend co + nfiguration' SUP olcDatabaseConfig STRUCTURAL MUST olcDbDirectory ) +olcObjectClasses: ( OLcfgDbOc:12.1 NAME 'olcMdbConfig' DESC 'MDB backend con + figuration' SUP olcDatabaseConfig STRUCTURAL MUST olcDbDirectory MAY ( olcD + bCheckpoint $ olcDbEnvFlags $ olcDbNoSync $ olcDbIndex $ olcDbMaxReaders $ + olcDbMaxSize $ olcDbMode $ olcDbSearchStack $ olcDbRtxnSize ) ) +olcObjectClasses: ( OLcfgOvOc:18.1 NAME 'olcMemberOf' DESC 'Member-of config + uration' SUP olcOverlayConfig STRUCTURAL MAY ( olcMemberOfDN $ olcMemberOfD + angling $ olcMemberOfDanglingError $ olcMemberOfRefInt $ olcMemberOfGroupOC + $ olcMemberOfMemberAD $ olcMemberOfMemberOfAD ) ) +olcObjectClasses: ( OLcfgOvOc:11.1 NAME 'olcRefintConfig' DESC 'Referential + integrity configuration' SUP olcOverlayConfig STRUCTURAL MAY ( olcRefintAtt + ribute $ olcRefintNothing $ olcRefintModifiersName ) ) +olcObjectClasses: ( OLcfgOvOc:10.1 NAME 'olcUniqueConfig' DESC 'Attribute va + lue uniqueness configuration' SUP olcOverlayConfig STRUCTURAL MAY ( olcUniq + ueBase $ olcUniqueIgnore $ olcUniqueAttribute $ olcUniqueStrict $ olcUnique + URI ) ) +structuralObjectClass: olcSchemaConfig +entryUUID: 7ed44174-f8e9-103c-8ec1-e51fa20c07f9 +creatorsName: cn=config +createTimestamp: 20221115042701Z +entryCSN: 20221115042701.652581Z#000000#000#000000 +modifiersName: cn=config +modifyTimestamp: 20221115042701Z diff --git 
a/dirsrvtests/tests/data/openldap_2_389/saslauthd/slapd.d/cn=config/cn=schema/cn={0}core.ldif b/dirsrvtests/tests/data/openldap_2_389/saslauthd/slapd.d/cn=config/cn=schema/cn={0}core.ldif new file mode 100644 index 0000000..f7a4313 --- /dev/null +++ b/dirsrvtests/tests/data/openldap_2_389/saslauthd/slapd.d/cn=config/cn=schema/cn={0}core.ldif @@ -0,0 +1,247 @@ +# AUTO-GENERATED FILE - DO NOT EDIT!! Use ldapmodify. +# CRC32 49e5af84 +dn: cn={0}core +objectClass: olcSchemaConfig +cn: {0}core +olcAttributeTypes: {0}( 2.5.4.2 NAME 'knowledgeInformation' DESC 'RFC2256: k + nowledge information' EQUALITY caseIgnoreMatch SYNTAX 1.3.6.1.4.1.1466.115. + 121.1.15{32768} ) +olcAttributeTypes: {1}( 2.5.4.4 NAME ( 'sn' 'surname' ) DESC 'RFC2256: last + (family) name(s) for which the entity is known by' SUP name ) +olcAttributeTypes: {2}( 2.5.4.5 NAME 'serialNumber' DESC 'RFC2256: serial nu + mber of the entity' EQUALITY caseIgnoreMatch SUBSTR caseIgnoreSubstringsMat + ch SYNTAX 1.3.6.1.4.1.1466.115.121.1.44{64} ) +olcAttributeTypes: {3}( 2.5.4.6 NAME ( 'c' 'countryName' ) DESC 'RFC4519: tw + o-letter ISO-3166 country code' SUP name SYNTAX 1.3.6.1.4.1.1466.115.121.1. 
+ 11 SINGLE-VALUE ) +olcAttributeTypes: {4}( 2.5.4.7 NAME ( 'l' 'localityName' ) DESC 'RFC2256: l + ocality which this object resides in' SUP name ) +olcAttributeTypes: {5}( 2.5.4.8 NAME ( 'st' 'stateOrProvinceName' ) DESC 'RF + C2256: state or province which this object resides in' SUP name ) +olcAttributeTypes: {6}( 2.5.4.9 NAME ( 'street' 'streetAddress' ) DESC 'RFC2 + 256: street address of this object' EQUALITY caseIgnoreMatch SUBSTR caseIgn + oreSubstringsMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.15{128} ) +olcAttributeTypes: {7}( 2.5.4.10 NAME ( 'o' 'organizationName' ) DESC 'RFC22 + 56: organization this object belongs to' SUP name ) +olcAttributeTypes: {8}( 2.5.4.11 NAME ( 'ou' 'organizationalUnitName' ) DESC + 'RFC2256: organizational unit this object belongs to' SUP name ) +olcAttributeTypes: {9}( 2.5.4.12 NAME 'title' DESC 'RFC2256: title associate + d with the entity' SUP name ) +olcAttributeTypes: {10}( 2.5.4.14 NAME 'searchGuide' DESC 'RFC2256: search g + uide, deprecated by enhancedSearchGuide' SYNTAX 1.3.6.1.4.1.1466.115.121.1. + 25 ) +olcAttributeTypes: {11}( 2.5.4.15 NAME 'businessCategory' DESC 'RFC2256: bus + iness category' EQUALITY caseIgnoreMatch SUBSTR caseIgnoreSubstringsMatch S + YNTAX 1.3.6.1.4.1.1466.115.121.1.15{128} ) +olcAttributeTypes: {12}( 2.5.4.16 NAME 'postalAddress' DESC 'RFC2256: postal + address' EQUALITY caseIgnoreListMatch SUBSTR caseIgnoreListSubstringsMatch + SYNTAX 1.3.6.1.4.1.1466.115.121.1.41 ) +olcAttributeTypes: {13}( 2.5.4.17 NAME 'postalCode' DESC 'RFC2256: postal co + de' EQUALITY caseIgnoreMatch SUBSTR caseIgnoreSubstringsMatch SYNTAX 1.3.6. 
+ 1.4.1.1466.115.121.1.15{40} ) +olcAttributeTypes: {14}( 2.5.4.18 NAME 'postOfficeBox' DESC 'RFC2256: Post O + ffice Box' EQUALITY caseIgnoreMatch SUBSTR caseIgnoreSubstringsMatch SYNTAX + 1.3.6.1.4.1.1466.115.121.1.15{40} ) +olcAttributeTypes: {15}( 2.5.4.19 NAME 'physicalDeliveryOfficeName' DESC 'RF + C2256: Physical Delivery Office Name' EQUALITY caseIgnoreMatch SUBSTR caseI + gnoreSubstringsMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.15{128} ) +olcAttributeTypes: {16}( 2.5.4.20 NAME 'telephoneNumber' DESC 'RFC2256: Tele + phone Number' EQUALITY telephoneNumberMatch SUBSTR telephoneNumberSubstring + sMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.50{32} ) +olcAttributeTypes: {17}( 2.5.4.21 NAME 'telexNumber' DESC 'RFC2256: Telex Nu + mber' SYNTAX 1.3.6.1.4.1.1466.115.121.1.52 ) +olcAttributeTypes: {18}( 2.5.4.22 NAME 'teletexTerminalIdentifier' DESC 'RFC + 2256: Teletex Terminal Identifier' SYNTAX 1.3.6.1.4.1.1466.115.121.1.51 ) +olcAttributeTypes: {19}( 2.5.4.23 NAME ( 'facsimileTelephoneNumber' 'fax' ) + DESC 'RFC2256: Facsimile (Fax) Telephone Number' SYNTAX 1.3.6.1.4.1.1466.11 + 5.121.1.22 ) +olcAttributeTypes: {20}( 2.5.4.24 NAME 'x121Address' DESC 'RFC2256: X.121 Ad + dress' EQUALITY numericStringMatch SUBSTR numericStringSubstringsMatch SYNT + AX 1.3.6.1.4.1.1466.115.121.1.36{15} ) +olcAttributeTypes: {21}( 2.5.4.25 NAME 'internationaliSDNNumber' DESC 'RFC22 + 56: international ISDN number' EQUALITY numericStringMatch SUBSTR numericSt + ringSubstringsMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.36{16} ) +olcAttributeTypes: {22}( 2.5.4.26 NAME 'registeredAddress' DESC 'RFC2256: re + gistered postal address' SUP postalAddress SYNTAX 1.3.6.1.4.1.1466.115.121. 
+ 1.41 ) +olcAttributeTypes: {23}( 2.5.4.27 NAME 'destinationIndicator' DESC 'RFC2256: + destination indicator' EQUALITY caseIgnoreMatch SUBSTR caseIgnoreSubstring + sMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.44{128} ) +olcAttributeTypes: {24}( 2.5.4.28 NAME 'preferredDeliveryMethod' DESC 'RFC22 + 56: preferred delivery method' SYNTAX 1.3.6.1.4.1.1466.115.121.1.14 SINGLE- + VALUE ) +olcAttributeTypes: {25}( 2.5.4.29 NAME 'presentationAddress' DESC 'RFC2256: + presentation address' EQUALITY presentationAddressMatch SYNTAX 1.3.6.1.4.1. + 1466.115.121.1.43 SINGLE-VALUE ) +olcAttributeTypes: {26}( 2.5.4.30 NAME 'supportedApplicationContext' DESC 'R + FC2256: supported application context' EQUALITY objectIdentifierMatch SYNTA + X 1.3.6.1.4.1.1466.115.121.1.38 ) +olcAttributeTypes: {27}( 2.5.4.31 NAME 'member' DESC 'RFC2256: member of a g + roup' SUP distinguishedName ) +olcAttributeTypes: {28}( 2.5.4.32 NAME 'owner' DESC 'RFC2256: owner (of the + object)' SUP distinguishedName ) +olcAttributeTypes: {29}( 2.5.4.33 NAME 'roleOccupant' DESC 'RFC2256: occupan + t of role' SUP distinguishedName ) +olcAttributeTypes: {30}( 2.5.4.36 NAME 'userCertificate' DESC 'RFC2256: X.50 + 9 user certificate, use ;binary' EQUALITY certificateExactMatch SYNTAX 1.3. + 6.1.4.1.1466.115.121.1.8 ) +olcAttributeTypes: {31}( 2.5.4.37 NAME 'cACertificate' DESC 'RFC2256: X.509 + CA certificate, use ;binary' EQUALITY certificateExactMatch SYNTAX 1.3.6.1. 
+ 4.1.1466.115.121.1.8 ) +olcAttributeTypes: {32}( 2.5.4.38 NAME 'authorityRevocationList' DESC 'RFC22 + 56: X.509 authority revocation list, use ;binary' SYNTAX 1.3.6.1.4.1.1466.1 + 15.121.1.9 ) +olcAttributeTypes: {33}( 2.5.4.39 NAME 'certificateRevocationList' DESC 'RFC + 2256: X.509 certificate revocation list, use ;binary' SYNTAX 1.3.6.1.4.1.14 + 66.115.121.1.9 ) +olcAttributeTypes: {34}( 2.5.4.40 NAME 'crossCertificatePair' DESC 'RFC2256: + X.509 cross certificate pair, use ;binary' SYNTAX 1.3.6.1.4.1.1466.115.121 + .1.10 ) +olcAttributeTypes: {35}( 2.5.4.42 NAME ( 'givenName' 'gn' ) DESC 'RFC2256: f + irst name(s) for which the entity is known by' SUP name ) +olcAttributeTypes: {36}( 2.5.4.43 NAME 'initials' DESC 'RFC2256: initials of + some or all of names, but not the surname(s).' SUP name ) +olcAttributeTypes: {37}( 2.5.4.44 NAME 'generationQualifier' DESC 'RFC2256: + name qualifier indicating a generation' SUP name ) +olcAttributeTypes: {38}( 2.5.4.45 NAME 'x500UniqueIdentifier' DESC 'RFC2256: + X.500 unique identifier' EQUALITY bitStringMatch SYNTAX 1.3.6.1.4.1.1466.1 + 15.121.1.6 ) +olcAttributeTypes: {39}( 2.5.4.46 NAME 'dnQualifier' DESC 'RFC2256: DN quali + fier' EQUALITY caseIgnoreMatch ORDERING caseIgnoreOrderingMatch SUBSTR case + IgnoreSubstringsMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.44 ) +olcAttributeTypes: {40}( 2.5.4.47 NAME 'enhancedSearchGuide' DESC 'RFC2256: + enhanced search guide' SYNTAX 1.3.6.1.4.1.1466.115.121.1.21 ) +olcAttributeTypes: {41}( 2.5.4.48 NAME 'protocolInformation' DESC 'RFC2256: + protocol information' EQUALITY protocolInformationMatch SYNTAX 1.3.6.1.4.1. 
+ 1466.115.121.1.42 ) +olcAttributeTypes: {42}( 2.5.4.50 NAME 'uniqueMember' DESC 'RFC2256: unique + member of a group' EQUALITY uniqueMemberMatch SYNTAX 1.3.6.1.4.1.1466.115.1 + 21.1.34 ) +olcAttributeTypes: {43}( 2.5.4.51 NAME 'houseIdentifier' DESC 'RFC2256: hous + e identifier' EQUALITY caseIgnoreMatch SUBSTR caseIgnoreSubstringsMatch SYN + TAX 1.3.6.1.4.1.1466.115.121.1.15{32768} ) +olcAttributeTypes: {44}( 2.5.4.52 NAME 'supportedAlgorithms' DESC 'RFC2256: + supported algorithms' SYNTAX 1.3.6.1.4.1.1466.115.121.1.49 ) +olcAttributeTypes: {45}( 2.5.4.53 NAME 'deltaRevocationList' DESC 'RFC2256: + delta revocation list; use ;binary' SYNTAX 1.3.6.1.4.1.1466.115.121.1.9 ) +olcAttributeTypes: {46}( 2.5.4.54 NAME 'dmdName' DESC 'RFC2256: name of DMD' + SUP name ) +olcAttributeTypes: {47}( 2.5.4.65 NAME 'pseudonym' DESC 'X.520(4th): pseudon + ym for the object' SUP name ) +olcAttributeTypes: {48}( 0.9.2342.19200300.100.1.3 NAME ( 'mail' 'rfc822Mail + box' ) DESC 'RFC1274: RFC822 Mailbox' EQUALITY caseIgnoreIA5Match SUBSTR ca + seIgnoreIA5SubstringsMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.26{256} ) +olcAttributeTypes: {49}( 0.9.2342.19200300.100.1.25 NAME ( 'dc' 'domainCompo + nent' ) DESC 'RFC1274/2247: domain component' EQUALITY caseIgnoreIA5Match S + UBSTR caseIgnoreIA5SubstringsMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.26 SIN + GLE-VALUE ) +olcAttributeTypes: {50}( 0.9.2342.19200300.100.1.37 NAME 'associatedDomain' + DESC 'RFC1274: domain associated with object' EQUALITY caseIgnoreIA5Match S + UBSTR caseIgnoreIA5SubstringsMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.26 ) +olcAttributeTypes: {51}( 1.2.840.113549.1.9.1 NAME ( 'email' 'emailAddress' + 'pkcs9email' ) DESC 'RFC3280: legacy attribute for email addresses in DNs' + EQUALITY caseIgnoreIA5Match SUBSTR caseIgnoreIA5SubstringsMatch SYNTAX 1.3. 
+ 6.1.4.1.1466.115.121.1.26{128} ) +olcObjectClasses: {0}( 2.5.6.2 NAME 'country' DESC 'RFC2256: a country' SUP + top STRUCTURAL MUST c MAY ( searchGuide $ description ) ) +olcObjectClasses: {1}( 2.5.6.3 NAME 'locality' DESC 'RFC2256: a locality' SU + P top STRUCTURAL MAY ( street $ seeAlso $ searchGuide $ st $ l $ descriptio + n ) ) +olcObjectClasses: {2}( 2.5.6.4 NAME 'organization' DESC 'RFC2256: an organiz + ation' SUP top STRUCTURAL MUST o MAY ( userPassword $ searchGuide $ seeAlso + $ businessCategory $ x121Address $ registeredAddress $ destinationIndicato + r $ preferredDeliveryMethod $ telexNumber $ teletexTerminalIdentifier $ tel + ephoneNumber $ internationaliSDNNumber $ facsimileTelephoneNumber $ street + $ postOfficeBox $ postalCode $ postalAddress $ physicalDeliveryOfficeName $ + st $ l $ description ) ) +olcObjectClasses: {3}( 2.5.6.5 NAME 'organizationalUnit' DESC 'RFC2256: an o + rganizational unit' SUP top STRUCTURAL MUST ou MAY ( userPassword $ searchG + uide $ seeAlso $ businessCategory $ x121Address $ registeredAddress $ desti + nationIndicator $ preferredDeliveryMethod $ telexNumber $ teletexTerminalId + entifier $ telephoneNumber $ internationaliSDNNumber $ facsimileTelephoneNu + mber $ street $ postOfficeBox $ postalCode $ postalAddress $ physicalDelive + ryOfficeName $ st $ l $ description ) ) +olcObjectClasses: {4}( 2.5.6.6 NAME 'person' DESC 'RFC2256: a person' SUP to + p STRUCTURAL MUST ( sn $ cn ) MAY ( userPassword $ telephoneNumber $ seeAls + o $ description ) ) +olcObjectClasses: {5}( 2.5.6.7 NAME 'organizationalPerson' DESC 'RFC2256: an + organizational person' SUP person STRUCTURAL MAY ( title $ x121Address $ r + egisteredAddress $ destinationIndicator $ preferredDeliveryMethod $ telexNu + mber $ teletexTerminalIdentifier $ telephoneNumber $ internationaliSDNNumbe + r $ facsimileTelephoneNumber $ street $ postOfficeBox $ postalCode $ postal + Address $ physicalDeliveryOfficeName $ ou $ st $ l ) ) +olcObjectClasses: {6}( 2.5.6.8 NAME 
'organizationalRole' DESC 'RFC2256: an o + rganizational role' SUP top STRUCTURAL MUST cn MAY ( x121Address $ register + edAddress $ destinationIndicator $ preferredDeliveryMethod $ telexNumber $ + teletexTerminalIdentifier $ telephoneNumber $ internationaliSDNNumber $ fac + simileTelephoneNumber $ seeAlso $ roleOccupant $ preferredDeliveryMethod $ + street $ postOfficeBox $ postalCode $ postalAddress $ physicalDeliveryOffic + eName $ ou $ st $ l $ description ) ) +olcObjectClasses: {7}( 2.5.6.9 NAME 'groupOfNames' DESC 'RFC2256: a group of + names (DNs)' SUP top STRUCTURAL MUST ( member $ cn ) MAY ( businessCategor + y $ seeAlso $ owner $ ou $ o $ description ) ) +olcObjectClasses: {8}( 2.5.6.10 NAME 'residentialPerson' DESC 'RFC2256: an r + esidential person' SUP person STRUCTURAL MUST l MAY ( businessCategory $ x1 + 21Address $ registeredAddress $ destinationIndicator $ preferredDeliveryMet + hod $ telexNumber $ teletexTerminalIdentifier $ telephoneNumber $ internati + onaliSDNNumber $ facsimileTelephoneNumber $ preferredDeliveryMethod $ stree + t $ postOfficeBox $ postalCode $ postalAddress $ physicalDeliveryOfficeName + $ st $ l ) ) +olcObjectClasses: {9}( 2.5.6.11 NAME 'applicationProcess' DESC 'RFC2256: an + application process' SUP top STRUCTURAL MUST cn MAY ( seeAlso $ ou $ l $ de + scription ) ) +olcObjectClasses: {10}( 2.5.6.12 NAME 'applicationEntity' DESC 'RFC2256: an + application entity' SUP top STRUCTURAL MUST ( presentationAddress $ cn ) MA + Y ( supportedApplicationContext $ seeAlso $ ou $ o $ l $ description ) ) +olcObjectClasses: {11}( 2.5.6.13 NAME 'dSA' DESC 'RFC2256: a directory syste + m agent (a server)' SUP applicationEntity STRUCTURAL MAY knowledgeInformati + on ) +olcObjectClasses: {12}( 2.5.6.14 NAME 'device' DESC 'RFC2256: a device' SUP + top STRUCTURAL MUST cn MAY ( serialNumber $ seeAlso $ owner $ ou $ o $ l $ + description ) ) +olcObjectClasses: {13}( 2.5.6.15 NAME 'strongAuthenticationUser' DESC 'RFC22 + 56: a strong authentication 
user' SUP top AUXILIARY MUST userCertificate ) +olcObjectClasses: {14}( 2.5.6.16 NAME 'certificationAuthority' DESC 'RFC2256 + : a certificate authority' SUP top AUXILIARY MUST ( authorityRevocationList + $ certificateRevocationList $ cACertificate ) MAY crossCertificatePair ) +olcObjectClasses: {15}( 2.5.6.17 NAME 'groupOfUniqueNames' DESC 'RFC2256: a + group of unique names (DN and Unique Identifier)' SUP top STRUCTURAL MUST ( + uniqueMember $ cn ) MAY ( businessCategory $ seeAlso $ owner $ ou $ o $ de + scription ) ) +olcObjectClasses: {16}( 2.5.6.18 NAME 'userSecurityInformation' DESC 'RFC225 + 6: a user security information' SUP top AUXILIARY MAY supportedAlgorithms ) +olcObjectClasses: {17}( 2.5.6.16.2 NAME 'certificationAuthority-V2' SUP cert + ificationAuthority AUXILIARY MAY deltaRevocationList ) +olcObjectClasses: {18}( 2.5.6.19 NAME 'cRLDistributionPoint' SUP top STRUCTU + RAL MUST cn MAY ( certificateRevocationList $ authorityRevocationList $ del + taRevocationList ) ) +olcObjectClasses: {19}( 2.5.6.20 NAME 'dmd' SUP top STRUCTURAL MUST dmdName + MAY ( userPassword $ searchGuide $ seeAlso $ businessCategory $ x121Address + $ registeredAddress $ destinationIndicator $ preferredDeliveryMethod $ tel + exNumber $ teletexTerminalIdentifier $ telephoneNumber $ internationaliSDNN + umber $ facsimileTelephoneNumber $ street $ postOfficeBox $ postalCode $ po + stalAddress $ physicalDeliveryOfficeName $ st $ l $ description ) ) +olcObjectClasses: {20}( 2.5.6.21 NAME 'pkiUser' DESC 'RFC2587: a PKI user' S + UP top AUXILIARY MAY userCertificate ) +olcObjectClasses: {21}( 2.5.6.22 NAME 'pkiCA' DESC 'RFC2587: PKI certificate + authority' SUP top AUXILIARY MAY ( authorityRevocationList $ certificateRe + vocationList $ cACertificate $ crossCertificatePair ) ) +olcObjectClasses: {22}( 2.5.6.23 NAME 'deltaCRL' DESC 'RFC2587: PKI user' SU + P top AUXILIARY MAY deltaRevocationList ) +olcObjectClasses: {23}( 1.3.6.1.4.1.250.3.15 NAME 'labeledURIObject' DESC 'R + FC2079: 
object that contains the URI attribute type' SUP top AUXILIARY MAY + labeledURI ) +olcObjectClasses: {24}( 0.9.2342.19200300.100.4.19 NAME 'simpleSecurityObjec + t' DESC 'RFC1274: simple security object' SUP top AUXILIARY MUST userPasswo + rd ) +olcObjectClasses: {25}( 1.3.6.1.4.1.1466.344 NAME 'dcObject' DESC 'RFC2247: + domain component object' SUP top AUXILIARY MUST dc ) +olcObjectClasses: {26}( 1.3.6.1.1.3.1 NAME 'uidObject' DESC 'RFC2377: uid ob + ject' SUP top AUXILIARY MUST uid ) +structuralObjectClass: olcSchemaConfig +entryUUID: 7ed45fa6-f8e9-103c-8ec2-e51fa20c07f9 +creatorsName: cn=config +createTimestamp: 20221115042701Z +entryCSN: 20221115042701.652581Z#000000#000#000000 +modifiersName: cn=config +modifyTimestamp: 20221115042701Z diff --git a/dirsrvtests/tests/data/openldap_2_389/saslauthd/slapd.d/cn=config/cn=schema/cn={1}cosine.ldif b/dirsrvtests/tests/data/openldap_2_389/saslauthd/slapd.d/cn=config/cn=schema/cn={1}cosine.ldif new file mode 100644 index 0000000..63892e1 --- /dev/null +++ b/dirsrvtests/tests/data/openldap_2_389/saslauthd/slapd.d/cn=config/cn=schema/cn={1}cosine.ldif @@ -0,0 +1,178 @@ +# AUTO-GENERATED FILE - DO NOT EDIT!! Use ldapmodify. +# CRC32 90794340 +dn: cn={1}cosine +objectClass: olcSchemaConfig +cn: {1}cosine +olcAttributeTypes: {0}( 0.9.2342.19200300.100.1.2 NAME 'textEncodedORAddress + ' EQUALITY caseIgnoreMatch SUBSTR caseIgnoreSubstringsMatch SYNTAX 1.3.6.1. 
+ 4.1.1466.115.121.1.15{256} ) +olcAttributeTypes: {1}( 0.9.2342.19200300.100.1.4 NAME 'info' DESC 'RFC1274: + general information' EQUALITY caseIgnoreMatch SUBSTR caseIgnoreSubstringsM + atch SYNTAX 1.3.6.1.4.1.1466.115.121.1.15{2048} ) +olcAttributeTypes: {2}( 0.9.2342.19200300.100.1.5 NAME ( 'drink' 'favouriteD + rink' ) DESC 'RFC1274: favorite drink' EQUALITY caseIgnoreMatch SUBSTR case + IgnoreSubstringsMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.15{256} ) +olcAttributeTypes: {3}( 0.9.2342.19200300.100.1.6 NAME 'roomNumber' DESC 'RF + C1274: room number' EQUALITY caseIgnoreMatch SUBSTR caseIgnoreSubstringsMat + ch SYNTAX 1.3.6.1.4.1.1466.115.121.1.15{256} ) +olcAttributeTypes: {4}( 0.9.2342.19200300.100.1.7 NAME 'photo' DESC 'RFC1274 + : photo (G3 fax)' SYNTAX 1.3.6.1.4.1.1466.115.121.1.23{25000} ) +olcAttributeTypes: {5}( 0.9.2342.19200300.100.1.8 NAME 'userClass' DESC 'RFC + 1274: category of user' EQUALITY caseIgnoreMatch SUBSTR caseIgnoreSubstring + sMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.15{256} ) +olcAttributeTypes: {6}( 0.9.2342.19200300.100.1.9 NAME 'host' DESC 'RFC1274: + host computer' EQUALITY caseIgnoreMatch SUBSTR caseIgnoreSubstringsMatch S + YNTAX 1.3.6.1.4.1.1466.115.121.1.15{256} ) +olcAttributeTypes: {7}( 0.9.2342.19200300.100.1.10 NAME 'manager' DESC 'RFC1 + 274: DN of manager' EQUALITY distinguishedNameMatch SYNTAX 1.3.6.1.4.1.1466 + .115.121.1.12 ) +olcAttributeTypes: {8}( 0.9.2342.19200300.100.1.11 NAME 'documentIdentifier' + DESC 'RFC1274: unique identifier of document' EQUALITY caseIgnoreMatch SUB + STR caseIgnoreSubstringsMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.15{256} ) +olcAttributeTypes: {9}( 0.9.2342.19200300.100.1.12 NAME 'documentTitle' DESC + 'RFC1274: title of document' EQUALITY caseIgnoreMatch SUBSTR caseIgnoreSub + stringsMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.15{256} ) +olcAttributeTypes: {10}( 0.9.2342.19200300.100.1.13 NAME 'documentVersion' D + ESC 'RFC1274: version of document' EQUALITY caseIgnoreMatch SUBSTR caseIgno + 
reSubstringsMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.15{256} ) +olcAttributeTypes: {11}( 0.9.2342.19200300.100.1.14 NAME 'documentAuthor' DE + SC 'RFC1274: DN of author of document' EQUALITY distinguishedNameMatch SYNT + AX 1.3.6.1.4.1.1466.115.121.1.12 ) +olcAttributeTypes: {12}( 0.9.2342.19200300.100.1.15 NAME 'documentLocation' + DESC 'RFC1274: location of document original' EQUALITY caseIgnoreMatch SUBS + TR caseIgnoreSubstringsMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.15{256} ) +olcAttributeTypes: {13}( 0.9.2342.19200300.100.1.20 NAME ( 'homePhone' 'home + TelephoneNumber' ) DESC 'RFC1274: home telephone number' EQUALITY telephone + NumberMatch SUBSTR telephoneNumberSubstringsMatch SYNTAX 1.3.6.1.4.1.1466.1 + 15.121.1.50 ) +olcAttributeTypes: {14}( 0.9.2342.19200300.100.1.21 NAME 'secretary' DESC 'R + FC1274: DN of secretary' EQUALITY distinguishedNameMatch SYNTAX 1.3.6.1.4.1 + .1466.115.121.1.12 ) +olcAttributeTypes: {15}( 0.9.2342.19200300.100.1.22 NAME 'otherMailbox' SYNT + AX 1.3.6.1.4.1.1466.115.121.1.39 ) +olcAttributeTypes: {16}( 0.9.2342.19200300.100.1.26 NAME 'aRecord' EQUALITY + caseIgnoreIA5Match SYNTAX 1.3.6.1.4.1.1466.115.121.1.26 ) +olcAttributeTypes: {17}( 0.9.2342.19200300.100.1.27 NAME 'mDRecord' EQUALITY + caseIgnoreIA5Match SYNTAX 1.3.6.1.4.1.1466.115.121.1.26 ) +olcAttributeTypes: {18}( 0.9.2342.19200300.100.1.28 NAME 'mXRecord' EQUALITY + caseIgnoreIA5Match SYNTAX 1.3.6.1.4.1.1466.115.121.1.26 ) +olcAttributeTypes: {19}( 0.9.2342.19200300.100.1.29 NAME 'nSRecord' EQUALITY + caseIgnoreIA5Match SYNTAX 1.3.6.1.4.1.1466.115.121.1.26 ) +olcAttributeTypes: {20}( 0.9.2342.19200300.100.1.30 NAME 'sOARecord' EQUALIT + Y caseIgnoreIA5Match SYNTAX 1.3.6.1.4.1.1466.115.121.1.26 ) +olcAttributeTypes: {21}( 0.9.2342.19200300.100.1.31 NAME 'cNAMERecord' EQUAL + ITY caseIgnoreIA5Match SYNTAX 1.3.6.1.4.1.1466.115.121.1.26 ) +olcAttributeTypes: {22}( 0.9.2342.19200300.100.1.38 NAME 'associatedName' DE + SC 'RFC1274: DN of entry associated with domain' EQUALITY 
distinguishedName + Match SYNTAX 1.3.6.1.4.1.1466.115.121.1.12 ) +olcAttributeTypes: {23}( 0.9.2342.19200300.100.1.39 NAME 'homePostalAddress' + DESC 'RFC1274: home postal address' EQUALITY caseIgnoreListMatch SUBSTR ca + seIgnoreListSubstringsMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.41 ) +olcAttributeTypes: {24}( 0.9.2342.19200300.100.1.40 NAME 'personalTitle' DES + C 'RFC1274: personal title' EQUALITY caseIgnoreMatch SUBSTR caseIgnoreSubst + ringsMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.15{256} ) +olcAttributeTypes: {25}( 0.9.2342.19200300.100.1.41 NAME ( 'mobile' 'mobileT + elephoneNumber' ) DESC 'RFC1274: mobile telephone number' EQUALITY telephon + eNumberMatch SUBSTR telephoneNumberSubstringsMatch SYNTAX 1.3.6.1.4.1.1466. + 115.121.1.50 ) +olcAttributeTypes: {26}( 0.9.2342.19200300.100.1.42 NAME ( 'pager' 'pagerTel + ephoneNumber' ) DESC 'RFC1274: pager telephone number' EQUALITY telephoneNu + mberMatch SUBSTR telephoneNumberSubstringsMatch SYNTAX 1.3.6.1.4.1.1466.115 + .121.1.50 ) +olcAttributeTypes: {27}( 0.9.2342.19200300.100.1.43 NAME ( 'co' 'friendlyCou + ntryName' ) DESC 'RFC1274: friendly country name' EQUALITY caseIgnoreMatch + SUBSTR caseIgnoreSubstringsMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.15 ) +olcAttributeTypes: {28}( 0.9.2342.19200300.100.1.44 NAME 'uniqueIdentifier' + DESC 'RFC1274: unique identifer' EQUALITY caseIgnoreMatch SYNTAX 1.3.6.1.4. 
+ 1.1466.115.121.1.15{256} ) +olcAttributeTypes: {29}( 0.9.2342.19200300.100.1.45 NAME 'organizationalStat + us' DESC 'RFC1274: organizational status' EQUALITY caseIgnoreMatch SUBSTR c + aseIgnoreSubstringsMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.15{256} ) +olcAttributeTypes: {30}( 0.9.2342.19200300.100.1.46 NAME 'janetMailbox' DESC + 'RFC1274: Janet mailbox' EQUALITY caseIgnoreIA5Match SUBSTR caseIgnoreIA5S + ubstringsMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.26{256} ) +olcAttributeTypes: {31}( 0.9.2342.19200300.100.1.47 NAME 'mailPreferenceOpti + on' DESC 'RFC1274: mail preference option' SYNTAX 1.3.6.1.4.1.1466.115.121. + 1.27 ) +olcAttributeTypes: {32}( 0.9.2342.19200300.100.1.48 NAME 'buildingName' DESC + 'RFC1274: name of building' EQUALITY caseIgnoreMatch SUBSTR caseIgnoreSubs + tringsMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.15{256} ) +olcAttributeTypes: {33}( 0.9.2342.19200300.100.1.49 NAME 'dSAQuality' DESC ' + RFC1274: DSA Quality' SYNTAX 1.3.6.1.4.1.1466.115.121.1.19 SINGLE-VALUE ) +olcAttributeTypes: {34}( 0.9.2342.19200300.100.1.50 NAME 'singleLevelQuality + ' DESC 'RFC1274: Single Level Quality' SYNTAX 1.3.6.1.4.1.1466.115.121.1.13 + SINGLE-VALUE ) +olcAttributeTypes: {35}( 0.9.2342.19200300.100.1.51 NAME 'subtreeMinimumQual + ity' DESC 'RFC1274: Subtree Mininum Quality' SYNTAX 1.3.6.1.4.1.1466.115.12 + 1.1.13 SINGLE-VALUE ) +olcAttributeTypes: {36}( 0.9.2342.19200300.100.1.52 NAME 'subtreeMaximumQual + ity' DESC 'RFC1274: Subtree Maximun Quality' SYNTAX 1.3.6.1.4.1.1466.115.12 + 1.1.13 SINGLE-VALUE ) +olcAttributeTypes: {37}( 0.9.2342.19200300.100.1.53 NAME 'personalSignature' + DESC 'RFC1274: Personal Signature (G3 fax)' SYNTAX 1.3.6.1.4.1.1466.115.12 + 1.1.23 ) +olcAttributeTypes: {38}( 0.9.2342.19200300.100.1.54 NAME 'dITRedirect' DESC + 'RFC1274: DIT Redirect' EQUALITY distinguishedNameMatch SYNTAX 1.3.6.1.4.1. 
+ 1466.115.121.1.12 ) +olcAttributeTypes: {39}( 0.9.2342.19200300.100.1.55 NAME 'audio' DESC 'RFC12 + 74: audio (u-law)' SYNTAX 1.3.6.1.4.1.1466.115.121.1.4{25000} ) +olcAttributeTypes: {40}( 0.9.2342.19200300.100.1.56 NAME 'documentPublisher' + DESC 'RFC1274: publisher of document' EQUALITY caseIgnoreMatch SUBSTR case + IgnoreSubstringsMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.15 ) +olcObjectClasses: {0}( 0.9.2342.19200300.100.4.4 NAME ( 'pilotPerson' 'newPi + lotPerson' ) SUP person STRUCTURAL MAY ( userid $ textEncodedORAddress $ rf + c822Mailbox $ favouriteDrink $ roomNumber $ userClass $ homeTelephoneNumber + $ homePostalAddress $ secretary $ personalTitle $ preferredDeliveryMethod + $ businessCategory $ janetMailbox $ otherMailbox $ mobileTelephoneNumber $ + pagerTelephoneNumber $ organizationalStatus $ mailPreferenceOption $ person + alSignature ) ) +olcObjectClasses: {1}( 0.9.2342.19200300.100.4.5 NAME 'account' SUP top STRU + CTURAL MUST userid MAY ( description $ seeAlso $ localityName $ organizatio + nName $ organizationalUnitName $ host ) ) +olcObjectClasses: {2}( 0.9.2342.19200300.100.4.6 NAME 'document' SUP top STR + UCTURAL MUST documentIdentifier MAY ( commonName $ description $ seeAlso $ + localityName $ organizationName $ organizationalUnitName $ documentTitle $ + documentVersion $ documentAuthor $ documentLocation $ documentPublisher ) ) +olcObjectClasses: {3}( 0.9.2342.19200300.100.4.7 NAME 'room' SUP top STRUCTU + RAL MUST commonName MAY ( roomNumber $ description $ seeAlso $ telephoneNum + ber ) ) +olcObjectClasses: {4}( 0.9.2342.19200300.100.4.9 NAME 'documentSeries' SUP t + op STRUCTURAL MUST commonName MAY ( description $ seeAlso $ telephonenumber + $ localityName $ organizationName $ organizationalUnitName ) ) +olcObjectClasses: {5}( 0.9.2342.19200300.100.4.13 NAME 'domain' SUP top STRU + CTURAL MUST domainComponent MAY ( associatedName $ organizationName $ descr + iption $ businessCategory $ seeAlso $ searchGuide $ userPassword $ locality + 
Name $ stateOrProvinceName $ streetAddress $ physicalDeliveryOfficeName $ p + ostalAddress $ postalCode $ postOfficeBox $ streetAddress $ facsimileTeleph + oneNumber $ internationalISDNNumber $ telephoneNumber $ teletexTerminalIden + tifier $ telexNumber $ preferredDeliveryMethod $ destinationIndicator $ reg + isteredAddress $ x121Address ) ) +olcObjectClasses: {6}( 0.9.2342.19200300.100.4.14 NAME 'RFC822localPart' SUP + domain STRUCTURAL MAY ( commonName $ surname $ description $ seeAlso $ tel + ephoneNumber $ physicalDeliveryOfficeName $ postalAddress $ postalCode $ po + stOfficeBox $ streetAddress $ facsimileTelephoneNumber $ internationalISDNN + umber $ telephoneNumber $ teletexTerminalIdentifier $ telexNumber $ preferr + edDeliveryMethod $ destinationIndicator $ registeredAddress $ x121Address ) + ) +olcObjectClasses: {7}( 0.9.2342.19200300.100.4.15 NAME 'dNSDomain' SUP domai + n STRUCTURAL MAY ( ARecord $ MDRecord $ MXRecord $ NSRecord $ SOARecord $ C + NAMERecord ) ) +olcObjectClasses: {8}( 0.9.2342.19200300.100.4.17 NAME 'domainRelatedObject' + DESC 'RFC1274: an object related to an domain' SUP top AUXILIARY MUST asso + ciatedDomain ) +olcObjectClasses: {9}( 0.9.2342.19200300.100.4.18 NAME 'friendlyCountry' SUP + country STRUCTURAL MUST friendlyCountryName ) +olcObjectClasses: {10}( 0.9.2342.19200300.100.4.20 NAME 'pilotOrganization' + SUP ( organization $ organizationalUnit ) STRUCTURAL MAY buildingName ) +olcObjectClasses: {11}( 0.9.2342.19200300.100.4.21 NAME 'pilotDSA' SUP dsa S + TRUCTURAL MAY dSAQuality ) +olcObjectClasses: {12}( 0.9.2342.19200300.100.4.22 NAME 'qualityLabelledData + ' SUP top AUXILIARY MUST dsaQuality MAY ( subtreeMinimumQuality $ subtreeMa + ximumQuality ) ) +structuralObjectClass: olcSchemaConfig +entryUUID: 7ed48b0c-f8e9-103c-8ec3-e51fa20c07f9 +creatorsName: cn=config +createTimestamp: 20221115042701Z +entryCSN: 20221115042701.652581Z#000000#000#000000 +modifiersName: cn=config +modifyTimestamp: 20221115042701Z diff --git 
a/dirsrvtests/tests/data/openldap_2_389/saslauthd/slapd.d/cn=config/cn=schema/cn={2}inetorgperson.ldif b/dirsrvtests/tests/data/openldap_2_389/saslauthd/slapd.d/cn=config/cn=schema/cn={2}inetorgperson.ldif new file mode 100644 index 0000000..85a358e --- /dev/null +++ b/dirsrvtests/tests/data/openldap_2_389/saslauthd/slapd.d/cn=config/cn=schema/cn={2}inetorgperson.ldif @@ -0,0 +1,49 @@ +# AUTO-GENERATED FILE - DO NOT EDIT!! Use ldapmodify. +# CRC32 c2e2b45e +dn: cn={2}inetorgperson +objectClass: olcSchemaConfig +cn: {2}inetorgperson +olcAttributeTypes: {0}( 2.16.840.1.113730.3.1.1 NAME 'carLicense' DESC 'RFC2 + 798: vehicle license or registration plate' EQUALITY caseIgnoreMatch SUBSTR + caseIgnoreSubstringsMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.15 ) +olcAttributeTypes: {1}( 2.16.840.1.113730.3.1.2 NAME 'departmentNumber' DESC + 'RFC2798: identifies a department within an organization' EQUALITY caseIgn + oreMatch SUBSTR caseIgnoreSubstringsMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1 + .15 ) +olcAttributeTypes: {2}( 2.16.840.1.113730.3.1.241 NAME 'displayName' DESC 'R + FC2798: preferred name to be used when displaying entries' EQUALITY caseIgn + oreMatch SUBSTR caseIgnoreSubstringsMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1 + .15 SINGLE-VALUE ) +olcAttributeTypes: {3}( 2.16.840.1.113730.3.1.3 NAME 'employeeNumber' DESC ' + RFC2798: numerically identifies an employee within an organization' EQUALIT + Y caseIgnoreMatch SUBSTR caseIgnoreSubstringsMatch SYNTAX 1.3.6.1.4.1.1466. 
+ 115.121.1.15 SINGLE-VALUE ) +olcAttributeTypes: {4}( 2.16.840.1.113730.3.1.4 NAME 'employeeType' DESC 'RF + C2798: type of employment for a person' EQUALITY caseIgnoreMatch SUBSTR cas + eIgnoreSubstringsMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.15 ) +olcAttributeTypes: {5}( 0.9.2342.19200300.100.1.60 NAME 'jpegPhoto' DESC 'RF + C2798: a JPEG image' SYNTAX 1.3.6.1.4.1.1466.115.121.1.28 ) +olcAttributeTypes: {6}( 2.16.840.1.113730.3.1.39 NAME 'preferredLanguage' DE + SC 'RFC2798: preferred written or spoken language for a person' EQUALITY ca + seIgnoreMatch SUBSTR caseIgnoreSubstringsMatch SYNTAX 1.3.6.1.4.1.1466.115. + 121.1.15 SINGLE-VALUE ) +olcAttributeTypes: {7}( 2.16.840.1.113730.3.1.40 NAME 'userSMIMECertificate' + DESC 'RFC2798: PKCS#7 SignedData used to support S/MIME' SYNTAX 1.3.6.1.4. + 1.1466.115.121.1.5 ) +olcAttributeTypes: {8}( 2.16.840.1.113730.3.1.216 NAME 'userPKCS12' DESC 'RF + C2798: personal identity information, a PKCS #12 PFX' SYNTAX 1.3.6.1.4.1.14 + 66.115.121.1.5 ) +olcObjectClasses: {0}( 2.16.840.1.113730.3.2.2 NAME 'inetOrgPerson' DESC 'RF + C2798: Internet Organizational Person' SUP organizationalPerson STRUCTURAL + MAY ( audio $ businessCategory $ carLicense $ departmentNumber $ displayNam + e $ employeeNumber $ employeeType $ givenName $ homePhone $ homePostalAddre + ss $ initials $ jpegPhoto $ labeledURI $ mail $ manager $ mobile $ o $ page + r $ photo $ roomNumber $ secretary $ uid $ userCertificate $ x500uniqueIden + tifier $ preferredLanguage $ userSMIMECertificate $ userPKCS12 ) ) +structuralObjectClass: olcSchemaConfig +entryUUID: 7ed4987c-f8e9-103c-8ec4-e51fa20c07f9 +creatorsName: cn=config +createTimestamp: 20221115042701Z +entryCSN: 20221115042701.652581Z#000000#000#000000 +modifiersName: cn=config +modifyTimestamp: 20221115042701Z diff --git a/dirsrvtests/tests/data/openldap_2_389/saslauthd/slapd.d/cn=config/cn=schema/cn={3}rfc2307bis.ldif 
b/dirsrvtests/tests/data/openldap_2_389/saslauthd/slapd.d/cn=config/cn=schema/cn={3}rfc2307bis.ldif new file mode 100644 index 0000000..7f86655 --- /dev/null +++ b/dirsrvtests/tests/data/openldap_2_389/saslauthd/slapd.d/cn=config/cn=schema/cn={3}rfc2307bis.ldif @@ -0,0 +1,155 @@ +# AUTO-GENERATED FILE - DO NOT EDIT!! Use ldapmodify. +# CRC32 3dd68676 +dn: cn={3}rfc2307bis +objectClass: olcSchemaConfig +cn: {3}rfc2307bis +olcAttributeTypes: {0}( 1.3.6.1.1.1.1.2 NAME 'gecos' DESC 'The GECOS field; + the common name' EQUALITY caseIgnoreIA5Match SUBSTR caseIgnoreIA5Substrings + Match SYNTAX 1.3.6.1.4.1.1466.115.121.1.26 SINGLE-VALUE ) +olcAttributeTypes: {1}( 1.3.6.1.1.1.1.3 NAME 'homeDirectory' DESC 'The absol + ute path to the home directory' EQUALITY caseExactIA5Match SYNTAX 1.3.6.1.4 + .1.1466.115.121.1.26 SINGLE-VALUE ) +olcAttributeTypes: {2}( 1.3.6.1.1.1.1.4 NAME 'loginShell' DESC 'The path to + the login shell' EQUALITY caseExactIA5Match SYNTAX 1.3.6.1.4.1.1466.115.121 + .1.26 SINGLE-VALUE ) +olcAttributeTypes: {3}( 1.3.6.1.1.1.1.5 NAME 'shadowLastChange' EQUALITY int + egerMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.27 SINGLE-VALUE ) +olcAttributeTypes: {4}( 1.3.6.1.1.1.1.6 NAME 'shadowMin' EQUALITY integerMat + ch SYNTAX 1.3.6.1.4.1.1466.115.121.1.27 SINGLE-VALUE ) +olcAttributeTypes: {5}( 1.3.6.1.1.1.1.7 NAME 'shadowMax' EQUALITY integerMat + ch SYNTAX 1.3.6.1.4.1.1466.115.121.1.27 SINGLE-VALUE ) +olcAttributeTypes: {6}( 1.3.6.1.1.1.1.8 NAME 'shadowWarning' EQUALITY intege + rMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.27 SINGLE-VALUE ) +olcAttributeTypes: {7}( 1.3.6.1.1.1.1.9 NAME 'shadowInactive' EQUALITY integ + erMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.27 SINGLE-VALUE ) +olcAttributeTypes: {8}( 1.3.6.1.1.1.1.10 NAME 'shadowExpire' EQUALITY intege + rMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.27 SINGLE-VALUE ) +olcAttributeTypes: {9}( 1.3.6.1.1.1.1.11 NAME 'shadowFlag' EQUALITY integerM + atch SYNTAX 1.3.6.1.4.1.1466.115.121.1.27 SINGLE-VALUE ) +olcAttributeTypes: 
{10}( 1.3.6.1.1.1.1.12 NAME 'memberUid' EQUALITY caseExac + tIA5Match SYNTAX 1.3.6.1.4.1.1466.115.121.1.26 ) +olcAttributeTypes: {11}( 1.3.6.1.1.1.1.13 NAME 'memberNisNetgroup' EQUALITY + caseExactIA5Match SUBSTR caseExactIA5SubstringsMatch SYNTAX 1.3.6.1.4.1.146 + 6.115.121.1.26 ) +olcAttributeTypes: {12}( 1.3.6.1.1.1.1.14 NAME 'nisNetgroupTriple' DESC 'Net + group triple' EQUALITY caseIgnoreIA5Match SYNTAX 1.3.6.1.4.1.1466.115.121.1 + .26 ) +olcAttributeTypes: {13}( 1.3.6.1.1.1.1.15 NAME 'ipServicePort' DESC 'Service + port number' EQUALITY integerMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.27 SI + NGLE-VALUE ) +olcAttributeTypes: {14}( 1.3.6.1.1.1.1.16 NAME 'ipServiceProtocol' DESC 'Ser + vice protocol name' SUP name ) +olcAttributeTypes: {15}( 1.3.6.1.1.1.1.17 NAME 'ipProtocolNumber' DESC 'IP p + rotocol number' EQUALITY integerMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.27 + SINGLE-VALUE ) +olcAttributeTypes: {16}( 1.3.6.1.1.1.1.18 NAME 'oncRpcNumber' DESC 'ONC RPC + number' EQUALITY integerMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.27 SINGLE-V + ALUE ) +olcAttributeTypes: {17}( 1.3.6.1.1.1.1.19 NAME 'ipHostNumber' DESC 'IPv4 add + resses as a dotted decimal omitting leading zeros or IPv6 addresses + as defined in RFC2373' SUP name ) +olcAttributeTypes: {18}( 1.3.6.1.1.1.1.20 NAME 'ipNetworkNumber' DESC 'IP ne + twork as a dotted decimal, eg. 192.168, omitting leading zeros' SUP + name SINGLE-VALUE ) +olcAttributeTypes: {19}( 1.3.6.1.1.1.1.21 NAME 'ipNetmaskNumber' DESC 'IP ne + tmask as a dotted decimal, eg. 255.255.255.0, omitting leading zeros + ' EQUALITY caseIgnoreIA5Match SYNTAX 1.3.6.1.4.1.1466.115.121.1.26 SINGLE-V + ALUE ) +olcAttributeTypes: {20}( 1.3.6.1.1.1.1.22 NAME 'macAddress' DESC 'MAC addres + s in maximal, colon separated hex notation, eg. 
00:00:92:90:ee:e2' E + QUALITY caseIgnoreIA5Match SYNTAX 1.3.6.1.4.1.1466.115.121.1.26 ) +olcAttributeTypes: {21}( 1.3.6.1.1.1.1.23 NAME 'bootParameter' DESC 'rpc.boo + tparamd parameter' EQUALITY caseExactIA5Match SYNTAX 1.3.6.1.4.1.1466.115.1 + 21.1.26 ) +olcAttributeTypes: {22}( 1.3.6.1.1.1.1.24 NAME 'bootFile' DESC 'Boot image n + ame' EQUALITY caseExactIA5Match SYNTAX 1.3.6.1.4.1.1466.115.121.1.26 ) +olcAttributeTypes: {23}( 1.3.6.1.1.1.1.26 NAME 'nisMapName' DESC 'Name of a + A generic NIS map' SUP name ) +olcAttributeTypes: {24}( 1.3.6.1.1.1.1.27 NAME 'nisMapEntry' DESC 'A generic + NIS entry' EQUALITY caseExactIA5Match SUBSTR caseExactIA5SubstringsMatch S + YNTAX 1.3.6.1.4.1.1466.115.121.1.26 SINGLE-VALUE ) +olcAttributeTypes: {25}( 1.3.6.1.1.1.1.28 NAME 'nisPublicKey' DESC 'NIS publ + ic key' EQUALITY octetStringMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.40 SING + LE-VALUE ) +olcAttributeTypes: {26}( 1.3.6.1.1.1.1.29 NAME 'nisSecretKey' DESC 'NIS secr + et key' EQUALITY octetStringMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.40 SING + LE-VALUE ) +olcAttributeTypes: {27}( 1.3.6.1.1.1.1.30 NAME 'nisDomain' DESC 'NIS domain' + EQUALITY caseIgnoreIA5Match SYNTAX 1.3.6.1.4.1.1466.115.121.1.26 ) +olcAttributeTypes: {28}( 1.3.6.1.1.1.1.31 NAME 'automountMapName' DESC 'auto + mount Map Name' EQUALITY caseExactIA5Match SUBSTR caseExactIA5SubstringsMat + ch SYNTAX 1.3.6.1.4.1.1466.115.121.1.26 SINGLE-VALUE ) +olcAttributeTypes: {29}( 1.3.6.1.1.1.1.32 NAME 'automountKey' DESC 'Automoun + t Key value' EQUALITY caseExactIA5Match SUBSTR caseExactIA5SubstringsMatch + SYNTAX 1.3.6.1.4.1.1466.115.121.1.26 SINGLE-VALUE ) +olcAttributeTypes: {30}( 1.3.6.1.1.1.1.33 NAME 'automountInformation' DESC ' + Automount information' EQUALITY caseExactIA5Match SUBSTR caseExactIA5Substr + ingsMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.26 SINGLE-VALUE ) +olcObjectClasses: {0}( 1.3.6.1.1.1.2.0 NAME 'posixAccount' DESC 'Abstraction + of an account with POSIX attributes' SUP top AUXILIARY MUST ( cn $ 
uid $ u + idNumber $ gidNumber $ homeDirectory ) MAY ( userPassword $ loginShell $ ge + cos $ description ) ) +olcObjectClasses: {1}( 1.3.6.1.1.1.2.1 NAME 'shadowAccount' DESC 'Additional + attributes for shadow passwords' SUP top AUXILIARY MUST uid MAY ( userPass + word $ description $ shadowLastChange $ shadowMin $ shadowMax $ shadowWarni + ng $ shadowInactive $ shadowExpire $ shadowFlag ) ) +olcObjectClasses: {2}( 1.3.6.1.1.1.2.2 NAME 'posixGroup' DESC 'Abstraction o + f a group of accounts' SUP top AUXILIARY MUST gidNumber MAY ( userPassword + $ memberUid $ description ) ) +olcObjectClasses: {3}( 1.3.6.1.1.1.2.3 NAME 'ipService' DESC 'Abstraction an + Internet Protocol service. Maps an IP port and protocol (such as tc + p or udp) to one or more names; the distinguished value of th + e cn attribute denotes the services canonical name' SUP top STRUCTUR + AL MUST ( cn $ ipServicePort $ ipServiceProtocol ) MAY description ) +olcObjectClasses: {4}( 1.3.6.1.1.1.2.4 NAME 'ipProtocol' DESC 'Abstraction o + f an IP protocol. Maps a protocol number to one or more names. The d + istinguished value of the cn attribute denotes the protocols canonic + al name' SUP top STRUCTURAL MUST ( cn $ ipProtocolNumber ) MAY description + ) +olcObjectClasses: {5}( 1.3.6.1.1.1.2.5 NAME 'oncRpc' DESC 'Abstraction of an + Open Network Computing (ONC) [RFC1057] Remote Procedure Call (RPC) b + inding. This class maps an ONC RPC number to a name. The distin + guished value of the cn attribute denotes the RPC services canonical + name' SUP top STRUCTURAL MUST ( cn $ oncRpcNumber ) MAY description ) +olcObjectClasses: {6}( 1.3.6.1.1.1.2.6 NAME 'ipHost' DESC 'Abstraction of a + host, an IP device. The distinguished value of the cn attribute deno + tes the hosts canonical name. 
Device SHOULD be used as a structural + class' SUP top AUXILIARY MUST ( cn $ ipHostNumber ) MAY ( userPassword $ l + $ description $ manager ) ) +olcObjectClasses: {7}( 1.3.6.1.1.1.2.7 NAME 'ipNetwork' DESC 'Abstraction of + a network. The distinguished value of the cn attribute denotes the + networks canonical name' SUP top STRUCTURAL MUST ipNetworkNumber MAY ( cn $ + ipNetmaskNumber $ l $ description $ manager ) ) +olcObjectClasses: {8}( 1.3.6.1.1.1.2.8 NAME 'nisNetgroup' DESC 'Abstraction + of a netgroup. May refer to other netgroups' SUP top STRUCTURAL MUST cn MAY + ( nisNetgroupTriple $ memberNisNetgroup $ description ) ) +olcObjectClasses: {9}( 1.3.6.1.1.1.2.9 NAME 'nisMap' DESC 'A generic abstrac + tion of a NIS map' SUP top STRUCTURAL MUST nisMapName MAY description ) +olcObjectClasses: {10}( 1.3.6.1.1.1.2.10 NAME 'nisObject' DESC 'An entry in + a NIS map' SUP top STRUCTURAL MUST ( cn $ nisMapEntry $ nisMapName ) MAY de + scription ) +olcObjectClasses: {11}( 1.3.6.1.1.1.2.11 NAME 'ieee802Device' DESC 'A device + with a MAC address; device SHOULD be used as a structural class' SU + P top AUXILIARY MAY macAddress ) +olcObjectClasses: {12}( 1.3.6.1.1.1.2.12 NAME 'bootableDevice' DESC 'A devic + e with boot parameters; device SHOULD be used as a structural class' + SUP top AUXILIARY MAY ( bootFile $ bootParameter ) ) +olcObjectClasses: {13}( 1.3.6.1.1.1.2.14 NAME 'nisKeyObject' DESC 'An object + with a public and secret key' SUP top AUXILIARY MUST ( cn $ nisPublicKey $ + nisSecretKey ) MAY ( uidNumber $ description ) ) +olcObjectClasses: {14}( 1.3.6.1.1.1.2.15 NAME 'nisDomainObject' DESC 'Associ + ates a NIS domain with a naming context' SUP top AUXILIARY MUST nisDomain ) +olcObjectClasses: {15}( 1.3.6.1.1.1.2.16 NAME 'automountMap' SUP top STRUCTU + RAL MUST automountMapName MAY description ) +olcObjectClasses: {16}( 1.3.6.1.1.1.2.17 NAME 'automount' DESC 'Automount in + formation' SUP top STRUCTURAL MUST ( automountKey $ automountInformation ) + MAY 
description ) +olcObjectClasses: {17}( 1.3.6.1.4.1.5322.13.1.1 NAME 'namedObject' SUP top S + TRUCTURAL MAY cn ) +structuralObjectClass: olcSchemaConfig +entryUUID: 7ed4a4a2-f8e9-103c-8ec5-e51fa20c07f9 +creatorsName: cn=config +createTimestamp: 20221115042701Z +entryCSN: 20221115042701.652581Z#000000#000#000000 +modifiersName: cn=config +modifyTimestamp: 20221115042701Z diff --git a/dirsrvtests/tests/data/openldap_2_389/saslauthd/slapd.d/cn=config/cn=schema/cn={4}yast.ldif b/dirsrvtests/tests/data/openldap_2_389/saslauthd/slapd.d/cn=config/cn=schema/cn={4}yast.ldif new file mode 100644 index 0000000..cc0f1e9 --- /dev/null +++ b/dirsrvtests/tests/data/openldap_2_389/saslauthd/slapd.d/cn=config/cn=schema/cn={4}yast.ldif @@ -0,0 +1,108 @@ +# AUTO-GENERATED FILE - DO NOT EDIT!! Use ldapmodify. +# CRC32 4ad098e5 +dn: cn={4}yast +objectClass: olcSchemaConfig +cn: {4}yast +olcObjectIdentifier: {0}SUSE 1.3.6.1.4.1.7057 +olcObjectIdentifier: {1}SUSE.YaST SUSE:10.1 +olcObjectIdentifier: {2}SUSE.YaST.ModuleConfig SUSE:10.1.2 +olcObjectIdentifier: {3}SUSE.YaST.ModuleConfig.OC SUSE.YaST.ModuleConfig:1 +olcObjectIdentifier: {4}SUSE.YaST.ModuleConfig.Attr SUSE.YaST.ModuleConfig:2 +olcAttributeTypes: {0}( SUSE.YaST.ModuleConfig.Attr:2 NAME 'suseDefaultBase' + DESC 'Base DN where new Objects should be created by default' EQUALITY dis + tinguishedNameMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.12 SINGLE-VALUE ) +olcAttributeTypes: {1}( SUSE.YaST.ModuleConfig.Attr:3 NAME 'suseNextUniqueId + ' DESC 'Next unused unique ID, can be used to generate directory wide uniqe + IDs' EQUALITY integerMatch ORDERING integerOrderingMatch SYNTAX 1.3.6.1.4. 
+ 1.1466.115.121.1.27 SINGLE-VALUE ) +olcAttributeTypes: {2}( SUSE.YaST.ModuleConfig.Attr:4 NAME 'suseMinUniqueId' + DESC 'lower Border for Unique IDs' EQUALITY integerMatch ORDERING integerO + rderingMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.27 SINGLE-VALUE ) +olcAttributeTypes: {3}( SUSE.YaST.ModuleConfig.Attr:5 NAME 'suseMaxUniqueId' + DESC 'upper Border for Unique IDs' EQUALITY integerMatch ORDERING integerO + rderingMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.27 SINGLE-VALUE ) +olcAttributeTypes: {4}( SUSE.YaST.ModuleConfig.Attr:6 NAME 'suseDefaultTempl + ate' DESC 'The DN of a template that should be used by default' EQUALITY di + stinguishedNameMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.12 SINGLE-VALUE ) +olcAttributeTypes: {5}( SUSE.YaST.ModuleConfig.Attr:7 NAME 'suseSearchFilter + ' DESC 'Search filter to localize Objects' SYNTAX 1.3.6.1.4.1.1466.115.121. + 1.15 SINGLE-VALUE ) +olcAttributeTypes: {6}( SUSE.YaST.ModuleConfig.Attr:11 NAME 'suseDefaultValu + e' DESC 'an Attribute-Value-Assertions to define defaults for specific Attr + ibutes' EQUALITY caseIgnoreMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.15 ) +olcAttributeTypes: {7}( SUSE.YaST.ModuleConfig.Attr:12 NAME 'suseNamingAttri + bute' DESC 'AttributeType that should be used as the RDN' EQUALITY caseIgno + reIA5Match SYNTAX 1.3.6.1.4.1.1466.115.121.1.26 SINGLE-VALUE ) +olcAttributeTypes: {8}( SUSE.YaST.ModuleConfig.Attr:15 NAME 'suseSecondaryGr + oup' DESC 'seconday group DN' EQUALITY distinguishedNameMatch SYNTAX 1.3.6. 
+ 1.4.1.1466.115.121.1.12 ) +olcAttributeTypes: {9}( SUSE.YaST.ModuleConfig.Attr:16 NAME 'suseMinPassword + Length' DESC 'minimum Password length for new users' EQUALITY integerMatch + ORDERING integerOrderingMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.27 SINGLE-V + ALUE ) +olcAttributeTypes: {10}( SUSE.YaST.ModuleConfig.Attr:17 NAME 'suseMaxPasswor + dLength' DESC 'maximum Password length for new users' EQUALITY integerMatch + ORDERING integerOrderingMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.27 SINGLE- + VALUE ) +olcAttributeTypes: {11}( SUSE.YaST.ModuleConfig.Attr:18 NAME 'susePasswordHa + sh' DESC 'Hash method to use for new users' EQUALITY caseIgnoreIA5Match SYN + TAX 1.3.6.1.4.1.1466.115.121.1.26 SINGLE-VALUE ) +olcAttributeTypes: {12}( SUSE.YaST.ModuleConfig.Attr:19 NAME 'suseSkelDir' D + ESC '' EQUALITY caseExactIA5Match SYNTAX 1.3.6.1.4.1.1466.115.121.1.26 ) +olcAttributeTypes: {13}( SUSE.YaST.ModuleConfig.Attr:20 NAME 'susePlugin' DE + SC 'plugin to use upon user/ group creation' EQUALITY caseIgnoreMatch SYNTA + X 1.3.6.1.4.1.1466.115.121.1.15 ) +olcAttributeTypes: {14}( SUSE.YaST.ModuleConfig.Attr:21 NAME 'suseMapAttribu + te' DESC '' EQUALITY caseIgnoreMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.15 ) +olcAttributeTypes: {15}( SUSE.YaST.ModuleConfig.Attr:22 NAME 'suseImapServer + ' DESC '' EQUALITY caseIgnoreMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.15 SIN + GLE-VALUE ) +olcAttributeTypes: {16}( SUSE.YaST.ModuleConfig.Attr:23 NAME 'suseImapAdmin' + DESC '' EQUALITY caseIgnoreMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.15 SING + LE-VALUE ) +olcAttributeTypes: {17}( SUSE.YaST.ModuleConfig.Attr:24 NAME 'suseImapDefaul + tQuota' DESC '' EQUALITY integerMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.27 + SINGLE-VALUE ) +olcAttributeTypes: {18}( SUSE.YaST.ModuleConfig.Attr:25 NAME 'suseImapUseSsl + ' DESC '' EQUALITY booleanMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.7 SINGLE- + VALUE ) +olcObjectClasses: {0}( SUSE.YaST.ModuleConfig.OC:2 NAME 'suseModuleConfigura + tion' DESC 'Contains 
configuration of Management Modules' SUP top STRUCTURA + L MUST cn MAY suseDefaultBase ) +olcObjectClasses: {1}( SUSE.YaST.ModuleConfig.OC:3 NAME 'suseUserConfigurati + on' DESC 'Configuration of user management tools' SUP suseModuleConfigurati + on STRUCTURAL MAY ( suseMinPasswordLength $ suseMaxPasswordLength $ susePas + swordHash $ suseSkelDir $ suseNextUniqueId $ suseMinUniqueId $ suseMaxUniqu + eId $ suseDefaultTemplate $ suseSearchFilter $ suseMapAttribute ) ) +olcObjectClasses: {2}( SUSE.YaST.ModuleConfig.OC:4 NAME 'suseObjectTemplate' + DESC 'Base Class for Object-Templates' SUP top STRUCTURAL MUST cn MAY ( su + sePlugin $ suseDefaultValue $ suseNamingAttribute ) ) +olcObjectClasses: {3}( SUSE.YaST.ModuleConfig.OC:5 NAME 'suseUserTemplate' D + ESC 'User object template' SUP suseObjectTemplate STRUCTURAL MUST cn MAY su + seSecondaryGroup ) +olcObjectClasses: {4}( SUSE.YaST.ModuleConfig.OC:6 NAME 'suseGroupTemplate' + DESC 'Group object template' SUP suseObjectTemplate STRUCTURAL MUST cn ) +olcObjectClasses: {5}( SUSE.YaST.ModuleConfig.OC:7 NAME 'suseGroupConfigurat + ion' DESC 'Configuration of user management tools' SUP suseModuleConfigurat + ion STRUCTURAL MAY ( suseNextUniqueId $ suseMinUniqueId $ suseMaxUniqueId $ + suseDefaultTemplate $ suseSearchFilter $ suseMapAttribute ) ) +olcObjectClasses: {6}( SUSE.YaST.ModuleConfig.OC:8 NAME 'suseCaConfiguration + ' DESC 'Configuration of CA management tools' SUP suseModuleConfiguration S + TRUCTURAL ) +olcObjectClasses: {7}( SUSE.YaST.ModuleConfig.OC:9 NAME 'suseDnsConfiguratio + n' DESC 'Configuration of mail server management tools' SUP suseModuleConfi + guration STRUCTURAL ) +olcObjectClasses: {8}( SUSE.YaST.ModuleConfig.OC:10 NAME 'suseDhcpConfigurat + ion' DESC 'Configuration of DHCP server management tools' SUP suseModuleCon + figuration STRUCTURAL ) +olcObjectClasses: {9}( SUSE.YaST.ModuleConfig.OC:11 NAME 'suseMailConfigurat + ion' DESC 'Configuration of IMAP user management tools' SUP suseModuleConfi + 
guration STRUCTURAL MUST ( suseImapServer $ suseImapAdmin $ suseImapDefault + Quota $ suseImapUseSsl ) ) +structuralObjectClass: olcSchemaConfig +entryUUID: 7ed4b19a-f8e9-103c-8ec6-e51fa20c07f9 +creatorsName: cn=config +createTimestamp: 20221115042701Z +entryCSN: 20221115042701.652581Z#000000#000#000000 +modifiersName: cn=config +modifyTimestamp: 20221115042701Z diff --git a/dirsrvtests/tests/data/openldap_2_389/saslauthd/slapd.d/cn=config/olcDatabase={-1}frontend.ldif b/dirsrvtests/tests/data/openldap_2_389/saslauthd/slapd.d/cn=config/olcDatabase={-1}frontend.ldif new file mode 100644 index 0000000..58c3877 --- /dev/null +++ b/dirsrvtests/tests/data/openldap_2_389/saslauthd/slapd.d/cn=config/olcDatabase={-1}frontend.ldif @@ -0,0 +1,25 @@ +# AUTO-GENERATED FILE - DO NOT EDIT!! Use ldapmodify. +# CRC32 5f134215 +dn: olcDatabase={-1}frontend +objectClass: olcDatabaseConfig +objectClass: olcFrontendConfig +olcDatabase: {-1}frontend +olcAccess: {0}to dn.base="" by * read +olcAccess: {1}to dn.base="cn=subschema" by * read +olcAccess: {2}to attrs=userPassword,userPKCS12 by self write by * auth +olcAccess: {3}to attrs=shadowLastChange by self write by * read +olcAccess: {4}to * by * read +olcAddContentAcl: FALSE +olcLastMod: TRUE +olcMaxDerefDepth: 0 +olcReadOnly: FALSE +olcSchemaDN: cn=Subschema +olcSyncUseSubentry: FALSE +olcMonitoring: FALSE +structuralObjectClass: olcDatabaseConfig +entryUUID: 7ed4b9b0-f8e9-103c-8ec7-e51fa20c07f9 +creatorsName: cn=config +createTimestamp: 20221115042701Z +entryCSN: 20221115042701.652581Z#000000#000#000000 +modifiersName: cn=config +modifyTimestamp: 20221115042701Z diff --git a/dirsrvtests/tests/data/openldap_2_389/saslauthd/slapd.d/cn=config/olcDatabase={0}config.ldif b/dirsrvtests/tests/data/openldap_2_389/saslauthd/slapd.d/cn=config/olcDatabase={0}config.ldif new file mode 100644 index 0000000..4301e84 --- /dev/null +++ b/dirsrvtests/tests/data/openldap_2_389/saslauthd/slapd.d/cn=config/olcDatabase={0}config.ldif @@ -0,0 +1,20 @@ +# 
AUTO-GENERATED FILE - DO NOT EDIT!! Use ldapmodify. +# CRC32 6008748c +dn: olcDatabase={0}config +objectClass: olcDatabaseConfig +olcDatabase: {0}config +olcAccess: {0}to * by * none +olcAddContentAcl: TRUE +olcLastMod: TRUE +olcMaxDerefDepth: 15 +olcReadOnly: FALSE +olcRootDN: cn=config +olcSyncUseSubentry: FALSE +olcMonitoring: FALSE +structuralObjectClass: olcDatabaseConfig +entryUUID: 7ed4bea6-f8e9-103c-8ec8-e51fa20c07f9 +creatorsName: cn=config +createTimestamp: 20221115042701Z +entryCSN: 20221115042701.652581Z#000000#000#000000 +modifiersName: cn=config +modifyTimestamp: 20221115042701Z diff --git a/dirsrvtests/tests/data/openldap_2_389/saslauthd/slapd.d/cn=config/olcDatabase={1}mdb.ldif b/dirsrvtests/tests/data/openldap_2_389/saslauthd/slapd.d/cn=config/olcDatabase={1}mdb.ldif new file mode 100644 index 0000000..3db6ba0 --- /dev/null +++ b/dirsrvtests/tests/data/openldap_2_389/saslauthd/slapd.d/cn=config/olcDatabase={1}mdb.ldif @@ -0,0 +1,31 @@ +# AUTO-GENERATED FILE - DO NOT EDIT!! Use ldapmodify. 
+# CRC32 1bf297e7 +dn: olcDatabase={1}mdb +objectClass: olcDatabaseConfig +objectClass: olcMdbConfig +olcDatabase: {1}mdb +olcSuffix: dc=ldapdom,dc=net +olcAddContentAcl: FALSE +olcLastMod: TRUE +olcMaxDerefDepth: 15 +olcReadOnly: FALSE +olcRootDN: cn=root,dc=ldapdom,dc=net +olcRootPW:: cGFzcw== +olcSyncUseSubentry: FALSE +olcMonitoring: FALSE +olcDbDirectory: /tmp/ldap +olcDbCheckpoint: 1024 5 +olcDbNoSync: FALSE +olcDbIndex: objectClass eq +olcDbMaxReaders: 0 +olcDbMaxSize: 10485760 +olcDbMode: 0600 +olcDbSearchStack: 16 +olcDbRtxnSize: 10000 +structuralObjectClass: olcMdbConfig +entryUUID: 7ed4c48c-f8e9-103c-8ec9-e51fa20c07f9 +creatorsName: cn=config +createTimestamp: 20221115042701Z +entryCSN: 20221115042701.652581Z#000000#000#000000 +modifiersName: cn=config +modifyTimestamp: 20221115042701Z diff --git a/dirsrvtests/tests/data/openldap_2_389/saslauthd/slapd.d/cn=config/olcDatabase={1}mdb/olcOverlay={0}memberof.ldif b/dirsrvtests/tests/data/openldap_2_389/saslauthd/slapd.d/cn=config/olcDatabase={1}mdb/olcOverlay={0}memberof.ldif new file mode 100644 index 0000000..f338491 --- /dev/null +++ b/dirsrvtests/tests/data/openldap_2_389/saslauthd/slapd.d/cn=config/olcDatabase={1}mdb/olcOverlay={0}memberof.ldif @@ -0,0 +1,15 @@ +# AUTO-GENERATED FILE - DO NOT EDIT!! Use ldapmodify. 
+# CRC32 4d7b0d30 +dn: olcOverlay={0}memberof +objectClass: olcOverlayConfig +objectClass: olcMemberOf +olcOverlay: {0}memberof +olcMemberOfDangling: ignore +olcMemberOfRefInt: FALSE +structuralObjectClass: olcMemberOf +entryUUID: 7ed4c928-f8e9-103c-8eca-e51fa20c07f9 +creatorsName: cn=config +createTimestamp: 20221115042701Z +entryCSN: 20221115042701.652581Z#000000#000#000000 +modifiersName: cn=config +modifyTimestamp: 20221115042701Z diff --git a/dirsrvtests/tests/data/openldap_2_389/saslauthd/slapd.d/cn=config/olcDatabase={1}mdb/olcOverlay={1}unique.ldif b/dirsrvtests/tests/data/openldap_2_389/saslauthd/slapd.d/cn=config/olcDatabase={1}mdb/olcOverlay={1}unique.ldif new file mode 100644 index 0000000..e996e52 --- /dev/null +++ b/dirsrvtests/tests/data/openldap_2_389/saslauthd/slapd.d/cn=config/olcDatabase={1}mdb/olcOverlay={1}unique.ldif @@ -0,0 +1,14 @@ +# AUTO-GENERATED FILE - DO NOT EDIT!! Use ldapmodify. +# CRC32 6651500c +dn: olcOverlay={1}unique +objectClass: olcOverlayConfig +objectClass: olcUniqueConfig +olcOverlay: {1}unique +olcUniqueURI: ldap:///?mail?sub? +structuralObjectClass: olcUniqueConfig +entryUUID: 7ed4e9d0-f8e9-103c-8ecb-e51fa20c07f9 +creatorsName: cn=config +createTimestamp: 20221115042701Z +entryCSN: 20221115042701.652581Z#000000#000#000000 +modifiersName: cn=config +modifyTimestamp: 20221115042701Z diff --git a/dirsrvtests/tests/data/openldap_2_389/saslauthd/slapd.d/cn=config/olcDatabase={1}mdb/olcOverlay={2}refint.ldif b/dirsrvtests/tests/data/openldap_2_389/saslauthd/slapd.d/cn=config/olcDatabase={1}mdb/olcOverlay={2}refint.ldif new file mode 100644 index 0000000..d62cf88 --- /dev/null +++ b/dirsrvtests/tests/data/openldap_2_389/saslauthd/slapd.d/cn=config/olcDatabase={1}mdb/olcOverlay={2}refint.ldif @@ -0,0 +1,15 @@ +# AUTO-GENERATED FILE - DO NOT EDIT!! Use ldapmodify. 
+# CRC32 e78c9fea +dn: olcOverlay={2}refint +objectClass: olcOverlayConfig +objectClass: olcRefintConfig +olcOverlay: {2}refint +olcRefintAttribute: member +olcRefintNothing: cn=admin,dc=example,dc=com +structuralObjectClass: olcRefintConfig +entryUUID: 7ed4ee94-f8e9-103c-8ecc-e51fa20c07f9 +creatorsName: cn=config +createTimestamp: 20221115042701Z +entryCSN: 20221115042701.652581Z#000000#000#000000 +modifiersName: cn=config +modifyTimestamp: 20221115042701Z diff --git a/dirsrvtests/tests/data/openldap_2_389/saslauthd/suffix.ldif b/dirsrvtests/tests/data/openldap_2_389/saslauthd/suffix.ldif new file mode 100644 index 0000000..02e9988 --- /dev/null +++ b/dirsrvtests/tests/data/openldap_2_389/saslauthd/suffix.ldif @@ -0,0 +1,130 @@ +dn: dc=ldapdom,dc=net +dc: ldapdom +objectClass: top +objectClass: domain +structuralObjectClass: domain +entryUUID: 1901b5a4-f8e7-103c-8e7f-bd7408fb505a +creatorsName: cn=root,dc=ldapdom,dc=net +createTimestamp: 20221115040951Z +entryCSN: 20221115040951.831536Z#000000#000#000000 +modifiersName: cn=root,dc=ldapdom,dc=net +modifyTimestamp: 20221115040951Z + +dn: ou=UnixUser,dc=ldapdom,dc=net +ou: People +ou: UnixUser +objectClass: top +objectClass: organizationalUnit +structuralObjectClass: organizationalUnit +entryUUID: 1901c6fc-f8e7-103c-8e80-bd7408fb505a +creatorsName: cn=root,dc=ldapdom,dc=net +createTimestamp: 20221115040951Z +entryCSN: 20221115040951.832003Z#000000#000#000000 +modifiersName: cn=root,dc=ldapdom,dc=net +modifyTimestamp: 20221115040951Z + +dn: ou=UnixGroup,dc=ldapdom,dc=net +ou: Group +ou: UnixGroup +objectClass: top +objectClass: organizationalUnit +structuralObjectClass: organizationalUnit +entryUUID: 1901d26e-f8e7-103c-8e81-bd7408fb505a +creatorsName: cn=root,dc=ldapdom,dc=net +createTimestamp: 20221115040951Z +entryCSN: 20221115040951.832297Z#000000#000#000000 +modifiersName: cn=root,dc=ldapdom,dc=net +modifyTimestamp: 20221115040951Z + +dn: uid=testuser1,ou=UnixUser,dc=ldapdom,dc=net +objectClass: account 
+objectClass: posixAccount +objectClass: top +objectClass: shadowAccount +uid: testuser1 +cn: testuser1 +userPassword:: e1NBU0x9d2lsbGlhbUBpZG0uYmxhY2toYXRzLm5ldC5hdQ== +loginShell: /bin/bash +uidNumber: 9000 +gidNumber: 8000 +homeDirectory: /tmp +structuralObjectClass: account +entryUUID: 1901e196-f8e7-103c-8e82-bd7408fb505a +creatorsName: cn=root,dc=ldapdom,dc=net +createTimestamp: 20221115040951Z +entryCSN: 20221115040951.832685Z#000000#000#000000 +modifiersName: cn=root,dc=ldapdom,dc=net +modifyTimestamp: 20221115040951Z + +dn: uid=testuser2,ou=UnixUser,dc=ldapdom,dc=net +objectClass: account +objectClass: posixAccount +objectClass: top +objectClass: shadowAccount +uid: testuser2 +cn: testuser2 +userPassword:: e2NyeXB0fSQ2JDdzeXFxLkVRJDY4aU9XRjBCVFdDMjRhS0Uwcko4Y1V0UGQyQ + 3M3SGtydXdqRWlrY0pBRDVkTk5FZ01NSjVKazd3MnNDMmhZVXdOMnM2NXNyVFFUVTgzQUR0Mi50 + NGww +loginShell: /bin/bash +uidNumber: 9001 +gidNumber: 8000 +homeDirectory: /tmp +structuralObjectClass: account +entryUUID: 1901e920-f8e7-103c-8e83-bd7408fb505a +creatorsName: cn=root,dc=ldapdom,dc=net +createTimestamp: 20221115040951Z +entryCSN: 20221115040951.832878Z#000000#000#000000 +modifiersName: cn=root,dc=ldapdom,dc=net +modifyTimestamp: 20221115040951Z + +dn: cn=group1,ou=UnixGroup,dc=ldapdom,dc=net +objectClass: groupOfNames +objectClass: posixGroup +objectClass: top +cn: group1 +gidNumber: 8000 +member: uid=testuser1,ou=UnixUser,dc=ldapdom,dc=net +member: uid=testuser2,ou=UnixUser,dc=ldapdom,dc=net +memberUid: 9000 +memberUid: 9001 +structuralObjectClass: groupOfNames +entryUUID: 1901f154-f8e7-103c-8e84-bd7408fb505a +creatorsName: cn=root,dc=ldapdom,dc=net +createTimestamp: 20221115040951Z +entryCSN: 20221115040951.833088Z#000000#000#000000 +modifiersName: cn=root,dc=ldapdom,dc=net +modifyTimestamp: 20221115040951Z + +dn: cn=group2,ou=UnixGroup,dc=ldapdom,dc=net +objectClass: groupOfNames +objectClass: posixGroup +objectClass: top +cn: group2 +gidNumber: 8001 +member: 
uid=testuser1,ou=UnixUser,dc=ldapdom,dc=net +memberUid: 9000 +structuralObjectClass: groupOfNames +entryUUID: 1901fa28-f8e7-103c-8e85-bd7408fb505a +creatorsName: cn=root,dc=ldapdom,dc=net +createTimestamp: 20221115040951Z +entryCSN: 20221115040951.833314Z#000000#000#000000 +modifiersName: cn=root,dc=ldapdom,dc=net +modifyTimestamp: 20221115040951Z + +dn: cn=group3,ou=UnixGroup,dc=ldapdom,dc=net +objectClass: groupOfNames +objectClass: posixGroup +objectClass: top +cn: group3 +gidNumber: 8002 +member: uid=testuser2,ou=UnixUser,dc=ldapdom,dc=net +memberUid: 9001 +structuralObjectClass: groupOfNames +entryUUID: 1902084c-f8e7-103c-8e86-bd7408fb505a +creatorsName: cn=root,dc=ldapdom,dc=net +createTimestamp: 20221115040951Z +entryCSN: 20221115040951.833676Z#000000#000#000000 +modifiersName: cn=root,dc=ldapdom,dc=net +modifyTimestamp: 20221115040951Z + diff --git a/dirsrvtests/tests/data/ticket47953/__init__.py b/dirsrvtests/tests/data/ticket47953/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/dirsrvtests/tests/data/ticket47953/ticket47953.ldif b/dirsrvtests/tests/data/ticket47953/ticket47953.ldif new file mode 100644 index 0000000..e59977e --- /dev/null +++ b/dirsrvtests/tests/data/ticket47953/ticket47953.ldif @@ -0,0 +1,27 @@ +dn: dc=example,dc=com +objectClass: top +objectClass: domain +dc: example +aci: (targetattr!="userPassword")(version 3.0; acl "Enable anonymous access"; + allow (read, search, compare) userdn="ldap:///anyone";) +aci: (targetattr="carLicense || description || displayName || facsimileTelepho + neNumber || homePhone || homePostalAddress || initials || jpegPhoto || labele + dURI || mail || mobile || pager || photo || postOfficeBox || postalAddress || + postalCode || preferredDeliveryMethod || preferredLanguage || registeredAddr + ess || roomNumber || secretary || seeAlso || st || street || telephoneNumber + || telexNumber || title || userCertificate || userPassword || userSMIMECertif + icate || x500UniqueIdentifier")(version 3.0; 
acl "Enable self write for commo + n attributes"; allow (write) userdn="ldap:///self";) +aci: (targetattr ="fffff")(version 3.0;acl "Directory Administrators Group";al + low (all) (groupdn = "ldap:///cn=Directory Administrators, dc=example,dc=com" + );) +aci: (targetattr="*")(version 3.0; acl "Configuration Administrators Group"; a + llow (all) groupdn="ldap:///cn=Configuration Administrators,ou=Groups,ou=Topo + logyManagement,o=NetscapeRoot";) +aci: (targetattr="*")(version 3.0; acl "Configuration Administrator"; allow (a + ll) userdn="ldap:///uid=admin,ou=Administrators,ou=TopologyManagement,o=Netsc + apeRoot";) +aci: (targetattr = "*")(version 3.0; acl "TEST ACI"; allow (writ + e) groupdn = "ldap:///cn=slapd-localhost,cn=389 Directory Server,cn=Server Gr + oup,cn=localhost.localdomain,ou=example.com,o=NetscapeRoot";) + diff --git a/dirsrvtests/tests/data/ticket47988/__init__.py b/dirsrvtests/tests/data/ticket47988/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/dirsrvtests/tests/data/ticket47988/schema_ipa3.3.tar.gz b/dirsrvtests/tests/data/ticket47988/schema_ipa3.3.tar.gz new file mode 100644 index 0000000000000000000000000000000000000000..2b309a04ee00be73556d5941c0bda10ba46d33f7 GIT binary patch literal 98049 zcmV)CK*GNtiwFq#8NyTm19M|&Wo=egPE;BB4VR8WMU1@XMM$-1z^{<$cTxGjT zqIpVoQu~1>DVZBe;>#nk_ZtWdNkkyP#UY8=+W-D`&jHK;phSw2pd>^pWpM@=Jl)gN z*BmH)dh_g88LrdQGW^>pmrv@-zwz~^QmvnqEA?`@R(VsdRN($Mr1AXF-n@x|(C`U) z6HYweGX7Y5-$V6((yR38q`dS3`|kMGc7v5`9xt6}X1gWFvZqh<4(#B1eQp0YDpl71 zjnmV5<+M_R{y(WUs&7d7iQa$j^*8VT!?%azYv;UsNm^I^AA|1sw-Nc;yK4Oc<)gVB zkiPHDd}Bf2x-3A|}|YxwkMvhpHg8ZPmvWe1^ePoj_#J0yl{ee!&=@IZoASP;1u z^`X6RPDx1pML@hM`*D6bCg;?pzTuF5G;wT`^lX#50VPI2mRJ(ZsYNC$cKaE=e<{RD%0*NJ$V|#CF*Ob8iXo=0=E+Z*9jR6H20hP9x`t zz@6l`?&#aqcm%CAFMp8Vnu9^}a`fY8xHp`8P)~0tZ^>RP9UB@3n7-kLD};2>8MMB^ z{mrl4UU&3^0E5oDqsz{4h=g1XNR#xNgHg9N?llLbKOXe2hMf`_Qu+|5ri|A?v`*@QB>{HuQPu zWxGe|oFme8&C(HRd?6zWtii79JBCS*$S}g^Yqjzb`RWBB-h0s`or(`^A 
z3VOOOF)YjG4T5wSBv?|@2H5n@q)T2t0Q$ZeA%7#80lfiZ1j3U~8xFMQ+p~Fyz#ZGY zX4R%=QO*=aQjmCF1LVRVAZ73dCeM<4(AXOOeK@%E+~f0pkF=nLgO4~c@RiDF!M8($ zrni{bE@M^z?b)vBM3xQAXCvm;J0d|e`JI}fWVK^2?2zBj#TPIy4c`FJ%Xx{Y?}hf3 zFy`zqRFK2%G1tffR3PzOXC;{7t>p)k1K*2gbNJT-5^pIC6N^r5myIJB+*9aRz+_Q3 zw5N75x~8@ROf(iy()r%%^he#ROK5J2QX=mOyvR4joALgDe8A#^=5pzKD7xMF4F?}f zFs9{g-V~Zri6#pJG>;3a9bgzJq9}OqM}Pwl1~uGZ`X2jbyFfOy6uBm6=+yTXLR;m$ zIUeq&5e2f3d>TYfh`(+rbvibH+6sra2dtPGI81PmdLWMRo8cGUaSP@MDse~wt3k-qZ~n;p?TG39{Gp&&4dCIUu^x9hy)+7th8f!gPK z??dY&seKpCJuh!YlfF56l60RJOHb50iqlM|mV@fnv51Evc=)e>t3 zgC02HIIH6s6FX$^43x-{Ea+kaR6;|R`x7zUe+I0zw?-)R<7l;{!B6A^sg^1yrO)+p zsZy#`YNxewsRn;mEA>Jbio=N+tz#1?hISxeaB&JLjNCg0#0E9Xss-M80l}4#jtstrs^8NSVjU$D}#<0S5R7U`a8QeB6+zqW#(ZwcSnGLm0F{L@gxLz<*Epg_{7}RRG-*m{s_VW&iE_PDrl2&=x-F8 zR`UX!m>!)P3~jQI^CwpiyKR0qxFQa18}1zG=&EGs8iGxW0!i0_VWK`bI3fq&=*qu=r~_^j ztpjERBNiGO+YtF+2R$8;RfR~fk^W93V)wo_0&6J(zlGs)Jm|%`oq-O>Z*+ogD46(w z2Aj~JEs1%0*mVW=FmAMInh;!sT}y%Q+W(2DlI6vKNBZyz9Ps}P0!SRJ3qxu1bXGLlQpBJ;w@o*e9DK%ejxr2>nmE z7v1}{&b^Lk0MP@)CmY#)kc=0T?vq^pC1{_5eZFYz@!V7T^Cj4~=|n{JOGd-T<0`s? 
z<_vI8xi5{j>c;lN2p6HTJ0WvOO1p|*ihO~L3hACbC@xoM^*UbF%3Jp_Qj zu#{)5mPr3ImzP@!RlZree-z}HFChRq8X{98b=eMbU_MA!MRkK%TqZ}$h2dXAF^56*jyI0MWj?9*1dc^dxV*Bf^r`xJX**u{JX3${ zdSLzMY5BB~n*Xcq=l@>l+Ry*(=l}NefBX5r{rum4{%=43x1axe$@xEKl+hUD&Otu; zhSW;i>(d33=jO24FA=aT3lO>`m{wD0z957NOUf46-aYu|>kWbdF z_(Fs#-=sh+i8)F&_%R#+3HS5FlD;w(Y)A;qXZ!DtL{ z&>?+?V)kvq*w1`L*7h_CBOntY@PZ&RO$rT53{08C;s{a#A{VPbuEm3NUj$5OMciE4 zV1aVE^rcj-)ghGr0;8x_h5#G?FEwPsKw4qqGmDkCP8$v_fy+uDhr=`hi`sh>IQuM ziUvtv;Q17Q@|Jb41!TKc8W1Zg5|CWFl7S^Fit4^(uW*i3VR5m*ntah^4mMF!6ukH( zu&z7x26U<+Oji~qc`=UhpqCF&S?l@);Br^JJ_zm%e0a2lJuvI}DAhZPg3FfG@b!6^ z;M0Jn>WY#{JOlujccB^7@TnDrfirvy!z741zAQ6R5^U%3;ap^Hn4g2m*E zZI8+qy|}6N1I4LfabHi>YnwqspJxPVmmI+OCEpbhAajjYK*r&kr|eAO$eLmUpbNIB0yxDc zTHUl3Tz6Bkpj&vr^aGeP_Y?)WXBkVra3fEbXKD-MEKVE@mmb>}qw`a(DQvX+zUTKm z{rovuSeT8rtu!QFRG2$znluQ~nBvmHk{NVrL{5k}mZ97h0%pQ2NVYFOk(vC`bQPBQn_&67mv8YOU zVHtp2Q~#pIJ?Df@B{8=yVYm6IoSw3!({RSF9g4+inFQeGJ1Kc?E|aW#)KQ_PyZ%s6 zi{eH|K4tfgIP6?dH7m#qQdu}w7$?8BAu7pAtw~nxN?DtHgS#m7O2qZXS{&jMJDzo z_`n4t?$fF0A!^FM6_d3s=Ir2&OP><`wmO9`AI>TjeOdA2k%1*W6PA=K5qxA@m7aGJ z_x-1EZ-_6gA5sTBYZIfqL=6qOc!-dUs}L3parG!;5cVtA-B;cb5BDv&qD_jfpDx|K z5BY3A*G*oV%xse5jUBzZ~)HQ6H9uAA-m9@#EXWC%#fEz7C)M1&14- z6(x_kF%4g=jC}u=2u*xL=eFt4cugMr^5IOJa{^pr%S`e1b;lAy)k6mp;w>9QesG(< zF9*93b#S4~s{$;skBy?Z;JQXJMHDkJULTC>xRcx13PygKk{VwAt+`pRB>c1kkggS* z=Z#sJoR7)eG23%Ts)~3n`>H=18#guHi3725E%|&!1oz zCX$55u?;V}7airpM+uVydxp{G0-|ALXCw~C45H-{d(1?N-tj#-sH(Vd8IG_C;fig<_7}10e|4-mOwK!C$57yA zl8|rtz>0dKT4!gR@}o(w{F!!QK0lp8LCiMfhf8~JfEI&|> zn_wGVViBhg#!7 z_kH{$@*RYivL>B7!whqb2`hgpM+8Fg_pG$t=?^-s=BU$7!+I93ZFuNu68mYIg9$`t z&XCt()o3tojmCp!?=4B4a52F>mSB8ZV3lV0rTKRx3Ch_Vkse}_chCf0N~{F8=`Oue zfIFrG4pcf89x0arYbXZ+>y!pshS zd|As>bT;hoca=V!lqaC2ZfehW0PF+jf7DJZ^(6mWE#vvGwaUpp|N9cx;UVevn}mIL zCA)iulG~$TFla;thI)|oiv$5vuIZ(6x2Xxkdq2N(u)>Jx2 zJ`7%JiO&Qmo4!XxR1seH=O$XC`OGo2-ADNQ(Lwa2!9iq#L zVP5xrdjZ~%w)#TWiQ$VW3L1)oC)nNM{P{!>5(i0~B3l@ng^*OS*% zl?8p@!*!^ABb;k25aond{Dr9htjRY7P4S`;jee{lan)as4PVx>q3uJQnZpY#COsO4 
zJNg-~jT4o!$_j3a&&Qgf76zJ^zEaDao9y5ORx!avoOq`X)NRhp*skQV{?3)AvBJA%BxzPp-E!Ks&Y^sk29@4#;?dj+Fj zGx|e5JxZ;PYx*m;LYXxYAM`Bi35{1V8E6Nqo83yxyH2H43pQ;Nx*3O1I7X zJTYmCJ2j-Hhu<(*Jw#(5Z+sop9`i-{ox9LC0BDX$BLys>OG@N_Zt`Xjj&=q^eRIG}d6^M=L#q>hXR=RbQhB=3+)si}_V;)cMPC)9}6y;-)2xiGCI{7=vd+L3YH=ngDHQ8j1oFC1V zdnNmqOqR;s!?}Y($#P2iQaMGfI7!T@r6M7=j$xT3Q>H>1Ppv|k3D(L}i4Yx0l30-C z>w@>VKF5gX)k*Llx9LCWN@XJHd&E)cpj6Dht28M|CGdY5;;Y)gkY=mL0c9h8RjW#& z$wt+jVOlu5E7(eY6nbF@m5=WW0?)L$xlL<-{i*o7%XcuSeV2Y>gNIHLBc^IaTegq^xrkb- zZl7yRNm;2=S~w(C%N-kQg8)!MCkU_sP)pC9cmLcyb}SS(NmQoPQxtFQ|MuE`}k{6iWm+_!Q{QuDZ^P8Ul)yL^md4Nv$ z|J>Q#+iU0jKlgT9Z~mXp@t2FbXXo)P)^z$!kc2GadZef^)eniPFOB_ro%KOczmYen zx))X#O5)r2BfA{1{hi(VE`=Wb?)TD@`26pU?K%~J=|1#=UvCC~-*7jL5;IL8kdLA} zQI)2wx(xMradMRY4FAI4KSAKiwt_U?a@`=yHkzBfoiR6_L|O8%(b!c@=>EO|OWJ5) zR*1qz!@9-9cs7s$G!An2m{ps&@GAbvb+*x}m)+J#SHd*IrM|JVNk{XljL}ae@4_F| zLmLLJkwwn4rfr7r^&g_lx!-7)4bS{UO3%&Ozo^MfJa1!nlNZaK85|?vOn!>L0rPC& znajRgSF!hC%sXS4nhunkKGs+&&i5$D7*1%ry_16(&qmXSvY8A$XS|Vr9RF-XODPR# ztZeMAE{VnpJ7RzP8y}}lgeC_ln=SZTk6$Jh>9KC$WcrORkhEQ{tRRpYrpG!s*fhyfVvZwRrY*22 z^`p9#;`S4IRF6D*pmK%OlfI7;Y_PO=q-!cnCcE2I&6EOP8_oJ$G~Ftt>49y$UG7B8 z!pAUlu95s?gg7t@$~r;?0zG9<EkD(}JpPE)N9}$B zvm5LH5nI{Gm(FRwtQRWJS@t5)Fo?!OSfEk1J=Z+6&3U-&9!Jhq=&Sl>pW`_6ooJRs zuT{7Q3PSUQlL>z@n_2ppq0&$18uD%_>F8XS1_533LXx1BQZ@0J32CPi82IFXycNCx zL7D95CJw#Sa#(V%@RZ1!u_3)Al0L|h8@&sjv{ApWi{xW$i{ZsSU2W1#OW~6GNkvWe zl*mJLWpvZv{ZKdiqSH<3975!Xy^NC>hQot6sa7S*J?XYFGCp$rhUA+mW)@J3pKhI< z45+v;)ru5INc@kYo1>F{=k)aAkAHM8`hz134gQ;kGylw&i{2ld-cjfC>9KyQnSW~V z>);=sJN@G$xyP$Vck+++j?a((!0rl*NvT|t71SG)u{X68{mB4&RCAbSh)u`vd>d9|* zR_Mnf0s9gscfi*e#fJNltIBxyd?y?U$4|+jHohF4OZf)IMvolEz=Hxj#^KLy%7;PP zLLT)q(5f5#y>obWynT3a_7N|n?n4*SygDC}c1$GEKzb4ala>oEZZjXDHzkHnMPcRB zqfS@k5Es6GYQK@K$nRamhejDLgZ%F0IhiA6a8x20z%OL(k9p+b{q*NgpB#7OZ@cm6 z=iSzB&DM_8eij#m^~mR97;Zvi4~6fKlmpHXJT0*BzzW__Do8>}f8nJOm6Qfb=e3Ky z`J|tQCH_z*Z2rL|y&U8eb(rw}9pI(6K2F5>r6M25my$@fg7H7Z*M5?H|AhTYp+SC6$e(o_Wn&xu4Y*W%dDPuz z4S6*lJLaZ}$(+#5V|w=^AT0pS(VVRWGG4-((x 
z!GQY0-j(O2ggHMTtrZa1U>g_b$D4@8Cfn#=e*VXxcYM4#C)O+`*r)OL5uVlW>9|J6 zXagtw#M%OD;AyU*m+OL0wd3ZdU)xwjD3m#sIQtYVE!Yo}%P(gJ$XUQ(CmqL;%quDJ zV{O{PJy&pV9T@Z%2SE99Zc4(RwGy;@E?C=DDNNzDs@M++} zsAvL8OaKb8K3ap_QbH>qe#;vEDeviMQm|{UOdPYMF-OZ|O5N(lVcZ= zVVGM+PKpatzI}#oZ8KBuRNsYeI{kCN0dy97zC5@ps)u6#;F=4MNw)yN@!XF?hf?TX z`LI_?Gt+MgI$icbEd2TBzq3Bz^Z5MmnEkgrgOgosjhugi-gz6v!utH5_5Wg*u+|&9 zAB)chk^O8V|Liw?GxztOP5x}FDW4rxKK!38`7FEW4o)u4JE!Uc=t)}s($0TrI4`~k z49NGkx253vKNhn2Q{_lXoiNi#fhYUeP%oMsDORUey+UmTQbslP2B zbUOnl=VuQ#AD>Sj+^H9jW}c6im!1Pi=Mv7=&_faw>~wOql_kC}O)Yn1EjGi>Bag*o zI#K2=shkU=cS2_iq*7A}I1nDZ5{(fwLo|Dnia7q)A{ye3rKAX3(2DiLuWEbnc!4-0 zo5LqsSVaGBJh}=Z?iR>pXoWCf*~Sh*mZ@#lN7-3nj4F?!<;s8fnfeqyECsB2e!Ase zxm#}H{v6@<5YC4%OHvgT?_iZ9R}r}@R${T&NGnT%@oZ?rO*E~}!a-hGrb`YU!?zB0 z8>O$SL8s<%{P{)itaGY2AkqB$j=eAGr;j_QXZhxf^V7e~8=|@2Jw7}!I+uo?Fa1_Y z-_vNsWa>HbM-~Q1rMbt9jN&0txZ=$lidyorK{_W(Y}w0Ke|OdNIFrZ+ja;(WuvF6- zq&{x=`gFrzz)s4~2j4`#pXL?$*wz+v+-ySin2LCQlL&uciEyXFn6xT+a1I4H1iepR ziLx;_&Ug%(>zt?QJN9WhbPSLk-sEy;Jd;YSL_Dq7Zkz9zE^r`G3Jz%;$Kb*=6kz)^ z$}tp8a|z#rl^1EAIp!H)87EyC4?U|UZ^67IzqF`Zt*A3X7j#m(iqzf5R#3(2)$6Ey z4KKEuEKAeb3iT@PO3fv=jLzODivn~v=6Q| zMeD<>avBddz${@dq4zW(OW1aK-qR;1kdTn zFZ7q26C6wLr_4f_v1z6J%B(g<<1Zeki}Q9J_mDeC&+|RwSiSHL_Nmv7=s=SNe~Pv# z&~M@4IT2>*{X9_bncO?dkms%8=RX_vRNXcg(lORUs`Vl?I2Q3r6 z^4NEIq|%%?EC$;sC;*qMMq-|&I@)&wD;Y$1K<4vM7RRN-zM0R*+BWATuaHaX@Tpkm z3jS8ukqUlfR?O=22S2LVM@I|Vu38!7c^-z^q?&RM2(->_KU%9zQ7%rD8Coa%*I5h9 z3^TbXotzIiO8g?q55rc3p%<2+T;+J3W9)Mb)Z$Y1r^`pnGgJt1mAz+X^cBC9MqzQK z-ST*Y;jAp5FR?=LTP5aFexuZw&u?j8Uw*A!S^2g0Y7+NtKQZ>c^w8IM)?`7(^mQFkxY1d@tW8@QSDUE07wm2g3pC^gj)L?ZZ?5n)oSp-%;C1_`o$maTw3L6RQrk zYb@0u;IVrTgKy}>lihGtMLYYFc@V}YO)LCm7&}3wgPT?5*n(>?JRZi^w7k)>2%|+6 zPYMA}kSIYhrTAVwTZL%GlJ3-twnlBa!hg&;`U#V-;s4D^hf@yWk0@%cu+6&OAA-myE^V}A5;mtC0={fr#FlMadnK4lC zPK;Gr12y^9LE6ld?FqGHnkk3{`f6up5ktg!}S02-m#$ z>Bpj{?Hu41{fNnCF}eRx$a7VQ&ZlK!1=E*qlICIhqgz@@tgyIIaN$vV-t>+{nnBBk zE4F9T`k8x6SZ(x!wzWgIYs-LW%c;M-RcVp>MME&QNl9`)lN2~dE>&Bm=qvM)Lb(UT 
zhceDtAO(Yu=Jmhobz*GC6{`gWR0B|1gI0D&+l3>y<++5`MgdqDF_C?Fnz!=Mg?Slb zr&O|8oGqSsbLK-vrL0*<$3s*?cU=@;$R&Ehv{t$^fQf%V-G?{hy zGB9WXBM}k42kr_YbZ)~L4&*)2r(tZ79;+a{_`@L#=cDRK;ZI3M(k0nS-N1>CYW;mM zp4qtUZqA3({P7aU(qo#4a`oqoM7z!_7H2x2n~mHFI?oAnt=hL(HI+H&s+&3Y*W&b> ztR9w^P~{Y7%gbIey$xoj)T`X#x7NM-%gpIX_scQB z7h6suFJ^5nk;uzXS&^P}W%G~uKgX~|470KKGWkF2yL)g?t^Y=g=l^`G|Nbn0Ke62W zvn|$z>#Z+?E@&nqE$E8>D1l?&_gU|_b98pRO;WQs&(|OKVt1kw9icovLY>;sq_?S~ zKawEJKn`ry>&>ltW2?Ett{&KDX!+GovVRW)KlIrD4Un+EcLslFo_`g<{=9W`G1$U% zpZ~j-FDt(~TfPBsfwk6{#i(`<_8Y`lw;OPWa~43ue@x=c`2reg`T?3bTctL(0SGfr zm+_HJnHdmXrNYD1eiYwD9D5Ub7f%i(k^UG6+%XY|=$IFU zmt&4VPrUo%p|D+~!Ou?Q-34BDqmihZXzl*?`4cQ#&>zD9Q}=S}!}Ay)9ZBLJ2YI_? z^fxd1B<=l(2;4Ifh}oeh#%9ssjuU8_h4!DBFO1^2UW1h9@H8=X#S?kXj;U9KsC^Sr zlfcth>OZ8JKf<&qY$Lw)lO*u`)TpTuP{sf>t#1A}bah~&>-uqYU4M`0$}7Hm#peYc*k z|FHxAyy^eX@b?o#qtXTFKdJ->7R^!@1UmJZd}2jGKFdSV$U!lHK3w#&;w4%Yi$-9m9iZSh)&_)T2?!Xk(a-#` zHGXW|N~v{i2H(n4(OslBMyi%#q=f-}C*=;m^ejL3EJV1hc!WE-5%wG}n4~x~UJyuB zSBw)-q{ngOp;pMBAq_GrRt0cgQJjIdJ z8aUfE+6V0x22B~xc8CT0>=Cav(oJ@EgCg4lW6309_?UG%TgV86XRo>1vv4bCWt{CC zch@>E#0mNEQU}}`WlalZ7<#KwhF2!a-Y`b-85YJ48=>~bO3Z3{n!AJ!@c8e5J z&ZNIbDyhK9UJ)n1Ch=sfkwTY07*^0t%|MGjoyK-y4#Py|lxPbRC%O;8hG2Z0KT5!R~AxCF0-C11WPqZ(9x4q}X+an%>5^>F(#E!`L zK=LD5UGSH(yJ*Ktcn}bHtZ`<~#q29KEboz3T)k(X9Y-~OKAZ~IfT{ZHrC*%C4LPhkdn#Q(EVZ|37a zcUtwg_|Iqg!vM<9$H+_$FS>sLTlqR*pHD9if2)ZE4&5ZaPMi_d490r8X*|sCoW%cy zJ;W1A?2!0)+$?ivXXu3eiGnqOlpe5wYVy{2@=oA#BbDp)*YivEE8pB%cXAa5E;|ie zKT3V{fE;5B9$=&3ijRN6@At*`YgoDldL{$c$WN%qId;OoWZ0D9}3|Hc03^m?81!C$|SQ#zX1{acZY1cYu%dIG~t zoG5$1A)Ouf4!^?lozExy2svS2P6p@4{XPP6(PJIf?eqpGhnJ_F9_wEAx)=T9ZSu2y z1XC(7*7Gwbz`Q{O@n0}IDU2%=xn~mJ^(n(2CZQQW%rXU-QC1YR;C>zgfxlp+LKI{E zl_cONnuX~xCg&qNiQMgvtbM=+m^ucht{Xa-TCP9A_gk&{NA?-ETYUDc!|KgOqp{U! 
z)%VzCza!u|iBtxqk4PO3Pr?Vo_U~PP%-eea-&ZH&U!+RCDIvm68wzJ(*k?YSq1(!-krzM*00@Db_hMZ+vF+vVuSYR>(HTD z>jwG@A~&3Pvcz?E7k|XsOO#tgpqiq$L~R@*-vBEPBn}LEe6!8W??Qi|0*)|5T92GE z+zYI+5wJiON8y7&MqG&xq5}|Wu2H0q0mM5WmI?WL(>j91J#;8Tqjbbns|!OLDV4j& zj{iJ7rb-gf-4LlnK9k0i#1&trP)N3c&F?znaS|ixo~SRp+uY_E{pqoI2wj;#lkJhG zxy&%fipp7(;=+SP4G*|fp9*tR0@%>gByu@I@nl|btCBCPoyTTpWiG0AwLLQ zhc?M#p3V@Hd*Ry3Qk7^{FblaXXohsPFS^8RXGFW0mZjf$MX&w{f_M$r2jCej|1k;6k9tv{Vq@ zWy&Xl_(q!YL<`0D+cv%8zKFC#DaxpGwf zg*n&(hT~mWF5FfQNhQpsk1}{#TB0qE(fKbNEbgz30PH zE_{oPTKR0_u)jDiFHlrYY6ekBs)O;LADyUlh4TINp)%#ojqs_wiyhB{^{jmS#HC-B z5RKp~tAV))XiG!WG@v<(UFFLStAl2zn68GzD4esGUm?n~+hT0#4CH3=FjnF|y_$I_ zq`4~xM5lr0tV^YUmj|ge1nIG;UZ^A zbZhl672@*@OyFDa?d3;p?qO$6lHP#aU5e$Ay7?4XerbEu+u!s4ZGWoQ-8k`I&i`+B zr_pZb{r~E_Z~lMJ^7j)n=AUhWxFs@9_mz8|EXkDeMCDj4JT>$-UTztkQBYanHk|Zl zdJ+5MNdlVI)d9aEwXHw7;zz?|wutYycZJtN){9@*S8@VZq0Tcz*JTVKKS~J_uCZFG24P zHK1-h8o>bI30}M%urz^p<2zw8q4$ss#z{Q(lkfq)hZacceLTr#0VEQ3Ej?t+7LD|U zh7CPDnP+;nhn}|KfN!uQ2>(pS%T_~(C4vO`+IP~)mFU5R{b+#-1LO@I6fI+0H03T23B!+7{!xRq>0}|dWJ87xBfTEgL{?|$L4!8Wky&PP~dSUKq< z6It#yDWJ)V*x#A@suMlHYI^s}v$66YX$S_-%{o~%&*~QVx zmy^Q|9kb2IeqIho4HHK``uM?yI2ylzli+uNOlgVYXHfnd#g!qcX(H+2jp7zZW)m~$ z2!G5hnYh^maB}f9{QNt>l{)mf#slvN=%p{P=JzI{Kh2+afxMru5lOfo)Rl*?(4(Z9 z^@H-UQo*;WOSdaL8I_KdU$b{IuxJcT*LaZgB}P}g_W)dm79PN_xJypR)+ga9d$OQm zgvdtYy77|PcA5G)Z{?>(6B}F{U9c^z=$bFztdB!dN>o|R*;g7q-uJ@ZYxl9)}7_Ehs;U-86O4E_h5cieR5n*03{^g$<2HT#Vf&wl0b zf0+5_he$m8^lUR<1hI|+VO^)SBf~zu|73iiy|XGxm=~NMhBA&$222RH3v3F~LIBdU zDIGr%IZ#sdRWgRk;r~$6uk@Y;2%w=cXsb^)2?!;9@tI$>3NU5dvkm+8PC0qYp;|lN zOiPC`r4&Ea?%!VVRF@3=yI$VYz3J@DPkA3xYzUp73uijxfY3CSyr|);5{_MUGMI_8 z;Mo&&h&k(b*2cb931(YUGDxwsW8G)zj0fz^*9EKc#ZD(<^epndL+)xY8@OZ%e?{Q# z<)J<13zFwG`YzzzY1Yxdq@~DM!TQB_aQ|KDDad-`!Y1_sLQq}%ojV!L$jSCz8OS?% zko%qf>6wo~oIyI8m8Z4W5^C7fHsbeubu`p%IcB$5sG!TkXLVVTV2frm+M3ehU!j#V?j)R@67>EHvk9vydk$A_K4 z@loNc&kBD_!ygd>Mx!_?g?~0k%-5d@kUAz(YV9<)8wbsLy;TRUh5v0g+X^-hC)bNd zP7t|LW#9l#Dcw*;>dPeZT!1o4hWxxNLJD_A2-c6p9h!R@{!HYwIEg%rg^!cxK*Y|@ 
zdLUxwK(D2|p8_teD7TuHb!M1paYWCL2C2Y$(SL~W(#Oa^Iauji1>dUgic;i~@gtBC zT@V<_qyE=P;LYe_-fXM~Bu%|H4jnfQMnUKg{LmlY05d)pMeE?!z1S)r2W(c#7JWYN zyh%x)I|3r7vq*fpB=GE`l_vt26)BQFq3boqwp1)`;t8y5UCr?a+CXec%#%qpnb)ZSY`BYY;L7 z0h6y@Bwftt8VSj5A#lq3c~;O44A2ta=iuQFM`m5DIKg?Aowx*Lxv@R(UZ&DqzSPaf(oPav+wO{ zMuIF{D{F1l`LQ-pfE)t62QvZTkF{btdQi_RI7n_|=Do65jd^1Z6X==<1}!NWA%43~ zn%>2UUFIuxGe67OfoDj%ydK~+rI>mT<0><6{1A5T9XZMO?RuT3sH-@oxeDf<>Su zaQtehSsLoiT$_)fD_0qipL!Wk=KVicgU9v5oib(@vy#q=*(+IiPS~}O#8FnxoX8;Ywt!Z(fN&p^t7tt4 zWWSU^9;vmQ7c?KGjtZ*Kn607?n<}iCub_@83$>Curcg!6DmM=eXQJ#X!kS7@8aGo2 z%Oa?C7696%WQGn%kBR{kMP?-%y9w!|R5oSpSCd9K>r@mNf-$)k3$ZBRQQ|3EO%)Ol zCxn7LS!9#5GFQ!xb0V0z;Maq|R3b?j6R$}+<2Hvh7EU9vA7SfzQ69D?`=K-#;lI){ z2rFzRQxQD(!lTa@LTDUd3ALRj2Z@>ap(aQeH<@YG zUY4GMU)U#J25ZV%yHdwr$(CZDV4q zlfL=C?{4kAcXzA$!>R6%-Bo?gbDrmS7(w|5C?=}#@d=FKhthxcnIC#8D|1ZoswcIo zL9m2BK`@D&puP7WF~L|m%JZ#Qbvo4*wDk1vJe6#{t>Rcm4U9P>^U7eD5l|(e)e8+=qy}!PTi>{)0)ZDsnsq3g<+?X; z(&*A<1O(+G%|!tyK4wW1f~&s)r;`$E*jlB9cNp|KpAbDeSUaJg-TJp~hBiH=QDxuX zlarR*Z_jzY$UylUK!yhsBMr3jfgnVEfPj2+kNmKyAIO2@)zvzqx^X|aJ!GHCP68nenyw;|K zorANJ=kh`QxqhYVl*qQB`7`$ta+L-p*zQj0$S4HzxbH3K{0 zKXAYx65UWNI+X7YO<%lTE6-!)_3oTtF+~*JBklU2sy{DTZ(0(@+(5{yK-3Sm3|b9M!|MzyN?e{mddW}L6*ke=N4s> z!w8p!N7L>~o)9~s4h#lK@Mpj%0#!C`SJu`Lk-}oG*D>f4iTFilNG0_d!TtA*-ecFSTaNHJC%HweZEG7PXMs<3 zp20hB$XTnyZztikm71dYnjt}A`G}R6>WequlDkfpqCl*^3WPTNJoOE^|D*z?7U!cz8PL9A#wKzT5n=XYDbZ>VS;ZTV|Lh^6Z@lp@e=1g zPU;;zd)jGmJ8&UKlP&9Ndl|WMgWp&93Pr)M$KMR}0`0tj(9oY<-$1?_M(K0RGGszn zNFrKO!t1#+%Ij+o&Yp$w=Tm|o3xOuX>hn(?ZdzbjYOpGMItR8!PiNLaFV!31avD9o zf@2QMaq8|D-b5h^b*+303xNSw?T=zTR0{HmUgA1TS#m zv&0AJgywE^cpo)?sC@oy^6MPq7oBOm#H|$Sv7O7Xx~Gg6`}X zovHxvyJ;Bc@G`@iDFd-_P(dA_5?B- zSRY(besdSp3>n}MUDz_7j3UedOnw}`p0Tf5wg{h`l|19U98FoD8r_rOWYps)^=*`@ z?(&FirvF+Xf^#I2McGsv&L+wJ_yXF1DQ5bq2=}BL@@Q<|4J?0M<`cDcLnGkqTUK|5U5UB;G752M6q zrg|VT9Dyz@KIsdJkQ^^Cri?Hc^D?^0Im;Af(?k}+QyjwtWm?_);5J`q7 zl^_0Xa#zZ}Qdu>nR@4gVA=9g9R|PsyNHwaQHxUDg7=m+j!TaQ}Oq&S0XyhG5vGC?@ zHBb>*2kSLYcBNw2A3b_T}>>Xw{^O-2X+78kvN+>Pbjx|f;* 
zQ%k;M9m`3%VO}-9D_+Y@GeprdpQr_lf_DqG6CW7(Ee>}PhDIbS#DN5|8s@zVY@V#d z12lQX>lKg5pF%#xbJe;4z1J{Si-lV|WSwWqJrAVzwN>j?H$Kk%e2Qwctb85SXu{CR zYP3*1G2X_7z7?|it@@vh)wSRBfI+oh?auvUaDt0$VLFtLUca^#F#s;J9_5w*0aUs#OsKlf5??DeyVCe1Ay*)5NE^o^6#VOf={bVjyHe(ow_9 z@Ph1{X{D6n-hDTB6*O1+0zv<}SG%t*M81^nT)if~Jvom_Ty_PbVyn_pvHtkDQUe?C z=>iWC+!y}dj+rey@PZ*T>QexoI#YX6Pej>0N}Sl8&1=-7!U*Bww_r(J zoP{dxVc)5U_}Cbd|K!sF9y0mNRS%X(wxaoBO_V4jL>MHP$+_n-P}idcI&!oMrp{2h zq~-dvu&XACm)?3MVeC>frbUCjQ3<=U_oXbs)XYh}`*+~k_^l2lNf*tUXOA?WhmKg# zWgye5`Qvh7NpPMt?O%oRF6WbUD3bsMv-lx7WRhix zPZjIY#WK?5<=+Nh0^#&KG=6UpAF_@Rk8ic&B98IefyqQ%NX$Zt>GU^8NwYdai_13z z@o4)brh;A{-?(o)}`tPIEO2&$bRdpd!*OZSY01U_{M zGN9h-(ae}!RVQ6LU?yHn>JMpgX1kQ^pK+w?EWT&gHL3+rOykaX6Pb9^dP}mf;_$!E zyA#4+@Hc1Ik14^T&VU^X!o=eDCWL9XmFb_4`+50?Z`Pz-?w5Qv9q{1~D@0Jd>&8gW zTQ!dNx;s~kzlZ2y%h9>4>Xp^CT0>#-Z?qlwbXgIJb`#0zb_q1;(QjbJe$S_W%m^c? zQ*Lr5PTmHGk1nXs8p$nO>1kv83uRJqbWiU6G62JrS++8b^lgwy@%kNIdeND(49M#T zlVOuq9O-O3;zoEEaUlT(mWftg zUrX-bvm9fmHi;RzjdrE$SN|+KbSi4hmJO{~Ef+xwn0%u;qX}hSaG%A8+xznn^z=^QilE5#)TD_;KWi+__fqJr8n*f;vQa1G9b0mj>X|00oM zk^c*h+@!rku?4MthRH+UN9Zpz$Rb^x3_(aY+9RE>o~0QH_WM<0UCzi_`G<(VV7^%f#CU~qI zMBw)#hMDb(dWWVv#q@*_EiyBn5Y9E+_3olv_E>E9np5hkZKG!p-iBWOkFuW z(+pb9DNc2P3Mo`s0drs{5A#*nt0&_H;;Kj@WeYB-W?yE11-lL3>zrf9#px^a#n7#t z8zxVYW|xQjI12_$M1v&U#6n=a+#vN}(R)<|45saxWT0=2LlFJ!rey5`=dLuj+a$tL`lfNpmXB23Rd7Du;%V{Zx_>t&83Tj%FEx9V>CbR0%SgxDKMxuM(I1k(?DY$SV3TttJ_vZ@^=M>u>h{2FgN8@` zi5oQoUBYn~TvROF!~;AXcc7tV>-18^&Q&rs`yD$-mn?XzO|oK@_V+A1igUL;BQZrY3CRF$Z{b_w;`O|p zBMqxNxLIBN1u|1dIgPoIr03Hu3)cpT^p32ULt1yOAo8Q!I8?>gdlj;|JnlPF2`j-X zUS@nsc4z{~$0OQ>n&S~{nbM{Sgb zQ-6$TBxA_f-E-+31DXI(_v|S{ncP2B- zB{}y77bG=3%BoB~bM7nh$tl5%}Wp}TM|4jhdXZu zJjJj{p||E2!bn7GXD+T87e~s$_MQ4^=5G9I)E?PPbD|bA#|IPzQR#z6WH}8qEDv@? 
z=fCLXF>tv`tKoQyXc}qZqMAZaXyCgd+Go`d6;(noi$cw6;H;A_Rg?1YZer2A;XuxE zC0_WPZYi*TJ?r&z$ifcgovY)w-zq4F7OxkgoVFbj{& zc-iaIu9I`Sr-H<@biqBahln6$w?8eS9kkS4z$k+bqllBOu{u!Wrh<|p5m$!-!9J|wv&uB#hH5Mlog{nM@l5YD`qYAOI=xXjslx)Trf1|!3ty38 zDCAw;>TGGICiPHt95@+=w%HPqDIady>{sVGXmQj~VqW*A!X)0QJwbxPJMxcfNNC7* z#!*zpzbqh)#3iY@Z(+4_#N>}OG>R;gMGweCyr7cN{2k)g^KfN0o$2|gj&IwX5#KFuDJdUqwXe( z+aP2G^#1|6&gE`CR4a6mZ*i&jFJWMi_Bj1NX6`|^N#sKZ*rl+WX=|ArSRRkOX$6XZCvF39FzTe?0?uV64Gjd=!SWGl`qpMT+;h-yQ&m= zsuNpZr&^f>gK~5Ha4Axm$gnkDB6Mm<`7M8&ny2WHfo!j&@MI@=PJ?oAI2IG}kxDXf z-ywFABAqkBa%QfRfU!j2)j}D@3Zp?d5O%}VTBx>tkbSg=^|?5|en;oA?)@8a(G~c@ z8cPx=^o);1r&q!(jFLtKW~2MtG$fE%vl)t!L=1)lN3}p?KE00*_UmErkin72L*fK_ z`}9P!qX~#x`i}^}XsU0)(=aXI4@D@Ul_(gq8XRSSF!F-dF1lAu*a2~38PZKz4@Mv)ry8*e+7lOXJ zWfMWU{&sammHh1w` zLA~DQN_yF#mYPk`hhV!cWiL5BdsN2k$+n+q_j!1YJq=@t<5|h&v4DA(`s+ue8DK)* z#-=3!Ptxmi-92z?%Z1DqXx+SS`?PcN@*ci+j|5lqd=$I)DCTk+l&cEnkw}Kj4zo$P zpGWh%xI`?==CT9>h%uv?vyT@1Q9P_KXYMe(W!|*m<+!@G5sF7zmW>7~`9){m#Gzw@ zTi?yu*3Ea{$2p!Rk+E33^rd zxf$*>>}M6=;=GiwBl5v@yG8`bT6tjxfpPniU9Kr?(H(rx^qmR53}SXnNCAG~c`b?Y zo9*rG9p*V7EAHFx#y`3aIZpf`dJ>UTWguk4%Lu9{e)S&mPgV7s@9{*k-#nV|O=D?K zho7I_J`2C)%mTGMqmJRufK^Yr4ZFZ4tE+#s@LQ?}Ushl)ut3)KP1qIfD)Y}Rum7L# z_NsDQ^u^aL&3Nw{&MY&acMHkG27i? z*R+GE0O+nc0n*l^D~<7^bN9K|ov7n&U?r~sJEEj~ zrVJ|8CT!NJ$G-Nv7M)19Y&=iexeMjC!5x89dV2@VCf+d>jEA-eVH>!NBHP`PdHEN^_=J$O4@Vb2hf#_Rnp6&<4{nohjAbUkdlPK1ye+=d$2hrfeY=g`XOI?_ z@~8JUjjX|o$PnolnONX>Xz5(d%0;_f=0B)#Fcu>uIPzn$KS;2LU4?pMO(9KZ#!Q7? 
zg(S-v+n)Q8+J|-F-Wp*fO~b|=^1Pg#em?Xu)ny;QIAa=jJ6J$Zygv17v!hC_R|Yrf zg+A}HTdusojg<942xGZwvv*Wd%8ZvsHu-44?DYpaC1pZ}eB}wOeUHs3UHL@>{vR$` zn9mtUuBnis0{X~V495G%LHD`!LUoRm<8o=r0}h$M3V9Lf@7t4lQVg+9J1Ix zEc+ie0p}U9hE}I@*FG`Um`8T(gi&bOO;zyEp>J%&*SKg9+mZ8F>`Qs_cIYH!V%+JC*;jh?!rzP;SD*@DP8!jB&-7iJuNn+rutvp93(6Pq3VX7dG|?+3*6^(GNE~7sn83>^>vx(n9g=Xf zWb#3u14zV=oE`n_{c{}&K4->wtkkQ2KZfnjH;`+I?hPt;QA0Yrag;*)D1^*t5K@bI zX8eFZp}%EXU>+)Hr4rwH%;vwXeTLBNU(OGE?OHdg*UOA5ADaE@g=!{M6Q@SYm%M6H zi*AE49;eUilt04U@_+^#c1Bf+07+fa`^G4zLh`~-&w3pwOF_C?cF%63Y%i^r=<#B_ z+a|02o3}Pv#6lK%!B#z6^iJd*o^UBNq4Jj#KgP)WTUlqCsD-Fvp3IKCOIZ<0~N?aN)v~ssEnm7Lmduz&iE313R{lRcHM7qsyoc z9|pxZhHIqAhHU!t_jEp#eVO4MT4|`B4>p2@;sSH`8vsM=V?yZ9)nJe%*(9FdI3$Ns zlWyOap5#K=-k`=YirLU-7fR_s$K_L7&c#qtB1~E3&9-a>^tBGh-(TvI+%o45Y}M(8 z9OeQhFFUn_bmgDy;fibFCQf|C2c)~G4FVv+k&8tz&J<2gS*|}tV9-QI)CY$iu31e*`0?W9R%_m7W{Aa%s>Z{)0%0W2`KL>9842 z#Kj|5dsnwPJ(Rf@>5K@)isN89&F*Qdt;+QWPcFD!J@ri8Qk%wCzG3Q4kHwZ}pU9L) z^TC#;P2*{wwwrYLAjt;t_~c^2^;~ugrp}(*hC>*6H|FYoqot@pWlM!2{ClGQ%5lcswOG_fk8U8@&s3j6&T@WX0x>lY3Zi5XSR&V$E^D7 zhFk})5ah*^=@9x%znS%I1_V4T$^F_bgDv3t>-N73gUft4R*yV@z4XQuzz(Qp*Szdn z)&e{`0+KMAxXxVso?_^PL2a5rXb@)(V_a=A>M3KUVV{|md`-sapIe@ruWzH^Jd!4yIK8-L1o)qD&TFX{^D3iz071}4xQJF1Hpx}vx}9X>Xe zXR97a<^AQKfZ${GKTJ4`7a!iIyM~qVL3%gU41Pw~meIjEV|fQJgy7a`jpo~p<1zk2 z6XoLBxzP%4LvUB0;}eTF&Xke_IwV+Org~N#Y`7i?r3tEbMxR~RvQ9}vEJ~7%)-%y@U8s@xJ7zyL zD9-J|f; zJ&)}?SUbN=H7UO*uVGZ`Hs=jVk(t-lEZv}~L>YvkWkV+EdZPZIIXm*Ns)|9Vl?2W- z6z90%;5V7NWb33i?cDR(lSLSWES9J(KeU==GB~v4??MgFfA4-Mo_JEiAo#48*ZoG0 zAUKv@q=DDAP*BpF{XL-rYeBa1`zoMKt5Ne$RwAnw9NyL5;umr?^9^GuspcMtg!D@} zlsCnwNHr)=bl^F+V!d2r636PnlzShAvV?7OMs08Vr|APEkbyu>L(`*!w!8}K!0%<7 zCElbGh)-W0F1KNiR)LQ8P##E@Vi?WlH1kbZWY~Nu)@QZd;8+=y)#HBN7M`1qK!eX2 z1;8~id{#<5uvshW*XPXBw$?|?SG@zsIR>ow81C0QE)^AQ$Z~Y&C1zeQ+y;|@j|xVQ zuM9Ldf8O)Q@d7uo6lC6kId6}8z#-hJuQE5Law^&@WhH9!^a(0sl>~#$MCgPt<$2bA zK_5Lxy?B&pXE4rpx~p@K(436q7IgR9#=QCz+vj`F7xV|}RkXGAtN?O=X$HjHmof0< ze2nqloTwTk>Ax-8?Grf#6BS7pH(Z5P<10d+CB@S;rp6oDZmBnc(yU^q`R*6O$+9KuH_wPLMQsW*AXp?U|i1-gD6 
zdwm$^W726cAOc`!bMju1~FsoWyhQ^cMeSZB=&C?aSVKZ0s{!#GSh)-0habs*K* zA&=;JB6)#nr9LH}xZyr6@UEe^;LI=HP%J%aklQAx)s)o7D)nt2Jd&Sb3AVO)gYc=` z)B~F_($X?Ni>;{(Goh~7X=MBOHvbfhvs$lT7+>yLV(y*4+u*y8kWoP+AS?ZTbU!!c z6y%XWs0G?IyXui0HAbX%Nq^L=QS&q~a!Y*RnJ=a4FmWEI?;DNuiY2>9eL9TD zGDzy^&&28eifCop$V?}z+1wI=uGH}rJWL|~+spQFYeb%sBL<^YQ&YL=70V$?Lup3D z1yM#5e4bafT+%oM;xd3rM1bK%=;#koH+QUMfxUfu`J(zUv>GHBMd(vqP)T6(wF6>FDfz?irqEhg;2+8Bu}WG zL|bM3ai`%+x{Vn$VyBKh(0Pfv{nMIyoAUx2b$5Lf1hZk%otI{7euU@ZBqZz8WShF4 z=`B_P=fZs^_DY$UG`^nhLKX(uE@<o&08$VH5*pf^}L)%+4jVCzs*(Zjr_yoQ=UeB{_`{ z_Rk$&QB9FZZSobd6I=p2nD(olwrheOMyb;|7VD!X5{nXai4=uc6_Rn7xiOLqB)u9e z!AsJBb_u^L+#)NnO`|_`IoBbTE>d-vZGEdS*2#tGYnhQ}9eqo>C~6}p;kVe6HH=A? zfUDPMpQG^~d-4ujNp|H#0Wt*WaZ9cTb=OG_-+e3vDVt2iJqE|~zecNwy+^f_TW7q( zwZprg-Rs7!$ofcEdL2?L1q3eZ)vV@g#QXxs9<{Qx|95psMvG1ATr#oFr8FY_U&ohiWz?U6`1wL#UR znueBH92gvlcN~CMyQaS&$(G;L**B7e*sF#A)sE(#Q%YACDrh#_OLph8-y{SuF5DOO z;q5W*QT*E)>rAC)uv@>-`&h6zA~4C=tRD}VhQABv(?j2l^x++zXJ*LSL#_8ve4*vk zh%l4nmoIi9QSpILOHZf{EtJg4Nma(58ML|m`9Ft^Ml7n@PqyCMN({K6smY)rX&fjs zAD%9o+yK}aSH7ItJb2>MiLqTk>yDTJok!(Bd)D^umu=X%V6-)%`<(CFDU+Nk#!&QI zz|-d#+<=Do6V6l&d}xQG1zYO+t?wtL@=)R#QiHT0w%7_d!Bm#$E!;Xq&hC4a>Id07ehChr=d!`0X~Ep;@5aBL z7xg8AKv4 z#T94NLVw09j-F&)$={7*mr4(~A zRzZ?i-gpi3&u{{e$rC^p2pm%*8WVa_+3U|9WLTT}V7&!eV-U_n#tj_6T3WI6J(MWC z5XYjZ?FXI`hf}R!fAgnEyZWNL0OQF6XUb598m8-SMf?~Qe@K7aSPdBci|#^EVV(Rt zZY0acI-Pj&8fT+s=wK$KVvN5Ef9vmbK z&u8^N1jfXy&~m+bifld9OGCgUKLnF=ZRGz_H^z^NXT@}4%my)*D`$?AS_vTY_=zWU z1o{Q`MDh@D3)lota6>9Z+T(XBT{Wh?Gfcf$K#_NuR!4rDt1M-asFga?Yp$w)AWp7m`Rm#n*GTgWyUfxrljcf7WCTFCZDoFc4~ zndm9P5UwR`P^eNmBXkCrE_OCP{&iXYiBV~1e7mgIRjbK zqm@`KuMGXr8Ts+8JE4{RJnAoj<;4e!_Vr$~^*9H(H|UZ)m9!d%&5!WC-@{!MBi?@t zads30vV_RyJ&e3&h+|h8kgl#zLZ>Kr&89eW>X4p@NK+mFT4r5XNB(sv4s(sJt;=9G z_!n!6^Xs-dQy#M9-nhfh;0M!V1&4mXVCBsw)77cBXkNRc{pM-o`hrSmg@}s-$b`;J zeDAz?C`;ZuQ?eFvq#nEGuI&z^hB)Rxr|QhnKL&0**K;&jCrYssy3yLy!@@RGnZ#Tgw}z}b zx8vV9h!{vW$djuz93?;yz#y}EZd9UtODtqSW;R`}cAT}b4zhJIsDEappTZxp_SY5c 
z=F%3{*lS!gT4ST@Xh>A(L6CQ}tr6FIIP0_EMHM5A`h`4s5StUJPjqB}xBO7i%yxO` zaJGENl5n}FaH>|TaOGu$ezNgSqfDjU@YR`gUfI5Gmu1qdbNd)PXkq7ksFvyQxeLAa z^^_@WG!nGu!^s+6v41F671{vxAEu|+RTGx&aCgvb0)Ib5zBp|A4CL(@W+|^?+4y{Y z(=^AOnf*bHvZBW^fAgh3{pTxg@triX8}-lUGtnzu+}d?@*Ju8t*Q<$E5lmCv#VT{Z zk9uzaSC(36-0ttFqP5+>IGdN~5{zQvcb}5xl&9~?WzqF8QpVlC<$7uZovP$8Hrj+9 znx6BlPh z(KEavmkQw`dgWPPANsXg+l7pdc#&9?HyM%EUyh(89bbl2xEQ?Z zwA55PSZc{+S^C48Twdm{pLo;hJ?4i5PnWz}aeOsy(y*2}4&viTBVF-&qhW!VK0e#& zRpM1{dLwytdSdX7j~oMO6a~cxuC;a|iawr_^#>lF-3Qm4# z=Ycz#-6}(#B5)l#o=Nd}sAj$7ut6gurvoxlv{;u^%UDQ6byX2Kr~lhg9dRzbJf-e)6NpH6bI+ubLKe3mq@2 z<>%L;keAoA)|S*Dtw36FsYuUrRy5KmZ+NR{?47UW`x7#q*vtkjXIva(2 zgi*NQrXv4fSKUfHGUdAS+>1&DN`mjiD@hQ0r8^fxLs_^8DDc+F!Oz@G}_#O9{6FU1-IZkjJ^ zA!Gr|G)4e&k6UkApCwk}&d|w5`+RCy zsMy{XVt)TRdJWt_kSFuCJzd|A=Tza@R*spOb%i6cncjRx^yNu$@Yr5-;OrFvTP75f z@pD^E=|$>7?=E)U2!l;BvUZ$4%CxNH$*k+Z<2N+G_Z`S&$_KTL|DDB`QM_<{5KLE{ zP1Fn3aW#3{bzoY5F)6JYCIf;a;X4sH4A(vbqNOyTirqi%caVh`fMR#85DQ`s8V#TB z21+UIGGO^O$?VZ^(e;5~bdf4`rn&e#Z7Ot!rC^oeK|d@!MUp{u`kx_L^}!1EK_O{` z5TqNfjWzP{V?R?Wq^Q@XB(T9B?$m~kXQ&)lI%H*AgRV>&i$^d;%&0h~H&-0DEs2My zulLfLT7eePdqO5MHEGhFsI-d)O6W1O&9%YvNz5?Jl|v|-N;>Gpcg*$RMbxdrex
$g12Z|qgBD~d_asHX-x!XWV?%yb2y({_lTeQ9}CVehV?no z>;r=Vi2b-B$MIQU^c(%A3DV#eu?l}hVjkUb+{Omoa16Pk2h}OX=mn5!7 z1^yP?hMVqY>B-bUp(7VRy;5F8)+Zzj_DMU!7`@mQi zC?C!p*ih4zokm0h9vQD}YBM4*mg zgxEEsRP&PaaTy8gG`J`Zuv)GzfpRvvcIovfltrD+{o&V1nNX^N%M67ej>-;f)Skw> z$B?n6f&nwis0Sj~r>#({bc$zW7)B?&KgD<-n>pW6sHciS9Dy$^VU*8+Mq^scOmJOo zrdwo`g9#8FdlyM;bMI6*5Kk58R&gHho8>l$ZlOPO!r$V59q*XY4x0t0kOz)1)Tip& zGP+f*1G!*cK>Z=!f1KiZiO5sxs)yGC(>kWCR9C$l2XHt30Q#YHSOl zO<*tIVG(Y<1?N2UW)-$mCT_1QgtwoM;`iQ}=KgxO)!ud+TC(?8Ttw_Ayy`@Osf|}d z{2IsfUHhqtnK5a)<7F<6fEj}huZ?jlJbXn35m3q?r}5tNO3+4Ka~TsJvm5=*SWl8; z*|b4poLg_5;q~duK`{r;psHWW8NZM8i2dbfbNw5o{Sm5K(S2_U^o{1P1eTr2rHoP_i`6qVdYWGUif#6+z zd>?HH%mZ(51pYJitGb3WCXbham!_6bjbAGmUEAe>K3|1%OW_!po8g6F8n&Fevfgi@ zQ*Tb=R#bkT4Ij;@%nwip<0|D~Tv}uB8`)BEtnDm<8||YBR#5Gw>eh5{lJyKY{S$aK zlfzo*pDgRZEvxI#ne}}UY!bFrDE7}@KKO**n9iHNqvD`x7hamb$u_L03p)2sMPeyWL*=}HP5#r0&R8GCP`dxG9tdq6T35!dT{zu?JaaYEG zjGA(Y7iGVOT4$-Fd3Xv)@`X1Uwlzaa*lQM+lBZCVlX0NVCRlIxp6J!)Fk0n-`ygJn z8B$Kb*HySK_8uHrorapa@O9^v48Fg)U=NhfY0T|g=H?H_)5(ht&dlR0x`0&|Z_!NE z$j724#Ir>a^C&@9B<}2#7YofgC&v2xc&6K9Qt?fT%JaWVrxFe9ro~w?b;SG3c?I1K zJw2hs)7eGpY+SS+=tEB_C?SZRJGv;wH*Z@1IU;tg9)a6~+ce z^4_t#+fG8!*cMXa`)=f>=Lp|v#BBa{gy(RM4ItGXNu1e}&Z~B6kYU&y;y`;y#?JJ* z{z(Y1y>{f#Q{MF!3&!DEfB$8w@(rL%h)Cy|rF5pm2*YzEZa}p=>&{M%5h^|6uW2;B zus?Ro&3x*PckQ(L#|#=4gU(PN)2kORQlnVOYpxj^_a`8e@;SY69&hIR!NYp~8*%90 z?wCB51(YjMj7C$1)L$$l65$4E|!mrQR1X!#(g!QQ6LO=gTd=7{6KcKn2jUokV z@w>}g*dd=iYu7RsliXiJ`7i}+|LQUxTnO2v+3t1gBS%e_oaN_Aab5cdODiikBI`@L zrHNwR=dxg?mDNw`bMHmkg)Slr!6X*J)+*Q_P#TOM3MW4L+g%JNblu9 zbwcUcK66@N>2Iv``pgpAit(nGm-?|PW=oXhnt91;#s2J4sHvm*(OnP>uOmj;!k6SB z{1s>3Yn=4M$&3cVjhU3x;)s;RfLs~ni7NzHcme)JujEL?XPGY}dup}vYFJHn@(Zlp zs=?8D4dp0_+QLT|I`OCmNOm9$r~@uT6K=>xZ*blZjBg(I9lmurPXi5oojkgxUw3Qt zs3AZoKX1Me78;X~G%xgj+vYMB0s;5t#K|Ojeuo|viE&nMS-{K0*UfhEsPtSr*M0a( zKX#sp1{P!Ovh)>);Z^Pb1+l1t?SlzB>}|ZI?+ypDuNbAT*nCn=VEZ}RDNoI>SU`RI z7LgS)y^UQ@ll(#0{=bDTzALoZe^hYydgEfb9m2dJsRfBtync5d0p17q1|Mi z=t}-s=#m=Y6Ce8eN&a6OUB)ZlY$d~b_r_YXkUgZn3<`Lte{$UR%Y1cVaYpLkdZ}50 
zZ&*H?`QU?R8iq(*6}>yd9rqmTZ%v-#@`oNR;c#I5|D`LE;24_gAh+!WDeBBdwEf|) zPaOF!rL6SE7>AlZiZvXmaKdnUJH^J_1NRZ7uszOXba>O+kBes@jT8*9|9_fXBUQ@C zx8(0l;))Own0AEXW3p9r<8)zkt^claxjEW6La#!^Delq|q^EO#J1Ic7-cbR0ZA z3V)XerPf6nF@iiI=tMu-C_5dJ&@LghI)HlcC3Nqft*%fB9@Ne9!-?N*9f*G)y`lwV zhm`SJwf7G*#1~1%j{XWhsCF$kuR-kZQ0fI49C`&t3)8J)rD*rk=%_7WATirRfRB8`+fd9RVXwnlT@GYV*hKD>(u!_40f5HjQyKw z9eSb`3`*RH1IN0+y9_*Ba};~YF6VNWVo|2@QVAhABY!?HimEyO4~t!60@1eL-lJAE z{NVOL5<3)Vpsy9tor$~S_NkjEwrw{aBHl^o5&7atVGJ6JLZ?Pf#6->ltggM|JkUp_ ziw1eYvS!+bo5<%!8?r9nt!F71kg~b@#VywHXH#z_V{Y-l&lLJA)m(oBXww7{q_H(Res633G?b6 zNKbIBMK;Sp(vmPwK)DRK4yp0%S#qPAMI;ni^xqu<~Sx%{DgUTRT8 zCTUHo)Y$#Y&c(`(QX|q?n{c`` zY{Hzkj~|qHd+>Nss;HV$O_O}^U|%ktx)yLgeRXSIuwU637k`^ph*do1q1!`$Bzga| zG)9Wz{j=d6p71q-pvw+r%X)7dTBIN7%iqTEd?5fGt2k^`C_P5az0 z;rD+4)j%r0j+~aydTB|@1*X@STw=ZtzM<)-cqk-V3tFpaSEpg(S_IRiX~Qrt!8XZ!@jB177!%F@N~tZ?##&g$t?Q_y>} zT6HlXC+CS>bv$NR;`HpeH$;S&wA8Rh1?T;slF&*!(G_S&Cjmsgo5B5C8bJB~WU8!5;K}@5 zqUH_o56=N5pMO|(!>~B)Ocvt3Dr2eyw%jmm4^QT1x?a!W$t2X*z8j~j8d|j(dY++4 zQ!{3hJqkbq%0Sy+8TKlVKIa*t)bK;25dAuhPnvJi8^??9PMtJ6q}^z}XmHmWovT6Yj}S+Nn})eUXLLjSVKIC91t&V*Ng~N)>uBsVT?(f#csT^J=m&CKiAN7Id(awb(H;7%BMJTdt-Mkhj^eciRXf=9pb|C zEBy~)R6>Oc{P z^(9z?eL^~|;ud!1jH%e(i%;I3HKt=qV)v|CSs??LIl!heSeVtc%Lh2y!LMuh2T3CX z{Ju7RTPn0VNzY}#)JiPAjrM@t(fa0M@Y+)A(f}i*O_KzEt(Hbh@u4%+Srnk>yJCKoP`(-%T6ytnc@QjV)4CQ?lmcx9 zR$L{$J1W_tQ`8 zq}ySioz!O+Cr3a3t;`d3zlN!Ee_|b#ee37Hl|KkF#uod%$&SwZEa3^xm6DWTd5v7X zjf(Gq2vF$8@|nc}J#xrvDq+o?=KglGzFk+UlXu=93{K^;>qO+ef*YgfM=^PFWWmVi z9zuA~po*V}=cug7Y(}gSg|mEY4LvqqbVZB~9bE`+^j~oBXq`LCfHp>HiKdkFn@uB~ zafF6>emv;)#Mk*4MJ9Cg@L}VaT!B8)k$$7zYCkf>$7>F*8Vq$XjQ#^QAXFqJ&~625 z426orC_*7>DWZQ9#Y>Qr(CdIoO8?oP0~@9cgB|#n*Le6+9(KO;k3QP~pd(A?OP?J% znZr457`x_lH1=ET29L5B_JeC18fb`|Y?AnV@Xhx6O@>TO_u)7H!5%y`tnsiF4Rtyl zd)z18+g)@4MB!Wz&Hn!G+5sTXTux)x9{;IL@cVmfH};m&2J`$UilZ(6J|L&k^(7ve zTJl5M``sRY$}M3zrqkTpZ>}8-a@|IqZeGv;H3y|LUX3fr9$43uHkt>G^_vm7qka3c zz=YWGJ@@WqM}FvEoAUx{+S)-PO;@g;WHv0=!d+hVez3oG<5!Gz*B*Umm+=OM9S8vS 
z_jc9}09CpTwf)?}0QMv+>yY9^27#XM(W#B5tKGflTJP3D*UT{PsA_C|8$i867|I@U z$+vdfYe#6RwrJY_`lXwboBCfFuKX|XaX%Tu7+^Q?5@(og1%5W%N_{uI+47=vtKQzu zFo(sO!KNzc_WCEk+OYMoNleRLd+j*!jyl-wA0Osc545bqz$+{CN~}A*C)D~YzkZ+) z_Y$9%nTtQT_|M8l@ECup#D?5PMt{D@WJoF=9X;jeE9`s9f))UaGTk+B%lYyjN9QlG zxo8778Z&R3rCuIY05uGNDC5J?Lv%Uj$znNZJb;dQ4-p8TE}PKJl*JsLliFui>5^wuYyO0X5 zTNOSkmMWZchlo;PQT_-j)db*pU=F(>ch)LLE~!vU-_3$L%HE@0cQ82RKV)V*aYTr} z)OjIZ?5dyVef9HDTTtLiovy&|zC162-vfE}1;5Y!)IX2Z&#x!vUoP|w>#C!VzOi3v zB!u6;9si|%pI#j50UF;s@0g?dO@ICHxTl{wvfwz<0QR-T6|Q>OR*(P>C(PZr-b{nkU`4Daj4~KCy46bd*T$K5)!}to&;nHmuldmJrgMoMj z<|AXPgvns4q9xC%=7Fox)b1g{*a09oF;)1jTKs_fZF`>|@*JzRTmnYS|2Oi-TjOwY z9Yjy#13KOR6TNuy`TusB^|$)(&+_+{|L-mT-&_8_xBP!^`TySX|Gnk^`;qehHR>y^ z-+smgRE9Bc0T64K1j<4#K8zz-?H_bGbE%m6zBHMD%zGw9>JK$0^ol$XSC0ElL^I$+ zv(z}QnMGk*`v*)%&CAT~n=g)qy#f|^l7mV_e2+-j*GTB&X6j;iQ#A@wdd8@xGZQX$ z>aPM92PQ#`$G$fm7bAM+`^3dTUE`v6bUqys@W8xSXtZAr3o{uin%k|$!MqH@yr##5 zQ3HM{vj+SY0mcUN1yI)>9ZiEIE?|QUqDQd7ADR~#JY4)W;G?1OF%XjVBN&m7&4v?R z=*Q&C{95xSel6DLY$$taQ_EWZZGWn_vHr(F^QG#4z{TE9q5enft^UWe{Jq)#-t2#G z_P;m#-<$pK&Hndh|ND{be`{I)1H{nUPk(!Nb^_Xkmf?YT2x=rpN{qzcdq;g7^%;8N zcrmOHoTLXo19mwcV*)@D1oR8+2w4>^A9U2q98n=L5c5EP8`G5c_O1+MFYOgeGvj>5 zlBE$(3`TtB0z}IgcRoU|1JegP!NkQR3ZBSpu{}ri5%9KDNDJMv-GA=eII}5|KUusnCI4e8aI$f{6CtY`{ne%HvGNmf6wvv zrvJU^e{cHVoBsEv|GnvdZ~EVlr2jPjQ2S-=RP0z@a<(I$_PYw^^)-ZstM*T-!d^;VH?f)sP%dsApeJh6~J z(yPNzn-x$raFT2Pv6Yq4Lvt^z^}x@}{yD`}X4t4Jal<3+Oe1-TvyP6hLvJ0UL59`~P$Nt-Ah4ix=)dl3J+n$Byy- zycItxMx?{{$IfLG{A=Q$9LYd(ey#15<(bKNZi$QD(Qyyt0OQ65#*f1$Hf!<;t0d*z z$&gw)E`IDBh5?NEcWxz8l|poh1i`nj@TW`9=B;4(eR-P0jin(9n9Cp_pBH8Rq$tZ` zNba|Ol*941oD9xxzH~|E78(j^_a)Z~VmHX2<*_k`8Ku49`0q zcH1(l)$&HlmIi~tQw8Xk8A2_(utdJ^rQ$4u`kfW!4I@*0ZkVc;3v@uau{wurocKdO zNu*OuDr}Qsv=iXB2#XP-m;@8UuWh}%#{|4ftHQPTBbIle>?00xB_>& z=)kgLZ|kvlmD`&+blqBsP*HNz=m6Krk|Q&wHdiH=_HR+sW9pC$iqmOi-sd^Ua>G>R z4E;3oJ*yxwS3}9OLipvS3?Bo|!`VFU?;z;mjzH1?1;`;kjt9uC8D1uhIb8CB9Lf-0#J=^qHaG zvnIj|!)PkPD3@$HvO1GjqOSR5yCz=ov$@d!iYW0wI24z8S+eUi*xxa9Y8x 
z=fi+_>9r%l2%;q;vAvmDq>HpVc1C)46K+U~exHU&o25bkzGX6J?G?|4p>zG(r{%yD z+Q_c{1)wrZk0O*fE9p&qCyNIFk;ZPoX%y89p2!f1mAf2*$`BAh-m_-vtlDsOMR}*A zM-Gw_qQ7%ECgQt&O3RIT#E%4^5ohSAh@qy8^A$}3k4f( z!AfyuA!VQq#$Y>m;HfaXG#ggMDueWhs$iQ{UCvMX%&3jMrj$ABtQy&C0kCsFyUwew z#u)>G9;rtjnR}Y&+#Fd5Ra0u3&}N)Ei4gw3mg$u>iYHNa=8VlbQ|%0Hl$GNlW8_Lo z5l}c?tG;&?q_d2?nIHLYzTPI(sIj_WS+S278+I!{>=Rvyyg2;8y!ku;{W^0^K?hlZwiKsj&mw7O)ttRu$#xWdnR^UyY~u6ZxNZ+RfGr z<$r3lTCLqe{-@@f|If4h{X}^eaQfL+F`ofvr+;p@>j%s@Y)hZQrSj?HgxKKX=mNk5 zh1!Hhq=KmZROWu%#+AGktWA z4bB)_>}Gz}&(yZJNIp+VNKfT-yYTd>s`Z^|lXuv8{;8`N+4O;ZAJf)#g5=Su+mTWS z2J5D|w+k~!51F`PWBFLwOkyAN&Ss;I&y^!@K_F&>;rp2MQ;b!9biGO$*T6#0zm2)k z6T2wRyn(MoJxZa`mD7(M%{%=E^UwY)(98kMk3Vd zf#$z0%|@OdX3isXPtUB|yibQY^J?V6xd z#SWAsbnEx#OgR0hlPgN}mUp;*Z04n{>+AoA`Cq-rdBOU>%|>glnXmubfZK29KhN;@ zmjCrF|La@+*SGwyZ~0%}^1r_2fBm8Izczoo`o9fjU;vqyLJX7wbadV^gHF)-oU&2? zSA+k)r>vAp&juWH6JXx=HWow#&X{h|+`@yxq4@jFD5yM70oZfkC%;R0HDV&nuH)Q- zkcAOl93z}XrfF~;QL=;Cl2Y!yMg;9^1kn})@Z0zaL@R85*_{uYxxw@h%u2?8dnxb zmLv#^ghAgkQW_d5xs>FH6H177biM>q)+deW97t)J>+8ORCy$)_U^E%YRTc|g{2lWg zeF6l8as$GvcR0L0I?wIlbOS~4VQ@VWOvb1B()`iym0)%>a(X^WZ?I5)SKE+AP$F-N zO^Sv7F#9p+#}t8YK+eC3L(fJ|CYy=fT>BZRwj}-}sEXot_1*97O+!q^fJ8K2*l4;D zt%wdR><0=TQV`~sj{LjDQ1v?ZE<<+i#z}TQ8QGYkVvNy*qLNmKtk+3bh8#`DynZRl z=w#P_!<>n^16xJwA>=u>ZX-kDiOebjci-_kY#D+zA*aWqA~DQ8o@6;D&l`!w!&Auv z@_0#K*!Y9-`}UqCI>ed(8RYE4#H_i%+u0S|U@qK2E4LGV&#b+@UxpP^+%$3WUCWm3 zgFGq@<0$GTaTdF=VG6-kpPRqO;^xm(h4>>Ioc2HBnM1#S`W@5EZoQ0V&iw2qu2@SW zy7x0cde%KX{)qU2|JUiCemXk&^|(LSIy-7}#9`a7{eGu=^6C&cbIk}thv8OqUYH{% zQqdh-!?Wu|6n(#h(ObJaF=Ya_+mr#+Zk8&z-71ZN`Ddj$%x;&ZK+HB#FvM!onyr09 zsVv12)0Ss9m+XSYFVOrq?r$!iPuO+A0sNqJenn|1la`X8w?x^kYb{Kq5yUpoye{-4+XfKBAh z|LZyae)@@};t&_ll~L~h;(Kx8eCqk$S0^hh9Z?G8{9U-%iX+%cmUxA+5FUyk~`k57K)eu8JywQs$~wrK7NIJQ9G`S$!$_PZ0DImM$VXAYfx^Z;w2|XDR~OS zcaio;<9!T#bd90!z-Ee>Xo3{|n)onMw#6oaCm9yoaN{(H{B3@B6W=QR+d@a7ap*iy zil7a61qOaS_LDS@W_bBJ@Xoo}06rN7sqqTFWr|FDjomh;58A$m_iQmQt$4Oc&stj$ zFVR9cpo2i^XCxv;NF%s&W=R}YJ-uT; 
zeM}B3Ku8xVOHtL3+HHCj2y%?iBOQADr~_p`(AI%&<$-JGh)5V$h36z*x|Jo4`>j|y z=ckoPBqbE6s|1l#5Uv%zPFmY_$!v0eD;*EfpsdGm`Nz?pH2?oaugnVat_d=HB>TeiFslri^wlqw$(iQ^ zud!PJo}w|dq`{IK!H)6&W0Y(w;H+s@fc8v)x_92GlV*KiWp5kE{^cYBXUfzJ@-i-F zu0LeI*CNz)CTrcIw6~ud_n{LVlCNUv$nS21e>Cm6fmQv7!ypoW7bx@D06NG4bOzGl zED))=AP*1!ehebb3yfdce>wAtuj9zf9czu9Xeh)-USgn){6L4p$45FGzQjSnRwhKgyqHb5hA$34pKh z$UjPgTZMPJNLUCR^ApM&ctWdmB(_qapXFg&R77g+6sDk0St;!~;F&6cX0n?p`9f#0 zIR7`u>kCx zy`*Jx z$t3UZpB#N82jl30;u_}BOe}%2uj}T@`~(b}rjAypPnOxc=TD#v0PPmpzp^%s99V(e zY)7!V@d(ypfIj*Ze-vf?z?%o^md%6d1C(Caz<~U&D8x21J{AOaRx)kmCX7-|6(^1H z_LYoO&V(|Yj;#VN^9UOaOVlIOnuS1M<#2TK#?=XPVxeR)RYD%dd`0?#YtBPD7XI;2 z61T)&pzJZ&gd89VXrH^LvPqMqv^d?fslCnopb5*5j+*-p(X_wBuv-WDVb3^XV1&SK ztsUgqbQz1GsuV1N3q!|E&)fhRMvSPV^#J}Syv4vKUXtj zxq@r6kI<5hPB_8k1{q00EROQoDAZD|cwW6cDute07~?9ckIX*M-QqxTzpfZ84*)M4 z@!V?*cfwgEje1Taobx??&8S32nT6v2SRtyWddW_Jl9t>|nQS>ZWdv zq(`E%*xJ1#+<9f0s|FI2WQcPS`T5!5+$n+c;tI2F5J=p1r^_Sb zm++vOIYYF8N&Hp-zG_JEG(~<=9Hz_;X}bhc9eJ3WQc{8iko>F2yTmu2gHwA!>T(Z$ z!uRZ8T2dQv;8SsEDUM2X$DA_*xG5%);UM1N8Bl>y5l5~+4M$G=p-6>vgBg%=9AS3A zEn{wvIz8ikqECQzeBL20B<||{SY|kS>e-zcBuh*>1SKjN`1e`fxrT1A3{4FRc^hQW z#l1E|84!L8(|hSIlTYzIQ-bfgAhAUxV1q$CNh=|e4={KALEEbrWN@o$Fh)9{!$3Io)>aki$Qt)<*TmBaAM48K_zK6eH-He%I42MjNX>!L!|b&@PXUwTvA~TEA;QnGbOto8R_o9FDC(1~u8oV(3-M0^lWsdpd$W0!|GD)v1GE+tsWMv7-H@9sgy))`a=Z!-B0ZH|WBs!D(AVlYZe&>v-_e6~Xj1_v%Ok83?H-MWoUJ(5TrjQX?i+apSl)sso62oldZv|qB=BIN z{{#0J+=(ZlKl^-JlKN#7+@E;eI1Z~B0o8kBuxUW9OjIpw4~>L;N`|xTr^tB zp{|80&@Uh3tfOdHBC*K1@Cd)Q(l4M>p;)^--Gae+=NLxOFXvJI!JT#V=S>_)#~w9X z3l6JU#E$fDcw}@olH^O~Ahmms&D6B>j9k%{vx;JdhHHr3hTB{$+w*_}14#TVz%&}( zM5NFbt_pC2N@c(^GomN&g39|Tp&#uS!Lg&C#XAM^rACbr%gI7Op>}WpMC>FzbKjn0 zvo0ElK-!`%=i9XPNM_x=jXk+w>@M4^nT3=JE4hIn#p+MGoqq^rVh+R5q}`!m_bc3xwlg(11^bgNVcCWodo0=v zqI#<^Hf+A-c$wjE`%}9frCWFY)zc+_nV$cp-rm`5VLh}|N4>Yf7Khnej|1k;6iu+m12|%!n+h_Sj0C{9L09o`|Xw14?Fj> z)OBDB{=>ggspfKNt7c6>yRWPZk?6JO6F#r-G{N?GvaHl!2ZJs<3ShBipD=#ct}DK@ zbCXU=Spw~cGTTXcPEVDmMuG>u$WD}t2=8AqXWp}mJ!=h9>x}PNl=2=7JDhFmmk(eD 
zPk3ZiuryR;b@g0H9hbuvi>(kvHQ+T^o((hiv1xOy&20!;^^=c-_0j_SivKYGC+4IX zrZ1-dH5=`M{*I)HEm*~=>?3BUVHIj3`Y z1a7`pM5D%CN7kT1IWsbGg$Ltyx@m3w#C+4nMpZUFl=8XcIAZP5wOd5j8OMae7*DG} zwO>M#PSulQQ%@g990;Hv9f4KigX}X0+7*8buJhK18Zr{%D|GVwx+y4I? ze?O5|Amu$EAr~}F)=`!+p`d}Lc9Uh}pFe%NySv*CoXFXZlj~2q>}~q#I!&E%u$A&I zC?R)RrM+dvT^RSicB_dS2(m?!E8fIO@NeG}gEKbb&UuIZ+V5|5x+hFO$HWkrtk+iM z6F=hyOxYSN-g(~dbEI*&IL|cnt7)(`rokrE9L@aEO)x+|L80=v+B?A``(U999Y`|0 zcthdE6++ll5MqJ6`GLZ>W`zTveSGLIy~x)1Z~N210ft;YVfhE*^SkwO{@-XcnoVH# z!2frF@9*ukcaZ-#n)NsS{~Uidi#7z-pJ~Ne4Jae@Cs!oG+jr8*mFU5R{v^M517H(? z-D^Ml`#XFYHjq#1WA8S_JzVPS|N8qocploM-Lth_|HW}b_Rfjo=wTGsK->DrqL{&B z>APP#i9h|IlKlS6n)ODz4!c96-P~`r|KBn@R~xvZ^!@@YeqbGpkm(!_xIAvW zp*hzE`_TJx2wLb)yV3l>K2X94VZHlM?pVb?B>lS_;Hqh<_XvHIZu`QN29Vd7U-%*jG>mWq zhrNPcfbQ&;%hWdwlyd`Dln3hY1Ho0!badcAmP2Wqlz6Wh7+4O!yV`fp8B;2(gVo%J7J0*G@t;!XJNr`)g#p&44N)4}DTblK%aRXEjR}`(jr}^o z^OJ=JG~1-LR9Y&}%}Rlhg)zQo18^Ay z|C;!Zf}xhjgq}ZJ5||oWl5#mP(%}<*xG{XG90E=^S>kj5Hy`A7F+jeqeF;|NI69E0fbthwo2OGj9!szDb64jGf%T43z zWG+6X?b0cQ%{ZH~F-+8ymau%=@t|TxbJ)dgcoq&FK1T5zvo&VmHjj8~Rx0-xlR9fm zc2>3XrE>OZ?u&$GktHtR0%2+r&aq!}(UeYijm;-_N!=ED3`7m|M;I*5IY_$IQ$GK~ z1{}<{%TH&yC<9E{)8}QmK#q&8)^cuJTT~swf29@)H1~)igt6!F)f!uvAy_UxjRT6< zZSs*w0he2mL{Oec{)QIY8k5E)ZWI2feBc_D!Z2(NCOuO>qL&(b;I5Dn%+L{@cP<2j za8pu=yH(8(?h^FHchIsS0jaUsJg*8eeM~{Lbih4ASJG2>_Xu`^Dn_k7Q!H(~1dwu$ zqLeDE^6KLSpR(HQuvrdfylN{qv5BC5Y=niZnonTYKu_Gl^CE=FZanb{O~0djn@W#0 z`%dH$zp$H+a!zjMz2nHa0If%TYbH2XO13hG5_a$u;e9q4SWtHHb%77!KsqGqj7}7T)LC7i5+ed8dZn z^3JJ$?YIv>Z0Tg=C;1O2aTdFAh=)4pt#98&{typ`X91xdcHrVI=YG-M7>}dpS@kVN zeF`PZEF7_ibsbE~#LcB*4|RDa2OtN_QS7NUsz$w4vaWCw?py<3L!T0a6GXz9o!Kkx z)p~M~zsm)39*Y1an_j=hX>Kt(YO+ourw^5Uq#t$OmYM4-!FXo=)Wd8in z>FvsQk;6^W+--jAB%CfA8v z0R>DUg-p_iKb#;FnWt*jKrjkrjB}m`uJ2RqPS=4|D&8#Rf-n{QqtBpWv<_?yL#9Pp zd_OHJflnj~wA67EY?n?Ns;pYeiC|j9=6&}3YyX}{7K!~w+`w!~N9O|($Y{RuB?w`I zRCl=Cpzd~)Af~Esh@%>NS_M*b_5S%;w<;Q`#^%KhbS4*^R>iROBDsdeFU^kX;nUc) 
z8%XYSGj*L;`>;(2JT-B7=Aa2mZ3wz49|1KTo3mNRqg1D_;EQ4lO#%{|`I|OoYix{%FyU!D+vNCaN_2o!e%(m}Fc#_wo(Fe>8uQJ;@t( z^I;gkK7jc`)VB|V@r^I|8oo%^sNbtk$lM?D$?{p5jigO1pHbGXiYeeXS2zU#$sh)$ zRD#tdAkh#B>lUtMd&kX!TW5+jXes{Il>99LspIIpYnV6;SkWZ4xGaWUd(l=r_M>wNk^@4PTk^Nk4knx*@Y`{=s0Jdkh+8!{2=3daJ3G zMeaD=jbW#K$gx56yqeEA$Wwn(k6dE8(64nssHKqV$|rh0`Ptg{l?>U*y$ytH-4|~! zDjQTf50!7RwnGQ9CLTwva|bU?g4}t<=o-&z&CTG^11aTDC7nX#Db8ljc>XjZR4eE8 z(mPE{Hrg6n968Ws9uKJN%!`R9m&oMFBbxoIl3a9i3$Wnafwe^7A=`^Fzm5|PX+ z+j>5{EUn!MUep*}Qf)ZVfy0x@Uq)E0E!U~eQ7&7k8rPwuLzGMPyw<TR z8+K%iEPMCf9Mu;m2qy`>SYvAXVU~^D*79*a)x{o59z8_m+5OgkfNkgA{Gyr6UpDpu z8t0m9#1<6+PCDwr8dF0PW0nMJkr6-XwfZ;;u7fPi^;=`%_{Pb#UXI7y24Kz#=!5yT zK2HZ{wAL85?_UKu^!EI*@8mx&r2W9pwUc5_R8V6k9Si`lZ(Ntktuxw=@E~RSnnC}R z70AF+BDxMIB}YSYTduK3R>pkGd!Ze*l~2kf5~&|KnX`m;_sGa=t3a|8sBgpDKKbvJ zVwyPKT$WAfPbA65p51sXev(BO9ku!1Z+HZ0`E*`}%+;Xz%(+bs_Y{8;3>ugh>^`@l za)u1o`Z(nlxWMft27W!mdBnc-OmO4$2;UFP>UImBM*x)Q?}w5e`Uj#k7pF1pf>N91^n7+uK!AM$P<9R1rU+yX zZy8X^=CuqU9(MLLRHn6Nel%AO5sY9ah)Uf^=l)!2%Bylls@3RkW@oBm;= z5&8z*RLf4?t}*L@bN>!HKpS8GA#!bpHgS`eP%Pf157*dTDyq=vPp0tjVH|%8@H2No zgfANoym93J;XGiYzI_(o`W+Y8q(No{FFWz90!qj^>V`mtZbp30A25#1;Fca^|W$1X}4)v*Eb^d*I*A8%~nX1;cw$Q&bU#T6?;q^M+P66qD3` zJ<9}fS?+4OODu?3V^k7x)x<~HSEM*I{uY=gb65%ZfOUU6VZ1drAvGqZ1V3bWLX^aY zHd}Lzr>Vzt*eQ<2{8opjpuQNcwF|g4)0{}1F^ZYP#a;-B! z{b&}DrWC^H0|1x}DYY_cwkEd5)YO)ZY+35l<>s;^j;=da^8VcNfR)3aVz6gh57MHj zj*xwH#njlGqn7&xzM)E^nWbCJK~uLWJVjjP!K`3k#dlNbB&n*i7SoPlo{c$&Em^6i zP2_iaQJVyzpTR>$M?X}7hSP!0o&uc7+(0j%pC*f|fH^?IvbNBh9`j!N&~3F&G3^{D zkdseefiTqAs-aN+0$z3HOhK8ROh1?NvYI;&l}_ZbaPk>5Lq%=*T6Xl=;i5xmK~E3cQC3HhVjhQ zh$0v%YOZ3?Wjx4qyIm1JRk3k8carj(O?Nd`Dn}NJ4$Y0yiuzDuOz>iMkzAX(dCVkD zm_?{tmJE&LFE6NIgszjwdU1&TF;EZgAx4ri&~nD1^B_0_+Cbt5U-KW?AVSiyOrxyb zd=roSE>0R9lG9Z#bq?91xsmjrWB(c+&S*P!u3)~s%iamYWUxU<|KMpNesL6E!FEa; zb4;E(ym>5Mw&)3j>%`9-0(C-JS@JSJ>;)j@*XuHX+ThWO)1Nxg^~51ZDU$2YPR@?) 
zK8ON-JM1@_FtxP;4K()-_LmFikyL>7LU@wP;$c7sm7kdL+gROOR99z)<^OnkVppRq z%bCypSEVP5{<5<9=q55P2B4;4`sYnWcE(s8P;$bIp0nNrJzItXnpkV?A3S1Afz5kP z9JUHLr1;F95&zXeW!^#|J;EsM$vn8j_XkdLjkY`vbP+4Z_ihL)^490yFkd~1a<}Zj z?RjxEFQ!!X+sG-496LJiFg*LiJB+8+77mKl=0fO7un6oFR#tG&f1msJ+3!v`;U25d zaOT|OwYW0O&2;ZNa1kdFzt-l*$dQDKKW`k2`t}Zo*K{_6AHTt4kT~@8S9`)s5-#ay z&j${WW;&D+Z}_7)^FO0Im?yiEZIOrPJWQm^oNGp=8fD7oq^;)!z(e2)bJD-5VF64} ztovE!1dqTX&aQD@;v@s!DUj&9FvW>)HEP(E*sEIMtC0*uSM4Bz0AHX9O(_EJNhw$D z;3I!5{7)=bf8K^RZfr*XQ1MiLaoecUy?uTt0syc}gK&`ql)0(^0Y+U*2>3&@ z0>Hf&@-Wm^%s?6%1{um;go0cRB@`;+eJUJ+$3nMwBp0}}-@NDGfnYY~=cT-*2TOu) zN2?(@yV{qWCD5#b`;IiHaOhxck>`LcNU;;t5isuR^AuzE5JF1eDG}z{_>D3Y4IX!? zOzs3WazSGS!)I%h1G% zb`(hRbE}vw-J3OOULF7yez^{q4|*L_q^UiSZaxsH8!(@m+o)?!zL>MXMgAV4n>JQ4PZK` zIY{j=#ASE}bav+Oq;Bd;7+&+v7DqE)nnbUW6oteOKEjxS3 zAw+F83r|8@n*meyFlfdj*jx#DH+iLg@)##XS zx?7rZv25)l3>Q97O|}MwPTe5%fNN|CC#?g+M0zBSkMacy#Zsf%gttMGO`K3#$fMZv zr#L$4UU}9ovRh*G1aB0+ffWuxR7@)gaI)xtCm#EWa~1HSFv}PO8Y);2$7ec37%+#R z6amb4H-US@Mw2vSS3a6GQ$NZ?-DaMn8?oYvlOQZlYz#Y95%_>0;GAVB;#!Lwi1L<{ z4PUct%+rwM1XEdk=wi4m{s?ORl_Fby?lg$L`QC+PR?k;K8&keb^O+^G zH%%E^GdH#_S)=`krXufwvr%fS)@^t)WVN7?>;Fa5e7<(mDzZ;CS~~`9_SF)NM^sEe$M*BRO zQHynz@<_6}CT1gsUtf4HFDFOGTk@6_PD;C+Ucyyv^oAo5J50lP)3D(1=6_# zNvaz<89KdE<3%bx>}JX6SbYF2P)G*W;-p!2*9Ed``nV}Sk5dW2>5cx$ zab=wR%*3fn$=Y!0_GQ!d8i1;zEBQ#WZUR^{5~)S$dEkCi7b44=_MVK=&yyh3#~M-E z16AA1x<***o2o9aIYOz}xI8PqUX`9LN_9e}BMMX1AC`_i)N+bZy_7D~93Vd!It#9O z4}|qd0wmLr943D5?sC%!@@qUvTpzY8YdVeg6-}qKCsd7g7V|-rV&w>HpxCf7*223x zMPDG!GW>817!Y&oyYlyGeBH0H$#`5+6K&oZz%LfrhH9M#Tbcc<0yYz}X|)bay00yT_mV8x71`tq)Pxp}xwApa1+Kz<`6Y;l=r22S)S+okzN zp=SBUjf=?d9G=3o>!#xf7BAY_`@jBSwL;r*tgQsma=SeS zEhSEl?=wG23ldi0L0}x}(b=;VSgbl1&tlGqG>EAb1Qc7~J|cK%bV%8IA%y%I##iK7 z&JQip5}7*p#0u;E2%-Q&CR6g@?37oov&78#lgZAuzfOg`4CoaV5#Zj)W)?9lWPu1c zIr>5|f&@P{^p4Ik<4zp;q5zRVZohOENh*cbKm0KKEsF0VzKNs51V`aty#=s0)9%E# z!3*<_y-AfkFi$q6sS=jiIc&nz{gZ$xd2kwslw!4%LCc#fA`#T7q)DqP)_zH_4ku|A 
zk1)jFmdaG~JQ{Q*mlJbkgcua3h_JLIG=q2?hw=481yse`8akCGZCc7J^8Ny6E_V4%#6IctmP`=N@f~0A}VoP;Bg6 z$f$^HYUO5DDFqfrX37;fdmdR{{9+!*Uqf3dnaJ`%FI>K0$~596lIv25_6qu>pKG)f zG}$1PH}1*sEaf59`pt?vpfRO0B$g-qP%qp%eo`KxP98KhW=Ape_qs`Nt1iBBZ)$Z7 z0@Fx{OLVR3q{0=@eA1_H$YR&@2*!V#kNL@ETIdHP#F2jBk2v{8!MHeD-HdL{r|~tn zPFO9&q5hofX53?0zj)@{_sJbxm>Bpdq99AV@G;;2raQ#$zk#!3E;A?RDB<4P5|cpA zxPW=KeP6#$rda>nmk#La23M4R`jpH%ce)rL5$6=KE( zP|Jt$i_Z&`({H6;q`FNZQikgBb`ZiiChg|2tVC=Uprwe-itR+8w#HV3-2JBBQDsq# zouWl#-VODULX;T9u&P?%Ryl2|aQvFa-;!?H=pI;AQG3w>R+V*|fTIYiPOR*SiIvwe zu~IrFIsqe6#^WTu_02#^ zXf@uYl2%x-gWnOPFo?x9offC+5-BnfNu=mH#Y&P?MN&0j3OK|N76*v%IGF%{c zQXI&G7E|_>(bJ;T#M6c8Ae_f)!Hx>U()ePPAxZPI+66;CR9X&Qb14Wbjfcx%uA-*t zqae-8PITQ9N)T~E2E`RA52DY*lRoiResSCvzGZYd5S~T+A(d-cgnVa^S0N)2qTXM4nH(_M$X~jj;d=R$BC@rQC3&=K*C-2(kxW z0MH!2=nxhb{}7W0O$!8_N`Vq{A&tyb5*^uKrpcWiu&(;}%|~XhE{%k|whNC4!YqqX zyb5MK#md36n}^4Ut<)evBY_3`Yd-&P9xSt?$096i3yi;P1??k!skn59dywZs5{Ccs zBIUJ`i}Qnb4#U98pz23SjIO-o+HV9lQGPZ$tOuS@W5!IJbUW;}xg-!x4TbFD3KJ=g(RlsDN~FTm)9DO&`iOIL z_3*ScJYCFxnwoA=_=PTNoTA;{T5R@XbwIms)mRskO>wjaQS|NPCD?z;oAUPgyw+mS z@JQu~BGUgYxx#aze^Z!_#C46W4%_-16>E1{iWxW8<0b~<7szC(E&AI+b% z3WDD==HzqdkY{j2Y@5K7r%hq^BqI#IK)gg8v6H=7z@{$@=*d2YT%dpv!7y+~yC)-s zv`X+Vl@#3O2tdhY85Hbm6r7**^GQ_b8#rdpUWW{xL1|Pxlt)!r(%2k0QIH-1MpaOn zgx}n~S3l7Q8p^{C$LNQ`P%&qXuQH^;ymdqc@h8~V>Ne%6?V^~ayy6t=36a%kln&13 zSTzP;!n<-`tK?i&`+_xW>TbD#H=3qwc`V58S52v03a+sVMc;VotU!{56z0Nm2RWo+zEqVkxf+a4BaM#Qwo1a zCJD?GZ_YJxr-~v0)|N*?%S3{5GO)1#qR$LiD6EcUhqhx5jT?FkW2>=3=CJb73ADm) zCxaf1ZhyTLc8?nSOFZX<;g#clGo4>n4`JJcaF9$=l?h)i|3QM*e>@WwmTsv8J!($V zeF_s8Zvdeink+{W{Uj$i5vQqIF++V+fx;S-PoczoG0CoDp1x7PjyFysHy-70NLc$M z)`;~%cfQ04=Jmy&;+V!54W?qe7!Ffkt*XAPv7<|WAA0 z0AsBcrMx+2c-OY0135)5d^I$Q7`C6XBvhOwsy9|oeTGm58c$oKAHkwGho^E!K@++) z?xNg*atR*B4C0f~m=fCaLoeCxdjqP*{xnS7+~E7ir$IEi=T3Hp)P(1_rE-&CXU2Of zeT$L>i{}QGCb;zYjjgnw-ux=)WODVJ|Bwo0pvFiY;L0^T#P6=2AWUNme1@M03na?1 z)tDMx#reZ(hSIWz!VOq2G!$bWIys7S{Fi?iX2{H~RhkR9YG*2uD{E}&LAT98C+HoN 
z(=1607YR-sbT5`%_~eL^tBZ3^XrN2lL`sG_bJm~gp&E*?@RH;sP!oOviVT5J%6xF_ zJvn&5Z;WY99%O6jkY}zmRyAaUb&9R5bHvB4Mh>DL)5tH)^Jx#gWLM1Byh4A2eIsSTEr4y@@Qc zlDF~EEg&Aa*%=No%bt*K)H{Y#mTJ35cohP(0Ufk?BWBLv%|bY==vnQV`R)yaFC9|* zx&ExL$$L~R!81Q)Q}#`kjeq|1X@ohWz0k*0E!(_8m7g`Yp_@19FsqjbX>uY%B&^aB!|@wIEY5b(S*AD%(IjgWF2`RU2^LJ$lsG2P^o3xl5yvg zaCfYYn$v=POmpo=Xx0i`QFlbzEs(JQ2Pio`;Jx%gK><0u)a0_TL)1Pn5hX8rn5V_1 zV_-g1XkfhMBarVToq!Jbz#KABRV!TBkC9>yFG=h@dWg9DD6MhtygwMALjnBYob^3( z_{Z>9r_<3FyXt4uS3g}GwYWmHW<2Prdui`?^*j4>pni}3)IX2Z&l{e>LEgYB?8de8 zrGNBUUG;-&^&8|Y_4_n-^&pKeMx8F&ebn_gJpwGNsV=Azoxajwrv0y9y6P(F^)W-5 zfsf-uaj^p8-PNoNCJmEoN=l$ao6@od>RFd*YAM^v#p@kessJ1M7oCw*llf&rmo?px zQobbP0e^@!Z_EasKw4b-vi5=OANJoG8%;*)Kmc!WsuKtM?=?aG$HY&ph5<+>>5#Jq z*;b=ykM5u+8ijwQ)thKT|CW`^LerRqG8S@l-e=b}M#1}vi;$LIVq2bN)X)GcQb26O zi__S%rlH_dUIw#D7e9|2W}FBBh@AdPYlRos%&4@s%@WdK;{ZKT>J9D)A;$GBxiT__ z8vGAdL{&P_xv9YTa?|{B&v(L6;qzF_uhQZLZ=G?+5%_i473W+0686tQ;?qWqS2UA- z*tmHGDrlmtV!~@nyrGRseJ9MgMTu_7eLR1o#;yh+PWr-qMb_G~p7d4&Ym zw-dz?R+ZIFxrL)%!j)CyZBvL`J+71ai5tSX$ZQ8G>oJ1Hu7v%XKl&ATF+V23MeC#) zw2A1aTJF6LXiBMEv-oI#LT0_;1S1S?bi0V}(ru_Oe5o;Nmch2rete%L4vEg{Ri0@n z3S;-%Qk$VMtp}ykI*kDr<`$PAEVqjB&6GLY+#(uuUpQ*t-aj?WATsTe+Oac zbgsDlrxT8EoGGWWI(U-hkI?Ocz*VKmDWs~R8ZeWQJ&daP^00p~KyHU=vq2LN;s`WE7WBgpE88E5VV#&{Nj< zK`CDVf~tzEQtcc&BRg+Fw{ynEA+HgPzId}#q@@ES=;;joeWUCCWXS6TLlad?_Izrp zG0{kMl0~CzIugna{XTQXDvTc*q#p*q;%Re8aHK2g$%hZ}DC%79c#vYhI@x1^(xh$7 zQ!Q1VSa}b?7Xr7$f!D=Qhm6G63hlFiQ3#%Wjf5 z%R*Y#k;jOZN-Q?ekE%fQ1zn^l$QG+_@fLo4_W2=`4VU9EcDx?;#~Y5~yC~PhpZQ$F zPu~R|$wbDJpY3KIkqtktgM2JDg1UvU&R(XgDZ{SYZA5E(dtV+H!rWVRb$ee9n|rN; zpV^@k`gm|D>I6?Ks0%xS6JGXC(5UK!Uq16lHTZdv-JlCF-b+Obp1k=9JtLY9J6-iI ze}NkEiMW%Yivyc=r62`0_P6U;2XedFRy-yl+V5OpmORZYyS$kte#p}um?1*jWffgS z$s~GhOKisaG6OypZiOOmbB%4u8t0_N%I89zgkCm@LKgCJ6vI^K+kk?;`CC9PaNDB* zM0E?}`^TZb<;CsMl;I z9&zJwP%(6T``cFRW`3q0;KzXK?MZ@V%(kcmR%T&T_yuqA(_NDgM5===7?h8io>K`S z$eHdpm=t)_^UDY$*#f0-JOQHekqR+3)Of1EJ3y71(Z@K)IodYo$c6Vv#v$1SWm*sn zBgbos`-}u+k^}}=_eo%(e08Agm4X6-qUmX&K&qASz&uF#tzZg7RA6D%le23>bkglA 
zH+A7^zz7`N@w*8X2Ev-p8}Q&}6^pL`DHxBpZh7#bAT(f2xn=zr`<)7SZ9dRjHt46_ zo&+6!7GOi?pOW&T)&@qk2}Oe=OqCVmou@b`%5cpVx=%xcHczF_mGgiuCggrD$1Yll z6N(#;R}iv#;=)HRddPC~?DOa6mqZOHnx#;nHNe!u(Bh*aza?k_Formf#j;j7&f!kb{%Gd6k^ne z`=C~;=PE0Ha{-+9Ya}5c{SufNwx6CE5z6%B`5@nd228Z{lR1Q1C<;S*^700Zf*TIa zw%-ExTdoPu6`V*ME$FthNt)S>of>mpWu>%@ZS(+{sd&7Hx|V4&^$=B{NKJgp3KX#| zdh`IQW0adzXC@UdYNw_?x=zX)M`kb>wVX`=4)2INAy5nE7ZoU#2OoAjdJ|b_5>M-M z84VIoT{@Qmxs>X5OsXTe&Yba>;$cm?`xVB9db7G&No=NDt|B?{Q)kJn=~Nijq?XJD zynmWY)V3h34GUNie<9OV7|_DB(oLrCR_9fyn0{mV)MOivzaV1XR1Dw7kg>+*Jijf4 zxNQP#N5AlS6|QnMW~6~w09uo+KtqiI0{r`|rx_1k@?s2j8?H2?sS{qKlz%e{Txb<8 zd1BRy6C8O6Bk`3#oH004e0XyWPpBnHM?%0;G=t%NcCHP6VIR@h%@S z^ytJBgB$d3i9_|#FLC1fDs?e6_52}B=$Ye!s^;r6Rx1VXf+W6YPWWXM+@E;eI1ZIS z48O^0iZ*|)vx7K!d4BRIZW8GySiIE*+^gL0Eumz=aqpYsKJiA**fQk^w!?z(4?!dFg-p`VWOU4Djy5aeaFjOx!HTI>MUfmp= zZI$LcfVY8CqY4$c(+tz4=~WgTT*JcLjg$EH*r*Bp1oLAm#}__5Jr7JVz2mGyZ5Hv! z6DHatuCYVYywBt_QwPo;GF)A`U;wMN#xAVg5S77eKd~FRLvL9d?wqO}5CZa4NhX3t zXRlq@Pi2IKwXae+3XUD|klc3`+F&txM{e6Z41FiTjDS;Q)=eSDlNjgU2Js}N*J{jq zacQLNRwAWT$XFo!RQD|0cf^)ED1@KtC(ZCvQ~Z(Pr+lwc;inSEW#Omx4I|`}&q`T* zaLp8e3IV9bc&q?a-d{-oYK@U!;KCH#Xdkp&Sms%)@Dne_`tfRt;2JtaMjr57jE~O> zydu(t;7^%B1Abx+1U#hSSG`G8W1UoAbyDLsx|Tr`*q~Q6T%5<*XMf1^u8Ql-7Vr4NAe_GL}oWebB);#o2hCl*Gz83l+M&rnLPHbOj^hVMoP=n`PJZS z+LZy9CUZHn_)s3zXyhtNd7pN|B0y~+(Z{NjtL&SJEd*r0CgVJCSXV&+2aMh3d<&wl8rV_%{8M@6(^SsXX12KUiW!)Op!7!2HoWn$Pq3L z%BRl_s9TYB1AeoCz$MBrihB{-OT&;kf(G4h{)36JLDv!Hf*KpyH@x6F$Wo&XKkK3^ zv6Bai!xl zh`#YyKU^-xZS1K~QgBtk?UVv%szEgt=`%TmOn`Y?qNLTe=WAwU;ey~3b5@@32 z{d~$P`%$#EVm+U%aHr>!e4n(Fswi6zg`{?3W5>X5o`@c2Oh1`b%#0u^?HJ6%^_`vMb%o;wmJdRE@ zb5to8q7y19g&~L+LtdLoT>^*a!PDYN96LIvs)nAQ@+f9;E%zI7*}3*3B~-~iXbw+2 z(Sr3!q{bK~YT(pporYHmFP*Bi zQAZrB7D-UM5bfzV$0zyG(e=f6PQq}L@FYzqKv-SSqeZx`x%|uSYwzA^WK&r(s)|r* zj7SAOsSKjfvgV|1Byjhe?;$Hfj8CFm1CQ2Kmu|5QJtmZKHJ|?m+LKKemWyn#S7R2ohJLX@ua-1;{s<0Kfo4wLP|VV5sd9 zL?>Z^?@?q2oobJh;1+)WrlyFJMaob{cv6&V(Gm=??z+;|x1J5sV%j4q}wP`Bw zA5;x@)gnIWP+x+WT*{e#;?d}!Ye(8X7ucl7-vVi->xFv@E%^-|ZW7H+(Z=U# 
zH!dHUrMfWOFrfPBEGaf&PYDo_Oeis63}S7PNL4Vwlt> zW|Adx=99v!FDL3{5?^EwyalBWtL6BZ=iNuEb(dV;;MQeie0|=TBB9a7i^x&6faol6 za)q_+q%;b8y!=_jm1VPfe7vbe)qU=SOed8F1XT|KwjPnbXP9U1hXi`gB{|(mY186L z3)%EVc?nh5BuIgY*fB(Q8;-X&wPs6@c8MEtlLK6Gtr9OcOU{$+f-Z9d(J;YiYcTjU z6d=*fXk@Fj5U3{?Jtx`01Fa?nXDRnI==9tNK5Ps}Tvw}Lx(<6o_P)Q>G8^&r~WOO`|Rv;GWH9)8^&nc zr(p4f7wYuvMAVdjtfG7ox;pwpw01CuX|rB$)d5WWZ(9ZAI#L-e zk|b8;UVeF1Ev|CBw|10$b*REcFpfJ>yBc(bd7kD%bBfWO?a6O4dE_6Y_)ChHzvXQ;(D0E_y}5R)#@eNPylj(I zcPeHN#!+KWxUlvqKHaRHP;}@*7;B8?YQW@ea>QL2NLW~NaF3}B9UW|n`tYNZDjfLW z!g81`I;87ZA`;l03}mVRMh@aG_gG_?c3MOh)3&aRUg=Fd2|dQ+OwHX5Wef7_oEZsw z6-X#144-{vHtPFIIPPK+#8X)A1K3I?*EghO=!yRfx(CQLs496Z4MyY8XBeRD2|Y!? zh~S6N38gE5URcPxnTeQlAYS%6zaBGBF5e6F0ehjh_Va=Dv&_EC%+OLE^eS^gzEUU| z9D!bc2EB)SXn;oK32zN!POsuP^qpv?83il=1y<9wl_y~a(3FMWFzu+eGCPiuIi)ry zdtG=3Z`KbQir&)D8GBnPXi7jHh3TtfEh0F6;12!pIB<#lQwRX`iL`1tZP4SFZ*2bsT> zj(r!;^yt)rob>1|rh~q(9%j%^YixBS+Rnv0PYbXwTe@wmSt*$eY`!oQV*yUnt!r#0 zqqm$&Q5@W9H)h?Rsx-2e(i<>I+6ThQezS=eSMfde#$cCTnYg)4`t%IC5(lrjQv%+f zTSv#;-tl2)aC`*gt+CvQXWlb*>wBjCj)Ezz1xm|PN5E zl^N3|AVQILRyxE!;wH@<>{=d3!6UAX7!M2Kc$&FMStXL5gB*^aw` za?PAOZiR)nxo25;F`$qKbemLIW2-dq+CuG047?UMZYKKJ^wa9Cp&&-XNkJ@ ztpZ=Ht8C_TUK*ue{czm#ht>>>wnwxXSiGy!`V;b0oc*X8JLDBXRfav=4^3z2 zxerDYjKjoON=(e-pi3??lT;>~=X1+_>EKVC6oD>{S#d)@D_Ddvu|duxE-b~l*_}<2 zsj+Haw6tg0PRmjfu;O>-=kg?+1MXMBgA`M(u}TSZVLhSZRCwDcZCj}1DK$G_jLTZ{ zO?9&nNjZmDxo2++hhKOYlecP}%~m*<-480Po-*u>ioGA{kOb0a!kDws8e8gQOn1gr zHCTM}`M8K!x_p?PJ`t-B_i&^&_GIFoI-P1;>56m~Mddw&E$nNJ@y4m9%h|mYYHVRE zit;WIMGklcfRp~wxs(X>Afw3$7^2(HN=^y&Jt`wHpC&-=LS=^jC9Nt42m1xJ{nrFU zPUI}Oy^!_}GES~>DcyYrGMmTeK#KA!UiF5Lt5vYfD$%rNEO&o0GZffy{F%eg@sG@Tk!;3@m&au1`u-{br*qutW$+wv-_WWG2D`KR9XB zTgx4rpsDUDyfAIUQ`@pTp+!{M*2-*8iveK=nPOQ|rr*D{4sGZ~|0xr2}W zt6;f9M0h-~k7`44^f)2|pMM{uS;a;=^X9OCyluE2DWm?h8ZbJy1qH(aT>7cha5_Q) z7|Y5Rl250VzDUo;uIKedstk^4l%>m(8L70(EhWyv=BHsLg z$Ijwgzhk9qc}`Qv3U%>)tVyjrKfNgZ5+`>c=g8`%95@Qikir zB~Ud^H3niZ*>go7sIpT)6L|Ix`&8!KmncdrvfO9`VX+hY9OKcGx1b9gnYs8c#r 
z|G-nNSVy{)KSgac{8o(_H)l4DG^?zCWtk4kmf@-AjQdnx_jjJ0%36_yyQ_|y)uYEM zR?EeI7`C~ce1UpViNuaSr^|RM zI`3!)o?!fu^d=aCP$`}RsT>|;3C}bA=-5$Sw#a1(|Idn(NM*FKb4f_5;n`APrnxu6 zh?fpYkyO2p zQhgj7QMFc2rm!bG%xl=$v7>=0xNaqrNfFR3WiYnu(W`3of~%p?*kol(D|ZxaH;1dr zBT>+J;4-K7ziSnVFiHfeBoRSMWv(O45ssFwl*N+_3$(J3j;U^_{?~>`7V|UPYOufm zZ&0F#A6cD#MVkc`5tNcu#g|S`hasGR>*|0f26V;AwIj`BBC=i1uQ|K56)Nul4(6|@ zw6&HVVcD@NxlsemRY7GGz}2Aw27*xh)Y|9D9(X42q~(Iks;uWb(im$Z$q8lHEX^sX z3JVVZg1?`u4AvlBUlBI zqDQbu8{E%~xMd>{b7f~3Kv&J@I>Tr-R8AEya?n98G8{IK&O7AxR#WC);lYBAQrCXQ zFb!-%Nde&=X%DkR>EObdvc)a2u~z`8ZNTDO9_J9J5+!{jiNfMO_(f0=$c26Ii}2Rw z3lrH>De_V*C1f1g7%W7W9$Vy0v|!RR9kidu_h?Ur7*JomIL=DgqVOqP6jj+xudDYj(~*b^N`-R4IQP?b4yHMS@axD z;1WiT;`i$KrTX@cW-AG3Y8Qs&VSB_BY$0=)JFK~>J!;%rz)xCE33B=>Um2{?S>`K) z@672agU6k?*RL>rL_yJC*kMK=53BmTvYt~udtopdV;YPU#7v>JKc2vF{;hwGvt1!A z9#bZmqyDdd6eUT0@0o0iE6pUK?2Iat1Um7LLw`#i#cusGzDoJAF6LC$2fHVcNAVab zySwozgb9c@itP-C(gu^V5hkS1Zk&i=!e{Iq)&3|0PLeSA#`c1_b+%<@oeEEf`Ob06 z61g>WLI!Rrz?5cOru%R&nj&c8B3*^vJHLihWAhTpR6~Z5GO8-45czA^DTOQ-j!FmL z#}kCkZPRZfm8u9<%3+U=9cku_#-?Lz(J_RAmlTs++A#!Ed7Jyi`Nh^{|G2kxc5!s_ z<>at4IJr2VEqN6wur=0R%Bi5SeNc>tJ-#3R;Uv@(hQ#8LEv!{u$6B;6850XwSe;+3 zbh25c6H>d?Y=Y`#RC?n^`!nbM3n-eC#5bvC4HB|ZCO+i>j+ywrI_Zs(O9?MQ2Lx2n zC*(XCY_f3nps3!slcV?!k>* zv{KaU))wW4Ey5^5s(2EBW}*XS@nCQqPOgD|yxF`Ln$x&GkmekG|54+Mu(X##t?B_8 z`?6|VL7?@DGX1GIYmw=Xt8Z4qKl5;Q8NpcSqcyfVl?@X_DlfgrC*NZ`JLEg8Ms^Dm z_3k5k7esW0zd_`E=TR)6_fFQTp(+D7l*hoq*qa%RSt+nB7a;LP@HXY&ybACrZDt9t z^r)(7)xAIWMd{sMU_{d0V}=mm#0gL!S}F0^7*m;}w#rMs&g-rSkk54%HKSvy%9kOv z8PF@X;5X)HeaO3}B0Xvy2AysrUhV_t58h+)Jr-i+gY>aHdBC5G@Gl%^@ zQN@4)Wskp3#^>?TM1jpsMBPZ#nz;)HL)tgWOy)bUqd#UyH+y z!#F91ArPm>b6vw;Wf`jAClDCg@@NB(9ta=!n|jnS3OtR)N75lNM<6A#2(E8J_%GuR zFJ{ZCF~gd*5{BOEO68Xg)|Yl_PhKD9dtgMa9rd z95iMHrOHL2txj5nwX)0W`<7LSS&`34jXh<2U%QzvH!>RKlN7JOv}{wDm4kQr=(i{x zrPo;B9utD_^Ebj&}=~K&*?H9nW5Oj(Ihe7(n zVUzM9iHZ^M?Rj&|&Ll$5E>7T~^Xve(w7TCvKkJ^xu44y(sWal$9e`oJk|!wf(ozw} zo$jEQ%XjRIaOrd~T;zu9f>gZQPekpPgT`XP{57@+-7XgAV;!%n;*&}~LRT^6myaW< 
zvB#}=Vw3N8rW|yL&3&faZxwM#QnXWXNmMS;v!j!QgFC;)I?Dzr0o9seN=pYRRf^<| z-GT)iYbu^ZZj4l?14Hdfi#(~sNV^N5;UW+(F`QFX=#rdaKA5hzD-2fPek#@3k}-9A zFLA!CI>oCpbAdD!xj4;`i&dorE2%9%vnPt24F9g@ zRb(xkJH6@jtc5tab7d{8QrT3QljQ&3-kUbJjb!P<^BKP)qwhqt9bwrbC2Hv#_l=+> zyGq?!*rL?cbHChx2uMO31ULYwrGNc<&dD7FNGwT}JW=-al$iiQB#_Hlp7T6uS_H`s zg(G$z4LVy_6c)BfP~XiHOFgecc;bc`quSnw^c$JCd&W>cJEaiwH2SwD5QBfinXA-Q2gQL9; zfe_dkItB2WFHf^LBGx}+v6M}34{EK%U+l?!i0~I!B0cd71p~y-luQAaj$ZZc4#&bOrjpmIE@h&H8ineDFr}9Sj>`1BYB9+$kYMh0Iu7iQk3>!T5$_t3x~=&*c&IIN;vSlrhzIcG~BLWxKdCS=hbL`JbD7hAoOMSccgJ_vwhm>>E*@8=j^dr9y-k&<9 zmG@z95zcrQ>);LwH4k&G5HRP!j1i`B059*!hu4-+jA*5DrlZQh6xPf$)lH#XdL=W6cr}B`*mSC$n3Eb*=b@w`Tj+0IFD4T}c zoSyTPF0nzuC%Y5nqwk6`uF{X&x70^O>6In5ImVg@i$CrASA&Aj#=$B-n(=*w@00-WAslH`4L2B>VZ{6=#I%s zKsLel*u1Ko6b#u0F}uCp{ZWSjeQ13w-#=4GwpA5@sqkix!UG7HCX zv8h#DH+<_F@(XOdMfr{f#;i?RU03o=Q$vW-{!E$x&O-ULXoP5VUpt%gI>c^#>yuVu6$Gm2GKZhyhbzQ0xxk)wWUJmVN9?7n38Sp$pqX+nW&+k*T{_oe?PvU& zbgswR3H{YFuFAN7kPwH#wJBDJl&hJK;s^$(s-#=J&(gW1U~Ls%UKM`r$Fs`!kd;lP z(b|ipHjb1`)62Vz|Oho!`Q4*xj;16PWTIia=!`Gs3^xJn~oWc&?ILQRl zW;^ILWO^OwR}lT%7DE3Dy#Zh6j!ck1ikzp)cFy4oEXHrJ52DoTW3x18d7|_(&zmFB z&3ghP9sVBjbwTN;L^=G*Szw`Td85ZvbKg*=&1{ZUgs>{yMNMf9Ysf8>#twnP%uLyq z$!*={-SsRh=EW{yFIB zJy+Is{+5(k?!3+#PR-ELG9L$XzVPKX5rY$VhTo~TepoL!69oJiXYwjfIpfgEoWpR| zR_9!e|DmeuvvU~uiB3sBgT6)`RMcEXQPCDTIO_@ z0WDJ6Oa&t%lZ=5p&x0nzES-Z*7a;pmf=QojnJ^#opJ>X>NwE$yZ_YOn{xT6Q*RLdT zI2agmFOw`(gqJ+>z+V;o&Vml{oOt3($a6QlM0-jOB-{93IZBsNut8NwR_Z%K^rH#l zvBn{wx^$9~*_Nrqo2ZCGJEf%422Pd>1#ERr19hO$ggE$-0s^TMN{UrI8CJGT1H~gj zd~-Ef<*oLb>I;umG5k9W^$vHXoR7UvkK@IoFUbbbn2`-G_G!x5x!b1^y4KHT$2M)V z-$b(?VZo)&jn&(nLVZ}tw5m{X!Aa4Pky1w$l256a!)ihxKMM1D00B**$ezL#K@IsZc8f-e{{&DjSy zB7l*oi$k1?oIX0tKgu8vU0jhGp#Etk7jL&rd!wvVPf`P7WK@6;>xA`$j3|rakdQHj zrL7Mk!~bkUEC4 zFq*5Hih3|;ERKsbjRih!imFsNx=q{yeccFyR8M0u5PH49!a?U!vTG!hwo_p@+SeVN zkUYR`68-lwv@2!aId@eJQ=ROYVye5!9-w!H*5ZQ(*toTdF2lnNkGFPPABzGx>O8hx zmb$^`Be}QFq73t%rRmIR zb3q|0Bs8W9VVTWAlu*j}!Td2=pzB0uw9YHg7=m@|Eo6O@aHmn2v5V#iuPxbS_41j_`e<}h4 
zVxFE&Rj8mLMMmk)+9inv*=tF)%4q#PC}3~SrvuB2!-jW-u7ec?;bz87ik6Mon+MUm(9nW=Mb-|;Mbj9Q(?s&uyWa_yJ0!CNGS{8@I zx=8R^c~|{5n=`p{qCO*BGtD0-%R6Js!vc;4%t?eO;rLu9KG~Q{S|_hMWK)%K8H6Z^ zwlI~!>)(o1Th}Qkyb4=xkw3+Gj3cQuwfW%so$C@L_Qce#7!#r7B7Q~EhyNKxorDsX zLp?}*;)^%)w0B5rcZv}=%+SL1N!Uj~c)LhOQm>Rsp>tDxI9$qb58vt4N##_H0+-ElBAx z;yg40&i^+}!Z9g-%8v1iB4U&0r~ZxF_7PR-<`HM>juLbY+XSAQCcho=vTNp|Tnun5 zb5e0jE#T$H##CnjP}L-9GGeE6F*547_f1f$^#+r zD~MZd&(`9QpkN z!}9rgB(duF9;-AtZZbarY(SI0$uUbU{9Q>r=g2VvtMKi31Bv-jRe2fAk7N1u>k#wR z@0-(d9u|HnBq5uAUoPOm=y?o?SS9t zSSp|e4NGyK>Fw!NFFeb!Y28rPg{a;I3_x^j;<2mg**^sWpnvLxOaAppy%448{dM@X zsuxr~^!9}VCG^c+SwQ3=OmH6!gPp`LmKp32v7bCF{fBS|EU+>t=aQephe2FRhYWUiM@GE+5LS`=HJP2H>5oY7MK16gTlJUf*fnjyD7g~9 zrpqjMYJ%Irsk-&KGh(!s_EdVhChe&%8%*PXJ%O!elb)jf4Y%l8XQHpaV-T}9<>{V= zCyH{O;Cm%~rW2@;cm>jwasw68;nAV=gg&SM=`;j4C-{Z|Be?8~Xw8ut4cbHA{;@JP z*MilX#6KI6AvTJe!t<$%oVJRD4^>u!>sW!ShhRNF&YcEd>ZmH+E+2clL!-ht3ZHEe zQ&+0P_E1$%rw?Jm@FZG1!b~soCdl?0gA|dY&dSODT*0rhmcms8@GE#*SJi^ISft*s z%uP$)8ma}f*cc%!&jCS-(IbV$8}7T{N|6)Y;p!)ylc4z&h41n)~$sDpYq%ZVLY`aCed@g+^Q`zm7e9-A*@kpji( z)hf4K1a@V%J6(a!QMrR-Mf#M46vKoU0B_{v(U0R$QYR7E_!ePJMT!ya`zV`(dnixl z)d|JxXU1;OOVq>i5iD}f2QiZuWQ#|SWtd{aG7mVL;8Ib7*ZtQr;jl~M59WHt=3QNf z`68ua_$H9Qb(WwS_PQ4wZkVERllga%U~=qPR%KBk;DEtRAq=pLj|Z3~86;u;hf7jpG0!5+^9Xrw<>{R37AL{Ne+YAA zZH0FU`+H_Exx&k*P$7mFvUI+N$F;AlRo}3I@jmevjQ1J%i!!%`$2#6|^^4>Aj7`}L ze$jmuW}Er(_pi6tiYy;-TZence_^-RXN=Ze$+m^e)5OUKFCh%?mia4DC$K`6 z+LHqnkfX3-q{5QCn8A;U0SFi7&A`*fa}OziTX5}f@+_K~;Z=AuId4Vaz0PLauL2K*z*cghT>;d zF52BKdGdb?!^N-QIT?H{6>;$%ZoI15bhUc6fxQn+QKK}x!sAR}(9EKRAM1!B#*Xw5 zbl^0Vme&RwxTs#ZdQPljaI?7zj&DiXY>wzMjq~p< zYQLwe&yj2NWXv}{g=65`AOhvsWBQCMYmtS1LBynT_~evSpM_bWDTDKMUaz-~^{jou zy%PgXE0AF+xUgFmiScbEB@E?a$RLl35Z0D_L9$9SB7v3Z%O_q5n)CRYjuv3}$Pa>u z%yE3&uy1jeT7DUiMFPbje;hBqS0Y}kDPZ=|#}cTb$c z?V7GvD`FmPFcUlU3C@G?GJLiuq@Czm>v8L`H8!MUVQWi6S8TlJDcEq+$1qkN9K4Ml z(tb6i1Pzw2oK$-g6X+(J-b=N~Pz4=R?e=e#^79lKbh_+Kc+Tmi@ho{mgPOceYIaVZ zq+*_!FXHqyq|@cS;WNWUGsFr4I7$?!xE}h9(?h)RQrMbdpl&oS+pT`&e0*ppV 
z5;}Fr&BV)gD_>N)X=LfG5N6UM{-YK)L1a4jzQxhC{#g_8fddB?(!i*!CZ>()+3DBV zpZ&l*?A5Y(NKpw-(La5Af$bfh@M(_9kOfsspok1fa-`yPb}`+Vj8E!&D8}MO98tq{ z*IPAxn&x^Hll!S(_*D$=rxas1<<<-&i6X4fG&R%S)q6FghAriiNbIf`S{ zetCEZiFumiOeI;0np$gFO;hc;cZ$_S!i7tYrv@f!+`z=t-o=3maBQY&(|#nK{eDN*`Pa zpPB%c9ugmV)LJwhjkEEV*39HNIPsH{N7GBBd_3|D@_hXBW0+`>8O&0P_XV$*XXfeX zLP;N2rRf_&5PPKQb1ofs!c{{p!_7QUxp+Wlw^-DJSRA$8K7cPKw0-Q7_$m_u=@gyN zaT}QychuF2Ed$;23qMx{Z)X!WhmSZwVD@qbMpC0#-X&qN&U>uz)Gb@eb6JCa1~x$d z+P_09voXn1lxTL1P%h1iN*q#W2My6)Kf(Y71ad1br<)zb<=n7m99BvLOfgtNxM3t6 z_L$Ywmv-3enkuZeZavmNeSM@u)~Z_WtmFjtWVSAM%yVkE*OtibYKsb0v5OFM^R{A+ z$M_L;2Zw|#&R^hWFDB>TKC$Wa9RA;Udj93~?3ZuTo1Kf(5lQ1v+b`4UcyiXCQM%r^ zY9St|6WRN!Y)vGxPryY<=X-CX=G&7krA0N{V8t_2<&TDYhSC{@9@Fq7{o$tbjC@E*sbmQ8fB_i;gPwzZ>NZ?f}im;u-pvxG#1@YSGCnSbPlP{QDg z6pYyxYv^Q|g*)&gL3aWwdZa24&nstjf#-Sn6vmqBaLQBGh5#l73UxiG6un%Q6ffZq zNnFTs`#U5>_H_ze{BZYCZ1ByLHkJLO#!8m89LT!3`RtS^P-lKpQc4DUpKB#oDkYzt z@`WZx;V+?>4F3!wipo`JCwr`M?5#;XphuoiPr77-F_?H!PNtVD=}ga|Ckq|(m^0p_ zHGGk>B6S+hmY7(pC{~q+Tz9zK5f0AlsX9C~yornGfj={@;bA!Eyn#?9un*GjiGj$W zkqcN`ctwErj3gLH@UBCgS{igj_wHJW)>+3bB=D0T3bk@6w}utCsMzxSU~9ADun zZ}@0|03Fbdtoqi?rQB>cf^HR4c=~4RgXyZHImS5iWyS;QAJ>M|;kmn1xb&)gz|HQG zzS%aM!G=l~Pu6pGfg6M@2p%wAR^|*31WV*r3IwXEI0km`i?ar2Ay$Z5?-AahL+L?f zUT-7*RZ}Q%tO6KZl~Y%mIpHZNnJLEd<US95iiLaeKIAcAB$-4@ln_SLNR zURPfGYF2xXHCbHR6sag2sn~XJlh|OXtk~;%)u%LwFS}Yx#9u9?JLPu&Buk5Qmc~`V zC67(|@8vqU)R>r|hE{ps%6s?`26f2ORgU4Q2d9AVBMx$BG-q*mUl7#e13?VF2qaS z9r^3A4n9Wx&Z(Tpo<6;Nt9XqQ@FX6Liq)|w41w1W^C&R})%c*o2SEjn?Z7-(c7sk{ zm%^KMCPuGX8+BntBCDeP%OJg8Cgc1z*RvP@jVzxL64ua2@cT2!>Vn?x46+nV3TgHu zbG)&RmX1^?hI3!YU7Ea9!=$)1Wf_rFnBTc4scDNLMU-3G3@JP-A-~4>*K+p!7{PO$ z`>)96OP7WjEooBtkNtEK$2H+}S?+P4BL0%Fwwb||I8K=VfZ*GLV&Z(!hX)ptB4GZQ zE@Q0ZB3ai=e|ddwABIJ3AJ-%h>f+;V3$}(z6=7Oktp#t$W38*TIuxy1NmteOKI$rA zXpEH1D0-~fLs($^#5MV({k}c6bwJqhzIx(C_DXz!6ihG`J}QiHLt~0?_c!U8-hMPgh=QF9fIVi0aA`EQDvhmQi>&upTWme|CQ5tpjwt>pRYEo+ zV}Qq@XkvND?CM^=c=8H(Coyg4ab|7kY@EVc%~0003r~-DfpEDve4w4(u&2}J?-2t@ z66^8-=ibDI#s 
zI8fP`L>EGFS};a+Vb79CmH~lwkvh#UR45pKJ4i>#n5tFg5035MjbJH%{c# z?eZ#G#A+oni4~Tnu`Ak31?G+8mRhT?3zw3P;fPbf*nSlrR8O?OQI$58X0cU_e2qHR zVHS$&p9gz-s(?GZ!IP*xMKQR4zi~5HyU@1z}wHB^624_Fi`U zgJWq?!#4{~zVj6fB3)Jcm5SNf=_QVF`WYDBXcY)2_Vm(LG;P2bx>+%0l@xLfCFBOU zpk1|O$$U-EPE|}-WY8Hll?trt+4rPdw!z)>Sj%$%rsypGR|X+aLRT%dbx256-`Hl6 zU1Oo6wV;8KT}u%Tj|~88kv#)tUy<}2+>&^pK4vhbl4u6x>nt`0W>wq z|LSK51xw0dQltHy5mDEa@o4Jv`PnHJ0cbsydC2Qw_47qCTugaP#uqfr+vjPB3E@}R ziaL%H=~0*^$*SJ*eIyBn14V0T70>7$0o$+yffvqTduyiXvYCM!j1z~?D5Kr0tfJ+l z3??gx6ApNM;vQ=UtJR5cOPy(wg-=ns%unLb&u+q(Vj@O}*7TC6uo<4iupU)As~?EZ znaobB(nZ6hcw$`E0CZOmwg#Hkf8QL&8t z%fJ77>Bpp-!~yg|iAVS#RJy^9by11W!j~v7D)1kaknJnq(O{P7f;#DjeZQGJXfDL* zD>zYewuHs&L;8fnI*S7EKHwCddO^kc1?(*_5UU}_O*MZVmeSP)OpfH-num)EKLIx? z_=f2&*;#D!7__o)qS$7U(zNJcS_=qhH?Btk*_)nyvlt)EX18RO)d@VjTcl_2%Ib4u zDKcbQT#T!BPf>wgBiuKdk?V=Kg&XU!U&_Z@XOUJ2x+}9tZULM%tePc}mFhM0JyB@a z5!D)HZztE>y(b*rn) zki)%v$CZyD#OPNk6{gzWOX6FR3ZORjI3|^=A}ovk>Ka=o5iPB?^F~3eVOfc75ij8f zY>u~guDDW}GT9s1DHCE#ocOvX=sNZ1ggt}Bh7Pg?9qlV>*rRfsN{;1yzl?TwZM$jHewV?A9ZEUJNYHP>juc;Uzh7f!FnIq}yk=jdw8$kmTje&)@>B8}sR zIyKeCbXP0HEM0&%`#nqNuIu30@9(M?g*nqzj5-@04w~8rgaG4l zgEB76F7En$dH;|dR7)3WcF;17CR%h3wL>&&(XD3Jbb3CX712|L(9QS2IFr9)1Xkc%lgfwke^DLdi0+plh zL#tIT2(EL+e=^3O>?PSCeJ}gfpUA7TGm-pZrb9T8N3f55T z5H^|qfwww#_*Q{8qsr(kxlgad`|1)TanPD~Jlx-7d$bo7EvMIu-&10(zTZqv)8`~k z{opIud(Tmz!yI49)T1UO`(%In`X&E}Wiy<`F%aKFD$6k7#cb;nlxvH|+9xPI)}GE% z73=1>dj?gUmQ_LS+w4?*rFPypx8{FbT7HM0!3P5H1C0P|0PXjNSd?C}qO}XIq6#wvL?yf3_ ziY!bHSzF=Rbz^NU-DzW|OVMU!*VD6KY`5zgn+TQ)x3zaTa^!Mt(#-A0$Zr&CK_APN zT9=r-d*Jna&>4^QNZPHAN3jj{DUnlz&m74Sgvskpv~cTGjB1>U?qP6(9bKPZDiIsY zMOry(bVrI9IO>j-Ht5GQr=a9j@bb4u8&ntgS%Os;{>$V!80)~G2oAyRs3JaaV?H6T zZ4NUX*$y9jwkCtHQYSq`e&}y;C0C9nWUPH+uZJ>uI9j0<3#Ak#@%c z-odVhC&|%t@)*v3z>gG2_=0+fu)UZ~ef1F6h8jB1{UT{dH^N^91tR*8%P5CmcrYnR z`V;=vs!26W1g$U{?SI}^30jzESEy_9{p;=Zxf+h}8^goB%m%C;DibzVok>TJqF&&ye3y-`aywiSssfI|@b*)>}vr?+;7Adf1#~m4r z?j^6?H8wBZ_-_1(eWm*p<%`&VHE&KapQ6x^BwY~Hrs!bYk>GQ{<)6j((bHIELb8oJ 
z!aTWxcGe*aIC$gusIdh#$_hLKVi+T01{a*bqA5Nc{+UGq@u6)>oNvw+ zG*Axe!@n9PL0M~0Hocz!q^i8-ETq4td9lKk3&Vu77CVIJ;BOn#MK&p|7L9X4`5E6SN~a>{HhNn+;%e9htX4h=) z`>s*0k)`J(S*w19yE6=&lhk7guFah7{^E4RNX0Lgh_M`YdN^=-qr+N!xkh&aGjslo zDv;!e^jIfR*}BoTzG9NdcvUDqe1N`%;QDA~CuAYD)Sl$L@mRImQ%9|*Rlz*2_T)C` zb$ooh5*GR;^Jk$zVqjG0mkd_%8DiWb3DD>l8QQQ$w2ch)=glTUU`2mGFhM^)$wHiR zxv^V=N6yQuSkRmc)#yp7#M)ymse0FM4=s^6NYyL*jKzD{XN-HQm^Fr!o)94bUZN1I zHeM&v^%8wh&T#rG2~Ug=4xDM~Hs0Zf6Op>jl{T0aE4cfn%nm`SBWoYGtCVn=r0CPL653F$t z9A|=tyGkT(;m@a1_*~Z0aWxo}a+(u6J1!0Uzw`f>6PZaDLTQ^gtuIQX7H4o!$p}jd zILJ1iW64avA_IH85nQha%HTjIc%GvK-?6Ck;hMb_l#&jzj+H?H_gcM5ds%Aro-Dyk znD4K~_AI!aBbBdh2=&7G6x1D`oKG!@AKm=+L?o4NnpPG>H;tIQ_H0WC0^Y&;^kGY}-HcCIJ9`fqhpk}+}0ajj&MYehROm$chF5YlYBx z=&f0#zSQB)6s!h~qg-bcjpJoB`(eqi!e>v!w0jdca{5LdhjU8cINH~>7?QDLtA-xN z0l-)4B-zs+8&l&j%BePsP+w=7$5mb~pA*i!TtsU)=dm<$-h#){3=#7jBgG>7?s)R0 zdaV79C%HJzPAdpHbdtW(@`IP)ik<`S)9dI*jI`D?3H|;W#)}pCub#!wW%(~n*OYe` z`{kg@0Mm%M#bBkI9awU05d|1|R`ums8i!Z2*>d40g$O#=@7+kl+uN1V+o0DbtXz|@ zR*wD*o+t*7{P;Y4(j*o{i3H>L9*r~{3^*b6xj73X%7tKF;1fw};;Z=t-W$(-7{xq( zB9bDWA!pi>zPxJOt)WAxQ!Q;-25IJoF9?}<%9Hn6Esc!ZF&t<7g#0mDs1(0IFt+tq z8wHcw?jE}qm%c%RdX^y16~zr?p_UsG)L)qAk_460nmHput(UM}qh~WP!xtA(GIpaIg8XKUv!@gh+Io9BZnUSI3K2K@ zHJyi3DykaVV+=FoJ(EXf-?*6XmSRV>e6b34VpJI+0`QFrb6gTOj%aQ}~SYMdqQHI8!l_>spvyEQLw$?`vt-o1~l^Osav(lBY{}NNM)q%V4}8gU={t z*hg-vCEPlzFKX*FR6Vwt=e0qFqP%z1OfB7g8@5|%{4eV%E-mVi?LkvpZUi;;F-rHy zk91AbqQn^|=b|I^Ba-8IJFZx*$`&NVvVOT#^>e3}V@5zp&k$h~W*mw1h6PK)XOWVnB92UE?E^>!=F`b1Le$Khb#9EaDTAI}XKhDG_{BzDsV(Dg#O<5*jEgg$SZUod(3~Jf|AGd8TFtL$qZ&fI*a&HMY!bx%tnerKBmh!z{<#o z23@eiBM4?zdnUp9MWY$3sr4hH;}P`y^T3c&3{uFw!NN8)7J0|^%})y?tk?% z_cZs?Ic%Z{yrU$ziAuun+pExVzn3F;O(hme#H$9|hLI(5(l6LComgBW9JG5nF=D{W zH-OR_*5a*)o9iTV0}%oz;-<&4v(llu4vXydgcos#@+);^q}p({1!o6L=$J%sA%+`x8;pS7$M zT7R?|iO_@(&g|?5{No-e;x|H_V8}U1mqPz{p#M$7-I&J=!6GNXpe2`EI#Q8-hTmu& zSSqc{-5G~9nqF$!@q_~9d_o~F>oK?YGRxvAxFmKxHnwam$KFgM-SSqFe7XeVVY)~O z18!qA%p>ngSfZA5R@D;k7EHGCH#0JTq+WN67+UEY*SiY`v~v0M^YZ&5C}OrlvDXb!M0 
z&<3JE-W-N#kJMGDRB3t({KXE(<(WpfBpNARWzPgrK6_lw{X`-+B(V&`ihfJvmbMR8$^C667%X_4f&(`Ly zA~Zo#Cb$%aWzBFOn7|$*%Wa%E36{|?(B_w9`Qk4eP zF`ekcn$@!vH@EJp=ahH-%wx_L^wH2(3Hr_{<{jEuzfawuv|Q#7*+OK7=9uQOeCK^>9@pgK4lGRP-$F~_3@hm2w>wEK^wts@9GgL@O7J84oNDRn4}RQ4 z4&**ve>_lwNlRQ(A}Z5HrmIAX!t_EhnyyKI={_DEnfsQ3 zyyub?)50$>Gwn=@}q+k+ug13;T84i>$CRf3m%%A<>Ge@RM z!P9U966C$~m?MzjM%Jfln6{-j=G?jWV0ur=9Lw9W&j)2$D^gL3+JR#^A*orI8d-k|cFa zDe9@Kza$M&9A-ku4M0Q`;GeHP!@d8~qJLZ5}q@Pp)O%lD34TJ(2vc zKV_{30(zy%i-6wg)tFaqtBHCw#u9H>lCqFP8eMSPZf5#!h3@U53aB*SaxMTrJ~?xm zcCJi@R;ua2Rd#z?UI%JpnLUPcpPO79TtKY&*WtgQD=UGYK7NZ$g6qIV7g><6udA7n zudi>P8L5eZ9351|K!kG$&puejp{@wC5z-{Zo3IPHP6w4Jxiq~F zTzc;3KQ5_Wu-Z)a9Q8++3RMtwb$~|i8o4aE;?Zz;;YTs&*rl-}bfq!FaxOx8UWb9?JLK4k z;lbgsjS)!H6Ysh{SZ{)@l?sVQFcC>cXC*>c80y{w!B%DDW#1B8IOEUGuFn}KjHBQluwHvnqUY}P zxM8UF;7L&XIPt=)%JlF^1YK>WkD~qJKzm-7pg4#@5J8hBILl*y5$tgC|C2Pij~>QZ z=(nIWYFNDiUgmZTFTIJ&V8BzD1Zk$k?DQL`idgT$jHi;mZe)j6ZU|2vat zpr=dvE^^35v888NIm6b8p00}kPL^4?13w;gitn4;YYJk_gWh8cMRq|TGK11c6tCwZ zm0yiHlYDIOr%cI4d~6;w8QB;-PPoD{_SX}#)w*RK>lrz-8QPE_uN$jb6(;CBeDG(l zunOeMInF<~uJaT(xVLk@FpsY2mbpCc)TV^zWxJ^9LwxWUHeL901=p5!lc}7>pPNt( zBl(P*`Wp(XHPqNmaxkE95?dyLX!{AUQKO*>GF1j~-ZPw~E;uc-dW**tlR=FVal^fp zy<#vtY#vlbQ{_ORXl8%Lf|w;zXIHT}Z8@AY*hr_gnbJw#J&887O62bzXd7qh=kbNV zxP~q5g&SZ$kpe-T4L4eqJ*T>41dAgZ5PpD{iXUCk0aa3etfS5A500sHAPy`U08S@q zR7pbv)=i@lj_jZR5Pkxq2G;gjnglC6Y?Mo5(R>c>HP0T3@9|n}&e7&Eht3|=UN>iH zQyx?3XS2s&mQfI{WePGFsv482RsHl3j^ zLVJ(jG|G{M+0Gr&UGPD^ldLCaXjF1pdQa0S&zbOq zcu${BzO4KI4%99rsm^NQqfJrJ)g~{D)V?4WYjb05f*#6^$WnCXZf;a{I4+$8IEzpu z!?bTcvAggdI)MQT{x!s?!Jiox9tWuV%24PvtO@y;QkJ5nGpGOB|Bc%mDaN3SSnkDv&yBjbnK1FgZgd*^Zem*Ilsss{=Q7D z{7(5$MY*86vO4IW=R60Q64uf}}%RSGpk%q=}W+Lak#x8n$_M7GpaKc2vN||00*% zMzNhyl{#?a*iNlpyuqb@8NL+%;6P)|rk;c^un{r;$&X@$=?Uir6N)hUqK5=Gg;r$) zm4Q>o{OO3jHwtY`=)FmEbpZxrJLEmxP&M#AWH9>N0qEIo{AQ@?OhJADV`jEEzQw zU}V;CdUk41LqL}TQJLKki77)6Xn#-~v@Z}-ze;bj#c2{9l^Le*(XV{O`&r>Rf}xCb zcZ)O^V9|^%>5N=hdEKgGo$beb zEr7f0NrW?0MLjlL`!vjnPn}Ochj{JLHu2grWbV%%L)3u9e11hl-8}q8@(0@ps_zZ8 
zm3)>2;mghIo5|m}tz_KSi_2_*dX}#hw4}W{vMTZbK7R+^-r7Oxv5vvy*uKudPH8;uS14{%u3sVEE!6^Z9e#YHoFtAAfFK=$pc`H}sqQ^=HbVqU? z+pKr?)`Zr*eX0a0l4Eyuun{pOo^Y|r0}Am?84iaM&WO}323Jkap7Kt9e$I}@A3j1>d7*D z2+tCXBbN!`*RG@6DbA`{Y7v{=v;IM;bZP#=6-g*!*K$!W!{-?|Qa#0z2TFzu-qdgY zD@Hb5n``BF;W3BCHIHpvr)gmWGF5Z!c0Aw_-<3(ApE?l8DyHu6*zO8K--Uf zn97g47UQx1avdgMf!)O+6(5+&k8WxNaF=Om5O5ZmJ(cMcEHao!zs=$eEHZrvEI37s z$GVZkM0vg}(GAI_jgo3LV zt!{Nq!3t8xtAKU9Lf)$|YT@55PFc70>MfaUT?w|Ybz5m4_8=H3Om0UpUprC1qO)^8_NphX70Zr-fKA*SR1g6In<^*2toSV zj$t+^dN`^KSDqzNale%2l)J?68Wl%JvC3~rfb0CPHy7s=!&{wD zi&$0^91tH0GB>wEXMFMVwAuceMI1)^6g>%Ef!QZ>>yXzIq;L@UzbrFNL+#~6J;KG_mYj6 z+1WD^JJYN2+}w?j=oCL&{5k!J?rV`ftq4v#)cR?@GM}@Lgh>$iC0IF!XgMo}z|r?s z8`$WmN{s|7In5k_$_)3)5vU${E>?I}bRY7tQO|Fh2cd4su=noqZCq>;Q zAx^O(iPjpPi^ta-hAeCWOdeJQFby=@>flS@UU&D_RnY&2XM;ttw&1y}THarl za?4oJIyOoN&dqwV2#eenLV8YJ_q|O$x*u|E6j2ZCoHhmdT$xnTd5aQ z48EC^lq<4V8wT!V(Kp|LlSRL}>c*B2dGxc;GD)z3@kCO4oyyu-ZgKVYiA`zpTL0S% z*Vna4U%$Oj#sPJA2~{Sr`OJj3whC8ytYt~?dXNb0;UyI(_@wNj4jC-h3hz$Zq|Le>Q@>EUBmfpV#zJ96vvu~ZE<~i zi8*&waKqDqNT&+x2d4kvgw|!htA!yjJl(}K4vrPhG=XuV6x6S|mbU?Rzs-5#a)t?) zY|e6ZE}v+{>u1Jp(5%pJ?JV^JdYC?EbQFs)UYWd-BmnPn7Bo1$hLc*9E=W1nT*`e zOGFZI1NXhWPnte!B&{{t#W2fI2IENei|5{m}| zJ`xOx8k{g;P$aK8T4vMRBw52Ib^Q649N@lOWc8O+d_M>=`r4N}u zKNU8J_JFLoM*q;O$JWub^{z1*@@@|HMkD^i`zlmE^Wzgg9tYX&wNg8SCpkRaJN^s1 zMXo-wFv2IK*x@jX9DWe-a!m@?O3N)0?R1p=D4AS%gN>`2>nd8bwDa`xVsZg14($3= z&g?J_xhB-&TD)s?JlgwwObd=eCkH7(dSsXUM8d)taU-O7ko-41^*RNMJLDd49oDm! 
zlMSJJO@U_SBbeTvUR8om+ThM*a1&wYCutDg`MKH=Z?Dfkss2 zJevINWQuVXQPu{Z*#Cl=F!9*;f3nHxEjLtNI9oamQ>sAoV-|^^rUdsMnwL?niq@8;|j*&MTsqNfq%Wlyl zrJ61o{(1CsSfs!*LK|0WWJdSw=KA*ACyYbOgDg}DgSr0-qt8z0+@&cNzBipihh=%1 zilVQEC11&Mj(x!N81IFXpq_z3Hx`r2o9Wr<*Vv!^z|(kz8K2bq69T8v4s2C#ob^DVa}K2O11u_)>`3ZJ1%qBC9IqEG z64Ccb6YK@9C_+zWAS^*CrD$fQ7H_|L3?HR+to@_hQ+BRUvT;HHczmoFr=J5J?8sGp zWi42tHBo4lQCX_Hd?VV>I~HBv*&~&w5%}IZ(f?3q~!{<$#Z}t8G6C-m@|Qz3W=T;vuChp#6gUS zbHe8YMg=Geat9mv3#{FOF$~YHFFChhD5i~U7E4ikZ#wBSFmC%-0`FU{4OpBMSI=$p z41*)i&~|kS^{$y22by=16Pui!qQ^R&`H8L*AiOAE9o9sjx)%j#sK6~HT*74sT?K}c zFE3xP!(X0w=}Tp|J?0{u-WJ+?ToGd_v$*_Bp-Rf*?d$E0r_D~5d6CYC8!AK{vGi>t zhgThrJu+6I%e44PvKYq3l~Gj1VvH!YKe`>VX1Sve`tV{_35O6sc5D5NP)oITCnR51ArC6&3EljhwBU$&rNVf5y zQ5O0^n#Ak;xt=@N3R(AdHs>Q5q?|>BTHpkM;|1)|I!m)GUi$S%ix>o%|x0b3{VNTkbKt7OSqPbwa}`T^23& zm{tq4l?m%W-&NX%IK;ap<(vX~h6|*3iDs)+8)&M16G@ny&S_V;Q7WMEm~+jSJ(N^e z_VkG^8UoK1-G;9OR%u2Q=LE7Lr@19`k5L8x)g;<~k3#oi=T~^jSwi~q^Ld*54)XGi zb*;;a^2YtPm<|UjE8@@3l3BbA;I{AYqZeiWFYnR&e1WL|Mp?v9p-94Hd_h91=%CJ; zv%n6^r1>xke)Ws4Z|^qYDY8=D%xtceog(6dEozAjj*l&K(wL`)RfafNcR=fdIs~!{ zgMM#-g*bcIITdob^IwrJnB-V*ueFNnBbJvy1E*jW=SkgGo*jz)yD$!e+v~G>HdaGi z#A8lE;8T7pjj^7UF{cAdQ{_6mhbvuBu}e644meOkv*nDD`ICv`OmZ$T4%o=q2Vr;o zh3&Hc^Z&sEyZ>0dv3jg;=A)S#q_ol&DeCDW{IR0wyu9*xj?znF<-;g@i}X^GPp9WY zD=32vjpi2Ted)yuY2G0!A-4kaAFqJ<&(qltFvL!kng?8T@x0pH8QAaj1j&`w6(k_& zr&gvF6cN?9L9q-ObsYwwwC-ukG7L@r8^tD!(?Z#M%$W=@_Mt-UejC(n(me3Z zbFAhyx!a0I1m9CLnF9}DfnFTSC>AV}h&hS?G%6q*9-_HgH{7;B=eD~t=`NOeA(w8_ zU_-W7)#9Lr2K=A?Q%yerv>f{601vebh&cTU0p~gx#o+_EE=J|Lk{VTn8W4uqOxB?9 z8$Q%f3EaeD`dxHzVKtdA-pD7}3h%)Fp&jD;8YQc5PKMPxrQWNl(fUO8>ipP4%R)BS zP=Q67i85uB=Ei4-QJ0L1lIoaP;r~E+oY;rsAC~#jM_mAf%YU?hNIb>_?!6lz5)VL` z6o%b_^D4Eg3`?oxYKdjhhY$HjSSzEsfFp0fm^2|0AV`%6JT<7KX+lpWXP>Oqj~0s^ z96VTUU)JD5Q~Yc)LGPeQXKBpDi3F!XkeOL+FSYpc+Om5vMBRuYmp5Ez(tAULa+$KB zh@-ji7plTVJB4FCeNi`HA?XJ~9_{8?eB6Mq$ERnvQ!z(No_P)9pgY3l<~h__nk!{X z{qy&ux#CJ3GW$j)8@OZSe8*Uqi>5KBfLy&H5@lPcJ8qjoNFM7e)-f6><(w2VD6rPT 
z^eyi@J&BfwjWAzMt%PG~0|{Q(+av}btHfbe2150O1ygRRj7oST0jOI1y)ILxop=J{ zbq1@Gg2PUT_K%t<6q<_~G#=j}aPZZ=`c}?c)|PR3%poRKBq3of+G(LwU-K;X_3C_i z^PS31WpygT0WO3ZIB(XJ!$!OK4>?V5#E4z4Px07G;j-oJ) zNRg^w(;Xj{3Ew?7vwUg@kZ3HwZP-CO>&8|r;4&N8qEc;OY`Bx>;M>b$96Vj7@FKLl zlE<7$TwGQtg|u+K2fB8UhE*AtiA#WvSvZkvsU+g=jIOPMf#6NZB?fs9-s~ben6)^z2lxtuX&-vG}F5 zSe$m_27~$UgqypLHcrtpCSk!<7ciH)RH(5(`4|$6p29zUVt*1$>iBP1@ctBpaR{cv zpD4Au%=!Kywdb1P`5MJaDyZEoVz*KWKqLCGLCK~0S9-VeV~3WtN|y$6ASiKw0tbZJ zh!XZAiqpGSET8)@__PZ~0RK9JM?_xhM_s>0Kf~ZhU^~IEiR}eFM{x|_nuUwv^H!M^ zHjwuYDK_0BOmk%FFMrKaKbQ&L3_}pt+Ys$+8BtaK?iRSjXIadn*QV~ZY}+9F(=aOU zUp?B;w*EAMoA-sm;LO1qUne3GB14^vns5npU2>WuccKuZAQJDf(DE8??S9PUn~n=`AVC!$5tpon{CO} z&yY~4UAE*tym>a8KIzItncGOu?@-+*La$19ONL&r{UBNjB}t^;(V^FqG~>+00U84Q zJjPEH33zZ)k{|0mcJeCwWEj>wsCJRpO4>H*lFj`13X#+&48DGB8A zc2YX#g3AWAh6bkC5MhjPwoGoW<=dltE7qgt_AJ8H4NgNUaoloR@Qc8Dw8Aj5MTw!| zK#5bL{ey}4YoVfQAeE^emoN#uk@iRI11rU*Xh?(_IJMQzh@Rfa&6ic4;6;!xqINaOqESA+urlw`CX!T0F zxLWV(AeRaVOvlsnix9gQ<#V*B8hUxcR+^4ClxB7TGLDXiqV_@65{JQZLJ4INq=F%L zn?waig}^QI8~{bB%+UxY6Q0&i=1RaM9*bh}moH!DQ4qx8v!8{ZBf%*&{0z7A1^&l# z++X@e^cA9!Li&|TP>NlsrxKJf+7Y7GH_%->`jY%tKMU}QO=7>mJGA_%^1-(e+32u9 z92_6voBFKzx4+*^mXTT=rc|xPpoM)%3GtLg0hr^^z~d?C$fMr~UPujQwqcF%n&o{0 zuFwwLSJ1G71#B>RsX|z_y_W#caQCPJ02d`kcQ}^`{RSUAMjxHfN7wDF-FeuW#BgiE zUq_k(T~|NpTH9|)&_e5UBXa`x_&f*92_*u8N?1TQXkDGKUJbjY3`dG-xU#&-m^so!7AHhmqLAeT{p=d`JRj$qzke~IrXwMErMhx z@L%&CAH5g%m15$-du;2X17<$ls(O`i!EY~%NTT+=wNRqgYk|jHgG#g0;QNlve-QMvBkQDbAh zX~97qOit1Y)2{tMqm}-91CG<;F-?&F73Mu2_-Kr>!Z@r2oVe>$I<^v#fi-XA(orcH zw_jPdh{zdTxVUoj{MK6(mMOe%EsWl9qcl-6@ZOCtjWf~JMEB_CUfc-sa zqFu^Q?(Lh&5~7KlQsJZG0LLbVJT2G?24zuQKHmze3fo7Y@;9|Z1Grz z8Mwgaj%~wRhzy^VlW_$3bO{4)i^y z^KgQO_MiE`mEjGqO`6^=yoP2(1&zH1;q6$y!;{C0NdvDfC( zyqj%um=-j`$b-5f$`~_|z+1r1;*EWJ+n|7?ZR&xjm6Q({hT}L- zo3Sbq8>Bl{#imJ8vXb{sqQxUP;AQJsw!d4DqRyv{bUy8N5vP9e)lY)wC@7R?_mxzw zsfm4v685*RU$ERZ<=ld7N#A1mA`DULA#Jk9)hUE_%&ygVH+AdWaal!ZCPOy4qO{_9 zY!?<7jWdsJB!g+o>t-0z({JZdvV0M>!Ka)gNZTNyfr--ygTH10uv7-Np$@So*xstr 
zEXi+4YhG18Thp*~%rz|0o{9=!)0f8q*FjZ53m#D6Ux!aZEz*?4`zIJh$Ba&38J&S7 z?gTWiBELqJ26bUrT>5+TaJ|Ej7XXmOt&-f9VWkn z6*3%O8)g}zEs{W%3C+nc3Ue%c@mP%DTb??>egDJVD-$GI9}O^WLlZzKZ1f-cRn_sm zAh{-&YGsin-}KEY%L?8HgF~$agPKj&*|w6t(u0(SfJ_?aA=M}prji1@qcK|N zRB8Qff~(NAqQG{ayq-o_sfS>b)p1!h;)qnY*rH14*obV5zJ}nK$WWXP6T90}X(D;- zEu$KP-Qz>Ek78(kUFNU9`B5Q=09qqK)`tOFqWU-Wya|07Za_zB%;H}1XCl?U$|hO* z1gl9{M|Mg8c6S5WV5aUmkj)!MPJ*yC@9AJPsLZxzettspT* z)?i@!4{mj(Y>TUo^4769*kE^m1u^-XpCu?wN{C5<_`pk5jXyDEV2Pd6R*k$h5V|f@ z64&a(Y>JvC`FIAa3l@=DtDkCJvxK82l*4qV$GY=v%EUvE_?o) z816JG6=jJFEF%vg90;GAFmU3Ac)8s1TxFnhZQ zWi@E!*j&CZ!GL%GaRW>D^5L!$wv{K6QC^38w$!YP6B9!@I25VFl zkh(UPb@*uX(PNEw$R-?JUVppJeD2_R!rw!C6$ax9e&Z~-%{8NhXoEp{yBY^BQgQ_ zjue_z)ve0^ZB>#L0)S0QQ0p?AH1AH3))YoQDUK#(Awt!x0BBao+B*~_&;Iw3C zf$P=ncTuI52I*7$1bwT{Ka;)HMKb2F@?Buc#$^S#a&Y|dcx=~zpG}?)m`}Q+9?J#K z-_g;*n*ojcr#6lc_sl3V2FWq<23}d=I_Rd2P>;Dm5w9adR+NotKzPmE%sADpZJdC+ zHMvP0$#E0Vv;(1Hc#Cq3E_Bh>8mv@b8eqPj(9XD}j0Z;jhlgqt!#72b4(vW;22bnD z8eDOxi<*TMYNtL`3q+A>>XR6@3(|m;9a_$u4u1Fsipd&7LPkEsbaP?89)33J?54{g zz0wsi9MF|>lK=aZ7e@9a_{W}+%~XoEwK9@&E-qLqZ8uui6t`o^)E476{I4qHd-Q|! 
z`NDr$7p0_IYO_nOv%}bM&tqoLvRhJFAw<&Ia&G!GzfV%RvVyNg`gqT>$dD!^pM=?O zVfe%8rZ{R;#NpZ~+m-S;Hi5!v^Z?zI6bfLJ$p#lB3gjC&O9u3lz*Fi$6eT4dmGQE8 zyjm6yDMdhKLBQCOqF7;6Ate{_B?@c{uUs&#lvX@=y@opAHkK2UNU$tok|X;_nd(>Y z2Z9?YER0NT4R{G<={72Il!$TF|4(F?OMG%`G5J4bVKL z$=(*CM1+ybFvMSlSGPhI$%%0^-96Ti!;*`^Zo$;3W#KamX#Xz^?Q6S&_TN-q!$e&zrw4sPU5&0aL+h3DI)}Xs zvNpw8fBg^JQ_p!An#Bs3DrnjckA^+H^@cuU51KZE^GtS7ah_de4_f|C(3?rAIy}&U zsnd8T#B=7cZ8M!A!qT7(M3j`%NKuWAvAx>T!RI4sWXz)MYm@{hSx6g-ALmX@NXsp< zz)T&x!xP&SM-5l<-MqQhTjIPuZ?=RD?EUWYhKEjnxz*e+U^{z^H#6S;E+_yrbiE}7 zfB`ByGBIU0(d=WFe>U>J|MH*w({yH>(!Hq~6KOujN7F<^eeTG(L zm^h~DBW})RW{-A{%($>YGlqx<%zZ?nx$2@HIPdKA zJNbro#gB2o>3&v&8?a?6I%MDL!aFjH^GGVE)h?XiHE<)bCHfO-vJL=<#Ci&N86bCE1F;@G#tvZF} z?%>#{Ii08fj^fxy;ebhsNQ#p*Fmpy$y;c7hrYYuAYcE{zdxAlPX~vv=P17HJi3}A7 zCMPl^RK!kXWsLeW)L8GyF}Q5FH^4yGo&l%H{X72^IiOR7X_-O&8&poFDsvv46KA5_ z3`caeE&%N&DPU%+Qa($@EMwYJ2MU+gQgYz(X&=u3r7R|vcjCII1rV|W^{-`Al zv5ofV1O4O+oU##%sKA#}uI8zJJz$MHRN^ED%1NG2(RQOEAXot4IinVbn8X#FGcNc# zYEmLDC_1w63Hdjypw^Sg$WcQJjkoKux1fa{3=P4B@5C5lrdBzJ2qh*ngyoYmiU8Gi z6P~<6=Dcm0-$NO}H*z%BVGb@HjHhK5a%B%3u8ZfuY&%m?y7p-s)Ew)IC)yeiM}yl3 z<}!!1N%!}r+_BKR_Sk%Z>pWOU1=S|q-)|*i*_@Q|SiQ*=$jP5^wA!_|;v8^?ol%HX z(S+HBiF6Y!(C*qd)EsxernzH|yJqQ8r|?`|sHdc9PG1vF60XH*JeDCmKX`@xDVnh* z9nKlp6qM}xH}O<8mZlka>`kO;jM&k>Rgj2YJ=hGKC{~!e9&<(VT!I_6P2z_`4yvth*W=kM=&}dr@ZWGDyvtg#Z4Ke`L>(2n9lxsBD=uK-83| z6RHKPJl3wnZF+lp)s1vRQ7OrU^c;+}oyagCI3pJB&S0sggk{(1cK;6RlxTA0i*N=m z?(C;z$*4hVOMgs#;lfwW*x1;e=l%i%hdDCl1fMA12tzXQI=5>)&iQ{g>Ls?7orvWL!4EIC|*w19boi<3XcjUeaPuPZhaBZ1CtZ&wS zy3&C*tepEsnyJcBz!dBUFf1QO{!L_wbIbqef)^EUK-#3zz+0$%5 z;eD9FY&sW|K5Dy%1DN_i+MnkvDvB_nIkg}Ng(3jr8cRGEx@k%Bm@Cezsxoq(-#9+% zvL;bLZ1`C_A0i}nn}@#^^Y{Wh4Y=%!Xr)>I2<>xf|HvBL`x@is0v3MP!7fE{%Q*?a zZMfjV<|c3E`;>bjf$Iyw9%W1-tNrcaBvfh>`Kwyt_JOxacyt)!qrKq}%}X`PiI?g* zI8SFkz{)sPlq0_zHAfA`Uz)>+C_qA^tLu~Z&4*f>wYN0wM8tOD>v-iYXCx3u*krx7 zLX&bvCvigkNt#fcJdjj7iHa#pY4zuxWMxjF^r$1&(qN_AsASy;<&=heQx%w`asQBy- zc5OVCQ*m}<fMx##Wc^fjC zEwiSYz38C2lkSBL1`4OXcn)}O!AxXfIG!!Q@)l`2o?al0_(sFB6np10&f8=wxldv4 
zX{lONf8iXy##&EOFoDxpsVy>Z1JtUW+Tvygpd_DA+$v0F zHXVj;uI0b*+&SV1aYI*P3*4D|dnJPnmT`%0G3le7dFU$^W-C1Uk?Gn}ZQ&%%qnE4u z`#dc4_A|Mfp8XRUF`>1(X7Ekf*JfE0_lgz#|7OrG)C=KsC6!Rswz%9Rte(6*bDW z0p>B+g11_`*D^PN(zdB(_LwU^v4J7H!AX<3yn=jRzk~?2&7zsoFW@UQc(|Fx_t;o> z1D=H|n72`f)tev;T%aS1_T7?rKSOBf17;O4YV3>>>JSTpHhMiRCT#;M)>6!1w=8He zd-OBdS4}H7&a^2c?UweWXnm{Z-GWubCGVE^ z296Yll8rS1(d;=2kW#^e@lMdg=t8jR*lr?3dWpV>oIj37Ql3$@+uoVV$e{DD_L$m< z-6Ceik;@HQq6!9A>PMuiwdts z1!&?+SdsSU)^ua&s=&2>^^3|?tPvOl>?iqE1h68;3k8KNFQ)R07P1x%d*!jtlzt?y zHsu@`LR7OXzGfeN8{fBYhHlSP^9o_Z2>k37`6I7YZ7po0$c#~NqHa-IAx~v+9;foI zb7_=yAzI-bv@Q2rhm-VJ-;5lQbs6O58d=OIOU7Egi~gCN446Xpk%awBk4S9sGtgj3~qLbika{>tklK&Tt;;>O~rPPvQ0 zAUm+)_FN3mG0E$S!CbYkxwbV&3cZSm`RUadYhCAB{sCIp9DzUB$XCaALc*Y5XiE|X zO{8{I6^1xZpDCfNh}2}mPfVm%6AU}DB*=EN<&#q^NM*E{JTkYLiot;*V4OXMa}hC6 z$ZHg2Srksg1{k+}gK`$m309&66Kjv+NV$RK6``!Wo#`U-4i0>&3~@V*V4Cvfz@u+iB!* ztZJj%$+yJ6i*-=yYnsNe0{e|9Zkw5oZ3UP$OSxS+>ITtPm00exiY56sjMA0Wa%AJX zk|?PmYEzu*2R2XJ_<->S23LSVAz0du4)=c|PW2;&Ly9uKm0!wiq%?ZNjRwxA8C!If1nw9 zs&vVn#(S;lqU&^Wjx%ICB3=>^P_hLB%$|G^voVShaaT(D?p%NSWK4B8>e#oxd#tbU zw&g*3ZH2exr4Npe1{7FQm5$$UCVyY1g@5t&wa|48)WklZ+05qQgMWtyKMIL52Hz>G z#W%~7o6+7iVWY`13wPj0f}ZLt3Ng%45L|vm0$@^OZ=dpN#xR$9V1YU*H|qO%y&pUS zRwCE+ai$f~YzaH4uL}L}Oe+3cGwn8LYd2XeSK}frR9$Lm)g&2@rd7M~vxm@3LZjDG zW>qWbBpy>}NqbpQ{k8=@HBE4dc*HOXrpx>L$QH5UOG~7KH z=`Q$reBm$9>Nvf;3FnJgY=S=tx&#SUdOAYZQO^8(G`;WqK&W%W zkHJ(a@(M}rO4={6ruCwWZQ_m-@t5r3-7<%>^IWG9)=Y@4K#*Eef7R|9N2*6rj6_pq z>8Vvo#=hGJFSgl&6v|*%&LfM4N>|C&ZXZ2Frc7c)my53*M;z!f)Jc{W=`4-c1QKC{ z^e<*yWtwXdNKEoGDenpEKzS9eIQs6P21q5P&Em5l4oe^jv>(H4$75VGd6p&#ymt~~ zfs?5u0vEjx9Cu);Be&r6>Vo%imP1i$G*s4qo5hvkyUrSk)kQ76nS$$ddPjDM-x1Uje&iO%9jzbI*S&5%!)_9z}JV= zQ#nSQe7@X#SmsMuTVKh(`uLe+7FMc+y$$k`EpZ!yN2C?{0MDu53^AMEUcRAB1~yRh z$_6a2=u=H`=Rz!!I3qWrUs5ALk3|l>!qfBDr0`$#QqAL@_!Z>1M3Ayt?TG~RwF_8z z4dtp6RxlZSxPwg)_r>#Q9;uyCu-M=ThC?(`OKe|U>^c`q68ypI$kK15Dohy?RK;Oo z05ah5tVQXy#OQ$`w?TifMZ^%}qtR0se0?>^a8%>(&M0LjJnf=2TwbtL(1&Yfu=@Pj 
zqBC1!C;MsTv6!yCuF1ed=^f&*xAl;2>_5lpHCn^G=8B<#pimb*lM;HxT;FG5_n zUv9z-tS3Jp_7OM1|h+prBb)Tn+{(9l~0*{K7y>ye=p#cBWyP0<2(2MmSB z=CL;hh7Na+k5@8Y-(F_%GKYQb8V1;lE_f&>y+U~&%kV^&5ijoMH3!q7uJT>g2oF`2 zSp4(GhIqiJEd(my6Ra>e@&>zmqyG4gCcxf~Dt!E}oO=(@pS&_iWG;gq>yI`wLPQ

%mBh-$&m`ZK zzPtk~Tj7`M?J&JEdef!Q3EL+9aJD8fvAPml`?!@n5R5S3V1@(1H#9tP2J z{<&!xme?~r)}{<=qwDLjF2LnuZB0lmV?PEv0Gy4vhR|p^{?U*xQa-MC+@NK8+gfvQqmz& zK3U$ys9UN+H+EYk+i5SddXH?WyQpWDL#|6mQ+AO*T!%P;Z@`4A06DqPb&@0hEg8x* zcbDeTJ3$$f>)U@^rnFMgqVB9BX6UiQmJxt%n41 zBz;c)>SxFVZKK&%j=mACT))g|O+kdhrkCC2Bu#e0mk4b;=#{947&-Bm8&DecP_4JA z^W=IG2mWG8*_p6p1$nI64(ezHZ6rKwt#nH79cuye9KmGys<5na&^4^rymLdYU>jNd zJ-tWskpe@83ogA$p@ajcb96iuKzv1n69&r(W#uO_*K5$*!D^_Q8E@|sw!zinu_zXQ z`SN8R1wkA>`&syzv+#z+@H5=Y7x*7X34G}ZE7zq)l?X>=T{y*zmAP)KgL)Js#|BKc z<#{@bh|mXfZP1~=UY?#Y!_wbWUj;Tu8-k1B;h-mZ%I=Yzd-HIC1o;nUmR2;Nf5JsL z*_}W8u}Eo_G_2oP4PblB;kbleD9#kVu9|5KnZ&>Zd|etNSIco@l0Blj=#sc-hop2u zwpYcq&Eu0go7RkMyK!iFC5N~UUeG^>SgX{bmn&l7rrNFVQ5Xlgts7IJ%-7N8A_QP7 z1pHuEdji*K98PEHB0RlRi3|LQ>mV+VS=uadWK>O6)=atD8ceOqff?k0v44|=w(aP- z)N4?c-fKd>az#g&#{ovSb#d2n4tJQM*t2g`HB#o7KkI0Y5m_Yk&aD$$g=cAH@R&=b z%U_V?eS^td>K!{1CJj5y{QIJD$k)1CQXJb;cA8KYL()Q8M6y=_o)>c9$dKbsqoOJ^ zzApNEu|c0qPSSRKe{vno(zFN?l=?_^3WE^dl|Q>izi3PJBXB>4rV$oxY1$D*9_KMh zA4gQ#Y!h=_lTwJa+;IJw;g0zqbN>s%yXAvO*a=*^d&Za^TV2Oe7L{csXT^_iqQaKm zHI+o2)Uwc&>{-i#zerLUNU8J^nGY+6}--Aiu_4=Q88H?-EsR|lSp%S~k!9kCT0!JbmeMjXN4 zB9ito<;!0_6PsLcDHaZD}R=G;b)BfM}c0p|L@xKHXZ*-|NJlSf3dIMaJHRXP5uk! 
z-LE(7>-p8m-=O^l%eF|v!=JMVx$nb}<>`Ha*_HpzUehJ)2nhp24fA1qo<{{rS$s(| zHcw$Ry<)=>ON!xoXcH76miYY9z&WPE*dc5-_@zGjo#>&eyh+h^EH!c9-=o}M)+ zYZl-%w5cy00u;uP$D>>a&3{Q&@WxSN zmkYV39NB4hlxKeE2y_?z0iz80_HaDQrw13Sd43=MbeSeQzf8{A30&b%A4%f{e$tpN z`0M26%z6?TTtY;5WGb>fxB!_XX$3jkm zb`|&fnFc?9zjxtl`6h54+CvsA3gL~SdNGVPQOODoVzH3Zg5D60<`h~p{t*fDXzZfX z{x9(``2;e*%7+~_SuadKEE;ieB{e`lv$K2JY{ZU2BNAWyC*BH=DSRnHoLu(6h6e;E z;6szUgXjnUGOp=(0bH$cdZ+Lx9;AXxkiu|OzYL%7jAz)6;2=&Zc+?05Fa4q%8+4~4 zFDdR4UqV17WTh{`)lm3}ux?yl25(y3GCt;5ydKqaxVJdO+IYPZ!GjdPZa+J|dcyym zz^c!ey$>fJ*>HDv0LyQ}E+U)=6r#(;z`M=)i=cIblvF-feSc(+SKJqI?BFTLq)7Y_ z0XBcTJYBUe@GqMZ`8SOn{J~8i8~9F;{DsYu|HE0s>p?>&57BRaR7|2^6*HudISm$9 z_$~Edp*46XOm9{ooo;i@9rwO&xD!C@ecil*ebjY8o$xL@ag~JQlk?LkTfKdC=w048 zhsd5zfB9|o&h@c(eCywm0E0YrcP|dV%MP6Qu!f#T_aWxaDppDJ@Vo2)%uLKzpC$Kc z<+POt-`mZfk`+MUrNl2i`qHuYcI#qj2e=vidUJ7p2KL8G)!?_!cyHa{st;p%#Ogrl z*t@%Rc&=xs-wA_h-?wq#J>K;7>7{~}uiUtK@V(tUv0&HU{mRkzcKh8cI^M4II{PH*7KvxNO8vZ&J)&Q94! zb}{}hXqTD{cxxXW-9CT56^1UgH^2QP=id;wx)M~N9%+|de>7a-a-%Lg_|FxePB&?f zsN5#}4Bc#5@6I0Kb;v`s--4(}vu_x5%gaM~-HB};GJgm>>a9o1NBgN8RzCH;WIj9V z_%CsK=f@K%u~QKyD4)@FI+QwCwxp?-m(lEptM0Asnfp$atZV9yX}PKf)1;!t6+??! 
zP5b3LWH~ z+<6$^e_nwJ;ZLc+DWZrPlhI4A7n*@KO+V*d0MDl@0jzX*^o7dR-7V@~B=<9EvYsX` zZqlmMcX5-4*3&dg<9glZ$nk9+S+|+HxJTEwsOq4@=j-5ozU+alW9EzARUA0FQ`b{R zF$2NFoFYY3lscK?&Y$R>Vl6;D6Cp#16zE(l%a8M(^=x#tr#<6(J#JRl!wTaf_@2Hq zAXLqrAC~WYiaXBDyMsa1-2PGd_P=p4qo3jC6~#Om57~8^zB4>U&20~?+Xh=VSk8*~ zmxivn`=NFBqOj98cHZ^ziRdlA&h-wHTzdx zyS?cHYwTlk{->F?C3E^MmiW~g_ZBv*(jXKnPg=r!?Y;LOQk>4}!xm@pdd(51y=^2}_?9b{>kb~R?nrOABDwC(@#^b} zlJhWmC?3Td(;t7{mp0hlExonBh2cWYyq~vQgO%5JmRPHc@o~Ga&w1BY?I0#Cxkc>S$5Vm#d59pB z+w1eKV+H!%@3D2iCt+4_cz1ZJi3;9z7ltb6@f1IyDv(R4z;Fu-o4#98O>_Bh58dDH zvA*5!QCv91O>JQ?C~Pi1-Za}j#;)Jp?&-U=HMQUAa2V{Df^wAMIN1!-`FS(r!+|jc zZr$=^Zs83LD-8c-UYJ=a^YhQU)eeVc)YTNO)W%74bM}>NrttKmfkMcBooD~Tov#6F`8kidQ$ii z)&^rabx3u=p-}strSnA=iIU|gXB_dIi|XPSe@;n&xl#9ZQS>xaa3A$pF8r5hzMRwY z{*G6Kp%Q~G-}yB9_j|wfm9Z#p{Ul9h%PfO2^&3(@Y@f=qSvSnf4oBFV18w0I1#E_Y zMk2hM^gMbpi~=IZBNB)ws(sHxm_Pw9d!HeLo}?|xTX%p8^N@uP=Ub1u-bQ+MzmsnX zS2YLrNbTFvzzPNZ=X!V_)Ti+~c|%*>MpX%3lAovPj~mYSA+pcB7hfZ!9oV_^ktZMz{S4kzsvc?# z-%>OoL{!2)%={#X0^`+x?9$b@a%M-Wo-fI9t!~lz98Q;={yDljR9(f;M2oJ{;wic+ z7S4Cx)jieKvmmBO3?cX`It#r?Le5|k!KqBhi~k%Q9V#pz_Vn}|*4HUaz!tCe?3A<0 zC-z=(uHXb;0(;e+YvOQx@5|Lhykf*3aK})4Lz9&lj3H7-*AJ5PqK<7-#kV{n)s7bm7FwV9filmp=*TsZoF4YK$EWbku4Qg~9hM zox6QAzmE-apmJbHHVHoP|CEj$?P@FXt3Ue@Cc$)he;+y4IC{GpPu-m$A6lUKezkj;k!*>+dIQZ+CyRsShKo4}zfYXX!=_ z+F^I7ttOPLaUNaSMq2zp-}AR*mc0`2dmLjaN%82|zuj)-$g%E89{Y=6N5MdH=k9D3 zKD_1K?w9U%;UflN=k4wlDZ~v4Be-2?S`kXBy8q$lJ!v{Tpi^*lZ`2R;mJ3@~k-+|w z-JwQ7nCGJ4=@ps+>jc!zOPBcno)Z!f%)@XT=V`TQRMnVk`VjW)hj4p+?x^!{rT5iS zxm+U#q@Orul^*7QvRL*+_TWF6Mu)(M3Zzs;E2hbM^t}wZfr+Y!(-Atub-iTAQz2i1 z@;Vks?dIm3eK2u2A6I9sd@S@RiXyh|dIQj=;XBhIX7LcV@!mA4vHD<81*Fmsjw}`qp%|z_`HD07G_iS3`%A-BKh6~j?GB=XLe9=$4<~KMGAFm$D44Fd)vFae z-#3$@g9f77Mc0lHj;{^$TooPR9}@ZEwN8Q>P*Ms8IvqY|bOp|@pT+_I2<^FGvA3SX zIxuF#{>yZjAR7hq8#@2`m+%OzviY_NLk=1S993f2RNmx z(WZG%p|OG<6lZFgMbr4YV1F}ON_9oX5sN*=`3^TIGrCUb`8avl7Sp8tK#fm|%Ft*t zcmR!7#hPcd@keGf2$C|RMbHbGKdIQStXFDKM;Yz>1+HGN;#7kU&ipl1eq=OSwLA9H 
zupib+ok)3T5S+rk0w-Q6U6nXW>^4s4QF<4|d||gqiU_+E`4sGjDwqfC({zEp-ENez z8wX0Y(dG={)!^=JJZUdH#LN0va^*O8d?Uk>;c`KyPH*Y~M&Y$(khhcEB-Mm3u2!cPz_*!@PU-iYhl)jkwo=la}l z<(rXs(_OJ_&3E?rrD z40=Df5$pfy%KGoVH%7VbBqizDE%wI!?Hkp$5}N7y{#PKM&bu)(kT0iG8)`+Fkq0mh zP}j6?91Sl&5*NnU745#dX@X{y-2^T{k}sfLM8wQv2p@-)!frf)1r|k%xyMpI{UR(L z)1Y)fMDolgSm0E>Izrb=hq;e}{#&q&dFU$0M_)P3>z!`EXQCOKc@jr&{%>8VU|E)ZS-Lc1u2ae{gc&b z3w^+XF6d?h2jG6i93JTvQ~DX5+!nDnEktSN7D5C2R${x|O^$E>d3Js_zWy(@^6BJw zX;E-~;{$CWt3^zwxw|{MmC5qvx8H&_O!^{GuGeGNnzL)~^*{dj^T(e*{`~Rhk3WC> j`Qy(YfByLM$Dcp`{PE|HKY#prkAMDuewN4x0C)-jQ#yn~ literal 0 HcmV?d00001 diff --git a/dirsrvtests/tests/data/ticket47988/schema_ipa4.1.tar.gz b/dirsrvtests/tests/data/ticket47988/schema_ipa4.1.tar.gz new file mode 100644 index 0000000000000000000000000000000000000000..84de0e93835cb23d76bbaa22a753ea407dca493e GIT binary patch literal 87335 zcmV)2K+L}%iwFq*8NyTm19M|&Wo=egQE-@~2VR8WMU1@XMM$-1z^{<$cTxGjT zqIpVoQu~1>DVZBe;>#nk_ZtWdNkkyP#UY8=+W-D`&jHK;phSw2pd>^pWpM@=Jl)gN z*BmH)dh_g88LrdQGW^>pmrv@-zwz~^QmvnqEA>jba{8uRsZ`4KH>C0W(B8a>g3#~@ zc@s`N-!lGKd*4I#f6}Y;>7=~$0{iax)^>xHYaTD1XlA=5$FiqS^bYLcdVOvGH!4-u z|BchrM!D8#K>wdq8_@sdCwl+A*WbMV58ob=ubuPmC23vte+;_k-$vwX@2d3+l#k|i zK>EHn^Nj_8Kc_yWB=Dx;t>M$3$;yj}X}H9vmK}t?J&8g}?2s6)^~v+e!UG9fVL{|t z)Q9%MIVB?$6|g`T_hws*88g@!#Na;hInlfJdlvp%0 zY$p&D|A5{J0GDHtxp70GmrQEk04jr+-g31`7dW0fW7M&}OBnbwv8TlK!Xt9)+tBBs zm+c;jw`kDDH#u& zf}XBR49l{4gCHFS36|8f0XDre>5`WZfWB`=$lpk2KySbpf$-$hh6Ann_G}&^aL0D9 zS+(g|lru$<6eOP40J-o7NEy6=$+P4hG`5C+9}X@(_xQZuBQ0p*;3EzUe5Ep4@a@o` z=`AL<%a|2Fd$wyjk!1t(*@(IIjz|zqey3(AS?$;hJLLCs@deCF!#4o*a$X|pd!fB0 zj5#|D736Sx%r&wA6-YeSSqWx%Yx%+C!1tor9RBry#9Io(#G+H%W#b42_Z0dSFj>?M z?WvuNuBq(+6O9FwbiTJb{ZaSo5}KQ$l*oGmFY-MV8q_!#4 z-%56ZMI9QZAA;6EPC-Cn$S~Pspkv6$W=C{SOgUh4C2tkY zs+1~~+G(v^s==RCh!@CZ^P)rETLBKB3)bM+w%(I=XV@a|I}T$jAJbqm_!}b|te|`c zF?}jK-KSFrwX6WG!<^g8_hk43O!#*~DgfXFAk-02{RIB3G)k3f1yP-l@5fibD)6b^ z3kH+64fZ$m{1sSq;Hv+ge0=LMmELlUAXrajwN!%!1(6)qIiivjvL86)!1lWva9$7t 
zl#Kg?IsUwb=J-1}Ce6VQFu*?mONyc7>K0@ptN+sA+V_-`No?c=|F{I`$){<863Uieo5_3M%g z8wbKF004myym7!gg6WL_QIwuPe7vM#U>ab8TZt=5E)3V0an1}G#0_FaH^3HJo;$T? z#~y?(Ja|%zW$+ZgQ>w5eDh#2yINZA@+N(5_z-TiXwut>IpCx+HveK}Oxz>=v-(cb! zc(50GOL8$Dj^JfVofg>bfScBrBu8jGSEodS$Cu!@Q->rjDpxaLt9@f)J9dbJ zt7im~=YVJkq_cS|1X*TpLRH}g+?}`jz*&?9u2Fd~0ep2l(@@-jI3Q>PFeVmAptZ1F zRNqDjAO&#j5N4#-M5dvPOe+x4u@J9P12-3^3QLu8RfI@w zw8=R1H;PTGc>zvLk4_DSHd)B|6R#&_Iqzy<69W)N30rB?Mz4caJvNNq1nMZ^Zo2g{ zvY=dwCyBvA0voUVnc>=hu(A1`bpCJLWDXfxWl-P|^Efehyf8v@z8RN?-OKY{=lIuV zZ`{eDV_UpZB$hs%QXjNp4@5Izaa_zq*5MwYful9hC^C(Jc4scQhJuvw*WqZO6c1rg z0V)uvjcIK>HLbNYts{Fu|LwUs)P_(){_xyC1Ho&?&6lzu7d(!z?JW%3{R-&Xmy%li zMcS8WsKB85sP{s2t2xE(%ORK|d}_nt{{Z&;)arJ0RWfu9!KOulr0c*iQ6C%}kppma z<=;Tm0k?_P0W*RT3k{8JhLL}HoeLdtV!YwG@Hh!f-hr^kUu4KnLVE zIzcxSOng9tO=!@T#5_Ihx&nI`H`+8!2rj~|rNDRX|3p;D@?yXveRu^9_ygQo29KGc}LQQBtKdIM8ws~DaE&r zwA;B2ZUc+uAix*v9a+@u3J)EL2YnC9-)A{C1tA7R5Xjxb2V9uQNl(m+Qt)jw>VrCl z64b+e0Cem5_=Jodo`y+lowa1SrEI&$@>jY?trL~?x!3qm+rX%%VDy=fNL|L1lcVLr z@UNk9=Br}1R*MArT3Vic%V$E~+@xTQVcM~n!=QS{8^_=>pVWH-$D$`(UfEUpRQSM3eA-Cu|EpC`_xt}|ljr8J*)I{WEDI32C74!IXucqXJE5H@;6{@9u~%GCDq#fg zY3RQ>4p8a@(D#8ugRF(;I|F$f{II3Sphz?7cNS&Tz3Mg>i=zFycf z+;K;YTaZuIt@uKOD&M3)EQvWvHTW?c015Z=#FD-;6>LZd%xC-Wjzm@hS_B$7J1oM| z5MoiQySjy8N*U?#o7l*WTMgJKNHn>qM+Isbz9pwR9s8QGnhkUc5!H-O!if4KLcjR>aMv4HhVuOJ7RWS{*{^FEEN~WeBkG|58IH43y55q4iL9M6ccKKW3S-Iu$Ib z%}<}MgIAUCfYR_nOtybrNrWCanm2}TgT+M&7qjtxfK#b2g|L=`5c4_rVO-%2Am@wa z)+v4!WhN_>53*j0Cp;5nmEXzv>!5i$1iV9!ydze3Bn5@#f~Hvb`;yN3b1%fd^{z@4 zAmNx*VXGp1%z=rg_%umZ`f$F!`Bx#j=-_pK?R=F!)!6=DwiM@?azOXzf2s`_TB-a` zyk|9RQ{zhVV=GDXc|=1qd=Wbdc8%~TdX z_YU9uz|;-+`V|e5zQFS-0Oc*~UJJ-}tu!E3R3sp|bR`2zRut8J$zI_cslwu7fi?M} z%N%T?rYLyvNnl-f>J8{rLzu2CO7dbH<3TSUpt9EW3Bcv9dVLVw8TjyM3wvPJ^HHjI z6a|+ptKsYOFu|t*P1O}8lXwUKF7HA!sNqv93Ik{O7KTX>2Zi8dEp&jX%A!DC1;279 zL_-&s76psR7uz0{FM4rRQ2_bz5(CfE2XIncDCdUVPRb!jii=Xszk>Jf=?8}JqM|qj z;ffWRVMf4|@}i(!+U9izmnRML`?T8yi`ZgHEq1 
z(D}3tCkLKdQ53i?W_cWkFXhhI`b=$6Fgtfk+h+*c5bJ9g^{PS#$dHeHoy{agHBO|!(858Pg*y7T{LU8IV)2S*^R-BVS%`_xYR+v5TX2WQ?%%mNa@}i*q z%kXh7Bx6yP^1?Czxu*U_jeE`sol0VEUBYhjQ#n0lOQ+$CT{{$u(=rLb&397r+*~GE z_o$;nO?UmFpcciAkbKJS9dX#XplVi-7o@UqtT0Y~Z9`O&m0FXm+Lgd`;wwXnbxM-*-AUnzpBxgD+zb0Ha~^7??45Ud$j^T3!L&RZHV$A}K_*^g>nd3p z`-y8Nhqzdj-K|G%$xEfhWhEJ(Svi$98>%RknV4Xkt{PCjnj&pZl=9t&6>Z`g^(|1L z(a(4RyNgWhOYngUM%<@U(L>afe=8 zS0ebxwkkdEB<}l9;ocBmT0f)?de$aJd5Ibta`6x$8CM}J7~<+t#vtrhuDh?iBOdNs za7CLGT|ZsAdmr-Iey*F88Fo&XGV!SwsB?^ZjhMt;OKZ^!TKCshSLsvvMBB>q3~Rt2 zIR6_Q*^|`z??$512;l^i0$zyIz!xt+f-@he76W`FeZ8|hwlgGY%I1}fb0N2KK^Fkgnuwu9czsY}H3Y!|ExucyI_aaKL0>~%7_ z7PA?bihC4Sg9r)z~AJsQYHPf@1)-zGm)Zqd`}LlDlS}xBWyyrVjHpjMXdT? z9cvDg^A6ZC6u6lr57gr(*hZHa#@xH5H`G6Z=J;V2a!-zOB839&H<<@XAIK08Jm{iSw4!dv zqk0(+G`-`QAq|I2t!%DU>gE4d8yGA9FC!>&D}KQgpGti#ZPEtTr^bmYoF)|`Aj19= za^3ao`q{0 z9(tO@ewyZB0+E?B}Y&&*s6+MNM} zd`iWYZ_(Jr+jbQX_Qy>vcWKdD`l<2-~Ch%P6FdENKz1$aZ+>I+#XhA*ZlXebVzV0Vl2=MzCl93*j)pwEx?AmjeI zm{QMt5^P6m8Y(s4j9{(^EdGqY`m~I6!!iArA2n|bb#tYUhSWC~cd6Q6hQSx(q#SoD7`S^fRRvX6!eEz#o ze%CW_`-UI7i4%Sn;SBPBBE#V)q{(@E^j5_8vX~W`@&5(2kk^$-n|5R6!@?s%WX`0eQUIgGTOv~r&2=W5^?rJ&(r(V+0 zzZ!PG1HZBC6^we#=nwhyD78AS>95!dW!6M|(6g*3G`>NcC0;lsRcW*N*R~*ha!nvA z-li89FufIsk8*L`EXKN;#&bt6A}5u;SZMtQheoPGF9_?n2D!Cj0RfqSzs^ zc=GLpo%Jg3v^I(ww})3v!K2p5@Mppx;@;jC|5*h7|i?8xBb;I%xgTdH|=e9?n2)Hplx=-Yse16#iL}|LYdglyciz$o2?g$ly_5&U)E8c z4jt~LJPsQEJe1cJbBJ~jVMtD0ySL_*6u_|s>AU38K(+fFU z>DEmm=FRwt83VanE?IVxvvDpnQK)`qIlVX8yM^%n<8FAhGz}9Q=(YTBMRTxgrutM0 z_$&jiR!ODgWs#DimPJ~^GPjVYD|aMUOX{$Wc|5&40jXzGl#fLsm?fv^=el%C^mF! zSSwE@LUbfaVnLR#3*O`U93!4rC&7Q*rvIcXm5HS95l5wiQZf6k(xfDn!2fB8uWAEB znynrOl#TdRtty2k8&z|LY2oayU@Q4i=!GFvKE5vqJk#dpHm&{jr{eF9<34clp{%^~ z4q_X-IZQodJQoaPkt2S+9&Kl{HDUnNsB%ZbLlp>xfiMdT7l6Ik{9UR7xAFy`2V5* z=QlS3st@B;d4Nv$|7`9b9JI3jp9lNzmJ+GhA(?=eWMhpkr8@bpi_^xUldi<->D^S1W4d9mD? z!O;TFFwYvEx$L`p6FQIDyfcQW=|H*ZVU4Bye2=_@;e@tY%?!+VHX1(`&7|*G z3xh~gQdkIU1MP~+25mTrWE*Et5@fu z=|&+<_f6yNVkcq-J_f!uMDmjn;=n8@>j)JHbd)`jJHW~uWVcqVB`FczL7Uttl_T_? 
z{8ab)bO)=Cn*DfsH`oCpwzQMa?XzA{FI1kh=tZJo5RC`0K!ap&u6bzb^KgB28dx{J ztLmG53Pazuf>{#1mf;>K2+b2#BK*ZnX6a&vN;jTs$oqw)ljgeA2x!O)NrF~N)x>8e zq-H5F@X0=TD|`lm(%H{#=sU6Duw>ofDUmT_1A0j$eUKy9dKWxttNKtC$;a3(!;3?@ z+NPP7!X@<+i<;~)k%#C?>!!y0p>7UEr`yswgvb$l8Al-uhX-*|txA-8;yrC-eB}5I z$v0!nET9%Y-8%2|skkuJiWEqQ+#S))NvGF7JG=b-A6J*X{t1Q#|1E_x`^@Ld?(gmH zN&C~;Y5G(>`&9o+{~w>)z0(u9$E!!1*+;vl7pK2tcezIobK~n?{?RFVv{ubNdUAdB zkKV7Rf3obO_1vTI|NqhN{K0aMHszyTpxaLOv8yFisavTB= z^6(gkKf5g-25Ad<)XzYxcJTN1@%icA@#XmrUP#@CE~I&NJ|s1DB+)>+5(1r;b1rT( zAEDPJhR#G`<+GFamB=A3eE-ybBU_Q*+lY@8Ww;EoyO(1nmXyI!iDUr3khyoV$iw^b z&mTWp_Q>6{!_m+Cjs1$L9jW~+%n9p(%f&Fnq-e)k7Q&2uOpIdb2~+bfH78N$D|-TQaVf`D~KsQ7KnXG zXoo%tZ`hJzQ0E|s#9QcC7YB(cpkjkJ{=rL0M7!SjAL462$-aNWex=YLzbE9+Dvq+Y z2mg9pD!x9s+G90&)jz(PZ#mZu=8L%C*q^S4&tV2B#d~arW4`0Yt`*t0c;;?sI?@8d zE26{bK;7?0uFiu2^|`$(%S#D!u18ucAh5=^E-y~E5sh`W)w}-mkAC;`bbC&$8BDN8 z3$AL%%}jr4V-cZH`c&fVQ?RsPKTIyaj2R$f z0fU`%90oG4q{NT0X$$u}&3ESSkR6rM?4lS;W zy8jiC#SHA9$_AuKjAz^g;H?=q0So(I+9psp6aX=brEdaK^j88u&#kM;`_e!(pJgs9 z=EEko8r216bQR+_%;oTrRgXWI$Ot3*){0KmiFidWh#rRnRZ{!DlM%^xb)E8<3#*usHg_Bqw-4Qd8!xzdCGR!yut~Dn- z%?vzWJ?!Oi-r>?Ax^g2ggl%tt>w|BYHLLmIPEc}%HWSE29O zk7E0rZ^7(mXp48HWV7XLhR->5&~}z*9Y8b;rC6fsMGhcJ)DIoBZ|)=8(xu^F-N$Xt z`iLaSX&AFogBGEgTSiul3sb&*j&JQTUG7xhg>Krt3%~(%7CNpxxXP=CV*lWp3y(=R z0KoA)jD3qz=-#-nS4uO}uMs+3_CPHB`RBj09^muz;`o&PTb{wmE_O%OKSA%j3qoOi z{;%qPvujxEwf&v^vtD36TgyKCExnof`>#5Gwo#YQj!GZ?uZDbPr3H-xc+Sng3Tx< zELB!_EVTg=9KW<}In53!`^LXA7iKQwcZ_9-IP=5i~U_2c-9%kFvmEZu-a^KVUaUrL`oZJ(WIn=dcU{?u=X=HAum zaYyT18hSqW8U=k%qY<5{XT%>_7$BDB9z8OOheY9u*Ka6l$;t-toGh_nF5mRK%cjSf zL_TQblEs#xn$95gal_ZgTmAxeQhYx6Ci49>ugJ%CcbR1;6RO9Q$Mc&+_ybFX%@SkM zvgE-z6ks3pzVu3zjk&ePW6)gZJWX$!r|Hl!Kz4ka$(`{+DzOsrQpI*pe@AzL1Bp^_ zNbNWV7pA5Fo1amRp=g?m_#UjhNVCi_%K(cw>C$-Uc{zCt<|X;1LEUOaofEpClfqS` z?mo4GN>*>Wj*8dt^H!5#X*yq`Ud3Ihx#X76*&C%?NV3OvvC6n-_>O75?6t*00GbDd zDrDRD@SI_Z=i5*54PqDZgyN0|6F(ar_)Nxem}MM2^#u$lT&YLz4Dh0jJ)aQenXn#Z zVf{?KuNY4A;5wz~^zf<}NO;K|jT0#_OPCAjJ@!aBN7u2bG%}}ux?y}c3x{67@kK`c 
z)x-)EZ6{jrTpIa>{&I7IW9a>qSqL*WEtOyC)y8Q2`QvnP-k!!iWDe4^d`~-8&%J|v zrfWxZkdg&|inb}xZ{guN5oYN9JW%h2+&hYpXR)kXsW3BXI+f)(9G6?X1%;ljhmQ9C!GoO#O zOwLJOA=lL5s#xa|{+8I03Vvip%<9VrKdRVAD;2bDwKB-_-1k$HYRo+#&^o*KWUV$u zxj0dJXr1go&03&mn8`%xWPHF;;uleV7`7q|y|4`BD#z;#W1nN72A48FT|8Qzp+bnO z>^;+?ulS`j3X7}MEsr;7&dT!n0xJ~1RbVdVHwumU{8sAg%db;cR(_p&HHrJCpBQ^z zc<8G=tFs_&`r4Su8abpb9D-zcHD>j4c+x$>2lwkT=T&JLf+RD-43eekNG80!cunjK zQHaj+yJw_DAb^V4ry8HyV|tk3B%%Wcc*Rludhpc0CT@(~x7Bvy zKXOe_9LDqR#Hxes8cQ_@c+B2??;AStB)42uNu7PkJP6~HrWO7&jGdrLgPT?5*qm!H zJnn}>THa_`gwdjkCxrmZim2P`bVW_ej zgk57yB-|IjLAd6{Pj~X3wsU}+_ai2o#pK~bF3(jiI-iz_5lmmWN$Q8`Pi|=ivBKg; z&V@(qdDA-*X$B1&uGpS+>u2UIVYNvgG_4(JdukaFZ5j2Kw<;|%eNhvPO;VEFPbUS= zkqgz9Df-HMq)_Yu@u7%w=14)~qgnm0e4S|9ami{y0aXK3)}WQ$(RN|UZFw%CwUz@G zMoeU1oaU`KbYWhG*e;Z8p3fFL&Kx_%^Lb*;*jv+$eCi(2S3zEeUkg`A&|u}0jaF_E zWBw3{J5p@o`ZSq!_%hIF0V5F+zDMo~B6M!e84lz<(Who?kshlcytsn_3};7mr0}OC zBk7WCq;B9uN45SwXwPh2Uv1Ba)9mpQ$I?@ph;sGkj3jlPS1e9ht$0QeBFo4G6ILBT;83d$7v z|DgPTHK`)(!WUOj2YPz`Klr(Skjeko#FT(<`Tt(v@3{q)8(KoyU$e58OmBmkDbrPM z@muR&{blC#q=&^A;EOFIk>|5E7f9qqs4PiOy0rPn{GVgkA_mFWd7b>9)qPm@>H2T& zQ~u9``rG<{iNBv%X8zeOyMpW8&;2XVOhj7HCI3+Z$DZr5?rHnv{B)0`W^taMe%uM| zNt);g4LsL(*#8TVu)nwa ze`k(+Ov z8fp9inmJpgHns%_(@vN1k!_h75MJc9w&0z@6I^JY#-kNP8ZiyiZ8lkTNp5JMN&$IhpGJ}ybn0`CiE_z z97rPlF%a0)5s2uR7lqejjzCAe`{SXom8Zc^R^Z%wPI8+fQ8m%p{q@sFShk=)`W~k4 z<C}KQmVt#c{oQG0)+t zW9o({@|+!08-%Ds9Z?g{NwL&>j1zZ+X;Ii#c;`lu=eV&}Q^TW-0ccv?{&DE)NJrQ3 zX><+0M|5SC=n`zzH8})_g`>I_a+oN)D_VK3AKE~}u{cKg7fN)PWjP9y#K+J~w6sTZ z59XFWwYWjSHsopTnv06OL6e1egR!^k&!n8ycrMe7vrRu>o&no0JrPgoe|sMfs@~WN zlkuvw|EKi-TD`ho&DQ^D)*Em7|4aP+#L%d83HpyJ0fGf{E;}bbmz6}_m&!Y)CT`UZ zNQ&i!Bd`{SQK{j1=0)z9mYUa+@S7W42j0IXTHWG4HZtDI(h^O}OrVu9y#bcysHTaZ zsAqvVk-~!j;&QB9k49GX4J9yy%8FU~WTB8TcgFCAVQFMxxd@X_loQ9Ckvq%x9CT)K zsCxR6-9{G%qgen(8*}EEP`Ih9!g|8d%EIxR8#rN_W8lw&w_a# zI(T6^4^s?e5S`~{%V5l9S z;5XI=gn9u87_ZSw+_5o!Y}}2hb!`UU$WqaLkZz1rEk#QU1Nu(P9e(Lqe(srza9Q#Q 
zo0$=IEytV0I5b`mNK{u0BT%HrVc?)v$e$q%GRRjAdgX=NDg@}5(&L?q|k5(Lq>PP@oF(ofAcQueeap&+FVT$f0%xzL^U!w!QN0Z@GWy{})f3aCO%IC;k5ps*Q%Q|F^1*RtxR_wd$My z?~DAQ|DT$Fwkzv>lS%*j1-||5T3neYR|&0jFoY=bKHI7ybvek!)qOIrzoo%D8ta(6lHj&qwEc16rW*W z>^K~atU&V%5Di=0Gv~%-?gJJiz~p0(Gh%N;5z{mf)13tEIKTY7(U^i|tsem+EdwL` zd?Q5(zmB)ab5-cX-w8%a@Ufq#kYXnNJyJ;tP7d-o`4WYbaf%eW{K2q-ZfY7@($i^d z7v?ZbWKM~;FoBXM10qq_Ni%G5Q;Zk$gKTe(+ophA~24;b-F_~0z(#Q5Br;r6JK zK`lQqSM+v@ncwF{>r+^r6{$1gbj?s&tD2l1fl3i-1=6yKB0-2Zd9oH<@S)Gq=}mVQ zSNId{&%oQk%i-+_k3osJdPZVLWPBj`k*qHGOW9pg$4ht+5O|v6OpAk6o`O;VVDFg} zY{gF@pouGDxcYDYQbnc$Gnuk@@KTy*4BnJp3#+8j2~8M0qT6@2Uv5TTTQz&wYT`s7x}{g%1@`rOpY(F{sgx2wa-4CT^|2h z5eXcwqHq{lBdQsU^>pKKklb65`wM#vCzRMBa`CuX=FU#g3Hu`jYXT`fVguFWt?=ZX zz~x3N*XfswYxae2?(AxE<9jwc^K3VWUG#t)V+$T&qu`24+T}czTPe|9EzWv^w4-b_WxGhryHV_fG%o<#iu=YhV1yes6cX z?Th}OzmQWpn%Lbtk&Fa{Zc2Ir!;GvTdBh=|pLUPG!t?D<9e#uyvCp0U#c8jHfLwN2 zn_acL{m$|AS-Z=wuDe&4z0*DNvwZ?nDlpb@6U)QAK?L!iFgr1f%NMz4BG+~)!yhK0 z89&Z41(;D*6tv)e9s+^CV5CA2V*ZuL<0qQA>Cq-Wv8zX4+WjjS}hbIH=$o-Nt0`94Q*5P`m-%0dV05P#JuI*O(UAgU(9uxApEK z40gXvM}%;1v0`?d=MRgP%}i4J_Hc&U05dM z?@j9n7WcrS42{weQ>`uxZKPE04m?M1Kz9SA68TIVP9j@;nL;7i7B;_YkH=An zq}x$nc(=XBGy2nG;Q+eQfhOA{Pji`Ijun-&D8_{ciy9uVsXi6vrUbB|r%7OQgyPA( z;8q1+R?FSCCb68O$c-mH6$iTyqi@2BC&}gJQS?^6*7n#X;@>1+gkfZTb4Aez94vGs z$v94FAts%+V61yY{Cs|I+{f%7a2=W?i+MVIOzwqiD~VO2S-~vivY;7Ct9{WWW;-L= z#T-}v6v#|7S;<#9*-?`*VJWhR;-Du*pnI${pv^|?C`6Gs@jJW~(BwWOF6Fy42pbRt z4-Q-Z0A09jOFWIA}FwM6)lZM?uc8G7}Jt#)$xt;_D{vIQNAAI|i=L z+1}RiHYQ7`0s9TuxrYmZBGY0)c-JwX2;v)Y%o8o--*1`piu)qc4y7ogva-<~mET~e z7bkh?g6Dt`r%ebohLh9}YK_WIe|x8$LaFR*xVru~NUB@OXj^1wjzwZw${%qev#!0% zoiR<<$(wa_&N$$KX+pI`k(hK*-K+5P1miURHL-lo+NhvBxzTFgb~Hxe<3ij;EXJZ5 z5{tX;NTi_>I6IUFI+U>bZeT#l%d>lihC`m=_E9o8Ka?qc2P$E(jPhByI>y5A9BS}z zZymH1@x=ei5l*zK(r^}^DX#Z?SjdI%d81Z7+c@mcmdy(k6_c7qRFdjo{1+!3m99{} zzdlr^ytxrRm3Oi2IIy0TkDs`7ixQ#{d_^@dp99*$(9|_(PC{Gxa>MGN*~zD?Au$T) z?DbcO^6WMkTRa1~={$@TxKA%<9&%~!iUCQ}z;o86QozfDR3k%vW`XDZvh-|t3y!O) 
zjq6f;kq=561ni1ws?o5>VN#69m?f1%(wsZ?p6UEsTmofgEryGN)+*$#1wh6rlhu)) zq9mUMEzhwRAFsZ<@r3yr4-vyf&Jv}q<-=5n&oVH9Z^5@0AGN-Rom)|S3vzcMmPhL5 zDzN<8_NceNm;KxOSgqP&QID_-A?%`{PLh>c-U(zaq7*H@V?Q!(_Gy+=QJ()*zwx zQMNkirPVgxS+@7iEYxWEai}a%R4EFs?_D%Zqhbx+S*koYyv=@Mp)fXa5a`=HkKn~+ z|MX}4W<)(>Hg-_LAJLo}MrcWbj_y1m-bu&_28oxSxuJ)@Z?T)nP?@$Q=2Eo@W%wbU z5MB;0WZe*WUo`O`2!3Oix)`|_4fAnAH_qw8M1}Yi@_gtA%|bh5wV+1W$9M}rksrAh z%%`y6<3l_@dBCt3-6J0as_xgIcl#-zb~qZr0N@E;ydAJKfp^1uVKSljkPOCAICdld z5x$2ONa}q&NoD~g5O%F}$e1k}=?e`T>F{Kp>E#}JnuY_u!IB{SGaWA*4L+6#669;% zi6=Lr2OIiJJza?esTyjGX}V>7nB2nBvYw-L>duS38{g%oC$z`$$?!e+bj-G}io@e5 zoWvqc{5H}vddzlPepH~n+ZOlmQ|$lwJ1wp4EopUX>{WlUY#&yf z>>FtbhabQDh0g}t!lFAnu*R@*(hd_@?zSnQ$qCrsnfj^~Ji=;v_sfg9ToeUy)b?6Z zk_9kQSBncbiEV4_DlQ^&?PKAeC^Rc~q-2VP)IQAQL{USRzUyB5%V}B|;1U)GuO6bG z=Btq?Qx=mUl`rIU0f=h>9MUiW6pt`U?L_)bXm1(gm%RR^hbwhLo?tq41~FYhXz28a z6swWDzAOMml-km&_~eFX`1cf751fq%>(erVy&Bw-(6os)Am;69a-Z1&ZXB)ygtfyx zX?US37hx2ybXGxz*}d+ac6ZM&PdcAF$89=ho00vz9*$}{j$HKdgAH*seg!AN?*N(7 z62;G;+&79VLQ-8v(&HP&Js+8M%%CIuF_UOwClkQQ?8SmTyjv zLt;u)S^24Sh3@BNQuG=BX$l+r>_su)a5M%jH{ge1`1}l9?Es%ErLmvNus84E=N$WT z)!LVw_B723m5!Wi;+1v4cwkjCzvPK88~hJ5^XG!~FyuY+O}vtrb&j^Gd9N;c;!6hq zgU&l{I&;naVFCJ}lU2=rZOOA=I{Y7I{`nyi&ptid%;!O@qCi+p)7p_?pWa^?A80j~ zMG5nQ^TSZaNvF?*VEc?sL0Skvde)`m6_EoaRbM7!C>{O}HT_EOc?JR0QVg2vlSu+X zNq_##FIxqeGVa-iy}46P-g2nc&NtK2VN5B-tJ?irOP=b2fq&P_d%8EBx%nyYV~P!- z^K;=$ryUUL#*$Yxe3innuTBONaTYv#f(|ifz4qGJ_e#NRrIZX(EbUkiNj&2LdwsKD zRleBqWQ?9gu5-*?4Q2zEEa4jj?m-sXQ@$X1UZd{<-e$du{v{1X#tPQYu7&&WQcpqF z8y6<2=MaMG+Hc&+Xhu$UuwfuKvmp1{y|Z%{gE+l-G%HVQttHglYol4yZVS;W z;Eq)@z*L*TE$Qe3wVs?_bx)7m{nL}&S6>wVMhbsG2pEmRpb-AqAkklcAwa5_NU71R z@70d#)oP;(Tnqo(tG5(v4oh}0KJZj~vIr^MX(3oY z5_hN{r0^#qr$r}lFcvkNEj|Qp0I>CR6 z@Y1KqKsi{Yw{pH!-xa0ECF2GlBibM^l1KfQiRaAdVqUMU2PE}$ZyZ>5?2SC%?Yq7^ zz6EA{A&S<)tp~AHJ`LEclr8#v*?E(aK6eB}PG^w#bV=aFM=MVRFe_3ZeL~l3j4i2H z+=dfa*=aS$A7}$@sv2whw-7#2VM9E0~Ocb=z9CtaQeV zd(WA)#{PO>`wBZa_i`*FDD2v-N1luoHY-}&Cn zno0_d@$@5}M9&sGwRc|>gu@hs1VO;$tC!IgW^@gNljwJ 
z8Q@uy-Mn7Y;CV=Ab_NJajLt8+;@bR)stXrP%9;LO##f2UqhYZI(9BT0Dc+i6cAd+UW++W zia+M0w5OH8%adlRx#|Vy@)V0^_%aF(H-?&}pzywmj-3l|8qHbTtD0? zV|G3(>6|DuvVHz|h?J&9bu>H$dwi9WYCIJk#oRVdHfCi?7N(y+K1EhxN>Q4eiFGt} zY!qNCPiagO<=`r3(Kd0Hb7q8BN-X@^i<j^q5LKq>T4?U3$2gp*WxYIjQFa+&O(m!lH&Y19AgFZ~09u7)h7L$iiUAWvW(6C&4(XFrHf8OX zlZHR*ROA?fHn|21F(}|k;wf59B@z%Pgq%E?XOpus*OVP+L@<59uLpstM3OKjUXygj zZ4PP7pGIOo!q)eyJZw$&LuoL=f2Cy*R@h9YB6#kVN1rW(P&>jBYRx(aiRt;FCO4eRf=>w3fGWU`tc>bh8x$175psvg3OV#6F~=aw?1$BlZARVE$gx zks3lA0&|$rUkrOzC;VT(@XQHlWj3oovP9omn8nOd-v^IKpsiUHdlzlG-5QG9`ZjmA z$3D+jajxP%C+1+)RKRV#KI^nv@~Xa2>*8!GwzapT0qC=h9U-}#pvAfeqE zZ#`>nc6dJP(pNOJt9)-M-+Q*MS{dH_zlx&#y4>jj6S%Ry)kQEvF@m^0|F_iu7Qz3| zYGB!UtDKRINJm1WkE(%J00YVR=oxGDh^PAv;}Wtq{AiEPEc{?hv#cMnp@2_N!a1Gf z2J;xczHt?yE1tJH6z#ZNA{$SSz~?{9fgSM0|11a6uC4<(0{|R2OZZRFA)-D{-G3_| zwQ5?mtyK{nI8bj~K+L$}#(0|RyKz!OgAJ>UPI%0OoWc_0n215bDR;^`pSu&d;5MIw z?7I~iCM!eGVwL|OF>MXtST>6HsJVXb<8adq>@4&241RXm{yK_t{r7Jv=w~T#<={f9 z8iMt953-TZ+4$h*M0j2BN9c7LywW#e)e~M*=?Q+6x0M7Ig0FYW_|3gRgYO(pswj4o zN=u>iY^dKKZ5&GGR>%b@G{tq6VH^K#Nh>{tIK7Y%*Et<$` zM9~&;(mql`yTrRfG_OVAhf<+X`h~ZFWKoUfCzIO?D={*hozt&`BW&%GWNZ1r>BohyqRSX=L|M@N+4yK8xc;vjO7`%-LwVXItwmBwL+k@kpo|8uoGta>@#E`g(wX zJqrZ{pf!jy;*~Pycst4>*mA?^don2+Xd=v=MF^X@FW{HpJ01eyRYC-a7!d1`?Hn1fek;zfG2c_t4ksy<>mC?@;8S zWQp@T1FbCw{es(=jy3g97D-!)I335A_fh;e9wdqibXTr}ViO zAo*>px2S5y)Y4e`L1u=r{&x9-7~x+4B;D$x{OO? 
z$6Y`kSVo4rZVoWc8P_9Kk*U4xDANqKO9YLKSq^*>ock?^)-p#xUR7Jnl)UkQS1U?J ze_HJ^H-rhbVs_gM=LM8G2j1E|8V4Da&{%rXZE*r&KEn+x~F zEkX~i$}V{feFg@|`r(;^+L_m5(ar)>O%zaAIrLL1Hz3d-o{kZ_1Paiy;V5cJc@BR| zP(U}kP8Qv&8hp((?Y`C=I>wwd78q<(On79=kqGoXt7%L;TpNNFL?j*S_wEz+Ugwpb zSX%T1LZHYrk*0)4_qu}OG&yfwVU?@%eX;Vk@S%e{2AL`4Sg?%!LE^L`h#}W{34S$`s-*T*4yNjLNjy-F?99{99 z8heMw$Vr7w?%Jx-IOKuY$rYX~c&u$>L-r0iL1 z{o$#irAvuFiszXs>pPi8Nt5AHbyMblNBOM>gz_YLM{c;BDVYs3EsGZ?=r<55evPCi z`XsXA#7XcH_{%WoI9rY8t#6#c$X|qGugs%&oA2RadrHdP6m^kSmy<9fEBDf;e(m$t zDcZ5pzB2O2dN5@wbIBvAo);V$r72%9xRgwh`8Vh-DL;xgLbGh~#v-x^!2)C?I7_;* z7q8`tOL#0ufwW^%^4iqcjzM(}=?qLiYA=OV-hGn}!jON#hkDJLU}Nl1@k-*i)q8C4 zr?}deH7mmQVPE=-H_x~4|LgTxIOA{K?+5C3y(jDe_S+l{cCah%RTOC_!V%4zhFjpe zQ{x+~EH1;KgqA6pbL+7w%wRRoc!ltCLU${g>wec>x>$U8R>y+gevlD3E!v8mU$W=Y z0exQB8{P57Mv{<0s!Ykps&L8gzw$j>1T@Hz`-a1jdMzvjSi=Nz06SDdF^V7vWehu1oOlZoLXF@J1+WSY*l`Mb zt(+D`i`6Z{phC+={VP3c##@wmX|2JTZjQd~w#}dEF}wg7KEg4rDdN~Nrkkd@?GT~1 z7$M_Z{VDlnpXv9}4E0gIbpC5lCvgazSX_Ti03%Xk^tqZ7EV22hrT*akvJoR4x#0U)eCngEo!DgRWYKmzOD=!8o zs09pJ8O_PRc!t z06#89JT+K@QwcSsVrxT6tPosQ;_wy_1O9=$woA6YhrktB2tT|Il3>MdF7HvpViTBY zpmI5)vY%bu7NgFuaIKocKd!640*D=RTQ7l*##=D_v?atA62J=yDbo739q_I(`yKh* zfH2^!T$goWE%D1H^HAF==MU{_#2|W3wXmXwi|9+&wcTqyRmregD5cu7_UfE)o~j_ZY4d%p+hsfKj*2-} zm3E@~YS@l0)#D$o&Xai_f>Q2CIkt)V5`E?PfHX>qxFW*}j^39KrW4SPVSRkbUGz~~ zR9@mHpl`97i0t?SVnKl3_f8>n6Lhnr8ml({4$LiMETbxd!v-RaH*B^|@5b(bDk`v1z44_*`N0bt8Of@|zo6JK`-{TupME~=7z z8_kfLKG46XazUM0t(@S2O%x9ka0uiE;&fCNR3ig=hfZ-yLhwD@EV$M{Yz=MzQURnVIR+P8UA*$z&y~>G7{#`Ztwaw{Z5IQHtLMlVduf1E$CmM>S!V_ZfvacE=y(~O(CN%D zH)_({_cm7@zF(;MQ_0E~Rv7C&fspb}+BAcv>A8;@LKQBWi)j*J1@sC>d=@@>&`YdPV;envUBE5fecpVzue^;)6v8t*pP)0XA@+3$ zVfZe-LMUfRXIK9Iww3% z6EPi?nad^3K`%RXD}&13kJPPI%e2H<@yPF+BG{LS^m0|d+*5n9olz6PRJ;vpGDW$T zvm|io5>5gD=Gzfv0XLnaz?0|K3C3B!Uxu+}+5G#|m7??sJ%GK#}NBp*9eW-R}!lISL)B z$2`&>=H;A3fjJRmIiW(fHcH2a>HT-ZGoc-V$6~Rbm5>K});^*1%~xxzfICY}o?hFJ&qRQtgcUxhWQBPi((S!Ni1Smx7f`DkK%t zynq_L6dTvrLfaqey;%mLc)J>&SD4i&CxMme!Fe6&R$@GfVQ^u!nr&qXyN|8)Ur|zk 
z=3Oi5N3%d36@f&xdwa3(9XR1b{{GAYwJO%9-0hXVe1!`Xv{%74oho`;Q;6YRz1}DE zEP~FD^(Lk%Xcz{)HwtjS`2XDFf~!1T5<@P8U6Dg-Y6szp88#2xn4m6bp^nM0W98W- zImZ%*3`s6p#SnseuWU72AP3OXWKo~L69h~hi>Q~p@ByW>m8AVUvgKT(#mG;kn6z+O z0OFaRz#xBUcMM2{#lapI&n^uiG%Jl|R^DW7Zxn`9+Ajl5P3DdI<1jG>TowL{#vd(l zcN91kohOQO@8^ojF?ni_Li?LqucO*ABb9B^!r=gDDhkKcrQI?BUyjK*L}YFlkKqd= zp3b{iO=E^fxqK)V{AcYlXW?KRI0=mf@#zV?Jh&x0-l!ZV6r2sGF*8@Vutx!!hRW`u zIM8tW@{&Rh^_aIp>_pVwJ6au9NY~ zyM4r+%yxOY&3bfv3Tq|>(ulv=vHw_tKmxm7a9v-MwYThRudLzk*rT~jC=@1!hiSyT}k$djSf8;9@I4|qDJAM zdn^so+0m#ITyP0Bl8j+HG&@ydfR@|41X6Wu(v;QOU;f@`lnb+B!ExKG!)Si!YAL5^ zv4B6v6W6hroVY26u)C9X=ZWui&=2jnT~AYDf_A#SSR2xdz38lXmxh z7TSJz49Tc<>spF+rfsi@m^N5YkC!tuLaVNXHvS+^d{X+i;?OjYm;C<6w1Lr3L(3(YHBA^<2$DibK_{X`Jxrtu@Q)3lHyUyYC+_L?J3mXVdtQUQwY)OF;j`fTCAMQ8Tnl} z4vd^jeuZ7dnab4`lJ}=ktyYzPkOW!13!B6ID$+ z{?jGU#H~vv!t$Odvh=FZV$E~Gb|u9WE;u_hq&2!cT--W#S(#}{ITA*8l?mn7q@V!M zGQ`U;otPo`tEq2SfL*$omFGkTRY>6b(h>Abe#7e1ggIn<1L~ z=ymQ9q+O1U7DSX6P1gzJvcp=clp37S45z;Ug72)~p*cJ*c+}>(>EyQj#|WZ5?RON= zI+fxROTkTNF)q~jVuny2fpycl?UMG|)pi`{-du;0!6Dw&rU?po*LAQdW!F;@oo-c_ znLyWCFdX}B^48&Y3p=%vt*=1a(v-TPJ$A>b?rt1)fNycZq}_W#+onN7@!zC}=cWz5 zfWVHgZifS+3A^`<8VA9aCpRUOjdl}*$It2_|FLMxk%=-}#`lgKv4K7cV#WXu=It$7 z`=!sEB!KwDI1XT6YSIi}=u%-q;;_d1`p}C$*bs-U`73z#gjO|>6_@elGK`?(IT5mo z3um?EO~^UXeJCtEd&qqAU>LCMexRgVko;VjQHw$CqKJ>BR zlnLcIxGEP+a$<_PzQYb5cG;R6Gnx${EH48&VZ2~4&a>8C1B9Z+`pKcHn2sLskQ^n4-1OE8XDKM zY_jwUmU2Q43O1A%*uBTW9TXAK+43F2(4?zdJoLT4ira3H%+%cttBUQ`aVg+B(7Xs} ziDClKLFStgYUPdwLEzG1c&;y)3Jrw;d<{)-QE35iwT-ThKruU#|1 zv^0p;fLSFH-xF)GTPcPly_lgt%M;_WDLX@sB0ZfU6vFm7+YKmjc65veC~94**4G+b zk9VEwmN-3P78k>4zmdRS1to==l%ybGit$&-X?9MOex6kzU?>;>ozGd;GWAGYww1QbVgmdqQ_M) zzsir{!Djk$ToKql{^J$Xg5_sbwEQC1?JM(@AThOGc*}gy{@OYXl(;p|cZC~04V5-ta5+gX_9Y9bS|U>c)E=eUo~m2kwl0y*G=NohOhSbRl*fC31Q zJ18{GcXP7!TWpsadNZUv)=`%0gDD`_Ne*vra^8NMuWabW?NF-&vnlmYx&YIe+InV| zj!78x8DCHYYmpsVi97_2i~8UG4)94d+S%)2_l1qzWEP}WFQTtJL$v3AhP9SCpw>*F zK|F(*2K+bVA7hwPgvDvf_nSEJz*3*%y2ZDa%vf{wMgvrn&(GLD#6z*vQjz16JeGXZ~L2p-M8)QEOCyOt+c7z5PaNWvjm3kuLj 
zRku-ldO>S)%c(Z{WQAEqWWIeuYM+A=zuSx@*#fzz*(H(f4m{z;9(>0cl`4ZCI&QKi zX$-y4WX=4PG>5)*_zkBg_yiKxIZ%J z%}#~tLsva9AM5DHl}e;DUC)aSIN;pjxbP0iebZdscqsUPHscr$-* zKnR9`LZ0k+OnsS#_{8cU4Vdp*xTZ+vbk5#62dgg$?A^b#^}!wCx~c-fq1Heb&}T<+ zRKN{C*p>}TB)cbp^ZlR6;$;f12=bn1RC6Y-3$mbJR96#q73>aRioM#X?#LcJaqCh( zOZ_pf*DF3Zm$dv;y^1M3BYZ~*D7!$|3MT*;J^h(w4!MXv5^87 zhUw<+0h+#IQKf6s(ITpoze_b;7m;JT>s*KJBq#|yj5r8qPN85!fxG_nNv8ARy) zSx+UttJQ-E>$5`a#!F$Za6({i!!+KxyGHz?vg1>O)8FoxX4y;hrgfevj5jZIC~YZ= z7Xh`8u^i}f1Ibw7a-gjRC^7fh(Z+aTJBUITN%cAdFGKxzhLOO(OszNMyLqY>ahMRR zlqOi2Sav5yQp;Q!VBlxj+Hr%@oYVWe zoAs)^B7WKf4*ZrHuR+*bTMEHmdb% z2pa0HdjJ#@*XUF+YKQfxa2W7oe?IyTiW|^*uj`w7s`whOp6q)vVwL<_#NmF>Br%Ne z;6nnmHvLs&PQzC8dZ;P}>Exz#Mj8kHFyw9lVaHG8Y>Gh8PCKUy=2bZfCRj+9+oSy~ zUq?{P7C_YmJkqcK#!Wr6LjoJQn+(uAFykp!|C08+VUPVoH8#wjUrENYz8zlx=7xLX zxZK5f!TqR5!Q1IMdA}SrC0azF_(FI9nr-~=Ov!h-_a*sr``T>-^Hab-&|D}ws4IYb zI~h_K;}_zk^)JT&eTzNO_(FB^U30tLtESUjC%$r^GpL|IJiGnAFWH*50_ylNM-rVqi_*1K!1azN!;4`A$PI{Ny zHC(~x{7=Rhx7BhaIuM?Ii~lgFzum24t!yD@2RId}hUSMul|EQ>W+nP_?ooRky896( zh=w#t4vsB6=Aiu`2)^BCf@uT>vIUrSwLNm3v%8e6o=GGO6`-+lVge*UE0cA-3z~F z4M25b=W=?$awG*Q(0li1elsS7xXH8?Z1SU=#r(85Rja1)Hjt zSjfTzU0nzSM7MSM!$j7bq_vrYcHcc*_q_b1plWOp4M^j|y0H(>DIe-pOWI2@A1kXn z{>eWEO~mIug>uP=v2=BR5GlW_CI$2Ku|DbB)AP(AUA5}quVbXkJd9&Fp?yp_I5gkr zID>F=@PcXTvoj}x2ll=a4zU>#L1}V6;?uv>#|@_Ao>?jX2^1YLxauS)d5lafhby}7 z=r*dGS}j@x~7DL0mO_nnDb#QlxmQVQ+j}P`ux4|+F7Lj&^{)nNVbxtoXP;V=KBJVE~ih}Ddt@0>LmKVO$UB{>fpq za69_{;)(-6;wjh1_Z&mn*g-JH2mg0~NIdyT9N+;MWKL{FMyY^ctYf6$)+=JcYUmtq zjR=hY?+~$ar=vUI8bXr%E|W7ywDpCVA90`gKVY%YfZ)HYKMdU)+reyfgrsuPkXf;` zyS#^M2_978l!L?g2ZO<4@QABQlInUu1|kZ{{l`%FqI;76XM~unF@roS9;U9r4W;C9 z@XM*g6Rjt;u7%Gn@`Ymp(-2E@r-0N?bVO1+%>=K!IbeZ<3g`ck9u_4)v%u`eD7BJp z2f+tvj++Xxge>9-TMsh;S0K#O>p=XF#VOoIZ@(ycte7ci#W0lc9!jA8U(q4s3^HcA z)M@~c2hqP6Fc5?<<~x^5pbm6;0Folpu(6C*32{R48?(+H>T^B*fSitUfECf*B__G; zoa9JH0~Wb_&_R$|mM^pyscvG5N(moMGY)gn8s_tI<|Ht^Q}@PxM`? 
zfwb*9O&Rn8fqnHHwf%p>#QZ2MnWWg+3Jmgh|G+J`Cl#d-C78S=f~YS0uTSxlv)sh* z^VSrUc}(8myZEjj)beB{K^mu&C6Ar17dQXC6ey%~oxXipL2NNYZs}Xub*Q^8g=Y#&|u#yg%uH|>t0x(7cV;%Im_M? zg}`HZ{KezsI=AhwztHrd*$ITl@-g$iOq_QB9nD=M?(6(|>GXcG>N>2ZXUODh6sZ$9 zfYW;?h&Pe^ge}Ps$pcgNkz_E?mS^A|JW;hHJqx|TRP|x#*(WRF%e+mUKOwc1?6Gq# zQ@x~C2|HYAP4=zx_xK`&zkCxqu=stvQ$F+k{H)zl{n@r%-LvMezYm}^n-QSxzCJMy zTj9A}y%VbWF}CZY+M%@bjEekTl%17dS5M7P`$!kC)*YdAdY20w$WuIt=~@O7F8N#N z=>WySdD2QpsOYqHRU#oAKs&rN3#T>T9pJ{$IO0TL4hy#rTmJ32MX|v^u>(*8GMHC8XvfEF~cw-&yUBL1YA!`#QVue^usmR_1*o*)3`F{{PXI_?lU7uD6^vvbW`Qm>I=`f-Yr8tzp~5&;n`H+EvEywb4Bxx zzon}A(ts`gX$D4yKTE%OQLFePNYw3(s-BC{l--SRDE?>Bh=)T+Vz0hOCq~tIBMXFz z@aiY2kR|{$p}koOszs{)0#03CM76CKE~UXz&7s9rty1I(y2hkNfueWJ-jtJwS-^oK z3Ff}ZC)456L(BcGTSfRWyN>F1^#^EVf~@gFcLyd6)%rB*O*olK=Gc-46~Oq4z%946 z}t z*A3DiIolyaLmdnE#M9o{)ZOh)!CY5B_KhQGA5X0Kj3t#FvLc;D9yR{`r$rLzQb)It zfsQ;lk-kowXj=&{EY5=ZW`rT;)>XujPodqy6X*T97Ihufg=G8F+(RYivcIqxFM+~N zmsuY8U9j(1Sk!qG1QmG0V3&D~#|xb0C5Q`=!!RPV+HCA*U|q$Tt*>vbO%1JUf|fbY z;{jc?OEbc|uK{?oL@nwYWc|9Mggc7TNKQc2aBC7irn=c>dFa4Ilieq+PuS3i*&kxW ziyry;BJ8qzvUZ%Q?f`A~o1}Qb2Alh>3AvNZtT&HyRy!LF_T-;V=EM)3*x-Ip6+u0c z{c7D{z=ehFd~nxZ?RdB^O7y))=uI~?k!4r?2x3S0X2`n1Z)H;4U9Z?_lZxp4OqtAenT@B`BB7p+wd`jZ|qs%-KrXjwF}7GgWiQ@ z^>uRH>^!Pj<5zIcE{{<;8&EfGC1UnZQJ6yyBCCBE4w$$E-|Yp=-;+Jup2%&3WF2Fr z{H>52D=Wm6a?x{Bkna*qZTv*3=?egGA+T`-)Zeq0)9!Qmd%}DMnRd$!I#UqaBNG<% z;NI6_{*oNst|TxmoW8jmHJ@b6ZPRo_+0Kp07iINNI&}2%#Jt(pA{Ba&ig-n1T=iiV z&zogD#StT%MSib>dT4zmkJcp=mCYzjyzoe#m!S=I&5rHHhiLdMeMjAP`|oe+wt@Da zx8fT=!l(~$UQ%G!)6-cA!^Rt&HG|uRetOVD^VC8-=w%^FFJ9+pfuaB`|KUlk04JDg z!h2QppzpEdfqriXp>h=D%f(k*j$d45Q!;!$%r`n?49x^sV@0f#5oa8<3Z$>yz7X-} zpHj*so%xXmpAQw)nc|k>%7w8zigpFO_Ds7x$9)tjA|5!)XF{Wd!(h$|M*MJoXvex! 
z#gF-W@I`4!`3fAsWTOx;0H#-hd!~!S>G!>Os=}41hwlb;`pTH<|M;JqcHA#l zZD$$m_?}b22XEWCxO_&RPJ%7JDdKU&ppC6zzO730&Ya$I&|(^K*$W-JZEpcH_`NlQtE`3|-3yc$mSv?g z3W0(^9dt)MlsLQOxPBLfM3N8}$H<>B889cemFYo;Ujm+Umd0gK-;I$ONe(2aLVxXi zeT=1G>C>*t&X0WQ^`w$Uvy)((ET+&Nwv(>3dE^-!8F!>dat&cy6F$>0AIj$2Kk0SR zx?-&4-7UcU?li7(tN0LlWFPWZ>bni`&v>%f?yJ}}n~!dKiL!`K@8mkoXHnRR;aCVNZkGm6odJ5+$gtdECy zKl**`yJ_!fOq0jCM>FCd45QDZc)KH~^j++AgYjd1_7@YY;a0aT_)L~b49K<(j2dEj zJ;?3qte!|V3w_V5=qox*5BG-jMtm79?T{|-ag?;j$`^vCk!Sq1cPQNMHy)M5sxz%7 z7Jh=#5$#Nk`vEzP zkmZ)(StKqmeU#D|eD|a1A3Om96ojEuCi^O?p`__a>-Zew+s0uf?Nmm?6dbiBEF#ii z(b;h23t=yfq@LT9dBbg+K83>YsQp4AAaOYE%q(HtArK(mJP0?pe@WDVOxA*6FCB3W zs}&KuBAQ|vrXD`wX~-{WJci-VOtEl)yN1FJb9+?9NbR;8%q(NNNt5CF=jVdpsRn5j zzlp9vpg5rnC)y8L2>fzHB8#aKvHP;$elWCpYGQX_t}%l&dJfO(dSzjCEK2965FLVy z??xLO^K(D7;`R_yWKAkJG6dSK6lz0i+AmMDksx?S`S4**$iuX>h(eE2K}2tju_U6t zXA}Z~WB&FhvYNYKGwI0Io;4;X#el?xa1(0S$qXh?{fnNtO-N==YBTxTB)pb9+!G5S z$k-MBZ0z@N@KuS$*)~+gFnRev)T^To3&wvH`IC&dnfnjl@&ozP4K4WSg3sVxG&)WU z-E-1=LssS{4=12Ogzb2qWZB;tn(0lX#cFhVmJq_nx>i0BI~!W45&9ERRZEi*PQ7dr z#yU3hO7riD=>FNp56Weu?z4NpxmIV;A4RY4dF}s^b_&SZfpDzhHHgas@Lm%BvGmqj zVj4A-EP@m^Xv!no!Y~%KSVI*qo9+i+Oyo>QbV?owe^TgAwaWU=%(noX&O^f7@WNZi zA~{+}VNuiQMVg+^?`V`yyeXjJ{jw?>tdOGck7Z?lW3@|*{e1{h(>_*~#LZOm#hl(O zTFL^H{8`gjbwDitB9Z0q04?Ei-69}k1?naXXD-$lfm10>InXq(vYSa0?|f--SeM|( zQW>vFp8M;%`)R%JCJ%TAPl(dkuS?>YPPnf#0K zyPW?!H9w=>W=Cg!-gbji=IFw(tLytwe>~6sI`U81(07&J=d*v4 z*5`?rZFtLSr1gDgqPBX^7rUEesN7n=SN{=T{pBjICx05X(^#9~uAT#feLLiOghDqE zXMY~6T2qbx_q!SMM_=sQz@E^j|Cf1KAgeYH92q}Tk_Z=yCUKUP^}1n#h8~g%$zoSn za}eyvubN|(cN}A8)%2@HtS6=;PgOS;(a6LX-5xu6NG$GyQk%Q$3`X_IZjeM)H9yJJ2zV8 zEseYIus_nSAzJIRKuUL5PjOKSa=SAoEC=Bc^8!*L{D!0(TzfE~3kwT?tE_%t@D1h1 zSHI;l!{30IGV=;5Q=JW){_2m_(#Tz?@Xr=vrddl{=!*CfqzXmtsWn~{#WjB;1J8mxRRda%TJ&@!A zt=8GsCf0}4XNOjtuNWQVCDKl;iAERC@0PEOf;6Py>-|@>-?JsTVtLK#jGwKU?`Gwu z<&B;(v{a_)Gw__{Zs!+okcY2kI!=80^`BPn#2er*xg^sQNNR^!%gx4VoS(VCmMeQn z?qHByO>o{`n3Dd^7KhZV{8qE$m&7AQPi8^P?bb*gbe&VZP+m7e>@rM@j)S3(@8}O& 
zwYaBag9A85Q{<_I0&|sAN`?0xy3Z}cute}hAy!1F`0DV$Z5Mw~oJKkYv!&elSl96) zkOUWWpWA4>=fnBcgVG%i%;b>tN?>j@*eBXRsh7HaKHHpbbp(1`Vt_k!!}c-xA!D+9 zp>7u_ef8up8g@;yFE+u)+K8~&j*r(J!|P_V07}k-*5u!mTA@u#i&{I^po)aniP}b* z#Wp1WiN`{g#b$+=vEMO3xdHbvTCnCA(8jZsNMT(V?2Q~5?5%ZgKs(jykthiF_Lz%Ks_Aku(SG~uk#e7?;X`_8FRJT~!pY$=@ zhM)8ue+(V3IqSM*lO5y>_~CiR9dtdb9bS9x$eaTci6xyKkYscnt1sD4p zU}pU-_wAo3z#8t)Jp%6Ke#xtGYSFGTewmpsH zUp~9FZSqTg)xos@4K)ZO^JtArz%0tw~wd`2(m zAp&*~7K(R-JBn?!nos8m8UA5f&7jDhS67`llne$=>zH$1dn zY#9pwmd*U)oI-msWF$MaxK;-H3PNrW%DAk5AD2-LsAu9^KfJ7$HvnAKFC$-c1>od# z((E*D;V)BYhWo-CfaUYqH(B+lRb4IPEP=XhS;Km8TbBLBPn^P6u^Fu2f|Ny( zztRl=RHfc9{5untjVO4_|K)> zD4w3V6VUyydj!Qdm4;5LMvz%{Jh!*8+q2(&&Yr+q@Cnj~EF@lmy)m<6HqjtW8~>{= zGVh7wA7g~&QgCJylO8I~%bO#WaPvnmIKm7|(+%xkD&~tvJx2x@y9AXPHpXV z#xhx&Gg=1|gw=r)bg@0juxirO-{R96pPH)%nc+UEUMNtr$Kj&ZfrPNQJ@TNKgA@#p zr}dzQYvkMe*B(d0rr>^8i==7^>{Kd5E=63#$03OSHb@j*@~VC&(cgX~SzsQRe^Do7 zLQIYSe>fx}V;0x;Y62)?luGwiAq+u-c7a8+Hu1smK1>hKR)L*pbhe2Lj3j~O+a*Pa zBpyxM2Pv-&d4jUrOMsR%0%-5Wq;Of4k@4xpBa?UAd$UdSgSP8QSTS3uhu9Fb;IZc~ zqtBEQhE{!kusJSdB4wYgVf>RkEE{m0{HC6o1KSwC%KuSF>@kG_4+q`U8uKF$b5PBm z>&?H@CF?ori>?ny7?Ban?j0E1pN&ZAbruAB(3R+kYRV0_F>I2x(yb=`wl!NW&JnDR zj2vKw1mh}A1$Ufnp-i08?}RBf`N1{|V~*@6>s1~#Cl;GsVCNpgJ&QYK1_TLXKmJ4u zn+io&PqMNxK4*M0VTD-p?UuF_0&=?qTe8e|_08OW}u>bLql1PtgGG7WoSi;^Mm4j8#oqPpNeE6z3J zy}I_Q9I*}0M*cnF8_d)PSP#&uB6I?J`|Yu0VN=-8e%#kAH1aQTAm?~EiOaYLkT`)h zGyEPN8Tjn!57lfZKI{Ce-f%Ym?g7=0dMOg?;^Sfq*SA`T?QSjx(o)uazAIJOi7sq0 z_TUFI!oPx!Qs_jSNK@Jz--jbv4=idqPKdNLcL$lAN2-RBK@my7My zSVDr84ObqwcxWU!uV|A62xxsjl=I=J5)Y&}8@mgd4o51x_R)bI=FLT0m8f)^j*~#F zaNu}7cg17-G&#xw%6bC$waKSSuG&b6m`kFS@6@%x^6b!o=$s&ODm&sT^_bvNuQE55QsOX7PTD-6U@tUwS zV31gS>3&>M`FBkxtyZuuCnyBw4dDns23$7CaO^a+v1h}>9R3)gmi3Z~KTDNm5e-Tq ziiTM{dbf6~AXKoU_{m+Rl$5+AM{YtL*G-`6k_*Ovz>V2Xxucj<3VcgpBqi zx-}g#-f`E*EXucOi4b4{vEbNYVVtwAH{1r>+Tn|-RN(pA2)v}QG-X05&J#tZD!>XL8+27R*zAuZ z81v!dgI8|SYE_K(-kd4bqYA0)O+U{DQGk(C(7125DI_#OkK(Ye1;jGfyFj8s7WLZ(YB2kd70+eJa 
zDX*<0e|Q1bwUp+pQpvKrBW-}_Q=>AvqE)p)N}>Rty=04eONgyBsohE$!|$GNAVQgz z`b^#Wps0i&rcvo4Vug&YCX;qxjhdKCa;U)2My@aGHdv7Zt`21T*Qko*+{oZi44JRV zqyU5L0yjU)o%*_2-bMMmQW?LZRu;f)9t)ACNwfBL_8>*V(b& z#qW1jX*M++ZC=c#MUVKOL;LC@%KeHwv0&C#>@)dft_2z;B@I{WMv*8zV_FzE_hAE$d_{%$5r0n+U$ z6d{TbVSnjtFFG^BTg!aURz22l#AE-b(KI={^WS#@E9Xn%bbr6Q^~XLGmOe+dzsl2J z-p?q1?_iYvl6?U_?C0c@C5blw)30L#7j06UGW#DR;df1aWt#^xIBzI6Tes(dJm+cZMCU?w zf1>gF(!w&74NCFu?dFCOX+GL91#4iK5ZVuz?Dv=kc_sv9;toq2F4 zV7k%RW6?q$4&jZzX+#C&<@&%9^S2Q=3<+KPA43cssu^>kItIUfAPkuP2QszN(^tXxfGrAoh zFzOy)*{T}*ASLhv^YY$t9Pi6avM~sZn(hluGKKMi|2((BUv9kLY;n~quRkeKBSzt` zn-K+sbPC^?d0sEEi+y##aQ=H(%Qqv0J$ z4vJv`Q{z&I+w1QA?iNhvP=d;mo8cj$FV)XkCO2r}dF3m(aQwfo`eb=~1q}xLfPI7O zpeBc5X^@`;eA_br!gxoxY1n!v>c1plz$1k%kt__p8?}7+CJMjzii-H;t2A$)xTHLf zIDNb`CqDZ&n7s5#HhbyQ+H^Cltv}jZbrrpsxTzoXqc>UyCKQe6yF{mru(Paj5aK8Y z50HX`s*CX^HgqY)5lK(0jDFSmOVCdizB1h+Zz2a=!a|R&+O)!Yo>=2j?0o&ODOcZ8 zU{$_it^Ob0MPJ4}nl+~90_-?H*2d3o@~`?YWA5+%j<^cp2A3;g3gI@Pz`G8_$r_Hh ziWlMGAkMFFfQGT=ouN(aDUKb_)6SK{!}myX_P4?Y=(juu;+|PU&!d0aAAr(n`_~R6 z>Q0-@M(n{A-IqPeS1!-5zdg3I^>uw}krQc1`Y+#=m4Zly`*=)+OUY!cH=sDbRGyT< z56V-ylas%cW}aX%iXgfPuj>6 zjyHYPHU8BSuH0Phe~V<^(7)PN5-1RNBMW0qjqnVWd~4tf-(NHKq(5$q-YvPD5S+&o zTa?kQ>JyOrc?wB%J>1>;SyCBRpd9`Wy52cBviM)ujWe-rYhv4&*tRFOZQHhOdt%$R z?WAvhd!K#l;+(2g)vMNjUA@%r_x(JNO6R`m{+qe&w0}0EHd_3kqj~=FyX#M8#%~#k zVGy)xNN{-z`oR#!!*2H}p2_VBne^@SA$qGEMQp*#*?pda-}+HXQB2}x!b1(vr)Z)$ zeU2ppHhBD^|LxpI0emw`Y%sTRFSO7cp4~XB8t{Y=lez2z!LC-obO?hq-LWs4EiBdb zSZ{oT(_aMP7@oG{#@NihC?U?a`VSB2636CnS%;&aD~F}ga8A(%Ee(I~` zK*R<&^|ww`6=kt>*BdS=o8?7^htlKF7!I>kc5FT2K1_b$I*+!NR}nQfeh{d51jMsJ zcMJymo-mcAUbuk~K143gvE>B9e5Hdi`10%*aMBOulHd)b!N5#cA4;?T^%;u)@iiuR*UxMi%SJhZAYJL!Tm>u#lkIsC%% zWpzlPjk}=R`jZUV`;jI8GHn|P#|PMt95lCx@qt?8QG?+H7DvzyJd(!09AG5#B^)^POb;2KEQD{Rkt z&|c;@(b-E>4CHacaNi-d+Q;n)YY)1BPLOQCtH{=`4l*rX;eF^j~hX?*lTJR||pk zCtlnh%Cl!F;OV{tp}U#>+qk#2bL&|YFr!Ba4ov=SccZs8#yNLj(a#RgKyesM)&4i(H;O4mi2WOUXY;kT^!?#L&NYMdHJeYC3G^oe zCbAQYD;0=u|03dlaxBls@jjr;%}4XZ#z8Ls?$$v~@v57BKH#nYVx81&gYDR|RB&IS 
zj1s?Ix@W3fG;3P7E|=lgUFd4{E&Orhydrc~|Bc^Ci1sn*UAM((djsO7R~_UV4+p%R zhUh<-XsgBdi0(eP0S0;P+0Wi#U8pYdpYmdZ)GQtlhRJodl6Z}0)X-oD$c@{%SsL(gZf8$Vp;w(29KuB6LRAG;aByEDSU;=oSr(%OrWn=?P z0Zh%HEP#Wr``nd|KCcs4G2Dy1#JKQt;??U1F!-k9$rB?)x)t+2(0RqHzHnb}2c&Z* z&=?P%MFF%NGbR`>5`X)9(tc8Dp%3)G11(Gd!+!#;Q$lF*04+2x4O-U?q3)a+q|x#; z3(!<4OmCEMF4Fc*o`~)7m3911k!q5sNB05*f(?=Af(>LgTfM-$K$BE(I+g?UFP3U;RbV9tuXu zEU?hW+)1ffoDx@8DsY~8e7OH-riEeTq$CR#hN{PIKhjR|xx;AZJ^kDg(~32E$Ex28 z%>80V#1r7a9s$0m6IlR1#BPZD@Bfo(MYTB9y*=)e((A-sbyFzDf~1!xwVmWWXb5Xd z5l8b_Uh`i1)A_sKE+UCZFPQrGfNhOZqhg%!F0Xrzf@4oN<+CPe+;{~2?&p2M?n88a0fG zj+4)6UR<~Mq6Q+>ZPG`KF2k>Ft8Q0jIFjTN)&v^~MCRzb1jMUSZ(&eSiDh}h0#rZX zf~VWOw}}=*L!&MAv~=4xn89fo^RU#Mg$CMK@lToEU=ItLPc;8%6&+ij9jQ!g38+G< z<G9P3vkCTIr4&n-MeZ_UoFljUe|%BacrgY z*osw6#3=0z9M97$ehiUd6N+Y*6%VvaBZX=$$BCl-rqA4}W?_~{kYx8YnVu3V*M>i8 zaShfIkp)vc%E-~2@FeTboKh@k=74?pa-dh7*n$}#1&R>Qy&++7*+H}JI?^NN617b2 zGgPdh3W9AJgSTsynS0F{rG8T?E!IS0Ry2vS*&Ob zDZuWz%uLvAiyjmCQIV@Tt5upR7Z~hmgB|S4;lB@?*OM0@bhsv4Bt2-?$wfMCR1;;e zKzTwGHz9xIyC8<~MAphCXAX8b-0nhVPi{-7kGi$0*$AU!ZicCNv1j?RxJKnG&(wBm zYRyBUp<%ollx|B>Ht=@38{tn^H)}tF1Is0I{S}<0z?FMI4IVB_!xO>z({d)@Me4Kr z&A0b|yTt(5g`(qgDMM6!-Zyr(n?Yq~QoN-^++?KSES2rH5vZZ6n`AZpL!6KW7CW&v zdy9dS0CFYc8E!F?J7gByKmfbn^7n6{ChV}0gVV-De3Z;3lKPQwgU7eVHYIx=@N(q& z_#G~o^p@sFrl;;9T``%TWhKR@iU7B2?_i%CQ>yO}0w zFFDCx6G(b1^#<1z)f5{RV7DRX(JAce!b_fWL3vT7iox8)o0c(C5@z2dyr~GiIp~`k z|Ev8c$WSD6pedmM-9#4Eo1QqA0_;b1L+jW@E3B^NRn;-uI%$86gSoBMD^$Ms^cR2lbXd-oEv~EoqP|iQ-3Va_|o_w8b|3oYh`C^ zKdKL*WYLgO4cud=d*G1mfFcpDMDH<2f_m)7si_T|PrdKyc6X?$R3tjiUA)X3HdsO) zb_d`T9zYPeJCJUV9^nhQ&E`eqhMkmEjai9Rfdn_YSv5OZb)80F;&F8#Zl3n)@6BxDI}?K76+ATt(DAFh)q9p6(3}6Sad}&BpswubcBS4wYKM)8W86 zGemuZk`pSE{*&5*UdDt*3ylvIT2vGPkVm8vQshthNlF672)t2Bzyk4JQ&-$x74cj6 zUS#>wf$~VCJ{)7S)x-aB{wP%5ofhG7E+AqXFrT~fq2O4+RGGrH`CdINzsQA4NWWSU>C{>aF8uxHEiTwe9K=7K#bcoA7a*^8M2iBw; zSXEe9@uKuO1v4b^9wyjcwl2raDqGLBy*xdrh}oQbo3!2Y4~}e^!y5T*4TV<;He9ri z>9f0By}GPG?QdCF()Z({TS8=3B>Ax=^G=@srRr!mW; 
zuh7v6VZ)gVcsSf=WlLS`iyofr^vF*Y#}Z(LBOHIVyT{ayb#G9I$41CAnl=I&D4imA zVm8eW>-wO9b%NQG!F;bln}n7sPu*Y^D7Vb}B*0YJuuDAl_rUB5M-t|+fDLXQBz(q& z?zsoyxzj_C$Hu1Yo_HR;h3&9n_USe#T4wQ&Xb=!JNV6h0rbS`$5h)2A)>0?L{lyO3 z^))%v9yl6p8(n@Nb2)uak;(22sqyNR${qkO*T9a_B8oyT0u75Ec-_yncu4I?%%iV8F0jC4<`Jt*qXl`@fbcf4+rYECqUM%o3DqVC-DM=S~rvJamY zDzFJog>2hdsUyL;QjJW2CX6=SCJa_zM|tBX$qLMYpZ?EgEW3F!UW$5g-j%sDrs2hW z#Zn;jVgc0XswLJ1y2%;Fk>-K3-E! zRZxy)kZ<2KfbC(7>&Un9DR|`_aP~DIT$g9<(|5|F`Ry3+hV1(z_S(J&bRPjcCjc)g zfE!xCj@EZC0Tn>6x?}PE>O@<>1oPn-xL!A2AbsLrs6FY#S0J4k9hwqv5;;no`%JOK zSN=edZanz@a6@mnIfR#a3ib$EH>LE!ZCJ6w1C!&&bEVN9VJLXcVIC^d4hb^666vEI zb2DRCIW9OOZ=uRUIuyp#x!CT$1Vz~0s&mPKp~#F^ZW~8MN!w|TwzN1H4+A~AMfJ+y zfOJU2yKbQi3w_{h?HQxNqMzP)6>zVpU;VHb@M5#LxDH9U)GA^3E|k@6=&>jS0>%!Q z*P^1=W3cfTnAo9F<6aQP``jB-udF*tFZzWeTT*vRxd|=cnNL+q9dJ8zFuG z_Eu5+@KvbfW5_q^Wf zzvG^kdJqj5&k@Dx_I$!)^I;SoBz%te1twA4KicAev%qY9LnCBxapRWN%tDVpTs_p- zTNUMU$v=TIUz?k1A(&0Go@_cvybtH_cu^RYaC<`IV~M;s!H;RkKqevXWLYa3XB6d| zz7C#cu%7w-^7D(o@BDn00J_iq@7*tncfINsWk4+3ulUQXqS4o7oZ520Nc>`) z{P9uYRlmz=38b0^hhf*0)#fSjOvWIv5CZdBnB&bLt_7cGkKH@v@RZ;6Ok~zFIq?Z) zXx0McYOxU_W5@NZH>G$2O>Q77I6{N@zO7$r-(*?2N+su_Q^1=10r(E5g$3tgRvRdc z2)J&2Au1!hZv@hAV?m#Ao0vR@!zOC4{L3!jJNMYn@sik=t4Ax5uKMA z^i&u{&QWZWJ*GuHZp6DPi&L(q1{5#L%YY`r=j3H>qMk`cbO$Os&%7*q0tUxL(Vg>Y zbzySXsD!c@3GnZEs6uW=yN~U%lNk2BVAlpwl7= z2{SQFzKbX$4JEEyZ4iThO;gayp(e}OGr)nsjt9s@ZtaAaJ3QCswCkN~I#@f-(^6IO=i!@;m6yIudcwMjL zgZu}QRX5-y8@!1nmmz`P1I~rafPnw$NWTmME|-R#@%o}^#o_!nt%Zj)UV@nY zU+nZ34fuvt1{F?Wc3JHaN{PuO`THtFA9u#V5x)mJ7rEiI31Fsoj?7;1Xz%#`zd`PJ zFbjb(qD5qU(Ebv<<%tmSd~Kq`3rtwjHayCKR!|4&?vYmIZgkYQxTPA{ouipsedP2A zkp}G`I3ZULZ>S9@m-N7mui-Z-(yk#ATiaUUtE7Q#;gEg%1R;N8CR-dHb|sWf>YOCQ zZBZ#+JfCR2O-~V`#^$9+&B!P62g(T#g-FKg^Y723t86isqIEgWqKMY2&4-Hw!gAAW zhi{{0av9NNX;z=SFcImkxsaO(HJOkzGt&h-Br1(8ldf$B;Qq|rScEm{QiAPjaXHF% z@2~Z>hGg>5ig(P4xjfAPCF^>i<_~Fv+YWka@BoFeH1Wz7mg(M#gh*6v9w912!~!|! 
zvM8CYg=DHGwXUCy3MvXcw#I(A8@!S#(dk-8DL$e~sY5-EhVBWO*=Y~Y*Krl=zB#r< zXv=Tf%I^G{4k96t41B!%>x@n?csuBBJA|IUxT84n74$mxJZUn-n{GIeKKtTan$lt9Gy~$D#E#ft zBagJ7DA~q`lprCRBBI1UwkK5;>aP+ETB#43(o+?#S|h;gYmu+yK{khEMZdqDr=1-9 z*L7wM+-}}ma6oj4esNnv9MddBVLcwtt#X=98eFAevaXkqO5IZh9Y!|(f`w5nmB}<$ zrRK9H(B*(kh{lo%w~@ds=+h{QqufJQItggLg`_jedXy3c(g0v(w!1-Wk5uA^`^#rm zk=-9%;J{-Xr7cMX0$uupxVGtXONn1;BmpeRv_i33RMDv`Umh%lY>mQqCaMz8AnYvI z_P_#ZyswPt$vs?NfF%;JBS$hmH;CM5bda2{fDfL;JP>kEQ^aYp_0g7~q}d^i$!5hx zn_<6PPCg2-qc)9*DL}{f;P3@y{5lVhhiM>!3XF$Q^!zP^UWaL2@w+B4s*OcPOlc)% z!wY5Nghwq(ab!OL;=ZCipXP~Ijyq$$2xTPE%JVpX5jr%Xv0$IqM2NDnBxNlp65@5e z2fOkW~E;`9kn*K8Ue-7dn!dL5IN-{7Yi|($WbZr%K>_hHP#8-StJ323D^lcuBcOtu^GNZ zYtEyXM@F<9Pj?`0O44#VUq>lKX*6vU3i}zE#fGOWpWGjbN{0^b=g0Djr>nn-uA4q@ zh6E4OrqvNMXe2l30+SGf zFN|kqn3bQ09?48KA;C4C7!HLLF7dhUaC-3`NkWBEr3Bm=uJ5tOxnCN2>7Z!@;qXm< ziyI902_qC`0=%s*EVrbj%%Jz6Q@&Ce(YPyeRTRfLIU23*x$yncKvuV2FKvyf7foM8 ziVs((XH;sX2JNl~n@N>sX(oI{3?=K_*nfw2 zcb$1~mnumPYQ@ z`RUa{L0=4~fN)T6iw{LJCD8cU(&j0dl`~u!Fbo|9MrFn_jfnY;+@6?||6qCTlLBI$ z8e+0U!KZe2J!*Ib#}^UCneq~ODbQ!IPPmlZW%`Gk$cw6D*5-?0W-)&^Ny{|k95$Y- zSGxU^N?{1LG;V8xt)w$-wG5C&4-gBamsiBMJx9LY6M**=z!NRt0wEdD{C%(lsHOr1 z9qmFj%tOsfjBLSo3Yb1gN(O1e{(RvzDOUDi+ufxGIsVc&6cpKwIzykL<)Orl=-r75 z7=J+Z>jv_<&kSN0mG)b6uHr^)U?_+*3ThnXcn$$3<4&WcI_f7UK4lO^v}OeA4JVLm z8aPhf>?DLm2{Ek_XiVqel(cuSf#bzVJO(FM0iP)p2Lw;Yc2V4pE5}x`h0xDk-;Uqm?+slmblIgWh#-$gU*w{Z~W5b9D zMhKJ$NloGgw!|71&WsrMEuo~jv6_z%PCQ5Pl89x|QD& z*;H{+!!%EhKTOg&KlirK(gN|ryM&&)JII5VYEWUPUnnmqqiZAax{m&>jW})nR&9Z* zHWPpx4bO|ZUo;(UX2|ej=a1boIXcvO(G0b0P*PKu!%DMGAEuI4O;eLbf zd!q|sbd3Jt1A|y80*xt`#@-kreouM@8lJ$_L8v>!}SfcGgr=|D>l}~ z&^O5J;Pl$)Qp?Ef1(2M5MGW(@Im+_ap3)^;}o5L(`OoyZr+9YmGR18*fK$$LxP3A(J;6Q zv&rk@inH^BS`rn{ueaq@jL4&P1Bc+l?Nq>pVD931%!09D^OJ+nt?CZ2JtJNawC)W) z3w*}kvVcs2%q#Npat2H3|Vu9 zLTsRo7K6rlXM?Qq-0F3fm778yk1DUPNfwqBU{`(XJil5gX`1+^b?z1hVZs}tMbqPa zkQ=d>IzW~eWNRp7cG5tStbz8hQcx;2G_M7|R(w3J_Gg!pvm8D#wOftK%!_m@O|^KU?=ZaUhbH=gmU z#C`gSfhrc$S!J$NBJ&KSu5~XMkEoIPpFlG90K5ah2<2MJh{i${5Z)<}&K9?sYSyE6 
za(6ZTM{44)EGh-&(TTRx_V2hac+s*8>E(`-`E3K!%@I{Q>7IySlHOjl@42QE53wf# z_&|xz?7~S5&{Im#Sx$-2MuDQ^03M#rP{uHpS_U5zz|kZPZis&l_cA8;TwH8y*RX)) znm;^*T&xp=TXg>NQ~vk)+D=LIQjtBDC{UpIiiBIdnPL-z;@k^?9&lDky(zy&%%S0k zM~{TL++S=D7jYve-%<))pHXo#;pypsetcVK0k`1(R?T*@H?&8>W-SF66d9Nsm8phm z^~)|s{_$KhN5wHMD8JghQQq(_?qJuBurvg`2b6IwH)54D39Diu{>Z^+ZKU$KNg2V| z2E`1=K4uk=DbiM@``~Fq`)~Hq%NhJCL3%ZXu=OXj=s#&w!j?l@j%Ul2u`7tf3?d&d z4DaZXyGc#>z_ALu+6ta|()fANP(EGc#*i8VE2Vpte~wo$Mx@fiHNS3sk1-4SCm+T} zd|yByC&;xQP2e2_uD+_xc1qlO?HMO6HNpMSV7vbAK6`gzT|niF_8Q>Kc~et zx5&ab_Mi=}Nht3@m_HIoC#y>w#)1iKNQGjZ8yhShJFhpWF(zXHo27@pv%#5#0f}mc zMs%phL(xmEfv0&!7G!LqvqBJdgvH-Un!h}*ZV=IWigrbOeB1YjK&c9_Ktc0g?#5(B z?C+={d<2yZuHzjR3~%WfSdnPjnchJmR*QRJsaVE@P(LRYMxnfL^S=92Lm8@vgH6;# z86VS!z{>;ZzpoDCIv$`d^C8Dt+D$04WR?mnqO+JBb zI`KCTrM*=Bk*y=rTj|6jvu>ssCZ>25^^_lTq-+R8Wk0AZ^?402G#_8cb6<WF3#miaat0y4> zW}Lo~dvLM=L(is*4A;qweerd^X3= zsw2lc*v&FxSD$Dl8GqTI=DXs+D9f`o!j%Up70C9y8|1jZ7VgO+|8XkCmL{`P8m#4g z+K55SF*=LVwMNYf0J;z$rsc&!N-CcxU$5%cqN zXH9`{(AzRxK==pKfy!HKn5h8-0TZ|**VQ!6A2*}Ms)w8NpKBvDKe{yL;DwYY2+)m=*vqJ)`FjH;Ce&NrlOsm9tLe^@WUaUsB*`Abv z&Uu`PSN0{8$K{U~a|}9;QdD zpMUkmV~x{`R?yPR$0=$&{9;NDRQl_=d|#w7x1;cHq+H!bjLXsnyT@^9>KTBGFB)mb zIXo`{@?t46G2QJ}R!!@lFdu@9>3;1K)5clvaZ=2HNj6`&f(o<34(%Q4zLq&h$^;5L(0TFkerHTlOYwJfFZ z3QY>{2S@IyG96Hp zbczGaW{vvTL77lWbq?8LXVH>I#_=?&dp{Lu`*8jSTiso&nV1O8MuF*6E-OJ^PduFC z{3s!DDC@%&Wsk{1=q$6jQd;id=p9?(oh>-XPN9-C+iN(i50NkL!rO zqjwNe;Z=$K*t%sSZ%YLjeiM+5+g>b4Gl?}kFl4z|{0T@3bp*LS<9tv2qg9wuIa!K7 z*+5dEoClO}XYD7P>5Ru1GSo-%D?(_eXtDOo3X8Du>WA8H0eU!R7_;T_RXRB~f(KoW zup~FB52S$IX|A&qXIzO9d{(U)Tvgs!ytAF#p;KwnvD8Pwl51XaC8=z)!w9x46;$R$ zZQ)6_&10dp9B^DrfkSfqk~uqa;@}oL6+awhYS5f?Bwh^+b0x>p5A>E*RF!lLC_A$=bqk zhELg*&-_75jF5)+V&vgz?nn+)zfnQ(T=WMDCig=S3xmxsrpj5>Y|&hlo9Uq~i`Mc@ zUnaKVT9{ecIX@Dq4AMhz9DkQ%i9D$qTCMs_+%XOQHvrQ0W-D`v%r89$P`qw&y)yqv zU+p~!m^WYi`BUCCw4?P^+xo8eBgNZ3_w8N+cvJ$OQ~?*_6#$j$|Cz=_QjSpg{p&_E z%K+)Wq&uQFttp!PKiQh$MgZ-r>x)=%#zawd1}pegeghp?!AgzPYghjQ*uueBoucp6 
zOYLenWF+ld*j=Zo&RuYJ=6q2S_^c0QLpJKkqq4gNsoQMirAT|&?Khd@G4IBzV}(Hp z*MTScP#5GByjjNk5olUJ3cd#VZ&ox79X>V1k)yg`LmCkUI5)Tyv%yJkt99+tV>Ynr z&#}^e4{Lw#v7ZGv6vuhkeYFL^d9$i4z_RDem;T`eh50|aYD@&JI24UQ;}^D}!l$jD z!jK?lZ@;zsm~X8zLBjxQ=`p~Pg#m}hUGT=5mok)Q!gxk`a%J+B1Wr9?xe*jzE(UwO6Bkt?Q7w!Xo>Z2(C7u1Imr$^@XC zRe(XOt;q|(`}M>c;OpO;J4zFGj~^EYlR<7tBb6>cE1=XVlW_NAecs;Lxp7z3zY3na zHPU=$`{sx;vb?++?t+mV#Y_Eb`^N`kxVv_#>;NLbO5r4;)!^{!h|AD!U2L<*ypg_p z+vcNUaQ-9>6D09zecWr`Y}1cQE3luN-h(%W1s>xhJ;R3;u4SENH7FyI$=3Mf~nh0`1&82kh&V8p?}w#|3Is0Fler~$7yZ%=<@3iv3CwBM7yT$ z6ZG!w%K{3pW6SpZ_O|2Gyj-PX2{{U|yq&@M`F{L~qLv^RGfn$Z5J^ZLi8#o6Q`~ol zVvqX5z6m=U_;pZy*RwL;-|%`NaEFEOtDV;N2&r5jzCTBDLVw`nR{y2B&h2gtU~t0? zMw@MMQAcEQ`|a}*8D_Q29Gl4suoW4mkc z- zZ>76gSwd1JemmOxs6eD&x0yH+bv3^vl5V;Vk!nml|ATPOm!k(V_pS}5G4Pec-m@kI zcE+7QP7uFu`_R%E2=k{AkHW8H@R&_Tmb`CE%(-$jkL;XCbHn?IPU(O=FzzA8`eh(1 zenv@HwVsBp1XNyPp3kGn!#KpBESeRnua|avP0!0HNRA>>+)P@i0$Ha^VRN9Zd0L%< z3YXyUSwS=igf|e?iA1r}j@{cNhp-$d>=3z$I#l3(b3cpKeZSY-#Mt_SQ?F=p)dsWc z0~66Yc~D@S;|~kxoh-dRGY)GXQPjk`=Md0C`njn^Sw!vQv3fxh3^gCA$@_-s*Hr|= zzwQz{K-!?T$6QLIe;CGd2d(u#^ggOg&-Y4GyQN95*lfg(;Y4ZoCXA91)R97}36)wQ zc=L1%wK>r<1EF8@m6_Qev$pm@?%q25+Gu3ZKDDulh~EG~-2EB-;M?HqSS_#Pn?X=0 znTn74MKD_)QWgveVX{)ixC@>6cME+bw$c*4!Vv?>HQ>uljOmA5i_GMM(=8I5N+(5N za%8ir@vzNVrFAK>2yT3qYVssd9X^08U@MR-=Imq=wFE+Z@BSKacN~tP#olva%%G;t zAZvz<#zqg1!-;G7cPO-G_;iCo5+(TY*Pz+)0A9}yWQY_*vi;s9d|_8~dXg zx8QNgE;B>N=kzU0&GAsuI1snPh&E>8^#@ZP(1t#U@-^L^GP>xoOWgDG86yJFZ5mlX zUIO``I6aB&DL6PHhj}_8;%%rYxF7yRzr4mZOBqkg?N-li_L#8$rko%VK_+}?p7Lo+ zD7-XV_4*4g52C|x9e7#F^;od_;sg=r!ZV%O-zd6&)V~^~gK&Xd;l3CKEPKe8u);7Rt>j*uwq3ewyZlmySqldcBGvC#amm& zg^2K^GxQiK!^8p^pCGeGoD$K#*id?{0=g!axkKdr@cbed924S$slT+dm@dhC^Y5c^ zA_$i<;O5_P8PKFE#hy@&$G4e#=zXAFB_fSvAN27i`*vENrYLFeXis(UmJ zc<1dxbYp!9VrWZL!j^0^xRa?S&EoW#O^_TvG>)dSqrZJd~?efhnWP z@6k)zqh#PH=Sotn?L_Pzur~lXKBihYTaF{bwF@o{%X%~uAsQnGE3E0EO__q7jr!m6 ziNDBDLL;;}!qX%wwQ2FYhXGc4gd-7R$|B?@Ejm6;NbVdO#tenfFg$~kRVjJMWh%{L zxCG>ilzwZF#lQ5AInPEzKZiwyHKV5YHzOlFm5Eew4b+6qh^$xl1+qwfahM@Rv@H`u 
z5;G*QdHp;BonL<2GF1NB_Tp4W^$p^^%5z_zldOj3KlhVUaT4;mjsmfuE6hi8^NnXj z{RfSr$wLH<{FM$KwWTzyIumsjahYo)pd=_vjOp zk10XDL3_qE@@BMVV_m;2r<$x{H2OSF6pFr-Xpp?|J3ld!b!T@i9q6#(foGgIa2`?y zsaxl^ldv{Ra>#R}c|X1f;jP1Dgoa!27=bz_0OD0{JGndOJ>nT{;3XT?HWWg-gy^^k z@u6y>sJXfKOLBRivXeD}L^ee+n!bSal#$s0lsbq0#hCTvJK|}`9{}sS^c^&!hS^5l zqiv??xvfZP)M;S~qQ;G=wHp=FB0#Df^{ zi$2D4FmYuUg(Cl}sjYmFyKpffxV5-{gMayonkUFF5dRc_Mt|{PTT$iJImhuR4vQX9Dk& zdQnU=b2*CZfPlX4s0PuQS>T;I>1q7B0q=1X#dj2442t-KEzCuB%G3=&RW=ZU4`7=t zn8OJzs&H(GEMNffIIx;>Jd~Rq-z9T*9*DHoC(*2B=~nU)XUzLorh>O(IGWkyD-zqFlVy#%9#py22D|xuhb8T~?u6a9FY4%UGjStE)07P{EmFh9M+)>r96oh??RB%2ImgTb+9J) z3!jRs=<@4#kk{QHjzh^o%Uhd3$uy7uFF#tJ<0vS(@D%}gw2JG;W?!FWDBPERROVa| zeuPh}j|=6|NI%^KOGe6&pNbK6HlapPKC4*GLwbQDl#e8emxDcS%&@^OL|6O?%5=;3 z_4{|Npc}!h4DJ^Q2Q=W457d+SO4bXfYw;#6Q@I}$H?*$_odmbhaW%*tWL?FN%AKJk zrvHu7c=rA{K?2N~`fvLm|BBxzRR@2vf(mzdUbd6Ql^=Mpov2b#9E(kaX5^3z{tBAE z?{Pd1?ioQTzim)C3Gm3>-#&Kp*sWF+FHdEQ&|40W!m526r`4;Zx89>rT#Y(f$t@59 zMe)LfB#?j|ArtXAR_&u|%hU+Vb-%fb5n;Xp$Hj{Ffw>|S|5oIcJYji&9*!-LfoSuL1TK2k(FodeGGwt|6KJYVKvFEo zjXEx&Gt9Qf#m(~M%s|${wyW%^mQNJ!8Yb$A7J1O4D1sa zDkivvJY?~VOFv*17Xw7T@v6Nxokp~lMr!G>A0m}7&)Bf{q*V_sga3NufX?`!<4=kI z28Y6S_s1@w9!c^qc704LRX@Typw|=(mCzgSUEUWDN-tdM@68fOCu91SyoE;qlbw;@ zB!s@JI$kI|%k#H&u!hwIUE8)$>OfgJ%ZdZyRr1>X>|0VgmG0#5Av$o%0LInbd*@*2 z++0P+Q#+Tk&4vh1eI>Ox59Ef6v-k(+44Xqx`0O$A3-#@(O|4RD5~sl$xCSG*G#zFHK&{|rG`O77(YKO6DNxQ82E(|hB8Cr=dBT=asv2eZtS!gx(uY= zu3yj+HI^L7Xz>;Nyn`68@0Gl|vWTk6Btsj&xlL(?dpczd!SSHTi_4N>ngt(Bg9^ND zA%S_bX*5*9%-FOJGBz+m<&q5{ELL$j8dLuU%Crko|Dkipm2&-M?6uAW@zbA;B6od+ zXBwlv8dQ!vWwS;S66F=-4mlqy;Z3(MMb~_GWVAXC=8%CH6XMzl0rmDK(&shx{mlqE z1}MoKU9jIEA|O(h9*0zDr;l->Yx5RXvRt^Bj44s}-b?_?4uqZ$ILac}kK^;brsp%9 zPcom!#xcrg6b{YEK^qMi`e7O0@y-eSLvyK!r&;IJsG^Qae40i?wOW;fBhg3&(+ldR zLfXb8tI|r3Aw*`2mhJY(f6FQNvrhR8ON6JU9u6$vdB!82pg~WD&j8X^Tlf3f>=FDo zxKJW7-TSD>-6lB-yMjSUoy#13*>X`|$Eoj+y>MEW{@oNx&7y*#F^tAn972#Hg53<; z-DH&qSQTAdNLU*zyWN`pST)9aTOS5SI`nb+G32`njfneB5?I_;HG+OQkwudTHJv$I5x{>CO& 
zTDKH&gW$1JCbF6N6=t%T`?~veCqwU~B%TE^qa>b#1s%ovs4o0hl2T*X!ihiP)=|*ucei6%$D4OpOzK!63#>^pA02vbSc>Y4MX_iUHYa1 zgT)-CtM0WxJ6Ph()8MrEr6O^)$Sh8Cr)=*1dUZ(n1&-Iv<0K3@!sz%|8c8v^6Dv(p zCK&g^w$dJzB)YT_8VqZH@JOw7arf(UWM1OfRPD9V?<9X{5@$b}m0QH#POpI3XxiI! zOSJyN=<0#RQFeCF93%?c)uzis-_6@4MB`R zO?U?s83Lh@`QX^Qa`1rP5W{;PWozM((NAR|>}sr2Y-Q~eK6W*75cOD!{K7m2=Fkgv zMQ!kv<)hn!1>O-phoTWle$n0~N+U@$AKE zWL7?^H@FdBz~MTRE82JRHa@z=vpcp21B@aT(v5mYi+E6Nm(h@p7WD=U`QeQiGQpdL za9Gl_+B37=YX)C(5OXvA8DEq4DEFJAf6An6k&t|ZIdL4{#TcDEp3%@v>U+@5NBEC# z&mR{dAVc^2GV!DrYXFJ}e8CezHFr<6Bb#9RufWDb$ZZMQe+SR>t`Y>)gcNbxT zHP_xt=nOwKR|n8=k28jIiYNO*oaY~v;VRrDhuPdXh?=hnFL?NgM`7k<9eGe)aO)Dt z-;o3lJ1Vr1?0fPUT#p?K2!sO6 zE_(g`8UG=V(k%W({MBx^(~EueGw7+GHVz%LxQgp>zpL)W-QU#j1_6w;Yxw%--pMC*)$@kxHwatm_gQGCgG|4OZXE3^ZDill5n!%bbwSx&)2kHB zxcBw*mDXL)@`3ZIf;u*VO^WTB8GKx(PP63BOh_av&g1UV9A~lWZ>umT_&=S z2D-)^;%1Mze}jbYe1%L}CEuDy>eJ3207Oo_8L{>T|gGC9icx=wZ`2s z#JIjiH(LG$joZRstOO_xTE&Vqb%|&bj*v{JOppr)m5WcI$rR z(zcIRG?P6L6L|VwXrka%;X5YYNKIHh%TKudif&1%A$z03Zu%g+y26P_r3Bz_{p#4p z2S9=`uG`MYybodm$g4a_fyg<^Un}gMgjBs%`W8{k4G}&< zgeM1C$!HP!5T72&v+AD!XQ6}Kzv0p7t_OioefUSM3q z1vH&_g8`5@-xmN9(X6y#V_{@k;R{IJlPK*`ToYY)Dhw?WcO=5{+n_^y3n$6RB+3~z z7Q)oNjwzjkD2Am@b|rfFQ_=&Rn*U{{xt~`YlkJbQJ5q?%~jwq zT5|0*Dp;Uv5dGEMxMPZj+1z2^|y z(4PEcHuDUO@MGv@H9-V*7h#<}FKC^wx>}>vLbSH_4y6Sjq!A;LfA{lo{h)F5Gds3? z7f%vIVS;HH{4F0IzV3F=z+w5HKk<+f_<5P!qE8;)i$x2X!rh3T5lzSKEA=jaff}+Y zQLrj44D3UJ==u}|9n|()6n!91T4HN-WA&_2Hn}}EW2G^yEQo6Ymqf39&eU_I390#X z@qV?~5?+f1rX1SO&h^yy*mBX&w&0F{q7$9%QwurK$b(qkfKH5qf!_8j@XJgS3Tjr6 z=G7N%=Mz2IJ~o2ZYuk$LJ6cGqcmU1H!d*vQbB7##h|15WBEDt#`ItUU^Z+j2-^1uz z%+vJ6YLJ>ux`!uU^2Uko%SSEe6M(}x;U*z!`DCfUHz`S)?Pv@(&?KHt1>F>+ui+#r zSDK#OVieUib*dw{&aLs7+?jQ{`vPM_JG{DCN^JV6sv#@?RMnZ&?-(;#Htk{ zv6N~hzOvG;#+kz7+aWxmmL$p8z*ABN!~1LxHGW~ryk_&|4f=V~o!`B{3GdUv(AR8Jniy^@! 
zAx0K;&k^2drxc_XtDzQd47JnCVsZ!I^oD zI1k`$6*f@`2JC5$1BFp@&mCONz#M3{t~oYp!c&{rBKLthWBvVv#^JXEw`WZ9!{Usy@z%2M|uzQy-IlxOB@$@51ThMk6k`1 zMef}dT>#4Y4{PHw{D*me1^&YoMh7i(QE;oUpjtv;WHc=|i5cCHVcao#%oGVonGJ_t zIu9M!mqFm@sA^^e4dVD86)I2tacqtLfj<`X8Pg6Af6bYRg|!vdy*Lrb@9G8T0hzAI ze7*vQw?o}usW)-L8Dr0kkSY;jmx-`KW6!x2e`7Lyg-&>nN8D*)B+6|DkJQ5Kk)y?; ziNRdT&=5YdpdDu}Pf2{m!!)z~+6?y7Z)x64dN*2gg_#fQadU9vB1}HHK#6X8-5&7T zE_ivFx)bqkDohPDk_9%Ge{z3lME$~t0j>F1a8}VwcfISAORRxDvI6Hru?MCWR~hp@ z&EQJFd@d+y*@0k(dJv9)>`OGx0*8eL1#lpiqCz4!sBcE zrkfyD+g8Tb8%&jj2|QrUVHNIlz7o8SGiSZ(q!&7(2h^Z5(R#FmbFtj?%H_R^U!iMN z={9ZBiJsajjJHbCmk+9J+OmN&_cG%C8IAcjAN-CCPJdL zo7ho7s>>w;*enE4*ARd~EA5=16QCMe#0+&FMgIUGzOFni%nyTb7Aa9U8%%3HiiXG# zox^i?x-KuGo3;yY)-<(d9-p)dW^)d;M=?I7r85&Ps`ATuFGB+BcmjlV1uCZSLDrXl(f#!7dm7o4X09(IlnNtK zflsCtOe0^!TP3%iaL5%)LtTKBkyz>tx}tZ`_CN4|`CP-K+_}=tS_BSs-8)fqL*#I$ z7jvyJ0Ls+O{jK5dH`hT{gczR$nFj7wtS;@go@{bXKR{!s*-6RX%TgN!bRo9=pmU6O5$6Li3_}9qV(UFfzVt7F)re<_OHNC9)))qb&!)%o?cY= zzzHD1^YsMLTI;j81HFIo)FPs38e3ZB#c{2!x%9OCO!V{7O(V%;Ueh7H$I?%p8?@W=|2N3CWG+qr?L>0q={F!(g&AW56iBv)#w zS5H3oyhqkvtZANGyc~ST(S{?g%T=&UE%}Zk%-Ql%Qi8lD^Y#KK?jqf7O>+)8xo&e~ zgKje&?pk@n1DRA|&T&Num4HuN!5y{JKjVZ01CPYCf=l?gUl9UfW3<8y=~h|xwt z*W3du7o2?K1&)3V^hw82mVf5nkwMUmn?I+pd6KUOAs!TvV|s)sRq(5-E<_qf5OaT6 zJ`|b1I*igguxK4h>aKlM+-etoTTh6InQh;v9hN-sbncolb;~ow0c=_-8g4tv zj1^cG+Kz;^i2G@*W+ZwxS0zej9lC!PXcofe40ula$|0Io$x9(xGk}W}Lzh_jS_EmD z87$C+XFSG0ZaONZl;Kh6^Bd8P2mA*ezqR*^tiBl9l%_gzoql-bMxp>JAtp><@Y*=0 z!%j{?&xKwy_+Ckwy=)0_p?><*>irZCdcoYWm}ah7uf^=`ica8=7Xh7bVeviW z{lFGO2Y|ute}pFno*to;hlYQk!h_WdQsFLz2DS68G~e-Wb^X+?!qPiB>)laSg1S1L z(Vi?^d2qYJu#SaFh5BB$tE#Ho>G1@%i@;G4%qbc>f$dKKne7D2y`=4@53*fv?Rvx8 zB)#N`ygqxrIuc7;F|2D9s6H^x62H}ICaH_7DcTObBn}Oo8AnLK%s0lmKE@uW-Ymxz z&A9^d=|QKMKj=d6T0%2ilAXZAQ%l)WVY0?5T^@(yNBuSk48Z4j(<~D+e+pv)yQPB- z3r+${KZ&+0EQgUofZ+{A*KpZ!)v{db6>i^`Wg8bYA6N#?j^U< z{ml3fdc%#zzBYfncTHwhchts2X0E~+s<36u(V?+?j$vES=8EAds>?cYL<5^fV zoSj_sdZ))EV^zqoj8@)hwxzTSUePScg8m>h?7(HhYZ$hK7kv(WCzP0>7 
zTK*snr4^RXwqxd}S(Q$vr9{+@r=}$mNc~Fig$^A;^;Cu<-m0%mO-mt&d5*d1>B5!* zn(qAyf=0f*% z4i`G5pe@0N4!=vj;KfN{4ZthUCT1=BC$HS3)fVU3e4z7;mtaX1tK}`RhBhCUu&YL% z<8rl9Px4Way15@yrw3Dn~&yup0+}u4j_CN`}(}MC-|L z(yVSDm;?{a@d(N3184-Dc5Bx0*Y^vTWrfYWFspk3yTbA@1 z%1;GMoxAv8)liKP(^5K9oi`5lk5YGW3KMI0&OOYc62$h{@tt{b4&44&~a4T1zz ztnn>q9GuA+6qUKh$tp)wm1&T+ss^Oz;a{HbTdeIk5gyrm3d3(Dc~55M9e9l1GHkS5 zjo0jzn5aY++`Ge@GUNIEtsA+Rw}pK2tOWC0c~o;24_N}odq5_O_qon2(FQ2I2}9qt zf?22hU>b+1Firq-Fd6xodb57GS0_4^lGd+T|2AmYVN{j0)u^ymAuCC#H_J`nPRY!} zIADD}lg6LDPa4IY@&Tg=G{|jOP`S_KZ3^^%Hq@Yc&#|(n?j#uUSeU4I`Yek8VYn~? z^79EVQbB>d9;^gN+=G!5iBk|YRwdvYCEse4`6j*%C%z-Z2z^^c{g2|tXGX$QVyEI( z^H73vtJQ1N%eErtDJ_{-Wwx!a+>dAayvPh*4RsQrGHhi_Ka)QtP}0xL6?rj1+H^#? znuhIp_zJAPshq{gr}fM=2lG@tlACrA$G$aocH_xl;5|&KT(0_N30Je{l`JbvmgruH zUo?zEb)z7!96fpAsj?0HIFWf8KPeTwF1shEUBJC|gF=t*LY|Bq zV$tL;6QGcrR&nX?Clv-&e?}?{BiHe)UCg%Q@5VBlLCWQOUgGY?W7kHRhTNhlW|-b$ z>UnnaID~-1^tjzWJ%RC7SZ2h9WZt*pZ{z}C(6?$e2-_rUbp2R}uuZO; zE95<^Fxr%KoYr(uqlOkPj)L@f&|0E63Ul>rmFsEVgDyHJ7sib6rYY0Gd%<|YXKsT< zo1o^;ksqfXGMEpRq~=nQslr4C)MblI6ldn zZ$%mzr_NjM!`Sz15ELHaCx2d-G^a}mSXnS9C!|us{M(OVd3&y8SF6?)N0kv*Jf9qN zJHB_w49SBEWb<|zv8#wJQSrHSi>9EGFiZ1? 
zB^G9gb$EVX=nBp2bFtNFI#BmBCbg zmrY0z3h{l+ze^c^<7sWomt>+> zDonH*Xn(eCSJr1m3V_vf&~K6CmqF~Cs-xyUllg7d{hM;9(D*+ zWt^?B{|zsTTsJ^|Bw!CLJG&(i(B~+jEo_e`Q{F+XQIaLQNO&buG3Kot z3#lwmX~I4M{Ka*8mHAQ6+ojTdB2T!pDh!zdU%C3wZ>@wMpvB~gt}?)r?o@uvg504@ z-+zX-KYbH?wzgj?@$Bn?73u(6qKu3lD5Z)+(9R;(4}lG0rwfre7jb4j5((n3u0QU& z1B1Fn+Y>T6tnp>3OcBK_&VG1<9rCKD(yJ^Jk(Q>heegywOCyF!m*|WFgL9GR#Ih&@ zr)wTn$GgGv3p*T*(UGpekb^0W^tlx6^NOcZ+NrP-epMP^HZu+HY3?n#TAuPu4mOBI ztyKEZDQ;y+2N$?$?i=9_%9S@gr$Jv-4;WK==UH}PX%()pX)9~y^CpqZC7UNEHKCRW zBRI+mTbVGT%$Y4Cm28^fUJj31*jKE0KV1Zz>x8GmreJmnvnkHAyPacxyXDzO4P9!HU0(CAFlv4oRgTt)Ll&D1RlQ-Yf~}lTlF%rL zZiR6l8E!$WRU3=h55@ksxXT8I8B2c8W`1ZH)(1>8EiEiX7Kq9-gzqU9h>Fl57PUYe z94ZsUFdD|LkpQ93bvJlhQ6)nDX$F;5thKeavCDJ zjPM*0RIiDj!doIUwEF-GWC{0~nIenyg!w44+!axiwWNo;;;r2I5Nj^Fk8HwkmHVbW zlNHYA45J;ODWjM?K*N-Y{)F<}WzWw(R2b!pfcZ+FinU`i+$*RbBq}GgCwN30m!76o z*zDeUGwc?cgJNc`=Q~ZC;&WLmN;oTXx~85qlzJAMcVlXQ_IYrHJ>eQMyQgI_RISu3 zl?#xOvre9t*eryhpII3AkElmcvc38DlL~v5jc^Kgo=Y4s6NG6}x_P5eSO&=Y1=TU*HFyOTWe)@GSedJ3Gj>?OF-!Jm7ms8y&;7OX$*5FSY$j5+JgaBJ=C*! 
zwku3;oEwL?NizQV8PSt_YnRV&Qs^m1N)kv|32O_=4Ah8|_WU(bKETDEiL9wVfzRUPw zj7J&lW~2kEnNBq{f8*IR43fy*)j-1JU`zDtL2YI+w=+!_Yc?}AeMPM?)7L$;6F1>8 zj28A30oW|kNb1elWc2#v+VCKj&^NhgM^CO%Q&vF!FCMRcy{3$Rc?he^m zb%&|~1FYd7{#@%)k>#|Y2Ju`DoQG1NAGuVb(QtM6z>_{=yfUUO#O&-No?~b(#VkXX z6}F_ARQlMyQ5;Dtji&-?yx0x{Waf54QE)K}CrOMUU_O}uZ>jF(mdFJ6Guy4PzyB`~ z{|7s)O22~boNx=W;2rq`w&H&wumNztw_4G`w(0kl88z%eWfCKKQ?@Cgmk1JZV`DogdLS(nRw^;fy(5THin@Ve zF;7o1bfU{VMATd=_dLh`QzG{u6zHJ5kjJX#Mk7P$!@%y2ZSxR$Biv||Gc>xe3V1mXPms`#bqUQ^k$tOx{-OO2VDX8l~u?;K&h z#@tk%RGFUPwIQbjjrnCKHdtD==)?x!ne(^?&#;l-EHM-#C#=q8rxBd~l)`^bdS8Ce z?*i$)7t-678oZ!l@>TExy5fv|cURg@cmEmQ#M};xdBRgmEM|wqtTHi%WRlMwOnMNS zLC{kE2Kh9kuuY754_va$VrCLAwXs(I@1@X{#(} zeN%FTb|>QuHv`{kR-cXnpV3#aJPf{}LvECc?g`IMt`YaMrJ|o36fcroEbFXG`Ug#^ z1U$QQU@e;>NlzpsB8}#8MO%U^mu3oX*p9t2+3F3yOy8`V$KBRbP%{yTG~vQ@D5Xf-T}hj z?XY(qB?i62-=Gk@b0{IG^G;T$qPikDl&8SKBsKYMx6u9)1Anst5?91>P(q9w&j*e< zv#?lbzgEQ!$bNm{ipt$xFAa@_Hil9WCUZm2?I9khU@j=sfq6;jdG!E*d@cj^TwZ*7 z7X_waS zY3N#96GZnpWqKS&G63X4I)w`H#8=TYrh=#e@gDsylM$YSB2J2r2oqlk! 
zNS33{bGbi(<&zCfPbQaRUAatlPa3@P=E$tc>*8|vx(6G^`Q=IHbLY6-?_6G#%H=@6 z%Lzo*#1<+@rl;ZH70N1Em=`nAPR-U;g2akjg{7pTk{&_sEYll=749}*>E^m12Q>6=f_<3fb3VSMp*L=(%h)3}d z3)Q11B{#(zmcZQI0*8ks9Yjz9p%olNlv(Aqla?-QYgh!8+hSG>V%b^PvLlzJHmXG~ zx1A^#yj)?8qn~rZ%dp?`aAlFK0Y-q_O%^ZOfXL-30(;ZQ(8tSqlw{~bzhc~~@5Bkm zN>XDz!^=1yvY?V12konVHg^!K#@Z z4<(CNj)zoL&oSK(g})HL#oCK{Ay$~-hq$m8V(Hdg+t1lou#8Cu^huo9)H&KL$j75p zM(N2F3b&qki6McjdWdu``$6~FSfVpneP*8_t>k<@VGOoT(fiRTxDv-<4Ct2CugrK} z?M9)y1TQ1JGJw&BGHc{rxwZGrVoM#X~sOab+|*{-l9 zXAj7T@YQTCx`)Wi3-Ep5Zxb>OW2sYz`68m`50GfKs5(6+O2YV>No4DZ731wGYCSHy z115R%0x0kkhfB@r%NJ8V2d=K$60S&C$d z&QW2{b^&i3=qB{9;mEzR5_RN@*JP~2vg0`W*B8eiQs4s%1*q!QzUNxXQ@2bxgFNyZ z-3^2~u^}F_==5Uvyj->;H%VRj;F@6r<}xAU#&T?zPo3UAnR$H~anV}1Y`!pbdgh+` zZzLg#^<(+$+=OU~@=HFU)*aHcB5&l6))TduMT|`U3X?(eB3ZnUTOd5WpSiN0K9E~R zI4}=#KFYw>wH$8}cU_8gNlg?N>Ig~}*inznwfCawY33uh#UO-FH?$)pbn*tm%Lh^sdNP5-sRAzJuL`JRy6z~vbMwiRcovJLZRI3`E&pV+v=WOZWA;wZykJ6E9M7#WnK@JOxqWAO zKArfB$GF01hp@aKFy5LQMQ1Rj8e!OrhXKvxJF|$_gjxrw*TfijLb25qQ=wr>(2?dm z#h>Q}BpGzWbKZ`uN3M!s#}st{Tn3;9EThL2`LvYP2zicQ)f5Z;bNs3*Q-h=r)Byxm z1)*vw++9!pEQ~ynZevR#k*T`c%%Re9!~k(-@Us`+kUMSM`}+CHG!pfCUm42GpK&XL z*`wLsVNPTXl3mYD2D`Cq$G5wn*zZ}i}Vyf70uS-MBqt3}dizU(c-m!13BccoZE&g^Lgo<-tO z7d+r7&LClA^g9dfZ$JbmlgKztJ_})+#T(2KcYxEe5$r?gqn0L*XjFGZ0tu``2U}!5 zcLe8{m01d%%Z2dduN358xPzE{r~<}m4XN0MpiJfe8yiU z4M1e1<>&at)!{48@T#A(ZF3Q5F87nMjLZ5=4s;r`$lNIqhv0K_*{~NljMXeVsxr9Y z0(kBww_)nwj+rdE%L|WmL4xBNn&~(=qSs<+=_yD*HjctO54+>;VY-oM>`p(6Db;A^ zsBQd2Nt(G5m6=bPijS=QM=%ikh$`UK@!8bhp_JaHf#j~lEb{64In&4bFlC!s+ z5#Rk|GaukmL}tG70I;PMu4o$eId2%CR(a=Y-W}w`EstSTHT77OSVv6;=7*ddBmOGd zZ=-NzR$n)g5iPC6dS&i&8{#G!Xud%R3>GhJ>4;HS>H7K`~m*wY;JdlM(@PJ(tC zW+N;Tlqfh9LBdQ1(Q=C@G9Fn8MDeCcz$`p3K_O{G=}pl_y+RKCJSQR|`nT*+bLa!h;&O|4o&=|%CWbJ=6u zw?ZD1ASo{9B3NNdP|fw|*<22fcCtvea!|)?17$0NrFEm%e9pSiuiS;OA zmS>7^<87%Hn$_Yl5AaO&;%D0{X0m>+tiDAy%^Fs+uzj=!4hFHC;PMfeq*QBo2lD~P zhRt(!hiefkyga8bBQM}-;NUxDX#>UWmpRl}Lgv79H>>Zb2B>putgqOD{fw6>f3EbJ zD}40a#BVj-2^Jln02v_s^0<#QcLVtnqH#vt7j+p8zm-`P9d-Wv)M3GMSg01*DS4<0 
z?7WPI*MXO$SyZ6`Sg6cG3akZhGrT{y93)ChRM<-db>@?1@f|g_GgL5-xM(Vl)0`{* z0#%Bor(MY!ahtjk*QjNz1mb+Ds*P8}7c>vQS z0OM2JVH=NljIsodcw%mSd`dafWmPEH6N^Eyz7rvd2>#vaDcNL32lx!u=d)aih~G^1 zFRk_El4 zEh`FC?TerkE;*X7;LL`yu%&U@Ld^w*g)&iT@!D7_7_&)pZGO!2cFZW-6eM=7OiS&G zu)-(lgM2{jitxRN8r-PrYA_W*)j z9(QWFjhdpzeceoSYz3IG+VZ)sriqK77UWLhQWF8)cuwV+C&T*|kA6Q;2@mcpekM!K z=8jpOa6{5ljoie_h;p*&bz_-ROWl#qqIh{-40#up88OJK3cKue{t!l)#T7E1@obq2 z8LzV0b?@@&YO=Rp{-`ixS8QZcdhpnY#C3M*{1v#6Zw14NHFS4XMy^!9y5P?+`OKD) z!-Ho#0&c4|J4XAeupWxM7t8X;H=-<8Wk0Mp3+#vSV_DwrjrV@5%MV=L4CX|51E^JIroH~x;`O?yAv*Q8e;?*6}^?(#ZjMtg-@jHv}@@B@7K8r9$ms>zbV+ad|21Bs}zUh_ym2d##oHo-0) z=0d1p!Vy!jo|Ca?BJiCoCKgkFvyzO9B%ZHll!LRbyV3rl=1L{&`avQ1NQ{OiIE{+z zRpc?n!)%rW8S*8x@EvnS_O8`5hZdCzOr{fX0;k&W5-wtoEmty6NN%D^b+1>sDop3x z2o7xe{Z9%dANk~XaBBC-ofn2a@hZux!)p}d$zt>s7eTT#c;Jr-z?nUMVC)L*8$B)W z;&9GUoiq7`Y;}iHGO5on^%`&;S={X5d;htPcwT`Dn>jP2*{e%GU4hq;o9R_y_D!4L z1~z*v@5uAnx5f@wm`0pIxFq>=;-j-_>wdSlJ<;-E2@Uwf^S$C+$ z)XukawolBsui`Wfusl_s&P!79FzbdC;!Xm>r2JvKn%vnU(7j+X&k)F0ZY)SI0^>n`#0-Dv zJv6|J!Ub*Wf|Sqoer*Qos+n$(CX6|0Q!a`6YH~59eDZz022aKpu;7+1!c-C}Y?tM7 z9dSIivN8D53}4|RlBULL++%Kt_o=bOn0dC41<}>5s=Gd_V1?UWbBVtUf=>|$qW{v5G565U3HIb33W8H1FP>yZ97n) z8qOag3r=S?S4PP>{Ol>pI{Y6QF~XWtL$N&%brrGc;+V=i{);ZZZd#-HMp@Ggppf8up6cJDmUwWty|~8Vs-Q*yG}iQ zcX1EvF{B<;%-I-4T=R1@E`wMXjun-5eO8j)k6l$TOoH${Ta9P4TAc_g@HEJLSbNeH zQC!WadR0P4zjMG@%3}>2iJr;64I%Tta8~AIaEaj|Ggl++N)&JkD)G7X^YQcc|FNPGT^E zrgwfA+5+`$8S@H*P?Cg=2ayteX=ElwWz)-m7|D{$UDM9Nl1oc2XT9pNidsvnpb{fc zUW8f_u$%-xcHGojiaUb-V9h&%=Nz6H_k%ao!1&dWS$4zPZjh#&T}_V< zKZoAz9+eKx*V%21Tx6<{0G9N`gR66xB0XV7m=jWP#9G%_oX$?CI~U_gwJk2zBUhqM zC1JBB3}uDj@yH_5q1-(x#i*5^TG$0*L3prO9Np-8)L@d?EM?SyV&D+we;L8RAw3t* zOqW?z;HW48DXT#K?PRH99K1TK$1s;nC61z&QCB$4w3M@nvED)GUB$I$d> zVEj*pZmWFUhK6Hv>ZSLZ6^kw5Ene}S6tE%wjrC3BXPa!!61T-NMtfartV^b7eINLH zeDBYZGa$8mVBPG%2X+Cu9`+AP*#M^K33#EX)a@~=scWt7MxoT^q3#~5AM6GLB1#Lq z6GrRvS7MYS_yz?lU@Sjrm;$(37@=nF{RJB-_KghL$}?6KaB4;`gIyL_`w6rRl_j5TZGTH>2cY4)<3Iav0# 
z)5YpeVqMVoQd?=c!yU*jyaeWHR?QyKf;F&Eg=SRS*5Ek2@o$4D9|hHo8=XQ-_CA-C z7YS2XtVrJyZ&l%!*FB0f=y5xAr z>Wek(o7)X9OT!RjPOt$U1OD_0DZ(<%vv3$9@`0sU` zd0)8L1Ro5wuy=^yH`v!nkAkaIco7O$Fiyx&A0n6E_8JRRG%JXsOS-GWph)2`Nqll~ zyzs>T?hDffX=bJ?DN!M53@O5(PTxdbXeoVq%t!`^Uv)mGn`u)>B=FRqtx>DH1SBjE z6)-bBr}J6B6}oCI81|Zy72-4@YC-QY-avU4ST5=J5J-c!n9u_&m|z?7T;M=8($aBr*OMqS02qV8;^yuqFt zXo^*#={h+p(=*786837BbZ)(pan{f?(4DQ#(r!^W2_EnB8(b)88jIQ+6>-1xu`&!x zDwwyx)M3a!QbpTd&eIN$oy!rk2IE;AmKo?hcF}#s(FS_W9!pYu zv&=2Wp#OaNGb%cnkVY#)j`QUx`pAna?GUU@M5kS-jIa;A_{|3G*?rsUJC3sAyag!*zPOfJeDk1_f3nGCt9bT z$lmkmaYSJS;~~u*DWR_{(}t+rIok7&OZ4+1#+(o(1QQB=i@Ow|cwCPm0eqs^!soG1 zj8ExZybcjULj+}s{_y4)1q`hXDxk_7uR_Y?3zz%qspM!MDQ>mHiNLK=4S|}pOEtIy ztu(Rt$5J8GAjGlgtKqqq^&0%gUL7G~mA$NW+Nb3ZBj|6BHHH{%vh8a`{5A!hcXLr^ zEVD-1{L=Dk+~*7B*Ey2?jn&uxAZB@I)z@6Zs;h)C*u<)YP1PJ(M{h z<L6nu-~T7cz>oTa!RE(&GpweuJT5#z+gQiuC7fgP zEe7SYfnEp5Kc}Z6Au+Dv<0aqiB|ivN5h@^pfLsV(=|G=5uTtR1VJ&*h3CHw~K>F0F zvMAIbz~$)6NCHQpoG;`}`ORk1?s2jQDRl#Lt_2sQ&Yi;7h5jvGE|Y0Au3s;NezROZ z-l4laOM_()`d9G1t2?mRByTnx`oUf48av!@*w_p1G2!q3sPbV{qWJpKR#Y|*l$L6d z9BmtudcX9tx2={_5DEh)rzqJiF(I%~a3cBwwA77_ot_j_AaKLRPNrdpI&qN|GD2?% z+*}h@TP@W`mpk~R%wh_;Avl_!kf+_OPrYZmEqtjidzS8)2s z&v@n6XRu>5c=R&?rx{mGqMw25oh82X*I?_kNH%L!==WIjn(GY}*ITl-Hh9cpb(*<|h8;HBUu-bmN( zJpBW?ovnbF(Ym3+ws~@WBSB~lQ#(rk<|UYqy0tkamciTiPcp|$Co3Dx?74x z<&}wM{ev^_0`qZ>G*K-Ywu{979>nW(0v;U z?Sh!VEO?jBA8~g}B5B$)%vy^LU!vNIE#yhjh{R!Mg|Z8jKCnf%qZnEhq)U@N!1*HG zUwK8t&rzNyY%M4Iz*jqAK8)0s8>J7+BTR~c%-D;U{bhoS00hI3yp&Nj5glkCyun=$;44Mo}8dl zis8eCXa%ASXlOsxeL}rb7?)Rp-+FeiuvVp4+>==NYm3rji;$7)v0o`hH5~1JY(yMW zUL4h9T_c#*4d^w?C8<*B5Lshpcm5Jpr0QxYY#|OY^0%Lc(q7$;@wIs6wB(2EsgnFS zLdq*S+eK*`rV?~xW9v^SeJGs|aD<;u>BHEagwn@_mt;m^KbZQciD4*WL_!ma3M9GN zLNFM#3uE($~T`!%885FCH zyKT1pj|VEIy5bsm^COdi9@}~hTG^3+V>P3oFX0VDde-K&O9GBCJwCDubj@bBN~7iB zVC*fIx@=jYZlK{P9LYtRz65?aPn}e#mlr@chvo-|5k0 z_hNiGKD9S<$}nM2a^ztDt`m}P9*IKQ?S{|nU{7Y@k~q%h(atQ4Q-3E5%z(*@*Tkr< za5tWiulaIX4FM(^F?o!6{9qfU*~$v{c1p8bY9s2_x>qI6_jDd<7GuT;*5qg&K9Xoh 
z@<@Ixsq(nPnpSzc?Xac^(xv2kTzUzJcP1CkRBbOM?yn&0q`?ys*I(x6S)8^0p%b@N z#8#eBh9}SB$Ypr6M`w`Rxmj>du6yk1M0m6~&BzWp#u$1w)rBr&Z%GMo)JVaDY_(a_ zx+X*FQ&)+OK3GtV^KoichyDT$jC`~$6`@bz;?@o}2w3d4LWvwC&TM`T?fKdkq50LO zT%x35K($0(!;6%3CjG~h;E24P;9^{Sgk%$Ak<_i_LdZ*xG4C=x?fTM)lz`#GY)PsU2x zArC>JJrA$UGr29jdw<7^<~z%H?uXhhz4x&oF`6@K&y%*bf*|~l(UC%Zp z!KIg?*NWJ$=;|r;n`v-s`*>41fM3~VkMVG=BX_T%%&9wm2TK|_FC35W|4J6+O%SK- zY;)$SCR-)UqVA5*MsEC0Bwu?L$=7F?ow-VDK4Vb_x>#l1dNP;nk}Y&cl(~w6XP5q> z99Aa|S}QZJEa_lR+aatTYe=+P9@pL1aoxlCAqrz}{u!3%hhVO)=FhY;QxjGuv#+23 zn!ZPrT^KBbjLpU8u=LiSbAaaNNbkm|O8{0tslW3`?*G5y5xH&da@*BFfd*Cj7zPXsgu{~vg;$YBuRNNWHb#9R)jIu60=7geO?ms!v zaR-DZgHhj(`h$!5xD%}0S`4EKi=l1ME#jE_%fqutyj;Z*mdTm+PvK~Zyse|*prfRQ zV-#}Wt|Cg@)22X|GMLNrN2bfcloli2srTdJVLsNZRi~mTt)~H#-Yxi0x){wMD%csY z|M&mF@{O@%AJr86Jcv@7ZW!8eRkH>UPS24;wRd0}8kfQCU8Zdev@l>jmo`R%M>iQ6 zHSS=KZIGF1y9f;WI{PDU06`9>k5Pt2JxD$&5C^iUEBWL`)4PSv64up3sv8o z1u6W(ty$62ospuFGOjj~Td>3V15iu{vCA>h z7@>&SUz@3z_3EXw>t2hVOWhoctWJm}o%*vo50>O*s$G}q%A5H+slW0_7+SEXE!a|> zYf=#3_*~#ns11SHd+$H~vT<*3Wx4fOGo_CDv9Ev_J99i7y7Bo9WTlQ;R#;*mdhzri zAC1xo*aohn;9qNB*Ddk)ESOXE=(gDZ_OO~9V~;gL-qqde{Aix1rj$)DBTPY;TZSn( z<-zZ$(vzp}fN8=BJj&@ikN~@hzfaJ|_If;Z@$Zf9)hr1o-t5l2SF*{$y>eAJv}aWg zO=S$>D4I>ZFvBkZgS>>Mp>_PA=Q3ozZ-kP*364|TQdFtXSFnqw|1lUS22{O^Fq@xX z^7VgDk0!s5&yJ_3|9kb1i=*=Chbd$yDL%eIG-T<@(XU9~P?+r8M?*$ZQveA(AlN*` zDUz|MJ$*<$CjU*Nx0qOJ6bq)J*V0QFe;c*tJ7 z3F>;1l|&md3u+Q=ER4wvQ@O_mWF`ujxG;9|gKOlmxH4p@&uf@4RR9h9;LDC-Y*I$r zH!WGx+nHrZWT}ZQF*ggQsIQL^pO2swpv9;@9eEjnS=vj6WDeSQ)&Aj3{IG=J#PnnTWU)(6#3OfryPF?=G}tzbY@ zXFu!)K0+^qJ7FP8rT0MaWC)h#P=4 zu*GgunQ=;R+sFuy($7IOzbF>*+XtL-+f*P1JK=%`A3iF#DiU9KiIG{#1?pkQ7>b&~ z=uQ28b#(z#?bc7TzvAO>Q<80}P^uHQ&XvQfOZd&d$?v#LO3L(Bqz1may0o_5-%{1NK|DLBGXM)IaskhR-1Wb zL*8w_l#PPx#YLEKfixwR3bq<4)rEcvo23jQ)q#)J zU{P`yTa>~LqYhR@C=xI zRBrmH!xShO&wH(vT3{F`wZz`7qx&d+0MlBKm;6c(2mkwhlUY?#@b7R|<@rnU+^F>5 z9ja^29xWh?ASsl>%yc3zfX)I7!=i+h(GHQMsi`ta6|`j|y?%3OOOLrCC&@F1TAJ?H zJn+E@3(4|=yPoo)(b+XA8ucN~wO@}!Yzz1t)8@^SbqcSG+D<4};&AKQBWO=`jZz1V>R66fe!@XIH$Gg;U3eCDl|q 
zQC5m4g-6aEeO#h*hYdkeDhwj{|9|(xRe`V+Yg0PJ7xRRry0$ZQqyeDA`h&j87nLX> z!Mwvpxb|EM>9M^GQl!IIJtsOyA7re&sv5ePC{h(Du5ac5P6-%t$nzVT>y_+`F&t<7 zg!C?0sVs;9khE|y1e=B9i1NUqY$d*hzzir)sK(*O$Eqhz6@#~LwF-+qe66|%Q~1$c zBzXfp;lT6=!Kb}=n4bg-UndrO%po};M&R9$6MnP_PmT0w@<c(q|QK!U2zIM zqG=O{WdnMkjnwRhx`#@#3F)zzuv5&+KNC{ZWwrOnCYXGX0rC?+y4A%iCx`%n_jsRV zJdKGn6$_3ANA?jL87~%v?6$!===(eA=8yrOdmy)>f)&y~T7bK(&OBE)va>2hj3nS1t7 zY%07St02Q0^Gq4L;1}_N_%a&F{gf*GREYfOZr30{YYpfcDVPbm$fmo9+~r^X8yUDt z*N;|2OCL6{&p4y39N7?KX-v*$@L+1%(J{S8Y!K()ZKb$umT0`oCEJ z_t%xTq(2&L6O~{vt6&Bnoave_!eSkA5j!*72Cf4<14K`O&s*(r7>`zwV9HhD%-$#4 zCp5zYPYYJ<;t*Efn!%oVYc72+TpIZltCp#IKBYCmcNHYmkmV2>u&B&z(poP1-Uq84@6Pw;C02QZg?7t)XhP0XiS#q~RF3q!Sl@*3$m-1%Jc#Mh1QCAh z-754V_?h=?g=Yz*G&g28ug40Saf|+qdAtFudznd;D_wO!Ov(|xW?&a=xj356l3&86=6Kfac0eZhduiu3*{2_nIRSjTBibg9kjA zHM4u4^apDsnJ_KsNZkl^!0M)zgaF%=lGJ6=Z1PA+xEy|?zMq!c$lLQxnISw*@p?`a zT@x&B@CE61k9B5c-db~zlS~pVTPqH>^WM0*P9#ueVJi+*hFhU4aI?)=6lb`>OC68) z5l?U?o(MY&Yxf$q@aZbf5NT&;P|PDQihHX{%SxrJBcD3`dzP7blriIY{gj|xf_wm{ zc5Ase1}1Y8sj3D-RTjhJ*;O9scY3&oC|F$k1qyZ@1+#drOPt%}L3*r_yV^85ZL){# z<}6E*z1fla&)UY6wQbp>9O`VHB$)QOTQ9vx$@20~GOm=uDMl7bwHJd`(h5Zq{%#7i zbp;5XP(udhyDY2zWggAqrK%_9f&gB~{WzmOrSPP-K(O^#ai2Up-?^S1UGAKoA0B@> zo{X=K&(CUTFBGe1N;gu}X`P=())p$-elBN$K}w=-#MH)j9&!mz;%W%cp^@m$-ThHl zaSm{|oS{3(>ev85CPhFSd~AsXR+2Trlk0-Iz7`6#nnt0oEgRexFcM^jQ@|4KF|sjp zTE|O2yNl;7C>3x?*k}_qp_Ma%YI&APeC}}7rR(nIYzWuQWV zAXf{a+AC3eNhC;$;!9vwyA)r_xsNV#BT;(v!=L53_C(v3M-A){cQ}@<9LAei5-jG= zum0n2exyhea1bz$2!JfBSgBHnoICXHsgOf+AZ&l^$Yg0>zgk%H%f&=Dgq$s`_~}>d zw_T9XV-EEBnkG`KaA%*)@I)yb0PS#v;_40u*j5tP5Dw5{Vv=4=X?{^u^#{bDwCb^k zd}EhvV;x_)LADWr-rd1EyBaSF4yxBdcAe7l+zO*_>jTb>y zol@hiP&}Mc*kHi=n%tn}_HUD79tMCWCPH4u6BOWD_(>vSEfs*kE#>9;SW8#yt%&Ba z7|y20hbG^FmZ9#kL&lGEhBLZl_A~ar(ZJmMsE(Z99Mxw8F}o3{r5@KVK`g!UyZh#q zM^E5eoZP1w8T)F>JLxe|+(AF|V4T9^EOzdk=z%GiZ_xqkd*zN)|2ucU+i8L;3>j7B z9Y;JBSykWATv>Z#X&z@duP+K-u^tRR>>5=m;}luo{rU0XAA$d%TyAtM;O2dYHcY?L zDR`5(pW`Lq-K|59Q}jG>0%1cMlHbpc#nJ=Av7N5x@#Q)igTG*ah{+3-b<=gLrU_ik 
zbGH<2i-jfB=s!I(G%M5nI#Lvj368fp+gkas(9Qi)s zAZ3?x?1Bw<94a+5WoBFvlJI{NrI+5@N;CG-+fw0YgOEi@rCD?PbGL7VSl}B! z4u_{+5c0|dIg(|Ml^$(tz`%{ED4lb|Ry4@56&*(*d<`mdz|7IhC#{a~Y~ew6ct)6p zRG!pOuE8jh-tPF;WnC=~O7F@YpLQ)fE`1h9J6Fg~AHjSJaZrRoV3+<}mMjYq>HULY zlckP$?Ar2n7OzJ^Hnz6Wx=K(q$cME9EmC96vk#Ome6YSW2nD(#vfH%)az&Is1NW|T z6=1a~(%{-gsjR!S0+oeg%$HDcSLz7tS+qmP{9@*$St7>sgm9#&&C=@nZxq;cfEXS0 zbs3>4cQ}coMQ}S#d`}B8xKY=bVidnqOrX|%(;_a?`u}oualJw*mP>TZ6elshVee+J z2ra?Tmz=tAG95wLm-mzsUejl>NIwYublZJ>J`tCya!Q2--~o-Xm8CkRB$ZLY#nG6e zvNEm6X>BX91WQ_CR!pa9Q;p@O)Hwo%u^gP(0JXVS!vL*|KdY3kQFa{JW>a}P#REiuY-C&WxyOuXlBEJzNFZ-4 zbNL_YusmStlN$gYxjVH*1!0HB$J^SXMB8i0r5*Im$$A|j?c$ZcT!nJZQjPE-#XB<5 zjyDcrW{>y>I7i6tBj_68VOKC1JHICGAPn45n4_hN9xnjgSGk(mFQ^wyg3Dz!f7O~m z-80n~{N7A9RjU9{_8#o)W>tT~!KkR3K>8mDyTGD2d;^2Vx2~$<$~Ch$(5-CV=If*? zOAWMri%W@@Tbn8%K~!G_gbH1S54T)`gM$x=`W$AVZ+#B8N8vnY)-~FD&a68QV=+jW zPz{75r@sWK-mSe=E(@YWK{7fC4V|nxt=Hxv*oI!}_W~FHcbkI$=R$ko&;EYlfjjkn zeDQ*ighr_l6`Vue;^46Z-;uow0FOZy9Dr8!yN7GC7(SyDeEJ9k9G;W@<+)FQeVz5RH@J2ZO@oE>6(y%~_Mj z<1>^(`fWPh8DAVTeV#HXqs?_Mwcw|@`!t>MIBGh(^Os&ntc1K-i+|14DkbZMD?tKv zQ4!_F3Cz^4NstFwEid79q-_d2%Xy7@yTwbJh3 zpD?~&`G)M68bcEHkBW|<$-jwZC_+bFczbP<6FN0fhgG!pEu&jLxedN~L3RQ%hsw_ybYuZI<$toD4jNC}4Fpz$MuJm;j z#1i+Y_p?PWgZY4vpZ$p z?XoD{lx)}!tWsvym)ruH`IqO!BsRkFD zR;yZ#>JVeyWC-V=Iy#z!fgfd)59SEyR*H(FB1VC6If{zVyei`eV z9v>QI7;rDRo74f-GjpS+PU6Ib` zIdv|*d9Y69DlLsJ;ac%~%Q(Jao#bg1YM{-^gvm+@X6xVd*)m15fB?8AiG|Vp^W-Y{ zKP8L?bBY5jjiNyY^9B7$AL?eR3f9WGr$#`Z{Vk&pfjo;5NQ30-85M*gdkaEN(WO=r#5?j$0@lap#d-6A4ZC_u|K$Q@6~HC!aY(n}u4 z3$W?~v0O`*G;xes2b*J`?XX|GA(Y1L?ZTkq`N5KNij0smF`SpL)d?tSrQre5Qhbr?Rff2|Q(%(>qG1}AZqY{dxZ zvA>-i9-cgg5ACwP@I+=_i|*o z@jMyFy{-#Zd#V?Cw`Nhu;TaWslVk&th#Qq}{N%=BKT3G9H4LoQSoaF2ZRyt;F-7;|$s7X=p{zArN5nG7u@&=3r$Jb|qorQDskjAL8;SvObW z3|iLiRN)M`VsCrG-M&Rk;r+PuBiIAs<`a_2B;0iacoK{*jl-@%nyzz&X|ic6G-!Kx z##LR~u32k&bP`1CA7%h}9H9a(Z>QL9cise8GF(GJ}<}gvv5A_P^)z zYO5CaJuM%xjBg0-EB?6hLw|J_M?TmRB4YdANJ?euFdei!rQEUH-Ke#Zjrs+PWN$Pq z+Yx#>3~9divhUR%h9>qXx)&v+A5WTX?d1Qd(g)POK+MIZzX~6(U?(#>FL=vc<@d?c 
ze?IApQK^S}90l289nG}w;L5$Nu-X2ABBHz3^Jvol%{*U4m%%9=fh5!3bkChC| zos%mtQ$#AE#TO6`B#0G)>``d6%UHcs)(F%IEaYc&UCO&?c9ipE?3xJ(%6=yXR|LID zScnk`%*}ysaEA$Sg*X7OAP+!kAB%OD-i^}F>mZ!Vgo;gPXdZd-T(=k<+%bWO^a$b-K0dA@txmG-0g+%P%}n5#347fwg_djJNk$E#Hm ze^;9K#yl)J`os!Po99M23<+tPL4 z3U^h~D6~wp)W;`})^{6nTbG=VZxJbuM2$vd0gMU7=~9=i^-* z&N)S)kU&72BJnVc%n(}|q?~IRtR(o_f373KOK^@1{FL3N$sJ4#Kf29dIt-39bOE$l zMo}vRuCEKV42#(j9^?$=*%YOt^pJZ@G`mm;->8(-WNpeBi>ek=8jEuw`MQIo#1W^W zl#nraj^~yUDIsI>L(KRPrSVqqn82)|iSo#BwdTJwXO#3}1f@Z0lq6o(H$I!w;5WmJ zSrk_ze605SD$JW@u)6(%hQ?KVLV*eiR>d~B>%4HyH|xXEd6*&4VlG^US~4&=h8{CU zLHR)hY@l~^EU*RaF!2^yXLi-Sp_#xrvA3XMpCxOHE;tbu>{V_%gfZDT`u7oa7B+-Rqe0yL=wV&99bn>@RqoMsliFn z)&{AdHn+7}vn5xmpp14R|23scONteE#?D!>La#gfq-HJhg76}VZxe4Rh0wLRZskb2 z2gwXc>I(6%4sk#%m7`$}c1I)r!+Rg)T>_JQ;)UaRa(#KMsF}V@h6lT!{*zrJl{al) z{&&>C;yKt9eh?uxbuiGmKoP80Z3!E5`#E;7c2NeHwP|)8rQUbn41t=>KMyILhr7v; z4Wiv9@!Y@hQnkomU!J^6-(v|b5wAURiY4C);&JR7{d7=rhsp}Y5iryQS)GHowZnkP z=PV_4%HLy6On(wY_k?GIej#%<62)3{~l6%D?@^DtB%hR{pk>z64+ za6-iA}q;^_&M}u_hEpkqFKDeRI{SLBrqiO@h}y& zqv+wSGw#m@;&gU8-MJV~sy9lJDr%M=tzN4;V6S*>;`kQ!`qHXJb>x>K2&pus-l1Hv zsz7?E!g9FyANtYbWT}=@PAVC}Oyq!QRqJf-Afhc5rP2vb(kPI%Z9aJw7TaWxH2@md z%l3BIYHV{`25`a{wwCFgABG#O?S^cVo8oL_1@u@~nT>-FN}M%a&tv0OPWW9ybBF4* zZ+zzefLSPt5b^A4ug}^W&Gpm-J5u{k)`&7Wx$_v>i-$Q@=IE4Nk2&KDA9YDXVVcsqt@o?0Pi>+!~CfbDE}F6~ZVCo%jh(5uWFlGIclq1V3h$wV)Jh1d(%- zJj9yL9Oy?xR9-shfwu^e@fJ2YS8nc zT7l3i0J|y9$loOBh0J|)TH;I0R|gKvXs9MEabemuz4;<;H~IITosVqKiHYZ=xq1lI zrm!*E8ih&d6X8DmAJ{td6PUf#t5%%+iY&%a#yR+J(#I6K^PxFhy}mct=JVeuHOB+$$=$}qMF69I_LBjzKxviQk5iPo)guI;ganxMQ5rBVl` znfG-*o61OsY38-{ElNBK7wjk`YpaSj{S}ts1hXP00yo+tfr%g?!&x&}L{5wm{EPMs zN|mw%yS8~3ED+e|3oGw4MO*7ti`Ii~ZosQ@3VKbwK8uKhez}aJpFlxhv=wTy&KvzL z8RT`ug9o{Yg4tXWPT)Nx6!_DzAMIQ0N0^?YO6QXO^^&(^9br;9kh0W?- zV1bVpi!yj9+>Rb|0S|2`^&6$r>B-Qto3Wu*ypC58I8dkHJw%@1M#6q{QoQfsOZ)(uZ0#WXH4{VG2a}X65lMd|iRi#W_ z4CyM9;J}J0n=@4elxG6j<-soyn><*d31`z(ENLwB?vWBH^x*irb-E_7E2b;omuMG( zrQ{}>%q>2%2C;E8*{mLs#*qXB2CLPM(9S4u$6@;Y_~HV&*t2*Rhf;QfVzwAtHz9qJ 
zLoThv_Vt(}KHB_3;EXbE*v|#hu(I^jTPgM(e5iz%tS3A&m#ielQD}PmDAYs9ip%lg z@%2>9>RhQwMNOT%s{H!lx5^3=wZ{MbHB^|W(5|X5nfi()@9=E;^$2C;r$-lT5qh`m z9ctCVGJ~En+{c?B12g&kf0A73UVnE~1HxWe$k-0TUQV4<8OzRTuwB4|G=s^jl0+9& zv_(ziWuyXB!D*8yWp=($pGtYl#-x0Yxq5(g5CyKsPVFRSHO|mX?0%t?_3YkG#mibS zc~lhBz^zaHjGkUe9mcbXvWnNJv*DtT7@c7cJ%)Jq>xQN2FyF?X_CcCfW;_oZW*~W>C7+gvptR+o(Kon>e=r0GGJL}j zYhf1Oa&_o|z1(gG8)7w$3$S5}=~-SkVlf?<>(2D}P_M^ue`$gFEw{iNwyPvTX~8F9 zIH;;RQl*NKpKL0cQtQtiy2HVD|Ifd&KMR5i{2NxyKT{#6KmRku4%aE)gd|U4 zopfU@PpG6a$f^sft0ZXF(F}5C^FQO8o%(PgZK`HVsrdW2a*2ivF8vAp{hi$md=7@O&-+Ms}w_;w197pBdv;HJ_39H4;O zAkp$Tv_(-d0Y4%^+NnG*M~~4VmccO{IC@8EF|;~!^c3=p^nJO3P#+~*Zm{L{3h*Q9 z6V&HPDyUL$2NvQTm;~^_omm{liM?J%xb~r+vX&kSog}IZ;j@f?R^;H9-XrGV<8`)L zX9jP5j}=M`>#dCC7pp`Tsou)imA*dDYr*O3Idvo==yfb&5+}S$caF;hKhI{e2b(Lp z-|VB1+1w>aWnU7jvazYyK}kJ}v@mC0c#gU6$@t>>@>o6lvZMuVOcc-A;ju$F1)aN? zvgI@M^XFa#=)Ba-?6FQz-F!_;k6~3yM<#cZM1xXR)5mCbadjy##R_A;%OGPZwBIFh z6#QG&j=*VHgN}oF%V!_J8Ma*s%{ z^xXg5c8AcI4rhQr-{Yo4t}^gLUFa1A*<45oIu*X5bo&y(BhD9BLp zRBCU?U<(}b6F;fG3VDc1Y_LY-geXL zcQ2XaMZ5^T3_ESvU-^U2!*Wvdwtg>PJ}x{-7pucf%rnu}=IQ=-0e^ylY^w$4uhYqo5Y& z_rQzbF}Xp_x|OKV=ty!8dzE8i@k+c@Nsc&(KHlKk#TCz@k3B|jU5Rw|n5*n??LbIX zB-Q{~abzFj#sy93%c*CmEO(RJa}vkdJldIikLiw(BGBHLLz3L1ogRDgA~!R-Z=n#S zqj~gW6(mY4r8WG&4E$fzn08mNv1_R3%Pq|a`mf2{4XcYg-f^UycgQ=AkOdP(Sm=Bu?RXU=Cts4l zsjdIukc5)poP&oJoLKh{ZE1vPch8Ivfxi<+Qv$H78ft7}SaX4`uyJJNi0DSxfsN{f z4xZi72P+?cK2H1vm@aoFw{U_WaBSpopY9~Xbqu%$B*WXhAi`NvbyXgGdBsWaHUKbt z%u#T%rcGa65@&4Z7wa3aMZRhsKi&ZQ#XaRHq4jddk7n^4Mj=qu>NDv1uy?pFjD{ck z6@kh6AlV8xrZ_1PbJ{GvmRE;7h4CoZ#Q7=%ZOGBqOOggixpnX zE1~kRA#VTiI@6$Nk#tb)Z7Y9ztR5e3C#;gT$dN?;0(Y8U_}^;hfhEtSTt~fWTzlJu zX>5bRQupw{T zW7`C78S86&)OOcxn=T!4%#yNNkYz}8z%9$G>Gpn#lwQFWJ?54Yu zV08y}U4BEA?JqXrr-M5q9o)HIg|RpP>_zj3V4f-4=QB}cQWJX@MC|M5zap$Q=1LxH zO`l@v%J-4UBCfOYRx!f04D~eFQ;ASFPa(sD++lp1t{02ohYtI?FwrUW+hbdcQJNNy z8D@c@%ysIm1htRe8ASt|wifDA_No(1^oBS?BgkNsUPr;d*1keGTaRJx*z?FfIZ)d% z!91UG`$gns>%@KbkYB6rG*nKUD=ZfD{%OnwFdK0Gh&Cke*&qq#&mEorCo 
zQ^f7`Scu_H9&o`8>D|pE6NP2(w~MdmSL-L0b?cPMHY;LFfu?*nGLfbw>~;FVXhP;< zQ3Y?$bG0)zHRC5m@9DdOxouYVI&QKRvF)~%oT`hJrK%s*ps+;d7b-wMPcD-9JKQzD zs%Dj-1MWtOrK;6+Yqdsdb{z5Ves%e3B|Yz;G~To`_g8*22iqjt$-tOg<(I$?3*ZKs zpw=pCe+2%61qHEI^TJWr{2qER7Ne?DMrC8E8IH`g#-bc}Iq2|mh*-Z60KAGPcefW0 zDk1SDPG-2lUQ_V_T8b1CTo6RcDpoG3RsNL?>95F28lu2~tMrT`Dgs{0VVGe^8GVw| zI66U`yh63)sH4Y|<01viHb_$$z1;>17FWE{dxae0H!q1u?+$Z_l(2z&t{Pu)ooTM% z+2;8(C{vyA+HGV-+U90Q>39aK5n@bjWf$4HVaZ1pG>1Xm8ntjp<#oP5lo1zpP#v}p zl)@gMx6LcCJ5e(1elpj&Gyg|+sc${LWoK6mp-d!Ma*H5jV%#K?`Z2q+r6B7rk~t7j1AOu8WRs8Oe05 zCsM-f5U&Z3owyeuS!6 z@Xr~YEz7UDlAll8UK%elgkiyx05)$JVyDbIg@o+Q)xs;S_Ily&vLIkLlVDfFUOifb zbNUhFoiD(-_HJ?6NPK~iLa705$Hu*txd7Y{048a%dtfq4?Duclu2Dbsq zcyJ&rSf{eV1r)ZDa-$s-wyw%dsXrr!Z;)f7#_;(%yN$_LkzmrXEi#<110bp)@gauE zYGLSk1y*gGuszmPL}KHTu$=%bU*DIf5lRuHo-c$JWqq(K%YwdONfn!Xf{!a;FgZp~wt&pqqv3fym>KJZe zkcFAqQJ#zn{7lT`|G@KcX@e&JO(-TNpp?AW^*b{Dv?paoN+c%*eHF!UiW%eb58F~o zdElS)tzc`@WpD={haIgY&Nk-ETG}p;mn(ylV^(Ew=gBRvedD@NRPiQ**m$55B{(mA z>oJ$bZ#;fT4v1d}EAr;&S5>R7$TODMvn$R_^4qnp6ga!Wa!X!g$J<{7(73*YdUe+8 zqnY%&3TF4dlq`NZl{jTF$UQR#iE?;{@yxUw{sv8Kj;WsYQtA#XV`;_(U|ZPnIm#Tz z+IN^c?#n@W>nuK{1P9Umz}!LP42`kJ5S|Ff89yPt3s#(Uj9AZ`#N~04QLw7?W;v(W z=5>7*kJX8Hqruahd%wq4RqBLo;3?JhMfG7#aMtnR7c#qTOp|e-G`K?1b$8f8n+&Bz z0z69T^XHL2o}9qFwX;;#p~852UBX4h@gT&db3#8#Kd*yuUZ4oF+Fumvv$C*Irnf|4 zA%&fHf|c2MamifWhF*7=NU3K#vPf&dr$@aiKf2WrR%i>BV8Vpjc@OCX1Aj1TSfpH& z5#Al6nX2SRZaP>4z;Z$wm7G+MQ4Hz=DPMu)asiG{{O?$VV>xKwwXktz-8*)-$DV;^ zYdG93M2x_B1e*nJz&i0c!v_XsRu6Nt?O1s_&4Wnrb($)wesDe{2-=@NM&2@*u{9k|8Ip!$bn4$!i1c*1jP4lSJqtK@ z0u#&-$e^Bs0p_Swyqq7Zu{U9|n(yVpkxTH{b^>G3<8?sjJ1mK<+KRt=HwZDIJC%~| zHr&G32*IvMol28Lec;qn_Q=>+Acfj*N;*uxQoIk=xcRSKL&}J_dXXV6#Vx)3Wzxi1 zE%}J-jOz9P^$dj^32$#wOsd9>n;*t@oChi@ixhRfWIn?+kwOx)c)5Z_BI0Brvi7np z1ElQ4`r!-7&)$l|thn;@nCUGK_v!{bt>4)_*oW(5Xd^A-e+OacfiL!ZwhT{U--oaL zAzEoozk~KAwZE?;-?U-TJcTPl-^+xV$3DmLeaE=%(A+3E_xu>iZ%qw1Fe9haqX|JS z-mO9}g5P=1JboH!OaxYqUg>J9Ks>N<+Q+w08E#vZsrt0FgH3Frr=r%ycnGcb=g$ui zkRtGk{Im9f=kfbnJSn5y;SlDQepaZ8rbG+rs0a__>^4&^lmyow|2 
zVl&A_9WYyuxvQ)z6DjRN#;;lJ4fX|5Rd|F8-JxK{#!CRMS$n3B;#18*44vrCZN`$* zJelnY8n_YoEGQ(Z54PbrHzsP2JqO3R0?I-?BB;LmF!YGOcno zSc{3A#D#8P&%t9{j8p!Bw$YMaQUV< zG4l#NLFbV@N*JU#o87IKVBE^jFwUx*1~(xG6bdaUsr4&<3!WusW%boKQAdo!W#qaC zGSWBm!ig7xr}enfm0w$Ru!jin`~N-IiS?lgt~22?DqT0d-QM;=yC*} zKlMUJ-~L@++&Z>+{Tx*O=ANDJTu+ZKcTUd_kG~vG##hJZXY6`9{_O}JI1zL@SE^)& zN!}%Wx@R%~`N{G)nuY7RKUevxi8KX_N-O^day157_=wU4biNr<>;$iFP7>;*Bx`6g zri?ji(2xRnViADc1a}b+j?#-nirm?SDyq2>P~3B8`(lO&nc1J9=yP{Bw^yA8kxW#W z_2tftVy=tl$|yFOm;{yZMVtmd&KHZ+&$O*^aXvl%JC<^yR8=W5@aI-Tp`M407#1zh zpLj^~fuMAI5J=AL%^KnjJ0h@gV~my&0-%VS)5mh%!>6UCMrv92>I=&exot5=15Xvf z6Xm1DC3*9$?eV=e=qH=td%=-(Bq%;GijjG-Jg2df_v<$wv(QN}Q~V%&fdkt(v8oko zQ^LTta3+(9*9YdbyXvBWz8c>W9x^t`0>4h6_x~{%aFdan*AL=6jC(Z)glw{vI+k@9 z9z;LS3NyR&5?E?YG8pAUVD+X!1|v?b$pKSjfZ?WVa}Jn}7t9H~*(6f@J%({!#&Jf# z6Qko`NVF;%eTEH~YL#?l=KWxB=+BZzT}XlzNg9 zTJIAuvMf`t^bE~p<)Dy^3|CX6*i9kXKnrQ`V#XMq%}1LAx~FNN6$x}IPi;;I4Wnop zM&EQdd#oAx)^vI@>2P2J=8hKD;pqJ2Zz z476(cNQO~)8|zTzJGFN!uFjU$-Etswi(@mVTQi*y&mW_zY`fl1CBj+%td9?*K1d&@n1^K~a+gaOgZJxOE zj4l0Jk2KBS3yL-dpUG2U*UL*7E51u@x+*>)1z+K5C{8HEOV?HsFFH9)c)G+Thm;{45m**f%aosjj;c&Tm)?O8dQw^JYYShE?^jHIZtwowvZ=-S3B zFS+&290u-b`O?OWPU0S2!(wN|K0QlOoLxtr@;5% z20>TYLy+C^Xs#Xjmf=D)_-gH(^}>bqKtDW)BqwQw)I$EaYke)74X|ODX*C#XB|D?% zR{PImqOdl-eTyGPfHtr0kh&obHCPV(8ahIEFlF8XH@q8fE;xSS$Nc3gOG}7-OY7_E ziVf8ah&5%%5&tyY^CF3}cov5nydU_D7GdX&Ir@g)Pm~@@5=F^3Y&{l*qc1A zIP*mdm|i1I?d_o>v;k{R$FqN2S`!f7*W~_dUeS_+>exUXSTs98byOlTd-fsEE~|PaAyq=bugPc-{E?m6IESv%3W;p-Bwu3Vc^W1jjH-T`u<~aH@Nh z=+i+l$>seRV;YNjmOOE8?_h|WWlHRJD;42UOSxh3Iy}2w0;#3bZ{0C;EF+}0= zuo*w6`SK|@f+yz?!IZaw;Z+&$Ya?N;36D1?yvhBmk?)St&~=yZKHDmJ?mFL1ih-XB zp`@c9vv8fl%5w=f`G?+3^U)+baX5+sTFcs?B+Yj+{3gMD*^ijepiDF&Jn5(nZ#FOZOUgN6PnXP|?9?1iw3 z2HD+GhxIkx?pPmuK_CK?Z8=YG67gF0^{Rw%xyKMV7pJ^xk)KH(7=3sL77pKA!ZwRw zWSHK;63+G6dWBI&mcq@3PZ2T=?={K>ld=f9<^ot5!deNo$kLw&>*a^Ks7kRVHNc#} zqq5b3>#v?{(6<4lh=6D6uJY~%tYMAt8U)J;HfpAr^8 zSGwys$DW{iuo87rV8#0HwIMS0`S|Sc6Grrj{_tOTh>a&F3>J@NG3{CJ&gE5IpM(-i 
z-oo8-^-miPJAa3h0J2|UfniS1pZY^{myU0$XqSY5dQ;qP&prA5kD^q zB}cZdUAi9(=tgTpeL}|djn8kEKntyqPr++jTqYe6cg1Cu<-z+ZmA68cpIb9hEE>?( zjNl95{@r-9`&CTOQ%&ekF+|g24$(fl8a6AE7-pbOhUvH>MR}kuRf}Ei@?NS@=_n#- zTr5?^{{$j}CJ-M3Bpe<0CGf*}YBP;>QUkT-Bw4TyYB@id4;@x0e}g5%c=)vL+zh#X zqVN%)LMl<#>x+CSD=L?bN3xzIX6;9^hT!i87T19#f4;uCN_;;xD{*@+Ih)vbUS-#< zVs0dZiB~7naiLyr2T{RuHbw<#b2Ef}4m*#v% z_z^e!_V+)1#KcwiuqOM`rN}hQc5)wS~@yd^OGJ`VOmHEDb`>v~NQeE?Fa1r6{ z*Y!WD&KIVG2uKlE3forXTT9b#+e2E{B*2M63~xC;6s!H0at_lx^>d}s+M0h|E3bP& z$ejjBG+6K&Ttz470=wtBucC0;FLmCp9#_WW0h_za$D^oE@+6`RyF)X&|23H1mr}*U z-OH6y(=ST$i7>te0}aum2wslSrc~ z({+fvx`;g#lG?^CMpT)=t)y^o@F*`f#;e9bxcnSUw6ZA9*pi#IXs6?Nh&C`@E|KH| z4GFlc$vOg8r77=OM@ukA_EB=#yAYybtj_|zoR1Te(+P``aSMWL#GBo0s+&oe8VCQX z#oJ(k5F+fSfr<9D+Mi5$#=!V)>74hYymMME_GvrU`;*((hrf>cQMyCi&>zBiuyFkS z`0B%JZ?6yk9rgSBd&Yn9HS7=f_y05)?u`x(_WGmI{-64T-Tl4MpIHA1`cim-d*LOF z{VBTvd)NDS<$2qVztY!#^!|f=KEi1>IluS^Ot{~#*yoe;$yaE>upg`0{dmcJ+_{ zB2N`7M*Qz$iw~BoFhHNxOTZT9Ci>|%U* zbv(H~8DFxC>&uJt>Cp!;RQzpMinKIhqJqu9kW9sh{{eR=g?@!|cIOd1XXYc25G?gs zyn3v23&I#SV;mjbJADEFivt9f3iSC0dveJk@Rvlm{W`2pS$UWNgB6Se(!D z>cN*)T)%gJK8vHB-!4wr1iskF>%0i`(?vY;5f2pneR6bxT)Oae3!EkLoHSl1Gx5um0%Py6 z`Oo9kDv5FG9;;vY^ZO6%0rU_H@9IvT7BAqd%t3>V_$cra8HTGFLKs6X1w`N>IANB= z^e%{C*zis3XvQz-B8it`wJP~#^|;Txbt>;sLYm9?4yh#wWbCWya`Py@t@>H}z|Qgd z+tU}vu=MVIMEFOy)OGkGIgL|27gC}jE$QnAn*99!-uRFCPT+HBPg%GTV2cyoHq17G zB!>pESjfkM?hwxA7+Mql7RzpF?jj%GKgGqAg_ruHeA-cyb;I<6k#2Qd!*VO-Pk3vji<$2*2gaZ9-kd`M%M zj_Q};9iH(3+YubZM+z=AVsmo8DCY+CR7A)`FYzYKeT3iVJ_O%}+=P^mr@2R+R$a!+ z9E#hcMh<-oeJr`rD-m2s@$2@rH z=>Mhk!RiF<@|gSO31O4!25%zpT!y z3}@8<)Cn(g5UuoQUqZ(@jOTFr+qYuR^|2S+^`mHx@r|n9i^DH+022?^(34={W45AX zl{62($N@+h7+Ykuc<`ll{*bHyN@Yi0?$YOuy|k{2sXfQd==ZDBlVh+yew0mq`;3=1 z49=N0mPc$2a{Jhe>w4N&JN+kj`ttB>BDEC@9XAiYw9Zp9qq6sfqc5%d?NO&MqWkTs z7oKrFCnlJ4iqK4O>CP?b`NJ=50PS^Ye%2Ka(pBg^o)vaa-Tuo~hQp&y(iG-CicsD|*boZpJ4**3)M3ik)6hui%?!5&H`l2K|n^7kkf6$NzwK z$?S$*d+(_G{Q8y{y3}63`%%h2A-Y-!D$t0u%dX!Vu5i9lBW(Xt@ac4u_K5Or!q3pn 
zmi6xJ5?=Z|M0+$3vN$=ypj(=s%9~DXa*_E(;8AZqTi)AO-LUd$>^bw<*}#7b;~Ou$ zkPMh5VS@Y_Z6Al+0P~hKjq)s*-Mbpz#*w)nRL;7l;h2`IYA{VIYAhI9)N0xa(rDHvM>&IAA~j@Ty%c2PP2H4yQI}^-Q0R* zk=I;X4leq86|dqjzJ09dckyte?MHp8^vV}(36}%9AOK(KVECc7H=Q63|IX{f zU;9Hu&&5xM{@H^68T1FE!GXa4jQYEy5#oRLhJ!c!&vU%q@IP<(pEvx^8~*1F|MQ0b zdBgv_;eVbN|1-gTi(`3yWq0sN7!zz%V2{G>#7@Aq#0xLg9q`Vey~Za{zMcm*?$i$# z9||xb{3#VUMHErp3MNSqz@I>yG@UCag6GqYE?ql3`iqVQ(E&S5yhYZemP~Qok+{QQ zLC z*GY-IP3uluyOh&T?^|;plk>0UvCWy&&#}Z885A1WtV-HMFw1L+u#J6h!L`3d?(}*( z{_RN1oSZ9-4A&zb7IX$+%-_6(YX4DcKwkpc>QZuG3+>*sU54GdAY(!x|KB-*IHE zF2>uZy?xGyRM|X$+=rHRDX6qCDlDXUaR3Xk9DswRF?8T(i)djvvf{%D1n2pruiSDTmtX zq3VX$JlK!@vcZ15k*lwI^Qs3~3@m^lf9bD$Pc6H~`&T{GVxZv^Yhg)IB=Gh0HJe`b zfQ#V<7#*o;<^q3K+bbUOU{oA3B~uU2L^MghdCEZ!pLmda#f9k#EW!Kh)f{H-vs=Wj zeLNL-Uy_jY_2tQvV+FeP_mkD%iJxQ~-t8Z1qJkGagrN$0d=I~)l!r^G!0-tcHr?Cl zEMB?nu7$mJ)ZI_IyI+DZbBddKg2|w;xp;ZwBQq%qV6g&B^? zc9_oV&5RER#v^d;mM8NG?$EHr@L#8ynUyl%zuv8OILxE2rtnSPtxwFmPdO8xd|;)v zzD6gAzhCpMD_XjgsSr6B0ieRcnk@v`|L%padFZ2&+@NN0I+h=L(Yw@WG|*k9%H&1h z8b1BBmwc%E&#TC?#CLqnpLPJd#sEaQ@7yVo+S7V{yLPeRNvt@@u-4I1i=;woS%&=5F%Dez;gRz|2r@CNYXur(rLe^Al$Y4Got-u1#<6kVUh(QKV0FsHsD^~2LsS=JkdC1^Ur z(d=srFUw#v`~`{duHuv6yI~X%IUbQfe5cy?)Q1OX&dc6s$e<@_i_*pepu#+4;lt;x zLtU>DiM*wn{TCHzfN%MzN>T5;bh<{493-;P6C+r#tIO*nVz7`0)sn8 zO7NoeB#!T|INyiJKJ!t04v=iMD+*Xjm?PvLYq=+`mSp&BZNCK?Qt7Edu$v2ecdq3)`o9?wIH#1Mk7WUw%r z$ma|u0h~%>MP83rG0>sH^5IAiPhfqW!UNdg)*c^nR+$v(y`K9K-u?Kg;b?Wi_5}*V zZh2&poy!Lw_;&O2`(j4-(H>pBMS0^Oi!1bt8SE!}u(f9I#<3Qs^2-?ONY(sC=_Ob^ zZvE7@W{{(Wc4Q3=ld&!b_k82nx5n`lnVqLKDDWy}q?#A%MVa0(i+m0ju zoA-$0cujL@U@XJv3=k?!dp7hIvuz$r$eZhwPXu4GD`YA+WlrPo{&?0Xd+_rw`f#v6 zw1=U-t)VaWBF3_7k3}r2uW=-M_DF;ca^W>l8GfdOAbhdJrGcd$C!E|%C7X)B-v0zW zfVVBK^QNH7bX;cfUVfRd-TvNa+ZaaH7z9DzYZ*ob+Mz$xRuf9rIFGJ8MOu7c_xUKA zC65IB9*0;;lHED>Z=beu1xBJA21;rjB#(cs~mUe-wEa*Y^}ZsM3_dYFGq*O^OrpY>tJrB8o2UQWLBXoxAddY#uLcRp$bu5tD)zt}mXX0?)7oS>rS?E$^ zS!nfo3(%(KJ<}m(aS@*4zG+fp^~RtIR&5pmFSM4@+CqC3Su7kvF;Io_6=(9t#O68e zE)7?HISUlpEkp%{oSnfQPTG!nPHwYNFkL^@s0w(#Ba@RSbl` 
ziR6vfItgk*Nhuf@wEvLMH*kLaIGpph(4Gnwd+R!E0%O)3zf6Zoe)tL!`VF@ePAWXS zTL|at-W%yD%4u(}KU*>I8#-MeWr3?)&S7gG;FPXLo8~=*#u9o^d{Xl)n#RWk`ZaN?ERw-QH* z-NxxWa`%FmFYGo+5n;C?pMrf+1@nM?nl8|{+l?}I<3O%9+I&KIHMn~lPudH&;kr7O zTsqGkcVw6|Tu#Xp@(;bt`>FjhD|iW%(_SE6lq~bVc%+s7Ae3RbzUBYrokfG7!=;HQd@OGXswuKH~hV-gW_+s zVUHVKfQ@+KI+!~jrV-vC2vUoYhI4N@d0A>7)IBYy>4_fzNC zP?|mTf4tO=YBblW7a>}(eMiggi0kfZABwMYUHV&kXXIjJqTAe0V+~(D^I?H_+Ce9jjvx8Zip%gEJ-Vd(C`hQqh|J{$qD7U?cNqY7P zN8`TxMzyVkX1eZw0rKg57$XDuayYf2R+JgJ1=9d^O}oa?@asq7i!pXZyRUA#Kr_m2 z0+%34SI{mZV&*Y~kHbn~H%?%IMbTpJvE)xb^|QNpo;x5Sd1ezVaH?J%q3gNB+($wG zZN3h9=xUyhKKr*pB)`*NBz}5Vh+fl){bc=}{)YdG^H01qJ0-PQe=bjxmp~M)h}J}m zlSh!IFxF`h&3GVfy~>@1U&}jt6wND-(PuXu&DLq|Fh6u0ksh$0<`3hik6|L0|0*7- z`b}}n_{F(@v%XE_Y%IGLpV#T*H!sj}M15%Llkn|@w@!WIa5=))QclAAC3C(F?m`hW#p(9H%8!2OCj{HRw<>1VWZ zTg2Y95~Z0>5E|IC65Gu>Illh;@yYS{@*isD)5-DNqTsya18pIzMNFr;+dI0Y$@2Q$ zZ@?NReUTv5>#=Li*^Pbu)7$Iq_4ayuy}jOEZ?Cu4+w1N1_Ii80z207LueaCR>+SXS wdV9US-d=C7x7XY2?e+G0d%eBhUT?3r*W2su_4ayuJ=g312Xc7;Bml4p0C5%+oB#j- literal 0 HcmV?d00001 diff --git a/dirsrvtests/tests/data/ticket48212/__init__.py b/dirsrvtests/tests/data/ticket48212/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/dirsrvtests/tests/data/ticket48212/example1k_posix.ldif b/dirsrvtests/tests/data/ticket48212/example1k_posix.ldif new file mode 100644 index 0000000..50000f2 --- /dev/null +++ b/dirsrvtests/tests/data/ticket48212/example1k_posix.ldif @@ -0,0 +1,17017 @@ +dn: dc=example,dc=com +objectClass: top +objectClass: domain +dc: example +aci: (target=ldap:///dc=example,dc=com)(targetattr=*)(version 3.0; acl "acl1"; allow(write) userdn = "ldap:///self";) +aci: (target=ldap:///dc=example,dc=com)(targetattr=*)(version 3.0; acl "acl2"; allow(read, search, compare) userdn = "ldap:///anyone";) + +dn: ou=People,dc=example,dc=com +objectClass: top +objectClass: organizationalunit +ou: People + +dn: ou=Groups,dc=example,dc=com +objectClass: top +objectClass: organizationalunit +ou: Groups + +dn: 
cn=user0,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user0 +sn: user0 +uid: uid0 +givenname: givenname0 +description: description0 +userPassword: password0 +mail: uid0 +uidnumber: 0 +gidnumber: 0 +homeDirectory: /home/uid0 + +dn: cn=user1,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user1 +sn: user1 +uid: uid1 +givenname: givenname1 +description: description1 +userPassword: password1 +mail: uid1 +uidnumber: 1 +gidnumber: 1 +homeDirectory: /home/uid1 + +dn: cn=user2,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user2 +sn: user2 +uid: uid2 +givenname: givenname2 +description: description2 +userPassword: password2 +mail: uid2 +uidnumber: 2 +gidnumber: 2 +homeDirectory: /home/uid2 + +dn: cn=user3,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user3 +sn: user3 +uid: uid3 +givenname: givenname3 +description: description3 +userPassword: password3 +mail: uid3 +uidnumber: 3 +gidnumber: 3 +homeDirectory: /home/uid3 + +dn: cn=user4,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user4 +sn: user4 +uid: uid4 +givenname: givenname4 +description: description4 +userPassword: password4 +mail: uid4 +uidnumber: 4 +gidnumber: 4 +homeDirectory: /home/uid4 + +dn: cn=user5,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user5 +sn: user5 +uid: uid5 +givenname: givenname5 +description: description5 
+userPassword: password5 +mail: uid5 +uidnumber: 5 +gidnumber: 5 +homeDirectory: /home/uid5 + +dn: cn=user6,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user6 +sn: user6 +uid: uid6 +givenname: givenname6 +description: description6 +userPassword: password6 +mail: uid6 +uidnumber: 6 +gidnumber: 6 +homeDirectory: /home/uid6 + +dn: cn=user7,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user7 +sn: user7 +uid: uid7 +givenname: givenname7 +description: description7 +userPassword: password7 +mail: uid7 +uidnumber: 7 +gidnumber: 7 +homeDirectory: /home/uid7 + +dn: cn=user8,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user8 +sn: user8 +uid: uid8 +givenname: givenname8 +description: description8 +userPassword: password8 +mail: uid8 +uidnumber: 8 +gidnumber: 8 +homeDirectory: /home/uid8 + +dn: cn=user9,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user9 +sn: user9 +uid: uid9 +givenname: givenname9 +description: description9 +userPassword: password9 +mail: uid9 +uidnumber: 9 +gidnumber: 9 +homeDirectory: /home/uid9 + +dn: cn=user10,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user10 +sn: user10 +uid: uid10 +givenname: givenname10 +description: description10 +userPassword: password10 +mail: uid10 +uidnumber: 10 +gidnumber: 10 +homeDirectory: /home/uid10 + +dn: cn=user11,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson 
+objectClass: posixAccount +cn: user11 +sn: user11 +uid: uid11 +givenname: givenname11 +description: description11 +userPassword: password11 +mail: uid11 +uidnumber: 11 +gidnumber: 11 +homeDirectory: /home/uid11 + +dn: cn=user12,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user12 +sn: user12 +uid: uid12 +givenname: givenname12 +description: description12 +userPassword: password12 +mail: uid12 +uidnumber: 12 +gidnumber: 12 +homeDirectory: /home/uid12 + +dn: cn=user13,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user13 +sn: user13 +uid: uid13 +givenname: givenname13 +description: description13 +userPassword: password13 +mail: uid13 +uidnumber: 13 +gidnumber: 13 +homeDirectory: /home/uid13 + +dn: cn=user14,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user14 +sn: user14 +uid: uid14 +givenname: givenname14 +description: description14 +userPassword: password14 +mail: uid14 +uidnumber: 14 +gidnumber: 14 +homeDirectory: /home/uid14 + +dn: cn=user15,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user15 +sn: user15 +uid: uid15 +givenname: givenname15 +description: description15 +userPassword: password15 +mail: uid15 +uidnumber: 15 +gidnumber: 15 +homeDirectory: /home/uid15 + +dn: cn=user16,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user16 +sn: user16 +uid: uid16 +givenname: givenname16 +description: description16 +userPassword: password16 +mail: uid16 +uidnumber: 16 +gidnumber: 16 +homeDirectory: 
/home/uid16 + +dn: cn=user17,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user17 +sn: user17 +uid: uid17 +givenname: givenname17 +description: description17 +userPassword: password17 +mail: uid17 +uidnumber: 17 +gidnumber: 17 +homeDirectory: /home/uid17 + +dn: cn=user18,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user18 +sn: user18 +uid: uid18 +givenname: givenname18 +description: description18 +userPassword: password18 +mail: uid18 +uidnumber: 18 +gidnumber: 18 +homeDirectory: /home/uid18 + +dn: cn=user19,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user19 +sn: user19 +uid: uid19 +givenname: givenname19 +description: description19 +userPassword: password19 +mail: uid19 +uidnumber: 19 +gidnumber: 19 +homeDirectory: /home/uid19 + +dn: cn=user20,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user20 +sn: user20 +uid: uid20 +givenname: givenname20 +description: description20 +userPassword: password20 +mail: uid20 +uidnumber: 20 +gidnumber: 20 +homeDirectory: /home/uid20 + +dn: cn=user21,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user21 +sn: user21 +uid: uid21 +givenname: givenname21 +description: description21 +userPassword: password21 +mail: uid21 +uidnumber: 21 +gidnumber: 21 +homeDirectory: /home/uid21 + +dn: cn=user22,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user22 +sn: 
user22 +uid: uid22 +givenname: givenname22 +description: description22 +userPassword: password22 +mail: uid22 +uidnumber: 22 +gidnumber: 22 +homeDirectory: /home/uid22 + +dn: cn=user23,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user23 +sn: user23 +uid: uid23 +givenname: givenname23 +description: description23 +userPassword: password23 +mail: uid23 +uidnumber: 23 +gidnumber: 23 +homeDirectory: /home/uid23 + +dn: cn=user24,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user24 +sn: user24 +uid: uid24 +givenname: givenname24 +description: description24 +userPassword: password24 +mail: uid24 +uidnumber: 24 +gidnumber: 24 +homeDirectory: /home/uid24 + +dn: cn=user25,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user25 +sn: user25 +uid: uid25 +givenname: givenname25 +description: description25 +userPassword: password25 +mail: uid25 +uidnumber: 25 +gidnumber: 25 +homeDirectory: /home/uid25 + +dn: cn=user26,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user26 +sn: user26 +uid: uid26 +givenname: givenname26 +description: description26 +userPassword: password26 +mail: uid26 +uidnumber: 26 +gidnumber: 26 +homeDirectory: /home/uid26 + +dn: cn=user27,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user27 +sn: user27 +uid: uid27 +givenname: givenname27 +description: description27 +userPassword: password27 +mail: uid27 +uidnumber: 27 +gidnumber: 27 +homeDirectory: /home/uid27 + +dn: 
cn=user28,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user28 +sn: user28 +uid: uid28 +givenname: givenname28 +description: description28 +userPassword: password28 +mail: uid28 +uidnumber: 28 +gidnumber: 28 +homeDirectory: /home/uid28 + +dn: cn=user29,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user29 +sn: user29 +uid: uid29 +givenname: givenname29 +description: description29 +userPassword: password29 +mail: uid29 +uidnumber: 29 +gidnumber: 29 +homeDirectory: /home/uid29 + +dn: cn=user30,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user30 +sn: user30 +uid: uid30 +givenname: givenname30 +description: description30 +userPassword: password30 +mail: uid30 +uidnumber: 30 +gidnumber: 30 +homeDirectory: /home/uid30 + +dn: cn=user31,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user31 +sn: user31 +uid: uid31 +givenname: givenname31 +description: description31 +userPassword: password31 +mail: uid31 +uidnumber: 31 +gidnumber: 31 +homeDirectory: /home/uid31 + +dn: cn=user32,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user32 +sn: user32 +uid: uid32 +givenname: givenname32 +description: description32 +userPassword: password32 +mail: uid32 +uidnumber: 32 +gidnumber: 32 +homeDirectory: /home/uid32 + +dn: cn=user33,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user33 +sn: user33 +uid: uid33 
+givenname: givenname33 +description: description33 +userPassword: password33 +mail: uid33 +uidnumber: 33 +gidnumber: 33 +homeDirectory: /home/uid33 + +dn: cn=user34,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user34 +sn: user34 +uid: uid34 +givenname: givenname34 +description: description34 +userPassword: password34 +mail: uid34 +uidnumber: 34 +gidnumber: 34 +homeDirectory: /home/uid34 + +dn: cn=user35,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user35 +sn: user35 +uid: uid35 +givenname: givenname35 +description: description35 +userPassword: password35 +mail: uid35 +uidnumber: 35 +gidnumber: 35 +homeDirectory: /home/uid35 + +dn: cn=user36,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user36 +sn: user36 +uid: uid36 +givenname: givenname36 +description: description36 +userPassword: password36 +mail: uid36 +uidnumber: 36 +gidnumber: 36 +homeDirectory: /home/uid36 + +dn: cn=user37,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user37 +sn: user37 +uid: uid37 +givenname: givenname37 +description: description37 +userPassword: password37 +mail: uid37 +uidnumber: 37 +gidnumber: 37 +homeDirectory: /home/uid37 + +dn: cn=user38,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user38 +sn: user38 +uid: uid38 +givenname: givenname38 +description: description38 +userPassword: password38 +mail: uid38 +uidnumber: 38 +gidnumber: 38 +homeDirectory: /home/uid38 + +dn: cn=user39,ou=People,dc=example,dc=com 
+objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user39 +sn: user39 +uid: uid39 +givenname: givenname39 +description: description39 +userPassword: password39 +mail: uid39 +uidnumber: 39 +gidnumber: 39 +homeDirectory: /home/uid39 + +dn: cn=user40,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user40 +sn: user40 +uid: uid40 +givenname: givenname40 +description: description40 +userPassword: password40 +mail: uid40 +uidnumber: 40 +gidnumber: 40 +homeDirectory: /home/uid40 + +dn: cn=user41,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user41 +sn: user41 +uid: uid41 +givenname: givenname41 +description: description41 +userPassword: password41 +mail: uid41 +uidnumber: 41 +gidnumber: 41 +homeDirectory: /home/uid41 + +dn: cn=user42,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user42 +sn: user42 +uid: uid42 +givenname: givenname42 +description: description42 +userPassword: password42 +mail: uid42 +uidnumber: 42 +gidnumber: 42 +homeDirectory: /home/uid42 + +dn: cn=user43,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user43 +sn: user43 +uid: uid43 +givenname: givenname43 +description: description43 +userPassword: password43 +mail: uid43 +uidnumber: 43 +gidnumber: 43 +homeDirectory: /home/uid43 + +dn: cn=user44,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user44 +sn: user44 +uid: uid44 +givenname: givenname44 +description: 
description44 +userPassword: password44 +mail: uid44 +uidnumber: 44 +gidnumber: 44 +homeDirectory: /home/uid44 + +dn: cn=user45,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user45 +sn: user45 +uid: uid45 +givenname: givenname45 +description: description45 +userPassword: password45 +mail: uid45 +uidnumber: 45 +gidnumber: 45 +homeDirectory: /home/uid45 + +dn: cn=user46,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user46 +sn: user46 +uid: uid46 +givenname: givenname46 +description: description46 +userPassword: password46 +mail: uid46 +uidnumber: 46 +gidnumber: 46 +homeDirectory: /home/uid46 + +dn: cn=user47,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user47 +sn: user47 +uid: uid47 +givenname: givenname47 +description: description47 +userPassword: password47 +mail: uid47 +uidnumber: 47 +gidnumber: 47 +homeDirectory: /home/uid47 + +dn: cn=user48,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user48 +sn: user48 +uid: uid48 +givenname: givenname48 +description: description48 +userPassword: password48 +mail: uid48 +uidnumber: 48 +gidnumber: 48 +homeDirectory: /home/uid48 + +dn: cn=user49,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user49 +sn: user49 +uid: uid49 +givenname: givenname49 +description: description49 +userPassword: password49 +mail: uid49 +uidnumber: 49 +gidnumber: 49 +homeDirectory: /home/uid49 + +dn: cn=user50,ou=People,dc=example,dc=com +objectClass: top +objectClass: person 
+objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user50 +sn: user50 +uid: uid50 +givenname: givenname50 +description: description50 +userPassword: password50 +mail: uid50 +uidnumber: 50 +gidnumber: 50 +homeDirectory: /home/uid50 + +dn: cn=user51,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user51 +sn: user51 +uid: uid51 +givenname: givenname51 +description: description51 +userPassword: password51 +mail: uid51 +uidnumber: 51 +gidnumber: 51 +homeDirectory: /home/uid51 + +dn: cn=user52,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user52 +sn: user52 +uid: uid52 +givenname: givenname52 +description: description52 +userPassword: password52 +mail: uid52 +uidnumber: 52 +gidnumber: 52 +homeDirectory: /home/uid52 + +dn: cn=user53,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user53 +sn: user53 +uid: uid53 +givenname: givenname53 +description: description53 +userPassword: password53 +mail: uid53 +uidnumber: 53 +gidnumber: 53 +homeDirectory: /home/uid53 + +dn: cn=user54,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user54 +sn: user54 +uid: uid54 +givenname: givenname54 +description: description54 +userPassword: password54 +mail: uid54 +uidnumber: 54 +gidnumber: 54 +homeDirectory: /home/uid54 + +dn: cn=user55,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user55 +sn: user55 +uid: uid55 +givenname: givenname55 +description: description55 +userPassword: password55 
+mail: uid55 +uidnumber: 55 +gidnumber: 55 +homeDirectory: /home/uid55 + +dn: cn=user56,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user56 +sn: user56 +uid: uid56 +givenname: givenname56 +description: description56 +userPassword: password56 +mail: uid56 +uidnumber: 56 +gidnumber: 56 +homeDirectory: /home/uid56 + +dn: cn=user57,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user57 +sn: user57 +uid: uid57 +givenname: givenname57 +description: description57 +userPassword: password57 +mail: uid57 +uidnumber: 57 +gidnumber: 57 +homeDirectory: /home/uid57 + +dn: cn=user58,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user58 +sn: user58 +uid: uid58 +givenname: givenname58 +description: description58 +userPassword: password58 +mail: uid58 +uidnumber: 58 +gidnumber: 58 +homeDirectory: /home/uid58 + +dn: cn=user59,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user59 +sn: user59 +uid: uid59 +givenname: givenname59 +description: description59 +userPassword: password59 +mail: uid59 +uidnumber: 59 +gidnumber: 59 +homeDirectory: /home/uid59 + +dn: cn=user60,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user60 +sn: user60 +uid: uid60 +givenname: givenname60 +description: description60 +userPassword: password60 +mail: uid60 +uidnumber: 60 +gidnumber: 60 +homeDirectory: /home/uid60 + +dn: cn=user61,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: 
inetOrgPerson +objectClass: posixAccount +cn: user61 +sn: user61 +uid: uid61 +givenname: givenname61 +description: description61 +userPassword: password61 +mail: uid61 +uidnumber: 61 +gidnumber: 61 +homeDirectory: /home/uid61 + +dn: cn=user62,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user62 +sn: user62 +uid: uid62 +givenname: givenname62 +description: description62 +userPassword: password62 +mail: uid62 +uidnumber: 62 +gidnumber: 62 +homeDirectory: /home/uid62 + +dn: cn=user63,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user63 +sn: user63 +uid: uid63 +givenname: givenname63 +description: description63 +userPassword: password63 +mail: uid63 +uidnumber: 63 +gidnumber: 63 +homeDirectory: /home/uid63 + +dn: cn=user64,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user64 +sn: user64 +uid: uid64 +givenname: givenname64 +description: description64 +userPassword: password64 +mail: uid64 +uidnumber: 64 +gidnumber: 64 +homeDirectory: /home/uid64 + +dn: cn=user65,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user65 +sn: user65 +uid: uid65 +givenname: givenname65 +description: description65 +userPassword: password65 +mail: uid65 +uidnumber: 65 +gidnumber: 65 +homeDirectory: /home/uid65 + +dn: cn=user66,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user66 +sn: user66 +uid: uid66 +givenname: givenname66 +description: description66 +userPassword: password66 +mail: uid66 +uidnumber: 66 +gidnumber: 66 
+homeDirectory: /home/uid66 + +dn: cn=user67,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user67 +sn: user67 +uid: uid67 +givenname: givenname67 +description: description67 +userPassword: password67 +mail: uid67 +uidnumber: 67 +gidnumber: 67 +homeDirectory: /home/uid67 + +dn: cn=user68,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user68 +sn: user68 +uid: uid68 +givenname: givenname68 +description: description68 +userPassword: password68 +mail: uid68 +uidnumber: 68 +gidnumber: 68 +homeDirectory: /home/uid68 + +dn: cn=user69,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user69 +sn: user69 +uid: uid69 +givenname: givenname69 +description: description69 +userPassword: password69 +mail: uid69 +uidnumber: 69 +gidnumber: 69 +homeDirectory: /home/uid69 + +dn: cn=user70,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user70 +sn: user70 +uid: uid70 +givenname: givenname70 +description: description70 +userPassword: password70 +mail: uid70 +uidnumber: 70 +gidnumber: 70 +homeDirectory: /home/uid70 + +dn: cn=user71,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user71 +sn: user71 +uid: uid71 +givenname: givenname71 +description: description71 +userPassword: password71 +mail: uid71 +uidnumber: 71 +gidnumber: 71 +homeDirectory: /home/uid71 + +dn: cn=user72,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount 
+cn: user72 +sn: user72 +uid: uid72 +givenname: givenname72 +description: description72 +userPassword: password72 +mail: uid72 +uidnumber: 72 +gidnumber: 72 +homeDirectory: /home/uid72 + +dn: cn=user73,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user73 +sn: user73 +uid: uid73 +givenname: givenname73 +description: description73 +userPassword: password73 +mail: uid73 +uidnumber: 73 +gidnumber: 73 +homeDirectory: /home/uid73 + +dn: cn=user74,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user74 +sn: user74 +uid: uid74 +givenname: givenname74 +description: description74 +userPassword: password74 +mail: uid74 +uidnumber: 74 +gidnumber: 74 +homeDirectory: /home/uid74 + +dn: cn=user75,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user75 +sn: user75 +uid: uid75 +givenname: givenname75 +description: description75 +userPassword: password75 +mail: uid75 +uidnumber: 75 +gidnumber: 75 +homeDirectory: /home/uid75 + +dn: cn=user76,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user76 +sn: user76 +uid: uid76 +givenname: givenname76 +description: description76 +userPassword: password76 +mail: uid76 +uidnumber: 76 +gidnumber: 76 +homeDirectory: /home/uid76 + +dn: cn=user77,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user77 +sn: user77 +uid: uid77 +givenname: givenname77 +description: description77 +userPassword: password77 +mail: uid77 +uidnumber: 77 +gidnumber: 77 +homeDirectory: /home/uid77 + +dn: 
cn=user78,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user78 +sn: user78 +uid: uid78 +givenname: givenname78 +description: description78 +userPassword: password78 +mail: uid78 +uidnumber: 78 +gidnumber: 78 +homeDirectory: /home/uid78 + +dn: cn=user79,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user79 +sn: user79 +uid: uid79 +givenname: givenname79 +description: description79 +userPassword: password79 +mail: uid79 +uidnumber: 79 +gidnumber: 79 +homeDirectory: /home/uid79 + +dn: cn=user80,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user80 +sn: user80 +uid: uid80 +givenname: givenname80 +description: description80 +userPassword: password80 +mail: uid80 +uidnumber: 80 +gidnumber: 80 +homeDirectory: /home/uid80 + +dn: cn=user81,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user81 +sn: user81 +uid: uid81 +givenname: givenname81 +description: description81 +userPassword: password81 +mail: uid81 +uidnumber: 81 +gidnumber: 81 +homeDirectory: /home/uid81 + +dn: cn=user82,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user82 +sn: user82 +uid: uid82 +givenname: givenname82 +description: description82 +userPassword: password82 +mail: uid82 +uidnumber: 82 +gidnumber: 82 +homeDirectory: /home/uid82 + +dn: cn=user83,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user83 +sn: user83 +uid: uid83 
+givenname: givenname83 +description: description83 +userPassword: password83 +mail: uid83 +uidnumber: 83 +gidnumber: 83 +homeDirectory: /home/uid83 + +dn: cn=user84,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user84 +sn: user84 +uid: uid84 +givenname: givenname84 +description: description84 +userPassword: password84 +mail: uid84 +uidnumber: 84 +gidnumber: 84 +homeDirectory: /home/uid84 + +dn: cn=user85,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user85 +sn: user85 +uid: uid85 +givenname: givenname85 +description: description85 +userPassword: password85 +mail: uid85 +uidnumber: 85 +gidnumber: 85 +homeDirectory: /home/uid85 + +dn: cn=user86,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user86 +sn: user86 +uid: uid86 +givenname: givenname86 +description: description86 +userPassword: password86 +mail: uid86 +uidnumber: 86 +gidnumber: 86 +homeDirectory: /home/uid86 + +dn: cn=user87,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user87 +sn: user87 +uid: uid87 +givenname: givenname87 +description: description87 +userPassword: password87 +mail: uid87 +uidnumber: 87 +gidnumber: 87 +homeDirectory: /home/uid87 + +dn: cn=user88,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user88 +sn: user88 +uid: uid88 +givenname: givenname88 +description: description88 +userPassword: password88 +mail: uid88 +uidnumber: 88 +gidnumber: 88 +homeDirectory: /home/uid88 + +dn: cn=user89,ou=People,dc=example,dc=com 
+objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user89 +sn: user89 +uid: uid89 +givenname: givenname89 +description: description89 +userPassword: password89 +mail: uid89 +uidnumber: 89 +gidnumber: 89 +homeDirectory: /home/uid89 + +dn: cn=user90,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user90 +sn: user90 +uid: uid90 +givenname: givenname90 +description: description90 +userPassword: password90 +mail: uid90 +uidnumber: 90 +gidnumber: 90 +homeDirectory: /home/uid90 + +dn: cn=user91,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user91 +sn: user91 +uid: uid91 +givenname: givenname91 +description: description91 +userPassword: password91 +mail: uid91 +uidnumber: 91 +gidnumber: 91 +homeDirectory: /home/uid91 + +dn: cn=user92,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user92 +sn: user92 +uid: uid92 +givenname: givenname92 +description: description92 +userPassword: password92 +mail: uid92 +uidnumber: 92 +gidnumber: 92 +homeDirectory: /home/uid92 + +dn: cn=user93,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user93 +sn: user93 +uid: uid93 +givenname: givenname93 +description: description93 +userPassword: password93 +mail: uid93 +uidnumber: 93 +gidnumber: 93 +homeDirectory: /home/uid93 + +dn: cn=user94,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user94 +sn: user94 +uid: uid94 +givenname: givenname94 +description: 
description94 +userPassword: password94 +mail: uid94 +uidnumber: 94 +gidnumber: 94 +homeDirectory: /home/uid94 + +dn: cn=user95,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user95 +sn: user95 +uid: uid95 +givenname: givenname95 +description: description95 +userPassword: password95 +mail: uid95 +uidnumber: 95 +gidnumber: 95 +homeDirectory: /home/uid95 + +dn: cn=user96,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user96 +sn: user96 +uid: uid96 +givenname: givenname96 +description: description96 +userPassword: password96 +mail: uid96 +uidnumber: 96 +gidnumber: 96 +homeDirectory: /home/uid96 + +dn: cn=user97,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user97 +sn: user97 +uid: uid97 +givenname: givenname97 +description: description97 +userPassword: password97 +mail: uid97 +uidnumber: 97 +gidnumber: 97 +homeDirectory: /home/uid97 + +dn: cn=user98,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user98 +sn: user98 +uid: uid98 +givenname: givenname98 +description: description98 +userPassword: password98 +mail: uid98 +uidnumber: 98 +gidnumber: 98 +homeDirectory: /home/uid98 + +dn: cn=user99,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user99 +sn: user99 +uid: uid99 +givenname: givenname99 +description: description99 +userPassword: password99 +mail: uid99 +uidnumber: 99 +gidnumber: 99 +homeDirectory: /home/uid99 + +dn: cn=user100,ou=People,dc=example,dc=com +objectClass: top +objectClass: person 
+objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user100 +sn: user100 +uid: uid100 +givenname: givenname100 +description: description100 +userPassword: password100 +mail: uid100 +uidnumber: 100 +gidnumber: 100 +homeDirectory: /home/uid100 + +dn: cn=user101,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user101 +sn: user101 +uid: uid101 +givenname: givenname101 +description: description101 +userPassword: password101 +mail: uid101 +uidnumber: 101 +gidnumber: 101 +homeDirectory: /home/uid101 + +dn: cn=user102,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user102 +sn: user102 +uid: uid102 +givenname: givenname102 +description: description102 +userPassword: password102 +mail: uid102 +uidnumber: 102 +gidnumber: 102 +homeDirectory: /home/uid102 + +dn: cn=user103,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user103 +sn: user103 +uid: uid103 +givenname: givenname103 +description: description103 +userPassword: password103 +mail: uid103 +uidnumber: 103 +gidnumber: 103 +homeDirectory: /home/uid103 + +dn: cn=user104,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user104 +sn: user104 +uid: uid104 +givenname: givenname104 +description: description104 +userPassword: password104 +mail: uid104 +uidnumber: 104 +gidnumber: 104 +homeDirectory: /home/uid104 + +dn: cn=user105,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user105 +sn: user105 +uid: uid105 +givenname: 
givenname105 +description: description105 +userPassword: password105 +mail: uid105 +uidnumber: 105 +gidnumber: 105 +homeDirectory: /home/uid105 + +dn: cn=user106,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user106 +sn: user106 +uid: uid106 +givenname: givenname106 +description: description106 +userPassword: password106 +mail: uid106 +uidnumber: 106 +gidnumber: 106 +homeDirectory: /home/uid106 + +dn: cn=user107,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user107 +sn: user107 +uid: uid107 +givenname: givenname107 +description: description107 +userPassword: password107 +mail: uid107 +uidnumber: 107 +gidnumber: 107 +homeDirectory: /home/uid107 + +dn: cn=user108,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user108 +sn: user108 +uid: uid108 +givenname: givenname108 +description: description108 +userPassword: password108 +mail: uid108 +uidnumber: 108 +gidnumber: 108 +homeDirectory: /home/uid108 + +dn: cn=user109,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user109 +sn: user109 +uid: uid109 +givenname: givenname109 +description: description109 +userPassword: password109 +mail: uid109 +uidnumber: 109 +gidnumber: 109 +homeDirectory: /home/uid109 + +dn: cn=user110,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user110 +sn: user110 +uid: uid110 +givenname: givenname110 +description: description110 +userPassword: password110 +mail: uid110 +uidnumber: 110 +gidnumber: 110 +homeDirectory: /home/uid110 + 
+dn: cn=user111,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user111 +sn: user111 +uid: uid111 +givenname: givenname111 +description: description111 +userPassword: password111 +mail: uid111 +uidnumber: 111 +gidnumber: 111 +homeDirectory: /home/uid111 + +dn: cn=user112,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user112 +sn: user112 +uid: uid112 +givenname: givenname112 +description: description112 +userPassword: password112 +mail: uid112 +uidnumber: 112 +gidnumber: 112 +homeDirectory: /home/uid112 + +dn: cn=user113,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user113 +sn: user113 +uid: uid113 +givenname: givenname113 +description: description113 +userPassword: password113 +mail: uid113 +uidnumber: 113 +gidnumber: 113 +homeDirectory: /home/uid113 + +dn: cn=user114,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user114 +sn: user114 +uid: uid114 +givenname: givenname114 +description: description114 +userPassword: password114 +mail: uid114 +uidnumber: 114 +gidnumber: 114 +homeDirectory: /home/uid114 + +dn: cn=user115,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user115 +sn: user115 +uid: uid115 +givenname: givenname115 +description: description115 +userPassword: password115 +mail: uid115 +uidnumber: 115 +gidnumber: 115 +homeDirectory: /home/uid115 + +dn: cn=user116,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson 
+objectClass: posixAccount +cn: user116 +sn: user116 +uid: uid116 +givenname: givenname116 +description: description116 +userPassword: password116 +mail: uid116 +uidnumber: 116 +gidnumber: 116 +homeDirectory: /home/uid116 + +dn: cn=user117,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user117 +sn: user117 +uid: uid117 +givenname: givenname117 +description: description117 +userPassword: password117 +mail: uid117 +uidnumber: 117 +gidnumber: 117 +homeDirectory: /home/uid117 + +dn: cn=user118,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user118 +sn: user118 +uid: uid118 +givenname: givenname118 +description: description118 +userPassword: password118 +mail: uid118 +uidnumber: 118 +gidnumber: 118 +homeDirectory: /home/uid118 + +dn: cn=user119,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user119 +sn: user119 +uid: uid119 +givenname: givenname119 +description: description119 +userPassword: password119 +mail: uid119 +uidnumber: 119 +gidnumber: 119 +homeDirectory: /home/uid119 + +dn: cn=user120,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user120 +sn: user120 +uid: uid120 +givenname: givenname120 +description: description120 +userPassword: password120 +mail: uid120 +uidnumber: 120 +gidnumber: 120 +homeDirectory: /home/uid120 + +dn: cn=user121,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user121 +sn: user121 +uid: uid121 +givenname: givenname121 +description: description121 +userPassword: password121 
+mail: uid121 +uidnumber: 121 +gidnumber: 121 +homeDirectory: /home/uid121 + +dn: cn=user122,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user122 +sn: user122 +uid: uid122 +givenname: givenname122 +description: description122 +userPassword: password122 +mail: uid122 +uidnumber: 122 +gidnumber: 122 +homeDirectory: /home/uid122 + +dn: cn=user123,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user123 +sn: user123 +uid: uid123 +givenname: givenname123 +description: description123 +userPassword: password123 +mail: uid123 +uidnumber: 123 +gidnumber: 123 +homeDirectory: /home/uid123 + +dn: cn=user124,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user124 +sn: user124 +uid: uid124 +givenname: givenname124 +description: description124 +userPassword: password124 +mail: uid124 +uidnumber: 124 +gidnumber: 124 +homeDirectory: /home/uid124 + +dn: cn=user125,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user125 +sn: user125 +uid: uid125 +givenname: givenname125 +description: description125 +userPassword: password125 +mail: uid125 +uidnumber: 125 +gidnumber: 125 +homeDirectory: /home/uid125 + +dn: cn=user126,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user126 +sn: user126 +uid: uid126 +givenname: givenname126 +description: description126 +userPassword: password126 +mail: uid126 +uidnumber: 126 +gidnumber: 126 +homeDirectory: /home/uid126 + +dn: cn=user127,ou=People,dc=example,dc=com +objectClass: top 
+objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user127 +sn: user127 +uid: uid127 +givenname: givenname127 +description: description127 +userPassword: password127 +mail: uid127 +uidnumber: 127 +gidnumber: 127 +homeDirectory: /home/uid127 + +dn: cn=user128,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user128 +sn: user128 +uid: uid128 +givenname: givenname128 +description: description128 +userPassword: password128 +mail: uid128 +uidnumber: 128 +gidnumber: 128 +homeDirectory: /home/uid128 + +dn: cn=user129,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user129 +sn: user129 +uid: uid129 +givenname: givenname129 +description: description129 +userPassword: password129 +mail: uid129 +uidnumber: 129 +gidnumber: 129 +homeDirectory: /home/uid129 + +dn: cn=user130,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user130 +sn: user130 +uid: uid130 +givenname: givenname130 +description: description130 +userPassword: password130 +mail: uid130 +uidnumber: 130 +gidnumber: 130 +homeDirectory: /home/uid130 + +dn: cn=user131,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user131 +sn: user131 +uid: uid131 +givenname: givenname131 +description: description131 +userPassword: password131 +mail: uid131 +uidnumber: 131 +gidnumber: 131 +homeDirectory: /home/uid131 + +dn: cn=user132,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user132 +sn: user132 +uid: uid132 
+givenname: givenname132 +description: description132 +userPassword: password132 +mail: uid132 +uidnumber: 132 +gidnumber: 132 +homeDirectory: /home/uid132 + +dn: cn=user133,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user133 +sn: user133 +uid: uid133 +givenname: givenname133 +description: description133 +userPassword: password133 +mail: uid133 +uidnumber: 133 +gidnumber: 133 +homeDirectory: /home/uid133 + +dn: cn=user134,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user134 +sn: user134 +uid: uid134 +givenname: givenname134 +description: description134 +userPassword: password134 +mail: uid134 +uidnumber: 134 +gidnumber: 134 +homeDirectory: /home/uid134 + +dn: cn=user135,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user135 +sn: user135 +uid: uid135 +givenname: givenname135 +description: description135 +userPassword: password135 +mail: uid135 +uidnumber: 135 +gidnumber: 135 +homeDirectory: /home/uid135 + +dn: cn=user136,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user136 +sn: user136 +uid: uid136 +givenname: givenname136 +description: description136 +userPassword: password136 +mail: uid136 +uidnumber: 136 +gidnumber: 136 +homeDirectory: /home/uid136 + +dn: cn=user137,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user137 +sn: user137 +uid: uid137 +givenname: givenname137 +description: description137 +userPassword: password137 +mail: uid137 +uidnumber: 137 +gidnumber: 137 +homeDirectory: 
/home/uid137 + +dn: cn=user138,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user138 +sn: user138 +uid: uid138 +givenname: givenname138 +description: description138 +userPassword: password138 +mail: uid138 +uidnumber: 138 +gidnumber: 138 +homeDirectory: /home/uid138 + +dn: cn=user139,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user139 +sn: user139 +uid: uid139 +givenname: givenname139 +description: description139 +userPassword: password139 +mail: uid139 +uidnumber: 139 +gidnumber: 139 +homeDirectory: /home/uid139 + +dn: cn=user140,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user140 +sn: user140 +uid: uid140 +givenname: givenname140 +description: description140 +userPassword: password140 +mail: uid140 +uidnumber: 140 +gidnumber: 140 +homeDirectory: /home/uid140 + +dn: cn=user141,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user141 +sn: user141 +uid: uid141 +givenname: givenname141 +description: description141 +userPassword: password141 +mail: uid141 +uidnumber: 141 +gidnumber: 141 +homeDirectory: /home/uid141 + +dn: cn=user142,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user142 +sn: user142 +uid: uid142 +givenname: givenname142 +description: description142 +userPassword: password142 +mail: uid142 +uidnumber: 142 +gidnumber: 142 +homeDirectory: /home/uid142 + +dn: cn=user143,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: 
inetOrgPerson +objectClass: posixAccount +cn: user143 +sn: user143 +uid: uid143 +givenname: givenname143 +description: description143 +userPassword: password143 +mail: uid143 +uidnumber: 143 +gidnumber: 143 +homeDirectory: /home/uid143 + +dn: cn=user144,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user144 +sn: user144 +uid: uid144 +givenname: givenname144 +description: description144 +userPassword: password144 +mail: uid144 +uidnumber: 144 +gidnumber: 144 +homeDirectory: /home/uid144 + +dn: cn=user145,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user145 +sn: user145 +uid: uid145 +givenname: givenname145 +description: description145 +userPassword: password145 +mail: uid145 +uidnumber: 145 +gidnumber: 145 +homeDirectory: /home/uid145 + +dn: cn=user146,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user146 +sn: user146 +uid: uid146 +givenname: givenname146 +description: description146 +userPassword: password146 +mail: uid146 +uidnumber: 146 +gidnumber: 146 +homeDirectory: /home/uid146 + +dn: cn=user147,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user147 +sn: user147 +uid: uid147 +givenname: givenname147 +description: description147 +userPassword: password147 +mail: uid147 +uidnumber: 147 +gidnumber: 147 +homeDirectory: /home/uid147 + +dn: cn=user148,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user148 +sn: user148 +uid: uid148 +givenname: givenname148 +description: description148 +userPassword: 
password148 +mail: uid148 +uidnumber: 148 +gidnumber: 148 +homeDirectory: /home/uid148 + +dn: cn=user149,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user149 +sn: user149 +uid: uid149 +givenname: givenname149 +description: description149 +userPassword: password149 +mail: uid149 +uidnumber: 149 +gidnumber: 149 +homeDirectory: /home/uid149 + +dn: cn=user150,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user150 +sn: user150 +uid: uid150 +givenname: givenname150 +description: description150 +userPassword: password150 +mail: uid150 +uidnumber: 150 +gidnumber: 150 +homeDirectory: /home/uid150 + +dn: cn=user151,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user151 +sn: user151 +uid: uid151 +givenname: givenname151 +description: description151 +userPassword: password151 +mail: uid151 +uidnumber: 151 +gidnumber: 151 +homeDirectory: /home/uid151 + +dn: cn=user152,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user152 +sn: user152 +uid: uid152 +givenname: givenname152 +description: description152 +userPassword: password152 +mail: uid152 +uidnumber: 152 +gidnumber: 152 +homeDirectory: /home/uid152 + +dn: cn=user153,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user153 +sn: user153 +uid: uid153 +givenname: givenname153 +description: description153 +userPassword: password153 +mail: uid153 +uidnumber: 153 +gidnumber: 153 +homeDirectory: /home/uid153 + +dn: cn=user154,ou=People,dc=example,dc=com +objectClass: 
top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user154 +sn: user154 +uid: uid154 +givenname: givenname154 +description: description154 +userPassword: password154 +mail: uid154 +uidnumber: 154 +gidnumber: 154 +homeDirectory: /home/uid154 + +dn: cn=user155,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user155 +sn: user155 +uid: uid155 +givenname: givenname155 +description: description155 +userPassword: password155 +mail: uid155 +uidnumber: 155 +gidnumber: 155 +homeDirectory: /home/uid155 + +dn: cn=user156,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user156 +sn: user156 +uid: uid156 +givenname: givenname156 +description: description156 +userPassword: password156 +mail: uid156 +uidnumber: 156 +gidnumber: 156 +homeDirectory: /home/uid156 + +dn: cn=user157,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user157 +sn: user157 +uid: uid157 +givenname: givenname157 +description: description157 +userPassword: password157 +mail: uid157 +uidnumber: 157 +gidnumber: 157 +homeDirectory: /home/uid157 + +dn: cn=user158,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user158 +sn: user158 +uid: uid158 +givenname: givenname158 +description: description158 +userPassword: password158 +mail: uid158 +uidnumber: 158 +gidnumber: 158 +homeDirectory: /home/uid158 + +dn: cn=user159,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user159 +sn: user159 +uid: 
uid159 +givenname: givenname159 +description: description159 +userPassword: password159 +mail: uid159 +uidnumber: 159 +gidnumber: 159 +homeDirectory: /home/uid159 + +dn: cn=user160,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user160 +sn: user160 +uid: uid160 +givenname: givenname160 +description: description160 +userPassword: password160 +mail: uid160 +uidnumber: 160 +gidnumber: 160 +homeDirectory: /home/uid160 + +dn: cn=user161,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user161 +sn: user161 +uid: uid161 +givenname: givenname161 +description: description161 +userPassword: password161 +mail: uid161 +uidnumber: 161 +gidnumber: 161 +homeDirectory: /home/uid161 + +dn: cn=user162,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user162 +sn: user162 +uid: uid162 +givenname: givenname162 +description: description162 +userPassword: password162 +mail: uid162 +uidnumber: 162 +gidnumber: 162 +homeDirectory: /home/uid162 + +dn: cn=user163,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user163 +sn: user163 +uid: uid163 +givenname: givenname163 +description: description163 +userPassword: password163 +mail: uid163 +uidnumber: 163 +gidnumber: 163 +homeDirectory: /home/uid163 + +dn: cn=user164,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user164 +sn: user164 +uid: uid164 +givenname: givenname164 +description: description164 +userPassword: password164 +mail: uid164 +uidnumber: 164 +gidnumber: 164 +homeDirectory: 
/home/uid164 + +dn: cn=user165,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user165 +sn: user165 +uid: uid165 +givenname: givenname165 +description: description165 +userPassword: password165 +mail: uid165 +uidnumber: 165 +gidnumber: 165 +homeDirectory: /home/uid165 + +dn: cn=user166,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user166 +sn: user166 +uid: uid166 +givenname: givenname166 +description: description166 +userPassword: password166 +mail: uid166 +uidnumber: 166 +gidnumber: 166 +homeDirectory: /home/uid166 + +dn: cn=user167,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user167 +sn: user167 +uid: uid167 +givenname: givenname167 +description: description167 +userPassword: password167 +mail: uid167 +uidnumber: 167 +gidnumber: 167 +homeDirectory: /home/uid167 + +dn: cn=user168,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user168 +sn: user168 +uid: uid168 +givenname: givenname168 +description: description168 +userPassword: password168 +mail: uid168 +uidnumber: 168 +gidnumber: 168 +homeDirectory: /home/uid168 + +dn: cn=user169,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user169 +sn: user169 +uid: uid169 +givenname: givenname169 +description: description169 +userPassword: password169 +mail: uid169 +uidnumber: 169 +gidnumber: 169 +homeDirectory: /home/uid169 + +dn: cn=user170,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: 
inetOrgPerson +objectClass: posixAccount +cn: user170 +sn: user170 +uid: uid170 +givenname: givenname170 +description: description170 +userPassword: password170 +mail: uid170 +uidnumber: 170 +gidnumber: 170 +homeDirectory: /home/uid170 + +dn: cn=user171,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user171 +sn: user171 +uid: uid171 +givenname: givenname171 +description: description171 +userPassword: password171 +mail: uid171 +uidnumber: 171 +gidnumber: 171 +homeDirectory: /home/uid171 + +dn: cn=user172,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user172 +sn: user172 +uid: uid172 +givenname: givenname172 +description: description172 +userPassword: password172 +mail: uid172 +uidnumber: 172 +gidnumber: 172 +homeDirectory: /home/uid172 + +dn: cn=user173,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user173 +sn: user173 +uid: uid173 +givenname: givenname173 +description: description173 +userPassword: password173 +mail: uid173 +uidnumber: 173 +gidnumber: 173 +homeDirectory: /home/uid173 + +dn: cn=user174,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user174 +sn: user174 +uid: uid174 +givenname: givenname174 +description: description174 +userPassword: password174 +mail: uid174 +uidnumber: 174 +gidnumber: 174 +homeDirectory: /home/uid174 + +dn: cn=user175,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user175 +sn: user175 +uid: uid175 +givenname: givenname175 +description: description175 +userPassword: 
password175 +mail: uid175 +uidnumber: 175 +gidnumber: 175 +homeDirectory: /home/uid175 + +dn: cn=user176,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user176 +sn: user176 +uid: uid176 +givenname: givenname176 +description: description176 +userPassword: password176 +mail: uid176 +uidnumber: 176 +gidnumber: 176 +homeDirectory: /home/uid176 + +dn: cn=user177,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user177 +sn: user177 +uid: uid177 +givenname: givenname177 +description: description177 +userPassword: password177 +mail: uid177 +uidnumber: 177 +gidnumber: 177 +homeDirectory: /home/uid177 + +dn: cn=user178,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user178 +sn: user178 +uid: uid178 +givenname: givenname178 +description: description178 +userPassword: password178 +mail: uid178 +uidnumber: 178 +gidnumber: 178 +homeDirectory: /home/uid178 + +dn: cn=user179,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user179 +sn: user179 +uid: uid179 +givenname: givenname179 +description: description179 +userPassword: password179 +mail: uid179 +uidnumber: 179 +gidnumber: 179 +homeDirectory: /home/uid179 + +dn: cn=user180,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user180 +sn: user180 +uid: uid180 +givenname: givenname180 +description: description180 +userPassword: password180 +mail: uid180 +uidnumber: 180 +gidnumber: 180 +homeDirectory: /home/uid180 + +dn: cn=user181,ou=People,dc=example,dc=com +objectClass: 
top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user181 +sn: user181 +uid: uid181 +givenname: givenname181 +description: description181 +userPassword: password181 +mail: uid181 +uidnumber: 181 +gidnumber: 181 +homeDirectory: /home/uid181 + +dn: cn=user182,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user182 +sn: user182 +uid: uid182 +givenname: givenname182 +description: description182 +userPassword: password182 +mail: uid182 +uidnumber: 182 +gidnumber: 182 +homeDirectory: /home/uid182 + +dn: cn=user183,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user183 +sn: user183 +uid: uid183 +givenname: givenname183 +description: description183 +userPassword: password183 +mail: uid183 +uidnumber: 183 +gidnumber: 183 +homeDirectory: /home/uid183 + +dn: cn=user184,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user184 +sn: user184 +uid: uid184 +givenname: givenname184 +description: description184 +userPassword: password184 +mail: uid184 +uidnumber: 184 +gidnumber: 184 +homeDirectory: /home/uid184 + +dn: cn=user185,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user185 +sn: user185 +uid: uid185 +givenname: givenname185 +description: description185 +userPassword: password185 +mail: uid185 +uidnumber: 185 +gidnumber: 185 +homeDirectory: /home/uid185 + +dn: cn=user186,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user186 +sn: user186 +uid: 
uid186 +givenname: givenname186 +description: description186 +userPassword: password186 +mail: uid186 +uidnumber: 186 +gidnumber: 186 +homeDirectory: /home/uid186 + +dn: cn=user187,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user187 +sn: user187 +uid: uid187 +givenname: givenname187 +description: description187 +userPassword: password187 +mail: uid187 +uidnumber: 187 +gidnumber: 187 +homeDirectory: /home/uid187 + +dn: cn=user188,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user188 +sn: user188 +uid: uid188 +givenname: givenname188 +description: description188 +userPassword: password188 +mail: uid188 +uidnumber: 188 +gidnumber: 188 +homeDirectory: /home/uid188 + +dn: cn=user189,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user189 +sn: user189 +uid: uid189 +givenname: givenname189 +description: description189 +userPassword: password189 +mail: uid189 +uidnumber: 189 +gidnumber: 189 +homeDirectory: /home/uid189 + +dn: cn=user190,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user190 +sn: user190 +uid: uid190 +givenname: givenname190 +description: description190 +userPassword: password190 +mail: uid190 +uidnumber: 190 +gidnumber: 190 +homeDirectory: /home/uid190 + +dn: cn=user191,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user191 +sn: user191 +uid: uid191 +givenname: givenname191 +description: description191 +userPassword: password191 +mail: uid191 +uidnumber: 191 +gidnumber: 191 +homeDirectory: 
/home/uid191 + +dn: cn=user192,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user192 +sn: user192 +uid: uid192 +givenname: givenname192 +description: description192 +userPassword: password192 +mail: uid192 +uidnumber: 192 +gidnumber: 192 +homeDirectory: /home/uid192 + +dn: cn=user193,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user193 +sn: user193 +uid: uid193 +givenname: givenname193 +description: description193 +userPassword: password193 +mail: uid193 +uidnumber: 193 +gidnumber: 193 +homeDirectory: /home/uid193 + +dn: cn=user194,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user194 +sn: user194 +uid: uid194 +givenname: givenname194 +description: description194 +userPassword: password194 +mail: uid194 +uidnumber: 194 +gidnumber: 194 +homeDirectory: /home/uid194 + +dn: cn=user195,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user195 +sn: user195 +uid: uid195 +givenname: givenname195 +description: description195 +userPassword: password195 +mail: uid195 +uidnumber: 195 +gidnumber: 195 +homeDirectory: /home/uid195 + +dn: cn=user196,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user196 +sn: user196 +uid: uid196 +givenname: givenname196 +description: description196 +userPassword: password196 +mail: uid196 +uidnumber: 196 +gidnumber: 196 +homeDirectory: /home/uid196 + +dn: cn=user197,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: 
inetOrgPerson +objectClass: posixAccount +cn: user197 +sn: user197 +uid: uid197 +givenname: givenname197 +description: description197 +userPassword: password197 +mail: uid197 +uidnumber: 197 +gidnumber: 197 +homeDirectory: /home/uid197 + +dn: cn=user198,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user198 +sn: user198 +uid: uid198 +givenname: givenname198 +description: description198 +userPassword: password198 +mail: uid198 +uidnumber: 198 +gidnumber: 198 +homeDirectory: /home/uid198 + +dn: cn=user199,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user199 +sn: user199 +uid: uid199 +givenname: givenname199 +description: description199 +userPassword: password199 +mail: uid199 +uidnumber: 199 +gidnumber: 199 +homeDirectory: /home/uid199 + +dn: cn=user200,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user200 +sn: user200 +uid: uid200 +givenname: givenname200 +description: description200 +userPassword: password200 +mail: uid200 +uidnumber: 200 +gidnumber: 200 +homeDirectory: /home/uid200 + +dn: cn=user201,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user201 +sn: user201 +uid: uid201 +givenname: givenname201 +description: description201 +userPassword: password201 +mail: uid201 +uidnumber: 201 +gidnumber: 201 +homeDirectory: /home/uid201 + +dn: cn=user202,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user202 +sn: user202 +uid: uid202 +givenname: givenname202 +description: description202 +userPassword: 
password202 +mail: uid202 +uidnumber: 202 +gidnumber: 202 +homeDirectory: /home/uid202 + +dn: cn=user203,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user203 +sn: user203 +uid: uid203 +givenname: givenname203 +description: description203 +userPassword: password203 +mail: uid203 +uidnumber: 203 +gidnumber: 203 +homeDirectory: /home/uid203 + +dn: cn=user204,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user204 +sn: user204 +uid: uid204 +givenname: givenname204 +description: description204 +userPassword: password204 +mail: uid204 +uidnumber: 204 +gidnumber: 204 +homeDirectory: /home/uid204 + +dn: cn=user205,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user205 +sn: user205 +uid: uid205 +givenname: givenname205 +description: description205 +userPassword: password205 +mail: uid205 +uidnumber: 205 +gidnumber: 205 +homeDirectory: /home/uid205 + +dn: cn=user206,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user206 +sn: user206 +uid: uid206 +givenname: givenname206 +description: description206 +userPassword: password206 +mail: uid206 +uidnumber: 206 +gidnumber: 206 +homeDirectory: /home/uid206 + +dn: cn=user207,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user207 +sn: user207 +uid: uid207 +givenname: givenname207 +description: description207 +userPassword: password207 +mail: uid207 +uidnumber: 207 +gidnumber: 207 +homeDirectory: /home/uid207 + +dn: cn=user208,ou=People,dc=example,dc=com +objectClass: 
top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user208 +sn: user208 +uid: uid208 +givenname: givenname208 +description: description208 +userPassword: password208 +mail: uid208 +uidnumber: 208 +gidnumber: 208 +homeDirectory: /home/uid208 + +dn: cn=user209,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user209 +sn: user209 +uid: uid209 +givenname: givenname209 +description: description209 +userPassword: password209 +mail: uid209 +uidnumber: 209 +gidnumber: 209 +homeDirectory: /home/uid209 + +dn: cn=user210,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user210 +sn: user210 +uid: uid210 +givenname: givenname210 +description: description210 +userPassword: password210 +mail: uid210 +uidnumber: 210 +gidnumber: 210 +homeDirectory: /home/uid210 + +dn: cn=user211,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user211 +sn: user211 +uid: uid211 +givenname: givenname211 +description: description211 +userPassword: password211 +mail: uid211 +uidnumber: 211 +gidnumber: 211 +homeDirectory: /home/uid211 + +dn: cn=user212,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user212 +sn: user212 +uid: uid212 +givenname: givenname212 +description: description212 +userPassword: password212 +mail: uid212 +uidnumber: 212 +gidnumber: 212 +homeDirectory: /home/uid212 + +dn: cn=user213,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user213 +sn: user213 +uid: 
uid213 +givenname: givenname213 +description: description213 +userPassword: password213 +mail: uid213 +uidnumber: 213 +gidnumber: 213 +homeDirectory: /home/uid213 + +dn: cn=user214,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user214 +sn: user214 +uid: uid214 +givenname: givenname214 +description: description214 +userPassword: password214 +mail: uid214 +uidnumber: 214 +gidnumber: 214 +homeDirectory: /home/uid214 + +dn: cn=user215,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user215 +sn: user215 +uid: uid215 +givenname: givenname215 +description: description215 +userPassword: password215 +mail: uid215 +uidnumber: 215 +gidnumber: 215 +homeDirectory: /home/uid215 + +dn: cn=user216,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user216 +sn: user216 +uid: uid216 +givenname: givenname216 +description: description216 +userPassword: password216 +mail: uid216 +uidnumber: 216 +gidnumber: 216 +homeDirectory: /home/uid216 + +dn: cn=user217,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user217 +sn: user217 +uid: uid217 +givenname: givenname217 +description: description217 +userPassword: password217 +mail: uid217 +uidnumber: 217 +gidnumber: 217 +homeDirectory: /home/uid217 + +dn: cn=user218,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user218 +sn: user218 +uid: uid218 +givenname: givenname218 +description: description218 +userPassword: password218 +mail: uid218 +uidnumber: 218 +gidnumber: 218 +homeDirectory: 
/home/uid218 + +dn: cn=user219,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user219 +sn: user219 +uid: uid219 +givenname: givenname219 +description: description219 +userPassword: password219 +mail: uid219 +uidnumber: 219 +gidnumber: 219 +homeDirectory: /home/uid219 + +dn: cn=user220,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user220 +sn: user220 +uid: uid220 +givenname: givenname220 +description: description220 +userPassword: password220 +mail: uid220 +uidnumber: 220 +gidnumber: 220 +homeDirectory: /home/uid220 + +dn: cn=user221,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user221 +sn: user221 +uid: uid221 +givenname: givenname221 +description: description221 +userPassword: password221 +mail: uid221 +uidnumber: 221 +gidnumber: 221 +homeDirectory: /home/uid221 + +dn: cn=user222,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user222 +sn: user222 +uid: uid222 +givenname: givenname222 +description: description222 +userPassword: password222 +mail: uid222 +uidnumber: 222 +gidnumber: 222 +homeDirectory: /home/uid222 + +dn: cn=user223,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user223 +sn: user223 +uid: uid223 +givenname: givenname223 +description: description223 +userPassword: password223 +mail: uid223 +uidnumber: 223 +gidnumber: 223 +homeDirectory: /home/uid223 + +dn: cn=user224,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: 
inetOrgPerson +objectClass: posixAccount +cn: user224 +sn: user224 +uid: uid224 +givenname: givenname224 +description: description224 +userPassword: password224 +mail: uid224 +uidnumber: 224 +gidnumber: 224 +homeDirectory: /home/uid224 + +dn: cn=user225,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user225 +sn: user225 +uid: uid225 +givenname: givenname225 +description: description225 +userPassword: password225 +mail: uid225 +uidnumber: 225 +gidnumber: 225 +homeDirectory: /home/uid225 + +dn: cn=user226,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user226 +sn: user226 +uid: uid226 +givenname: givenname226 +description: description226 +userPassword: password226 +mail: uid226 +uidnumber: 226 +gidnumber: 226 +homeDirectory: /home/uid226 + +dn: cn=user227,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user227 +sn: user227 +uid: uid227 +givenname: givenname227 +description: description227 +userPassword: password227 +mail: uid227 +uidnumber: 227 +gidnumber: 227 +homeDirectory: /home/uid227 + +dn: cn=user228,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user228 +sn: user228 +uid: uid228 +givenname: givenname228 +description: description228 +userPassword: password228 +mail: uid228 +uidnumber: 228 +gidnumber: 228 +homeDirectory: /home/uid228 + +dn: cn=user229,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user229 +sn: user229 +uid: uid229 +givenname: givenname229 +description: description229 +userPassword: 
password229 +mail: uid229 +uidnumber: 229 +gidnumber: 229 +homeDirectory: /home/uid229 + +dn: cn=user230,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user230 +sn: user230 +uid: uid230 +givenname: givenname230 +description: description230 +userPassword: password230 +mail: uid230 +uidnumber: 230 +gidnumber: 230 +homeDirectory: /home/uid230 + +dn: cn=user231,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user231 +sn: user231 +uid: uid231 +givenname: givenname231 +description: description231 +userPassword: password231 +mail: uid231 +uidnumber: 231 +gidnumber: 231 +homeDirectory: /home/uid231 + +dn: cn=user232,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user232 +sn: user232 +uid: uid232 +givenname: givenname232 +description: description232 +userPassword: password232 +mail: uid232 +uidnumber: 232 +gidnumber: 232 +homeDirectory: /home/uid232 + +dn: cn=user233,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user233 +sn: user233 +uid: uid233 +givenname: givenname233 +description: description233 +userPassword: password233 +mail: uid233 +uidnumber: 233 +gidnumber: 233 +homeDirectory: /home/uid233 + +dn: cn=user234,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user234 +sn: user234 +uid: uid234 +givenname: givenname234 +description: description234 +userPassword: password234 +mail: uid234 +uidnumber: 234 +gidnumber: 234 +homeDirectory: /home/uid234 + +dn: cn=user235,ou=People,dc=example,dc=com +objectClass: 
top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user235 +sn: user235 +uid: uid235 +givenname: givenname235 +description: description235 +userPassword: password235 +mail: uid235 +uidnumber: 235 +gidnumber: 235 +homeDirectory: /home/uid235 + +dn: cn=user236,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user236 +sn: user236 +uid: uid236 +givenname: givenname236 +description: description236 +userPassword: password236 +mail: uid236 +uidnumber: 236 +gidnumber: 236 +homeDirectory: /home/uid236 + +dn: cn=user237,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user237 +sn: user237 +uid: uid237 +givenname: givenname237 +description: description237 +userPassword: password237 +mail: uid237 +uidnumber: 237 +gidnumber: 237 +homeDirectory: /home/uid237 + +dn: cn=user238,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user238 +sn: user238 +uid: uid238 +givenname: givenname238 +description: description238 +userPassword: password238 +mail: uid238 +uidnumber: 238 +gidnumber: 238 +homeDirectory: /home/uid238 + +dn: cn=user239,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user239 +sn: user239 +uid: uid239 +givenname: givenname239 +description: description239 +userPassword: password239 +mail: uid239 +uidnumber: 239 +gidnumber: 239 +homeDirectory: /home/uid239 + +dn: cn=user240,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user240 +sn: user240 +uid: 
uid240 +givenname: givenname240 +description: description240 +userPassword: password240 +mail: uid240 +uidnumber: 240 +gidnumber: 240 +homeDirectory: /home/uid240 + +dn: cn=user241,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user241 +sn: user241 +uid: uid241 +givenname: givenname241 +description: description241 +userPassword: password241 +mail: uid241 +uidnumber: 241 +gidnumber: 241 +homeDirectory: /home/uid241 + +dn: cn=user242,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user242 +sn: user242 +uid: uid242 +givenname: givenname242 +description: description242 +userPassword: password242 +mail: uid242 +uidnumber: 242 +gidnumber: 242 +homeDirectory: /home/uid242 + +dn: cn=user243,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user243 +sn: user243 +uid: uid243 +givenname: givenname243 +description: description243 +userPassword: password243 +mail: uid243 +uidnumber: 243 +gidnumber: 243 +homeDirectory: /home/uid243 + +dn: cn=user244,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user244 +sn: user244 +uid: uid244 +givenname: givenname244 +description: description244 +userPassword: password244 +mail: uid244 +uidnumber: 244 +gidnumber: 244 +homeDirectory: /home/uid244 + +dn: cn=user245,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user245 +sn: user245 +uid: uid245 +givenname: givenname245 +description: description245 +userPassword: password245 +mail: uid245 +uidnumber: 245 +gidnumber: 245 +homeDirectory: 
/home/uid245 + +dn: cn=user246,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user246 +sn: user246 +uid: uid246 +givenname: givenname246 +description: description246 +userPassword: password246 +mail: uid246 +uidnumber: 246 +gidnumber: 246 +homeDirectory: /home/uid246 + +dn: cn=user247,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user247 +sn: user247 +uid: uid247 +givenname: givenname247 +description: description247 +userPassword: password247 +mail: uid247 +uidnumber: 247 +gidnumber: 247 +homeDirectory: /home/uid247 + +dn: cn=user248,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user248 +sn: user248 +uid: uid248 +givenname: givenname248 +description: description248 +userPassword: password248 +mail: uid248 +uidnumber: 248 +gidnumber: 248 +homeDirectory: /home/uid248 + +dn: cn=user249,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user249 +sn: user249 +uid: uid249 +givenname: givenname249 +description: description249 +userPassword: password249 +mail: uid249 +uidnumber: 249 +gidnumber: 249 +homeDirectory: /home/uid249 + +dn: cn=user250,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user250 +sn: user250 +uid: uid250 +givenname: givenname250 +description: description250 +userPassword: password250 +mail: uid250 +uidnumber: 250 +gidnumber: 250 +homeDirectory: /home/uid250 + +dn: cn=user251,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: 
inetOrgPerson +objectClass: posixAccount +cn: user251 +sn: user251 +uid: uid251 +givenname: givenname251 +description: description251 +userPassword: password251 +mail: uid251 +uidnumber: 251 +gidnumber: 251 +homeDirectory: /home/uid251 + +dn: cn=user252,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user252 +sn: user252 +uid: uid252 +givenname: givenname252 +description: description252 +userPassword: password252 +mail: uid252 +uidnumber: 252 +gidnumber: 252 +homeDirectory: /home/uid252 + +dn: cn=user253,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user253 +sn: user253 +uid: uid253 +givenname: givenname253 +description: description253 +userPassword: password253 +mail: uid253 +uidnumber: 253 +gidnumber: 253 +homeDirectory: /home/uid253 + +dn: cn=user254,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user254 +sn: user254 +uid: uid254 +givenname: givenname254 +description: description254 +userPassword: password254 +mail: uid254 +uidnumber: 254 +gidnumber: 254 +homeDirectory: /home/uid254 + +dn: cn=user255,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user255 +sn: user255 +uid: uid255 +givenname: givenname255 +description: description255 +userPassword: password255 +mail: uid255 +uidnumber: 255 +gidnumber: 255 +homeDirectory: /home/uid255 + +dn: cn=user256,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user256 +sn: user256 +uid: uid256 +givenname: givenname256 +description: description256 +userPassword: 
password256 +mail: uid256 +uidnumber: 256 +gidnumber: 256 +homeDirectory: /home/uid256 + +dn: cn=user257,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user257 +sn: user257 +uid: uid257 +givenname: givenname257 +description: description257 +userPassword: password257 +mail: uid257 +uidnumber: 257 +gidnumber: 257 +homeDirectory: /home/uid257 + +dn: cn=user258,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user258 +sn: user258 +uid: uid258 +givenname: givenname258 +description: description258 +userPassword: password258 +mail: uid258 +uidnumber: 258 +gidnumber: 258 +homeDirectory: /home/uid258 + +dn: cn=user259,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user259 +sn: user259 +uid: uid259 +givenname: givenname259 +description: description259 +userPassword: password259 +mail: uid259 +uidnumber: 259 +gidnumber: 259 +homeDirectory: /home/uid259 + +dn: cn=user260,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user260 +sn: user260 +uid: uid260 +givenname: givenname260 +description: description260 +userPassword: password260 +mail: uid260 +uidnumber: 260 +gidnumber: 260 +homeDirectory: /home/uid260 + +dn: cn=user261,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user261 +sn: user261 +uid: uid261 +givenname: givenname261 +description: description261 +userPassword: password261 +mail: uid261 +uidnumber: 261 +gidnumber: 261 +homeDirectory: /home/uid261 + +dn: cn=user262,ou=People,dc=example,dc=com +objectClass: 
top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user262 +sn: user262 +uid: uid262 +givenname: givenname262 +description: description262 +userPassword: password262 +mail: uid262 +uidnumber: 262 +gidnumber: 262 +homeDirectory: /home/uid262 + +dn: cn=user263,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user263 +sn: user263 +uid: uid263 +givenname: givenname263 +description: description263 +userPassword: password263 +mail: uid263 +uidnumber: 263 +gidnumber: 263 +homeDirectory: /home/uid263 + +dn: cn=user264,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user264 +sn: user264 +uid: uid264 +givenname: givenname264 +description: description264 +userPassword: password264 +mail: uid264 +uidnumber: 264 +gidnumber: 264 +homeDirectory: /home/uid264 + +dn: cn=user265,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user265 +sn: user265 +uid: uid265 +givenname: givenname265 +description: description265 +userPassword: password265 +mail: uid265 +uidnumber: 265 +gidnumber: 265 +homeDirectory: /home/uid265 + +dn: cn=user266,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user266 +sn: user266 +uid: uid266 +givenname: givenname266 +description: description266 +userPassword: password266 +mail: uid266 +uidnumber: 266 +gidnumber: 266 +homeDirectory: /home/uid266 + +dn: cn=user267,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user267 +sn: user267 +uid: 
uid267 +givenname: givenname267 +description: description267 +userPassword: password267 +mail: uid267 +uidnumber: 267 +gidnumber: 267 +homeDirectory: /home/uid267 + +dn: cn=user268,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user268 +sn: user268 +uid: uid268 +givenname: givenname268 +description: description268 +userPassword: password268 +mail: uid268 +uidnumber: 268 +gidnumber: 268 +homeDirectory: /home/uid268 + +dn: cn=user269,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user269 +sn: user269 +uid: uid269 +givenname: givenname269 +description: description269 +userPassword: password269 +mail: uid269 +uidnumber: 269 +gidnumber: 269 +homeDirectory: /home/uid269 + +dn: cn=user270,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user270 +sn: user270 +uid: uid270 +givenname: givenname270 +description: description270 +userPassword: password270 +mail: uid270 +uidnumber: 270 +gidnumber: 270 +homeDirectory: /home/uid270 + +dn: cn=user271,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user271 +sn: user271 +uid: uid271 +givenname: givenname271 +description: description271 +userPassword: password271 +mail: uid271 +uidnumber: 271 +gidnumber: 271 +homeDirectory: /home/uid271 + +dn: cn=user272,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user272 +sn: user272 +uid: uid272 +givenname: givenname272 +description: description272 +userPassword: password272 +mail: uid272 +uidnumber: 272 +gidnumber: 272 +homeDirectory: 
/home/uid272 + +dn: cn=user273,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user273 +sn: user273 +uid: uid273 +givenname: givenname273 +description: description273 +userPassword: password273 +mail: uid273 +uidnumber: 273 +gidnumber: 273 +homeDirectory: /home/uid273 + +dn: cn=user274,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user274 +sn: user274 +uid: uid274 +givenname: givenname274 +description: description274 +userPassword: password274 +mail: uid274 +uidnumber: 274 +gidnumber: 274 +homeDirectory: /home/uid274 + +dn: cn=user275,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user275 +sn: user275 +uid: uid275 +givenname: givenname275 +description: description275 +userPassword: password275 +mail: uid275 +uidnumber: 275 +gidnumber: 275 +homeDirectory: /home/uid275 + +dn: cn=user276,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user276 +sn: user276 +uid: uid276 +givenname: givenname276 +description: description276 +userPassword: password276 +mail: uid276 +uidnumber: 276 +gidnumber: 276 +homeDirectory: /home/uid276 + +dn: cn=user277,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user277 +sn: user277 +uid: uid277 +givenname: givenname277 +description: description277 +userPassword: password277 +mail: uid277 +uidnumber: 277 +gidnumber: 277 +homeDirectory: /home/uid277 + +dn: cn=user278,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: 
inetOrgPerson +objectClass: posixAccount +cn: user278 +sn: user278 +uid: uid278 +givenname: givenname278 +description: description278 +userPassword: password278 +mail: uid278 +uidnumber: 278 +gidnumber: 278 +homeDirectory: /home/uid278 + +dn: cn=user279,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user279 +sn: user279 +uid: uid279 +givenname: givenname279 +description: description279 +userPassword: password279 +mail: uid279 +uidnumber: 279 +gidnumber: 279 +homeDirectory: /home/uid279 + +dn: cn=user280,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user280 +sn: user280 +uid: uid280 +givenname: givenname280 +description: description280 +userPassword: password280 +mail: uid280 +uidnumber: 280 +gidnumber: 280 +homeDirectory: /home/uid280 + +dn: cn=user281,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user281 +sn: user281 +uid: uid281 +givenname: givenname281 +description: description281 +userPassword: password281 +mail: uid281 +uidnumber: 281 +gidnumber: 281 +homeDirectory: /home/uid281 + +dn: cn=user282,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user282 +sn: user282 +uid: uid282 +givenname: givenname282 +description: description282 +userPassword: password282 +mail: uid282 +uidnumber: 282 +gidnumber: 282 +homeDirectory: /home/uid282 + +dn: cn=user283,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user283 +sn: user283 +uid: uid283 +givenname: givenname283 +description: description283 +userPassword: 
password283 +mail: uid283 +uidnumber: 283 +gidnumber: 283 +homeDirectory: /home/uid283 + +dn: cn=user284,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user284 +sn: user284 +uid: uid284 +givenname: givenname284 +description: description284 +userPassword: password284 +mail: uid284 +uidnumber: 284 +gidnumber: 284 +homeDirectory: /home/uid284 + +dn: cn=user285,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user285 +sn: user285 +uid: uid285 +givenname: givenname285 +description: description285 +userPassword: password285 +mail: uid285 +uidnumber: 285 +gidnumber: 285 +homeDirectory: /home/uid285 + +dn: cn=user286,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user286 +sn: user286 +uid: uid286 +givenname: givenname286 +description: description286 +userPassword: password286 +mail: uid286 +uidnumber: 286 +gidnumber: 286 +homeDirectory: /home/uid286 + +dn: cn=user287,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user287 +sn: user287 +uid: uid287 +givenname: givenname287 +description: description287 +userPassword: password287 +mail: uid287 +uidnumber: 287 +gidnumber: 287 +homeDirectory: /home/uid287 + +dn: cn=user288,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user288 +sn: user288 +uid: uid288 +givenname: givenname288 +description: description288 +userPassword: password288 +mail: uid288 +uidnumber: 288 +gidnumber: 288 +homeDirectory: /home/uid288 + +dn: cn=user289,ou=People,dc=example,dc=com +objectClass: 
top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user289 +sn: user289 +uid: uid289 +givenname: givenname289 +description: description289 +userPassword: password289 +mail: uid289 +uidnumber: 289 +gidnumber: 289 +homeDirectory: /home/uid289 + +dn: cn=user290,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user290 +sn: user290 +uid: uid290 +givenname: givenname290 +description: description290 +userPassword: password290 +mail: uid290 +uidnumber: 290 +gidnumber: 290 +homeDirectory: /home/uid290 + +dn: cn=user291,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user291 +sn: user291 +uid: uid291 +givenname: givenname291 +description: description291 +userPassword: password291 +mail: uid291 +uidnumber: 291 +gidnumber: 291 +homeDirectory: /home/uid291 + +dn: cn=user292,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user292 +sn: user292 +uid: uid292 +givenname: givenname292 +description: description292 +userPassword: password292 +mail: uid292 +uidnumber: 292 +gidnumber: 292 +homeDirectory: /home/uid292 + +dn: cn=user293,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user293 +sn: user293 +uid: uid293 +givenname: givenname293 +description: description293 +userPassword: password293 +mail: uid293 +uidnumber: 293 +gidnumber: 293 +homeDirectory: /home/uid293 + +dn: cn=user294,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user294 +sn: user294 +uid: 
uid294 +givenname: givenname294 +description: description294 +userPassword: password294 +mail: uid294 +uidnumber: 294 +gidnumber: 294 +homeDirectory: /home/uid294 + +dn: cn=user295,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user295 +sn: user295 +uid: uid295 +givenname: givenname295 +description: description295 +userPassword: password295 +mail: uid295 +uidnumber: 295 +gidnumber: 295 +homeDirectory: /home/uid295 + +dn: cn=user296,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user296 +sn: user296 +uid: uid296 +givenname: givenname296 +description: description296 +userPassword: password296 +mail: uid296 +uidnumber: 296 +gidnumber: 296 +homeDirectory: /home/uid296 + +dn: cn=user297,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user297 +sn: user297 +uid: uid297 +givenname: givenname297 +description: description297 +userPassword: password297 +mail: uid297 +uidnumber: 297 +gidnumber: 297 +homeDirectory: /home/uid297 + +dn: cn=user298,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user298 +sn: user298 +uid: uid298 +givenname: givenname298 +description: description298 +userPassword: password298 +mail: uid298 +uidnumber: 298 +gidnumber: 298 +homeDirectory: /home/uid298 + +dn: cn=user299,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user299 +sn: user299 +uid: uid299 +givenname: givenname299 +description: description299 +userPassword: password299 +mail: uid299 +uidnumber: 299 +gidnumber: 299 +homeDirectory: 
/home/uid299 + +dn: cn=user300,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user300 +sn: user300 +uid: uid300 +givenname: givenname300 +description: description300 +userPassword: password300 +mail: uid300 +uidnumber: 300 +gidnumber: 300 +homeDirectory: /home/uid300 + +dn: cn=user301,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user301 +sn: user301 +uid: uid301 +givenname: givenname301 +description: description301 +userPassword: password301 +mail: uid301 +uidnumber: 301 +gidnumber: 301 +homeDirectory: /home/uid301 + +dn: cn=user302,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user302 +sn: user302 +uid: uid302 +givenname: givenname302 +description: description302 +userPassword: password302 +mail: uid302 +uidnumber: 302 +gidnumber: 302 +homeDirectory: /home/uid302 + +dn: cn=user303,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user303 +sn: user303 +uid: uid303 +givenname: givenname303 +description: description303 +userPassword: password303 +mail: uid303 +uidnumber: 303 +gidnumber: 303 +homeDirectory: /home/uid303 + +dn: cn=user304,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user304 +sn: user304 +uid: uid304 +givenname: givenname304 +description: description304 +userPassword: password304 +mail: uid304 +uidnumber: 304 +gidnumber: 304 +homeDirectory: /home/uid304 + +dn: cn=user305,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: 
inetOrgPerson +objectClass: posixAccount +cn: user305 +sn: user305 +uid: uid305 +givenname: givenname305 +description: description305 +userPassword: password305 +mail: uid305 +uidnumber: 305 +gidnumber: 305 +homeDirectory: /home/uid305 + +dn: cn=user306,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user306 +sn: user306 +uid: uid306 +givenname: givenname306 +description: description306 +userPassword: password306 +mail: uid306 +uidnumber: 306 +gidnumber: 306 +homeDirectory: /home/uid306 + +dn: cn=user307,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user307 +sn: user307 +uid: uid307 +givenname: givenname307 +description: description307 +userPassword: password307 +mail: uid307 +uidnumber: 307 +gidnumber: 307 +homeDirectory: /home/uid307 + +dn: cn=user308,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user308 +sn: user308 +uid: uid308 +givenname: givenname308 +description: description308 +userPassword: password308 +mail: uid308 +uidnumber: 308 +gidnumber: 308 +homeDirectory: /home/uid308 + +dn: cn=user309,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user309 +sn: user309 +uid: uid309 +givenname: givenname309 +description: description309 +userPassword: password309 +mail: uid309 +uidnumber: 309 +gidnumber: 309 +homeDirectory: /home/uid309 + +dn: cn=user310,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user310 +sn: user310 +uid: uid310 +givenname: givenname310 +description: description310 +userPassword: 
password310 +mail: uid310 +uidnumber: 310 +gidnumber: 310 +homeDirectory: /home/uid310 + +dn: cn=user311,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user311 +sn: user311 +uid: uid311 +givenname: givenname311 +description: description311 +userPassword: password311 +mail: uid311 +uidnumber: 311 +gidnumber: 311 +homeDirectory: /home/uid311 + +dn: cn=user312,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user312 +sn: user312 +uid: uid312 +givenname: givenname312 +description: description312 +userPassword: password312 +mail: uid312 +uidnumber: 312 +gidnumber: 312 +homeDirectory: /home/uid312 + +dn: cn=user313,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user313 +sn: user313 +uid: uid313 +givenname: givenname313 +description: description313 +userPassword: password313 +mail: uid313 +uidnumber: 313 +gidnumber: 313 +homeDirectory: /home/uid313 + +dn: cn=user314,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user314 +sn: user314 +uid: uid314 +givenname: givenname314 +description: description314 +userPassword: password314 +mail: uid314 +uidnumber: 314 +gidnumber: 314 +homeDirectory: /home/uid314 + +dn: cn=user315,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user315 +sn: user315 +uid: uid315 +givenname: givenname315 +description: description315 +userPassword: password315 +mail: uid315 +uidnumber: 315 +gidnumber: 315 +homeDirectory: /home/uid315 + +dn: cn=user316,ou=People,dc=example,dc=com +objectClass: 
top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user316 +sn: user316 +uid: uid316 +givenname: givenname316 +description: description316 +userPassword: password316 +mail: uid316 +uidnumber: 316 +gidnumber: 316 +homeDirectory: /home/uid316 + +dn: cn=user317,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user317 +sn: user317 +uid: uid317 +givenname: givenname317 +description: description317 +userPassword: password317 +mail: uid317 +uidnumber: 317 +gidnumber: 317 +homeDirectory: /home/uid317 + +dn: cn=user318,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user318 +sn: user318 +uid: uid318 +givenname: givenname318 +description: description318 +userPassword: password318 +mail: uid318 +uidnumber: 318 +gidnumber: 318 +homeDirectory: /home/uid318 + +dn: cn=user319,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user319 +sn: user319 +uid: uid319 +givenname: givenname319 +description: description319 +userPassword: password319 +mail: uid319 +uidnumber: 319 +gidnumber: 319 +homeDirectory: /home/uid319 + +dn: cn=user320,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user320 +sn: user320 +uid: uid320 +givenname: givenname320 +description: description320 +userPassword: password320 +mail: uid320 +uidnumber: 320 +gidnumber: 320 +homeDirectory: /home/uid320 + +dn: cn=user321,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user321 +sn: user321 +uid: 
uid321 +givenname: givenname321 +description: description321 +userPassword: password321 +mail: uid321 +uidnumber: 321 +gidnumber: 321 +homeDirectory: /home/uid321 + +dn: cn=user322,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user322 +sn: user322 +uid: uid322 +givenname: givenname322 +description: description322 +userPassword: password322 +mail: uid322 +uidnumber: 322 +gidnumber: 322 +homeDirectory: /home/uid322 + +dn: cn=user323,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user323 +sn: user323 +uid: uid323 +givenname: givenname323 +description: description323 +userPassword: password323 +mail: uid323 +uidnumber: 323 +gidnumber: 323 +homeDirectory: /home/uid323 + +dn: cn=user324,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user324 +sn: user324 +uid: uid324 +givenname: givenname324 +description: description324 +userPassword: password324 +mail: uid324 +uidnumber: 324 +gidnumber: 324 +homeDirectory: /home/uid324 + +dn: cn=user325,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user325 +sn: user325 +uid: uid325 +givenname: givenname325 +description: description325 +userPassword: password325 +mail: uid325 +uidnumber: 325 +gidnumber: 325 +homeDirectory: /home/uid325 + +dn: cn=user326,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user326 +sn: user326 +uid: uid326 +givenname: givenname326 +description: description326 +userPassword: password326 +mail: uid326 +uidnumber: 326 +gidnumber: 326 +homeDirectory: 
/home/uid326 + +dn: cn=user327,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user327 +sn: user327 +uid: uid327 +givenname: givenname327 +description: description327 +userPassword: password327 +mail: uid327 +uidnumber: 327 +gidnumber: 327 +homeDirectory: /home/uid327 + +dn: cn=user328,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user328 +sn: user328 +uid: uid328 +givenname: givenname328 +description: description328 +userPassword: password328 +mail: uid328 +uidnumber: 328 +gidnumber: 328 +homeDirectory: /home/uid328 + +dn: cn=user329,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user329 +sn: user329 +uid: uid329 +givenname: givenname329 +description: description329 +userPassword: password329 +mail: uid329 +uidnumber: 329 +gidnumber: 329 +homeDirectory: /home/uid329 + +dn: cn=user330,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user330 +sn: user330 +uid: uid330 +givenname: givenname330 +description: description330 +userPassword: password330 +mail: uid330 +uidnumber: 330 +gidnumber: 330 +homeDirectory: /home/uid330 + +dn: cn=user331,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user331 +sn: user331 +uid: uid331 +givenname: givenname331 +description: description331 +userPassword: password331 +mail: uid331 +uidnumber: 331 +gidnumber: 331 +homeDirectory: /home/uid331 + +dn: cn=user332,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: 
inetOrgPerson +objectClass: posixAccount +cn: user332 +sn: user332 +uid: uid332 +givenname: givenname332 +description: description332 +userPassword: password332 +mail: uid332 +uidnumber: 332 +gidnumber: 332 +homeDirectory: /home/uid332 + +dn: cn=user333,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user333 +sn: user333 +uid: uid333 +givenname: givenname333 +description: description333 +userPassword: password333 +mail: uid333 +uidnumber: 333 +gidnumber: 333 +homeDirectory: /home/uid333 + +dn: cn=user334,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user334 +sn: user334 +uid: uid334 +givenname: givenname334 +description: description334 +userPassword: password334 +mail: uid334 +uidnumber: 334 +gidnumber: 334 +homeDirectory: /home/uid334 + +dn: cn=user335,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user335 +sn: user335 +uid: uid335 +givenname: givenname335 +description: description335 +userPassword: password335 +mail: uid335 +uidnumber: 335 +gidnumber: 335 +homeDirectory: /home/uid335 + +dn: cn=user336,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user336 +sn: user336 +uid: uid336 +givenname: givenname336 +description: description336 +userPassword: password336 +mail: uid336 +uidnumber: 336 +gidnumber: 336 +homeDirectory: /home/uid336 + +dn: cn=user337,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user337 +sn: user337 +uid: uid337 +givenname: givenname337 +description: description337 +userPassword: 
password337 +mail: uid337 +uidnumber: 337 +gidnumber: 337 +homeDirectory: /home/uid337 + +dn: cn=user338,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user338 +sn: user338 +uid: uid338 +givenname: givenname338 +description: description338 +userPassword: password338 +mail: uid338 +uidnumber: 338 +gidnumber: 338 +homeDirectory: /home/uid338 + +dn: cn=user339,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user339 +sn: user339 +uid: uid339 +givenname: givenname339 +description: description339 +userPassword: password339 +mail: uid339 +uidnumber: 339 +gidnumber: 339 +homeDirectory: /home/uid339 + +dn: cn=user340,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user340 +sn: user340 +uid: uid340 +givenname: givenname340 +description: description340 +userPassword: password340 +mail: uid340 +uidnumber: 340 +gidnumber: 340 +homeDirectory: /home/uid340 + +dn: cn=user341,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user341 +sn: user341 +uid: uid341 +givenname: givenname341 +description: description341 +userPassword: password341 +mail: uid341 +uidnumber: 341 +gidnumber: 341 +homeDirectory: /home/uid341 + +dn: cn=user342,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user342 +sn: user342 +uid: uid342 +givenname: givenname342 +description: description342 +userPassword: password342 +mail: uid342 +uidnumber: 342 +gidnumber: 342 +homeDirectory: /home/uid342 + +dn: cn=user343,ou=People,dc=example,dc=com +objectClass: 
top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user343 +sn: user343 +uid: uid343 +givenname: givenname343 +description: description343 +userPassword: password343 +mail: uid343 +uidnumber: 343 +gidnumber: 343 +homeDirectory: /home/uid343 + +dn: cn=user344,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user344 +sn: user344 +uid: uid344 +givenname: givenname344 +description: description344 +userPassword: password344 +mail: uid344 +uidnumber: 344 +gidnumber: 344 +homeDirectory: /home/uid344 + +dn: cn=user345,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user345 +sn: user345 +uid: uid345 +givenname: givenname345 +description: description345 +userPassword: password345 +mail: uid345 +uidnumber: 345 +gidnumber: 345 +homeDirectory: /home/uid345 + +dn: cn=user346,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user346 +sn: user346 +uid: uid346 +givenname: givenname346 +description: description346 +userPassword: password346 +mail: uid346 +uidnumber: 346 +gidnumber: 346 +homeDirectory: /home/uid346 + +dn: cn=user347,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user347 +sn: user347 +uid: uid347 +givenname: givenname347 +description: description347 +userPassword: password347 +mail: uid347 +uidnumber: 347 +gidnumber: 347 +homeDirectory: /home/uid347 + +dn: cn=user348,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user348 +sn: user348 +uid: 
uid348 +givenname: givenname348 +description: description348 +userPassword: password348 +mail: uid348 +uidnumber: 348 +gidnumber: 348 +homeDirectory: /home/uid348 + +dn: cn=user349,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user349 +sn: user349 +uid: uid349 +givenname: givenname349 +description: description349 +userPassword: password349 +mail: uid349 +uidnumber: 349 +gidnumber: 349 +homeDirectory: /home/uid349 + +dn: cn=user350,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user350 +sn: user350 +uid: uid350 +givenname: givenname350 +description: description350 +userPassword: password350 +mail: uid350 +uidnumber: 350 +gidnumber: 350 +homeDirectory: /home/uid350 + +dn: cn=user351,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user351 +sn: user351 +uid: uid351 +givenname: givenname351 +description: description351 +userPassword: password351 +mail: uid351 +uidnumber: 351 +gidnumber: 351 +homeDirectory: /home/uid351 + +dn: cn=user352,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user352 +sn: user352 +uid: uid352 +givenname: givenname352 +description: description352 +userPassword: password352 +mail: uid352 +uidnumber: 352 +gidnumber: 352 +homeDirectory: /home/uid352 + +dn: cn=user353,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user353 +sn: user353 +uid: uid353 +givenname: givenname353 +description: description353 +userPassword: password353 +mail: uid353 +uidnumber: 353 +gidnumber: 353 +homeDirectory: 
/home/uid353 + +dn: cn=user354,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user354 +sn: user354 +uid: uid354 +givenname: givenname354 +description: description354 +userPassword: password354 +mail: uid354 +uidnumber: 354 +gidnumber: 354 +homeDirectory: /home/uid354 + +dn: cn=user355,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user355 +sn: user355 +uid: uid355 +givenname: givenname355 +description: description355 +userPassword: password355 +mail: uid355 +uidnumber: 355 +gidnumber: 355 +homeDirectory: /home/uid355 + +dn: cn=user356,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user356 +sn: user356 +uid: uid356 +givenname: givenname356 +description: description356 +userPassword: password356 +mail: uid356 +uidnumber: 356 +gidnumber: 356 +homeDirectory: /home/uid356 + +dn: cn=user357,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user357 +sn: user357 +uid: uid357 +givenname: givenname357 +description: description357 +userPassword: password357 +mail: uid357 +uidnumber: 357 +gidnumber: 357 +homeDirectory: /home/uid357 + +dn: cn=user358,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user358 +sn: user358 +uid: uid358 +givenname: givenname358 +description: description358 +userPassword: password358 +mail: uid358 +uidnumber: 358 +gidnumber: 358 +homeDirectory: /home/uid358 + +dn: cn=user359,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: 
inetOrgPerson +objectClass: posixAccount +cn: user359 +sn: user359 +uid: uid359 +givenname: givenname359 +description: description359 +userPassword: password359 +mail: uid359 +uidnumber: 359 +gidnumber: 359 +homeDirectory: /home/uid359 + +dn: cn=user360,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user360 +sn: user360 +uid: uid360 +givenname: givenname360 +description: description360 +userPassword: password360 +mail: uid360 +uidnumber: 360 +gidnumber: 360 +homeDirectory: /home/uid360 + +dn: cn=user361,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user361 +sn: user361 +uid: uid361 +givenname: givenname361 +description: description361 +userPassword: password361 +mail: uid361 +uidnumber: 361 +gidnumber: 361 +homeDirectory: /home/uid361 + +dn: cn=user362,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user362 +sn: user362 +uid: uid362 +givenname: givenname362 +description: description362 +userPassword: password362 +mail: uid362 +uidnumber: 362 +gidnumber: 362 +homeDirectory: /home/uid362 + +dn: cn=user363,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user363 +sn: user363 +uid: uid363 +givenname: givenname363 +description: description363 +userPassword: password363 +mail: uid363 +uidnumber: 363 +gidnumber: 363 +homeDirectory: /home/uid363 + +dn: cn=user364,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user364 +sn: user364 +uid: uid364 +givenname: givenname364 +description: description364 +userPassword: 
password364 +mail: uid364 +uidnumber: 364 +gidnumber: 364 +homeDirectory: /home/uid364 + +dn: cn=user365,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user365 +sn: user365 +uid: uid365 +givenname: givenname365 +description: description365 +userPassword: password365 +mail: uid365 +uidnumber: 365 +gidnumber: 365 +homeDirectory: /home/uid365 + +dn: cn=user366,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user366 +sn: user366 +uid: uid366 +givenname: givenname366 +description: description366 +userPassword: password366 +mail: uid366 +uidnumber: 366 +gidnumber: 366 +homeDirectory: /home/uid366 + +dn: cn=user367,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user367 +sn: user367 +uid: uid367 +givenname: givenname367 +description: description367 +userPassword: password367 +mail: uid367 +uidnumber: 367 +gidnumber: 367 +homeDirectory: /home/uid367 + +dn: cn=user368,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user368 +sn: user368 +uid: uid368 +givenname: givenname368 +description: description368 +userPassword: password368 +mail: uid368 +uidnumber: 368 +gidnumber: 368 +homeDirectory: /home/uid368 + +dn: cn=user369,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user369 +sn: user369 +uid: uid369 +givenname: givenname369 +description: description369 +userPassword: password369 +mail: uid369 +uidnumber: 369 +gidnumber: 369 +homeDirectory: /home/uid369 + +dn: cn=user370,ou=People,dc=example,dc=com +objectClass: 
top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user370 +sn: user370 +uid: uid370 +givenname: givenname370 +description: description370 +userPassword: password370 +mail: uid370 +uidnumber: 370 +gidnumber: 370 +homeDirectory: /home/uid370 + +dn: cn=user371,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user371 +sn: user371 +uid: uid371 +givenname: givenname371 +description: description371 +userPassword: password371 +mail: uid371 +uidnumber: 371 +gidnumber: 371 +homeDirectory: /home/uid371 + +dn: cn=user372,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user372 +sn: user372 +uid: uid372 +givenname: givenname372 +description: description372 +userPassword: password372 +mail: uid372 +uidnumber: 372 +gidnumber: 372 +homeDirectory: /home/uid372 + +dn: cn=user373,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user373 +sn: user373 +uid: uid373 +givenname: givenname373 +description: description373 +userPassword: password373 +mail: uid373 +uidnumber: 373 +gidnumber: 373 +homeDirectory: /home/uid373 + +dn: cn=user374,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user374 +sn: user374 +uid: uid374 +givenname: givenname374 +description: description374 +userPassword: password374 +mail: uid374 +uidnumber: 374 +gidnumber: 374 +homeDirectory: /home/uid374 + +dn: cn=user375,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user375 +sn: user375 +uid: 
uid375 +givenname: givenname375 +description: description375 +userPassword: password375 +mail: uid375 +uidnumber: 375 +gidnumber: 375 +homeDirectory: /home/uid375 + +dn: cn=user376,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user376 +sn: user376 +uid: uid376 +givenname: givenname376 +description: description376 +userPassword: password376 +mail: uid376 +uidnumber: 376 +gidnumber: 376 +homeDirectory: /home/uid376 + +dn: cn=user377,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user377 +sn: user377 +uid: uid377 +givenname: givenname377 +description: description377 +userPassword: password377 +mail: uid377 +uidnumber: 377 +gidnumber: 377 +homeDirectory: /home/uid377 + +dn: cn=user378,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user378 +sn: user378 +uid: uid378 +givenname: givenname378 +description: description378 +userPassword: password378 +mail: uid378 +uidnumber: 378 +gidnumber: 378 +homeDirectory: /home/uid378 + +dn: cn=user379,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user379 +sn: user379 +uid: uid379 +givenname: givenname379 +description: description379 +userPassword: password379 +mail: uid379 +uidnumber: 379 +gidnumber: 379 +homeDirectory: /home/uid379 + +dn: cn=user380,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user380 +sn: user380 +uid: uid380 +givenname: givenname380 +description: description380 +userPassword: password380 +mail: uid380 +uidnumber: 380 +gidnumber: 380 +homeDirectory: 
/home/uid380 + +dn: cn=user381,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user381 +sn: user381 +uid: uid381 +givenname: givenname381 +description: description381 +userPassword: password381 +mail: uid381 +uidnumber: 381 +gidnumber: 381 +homeDirectory: /home/uid381 + +dn: cn=user382,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user382 +sn: user382 +uid: uid382 +givenname: givenname382 +description: description382 +userPassword: password382 +mail: uid382 +uidnumber: 382 +gidnumber: 382 +homeDirectory: /home/uid382 + +dn: cn=user383,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user383 +sn: user383 +uid: uid383 +givenname: givenname383 +description: description383 +userPassword: password383 +mail: uid383 +uidnumber: 383 +gidnumber: 383 +homeDirectory: /home/uid383 + +dn: cn=user384,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user384 +sn: user384 +uid: uid384 +givenname: givenname384 +description: description384 +userPassword: password384 +mail: uid384 +uidnumber: 384 +gidnumber: 384 +homeDirectory: /home/uid384 + +dn: cn=user385,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user385 +sn: user385 +uid: uid385 +givenname: givenname385 +description: description385 +userPassword: password385 +mail: uid385 +uidnumber: 385 +gidnumber: 385 +homeDirectory: /home/uid385 + +dn: cn=user386,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: 
inetOrgPerson +objectClass: posixAccount +cn: user386 +sn: user386 +uid: uid386 +givenname: givenname386 +description: description386 +userPassword: password386 +mail: uid386 +uidnumber: 386 +gidnumber: 386 +homeDirectory: /home/uid386 + +dn: cn=user387,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user387 +sn: user387 +uid: uid387 +givenname: givenname387 +description: description387 +userPassword: password387 +mail: uid387 +uidnumber: 387 +gidnumber: 387 +homeDirectory: /home/uid387 + +dn: cn=user388,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user388 +sn: user388 +uid: uid388 +givenname: givenname388 +description: description388 +userPassword: password388 +mail: uid388 +uidnumber: 388 +gidnumber: 388 +homeDirectory: /home/uid388 + +dn: cn=user389,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user389 +sn: user389 +uid: uid389 +givenname: givenname389 +description: description389 +userPassword: password389 +mail: uid389 +uidnumber: 389 +gidnumber: 389 +homeDirectory: /home/uid389 + +dn: cn=user390,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user390 +sn: user390 +uid: uid390 +givenname: givenname390 +description: description390 +userPassword: password390 +mail: uid390 +uidnumber: 390 +gidnumber: 390 +homeDirectory: /home/uid390 + +dn: cn=user391,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user391 +sn: user391 +uid: uid391 +givenname: givenname391 +description: description391 +userPassword: 
password391 +mail: uid391 +uidnumber: 391 +gidnumber: 391 +homeDirectory: /home/uid391 + +dn: cn=user392,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user392 +sn: user392 +uid: uid392 +givenname: givenname392 +description: description392 +userPassword: password392 +mail: uid392 +uidnumber: 392 +gidnumber: 392 +homeDirectory: /home/uid392 + +dn: cn=user393,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user393 +sn: user393 +uid: uid393 +givenname: givenname393 +description: description393 +userPassword: password393 +mail: uid393 +uidnumber: 393 +gidnumber: 393 +homeDirectory: /home/uid393 + +dn: cn=user394,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user394 +sn: user394 +uid: uid394 +givenname: givenname394 +description: description394 +userPassword: password394 +mail: uid394 +uidnumber: 394 +gidnumber: 394 +homeDirectory: /home/uid394 + +dn: cn=user395,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user395 +sn: user395 +uid: uid395 +givenname: givenname395 +description: description395 +userPassword: password395 +mail: uid395 +uidnumber: 395 +gidnumber: 395 +homeDirectory: /home/uid395 + +dn: cn=user396,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user396 +sn: user396 +uid: uid396 +givenname: givenname396 +description: description396 +userPassword: password396 +mail: uid396 +uidnumber: 396 +gidnumber: 396 +homeDirectory: /home/uid396 + +dn: cn=user397,ou=People,dc=example,dc=com +objectClass: 
top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user397 +sn: user397 +uid: uid397 +givenname: givenname397 +description: description397 +userPassword: password397 +mail: uid397 +uidnumber: 397 +gidnumber: 397 +homeDirectory: /home/uid397 + +dn: cn=user398,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user398 +sn: user398 +uid: uid398 +givenname: givenname398 +description: description398 +userPassword: password398 +mail: uid398 +uidnumber: 398 +gidnumber: 398 +homeDirectory: /home/uid398 + +dn: cn=user399,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user399 +sn: user399 +uid: uid399 +givenname: givenname399 +description: description399 +userPassword: password399 +mail: uid399 +uidnumber: 399 +gidnumber: 399 +homeDirectory: /home/uid399 + +dn: cn=user400,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user400 +sn: user400 +uid: uid400 +givenname: givenname400 +description: description400 +userPassword: password400 +mail: uid400 +uidnumber: 400 +gidnumber: 400 +homeDirectory: /home/uid400 + +dn: cn=user401,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user401 +sn: user401 +uid: uid401 +givenname: givenname401 +description: description401 +userPassword: password401 +mail: uid401 +uidnumber: 401 +gidnumber: 401 +homeDirectory: /home/uid401 + +dn: cn=user402,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user402 +sn: user402 +uid: 
uid402 +givenname: givenname402 +description: description402 +userPassword: password402 +mail: uid402 +uidnumber: 402 +gidnumber: 402 +homeDirectory: /home/uid402 + +dn: cn=user403,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user403 +sn: user403 +uid: uid403 +givenname: givenname403 +description: description403 +userPassword: password403 +mail: uid403 +uidnumber: 403 +gidnumber: 403 +homeDirectory: /home/uid403 + +dn: cn=user404,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user404 +sn: user404 +uid: uid404 +givenname: givenname404 +description: description404 +userPassword: password404 +mail: uid404 +uidnumber: 404 +gidnumber: 404 +homeDirectory: /home/uid404 + +dn: cn=user405,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user405 +sn: user405 +uid: uid405 +givenname: givenname405 +description: description405 +userPassword: password405 +mail: uid405 +uidnumber: 405 +gidnumber: 405 +homeDirectory: /home/uid405 + +dn: cn=user406,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user406 +sn: user406 +uid: uid406 +givenname: givenname406 +description: description406 +userPassword: password406 +mail: uid406 +uidnumber: 406 +gidnumber: 406 +homeDirectory: /home/uid406 + +dn: cn=user407,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user407 +sn: user407 +uid: uid407 +givenname: givenname407 +description: description407 +userPassword: password407 +mail: uid407 +uidnumber: 407 +gidnumber: 407 +homeDirectory: 
/home/uid407 + +dn: cn=user408,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user408 +sn: user408 +uid: uid408 +givenname: givenname408 +description: description408 +userPassword: password408 +mail: uid408 +uidnumber: 408 +gidnumber: 408 +homeDirectory: /home/uid408 + +dn: cn=user409,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user409 +sn: user409 +uid: uid409 +givenname: givenname409 +description: description409 +userPassword: password409 +mail: uid409 +uidnumber: 409 +gidnumber: 409 +homeDirectory: /home/uid409 + +dn: cn=user410,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user410 +sn: user410 +uid: uid410 +givenname: givenname410 +description: description410 +userPassword: password410 +mail: uid410 +uidnumber: 410 +gidnumber: 410 +homeDirectory: /home/uid410 + +dn: cn=user411,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user411 +sn: user411 +uid: uid411 +givenname: givenname411 +description: description411 +userPassword: password411 +mail: uid411 +uidnumber: 411 +gidnumber: 411 +homeDirectory: /home/uid411 + +dn: cn=user412,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user412 +sn: user412 +uid: uid412 +givenname: givenname412 +description: description412 +userPassword: password412 +mail: uid412 +uidnumber: 412 +gidnumber: 412 +homeDirectory: /home/uid412 + +dn: cn=user413,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: 
inetOrgPerson +objectClass: posixAccount +cn: user413 +sn: user413 +uid: uid413 +givenname: givenname413 +description: description413 +userPassword: password413 +mail: uid413 +uidnumber: 413 +gidnumber: 413 +homeDirectory: /home/uid413 + +dn: cn=user414,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user414 +sn: user414 +uid: uid414 +givenname: givenname414 +description: description414 +userPassword: password414 +mail: uid414 +uidnumber: 414 +gidnumber: 414 +homeDirectory: /home/uid414 + +dn: cn=user415,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user415 +sn: user415 +uid: uid415 +givenname: givenname415 +description: description415 +userPassword: password415 +mail: uid415 +uidnumber: 415 +gidnumber: 415 +homeDirectory: /home/uid415 + +dn: cn=user416,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user416 +sn: user416 +uid: uid416 +givenname: givenname416 +description: description416 +userPassword: password416 +mail: uid416 +uidnumber: 416 +gidnumber: 416 +homeDirectory: /home/uid416 + +dn: cn=user417,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user417 +sn: user417 +uid: uid417 +givenname: givenname417 +description: description417 +userPassword: password417 +mail: uid417 +uidnumber: 417 +gidnumber: 417 +homeDirectory: /home/uid417 + +dn: cn=user418,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user418 +sn: user418 +uid: uid418 +givenname: givenname418 +description: description418 +userPassword: 
password418 +mail: uid418 +uidnumber: 418 +gidnumber: 418 +homeDirectory: /home/uid418 + +dn: cn=user419,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user419 +sn: user419 +uid: uid419 +givenname: givenname419 +description: description419 +userPassword: password419 +mail: uid419 +uidnumber: 419 +gidnumber: 419 +homeDirectory: /home/uid419 + +dn: cn=user420,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user420 +sn: user420 +uid: uid420 +givenname: givenname420 +description: description420 +userPassword: password420 +mail: uid420 +uidnumber: 420 +gidnumber: 420 +homeDirectory: /home/uid420 + +dn: cn=user421,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user421 +sn: user421 +uid: uid421 +givenname: givenname421 +description: description421 +userPassword: password421 +mail: uid421 +uidnumber: 421 +gidnumber: 421 +homeDirectory: /home/uid421 + +dn: cn=user422,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user422 +sn: user422 +uid: uid422 +givenname: givenname422 +description: description422 +userPassword: password422 +mail: uid422 +uidnumber: 422 +gidnumber: 422 +homeDirectory: /home/uid422 + +dn: cn=user423,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user423 +sn: user423 +uid: uid423 +givenname: givenname423 +description: description423 +userPassword: password423 +mail: uid423 +uidnumber: 423 +gidnumber: 423 +homeDirectory: /home/uid423 + +dn: cn=user424,ou=People,dc=example,dc=com +objectClass: 
top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user424 +sn: user424 +uid: uid424 +givenname: givenname424 +description: description424 +userPassword: password424 +mail: uid424 +uidnumber: 424 +gidnumber: 424 +homeDirectory: /home/uid424 + +dn: cn=user425,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user425 +sn: user425 +uid: uid425 +givenname: givenname425 +description: description425 +userPassword: password425 +mail: uid425 +uidnumber: 425 +gidnumber: 425 +homeDirectory: /home/uid425 + +dn: cn=user426,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user426 +sn: user426 +uid: uid426 +givenname: givenname426 +description: description426 +userPassword: password426 +mail: uid426 +uidnumber: 426 +gidnumber: 426 +homeDirectory: /home/uid426 + +dn: cn=user427,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user427 +sn: user427 +uid: uid427 +givenname: givenname427 +description: description427 +userPassword: password427 +mail: uid427 +uidnumber: 427 +gidnumber: 427 +homeDirectory: /home/uid427 + +dn: cn=user428,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user428 +sn: user428 +uid: uid428 +givenname: givenname428 +description: description428 +userPassword: password428 +mail: uid428 +uidnumber: 428 +gidnumber: 428 +homeDirectory: /home/uid428 + +dn: cn=user429,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user429 +sn: user429 +uid: 
uid429 +givenname: givenname429 +description: description429 +userPassword: password429 +mail: uid429 +uidnumber: 429 +gidnumber: 429 +homeDirectory: /home/uid429 + +dn: cn=user430,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user430 +sn: user430 +uid: uid430 +givenname: givenname430 +description: description430 +userPassword: password430 +mail: uid430 +uidnumber: 430 +gidnumber: 430 +homeDirectory: /home/uid430 + +dn: cn=user431,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user431 +sn: user431 +uid: uid431 +givenname: givenname431 +description: description431 +userPassword: password431 +mail: uid431 +uidnumber: 431 +gidnumber: 431 +homeDirectory: /home/uid431 + +dn: cn=user432,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user432 +sn: user432 +uid: uid432 +givenname: givenname432 +description: description432 +userPassword: password432 +mail: uid432 +uidnumber: 432 +gidnumber: 432 +homeDirectory: /home/uid432 + +dn: cn=user433,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user433 +sn: user433 +uid: uid433 +givenname: givenname433 +description: description433 +userPassword: password433 +mail: uid433 +uidnumber: 433 +gidnumber: 433 +homeDirectory: /home/uid433 + +dn: cn=user434,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user434 +sn: user434 +uid: uid434 +givenname: givenname434 +description: description434 +userPassword: password434 +mail: uid434 +uidnumber: 434 +gidnumber: 434 +homeDirectory: 
/home/uid434 + +dn: cn=user435,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user435 +sn: user435 +uid: uid435 +givenname: givenname435 +description: description435 +userPassword: password435 +mail: uid435 +uidnumber: 435 +gidnumber: 435 +homeDirectory: /home/uid435 + +dn: cn=user436,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user436 +sn: user436 +uid: uid436 +givenname: givenname436 +description: description436 +userPassword: password436 +mail: uid436 +uidnumber: 436 +gidnumber: 436 +homeDirectory: /home/uid436 + +dn: cn=user437,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user437 +sn: user437 +uid: uid437 +givenname: givenname437 +description: description437 +userPassword: password437 +mail: uid437 +uidnumber: 437 +gidnumber: 437 +homeDirectory: /home/uid437 + +dn: cn=user438,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user438 +sn: user438 +uid: uid438 +givenname: givenname438 +description: description438 +userPassword: password438 +mail: uid438 +uidnumber: 438 +gidnumber: 438 +homeDirectory: /home/uid438 + +dn: cn=user439,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user439 +sn: user439 +uid: uid439 +givenname: givenname439 +description: description439 +userPassword: password439 +mail: uid439 +uidnumber: 439 +gidnumber: 439 +homeDirectory: /home/uid439 + +dn: cn=user440,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: 
inetOrgPerson +objectClass: posixAccount +cn: user440 +sn: user440 +uid: uid440 +givenname: givenname440 +description: description440 +userPassword: password440 +mail: uid440 +uidnumber: 440 +gidnumber: 440 +homeDirectory: /home/uid440 + +dn: cn=user441,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user441 +sn: user441 +uid: uid441 +givenname: givenname441 +description: description441 +userPassword: password441 +mail: uid441 +uidnumber: 441 +gidnumber: 441 +homeDirectory: /home/uid441 + +dn: cn=user442,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user442 +sn: user442 +uid: uid442 +givenname: givenname442 +description: description442 +userPassword: password442 +mail: uid442 +uidnumber: 442 +gidnumber: 442 +homeDirectory: /home/uid442 + +dn: cn=user443,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user443 +sn: user443 +uid: uid443 +givenname: givenname443 +description: description443 +userPassword: password443 +mail: uid443 +uidnumber: 443 +gidnumber: 443 +homeDirectory: /home/uid443 + +dn: cn=user444,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user444 +sn: user444 +uid: uid444 +givenname: givenname444 +description: description444 +userPassword: password444 +mail: uid444 +uidnumber: 444 +gidnumber: 444 +homeDirectory: /home/uid444 + +dn: cn=user445,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user445 +sn: user445 +uid: uid445 +givenname: givenname445 +description: description445 +userPassword: 
password445 +mail: uid445 +uidnumber: 445 +gidnumber: 445 +homeDirectory: /home/uid445 + +dn: cn=user446,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user446 +sn: user446 +uid: uid446 +givenname: givenname446 +description: description446 +userPassword: password446 +mail: uid446 +uidnumber: 446 +gidnumber: 446 +homeDirectory: /home/uid446 + +dn: cn=user447,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user447 +sn: user447 +uid: uid447 +givenname: givenname447 +description: description447 +userPassword: password447 +mail: uid447 +uidnumber: 447 +gidnumber: 447 +homeDirectory: /home/uid447 + +dn: cn=user448,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user448 +sn: user448 +uid: uid448 +givenname: givenname448 +description: description448 +userPassword: password448 +mail: uid448 +uidnumber: 448 +gidnumber: 448 +homeDirectory: /home/uid448 + +dn: cn=user449,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user449 +sn: user449 +uid: uid449 +givenname: givenname449 +description: description449 +userPassword: password449 +mail: uid449 +uidnumber: 449 +gidnumber: 449 +homeDirectory: /home/uid449 + +dn: cn=user450,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user450 +sn: user450 +uid: uid450 +givenname: givenname450 +description: description450 +userPassword: password450 +mail: uid450 +uidnumber: 450 +gidnumber: 450 +homeDirectory: /home/uid450 + +dn: cn=user451,ou=People,dc=example,dc=com +objectClass: 
top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user451 +sn: user451 +uid: uid451 +givenname: givenname451 +description: description451 +userPassword: password451 +mail: uid451 +uidnumber: 451 +gidnumber: 451 +homeDirectory: /home/uid451 + +dn: cn=user452,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user452 +sn: user452 +uid: uid452 +givenname: givenname452 +description: description452 +userPassword: password452 +mail: uid452 +uidnumber: 452 +gidnumber: 452 +homeDirectory: /home/uid452 + +dn: cn=user453,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user453 +sn: user453 +uid: uid453 +givenname: givenname453 +description: description453 +userPassword: password453 +mail: uid453 +uidnumber: 453 +gidnumber: 453 +homeDirectory: /home/uid453 + +dn: cn=user454,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user454 +sn: user454 +uid: uid454 +givenname: givenname454 +description: description454 +userPassword: password454 +mail: uid454 +uidnumber: 454 +gidnumber: 454 +homeDirectory: /home/uid454 + +dn: cn=user455,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user455 +sn: user455 +uid: uid455 +givenname: givenname455 +description: description455 +userPassword: password455 +mail: uid455 +uidnumber: 455 +gidnumber: 455 +homeDirectory: /home/uid455 + +dn: cn=user456,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user456 +sn: user456 +uid: 
uid456 +givenname: givenname456 +description: description456 +userPassword: password456 +mail: uid456 +uidnumber: 456 +gidnumber: 456 +homeDirectory: /home/uid456 + +dn: cn=user457,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user457 +sn: user457 +uid: uid457 +givenname: givenname457 +description: description457 +userPassword: password457 +mail: uid457 +uidnumber: 457 +gidnumber: 457 +homeDirectory: /home/uid457 + +dn: cn=user458,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user458 +sn: user458 +uid: uid458 +givenname: givenname458 +description: description458 +userPassword: password458 +mail: uid458 +uidnumber: 458 +gidnumber: 458 +homeDirectory: /home/uid458 + +dn: cn=user459,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user459 +sn: user459 +uid: uid459 +givenname: givenname459 +description: description459 +userPassword: password459 +mail: uid459 +uidnumber: 459 +gidnumber: 459 +homeDirectory: /home/uid459 + +dn: cn=user460,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user460 +sn: user460 +uid: uid460 +givenname: givenname460 +description: description460 +userPassword: password460 +mail: uid460 +uidnumber: 460 +gidnumber: 460 +homeDirectory: /home/uid460 + +dn: cn=user461,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user461 +sn: user461 +uid: uid461 +givenname: givenname461 +description: description461 +userPassword: password461 +mail: uid461 +uidnumber: 461 +gidnumber: 461 +homeDirectory: 
/home/uid461 + +dn: cn=user462,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user462 +sn: user462 +uid: uid462 +givenname: givenname462 +description: description462 +userPassword: password462 +mail: uid462 +uidnumber: 462 +gidnumber: 462 +homeDirectory: /home/uid462 + +dn: cn=user463,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user463 +sn: user463 +uid: uid463 +givenname: givenname463 +description: description463 +userPassword: password463 +mail: uid463 +uidnumber: 463 +gidnumber: 463 +homeDirectory: /home/uid463 + +dn: cn=user464,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user464 +sn: user464 +uid: uid464 +givenname: givenname464 +description: description464 +userPassword: password464 +mail: uid464 +uidnumber: 464 +gidnumber: 464 +homeDirectory: /home/uid464 + +dn: cn=user465,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user465 +sn: user465 +uid: uid465 +givenname: givenname465 +description: description465 +userPassword: password465 +mail: uid465 +uidnumber: 465 +gidnumber: 465 +homeDirectory: /home/uid465 + +dn: cn=user466,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user466 +sn: user466 +uid: uid466 +givenname: givenname466 +description: description466 +userPassword: password466 +mail: uid466 +uidnumber: 466 +gidnumber: 466 +homeDirectory: /home/uid466 + +dn: cn=user467,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: 
inetOrgPerson +objectClass: posixAccount +cn: user467 +sn: user467 +uid: uid467 +givenname: givenname467 +description: description467 +userPassword: password467 +mail: uid467 +uidnumber: 467 +gidnumber: 467 +homeDirectory: /home/uid467 + +dn: cn=user468,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user468 +sn: user468 +uid: uid468 +givenname: givenname468 +description: description468 +userPassword: password468 +mail: uid468 +uidnumber: 468 +gidnumber: 468 +homeDirectory: /home/uid468 + +dn: cn=user469,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user469 +sn: user469 +uid: uid469 +givenname: givenname469 +description: description469 +userPassword: password469 +mail: uid469 +uidnumber: 469 +gidnumber: 469 +homeDirectory: /home/uid469 + +dn: cn=user470,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user470 +sn: user470 +uid: uid470 +givenname: givenname470 +description: description470 +userPassword: password470 +mail: uid470 +uidnumber: 470 +gidnumber: 470 +homeDirectory: /home/uid470 + +dn: cn=user471,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user471 +sn: user471 +uid: uid471 +givenname: givenname471 +description: description471 +userPassword: password471 +mail: uid471 +uidnumber: 471 +gidnumber: 471 +homeDirectory: /home/uid471 + +dn: cn=user472,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user472 +sn: user472 +uid: uid472 +givenname: givenname472 +description: description472 +userPassword: 
password472 +mail: uid472 +uidnumber: 472 +gidnumber: 472 +homeDirectory: /home/uid472 + +dn: cn=user473,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user473 +sn: user473 +uid: uid473 +givenname: givenname473 +description: description473 +userPassword: password473 +mail: uid473 +uidnumber: 473 +gidnumber: 473 +homeDirectory: /home/uid473 + +dn: cn=user474,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user474 +sn: user474 +uid: uid474 +givenname: givenname474 +description: description474 +userPassword: password474 +mail: uid474 +uidnumber: 474 +gidnumber: 474 +homeDirectory: /home/uid474 + +dn: cn=user475,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user475 +sn: user475 +uid: uid475 +givenname: givenname475 +description: description475 +userPassword: password475 +mail: uid475 +uidnumber: 475 +gidnumber: 475 +homeDirectory: /home/uid475 + +dn: cn=user476,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user476 +sn: user476 +uid: uid476 +givenname: givenname476 +description: description476 +userPassword: password476 +mail: uid476 +uidnumber: 476 +gidnumber: 476 +homeDirectory: /home/uid476 + +dn: cn=user477,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user477 +sn: user477 +uid: uid477 +givenname: givenname477 +description: description477 +userPassword: password477 +mail: uid477 +uidnumber: 477 +gidnumber: 477 +homeDirectory: /home/uid477 + +dn: cn=user478,ou=People,dc=example,dc=com +objectClass: 
top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user478 +sn: user478 +uid: uid478 +givenname: givenname478 +description: description478 +userPassword: password478 +mail: uid478 +uidnumber: 478 +gidnumber: 478 +homeDirectory: /home/uid478 + +dn: cn=user479,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user479 +sn: user479 +uid: uid479 +givenname: givenname479 +description: description479 +userPassword: password479 +mail: uid479 +uidnumber: 479 +gidnumber: 479 +homeDirectory: /home/uid479 + +dn: cn=user480,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user480 +sn: user480 +uid: uid480 +givenname: givenname480 +description: description480 +userPassword: password480 +mail: uid480 +uidnumber: 480 +gidnumber: 480 +homeDirectory: /home/uid480 + +dn: cn=user481,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user481 +sn: user481 +uid: uid481 +givenname: givenname481 +description: description481 +userPassword: password481 +mail: uid481 +uidnumber: 481 +gidnumber: 481 +homeDirectory: /home/uid481 + +dn: cn=user482,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user482 +sn: user482 +uid: uid482 +givenname: givenname482 +description: description482 +userPassword: password482 +mail: uid482 +uidnumber: 482 +gidnumber: 482 +homeDirectory: /home/uid482 + +dn: cn=user483,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user483 +sn: user483 +uid: 
uid483 +givenname: givenname483 +description: description483 +userPassword: password483 +mail: uid483 +uidnumber: 483 +gidnumber: 483 +homeDirectory: /home/uid483 + +dn: cn=user484,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user484 +sn: user484 +uid: uid484 +givenname: givenname484 +description: description484 +userPassword: password484 +mail: uid484 +uidnumber: 484 +gidnumber: 484 +homeDirectory: /home/uid484 + +dn: cn=user485,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user485 +sn: user485 +uid: uid485 +givenname: givenname485 +description: description485 +userPassword: password485 +mail: uid485 +uidnumber: 485 +gidnumber: 485 +homeDirectory: /home/uid485 + +dn: cn=user486,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user486 +sn: user486 +uid: uid486 +givenname: givenname486 +description: description486 +userPassword: password486 +mail: uid486 +uidnumber: 486 +gidnumber: 486 +homeDirectory: /home/uid486 + +dn: cn=user487,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user487 +sn: user487 +uid: uid487 +givenname: givenname487 +description: description487 +userPassword: password487 +mail: uid487 +uidnumber: 487 +gidnumber: 487 +homeDirectory: /home/uid487 + +dn: cn=user488,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user488 +sn: user488 +uid: uid488 +givenname: givenname488 +description: description488 +userPassword: password488 +mail: uid488 +uidnumber: 488 +gidnumber: 488 +homeDirectory: 
/home/uid488 + +dn: cn=user489,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user489 +sn: user489 +uid: uid489 +givenname: givenname489 +description: description489 +userPassword: password489 +mail: uid489 +uidnumber: 489 +gidnumber: 489 +homeDirectory: /home/uid489 + +dn: cn=user490,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user490 +sn: user490 +uid: uid490 +givenname: givenname490 +description: description490 +userPassword: password490 +mail: uid490 +uidnumber: 490 +gidnumber: 490 +homeDirectory: /home/uid490 + +dn: cn=user491,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user491 +sn: user491 +uid: uid491 +givenname: givenname491 +description: description491 +userPassword: password491 +mail: uid491 +uidnumber: 491 +gidnumber: 491 +homeDirectory: /home/uid491 + +dn: cn=user492,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user492 +sn: user492 +uid: uid492 +givenname: givenname492 +description: description492 +userPassword: password492 +mail: uid492 +uidnumber: 492 +gidnumber: 492 +homeDirectory: /home/uid492 + +dn: cn=user493,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user493 +sn: user493 +uid: uid493 +givenname: givenname493 +description: description493 +userPassword: password493 +mail: uid493 +uidnumber: 493 +gidnumber: 493 +homeDirectory: /home/uid493 + +dn: cn=user494,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: 
inetOrgPerson +objectClass: posixAccount +cn: user494 +sn: user494 +uid: uid494 +givenname: givenname494 +description: description494 +userPassword: password494 +mail: uid494 +uidnumber: 494 +gidnumber: 494 +homeDirectory: /home/uid494 + +dn: cn=user495,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user495 +sn: user495 +uid: uid495 +givenname: givenname495 +description: description495 +userPassword: password495 +mail: uid495 +uidnumber: 495 +gidnumber: 495 +homeDirectory: /home/uid495 + +dn: cn=user496,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user496 +sn: user496 +uid: uid496 +givenname: givenname496 +description: description496 +userPassword: password496 +mail: uid496 +uidnumber: 496 +gidnumber: 496 +homeDirectory: /home/uid496 + +dn: cn=user497,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user497 +sn: user497 +uid: uid497 +givenname: givenname497 +description: description497 +userPassword: password497 +mail: uid497 +uidnumber: 497 +gidnumber: 497 +homeDirectory: /home/uid497 + +dn: cn=user498,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user498 +sn: user498 +uid: uid498 +givenname: givenname498 +description: description498 +userPassword: password498 +mail: uid498 +uidnumber: 498 +gidnumber: 498 +homeDirectory: /home/uid498 + +dn: cn=user499,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user499 +sn: user499 +uid: uid499 +givenname: givenname499 +description: description499 +userPassword: 
password499 +mail: uid499 +uidnumber: 499 +gidnumber: 499 +homeDirectory: /home/uid499 + +dn: cn=user500,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user500 +sn: user500 +uid: uid500 +givenname: givenname500 +description: description500 +userPassword: password500 +mail: uid500 +uidnumber: 500 +gidnumber: 500 +homeDirectory: /home/uid500 + +dn: cn=user501,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user501 +sn: user501 +uid: uid501 +givenname: givenname501 +description: description501 +userPassword: password501 +mail: uid501 +uidnumber: 501 +gidnumber: 501 +homeDirectory: /home/uid501 + +dn: cn=user502,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user502 +sn: user502 +uid: uid502 +givenname: givenname502 +description: description502 +userPassword: password502 +mail: uid502 +uidnumber: 502 +gidnumber: 502 +homeDirectory: /home/uid502 + +dn: cn=user503,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user503 +sn: user503 +uid: uid503 +givenname: givenname503 +description: description503 +userPassword: password503 +mail: uid503 +uidnumber: 503 +gidnumber: 503 +homeDirectory: /home/uid503 + +dn: cn=user504,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user504 +sn: user504 +uid: uid504 +givenname: givenname504 +description: description504 +userPassword: password504 +mail: uid504 +uidnumber: 504 +gidnumber: 504 +homeDirectory: /home/uid504 + +dn: cn=user505,ou=People,dc=example,dc=com +objectClass: 
top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user505 +sn: user505 +uid: uid505 +givenname: givenname505 +description: description505 +userPassword: password505 +mail: uid505 +uidnumber: 505 +gidnumber: 505 +homeDirectory: /home/uid505 + +dn: cn=user506,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user506 +sn: user506 +uid: uid506 +givenname: givenname506 +description: description506 +userPassword: password506 +mail: uid506 +uidnumber: 506 +gidnumber: 506 +homeDirectory: /home/uid506 + +dn: cn=user507,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user507 +sn: user507 +uid: uid507 +givenname: givenname507 +description: description507 +userPassword: password507 +mail: uid507 +uidnumber: 507 +gidnumber: 507 +homeDirectory: /home/uid507 + +dn: cn=user508,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user508 +sn: user508 +uid: uid508 +givenname: givenname508 +description: description508 +userPassword: password508 +mail: uid508 +uidnumber: 508 +gidnumber: 508 +homeDirectory: /home/uid508 + +dn: cn=user509,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user509 +sn: user509 +uid: uid509 +givenname: givenname509 +description: description509 +userPassword: password509 +mail: uid509 +uidnumber: 509 +gidnumber: 509 +homeDirectory: /home/uid509 + +dn: cn=user510,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user510 +sn: user510 +uid: 
uid510 +givenname: givenname510 +description: description510 +userPassword: password510 +mail: uid510 +uidnumber: 510 +gidnumber: 510 +homeDirectory: /home/uid510 + +dn: cn=user511,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user511 +sn: user511 +uid: uid511 +givenname: givenname511 +description: description511 +userPassword: password511 +mail: uid511 +uidnumber: 511 +gidnumber: 511 +homeDirectory: /home/uid511 + +dn: cn=user512,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user512 +sn: user512 +uid: uid512 +givenname: givenname512 +description: description512 +userPassword: password512 +mail: uid512 +uidnumber: 512 +gidnumber: 512 +homeDirectory: /home/uid512 + +dn: cn=user513,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user513 +sn: user513 +uid: uid513 +givenname: givenname513 +description: description513 +userPassword: password513 +mail: uid513 +uidnumber: 513 +gidnumber: 513 +homeDirectory: /home/uid513 + +dn: cn=user514,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user514 +sn: user514 +uid: uid514 +givenname: givenname514 +description: description514 +userPassword: password514 +mail: uid514 +uidnumber: 514 +gidnumber: 514 +homeDirectory: /home/uid514 + +dn: cn=user515,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user515 +sn: user515 +uid: uid515 +givenname: givenname515 +description: description515 +userPassword: password515 +mail: uid515 +uidnumber: 515 +gidnumber: 515 +homeDirectory: 
/home/uid515 + +dn: cn=user516,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user516 +sn: user516 +uid: uid516 +givenname: givenname516 +description: description516 +userPassword: password516 +mail: uid516 +uidnumber: 516 +gidnumber: 516 +homeDirectory: /home/uid516 + +dn: cn=user517,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user517 +sn: user517 +uid: uid517 +givenname: givenname517 +description: description517 +userPassword: password517 +mail: uid517 +uidnumber: 517 +gidnumber: 517 +homeDirectory: /home/uid517 + +dn: cn=user518,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user518 +sn: user518 +uid: uid518 +givenname: givenname518 +description: description518 +userPassword: password518 +mail: uid518 +uidnumber: 518 +gidnumber: 518 +homeDirectory: /home/uid518 + +dn: cn=user519,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user519 +sn: user519 +uid: uid519 +givenname: givenname519 +description: description519 +userPassword: password519 +mail: uid519 +uidnumber: 519 +gidnumber: 519 +homeDirectory: /home/uid519 + +dn: cn=user520,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user520 +sn: user520 +uid: uid520 +givenname: givenname520 +description: description520 +userPassword: password520 +mail: uid520 +uidnumber: 520 +gidnumber: 520 +homeDirectory: /home/uid520 + +dn: cn=user521,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: 
inetOrgPerson +objectClass: posixAccount +cn: user521 +sn: user521 +uid: uid521 +givenname: givenname521 +description: description521 +userPassword: password521 +mail: uid521 +uidnumber: 521 +gidnumber: 521 +homeDirectory: /home/uid521 + +dn: cn=user522,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user522 +sn: user522 +uid: uid522 +givenname: givenname522 +description: description522 +userPassword: password522 +mail: uid522 +uidnumber: 522 +gidnumber: 522 +homeDirectory: /home/uid522 + +dn: cn=user523,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user523 +sn: user523 +uid: uid523 +givenname: givenname523 +description: description523 +userPassword: password523 +mail: uid523 +uidnumber: 523 +gidnumber: 523 +homeDirectory: /home/uid523 + +dn: cn=user524,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user524 +sn: user524 +uid: uid524 +givenname: givenname524 +description: description524 +userPassword: password524 +mail: uid524 +uidnumber: 524 +gidnumber: 524 +homeDirectory: /home/uid524 + +dn: cn=user525,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user525 +sn: user525 +uid: uid525 +givenname: givenname525 +description: description525 +userPassword: password525 +mail: uid525 +uidnumber: 525 +gidnumber: 525 +homeDirectory: /home/uid525 + +dn: cn=user526,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user526 +sn: user526 +uid: uid526 +givenname: givenname526 +description: description526 +userPassword: 
password526 +mail: uid526 +uidnumber: 526 +gidnumber: 526 +homeDirectory: /home/uid526 + +dn: cn=user527,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user527 +sn: user527 +uid: uid527 +givenname: givenname527 +description: description527 +userPassword: password527 +mail: uid527 +uidnumber: 527 +gidnumber: 527 +homeDirectory: /home/uid527 + +dn: cn=user528,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user528 +sn: user528 +uid: uid528 +givenname: givenname528 +description: description528 +userPassword: password528 +mail: uid528 +uidnumber: 528 +gidnumber: 528 +homeDirectory: /home/uid528 + +dn: cn=user529,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user529 +sn: user529 +uid: uid529 +givenname: givenname529 +description: description529 +userPassword: password529 +mail: uid529 +uidnumber: 529 +gidnumber: 529 +homeDirectory: /home/uid529 + +dn: cn=user530,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user530 +sn: user530 +uid: uid530 +givenname: givenname530 +description: description530 +userPassword: password530 +mail: uid530 +uidnumber: 530 +gidnumber: 530 +homeDirectory: /home/uid530 + +dn: cn=user531,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user531 +sn: user531 +uid: uid531 +givenname: givenname531 +description: description531 +userPassword: password531 +mail: uid531 +uidnumber: 531 +gidnumber: 531 +homeDirectory: /home/uid531 + +dn: cn=user532,ou=People,dc=example,dc=com +objectClass: 
top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user532 +sn: user532 +uid: uid532 +givenname: givenname532 +description: description532 +userPassword: password532 +mail: uid532 +uidnumber: 532 +gidnumber: 532 +homeDirectory: /home/uid532 + +dn: cn=user533,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user533 +sn: user533 +uid: uid533 +givenname: givenname533 +description: description533 +userPassword: password533 +mail: uid533 +uidnumber: 533 +gidnumber: 533 +homeDirectory: /home/uid533 + +dn: cn=user534,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user534 +sn: user534 +uid: uid534 +givenname: givenname534 +description: description534 +userPassword: password534 +mail: uid534 +uidnumber: 534 +gidnumber: 534 +homeDirectory: /home/uid534 + +dn: cn=user535,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user535 +sn: user535 +uid: uid535 +givenname: givenname535 +description: description535 +userPassword: password535 +mail: uid535 +uidnumber: 535 +gidnumber: 535 +homeDirectory: /home/uid535 + +dn: cn=user536,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user536 +sn: user536 +uid: uid536 +givenname: givenname536 +description: description536 +userPassword: password536 +mail: uid536 +uidnumber: 536 +gidnumber: 536 +homeDirectory: /home/uid536 + +dn: cn=user537,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user537 +sn: user537 +uid: 
uid537 +givenname: givenname537 +description: description537 +userPassword: password537 +mail: uid537 +uidnumber: 537 +gidnumber: 537 +homeDirectory: /home/uid537 + +dn: cn=user538,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user538 +sn: user538 +uid: uid538 +givenname: givenname538 +description: description538 +userPassword: password538 +mail: uid538 +uidnumber: 538 +gidnumber: 538 +homeDirectory: /home/uid538 + +dn: cn=user539,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user539 +sn: user539 +uid: uid539 +givenname: givenname539 +description: description539 +userPassword: password539 +mail: uid539 +uidnumber: 539 +gidnumber: 539 +homeDirectory: /home/uid539 + +dn: cn=user540,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user540 +sn: user540 +uid: uid540 +givenname: givenname540 +description: description540 +userPassword: password540 +mail: uid540 +uidnumber: 540 +gidnumber: 540 +homeDirectory: /home/uid540 + +dn: cn=user541,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user541 +sn: user541 +uid: uid541 +givenname: givenname541 +description: description541 +userPassword: password541 +mail: uid541 +uidnumber: 541 +gidnumber: 541 +homeDirectory: /home/uid541 + +dn: cn=user542,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user542 +sn: user542 +uid: uid542 +givenname: givenname542 +description: description542 +userPassword: password542 +mail: uid542 +uidnumber: 542 +gidnumber: 542 +homeDirectory: 
/home/uid542 + +dn: cn=user543,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user543 +sn: user543 +uid: uid543 +givenname: givenname543 +description: description543 +userPassword: password543 +mail: uid543 +uidnumber: 543 +gidnumber: 543 +homeDirectory: /home/uid543 + +dn: cn=user544,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user544 +sn: user544 +uid: uid544 +givenname: givenname544 +description: description544 +userPassword: password544 +mail: uid544 +uidnumber: 544 +gidnumber: 544 +homeDirectory: /home/uid544 + +dn: cn=user545,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user545 +sn: user545 +uid: uid545 +givenname: givenname545 +description: description545 +userPassword: password545 +mail: uid545 +uidnumber: 545 +gidnumber: 545 +homeDirectory: /home/uid545 + +dn: cn=user546,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user546 +sn: user546 +uid: uid546 +givenname: givenname546 +description: description546 +userPassword: password546 +mail: uid546 +uidnumber: 546 +gidnumber: 546 +homeDirectory: /home/uid546 + +dn: cn=user547,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user547 +sn: user547 +uid: uid547 +givenname: givenname547 +description: description547 +userPassword: password547 +mail: uid547 +uidnumber: 547 +gidnumber: 547 +homeDirectory: /home/uid547 + +dn: cn=user548,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: 
inetOrgPerson +objectClass: posixAccount +cn: user548 +sn: user548 +uid: uid548 +givenname: givenname548 +description: description548 +userPassword: password548 +mail: uid548 +uidnumber: 548 +gidnumber: 548 +homeDirectory: /home/uid548 + +dn: cn=user549,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user549 +sn: user549 +uid: uid549 +givenname: givenname549 +description: description549 +userPassword: password549 +mail: uid549 +uidnumber: 549 +gidnumber: 549 +homeDirectory: /home/uid549 + +dn: cn=user550,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user550 +sn: user550 +uid: uid550 +givenname: givenname550 +description: description550 +userPassword: password550 +mail: uid550 +uidnumber: 550 +gidnumber: 550 +homeDirectory: /home/uid550 + +dn: cn=user551,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user551 +sn: user551 +uid: uid551 +givenname: givenname551 +description: description551 +userPassword: password551 +mail: uid551 +uidnumber: 551 +gidnumber: 551 +homeDirectory: /home/uid551 + +dn: cn=user552,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user552 +sn: user552 +uid: uid552 +givenname: givenname552 +description: description552 +userPassword: password552 +mail: uid552 +uidnumber: 552 +gidnumber: 552 +homeDirectory: /home/uid552 + +dn: cn=user553,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user553 +sn: user553 +uid: uid553 +givenname: givenname553 +description: description553 +userPassword: 
password553 +mail: uid553 +uidnumber: 553 +gidnumber: 553 +homeDirectory: /home/uid553 + +dn: cn=user554,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user554 +sn: user554 +uid: uid554 +givenname: givenname554 +description: description554 +userPassword: password554 +mail: uid554 +uidnumber: 554 +gidnumber: 554 +homeDirectory: /home/uid554 + +dn: cn=user555,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user555 +sn: user555 +uid: uid555 +givenname: givenname555 +description: description555 +userPassword: password555 +mail: uid555 +uidnumber: 555 +gidnumber: 555 +homeDirectory: /home/uid555 + +dn: cn=user556,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user556 +sn: user556 +uid: uid556 +givenname: givenname556 +description: description556 +userPassword: password556 +mail: uid556 +uidnumber: 556 +gidnumber: 556 +homeDirectory: /home/uid556 + +dn: cn=user557,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user557 +sn: user557 +uid: uid557 +givenname: givenname557 +description: description557 +userPassword: password557 +mail: uid557 +uidnumber: 557 +gidnumber: 557 +homeDirectory: /home/uid557 + +dn: cn=user558,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user558 +sn: user558 +uid: uid558 +givenname: givenname558 +description: description558 +userPassword: password558 +mail: uid558 +uidnumber: 558 +gidnumber: 558 +homeDirectory: /home/uid558 + +dn: cn=user559,ou=People,dc=example,dc=com +objectClass: 
top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user559 +sn: user559 +uid: uid559 +givenname: givenname559 +description: description559 +userPassword: password559 +mail: uid559 +uidnumber: 559 +gidnumber: 559 +homeDirectory: /home/uid559 + +dn: cn=user560,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user560 +sn: user560 +uid: uid560 +givenname: givenname560 +description: description560 +userPassword: password560 +mail: uid560 +uidnumber: 560 +gidnumber: 560 +homeDirectory: /home/uid560 + +dn: cn=user561,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user561 +sn: user561 +uid: uid561 +givenname: givenname561 +description: description561 +userPassword: password561 +mail: uid561 +uidnumber: 561 +gidnumber: 561 +homeDirectory: /home/uid561 + +dn: cn=user562,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user562 +sn: user562 +uid: uid562 +givenname: givenname562 +description: description562 +userPassword: password562 +mail: uid562 +uidnumber: 562 +gidnumber: 562 +homeDirectory: /home/uid562 + +dn: cn=user563,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user563 +sn: user563 +uid: uid563 +givenname: givenname563 +description: description563 +userPassword: password563 +mail: uid563 +uidnumber: 563 +gidnumber: 563 +homeDirectory: /home/uid563 + +dn: cn=user564,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user564 +sn: user564 +uid: 
uid564 +givenname: givenname564 +description: description564 +userPassword: password564 +mail: uid564 +uidnumber: 564 +gidnumber: 564 +homeDirectory: /home/uid564 + +dn: cn=user565,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user565 +sn: user565 +uid: uid565 +givenname: givenname565 +description: description565 +userPassword: password565 +mail: uid565 +uidnumber: 565 +gidnumber: 565 +homeDirectory: /home/uid565 + +dn: cn=user566,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user566 +sn: user566 +uid: uid566 +givenname: givenname566 +description: description566 +userPassword: password566 +mail: uid566 +uidnumber: 566 +gidnumber: 566 +homeDirectory: /home/uid566 + +dn: cn=user567,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user567 +sn: user567 +uid: uid567 +givenname: givenname567 +description: description567 +userPassword: password567 +mail: uid567 +uidnumber: 567 +gidnumber: 567 +homeDirectory: /home/uid567 + +dn: cn=user568,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user568 +sn: user568 +uid: uid568 +givenname: givenname568 +description: description568 +userPassword: password568 +mail: uid568 +uidnumber: 568 +gidnumber: 568 +homeDirectory: /home/uid568 + +dn: cn=user569,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user569 +sn: user569 +uid: uid569 +givenname: givenname569 +description: description569 +userPassword: password569 +mail: uid569 +uidnumber: 569 +gidnumber: 569 +homeDirectory: 
/home/uid569 + +dn: cn=user570,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user570 +sn: user570 +uid: uid570 +givenname: givenname570 +description: description570 +userPassword: password570 +mail: uid570 +uidnumber: 570 +gidnumber: 570 +homeDirectory: /home/uid570 + +dn: cn=user571,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user571 +sn: user571 +uid: uid571 +givenname: givenname571 +description: description571 +userPassword: password571 +mail: uid571 +uidnumber: 571 +gidnumber: 571 +homeDirectory: /home/uid571 + +dn: cn=user572,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user572 +sn: user572 +uid: uid572 +givenname: givenname572 +description: description572 +userPassword: password572 +mail: uid572 +uidnumber: 572 +gidnumber: 572 +homeDirectory: /home/uid572 + +dn: cn=user573,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user573 +sn: user573 +uid: uid573 +givenname: givenname573 +description: description573 +userPassword: password573 +mail: uid573 +uidnumber: 573 +gidnumber: 573 +homeDirectory: /home/uid573 + +dn: cn=user574,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user574 +sn: user574 +uid: uid574 +givenname: givenname574 +description: description574 +userPassword: password574 +mail: uid574 +uidnumber: 574 +gidnumber: 574 +homeDirectory: /home/uid574 + +dn: cn=user575,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: 
inetOrgPerson +objectClass: posixAccount +cn: user575 +sn: user575 +uid: uid575 +givenname: givenname575 +description: description575 +userPassword: password575 +mail: uid575 +uidnumber: 575 +gidnumber: 575 +homeDirectory: /home/uid575 + +dn: cn=user576,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user576 +sn: user576 +uid: uid576 +givenname: givenname576 +description: description576 +userPassword: password576 +mail: uid576 +uidnumber: 576 +gidnumber: 576 +homeDirectory: /home/uid576 + +dn: cn=user577,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user577 +sn: user577 +uid: uid577 +givenname: givenname577 +description: description577 +userPassword: password577 +mail: uid577 +uidnumber: 577 +gidnumber: 577 +homeDirectory: /home/uid577 + +dn: cn=user578,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user578 +sn: user578 +uid: uid578 +givenname: givenname578 +description: description578 +userPassword: password578 +mail: uid578 +uidnumber: 578 +gidnumber: 578 +homeDirectory: /home/uid578 + +dn: cn=user579,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user579 +sn: user579 +uid: uid579 +givenname: givenname579 +description: description579 +userPassword: password579 +mail: uid579 +uidnumber: 579 +gidnumber: 579 +homeDirectory: /home/uid579 + +dn: cn=user580,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user580 +sn: user580 +uid: uid580 +givenname: givenname580 +description: description580 +userPassword: 
password580 +mail: uid580 +uidnumber: 580 +gidnumber: 580 +homeDirectory: /home/uid580 + +dn: cn=user581,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user581 +sn: user581 +uid: uid581 +givenname: givenname581 +description: description581 +userPassword: password581 +mail: uid581 +uidnumber: 581 +gidnumber: 581 +homeDirectory: /home/uid581 + +dn: cn=user582,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user582 +sn: user582 +uid: uid582 +givenname: givenname582 +description: description582 +userPassword: password582 +mail: uid582 +uidnumber: 582 +gidnumber: 582 +homeDirectory: /home/uid582 + +dn: cn=user583,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user583 +sn: user583 +uid: uid583 +givenname: givenname583 +description: description583 +userPassword: password583 +mail: uid583 +uidnumber: 583 +gidnumber: 583 +homeDirectory: /home/uid583 + +dn: cn=user584,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user584 +sn: user584 +uid: uid584 +givenname: givenname584 +description: description584 +userPassword: password584 +mail: uid584 +uidnumber: 584 +gidnumber: 584 +homeDirectory: /home/uid584 + +dn: cn=user585,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user585 +sn: user585 +uid: uid585 +givenname: givenname585 +description: description585 +userPassword: password585 +mail: uid585 +uidnumber: 585 +gidnumber: 585 +homeDirectory: /home/uid585 + +dn: cn=user586,ou=People,dc=example,dc=com +objectClass: 
top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user586 +sn: user586 +uid: uid586 +givenname: givenname586 +description: description586 +userPassword: password586 +mail: uid586 +uidnumber: 586 +gidnumber: 586 +homeDirectory: /home/uid586 + +dn: cn=user587,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user587 +sn: user587 +uid: uid587 +givenname: givenname587 +description: description587 +userPassword: password587 +mail: uid587 +uidnumber: 587 +gidnumber: 587 +homeDirectory: /home/uid587 + +dn: cn=user588,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user588 +sn: user588 +uid: uid588 +givenname: givenname588 +description: description588 +userPassword: password588 +mail: uid588 +uidnumber: 588 +gidnumber: 588 +homeDirectory: /home/uid588 + +dn: cn=user589,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user589 +sn: user589 +uid: uid589 +givenname: givenname589 +description: description589 +userPassword: password589 +mail: uid589 +uidnumber: 589 +gidnumber: 589 +homeDirectory: /home/uid589 + +dn: cn=user590,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user590 +sn: user590 +uid: uid590 +givenname: givenname590 +description: description590 +userPassword: password590 +mail: uid590 +uidnumber: 590 +gidnumber: 590 +homeDirectory: /home/uid590 + +dn: cn=user591,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user591 +sn: user591 +uid: 
uid591 +givenname: givenname591 +description: description591 +userPassword: password591 +mail: uid591 +uidnumber: 591 +gidnumber: 591 +homeDirectory: /home/uid591 + +dn: cn=user592,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user592 +sn: user592 +uid: uid592 +givenname: givenname592 +description: description592 +userPassword: password592 +mail: uid592 +uidnumber: 592 +gidnumber: 592 +homeDirectory: /home/uid592 + +dn: cn=user593,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user593 +sn: user593 +uid: uid593 +givenname: givenname593 +description: description593 +userPassword: password593 +mail: uid593 +uidnumber: 593 +gidnumber: 593 +homeDirectory: /home/uid593 + +dn: cn=user594,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user594 +sn: user594 +uid: uid594 +givenname: givenname594 +description: description594 +userPassword: password594 +mail: uid594 +uidnumber: 594 +gidnumber: 594 +homeDirectory: /home/uid594 + +dn: cn=user595,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user595 +sn: user595 +uid: uid595 +givenname: givenname595 +description: description595 +userPassword: password595 +mail: uid595 +uidnumber: 595 +gidnumber: 595 +homeDirectory: /home/uid595 + +dn: cn=user596,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user596 +sn: user596 +uid: uid596 +givenname: givenname596 +description: description596 +userPassword: password596 +mail: uid596 +uidnumber: 596 +gidnumber: 596 +homeDirectory: 
/home/uid596 + +dn: cn=user597,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user597 +sn: user597 +uid: uid597 +givenname: givenname597 +description: description597 +userPassword: password597 +mail: uid597 +uidnumber: 597 +gidnumber: 597 +homeDirectory: /home/uid597 + +dn: cn=user598,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user598 +sn: user598 +uid: uid598 +givenname: givenname598 +description: description598 +userPassword: password598 +mail: uid598 +uidnumber: 598 +gidnumber: 598 +homeDirectory: /home/uid598 + +dn: cn=user599,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user599 +sn: user599 +uid: uid599 +givenname: givenname599 +description: description599 +userPassword: password599 +mail: uid599 +uidnumber: 599 +gidnumber: 599 +homeDirectory: /home/uid599 + +dn: cn=user600,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user600 +sn: user600 +uid: uid600 +givenname: givenname600 +description: description600 +userPassword: password600 +mail: uid600 +uidnumber: 600 +gidnumber: 600 +homeDirectory: /home/uid600 + +dn: cn=user601,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user601 +sn: user601 +uid: uid601 +givenname: givenname601 +description: description601 +userPassword: password601 +mail: uid601 +uidnumber: 601 +gidnumber: 601 +homeDirectory: /home/uid601 + +dn: cn=user602,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: 
inetOrgPerson +objectClass: posixAccount +cn: user602 +sn: user602 +uid: uid602 +givenname: givenname602 +description: description602 +userPassword: password602 +mail: uid602 +uidnumber: 602 +gidnumber: 602 +homeDirectory: /home/uid602 + +dn: cn=user603,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user603 +sn: user603 +uid: uid603 +givenname: givenname603 +description: description603 +userPassword: password603 +mail: uid603 +uidnumber: 603 +gidnumber: 603 +homeDirectory: /home/uid603 + +dn: cn=user604,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user604 +sn: user604 +uid: uid604 +givenname: givenname604 +description: description604 +userPassword: password604 +mail: uid604 +uidnumber: 604 +gidnumber: 604 +homeDirectory: /home/uid604 + +dn: cn=user605,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user605 +sn: user605 +uid: uid605 +givenname: givenname605 +description: description605 +userPassword: password605 +mail: uid605 +uidnumber: 605 +gidnumber: 605 +homeDirectory: /home/uid605 + +dn: cn=user606,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user606 +sn: user606 +uid: uid606 +givenname: givenname606 +description: description606 +userPassword: password606 +mail: uid606 +uidnumber: 606 +gidnumber: 606 +homeDirectory: /home/uid606 + +dn: cn=user607,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user607 +sn: user607 +uid: uid607 +givenname: givenname607 +description: description607 +userPassword: 
password607 +mail: uid607 +uidnumber: 607 +gidnumber: 607 +homeDirectory: /home/uid607 + +dn: cn=user608,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user608 +sn: user608 +uid: uid608 +givenname: givenname608 +description: description608 +userPassword: password608 +mail: uid608 +uidnumber: 608 +gidnumber: 608 +homeDirectory: /home/uid608 + +dn: cn=user609,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user609 +sn: user609 +uid: uid609 +givenname: givenname609 +description: description609 +userPassword: password609 +mail: uid609 +uidnumber: 609 +gidnumber: 609 +homeDirectory: /home/uid609 + +dn: cn=user610,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user610 +sn: user610 +uid: uid610 +givenname: givenname610 +description: description610 +userPassword: password610 +mail: uid610 +uidnumber: 610 +gidnumber: 610 +homeDirectory: /home/uid610 + +dn: cn=user611,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user611 +sn: user611 +uid: uid611 +givenname: givenname611 +description: description611 +userPassword: password611 +mail: uid611 +uidnumber: 611 +gidnumber: 611 +homeDirectory: /home/uid611 + +dn: cn=user612,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user612 +sn: user612 +uid: uid612 +givenname: givenname612 +description: description612 +userPassword: password612 +mail: uid612 +uidnumber: 612 +gidnumber: 612 +homeDirectory: /home/uid612 + +dn: cn=user613,ou=People,dc=example,dc=com +objectClass: 
top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user613 +sn: user613 +uid: uid613 +givenname: givenname613 +description: description613 +userPassword: password613 +mail: uid613 +uidnumber: 613 +gidnumber: 613 +homeDirectory: /home/uid613 + +dn: cn=user614,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user614 +sn: user614 +uid: uid614 +givenname: givenname614 +description: description614 +userPassword: password614 +mail: uid614 +uidnumber: 614 +gidnumber: 614 +homeDirectory: /home/uid614 + +dn: cn=user615,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user615 +sn: user615 +uid: uid615 +givenname: givenname615 +description: description615 +userPassword: password615 +mail: uid615 +uidnumber: 615 +gidnumber: 615 +homeDirectory: /home/uid615 + +dn: cn=user616,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user616 +sn: user616 +uid: uid616 +givenname: givenname616 +description: description616 +userPassword: password616 +mail: uid616 +uidnumber: 616 +gidnumber: 616 +homeDirectory: /home/uid616 + +dn: cn=user617,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user617 +sn: user617 +uid: uid617 +givenname: givenname617 +description: description617 +userPassword: password617 +mail: uid617 +uidnumber: 617 +gidnumber: 617 +homeDirectory: /home/uid617 + +dn: cn=user618,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user618 +sn: user618 +uid: 
uid618 +givenname: givenname618 +description: description618 +userPassword: password618 +mail: uid618 +uidnumber: 618 +gidnumber: 618 +homeDirectory: /home/uid618 + +dn: cn=user619,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user619 +sn: user619 +uid: uid619 +givenname: givenname619 +description: description619 +userPassword: password619 +mail: uid619 +uidnumber: 619 +gidnumber: 619 +homeDirectory: /home/uid619 + +dn: cn=user620,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user620 +sn: user620 +uid: uid620 +givenname: givenname620 +description: description620 +userPassword: password620 +mail: uid620 +uidnumber: 620 +gidnumber: 620 +homeDirectory: /home/uid620 + +dn: cn=user621,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user621 +sn: user621 +uid: uid621 +givenname: givenname621 +description: description621 +userPassword: password621 +mail: uid621 +uidnumber: 621 +gidnumber: 621 +homeDirectory: /home/uid621 + +dn: cn=user622,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user622 +sn: user622 +uid: uid622 +givenname: givenname622 +description: description622 +userPassword: password622 +mail: uid622 +uidnumber: 622 +gidnumber: 622 +homeDirectory: /home/uid622 + +dn: cn=user623,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user623 +sn: user623 +uid: uid623 +givenname: givenname623 +description: description623 +userPassword: password623 +mail: uid623 +uidnumber: 623 +gidnumber: 623 +homeDirectory: 
/home/uid623 + +dn: cn=user624,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user624 +sn: user624 +uid: uid624 +givenname: givenname624 +description: description624 +userPassword: password624 +mail: uid624 +uidnumber: 624 +gidnumber: 624 +homeDirectory: /home/uid624 + +dn: cn=user625,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user625 +sn: user625 +uid: uid625 +givenname: givenname625 +description: description625 +userPassword: password625 +mail: uid625 +uidnumber: 625 +gidnumber: 625 +homeDirectory: /home/uid625 + +dn: cn=user626,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user626 +sn: user626 +uid: uid626 +givenname: givenname626 +description: description626 +userPassword: password626 +mail: uid626 +uidnumber: 626 +gidnumber: 626 +homeDirectory: /home/uid626 + +dn: cn=user627,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user627 +sn: user627 +uid: uid627 +givenname: givenname627 +description: description627 +userPassword: password627 +mail: uid627 +uidnumber: 627 +gidnumber: 627 +homeDirectory: /home/uid627 + +dn: cn=user628,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user628 +sn: user628 +uid: uid628 +givenname: givenname628 +description: description628 +userPassword: password628 +mail: uid628 +uidnumber: 628 +gidnumber: 628 +homeDirectory: /home/uid628 + +dn: cn=user629,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: 
inetOrgPerson +objectClass: posixAccount +cn: user629 +sn: user629 +uid: uid629 +givenname: givenname629 +description: description629 +userPassword: password629 +mail: uid629 +uidnumber: 629 +gidnumber: 629 +homeDirectory: /home/uid629 + +dn: cn=user630,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user630 +sn: user630 +uid: uid630 +givenname: givenname630 +description: description630 +userPassword: password630 +mail: uid630 +uidnumber: 630 +gidnumber: 630 +homeDirectory: /home/uid630 + +dn: cn=user631,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user631 +sn: user631 +uid: uid631 +givenname: givenname631 +description: description631 +userPassword: password631 +mail: uid631 +uidnumber: 631 +gidnumber: 631 +homeDirectory: /home/uid631 + +dn: cn=user632,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user632 +sn: user632 +uid: uid632 +givenname: givenname632 +description: description632 +userPassword: password632 +mail: uid632 +uidnumber: 632 +gidnumber: 632 +homeDirectory: /home/uid632 + +dn: cn=user633,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user633 +sn: user633 +uid: uid633 +givenname: givenname633 +description: description633 +userPassword: password633 +mail: uid633 +uidnumber: 633 +gidnumber: 633 +homeDirectory: /home/uid633 + +dn: cn=user634,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user634 +sn: user634 +uid: uid634 +givenname: givenname634 +description: description634 +userPassword: 
password634 +mail: uid634 +uidnumber: 634 +gidnumber: 634 +homeDirectory: /home/uid634 + +dn: cn=user635,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user635 +sn: user635 +uid: uid635 +givenname: givenname635 +description: description635 +userPassword: password635 +mail: uid635 +uidnumber: 635 +gidnumber: 635 +homeDirectory: /home/uid635 + +dn: cn=user636,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user636 +sn: user636 +uid: uid636 +givenname: givenname636 +description: description636 +userPassword: password636 +mail: uid636 +uidnumber: 636 +gidnumber: 636 +homeDirectory: /home/uid636 + +dn: cn=user637,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user637 +sn: user637 +uid: uid637 +givenname: givenname637 +description: description637 +userPassword: password637 +mail: uid637 +uidnumber: 637 +gidnumber: 637 +homeDirectory: /home/uid637 + +dn: cn=user638,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user638 +sn: user638 +uid: uid638 +givenname: givenname638 +description: description638 +userPassword: password638 +mail: uid638 +uidnumber: 638 +gidnumber: 638 +homeDirectory: /home/uid638 + +dn: cn=user639,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user639 +sn: user639 +uid: uid639 +givenname: givenname639 +description: description639 +userPassword: password639 +mail: uid639 +uidnumber: 639 +gidnumber: 639 +homeDirectory: /home/uid639 + +dn: cn=user640,ou=People,dc=example,dc=com +objectClass: 
top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user640 +sn: user640 +uid: uid640 +givenname: givenname640 +description: description640 +userPassword: password640 +mail: uid640 +uidnumber: 640 +gidnumber: 640 +homeDirectory: /home/uid640 + +dn: cn=user641,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user641 +sn: user641 +uid: uid641 +givenname: givenname641 +description: description641 +userPassword: password641 +mail: uid641 +uidnumber: 641 +gidnumber: 641 +homeDirectory: /home/uid641 + +dn: cn=user642,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user642 +sn: user642 +uid: uid642 +givenname: givenname642 +description: description642 +userPassword: password642 +mail: uid642 +uidnumber: 642 +gidnumber: 642 +homeDirectory: /home/uid642 + +dn: cn=user643,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user643 +sn: user643 +uid: uid643 +givenname: givenname643 +description: description643 +userPassword: password643 +mail: uid643 +uidnumber: 643 +gidnumber: 643 +homeDirectory: /home/uid643 + +dn: cn=user644,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user644 +sn: user644 +uid: uid644 +givenname: givenname644 +description: description644 +userPassword: password644 +mail: uid644 +uidnumber: 644 +gidnumber: 644 +homeDirectory: /home/uid644 + +dn: cn=user645,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user645 +sn: user645 +uid: 
uid645 +givenname: givenname645 +description: description645 +userPassword: password645 +mail: uid645 +uidnumber: 645 +gidnumber: 645 +homeDirectory: /home/uid645 + +dn: cn=user646,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user646 +sn: user646 +uid: uid646 +givenname: givenname646 +description: description646 +userPassword: password646 +mail: uid646 +uidnumber: 646 +gidnumber: 646 +homeDirectory: /home/uid646 + +dn: cn=user647,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user647 +sn: user647 +uid: uid647 +givenname: givenname647 +description: description647 +userPassword: password647 +mail: uid647 +uidnumber: 647 +gidnumber: 647 +homeDirectory: /home/uid647 + +dn: cn=user648,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user648 +sn: user648 +uid: uid648 +givenname: givenname648 +description: description648 +userPassword: password648 +mail: uid648 +uidnumber: 648 +gidnumber: 648 +homeDirectory: /home/uid648 + +dn: cn=user649,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user649 +sn: user649 +uid: uid649 +givenname: givenname649 +description: description649 +userPassword: password649 +mail: uid649 +uidnumber: 649 +gidnumber: 649 +homeDirectory: /home/uid649 + +dn: cn=user650,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user650 +sn: user650 +uid: uid650 +givenname: givenname650 +description: description650 +userPassword: password650 +mail: uid650 +uidnumber: 650 +gidnumber: 650 +homeDirectory: 
/home/uid650 + +dn: cn=user651,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user651 +sn: user651 +uid: uid651 +givenname: givenname651 +description: description651 +userPassword: password651 +mail: uid651 +uidnumber: 651 +gidnumber: 651 +homeDirectory: /home/uid651 + +dn: cn=user652,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user652 +sn: user652 +uid: uid652 +givenname: givenname652 +description: description652 +userPassword: password652 +mail: uid652 +uidnumber: 652 +gidnumber: 652 +homeDirectory: /home/uid652 + +dn: cn=user653,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user653 +sn: user653 +uid: uid653 +givenname: givenname653 +description: description653 +userPassword: password653 +mail: uid653 +uidnumber: 653 +gidnumber: 653 +homeDirectory: /home/uid653 + +dn: cn=user654,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user654 +sn: user654 +uid: uid654 +givenname: givenname654 +description: description654 +userPassword: password654 +mail: uid654 +uidnumber: 654 +gidnumber: 654 +homeDirectory: /home/uid654 + +dn: cn=user655,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user655 +sn: user655 +uid: uid655 +givenname: givenname655 +description: description655 +userPassword: password655 +mail: uid655 +uidnumber: 655 +gidnumber: 655 +homeDirectory: /home/uid655 + +dn: cn=user656,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: 
inetOrgPerson +objectClass: posixAccount +cn: user656 +sn: user656 +uid: uid656 +givenname: givenname656 +description: description656 +userPassword: password656 +mail: uid656 +uidnumber: 656 +gidnumber: 656 +homeDirectory: /home/uid656 + +dn: cn=user657,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user657 +sn: user657 +uid: uid657 +givenname: givenname657 +description: description657 +userPassword: password657 +mail: uid657 +uidnumber: 657 +gidnumber: 657 +homeDirectory: /home/uid657 + +dn: cn=user658,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user658 +sn: user658 +uid: uid658 +givenname: givenname658 +description: description658 +userPassword: password658 +mail: uid658 +uidnumber: 658 +gidnumber: 658 +homeDirectory: /home/uid658 + +dn: cn=user659,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user659 +sn: user659 +uid: uid659 +givenname: givenname659 +description: description659 +userPassword: password659 +mail: uid659 +uidnumber: 659 +gidnumber: 659 +homeDirectory: /home/uid659 + +dn: cn=user660,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user660 +sn: user660 +uid: uid660 +givenname: givenname660 +description: description660 +userPassword: password660 +mail: uid660 +uidnumber: 660 +gidnumber: 660 +homeDirectory: /home/uid660 + +dn: cn=user661,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user661 +sn: user661 +uid: uid661 +givenname: givenname661 +description: description661 +userPassword: 
password661 +mail: uid661 +uidnumber: 661 +gidnumber: 661 +homeDirectory: /home/uid661 + +dn: cn=user662,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user662 +sn: user662 +uid: uid662 +givenname: givenname662 +description: description662 +userPassword: password662 +mail: uid662 +uidnumber: 662 +gidnumber: 662 +homeDirectory: /home/uid662 + +dn: cn=user663,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user663 +sn: user663 +uid: uid663 +givenname: givenname663 +description: description663 +userPassword: password663 +mail: uid663 +uidnumber: 663 +gidnumber: 663 +homeDirectory: /home/uid663 + +dn: cn=user664,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user664 +sn: user664 +uid: uid664 +givenname: givenname664 +description: description664 +userPassword: password664 +mail: uid664 +uidnumber: 664 +gidnumber: 664 +homeDirectory: /home/uid664 + +dn: cn=user665,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user665 +sn: user665 +uid: uid665 +givenname: givenname665 +description: description665 +userPassword: password665 +mail: uid665 +uidnumber: 665 +gidnumber: 665 +homeDirectory: /home/uid665 + +dn: cn=user666,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user666 +sn: user666 +uid: uid666 +givenname: givenname666 +description: description666 +userPassword: password666 +mail: uid666 +uidnumber: 666 +gidnumber: 666 +homeDirectory: /home/uid666 + +dn: cn=user667,ou=People,dc=example,dc=com +objectClass: 
top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user667 +sn: user667 +uid: uid667 +givenname: givenname667 +description: description667 +userPassword: password667 +mail: uid667 +uidnumber: 667 +gidnumber: 667 +homeDirectory: /home/uid667 + +dn: cn=user668,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user668 +sn: user668 +uid: uid668 +givenname: givenname668 +description: description668 +userPassword: password668 +mail: uid668 +uidnumber: 668 +gidnumber: 668 +homeDirectory: /home/uid668 + +dn: cn=user669,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user669 +sn: user669 +uid: uid669 +givenname: givenname669 +description: description669 +userPassword: password669 +mail: uid669 +uidnumber: 669 +gidnumber: 669 +homeDirectory: /home/uid669 + +dn: cn=user670,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user670 +sn: user670 +uid: uid670 +givenname: givenname670 +description: description670 +userPassword: password670 +mail: uid670 +uidnumber: 670 +gidnumber: 670 +homeDirectory: /home/uid670 + +dn: cn=user671,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user671 +sn: user671 +uid: uid671 +givenname: givenname671 +description: description671 +userPassword: password671 +mail: uid671 +uidnumber: 671 +gidnumber: 671 +homeDirectory: /home/uid671 + +dn: cn=user672,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user672 +sn: user672 +uid: 
uid672 +givenname: givenname672 +description: description672 +userPassword: password672 +mail: uid672 +uidnumber: 672 +gidnumber: 672 +homeDirectory: /home/uid672 + +dn: cn=user673,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user673 +sn: user673 +uid: uid673 +givenname: givenname673 +description: description673 +userPassword: password673 +mail: uid673 +uidnumber: 673 +gidnumber: 673 +homeDirectory: /home/uid673 + +dn: cn=user674,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user674 +sn: user674 +uid: uid674 +givenname: givenname674 +description: description674 +userPassword: password674 +mail: uid674 +uidnumber: 674 +gidnumber: 674 +homeDirectory: /home/uid674 + +dn: cn=user675,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user675 +sn: user675 +uid: uid675 +givenname: givenname675 +description: description675 +userPassword: password675 +mail: uid675 +uidnumber: 675 +gidnumber: 675 +homeDirectory: /home/uid675 + +dn: cn=user676,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user676 +sn: user676 +uid: uid676 +givenname: givenname676 +description: description676 +userPassword: password676 +mail: uid676 +uidnumber: 676 +gidnumber: 676 +homeDirectory: /home/uid676 + +dn: cn=user677,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user677 +sn: user677 +uid: uid677 +givenname: givenname677 +description: description677 +userPassword: password677 +mail: uid677 +uidnumber: 677 +gidnumber: 677 +homeDirectory: 
/home/uid677 + +dn: cn=user678,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user678 +sn: user678 +uid: uid678 +givenname: givenname678 +description: description678 +userPassword: password678 +mail: uid678 +uidnumber: 678 +gidnumber: 678 +homeDirectory: /home/uid678 + +dn: cn=user679,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user679 +sn: user679 +uid: uid679 +givenname: givenname679 +description: description679 +userPassword: password679 +mail: uid679 +uidnumber: 679 +gidnumber: 679 +homeDirectory: /home/uid679 + +dn: cn=user680,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user680 +sn: user680 +uid: uid680 +givenname: givenname680 +description: description680 +userPassword: password680 +mail: uid680 +uidnumber: 680 +gidnumber: 680 +homeDirectory: /home/uid680 + +dn: cn=user681,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user681 +sn: user681 +uid: uid681 +givenname: givenname681 +description: description681 +userPassword: password681 +mail: uid681 +uidnumber: 681 +gidnumber: 681 +homeDirectory: /home/uid681 + +dn: cn=user682,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user682 +sn: user682 +uid: uid682 +givenname: givenname682 +description: description682 +userPassword: password682 +mail: uid682 +uidnumber: 682 +gidnumber: 682 +homeDirectory: /home/uid682 + +dn: cn=user683,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: 
inetOrgPerson +objectClass: posixAccount +cn: user683 +sn: user683 +uid: uid683 +givenname: givenname683 +description: description683 +userPassword: password683 +mail: uid683 +uidnumber: 683 +gidnumber: 683 +homeDirectory: /home/uid683 + +dn: cn=user684,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user684 +sn: user684 +uid: uid684 +givenname: givenname684 +description: description684 +userPassword: password684 +mail: uid684 +uidnumber: 684 +gidnumber: 684 +homeDirectory: /home/uid684 + +dn: cn=user685,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user685 +sn: user685 +uid: uid685 +givenname: givenname685 +description: description685 +userPassword: password685 +mail: uid685 +uidnumber: 685 +gidnumber: 685 +homeDirectory: /home/uid685 + +dn: cn=user686,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user686 +sn: user686 +uid: uid686 +givenname: givenname686 +description: description686 +userPassword: password686 +mail: uid686 +uidnumber: 686 +gidnumber: 686 +homeDirectory: /home/uid686 + +dn: cn=user687,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user687 +sn: user687 +uid: uid687 +givenname: givenname687 +description: description687 +userPassword: password687 +mail: uid687 +uidnumber: 687 +gidnumber: 687 +homeDirectory: /home/uid687 + +dn: cn=user688,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user688 +sn: user688 +uid: uid688 +givenname: givenname688 +description: description688 +userPassword: 
password688 +mail: uid688 +uidnumber: 688 +gidnumber: 688 +homeDirectory: /home/uid688 + +dn: cn=user689,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user689 +sn: user689 +uid: uid689 +givenname: givenname689 +description: description689 +userPassword: password689 +mail: uid689 +uidnumber: 689 +gidnumber: 689 +homeDirectory: /home/uid689 + +dn: cn=user690,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user690 +sn: user690 +uid: uid690 +givenname: givenname690 +description: description690 +userPassword: password690 +mail: uid690 +uidnumber: 690 +gidnumber: 690 +homeDirectory: /home/uid690 + +dn: cn=user691,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user691 +sn: user691 +uid: uid691 +givenname: givenname691 +description: description691 +userPassword: password691 +mail: uid691 +uidnumber: 691 +gidnumber: 691 +homeDirectory: /home/uid691 + +dn: cn=user692,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user692 +sn: user692 +uid: uid692 +givenname: givenname692 +description: description692 +userPassword: password692 +mail: uid692 +uidnumber: 692 +gidnumber: 692 +homeDirectory: /home/uid692 + +dn: cn=user693,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user693 +sn: user693 +uid: uid693 +givenname: givenname693 +description: description693 +userPassword: password693 +mail: uid693 +uidnumber: 693 +gidnumber: 693 +homeDirectory: /home/uid693 + +dn: cn=user694,ou=People,dc=example,dc=com +objectClass: 
top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user694 +sn: user694 +uid: uid694 +givenname: givenname694 +description: description694 +userPassword: password694 +mail: uid694 +uidnumber: 694 +gidnumber: 694 +homeDirectory: /home/uid694 + +dn: cn=user695,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user695 +sn: user695 +uid: uid695 +givenname: givenname695 +description: description695 +userPassword: password695 +mail: uid695 +uidnumber: 695 +gidnumber: 695 +homeDirectory: /home/uid695 + +dn: cn=user696,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user696 +sn: user696 +uid: uid696 +givenname: givenname696 +description: description696 +userPassword: password696 +mail: uid696 +uidnumber: 696 +gidnumber: 696 +homeDirectory: /home/uid696 + +dn: cn=user697,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user697 +sn: user697 +uid: uid697 +givenname: givenname697 +description: description697 +userPassword: password697 +mail: uid697 +uidnumber: 697 +gidnumber: 697 +homeDirectory: /home/uid697 + +dn: cn=user698,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user698 +sn: user698 +uid: uid698 +givenname: givenname698 +description: description698 +userPassword: password698 +mail: uid698 +uidnumber: 698 +gidnumber: 698 +homeDirectory: /home/uid698 + +dn: cn=user699,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user699 +sn: user699 +uid: 
uid699 +givenname: givenname699 +description: description699 +userPassword: password699 +mail: uid699 +uidnumber: 699 +gidnumber: 699 +homeDirectory: /home/uid699 + +dn: cn=user700,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user700 +sn: user700 +uid: uid700 +givenname: givenname700 +description: description700 +userPassword: password700 +mail: uid700 +uidnumber: 700 +gidnumber: 700 +homeDirectory: /home/uid700 + +dn: cn=user701,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user701 +sn: user701 +uid: uid701 +givenname: givenname701 +description: description701 +userPassword: password701 +mail: uid701 +uidnumber: 701 +gidnumber: 701 +homeDirectory: /home/uid701 + +dn: cn=user702,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user702 +sn: user702 +uid: uid702 +givenname: givenname702 +description: description702 +userPassword: password702 +mail: uid702 +uidnumber: 702 +gidnumber: 702 +homeDirectory: /home/uid702 + +dn: cn=user703,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user703 +sn: user703 +uid: uid703 +givenname: givenname703 +description: description703 +userPassword: password703 +mail: uid703 +uidnumber: 703 +gidnumber: 703 +homeDirectory: /home/uid703 + +dn: cn=user704,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user704 +sn: user704 +uid: uid704 +givenname: givenname704 +description: description704 +userPassword: password704 +mail: uid704 +uidnumber: 704 +gidnumber: 704 +homeDirectory: 
/home/uid704 + +dn: cn=user705,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user705 +sn: user705 +uid: uid705 +givenname: givenname705 +description: description705 +userPassword: password705 +mail: uid705 +uidnumber: 705 +gidnumber: 705 +homeDirectory: /home/uid705 + +dn: cn=user706,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user706 +sn: user706 +uid: uid706 +givenname: givenname706 +description: description706 +userPassword: password706 +mail: uid706 +uidnumber: 706 +gidnumber: 706 +homeDirectory: /home/uid706 + +dn: cn=user707,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user707 +sn: user707 +uid: uid707 +givenname: givenname707 +description: description707 +userPassword: password707 +mail: uid707 +uidnumber: 707 +gidnumber: 707 +homeDirectory: /home/uid707 + +dn: cn=user708,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user708 +sn: user708 +uid: uid708 +givenname: givenname708 +description: description708 +userPassword: password708 +mail: uid708 +uidnumber: 708 +gidnumber: 708 +homeDirectory: /home/uid708 + +dn: cn=user709,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user709 +sn: user709 +uid: uid709 +givenname: givenname709 +description: description709 +userPassword: password709 +mail: uid709 +uidnumber: 709 +gidnumber: 709 +homeDirectory: /home/uid709 + +dn: cn=user710,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: 
inetOrgPerson +objectClass: posixAccount +cn: user710 +sn: user710 +uid: uid710 +givenname: givenname710 +description: description710 +userPassword: password710 +mail: uid710 +uidnumber: 710 +gidnumber: 710 +homeDirectory: /home/uid710 + +dn: cn=user711,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user711 +sn: user711 +uid: uid711 +givenname: givenname711 +description: description711 +userPassword: password711 +mail: uid711 +uidnumber: 711 +gidnumber: 711 +homeDirectory: /home/uid711 + +dn: cn=user712,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user712 +sn: user712 +uid: uid712 +givenname: givenname712 +description: description712 +userPassword: password712 +mail: uid712 +uidnumber: 712 +gidnumber: 712 +homeDirectory: /home/uid712 + +dn: cn=user713,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user713 +sn: user713 +uid: uid713 +givenname: givenname713 +description: description713 +userPassword: password713 +mail: uid713 +uidnumber: 713 +gidnumber: 713 +homeDirectory: /home/uid713 + +dn: cn=user714,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user714 +sn: user714 +uid: uid714 +givenname: givenname714 +description: description714 +userPassword: password714 +mail: uid714 +uidnumber: 714 +gidnumber: 714 +homeDirectory: /home/uid714 + +dn: cn=user715,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user715 +sn: user715 +uid: uid715 +givenname: givenname715 +description: description715 +userPassword: 
password715 +mail: uid715 +uidnumber: 715 +gidnumber: 715 +homeDirectory: /home/uid715 + +dn: cn=user716,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user716 +sn: user716 +uid: uid716 +givenname: givenname716 +description: description716 +userPassword: password716 +mail: uid716 +uidnumber: 716 +gidnumber: 716 +homeDirectory: /home/uid716 + +dn: cn=user717,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user717 +sn: user717 +uid: uid717 +givenname: givenname717 +description: description717 +userPassword: password717 +mail: uid717 +uidnumber: 717 +gidnumber: 717 +homeDirectory: /home/uid717 + +dn: cn=user718,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user718 +sn: user718 +uid: uid718 +givenname: givenname718 +description: description718 +userPassword: password718 +mail: uid718 +uidnumber: 718 +gidnumber: 718 +homeDirectory: /home/uid718 + +dn: cn=user719,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user719 +sn: user719 +uid: uid719 +givenname: givenname719 +description: description719 +userPassword: password719 +mail: uid719 +uidnumber: 719 +gidnumber: 719 +homeDirectory: /home/uid719 + +dn: cn=user720,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user720 +sn: user720 +uid: uid720 +givenname: givenname720 +description: description720 +userPassword: password720 +mail: uid720 +uidnumber: 720 +gidnumber: 720 +homeDirectory: /home/uid720 + +dn: cn=user721,ou=People,dc=example,dc=com +objectClass: 
top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user721 +sn: user721 +uid: uid721 +givenname: givenname721 +description: description721 +userPassword: password721 +mail: uid721 +uidnumber: 721 +gidnumber: 721 +homeDirectory: /home/uid721 + +dn: cn=user722,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user722 +sn: user722 +uid: uid722 +givenname: givenname722 +description: description722 +userPassword: password722 +mail: uid722 +uidnumber: 722 +gidnumber: 722 +homeDirectory: /home/uid722 + +dn: cn=user723,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user723 +sn: user723 +uid: uid723 +givenname: givenname723 +description: description723 +userPassword: password723 +mail: uid723 +uidnumber: 723 +gidnumber: 723 +homeDirectory: /home/uid723 + +dn: cn=user724,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user724 +sn: user724 +uid: uid724 +givenname: givenname724 +description: description724 +userPassword: password724 +mail: uid724 +uidnumber: 724 +gidnumber: 724 +homeDirectory: /home/uid724 + +dn: cn=user725,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user725 +sn: user725 +uid: uid725 +givenname: givenname725 +description: description725 +userPassword: password725 +mail: uid725 +uidnumber: 725 +gidnumber: 725 +homeDirectory: /home/uid725 + +dn: cn=user726,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user726 +sn: user726 +uid: 
uid726 +givenname: givenname726 +description: description726 +userPassword: password726 +mail: uid726 +uidnumber: 726 +gidnumber: 726 +homeDirectory: /home/uid726 + +dn: cn=user727,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user727 +sn: user727 +uid: uid727 +givenname: givenname727 +description: description727 +userPassword: password727 +mail: uid727 +uidnumber: 727 +gidnumber: 727 +homeDirectory: /home/uid727 + +dn: cn=user728,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user728 +sn: user728 +uid: uid728 +givenname: givenname728 +description: description728 +userPassword: password728 +mail: uid728 +uidnumber: 728 +gidnumber: 728 +homeDirectory: /home/uid728 + +dn: cn=user729,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user729 +sn: user729 +uid: uid729 +givenname: givenname729 +description: description729 +userPassword: password729 +mail: uid729 +uidnumber: 729 +gidnumber: 729 +homeDirectory: /home/uid729 + +dn: cn=user730,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user730 +sn: user730 +uid: uid730 +givenname: givenname730 +description: description730 +userPassword: password730 +mail: uid730 +uidnumber: 730 +gidnumber: 730 +homeDirectory: /home/uid730 + +dn: cn=user731,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user731 +sn: user731 +uid: uid731 +givenname: givenname731 +description: description731 +userPassword: password731 +mail: uid731 +uidnumber: 731 +gidnumber: 731 +homeDirectory: 
/home/uid731 + +dn: cn=user732,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user732 +sn: user732 +uid: uid732 +givenname: givenname732 +description: description732 +userPassword: password732 +mail: uid732 +uidnumber: 732 +gidnumber: 732 +homeDirectory: /home/uid732 + +dn: cn=user733,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user733 +sn: user733 +uid: uid733 +givenname: givenname733 +description: description733 +userPassword: password733 +mail: uid733 +uidnumber: 733 +gidnumber: 733 +homeDirectory: /home/uid733 + +dn: cn=user734,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user734 +sn: user734 +uid: uid734 +givenname: givenname734 +description: description734 +userPassword: password734 +mail: uid734 +uidnumber: 734 +gidnumber: 734 +homeDirectory: /home/uid734 + +dn: cn=user735,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user735 +sn: user735 +uid: uid735 +givenname: givenname735 +description: description735 +userPassword: password735 +mail: uid735 +uidnumber: 735 +gidnumber: 735 +homeDirectory: /home/uid735 + +dn: cn=user736,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user736 +sn: user736 +uid: uid736 +givenname: givenname736 +description: description736 +userPassword: password736 +mail: uid736 +uidnumber: 736 +gidnumber: 736 +homeDirectory: /home/uid736 + +dn: cn=user737,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: 
inetOrgPerson +objectClass: posixAccount +cn: user737 +sn: user737 +uid: uid737 +givenname: givenname737 +description: description737 +userPassword: password737 +mail: uid737 +uidnumber: 737 +gidnumber: 737 +homeDirectory: /home/uid737 + +dn: cn=user738,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user738 +sn: user738 +uid: uid738 +givenname: givenname738 +description: description738 +userPassword: password738 +mail: uid738 +uidnumber: 738 +gidnumber: 738 +homeDirectory: /home/uid738 + +dn: cn=user739,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user739 +sn: user739 +uid: uid739 +givenname: givenname739 +description: description739 +userPassword: password739 +mail: uid739 +uidnumber: 739 +gidnumber: 739 +homeDirectory: /home/uid739 + +dn: cn=user740,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user740 +sn: user740 +uid: uid740 +givenname: givenname740 +description: description740 +userPassword: password740 +mail: uid740 +uidnumber: 740 +gidnumber: 740 +homeDirectory: /home/uid740 + +dn: cn=user741,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user741 +sn: user741 +uid: uid741 +givenname: givenname741 +description: description741 +userPassword: password741 +mail: uid741 +uidnumber: 741 +gidnumber: 741 +homeDirectory: /home/uid741 + +dn: cn=user742,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user742 +sn: user742 +uid: uid742 +givenname: givenname742 +description: description742 +userPassword: 
password742 +mail: uid742 +uidnumber: 742 +gidnumber: 742 +homeDirectory: /home/uid742 + +dn: cn=user743,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user743 +sn: user743 +uid: uid743 +givenname: givenname743 +description: description743 +userPassword: password743 +mail: uid743 +uidnumber: 743 +gidnumber: 743 +homeDirectory: /home/uid743 + +dn: cn=user744,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user744 +sn: user744 +uid: uid744 +givenname: givenname744 +description: description744 +userPassword: password744 +mail: uid744 +uidnumber: 744 +gidnumber: 744 +homeDirectory: /home/uid744 + +dn: cn=user745,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user745 +sn: user745 +uid: uid745 +givenname: givenname745 +description: description745 +userPassword: password745 +mail: uid745 +uidnumber: 745 +gidnumber: 745 +homeDirectory: /home/uid745 + +dn: cn=user746,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user746 +sn: user746 +uid: uid746 +givenname: givenname746 +description: description746 +userPassword: password746 +mail: uid746 +uidnumber: 746 +gidnumber: 746 +homeDirectory: /home/uid746 + +dn: cn=user747,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user747 +sn: user747 +uid: uid747 +givenname: givenname747 +description: description747 +userPassword: password747 +mail: uid747 +uidnumber: 747 +gidnumber: 747 +homeDirectory: /home/uid747 + +dn: cn=user748,ou=People,dc=example,dc=com +objectClass: 
top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user748 +sn: user748 +uid: uid748 +givenname: givenname748 +description: description748 +userPassword: password748 +mail: uid748 +uidnumber: 748 +gidnumber: 748 +homeDirectory: /home/uid748 + +dn: cn=user749,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user749 +sn: user749 +uid: uid749 +givenname: givenname749 +description: description749 +userPassword: password749 +mail: uid749 +uidnumber: 749 +gidnumber: 749 +homeDirectory: /home/uid749 + +dn: cn=user750,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user750 +sn: user750 +uid: uid750 +givenname: givenname750 +description: description750 +userPassword: password750 +mail: uid750 +uidnumber: 750 +gidnumber: 750 +homeDirectory: /home/uid750 + +dn: cn=user751,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user751 +sn: user751 +uid: uid751 +givenname: givenname751 +description: description751 +userPassword: password751 +mail: uid751 +uidnumber: 751 +gidnumber: 751 +homeDirectory: /home/uid751 + +dn: cn=user752,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user752 +sn: user752 +uid: uid752 +givenname: givenname752 +description: description752 +userPassword: password752 +mail: uid752 +uidnumber: 752 +gidnumber: 752 +homeDirectory: /home/uid752 + +dn: cn=user753,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user753 +sn: user753 +uid: 
uid753 +givenname: givenname753 +description: description753 +userPassword: password753 +mail: uid753 +uidnumber: 753 +gidnumber: 753 +homeDirectory: /home/uid753 + +dn: cn=user754,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user754 +sn: user754 +uid: uid754 +givenname: givenname754 +description: description754 +userPassword: password754 +mail: uid754 +uidnumber: 754 +gidnumber: 754 +homeDirectory: /home/uid754 + +dn: cn=user755,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user755 +sn: user755 +uid: uid755 +givenname: givenname755 +description: description755 +userPassword: password755 +mail: uid755 +uidnumber: 755 +gidnumber: 755 +homeDirectory: /home/uid755 + +dn: cn=user756,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user756 +sn: user756 +uid: uid756 +givenname: givenname756 +description: description756 +userPassword: password756 +mail: uid756 +uidnumber: 756 +gidnumber: 756 +homeDirectory: /home/uid756 + +dn: cn=user757,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user757 +sn: user757 +uid: uid757 +givenname: givenname757 +description: description757 +userPassword: password757 +mail: uid757 +uidnumber: 757 +gidnumber: 757 +homeDirectory: /home/uid757 + +dn: cn=user758,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user758 +sn: user758 +uid: uid758 +givenname: givenname758 +description: description758 +userPassword: password758 +mail: uid758 +uidnumber: 758 +gidnumber: 758 +homeDirectory: 
/home/uid758 + +dn: cn=user759,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user759 +sn: user759 +uid: uid759 +givenname: givenname759 +description: description759 +userPassword: password759 +mail: uid759 +uidnumber: 759 +gidnumber: 759 +homeDirectory: /home/uid759 + +dn: cn=user760,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user760 +sn: user760 +uid: uid760 +givenname: givenname760 +description: description760 +userPassword: password760 +mail: uid760 +uidnumber: 760 +gidnumber: 760 +homeDirectory: /home/uid760 + +dn: cn=user761,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user761 +sn: user761 +uid: uid761 +givenname: givenname761 +description: description761 +userPassword: password761 +mail: uid761 +uidnumber: 761 +gidnumber: 761 +homeDirectory: /home/uid761 + +dn: cn=user762,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user762 +sn: user762 +uid: uid762 +givenname: givenname762 +description: description762 +userPassword: password762 +mail: uid762 +uidnumber: 762 +gidnumber: 762 +homeDirectory: /home/uid762 + +dn: cn=user763,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user763 +sn: user763 +uid: uid763 +givenname: givenname763 +description: description763 +userPassword: password763 +mail: uid763 +uidnumber: 763 +gidnumber: 763 +homeDirectory: /home/uid763 + +dn: cn=user764,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: 
inetOrgPerson +objectClass: posixAccount +cn: user764 +sn: user764 +uid: uid764 +givenname: givenname764 +description: description764 +userPassword: password764 +mail: uid764 +uidnumber: 764 +gidnumber: 764 +homeDirectory: /home/uid764 + +dn: cn=user765,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user765 +sn: user765 +uid: uid765 +givenname: givenname765 +description: description765 +userPassword: password765 +mail: uid765 +uidnumber: 765 +gidnumber: 765 +homeDirectory: /home/uid765 + +dn: cn=user766,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user766 +sn: user766 +uid: uid766 +givenname: givenname766 +description: description766 +userPassword: password766 +mail: uid766 +uidnumber: 766 +gidnumber: 766 +homeDirectory: /home/uid766 + +dn: cn=user767,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user767 +sn: user767 +uid: uid767 +givenname: givenname767 +description: description767 +userPassword: password767 +mail: uid767 +uidnumber: 767 +gidnumber: 767 +homeDirectory: /home/uid767 + +dn: cn=user768,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user768 +sn: user768 +uid: uid768 +givenname: givenname768 +description: description768 +userPassword: password768 +mail: uid768 +uidnumber: 768 +gidnumber: 768 +homeDirectory: /home/uid768 + +dn: cn=user769,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user769 +sn: user769 +uid: uid769 +givenname: givenname769 +description: description769 +userPassword: 
password769 +mail: uid769 +uidnumber: 769 +gidnumber: 769 +homeDirectory: /home/uid769 + +dn: cn=user770,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user770 +sn: user770 +uid: uid770 +givenname: givenname770 +description: description770 +userPassword: password770 +mail: uid770 +uidnumber: 770 +gidnumber: 770 +homeDirectory: /home/uid770 + +dn: cn=user771,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user771 +sn: user771 +uid: uid771 +givenname: givenname771 +description: description771 +userPassword: password771 +mail: uid771 +uidnumber: 771 +gidnumber: 771 +homeDirectory: /home/uid771 + +dn: cn=user772,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user772 +sn: user772 +uid: uid772 +givenname: givenname772 +description: description772 +userPassword: password772 +mail: uid772 +uidnumber: 772 +gidnumber: 772 +homeDirectory: /home/uid772 + +dn: cn=user773,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user773 +sn: user773 +uid: uid773 +givenname: givenname773 +description: description773 +userPassword: password773 +mail: uid773 +uidnumber: 773 +gidnumber: 773 +homeDirectory: /home/uid773 + +dn: cn=user774,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user774 +sn: user774 +uid: uid774 +givenname: givenname774 +description: description774 +userPassword: password774 +mail: uid774 +uidnumber: 774 +gidnumber: 774 +homeDirectory: /home/uid774 + +dn: cn=user775,ou=People,dc=example,dc=com +objectClass: 
top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user775 +sn: user775 +uid: uid775 +givenname: givenname775 +description: description775 +userPassword: password775 +mail: uid775 +uidnumber: 775 +gidnumber: 775 +homeDirectory: /home/uid775 + +dn: cn=user776,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user776 +sn: user776 +uid: uid776 +givenname: givenname776 +description: description776 +userPassword: password776 +mail: uid776 +uidnumber: 776 +gidnumber: 776 +homeDirectory: /home/uid776 + +dn: cn=user777,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user777 +sn: user777 +uid: uid777 +givenname: givenname777 +description: description777 +userPassword: password777 +mail: uid777 +uidnumber: 777 +gidnumber: 777 +homeDirectory: /home/uid777 + +dn: cn=user778,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user778 +sn: user778 +uid: uid778 +givenname: givenname778 +description: description778 +userPassword: password778 +mail: uid778 +uidnumber: 778 +gidnumber: 778 +homeDirectory: /home/uid778 + +dn: cn=user779,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user779 +sn: user779 +uid: uid779 +givenname: givenname779 +description: description779 +userPassword: password779 +mail: uid779 +uidnumber: 779 +gidnumber: 779 +homeDirectory: /home/uid779 + +dn: cn=user780,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user780 +sn: user780 +uid: 
uid780 +givenname: givenname780 +description: description780 +userPassword: password780 +mail: uid780 +uidnumber: 780 +gidnumber: 780 +homeDirectory: /home/uid780 + +dn: cn=user781,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user781 +sn: user781 +uid: uid781 +givenname: givenname781 +description: description781 +userPassword: password781 +mail: uid781 +uidnumber: 781 +gidnumber: 781 +homeDirectory: /home/uid781 + +dn: cn=user782,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user782 +sn: user782 +uid: uid782 +givenname: givenname782 +description: description782 +userPassword: password782 +mail: uid782 +uidnumber: 782 +gidnumber: 782 +homeDirectory: /home/uid782 + +dn: cn=user783,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user783 +sn: user783 +uid: uid783 +givenname: givenname783 +description: description783 +userPassword: password783 +mail: uid783 +uidnumber: 783 +gidnumber: 783 +homeDirectory: /home/uid783 + +dn: cn=user784,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user784 +sn: user784 +uid: uid784 +givenname: givenname784 +description: description784 +userPassword: password784 +mail: uid784 +uidnumber: 784 +gidnumber: 784 +homeDirectory: /home/uid784 + +dn: cn=user785,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user785 +sn: user785 +uid: uid785 +givenname: givenname785 +description: description785 +userPassword: password785 +mail: uid785 +uidnumber: 785 +gidnumber: 785 +homeDirectory: 
/home/uid785 + +dn: cn=user786,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user786 +sn: user786 +uid: uid786 +givenname: givenname786 +description: description786 +userPassword: password786 +mail: uid786 +uidnumber: 786 +gidnumber: 786 +homeDirectory: /home/uid786 + +dn: cn=user787,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user787 +sn: user787 +uid: uid787 +givenname: givenname787 +description: description787 +userPassword: password787 +mail: uid787 +uidnumber: 787 +gidnumber: 787 +homeDirectory: /home/uid787 + +dn: cn=user788,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user788 +sn: user788 +uid: uid788 +givenname: givenname788 +description: description788 +userPassword: password788 +mail: uid788 +uidnumber: 788 +gidnumber: 788 +homeDirectory: /home/uid788 + +dn: cn=user789,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user789 +sn: user789 +uid: uid789 +givenname: givenname789 +description: description789 +userPassword: password789 +mail: uid789 +uidnumber: 789 +gidnumber: 789 +homeDirectory: /home/uid789 + +dn: cn=user790,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user790 +sn: user790 +uid: uid790 +givenname: givenname790 +description: description790 +userPassword: password790 +mail: uid790 +uidnumber: 790 +gidnumber: 790 +homeDirectory: /home/uid790 + +dn: cn=user791,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: 
inetOrgPerson +objectClass: posixAccount +cn: user791 +sn: user791 +uid: uid791 +givenname: givenname791 +description: description791 +userPassword: password791 +mail: uid791 +uidnumber: 791 +gidnumber: 791 +homeDirectory: /home/uid791 + +dn: cn=user792,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user792 +sn: user792 +uid: uid792 +givenname: givenname792 +description: description792 +userPassword: password792 +mail: uid792 +uidnumber: 792 +gidnumber: 792 +homeDirectory: /home/uid792 + +dn: cn=user793,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user793 +sn: user793 +uid: uid793 +givenname: givenname793 +description: description793 +userPassword: password793 +mail: uid793 +uidnumber: 793 +gidnumber: 793 +homeDirectory: /home/uid793 + +dn: cn=user794,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user794 +sn: user794 +uid: uid794 +givenname: givenname794 +description: description794 +userPassword: password794 +mail: uid794 +uidnumber: 794 +gidnumber: 794 +homeDirectory: /home/uid794 + +dn: cn=user795,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user795 +sn: user795 +uid: uid795 +givenname: givenname795 +description: description795 +userPassword: password795 +mail: uid795 +uidnumber: 795 +gidnumber: 795 +homeDirectory: /home/uid795 + +dn: cn=user796,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user796 +sn: user796 +uid: uid796 +givenname: givenname796 +description: description796 +userPassword: 
password796 +mail: uid796 +uidnumber: 796 +gidnumber: 796 +homeDirectory: /home/uid796 + +dn: cn=user797,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user797 +sn: user797 +uid: uid797 +givenname: givenname797 +description: description797 +userPassword: password797 +mail: uid797 +uidnumber: 797 +gidnumber: 797 +homeDirectory: /home/uid797 + +dn: cn=user798,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user798 +sn: user798 +uid: uid798 +givenname: givenname798 +description: description798 +userPassword: password798 +mail: uid798 +uidnumber: 798 +gidnumber: 798 +homeDirectory: /home/uid798 + +dn: cn=user799,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user799 +sn: user799 +uid: uid799 +givenname: givenname799 +description: description799 +userPassword: password799 +mail: uid799 +uidnumber: 799 +gidnumber: 799 +homeDirectory: /home/uid799 + +dn: cn=user800,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user800 +sn: user800 +uid: uid800 +givenname: givenname800 +description: description800 +userPassword: password800 +mail: uid800 +uidnumber: 800 +gidnumber: 800 +homeDirectory: /home/uid800 + +dn: cn=user801,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user801 +sn: user801 +uid: uid801 +givenname: givenname801 +description: description801 +userPassword: password801 +mail: uid801 +uidnumber: 801 +gidnumber: 801 +homeDirectory: /home/uid801 + +dn: cn=user802,ou=People,dc=example,dc=com +objectClass: 
top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user802 +sn: user802 +uid: uid802 +givenname: givenname802 +description: description802 +userPassword: password802 +mail: uid802 +uidnumber: 802 +gidnumber: 802 +homeDirectory: /home/uid802 + +dn: cn=user803,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user803 +sn: user803 +uid: uid803 +givenname: givenname803 +description: description803 +userPassword: password803 +mail: uid803 +uidnumber: 803 +gidnumber: 803 +homeDirectory: /home/uid803 + +dn: cn=user804,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user804 +sn: user804 +uid: uid804 +givenname: givenname804 +description: description804 +userPassword: password804 +mail: uid804 +uidnumber: 804 +gidnumber: 804 +homeDirectory: /home/uid804 + +dn: cn=user805,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user805 +sn: user805 +uid: uid805 +givenname: givenname805 +description: description805 +userPassword: password805 +mail: uid805 +uidnumber: 805 +gidnumber: 805 +homeDirectory: /home/uid805 + +dn: cn=user806,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user806 +sn: user806 +uid: uid806 +givenname: givenname806 +description: description806 +userPassword: password806 +mail: uid806 +uidnumber: 806 +gidnumber: 806 +homeDirectory: /home/uid806 + +dn: cn=user807,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user807 +sn: user807 +uid: 
uid807 +givenname: givenname807 +description: description807 +userPassword: password807 +mail: uid807 +uidnumber: 807 +gidnumber: 807 +homeDirectory: /home/uid807 + +dn: cn=user808,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user808 +sn: user808 +uid: uid808 +givenname: givenname808 +description: description808 +userPassword: password808 +mail: uid808 +uidnumber: 808 +gidnumber: 808 +homeDirectory: /home/uid808 + +dn: cn=user809,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user809 +sn: user809 +uid: uid809 +givenname: givenname809 +description: description809 +userPassword: password809 +mail: uid809 +uidnumber: 809 +gidnumber: 809 +homeDirectory: /home/uid809 + +dn: cn=user810,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user810 +sn: user810 +uid: uid810 +givenname: givenname810 +description: description810 +userPassword: password810 +mail: uid810 +uidnumber: 810 +gidnumber: 810 +homeDirectory: /home/uid810 + +dn: cn=user811,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user811 +sn: user811 +uid: uid811 +givenname: givenname811 +description: description811 +userPassword: password811 +mail: uid811 +uidnumber: 811 +gidnumber: 811 +homeDirectory: /home/uid811 + +dn: cn=user812,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user812 +sn: user812 +uid: uid812 +givenname: givenname812 +description: description812 +userPassword: password812 +mail: uid812 +uidnumber: 812 +gidnumber: 812 +homeDirectory: 
/home/uid812 + +dn: cn=user813,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user813 +sn: user813 +uid: uid813 +givenname: givenname813 +description: description813 +userPassword: password813 +mail: uid813 +uidnumber: 813 +gidnumber: 813 +homeDirectory: /home/uid813 + +dn: cn=user814,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user814 +sn: user814 +uid: uid814 +givenname: givenname814 +description: description814 +userPassword: password814 +mail: uid814 +uidnumber: 814 +gidnumber: 814 +homeDirectory: /home/uid814 + +dn: cn=user815,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user815 +sn: user815 +uid: uid815 +givenname: givenname815 +description: description815 +userPassword: password815 +mail: uid815 +uidnumber: 815 +gidnumber: 815 +homeDirectory: /home/uid815 + +dn: cn=user816,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user816 +sn: user816 +uid: uid816 +givenname: givenname816 +description: description816 +userPassword: password816 +mail: uid816 +uidnumber: 816 +gidnumber: 816 +homeDirectory: /home/uid816 + +dn: cn=user817,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user817 +sn: user817 +uid: uid817 +givenname: givenname817 +description: description817 +userPassword: password817 +mail: uid817 +uidnumber: 817 +gidnumber: 817 +homeDirectory: /home/uid817 + +dn: cn=user818,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: 
inetOrgPerson +objectClass: posixAccount +cn: user818 +sn: user818 +uid: uid818 +givenname: givenname818 +description: description818 +userPassword: password818 +mail: uid818 +uidnumber: 818 +gidnumber: 818 +homeDirectory: /home/uid818 + +dn: cn=user819,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user819 +sn: user819 +uid: uid819 +givenname: givenname819 +description: description819 +userPassword: password819 +mail: uid819 +uidnumber: 819 +gidnumber: 819 +homeDirectory: /home/uid819 + +dn: cn=user820,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user820 +sn: user820 +uid: uid820 +givenname: givenname820 +description: description820 +userPassword: password820 +mail: uid820 +uidnumber: 820 +gidnumber: 820 +homeDirectory: /home/uid820 + +dn: cn=user821,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user821 +sn: user821 +uid: uid821 +givenname: givenname821 +description: description821 +userPassword: password821 +mail: uid821 +uidnumber: 821 +gidnumber: 821 +homeDirectory: /home/uid821 + +dn: cn=user822,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user822 +sn: user822 +uid: uid822 +givenname: givenname822 +description: description822 +userPassword: password822 +mail: uid822 +uidnumber: 822 +gidnumber: 822 +homeDirectory: /home/uid822 + +dn: cn=user823,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user823 +sn: user823 +uid: uid823 +givenname: givenname823 +description: description823 +userPassword: 
password823 +mail: uid823 +uidnumber: 823 +gidnumber: 823 +homeDirectory: /home/uid823 + +dn: cn=user824,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user824 +sn: user824 +uid: uid824 +givenname: givenname824 +description: description824 +userPassword: password824 +mail: uid824 +uidnumber: 824 +gidnumber: 824 +homeDirectory: /home/uid824 + +dn: cn=user825,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user825 +sn: user825 +uid: uid825 +givenname: givenname825 +description: description825 +userPassword: password825 +mail: uid825 +uidnumber: 825 +gidnumber: 825 +homeDirectory: /home/uid825 + +dn: cn=user826,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user826 +sn: user826 +uid: uid826 +givenname: givenname826 +description: description826 +userPassword: password826 +mail: uid826 +uidnumber: 826 +gidnumber: 826 +homeDirectory: /home/uid826 + +dn: cn=user827,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user827 +sn: user827 +uid: uid827 +givenname: givenname827 +description: description827 +userPassword: password827 +mail: uid827 +uidnumber: 827 +gidnumber: 827 +homeDirectory: /home/uid827 + +dn: cn=user828,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user828 +sn: user828 +uid: uid828 +givenname: givenname828 +description: description828 +userPassword: password828 +mail: uid828 +uidnumber: 828 +gidnumber: 828 +homeDirectory: /home/uid828 + +dn: cn=user829,ou=People,dc=example,dc=com +objectClass: 
top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user829 +sn: user829 +uid: uid829 +givenname: givenname829 +description: description829 +userPassword: password829 +mail: uid829 +uidnumber: 829 +gidnumber: 829 +homeDirectory: /home/uid829 + +dn: cn=user830,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user830 +sn: user830 +uid: uid830 +givenname: givenname830 +description: description830 +userPassword: password830 +mail: uid830 +uidnumber: 830 +gidnumber: 830 +homeDirectory: /home/uid830 + +dn: cn=user831,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user831 +sn: user831 +uid: uid831 +givenname: givenname831 +description: description831 +userPassword: password831 +mail: uid831 +uidnumber: 831 +gidnumber: 831 +homeDirectory: /home/uid831 + +dn: cn=user832,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user832 +sn: user832 +uid: uid832 +givenname: givenname832 +description: description832 +userPassword: password832 +mail: uid832 +uidnumber: 832 +gidnumber: 832 +homeDirectory: /home/uid832 + +dn: cn=user833,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user833 +sn: user833 +uid: uid833 +givenname: givenname833 +description: description833 +userPassword: password833 +mail: uid833 +uidnumber: 833 +gidnumber: 833 +homeDirectory: /home/uid833 + +dn: cn=user834,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user834 +sn: user834 +uid: 
uid834 +givenname: givenname834 +description: description834 +userPassword: password834 +mail: uid834 +uidnumber: 834 +gidnumber: 834 +homeDirectory: /home/uid834 + +dn: cn=user835,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user835 +sn: user835 +uid: uid835 +givenname: givenname835 +description: description835 +userPassword: password835 +mail: uid835 +uidnumber: 835 +gidnumber: 835 +homeDirectory: /home/uid835 + +dn: cn=user836,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user836 +sn: user836 +uid: uid836 +givenname: givenname836 +description: description836 +userPassword: password836 +mail: uid836 +uidnumber: 836 +gidnumber: 836 +homeDirectory: /home/uid836 + +dn: cn=user837,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user837 +sn: user837 +uid: uid837 +givenname: givenname837 +description: description837 +userPassword: password837 +mail: uid837 +uidnumber: 837 +gidnumber: 837 +homeDirectory: /home/uid837 + +dn: cn=user838,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user838 +sn: user838 +uid: uid838 +givenname: givenname838 +description: description838 +userPassword: password838 +mail: uid838 +uidnumber: 838 +gidnumber: 838 +homeDirectory: /home/uid838 + +dn: cn=user839,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user839 +sn: user839 +uid: uid839 +givenname: givenname839 +description: description839 +userPassword: password839 +mail: uid839 +uidnumber: 839 +gidnumber: 839 +homeDirectory: 
/home/uid839 + +dn: cn=user840,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user840 +sn: user840 +uid: uid840 +givenname: givenname840 +description: description840 +userPassword: password840 +mail: uid840 +uidnumber: 840 +gidnumber: 840 +homeDirectory: /home/uid840 + +dn: cn=user841,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user841 +sn: user841 +uid: uid841 +givenname: givenname841 +description: description841 +userPassword: password841 +mail: uid841 +uidnumber: 841 +gidnumber: 841 +homeDirectory: /home/uid841 + +dn: cn=user842,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user842 +sn: user842 +uid: uid842 +givenname: givenname842 +description: description842 +userPassword: password842 +mail: uid842 +uidnumber: 842 +gidnumber: 842 +homeDirectory: /home/uid842 + +dn: cn=user843,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user843 +sn: user843 +uid: uid843 +givenname: givenname843 +description: description843 +userPassword: password843 +mail: uid843 +uidnumber: 843 +gidnumber: 843 +homeDirectory: /home/uid843 + +dn: cn=user844,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user844 +sn: user844 +uid: uid844 +givenname: givenname844 +description: description844 +userPassword: password844 +mail: uid844 +uidnumber: 844 +gidnumber: 844 +homeDirectory: /home/uid844 + +dn: cn=user845,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: 
inetOrgPerson +objectClass: posixAccount +cn: user845 +sn: user845 +uid: uid845 +givenname: givenname845 +description: description845 +userPassword: password845 +mail: uid845 +uidnumber: 845 +gidnumber: 845 +homeDirectory: /home/uid845 + +dn: cn=user846,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user846 +sn: user846 +uid: uid846 +givenname: givenname846 +description: description846 +userPassword: password846 +mail: uid846 +uidnumber: 846 +gidnumber: 846 +homeDirectory: /home/uid846 + +dn: cn=user847,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user847 +sn: user847 +uid: uid847 +givenname: givenname847 +description: description847 +userPassword: password847 +mail: uid847 +uidnumber: 847 +gidnumber: 847 +homeDirectory: /home/uid847 + +dn: cn=user848,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user848 +sn: user848 +uid: uid848 +givenname: givenname848 +description: description848 +userPassword: password848 +mail: uid848 +uidnumber: 848 +gidnumber: 848 +homeDirectory: /home/uid848 + +dn: cn=user849,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user849 +sn: user849 +uid: uid849 +givenname: givenname849 +description: description849 +userPassword: password849 +mail: uid849 +uidnumber: 849 +gidnumber: 849 +homeDirectory: /home/uid849 + +dn: cn=user850,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user850 +sn: user850 +uid: uid850 +givenname: givenname850 +description: description850 +userPassword: 
password850 +mail: uid850 +uidnumber: 850 +gidnumber: 850 +homeDirectory: /home/uid850 + +dn: cn=user851,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user851 +sn: user851 +uid: uid851 +givenname: givenname851 +description: description851 +userPassword: password851 +mail: uid851 +uidnumber: 851 +gidnumber: 851 +homeDirectory: /home/uid851 + +dn: cn=user852,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user852 +sn: user852 +uid: uid852 +givenname: givenname852 +description: description852 +userPassword: password852 +mail: uid852 +uidnumber: 852 +gidnumber: 852 +homeDirectory: /home/uid852 + +dn: cn=user853,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user853 +sn: user853 +uid: uid853 +givenname: givenname853 +description: description853 +userPassword: password853 +mail: uid853 +uidnumber: 853 +gidnumber: 853 +homeDirectory: /home/uid853 + +dn: cn=user854,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user854 +sn: user854 +uid: uid854 +givenname: givenname854 +description: description854 +userPassword: password854 +mail: uid854 +uidnumber: 854 +gidnumber: 854 +homeDirectory: /home/uid854 + +dn: cn=user855,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user855 +sn: user855 +uid: uid855 +givenname: givenname855 +description: description855 +userPassword: password855 +mail: uid855 +uidnumber: 855 +gidnumber: 855 +homeDirectory: /home/uid855 + +dn: cn=user856,ou=People,dc=example,dc=com +objectClass: 
top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user856 +sn: user856 +uid: uid856 +givenname: givenname856 +description: description856 +userPassword: password856 +mail: uid856 +uidnumber: 856 +gidnumber: 856 +homeDirectory: /home/uid856 + +dn: cn=user857,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user857 +sn: user857 +uid: uid857 +givenname: givenname857 +description: description857 +userPassword: password857 +mail: uid857 +uidnumber: 857 +gidnumber: 857 +homeDirectory: /home/uid857 + +dn: cn=user858,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user858 +sn: user858 +uid: uid858 +givenname: givenname858 +description: description858 +userPassword: password858 +mail: uid858 +uidnumber: 858 +gidnumber: 858 +homeDirectory: /home/uid858 + +dn: cn=user859,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user859 +sn: user859 +uid: uid859 +givenname: givenname859 +description: description859 +userPassword: password859 +mail: uid859 +uidnumber: 859 +gidnumber: 859 +homeDirectory: /home/uid859 + +dn: cn=user860,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user860 +sn: user860 +uid: uid860 +givenname: givenname860 +description: description860 +userPassword: password860 +mail: uid860 +uidnumber: 860 +gidnumber: 860 +homeDirectory: /home/uid860 + +dn: cn=user861,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user861 +sn: user861 +uid: 
uid861 +givenname: givenname861 +description: description861 +userPassword: password861 +mail: uid861 +uidnumber: 861 +gidnumber: 861 +homeDirectory: /home/uid861 + +dn: cn=user862,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user862 +sn: user862 +uid: uid862 +givenname: givenname862 +description: description862 +userPassword: password862 +mail: uid862 +uidnumber: 862 +gidnumber: 862 +homeDirectory: /home/uid862 + +dn: cn=user863,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user863 +sn: user863 +uid: uid863 +givenname: givenname863 +description: description863 +userPassword: password863 +mail: uid863 +uidnumber: 863 +gidnumber: 863 +homeDirectory: /home/uid863 + +dn: cn=user864,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user864 +sn: user864 +uid: uid864 +givenname: givenname864 +description: description864 +userPassword: password864 +mail: uid864 +uidnumber: 864 +gidnumber: 864 +homeDirectory: /home/uid864 + +dn: cn=user865,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user865 +sn: user865 +uid: uid865 +givenname: givenname865 +description: description865 +userPassword: password865 +mail: uid865 +uidnumber: 865 +gidnumber: 865 +homeDirectory: /home/uid865 + +dn: cn=user866,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user866 +sn: user866 +uid: uid866 +givenname: givenname866 +description: description866 +userPassword: password866 +mail: uid866 +uidnumber: 866 +gidnumber: 866 +homeDirectory: 
/home/uid866 + +dn: cn=user867,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user867 +sn: user867 +uid: uid867 +givenname: givenname867 +description: description867 +userPassword: password867 +mail: uid867 +uidnumber: 867 +gidnumber: 867 +homeDirectory: /home/uid867 + +dn: cn=user868,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user868 +sn: user868 +uid: uid868 +givenname: givenname868 +description: description868 +userPassword: password868 +mail: uid868 +uidnumber: 868 +gidnumber: 868 +homeDirectory: /home/uid868 + +dn: cn=user869,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user869 +sn: user869 +uid: uid869 +givenname: givenname869 +description: description869 +userPassword: password869 +mail: uid869 +uidnumber: 869 +gidnumber: 869 +homeDirectory: /home/uid869 + +dn: cn=user870,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user870 +sn: user870 +uid: uid870 +givenname: givenname870 +description: description870 +userPassword: password870 +mail: uid870 +uidnumber: 870 +gidnumber: 870 +homeDirectory: /home/uid870 + +dn: cn=user871,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user871 +sn: user871 +uid: uid871 +givenname: givenname871 +description: description871 +userPassword: password871 +mail: uid871 +uidnumber: 871 +gidnumber: 871 +homeDirectory: /home/uid871 + +dn: cn=user872,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: 
inetOrgPerson +objectClass: posixAccount +cn: user872 +sn: user872 +uid: uid872 +givenname: givenname872 +description: description872 +userPassword: password872 +mail: uid872 +uidnumber: 872 +gidnumber: 872 +homeDirectory: /home/uid872 + +dn: cn=user873,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user873 +sn: user873 +uid: uid873 +givenname: givenname873 +description: description873 +userPassword: password873 +mail: uid873 +uidnumber: 873 +gidnumber: 873 +homeDirectory: /home/uid873 + +dn: cn=user874,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user874 +sn: user874 +uid: uid874 +givenname: givenname874 +description: description874 +userPassword: password874 +mail: uid874 +uidnumber: 874 +gidnumber: 874 +homeDirectory: /home/uid874 + +dn: cn=user875,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user875 +sn: user875 +uid: uid875 +givenname: givenname875 +description: description875 +userPassword: password875 +mail: uid875 +uidnumber: 875 +gidnumber: 875 +homeDirectory: /home/uid875 + +dn: cn=user876,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user876 +sn: user876 +uid: uid876 +givenname: givenname876 +description: description876 +userPassword: password876 +mail: uid876 +uidnumber: 876 +gidnumber: 876 +homeDirectory: /home/uid876 + +dn: cn=user877,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user877 +sn: user877 +uid: uid877 +givenname: givenname877 +description: description877 +userPassword: 
password877 +mail: uid877 +uidnumber: 877 +gidnumber: 877 +homeDirectory: /home/uid877 + +dn: cn=user878,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user878 +sn: user878 +uid: uid878 +givenname: givenname878 +description: description878 +userPassword: password878 +mail: uid878 +uidnumber: 878 +gidnumber: 878 +homeDirectory: /home/uid878 + +dn: cn=user879,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user879 +sn: user879 +uid: uid879 +givenname: givenname879 +description: description879 +userPassword: password879 +mail: uid879 +uidnumber: 879 +gidnumber: 879 +homeDirectory: /home/uid879 + +dn: cn=user880,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user880 +sn: user880 +uid: uid880 +givenname: givenname880 +description: description880 +userPassword: password880 +mail: uid880 +uidnumber: 880 +gidnumber: 880 +homeDirectory: /home/uid880 + +dn: cn=user881,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user881 +sn: user881 +uid: uid881 +givenname: givenname881 +description: description881 +userPassword: password881 +mail: uid881 +uidnumber: 881 +gidnumber: 881 +homeDirectory: /home/uid881 + +dn: cn=user882,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user882 +sn: user882 +uid: uid882 +givenname: givenname882 +description: description882 +userPassword: password882 +mail: uid882 +uidnumber: 882 +gidnumber: 882 +homeDirectory: /home/uid882 + +dn: cn=user883,ou=People,dc=example,dc=com +objectClass: 
top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user883 +sn: user883 +uid: uid883 +givenname: givenname883 +description: description883 +userPassword: password883 +mail: uid883 +uidnumber: 883 +gidnumber: 883 +homeDirectory: /home/uid883 + +dn: cn=user884,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user884 +sn: user884 +uid: uid884 +givenname: givenname884 +description: description884 +userPassword: password884 +mail: uid884 +uidnumber: 884 +gidnumber: 884 +homeDirectory: /home/uid884 + +dn: cn=user885,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user885 +sn: user885 +uid: uid885 +givenname: givenname885 +description: description885 +userPassword: password885 +mail: uid885 +uidnumber: 885 +gidnumber: 885 +homeDirectory: /home/uid885 + +dn: cn=user886,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user886 +sn: user886 +uid: uid886 +givenname: givenname886 +description: description886 +userPassword: password886 +mail: uid886 +uidnumber: 886 +gidnumber: 886 +homeDirectory: /home/uid886 + +dn: cn=user887,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user887 +sn: user887 +uid: uid887 +givenname: givenname887 +description: description887 +userPassword: password887 +mail: uid887 +uidnumber: 887 +gidnumber: 887 +homeDirectory: /home/uid887 + +dn: cn=user888,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user888 +sn: user888 +uid: 
uid888 +givenname: givenname888 +description: description888 +userPassword: password888 +mail: uid888 +uidnumber: 888 +gidnumber: 888 +homeDirectory: /home/uid888 + +dn: cn=user889,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user889 +sn: user889 +uid: uid889 +givenname: givenname889 +description: description889 +userPassword: password889 +mail: uid889 +uidnumber: 889 +gidnumber: 889 +homeDirectory: /home/uid889 + +dn: cn=user890,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user890 +sn: user890 +uid: uid890 +givenname: givenname890 +description: description890 +userPassword: password890 +mail: uid890 +uidnumber: 890 +gidnumber: 890 +homeDirectory: /home/uid890 + +dn: cn=user891,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user891 +sn: user891 +uid: uid891 +givenname: givenname891 +description: description891 +userPassword: password891 +mail: uid891 +uidnumber: 891 +gidnumber: 891 +homeDirectory: /home/uid891 + +dn: cn=user892,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user892 +sn: user892 +uid: uid892 +givenname: givenname892 +description: description892 +userPassword: password892 +mail: uid892 +uidnumber: 892 +gidnumber: 892 +homeDirectory: /home/uid892 + +dn: cn=user893,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user893 +sn: user893 +uid: uid893 +givenname: givenname893 +description: description893 +userPassword: password893 +mail: uid893 +uidnumber: 893 +gidnumber: 893 +homeDirectory: 
/home/uid893 + +dn: cn=user894,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user894 +sn: user894 +uid: uid894 +givenname: givenname894 +description: description894 +userPassword: password894 +mail: uid894 +uidnumber: 894 +gidnumber: 894 +homeDirectory: /home/uid894 + +dn: cn=user895,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user895 +sn: user895 +uid: uid895 +givenname: givenname895 +description: description895 +userPassword: password895 +mail: uid895 +uidnumber: 895 +gidnumber: 895 +homeDirectory: /home/uid895 + +dn: cn=user896,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user896 +sn: user896 +uid: uid896 +givenname: givenname896 +description: description896 +userPassword: password896 +mail: uid896 +uidnumber: 896 +gidnumber: 896 +homeDirectory: /home/uid896 + +dn: cn=user897,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user897 +sn: user897 +uid: uid897 +givenname: givenname897 +description: description897 +userPassword: password897 +mail: uid897 +uidnumber: 897 +gidnumber: 897 +homeDirectory: /home/uid897 + +dn: cn=user898,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user898 +sn: user898 +uid: uid898 +givenname: givenname898 +description: description898 +userPassword: password898 +mail: uid898 +uidnumber: 898 +gidnumber: 898 +homeDirectory: /home/uid898 + +dn: cn=user899,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: 
inetOrgPerson +objectClass: posixAccount +cn: user899 +sn: user899 +uid: uid899 +givenname: givenname899 +description: description899 +userPassword: password899 +mail: uid899 +uidnumber: 899 +gidnumber: 899 +homeDirectory: /home/uid899 + +dn: cn=user900,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user900 +sn: user900 +uid: uid900 +givenname: givenname900 +description: description900 +userPassword: password900 +mail: uid900 +uidnumber: 900 +gidnumber: 900 +homeDirectory: /home/uid900 + +dn: cn=user901,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user901 +sn: user901 +uid: uid901 +givenname: givenname901 +description: description901 +userPassword: password901 +mail: uid901 +uidnumber: 901 +gidnumber: 901 +homeDirectory: /home/uid901 + +dn: cn=user902,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user902 +sn: user902 +uid: uid902 +givenname: givenname902 +description: description902 +userPassword: password902 +mail: uid902 +uidnumber: 902 +gidnumber: 902 +homeDirectory: /home/uid902 + +dn: cn=user903,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user903 +sn: user903 +uid: uid903 +givenname: givenname903 +description: description903 +userPassword: password903 +mail: uid903 +uidnumber: 903 +gidnumber: 903 +homeDirectory: /home/uid903 + +dn: cn=user904,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user904 +sn: user904 +uid: uid904 +givenname: givenname904 +description: description904 +userPassword: 
password904 +mail: uid904 +uidnumber: 904 +gidnumber: 904 +homeDirectory: /home/uid904 + +dn: cn=user905,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user905 +sn: user905 +uid: uid905 +givenname: givenname905 +description: description905 +userPassword: password905 +mail: uid905 +uidnumber: 905 +gidnumber: 905 +homeDirectory: /home/uid905 + +dn: cn=user906,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user906 +sn: user906 +uid: uid906 +givenname: givenname906 +description: description906 +userPassword: password906 +mail: uid906 +uidnumber: 906 +gidnumber: 906 +homeDirectory: /home/uid906 + +dn: cn=user907,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user907 +sn: user907 +uid: uid907 +givenname: givenname907 +description: description907 +userPassword: password907 +mail: uid907 +uidnumber: 907 +gidnumber: 907 +homeDirectory: /home/uid907 + +dn: cn=user908,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user908 +sn: user908 +uid: uid908 +givenname: givenname908 +description: description908 +userPassword: password908 +mail: uid908 +uidnumber: 908 +gidnumber: 908 +homeDirectory: /home/uid908 + +dn: cn=user909,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user909 +sn: user909 +uid: uid909 +givenname: givenname909 +description: description909 +userPassword: password909 +mail: uid909 +uidnumber: 909 +gidnumber: 909 +homeDirectory: /home/uid909 + +dn: cn=user910,ou=People,dc=example,dc=com +objectClass: 
top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user910 +sn: user910 +uid: uid910 +givenname: givenname910 +description: description910 +userPassword: password910 +mail: uid910 +uidnumber: 910 +gidnumber: 910 +homeDirectory: /home/uid910 + +dn: cn=user911,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user911 +sn: user911 +uid: uid911 +givenname: givenname911 +description: description911 +userPassword: password911 +mail: uid911 +uidnumber: 911 +gidnumber: 911 +homeDirectory: /home/uid911 + +dn: cn=user912,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user912 +sn: user912 +uid: uid912 +givenname: givenname912 +description: description912 +userPassword: password912 +mail: uid912 +uidnumber: 912 +gidnumber: 912 +homeDirectory: /home/uid912 + +dn: cn=user913,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user913 +sn: user913 +uid: uid913 +givenname: givenname913 +description: description913 +userPassword: password913 +mail: uid913 +uidnumber: 913 +gidnumber: 913 +homeDirectory: /home/uid913 + +dn: cn=user914,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user914 +sn: user914 +uid: uid914 +givenname: givenname914 +description: description914 +userPassword: password914 +mail: uid914 +uidnumber: 914 +gidnumber: 914 +homeDirectory: /home/uid914 + +dn: cn=user915,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user915 +sn: user915 +uid: 
uid915 +givenname: givenname915 +description: description915 +userPassword: password915 +mail: uid915 +uidnumber: 915 +gidnumber: 915 +homeDirectory: /home/uid915 + +dn: cn=user916,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user916 +sn: user916 +uid: uid916 +givenname: givenname916 +description: description916 +userPassword: password916 +mail: uid916 +uidnumber: 916 +gidnumber: 916 +homeDirectory: /home/uid916 + +dn: cn=user917,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user917 +sn: user917 +uid: uid917 +givenname: givenname917 +description: description917 +userPassword: password917 +mail: uid917 +uidnumber: 917 +gidnumber: 917 +homeDirectory: /home/uid917 + +dn: cn=user918,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user918 +sn: user918 +uid: uid918 +givenname: givenname918 +description: description918 +userPassword: password918 +mail: uid918 +uidnumber: 918 +gidnumber: 918 +homeDirectory: /home/uid918 + +dn: cn=user919,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user919 +sn: user919 +uid: uid919 +givenname: givenname919 +description: description919 +userPassword: password919 +mail: uid919 +uidnumber: 919 +gidnumber: 919 +homeDirectory: /home/uid919 + +dn: cn=user920,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user920 +sn: user920 +uid: uid920 +givenname: givenname920 +description: description920 +userPassword: password920 +mail: uid920 +uidnumber: 920 +gidnumber: 920 +homeDirectory: 
/home/uid920 + +dn: cn=user921,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user921 +sn: user921 +uid: uid921 +givenname: givenname921 +description: description921 +userPassword: password921 +mail: uid921 +uidnumber: 921 +gidnumber: 921 +homeDirectory: /home/uid921 + +dn: cn=user922,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user922 +sn: user922 +uid: uid922 +givenname: givenname922 +description: description922 +userPassword: password922 +mail: uid922 +uidnumber: 922 +gidnumber: 922 +homeDirectory: /home/uid922 + +dn: cn=user923,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user923 +sn: user923 +uid: uid923 +givenname: givenname923 +description: description923 +userPassword: password923 +mail: uid923 +uidnumber: 923 +gidnumber: 923 +homeDirectory: /home/uid923 + +dn: cn=user924,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user924 +sn: user924 +uid: uid924 +givenname: givenname924 +description: description924 +userPassword: password924 +mail: uid924 +uidnumber: 924 +gidnumber: 924 +homeDirectory: /home/uid924 + +dn: cn=user925,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user925 +sn: user925 +uid: uid925 +givenname: givenname925 +description: description925 +userPassword: password925 +mail: uid925 +uidnumber: 925 +gidnumber: 925 +homeDirectory: /home/uid925 + +dn: cn=user926,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: 
inetOrgPerson +objectClass: posixAccount +cn: user926 +sn: user926 +uid: uid926 +givenname: givenname926 +description: description926 +userPassword: password926 +mail: uid926 +uidnumber: 926 +gidnumber: 926 +homeDirectory: /home/uid926 + +dn: cn=user927,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user927 +sn: user927 +uid: uid927 +givenname: givenname927 +description: description927 +userPassword: password927 +mail: uid927 +uidnumber: 927 +gidnumber: 927 +homeDirectory: /home/uid927 + +dn: cn=user928,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user928 +sn: user928 +uid: uid928 +givenname: givenname928 +description: description928 +userPassword: password928 +mail: uid928 +uidnumber: 928 +gidnumber: 928 +homeDirectory: /home/uid928 + +dn: cn=user929,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user929 +sn: user929 +uid: uid929 +givenname: givenname929 +description: description929 +userPassword: password929 +mail: uid929 +uidnumber: 929 +gidnumber: 929 +homeDirectory: /home/uid929 + +dn: cn=user930,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user930 +sn: user930 +uid: uid930 +givenname: givenname930 +description: description930 +userPassword: password930 +mail: uid930 +uidnumber: 930 +gidnumber: 930 +homeDirectory: /home/uid930 + +dn: cn=user931,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user931 +sn: user931 +uid: uid931 +givenname: givenname931 +description: description931 +userPassword: 
password931 +mail: uid931 +uidnumber: 931 +gidnumber: 931 +homeDirectory: /home/uid931 + +dn: cn=user932,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user932 +sn: user932 +uid: uid932 +givenname: givenname932 +description: description932 +userPassword: password932 +mail: uid932 +uidnumber: 932 +gidnumber: 932 +homeDirectory: /home/uid932 + +dn: cn=user933,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user933 +sn: user933 +uid: uid933 +givenname: givenname933 +description: description933 +userPassword: password933 +mail: uid933 +uidnumber: 933 +gidnumber: 933 +homeDirectory: /home/uid933 + +dn: cn=user934,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user934 +sn: user934 +uid: uid934 +givenname: givenname934 +description: description934 +userPassword: password934 +mail: uid934 +uidnumber: 934 +gidnumber: 934 +homeDirectory: /home/uid934 + +dn: cn=user935,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user935 +sn: user935 +uid: uid935 +givenname: givenname935 +description: description935 +userPassword: password935 +mail: uid935 +uidnumber: 935 +gidnumber: 935 +homeDirectory: /home/uid935 + +dn: cn=user936,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user936 +sn: user936 +uid: uid936 +givenname: givenname936 +description: description936 +userPassword: password936 +mail: uid936 +uidnumber: 936 +gidnumber: 936 +homeDirectory: /home/uid936 + +dn: cn=user937,ou=People,dc=example,dc=com +objectClass: 
top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user937 +sn: user937 +uid: uid937 +givenname: givenname937 +description: description937 +userPassword: password937 +mail: uid937 +uidnumber: 937 +gidnumber: 937 +homeDirectory: /home/uid937 + +dn: cn=user938,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user938 +sn: user938 +uid: uid938 +givenname: givenname938 +description: description938 +userPassword: password938 +mail: uid938 +uidnumber: 938 +gidnumber: 938 +homeDirectory: /home/uid938 + +dn: cn=user939,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user939 +sn: user939 +uid: uid939 +givenname: givenname939 +description: description939 +userPassword: password939 +mail: uid939 +uidnumber: 939 +gidnumber: 939 +homeDirectory: /home/uid939 + +dn: cn=user940,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user940 +sn: user940 +uid: uid940 +givenname: givenname940 +description: description940 +userPassword: password940 +mail: uid940 +uidnumber: 940 +gidnumber: 940 +homeDirectory: /home/uid940 + +dn: cn=user941,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user941 +sn: user941 +uid: uid941 +givenname: givenname941 +description: description941 +userPassword: password941 +mail: uid941 +uidnumber: 941 +gidnumber: 941 +homeDirectory: /home/uid941 + +dn: cn=user942,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user942 +sn: user942 +uid: 
uid942 +givenname: givenname942 +description: description942 +userPassword: password942 +mail: uid942 +uidnumber: 942 +gidnumber: 942 +homeDirectory: /home/uid942 + +dn: cn=user943,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user943 +sn: user943 +uid: uid943 +givenname: givenname943 +description: description943 +userPassword: password943 +mail: uid943 +uidnumber: 943 +gidnumber: 943 +homeDirectory: /home/uid943 + +dn: cn=user944,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user944 +sn: user944 +uid: uid944 +givenname: givenname944 +description: description944 +userPassword: password944 +mail: uid944 +uidnumber: 944 +gidnumber: 944 +homeDirectory: /home/uid944 + +dn: cn=user945,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user945 +sn: user945 +uid: uid945 +givenname: givenname945 +description: description945 +userPassword: password945 +mail: uid945 +uidnumber: 945 +gidnumber: 945 +homeDirectory: /home/uid945 + +dn: cn=user946,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user946 +sn: user946 +uid: uid946 +givenname: givenname946 +description: description946 +userPassword: password946 +mail: uid946 +uidnumber: 946 +gidnumber: 946 +homeDirectory: /home/uid946 + +dn: cn=user947,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user947 +sn: user947 +uid: uid947 +givenname: givenname947 +description: description947 +userPassword: password947 +mail: uid947 +uidnumber: 947 +gidnumber: 947 +homeDirectory: 
/home/uid947 + +dn: cn=user948,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user948 +sn: user948 +uid: uid948 +givenname: givenname948 +description: description948 +userPassword: password948 +mail: uid948 +uidnumber: 948 +gidnumber: 948 +homeDirectory: /home/uid948 + +dn: cn=user949,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user949 +sn: user949 +uid: uid949 +givenname: givenname949 +description: description949 +userPassword: password949 +mail: uid949 +uidnumber: 949 +gidnumber: 949 +homeDirectory: /home/uid949 + +dn: cn=user950,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user950 +sn: user950 +uid: uid950 +givenname: givenname950 +description: description950 +userPassword: password950 +mail: uid950 +uidnumber: 950 +gidnumber: 950 +homeDirectory: /home/uid950 + +dn: cn=user951,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user951 +sn: user951 +uid: uid951 +givenname: givenname951 +description: description951 +userPassword: password951 +mail: uid951 +uidnumber: 951 +gidnumber: 951 +homeDirectory: /home/uid951 + +dn: cn=user952,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user952 +sn: user952 +uid: uid952 +givenname: givenname952 +description: description952 +userPassword: password952 +mail: uid952 +uidnumber: 952 +gidnumber: 952 +homeDirectory: /home/uid952 + +dn: cn=user953,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: 
inetOrgPerson +objectClass: posixAccount +cn: user953 +sn: user953 +uid: uid953 +givenname: givenname953 +description: description953 +userPassword: password953 +mail: uid953 +uidnumber: 953 +gidnumber: 953 +homeDirectory: /home/uid953 + +dn: cn=user954,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user954 +sn: user954 +uid: uid954 +givenname: givenname954 +description: description954 +userPassword: password954 +mail: uid954 +uidnumber: 954 +gidnumber: 954 +homeDirectory: /home/uid954 + +dn: cn=user955,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user955 +sn: user955 +uid: uid955 +givenname: givenname955 +description: description955 +userPassword: password955 +mail: uid955 +uidnumber: 955 +gidnumber: 955 +homeDirectory: /home/uid955 + +dn: cn=user956,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user956 +sn: user956 +uid: uid956 +givenname: givenname956 +description: description956 +userPassword: password956 +mail: uid956 +uidnumber: 956 +gidnumber: 956 +homeDirectory: /home/uid956 + +dn: cn=user957,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user957 +sn: user957 +uid: uid957 +givenname: givenname957 +description: description957 +userPassword: password957 +mail: uid957 +uidnumber: 957 +gidnumber: 957 +homeDirectory: /home/uid957 + +dn: cn=user958,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user958 +sn: user958 +uid: uid958 +givenname: givenname958 +description: description958 +userPassword: 
password958 +mail: uid958 +uidnumber: 958 +gidnumber: 958 +homeDirectory: /home/uid958 + +dn: cn=user959,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user959 +sn: user959 +uid: uid959 +givenname: givenname959 +description: description959 +userPassword: password959 +mail: uid959 +uidnumber: 959 +gidnumber: 959 +homeDirectory: /home/uid959 + +dn: cn=user960,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user960 +sn: user960 +uid: uid960 +givenname: givenname960 +description: description960 +userPassword: password960 +mail: uid960 +uidnumber: 960 +gidnumber: 960 +homeDirectory: /home/uid960 + +dn: cn=user961,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user961 +sn: user961 +uid: uid961 +givenname: givenname961 +description: description961 +userPassword: password961 +mail: uid961 +uidnumber: 961 +gidnumber: 961 +homeDirectory: /home/uid961 + +dn: cn=user962,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user962 +sn: user962 +uid: uid962 +givenname: givenname962 +description: description962 +userPassword: password962 +mail: uid962 +uidnumber: 962 +gidnumber: 962 +homeDirectory: /home/uid962 + +dn: cn=user963,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user963 +sn: user963 +uid: uid963 +givenname: givenname963 +description: description963 +userPassword: password963 +mail: uid963 +uidnumber: 963 +gidnumber: 963 +homeDirectory: /home/uid963 + +dn: cn=user964,ou=People,dc=example,dc=com +objectClass: 
top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user964 +sn: user964 +uid: uid964 +givenname: givenname964 +description: description964 +userPassword: password964 +mail: uid964 +uidnumber: 964 +gidnumber: 964 +homeDirectory: /home/uid964 + +dn: cn=user965,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user965 +sn: user965 +uid: uid965 +givenname: givenname965 +description: description965 +userPassword: password965 +mail: uid965 +uidnumber: 965 +gidnumber: 965 +homeDirectory: /home/uid965 + +dn: cn=user966,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user966 +sn: user966 +uid: uid966 +givenname: givenname966 +description: description966 +userPassword: password966 +mail: uid966 +uidnumber: 966 +gidnumber: 966 +homeDirectory: /home/uid966 + +dn: cn=user967,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user967 +sn: user967 +uid: uid967 +givenname: givenname967 +description: description967 +userPassword: password967 +mail: uid967 +uidnumber: 967 +gidnumber: 967 +homeDirectory: /home/uid967 + +dn: cn=user968,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user968 +sn: user968 +uid: uid968 +givenname: givenname968 +description: description968 +userPassword: password968 +mail: uid968 +uidnumber: 968 +gidnumber: 968 +homeDirectory: /home/uid968 + +dn: cn=user969,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user969 +sn: user969 +uid: 
uid969 +givenname: givenname969 +description: description969 +userPassword: password969 +mail: uid969 +uidnumber: 969 +gidnumber: 969 +homeDirectory: /home/uid969 + +dn: cn=user970,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user970 +sn: user970 +uid: uid970 +givenname: givenname970 +description: description970 +userPassword: password970 +mail: uid970 +uidnumber: 970 +gidnumber: 970 +homeDirectory: /home/uid970 + +dn: cn=user971,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user971 +sn: user971 +uid: uid971 +givenname: givenname971 +description: description971 +userPassword: password971 +mail: uid971 +uidnumber: 971 +gidnumber: 971 +homeDirectory: /home/uid971 + +dn: cn=user972,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user972 +sn: user972 +uid: uid972 +givenname: givenname972 +description: description972 +userPassword: password972 +mail: uid972 +uidnumber: 972 +gidnumber: 972 +homeDirectory: /home/uid972 + +dn: cn=user973,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user973 +sn: user973 +uid: uid973 +givenname: givenname973 +description: description973 +userPassword: password973 +mail: uid973 +uidnumber: 973 +gidnumber: 973 +homeDirectory: /home/uid973 + +dn: cn=user974,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user974 +sn: user974 +uid: uid974 +givenname: givenname974 +description: description974 +userPassword: password974 +mail: uid974 +uidnumber: 974 +gidnumber: 974 +homeDirectory: 
/home/uid974 + +dn: cn=user975,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user975 +sn: user975 +uid: uid975 +givenname: givenname975 +description: description975 +userPassword: password975 +mail: uid975 +uidnumber: 975 +gidnumber: 975 +homeDirectory: /home/uid975 + +dn: cn=user976,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user976 +sn: user976 +uid: uid976 +givenname: givenname976 +description: description976 +userPassword: password976 +mail: uid976 +uidnumber: 976 +gidnumber: 976 +homeDirectory: /home/uid976 + +dn: cn=user977,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user977 +sn: user977 +uid: uid977 +givenname: givenname977 +description: description977 +userPassword: password977 +mail: uid977 +uidnumber: 977 +gidnumber: 977 +homeDirectory: /home/uid977 + +dn: cn=user978,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user978 +sn: user978 +uid: uid978 +givenname: givenname978 +description: description978 +userPassword: password978 +mail: uid978 +uidnumber: 978 +gidnumber: 978 +homeDirectory: /home/uid978 + +dn: cn=user979,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user979 +sn: user979 +uid: uid979 +givenname: givenname979 +description: description979 +userPassword: password979 +mail: uid979 +uidnumber: 979 +gidnumber: 979 +homeDirectory: /home/uid979 + +dn: cn=user980,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: 
inetOrgPerson +objectClass: posixAccount +cn: user980 +sn: user980 +uid: uid980 +givenname: givenname980 +description: description980 +userPassword: password980 +mail: uid980 +uidnumber: 980 +gidnumber: 980 +homeDirectory: /home/uid980 + +dn: cn=user981,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user981 +sn: user981 +uid: uid981 +givenname: givenname981 +description: description981 +userPassword: password981 +mail: uid981 +uidnumber: 981 +gidnumber: 981 +homeDirectory: /home/uid981 + +dn: cn=user982,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user982 +sn: user982 +uid: uid982 +givenname: givenname982 +description: description982 +userPassword: password982 +mail: uid982 +uidnumber: 982 +gidnumber: 982 +homeDirectory: /home/uid982 + +dn: cn=user983,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user983 +sn: user983 +uid: uid983 +givenname: givenname983 +description: description983 +userPassword: password983 +mail: uid983 +uidnumber: 983 +gidnumber: 983 +homeDirectory: /home/uid983 + +dn: cn=user984,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user984 +sn: user984 +uid: uid984 +givenname: givenname984 +description: description984 +userPassword: password984 +mail: uid984 +uidnumber: 984 +gidnumber: 984 +homeDirectory: /home/uid984 + +dn: cn=user985,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user985 +sn: user985 +uid: uid985 +givenname: givenname985 +description: description985 +userPassword: 
password985 +mail: uid985 +uidnumber: 985 +gidnumber: 985 +homeDirectory: /home/uid985 + +dn: cn=user986,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user986 +sn: user986 +uid: uid986 +givenname: givenname986 +description: description986 +userPassword: password986 +mail: uid986 +uidnumber: 986 +gidnumber: 986 +homeDirectory: /home/uid986 + +dn: cn=user987,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user987 +sn: user987 +uid: uid987 +givenname: givenname987 +description: description987 +userPassword: password987 +mail: uid987 +uidnumber: 987 +gidnumber: 987 +homeDirectory: /home/uid987 + +dn: cn=user988,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user988 +sn: user988 +uid: uid988 +givenname: givenname988 +description: description988 +userPassword: password988 +mail: uid988 +uidnumber: 988 +gidnumber: 988 +homeDirectory: /home/uid988 + +dn: cn=user989,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user989 +sn: user989 +uid: uid989 +givenname: givenname989 +description: description989 +userPassword: password989 +mail: uid989 +uidnumber: 989 +gidnumber: 989 +homeDirectory: /home/uid989 + +dn: cn=user990,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user990 +sn: user990 +uid: uid990 +givenname: givenname990 +description: description990 +userPassword: password990 +mail: uid990 +uidnumber: 990 +gidnumber: 990 +homeDirectory: /home/uid990 + +dn: cn=user991,ou=People,dc=example,dc=com +objectClass: 
top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user991 +sn: user991 +uid: uid991 +givenname: givenname991 +description: description991 +userPassword: password991 +mail: uid991 +uidnumber: 991 +gidnumber: 991 +homeDirectory: /home/uid991 + +dn: cn=user992,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user992 +sn: user992 +uid: uid992 +givenname: givenname992 +description: description992 +userPassword: password992 +mail: uid992 +uidnumber: 992 +gidnumber: 992 +homeDirectory: /home/uid992 + +dn: cn=user993,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user993 +sn: user993 +uid: uid993 +givenname: givenname993 +description: description993 +userPassword: password993 +mail: uid993 +uidnumber: 993 +gidnumber: 993 +homeDirectory: /home/uid993 + +dn: cn=user994,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user994 +sn: user994 +uid: uid994 +givenname: givenname994 +description: description994 +userPassword: password994 +mail: uid994 +uidnumber: 994 +gidnumber: 994 +homeDirectory: /home/uid994 + +dn: cn=user995,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user995 +sn: user995 +uid: uid995 +givenname: givenname995 +description: description995 +userPassword: password995 +mail: uid995 +uidnumber: 995 +gidnumber: 995 +homeDirectory: /home/uid995 + +dn: cn=user996,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user996 +sn: user996 +uid: 
uid996 +givenname: givenname996 +description: description996 +userPassword: password996 +mail: uid996 +uidnumber: 996 +gidnumber: 996 +homeDirectory: /home/uid996 + +dn: cn=user997,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user997 +sn: user997 +uid: uid997 +givenname: givenname997 +description: description997 +userPassword: password997 +mail: uid997 +uidnumber: 997 +gidnumber: 997 +homeDirectory: /home/uid997 + +dn: cn=user998,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user998 +sn: user998 +uid: uid998 +givenname: givenname998 +description: description998 +userPassword: password998 +mail: uid998 +uidnumber: 998 +gidnumber: 998 +homeDirectory: /home/uid998 + +dn: cn=user999,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user999 +sn: user999 +uid: uid999 +givenname: givenname999 +description: description999 +userPassword: password999 +mail: uid999 +uidnumber: 999 +gidnumber: 999 +homeDirectory: /home/uid999 + diff --git a/dirsrvtests/tests/data/ticket49121/utf8str.txt b/dirsrvtests/tests/data/ticket49121/utf8str.txt new file mode 100644 index 0000000..0005c4e --- /dev/null +++ b/dirsrvtests/tests/data/ticket49121/utf8str.txt @@ -0,0 +1 @@ +あいうえお diff --git a/dirsrvtests/tests/data/ticket49441/binary.ldif b/dirsrvtests/tests/data/ticket49441/binary.ldif new file mode 100644 index 0000000..bdebaf8 --- /dev/null +++ b/dirsrvtests/tests/data/ticket49441/binary.ldif @@ -0,0 +1,858 @@ +version: 1 + +# entry-id: 1 +dn: dc=example,dc=com +objectClass: domain +objectClass: top +dc: example +nsUniqueId: f49ca102-c2ee11e7-9170b029-e68fda34 +creatorsName: +modifiersName: +createTimestamp: 20171106123544Z +modifyTimestamp: 
20171106123544Z + +# entry-id: 2 +dn: ou=binary,dc=example,dc=com +certificateRevocationList;binary:: MIITbjCCElYCAQEwDQYJKoZIhvcNAQEFBQAwVzELMAk + GA1UEBhMCVVMxEDAOBgNVBAoTB0VudHJ1c3QxIjAgBgNVBAsTGUNlcnRpZmljYXRpb24gQXV0aG9y + aXRpZXMxEjAQBgNVBAsTCURDb21TdWJDQRcNMTcxMDE2MTUxNjAyWhcNMTcxMDE5MTUxNjAyWjCCE + ZcwIwIEV4cj0hcNMTYxMTMwMDAyNDA0WjAMMAoGA1UdFQQDCgEAMCMCBFeHI9EXDTE2MTEzMDAwMj + gwNVowDDAKBgNVHRUEAwoBADAjAgRXhyPPFw0xNjExMzAwMDIxNDJaMAwwCgYDVR0VBAMKAQAwIwI + EV4cjzhcNMTYxMTMwMDAzMTE0WjAMMAoGA1UdFQQDCgEAMCMCBFeHI2gXDTE2MTEyOTE1MTM0M1ow + DDAKBgNVHRUEAwoBADA9AgRXhwCzFw0xNjExMDIyMjQ0NThaMCYwCgYDVR0VBAMKAQEwGAYDVR0YB + BEYDzIwMTYwOTA3MDEzODU1WjAjAgRXhvE4Fw0xNjA4MDExNDA5MTFaMAwwCgYDVR0VBAMKAQAwIw + IEV4bxNxcNMTYwODAxMTQwODU4WjAMMAoGA1UdFQQDCgEAMCMCBEkD2YYXDTE2MDcwNTE1NTg0NVo + wDDAKBgNVHRUEAwoBADAjAgRJA9mFFw0xNjA3MDUxNTU1MTlaMAwwCgYDVR0VBAMKAQAwIwIESQPT + cRcNMTYxMTMwMDAyODA1WjAMMAoGA1UdFQQDCgEAMCMCBEkD03AXDTE2MTEzMDAwMjgwNVowDDAKB + gNVHRUEAwoBADAjAgRJA9NuFw0xNjA2MjAxNjQ4NTlaMAwwCgYDVR0VBAMKAQAwIwIESQPSOBcNMT + YwNjE3MTU1OTM4WjAMMAoGA1UdFQQDCgEAMCMCBEkD0jcXDTE2MTEzMDAwMzExNFowDDAKBgNVHRU + EAwoBADAjAgRJA9I0Fw0xNjA2MjAxNzAyMDJaMAwwCgYDVR0VBAMKAQAwIwIESQPSMxcNMTYwNjIw + MTcwMjAyWjAMMAoGA1UdFQQDCgEAMCMCBEkD0jEXDTE2MDYxNzE1NDgwMlowDDAKBgNVHRUEAwoBA + DAjAgRJA9IwFw0xNjExMzAwMDMxMTRaMAwwCgYDVR0VBAMKAQAwIwIESQPSLhcNMTYwNjE3MTU0MD + A2WjAMMAoGA1UdFQQDCgEAMCMCBEkD0VIXDTE2MTEzMDAwMzExNFowDDAKBgNVHRUEAwoBADAjAgR + JA9FRFw0xNjExMzAwMDMxMTRaMAwwCgYDVR0VBAMKAQAwIwIESQPRTxcNMTYwNjE1MTkyMDU4WjAM + MAoGA1UdFQQDCgEAMCMCBEkD0U4XDTE2MDYxNTE5MjYyMlowDDAKBgNVHRUEAwoBADAjAgRJA9FLF + w0xNjA2MTUxODQ5MzZaMAwwCgYDVR0VBAMKAQAwIwIESQPRShcNMTYwNjE1MTQzNDU1WjAMMAoGA1 + UdFQQDCgEAMCMCBEkD0UkXDTE2MDYxNTE0MzEyMlowDDAKBgNVHRUEAwoBADAjAgRJA9FIFw0xNjA + 2MTUxNDMwMTdaMAwwCgYDVR0VBAMKAQAwIwIESQPQexcNMTYwNjE1MTkyNjIyWjAMMAoGA1UdFQQD + CgEAMCMCBEkD0HoXDTE2MDYxNTE5MjYyMlowDDAKBgNVHRUEAwoBADAjAgRJA9B4Fw0xNjA2MTQxM + TQ3MzlaMAwwCgYDVR0VBAMKAQAwIwIESQPQdxcNMTYwNjE1MTkyNTU5WjAMMAoGA1UdFQQDCgEAMC + 
MCBEkD0HYXDTE2MDYxNTE5MjU1OVowDDAKBgNVHRUEAwoBADAjAgRJA9B0Fw0xNjA2MTQxMTQzMzh + aMAwwCgYDVR0VBAMKAQAwIwIESQPQcxcNMTYwNjE0MTE0MDU4WjAMMAoGA1UdFQQDCgEAMCMCBEkD + 0HIXDTE2MDYxNTE5MjU0NlowDDAKBgNVHRUEAwoBADAjAgRJA9BwFw0xNjA2MTQxMTE3NDlaMAwwC + gYDVR0VBAMKAQAwIwIESQPLhhcNMTYwNjAxMjI1NTA1WjAMMAoGA1UdFQQDCgEAMCMCBEkDyRgXDT + E2MDUyNjIxNDQwOFowDDAKBgNVHRUEAwoBADAjAgRJA8kXFw0xNjA1MjYyMTQzMjdaMAwwCgYDVR0 + VBAMKAQAwIwIESQPIsRcNMTYwNTI2MTUxOTMwWjAMMAoGA1UdFQQDCgEAMCMCBEkDmmEXDTE2MDYx + NTE5MjU0NlowDDAKBgNVHRUEAwoBADAjAgRJA5pgFw0xNjA2MTUxOTI1NDZaMAwwCgYDVR0VBAMKA + QAwIwIESQOZ9RcNMTYwNjE1MTkyNDQzWjAMMAoGA1UdFQQDCgEFMCMCBEkDmfQXDTE2MDYxNTE5Mj + Q0M1owDDAKBgNVHRUEAwoBBTAjAgRJA5nyFw0xNjAyMDExOTM0MTlaMAwwCgYDVR0VBAMKAQAwIwI + ESQOXgBcNMTYwMTI2MTUwNTE5WjAMMAoGA1UdFQQDCgEAMCMCBEkDh0oXDTE1MTIxNzE3MzE0NVow + DDAKBgNVHRUEAwoBAzAjAgRJA3ZBFw0xNjAyMDIxNDM3MTZaMAwwCgYDVR0VBAMKAQMwIwIESQN2Q + BcNMTYwMjAyMTQzNzAzWjAMMAoGA1UdFQQDCgEDMCMCBEkDXsUXDTE1MTIwODIwMTM0OVowDDAKBg + NVHRUEAwoBAzAjAgRJA17EFw0xNTEyMDgyMDEzNDlaMAwwCgYDVR0VBAMKAQMwIwIESQNewxcNMTU + xMjA4MjAxMzUwWjAMMAoGA1UdFQQDCgEDMCMCBEkDWrkXDTE1MTIwODIwMTM1MFowDDAKBgNVHRUE + AwoBAzAjAgRJA1q4Fw0xNTEyMDgyMDEzNTBaMAwwCgYDVR0VBAMKAQMwIwIESQNatxcNMTUxMjA4M + jAxMzUwWjAMMAoGA1UdFQQDCgEDMCMCBEkDNjMXDTE2MDcwNTIwMDcxMlowDDAKBgNVHRUEAwoBBT + AjAgRJAwpwFw0xNjA2MTUxOTQwMDNaMAwwCgYDVR0VBAMKAQAwIwIESQMKbxcNMTYwNjE1MTk0MDA + zWjAMMAoGA1UdFQQDCgEAMCMCBEkC2Z0XDTE0MTAyMDE2NDgzN1owDDAKBgNVHRUEAwoBBTAjAgRJ + AthhFw0xNDEwMjAxNjQ4MzdaMAwwCgYDVR0VBAMKAQUwIwIESQLX7RcNMTQxMTEyMjAyNjA1WjAMM + AoGA1UdFQQDCgEFMCMCBEkC1+sXDTE0MTAyNzE1NTI1OVowDDAKBgNVHRUEAwoBAzAjAgRJAn2hFw + 0xNDAzMTMxNjUwMjZaMAwwCgYDVR0VBAMKAQAwIwIESQJ9MxcNMTQwMzEyMTUxODI5WjAMMAoGA1U + dFQQDCgEAMCMCBEkCfTEXDTE0MDMxMjExMzMzNVowDDAKBgNVHRUEAwoBADAjAgRJAn0wFw0xNDAz + MTIxMjE4MjFaMAwwCgYDVR0VBAMKAQAwIwIESQJ8YxcNMTQwMzEyMTEyNzEwWjAMMAoGA1UdFQQDC + gEAMCMCBEkCfGEXDTE0MDMxMDE0NTYxNlowDDAKBgNVHRUEAwoBADAjAgRJAnxgFw0xNDAzMTAxNT + A4MTVaMAwwCgYDVR0VBAMKAQAwIwIESQJ8XhcNMTQwMzEwMTIzMDM3WjAMMAoGA1UdFQQDCgEAMCM + 
CBEkCfF0XDTE0MDMxMDE0NTMyMlowDDAKBgNVHRUEAwoBADAjAgRJAnxbFw0xNDAzMTAxMDQ5NDBa + MAwwCgYDVR0VBAMKAQAwIwIESQJ8WhcNMTQwMzEwMTIwOTM2WjAMMAoGA1UdFQQDCgEAMCMCBEkCe + ywXDTE0MDMwNzEwMzcxM1owDDAKBgNVHRUEAwoBADAjAgRJAnsrFw0xNDAzMTAxMDQ3MTdaMAwwCg + YDVR0VBAMKAQAwIwIESQJ6xRcNMTQwMzA2MTEwMDM3WjAMMAoGA1UdFQQDCgEAMCMCBEkCesQXDTE + 0MDMwNzEwMzMyNVowDDAKBgNVHRUEAwoBADAjAgRJAm7jFw0xNDAyMDQyMTMwMjFaMAwwCgYDVR0V + BAMKAQAwIwIESQJrWhcNMTQwMTI3MTIyMTI0WjAMMAoGA1UdFQQDCgEAMCMCBEkCa1kXDTE0MDMwN + jEwNTY0OFowDDAKBgNVHRUEAwoBADAjAgRJAmjyFw0xNDAxMjExMDEyMTlaMAwwCgYDVR0VBAMKAQ + AwIwIESQJiPRcNMTQwMTAyMTYwMjIxWjAMMAoGA1UdFQQDCgEAMCMCBEkCXFgXDTEzMTIxODE3NTI + wNVowDDAKBgNVHRUEAwoBADAjAgRJAlW1Fw0xMzEyMDIxNTAzNTVaMAwwCgYDVR0VBAMKAQAwIwIE + SQJVshcNMTMxMjAyMTQ1NTM2WjAMMAoGA1UdFQQDCgEAMCMCBEkCVbEXDTEzMTIwMjE0NTk1OVowD + DAKBgNVHRUEAwoBADAjAgRJAlWvFw0xMzEyMDIxNDE3MzBaMAwwCgYDVR0VBAMKAQAwIwIESQJVrh + cNMTMxMjAyMTQ0OTMxWjAMMAoGA1UdFQQDCgEAMCMCBEkCVawXDTEzMTIwMjEzMTA1OFowDDAKBgN + VHRUEAwoBADAjAgRJAlWrFw0xMzEyMDIxNDEyMTVaMAwwCgYDVR0VBAMKAQAwIwIESQJONRcNMTMx + MTEyMjExMzI0WjAMMAoGA1UdFQQDCgEAMCMCBEkCJrkXDTEzMDkxMDA2NDUyNFowDDAKBgNVHRUEA + woBADAjAgRJAhmPFw0xMzA4MjExMDM0MTFaMAwwCgYDVR0VBAMKAQAwIwIESQIVrBcNMTMwODEyMT + g1NTU1WjAMMAoGA1UdFQQDCgEAMCMCBEkCFasXDTEzMTIxODE3MDQ0MlowDDAKBgNVHRUEAwoBADA + jAgRJAhAoFw0xMzA3MjkxNjAwMzVaMAwwCgYDVR0VBAMKAQAwIwIESQIQJxcNMTQwMTAyMTU1MDUy + WjAMMAoGA1UdFQQDCgEAMCMCBEkCCh8XDTEzMDcxNTA3MzY1NlowDDAKBgNVHRUEAwoBADAjAgRJA + gexFw0xMzA3MDgxNTU5MTRaMAwwCgYDVR0VBAMKAQAwIwIESQH73BcNMTMwNzI5MTU1NTAzWjAMMA + oGA1UdFQQDCgEAMCMCBEkB5EcXDTEzMDUyOTE0MDUyNVowDDAKBgNVHRUEAwoBADAjAgRJAcDtFw0 + xMzA1MTAyMDExNTBaMAwwCgYDVR0VBAMKAQAwIwIESQGmXBcNMTMwNDEwMDkyMTI2WjAMMAoGA1Ud + FQQDCgEAMCMCBEkBnj0XDTEzMDMyNTE4MTc0MFowDDAKBgNVHRUEAwoBADAjAgRJAYMOFw0xMzAyM + TExMTEwNDdaMAwwCgYDVR0VBAMKAQAwIwIESQF4PRcNMTMwODEyMTg0ODE2WjAMMAoGA1UdFQQDCg + EAMCMCBEkBcwcXDTEzMDEwMzE2NTgyMFowDDAKBgNVHRUEAwoBADAjAgRJAXMEFw0xMzAxMDMxMDA + yMjRaMAwwCgYDVR0VBAMKAQAwIwIESQFuRxcNMTMxMDA3MTMwMjM1WjAMMAoGA1UdFQQDCgEFMCMC + 
BEkBaLsXDTEzMDQxMDA5MTY1NVowDDAKBgNVHRUEAwoBADAjAgRJAWaQFw0xMjExMjkxNjAxMzJaM + AwwCgYDVR0VBAMKAQAwIwIESQFmhBcNMTIxMTI5MTE1NTIyWjAMMAoGA1UdFQQDCgEAMCMCBEkBZo + MXDTEyMTEyOTE1MjYwNVowDDAKBgNVHRUEAwoBADAjAgRJAWaBFw0xMjExMjkxMTAzNTJaMAwwCgY + DVR0VBAMKAQAwIwIESQFmgBcNMTIxMTI5MTE1MTU4WjAMMAoGA1UdFQQDCgEAMCMCBEkBYT8XDTEy + MTExNTA5NTI1OVowDDAKBgNVHRUEAwoBADAjAgRJAWCrFw0xMjExMTQxNDM2NDVaMAwwCgYDVR0VB + AMKAQAwIwIESQFgqhcNMTIxMTE1MDk0ODI1WjAMMAoGA1UdFQQDCgEAMCMCBEkBXT4XDTEzMTIwMj + EzMDcwMVowDDAKBgNVHRUEAwoBADAjAgRJAVvbFw0xMjExMjkxMTAwMzFaMAwwCgYDVR0VBAMKAQC + gMDAuMAsGA1UdFAQEAgIo8DAfBgNVHSMEGDAWgBT0Fi4Bu6uQGaQoQg2dwB+crxCGKzANBgkqhkiG + 9w0BAQUFAAOCAQEATe14zpsSjrGcW4yNZrdGtsupuJge+DQV+h1ZwBEQtsmOmMvbSdMsu+vMvTzHQ + KWJq56picjixY6v4vPqhRRZWP8evOc0NuoxpiUhgez3CKFQoJ2bdeaS/WCfqss3Sa4FZTUzkVWZde + moDH8CcHt5in3H7SwF5i9/rKB/bLuTjQg+LRKh2E9+FAkJn1S/ZRh1Vjd/KuRFOXD6odjV54oTWE0 + 6PcHBdwip62ridLdQopt3+e1UgwKBNJAmBD6uMN1tPmenUYWxh4xI7Ft4HQR58TdIiTZmfQHmEkjl + dBNEAoUK1hvRy6E2mSdRq9Yex8f+rGdxI1+++6lHaN1+M8jQ4g== +userCertificate;binary:: MIKE/jCCg+YCAQEwX6FdMFukWTBXMQswCQYDVQQGEwJVUzEQMA4GA + 1UEChMHRW50cnVzdDEiMCAGA1UECxMZQ2VydGlmaWNhdGlvbiBBdXRob3JpdGllczESMBAGA1UECx + MJRENvbVN1YkNBMGegZTBjMFukWTBXMQswCQYDVQQGEwJVUzEQMA4GA1UEChMHRW50cnVzdDEiMCA + GA1UECxMZQ2VydGlmaWNhdGlvbiBBdXRob3JpdGllczESMBAGA1UECxMJRENvbVN1YkNBAgRIwMPg + MA0GCSqGSIb3DQEBBQUAAgRXh6kjMCIYDzIwMTcxMDE1MjI0NjEzWhgPMjAxNzExMTQyMjQ2MTNaM + IKCuTCCEQoGCSqGSIb2fQdEADGCEPswghD3gAEEMIIQ8DBvMFcxCzAJBgNVBAYTAlVTMRAwDgYDVQ + QKEwdFbnRydXN0MSIwIAYDVQQLExlDZXJ0aWZpY2F0aW9uIEF1dGhvcml0aWVzMRIwEAYDVQQLEwl + EQ29tU3ViQ0EWFENBIERvbWFpbiBTZWFyY2hiYXNlME4wPzEVMBMGCgmSJomT8ixkARkWBWxvY2Fs + MRQwEgYKCZImiZPyLGQBGRYEVGVzdDEQMA4GA1UECxMHRGV2aWNlcxYLQ0xTIERldmljZXMwgYswa + DEVMBMGCgmSJomT8ixkARkWBWxvY2FsMRQwEgYKCZImiZPyLGQBGRYEVGVzdDETMBEGA1UECxMKVG + VzdCBVc2VyczEkMCIGA1UECxMbU1NPIEFkbWluaXN0cmF0aW9uIEFjY291bnRzFh9DTFMgU1NPIEF + kbWluaXN0cmF0aW9uIEFjY291bnRzMFQwQjEVMBMGCgmSJomT8ixkARkWBWxvY2FsMRQwEgYKCZIm + 
iZPyLGQBGRYEVGVzdDETMBEGA1UECxMKVGVzdCBVc2VycxYOQ0xTIFRlc3QgVXNlcnMwfDBfMRUwE + wYKCZImiZPyLGQBGRYFbG9jYWwxFDASBgoJkiaJk/IsZAEZFgRUZXN0MRswGQYDVQQLExJEb21haW + 4gQ29udHJvbGxlcnMxEzARBgNVBAsTCkdCIFNlcnZlcnMWGUNMUyBHQiBEb21haW4gQ29udHJvbGx + lcnMwfDBfMRUwEwYKCZImiZPyLGQBGRYFbG9jYWwxFDASBgoJkiaJk/IsZAEZFgR0ZXN0MRswGQYD + VQQLExJEb21haW4gQ29udHJvbGxlcnMxEzARBgNVBAsTClVTIFNlcnZlcnMWGUNMUyBVUyBEb21ha + W4gQ29udHJvbGxlcnMwgaIwgY4xFTATBgoJkiaJk/IsZAEZFgVsb2NhbDEUMBIGCgmSJomT8ixkAR + kWBFRlc3QxFDASBgNVBAsTC1Rlc3QtT2ZmaWNlMRAwDgYDVQQLEwdTZXJ2ZXJzMRMwEQYDVQQLEwp + HQiBTZXJ2ZXJzMRQwEgYDVQQLEwtBcHBsaWNhdGlvbjEMMAoGA1UECxMDV0VCFg9DTFMgR0IgV2Vi + IEFwcHMwgbUwgaExFTATBgoJkiaJk/IsZAEZFgVsb2NhbDEUMBIGCgmSJomT8ixkARkWBFRlc3QxF + DASBgNVBAsTC1Rlc3QtT2ZmaWNlMRAwDgYDVQQLEwdTZXJ2ZXJzMRMwEQYDVQQLEwpHQiBTZXJ2ZX + JzMRQwEgYDVQQLEwtBcHBsaWNhdGlvbjEMMAoGA1UECxMDV0VCMREwDwYDVQQLEwhJbnRyYW5ldBY + PQ0xTIEdCIEludHJhbmV0MIG1MIGhMRUwEwYKCZImiZPyLGQBGRYFbG9jYWwxFDASBgoJkiaJk/Is + ZAEZFgRUZXN0MRQwEgYDVQQLEwtUZXN0LU9mZmljZTEQMA4GA1UECxMHU2VydmVyczETMBEGA1UEC + xMKVVMgU2VydmVyczEUMBIGA1UECxMLQXBwbGljYXRpb24xDDAKBgNVBAsTA1dFQjERMA8GA1UECx + MISW50cmFuZXQWD0NMUyBVUyBJbnRyYW5ldDA8MDExCzAJBgNVBAYTAlVTMRAwDgYDVQQKEwdFbnR + ydXN0MRAwDgYDVQQLEwdEeW5Db3JwFgdEeW5Db3JwMEowODELMAkGA1UEBhMCVVMxEDAOBgNVBAoT + B0VudHJ1c3QxFzAVBgNVBAsTDkFkbWluaXN0cmF0b3JzFg5BZG1pbmlzdHJhdG9yczBKMDgxCzAJB + gNVBAYTAlVTMRAwDgYDVQQKEwdFbnRydXN0MRcwFQYDVQQLEw5HZW5lcmFsIE1vdG9ycxYOR2VuZX + JhbCBNb3RvcnMwczBZMQswCQYDVQQGEwJVUzEQMA4GA1UEChMHRW50cnVzdDEXMBUGA1UECxMOR2V + uZXJhbCBNb3RvcnMxHzAdBgNVBAsTFkdNIFVzZXIgQWRtaW5pc3RyYXRvcnMWFkdNIFVzZXIgQWRt + aW5pc3RyYXRvcnMwXzBPMQswCQYDVQQGEwJVUzEQMA4GA1UEChMHRW50cnVzdDEXMBUGA1UECxMOR + 2VuZXJhbCBNb3RvcnMxFTATBgNVBAsTDEdNIEVuZCBVc2VycxYMR00gRW5kIFVzZXJzMFYwQzEVMB + MGCgmSJomT8ixkARkWBWxvY2FsMRQwEgYKCZImiZPyLGQBGRYEVGVzdDEUMBIGA1UECxMLV2ViIFN + lcnZlcnMWD0NMUyBXZWIgU2VydmVyczBeMEcxFTATBgoJkiaJk/IsZAEZFgVsb2NhbDEUMBIGCgmS + JomT8ixkARkWBFRlc3QxGDAWBgNVBAsTD0NNUyBBZG1pbiBVc2VycxYTQ0xTIENNUyBBZG1pbiBVc + 
2VyczBeMEcxFTATBgoJkiaJk/IsZAEZFgVsb2NhbDEUMBIGCgmSJomT8ixkARkWBFRlc3QxGDAWBg + NVBAsTD1BLSSBBZG1pbiBVc2VycxYTQ0xTIFBLSSBBZG1pbiBVc2VyczBLMD8xCzAJBgNVBAYTAnV + zMRAwDgYDVQQKEwdlbnRydXN0MQ8wDQYDVQQLEwZtb2JpbGUxDTALBgNVBAsTBGRlbW8WCERlbW8g + TURNMEgwMzELMAkGA1UEBhMCVVMxEDAOBgNVBAoTB0VudHJ1c3QxEjAQBgNVBAsTCUVtcGxveWVlc + xYRRW50cnVzdCBFbXBsb3llZXMwWzBQMRUwEwYKCZImiZPyLGQBGRYFTG9jYWwxFDASBgoJkiaJk/ + IsZAEZFgRUZXN0MRMwEQYDVQQLEwpUZXN0IFVzZXJzMQwwCgYDVQQHEwNERVYWB0NMUyBERVYwJDA + cMQswCQYDVQQGEwJ1czENMAsGA1UEChMETklTVBYETklTVDB2MGcxCzAJBgNVBAYTAlVTMRAwDgYD + VQQKEwdFbnRydXN0MRkwFwYDVQQLExBNYW5hZ2VkIFNlcnZpY2VzMRkwFwYDVQQLExBEZW1vIENvb + VByaXYgU3ViMRAwDgYDVQQLEwdEZXZpY2VzFgtNU08gRGV2aWNlczCBhDBuMQswCQYDVQQGEwJVUz + EQMA4GA1UEChMHRW50cnVzdDEZMBcGA1UECxMQTWFuYWdlZCBTZXJ2aWNlczEZMBcGA1UECxMQRGV + tbyBDb21Qcml2IFN1YjEXMBUGA1UECxMOQWRtaW5pc3RyYXRvcnMWEk1TTyBBZG1pbmlzdHJhdG9y + czB6MGkxCzAJBgNVBAYTAlVTMRAwDgYDVQQKEwdFbnRydXN0MRkwFwYDVQQLExBNYW5hZ2VkIFNlc + nZpY2VzMRkwFwYDVQQLExBEZW1vIENvbVByaXYgU3ViMRIwEAYDVQQLEwlFbXBsb3llZXMWDU1TTy + BFbXBsb3llZXMwRDAxMQswCQYDVQQGEwJVUzEQMA4GA1UEChMHR290U3ZlbjEQMA4GA1UECxMHRGV + 2aWNlcxYPR290U3ZlbiBEZXZpY2VzMIGEMFoxCzAJBgNVBAYTAlVTMRAwDgYDVQQKEwdFbnRydXN0 + MSAwHgYDVQQLExdFbnRydXN0IFNhbGVzIEVuZ2luZWVyczEXMBUGA1UECxMOQWRtaW5pc3RyYXRvc + nMWJkVudHJ1c3QgU2FsZXMgRW5naW5lZXJzIEFkbWluaXN0cmF0b3JzMHYwUzELMAkGA1UEBhMCVV + MxEDAOBgNVBAoTB0VudHJ1c3QxIDAeBgNVBAsTF0VudHJ1c3QgU2FsZXMgRW5naW5lZXJzMRAwDgY + DVQQLEwdEZXZpY2VzFh9FbnRydXN0IFNhbGVzIEVuZ2luZWVycyBEZXZpY2VzMHIwUTELMAkGA1UE + BhMCVVMxEDAOBgNVBAoTB0VudHJ1c3QxIDAeBgNVBAsTF0VudHJ1c3QgU2FsZXMgRW5naW5lZXJzM + Q4wDAYDVQQLEwVDYXJkcxYdRW50cnVzdCBTYWxlcyBFbmdpbmVlcnMgQ2FyZHMwdDBSMQswCQYDVQ + QGEwJVUzEQMA4GA1UEChMHRW50cnVzdDEgMB4GA1UECxMXRW50cnVzdCBTYWxlcyBFbmdpbmVlcnM + xDzANBgNVBAsTBlBlb3BsZRYeRW50cnVzdCBTYWxlcyBFbmdpbmVlcnMgUGVvcGxlMIGKMF0xCzAJ + BgNVBAYTAlVTMRAwDgYDVQQKEwdFbnRydXN0MSMwIQYDVQQLExpFbnRydXN0IFByb2R1Y3QgTWFuY + WdlbWVudDEXMBUGA1UECxMOQWRtaW5pc3RyYXRvcnMWKUVudHJ1c3QgUHJvZHVjdCBNYW5hZ2VtZW + 
50IEFkbWluaXN0cmF0b3JzMHwwVjELMAkGA1UEBhMCVVMxEDAOBgNVBAoTB0VudHJ1c3QxIzAhBgN + VBAsTGkVudHJ1c3QgUHJvZHVjdCBNYW5hZ2VtZW50MRAwDgYDVQQLEwdEZXZpY2VzFiJFbnRydXN0 + IFByb2R1Y3QgTWFuYWdlbWVudCBEZXZpY2VzMHgwVDELMAkGA1UEBhMCVVMxEDAOBgNVBAoTB0Vud + HJ1c3QxIzAhBgNVBAsTGkVudHJ1c3QgUHJvZHVjdCBNYW5hZ2VtZW50MQ4wDAYDVQQLEwVDYXJkcx + YgRW50cnVzdCBQcm9kdWN0IE1hbmFnZW1lbnQgQ2FyZHMwejBVMQswCQYDVQQGEwJVUzEQMA4GA1U + EChMHRW50cnVzdDEjMCEGA1UECxMaRW50cnVzdCBQcm9kdWN0IE1hbmFnZW1lbnQxDzANBgNVBAsT + BlBlb3BsZRYhRW50cnVzdCBQcm9kdWN0IE1hbmFnZW1lbnQgUGVvcGxlMCQwHDELMAkGA1UEBhMCT + loxDTALBgNVBAoTBExJTloWBExJTlowTDA1MQswCQYDVQQGEwJOWjENMAsGA1UEChMETElOWjEXMB + UGA1UECxMOQWRtaW5pc3RyYXRvcnMWE0xJTlogQWRtaW5pc3RyYXRvcnMwPjAuMQswCQYDVQQGEwJ + OWjENMAsGA1UEChMETElOWjEQMA4GA1UECxMHRGV2aWNlcxYMTElOWiBEZXZpY2VzMDwwLTELMAkG + A1UEBhMCTloxDTALBgNVBAoTBExJTloxDzANBgNVBAsTBlBlb3BsZRYLTElOWiBQZW9wbGUwVDA0M + QswCQYDVQQGEwJVUzElMCMGA1UEChMcTWFnZWxsYW4gSGVhbHRoIFNlcnZpY2VzIEluYxYcTWFnZW + xsYW4gSGVhbHRoIFNlcnZpY2VzIEluYzBnMFExFTATBgoJkiaJk/IsZAEZFgVsb2NhbDEUMBIGCgm + SJomT8ixkARkWBHRlc3QxEzARBgNVBAsTClRlc3QgVXNlcnMxDTALBgNVBAcTBFRlc3QWEkNMUyBU + ZXN0IFVzZXIgVGVzdDBEMDoxCzAJBgNVBAYTAnVzMSswKQYDVQQKEyJGZWRlcmFsIEhvbWUgTG9hb + iBCYW5rIG9mIE5ldyBZb3JrFgZGSExCTlkwWjBKMQswCQYDVQQGEwJ1czErMCkGA1UEChMiRmVkZX + JhbCBIb21lIExvYW4gQmFuayBvZiBOZXcgWW9yazEOMAwGA1UECxMFMUxpbmsWDEZITEJOWSAxTGl + uazBcMEsxCzAJBgNVBAYTAnVzMSswKQYDVQQKEyJGZWRlcmFsIEhvbWUgTG9hbiBCYW5rIG9mIE5l + dyBZb3JrMQ8wDQYDVQQLEwZBZG1pbnMWDUZITEJOWSBBZG1pbnMwSAYJKoZIhvZ9B0QQMTswOTAQA + gEAAgEAAgEIAgEPAwIDeDAQAgEAAgEAAgEIAgEKAwIAeTAQAgEAAgEAAgEIAgEKAwIAeQMBADBxBg + kqhkiG9n0HTUAxZAxiQUVTLUNCQy0xMjgsIEFFUy1DQkMtMjU2LCBBRVMtR0NNLTEyOCwgQUVTLUd + DTS0yNTYsIFRSSVBMRURFUy1DQkMtMTkyLCBDQVNUNS1DQkMtODAsIENBU1Q1LUNCQy0xMjgwdgYJ + KoZIhvZ9B01BMWkMZ0VDRFNBLVJFQ09NTUVOREVELCBSU0FQU1MtUkVDT01NRU5ERUQsIFJTQS1SR + UNPTU1FTkRFRCwgRFNBLVJFQ09NTUVOREVELCBFQ0RTQS1TSEExLCBSU0EtU0hBMSwgRFNBLVNIQT + EwFwYJKoZIhvZ9B00QMQoECFJTQS0yMDQ4MIIWSQYJKoZIhvZ9B00AMYIWOjCCFjYwgYACAQAwADB + 
5MQswCQYDVQQGEwJVUzEQMA4GA1UEChMHRW50cnVzdDEiMCAGA1UECxMZQ2VydGlmaWNhdGlvbiBB + dXRob3JpdGllczESMBAGA1UECxMJRENvbVN1YkNBMSAwHgYDVQQDExdTZWN1cml0eSBPZmZpY2VyI + FBvbGljeTB9AgEBMAAwdjELMAkGA1UEBhMCVVMxEDAOBgNVBAoTB0VudHJ1c3QxIjAgBgNVBAsTGU + NlcnRpZmljYXRpb24gQXV0aG9yaXRpZXMxEjAQBgNVBAsTCURDb21TdWJDQTEdMBsGA1UEAxMUQWR + taW5pc3RyYXRvciBQb2xpY3kweAIBAjAAMHExCzAJBgNVBAYTAlVTMRAwDgYDVQQKEwdFbnRydXN0 + MSIwIAYDVQQLExlDZXJ0aWZpY2F0aW9uIEF1dGhvcml0aWVzMRIwEAYDVQQLEwlEQ29tU3ViQ0ExG + DAWBgNVBAMTD0VuZCBVc2VyIFBvbGljeTB9AgEDMAAwdjELMAkGA1UEBhMCVVMxEDAOBgNVBAoTB0 + VudHJ1c3QxIjAgBgNVBAsTGUNlcnRpZmljYXRpb24gQXV0aG9yaXRpZXMxEjAQBgNVBAsTCURDb21 + TdWJDQTEdMBsGA1UEAxMUQWRtaW5pc3RyYXRvciBQb2xpY3kwfQIBBDAAMHYxCzAJBgNVBAYTAlVT + MRAwDgYDVQQKEwdFbnRydXN0MSIwIAYDVQQLExlDZXJ0aWZpY2F0aW9uIEF1dGhvcml0aWVzMRIwE + AYDVQQLEwlEQ29tU3ViQ0ExHTAbBgNVBAMTFEFkbWluaXN0cmF0b3IgUG9saWN5MHMCAQUwADBsMQ + swCQYDVQQGEwJVUzEQMA4GA1UEChMHRW50cnVzdDEiMCAGA1UECxMZQ2VydGlmaWNhdGlvbiBBdXR + ob3JpdGllczESMBAGA1UECxMJRENvbVN1YkNBMRMwEQYDVQQDEwpBU0ggUG9saWN5MH0CAQYwADB2 + MQswCQYDVQQGEwJVUzEQMA4GA1UEChMHRW50cnVzdDEiMCAGA1UECxMZQ2VydGlmaWNhdGlvbiBBd + XRob3JpdGllczESMBAGA1UECxMJRENvbVN1YkNBMR0wGwYDVQQDExRBZG1pbmlzdHJhdG9yIFBvbG + ljeTB9AgEHMAAwdjELMAkGA1UEBhMCVVMxEDAOBgNVBAoTB0VudHJ1c3QxIjAgBgNVBAsTGUNlcnR + pZmljYXRpb24gQXV0aG9yaXRpZXMxEjAQBgNVBAsTCURDb21TdWJDQTEdMBsGA1UEAxMUQWRtaW5p + c3RyYXRvciBQb2xpY3kwfAIBCDAAMHUxCzAJBgNVBAYTAlVTMRAwDgYDVQQKEwdFbnRydXN0MSIwI + AYDVQQLExlDZXJ0aWZpY2F0aW9uIEF1dGhvcml0aWVzMRIwEAYDVQQLEwlEQ29tU3ViQ0ExHDAaBg + NVBAMTE1NlcnZlciBMb2dpbiBQb2xpY3kwfAIBCTAAMHUxCzAJBgNVBAYTAlVTMRAwDgYDVQQKEwd + FbnRydXN0MSIwIAYDVQQLExlDZXJ0aWZpY2F0aW9uIEF1dGhvcml0aWVzMRIwEAYDVQQLEwlEQ29t + U3ViQ0ExHDAaBgNVBAMTE1NlcnZlciBMb2dpbiBQb2xpY3kwfQIBCjAAMHYxCzAJBgNVBAYTAlVTM + RAwDgYDVQQKEwdFbnRydXN0MSIwIAYDVQQLExlDZXJ0aWZpY2F0aW9uIEF1dGhvcml0aWVzMRIwEA + YDVQQLEwlEQ29tU3ViQ0ExHTAbBgNVBAMTFEFkbWluaXN0cmF0b3IgUG9saWN5MIGAAgEMMAAweTE + LMAkGA1UEBhMCVVMxEDAOBgNVBAoTB0VudHJ1c3QxIjAgBgNVBAsTGUNlcnRpZmljYXRpb24gQXV0 + 
aG9yaXRpZXMxEjAQBgNVBAsTCURDb21TdWJDQTEgMB4GA1UEAxMXQ0xTIFNlcnZlciBMb2dpbiBQb + 2xpY3kwgYACAQ0wADB5MQswCQYDVQQGEwJVUzEQMA4GA1UEChMHRW50cnVzdDEiMCAGA1UECxMZQ2 + VydGlmaWNhdGlvbiBBdXRob3JpdGllczESMBAGA1UECxMJRENvbVN1YkNBMSAwHgYDVQQDExdTZWN + 1cml0eSBPZmZpY2VyIFBvbGljeTCBgAIBDjAAMHkxCzAJBgNVBAYTAlVTMRAwDgYDVQQKEwdFbnRy + dXN0MSIwIAYDVQQLExlDZXJ0aWZpY2F0aW9uIEF1dGhvcml0aWVzMRIwEAYDVQQLEwlEQ29tU3ViQ + 0ExIDAeBgNVBAMTF1NlY3VyaXR5IE9mZmljZXIgUG9saWN5MH0CAQ8wADB2MQswCQYDVQQGEwJVUz + EQMA4GA1UEChMHRW50cnVzdDEiMCAGA1UECxMZQ2VydGlmaWNhdGlvbiBBdXRob3JpdGllczESMBA + GA1UECxMJRENvbVN1YkNBMR0wGwYDVQQDExRBZG1pbmlzdHJhdG9yIFBvbGljeTB9AgERMAAwdjEL + MAkGA1UEBhMCVVMxEDAOBgNVBAoTB0VudHJ1c3QxIjAgBgNVBAsTGUNlcnRpZmljYXRpb24gQXV0a + G9yaXRpZXMxEjAQBgNVBAsTCURDb21TdWJDQTEdMBsGA1UEAxMUQWRtaW5pc3RyYXRvciBQb2xpY3 + kwfAIBCzAAMHUxCzAJBgNVBAYTAlVTMRAwDgYDVQQKEwdFbnRydXN0MSIwIAYDVQQLExlDZXJ0aWZ + pY2F0aW9uIEF1dGhvcml0aWVzMRIwEAYDVQQLEwlEQ29tU3ViQ0ExHDAaBgNVBAMTE0NMUyBFbmQg + VXNlciBQb2xpY3kwfQIBEjAAMHYxCzAJBgNVBAYTAlVTMRAwDgYDVQQKEwdFbnRydXN0MSIwIAYDV + QQLExlDZXJ0aWZpY2F0aW9uIEF1dGhvcml0aWVzMRIwEAYDVQQLEwlEQ29tU3ViQ0ExHTAbBgNVBA + MTFEFkbWluaXN0cmF0b3IgUG9saWN5MH0CARMwADB2MQswCQYDVQQGEwJVUzEQMA4GA1UEChMHRW5 + 0cnVzdDEiMCAGA1UECxMZQ2VydGlmaWNhdGlvbiBBdXRob3JpdGllczESMBAGA1UECxMJRENvbVN1 + YkNBMR0wGwYDVQQDExRBZG1pbmlzdHJhdG9yIFBvbGljeTCBgAIBFDAAMHkxCzAJBgNVBAYTAlVTM + RAwDgYDVQQKEwdFbnRydXN0MSIwIAYDVQQLExlDZXJ0aWZpY2F0aW9uIEF1dGhvcml0aWVzMRIwEA + YDVQQLEwlEQ29tU3ViQ0ExIDAeBgNVBAMTF0R5bkNvcnAgRW5kIFVzZXIgUG9saWN5MH8CASAwADB + 4MQswCQYDVQQGEwJVUzEQMA4GA1UEChMHRW50cnVzdDEiMCAGA1UECxMZQ2VydGlmaWNhdGlvbiBB + dXRob3JpdGllczESMBAGA1UECxMJRENvbVN1YkNBMR8wHQYDVQQDExZDU1JFUyBSZXF1ZXN0b3IgU + G9saWN5MHkCASEwADByMQswCQYDVQQGEwJVUzEQMA4GA1UEChMHRW50cnVzdDEiMCAGA1UECxMZQ2 + VydGlmaWNhdGlvbiBBdXRob3JpdGllczESMBAGA1UECxMJRENvbVN1YkNBMRkwFwYDVQQDExBNRE1 + XUyBYQVAgUG9saWN5MEkCASIwADBCMQswCQYDVQQGEwJVUzEQMA4GA1UEChMHRW50cnVzdDEhMB8G + A1UEAxMYU09BUCBBZG1pbiBFeHBvcnQgUG9saWN5MIGDAgEjMAAwfDELMAkGA1UEBhMCVVMxEDAOB + 
gNVBAoTB0VudHJ1c3QxIjAgBgNVBAsTGUNlcnRpZmljYXRpb24gQXV0aG9yaXRpZXMxEjAQBgNVBA + sTCURDb21TdWJDQTEjMCEGA1UEAxMaRXhwb3J0YWJsZSBFbmQgVXNlciBQb2xpY3kweAIBJDAAMHE + xCzAJBgNVBAYTAlVTMRAwDgYDVQQKEwdFbnRydXN0MSIwIAYDVQQLExlDZXJ0aWZpY2F0aW9uIEF1 + dGhvcml0aWVzMRIwEAYDVQQLEwlEQ29tU3ViQ0ExGDAWBgNVBAMTD0VuZCBVc2VyIFBvbGljeTB9A + gElMAAwdjELMAkGA1UEBhMCVVMxEDAOBgNVBAoTB0VudHJ1c3QxIjAgBgNVBAsTGUNlcnRpZmljYX + Rpb24gQXV0aG9yaXRpZXMxEjAQBgNVBAsTCURDb21TdWJDQTEdMBsGA1UEAxMUQWRtaW5pc3RyYXR + vciBQb2xpY3kwfQIBJjAAMHYxCzAJBgNVBAYTAlVTMRAwDgYDVQQKEwdFbnRydXN0MSIwIAYDVQQL + ExlDZXJ0aWZpY2F0aW9uIEF1dGhvcml0aWVzMRIwEAYDVQQLEwlEQ29tU3ViQ0ExHTAbBgNVBAMTF + E1vYmlsZSBEZXZpY2UgUG9saWN5MHwCAScwADB1MQswCQYDVQQGEwJVUzEQMA4GA1UEChMHRW50cn + VzdDEiMCAGA1UECxMZQ2VydGlmaWNhdGlvbiBBdXRob3JpdGllczESMBAGA1UECxMJRENvbVN1YkN + BMRwwGgYDVQQDExNTZXJ2ZXIgTG9naW4gUG9saWN5MH0CASgwADB2MQswCQYDVQQGEwJVUzEQMA4G + A1UEChMHRW50cnVzdDEiMCAGA1UECxMZQ2VydGlmaWNhdGlvbiBBdXRob3JpdGllczESMBAGA1UEC + xMJRENvbVN1YkNBMR0wGwYDVQQDExRBZG1pbmlzdHJhdG9yIFBvbGljeTCBgQIBKTAAMHoxCzAJBg + NVBAYTAlVTMRAwDgYDVQQKEwdFbnRydXN0MSIwIAYDVQQLExlDZXJ0aWZpY2F0aW9uIEF1dGhvcml + 0aWVzMRIwEAYDVQQLEwlEQ29tU3ViQ0ExITAfBgNVBAMTGFNQT0MgU2VydmVyIExvZ2luIFBvbGlj + eTCBggIBKjAAMHsxCzAJBgNVBAYTAlVTMRAwDgYDVQQKEwdFbnRydXN0MSIwIAYDVQQLExlDZXJ0a + WZpY2F0aW9uIEF1dGhvcml0aWVzMRIwEAYDVQQLEwlEQ29tU3ViQ0ExIjAgBgNVBAMTGVNQT0MgQW + RtaW5pc3RyYXRvciBQb2xpY3kwfAIBKzAAMHUxCzAJBgNVBAYTAlVTMRAwDgYDVQQKEwdFbnRydXN + 0MSIwIAYDVQQLExlDZXJ0aWZpY2F0aW9uIEF1dGhvcml0aWVzMRIwEAYDVQQLEwlEQ29tU3ViQ0Ex + HDAaBgNVBAMTE1NlcnZlciBMb2dpbiBQb2xpY3kwgZECASwwADCBiTELMAkGA1UEBhMCVVMxEDAOB + gNVBAoTB0VudHJ1c3QxIjAgBgNVBAsTGUNlcnRpZmljYXRpb24gQXV0aG9yaXRpZXMxEjAQBgNVBA + sTCURDb21TdWJDQTEwMC4GA1UEAxMnTWFzdGVyIExpc3QgU2lnbmVyIEFkbWluaXN0cmF0b3IgUG9 + saWN5MH0CAS0wADB2MQswCQYDVQQGEwJVUzEQMA4GA1UEChMHRW50cnVzdDEiMCAGA1UECxMZQ2Vy + dGlmaWNhdGlvbiBBdXRob3JpdGllczESMBAGA1UECxMJRENvbVN1YkNBMR0wGwYDVQQDExRBZG1pb + mlzdHJhdG9yIFBvbGljeTB4AgEuMAAwcTELMAkGA1UEBhMCVVMxEDAOBgNVBAoTB0VudHJ1c3QxIj + 
AgBgNVBAsTGUNlcnRpZmljYXRpb24gQXV0aG9yaXRpZXMxEjAQBgNVBAsTCURDb21TdWJDQTEYMBY + GA1UEAxMPRW5kIFVzZXIgUG9saWN5MH0CAS8wADB2MQswCQYDVQQGEwJVUzEQMA4GA1UEChMHRW50 + cnVzdDEiMCAGA1UECxMZQ2VydGlmaWNhdGlvbiBBdXRob3JpdGllczESMBAGA1UECxMJRENvbVN1Y + kNBMR0wGwYDVQQDExRBZG1pbmlzdHJhdG9yIFBvbGljeTB4AgExMAAwcTELMAkGA1UEBhMCVVMxED + AOBgNVBAoTB0VudHJ1c3QxIjAgBgNVBAsTGUNlcnRpZmljYXRpb24gQXV0aG9yaXRpZXMxEjAQBgN + VBAsTCURDb21TdWJDQTEYMBYGA1UEAxMPRW5kIFVzZXIgUG9saWN5MH0CATIwADB2MQswCQYDVQQG + EwJVUzEQMA4GA1UEChMHRW50cnVzdDEiMCAGA1UECxMZQ2VydGlmaWNhdGlvbiBBdXRob3JpdGllc + zESMBAGA1UECxMJRENvbVN1YkNBMR0wGwYDVQQDExRBZG1pbmlzdHJhdG9yIFBvbGljeTB8AgEwMA + AwdTELMAkGA1UEBhMCVVMxEDAOBgNVBAoTB0VudHJ1c3QxIjAgBgNVBAsTGUNlcnRpZmljYXRpb24 + gQXV0aG9yaXRpZXMxEjAQBgNVBAsTCURDb21TdWJDQTEcMBoGA1UEAxMTU2VydmVyIExvZ2luIFBv + bGljeTB9AgEzMAAwdjELMAkGA1UEBhMCVVMxEDAOBgNVBAoTB0VudHJ1c3QxIjAgBgNVBAsTGUNlc + nRpZmljYXRpb24gQXV0aG9yaXRpZXMxEjAQBgNVBAsTCURDb21TdWJDQTEdMBsGA1UEAxMUQWRtaW + 5pc3RyYXRvciBQb2xpY3kwfQIBNTAAMHYxCzAJBgNVBAYTAlVTMRAwDgYDVQQKEwdFbnRydXN0MSI + wIAYDVQQLExlDZXJ0aWZpY2F0aW9uIEF1dGhvcml0aWVzMRIwEAYDVQQLEwlEQ29tU3ViQ0ExHTAb + BgNVBAMTFENhcmQgRW5kIFVzZXIgUG9saWN5MHgCATQwADBxMQswCQYDVQQGEwJVUzEQMA4GA1UEC + hMHRW50cnVzdDEiMCAGA1UECxMZQ2VydGlmaWNhdGlvbiBBdXRob3JpdGllczESMBAGA1UECxMJRE + NvbVN1YkNBMRgwFgYDVQQDEw9FbmQgVXNlciBQb2xpY3kwfAIBNjAAMHUxCzAJBgNVBAYTAlVTMRA + wDgYDVQQKEwdFbnRydXN0MSIwIAYDVQQLExlDZXJ0aWZpY2F0aW9uIEF1dGhvcml0aWVzMRIwEAYD + VQQLEwlEQ29tU3ViQ0ExHDAaBgNVBAMTE01ETSBFbmQgVXNlciBQb2xpY3kwfAIBNzAAMHUxCzAJB + gNVBAYTAlVTMRAwDgYDVQQKEwdFbnRydXN0MSIwIAYDVQQLExlDZXJ0aWZpY2F0aW9uIEF1dGhvcm + l0aWVzMRIwEAYDVQQLEwlEQ29tU3ViQ0ExHDAaBgNVBAMTE1NlcnZlciBMb2dpbiBQb2xpY3kwgYU + CATgwADB+MQswCQYDVQQGEwJVUzEQMA4GA1UEChMHRW50cnVzdDEiMCAGA1UECxMZQ2VydGlmaWNh + dGlvbiBBdXRob3JpdGllczESMBAGA1UECxMJRENvbVN1YkNBMSUwIwYDVQQDExxNU08gVU1TIEFkb + WluaXN0cmF0b3IgUG9saWN5MIJZ7wYKKoZIhvZ9B00uADGCWd8wglnbMDEwFwwSY3NjX3BpdjFrX2 + NhcmRhdXRoAgEnMBYwFDASDA1QaXYxS0NhcmRBdXRoAgFDMEwwEwwOY3NjX3Bpdm1peGVkXzMCASg + 
wNTAQMA4MCVBpdjFLQXV0aAIBRDAPMA0MCFBpdjJLRW5jAgFFMBAwDgwJUGl2MktTaWduAgFGMIG4 + MBAMC2VudF9hZF9jbHMxAgE3MIGjMIGgMA4MCUR1YWxVc2FnZQIBXDCBjTELMAkGA1UEBhMCVVMxE + DAOBgNVBAoTB0VudHJ1c3QxIjAgBgNVBAsTGUNlcnRpZmljYXRpb24gQXV0aG9yaXRpZXMxEjAQBg + NVBAsTCURDb21TdWJDQTE0MDIGA1UEAxMrQ0xTIDF5ciBEb21haW4gQ29udHJvbGxlciBEdWFsIFV + zYWdlIFBvbGljeTCBuDAQDAtlbnRfYWRfY2xzMgIBODCBozCBoDAODAlEdWFsVXNhZ2UCAV0wgY0x + CzAJBgNVBAYTAlVTMRAwDgYDVQQKEwdFbnRydXN0MSIwIAYDVQQLExlDZXJ0aWZpY2F0aW9uIEF1d + Ghvcml0aWVzMRIwEAYDVQQLEwlEQ29tU3ViQ0ExNDAyBgNVBAMTK0NMUyAyeXIgRG9tYWluIENvbn + Ryb2xsZXIgRHVhbCBVc2FnZSBQb2xpY3kwdTARDAxlbnRfYWRfY2xzMm0CAVIwTjAjMBAMCkVuY3J + 5cHRpb24CAgCQog8MCkVuY3J5cHRpb24CAQEwJzASDAxWZXJpZmljYXRpb24CAgCRohEMDFZlcmlm + aWNhdGlvbgIBAqIQDAtlbnRfZGVmYXVsdAIBAzCBvjASDA1lbnRfYWRfY2xzMm1hAgFUMIGnMIGkM + A8MCUR1YWxVc2FnZQICAJQwgZAxCzAJBgNVBAYTAlVTMRAwDgYDVQQKEwdFbnRydXN0MSIwIAYDVQ + QLExlDZXJ0aWZpY2F0aW9uIEF1dGhvcml0aWVzMRIwEAYDVQQLEwlEQ29tU3ViQ0ExNzA1BgNVBAM + TLkNMUyAybW9udGggRG9tYWluIENvbnRyb2xsZXIgRHVhbCBVc2FnZSBQb2xpY3kwgbAwDgwJZW50 + X2FkX2RjAgF4MIGdMIGaMBAMCkR1YWwgVXNhZ2UCAgDSMIGFMQswCQYDVQQGEwJVUzEQMA4GA1UEC + hMHRW50cnVzdDEiMCAGA1UECxMZQ2VydGlmaWNhdGlvbiBBdXRob3JpdGllczESMBAGA1UECxMJRE + NvbVN1YkNBMSwwKgYDVQQDEyNFbnRlcnByaXNlIERvbWFpbiBDb250cm9sbGVyIFBvbGljeTBHMBk + ME2VudF9hZG1zcnZjc191bXNfZWECAgCLMCowEjAQDApFbmNyeXB0aW9uAgIA9DAUMBIMDFZlcmlm + aWNhdGlvbgICAPUwRTAZDBRlbnRfYWRtc3J2Y3NfdXNlcnJlZwIBEjAoMBEwDwwKRW5jcnlwdGlvb + gIBHjATMBEMDFZlcmlmaWNhdGlvbgIBHzCBzzAZDBRlbnRfYWRtc3J2Y3NfdXNybWdtdAIBETCBsT + ARMA8MCkVuY3J5cHRpb24CARwwgZswEQwMVmVyaWZpY2F0aW9uAgEdMIGFMQswCQYDVQQGEwJVUzE + QMA4GA1UEChMHRW50cnVzdDEiMCAGA1UECxMZQ2VydGlmaWNhdGlvbiBBdXRob3JpdGllczESMBAG + A1UECxMJRENvbVN1YkNBMSwwKgYDVQQDEyNUcnVlUGFzcyBTZXJ2ZXIgVmVyaWZpY2F0aW9uIFBvb + GljeTA6MA4MCWVudF9iYXNpYwIBJjAoMBEwDwwKRW5jcnlwdGlvbgIBQTATMBEMDFZlcmlmaWNhdG + lvbgIBQjCCATkwDQwIZW50X2NsczECAS8wggEmMIGOMA8MCkVuY3J5cHRpb24CAVIwezELMAkGA1U + EBhMCVVMxEDAOBgNVBAoTB0VudHJ1c3QxIjAgBgNVBAsTGUNlcnRpZmljYXRpb24gQXV0aG9yaXRp + 
ZXMxEjAQBgNVBAsTCURDb21TdWJDQTEiMCAGA1UEAxMZQ0xTIDF5ciBFbmNyeXB0aW9uIFBvbGlje + TCBkjARDAxWZXJpZmljYXRpb24CAVMwfTELMAkGA1UEBhMCVVMxEDAOBgNVBAoTB0VudHJ1c3QxIj + AgBgNVBAsTGUNlcnRpZmljYXRpb24gQXV0aG9yaXRpZXMxEjAQBgNVBAsTCURDb21TdWJDQTEkMCI + GA1UEAxMbQ0xTIDF5ciBWZXJpZmljYXRpb24gUG9saWN5MIIBOTANDAhlbnRfY2xzMgIBMDCCASYw + gY4wDwwKRW5jcnlwdGlvbgIBVDB7MQswCQYDVQQGEwJVUzEQMA4GA1UEChMHRW50cnVzdDEiMCAGA + 1UECxMZQ2VydGlmaWNhdGlvbiBBdXRob3JpdGllczESMBAGA1UECxMJRENvbVN1YkNBMSIwIAYDVQ + QDExlDTFMgMnlyIEVuY3J5cHRpb24gUG9saWN5MIGSMBEMDFZlcmlmaWNhdGlvbgIBVTB9MQswCQY + DVQQGEwJVUzEQMA4GA1UEChMHRW50cnVzdDEiMCAGA1UECxMZQ2VydGlmaWNhdGlvbiBBdXRob3Jp + dGllczESMBAGA1UECxMJRENvbVN1YkNBMSQwIgYDVQQDExtDTFMgMnlyIFZlcmlmaWNhdGlvbiBQb + 2xpY3kwQDASDA1lbnRfY2xzX2FkbWluAgFXMCowEjAQDApFbmNyeXB0aW9uAgIAmDAUMBIMDFZlcm + lmaWNhdGlvbgICAJkwggFPMBMMDmVudF9jbHNfYWRtaW4yAgFWMIIBNjCBljAQDApFbmNyeXB0aW9 + uAgIAljCBgTELMAkGA1UEBhMCVVMxEDAOBgNVBAoTB0VudHJ1c3QxIjAgBgNVBAsTGUNlcnRpZmlj + YXRpb24gQXV0aG9yaXRpZXMxEjAQBgNVBAsTCURDb21TdWJDQTEoMCYGA1UEAxMfQ0xTIEFkbWluI + DJ5ciBFbmNyeXB0aW9uIFBvbGljeTCBmjASDAxWZXJpZmljYXRpb24CAgCXMIGDMQswCQYDVQQGEw + JVUzEQMA4GA1UEChMHRW50cnVzdDEiMCAGA1UECxMZQ2VydGlmaWNhdGlvbiBBdXRob3JpdGllczE + SMBAGA1UECxMJRENvbVN1YkNBMSowKAYDVQQDEyFDTFMgQWRtaW4gMnlyIFZlcmlmaWNhdGlvbiBQ + b2xpY3kwgbgwFwwSZW50X2Ntc2NsaWVudF9jbHMxAgExMIGcMIGZMA8MCkR1YWwgVXNhZ2UCAVYwg + YUxCzAJBgNVBAYTAlVTMRAwDgYDVQQKEwdFbnRydXN0MSIwIAYDVQQLExlDZXJ0aWZpY2F0aW9uIE + F1dGhvcml0aWVzMRIwEAYDVQQLEwlEQ29tU3ViQ0ExLDAqBgNVBAMTI0NMUyAxeXIgQUkgQ2xpZW5 + 0IER1YWwgVXNhZ2UgUG9saWN5MIG/MBkMFGVudF9jbXNjbGllbnRfY2xzMV9mAgEzMIGhMIGeMA8M + CkR1YWwgVXNhZ2UCAVgwgYoxCzAJBgNVBAYTAlVTMRAwDgYDVQQKEwdFbnRydXN0MSIwIAYDVQQLE + xlDZXJ0aWZpY2F0aW9uIEF1dGhvcml0aWVzMRIwEAYDVQQLEwlEQ29tU3ViQ0ExMTAvBgNVBAMTKE + NMUyAxeXIgQUkgQ2xpZW50IEZpbGUgRHVhbCBVc2FnZSBQb2xpY3kwgbgwFwwSZW50X2Ntc2NsaWV + udF9jbHMyAgEyMIGcMIGZMA8MCkR1YWwgVXNhZ2UCAVcwgYUxCzAJBgNVBAYTAlVTMRAwDgYDVQQK + EwdFbnRydXN0MSIwIAYDVQQLExlDZXJ0aWZpY2F0aW9uIEF1dGhvcml0aWVzMRIwEAYDVQQLEwlEQ + 
29tU3ViQ0ExLDAqBgNVBAMTI0NMUyAyeXIgQUkgQ2xpZW50IER1YWwgVXNhZ2UgUG9saWN5MIG/MB + kMFGVudF9jbXNjbGllbnRfY2xzMl9mAgE0MIGhMIGeMA8MCkR1YWwgVXNhZ2UCAVkwgYoxCzAJBgN + VBAYTAlVTMRAwDgYDVQQKEwdFbnRydXN0MSIwIAYDVQQLExlDZXJ0aWZpY2F0aW9uIEF1dGhvcml0 + aWVzMRIwEAYDVQQLEwlEQ29tU3ViQ0ExMTAvBgNVBAMTKENMUyAyeXIgQUkgQ2xpZW50IEZpbGUgR + HVhbCBVc2FnZSBQb2xpY3kwLjAXDBJlbnRfY21zY2xpZW50X3NrZHUCASowEzARMA8MCkR1YWwgVX + NhZ2UCAUkwMDAZDBRlbnRfY21zY2xpZW50X3NrZHVfZgIBKzATMBEwDwwKRHVhbCBVc2FnZQIBSjC + BuDAXDBJlbnRfY21zc2VydmVyX2NsczECATUwgZwwgZkwDwwKRHVhbCBVc2FnZQIBWjCBhTELMAkG + A1UEBhMCVVMxEDAOBgNVBAoTB0VudHJ1c3QxIjAgBgNVBAsTGUNlcnRpZmljYXRpb24gQXV0aG9ya + XRpZXMxEjAQBgNVBAsTCURDb21TdWJDQTEsMCoGA1UEAxMjQ0xTIDF5ciBBSSBTZXJ2ZXIgRHVhbC + BVc2FnZSBQb2xpY3kwgbgwFwwSZW50X2Ntc3NlcnZlcl9jbHMyAgE2MIGcMIGZMA8MCkR1YWwgVXN + hZ2UCAVswgYUxCzAJBgNVBAYTAlVTMRAwDgYDVQQKEwdFbnRydXN0MSIwIAYDVQQLExlDZXJ0aWZp + Y2F0aW9uIEF1dGhvcml0aWVzMRIwEAYDVQQLEwlEQ29tU3ViQ0ExLDAqBgNVBAMTI0NMUyAyeXIgQ + UkgU2VydmVyIER1YWwgVXNhZ2UgUG9saWN5MC4wFwwSZW50X2Ntc3NlcnZlcl9za2R1AgEsMBMwET + APDApEdWFsIFVzYWdlAgFLMEYwGAwSZW50X2NzcmVzX2FwcHJvdmVyAgIAjDAqMBIwEAwKRW5jcnl + wdGlvbgICAPYwFDASDAxWZXJpZmljYXRpb24CAgD3MEYwGAwTZW50X2NzcmVzX3JlcXVlc3RvcgIB + bzAqMBIwEAwKRW5jcnlwdGlvbgICAMUwFDASDAxWZXJpZmljYXRpb24CAgDGMDwwEAwLZW50X2RlZ + mF1bHQCAQMwKDARMA8MCkVuY3J5cHRpb24CAQEwEzARDAxWZXJpZmljYXRpb24CAQIwggE8MBAMC2 + VudF9kZXNrdG9wAgEHMIIBJjCBjjAPDApFbmNyeXB0aW9uAgEJMHsxCzAJBgNVBAYTAlVTMRAwDgY + DVQQKEwdFbnRydXN0MSIwIAYDVQQLExlDZXJ0aWZpY2F0aW9uIEF1dGhvcml0aWVzMRIwEAYDVQQL + EwlEQ29tU3ViQ0ExIjAgBgNVBAMTGVNhZmVOZXQgRW5jcnlwdGlvbiBQb2xpY3kwgZIwEQwMVmVya + WZpY2F0aW9uAgEKMH0xCzAJBgNVBAYTAlVTMRAwDgYDVQQKEwdFbnRydXN0MSIwIAYDVQQLExlDZX + J0aWZpY2F0aW9uIEF1dGhvcml0aWVzMRIwEAYDVQQLEwlEQ29tU3ViQ0ExJDAiBgNVBAMTG1NhZmV + OZXQgVmVyaWZpY2F0aW9uIFBvbGljeTCBpDAVDBBlbnRfZHVfYmFzaWNfZWt1AgFtMIGKMIGHMBAM + CkR1YWwgVXNhZ2UCAgDCMHMxCzAJBgNVBAYTAlVTMRAwDgYDVQQKEwdFbnRydXN0MSIwIAYDVQQLE + xlDZXJ0aWZpY2F0aW9uIEF1dGhvcml0aWVzMRIwEAYDVQQLEwlEQ29tU3ViQ0ExGjAYBgNVBAMTEU + 
R1YWwgVXNhZ2UgUG9saWN5MEMwFQwQZW50X2VhY2NhdHRhY2hlZAIBaDAqMBIwEAwKRW5jcnlwdGl + vbgICALgwFDASDAxWZXJpZmljYXRpb24CAgC5MD0wDwwKZW50X2VhY2NvbgIBajAqMBIwEAwKRW5j + cnlwdGlvbgICALwwFDASDAxWZXJpZmljYXRpb24CAgC9MEUwFwwSZW50X2VhY2NzdGFuZGFsb25lA + gFpMCowEjAQDApFbmNyeXB0aW9uAgIAujAUMBIMDFZlcmlmaWNhdGlvbgICALswggGiMAwMB2VudF + 9lZnMCARUwggGQMHgwCAwDRUZTAgEnMGwxCzAJBgNVBAYTAlVTMRAwDgYDVQQKEwdFbnRydXN0MSI + wIAYDVQQLExlDZXJ0aWZpY2F0aW9uIEF1dGhvcml0aWVzMRIwEAYDVQQLEwlEQ29tU3ViQ0ExEzAR + BgNVBAMTCkVGUyBQb2xpY3kwgYYwDwwKRW5jcnlwdGlvbgIBJTBzMQswCQYDVQQGEwJVUzEQMA4GA + 1UEChMHRW50cnVzdDEiMCAGA1UECxMZQ2VydGlmaWNhdGlvbiBBdXRob3JpdGllczESMBAGA1UECx + MJRENvbVN1YkNBMRowGAYDVQQDExFFbmNyeXB0aW9uIFBvbGljeTCBijARDAxWZXJpZmljYXRpb24 + CASYwdTELMAkGA1UEBhMCVVMxEDAOBgNVBAoTB0VudHJ1c3QxIjAgBgNVBAsTGUNlcnRpZmljYXRp + b24gQXV0aG9yaXRpZXMxEjAQBgNVBAsTCURDb21TdWJDQTEcMBoGA1UEAxMTVmVyaWZpY2F0aW9uI + FBvbGljeTBEMBgME2VudF9lc3Zwbl9jb21tZWRvaWQCASUwKDARMA8MCkVuY3J5cHRpb24CAT8wEz + ARDAxWZXJpZmljYXRpb24CAUAwggE7MA8MCmVudF9ldG9rZW4CAWwwggEmMIGOMBAMCkVuY3J5cHR + pb24CAgDAMHoxCzAJBgNVBAYTAlVTMRAwDgYDVQQKEwdFbnRydXN0MSIwIAYDVQQLExlDZXJ0aWZp + Y2F0aW9uIEF1dGhvcml0aWVzMRIwEAYDVQQLEwlEQ29tU3ViQ0ExITAfBgNVBAMTGGVUb2tlbiBFb + mNyeXB0aW9uIFBvbGljeTCBkjASDAxWZXJpZmljYXRpb24CAgDBMHwxCzAJBgNVBAYTAlVTMRAwDg + YDVQQKEwdFbnRydXN0MSIwIAYDVQQLExlDZXJ0aWZpY2F0aW9uIEF1dGhvcml0aWVzMRIwEAYDVQQ + LEwlEQ29tU3ViQ0ExIzAhBgNVBAMTGmVUb2tlbiBWZXJpZmljYXRpb24gUG9saWN5MIIBOTAPDApl + bnRfZXhwb3J0AgEGMIIBJDCBjTAPDApFbmNyeXB0aW9uAgEHMHoxCzAJBgNVBAYTAlVTMRAwDgYDV + QQKEwdFbnRydXN0MSIwIAYDVQQLExlDZXJ0aWZpY2F0aW9uIEF1dGhvcml0aWVzMRIwEAYDVQQLEw + lEQ29tU3ViQ0ExITAfBgNVBAMUGEVuY3J5cHRpb24gUG9saWN5X0V4cG9ydDCBkTARDAxWZXJpZml + jYXRpb24CAQgwfDELMAkGA1UEBhMCVVMxEDAOBgNVBAoTB0VudHJ1c3QxIjAgBgNVBAsTGUNlcnRp + ZmljYXRpb24gQXV0aG9yaXRpZXMxEjAQBgNVBAsTCURDb21TdWJDQTEjMCEGA1UEAxQaVmVyaWZpY + 2F0aW9uIFBvbGljeV9FeHBvcnQwggE9MBQMD2VudF9nZW1hbHRvX2NzcAIBXjCCASMwgYowEAwKRW + 5jcnlwdGlvbgICAKswdjELMAkGA1UEBhMCVVMxEDAOBgNVBAoTB0VudHJ1c3QxIjAgBgNVBAsTGUN + 
lcnRpZmljYXRpb24gQXV0aG9yaXRpZXMxEjAQBgNVBAsTCURDb21TdWJDQTEdMBsGA1UEAxMUR00g + RW5jcnlwdGlvbiBQb2xpY3kwgZMwEgwMVmVyaWZpY2F0aW9uAgIArDB9MQswCQYDVQQGEwJVUzEQM + A4GA1UEChMHRW50cnVzdDEiMCAGA1UECxMZQ2VydGlmaWNhdGlvbiBBdXRob3JpdGllczESMBAGA1 + UECxMJRENvbVN1YkNBMSQwIgYDVQQDExtHZW1hbHRvIFZlcmlmaWNhdGlvbiBQb2xpY3kwgb8wFgw + RZW50X2lpc19za2R1X2NsczECATkwgaQwgaEwDwwKRHVhbCBVc2FnZQIBXjCBjTELMAkGA1UEBhMC + VVMxEDAOBgNVBAoTB0VudHJ1c3QxIjAgBgNVBAsTGUNlcnRpZmljYXRpb24gQXV0aG9yaXRpZXMxE + jAQBgNVBAsTCURDb21TdWJDQTE0MDIGA1UEAxMrQ0xTIDF5ciBJSVMgRHVhbCBVc2FnZSBObyBLZX + kgQmFja3VwIFBvbGljeTCBvzAWDBFlbnRfaWlzX3NrZHVfY2xzMgIBOjCBpDCBoTAPDApEdWFsIFV + zYWdlAgFfMIGNMQswCQYDVQQGEwJVUzEQMA4GA1UEChMHRW50cnVzdDEiMCAGA1UECxMZQ2VydGlm + aWNhdGlvbiBBdXRob3JpdGllczESMBAGA1UECxMJRENvbVN1YkNBMTQwMgYDVQQDEytDTFMgMnlyI + ElJUyBEdWFsIFVzYWdlIE5vIEtleSBCYWNrdXAgUG9saWN5MHswFwwSZW50X2lpc19za2R1X2Nscz + JtAgFTME4wIzAQDApFbmNyeXB0aW9uAgIAkqIPDApFbmNyeXB0aW9uAgEBMCcwEgwMVmVyaWZpY2F + 0aW9uAgIAk6IRDAxWZXJpZmljYXRpb24CAQKiEAwLZW50X2RlZmF1bHQCAQMwgcUwGAwTZW50X2lp + c19za2R1X2NsczJtYQIBVTCBqDCBpTAQDApEdWFsIFVzYWdlAgIAlTCBkDELMAkGA1UEBhMCVVMxE + DAOBgNVBAoTB0VudHJ1c3QxIjAgBgNVBAsTGUNlcnRpZmljYXRpb24gQXV0aG9yaXRpZXMxEjAQBg + NVBAsTCURDb21TdWJDQTE3MDUGA1UEAxMuQ0xTIDJtb250aCBJSVMgRHVhbCBVc2FnZSBObyBLZXk + gQmFja3VwIFBvbGljeTCBpzAQDAtlbnRfbWFjaGluZQIBeTCBkjCBjzAQDApEdWFsIFVzYWdlAgIA + 0zB7MQswCQYDVQQGEwJVUzEQMA4GA1UEChMHRW50cnVzdDEiMCAGA1UECxMZQ2VydGlmaWNhdGlvb + iBBdXRob3JpdGllczESMBAGA1UECxMJRENvbVN1YkNBMSIwIAYDVQQDExlFbnRlcnByaXNlIE1hY2 + hpbmUgUG9saWN5MEAwEgwNZW50X21kbXdzX2NsaQIBcDAqMBIwEAwKRW5jcnlwdGlvbgICAMcwFDA + SDAxWZXJpZmljYXRpb24CAgDIMEIwFAwPZW50X21saXN0X2FkbWluAgF/MCowEjAQDApFbmNyeXB0 + aW9uAgIA3jAUMBIMDFZlcmlmaWNhdGlvbgICAN8wggE5MBUMEGVudF9tbGlzdF9zaWduZXICAX4wg + gEeMIGHMBAMCkVuY3J5cHRpb24CAgDcMHMxCzAJBgNVBAYTAlVTMRAwDgYDVQQKEwdFbnRydXN0MS + IwIAYDVQQLExlDZXJ0aWZpY2F0aW9uIEF1dGhvcml0aWVzMRIwEAYDVQQLEwlEQ29tU3ViQ0ExGjA + YBgNVBAMTEUVuY3J5cHRpb24gUG9saWN5MIGRMBIMDFZlcmlmaWNhdGlvbgICAN0wezELMAkGA1UE + 
BhMCVVMxEDAOBgNVBAoTB0VudHJ1c3QxIjAgBgNVBAsTGUNlcnRpZmljYXRpb24gQXV0aG9yaXRpZ + XMxEjAQBgNVBAsTCURDb21TdWJDQTEiMCAGA1UEAxMZTWFzdGVyIExpc3QgU2lnbmVyIFBvbGljeT + CCAeQwGAwTZW50X21zX3NjX2NhcGlfY2xzMQIBLTCCAcYwgZ0wDwwKRHVhbCBVc2FnZQIBTDCBiTE + LMAkGA1UEBhMCVVMxEDAOBgNVBAoTB0VudHJ1c3QxIjAgBgNVBAsTGUNlcnRpZmljYXRpb24gQXV0 + aG9yaXRpZXMxEjAQBgNVBAsTCURDb21TdWJDQTEwMC4GA1UEAxMnQ0xTIDF5ciBEdWFsIFVzYWdlI + E5vIEtleSBCYWNrdXAgUG9saWN5MIGOMA8MCkVuY3J5cHRpb24CAU4wezELMAkGA1UEBhMCVVMxED + AOBgNVBAoTB0VudHJ1c3QxIjAgBgNVBAsTGUNlcnRpZmljYXRpb24gQXV0aG9yaXRpZXMxEjAQBgN + VBAsTCURDb21TdWJDQTEiMCAGA1UEAxMZQ0xTIDF5ciBFbmNyeXB0aW9uIFBvbGljeTCBkjARDAxW + ZXJpZmljYXRpb24CAU0wfTELMAkGA1UEBhMCVVMxEDAOBgNVBAoTB0VudHJ1c3QxIjAgBgNVBAsTG + UNlcnRpZmljYXRpb24gQXV0aG9yaXRpZXMxEjAQBgNVBAsTCURDb21TdWJDQTEkMCIGA1UEAxMbQ0 + xTIDF5ciBWZXJpZmljYXRpb24gUG9saWN5MIIB5DAYDBNlbnRfbXNfc2NfY2FwaV9jbHMyAgEuMII + BxjCBnTAPDApEdWFsIFVzYWdlAgFPMIGJMQswCQYDVQQGEwJVUzEQMA4GA1UEChMHRW50cnVzdDEi + MCAGA1UECxMZQ2VydGlmaWNhdGlvbiBBdXRob3JpdGllczESMBAGA1UECxMJRENvbVN1YkNBMTAwL + gYDVQQDEydDTFMgMnlyIER1YWwgVXNhZ2UgTm8gS2V5IEJhY2t1cCBQb2xpY3kwgY4wDwwKRW5jcn + lwdGlvbgIBUTB7MQswCQYDVQQGEwJVUzEQMA4GA1UEChMHRW50cnVzdDEiMCAGA1UECxMZQ2VydGl + maWNhdGlvbiBBdXRob3JpdGllczESMBAGA1UECxMJRENvbVN1YkNBMSIwIAYDVQQDExlDTFMgMnly + IEVuY3J5cHRpb24gUG9saWN5MIGSMBEMDFZlcmlmaWNhdGlvbgIBUDB9MQswCQYDVQQGEwJVUzEQM + A4GA1UEChMHRW50cnVzdDEiMCAGA1UECxMZQ2VydGlmaWNhdGlvbiBBdXRob3JpdGllczESMBAGA1 + UECxMJRENvbVN1YkNBMSQwIgYDVQQDExtDTFMgMnlyIFZlcmlmaWNhdGlvbiBQb2xpY3kwggHyMBk + MFGVudF9tc19zY19jYXBpX2NsczJtAgFRMIIB0zCBoTAQDApEdWFsIFVzYWdlAgIAjTCBjDELMAkG + A1UEBhMCVVMxEDAOBgNVBAoTB0VudHJ1c3QxIjAgBgNVBAsTGUNlcnRpZmljYXRpb24gQXV0aG9ya + XRpZXMxEjAQBgNVBAsTCURDb21TdWJDQTEzMDEGA1UEAxMqQ0xTIDJtb250aCBEdWFsIFVzYWdlIE + 5vIEtleSBCYWNrdXAgUG9saWN5MIGSMBAMCkVuY3J5cHRpb24CAgCPMH4xCzAJBgNVBAYTAlVTMRA + wDgYDVQQKEwdFbnRydXN0MSIwIAYDVQQLExlDZXJ0aWZpY2F0aW9uIEF1dGhvcml0aWVzMRIwEAYD + VQQLEwlEQ29tU3ViQ0ExJTAjBgNVBAMTHENMUyAybW9udGggRW5jcnlwdGlvbiBQb2xpY3kwgZcwE + 
gwMVmVyaWZpY2F0aW9uAgIAjjCBgDELMAkGA1UEBhMCVVMxEDAOBgNVBAoTB0VudHJ1c3QxIjAgBg + NVBAsTGUNlcnRpZmljYXRpb24gQXV0aG9yaXRpZXMxEjAQBgNVBAsTCURDb21TdWJDQTEnMCUGA1U + EAxMeQ0xTIDJtb250aCBWZXJpZmljYXRpb24gUG9saWN5MIIB5zAYDBNlbnRfbXNfc2NfY2FwaV9j + bHM0AgFPMIIByTCBnjAQDApEdWFsIFVzYWdlAgIAhzCBiTELMAkGA1UEBhMCVVMxEDAOBgNVBAoTB + 0VudHJ1c3QxIjAgBgNVBAsTGUNlcnRpZmljYXRpb24gQXV0aG9yaXRpZXMxEjAQBgNVBAsTCURDb2 + 1TdWJDQTEwMC4GA1UEAxMnQ0xTIDR5ciBEdWFsIFVzYWdlIE5vIEtleSBCYWNrdXAgUG9saWN5MIG + PMBAMCkVuY3J5cHRpb24CAgCJMHsxCzAJBgNVBAYTAlVTMRAwDgYDVQQKEwdFbnRydXN0MSIwIAYD + VQQLExlDZXJ0aWZpY2F0aW9uIEF1dGhvcml0aWVzMRIwEAYDVQQLEwlEQ29tU3ViQ0ExIjAgBgNVB + AMTGUNMUyA0eXIgRW5jcnlwdGlvbiBQb2xpY3kwgZMwEgwMVmVyaWZpY2F0aW9uAgIAiDB9MQswCQ + YDVQQGEwJVUzEQMA4GA1UEChMHRW50cnVzdDEiMCAGA1UECxMZQ2VydGlmaWNhdGlvbiBBdXRob3J + pdGllczESMBAGA1UECxMJRENvbVN1YkNBMSQwIgYDVQQDExtDTFMgNHlyIFZlcmlmaWNhdGlvbiBQ + b2xpY3kwggHnMBgME2VudF9tc19zY19jYXBpX2NsczUCAVAwggHJMIGeMBAMCkR1YWwgVXNhZ2UCA + gCKMIGJMQswCQYDVQQGEwJVUzEQMA4GA1UEChMHRW50cnVzdDEiMCAGA1UECxMZQ2VydGlmaWNhdG + lvbiBBdXRob3JpdGllczESMBAGA1UECxMJRENvbVN1YkNBMTAwLgYDVQQDEydDTFMgNXlyIER1YWw + gVXNhZ2UgTm8gS2V5IEJhY2t1cCBQb2xpY3kwgY8wEAwKRW5jcnlwdGlvbgICAIwwezELMAkGA1UE + BhMCVVMxEDAOBgNVBAoTB0VudHJ1c3QxIjAgBgNVBAsTGUNlcnRpZmljYXRpb24gQXV0aG9yaXRpZ + XMxEjAQBgNVBAsTCURDb21TdWJDQTEiMCAGA1UEAxMZQ0xTIDV5ciBFbmNyeXB0aW9uIFBvbGljeT + CBkzASDAxWZXJpZmljYXRpb24CAgCLMH0xCzAJBgNVBAYTAlVTMRAwDgYDVQQKEwdFbnRydXN0MSI + wIAYDVQQLExlDZXJ0aWZpY2F0aW9uIEF1dGhvcml0aWVzMRIwEAYDVQQLEwlEQ29tU3ViQ0ExJDAi + BgNVBAMTG0NMUyA1eXIgVmVyaWZpY2F0aW9uIFBvbGljeTCCAfgwGAwTZW50X21zX3NjX2NsczRfM + TAyNAIBXDCCAdowgaMwEAwKRHVhbCBVc2FnZQICAKUwgY4xCzAJBgNVBAYTAlVTMRAwDgYDVQQKEw + dFbnRydXN0MSIwIAYDVQQLExlDZXJ0aWZpY2F0aW9uIEF1dGhvcml0aWVzMRIwEAYDVQQLEwlEQ29 + tU3ViQ0ExNTAzBgNVBAMTLENMUyAxMDI0IDR5ciBEdWFsIFVzYWdlIE5vIEtleSBCYWNrdXAgUG9s + aWN5MIGVMBAMCkVuY3J5cHRpb24CAgCnMIGAMQswCQYDVQQGEwJVUzEQMA4GA1UEChMHRW50cnVzd + DEiMCAGA1UECxMZQ2VydGlmaWNhdGlvbiBBdXRob3JpdGllczESMBAGA1UECxMJRENvbVN1YkNBMS + 
cwJQYDVQQDEx5DTFMgMTAyNCA0eXIgRW5jcnlwdGlvbiBQb2xpY3kwgZkwEgwMVmVyaWZpY2F0aW9 + uAgIApjCBgjELMAkGA1UEBhMCVVMxEDAOBgNVBAoTB0VudHJ1c3QxIjAgBgNVBAsTGUNlcnRpZmlj + YXRpb24gQXV0aG9yaXRpZXMxEjAQBgNVBAsTCURDb21TdWJDQTEpMCcGA1UEAxMgQ0xTIDEwMjQgN + HlyIFZlcmlmaWNhdGlvbiBQb2xpY3kwggH4MBgME2VudF9tc19zY19jbHM0XzIwNDgCAVowggHaMI + GjMBAMCkR1YWwgVXNhZ2UCAgCfMIGOMQswCQYDVQQGEwJVUzEQMA4GA1UEChMHRW50cnVzdDEiMCA + GA1UECxMZQ2VydGlmaWNhdGlvbiBBdXRob3JpdGllczESMBAGA1UECxMJRENvbVN1YkNBMTUwMwYD + VQQDEyxDTFMgMjA0OCA0eXIgRHVhbCBVc2FnZSBObyBLZXkgQmFja3VwIFBvbGljeTCBlTAQDApFb + mNyeXB0aW9uAgIAoTCBgDELMAkGA1UEBhMCVVMxEDAOBgNVBAoTB0VudHJ1c3QxIjAgBgNVBAsTGU + NlcnRpZmljYXRpb24gQXV0aG9yaXRpZXMxEjAQBgNVBAsTCURDb21TdWJDQTEnMCUGA1UEAxMeQ0x + TIDIwNDggNHlyIEVuY3J5cHRpb24gUG9saWN5MIGZMBIMDFZlcmlmaWNhdGlvbgICAKAwgYIxCzAJ + BgNVBAYTAlVTMRAwDgYDVQQKEwdFbnRydXN0MSIwIAYDVQQLExlDZXJ0aWZpY2F0aW9uIEF1dGhvc + ml0aWVzMRIwEAYDVQQLEwlEQ29tU3ViQ0ExKTAnBgNVBAMTIENMUyAyMDQ4IDR5ciBWZXJpZmljYX + Rpb24gUG9saWN5MIIB+DAYDBNlbnRfbXNfc2NfY2xzNV8xMDI0AgFdMIIB2jCBozAQDApEdWFsIFV + zYWdlAgIAqDCBjjELMAkGA1UEBhMCVVMxEDAOBgNVBAoTB0VudHJ1c3QxIjAgBgNVBAsTGUNlcnRp + ZmljYXRpb24gQXV0aG9yaXRpZXMxEjAQBgNVBAsTCURDb21TdWJDQTE1MDMGA1UEAxMsQ0xTIDEwM + jQgNXlyIER1YWwgVXNhZ2UgTm8gS2V5IEJhY2t1cCBQb2xpY3kwgZUwEAwKRW5jcnlwdGlvbgICAK + owgYAxCzAJBgNVBAYTAlVTMRAwDgYDVQQKEwdFbnRydXN0MSIwIAYDVQQLExlDZXJ0aWZpY2F0aW9 + uIEF1dGhvcml0aWVzMRIwEAYDVQQLEwlEQ29tU3ViQ0ExJzAlBgNVBAMTHkNMUyAxMDI0IDV5ciBF + bmNyeXB0aW9uIFBvbGljeTCBmTASDAxWZXJpZmljYXRpb24CAgCpMIGCMQswCQYDVQQGEwJVUzEQM + A4GA1UEChMHRW50cnVzdDEiMCAGA1UECxMZQ2VydGlmaWNhdGlvbiBBdXRob3JpdGllczESMBAGA1 + UECxMJRENvbVN1YkNBMSkwJwYDVQQDEyBDTFMgMTAyNCA1eXIgVmVyaWZpY2F0aW9uIFBvbGljeTC + CAfgwGAwTZW50X21zX3NjX2NsczVfMjA0OAIBWzCCAdowgaMwEAwKRHVhbCBVc2FnZQICAKIwgY4x + CzAJBgNVBAYTAlVTMRAwDgYDVQQKEwdFbnRydXN0MSIwIAYDVQQLExlDZXJ0aWZpY2F0aW9uIEF1d + Ghvcml0aWVzMRIwEAYDVQQLEwlEQ29tU3ViQ0ExNTAzBgNVBAMTLENMUyAyMDQ4IDV5ciBEdWFsIF + VzYWdlIE5vIEtleSBCYWNrdXAgUG9saWN5MIGVMBAMCkVuY3J5cHRpb24CAgCkMIGAMQswCQYDVQQ + 
GEwJVUzEQMA4GA1UEChMHRW50cnVzdDEiMCAGA1UECxMZQ2VydGlmaWNhdGlvbiBBdXRob3JpdGll + czESMBAGA1UECxMJRENvbVN1YkNBMScwJQYDVQQDEx5DTFMgMjA0OCA1eXIgRW5jcnlwdGlvbiBQb + 2xpY3kwgZkwEgwMVmVyaWZpY2F0aW9uAgIAozCBgjELMAkGA1UEBhMCVVMxEDAOBgNVBAoTB0VudH + J1c3QxIjAgBgNVBAsTGUNlcnRpZmljYXRpb24gQXV0aG9yaXRpZXMxEjAQBgNVBAsTCURDb21TdWJ + DQTEpMCcGA1UEAxMgQ0xTIDIwNDggNXlyIFZlcmlmaWNhdGlvbiBQb2xpY3kwggHpMBcMEmVudF9t + c19zY19jbHNfMjA0OAIBWDCCAcwwgZ8wEAwKRHVhbCBVc2FnZQICAJowgYoxCzAJBgNVBAYTAlVTM + RAwDgYDVQQKEwdFbnRydXN0MSIwIAYDVQQLExlDZXJ0aWZpY2F0aW9uIEF1dGhvcml0aWVzMRIwEA + YDVQQLEwlEQ29tU3ViQ0ExMTAvBgNVBAMTKENMUyAyMDQ4IER1YWwgVXNhZ2UgTm8gS2V5IEJhY2t + 1cCBQb2xpY3kwgZAwEAwKRW5jcnlwdGlvbgICAJwwfDELMAkGA1UEBhMCVVMxEDAOBgNVBAoTB0Vu + dHJ1c3QxIjAgBgNVBAsTGUNlcnRpZmljYXRpb24gQXV0aG9yaXRpZXMxEjAQBgNVBAsTCURDb21Td + WJDQTEjMCEGA1UEAxMaQ0xTIDIwNDggRW5jcnlwdGlvbiBQb2xpY3kwgZQwEgwMVmVyaWZpY2F0aW + 9uAgIAmzB+MQswCQYDVQQGEwJVUzEQMA4GA1UEChMHRW50cnVzdDEiMCAGA1UECxMZQ2VydGlmaWN + hdGlvbiBBdXRob3JpdGllczESMBAGA1UECxMJRENvbVN1YkNBMSUwIwYDVQQDExxDTFMgMjA0OCBW + ZXJpZmljYXRpb24gUG9saWN5MIG1MBgME2VudF9tc19zbXJ0Y3JkX2NhcGkCAQ8wgZgwgZUwDwwKR + HVhbCBVc2FnZQIBGTCBgTELMAkGA1UEBhMCVVMxEDAOBgNVBAoTB0VudHJ1c3QxIjAgBgNVBAsTGU + NlcnRpZmljYXRpb24gQXV0aG9yaXRpZXMxEjAQBgNVBAsTCURDb21TdWJDQTEoMCYGA1UEAxMfRHV + hbCBVc2FnZSBObyBLZXkgQmFja3VwIFBvbGljeTCCAakwEAwKZW50X21zY2FwaQICAIEwggGTMHkw + CQwDRUZTAgIA4zBsMQswCQYDVQQGEwJVUzEQMA4GA1UEChMHRW50cnVzdDEiMCAGA1UECxMZQ2Vyd + GlmaWNhdGlvbiBBdXRob3JpdGllczESMBAGA1UECxMJRENvbVN1YkNBMRMwEQYDVQQDEwpFRlMgUG + 9saWN5MIGHMBAMCkVuY3J5cHRpb24CAgDhMHMxCzAJBgNVBAYTAlVTMRAwDgYDVQQKEwdFbnRydXN + 0MSIwIAYDVQQLExlDZXJ0aWZpY2F0aW9uIEF1dGhvcml0aWVzMRIwEAYDVQQLEwlEQ29tU3ViQ0Ex + GjAYBgNVBAMTEUVuY3J5cHRpb24gUG9saWN5MIGLMBIMDFZlcmlmaWNhdGlvbgICAOIwdTELMAkGA + 1UEBhMCVVMxEDAOBgNVBAoTB0VudHJ1c3QxIjAgBgNVBAsTGUNlcnRpZmljYXRpb24gQXV0aG9yaX + RpZXMxEjAQBgNVBAsTCURDb21TdWJDQTEcMBoGA1UEAxMTVmVyaWZpY2F0aW9uIFBvbGljeTBDMBc + MEmVudF9tc2Z0X3NtYXJ0Y2FyZAIBDjAoMBEwDwwKRW5jcnlwdGlvbgIBFzATMBEMDFZlcmlmaWNh + 
dGlvbgIBGDA/MBMMDmVudF9tc2dzY2FubmVyAgENMCgwETAPDApFbmNyeXB0aW9uAgEVMBMwEQwMV + mVyaWZpY2F0aW9uAgEWMD4wEgwNZW50X21zZ3NlcnZlcgIBDDAoMBEwDwwKRW5jcnlwdGlvbgIBEz + ATMBEMDFZlcmlmaWNhdGlvbgIBFDBAMBIMDGVudF9tc29hZG1pbgICAIkwKjASMBAMCkVuY3J5cHR + pb24CAgDxMBQwEgwMVmVyaWZpY2F0aW9uAgIA8DCCATkwFQwQZW50X21zdHdva2V5cGFpcgIBWTCC + AR4wgYowEAwKRW5jcnlwdGlvbgICAJ0wdjELMAkGA1UEBhMCVVMxEDAOBgNVBAoTB0VudHJ1c3QxI + jAgBgNVBAsTGUNlcnRpZmljYXRpb24gQXV0aG9yaXRpZXMxEjAQBgNVBAsTCURDb21TdWJDQTEdMB + sGA1UEAxMUR00gRW5jcnlwdGlvbiBQb2xpY3kwgY4wEgwMVmVyaWZpY2F0aW9uAgIAnjB4MQswCQY + DVQQGEwJVUzEQMA4GA1UEChMHRW50cnVzdDEiMCAGA1UECxMZQ2VydGlmaWNhdGlvbiBBdXRob3Jp + dGllczESMBAGA1UECxMJRENvbVN1YkNBMR8wHQYDVQQDExZHTSBWZXJpZmljYXRpb24gUG9saWN5M + IIBvjARDAxlbnRfbm9ucmVwdWQCARQwggGnMIGGMA8MCkVuY3J5cHRpb24CASIwczELMAkGA1UEBh + MCVVMxEDAOBgNVBAoTB0VudHJ1c3QxIjAgBgNVBAsTGUNlcnRpZmljYXRpb24gQXV0aG9yaXRpZXM + xEjAQBgNVBAsTCURDb21TdWJDQTEaMBgGA1UEAxMRRW5jcnlwdGlvbiBQb2xpY3kwgY4wEwwOTm9u + cmVwdWRpYXRpb24CASQwdzELMAkGA1UEBhMCVVMxEDAOBgNVBAoTB0VudHJ1c3QxIjAgBgNVBAsTG + UNlcnRpZmljYXRpb24gQXV0aG9yaXRpZXMxEjAQBgNVBAsTCURDb21TdWJDQTEeMBwGA1UEAxMVTm + 9ucmVwdWRpYXRpb24gUG9saWN5MIGKMBEMDFZlcmlmaWNhdGlvbgIBIzB1MQswCQYDVQQGEwJVUzE + QMA4GA1UEChMHRW50cnVzdDEiMCAGA1UECxMZQ2VydGlmaWNhdGlvbiBBdXRob3JpdGllczESMBAG + A1UECxMJRENvbVN1YkNBMRwwGgYDVQQDExNWZXJpZmljYXRpb24gUG9saWN5MIICQDAZDBRlbnRfb + m9ucmVwdWRfYW5kX2VmcwIBFzCCAiEweDAIDANFRlMCAS0wbDELMAkGA1UEBhMCVVMxEDAOBgNVBA + oTB0VudHJ1c3QxIjAgBgNVBAsTGUNlcnRpZmljYXRpb24gQXV0aG9yaXRpZXMxEjAQBgNVBAsTCUR + Db21TdWJDQTETMBEGA1UEAxMKRUZTIFBvbGljeTCBhjAPDApFbmNyeXB0aW9uAgEqMHMxCzAJBgNV + BAYTAlVTMRAwDgYDVQQKEwdFbnRydXN0MSIwIAYDVQQLExlDZXJ0aWZpY2F0aW9uIEF1dGhvcml0a + WVzMRIwEAYDVQQLEwlEQ29tU3ViQ0ExGjAYBgNVBAMTEUVuY3J5cHRpb24gUG9saWN5MIGOMBMMDk + 5vbnJlcHVkaWF0aW9uAgEsMHcxCzAJBgNVBAYTAlVTMRAwDgYDVQQKEwdFbnRydXN0MSIwIAYDVQQ + LExlDZXJ0aWZpY2F0aW9uIEF1dGhvcml0aWVzMRIwEAYDVQQLEwlEQ29tU3ViQ0ExHjAcBgNVBAMT + FU5vbnJlcHVkaWF0aW9uIFBvbGljeTCBijARDAxWZXJpZmljYXRpb24CASswdTELMAkGA1UEBhMCV + 
VMxEDAOBgNVBAoTB0VudHJ1c3QxIjAgBgNVBAsTGUNlcnRpZmljYXRpb24gQXV0aG9yaXRpZXMxEj + AQBgNVBAsTCURDb21TdWJDQTEcMBoGA1UEAxMTVmVyaWZpY2F0aW9uIFBvbGljeTA5MA0MCGVudF9 + vY3NwAgEpMCgwETAPDApFbmNyeXB0aW9uAgFHMBMwEQwMVmVyaWZpY2F0aW9uAgFIMD0wEQwMZW50 + X3Byb2ZzcnZyAgEFMCgwETAPDApFbmNyeXB0aW9uAgEFMBMwEQwMVmVyaWZpY2F0aW9uAgEGMDgwD + AwHZW50X3JkcAIBQDAoMBEwDwwKRW5jcnlwdGlvbgIBaTATMBEMDFZlcmlmaWNhdGlvbgIBajCBqj + ASDA1lbnRfc2lnbl9uaXN0AgFyMIGTMIGQMBIMDFZlcmlmaWNhdGlvbgICAMowejELMAkGA1UEBhM + CVVMxEDAOBgNVBAoTB0VudHJ1c3QxIjAgBgNVBAsTGUNlcnRpZmljYXRpb24gQXV0aG9yaXRpZXMx + EjAQBgNVBAsTCURDb21TdWJDQTEhMB8GA1UEAxMYTklTVCBWZXJpZmljYXRpb24gUG9saWN5MIGkM + BYMEWVudF9za3BfZHVhbHVzYWdlAgEYMIGJMIGGMA8MCkR1YWwgVXNhZ2UCAS4wczELMAkGA1UEBh + MCVVMxEDAOBgNVBAoTB0VudHJ1c3QxIjAgBgNVBAsTGUNlcnRpZmljYXRpb24gQXV0aG9yaXRpZXM + xEjAQBgNVBAsTCURDb21TdWJDQTEaMBgGA1UEAxMRRHVhbCBVc2FnZSBQb2xpY3kwLTATDA1lbnRf + c2twbm9ucmVwAgIAgDAWMBQwEgwMVmVyaWZpY2F0aW9uAgIA4DAwMBgMEmVudF9za3Bub25yZXBfY + XV0aAICAIYwFDASMBAMCkR1YWwgVXNhZ2UCAgDrMEEwEwwOZW50X3Nwb2NfYWRtaW4CAXwwKjASMB + AMCkVuY3J5cHRpb24CAgDYMBQwEgwMVmVyaWZpY2F0aW9uAgIA2TBCMBQMD2VudF9zcG9jX2NsaWV + udAIBejAqMBIwEAwKRW5jcnlwdGlvbgICANQwFDASDAxWZXJpZmljYXRpb24CAgDVMD4wEAwLZW50 + X3Nwb2NfZHYCAX0wKjASMBAMCkVuY3J5cHRpb24CAgDaMBQwEgwMVmVyaWZpY2F0aW9uAgIA2zBCM + BQMD2VudF9zcG9jX3NlcnZlcgIBezAqMBIwEAwKRW5jcnlwdGlvbgICANYwFDASDAxWZXJpZmljYX + Rpb24CAgDXMIG+MBMMDWVudF9zc2xfYmFzaWMCAgCIMIGmMBIwEAwKRW5jcnlwdGlvbgICAO8wgY8 + wEgwMVmVyaWZpY2F0aW9uAgIA7jB5MQswCQYDVQQGEwJVUzEQMA4GA1UEChMHRW50cnVzdDEiMCAG + A1UECxMZQ2VydGlmaWNhdGlvbiBBdXRob3JpdGllczESMBAGA1UECxMJRENvbVN1YkNBMSAwHgYDV + QQDFBdWZXJpZmljYXRpb25fcDEwIFBvbGljeTB7MBIMDGVudF9zc2xfY2VydAICAIcwUDAkMBAMCk + VuY3J5cHRpb24CAgDsohAMCkVuY3J5cHRpb24CAgDvMCgwEgwMVmVyaWZpY2F0aW9uAgIA7aISDAx + WZXJpZmljYXRpb24CAgDuohMMDWVudF9zc2xfYmFzaWMCAgCIMIIBKDAXDBJlbnRfc3RhbmRhbG9u + ZV9lZnMCARYwggELMIGLMBAMC0NNUCBTaWduaW5nAgEpMHcxCzAJBgNVBAYTAlVTMRAwDgYDVQQKE + wdFbnRydXN0MSIwIAYDVQQLExlDZXJ0aWZpY2F0aW9uIEF1dGhvcml0aWVzMRIwEAYDVQQLEwlEQ2 + 
9tU3ViQ0ExHjAcBgNVBAMTFU1TIENNUCBTaWduaW5nIFBvbGljeTB7MAgMA0VGUwIBKDBvMQswCQY + DVQQGEwJVUzEQMA4GA1UEChMHRW50cnVzdDEiMCAGA1UECxMZQ2VydGlmaWNhdGlvbiBBdXRob3Jp + dGllczESMBAGA1UECxMJRENvbVN1YkNBMRYwFAYDVQQDEw1NUyBFRlMgUG9saWN5MD4wEgwNZW50X + 3RpbWVzdGFtcAIBBDAoMBEwDwwKRW5jcnlwdGlvbgIBAzATMBEMDFZlcmlmaWNhdGlvbgIBBDBDMB + UMEGVudF90aW1lc3RhbXBpbmcCAXcwKjASMBAMCkVuY3J5cHRpb24CAgDQMBQwEgwMVmVyaWZpY2F + 0aW9uAgIA0TCBxzARDAxlbnRfdHJ1ZXBhc3MCAQgwgbEwETAPDApFbmNyeXB0aW9uAgELMIGbMBEM + DFZlcmlmaWNhdGlvbgIBDDCBhTELMAkGA1UEBhMCVVMxEDAOBgNVBAoTB0VudHJ1c3QxIjAgBgNVB + AsTGUNlcnRpZmljYXRpb24gQXV0aG9yaXRpZXMxEjAQBgNVBAsTCURDb21TdWJDQTEsMCoGA1UEAx + MjVHJ1ZVBhc3MgU2VydmVyIFZlcmlmaWNhdGlvbiBQb2xpY3kwgc0wFwwSZW50X3RydWVwYXNzX21 + 1bHRpAgEJMIGxMBEwDwwKRW5jcnlwdGlvbgIBDTCBmzARDAxWZXJpZmljYXRpb24CAQ4wgYUxCzAJ + BgNVBAYTAlVTMRAwDgYDVQQKEwdFbnRydXN0MSIwIAYDVQQLExlDZXJ0aWZpY2F0aW9uIEF1dGhvc + ml0aWVzMRIwEAYDVQQLEwlEQ29tU3ViQ0ExLDAqBgNVBAMTI1RydWVQYXNzIFNlcnZlciBWZXJpZm + ljYXRpb24gUG9saWN5MIIBLzATDA5lbnRfdHdva2V5cGFpcgIBEzCCARYwgYYwDwwKRW5jcnlwdGl + vbgIBIDBzMQswCQYDVQQGEwJVUzEQMA4GA1UEChMHRW50cnVzdDEiMCAGA1UECxMZQ2VydGlmaWNh + dGlvbiBBdXRob3JpdGllczESMBAGA1UECxMJRENvbVN1YkNBMRowGAYDVQQDExFFbmNyeXB0aW9uI + FBvbGljeTCBijARDAxWZXJpZmljYXRpb24CASEwdTELMAkGA1UEBhMCVVMxEDAOBgNVBAoTB0VudH + J1c3QxIjAgBgNVBAsTGUNlcnRpZmljYXRpb24gQXV0aG9yaXRpZXMxEjAQBgNVBAsTCURDb21TdWJ + DQTEcMBoGA1UEAxMTVmVyaWZpY2F0aW9uIFBvbGljeTCCAUMwFwwSZW50X3R3b2tleXBhaXJfcDEw + AgEkMIIBJjCBjjATDA5FbmNyeXB0aW9uX3AxMAIBPTB3MQswCQYDVQQGEwJVUzEQMA4GA1UEChMHR + W50cnVzdDEiMCAGA1UECxMZQ2VydGlmaWNhdGlvbiBBdXRob3JpdGllczESMBAGA1UECxMJRENvbV + N1YkNBMR4wHAYDVQQDFBVFbmNyeXB0aW9uX3AxMCBQb2xpY3kwgZIwFQwQVmVyaWZpY2F0aW9uX3A + xMAIBPjB5MQswCQYDVQQGEwJVUzEQMA4GA1UEChMHRW50cnVzdDEiMCAGA1UECxMZQ2VydGlmaWNh + dGlvbiBBdXRob3JpdGllczESMBAGA1UECxMJRENvbVN1YkNBMSAwHgYDVQQDFBdWZXJpZmljYXRpb + 25fcDEwIFBvbGljeTBBMBMMDWVudF91bXNfYWRtaW4CAgCKMCowEjAQDApFbmNyeXB0aW9uAgIA8j + AUMBIMDFZlcmlmaWNhdGlvbgICAPMwOzAPDAplbnRfeGFwc3J2AgEQMCgwETAPDApFbmNyeXB0aW9 + 
uAgEaMBMwEQwMVmVyaWZpY2F0aW9uAgEbMIGuMBUMEGVwYXNzX2RvY19zaWduZXICAWQwgZQwgZEw + FQwPRG9jdW1lbnQgU2lnbmVyAgIAtzB4MQswCQYDVQQGEwJVUzEQMA4GA1UEChMHRW50cnVzdDEiM + CAGA1UECxMZQ2VydGlmaWNhdGlvbiBBdXRob3JpdGllczESMBAGA1UECxMJRENvbVN1YkNBMR8wHQ + YDVQQDExZEb2N1bWVudCBTaWduZXIgUG9saWN5MIGzMBoMFGVwYXNzX2RvY19zaWduZXJfZHRsAgI + AhDCBlDCBkTAVDA9Eb2N1bWVudCBTaWduZXICAgDoMHgxCzAJBgNVBAYTAlVTMRAwDgYDVQQKEwdF + bnRydXN0MSIwIAYDVQQLExlDZXJ0aWZpY2F0aW9uIEF1dGhvcml0aWVzMRIwEAYDVQQLEwlEQ29tU + 3ViQ0ExHzAdBgNVBAMTFkRvY3VtZW50IFNpZ25lciBQb2xpY3kwgbYwFwwSZXBhc3NfbWxpc3Rfc2 + lnbmVyAgFjMIGaMIGXMBgMEk1hc3RlciBMaXN0IFNpZ25lcgICALYwezELMAkGA1UEBhMCVVMxEDA + OBgNVBAoTB0VudHJ1c3QxIjAgBgNVBAsTGUNlcnRpZmljYXRpb24gQXV0aG9yaXRpZXMxEjAQBgNV + BAsTCURDb21TdWJDQTEiMCAGA1UEAxMZTWFzdGVyIExpc3QgU2lnbmVyIFBvbGljeTAqMBIMDW1vY + mlsZV9kZXZpY2UCAXEwFDASMBAMCkR1YWwgVXNhZ2UCAgDJMIG4MBYMEW1vYmlsZV9kZXZpY2VfMW + twAgF2MIGdMIGaMBIMDFZlcmlmaWNhdGlvbgICAM8wgYMxCzAJBgNVBAYTAlVTMRAwDgYDVQQKEwd + FbnRydXN0MSIwIAYDVQQLExlDZXJ0aWZpY2F0aW9uIEF1dGhvcml0aWVzMRIwEAYDVQQLEwlEQ29t + U3ViQ0ExKjAoBgNVBAMTIU1vYmlsZSBEZXZpY2UgVmVyaWZpY2F0aW9uIFBvbGljeTCCAVowEAwKb + XNfdGhyZWV5cgICAIUwggFEMIGdMBAMCkVuY3J5cHRpb24CAgDpMIGIMQswCQYDVQQGEwJVUzEQMA + 4GA1UEChMHRW50cnVzdDEiMCAGA1UECxMZQ2VydGlmaWNhdGlvbiBBdXRob3JpdGllczESMBAGA1U + ECxMJRENvbVN1YkNBMS8wLQYDVQQDEyZNaWNyb1NvZnQgVGhyZWUgWWVhciBFbmNyeXB0aW9uIFBv + bGljeTCBoTASDAxWZXJpZmljYXRpb24CAgDqMIGKMQswCQYDVQQGEwJVUzEQMA4GA1UEChMHRW50c + nVzdDEiMCAGA1UECxMZQ2VydGlmaWNhdGlvbiBBdXRob3JpdGllczESMBAGA1UECxMJRENvbVN1Yk + NBMTEwLwYDVQQDEyhNaWNyb1NvZnQgVGhyZWUgWWVhciBWZXJpZmljYXRpb24gUG9saWN5MD4wEgw + NbXNfdnBuX3NlcnZlcgIBIDAoMBEwDwwKRW5jcnlwdGlvbgIBODATMBEMDFZlcmlmaWNhdGlvbgIB + OTCBmDAPDApzc2xfZGV2aWNlAgFzMIGEMIGBMAkMA3NzbAICAMswdDELMAkGA1UEBhMCVVMxEDAOB + gNVBAoTB0VudHJ1c3QxIjAgBgNVBAsTGUNlcnRpZmljYXRpb24gQXV0aG9yaXRpZXMxEjAQBgNVBA + sTCURDb21TdWJDQTEbMBkGA1UEAxMSU1NMIEludGVyb3AgUG9saWN5MIIBQDAXDBJzc2xfZGV2aWN + lX2ludGVyb3ACAXQwggEjMIGQMAoMBHNzbDECAgDMMIGBMQswCQYDVQQGEwJVUzEQMA4GA1UEChMH + 
RW50cnVzdDEiMCAGA1UECxMZQ2VydGlmaWNhdGlvbiBBdXRob3JpdGllczESMBAGA1UECxMJRENvb + VN1YkNBMSgwJgYDVQQDEx9TU0wgSW50ZXJvcCBWZXJpZmljYXRpb24gUG9saWN5MIGNMAoMBHNzbD + ICAgDNMH8xCzAJBgNVBAYTAlVTMRAwDgYDVQQKEwdFbnRydXN0MSIwIAYDVQQLExlDZXJ0aWZpY2F + 0aW9uIEF1dGhvcml0aWVzMRIwEAYDVQQLEwlEQ29tU3ViQ0ExJjAkBgNVBAMTHVNTTCBJbnRlcm9w + IEVuY3J5cHRpb24gUG9saWN5MEMwFwwSdnBuX2NsaWVudF9tYWNoaW5lAgEhMCgwETAPDApFbmNye + XB0aW9uAgE6MBMwEQwMVmVyaWZpY2F0aW9uAgE7MIGiMBQMD3Zwbl9jbGllbnRfdXNlcgIBGTCBiT + CBhjAPDApEdWFsIFVzYWdlAgEvMHMxCzAJBgNVBAYTAlVTMRAwDgYDVQQKEwdFbnRydXN0MSIwIAY + DVQQLExlDZXJ0aWZpY2F0aW9uIEF1dGhvcml0aWVzMRIwEAYDVQQLEwlEQ29tU3ViQ0ExGjAYBgNV + BAMTEUR1YWwgVXNhZ2UgUG9saWN5MDgwDAwHdnBuX2RpcgIBCjAoMBEwDwwKRW5jcnlwdGlvbgIBD + zATMBEMDFZlcmlmaWNhdGlvbgIBEDA6MA4MCXZwbl9ub2RpcgIBCzAoMBEwDwwKRW5jcnlwdGlvbg + IBETATMBEMDFZlcmlmaWNhdGlvbgIBEjA6MA4MCXdlYl9hZF9kYwIBHzAoMBEwDwwKRW5jcnlwdGl + vbgIBNjATMBEMDFZlcmlmaWNhdGlvbgIBNzA/MBMMDndlYl9hZF9kY19jbHMxAgFDMCgwETAPDApF + bmNyeXB0aW9uAgFvMBMwEQwMVmVyaWZpY2F0aW9uAgFwMD8wEwwOd2ViX2FkX2RjX2NsczICAUQwK + DARMA8MCkVuY3J5cHRpb24CAXEwEzARDAxWZXJpZmljYXRpb24CAXIwggE+MBAMC3dlYl9hZF9zdn + IyAgFhMIIBKDCBjzAQDApFbmNyeXB0aW9uAgIAsTB7MQswCQYDVQQGEwJVUzEQMA4GA1UEChMHRW5 + 0cnVzdDEiMCAGA1UECxMZQ2VydGlmaWNhdGlvbiBBdXRob3JpdGllczESMBAGA1UECxMJRENvbVN1 + YkNBMSIwIAYDVQQDExlDTFMgMnlyIEVuY3J5cHRpb24gUG9saWN5MIGTMBIMDFZlcmlmaWNhdGlvb + gICALIwfTELMAkGA1UEBhMCVVMxEDAOBgNVBAoTB0VudHJ1c3QxIjAgBgNVBAsTGUNlcnRpZmljYX + Rpb24gQXV0aG9yaXRpZXMxEjAQBgNVBAsTCURDb21TdWJDQTEkMCIGA1UEAxMbQ0xTIDJ5ciBWZXJ + pZmljYXRpb24gUG9saWN5MIIBLjAQDAt3ZWJfYWRfc3ZyMwIBYjCCARgwgYcwEAwKRW5jcnlwdGlv + bgICALMwczELMAkGA1UEBhMCVVMxEDAOBgNVBAoTB0VudHJ1c3QxIjAgBgNVBAsTGUNlcnRpZmljY + XRpb24gQXV0aG9yaXRpZXMxEjAQBgNVBAsTCURDb21TdWJDQTEaMBgGA1UEAxMRRW5jcnlwdGlvbi + BQb2xpY3kwgYswEgwMVmVyaWZpY2F0aW9uAgIAtDB1MQswCQYDVQQGEwJVUzEQMA4GA1UEChMHRW5 + 0cnVzdDEiMCAGA1UECxMZQ2VydGlmaWNhdGlvbiBBdXRob3JpdGllczESMBAGA1UECxMJRENvbVN1 + YkNBMRwwGgYDVQQDExNWZXJpZmljYXRpb24gUG9saWN5MD8wEwwOd2ViX2FpX2Ntc19jbGkCAT4wK + 
DARMA8MCkVuY3J5cHRpb24CAWUwEzARDAxWZXJpZmljYXRpb24CAWYwPjASDA13ZWJfYWlfY21zX2 + RzAgE/MCgwETAPDApFbmNyeXB0aW9uAgFnMBMwEQwMVmVyaWZpY2F0aW9uAgFoMD8wEwwOd2ViX2F + pX2Ntc19zdnICAT0wKDARMA8MCkVuY3J5cHRpb24CAWMwEzARDAxWZXJpZmljYXRpb24CAWQwPDAO + DAl3ZWJfYmFzaWMCAWswKjASMBAMCkVuY3J5cHRpb24CAgC+MBQwEgwMVmVyaWZpY2F0aW9uAgIAv + zBCMBQMDndlYl9jbGlzdnJfZXhwAgIAgjAqMBIwEAwKRW5jcnlwdGlvbgICAOQwFDASDAxWZXJpZm + ljYXRpb24CAgDlMDkwDQwId2ViX2NsczECAUUwKDARMA8MCkVuY3J5cHRpb24CAXMwEzARDAxWZXJ + pZmljYXRpb24CAXQwOTANDAh3ZWJfY2xzMgIBRjAoMBEwDwwKRW5jcnlwdGlvbgIBdTATMBEMDFZl + cmlmaWNhdGlvbgIBdjA+MBIMDXdlYl9jbXNjbGllbnQCAUEwKDARMA8MCkVuY3J5cHRpb24CAWswE + zARDAxWZXJpZmljYXRpb24CAWwwRDAXDBJ3ZWJfY21zY2xpZW50X2NsczECAUswKTARMA8MCkVuY3 + J5cHRpb24CAX8wFDASDAxWZXJpZmljYXRpb24CAgCAMEUwFwwSd2ViX2Ntc2NsaWVudF9jbHMyAgF + MMCowEjAQDApFbmNyeXB0aW9uAgIAgTAUMBIMDFZlcmlmaWNhdGlvbgICAIIwPjASDA13ZWJfY21z + c2VydmVyAgFCMCgwETAPDApFbmNyeXB0aW9uAgFtMBMwEQwMVmVyaWZpY2F0aW9uAgFuMEUwFwwSd + 2ViX2Ntc3NlcnZlcl9jbHMxAgFNMCowEjAQDApFbmNyeXB0aW9uAgIAgzAUMBIMDFZlcmlmaWNhdG + lvbgICAIQwRTAXDBJ3ZWJfY21zc2VydmVyX2NsczICAU4wKjASMBAMCkVuY3J5cHRpb24CAgCFMBQ + wEgwMVmVyaWZpY2F0aW9uAgIAhjA9MBEMDHdlYl9jb2Rlc2lnbgIBHjAoMBEwDwwKRW5jcnlwdGlv + bgIBNDATMBEMDFZlcmlmaWNhdGlvbgIBNTBCMBYMEXdlYl9jb2Rlc2lnbl9jbHMxAgFJMCgwETAPD + ApFbmNyeXB0aW9uAgF7MBMwEQwMVmVyaWZpY2F0aW9uAgF8MEIwFgwRd2ViX2NvZGVzaWduX2Nscz + ICAUowKDARMA8MCkVuY3J5cHRpb24CAX0wEzARDAxWZXJpZmljYXRpb24CAX4wggEsMBAMC3dlYl9 + kZWZhdWx0AgEcMIIBFjCBhjAPDApFbmNyeXB0aW9uAgEwMHMxCzAJBgNVBAYTAlVTMRAwDgYDVQQK + EwdFbnRydXN0MSIwIAYDVQQLExlDZXJ0aWZpY2F0aW9uIEF1dGhvcml0aWVzMRIwEAYDVQQLEwlEQ + 29tU3ViQ0ExGjAYBgNVBAMTEUVuY3J5cHRpb24gUG9saWN5MIGKMBEMDFZlcmlmaWNhdGlvbgIBMT + B1MQswCQYDVQQGEwJVUzEQMA4GA1UEChMHRW50cnVzdDEiMCAGA1UECxMZQ2VydGlmaWNhdGlvbiB + BdXRob3JpdGllczESMBAGA1UECxMJRENvbVN1YkNBMRwwGgYDVQQDExNWZXJpZmljYXRpb24gUG9s + aWN5MCwwEwwOd2ViX29uZWtleXBhaXICATwwFTATMBEMDFZlcmlmaWNhdGlvbgIBYjA7MA8MCndlY + l9zZXJ2ZXICAR0wKDARMA8MCkVuY3J5cHRpb24CATIwEzARDAxWZXJpZmljYXRpb24CATMwKjAQDA + 
t3ZWJfc2VydmVyMgIBdTAWMBQwEgwMVmVyaWZpY2F0aW9uAgIAzjBEMBYMEHdlYl9zZXJ2ZXJfYmF + zaWMCAgCDMCowEjAQDApFbmNyeXB0aW9uAgIA5jAUMBIMDFZlcmlmaWNhdGlvbgICAOcwQDAUDA93 + ZWJfc2VydmVyX2NsczECAUcwKDARMA8MCkVuY3J5cHRpb24CAXcwEzARDAxWZXJpZmljYXRpb24CA + XgwggFAMBQMD3dlYl9zZXJ2ZXJfY2xzMgIBSDCCASYwgY4wDwwKRW5jcnlwdGlvbgIBeTB7MQswCQ + YDVQQGEwJVUzEQMA4GA1UEChMHRW50cnVzdDEiMCAGA1UECxMZQ2VydGlmaWNhdGlvbiBBdXRob3J + pdGllczESMBAGA1UECxMJRENvbVN1YkNBMSIwIAYDVQQDExlDTFMgMnlyIEVuY3J5cHRpb24gUG9s + aWN5MIGSMBEMDFZlcmlmaWNhdGlvbgIBejB9MQswCQYDVQQGEwJVUzEQMA4GA1UEChMHRW50cnVzd + DEiMCAGA1UECxMZQ2VydGlmaWNhdGlvbiBBdXRob3JpdGllczESMBAGA1UECxMJRENvbVN1YkNBMS + QwIgYDVQQDExtDTFMgMnlyIFZlcmlmaWNhdGlvbiBQb2xpY3kwggEyMBQMD3dlYl9zZXJ2ZXJfY2x + zMwIBYDCCARgwgYcwEAwKRW5jcnlwdGlvbgICAK8wczELMAkGA1UEBhMCVVMxEDAOBgNVBAoTB0Vu + dHJ1c3QxIjAgBgNVBAsTGUNlcnRpZmljYXRpb24gQXV0aG9yaXRpZXMxEjAQBgNVBAsTCURDb21Td + WJDQTEaMBgGA1UEAxMRRW5jcnlwdGlvbiBQb2xpY3kwgYswEgwMVmVyaWZpY2F0aW9uAgIAsDB1MQ + swCQYDVQQGEwJVUzEQMA4GA1UEChMHRW50cnVzdDEiMCAGA1UECxMZQ2VydGlmaWNhdGlvbiBBdXR + ob3JpdGllczESMBAGA1UECxMJRENvbVN1YkNBMRwwGgYDVQQDExNWZXJpZmljYXRpb24gUG9saWN5 + MIIBQjAUDA93ZWJfc2VydmVyX2NsczQCAV8wggEoMIGPMBAMCkVuY3J5cHRpb24CAgCtMHsxCzAJB + gNVBAYTAlVTMRAwDgYDVQQKEwdFbnRydXN0MSIwIAYDVQQLExlDZXJ0aWZpY2F0aW9uIEF1dGhvcm + l0aWVzMRIwEAYDVQQLEwlEQ29tU3ViQ0ExIjAgBgNVBAMTGUNMUyA0eXIgRW5jcnlwdGlvbiBQb2x + pY3kwgZMwEgwMVmVyaWZpY2F0aW9uAgIArjB9MQswCQYDVQQGEwJVUzEQMA4GA1UEChMHRW50cnVz + dDEiMCAGA1UECxMZQ2VydGlmaWNhdGlvbiBBdXRob3JpdGllczESMBAGA1UECxMJRENvbVN1YkNBM + SQwIgYDVQQDExtDTFMgNHlyIFZlcmlmaWNhdGlvbiBQb2xpY3kwRjAYDBN3ZWJfc2VydmVyX2V4cG + VyaWFuAgFuMCowEjAQDApFbmNyeXB0aW9uAgIAwzAUMBIMDFZlcmlmaWNhdGlvbgICAMQwQDAUDA9 + 3ZWJfc2VydmVyX2hpZ2gCATswKDARMA8MCkVuY3J5cHRpb24CAWAwEzARDAxWZXJpZmljYXRpb24C + AWEwGwYJKoZIhvZ9B000MQ4wDAYKKoZIhvZ9B001ATAhMB8GA1UdIwQYMBaAFDy++9gIa1JL8T+Oh + 9HW5F160lV9MA0GCSqGSIb3DQEBBQUAA4IBAQBelvaP82tFhjcHOTSDP97QLcqo2yE9RjjLtC/In8 + u/Zi/8y6jR9GRE11U6GbF+5+EJ5pckTMJ8Oorn3ZVOl4dKyzTN9m2rLjdUXNWd/th8Ja1RD/9hpMD + 
o5HUUYJEoOQxufTZnWfEZ2AISB7rXLCFZpdHGvc3H2ORtkhV+SuTmLpNkN1Zsbv8TXNi4szuX5sbA + y/mX7G8q0Twbb+GGpZjlKV226xc2Ejy3uYGrUK0kEr6u/ONTK1844vsuZPkcJOMcj7/c4o8oKKVMT + Fyafl1swsxHWn6MTh6WqI5k2LBcyEZSptDcG1brE7BU1JAOE9F7nkaoOOWefJs3n7B8piLg +crossCertificatePair;binary:: MIIGUqCCBk4wggZKMIIFMqADAgECAgRIwMPgMA0GCSqGSIb3 + DQEBBQUAMFgxCzAJBgNVBAYTAlVTMRAwDgYDVQQKEwdFbnRydXN0MSIwIAYDVQQLExlDZXJ0aWZpY + 2F0aW9uIEF1dGhvcml0aWVzMRMwEQYDVQQLEwpEQ29tUm9vdENBMB4XDTEwMDQyMDE0NDQwNloXDT + MwMDMyMDE1MTQwNlowVzELMAkGA1UEBhMCVVMxEDAOBgNVBAoTB0VudHJ1c3QxIjAgBgNVBAsTGUN + lcnRpZmljYXRpb24gQXV0aG9yaXRpZXMxEjAQBgNVBAsTCURDb21TdWJDQTCCASIwDQYJKoZIhvcN + AQEBBQADggEPADCCAQoCggEBAOMj486WAJ+GC3aOTn7g1p3+tzHJ8YUAoLW0y4WC6eleA+Yq9M+FP + Xlo+E6AMak4+HENfQMBa5bUgqJMGL20ZOktm0jpMtGtbS/J6Y9TrujpysVnO4SZwuWJOlwV+DLfgH + JYFcE/oeVej/TcoQw+zV0RkeDVA4npgOw5FWKzPlnKANF8KN598KK92jx+p60egFYyIY04MknO/cH + APZXT7tVIp1ljyHyNwMPWiwYdyVdR7IkrFQrb55lHEj4/KdHoLISe4/sQB1Yw6S9fz+A7HhF3BBkb + tNJk+jfjDL2/hNq0VP9b9zURJKSGEUTBaoAbvcWw7p7v2t7VOTB5Wb496SECAwEAAaOCAxswggMXM + A4GA1UdDwEB/wQEAwIBBjA8BgNVHSAENTAzMA8GDWCGSAGG+muBSAMKAgEwDwYNYIZIAYb6a4FIAw + oCAjAPBg1ghkgBhvprgUgDCgIDMA8GA1UdEwEB/wQFMAMBAf8wggEBBggrBgEFBQcBAQSB9DCB8TC + BnQYIKwYBBQUHMAKGgZBsZGFwOi8vZGNvbWRpcjEubWFuYWdlZC5lbnRydXN0LmNvbS9vdT1EQ29t + Um9vdENBLG91PUNlcnRpZmljYXRpb24lMjBBdXRob3JpdGllcyxvPUVudHJ1c3QsYz1VUz9jQUNlc + nRpZmljYXRlO2JpbmFyeSxjcm9zc0NlcnRpZmljYXRlUGFpcjtiaW5hcnkwTwYIKwYBBQUHMAKGQ2 + h0dHA6Ly9kY29td2ViMS5tYW5hZ2VkLmVudHJ1c3QuY29tL0FJQS9DZXJ0c0lzc3VlZFRvRENvbVJ + vb3RDQS5wN2MwggFUBgNVHR8EggFLMIIBRzCB06CB0KCBzYY4aHR0cDovL2Rjb213ZWIxLm1hbmFn + ZWQuZW50cnVzdC5jb20vQ1JMcy9EQ29tUm9vdENBMS5jcmyGgZBsZGFwOi8vZGNvbWRpcjEubWFuY + WdlZC5lbnRydXN0LmNvbS9jbj1XaW5Db21iaW5lZDEsb3U9RENvbVJvb3RDQSxvdT1DZXJ0aWZpY2 + F0aW9uJTIwQXV0aG9yaXRpZXMsbz1FbnRydXN0LGM9VVM/Y2VydGlmaWNhdGVSZXZvY2F0aW9uTGl + zdDtiaW5hcnkwb6BtoGukaTBnMQswCQYDVQQGEwJVUzEQMA4GA1UEChMHRW50cnVzdDEiMCAGA1UE + CxMZQ2VydGlmaWNhdGlvbiBBdXRob3JpdGllczETMBEGA1UECxMKRENvbVJvb3RDQTENMAsGA1UEA + 
xMEQ1JMMTAfBgNVHSMEGDAWgBRFx/xyHQhRD4vvL4V0iTRGDDP/JTAdBgNVHQ4EFgQUPL772AhrUk + vxP46H0dbkXXrSVX0wGQYJKoZIhvZ9B0EABAwwChsEVjcuMQMCAIEwDQYJKoZIhvcNAQEFBQADggE + BAJQrdloQCgTe0ahJyTU/fsKLzYXVGJOwnrwyof/+7emUfZS/OhKYuCfQ9w/wWLhT5SUzm9GDlUfk + YUfpL+/5joymDJO8/thcEq/k2PJepSFf7IMY8635kNz27kI9fA8JQGn7nEI8WBjX26qs7Ho7QKVkv + 6YEDuGeJwBLTGyNerDEf5n+DdMvrDmVAOs62T8uTZDb9gn/uIEGv3vaR+rs3KxvDhEr/2OFJtDWHw + PdHFOrr1pNkNWqdStwoE2/fxUfccQhLn+H5GgKLD7YT74uUCi+VFP1juV3F7jUlytgtMnnbqRIbDn + 4bMPn2HOmxdQ20amsdKX4bfosqFMepfSxWRQ= +crossCertificatePair;binary:: MIIGQaCCBj0wggY5MIIFIaADAgECAgRIwJY0MA0GCSqGSIb3 + DQEBBQUAMFgxCzAJBgNVBAYTAlVTMRAwDgYDVQQKEwdFbnRydXN0MSIwIAYDVQQLExlDZXJ0aWZpY + 2F0aW9uIEF1dGhvcml0aWVzMRMwEQYDVQQLEwpEQ29tUm9vdENBMB4XDTA4MDkwNTE4MDQxMVoXDT + E4MDkwNTAyMTMzN1owVzELMAkGA1UEBhMCVVMxEDAOBgNVBAoTB0VudHJ1c3QxIjAgBgNVBAsTGUN + lcnRpZmljYXRpb24gQXV0aG9yaXRpZXMxEjAQBgNVBAsTCURDb21TdWJDQTCCASIwDQYJKoZIhvcN + AQEBBQADggEPADCCAQoCggEBAL+MSY0GXRSMIIm5l+bMpXvk8rlG/Rjqaw0TNZ2w+KsG6ktNWXDll + A1i1l0Fvx2qj4O/z5bNfgmUmJZFamyWOS6TkwX2C+2DspI7P3a+gVTVu+7VJkevo3Hye2Pd6bAf/+ + bfV2IhSyAOe0wW0sANyQrIjzsU1r6YBjpcT1E5QZdnzSrEYRoBhJGXf8/v+Zu21AqOZ9EpagpvmsZ + 4UI8ORFg2PV0UOmnwNkMVO21JH1sUGYfKP9JAoO8vTzgwYbDN1w5DMC7SqWBl00OF6pGGaglJ5D16 + OcopR8aZVePxj+dW+MADgEufai5CqhUKZ6CA1pa+P6c1lPcFEGgz9AQS420CAwEAAaOCAwowggMGM + A4GA1UdDwEB/wQEAwIBBjA8BgNVHSAENTAzMA8GDWCGSAGG+muBSAMKAgEwDwYNYIZIAYb6a4FIAw + oCAjAPBg1ghkgBhvprgUgDCgIDMA8GA1UdEwEB/wQFMAMBAf8wggEBBggrBgEFBQcBAQSB9DCB8TC + BnQYIKwYBBQUHMAKGgZBsZGFwOi8vZGNvbWRpcjEubWFuYWdlZC5lbnRydXN0LmNvbS9vdT1EQ29t + Um9vdENBLG91PUNlcnRpZmljYXRpb24lMjBBdXRob3JpdGllcyxvPUVudHJ1c3QsYz1VUz9jQUNlc + nRpZmljYXRlO2JpbmFyeSxjcm9zc0NlcnRpZmljYXRlUGFpcjtiaW5hcnkwTwYIKwYBBQUHMAKGQ2 + h0dHA6Ly9kY29td2ViMS5tYW5hZ2VkLmVudHJ1c3QuY29tL0FJQS9DZXJ0c0lzc3VlZFRvRENvbVJ + vb3RDQS5wN2MwggFDBgNVHR8EggE6MIIBNjCBwqCBv6CBvIaBgGxkYXA6Ly9kY29tZGlyMS5tYW5h + Z2VkLmVudHJ1c3QuY29tL291PURDb21Sb290Q0Esb3U9Q2VydGlmaWNhdGlvbiUyMEF1dGhvcml0a + 
WVzLG89RW50cnVzdCxjPVVTP2NlcnRpZmljYXRlUmV2b2NhdGlvbkxpc3Q7YmluYXJ5hjdodHRwOi + 8vZGNvbXdlYjEubWFuYWdlZC5lbnRydXN0LmNvbS9DUkxzL0RDb21Sb290Q0EuY3JsMG+gbaBrpGk + wZzELMAkGA1UEBhMCVVMxEDAOBgNVBAoTB0VudHJ1c3QxIjAgBgNVBAsTGUNlcnRpZmljYXRpb24g + QXV0aG9yaXRpZXMxEzARBgNVBAsTCkRDb21Sb290Q0ExDTALBgNVBAMTBENSTDEwHwYDVR0jBBgwF + oAUh1mBY1JFXsCw39HI6bl1OBAu3tkwHQYDVR0OBBYEFPQWLgG7q5AZpChCDZ3AH5yvEIYrMBkGCS + qGSIb2fQdBAAQMMAobBFY3LjEDAgCBMA0GCSqGSIb3DQEBBQUAA4IBAQCrafi2DFqdhpXtzeJpUgZ + glNOwZUBOp5thJUH7+yMcgl5Ka4JIqqNpw3ZbFPFT9Ni4IzDmJYyPgqHmgRubxFWpAHdP8SjEK7pl + 6DwDmbCAWBiq7SmSfqt502FUUyiTcZsCLi6GqE4fetej41t3NaGidqyVQXPJ26Ti2jNT4NzRnADi6 + vOzMzxMSkWH1OaHoGLtTVpIjkbJZygnSmof4+gs4M1fmH4FVTcWV6t8zbTwkH4RTYSHVX04aM4ZBp + nhMq6sk9uNL+qndpWkO7u7zr6K527kl6/t1Xr9/vnzD0ACVk/gluI7MvCUIzP55o01Rp90ZCMIMak + u0qrESgh0GXln +cACertificate;binary:: MIIGSjCCBTKgAwIBAgIESMDD4DANBgkqhkiG9w0BAQUFADBYMQswCQY + DVQQGEwJVUzEQMA4GA1UEChMHRW50cnVzdDEiMCAGA1UECxMZQ2VydGlmaWNhdGlvbiBBdXRob3Jp + dGllczETMBEGA1UECxMKRENvbVJvb3RDQTAeFw0xMDA0MjAxNDQ0MDZaFw0zMDAzMjAxNTE0MDZaM + FcxCzAJBgNVBAYTAlVTMRAwDgYDVQQKEwdFbnRydXN0MSIwIAYDVQQLExlDZXJ0aWZpY2F0aW9uIE + F1dGhvcml0aWVzMRIwEAYDVQQLEwlEQ29tU3ViQ0EwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggE + KAoIBAQDjI+POlgCfhgt2jk5+4Nad/rcxyfGFAKC1tMuFgunpXgPmKvTPhT15aPhOgDGpOPhxDX0D + AWuW1IKiTBi9tGTpLZtI6TLRrW0vyemPU67o6crFZzuEmcLliTpcFfgy34ByWBXBP6HlXo/03KEMP + s1dEZHg1QOJ6YDsORVisz5ZygDRfCjeffCivdo8fqetHoBWMiGNODJJzv3BwD2V0+7VSKdZY8h8jc + DD1osGHclXUeyJKxUK2+eZRxI+PynR6CyEnuP7EAdWMOkvX8/gOx4RdwQZG7TSZPo34wy9v4TatFT + /W/c1ESSkhhFEwWqAG73FsO6e79re1TkweVm+PekhAgMBAAGjggMbMIIDFzAOBgNVHQ8BAf8EBAMC + AQYwPAYDVR0gBDUwMzAPBg1ghkgBhvprgUgDCgIBMA8GDWCGSAGG+muBSAMKAgIwDwYNYIZIAYb6a + 4FIAwoCAzAPBgNVHRMBAf8EBTADAQH/MIIBAQYIKwYBBQUHAQEEgfQwgfEwgZ0GCCsGAQUFBzACho + GQbGRhcDovL2Rjb21kaXIxLm1hbmFnZWQuZW50cnVzdC5jb20vb3U9RENvbVJvb3RDQSxvdT1DZXJ + 0aWZpY2F0aW9uJTIwQXV0aG9yaXRpZXMsbz1FbnRydXN0LGM9VVM/Y0FDZXJ0aWZpY2F0ZTtiaW5h + 
cnksY3Jvc3NDZXJ0aWZpY2F0ZVBhaXI7YmluYXJ5ME8GCCsGAQUFBzAChkNodHRwOi8vZGNvbXdlY + jEubWFuYWdlZC5lbnRydXN0LmNvbS9BSUEvQ2VydHNJc3N1ZWRUb0RDb21Sb290Q0EucDdjMIIBVA + YDVR0fBIIBSzCCAUcwgdOggdCggc2GOGh0dHA6Ly9kY29td2ViMS5tYW5hZ2VkLmVudHJ1c3QuY29 + tL0NSTHMvRENvbVJvb3RDQTEuY3JshoGQbGRhcDovL2Rjb21kaXIxLm1hbmFnZWQuZW50cnVzdC5j + b20vY249V2luQ29tYmluZWQxLG91PURDb21Sb290Q0Esb3U9Q2VydGlmaWNhdGlvbiUyMEF1dGhvc + ml0aWVzLG89RW50cnVzdCxjPVVTP2NlcnRpZmljYXRlUmV2b2NhdGlvbkxpc3Q7YmluYXJ5MG+gba + BrpGkwZzELMAkGA1UEBhMCVVMxEDAOBgNVBAoTB0VudHJ1c3QxIjAgBgNVBAsTGUNlcnRpZmljYXR + pb24gQXV0aG9yaXRpZXMxEzARBgNVBAsTCkRDb21Sb290Q0ExDTALBgNVBAMTBENSTDEwHwYDVR0j + BBgwFoAURcf8ch0IUQ+L7y+FdIk0Rgwz/yUwHQYDVR0OBBYEFDy++9gIa1JL8T+Oh9HW5F160lV9M + BkGCSqGSIb2fQdBAAQMMAobBFY3LjEDAgCBMA0GCSqGSIb3DQEBBQUAA4IBAQCUK3ZaEAoE3tGoSc + k1P37Ci82F1RiTsJ68MqH//u3plH2UvzoSmLgn0PcP8Fi4U+UlM5vRg5VH5GFH6S/v+Y6MpgyTvP7 + YXBKv5NjyXqUhX+yDGPOt+ZDc9u5CPXwPCUBp+5xCPFgY19uqrOx6O0ClZL+mBA7hnicAS0xsjXqw + xH+Z/g3TL6w5lQDrOtk/Lk2Q2/YJ/7iBBr972kfq7Nysbw4RK/9jhSbQ1h8D3RxTq69aTZDVqnUrc + KBNv38VH3HEIS5/h+RoCiw+2E++LlAovlRT9Y7ldxe41JcrYLTJ526kSGw5+GzD59hzpsXUNtGprH + Sl+G36LKhTHqX0sVkU +cACertificate;binary:: MIIGOTCCBSGgAwIBAgIESMCWNDANBgkqhkiG9w0BAQUFADBYMQswCQY + DVQQGEwJVUzEQMA4GA1UEChMHRW50cnVzdDEiMCAGA1UECxMZQ2VydGlmaWNhdGlvbiBBdXRob3Jp + dGllczETMBEGA1UECxMKRENvbVJvb3RDQTAeFw0wODA5MDUxODA0MTFaFw0xODA5MDUwMjEzMzdaM + FcxCzAJBgNVBAYTAlVTMRAwDgYDVQQKEwdFbnRydXN0MSIwIAYDVQQLExlDZXJ0aWZpY2F0aW9uIE + F1dGhvcml0aWVzMRIwEAYDVQQLEwlEQ29tU3ViQ0EwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggE + KAoIBAQC/jEmNBl0UjCCJuZfmzKV75PK5Rv0Y6msNEzWdsPirBupLTVlw5ZQNYtZdBb8dqo+Dv8+W + zX4JlJiWRWpsljkuk5MF9gvtg7KSOz92voFU1bvu1SZHr6Nx8ntj3emwH//m31diIUsgDntMFtLAD + ckKyI87FNa+mAY6XE9ROUGXZ80qxGEaAYSRl3/P7/mbttQKjmfRKWoKb5rGeFCPDkRYNj1dFDpp8D + ZDFTttSR9bFBmHyj/SQKDvL084MGGwzdcOQzAu0qlgZdNDheqRhmoJSeQ9ejnKKUfGmVXj8Y/nVvj + AA4BLn2ouQqoVCmeggNaWvj+nNZT3BRBoM/QEEuNtAgMBAAGjggMKMIIDBjAOBgNVHQ8BAf8EBAMC + 
AQYwPAYDVR0gBDUwMzAPBg1ghkgBhvprgUgDCgIBMA8GDWCGSAGG+muBSAMKAgIwDwYNYIZIAYb6a + 4FIAwoCAzAPBgNVHRMBAf8EBTADAQH/MIIBAQYIKwYBBQUHAQEEgfQwgfEwgZ0GCCsGAQUFBzACho + GQbGRhcDovL2Rjb21kaXIxLm1hbmFnZWQuZW50cnVzdC5jb20vb3U9RENvbVJvb3RDQSxvdT1DZXJ + 0aWZpY2F0aW9uJTIwQXV0aG9yaXRpZXMsbz1FbnRydXN0LGM9VVM/Y0FDZXJ0aWZpY2F0ZTtiaW5h + cnksY3Jvc3NDZXJ0aWZpY2F0ZVBhaXI7YmluYXJ5ME8GCCsGAQUFBzAChkNodHRwOi8vZGNvbXdlY + jEubWFuYWdlZC5lbnRydXN0LmNvbS9BSUEvQ2VydHNJc3N1ZWRUb0RDb21Sb290Q0EucDdjMIIBQw + YDVR0fBIIBOjCCATYwgcKggb+ggbyGgYBsZGFwOi8vZGNvbWRpcjEubWFuYWdlZC5lbnRydXN0LmN + vbS9vdT1EQ29tUm9vdENBLG91PUNlcnRpZmljYXRpb24lMjBBdXRob3JpdGllcyxvPUVudHJ1c3Qs + Yz1VUz9jZXJ0aWZpY2F0ZVJldm9jYXRpb25MaXN0O2JpbmFyeYY3aHR0cDovL2Rjb213ZWIxLm1hb + mFnZWQuZW50cnVzdC5jb20vQ1JMcy9EQ29tUm9vdENBLmNybDBvoG2ga6RpMGcxCzAJBgNVBAYTAl + VTMRAwDgYDVQQKEwdFbnRydXN0MSIwIAYDVQQLExlDZXJ0aWZpY2F0aW9uIEF1dGhvcml0aWVzMRM + wEQYDVQQLEwpEQ29tUm9vdENBMQ0wCwYDVQQDEwRDUkwxMB8GA1UdIwQYMBaAFIdZgWNSRV7AsN/R + yOm5dTgQLt7ZMB0GA1UdDgQWBBT0Fi4Bu6uQGaQoQg2dwB+crxCGKzAZBgkqhkiG9n0HQQAEDDAKG + wRWNy4xAwIAgTANBgkqhkiG9w0BAQUFAAOCAQEAq2n4tgxanYaV7c3iaVIGYJTTsGVATqebYSVB+/ + sjHIJeSmuCSKqjacN2WxTxU/TYuCMw5iWMj4Kh5oEbm8RVqQB3T/EoxCu6Zeg8A5mwgFgYqu0pkn6 + redNhVFMok3GbAi4uhqhOH3rXo+NbdzWhonaslUFzyduk4tozU+Dc0ZwA4urzszM8TEpFh9Tmh6Bi + 7U1aSI5GyWcoJ0pqH+PoLODNX5h+BVU3FlerfM208JB+EU2Eh1V9OGjOGQaZ4TKurJPbjS/qp3aVp + Du7u86+iudu5Jev7dV6/f758w9AAlZP4JbiOzLwlCMz+eaNNUafdGQjCDGpLtKqxEoIdBl5Zw== +objectClass: organizationalUnit +objectClass: top +objectClass: extensibleobject +ou: binary +nsUniqueId: f49ca103-c2ee11e7-9170b029-e68fda34 +creatorsName: +modifiersName: +createTimestamp: 20171106123544Z +modifyTimestamp: 20171106123544Z + +# entry-id: 3 +dn: cn=test,ou=binary,dc=example,dc=com +userCertificate:: MIIGfzCCBWcCAQEwgYOhgYAwfqR8MHoxCzAJBgNVBAYTAlVTMRAwDgYDVQQK + EwdFbnRydXN0MSIwIAYDVQQLExlDZXJ0aWZpY2F0aW9uIEF1dGhvcml0aWVzMRIwEAYDVQQLEwlEQ + 29tU3ViQ0ExITAfBgNVBAMTGFNQT0MgU2VydmVyIExvZ2luIFBvbGljeTBnoGUwYzBbpFkwVzELMA + 
kGA1UEBhMCVVMxEDAOBgNVBAoTB0VudHJ1c3QxIjAgBgNVBAsTGUNlcnRpZmljYXRpb24gQXV0aG9 + yaXRpZXMxEjAQBgNVBAsTCURDb21TdWJDQQIESMDD4DANBgkqhkiG9w0BAQUFAAIEV4eo1TAiGA8y + MDE3MTAxNTIyNDYwOVoYDzIwMTcxMTE0MjI0NjA5WjCCBBUwHwYJKoZIhvZ9B00BMRIwEAIBAAIBC + AIBCAIBCgMCAGkwFAYJKoZIhvZ9B00DMQcwBQwDQUxMMBEGCSqGSIb2fQdNBTEEAwID2DAPBgkqhk + iG9n0HTQYxAgwAMBcGCSqGSIb2fQdNCTEKDAhSU0EtMjA0ODApBgkqhkiG9n0HTQ4xHDAaDAlwcml + udGFibGUMB3RlbGV0ZXgMBHV0ZjgwEQYJKoZIhvZ9B00PMQQDAgeAMBEGCSqGSIb2fQdNFTEEAwIH + gDAQBgkqhkiG9n0HTRYxAwMBADAQBgkqhkiG9n0HTQgxAwMBADAQBgkqhkiG9n0HTSwxAwMBADAPB + gkqhkiG9n0HTQsxAjAAMBAGCSqGSIb2fQdNDDEDAwEAMBAGCSqGSIb2fQdNDTEDAgEeMA8GCSqGSI + b2fQdNEzECDAAwEAYJKoZIhvZ9B00XMQMBAQAwEQYJKoZIhvZ9B00YMQQCAgfQMBAGCSqGSIb2fQd + NHzEDAQEAMBAGCSqGSIb2fQdNJjEDAwEAMBAGCSqGSIb2fQdNGTEDAgECMBAGCSqGSIb2fQdNGzED + AQEAMBAGCSqGSIb2fQdNKTEDAQEAMBAGCSqGSIb2fQdNHDEDAgEAMBAGCSqGSIb2fQdNHTEDAgEBM + BAGCSqGSIb2fQdNIDEDAwEAMBEGCSqGSIb2fQdNITEEAwIE8DAPBgkqhkiG9n0HTSMxAgwAMA8GCS + qGSIb2fQdNJDECDAAwJAYJKoZIhvZ9B00lMRcwFQwJRGlyZWN0b3J5DANFQUIMA0dBTDAQBgkqhki + G9n0HTSsxAwMBADAPBgkqhkiG9n0HTTYxAgwAMBEGCSqGSIb2fQdNMzEEAwIHgDAPBgkqhkiG9n0H + TScxAgwAMBAGCSqGSIb2fQdNETEDAgECMBAGCSqGSIb2fQdNKDEDAgFkMBEGCiqGSIb2fQdNLQExA + wIBAzBEBgoqhkiG9n0HTS0CMTYwNAwMZW50ZWxsaWdlbmNlDAZkaXJlY3QMCHpmLWxvY2FsDAp6Zi + 1yb2FtaW5nDAZ6Zi1tc2YwFwYKKoZIhvZ9B00tAzEJDAdleGVjdXRlMBAGCSqGSIb2fQdNMTEDAQE + AMBAGCSqGSIb2fQdNMjEDAQEAMBAGCSqGSIb2fQdNOTEDAQH/MA8GCSqGSIb2fQdNLzECDAAwEAYJ + KoZIhvZ9B004MQMBAQAwEwYJKoZIhvZ9B003MQYMBENBU1QwEAYJKoZIhvZ9B007MQMBAQAwFgYJK + oZIhvZ9B009MQkMB0VudHJ1c3QwEAYJKoZIhvZ9B00+MQMBAQAwEAYJKoZIhvZ9B00/MQMBAQAwFw + YJKoZIhvZ9B00KMQoMCFJTQS0yMDQ4MBAGCSqGSIb2fQdNQzEDAQEAMCEwHwYDVR0jBBgwFoAUPL7 + 72AhrUkvxP46H0dbkXXrSVX0wDQYJKoZIhvcNAQEFBQADggEBADrezRWX0fuPC415BUa3tafMLaVO + 24v3CP+qYud4Z6IKI7jNtt2pcneaYjQ7iaxypE3N7Wwlim6Ak4yuwwJ9SrKOSe7YPiFOuugvNy2fk + +f2h3bFkLm40bkjPPH8bih4sLyU8RcN2cAJLxHINwXO3ALKBo3IdxrfcoKquO7g+R4+ZPvmS/95J9 + aQ08FZKpkv+ORPRZySkr0zMUARdBBguklHqFeczn5tQnmJcsfVlP4DC7IPqw2xM8l3b+iAH5pyqgb + 
o/Lk11VWkD11s3K8/Bf40eH23upDOwmYBAszHdXU4+5HNZ/An6xfVEjr/+KxUAEVD5TGQMVJY6SCS + zN3ONRc= +objectClass: top +objectClass: extensibleobject +cn: test +nsUniqueId: f49ca104-c2ee11e7-9170b029-e68fda34 +creatorsName: +modifiersName: +createTimestamp: 20171106123544Z +modifyTimestamp: 20171106123544Z + diff --git a/dirsrvtests/tests/data/tls/ca.crt b/dirsrvtests/tests/data/tls/ca.crt new file mode 100644 index 0000000..3756a15 --- /dev/null +++ b/dirsrvtests/tests/data/tls/ca.crt @@ -0,0 +1,20 @@ +-----BEGIN CERTIFICATE----- +MIIDTjCCAjagAwIBAgIFALr2peswDQYJKoZIhvcNAQELBQAwYDELMAkGA1UEBhMC +QVUxEzARBgNVBAgTClF1ZWVuc2xhbmQxDjAMBgNVBAcTBTM4OWRzMRAwDgYDVQQK +Ewd0ZXN0aW5nMRowGAYDVQQDExF0ZXN0cy5leGFtcGxlLmNvbTAeFw0yMjAyMTEw +MjU0MzJaFw00MjAyMTEwMjU0MzJaMGAxCzAJBgNVBAYTAkFVMRMwEQYDVQQIEwpR +dWVlbnNsYW5kMQ4wDAYDVQQHEwUzODlkczEQMA4GA1UEChMHdGVzdGluZzEaMBgG +A1UEAxMRdGVzdHMuZXhhbXBsZS5jb20wggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAw +ggEKAoIBAQCr2vsHEGtvlishhWeAU+qhPbdoJ6CBW6Dk7APlvwuOaAls4BA6I7CX +ZG2tbaK38TuB1rB21/KOciTcy7TaF9X6OW+6Hkb4gGMnpy4sRbw4CKIfkNsCZ5av +bQ9fsRbgM0q72YPjZlzO6tuvLimOLolhmSiSS00Ll20CteMMWZ/ApGBl163iohD4 +pFWJhtyYG9DnZp5N6T3yHDFsrIyil2+G6ZSTOObRwXUEvHeZcGRiG6Py9t3vDOSg +IUKYcgyihg9boEHVe76wHfMm6i3ELa7/QeVJNofbiPso6doqD0V+qmGhZsmpjP56 +RcBR85ijo/eprohjDNXHAOUgdZ7K9DqrAgMBAAGjDzANMAsGA1UdDwQEAwICBDAN +BgkqhkiG9w0BAQsFAAOCAQEASH8xxpue07K1K8T5SLDUT8iaBnCwub6s8atfqPbR +xb2vdIX0p6WN+kmsNNsafyQYYz+M5LdMSeaTrzj52zvKvZ/5bgc+VqLXx35khaQU +0RgNgKxDgeY2vGVPFHDSNhJvBTtMxksUK0otW8tF70bTZEp2whkoHCu1nAXuEzaX +BeglYO6YRtuY71u84gvd8vtq2Zy0sb5vG7uWn2ZTpA5maCK58r9XpUdpjyA4qhFB +ClwQ45UzkLzTbolioT10N7Xp5clLzqiLYexFuoZhK2HROvgr8EFF7xl17qwVbTkt +ZsURjsTOrWKVLiVn9AuCHeToPosZr4/pWWjFoweO+yfZEg== +-----END CERTIFICATE----- diff --git a/dirsrvtests/tests/data/tls/cert9.db b/dirsrvtests/tests/data/tls/cert9.db new file mode 100644 index 0000000000000000000000000000000000000000..8ccb6f0213ab328662cfe8c43d415bd15f136ea9 GIT binary patch literal 36864 
zcmeI52UrwG`@navava>!3^&Qu5NJIC;uGyBzfM#`%;z|%VqFMB zxdmZA!p^Wu*o)YvL;!A(03-kjKmw2eBmfCO0+7Hk5}3|lXlomw-XY-u(xv{3B_RRf z(b4X)exV`$Ogj&;&1^B((RGIS6E2y?b#vwRrgO(jcsSo=DxK8eMQR9@_(?;1b1(*I zAk$vfATXIhV`%Fdp!3P{(XoDur2a9z8t4NwC{kb6!NYkbHdKc8n4WfoL`TO;qk5Il z1}KpyzO2KFXR^i(WojE3ps8Uo62DMsZ}DD%qR=zj#@1QfpF>Ch*WJU>#m2*%J5%h< z^>THbq zKo<`O%6PaxI4Y_quOF$IFPBNpd{Q%?)XXO}^GVHoQZt{_%qKMq$g%>mtbi;lAj=BK zvKC~j1(|9=rdp7x7V=b5vjwTylFUg`;*ps6WPABID9b0?%O~qBAmasOynu`sk}*Ou zMyQAZR)jdPA|%NONisrGzmU`~B+rA0EGr_*ipa7ek}9DS$N4;9Tp@zmTY*@d%$e<8G289T$D@0HQg$P=!5J8g_A|R>|0am$4@==I=#Yj?$*uG*G{fdE8BF|vi zR|m_UI#~48mk@jEONc#r39(<^9>YXzPkjlo$1oxG7$(FX!-NPfHBbja1lOBF1Q(q` z4CoW3ePUp*NM1DZyncPv@dMQf5J~U=KO8sVQ5f1{0~C=DEuV%G9{oZ|Y)k|hUfA2O z7xH>VDlyb5f#DW7^(gifh%EUB5a9+1Kmw2eBmfCO0+0YC00}?>kN_kA2|xmG4*?~H zGSL_oR)Sz%m<(&i>aok%dF(WHjEI68BmfCO0+0YC00}?>kN_kA2|xmn03-kj{7nRu zs5I1CyP}?@5N;?bggPb=gvYhG4257oR|xtvAWX`Rze!aH4HPDo#v)?sN&jCp5yAe% zj5!{vZK{b_IyQlGiZkbLY9KTV2|xmn03-kjKmw2eBmfCO0+7I4OCX4r;Ev+HJT}f^ zP`zjjO^S^dPZL)M0VPf4S+P>$5sgqucmR)stAI$RCS71<9T3e^!#N)Qp`!PLiGuDl*!mzit!@-6@0+0YC00}?>kN_kA2|xmn z03-kj{4)sv-&5 zVUz}pj+pb$plYy)mdfw{FG8?(>}%|2!t-y&8n9~O34o%1=EVSO1qna`kN_kA2|xmn z03-kjKmw2eBmfD#{R9*%1uQmI2L(4n_(#R`E?Fqj7(HpkIz#Wuf+CHoOdTyRC!q}R z|9ud`=iiF?V5Qi0tOffCi^n8yKTX&|NB|Om1Rw!O01|)%AOT1K5`Y9C0Z3pd2#_}} zjHU|F=jS_4Z-4RUTxBQ2tCKCI8=7sALZ#Ow$nQM>snYfKkzbc_CS4253k+Ji&^LM9 zb!+$zXGLWMP$BDIC$%nBI zy+s86`CFn9Bc7(k;b?%WCb;XoVU0O&m>)s3hz%6t2qz%mB?}ik?L1M0xPfBGpWGSr zkPC&59DlrTGfJ5BQ0$(-JaQsDr|Xv5$DEqoxj*cPTl(8ac3W0>{<@!D{yuAJEP|V1 zTO5Md+0mB+P&!Boj%W=emEbP?YYuOYs=rPGl>reZep6EZh*AumNOi>%DHd5&3QAG5 z3|D2aOp_gwJ5^DXO-F{GD6X%q$|rtCh4|eUzDOjpnyalU>>nKR8VtJU2Tw%b?cZ#a zj!+X(3?br`DTxHG`};cYcAnj9nNpRzz9b>)^V6pK{|(H1d@M7cZ51kOK9*G$wDEVV zOVPVd7*(}y6+T?O-iLDgnaRaGw9(HudXg75f7B*n~v5#SoB9xL$k20Hpja% zeN=|Mz0!)DPYgy?FBNxHeBiYeqfk)<&Bbt4oK4)TH5Ma=HwuN;9~p6_5Xev@esWaT zkE)0Lzi>2}qjLKA0-y3_FD6w5(XUtEUBdQcd^6*nFhjrK#KJVAuTP%XX6buMb6Q6F z)be}W4}(`-O}dbKZ|AY+zMB%_g9Fi0nUc+yE9Oe0#ucCXaa5J^rtrLZG}(TyRb9pV 
zKRVdDx4tBmHr=xiph;`PYcv8Uor-qcLf=zT8W1{n(&ZD3ay>OX z_R#^gr?T=S_o6Sq)0OU*ZJWZaFPRvYuEzWQ_UUS7YKo(Yua@(z&!V#FEoCM-*MzBG zB%B1@ZOAX!*9}5LZfH=?c-5qbCJAVL!+rnpMN317aEL!*U?wI5ja4cLIV1GT!XXm> z-(#3Qp^!oj_dqi->c6sCu1BBu$`dAkKnflJ=Jjo}=@BeQD$!@^N6$fHPo@77F1Y|H-eXs~fs!n~};->6Ne*r~y+&|7Mg-z&)s2s;#(bM(QKgsUZSwGUIH z#@#qy{)_gNC)N?i_3gV>`WqLRPY_pFDdiZX)lq^nio>vlrSIGlIGwnjbT`oFLR($2 zxk;+|I?0alTV}ejGs7rH?y7k2t<3Y4)*iZ(qt=z@wJ!O_cHN8j0{6JIe)v$27h1S6 zCo>|sHGQj9^QCWNX%$O$LpEGPK zdSTP#XtAbl?bGqCznEg;6$d5Rf#(ivd>${Zob_mXZf@s+qP6}iSH4G@xUqYxJ0^ei zf1jfrzTlnRwWsFr5wogphmy-~|J})mEID?v@WEFL_dmXL=0C+Tt2+wvQ=T{$3hQ*f z7&qE~+Q!DC&W_Dni>c=^?->(Q*ou_qawCc)2yM$(o3aOh`+ddvI1?c43^w#(*K++o*0e7flR z?());5xOTb_6B<9aRP(iJ@)X_ocxbMHgT;Es@o|0sNLNrNUNv`U+0(oVT|RG)~0ha zFYMnS`AvL0Sz;!+G`Y*{zKvKmCbqqBRG9U|z%%NpRTmEVZ8LvAsDqt2wy|aEhB3vP zTD^nMpq~X6{$*;NAOT1K5`Y9C0Z8D@2qYS@`o02=w8IbHiadM{ z|Dbg$^2>ICE^zqC)%c!IWDM(xx>Nb5mCLn@0xI=Frxh=+o%?a?l&edgf39=P(nNyH zzc6XtETx`RdDlU!@~*?aDo>!gL7dqhhd&Ce%FicO<;8ug@&&rX?ePyHI>6(%nzO|Kp`h0!&LR4veUg6=*7q1Jho0O(M54*<~ zT`1FQ^ohHdeb~aPt%JItO!YQ~q$Y^XoVHS@Hw4bfSVq;kR2g*e<~(Iv)=x;0%O-(v z+03;ZKZ!%6^8JgV7UL9CWAkzQYHN2E>m8P59@;X?F5d9nO?ST3YuGs6JJ|Rc`_At- z(k`xXZM~<%H;B2S+3CmKtGjGGH7;ywc9_x4j{BvTRTIyfE#;iCby%Nk_&7i9yV@}+ z8!r{vSDZbZocTv}hg;pQmFkShz0KMCjHiCZnk-pjGv8rCs5U)&8Z#}TYKM_oCc?`& zZrU>Me8cDJ+d5T#d?t7J4e+tJE?AUTr)l9DiGv=Ze(Ay?7wPK`Ysj)6RI<()yqKug z^&KZFvU7@2TI`d<4qnSYet%15`j18~I)*J=Le;go#GZ&zM<^0Q^#2kW1EWR+}-VB-hYd~?br?Pl`?=WVM#m)$q@ z$QLDae^>DJ;`Qy{YOIv$J(oSnc?YX5ogmaYmndz}lbMw!xl_gk?3;P<12B#bxnDl0 x2Pwv+Vecrf`u}Xg|7Z3M{-C`*+erBT#E4c$*c;&gzp|En{{O$dGt2$|{{fj7ys-cP literal 0 HcmV?d00001 diff --git a/dirsrvtests/tests/data/tls/int.crt b/dirsrvtests/tests/data/tls/int.crt new file mode 100644 index 0000000..d74f775 --- /dev/null +++ b/dirsrvtests/tests/data/tls/int.crt @@ -0,0 +1,20 @@ +-----BEGIN CERTIFICATE----- +MIIDTjCCAjagAwIBAgIFALr2ppEwDQYJKoZIhvcNAQELBQAwYDELMAkGA1UEBhMC +QVUxEzARBgNVBAgTClF1ZWVuc2xhbmQxDjAMBgNVBAcTBTM4OWRzMRAwDgYDVQQK 
+Ewd0ZXN0aW5nMRowGAYDVQQDExF0ZXN0cy5leGFtcGxlLmNvbTAeFw0yMjAyMTEw +MjU1NThaFw00MjAyMTEwMjU1NThaMGAxCzAJBgNVBAYTAkFVMRMwEQYDVQQIEwpR +dWVlbnNsYW5kMQ4wDAYDVQQHEwUzODlkczEQMA4GA1UEChMHdGVzdGluZzEaMBgG +A1UEAxMRaW50ZXIuZXhhbXBsZS5jb20wggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAw +ggEKAoIBAQDwpvfj98afN43Eo4qwgXJ9wioeyGaX9rmXpQs4bO/muaCzZ4ztONFz +m7smxOmTNPKFilwC4f0p0KQB5GJgcy9VD10VWIS0iKckKuGzqrPQP9ROawIvlKe7 +k046XnIHfvJKFaikcQfcipqLfDqU5+SXaLXj1sqnEWqXklUDqd6zSB4Ko3blQ3t6 +hh2axcudpHSwvj/gbdqNkX41pSbET2MJY2/025AksWLx3CukzNMRLe7pLoNZ5Ztb +Id1EsxHK/dFBCtRkke7TAUuai6BfMFj8NTWk8FB3Rd1rx608lo+31/Fq9a7e5jTq +16JZwJQVlkZGCH6iWB8VynhF+Lw8VZMPAgMBAAGjDzANMAsGA1UdDwQEAwICBDAN +BgkqhkiG9w0BAQsFAAOCAQEAejpy+M0NsmN/SSlXDMK6Xly0ef8vxGcF2crjdwtT +BrhEFm0hYmiCrJAmtr67lTdgvxM/lpQ+tOcgPWiE1oPOo+eauf5gj4F6aGYBse8I +QZ1+WmVyJK+/zRXECo9upFsE76hVhPivqEeq6Z/dsHblnESNxoLjdIRp3JBirUL/ +aifYfLH9UrQ+ZU1nCIufQP3w/jUuB1dPQgaiy3gG9/sA5jEd10ZU6QyWVB2B+UH5 +VKkBg4hu0xJmL79zSZIFnrCxZGxaL9S7BrQcEDB186kQ+++0g1CuncMbZajvlTsg +3bAtbZQQMe3hwsoHjo1JKWAUTOB7cqEF6LMpotg0jn+Bvg== +-----END CERTIFICATE----- diff --git a/dirsrvtests/tests/data/tls/key4.db b/dirsrvtests/tests/data/tls/key4.db new file mode 100644 index 0000000000000000000000000000000000000000..ba367ec0871858f40bb653f57b2955eb218b3fa4 GIT binary patch literal 61440 zcmeI52|QG9-~VT^Z)2xKW5^oKFxFJ|5RxTJ$-ZT)EJesj$dawdQrV(x*-BZW5RomR z>^s>>DB*9Wy8HFJduHbKx}WF&{O|vL4s+&v&gWd;^EvP9m~&lct}%KiG#%V+;I^(8 zoGskp;vjku7z~n!!$BYr!_EV?^Y}TD?F1x07j~Y(e~JIAkO3rWs6h)`2T@STfvD$U zHPjEOEvWZW^-{@EW&c-&fb{?YfB--MAOH{m2mk~C0{?Fka3UvXPq@>T@@Wo1tpdJ@ST>aGwS=3nBV~lGBzbnFv!8h+UAnmd8ZxG%)-On6}N2m zGrt+?XGEz;M#jd*4L*h2z{=6W!P>>mP3MAxr-i!>`44mI+R7?MaGZ#>@M{COfCUOE zCMZve%VCJi;bdWHT-7Hv4Yd!}6?+mHV$8q`&2G`!4@e z78DXCs01Zr+x7Ek9NgSIY%cstL-Jdi@9IBgkwysy>w?MIxVgbMTya(QE5WY_@*~yv zYWuaGn2J+SJg%$1A8vu%dF<4ih1kwR92?_SVAr6qtE91u(%405+#+@pX*6~wft^WW zXHwXiGUTq@-EIW8j-n*)XXz8`EB6cU>Tg~aATA+d>2NNgq)5}OKz z#O6XFvB^+KEA0G-z3={TnZ>@xxXfa>%wo9AVz|s=xXfa>%wo9AVz|uWxU%B7vf{Y1 
z;<&QnxU!NsRT8I4;#5hTD*0W-WtPNcmcng`qlCg?62l#@7!q4n40pU@IM3p^_2Rho z;<)u_+!{1)4f@9#>|W4F>|W3~GH4tbG%i0HmmiI*2MJtR30zqTTv-Vm)t#J3q!{wgN{`w0xL;Z@OJ69A6>{W$y=K+DOVnaIE#`gfb089V@fB--MAOH{m z2mk~C0ssMk06+jB01yBO{C_}z8nSZ`RA8!|ITSVlf~~_AcAkIfCLHxL$Nam6bhpJ-v9pyf_;H4 z!9M=BA+P`t00;mC00IC3fB--MAOH{m2mk~C0ssMkz`q*-awsGCKF9?6;~7E*Wu)9$ z2KxX1ZWRgm1qc8H00IC3fB--MAOH{m2mk~C0ssMk03HH(`~OcsuvXX<>A7mhGP&DOLwF%epQ7Saj$4hPVIrq}khn9b9e-swR zry_;$sgi>DR0#omDw-dkD$a*bmEMO>mE!#!2inpCg%(5p<22yGD@!Br$(9n__+$x7 zI6m1Dy%(P>VZnt@mPB&?K}Om9qbB#@m6063LBml&S^uN(ZhR_o7d};z9iJ+}hEGMa z;#0+0@TtTYWcXAxDLz%41fMDm#ivR^sG*#c z=PcaZF1lW@rro(fg2`@zc7A~gAOH{m2mk~C0{^Q7VjRF22njPC9pm3^Ff*d*Ks!Hl zbRZBNBV!;9h%snS;O?J4KR4gev77x1Qy>lY%W-xRK*8AmnuE=>^Yo_ybYSd+6?cAf zdN#=E9bMyuxv$yz>rJN~Dz)cN!JJANn}v>7QQxqqbQ+?U=_>H- zUcPaGx38(BhpqFgjB6Q}>bk!bf0pQel?rK!w7pTi5WBcX&M>n=)**3?@())>ZBMsM z_dXI8ydiqsB0(VLm=<-sGvx6ojq$ze40D_Aypc5e^$dgSL4CJ5+9qx9YJEJkya(l! z8=e;L8uamItn@_t3lEZtiwWnfEjiMcSNj)e#!|QyZ(pEE3xb_WQRuw*(A(^(H1hkM*@T#?NI zgTTlDa3Io!j8Z5_EohAvOiGKiKq~%Pq(MrM!1Hsc<7VPi6{uL}mblUalCKB}L&sKM z52AeHg~-*Xc0nNg^bjx{NkvBSJq?5s$w5YG9;61g+4=q=Ard+cXO;at7{jYHnh{C=v27LSX3I zo_I46aem`x+Qk%~qX!e#E{_g7=3aN5yGOwUh7K7WzRhsSr!HzGrbjcHbi^^NYs~Ie zk85hCw9yCil5%gZg6x2~imR1FheYgY#bURNfPS?_MMKdX9thg`1aGS9%i~EE@x;;dM;kn>bQYVnEUu_s zALkim@8WDwCR`78p{X9MXveVj^nSbUmn4{|#cztIz^G%%7Zc-3bOfH#vY<6Kwj>HA zCiQ%)U)!NR@t2Vv_xC)6$QD9PM=;wmpLBo^(=~=}yL6FwWGZT?b4_V_9>3d6RlkNZ zZ8*mKsJNK)+PA{;WMmoq(w>pcS=LYAVlO=#&KQhP9O^c|^W}J~j}-f}dV8PpmMtyr zOHT9-w{)4Fj7n)<)RElV`_hXT48nAS$IBznh07(%CGnS=MPs5nY36;VHE9%{rFa#; ze?l08VPk;{x5K4me}>7%j_I!GGkMe%XawuS6H5eNWDP+v9lv& z6(_gLQ>EY$o{3ZC`fo@ceYpC;+RM{4O3)PTNVla@N#5hEHO~|!dds{btV!g@NFqFuSV0i~~r5Ch5#PfhKhNUL& z;jHK~yF@{4i<8zn7#`&7hrpgXpNL>+Z|ww=vL4AyE5USs7`Q*5AJY>VAMV9TU7f#>#Z$VWbYvt%2$@aFWv{h>=4_ufAQWB9kULH#}9(b;pb_hIlHQe)}&b@6@Qs z3d|LDFsNNW+b%xVW>9TXVtTsqF66*jo^>=&f zz;OsZ{wL?_*|}^59()6CO`U8!E45W;a|so~0l4*xK2|@Ihbx#%|Qus+;bi zmZVKbuQj*~owJjA36H(t<)!taHqyZ?{)k&9Td%7&goZC{Os05Yl$Ag0W=#9ezijKj 
z%dcHD#N=Bn=R$1&`}n}oGeoDrpu~w&HY+piynUDG^=t%bA!(0b2zaOG3;s{PWCQ*G zf1#BK6a@$X1ONg6f&W$lF%FRb{r*4pTXJ`f>bL&?pFUal*Z%)v=Gy4mt9w!rwdtV+ zm-=~&#T9|m2QfwDKC*AIrRL)oXF&4diE%Y>GcvG+~13YGF?smWc zX3Am|SQ3)OD>Pi3UEFr$owg(7z|E}kSZ&!e7s&mVHCP^GoG0%KO$_sqy*W7+Z~u6- zt1*k=Yh!&1zUM&BePG8T0%s`NY7 z=gGxImUb_l{hU8Afu8P8Gp@eL5~r$4;g@E#m!i&k^XfQ;UVa<-vap)db+OFP zYtWZ`u;?zmHiT9yPc>Jg$)Z)2YHD1naJBsSTNgjwga--!WUTej=tD6H+WK4xa#?X< zO_|b3lH?Nc77FJ6{^cGnq{H-*&vwhWQ--r2h0=7W>}4EKe*IQ2YLPeT>NZF0+smGz z6dL_tO&wy|_{Ucv7qc2g)FpHepGzKll;GMgDnxZ|H8Hu8seW8_rI>I%Xf1a&=mnjL zA07ERM)GW@jo+Q(Ad%kOcD+#OsCJq!$ai`(SM8i2=uP%X+39@hyf#S_>o{S31v^SO^3taPhPp2#m?qp|5H5&^u2awSU33aUFX* zir{UKz|_lSVhWImJ-htwJ+Sh)%YrCmWjyz=$EKCb_{qkkFkuX!(%1Jp7(B%Vcj>;n zfxQhvUgA1-Y|O0XQ?71+$i^dynmcNyU6EyP4a!g=S=HS#bIs!|M%&CMCan! z)SRoolH~(dXv!5T>F6%g3>A1xc{>YX)M%1axm29C%+20XK#M2jaqCo zi7n==k!!3iy?mcDW?ypyNHa2dRZ_Y}mz3$ohUUuA;22ON23)xLw^n(=`A;H560*7QTalHA#qy@;bww`LKb; z;#R{9)7nOp;Cm+;#70$16{ZjopZ8p0c~%kN9a1fn;iOndGL^7-f5Wh3s^`(#dY-=5 z*hhWjyhLaA!?|cF=|yy8#EbKx5SeY(t>@z?$TF(QoNi{(A=YMg%NY5wv`^{E{URx6qdv6@23Z*O#l)j0RQ`D`O_ z)!MH%rLi{NzGQevyXnOH3d6&D`m0lW7n&UXy3U>*tf9}mq*P?QmqV%Ol?NN5&6lFq zA|}n$M1V6r?M(A)($e?EE*0re?N0KK+WKnmbej?z5S`(?NFQp?3(3VMa)<)Ir&3e|NlSzl8x>Evx6>!V1{%>bhNbgG&M9_)LvBY zs18tGrx>L;L>^DJM0SkyKFKzTF|-5zS7 z^0opAoP4g*StmZ*e}Rp680FAmU6Ypb+TV$Wp1aP=K|=(c_LX&tWR0N7aHiaC8+&I3MrUt@)!m9bvo_%m-TE3T zq1Vpkj$A;U!5~=innV*}6167Y*M>kjwnozW2)4dR~z4TNX^ohi?Y4Ac7|E zNu;i|znz0qzurT>#v)^5${I41ctp}HOVON3+Kj#dgJ8jHQk)2r3hGM8+=+b7CU1$< zmr7n=4Ij=0Q|4#0N`*<>@|?I_LC~b+c1zclmro*Pz-r3&4I+E;_Q<`Ms5b8E4b#wk zuV_<^K``SrDNTe)b=oA*YpT`mVUFbW_6CVVk_spe&Cr6oBGEE~cWmWt2%6js%!tta zjPV@d@E^Xe&OtJSIJK+BjpXxb@0-!;4^Mev5KMSYN)cglp;2yM-DI;thQHq<(DsSvC--M!CQD&5BslViU^yK~xj-+cC9tQj_iK%dq`$Ys9Dl!HYCY*_ZCX1o2}-G&62$P(;&mxuL4-?`m1NFb$XY;;+iJJ$K3l1g zz1QE=rCd7{7(G7wN;1Wmpv!|BxyaOG8aXd!k8OOAE|y8AiNsvJ&s~R5;rj^99@)n5 z(BO3`i6p|Mf&sTz_?wXiGy5TK`a2B#kLb9MIu^Rz_1&M-+13=uMbKr@){PN#pmR% zo7|1}kyN*bJoVPA@B38~bQ!t*KAr2d(j@1jt~cmnuN%i`_!`3LWhd`V_UYZEkiZEdeR_-m5}*ppHw 
zD|%8*$qtfeAfVA3I2c?po9yu(gCNIil9<+SpIq9l7!6v!=O*ns;fqVkt&*K83FM0M z)?QaEw{}f_A!w4FYOqGCti2#l}sXk-dqA!u@TS1{kHo=S%s<)=Ck;FVXl z^HHzmR6QS))36wFDCS#Y5Ttlb67zz-$-6uh)I0u7X*+m*FPHbOu$6}8jmL;O-vGC} zH5KwB1WooXJA)qtR}~3BF*IHeVw7fTK14ox^_7i{sM?%BfZ@_Y33Plt09pY6YSv11FKsN zeJO=LgW&^{a-suRe-5+v!I0-4?mEuhh#5~-u$4cun{DnI5)(jZgjT5QiR*Jc)GT&& zH2UWgi=uChaV-`qz3vPLbsA~joTU7d&)-MN9rQI3bWiB$Y0uKs?)3jJQN5+wPZ>lpL~)QjZm0jRNt#6pAu)wM zg)%~%z^``t|K9%%-G3hB?-#tkeLRh*hS+j|r+Pu`nSjG*{P>lquC3|08HzlY7P`N* zpC*OX%Hr0a0R7hgUz~!IA2)j5B{gvAye^4q zh)sx&KG=85NKsML+hnAQt9gi<-EmLb+{%?1`(wUCzI_B;?prRQo|b>vIRajM6Onf| zlsYAa&P1l<1b68%w4*Ov2g4(Z*CkO6u}P3>s7-}J{o&8SFGMFt7JI(kZ3b2*cDF zh2VmNrJMrR(WinmxxZp~_TzU+O!I-N;?^Udb8UN@@B0s5p*64EAJ#HyOW*o0@B43+daSM9%D*6TmvJ3GN3YL`$z9yk)%o}Pq32>IUck6ujj*gLbw>=-P)%TJ*IxR)eWM@*eyi1M-ugkpKP0rx) z#VEM%`qVuRn?*&8=|#qT77RiNuSsG^HnKESX-3Ifpl9X%b8_T$-Y7(F8=_Z&-P&Ii zc)rI75j1&>XL`^5&bH0nJt1umBRc%0m%m)jJ39L+BQ7lR<&E|olY)3n64MZ~CA}Uf zYbP82&EGCIBrIt|cfBP><&#prsZC}RArW8dCf*-F*Vj5x$0(VT?3}xDF zATGKFFWc<2m8}bWnT?Vxjb6_kC6j$h&}6r?L>KLOr59t6Y#AHbN z(?FINZ!2LCe0WU~(-5O_7HsEGncE$5jGO5WMc)I3&!|LZIXDBPKknW;RZVk(CM7%1 zuDeI*iq{@^^5VjTqfUDF7W2sCPYFFQTHtWbxFHN;A6}EhG{i_YJD=AZf7?A=e4yqV zuOfZ*YV?i9nXB)Xm+EWC*LX7snp9E9!HgSioHf0itI=+=>B9BpWo`(jUU(_me!yV= z$`l5{i`OJE4Y4L>wOm~~vyB6e!u>Z6E~W8S-BWq+eh4B6Y3YMQ!RiD}hDIHG<``=hL?A zK^PtcewV~lco2=l?L`vuy66|VWWp-)ejD7GB6@??mNCfgvtRlb?htf&rPs?u+pWY| zW7m=h$&txcPRJT7EG~#C|2egIvMr_v!^4f&B~iB_3z%bB6zQyKC#R|;5~bC-={!yM zZ4LHWj!3;?V|!KPN6_Uxnwc*aUHi^>ixg-Jo7%B7UL9LG7OQsW@T{S(InC-b3=bTy zOQLQ=R@0NZF}#XQKY5#TqgAA|hT4Gj{LoBFrpE*8(aUq(kpx|)$HcU}XxD02fpJ1n z;b{rFYYnFY~SxShc8(#fGu{cs{x9C&NDu_xY`jesyDPfunKEsnu!Q8g7Id z$?={{kX{p1as52d0XbkLhvDJG>yoI}?{3c4iKE}h_SHPuRnspd?ffX|IB8-FXnffx z^y(WYCINyhKi2y9Rk}*luq==~6uH`I7+=v^=4;UyB$Du6&c57}3B$AJPcHxR?+S=` zC5p3nt=``{S;ZS6x3F$~LX+d1J;xh9P)$yKK3pfS%blRhuo?OF`sn;I6gAtLsGVH2 zs)Vg>UEXD0z6&ab`qy1bF+3djT@uq~LtnWfHb>FFE7`{WGosX8k zl+VwbGJhDqaXW2!+|sx{7~wra)*muk%eHCvZcSG7m?J@xZ(D7&_+mfwaxENA3gU-hh%vIW;TvW~>@DfMb@MlKm?uA8P@ 
zI=Ntq(2U%o=(Z2y9|Q|;ZaGhiNwgI289VJcnOY<%J-q@wQ%pMy1Kq%=2y4Vb4$Xs7kTd?-W$=zam-8-4qEaCS!yab@0&WBT-`36@O3*XTO zGznj#t5Z}9P3K;{A63)6D@QBeHn#+Bo}N&XCE=k86UX7V4-p_H|_s9>tL z--(Sh^$2;CYi$Cba6AYVS0*%ds%gNN;no^W^Cpqw|6iRGQ2$zQdiV1!ievWh%J$jk z8Ht*h^|J`2BU`7$^8a|Z?Duyye2;Xf)v6UuzhMh6Kl1@Q{$E8hzq_1dZ`mu&rBMU6 zYM`*%F*~@)kIr~n)3cHeQ&4j z!LL9n>a5728s^2REl=C4g*zX^$kZ@7PxbEElKQE!(ca=Ppi{4#KY$%A)ql zSSa_aJYrDXD9cF;FEJ_6IFYF{l3fp=8RPqj?pxO!Laykx=nX~Fr%{<3hCL3gZ8;rZ_TBcgxpX}HPc7iz>tRkhbH4i4s4w| zeVKk`>JFPrMwPHz5|`2dPgRH`)zE&E5aBY4`z34J3UaV)=-ZA z_+ke`JtRApQEOI!YeZ|x&kVCJLgPP^PP#UB62_1flRR}nG$vu;c?sm(=4qF7I*X|0 z*rAFL%6x}?vRkel8`M>)tyeh{=Xc$pHu%DOy|wm}%~n6n@V-~tu&Pdhbq9+B=A?pI z*GLw3&oyYa^PJtxJrPW<$&i?RmpNtnDeq&-J$5<|#`VY0h}f2;7xvJEAhx%qimQNb2_U2U9JL*40|< zAcK_y#boHVhC`gW6z(KVD(jaAA!}smBqyvF%GpB>23*`lYh7U+nE?i6-b<>|$;$oE zSJo(0+3K*Y)5Wyc+zeX8n<)A&SmuZl`(Dl>7ZOSy4(luDKL)u30P0sqjJJ?VD8?Mkc8@gwF=b-0Ls+81MFmMW5YLTiwBs_b#{7kbR52 zW8GJm%|4BEck9#YOV@jza0~OFTS6J!m&i_kPc1qd!FNFLLz+!QnQTe%lls8^nYCA6 zY3C_eztWyyZi~FObY99IVsbeH(WX~=<{j*rqD+eS(T+mF-1O&;7)1_0qvSK&K}+XD zTQ<)69|b=)==IQJ?S<>zzEbeDH(Z3|e#8DmYbhbcC5juwU^rd*iZ@=`|13$U;PPE9 z7QJz(l0ev$px9ke*F)oMhmKJD+-Lp(@&uI1#1b zIsd}sqR*f_m0s7^$mS=VVQ-^~?+vF2Jlj328Ob$eI&OT^v?Vm6jXNv<5v$bs(^l2h zM(=sH1U2QJsW^=4L80U-!CxyEG_80 z?vjel?89I>y5f7M6AneS_axDBJ(fpts%z%q#{d7PU$U|Ne-c`I5X^+GgpO_}049I{ zKmZ^B5C8}O1ONg60e}EN03ZMm_?HM^c+h`)U;Xc2#zDke)}7ZaHnXeyIsNZEPP&~M z9@ns@O=Fu-S0YKaeVx|Ca^p{(^4~^MaEx|;<47Qp*b`z~suT8VGDttF!dN%iPeJJX z*Mpn!OBfz;{4R+Zhcv`Pp{g)|D(gL;G1{M9S$}fxn-lA8Ue^MX58ZxWlW>=y%O_+- z(SGE5(g!duSH>NVth|>txKjA!y6Qu?QBUu9c{7Gb46jR~#vz5(vMAr<=e(sCln?>u%|d%f3^BPLeF9-#r{arc<*;MF|| z9>DOR@VX>w98yU5BGeTb0vrIfvJpD9rP?u zqbu0Q;Djvc(=H$ak~>`>AP0<;2=XsG5DAQxh{Q7TG0=_x3={-Yb&i^T*w>lyX63m& z5hdA}1*5E&K1{evzWPfP@1&RpJhVK0M=-=8!+WxmOV2qqlA*n`vVJ6 zb|U0t^)&J%8Yx(T_$u&uleW{MCl4)tu#eE{1PA#6tpuU8?~}R^($<&?j`b zE2F2nZ_i{a3g@2%Yp?4r-kBqMy%St}0HttJ)9=`}AT|Z6;1fA5#;vEm+T2BoG& z3U^5l@dqw364>+Kd&$^*IoUcE!z153Bmp?bc4H|^s>*L(`A6KBYMOMs-m}E!b3HGy 
zz!-QJS@|%BdAiW*v=BG*(>;0UN=?@?pY=e?FbkooGGK*$PL!;c`LU>+YEp6~;%u1| zt{(J&xS}X$;!M#={km|UI7MUrVW}r$Tyr(2NKDBFtLkW)C$D21BaWx{>6tNV-Ap{C z<>{iPd!G@dRMvI4=%S9nZzBGc1ErhZa05sX&h*aRSLthc@_y-Ns%ftn&!Xq~Aj@3J zS}}!ve%<=vUR~cEf~Cr)PPjaxFUs~BGR|eT$*y6Q$03tSP9m7tEoLOsh{#4Y*~U}G zDYIQO;*Nv_GxQlIR=Bf)uv@tKVnHSemx|V8~ zSLKD^+)3#J0eMV z6btVgP2I^$)~H+t`LjBzoYECbEx!Ku&>Y_5WMT~v{71OUHB?L&Gp*r zJ$<;Ovd;pS?>KCn&VK}~<@r#GzDxaFkwMzXv3Fxq9GzEa?jbkk!Yxtt|9tz9aj3NV76s2M}N3LgOyL=o=4&aFuD3`0}jH(m+-}F zC=NGO ztAUf#o%~H7@sbx}YHB~Gyp}BoBa#D$GK7e~hu3q5ZhFUR ztw?*G>N-4!ZEqyuzf78IGeC%qihgMR{`J9d-)j=pPO&`etvRuCU1AgL9?>h}2@ca# zL)g7|$yu{kr4>TWKY=zD@xm6Z5+p6&!ALpSROU9FF{IS(U2Kw9|7B{tr4j4_!Djgq z&_KJ|E*1x_kG%HH{ln>fjjqSEAjPGWaA&k?*PWN&9rrs7xG^!Al-+W-!KymC+(H>n z7f1IZe{rhhS-A&TjB;@Kw3tN-z1tnW*m$$(BZqY|N0NlE6}(CZDaQ{ti9Pxs=>5%8 z5*jfoHcEgGzz*R3C))tL06sA8|2)%*(Sd0gNq$qSH@xTyp|7^>VsTZf37Itk{4@R= z+C&9^7!Cpg{#yh7e~g}N${m{9JNDyRG(x3n(YpQ3XsZ8<(I7At0VFRClK(T|N9eTk zXr@Vc!Jh^~MzzDjpxh`V!KO|(cNkW#nDu zBz(VYrCvsdZxZ0=WA%X7;tGI~h&fCg8La_>^Rp7uSQTh}|kvA72|l9C!Ocn~7kog#xa z@nGE0?wWlqR-5nuW^Fb6R=JPmnV`2MuLweieJzRE+$c~lNH(p9 zXMGO`0XW-H($mlyDzW=i``N@@x;s5bj%9ao^n3dX<;qXA<9ZDTm(CyCp5A|O%Ft-f z=zy^eOY$+}Wl|^O0zHhB8O*_(aepEx;cbTB>~{JlbS-10TGw?ZLhTs6vFN}P@vRp

OjB{yb)j8`LbjrcHgp zOk)vkzy~pZ5+=rwvBvL}WeGfSQa4swu(eaasB%%;Jv#g#=g)UVgic=lmN-W*C4l#@ z*?HvZmtJ|O$D#CaXj;-n4M0!-ySaMEL3{F=@DXVeIVSgluJC;ZQS*mR@fAf%S2vip zGyXcoRmZVmEG&<9u%k{S9i9FBR^+l$J+&X8!~H>h|QQaoZAL!dNb?eFjPip}*zJjc;T z7xMkwcQqh#k)x~1ZL*M{olv+rx*U2VK2F@;mOLXu_eFF6;I4GG12y3V5N6{-_|(~ zLpXs+Zz2`u9s7bQZ%rL-)HspJK{ZEsM5(sQic7%uWZVk3l%!~-`0_}8dOpm_YBYJ5 zUz&5k*g|Rd)gx+V;p#w3>$oDOTv~;-k6&D8_lu$*v0yW=RVIZxR2uomv{@ z({DmRmQjLJq)z>@hf?b%$IP3ca)~b?ovp9HpXMn9_ieop9I|%>ybOMrg3+sfDm$fy zw`;tisp&KxEm$J763OxAx5OyuZC}L6d(B9z-82f_q2K5Q6{MON%?~MAyR5C3Ld-W} z*hfchT(=#k#I9?>wf0KEE#Y0K&WMDdzp^bLwr_f+Q<@ny z9zA0;kbSIpUBFLwn&))R*Z81H>{U^TeJJ+li+I_@7TaTZM!irQIbL;WZX#_bdG3+I zw=|HmLx&k~jHYjkK{-$IyNZk`HC^eX<8)X-*EPrbsXowSKzFkyiDJ+2j_``@1P%XB zP$r+-KoPNykq;B?2>zPb0)k*Ds$4d*GvTFllWvX;TIOkLWMV2m($UoAFqgPL_)&>9 z#I};9OzZLURf9Omy29pKMqRksJyj!ff~J~#x7IeYN0O51&O2MkBfsX1UoBghTc&vV zkWqRGaxXz$ZgD_6vFBVrI6vKyeN&s%B0XJfMJ{IsaEAaac7ZLCNP?JspQ*wEt)yc0%ruZJG>ptn>+weuj}K_oQ#`kbrF<6@8umUWNM-uk z%B+lo33EkRmV?;<2N%mDJrDa1a>YH&N1f57I)-6U(-+)P>TZYGY{U4gr55jrY9@0L+xa-}#lNfxvv8@^mif&oZ-j*4T<7G82 zlWrLZy|f-+BrUH}R-Z?L8O0A%%>x*?s?uKK;Sud@8T~ssFon6MZ^CMmdP05Z!pdY+ z)qA{;30tw9ZgH*ZRM^2guNL;rVn8H(iD-Njd7A+q=6*ce(6QRU*CpkjvGFVJLBQyT zND7Ae@AU!jLw0G2TYScRXuDAsa&1drza@A#fMit^*mG*X$<(0Ip3eD}U?bPAzDW`a z4yzh^1rlLnFx;{fErrpNdGGdhg<}c1*)VGzDz-M>1$F(3fhyf~RW+n1som~M^H7%M zARmg=w{lmOTQv7&65CpaS%U%yPq9&ajp&o@XNNx_qV#2XoB_Dh9VygGe>CzXR6Z>| zI7sEL`_$0OX&2VW)vsH0K}Y>9c#oAv)ncGhI?;K^3*8Je-1BF_L#~s6F(c`j>}Yde zX7;HYRsK;~OQle^lGMbmlFy7cHtu+PY4axl-Zg?5v&Rij!w^`3v*$t9fane>Po=z1YB#c1zpJtm26RNuE#_5#D+HOvdZ)8XNj{Ky{M(t?76EFDa$x| zgnrF!)b_-tJjY}pwWnrYtDr4vC6D7vQe*HRaC1-DYXef<4o5 z-DlR@966JYK5)z4y+Q?d!!A*QoQjGKhUgK(vM)Kf zo*EB%6|utC031zJe%MI$wzebOx1Sob=0;HG%-fTb@!q|(FhJMBX2)xlA+l?Wa(3$4AIx){*?}$NHx0eC^r!2 z&#oax=y=63v`7drkEgSW#jmWrmHy~WsB!bOIYg8MOqHHlN?erd@JtR=@>`e+@$p|1 z`+sMcI!p{k@$Ur)2uKJ3b9^8YZ2vgKl7jN~X(J2t|ELzfqvdh=?uAs2Cp)^XZ3#?8 Z2rPa0y?|b%IDa}Y=jMmXKWBCT;2%`kZ +-----BEGIN PRIVATE KEY----- 
+MIIEvAIBADANBgkqhkiG9w0BAQEFAASCBKYwggSiAgEAAoIBAQDkuu9vlgbTmuXo +ScAhEh45OB6UXR5QrAEIiqSsspHQ2TPZKbGU/m3nMjXOsxzkXHbnobI2OOr0A16z +DeEPAI6BRStKkwNA3mZXlnkDGtHAZ6rfWwpCCc8Ark2PMzR5S4gOYmFIcArw0DXo +J40qJy4kptfXmq8csu+XrZJRQ3ohIo/inRzejChZaCf9C+Lt3JDQh07p5xoyH3TV +E/diIJ8beSgDdm0+oW0m3/TwsdQNgssreA7FQkiKoyH2pZDD1xmNjNGuRrzGsoaX +9cr0T9ubgBEGcJ/moaYnPqsJOmF3QV1ILGwXBaE/B5BvxJkmK5cAMZa6Kuhby959 +EZX3DM39AgMBAAECggEABFzUaEpyQuL3c6DEe1z/GpRJcQb9pwhA1MrgLTMSuOsL +pB65dmAL9Jbuk8yyxmBFHFHnNkWLpa/SxJOFMWYPUcPh+YAoVbpoNU93a2m9im/v +wGbaITxSqG6qqAqP+6hHJg8WT+1jKAiwnobymFU6+hP8le4rXN7E1x3GZqpkz/Dx +k3eNkQbuykqKH046iDgZUqSHw4hriieJc3RiVnaThTnRd1qeKnAIUZ/nhSas5nQa +d/sd6dWCycOb7tvw1Vcm6zTu+uTA8ai1uW6wU7N/vYa4jxZYYsYZYgHQxoK+RSzH +glWD1n0u9VTUVDqISd+BocHwuRunOvOlZlMonTqhAQKBgQD+moK+5AyQjd3RJnVi +xv/MYmw7nT+zXufZJ+bgPGJE8mEgCWAMy/8ysDZX77P4ZZuDVXb1mxVetxVo1n6d +0Ggl/UP/rAqMb3WW86mJfDfGorL6fHg8GzWnQhNWDe6MmGOAPZOJN6UcMWSZshzj +yuey1cDQpuBe8j0UXwYA+ys84QKBgQDl/BiEXLM9brLOEEUF/9I2JaL6teIqFivP +fhscTHfng1dgnrq5hkD/jSUT+zeZ43/fXuVEYzpNgfY7sS0n77//xzN7nljk4809 +2KReeIoQqnkaQ75Vo5dhlkfX/J+jvD7MbeuXGIMKEV3PnLXwQYCdC87iH4CI4PtC +9I+wwd94nQKBgGgsUjjG2HlBArRz9u2+nKVE1CIkOg8rUtPgZq/zJQYu4hyYmWtD +AJz9yo56bnnBITtAedcOaFUDtkfaE56AykxY7zyqaPqDFGr6MbEmWS/2HCMvUIbP +X0mbWIwKUUPHilbLWxV25iC9+PqGDRoLSHg8y5LT5NQUa3dtVeiK3GshAoGAKa7F +Ksg6XDoCAkMEn4+8I8Ayh8oLUaFvE05Bz6E0Yit13LcoFJP2l9qXC8YOT7/h3zQt +zXVGjeGuJSd5jbFwVQVfmVobtnBrNHhdYhnqvBaJmG8Kwi7CMxevsb/Bl0V5BEgv +2NTCe0KmhAhdGUxl6RDI0EbxXt2X7IyytlCNFikCgYBysxlYfApfVJKwdNevnV1V +CI1gGJpIJNZnlX2At7Db2llClxPTQBFRh820k0o+Vaj95VEGDWci/nIUq5odvIzQ +GjVDHSEPsp699J31dreJYZN6mJR9YOI5f5Hak8TP4mlwJWQr+edBofOql6lUkaQJ +8muEOzjKY0ty08BdBhC+lQ== +-----END PRIVATE KEY----- diff --git a/dirsrvtests/tests/data/tls/tls_import_key_chain.pem b/dirsrvtests/tests/data/tls/tls_import_key_chain.pem new file mode 100644 index 0000000..865c1c5 --- /dev/null +++ b/dirsrvtests/tests/data/tls/tls_import_key_chain.pem @@ -0,0 +1,53 @@ +-----BEGIN CERTIFICATE----- 
+MIIDXTCCAkWgAwIBAgIFALr2pxswDQYJKoZIhvcNAQELBQAwYDELMAkGA1UEBhMC +QVUxEzARBgNVBAgTClF1ZWVuc2xhbmQxDjAMBgNVBAcTBTM4OWRzMRAwDgYDVQQK +Ewd0ZXN0aW5nMRowGAYDVQQDExFpbnRlci5leGFtcGxlLmNvbTAeFw0yMjAyMTEw +MjU3MTFaFw00MjAyMTEwMjU3MTFaMF8xCzAJBgNVBAYTAkFVMRMwEQYDVQQIEwpR +dWVlbnNsYW5kMQ4wDAYDVQQHEwUzODlkczEQMA4GA1UEChMHdGVzdGluZzEZMBcG +A1UEAxMQbGVhZi5leGFtcGxlLmNvbTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCC +AQoCggEBAOS672+WBtOa5ehJwCESHjk4HpRdHlCsAQiKpKyykdDZM9kpsZT+becy +Nc6zHORcduehsjY46vQDXrMN4Q8AjoFFK0qTA0DeZleWeQMa0cBnqt9bCkIJzwCu +TY8zNHlLiA5iYUhwCvDQNegnjSonLiSm19earxyy75etklFDeiEij+KdHN6MKFlo +J/0L4u3ckNCHTunnGjIfdNUT92Ignxt5KAN2bT6hbSbf9PCx1A2Cyyt4DsVCSIqj +IfalkMPXGY2M0a5GvMayhpf1yvRP25uAEQZwn+ahpic+qwk6YXdBXUgsbBcFoT8H +kG/EmSYrlwAxlroq6FvL3n0RlfcMzf0CAwEAAaMfMB0wGwYDVR0RBBQwEoIQbGVh +Zi5leGFtcGxlLmNvbTANBgkqhkiG9w0BAQsFAAOCAQEA27IS76HxwAnJH/8tEPjD +DnJw9zsmkHX6skhVfFYlkpfukl0Lm0DGmfeeqYfTBU1g2x5NTxeUBip104gES0iX +eq7Yr+7pdvnV6pB42EAeWRDN9DGDpTL/9/aO8Vm+O28SdILYjuGqXnoPbuUgYLPO +nO/8REbQp7jk6kwje1eJ81JyYINXCwzEEpq0ycwaU6aIcCP3BY5c9PV5DStN+ddV +esI2SkVABd8b0zmh+aw1zzACpUnBgNX60jfbPIr+UqCwlW8LMKmHuL9NkN/mLEyV +hH3v8CpSpTWB+cOntmuK7sESgO8c/u/6ohYPyrEsNBTJgmXeHO8rsYNQAiRkpkvQ +PA== +-----END CERTIFICATE----- +Bag Attributes + friendlyName: testcrt + localKeyID: 19 09 FA 11 4A B4 71 C0 7C 17 AE 64 C0 1C 6C 3F AF 7C D7 5A +Key Attributes: +-----BEGIN PRIVATE KEY----- +MIIEvAIBADANBgkqhkiG9w0BAQEFAASCBKYwggSiAgEAAoIBAQDkuu9vlgbTmuXo +ScAhEh45OB6UXR5QrAEIiqSsspHQ2TPZKbGU/m3nMjXOsxzkXHbnobI2OOr0A16z +DeEPAI6BRStKkwNA3mZXlnkDGtHAZ6rfWwpCCc8Ark2PMzR5S4gOYmFIcArw0DXo +J40qJy4kptfXmq8csu+XrZJRQ3ohIo/inRzejChZaCf9C+Lt3JDQh07p5xoyH3TV +E/diIJ8beSgDdm0+oW0m3/TwsdQNgssreA7FQkiKoyH2pZDD1xmNjNGuRrzGsoaX +9cr0T9ubgBEGcJ/moaYnPqsJOmF3QV1ILGwXBaE/B5BvxJkmK5cAMZa6Kuhby959 +EZX3DM39AgMBAAECggEABFzUaEpyQuL3c6DEe1z/GpRJcQb9pwhA1MrgLTMSuOsL +pB65dmAL9Jbuk8yyxmBFHFHnNkWLpa/SxJOFMWYPUcPh+YAoVbpoNU93a2m9im/v +wGbaITxSqG6qqAqP+6hHJg8WT+1jKAiwnobymFU6+hP8le4rXN7E1x3GZqpkz/Dx 
+k3eNkQbuykqKH046iDgZUqSHw4hriieJc3RiVnaThTnRd1qeKnAIUZ/nhSas5nQa +d/sd6dWCycOb7tvw1Vcm6zTu+uTA8ai1uW6wU7N/vYa4jxZYYsYZYgHQxoK+RSzH +glWD1n0u9VTUVDqISd+BocHwuRunOvOlZlMonTqhAQKBgQD+moK+5AyQjd3RJnVi +xv/MYmw7nT+zXufZJ+bgPGJE8mEgCWAMy/8ysDZX77P4ZZuDVXb1mxVetxVo1n6d +0Ggl/UP/rAqMb3WW86mJfDfGorL6fHg8GzWnQhNWDe6MmGOAPZOJN6UcMWSZshzj +yuey1cDQpuBe8j0UXwYA+ys84QKBgQDl/BiEXLM9brLOEEUF/9I2JaL6teIqFivP +fhscTHfng1dgnrq5hkD/jSUT+zeZ43/fXuVEYzpNgfY7sS0n77//xzN7nljk4809 +2KReeIoQqnkaQ75Vo5dhlkfX/J+jvD7MbeuXGIMKEV3PnLXwQYCdC87iH4CI4PtC +9I+wwd94nQKBgGgsUjjG2HlBArRz9u2+nKVE1CIkOg8rUtPgZq/zJQYu4hyYmWtD +AJz9yo56bnnBITtAedcOaFUDtkfaE56AykxY7zyqaPqDFGr6MbEmWS/2HCMvUIbP +X0mbWIwKUUPHilbLWxV25iC9+PqGDRoLSHg8y5LT5NQUa3dtVeiK3GshAoGAKa7F +Ksg6XDoCAkMEn4+8I8Ayh8oLUaFvE05Bz6E0Yit13LcoFJP2l9qXC8YOT7/h3zQt +zXVGjeGuJSd5jbFwVQVfmVobtnBrNHhdYhnqvBaJmG8Kwi7CMxevsb/Bl0V5BEgv +2NTCe0KmhAhdGUxl6RDI0EbxXt2X7IyytlCNFikCgYBysxlYfApfVJKwdNevnV1V +CI1gGJpIJNZnlX2At7Db2llClxPTQBFRh820k0o+Vaj95VEGDWci/nIUq5odvIzQ +GjVDHSEPsp699J31dreJYZN6mJR9YOI5f5Hak8TP4mlwJWQr+edBofOql6lUkaQJ +8muEOzjKY0ty08BdBhC+lQ== +-----END PRIVATE KEY----- diff --git a/dirsrvtests/tests/longduration/automembers_long_test.py b/dirsrvtests/tests/longduration/automembers_long_test.py new file mode 100644 index 0000000..f35e89d --- /dev/null +++ b/dirsrvtests/tests/longduration/automembers_long_test.py @@ -0,0 +1,728 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2019 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. 
+# --- END COPYRIGHT BLOCK --- + +""" +Will do stress testing of automember plugin +""" + +import os +import pytest + +from lib389.tasks import DEFAULT_SUFFIX +from lib389.topologies import topology_m4 as topo_m4 +from lib389.idm.nscontainer import nsContainers, nsContainer +from lib389.idm.organizationalunit import OrganizationalUnits +from lib389.idm.domain import Domain +from lib389.idm.posixgroup import PosixGroups +from lib389.plugins import AutoMembershipPlugin, AutoMembershipDefinitions, \ + MemberOfPlugin, AutoMembershipRegexRules +from lib389.backend import Backends +from lib389.config import Config +from lib389.replica import ReplicationManager +from lib389.tasks import AutomemberRebuildMembershipTask +from lib389.idm.group import Groups, Group, nsAdminGroups, nsAdminGroup + + +SUBSUFFIX = f'dc=SubSuffix,{DEFAULT_SUFFIX}' +REPMANDN = "cn=ReplManager" +REPMANSFX = "dc=replmangr,dc=com" +CACHE_SIZE = '-1' +CACHEMEM_SIZE = '10485760' + + +pytestmark = pytest.mark.tier3 + + +@pytest.fixture(scope="module") +def _create_entries(topo_m4): + """ + Will act as module .Will set up required user/entries for the test cases. 
+ """ + for instance in [topo_m4.ms['supplier1'], topo_m4.ms['supplier2'], + topo_m4.ms['supplier3'], topo_m4.ms['supplier4']]: + assert instance.status() + + for org in ['autouserGroups', 'Employees', 'TaskEmployees']: + OrganizationalUnits(topo_m4.ms['supplier1'], DEFAULT_SUFFIX).create(properties={'ou': org}) + + Backends(topo_m4.ms['supplier1']).create(properties={ + 'cn': 'SubAutoMembers', + 'nsslapd-suffix': SUBSUFFIX, + 'nsslapd-CACHE_SIZE': CACHE_SIZE, + 'nsslapd-CACHEMEM_SIZE': CACHEMEM_SIZE + }) + + Domain(topo_m4.ms['supplier1'], SUBSUFFIX).create(properties={ + 'dc': SUBSUFFIX.split('=')[1].split(',')[0], + 'aci': [ + f'(targetattr="userPassword")(version 3.0;aci "Replication Manager Access";' + f'allow (write,compare) userdn="ldap:///{REPMANDN},cn=config";)', + f'(target ="ldap:///{SUBSUFFIX}")(targetattr !="cn||sn||uid")(version 3.0;' + f'acl "Group Permission";allow (write)(groupdn = "ldap:///cn=GroupMgr,{SUBSUFFIX}");)', + f'(target ="ldap:///{SUBSUFFIX}")(targetattr !="userPassword")(version 3.0;' + f'acl "Anonym-read access"; allow (read,search,compare) (userdn="ldap:///anyone");)'] + }) + + for suff, grp in [(DEFAULT_SUFFIX, 'SubDef1'), + (DEFAULT_SUFFIX, 'SubDef2'), + (DEFAULT_SUFFIX, 'SubDef3'), + (DEFAULT_SUFFIX, 'SubDef4'), + (DEFAULT_SUFFIX, 'SubDef5'), + (DEFAULT_SUFFIX, 'Employees'), + (DEFAULT_SUFFIX, 'NewEmployees'), + (DEFAULT_SUFFIX, 'testuserGroups'), + (SUBSUFFIX, 'subsuffGroups'), + (SUBSUFFIX, 'Employees'), + (DEFAULT_SUFFIX, 'autoMembersPlugin'), + (DEFAULT_SUFFIX, 'replsubGroups'), + ("cn=replsubGroups,{}".format(DEFAULT_SUFFIX), 'Managers'), + ("cn=replsubGroups,{}".format(DEFAULT_SUFFIX), 'Contractors'), + ("cn=replsubGroups,{}".format(DEFAULT_SUFFIX), 'Interns'), + ("cn=replsubGroups,{}".format(DEFAULT_SUFFIX), 'Visitors'), + ("ou=autouserGroups,{}".format(DEFAULT_SUFFIX), 'SuffDef1'), + ("ou=autouserGroups,{}".format(DEFAULT_SUFFIX), 'SuffDef2'), + ("ou=autouserGroups,{}".format(DEFAULT_SUFFIX), 'SuffDef3'), + 
("ou=autouserGroups,{}".format(DEFAULT_SUFFIX), 'SuffDef4'), + ("ou=autouserGroups,{}".format(DEFAULT_SUFFIX), 'SuffDef5'), + ("ou=autouserGroups,{}".format(DEFAULT_SUFFIX), 'Contractors'), + ("ou=autouserGroups,{}".format(DEFAULT_SUFFIX), 'Managers'), + ("CN=testuserGroups,{}".format(DEFAULT_SUFFIX), 'TestDef1'), + ("CN=testuserGroups,{}".format(DEFAULT_SUFFIX), 'TestDef2'), + ("CN=testuserGroups,{}".format(DEFAULT_SUFFIX), 'TestDef3'), + ("CN=testuserGroups,{}".format(DEFAULT_SUFFIX), 'TestDef4'), + ("CN=testuserGroups,{}".format(DEFAULT_SUFFIX), 'TestDef5')]: + Groups(topo_m4.ms['supplier1'], suff, rdn=None).create(properties={'cn': grp}) + + for suff, grp, gid in [(SUBSUFFIX, 'SubDef1', '111'), + (SUBSUFFIX, 'SubDef2', '222'), + (SUBSUFFIX, 'SubDef3', '333'), + (SUBSUFFIX, 'SubDef4', '444'), + (SUBSUFFIX, 'SubDef5', '555'), + ('cn=subsuffGroups,{}'.format(SUBSUFFIX), 'Managers', '666'), + ('cn=subsuffGroups,{}'.format(SUBSUFFIX), 'Contractors', '999')]: + PosixGroups(topo_m4.ms['supplier1'], suff, rdn=None).create(properties={ + 'cn': grp, + 'gidNumber': gid}) + + for supplier in [topo_m4.ms['supplier1'], topo_m4.ms['supplier2'], + topo_m4.ms['supplier3'], topo_m4.ms['supplier4']]: + AutoMembershipPlugin(supplier).add("nsslapd-pluginConfigArea", + "cn=autoMembersPlugin,{}".format(DEFAULT_SUFFIX)) + MemberOfPlugin(supplier).enable() + + automembers = AutoMembershipDefinitions(topo_m4.ms['supplier1'], + f'cn=autoMembersPlugin,{DEFAULT_SUFFIX}') + automember1 = automembers.create(properties={ + 'cn': 'replsubGroups', + 'autoMemberScope': f'ou=Employees,{DEFAULT_SUFFIX}', + 'autoMemberFilter': "objectclass=posixAccount", + 'autoMemberDefaultGroup': [f'cn=SubDef1,{DEFAULT_SUFFIX}', + f'cn=SubDef2,{DEFAULT_SUFFIX}', + f'cn=SubDef3,{DEFAULT_SUFFIX}', + f'cn=SubDef4,{DEFAULT_SUFFIX}', + f'cn=SubDef5,{DEFAULT_SUFFIX}'], + 'autoMemberGroupingAttr': 'member:dn' + }) + + automembers = AutoMembershipRegexRules(topo_m4.ms['supplier1'], automember1.dn) + 
automembers.create(properties={ + 'cn': 'Managers', + 'description': f'Group placement for Managers', + 'autoMemberTargetGroup': [f'cn=Managers,cn=replsubGroups,{DEFAULT_SUFFIX}'], + 'autoMemberInclusiveRegex': ['uidNumber=^5..5$', 'gidNumber=^[1-4]..3$', + 'nsAdminGroupName=^Manager$|^Supervisor$'], + "autoMemberExclusiveRegex": ['uidNumber=^999$', + 'gidNumber=^[6-8].0$', + 'nsAdminGroupName=^Junior$'], + }) + automembers.create(properties={ + 'cn': 'Contractors', + 'description': f'Group placement for Contractors', + 'autoMemberTargetGroup': [f'cn=Contractors,cn=replsubGroups,{DEFAULT_SUFFIX}'], + 'autoMemberInclusiveRegex': ['uidNumber=^8..5$', + 'gidNumber=^[5-9]..3$', + 'nsAdminGroupName=^Contract|^Temporary$'], + "autoMemberExclusiveRegex": ['uidNumber=^[1,3,8]99$', + 'gidNumber=^[2-4]00$', + 'nsAdminGroupName=^Employee$'], + }) + automembers.create(properties={ + 'cn': 'Interns', + 'description': f'Group placement for Interns', + 'autoMemberTargetGroup': [f'cn=Interns,cn=replsubGroups,{DEFAULT_SUFFIX}'], + 'autoMemberInclusiveRegex': ['uidNumber=^1..6$', + 'gidNumber=^[1-9]..3$', + 'nsAdminGroupName=^Interns$|^Trainees$'], + "autoMemberExclusiveRegex": ['uidNumber=^[1-9]99$', + 'gidNumber=^[1-9]00$', + 'nsAdminGroupName=^Students$'],}) + automembers.create(properties={ + 'cn': 'Visitors', + 'description': f'Group placement for Visitors', + 'autoMemberTargetGroup': [f'cn=Visitors,cn=replsubGroups,{DEFAULT_SUFFIX}'], + 'autoMemberInclusiveRegex': ['uidNumber=^1..6$', + 'gidNumber=^[1-5]6.3$', + 'nsAdminGroupName=^Visitors$'], + "autoMemberExclusiveRegex": ['uidNumber=^[7-9]99$', + 'gidNumber=^[7-9]00$', + 'nsAdminGroupName=^Inter'], + }) + for instance in [topo_m4.ms['supplier1'], topo_m4.ms['supplier2'], + topo_m4.ms['supplier3'], topo_m4.ms['supplier4']]: + instance.restart() + + +def delete_users_and_wait(topo_m4, automem_scope): + """ + Deletes entries after test and waits for replication. 
+ """ + for user in nsAdminGroups(topo_m4.ms['supplier1'], automem_scope, rdn=None).list(): + user.delete() + for supplier in [topo_m4.ms['supplier2'], topo_m4.ms['supplier3'], topo_m4.ms['supplier4']]: + ReplicationManager(DEFAULT_SUFFIX).wait_for_replication(topo_m4.ms['supplier1'], + supplier, timeout=30000) + + +def create_entry(topo_m4, user_id, suffix, uid_no, gid_no, role_usr): + """ + Will create entries with nsAdminGroup objectclass + """ + user = nsAdminGroups(topo_m4.ms['supplier1'], suffix, rdn=None).create(properties={ + 'cn': user_id, + 'sn': user_id, + 'uid': user_id, + 'homeDirectory': '/home/{}'.format(user_id), + 'loginShell': '/bin/bash', + 'uidNumber': uid_no, + 'gidNumber': gid_no, + 'objectclass': ['top', 'person', 'posixaccount', 'inetuser', + 'nsMemberOf', 'nsAccount', 'nsAdminGroup'], + 'nsAdminGroupName': role_usr, + 'seeAlso': 'uid={},{}'.format(user_id, suffix), + 'entrydn': 'uid={},{}'.format(user_id, suffix) + }) + return user + + +def test_adding_300_user(topo_m4, _create_entries): + """ + Adding 300 user entries matching the inclusive regex rules for + all targetted groups at M1 and checking the same created in M2 & M3 + :id: fcd867bc-be57-11e9-9842-8c16451d917b + :setup: Instance with 4 suppliers + :steps: + 1. Add 300 user entries matching the inclusive regex rules at topo_m4.ms['supplier1'] + 2. Check the same created in rest suppliers + :expectedresults: + 1. Pass + 2. 
Pass + """ + user_rdn = "long01usr" + automem_scope = "ou=Employees,{}".format(DEFAULT_SUFFIX) + grp_container = "cn=replsubGroups,{}".format(DEFAULT_SUFFIX) + default_group1 = "cn=SubDef1,{}".format(DEFAULT_SUFFIX) + default_group2 = "cn=SubDef2,{}".format(DEFAULT_SUFFIX) + # Adding BulkUsers + for number in range(300): + create_entry(topo_m4, f'{user_rdn}{number}', automem_scope, '5795', '5693', 'Contractor') + try: + # Check to sync the entries + for supplier in [topo_m4.ms['supplier2'], topo_m4.ms['supplier3'], topo_m4.ms['supplier4']]: + ReplicationManager(DEFAULT_SUFFIX).wait_for_replication(topo_m4.ms['supplier1'], + supplier, timeout=30000) + for instance, grp in [(topo_m4.ms['supplier2'], 'Managers'), + (topo_m4.ms['supplier3'], 'Contractors'), + (topo_m4.ms['supplier4'], 'Interns')]: + assert len(nsAdminGroup( + instance, f'cn={grp},{grp_container}').get_attr_vals_utf8('member')) == 300 + for grp in [default_group1, default_group2]: + assert not Group(topo_m4.ms['supplier4'], grp).get_attr_vals_utf8('member') + assert not Group(topo_m4.ms['supplier3'], grp).get_attr_vals_utf8('member') + + finally: + delete_users_and_wait(topo_m4, automem_scope) + + +def test_adding_1000_users(topo_m4, _create_entries): + """ + Adding 1000 users matching inclusive regex for Managers/Contractors + and exclusive regex for Interns/Visitors + :id: f641e612-be57-11e9-94e6-8c16451d917b + :setup: Instance with 4 suppliers + :steps: + 1. Add 1000 user entries matching the inclusive/exclusive + regex rules at topo_m4.ms['supplier1'] + 2. Check the same created in rest suppliers + :expectedresults: + 1. Pass + 2. 
Pass + """ + automem_scope = "ou=Employees,{}".format(DEFAULT_SUFFIX) + grp_container = "cn=replsubGroups,{}".format(DEFAULT_SUFFIX) + default_group1 = "cn=SubDef1,{}".format(DEFAULT_SUFFIX) + default_group2 = "cn=SubDef2,{}".format(DEFAULT_SUFFIX) + # Adding 1000 users + for number in range(1000): + create_entry(topo_m4, f'automemusrs{number}', automem_scope, '799', '5693', 'Manager') + try: + # Check to sync the entries + for supplier in [topo_m4.ms['supplier2'], topo_m4.ms['supplier3'], topo_m4.ms['supplier4']]: + ReplicationManager(DEFAULT_SUFFIX).wait_for_replication(topo_m4.ms['supplier1'], + supplier, timeout=30000) + for instance, grp in [(topo_m4.ms['supplier1'], 'Managers'), + (topo_m4.ms['supplier3'], 'Contractors')]: + assert len(nsAdminGroup( + instance, "cn={},{}".format(grp, + grp_container)).get_attr_vals_utf8('member')) == 1000 + for instance, grp in [(topo_m4.ms['supplier2'], 'Interns'), + (topo_m4.ms['supplier4'], 'Visitors')]: + assert not nsAdminGroup( + instance, "cn={},{}".format(grp, grp_container)).get_attr_vals_utf8('member') + for grp in [default_group1, default_group2]: + assert not Group(topo_m4.ms['supplier2'], grp).get_attr_vals_utf8('member') + assert not Group(topo_m4.ms['supplier3'], grp).get_attr_vals_utf8('member') + finally: + delete_users_and_wait(topo_m4, automem_scope) + + +def test_adding_3000_users(topo_m4, _create_entries): + """ + Adding 3000 users matching all inclusive regex rules and no matching exclusive regex rules + :id: ee54576e-be57-11e9-b536-8c16451d917b + :setup: Instance with 4 suppliers + :steps: + 1. Add 3000 user entries matching the inclusive/exclusive regex + rules at topo_m4.ms['supplier1'] + 2. Check the same created in rest suppliers + :expectedresults: + 1. Pass + 2. 
Pass + """ + automem_scope = "ou=Employees,{}".format(DEFAULT_SUFFIX) + grp_container = "cn=replsubGroups,{}".format(DEFAULT_SUFFIX) + default_group1 = "cn=SubDef3,{}".format(DEFAULT_SUFFIX) + default_group2 = "cn=SubDef5,{}".format(DEFAULT_SUFFIX) + # Adding 3000 users + for number in range(3000): + create_entry(topo_m4, f'automemusrs{number}', automem_scope, '5995', '5693', 'Manager') + try: + for supplier in [topo_m4.ms['supplier2'], topo_m4.ms['supplier3'], topo_m4.ms['supplier4']]: + ReplicationManager(DEFAULT_SUFFIX).wait_for_replication(topo_m4.ms['supplier1'], + supplier, timeout=30000) + for instance, grp in [(topo_m4.ms['supplier1'], 'Managers'), + (topo_m4.ms['supplier3'], 'Contractors'), + (topo_m4.ms['supplier2'], 'Interns'), + (topo_m4.ms['supplier4'], 'Visitors') + ]: + assert len( + nsAdminGroup(instance, + "cn={},{}".format(grp, + grp_container)).get_attr_vals_utf8('member')) == 3000 + for grp in [default_group1, default_group2]: + assert not Group(topo_m4.ms['supplier2'], grp).get_attr_vals_utf8('member') + assert not Group(topo_m4.ms['supplier3'], grp).get_attr_vals_utf8('member') + finally: + delete_users_and_wait(topo_m4, automem_scope) + + +def test_3000_users_matching_all_exclusive_regex(topo_m4, _create_entries): + """ + Adding 3000 users matching all exclusive regex rules and no matching inclusive regex rules + :id: e789331e-be57-11e9-b298-8c16451d917b + :setup: Instance with 4 suppliers + :steps: + 1. Add 3000 user entries matching the inclusive/exclusive regex + rules at topo_m4.ms['supplier1'] + 2. Check the same created in rest suppliers + :expectedresults: + 1. Pass + 2. 
Pass + """ + automem_scope = "ou=Employees,{}".format(DEFAULT_SUFFIX) + grp_container = "cn=replsubGroups,{}".format(DEFAULT_SUFFIX) + default_group1 = "cn=SubDef1,{}".format(DEFAULT_SUFFIX) + default_group2 = "cn=SubDef2,{}".format(DEFAULT_SUFFIX) + default_group4 = "cn=SubDef4,{}".format(DEFAULT_SUFFIX) + # Adding 3000 users + for number in range(3000): + create_entry(topo_m4, f'automemusrs{number}', automem_scope, '399', '700', 'Manager') + try: + for supplier in [topo_m4.ms['supplier2'], topo_m4.ms['supplier3'], topo_m4.ms['supplier4']]: + ReplicationManager(DEFAULT_SUFFIX).wait_for_replication(topo_m4.ms['supplier1'], + supplier, timeout=30000) + + for instance, grp in [(topo_m4.ms['supplier1'], default_group4), + (topo_m4.ms['supplier2'], default_group1), + (topo_m4.ms['supplier3'], default_group2), + (topo_m4.ms['supplier4'], default_group2)]: + assert len(nsAdminGroup(instance, grp).get_attr_vals_utf8('member')) == 3000 + for grp, instance in [('Managers', topo_m4.ms['supplier3']), + ('Contractors', topo_m4.ms['supplier2'])]: + assert not nsAdminGroup( + instance, "cn={},{}".format(grp, grp_container)).get_attr_vals_utf8('member') + + finally: + delete_users_and_wait(topo_m4, automem_scope) + + +def test_no_matching_inclusive_regex_rules(topo_m4, _create_entries): + """ + Adding 3000 users matching all exclusive regex rules and no matching inclusive regex rules + :id: e0cc0e16-be57-11e9-9c0f-8c16451d917b + :setup: Instance with 4 suppliers + :steps: + 1. Add 3000 user entries matching the inclusive/exclusive regex + rules at topo_m4.ms['supplier1'] + 2. Check the same created in rest suppliers + :expectedresults: + 1. Pass + 2. 
Pass + """ + automem_scope = "ou=Employees,{}".format(DEFAULT_SUFFIX) + grp_container = "cn=replsubGroups,{}".format(DEFAULT_SUFFIX) + default_group1 = "cn=SubDef1,{}".format(DEFAULT_SUFFIX) + # Adding 3000 users + for number in range(3000): + create_entry(topo_m4, f'automemusrs{number}', automem_scope, '399', '700', 'Manager') + try: + for supplier in [topo_m4.ms['supplier2'], topo_m4.ms['supplier3'], topo_m4.ms['supplier4']]: + ReplicationManager(DEFAULT_SUFFIX).wait_for_replication(topo_m4.ms['supplier1'], + supplier, timeout=30000) + for instance, grp in [(topo_m4.ms['supplier1'], "cn=SubDef4,{}".format(DEFAULT_SUFFIX)), + (topo_m4.ms['supplier2'], default_group1), + (topo_m4.ms['supplier3'], "cn=SubDef2,{}".format(DEFAULT_SUFFIX)), + (topo_m4.ms['supplier4'], "cn=SubDef3,{}".format(DEFAULT_SUFFIX))]: + assert len(nsAdminGroup(instance, grp).get_attr_vals_utf8('member')) == 3000 + for grp, instance in [('Managers', topo_m4.ms['supplier3']), + ('Contractors', topo_m4.ms['supplier2'])]: + assert not nsAdminGroup( + instance, "cn={},{}".format(grp, grp_container)).get_attr_vals_utf8('member') + finally: + delete_users_and_wait(topo_m4, automem_scope) + + +def test_adding_deleting_and_re_adding_the_same_3000(topo_m4, _create_entries): + """ + Adding, Deleting and re-adding the same 3000 users matching all + exclusive regex rules and no matching inclusive regex rules + :id: d939247c-be57-11e9-825d-8c16451d917b + :setup: Instance with 4 suppliers + :steps: + 1. Add 3000 user entries matching the inclusive/exclusive regex + rules at topo_m4.ms['supplier1'] + 2. Check the same created in rest suppliers + 3. Delete 3000 users + 4. Again add 3000 users + 5. Check the same created in rest suppliers + :expectedresults: + 1. Pass + 2. Pass + 3. Pass + 4. Pass + 5. 
Pass + """ + automem_scope = "ou=Employees,{}".format(DEFAULT_SUFFIX) + grp_container = "cn=replsubGroups,{}".format(DEFAULT_SUFFIX) + default_group1 = "cn=SubDef1,{}".format(DEFAULT_SUFFIX) + # Adding + for number in range(3000): + create_entry(topo_m4, f'automemusrs{number}', automem_scope, '399', '700', 'Manager') + try: + for supplier in [topo_m4.ms['supplier2'], topo_m4.ms['supplier3'], topo_m4.ms['supplier4']]: + ReplicationManager(DEFAULT_SUFFIX).wait_for_replication(topo_m4.ms['supplier1'], + supplier, timeout=30000) + assert len(nsAdminGroup(topo_m4.ms['supplier2'], + default_group1).get_attr_vals_utf8('member')) == 3000 + # Deleting + for user in nsAdminGroups(topo_m4.ms['supplier2'], automem_scope, rdn=None).list(): + user.delete() + for supplier in [topo_m4.ms['supplier1'], topo_m4.ms['supplier3'], topo_m4.ms['supplier4']]: + ReplicationManager(DEFAULT_SUFFIX).wait_for_replication(topo_m4.ms['supplier2'], + supplier, timeout=30000) + # Again adding + for number in range(3000): + create_entry(topo_m4, f'automemusrs{number}', automem_scope, '399', '700', 'Manager') + for supplier in [topo_m4.ms['supplier2'], topo_m4.ms['supplier3'], topo_m4.ms['supplier4']]: + ReplicationManager(DEFAULT_SUFFIX).wait_for_replication(topo_m4.ms['supplier1'], + supplier, timeout=30000) + for instance, grp in [(topo_m4.ms['supplier1'], "cn=SubDef4,{}".format(DEFAULT_SUFFIX)), + (topo_m4.ms['supplier3'], "cn=SubDef5,{}".format(DEFAULT_SUFFIX)), + (topo_m4.ms['supplier4'], "cn=SubDef3,{}".format(DEFAULT_SUFFIX))]: + assert len(nsAdminGroup(instance, grp).get_attr_vals_utf8('member')) == 3000 + for grp, instance in [('Interns', topo_m4.ms['supplier3']), + ('Contractors', topo_m4.ms['supplier2'])]: + assert not nsAdminGroup( + instance, "cn={},{}".format(grp, grp_container)).get_attr_vals_utf8('member') + finally: + delete_users_and_wait(topo_m4, automem_scope) + + +def test_re_adding_the_same_3000_users(topo_m4, _create_entries): + """ + Adding, Deleting and re-adding the same 
3000 users matching all inclusive + regex rules and no matching exclusive regex rules + :id: d2f5f112-be57-11e9-b164-8c16451d917b + :setup: Instance with 4 suppliers + :steps: + 1. Add 3000 user entries matching the inclusive/exclusive regex + rules at topo_m4.ms['supplier1'] + 2. Check the same created in rest suppliers + 3. Delete 3000 users + 4. Again add 3000 users + 5. Check the same created in rest suppliers + :expectedresults: + 1. Pass + 2. Pass + 3. Pass + 4. Pass + 5. Pass + """ + automem_scope = "ou=Employees,{}".format(DEFAULT_SUFFIX) + grp_container = "cn=replsubGroups,{}".format(DEFAULT_SUFFIX) + default_group1 = "cn=SubDef3,{}".format(DEFAULT_SUFFIX) + default_group2 = "cn=SubDef5,{}".format(DEFAULT_SUFFIX) + # Adding + for number in range(3000): + create_entry(topo_m4, f'automemusrs{number}', automem_scope, '5995', '5693', 'Manager') + try: + for supplier in [topo_m4.ms['supplier1'], topo_m4.ms['supplier3'], topo_m4.ms['supplier4']]: + ReplicationManager(DEFAULT_SUFFIX).wait_for_replication(topo_m4.ms['supplier2'], + supplier, timeout=30000) + assert len(nsAdminGroup( + topo_m4.ms['supplier2'], + f'cn=Contractors,{grp_container}').get_attr_vals_utf8('member')) == 3000 + # Deleting + delete_users_and_wait(topo_m4, automem_scope) + + # re-adding + for number in range(3000): + create_entry(topo_m4, f'automemusrs{number}', automem_scope, '5995', '5693', 'Manager') + for supplier in [topo_m4.ms['supplier2'], topo_m4.ms['supplier3'], topo_m4.ms['supplier4']]: + ReplicationManager(DEFAULT_SUFFIX).wait_for_replication(topo_m4.ms['supplier1'], + supplier, timeout=30000) + for instance, grp in [(topo_m4.ms['supplier1'], "cn=Managers,{}".format(grp_container)), + (topo_m4.ms['supplier3'], "cn=Contractors,{}".format(grp_container)), + (topo_m4.ms['supplier4'], "cn=Visitors,{}".format(grp_container)), + (topo_m4.ms['supplier2'], "cn=Interns,{}".format(grp_container))]: + assert len(nsAdminGroup(instance, grp).get_attr_vals_utf8('member')) == 3000 + for grp, 
instance in [(default_group2, topo_m4.ms['supplier4']), + (default_group1, topo_m4.ms['supplier3'])]: + assert not nsAdminGroup(instance, grp).get_attr_vals_utf8('member') + finally: + delete_users_and_wait(topo_m4, automem_scope) + + +def test_users_with_different_uid_and_gid_nos(topo_m4, _create_entries): + """ + Adding, Deleting and re-adding the same 3000 users with + different uid and gid nos, with different inclusive/exclusive matching regex rules + :id: cc595a1a-be57-11e9-b053-8c16451d917b + :setup: Instance with 4 suppliers + :steps: + 1. Add 3000 user entries matching the inclusive/exclusive regex + rules at topo_m4.ms['supplier1'] + 2. Check the same created in rest suppliers + 3. Delete 3000 users + 4. Again add 3000 users + 5. Check the same created in rest suppliers + :expectedresults: + 1. Pass + 2. Pass + 3. Pass + 4. Pass + 5. Pass + """ + automem_scope = "ou=Employees,{}".format(DEFAULT_SUFFIX) + grp_container = "cn=replsubGroups,{}".format(DEFAULT_SUFFIX) + default_group1 = "cn=SubDef3,{}".format(DEFAULT_SUFFIX) + default_group2 = "cn=SubDef5,{}".format(DEFAULT_SUFFIX) + # Adding + for number in range(3000): + create_entry(topo_m4, f'automemusrs{number}', automem_scope, '3994', '5695', 'OnDeputation') + try: + for supplier in [topo_m4.ms['supplier2'], topo_m4.ms['supplier3'], topo_m4.ms['supplier4']]: + ReplicationManager(DEFAULT_SUFFIX).wait_for_replication(topo_m4.ms['supplier1'], + supplier, timeout=30000) + for intstance, grp in [(topo_m4.ms['supplier2'], default_group1), + (topo_m4.ms['supplier3'], default_group2)]: + assert len(nsAdminGroup(intstance, grp).get_attr_vals_utf8('member')) == 3000 + for grp, instance in [('Contractors', topo_m4.ms['supplier3']), + ('Managers', topo_m4.ms['supplier1'])]: + assert not nsAdminGroup( + instance, "cn={},{}".format(grp, grp_container)).get_attr_vals_utf8('member') + # Deleting + for user in nsAdminGroups(topo_m4.ms['supplier1'], automem_scope, rdn=None).list(): + user.delete() + for supplier in 
[topo_m4.ms['supplier2'], topo_m4.ms['supplier3'], topo_m4.ms['supplier4']]: + ReplicationManager(DEFAULT_SUFFIX).wait_for_replication(topo_m4.ms['supplier1'], + supplier, timeout=30000) + # re-adding + for number in range(3000): + create_entry(topo_m4, f'automemusrs{number}', automem_scope, + '5995', '5693', 'OnDeputation') + + for supplier in [topo_m4.ms['supplier2'], topo_m4.ms['supplier3'], topo_m4.ms['supplier4']]: + ReplicationManager(DEFAULT_SUFFIX).wait_for_replication(topo_m4.ms['supplier1'], + supplier, timeout=30000) + for grp, instance in [('Contractors', topo_m4.ms['supplier3']), + ('Managers', topo_m4.ms['supplier1']), + ('Interns', topo_m4.ms['supplier2']), + ('Visitors', topo_m4.ms['supplier4'])]: + assert len(nsAdminGroup( + instance, f'cn={grp},{grp_container}').get_attr_vals_utf8('member')) == 3000 + + for instance, grp in [(topo_m4.ms['supplier2'], default_group1), + (topo_m4.ms['supplier3'], default_group2)]: + assert not nsAdminGroup(instance, grp).get_attr_vals_utf8('member') + finally: + delete_users_and_wait(topo_m4, automem_scope) + + +def test_bulk_users_to_non_automemscope(topo_m4, _create_entries): + """ + Adding bulk users to non-automem_scope and then running modrdn + operation to change the ou to automem_scope + :id: c532dc0c-be57-11e9-bcca-8c16451d917b + :setup: Instance with 4 suppliers + :steps: + 1. Running modrdn operation to change the ou to automem_scope + 2. Add 3000 user entries to non-automem_scope at topo_m4.ms['supplier1'] + 3. Run AutomemberRebuildMembershipTask + 4. Check the same created in rest suppliers + :expectedresults: + 1. Pass + 2. Pass + 3. Pass + 4. 
Pass + """ + automem_scope = "cn=EmployeesNew,{}".format(DEFAULT_SUFFIX) + grp_container = "cn=replsubGroups,{}".format(DEFAULT_SUFFIX) + default_group1 = "cn=SubDef3,{}".format(DEFAULT_SUFFIX) + default_group2 = "cn=SubDef5,{}".format(DEFAULT_SUFFIX) + nsContainers(topo_m4.ms['supplier1'], DEFAULT_SUFFIX).create(properties={'cn': 'ChangeThisCN'}) + Group(topo_m4.ms['supplier1'], + f'cn=replsubGroups,cn=autoMembersPlugin,{DEFAULT_SUFFIX}').replace('autoMemberScope', + automem_scope) + for instance in [topo_m4.ms['supplier1'], topo_m4.ms['supplier2'], + topo_m4.ms['supplier3'], topo_m4.ms['supplier4']]: + instance.restart() + # Adding BulkUsers + for number in range(3000): + create_entry(topo_m4, f'automemusrs{number}', f'cn=ChangeThisCN,{DEFAULT_SUFFIX}', + '5995', '5693', 'Supervisor') + try: + for supplier in [topo_m4.ms['supplier2'], topo_m4.ms['supplier3'], topo_m4.ms['supplier4']]: + ReplicationManager(DEFAULT_SUFFIX).wait_for_replication(topo_m4.ms['supplier1'], + supplier, timeout=30000) + for instance, grp in [(topo_m4.ms['supplier2'], default_group1), + (topo_m4.ms['supplier1'], "cn=Managers,{}".format(grp_container))]: + assert not nsAdminGroup(instance, grp).get_attr_vals_utf8('member') + # Deleting BulkUsers "User_Name" Suffix "Nof_Users" + topo_m4.ms['supplier3'].rename_s(f"CN=ChangeThisCN,{DEFAULT_SUFFIX}", + f'cn=EmployeesNew', newsuperior=DEFAULT_SUFFIX, delold=1) + for supplier in [topo_m4.ms['supplier2'], topo_m4.ms['supplier3'], topo_m4.ms['supplier4']]: + ReplicationManager(DEFAULT_SUFFIX).wait_for_replication(topo_m4.ms['supplier1'], + supplier, timeout=30000) + AutomemberRebuildMembershipTask(topo_m4.ms['supplier1']).create(properties={ + 'basedn': automem_scope, + 'filter': "objectClass=posixAccount" + }) + for supplier in [topo_m4.ms['supplier2'], topo_m4.ms['supplier3'], topo_m4.ms['supplier4']]: + ReplicationManager(DEFAULT_SUFFIX).wait_for_replication(topo_m4.ms['supplier1'], + supplier, timeout=30000) + for instance, grp in 
[(topo_m4.ms['supplier1'], 'Managers'), + (topo_m4.ms['supplier2'], 'Interns'), + (topo_m4.ms['supplier3'], 'Contractors'), + (topo_m4.ms['supplier4'], 'Visitors')]: + assert len(nsAdminGroup( + instance, f'cn={grp},{grp_container}').get_attr_vals_utf8('member')) == 3000 + for grp, instance in [(default_group1, topo_m4.ms['supplier2']), + (default_group2, topo_m4.ms['supplier3'])]: + assert not nsAdminGroup(instance, grp).get_attr_vals_utf8('member') + finally: + delete_users_and_wait(topo_m4, automem_scope) + nsContainer(topo_m4.ms['supplier1'], "CN=EmployeesNew,{}".format(DEFAULT_SUFFIX)).delete() + + +def test_automemscope_and_running_modrdn(topo_m4, _create_entries): + """ + Adding bulk users to non-automem_scope and running modrdn operation + with new superior to automem_scope + :id: bf60f958-be57-11e9-945d-8c16451d917b + :setup: Instance with 4 suppliers + :steps: + 1. Running modrdn operation to change the ou to automem_scope + 2. Add 3000 user entries to non-automem_scope at topo_m4.ms['supplier1'] + 3. Run AutomemberRebuildMembershipTask + 4. Check the same created in rest suppliers + :expectedresults: + 1. Pass + 2. Pass + 3. Pass + 4. 
Pass + """ + user_rdn = "long09usr" + automem_scope1 = "ou=Employees,{}".format(DEFAULT_SUFFIX) + automem_scope2 = "cn=NewEmployees,{}".format(DEFAULT_SUFFIX) + grp_container = "cn=replsubGroups,{}".format(DEFAULT_SUFFIX) + default_group1 = "cn=SubDef3,{}".format(DEFAULT_SUFFIX) + default_group2 = "cn=SubDef5,{}".format(DEFAULT_SUFFIX) + OrganizationalUnits(topo_m4.ms['supplier1'], + DEFAULT_SUFFIX).create(properties={'ou': 'NewEmployees'}) + Group(topo_m4.ms['supplier1'], + f'cn=replsubGroups,cn=autoMembersPlugin,{DEFAULT_SUFFIX}').replace('autoMemberScope', + automem_scope2) + for instance in [topo_m4.ms['supplier1'], topo_m4.ms['supplier2'], + topo_m4.ms['supplier3'], topo_m4.ms['supplier4']]: + Config(instance).replace('nsslapd-errorlog-level', '73728') + instance.restart() + # Adding bulk users + for number in range(3000): + create_entry(topo_m4, f'automemusrs{number}', automem_scope1, + '3994', '5695', 'OnDeputation') + try: + for supplier in [topo_m4.ms['supplier2'], topo_m4.ms['supplier3'], topo_m4.ms['supplier4']]: + ReplicationManager(DEFAULT_SUFFIX).wait_for_replication(topo_m4.ms['supplier1'], + supplier, timeout=30000) + for grp, instance in [(default_group2, topo_m4.ms['supplier3']), + ("cn=Managers,{}".format(grp_container), topo_m4.ms['supplier1']), + ("cn=Contractors,{}".format(grp_container), topo_m4.ms['supplier3'])]: + assert not nsAdminGroup(instance, grp).get_attr_vals_utf8('member') + count = 0 + for user in nsAdminGroups(topo_m4.ms['supplier3'], automem_scope1, rdn=None).list(): + topo_m4.ms['supplier1'].rename_s(user.dn, + f'cn=New{user_rdn}{count}', + newsuperior=automem_scope2, delold=1) + count += 1 + for supplier in [topo_m4.ms['supplier2'], topo_m4.ms['supplier3'], topo_m4.ms['supplier4']]: + ReplicationManager(DEFAULT_SUFFIX).wait_for_replication(topo_m4.ms['supplier1'], + supplier, timeout=30000) + AutomemberRebuildMembershipTask(topo_m4.ms['supplier1']).create(properties={ + 'basedn': automem_scope2, + 'filter': 
"objectClass=posixAccount" + }) + for supplier in [topo_m4.ms['supplier2'], topo_m4.ms['supplier3'], topo_m4.ms['supplier4']]: + ReplicationManager(DEFAULT_SUFFIX).wait_for_replication(topo_m4.ms['supplier1'], + supplier, timeout=30000) + for instance, grp in [(topo_m4.ms['supplier3'], default_group2), + (topo_m4.ms['supplier3'], default_group1)]: + assert len(nsAdminGroup(instance, grp).get_attr_vals_utf8('member')) == 3000 + for instance, grp in [(topo_m4.ms['supplier1'], 'Managers'), + (topo_m4.ms['supplier3'], 'Contractors'), + (topo_m4.ms['supplier2'], 'Interns'), + (topo_m4.ms['supplier4'], 'Visitors')]: + assert not nsAdminGroup( + instance, "cn={},{}".format(grp, grp_container)).get_attr_vals_utf8('member') + finally: + for scope in [automem_scope1, automem_scope2]: + delete_users_and_wait(topo_m4, scope) + + +if __name__ == '__main__': + CURRENT_FILE = os.path.realpath(__file__) + pytest.main("-s -v %s" % CURRENT_FILE) diff --git a/dirsrvtests/tests/longduration/db_protect_long_test.py b/dirsrvtests/tests/longduration/db_protect_long_test.py new file mode 100644 index 0000000..3ec337b --- /dev/null +++ b/dirsrvtests/tests/longduration/db_protect_long_test.py @@ -0,0 +1,372 @@ +#nunn --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2021 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. 
+# --- END COPYRIGHT BLOCK --- + +""" +Will Verify which tasks (Import/Export/Backup/Restore/Reindex (Offline/Online)) may run at the same time +""" + +import os +import logging +import pytest +import time +import enum +import shutil +import json +from threading import Thread, get_ident as get_tid +from enum import auto as EnumAuto +from lib389.topologies import topology_st as topo +from lib389.dbgen import dbgen_users +from lib389.backend import Backend +from lib389.properties import ( TASK_WAIT ) + + +#pytestmark = pytest.mark.tier1 + +NBUSERS=15000 # Should have enough user so that jobs spends at least a few seconds +BASE_SUFFIX="dc=i4585,dc=test" +# result reference file got from version 1.4.2.12 +JSONREFNAME = os.path.join(os.path.dirname(__file__), '../data/longduration/db_protect_long_test_reference_1.4.2.12.json') + + +#Results +OK="OK" +KO="KO" +BUSY="KO" # So far, no difference between failure and failure due to busy + +# data associated with both suffixes (i.e. DN, backend name, ldif files, and backup directory ) +_suffix1_info={ 'index': 1 } +_suffix2_info={ 'index': 2 } +# Threads result +_result = {} +# Threads +_threads = {} + +#Mode +OFFLINE="OFFLINE" +ONLINE="ONLINE" + +DEBUGGING = os.getenv("DEBUGGING", default=False) +if DEBUGGING: + logging.getLogger(__name__).setLevel(logging.DEBUG) +else: + logging.getLogger(__name__).setLevel(logging.INFO) +log = logging.getLogger(__name__) + + + +""" + create suffix backend, generate ldif, populate the backend, get a backup + and initialize suffix_info + Note: suffix_info['index'] must be set when calling the function +""" +def _init_suffix(topo, suffix_info): + index = suffix_info['index'] + # Init suffix_info values + suffix = f'dc=suffix{index},' + BASE_SUFFIX + suffix_info['suffix'] = suffix + ldif_dir = topo.standalone.get_ldif_dir() + bak_dir = topo.standalone.get_bak_dir() + suffix_info['name'] = f'suffix{index}' + suffix_info['rbak'] = bak_dir + f'/r_i4585.bak' # For archive2db + suffix_info['wbak'] = 
bak_dir + f'/w_i4585.bak' # For db2archive + suffix_info['rldif'] = ldif_dir + f'/r_suffix{index}.ldif' # For ldif2db + suffix_info['wldif'] = ldif_dir + f'/w_suffix{index}.ldif' # For db2ldif + # create suffix backend + be = Backend(topo.standalone) + be.create(properties={'cn': suffix_info['name'], 'nsslapd-suffix': suffix}) + # Generate rldif ldif file, populate backend, and generate rbak archive + dbgen_users(topo.standalone, NBUSERS, suffix_info['rldif'], suffix) + # Populate the backend + result = _run_ldif2db(topo, ONLINE, suffix_info) + assert( result == 0 ) + # Generate archive (only second suffix is created) + if index == 2: + shutil.rmtree(suffix_info['rbak'], ignore_errors=True) + result = _job_db2archive(topo, ONLINE, suffix_info['rbak']) + assert( result == 0 ) + + +""" + determine json file name +""" +def _get_json_filename(topo): + return f"{topo.standalone.ds_paths.prefix}/var/log/dirsrv/test_db_protect.json" + + +""" + Compare two results pairs + Note: In the Success + Failure case, do not care about the order + because of the threads race +""" +def is_same_result(res1, res2): + if res1 == res2: + return True + if res1 == "OK + KO" and res2 == "KO + OK": + return True + if res2 == "OK + KO" and res1 == "KO + OK": + return True + return False + + +""" + Run a job within a dedicated thread +""" +def _worker(idx, job, topo, mode): + log.info(f"Thread {idx} id: {get_tid()} started {mode} job {job.__name__}") + rc0 = None + rc = None + try: + rc = job(topo, mode) + rc0 = rc + if mode == ONLINE: + if rc == 0: + rc = OK + else: + rc = KO + else: + if rc: + rc = OK + else: + rc = KO + except Exception as err: + log.info(f"Thread {idx} ended {mode} job {job.__name__} with exception {err}") + log.info(err, exc_info=True) + rc = KO + _result[idx] = rc + log.info(f"Thread {idx} ended {mode} job {job.__name__} with result {rc} (was {rc0})") + +""" + Create a new thread to run a job +""" +def _start_work(*args): + idx = args[0] + _threads[idx] = 
Thread(target=_worker, args=args) + log.info(f"created Thread {idx} id: {_threads[idx].ident}") + _result[idx] = None + _threads[idx].start() + + +""" + Wait until thread worker has finished then return the result +""" +def _wait4work(idx): + _threads[idx].join() + log.info(f"completed wait on thread {idx} id: {_threads[idx].ident} result is {_result[idx]}") + return _result[idx] + + +""" + Tests all pairs of jobs and check that we got the expected result + (first job is running in mode1 (ONLINE/OFFLINE)mode) + (second job is running in mode2 (ONLINE/OFFLINE)mode) +""" +def _check_all_job_pairs(topo, state, mode1, mode2, result): + """ + Checks all couple of jobs with mode1 online/offline for first job and mode2 for second job + """ + for idx1, job1 in enumerate(job_list): + for idx2, job2 in enumerate(job_list): + log.info(f"Testing {mode1} {job1} + {mode2} {job2}") + _start_work("job1", job1, topo, mode1) + # Wait enough to insure job1 is started + time.sleep(0.5) + _start_work("job2", job2, topo, mode2) + res1 = _wait4work("job1") + res2 = _wait4work("job2") + key = f"Instance {state} {mode1} {job1.__name__} + {mode2} {job2.__name__}" + val = f"{res1} + {res2}" + result[key] = val + log.info(f"{key} ==> {val}") + + +""" + ********* JOBS DEFINITION ********** +""" + +def _run_ldif2db(topo, mode, suffix_info): + if mode == OFFLINE: + return topo.standalone.ldif2db(suffix_info['name'], None, None, None, suffix_info['rldif']) + else: + return topo.standalone.tasks.importLDIF(benamebase=suffix_info['name'], input_file=suffix_info['rldif'], args={TASK_WAIT: True}) + +def _job_ldif2dbSuffix1(topo, mode): + return _run_ldif2db(topo, mode, _suffix1_info) + +def _job_ldif2dbSuffix2(topo, mode): + return _run_ldif2db(topo, mode, _suffix2_info) + + +def _run_db2ldif(topo, mode, suffix_info): + if os.path.exists(suffix_info['wldif']): + os.remove(suffix_info['wldif']) + if mode == OFFLINE: + return topo.standalone.db2ldif(suffix_info['name'], None, None, False, False, 
suffix_info['wldif']) + else: + return topo.standalone.tasks.exportLDIF(benamebase=suffix_info['name'], output_file=suffix_info['wldif'], args={TASK_WAIT: True}) + +def _job_db2ldifSuffix1(topo, mode): + return _run_db2ldif(topo, mode, _suffix1_info) + +def _job_db2ldifSuffix2(topo, mode): + return _run_db2ldif(topo, mode, _suffix2_info) + + +def _run_db2index(topo, mode, suffix_info): + if mode == OFFLINE: + return topo.standalone.db2index(bename=suffix_info['name'], attrs=['cn']) + else: + return topo.standalone.tasks.reindex(topo.standalone, benamebase=suffix_info['name'], attrname='cn', args={TASK_WAIT: True}) + +def _job_db2indexSuffix1(topo, mode): + return _run_db2index(topo, mode, _suffix1_info) + +def _job_db2indexSuffix2(topo, mode): + return _run_db2index(topo, mode, _suffix2_info) + + +def _job_db2archive(topo, mode, backup_dir=None): + # backup is quite fast so let's do it several times to increase chance of having concurrent task + if backup_dir is None: + backup_dir = _suffix1_info['wbak'] + shutil.rmtree(backup_dir, ignore_errors=True) + if mode == OFFLINE: + for i in range(3): + rc = topo.standalone.db2bak(backup_dir) + if not rc: + return False + return True + else: + for i in range(3): + rc = topo.standalone.tasks.db2bak(backup_dir=backup_dir, args={TASK_WAIT: True}) + if (rc != 0): + return rc + return 0 + +def _job_archive2db(topo, mode, backup_dir=None): + # restore is quite fast so let's do it several times to increase chance of having concurrent task + if backup_dir is None: + backup_dir = _suffix1_info['rbak'] + if mode == OFFLINE: + for i in range(3): + rc = topo.standalone.bak2db(backup_dir) + if not rc: + return False + return True + else: + for i in range(3): + rc = topo.standalone.tasks.bak2db(backup_dir=backup_dir, args={TASK_WAIT: True}) + if (rc != 0): + return rc + return 0 + +def _job_nothing(topo, mode): + if mode == OFFLINE: + return True + return 0 + +""" + ********* END OF JOBS DEFINITION ********** +""" + + +# job_list must be 
defined after the job get defined +job_list = [ _job_nothing, _job_db2ldifSuffix1, _job_db2ldifSuffix2, _job_ldif2dbSuffix1, _job_ldif2dbSuffix2, + _job_db2indexSuffix1, _job_db2indexSuffix2, _job_db2archive, _job_archive2db ] + + + +""" + Beware this test is very long (several hours) + it checks the results when two task (like import/export/reindex/backup/archive are run at the same time) + and store the result in a json file + the compare with a reference +""" + +def test_db_protect(topo): + """ + Add an index, then import via cn=tasks + + :id: 462bc550-87d6-11eb-9310-482ae39447e5 + :setup: Standalone Instance + :steps: + 1. Initialize suffixes + 2. Stop server instance + 3. Compute results for all couples of jobs in OFFLINE,OFFLINE mode + 4. Start server instance + 5. Compute results for all couples of jobs in OFFLINE,OFFLINE mode + 6. Compute results for all couples of jobs in ONLINE,OFFLINE mode + 7. Compute results for all couples of jobs in OFFLINE,ONLINE mode + 8. Compute results for all couples of jobs in ONLINE,ONLINE mode + 9. Store results in log file and json file + 10. Read json reference file + 11. Compute the difference between result and reference + 12. Logs the differences + 13. Assert if differences is not empty + + :expectedresults: + 1. Operation successful + 2. Operation successful + 3. Operation successful + 4. Operation successful + 5. Operation successful + 6. Operation successful + 7. Operation successful + 8. Operation successful + 9. Operation successful + 10. Operation successful + 11. Operation successful + 12. Operation successful + 13. 
Operation successful + """ + # Step 1: Initialize suffixes + _init_suffix(topo, _suffix1_info) + _init_suffix(topo, _suffix2_info) + result={} + # Step 2: Stop server instance + topo.standalone.stop() + log.info("Server instance is now stopped.") + # Step 3: Compute results for all couples of jobs in OFFLINE,OFFLINE mode + _check_all_job_pairs(topo, OFFLINE, OFFLINE, OFFLINE, result) + # Step 4: Start server instance + topo.standalone.start() + log.info("Server instance is now started.") + # Step 5: Compute results for all couples of jobs in OFFLINE,OFFLINE mode + _check_all_job_pairs(topo, ONLINE, OFFLINE, OFFLINE, result) + # Step 6: Compute results for all couples of jobs in ONLINE,OFFLINE mode + _check_all_job_pairs(topo, ONLINE, ONLINE, OFFLINE, result) + # Step 7: Compute results for all couples of jobs in OFFLINE,ONLINE mode + _check_all_job_pairs(topo, ONLINE, OFFLINE, ONLINE, result) + # Step 8: Compute results for all couples of jobs in ONLINE,ONLINE mode + _check_all_job_pairs(topo, ONLINE, ONLINE, ONLINE, result) + # Step 9: Logs the results and store the json file + for key,val in result.items(): + log.info(f"{key} ==> {val}") + with open(_get_json_filename(topo), "w") as jfile: + json.dump(result, jfile) + # Step 10: read json reference file + with open(JSONREFNAME, "r") as jfile: + ref = json.load(jfile) + # Step 11: Compute the differences + differences={} + for key, value in result.items(): + if key in ref: + if not is_same_result(value, ref[key]): + differences[key] = ( value, ref[key] ) + else: + differences[key] = ( value, None ) + for key, value in ref.items(): + if not key in result: + differences[key] = ( None, value ) + # Step 12: Log the differences + log.info(f"difference between result an 1.4.2.12 reference are:") + log.info(f" key: (result, reference)") + for key, value in differences.items(): + log.info(f"{key}: {value}") + # Step 13: assert if there are differences + assert not differences + diff --git 
a/dirsrvtests/tests/perf/create_data.py b/dirsrvtests/tests/perf/create_data.py new file mode 100755 index 0000000..0d7e385 --- /dev/null +++ b/dirsrvtests/tests/perf/create_data.py @@ -0,0 +1,289 @@ +#!/usr/bin/python2 +from __future__ import ( + print_function, + division +) + +import sys +import math + + +class RHDSData(object): + def __init__( + self, + stream=sys.stdout, + users=10000, + groups=100, + grps_puser=20, + nest_level=10, + ngrps_puser=10, + domain="redhat.com", + basedn="dc=example,dc=com", + ): + self.users = users + self.groups = groups + self.basedn = basedn + self.domain = domain + self.stream = stream + + self.grps_puser = grps_puser + self.nest_level = nest_level + self.ngrps_puser = ngrps_puser + + self.user_defaults = { + 'objectClass': [ + 'person', + 'top', + 'inetorgperson', + 'organizationalperson', + 'inetuser', + 'posixaccount'], + 'uidNumber': ['-1'], + 'gidNumber': ['-1'], + } + + self.group_defaults = { + 'objectClass': [ + 'top', + 'inetuser', + 'posixgroup', + 'groupofnames'], + 'gidNumber': [-1], + } + + def put_entry(self, entry): + """ + Abstract method, implementation depends on if we want just print LDIF, + or update LDAP directly + """ + raise NotImplementedError() + + def gen_user(self, uid): + user = dict(self.user_defaults) + user['dn'] = 'uid={uid},ou=people,{suffix}'.format( + uid=uid, + suffix=self.basedn, + ) + user['uid'] = [uid] + user['displayName'] = ['{} {}'.format(uid, uid)] + user['sn'] = [uid] + user['homeDirectory'] = ['/other-home/{}'.format(uid)] + user['mail'] = ['{uid}@{domain}'.format( + uid=uid, domain=self.domain)] + user['givenName'] = [uid] + user['cn'] = ['{} {}'.format(uid, uid)] + + return user + + def username_generator(self, start, stop, step=1): + for i in range(start, stop, step): + yield 'user%s' % i + + def gen_group(self, name, members=(), group_members=()): + group = dict(self.group_defaults) + group['dn'] = 'cn={name},ou=groups,{suffix}'.format( + name=name, + suffix=self.basedn, + ) + 
group['cn'] = [name] + group['member'] = ['uid={uid},ou=people,{suffix}'.format( + uid=uid, + suffix=self.basedn, + ) for uid in members] + group['member'].extend( + ['cn={name},ou=groups,{suffix}'.format( + name=name, + suffix=self.basedn, + ) for name in group_members]) + return group + + def groupname_generator(self, start, stop, step=1): + for i in range(start, stop, step): + yield 'group%s' % i + + def gen_users_and_groups(self): + self.__gen_entries_with_groups( + self.users, + self.groups, + self.grps_puser, + self.ngrps_puser, + self.nest_level, + self.username_generator, self.gen_user, + self.groupname_generator, self.gen_group + ) + + def __gen_entries_with_groups( + self, + num_of_entries, + num_of_groups, + groups_per_entry, + nested_groups_per_entry, + max_nesting_level, + gen_entry_name_f, gen_entry_f, + gen_group_name_f, gen_group_f + ): + assert num_of_groups % groups_per_entry == 0 + assert num_of_groups >= groups_per_entry + assert groups_per_entry > nested_groups_per_entry + assert max_nesting_level > 0 + assert nested_groups_per_entry > 0 + assert ( + groups_per_entry - nested_groups_per_entry > + int(math.ceil(nested_groups_per_entry / float(max_nesting_level))) + ), ( + "At least {} groups is required to generate proper amount of " + "nested groups".format( + nested_groups_per_entry + + int(math.ceil( + nested_groups_per_entry / float(max_nesting_level)) + ) + ) + ) + + for uid in gen_entry_name_f(0, num_of_entries): + self.put_entry(gen_entry_f(uid)) + + # create N groups per entry, of them are nested + # User/Host (max nesting level = 2) + # | + # +--- G1 --- G2 (nested) --- G3 (nested, max level) + # | + # +--- G5 --- G6 (nested) + # | + # ...... 
+ # | + # +--- GN + + # how many members should be added to groups (set of groups_per_entry + # have the same members) + entries_per_group = num_of_entries // (num_of_groups // groups_per_entry) + + # generate groups and put users there + for i in range(num_of_groups // groups_per_entry): + + uids = list(gen_entry_name_f( + i * entries_per_group, + (i + 1) * entries_per_group + )) + + # per user + last_grp_name = None + nest_lvl = 0 + nested_groups_added = 0 + + for group_name in gen_group_name_f( + i * groups_per_entry, + (i + 1) * groups_per_entry, + ): + # create nested groups first + if nested_groups_added < nested_groups_per_entry: + if nest_lvl == 0: + # the top group + self.put_entry( + gen_group_f( + group_name, + members=uids + ) + ) + nest_lvl += 1 + nested_groups_added += 1 + elif nest_lvl == max_nesting_level: + # the last level group this group is not nested + self.put_entry( + gen_group_f( + group_name, + group_members=[last_grp_name], + ) + ) + nest_lvl = 0 + else: + # mid level group + self.put_entry( + gen_group_f( + group_name, + group_members=[last_grp_name] + ) + ) + nested_groups_added += 1 + nest_lvl += 1 + + last_grp_name = group_name + else: + # rest of groups have direct membership + if nest_lvl != 0: + # assign the last nested group if exists + self.put_entry( + gen_group_f( + group_name, + members=uids, + group_members=[last_grp_name], + ) + ) + nest_lvl = 0 + else: + self.put_entry( + gen_group_f( + group_name, + members=uids + ) + ) + + def __generate_entries_with_users_groups( + self, + num_of_entries_direct_members, + num_of_entries_indirect_members, + entries_per_user, + entries_per_group, + gen_entry_name_f, gen_entry_f, + ): + assert num_of_entries_direct_members % entries_per_user == 0 + assert num_of_entries_indirect_members % entries_per_group == 0 + + num_of_entries = num_of_entries_direct_members + num_of_entries_indirect_members + + # direct members + users_per_entry = self.users // (num_of_entries_direct_members // 
entries_per_user) + + start_user = 0 + stop_user = users_per_entry + for name in gen_entry_name_f(0, num_of_entries_direct_members): + self.put_entry( + gen_entry_f( + name, + user_members=self.username_generator(start_user, stop_user), + ) + ) + start_user = stop_user % self.users + stop_user = start_user + users_per_entry + stop_user = stop_user if stop_user < self.users else self.users + + groups_per_entry = self.groups // (num_of_entries_indirect_members // entries_per_group) + + # indirect members + start_group = 0 + stop_group = groups_per_entry + for name in gen_entry_name_f(num_of_entries_direct_members, num_of_entries): + self.put_entry( + gen_entry_f( + name, + usergroup_members=self.groupname_generator(start_group, stop_group), + ) + ) + start_group = stop_group % self.groups + stop_group = start_group + groups_per_entry + stop_group = stop_group if stop_group < self.groups else self.groups + + def do_magic(self): + self.gen_users_and_groups() + + +class RHDSDataLDIF(RHDSData): + def put_entry(self, entry): + print(file=self.stream) + print("dn:", entry['dn'], file=self.stream) + for k, values in entry.items(): + if k == 'dn': + continue + for v in values: + print("{}: {}".format(k, v), file=self.stream) + print(file=self.stream) diff --git a/dirsrvtests/tests/perf/ltest.py b/dirsrvtests/tests/perf/ltest.py new file mode 100755 index 0000000..c8cf894 --- /dev/null +++ b/dirsrvtests/tests/perf/ltest.py @@ -0,0 +1,75 @@ +#!/usr/bin/env python3 +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2023 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. +# --- END COPYRIGHT BLOCK --- +# +import argparse +import time +import random +import ldap + +DESC=""" +A test tool that measure base search operation latency when n connections are open. 
(With moderate average load) +""" + +parser = argparse.ArgumentParser( + prog='ltest', + description='Latency tester') + +parser.add_argument('-t', '--test-duration', type=int, help='Latency test duration in seconds') +parser.add_argument('-T', '--wait-time', type=int, default=10, help='Wait time between operations in milliseconds') +parser.add_argument('-H', '--uri', default='ldap://localhost:389', help='LDAP Uniform Resource Identifier') +parser.add_argument('-b', '--basedn', default='ou=people, dc=example, dc=com', help='Search Base DN') +parser.add_argument('-D', '--binddn', default='cn=directory manager', help='Bind DN') +parser.add_argument('-w', '--bindpw', default='password', help='Bind password') +parser.add_argument('-n', '--nbconn', type=int, default=20000, help='Number of connections') +parser.add_argument('-v', '--verbose', action='count', default=0, help='Verbose mode') + +args = parser.parse_args() + +conns = [] +for i in range(args.nbconn): + try: + if (i+1) % 1000 == 0: + print (f'{i+1} connections are open') + conn = ldap.initialize(args.uri, trace_level=args.verbose) + conn.set_option(ldap.OPT_REFERRALS, 0) + conn.simple_bind_s(args.binddn, args.bindpw) + conns.append(conn) + except ldap.LDAPError as ex: + print (f'Failed to open connection #{i}') + raise ex +print (f'{args.nbconn} connections are open. Starting the latency test using {args} as parameters') + +now = int(time.time()) +end_time = None +if args.test_duration: + print(f'{now}') + end_time = now + args.test_duration + +ltime = now +sum = 0 +nbops = 0 +while True: + now = int(time.time()) + if now != ltime and nbops > 0: + print(f"Performed {nbops} operations. 
Average operation time is: {sum/nbops/1000000} ms.") + sum = 0 + nbops = 0 + if end_time and int(time.time()) >= end_time: + break + ltime = now + time.sleep(args.wait_time/1000.0) + conn = random.choice(conns) + stime = time.perf_counter_ns() + conn.search_s(args.basedn, ldap.SCOPE_BASE, attrlist = ['dn']) + etime = time.perf_counter_ns() + sum += (etime-stime) + nbops += 1 + +for conn in conns: + conn.unbind() diff --git a/dirsrvtests/tests/perf/memberof_test.py b/dirsrvtests/tests/perf/memberof_test.py new file mode 100755 index 0000000..6d89d93 --- /dev/null +++ b/dirsrvtests/tests/perf/memberof_test.py @@ -0,0 +1,405 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2017 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. +# --- END COPYRIGHT BLOCK --- +# +import pytest +import subprocess +from lib389 import Entry +from lib389.tasks import Tasks +from lib389.dseldif import DSEldif +from create_data import RHDSDataLDIF +from lib389.properties import TASK_WAIT +from lib389.utils import ldap, os, time, logging, ds_is_older +from lib389._constants import SUFFIX, DN_SCHEMA, DN_DM, DEFAULT_SUFFIX, PASSWORD, PLUGIN_MEMBER_OF, \ + PLUGIN_MANAGED_ENTRY, PLUGIN_AUTOMEMBER, DN_CONFIG_LDBM, HOST_STANDALONE, PORT_STANDALONE +from lib389.topologies import topology_st as topo + +pytestmark = pytest.mark.tier3 + +MEMOF_PLUGIN = ('cn=' + PLUGIN_MEMBER_OF + ',cn=plugins,cn=config') +MAN_ENTRY_PLUGIN = ('cn=' + PLUGIN_MANAGED_ENTRY + ',cn=plugins,cn=config') +AUTO_MEM_PLUGIN = ('cn=' + PLUGIN_AUTOMEMBER + ',cn=plugins,cn=config') +DOMAIN = 'redhat.com' +LDAP_MOD = '/usr/bin/ldapmodify' +FILTER = 'objectClass=*' +USER_FILTER = '(|(uid=user*)(cn=group*))' +MEMBEROF_ATTR = 'memberOf' +DN_ATTR = 'dn:' + +logging.basicConfig(level=logging.DEBUG) +log = logging.getLogger(__name__) + + +@pytest.fixture(scope="module") +def memberof_setup(topo, request): + """Configure required plugins and restart the server""" + + 
log.info('Configuring memberOf, managedEntry and autoMembers plugins and restarting the server') + topo.standalone.simple_bind_s(DN_DM, PASSWORD) + try: + topo.standalone.plugins.enable(name=PLUGIN_MEMBER_OF) + except ldap.LDAPError as e: + log.error('Failed to enable {} plugin'.format(PLUGIN_MEMBER_OF)) + raise e + try: + topo.standalone.plugins.enable(name=PLUGIN_MANAGED_ENTRY) + topo.standalone.plugins.enable(name=PLUGIN_AUTOMEMBER) + except ldap.LDAPError as e: + log.error('Failed to enable {}, {} plugins'.format(PLUGIN_MANAGED_ENTRY, PLUGIN_AUTOMEMBER)) + raise e + + log.info('Change config values for db-locks and dbcachesize to import large ldif files') + if ds_is_older('1.3.6'): + topo.standalone.stop(timeout=10) + dse_ldif = DSEldif(topo.standalone) + try: + dse_ldif.replace(DN_CONFIG_LDBM, 'nsslapd-db-locks', '100000') + dse_ldif.replace(DN_CONFIG_LDBM, 'nsslapd-dbcachesize', '10000000') + except: + log.error('Failed to replace cn=config values of db-locks and dbcachesize') + raise + topo.standalone.start(timeout=10) + else: + try: + topo.standalone.modify_s(DN_CONFIG_LDBM, [(ldap.MOD_REPLACE, 'nsslapd-db-locks', '100000')]) + topo.standalone.modify_s(DN_CONFIG_LDBM, [(ldap.MOD_REPLACE, 'nsslapd-cache-autosize', '0')]) + topo.standalone.modify_s(DN_CONFIG_LDBM, [(ldap.MOD_REPLACE, 'nsslapd-dbcachesize', '10000000')]) + except ldap.LDAPError as e: + log.error( + 'Failed to replace values of nsslapd-db-locks and nsslapd-dbcachesize {}'.format(e.message['desc'])) + raise e + topo.standalone.restart(timeout=10) + + def fin(): + log.info('Disabling plugins {}, {}, {}'.format(PLUGIN_MEMBER_OF, PLUGIN_MANAGED_ENTRY, PLUGIN_AUTOMEMBER)) + topo.standalone.simple_bind_s(DN_DM, PASSWORD) + try: + topo.standalone.plugins.disable(name=PLUGIN_MEMBER_OF) + topo.standalone.plugins.disable(name=PLUGIN_MANAGED_ENTRY) + topo.standalone.plugins.disable(name=PLUGIN_AUTOMEMBER) + except ldap.LDAPError as e: + log.error('Failed to disable plugins, {}'.format(e.message['desc'])) 
+ assert False + topo.standalone.restart(timeout=10) + + request.addfinalizer(fin) + + +def _create_base_ldif(topo, import_base=False): + """Create base ldif file to clean entries from suffix""" + + log.info('Add base entry for online import') + ldif_dir = topo.standalone.get_ldif_dir() + ldif_file = os.path.join(ldif_dir, '/perf.ldif') + log.info('LDIF FILE is this: {}'.format(ldif_file)) + base_ldif = """dn: dc=example,dc=com +objectclass: top +objectclass: domain +dc: example + +dn: ou=people,dc=example,dc=com +objectclass: top +objectclass: organizationalUnit +ou: people + +dn: ou=groups,dc=example,dc=com +objectclass: top +objectclass: organizationalUnit +ou: groups +""" + with open(ldif_file, "w") as fd: + fd.write(base_ldif) + if import_base: + log.info('Adding base entry to suffix to remove users/groups and leave only the OUs') + try: + topo.standalone.tasks.importLDIF(suffix=SUFFIX, input_file=ldif_file, args={TASK_WAIT: True}) + except ValueError as e: + log.error('Online import failed' + e.message('desc')) + assert False + else: + log.info('Return LDIF file') + return ldif_file + + +def _run_fixup_memberof(topo): + """Run fixup memberOf task and measure the time taken""" + + log.info('Running fixup memberOf task and measuring the time taken') + start = time.time() + try: + topo.standalone.tasks.fixupMemberOf(suffix=SUFFIX, args={TASK_WAIT: True}) + except ValueError as e: + log.error('Running fixup MemberOf task failed' + e.message('desc')) + assert False + end = time.time() + cmd_time = int(end - start) + return cmd_time + + +def _nested_import_add_ldif(topo, nof_users, nof_groups, grps_user, ngrps_user, nof_depth, is_import=False): + """Create LDIF files for given nof users, groups and nested group levels""" + + log.info('Checking if the operation is Import or Ldapadd') + if is_import: + log.info('Import: Create base entry before adding users and groups') + exp_entries = nof_users + nof_groups + data_ldif = _create_base_ldif(topo, False) + 
log.info('Create data LDIF file by appending users, groups and nested groups') + with open(data_ldif, 'a') as file1: + data = RHDSDataLDIF(stream=file1, users=nof_users, groups=nof_groups, grps_puser=grps_user, + nest_level=nof_depth, ngrps_puser=ngrps_user, basedn=SUFFIX) + data.do_magic() + start = time.time() + log.info('Run importLDIF task to add entries to Server') + try: + topo.standalone.tasks.importLDIF(suffix=SUFFIX, input_file=data_ldif, args={TASK_WAIT: True}) + except ValueError as e: + log.error('Online import failed' + e.message('desc')) + assert False + end = time.time() + time_import = int(end - start) + + log.info('Check if number of entries created matches the expected entries') + users_groups = topo.standalone.search_s(SUFFIX, ldap.SCOPE_SUBTREE, USER_FILTER, [DN_ATTR]) + act_entries = str(users_groups).count(DN_ATTR) + log.info('Expected entries: {}, Actual entries: {}'.format(exp_entries, act_entries)) + assert act_entries == exp_entries + return time_import + else: + log.info('Ldapadd: Create data LDIF file with users, groups and nested groups') + ldif_dir = topo.standalone.get_ldif_dir() + data_ldif = os.path.join(ldif_dir, '/perf_add.ldif') + with open(data_ldif, 'w') as file1: + data = RHDSDataLDIF(stream=file1, users=nof_users, groups=nof_groups, grps_puser=grps_user, + nest_level=nof_depth, ngrps_puser=ngrps_user, basedn=SUFFIX) + data.do_magic() + start = time.time() + log.info('Run LDAPMODIFY to add entries to Server') + try: + subprocess.check_output( + [LDAP_MOD, '-cx', '-D', DN_DM, '-w', PASSWORD, '-h', HOST_STANDALONE, '-p', str(PORT_STANDALONE), '-af', + data_ldif]) + except subprocess.CalledProcessError as e: + log.error('LDAPMODIFY failed to add entries, error:{:s}'.format(str(e))) + raise e + end = time.time() + cmd_time = int(end - start) + log.info('Time taken to complete LDAPADD: {} secs'.format(cmd_time)) + return cmd_time + + +def _sync_memberof_attrs(topo, exp_memberof): + """Check if expected entries are created or 
attributes are synced""" + + log.info('_sync_memberof_attrs: Check if expected memberOf attributes are synced/created') + loop = 0 + start = time.time() + entries = topo.standalone.search_s(SUFFIX, ldap.SCOPE_SUBTREE, FILTER, [MEMBEROF_ATTR]) + act_memberof = str(entries).count(MEMBEROF_ATTR) + end = time.time() + cmd_time = int(end - start) + log.info('Loop-{}, expected memberOf attrs: {}, synced: {}, time for search-{} secs'.format(loop, exp_memberof, + act_memberof, cmd_time)) + while act_memberof != exp_memberof: + loop = loop + 1 + time.sleep(30) + start = time.time() + entries = topo.standalone.search_s(SUFFIX, ldap.SCOPE_SUBTREE, FILTER, [MEMBEROF_ATTR]) + act_memberof = str(entries).count(MEMBEROF_ATTR) + end = time.time() + cmd_time = cmd_time + int(end - start) + log.info('Loop-{}, expected memberOf attrs: {}, synced: {}, time for search-{} secs'.format(loop, exp_memberof, + act_memberof, + cmd_time)) + # Worst case scenario, exit the test after 10hrs of wait + if loop > 1200: + log.error('Either syncing memberOf attrs takes too long or some issue with the test itself') + assert False + sync_time = 1 + loop * 30 + log.info('Expected memberOf attrs: {}, Actual memberOf attrs: {}'.format(exp_memberof, act_memberof)) + assert act_memberof == exp_memberof + return sync_time + + +@pytest.mark.parametrize("nof_users, nof_groups, grps_user, ngrps_user, nof_depth", + [(20000, 200, 20, 10, 5), (50000, 500, 50, 10, 10), (100000, 1000, 100, 20, 20)]) +def test_nestgrps_import(topo, memberof_setup, nof_users, nof_groups, grps_user, ngrps_user, nof_depth): + """Import large users and nested groups with N depth and measure the time taken + + :id: 169a09f2-2c2d-4e42-8b90-a0bd1034f278 + :feature: MemberOf Plugin + :setup: Standalone instance, memberOf plugin enabled + :steps: 1. Create LDIF file for given nof_users and nof_groups + 2. Import entries to server + 3. Check if entries are created + 4. Run fixupMemberOf task to create memberOf attributes + 5. 
Check if memberOf attributes are synced for all users and groups + 6. Compare the actual no of memberOf attributes to the expected + 7. Measure the time taken to sync memberOf attributes + :expectedresults: MemberOf attributes should be synced + """ + + exp_memberof = (nof_users * grps_user) + ( + (nof_groups // grps_user) * (ngrps_user // nof_depth) * (nof_depth * (nof_depth + 1)) // 2) + log.info('Create nested ldif file with users-{}, groups-{}, nested-{}'.format(nof_users, nof_groups, nof_depth)) + log.info('Import LDIF file and measure the time taken') + import_time = _nested_import_add_ldif(topo, nof_users, nof_groups, grps_user, ngrps_user, nof_depth, True) + + log.info('Run fixup memberOf task and measure the time taken to complete the task') + fixup_time = _run_fixup_memberof(topo) + + log.info('Check the total number of memberOf entries created for users and groups') + sync_memberof = _sync_memberof_attrs(topo, exp_memberof) + + total_time = import_time + fixup_time + sync_memberof + log.info('Time for import-{}secs, fixup task-{}secs, total time for memberOf sync: {}secs'.format(import_time, + fixup_time, + total_time)) + + +@pytest.mark.parametrize("nof_users, nof_groups, grps_user, ngrps_user, nof_depth", + [(20000, 100, 20, 10, 5), (50000, 200, 50, 10, 10), (100000, 100, 20, 10, 10)]) +def test_nestgrps_add(topo, memberof_setup, nof_users, nof_groups, grps_user, ngrps_user, nof_depth): + """Import large users and nested groups with n depth and measure the time taken + + :id: 6eda75c6-5ae0-4b17-b610-d217d7ec7542 + :feature: MemberOf Plugin + :setup: Standalone instance, memberOf plugin enabled + :steps: 1. Create LDIF file for given nof_users and nof_groups + 2. Add entries using LDAPADD + 3. Check if entries are created + 4. Check if memberOf attributes are synced for all users and groups + 5. Compare the actual no of memberOf attributes to the expected + 6. 
Measure the time taken to sync memberOf attributes + :expectedresults: MemberOf attributes should be created and synced + """ + + exp_memberof = (nof_users * grps_user) + ( + (nof_groups // grps_user) * (ngrps_user // nof_depth) * (nof_depth * (nof_depth + 1)) // 2) + log.info('Creating base_ldif file and importing it to wipe out all users and groups') + _create_base_ldif(topo, True) + log.info('Create nested ldif file with users-{}, groups-{}, nested-{}'.format(nof_users, nof_groups, nof_depth)) + log.info('Run LDAPADD to add entries to Server') + add_time = _nested_import_add_ldif(topo, nof_users, nof_groups, grps_user, ngrps_user, nof_depth, False) + + log.info('Check the total number of memberOf entries created for users and groups') + sync_memberof = _sync_memberof_attrs(topo, exp_memberof) + total_time = add_time + sync_memberof + log.info('Time for ldapadd-{}secs, total time for memberOf sync: {}secs'.format(add_time, total_time)) + + +@pytest.mark.parametrize("nof_users, nof_groups, grps_user, ngrps_user, nof_depth", + [(20000, 200, 20, 10, 5), (50000, 500, 50, 10, 10), (100000, 1000, 100, 20, 20)]) +def test_mod_nestgrp(topo, memberof_setup, nof_users, nof_groups, grps_user, ngrps_user, nof_depth): + """Import bulk entries, modify nested groups at N depth and measure the time taken + + :id: 4bf8e753-6ded-4177-8225-aaf6aef4d131 + :feature: MemberOf Plugin + :setup: Standalone instance, memberOf plugin enabled + :steps: 1. Import bulk entries with nested group and create memberOf attributes + 2. Modify nested groups by adding new members at each nested level + 3. Check new memberOf attributes created for users and groups + 4. Compare the actual memberOf attributes with the expected + 5. 
Measure the time taken to sync memberOf attributes + :expectedresults: MemberOf attributes should be modified and synced + """ + + exp_memberof = (nof_users * grps_user) + ( + (nof_groups // grps_user) * (ngrps_user // nof_depth) * (nof_depth * (nof_depth + 1)) // 2) + log.info('Create nested ldif file, import it and measure the time taken') + import_time = _nested_import_add_ldif(topo, nof_users, nof_groups, grps_user, ngrps_user, nof_depth, True) + log.info('Run fixup memberOf task and measure the time to complete the task') + fixup_time = _run_fixup_memberof(topo) + sync_memberof = _sync_memberof_attrs(topo, exp_memberof) + total_time = import_time + fixup_time + sync_memberof + log.info('Time for import-{}secs, fixup task-{}secs, total time for memberOf sync: {}secs'.format(import_time, + fixup_time, + total_time)) + + log.info('Add {} users to existing nested groups at all depth level'.format(nof_groups)) + log.info('Add one user to each groups at different nest levels') + start = time.time() + for usr in range(nof_groups): + usrrdn = 'newcliusr{}'.format(usr) + userdn = 'uid={},ou=people,{}'.format(usrrdn, SUFFIX) + groupdn = 'cn=group{},ou=groups,{}'.format(usr, SUFFIX) + try: + topo.standalone.add_s(Entry((userdn, { + 'objectclass': 'top person inetUser inetOrgperson'.split(), + 'cn': usrrdn, + 'sn': usrrdn, + 'userpassword': 'Secret123'}))) + except ldap.LDAPError as e: + log.error('Failed to add {} user: error {}'.format(userdn, e.message['desc'])) + raise + try: + topo.standalone.modify_s(groupdn, [(ldap.MOD_ADD, 'member', userdn)]) + except ldap.LDAPError as e: + log.error('Error-{}: Failed to add user to group'.format(e.message['desc'])) + assert False + end = time.time() + cmd_time = int(end - start) + + exp_memberof = (nof_users * grps_user) + nof_groups + ( + (nof_groups // grps_user) * (ngrps_user // nof_depth) * (nof_depth * (nof_depth + 1))) + log.info('Check the total number of memberOf entries created for users and groups') + sync_memberof = 
_sync_memberof_attrs(topo, exp_memberof) + total_time = cmd_time + sync_memberof + log.info('Time taken add new members to existing nested groups + memberOf sync: {} secs'.format(total_time)) + + +@pytest.mark.parametrize("nof_users, nof_groups, grps_user, ngrps_user, nof_depth", + [(20000, 200, 20, 10, 5), (50000, 500, 50, 10, 10), (100000, 1000, 100, 20, 20)]) +def test_del_nestgrp(topo, memberof_setup, nof_users, nof_groups, grps_user, ngrps_user, nof_depth): + """Import bulk entries, delete nested groups at N depth and measure the time taken + + :id: d3d82ac5-d968-4cd6-a268-d380fc9fd51b + :feature: MemberOf Plugin + :setup: Standalone instance, memberOf plugin enabled + :steps: 1. Import bulk users and groups with nested level N. + 2. Run fixup memberOf task to create memberOf attributes + 3. Delete nested groups at nested level N + 4. Check memberOf attributes deleted for users and groups + 5. Compare the actual memberOf attributes with the expected + 6. Measure the time taken to sync memberOf attributes + :expectedresults: MemberOf attributes should be deleted and synced + """ + + exp_memberof = (nof_users * grps_user) + ( + (nof_groups // grps_user) * (ngrps_user // nof_depth) * (nof_depth * (nof_depth + 1)) // 2) + log.info('Create nested ldif file, import it and measure the time taken') + import_time = _nested_import_add_ldif(topo, nof_users, nof_groups, grps_user, ngrps_user, nof_depth, True) + log.info('Run fixup memberOf task and measure the time to complete the task') + fixup_time = _run_fixup_memberof(topo) + sync_memberof = _sync_memberof_attrs(topo, exp_memberof) + total_time = import_time + fixup_time + sync_memberof + log.info('Time taken to complete add users + memberOf sync: {} secs'.format(total_time)) + + log.info('Delete {} groups from nested groups at depth level-{}'.format(nof_depth, nof_depth)) + start = time.time() + for nos in range(nof_depth, nof_groups, grps_user): + groupdn = 'cn=group{},ou=groups,{}'.format(nos, SUFFIX) + try: + 
topo.standalone.delete_s(groupdn) + except ldap.LDAPError as e: + log.error('Error-{}: Failed to delete group'.format(e.message['desc'])) + assert False + end = time.time() + cmd_time = int(end - start) + + exp_memberof = exp_memberof - (nof_users + (nof_depth * (nof_groups // grps_user))) + log.info('Check memberOf attributes after deleting groups at depth-{}'.format(nof_depth)) + sync_memberof = _sync_memberof_attrs(topo, exp_memberof) + total_time = cmd_time + sync_memberof + log.info('Time taken to delete and sync memberOf attributes: {}secs'.format(total_time)) + + +if __name__ == '__main__': + # Run isolated + # -s for DEBUG mode + CURRENT_FILE = os.path.realpath(__file__) + pytest.main("-s {}".format(CURRENT_FILE)) diff --git a/dirsrvtests/tests/perf/search_performance_test.py b/dirsrvtests/tests/perf/search_performance_test.py new file mode 100644 index 0000000..bad54f4 --- /dev/null +++ b/dirsrvtests/tests/perf/search_performance_test.py @@ -0,0 +1,161 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2018 Red Hat, Inc. +# Copyright (C) 2019 William Brown +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. +# --- END COPYRIGHT BLOCK --- +# + +# Performance tests look different to others, they require some extra +# environmental settings. 
+ +import ldap +import os +from lib389 import DirSrv +from lib389._constants import DEFAULT_SUFFIX + +from lib389.topologies import topology_st as topology + +from lib389.idm.domain import Domain +from lib389.idm.group import Groups +from lib389.idm.user import nsUserAccounts +from lib389.backend import Backends + +from lib389.ldclt import Ldclt +import time + +# We want to write a CSV such as: +# category,1 thread,4 thread,8 thread,16 thread +# testcategory,500,800,1000,2000 +# testcategory2,500,800,1000,2000 +TEST_MARKER = 'configured: search_performance_test.py' +# GROUP_MAX = 4000 +# USER_MAX = 6000 + +GROUP_MAX = 4000 +USER_MAX = 6000 + +TARGET_HOST = os.environ.get('PERF_TARGET_HOST', 'localhost') +TARGET_PORT = os.environ.get('PERF_TARGET_PORT', '389') + +def assert_data_present(inst): + # Do we have the backend marker? + d = Domain(inst, DEFAULT_SUFFIX) + try: + desc = d.get_attr_val_utf8('description') + if desc == TEST_MARKER: + return + except: + # Just reset everything. + pass + # Reset the backends + bes = Backends(inst) + try: + be = bes.get(DEFAULT_SUFFIX) + be.delete() + except: + pass + + be = bes.create(properties={ + 'nsslapd-suffix': DEFAULT_SUFFIX, + 'cn': 'userRoot', + }) + be.create_sample_entries('001004002') + + # Load our data + # We can't use dbgen as that relies on local access :( + + # Add 40,000 groups + groups = Groups(inst, DEFAULT_SUFFIX) + for i in range(1,GROUP_MAX): + rdn = 'group_{0:07d}'.format(i) + groups.create(properties={ + 'cn': rdn, + }) + + # Add 60,000 users + users = nsUserAccounts(inst, DEFAULT_SUFFIX) + for i in range(1,USER_MAX): + rdn = 'user_{0:07d}'.format(i) + users.create(properties={ + 'uid': rdn, + 'cn': rdn, + 'displayName': rdn, + 'uidNumber' : '%s' % i, + 'gidNumber' : '%s' % i, + 'homeDirectory' : '/home/%s' % rdn, + 'userPassword': rdn, + }) + + # Add the marker + d.replace('description', TEST_MARKER) + # Done! 
+ +# Single uid +# 1000 uid +# 4000 uid +# 5000 uid +# 10,000 uid + +# & of single uid +# & of two 1000 uid sets +# & of two 4000 uid sets +# & of two 5000 uid sets +# & of two 10,000 uid sets + +# | of single uid +# | of two 1000 uid sets +# | of two 4000 uid sets +# | of two 5000 uid sets +# | of two 10,000 uid sets + +# & of user and group + +# | of user and group + +def _do_search_performance(inst, thread_count): + # Configure thread count + # Restart + print("Configuring with %s threads ..." % thread_count) + time.sleep(1) + inst.config.set('nsslapd-threadnumber', str(thread_count)) + inst.restart() + ld = Ldclt(inst) + out = ld.search_loadtest(DEFAULT_SUFFIX, "(uid=user_XXXXXXX)", min=1, max=USER_MAX) + return out + +# Need a check here +def test_user_search_performance(): + inst = DirSrv(verbose=True) + inst.remote_simple_allocate( + f"ldaps://{TARGET_HOST}", + password="password" + ) + # Need a better way to set this. + inst.host = TARGET_HOST + inst.port = TARGET_PORT + inst.open(reqcert=ldap.OPT_X_TLS_NEVER) + assert_data_present(inst) + r1 = _do_search_performance(inst, 1) + # r2 = _do_search_performance(inst, 4) + # r3 = _do_search_performance(inst, 6) + # r4 = _do_search_performance(inst, 8) + # r5 = _do_search_performance(inst, 12) + r6 = _do_search_performance(inst, 16) + # print("category,t1,t4,t6,t8,t12,t16") + # print("search,%s,%s,%s,%s,%s,%s" % (r1, r2, r3, r4, r5, r6)) + +def test_group_search_performance(): + pass + +## TODO +# Tweak cache levels +# turbo mode +# ldclt threads = 2x server? 
+# add perf logs to each test + + + + diff --git a/dirsrvtests/tests/stress/README b/dirsrvtests/tests/stress/README new file mode 100644 index 0000000..460e438 --- /dev/null +++ b/dirsrvtests/tests/stress/README @@ -0,0 +1,13 @@ +README for "Stress" Tests + +Reliablity Tests +============================== + +A generic high load, long running tests + +reliab7_5_test.py +------------------------------ + +This script is a light-weight version of the legacy TET stress test called "Reliabilty 15". This test consists of two MMR Suppliers, and a 5000 entry database. The test starts off with two threads doing unindexed searchesi(1 for each supplier). These do not exit untl the entire test completes. Then while the unindexed searches are going on, the test performs a set of adds, mods, deletes, and modrdns on each supplier at the same time. It performs this set of operations 1000 times. The main goal of this script is to test stablilty, replication convergence, and memory growth/fragmentation. + +Known issue: the server can deadlock in the libdb4 code while performing modrdns(under investigation via https://fedorahosted.org/389/ticket/48166) diff --git a/dirsrvtests/tests/stress/__init__.py b/dirsrvtests/tests/stress/__init__.py new file mode 100644 index 0000000..40a96af --- /dev/null +++ b/dirsrvtests/tests/stress/__init__.py @@ -0,0 +1 @@ +# -*- coding: utf-8 -*- diff --git a/dirsrvtests/tests/stress/cos/cos_scale_template_test.py b/dirsrvtests/tests/stress/cos/cos_scale_template_test.py new file mode 100644 index 0000000..352ad0a --- /dev/null +++ b/dirsrvtests/tests/stress/cos/cos_scale_template_test.py @@ -0,0 +1,150 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2017 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. 
+# --- END COPYRIGHT BLOCK --- +# + +import pytest + +from lib389.topologies import topology_st + +from lib389.plugins import ClassOfServicePlugin +from lib389.cos import CosIndirectDefinitions, CosTemplates, CosTemplate +from lib389.idm.user import UserAccounts, TEST_USER_PROPERTIES +from lib389.idm.organizationalunit import OrganizationalUnits + +from lib389._constants import DEFAULT_SUFFIX + +import time + +pytestmark = pytest.mark.tier3 + +# Given this should complete is about 0.005, this is generous. +# For the final test with 20 templates, about 0.02 is an acceptable time. +THRESHOLD = 0.05 + +class OUCosTemplate(CosTemplate): + def __init__(self, instance, dn=None): + """Create a OU specific cos template to replicate a specific user setup. + This template provides ou attrs onto the target entry. + + :param instance: A dirsrv instance + :type instance: DirSrv + :param dn: The dn of the template + :type dn: str + """ + super(OUCosTemplate, self).__init__(instance, dn) + self._rdn_attribute = 'ou' + self._must_attributes = ['ou'] + self._create_objectclasses = [ + 'top', + 'cosTemplate', + 'organizationalUnit', + ] + +class OUCosTemplates(CosTemplates): + def __init__(self, instance, basedn, rdn=None): + """Create an OU specific cos templates to replicate a specific use setup. + This costemplates object allows access to the OUCosTemplate types. + + :param instance: A dirsrv instance + :type instance: DirSrv + :param basedn: The basedn of the templates + :type basedn: str + :param rdn: The rdn of the templates + :type rdn: str + """ + super(OUCosTemplates, self).__init__(instance, basedn, rdn) + self._objectclasses = [ + 'cosTemplate', + 'organizationalUnit', + ] + self._filterattrs = ['ou'] + self._childobject = OUCosTemplate + +def test_indirect_template_scale(topology_st): + """Test that cos templates can be added at a reasonable scale + + :id: 7cbcdf22-1f9c-4222-9e76-685fe374fc20 + :steps: + 1. Enable COS plugin + 2. Create the test user + 3. 
Add an indirect cos template + 4. Add a cos template + 5. Add the user to the cos template and assert it works. + 6. Add 25,000 templates to the database + 7. Search the user. It should not exceed THRESHOLD. + :expectedresults: + 1. It is enabled. + 2. It is created. + 3. Is is created. + 4. It is created. + 5. It is valid. + 6. They are created. + 7. It is fast. + """ + + cos_plugin = ClassOfServicePlugin(topology_st.standalone) + cos_plugin.enable() + + topology_st.standalone.restart() + + # Now create, the indirect specifier, and a user to template onto. + users = UserAccounts(topology_st.standalone, DEFAULT_SUFFIX) + user = users.create(properties=TEST_USER_PROPERTIES) + + cos_inds = CosIndirectDefinitions(topology_st.standalone, DEFAULT_SUFFIX) + cos_ind = cos_inds.create(properties={ + 'cn' : 'cosIndirectDef', + 'cosIndirectSpecifier': 'seeAlso', + 'cosAttribute': [ + 'ou merge-schemes', + 'description merge-schemes', + 'postalCode merge-schemes', + ], + }) + + ous = OrganizationalUnits(topology_st.standalone, DEFAULT_SUFFIX) + ou_temp = ous.create(properties={'ou': 'templates'}) + cos_temps = OUCosTemplates(topology_st.standalone, ou_temp.dn) + + cos_temp_u = cos_temps.create(properties={ + 'ou' : 'ou_temp_u', + 'description' : 'desc_temp_u', + 'postalCode': '0' + }) + # Edit the user to add the seeAlso ... + user.set('seeAlso', cos_temp_u.dn) + + # Now create 25,0000 templates, they *don't* need to apply to the user though! + for i in range(1, 25001): + cos_temp_u = cos_temps.create(properties={ + 'ou' : 'ou_temp_%s' % i, + 'description' : 'desc_temp_%s' % i, + 'postalCode': '%s' % i + }) + + if i % 500 == 0: + start_time = time.monotonic() + u_search = users.get('testuser') + attrs = u_search.get_attr_vals_utf8('postalCode') + end_time = time.monotonic() + diff_time = end_time - start_time + assert diff_time < THRESHOLD + + if i == 10000: + # Now add our user to this template also. 
+ user.add('seeAlso', cos_temp_u.dn) + + start_time = time.monotonic() + attrs_after = u_search.get_attr_vals_utf8('postalCode') + end_time = time.monotonic() + diff_time = end_time - start_time + assert(set(attrs) < set(attrs_after)) + assert diff_time < THRESHOLD + + + diff --git a/dirsrvtests/tests/stress/reliabilty/__init__.py b/dirsrvtests/tests/stress/reliabilty/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/dirsrvtests/tests/stress/reliabilty/reliab_7_5_test.py b/dirsrvtests/tests/stress/reliabilty/reliab_7_5_test.py new file mode 100644 index 0000000..0693c6a --- /dev/null +++ b/dirsrvtests/tests/stress/reliabilty/reliab_7_5_test.py @@ -0,0 +1,576 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2016 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. +# --- END COPYRIGHT BLOCK --- + +import sys +import time +import ldap +import logging +import pytest +import threading +import random +from lib389 import DirSrv, Entry +from lib389._constants import * +from lib389.properties import * +from lib389.tasks import * +from lib389.utils import * + +from lib389.idm.directorymanager import DirectoryManager + +pytestmark = pytest.mark.tier3 + +logging.getLogger(__name__).setLevel(logging.DEBUG) +formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s' + + ' - %(message)s') +handler = logging.StreamHandler() +handler.setFormatter(formatter) +log = logging.getLogger(__name__) +log.addHandler(handler) + +installation1_prefix = None +NUM_USERS = 5000 +MAX_PASSES = 1000 +CHECK_CONVERGENCE = True +ENABLE_VALGRIND = False +RUNNING = True + +DEBUGGING = os.getenv('DEBUGGING', default=False) + +class TopologyReplication(object): + def __init__(self, supplier1, supplier2): + supplier1.open() + self.supplier1 = supplier1 + supplier2.open() + self.supplier2 = supplier2 + + +@pytest.fixture(scope="module") +def topology(request): + global installation1_prefix + if 
installation1_prefix: + args_instance[SER_DEPLOYED_DIR] = installation1_prefix + + # Creating supplier 1... + supplier1 = DirSrv(verbose=DEBUGGING) + args_instance[SER_HOST] = HOST_SUPPLIER_1 + args_instance[SER_PORT] = PORT_SUPPLIER_1 + args_instance[SER_SECURE_PORT] = SECUREPORT_SUPPLIER_1 + args_instance[SER_SERVERID_PROP] = SERVERID_SUPPLIER_1 + args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX + args_supplier = args_instance.copy() + supplier1.allocate(args_supplier) + instance_supplier1 = supplier1.exists() + if instance_supplier1: + supplier1.delete() + supplier1.create() + supplier1.open() + supplier1.replica.enableReplication(suffix=SUFFIX, role=ReplicaRole.SUPPLIER, + replicaId=REPLICAID_SUPPLIER_1) + + # Creating supplier 2... + supplier2 = DirSrv(verbose=DEBUGGING) + args_instance[SER_HOST] = HOST_SUPPLIER_2 + args_instance[SER_PORT] = PORT_SUPPLIER_2 + args_instance[SER_SECURE_PORT] = SECUREPORT_SUPPLIER_2 + args_instance[SER_SERVERID_PROP] = SERVERID_SUPPLIER_2 + args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX + args_supplier = args_instance.copy() + supplier2.allocate(args_supplier) + instance_supplier2 = supplier2.exists() + if instance_supplier2: + supplier2.delete() + supplier2.create() + supplier2.open() + supplier2.replica.enableReplication(suffix=SUFFIX, role=ReplicaRole.SUPPLIER, + replicaId=REPLICAID_SUPPLIER_2) + + # + # Create all the agreements + # + # Creating agreement from supplier 1 to supplier 2 + properties = {RA_NAME: r'meTo_$host:$port', + RA_BINDDN: defaultProperties[REPLICATION_BIND_DN], + RA_BINDPW: defaultProperties[REPLICATION_BIND_PW], + RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD], + RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]} + m1_m2_agmt = supplier1.agreement.create(suffix=SUFFIX, host=supplier2.host, + port=supplier2.port, + properties=properties) + if not m1_m2_agmt: + log.fatal("Fail to create a supplier -> supplier replica agreement") + sys.exit(1) + log.debug("%s created" % m1_m2_agmt) 
+ + # Creating agreement from supplier 2 to supplier 1 + properties = {RA_NAME: r'meTo_$host:$port', + RA_BINDDN: defaultProperties[REPLICATION_BIND_DN], + RA_BINDPW: defaultProperties[REPLICATION_BIND_PW], + RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD], + RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]} + m2_m1_agmt = supplier2.agreement.create(suffix=SUFFIX, host=supplier1.host, + port=supplier1.port, + properties=properties) + if not m2_m1_agmt: + log.fatal("Fail to create a supplier -> supplier replica agreement") + sys.exit(1) + log.debug("%s created" % m2_m1_agmt) + + # Allow the replicas to get situated with the new agreements... + time.sleep(5) + + # + # Import tests entries into supplier1 before we initialize supplier2 + # + ldif_dir = supplier1.get_ldif_dir() + + import_ldif = ldif_dir + '/rel7.5-entries.ldif' + + # First generate an ldif + try: + ldif = open(import_ldif, 'w') + except IOError as e: + log.fatal('Failed to create test ldif, error: %s - %s' % + (e.errno, e.strerror)) + assert False + + # Create the root node + ldif.write('dn: ' + DEFAULT_SUFFIX + '\n') + ldif.write('objectclass: top\n') + ldif.write('objectclass: domain\n') + ldif.write('dc: example\n') + ldif.write('\n') + + # Create the entries + idx = 0 + while idx < NUM_USERS: + count = str(idx) + ldif.write('dn: uid=supplier1_entry' + count + ',' + + DEFAULT_SUFFIX + '\n') + ldif.write('objectclass: top\n') + ldif.write('objectclass: person\n') + ldif.write('objectclass: inetorgperson\n') + ldif.write('objectclass: organizationalperson\n') + ldif.write('uid: supplier1_entry' + count + '\n') + ldif.write('cn: supplier1 entry' + count + '\n') + ldif.write('givenname: supplier1 ' + count + '\n') + ldif.write('sn: entry ' + count + '\n') + ldif.write('userpassword: supplier1_entry' + count + '\n') + ldif.write('description: ' + 'a' * random.randint(1, 1000) + '\n') + ldif.write('\n') + + ldif.write('dn: uid=supplier2_entry' + count + ',' + + DEFAULT_SUFFIX + '\n') + 
ldif.write('objectclass: top\n') + ldif.write('objectclass: person\n') + ldif.write('objectclass: inetorgperson\n') + ldif.write('objectclass: organizationalperson\n') + ldif.write('uid: supplier2_entry' + count + '\n') + ldif.write('cn: supplier2 entry' + count + '\n') + ldif.write('givenname: supplier2 ' + count + '\n') + ldif.write('sn: entry ' + count + '\n') + ldif.write('userpassword: supplier2_entry' + count + '\n') + ldif.write('description: ' + 'a' * random.randint(1, 1000) + '\n') + ldif.write('\n') + idx += 1 + + ldif.close() + + # Now import it + try: + supplier1.tasks.importLDIF(suffix=DEFAULT_SUFFIX, input_file=import_ldif, + args={TASK_WAIT: True}) + except ValueError: + log.fatal('test_reliab_7.5: Online import failed') + assert False + + # + # Initialize all the agreements + # + supplier1.agreement.init(SUFFIX, HOST_SUPPLIER_2, PORT_SUPPLIER_2) + supplier1.waitForReplInit(m1_m2_agmt) + + # Check replication is working... + if supplier1.testReplication(DEFAULT_SUFFIX, supplier2): + log.info('Replication is working.') + else: + log.fatal('Replication is not working.') + assert False + + # Clear out the tmp dir + supplier1.clearTmpDir(__file__) + + # Delete each instance in the end + def fin(): + supplier1.delete() + supplier2.delete() + if ENABLE_VALGRIND: + sbin_dir = get_sbin_dir(prefix=supplier1.prefix) + valgrind_disable(sbin_dir) + request.addfinalizer(fin) + + return TopologyReplication(supplier1, supplier2) + + +class AddDelUsers(threading.Thread): + def __init__(self, inst, supplierid): + threading.Thread.__init__(self) + self.daemon = True + self.inst = inst + self.id = supplierid + + def run(self): + # Add 5000 entries + idx = 0 + RDN = 'uid=add_del_supplier_' + self.id + '-' + + conn = DirectoryManager(self.inst).bind() + + while idx < NUM_USERS: + USER_DN = RDN + str(idx) + ',' + DEFAULT_SUFFIX + try: + conn.add_s(Entry((USER_DN, {'objectclass': + 'top extensibleObject'.split(), + 'uid': 'user' + str(idx), + 'cn': 'g' * random.randint(1, 
500) + }))) + except ldap.LDAPError as e: + log.fatal('Add users to supplier ' + self.id + ' failed (' + + USER_DN + ') error: ' + e.message['desc']) + idx += 1 + conn.close() + + # Delete 5000 entries + conn = DirectoryManager(self.inst).bind() + idx = 0 + while idx < NUM_USERS: + USER_DN = RDN + str(idx) + ',' + DEFAULT_SUFFIX + try: + conn.delete_s(USER_DN) + except ldap.LDAPError as e: + log.fatal('Failed to delete (' + USER_DN + ') on supplier ' + + self.id + ': error ' + e.message['desc']) + idx += 1 + conn.close() + + +class ModUsers(threading.Thread): + # Do mods and modrdns + def __init__(self, inst, supplierid): + threading.Thread.__init__(self) + self.daemon = True + self.inst = inst + self.id = supplierid + + def run(self): + # Mod existing entries + conn = DirectoryManager(self.inst).bind() + idx = 0 + while idx < NUM_USERS: + USER_DN = ('uid=supplier' + self.id + '_entry' + str(idx) + ',' + + DEFAULT_SUFFIX) + try: + conn.modify(USER_DN, [(ldap.MOD_REPLACE, + 'givenname', + 'new givenname supplier1-' + str(idx))]) + except ldap.LDAPError as e: + log.fatal('Failed to modify (' + USER_DN + ') on supplier ' + + self.id + ': error ' + e.message['desc']) + idx += 1 + conn.close() + + # Modrdn existing entries + conn = DirectoryManager(self.inst).bind() + idx = 0 + while idx < NUM_USERS: + USER_DN = ('uid=supplier' + self.id + '_entry' + str(idx) + ',' + + DEFAULT_SUFFIX) + NEW_RDN = 'cn=supplier' + self.id + '_entry' + str(idx) + try: + conn.rename_s(USER_DN, NEW_RDN, delold=1) + except ldap.LDAPError as e: + log.error('Failed to modrdn (' + USER_DN + ') on supplier ' + + self.id + ': error ' + e.message['desc']) + idx += 1 + conn.close() + + # Undo modrdn to we can rerun this test + conn = DirectoryManager(self.inst).bind() + idx = 0 + while idx < NUM_USERS: + USER_DN = ('cn=supplier' + self.id + '_entry' + str(idx) + ',' + + DEFAULT_SUFFIX) + NEW_RDN = 'uid=supplier' + self.id + '_entry' + str(idx) + try: + conn.rename_s(USER_DN, NEW_RDN, delold=1) + 
except ldap.LDAPError as e: + log.error('Failed to modrdn (' + USER_DN + ') on supplier ' + + self.id + ': error ' + e.message['desc']) + idx += 1 + conn.close() + + +class DoSearches(threading.Thread): + # Search a supplier + def __init__(self, inst, supplierid): + threading.Thread.__init__(self) + self.daemon = True + self.inst = inst + self.id = supplierid + + def run(self): + # Equality + conn = DirectoryManager(self.inst).bind() + idx = 0 + while idx < NUM_USERS: + search_filter = ('(|(uid=supplier' + self.id + '_entry' + str(idx) + + ')(cn=supplier' + self.id + '_entry' + str(idx) + + '))') + try: + conn.search(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE, search_filter) + except ldap.LDAPError as e: + log.fatal('Search Users: Search failed (%s): %s' % + (search_filter, e.message['desc'])) + conn.close() + return + + idx += 1 + conn.close() + + # Substring + conn = DirectoryManager(self.inst).bind() + idx = 0 + while idx < NUM_USERS: + search_filter = ('(|(uid=supplier' + self.id + '_entry' + str(idx) + + '*)(cn=supplier' + self.id + '_entry' + str(idx) + + '*))') + try: + conn.search(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE, search_filter) + except ldap.LDAPError as e: + log.fatal('Search Users: Search failed (%s): %s' % + (search_filter, e.message['desc'])) + conn.close() + return + + idx += 1 + conn.close() + + +class DoFullSearches(threading.Thread): + # Search a supplier + def __init__(self, inst): + threading.Thread.__init__(self) + self.daemon = True + self.inst = inst + + def run(self): + global RUNNING + conn = DirectoryManager(self.inst).bind() + while RUNNING: + time.sleep(2) + try: + conn.search_s(DEFAULT_SUFFIX, + ldap.SCOPE_SUBTREE, + 'objectclass=top') + except ldap.LDAPError as e: + log.fatal('Full Search Users: Search failed (%s): %s' % + ('objectclass=*', e.message['desc'])) + conn.close() + assert False + + conn.close() + + +def test_reliab7_5_init(topology): + ''' + Reduce entry cache - to increase the cache churn + + Then process "reliability 15" type 
tests + ''' + + BACKEND_DN = 'cn=userroot,cn=ldbm database,cn=plugins,cn=config' + + # Update supplier 1 + try: + topology.supplier1.modify_s(BACKEND_DN, [(ldap.MOD_REPLACE, + 'nsslapd-cachememsize', + '512000'), + (ldap.MOD_REPLACE, + 'nsslapd-cachesize', + '500')]) + except ldap.LDAPError as e: + log.fatal('Failed to set cache settings: error ' + e.message['desc']) + assert False + + # Update supplier 2 + try: + topology.supplier2.modify_s(BACKEND_DN, [(ldap.MOD_REPLACE, + 'nsslapd-cachememsize', + '512000'), + (ldap.MOD_REPLACE, + 'nsslapd-cachesize', + '500')]) + except ldap.LDAPError as e: + log.fatal('Failed to set cache settings: error ' + e.message['desc']) + assert False + + # Restart the suppliers to pick up the new cache settings + topology.supplier1.stop(timeout=10) + topology.supplier2.stop(timeout=10) + + # This is the time to enable valgrind (if enabled) + if ENABLE_VALGRIND: + sbin_dir = get_sbin_dir(prefix=topology.supplier1.prefix) + valgrind_enable(sbin_dir) + + topology.supplier1.start(timeout=30) + topology.supplier2.start(timeout=30) + + +def test_reliab7_5_run(topology): + ''' + Starting issuing adds, deletes, mods, modrdns, and searches + ''' + global RUNNING + count = 1 + RUNNING = True + + # Start some searches to run through the entire stress test + fullSearch1 = DoFullSearches(topology.supplier1) + fullSearch1.start() + fullSearch2 = DoFullSearches(topology.supplier2) + fullSearch2.start() + + while count <= MAX_PASSES: + log.info('################## Reliabilty 7.5 Pass: %d' % count) + + # Supplier 1 + add_del_users1 = AddDelUsers(topology.supplier1, '1') + add_del_users1.start() + mod_users1 = ModUsers(topology.supplier1, '1') + mod_users1.start() + search1 = DoSearches(topology.supplier1, '1') + search1.start() + + # Supplier 2 + add_del_users2 = AddDelUsers(topology.supplier2, '2') + add_del_users2.start() + mod_users2 = ModUsers(topology.supplier2, '2') + mod_users2.start() + search2 = DoSearches(topology.supplier2, '2') + 
search2.start() + + # Search the suppliers + search3 = DoSearches(topology.supplier1, '1') + search3.start() + search4 = DoSearches(topology.supplier2, '2') + search4.start() + + # Wait for threads to finish + log.info('################## Waiting for threads to finish...') + add_del_users1.join() + mod_users1.join() + add_del_users2.join() + mod_users2.join() + log.info('################## Update threads finished.') + search1.join() + search2.join() + search3.join() + search4.join() + log.info('################## All threads finished.') + + # Allow some time for replication to catch up before firing + # off the next round of updates + time.sleep(5) + count += 1 + + # + # Wait for replication to converge + # + if CHECK_CONVERGENCE: + # Add an entry to each supplier, and wait for it to replicate + SUPPLIER1_DN = 'uid=rel7.5-supplier1,' + DEFAULT_SUFFIX + SUPPLIER2_DN = 'uid=rel7.5-supplier2,' + DEFAULT_SUFFIX + + # Supplier 1 + try: + topology.supplier1.add_s(Entry((SUPPLIER1_DN, {'objectclass': + ['top', + 'extensibleObject'], + 'sn': '1', + 'cn': 'user 1', + 'uid': 'rel7.5-supplier1', + 'userpassword': + PASSWORD}))) + except ldap.LDAPError as e: + log.fatal('Failed to add replication test entry ' + SUPPLIER1_DN + + ': error ' + e.message['desc']) + assert False + + log.info('################## Waiting for supplier 2 to converge...') + + while True: + entry = None + try: + entry = topology.supplier2.search_s(SUPPLIER1_DN, + ldap.SCOPE_BASE, + 'objectclass=*') + except ldap.NO_SUCH_OBJECT: + pass + except ldap.LDAPError as e: + log.fatal('Search Users: Search failed (%s): %s' % + (SUPPLIER1_DN, e.message['desc'])) + assert False + if entry: + break + time.sleep(5) + + log.info('################## Supplier 2 converged.') + + # Supplier 2 + try: + topology.supplier2.add_s( + Entry((SUPPLIER2_DN, {'objectclass': ['top', + 'extensibleObject'], + 'sn': '1', + 'cn': 'user 1', + 'uid': 'rel7.5-supplier2', + 'userpassword': PASSWORD}))) + except ldap.LDAPError as e: + 
log.fatal('Failed to add replication test entry ' + SUPPLIER1_DN + + ': error ' + e.message['desc']) + assert False + + log.info('################## Waiting for supplier 1 to converge...') + while True: + entry = None + try: + entry = topology.supplier1.search_s(SUPPLIER2_DN, + ldap.SCOPE_BASE, + 'objectclass=*') + except ldap.NO_SUCH_OBJECT: + pass + except ldap.LDAPError as e: + log.fatal('Search Users: Search failed (%s): %s' % + (SUPPLIER2_DN, e.message['desc'])) + assert False + if entry: + break + time.sleep(5) + + log.info('################## Supplier 1 converged.') + + # Stop the full searches + RUNNING = False + fullSearch1.join() + fullSearch2.join() + + +if __name__ == '__main__': + # Run isolated + # -s for DEBUG mode + CURRENT_FILE = os.path.realpath(__file__) + pytest.main("-s %s" % CURRENT_FILE) diff --git a/dirsrvtests/tests/stress/reliabilty/reliab_conn_test.py b/dirsrvtests/tests/stress/reliabilty/reliab_conn_test.py new file mode 100644 index 0000000..35e377f --- /dev/null +++ b/dirsrvtests/tests/stress/reliabilty/reliab_conn_test.py @@ -0,0 +1,235 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2022 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. 
+# --- END COPYRIGHT BLOCK --- +# +import os +import sys +import time +import ldap +import logging +import pytest +import signal +import threading +from lib389.tools import DirSrvTools +from lib389._constants import * +from lib389.properties import * +from lib389.tasks import * +from lib389.utils import * +from lib389.idm.directorymanager import DirectoryManager +from lib389.idm.user import UserAccounts +from lib389.topologies import topology_st + +pytestmark = pytest.mark.tier3 + +logging.getLogger(__name__).setLevel(logging.DEBUG) +log = logging.getLogger(__name__) + +MAX_CONNS = 10000000 +MAX_THREADS = 20 +STOP = False +HOSTNAME = DirSrvTools.getLocalhost() +PORT = 389 +NUNC_STANS = False + + +def signalHandler(signal, frame): + """ + handle control-C cleanly + """ + global STOP + STOP = True + sys.exit(0) + + +def init(inst): + """Set the idle timeout, and add sample entries + """ + + inst.config.set('nsslapd-idletimeout', '5') + if NUNC_STANS: + inst.config.set('nsslapd-enable-nunc-stans', 'on') + inst.restart() + + users = UserAccounts(inst, DEFAULT_SUFFIX) + for idx in range(0, 9): + user = users.create_test_user(uid=str(idx), gid=str(idx)) + user.reset_password('password') + + +class BindOnlyConn(threading.Thread): + """This class opens and closes connections + """ + def __init__(self, inst): + """Initialize the thread class with the server instance info""" + threading.Thread.__init__(self) + self.daemon = True + self.inst = inst + + def run(self): + """Keep opening and closing connections""" + idx = 0 + err_count = 0 + global STOP + while idx < MAX_CONNS and not STOP: + try: + conn = DirectoryManager(self.inst).bind(connOnly=True) + conn.unbind_s() + time.sleep(.2) + err_count = 0 + except ldap.LDAPError as e: + err_count += 1 + if err_count > 3: + log.error('BindOnlyConn exiting thread: %s' % + (str(e))) + return + time.sleep(.4) + idx += 1 + + +class IdleConn(threading.Thread): + """This class opens and closes connections + """ + def __init__(self, 
inst): + """Initialize the thread class with the server instance info""" + threading.Thread.__init__(self) + self.daemon = True + self.inst = inst + + def run(self): + """Assume idleTimeout is set to less than 10 seconds + """ + idx = 0 + err_count = 0 + global STOP + while idx < (MAX_CONNS / 10) and not STOP: + try: + conn = self.inst.clone() + conn.simple_bind_s('uid=test_user_0,dc=example,dc=com', 'password') + conn.search_s('dc=example,dc=com', ldap.SCOPE_SUBTREE, + 'uid=*') + time.sleep(10) + conn.search_s('dc=example,dc=com', ldap.SCOPE_SUBTREE, + 'cn=*') + conn.unbind_s() + time.sleep(.2) + err_count = 0 + except ldap.LDAPError as e: + err_count += 1 + if err_count > 3: + log.error('IdleConn exiting thread: %s' % + (str(e))) + return + time.sleep(.4) + idx += 1 + + +class LongConn(threading.Thread): + """This class opens and closes connections to a specified server + """ + def __init__(self, inst): + """Initialize the thread class with the server instance info""" + threading.Thread.__init__(self) + self.daemon = True + self.inst = inst + + def run(self): + """Assume idleTimeout is set to less than 10 seconds + """ + idx = 0 + err_count = 0 + global STOP + while idx < MAX_CONNS and not STOP: + try: + conn = self.inst.clone() + conn.search_s('dc=example,dc=com', ldap.SCOPE_SUBTREE, + 'objectclass=*') + conn.search_s('dc=example,dc=com', ldap.SCOPE_SUBTREE, + 'uid=mark') + conn.search_s('dc=example,dc=com', ldap.SCOPE_SUBTREE, + 'cn=*') + conn.search_s('', ldap.SCOPE_BASE, 'objectclass=*') + conn.unbind_s() + time.sleep(.2) + err_count = 0 + except ldap.LDAPError as e: + err_count += 1 + if err_count > 3: + log.error('LongConn search exiting thread: %s' % + (str(e))) + return + time.sleep(.4) + idx += 1 + + +def test_connection_load(topology_st): + """Send the server a variety of connections using many threads: + - Open, Bind, Close + - Open, Bind, Search, wait to trigger idletimeout, Search, Close + - Open, Bind, Search, Search, Search, Close + """ + + # setup 
the control-C signal handler + signal.signal(signal.SIGINT, signalHandler) + + # Set the config and add sample entries + log.info('Initializing setup...') + init(topology_st.standalone) + + # + # Bind/Unbind Conn Threads + # + log.info('Launching Bind-Only Connection threads...') + threads = [] + idx = 0 + while idx < MAX_THREADS: + threads.append(BindOnlyConn(topology_st.standalone)) + idx += 1 + for thread in threads: + thread.start() + time.sleep(0.1) + + # + # Idle Conn Threads + # + log.info('Launching Idle Connection threads...') + idx = 0 + idle_threads = [] + while idx < MAX_THREADS: + idle_threads.append(IdleConn(topology_st.standalone)) + idx += 1 + for thread in idle_threads: + thread.start() + time.sleep(0.1) + + # + # Long Conn Threads + # + log.info('Launching Long Connection threads...') + idx = 0 + long_threads = [] + while idx < MAX_THREADS: + long_threads.append(LongConn(topology_st.standalone)) + idx += 1 + for thread in long_threads: + thread.start() + time.sleep(0.1) + + # + # Now wait for all the threads to complete + # + log.info('Waiting for threads to finish...') + while threading.active_count() > 0: + time.sleep(1) + + log.info('Done') + + +if __name__ == '__main__': + # Run isolated + # -s for DEBUG mode + CURRENT_FILE = os.path.realpath(__file__) + pytest.main("-s %s" % CURRENT_FILE) diff --git a/dirsrvtests/tests/stress/replication/mmr_01_4m-2h-4c_test.py b/dirsrvtests/tests/stress/replication/mmr_01_4m-2h-4c_test.py new file mode 100644 index 0000000..e6e959c --- /dev/null +++ b/dirsrvtests/tests/stress/replication/mmr_01_4m-2h-4c_test.py @@ -0,0 +1,979 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2022 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. 
+# --- END COPYRIGHT BLOCK --- +# +import os +import sys +import time +import datetime +import ldap +import logging +import pytest +import threading +from lib389._constants import * +from lib389.properties import * +from lib389.tasks import * +from lib389.utils import * +from lib389.repltools import ReplTools + +pytestmark = pytest.mark.tier3 + +logging.getLogger(__name__).setLevel(logging.INFO) +log = logging.getLogger(__name__) + +DEBUGGING = False +ADD_DEL_COUNT = 5000 +MAX_LOOPS = 5 +TEST_CONVERGE_LATENCY = True +CONVERGENCE_TIMEOUT = '60' +supplier_list = [] +hub_list = [] +con_list = [] +TEST_START = time.time() + +LAST_DN_IDX = ADD_DEL_COUNT - 1 +LAST_DN_M1 = 'DEL dn="uid=supplier_1-%d,%s' % (LAST_DN_IDX, DEFAULT_SUFFIX) +LAST_DN_M2 = 'DEL dn="uid=supplier_2-%d,%s' % (LAST_DN_IDX, DEFAULT_SUFFIX) +LAST_DN_M3 = 'DEL dn="uid=supplier_3-%d,%s' % (LAST_DN_IDX, DEFAULT_SUFFIX) +LAST_DN_M4 = 'DEL dn="uid=supplier_4-%d,%s' % (LAST_DN_IDX, DEFAULT_SUFFIX) + + +class TopologyReplication(object): + """The Replication Topology Class""" + def __init__(self, supplier1, supplier2, supplier3, supplier4, hub1, hub2, + consumer1, consumer2, consumer3, consumer4): + """Init""" + supplier1.open() + self.supplier1 = supplier1 + supplier2.open() + self.supplier2 = supplier2 + supplier3.open() + self.supplier3 = supplier3 + supplier4.open() + self.supplier4 = supplier4 + hub1.open() + self.hub1 = hub1 + hub2.open() + self.hub2 = hub2 + consumer1.open() + self.consumer1 = consumer1 + consumer2.open() + self.consumer2 = consumer2 + consumer3.open() + self.consumer3 = consumer3 + consumer4.open() + self.consumer4 = consumer4 + supplier_list.append(supplier1.serverid) + supplier_list.append(supplier2.serverid) + supplier_list.append(supplier3.serverid) + supplier_list.append(supplier4.serverid) + hub_list.append(hub1.serverid) + hub_list.append(hub2.serverid) + con_list.append(consumer1.serverid) + con_list.append(consumer2.serverid) + con_list.append(consumer3.serverid) + 
con_list.append(consumer4.serverid) + + +@pytest.fixture(scope="module") +def topology(request): + """Create Replication Deployment""" + + # Creating supplier 1... + if DEBUGGING: + supplier1 = DirSrv(verbose=True) + else: + supplier1 = DirSrv(verbose=False) + args_instance[SER_HOST] = HOST_SUPPLIER_1 + args_instance[SER_PORT] = PORT_SUPPLIER_1 + args_instance[SER_SERVERID_PROP] = SERVERID_SUPPLIER_1 + args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX + args_supplier = args_instance.copy() + supplier1.allocate(args_supplier) + instance_supplier1 = supplier1.exists() + if instance_supplier1: + supplier1.delete() + supplier1.create() + supplier1.open() + supplier1.replica.enableReplication(suffix=SUFFIX, role=ReplicaRole.SUPPLIER, + replicaId=REPLICAID_SUPPLIER_1) + + # Creating supplier 2... + if DEBUGGING: + supplier2 = DirSrv(verbose=True) + else: + supplier2 = DirSrv(verbose=False) + args_instance[SER_HOST] = HOST_SUPPLIER_2 + args_instance[SER_PORT] = PORT_SUPPLIER_2 + args_instance[SER_SERVERID_PROP] = SERVERID_SUPPLIER_2 + args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX + args_supplier = args_instance.copy() + supplier2.allocate(args_supplier) + instance_supplier2 = supplier2.exists() + if instance_supplier2: + supplier2.delete() + supplier2.create() + supplier2.open() + supplier2.replica.enableReplication(suffix=SUFFIX, role=ReplicaRole.SUPPLIER, + replicaId=REPLICAID_SUPPLIER_2) + + # Creating supplier 3... 
+ if DEBUGGING: + supplier3 = DirSrv(verbose=True) + else: + supplier3 = DirSrv(verbose=False) + args_instance[SER_HOST] = HOST_SUPPLIER_3 + args_instance[SER_PORT] = PORT_SUPPLIER_3 + args_instance[SER_SERVERID_PROP] = SERVERID_SUPPLIER_3 + args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX + args_supplier = args_instance.copy() + supplier3.allocate(args_supplier) + instance_supplier3 = supplier3.exists() + if instance_supplier3: + supplier3.delete() + supplier3.create() + supplier3.open() + supplier3.replica.enableReplication(suffix=SUFFIX, role=ReplicaRole.SUPPLIER, + replicaId=REPLICAID_SUPPLIER_3) + + # Creating supplier 4... + if DEBUGGING: + supplier4 = DirSrv(verbose=True) + else: + supplier4 = DirSrv(verbose=False) + args_instance[SER_HOST] = HOST_SUPPLIER_4 + args_instance[SER_PORT] = PORT_SUPPLIER_4 + args_instance[SER_SERVERID_PROP] = SERVERID_SUPPLIER_4 + args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX + args_supplier = args_instance.copy() + supplier4.allocate(args_supplier) + instance_supplier4 = supplier4.exists() + if instance_supplier4: + supplier4.delete() + supplier4.create() + supplier4.open() + supplier4.replica.enableReplication(suffix=SUFFIX, role=ReplicaRole.SUPPLIER, + replicaId=REPLICAID_SUPPLIER_4) + + # Creating hub 1... + if DEBUGGING: + hub1 = DirSrv(verbose=True) + else: + hub1 = DirSrv(verbose=False) + args_instance[SER_HOST] = HOST_HUB_1 + args_instance[SER_PORT] = PORT_HUB_1 + args_instance[SER_SERVERID_PROP] = SERVERID_HUB_1 + args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX + args_hub = args_instance.copy() + hub1.allocate(args_hub) + instance_hub1 = hub1.exists() + if instance_hub1: + hub1.delete() + hub1.create() + hub1.open() + hub1.replica.enableReplication(suffix=SUFFIX, role=ReplicaRole.HUB, + replicaId=REPLICAID_HUB_1) + + # Creating hub 2... 
+ if DEBUGGING: + hub2 = DirSrv(verbose=True) + else: + hub2 = DirSrv(verbose=False) + args_instance[SER_HOST] = HOST_HUB_2 + args_instance[SER_PORT] = PORT_HUB_2 + args_instance[SER_SERVERID_PROP] = SERVERID_HUB_2 + args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX + args_hub = args_instance.copy() + hub2.allocate(args_hub) + instance_hub2 = hub2.exists() + if instance_hub2: + hub2.delete() + hub2.create() + hub2.open() + hub2.replica.enableReplication(suffix=SUFFIX, role=ReplicaRole.HUB, + replicaId=REPLICAID_HUB_2) + + # Creating consumer 1... + if DEBUGGING: + consumer1 = DirSrv(verbose=True) + else: + consumer1 = DirSrv(verbose=False) + args_instance[SER_HOST] = HOST_CONSUMER_1 + args_instance[SER_PORT] = PORT_CONSUMER_1 + args_instance[SER_SERVERID_PROP] = SERVERID_CONSUMER_1 + args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX + args_consumer = args_instance.copy() + consumer1.allocate(args_consumer) + instance_consumer1 = consumer1.exists() + if instance_consumer1: + consumer1.delete() + consumer1.create() + consumer1.open() + consumer1.replica.enableReplication(suffix=SUFFIX, + role=ReplicaRole.CONSUMER, + replicaId=CONSUMER_REPLICAID) + + # Creating consumer 2... + if DEBUGGING: + consumer2 = DirSrv(verbose=True) + else: + consumer2 = DirSrv(verbose=False) + args_instance[SER_HOST] = HOST_CONSUMER_2 + args_instance[SER_PORT] = PORT_CONSUMER_2 + args_instance[SER_SERVERID_PROP] = SERVERID_CONSUMER_2 + args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX + args_consumer = args_instance.copy() + consumer2.allocate(args_consumer) + instance_consumer2 = consumer2.exists() + if instance_consumer2: + consumer2.delete() + consumer2.create() + consumer2.open() + consumer2.replica.enableReplication(suffix=SUFFIX, + role=ReplicaRole.CONSUMER, + replicaId=CONSUMER_REPLICAID) + + # Creating consumer 3... 
+ if DEBUGGING: + consumer3 = DirSrv(verbose=True) + else: + consumer3 = DirSrv(verbose=False) + args_instance[SER_HOST] = HOST_CONSUMER_3 + args_instance[SER_PORT] = PORT_CONSUMER_3 + args_instance[SER_SERVERID_PROP] = SERVERID_CONSUMER_3 + args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX + args_consumer = args_instance.copy() + consumer3.allocate(args_consumer) + instance_consumer3 = consumer3.exists() + if instance_consumer3: + consumer3.delete() + consumer3.create() + consumer3.open() + consumer3.replica.enableReplication(suffix=SUFFIX, + role=ReplicaRole.CONSUMER, + replicaId=CONSUMER_REPLICAID) + + # Creating consumer 4... + if DEBUGGING: + consumer4 = DirSrv(verbose=True) + else: + consumer4 = DirSrv(verbose=False) + args_instance[SER_HOST] = HOST_CONSUMER_4 + args_instance[SER_PORT] = PORT_CONSUMER_4 + args_instance[SER_SERVERID_PROP] = SERVERID_CONSUMER_4 + args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX + args_consumer = args_instance.copy() + consumer4.allocate(args_consumer) + instance_consumer4 = consumer4.exists() + if instance_consumer4: + consumer4.delete() + consumer4.create() + consumer4.open() + consumer4.replica.enableReplication(suffix=SUFFIX, + role=ReplicaRole.CONSUMER, + replicaId=CONSUMER_REPLICAID) + + # + # Create all the agreements + # + + # Creating agreement from supplier 1 to supplier 2 + properties = {RA_NAME: 'meTo_' + supplier2.host + ':' + str(supplier2.port), + RA_BINDDN: defaultProperties[REPLICATION_BIND_DN], + RA_BINDPW: defaultProperties[REPLICATION_BIND_PW], + RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD], + RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]} + m1_m2_agmt = supplier1.agreement.create(suffix=SUFFIX, host=supplier2.host, + port=supplier2.port, + properties=properties) + if not m1_m2_agmt: + log.fatal("Fail to create a supplier -> supplier replica agreement") + sys.exit(1) + log.debug("%s created" % m1_m2_agmt) + + # Creating agreement from supplier 1 to supplier 3 + properties = 
{RA_NAME: 'meTo_' + supplier3.host + ':' + str(supplier3.port), + RA_BINDDN: defaultProperties[REPLICATION_BIND_DN], + RA_BINDPW: defaultProperties[REPLICATION_BIND_PW], + RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD], + RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]} + m1_m3_agmt = supplier1.agreement.create(suffix=SUFFIX, host=supplier3.host, + port=supplier3.port, + properties=properties) + if not m1_m3_agmt: + log.fatal("Fail to create a supplier -> supplier replica agreement") + sys.exit(1) + log.debug("%s created" % m1_m3_agmt) + + # Creating agreement from supplier 1 to supplier 4 + properties = {RA_NAME: 'meTo_' + supplier4.host + ':' + str(supplier4.port), + RA_BINDDN: defaultProperties[REPLICATION_BIND_DN], + RA_BINDPW: defaultProperties[REPLICATION_BIND_PW], + RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD], + RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]} + m1_m4_agmt = supplier1.agreement.create(suffix=SUFFIX, host=supplier4.host, + port=supplier4.port, + properties=properties) + if not m1_m4_agmt: + log.fatal("Fail to create a supplier -> supplier replica agreement") + sys.exit(1) + log.debug("%s created" % m1_m4_agmt) + + # Creating agreement from supplier 1 to hub 1 + properties = {RA_NAME: 'meTo_' + hub1.host + ':' + str(hub1.port), + RA_BINDDN: defaultProperties[REPLICATION_BIND_DN], + RA_BINDPW: defaultProperties[REPLICATION_BIND_PW], + RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD], + RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]} + m1_h1_agmt = supplier1.agreement.create(suffix=SUFFIX, host=hub1.host, + port=hub1.port, + properties=properties) + if not m1_h1_agmt: + log.fatal("Fail to create a supplier -> hub replica agreement") + sys.exit(1) + log.debug("%s created" % m1_h1_agmt) + + # Creating agreement from supplier 1 to hub 2 + properties = {RA_NAME: 'meTo_' + hub2.host + ':' + str(hub2.port), + RA_BINDDN: defaultProperties[REPLICATION_BIND_DN], + RA_BINDPW: 
defaultProperties[REPLICATION_BIND_PW], + RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD], + RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]} + m1_h2_agmt = supplier1.agreement.create(suffix=SUFFIX, host=hub2.host, + port=hub2.port, + properties=properties) + if not m1_h2_agmt: + log.fatal("Fail to create a supplier -> hub replica agreement") + sys.exit(1) + log.debug("%s created" % m1_h2_agmt) + + # Creating agreement from supplier 2 to supplier 1 + properties = {RA_NAME: 'meTo_' + supplier1.host + ':' + str(supplier1.port), + RA_BINDDN: defaultProperties[REPLICATION_BIND_DN], + RA_BINDPW: defaultProperties[REPLICATION_BIND_PW], + RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD], + RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]} + m2_m1_agmt = supplier2.agreement.create(suffix=SUFFIX, host=supplier1.host, + port=supplier1.port, + properties=properties) + if not m2_m1_agmt: + log.fatal("Fail to create a supplier -> supplier replica agreement") + sys.exit(1) + log.debug("%s created" % m2_m1_agmt) + + # Creating agreement from supplier 2 to supplier 3 + properties = {RA_NAME: 'meTo_' + supplier3.host + ':' + str(supplier3.port), + RA_BINDDN: defaultProperties[REPLICATION_BIND_DN], + RA_BINDPW: defaultProperties[REPLICATION_BIND_PW], + RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD], + RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]} + m2_m3_agmt = supplier2.agreement.create(suffix=SUFFIX, host=supplier3.host, + port=supplier3.port, + properties=properties) + if not m2_m3_agmt: + log.fatal("Fail to create a supplier -> supplier replica agreement") + sys.exit(1) + log.debug("%s created" % m2_m3_agmt) + + # Creating agreement from supplier 2 to supplier 4 + properties = {RA_NAME: 'meTo_' + supplier4.host + ':' + str(supplier4.port), + RA_BINDDN: defaultProperties[REPLICATION_BIND_DN], + RA_BINDPW: defaultProperties[REPLICATION_BIND_PW], + RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD], + RA_TRANSPORT_PROT: 
defaultProperties[REPLICATION_TRANSPORT]} + m2_m4_agmt = supplier2.agreement.create(suffix=SUFFIX, host=supplier4.host, + port=supplier4.port, + properties=properties) + if not m2_m4_agmt: + log.fatal("Fail to create a supplier -> supplier replica agreement") + sys.exit(1) + log.debug("%s created" % m2_m4_agmt) + + # Creating agreement from supplier 2 to hub 1 + properties = {RA_NAME: 'meTo_' + hub1.host + ':' + str(hub1.port), + RA_BINDDN: defaultProperties[REPLICATION_BIND_DN], + RA_BINDPW: defaultProperties[REPLICATION_BIND_PW], + RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD], + RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]} + m2_h1_agmt = supplier2.agreement.create(suffix=SUFFIX, host=hub1.host, + port=hub1.port, + properties=properties) + if not m2_h1_agmt: + log.fatal("Fail to create a supplier -> hub replica agreement") + sys.exit(1) + log.debug("%s created" % m2_h1_agmt) + + # Creating agreement from supplier 2 to hub 2 + properties = {RA_NAME: 'meTo_' + hub2.host + ':' + str(hub2.port), + RA_BINDDN: defaultProperties[REPLICATION_BIND_DN], + RA_BINDPW: defaultProperties[REPLICATION_BIND_PW], + RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD], + RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]} + m2_h2_agmt = supplier2.agreement.create(suffix=SUFFIX, host=hub2.host, + port=hub2.port, + properties=properties) + if not m2_h2_agmt: + log.fatal("Fail to create a supplier -> hub replica agreement") + sys.exit(1) + log.debug("%s created" % m2_h2_agmt) + + # Creating agreement from supplier 3 to supplier 1 + properties = {RA_NAME: 'meTo_' + supplier1.host + ':' + str(supplier1.port), + RA_BINDDN: defaultProperties[REPLICATION_BIND_DN], + RA_BINDPW: defaultProperties[REPLICATION_BIND_PW], + RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD], + RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]} + m3_m1_agmt = supplier3.agreement.create(suffix=SUFFIX, host=supplier1.host, + port=supplier1.port, + 
properties=properties) + if not m3_m1_agmt: + log.fatal("Fail to create a supplier -> supplier replica agreement") + sys.exit(1) + log.debug("%s created" % m3_m1_agmt) + + # Creating agreement from supplier 3 to supplier 2 + properties = {RA_NAME: 'meTo_' + supplier2.host + ':' + str(supplier2.port), + RA_BINDDN: defaultProperties[REPLICATION_BIND_DN], + RA_BINDPW: defaultProperties[REPLICATION_BIND_PW], + RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD], + RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]} + m3_m2_agmt = supplier3.agreement.create(suffix=SUFFIX, host=supplier2.host, + port=supplier2.port, + properties=properties) + if not m3_m2_agmt: + log.fatal("Fail to create a supplier -> supplier replica agreement") + sys.exit(1) + log.debug("%s created" % m3_m2_agmt) + + # Creating agreement from supplier 3 to supplier 4 + properties = {RA_NAME: 'meTo_' + supplier4.host + ':' + str(supplier4.port), + RA_BINDDN: defaultProperties[REPLICATION_BIND_DN], + RA_BINDPW: defaultProperties[REPLICATION_BIND_PW], + RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD], + RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]} + m3_m4_agmt = supplier3.agreement.create(suffix=SUFFIX, host=supplier4.host, + port=supplier4.port, + properties=properties) + if not m3_m4_agmt: + log.fatal("Fail to create a supplier -> supplier replica agreement") + sys.exit(1) + log.debug("%s created" % m3_m4_agmt) + + # Creating agreement from supplier 3 to hub 1 + properties = {RA_NAME: 'meTo_' + hub1.host + ':' + str(hub1.port), + RA_BINDDN: defaultProperties[REPLICATION_BIND_DN], + RA_BINDPW: defaultProperties[REPLICATION_BIND_PW], + RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD], + RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]} + m3_h1_agmt = supplier3.agreement.create(suffix=SUFFIX, host=hub1.host, + port=hub1.port, + properties=properties) + if not m3_h1_agmt: + log.fatal("Fail to create a supplier -> hub replica agreement") + sys.exit(1) + 
log.debug("%s created" % m3_h1_agmt) + + # Creating agreement from supplier 3 to hub 2 + properties = {RA_NAME: 'meTo_' + hub2.host + ':' + str(hub2.port), + RA_BINDDN: defaultProperties[REPLICATION_BIND_DN], + RA_BINDPW: defaultProperties[REPLICATION_BIND_PW], + RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD], + RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]} + m3_h2_agmt = supplier3.agreement.create(suffix=SUFFIX, host=hub2.host, + port=hub2.port, + properties=properties) + if not m3_h2_agmt: + log.fatal("Fail to create a supplier -> hub replica agreement") + sys.exit(1) + log.debug("%s created" % m3_h2_agmt) + + # Creating agreement from supplier 4 to supplier 1 + properties = {RA_NAME: 'meTo_' + supplier1.host + ':' + str(supplier1.port), + RA_BINDDN: defaultProperties[REPLICATION_BIND_DN], + RA_BINDPW: defaultProperties[REPLICATION_BIND_PW], + RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD], + RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]} + m4_m1_agmt = supplier4.agreement.create(suffix=SUFFIX, host=supplier1.host, + port=supplier1.port, + properties=properties) + if not m4_m1_agmt: + log.fatal("Fail to create a supplier -> supplier replica agreement") + sys.exit(1) + log.debug("%s created" % m4_m1_agmt) + + # Creating agreement from supplier 4 to supplier 2 + properties = {RA_NAME: 'meTo_' + supplier2.host + ':' + str(supplier2.port), + RA_BINDDN: defaultProperties[REPLICATION_BIND_DN], + RA_BINDPW: defaultProperties[REPLICATION_BIND_PW], + RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD], + RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]} + m4_m2_agmt = supplier4.agreement.create(suffix=SUFFIX, host=supplier2.host, + port=supplier2.port, + properties=properties) + if not m4_m2_agmt: + log.fatal("Fail to create a supplier -> supplier replica agreement") + sys.exit(1) + log.debug("%s created" % m4_m2_agmt) + + # Creating agreement from supplier 4 to supplier 3 + properties = {RA_NAME: 'meTo_' + 
supplier3.host + ':' + str(supplier3.port), + RA_BINDDN: defaultProperties[REPLICATION_BIND_DN], + RA_BINDPW: defaultProperties[REPLICATION_BIND_PW], + RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD], + RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]} + m4_m3_agmt = supplier4.agreement.create(suffix=SUFFIX, host=supplier3.host, + port=supplier3.port, + properties=properties) + if not m4_m3_agmt: + log.fatal("Fail to create a supplier -> supplier replica agreement") + sys.exit(1) + log.debug("%s created" % m4_m3_agmt) + + # Creating agreement from supplier 4 to hub 1 + properties = {RA_NAME: 'meTo_' + hub1.host + ':' + str(hub1.port), + RA_BINDDN: defaultProperties[REPLICATION_BIND_DN], + RA_BINDPW: defaultProperties[REPLICATION_BIND_PW], + RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD], + RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]} + m4_h1_agmt = supplier4.agreement.create(suffix=SUFFIX, host=hub1.host, + port=hub1.port, + properties=properties) + if not m4_h1_agmt: + log.fatal("Fail to create a supplier -> hub replica agreement") + sys.exit(1) + log.debug("%s created" % m4_h1_agmt) + + # Creating agreement from supplier 4 to hub 2 + properties = {RA_NAME: 'meTo_' + hub2.host + ':' + str(hub2.port), + RA_BINDDN: defaultProperties[REPLICATION_BIND_DN], + RA_BINDPW: defaultProperties[REPLICATION_BIND_PW], + RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD], + RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]} + m4_h2_agmt = supplier4.agreement.create(suffix=SUFFIX, host=hub2.host, + port=hub2.port, + properties=properties) + if not m4_h2_agmt: + log.fatal("Fail to create a supplier -> hub replica agreement") + sys.exit(1) + log.debug("%s created" % m4_h2_agmt) + + # Creating agreement from hub 1 to consumer 1 + properties = {RA_NAME: 'me2_' + consumer1.host + ':' + str(consumer1.port), + RA_BINDDN: defaultProperties[REPLICATION_BIND_DN], + RA_BINDPW: defaultProperties[REPLICATION_BIND_PW], + RA_METHOD: 
defaultProperties[REPLICATION_BIND_METHOD], + RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]} + h1_c1_agmt = hub1.agreement.create(suffix=SUFFIX, host=consumer1.host, + port=consumer1.port, + properties=properties) + if not h1_c1_agmt: + log.fatal("Fail to create a hub -> consumer replica agreement") + sys.exit(1) + log.debug("%s created" % h1_c1_agmt) + + # Creating agreement from hub 1 to consumer 2 + properties = {RA_NAME: 'me2_' + consumer2.host + ':' + str(consumer2.port), + RA_BINDDN: defaultProperties[REPLICATION_BIND_DN], + RA_BINDPW: defaultProperties[REPLICATION_BIND_PW], + RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD], + RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]} + h1_c2_agmt = hub1.agreement.create(suffix=SUFFIX, host=consumer2.host, + port=consumer2.port, + properties=properties) + if not h1_c2_agmt: + log.fatal("Fail to create a hub -> consumer replica agreement") + sys.exit(1) + log.debug("%s created" % h1_c2_agmt) + + # Creating agreement from hub 1 to consumer 3 + properties = {RA_NAME: 'me2_' + consumer3.host + ':' + str(consumer3.port), + RA_BINDDN: defaultProperties[REPLICATION_BIND_DN], + RA_BINDPW: defaultProperties[REPLICATION_BIND_PW], + RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD], + RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]} + h1_c3_agmt = hub1.agreement.create(suffix=SUFFIX, host=consumer3.host, + port=consumer3.port, + properties=properties) + if not h1_c3_agmt: + log.fatal("Fail to create a hub -> consumer replica agreement") + sys.exit(1) + log.debug("%s created" % h1_c3_agmt) + + # Creating agreement from hub 1 to consumer 4 + properties = {RA_NAME: 'me2_' + consumer4.host + ':' + str(consumer4.port), + RA_BINDDN: defaultProperties[REPLICATION_BIND_DN], + RA_BINDPW: defaultProperties[REPLICATION_BIND_PW], + RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD], + RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]} + h1_c4_agmt = 
hub1.agreement.create(suffix=SUFFIX, host=consumer4.host, + port=consumer4.port, + properties=properties) + if not h1_c4_agmt: + log.fatal("Fail to create a hub -> consumer replica agreement") + sys.exit(1) + log.debug("%s created" % h1_c4_agmt) + + # Creating agreement from hub 2 to consumer 1 + properties = {RA_NAME: 'me2_' + consumer1.host + ':' + str(consumer1.port), + RA_BINDDN: defaultProperties[REPLICATION_BIND_DN], + RA_BINDPW: defaultProperties[REPLICATION_BIND_PW], + RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD], + RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]} + h2_c1_agmt = hub2.agreement.create(suffix=SUFFIX, host=consumer1.host, + port=consumer1.port, + properties=properties) + if not h2_c1_agmt: + log.fatal("Fail to create a hub -> consumer replica agreement") + sys.exit(1) + log.debug("%s created" % h2_c1_agmt) + + # Creating agreement from hub 2 to consumer 2 + properties = {RA_NAME: 'me2_' + consumer2.host + ':' + str(consumer2.port), + RA_BINDDN: defaultProperties[REPLICATION_BIND_DN], + RA_BINDPW: defaultProperties[REPLICATION_BIND_PW], + RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD], + RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]} + h2_c2_agmt = hub2.agreement.create(suffix=SUFFIX, host=consumer2.host, + port=consumer2.port, + properties=properties) + if not h2_c2_agmt: + log.fatal("Fail to create a hub -> consumer replica agreement") + sys.exit(1) + log.debug("%s created" % h2_c2_agmt) + + # Creating agreement from hub 2 to consumer 3 + properties = {RA_NAME: 'me2_' + consumer3.host + ':' + str(consumer3.port), + RA_BINDDN: defaultProperties[REPLICATION_BIND_DN], + RA_BINDPW: defaultProperties[REPLICATION_BIND_PW], + RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD], + RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]} + h2_c3_agmt = hub2.agreement.create(suffix=SUFFIX, host=consumer3.host, + port=consumer3.port, + properties=properties) + if not h2_c3_agmt: + log.fatal("Fail to create 
a hub -> consumer replica agreement") + sys.exit(1) + log.debug("%s created" % h2_c3_agmt) + + # Creating agreement from hub 2 to consumer 4 + properties = {RA_NAME: 'me2_' + consumer4.host + ':' + str(consumer4.port), + RA_BINDDN: defaultProperties[REPLICATION_BIND_DN], + RA_BINDPW: defaultProperties[REPLICATION_BIND_PW], + RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD], + RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]} + h2_c4_agmt = hub2.agreement.create(suffix=SUFFIX, host=consumer4.host, + port=consumer4.port, + properties=properties) + if not h2_c4_agmt: + log.fatal("Fail to create a hub -> consumer replica agreement") + sys.exit(1) + log.debug("%s created" % h2_c4_agmt) + + # Allow the replicas to get situated with the new agreements... + time.sleep(5) + + # + # Initialize all the agreements + # + supplier1.agreement.init(SUFFIX, HOST_SUPPLIER_2, PORT_SUPPLIER_2) + supplier1.waitForReplInit(m1_m2_agmt) + supplier1.agreement.init(SUFFIX, HOST_SUPPLIER_3, PORT_SUPPLIER_3) + supplier1.waitForReplInit(m1_m3_agmt) + supplier1.agreement.init(SUFFIX, HOST_SUPPLIER_4, PORT_SUPPLIER_4) + supplier1.waitForReplInit(m1_m4_agmt) + supplier1.agreement.init(SUFFIX, HOST_HUB_1, PORT_HUB_1) + supplier1.waitForReplInit(m1_h1_agmt) + hub1.agreement.init(SUFFIX, HOST_CONSUMER_1, PORT_CONSUMER_1) + hub1.waitForReplInit(h1_c1_agmt) + hub1.agreement.init(SUFFIX, HOST_CONSUMER_2, PORT_CONSUMER_2) + hub1.waitForReplInit(h1_c2_agmt) + hub1.agreement.init(SUFFIX, HOST_CONSUMER_3, PORT_CONSUMER_3) + hub1.waitForReplInit(h1_c3_agmt) + hub1.agreement.init(SUFFIX, HOST_CONSUMER_4, PORT_CONSUMER_4) + hub1.waitForReplInit(h1_c4_agmt) + supplier1.agreement.init(SUFFIX, HOST_HUB_2, PORT_HUB_2) + supplier1.waitForReplInit(m1_h2_agmt) + + # Check replication is working... 
+ if supplier1.testReplication(DEFAULT_SUFFIX, consumer1): + log.info('Replication is working.') + else: + log.fatal('Replication is not working.') + assert False + + def fin(): + """If we are debugging just stop the instances, otherwise remove + them + """ + if DEBUGGING: + supplier1.stop() + supplier2.stop() + supplier3.stop() + supplier4.stop() + hub1.stop() + hub2.stop() + consumer1.stop() + consumer2.stop() + consumer3.stop() + consumer4.stop() + else: + supplier1.delete() + supplier2.delete() + supplier3.delete() + supplier4.delete() + hub1.delete() + hub2.delete() + consumer1.delete() + consumer2.delete() + consumer3.delete() + consumer4.delete() + request.addfinalizer(fin) + + return TopologyReplication(supplier1, supplier2, supplier3, supplier4, hub1, hub2, + consumer1, consumer2, consumer3, consumer4) + + +class AddDelUsers(threading.Thread): + """Add's and delets 50000 entries""" + def __init__(self, inst): + """ + Initialize the thread + """ + threading.Thread.__init__(self) + self.daemon = True + self.inst = inst + self.name = inst.serverid + + def run(self): + """ + Start adding users + """ + idx = 0 + + log.info('AddDelUsers (%s) Adding and deleting %d entries...' 
% + (self.name, ADD_DEL_COUNT)) + + while idx < ADD_DEL_COUNT: + RDN_VAL = ('uid=%s-%d' % (self.name, idx)) + USER_DN = ('%s,%s' % (RDN_VAL, DEFAULT_SUFFIX)) + + try: + self.inst.add_s(Entry((USER_DN, {'objectclass': + 'top extensibleObject'.split(), + 'uid': RDN_VAL}))) + except ldap.LDAPError as e: + log.fatal('AddDelUsers (%s): failed to add (%s) error: %s' % + (self.name, USER_DN, str(e))) + assert False + + try: + self.inst.delete_s(USER_DN) + except ldap.LDAPError as e: + log.fatal('AddDelUsers (%s): failed to delete (%s) error: %s' % + (self.name, USER_DN, str(e))) + assert False + + idx += 1 + + log.info('AddDelUsers (%s) - Finished at: %s' % + (self.name, getDateTime())) + + +def measureConvergence(topology): + """Find and measure the convergence of entries from each supplier + """ + + replicas = [topology.supplier1, topology.supplier2, topology.supplier3, + topology.supplier4, topology.hub1, topology.hub2, + topology.consumer1, topology.consumer2, topology.consumer3, + topology.consumer4] + + if ADD_DEL_COUNT > 10: + interval = int(ADD_DEL_COUNT / 10) + else: + interval = 1 + + for supplier in [('1', topology.supplier1), + ('2', topology.supplier2), + ('3', topology.supplier3), + ('4', topology.supplier4)]: + # Start with the first entry + entries = ['ADD dn="uid=supplier_%s-0,%s' % + (supplier[0], DEFAULT_SUFFIX)] + + # Add incremental entries to the list + idx = interval + while idx < ADD_DEL_COUNT: + entries.append('ADD dn="uid=supplier_%s-%d,%s' % + (supplier[0], idx, DEFAULT_SUFFIX)) + idx += interval + + # Add the last entry to the list (if it was not already added) + if idx != (ADD_DEL_COUNT - 1): + entries.append('ADD dn="uid=supplier_%s-%d,%s' % + (supplier[0], (ADD_DEL_COUNT - 1), + DEFAULT_SUFFIX)) + + ReplTools.replConvReport(DEFAULT_SUFFIX, entries, supplier[1], replicas) + + +def test_MMR_Integrity(topology): + """Apply load to 4 suppliers at the same time. Perform adds and deletes. 
+ If any updates are missed we will see an error 32 in the access logs or + we will have entries left over once the test completes. + """ + loop = 0 + + ALL_REPLICAS = [topology.supplier1, topology.supplier2, topology.supplier3, + topology.supplier4, + topology.hub1, topology.hub2, + topology.consumer1, topology.consumer2, + topology.consumer3, topology.consumer4] + + if TEST_CONVERGE_LATENCY: + try: + for inst in ALL_REPLICAS: + replica = inst.replicas.get(DEFAULT_SUFFIX) + replica.set('nsds5ReplicaReleaseTimeout', CONVERGENCE_TIMEOUT) + except ldap.LDAPError as e: + log.fatal('Failed to set replicas release timeout - error: %s' % + (str(e))) + assert False + + if DEBUGGING: + # Enable Repl logging, and increase the max logs + try: + for inst in ALL_REPLICAS: + inst.enableReplLogging() + inst.modify_s("cn=config", [(ldap.MOD_REPLACE, + 'nsslapd-errorlog-maxlogsperdir', + '5')]) + except ldap.LDAPError as e: + log.fatal('Failed to set max logs - error: %s' % (str(e))) + assert False + + while loop < MAX_LOOPS: + # Remove the current logs so we have a clean set of logs to check. + log.info('Pass %d...' 
% (loop + 1)) + log.info("Removing logs...") + for inst in ALL_REPLICAS: + inst.deleteAllLogs() + + # Fire off 4 threads to apply the load + log.info("Start adding/deleting: " + getDateTime()) + startTime = time.time() + add_del_m1 = AddDelUsers(topology.supplier1) + add_del_m1.start() + add_del_m2 = AddDelUsers(topology.supplier2) + add_del_m2.start() + add_del_m3 = AddDelUsers(topology.supplier3) + add_del_m3.start() + add_del_m4 = AddDelUsers(topology.supplier4) + add_del_m4.start() + + # Wait for threads to finish sending their updates + add_del_m1.join() + add_del_m2.join() + add_del_m3.join() + add_del_m4.join() + log.info("Finished adding/deleting entries: " + getDateTime()) + + # + # Loop checking for error 32's, and for convergence to complete + # + log.info("Waiting for replication to converge...") + while True: + # First check for error 32's + for inst in ALL_REPLICAS: + if inst.searchAccessLog(" err=32 "): + log.fatal('An add was missed on: ' + inst.serverid) + assert False + + # Next check to see if the last update is in the access log + converged = True + for inst in ALL_REPLICAS: + if not inst.searchAccessLog(LAST_DN_M1) or \ + not inst.searchAccessLog(LAST_DN_M2) or \ + not inst.searchAccessLog(LAST_DN_M3) or \ + not inst.searchAccessLog(LAST_DN_M4): + converged = False + break + + if converged: + elapsed_tm = int(time.time() - startTime) + convtime = str(datetime.timedelta(seconds=elapsed_tm)) + log.info('Replication converged at: ' + getDateTime() + + ' - Elapsed Time: ' + convtime) + break + else: + # Check if replication is idle + replicas = [topology.supplier1, topology.supplier2, + topology.supplier3, topology.supplier4, + topology.hub1, topology.hub2] + if ReplTools.replIdle(replicas, DEFAULT_SUFFIX): + # Replication is idle - wait 30 secs for access log buffer + time.sleep(30) + + # Now check the access log again... 
+ converged = True + for inst in ALL_REPLICAS: + if not inst.searchAccessLog(LAST_DN_M1) or \ + not inst.searchAccessLog(LAST_DN_M2) or \ + not inst.searchAccessLog(LAST_DN_M3) or \ + not inst.searchAccessLog(LAST_DN_M4): + converged = False + break + + if converged: + elapsed_tm = int(time.time() - startTime) + convtime = str(datetime.timedelta(seconds=elapsed_tm)) + log.info('Replication converged at: ' + getDateTime() + + ' - Elapsed Time: ' + convtime) + break + else: + log.fatal('Stopping replication check: ' + + getDateTime()) + log.fatal('Failure: Replication is complete, but we ' + + 'never converged.') + assert False + + # Sleep a bit before the next pass + time.sleep(3) + + # + # Finally check the CSN's + # + log.info("Check the CSN's...") + if not ReplTools.checkCSNs(ALL_REPLICAS): + assert False + log.info("All CSN's present and accounted for.") + + # + # Print the convergence report + # + log.info('Measuring convergence...') + measureConvergence(topology) + + # + # Test complete + # + log.info('No lingering entries.') + log.info('Pass %d complete.' % (loop + 1)) + elapsed_tm = int(time.time() - TEST_START) + convtime = str(datetime.timedelta(seconds=elapsed_tm)) + log.info('Entire test ran for: ' + convtime) + + loop += 1 + + log.info('Test PASSED') + + +if __name__ == '__main__': + # Run isolated + # -s for DEBUG mode + CURRENT_FILE = os.path.realpath(__file__) + pytest.main("-s %s" % CURRENT_FILE) diff --git a/dirsrvtests/tests/stress/replication/mmr_01_4m_test.py b/dirsrvtests/tests/stress/replication/mmr_01_4m_test.py new file mode 100644 index 0000000..c9fd71e --- /dev/null +++ b/dirsrvtests/tests/stress/replication/mmr_01_4m_test.py @@ -0,0 +1,582 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2022 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. 
+# --- END COPYRIGHT BLOCK --- +# +import os +import sys +import time +import datetime +import ldap +import logging +import pytest +import threading +from lib389._constants import * +from lib389.properties import * +from lib389.tasks import * +from lib389.utils import * +from lib389.repltools import ReplTools + +pytestmark = pytest.mark.tier3 + +logging.getLogger(__name__).setLevel(logging.DEBUG) +log = logging.getLogger(__name__) + +DEBUGGING = False +ADD_DEL_COUNT = 50000 +MAX_LOOPS = 2 +TEST_CONVERGE_LATENCY = True +CONVERGENCE_TIMEOUT = '60' +supplier_list = [] +hub_list = [] +con_list = [] +TEST_START = time.time() + +LAST_DN_IDX = ADD_DEL_COUNT - 1 +LAST_DN_M1 = 'DEL dn="uid=supplier_1-%d,%s' % (LAST_DN_IDX, DEFAULT_SUFFIX) +LAST_DN_M2 = 'DEL dn="uid=supplier_2-%d,%s' % (LAST_DN_IDX, DEFAULT_SUFFIX) +LAST_DN_M3 = 'DEL dn="uid=supplier_3-%d,%s' % (LAST_DN_IDX, DEFAULT_SUFFIX) +LAST_DN_M4 = 'DEL dn="uid=supplier_4-%d,%s' % (LAST_DN_IDX, DEFAULT_SUFFIX) + + +class TopologyReplication(object): + """The Replication Topology Class""" + def __init__(self, supplier1, supplier2, supplier3, supplier4): + """Init""" + supplier1.open() + self.supplier1 = supplier1 + supplier2.open() + self.supplier2 = supplier2 + supplier3.open() + self.supplier3 = supplier3 + supplier4.open() + self.supplier4 = supplier4 + + +@pytest.fixture(scope="module") +def topology(request): + """Create Replication Deployment""" + + # Creating supplier 1... 
+ if DEBUGGING: + supplier1 = DirSrv(verbose=True) + else: + supplier1 = DirSrv(verbose=False) + args_instance[SER_HOST] = HOST_SUPPLIER_1 + args_instance[SER_PORT] = PORT_SUPPLIER_1 + args_instance[SER_SERVERID_PROP] = SERVERID_SUPPLIER_1 + args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX + args_supplier = args_instance.copy() + supplier1.allocate(args_supplier) + instance_supplier1 = supplier1.exists() + if instance_supplier1: + supplier1.delete() + supplier1.create() + supplier1.open() + supplier1.replica.enableReplication(suffix=SUFFIX, role=ReplicaRole.SUPPLIER, + replicaId=REPLICAID_SUPPLIER_1) + + # Creating supplier 2... + if DEBUGGING: + supplier2 = DirSrv(verbose=True) + else: + supplier2 = DirSrv(verbose=False) + args_instance[SER_HOST] = HOST_SUPPLIER_2 + args_instance[SER_PORT] = PORT_SUPPLIER_2 + args_instance[SER_SERVERID_PROP] = SERVERID_SUPPLIER_2 + args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX + args_supplier = args_instance.copy() + supplier2.allocate(args_supplier) + instance_supplier2 = supplier2.exists() + if instance_supplier2: + supplier2.delete() + supplier2.create() + supplier2.open() + supplier2.replica.enableReplication(suffix=SUFFIX, role=ReplicaRole.SUPPLIER, + replicaId=REPLICAID_SUPPLIER_2) + + # Creating supplier 3... + if DEBUGGING: + supplier3 = DirSrv(verbose=True) + else: + supplier3 = DirSrv(verbose=False) + args_instance[SER_HOST] = HOST_SUPPLIER_3 + args_instance[SER_PORT] = PORT_SUPPLIER_3 + args_instance[SER_SERVERID_PROP] = SERVERID_SUPPLIER_3 + args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX + args_supplier = args_instance.copy() + supplier3.allocate(args_supplier) + instance_supplier3 = supplier3.exists() + if instance_supplier3: + supplier3.delete() + supplier3.create() + supplier3.open() + supplier3.replica.enableReplication(suffix=SUFFIX, role=ReplicaRole.SUPPLIER, + replicaId=REPLICAID_SUPPLIER_3) + + # Creating supplier 4... 
+ if DEBUGGING: + supplier4 = DirSrv(verbose=True) + else: + supplier4 = DirSrv(verbose=False) + args_instance[SER_HOST] = HOST_SUPPLIER_4 + args_instance[SER_PORT] = PORT_SUPPLIER_4 + args_instance[SER_SERVERID_PROP] = SERVERID_SUPPLIER_4 + args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX + args_supplier = args_instance.copy() + supplier4.allocate(args_supplier) + instance_supplier4 = supplier4.exists() + if instance_supplier4: + supplier4.delete() + supplier4.create() + supplier4.open() + supplier4.replica.enableReplication(suffix=SUFFIX, role=ReplicaRole.SUPPLIER, + replicaId=REPLICAID_SUPPLIER_4) + + # + # Create all the agreements + # + # Creating agreement from supplier 1 to supplier 2 + properties = {RA_NAME: 'meTo_' + supplier2.host + ':' + str(supplier2.port), + RA_BINDDN: defaultProperties[REPLICATION_BIND_DN], + RA_BINDPW: defaultProperties[REPLICATION_BIND_PW], + RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD], + RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]} + m1_m2_agmt = supplier1.agreement.create(suffix=SUFFIX, host=supplier2.host, + port=supplier2.port, + properties=properties) + if not m1_m2_agmt: + log.fatal("Fail to create a supplier -> supplier replica agreement") + sys.exit(1) + log.debug("%s created" % m1_m2_agmt) + + # Creating agreement from supplier 1 to supplier 3 + properties = {RA_NAME: 'meTo_' + supplier3.host + ':' + str(supplier3.port), + RA_BINDDN: defaultProperties[REPLICATION_BIND_DN], + RA_BINDPW: defaultProperties[REPLICATION_BIND_PW], + RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD], + RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]} + m1_m3_agmt = supplier1.agreement.create(suffix=SUFFIX, host=supplier3.host, + port=supplier3.port, + properties=properties) + if not m1_m3_agmt: + log.fatal("Fail to create a supplier -> supplier replica agreement") + sys.exit(1) + log.debug("%s created" % m1_m3_agmt) + + # Creating agreement from supplier 1 to supplier 4 + properties = {RA_NAME: 'meTo_' 
+ supplier4.host + ':' + str(supplier4.port), + RA_BINDDN: defaultProperties[REPLICATION_BIND_DN], + RA_BINDPW: defaultProperties[REPLICATION_BIND_PW], + RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD], + RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]} + m1_m4_agmt = supplier1.agreement.create(suffix=SUFFIX, host=supplier4.host, + port=supplier4.port, + properties=properties) + if not m1_m4_agmt: + log.fatal("Fail to create a supplier -> supplier replica agreement") + sys.exit(1) + log.debug("%s created" % m1_m4_agmt) + + # Creating agreement from supplier 2 to supplier 1 + properties = {RA_NAME: 'meTo_' + supplier1.host + ':' + str(supplier1.port), + RA_BINDDN: defaultProperties[REPLICATION_BIND_DN], + RA_BINDPW: defaultProperties[REPLICATION_BIND_PW], + RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD], + RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]} + m2_m1_agmt = supplier2.agreement.create(suffix=SUFFIX, host=supplier1.host, + port=supplier1.port, + properties=properties) + if not m2_m1_agmt: + log.fatal("Fail to create a supplier -> supplier replica agreement") + sys.exit(1) + log.debug("%s created" % m2_m1_agmt) + + # Creating agreement from supplier 2 to supplier 3 + properties = {RA_NAME: 'meTo_' + supplier3.host + ':' + str(supplier3.port), + RA_BINDDN: defaultProperties[REPLICATION_BIND_DN], + RA_BINDPW: defaultProperties[REPLICATION_BIND_PW], + RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD], + RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]} + m2_m3_agmt = supplier2.agreement.create(suffix=SUFFIX, host=supplier3.host, + port=supplier3.port, + properties=properties) + if not m2_m3_agmt: + log.fatal("Fail to create a supplier -> supplier replica agreement") + sys.exit(1) + log.debug("%s created" % m2_m3_agmt) + + # Creating agreement from supplier 2 to supplier 4 + properties = {RA_NAME: 'meTo_' + supplier4.host + ':' + str(supplier4.port), + RA_BINDDN: defaultProperties[REPLICATION_BIND_DN], + 
RA_BINDPW: defaultProperties[REPLICATION_BIND_PW], + RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD], + RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]} + m2_m4_agmt = supplier2.agreement.create(suffix=SUFFIX, host=supplier4.host, + port=supplier4.port, + properties=properties) + if not m2_m4_agmt: + log.fatal("Fail to create a supplier -> supplier replica agreement") + sys.exit(1) + log.debug("%s created" % m2_m4_agmt) + + # Creating agreement from supplier 3 to supplier 1 + properties = {RA_NAME: 'meTo_' + supplier1.host + ':' + str(supplier1.port), + RA_BINDDN: defaultProperties[REPLICATION_BIND_DN], + RA_BINDPW: defaultProperties[REPLICATION_BIND_PW], + RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD], + RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]} + m3_m1_agmt = supplier3.agreement.create(suffix=SUFFIX, host=supplier1.host, + port=supplier1.port, + properties=properties) + if not m3_m1_agmt: + log.fatal("Fail to create a supplier -> supplier replica agreement") + sys.exit(1) + log.debug("%s created" % m3_m1_agmt) + + # Creating agreement from supplier 3 to supplier 2 + properties = {RA_NAME: 'meTo_' + supplier2.host + ':' + str(supplier2.port), + RA_BINDDN: defaultProperties[REPLICATION_BIND_DN], + RA_BINDPW: defaultProperties[REPLICATION_BIND_PW], + RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD], + RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]} + m3_m2_agmt = supplier3.agreement.create(suffix=SUFFIX, host=supplier2.host, + port=supplier2.port, + properties=properties) + if not m3_m2_agmt: + log.fatal("Fail to create a supplier -> supplier replica agreement") + sys.exit(1) + log.debug("%s created" % m3_m2_agmt) + + # Creating agreement from supplier 3 to supplier 4 + properties = {RA_NAME: 'meTo_' + supplier4.host + ':' + str(supplier4.port), + RA_BINDDN: defaultProperties[REPLICATION_BIND_DN], + RA_BINDPW: defaultProperties[REPLICATION_BIND_PW], + RA_METHOD: 
defaultProperties[REPLICATION_BIND_METHOD], + RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]} + m3_m4_agmt = supplier3.agreement.create(suffix=SUFFIX, host=supplier4.host, + port=supplier4.port, + properties=properties) + if not m3_m4_agmt: + log.fatal("Fail to create a supplier -> supplier replica agreement") + sys.exit(1) + log.debug("%s created" % m3_m4_agmt) + + # Creating agreement from supplier 4 to supplier 1 + properties = {RA_NAME: 'meTo_' + supplier1.host + ':' + str(supplier1.port), + RA_BINDDN: defaultProperties[REPLICATION_BIND_DN], + RA_BINDPW: defaultProperties[REPLICATION_BIND_PW], + RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD], + RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]} + m4_m1_agmt = supplier4.agreement.create(suffix=SUFFIX, host=supplier1.host, + port=supplier1.port, + properties=properties) + if not m4_m1_agmt: + log.fatal("Fail to create a supplier -> supplier replica agreement") + sys.exit(1) + log.debug("%s created" % m4_m1_agmt) + + # Creating agreement from supplier 4 to supplier 2 + properties = {RA_NAME: 'meTo_' + supplier2.host + ':' + str(supplier2.port), + RA_BINDDN: defaultProperties[REPLICATION_BIND_DN], + RA_BINDPW: defaultProperties[REPLICATION_BIND_PW], + RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD], + RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]} + m4_m2_agmt = supplier4.agreement.create(suffix=SUFFIX, host=supplier2.host, + port=supplier2.port, + properties=properties) + if not m4_m2_agmt: + log.fatal("Fail to create a supplier -> supplier replica agreement") + sys.exit(1) + log.debug("%s created" % m4_m2_agmt) + + # Creating agreement from supplier 4 to supplier 3 + properties = {RA_NAME: 'meTo_' + supplier3.host + ':' + str(supplier3.port), + RA_BINDDN: defaultProperties[REPLICATION_BIND_DN], + RA_BINDPW: defaultProperties[REPLICATION_BIND_PW], + RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD], + RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]} 
+    m4_m3_agmt = supplier4.agreement.create(suffix=SUFFIX, host=supplier3.host,
+                                            port=supplier3.port,
+                                            properties=properties)
+    if not m4_m3_agmt:
+        log.fatal("Fail to create a supplier -> supplier replica agreement")
+        sys.exit(1)
+    log.debug("%s created" % m4_m3_agmt)
+
+    # Allow the replicas to get situated with the new agreements...
+    time.sleep(5)
+
+    #
+    # Initialize all the agreements
+    #
+    supplier1.agreement.init(SUFFIX, HOST_SUPPLIER_2, PORT_SUPPLIER_2)
+    supplier1.waitForReplInit(m1_m2_agmt)
+    supplier1.agreement.init(SUFFIX, HOST_SUPPLIER_3, PORT_SUPPLIER_3)
+    supplier1.waitForReplInit(m1_m3_agmt)
+    supplier1.agreement.init(SUFFIX, HOST_SUPPLIER_4, PORT_SUPPLIER_4)
+    supplier1.waitForReplInit(m1_m4_agmt)
+
+    # Check replication is working...
+    if supplier1.testReplication(DEFAULT_SUFFIX, supplier4):
+        log.info('Replication is working.')
+    else:
+        log.fatal('Replication is not working.')
+        assert False
+
+    def fin():
+        """If we are debugging just stop the instances, otherwise remove
+        them
+        """
+        if DEBUGGING:
+            supplier1.stop()
+            supplier2.stop()
+            supplier3.stop()
+            supplier4.stop()
+        else:
+            supplier1.delete()
+            supplier2.delete()
+            supplier3.delete()
+            supplier4.delete()
+    request.addfinalizer(fin)
+
+    return TopologyReplication(supplier1, supplier2, supplier3, supplier4)
+
+
+class AddDelUsers(threading.Thread):
+    """Adds and deletes ADD_DEL_COUNT entries against one supplier."""
+    def __init__(self, inst):
+        """
+        Initialize the thread
+        """
+        threading.Thread.__init__(self)
+        self.daemon = True
+        self.inst = inst
+        self.name = inst.serverid
+
+    def run(self):
+        """
+        Start adding users
+        """
+        idx = 0
+
+        log.info('AddDelUsers (%s) Adding and deleting %d entries...'
% + (self.name, ADD_DEL_COUNT)) + + while idx < ADD_DEL_COUNT: + RDN_VAL = ('uid=%s-%d' % (self.name, idx)) + USER_DN = ('%s,%s' % (RDN_VAL, DEFAULT_SUFFIX)) + + try: + self.inst.add_s(Entry((USER_DN, {'objectclass': + 'top extensibleObject'.split(), + 'uid': RDN_VAL}))) + except ldap.LDAPError as e: + log.fatal('AddDelUsers (%s): failed to add (%s) error: %s' % + (self.name, USER_DN, str(e))) + assert False + + try: + self.inst.delete_s(USER_DN) + except ldap.LDAPError as e: + log.fatal('AddDelUsers (%s): failed to delete (%s) error: %s' % + (self.name, USER_DN, str(e))) + assert False + + idx += 1 + + log.info('AddDelUsers (%s) - Finished at: %s' % + (self.name, getDateTime())) + + +def measureConvergence(topology): + """Find and measure the convergence of entries from each supplier + """ + + replicas = [topology.supplier1, topology.supplier2, topology.supplier3, + topology.supplier4] + + if ADD_DEL_COUNT > 10: + interval = int(ADD_DEL_COUNT / 10) + else: + interval = 1 + + for supplier in [('1', topology.supplier1), + ('2', topology.supplier2), + ('3', topology.supplier3), + ('4', topology.supplier4)]: + # Start with the first entry + entries = ['ADD dn="uid=supplier_%s-0,%s' % + (supplier[0], DEFAULT_SUFFIX)] + + # Add incremental entries to the list + idx = interval + while idx < ADD_DEL_COUNT: + entries.append('ADD dn="uid=supplier_%s-%d,%s' % + (supplier[0], idx, DEFAULT_SUFFIX)) + idx += interval + + # Add the last entry to the list (if it was not already added) + if idx != (ADD_DEL_COUNT - 1): + entries.append('ADD dn="uid=supplier_%s-%d,%s' % + (supplier[0], (ADD_DEL_COUNT - 1), + DEFAULT_SUFFIX)) + + ReplTools.replConvReport(DEFAULT_SUFFIX, entries, supplier[1], replicas) + + +def test_MMR_Integrity(topology): + """Apply load to 4 suppliers at the same time. Perform adds and deletes. + If any updates are missed we will see an error 32 in the access logs or + we will have entries left over once the test completes. 
+ """ + loop = 0 + + ALL_REPLICAS = [topology.supplier1, topology.supplier2, topology.supplier3, + topology.supplier4] + + if TEST_CONVERGE_LATENCY: + try: + for inst in ALL_REPLICAS: + replica = inst.replicas.get(DEFAULT_SUFFIX) + replica.set('nsds5ReplicaReleaseTimeout', CONVERGENCE_TIMEOUT) + except ldap.LDAPError as e: + log.fatal('Failed to set replicas release timeout - error: %s' % + (str(e))) + assert False + + if DEBUGGING: + # Enable Repl logging, and increase the max logs + try: + for inst in ALL_REPLICAS: + inst.enableReplLogging() + inst.modify_s("cn=config", [(ldap.MOD_REPLACE, + 'nsslapd-errorlog-maxlogsperdir', + '5')]) + except ldap.LDAPError as e: + log.fatal('Failed to set max logs - error: %s' % (str(e))) + assert False + + while loop < MAX_LOOPS: + # Remove the current logs so we have a clean set of logs to check. + log.info('Pass %d...' % (loop + 1)) + log.info("Removing logs...") + for inst in ALL_REPLICAS: + inst.deleteAllLogs() + + # Fire off 4 threads to apply the load + log.info("Start adding/deleting: " + getDateTime()) + startTime = time.time() + add_del_m1 = AddDelUsers(topology.supplier1) + add_del_m1.start() + add_del_m2 = AddDelUsers(topology.supplier2) + add_del_m2.start() + add_del_m3 = AddDelUsers(topology.supplier3) + add_del_m3.start() + add_del_m4 = AddDelUsers(topology.supplier4) + add_del_m4.start() + + # Wait for threads to finish sending their updates + add_del_m1.join() + add_del_m2.join() + add_del_m3.join() + add_del_m4.join() + log.info("Finished adding/deleting entries: " + getDateTime()) + + # + # Loop checking for error 32's, and for convergence to complete + # + log.info("Waiting for replication to converge...") + while True: + # First check for error 32's + for inst in ALL_REPLICAS: + if inst.searchAccessLog(" err=32 "): + log.fatal('An add was missed on: ' + inst.serverid) + assert False + + # Next check to see if the last update is in the access log + converged = True + for inst in ALL_REPLICAS: + if not 
inst.searchAccessLog(LAST_DN_M1) or \ + not inst.searchAccessLog(LAST_DN_M2) or \ + not inst.searchAccessLog(LAST_DN_M3) or \ + not inst.searchAccessLog(LAST_DN_M4): + converged = False + break + + if converged: + elapsed_tm = int(time.time() - startTime) + convtime = str(datetime.timedelta(seconds=elapsed_tm)) + log.info('Replication converged at: ' + getDateTime() + + ' - Elapsed Time: ' + convtime) + break + else: + # Check if replication is idle + replicas = [topology.supplier1, topology.supplier2, + topology.supplier3, topology.supplier4] + if ReplTools.replIdle(replicas, DEFAULT_SUFFIX): + # Replication is idle - wait 30 secs for access log buffer + time.sleep(30) + + # Now check the access log again... + converged = True + for inst in ALL_REPLICAS: + if not inst.searchAccessLog(LAST_DN_M1) or \ + not inst.searchAccessLog(LAST_DN_M2) or \ + not inst.searchAccessLog(LAST_DN_M3) or \ + not inst.searchAccessLog(LAST_DN_M4): + converged = False + break + + if converged: + elapsed_tm = int(time.time() - startTime) + convtime = str(datetime.timedelta(seconds=elapsed_tm)) + log.info('Replication converged at: ' + getDateTime() + + ' - Elapsed Time: ' + convtime) + break + else: + log.fatal('Stopping replication check: ' + + getDateTime()) + log.fatal('Failure: Replication is complete, but we ' + + 'never converged.') + assert False + + # Sleep a bit before the next pass + time.sleep(3) + + # + # Finally check the CSN's + # + log.info("Check the CSN's...") + if not ReplTools.checkCSNs(ALL_REPLICAS): + assert False + log.info("All CSN's present and accounted for.") + + # + # Print the convergence report + # + log.info('Measuring convergence...') + measureConvergence(topology) + + # + # Test complete + # + log.info('No lingering entries.') + log.info('Pass %d complete.' 
+                 % (loop + 1))
+        elapsed_tm = int(time.time() - TEST_START)
+        convtime = str(datetime.timedelta(seconds=elapsed_tm))
+        log.info('Entire test ran for: ' + convtime)
+
+        loop += 1
+
+    log.info('Test PASSED')
+
+
+if __name__ == '__main__':
+    # Run isolated
+    # -s for DEBUG mode
+    CURRENT_FILE = os.path.realpath(__file__)
+    pytest.main(["-s", CURRENT_FILE])
diff --git a/dirsrvtests/tests/stress/search/__init__.py b/dirsrvtests/tests/stress/search/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/dirsrvtests/tests/stress/search/simple.py b/dirsrvtests/tests/stress/search/simple.py
new file mode 100644
index 0000000..d745ff4
--- /dev/null
+++ b/dirsrvtests/tests/stress/search/simple.py
@@ -0,0 +1,55 @@
+# --- BEGIN COPYRIGHT BLOCK ---
+# Copyright (C) 2019 William Brown
+# Copyright (C) 2020 Red Hat, Inc.
+# All rights reserved.
+#
+# License: GPL (version 3 or any later version).
+# See LICENSE for details.
+# --- END COPYRIGHT BLOCK ---
+#
+
+from lib389.topologies import topology_st
+from lib389.dbgen import dbgen_users
+from lib389.ldclt import Ldclt
+from lib389.tasks import ImportTask
+from lib389._constants import DEFAULT_SUFFIX
+
+
+def test_stress_search_simple(topology_st):
+    """Test a simple stress test of searches on the directory server.
+
+    :id: 3786d01c-ea03-4655-a4f9-450693c75863
+    :setup: Standalone Instance
+    :steps:
+        1. Create test users
+        2. Import them
+        3. Stress test!
+    :expectedresults:
+        1. Success
+        2. Success
+        3. Results are written to /tmp
+    """
+
+    inst = topology_st.standalone
+    inst.config.set("nsslapd-verify-filter-schema", "off")
+    # Bump idllimit to test OR worst cases.
+ from lib389.config import LDBMConfig + lconfig = LDBMConfig(inst) + # lconfig.set("nsslapd-idlistscanlimit", '20000') + # lconfig.set("nsslapd-lookthroughlimit", '20000') + + ldif_dir = inst.get_ldif_dir() + import_ldif = ldif_dir + '/basic_import.ldif' + dbgen_users(inst, 10000, import_ldif, DEFAULT_SUFFIX) + + r = ImportTask(inst) + r.import_suffix_from_ldif(ldiffile=import_ldif, suffix=DEFAULT_SUFFIX) + r.wait() + + # Run a small to warm up the server's caches ... + l = Ldclt(inst) + l.search_loadtest(DEFAULT_SUFFIX, "(mail=XXXX@example.com)", rounds=1) + + # Now do it for realsies! + # l.search_loadtest(DEFAULT_SUFFIX, "(|(mail=XXXX@example.com)(nonexist=foo))", rounds=10) + l.search_loadtest(DEFAULT_SUFFIX, "(mail=XXXX@example.com)", rounds=10) diff --git a/dirsrvtests/tests/suites/__init__.py b/dirsrvtests/tests/suites/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/dirsrvtests/tests/suites/acl/__init__.py b/dirsrvtests/tests/suites/acl/__init__.py new file mode 100644 index 0000000..147ecba --- /dev/null +++ b/dirsrvtests/tests/suites/acl/__init__.py @@ -0,0 +1,3 @@ +""" + :Requirement: 389-ds-base: Access Control Instructions (ACI) +""" diff --git a/dirsrvtests/tests/suites/acl/aci_excl_filter_test.py b/dirsrvtests/tests/suites/acl/aci_excl_filter_test.py new file mode 100644 index 0000000..8e8c880 --- /dev/null +++ b/dirsrvtests/tests/suites/acl/aci_excl_filter_test.py @@ -0,0 +1,154 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2021 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. 
+# --- END COPYRIGHT BLOCK ---
+import logging
+import os, ldap
+import pytest
+from lib389.topologies import topology_st as topo
+from lib389._mapped_object import DSLdapObject
+from lib389.idm.organizationalunit import OrganizationalUnit
+from lib389.idm.user import UserAccounts
+from lib389._constants import DEFAULT_SUFFIX
+from lib389.idm.domain import Domain
+from lib389.idm.account import Accounts
+
+pytestmark = pytest.mark.tier1
+
+logging.getLogger(__name__).setLevel(logging.DEBUG)
+log = logging.getLogger(__name__)
+
+@pytest.fixture(scope="function")
+def add_anon_aci_access(topo, request):
+    # Add anonymous access aci
+    ACI_TARGET = "(targetattr != \"userpassword\")(target = \"ldap:///%s\")" % (DEFAULT_SUFFIX)
+    ACI_ALLOW = "(version 3.0; acl \"Anonymous Read access\"; allow (read,search,compare)"
+    ACI_SUBJECT = "(userdn=\"ldap:///anyone\");)"
+    ANON_ACI = ACI_TARGET + ACI_ALLOW + ACI_SUBJECT
+    suffix = Domain(topo.standalone, DEFAULT_SUFFIX)
+
+    try:
+        suffix.add('aci', ANON_ACI)
+    except ldap.TYPE_OR_VALUE_EXISTS:
+        pass
+    def fin():
+        suffix.delete()
+    request.addfinalizer(fin)
+
+
+def add_ou_entry(topo, name, myparent):
+
+    ou_dn = 'ou={},{}'.format(name, myparent)
+    ou = OrganizationalUnit(topo.standalone, dn=ou_dn)
+    assert ou.create(properties={'ou': name})
+    log.info('Organisation {} created for ou :{} .'.format(name, ou_dn))
+
+
+def add_user_entry(topo, user, name, pw, myparent):
+
+    dn = 'ou=%s,%s' % (name, myparent)
+    properties = {
+        'uid': name,
+        'cn': 'admin',
+        'sn': name,
+        'uidNumber': '1000',
+        'gidNumber': '2000',
+        'homeDirectory': '/home/{}'.format(name),
+        'telephonenumber': '+1 222 333-4444',
+        'userpassword': pw,
+    }
+
+    assert user.create(properties=properties)
+    log.info('User created for dn :{} .'.format(dn))
+    return user
+
+
+def test_aci_with_exclude_filter(topo, add_anon_aci_access):
+    """Test an ACI(Access control instruction) which contains an extensible filter.
+ + :id: 238da674-81d9-11eb-a965-98fa9ba19b65 + :setup: Standalone instance + :steps: + 1. Bind to a new Standalone instance + 2. Generate text for the Access Control Instruction(ACI) and add to the standalone instance + 3. Create a test user 'admin' with a marker -> deniedattr = 'telephonenumber' + 4. Create 2 top Organizational units (ou) under the same root suffix + 5. Create 2 test users for each Organizational unit (ou) above with the same username 'admin' + 6. Bind to the Standalone instance as the user 'admin' from the ou created in step 4 above + 7. Search for user(s) ' admin in the subtree that satisfy this criteria: DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE, cn_filter, [deniedattr, 'dn'] + 8. The search should return 2 entries with the username 'admin' + 9. Verify that the users found do not have the --> deniedattr = 'telephonenumber' marker + :expectedresults: + 1. Bind should be successful + 2. Operation should be successful + 3. Operation should be successful + 4. Operation to create 2 Orgs (ou) should be successful + 5. Operation to create 2 (admin*) users should be successful + 6. Operation should be successful + 7. Operation should be successful + 8. Should successfully return 2 users that match "admin*" + 9. 
PASS - users found do not have the --> deniedattr = 'telephonenumber' marker + + """ + + log.info('Create an OU for them') + ous = OrganizationalUnit(topo.standalone, DEFAULT_SUFFIX) + log.info('Create an top org users') + users = UserAccounts(topo.standalone, DEFAULT_SUFFIX) + log.info('Add aci which contains extensible filter.') + ouname = 'outest' + username = 'admin' + passwd = 'Password' + deniedattr = 'telephonenumber' + log.info('Add aci which contains extensible filter.') + + aci_text = ('(targetattr = "{}")'.format(deniedattr) + + '(target = "ldap:///{}")'.format(DEFAULT_SUFFIX) + + '(version 3.0;acl "admin-tel-matching-rule-outest";deny (all)' + + '(userdn = "ldap:///{}??sub?(&(cn={})(ou:dn:={}))");)'.format(DEFAULT_SUFFIX, username, ouname)) + + suffix = Domain(topo.standalone, DEFAULT_SUFFIX) + suffix.add('aci', aci_text) + log.info('Adding OU entries ...') + for idx in range(0, 2): + ou0 = 'OU%d' % idx + log.info('Adding "ou" : %s under "dn" : %s...' % (ou0, DEFAULT_SUFFIX)) + add_ou_entry(topo, ou0, DEFAULT_SUFFIX) + parent = 'ou=%s,%s' % (ou0, DEFAULT_SUFFIX) + log.info('Adding %s under %s...' % (ouname, parent)) + add_ou_entry(topo, ouname, parent) + user = UserAccounts(topo.standalone, parent, rdn=None) + + for idx in range(0, 2): + parent = 'ou=%s,ou=OU%d,%s' % (ouname, idx, DEFAULT_SUFFIX) + user = UserAccounts(topo.standalone, parent, rdn=None) + username = '{}{}'.format(username, idx) + log.info('Adding User: %s under %s...' 
+                 % (username, parent))
+        user = add_user_entry(topo, user, username, passwd, parent)
+
+    log.info('Bind as user %s' % username)
+    binddn_user = user.get(username)
+
+    conn = binddn_user.bind(passwd)
+    if not conn:
+        log.error(" {} failed to authenticate: ".format(binddn_user))
+        assert False
+
+    cn_filter = '(cn=%s)' % username
+    entries = Accounts(conn, DEFAULT_SUFFIX).filter('(cn=admin*)')
+    log.info('Verify 2 Entries returned for cn {}'.format(cn_filter))
+    assert len(entries) == 2
+    for entry in entries:
+        assert not entry.get_attr_val_utf8('telephonenumber')
+    log.info("Verified the entries do not contain 'telephonenumber' ")
+    log.info('Test complete')
+
+
+if __name__ == '__main__':
+    # Run isolated
+    # -s for DEBUG mode
+    CURRENT_FILE = os.path.realpath(__file__)
+    pytest.main(["-s", CURRENT_FILE])
diff --git a/dirsrvtests/tests/suites/acl/acivattr_test.py b/dirsrvtests/tests/suites/acl/acivattr_test.py
new file mode 100644
index 0000000..d55eea0
--- /dev/null
+++ b/dirsrvtests/tests/suites/acl/acivattr_test.py
@@ -0,0 +1,254 @@
+# --- BEGIN COPYRIGHT BLOCK ---
+# Copyright (C) 2019 Red Hat, Inc.
+# All rights reserved.
+#
+# License: GPL (version 3 or any later version).
+# See LICENSE for details.
+# --- END COPYRIGHT BLOCK --- + +import pytest, os, ldap +from lib389._constants import DEFAULT_SUFFIX, PW_DM +from lib389.idm.user import UserAccount +from lib389.idm.organization import Organization +from lib389.idm.organizationalunit import OrganizationalUnit +from lib389.cos import CosTemplate, CosClassicDefinition +from lib389.topologies import topology_st as topo +from lib389.idm.nscontainer import nsContainer +from lib389.idm.domain import Domain +from lib389.idm.role import FilteredRoles + +pytestmark = pytest.mark.tier1 + +DNBASE = "o=acivattr,{}".format(DEFAULT_SUFFIX) +ENG_USER = "cn=enguser1,ou=eng,{}".format(DNBASE) +SALES_UESER = "cn=salesuser1,ou=sales,{}".format(DNBASE) +ENG_MANAGER = "cn=engmanager1,ou=eng,{}".format(DNBASE) +SALES_MANAGER = "cn=salesmanager1,ou=sales,{}".format(DNBASE) +SALES_OU = "ou=sales,{}".format(DNBASE) +ENG_OU = "ou=eng,{}".format(DNBASE) +FILTERROLESALESROLE = "cn=FILTERROLESALESROLE,{}".format(DNBASE) +FILTERROLEENGROLE = "cn=FILTERROLEENGROLE,{}".format(DNBASE) + + +@pytest.fixture(scope="function") +def aci_of_user(request, topo): + aci_list = Domain(topo.standalone, DEFAULT_SUFFIX).get_attr_vals('aci') + + def finofaci(): + domain = Domain(topo.standalone, DEFAULT_SUFFIX) + domain.set('aci', None) + for i in aci_list: + domain.add("aci", i) + + request.addfinalizer(finofaci) + + +@pytest.fixture(scope="function") +def _add_user(request, topo): + org = Organization(topo.standalone).create(properties={"o": "acivattr"}, basedn=DEFAULT_SUFFIX) + org.add('aci', '(targetattr="*")(targetfilter="(nsrole=*)")(version 3.0; aci "tester"; ' + 'allow(all) userdn="ldap:///cn=enguser1,ou=eng,o=acivattr,{}";)'.format(DEFAULT_SUFFIX)) + + ou = OrganizationalUnit(topo.standalone, "ou=eng,o=acivattr,{}".format(DEFAULT_SUFFIX)) + ou.create(properties={'ou': 'eng'}) + + ou = OrganizationalUnit(topo.standalone, "ou=sales,o=acivattr,{}".format(DEFAULT_SUFFIX)) + ou.create(properties={'ou': 'sales'}) + + roles = 
FilteredRoles(topo.standalone, DNBASE) + roles.create(properties={'cn':'FILTERROLEENGROLE', 'nsRoleFilter':'cn=eng*'}) + roles.create(properties={'cn': 'FILTERROLESALESROLE', 'nsRoleFilter': 'cn=sales*'}) + + nsContainer(topo.standalone, + 'cn=cosClassicGenerateEmployeeTypeUsingnsroleTemplates,o=acivattr,{}'.format(DEFAULT_SUFFIX)).create( + properties={'cn': 'cosTemplates'}) + + properties = {'employeeType': 'EngType', 'cn':'"cn=filterRoleEngRole,o=acivattr,dc=example,dc=com",cn=cosClassicGenerateEmployeeTypeUsingnsroleTemplates,o=acivattr,dc=example,dc=com'} + CosTemplate(topo.standalone,'cn="cn=filterRoleEngRole,o=acivattr,dc=example,dc=com",' + 'cn=cosClassicGenerateEmployeeTypeUsingnsroleTemplates,o=acivattr,{}'.format(DEFAULT_SUFFIX)).\ + create(properties=properties) + + properties = {'employeeType': 'SalesType', 'cn': '"cn=filterRoleSalesRole,o=acivattr,dc=example,dc=com",cn=cosClassicGenerateEmployeeTypeUsingnsroleTemplates,o=acivattr,dc=example,dc=com'} + CosTemplate(topo.standalone, + 'cn="cn=filterRoleSalesRole,o=acivattr,dc=example,dc=com",cn=cosClassicGenerateEmployeeTypeUsingnsroleTemplates,' + 'o=acivattr,{}'.format(DEFAULT_SUFFIX)).create(properties=properties) + + properties = { + 'cosTemplateDn': 'cn=cosClassicGenerateEmployeeTypeUsingnsroleTemplates,o=acivattr,{}'.format(DEFAULT_SUFFIX), + 'cosAttribute': 'employeeType', 'cosSpecifier': 'nsrole', 'cn': 'cosClassicGenerateEmployeeTypeUsingnsrole'} + CosClassicDefinition(topo.standalone, + 'cn=cosClassicGenerateEmployeeTypeUsingnsrole,o=acivattr,{}'.format(DEFAULT_SUFFIX)).create( + properties=properties) + + properties = { + 'uid': 'salesuser1', + 'cn': 'salesuser1', + 'sn': 'user', + 'uidNumber': '1000', + 'gidNumber': '2000', + 'homeDirectory': '/home/' + 'salesuser1', + 'userPassword': PW_DM + } + user = UserAccount(topo.standalone, 'cn=salesuser1,ou=sales,o=acivattr,{}'.format(DEFAULT_SUFFIX)) + user.create(properties=properties) + + properties = { + 'uid': 'salesmanager1', + 'cn': 
'salesmanager1', + 'sn': 'user', + 'uidNumber': '1000', + 'gidNumber': '2000', + 'homeDirectory': '/home/' + 'salesmanager1', + 'userPassword': PW_DM, + } + user = UserAccount(topo.standalone, 'cn=salesmanager1,ou=sales,o=acivattr,{}'.format(DEFAULT_SUFFIX)) + user.create(properties=properties) + + properties = { + 'uid': 'enguser1', + 'cn': 'enguser1', + 'sn': 'user', + 'uidNumber': '1000', + 'gidNumber': '2000', + 'homeDirectory': '/home/' + 'enguser1', + 'userPassword': PW_DM + } + user = UserAccount(topo.standalone, 'cn=enguser1,ou=eng,o=acivattr,{}'.format(DEFAULT_SUFFIX)) + user.create(properties=properties) + + properties = { + 'uid': 'engmanager1', + 'cn': 'engmanager1', + 'sn': 'user', + 'uidNumber': '1000', + 'gidNumber': '2000', + 'homeDirectory': '/home/' + 'engmanager1', + 'userPassword': PW_DM + } + user = UserAccount(topo.standalone, 'cn=engmanager1,ou=eng,o=acivattr,{}'.format(DEFAULT_SUFFIX)) + user.create(properties=properties) + + def fin(): + for DN in [ENG_USER,SALES_UESER,ENG_MANAGER,SALES_MANAGER,FILTERROLESALESROLE,FILTERROLEENGROLE,ENG_OU,SALES_OU, + 'cn="cn=filterRoleEngRole,o=acivattr,dc=example,dc=com",' + 'cn=cosClassicGenerateEmployeeTypeUsingnsroleTemplates,o=acivattr,dc=example,dc=com', + 'cn="cn=filterRoleSalesRole,o=acivattr,dc=example,dc=com",' + 'cn=cosClassicGenerateEmployeeTypeUsingnsroleTemplates,o=acivattr,{}'.format(DEFAULT_SUFFIX), 'cn=cosClassicGenerateEmployeeTypeUsingnsroleTemplates,o=acivattr,{}'.format(DEFAULT_SUFFIX), + 'cn=cosClassicGenerateEmployeeTypeUsingnsrole,o=acivattr,{}'.format(DEFAULT_SUFFIX), DNBASE]: + UserAccount(topo.standalone, DN).delete() + + request.addfinalizer(fin) + + +REAL_EQ_ACI = '(targetattr="*")(targetfilter="(cn=engmanager1)") (version 3.0; acl "real-eq"; allow (all) userdn="ldap:///{}";)'.format(ENG_USER) +REAL_PRES_ACI = '(targetattr="*")(targetfilter="(cn=*)") (version 3.0; acl "real-pres"; allow (all) userdn="ldap:///{}";)'.format(ENG_USER) +REAL_SUB_ACI = 
'(targetattr="*")(targetfilter="(cn=eng*)") (version 3.0; acl "real-sub"; allow (all) userdn="ldap:///{}";)'.format(ENG_USER) +ROLE_EQ_ACI = '(targetattr="*")(targetfilter="(nsrole=cn=filterroleengrole,o=sun.com)") (version 3.0; acl "role-eq"; allow (all) userdn="ldap:///{}";)'.format(ENG_USER) +ROLE_PRES_ACI = '(targetattr="*")(targetfilter="(nsrole=*)") (version 3.0; acl "role-pres"; allow (all) userdn="ldap:///{}";)'.format(ENG_USER) +ROLE_SUB_ACI = '(targetattr="*")(targetfilter="(nsrole=cn=filterroleeng*)") (version 3.0; acl "role-sub"; allow (all) userdn="ldap:///{}";)'.format(ENG_USER) +COS_EQ_ACI = '(targetattr="*")(targetfilter="(employeetype=engtype)") (version 3.0; acl "cos-eq"; allow (all) userdn="ldap:///{}";)'.format(ENG_USER) +COS_PRES_ACI = '(targetattr="*")(targetfilter="(employeetype=*)") (version 3.0; acl "cos-pres"; allow (all) userdn="ldap:///{}";)'.format(ENG_USER) +COS_SUB_ACI = '(targetattr="*")(targetfilter="(employeetype=eng*)") (version 3.0; acl "cos-sub"; allow (all) userdn="ldap:///{}";)'.format(ENG_USER) +LDAPURL_ACI = '(targetattr="*")(version 3.0; acl "url"; allow (all) userdn="ldap:///o=acivattr,dc=example,dc=com??sub?(nsrole=*eng*)";)' + + +@pytest.mark.parametrize("user,entry,aci", [ + (ENG_USER, ENG_MANAGER, REAL_EQ_ACI), + (ENG_USER, ENG_MANAGER, REAL_PRES_ACI), + (ENG_USER, ENG_MANAGER, REAL_SUB_ACI), + (ENG_USER, ENG_MANAGER, ROLE_PRES_ACI), + (ENG_USER, ENG_MANAGER, ROLE_SUB_ACI), + (ENG_USER, ENG_MANAGER, COS_EQ_ACI), + (ENG_USER, ENG_MANAGER, COS_PRES_ACI), + (ENG_USER, ENG_MANAGER, COS_SUB_ACI), + (ENG_USER, ENG_MANAGER, LDAPURL_ACI), +], ids=[ + "(ENG_USER, ENG_MANAGER, REAL_EQ_ACI)", + "(ENG_USER, ENG_MANAGER, REAL_PRES_ACI)", + "(ENG_USER, ENG_MANAGER, REAL_SUB_ACI)", + "(ENG_USER, ENG_MANAGER, ROLE_PRES_ACI)", + '(ENG_USER, ENG_MANAGER, ROLE_SUB_ACI)', + '(ENG_USER, ENG_MANAGER, COS_EQ_ACI)', + '(ENG_USER, ENG_MANAGER, COS_PRES_ACI)', + '(ENG_USER, ENG_MANAGER, COS_SUB_ACI)', + '(ENG_USER, ENG_MANAGER, LDAPURL_ACI)', 
+]) +def test_positive(topo, _add_user, aci_of_user, user, entry, aci): + """Positive testing of ACLs + + :id: ba6d5e9c-786b-11e8-860d-8c16451d917b + :parametrized: yes + :setup: server + :steps: + 1. Add test entry + 2. Add ACI + 3. ACI role should be followed + :expectedresults: + 1. Entry should be added + 2. Operation should succeed + 3. Operation should succeed + """ + # set aci + Domain(topo.standalone, DNBASE).set("aci", aci) + # create connection + conn = UserAccount(topo.standalone, user).bind(PW_DM) + # according to the aci , user will be able to change description + UserAccount(conn, entry).replace("description", "Fred") + assert UserAccount(conn, entry).present('description') + + +@pytest.mark.parametrize("user,entry,aci", [ + (ENG_USER, SALES_MANAGER, REAL_EQ_ACI), + (ENG_USER, SALES_OU, REAL_PRES_ACI), + (ENG_USER, SALES_MANAGER, REAL_SUB_ACI), + (ENG_USER, SALES_MANAGER, ROLE_EQ_ACI), + (ENG_USER, SALES_OU, ROLE_PRES_ACI), + (ENG_USER, SALES_MANAGER, ROLE_SUB_ACI), + (ENG_USER, SALES_MANAGER, COS_EQ_ACI), + (ENG_USER, SALES_OU, COS_PRES_ACI), + (ENG_USER, SALES_MANAGER, COS_SUB_ACI), + (SALES_UESER, SALES_MANAGER, LDAPURL_ACI), + (ENG_USER, ENG_MANAGER, ROLE_EQ_ACI), +], ids=[ + + "(ENG_USER, SALES_MANAGER, REAL_EQ_ACI)", + "(ENG_USER, SALES_OU, REAL_PRES_ACI)", + "(ENG_USER, SALES_MANAGER, REAL_SUB_ACI)", + "(ENG_USER, SALES_MANAGER, ROLE_EQ_ACI)", + "(ENG_USER, SALES_MANAGER, ROLE_PRES_ACI)", + '(ENG_USER, SALES_MANAGER, ROLE_SUB_ACI)', + '(ENG_USER, SALES_MANAGER, COS_EQ_ACI)', + '(ENG_USER, SALES_MANAGER, COS_PRES_ACI)', + '(ENG_USER, SALES_MANAGER, COS_SUB_ACI)', + '(SALES_UESER, SALES_MANAGER, LDAPURL_ACI)', + '(ENG_USER, ENG_MANAGER, ROLE_EQ_ACI)' + + +]) +def test_negative(topo, _add_user, aci_of_user, user, entry, aci): + """Negative testing of ACLs + + :id: c4c887c2-786b-11e8-a328-8c16451d917b + :parametrized: yes + :setup: server + :steps: + 1. Add test entry + 2. Add ACI + 3. ACI role should be followed + :expectedresults: + 1. 
Entry should be added + 2. Operation should succeed + 3. Operation should not succeed + """ + # set aci + Domain(topo.standalone, DNBASE).set("aci", aci) + # create connection + conn = UserAccount(topo.standalone, user).bind(PW_DM) + # according to the aci , user will not be able to change description + with pytest.raises(ldap.INSUFFICIENT_ACCESS): + UserAccount(conn, entry).replace("description", "Fred") + + +if __name__ == "__main__": + CURRENT_FILE = os.path.realpath(__file__) + pytest.main("-s -v %s" % CURRENT_FILE) diff --git a/dirsrvtests/tests/suites/acl/acl_deny_test.py b/dirsrvtests/tests/suites/acl/acl_deny_test.py new file mode 100644 index 0000000..96d08e9 --- /dev/null +++ b/dirsrvtests/tests/suites/acl/acl_deny_test.py @@ -0,0 +1,208 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2020 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. +# --- END COPYRIGHT BLOCK --- +# +import logging +import pytest +import os +import ldap +import time +from lib389._constants import * +from lib389.topologies import topology_st as topo +from lib389.idm.user import UserAccount, TEST_USER_PROPERTIES +from lib389.idm.domain import Domain + +pytestmark = pytest.mark.tier1 + +DEBUGGING = os.getenv("DEBUGGING", default=False) +if DEBUGGING: + logging.getLogger(__name__).setLevel(logging.DEBUG) +else: + logging.getLogger(__name__).setLevel(logging.INFO) +log = logging.getLogger(__name__) + +BIND_DN2 = 'uid=tuser,ou=People,dc=example,dc=com' +BIND_RDN2 = 'tuser' +BIND_DN = 'uid=tuser1,ou=People,dc=example,dc=com' +BIND_RDN = 'tuser1' +SRCH_FILTER = "uid=tuser1" +SRCH_FILTER2 = "uid=tuser" + +aci_list_A = ['(targetattr != "userPassword") (version 3.0; acl "Anonymous access"; allow (read, search, compare)userdn = "ldap:///anyone";)', + '(targetattr = "*") (version 3.0;acl "allow tuser";allow (all)(userdn = "ldap:///uid=tuser5,ou=People,dc=example,dc=com");)', + '(targetattr != "uid || mail") (version 3.0; 
# ACI set A and set B for test_multi_deny_aci.  Both deny anonymous access to
# the 'uid'/'mail' attributes and to entries matching (inetUserStatus=1); the
# sets differ only in formatting, which must not change the ACL evaluation.
#
# NOTE(review): the first element of aci_list_A begins before this chunk of
# the patch; it is reconstructed to mirror the corresponding aci_list_B
# entry — TODO confirm against the complete upstream file.
aci_list_A = ['(targetattr != "userPassword") (version 3.0; acl "Anonymous access"; allow (read, search, compare)userdn = "ldap:///anyone";)',
              '(targetattr != "uid || mail") (version 3.0; acl "deny-attrs"; deny (all) (userdn = "ldap:///anyone");)',
              '(targetfilter = "(inetUserStatus=1)") ( version 3.0; acl "deny-specific-entry"; deny(all) (userdn = "ldap:///anyone");)']

aci_list_B = ['(targetattr != "userPassword") (version 3.0; acl "Anonymous access"; allow (read, search, compare)userdn = "ldap:///anyone";)',
              '(targetattr != "uid || mail") (version 3.0; acl "deny-attrs"; deny (all) (userdn = "ldap:///anyone");)',
              '(targetfilter = "(inetUserStatus=1)") ( version 3.0; acl "deny-specific-entry"; deny(all) (userdn = "ldap:///anyone");)']


@pytest.fixture(scope="module")
def aci_setup(topo):
    """Create the two bind users used by test_multi_deny_aci.

    BIND_DN gets inetUserStatus=1 so it matches the deny-specific-entry
    target filter; BIND_DN2 does not.
    """
    topo.standalone.log.info("Add {}".format(BIND_DN))
    user = UserAccount(topo.standalone, BIND_DN)
    user_props = TEST_USER_PROPERTIES.copy()
    user_props.update({'sn': BIND_RDN,
                       'cn': BIND_RDN,
                       'uid': BIND_RDN,
                       'inetUserStatus': '1',
                       'objectclass': 'extensibleObject',
                       'userpassword': PASSWORD})
    user.create(properties=user_props, basedn=SUFFIX)

    topo.standalone.log.info("Add {}".format(BIND_DN2))
    user2 = UserAccount(topo.standalone, BIND_DN2)
    user_props = TEST_USER_PROPERTIES.copy()
    user_props.update({'sn': BIND_RDN2,
                       'cn': BIND_RDN2,
                       'uid': BIND_RDN2,
                       'userpassword': PASSWORD})
    user2.create(properties=user_props, basedn=SUFFIX)


def _expect_no_entries(inst, srch_filter, msg):
    """Subtree-search DEFAULT_SUFFIX with srch_filter; fail if anything is returned."""
    entries = inst.search_s(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE, srch_filter)
    if entries and entries[0]:
        inst.log.fatal(msg)
        assert False


def _expect_entries(inst, srch_filter, msg):
    """Subtree-search DEFAULT_SUFFIX with srch_filter; fail if nothing is returned."""
    entries = inst.search_s(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE, srch_filter)
    if entries is None or len(entries) == 0:
        inst.log.fatal(msg)
        assert False


def test_multi_deny_aci(topo, aci_setup):
    """Test that multiple deny rules work, and that the cache properly
    stores the result

    :id: 294c366d-850e-459e-b5a0-3cc828ec3aca
    :setup: Standalone Instance
    :steps:
        1. Add aci_list_A aci's and verify two searches on the same connection
           behave the same
        2. Add aci_list_B aci's and verify search fails as expected
    :expectedresults:
        1. Both searches do not return any entries
        2. Searches do not return any entries
    """

    if DEBUGGING:
        # Maybe add aci logging?
        pass

    suffix = Domain(topo.standalone, DEFAULT_SUFFIX)
    inst = topo.standalone

    for run in range(2):
        inst.log.info("Pass " + str(run + 1))

        # Test ACI List A: two identical searches on the same connection must
        # give the same (empty) result — exercises the ACL result cache.
        inst.log.info("Testing two searches behave the same...")
        inst.simple_bind_s(DN_DM, PASSWORD)
        suffix.set('aci', aci_list_A, ldap.MOD_REPLACE)
        time.sleep(1)

        inst.simple_bind_s(BIND_DN, PASSWORD)
        _expect_no_entries(inst, SRCH_FILTER, "Incorrectly got an entry returned from search 1")
        _expect_no_entries(inst, SRCH_FILTER, "Incorrectly got an entry returned from search 2")
        _expect_entries(inst, SRCH_FILTER2, "Failed to get entry as good user")
        _expect_entries(inst, SRCH_FILTER2, "Failed to get entry as good user")

        # Bind a different user who has rights
        inst.simple_bind_s(BIND_DN2, PASSWORD)
        _expect_entries(inst, SRCH_FILTER2, "Failed to get entry as good user")
        _expect_entries(inst, SRCH_FILTER2, "Failed to get entry as good user (2)")

        if run > 0:
            # Second pass
            inst.restart()

        # Reset ACI's and do the second test
        inst.log.info("Testing search does not return any entries...")
        inst.simple_bind_s(DN_DM, PASSWORD)
        suffix.set('aci', aci_list_B, ldap.MOD_REPLACE)
        time.sleep(1)

        inst.simple_bind_s(BIND_DN, PASSWORD)
        _expect_no_entries(inst, SRCH_FILTER, "Incorrectly got an entry returned from search 1")
        _expect_no_entries(inst, SRCH_FILTER, "Incorrectly got an entry returned from search 2")

        if run > 0:
            # Second pass
            inst.restart()

        # Bind as different user who has rights
        inst.simple_bind_s(BIND_DN2, PASSWORD)
        _expect_entries(inst, SRCH_FILTER2, "Failed to get entry as good user")
        _expect_entries(inst, SRCH_FILTER2, "Failed to get entry as good user (2)")
        _expect_no_entries(inst, SRCH_FILTER, "Incorrectly got an entry returned from search 1")
        _expect_no_entries(inst, SRCH_FILTER, "Incorrectly got an entry returned from search 2")

        # back to user 1
        inst.simple_bind_s(BIND_DN, PASSWORD)
        _expect_entries(inst, SRCH_FILTER2, "Failed to get entry as user1")
        _expect_entries(inst, SRCH_FILTER2, "Failed to get entry as user1 (2)")
        _expect_no_entries(inst, SRCH_FILTER, "Incorrectly got an entry returned from search 1")
        _expect_no_entries(inst, SRCH_FILTER, "Incorrectly got an entry returned from search 2")

    inst.log.info("Test PASSED")


if __name__ == '__main__':
    # Run isolated
    # -s for DEBUG mode
    CURRENT_FILE = os.path.realpath(__file__)
    pytest.main(["-s", CURRENT_FILE])
# --- BEGIN COPYRIGHT BLOCK ---
# Copyright (C) 2020 Red Hat, Inc.
# All rights reserved.
#
# License: GPL (version 3 or any later version).
# See LICENSE for details.
# --- END COPYRIGHT BLOCK ---
#
import pytest
from ldap.controls.simple import GetEffectiveRightsControl
from lib389.tasks import *
from lib389.utils import *
from lib389.schema import Schema
from lib389.idm.domain import Domain
from lib389.idm.user import UserAccount, UserAccounts, TEST_USER_PROPERTIES
from lib389.idm.organizationalunit import OrganizationalUnits
from lib389.idm.organizationalrole import OrganizationalRole, OrganizationalRoles
from lib389.topologies import topology_m2
from lib389._constants import SUFFIX, DN_DM, DEFAULT_SUFFIX, PASSWORD

pytestmark = pytest.mark.tier1

logging.getLogger(__name__).setLevel(logging.DEBUG)
log = logging.getLogger(__name__)

TEST_REPL_DN = "cn=test_repl, %s" % SUFFIX

# Containers used by the MODDN staging -> production scenarios.
STAGING_CN = "staged user"
PRODUCTION_CN = "accounts"
EXCEPT_CN = "excepts"

STAGING_DN = "cn=%s,%s" % (STAGING_CN, SUFFIX)
PRODUCTION_DN = "cn=%s,%s" % (PRODUCTION_CN, SUFFIX)
PROD_EXCEPT_DN = "cn=%s,%s" % (EXCEPT_CN, PRODUCTION_DN)

# Wildcard patterns that do (or deliberately do not) match the containers.
STAGING_PATTERN = "cn=%s*,%s" % (STAGING_CN[:2], SUFFIX)
PRODUCTION_PATTERN = "cn=%s*,%s" % (PRODUCTION_CN[:2], SUFFIX)
BAD_STAGING_PATTERN = "cn=bad*,%s" % (SUFFIX)
BAD_PRODUCTION_PATTERN = "cn=bad*,%s" % (SUFFIX)

# Unprivileged identity the MODDN tests bind as.
BIND_RDN = "bind_entry"
BIND_DN = "uid=%s,%s" % (BIND_RDN, SUFFIX)
BIND_PW = "password"

NEW_ACCOUNT = "new_account"
MAX_ACCOUNTS = 20

# cn=config attribute that switches MODDN between 'moddn aci' and 'add aci' checking.
CONFIG_MODDN_ACI_ATTR = "nsslapd-moddn-aci"

SRC_ENTRY_CN = "tuser"
EXT_RDN = "01"
DST_ENTRY_CN = SRC_ENTRY_CN + EXT_RDN

SRC_ENTRY_DN = "cn=%s,%s" % (SRC_ENTRY_CN, SUFFIX)
DST_ENTRY_DN = "cn=%s,%s" % (DST_ENTRY_CN, SUFFIX)

TARGET_ATTR_SEARCH = 'description'


def add_attr(topology_m2, attr_name):
    """Add an attributeType named attr_name (INTEGER syntax) to supplier1's schema."""

    # NOTE(review): 'filteri' looks like a typo in the DESC text; DESC is only
    # a human-readable description, so it is kept as-is.
    ATTR_VALUE = """(NAME '%s' \
                  DESC 'Attribute filteri-Multi-Valued' \
                  SYNTAX 1.3.6.1.4.1.1466.115.121.1.27)""" % attr_name
    schema = Schema(topology_m2.ms["supplier1"])
    schema.add('attributeTypes', ATTR_VALUE)


@pytest.fixture(params=["lang-ja", "binary", "phonetic"])
def aci_with_attr_subtype(request, topology_m2):
    """Add (and delete on teardown) an ACI with an attribute subtype in DEFAULT_SUFFIX.

    Yields the full ACI value so tests can assert its presence.
    """
    TARGET_ATTR = 'protectedOperation'
    USER_ATTR = 'allowedToPerform'
    SUBTYPE = request.param
    suffix = Domain(topology_m2.ms["supplier1"], DEFAULT_SUFFIX)

    log.info("========Executing test with '%s' subtype========" % SUBTYPE)
    log.info(" Add a target attribute")
    add_attr(topology_m2, TARGET_ATTR)

    log.info(" Add a user attribute")
    add_attr(topology_m2, USER_ATTR)

    ACI_TARGET = '(targetattr=%s;%s)' % (TARGET_ATTR, SUBTYPE)
    ACI_ALLOW = '(version 3.0; acl "test aci for subtypes"; allow (read) '
    ACI_SUBJECT = 'userattr = "%s;%s#GROUPDN";)' % (USER_ATTR, SUBTYPE)
    ACI_BODY = ACI_TARGET + ACI_ALLOW + ACI_SUBJECT

    log.info("Add an ACI with attribute subtype")
    suffix.add('aci', ACI_BODY)

    yield ACI_BODY

    # Teardown (was request.addfinalizer): remove the ACI added above.
    log.info("Finally, delete an ACI with the '%s' subtype" % SUBTYPE)
    suffix.remove('aci', ACI_BODY)


@pytest.fixture(scope="function")
def user_for_test(request, topology_m2):
    """Create a test user plus an 'Accounting' OU containing a second user.

    Yields the user nested under ou=Accounting; all created entries are
    removed on teardown (children first, so the OU delete succeeds).
    """
    s1 = topology_m2.ms["supplier1"]

    log.info("Creating test user...")
    users = UserAccounts(s1, DEFAULT_SUFFIX)
    user1000 = users.create_test_user()
    user1000.set("userPassword", PW_DM)

    log.info("Creating 'Accounting' organizational unit...")
    ous = OrganizationalUnits(s1, DEFAULT_SUFFIX)
    accounting = ous.create(properties={'ou': 'Accounting'})

    users = UserAccounts(s1, DEFAULT_SUFFIX, rdn='ou=Accounting')
    user1 = users.create_test_user(uid=1, gid=1)

    yield user1

    # Teardown (was request.addfinalizer).
    log.info("Deleting test entries post test execution...")
    user1.delete()
    user1000.delete()
    accounting.delete()
@pytest.mark.parametrize('subtype', (';lang-ja', None))
def test_aci_subtype_search(topology_m2, user_for_test, subtype):
    """Test to verify the ACI subtype search functionality

    :id: 83ac5e20-91e8-408e-a253-05787569a580
    :parametrized: yes
    :setup: MMR with two suppliers
    :steps:
        1. Set password for the test user.
        2. Add an ACI with attribute subtype.
        3. Set attributes for the test user.
        4. Search for the added attribute.
    :expectedresults:
        1. Password for the test user is successfully set.
        2. The ACI with the specified subtype (if any) is successfully added.
        3. The attributes are successfully set for the test user.
        4. If a subtype is defined:
           - The search retrieves the attribute with the subtype.
           - Attributes 'description' and 'description;binary' are not retrieved.
           If no subtype is defined:
           - The search retrieves the attributes 'description' and 'description;binary'.
           - The attribute with the subtype is also retrieved.
    """

    TEST_VALUE = "test"
    USER_SEARCH_DN = user_for_test.dn
    aci_attr = TARGET_ATTR_SEARCH

    if subtype is not None:
        aci_attr += subtype

    s1 = topology_m2.ms["supplier1"]

    log.info("Setting password for the test user...")
    users = UserAccounts(s1, DEFAULT_SUFFIX)
    user1000 = users.get("test_user_1000")
    user1000.set("userPassword", PW_DM)

    ous = OrganizationalUnits(s1, DEFAULT_SUFFIX)
    accounting = ous.get("Accounting")

    # Construct the ACI body: user1000 may read/compare/search only
    # 'aci_attr' and objectclass on the test entry.
    ACI_TARGET = '(targetattr = "%s || objectclass")' % aci_attr
    ACI_ALLOW = '(target = "ldap:///%s") (version 3.0; acl "Read Entries"; allow (read,compare,search) ' % user_for_test.dn
    ACI_SUBJECT = 'userdn = "ldap:///%s";)' % user1000.dn
    ACI_BODY = ACI_TARGET + ACI_ALLOW + ACI_SUBJECT

    log.info("Adding an ACI with attribute subtype...")
    accounting.add('aci', ACI_BODY)

    # Add attributes that we'll search to the test entry
    log.info("Setting attributes for the test user...")
    user_for_test.add('description', TEST_VALUE)
    user_for_test.add('description;binary', TEST_VALUE)
    if subtype is not None:
        user_for_test.add(aci_attr, TEST_VALUE)

    conn = user1000.bind(PW_DM)

    log.info("Searching for the added attribute...")
    try:
        entries = conn.search_s(USER_SEARCH_DN, ldap.SCOPE_BASE, '(objectclass=*)')
        entry = str(entries[0])
        assert f'{aci_attr}: {TEST_VALUE}' in entry

        if subtype is not None:
            assert f'description: {TEST_VALUE}' not in entry
            assert f'description;binary: {TEST_VALUE}' not in entry
        else:
            assert f'description: {TEST_VALUE}' in entry
            assert f'description;binary: {TEST_VALUE}' in entry

        # Search specifically for the aci attribute
        entries = conn.search_s(USER_SEARCH_DN, ldap.SCOPE_BASE, '(objectclass=*)', [aci_attr])
        entry = str(entries[0])
        assert f'{aci_attr}: {TEST_VALUE}' in entry

        if subtype is not None:
            assert f'description: {TEST_VALUE}' not in entry
            assert f'description;binary: {TEST_VALUE}' not in entry
        else:
            assert f'description: {TEST_VALUE}' in entry
            assert f'description;binary: {TEST_VALUE}' in entry

    except ldap.LDAPError as e:
        # Bug fix: python-ldap exceptions have no '.message' attribute on
        # Python 3, so the original 'e.message["desc"]' raised AttributeError
        # and masked the real failure.  str(e) renders the error info.
        log.fatal('Search failed, error: ' + str(e))
        assert False


def test_aci_attr_subtype_targetattr(topology_m2, aci_with_attr_subtype):
    """Checks, that ACIs allow attribute subtypes in the targetattr keyword

    :id: a99ccda0-5d0b-4d41-99cc-c5e207b3b687
    :parametrized: yes
    :setup: MMR with two suppliers,
            Define two attributes in the schema - targetattr and userattr,
            Add an ACI with attribute subtypes - "lang-ja", "binary", "phonetic"
            one by one
    :steps:
        1. Search for the added attribute during setup
           one by one for each subtypes "lang-ja", "binary", "phonetic"
    :expectedresults:
        1. Attributes should be found successfully
           one by one for each subtypes "lang-ja", "binary", "phonetic"
    """

    log.info("Search for the added attribute")
    try:
        entries = topology_m2.ms["supplier1"].search_s(DEFAULT_SUFFIX,
                                                       ldap.SCOPE_BASE,
                                                       '(objectclass=*)', ['aci'])
        entry = str(entries[0])
        assert aci_with_attr_subtype in entry
        log.info("The added attribute was found")

    except ldap.LDAPError as e:
        # Bug fix: same Python 3 '.message' issue as above.
        log.fatal('Search failed, error: ' + str(e))
        assert False


def _bind_manager(topology_m2):
    """Rebind supplier1's connection as Directory Manager."""
    topology_m2.ms["supplier1"].log.info("Bind as %s " % DN_DM)
    topology_m2.ms["supplier1"].simple_bind_s(DN_DM, PASSWORD)


def _bind_normal(topology_m2):
    """Rebind supplier1's connection as the unprivileged bind_entry user."""
    topology_m2.ms["supplier1"].log.info("Bind as %s" % BIND_DN)
    topology_m2.ms["supplier1"].simple_bind_s(BIND_DN, BIND_PW)


def _moddn_aci_deny_tree(topology_m2, mod_type=None,
                         target_from=STAGING_DN, target_to=PROD_EXCEPT_DN):
    """It denies the access moddn_to in cn=except,cn=accounts,SUFFIX"""

    assert mod_type is not None

    ACI_TARGET_FROM = ""
    ACI_TARGET_TO = ""
    if target_from:
        ACI_TARGET_FROM = "(target_from = \"ldap:///%s\")" % (target_from)
    if target_to:
        ACI_TARGET_TO = "(target_to = \"ldap:///%s\")" % (target_to)

    ACI_ALLOW = "(version 3.0; acl \"Deny MODDN to prod_except\"; deny (moddn)"
    ACI_SUBJECT = " userdn = \"ldap:///%s\";)" % BIND_DN
    ACI_BODY = ACI_TARGET_TO + ACI_TARGET_FROM + ACI_ALLOW + ACI_SUBJECT
    topology_m2.ms["supplier1"].log.info("Add a DENY aci under %s " % PROD_EXCEPT_DN)
    prod_except = OrganizationalRole(topology_m2.ms["supplier1"], PROD_EXCEPT_DN)
    prod_except.set('aci', ACI_BODY, mod_type)
def _add_write_aci(topology_m2, subtree_dn, acl_name, mod_type):
    """Apply (per mod_type) a 'write uid' ACI for BIND_DN on entries under subtree_dn.

    Shared implementation for the staging and production variants below.
    """
    ACI_TARGET = "(targetattr= \"uid\")(target=\"ldap:///uid=*,%s\")" % subtree_dn
    ACI_ALLOW = "(version 3.0; acl \"%s\"; allow (write)" % acl_name
    ACI_SUBJECT = " userdn = \"ldap:///%s\";)" % BIND_DN
    ACI_BODY = ACI_TARGET + ACI_ALLOW + ACI_SUBJECT
    suffix = Domain(topology_m2.ms["supplier1"], SUFFIX)
    suffix.set('aci', ACI_BODY, mod_type)


def _write_aci_staging(topology_m2, mod_type=None):
    """Add/remove the 'write uid' right for BIND_DN on staging entries."""
    assert mod_type is not None
    _add_write_aci(topology_m2, STAGING_DN, "write staging entries", mod_type)


def _write_aci_production(topology_m2, mod_type=None):
    """Add/remove the 'write uid' right for BIND_DN on production entries."""
    assert mod_type is not None
    _add_write_aci(topology_m2, PRODUCTION_DN, "write production entries", mod_type)


def _moddn_aci_staging_to_production(topology_m2, mod_type=None,
                                     target_from=STAGING_DN, target_to=PRODUCTION_DN):
    """Add/remove the moddn ACI allowing BIND_DN to move entries staging -> production.

    Also applies the matching 'write uid' ACI on staging, which MODDN needs.
    """
    assert mod_type is not None

    ACI_TARGET_FROM = ""
    ACI_TARGET_TO = ""
    if target_from:
        ACI_TARGET_FROM = "(target_from = \"ldap:///%s\")" % (target_from)
    if target_to:
        ACI_TARGET_TO = "(target_to = \"ldap:///%s\")" % (target_to)

    ACI_ALLOW = "(version 3.0; acl \"MODDN from staging to production\"; allow (moddn)"
    ACI_SUBJECT = " userdn = \"ldap:///%s\";)" % BIND_DN
    ACI_BODY = ACI_TARGET_FROM + ACI_TARGET_TO + ACI_ALLOW + ACI_SUBJECT
    suffix = Domain(topology_m2.ms["supplier1"], SUFFIX)
    suffix.set('aci', ACI_BODY, mod_type)

    _write_aci_staging(topology_m2, mod_type=mod_type)


def _moddn_aci_from_production_to_staging(topology_m2, mod_type=None):
    """Add/remove the moddn ACI allowing BIND_DN to move entries production -> staging.

    Also applies the matching 'write uid' ACI on production.
    """
    assert mod_type is not None

    ACI_TARGET = "(target_from = \"ldap:///%s\") (target_to = \"ldap:///%s\")" % (
        PRODUCTION_DN, STAGING_DN)
    ACI_ALLOW = "(version 3.0; acl \"MODDN from production to staging\"; allow (moddn)"
    ACI_SUBJECT = " userdn = \"ldap:///%s\";)" % BIND_DN
    ACI_BODY = ACI_TARGET + ACI_ALLOW + ACI_SUBJECT
    suffix = Domain(topology_m2.ms["supplier1"], SUFFIX)
    suffix.set('aci', ACI_BODY, mod_type)

    _write_aci_production(topology_m2, mod_type=mod_type)


@pytest.fixture(scope="module")
def moddn_setup(topology_m2):
    """Creates
       - a staging DIT
       - a production DIT
       - add accounts in staging DIT
       - enable ACL logging (commented for performance reason)
    """

    m1 = topology_m2.ms["supplier1"]
    o_roles = OrganizationalRoles(m1, SUFFIX)

    m1.log.info("\n\n######## INITIALIZATION ########\n")

    # entry used to bind with
    m1.log.info("Add {}".format(BIND_DN))
    user = UserAccount(m1, BIND_DN)
    user_props = TEST_USER_PROPERTIES.copy()
    user_props.update({'sn': BIND_RDN,
                       'cn': BIND_RDN,
                       'uid': BIND_RDN,
                       'userpassword': BIND_PW})
    user.create(properties=user_props, basedn=SUFFIX)

    # Add anonymous read aci
    ACI_TARGET = "(target = \"ldap:///%s\")(targetattr=\"*\")" % (SUFFIX)
    ACI_ALLOW = "(version 3.0; acl \"Anonymous Read access\"; allow (read,search,compare)"
    ACI_SUBJECT = " userdn = \"ldap:///anyone\";)"
    ACI_BODY = ACI_TARGET + ACI_ALLOW + ACI_SUBJECT
    suffix = Domain(m1, SUFFIX)
    suffix.add('aci', ACI_BODY)

    # DIT for staging
    m1.log.info("Add {}".format(STAGING_DN))
    o_roles.create(properties={'cn': STAGING_CN, 'description': "staging DIT"})

    # DIT for production
    m1.log.info("Add {}".format(PRODUCTION_DN))
    o_roles.create(properties={'cn': PRODUCTION_CN, 'description': "production DIT"})

    # DIT for production/except
    m1.log.info("Add {}".format(PROD_EXCEPT_DN))
    o_roles_prod = OrganizationalRoles(m1, PRODUCTION_DN)
    o_roles_prod.create(properties={'cn': EXCEPT_CN, 'description': "production except DIT"})

    # enable acl error logging
    # mod = [(ldap.MOD_REPLACE, 'nsslapd-errorlog-level', '128')]
    # m1.modify_s(DN_CONFIG, mod)
    # topology_m2.ms["supplier2"].modify_s(DN_CONFIG, mod)

    # add dummy entries in the staging DIT
    staging_users = UserAccounts(m1, SUFFIX, rdn="cn={}".format(STAGING_CN))
    user_props = TEST_USER_PROPERTIES.copy()
    for cpt in range(MAX_ACCOUNTS):
        name = "{}{}".format(NEW_ACCOUNT, cpt)
        user_props.update({'sn': name, 'cn': name, 'uid': name})
        staging_users.create(properties=user_props)
def test_mode_default_add_deny(topology_m2, moddn_setup):
    """Tests that the ADD operation fails (no ADD aci on production)

    :id: 301d41d3-b8d8-44c5-8eb9-c2d2816b5a4f
    :setup: MMR with two suppliers,
            M1 - staging DIT
            M2 - production DIT
            add test accounts in staging DIT
    :steps:
        1. Add an entry in production
    :expectedresults:
        1. It should fail due to INSUFFICIENT_ACCESS
    """

    topology_m2.ms["supplier1"].log.info("\n\n######## mode moddn_aci : ADD (should fail) ########\n")

    _bind_normal(topology_m2)

    # First try to add an entry in production => INSUFFICIENT_ACCESS
    topology_m2.ms["supplier1"].log.info("Try to add %s" % PRODUCTION_DN)
    name = "%s%d" % (NEW_ACCOUNT, 0)
    # Idiom fix: pytest.raises replaces try/assert-0/isinstance and cannot
    # silently pass when no exception is raised.
    with pytest.raises(ldap.INSUFFICIENT_ACCESS):
        topology_m2.ms["supplier1"].add_s(Entry(("uid=%s,%s" % (name, PRODUCTION_DN), {
            'objectclass': "top person".split(),
            'sn': name,
            'cn': name,
            'uid': name})))


def test_mode_default_delete_deny(topology_m2, moddn_setup):
    """Tests that the DEL operation fails (no 'delete' aci on production)

    :id: 5dcb2213-3875-489a-8cb5-ace057120ad6
    :setup: MMR with two suppliers,
            M1 - staging DIT
            M2 - production DIT
            add test accounts in staging DIT
    :steps:
        1. Delete an entry in staging
    :expectedresults:
        1. It should fail due to INSUFFICIENT_ACCESS
    """

    topology_m2.ms["supplier1"].log.info("\n\n######## DELETE (should fail) ########\n")

    _bind_normal(topology_m2)

    # Second try to delete an entry in staging => INSUFFICIENT_ACCESS
    topology_m2.ms["supplier1"].log.info("Try to delete %s" % STAGING_DN)
    name = "%s%d" % (NEW_ACCOUNT, 0)
    with pytest.raises(ldap.INSUFFICIENT_ACCESS):
        topology_m2.ms["supplier1"].delete_s("uid=%s,%s" % (name, STAGING_DN))


@pytest.mark.parametrize("index,tfrom,tto,failure",
                         [(0, STAGING_DN, PRODUCTION_DN, False),
                          (1, STAGING_DN, PRODUCTION_DN, False),
                          (2, STAGING_DN, BAD_PRODUCTION_PATTERN, True),
                          (3, STAGING_PATTERN, PRODUCTION_DN, False),
                          (4, BAD_STAGING_PATTERN, PRODUCTION_DN, True),
                          (5, STAGING_PATTERN, PRODUCTION_PATTERN, False),
                          (6, None, PRODUCTION_PATTERN, False),
                          (7, STAGING_PATTERN, None, False),
                          (8, None, None, False)])
def test_moddn_staging_prod(topology_m2, moddn_setup,
                            index, tfrom, tto, failure):
    """This test case MOVE entry NEW_ACCOUNT0 from staging to prod
    target_to/target_from: equality filter

    :id: cbafdd68-64d6-431f-9f22-6fbf9ed23ca0
    :parametrized: yes
    :setup: MMR with two suppliers,
            M1 - staging DIT
            M2 - production DIT
            add test accounts in staging DIT
    :steps:
        1. Try to modify DN with moddn for each value of
           STAGING_DN -> PRODUCTION_DN
        2. Try to modify DN with moddn for each value of
           STAGING_DN -> PRODUCTION_DN with appropriate ACI
    :expectedresults:
        1. It should fail due to INSUFFICIENT_ACCESS
        2. It should pass due to appropriate ACI
    """

    topology_m2.ms["supplier1"].log.info("\n\n######## MOVE staging -> Prod (%s) ########\n" % index)
    _bind_normal(topology_m2)

    old_rdn = "uid=%s%s" % (NEW_ACCOUNT, index)
    old_dn = "%s,%s" % (old_rdn, STAGING_DN)
    new_rdn = old_rdn
    new_superior = PRODUCTION_DN

    # Try to rename without the appropriate ACI => INSUFFICIENT_ACCESS.
    # NOTE: the AssertionError branch deliberately tolerates the 'assert 0'
    # (upstream behavior) — kept as-is.
    try:
        topology_m2.ms["supplier1"].log.info("Try to MODDN %s -> %s,%s" % (old_dn, new_rdn, new_superior))
        topology_m2.ms["supplier1"].rename_s(old_dn, new_rdn, newsuperior=new_superior)
        assert 0
    except AssertionError:
        topology_m2.ms["supplier1"].log.info(
            "Exception (not really expected exception but that is fine as it fails to rename)")
    except Exception as e:
        topology_m2.ms["supplier1"].log.info("Exception (expected): %s" % type(e).__name__)
        assert isinstance(e, ldap.INSUFFICIENT_ACCESS)

    # successful MOD with the ACI
    topology_m2.ms["supplier1"].log.info("\n\n######## MOVE to and from equality filter ########\n")
    _bind_manager(topology_m2)
    _moddn_aci_staging_to_production(topology_m2, mod_type=ldap.MOD_ADD,
                                     target_from=tfrom, target_to=tto)
    _bind_normal(topology_m2)

    try:
        topology_m2.ms["supplier1"].log.info("Try to MODDN %s -> %s,%s" % (old_dn, new_rdn, new_superior))
        topology_m2.ms["supplier1"].rename_s(old_dn, new_rdn, newsuperior=new_superior)
    except Exception as e:
        topology_m2.ms["supplier1"].log.info("Exception (expected): %s" % type(e).__name__)
        # Only the parametrized 'failure' cases are expected to be denied here.
        if failure:
            assert isinstance(e, ldap.INSUFFICIENT_ACCESS)

    # successful MOD with the both ACI
    _bind_manager(topology_m2)
    _moddn_aci_staging_to_production(topology_m2, mod_type=ldap.MOD_DELETE,
                                     target_from=tfrom, target_to=tto)
    _bind_normal(topology_m2)
def _rename_expect_insufficient_access(supplier, old_dn, new_rdn, new_superior):
    """Attempt a MODDN that must be denied with INSUFFICIENT_ACCESS.

    Preserves the original inline pattern exactly: if the rename unexpectedly
    succeeds the 'assert 0' fires and is deliberately swallowed (only logged);
    any real exception must be INSUFFICIENT_ACCESS.
    """
    try:
        supplier.log.info("Try to MODDN %s -> %s,%s" % (old_dn, new_rdn, new_superior))
        supplier.rename_s(old_dn, new_rdn, newsuperior=new_superior)
        assert 0
    except AssertionError:
        supplier.log.info(
            "Exception (not really expected exception but that is fine as it fails to rename)")
    except Exception as e:
        supplier.log.info("Exception (expected): %s" % type(e).__name__)
        assert isinstance(e, ldap.INSUFFICIENT_ACCESS)


def test_moddn_staging_prod_9(topology_m2, moddn_setup):
    """Test with nsslapd-moddn-aci set to off so that MODDN requires an 'add' aci.

    :id: 222dd7e8-7ff1-40b8-ad26-6f8e42fbfcd9
    :setup: MMR with two suppliers,
            M1 - staging DIT
            M2 - production DIT
            add test accounts in staging DIT
    :steps:
        1. Try to modify DN with moddn STAGING_DN -> PRODUCTION_DN
        2. Add the moddn aci that will not be evaluated because of the config flag
        3. Try to do modDN
        4. Remove the moddn aci
        5. Add the 'add' right to the production DN
        6. Try to modify DN with moddn with 'add' right
        7. Enable the moddn right
        8. Try to rename without the appropriate ACI
        9. Add the 'add' right to the production DN
        10. Try to rename without the appropriate ACI
        11. Remove the moddn aci
    :expectedresults:
        1. It should fail due to INSUFFICIENT_ACCESS
        2. It should pass
        3. It should fail due to INSUFFICIENT_ACCESS
        4. It should pass
        5. It should pass
        6. It should pass
        7. It should pass
        8. It should fail due to INSUFFICIENT_ACCESS
        9. It should pass
        10. It should fail due to INSUFFICIENT_ACCESS
        11. It should pass
    """
    m1 = topology_m2.ms["supplier1"]
    m1.log.info("\n\n######## MOVE staging -> Prod (9) ########\n")

    _bind_normal(topology_m2)
    old_rdn = "uid=%s9" % NEW_ACCOUNT
    old_dn = "%s,%s" % (old_rdn, STAGING_DN)
    new_rdn = old_rdn
    new_superior = PRODUCTION_DN
    prod = OrganizationalRole(m1, PRODUCTION_DN)

    # Step 1: no ACI in place yet -> denied
    _rename_expect_insufficient_access(m1, old_dn, new_rdn, new_superior)

    #############
    # Now do tests with no support of moddn aci
    #############
    m1.log.info("Disable the moddn right")
    _bind_manager(topology_m2)
    m1.config.set(CONFIG_MODDN_ACI_ATTR, 'off')

    # Step 2: add the moddn aci that will not be evaluated because of the config flag
    m1.log.info("\n\n######## MOVE to and from equality filter ########\n")
    _bind_manager(topology_m2)
    _moddn_aci_staging_to_production(topology_m2, mod_type=ldap.MOD_ADD,
                                     target_from=STAGING_DN, target_to=PRODUCTION_DN)
    _bind_normal(topology_m2)

    # Step 3: it will fail because it will test the ADD right
    _rename_expect_insufficient_access(m1, old_dn, new_rdn, new_superior)

    # Step 4: remove the moddn aci
    _bind_manager(topology_m2)
    _moddn_aci_staging_to_production(topology_m2, mod_type=ldap.MOD_DELETE,
                                     target_from=STAGING_DN, target_to=PRODUCTION_DN)
    _bind_normal(topology_m2)

    # Steps 5/6: add the 'add' right to the production DN, then a successful moddn
    ACI_ALLOW = "(version 3.0; acl \"ADD rights to allow moddn\"; allow (add)"
    ACI_SUBJECT = " userdn = \"ldap:///%s\";)" % BIND_DN
    ACI_BODY = ACI_ALLOW + ACI_SUBJECT

    _bind_manager(topology_m2)
    prod.add('aci', ACI_BODY)
    _write_aci_staging(topology_m2, mod_type=ldap.MOD_ADD)
    _bind_normal(topology_m2)

    m1.log.info("Try to MODDN %s -> %s,%s" % (old_dn, new_rdn, new_superior))
    m1.rename_s(old_dn, new_rdn, newsuperior=new_superior)

    _bind_manager(topology_m2)
    prod.remove('aci', ACI_BODY)
    _write_aci_staging(topology_m2, mod_type=ldap.MOD_DELETE)
    _bind_normal(topology_m2)

    #############
    # Now do tests with support of moddn aci
    #############
    # Step 7: enable the moddn right
    m1.log.info("Enable the moddn right")
    _bind_manager(topology_m2)
    m1.config.set(CONFIG_MODDN_ACI_ATTR, 'on')

    m1.log.info("\n\n######## MOVE staging -> Prod (10) ########\n")

    _bind_normal(topology_m2)
    old_rdn = "uid=%s10" % NEW_ACCOUNT
    old_dn = "%s,%s" % (old_rdn, STAGING_DN)
    new_rdn = old_rdn
    new_superior = PRODUCTION_DN

    # Step 8: no ACI -> denied
    _rename_expect_insufficient_access(m1, old_dn, new_rdn, new_superior)

    # Steps 9/10: with only the 'add' right, the moddn config now requires a
    # moddn aci, so the rename is still denied
    ACI_ALLOW = "(version 3.0; acl \"ADD rights to allow moddn\"; allow (add)"
    ACI_SUBJECT = " userdn = \"ldap:///%s\";)" % BIND_DN
    ACI_BODY = ACI_ALLOW + ACI_SUBJECT

    _bind_manager(topology_m2)
    prod.add('aci', ACI_BODY)
    _write_aci_staging(topology_m2, mod_type=ldap.MOD_ADD)
    _bind_normal(topology_m2)

    _rename_expect_insufficient_access(m1, old_dn, new_rdn, new_superior)

    _bind_manager(topology_m2)
    prod.remove('aci', ACI_BODY)
    _write_aci_staging(topology_m2, mod_type=ldap.MOD_DELETE)
    _bind_normal(topology_m2)

    # Add the moddn aci that will be evaluated because of the config flag
    m1.log.info("\n\n######## MOVE to and from equality filter ########\n")
    _bind_manager(topology_m2)
    _moddn_aci_staging_to_production(topology_m2, mod_type=ldap.MOD_ADD,
                                     target_from=STAGING_DN, target_to=PRODUCTION_DN)
    _bind_normal(topology_m2)

    m1.log.info("Try to MODDN %s -> %s,%s" % (old_dn, new_rdn, new_superior))
    m1.rename_s(old_dn, new_rdn, newsuperior=new_superior)

    # Step 11: remove the moddn aci
    _bind_manager(topology_m2)
    _moddn_aci_staging_to_production(topology_m2, mod_type=ldap.MOD_DELETE,
                                     target_from=STAGING_DN, target_to=PRODUCTION_DN)
    _bind_normal(topology_m2)
def test_moddn_prod_staging(topology_m2, moddn_setup):
    """This test checks that we can move ACCOUNT11 from staging to prod
    but not move back ACCOUNT11 from prod to staging

    :id: 2b061e92-483f-4399-9f56-8d1c1898b043
    :setup: MMR with two suppliers,
            M1 - staging DIT
            M2 - production DIT
            add test accounts in staging DIT
    :steps:
        1. Try to rename without the appropriate ACI
        2. Try to MOD with the ACI from stage to production
        3. Try to move back the entry to staging from production
    :expectedresults:
        1. It should fail due to INSUFFICIENT_ACCESS
        2. It should pass
        3. It should fail due to INSUFFICIENT_ACCESS
    """

    topology_m2.ms["supplier1"].log.info("\n\n######## MOVE staging -> Prod (11) ########\n")

    _bind_normal(topology_m2)

    old_rdn = "uid=%s11" % NEW_ACCOUNT
    old_dn = "%s,%s" % (old_rdn, STAGING_DN)
    new_rdn = old_rdn
    new_superior = PRODUCTION_DN

    #
    # Try to rename without the appropriate ACI => INSUFFICIENT_ACCESS
    # (the AssertionError branch deliberately tolerates a successful rename —
    # upstream forgiving pattern)
    #
    try:
        topology_m2.ms["supplier1"].log.info("Try to MODDN %s -> %s,%s" % (old_dn, new_rdn, new_superior))
        topology_m2.ms["supplier1"].rename_s(old_dn, new_rdn, newsuperior=new_superior)
        assert 0
    except AssertionError:
        topology_m2.ms["supplier1"].log.info(
            "Exception (not really expected exception but that is fine as it fails to rename)")
    except Exception as e:
        topology_m2.ms["supplier1"].log.info("Exception (expected): %s" % type(e).__name__)
        assert isinstance(e, ldap.INSUFFICIENT_ACCESS)

    # successful MOD with the ACI: allow moddn staging -> production, then move
    topology_m2.ms["supplier1"].log.info("\n\n######## MOVE to and from equality filter ########\n")
    _bind_manager(topology_m2)
    _moddn_aci_staging_to_production(topology_m2, mod_type=ldap.MOD_ADD,
                                     target_from=STAGING_DN, target_to=PRODUCTION_DN)
    _bind_normal(topology_m2)

    topology_m2.ms["supplier1"].log.info("Try to MODDN %s -> %s,%s" % (old_dn, new_rdn, new_superior))
    topology_m2.ms["supplier1"].rename_s(old_dn, new_rdn, newsuperior=new_superior)

    # Now check we can not move back the entry to staging
    old_rdn = "uid=%s11" % NEW_ACCOUNT
    old_dn = "%s,%s" % (old_rdn, PRODUCTION_DN)
    new_rdn = old_rdn
    new_superior = STAGING_DN

    # add the write right because we want to check the moddn
    # (i.e. rule out a missing write right as the cause of the denial)
    _bind_manager(topology_m2)
    _write_aci_production(topology_m2, mod_type=ldap.MOD_ADD)
    _bind_normal(topology_m2)

    try:
        topology_m2.ms["supplier1"].log.info("Try to move back MODDN %s -> %s,%s" % (old_dn, new_rdn, new_superior))
        topology_m2.ms["supplier1"].rename_s(old_dn, new_rdn, newsuperior=new_superior)
        assert 0
    except AssertionError:
        topology_m2.ms["supplier1"].log.info(
            "Exception (not really expected exception but that is fine as it fails to rename)")
    except Exception as e:
        topology_m2.ms["supplier1"].log.info("Exception (expected): %s" % type(e).__name__)
        assert isinstance(e, ldap.INSUFFICIENT_ACCESS)

    # cleanup: drop the temporary write right on production
    _bind_manager(topology_m2)
    _write_aci_production(topology_m2, mod_type=ldap.MOD_DELETE)
    _bind_normal(topology_m2)

    # successful MOD with the both ACI — remove the staging -> production rule
    _bind_manager(topology_m2)
    _moddn_aci_staging_to_production(topology_m2, mod_type=ldap.MOD_DELETE,
                                     target_from=STAGING_DN, target_to=PRODUCTION_DN)
    _bind_normal(topology_m2)
It should pass + """ + + topology_m2.ms["supplier1"].log.info("Bind as %s (M2)" % DN_DM) + topology_m2.ms["supplier2"].simple_bind_s(DN_DM, PASSWORD) + + rdn = "uid=%s12" % NEW_ACCOUNT + dn = "%s,%s" % (rdn, STAGING_DN) + new_account = UserAccount(topology_m2.ms["supplier2"], dn) + + # First wait for the ACCOUNT19 entry being replicated on M2 + loop = 0 + while loop <= 10: + try: + ent = topology_m2.ms["supplier2"].getEntry(dn, ldap.SCOPE_BASE, "(objectclass=*)") + break + except ldap.NO_SUCH_OBJECT: + time.sleep(1) + loop += 1 + assert loop <= 10 + + attribute = 'description' + tested_value = b'Hello world' + topology_m2.ms["supplier1"].log.info("Update (M2) %s (%s)" % (dn, attribute)) + new_account.add(attribute, tested_value) + + loop = 0 + while loop <= 10: + ent = topology_m2.ms["supplier1"].getEntry(dn, ldap.SCOPE_BASE, "(objectclass=*)") + assert ent is not None + if ent.hasAttr(attribute) and (ent.getValue(attribute) == tested_value): + break + + time.sleep(1) + loop += 1 + assert loop < 10 + topology_m2.ms["supplier1"].log.info("Update %s (%s) replicated on M1" % (dn, attribute)) + + +def test_moddn_staging_prod_except(topology_m2, moddn_setup): + """This test case MOVE entry NEW_ACCOUNT13 from staging to prod + but fails to move entry NEW_ACCOUNT14 from staging to prod_except + + :id: 02d34f4c-8574-428d-b43f-31227426392c + :setup: MMR with two suppliers, + M1 - staging DIT + M2 - production DIT + add test accounts in staging DIT + :steps: + 1. Try to move entry staging -> Prod + without the appropriate ACI + 2. Do MOD with the appropriate ACI + 3. Try to move an entry under Prod/Except from stage + 4. Try to do MOD with appropriate ACI + :expectedresults: + 1. It should fail due to INSUFFICIENT_ACCESS + 2. It should pass + 3. It should fail due to INSUFFICIENT_ACCESS + 4. 
It should pass + """ + + topology_m2.ms["supplier1"].log.info("\n\n######## MOVE staging -> Prod (13) ########\n") + _bind_normal(topology_m2) + + old_rdn = "uid=%s13" % NEW_ACCOUNT + old_dn = "%s,%s" % (old_rdn, STAGING_DN) + new_rdn = old_rdn + new_superior = PRODUCTION_DN + + # + # Try to rename without the appropriate ACI => INSUFFICIENT_ACCESS + # + try: + topology_m2.ms["supplier1"].log.info("Try to MODDN %s -> %s,%s" % (old_dn, new_rdn, new_superior)) + topology_m2.ms["supplier1"].rename_s(old_dn, new_rdn, newsuperior=new_superior) + assert 0 + except AssertionError: + topology_m2.ms["supplier1"].log.info( + "Exception (not really expected exception but that is fine as it fails to rename)") + except Exception as e: + topology_m2.ms["supplier1"].log.info("Exception (expected): %s" % type(e).__name__) + assert isinstance(e, ldap.INSUFFICIENT_ACCESS) + + # successful MOD with the ACI + topology_m2.ms["supplier1"].log.info("\n\n######## MOVE to and from equality filter ########\n") + _bind_manager(topology_m2) + _moddn_aci_staging_to_production(topology_m2, mod_type=ldap.MOD_ADD, + target_from=STAGING_DN, target_to=PRODUCTION_DN) + _moddn_aci_deny_tree(topology_m2, mod_type=ldap.MOD_ADD) + _bind_normal(topology_m2) + + topology_m2.ms["supplier1"].log.info("Try to MODDN %s -> %s,%s" % (old_dn, new_rdn, new_superior)) + topology_m2.ms["supplier1"].rename_s(old_dn, new_rdn, newsuperior=new_superior) + + # + # Now try to move an entry under except + # + topology_m2.ms["supplier1"].log.info("\n\n######## MOVE staging -> Prod/Except (14) ########\n") + old_rdn = "uid=%s14" % NEW_ACCOUNT + old_dn = "%s,%s" % (old_rdn, STAGING_DN) + new_rdn = old_rdn + new_superior = PROD_EXCEPT_DN + try: + topology_m2.ms["supplier1"].log.info("Try to MODDN %s -> %s,%s" % (old_dn, new_rdn, new_superior)) + topology_m2.ms["supplier1"].rename_s(old_dn, new_rdn, newsuperior=new_superior) + assert 0 + except AssertionError: + topology_m2.ms["supplier1"].log.info( + "Exception (not really 
expected exception but that is fine as it fails to rename)") + except Exception as e: + topology_m2.ms["supplier1"].log.info("Exception (expected): %s" % type(e).__name__) + assert isinstance(e, ldap.INSUFFICIENT_ACCESS) + + # successful MOD with the both ACI + _bind_manager(topology_m2) + _moddn_aci_staging_to_production(topology_m2, mod_type=ldap.MOD_DELETE, + target_from=STAGING_DN, target_to=PRODUCTION_DN) + _moddn_aci_deny_tree(topology_m2, mod_type=ldap.MOD_DELETE) + _bind_normal(topology_m2) + + +def test_mode_default_ger_no_moddn(topology_m2, moddn_setup): + """mode moddn_aci : Check Get Effective Rights Controls for entries + + :id: f4785d73-3b14-49c0-b981-d6ff96fa3496 + :setup: MMR with two suppliers, + M1 - staging DIT + M2 - production DIT + add test accounts in staging DIT + :steps: + 1. Search for GER controls on M1 + 2. Check 'n' is not in the entryLevelRights + :expectedresults: + 1. It should pass + 2. It should pass + """ + + topology_m2.ms["supplier1"].log.info("\n\n######## mode moddn_aci : GER no moddn ########\n") + request_ctrl = GetEffectiveRightsControl(criticality=True, + authzId=ensure_bytes("dn: " + BIND_DN)) + msg_id = topology_m2.ms["supplier1"].search_ext(PRODUCTION_DN, + ldap.SCOPE_SUBTREE, + "objectclass=*", + serverctrls=[request_ctrl]) + rtype, rdata, rmsgid, response_ctrl = topology_m2.ms["supplier1"].result3(msg_id) + # ger={} + value = '' + for dn, attrs in rdata: + topology_m2.ms["supplier1"].log.info("dn: %s" % dn) + value = attrs['entryLevelRights'][0] + + topology_m2.ms["supplier1"].log.info("######## entryLevelRights: %r" % value) + assert b'n' not in value + + +def test_mode_default_ger_with_moddn(topology_m2, moddn_setup): + """This test case adds the moddn aci and check ger contains 'n' + + :id: a752a461-432d-483a-89c0-dfb34045a969 + :setup: MMR with two suppliers, + M1 - staging DIT + M2 - production DIT + add test accounts in staging DIT + :steps: + 1. Add moddn ACI on M2 + 2. Search for GER controls on M1 + 3. 
Check entryLevelRights value for entries + 4. Check 'n' is in the entryLevelRights + :expectedresults: + 1. It should pass + 2. It should pass + 3. It should pass + 4. It should pass + """ + + topology_m2.ms["supplier1"].log.info("\n\n######## mode moddn_aci: GER with moddn ########\n") + + # successful MOD with the ACI + _bind_manager(topology_m2) + _moddn_aci_staging_to_production(topology_m2, mod_type=ldap.MOD_ADD, + target_from=STAGING_DN, target_to=PRODUCTION_DN) + _bind_normal(topology_m2) + + request_ctrl = GetEffectiveRightsControl(criticality=True, + authzId=ensure_bytes("dn: " + BIND_DN)) + msg_id = topology_m2.ms["supplier1"].search_ext(PRODUCTION_DN, + ldap.SCOPE_SUBTREE, + "objectclass=*", + serverctrls=[request_ctrl]) + rtype, rdata, rmsgid, response_ctrl = topology_m2.ms["supplier1"].result3(msg_id) + # ger={} + value = '' + for dn, attrs in rdata: + topology_m2.ms["supplier1"].log.info("dn: %s" % dn) + value = attrs['entryLevelRights'][0] + + topology_m2.ms["supplier1"].log.info("######## entryLevelRights: %r" % value) + assert b'n' in value + + # successful MOD with the both ACI + _bind_manager(topology_m2) + _moddn_aci_staging_to_production(topology_m2, mod_type=ldap.MOD_DELETE, + target_from=STAGING_DN, target_to=PRODUCTION_DN) + _bind_normal(topology_m2) + + +def test_mode_legacy_ger_no_moddn1(topology_m2, moddn_setup): + """This test checks mode legacy : GER no moddn + + :id: e783e05b-d0d0-4fd4-9572-258a81b7bd24 + :setup: MMR with two suppliers, + M1 - staging DIT + M2 - production DIT + add test accounts in staging DIT + :steps: + 1. Disable ACI checks - set nsslapd-moddn-aci: off + 2. Search for GER controls on M1 + 3. Check entryLevelRights value for entries + 4. Check 'n' is not in the entryLevelRights + :expectedresults: + 1. It should pass + 2. It should pass + 3. It should pass + 4. 
It should pass + """ + + topology_m2.ms["supplier1"].log.info("\n\n######## Disable the moddn aci mod ########\n") + _bind_manager(topology_m2) + topology_m2.ms["supplier1"].config.set(CONFIG_MODDN_ACI_ATTR, 'off') + + topology_m2.ms["supplier1"].log.info("\n\n######## mode legacy 1: GER no moddn ########\n") + request_ctrl = GetEffectiveRightsControl(criticality=True, authzId=ensure_bytes("dn: " + BIND_DN)) + msg_id = topology_m2.ms["supplier1"].search_ext(PRODUCTION_DN, + ldap.SCOPE_SUBTREE, + "objectclass=*", + serverctrls=[request_ctrl]) + rtype, rdata, rmsgid, response_ctrl = topology_m2.ms["supplier1"].result3(msg_id) + # ger={} + value = '' + for dn, attrs in rdata: + topology_m2.ms["supplier1"].log.info("dn: %s" % dn) + value = attrs['entryLevelRights'][0] + + topology_m2.ms["supplier1"].log.info("######## entryLevelRights: %r" % value) + assert b'n' not in value + + +def test_mode_legacy_ger_no_moddn2(topology_m2, moddn_setup): + """This test checks mode legacy : GER no moddn + + :id: af87e024-1744-4f1d-a2d3-ea2687e2351d + :setup: MMR with two suppliers, + M1 - staging DIT + M2 - production DIT + add test accounts in staging DIT + :steps: + 1. Disable ACI checks - set nsslapd-moddn-aci: off + 2. Add moddn ACI on M1 + 3. Search for GER controls on M1 + 4. Check entryLevelRights value for entries + 5. Check 'n' is not in the entryLevelRights + :expectedresults: + 1. It should pass + 2. It should pass + 3. It should pass + 4. It should be pass + 5. 
It should pass + """ + + topology_m2.ms["supplier1"].log.info("\n\n######## Disable the moddn aci mod ########\n") + _bind_manager(topology_m2) + topology_m2.ms["supplier1"].config.set(CONFIG_MODDN_ACI_ATTR, 'off') + + topology_m2.ms["supplier1"].log.info("\n\n######## mode legacy 2: GER no moddn ########\n") + # successful MOD with the ACI + _bind_manager(topology_m2) + _moddn_aci_staging_to_production(topology_m2, mod_type=ldap.MOD_ADD, + target_from=STAGING_DN, target_to=PRODUCTION_DN) + _bind_normal(topology_m2) + + request_ctrl = GetEffectiveRightsControl(criticality=True, + authzId=ensure_bytes("dn: " + BIND_DN)) + msg_id = topology_m2.ms["supplier1"].search_ext(PRODUCTION_DN, + ldap.SCOPE_SUBTREE, + "objectclass=*", + serverctrls=[request_ctrl]) + rtype, rdata, rmsgid, response_ctrl = topology_m2.ms["supplier1"].result3(msg_id) + # ger={} + value = '' + for dn, attrs in rdata: + topology_m2.ms["supplier1"].log.info("dn: %s" % dn) + value = attrs['entryLevelRights'][0] + + topology_m2.ms["supplier1"].log.info("######## entryLevelRights: %r" % value) + assert b'n' not in value + + # successful MOD with the both ACI + _bind_manager(topology_m2) + _moddn_aci_staging_to_production(topology_m2, mod_type=ldap.MOD_DELETE, + target_from=STAGING_DN, target_to=PRODUCTION_DN) + _bind_normal(topology_m2) + + +def test_mode_legacy_ger_with_moddn(topology_m2, moddn_setup): + """This test checks mode legacy : GER with moddn + + :id: 37c1e537-1b5d-4fab-b62a-50cd8c5b3493 + :setup: MMR with two suppliers, + M1 - staging DIT + M2 - production DIT + add test accounts in staging DIT + :steps: + 1. Disable ACI checks - set nsslapd-moddn-aci: off + 2. Add moddn ACI on M1 + 3. Search for GER controls on M1 + 4. Check entryLevelRights value for entries + 5. Check 'n' is in the entryLevelRights + 6. Try MOD with the both ACI + :expectedresults: + 1. It should pass + 2. It should pass + 3. It should pass + 4. It should pass + 5. It should pass + 6. 
It should pass + """ + + suffix = Domain(topology_m2.ms["supplier1"], SUFFIX) + + topology_m2.ms["supplier1"].log.info("\n\n######## Disable the moddn aci mod ########\n") + _bind_manager(topology_m2) + topology_m2.ms["supplier1"].config.set(CONFIG_MODDN_ACI_ATTR, 'off') + + topology_m2.ms["supplier1"].log.info("\n\n######## mode legacy : GER with moddn ########\n") + + # being allowed to read/write the RDN attribute use to allow the RDN + ACI_TARGET = "(target = \"ldap:///%s\")(targetattr=\"uid\")" % (PRODUCTION_DN) + ACI_ALLOW = "(version 3.0; acl \"MODDN production changing the RDN attribute\"; allow (read,search,write)" + ACI_SUBJECT = " userdn = \"ldap:///%s\";)" % BIND_DN + ACI_BODY = ACI_TARGET + ACI_ALLOW + ACI_SUBJECT + + # successful MOD with the ACI + _bind_manager(topology_m2) + suffix.add('aci', ACI_BODY) + _bind_normal(topology_m2) + + request_ctrl = GetEffectiveRightsControl(criticality=True, authzId=ensure_bytes("dn: " + BIND_DN)) + msg_id = topology_m2.ms["supplier1"].search_ext(PRODUCTION_DN, + ldap.SCOPE_SUBTREE, + "objectclass=*", + serverctrls=[request_ctrl]) + rtype, rdata, rmsgid, response_ctrl = topology_m2.ms["supplier1"].result3(msg_id) + # ger={} + value = '' + for dn, attrs in rdata: + topology_m2.ms["supplier1"].log.info("dn: %s" % dn) + value = attrs['entryLevelRights'][0] + + topology_m2.ms["supplier1"].log.info("######## entryLevelRights: %r" % value) + assert b'n' in value + + # successful MOD with the both ACI + _bind_manager(topology_m2) + suffix.remove('aci', ACI_BODY) + # _bind_normal(topology_m2) + + +@pytest.fixture(scope="module") +def rdn_write_setup(topology_m2): + topology_m2.ms["supplier1"].log.info("\n\n######## Add entry tuser ########\n") + user = UserAccount(topology_m2.ms["supplier1"], SRC_ENTRY_DN) + user_props = TEST_USER_PROPERTIES.copy() + user_props.update({'sn': SRC_ENTRY_CN, + 'cn': SRC_ENTRY_CN, + 'userpassword': BIND_PW}) + user.create(properties=user_props, basedn=SUFFIX) + + +def 
test_rdn_write_get_ger(topology_m2, rdn_write_setup): + """This test checks GER rights for anonymous + + :id: d5d85f87-b53d-4f50-8fa6-a9e55c75419b + :setup: MMR with two suppliers, + Add entry tuser + :steps: + 1. Search for GER controls on M1 + 2. Check entryLevelRights value for entries + 3. Check 'n' is not in the entryLevelRights + :expectedresults: + 1. It should pass + 2. It should be pass + 3. It should pass + """ + + ANONYMOUS_DN = "" + topology_m2.ms["supplier1"].log.info("\n\n######## GER rights for anonymous ########\n") + request_ctrl = GetEffectiveRightsControl(criticality=True, + authzId=ensure_bytes("dn:" + ANONYMOUS_DN)) + msg_id = topology_m2.ms["supplier1"].search_ext(SUFFIX, + ldap.SCOPE_SUBTREE, + "objectclass=*", + serverctrls=[request_ctrl]) + rtype, rdata, rmsgid, response_ctrl = topology_m2.ms["supplier1"].result3(msg_id) + value = '' + for dn, attrs in rdata: + topology_m2.ms["supplier1"].log.info("dn: %s" % dn) + for value in attrs['entryLevelRights']: + topology_m2.ms["supplier1"].log.info("######## entryLevelRights: %r" % value) + assert b'n' not in value + + +def test_rdn_write_modrdn_anonymous(topology_m2, rdn_write_setup): + """Tests anonymous user for modrdn + + :id: fc07be23-3341-44ab-a53c-c68c5f9569c7 + :setup: MMR with two suppliers, + Add entry tuser + :steps: + 1. Bind as anonymous user + 2. Try to perform MODRDN operation (SRC_ENTRY_DN -> DST_ENTRY_CN) + 3. Try to search DST_ENTRY_CN + :expectedresults: + 1. It should pass + 2. It should fails with INSUFFICIENT_ACCESS + 3. 
It should fails with NO_SUCH_OBJECT + """ + + ANONYMOUS_DN = "" + topology_m2.ms["supplier1"].close() + topology_m2.ms["supplier1"].binddn = ANONYMOUS_DN + topology_m2.ms["supplier1"].open() + msg_id = topology_m2.ms["supplier1"].search_ext("", ldap.SCOPE_BASE, "objectclass=*") + rtype, rdata, rmsgid, response_ctrl = topology_m2.ms["supplier1"].result3(msg_id) + for dn, attrs in rdata: + topology_m2.ms["supplier1"].log.info("dn: %s" % dn) + for attr in attrs: + topology_m2.ms["supplier1"].log.info("######## %r: %r" % (attr, attrs[attr])) + + try: + topology_m2.ms["supplier1"].rename_s(SRC_ENTRY_DN, "cn=%s" % DST_ENTRY_CN, delold=True) + except Exception as e: + topology_m2.ms["supplier1"].log.info("Exception (expected): %s" % type(e).__name__) + isinstance(e, ldap.INSUFFICIENT_ACCESS) + + try: + topology_m2.ms["supplier1"].getEntry(DST_ENTRY_DN, ldap.SCOPE_BASE, "objectclass=*") + assert False + except Exception as e: + topology_m2.ms["supplier1"].log.info("The entry was not renamed (expected)") + isinstance(e, ldap.NO_SUCH_OBJECT) + + _bind_manager(topology_m2) + + +if __name__ == '__main__': + # Run isolated + # -s for DEBUG mode + CURRENT_FILE = os.path.realpath(__file__) + pytest.main("-s %s" % CURRENT_FILE) diff --git a/dirsrvtests/tests/suites/acl/conftest.py b/dirsrvtests/tests/suites/acl/conftest.py new file mode 100644 index 0000000..b0a7241 --- /dev/null +++ b/dirsrvtests/tests/suites/acl/conftest.py @@ -0,0 +1,125 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2019 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. +# --- END COPYRIGHT BLOCK ---- + +""" +This is the config file for keywords test scripts. 
+ +""" + +import pytest + +from lib389._constants import DEFAULT_SUFFIX, PW_DM +from lib389.idm.user import UserAccounts +from lib389.idm.organizationalunit import OrganizationalUnit, OrganizationalUnits +from lib389.topologies import topology_st as topo +from lib389.idm.domain import Domain + + +@pytest.fixture(scope="function") +def aci_of_user(request, topo): + """ + Removes and Restores ACIs after the test. + """ + aci_list = Domain(topo.standalone, DEFAULT_SUFFIX).get_attr_vals_utf8('aci') + + def finofaci(): + """ + Removes and Restores ACIs after the test. + """ + domain = Domain(topo.standalone, DEFAULT_SUFFIX) + domain.remove_all('aci') + for aci in aci_list: + domain.add("aci", aci) + + request.addfinalizer(finofaci) + + +@pytest.fixture(scope="module") +def add_user(request, topo): + """ + This function will create user for the test and in the end entries will be deleted . + """ + ous_origin = OrganizationalUnits(topo.standalone, DEFAULT_SUFFIX) + ou_origin = ous_origin.create(properties={'ou': 'Keywords'}) + + ous_next = OrganizationalUnits(topo.standalone, ou_origin.dn) + for ou in ['Authmethod', 'Dayofweek', 'DNS', 'IP', 'Timeofday']: + ous_next.create(properties={'ou': ou}) + + users_day_of_week = UserAccounts(topo.standalone, f"ou=Dayofweek,ou=Keywords,{DEFAULT_SUFFIX}", rdn=None) + for user in ['EVERYDAY_KEY', 'TODAY_KEY', 'NODAY_KEY']: + users_day_of_week.create(properties={ + 'uid': user, + 'cn': user, + 'sn': 'user', + 'uidNumber': '1000', + 'gidNumber': '2000', + 'homeDirectory': '/home/' + user, + 'userPassword': PW_DM + }) + + users_ip = UserAccounts(topo.standalone, f"ou=IP,ou=Keywords,{DEFAULT_SUFFIX}", rdn=None) + for user in ['FULLIP_KEY', 'NETSCAPEIP_KEY', 'NOIP_KEY']: + users_ip.create(properties={ + 'uid': user, + 'cn': user, + 'sn': 'user', + 'uidNumber': '1000', + 'gidNumber': '2000', + 'homeDirectory': '/home/' + user, + 'userPassword': PW_DM + }) + + users_timeof_day = UserAccounts(topo.standalone, 
f"ou=Timeofday,ou=Keywords,{DEFAULT_SUFFIX}", rdn=None) + for user in ['FULLWORKER_KEY', 'DAYWORKER_KEY', 'NOWORKER_KEY', 'NIGHTWORKER_KEY']: + users_timeof_day.create(properties={ + 'uid': user, + 'cn': user, + 'sn': 'user', + 'uidNumber': '1000', + 'gidNumber': '2000', + 'homeDirectory': '/home/' + user, + 'userPassword': PW_DM + }) + + users_authmethod = UserAccounts(topo.standalone, f"ou=Authmethod,ou=Keywords,{DEFAULT_SUFFIX}", rdn=None) + for user in ['NONE_1_KEY', 'NONE_2_KEY', 'SIMPLE_1_KEY']: + users_authmethod.create(properties={ + 'uid': user, + 'cn': user, + 'sn': 'user', + 'uidNumber': '1000', + 'gidNumber': '2000', + 'homeDirectory': '/home/' + user, + 'userPassword': PW_DM + }) + + users_dns = UserAccounts(topo.standalone, f"ou=DNS,ou=Keywords,{DEFAULT_SUFFIX}", rdn=None) + for user in ['FULLDNS_KEY', 'SUNDNS_KEY', 'NODNS_KEY', 'NETSCAPEDNS_KEY']: + users_dns.create(properties={ + 'uid': user, + 'cn': user, + 'sn': 'user', + 'uidNumber': '1000', + 'gidNumber': '2000', + 'homeDirectory': '/home/' + user, + 'userPassword': PW_DM + }) + + def fin(): + """ + Deletes entries after the test. + """ + for user in users_day_of_week.list() + users_ip.list() + users_timeof_day.list() + \ + users_authmethod.list() + users_dns.list(): + user.delete() + + for ou in sorted(ous_next.list(), key=lambda x: len(x.dn), reverse=True): + ou.delete() + + request.addfinalizer(fin) diff --git a/dirsrvtests/tests/suites/acl/default_aci_allows_self_write_test.py b/dirsrvtests/tests/suites/acl/default_aci_allows_self_write_test.py new file mode 100644 index 0000000..9c7226b --- /dev/null +++ b/dirsrvtests/tests/suites/acl/default_aci_allows_self_write_test.py @@ -0,0 +1,133 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2019 William Brown +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. 
+# --- END COPYRIGHT BLOCK --- + + +import pytest +from lib389.idm.user import nsUserAccounts, UserAccounts +from lib389.topologies import topology_st as topology +from lib389.paths import Paths +from lib389.utils import ds_is_older +from lib389._constants import * + +default_paths = Paths() + +pytestmark = pytest.mark.tier1 + +USER_PASSWORD = "some test password" +NEW_USER_PASSWORD = "some new password" + +@pytest.mark.skipif(ds_is_older('1.4.2.0'), reason="Default aci's in older versions do not support this functionality") +def test_acl_default_allow_self_write_nsuser(topology): + """ + Testing nsusers can self write and self read. This it a sanity test + so that our default entries have their aci's checked. + + :id: 4f0fb01a-36a6-430c-a2ee-ebeb036bd951 + + :setup: Standalone instance + + :steps: + 1. Testing comparison of two different users. + + :expectedresults: + 1. Should fail to compare + """ + topology.standalone.enable_tls() + nsusers = nsUserAccounts(topology.standalone, DEFAULT_SUFFIX) + # Create a user as dm. + user = nsusers.create(properties={ + 'uid': 'test_nsuser', + 'cn': 'test_nsuser', + 'displayName': 'testNsuser', + 'legalName': 'testNsuser', + 'uidNumber': '1001', + 'gidNumber': '1001', + 'homeDirectory': '/home/testnsuser', + 'userPassword': USER_PASSWORD, + }) + # Create a new con and bind as the user. + user_conn = user.bind(USER_PASSWORD) + + user_nsusers = nsUserAccounts(user_conn, DEFAULT_SUFFIX) + self_ent = user_nsusers.get(dn=user.dn) + + # Can we self read x,y,z + check = self_ent.get_attrs_vals_utf8([ + 'uid', + 'cn', + 'displayName', + 'legalName', + 'uidNumber', + 'gidNumber', + 'homeDirectory', + ]) + for k in check.values(): + # Could we read the values? 
+ assert(isinstance(k, list)) + assert(len(k) > 0) + # Can we self change a,b,c + self_ent.ensure_attr_state({ + 'legalName': ['testNsuser_update'], + 'displayName': ['testNsuser_update'], + 'nsSshPublicKey': ['testkey'], + }) + # self change pw + self_ent.change_password(USER_PASSWORD, NEW_USER_PASSWORD) + + +@pytest.mark.skipif(ds_is_older('1.4.2.0'), reason="Default aci's in older versions do not support this functionality") +def test_acl_default_allow_self_write_user(topology): + """ + Testing users can self write and self read. This it a sanity test + so that our default entries have their aci's checked. + + :id: 4c52321b-f473-4c32-a1d5-489b138cd199 + + :setup: Standalone instance + + :steps: + 1. Testing comparison of two different users. + + :expectedresults: + 1. Should fail to compare + """ + topology.standalone.enable_tls() + users = UserAccounts(topology.standalone, DEFAULT_SUFFIX) + # Create a user as dm. + user = users.create(properties={ + 'uid': 'test_user', + 'cn': 'test_user', + 'sn': 'User', + 'uidNumber': '1002', + 'gidNumber': '1002', + 'homeDirectory': '/home/testuser', + 'userPassword': USER_PASSWORD, + }) + # Create a new con and bind as the user. + user_conn = user.bind(USER_PASSWORD) + + user_users = UserAccounts(user_conn, DEFAULT_SUFFIX) + self_ent = user_users.get(dn=user.dn) + # Can we self read x,y,z + check = self_ent.get_attrs_vals_utf8([ + 'uid', + 'cn', + 'sn', + 'uidNumber', + 'gidNumber', + 'homeDirectory', + ]) + for (a, k) in check.items(): + print(a) + # Could we read the values? + assert(isinstance(k, list)) + assert(len(k) > 0) + # Self change pw + self_ent.change_password(USER_PASSWORD, NEW_USER_PASSWORD) + + diff --git a/dirsrvtests/tests/suites/acl/deladd_test.py b/dirsrvtests/tests/suites/acl/deladd_test.py new file mode 100644 index 0000000..afdc772 --- /dev/null +++ b/dirsrvtests/tests/suites/acl/deladd_test.py @@ -0,0 +1,456 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2019 Red Hat, Inc. 
+# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. +# --- END COPYRIGHT BLOCK --- + +""" +Importing necessary Modules. +""" + +import os +import pytest + +from lib389._constants import DEFAULT_SUFFIX, PW_DM +from lib389.idm.user import UserAccount, UserAccounts +from lib389.idm.group import Groups +from lib389.idm.organizationalunit import OrganizationalUnit, OrganizationalUnits +from lib389.topologies import topology_st as topo +from lib389.idm.domain import Domain + +import ldap + +pytestmark = pytest.mark.tier1 + + +USER_WITH_ACI_DELADD = 'uid=test_user_1000,ou=People,dc=example,dc=com' +USER_DELADD = 'uid=test_user_1,ou=Accounting,dc=example,dc=com' + + +@pytest.fixture(scope="function") +def _aci_of_user(request, topo): + """ + Removes and Restores ACIs after the test. + """ + aci_list = Domain(topo.standalone, DEFAULT_SUFFIX).get_attr_vals('aci') + + def finofaci(): + """ + Removes and Restores ACIs after the test. + """ + domain = Domain(topo.standalone, DEFAULT_SUFFIX) + domain.remove_all('aci') + for i in aci_list: + domain.add("aci", i) + + request.addfinalizer(finofaci) + + +@pytest.fixture(scope="function") +def _add_user(request, topo): + """ + This function will create user for the test and in the end entries will be deleted . + """ + + users = UserAccounts(topo.standalone, DEFAULT_SUFFIX) + user = users.create_test_user() + user.set("userPassword", PW_DM) + + ous = OrganizationalUnits(topo.standalone, DEFAULT_SUFFIX) + ous.create(properties={'ou':'Accounting'}) + + users = UserAccounts(topo.standalone, DEFAULT_SUFFIX, rdn='ou=Accounting') + for i in range(1, 3): + user = users.create_test_user(uid=i, gid=i) + user.set("userPassword", PW_DM) + + def fin(): + """ + Deletes entries after the test. 
+ """ + users1 = UserAccounts(topo.standalone, DEFAULT_SUFFIX, rdn=None) + for dn_dn in users1.list(): + dn_dn.delete() + + groups = Groups(topo.standalone, DEFAULT_SUFFIX) + for dn_dn in groups.list(): + dn_dn.delete() + + ou_ou = OrganizationalUnit(topo.standalone, f'ou=Accounting,{DEFAULT_SUFFIX}') + ou_ou.delete() + + request.addfinalizer(fin) + + +def test_allow_delete_access_to_groupdn(topo, _add_user, _aci_of_user): + + """Test allow delete access to groupdn + + :id: 7cf15992-68ad-11e8-85af-54e1ad30572c + :setup: topo.standalone + :steps: + 1. Add test entry + 2. Add ACI that allows groupdn to delete + 3. Delete something using test USER_DELADD + 4. Remove ACI + :expectedresults: + 1. Entry should be added + 2. ACI should be added + 3. Delete operation should succeed + 4. Delete operation for ACI should succeed + """ + # Create Group and add member + groups = Groups(topo.standalone, DEFAULT_SUFFIX) + group = groups.create(properties={"cn": "group1", + "description": "testgroup"}) + group.add_member(USER_WITH_ACI_DELADD) + + # set aci + aci_target = f'(targetattr="*")' + aci_allow = f'(version 3.0; acl "All rights for {group.dn}"; allow (delete) ' + aci_subject = f'groupdn="ldap:///{group.dn}";)' + + Domain(topo.standalone, DEFAULT_SUFFIX).add("aci", (aci_target + aci_allow + aci_subject)) + + # create connection with USER_WITH_ACI_DELADD + conn = UserAccount(topo.standalone, USER_WITH_ACI_DELADD).bind(PW_DM) + + # Perform delete operation + for i in [USER_DELADD, USER_WITH_ACI_DELADD]: + UserAccount(conn, i).delete() + + +def test_allow_add_access_to_anyone(topo, _add_user, _aci_of_user): + + """Test to allow add access to anyone + + :id: 5ca31cc4-68e0-11e8-8666-8c16451d917b + :setup: topo.standalone + :steps: + 1. Add test entry + 2. Add ACI that allows groupdn to add + 3. Add something using test USER_DELADD + 4. Remove ACI + :expectedresults: + 1. Entry should be added + 2. ACI should be added + 3. Add operation should succeed + 4. 
Delete operation for ACI should succeed + """ + # set aci + aci_target = f'(targetattr="*")' + aci_allow = f'(version 3.0; acl "All rights for anyone"; allow (add) ' + aci_subject = f'userdn="ldap:///anyone";)' + Domain(topo.standalone, DEFAULT_SUFFIX).add("aci", (aci_target + aci_allow + aci_subject)) + + # create connection with USER_WITH_ACI_DELADD + conn = UserAccount(topo.standalone, USER_WITH_ACI_DELADD).bind(PW_DM) + + # Perform add operation + users = UserAccounts(conn, DEFAULT_SUFFIX, rdn='ou=Accounting') + user = users.create_test_user(gid=3, uid=3) + assert user.exists() + + users = UserAccounts(conn, DEFAULT_SUFFIX) + user = users.create_test_user(gid=3, uid=3) + assert user.exists() + + +def test_allow_delete_access_to_anyone(topo, _add_user, _aci_of_user): + + """Test to allow delete access to anyone + + :id: f5447c7e-68e1-11e8-84c4-8c16451d917b + :setup: server + :steps: + 1. Add test entry + 2. Add ACI that allows groupdn to delete some userdn + 3. Delete something using test USER_DELADD + 4. Remove ACI + :expectedresults: + 1. Entry should be added + 2. ACI should be added + 3. Operation should succeed + 4. Delete operation for ACI should succeed + """ + # set aci + aci_target = f'(targetattr="*")' + aci_allow = f'(version 3.0; acl "All rights for anyone"; allow (delete) ' + aci_subject = f'userdn="ldap:///anyone";)' + + Domain(topo.standalone, DEFAULT_SUFFIX).add("aci", (aci_target + aci_allow + aci_subject)) + + # create connection with USER_WITH_ACI_DELADD + conn = UserAccount(topo.standalone, USER_WITH_ACI_DELADD).bind(PW_DM) + + # Perform delete operation + UserAccount(conn, USER_DELADD).delete() + + +def test_allow_delete_access_not_to_userdn(topo, _add_user, _aci_of_user): + + """Test to Allow delete access to != userdn + + :id: 00637f6e-68e3-11e8-92a3-8c16451d917b + :setup: server + :steps: + 1. Add test entry + 2. Add ACI that allows userdn not to delete some userdn + 3. Delete something using test USER_DELADD + 4. 
Remove ACI + :expectedresults: + 1. Entry should be added + 2. ACI should be added + 3. Operation should not succeed + 4. Delete operation for ACI should succeed + """ + # set aci + aci_target = f'(targetattr="*")' + aci_allow = f'(version 3.0; acl "All rights for %s"; allow (delete) ' % USER_DELADD + aci_subject = f'userdn!="ldap:///{USER_WITH_ACI_DELADD}";)' + + Domain(topo.standalone, DEFAULT_SUFFIX).add("aci", (aci_target + aci_allow + aci_subject)) + + # create connection with USER_WITH_ACI_DELADD + conn = UserAccount(topo.standalone, USER_WITH_ACI_DELADD).bind(PW_DM) + + # Perform delete operation + user = UserAccount(conn, USER_DELADD) + with pytest.raises(ldap.INSUFFICIENT_ACCESS): + user.delete() + + +def test_allow_delete_access_not_to_group(topo, _add_user, _aci_of_user): + + """Test to Allow delete access to != groupdn + + :id: f58fc8b0-68e5-11e8-9313-8c16451d917b + :setup: server + :steps: + 1. Add test entry + 2. Add ACI that allows groupdn not to delete some userdn + 3. Delete something using test USER_DELADD belong to test group + 4. Remove ACI + :expectedresults: + 1. Entry should be added + 2. ACI should be added + 3. Operation should not succeed + 4. 
Delete operation for ACI should succeed + """ + # Create group + groups = Groups(topo.standalone, DEFAULT_SUFFIX) + group = groups.create(properties={"cn": "group1", + "description": "testgroup"}) + group.add_member(USER_WITH_ACI_DELADD) + + # set aci + aci_target = f'(targetattr="*")' + aci_allow = f'(version 3.0; acl "All rights for {group.dn}"; allow (delete)' + aci_subject = f'groupdn!="ldap:///{group.dn}";)' + + Domain(topo.standalone, DEFAULT_SUFFIX).add("aci", (aci_target + aci_allow + aci_subject)) + + # create connection with USER_WITH_ACI_DELADD + conn = UserAccount(topo.standalone, USER_WITH_ACI_DELADD).bind(PW_DM) + user = UserAccount(conn, USER_DELADD) + + # Perform delete operation + with pytest.raises(ldap.INSUFFICIENT_ACCESS): + user.delete() + + +def test_allow_add_access_to_parent(topo, _add_user, _aci_of_user): + + """Test to Allow add privilege to parent + + :id: 9f099845-9dbc-412f-bdb9-19a5ea729694 + :setup: server + :steps: + 1. Add test entry + 2. Add ACI that Allow add privilege to parent + 3. Add something using test USER_DELADD + 4. Remove ACI + :expectedresults: + 1. Entry should be added + 2. ACI should be added + 3. Operation should succeed + 4. 
Delete operation for ACI should succeed + """ + # set aci + aci_target = f'(targetattr="*")' + aci_allow = f'(version 3.0; acl "All rights for parent"; allow (add) ' + aci_subject = f'userdn="ldap:///parent";)' + + Domain(topo.standalone, DEFAULT_SUFFIX).add("aci", (aci_target + aci_allow + aci_subject)) + + # create connection with USER_WITH_ACI_DELADD + conn = UserAccount(topo.standalone, USER_WITH_ACI_DELADD).bind(PW_DM) + + # Perform Allow add privilege to parent + users = UserAccounts(conn, DEFAULT_SUFFIX, rdn='uid=test_user_1000, ou=people') + user = users.create_test_user(gid=1, uid=1) + assert user.exists() + + # Delete created user + UserAccounts(topo.standalone, DEFAULT_SUFFIX).get('test_user_1').delete() + + +def test_allow_delete_access_to_parent(topo, _add_user, _aci_of_user): + + """Test to Allow delete access to parent + + :id: 2dd7f624-68e7-11e8-8591-8c16451d917b + :setup: server + :steps: + 1. Add test entry + 2. Add ACI that Allow delete privilege to parent + 3. Delete something using test USER_DELADD + 4. Remove ACI + :expectedresults: + 1. Entry should be added + 2. ACI should be added + 3. Operation should succeed + 4. 
Delete operation for ACI should succeed + """ + # set aci + aci_target = f'(targetattr="*")' + aci_allow = f'(version 3.0; acl "All rights for parent"; allow (add,delete) ' + aci_subject = f'userdn="ldap:///parent";)' + + Domain(topo.standalone, DEFAULT_SUFFIX).add("aci", (aci_target + aci_allow + aci_subject)) + + # create connection with USER_WITH_ACI_DELADD + conn = UserAccount(topo.standalone, USER_WITH_ACI_DELADD).bind(PW_DM) + + # Create a user with parent 'uid=test_user_1000, ou=people, {}'.format(DEFAULT_SUFFIX) + users = UserAccounts(conn, DEFAULT_SUFFIX, rdn='uid=test_user_1000, ou=people') + new_user = users.create_test_user(gid=1, uid=1) + assert new_user.exists() + + # Perform Allow delete access to parent + new_user.delete() + + +def test_allow_delete_access_to_dynamic_group(topo, _add_user, _aci_of_user, request): + + """Test to Allow delete access to dynamic group + + :id: 14ffa452-68ed-11e8-a60d-8c16451d917b + :setup: server + :steps: + 1. Add test entry + 2. Add ACI that Allow delete privilege to dynamic group + 3. Delete something using test USER_DELADD + 4. Remove ACI + :expectedresults: + 1. Entry should be added + 2. ACI should be added + 3. Operation should succeed + 4. 
Delete operation for ACI should succeed + """ + # Create dynamic group + groups = Groups(topo.standalone, DEFAULT_SUFFIX) + group = groups.create(properties={"cn": "group1", + "description": "testgroup"}) + + group.add("objectclass", "groupOfURLs") + group.add("memberURL", + f"ldap:///dc=example,dc=com??sub?(&(objectclass=person)(uid=test_user_1000))") + + # Set ACI + Domain(topo.standalone, DEFAULT_SUFFIX).\ + add("aci", f'(target = ldap:///{DEFAULT_SUFFIX})(targetattr="*")' + f'(version 3.0; acl "{request.node.name}"; ' + f'allow (delete) (groupdn = "ldap:///{group.dn}"); )') + + # create connection with USER_WITH_ACI_DELADD + conn = UserAccount(topo.standalone, USER_WITH_ACI_DELADD).bind(PW_DM) + + # Perform Allow delete access to dynamic group + UserAccount(conn, USER_DELADD).delete() + + +def test_allow_delete_access_to_dynamic_group_uid(topo, _add_user, _aci_of_user, request): + + """Test to Allow delete access to dynamic group + + :id: 010a4f20-752a-4173-b763-f520c7a85b82 + :setup: server + :steps: + 1. Add test entry + 2. Add ACI that Allow delete privilege to dynamic group + 3. Delete something using test USER_DELADD + 4. Remove ACI + :expectedresults: + 1. Entry should be added + 2. ACI should be added + 3. Operation should succeed + 4. 
Delete operation for ACI should succeed + """ + # Create dynamic group + groups = Groups(topo.standalone, DEFAULT_SUFFIX) + group = groups.create(properties={"cn": "group1", + "description": "testgroup"}) + + group.add("objectclass", "groupOfURLs") + group.add("memberURL", + f'ldap:///{DEFAULT_SUFFIX}??sub?(&(objectclass=person)(cn=test_user_1000))') + + # Set ACI + Domain(topo.standalone, DEFAULT_SUFFIX).\ + add("aci", f'(target = ldap:///{DEFAULT_SUFFIX})' + f'(targetattr="uid")(version 3.0; acl "{request.node.name}"; ' + f'allow (delete) (groupdn = "ldap:///{group.dn}"); )') + + # create connection with USER_WITH_ACI_DELADD + conn = UserAccount(topo.standalone, USER_WITH_ACI_DELADD).bind(PW_DM) + + # Perform Allow delete access to dynamic group + UserAccount(conn, USER_DELADD).delete() + + +def test_allow_delete_access_not_to_dynamic_group(topo, _add_user, _aci_of_user, request): + + """Test to Allow delete access to != dynamic group + + :id: 9ecb139d-bca8-428e-9044-fd89db5a3d14 + :setup: server + :steps: + 1. Add test entry + 2. Add ACI that delete access to != dynamic group + 3. Delete something using test USER_DELADD + 4. Remove ACI + :expectedresults: + 1. Entry should be added + 2. ACI should be added + 3. Operation should not succeed + 4. 
Delete operation for ACI should succeed + """ + # Create dynamic group + groups = Groups(topo.standalone, DEFAULT_SUFFIX) + group = groups.create(properties={"cn": "group1", + "description": "testgroup"}) + group.add("objectclass", "groupOfURLs") + group.add("memberURL", + f'ldap:///{DEFAULT_SUFFIX}??sub?(&(objectclass=person)(cn=test_user_1000))') + + # Set ACI + Domain(topo.standalone, DEFAULT_SUFFIX).\ + add("aci", f'(target = ldap:///{DEFAULT_SUFFIX})' + f'(targetattr="*")(version 3.0; acl "{request.node.name}"; ' + f'allow (delete) (groupdn != "ldap:///{group.dn}"); )') + + # create connection with USER_WITH_ACI_DELADD + conn = UserAccount(topo.standalone, USER_WITH_ACI_DELADD).bind(PW_DM) + user = UserAccount(conn, USER_DELADD) + + # Perform Allow delete access to != dynamic group + with pytest.raises(ldap.INSUFFICIENT_ACCESS): + user.delete() + + +if __name__ == "__main__": + CURRENT_FILE = os.path.realpath(__file__) + pytest.main("-s -v %s" % CURRENT_FILE) diff --git a/dirsrvtests/tests/suites/acl/enhanced_aci_modrnd_test.py b/dirsrvtests/tests/suites/acl/enhanced_aci_modrnd_test.py new file mode 100644 index 0000000..0cecde4 --- /dev/null +++ b/dirsrvtests/tests/suites/acl/enhanced_aci_modrnd_test.py @@ -0,0 +1,121 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2020 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. 
+# --- END COPYRIGHT BLOCK --- +# +import pytest +from lib389.tasks import * +from lib389.utils import * +from lib389.topologies import topology_st + +pytestmark = pytest.mark.tier1 + +logging.getLogger(__name__).setLevel(logging.DEBUG) +log = logging.getLogger(__name__) + +CONTAINER_1_OU = 'test_ou_1' +CONTAINER_2_OU = 'test_ou_2' +CONTAINER_1 = f'ou={CONTAINER_1_OU},dc=example,dc=com' +CONTAINER_2 = f'ou={CONTAINER_2_OU},dc=example,dc=com' +USER_CN = 'test_user' +USER_PWD = 'Secret123' +USER = f'cn={USER_CN},{CONTAINER_1}' + + +@pytest.fixture(scope="module") +def env_setup(topology_st): + """Adds two containers, one user and two ACI rules""" + + log.info("Add a container: %s" % CONTAINER_1) + topology_st.standalone.add_s(Entry((CONTAINER_1, + {'objectclass': ['top','organizationalunit'], + 'ou': CONTAINER_1_OU, + }))) + + log.info("Add a container: %s" % CONTAINER_2) + topology_st.standalone.add_s(Entry((CONTAINER_2, + {'objectclass': ['top', 'organizationalunit'], + 'ou': CONTAINER_2_OU, + }))) + + log.info("Add a user: %s" % USER) + topology_st.standalone.add_s(Entry((USER, + {'objectclass': 'top person'.split(), + 'cn': USER_CN, + 'sn': USER_CN, + 'userpassword': USER_PWD + }))) + + ACI_TARGET = '(targetattr="*")' + ACI_ALLOW = '(version 3.0; acl "All rights for %s"; allow (all) ' % USER + ACI_SUBJECT = 'userdn="ldap:///%s";)' % USER + ACI_BODY = ACI_TARGET + ACI_ALLOW + ACI_SUBJECT + mod = [(ldap.MOD_ADD, 'aci', ensure_bytes(ACI_BODY))] + + log.info("Add an ACI 'allow (all)' by %s to the %s" % (USER, + CONTAINER_1)) + topology_st.standalone.modify_s(CONTAINER_1, mod) + + log.info("Add an ACI 'allow (all)' by %s to the %s" % (USER, + CONTAINER_2)) + topology_st.standalone.modify_s(CONTAINER_2, mod) + + +@pytest.mark.ds47553 +def test_enhanced_aci_modrnd(topology_st, env_setup): + """Tests, that MODRDN operation is allowed, + if user has ACI right '(all)' under superior entries, + but doesn't have '(modrdn)' + + :id: 492cf2a9-2efe-4e3b-955e-85eca61d66b9 + 
:setup: Standalone instance + :steps: + 1. Create two containers + 2. Create a user within "ou=test_ou_1,dc=example,dc=com" + 3. Add an aci with a rule "cn=test_user is allowed all" within these containers + 4. Run MODRDN operation on the "cn=test_user" and set "newsuperior" to + the "ou=test_ou_2,dc=example,dc=com" + 5. Check there is no user under container one (ou=test_ou_1,dc=example,dc=com) + 6. Check there is a user under container two (ou=test_ou_2,dc=example,dc=com) + + :expectedresults: + 1. Two containers should be created + 2. User should be added successfully + 3. This should pass + 4. This should pass + 5. User should not be found under container ou=test_ou_1,dc=example,dc=com + 6. User should be found under container ou=test_ou_2,dc=example,dc=com + """ + + log.info("Bind as %s" % USER) + + topology_st.standalone.simple_bind_s(USER, USER_PWD) + + log.info("User MODRDN operation from %s to %s" % (CONTAINER_1, + CONTAINER_2)) + + topology_st.standalone.rename_s(USER, "cn=%s" % USER_CN, + newsuperior=CONTAINER_2, delold=1) + + log.info("Check there is no user in %s" % CONTAINER_1) + entries = topology_st.standalone.search_s(CONTAINER_1, + ldap.SCOPE_ONELEVEL, + 'cn=%s' % USER_CN) + assert not entries + + log.info("Check there is our user in %s" % CONTAINER_2) + entries = topology_st.standalone.search_s(CONTAINER_2, + ldap.SCOPE_ONELEVEL, + 'cn=%s' % USER_CN) + assert entries + + +if __name__ == '__main__': + # Run isolated + # -s for DEBUG mode + # -v for additional verbose + CURRENT_FILE = os.path.realpath(__file__) + pytest.main("-s -v %s" % CURRENT_FILE) diff --git a/dirsrvtests/tests/suites/acl/globalgroup_part2_test.py b/dirsrvtests/tests/suites/acl/globalgroup_part2_test.py new file mode 100644 index 0000000..eb6f704 --- /dev/null +++ b/dirsrvtests/tests/suites/acl/globalgroup_part2_test.py @@ -0,0 +1,478 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2020 Red Hat, Inc. +# All rights reserved. 
+# +# License: GPL (version 3 or any later version). +# See LICENSE for details. +# --- END COPYRIGHT BLOCK ---- + + +import pytest, os, ldap +from lib389._constants import DEFAULT_SUFFIX, PW_DM +from lib389.idm.user import UserAccount, UserAccounts +from lib389.idm.group import UniqueGroup, UniqueGroups +from lib389.idm.organizationalunit import OrganizationalUnit +from lib389.topologies import topology_st as topo +from lib389.idm.domain import Domain + +pytestmark = pytest.mark.tier1 + +NESTEDGROUP_OU_GLOBAL = "ou=nestedgroup, {}".format(DEFAULT_SUFFIX) +DEEPUSER_GLOBAL = "uid=DEEPUSER_GLOBAL, {}".format(NESTEDGROUP_OU_GLOBAL) +DEEPUSER2_GLOBAL = "uid=DEEPUSER2_GLOBAL, {}".format(NESTEDGROUP_OU_GLOBAL) +DEEPUSER3_GLOBAL = "uid=DEEPUSER3_GLOBAL, {}".format(NESTEDGROUP_OU_GLOBAL) +DEEPGROUPSCRATCHENTRY_GLOBAL = "uid=scratchEntry,{}".format(NESTEDGROUP_OU_GLOBAL) +GROUPDNATTRSCRATCHENTRY_GLOBAL = "uid=GROUPDNATTRSCRATCHENTRY_GLOBAL,{}".format(NESTEDGROUP_OU_GLOBAL) +GROUPDNATTRCHILDSCRATCHENTRY_GLOBAL = "uid=c1,{}".format(GROUPDNATTRSCRATCHENTRY_GLOBAL) +NEWCHILDSCRATCHENTRY_GLOBAL = "uid=newChild,{}".format(NESTEDGROUP_OU_GLOBAL) +ALLGROUPS_GLOBAL = "cn=ALLGROUPS_GLOBAL,{}".format(NESTEDGROUP_OU_GLOBAL) +GROUPA_GLOBAL = "cn=GROUPA_GLOBAL,{}".format(NESTEDGROUP_OU_GLOBAL) +GROUPB_GLOBAL = "cn=GROUPB_GLOBAL,{}".format(NESTEDGROUP_OU_GLOBAL) +GROUPC_GLOBAL = "cn=GROUPC_GLOBAL,{}".format(NESTEDGROUP_OU_GLOBAL) +GROUPD_GLOBAL = "cn=GROUPD_GLOBAL,{}".format(NESTEDGROUP_OU_GLOBAL) +GROUPE_GLOBAL = "cn=GROUPE_GLOBAL,{}".format(NESTEDGROUP_OU_GLOBAL) +GROUPF_GLOBAL = "cn=GROUPF_GLOBAL,{}".format(NESTEDGROUP_OU_GLOBAL) +GROUPG_GLOBAL = "cn=GROUPG_GLOBAL,{}".format(NESTEDGROUP_OU_GLOBAL) +GROUPH_GLOBAL = "cn=GROUPH_GLOBAL,{}".format(NESTEDGROUP_OU_GLOBAL) +CHILD1_GLOBAL = "uid=CHILD1_GLOBAL,{}".format(GROUPDNATTRSCRATCHENTRY_GLOBAL) +CONTAINER_1_DELADD = "ou=Product Development,{}".format(DEFAULT_SUFFIX) +CONTAINER_2_DELADD = "ou=Accounting,{}".format(DEFAULT_SUFFIX) + + 
+@pytest.fixture(scope="function") +def aci_of_user(request, topo): + aci_list = Domain(topo.standalone, DEFAULT_SUFFIX).get_attr_vals('aci') + + def finofaci(): + domain = Domain(topo.standalone, DEFAULT_SUFFIX) + domain.set('aci', None) + for i in aci_list: + domain.add("aci", i) + + request.addfinalizer(finofaci) + + +@pytest.fixture(scope="module") +def add_test_user(request, topo): + for demo in ['Product Development', 'Accounting', 'nestedgroup']: + OrganizationalUnit(topo.standalone, "ou={},{}".format(demo, DEFAULT_SUFFIX)).create(properties={'ou': demo}) + + uas = UserAccounts(topo.standalone, DEFAULT_SUFFIX, 'ou=nestedgroup') + for demo1 in ['DEEPUSER_GLOBAL', 'scratchEntry', 'DEEPUSER2_GLOBAL', + 'DEEPUSER3_GLOBAL', 'GROUPDNATTRSCRATCHENTRY_GLOBAL', 'newChild']: + uas.create(properties={ + 'uid': demo1, + 'cn': demo1, + 'sn': 'user', + 'uidNumber': '1000', + 'gidNumber': '2000', + 'homeDirectory': '/home/' + demo1, + 'userPassword': PW_DM + }) + + # Add anonymous access aci + ACI_TARGET = "(targetattr=\"*\")(target = \"ldap:///%s\")" % (DEFAULT_SUFFIX) + ACI_ALLOW = "(version 3.0; acl \"Anonymous Read access\"; allow (read,search,compare)" + ACI_SUBJECT = "(userdn=\"ldap:///anyone\");)" + ANON_ACI = ACI_TARGET + ACI_ALLOW + ACI_SUBJECT + suffix = Domain(topo.standalone, DEFAULT_SUFFIX) + suffix.add('aci', ANON_ACI) + + uas = UserAccounts(topo.standalone, DEFAULT_SUFFIX, 'uid=GROUPDNATTRSCRATCHENTRY_GLOBAL,ou=nestedgroup') + for demo1 in ['c1', 'CHILD1_GLOBAL']: + uas.create(properties={ + 'uid': demo1, + 'cn': demo1, + 'sn': 'user', + 'uidNumber': '1000', + 'gidNumber': '2000', + 'homeDirectory': '/home/' + demo1, + 'userPassword': PW_DM + }) + + grp = UniqueGroups(topo.standalone, DEFAULT_SUFFIX, rdn='ou=nestedgroup') + for i in [('ALLGROUPS_GLOBAL', GROUPA_GLOBAL), ('GROUPA_GLOBAL', GROUPB_GLOBAL), ('GROUPB_GLOBAL', GROUPC_GLOBAL), + ('GROUPC_GLOBAL', GROUPD_GLOBAL), ('GROUPD_GLOBAL', GROUPE_GLOBAL), ('GROUPE_GLOBAL', GROUPF_GLOBAL), + ('GROUPF_GLOBAL', 
GROUPG_GLOBAL), ('GROUPG_GLOBAL', GROUPH_GLOBAL), ('GROUPH_GLOBAL', DEEPUSER_GLOBAL)]: + grp.create(properties={'cn': i[0], + 'ou': 'groups', + 'uniquemember': i[1] + }) + + +def test_undefined_in_group_eval_five(topo, add_test_user, aci_of_user): + """ + Aci will not allow access as Group dn is not allowed so members will not allowed access. + + :id: 11451a96-7841-11e8-9f79-8c16451d917b + :setup: server + :steps: + 1. Add test entry + 2. Take a count of users using DN_DM + 3. Add test user + 4. add aci + 5. test should fulfil the aci rules + :expectedresults: + 1. Entry should be added + 2. Operation should succeed + 3. Operation should succeed + 4. Operation should succeed + 5. Operation should succeed + """ + + Domain(topo.standalone, DEFAULT_SUFFIX).add("aci",'(targetattr="*")(version 3.0; aci "tester"; allow(all) groupdn != "ldap:///{}\ || ldap:///{}";)'.format(ALLGROUPS_GLOBAL, GROUPF_GLOBAL)) + conn = UserAccount(topo.standalone, DEEPUSER2_GLOBAL).bind(PW_DM) + # This aci should NOT allow access + user = UserAccount(conn, DEEPGROUPSCRATCHENTRY_GLOBAL) + with pytest.raises(ldap.INSUFFICIENT_ACCESS): + user.replace("description", "Fred") + assert user.get_attr_val_utf8('uid') == 'scratchEntry' + + +def test_undefined_in_group_eval_six(topo, add_test_user, aci_of_user): + """ + Aci will not allow access as tested user is not a member of allowed Group dn + + :id: 1904572e-7841-11e8-a9d8-8c16451d917b + :setup: server + :steps: + 1. Add test entry + 2. Take a count of users using DN_DM + 3. Add test user + 4. add aci + 5. test should fullfil the aci rules + :expectedresults: + 1. Entry should be added + 2. Operation should succeed + 3. Operation should succeed + 4. Operation should succeed + 5. 
Operation should succeed + """ + Domain(topo.standalone, DEFAULT_SUFFIX).add("aci",'(targetattr="*")(version 3.0; aci "tester"; allow(all) groupdn = "ldap:///{} || ldap:///{}" ;)'.format(GROUPH_GLOBAL, ALLGROUPS_GLOBAL)) + conn = UserAccount(topo.standalone, DEEPUSER3_GLOBAL).bind(PW_DM) + # test UNDEFINED in group + user = UserAccount(conn, DEEPGROUPSCRATCHENTRY_GLOBAL) + with pytest.raises(ldap.INSUFFICIENT_ACCESS): + user.replace("description", "Fred") + assert user.get_attr_val_utf8('uid') == 'scratchEntry' + + +def test_undefined_in_group_eval_seven(topo, add_test_user, aci_of_user): + """ + Aci will not allow access as tested user is not a member of allowed Group dn + + :id: 206b43c4-7841-11e8-b3ed-8c16451d917b + :setup: server + :steps: + 1. Add test entry + 2. Take a count of users using DN_DM + 3. Add test user + 4. add aci + 5. test should fullfil the aci rules + :expectedresults: + 1. Entry should be added + 2. Operation should succeed + 3. Operation should succeed + 4. Operation should succeed + 5. Operation should succeed + """ + Domain(topo.standalone, DEFAULT_SUFFIX).add("aci",'(targetattr="*")(version 3.0; aci "tester"; allow(all) groupdn = "ldap:///{}\ || ldap:///{}";)'.format(ALLGROUPS_GLOBAL, GROUPH_GLOBAL)) + conn = UserAccount(topo.standalone, DEEPUSER3_GLOBAL).bind(PW_DM) + # test UNDEFINED in group + user = UserAccount(conn, DEEPGROUPSCRATCHENTRY_GLOBAL) + with pytest.raises(ldap.INSUFFICIENT_ACCESS): + user.replace("description", "Fred") + assert user.get_attr_val_utf8('uid') == 'scratchEntry' + + +def test_undefined_in_group_eval_eight(topo, add_test_user, aci_of_user): + """ + Aci will not allow access as Group dn is not allowed so members will not allowed access. + + :id: 26ca7456-7841-11e8-801e-8c16451d917b + :setup: server + :steps: + 1. Add test entry + 2. Take a count of users using DN_DM + 3. Add test user + 4. add aci + 5. test should fullfil the aci rules + :expectedresults: + 1. Entry should be added + 2. 
Operation should succeed + 3. Operation should succeed + 4. Operation should succeed + 5. Operation should succeed + """ + Domain(topo.standalone, DEFAULT_SUFFIX).add("aci",'(targetattr="*")(version 3.0; aci "tester"; allow(all) groupdn != "ldap:///{} || ldap:///{} || ldap:///{}" ;)'.format(GROUPH_GLOBAL, GROUPA_GLOBAL, ALLGROUPS_GLOBAL)) + conn = UserAccount(topo.standalone, DEEPUSER3_GLOBAL).bind(PW_DM) + # test UNDEFINED in group + user = UserAccount(conn, DEEPGROUPSCRATCHENTRY_GLOBAL) + with pytest.raises(ldap.INSUFFICIENT_ACCESS): + user.replace("description", "Fred") + assert user.get_attr_val_utf8('uid') == 'scratchEntry' + + +def test_undefined_in_group_eval_nine(topo, add_test_user, aci_of_user): + """ + Aci will not allow access as Group dn is not allowed so members will not allowed access. + + :id: 38c7fbb0-7841-11e8-90aa-8c16451d917b + :setup: server + :steps: + 1. Add test entry + 2. Take a count of users using DN_DM + 3. Add test user + 4. add aci + 5. test should fullfil the aci rules + :expectedresults: + 1. Entry should be added + 2. Operation should succeed + 3. Operation should succeed + 4. Operation should succeed + 5. Operation should succeed + """ + Domain(topo.standalone, DEFAULT_SUFFIX).add("aci",'(targetattr="*")(version 3.0; aci "tester"; allow(all) groupdn != "ldap:///{}\ || ldap:///{} || ldap:///{}";)'.format(ALLGROUPS_GLOBAL, GROUPA_GLOBAL, GROUPH_GLOBAL)) + conn = UserAccount(topo.standalone, DEEPUSER3_GLOBAL).bind(PW_DM) + # test UNDEFINED in group + user = UserAccount(conn, DEEPGROUPSCRATCHENTRY_GLOBAL) + with pytest.raises(ldap.INSUFFICIENT_ACCESS): + user.replace("sn", "Fred") + assert user.get_attr_val_utf8('uid') == 'scratchEntry' + + +def test_undefined_in_group_eval_ten(topo, add_test_user, aci_of_user): + """ + Test the userattr keyword to ensure that it evaluates correctly. + + :id: 46c0fb72-7841-11e8-af1d-8c16451d917b + :setup: server + :steps: + 1. Add test entry + 2. Take a count of users using DN_DM + 3. 
Add test user + 4. add aci + 5. test should fullfil the aci rules + :expectedresults: + 1. Entry should be added + 2. Operation should succeed + 3. Operation should succeed + 4. Operation should succeed + 5. Operation should succeed + """ + Domain(topo.standalone, DEFAULT_SUFFIX).add("aci",'(targetattr="*")(version 3.0; aci "tester"; allow(all) userattr = "description#GROUPDN";)') + user = UserAccount(topo.standalone, DEEPGROUPSCRATCHENTRY_GLOBAL) + user.add("description", [ALLGROUPS_GLOBAL, GROUPG_GLOBAL]) + conn = UserAccount(topo.standalone, DEEPUSER_GLOBAL).bind(PW_DM) + # Test the userattr keyword + user.add("sn", "Fred") + assert UserAccount(conn, DEEPGROUPSCRATCHENTRY_GLOBAL).get_attr_val_utf8('uid') == 'scratchEntry' + user.remove("description", [ALLGROUPS_GLOBAL, GROUPG_GLOBAL]) + + +def test_undefined_in_group_eval_eleven(topo, add_test_user, aci_of_user): + """ + Aci will not allow access as description is there with the user entry which is not allowed in ACI + + :id: 4cfa28e2-7841-11e8-8117-8c16451d917b + :setup: server + :steps: + 1. Add test entry + 2. Take a count of users using DN_DM + 3. Add test user + 4. add aci + 5. test should fullfil the aci rules + :expectedresults: + 1. Entry should be added + 2. Operation should succeed + 3. Operation should succeed + 4. Operation should succeed + 5. 
Operation should succeed + """ + Domain(topo.standalone, DEFAULT_SUFFIX).add("aci",'(targetattr="*")(version 3.0; aci "tester"; allow(all) not( userattr = "description#GROUPDN");)') + user = UserAccount(topo.standalone, DEEPGROUPSCRATCHENTRY_GLOBAL) + user.add("description", [ALLGROUPS_GLOBAL, GROUPH_GLOBAL]) + conn = UserAccount(topo.standalone, DEEPUSER_GLOBAL).bind(PW_DM) + # Test that not(UNDEFINED(attrval1)) + user1 = UserAccount(conn, DEEPGROUPSCRATCHENTRY_GLOBAL) + with pytest.raises(ldap.INSUFFICIENT_ACCESS): + user1.add("sn", "Fred1") + assert user.get_attr_val_utf8('cn') + user.remove("description", [ALLGROUPS_GLOBAL, GROUPH_GLOBAL]) + + +def test_undefined_in_group_eval_twelve(topo, add_test_user, aci_of_user): + """ + Test with the parent keyord that Yields TRUE as description is present in tested entry + + :id: 54f471ec-7841-11e8-8910-8c16451d917b + :setup: server + :steps: + 1. Add test entry + 2. Take a count of users using DN_DM + 3. Add test user + 4. add aci + 5. test should fullfil the aci rules + :expectedresults: + 1. Entry should be added + 2. Operation should succeed + 3. Operation should succeed + 4. Operation should succeed + 5. 
Operation should succeed + """ + Domain(topo.standalone, DEFAULT_SUFFIX).add("aci",'(targetattr="*")(version 3.0; aci "tester"; allow(all) userattr = "parent[0,1].description#GROUPDN";)') + user = UserAccount(topo.standalone, GROUPDNATTRSCRATCHENTRY_GLOBAL) + user.add("description", [ALLGROUPS_GLOBAL, GROUPD_GLOBAL]) + conn = UserAccount(topo.standalone, DEEPUSER_GLOBAL).bind(PW_DM) + # Test with the parent keyord + UserAccount(conn, GROUPDNATTRCHILDSCRATCHENTRY_GLOBAL).add("sn", "Fred") + assert UserAccount(conn, DEEPGROUPSCRATCHENTRY_GLOBAL).get_attr_val_utf8('cn') + user.remove("description", [ALLGROUPS_GLOBAL, GROUPD_GLOBAL]) + + +def test_undefined_in_group_eval_fourteen(topo, add_test_user, aci_of_user): + """ + Test with parent keyword that Yields FALSE as description is not present in tested entry + + :id: 5c527218-7841-11e8-8909-8c16451d917b + :setup: server + :steps: + 1. Add test entry + 2. Take a count of users using DN_DM + 3. Add test user + 4. add aci + 5. test should fullfil the aci rules + :expectedresults: + 1. Entry should be added + 2. Operation should succeed + 3. Operation should succeed + 4. Operation should succeed + 5. 
Operation should succeed + """ + Domain(topo.standalone, DEFAULT_SUFFIX).add("aci",'(targetattr="*")(version 3.0; aci "tester"; allow(all) userattr = "parent[0,1].description#GROUPDN";)') + user = UserAccount(topo.standalone, GROUPDNATTRSCRATCHENTRY_GLOBAL) + user.add("description", [ALLGROUPS_GLOBAL, GROUPG_GLOBAL]) + conn = UserAccount(topo.standalone, DEEPUSER2_GLOBAL).bind(PW_DM) + # Test with parent keyword + user1 = UserAccount(conn, GROUPDNATTRCHILDSCRATCHENTRY_GLOBAL) + with pytest.raises(ldap.INSUFFICIENT_ACCESS): + user1.add("sn", "Fred") + assert UserAccount(conn, DEEPGROUPSCRATCHENTRY_GLOBAL).get_attr_val_utf8('cn') + user.remove("description", [ALLGROUPS_GLOBAL, GROUPG_GLOBAL]) + + +def test_undefined_in_group_eval_fifteen(topo, add_test_user, aci_of_user): + """ + Here do the same tests for userattr with the parent keyword. + + :id: 6381c070-7841-11e8-a6b6-8c16451d917b + :setup: server + :steps: + 1. Add test entry + 2. Take a count of users using DN_DM + 3. Add test user + 4. add aci + 5. test should fullfil the aci rules + :expectedresults: + 1. Entry should be added + 2. Operation should succeed + 3. Operation should succeed + 4. Operation should succeed + 5. Operation should succeed + """ + Domain(topo.standalone, DEFAULT_SUFFIX).add("aci",'(targetattr="*")(version 3.0; aci "tester"; allow(all) userattr = "parent[0,1].description#USERDN";)') + UserAccount(topo.standalone, NESTEDGROUP_OU_GLOBAL).add("description", DEEPUSER_GLOBAL) + # Here do the same tests for userattr with the parent keyword. + conn = UserAccount(topo.standalone, DEEPUSER_GLOBAL).bind(PW_DM) + UserAccount(conn, NEWCHILDSCRATCHENTRY_GLOBAL).add("description", DEEPUSER_GLOBAL) + + +def test_undefined_in_group_eval_sixteen(topo, add_test_user, aci_of_user): + """ + Test with parent keyword with not key + + :id: 69852688-7841-11e8-8db1-8c16451d917b + :setup: server + :steps: + 1. Add test entry + 2. Take a count of users using DN_DM + 3. Add test user + 4. add aci + 5. 
test should fullfil the aci rules + :expectedresults: + 1. Entry should be added + 2. Operation should succeed + 3. Operation should succeed + 4. Operation should succeed + 5. Operation should succeed + """ + domain = Domain(topo.standalone, DEFAULT_SUFFIX) + domain.add("aci",'(targetattr="*")(version 3.0; aci "tester"; allow(all) not ( userattr = "parent[0,1].description#USERDN");)') + domain.add("description", DEEPUSER_GLOBAL) + conn = UserAccount(topo.standalone, DEEPUSER_GLOBAL).bind(PW_DM) + # Test with parent keyword with not key + user = UserAccount(conn, NEWCHILDSCRATCHENTRY_GLOBAL) + with pytest.raises(ldap.INSUFFICIENT_ACCESS): + user.add("description",DEEPUSER_GLOBAL) + + +def test_undefined_in_group_eval_seventeen(topo, add_test_user, aci_of_user): + """ + Test with the parent keyord that Yields TRUE as description is present in tested entry + + :id: 7054d1c0-7841-11e8-8177-8c16451d917b + :setup: server + :steps: + 1. Add test entry + 2. Take a count of users using DN_DM + 3. Add test user + 4. add aci + 5. test should fullfil the aci rules + :expectedresults: + 1. Entry should be added + 2. Operation should succeed + 3. Operation should succeed + 4. Operation should succeed + 5. Operation should succeed + """ + Domain(topo.standalone, DEFAULT_SUFFIX).add("aci",'(targetattr="*")(version 3.0; aci "tester"; allow(all) userattr = "parent[0,1].description#GROUPDN";)') + user = UserAccount(topo.standalone, GROUPDNATTRSCRATCHENTRY_GLOBAL) + # Test with the parent keyord + user.add("description", [ALLGROUPS_GLOBAL, GROUPD_GLOBAL]) + conn = UserAccount(topo.standalone, DEEPUSER_GLOBAL).bind(PW_DM) + UserAccount(conn, CHILD1_GLOBAL).add("description", DEEPUSER_GLOBAL) + user.remove("description", [ALLGROUPS_GLOBAL, GROUPD_GLOBAL]) + + +def test_undefined_in_group_eval_eighteen(topo, add_test_user, aci_of_user): + """ + Test with parent keyword with not key + + :id: 768b9ab0-7841-11e8-87c3-8c16451d917b + :setup: server + :steps: + 1. Add test entry + 2. 
Take a count of users using DN_DM + 3. Add test user + 4. add aci + 5. test should fullfil the aci rules + :expectedresults: + 1. Entry should be added + 2. Operation should succeed + 3. Operation should succeed + 4. Operation should succeed + 5. Operation should succeed + """ + Domain(topo.standalone, DEFAULT_SUFFIX).add("aci",'(targetattr="*")(version 3.0; aci "tester"; allow(all) not (userattr = "parent[0,1].description#GROUPDN" );)') + user = UserAccount(topo.standalone, GROUPDNATTRSCRATCHENTRY_GLOBAL) + # Test with parent keyword with not key + user.add("description", [ALLGROUPS_GLOBAL, GROUPH_GLOBAL]) + conn = UserAccount(topo.standalone, DEEPUSER_GLOBAL).bind(PW_DM) + user = UserAccount(conn, CHILD1_GLOBAL) + with pytest.raises(ldap.INSUFFICIENT_ACCESS): + user.add("description", DEEPUSER_GLOBAL) + + +if __name__ == "__main__": + CURRENT_FILE = os.path.realpath(__file__) + pytest.main("-s -v %s" % CURRENT_FILE) diff --git a/dirsrvtests/tests/suites/acl/globalgroup_test.py b/dirsrvtests/tests/suites/acl/globalgroup_test.py new file mode 100644 index 0000000..6057472 --- /dev/null +++ b/dirsrvtests/tests/suites/acl/globalgroup_test.py @@ -0,0 +1,438 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2020 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. 
+# --- END COPYRIGHT BLOCK ---- + + +import pytest, os, ldap +from lib389._constants import DEFAULT_SUFFIX, PW_DM +from lib389.idm.user import UserAccount, UserAccounts +from lib389.idm.group import UniqueGroup, UniqueGroups +from lib389.idm.organizationalunit import OrganizationalUnit +from lib389.topologies import topology_st as topo +from lib389.idm.domain import Domain + +pytestmark = pytest.mark.tier1 + +ACLGROUP_OU_GLOBAL = "ou=ACLGroup,{}".format(DEFAULT_SUFFIX) +NESTEDGROUP_OU_GLOBAL = "ou=nestedgroup, {}".format(DEFAULT_SUFFIX) +TESTING_OU_GLOBAL = "ou=Product Testing,{}".format(DEFAULT_SUFFIX) +DEEPUSER_GLOBAL = "uid=DEEPUSER_GLOBAL, {}".format(NESTEDGROUP_OU_GLOBAL) +DEEPUSER1_GLOBAL = "uid=DEEPUSER1_GLOBAL, {}".format(NESTEDGROUP_OU_GLOBAL) +DEEPUSER2_GLOBAL = "uid=DEEPUSER2_GLOBAL, {}".format(NESTEDGROUP_OU_GLOBAL) +DEEPUSER3_GLOBAL = "uid=DEEPUSER3_GLOBAL, {}".format(NESTEDGROUP_OU_GLOBAL) +DEEPGROUPSCRATCHENTRY_GLOBAL = "uid=scratchEntry,{}".format(NESTEDGROUP_OU_GLOBAL) +GROUPDNATTRSCRATCHENTRY_GLOBAL = "uid=GROUPDNATTRSCRATCHENTRY_GLOBAL,{}".format(NESTEDGROUP_OU_GLOBAL) +GROUPDNATTRCHILDSCRATCHENTRY_GLOBAL = "uid=c1,{}".format(GROUPDNATTRSCRATCHENTRY_GLOBAL) +NEWCHILDSCRATCHENTRY_GLOBAL = "uid=newChild,{}".format(NESTEDGROUP_OU_GLOBAL) +BIG_GLOBAL = "cn=BIG_GLOBAL Group,{}".format(DEFAULT_SUFFIX) +ALLGROUPS_GLOBAL = "cn=ALLGROUPS_GLOBAL,{}".format(NESTEDGROUP_OU_GLOBAL) +GROUPA_GLOBAL = "cn=GROUPA_GLOBAL,{}".format(NESTEDGROUP_OU_GLOBAL) +GROUPB_GLOBAL = "cn=GROUPB_GLOBAL,{}".format(NESTEDGROUP_OU_GLOBAL) +GROUPC_GLOBAL = "cn=GROUPC_GLOBAL,{}".format(NESTEDGROUP_OU_GLOBAL) +GROUPD_GLOBAL = "cn=GROUPD_GLOBAL,{}".format(NESTEDGROUP_OU_GLOBAL) +GROUPE_GLOBAL = "cn=GROUPE_GLOBAL,{}".format(NESTEDGROUP_OU_GLOBAL) +GROUPF_GLOBAL = "cn=GROUPF_GLOBAL,{}".format(NESTEDGROUP_OU_GLOBAL) +GROUPG_GLOBAL = "cn=GROUPG_GLOBAL,{}".format(NESTEDGROUP_OU_GLOBAL) +GROUPH_GLOBAL = "cn=GROUPH_GLOBAL,{}".format(NESTEDGROUP_OU_GLOBAL) +CONTAINER_1_DELADD = "ou=Product 
Development,{}".format(DEFAULT_SUFFIX)
+CONTAINER_2_DELADD = "ou=Accounting,{}".format(DEFAULT_SUFFIX)
+
+
+@pytest.fixture(scope="function")
+def aci_of_user(request, topo):
+    # Snapshot the suffix ACIs and restore them after each test.
+    aci_list = Domain(topo.standalone, DEFAULT_SUFFIX).get_attr_vals('aci')
+
+    def finofaci():
+        domain = Domain(topo.standalone, DEFAULT_SUFFIX)
+        domain.set('aci', None)
+        for i in aci_list:
+            domain.add("aci", i)
+
+    request.addfinalizer(finofaci)
+
+
+@pytest.fixture(scope="module")
+def add_test_user(request, topo):
+    # Create the OUs, users and nested unique groups shared by every test below.
+    for demo in ['Product Development', 'Accounting', 'Product Testing', 'nestedgroup', 'ACLGroup']:
+        OrganizationalUnit(topo.standalone, "ou={},{}".format(demo, DEFAULT_SUFFIX)).create(properties={'ou': demo})
+
+    user = UserAccounts(topo.standalone, DEFAULT_SUFFIX, rdn='ou=Accounting')
+    for demo1 in ['Ted Morris', 'David Miller']:
+        user.create(properties={
+            'uid': demo1,
+            'cn': demo1,
+            'sn': 'user',
+            'uidNumber': '1000',
+            'gidNumber': '2000',
+            'homeDirectory': '/home/' + demo1,
+            'userPassword': PW_DM
+        })
+
+    # Add anonymous access aci
+    ACI_TARGET = "(targetattr=\"*\")(target = \"ldap:///%s\")" % (DEFAULT_SUFFIX)
+    ACI_ALLOW = "(version 3.0; acl \"Anonymous Read access\"; allow (read,search,compare)"
+    ACI_SUBJECT = "(userdn=\"ldap:///anyone\");)"
+    ANON_ACI = ACI_TARGET + ACI_ALLOW + ACI_SUBJECT
+    suffix = Domain(topo.standalone, DEFAULT_SUFFIX)
+    suffix.add('aci', ANON_ACI)
+
+    uas = UserAccounts(topo.standalone, DEFAULT_SUFFIX, 'ou=nestedgroup')
+    for demo1 in ['DEEPUSER_GLOBAL', 'scratchEntry', 'DEEPUSER2_GLOBAL', 'DEEPUSER1_GLOBAL',
+                  'DEEPUSER3_GLOBAL', 'GROUPDNATTRSCRATCHENTRY_GLOBAL', 'newChild']:
+        uas.create(properties={
+            'uid': demo1,
+            'cn': demo1,
+            'sn': 'user',
+            'uidNumber': '1000',
+            'gidNumber': '2000',
+            'homeDirectory': '/home/' + demo1,
+            'userPassword': PW_DM
+        })
+
+    uas = UserAccounts(topo.standalone, DEFAULT_SUFFIX, 'uid=GROUPDNATTRSCRATCHENTRY_GLOBAL,ou=nestedgroup')
+    for demo1 in ['c1', 'CHILD1_GLOBAL']:
+        uas.create(properties={
+            'uid': demo1,
+            'cn': demo1,
+            'sn': 'user',
+            'uidNumber': '1000',
+            'gidNumber': '2000',
+            'homeDirectory': '/home/' + demo1,
+            'userPassword': PW_DM
+        })
+
+    # Build the nested membership chain ALLGROUPS -> A -> B -> ... -> H -> DEEPUSER.
+    grp = UniqueGroups(topo.standalone, DEFAULT_SUFFIX, rdn='ou=nestedgroup')
+    for i in [('ALLGROUPS_GLOBAL', GROUPA_GLOBAL), ('GROUPA_GLOBAL', GROUPB_GLOBAL), ('GROUPB_GLOBAL', GROUPC_GLOBAL),
+              ('GROUPC_GLOBAL', GROUPD_GLOBAL), ('GROUPD_GLOBAL', GROUPE_GLOBAL), ('GROUPE_GLOBAL', GROUPF_GLOBAL),
+              ('GROUPF_GLOBAL', GROUPG_GLOBAL), ('GROUPG_GLOBAL', GROUPH_GLOBAL), ('GROUPH_GLOBAL', DEEPUSER_GLOBAL)]:
+        grp.create(properties={'cn': i[0],
+                               'ou': 'groups',
+                               'uniquemember': i[1]
+                               })
+
+    grp = UniqueGroup(topo.standalone, 'cn=BIG_GLOBAL Group,{}'.format(DEFAULT_SUFFIX))
+    grp.create(properties={'cn': 'BIG_GLOBAL Group',
+                           'ou': 'groups',
+                           'uniquemember': ["uid=Ted Morris,ou=Accounting,{}".format(DEFAULT_SUFFIX),
+                                            "uid=David Miller,ou=Accounting,{}".format(DEFAULT_SUFFIX), ]
+                           })
+
+
+def test_caching_changes(topo, aci_of_user, add_test_user):
+    """
+    Add user and then test deny
+
+    :id: 26ed2dc2-783f-11e8-b1a5-8c16451d917b
+    :setup: server
+    :steps:
+        1. Add test entry
+        2. Take a count of users using DN_DM
+        3. Add test user
+        4. add aci
+        5. test should fulfil the aci rules
+    :expectedresults:
+        1. Entry should be added
+        2. Operation should succeed
+        3. Operation should succeed
+        4. Operation should succeed
+        5. Operation should succeed
+    """
+    Domain(topo.standalone, DEFAULT_SUFFIX).add("aci", '(targetattr="roomnumber")(version 3.0; acl "ACLGroup"; deny ( read, search ) userdn = "ldap:///all" ;)')
+    user = UserAccounts(topo.standalone, DEFAULT_SUFFIX, "ou=AclGroup").create_test_user()
+    user.set('roomnumber', '3445')
+    conn = UserAccount(topo.standalone, DEEPUSER_GLOBAL).bind(PW_DM)
+    # targetattr="roomnumber" will be denied access
+    user = UserAccount(conn, 'uid=test_user_1000,ou=ACLGroup,dc=example,dc=com')
+    with pytest.raises(AssertionError):
+        assert user.get_attr_val_utf8('roomNumber')
+    UserAccount(topo.standalone, 'uid=test_user_1000,ou=ACLGroup,dc=example,dc=com').delete()
+
+
+def test_deny_group_member_all_rights_to_user(topo, aci_of_user, add_test_user):
+    """
+    Try deleting user while no access
+
+    :id: 0da68a4c-7840-11e8-98c2-8c16451d917b
+    :setup: server
+    :steps:
+        1. Add test entry
+        2. Take a count of users using DN_DM
+        3. delete test user
+        4. add aci
+        5. test should fulfil the aci rules
+    :expectedresults:
+        1. Entry should be added
+        2. Operation should succeed
+        3. Operation should succeed
+        4. Operation should succeed
+        5. Operation should succeed
+    """
+    Domain(topo.standalone, DEFAULT_SUFFIX).add("aci", '(targetattr="*")(version 3.0; acl "ACLGroup"; deny (all) groupdn = "ldap:///{}" ;)'.format(BIG_GLOBAL))
+    conn = UserAccount(topo.standalone, "uid=Ted Morris, ou=Accounting, {}".format(DEFAULT_SUFFIX)).bind(PW_DM)
+    # group BIG_GLOBAL will have no access
+    user = UserAccount(conn, DEEPUSER3_GLOBAL)
+    with pytest.raises(ldap.INSUFFICIENT_ACCESS):
+        user.delete()
+
+
+def test_deny_group_member_all_rights_to_group_members(topo, aci_of_user, add_test_user):
+    """
+    Deny group member all rights
+
+    :id: 2d4ff70c-7840-11e8-8472-8c16451d917b
+    :setup: server
+    :steps:
+        1. Add test entry
+        2. Take a count of users using DN_DM
+        3. Add test user
+        4. add aci
+        5. test should fulfil the aci rules
+    :expectedresults:
+        1. Entry should be added
+        2. Operation should succeed
+        3. Operation should succeed
+        4. Operation should succeed
+        5. Operation should succeed
+    """
+    Domain(topo.standalone, DEFAULT_SUFFIX).add("aci", '(targetattr="*")(version 3.0; acl "ACLGroup"; deny (all) groupdn = "ldap:///{}" ;)'.format(BIG_GLOBAL))
+    UserAccounts(topo.standalone, DEFAULT_SUFFIX, "ou=AclGroup").create_test_user()
+    conn = UserAccount(topo.standalone, "uid=Ted Morris, ou=Accounting, {}".format(DEFAULT_SUFFIX)).bind(PW_DM)
+    # group BIG_GLOBAL no access
+    user = UserAccount(conn, 'uid=test_user_1000,ou=ACLGroup,dc=example,dc=com')
+    # NOTE(review): get_attr_val_utf8 presumably returns an empty value when read
+    # is denied; if it returned None this len() would raise TypeError — confirm.
+    assert len(user.get_attr_val_utf8('uid')) == 0
+    UserAccount(topo.standalone, 'uid=test_user_1000,ou=ACLGroup,dc=example,dc=com').delete()
+
+
+def test_deeply_nested_groups_aci_denial(topo, add_test_user, aci_of_user):
+    """
+    Test deeply nested groups (1)
+    This aci will not allow search or modify to a user too deep to be detected.
+
+    :id: 3d98229c-7840-11e8-9f55-8c16451d917b
+    :setup: server
+    :steps:
+        1. Add test entry
+        2. Take a count of users using DN_DM
+        3. Add test user
+        4. add aci
+        5. test should fulfil the aci rules
+    :expectedresults:
+        1. Entry should be added
+        2. Operation should succeed
+        3. Operation should succeed
+        4. Operation should succeed
+        5. Operation should succeed
+    """
+    Domain(topo.standalone, DEFAULT_SUFFIX).add("aci", '(targetattr="*")(version 3.0; acl "ACLGroup"; allow (all) groupdn = "ldap:///{}" ;)'.format(ALLGROUPS_GLOBAL))
+    conn = UserAccount(topo.standalone, DEEPUSER_GLOBAL).bind(PW_DM)
+    # ALLGROUPS_GLOBAL have all access
+    assert UserAccount(conn, DEEPGROUPSCRATCHENTRY_GLOBAL).get_attr_val_utf8('uid') == 'scratchEntry'
+    user = UserAccount(conn, DEEPGROUPSCRATCHENTRY_GLOBAL)
+    with pytest.raises(ldap.INSUFFICIENT_ACCESS):
+        user.delete()
+
+
+def test_deeply_nested_groups_aci_denial_two(topo, add_test_user, aci_of_user):
+    """
+    Test deeply nested groups (2)
+    This aci will allow search and modify
+
+    :id: 4ef6348e-7840-11e8-a70c-8c16451d917b
+    :setup: server
+    :steps:
+        1. Add test entry
+        2. Take a count of users using DN_DM
+        3. Add test user
+        4. add aci
+        5. test should fulfil the aci rules
+    :expectedresults:
+        1. Entry should be added
+        2. Operation should succeed
+        3. Operation should succeed
+        4. Operation should succeed
+        5. Operation should succeed
+    """
+    Domain(topo.standalone, DEFAULT_SUFFIX).add("aci", '(targetattr="*")(version 3.0; acl "ACLGroup"; allow (all) groupdn = "ldap:///{}" ;)'.format(GROUPE_GLOBAL))
+    conn = UserAccount(topo.standalone, DEEPUSER_GLOBAL).bind(PW_DM)
+    # GROUPE_GLOBAL have all access
+    user = UserAccount(conn, DEEPGROUPSCRATCHENTRY_GLOBAL)
+    user.add("sn", "Fred")
+    user.remove("sn", "Fred")
+
+
+def test_deeply_nested_groups_aci_allow(topo, add_test_user, aci_of_user):
+    """
+    Test deeply nested groups (3)
+    This aci will allow search and modify
+
+    :id: 8d338210-7840-11e8-8584-8c16451d917b
+    :setup: server
+    :steps:
+        1. Add test entry
+        2. Take a count of users using DN_DM
+        3. Add test user
+        4. add aci
+        5. test should fulfil the aci rules
+    :expectedresults:
+        1. Entry should be added
+        2. Operation should succeed
+        3. Operation should succeed
+        4. Operation should succeed
+        5. Operation should succeed
+    """
+    Domain(topo.standalone, DEFAULT_SUFFIX).add("aci", ['(targetattr="*")(version 3.0; acl "ACLGroup"; allow (all) groupdn = "ldap:///{}" ;)'.format(ALLGROUPS_GLOBAL), '(targetattr="*")(version 3.0; acl "ACLGroup"; allow (all) groupdn = "ldap:///{}" ;)'.format(GROUPE_GLOBAL)])
+    conn = UserAccount(topo.standalone, DEEPUSER_GLOBAL).bind(PW_DM)
+    # test deeply nested groups
+    user = UserAccount(conn, DEEPGROUPSCRATCHENTRY_GLOBAL)
+    user.add("sn", "Fred")
+    user.remove("sn", "Fred")
+
+
+def test_deeply_nested_groups_aci_allow_two(topo, add_test_user, aci_of_user):
+    """
+    This aci will not allow search or modify to a user too deep to be detected.
+
+    :id: 8d3459c4-7840-11e8-8ed8-8c16451d917b
+    :setup: server
+    :steps:
+        1. Add test entry
+        2. Take a count of users using DN_DM
+        3. Add test user
+        4. add aci
+        5. test should fulfil the aci rules
+    :expectedresults:
+        1. Entry should be added
+        2. Operation should succeed
+        3. Operation should succeed
+        4. Operation should succeed
+        5. Operation should succeed
+    """
+    Domain(topo.standalone, DEFAULT_SUFFIX).add("aci", '(targetattr="*")(version 3.0; acl "ACLGroup"; allow (all) groupdn = "ldap:///{}" ;)'.format(ALLGROUPS_GLOBAL))
+    conn = UserAccount(topo.standalone, DEEPUSER_GLOBAL).bind(PW_DM)
+    # This aci should not allow search or modify to a user too deep to be detected.
+    user = UserAccount(conn, DEEPGROUPSCRATCHENTRY_GLOBAL)
+    with pytest.raises(ldap.INSUFFICIENT_ACCESS):
+        user.add("sn", "Fred")
+    assert user.get_attr_val_utf8('uid') == 'scratchEntry'
+
+
+def test_undefined_in_group_eval(topo, add_test_user, aci_of_user):
+    """
+    This aci will not allow access.
+
+    :id: f1605e16-7840-11e8-b954-8c16451d917b
+    :setup: server
+    :steps:
+        1. Add test entry
+        2. Take a count of users using DN_DM
+        3. Add test user
+        4. add aci
+        5. test should fulfil the aci rules
+    :expectedresults:
+        1. Entry should be added
+        2. Operation should succeed
+        3. Operation should succeed
+        4. Operation should succeed
+        5. Operation should succeed
+    """
+    Domain(topo.standalone, DEFAULT_SUFFIX).add("aci", '(targetattr="*")(version 3.0; acl "ACLGroup"; allow (all) groupdn != "ldap:///{}" ;)'.format(ALLGROUPS_GLOBAL))
+    conn = UserAccount(topo.standalone, DEEPUSER_GLOBAL).bind(PW_DM)
+    # This aci should NOT allow access
+    user = UserAccount(conn, DEEPGROUPSCRATCHENTRY_GLOBAL)
+    with pytest.raises(ldap.INSUFFICIENT_ACCESS):
+        user.add("sn", "Fred")
+    assert user.get_attr_val_utf8('uid') == 'scratchEntry'
+
+
+def test_undefined_in_group_eval_two(topo, add_test_user, aci_of_user):
+    """
+    This aci will allow access
+
+    :id: fcfbcce2-7840-11e8-ba77-8c16451d917b
+    :setup: server
+    :steps:
+        1. Add test entry
+        2. Take a count of users using DN_DM
+        3. Add test user
+        4. add aci
+        5. test should fulfil the aci rules
+    :expectedresults:
+        1. Entry should be added
+        2. Operation should succeed
+        3. Operation should succeed
+        4. Operation should succeed
+        5. Operation should succeed
+    """
+    Domain(topo.standalone, DEFAULT_SUFFIX).add("aci", '(targetattr="*")(version 3.0; aci "tester"; allow(all) groupdn = "ldap:///{}\ || ldap:///{}";)'.format(ALLGROUPS_GLOBAL, GROUPG_GLOBAL))
+    conn = UserAccount(topo.standalone, DEEPUSER_GLOBAL).bind(PW_DM)
+    user = UserAccount(conn, DEEPGROUPSCRATCHENTRY_GLOBAL)
+    # This aci should allow access
+    user.add("sn", "Fred")
+    assert UserAccount(conn, DEEPGROUPSCRATCHENTRY_GLOBAL).get_attr_val_utf8('uid') == 'scratchEntry'
+    user.remove("sn", "Fred")
+
+
+def test_undefined_in_group_eval_three(topo, add_test_user, aci_of_user):
+    """
+    This aci will allow access
+
+    :id: 04943dcc-7841-11e8-8c46-8c16451d917b
+    :setup: server
+    :steps:
+        1. Add test entry
+        2. Take a count of users using DN_DM
+        3. Add test user
+        4. add aci
+        5. test should fulfil the aci rules
+    :expectedresults:
+        1. Entry should be added
+        2. Operation should succeed
+        3. Operation should succeed
+        4. Operation should succeed
+        5. Operation should succeed
+    """
+    Domain(topo.standalone, DEFAULT_SUFFIX).add("aci", '(targetattr="*")(version 3.0; aci "tester"; allow(all) groupdn = "ldap:///{}\ || ldap:///{}";)'.format(GROUPG_GLOBAL, ALLGROUPS_GLOBAL))
+    conn = UserAccount(topo.standalone, DEEPUSER_GLOBAL).bind(PW_DM)
+    # NOTE(review): sibling tests wrap this entry in UserAccount; Domain works for
+    # the add/remove below but looks unintentional — confirm.
+    user = Domain(conn, DEEPGROUPSCRATCHENTRY_GLOBAL)
+    # test UNDEFINED in group
+    user.add("sn", "Fred")
+    assert UserAccount(conn, DEEPGROUPSCRATCHENTRY_GLOBAL).get_attr_val_utf8('uid') == 'scratchEntry'
+    user.remove("sn", "Fred")
+
+
+def test_undefined_in_group_eval_four(topo, add_test_user, aci_of_user):
+    """
+    This aci will not allow access
+
+    :id: 0b03d10e-7841-11e8-9341-8c16451d917b
+    :setup: server
+    :steps:
+        1. Add test entry
+        2. Take a count of users using DN_DM
+        3. Add test user
+        4. add aci
+        5. test should fulfil the aci rules
+    :expectedresults:
+        1. Entry should be added
+        2. Operation should succeed
+        3. Operation should succeed
+        4. Operation should succeed
+        5. Operation should succeed
+    """
+    Domain(topo.standalone, DEFAULT_SUFFIX).add("aci", '(targetattr="*")(version 3.0; aci "tester"; allow(all) groupdn != "ldap:///{}\ || ldap:///{}";)'.format(ALLGROUPS_GLOBAL, GROUPG_GLOBAL))
+    conn = UserAccount(topo.standalone, DEEPUSER1_GLOBAL).bind(PW_DM)
+    # test UNDEFINED in group
+    user = UserAccount(conn, DEEPGROUPSCRATCHENTRY_GLOBAL)
+    with pytest.raises(ldap.INSUFFICIENT_ACCESS):
+        user.add("sn", "Fred")
+    assert user.get_attr_val_utf8('uid') == 'scratchEntry'
+
+
+if __name__ == "__main__":
+    CURRENT_FILE = os.path.realpath(__file__)
+    pytest.main("-s -v %s" % CURRENT_FILE)
diff --git a/dirsrvtests/tests/suites/acl/keywords_part2_test.py b/dirsrvtests/tests/suites/acl/keywords_part2_test.py
new file mode 100644
index 0000000..ed838a3
--- /dev/null
+++ b/dirsrvtests/tests/suites/acl/keywords_part2_test.py
@@ -0,0 +1,425 @@
+# --- BEGIN COPYRIGHT BLOCK ---
+# Copyright (C) 2020 Red Hat, Inc.
+# All rights reserved.
+#
+# License: GPL (version 3 or any later version).
+# See LICENSE for details.
+# --- END COPYRIGHT BLOCK ---
+
+
+"""
+This test script will test wrong/correct key value with ACIs.
+"""
+import ldap
+import os
+import pytest
+import socket
+import time
+from datetime import datetime
+from lib389._constants import DEFAULT_SUFFIX, PW_DM
+from lib389.idm.domain import Domain
+from lib389.idm.organizationalunit import OrganizationalUnit
+from lib389.idm.user import UserAccount
+from lib389.utils import *
+
+pytestmark = pytest.mark.tier1
+
+KEYWORDS_OU_KEY = "ou=Keywords,{}".format(DEFAULT_SUFFIX)
+DAYOFWEEK_OU_KEY = "ou=Dayofweek,{}".format(KEYWORDS_OU_KEY)
+IP_OU_KEY = "ou=IP,{}".format(KEYWORDS_OU_KEY)
+TIMEOFDAY_OU_KEY = "ou=Timeofday,{}".format(KEYWORDS_OU_KEY)
+EVERYDAY_KEY = "uid=EVERYDAY_KEY,{}".format(DAYOFWEEK_OU_KEY)
+TODAY_KEY = "uid=TODAY_KEY,{}".format(DAYOFWEEK_OU_KEY)
+NODAY_KEY = "uid=NODAY_KEY,{}".format(DAYOFWEEK_OU_KEY)
+FULLIP_KEY = "uid=FULLIP_KEY,{}".format(IP_OU_KEY)
+NETSCAPEIP_KEY = "uid=NETSCAPEIP_KEY,{}".format(IP_OU_KEY)
+NOIP_KEY = "uid=NOIP_KEY,{}".format(IP_OU_KEY)
+FULLWORKER_KEY = "uid=FULLWORKER_KEY,{}".format(TIMEOFDAY_OU_KEY)
+DAYWORKER_KEY = "uid=DAYWORKER_KEY,{}".format(TIMEOFDAY_OU_KEY)
+NIGHTWORKER_KEY = "uid=NIGHTWORKER_KEY,{}".format(TIMEOFDAY_OU_KEY)
+NOWORKER_KEY = "uid=NOWORKER_KEY,{}".format(TIMEOFDAY_OU_KEY)
+
+
+def test_access_from_certain_network_only_ip(topo, add_user, aci_of_user, request):
+    """
+    User can access the data when connecting from certain network only as per the ACI.
+
+    :id: 4ec38296-7ac5-11e8-9816-8c16451d917b
+    :customerscenario: True
+    :setup: Standalone Server
+    :steps:
+        1. Add test entry
+        2. Add ACI
+        3. User should follow ACI role
+    :expectedresults:
+        1. Entry should be added
+        2. Operation should succeed
+        3. Operation should succeed
+    """
+    # Turn access log buffering off to make less time consuming
+    topo.standalone.config.set('nsslapd-accesslog-logbuffering', 'off')
+
+    # Find the ip from ds logs , as we need to know the exact ip used by ds to run the instances.
+    # Wait till Access Log is generated
+    topo.standalone.restart()
+
+    old_hostname = socket.gethostname()
+    socket.sethostname('localhost')
+    hostname = socket.gethostname()
+    IP = socket.gethostbyname(hostname)
+
+    # Add ACI
+    domain = Domain(topo.standalone, DEFAULT_SUFFIX)
+    domain.add("aci", f'(target = "ldap:///{IP_OU_KEY}")(targetattr=\"*\")(version 3.0; aci "IP aci"; '
+                      f'allow(all)userdn = "ldap:///{NETSCAPEIP_KEY}" and (ip = "127.0.0.1" or ip = "::1" or ip = "{IP}") ;)')
+
+    # create a new connection for the test
+    new_uri = topo.standalone.ldapuri.replace(old_hostname, hostname)
+    topo.standalone.ldapuri = new_uri
+    conn = UserAccount(topo.standalone, NETSCAPEIP_KEY).bind(PW_DM)
+
+    # Perform Operation
+    topo.standalone.config.set('nsslapd-errorlog-level', '128')
+    org = OrganizationalUnit(conn, IP_OU_KEY)
+    topo.standalone.host = hostname
+    org.replace("seeAlso", "cn=1")
+
+    # remove the aci
+    domain.ensure_removed("aci", f'(target = "ldap:///{IP_OU_KEY}")(targetattr=\"*\")(version 3.0; aci '
+                                 f'"IP aci"; allow(all)userdn = "ldap:///{NETSCAPEIP_KEY}" and '
+                                 f'(ip = "127.0.0.1" or ip = "::1" or ip = "{IP}") ;)')
+    # Now add aci with new ip
+    domain.add("aci", f'(target = "ldap:///{IP_OU_KEY}")(targetattr="*")(version 3.0; aci "IP aci"; '
+                      f'allow(all)userdn = "ldap:///{NETSCAPEIP_KEY}" and ip = "100.1.1.1" ;)')
+
+    # After changing the ip user cant access data
+    with pytest.raises(ldap.INSUFFICIENT_ACCESS):
+        org.replace("seeAlso", "cn=1")
+
+    def fin():
+        log.info('Setting the hostname back to original')
+        socket.sethostname(old_hostname)
+
+    request.addfinalizer(fin)
+
+
+def test_connection_from_an_unauthorized_network(topo, add_user, aci_of_user, request):
+    """
+    User cannot access the data when connecting from an unauthorized network as per the ACI.
+
+    :id: 52d1ecce-7ac5-11e8-9ad9-8c16451d917b
+    :customerscenario: True
+    :setup: Standalone Server
+    :steps:
+        1. Add test entry
+        2. Add ACI
+        3. User should follow ACI role
+    :expectedresults:
+        1. Entry should be added
+        2. Operation should succeed
+        3. Operation should succeed
+    """
+    old_hostname = socket.gethostname()
+    socket.sethostname('localhost')
+    hostname = socket.gethostname()
+
+    # Add ACI
+    domain = Domain(topo.standalone, DEFAULT_SUFFIX)
+    domain.add("aci", f'(target = "ldap:///{IP_OU_KEY}")'
+                      f'(targetattr="*")(version 3.0; aci "IP aci"; '
+                      f'allow(all) userdn = "ldap:///{NETSCAPEIP_KEY}" '
+                      f'and (ip != "127.0.0.1" and ip != "::1") ;)')
+
+    # create a new connection for the test
+    new_uri = topo.standalone.ldapuri.replace(old_hostname, hostname)
+    topo.standalone.ldapuri = new_uri
+    conn = UserAccount(topo.standalone, NETSCAPEIP_KEY).bind(PW_DM)
+
+    # Perform Operation
+    topo.standalone.config.set('nsslapd-errorlog-level', '128')
+    org = OrganizationalUnit(conn, IP_OU_KEY)
+    with pytest.raises(ldap.INSUFFICIENT_ACCESS):
+        org.replace("seeAlso", "cn=1")
+
+    # Remove the ACI
+    domain.ensure_removed('aci', domain.get_attr_vals('aci')[-1])
+    # Add new ACI
+    domain.add('aci', f'(target = "ldap:///{IP_OU_KEY}")(targetattr="*")'
+                      f'(version 3.0; aci "IP aci"; allow(all) '
+                      f'userdn = "ldap:///{NETSCAPEIP_KEY}" and (ip = "127.0.0.1" or ip = "::1") ;)')
+    time.sleep(1)
+
+    # now user can access data
+    org.replace("seeAlso", "cn=1")
+
+    def fin():
+        log.info('Setting the hostname back to original')
+        socket.sethostname(old_hostname)
+
+    request.addfinalizer(fin)
+
+
+def test_ip_keyword_test_noip_cannot(topo, add_user, aci_of_user):
+    """
+    User NoIP cannot access the data as per the ACI.
+
+    :id: 570bc7f6-7ac5-11e8-88c1-8c16451d917b
+    :customerscenario: True
+    :setup: Standalone Server
+    :steps:
+        1. Add test entry
+        2. Add ACI
+        3. User should follow ACI role
+    :expectedresults:
+        1. Entry should be added
+        2. Operation should succeed
+        3. Operation should succeed
+    """
+    # Add ACI
+    Domain(topo.standalone,
+           DEFAULT_SUFFIX).add("aci", f'(target ="ldap:///{IP_OU_KEY}")'
+                                      f'(targetattr="*")(version 3.0; aci "IP aci"; allow(all) '
+                                      f'userdn = "ldap:///{FULLIP_KEY}" and ip = "*" ;)')
+
+    # Create a new connection for this test.
+    conn = UserAccount(topo.standalone, NOIP_KEY).bind(PW_DM)
+    # Perform Operation
+    org = OrganizationalUnit(conn, IP_OU_KEY)
+    with pytest.raises(ldap.INSUFFICIENT_ACCESS):
+        org.replace("seeAlso", "cn=1")
+
+
+def test_user_can_access_the_data_at_any_time(topo, add_user, aci_of_user):
+    """
+    User can access the data at any time as per the ACI.
+
+    :id: 5b4da91a-7ac5-11e8-bbda-8c16451d917b
+    :customerscenario: True
+    :setup: Standalone Server
+    :steps:
+        1. Add test entry
+        2. Add ACI
+        3. User should follow ACI role
+    :expectedresults:
+        1. Entry should be added
+        2. Operation should succeed
+        3. Operation should succeed
+    """
+    # Add ACI
+    Domain(topo.standalone,
+           DEFAULT_SUFFIX).add("aci", f'(target = "ldap:///{TIMEOFDAY_OU_KEY}")'
+                                      f'(targetattr="*")(version 3.0; aci "Timeofday aci"; '
+                                      f'allow(all) userdn ="ldap:///{FULLWORKER_KEY}" and '
+                                      f'(timeofday >= "0000" and timeofday <= "2359") ;)')
+
+    # Create a new connection for this test.
+    conn = UserAccount(topo.standalone, FULLWORKER_KEY).bind(PW_DM)
+    # Perform Operation
+    org = OrganizationalUnit(conn, TIMEOFDAY_OU_KEY)
+    org.replace("seeAlso", "cn=1")
+
+
+def test_user_can_access_the_data_only_in_the_morning(topo, add_user, aci_of_user):
+    """
+    User can access the data only in the morning as per the ACI.
+
+    :id: 5f7d380c-7ac5-11e8-8124-8c16451d917b
+    :customerscenario: True
+    :setup: Standalone Server
+    :steps:
+        1. Add test entry
+        2. Add ACI
+        3. User should follow ACI role
+    :expectedresults:
+        1. Entry should be added
+        2. Operation should succeed
+        3. Operation should succeed
+    """
+    # Add ACI
+    Domain(topo.standalone,
+           DEFAULT_SUFFIX).add("aci", f'(target = "ldap:///{TIMEOFDAY_OU_KEY}")'
+                                      f'(targetattr="*")(version 3.0; aci "Timeofday aci"; '
+                                      f'allow(all) userdn = "ldap:///{DAYWORKER_KEY}" '
+                                      f'and timeofday < "1200" ;)')
+
+    # Create a new connection for this test.
+    conn = UserAccount(topo.standalone, DAYWORKER_KEY).bind(PW_DM)
+    # Perform Operation
+    org = OrganizationalUnit(conn, TIMEOFDAY_OU_KEY)
+    if datetime.now().hour >= 12:
+        with pytest.raises(ldap.INSUFFICIENT_ACCESS):
+            org.replace("seeAlso", "cn=1")
+    else:
+        org.replace("seeAlso", "cn=1")
+
+
+def test_user_can_access_the_data_only_in_the_afternoon(topo, add_user, aci_of_user):
+    """
+    User can access the data only in the afternoon as per the ACI.
+
+    :id: 63eb5b1c-7ac5-11e8-bd46-8c16451d917b
+    :customerscenario: True
+    :setup: Standalone Server
+    :steps:
+        1. Add test entry
+        2. Add ACI
+        3. User should follow ACI role
+    :expectedresults:
+        1. Entry should be added
+        2. Operation should succeed
+        3. Operation should succeed
+    """
+    # Add ACI
+    Domain(topo.standalone,
+           DEFAULT_SUFFIX).add("aci", f'(target = "ldap:///{TIMEOFDAY_OU_KEY}")'
+                                      f'(targetattr="*")(version 3.0; aci "Timeofday aci"; '
+                                      f'allow(all) userdn = "ldap:///{NIGHTWORKER_KEY}" '
+                                      f'and timeofday > \'1200\' ;)')
+
+    # create a new connection for the test
+    conn = UserAccount(topo.standalone, NIGHTWORKER_KEY).bind(PW_DM)
+    # Perform Operation
+    org = OrganizationalUnit(conn, TIMEOFDAY_OU_KEY)
+    if datetime.now().hour < 12:
+        with pytest.raises(ldap.INSUFFICIENT_ACCESS):
+            org.replace("seeAlso", "cn=1")
+    else:
+        org.replace("seeAlso", "cn=1")
+
+
+def test_timeofday_keyword(topo, add_user, aci_of_user):
+    """
+    User NOWORKER_KEY can access the data as per the ACI after removing
+    the ACI it cannot.
+
+    :id: 681dd58e-7ac5-11e8-bed1-8c16451d917b
+    :customerscenario: True
+    :setup: Standalone Server
+    :steps:
+        1. Add test entry
+        2. Add ACI
+        3. User should follow ACI role
+    :expectedresults:
+        1. Entry should be added
+        2. Operation should succeed
+        3. Operation should succeed
+    """
+    now = time.strftime("%c")
+    now_1 = "".join(now.split()[3].split(":"))[:4]
+    # Add ACI
+    domain = Domain(topo.standalone, DEFAULT_SUFFIX)
+    domain.add("aci", f'(target = "ldap:///{TIMEOFDAY_OU_KEY}")'
+                      f'(targetattr="*")(version 3.0; aci "Timeofday aci"; '
+                      f'allow(all) userdn = "ldap:///{NOWORKER_KEY}" '
+                      f'and timeofday = \'{now_1}\' ;)')
+
+    # Create a new connection for this test.
+    conn = UserAccount(topo.standalone, NOWORKER_KEY).bind(PW_DM)
+    # Perform Operation
+    org = OrganizationalUnit(conn, TIMEOFDAY_OU_KEY)
+    org.replace("seeAlso", "cn=1")
+    # Remove ACI
+    aci = domain.get_attr_vals_utf8('aci')[-1]
+    domain.ensure_removed('aci', aci)
+    assert aci not in domain.get_attr_vals_utf8('aci')
+    # after removing the ACI user cannot access the data
+    time.sleep(1)
+    with pytest.raises(ldap.INSUFFICIENT_ACCESS):
+        org.replace("seeAlso", "cn=1")
+
+
+def test_dayofweek_keyword_test_everyday_can_access(topo, add_user, aci_of_user):
+    """
+    User can access the data EVERYDAY_KEY as per the ACI.
+
+    :id: 6c5922ca-7ac5-11e8-8f01-8c16451d917b
+    :customerscenario: True
+    :setup: Standalone Server
+    :steps:
+        1. Add test entry
+        2. Add ACI
+        3. User should follow ACI role
+    :expectedresults:
+        1. Entry should be added
+        2. Operation should succeed
+        3. Operation should succeed
+    """
+    # Add ACI
+    Domain(topo.standalone,
+           DEFAULT_SUFFIX).add("aci", f'(target = "ldap:///{DAYOFWEEK_OU_KEY}")'
+                                      f'(targetattr="*")(version 3.0; aci "Dayofweek aci"; '
+                                      f'allow(all) userdn = "ldap:///{EVERYDAY_KEY}" and '
+                                      f'dayofweek = "Sun, Mon, Tue, Wed, Thu, Fri, Sat" ;)')
+
+    # Create a new connection for this test.
+    conn = UserAccount(topo.standalone, EVERYDAY_KEY).bind(PW_DM)
+    # Perform Operation
+    org = OrganizationalUnit(conn, DAYOFWEEK_OU_KEY)
+    org.replace("seeAlso", "cn=1")
+
+
+def test_dayofweek_keyword_today_can_access(topo, add_user, aci_of_user):
+    """
+    User can access the data one day per week as per the ACI.
+
+    :id: 7131dc88-7ac5-11e8-acc2-8c16451d917b
+    :customerscenario: True
+    :setup: Standalone Server
+    :steps:
+        1. Add test entry
+        2. Add ACI
+        3. User should follow ACI role
+    :expectedresults:
+        1. Entry should be added
+        2. Operation should succeed
+        3. Operation should succeed
+    """
+    today_1 = time.strftime("%c").split()[0]
+    # Add ACI
+    Domain(topo.standalone,
+           DEFAULT_SUFFIX).add("aci", f'(target = "ldap:///{DAYOFWEEK_OU_KEY}")'
+                                      f'(targetattr="*")(version 3.0; aci "Dayofweek aci"; '
+                                      f'allow(all) userdn = "ldap:///{TODAY_KEY}" '
+                                      f'and dayofweek = \'{today_1}\' ;)')
+
+    # Create a new connection for this test.
+    conn = UserAccount(topo.standalone, TODAY_KEY).bind(PW_DM)
+    # Perform Operation
+    org = OrganizationalUnit(conn, DAYOFWEEK_OU_KEY)
+    org.replace("seeAlso", "cn=1")
+
+
+def test_user_cannot_access_the_data_at_all(topo, add_user, aci_of_user):
+    """
+    User cannot access the data at all as per the ACI.
+
+    :id: 75cdac5e-7ac5-11e8-968a-8c16451d917b
+    :customerscenario: True
+    :setup: Standalone Server
+    :steps:
+        1. Add test entry
+        2. Add ACI
+        3. User should follow ACI role
+    :expectedresults:
+        1. Entry should be added
+        2. Operation should succeed
+        3. Operation should succeed
+    """
+    # Add ACI
+    # NOTE(review): "$NEW_DATE" is presumably an unexpanded placeholder inherited
+    # from the original TET test; any value that never matches a real weekday
+    # keeps TODAY_KEY (and everyone else) locked out — confirm.
+    Domain(topo.standalone,
+           DEFAULT_SUFFIX).add("aci", f'(target = "ldap:///{DAYOFWEEK_OU_KEY}")'
+                                      f'(targetattr="*")(version 3.0; aci "Dayofweek aci"; '
+                                      f'allow(all) userdn = "ldap:///{TODAY_KEY}" '
+                                      f'and dayofweek = "$NEW_DATE" ;)')
+
+    # Create a new connection for this test.
+    conn = UserAccount(topo.standalone, NODAY_KEY).bind(PW_DM)
+    # Perform Operation
+    org = OrganizationalUnit(conn, DAYOFWEEK_OU_KEY)
+    with pytest.raises(ldap.INSUFFICIENT_ACCESS):
+        org.replace("seeAlso", "cn=1")
+
+
+if __name__ == "__main__":
+    CURRENT_FILE = os.path.realpath(__file__)
+    pytest.main("-s -v %s" % CURRENT_FILE)
diff --git a/dirsrvtests/tests/suites/acl/keywords_test.py b/dirsrvtests/tests/suites/acl/keywords_test.py
new file mode 100644
index 0000000..84fc486
--- /dev/null
+++ b/dirsrvtests/tests/suites/acl/keywords_test.py
@@ -0,0 +1,475 @@
+# --- BEGIN COPYRIGHT BLOCK ---
+# Copyright (C) 2019 Red Hat, Inc.
+# All rights reserved.
+#
+# License: GPL (version 3 or any later version).
+# See LICENSE for details.
+# --- END COPYRIGHT BLOCK ---
+
+"""
+This test script will test wrong/correct key value with ACIs.
+"""
+
+import os
+import socket
+import pytest
+
+from lib389.idm.account import Anonymous
+from lib389._constants import DEFAULT_SUFFIX, PW_DM
+from lib389.idm.domain import Domain
+from lib389.idm.organizationalunit import OrganizationalUnit
+from lib389.idm.user import UserAccount
+
+import ldap
+
+pytestmark = pytest.mark.tier1
+
+KEYWORDS_OU_KEY = "ou=Keywords,{}".format(DEFAULT_SUFFIX)
+DNS_OU_KEY = "ou=DNS,{}".format(KEYWORDS_OU_KEY)
+IP_OU_KEY = "ou=IP,{}".format(KEYWORDS_OU_KEY)
+FULLIP_KEY = "uid=FULLIP_KEY,{}".format(IP_OU_KEY)
+AUTHMETHOD_OU_KEY = "ou=Authmethod,{}".format(KEYWORDS_OU_KEY)
+SIMPLE_1_KEY = "uid=SIMPLE_1_KEY,{}".format(AUTHMETHOD_OU_KEY)
+FULLDNS_KEY = "uid=FULLDNS_KEY,{}".format(DNS_OU_KEY)
+SUNDNS_KEY = "uid=SUNDNS_KEY,{}".format(DNS_OU_KEY)
+NODNS_KEY = "uid=NODNS_KEY,{}".format(DNS_OU_KEY)
+NETSCAPEDNS_KEY = "uid=NETSCAPEDNS_KEY,{}".format(DNS_OU_KEY)
+NONE_1_KEY = "uid=NONE_1_KEY,{}".format(AUTHMETHOD_OU_KEY)
+NONE_2_KEY = "uid=NONE_2_KEY,{}".format(AUTHMETHOD_OU_KEY)
+
+
+NONE_ACI_KEY = f'(target = "ldap:///{AUTHMETHOD_OU_KEY}")' \
+               f'(targetattr="*")(version 3.0; aci "Authmethod aci"; ' \
+               f'allow(all) 
userdn = "ldap:///{NONE_1_KEY}" and authmethod = "none" ;)'
+
+SIMPLE_ACI_KEY = f'(target = "ldap:///{AUTHMETHOD_OU_KEY}")' \
+                 f'(targetattr="*")(version 3.0; aci "Authmethod aci"; ' \
+                 f'allow(all) userdn = "ldap:///{SIMPLE_1_KEY}" and authmethod = "simple" ;)'
+
+
+def _add_aci(topo, name):
+    """
+    This function will add ACI to DEFAULT_SUFFIX
+    """
+    Domain(topo.standalone, DEFAULT_SUFFIX).add("aci", name)
+
+
+def test_user_binds_with_a_password_and_can_access_the_data(topo, add_user, aci_of_user):
+    """User binds with a password and can access the data as per the ACI.
+
+    :id: f6c4b6f0-7ac4-11e8-a517-8c16451d917b
+    :customerscenario: True
+    :setup: Standalone Server
+    :steps:
+        1. Add test entry
+        2. Add ACI
+        3. User should follow ACI role
+    :expectedresults:
+        1. Entry should be added
+        2. Operation should succeed
+        3. Operation should succeed
+    """
+    # Add ACI
+    _add_aci(topo, NONE_ACI_KEY)
+    # Create a new connection for this test.
+    conn = UserAccount(topo.standalone, NONE_1_KEY).bind(PW_DM)
+    # Perform Operation
+    OrganizationalUnit(conn, AUTHMETHOD_OU_KEY).replace("seeAlso", "cn=1")
+
+
+def test_user_binds_with_a_bad_password_and_cannot_access_the_data(topo, add_user, aci_of_user):
+    """User binds with a BAD password and cannot access the data .
+
+    :id: 0397744e-7ac5-11e8-bfb1-8c16451d917b
+    :customerscenario: True
+    :setup: Standalone Server
+    :steps:
+        1. Add test entry
+        2. Add ACI
+        3. User should follow ACI role
+    :expectedresults:
+        1. Entry should be added
+        2. Operation should succeed
+        3. Operation should succeed
+    """
+    # User binds with a bad password and cannot access the data
+    with pytest.raises(ldap.UNWILLING_TO_PERFORM):
+        UserAccount(topo.standalone, NONE_1_KEY).bind("")
+
+
+def test_anonymous_user_cannot_access_the_data(topo, add_user, aci_of_user):
+    """Anonymous user cannot access the data
+
+    :id: 0821a55c-7ac5-11e8-b214-8c16451d917b
+    :customerscenario: True
+    :setup: Standalone Server
+    :steps:
+        1. Add test entry
+        2. Add ACI
+        3. User should follow ACI role
+    :expectedresults:
+        1. Entry should be added
+        2. Operation should succeed
+        3. Operation should succeed
+    """
+    # Add ACI
+    _add_aci(topo, NONE_ACI_KEY)
+
+    # Create a new connection for this test.
+    conn = Anonymous(topo.standalone).bind()
+    # Perform Operation
+    org = OrganizationalUnit(conn, AUTHMETHOD_OU_KEY)
+    with pytest.raises(ldap.INSUFFICIENT_ACCESS):
+        org.replace("seeAlso", "cn=1")
+
+
+def test_authenticated_but_has_no_rigth_on_the_data(topo, add_user, aci_of_user):
+    """User has a password. He is authenticated but has no right on the data.
+
+    :id: 11be7ebe-7ac5-11e8-b754-8c16451d917b
+    :customerscenario: True
+    :setup: Standalone Server
+    :steps:
+        1. Add test entry
+        2. Add ACI
+        3. User should follow ACI role
+    :expectedresults:
+        1. Entry should be added
+        2. Operation should succeed
+        3. Operation should succeed
+    """
+    # Add ACI
+    _add_aci(topo, NONE_ACI_KEY)
+
+    # Create a new connection for this test.
+    conn = UserAccount(topo.standalone, SIMPLE_1_KEY).bind(PW_DM)
+    # Perform Operation
+    org = OrganizationalUnit(conn, AUTHMETHOD_OU_KEY)
+    with pytest.raises(ldap.INSUFFICIENT_ACCESS):
+        org.replace("seeAlso", "cn=1")
+
+
+def test_the_bind_client_is_accessing_the_directory(topo, add_user, aci_of_user):
+    """The bind rule is evaluated to be true if the client is accessing the directory as per the ACI.
+
+    :id: 1715bfb2-7ac5-11e8-8f2c-8c16451d917b
+    :customerscenario: True
+    :setup: Standalone Server
+    :steps:
+        1. Add test entry
+        2. Add ACI
+        3. User should follow ACI role
+    :expectedresults:
+        1. Entry should be added
+        2. Operation should succeed
+        3. Operation should succeed
+    """
+    # Add ACI
+    _add_aci(topo, SIMPLE_ACI_KEY)
+
+    # Create a new connection for this test.
+    conn = UserAccount(topo.standalone, SIMPLE_1_KEY).bind(PW_DM)
+    # Perform Operation
+    OrganizationalUnit(conn, AUTHMETHOD_OU_KEY).replace("seeAlso", "cn=1")
+
+
+def test_users_binds_with_a_password_and_can_access_the_data(
+        topo, add_user, aci_of_user):
+    """User binds with a password and can access the data as per the ACI.
+
+    :id: 1bd01cb4-7ac5-11e8-a2f1-8c16451d917b
+    :customerscenario: True
+    :setup: Standalone Server
+    :steps:
+        1. Add test entry
+        2. Add ACI
+        3. User should follow ACI role
+    :expectedresults:
+        1. Entry should be added
+        2. Operation should succeed
+        3. Operation should succeed
+    """
+    # Add ACI
+    _add_aci(topo, SIMPLE_ACI_KEY)
+
+    # Create a new connection for this test.
+    conn = UserAccount(topo.standalone, SIMPLE_1_KEY).bind(PW_DM)
+    # Perform Operation
+    OrganizationalUnit(conn, AUTHMETHOD_OU_KEY).replace("seeAlso", "cn=1")
+
+
+def test_user_binds_without_any_password_and_cannot_access_the_data(topo, add_user, aci_of_user):
+    """User binds without any password and cannot access the data
+
+    :id: 205777fa-7ac5-11e8-ba2f-8c16451d917b
+    :customerscenario: True
+    :setup: Standalone Server
+    :steps:
+        1. Add test entry
+        2. Add ACI
+        3. User should follow ACI role
+    :expectedresults:
+        1. Entry should be added
+        2. Operation should succeed
+        3. Operation should succeed
+    """
+    # Add ACI
+    _add_aci(topo, SIMPLE_ACI_KEY)
+
+    # Create a new connection for this test.
+    conn = Anonymous(topo.standalone).bind()
+    # Perform Operation
+    org = OrganizationalUnit(conn, AUTHMETHOD_OU_KEY)
+    with pytest.raises(ldap.INSUFFICIENT_ACCESS):
+        org.replace("seeAlso", "cn=1")
+
+
+# unstable tests, automatically retried via the flaky marker
+@pytest.mark.flaky(max_runs=2, min_passes=1)
+def test_user_can_access_the_data_when_connecting_from_any_machine(
+        topo, add_user, aci_of_user
+):
+    """User can access the data when connecting from any machine as per the ACI.
+
+    :id: 28cbc008-7ac5-11e8-934e-8c16451d917b
+    :customerscenario: True
+    :setup: Standalone Server
+    :steps:
+        1. Add test entry
+        2. Add ACI
+        3. User should follow ACI role
+    :expectedresults:
+        1. Entry should be added
+        2. Operation should succeed
+        3. Operation should succeed
+    """
+    # Add ACI
+    Domain(topo.standalone, DEFAULT_SUFFIX)\
+        .add("aci", f'(target ="ldap:///{DNS_OU_KEY}")'
+                    f'(targetattr="*")(version 3.0; aci "DNS aci"; allow(all) '
+                    f'userdn = "ldap:///{FULLDNS_KEY}" and dns = "*" ;)')
+
+    # Create a new connection for this test.
+    conn = UserAccount(topo.standalone, FULLDNS_KEY).bind(PW_DM)
+    # Perform Operation
+    OrganizationalUnit(conn, DNS_OU_KEY).replace("seeAlso", "cn=1")
+
+
+# unstable tests, automatically retried via the flaky marker
+@pytest.mark.flaky(max_runs=2, min_passes=1)
+def test_user_can_access_the_data_when_connecting_from_internal_ds_network_only(
+        topo, add_user, aci_of_user
+):
+    """User can access the data when connecting from internal ICNC network only as per the ACI.
+
+    :id: 2cac2136-7ac5-11e8-8328-8c16451d917b
+    :customerscenario: True
+    :setup: Standalone Server
+    :steps:
+        1. Add test entry
+        2. Add ACI
+        3. User should follow ACI role
+    :expectedresults:
+        1. Entry should be added
+        2. Operation should succeed
+        3. Operation should succeed
+    """
+    dns_name = socket.getfqdn()
+    # Add ACI
+    Domain(topo.standalone, DEFAULT_SUFFIX).\
+        add("aci", [f'(target = "ldap:///{DNS_OU_KEY}")(targetattr="*")'
+                    f'(version 3.0; aci "DNS aci"; allow(all) '
+                    f'userdn = "ldap:///{SUNDNS_KEY}" and '
+                    f'(dns = "*redhat.com" or dns = "{dns_name}");)'])
+
+    # Create a new connection for this test.
+ conn = UserAccount(topo.standalone, SUNDNS_KEY).bind(PW_DM) + # Perform Operation + OrganizationalUnit(conn, DNS_OU_KEY).replace("seeAlso", "cn=1") + +#unstable or unstatus tests, skipped for now +@pytest.mark.flaky(max_runs=2, min_passes=1) +def test_user_can_access_the_data_when_connecting_from_some_network_only( + topo, add_user, aci_of_user +): + """User can access the data when connecting from some network only as per the ACI. + + :id: 3098512a-7ac5-11e8-af85-8c16451d917b + :customerscenario: True + :setup: Standalone Server + :steps: + 1. Add test entry + 2. Add ACI + 3. User should follow ACI role + :expectedresults: + 1. Entry should be added + 2. Operation should succeed + 3. Operation should succeed + """ + dns_name = socket.getfqdn() + # Add ACI + Domain(topo.standalone, DEFAULT_SUFFIX)\ + .add("aci", f'(target = "ldap:///{DNS_OU_KEY}")' + f'(targetattr="*")(version 3.0; aci "DNS aci"; allow(all) ' + f'userdn = "ldap:///{NETSCAPEDNS_KEY}" ' + f'and dns = "{dns_name}" ;)') + + # Create a new connection for this test. + conn = UserAccount(topo.standalone, NETSCAPEDNS_KEY).bind(PW_DM) + # Perform Operation + OrganizationalUnit(conn, DNS_OU_KEY).replace("seeAlso", "cn=1") + +#unstable or unstatus tests, skipped for now +@pytest.mark.flaky(max_runs=2, min_passes=1) +def test_from_an_unauthorized_network(topo, add_user, aci_of_user): + """User cannot access the data when connecting from an unauthorized network as per the ACI. + + :id: 34cf9726-7ac5-11e8-bc12-8c16451d917b + :customerscenario: True + :setup: Standalone Server + :steps: + 1. Add test entry + 2. Add ACI + 3. User should follow ACI role + :expectedresults: + 1. Entry should be added + 2. Operation should succeed + 3. 
Operation should succeed + """ + # Add ACI + Domain(topo.standalone, DEFAULT_SUFFIX).\ + add("aci", f'(target = "ldap:///{DNS_OU_KEY}")' + f'(targetattr="*")(version 3.0; aci "DNS aci"; allow(all) ' + f'userdn = "ldap:///{NETSCAPEDNS_KEY}" and dns != "red.iplanet.com" ;)') + + # Create a new connection for this test. + conn = UserAccount(topo.standalone, NETSCAPEDNS_KEY).bind(PW_DM) + # Perform Operation + OrganizationalUnit(conn, DNS_OU_KEY).replace("seeAlso", "cn=1") + +#unstable or unstatus tests, skipped for now +@pytest.mark.flaky(max_runs=2, min_passes=1) +def test_user_cannot_access_the_data_when_connecting_from_an_unauthorized_network_2( + topo, add_user, aci_of_user): + """User cannot access the data when connecting from an unauthorized network as per the ACI. + + :id: 396bdd44-7ac5-11e8-8014-8c16451d917b + :customerscenario: True + :setup: Standalone Server + :steps: + 1. Add test entry + 2. Add ACI + 3. User should follow ACI role + :expectedresults: + 1. Entry should be added + 2. Operation should succeed + 3. Operation should succeed + """ + # Add ACI + Domain(topo.standalone, DEFAULT_SUFFIX).\ + add("aci", f'(target = "ldap:///{DNS_OU_KEY}")' + f'(targetattr="*")(version 3.0; aci "DNS aci"; allow(all) ' + f'userdn = "ldap:///{NETSCAPEDNS_KEY}" ' + f'and dnsalias != "www.redhat.com" ;)') + + # Create a new connection for this test. + conn = UserAccount(topo.standalone, NETSCAPEDNS_KEY).bind(PW_DM) + # Perform Operation + OrganizationalUnit(conn, DNS_OU_KEY).replace("seeAlso", "cn=1") + + +def test_user_cannot_access_the_data_if_not_from_a_certain_domain(topo, add_user, aci_of_user): + """User cannot access the data if not from a certain domain as per the ACI. + + :id: 3d658972-7ac5-11e8-930f-8c16451d917b + :customerscenario: True + :setup: Standalone Server + :steps: + 1. Add test entry + 2. Add ACI + 3. User should follow ACI role + :expectedresults: + 1. Entry should be added + 2. Operation should succeed + 3. 
Operation should succeed + """ + # Add ACI + Domain(topo.standalone, DEFAULT_SUFFIX).\ + add("aci", f'(target = "ldap:///{DNS_OU_KEY}")(targetattr="*")' + f'(version 3.0; aci "DNS aci"; allow(all) ' + f'userdn = "ldap:///{NODNS_KEY}" ' + f'and dns = "RAP.rock.SALSA.house.COM" ;)') + + # Create a new connection for this test. + conn = UserAccount(topo.standalone, NODNS_KEY).bind(PW_DM) + # Perform Operation + org = OrganizationalUnit(conn, AUTHMETHOD_OU_KEY) + with pytest.raises(ldap.INSUFFICIENT_ACCESS): + org.replace("seeAlso", "cn=1") + + +def test_dnsalias_keyword_test_nodns_cannot(topo, add_user, aci_of_user): + """Dnsalias Keyword NODNS_KEY cannot assess data as per the ACI. + + :id: 41b467be-7ac5-11e8-89a3-8c16451d917b + :customerscenario: True + :setup: Standalone Server + :steps: + 1. Add test entry + 2. Add ACI + 3. User should follow ACI role + :expectedresults: + 1. Entry should be added + 2. Operation should succeed + 3. Operation should succeed + """ + # Add ACI + Domain(topo.standalone, DEFAULT_SUFFIX).\ + add("aci", f'(target = "ldap:///{DNS_OU_KEY}")(targetattr="*")' + f'(version 3.0; aci "DNS aci"; allow(all) ' + f'userdn = "ldap:///{NODNS_KEY}" and ' + f'dnsalias = "RAP.rock.SALSA.house.COM" ;)') + + # Create a new connection for this test. 
+ conn = UserAccount(topo.standalone, NODNS_KEY).bind(PW_DM) + # Perform Operation + org = OrganizationalUnit(conn, DNS_OU_KEY) + with pytest.raises(ldap.INSUFFICIENT_ACCESS): + org.replace("seeAlso", "cn=1") + +#unstable or unstatus tests, skipped for now +@pytest.mark.flaky(max_runs=2, min_passes=1) +@pytest.mark.ds50378 +@pytest.mark.bz1710848 +@pytest.mark.parametrize("ip_addr", ['127.0.0.1', "[::1]"]) +def test_user_can_access_from_ipv4_or_ipv6_address(topo, add_user, aci_of_user, ip_addr): + """User can modify the data when accessing the server from the allowed IPv4 and IPv6 addresses + + :id: 461e761e-7ac5-11e8-9ae4-8c16451d917b + :customerscenario: True + :parametrized: yes + :setup: Standalone Server + :steps: + 1. Add ACI that has both IPv4 and IPv6 + 2. Connect from one of the IPs allowed in ACI + 3. Modify an attribute + :expectedresults: + 1. ACI should be added + 2. Conection should be successful + 3. Operation should be successful + """ + # Add ACI that contains both IPv4 and IPv6 + Domain(topo.standalone, DEFAULT_SUFFIX).\ + add("aci", f'(target ="ldap:///{IP_OU_KEY}")(targetattr="*") ' + f'(version 3.0; aci "IP aci"; allow(all) ' + f'userdn = "ldap:///{FULLIP_KEY}" and (ip = "127.0.0.1" or ip = "::1");)') + + # Create a new connection for this test. + conn = UserAccount(topo.standalone, FULLIP_KEY).bind(PW_DM, uri=f'ldap://{ip_addr}:{topo.standalone.port}') + + # Perform Operation + OrganizationalUnit(conn, IP_OU_KEY).replace("seeAlso", "cn=1") + + +if __name__ == "__main__": + CURRENT_FILE = os.path.realpath(__file__) + pytest.main("-s -v %s" % CURRENT_FILE) diff --git a/dirsrvtests/tests/suites/acl/misc_test.py b/dirsrvtests/tests/suites/acl/misc_test.py new file mode 100644 index 0000000..9a2b9a1 --- /dev/null +++ b/dirsrvtests/tests/suites/acl/misc_test.py @@ -0,0 +1,509 @@ +""" +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2020 RED Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). 
+# See LICENSE for details. +# --- END COPYRIGHT BLOCK ---- +""" + +import ldap +import os +import pytest + +from lib389._constants import DEFAULT_SUFFIX, PW_DM, DN_DM +from lib389.idm.user import UserAccount, UserAccounts +from lib389._mapped_object import DSLdapObject +from lib389.idm.account import Accounts, Anonymous +from lib389.idm.organizationalunit import OrganizationalUnit, OrganizationalUnits +from lib389.idm.group import Group, Groups +from lib389.topologies import topology_st as topo +from lib389.idm.domain import Domain +from lib389.plugins import ACLPlugin + +pytestmark = pytest.mark.tier1 + +PEOPLE = "ou=PEOPLE,{}".format(DEFAULT_SUFFIX) +DYNGROUP = "cn=DYNGROUP,{}".format(PEOPLE) +CONTAINER_1_DELADD = "ou=Product Development,{}".format(DEFAULT_SUFFIX) +CONTAINER_2_DELADD = "ou=Accounting,{}".format(DEFAULT_SUFFIX) + + +@pytest.fixture(scope="function") +def aci_of_user(request, topo): + """ + :param request: + :param topo: + """ + + # Add anonymous access aci + ACI_TARGET = "(targetattr != \"userpassword\")(target = \"ldap:///%s\")" % (DEFAULT_SUFFIX) + ACI_ALLOW = "(version 3.0; acl \"Anonymous Read access\"; allow (read,search,compare)" + ACI_SUBJECT = "(userdn=\"ldap:///anyone\");)" + ANON_ACI = ACI_TARGET + ACI_ALLOW + ACI_SUBJECT + suffix = Domain(topo.standalone, DEFAULT_SUFFIX) + try: + suffix.add('aci', ANON_ACI) + except ldap.TYPE_OR_VALUE_EXISTS: + pass + + aci_list = suffix.get_attr_vals('aci') + + def finofaci(): + """ + Removes and Restores ACIs after the test. 
+ """ + domain = Domain(topo.standalone, DEFAULT_SUFFIX) + domain.remove_all('aci') + for i in aci_list: + domain.add("aci", i) + + request.addfinalizer(finofaci) + + +@pytest.fixture(scope="function") +def clean(request, topo): + """ + :param request: + :param topo: + """ + ous = OrganizationalUnits(topo.standalone, DEFAULT_SUFFIX) + try: + for i in ['Product Development', 'Accounting']: + ous.create(properties={'ou': i}) + except ldap.ALREADY_EXISTS as eoor_eoor: + topo.standalone.log.info("Exception (expected): %s" % type(eoor_eoor).__name__) + + def fin(): + """ + Deletes entries after the test. + """ + for scope_scope in [CONTAINER_1_DELADD, CONTAINER_2_DELADD, PEOPLE]: + try: + DSLdapObject(topo.standalone, scope_scope).delete() + except ldap.ALREADY_EXISTS as eoor_eoor: + topo.standalone.log.info("Exception (expected): %s" % type(eoor_eoor).__name__) + + request.addfinalizer(fin) + + +def test_accept_aci_in_addition_to_acl(topo, clean, aci_of_user): + """Misc Test 2 accept aci in addition to acl + + :id: 8e9408fa-7db8-11e8-adaa-8c16451d917b + :setup: Standalone Instance + :steps: + 1. Add test entry + 2. Add ACI + 3. User should follow ACI role + :expectedresults: + 1. Entry should be added + 2. Operation should succeed + 3. 
Operation should succeed + """ + uas = UserAccounts(topo.standalone, DEFAULT_SUFFIX, rdn='ou=product development') + user = uas.create_test_user() + for i in [('mail', 'anujborah@okok.com'), ('givenname', 'Anuj'), ('userPassword', PW_DM)]: + user.set(i[0], i[1]) + + aci_target = '(targetattr="givenname")' + aci_allow = ('(version 3.0; acl "Name of the ACI"; deny (read, search, compare, write)') + aci_subject = 'userdn="ldap:///anyone";)' + Domain(topo.standalone, CONTAINER_1_DELADD).add("aci", aci_target + aci_allow + aci_subject) + + conn = Anonymous(topo.standalone).bind() + # aci will block targetattr=givenname to anyone + user = UserAccount(conn, user.dn) + with pytest.raises(AssertionError): + assert user.get_attr_val_utf8('givenname') == 'Anuj' + # aci will allow targetattr=uid to anyone + assert user.get_attr_val_utf8('uid') == 'test_user_1000' + + for i in uas.list(): + i.delete() + + +@pytest.mark.bz334451 +def test_more_then_40_acl_will_crash_slapd(topo, clean, aci_of_user): + """bug 334451 : more then 40 acl will crash slapd + superseded by Bug 772778 - acl cache overflown problem with > 200 acis + + :id: 93a44c60-7db8-11e8-9439-8c16451d917b + :setup: Standalone Instance + :steps: + 1. Add test entry + 2. Add ACI + 3. User should follow ACI role + :expectedresults: + 1. Entry should be added + 2. Operation should succeed + 3. 
Operation should succeed + """ + uas = UserAccounts(topo.standalone, DEFAULT_SUFFIX, rdn='ou=Accounting') + user = uas.create_test_user() + + aci_target = '(target ="ldap:///{}")(targetattr!="userPassword")'.format(CONTAINER_1_DELADD) + # more_then_40_acl_will not crash_slapd + for i in range(40): + aci_allow = '(version 3.0;acl "ACI_{}";allow (read, search, compare)'.format(i) + aci_subject = 'userdn="ldap:///anyone";)' + aci_body = aci_target + aci_allow + aci_subject + Domain(topo.standalone, CONTAINER_1_DELADD).add("aci", aci_body) + conn = Anonymous(topo.standalone).bind() + assert UserAccount(conn, user.dn).get_attr_val_utf8('uid') == 'test_user_1000' + + for i in uas.list(): + i.delete() + +@pytest.mark.bz345643 +def test_search_access_should_not_include_read_access(topo, clean, aci_of_user): + """bug 345643 + Misc Test 4 search access should not include read access + + :id: 98ab173e-7db8-11e8-a309-8c16451d917b + :setup: Standalone Instance + :steps: + 1. Add test entry + 2. Add ACI + 3. User should follow ACI role + :expectedresults: + 1. Entry should be added + 2. Operation should succeed + 3. 
Operation should succeed + """ + assert Domain(topo.standalone, DEFAULT_SUFFIX).present('aci') + Domain(topo.standalone, DEFAULT_SUFFIX)\ + .replace("aci", [f'(target ="ldap:///{DEFAULT_SUFFIX}")(targetattr != "userPassword")' + '(version 3.0;acl "anonymous access";allow (search)' + '(userdn = "ldap:///anyone");)', + f'(target="ldap:///{DEFAULT_SUFFIX}") (targetattr = "*")(version 3.0; ' + 'acl "allow self write";allow(write) ' + 'userdn = "ldap:///self";)', + f'(target="ldap:///{DEFAULT_SUFFIX}") (targetattr = "*")(version 3.0; ' + 'acl "Allow all admin group"; allow(all) groupdn = "ldap:///cn=Directory ' + 'Administrators, {}";)']) + + conn = Anonymous(topo.standalone).bind() + # search_access_should_not_include_read_access + suffix = Domain(conn, DEFAULT_SUFFIX) + with pytest.raises(Exception): + assert suffix.present('aci') + + +def test_only_allow_some_targetattr(topo, clean, aci_of_user): + """Misc Test 5 only allow some targetattr (1/2) + + :id: 9d27f048-7db8-11e8-a71c-8c16451d917b + :setup: Standalone Instance + :steps: + 1. Add test entry + 2. Add ACI + 3. User should follow ACI role + :expectedresults: + 1. Entry should be added + 2. Operation should succeed + 3. 
Operation should succeed + """ + + uas = UserAccounts(topo.standalone, DEFAULT_SUFFIX, rdn=None) + for i in range(1, 3): + user = uas.create_test_user(uid=i, gid=i) + user.replace_many(('cn', 'Anuj1'), ('mail', 'annandaBorah@anuj.com')) + + Domain(topo.standalone, DEFAULT_SUFFIX).\ + replace("aci", '(target="ldap:///{}")(targetattr="mail||objectClass")' + '(version 3.0; acl "Test";allow (read,search,compare) ' + '(userdn = "ldap:///anyone"); )'.format(DEFAULT_SUFFIX)) + + conn = Anonymous(topo.standalone).bind() + accounts = Accounts(conn, DEFAULT_SUFFIX) + + # aci will allow only mail targetattr + assert len(accounts.filter('(mail=*)')) == 2 + # aci will allow only mail targetattr + assert not accounts.filter('(cn=*)', scope=1) + # with root no , blockage + assert len(Accounts(topo.standalone, DEFAULT_SUFFIX).filter('(uid=*)', scope=1)) == 2 + + for i in uas.list(): + i.delete() + + +def test_only_allow_some_targetattr_two(topo, clean, aci_of_user, request): + """Misc Test 6 only allow some targetattr (2/2)" + + :id: a188239c-7db8-11e8-903e-8c16451d917b + :setup: Standalone Instance + :steps: + 1. Add test entry + 2. Add ACI + 3. User should follow ACI role + :expectedresults: + 1. Entry should be added + 2. Operation should succeed + 3. 
Operation should succeed + """ + uas = UserAccounts(topo.standalone, DEFAULT_SUFFIX, rdn=None) + for i in range(5): + user = uas.create_test_user(uid=i, gid=i) + user.replace_many(('mail', 'anujborah@anujborah.com'), + ('cn', 'Anuj'), ('userPassword', PW_DM)) + + user1 = uas.create_test_user() + user1.replace_many(('mail', 'anujborah@anujborah.com'), ('userPassword', PW_DM)) + + Domain(topo.standalone, DEFAULT_SUFFIX).\ + replace("aci", '(target="ldap:///{}") (targetattr="mail||objectClass")' + '(targetfilter="cn=Anuj") (version 3.0; acl "{}"; ' + 'allow (compare,read,search) ' + '(userdn = "ldap:///anyone"); )'.format(DEFAULT_SUFFIX, request.node.name)) + + conn = UserAccount(topo.standalone, user.dn).bind(PW_DM) + # aci will allow only mail targetattr but only for cn=Anuj + account = Accounts(conn, DEFAULT_SUFFIX) + assert len(account.filter('(mail=*)', scope=1)) == 5 + assert not account.filter('(cn=*)', scope=1) + + for i in account.filter('(mail=*)'): + assert i.get_attr_val_utf8('mail') == 'anujborah@anujborah.com' + + + conn = Anonymous(topo.standalone).bind() + # aci will allow only mail targetattr but only for cn=Anuj + account = Accounts(conn, DEFAULT_SUFFIX) + assert len(account.filter('(mail=*)', scope=1)) == 5 + assert not account.filter('(cn=*)', scope=1) + + for i in account.filter('(mail=*)'): + assert i.get_attr_val_utf8('mail') == 'anujborah@anujborah.com' + + # with root no blockage + assert len(Accounts(topo.standalone, DEFAULT_SUFFIX).filter('(mail=*)')) == 6 + + for i in uas.list(): + i.delete() + + +@pytest.mark.bz326000 +def test_memberurl_needs_to_be_normalized(topo, clean, aci_of_user): + """Non-regression test for BUG 326000: MemberURL needs to be normalized + + :id: a5d172e6-7db8-11e8-aca7-8c16451d917b + :setup: Standalone Instance + :steps: + 1. Add test entry + 2. Add ACI + 3. User should follow ACI role + :expectedresults: + 1. Entry should be added + 2. Operation should succeed + 3. 
Operation should succeed + """ + ou_ou = OrganizationalUnit(topo.standalone, "ou=PEOPLE,{}".format(DEFAULT_SUFFIX)) + ou_ou.set('aci', '(targetattr="*")' + '(version 3.0; acl "tester"; allow(all) ' + 'groupdn = "ldap:///cn =DYNGROUP,ou=PEOPLE, {}";)'.format(DEFAULT_SUFFIX)) + + groups = Groups(topo.standalone, DEFAULT_SUFFIX, rdn='ou=PEOPLE') + groups.create(properties={"cn": "DYNGROUP", + "description": "DYNGROUP", + 'objectClass': 'groupOfURLS', + 'memberURL': "ldap:///ou=PEOPLE,{}??sub?" + "(uid=test_user_2)".format(DEFAULT_SUFFIX)}) + + uas = UserAccounts(topo.standalone, DEFAULT_SUFFIX) + for demo1 in [(1, "Entry to test rights on."), (2, "Member of DYNGROUP")]: + user = uas.create_test_user(uid=demo1[0], gid=demo1[0]) + user.replace_many(('description', demo1[1]), ('userPassword', PW_DM)) + + ##with normal aci + conn = UserAccount(topo.standalone, uas.list()[1].dn).bind(PW_DM) + harry = UserAccount(conn, uas.list()[1].dn) + harry.add('sn', 'FRED') + + ##with abnomal aci + dygrp = Group(topo.standalone, DYNGROUP) + dygrp.remove('memberurl', "ldap:///ou=PEOPLE,{}??sub?(uid=test_user_2)".format(DEFAULT_SUFFIX)) + dygrp.add('memberurl', "ldap:///ou=PEOPLE,{}??sub?(uid=tesT_UsEr_2)".format(DEFAULT_SUFFIX)) + harry.add('sn', 'Not FRED') + + for i in uas.list(): + i.delete() + +@pytest.mark.bz624370 +def test_greater_than_200_acls_can_be_created(topo, clean, aci_of_user): + """Misc 10, check that greater than 200 ACLs can be created. Bug 624370 + + :id: ac020252-7db8-11e8-8652-8c16451d917b + :setup: Standalone Instance + :steps: + 1. Add test entry + 2. Add ACI + 3. User should follow ACI role + :expectedresults: + 1. Entry should be added + 2. Operation should succeed + 3. 
Operation should succeed + """ + # greater_than_200_acls_can_be_created + uas = UserAccounts(topo.standalone, DEFAULT_SUFFIX) + for i in range(200): + user = uas.create_test_user(uid=i, gid=i) + user.set('aci', '(targetattr = "description")' + '(version 3.0;acl "foo{}"; allow (read, search, compare)' + '(userdn="ldap:///anyone");)'.format(i)) + + assert user.\ + get_attr_val_utf8('aci') == '(targetattr = "description")' \ + '(version 3.0;acl "foo{}"; allow ' \ + '(read, search, compare)' \ + '(userdn="ldap:///anyone");)'.format(i) + for i in uas.list(): + i.delete() + + +@pytest.mark.bz624453 +def test_server_bahaves_properly_with_very_long_attribute_names(topo, clean, aci_of_user): + """Make sure the server bahaves properly with very long attribute names. Bug 624453. + + :id: b0d31942-7db8-11e8-a833-8c16451d917b + :setup: Standalone Instance + :steps: + 1. Add test entry + 2. Add ACI + 3. User should follow ACI role + :expectedresults: + 1. Entry should be added + 2. Operation should succeed + 3. Operation should succeed + """ + users = UserAccounts(topo.standalone, DEFAULT_SUFFIX) + users.create_test_user() + users.list()[0].set('userpassword', PW_DM) + + user = UserAccount(topo.standalone, 'uid=test_user_1000,ou=People,{}'.format(DEFAULT_SUFFIX)) + with pytest.raises(ldap.INVALID_SYNTAX): + user.add("aci", "a" * 9000) + + +def test_do_bind_as_201_distinct_users(topo, clean, aci_of_user): + """Test bind as 201 distinct users + + :id: c0060532-7db8-11e8-a124-8c16451d917b + :setup: Standalone Instance + :steps: + 1. Add test entries + 2. Increase the nsslapd-aclpb-max-selected-acls in cn=ACL Plugin,cn=plugins,cn=config + 3. Restart the server + 4. Do bind as 201 distinct users + :expectedresults: + 1. Entries should be added + 2. Operation should succeed + 3. Operation should succeed + 4. 
Operation should succeed + """ + uas = UserAccounts(topo.standalone, DEFAULT_SUFFIX) + for i in range(201): + user = uas.create_test_user(uid=i, gid=i) + user.set('userPassword', PW_DM) + + users = uas.list() + for user in users: + user.bind(PW_DM) + + ACLPlugin(topo.standalone).replace("nsslapd-aclpb-max-selected-acls", '220') + topo.standalone.restart() + + users = uas.list() + for user in users: + user.bind(PW_DM) + + +def test_info_disclosure(request, topo): + """Test that a search returns 32 when base entry does not exist + + :id: f6dec4c2-65a3-41e4-a4c0-146196863333 + :setup: Standalone Instance + :steps: + 1. Add aci + 2. Add test user + 3. Bind as user and search for non-existent entry + :expectedresults: + 1. Success + 2. Success + 3. Error 32 is returned + """ + + ACI_TARGET = "(targetattr = \"*\")(target = \"ldap:///%s\")" % (DEFAULT_SUFFIX) + ACI_ALLOW = "(version 3.0; acl \"Read/Search permission for all users\"; allow (read,search)" + ACI_SUBJECT = "(userdn=\"ldap:///all\");)" + ACI = ACI_TARGET + ACI_ALLOW + ACI_SUBJECT + + # Get current ACi's so we can restore them when we are done + suffix = Domain(topo.standalone, DEFAULT_SUFFIX) + preserved_acis = suffix.get_attr_vals_utf8('aci') + + def finofaci(): + domain = Domain(topo.standalone, DEFAULT_SUFFIX) + try: + domain.remove_all('aci') + domain.replace_values('aci', preserved_acis) + except: + pass + request.addfinalizer(finofaci) + + # Remove aci's + suffix.remove_all('aci') + + # Add test user + USER_DN = "uid=test,ou=people," + DEFAULT_SUFFIX + users = UserAccounts(topo.standalone, DEFAULT_SUFFIX) + users.create(properties={ + 'uid': 'test', + 'cn': 'test', + 'sn': 'test', + 'uidNumber': '1000', + 'gidNumber': '2000', + 'homeDirectory': '/home/test', + 'userPassword': PW_DM + }) + + # bind as user + conn = UserAccount(topo.standalone, USER_DN).bind(PW_DM) + + # Search fo existing base DN + test = Domain(conn, DEFAULT_SUFFIX) + assert len(test.get_attr_vals_utf8_l('dc')) == 0 + + # Search for a 
non existent bases + subtree = Domain(conn, "ou=does_not_exist," + DEFAULT_SUFFIX) + assert len(subtree.get_attr_vals_utf8_l('objectclass')) == 0 + + subtree = Domain(conn, "ou=also does not exist,ou=does_not_exist," + DEFAULT_SUFFIX) + assert len(subtree.get_attr_vals_utf8_l('objectclass')) == 0 + + # Try ONE level search instead of BASE + assert len(Accounts(conn, "ou=does_not_exist," + DEFAULT_SUFFIX).filter("(objectclass=top)", scope=ldap.SCOPE_ONELEVEL)) == 0 + + # add aci + suffix.add('aci', ACI) + + # Search for a non existent entry which should raise an exception + with pytest.raises(ldap.NO_SUCH_OBJECT): + conn = UserAccount(topo.standalone, USER_DN).bind(PW_DM) + subtree = Domain(conn, "ou=does_not_exist," + DEFAULT_SUFFIX) + subtree.get_attr_vals_utf8_l('objectclass') + with pytest.raises(ldap.NO_SUCH_OBJECT): + conn = UserAccount(topo.standalone, USER_DN).bind(PW_DM) + subtree = Domain(conn, "ou=also does not exist,ou=does_not_exist," + DEFAULT_SUFFIX) + subtree.get_attr_vals_utf8_l('objectclass') + with pytest.raises(ldap.NO_SUCH_OBJECT): + conn = UserAccount(topo.standalone, USER_DN).bind(PW_DM) + DN = "ou=also does not exist,ou=does_not_exist," + DEFAULT_SUFFIX + Accounts(conn, DN).filter("(objectclass=top)", scope=ldap.SCOPE_ONELEVEL, strict=True) + + +if __name__ == "__main__": + CURRENT_FILE = os.path.realpath(__file__) + pytest.main("-s -v %s" % CURRENT_FILE) diff --git a/dirsrvtests/tests/suites/acl/modify_test.py b/dirsrvtests/tests/suites/acl/modify_test.py new file mode 100644 index 0000000..aec8286 --- /dev/null +++ b/dirsrvtests/tests/suites/acl/modify_test.py @@ -0,0 +1,584 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2019 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. 
+# --- END COPYRIGHT BLOCK ---- + + +import pytest, os, ldap +from lib389._constants import DEFAULT_SUFFIX, PW_DM +from lib389.idm.user import UserAccount +from lib389.idm.account import Anonymous +from lib389.idm.group import Group, UniqueGroup +from lib389.idm.organizationalunit import OrganizationalUnit +from lib389.idm.group import Groups +from lib389.topologies import topology_st as topo +from lib389.idm.domain import Domain + +pytestmark = pytest.mark.tier1 + +CONTAINER_1_DELADD = "ou=Product Development,{}".format(DEFAULT_SUFFIX) +CONTAINER_2_DELADD = "ou=Accounting,{}".format(DEFAULT_SUFFIX) +USER_DELADD = "cn=Jeff Vedder,{}".format(CONTAINER_1_DELADD) +USER_WITH_ACI_DELADD = "cn=Sam Carter,{}".format(CONTAINER_2_DELADD) +KIRSTENVAUGHAN = "cn=Kirsten Vaughan, ou=Human Resources, {}".format(DEFAULT_SUFFIX) +HUMAN_OU_GLOBAL = "ou=Human Resources,{}".format(DEFAULT_SUFFIX) + + +@pytest.fixture(scope="function") +def cleanup_tree(request, topo): + + def fin(): + for i in [USER_DELADD, USER_WITH_ACI_DELADD, KIRSTENVAUGHAN, CONTAINER_1_DELADD, CONTAINER_2_DELADD, HUMAN_OU_GLOBAL]: + try: + UserAccount(topo.standalone, i).delete() + except: + pass + + request.addfinalizer(fin) + + +@pytest.fixture(scope="function") +def aci_of_user(request, topo): + # Add anonymous access aci + ACI_TARGET = "(targetattr=\"*\")(target = \"ldap:///%s\")" % (DEFAULT_SUFFIX) + ACI_ALLOW = "(version 3.0; acl \"Anonymous Read access\"; allow (read,search,compare)" + ACI_SUBJECT = "(userdn=\"ldap:///anyone\");)" + ANON_ACI = ACI_TARGET + ACI_ALLOW + ACI_SUBJECT + suffix = Domain(topo.standalone, DEFAULT_SUFFIX) + try: + suffix.add('aci', ANON_ACI) + except ldap.TYPE_OR_VALUE_EXISTS: + pass + + aci_list = suffix.get_attr_vals('aci') + + def finofaci(): + domain = Domain(topo.standalone, DEFAULT_SUFFIX) + domain.set('aci', None) + for i in aci_list: + domain.add("aci", i) + + request.addfinalizer(finofaci) + + +def test_allow_write_access_to_targetattr_with_a_single_attribute( + topo, 
aci_of_user, cleanup_tree): + """Modify Test 1 Allow write access to targetattr with a single attribute + + :id: 620d7b82-7abf-11e8-a4db-8c16451d917b + :setup: server + :steps: + 1. Add test entry + 2. Add ACI + 3. User should follow ACI role + :expectedresults: + 1. Entry should be added + 2. Operation should succeed + 3. Operation should succeed + """ + ACI_BODY = '(targetattr = "title")(version 3.0; acl "ACI NAME"; allow (write) (userdn = "ldap:///anyone") ;)' + Domain(topo.standalone, DEFAULT_SUFFIX).add("aci", ACI_BODY) + + ou = OrganizationalUnit(topo.standalone, "ou=Product Development,{}".format(DEFAULT_SUFFIX)) + ou.create(properties={'ou': 'Product Development'}) + + properties = { + 'uid': 'Jeff Vedder', + 'cn': 'Jeff Vedder', + 'sn': 'user', + 'uidNumber': '1000', + 'gidNumber': '2000', + 'homeDirectory': '/home/' + 'JeffVedder', + 'userPassword': PW_DM + } + user = UserAccount(topo.standalone, "cn=Jeff Vedder,ou=Product Development,{}".format(DEFAULT_SUFFIX)) + user.create(properties=properties) + + # Allow write access to targetattr with a single attribute + conn = Anonymous(topo.standalone).bind() + ua = UserAccount(conn, USER_DELADD) + ua.add("title", "Architect") + assert ua.get_attr_val('title') + ua.remove("title", "Architect") + + +def test_allow_write_access_to_targetattr_with_multiple_attibutes( + topo, aci_of_user, cleanup_tree): + """Modify Test 2 Allow write access to targetattr with multiple attibutes + + :id: 6b9f05c6-7abf-11e8-9ba1-8c16451d917b + :setup: server + :steps: + 1. Add test entry + 2. Add ACI + 3. User should follow ACI role + :expectedresults: + 1. Entry should be added + 2. Operation should succeed + 3. 
Operation should succeed + """ + ACI_BODY = '(targetattr = "telephonenumber || roomnumber")(version 3.0; acl "ACI NAME"; allow (write) (userdn = "ldap:///anyone") ;)' + Domain(topo.standalone, DEFAULT_SUFFIX).add("aci", ACI_BODY) + + ou = OrganizationalUnit(topo.standalone, "ou=Product Development,{}".format(DEFAULT_SUFFIX)) + ou.create(properties={'ou': 'Product Development'}) + + properties = { + 'uid': 'Jeff Vedder', + 'cn': 'Jeff Vedder', + 'sn': 'user', + 'uidNumber': '1000', + 'gidNumber': '2000', + 'homeDirectory': '/home/' + 'JeffVedder', + 'userPassword': PW_DM + } + user = UserAccount(topo.standalone, "cn=Jeff Vedder,ou=Product Development,{}".format(DEFAULT_SUFFIX)) + user.create(properties=properties) + + # Allow write access to targetattr with multiple attibutes + conn = Anonymous(topo.standalone).bind() + ua = UserAccount(conn, USER_DELADD) + ua.add("telephonenumber", "+1 408 555 1212") + assert ua.get_attr_val('telephonenumber') + ua.add("roomnumber", "101") + assert ua.get_attr_val('roomnumber') + + +def test_allow_write_access_to_userdn_all(topo, aci_of_user, cleanup_tree): + """Modify Test 3 Allow write access to userdn 'all' + + :id: 70c58818-7abf-11e8-afa1-8c16451d917b + :setup: server + :steps: + 1. Add test entry + 2. Add ACI + 3. User should follow ACI role + :expectedresults: + 1. Entry should be added + 2. Operation should succeed + 3. 
Operation should succeed + """ + ACI_BODY = '(targetattr = "*")(version 3.0; acl "ACI NAME"; allow (write) (userdn = "ldap:///all") ;)' + Domain(topo.standalone, DEFAULT_SUFFIX).add("aci", ACI_BODY) + + for i in ['Product Development', 'Accounting']: + ou = OrganizationalUnit(topo.standalone, "ou={},{}".format(i, DEFAULT_SUFFIX)) + ou.create(properties={'ou': i}) + + for i in ['Jeff Vedder,ou=Product Development', 'Sam Carter,ou=Accounting']: + properties = { + 'uid': i, + 'cn': i, + 'sn': 'user', + 'uidNumber': '1000', + 'gidNumber': '2000', + 'homeDirectory': '/home/' + i, + 'userPassword': PW_DM + } + user = UserAccount(topo.standalone, "cn={},{}".format(i, DEFAULT_SUFFIX)) + user.create(properties=properties) + + # Allow write access to userdn 'all' + conn = Anonymous(topo.standalone).bind() + with pytest.raises(ldap.INSUFFICIENT_ACCESS): + UserAccount(conn, USER_DELADD).add("title", "Architect") + conn = UserAccount(topo.standalone, USER_WITH_ACI_DELADD).bind(PW_DM) + UserAccount(conn, USER_DELADD).add("title", "Architect") + assert UserAccount(conn, USER_DELADD).get_attr_val('title') + + +def test_allow_write_access_to_userdn_with_wildcards_in_dn( + topo, aci_of_user, cleanup_tree): + """Modify Test 4 Allow write access to userdn with wildcards in DN + + :id: 766c2312-7abf-11e8-b57d-8c16451d917b + :setup: server + :steps: + 1. Add test entry + 2. Add ACI + 3. User should follow ACI role + :expectedresults: + 1. Entry should be added + 2. Operation should succeed + 3. 
Operation should succeed + """ + ACI_BODY = '(targetattr = "*")(version 3.0; acl "ACI NAME"; allow (write)(userdn = "ldap:///cn=*, ou=Product Development,{}") ;)'.format(DEFAULT_SUFFIX) + Domain(topo.standalone, DEFAULT_SUFFIX).add("aci", ACI_BODY) + + ou = OrganizationalUnit(topo.standalone, "ou=Product Development,{}".format(DEFAULT_SUFFIX)) + ou.create(properties={'ou': 'Product Development'}) + + properties = { + 'uid': 'Jeff Vedder', + 'cn': 'Jeff Vedder', + 'sn': 'user', + 'uidNumber': '1000', + 'gidNumber': '2000', + 'homeDirectory': '/home/' + 'JeffVedder', + 'userPassword': PW_DM + } + user = UserAccount(topo.standalone, "cn=Jeff Vedder,ou=Product Development,{}".format(DEFAULT_SUFFIX)) + user.create(properties=properties) + + conn = UserAccount(topo.standalone, USER_DELADD).bind(PW_DM) + # Allow write access to userdn with wildcards in DN + ua = UserAccount(conn, USER_DELADD) + ua.add("title", "Architect") + assert ua.get_attr_val('title') + + +def test_allow_write_access_to_userdn_with_multiple_dns(topo, aci_of_user, cleanup_tree): + """Modify Test 5 Allow write access to userdn with multiple DNs + + :id: 7aae760a-7abf-11e8-bc3a-8c16451d917b + :setup: server + :steps: + 1. Add test entry + 2. Add ACI + 3. User should follow ACI role + :expectedresults: + 1. Entry should be added + 2. Operation should succeed + 3. 
Operation should succeed + """ + ACI_BODY = '(targetattr = "*")(version 3.0; acl "ACI NAME"; allow (write)(userdn = "ldap:///{} || ldap:///{}") ;)'.format(USER_DELADD, USER_WITH_ACI_DELADD) + Domain(topo.standalone, DEFAULT_SUFFIX).add("aci", ACI_BODY) + + for i in ['Product Development', 'Accounting', 'Human Resources']: + ou = OrganizationalUnit(topo.standalone, "ou={},{}".format(i, DEFAULT_SUFFIX)) + ou.create(properties={'ou': i}) + + for i in ['Jeff Vedder,ou=Product Development', 'Sam Carter,ou=Accounting', 'Kirsten Vaughan, ou=Human Resources']: + properties = { + 'uid': i, + 'cn': i, + 'sn': 'user', + 'uidNumber': '1000', + 'gidNumber': '2000', + 'homeDirectory': '/home/' + i, + 'userPassword': PW_DM + } + user = UserAccount(topo.standalone, "cn={},{}".format(i, DEFAULT_SUFFIX)) + user.create(properties=properties) + + conn = UserAccount(topo.standalone, USER_DELADD).bind(PW_DM) + # Allow write access to userdn with multiple DNs + ua = UserAccount(conn, KIRSTENVAUGHAN) + ua.add("title", "Architect") + conn = UserAccount(topo.standalone, USER_WITH_ACI_DELADD).bind(PW_DM) + # Allow write access to userdn with multiple DNs + ua = UserAccount(conn, USER_DELADD) + ua.add("title", "Architect") + assert ua.get_attr_val('title') + + +def test_allow_write_access_to_target_with_wildcards(topo, aci_of_user, cleanup_tree): + """Modify Test 6 Allow write access to target with wildcards + + :id: 825fe884-7abf-11e8-8541-8c16451d917b + :setup: server + :steps: + 1. Add test entry + 2. Add ACI + 3. User should follow ACI role + :expectedresults: + 1. Entry should be added + 2. Operation should succeed + 3. 
Operation should succeed + """ + ACI_BODY = '(target = ldap:///{})(targetattr = "*")(version 3.0; acl "ACI NAME"; allow (write) (userdn = "ldap:///anyone") ;)'.format(DEFAULT_SUFFIX) + Domain(topo.standalone, DEFAULT_SUFFIX).add("aci", ACI_BODY) + + for i in ['Product Development', 'Accounting', 'Human Resources']: + ou = OrganizationalUnit(topo.standalone, "ou={},{}".format(i, DEFAULT_SUFFIX)) + ou.create(properties={'ou': i}) + + for i in ['Jeff Vedder,ou=Product Development', 'Sam Carter,ou=Accounting', 'Kirsten Vaughan, ou=Human Resources']: + properties = { + 'uid': i, + 'cn': i, + 'sn': 'user', + 'uidNumber': '1000', + 'gidNumber': '2000', + 'homeDirectory': '/home/' + i, + 'userPassword': PW_DM + } + user = UserAccount(topo.standalone, "cn={},{}".format(i, DEFAULT_SUFFIX)) + user.create(properties=properties) + + conn = UserAccount(topo.standalone, USER_DELADD).bind(PW_DM) + # Allow write access to target with wildcards + ua = UserAccount(conn, KIRSTENVAUGHAN) + ua.add("title", "Architect") + assert ua.get_attr_val('title') + conn = UserAccount(topo.standalone, USER_WITH_ACI_DELADD).bind(PW_DM) + # Allow write access to target with wildcards + ua = UserAccount(conn, USER_DELADD) + ua.add("title", "Architect") + assert ua.get_attr_val('title') + + +def test_allow_write_access_to_userdnattr(topo, aci_of_user, cleanup_tree, request): + """Modify Test 7 Allow write access to userdnattr + + :id: 86b418f6-7abf-11e8-ae28-8c16451d917b + :setup: server + :steps: + 1. Add test entry + 2. Add ACI + 3. User should follow ACI role + :expectedresults: + 1. Entry should be added + 2. Operation should succeed + 3. 
Operation should succeed + """ + ACI_BODY = '(target = ldap:///{})(targetattr="*")(version 3.0; acl "{}";allow (write) (userdn = "ldap:///anyone"); )'.format(DEFAULT_SUFFIX, request.node.name) + Domain(topo.standalone, DEFAULT_SUFFIX).add("aci", ACI_BODY) + + for i in ['Product Development', 'Accounting']: + ou = OrganizationalUnit(topo.standalone, "ou={},{}".format(i, DEFAULT_SUFFIX)) + ou.create(properties={'ou': i}) + + for i in ['Jeff Vedder,ou=Product Development', 'Sam Carter,ou=Accounting']: + properties = { + 'uid': i, + 'cn': i, + 'sn': 'user', + 'uidNumber': '1000', + 'gidNumber': '2000', + 'homeDirectory': '/home/' + i, + 'userPassword': PW_DM + } + user = UserAccount(topo.standalone, "cn={},{}".format(i, DEFAULT_SUFFIX)) + user.create(properties=properties) + + UserAccount(topo.standalone, USER_WITH_ACI_DELADD).add('manager', USER_WITH_ACI_DELADD) + conn = UserAccount(topo.standalone, USER_WITH_ACI_DELADD).bind(PW_DM) + # Allow write access to userdnattr + ua = UserAccount(conn, USER_DELADD) + ua.add('uid', 'scoobie') + assert ua.get_attr_val('uid') + ua.add('uid', 'jvedder') + assert ua.get_attr_val('uid') + + +def test_allow_selfwrite_access_to_anyone(topo, aci_of_user, cleanup_tree): + """Modify Test 8 Allow selfwrite access to anyone + + :id: 8b3becf0-7abf-11e8-ac34-8c16451d917b + :setup: server + :steps: + 1. Add test entry + 2. Add ACI + 3. User should follow ACI role + :expectedresults: + 1. Entry should be added + 2. Operation should succeed + 3. 
Operation should succeed + """ + groups = Groups(topo.standalone, DEFAULT_SUFFIX) + group = groups.create(properties={"cn": "group1", + "description": "testgroup"}) + + ACI_BODY = '(target = ldap:///cn=group1,ou=Groups,{})(targetattr = "member")(version 3.0; acl "ACI NAME"; allow (selfwrite) (userdn = "ldap:///anyone") ;)'.format(DEFAULT_SUFFIX) + Domain(topo.standalone, DEFAULT_SUFFIX).add("aci", ACI_BODY) + + ou = OrganizationalUnit(topo.standalone, "ou=Product Development,{}".format(DEFAULT_SUFFIX)) + ou.create(properties={'ou': 'Product Development'}) + + properties = { + 'uid': 'Jeff Vedder', + 'cn': 'Jeff Vedder', + 'sn': 'user', + 'uidNumber': '1000', + 'gidNumber': '2000', + 'homeDirectory': '/home/' + 'JeffVedder', + 'userPassword': PW_DM + } + user = UserAccount(topo.standalone, "cn=Jeff Vedder,ou=Product Development,{}".format(DEFAULT_SUFFIX)) + user.create(properties=properties) + + conn = UserAccount(topo.standalone, USER_DELADD).bind(PW_DM) + # Allow selfwrite access to anyone + groups = Groups(conn, DEFAULT_SUFFIX) + groups.list()[1].add_member(USER_DELADD) + + +def test_uniquemember_should_also_be_the_owner(topo, aci_of_user): + """Modify Test 10 groupdnattr = \"ldap:///$BASEDN?owner\" if owner is a group, group's + uniquemember should also be the owner + + :id: 9456b2d4-7abf-11e8-829d-8c16451d917b + :setup: server + :steps: + 1. Add test entry + 2. Add ACI + 3. User should follow ACI role + :expectedresults: + 1. Entry should be added + 2. Operation should succeed + 3. 
Operation should succeed + """ + for i in ['ACLGroupTest']: + ou = OrganizationalUnit(topo.standalone, "ou={},{}".format(i, DEFAULT_SUFFIX)) + ou.create(properties={'ou': i}) + + ou = OrganizationalUnit(topo.standalone, "ou=ACLDevelopment,{}".format(DEFAULT_SUFFIX)) + ou.create(properties={'ou': 'ACLDevelopment'}) + ou.set('aci','(targetattr="*")(version 3.0; acl "groupdnattr acl"; ' + 'allow (all)groupdnattr = "ldap:///{}?owner";)'.format(DEFAULT_SUFFIX)) + + grp = UniqueGroup(topo.standalone, "uid=anuj,ou=ACLDevelopment, {}".format(DEFAULT_SUFFIX)) + user_props = ( + {'sn': 'Borah', + 'cn': 'Anuj', + 'objectclass': ['top', 'person', 'organizationalPerson', 'inetOrgPerson', 'groupofUniquenames'], + 'userpassword': PW_DM, + 'givenname': 'Anuj', + 'ou': ['ACLDevelopment', 'People'], + 'roomnumber': '123', + 'uniquemember': 'cn=mandatory member' + } + ) + grp.create(properties=user_props) + + grp = UniqueGroup(topo.standalone, "uid=2ishani,ou=ACLDevelopment, {}".format(DEFAULT_SUFFIX)) + user_props = ( + {'sn': 'Borah', + 'cn': '2ishani', + 'objectclass': ['top', 'person','organizationalPerson', 'inetOrgPerson', 'groupofUniquenames'], + 'userpassword': PW_DM, + 'givenname': '2ishani', + 'ou': ['ACLDevelopment', 'People'], + 'roomnumber': '1234', + 'uniquemember': 'cn=mandatory member', "owner": "cn=group4, ou=ACLGroupTest, {}".format(DEFAULT_SUFFIX) + } + ) + grp.create(properties=user_props) + + grp = UniqueGroup(topo.standalone, 'cn=group1,ou=ACLGroupTest,'+DEFAULT_SUFFIX) + grp.create(properties={'cn': 'group1', + 'ou': 'groups'}) + grp.set('uniquemember', ["cn=group2, ou=ACLGroupTest, {}".format(DEFAULT_SUFFIX), + "cn=group3, ou=ACLGroupTest, {}".format(DEFAULT_SUFFIX)]) + + grp = UniqueGroup(topo.standalone, 'cn=group3,ou=ACLGroupTest,' + DEFAULT_SUFFIX) + grp.create(properties={'cn': 'group3', + 'ou': 'groups'}) + grp.set('uniquemember', ["cn=group4, ou=ACLGroupTest, {}".format(DEFAULT_SUFFIX)]) + + grp = UniqueGroup(topo.standalone, 
'cn=group4,ou=ACLGroupTest,' + DEFAULT_SUFFIX) + grp.create(properties={ + 'cn': 'group4', + 'ou': 'groups'}) + grp.set('uniquemember', ["uid=anuj, ou=ACLDevelopment, {}".format(DEFAULT_SUFFIX)]) + + #uniquemember should also be the owner + conn = UserAccount(topo.standalone, "uid=anuj,ou=ACLDevelopment, {}".format(DEFAULT_SUFFIX)).bind(PW_DM) + ua = UserAccount(conn, "uid=2ishani, ou=ACLDevelopment, {}".format(DEFAULT_SUFFIX)) + ua.add('roomnumber', '9999') + assert ua.get_attr_val('roomnumber') + + for DN in ["cn=group4,ou=ACLGroupTest,{}".format(DEFAULT_SUFFIX), + "cn=group3,ou=ACLGroupTest,{}".format(DEFAULT_SUFFIX), + "cn=group1,ou=ACLGroupTest,{}".format(DEFAULT_SUFFIX), + "uid=2ishani,ou=ACLDevelopment,{}".format(DEFAULT_SUFFIX), + "uid=anuj,ou=ACLDevelopment,{}".format(DEFAULT_SUFFIX), "ou=ACLDevelopment,{}".format(DEFAULT_SUFFIX), + "ou=ACLGroupTest, {}".format(DEFAULT_SUFFIX)]: + UserAccount(topo.standalone, DN).delete() + + +def test_aci_with_both_allow_and_deny(topo, aci_of_user, cleanup_tree): + """Modify Test 12 aci with both allow and deny + + :id: 9dcfe902-7abf-11e8-86dc-8c16451d917b + :setup: server + :steps: + 1. Add test entry + 2. Add ACI + 3. User should follow ACI role + :expectedresults: + 1. Entry should be added + 2. Operation should succeed + 3. 
Operation should succeed + """ + ACI_BODY = '(targetattr = "*")(version 3.0; acl "ACI NAME"; deny (read, search)userdn = "ldap:///{}"; allow (all) userdn = "ldap:///{}" ;)'.format(USER_WITH_ACI_DELADD, USER_DELADD) + Domain(topo.standalone, DEFAULT_SUFFIX).add("aci", ACI_BODY) + + for i in ['Product Development', 'Accounting']: + ou = OrganizationalUnit(topo.standalone, "ou={},{}".format(i, DEFAULT_SUFFIX)) + ou.create(properties={'ou': i}) + + for i in ['Jeff Vedder,ou=Product Development', 'Sam Carter,ou=Accounting']: + properties = { + 'uid': i, + 'cn': i, + 'sn': 'user', + 'uidNumber': '1000', + 'gidNumber': '2000', + 'homeDirectory': '/home/' + i, + 'userPassword': PW_DM + } + user = UserAccount(topo.standalone, "cn={},{}".format(i, DEFAULT_SUFFIX)) + user.create(properties=properties) + + conn = UserAccount(topo.standalone, USER_DELADD).bind(PW_DM) + # aci with both allow and deny, testing allow + assert UserAccount(conn, USER_WITH_ACI_DELADD).get_attr_val('uid') + conn = UserAccount(topo.standalone, USER_WITH_ACI_DELADD).bind(PW_DM) + # aci with both allow and deny, testing deny + assert len(UserAccount(conn, USER_WITH_ACI_DELADD).get_attr_val('uid')) == 0 + + +def test_allow_owner_to_modify_entry(topo, aci_of_user, cleanup_tree, request): + """Modify Test 14 allow userdnattr = owner to modify entry + + :id: aa302090-7abf-11e8-811a-8c16451d917b + :setup: server + :steps: + 1. Add test entry + 2. Add ACI + 3. User should follow ACI role + :expectedresults: + 1. Entry should be added + 2. Operation should succeed + 3. 
Operation should succeed + """ + grp = UniqueGroup(topo.standalone, 'cn=intranet,' + DEFAULT_SUFFIX) + grp.create(properties={ + 'cn': 'intranet', + 'ou': 'groups'}) + grp.set('owner', USER_WITH_ACI_DELADD) + + ACI_BODY = '(target ="ldap:///cn=intranet, {}") (targetattr ="*")(targetfilter ="(objectclass=groupOfUniqueNames)") (version 3.0;acl "{}";allow(read, write, delete, search, compare, add) (userdnattr = "owner");)'.format(DEFAULT_SUFFIX, request.node.name) + Domain(topo.standalone, DEFAULT_SUFFIX).add("aci", ACI_BODY) + + for i in ['Product Development', 'Accounting']: + ou = OrganizationalUnit(topo.standalone, "ou={},{}".format(i, DEFAULT_SUFFIX)) + ou.create(properties={'ou': i}) + for i in ['Jeff Vedder,ou=Product Development', 'Sam Carter,ou=Accounting']: + properties = { + 'uid': i, + 'cn': i, + 'sn': 'user', + 'uidNumber': '1000', + 'gidNumber': '2000', + 'homeDirectory': '/home/' + i, + 'userPassword': PW_DM + } + user = UserAccount(topo.standalone, "cn={},{}".format(i, DEFAULT_SUFFIX)) + user.create(properties=properties) + + conn = UserAccount(topo.standalone, USER_WITH_ACI_DELADD).bind(PW_DM) + # allow userdnattr = owner to modify entry + ua = UserAccount(conn, 'cn=intranet,dc=example,dc=com') + ua.set('uniquemember', "cn=Andy Walker, ou=Accounting,dc=example,dc=com") + assert ua.get_attr_val('uniquemember') + + +if __name__ == "__main__": + CURRENT_FILE = os.path.realpath(__file__) + pytest.main("-s -v %s" % CURRENT_FILE) diff --git a/dirsrvtests/tests/suites/acl/modrdn_test.py b/dirsrvtests/tests/suites/acl/modrdn_test.py new file mode 100644 index 0000000..c4ae8ee --- /dev/null +++ b/dirsrvtests/tests/suites/acl/modrdn_test.py @@ -0,0 +1,299 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2020 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. 
+# --- END COPYRIGHT BLOCK ---- + +import pytest, os, ldap +from lib389._constants import DEFAULT_SUFFIX, PW_DM +from lib389.idm.user import UserAccount +from lib389.idm.account import Anonymous +from lib389.idm.group import Group, UniqueGroup +from lib389.idm.organizationalunit import OrganizationalUnit, OrganizationalUnits +from lib389.topologies import topology_st as topo +from lib389.idm.domain import Domain + +pytestmark = pytest.mark.tier1 + +CONTAINER_1_DELADD = "ou=Product Development,{}".format(DEFAULT_SUFFIX) +CONTAINER_2_DELADD = "ou=Accounting,{}".format(DEFAULT_SUFFIX) +USER_DELADD = "cn=Jeff Vedder,{}".format(CONTAINER_1_DELADD) +USER_WITH_ACI_DELADD = "cn=Sam Carter,{}".format(CONTAINER_2_DELADD) +DYNAMIC_MODRDN = "cn=Test DYNAMIC_MODRDN Group 70, {}".format(DEFAULT_SUFFIX) +SAM_DAMMY_MODRDN = "cn=Sam Carter1,ou=Accounting,{}".format(DEFAULT_SUFFIX) +TRAC340_MODRDN = "cn=TRAC340_MODRDN,{}".format(DEFAULT_SUFFIX) +NEWENTRY9_MODRDN = "cn=NEWENTRY9_MODRDN,{}".format("ou=People,{}".format(DEFAULT_SUFFIX)) +OU0_OU_MODRDN = "ou=OU0,{}".format(DEFAULT_SUFFIX) +OU2_OU_MODRDN = "ou=OU2,{}".format(DEFAULT_SUFFIX) + + +@pytest.fixture(scope="function") +def aci_of_user(request, topo): + aci_list = Domain(topo.standalone, DEFAULT_SUFFIX).get_attr_vals('aci') + + def finofaci(): + domain = Domain(topo.standalone, DEFAULT_SUFFIX) + domain.set('aci', None) + for i in aci_list: + domain.add("aci", i) + + request.addfinalizer(finofaci) + + +@pytest.fixture(scope="function") +def _add_user(request, topo): + ou = OrganizationalUnit(topo.standalone, 'ou=Product Development,{}'.format(DEFAULT_SUFFIX)) + ou.create(properties={'ou': 'Product Development'}) + + ou = OrganizationalUnit(topo.standalone, 'ou=Accounting,{}'.format(DEFAULT_SUFFIX)) + ou.create(properties={'ou': 'Accounting'}) + + groups = Group(topo.standalone, DYNAMIC_MODRDN) + group_properties = {"cn": "Test DYNAMIC_MODRDN Group 70", + "objectclass": ["top", 'groupofURLs'], + 'memberURL': 
'ldap:///{}??base?(cn=*)'.format(USER_WITH_ACI_DELADD)} + groups.create(properties=group_properties) + + properties = { + 'uid': 'Jeff Vedder', + 'cn': 'Jeff Vedder', + 'sn': 'user', + 'uidNumber': '1000', + 'gidNumber': '2000', + 'homeDirectory': '/home/' + 'JeffVedder', + 'userPassword': PW_DM + } + user = UserAccount(topo.standalone, 'cn=Jeff Vedder,ou=Product Development,{}'.format(DEFAULT_SUFFIX)) + user.create(properties=properties) + + properties = { + 'uid': 'Sam Carter', + 'cn': 'Sam Carter', + 'sn': 'user', + 'uidNumber': '1000', + 'gidNumber': '2000', + 'homeDirectory': '/home/' + 'SamCarter', + 'userPassword': PW_DM + } + user = UserAccount(topo.standalone, 'cn=Sam Carter,ou=Accounting,{}'.format(DEFAULT_SUFFIX)) + user.create(properties=properties) + + def fin(): + for DN in [USER_DELADD,USER_WITH_ACI_DELADD,DYNAMIC_MODRDN,CONTAINER_2_DELADD,CONTAINER_1_DELADD]: + UserAccount(topo.standalone, DN).delete() + + request.addfinalizer(fin) + + +def test_allow_write_privilege_to_anyone(topo, _add_user, aci_of_user, request): + """Modrdn Test 1 Allow write privilege to anyone + + :id: 4406f12e-7932-11e8-9dea-8c16451d917b + :setup: server + :steps: + 1. Add test entry + 2. Add ACI + 3. User should follow ACI role + :expectedresults: + 1. Entry should be added + 2. Operation should succeed + 3. 
Operation should succeed + """ + Domain(topo.standalone, DEFAULT_SUFFIX).add("aci", + '(target ="ldap:///{}")(targetattr="*")(version 3.0;acl "{}";allow ' + '(write) (userdn = "ldap:///anyone");)'.format(DEFAULT_SUFFIX, request.node.name)) + conn = Anonymous(topo.standalone).bind() + # Allow write privilege to anyone + useraccount = UserAccount(conn, USER_WITH_ACI_DELADD) + useraccount.rename("cn=Jeff Vedder") + assert 'cn=Jeff Vedder,ou=Accounting,dc=example,dc=com' == useraccount.dn + useraccount = UserAccount(conn, "cn=Jeff Vedder,ou=Accounting,dc=example,dc=com") + useraccount.rename("cn=Sam Carter") + assert 'cn=Sam Carter,ou=Accounting,dc=example,dc=com' == useraccount.dn + + +def test_allow_write_privilege_to_dynamic_group_with_scope_set_to_base_in_ldap_url( + topo, _add_user, aci_of_user, request +): + """Modrdn Test 2 Allow write privilege to DYNAMIC_MODRDN group with scope set to base in LDAP URL + + :id: 4c0f8c00-7932-11e8-8398-8c16451d917b + :setup: server + :steps: + 1. Add test entry + 2. Add ACI + 3. User should follow ACI role + :expectedresults: + 1. Entry should be added + 2. Operation should succeed + 3. 
Operation should succeed + """ + Domain(topo.standalone, DEFAULT_SUFFIX).add("aci",'(target = ldap:///{})(targetattr="*")(version 3.0; acl "{}"; allow(all)(groupdn = "ldap:///{}"); )'.format(DEFAULT_SUFFIX, request.node.name, DYNAMIC_MODRDN)) + conn = UserAccount(topo.standalone, USER_WITH_ACI_DELADD).bind(PW_DM) + # Allow write privilege to DYNAMIC_MODRDN group with scope set to base in LDAP URL + useraccount = UserAccount(conn, USER_DELADD) + useraccount.rename("cn=Jeffbo Vedder") + assert 'cn=Jeffbo Vedder,ou=Product Development,dc=example,dc=com' == useraccount.dn + useraccount = UserAccount(conn, "cn=Jeffbo Vedder,{}".format(CONTAINER_1_DELADD)) + useraccount.rename("cn=Jeff Vedder") + assert 'cn=Jeff Vedder,ou=Product Development,dc=example,dc=com' == useraccount.dn + + +def test_write_access_to_naming_atributes(topo, _add_user, aci_of_user, request): + """Test for write access to naming atributes + Test that check for add writes to the new naming attr + + :id: 532fc630-7932-11e8-8924-8c16451d917b + :setup: server + :steps: + 1. Add test entry + 2. Add ACI + 3. User should follow ACI role + :expectedresults: + 1. Entry should be added + 2. Operation should succeed + 3. Operation should succeed + """ + Domain(topo.standalone, DEFAULT_SUFFIX).add("aci", '(target ="ldap:///{}")(targetattr != "uid")(version 3.0;acl "{}";allow (write) (userdn = "ldap:///anyone");)'.format(DEFAULT_SUFFIX, request.node.name)) + conn = UserAccount(topo.standalone, USER_WITH_ACI_DELADD).bind(PW_DM) + #Test for write access to naming atributes + useraccount = UserAccount(conn, USER_WITH_ACI_DELADD) + with pytest.raises(ldap.INSUFFICIENT_ACCESS): + useraccount.rename("uid=Jeffbo Vedder") + + +def test_write_access_to_naming_atributes_two(topo, _add_user, aci_of_user, request): + """Test for write access to naming atributes (2) + + :id: 5a2077d2-7932-11e8-9e7b-8c16451d917b + :setup: server + :steps: + 1. Add test entry + 2. Add ACI + 3. User should follow ACI role + 4. 
Now try to modrdn it to cn, won't work if request deleteoldrdn. + :expectedresults: + 1. Entry should be added + 2. Operation should succeed + 3. Operation should succeed + 4. Operation should not succeed + """ + Domain(topo.standalone, DEFAULT_SUFFIX).add("aci", '(target ="ldap:///{}")(targetattr != "uid")(version 3.0;acl "{}";allow (write) (userdn = "ldap:///anyone");)'.format(DEFAULT_SUFFIX, request.node.name)) + properties = { + 'uid': 'Sam Carter1', + 'cn': 'Sam Carter1', + 'sn': 'user', + 'uidNumber': '1000', + 'gidNumber': '2000', + 'homeDirectory': '/home/' + 'SamCarter1' + } + user = UserAccount(topo.standalone, 'cn=Sam Carter1,ou=Accounting,{}'.format(DEFAULT_SUFFIX)) + user.create(properties=properties) + user.set("userPassword", "password") + conn = UserAccount(topo.standalone, USER_WITH_ACI_DELADD).bind(PW_DM) + # Test for write access to naming atributes + useraccount = UserAccount(conn, SAM_DAMMY_MODRDN) + with pytest.raises(ldap.INSUFFICIENT_ACCESS): + useraccount.rename("uid=Jeffbo Vedder") + UserAccount(topo.standalone, SAM_DAMMY_MODRDN).delete() + + +@pytest.mark.bz950351 +def test_access_aci_list_contains_any_deny_rule(topo, _add_user, aci_of_user): + """RHDS denies MODRDN access if ACI list contains any DENY rule + Bug description: If you create a deny ACI for some or more attributes there is incorrect behaviour + as you cannot rename the entry anymore + + :id: 62cbbb8a-7932-11e8-96a7-8c16451d917b + :setup: server + :steps: + 1. Add test entry + 2. Adding a new ou ou=People to $BASEDN + 3. Adding a user NEWENTRY9_MODRDN to ou=People,$BASEDN + 4. Adding an allow rule for NEWENTRY9_MODRDN and for others an aci deny rule + :expectedresults: + 1. Entry should be added + 2. Operation should succeed + 3. Operation should succeed + 4. 
Operation should succeed + """ + properties = { + 'uid': 'NEWENTRY9_MODRDN', + 'cn': 'NEWENTRY9_MODRDN_People', + 'sn': 'user', + 'uidNumber': '1000', + 'gidNumber': '2000', + 'homeDirectory': '/home/' + 'NEWENTRY9_MODRDN' + } + user = UserAccount(topo.standalone, 'cn=NEWENTRY9_MODRDN,ou=People,{}'.format(DEFAULT_SUFFIX)) + user.create(properties=properties) + user.set("userPassword", "password") + user.set("telephoneNumber", "989898191") + user.set("mail", "anuj@anuj.com") + user.set("givenName", "givenName") + user.set("uid", "NEWENTRY9_MODRDN") + OrganizationalUnits(topo.standalone, DEFAULT_SUFFIX).get('People').add("aci", ['(targetattr = "*") ' + '(version 3.0;acl "admin";allow (all)(userdn = "ldap:///{}");)'.format(NEWENTRY9_MODRDN), + '(targetattr = "mail") (version 3.0;acl "deny_mail";deny (write)(userdn = "ldap:///anyone");)', + '(targetattr = "uid") (version 3.0;acl "allow uid";allow (write)(userdn = "ldap:///{}");)'.format(NEWENTRY9_MODRDN)]) + UserAccount(topo.standalone, NEWENTRY9_MODRDN).replace("userpassword", "Anuj") + useraccount = UserAccount(topo.standalone, NEWENTRY9_MODRDN) + useraccount.rename("uid=newrdnchnged") + assert 'uid=newrdnchnged,ou=People,dc=example,dc=com' == useraccount.dn + + +def test_renaming_target_entry(topo, _add_user, aci_of_user): + """Test for renaming target entry + + :id: 6be1d33a-7932-11e8-9115-8c16451d917b + :setup: server + :steps: + 1. Add test entry + 2. Create a test user entry + 3. Create a new ou entry with an aci + 4. Make sure uid=$MYUID has the access + 5. Rename ou=OU0 to ou=OU1 + 6. Create another ou=OU2 + 7. Move ou=OU1 under ou=OU2 + 8. Make sure uid=$MYUID still has the access + :expectedresults: + 1. Entry should be added + 2. Operation should succeed + 3. Operation should succeed + 4. Operation should succeed + 5. Operation should succeed + 6. Operation should succeed + 7. Operation should succeed + 8. 
Operation should succeed + """ + properties = { + 'uid': 'TRAC340_MODRDN', + 'cn': 'TRAC340_MODRDN', + 'sn': 'user', + 'uidNumber': '1000', + 'gidNumber': '2000', + 'homeDirectory': '/home/' + 'TRAC340_MODRDN' + } + user = UserAccount(topo.standalone, 'cn=TRAC340_MODRDN,{}'.format(DEFAULT_SUFFIX)) + user.create(properties=properties) + user.set("userPassword", "password") + ou = OrganizationalUnit(topo.standalone, 'ou=OU0,{}'.format(DEFAULT_SUFFIX)) + ou.create(properties={'ou': 'OU0'}) + ou.set('aci', '(targetattr="*")(version 3.0; acl "$MYUID";allow(read, search, compare) userdn = "ldap:///{}";)'.format(TRAC340_MODRDN)) + conn = UserAccount(topo.standalone, TRAC340_MODRDN).bind(PW_DM) + assert OrganizationalUnits(conn, DEFAULT_SUFFIX).get('OU0') + # Test for renaming target entry + OrganizationalUnits(topo.standalone, DEFAULT_SUFFIX).get('OU0').rename("ou=OU1") + assert OrganizationalUnits(conn, DEFAULT_SUFFIX).get('OU1') + ou = OrganizationalUnit(topo.standalone, 'ou=OU2,{}'.format(DEFAULT_SUFFIX)) + ou.create(properties={'ou': 'OU2'}) + # Test for renaming target entry + OrganizationalUnits(topo.standalone, DEFAULT_SUFFIX).get('OU1').rename("ou=OU1", newsuperior=OU2_OU_MODRDN) + assert OrganizationalUnits(conn, DEFAULT_SUFFIX).get('OU1') + + +if __name__ == "__main__": + CURRENT_FILE = os.path.realpath(__file__) + pytest.main("-s -v %s" % CURRENT_FILE) diff --git a/dirsrvtests/tests/suites/acl/repeated_ldap_add_test.py b/dirsrvtests/tests/suites/acl/repeated_ldap_add_test.py new file mode 100644 index 0000000..b627fb0 --- /dev/null +++ b/dirsrvtests/tests/suites/acl/repeated_ldap_add_test.py @@ -0,0 +1,489 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2020 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. 
+# --- END COPYRIGHT BLOCK --- +# +from subprocess import Popen + +import pytest +from lib389.paths import Paths +from lib389.tasks import * +from lib389.utils import * +from lib389.topologies import topology_st + +from lib389._constants import DN_DM, DEFAULT_SUFFIX, PASSWORD, SERVERID_STANDALONE + +pytestmark = pytest.mark.tier1 + +logging.getLogger(__name__).setLevel(logging.DEBUG) +log = logging.getLogger(__name__) + +CONFIG_DN = 'cn=config' +BOU = 'BOU' +BINDOU = 'ou=%s,%s' % (BOU, DEFAULT_SUFFIX) +BUID = 'buser123' +TUID = 'tuser0' +BINDDN = 'uid=%s,%s' % (BUID, BINDOU) +BINDPW = BUID +TESTDN = 'uid=%s,ou=people,%s' % (TUID, DEFAULT_SUFFIX) +TESTPW = TUID +BOGUSDN = 'uid=bogus,%s' % DEFAULT_SUFFIX +BOGUSDN2 = 'uid=bogus,ou=people,%s' % DEFAULT_SUFFIX +BOGUSSUFFIX = 'uid=bogus,ou=people,dc=bogus' +GROUPOU = 'ou=groups,%s' % DEFAULT_SUFFIX +BOGUSOU = 'ou=OU,%s' % DEFAULT_SUFFIX + +def get_ldap_error_msg(e, type): + return e.args[0][type] + +def pattern_accesslog(file, log_pattern): + for i in range(5): + try: + pattern_accesslog.last_pos += 1 + except AttributeError: + pattern_accesslog.last_pos = 0 + + found = None + file.seek(pattern_accesslog.last_pos) + + # Use a while true iteration because 'for line in file: hit a + # python bug that break file.tell() + while True: + line = file.readline() + found = log_pattern.search(line) + if ((line == '') or (found)): + break + + pattern_accesslog.last_pos = file.tell() + if found: + return line + else: + time.sleep(1) + return None + + +def check_op_result(server, op, dn, superior, exists, rc): + targetdn = dn + if op == 'search': + if exists: + opstr = 'Searching existing entry' + else: + opstr = 'Searching non-existing entry' + elif op == 'add': + if exists: + opstr = 'Adding existing entry' + else: + opstr = 'Adding non-existing entry' + elif op == 'modify': + if exists: + opstr = 'Modifying existing entry' + else: + opstr = 'Modifying non-existing entry' + elif op == 'modrdn': + if superior is not None: + targetdn 
= superior + if exists: + opstr = 'Moving to existing superior' + else: + opstr = 'Moving to non-existing superior' + else: + if exists: + opstr = 'Renaming existing entry' + else: + opstr = 'Renaming non-existing entry' + elif op == 'delete': + if exists: + opstr = 'Deleting existing entry' + else: + opstr = 'Deleting non-existing entry' + + if ldap.SUCCESS == rc: + expstr = 'be ok' + else: + expstr = 'fail with %s' % rc.__name__ + + log.info('%s %s, which should %s.' % (opstr, targetdn, expstr)) + time.sleep(1) + hit = 0 + try: + if op == 'search': + centry = server.search_s(dn, ldap.SCOPE_BASE, 'objectclass=*') + elif op == 'add': + server.add_s(Entry((dn, {'objectclass': 'top extensibleObject'.split(), + 'cn': 'test entry'}))) + elif op == 'modify': + server.modify_s(dn, [(ldap.MOD_REPLACE, 'description', b'test')]) + elif op == 'modrdn': + if superior is not None: + server.rename_s(dn, 'uid=new', newsuperior=superior, delold=1) + else: + server.rename_s(dn, 'uid=new', delold=1) + elif op == 'delete': + server.delete_s(dn) + else: + log.fatal('Unknown operation %s' % op) + assert False + except ldap.LDAPError as e: + hit = 1 + log.info("Exception (expected): %s" % type(e).__name__) + log.info('Desc {}'.format(get_ldap_error_msg(e,'desc'))) + assert isinstance(e, rc) + if 'matched' in e.args: + log.info('Matched is returned: {}'.format(get_ldap_error_msg(e, 'matched'))) + if rc != ldap.NO_SUCH_OBJECT: + assert False + + if ldap.SUCCESS == rc: + if op == 'search': + log.info('Search should return none') + assert len(centry) == 0 + else: + if 0 == hit: + log.info('Expected to fail with %s, but passed' % rc.__name__) + assert False + + log.info('PASSED\n') + + +@pytest.mark.bz1347760 +def test_repeated_ldap_add(topology_st): + """Prevent revealing the entry info to whom has no access rights. + + :id: 76d278bd-3e51-4579-951a-753e6703b4df + :setup: Standalone instance + :steps: + 1. Disable accesslog logbuffering + 2. Bind as "cn=Directory Manager" + 3. 
Add a organisational unit as BOU + 4. Add a bind user as uid=buser123,ou=BOU,dc=example,dc=com + 5. Add a test user as uid=tuser0,ou=People,dc=example,dc=com + 6. Delete aci in dc=example,dc=com + 7. Bind as Directory Manager, acquire an access log path and instance dir + 8. Bind as uid=buser123,ou=BOU,dc=example,dc=com who has no right to read the entry + 9. Bind as uid=bogus,ou=people,dc=bogus,bogus who does not exist + 10. Bind as uid=buser123,ou=BOU,dc=example,dc=com,bogus with wrong password + 11. Adding aci for uid=buser123,ou=BOU,dc=example,dc=com to ou=BOU,dc=example,dc=com. + 12. Bind as uid=buser123,ou=BOU,dc=example,dc=com now who has right to read the entry + :expectedresults: + 1. Operation should be successful + 2. Operation should be successful + 3. Operation should be successful + 4. Operation should be successful + 5. Operation should be successful + 6. Operation should be successful + 7. Operation should be successful + 8. Bind operation should be successful with no search result + 9. Bind operation should Fail + 10. Bind operation should Fail + 11. Operation should be successful + 12. Bind operation should be successful with search result + """ + log.info('Testing Bug 1347760 - Information disclosure via repeated use of LDAP ADD operation, etc.') + + log.info('Disabling accesslog logbuffering') + topology_st.standalone.modify_s(CONFIG_DN, [(ldap.MOD_REPLACE, 'nsslapd-accesslog-logbuffering', b'off')]) + + log.info('Bind as {%s,%s}' % (DN_DM, PASSWORD)) + topology_st.standalone.simple_bind_s(DN_DM, PASSWORD) + + log.info('Adding ou=%s a bind user belongs to.' 
% BOU) + topology_st.standalone.add_s(Entry((BINDOU, { + 'objectclass': 'top organizationalunit'.split(), + 'ou': BOU}))) + + log.info('Adding a bind user.') + topology_st.standalone.add_s(Entry((BINDDN, + {'objectclass': "top person organizationalPerson inetOrgPerson".split(), + 'cn': 'bind user', + 'sn': 'user', + 'userPassword': BINDPW}))) + + log.info('Adding a test user.') + topology_st.standalone.add_s(Entry((TESTDN, + {'objectclass': "top person organizationalPerson inetOrgPerson".split(), + 'cn': 'test user', + 'sn': 'user', + 'userPassword': TESTPW}))) + + log.info('Deleting aci in %s.' % DEFAULT_SUFFIX) + topology_st.standalone.modify_s(DEFAULT_SUFFIX, [(ldap.MOD_DELETE, 'aci', None)]) + + log.info('While binding as DM, acquire an access log path and instance dir') + ds_paths = Paths(serverid=topology_st.standalone.serverid, + instance=topology_st.standalone) + file_path = ds_paths.access_log + inst_dir = ds_paths.inst_dir + + log.info('Bind case 1. the bind user has no rights to read the entry itself, bind should be successful.') + log.info('Bind as {%s,%s} who has no access rights.' % (BINDDN, BINDPW)) + try: + topology_st.standalone.simple_bind_s(BINDDN, BINDPW) + except ldap.LDAPError as e: + log.info('Desc {}'.format(get_ldap_error_msg(e,'desc'))) + assert False + + file_obj = open(file_path, "r") + log.info('Access log path: %s' % file_path) + + log.info( + 'Bind case 2-1. the bind user does not exist, bind should fail with error %s' % ldap.INVALID_CREDENTIALS.__name__) + log.info('Bind as {%s,%s} who does not exist.' 
% (BOGUSDN, 'bogus')) + try: + topology_st.standalone.simple_bind_s(BOGUSDN, 'bogus') + except ldap.LDAPError as e: + log.info("Exception (expected): %s" % type(e).__name__) + log.info('Desc {}'.format(get_ldap_error_msg(e,'desc'))) + assert isinstance(e, ldap.INVALID_CREDENTIALS) + regex = re.compile('No such entry') + cause = pattern_accesslog(file_obj, regex) + if cause is None: + log.fatal('Cause not found - %s' % cause) + assert False + else: + log.info('Cause found - %s' % cause) + time.sleep(1) + + log.info( + 'Bind case 2-2. the bind user\'s suffix does not exist, bind should fail with error %s' % ldap.INVALID_CREDENTIALS.__name__) + log.info('Bind as {%s,%s} who does not exist.' % (BOGUSSUFFIX, 'bogus')) + with pytest.raises(ldap.INVALID_CREDENTIALS): + topology_st.standalone.simple_bind_s(BOGUSSUFFIX, 'bogus') + regex = re.compile('No suffix for bind') + cause = pattern_accesslog(file_obj, regex) + if cause is None: + log.fatal('Cause not found - %s' % cause) + assert False + else: + log.info('Cause found - %s' % cause) + time.sleep(1) + + log.info( + 'Bind case 2-3. the bind user\'s password is wrong, bind should fail with error %s' % ldap.INVALID_CREDENTIALS.__name__) + log.info('Bind as {%s,%s} who does not exist.' % (BINDDN, 'bogus')) + try: + topology_st.standalone.simple_bind_s(BINDDN, 'bogus') + except ldap.LDAPError as e: + log.info("Exception (expected): %s" % type(e).__name__) + log.info('Desc {}'.format(get_ldap_error_msg(e,'desc'))) + assert isinstance(e, ldap.INVALID_CREDENTIALS) + regex = re.compile('Invalid credentials') + cause = pattern_accesslog(file_obj, regex) + if cause is None: + log.fatal('Cause not found - %s' % cause) + assert False + else: + log.info('Cause found - %s' % cause) + time.sleep(1) + + log.info('Adding aci for %s to %s.' 
% (BINDDN, BINDOU)) + acival = '(targetattr="*")(version 3.0; acl "%s"; allow(all) userdn = "ldap:///%s";)' % (BUID, BINDDN) + log.info('aci: %s' % acival) + log.info('Bind as {%s,%s}' % (DN_DM, PASSWORD)) + topology_st.standalone.simple_bind_s(DN_DM, PASSWORD) + topology_st.standalone.modify_s(BINDOU, [(ldap.MOD_ADD, 'aci', ensure_bytes(acival))]) + time.sleep(1) + + log.info('Bind case 3. the bind user has the right to read the entry itself, bind should be successful.') + log.info('Bind as {%s,%s} which should be ok.\n' % (BINDDN, BINDPW)) + topology_st.standalone.simple_bind_s(BINDDN, BINDPW) + + log.info('The following operations are against the subtree the bind user %s has no rights.' % BINDDN) + # Search + exists = True + rc = ldap.SUCCESS + log.info( + 'Search case 1. the bind user has no rights to read the search entry, it should return no search results with %s' % rc) + check_op_result(topology_st.standalone, 'search', TESTDN, None, exists, rc) + + exists = False + rc = ldap.SUCCESS + log.info( + 'Search case 2-1. the search entry does not exist, the search should return no search results with %s' % rc.__name__) + check_op_result(topology_st.standalone, 'search', BOGUSDN, None, exists, rc) + + exists = False + rc = ldap.SUCCESS + log.info( + 'Search case 2-2. the search entry does not exist, the search should return no search results with %s' % rc.__name__) + check_op_result(topology_st.standalone, 'search', BOGUSDN2, None, exists, rc) + + # Add + exists = True + rc = ldap.INSUFFICIENT_ACCESS + log.info( + 'Add case 1. the bind user has no rights AND the adding entry exists, it should fail with %s' % rc.__name__) + check_op_result(topology_st.standalone, 'add', TESTDN, None, exists, rc) + + exists = False + rc = ldap.INSUFFICIENT_ACCESS + log.info( + 'Add case 2-1. 
the bind user has no rights AND the adding entry does not exist, it should fail with %s' % rc.__name__) + check_op_result(topology_st.standalone, 'add', BOGUSDN, None, exists, rc) + + exists = False + rc = ldap.INSUFFICIENT_ACCESS + log.info( + 'Add case 2-2. the bind user has no rights AND the adding entry does not exist, it should fail with %s' % rc.__name__) + check_op_result(topology_st.standalone, 'add', BOGUSDN2, None, exists, rc) + + # Modify + exists = True + rc = ldap.INSUFFICIENT_ACCESS + log.info( + 'Modify case 1. the bind user has no rights AND the modifying entry exists, it should fail with %s' % rc.__name__) + check_op_result(topology_st.standalone, 'modify', TESTDN, None, exists, rc) + + exists = False + rc = ldap.INSUFFICIENT_ACCESS + log.info( + 'Modify case 2-1. the bind user has no rights AND the modifying entry does not exist, it should fail with %s' % rc.__name__) + check_op_result(topology_st.standalone, 'modify', BOGUSDN, None, exists, rc) + + exists = False + rc = ldap.INSUFFICIENT_ACCESS + log.info( + 'Modify case 2-2. the bind user has no rights AND the modifying entry does not exist, it should fail with %s' % rc.__name__) + check_op_result(topology_st.standalone, 'modify', BOGUSDN2, None, exists, rc) + + # Modrdn + exists = True + rc = ldap.INSUFFICIENT_ACCESS + log.info( + 'Modrdn case 1. the bind user has no rights AND the renaming entry exists, it should fail with %s' % rc.__name__) + check_op_result(topology_st.standalone, 'modrdn', TESTDN, None, exists, rc) + + exists = False + rc = ldap.INSUFFICIENT_ACCESS + log.info( + 'Modrdn case 2-1. the bind user has no rights AND the renaming entry does not exist, it should fail with %s' % rc.__name__) + check_op_result(topology_st.standalone, 'modrdn', BOGUSDN, None, exists, rc) + + exists = False + rc = ldap.INSUFFICIENT_ACCESS + log.info( + 'Modrdn case 2-2. 
the bind user has no rights AND the renaming entry does not exist, it should fail with %s' % rc.__name__) + check_op_result(topology_st.standalone, 'modrdn', BOGUSDN2, None, exists, rc) + + exists = True + rc = ldap.INSUFFICIENT_ACCESS + log.info( + 'Modrdn case 3. the bind user has no rights AND the node moving an entry to exists, it should fail with %s' % rc.__name__) + check_op_result(topology_st.standalone, 'modrdn', TESTDN, GROUPOU, exists, rc) + + exists = False + rc = ldap.INSUFFICIENT_ACCESS + log.info( + 'Modrdn case 4-1. the bind user has no rights AND the node moving an entry to does not, it should fail with %s' % rc.__name__) + check_op_result(topology_st.standalone, 'modrdn', TESTDN, BOGUSOU, exists, rc) + + exists = False + rc = ldap.INSUFFICIENT_ACCESS + log.info( + 'Modrdn case 4-2. the bind user has no rights AND the node moving an entry to does not, it should fail with %s' % rc.__name__) + check_op_result(topology_st.standalone, 'modrdn', TESTDN, BOGUSOU, exists, rc) + + # Delete + exists = True + rc = ldap.INSUFFICIENT_ACCESS + log.info( + 'Delete case 1. the bind user has no rights AND the deleting entry exists, it should fail with %s' % rc.__name__) + check_op_result(topology_st.standalone, 'delete', TESTDN, None, exists, rc) + + exists = False + rc = ldap.INSUFFICIENT_ACCESS + log.info( + 'Delete case 2-1. the bind user has no rights AND the deleting entry does not exist, it should fail with %s' % rc.__name__) + check_op_result(topology_st.standalone, 'delete', BOGUSDN, None, exists, rc) + + exists = False + rc = ldap.INSUFFICIENT_ACCESS + log.info( + 'Delete case 2-2. the bind user has no rights AND the deleting entry does not exist, it should fail with %s' % rc.__name__) + check_op_result(topology_st.standalone, 'delete', BOGUSDN2, None, exists, rc) + + log.info('EXTRA: Check no regressions') + log.info('Adding aci for %s to %s.' 
% (BINDDN, DEFAULT_SUFFIX)) + acival = '(targetattr="*")(version 3.0; acl "%s-all"; allow(all) userdn = "ldap:///%s";)' % (BUID, BINDDN) + log.info('Bind as {%s,%s}' % (DN_DM, PASSWORD)) + topology_st.standalone.simple_bind_s(DN_DM, PASSWORD) + topology_st.standalone.modify_s(DEFAULT_SUFFIX, [(ldap.MOD_ADD, 'aci', ensure_bytes(acival))]) + time.sleep(1) + + log.info('Bind as {%s,%s}.' % (BINDDN, BINDPW)) + try: + topology_st.standalone.simple_bind_s(BINDDN, BINDPW) + except ldap.LDAPError as e: + log.info('Desc {}'.format(get_ldap_error_msg(e,'desc'))) + assert False + time.sleep(1) + + exists = False + rc = ldap.NO_SUCH_OBJECT + log.info('Search case. the search entry does not exist, the search should fail with %s' % rc.__name__) + check_op_result(topology_st.standalone, 'search', BOGUSDN2, None, exists, rc) + file_obj.close() + + exists = True + rc = ldap.ALREADY_EXISTS + log.info('Add case. the adding entry already exists, it should fail with %s' % rc.__name__) + check_op_result(topology_st.standalone, 'add', TESTDN, None, exists, rc) + + exists = False + rc = ldap.NO_SUCH_OBJECT + log.info('Modify case. the modifying entry does not exist, it should fail with %s' % rc.__name__) + check_op_result(topology_st.standalone, 'modify', BOGUSDN, None, exists, rc) + + exists = False + rc = ldap.NO_SUCH_OBJECT + log.info('Modrdn case 1. the renaming entry does not exist, it should fail with %s' % rc.__name__) + check_op_result(topology_st.standalone, 'modrdn', BOGUSDN, None, exists, rc) + + exists = False + rc = ldap.NO_SUCH_OBJECT + log.info('Modrdn case 2. the node moving an entry to does not, it should fail with %s' % rc.__name__) + check_op_result(topology_st.standalone, 'modrdn', TESTDN, BOGUSOU, exists, rc) + + exists = False + rc = ldap.NO_SUCH_OBJECT + log.info('Delete case. 
the deleting entry does not exist, it should fail with %s' % rc.__name__) + check_op_result(topology_st.standalone, 'delete', BOGUSDN, None, exists, rc) + + log.info('Inactivate %s' % BINDDN) + if ds_paths.version < '1.3': + nsinactivate = '%s/ns-inactivate.pl' % inst_dir + cli_cmd = [nsinactivate, '-D', DN_DM, '-w', PASSWORD, '-I', BINDDN] + else: + dsidm = '%s/dsidm' % ds_paths.sbin_dir + cli_cmd = [dsidm, SERVERID_STANDALONE, '-b', DEFAULT_SUFFIX, 'account', 'lock', BINDDN] + log.info(cli_cmd) + p = Popen(cli_cmd) + assert (p.wait() == 0) + + log.info('Bind as {%s,%s} which should fail with %s.' % (BINDDN, BUID, ldap.UNWILLING_TO_PERFORM.__name__)) + try: + topology_st.standalone.simple_bind_s(BINDDN, BUID) + except ldap.LDAPError as e: + log.info("Exception (expected): %s" % type(e).__name__) + log.info('Desc {}'.format(get_ldap_error_msg(e,'desc'))) + assert isinstance(e, ldap.UNWILLING_TO_PERFORM) + + log.info('Bind as {%s,%s} which should fail with %s.' % (BINDDN, 'bogus', ldap.UNWILLING_TO_PERFORM.__name__)) + try: + topology_st.standalone.simple_bind_s(BINDDN, 'bogus') + except ldap.LDAPError as e: + log.info("Exception (expected): %s" % type(e).__name__) + log.info('Desc {}'.format(get_ldap_error_msg(e,'desc'))) + assert isinstance(e, ldap.UNWILLING_TO_PERFORM) + + log.info('SUCCESS') + + +if __name__ == '__main__': + # Run isolated + # -s for DEBUG mode + CURRENT_FILE = os.path.realpath(__file__) + pytest.main("-s %s" % CURRENT_FILE) + diff --git a/dirsrvtests/tests/suites/acl/roledn_test.py b/dirsrvtests/tests/suites/acl/roledn_test.py new file mode 100644 index 0000000..6ccd652 --- /dev/null +++ b/dirsrvtests/tests/suites/acl/roledn_test.py @@ -0,0 +1,274 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2019 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. +# --- END COPYRIGHT BLOCK ---- + +""" +This script will test different type of roles. 
+""" + +import os +import pytest + +from lib389._constants import DEFAULT_SUFFIX, PW_DM +from lib389.idm.user import UserAccounts, UserAccount +from lib389.idm.organizationalunit import OrganizationalUnits +from lib389.topologies import topology_st as topo +from lib389.idm.domain import Domain +from lib389.idm.role import NestedRoles, ManagedRoles, FilteredRoles +from lib389.idm.account import Anonymous + +import ldap + + +pytestmark = pytest.mark.tier1 + + +OU_ROLE = f"ou=roledntest,{DEFAULT_SUFFIX}" +STEVE_ROLE = f"uid=STEVE_ROLE,{OU_ROLE}" +HARRY_ROLE = f"uid=HARRY_ROLE,{OU_ROLE}" +MARY_ROLE = f"uid=MARY_ROLE,{OU_ROLE}" +ROLE1 = f"cn=ROLE1,{OU_ROLE}" +ROLE2 = f"cn=ROLE2,{OU_ROLE}" +ROLE3 = f"cn=ROLE3,{OU_ROLE}" +ROLE21 = f"cn=ROLE21,{OU_ROLE}" +ROLE31 = f"cn=ROLE31,{OU_ROLE}" +FILTERROLE = f"cn=FILTERROLE,{OU_ROLE}" +JOE_ROLE = f"uid=JOE_ROLE,{OU_ROLE}" +NOROLEUSER = f"uid=NOROLEUSER,{OU_ROLE}" +SCRACHENTRY = f"uid=SCRACHENTRY,{OU_ROLE}" +ALL_ACCESS = f"uid=all access,{OU_ROLE}" +NOT_RULE_ACCESS = f"uid=not rule access,{OU_ROLE}" +OR_RULE_ACCESS = f"uid=or rule access,{OU_ROLE}" +NESTED_ROLE_TESTER = f"uid=nested role tester,{OU_ROLE}" + + +@pytest.fixture(scope="function") +def _aci_of_user(request, topo): + """ + Removes and Restores ACIs after the test. + """ + aci_list = Domain(topo.standalone, DEFAULT_SUFFIX).get_attr_vals_utf8('aci') + + def finofaci(): + """ + Removes and Restores ACIs after the test. 
+ """ + domain = Domain(topo.standalone, DEFAULT_SUFFIX) + domain.remove_all('aci') + for i in aci_list: + domain.add("aci", i) + + request.addfinalizer(finofaci) + + +@pytest.fixture(scope="module") +def _add_user(request, topo): + """ + A Function that will create necessary users delete the created user + """ + ous = OrganizationalUnits(topo.standalone, DEFAULT_SUFFIX) + ou_ou = ous.create(properties={'ou': 'roledntest'}) + ou_ou.set('aci', [f'(target="ldap:///{NESTED_ROLE_TESTER}")(targetattr="*") ' + f'(version 3.0; aci "nested role aci"; allow(all)' + f'roledn = "ldap:///{ROLE2}";)', + f'(target="ldap:///{OR_RULE_ACCESS}")(targetattr="*")' + f'(version 3.0; aci "or role aci"; allow(all) ' + f'roledn = "ldap:///{ROLE1} || ldap:///{ROLE21}";)', + f'(target="ldap:///{ALL_ACCESS}")(targetattr="*")' + f'(version 3.0; aci "anyone role aci"; allow(all) ' + f'roledn = "ldap:///anyone";)', + f'(target="ldap:///{NOT_RULE_ACCESS}")(targetattr="*")' + f'(version 3.0; aci "not role aci"; allow(all)' + f'roledn != "ldap:///{ROLE1} || ldap:///{ROLE21}";)']) + + nestedroles = NestedRoles(topo.standalone, OU_ROLE) + for i in [('role2', [ROLE1, ROLE21]), ('role3', [ROLE2, ROLE31])]: + nestedroles.create(properties={'cn': i[0], + 'nsRoleDN': i[1]}) + + managedroles = ManagedRoles(topo.standalone, OU_ROLE) + for i in ['ROLE1', 'ROLE21', 'ROLE31']: + managedroles.create(properties={'cn': i}) + + filterroles = FilteredRoles(topo.standalone, OU_ROLE) + filterroles.create(properties={'cn': 'filterRole', + 'nsRoleFilter': 'sn=Dr Drake', + 'description': 'filter role tester'}) + + users = UserAccounts(topo.standalone, OU_ROLE, rdn=None) + for i in [('STEVE_ROLE', ROLE1, 'Has roles 1, 2 and 3.'), + ('HARRY_ROLE', ROLE21, 'Has roles 21, 2 and 3.'), + ('MARY_ROLE', ROLE31, 'Has roles 31 and 3.')]: + users.create(properties={ + 'uid': i[0], + 'cn': i[0], + 'sn': 'user', + 'uidNumber': '1000', + 'gidNumber': '2000', + 'homeDirectory': '/home/' + i[0], + 'userPassword': PW_DM, + 'nsRoleDN': 
i[1], + 'Description': i[2] + }) + + for i in [('JOE_ROLE', 'Has filterRole.'), + ('NOROLEUSER', 'Has no roles.'), + ('SCRACHENTRY', 'Entry to test rights on.'), + ('all access', 'Everyone has acccess (incl anon).'), + ('not rule access', 'Only accessible to mary.'), + ('or rule access', 'Only to steve and harry but nbot mary or anon'), + ('nested role tester', 'Only accessible to harry and steve.')]: + users.create(properties={ + 'uid': i[0], + 'cn': i[0], + 'sn': 'user', + 'uidNumber': '1000', + 'gidNumber': '2000', + 'homeDirectory': '/home/' + i[0], + 'userPassword': PW_DM, + 'Description': i[1] + }) + + # Setting SN for user JOE + UserAccount(topo.standalone, f'uid=JOE_ROLE,ou=roledntest,{DEFAULT_SUFFIX}').set('sn', 'Dr Drake') + + def fin(): + """ + It will delete the created users + """ + for i in users.list() + managedroles.list() + nestedroles.list(): + i.delete() + + request.addfinalizer(fin) + + +@pytest.mark.parametrize("user,entry", [ + (STEVE_ROLE, NESTED_ROLE_TESTER), + (HARRY_ROLE, NESTED_ROLE_TESTER), + (MARY_ROLE, NOT_RULE_ACCESS), + (STEVE_ROLE, OR_RULE_ACCESS), + (HARRY_ROLE, OR_RULE_ACCESS), + (STEVE_ROLE, ALL_ACCESS), + (HARRY_ROLE, ALL_ACCESS), + (MARY_ROLE, ALL_ACCESS), +], ids=[ + "(STEVE_ROLE, NESTED_ROLE_TESTER)", + "(HARRY_ROLE, NESTED_ROLE_TESTER)", + "(MARY_ROLE, NOT_RULE_ACCESS)", + "(STEVE_ROLE, OR_RULE_ACCESS)", + "(HARRY_ROLE, OR_RULE_ACCESS)", + "(STEVE_ROLE, ALL_ACCESS)", + "(HARRY_ROLE, ALL_ACCESS)", + "(MARY_ROLE, ALL_ACCESS)", +]) +def test_mod_seealso_positive(topo, _add_user, _aci_of_user, user, entry): + """ + Testing the roledn keyword that allows access control + based on the role of the bound user. + + :id: a33c5d6a-79f4-11e8-8551-8c16451d917b + :parametrized: yes + :setup: Standalone server + :steps: + 1. Add test entry + 2. Add ACI + 3. User should follow ACI role + :expectedresults: + 1. Entry should be added + 2. Operation should succeed + 3. 
Operation should succeed + """ + conn = UserAccount(topo.standalone, user).bind(PW_DM) + UserAccount(conn, entry).replace('seeAlso', 'cn=1') + + +@pytest.mark.parametrize( + "user,entry", [ + (MARY_ROLE, NESTED_ROLE_TESTER), + (STEVE_ROLE, NOT_RULE_ACCESS), + (HARRY_ROLE, NOT_RULE_ACCESS), + (MARY_ROLE, OR_RULE_ACCESS), + ], ids=[ + "(MARY_ROLE, NESTED_ROLE_TESTER)", + "(STEVE_ROLE, NOT_RULE_ACCESS)", + "(HARRY_ROLE, NOT_RULE_ACCESS)", + "(MARY_ROLE , OR_RULE_ACCESS)"] +) +def test_mod_seealso_negative(topo, _add_user, _aci_of_user, user, entry): + """ + Testing the roledn keyword that do not allows access control + based on the role of the bound user. + + :id: b2444aa2-79f4-11e8-a2c3-8c16451d917b + :parametrized: yes + :setup: Standalone server + :steps: + 1. Add test entry + 2. Add ACI + 3. User should follow ACI role + :expectedresults: + 1. Entry should be added + 2. Operation should succeed + 3. Operation should succeed + """ + conn = UserAccount(topo.standalone, user).bind(PW_DM) + user = UserAccount(conn, entry) + with pytest.raises(ldap.INSUFFICIENT_ACCESS): + user.replace('seeAlso', 'cn=1') + + +@pytest.mark.parametrize("entry", [NOT_RULE_ACCESS, ALL_ACCESS], + ids=["NOT_RULE_ACCESS", "ALL_ACCESS"]) +def test_mod_anonseealso_positive(topo, _add_user, _aci_of_user, entry): + """ + Testing the roledn keyword that allows access control + based on the role of the bound user. + + :id: c3eb41ac-79f4-11e8-aa8b-8c16451d917b + :parametrized: yes + :setup: Standalone server + :steps: + 1. Add test entry + 2. Add ACI + 3. User should follow ACI role + :expectedresults: + 1. Entry should be added + 2. Operation should succeed + 3. 
Operation should succeed + """ + conn = Anonymous(topo.standalone).bind() + UserAccount(conn, entry).replace('seeAlso', 'cn=1') + + +@pytest.mark.parametrize("entry", [NESTED_ROLE_TESTER, OR_RULE_ACCESS], + ids=["NESTED_ROLE_TESTER", "OR_RULE_ACCESS"]) +def test_mod_anonseealso_negaive(topo, _add_user, _aci_of_user, entry): + """ + Testing the roledn keyword that do not allows access control + based on the role of the bound user. + + :id: d385611a-79f4-11e8-adc8-8c16451d917b + :parametrized: yes + :setup: Standalone Instance + :steps: + 1. Add test entry + 2. Add ACI + 3. User should follow ACI role + :expectedresults: + 1. Entry should be added + 2. Operation should succeed + 3. Operation should succeed + """ + conn = Anonymous(topo.standalone).bind() + user = UserAccount(conn, entry) + with pytest.raises(ldap.INSUFFICIENT_ACCESS): + user.replace('seeAlso', 'cn=1') + + +if __name__ == "__main__": + CURRENT_FILE = os.path.realpath(__file__) + pytest.main("-s -v %s" % CURRENT_FILE) diff --git a/dirsrvtests/tests/suites/acl/search_real_part2_test.py b/dirsrvtests/tests/suites/acl/search_real_part2_test.py new file mode 100644 index 0000000..312c5ae --- /dev/null +++ b/dirsrvtests/tests/suites/acl/search_real_part2_test.py @@ -0,0 +1,474 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2020 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. 
+# --- END COPYRIGHT BLOCK ---- + +import pytest, os, ldap +from lib389._constants import DEFAULT_SUFFIX, PW_DM, ErrorLog +from lib389.idm.user import UserAccount, UserAccounts +from lib389.idm.account import Accounts +from lib389.idm.organizationalunit import OrganizationalUnits +from lib389.topologies import topology_st as topo +from lib389.idm.domain import Domain + +pytestmark = pytest.mark.tier1 + +CONTAINER_1_DELADD = "ou=Product Development,{}".format(DEFAULT_SUFFIX) +CONTAINER_2_DELADD = "ou=Accounting,{}".format(DEFAULT_SUFFIX) +USER_ANUJ = "uid=Anuj Borah,{}".format(CONTAINER_1_DELADD) +USER_ANANDA = "uid=Ananda Borah,{}".format(CONTAINER_2_DELADD) + + +@pytest.fixture(scope="function") +def aci_of_user(request, topo): + # Add anonymous access aci + ACI_TARGET = "(targetattr != \"userpassword\")(target = \"ldap:///%s\")" % (DEFAULT_SUFFIX) + ACI_ALLOW = "(version 3.0; acl \"Anonymous Read access\"; allow (read,search,compare)" + ACI_SUBJECT = "(userdn=\"ldap:///anyone\");)" + ANON_ACI = ACI_TARGET + ACI_ALLOW + ACI_SUBJECT + suffix = Domain(topo.standalone, DEFAULT_SUFFIX) + try: + suffix.add('aci', ANON_ACI) + except ldap.TYPE_OR_VALUE_EXISTS: + pass + + aci_list = Domain(topo.standalone, DEFAULT_SUFFIX).get_attr_vals('aci') + + def finofaci(): + domain = Domain(topo.standalone, DEFAULT_SUFFIX) + domain.set('aci', None) + for i in aci_list: + domain.add("aci", i) + pass + + request.addfinalizer(finofaci) + + +@pytest.fixture(scope="module") +def add_test_user(request, topo): + topo.standalone.config.loglevel((ErrorLog.ACL_SUMMARY,)) + + ous = OrganizationalUnits(topo.standalone, DEFAULT_SUFFIX) + for i in ['Product Development', 'Accounting']: + ous.create(properties={'ou': i}) + + users = UserAccounts(topo.standalone, DEFAULT_SUFFIX, rdn='ou=Product Development') + users.create(properties={ + 'uid': 'Anuj Borah', + 'cn': 'Anuj Borah', + 'sn': 'user', + 'uidNumber': '1000', + 'gidNumber': '2000', + 'homeDirectory': '/home/' + 'AnujBorah', + 
'userPassword': PW_DM + }) + + users = UserAccounts(topo.standalone, DEFAULT_SUFFIX, rdn='ou=Accounting') + users.create(properties={ + 'uid': 'Ananda Borah', + 'cn': 'Ananda Borah', + 'sn': 'user', + 'uidNumber': '1000', + 'gidNumber': '2000', + 'homeDirectory': '/home/' + 'AnandaBorah', + 'userPassword': PW_DM + }) + + +def test_deny_all_access_with__target_set_on_non_leaf(topo, add_test_user, aci_of_user): + """Search Test 11 Deny all access with != target set on non-leaf + + :id: f1c5d72a-6e11-11e8-aa9d-8c16451d917b + :setup: Standalone Instance + :steps: + 1. Add Entry + 2. Add ACI + 3. Bind with test USER_ANUJ + 4. Try search + 5. Delete Entry,test USER_ANUJ, ACI + :expectedresults: + 1. Operation should success + 2. Operation should success + 3. Operation should success + 4. Operation should Fail + 5. Operation should success + """ + ACI_TARGET = "(target != ldap:///{})(targetattr=\"*\")".format(CONTAINER_2_DELADD) + ACI_ALLOW = '(version 3.0; acl "Name of the ACI"; deny absolute (all)' + ACI_SUBJECT = 'userdn="ldap:///anyone";)' + ACI_BODY = ACI_TARGET + ACI_ALLOW + ACI_SUBJECT + Domain(topo.standalone, DEFAULT_SUFFIX).add("aci", ACI_BODY) + conn = UserAccount(topo.standalone, USER_ANANDA).bind(PW_DM) + # After binding with USER_ANANDA , aci will limit the search to itself + assert 1 == len(Accounts(conn, DEFAULT_SUFFIX).filter('(cn=*)')) + conn = UserAccount(topo.standalone, USER_ANUJ).bind(PW_DM) + # After binding with USER_ANUJ , aci will limit the search to itself + assert 1 == len(Accounts(conn, DEFAULT_SUFFIX).filter('(cn=*)')) + # After binding with root , the actual number of users will be given + assert 4 == len(Accounts(topo.standalone, DEFAULT_SUFFIX).filter('(cn=*)')) + + +def test_deny_all_access_with__target_set_on_wildcard_non_leaf( + topo, add_test_user, aci_of_user +): + """Search Test 12 Deny all access with != target set on wildcard non-leaf + + :id: 02f34640-6e12-11e8-a382-8c16451d917b + :setup: Standalone Instance + :steps: + 1. 
Add Entry + 2. Add ACI + 3. Bind with test USER_ANUJ + 4. Try search + 5. Delete Entry,test USER_ANUJ, ACI + :expectedresults: + 1. Operation should success + 2. Operation should success + 3. Operation should success + 4. Operation should Fail + 5. Operation should success + """ + ACI_TARGET = "(target != ldap:///ou=Product*,{})(targetattr=\"*\")".format( + DEFAULT_SUFFIX) + ACI_ALLOW = '(version 3.0; acl "Name of the ACI"; deny absolute (all)' + ACI_SUBJECT = 'userdn="ldap:///anyone";)' + ACI_BODY = ACI_TARGET + ACI_ALLOW + ACI_SUBJECT + Domain(topo.standalone, DEFAULT_SUFFIX).add("aci", ACI_BODY) + conn = UserAccount(topo.standalone, USER_ANANDA).bind(PW_DM) + # aci will limit the search to ou=Product it will block others + assert 1 == len(Accounts(conn, DEFAULT_SUFFIX).filter('(cn=*)')) + conn = UserAccount(topo.standalone, USER_ANUJ).bind(PW_DM) + # aci will limit the search to ou=Product it will block others + assert 1 == len(Accounts(conn, DEFAULT_SUFFIX).filter('(cn=*)')) + # with root , aci will give actual no of users , without any limit. + assert 4 == len(Accounts(topo.standalone, DEFAULT_SUFFIX).filter('(cn=*)')) + + +def test_deny_all_access_with__target_set_on_wildcard_leaf( + topo, add_test_user, aci_of_user +): + """Search Test 13 Deny all access with != target set on wildcard leaf + + :id: 16c54d76-6e12-11e8-b5ba-8c16451d917b + :setup: Standalone Instance + :steps: + 1. Add Entry + 2. Add ACI + 3. Bind with test USER_ANUJ + 4. Try search + 5. Delete Entry,test USER_ANUJ, ACI + :expectedresults: + 1. Operation should success + 2. Operation should success + 3. Operation should success + 4. Operation should Fail + 5. 
Operation should success + """ + ACI_TARGET = "(target != ldap:///uid=Anuj*, ou=*,{})(targetattr=\"*\")".format( + DEFAULT_SUFFIX) + ACI_ALLOW = '(version 3.0; acl "Name of the ACI"; deny absolute (all)' + ACI_SUBJECT = 'userdn="ldap:///anyone";)' + ACI_BODY = ACI_TARGET + ACI_ALLOW + ACI_SUBJECT + Domain(topo.standalone, DEFAULT_SUFFIX).add("aci", ACI_BODY) + conn = UserAccount(topo.standalone, USER_ANANDA).bind(PW_DM) + # aci will limit the search to cn=Jeff it will block others + assert 1 == len(Accounts(conn, DEFAULT_SUFFIX).filter('(cn=*)')) + conn = UserAccount(topo.standalone, USER_ANUJ).bind(PW_DM) + # aci will limit the search to cn=Jeff it will block others + assert 1 == len(Accounts(conn, DEFAULT_SUFFIX).filter('(cn=*)')) + # with root there is no aci blockage + assert 4 == len(Accounts(topo.standalone, DEFAULT_SUFFIX).filter('(cn=*)')) + + +def test_deny_all_access_with_targetfilter_using_equality_search( + topo, add_test_user, aci_of_user +): + """Search Test 14 Deny all access with targetfilter using equality search + + :id: 27255e04-6e12-11e8-8e35-8c16451d917b + :setup: Standalone Instance + :steps: + 1. Add Entry + 2. Add ACI + 3. Bind with test USER_ANUJ + 4. Try search + 5. Delete Entry,test USER_ANUJ, ACI + :expectedresults: + 1. Operation should success + 2. Operation should success + 3. Operation should success + 4. Operation should Fail + 5. 
Operation should success + """ + ACI_TARGET = '(targetfilter ="(uid=Anuj Borah)")(target = ldap:///{})(targetattr="*")'.format( + DEFAULT_SUFFIX) + ACI_ALLOW = '(version 3.0; acl "Name of the ACI"; deny absolute (all)' + ACI_SUBJECT = 'userdn="ldap:///anyone";)' + ACI_BODY = ACI_TARGET + ACI_ALLOW + ACI_SUBJECT + Domain(topo.standalone, DEFAULT_SUFFIX).add("aci", ACI_BODY) + conn = UserAccount(topo.standalone, USER_ANANDA).bind(PW_DM) + # aci will block the search to cn=Jeff + assert 0 == len(Accounts(conn, DEFAULT_SUFFIX).filter('(uid=Anuj Borah)')) + conn = UserAccount(topo.standalone, USER_ANUJ).bind(PW_DM) + # aci will block the search to cn=Jeff + assert 0 == len(Accounts(conn, DEFAULT_SUFFIX).filter('(uid=Anuj Borah)')) + # with root there is no blockage + assert 1 == len(Accounts(topo.standalone, DEFAULT_SUFFIX).filter('(uid=Anuj Borah)')) + + +def test_deny_all_access_with_targetfilter_using_equality_search_two( + topo, add_test_user, aci_of_user +): + """Test that Search Test 15 Deny all access with targetfilter using != equality search + + :id: 3966bcd4-6e12-11e8-83ce-8c16451d917b + :setup: Standalone Instance + :steps: + 1. Add Entry + 2. Add ACI + 3. Bind with test USER_ANUJ + 4. Try search + 5. Delete Entry,test USER_ANUJ, ACI + :expectedresults: + 1. Operation should success + 2. Operation should success + 3. Operation should success + 4. Operation should Fail + 5. 
Operation should success + """ + ACI_TARGET = '(targetfilter !="(uid=Anuj Borah)")(target = ldap:///{})(targetattr="*")'.format( + DEFAULT_SUFFIX) + ACI_ALLOW = '(version 3.0; acl "Name of the ACI"; deny absolute (all)' + ACI_SUBJECT = 'userdn="ldap:///anyone";)' + ACI_BODY = ACI_TARGET + ACI_ALLOW + ACI_SUBJECT + Domain(topo.standalone, DEFAULT_SUFFIX).add("aci", ACI_BODY) + conn = UserAccount(topo.standalone, USER_ANANDA).bind(PW_DM) + # aci will limit the search to cn=Jeff it will block others + assert 1 == len(Accounts(conn, DEFAULT_SUFFIX).filter('(cn=*)')) + conn = UserAccount(topo.standalone, USER_ANUJ).bind(PW_DM) + # aci will limit the search to cn=Jeff it will block others + assert 1 == len(Accounts(conn, DEFAULT_SUFFIX).filter('(cn=*)')) + # with root there is no blockage + assert 4 == len(Accounts(topo.standalone, DEFAULT_SUFFIX).filter('(cn=*)')) + + +def test_deny_all_access_with_targetfilter_using_substring_search( + topo, add_test_user, aci_of_user +): + """Test that Search Test 16 Deny all access with targetfilter using substring search + + :id: 44d7b4ba-6e12-11e8-b420-8c16451d917b + :setup: Standalone Instance + :steps: + 1. Add Entry + 2. Add ACI + 3. Bind with test USER_ANUJ + 4. Try search + 5. Delete Entry,test USER_ANUJ, ACI + :expectedresults: + 1. Operation should success + 2. Operation should success + 3. Operation should success + 4. Operation should Fail + 5. 
Operation should success + """ + ACI_TARGET = '(targetfilter ="(uid=Anu*)")(target = ldap:///{})(targetattr="*")'.format( + DEFAULT_SUFFIX) + ACI_ALLOW = '(version 3.0; acl "Name of the ACI"; deny absolute (all)' + ACI_SUBJECT = 'userdn="ldap:///anyone";)' + ACI_BODY = ACI_TARGET + ACI_ALLOW + ACI_SUBJECT + Domain(topo.standalone, DEFAULT_SUFFIX).add("aci", ACI_BODY) + conn = UserAccount(topo.standalone, USER_ANANDA).bind(PW_DM) + # aci block anything cn=j* + assert 0 == len(Accounts(conn, DEFAULT_SUFFIX).filter('(cn=Anu*)')) + conn = UserAccount(topo.standalone, USER_ANUJ).bind(PW_DM) + # aci block anything cn=j* + assert 0 == len(Accounts(conn, DEFAULT_SUFFIX).filter('(cn=Anu*)')) + # with root there is no blockage + assert 1 == len(Accounts(topo.standalone, DEFAULT_SUFFIX).filter('(cn=Anu*)')) + + +def test_deny_all_access_with_targetfilter_using_substring_search_two( + topo, add_test_user, aci_of_user +): + """Test that Search Test 17 Deny all access with targetfilter using != substring search + + :id: 55b12d98-6e12-11e8-8cf4-8c16451d917b + :setup: Standalone Instance + :steps: + 1. Add Entry + 2. Add ACI + 3. Bind with test USER_ANUJ + 4. Try search + 5. Delete Entry,test USER_ANUJ, ACI + :expectedresults: + 1. Operation should success + 2. Operation should success + 3. Operation should success + 4. Operation should Fail + 5. 
Operation should success + """ + ACI_TARGET = '(targetfilter !="(uid=Anu*)")(target = ldap:///{})(targetattr="*")'.format( + DEFAULT_SUFFIX + ) + ACI_ALLOW = '(version 3.0; acl "Name of the ACI"; deny (all)' + ACI_SUBJECT = 'userdn="ldap:///anyone";)' + ACI_BODY = ACI_TARGET + ACI_ALLOW + ACI_SUBJECT + Domain(topo.standalone, DEFAULT_SUFFIX).add("aci", ACI_BODY) + conn = UserAccount(topo.standalone, USER_ANANDA).bind(PW_DM) + # aci allow anything cn=j*, it will block others + assert 1 == len(Accounts(conn, DEFAULT_SUFFIX).filter('(uid=*)')) + conn = UserAccount(topo.standalone, USER_ANUJ).bind(PW_DM) + # aci allow anything cn=j*, it will block others + assert 1 == len(Accounts(conn, DEFAULT_SUFFIX).filter('(uid=*)')) + # with root there is no blockage + assert 3 == len(Accounts(topo.standalone, DEFAULT_SUFFIX).filter('(uid=*)')) + + +def test_deny_all_access_with_targetfilter_using_boolean_or_of_two_equality_search( + topo, add_test_user, aci_of_user, request +): + """Search Test 18 Deny all access with targetfilter using boolean OR of two equality search + + :id: 29cc35fa-793f-11e8-988f-8c16451d917b + :setup: Standalone Instance + :steps: + 1. Add Entry + 2. Add ACI + 3. Bind with test USER_ANUJ + 4. Try search + 5. Delete Entry,test USER_ANUJ, ACI + :expectedresults: + 1. Operation should success + 2. Operation should success + 3. Operation should success + 4. Operation should Fail + 5. 
Operation should success + """ + Domain(topo.standalone, DEFAULT_SUFFIX).add("aci",'(target = ldap:///{})(targetattr = "*")' + '(targetfilter = (|(cn=scarter)(cn=jvaughan)))(version 3.0; acl "{}"; ' + 'deny absolute (all) (userdn = "ldap:///anyone") ;)'.format(DEFAULT_SUFFIX, request.node.name)) + UserAccount(topo.standalone, USER_ANANDA).set("cn", "scarter") + UserAccount(topo.standalone, USER_ANUJ).set("cn", "jvaughan") + conn = UserAccount(topo.standalone, USER_ANANDA).bind(PW_DM) + # aci will deny_all_access_with_targetfilter_using_boolean_or_of_two_equality_search + user = UserAccount(conn, USER_ANANDA) + assert len(user.get_attr_val_utf8('uid')) == 0 + # aci will deny_all_access_with_targetfilter_using_boolean_or_of_two_equality_search + user = UserAccount(conn, USER_ANUJ) + assert len(user.get_attr_val_utf8('uid')) == 0 + # with root no blockage + assert UserAccount(topo.standalone, USER_ANANDA).get_attr_val_utf8('uid') == 'Ananda Borah' + # with root no blockage + assert UserAccount(topo.standalone, USER_ANUJ).get_attr_val_utf8('uid') == 'Anuj Borah' + + +def test_deny_all_access_to__userdn_two(topo, add_test_user, aci_of_user): + """Search Test 19 Deny all access to != userdn + + :id: 693496c0-6e12-11e8-80dc-8c16451d917b + :setup: Standalone Instance + :steps: + 1. Add Entry + 2. Add ACI + 3. Bind with test USER_ANUJ + 4. Try search + 5. Delete Entry,test USER_ANUJ, ACI + :expectedresults: + 1. Operation should success + 2. Operation should success + 3. Operation should success + 4. Operation should Fail + 5. 
Operation should success + """ + ACI_TARGET = "(target = ldap:///{})(targetattr=\"*\")".format(DEFAULT_SUFFIX) + ACI_ALLOW = '(version 3.0; acl "Name of the ACI"; deny absolute (all)' + ACI_SUBJECT = 'userdn!="ldap:///{}";)'.format(USER_ANANDA) + ACI_BODY = ACI_TARGET + ACI_ALLOW + ACI_SUBJECT + Domain(topo.standalone, DEFAULT_SUFFIX).add("aci", ACI_BODY) + conn = UserAccount(topo.standalone, USER_ANANDA).bind(PW_DM) + # aci will not block anything for USER_ANANDA , it block other users + assert 4 == len(Accounts(conn, DEFAULT_SUFFIX).filter('(cn=*)')) + conn = UserAccount(topo.standalone, USER_ANUJ).bind(PW_DM) + # aci will block everything for other users + assert 0 == len(Accounts(conn, DEFAULT_SUFFIX).filter('(cn=*)')) + # with root there is no aci blockage + assert 4 == len(Accounts(topo.standalone, DEFAULT_SUFFIX).filter('(cn=*)')) + + +def test_deny_all_access_with_userdn(topo, add_test_user, aci_of_user): + """Search Test 20 Deny all access with userdn + + :id: 75aada86-6e12-11e8-bd34-8c16451d917b + :setup: Standalone Instance + :steps: + 1. Add Entry + 2. Add ACI + 3. Bind with test USER_ANUJ + 4. Try search + 5. Delete Entry,test USER_ANUJ, ACI + :expectedresults: + 1. Operation should success + 2. Operation should success + 3. Operation should success + 4. Operation should Fail + 5. 
Operation should success + """ + ACI_TARGET = "(target = ldap:///{})(targetattr=\"*\")".format(DEFAULT_SUFFIX) + ACI_ALLOW = '(version 3.0; acl "Name of the ACI"; deny (all)' + ACI_SUBJECT = 'userdn="ldap:///{}";)'.format(USER_ANANDA) + ACI_BODY = ACI_TARGET + ACI_ALLOW + ACI_SUBJECT + Domain(topo.standalone, DEFAULT_SUFFIX).add("aci", ACI_BODY) + conn = UserAccount(topo.standalone, USER_ANANDA).bind(PW_DM) + # aci will block anything for USER_ANANDA , it not block other users + assert 0 == len(Accounts(conn, DEFAULT_SUFFIX).filter('(cn=*)')) + conn = UserAccount(topo.standalone, USER_ANUJ).bind(PW_DM) + # aci will block anything for other users + assert 4 == len(Accounts(conn, DEFAULT_SUFFIX).filter('(cn=*)')) + # with root thers is no aci blockage + assert 4 == len(Accounts(topo.standalone, DEFAULT_SUFFIX).filter('(cn=*)')) + + +def test_deny_all_access_with_targetfilter_using_presence_search( + topo, add_test_user, aci_of_user +): + """Search Test 21 Deny all access with targetfilter using presence search + + :id: 85244a42-6e12-11e8-9480-8c16451d917b + :setup: Standalone Instance + :steps: + 1. Add Entry + 2. Add ACI + 3. Bind with test USER_ANUJ + 4. Try search + 5. Delete Entry,test USER_ANUJ, ACI + :expectedresults: + 1. Operation should success + 2. Operation should success + 3. Operation should success + 4. Operation should Fail + 5. 
Operation should success + """ + user = UserAccounts(topo.standalone, DEFAULT_SUFFIX).create_test_user() + user.set('userPassword', PW_DM) + + ACI_TARGET = '(targetfilter ="(cn=*)")(target = ldap:///{})(targetattr="*")'.format( + DEFAULT_SUFFIX) + ACI_ALLOW = '(version 3.0; acl "Name of the ACI"; deny absolute (all)' + ACI_SUBJECT = 'userdn="ldap:///anyone";)' + ACI_BODY = ACI_TARGET + ACI_ALLOW + ACI_SUBJECT + Domain(topo.standalone, DEFAULT_SUFFIX).add("aci", ACI_BODY) + conn = UserAccount(topo.standalone, USER_ANANDA).bind(PW_DM) + # aci will eny_all_access_with_targetfilter_using_presence_search + user = UserAccount(conn, 'uid=test_user_1000,ou=People,{}'.format(DEFAULT_SUFFIX)) + assert len(user.get_attr_val_utf8('cn')) == 0 + # with root no blockage + assert UserAccount(topo.standalone, 'uid=test_user_1000,ou=People,{}'.format(DEFAULT_SUFFIX)).get_attr_val_utf8('cn') == 'test_user_1000' + + +if __name__ == "__main__": + CURRENT_FILE = os.path.realpath(__file__) + pytest.main("-s -v %s" % CURRENT_FILE) diff --git a/dirsrvtests/tests/suites/acl/search_real_part3_test.py b/dirsrvtests/tests/suites/acl/search_real_part3_test.py new file mode 100644 index 0000000..afb9ed7 --- /dev/null +++ b/dirsrvtests/tests/suites/acl/search_real_part3_test.py @@ -0,0 +1,480 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2020 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. 
+# --- END COPYRIGHT BLOCK ---- + +import pytest, os, ldap +from lib389._constants import DEFAULT_SUFFIX, PW_DM, ErrorLog +from lib389.idm.user import UserAccount, UserAccounts +from lib389.idm.organization import Organization +from lib389.idm.account import Accounts, Anonymous +from lib389.idm.group import Group, UniqueGroup +from lib389.idm.organizationalunit import OrganizationalUnit +from lib389.idm.group import Groups +from lib389.topologies import topology_st as topo +from lib389.idm.domain import Domain + +pytestmark = pytest.mark.tier1 + +CONTAINER_1_DELADD = "ou=Product Development,{}".format(DEFAULT_SUFFIX) +CONTAINER_2_DELADD = "ou=Accounting,{}".format(DEFAULT_SUFFIX) +USER_ANUJ = "uid=Anuj Borah,{}".format(CONTAINER_1_DELADD) +USER_ANANDA = "uid=Ananda Borah,{}".format(CONTAINER_2_DELADD) + + +@pytest.fixture(scope="function") +def aci_of_user(request, topo): + # Add anonymous access aci + ACI_TARGET = "(targetattr != \"userpassword\")(target = \"ldap:///%s\")" % (DEFAULT_SUFFIX) + ACI_ALLOW = "(version 3.0; acl \"Anonymous Read access\"; allow (read,search,compare)" + ACI_SUBJECT = "(userdn=\"ldap:///anyone\");)" + ANON_ACI = ACI_TARGET + ACI_ALLOW + ACI_SUBJECT + suffix = Domain(topo.standalone, DEFAULT_SUFFIX) + try: + suffix.add('aci', ANON_ACI) + except ldap.TYPE_OR_VALUE_EXISTS: + pass + + aci_list = suffix.get_attr_vals('aci') + + def finofaci(): + domain = Domain(topo.standalone, DEFAULT_SUFFIX) + domain.set('aci', None) + for i in aci_list: + domain.add("aci", i) + + request.addfinalizer(finofaci) + + +@pytest.fixture(scope="module") +def add_test_user(request, topo): + topo.standalone.config.loglevel((ErrorLog.ACL_SUMMARY,)) + + for i in ['Product Development', 'Accounting']: + OrganizationalUnit(topo.standalone, "ou={},{}".format(i, DEFAULT_SUFFIX)).create(properties={'ou': i}) + + users = UserAccounts(topo.standalone, DEFAULT_SUFFIX, rdn='ou=Product Development') + users.create(properties={ + 'uid': 'Anuj Borah', + 'cn': 'Anuj Borah', + 
'sn': 'user', + 'uidNumber': '1000', + 'gidNumber': '2000', + 'homeDirectory': '/home/' + 'AnujBorah', + 'userPassword': PW_DM + }) + + users = UserAccounts(topo.standalone, DEFAULT_SUFFIX, rdn='ou=Accounting') + users.create(properties={ + 'uid': 'Ananda Borah', + 'cn': 'Ananda Borah', + 'sn': 'user', + 'uidNumber': '1000', + 'gidNumber': '2000', + 'homeDirectory': '/home/' + 'AnandaBorah', + 'userPassword': PW_DM + }) + + +def test_deny_search_access_to_userdn_with_ldap_url(topo, add_test_user, aci_of_user): + """Search Test 23 Deny search access to userdn with LDAP URL + + :id: 94f082d8-6e12-11e8-be72-8c16451d917b + :setup: Standalone Instance + :steps: + 1. Add Entry + 2. Add ACI + 3. Bind with test USER_ANUJ + 4. Try search + 5. Delete Entry,test USER_ANUJ, ACI + :expectedresults: + 1. Operation should success + 2. Operation should success + 3. Operation should success + 4. Operation should Fail + 5. Operation should success + """ + ACI_TARGET = '(target = ldap:///{})(targetattr="*")'.format(DEFAULT_SUFFIX) + ACI_ALLOW = '(version 3.0; acl "Name of the ACI"; deny (search)' + ACI_SUBJECT = ( + 'userdn="ldap:///%s";)' % "{}??sub?(&(roomnumber=3445))".format(DEFAULT_SUFFIX) + ) + ACI_BODY = ACI_TARGET + ACI_ALLOW + ACI_SUBJECT + Domain(topo.standalone, DEFAULT_SUFFIX).add("aci", ACI_BODY) + UserAccount(topo.standalone, USER_ANANDA).set('roomnumber', '3445') + conn = UserAccount(topo.standalone, USER_ANANDA).bind(PW_DM) + # aci will block all users having roomnumber=3445 + assert 0 == len(Accounts(conn, DEFAULT_SUFFIX).filter('(cn=*)')) + conn = UserAccount(topo.standalone, USER_ANUJ).bind(PW_DM) + # aci will block roomnumber=3445 for all users USER_ANUJ does not have roomnumber + assert 4 == len(Accounts(conn, DEFAULT_SUFFIX).filter('(cn=*)')) + # with root there is no aci blockage + UserAccount(topo.standalone, USER_ANANDA).remove('roomnumber', '3445') + + +def test_deny_search_access_to_userdn_with_ldap_url_two(topo, add_test_user, aci_of_user): + """Search 
Test 24 Deny search access to != userdn with LDAP URL + + :id: a1ee05d2-6e12-11e8-8260-8c16451d917b + :setup: Standalone Instance + :steps: + 1. Add Entry + 2. Add ACI + 3. Bind with test USER_ANUJ + 4. Try search + 5. Delete Entry,test USER_ANUJ, ACI + :expectedresults: + 1. Operation should success + 2. Operation should success + 3. Operation should success + 4. Operation should Fail + 5. Operation should success + """ + ACI_TARGET = '(target = ldap:///{})(targetattr="*")'.format(DEFAULT_SUFFIX) + ACI_ALLOW = '(version 3.0; acl "Name of the ACI"; deny (search)' + ACI_SUBJECT = ( + 'userdn != "ldap:///%s";)' % "{}??sub?(&(roomnumber=3445))".format(DEFAULT_SUFFIX) + ) + ACI_BODY = ACI_TARGET + ACI_ALLOW + ACI_SUBJECT + Domain(topo.standalone, DEFAULT_SUFFIX).add("aci", ACI_BODY) + UserAccount(topo.standalone, USER_ANANDA).set('roomnumber', '3445') + conn = UserAccount(topo.standalone, USER_ANANDA).bind(PW_DM) + # aci will not block all users having roomnumber=3445 , it will block others + assert 4 == len(Accounts(conn, DEFAULT_SUFFIX).filter('(cn=*)')) + conn = UserAccount(topo.standalone, USER_ANUJ).bind(PW_DM) + # aci will not block all users having roomnumber=3445 , it will block others + assert 0 == len(Accounts(conn, DEFAULT_SUFFIX).filter('(cn=*)')) + # with root there is no aci blockage + UserAccount(topo.standalone, USER_ANANDA).remove('roomnumber', '3445') + + +def test_deny_search_access_to_userdn_with_ldap_url_matching_all_users( + topo, add_test_user, aci_of_user +): + """Search Test 25 Deny search access to userdn with LDAP URL matching all users + + :id: b37f72ae-6e12-11e8-9c98-8c16451d917b + :setup: Standalone Instance + :steps: + 1. Add Entry + 2. Add ACI + 3. Bind with test USER_ANUJ + 4. Try search + 5. Delete Entry,test USER_ANUJ, ACI + :expectedresults: + 1. Operation should success + 2. Operation should success + 3. Operation should success + 4. Operation should Fail + 5. 
Operation should success + """ + ACI_TARGET = '(target = ldap:///{})(targetattr="*")'.format(DEFAULT_SUFFIX) + ACI_ALLOW = '(version 3.0; acl "Name of the ACI"; deny (search)' + ACI_SUBJECT = 'userdn = "ldap:///%s";)' % "{}??sub?(&(cn=*))".format(DEFAULT_SUFFIX) + ACI_BODY = ACI_TARGET + ACI_ALLOW + ACI_SUBJECT + Domain(topo.standalone, DEFAULT_SUFFIX).add("aci", ACI_BODY) + conn = UserAccount(topo.standalone, USER_ANANDA).bind(PW_DM) + # aci will block all users LDAP URL matching all users + assert 0 == len(Accounts(conn, DEFAULT_SUFFIX).filter('(cn=*)')) + conn = UserAccount(topo.standalone, USER_ANUJ).bind(PW_DM) + # aci will block all users LDAP URL matching all users + assert 0 == len(Accounts(conn, DEFAULT_SUFFIX).filter('(cn=*)')) + # with root there is no aci blockage + assert 4 == len(Accounts(topo.standalone, DEFAULT_SUFFIX).filter('(cn=*)')) + + +def test_deny_read_access_to_a_dynamic_group(topo, add_test_user, aci_of_user): + """Search Test 26 Deny read access to a dynamic group + + :id: c0c5290e-6e12-11e8-a900-8c16451d917b + :setup: Standalone Instance + :steps: + 1. Add Entry + 2. Add ACI + 3. Bind with test USER_ANUJ + 4. Try search + 5. Delete Entry,test USER_ANUJ, ACI + :expectedresults: + 1. Operation should success + 2. Operation should success + 3. Operation should success + 4. Operation should Fail + 5. 
Operation should success + """ + groups = Groups(topo.standalone, DEFAULT_SUFFIX) + group_properties = {"cn": "group1", "description": "testgroup"} + group = groups.create(properties=group_properties) + group.add('objectClass', 'groupOfURLS') + group.set('memberURL', "ldap:///{}??sub?(&(ou=Accounting)(cn=Sam*))".format(DEFAULT_SUFFIX)) + group.add_member(USER_ANANDA) + + ACI_TARGET = '(target = ldap:///{})(targetattr = "*")'.format(DEFAULT_SUFFIX) + ACI_ALLOW = '(version 3.0; acl "All rights for %s"; deny(read)' % "Unknown" + ACI_SUBJECT = 'groupdn = "ldap:///cn=group1,ou=Groups,{}";)'.format(DEFAULT_SUFFIX) + ACI_BODY = ACI_TARGET + ACI_ALLOW + ACI_SUBJECT + Domain(topo.standalone, DEFAULT_SUFFIX).add("aci", ACI_BODY) + conn = UserAccount(topo.standalone, USER_ANANDA).bind(PW_DM) + # aci will block all 'memberURL', "ldap:///{}??sub?(&(ou=Accounting)(cn=Sam*))".format(DEFAULT_SUFFIX) + assert 0 == len(Accounts(conn, DEFAULT_SUFFIX).filter('(cn=*)')) + conn = UserAccount(topo.standalone, USER_ANUJ).bind(PW_DM) + # USER_ANUJ is not a member + assert 4 == len(Accounts(conn, DEFAULT_SUFFIX).filter('(cn=*)')) + group.delete() + + +def test_deny_read_access_to_dynamic_group_with_host_port_set_on_ldap_url( + topo, add_test_user, aci_of_user +): + """Search Test 27 Deny read access to dynamic group with host:port set on LDAP URL + + :id: ceb62158-6e12-11e8-8c36-8c16451d917b + :setup: Standalone Instance + :steps: + 1. Add Entry + 2. Add ACI + 3. Bind with test USER_ANUJ + 4. Try search + 5. Delete Entry,test USER_ANUJ, ACI + :expectedresults: + 1. Operation should success + 2. Operation should success + 3. Operation should success + 4. Operation should Fail + 5. 
Operation should success + """ + groups = Groups(topo.standalone, DEFAULT_SUFFIX) + group = groups.create(properties={"cn": "group1", + "description": "testgroup" + }) + group.add('objectClass', 'groupOfURLS') + group.set('memberURL', "ldap:///localhost:38901/{}??sub?(&(ou=Accounting)(cn=Sam*))".format(DEFAULT_SUFFIX)) + group.add_member(USER_ANANDA) + + ACI_TARGET = '(target = ldap:///{})(targetattr = "*")'.format(DEFAULT_SUFFIX) + ACI_ALLOW = '(version 3.0; acl "All rights for %s"; deny(read)' % "Unknown" + ACI_SUBJECT = 'groupdn = "ldap:///cn=group1,ou=Groups,{}";)'.format(DEFAULT_SUFFIX) + ACI_BODY = ACI_TARGET + ACI_ALLOW + ACI_SUBJECT + Domain(topo.standalone, DEFAULT_SUFFIX).add("aci", ACI_BODY) + conn = UserAccount(topo.standalone, USER_ANANDA).bind(PW_DM) + # aci will block 'memberURL', "ldap:///localhost:38901/dc=example,dc=com??sub?(&(ou=Accounting)(cn=Sam*))" + assert 0 == len(Accounts(conn, DEFAULT_SUFFIX).filter('(cn=*)')) + # with root there is no aci blockage + assert 4 == len(Accounts(topo.standalone, DEFAULT_SUFFIX).filter('(cn=*)')) + group.delete() + + +def test_deny_read_access_to_dynamic_group_with_scope_set_to_one_in_ldap_url( + topo, add_test_user, aci_of_user +): + """Search Test 28 Deny read access to dynamic group with scope set to "one" in LDAP URL + + :id: ddb30432-6e12-11e8-94db-8c16451d917b + :setup: Standalone Instance + :steps: + 1. Add Entry + 2. Add ACI + 3. Bind with test USER_ANUJ + 4. Try search + 5. Delete Entry,test USER_ANUJ, ACI + :expectedresults: + 1. Operation should success + 2. Operation should success + 3. Operation should success + 4. Operation should Fail + 5. 
Operation should success + """ + groups = Groups(topo.standalone, DEFAULT_SUFFIX) + group = groups.create(properties={"cn": "group1", + "description": "testgroup" + }) + group.add('objectClass', 'groupOfURLS') + group.set('memberURL', "ldap:///{}??sub?(&(ou=Accounting)(cn=Sam*))".format(DEFAULT_SUFFIX)) + group.add_member(USER_ANANDA) + + ACI_TARGET = '(targetattr = "*")' + ACI_ALLOW = '(version 3.0; acl "All rights for %s"; deny(read) ' % "Unknown" + ACI_SUBJECT = 'groupdn != "ldap:///cn=group1,ou=Groups,{}";)'.format(DEFAULT_SUFFIX) + ACI_BODY = ACI_TARGET + ACI_ALLOW + ACI_SUBJECT + Domain(topo.standalone, DEFAULT_SUFFIX).add("aci", ACI_BODY) + conn = UserAccount(topo.standalone, USER_ANANDA).bind(PW_DM) + # aci will allow only 'memberURL', "ldap:///{dc=example,dc=com??sub?(&(ou=Accounting)(cn=Sam*))" + assert 4 == len(Accounts(conn, DEFAULT_SUFFIX).filter('(cn=*)')) + conn = UserAccount(topo.standalone, USER_ANUJ).bind(PW_DM) + # aci will allow only 'memberURL', "ldap:///{dc=example,dc=com??sub?(&(ou=Accounting)(cn=Sam*))" + assert 0 == len(Accounts(conn, DEFAULT_SUFFIX).filter('(cn=*)')) + group.delete() + + +def test_deny_read_access_to_dynamic_group_two(topo, add_test_user, aci_of_user): + """Search Test 29 Deny read access to != dynamic group + + :id: eae2a6c6-6e12-11e8-80f3-8c16451d917b + :setup: Standalone Instance + :steps: + 1. Add Entry + 2. Add ACI + 3. Bind with test USER_ANUJ + 4. Try search + 5. Delete Entry,test USER_ANUJ, ACI + :expectedresults: + 1. Operation should success + 2. Operation should success + 3. Operation should success + 4. Operation should Fail + 5. 
Operation should success + """ + groups = Groups(topo.standalone, DEFAULT_SUFFIX) + group_properties = {"cn": "group1", + "description": "testgroup" + } + group = groups.create(properties=group_properties) + group.add('objectClass', 'groupofuniquenames') + group.set('uniquemember', [USER_ANANDA,USER_ANUJ]) + + ACI_TARGET = '(targetattr = "*")' + ACI_ALLOW = '(version 3.0; acl "All rights for %s"; deny(read) ' % "Unknown" + ACI_SUBJECT = 'groupdn = "ldap:///cn=group1,ou=Groups,{}";)'.format(DEFAULT_SUFFIX) + ACI_BODY = ACI_TARGET + ACI_ALLOW + ACI_SUBJECT + Domain(topo.standalone, DEFAULT_SUFFIX).add("aci", ACI_BODY) + conn = UserAccount(topo.standalone, USER_ANANDA).bind(PW_DM) + # aci will block groupdn = "ldap:///cn=group1,ou=Groups,dc=example,dc=com";) + assert 0 == len(Accounts(conn, DEFAULT_SUFFIX).filter('(cn=*)')) + conn = UserAccount(topo.standalone, USER_ANUJ).bind(PW_DM) + # aci will block groupdn = "ldap:///cn=group1,ou=Groups,dc=example,dc=com";) + assert 0 == len(Accounts(conn, DEFAULT_SUFFIX).filter('(cn=*)')) + # with root there is no aci blockage + assert 4 == len(Accounts(topo.standalone, DEFAULT_SUFFIX).filter('(cn=*)')) + group.delete() + + +def test_deny_access_to_group_should_deny_access_to_all_uniquemember( + topo, add_test_user, aci_of_user, request +): + """Search Test 38 Deny access to group should deny access to all uniquemember (including chain group) + + :id: 56b470e4-7941-11e8-912b-8c16451d917b + :setup: Standalone Instance + :steps: + 1. Add Entry + 2. Add ACI + 3. Bind with test USER_ANUJ + 4. Try search + 5. Delete Entry,test USER_ANUJ, ACI + :expectedresults: + 1. Operation should success + 2. Operation should success + 3. Operation should success + 4. Operation should Fail + 5. 
Operation should success + """ + + grp = UniqueGroup(topo.standalone, 'cn=Nested Group 1,' + DEFAULT_SUFFIX) + grp.create(properties={ + 'cn': 'Nested Group 1', + 'ou': 'groups', + 'uniquemember': "cn=Nested Group 2, {}".format(DEFAULT_SUFFIX) + }) + + grp = UniqueGroup(topo.standalone, 'cn=Nested Group 2,' + DEFAULT_SUFFIX) + grp.create(properties={ + 'cn': 'Nested Group 2', + 'ou': 'groups', + 'uniquemember': "cn=Nested Group 3, {}".format(DEFAULT_SUFFIX) + }) + + grp = UniqueGroup(topo.standalone, 'cn=Nested Group 3,' + DEFAULT_SUFFIX) + grp.create(properties={ + 'cn': 'Nested Group 3', + 'ou': 'groups', + 'uniquemember': [USER_ANANDA, USER_ANUJ] + }) + + Domain(topo.standalone, DEFAULT_SUFFIX).add("aci", '(target = ldap:///{})(targetattr="*")' + '(version 3.0; acl "{}"; deny(read)(groupdn = "ldap:///cn=Nested Group 1, {}"); )'.format(DEFAULT_SUFFIX, request.node.name, DEFAULT_SUFFIX)) + conn = UserAccount(topo.standalone, USER_ANANDA).bind(PW_DM) + # deny_access_to_group_should_deny_access_to_all_uniquemember + assert 0 == len(Accounts(conn, DEFAULT_SUFFIX).filter('(cn=*)')) + conn = UserAccount(topo.standalone, USER_ANUJ).bind(PW_DM) + # deny_access_to_group_should_deny_access_to_all_uniquemember + assert 0 == len(Accounts(conn, DEFAULT_SUFFIX).filter('(cn=*)')) + # with root there is no aci blockage + assert 4 == len(Accounts(topo.standalone, DEFAULT_SUFFIX).filter('(cn=*)')) + + +def test_entry_with_lots_100_attributes(topo, add_test_user, aci_of_user): + """Search Test 39 entry with lots (>100) attributes + + :id: fc155f74-6e12-11e8-96ac-8c16451d917b + :setup: Standalone Instance + :steps: + 1. Add Entry + 2. Bind with test USER_ANUJ + 3. Try search + 4. Delete Entry,test USER_ANUJ, ACI + :expectedresults: + 1. Operation should success + 3. Operation should success + 4. Operation should success + 5. 
Operation should success + """ + for i in range(100): + user = UserAccounts(topo.standalone, DEFAULT_SUFFIX, rdn='ou=People').create_test_user(uid=i) + user.set("userPassword", "password") + + conn = UserAccount(topo.standalone, "uid=test_user_1,ou=People,{}".format(DEFAULT_SUFFIX)).bind(PW_DM) + # no aci no blockage + assert 1 == len(Accounts(conn, DEFAULT_SUFFIX).filter('(cn=Anuj*)')) + # no aci no blockage + assert 103 == len(Accounts(conn, DEFAULT_SUFFIX).filter('(uid=*)')) + conn = Anonymous(topo.standalone).bind() + # anonymous_search_on_monitor_entry + assert 103 == len(Accounts(conn, DEFAULT_SUFFIX).filter('(uid=*)')) + + +@pytest.mark.bz301798 +def test_groupdnattr_value_is_another_group(topo): + """Search Test 42 groupdnattr value is another group test #1 + + :id: 52299e16-7944-11e8-b471-8c16451d917b + :setup: server + :steps: + 1. Add test entry + 2. Add ACI + 3. USER_ANUJ should follow ACI role + :expectedresults: + 1. Entry should be added + 2. Operation should succeed + 3. 
Operation should succeed + """ + Organization(topo.standalone).create(properties={"o": "nscpRoot"}, basedn=DEFAULT_SUFFIX) + + user = UserAccount(topo.standalone, "cn=dchan,o=nscpRoot,{}".format(DEFAULT_SUFFIX)) + user.create(properties={ + 'uid': 'dchan', + 'cn': 'dchan', + 'sn': 'user', + 'uidNumber': '1000', + 'gidNumber': '2000', + 'homeDirectory': '/home/' + 'dchan', + 'userPassword': PW_DM + }) + + grp = UniqueGroup(topo.standalone, 'cn=groupx,o=nscpRoot,' + DEFAULT_SUFFIX) + grp.create(properties={ + 'cn': 'groupx', + 'ou': 'groups', + }) + grp.set('uniquemember', 'cn=dchan,o=nscpRoot,{}'.format(DEFAULT_SUFFIX)) + grp.set('aci', '(targetattr="*")(version 3.0; acl "Enable Group Expansion"; allow (read, search, compare) groupdnattr="ldap:///o=nscpRoot?uniquemember?sub";)') + + conn = UserAccount(topo.standalone, 'cn=dchan,o=nscpRoot,{}'.format(DEFAULT_SUFFIX),).bind(PW_DM) + # acil will allow ldap:///o=nscpRoot?uniquemember?sub" + assert UserAccount(conn, 'cn=groupx,o=nscpRoot,{}'.format(DEFAULT_SUFFIX)).get_attr_val_utf8('cn') == 'groupx' + + +if __name__ == "__main__": + CURRENT_FILE = os.path.realpath(__file__) + pytest.main("-s -v %s" % CURRENT_FILE) diff --git a/dirsrvtests/tests/suites/acl/search_real_test.py b/dirsrvtests/tests/suites/acl/search_real_test.py new file mode 100644 index 0000000..5695d46 --- /dev/null +++ b/dirsrvtests/tests/suites/acl/search_real_test.py @@ -0,0 +1,430 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2020 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. 
+# --- END COPYRIGHT BLOCK ---- + +import pytest, os, ldap +from lib389._constants import DEFAULT_SUFFIX, PW_DM, ErrorLog +from lib389.idm.user import UserAccount, UserAccounts +from lib389.idm.account import Accounts +from lib389.idm.organizationalunit import OrganizationalUnit +from lib389.idm.group import Groups +from lib389.topologies import topology_st as topo +from lib389.idm.domain import Domain +from lib389.idm.posixgroup import PosixGroups + +pytestmark = pytest.mark.tier1 + +CONTAINER_1_DELADD = "ou=Product Development,{}".format(DEFAULT_SUFFIX) +CONTAINER_2_DELADD = "ou=Accounting,{}".format(DEFAULT_SUFFIX) +USER_ANUJ = "uid=Anuj Borah,{}".format(CONTAINER_1_DELADD) +USER_ANANDA = "uid=Ananda Borah,{}".format(CONTAINER_2_DELADD) + + +@pytest.fixture(scope="function") +def aci_of_user(request, topo): + # Add anonymous access aci + ACI_TARGET = "(targetattr != \"userpassword\")(target = \"ldap:///%s\")" % (DEFAULT_SUFFIX) + ACI_ALLOW = "(version 3.0; acl \"Anonymous Read access\"; allow (read,search,compare)" + ACI_SUBJECT = "(userdn=\"ldap:///anyone\");)" + ANON_ACI = ACI_TARGET + ACI_ALLOW + ACI_SUBJECT + suffix = Domain(topo.standalone, DEFAULT_SUFFIX) + try: + suffix.add('aci', ANON_ACI) + except ldap.TYPE_OR_VALUE_EXISTS: + pass + + aci_list = Domain(topo.standalone, DEFAULT_SUFFIX).get_attr_vals('aci') + + def finofaci(): + domain = Domain(topo.standalone, DEFAULT_SUFFIX) + domain.set('aci', None) + for i in aci_list: + domain.add("aci", i) + + request.addfinalizer(finofaci) + + +@pytest.fixture(scope="module") +def add_test_user(request, topo): + topo.standalone.config.loglevel((ErrorLog.ACL_SUMMARY,)) + + for i in ['Product Development', 'Accounting']: + OrganizationalUnit(topo.standalone, "ou={},{}".format(i, DEFAULT_SUFFIX)).create(properties={'ou': i}) + + users = UserAccounts(topo.standalone, DEFAULT_SUFFIX, rdn='ou=Product Development') + users.create(properties={ + 'uid': 'Anuj Borah', + 'cn': 'Anuj Borah', + 'sn': 'user', + 'uidNumber': 
'1000', + 'gidNumber': '2000', + 'homeDirectory': '/home/' + 'AnujBorah', + 'userPassword': PW_DM + }) + + users = UserAccounts(topo.standalone, DEFAULT_SUFFIX, rdn='ou=Accounting') + users.create(properties={ + 'uid': 'Ananda Borah', + 'cn': 'Ananda Borah', + 'sn': 'user', + 'uidNumber': '1000', + 'gidNumber': '2000', + 'homeDirectory': '/home/' + 'AnandaBorah', + 'userPassword': PW_DM + }) + + +def test_deny_all_access_with_target_set(topo, add_test_user, aci_of_user): + """Test that Deny all access with target set + + :id: 0550e680-6e0e-11e8-82f4-8c16451d917b + :setup: Standalone Instance + :steps: + 1. Add Entry + 2. Add ACI + 3. Bind with test USER_ANUJ + 4. Try search + 5. Delete Entry,test USER_ANUJ, ACI + :expectedresults: + 1. Operation should success + 2. Operation should success + 3. Operation should success + 4. Operation should Fail + 5. Operation should success + """ + ACI_TARGET = '(target = ldap:///{})(targetattr="*")'.format(USER_ANANDA) + ACI_ALLOW = '(version 3.0; acl "Name of the ACI"; deny absolute (all)' + ACI_SUBJECT = 'userdn="ldap:///anyone";)' + ACI_BODY = ACI_TARGET + ACI_ALLOW + ACI_SUBJECT + Domain(topo.standalone, DEFAULT_SUFFIX).add("aci", ACI_BODY) + conn = UserAccount(topo.standalone, USER_ANANDA).bind(PW_DM) + # aci will block all for all usrs + assert 0 == len(Accounts(conn, DEFAULT_SUFFIX).filter('(cn=Ananda*)')) + conn = UserAccount(topo.standalone, USER_ANUJ).bind(PW_DM) + # aci will block all for all usrs + assert 0 == len(Accounts(conn, DEFAULT_SUFFIX).filter('(cn=Ananda*)')) + # with root there is no aci blockage + assert 1 == len(Accounts(topo.standalone, DEFAULT_SUFFIX).filter('(cn=Ananda*)')) + + +def test_deny_all_access_to_a_target_with_wild_card(topo, add_test_user, aci_of_user): + """Search Test 2 Deny all access to a target with wild card + + :id: 1c370f98-6e11-11e8-9f10-8c16451d917b + :setup: Standalone Instance + :steps: + 1. Add Entry + 2. Add ACI + 3. Bind with test USER_ANUJ + 4. Try search + 5. 
Delete Entry,test USER_ANUJ, ACI + :expectedresults: + 1. Operation should success + 2. Operation should success + 3. Operation should success + 4. Operation should Fail + 5. Operation should success + """ + ACI_TARGET = '(target = ldap:///uid=Ananda*, ou=*,{})(targetattr="*")'.format( + DEFAULT_SUFFIX + ) + ACI_ALLOW = '(version 3.0; acl "Name of the ACI"; deny absolute (all)' + ACI_SUBJECT = 'userdn="ldap:///anyone";)' + ACI_BODY = ACI_TARGET + ACI_ALLOW + ACI_SUBJECT + Domain(topo.standalone, DEFAULT_SUFFIX).add("aci", ACI_BODY) + conn = UserAccount(topo.standalone, USER_ANANDA).bind(PW_DM) + # aci will block (cn=Sam*) for all usrs + assert 0 == len(Accounts(conn, DEFAULT_SUFFIX).filter('(cn=Ananda*)')) + conn = UserAccount(topo.standalone, USER_ANUJ).bind(PW_DM) + # aci will block (cn=Sam*) for all usrs + assert 0 == len(Accounts(conn, DEFAULT_SUFFIX).filter('(cn=Ananda*)')) + # with root there is no aci blockage + assert 1 == len(Accounts(topo.standalone, DEFAULT_SUFFIX).filter('(cn=Ananda*)')) + + +def test_deny_all_access_without_a_target_set(topo, add_test_user, aci_of_user): + """Search Test 3 Deny all access without a target set + + :id: 2dbeb36a-6e11-11e8-ab9f-8c16451d917b + :setup: Standalone Instance + :steps: + 1. Add Entry + 2. Add ACI + 3. Bind with test USER_ANUJ + 4. Try search + 5. Delete Entry,test USER_ANUJ, ACI + :expectedresults: + 1. Operation should success + 2. Operation should success + 3. Operation should success + 4. Operation should Fail + 5. 
Operation should success + """ + ACI_TARGET = '(targetattr="*")' + ACI_ALLOW = '(version 3.0; acl "Name of the ACI"; deny absolute (all)' + ACI_SUBJECT = 'userdn="ldap:///anyone";)' + ACI_BODY = ACI_TARGET + ACI_ALLOW + ACI_SUBJECT + Domain(topo.standalone, DEFAULT_SUFFIX).add("aci", ACI_BODY) + conn = UserAccount(topo.standalone, USER_ANANDA).bind(PW_DM) + # aci will block all for all usrs + assert 0 == len(Accounts(conn, DEFAULT_SUFFIX).filter('(ou=Accounting)')) + conn = UserAccount(topo.standalone, USER_ANUJ).bind(PW_DM) + # aci will block all for all usrs + assert 0 == len(Accounts(conn, DEFAULT_SUFFIX).filter('(ou=Accounting)')) + # with root there is no aci blockage + assert 1 == len(Accounts(topo.standalone, DEFAULT_SUFFIX).filter('(ou=Accounting)')) + + +def test_deny_read_search_and_compare_access_with_target_and_targetattr_set( + topo, add_test_user, aci_of_user +): + """Search Test 4 Deny read, search and compare access with target and targetattr set + + :id: 3f4a87e4-6e11-11e8-a09f-8c16451d917b + :setup: Standalone Instance + :steps: + 1. Add Entry + 2. Add ACI + 3. Bind with test USER_ANUJ + 4. Try search + 5. Delete Entry,test USER_ANUJ, ACI + :expectedresults: + 1. Operation should success + 2. Operation should success + 3. Operation should success + 4. Operation should Fail + 5. 
Operation should success + """ + ACI_TARGET = '(target = ldap:///{})(targetattr="*")'.format(CONTAINER_2_DELADD) + ACI_ALLOW = '(version 3.0; acl "Name of the ACI"; deny absolute (all)' + ACI_SUBJECT = 'userdn="ldap:///anyone";)' + ACI_BODY = ACI_TARGET + ACI_ALLOW + ACI_SUBJECT + Domain(topo.standalone, DEFAULT_SUFFIX).add("aci", ACI_BODY) + conn = UserAccount(topo.standalone, USER_ANANDA).bind(PW_DM) + # aci will block all for all usrs + assert 0 == len(Accounts(conn, DEFAULT_SUFFIX).filter('(ou=Accounting)')) + conn = UserAccount(topo.standalone, USER_ANUJ).bind(PW_DM) + # aci will block all for all usrs + assert 0 == len(Accounts(conn, DEFAULT_SUFFIX).filter('(ou=Accounting)')) + # with root there is no aci blockage + assert 1 == len(Accounts(topo.standalone, DEFAULT_SUFFIX).filter('(ou=Accounting)')) + + +def test_deny_read_access_to_multiple_groupdns(topo, add_test_user, aci_of_user): + """Search Test 6 Deny read access to multiple groupdn's + + :id: 8f3ba440-6e11-11e8-8b20-8c16451d917b + :setup: Standalone Instance + :steps: + 1. Add Entry + 2. Add ACI + 3. Bind with test USER_ANUJ + 4. Try search + 5. Delete Entry,test USER_ANUJ, ACI + :expectedresults: + 1. Operation should success + 2. Operation should success + 3. Operation should success + 4. Operation should Fail + 5. 
Operation should success + """ + groups = Groups(topo.standalone, DEFAULT_SUFFIX) + group = groups.create(properties={"cn": "group1", + "description": "testgroup" + }) + group.add_member(USER_ANANDA) + + posix_groups = PosixGroups(topo.standalone, DEFAULT_SUFFIX) + posix_group = posix_groups.create(properties={ + "cn": "group2", + "description": "testgroup2", + "gidNumber": "2000", + }) + posix_group.add_member(USER_ANUJ) + + ACI_TARGET = '(targetattr="*")' + ACI_ALLOW = '(version 3.0; acl "All rights for cn=group1,ou=Groups,{}"; deny(read)'.format(DEFAULT_SUFFIX) + ACI_SUBJECT = 'groupdn="ldap:///cn=group1,ou=Groups,{}||ldap:///cn=group2,ou=Groups,{}";)'.format(DEFAULT_SUFFIX, DEFAULT_SUFFIX) + ACI_BODY = ACI_TARGET + ACI_ALLOW + ACI_SUBJECT + Domain(topo.standalone, DEFAULT_SUFFIX).add("aci", ACI_BODY) + conn = UserAccount(topo.standalone, USER_ANANDA).bind(PW_DM) + # aci will block 'groupdn="ldap:///cn=group1,ou=Groups,dc=example,dc=com||ldap:///cn=group2,ou=Groups,dc=example,dc=com";) + assert 0 == len(Accounts(conn, DEFAULT_SUFFIX).filter('(cn=*)')) + conn = UserAccount(topo.standalone, USER_ANUJ).bind(PW_DM) + # aci will block 'groupdn="ldap:///cn=group1,ou=Groups,dc=example,dc=com||ldap:///cn=group2,ou=Groups,dc=example,dc=com";) + assert 0 == len(Accounts(conn, DEFAULT_SUFFIX).filter('(cn=*)')) + # with root there is no aci blockage + assert 5 == len(Accounts(topo.standalone, DEFAULT_SUFFIX).filter('(cn=*)')) + group = groups.get("group1") + group.delete() + posix_groups.get("group2") + posix_group.delete() + + +def test_deny_all_access_to_userdnattr(topo, add_test_user, aci_of_user): + """Search Test 7 Deny all access to userdnattr" + + :id: ae482494-6e11-11e8-ae33-8c16451d917b + :setup: Standalone Instance + :steps: + 1. Add Entry + 2. Add ACI + 3. Bind with test USER_ANUJ + 4. Try search + 5. Delete Entry,test USER_ANUJ, ACI + :expectedresults: + 1. Operation should success + 2. Operation should success + 3. Operation should success + 4. 
Operation should Fail + 5. Operation should success + """ + UserAccount(topo.standalone, USER_ANUJ).add('manager', USER_ANANDA) + ACI_TARGET = '(target = ldap:///{})(targetattr="*")'.format(DEFAULT_SUFFIX) + ACI_ALLOW = '(version 3.0; acl "Name of the ACI"; deny absolute (all)' + ACI_SUBJECT = 'userdnattr="manager";)' + ACI_BODY = ACI_TARGET + ACI_ALLOW + ACI_SUBJECT + Domain(topo.standalone, DEFAULT_SUFFIX).add("aci", ACI_BODY) + conn = UserAccount(topo.standalone, USER_ANANDA).bind(PW_DM) + # aci will block only 'userdnattr="manager" + assert 0 == len(Accounts(conn, DEFAULT_SUFFIX).filter('(cn=Anuj Borah)')) + conn = UserAccount(topo.standalone, USER_ANUJ).bind(PW_DM) + # aci will block only 'userdnattr="manager" + assert 1 == len(Accounts(conn, DEFAULT_SUFFIX).filter('(cn=Anuj Borah)')) + # with root there is no aci blockage + assert 1 == len(Accounts(topo.standalone, DEFAULT_SUFFIX).filter('(cn=Anuj Borah)')) + UserAccount(topo.standalone, USER_ANUJ).remove('manager', USER_ANANDA) + + +def test_deny_all_access_with__target_set(topo, add_test_user, aci_of_user, request): + """Search Test 8 Deny all access with != target set + + :id: bc00aed0-6e11-11e8-be66-8c16451d917b + :setup: Standalone Instance + :steps: + 1. Add Entry + 2. Add ACI + 3. Bind with test USER_ANUJ + 4. Try search + 5. Delete Entry,test USER_ANUJ, ACI + :expectedresults: + 1. Operation should success + 2. Operation should success + 3. Operation should success + 4. Operation should Fail + 5. 
Operation should success + """ + Domain(topo.standalone, DEFAULT_SUFFIX).add("aci",'(target != "ldap:///{}")(targetattr = "*")' + '(version 3.0; acl "{}"; deny absolute (all) (userdn = "ldap:///anyone") ;)'.format(USER_ANANDA, request.node.name)) + conn = UserAccount(topo.standalone, USER_ANANDA).bind(PW_DM) + # aci will not block USER_ANANDA will block others + assert 1 == len(Accounts(conn, DEFAULT_SUFFIX).filter('(cn=*)')) + conn = UserAccount(topo.standalone, USER_ANUJ).bind(PW_DM) + # aci will not block USER_ANANDA will block others + assert 1 == len(Accounts(conn, DEFAULT_SUFFIX).filter('(cn=*)')) + # with root there is no aci blockage + assert 4 == len(Accounts(topo.standalone, DEFAULT_SUFFIX).filter('(cn=*)')) + + +def test_deny_all_access_with__targetattr_set(topo, add_test_user, aci_of_user): + """Search Test 9 Deny all access with != targetattr set + + :id: d2d73b2e-6e11-11e8-ad3d-8c16451d917b + :setup: Standalone Instance + :steps: + 1. Add Entry + 2. Add ACI + 3. Bind with test USER_ANUJ + 4. Try search + 5. Delete Entry,test USER_ANUJ, ACI + :expectedresults: + 1. Operation should success + 2. Operation should success + 3. Operation should success + 4. Operation should Fail + 5. 
Operation should success + """ + testusers = UserAccounts(topo.standalone, DEFAULT_SUFFIX) + user = testusers.create(properties={ + 'uid': 'Anuj', + 'cn': 'Anuj', + 'sn': 'user', + 'uidNumber': '1000', + 'gidNumber': '2000', + 'homeDirectory': '/home/' + 'Anuj', + 'userPassword': PW_DM + }) + + ACI_TARGET = '(targetattr != "uid||Objectclass")' + ACI_ALLOW = '(version 3.0; acl "Name of the ACI"; deny absolute (all)' + ACI_SUBJECT = 'userdn="ldap:///anyone";)' + ACI_BODY = ACI_TARGET + ACI_ALLOW + ACI_SUBJECT + Domain(topo.standalone, DEFAULT_SUFFIX).add("aci", ACI_BODY) + conn = UserAccount(topo.standalone, USER_ANANDA).bind(PW_DM) + # aci will allow only uid=* + assert 4 == len(Accounts(conn, DEFAULT_SUFFIX).filter('(uid=*)')) + # aci will allow only uid=* + assert 0 == len(Accounts(conn, DEFAULT_SUFFIX).filter('(cn=*)')) + conn = UserAccount(topo.standalone, USER_ANUJ).bind(PW_DM) + # aci will allow only uid=* + assert 4 == len(Accounts(conn, DEFAULT_SUFFIX).filter('(uid=*)')) + # aci will allow only uid=* + assert 0 == len(Accounts(conn, DEFAULT_SUFFIX).filter('(cn=*)')) + # with root there is no aci blockage + assert 4 == len(Accounts(topo.standalone, DEFAULT_SUFFIX).filter('(uid=*)')) + # with root there is no aci blockage + assert 5 == len(Accounts(topo.standalone, DEFAULT_SUFFIX).filter('(cn=*)')) + user.delete() + + +def test_deny_all_access_with_targetattr_set(topo, add_test_user, aci_of_user): + """Search Test 10 Deny all access with targetattr set + + :id: e1602ff2-6e11-11e8-8e55-8c16451d917b + :setup: Standalone Instance + :steps: + 1. Add Entry + 2. Add ACI + 3. Bind with test USER_ANUJ + 4. Try search + 5. Delete Entry,test USER_ANUJ, ACI + :expectedresults: + 1. Operation should success + 2. Operation should success + 3. Operation should success + 4. Operation should Fail + 5. 
Operation should success + """ + testuser = UserAccount(topo.standalone, "cn=Anuj12,ou=People,{}".format(DEFAULT_SUFFIX)) + testuser.create(properties={ + 'uid': 'Anuj12', + 'cn': 'Anuj12', + 'sn': 'user', + 'uidNumber': '1000', + 'gidNumber': '2000', + 'homeDirectory': '/home/' + 'Anuj12' + }) + + ACI_TARGET = '(targetattr="uid")' + ACI_ALLOW = '(version 3.0; acl "Name of the ACI"; deny absolute (all)' + ACI_SUBJECT = 'userdn="ldap:///anyone";)' + ACI_BODY = ACI_TARGET + ACI_ALLOW + ACI_SUBJECT + Domain(topo.standalone, DEFAULT_SUFFIX).add("aci", ACI_BODY) + conn = UserAccount(topo.standalone, USER_ANANDA).bind(PW_DM) + # aci will block only uid=* + assert 0 == len(Accounts(conn, DEFAULT_SUFFIX).filter('(uid=*)')) + conn = UserAccount(topo.standalone, USER_ANUJ).bind(PW_DM) + # aci will block only uid=* + assert 0 == len(Accounts(conn, DEFAULT_SUFFIX).filter('(uid=*)')) + # with root there is no aci blockage + assert 4 == len(Accounts(topo.standalone, DEFAULT_SUFFIX).filter('(uid=*)')) + testuser.delete() + + +if __name__ == "__main__": + CURRENT_FILE = os.path.realpath(__file__) + pytest.main("-s -v %s" % CURRENT_FILE) diff --git a/dirsrvtests/tests/suites/acl/selfdn_permissions_test.py b/dirsrvtests/tests/suites/acl/selfdn_permissions_test.py new file mode 100644 index 0000000..dd506a7 --- /dev/null +++ b/dirsrvtests/tests/suites/acl/selfdn_permissions_test.py @@ -0,0 +1,353 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2020 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. 
+# --- END COPYRIGHT BLOCK --- +# +import logging + +import ldap +import pytest +from lib389 import Entry +from lib389._constants import * +from lib389.topologies import topology_st + +log = logging.getLogger(__name__) + +from lib389.utils import * + +# Skip on older versions +pytestmark = [pytest.mark.tier1, + pytest.mark.skipif(ds_is_older('1.3.2'), reason="Not implemented")] +OC_NAME = 'OCticket47653' +MUST = "(postalAddress $ postalCode)" +MAY = "(member $ street)" + +OTHER_NAME = 'other_entry' +MAX_OTHERS = 10 + +BIND_NAME = 'bind_entry' +BIND_DN = 'cn=%s, %s' % (BIND_NAME, SUFFIX) +BIND_PW = 'password' + +ENTRY_NAME = 'test_entry' +ENTRY_DN = 'cn=%s, %s' % (ENTRY_NAME, SUFFIX) +ENTRY_OC = "top person %s" % OC_NAME + + +def _oc_definition(oid_ext, name, must=None, may=None): + oid = "1.2.3.4.5.6.7.8.9.10.%d" % oid_ext + desc = 'To test ticket 47490' + sup = 'person' + if not must: + must = MUST + if not may: + may = MAY + + new_oc = "( %s NAME '%s' DESC '%s' SUP %s AUXILIARY MUST %s MAY %s )" % (oid, name, desc, sup, must, may) + return ensure_bytes(new_oc) + + +@pytest.fixture(scope="module") +def allow_user_init(topology_st): + """Initialize the test environment + + """ + topology_st.standalone.log.info("Add %s that allows 'member' attribute" % OC_NAME) + new_oc = _oc_definition(2, OC_NAME, must=MUST, may=MAY) + topology_st.standalone.schema.add_schema('objectClasses', new_oc) + + # entry used to bind with + topology_st.standalone.log.info("Add %s" % BIND_DN) + topology_st.standalone.add_s(Entry((BIND_DN, { + 'objectclass': "top person".split(), + 'sn': BIND_NAME, + 'cn': BIND_NAME, + 'userpassword': BIND_PW}))) + + # enable acl error logging + mod = [(ldap.MOD_REPLACE, 'nsslapd-errorlog-level', b'128')] + topology_st.standalone.modify_s(DN_CONFIG, mod) + + # Remove aci's to start with a clean slate + mod = [(ldap.MOD_DELETE, 'aci', None)] + topology_st.standalone.modify_s(SUFFIX, mod) + + # add dummy entries + for cpt in range(MAX_OTHERS): + name = "%s%d" % 
(OTHER_NAME, cpt) + topology_st.standalone.add_s(Entry(("cn=%s,%s" % (name, SUFFIX), { + 'objectclass': "top person".split(), + 'sn': name, + 'cn': name}))) + + +@pytest.mark.ds47653 +def test_selfdn_permission_add(topology_st, allow_user_init): + """Check add entry operation with and without SelfDN aci + + :id: e837a9ef-be92-48da-ad8b-ebf42b0fede1 + :setup: Standalone instance, add a entry which is used to bind, + enable acl error logging by setting 'nsslapd-errorlog-level' to '128', + remove aci's to start with a clean slate, and add dummy entries + :steps: + 1. Check we can not ADD an entry without the proper SELFDN aci + 2. Check with the proper ACI we can not ADD with 'member' attribute + 3. Check entry to add with memberS and with the ACI + 4. Check with the proper ACI and 'member' it succeeds to ADD + :expectedresults: + 1. Operation should be successful + 2. Operation should be successful + 3. Operation should fail with Insufficient Access + 4. Operation should be successful + """ + topology_st.standalone.log.info("\n\n######################### ADD ######################\n") + + # bind as bind_entry + topology_st.standalone.log.info("Bind as %s" % BIND_DN) + topology_st.standalone.simple_bind_s(BIND_DN, BIND_PW) + + # Prepare the entry with multivalued members + entry_with_members = Entry(ENTRY_DN) + entry_with_members.setValues('objectclass', 'top', 'person', 'OCticket47653') + entry_with_members.setValues('sn', ENTRY_NAME) + entry_with_members.setValues('cn', ENTRY_NAME) + entry_with_members.setValues('postalAddress', 'here') + entry_with_members.setValues('postalCode', '1234') + members = [] + for cpt in range(MAX_OTHERS): + name = "%s%d" % (OTHER_NAME, cpt) + members.append("cn=%s,%s" % (name, SUFFIX)) + members.append(BIND_DN) + entry_with_members.setValues('member', members) + + # Prepare the entry with one member + entry_with_member = Entry(ENTRY_DN) + entry_with_member.setValues('objectclass', 'top', 'person', 'OCticket47653') + 
entry_with_member.setValues('sn', ENTRY_NAME) + entry_with_member.setValues('cn', ENTRY_NAME) + entry_with_member.setValues('postalAddress', 'here') + entry_with_member.setValues('postalCode', '1234') + member = [] + member.append(BIND_DN) + entry_with_member.setValues('member', member) + + # entry to add WITH member being BIND_DN but WITHOUT the ACI -> ldap.INSUFFICIENT_ACCESS + try: + topology_st.standalone.log.info("Try to add Add %s (aci is missing): %r" % (ENTRY_DN, entry_with_member)) + + topology_st.standalone.add_s(entry_with_member) + except Exception as e: + topology_st.standalone.log.info("Exception (expected): %s" % type(e).__name__) + assert isinstance(e, ldap.INSUFFICIENT_ACCESS) + + # Ok Now add the proper ACI + topology_st.standalone.log.info("Bind as %s and add the ADD SELFDN aci" % DN_DM) + topology_st.standalone.simple_bind_s(DN_DM, PASSWORD) + + ACI_TARGET = "(target = \"ldap:///cn=*,%s\")" % SUFFIX + ACI_TARGETFILTER = "(targetfilter =\"(objectClass=%s)\")" % OC_NAME + ACI_ALLOW = "(version 3.0; acl \"SelfDN add\"; allow (add)" + ACI_SUBJECT = " userattr = \"member#selfDN\";)" + ACI_BODY = ACI_TARGET + ACI_TARGETFILTER + ACI_ALLOW + ACI_SUBJECT + mod = [(ldap.MOD_ADD, 'aci', ensure_bytes(ACI_BODY))] + topology_st.standalone.modify_s(SUFFIX, mod) + + # bind as bind_entry + topology_st.standalone.log.info("Bind as %s" % BIND_DN) + topology_st.standalone.simple_bind_s(BIND_DN, BIND_PW) + + # entry to add WITHOUT member and WITH the ACI -> ldap.INSUFFICIENT_ACCESS + try: + topology_st.standalone.log.info("Try to add Add %s (member is missing)" % ENTRY_DN) + topology_st.standalone.add_s(Entry((ENTRY_DN, { + 'objectclass': ENTRY_OC.split(), + 'sn': ENTRY_NAME, + 'cn': ENTRY_NAME, + 'postalAddress': 'here', + 'postalCode': '1234'}))) + except Exception as e: + topology_st.standalone.log.info("Exception (expected): %s" % type(e).__name__) + assert isinstance(e, ldap.INSUFFICIENT_ACCESS) + + # entry to add WITH memberS and WITH the ACI -> 
ldap.INSUFFICIENT_ACCESS + # member should contain only one value + try: + topology_st.standalone.log.info("Try to add Add %s (with several member values)" % ENTRY_DN) + topology_st.standalone.add_s(entry_with_members) + except Exception as e: + topology_st.standalone.log.info("Exception (expected): %s" % type(e).__name__) + assert isinstance(e, ldap.INSUFFICIENT_ACCESS) + + topology_st.standalone.log.info("Try to add Add %s should be successful" % ENTRY_DN) + topology_st.standalone.add_s(entry_with_member) + + +@pytest.mark.ds47653 +def test_selfdn_permission_search(topology_st, allow_user_init): + """Check search operation with and without SelfDN aci + + :id: 06d51ef9-c675-4583-99b2-4852dbda190e + :setup: Standalone instance, add a entry which is used to bind, + enable acl error logging by setting 'nsslapd-errorlog-level' to '128', + remove aci's to start with a clean slate, and add dummy entries + :steps: + 1. Check we can not search an entry without the proper SELFDN aci + 2. Add proper ACI + 3. Check we can search with the proper ACI + :expectedresults: + 1. Operation should be successful + 2. Operation should be successful + 3. 
Operation should be successful + """ + topology_st.standalone.log.info("\n\n######################### SEARCH ######################\n") + # bind as bind_entry + topology_st.standalone.log.info("Bind as %s" % BIND_DN) + topology_st.standalone.simple_bind_s(BIND_DN, BIND_PW) + + # entry to search WITH member being BIND_DN but WITHOUT the ACI -> no entry returned + topology_st.standalone.log.info("Try to search %s (aci is missing)" % ENTRY_DN) + ents = topology_st.standalone.search_s(ENTRY_DN, ldap.SCOPE_BASE, 'objectclass=*') + assert len(ents) == 0 + + # Ok Now add the proper ACI + topology_st.standalone.log.info("Bind as %s and add the READ/SEARCH SELFDN aci" % DN_DM) + topology_st.standalone.simple_bind_s(DN_DM, PASSWORD) + + ACI_TARGET = "(target = \"ldap:///cn=*,%s\")" % SUFFIX + ACI_TARGETATTR = '(targetattr="*")' + ACI_TARGETFILTER = "(targetfilter =\"(objectClass=%s)\")" % OC_NAME + ACI_ALLOW = "(version 3.0; acl \"SelfDN search-read\"; allow (read, search, compare)" + ACI_SUBJECT = " userattr = \"member#selfDN\";)" + ACI_BODY = ACI_TARGET + ACI_TARGETATTR + ACI_TARGETFILTER + ACI_ALLOW + ACI_SUBJECT + mod = [(ldap.MOD_ADD, 'aci', ensure_bytes(ACI_BODY))] + topology_st.standalone.modify_s(SUFFIX, mod) + + # bind as bind_entry + topology_st.standalone.log.info("Bind as %s" % BIND_DN) + topology_st.standalone.simple_bind_s(BIND_DN, BIND_PW) + + # entry to search with the proper aci + topology_st.standalone.log.info("Try to search %s should be successful" % ENTRY_DN) + ents = topology_st.standalone.search_s(ENTRY_DN, ldap.SCOPE_BASE, 'objectclass=*') + assert len(ents) == 1 + + +@pytest.mark.ds47653 +def test_selfdn_permission_modify(topology_st, allow_user_init): + """Check modify operation with and without SelfDN aci + + :id: 97a58844-095f-44b0-9029-dd29a7d83d68 + :setup: Standalone instance, add a entry which is used to bind, + enable acl error logging by setting 'nsslapd-errorlog-level' to '128', + remove aci's to start with a clean slate, and add dummy 
entries + :steps: + 1. Check we can not modify an entry without the proper SELFDN aci + 2. Add proper ACI + 3. Modify the entry and check the modified value + :expectedresults: + 1. Operation should be successful + 2. Operation should be successful + 3. Operation should be successful + """ + # bind as bind_entry + topology_st.standalone.log.info("Bind as %s" % BIND_DN) + topology_st.standalone.simple_bind_s(BIND_DN, BIND_PW) + + topology_st.standalone.log.info("\n\n######################### MODIFY ######################\n") + + # entry to modify WITH member being BIND_DN but WITHOUT the ACI -> ldap.INSUFFICIENT_ACCESS + try: + topology_st.standalone.log.info("Try to modify %s (aci is missing)" % ENTRY_DN) + mod = [(ldap.MOD_REPLACE, 'postalCode', b'9876')] + topology_st.standalone.modify_s(ENTRY_DN, mod) + except Exception as e: + topology_st.standalone.log.info("Exception (expected): %s" % type(e).__name__) + assert isinstance(e, ldap.INSUFFICIENT_ACCESS) + + # Ok Now add the proper ACI + topology_st.standalone.log.info("Bind as %s and add the WRITE SELFDN aci" % DN_DM) + topology_st.standalone.simple_bind_s(DN_DM, PASSWORD) + + ACI_TARGET = "(target = \"ldap:///cn=*,%s\")" % SUFFIX + ACI_TARGETATTR = '(targetattr="*")' + ACI_TARGETFILTER = "(targetfilter =\"(objectClass=%s)\")" % OC_NAME + ACI_ALLOW = "(version 3.0; acl \"SelfDN write\"; allow (write)" + ACI_SUBJECT = " userattr = \"member#selfDN\";)" + ACI_BODY = ACI_TARGET + ACI_TARGETATTR + ACI_TARGETFILTER + ACI_ALLOW + ACI_SUBJECT + mod = [(ldap.MOD_ADD, 'aci', ensure_bytes(ACI_BODY))] + topology_st.standalone.modify_s(SUFFIX, mod) + + # bind as bind_entry + topology_st.standalone.log.info("Bind as %s" % BIND_DN) + topology_st.standalone.simple_bind_s(BIND_DN, BIND_PW) + + # modify the entry and checks the value + topology_st.standalone.log.info("Try to modify %s. 
It should succeeds" % ENTRY_DN) + mod = [(ldap.MOD_REPLACE, 'postalCode', b'1928')] + topology_st.standalone.modify_s(ENTRY_DN, mod) + + ents = topology_st.standalone.search_s(ENTRY_DN, ldap.SCOPE_BASE, 'objectclass=*') + assert len(ents) == 1 + assert ensure_str(ents[0].postalCode) == '1928' + + +@pytest.mark.ds47653 +def test_selfdn_permission_delete(topology_st, allow_user_init): + """Check delete operation with and without SelfDN aci + + :id: 0ec4c0ec-e7b0-4ef1-8373-ab25aae34516 + :setup: Standalone instance, add a entry which is used to bind, + enable acl error logging by setting 'nsslapd-errorlog-level' to '128', + remove aci's to start with a clean slate, and add dummy entries + :steps: + 1. Check we can not delete an entry without the proper SELFDN aci + 2. Add proper ACI + 3. Check we can perform delete operation with proper ACI + :expectedresults: + 1. Operation should be successful + 2. Operation should be successful + 3. Operation should be successful + """ + topology_st.standalone.log.info("\n\n######################### DELETE ######################\n") + + # bind as bind_entry + topology_st.standalone.log.info("Bind as %s" % BIND_DN) + topology_st.standalone.simple_bind_s(BIND_DN, BIND_PW) + + # entry to delete WITH member being BIND_DN but WITHOUT the ACI -> ldap.INSUFFICIENT_ACCESS + try: + topology_st.standalone.log.info("Try to delete %s (aci is missing)" % ENTRY_DN) + topology_st.standalone.delete_s(ENTRY_DN) + except Exception as e: + topology_st.standalone.log.info("Exception (expected): %s" % type(e).__name__) + assert isinstance(e, ldap.INSUFFICIENT_ACCESS) + + # Ok Now add the proper ACI + topology_st.standalone.log.info("Bind as %s and add the READ/SEARCH SELFDN aci" % DN_DM) + topology_st.standalone.simple_bind_s(DN_DM, PASSWORD) + + ACI_TARGET = "(target = \"ldap:///cn=*,%s\")" % SUFFIX + ACI_TARGETFILTER = "(targetfilter =\"(objectClass=%s)\")" % OC_NAME + ACI_ALLOW = "(version 3.0; acl \"SelfDN delete\"; allow (delete)" + ACI_SUBJECT = 
" userattr = \"member#selfDN\";)" + ACI_BODY = ACI_TARGET + ACI_TARGETFILTER + ACI_ALLOW + ACI_SUBJECT + mod = [(ldap.MOD_ADD, 'aci', ensure_bytes(ACI_BODY))] + topology_st.standalone.modify_s(SUFFIX, mod) + + # bind as bind_entry + topology_st.standalone.log.info("Bind as %s" % BIND_DN) + topology_st.standalone.simple_bind_s(BIND_DN, BIND_PW) + + # entry to delete with the proper aci + topology_st.standalone.log.info("Try to delete %s should be successful" % ENTRY_DN) + topology_st.standalone.delete_s(ENTRY_DN) + + +if __name__ == '__main__': + # Run isolated + # -s for DEBUG mode + CURRENT_FILE = os.path.realpath(__file__) + pytest.main("-s %s" % CURRENT_FILE) diff --git a/dirsrvtests/tests/suites/acl/syntax_test.py b/dirsrvtests/tests/suites/acl/syntax_test.py new file mode 100644 index 0000000..4edc7fa --- /dev/null +++ b/dirsrvtests/tests/suites/acl/syntax_test.py @@ -0,0 +1,256 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2020 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. 
+# --- END COPYRIGHT BLOCK ---- + +import os +import pytest +from lib389._constants import DEFAULT_SUFFIX +from lib389.idm.domain import Domain +from lib389.topologies import topology_st as topo +from lib389.utils import ds_is_older + +import ldap + +pytestmark = pytest.mark.tier1 + +INVALID = [('test_targattrfilters_1', + f'(targattrfilters ="add=title:title=fred),del=cn:(cn!=harry)")' + f'(version 3.0; acl "Name of the ACI"; deny absolute (all)userdn="ldap:///anyone";)'), + ('test_targattrfilters_2', + f'(targattrfilters ="add=:(title=fred),del=cn:(cn!=harry)")' + f'(version 3.0; acl "Name of the ACI"; deny absolute (all)userdn="ldap:///anyone";)'), + ('test_targattrfilters_3', + f'(targattrfilters ="add=:(title=fred),del=cn:(cn!=harry))' + f'(version 3.0; acl "Name of the ACI"; deny absolute (all)userdn="ldap:///anyone";)'), + ('test_targattrfilters_4', + f'(targattrfilters ="add=title:(title=fred),=cn:(cn!=harry")' + f'(version 3.0; acl "Name of the ACI"; deny absolute (all)userdn="ldap:///anyone";)'), + ('test_targattrfilters_5', + f'(targattrfilters ="add=title:(|(title=fred)(cn=harry)),del=cn:(cn=harry)")' + f'(version 3.0; acl "Name of the ACI"; deny absolute (all)userdn="ldap:///anyone";)'), + ('test_targattrfilters_6', + f'(targattrfilters ="add=title:(|(title=fred)(title=harry)),del=cn:(title=harry)")' + f'(version 3.0; acl "Name of the ACI"; deny absolute (all)userdn="ldap:///anyone";)'), + ('test_targattrfilters_7', + f'(targattrfilters ="add=title:(cn=architect), ' + f'del=title:(title=architect) && l:(l=cn=Meylan,dc=example,dc=com")")' + f'(version 3.0; acl "Name of the ACI"; deny absolute (all)userdn="ldap:///anyone";)'), + ('test_targattrfilters_8', + f'(targattrfilters ="add=title:(cn=architect)")' + f'(version 3.0; acl "Name of the ACI"; deny absolute (all)userdn="ldap:///anyone";)'), + ('test_targattrfilters_9', + f'(targattrfilters ="add=title:(cn=arch*)")' + f'(version 3.0; acl "Name of the ACI"; deny absolute (all)userdn="ldap:///anyone";)'), 
+ ('test_targattrfilters_10', + f'(targattrfilters ="add=title:(cn >= 1)")' + f'(version 3.0; acl "Name of the ACI"; deny absolute (all)userdn="ldap:///anyone";)'), + ('test_targattrfilters_11', + f'(targattrfilters ="add=title:(cn <= 1)")' + f'(version 3.0; acl "Name of the ACI"; deny absolute (all)userdn="ldap:///anyone";)'), + ('test_targattrfilters_12', + f'(targattrfilters ="add=title:(cn ~= 1)")' + f'(version 3.0; acl "Name of the ACI"; deny absolute (all)userdn="ldap:///anyone";)'), + ('test_targattrfilters_13', + f'(targattrfilters ="add=title:(!(cn ~= 1))")' + f'(version 3.0; acl "Name of the ACI"; deny absolute (all)userdn="ldap:///anyone";)'), + ('test_targattrfilters_14', + f'(targattrfilters ="add=title:(&(cn=fred)(cn ~= 1))")' + f'(version 3.0; acl "Name of the ACI"; deny absolute (all)userdn="ldap:///anyone";)'), + ('test_targattrfilters_15', + f'(targattrfilters ="add=title:(|(cn=fred)(cn ~= 1))")' + f'(version 3.0; acl "Name of the ACI"; deny absolute (all)userdn="ldap:///anyone";)'), + ('test_targattrfilters_16', + f'(targattrfilters ="add=title:(&(|(title=fred)(title=harry))(cn ~= 1))")' + f'(version 3.0; acl "Name of the ACI"; deny absolute (all)userdn="ldap:///anyone";)'), + ('test_targattrfilters_17', + f'\(targattrfilters ="add=title:(&(|(&(title=harry)(title=fred))' + f'(title=harry))(title ~= 1))")' + f'(version 3.0; acl "Name of the ACI"; deny absolute (all)userdn="ldap:///anyone";)'), + ('test_targattrfilters_19', + f'(target = ldap:///cn=Jeff Vedder,ou=Product Development,{DEFAULT_SUFFIX})' + f'(targetattr="*")' + f'(version 3.0; acl "Name of the ACI"; deny(write)gropdn="ldap:///anyone";)'), + ('test_targattrfilters_21', + f'(target = ldap:///cn=Jeff Vedder,ou=Product Development,{DEFAULT_SUFFIX})' + f'(targetattr="*")' + f'(version 3.0; acl "Name of the ACI"; deny(rite)userdn="ldap:///anyone";)'), + ('test_targattrfilters_22', + f'(targt = ldap:///cn=Jeff Vedder,ou=Product Development,{DEFAULT_SUFFIX})' + f'(targetattr="*")' + 
f'(version 3.0; acl "Name of the ACI"; deny absolute (all)userdn="ldap:///anyone";)'), + ('test_targattrfilters_23', + f'(target = ldap:///cn=Jeff Vedder,ou=Product Development,{DEFAULT_SUFFIX})' + f'(targetattr="*")' + f'(version 3.0; acl "Name of the ACI"; absolute (all)userdn="ldap:///anyone";)'), + ('test_Missing_acl_mispel', + f'(target = ldap:///cn=Jeff Vedder,ou=Product Development,{DEFAULT_SUFFIX})' + f'(targetattr="*")' + f'(version 3.0; alc "Name of the ACI"; deny absolute (all)userdn="ldap:///anyone";)'), + ('test_Missing_acl_string', + f'(target = ldap:///cn=Jeff Vedder,ou=Product Development,{DEFAULT_SUFFIX})' + f'(targetattr="*")' + f'(version 3.0; "Name of the ACI"; deny absolute (all)userdn="ldap:///anyone";)'), + ('test_Wrong_version_string', + f'(target = ldap:///cn=Jeff Vedder,ou=Product Development,{DEFAULT_SUFFIX})' + f'(targetattr="*")' + f'(version 2.0; acl "Name of the ACI"; deny absolute (all)userdn="ldap:///anyone";)'), + ('test_Missing_version_string', + f'(target = ldap:///cn=Jeff Vedder,ou=Product Development,{DEFAULT_SUFFIX})' + f'(targetattr="*")' + f'(; acl "Name of the ACI"; deny absolute (all)userdn="ldap:///anyone";)'), + ('test_Authenticate_statement', + f'(target = ldap:///cn=Jeff Vedder,ou=Product Development,{DEFAULT_SUFFIX})' + f'(targetattr != "uid")' + f'(targetattr="*")(version 3.0; acl "Name of the ACI"; deny absolute (all)' + f'userdn="ldap:///anyone";)'), + ('test_Multiple_targets', + f'(target = ldap:///ou=Product Development,{DEFAULT_SUFFIX})' + f'(target = ldap:///ou=Product Testing,{DEFAULT_SUFFIX})(targetattr="*")' + f'(version 3.0; acl "Name of the ACI"; deny absolute (all)userdn="ldap:///anyone";)'), + ('test_Target_set_to_self', + f'(target = ldap:///self)(targetattr="*")' + f'(version 3.0; acl "Name of the ACI"; deny absolute (all)userdn="ldap:///anyone";)'), + ('test_target_set_with_ldap_instead_of_ldap', + f'(target = ldap:\\\{DEFAULT_SUFFIX})(targetattr="*")' + f'(version 3.0; acl "Name of the ACI"; deny 
absolute (all)userdn="ldap:///anyone";)'), + ('test_target_set_with_more_than_three', + f'(target = ldap:////{DEFAULT_SUFFIX})(targetattr="*")' + f'(version 3.0; acl "Name of the ACI"; deny absolute (all)userdn="ldap:///anyone";)'), + ('test_target_set_with_less_than_three', + f'(target = ldap://{DEFAULT_SUFFIX})(targetattr="*")' + f'(version 3.0; acl "Name of the ACI"; deny absolute (all)userdn="ldap:///anyone";)'), + ('test_bind_rule_set_with_less_than_three', + f'(target = ldap:///{DEFAULT_SUFFIX})(targetattr="*")' + f'(version 3.0; acl "Name of the ACI"; deny absolute (all)userdn="ldap:/anyone";)'), + ('test_Use_semicolon_instead_of_comma_in_permission', + f'(target = ldap:///{DEFAULT_SUFFIX})(targetattr="*")' + f'(version 3.0; acl "Name of the ACI"; deny ' + f'(read; search; compare; write)userdn="ldap:///anyone";)'), + ('test_Use_double_equal_instead_of_equal_in_the_target', + f'(target == ldap:///{DEFAULT_SUFFIX})(targetattr="*")' + f'(version 3.0; acl "Name of the ACI"; deny absolute (all)userdn="ldap:///anyone";)'), + ('test_use_double_equal_instead_of_equal_in_user_and_group_access', + f'(target = ldap:///{DEFAULT_SUFFIX})' + f'(version 3.0; acl "Name of the ACI"; deny absolute (all)' + f'userdn == "ldap:///anyone";)'), + ('test_donot_cote_the_name_of_the_aci', + f'(target = ldap:///{DEFAULT_SUFFIX})' + f'(version 3.0; acl Name of the ACI ; deny absolute (all)userdn = "ldap:///anyone";)'), + ('test_extra_parentheses_case_1', + f'( )(target = ldap:///{DEFAULT_SUFFIX}) (targetattr="*")' + f'(version 3.0; acl "Name of the ACI"; deny absolute (all)userdn = "ldap:///anyone";)'), + ('test_extra_parentheses_case_2', + f'(((((target = ldap:///{DEFAULT_SUFFIX})(targetattr="*")' + f'(version 3.0; acl "Name of the ACI"; deny absolute (all)' + f'userdn == "ldap:///anyone";)'), + ('test_extra_parentheses_case_3', + f'(((target = ldap:///{DEFAULT_SUFFIX}) (targetattr="*")' + f'(version 3.0; acl "Name of the ACI"; deny absolute ' + f'(all)userdn = 
"ldap:///anyone";)))'), + ('test_no_semicolon_at_the_end_of_the_aci', + f'(target = ldap:///{DEFAULT_SUFFIX}) (targetattr="*")' + f'(version 3.0; acl "Name of the ACI"; deny absolute (all)userdn = "ldap:///anyone")'), + ('test_a_character_different_of_a_semicolon_at_the_end_of_the_aci', + f'(target = ldap:///{DEFAULT_SUFFIX}) (targetattr="*")' + f'(version 3.0; acl "Name of the ACI"; deny absolute (all)userdn = "ldap:///anyone"%)'), + ('test_bad_filter', + f'(target = ldap:///{DEFAULT_SUFFIX}) ' + f'(targetattr="cn |&| sn |(|) uid")' + f'(version 3.0; acl "Name of the ACI"; deny absolute (all)userdn = "ldap:///anyone";)'), + ('test_Use_double_equal_instead_of_equal_in_the_targattrfilters', + f'(target = ldap:///{DEFAULT_SUFFIX})(targattrfilters== "add=title:(title=architect)")' + f'(version 3.0; acl "Name of the ACI"; deny absolute (all)userdn="ldap:///anyone";)'), + ('test_Use_double_equal_instead_of_equal_inside_the_targattrfilters', + f'(target = ldap:///{DEFAULT_SUFFIX})(targattrfilters="add==title:(title==architect)")' + f'(version 3.0; acl "Name of the ACI"; deny absolute (all)userdn="ldap:///anyone";)'),] + + +FAILED = [('test_targattrfilters_18', + f'(target = ldap:///cn=Jeff Vedder,ou=Product Development,{DEFAULT_SUFFIX})' + f'(targetattr="*")' + f'(version 3.0; acl "Name of the ACI"; deny(write)userdn="ldap:///{"123" * 300}";)'), + ('test_targattrfilters_20', + f'(target = ldap:///cn=Jeff Vedder,ou=Product Development,{DEFAULT_SUFFIX})' + f'(targetattr="*")' + f'(version 3.0; acl "Name of the ACI"; deny(write)userdns="ldap:///anyone";)'), + ('test_bind_rule_set_with_more_than_three', + f'(target = ldap:///{DEFAULT_SUFFIX})(targetattr="*")' + f'(version 3.0; acl "Name of the ACI"; deny absolute (all)' + f'userdn="ldap:////////anyone";)'), + ('test_Use_double_equal_instead_of_equal_in_the_targetattr', + f'(target = ldap:///{DEFAULT_SUFFIX})(targetattr==*)' + f'(version 3.0; acl "Name of the ACI"; deny absolute (all)userdn="ldap:///anyone";)'), + 
('test_Use_double_equal_instead_of_equal_in_the_targetfilter', + f'(target = ldap:///{DEFAULT_SUFFIX})(targetfilter==*)' + f'(version 3.0; acl "Name of the ACI"; deny absolute ' + f'(all)userdn="ldap:///anyone";)'), ] + + +@pytest.mark.xfail(reason='https://bugzilla.redhat.com/show_bug.cgi?id=1691473') +@pytest.mark.parametrize("real_value", [a[1] for a in FAILED], + ids=[a[0] for a in FAILED]) +def test_aci_invalid_syntax_fail(topo, real_value): + """Try to set wrong ACI syntax. + + :id: 83c40784-fff5-49c8-9535-7064c9c19e7e + :parametrized: yes + :setup: Standalone Instance + :steps: + 1. Create ACI + 2. Try to setup the ACI with Instance + :expectedresults: + 1. It should pass + 2. It should not pass + """ + domain = Domain(topo.standalone, DEFAULT_SUFFIX) + with pytest.raises(ldap.INVALID_SYNTAX): + domain.add("aci", real_value) + + +@pytest.mark.parametrize("real_value", [a[1] for a in INVALID], + ids=[a[0] for a in INVALID]) +def test_aci_invalid_syntax(topo, real_value): + """Try to set wrong ACI syntax. + + :id: e8bf20b6-48be-4574-8300-056e42a0f0a8 + :parametrized: yes + :setup: Standalone Instance + :steps: + 1. Create ACI + 2. Try to setup the ACI with Instance + :expectedresults: + 1. It should pass + 2. It should not pass + """ + domain = Domain(topo.standalone, DEFAULT_SUFFIX) + with pytest.raises(ldap.INVALID_SYNTAX): + domain.add("aci", real_value) + + +def test_target_set_above_the_entry_test(topo): + """ + Try to set wrong ACI syntax. + + :id: d544d09a-6ed1-11e8-8872-8c16451d917b + :setup: Standalone Instance + :steps: + 1. Create ACI + 2. Try to setup the ACI with Instance + :expectedresults: + 1. It should pass + 2. 
It should not pass + """ + domain = Domain(topo.standalone, "ou=People,{}".format(DEFAULT_SUFFIX)) + with pytest.raises(ldap.INVALID_SYNTAX): + domain.add("aci", f'(target = ldap:///{DEFAULT_SUFFIX})' + f'(targetattr="*")(version 3.0; acl "Name of the ACI"; deny absolute ' + f'(all)userdn="ldap:///anyone";)') + + +if __name__ == "__main__": + CURRENT_FILE = os.path.realpath(__file__) + pytest.main("-s -v %s" % CURRENT_FILE) diff --git a/dirsrvtests/tests/suites/acl/userattr_test.py b/dirsrvtests/tests/suites/acl/userattr_test.py new file mode 100644 index 0000000..3a13d32 --- /dev/null +++ b/dirsrvtests/tests/suites/acl/userattr_test.py @@ -0,0 +1,298 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2020 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. +# --- END COPYRIGHT BLOCK ---- + +""" +This script will test different type of user attributes. +""" + +import os +import pytest + +from lib389._constants import DEFAULT_SUFFIX, PW_DM +from lib389.idm.user import UserAccount, UserAccounts +from lib389.idm.organizationalunit import OrganizationalUnits +from lib389.idm.group import Groups +from lib389.idm.role import ManagedRoles +from lib389.topologies import topology_st as topo + +import ldap + +pytestmark = pytest.mark.tier1 + + +OU = f"ou=Accounting,{DEFAULT_SUFFIX}" +OU_2 = f"ou=Inheritance,{DEFAULT_SUFFIX}" +CAN = f"uid=Anuj Borah,{OU}" +CANNOT = f"uid=Ananda Borah,{OU}" +LEVEL_0 = f"uid=Grandson,{OU_2}" +LEVEL_1 = f"uid=Child,{OU_2}" +LEVEL_2 = f"uid=Parent,{OU_2}" +LEVEL_3 = f"uid=Grandparent,{OU_2}" +LEVEL_4 = f"uid=Ancestor,{OU_2}" +ROLE1 = f'cn=ROLE1,{OU}' +ROLE2 = f'cn=ROLE2,{OU}' +NSSIMPLEGROUP = f'cn=NSSIMPLEGROUP,{OU}' +NSSIMPLEGROUP1 = f'cn=NSSIMPLEGROUP1,{OU}' +ROLEDNACCESS = f'uid=ROLEDNACCESS,{OU}' +USERDNACCESS = f'uid=USERDNACCESS,{OU}' +GROUPDNACCESS = f'uid=GROUPDNACCESS,{OU}' +LDAPURLACCESS = f'uid=LDAPURLACCESS,{OU}' +ATTRNAMEACCESS = f'uid=ATTRNAMEACCESS,{OU}' 
+ANCESTORS = f'ou=ANCESTORS,{OU_2}' +GRANDPARENTS = f'ou=GRANDPARENTS,{ANCESTORS}' +PARENTS = f'ou=PARENTS,{GRANDPARENTS}' +CHILDREN = f'ou=CHILDREN,{PARENTS}' +GRANDSONS = f'ou=GRANDSONS,{CHILDREN}' + + +@pytest.fixture(scope="module") +def _add_user(topo): + """ + This function will create user for the test and in the end entries will be deleted . + """ + role_aci_body = '(targetattr="*")(version 3.0; aci "role aci"; allow(all)' + # Creating OUs + ous = OrganizationalUnits(topo.standalone, DEFAULT_SUFFIX) + ou_accounting = ous.create(properties={'ou': 'Accounting'}) + ou_accounting.set('aci', [f'(target="ldap:///{ROLEDNACCESS}"){role_aci_body} ' + f'userattr = "Description#ROLEDN";)', + f'(target="ldap:///{USERDNACCESS}"){role_aci_body} ' + f'userattr = "Description#USERDN";)', + f'(target="ldap:///{GROUPDNACCESS}"){role_aci_body} ' + f'userattr = "Description#GROUPDN";)', + f'(target="ldap:///{LDAPURLACCESS}"){role_aci_body} ' + f'userattr = "Description#LDAPURL";)', + f'(target="ldap:///{ATTRNAMEACCESS}"){role_aci_body} ' + f'userattr = "Description#4612";)']) + + ou_inheritance = ous.create(properties={'ou': 'Inheritance', + 'street': LEVEL_4, + 'seeAlso': LEVEL_3, + 'st': LEVEL_2, + 'description': LEVEL_1, + 'businessCategory': LEVEL_0}) + + inheritance_aci_body = '(targetattr="*")(version 3.0; aci "Inheritance aci"; allow(all) ' + ou_inheritance.set('aci', [f'{inheritance_aci_body} ' + f'userattr = "parent[0].businessCategory#USERDN";)', + f'{inheritance_aci_body} ' + f'userattr = "parent[0,1].description#USERDN";)', + f'{inheritance_aci_body} ' + f'userattr = "parent[0,1,2].st#USERDN";)', + f'{inheritance_aci_body} ' + f'userattr = "parent[0,1,2,3].seeAlso#USERDN";)', + f'{inheritance_aci_body} ' + f'userattr = "parent[0,1,2,3,4].street#USERDN";)']) + + # Creating Users + users = UserAccounts(topo.standalone, OU, rdn=None) + + for i in [['Anuj Borah', 'Sunnyvale', ROLE1, '4612'], + ['Ananda Borah', 'Santa Clara', ROLE2, 'Its Unknown']]: + 
users.create(properties={ + 'uid': i[0], + 'cn': i[0].split()[0], + 'sn': 'user', + 'uidNumber': '1000', + 'gidNumber': '2000', + 'homeDirectory': '/home/' + i[0].split()[0], + 'userPassword': PW_DM, + 'givenname': i[0].split()[0], + 'l': i[1], + 'mail': "anuj@borah.com", + 'telephonenumber': "+1 408 555 4798", + 'facsimiletelephonenumber': "+1 408 555 9751", + 'roomnumber': i[3], + 'Description': i[3], + 'nsRoleDN': i[2] + }) + + for demo1 in [('ROLEDNACCESS', ROLE1), + ('USERDNACCESS', CAN), + ('GROUPDNACCESS', NSSIMPLEGROUP), + ('ATTRNAMEACCESS', '4612'), + ('LDAPURLACCESS', f"ldap:///{DEFAULT_SUFFIX}??sub?(l=Sunnyvale)")]: + users.create(properties={ + 'uid': demo1[0], + 'cn': demo1[0], + 'sn': 'user', + 'uidNumber': '1000', + 'gidNumber': '2000', + 'homeDirectory': '/home/' + demo1[0], + 'userPassword': PW_DM, + 'Description': demo1[1] + }) + + # Creating roles + roles = ManagedRoles(topo.standalone, OU) + for i in ['ROLE1', 'ROLE2']: + roles.create(properties={"cn": i}) + + # Creating Groups + grps = Groups(topo.standalone, OU, rdn=None) + for i in [('NSSIMPLEGROUP', CAN), ('NSSIMPLEGROUP1', CANNOT)]: + grps.create(properties={ + 'cn': i[0], + 'ou': 'groups', + 'member': i[1] + }) + + users = UserAccounts(topo.standalone, OU_2, rdn=None) + for i in ['Grandson', 'Child', 'Parent', 'Grandparent', 'Ancestor']: + users.create( + properties={ + 'uid': i, + 'cn': i, + 'sn': 'user', + 'uidNumber': '1000', + 'gidNumber': '2000', + 'homeDirectory': '/home/' + i, + 'userPassword': PW_DM + }) + + # Creating Other OUs + for dn_dn in [(OU_2, 'ANCESTORS'), + (ANCESTORS, 'GRANDPARENTS'), + (GRANDPARENTS, 'PARENTS'), + (PARENTS, 'CHILDREN'), + (CHILDREN, 'GRANDSONS')]: + OrganizationalUnits(topo.standalone, dn_dn[0]).create(properties={'ou': dn_dn[1]}) + + +@pytest.mark.parametrize("user,entry", [ + (CAN, ROLEDNACCESS), + (CAN, USERDNACCESS), + (CAN, GROUPDNACCESS), + (CAN, LDAPURLACCESS), + (CAN, ATTRNAMEACCESS), + (LEVEL_0, OU_2), + (LEVEL_1, ANCESTORS), + (LEVEL_2, 
GRANDPARENTS), + (LEVEL_4, OU_2), + (LEVEL_4, ANCESTORS), + (LEVEL_4, GRANDPARENTS), + (LEVEL_4, PARENTS), + (LEVEL_4, CHILDREN), + pytest.param(LEVEL_3, CHILDREN, marks=pytest.mark.xfail(reason="May be some bug")), +], ids=[ + "(CAN,ROLEDNACCESS)", + "(CAN,USERDNACCESS)", + "(CAN,GROUPDNACCESS)", + "(CAN,LDAPURLACCESS)", + "(CAN,ATTRNAMEACCESS)", + "(LEVEL_0, OU_2)", + "(LEVEL_1,ANCESTORS)", + "(LEVEL_2,GRANDPARENTS)", + "(LEVEL_4,OU_2)", + "(LEVEL_4, ANCESTORS)", + "(LEVEL_4,GRANDPARENTS)", + "(LEVEL_4,PARENTS)", + "(LEVEL_4,CHILDREN)", + "(LEVEL_3, CHILDREN)" +]) +def test_mod_see_also_positive(topo, _add_user, user, entry): + """ + Try to set seeAlso on entry with binding specific user, it will success + as per the ACI. + + :id: 65745426-7a01-11e8-8ac2-8c16451d917b + :parametrized: yes + :setup: Standalone Instance + :steps: + 1. Add test entry + 2. Add ACI + 3. User should follow ACI role + :expectedresults: + 1. Entry should be added + 2. Operation should succeed + 3. Operation should succeed + """ + conn = UserAccount(topo.standalone, user).bind(PW_DM) + UserAccount(conn, entry).replace('seeAlso', 'cn=1') + + +@pytest.mark.parametrize("user,entry", [ + (CANNOT, ROLEDNACCESS), + (CANNOT, USERDNACCESS), + (CANNOT, GROUPDNACCESS), + (CANNOT, LDAPURLACCESS), + (CANNOT, ATTRNAMEACCESS), + (LEVEL_0, ANCESTORS), + (LEVEL_0, GRANDPARENTS), + (LEVEL_0, PARENTS), + (LEVEL_0, CHILDREN), + (LEVEL_2, PARENTS), + (LEVEL_4, GRANDSONS), +], ids=[ + "(CANNOT,ROLEDNACCESS)", + "(CANNOT,USERDNACCESS)", + "(CANNOT,GROUPDNACCESS)", + "(CANNOT,LDAPURLACCESS)", + "(CANNOT,ATTRNAMEACCESS)", + "(LEVEL_0, ANCESTORS)", + "(LEVEL_0,GRANDPARENTS)", + "(LEVEL_0,PARENTS)", + "(LEVEL_0,CHILDREN)", + "(LEVEL_2,PARENTS)", + "(LEVEL_4,GRANDSONS)", +]) +def test_mod_see_also_negative(topo, _add_user, user, entry): + """ + Try to set seeAlso on entry with binding specific user, it will Fail + as per the ACI. 
+ + :id: 9ea93252-7a01-11e8-a85b-8c16451d917b + :parametrized: yes + :setup: Standalone Instance + :steps: + 1. Add test entry + 2. Add ACI + 3. User should follow ACI role + :expectedresults: + 1. Entry should be added + 2. Operation should succeed + 3. Operation should succeed + """ + conn = UserAccount(topo.standalone, user).bind(PW_DM) + user = UserAccount(conn, entry) + with pytest.raises(ldap.INSUFFICIENT_ACCESS): + user.replace('seeAlso', 'cn=1') + + +@pytest.mark.parametrize("user,entry", [ + (CANNOT, USERDNACCESS), + (CANNOT, ROLEDNACCESS), + (CANNOT, GROUPDNACCESS) +]) +def test_last_three(topo, _add_user, user, entry): + """ + When we use the userattr keyword to associate the entry used to bind + with the target entry the ACI applies only to the target specified and + not to subentries. + + :id: add58a0a-7a01-11e8-85f1-8c16451d917b + :parametrized: yes + :setup: Standalone Instance + :steps: + 1. Add test entry + 2. Add ACI + 3. User should follow ACI role + :expectedresults: + 1. Entry should be added + 2. Operation should succeed + 3. Operation should succeed + """ + conn = UserAccount(topo.standalone, user).bind(PW_DM) + users = UserAccounts(conn, entry) + with pytest.raises(ldap.INSUFFICIENT_ACCESS): + users.create_test_user() + + +if __name__ == "__main__": + CURRENT_FILE = os.path.realpath(__file__) + pytest.main("-s -v %s" % CURRENT_FILE) diff --git a/dirsrvtests/tests/suites/acl/valueacl_part2_test.py b/dirsrvtests/tests/suites/acl/valueacl_part2_test.py new file mode 100644 index 0000000..5f160d0 --- /dev/null +++ b/dirsrvtests/tests/suites/acl/valueacl_part2_test.py @@ -0,0 +1,442 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2019 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. 
+# --- END COPYRIGHT BLOCK ---- + +import pytest, os, ldap +from lib389._constants import DEFAULT_SUFFIX, PW_DM +from lib389.idm.user import UserAccount +from lib389.idm.account import Anonymous +from lib389.idm.organizationalunit import OrganizationalUnit +from lib389.topologies import topology_st as topo +from lib389.idm.domain import Domain + +pytestmark = pytest.mark.tier1 + +CONTAINER_1_DELADD = "ou=Product Development,{}".format(DEFAULT_SUFFIX) +CONTAINER_2_DELADD = "ou=Accounting,{}".format(DEFAULT_SUFFIX) +USER_DELADD = "cn=Jeff Vedder,{}".format(CONTAINER_1_DELADD) +USER_WITH_ACI_DELADD = "cn=Sam Carter,{}".format(CONTAINER_2_DELADD) +FRED = "cn=FRED,ou=Accounting,{}".format(DEFAULT_SUFFIX) +HARRY = "cn=HARRY,ou=Accounting,{}".format(DEFAULT_SUFFIX) +KIRSTENVAUGHAN = "cn=Kirsten Vaughan,ou=Human Resources,{}".format(DEFAULT_SUFFIX) +HUMAN_OU_GLOBAL = "ou=Human Resources,{}".format(DEFAULT_SUFFIX) + + +@pytest.fixture(scope="function") +def aci_of_user(request, topo): + # Add anonymous access aci + ACI_TARGET = "(targetattr != \"userpassword\")(target = \"ldap:///%s\")" % (DEFAULT_SUFFIX) + ACI_ALLOW = "(version 3.0; acl \"Anonymous Read access\"; allow (read,search,compare)" + ACI_SUBJECT = "(userdn=\"ldap:///anyone\");)" + ANON_ACI = ACI_TARGET + ACI_ALLOW + ACI_SUBJECT + suffix = Domain(topo.standalone, DEFAULT_SUFFIX) + try: + suffix.add('aci', ANON_ACI) + except ldap.TYPE_OR_VALUE_EXISTS: + pass + + aci_list = Domain(topo.standalone, DEFAULT_SUFFIX).get_attr_vals('aci') + + def finofaci(): + domain = Domain(topo.standalone, DEFAULT_SUFFIX) + domain.set('aci', None) + for i in aci_list: + domain.add("aci", i) + + request.addfinalizer(finofaci) + + +@pytest.fixture(scope="function") +def _add_user(request, topo): + for i in ["Product Development", 'Accounting', "Human Resources"]: + ou = OrganizationalUnit(topo.standalone, "ou={},{}".format(i, DEFAULT_SUFFIX)) + ou.create(properties={'ou': i}) + + properties = { + 'uid': 'Jeff Vedder', + 'cn': 'Jeff 
Vedder', + 'sn': 'user', + 'uidNumber': '1000', + 'gidNumber': '2000', + 'homeDirectory': '/home/' + 'JeffVedder', + 'userPassword': 'password' + } + user = UserAccount(topo.standalone, 'cn=Jeff Vedder,{}'.format(CONTAINER_1_DELADD)) + user.create(properties=properties) + user.set('secretary', 'cn=Arpitoo Borah, o=Red Hat, c=As') + user.set('mail', 'anuj@anuj.Borah') + + properties = { + 'uid': 'Sam Carter', + 'cn': 'Sam Carter', + 'sn': 'user', + 'uidNumber': '1000', + 'gidNumber': '2000', + 'homeDirectory': '/home/' + 'SamCarter', + 'userPassword': 'password' + } + user = UserAccount(topo.standalone, 'cn=Sam Carter,{}'.format(CONTAINER_2_DELADD)) + user.create(properties=properties) + + properties = { + 'uid': 'Kirsten Vaughan', + 'cn': 'Kirsten Vaughan', + 'sn': 'Kirsten Vaughan', + 'uidNumber': '1000', + 'gidNumber': '2000', + 'homeDirectory': '/home/' + 'KirstenVaughan', + 'userPassword': 'password' + } + user = UserAccount(topo.standalone, 'cn=Kirsten Vaughan, ou=Human Resources,{}'.format(DEFAULT_SUFFIX)) + user.create(properties=properties) + + properties = { + 'uid': 'HARRY', + 'cn': 'HARRY', + 'sn': 'HARRY', + 'uidNumber': '1000', + 'gidNumber': '2000', + 'homeDirectory': '/home/' + 'HARRY', + 'userPassword': 'password' + } + user = UserAccount(topo.standalone, 'cn=HARRY, ou=Accounting,{}'.format(DEFAULT_SUFFIX)) + user.create(properties=properties) + + def fin(): + for DN in [USER_DELADD, USER_WITH_ACI_DELADD, FRED, HARRY, KIRSTENVAUGHAN, + HUMAN_OU_GLOBAL, CONTAINER_2_DELADD,CONTAINER_1_DELADD]: + ua = UserAccount(topo.standalone, DN) + try: + ua.delete() + except: + pass + + request.addfinalizer(fin) + + +def test_we_can_search_as_expected(topo, _add_user, aci_of_user, request): + """Testing the targattrfilters keyword that allows access control based on the value of the attributes being added (or deleted)) + Test that we can search as expected + + :id: e845dbba-7aa9-11e8-8988-8c16451d917b + :setup: server + :steps: + 1. Add test entry + 2. 
Add ACI + 3. User should follow ACI role + :expectedresults: + 1. Entry should be added + 2. Operation should succeed + 3. Operation should succeed + """ + ACI_BODY = '(target="ldap:///cn=*,ou=Product Development, {}")' \ + '(targetfilter="cn=Jeff*")(targetattr="secretary || objectclass || mail")' \ + '(targattrfilters = "add=title:(title=arch*)")(version 3.0; acl "{}"; ' \ + 'allow (write,read,search,compare) (userdn = "ldap:///anyone") ;)'.format(DEFAULT_SUFFIX, request.node.name) + Domain(topo.standalone, DEFAULT_SUFFIX).add("aci", ACI_BODY) + conn = Anonymous(topo.standalone).bind() + # aci will allow secretary , mail , objectclass + user = UserAccount(conn, USER_DELADD) + assert user.get_attr_vals('secretary') + assert user.get_attr_vals('mail') + assert user.get_attr_vals('objectclass') + + +def test_we_can_mod_title_as_expected(topo, _add_user, aci_of_user, request): + """Testing the targattrfilters keyword that allows access control based on the + value of the attributes being added (or deleted)) + Test search will work with targattrfilters present. + + :id: f8c1ea88-7aa9-11e8-a55c-8c16451d917b + :setup: server + :steps: + 1. Add test entry + 2. Add ACI + 3. User should follow ACI role + :expectedresults: + 1. Entry should be added + 2. Operation should succeed + 3. 
Operation should succeed + """ + ACI_BODY = '(target="ldap:///cn=*,ou=Product Development, {}")' \ + '(targetfilter="cn=Jeff*")(targetattr="secretary || objectclass || mail")' \ + '(targattrfilters = "add=title:(title=arch*)")(version 3.0; acl "{}"; ' \ + 'allow (write,read,search,compare) (userdn = "ldap:///anyone") ;)'.format(DEFAULT_SUFFIX, request.node.name) + Domain(topo.standalone, DEFAULT_SUFFIX).add("aci", ACI_BODY) + # aci will not allow 'title', 'topdog' + conn = UserAccount(topo.standalone, USER_WITH_ACI_DELADD).bind(PW_DM) + user = UserAccount(conn, USER_DELADD) + with pytest.raises(ldap.INSUFFICIENT_ACCESS): + user.add('title', 'topdog') + + +def test_modify_with_multiple_filters(topo, _add_user, aci_of_user, request): + """Testing the targattrfilters keyword that allows access control based on the + value of the attributes being added (or deleted)) + Allowed by multiple filters + + :id: fd9d223e-7aa9-11e8-a83b-8c16451d917b + :setup: server + :steps: + 1. Add test entry + 2. Add ACI + 3. User should follow ACI role + :expectedresults: + 1. Entry should be added + 2. Operation should succeed + 3. 
Operation should succeed + """ + ACI_BODY = '(targattrfilters = "add=title:(title=architect) && secretary:' \ + '(secretary=cn=Meylan,{}), del=title:(title=architect) && secretary:' \ + '(secretary=cn=Meylan,{})")(version 3.0; acl "{}"; allow (write) ' \ + '(userdn = "ldap:///anyone") ;)'.format( + DEFAULT_SUFFIX, DEFAULT_SUFFIX, request.node.name + ) + Domain(topo.standalone, DEFAULT_SUFFIX).add("aci", ACI_BODY) + conn = UserAccount(topo.standalone, USER_WITH_ACI_DELADD).bind(PW_DM) + # aci will allow title some attribute only + user = UserAccount(conn, USER_DELADD) + user.add("title", "architect") + assert user.get_attr_val('title') + user.add("secretary", "cn=Meylan,dc=example,dc=com") + assert user.get_attr_val('secretary') + + +def test_denied_by_multiple_filters(topo, _add_user, aci_of_user, request): + """Testing the targattrfilters keyword that allows access control based on the value of the + attributes being added (or deleted)) + Denied by multiple filters + + :id: 034c6c62-7aaa-11e8-8634-8c16451d917b + :setup: server + :steps: + 1. Add test entry + 2. Add ACI + 3. User should follow ACI role + :expectedresults: + 1. Entry should be added + 2. Operation should succeed + 3. 
Operation should succeed + """ + ACI_BODY = '(targattrfilters = "add=title:(title=architect) && secretary:' \ + '(secretary=cn=Meylan,{}), del=title:(title=architect) && secretary:' \ + '(secretary=cn=Meylan,{})")(version 3.0; acl "{}"; allow (write) ' \ + '(userdn = "ldap:///anyone") ;)'.format(DEFAULT_SUFFIX, DEFAULT_SUFFIX, request.node.name) + Domain(topo.standalone, DEFAULT_SUFFIX).add("aci", ACI_BODY) + conn = UserAccount(topo.standalone, USER_WITH_ACI_DELADD).bind(PW_DM) + # aci will allow title some attribute only + user = UserAccount(conn, USER_DELADD) + user.add("title", "architect") + assert user.get_attr_val('title') + user.add("secretary", "cn=Meylan,dc=example,dc=com") + assert user.get_attr_val('secretary') + # aci will allow title some attribute only + with pytest.raises(ldap.INSUFFICIENT_ACCESS): + user.add("secretary", "cn=Grenoble,dc=example,dc=com") + + +def test_allowed_add_one_attribute(topo, _add_user, aci_of_user, request): + """Testing the targattrfilters keyword that allows access control based on the value of the + attributes being added (or deleted)) + Allowed add one attribute (in presence of multiple filters) + + :id: 086c7f0c-7aaa-11e8-b69f-8c16451d917b + :setup: server + :steps: + 1. Add test entry + 2. Add ACI + 3. User should follow ACI role + :expectedresults: + 1. Entry should be added + 2. Operation should succeed + 3. 
Operation should succeed + """ + ACI_BODY = '(targattrfilters = "add=title:(title=architect) && secretary:(secretary=cn=Meylan, {}), ' \ + 'del=title:(title=architect) && secretary:(secretary=cn=Meylan, {})")(version 3.0; acl "{}"; ' \ + 'allow (write) (userdn = "ldap:///{}") ;)'.format( + DEFAULT_SUFFIX, DEFAULT_SUFFIX, request.node.name, USER_WITH_ACI_DELADD) + Domain(topo.standalone, DEFAULT_SUFFIX).add("aci", ACI_BODY) + conn = UserAccount(topo.standalone, USER_WITH_ACI_DELADD).bind(PW_DM) + user = UserAccount(conn, USER_DELADD) + # aci will allow add ad delete + user.add('title', 'architect') + assert user.get_attr_val('title') + user.remove('title', 'architect') + + +def test_cannot_add_an_entry_with_attribute_values_we_are_not_allowed_add( + topo, _add_user, aci_of_user, request +): + """Testing the targattrfilters keyword that allows access control based on the value of the + attributes being added (or deleted)) + Test not allowed add an entry + + :id: 0d0effee-7aaa-11e8-b673-8c16451d917b + :setup: server + :steps: + 1. Add test entry + 2. Add ACI + 3. User should follow ACI role + :expectedresults: + 1. Entry should be added + 2. Operation should succeed + 3. 
Operation should succeed + """ + ACI_BODY = '(targattrfilters = "add=title:(|(title=engineer)(title=cool dude)(title=scum)) ' \ + '&& secretary:(secretary=cn=Meylan, {}), del=title:(|(title=engineer)(title=cool dude)' \ + '(title=scum))")(version 3.0; aci "{}"; allow (add) userdn = "ldap:///{}";)'.format( + DEFAULT_SUFFIX, request.node.name, DEFAULT_SUFFIX) + Domain(topo.standalone, DEFAULT_SUFFIX).add("aci", ACI_BODY) + properties = { + 'uid': 'FRED', + 'cn': 'FRED', + 'sn': 'user', + 'uidNumber': '1000', + 'gidNumber': '2000', + 'homeDirectory': '/home/' + 'FRED' + } + user = UserAccount(topo.standalone, 'cn=FRED,ou=Accounting,{}'.format(DEFAULT_SUFFIX)) + user.create(properties=properties) + user.set('title', ['anuj', 'kumar', 'borah']) + conn = UserAccount(topo.standalone, USER_WITH_ACI_DELADD).bind(PW_DM) + # aci will not allow adding objectclass + user = UserAccount(conn, USER_WITH_ACI_DELADD) + with pytest.raises(ldap.INSUFFICIENT_ACCESS): + user.add("objectclass", "person") + + +def test_on_modrdn(topo, _add_user, aci_of_user, request): + """Testing the targattrfilters keyword that allows access control based on the value of the + attributes being added (or deleted)) + Test that valuacls kick in for modrdn operation. + + :id: 12985dde-7aaa-11e8-abde-8c16451d917b + :setup: server + :steps: + 1. Add test entry + 2. Add ACI + 3. User should follow ACI role + :expectedresults: + 1. Entry should be added + 2. Operation should succeed + 3. 
Operation should succeed + """ + ACI_BODY = '(target="ldap:///cn=*,ou=Accounting,{}")(targattrfilters = "add=cn:(|(cn=engineer)), ' \ + 'del=title:(|(title=engineer)(title=cool dude)(title=scum))")(version 3.0; aci "{}"; ' \ + 'allow (write) userdn = "ldap:///{}";)'.format(DEFAULT_SUFFIX, request.node.name, USER_WITH_ACI_DELADD) + Domain(topo.standalone, DEFAULT_SUFFIX).add("aci", ACI_BODY) + conn = UserAccount(topo.standalone, USER_WITH_ACI_DELADD).bind(PW_DM) + # modrdn_s is not allowed with ou=OU1 + useraccount = UserAccount(conn, FRED) + with pytest.raises(ldap.INSUFFICIENT_ACCESS): + useraccount.rename("ou=OU1") + + +def test_on_modrdn_allow(topo, _add_user, aci_of_user, request): + """Testing the targattrfilters keyword that allows access control based on the value of the attributes being + added (or deleted)) + Test modrdn still works (2) + + :id: 17720562-7aaa-11e8-82ee-8c16451d917b + :setup: server + :steps: + 1. Add test entry + 2. Add ACI + 3. User should follow ACI role + :expectedresults: + 1. Entry should be added + 2. Operation should succeed + 3. 
Operation should succeed + """ + ACI_BODY = '(target="ldap:///{}")(targattrfilters = "add=cn:((cn=engineer)), del=cn:((cn=jonny))")' \ + '(version 3.0; aci "{}"; allow (write) ' \ + 'userdn = "ldap:///{}";)'.format(DEFAULT_SUFFIX, request.node.name, USER_WITH_ACI_DELADD) + Domain(topo.standalone, DEFAULT_SUFFIX).add("aci", ACI_BODY) + properties = { + 'uid': 'jonny', + 'cn': 'jonny', + 'sn': 'user', + 'uidNumber': '1000', + 'gidNumber': '2000', + 'homeDirectory': '/home/' + 'jonny' + } + user = UserAccount(topo.standalone, 'cn=jonny,{}'.format(DEFAULT_SUFFIX)) + user.create(properties=properties) + conn = UserAccount(topo.standalone, USER_WITH_ACI_DELADD).bind(PW_DM) + # aci will allow modrdn_s on cn=engineer + useraccount = UserAccount(conn, "cn=jonny,{}".format(DEFAULT_SUFFIX)) + useraccount.rename("cn=engineer") + assert useraccount.dn == 'cn=engineer,dc=example,dc=com' + + +@pytest.mark.bz979515 +def test_targattrfilters_keyword(topo): + """Testing the targattrfilters keyword that allows access control based on the value + of the attributes being added (or deleted)) + "Bug #979515 - ACLs inoperative in some search scenarios [rhel-6.5]" + "Bug #979516 is a clone for DS8.2 on RHEL5.9" + "Bug #979514 is a clone for RHEL6.4 zStream errata" + + :id: 23f9e9d0-7aaa-11e8-b16b-8c16451d917b + :setup: server + :steps: + 1. Add test entry + 2. Add ACI + 3. User should follow ACI role + :expectedresults: + 1. Entry should be added + 2. Operation should succeed + 3. 
Operation should succeed + """ + domain = Domain(topo.standalone, DEFAULT_SUFFIX) + domain.set('aci', None) + ou = OrganizationalUnit(topo.standalone, 'ou=bug979515,{}'.format(DEFAULT_SUFFIX)) + ou.create(properties={'ou': 'bug979515'}) + Domain(topo.standalone, DEFAULT_SUFFIX).add("aci", '(target="ldap:///ou=bug979515,{}") ' + '(targetattr= "uid") ( version 3.0; acl "read other subscriber"; allow (compare, read, search) ' + 'userdn="ldap:///uid=*,ou=bug979515,{}" ; )'.format(DEFAULT_SUFFIX, DEFAULT_SUFFIX)) + properties = { + 'uid': 'acientryusr1', + 'cn': 'acientryusr1', + 'sn': 'user', + 'uidNumber': '1000', + 'gidNumber': '2000', + 'homeDirectory': '/home/' + 'acientryusr1' + } + user = UserAccount(topo.standalone, 'cn=acientryusr1,ou=bug979515,{}'.format(DEFAULT_SUFFIX)) + user.create(properties=properties) + user.set('telephoneNumber', '99972566596') + user.set('mail', 'anuj@anuj.com') + user.set("userPassword", "password") + + properties = { + 'uid': 'newaciphoneusr1', + 'cn': 'newaciphoneusr1', + 'sn': 'user', + 'uidNumber': '1000', + 'gidNumber': '2000', + 'homeDirectory': '/home/' + 'newaciphoneusr1' + } + user = UserAccount(topo.standalone, 'cn=newaciphoneusr1,ou=bug979515,{}'.format(DEFAULT_SUFFIX)) + user.create(properties=properties) + user.set('telephoneNumber', '99972566596') + user.set('mail', 'anuj@anuj.com') + conn = UserAccount(topo.standalone, "cn=acientryusr1,ou=bug979515,{}".format(DEFAULT_SUFFIX)).bind(PW_DM) + # Testing the targattrfilters keyword that allows access control based on the value of the attributes being added (or deleted)) + user = UserAccount(conn, "cn=acientryusr1,ou=bug979515,{}".format(DEFAULT_SUFFIX)) + assert len(user.get_attr_vals('mail')) == 0 + assert len(user.get_attr_vals('telephoneNumber')) == 0 + assert len(user.get_attr_vals('cn')) == 0 + user = UserAccount(topo.standalone, "cn=acientryusr1,ou=bug979515,{}".format(DEFAULT_SUFFIX)) + user.get_attr_vals('mail') + user.get_attr_vals('telephoneNumber') + 
user.get_attr_vals('cn') + + +if __name__ == '__main__': + CURRENT_FILE = os.path.realpath(__file__) + pytest.main("-s -v %s" % CURRENT_FILE) diff --git a/dirsrvtests/tests/suites/acl/valueacl_test.py b/dirsrvtests/tests/suites/acl/valueacl_test.py new file mode 100644 index 0000000..3bbbdca --- /dev/null +++ b/dirsrvtests/tests/suites/acl/valueacl_test.py @@ -0,0 +1,758 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2020 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. +# --- END COPYRIGHT BLOCK ---- + +import pytest, os, ldap +from lib389._constants import DEFAULT_SUFFIX, PW_DM +from lib389.idm.user import UserAccount +from lib389.idm.account import Anonymous +from lib389.idm.organizationalunit import OrganizationalUnit +from lib389.topologies import topology_st as topo +from lib389.idm.domain import Domain + +pytestmark = pytest.mark.tier1 + +CONTAINER_1_DELADD = "ou=Product Development,{}".format(DEFAULT_SUFFIX) +CONTAINER_2_DELADD = "ou=Accounting,{}".format(DEFAULT_SUFFIX) +USER_DELADD = "cn=Jeff Vedder,{}".format(CONTAINER_1_DELADD) +USER_WITH_ACI_DELADD = "cn=Sam Carter,{}".format(CONTAINER_2_DELADD) +FRED = "cn=FRED,ou=Accounting,{}".format(DEFAULT_SUFFIX) +HARRY = "cn=HARRY,ou=Accounting,{}".format(DEFAULT_SUFFIX) +KIRSTENVAUGHAN = "cn=Kirsten Vaughan,ou=Human Resources,{}".format(DEFAULT_SUFFIX) +HUMAN_OU_GLOBAL = "ou=Human Resources,{}".format(DEFAULT_SUFFIX) + + +@pytest.fixture(scope="function") +def aci_of_user(request, topo): + # Add anonymous access aci + ACI_TARGET = "(targetattr != \"userpassword\")(target = \"ldap:///%s\")" % (DEFAULT_SUFFIX) + ACI_ALLOW = "(version 3.0; acl \"Anonymous Read access\"; allow (read,search,compare)" + ACI_SUBJECT = "(userdn=\"ldap:///anyone\");)" + ANON_ACI = ACI_TARGET + ACI_ALLOW + ACI_SUBJECT + suffix = Domain(topo.standalone, DEFAULT_SUFFIX) + try: + suffix.add('aci', ANON_ACI) + except ldap.TYPE_OR_VALUE_EXISTS: + pass + + aci_list = 
Domain(topo.standalone, DEFAULT_SUFFIX).get_attr_vals('aci') + + def finofaci(): + domain = Domain(topo.standalone, DEFAULT_SUFFIX) + domain.set('aci', None) + for i in aci_list: + domain.add("aci", i) + + request.addfinalizer(finofaci) + + +@pytest.fixture(scope="function") +def _add_user(request, topo): + for i in ["Product Development", 'Accounting', "Human Resources"]: + ou = OrganizationalUnit(topo.standalone, "ou={},{}".format(i, DEFAULT_SUFFIX)) + ou.create(properties={'ou': i}) + + properties = { + 'uid': 'Jeff Vedder', + 'cn': 'Jeff Vedder', + 'sn': 'user', + 'uidNumber': '1000', + 'gidNumber': '2000', + 'homeDirectory': '/home/' + 'JeffVedder', + 'userPassword': 'password' + } + user = UserAccount(topo.standalone, 'cn=Jeff Vedder,{}'.format(CONTAINER_1_DELADD)) + user.create(properties=properties) + user.set('secretary', 'cn=Arpitoo Borah, o=Red Hat, c=As') + user.set('mail', 'anuj@anuj.Borah') + + properties = { + 'uid': 'Sam Carter', + 'cn': 'Sam Carter', + 'sn': 'user', + 'uidNumber': '1000', + 'gidNumber': '2000', + 'homeDirectory': '/home/' + 'SamCarter', + 'userPassword': 'password' + } + user = UserAccount(topo.standalone, 'cn=Sam Carter,{}'.format(CONTAINER_2_DELADD)) + user.create(properties=properties) + + properties = { + 'uid': 'Kirsten Vaughan', + 'cn': 'Kirsten Vaughan', + 'sn': 'Kirsten Vaughan', + 'uidNumber': '1000', + 'gidNumber': '2000', + 'homeDirectory': '/home/' + 'KirstenVaughan', + 'userPassword': 'password' + } + user = UserAccount(topo.standalone, 'cn=Kirsten Vaughan, ou=Human Resources,{}'.format(DEFAULT_SUFFIX)) + user.create(properties=properties) + + properties = { + 'uid': 'HARRY', + 'cn': 'HARRY', + 'sn': 'HARRY', + 'uidNumber': '1000', + 'gidNumber': '2000', + 'homeDirectory': '/home/' + 'HARRY', + 'userPassword': 'password' + } + user = UserAccount(topo.standalone, 'cn=HARRY, ou=Accounting,{}'.format(DEFAULT_SUFFIX)) + user.create(properties=properties) + + def fin(): + for DN in [USER_DELADD, USER_WITH_ACI_DELADD, FRED, 
HARRY, KIRSTENVAUGHAN, + HUMAN_OU_GLOBAL, CONTAINER_2_DELADD,CONTAINER_1_DELADD]: + ua = UserAccount(topo.standalone, DN) + try: + ua.delete() + except: + pass + + request.addfinalizer(fin) + + +class _ModTitleArchitectJeffVedder: + def __init__(self, topo, value, conn): + self.topo = topo + self.value = value + self.conn = conn + self.user = UserAccount(self.conn, USER_DELADD) + + def add(self): + self.user.add("title", self.value) + + def delete(self): + self.user.remove("title", self.value) + + +class _DelTitleArchitectJeffVedder: + def __init__(self, topo, conn): + self.topo = topo + self.conn = conn + + def delete(self): + UserAccount(self.conn, USER_DELADD).remove("title", None) + + +class _AddTitleWithRoot: + def __init__(self, topo, value): + self.topo = topo + self.value = value + self.user = UserAccount(self.topo.standalone, USER_DELADD) + + def add(self): + self.user.add("title", self.value) + + def delete(self): + self.user.remove("title", self.value) + + +class _AddFREDWithRoot: + def __init__(self, topo, title1, title2, title3): + self.topo = topo + self.title1 = title1 + self.title2 = title2 + self.title3 = title3 + + def create(self): + properties = { + 'uid': 'FRED', + 'cn': 'FRED', + 'sn': 'user', + 'uidNumber': '1000', + 'gidNumber': '2000', + 'homeDirectory': '/home/' + 'FRED' + } + user = UserAccount(self.topo.standalone, "cn=FRED, ou=Accounting,{}".format(DEFAULT_SUFFIX)) + user.create(properties=properties) + user.set("title", [self.title1, self.title2, self.title3]) + + +def test_delete_an_attribute_value_we_are_not_allowed_to_delete( + topo, _add_user, aci_of_user +): + """Testing the targattrfilters keyword that allows access control based on the value + of the attributes being added (or deleted)) + Test that we can MODIFY:add an attribute value we are allowed to add + + :id: 7c41baa6-7aa9-11e8-9bdc-8c16451d917b + :setup: server + :steps: + 1. Add test entry + 2. Add ACI + 3. User should follow ACI role + :expectedresults: + 1. 
Entry should be added + 2. Operation should succeed + 3. Operation should succeed + """ + ACI_BODY = '(targattrfilters = "add=title:(title=architect), del=title:(title=architect)")' \ + '(version 3.0; acl "ACI NAME"; allow (write) (userdn = "ldap:///{}") ;)'.format(USER_WITH_ACI_DELADD) + Domain(topo.standalone, DEFAULT_SUFFIX).add("aci", ACI_BODY) + conn = UserAccount(topo.standalone, USER_WITH_ACI_DELADD).bind(PW_DM) + with pytest.raises(ldap.INSUFFICIENT_ACCESS): + _ModTitleArchitectJeffVedder(topo, "engineer", conn).add() + _ModTitleArchitectJeffVedder(topo, "architect", conn).add() + + +def test_donot_allow_write_access_to_title_if_value_is_not_architect( + topo, _add_user, aci_of_user, request +): + """Testing the targattrfilters keyword that allows access control based on the value of the + attributes being added (or deleted)) + Test that we cannot MODIFY:add an attribute value we are not allowed to add + + :id: 822c607e-7aa9-11e8-b2e7-8c16451d917b + :setup: server + :steps: + 1. Add test entry + 2. Add ACI + 3. User should follow ACI role + :expectedresults: + 1. Entry should be added + 2. Operation should succeed + 3. 
Operation should succeed + """ + ACI_BODY = '(targattrfilters = "add=title:(title=architect), del=title:(title=architect)")' \ + '(version 3.0; acl "{}"; allow (write) (userdn = "ldap:///{}") ;)'.format(request.node.name, USER_WITH_ACI_DELADD) + Domain(topo.standalone, DEFAULT_SUFFIX).add("aci", ACI_BODY) + # aci will allow to add title architect + conn = UserAccount(topo.standalone, USER_WITH_ACI_DELADD).bind(PW_DM) + _ModTitleArchitectJeffVedder(topo, "architect", conn).add() + # aci will noo allow to add title architect1 + with pytest.raises(ldap.INSUFFICIENT_ACCESS): + _ModTitleArchitectJeffVedder(topo, "architect1", conn).add() + + +def test_delete_an_attribute_value_we_are_allowed_to_delete( + topo, _add_user, aci_of_user, request +): + """Testing the targattrfilters keyword that allows access control based on the value of + the attributes being added (or deleted)) + Test that we can MODIFY:delete an attribute value we are allowed to delete + + :id: 86f36b34-7aa9-11e8-ab16-8c16451d917b + :setup: server + :steps: + 1. Add test entry + 2. Add ACI + 3. User should follow ACI role + :expectedresults: + 1. Entry should be added + 2. Operation should succeed + 3. 
Operation should succeed + """ + ACI_BODY = '(targattrfilters = "add=title:(title=architect), del=title:(title=architect)")' \ + '(version 3.0; acl "{}"; allow (write) (userdn = "ldap:///{}") ;)'.format(request.node.name, USER_WITH_ACI_DELADD) + Domain(topo.standalone, DEFAULT_SUFFIX).add("aci", ACI_BODY) + _AddTitleWithRoot(topo, "architect").add() + # aci will allow to delete title architect + conn = UserAccount(topo.standalone, USER_WITH_ACI_DELADD).bind(PW_DM) + r1 = _ModTitleArchitectJeffVedder(topo, "architect", conn) + r1.delete() + + +def test_delete_an_attribute_value_we_are_not_allowed_to_deleted( + topo, _add_user, aci_of_user, request +): + """Testing the targattrfilters keyword that allows access control based on the value of the + attributes being added (or deleted)) + Test that we cannot MODIFY:delete an attribute value we are allowed to delete + + :id: 8c9f3a90-7aa9-11e8-bf2e-8c16451d917b + :setup: server + :steps: + 1. Add test entry + 2. Add ACI + 3. User should follow ACI role + :expectedresults: + 1. Entry should be added + 2. Operation should succeed + 3. Operation should succeed + """ + ACI_BODY = '(targattrfilters = "add=title:(title=architect), del=title:(title=architect)")' \ + '(version 3.0; acl "{}"; allow (write) (userdn = "ldap:///{}") ;)'.format(request.node.name, USER_WITH_ACI_DELADD) + Domain(topo.standalone, DEFAULT_SUFFIX).add("aci", ACI_BODY) + _AddTitleWithRoot(topo, "engineer").add() + # acl will not allow to delete title engineer + conn = UserAccount(topo.standalone, USER_WITH_ACI_DELADD).bind(PW_DM) + with pytest.raises(ldap.INSUFFICIENT_ACCESS): + _ModTitleArchitectJeffVedder(topo, "engineer", conn).delete() + + +def test_allow_modify_replace(topo, _add_user, aci_of_user, request): + """Testing the targattrfilters keyword that allows access control based on the value of the + attributes being added (or deleted)) + Test that we can MODIFY:replace an attribute if we have correct add/delete rights. 
+ + :id: 9148a234-7aa9-11e8-a1f1-8c16451d917b + :setup: server + :steps: + 1. Add test entry + 2. Add ACI + 3. User should follow ACI role + :expectedresults: + 1. Entry should be added + 2. Operation should succeed + 3. Operation should succeed + """ + ACI_BODY = '(targattrfilters = "add=title:(title=engineer), del=title:(|(title=architect)' \ + '(title=idiot))")(version 3.0; acl "{}"; ' \ + 'allow (write) (userdn = "ldap:///{}") ;)'.format(request.node.name, USER_WITH_ACI_DELADD) + Domain(topo.standalone, DEFAULT_SUFFIX).add("aci", ACI_BODY) + _AddTitleWithRoot(topo, "architect").add() + _AddTitleWithRoot(topo, "idiot").add() + _AddTitleWithRoot(topo, "engineer").add() + # acl will not allow to delete title engineer + conn = UserAccount(topo.standalone, USER_WITH_ACI_DELADD).bind(PW_DM) + with pytest.raises(ldap.INSUFFICIENT_ACCESS): + _ModTitleArchitectJeffVedder(topo, "engineer", conn).delete() + + +def test_allow_modify_delete(topo, _add_user, aci_of_user, request): + """Testing the targattrfilters keyword that allows access control based on the value of the + attributes being added (or deleted)) + Don't Allow modify:replace because of lack of delete rights + + :id: 962842d2-7aa9-11e8-b39e-8c16451d917b + :setup: server + :steps: + 1. Add test entry + 2. Add ACI + 3. User should follow ACI role + :expectedresults: + 1. Entry should be added + 2. Operation should succeed + 3. 
Operation should succeed + """ + ACI_BODY = '(targattrfilters = "add=title:(title=engineer), del=title:(|(title=architect))")' \ + '(version 3.0; acl "{}"; allow (write) ' \ + '(userdn = "ldap:///{}") ;)'.format(request.node.name, USER_WITH_ACI_DELADD) + Domain(topo.standalone, DEFAULT_SUFFIX).add("aci", ACI_BODY) + _AddTitleWithRoot(topo, "architect").add() + _AddTitleWithRoot(topo, "idiot").add() + conn = UserAccount(topo.standalone, USER_WITH_ACI_DELADD).bind(PW_DM) + _ModTitleArchitectJeffVedder(topo, "architect", conn).delete() + # acl will not allow to delete title idiot + conn = UserAccount(topo.standalone, USER_WITH_ACI_DELADD).bind(PW_DM) + with pytest.raises(ldap.INSUFFICIENT_ACCESS): + _ModTitleArchitectJeffVedder(topo, "idiot", conn).delete() + + +def test_replace_an_attribute_if_we_lack(topo, _add_user, aci_of_user, request): + """Testing the targattrfilters keyword that allows access control based on the value of the + attributes being added (or deleted)) + Test that we cannot MODIFY:replace an attribute if we lack + + :id: 9b1e6afa-7aa9-11e8-ac5b-8c16451d917b + :setup: server + :steps: + 1. Add test entry + 2. Add ACI + 3. User should follow ACI role + :expectedresults: + 1. Entry should be added + 2. Operation should succeed + 3. 
Operation should succeed + """ + ACI_BODY = '(targattrfilters = "add=title:(title=engineer), del=title:(|(title=architect))")' \ + '(version 3.0; acl "{}"; allow (write) ' \ + '(userdn = "ldap:///{}") ;)'.format(request.node.name, USER_WITH_ACI_DELADD) + Domain(topo.standalone, DEFAULT_SUFFIX).add("aci", ACI_BODY) + _AddTitleWithRoot(topo, "architect").add() + _AddTitleWithRoot(topo, "idiot").add() + conn = UserAccount(topo.standalone, USER_WITH_ACI_DELADD).bind(PW_DM) + _ModTitleArchitectJeffVedder(topo, "architect", conn).delete() + # acl will not allow to delete title idiot + with pytest.raises(ldap.INSUFFICIENT_ACCESS): + _ModTitleArchitectJeffVedder(topo, "idiot", conn).delete() + + +def test_remove_an_attribute_if_we_have_del_rights_to_all_attr_value( + topo, _add_user, aci_of_user, request +): + """Testing the targattrfilters keyword that allows access control based on the value of the + attributes being added (or deleted)) + Test that we can use MODIFY:delete to entirely remove an attribute if we have del rights + to all attr values negative case tested next. + + :id: a0c9e0c4-7aa9-11e8-8880-8c16451d917b + :setup: server + :steps: + 1. Add test entry + 2. Add ACI + 3. User should follow ACI role + :expectedresults: + 1. Entry should be added + 2. Operation should succeed + 3. 
Operation should succeed + """ + ACI_BODY = '(targattrfilters = "add=title:(title=engineer), del=title:(|(title=architect)' \ + '(title=idiot))")(version 3.0; acl "{}"; allow (write)' \ + ' (userdn = "ldap:///{}") ;)'.format(request.node.name, USER_WITH_ACI_DELADD) + Domain(topo.standalone, DEFAULT_SUFFIX).add("aci", ACI_BODY) + _AddTitleWithRoot(topo, "architect").add() + _AddTitleWithRoot(topo, "idiot").add() + # acl will allow to delete title idiot + conn = UserAccount(topo.standalone, USER_WITH_ACI_DELADD).bind(PW_DM) + _DelTitleArchitectJeffVedder(topo,conn).delete() + + +def test_remove_an_attribute_if_we_donot_have_del_rights_to_all_attr_value( + topo, _add_user, aci_of_user, request +): + """Testing the targattrfilters keyword that allows access control based on the value of the + attributes being added (or deleted)) + Test that we can use MODIFY:delete to entirely remove an attribute if we have not del + rights to all attr values + + :id: a6862eaa-7aa9-11e8-8bf9-8c16451d917b + :setup: server + :steps: + 1. Add test entry + 2. Add ACI + 3. User should follow ACI role + :expectedresults: + 1. Entry should be added + 2. Operation should succeed + 3. 
Operation should succeed + """ + ACI_BODY = '(targattrfilters = "add=title:(title=engineer), del=title:(|(title=architect)' \ + '(title=idiot))")(version 3.0; acl "{}"; allow (write) ' \ + '(userdn = "ldap:///{}") ;)'.format(request.node.name, USER_WITH_ACI_DELADD) + Domain(topo.standalone, DEFAULT_SUFFIX).add("aci", ACI_BODY) + _AddTitleWithRoot(topo, "architect").add() + _AddTitleWithRoot(topo, "sailor").add() + # aci will not allow to delete all titles + conn = UserAccount(topo.standalone, USER_WITH_ACI_DELADD).bind(PW_DM) + with pytest.raises(ldap.INSUFFICIENT_ACCESS): + _DelTitleArchitectJeffVedder(topo, conn).delete() + + +def test_remove_an_attribute_if_we_have_del_rights_to_all_attr_values( + topo, _add_user, aci_of_user, request +): + """Testing the targattrfilters keyword that allows access control based on the value of the + attributes being added (or deleted)) + Test that we can use MODIFY:replace to entirely remove an attribute if we have del rights to all attr values + + :id: ab04c7e8-7aa9-11e8-84db-8c16451d917b + :setup: server + :steps: + 1. Add test entry + 2. Add ACI + 3. User should follow ACI role + :expectedresults: + 1. Entry should be added + 2. Operation should succeed + 3. 
Operation should succeed + """ + ACI_BODY = '(targattrfilters = "add=title:(title=engineer), del=title:(|(title=architect)' \ + '(title=idiot))")(version 3.0; acl "{}"; allow (write) ' \ + '(userdn = "ldap:///{}") ;)'.format(request.node.name, USER_WITH_ACI_DELADD) + Domain(topo.standalone, DEFAULT_SUFFIX).add("aci", ACI_BODY) + _AddTitleWithRoot(topo, "architect").add() + _AddTitleWithRoot(topo, "idiot").add() + # aci allowing to delete an_attribute_if_we_have_del_rights_to_all_attr_values + conn = UserAccount(topo.standalone, USER_WITH_ACI_DELADD).bind(PW_DM) + _DelTitleArchitectJeffVedder(topo, conn).delete() + + +def test_cantnot_delete_an_entry_with_attribute_values_we_are_not_allowed_delete( + topo, _add_user, aci_of_user, request +): + """Testing the targattrfilters keyword that allows access control based on the value of + the attributes being added (or deleted)) + Test we cannot DELETE an entry with attribute values we are not allowed delete + + :id: b525d94c-7aa9-11e8-8539-8c16451d917b + :setup: server + :steps: + 1. Add test entry + 2. Add ACI + 3. User should follow ACI role + :expectedresults: + 1. Entry should be added + 2. Operation should succeed + 3. 
Operation should succeed + """ + ACI_BODY = '(targattrfilters = "add=title:(|(title=engineer)(title=cool dude)(title=scum)), ' \ + 'del=title:(|(title=engineer)(title=cool dude)(title=scum))")(version 3.0; ' \ + 'aci "{}"; allow (delete) userdn = "ldap:///{}";)'.format(request.node.name, USER_WITH_ACI_DELADD) + Domain(topo.standalone, DEFAULT_SUFFIX).add("aci", ACI_BODY) + _AddFREDWithRoot(topo, "engineer", "cool dude", "ANuj").create() + conn = UserAccount(topo.standalone, USER_WITH_ACI_DELADD).bind(PW_DM) + # aci will not allow to delete + with pytest.raises(ldap.INSUFFICIENT_ACCESS): + UserAccount(conn, FRED).delete() + + +def test_we_can_add_and_delete_an_entry_with_attribute_values_we_are_allowed_add_and_delete( + topo, _add_user, aci_of_user, request +): + """Testing the targattrfilters keyword that allows access control based on the value of the + attributes being added (or deleted)) + Test we can DELETE an entry with attribute values we are allowed delete + + :id: ba138e54-7aa9-11e8-8037-8c16451d917b + :setup: server + :steps: + 1. Add test entry + 2. Add ACI + 3. User should follow ACI role + :expectedresults: + 1. Entry should be added + 2. Operation should succeed + 3. 
Operation should succeed + """ + ACI_BODY = '(targattrfilters = "add=title:(|(title=engineer)(title=cool dude)(title=scum)), ' \ + 'del=title:(|(title=engineer)(title=cool dude)(title=scum))")(version 3.0; ' \ + 'aci "{}"; allow (delete) userdn = "ldap:///{}";)'.format(request.node.name, USER_WITH_ACI_DELADD) + Domain(topo.standalone, DEFAULT_SUFFIX).add("aci", ACI_BODY) + _AddFREDWithRoot(topo, "engineer", "cool dude", "scum").create() + conn = UserAccount(topo.standalone, USER_WITH_ACI_DELADD).bind(PW_DM) + # aci will allow to delete + UserAccount(conn, FRED).delete() + + +def test_allow_title(topo, _add_user, aci_of_user, request): + """Testing the targattrfilters keyword that allows access control based on the value of the + attributes being added (or deleted)) + Test that if attr appears in targetattr and in targattrfilters then targattrfilters + applies--ie. targattrfilters is a refinement of targattrfilters. + + :id: beadf328-7aa9-11e8-bb08-8c16451d917b + :setup: server + :steps: + 1. Add test entry + 2. Add ACI + 3. User should follow ACI role + :expectedresults: + 1. Entry should be added + 2. Operation should succeed + 3. 
Operation should succeed + """ + ACI_BODY = '(targetattr="title")(targattrfilters = "add=title:(|(title=engineer)' \ + '(title=cool dude)(title=scum)), del=title:(|(title=engineer)(title=cool dude)' \ + '(title=scum))")(version 3.0; aci "{}"; allow (write) ' \ + 'userdn = "ldap:///{}";)'.format(request.node.name, USER_WITH_ACI_DELADD) + Domain(topo.standalone, DEFAULT_SUFFIX).add("aci", ACI_BODY) + _AddTitleWithRoot(topo, "engineer").add() + _AddTitleWithRoot(topo, "cool dude").add() + # # aci will not allow to add title topdog + conn = UserAccount(topo.standalone, USER_WITH_ACI_DELADD).bind(PW_DM) + with pytest.raises(ldap.INSUFFICIENT_ACCESS): + _ModTitleArchitectJeffVedder(topo, "topdog", conn).add() + + +def test_allow_to_modify(topo, _add_user, aci_of_user, request): + """Testing the targattrfilters keyword that allows access control based on the value of the + attributes being added (or deleted)) + Test that I can have secretary in targetattr and title in targattrfilters. + + :id: c32e4704-7aa9-11e8-951d-8c16451d917b + :setup: server + :steps: + 1. Add test entry + 2. Add ACI + 3. User should follow ACI role + :expectedresults: + 1. Entry should be added + 2. Operation should succeed + 3. 
Operation should succeed + """ + ACI_BODY = '(targetattr="secretary")(targattrfilters = "add=title:(|(title=engineer)' \ + '(title=cool dude)(title=scum)), del=title:(|(title=engineer)(title=cool dude)' \ + '(title=scum))")(version 3.0; aci "{}"; allow (write)' \ + ' userdn = "ldap:///{}";)'.format(request.node.name, USER_WITH_ACI_DELADD) + Domain(topo.standalone, DEFAULT_SUFFIX).add("aci", ACI_BODY) + _AddTitleWithRoot(topo, "engineer").add() + _AddTitleWithRoot(topo, "cool dude").add() + conn = UserAccount(topo.standalone, USER_WITH_ACI_DELADD).bind(PW_DM) + user = UserAccount(conn, USER_DELADD) + # aci will allow to add 'secretary', "cn=emporte quoi + user.add('secretary', "cn=emporte quoi, {}".format(DEFAULT_SUFFIX)) + assert user.get_attr_val('secretary') + + +def test_selfwrite_does_not_confer_write_on_a_targattrfilters_atribute(topo, _add_user, aci_of_user, request): + """Testing the targattrfilters keyword that allows access control based on the value of + the attributes being added (or deleted)) + Selfwrite does not confer "write" on a targattrfilters atribute. + + :id: c7b9ec2e-7aa9-11e8-ba4a-8c16451d917b + :setup: server + :steps: + 1. Add test entry + 2. Add ACI + 3. User should follow ACI role + :expectedresults: + 1. Entry should be added + 2. Operation should succeed + 3. 
Operation should succeed + """ + ACI_BODY = '(targattrfilters = "add=title:(|(title=engineer)(title=cool dude)(title=scum)), ' \ + 'del=title:(|(title=engineer)(title=cool dude)(title=scum))")(version 3.0; ' \ + 'aci "{}"; allow (selfwrite) userdn = "ldap:///{}";)'.format(request.node.name, USER_WITH_ACI_DELADD) + Domain(topo.standalone, DEFAULT_SUFFIX).add("aci", ACI_BODY) + # aci will not allow to add selfwrite_does_not_confer_write_on_a_targattrfilters_atribute + conn = UserAccount(topo.standalone, USER_WITH_ACI_DELADD).bind(PW_DM) + with pytest.raises(ldap.INSUFFICIENT_ACCESS): + _ModTitleArchitectJeffVedder(topo, "engineer", conn).add() + + +def test_selfwrite_continues_to_give_rights_to_attr_in_targetattr_list( + topo, _add_user, aci_of_user, request +): + """Testing the targattrfilters keyword that allows access control based on the value of + the attributes being added (or deleted)) + Selfwrite continues to give rights to attr in targetattr list. + + :id: cd287680-7aa9-11e8-a8e2-8c16451d917b + :setup: server + :steps: + 1. Add test entry + 2. Add ACI + 3. User should follow ACI role + :expectedresults: + 1. Entry should be added + 2. Operation should succeed + 3. 
Operation should succeed + """ + ACI_BODY = '(targetattr="secretary")(targattrfilters = "add=title:(|(title=engineer)' \ + '(title=cool dude)(title=scum)), del=title:(|(title=engineer)(title=cool dude)' \ + '(title=scum))")(version 3.0; aci "{}"; allow (selfwrite) ' \ + 'userdn = "ldap:///{}";)'.format(request.node.name, USER_WITH_ACI_DELADD) + Domain(topo.standalone, DEFAULT_SUFFIX).add("aci", ACI_BODY) + # selfwrite_continues_to_give_rights_to_attr_in_targetattr_list + conn = UserAccount(topo.standalone, USER_WITH_ACI_DELADD).bind(PW_DM) + with pytest.raises(ldap.INSUFFICIENT_ACCESS): + _ModTitleArchitectJeffVedder(topo, "engineer", conn).add() + + +def test_add_an_attribute_value_we_are_allowed_to_add_with_ldapanyone( + topo, _add_user, aci_of_user, request +): + """Testing the targattrfilters keyword that allows access control based on the value of the + attributes being added (or deleted)) + Test that we can MODIFY:add an attribute value we are allowed to add with ldap:///anyone + + :id: d1e1d7ac-7aa9-11e8-b968-8c16451d917b + :setup: server + :steps: + 1. Add test entry + 2. Add ACI + 3. User should follow ACI role + :expectedresults: + 1. Entry should be added + 2. Operation should succeed + 3. 
Operation should succeed + """ + ACI_BODY = '(targattrfilters = "add=title:(title=architect), del=title:(title=architect)")' \ + '(version 3.0; acl "{}"; allow (write) userdn = "ldap:///anyone";)'.format(request.node.name) + Domain(topo.standalone, DEFAULT_SUFFIX).add("aci", ACI_BODY) + _AddTitleWithRoot(topo, "engineer").add() + # aci will allow to add title architect + conn = UserAccount(topo.standalone, USER_WITH_ACI_DELADD).bind(PW_DM) + _ModTitleArchitectJeffVedder(topo, "architect", conn).add() + + +def test_hierarchy(topo, _add_user, aci_of_user, request): + """Testing the targattrfilters keyword that allows access control based on the value of + the attributes being added (or deleted)) + Test that with two targattrfilters in the hierarchy that the general one applies. + This is the correct behaviour, even if it's a bit confusing + + :id: d7ae354a-7aa9-11e8-8b0d-8c16451d917b + :setup: server + :steps: + 1. Add test entry + 2. Add ACI + 3. User should follow ACI role + :expectedresults: + 1. Entry should be added + 2. Operation should succeed + 3. 
Operation should succeed + """ + ACI_BODY = '(targattrfilters = "add=title:(title=arch*)")(version 3.0; acl "{}"; ' \ + 'allow (write) (userdn = "ldap:///anyone") ;)'.format(request.node.name) + ACI_BODY1 = '(targattrfilters = "add=title:(title=architect)")(version 3.0; ' \ + 'acl "{}"; allow (write) (userdn = "ldap:///anyone") ;)'.format(request.node.name) + Domain(topo.standalone, DEFAULT_SUFFIX).add("aci", ACI_BODY) + Domain(topo.standalone, DEFAULT_SUFFIX).add("aci", ACI_BODY1) + _AddTitleWithRoot(topo, "engineer").add() + # aci will allow to add title architect + conn = UserAccount(topo.standalone, USER_WITH_ACI_DELADD).bind(PW_DM) + _ModTitleArchitectJeffVedder(topo, "architect", conn).add() + # aci will not allow to add title architect + with pytest.raises(ldap.INSUFFICIENT_ACCESS): + _ModTitleArchitectJeffVedder(topo, "engineer", conn).add() + + +def test_targattrfilters_and_search_permissions_and_that_ldapmodify_works_as_expected( + topo, _add_user, aci_of_user, request +): + """Testing the targattrfilters keyword that allows access control based on the value of the + attributes being added (or deleted)) + Test that we can have targattrfilters and search permissions and that ldapmodify works as expected. + + :id: ddae7a22-7aa9-11e8-ad6b-8c16451d917b + :setup: server + :steps: + 1. Add test entry + 2. Add ACI + 3. User should follow ACI role + :expectedresults: + 1. Entry should be added + 2. Operation should succeed + 3. 
Operation should succeed + """ + ACI_BODY = '(targetattr="secretary || objectclass || mail")(targattrfilters = "add=title:' \ + '(title=arch*)")(version 3.0; acl "{}"; ' \ + 'allow (write,read,search,compare) (userdn = "ldap:///anyone") ;)'.format(request.node.name) + Domain(topo.standalone, DEFAULT_SUFFIX).add("aci", ACI_BODY) + # aci will allow to add title architect + conn = UserAccount(topo.standalone, USER_WITH_ACI_DELADD).bind(PW_DM) + _ModTitleArchitectJeffVedder(topo, "architect", conn).add() + + +def test_targattrfilters_and_search_permissions_and_that_ldapmodify_works_as_expected_two( + topo, _add_user, aci_of_user, request +): + """Testing the targattrfilters keyword that allows access control based on the value of + the attributes being added (or deleted)) + Test that we can have targattrfilters and search permissions and that ldapsearch works as expected. + + :id: e25d116e-7aa9-11e8-81d8-8c16451d917b + :setup: server + :steps: + 1. Add test entry + 2. Add ACI + 3. User should follow ACI role + :expectedresults: + 1. Entry should be added + 2. Operation should succeed + 3. 
Operation should succeed + """ + ACI_BODY = '(targetattr="secretary || objectclass || mail")(targattrfilters = ' \ + '"add=title:(title=arch*)")(version 3.0; acl "{}"; allow ' \ + '(write,read,search,compare) (userdn = "ldap:///anyone") ;)'.format(request.node.name) + Domain(topo.standalone, DEFAULT_SUFFIX).add("aci", ACI_BODY) + conn = Anonymous(topo.standalone).bind() + user = UserAccount(conn, USER_DELADD) + #targattrfilters_and_search_permissions_and_that_ldapmodify_works_as_expected + assert user.get_attr_vals('secretary') + assert user.get_attr_vals('mail') + assert user.get_attr_vals('objectclass') + + +if __name__ == "__main__": + CURRENT_FILE = os.path.realpath(__file__) + pytest.main("-s -v %s" % CURRENT_FILE) diff --git a/dirsrvtests/tests/suites/attr_encryption/__init__.py b/dirsrvtests/tests/suites/attr_encryption/__init__.py new file mode 100644 index 0000000..52af093 --- /dev/null +++ b/dirsrvtests/tests/suites/attr_encryption/__init__.py @@ -0,0 +1,3 @@ +""" + :Requirement: 389-ds-base: Attribute Encryption +""" diff --git a/dirsrvtests/tests/suites/attr_encryption/attr_encryption_test.py b/dirsrvtests/tests/suites/attr_encryption/attr_encryption_test.py new file mode 100644 index 0000000..15ac610 --- /dev/null +++ b/dirsrvtests/tests/suites/attr_encryption/attr_encryption_test.py @@ -0,0 +1,459 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2020 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. 
+# --- END COPYRIGHT BLOCK --- +# + +import logging +import pytest +from lib389.tasks import * +from lib389.topologies import topology_st as topo +from lib389.utils import * +from lib389._constants import DEFAULT_SUFFIX +from lib389.idm.user import UserAccounts, TEST_USER_PROPERTIES +from lib389.backend import Backends +from lib389.idm.domain import Domain +from lib389.encrypted_attributes import EncryptedAttrs + +pytestmark = pytest.mark.tier1 + +USER_DN = 'uid=test_user,%s' % DEFAULT_SUFFIX + +logging.getLogger(__name__).setLevel(logging.INFO) +log = logging.getLogger(__name__) + + +@pytest.fixture(scope="module") +def enable_user_attr_encryption(topo, request): + """ Enables attribute encryption for various attributes + Adds a test user with encrypted attributes + """ + + log.info("Enable TLS for attribute encryption") + topo.standalone.enable_tls() + + log.info("Enables attribute encryption") + backends = Backends(topo.standalone) + backend = backends.list()[0] + encrypt_attrs = EncryptedAttrs(topo.standalone, basedn='cn=encrypted attributes,{}'.format(backend.dn)) + log.info("Enables attribute encryption for employeeNumber and telephoneNumber") + emp_num_encrypt = encrypt_attrs.create(properties={'cn': 'employeeNumber', 'nsEncryptionAlgorithm': 'AES'}) + telephone_encrypt = encrypt_attrs.create(properties={'cn': 'telephoneNumber', 'nsEncryptionAlgorithm': '3DES'}) + + log.info("Add a test user with encrypted attributes") + users = UserAccounts(topo.standalone, DEFAULT_SUFFIX) + test_user = users.create(properties=TEST_USER_PROPERTIES) + test_user.replace('employeeNumber', '1000') + test_user.replace('telephoneNumber', '1234567890') + + def fin(): + log.info("Remove attribute encryption for various attributes") + emp_num_encrypt.delete() + telephone_encrypt.delete() + + request.addfinalizer(fin) + return test_user + + +def test_basic(topo, enable_user_attr_encryption): + """Tests encrypted attributes with a test user entry + + :id: 
d767d5c8-b934-4b14-9774-bd13480d81b3 + :setup: Standalone instance + Enable AES encryption config on employeenumber + Enable 3DES encryption config on telephoneNumber + Add a test user with with encrypted attributes + :steps: + 1. Restart the server + 2. Check employeenumber encryption enabled + 3. Check telephoneNumber encryption enabled + 4. Check that encrypted attribute is present for user i.e. telephoneNumber + :expectedresults: + 1. This should be successful + 2. This should be successful + 3. This should be successful + 4. This should be successful + """ + + log.info("Restart the server") + topo.standalone.restart() + backends = Backends(topo.standalone) + backend = backends.list()[0] + encrypt_attrs = backend.get_encrypted_attrs() + + log.info("Extracting values of cn from the list of objects in encrypt_attrs") + log.info("And appending the cn values in a list") + enc_attrs_cns = [] + for enc_attr in encrypt_attrs: + enc_attrs_cns.append(enc_attr.rdn) + + log.info("Check employeenumber encryption is enabled") + assert "employeeNumber" in enc_attrs_cns + + log.info("Check telephoneNumber encryption is enabled") + assert "telephoneNumber" in enc_attrs_cns + + log.info("Check that encrypted attribute is present for user i.e. telephoneNumber") + assert enable_user_attr_encryption.present('telephoneNumber') + + +def test_export_import_ciphertext(topo, enable_user_attr_encryption): + """Configure attribute encryption, store some data, check that we can export the ciphertext + + :id: b433e215-2926-48a5-818f-c21abc40fc2d + :setup: Standalone instance + Enable AES encryption config on employeenumber + Enable 3DES encryption config on telephoneNumber + Add a test user with encrypted attributes + :steps: + 1. Export data as ciphertext + 2. Check that the attribute is present in the exported file + 3. Check that the encrypted value of attribute is not present in the exported file + 4. Delete the test user entry with encrypted data + 5. 
Import the previously exported data as ciphertext + 6. Check attribute telephoneNumber should be imported + :expectedresults: + 1. This should be successful + 2. This should be successful + 3. This should be successful + 4. This should be successful + 5. This should be successful + 6. This should be successful + """ + + log.info("Export data as ciphertext") + export_ldif = os.path.join(topo.standalone.ds_paths.ldif_dir, "export_ciphertext.ldif") + + # Offline export + topo.standalone.stop() + if not topo.standalone.db2ldif(bename=DEFAULT_BENAME, suffixes=(DEFAULT_SUFFIX,), + excludeSuffixes=None, encrypt=False, repl_data=None, outputfile=export_ldif): + log.fatal('Failed to run offline db2ldif') + assert False + topo.standalone.start() + + log.info("Check that the attribute is present in the exported file") + log.info("Check that the encrypted value of attribute is not present in the exported file") + with open(export_ldif, 'r') as ldif_file: + ldif = ldif_file.read() + assert 'telephoneNumber' in ldif + assert 'telephoneNumber: 1234567890' not in ldif + + log.info("Delete the test user entry with encrypted data") + enable_user_attr_encryption.delete() + + log.info("Import data as ciphertext, which was exported previously") + import_ldif = os.path.join(topo.standalone.ds_paths.ldif_dir, "export_ciphertext.ldif") + + # Offline export + topo.standalone.stop() + if not topo.standalone.ldif2db(bename=DEFAULT_BENAME, suffixes=(DEFAULT_SUFFIX,), + excludeSuffixes=None, encrypt=False, import_file=import_ldif): + log.fatal('Failed to run offline ldif2db') + assert False + topo.standalone.start() + + log.info("Check that the data with encrypted attribute is imported properly") + users = UserAccounts(topo.standalone, DEFAULT_SUFFIX) + user = users.get('testuser') + assert user.present("telephoneNumber") + + +def test_export_import_plaintext(topo, enable_user_attr_encryption): + """Configure attribute encryption, store some data, check that we can export the plain text + + 
:id: b171e215-0456-48a5-245f-c21abc40fc2d + :setup: Standalone instance + Enable AES encryption config on employeenumber + Enable 3DES encryption config on telephoneNumber + Add a test user with encrypted attributes + :steps: + 1. Export data as plain text + 2. Check that the attribute is present in the exported file + 3. Check that the encrypted value of attribute is also present in the exported file + 4. Delete the test user entry with encrypted data + 5. Import data as plaintext + 6. Check attribute value of telephoneNumber + :expectedresults: + 1. This should be successful + 2. This should be successful + 3. This should be successful + 4. This should be successful + 5. This should be successful + 6. This should be successful + """ + + log.info("Export data as plain text") + export_ldif = os.path.join(topo.standalone.ds_paths.ldif_dir, "export_plaintext.ldif") + + # Offline export + topo.standalone.stop() + if not topo.standalone.db2ldif(bename=DEFAULT_BENAME, suffixes=(DEFAULT_SUFFIX,), + excludeSuffixes=None, encrypt=True, repl_data=None, outputfile=export_ldif): + log.fatal('Failed to run offline db2ldif') + assert False + topo.standalone.start() + + log.info("Check that the attribute is present in the exported file") + log.info("Check that the plain text value of the encrypted attribute is present in the exported file") + with open(export_ldif, 'r') as ldif_file: + assert 'telephoneNumber: 1234567890' in ldif_file.read() + + log.info("Delete the test user entry with encrypted data") + enable_user_attr_encryption.delete() + + log.info("Import data as plain text, which was exported previously") + import_ldif = os.path.join(topo.standalone.ds_paths.ldif_dir, "export_plaintext.ldif") + + # Offline export + topo.standalone.stop() + if not topo.standalone.ldif2db(bename=DEFAULT_BENAME, suffixes=(DEFAULT_SUFFIX,), + excludeSuffixes=None, encrypt=True, import_file=import_ldif): + log.fatal('Failed to run offline ldif2db') + assert False + topo.standalone.start() + + 
log.info("Check that the attribute is imported properly") + users = UserAccounts(topo.standalone, DEFAULT_SUFFIX) + user = users.get('testuser') + assert user.present("telephoneNumber") + + +def test_attr_encryption_unindexed(topo, enable_user_attr_encryption): + """Configure attribute encryption for an un-indexed attribute, check that we can export encrypted data + + :id: d3ef38e1-bb5a-44d8-a3a4-4a25a57e3454 + :setup: Standalone instance + Enable AES encryption config on employeenumber + Enable 3DES encryption config on telephoneNumber + Add a test user with encrypted attributes + :steps: + 1. Export data as cipher text + 2. Check that the unindexed attribute employeenumber is present in exported ldif file + 3. Check that the unindexed attribute employeenumber value is not present in exported ldif file + :expectedresults: + 1. This should be successful + 2. This should be successful + 3. This should be successful + """ + log.info("Export data as cipher text") + export_ldif = os.path.join(topo.standalone.ds_paths.ldif_dir, "emp_num_ciphertext.ldif") + + # Offline export + topo.standalone.stop() + if not topo.standalone.db2ldif(bename=DEFAULT_BENAME, suffixes=(DEFAULT_SUFFIX,), + excludeSuffixes=None, encrypt=False, repl_data=None, outputfile=export_ldif): + log.fatal('Failed to run offline db2ldif') + assert False + topo.standalone.start() + + log.info("Check that the attribute is present in the exported file") + log.info("Check that the encrypted value of attribute is not present in the exported file") + with open(export_ldif, 'r') as ldif_file: + ldif = ldif_file.read() + assert 'employeeNumber' in ldif + assert 'employeeNumber: 1000' not in ldif + + +def test_attr_encryption_multiple_backends(topo, enable_user_attr_encryption): + """Tests Configuration of attribute encryption for multiple backends + Where both the backends have attribute encryption + + :id: 9ece3e6c-96b7-4dd5-b092-d76dda23472d + :setup: Standalone instance + SSL Enabled + :steps: + 1. 
Add two test backends + 2. Configure attribute encryption for telephoneNumber in one test backend + 3. Configure attribute encryption for employeenumber in another test backend + 4. Add a test user in both backends with encrypted attributes + 5. Export data as ciphertext from both backends + 6. Check that telephoneNumber is encrypted in the ldif file of db1 + 7. Check that employeeNumber is encrypted in the ldif file of db2 + 8. Delete both test backends + :expectedresults: + 1. This should be successful + 2. This should be successful + 3. This should be successful + 4. This should be successful + 5. This should be successful + 6. This should be successful + 7. This should be successful + 8. This should be successful + """ + log.info("Add two test backends") + test_suffix1 = 'dc=test1,dc=com' + test_db1 = 'test_db1' + test_suffix2 = 'dc=test2,dc=com' + test_db2 = 'test_db2' + + # Create backends + backends = Backends(topo.standalone) + backend = backends.list()[0] + test_backend1 = backends.create(properties={'cn': test_db1, + 'nsslapd-suffix': test_suffix1}) + test_backend2 = backends.create(properties={'cn': test_db2, + 'nsslapd-suffix': test_suffix2}) + + # Create the top of the tree + suffix1 = Domain(topo.standalone, test_suffix1) + test1 = suffix1.create(properties={'dc': 'test1'}) + suffix2 = Domain(topo.standalone, test_suffix2) + test2 = suffix2.create(properties={'dc': 'test2'}) + + log.info("Enables attribute encryption for telephoneNumber in test_backend1") + backend1_encrypt_attrs = EncryptedAttrs(topo.standalone, basedn='cn=encrypted attributes,{}'.format(test_backend1.dn)) + b1_encrypt = backend1_encrypt_attrs.create(properties={'cn': 'telephoneNumber', + 'nsEncryptionAlgorithm': 'AES'}) + + log.info("Enables attribute encryption for employeeNumber in test_backend2") + backend2_encrypt_attrs = EncryptedAttrs(topo.standalone, basedn='cn=encrypted attributes,{}'.format(test_backend2.dn)) + b2_encrypt = backend2_encrypt_attrs.create(properties={'cn': 
'employeeNumber', + 'nsEncryptionAlgorithm': 'AES'}) + + log.info("Add a test user with encrypted attributes in both backends") + users = UserAccounts(topo.standalone, test1.dn, None) + test_user = users.create(properties=TEST_USER_PROPERTIES) + test_user.replace('telephoneNumber', '1234567890') + + users = UserAccounts(topo.standalone, test2.dn, None) + test_user = users.create(properties=TEST_USER_PROPERTIES) + test_user.replace('employeeNumber', '1000') + + log.info("Export data as ciphertext from both backends") + export_db1 = os.path.join(topo.standalone.ds_paths.ldif_dir, "export_db1.ldif") + export_db2 = os.path.join(topo.standalone.ds_paths.ldif_dir, "export_db2.ldif") + + # Offline export + topo.standalone.stop() + if not topo.standalone.db2ldif(bename=test_db1, suffixes=(test_suffix1,), + excludeSuffixes=None, encrypt=False, repl_data=None, outputfile=export_db1): + log.fatal('Failed to run offline db2ldif') + assert False + + if not topo.standalone.db2ldif(bename=test_db2, suffixes=(test_suffix2,), + excludeSuffixes=None, encrypt=False, repl_data=None, outputfile=export_db2): + log.fatal('Failed to run offline db2ldif') + assert False + topo.standalone.start() + + log.info("Check that the attribute is present in the exported file in db1") + log.info("Check that the encrypted value of attribute is not present in the exported file in db1") + with open(export_db1, 'r') as ldif_file: + ldif = ldif_file.read() + assert 'telephoneNumber' in ldif + assert 'telephoneNumber: 1234567890' not in ldif + + log.info("Check that the attribute is present in the exported file in db2") + log.info("Check that the encrypted value of attribute is not present in the exported file in db2") + with open(export_db2, 'r') as ldif_file: + ldif = ldif_file.read() + assert 'employeeNumber' in ldif + assert 'employeeNumber: 1000' not in ldif + + log.info("Delete test backends") + test_backend1.delete() + test_backend2.delete() + + +def test_attr_encryption_backends(topo, 
enable_user_attr_encryption): + """Tests Configuration of attribute encryption for single backend + where more backends are present + + :id: f3ef40e1-17d6-44d8-a3a4-4a25a57e9064 + :setup: Standalone instance + SSL Enabled + :steps: + 1. Add two test backends + 2. Configure attribute encryption for telephoneNumber in one test backend + 3. Add a test user in both backends with telephoneNumber + 4. Export ldif from both test backends + 5. Check that telephoneNumber is encrypted in the ldif file of db1 + 6. Check that telephoneNumber is not encrypted in the ldif file of db2 + 7. Delete both test backends + :expectedresults: + 1. This should be successful + 2. This should be successful + 3. This should be successful + 4. This should be successful + 5. This should be successful + 6. This should be successful + 7. This should be successful + """ + log.info("Add two test backends") + test_suffix1 = 'dc=test1,dc=com' + test_db1 = 'test_db1' + test_suffix2 = 'dc=test2,dc=com' + test_db2 = 'test_db2' + + # Create backends + backends = Backends(topo.standalone) + test_backend1 = backends.create(properties={'cn': test_db1, + 'nsslapd-suffix': test_suffix1}) + test_backend2 = backends.create(properties={'cn': test_db2, + 'nsslapd-suffix': test_suffix2}) + + # Create the top of the tree + suffix1 = Domain(topo.standalone, test_suffix1) + test1 = suffix1.create(properties={'dc': 'test1'}) + suffix2 = Domain(topo.standalone, test_suffix2) + test2 = suffix2.create(properties={'dc': 'test2'}) + + log.info("Enables attribute encryption for telephoneNumber in test_backend1") + backend1_encrypt_attrs = EncryptedAttrs(topo.standalone, basedn='cn=encrypted attributes,{}'.format(test_backend1.dn)) + b1_encrypt = backend1_encrypt_attrs.create(properties={'cn': 'telephoneNumber', + 'nsEncryptionAlgorithm': 'AES'}) + + log.info("Add a test user with telephoneNumber in both backends") + users = UserAccounts(topo.standalone, test1.dn, None) + test_user = 
users.create(properties=TEST_USER_PROPERTIES) + test_user.replace('telephoneNumber', '1234567890') + + users = UserAccounts(topo.standalone, test2.dn, None) + test_user = users.create(properties=TEST_USER_PROPERTIES) + test_user.replace('telephoneNumber', '1234567890') + + log.info("Export data as ciphertext from both backends") + export_db1 = os.path.join(topo.standalone.ds_paths.ldif_dir, "export_db1.ldif") + export_db2 = os.path.join(topo.standalone.ds_paths.ldif_dir, "export_db2.ldif") + + # Offline export + topo.standalone.stop() + if not topo.standalone.db2ldif(bename=test_db1, suffixes=(test_suffix1,), + excludeSuffixes=None, encrypt=False, repl_data=None, outputfile=export_db1): + log.fatal('Failed to run offline db2ldif') + assert False + + if not topo.standalone.db2ldif(bename=test_db2, suffixes=(test_suffix2,), + excludeSuffixes=None, encrypt=False, repl_data=None, outputfile=export_db2): + log.fatal('Failed to run offline db2ldif') + assert False + topo.standalone.start() + + log.info("Check that the attribute is present in the exported file in db1") + log.info("Check that the encrypted value of attribute is not present in the exported file in db1") + with open(export_db1, 'r') as ldif_file: + ldif = ldif_file.read() + assert 'telephoneNumber' in ldif + assert 'telephoneNumber: 1234567890' not in ldif + + log.info("Check that the attribute is present in the exported file in db2") + log.info("Check that the value of attribute is also present in the exported file in db2") + with open(export_db2, 'r') as ldif_file: + ldif = ldif_file.read() + assert 'telephoneNumber' in ldif + assert 'telephoneNumber: 1234567890' in ldif + + log.info("Delete test backends") + test_backend1.delete() + test_backend2.delete() + + +if __name__ == '__main__': + # Run isolated + # -s for DEBUG mode + CURRENT_FILE = os.path.realpath(__file__) + pytest.main("-s %s" % CURRENT_FILE) diff --git a/dirsrvtests/tests/suites/auth_token/__init__.py 
b/dirsrvtests/tests/suites/auth_token/__init__.py new file mode 100644 index 0000000..9e245fe --- /dev/null +++ b/dirsrvtests/tests/suites/auth_token/__init__.py @@ -0,0 +1,3 @@ +""" + :Requirement: 389-ds-base: Authentication Token +""" diff --git a/dirsrvtests/tests/suites/auth_token/basic_auth_test.py b/dirsrvtests/tests/suites/auth_token/basic_auth_test.py new file mode 100644 index 0000000..8c55013 --- /dev/null +++ b/dirsrvtests/tests/suites/auth_token/basic_auth_test.py @@ -0,0 +1,240 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2019 William Brown +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. +# --- END COPYRIGHT BLOCK --- + +import ldap +import pytest +import time +from lib389.idm.user import nsUserAccounts, UserAccounts +from lib389.topologies import topology_st as topology +from lib389.paths import Paths +from lib389.utils import ds_is_older +from lib389._constants import * +from lib389.idm.directorymanager import DirectoryManager +from lib389.idm.account import Anonymous +from lib389.extended_operations import LdapSSOTokenRequest + +default_paths = Paths() + +pytestmark = pytest.mark.tier1 + +USER_PASSWORD = "password aouoaeu" +TEST_KEY = "4PXhmtKG7iCdT9C49GoBdD92x5X1tvF3eW9bHq4ND2Q=" + +@pytest.mark.skipif(not default_paths.rust_enabled or ds_is_older('1.4.3.3'), reason="Auth tokens are not available in older versions") +def test_ldap_auth_token_config(topology): + """ Test that we are able to configure the ldapssotoken backend with various types and states. + + :id: e9b9360b-76df-40ef-9f45-b448df4c9eda + + :setup: Standalone instance + + :steps: + 1. Enable the feature + 2. Set a key manually. + 3. Regerate a key server side. + 4. Attempt to set invalid keys. + 5. Disable the feature + 6. Assert that key changes are rejected + + :expectedresults: + 1. Feature enables + 2. Key is set and accepted + 3. The key is regenerated and unique + 4. The key is rejected + 5. 
The disable functions online + 6. The key changes are rejected + """ + # Enable token + topology.standalone.config.set('nsslapd-enable-ldapssotoken', 'on') # enable it. + # Set a key + topology.standalone.config.set('nsslapd-ldapssotoken-secret', TEST_KEY) + # regen a key + topology.standalone.config.remove_all('nsslapd-ldapssotoken-secret') + k1 = topology.standalone.config.get_attr_val_utf8('nsslapd-ldapssotoken-secret') + assert(k1 != TEST_KEY) + # set an invalid key + with pytest.raises(ldap.UNWILLING_TO_PERFORM): + topology.standalone.config.set('nsslapd-ldapssotoken-secret', 'invalid key') + with pytest.raises(ldap.UNWILLING_TO_PERFORM): + topology.standalone.config.set('nsslapd-ldapssotoken-secret', '') + # Disable token + topology.standalone.config.set('nsslapd-enable-ldapssotoken', 'off') # disable it. + # Set a key + with pytest.raises(ldap.OPERATIONS_ERROR): + topology.standalone.config.set('nsslapd-ldapssotoken-secret', TEST_KEY) + # regen a key + with pytest.raises(ldap.OPERATIONS_ERROR): + topology.standalone.config.remove_all('nsslapd-ldapssotoken-secret') + + +@pytest.mark.skipif(not default_paths.rust_enabled or ds_is_older('1.4.3.3'), reason="Auth tokens are not available in older versions") +def test_ldap_auth_token_nsuser(topology): + """ + Test that we can generate and authenticate with authentication tokens + for users in the directory, as well as security properties around these + tokens. + + :id: 65335341-c85b-457d-ac7d-c4079ac90a60 + + :setup: Standalone instance + + :steps: + 1. Create an account + 2. Generate a token for the account + 3. Authenticate with the token + 4. Assert that a token can not be issued from a token-authed account + 5. Regenerate the server key + 6. Assert the token no longer authenticates + + :expectedresults: + 1. Account is created + 2. Token is generated + 3. Token authenticates + 4. Token is NOT issued + 5. The key is regenerated + 6. The token fails to bind. 
+ """ + topology.standalone.enable_tls() + topology.standalone.config.set('nsslapd-enable-ldapssotoken', 'on') # enable it. + nsusers = nsUserAccounts(topology.standalone, DEFAULT_SUFFIX) + # Create a user as dm. + user = nsusers.create(properties={ + 'uid': 'test_nsuser', + 'cn': 'test_nsuser', + 'displayName': 'testNsuser', + 'legalName': 'testNsuser', + 'uidNumber': '1001', + 'gidNumber': '1001', + 'homeDirectory': '/home/testnsuser', + 'userPassword': USER_PASSWORD, + }) + # Create a new con and bind as the user. + user_conn = user.bind(USER_PASSWORD) + user_account = nsUserAccounts(user_conn, DEFAULT_SUFFIX).get('test_nsuser') + # From the user_conn do an extop_s for the token + token = user_account.request_sso_token() + # Great! Now do a bind where the token is the pw: + # user_conn_tok = user.bind(token) + user_conn_tok = user.authenticate_sso_token(token) + # Assert whoami. + # Assert that user_conn_tok with the token can NOT get a new token. + user_tok_account = nsUserAccounts(user_conn_tok, DEFAULT_SUFFIX).get('test_nsuser') + with pytest.raises(ldap.UNWILLING_TO_PERFORM): + user_tok_account.request_sso_token() + + # Check with a lowered ttl (should deny) + topology.standalone.config.set('nsslapd-ldapssotoken-ttl-secs', '1') # Set a low ttl + # Ensure it's past - the one time I'll allow a sleep .... + time.sleep(2) + with pytest.raises(ldap.INVALID_CREDENTIALS): + user.authenticate_sso_token(token) + topology.standalone.config.set('nsslapd-ldapssotoken-ttl-secs', '3600') # Set a reasonable + + # Regenerate the server token key + topology.standalone.config.remove_all('nsslapd-ldapssotoken-secret') + # check we fail to authenticate. 
+ with pytest.raises(ldap.INVALID_CREDENTIALS): + user.authenticate_sso_token(token) + +@pytest.mark.skipif(not default_paths.rust_enabled or ds_is_older('1.4.3.3'), reason="Auth tokens are not available in older versions") +def test_ldap_auth_token_disabled(topology): + """ Assert when the feature is disabled that token operations are not able to progress + + :id: ccde5d0b-7f2d-49d5-b9d5-f7082f8f36a3 + + :setup: Standalone instance + + :steps: + 1. Create a user + 2. Attempt to get a token. + 3. Enable the feature, get a token, then disable it. + 4. Attempt to auth + + :expectedresults: + 1. Success + 2. Fails to get a token + 3. Token is received + 4. Auth fails as token is disabled. + """ + topology.standalone.enable_tls() + topology.standalone.config.set('nsslapd-enable-ldapssotoken', 'off') # disable it. + nsusers = nsUserAccounts(topology.standalone, DEFAULT_SUFFIX) + # Create a user as dm. + user = nsusers.create(properties={ + 'uid': 'test_nsuser1', + 'cn': 'test_nsuser1', + 'displayName': 'testNsuser1', + 'legalName': 'testNsuser1', + 'uidNumber': '1002', + 'gidNumber': '1002', + 'homeDirectory': '/home/testnsuser1', + 'userPassword': USER_PASSWORD, + }) + # Create a new con and bind as the user. 
+ user_conn = user.bind(USER_PASSWORD) + user_account = nsUserAccounts(user_conn, DEFAULT_SUFFIX).get('test_nsuser1') + # From the user_conn do an extop_s for the token + with pytest.raises(ldap.PROTOCOL_ERROR): + user_account.request_sso_token() + # Now enable it + topology.standalone.config.set('nsslapd-enable-ldapssotoken', 'on') + token = user_account.request_sso_token() + # Now disable + topology.standalone.config.set('nsslapd-enable-ldapssotoken', 'off') + # Now attempt to bind (should fail) + with pytest.raises(ldap.INVALID_CREDENTIALS): + user_account.authenticate_sso_token(token) + + +@pytest.mark.skipif(not default_paths.rust_enabled or ds_is_older('1.4.3.3'), reason="Auth tokens are not available in older versions") +def test_ldap_auth_token_directory_manager(topology): + """ Test token auth with directory manager is denied + + :id: ec9aec64-3edf-4f3f-853a-7527b0c42124 + + :setup: Standalone instance + + :steps: + 1. Attempt to generate a token as DM + + :expectedresults: + 1. Fails + """ + topology.standalone.enable_tls() + topology.standalone.config.set('nsslapd-enable-ldapssotoken', 'on') # enable it. + + dm = DirectoryManager(topology.standalone) + # Try getting a token at DM, should fail. + with pytest.raises(ldap.UNWILLING_TO_PERFORM): + dm.request_sso_token() + +## test as anon (will fail) +@pytest.mark.skipif(not default_paths.rust_enabled or ds_is_older('1.4.3.3'), reason="Auth tokens are not available in older versions") +def test_ldap_auth_token_anonymous(topology): + """ Test token auth with Anonymous is denied. + + :id: 966068c3-fbc6-468d-a554-18d68d1d895b + + :setup: Standalone instance + + :steps: + 1. Attempt to generate a token as Anonymous + + :expectedresults: + 1. Fails + """ + topology.standalone.enable_tls() + topology.standalone.config.set('nsslapd-enable-ldapssotoken', 'on') # enable it. 
+ + anon_conn = Anonymous(topology.standalone).bind() + # Build the request + req = LdapSSOTokenRequest() + # Get the response + with pytest.raises(ldap.UNWILLING_TO_PERFORM): + (_, res) = anon_conn.extop_s(req, escapehatch='i am sure') + diff --git a/dirsrvtests/tests/suites/automember_plugin/__init__.py b/dirsrvtests/tests/suites/automember_plugin/__init__.py new file mode 100644 index 0000000..fd6c4a5 --- /dev/null +++ b/dirsrvtests/tests/suites/automember_plugin/__init__.py @@ -0,0 +1,3 @@ +""" + :Requirement: 389-ds-base: Auto Member +""" diff --git a/dirsrvtests/tests/suites/automember_plugin/automember_abort_test.py b/dirsrvtests/tests/suites/automember_plugin/automember_abort_test.py new file mode 100644 index 0000000..98f4f23 --- /dev/null +++ b/dirsrvtests/tests/suites/automember_plugin/automember_abort_test.py @@ -0,0 +1,102 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2022 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. 
+# --- END COPYRIGHT BLOCK --- +# +import logging +import pytest +import os +import time +from lib389._constants import DEFAULT_SUFFIX +from lib389.plugins import AutoMembershipPlugin, AutoMembershipDefinitions +from lib389.idm.user import UserAccounts +from lib389.idm.group import Groups +from lib389.topologies import topology_st as topo + +log = logging.getLogger(__name__) + + +@pytest.fixture(scope="module") +def automember_fixture(topo, request): + # Create group + group_obj = Groups(topo.standalone, DEFAULT_SUFFIX) + automem_group = group_obj.create(properties={'cn': 'testgroup'}) + + # Create users + users = UserAccounts(topo.standalone, DEFAULT_SUFFIX, rdn=None) + NUM_USERS = 1000 + for num in range(NUM_USERS): + num_ran = int(round(num)) + USER_NAME = 'test%05d' % num_ran + users.create(properties={ + 'uid': USER_NAME, + 'sn': USER_NAME, + 'cn': USER_NAME, + 'uidNumber': '%s' % num_ran, + 'gidNumber': '%s' % num_ran, + 'homeDirectory': '/home/%s' % USER_NAME, + 'mail': '%s@redhat.com' % USER_NAME, + 'userpassword': 'pass%s' % num_ran, + }) + + + # Create automember definitions and regex rules + automember_prop = { + 'cn': 'testgroup_definition', + 'autoMemberScope': DEFAULT_SUFFIX, + 'autoMemberFilter': 'objectclass=posixaccount', + 'autoMemberDefaultGroup': automem_group.dn, + 'autoMemberGroupingAttr': 'member:dn', + } + automembers = AutoMembershipDefinitions(topo.standalone) + auto_def = automembers.create(properties=automember_prop) + auto_def.add_regex_rule("regex1", automem_group.dn, include_regex=['uid=.*']) + + # Enable plugin + automemberplugin = AutoMembershipPlugin(topo.standalone) + automemberplugin.enable() + topo.standalone.restart() + + +def test_abort(automember_fixture, topo): + """Test the abort rebuild task + + :id: 24763279-48ec-4c34-91b3-f681679dec3a + :setup: Standalone Instance + :steps: + 1. Setup automember and create a bunch of users + 2. Start rebuild task + 3. Abort rebuild task + 4. 
Verify rebuild task was aborted + :expectedresults: + 1. Success + 2. Success + 3. Success + 4. Success + """ + + automemberplugin = AutoMembershipPlugin(topo.standalone) + + # Run rebuild task + task = automemberplugin.fixup(DEFAULT_SUFFIX, "objectclass=top") + time.sleep(1) + + # Abort rebuild task + automemberplugin.abort_fixup() + + # Wait for rebuild task to finish + task.wait() + + # Check errors log for abort message + assert topo.standalone.searchErrorsLog("task was intentionally aborted") + + +if __name__ == '__main__': + # Run isolated + # -s for DEBUG mode + CURRENT_FILE = os.path.realpath(__file__) + pytest.main(["-s", CURRENT_FILE]) + diff --git a/dirsrvtests/tests/suites/automember_plugin/automember_mod_test.py b/dirsrvtests/tests/suites/automember_plugin/automember_mod_test.py new file mode 100644 index 0000000..7a0ed32 --- /dev/null +++ b/dirsrvtests/tests/suites/automember_plugin/automember_mod_test.py @@ -0,0 +1,170 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2022 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. 
+# --- END COPYRIGHT BLOCK --- +import ldap +import logging +import pytest +import os +import time +from lib389.utils import ds_is_older +from lib389._constants import DEFAULT_SUFFIX +from lib389.plugins import AutoMembershipPlugin, AutoMembershipDefinitions +from lib389.idm.user import UserAccounts +from lib389.idm.group import Groups +from lib389.topologies import topology_st as topo + +# Skip on older versions +pytestmark = [pytest.mark.tier1, + pytest.mark.skipif(ds_is_older('1.4.0'), reason="Not implemented")] + +DEBUGGING = os.getenv("DEBUGGING", default=False) +if DEBUGGING: + logging.getLogger(__name__).setLevel(logging.DEBUG) +else: + logging.getLogger(__name__).setLevel(logging.INFO) +log = logging.getLogger(__name__) + + +@pytest.fixture(scope="module") +def automember_fixture(topo, request): + # Create group + groups = [] + group_obj = Groups(topo.standalone, DEFAULT_SUFFIX) + groups.append(group_obj.create(properties={'cn': 'testgroup'})) + groups.append(group_obj.create(properties={'cn': 'testgroup2'})) + groups.append(group_obj.create(properties={'cn': 'testgroup3'})) + + # Create test user + user_accts = UserAccounts(topo.standalone, DEFAULT_SUFFIX) + user = user_accts.create_test_user() + + # Create extra users + users = UserAccounts(topo.standalone, DEFAULT_SUFFIX) + for i in range(0, 100): + users.create_test_user(uid=i) + + # Create automember definitions and regex rules + automember_prop = { + 'cn': 'testgroup_definition', + 'autoMemberScope': DEFAULT_SUFFIX, + 'autoMemberFilter': 'objectclass=posixaccount', + 'autoMemberDefaultGroup': groups[0].dn, + 'autoMemberGroupingAttr': 'member:dn', + } + automembers = AutoMembershipDefinitions(topo.standalone) + auto_def = automembers.create(properties=automember_prop) + auto_def.add_regex_rule("regex1", groups[1].dn, include_regex=['cn=mark.*']) + auto_def.add_regex_rule("regex2", groups[2].dn, include_regex=['cn=simon.*']) + + # Enable plugin + automemberplugin = AutoMembershipPlugin(topo.standalone) 
+ automemberplugin.enable() + topo.standalone.restart() + + return user, groups + + +def test_mods(automember_fixture, topo): + """Modify the user so that it is added to the various automember groups + + :id: 28a2b070-7f16-4905-8831-c80fa6441693 + :setup: Standalone Instance + :steps: + 1. Update user that should add it to group[0] + 2. Update user that should add it to group[1] + 3. Update user that should add it to group[2] + 4. Update user that should add it to group[0] + 5. Test rebuild task adds user to group[1] + 6. Test rebuild task cleanups groups and only adds it to group[1] + :expectedresults: + 1. Success + 2. Success + 3. Success + 4. Success + 5. Success + 6. Success + """ + (user, groups) = automember_fixture + + # Update user which should go into group[0] + user.replace('cn', 'whatever') + assert groups[0].is_member(user.dn) + if groups[1].is_member(user.dn): + assert False + if groups[2].is_member(user.dn): + assert False + + # Update user0 which should go into group[1] + user.replace('cn', 'mark') + assert groups[1].is_member(user.dn) + if groups[0].is_member(user.dn): + assert False + if groups[2].is_member(user.dn): + assert False + + # Update user which should go into group[2] + user.replace('cn', 'simon') + assert groups[2].is_member(user.dn) + if groups[0].is_member(user.dn): + assert False + if groups[1].is_member(user.dn): + assert False + + # Update user which should go back into group[0] (full circle) + user.replace('cn', 'whatever') + assert groups[0].is_member(user.dn) + if groups[1].is_member(user.dn): + assert False + if groups[2].is_member(user.dn): + assert False + + # + # Test rebuild task. 
First disable plugin + # + automemberplugin = AutoMembershipPlugin(topo.standalone) + automemberplugin.disable() + topo.standalone.restart() + + # Make change that would move the entry from group[0] to group[1] + user.replace('cn', 'mark') + + # Enable plugin + automemberplugin.enable() + topo.standalone.restart() + + # Run rebuild task (no cleanup) + task = automemberplugin.fixup(DEFAULT_SUFFIX, "objectclass=posixaccount") + with pytest.raises(ldap.UNWILLING_TO_PERFORM): + # test only one fixup task is allowed at a time + automemberplugin.fixup(DEFAULT_SUFFIX, "objectclass=top") + task.wait() + + # Test membership (user should still be in groups[0]) + assert groups[1].is_member(user.dn) + if not groups[0].is_member(user.dn): + assert False + + # Run rebuild task with cleanup + task = automemberplugin.fixup(DEFAULT_SUFFIX, "objectclass=posixaccount", cleanup=True) + task.wait() + + # Test membership (user should only be in groups[1]) + assert groups[1].is_member(user.dn) + if groups[0].is_member(user.dn): + assert False + if groups[2].is_member(user.dn): + assert False + + # Success + log.info("Test PASSED") + + +if __name__ == '__main__': + # Run isolated + # -s for DEBUG mode + CURRENT_FILE = os.path.realpath(__file__) + pytest.main(["-s", CURRENT_FILE]) diff --git a/dirsrvtests/tests/suites/automember_plugin/automember_test.py b/dirsrvtests/tests/suites/automember_plugin/automember_test.py new file mode 100644 index 0000000..e1976bd --- /dev/null +++ b/dirsrvtests/tests/suites/automember_plugin/automember_test.py @@ -0,0 +1,308 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2017 alisha17 +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. 
+# --- END COPYRIGHT BLOCK --- +# +import logging +import pytest +import os +import ldap +from lib389.utils import ds_is_older +from lib389._constants import * +from lib389.plugins import AutoMembershipPlugin, AutoMembershipDefinition, AutoMembershipDefinitions, AutoMembershipRegexRule +from lib389._mapped_object import DSLdapObjects, DSLdapObject +from lib389 import agreement +from lib389.idm.user import UserAccount, UserAccounts, TEST_USER_PROPERTIES +from lib389.idm.group import Groups, Group +from lib389.topologies import topology_st as topo +from lib389._constants import DEFAULT_SUFFIX + + +# Skip on older versions +pytestmark = [pytest.mark.tier1, + pytest.mark.skipif(ds_is_older('1.3.7'), reason="Not implemented")] + +DEBUGGING = os.getenv("DEBUGGING", default=False) +if DEBUGGING: + logging.getLogger(__name__).setLevel(logging.DEBUG) +else: + logging.getLogger(__name__).setLevel(logging.INFO) +log = logging.getLogger(__name__) + + +@pytest.fixture(scope="module") +def automember_fixture(topo, request): + + groups = Groups(topo.standalone, DEFAULT_SUFFIX) + group = groups.create(properties={'cn': 'testgroup'}) + + automemberplugin = AutoMembershipPlugin(topo.standalone) + automemberplugin.enable() + + topo.standalone.restart() + + automember_prop = { + 'cn': 'testgroup_definition', + 'autoMemberScope': 'ou=People,' + DEFAULT_SUFFIX, + 'autoMemberFilter': 'objectclass=*', + 'autoMemberDefaultGroup': group.dn, + 'autoMemberGroupingAttr': 'member:dn', + } + + automembers = AutoMembershipDefinitions(topo.standalone, "cn=Auto Membership Plugin,cn=plugins,cn=config") + + automember = automembers.create(properties=automember_prop) + + return (group, automembers, automember) + + +def test_automemberscope(automember_fixture, topo): + """Test if the automember scope is valid + + :id: c3d3f250-e7fd-4441-8387-3d24c156e982 + :setup: Standalone instance, enabled Auto Membership Plugin + :steps: + 1. 
Create automember with invalid cn that raises + UNWILLING_TO_PERFORM exception + 2. If exception raised, set scope to any cn + 3. If exception is not raised, set scope to with ou=People + :expectedresults: + 1. Should be success + 2. Should be success + 3. Should be success + """ + + (group, automembers, automember) = automember_fixture + + automember_prop = { + 'cn': 'anyrandomcn', + 'autoMemberScope': 'ou=People,' + DEFAULT_SUFFIX, + 'autoMemberFilter': 'objectclass=*', + 'autoMemberDefaultGroup': group.dn, + 'autoMemberGroupingAttr': 'member:dn', + } + + # depends on issue #49465 + + # with pytest.raises(ldap.UNWILLING_TO_PERFORM): + # automember = automembers.create(properties=automember_prop) + # automember.set_scope("cn=No Entry,%s" % DEFAULT_SUFFIX) + + automember.set_scope("ou=People,%s" % DEFAULT_SUFFIX) + + +def test_automemberfilter(automember_fixture, topo): + """Test if the automember filter is valid + + :id: 935c55de-52dc-4f80-b7dd-3aacd30f6df2 + :setup: Standalone instance, enabled Auto Membership Plugin + :steps: + 1. Create automember with invalid filter that raises + UNWILLING_TO_PERFORM exception + 2. If exception raised, set filter to the invalid filter + 3. If exception is not raised, set filter as all objectClasses + :expectedresults: + 1. Should be success + 2. Should be success + 3. 
Should be success + """ + + (group, automembers, automember) = automember_fixture + + automember_prop = { + 'cn': 'anyrandomcn', + 'autoMemberScope': 'ou=People,' + DEFAULT_SUFFIX, + 'autoMemberFilter': '(ou=People', + 'autoMemberDefaultGroup': group.dn, + 'autoMemberGroupingAttr': 'member:dn', + } + + with pytest.raises(ldap.UNWILLING_TO_PERFORM): + automember = automembers.create(properties=automember_prop) + automember.set_filter("(ou=People") + + automember.set_filter("objectClass=*") + + +def test_adduser(automember_fixture, topo): + """Test if member is automatically added to the group + + :id: 14f1e2f5-2162-41ab-962c-5293516baf2e + :setup: Standalone instance, enabled Auto Membership Plugin + :steps: + 1. Create a user + 2. Assert that the user is member of the group + :expectedresults: + 1. Should be success + 2. Should be success + """ + + (group, automembers, automember) = automember_fixture + + users = UserAccounts(topo.standalone, DEFAULT_SUFFIX) + user = users.create(properties=TEST_USER_PROPERTIES) + + assert group.is_member(user.dn) + user.delete() + + +@pytest.mark.skipif(ds_is_older("1.4.1.2"), reason="Not implemented") +def test_delete_default_group(automember_fixture, topo): + """If memberof is enable and a user became member of default group + because of automember rule then delete the default group should succeeds + + :id: 8b55d077-8851-45a2-a547-b28a7983a3c2 + :setup: Standalone instance, enabled Auto Membership Plugin + :steps: + 1. Enable memberof plugin + 2. Create a user + 3. Assert that the user is member of the default group + 4. Delete the default group + :expectedresults: + 1. Should be success + 2. Should be success + 3. Should be success + 4. 
@pytest.mark.skipif(ds_is_older("1.4.1.2"), reason="Not implemented")
def test_delete_default_group(automember_fixture, topo):
    """If memberof is enabled and a user became member of default group
    because of automember rule then deleting the default group should succeed

    :id: 8b55d077-8851-45a2-a547-b28a7983a3c2
    :setup: Standalone instance, enabled Auto Membership Plugin
    :steps:
        1. Enable memberof plugin
        2. Create a user
        3. Assert that the user is member of the default group
        4. Delete the default group
    :expectedresults:
        1. Should be success
        2. Should be success
        3. Should be success
        4. Should be success
    """

    (group, automembers, automember) = automember_fixture

    from lib389.plugins import MemberOfPlugin
    memberof = MemberOfPlugin(topo.standalone)
    memberof.enable()
    topo.standalone.restart()
    # 65536 == plugin log level, needed so the automember message is logged
    topo.standalone.setLogLevel(65536)

    users = UserAccounts(topo.standalone, DEFAULT_SUFFIX)
    user_1 = users.create_test_user(uid=1)

    try:
        assert group.is_member(user_1.dn)
        group.delete()
        error_lines = topo.standalone.ds_error_log.match('.*auto-membership-plugin - automember_update_member_value - group .default or target. does not exist .%s.$' % group.dn)
        assert (len(error_lines) == 1)
    finally:
        user_1.delete()
        topo.standalone.setLogLevel(0)


@pytest.mark.skipif(ds_is_older("1.4.3.3"), reason="Not implemented")
def test_no_default_group(automember_fixture, topo):
    """If memberof is enabled and a user became member of default group
    and default group does not exist then an INFO should be logged

    :id: 8882972f-fb3e-4d77-9729-0235897676bc
    :setup: Standalone instance, enabled Auto Membership Plugin
    :steps:
        1. Enable memberof plugin
        2. Set errorlog level to 0 (default)
        3. Delete the default group
        4. Create a user
        5. Retrieve message in log
    :expectedresults:
        1. Should be success
        2. Should be success
        3. Should be success
        4. Should be success
        5. Should be success
    """

    (group, automembers, automember) = automember_fixture

    from lib389.plugins import MemberOfPlugin
    memberof = MemberOfPlugin(topo.standalone)
    memberof.enable()
    topo.standalone.restart()
    topo.standalone.setLogLevel(0)

    # delete the default group if it exists
    try:
        group.get_attr_val_utf8('creatorsname')
        group.delete()
    except ldap.NO_SUCH_OBJECT:
        pass
    users = UserAccounts(topo.standalone, DEFAULT_SUFFIX)
    user_1 = users.create_test_user(uid=1)

    try:
        error_lines = topo.standalone.ds_error_log.match('.*auto-membership-plugin - automember_update_member_value - group .default or target. does not exist .%s.$' % group.dn)
        assert (len(error_lines) > 0)
    finally:
        user_1.delete()
        topo.standalone.setLogLevel(0)


@pytest.mark.skipif(ds_is_older("1.4.1.2"), reason="Not implemented")
def test_delete_target_group(automember_fixture, topo):
    """If memberof is enabled and a user became member of target group
    because of automember rule then deleting the target group should succeed

    :id: bf5745e3-3de8-485d-8a68-e2fd460ce1cb
    :setup: Standalone instance, enabled Auto Membership Plugin
    :steps:
        1. Recreate the default group if it was deleted before
        2. Create a target group (using regex)
        3. Create a target group automember rule (regex)
        4. Enable memberof plugin
        5. Create a user that goes into the target group
        6. Assert that the user is member of the target group
        7. Delete the target group
        8. Check automember skipped the regex automember rule because target group did not exist
    :expectedresults:
        1. Should be success
        2. Should be success
        3. Should be success
        4. Should be success
        5. Should be success
        6. Should be success
        7. Should be success
        8. Should be success
    """

    (group, automembers, automember) = automember_fixture

    # default group that may have been deleted in previous tests;
    # narrow the former bare "except:" to the only expected failure
    try:
        groups = Groups(topo.standalone, DEFAULT_SUFFIX)
        group = groups.create(properties={'cn': 'testgroup'})
    except ldap.ALREADY_EXISTS:
        # the fixture's default group is still present - keep using it
        pass

    # target group that will receive regex automember
    groups = Groups(topo.standalone, DEFAULT_SUFFIX)
    group_regex = groups.create(properties={'cn': 'testgroup_regex'})

    # regex automember definition
    automember_regex_prop = {
        'cn': 'automember regex',
        'autoMemberTargetGroup': group_regex.dn,
        'autoMemberInclusiveRegex': 'uid=.*1',
    }
    automember_regex_dn = 'cn=automember regex, %s' % automember.dn
    automember_regexes = AutoMembershipRegexRule(topo.standalone, automember_regex_dn)
    automember_regex = automember_regexes.create(properties=automember_regex_prop)

    from lib389.plugins import MemberOfPlugin
    memberof = MemberOfPlugin(topo.standalone)
    memberof.enable()

    topo.standalone.restart()
    topo.standalone.setLogLevel(65536)

    # create a user that goes into the target group but not in the default group
    users = UserAccounts(topo.standalone, DEFAULT_SUFFIX)
    user_1 = users.create_test_user(uid=1)

    try:
        assert group_regex.is_member(user_1.dn)
        assert not group.is_member(user_1.dn)

        # delete that target filter group
        group_regex.delete()
        error_lines = topo.standalone.ds_error_log.match('.*auto-membership-plugin - automember_update_member_value - group .default or target. does not exist .%s.$' % group_regex.dn)
        # exactly one line, for the deleted target group (the assert below is
        # what the test enforces; the old comment claimed two lines)
        assert (len(error_lines) == 1)
    finally:
        user_1.delete()
        topo.standalone.setLogLevel(0)


# ---------------------------------------------------------------------------
# (imported patch boundary) - start of new file:
# dirsrvtests/tests/suites/automember_plugin/basic_test.py
# ---------------------------------------------------------------------------

# --- BEGIN COPYRIGHT BLOCK ---
# Copyright (C) 2019 Red Hat, Inc.
# All rights reserved.
#
# License: GPL (version 3 or any later version).
# See LICENSE for details.
# --- END COPYRIGHT BLOCK ---

"""
Will test AutoMember Plugin with AutoMember Task and Retro Changelog
"""

import os
import pytest
import time
import re
from lib389.topologies import topology_m1 as topo
from lib389.idm.organizationalunit import OrganizationalUnits
from lib389.idm.domain import Domain
from lib389.idm.posixgroup import PosixGroups
from lib389.plugins import AutoMembershipPlugin, AutoMembershipDefinitions, \
    MemberOfPlugin, AutoMembershipRegexRules, AutoMembershipDefinition, RetroChangelogPlugin
from lib389.backend import Backends
from lib389.config import Config
from lib389._constants import DEFAULT_SUFFIX
from lib389.idm.user import UserAccounts
from lib389.idm.group import Groups, Group, UniqueGroup, nsAdminGroups, nsAdminGroup
from lib389.tasks import Tasks, AutomemberRebuildMembershipTask, ExportTask
from lib389.utils import ds_is_older
from lib389.paths import Paths
import ldap

pytestmark = pytest.mark.tier1

BASE_SUFF = "dc=autoMembers,dc=com"
TEST_BASE = "dc=testAutoMembers,dc=com"
BASE_REPL = "dc=replAutoMembers,dc=com"
SUBSUFFIX = f'dc=SubSuffix,{BASE_SUFF}'
PLUGIN_AUTO = "cn=Auto Membership Plugin,cn=plugins,cn=config"
REPMANDN = "cn=ReplManager"
CACHE_SIZE = '-1'
CACHEMEM_SIZE = '10485760'
AUTO_MEM_SCOPE_TEST = f'ou=Employees,{TEST_BASE}'
AUTO_MEM_SCOPE_BASE = f'ou=Employees,{BASE_SUFF}'
def add_base_entries(topo):
    """
    Will create suffix
    """
    inst = topo.ms["supplier1"]
    # NOTE(review): 'nsslapd-CACHE_SIZE'/'nsslapd-CACHEMEM_SIZE' look like
    # templating leftovers - the server attributes are usually spelled
    # nsslapd-cachesize / nsslapd-cachememsize; confirm before changing.
    for suffix, backend_name in [(BASE_SUFF, 'AutoMembers'),
                                 (SUBSUFFIX, 'SubAutoMembers'),
                                 (TEST_BASE, 'testAutoMembers'),
                                 (BASE_REPL, 'ReplAutoMembers'),
                                 (f"dc=SubSuffix,{BASE_REPL}", 'ReplSubAutoMembers')]:
        Backends(inst).create(properties={
            'cn': backend_name,
            'nsslapd-suffix': suffix,
            'nsslapd-CACHE_SIZE': CACHE_SIZE,
            'nsslapd-CACHEMEM_SIZE': CACHEMEM_SIZE})
        Domain(inst, suffix).create(properties={
            'dc': suffix.split('=')[1].split(',')[0],
            'aci': [
                f'(targetattr="userPassword")(version 3.0;aci "Replication Manager '
                f'Access";allow (write,compare) userdn="ldap:///{REPMANDN},cn=config";)',
                f'(target ="ldap:///{suffix}")(targetattr !="cn||sn||uid") (version 3.0;'
                f'acl "Group Permission";allow (write) '
                f'(groupdn = "ldap:///cn=GroupMgr,{suffix}");)',
                f'(target ="ldap:///{suffix}")(targetattr !="userPassword")(version 3.0;acl '
                f'"Anonym-read access"; allow (read,search,compare)(userdn="ldap:///anyone");)'
            ]
        })
    for suffix, ou_cn in [(BASE_SUFF, 'userGroups'),
                          (BASE_SUFF, 'Employees'),
                          (BASE_SUFF, 'TaskEmployees'),
                          (TEST_BASE, 'Employees')]:
        OrganizationalUnits(inst, suffix).create(properties={'ou': ou_cn})


def add_user(topo, user_id, suffix, uid_no, gid_no, role_usr):
    """
    Will create entries with nsAdminGroup objectclass
    """
    objectclasses = ['top', 'person', 'posixaccount', 'inetuser',
                     'nsMemberOf', 'nsAccount', 'nsAdminGroup']
    if ds_is_older('1.4.0'):
        # nsAccount does not exist before 1.4.0
        objectclasses.remove('nsAccount')

    properties = {
        'cn': user_id,
        'sn': user_id,
        'uid': user_id,
        'homeDirectory': f'/home/{user_id}',
        'loginShell': '/bin/bash',
        'uidNumber': uid_no,
        'gidNumber': gid_no,
        'objectclass': objectclasses,
        'nsAdminGroupName': role_usr,
        'seeAlso': f'uid={user_id},{suffix}',
        'entrydn': f'uid={user_id},{suffix}'
    }
    return nsAdminGroups(topo.ms["supplier1"], suffix, rdn=None).create(properties=properties)


def check_groups(topo, group_dn, user_dn, member):
    """
    Will check MEMBATTR
    """
    grp = Group(topo.ms["supplier1"], group_dn)
    return bool(grp.present(member, user_dn))


def add_group(topo, suffix, group_id):
    """
    Will create groups
    """
    Groups(topo.ms["supplier1"], suffix, rdn=None).create(properties={
        'cn': group_id
    })


def number_memberof(topo, user, number):
    """
    Function to check if the memberOf attribute is present.
    """
    values = nsAdminGroup(topo.ms["supplier1"], user).get_attr_vals_utf8('memberOf')
    return len(values) == number


def add_group_entries(topo):
    """
    Will create multiple entries needed for this test script
    """
    for suffix, group in [(SUBSUFFIX, 'subsuffGroups'),
                          (SUBSUFFIX, 'Employees'),
                          (TEST_BASE, 'testuserGroups'),
                          (f"dc=SubSuffix,{BASE_REPL}", 'replsubGroups'),
                          (BASE_REPL, 'replsubGroups')]:
        add_group(topo, suffix, group)
    for group_cn in ('SubDef1', 'SubDef2', 'SubDef3', 'SubDef4', 'SubDef5'):
        add_group(topo, BASE_REPL, group_cn)
    for user in ('Managers', 'Contractors', 'Interns', 'Visitors'):
        add_group(topo, f"cn=replsubGroups,{BASE_REPL}", user)
    user_groups_parent = f"ou=userGroups,{BASE_SUFF}"
    test_groups_parent = f"CN=testuserGroups,{TEST_BASE}"
    for group_cn in ('SuffDef1', 'SuffDef2', 'SuffDef3', 'SuffDef4', 'SuffDef5',
                     'Contractors', 'Managers'):
        add_group(topo, user_groups_parent, group_cn)
    for group_cn in ('TestDef1', 'TestDef2', 'TestDef3', 'TestDef4', 'TestDef5'):
        add_group(topo, test_groups_parent, group_cn)
    for parent, group_cn, grp_no in [(SUBSUFFIX, 'SubDef1', '111'),
                                     (SUBSUFFIX, 'SubDef2', '222'),
                                     (SUBSUFFIX, 'SubDef3', '333'),
                                     (SUBSUFFIX, 'SubDef4', '444'),
                                     (SUBSUFFIX, 'SubDef5', '555'),
                                     (f'cn=subsuffGroups,{SUBSUFFIX}', 'Managers', '666'),
                                     (f'cn=subsuffGroups,{SUBSUFFIX}', 'Contractors', '999')]:
        PosixGroups(topo.ms["supplier1"], parent, rdn=None).create(properties={
            'cn': group_cn,
            'gidNumber': grp_no
        })


def add_member_attr(topo, group_dn, user_dn, member):
    """
    Will add members to groups
    """
    Group(topo.ms["supplier1"], group_dn).add(member, user_dn)


def change_grp_objclass(new_object, member, type_of):
    """
    Will change objectClass
    """
    try:
        type_of.remove(member, None)
    except ldap.NO_SUCH_ATTRIBUTE:
        # no members on the group; nothing to strip before the swap
        pass
    type_of.ensure_state(properties={
        'cn': type_of.get_attr_val_utf8('cn'),
        'objectClass': ['top', 'nsMemberOf', new_object]
    })
+ """ + add_base_entries(topo) + add_group_entries(topo) + auto = AutoMembershipPlugin(topo.ms["supplier1"]) + auto.add("nsslapd-pluginConfigArea", "cn=autoMembersPlugin,{}".format(BASE_REPL)) + MemberOfPlugin(topo.ms["supplier1"]).enable() + automembers_definitions = AutoMembershipDefinitions(topo.ms["supplier1"]) + automembers_definitions.create(properties={ + 'cn': 'userGroups', + 'autoMemberScope': f'ou=Employees,{BASE_SUFF}', + 'autoMemberFilter': "objectclass=posixAccount", + 'autoMemberDefaultGroup': [ + f'cn=SuffDef1,ou=userGroups,{BASE_SUFF}', + f'cn=SuffDef2,ou=userGroups,{BASE_SUFF}', + f'cn=SuffDef3,ou=userGroups,{BASE_SUFF}', + f'cn=SuffDef4,ou=userGroups,{BASE_SUFF}', + f'cn=SuffDef5,ou=userGroups,{BASE_SUFF}' + ], + 'autoMemberGroupingAttr': 'member:dn', + }) + + automembers_definitions.create(properties={ + 'cn': 'subsuffGroups', + 'autoMemberScope': f'ou=Employees,{BASE_SUFF}', + 'autoMemberFilter': "objectclass=posixAccount", + 'autoMemberDefaultGroup': [ + f'cn=SubDef1,dc=subSuffix,{BASE_SUFF}', + f'cn=SubDef2,dc=subSuffix,{BASE_SUFF}', + f'cn=SubDef3,dc=subSuffix,{BASE_SUFF}', + f'cn=SubDef4,dc=subSuffix,{BASE_SUFF}', + f'cn=SubDef5,dc=subSuffix,{BASE_SUFF}', + ], + 'autoMemberGroupingAttr': 'memberuid:dn', + }) + + automembers_regex_usergroup = AutoMembershipRegexRules(topo.ms["supplier1"], + f'cn=userGroups,{auto.dn}') + automembers_regex_usergroup.create(properties={ + 'cn': 'Managers', + 'description': f'Group placement for Managers', + 'autoMemberTargetGroup': [f'cn=Managers,ou=userGroups,{BASE_SUFF}'], + 'autoMemberInclusiveRegex': [ + "gidNumber=^9", + "nsAdminGroupName=^Manager", + ], + "autoMemberExclusiveRegex": [ + "gidNumber=^[6-8]", + "nsAdminGroupName=^Junior$", + ], + }) + + automembers_regex_usergroup.create(properties={ + 'cn': 'Contractors', + 'description': f'Group placement for Contractors', + 'autoMemberTargetGroup': [f'cn=Contractors,ou=userGroups,{BASE_SUFF}'], + 'autoMemberInclusiveRegex': [ + "gidNumber=^1", + 
"nsAdminGroupName=Contractor", + ], + "autoMemberExclusiveRegex": [ + "gidNumber=^[2-4]", + "nsAdminGroupName=^Employee$", + ], + }) + + automembers_regex_sub = AutoMembershipRegexRules(topo.ms["supplier1"], + f'cn=subsuffGroups,{auto.dn}') + automembers_regex_sub.create(properties={ + 'cn': 'Managers', + 'description': f'Group placement for Managers', + 'autoMemberTargetGroup': [f'cn=Managers,cn=subsuffGroups,dc=subSuffix,{BASE_SUFF}'], + 'autoMemberInclusiveRegex': [ + "gidNumber=^[1-4]..3$", + "uidNumber=^5.5$", + "nsAdminGroupName=^Manager$|^Supervisor$", + ], + "autoMemberExclusiveRegex": [ + "gidNumber=^[6-8].0$", + "uidNumber=^999$", + "nsAdminGroupName=^Junior$", + ], + }) + + automembers_regex_sub.create(properties={ + 'cn': 'Contractors', + 'description': f'Group placement for Contractors', + 'autoMemberTargetGroup': [f'cn=Contractors,cn=subsuffGroups,dc=SubSuffix,{BASE_SUFF}'], + 'autoMemberInclusiveRegex': [ + "gidNumber=^[5-9].3$", + "uidNumber=^8..5$", + "nsAdminGroupName=^Contract|^Temporary$", + ], + "autoMemberExclusiveRegex": [ + "gidNumber=^[2-4]00$", + "uidNumber=^[1,3,8]99$", + "nsAdminGroupName=^Employee$", + ], + }) + for cn_name, ou_name in [('testuserGroups', 'Employees'), ('hostGroups', 'HostEntries')]: + automembers_definitions.create(properties={ + 'cn': cn_name, + 'autoMemberScope': f'ou={ou_name},dc=testautoMembers,dc=com', + 'autoMemberFilter': "objectclass=posixAccount", + 'autoMemberDefaultGroup': [ + f'cn=TestDef1,cn={cn_name},dc=testautoMembers,dc=com', + f'cn=TestDef2,cn={cn_name},dc=testautoMembers,dc=com', + f'cn=TestDef3,cn={cn_name},dc=testautoMembers,dc=com', + f'cn=TestDef4,cn={cn_name},dc=testautoMembers,dc=com', + f'cn=TestDef5,cn={cn_name},dc=testautoMembers,dc=com', + ], + 'autoMemberGroupingAttr': 'member:dn', + }) + + topo.ms["supplier1"].restart() + + +def test_disable_the_plug_in(topo, _create_all_entries): + """Plug-in and check the status + + :id: 4feee76c-e7ff-11e8-836e-8c16451d917b + :setup: Instance with 
def test_custom_config_area(topo, _create_all_entries):
    """Custom config area

    :id: 4fefb8cc-e7ff-11e8-92fd-8c16451d917b
    :setup: Instance with replication
    :steps:
        1. Check whether the plugin can be configured for custom config area
        2. After adding custom config area can be removed
    :expectedresults:
        1. Should success
        2. Should success
    """
    instance_auto = AutoMembershipPlugin(topo.ms["supplier1"])
    instance_auto.replace("nsslapd-pluginConfigArea", DEFAULT_SUFFIX)
    assert instance_auto.get_attr_val_utf8("nsslapd-pluginConfigArea")
    instance_auto.remove("nsslapd-pluginConfigArea", DEFAULT_SUFFIX)
    assert not instance_auto.get_attr_val_utf8("nsslapd-pluginConfigArea")


@pytest.mark.bz834053
def test_ability_to_control_behavior_of_modifiers_name(topo, _create_all_entries):
    """Control behaviour of modifier's name

    :id: 4ff16370-e7ff-11e8-838d-8c16451d917b
    :setup: Instance with replication
    :steps:
        1. Turn on 'nsslapd-plugin-binddn-tracking'
        2. Add an user
        3. Check the creatorsname in the user entry
        4. Check the internalCreatorsname in the user entry
        5. Check the modifiersname in the user entry
        6. Check the internalModifiersname in the user entry
        7. Unset nsslapd-plugin-binddn-tracking attribute under
           cn=config and delete the test entries
    :expectedresults:
        1. Should success
        2. Should success
        3. Should success
        4. Should success
        5. Should success
        6. Should success
        7. Should success
    """
    instance1 = topo.ms["supplier1"]
    configure = Config(instance1)
    configure.replace('nsslapd-plugin-binddn-tracking', 'on')
    instance1.restart()
    assert configure.get_attr_val_utf8('nsslapd-plugin-binddn-tracking') == 'on'
    user = add_user(topo, "User_autoMembers_05", "ou=Employees,{}".format(TEST_BASE),
                    "19", "18", "Supervisor")
    # search the User DN name for the creatorsname in user entry
    assert user.get_attr_val_utf8('creatorsname') == 'cn=directory manager'
    # search the User DN name for the internalCreatorsname in user entry
    assert user.get_attr_val_utf8('internalCreatorsname') == \
        'cn=ldbm database,cn=plugins,cn=config'
    # search the modifiersname in the user entry
    assert user.get_attr_val_utf8('modifiersname') == 'cn=directory manager'
    # search the internalModifiersname in the user entry
    assert user.get_attr_val_utf8('internalModifiersname') == \
        'cn=MemberOf Plugin,cn=plugins,cn=config'
    # unset nsslapd-plugin-binddn-tracking attribute
    configure.replace('nsslapd-plugin-binddn-tracking', 'off')
    instance1.restart()
    # deleting test entries of automember05 test case
    user.delete()


def test_posixaccount_objectclass_automemberdefaultgroup(topo, _create_all_entries):
    """Verify the PosixAccount user

    :id: 4ff0f642-e7ff-11e8-ac88-8c16451d917b
    :setup: Instance with replication
    :steps:
        1. Add users with PosixAccount ObjectClass
        2. Verify the same user added as a member to autoMemberDefaultGroup
    :expectedresults:
        1. Should success
        2. Should success
    """
    test_id = "autoMembers_05"
    default_group = "cn=TestDef1,CN=testuserGroups,{}".format(TEST_BASE)
    user = add_user(topo, "User_{}".format(test_id), AUTO_MEM_SCOPE_TEST, "19", "18", "Supervisor")
    assert check_groups(topo, default_group, user.dn, "member")
    user.delete()
    with pytest.raises(AssertionError):
        assert check_groups(topo, default_group, user.dn, "member")


def test_duplicated_member_attributes_added_when_the_entry_is_re_created(topo, _create_all_entries):
    """Checking whether duplicated member attributes added when the entry is re-created

    :id: 4ff2afaa-e7ff-11e8-8a92-8c16451d917b
    :setup: Instance with replication
    :steps:
        1. Create a user
        2. It should present as member in all automember groups
        3. Delete use
        4. It should not present as member in all automember groups
        5. Recreate same user
        6. It should present as member in all automember groups
    :expectedresults:
        1. Should success
        2. Should success
        3. Should success
        4. Should success
        5. Should success
        6. Should success
    """
    test_id = "autoMembers_06"
    default_group = "cn=TestDef1,CN=testuserGroups,{}".format(TEST_BASE)
    user = add_user(topo, "User_{}".format(test_id), AUTO_MEM_SCOPE_TEST, "19", "16", "Supervisor")
    assert check_groups(topo, default_group, user.dn, "member")
    user.delete()
    with pytest.raises(AssertionError):
        assert check_groups(topo, default_group, user.dn, "member")
    user = add_user(topo, "User_{}".format(test_id), AUTO_MEM_SCOPE_TEST, "19", "15", "Supervisor")
    assert check_groups(topo, default_group, user.dn, "member")
    user.delete()
def test_multi_valued_automemberdefaultgroup_for_hostgroups(topo, _create_all_entries):
    """Multi-valued autoMemberDefaultGroup

    :id: 4ff32a02-e7ff-11e8-99a1-8c16451d917b
    :setup: Instance with replication
    :steps:
        1. Create a user
        2. Check user is present in all Automember Groups as member
        3. Delete the user
        4. Check user is not present in all Automember Groups
    :expectedresults:
        1. Should success
        2. Should success
        3. Should success
        4. Should success
    """
    test_id = "autoMembers_07"
    expected_groups = [f"cn=TestDef{num},CN=testuserGroups,{TEST_BASE}" for num in (1, 2, 3)]
    user = add_user(topo, f"User_{test_id}", AUTO_MEM_SCOPE_TEST, "19", "14", "TestEngr")
    for grp in expected_groups:
        assert check_groups(topo, grp, user.dn, "member")
    user.delete()
    with pytest.raises(AssertionError):
        assert check_groups(topo, expected_groups[0], user.dn, "member")


def test_plugin_creates_member_attributes_of_the_automemberdefaultgroup(topo, _create_all_entries):
    """Checking whether plugin creates member attributes if it already
    exists for some of the autoMemberDefaultGroup

    :id: 4ff3ba76-e7ff-11e8-9846-8c16451d917b
    :setup: Instance with replication
    :steps:
        1. Add a non existing user to some groups as member
        2. Then Create the user
        3. Check the same user is present to other groups also as member
    :expectedresults:
        1. Should success
        2. Should success
        3. Should success
    """
    test_id = "autoMembers_08"
    pending_user_dn = f"uid=User_{test_id},{AUTO_MEM_SCOPE_TEST}"
    # Pre-populate TestDef2 and TestDef4 with the not-yet-existing user
    for num in (2, 4):
        add_member_attr(topo, f"cn=TestDef{num},CN=testuserGroups,{TEST_BASE}",
                        pending_user_dn, "member")
    user = add_user(topo, f"User_{test_id}", AUTO_MEM_SCOPE_TEST, "19", "14", "TestEngr")
    # The plugin must still have filled the remaining default groups
    for num in (1, 5, 3):
        assert check_groups(topo, f"cn=TestDef{num},CN=testuserGroups,{TEST_BASE}",
                            user.dn, "member")
    user.delete()


def test_multi_valued_automemberdefaultgroup_with_uniquemember(topo, _create_all_entries):
    """Multi-valued autoMemberDefaultGroup with uniquemember attributes

    :id: 4ff4461c-e7ff-11e8-8124-8c16451d917b
    :setup: Instance with replication
    :steps:
        1. Modify automember config entry to use uniquemember
        2. Change object class for all groups which is used for automember grouping
        3. Add user uniquemember attributes
        4. Check uniqueMember attribute in groups
        5. Revert the changes done above
    :expectedresults:
        1. Should success
        2. Should success
        3. Should success
        4. Should success
        5. Should success
    """
    test_id = "autoMembers_09"
    instance = topo.ms["supplier1"]
    auto = AutoMembershipPlugin(instance)
    definition_dn = f"cn=testuserGroups,{auto.dn}"
    # Modify automember config entry to use uniquemember
    AutoMembershipDefinition(instance, definition_dn).replace('autoMemberGroupingAttr',
                                                              "uniquemember: dn")
    instance.restart()
    all_defaults = [f"cn=TestDef{num},CN=testuserGroups,{TEST_BASE}" for num in range(1, 6)]
    for grp in all_defaults:
        change_grp_objclass("groupOfUniqueNames", "member", Group(instance, grp))
    # Add user in the automember scope
    user = add_user(topo, f"User_{test_id}", AUTO_MEM_SCOPE_TEST, "19", "14", "New")
    # Checking groups...
    assert user.dn.lower() in UniqueGroup(instance,
                                          all_defaults[0]).get_attr_val_utf8("uniqueMember")
    user.delete()
    # Change the automember config back to using "member"
    AutoMembershipDefinition(instance, definition_dn).replace('autoMemberGroupingAttr',
                                                              "member: dn")
    for grp in all_defaults:
        change_grp_objclass("groupOfNames", "uniquemember", UniqueGroup(instance, grp))
    instance.restart()
def test_invalid_automembergroupingattr_member(topo, _create_all_entries):
    """Invalid autoMemberGroupingAttr-member

    :id: 4ff4b598-e7ff-11e8-a3a3-8c16451d917b
    :setup: Instance with replication
    :steps:
        1. Change object class for one group which is used for automember grouping
        2. Try to add user with invalid parameter
        3. Check member attribute on other groups
        4. Check member attribute on group where object class was changed
        5. Revert the object class where it was changed
    :expectedresults:
        1. Should success
        2. Should fail (ldap.UNWILLING_TO_PERFORM)
        3. Should success
        4. Should fail (AssertionError)
        5. Should success
    """
    test_id = "autoMembers_10"
    default_group = f"cn=TestDef1,CN=testuserGroups,{TEST_BASE}"
    grp_instance = Group(topo.ms["supplier1"], default_group)
    change_grp_objclass("groupOfUniqueNames", "member", grp_instance)
    with pytest.raises(ldap.UNWILLING_TO_PERFORM):
        add_user(topo, f"User_{test_id}", AUTO_MEM_SCOPE_TEST, "19", "20", "Invalid")
    with pytest.raises(AssertionError):
        assert check_groups(topo, default_group,
                            f"uid=User_{test_id},{AUTO_MEM_SCOPE_TEST}", "member")
    change_grp_objclass("groupOfNames", "uniquemember", grp_instance)


def test_valid_and_invalid_automembergroupingattr(topo, _create_all_entries):
    """Valid and invalid autoMemberGroupingAttr

    :id: 4ff4fad0-e7ff-11e8-9cbd-8c16451d917b
    :setup: Instance with replication
    :steps:
        1. Change object class for some groups which is used for automember grouping
        2. Try to add user with invalid parameter
        3. Check member attribute on other groups
        4. Check member attribute on groups where object class was changed
        5. Revert the object class where it was changed
    :expectedresults:
        1. Should success
        2. Should fail (ldap.UNWILLING_TO_PERFORM)
        3. Should success
        4. Should fail (AssertionError)
        5. Should success
    """
    test_id = "autoMembers_11"
    defaults = {num: f"cn=TestDef{num},CN=testuserGroups,{TEST_BASE}" for num in range(1, 6)}
    switched = (defaults[4], defaults[5])
    for grp in switched:
        change_grp_objclass("groupOfUniqueNames", "member", Group(topo.ms["supplier1"], grp))
    with pytest.raises(ldap.UNWILLING_TO_PERFORM):
        add_user(topo, f"User_{test_id}", AUTO_MEM_SCOPE_TEST, "19", "24", "MixUsers")
    user_dn = f"cn=User_{test_id},{AUTO_MEM_SCOPE_TEST}"
    for grp in (defaults[1], defaults[2], defaults[3]):
        assert not check_groups(topo, grp, user_dn, "member")
    for grp in switched:
        with pytest.raises(AssertionError):
            assert check_groups(topo, grp, user_dn, "member")
    for grp in switched:
        change_grp_objclass("groupOfNames", "uniquemember", Group(topo.ms["supplier1"], grp))


def test_add_regular_expressions_for_user_groups_and_check_for_member_attribute_after_adding_users(
        topo, _create_all_entries):
    """Regular expressions for user groups

    :id: 4ff53fc2-e7ff-11e8-9a18-8c16451d917b
    :setup: Instance with replication
    :steps:
        1. Add user with a match with regular expressions for user groups
        2. check for member attribute after adding users
    :expectedresults:
        1. Should success
        2. Should success
    """
    test_id = "autoMembers_12"
    default_group = f'cn=SuffDef1,ou=userGroups,{BASE_SUFF}'
    user = add_user(topo, f"User_{test_id}", AUTO_MEM_SCOPE_BASE, "19", "0", "HR")
    assert check_groups(topo, default_group, user.dn, "member")
    assert number_memberof(topo, user.dn, 5)
    user.delete()


LIST_FOR_PARAMETERIZATION = [
    ("autoMembers_22", "5288", "5289", "Contractor", "5291", "5292", "Contractors"),
    ("autoMembers_21", "1161", "1162", "Contractor", "1162", "1163", "Contractors"),
    ("autoMembers_20", "1188", "1189", "CEO", "1191", "1192", "Contractors"),
    ("autoMembers_15", "9288", "9289", "Manager", "9291", "9292", "Managers"),
    ("autoMembers_14", "561", "562", "Manager", "562", "563", "Managers"),
    ("autoMembers_13", "9788", "9789", "VPEngg", "9392", "9393", "Managers")]
@pytest.mark.parametrize("testid, uid, gid, role, uid2, gid2, m_grp", LIST_FOR_PARAMETERIZATION)
def test_matching_gid_role_inclusive_regular_expression(topo, _create_all_entries,
                                                        testid, uid, gid, role, uid2, gid2, m_grp):
    """Matching gid nos and Role for the Inclusive regular expression

    :id: 4ff71ce8-e7ff-11e8-b69b-8c16451d917b
    :parametrized: yes
    :setup: Instance with replication
    :steps:
        1. Create users with matching gid nos and Role for the Inclusive regular expression
        2. It will be filtered with gidNumber, uidNumber and nsAdminGroupName
        3. It will a match for contract_grp
    :expectedresults:
        1. Should success
        2. Should success
        3. Should success
    """
    contract_grp = f'cn={m_grp},ou=userGroups,{BASE_SUFF}'
    first_user = add_user(topo, f"User_{testid}", AUTO_MEM_SCOPE_BASE, uid, gid, role)
    second_user = add_user(topo, f"SecondUser_{testid}", AUTO_MEM_SCOPE_BASE,
                           uid2, gid2, role)
    for member_dn in (first_user.dn, second_user.dn):
        assert check_groups(topo, contract_grp, member_dn, "member")
    assert number_memberof(topo, first_user.dn, 1)
    for created in (first_user, second_user):
        created.delete()


LIST_FOR_PARAMETERIZATION = [
    ("autoMembers_26", "5788", "5789", "Intern", "Contractors", "SuffDef1", 5),
    ("autoMembers_25", "9788", "9789", "Employee", "Contractors", "Managers", 1),
    ("autoMembers_24", "1110", "1111", "Employee", "Contractors", "SuffDef1", 5),
    ("autoMembers_23", "2788", "2789", "Contractor", "Contractors", "SuffDef1", 5),
    ("autoMembers_19", "5788", "5789", "HRManager", "Managers", "SuffDef1", 5),
    ("autoMembers_18", "6788", "6789", "Junior", "Managers", "SuffDef1", 5),
    ("autoMembers_17", "562", "563", "Junior", "Managers", "SuffDef1", 5),
    ("autoMembers_16", "6788", "6789", "Manager", "Managers", "SuffDef1", 5)]


@pytest.mark.parametrize("testid, uid, gid, role, c_grp, m_grp, number", LIST_FOR_PARAMETERIZATION)
def test_gid_and_role_inclusive_exclusive_regular_expression(topo, _create_all_entries,
                                                             testid, uid, gid, role,
                                                             c_grp, m_grp, number):
    """Matching gid nos and Role for the Inclusive and Exclusive regular expression

    :id: 4ff7d160-e7ff-11e8-8fbc-8c16451d917b
    :parametrized: yes
    :setup: Instance with replication
    :steps:
        1. Create user with not matching gid nos and Role for
           the Inclusive and Exclusive regular expression
        2. It will be filtered with gidNumber, uidNumber and nsAdminGroupName
        3. It will not match for contract_grp(Exclusive regular expression)
        4. It will match for default_group(Inclusive regular expression)
    :expectedresults:
        1. Should success
        2. Should success
        3. Should success
        4. Should success
    """
    excluded_grp = f'cn={c_grp},ou=userGroups,{BASE_SUFF}'
    included_grp = f'cn={m_grp},ou=userGroups,{BASE_SUFF}'
    user = add_user(topo, f"User_{testid}", AUTO_MEM_SCOPE_BASE, uid, gid, role)
    with pytest.raises(AssertionError):
        assert check_groups(topo, excluded_grp, user.dn, "member")
    check_groups(topo, included_grp, user.dn, "member")
    assert number_memberof(topo, user.dn, number)
    user.delete()


LIST_FOR_PARAMETERIZATION = [
    ("autoMembers_32", "555", "720", "Employee", "SubDef1", "SubDef3"),
    ("autoMembers_31", "515", "200", "Junior", "SubDef1", "SubDef5"),
    ("autoMembers_30", "999", "400", "Supervisor", "SubDef1", "SubDef2"),
    ("autoMembers_28", "555", "3663", "ContractHR", "Contractors,cn=subsuffGroups",
     "Managers,cn=subsuffGroups")]
Should success + """ + default_group1 = f'cn={c_grp},{SUBSUFFIX}' + default_group2 = f'cn={m_grp},{SUBSUFFIX}' + user = add_user(topo, "User_{}".format(testid), AUTO_MEM_SCOPE_BASE, uid, gid, role) + for group in [default_group1, default_group2]: + assert check_groups(topo, group, user.dn, "memberuid") + user.delete() + + +LIST_FOR_PARAMETERIZATION = [ + ("autoMembers_27", "595", "690", "ContractHR", "Managers", "Contractors"), + ("autoMembers_29", "8195", "2753", "Employee", "Contractors", "Managers"), + ("autoMembers_33", "545", "3333", "Supervisor", "Contractors", "Managers"), + ("autoMembers_34", "8195", "693", "Temporary", "Managers", "Contractors")] + + +@pytest.mark.parametrize("testid, uid, gid, role, c_grp, m_grp", LIST_FOR_PARAMETERIZATION) +def test_managers_inclusive_regex_rule(topo, _create_all_entries, + testid, uid, gid, role, c_grp, m_grp): + """Match managers inclusive regex rule, and no + inclusive/exclusive Contractors regex rules + + :id: 4ff8d862-e7ff-11e8-b688-8c16451d917b + :parametrized: yes + :setup: Instance with replication + :steps: + 1. Add User to match managers inclusive regex rule, and no + inclusive/exclusive Contractors regex rules + 2. It will be filtered with gidNumber, uidNumber and nsAdminGroupName(Supervisor) + 3. It will match for managers_grp(Inclusive regular expression) + 4. It will not match for contract_grp(Exclusive regular expression) + :expectedresults: + 1. Should success + 2. Should success + 3. Should success + 4. 
Should success + """ + contract_grp = f'cn={c_grp},cn=subsuffGroups,{SUBSUFFIX}' + managers_grp = f'cn={m_grp},cn=subsuffGroups,{SUBSUFFIX}' + user = add_user(topo, "User_{}".format(testid), AUTO_MEM_SCOPE_BASE, uid, gid, role) + check_groups(topo, managers_grp, user.dn, "memberuid") + with pytest.raises(AssertionError): + assert check_groups(topo, contract_grp, user.dn, "memberuid") + user.delete() + + +def test_reject_invalid_config_and_we_donot_deadlock_the_server(topo, _create_all_entries): + """Verify DS reject invalid config, and we don't deadlock the server + + :id: 4ff90c38-e7ff-11e8-b72a-8c16451d917b + :setup: Instance with replication + :steps: + 1. Verify DS reject invalid config, + 2. This operation don't deadlock the server + :expectedresults: + 1. Should success + 2. Should success + """ + # Changing config area to dc=automembers,dc=com + instance = AutoMembershipPlugin(topo.ms["supplier1"]) + instance.replace("nsslapd-pluginConfigArea", BASE_SUFF) + topo.ms["supplier1"] .restart() + # Attempting to add invalid config... + automembers = AutoMembershipDefinitions(topo.ms["supplier1"], BASE_SUFF) + with pytest.raises(ldap.UNWILLING_TO_PERFORM): + automembers.create(properties={ + 'cn': 'userGroups', + "autoMemberScope": BASE_SUFF, + "autoMemberFilter": "objectclass=posixAccount", + "autoMemberDefaultGroup": f'cn=SuffDef1,ou=userGroups,{BASE_SUFF}', + "autoMemberGroupingAttr": "member: dn" + }) + # Verify server is still working + automembers = AutoMembershipRegexRules(topo.ms["supplier1"], + f'cn=userGroups,cn=Auto Membership Plugin,' + f'cn=plugins,cn=config') + with pytest.raises(ldap.ALREADY_EXISTS): + automembers.create(properties={ + 'cn': 'Managers', + 'description': f'Group placement for Managers', + 'autoMemberTargetGroup': [f'cn=Managers,ou=userGroups,{BASE_SUFF}'], + 'autoMemberInclusiveRegex': [ + "gidNumber=^9", + "nsAdminGroupName=^Manager", + ], + }) + + # Adding first user... 
+ for uid in range(300, 302): + UserAccounts(topo.ms["supplier1"], BASE_SUFF, rdn=None).create_test_user(uid=uid, gid=uid) + # Adding this line code to remove the automembers plugin configuration. + instance.remove("nsslapd-pluginConfigArea", BASE_SUFF) + topo.ms["supplier1"] .restart() + + +@pytest.fixture(scope="module") +def _startuptask(topo): + """ + Fixture module that will change required entries for test cases. + """ + for Configs in ["cn=Managers,cn=subsuffGroups", + "cn=Contractors,cn=subsuffGroups", + "cn=testuserGroups", + "cn=subsuffGroups", + "cn=hostGroups"]: + AutoMembershipDefinition(topo.ms["supplier1"], f'{Configs},{PLUGIN_AUTO}').delete() + AutoMembershipDefinition(topo.ms["supplier1"], "cn=userGroups,{}".format(PLUGIN_AUTO)).replace( + 'autoMemberScope', 'ou=TaskEmployees,dc=autoMembers,dc=com') + topo.ms['supplier1'].restart() + + +@pytest.fixture(scope="function") +def _fixture_for_build_task(request, topo): + def finof(): + supplier = topo.ms['supplier1'] + auto_mem_scope = "ou=TaskEmployees,{}".format(BASE_SUFF) + for user in nsAdminGroups(supplier, auto_mem_scope, rdn=None).list(): + user.delete() + + request.addfinalizer(finof) + + +def bulk_check_groups(topo, GROUP_DN, MEMBATTR, TOTAL_MEM): + assert len(nsAdminGroup(topo, GROUP_DN).get_attr_vals_utf8(MEMBATTR)) == TOTAL_MEM + + +def test_automemtask_re_build_task(topo, _create_all_entries, _startuptask, _fixture_for_build_task): + """ + :id: 4ff973a8-e7ff-11e8-a89b-8c16451d917b + :setup: 4 Instances with replication + :steps: + 1. Add 10 users and enable autoMembers plug-in + 2. Run automembers re-build task to create the member attributes + 3. Search for any error logs + :expectedresults: + 1. Success + 2. Success + 3. 
Success + """ + supplier = topo.ms['supplier1'] + testid = "autoMemTask_01" + auto_mem_scope = "ou=TaskEmployees,{}".format(BASE_SUFF) + managers_grp = "cn=Managers,ou=userGroups,{}".format(BASE_SUFF) + contract_grp = "cn=Contractors,ou=userGroups,{}".format(BASE_SUFF) + user_rdn = "User_{}".format(testid) + # make sure the retro changelog is disabled + RetroChangelogPlugin(supplier).disable() + AutoMembershipPlugin(supplier).disable() + supplier.restart() + for i in range(10): + add_user(topo, "{}{}".format(user_rdn, str(i)), auto_mem_scope, str(1188), str(1189), "Manager") + for grp in (managers_grp, contract_grp): + with pytest.raises(AssertionError): + assert check_groups(topo, grp, f'uid=User_autoMemTask_010,{auto_mem_scope}', 'member') + AutoMembershipPlugin(supplier).enable() + supplier.restart() + error_string = "automember_rebuild_task_thread" + AutomemberRebuildMembershipTask(supplier).create(properties={ + 'basedn': auto_mem_scope, + 'filter': "objectClass=posixAccount" + }) + time.sleep(10) + + # Search for any error logs + assert not supplier.searchErrorsLog(error_string) + for grp in (managers_grp, contract_grp): + bulk_check_groups(supplier, grp, "member", 10) + + +def ldif_check_groups(USERS_DN, MEMBATTR, TOTAL_MEM, LDIF_FILE): + study = open('{}'.format(LDIF_FILE), 'r') + study_ready = study.read() + assert len(re.findall("{}: {}".format(MEMBATTR, USERS_DN.lower()), study_ready)) == TOTAL_MEM + + +def check_file_exists(export_ldif): + count = 0 + while not os.path.exists(export_ldif) and count < 3: + time.sleep(1) + count += 1 + + count = 0 + while (os.stat(export_ldif).st_size == 0) and count < 3: + time.sleep(1) + count += 1 + + if os.path.exists(export_ldif) and os.stat(export_ldif).st_size != 0: + return True + else: + return False + + +def test_automemtask_export_task(topo, _create_all_entries, _startuptask, _fixture_for_build_task): + """ + :id: 4ff98b18-e7ff-11e8-872a-8c16451d917b + :setup: 4 Instances with replication + :steps: + 1. 
Add 10 users and enable autoMembers plug-in + 2. Run automembers export task to create an ldif file with member attributes + :expectedresults: + 1. Success + 2. Success + """ + supplier = topo.ms['supplier1'] + p = Paths('supplier1') + testid = "autoMemTask_02" + auto_mem_scope = "ou=TaskEmployees,{}".format(BASE_SUFF) + managers_grp = "cn=Managers,ou=userGroups,{}".format(BASE_SUFF) + user_rdn = "User_{}".format(testid) + # Disabling plugin + AutoMembershipPlugin(supplier).disable() + supplier.restart() + for i in range(10): + add_user(topo, "{}{}".format(user_rdn, str(i)), auto_mem_scope, str(2788), str(2789), "Manager") + with pytest.raises(AssertionError): + bulk_check_groups(supplier, managers_grp, "member", 10) + AutoMembershipPlugin(supplier).enable() + supplier.restart() + export_ldif = p.backup_dir + "/Out_Export_02.ldif" + if os.path.exists(export_ldif): + os.remove(export_ldif) + exp_task = Tasks(supplier) + exp_task.automemberExport(suffix=auto_mem_scope, fstr='objectclass=posixAccount', ldif_out=export_ldif) + check_file_exists(export_ldif) + ldif_check_groups("cn={}".format(user_rdn), "member", 10, export_ldif) + os.remove(export_ldif) + + +def test_automemtask_mapping(topo, _create_all_entries, _startuptask, _fixture_for_build_task): + """ + :id: 4ff9a206-e7ff-11e8-bf59-8c16451d917b + :setup: 4 Instances with replication + :steps: + 1. Add 10 users and enable autoMembers plug-in + 2. Run automembers Mapping task with input/output ldif files + :expectedresults: + 1. Should success + 2. 
Should success + """ + supplier = topo.ms['supplier1'] + p = Paths('supplier1') + testid = "autoMemTask_02" + auto_mem_scope = "ou=TaskEmployees,{}".format(BASE_SUFF) + user_rdn = "User_{}".format(testid) + export_ldif = p.backup_dir+"/Out_Export_02.ldif" + output_ldif3 = p.backup_dir+"/Output_03.ldif" + for file in [export_ldif, output_ldif3]: + if os.path.exists(file): + os.remove(file) + for i in range(10): + add_user(topo, "{}{}".format(user_rdn, str(i)), auto_mem_scope, str(2788), str(2789), "Manager") + ExportTask(supplier).export_suffix_to_ldif(ldiffile=export_ldif, suffix=BASE_SUFF) + check_file_exists(export_ldif) + map_task = Tasks(supplier) + map_task.automemberMap(ldif_in=export_ldif, ldif_out=output_ldif3) + check_file_exists(output_ldif3) + ldif_check_groups("cn={}".format(user_rdn), "member", 10, output_ldif3) + for file in [export_ldif, output_ldif3]: + os.remove(file) + + +def test_automemtask_re_build(topo, _create_all_entries, _startuptask, _fixture_for_build_task): + """ + :id: 4ff9b944-e7ff-11e8-ad35-8c16451d917b + :setup: 4 Instances with replication + :steps: + 1. Add 10 users with inetOrgPerson object class + 2. Run automembers re-build task to create the member attributes, exp to FAIL + :expectedresults: + 1. Should success + 2. 
Should not success + """ + supplier = topo.ms['supplier1'] + testid = "autoMemTask_04" + auto_mem_scope = "ou=TaskEmployees,{}".format(BASE_SUFF) + managers_grp = "cn=Managers,ou=userGroups,{}".format(BASE_SUFF) + user_rdn = "User_{}".format(testid) + # Disabling plugin + AutoMembershipPlugin(supplier).disable() + supplier.restart() + for number in range(10): + add_user(topo, f'{user_rdn}{number}', auto_mem_scope, str(number), str(number), "Manager") + with pytest.raises(AssertionError): + bulk_check_groups(supplier, managers_grp, "member", 10) + # Enabling plugin + AutoMembershipPlugin(supplier).enable() + supplier.restart() + AutomemberRebuildMembershipTask(supplier).create(properties={ + 'basedn': auto_mem_scope, + 'filter': "objectClass=inetOrgPerson" + }) + time.sleep(10) + with pytest.raises(AssertionError): + bulk_check_groups(supplier, managers_grp, "member", 10) + + +def test_automemtask_export(topo, _create_all_entries, _startuptask, _fixture_for_build_task): + """ + :id: 4ff9cf74-e7ff-11e8-b712-8c16451d917b + :setup: 4 Instances with replication + :steps: + 1. Add 10 users with inetOrgPerson objectClass + 2. Run automembers export task to create an ldif file with member attributes, exp to FAIL + :expectedresults: + 1. Should success + 2. 
Should not success + """ + supplier = topo.ms['supplier1'] + p = Paths('supplier1') + testid = "autoMemTask_05" + auto_mem_scope = "ou=TaskEmployees,{}".format(BASE_SUFF) + managers_grp = "cn=Managers,ou=userGroups,{}".format(BASE_SUFF) + user_rdn = "User_{}".format(testid) + # Disabling plugin + AutoMembershipPlugin(supplier).disable() + supplier.restart() + for number in range(10): + add_user(topo, f'{user_rdn}{number}', auto_mem_scope, str(number), str(number), "Manager") + with pytest.raises(AssertionError): + bulk_check_groups(supplier, managers_grp, "member", 10) + # Enabling plugin + AutoMembershipPlugin(supplier).enable() + supplier.restart() + export_ldif = p.backup_dir + "/Out_Export_02.ldif" + if os.path.exists(export_ldif): + os.remove(export_ldif) + exp_task = Tasks(supplier) + exp_task.automemberExport(suffix=auto_mem_scope, fstr='objectclass=inetOrgPerson', ldif_out=export_ldif) + check_file_exists(export_ldif) + with pytest.raises(AssertionError): + ldif_check_groups("uid={}".format(user_rdn), "member", 10, export_ldif) + os.remove(export_ldif) + + +def test_automemtask_run_re_build(topo, _create_all_entries, _startuptask, _fixture_for_build_task): + """ + :id: 4ff9e5c2-e7ff-11e8-943e-8c16451d917b + :setup: 4 Instances with replication + :steps: + 1. Add 10 users with inetOrgPerson obj class + 2. Change plugin config + 3. Enable plug-in and run re-build task to create the member attributes + :expectedresults: + 1. Should success + 2. Should success + 3. 
Should success + """ + supplier = topo.ms['supplier1'] + p = Paths('supplier1') + testid = "autoMemTask_06" + auto_mem_scope = "ou=TaskEmployees,{}".format(BASE_SUFF) + managers_grp = "cn=Managers,ou=userGroups,{}".format(BASE_SUFF) + user_rdn = "User_{}".format(testid) + # Disabling plugin + AutoMembershipPlugin(supplier).disable() + supplier.restart() + for number in range(10): + add_user(topo, f'{user_rdn}{number}', auto_mem_scope, '111', '111', "Manager") + for user in nsAdminGroups(supplier, auto_mem_scope, rdn=None).list(): + user.add('objectclass', 'inetOrgPerson') + AutoMembershipDefinition(supplier, + f'cn=userGroups,{PLUGIN_AUTO}').replace('autoMemberFilter', + "objectclass=inetOrgPerson") + supplier.restart() + with pytest.raises(AssertionError): + bulk_check_groups(supplier, managers_grp, "member", 10) + AutoMembershipPlugin(supplier).enable() + supplier.restart() + AutomemberRebuildMembershipTask(supplier).create(properties={ + 'basedn': auto_mem_scope, + 'filter': "objectClass=inetOrgPerson"}) + time.sleep(10) + bulk_check_groups(supplier, managers_grp, "member", 10) + AutoMembershipDefinition(supplier, + f'cn=userGroups,{PLUGIN_AUTO}').replace('autoMemberFilter', + "objectclass=posixAccount") + supplier.restart() + + +def test_automemtask_run_export(topo, _create_all_entries, _startuptask, _fixture_for_build_task): + """ + :id: 4ff9fba2-e7ff-11e8-a5ec-8c16451d917b + :setup: 4 Instances with replication + :steps: + 1. Add 10 users with inetOrgPerson objectClass + 2. change plugin config + 3. Run export task to create an ldif file with member attributes + :expectedresults: + 1. Should success + 2. Should success + 3. 
Should success + """ + supplier = topo.ms['supplier1'] + p = Paths('supplier1') + testid = "autoMemTask_07" + auto_mem_scope = "ou=TaskEmployees,{}".format(BASE_SUFF) + managers_grp = "cn=Managers,ou=userGroups,{}".format(BASE_SUFF) + user_rdn = "User_{}".format(testid) + # Disabling plugin + AutoMembershipPlugin(supplier).disable() + supplier.restart() + for number in range(10): + add_user(topo, f'{user_rdn}{number}', auto_mem_scope, '222', '222', "Manager") + for user in nsAdminGroups(supplier, auto_mem_scope, rdn=None).list(): + user.add('objectclass', 'inetOrgPerson') + AutoMembershipDefinition(supplier, f'cn=userGroups,{PLUGIN_AUTO}').replace('autoMemberFilter', + "objectclass=inetOrgPerson") + supplier.restart() + # Enabling plugin + AutoMembershipPlugin(supplier).enable() + supplier.restart() + with pytest.raises(AssertionError): + bulk_check_groups(supplier, managers_grp, "member", 10) + export_ldif = p.backup_dir + "/Out_Export_02.ldif" + if os.path.exists(export_ldif): + os.remove(export_ldif) + exp_task = Tasks(supplier) + exp_task.automemberExport(suffix=auto_mem_scope, fstr='objectclass=inetOrgPerson', ldif_out=export_ldif) + check_file_exists(export_ldif) + ldif_check_groups("cn={}".format(user_rdn), "member", 10, export_ldif) + AutoMembershipDefinition(supplier, f'cn=userGroups,{PLUGIN_AUTO}').\ + replace('autoMemberFilter', "objectclass=posixAccount") + + +if __name__ == "__main__": + CURRENT_FILE = os.path.realpath(__file__) + pytest.main("-s -v %s" % CURRENT_FILE) diff --git a/dirsrvtests/tests/suites/automember_plugin/configuration_test.py b/dirsrvtests/tests/suites/automember_plugin/configuration_test.py new file mode 100644 index 0000000..6c364dc --- /dev/null +++ b/dirsrvtests/tests/suites/automember_plugin/configuration_test.py @@ -0,0 +1,99 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2021 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. 
+# --- END COPYRIGHT BLOCK --- + +import ldap +import os +import pytest +from lib389.topologies import topology_st as topo +from lib389.plugins import AutoMembershipPlugin, AutoMembershipDefinitions, MemberOfPlugin +from lib389._constants import DEFAULT_SUFFIX + +pytestmark = pytest.mark.tier1 + +@pytest.mark.bz834056 +def test_configuration(topo): + """Automembership plugin and mixed in the plugin configuration + + :id: 45a5a8f8-e800-11e8-ab16-8c16451d917b + :setup: Single Instance + :steps: + 1. Automembership plugin fails in a MMR setup, if data and config + area mixed in the plugin configuration + 2. Plugin configuration should throw proper error messages if not configured properly + :expectedresults: + 1. Should success + 2. Should success + """ + # Configure pluginConfigArea for PLUGIN_AUTO + AutoMembershipPlugin(topo.standalone).set("nsslapd-pluginConfigArea", 'cn=config') + # Enable MemberOf plugin + MemberOfPlugin(topo.standalone).enable() + topo.standalone.restart() + # Add invalid configuration, which mixes data and config area: All will fail + automembers = AutoMembershipDefinitions(topo.standalone) + with pytest.raises(ldap.UNWILLING_TO_PERFORM): + automembers.create(properties={ + 'cn': 'autouserGroups', + 'autoMemberScope': f'ou=Employees,cn=config', + 'autoMemberFilter': "objectclass=posixAccount", + 'autoMemberDefaultGroup': [f'cn=SuffDef1,ou=autouserGroups,cn=config', + f'cn=SuffDef2,ou=autouserGroups,cn=config'], + 'autoMemberGroupingAttr': 'member:dn' + }) + # Search in error logs + assert topo.standalone.ds_error_log.match('.*ERR - auto-membership-plugin - ' + 'automember_parse_config_entry - The default group ' + '"cn=SuffDef1,ou=autouserGroups,cn=config" ' + 'can not be a child of the plugin config area "cn=config"') + +def test_invalid_regex(topo): + """Test invalid regex is properly reportedin the error log + + :id: a6d89f84-ec76-4871-be96-411d051800b1 + :setup: Standalone Instance + :steps: + 1. Setup automember + 2. Add invalid regex + 3. 
Error log reports useful message + :expectedresults: + 1. Success + 2. Success + 3. Success + """ + REGEX_DN = "cn=regex1,cn=testregex,cn=auto membership plugin,cn=plugins,cn=config" + REGEX_VALUE = "cn=*invalid*" + REGEX_ESC_VALUE = "cn=\\*invalid\\*" + GROUP_DN = "cn=demo_group,ou=groups," + DEFAULT_SUFFIX + + AutoMembershipPlugin(topo.standalone).remove_all("nsslapd-pluginConfigArea") + automemberplugin = AutoMembershipPlugin(topo.standalone) + + automember_prop = { + 'cn': 'testRegex', + 'autoMemberScope': 'ou=People,' + DEFAULT_SUFFIX, + 'autoMemberFilter': 'objectclass=*', + 'autoMemberDefaultGroup': GROUP_DN, + 'autoMemberGroupingAttr': 'member:dn', + } + automember_defs = AutoMembershipDefinitions(topo.standalone, "cn=Auto Membership Plugin,cn=plugins,cn=config") + automember_def = automember_defs.create(properties=automember_prop) + automember_def.add_regex_rule("regex1", GROUP_DN, include_regex=[REGEX_VALUE]) + + automemberplugin.enable() + topo.standalone.restart() + + # Check errors log for invalid message + ERR_STR1 = "automember_parse_regex_rule - Unable to parse regex rule" + ERR_STR2 = f"Skipping invalid inclusive regex rule in rule entry \"{REGEX_DN}\" \\(rule = \"{REGEX_ESC_VALUE}\"\\)" + assert topo.standalone.searchErrorsLog(ERR_STR1) + assert topo.standalone.searchErrorsLog(ERR_STR2) + + +if __name__ == "__main__": + CURRENT_FILE = os.path.realpath(__file__) + pytest.main("-s -v %s" % CURRENT_FILE) diff --git a/dirsrvtests/tests/suites/backups/__init__.py b/dirsrvtests/tests/suites/backups/__init__.py new file mode 100644 index 0000000..fca4af8 --- /dev/null +++ b/dirsrvtests/tests/suites/backups/__init__.py @@ -0,0 +1,3 @@ +""" + :Requirement: 389-ds-base: Directory Server Backup Operations +""" diff --git a/dirsrvtests/tests/suites/backups/backup_test.py b/dirsrvtests/tests/suites/backups/backup_test.py new file mode 100644 index 0000000..11e60c7 --- /dev/null +++ b/dirsrvtests/tests/suites/backups/backup_test.py @@ -0,0 +1,114 @@ +# --- 
BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2022 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. +# --- END COPYRIGHT BLOCK --- +# +import logging +import pytest +import os +from datetime import datetime +from lib389._constants import DEFAULT_SUFFIX, INSTALL_LATEST_CONFIG +from lib389.properties import BACKEND_SAMPLE_ENTRIES, TASK_WAIT +from lib389.topologies import topology_st as topo +from lib389.backend import Backend +from lib389.tasks import BackupTask, RestoreTask +from lib389.config import BDB_LDBMConfig +from lib389 import DSEldif +from lib389.utils import ds_is_older, get_default_db_lib +import tempfile + +pytestmark = pytest.mark.tier1 + +DEBUGGING = os.getenv("DEBUGGING", default=False) +if DEBUGGING: + logging.getLogger(__name__).setLevel(logging.DEBUG) +else: + logging.getLogger(__name__).setLevel(logging.INFO) +log = logging.getLogger(__name__) + +def test_missing_backend(topo): + """Test that an error is returned when a restore is performed for a + backend that is no longer present. + + :id: 889b8028-35cf-41d7-91f6-bc5193683646 + :setup: Standalone Instance + :steps: + 1. Create a second backend + 2. Perform a back up + 3. Remove one of the backends from the config + 4. Perform a restore + :expectedresults: + 1. Success + 2. Success + 3. Success + 4. 
Failure + """ + + # Create a new backend + BE_NAME = 'backupRoot' + BE_SUFFIX = 'dc=back,dc=up' + props = { + 'cn': BE_NAME, + 'nsslapd-suffix': BE_SUFFIX, + BACKEND_SAMPLE_ENTRIES: INSTALL_LATEST_CONFIG + } + be = Backend(topo.standalone) + backend_entry = be.create(properties=props) + + # perform backup + backup_dir_name = "backup-%s" % datetime.now().strftime("%Y_%m_%d_%H_%M_%S") + archive = os.path.join(topo.standalone.ds_paths.backup_dir, backup_dir_name) + backup_task = BackupTask(topo.standalone) + task_properties = {'nsArchiveDir': archive} + backup_task.create(properties=task_properties) + backup_task.wait() + assert backup_task.get_exit_code() == 0 + + # Remove new backend + backend_entry.delete() + + # Restore the backup - it should fail + restore_task = RestoreTask(topo.standalone) + task_properties = {'nsArchiveDir': archive} + restore_task.create(properties=task_properties) + restore_task.wait() + assert restore_task.get_exit_code() != 0 + + +@pytest.mark.bz1851967 +@pytest.mark.ds4112 +@pytest.mark.skipif(ds_is_older('1.4.1'), reason="Not implemented") +@pytest.mark.skipif(get_default_db_lib() == "mdb", reason="Not supported over mdb") +def test_db_home_dir_online_backup(topo): + """Test that if the dbhome directory is set causing an online backup to fail, + the dblayer_backup function should go to error processing section. + + :id: cfc495d6-2a58-4e4e-aa40-39a15c71f973 + :setup: Standalone Instance + :steps: + 1. Change the dbhome to directory to eg-/tmp/test + 2. Perform an online back-up + 3. Check for the correct errors in the log + :expectedresults: + 1. Success + 2. Failure + 3. 
Success + """ + bdb_ldbmconfig = BDB_LDBMConfig(topo.standalone) + dseldif = DSEldif(topo.standalone) + topo.standalone.stop() + with tempfile.TemporaryDirectory() as backup_dir: + dseldif.replace(bdb_ldbmconfig.dn, 'nsslapd-db-home-directory', f'{backup_dir}') + topo.standalone.start() + topo.standalone.tasks.db2bak(backup_dir=f'{backup_dir}', args={TASK_WAIT: True}) + assert topo.standalone.ds_error_log.match(f".*Failed renaming {backup_dir}.bak back to {backup_dir}") + + +if __name__ == '__main__': + # Run isolated + # -s for DEBUG mode + CURRENT_FILE = os.path.realpath(__file__) + pytest.main(["-s", CURRENT_FILE]) diff --git a/dirsrvtests/tests/suites/basic/__init__.py b/dirsrvtests/tests/suites/basic/__init__.py new file mode 100644 index 0000000..8371b76 --- /dev/null +++ b/dirsrvtests/tests/suites/basic/__init__.py @@ -0,0 +1,3 @@ +""" + :Requirement: 389-ds-base: Basic Directory Server Operations +""" diff --git a/dirsrvtests/tests/suites/basic/basic_test.py b/dirsrvtests/tests/suites/basic/basic_test.py new file mode 100644 index 0000000..a7efcc8 --- /dev/null +++ b/dirsrvtests/tests/suites/basic/basic_test.py @@ -0,0 +1,2480 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2023 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. 
+# --- END COPYRIGHT BLOCK --- +# + +from subprocess import check_output, PIPE, run +from lib389 import DirSrv +from lib389.idm.user import UserAccount, UserAccounts +import pytest +from lib389.tasks import * +from lib389.utils import * +from lib389.topologies import topology_st +from lib389.dbgen import dbgen_users +from lib389.idm.organizationalunit import OrganizationalUnits +from lib389._constants import DN_DM, PASSWORD, PW_DM, ReplicaRole +from lib389.paths import Paths +from lib389.idm.directorymanager import DirectoryManager +from lib389.config import LDBMConfig, CertmapLegacy +from lib389.dseldif import DSEldif +from lib389.rootdse import RootDSE +from ....conftest import get_rpm_version +from lib389._mapped_object import DSLdapObjects +from lib389.replica import Replicas, Changelog +from lib389.backend import Backends, BackendSuffixView +from lib389.idm.domain import Domain +from lib389.nss_ssl import NssSsl +from lib389._constants import * +from lib389 import DirSrv +from lib389.instance.setup import SetupDs +from lib389.instance.options import General2Base, Slapd2Base +import os +import random +import ldap +import time +import subprocess + + +pytestmark = pytest.mark.tier0 + +default_paths = Paths() + +log = logging.getLogger(__name__) +DEBUGGING = os.getenv("DEBUGGING", default=False) + +# Globals +USER1_DN = 'uid=user1,' + DEFAULT_SUFFIX +USER2_DN = 'uid=user2,' + DEFAULT_SUFFIX +USER3_DN = 'uid=user3,' + DEFAULT_SUFFIX +USER4_DN = 'uid=user4,' + DEFAULT_SUFFIX + +ROOTDSE_DEF_ATTR_LIST = ('namingContexts', + 'supportedLDAPVersion', + 'supportedControl', + 'supportedExtension', + 'supportedSASLMechanisms', + 'vendorName', + 'vendorVersion') + +# This MAX_FDS value left about 22 connections available with bdb +# (should have more connections with lmdb) +MAX_FDS = 150 + + + +default_paths = Paths() + + + +log = logging.getLogger(__name__) +DEBUGGING = os.getenv("DEBUGGING", default=False) + + +class CustomSetup(): + DEFAULT_GENERAL = { 'config_version': 
2, + 'full_machine_name': 'localhost.localdomain', + 'strict_host_checking': False, + # Not setting 'systemd' because it is not used. + # (that is the global default.inf setting that matters) + } + DEFAULT_SLAPD = { 'root_password': PW_DM, + 'defaults': INSTALL_LATEST_CONFIG, + } + DEFAULT_BACKENDS = [ { + 'cn': 'userroot', + 'nsslapd-suffix': DEFAULT_SUFFIX, + 'sample_entries': 'yes', + BACKEND_SAMPLE_ENTRIES: INSTALL_LATEST_CONFIG, + }, ] + + WRAPPER_FORMAT = '''#!/bin/sh +{wrapper_options} +exec {nsslapd} -D {cfgdir} -i {pidfile} +''' + + + class CustomDirSrv(DirSrv): + def __init__(self, verbose=False, external_log=log): + super().__init__(verbose=verbose, external_log=external_log) + self.wrapper = None # placeholder for the wrapper file name + + def _reset_systemd(self): + self.systemd_override = False + + def status(self): + self._reset_systemd() + return super().status() + + def start(self, timeout=120, *args): + if self.status(): + return + tmp_env = os.environ + # Unset PYTHONPATH to avoid mixing old CLI tools and new lib389 + if "PYTHONPATH" in tmp_env: + del tmp_env["PYTHONPATH"] + try: + subprocess.check_call([ + '/usr/bin/sh', + self.wrapper + ], env=tmp_env, stderr=subprocess.STDOUT) + except subprocess.CalledProcessError as e: + log.fatal("%s failed! 
Error (%s) %s" % (self.wrapper, e.returncode, e.output)) + raise e from None + for count in range(timeout): + if self.status(): + return + time.sleep(1) + raise TimeoutException('Failed to start ns-slpad') + + def stop(self, timeout=120): + self._reset_systemd() + super().stop(timeout=timeout) + + + def _search_be(belist, beinfo): + for be in belist: + if be['cn'] == beinfo['cn']: + return be + return None + + def __init__(self, serverid, general=None, slapd=None, backends=None, log=log): + verbose = log.level > logging.DEBUG + self.log = log + self.serverid = serverid + self.verbose = verbose + self.wrapper = f'/tmp/ds_{serverid}_wrapper.sh' + if serverid.startswith('slapd-'): + self.instname = server.id + else: + self.instname = 'slapd-'+serverid + self.ldapi = None + self.pid_file = None + self.inst = None + + # Lets prepare the options + general_options = General2Base(log) + for d in (CustomSetup.DEFAULT_GENERAL, general): + if d: + for key,val in d.items(): + general_options.set(key,val) + log.debug('[general]: %s' % general_options._options) + self.general = general_options + + slapd_options = Slapd2Base(self.log) + slapd_options.set('instance_name', serverid) + for d in (CustomSetup.DEFAULT_SLAPD, slapd): + if d: + for key,val in d.items(): + slapd_options.set(key,val) + log.debug('[slapd]: %s' % slapd_options._options) + self.slapd = slapd_options + + backend_options = [] + for backend_list in (CustomSetup.DEFAULT_BACKENDS, backends): + if not backend_list: + continue + for backend in backend_list: + target_be = CustomSetup._search_be(backend_options, backend) + if not target_be: + target_be = {} + backend_options.append(target_be) + for key,val in backend.items(): + target_be[key] = val + log.debug('[backends]: %s' % backend_options) + self.backends = backend_options + + def _to_dirsrv_args(self): + args = {} + slapd = self.slapd.collect() + general = self.general.collect() + args["SER_HOST"] = general['full_machine_name'] + args["SER_PORT"] = 
slapd['port'] + args["SER_SECURE_PORT"] = slapd['secure_port'] + args["SER_SERVERID_PROP"] = self.serverid + return args + + def create_instance(self): + sds = SetupDs(verbose=self.verbose, dryrun=False, log=self.log) + self.general.verify() + general = self.general.collect() + self.slapd.verify() + slapd = self.slapd.collect() + sds.create_from_args(general, slapd, self.backends, None) + self.ldapi = get_ldapurl_from_serverid(self.serverid)[0] + args = self._to_dirsrv_args() + log.debug('DirSrv.allocate args = %s' % str(args)) + log.debug('ldapi = %s' % str(self.ldapi)) + root_dn = slapd['root_dn'] + root_password = slapd['root_password'] + inst = DirSrv(verbose=self.verbose, external_log=self.log) + inst.local_simple_allocate(self.serverid, ldapuri=self.ldapi, binddn=root_dn, password=root_password) + self.pid_file = inst.pid_file() + # inst.setup_ldapi() + log.debug('DirSrv = %s' % str(inst.__dict__)) + inst.open() + inst.stop() + inst = CustomSetup.CustomDirSrv(verbose=self.verbose, external_log=self.log) + inst.local_simple_allocate(self.serverid, ldapuri=self.ldapi, binddn=root_dn, password=root_password) + self.inst = inst + return inst + + def create_wrapper(self, maxfds=None): + self.inst.wrapper = self.wrapper + slapd = self.slapd.collect() + sbin_dir = slapd['sbin_dir'] + config_dir = slapd['config_dir'] + fmtvalues = { + 'nsslapd': f'{sbin_dir}/ns-slapd', + 'cfgdir': config_dir.format(instance_name=self.instname), + 'pidfile': self.pid_file, + 'wrapper_options': '' + } + if maxfds: + fmtvalues['wrapper_options']=f'ulimit -n {maxfds}\nulimit -H -n {maxfds}' + with open(self.wrapper, 'w') as f: + f.write(CustomSetup.WRAPPER_FORMAT.format(**fmtvalues)) + + def cleanup(self): + self.inst.stop() + self.inst.delete() + if os.path.exists(self.wrapper): + os.remove(self.wrapper) + + +@pytest.fixture(scope="function") +def _reset_attr(request, topology_st): + """ Reset nsslapd-close-on-failed-bind attr to the default (off) """ + + def fin(): + dm = 
DirectoryManager(topology_st.standalone) + try: + dm_conn = dm.bind() + dm_conn.config.replace('nsslapd-close-on-failed-bind', 'off') + assert (dm_conn.config.get_attr_val_utf8('nsslapd-close-on-failed-bind')) == 'off' + except ldap.LDAPError as e: + log.error('Failure reseting attr') + assert False + topology_st.standalone.restart() + + request.addfinalizer(fin) + + +@pytest.fixture(scope="module") +def import_example_ldif(topology_st): + """Import the Example LDIF for the tests in this suite""" + + log.info('Initializing the "basic" test suite') + + ldif = '%s/dirsrv/data/Example.ldif' % topology_st.standalone.get_data_dir() + import_ldif = topology_st.standalone.get_ldif_dir() + "/Example.ldif" + shutil.copy(ldif, import_ldif) + + import_task = ImportTask(topology_st.standalone) + import_task.import_suffix_from_ldif(ldiffile=import_ldif, suffix=DEFAULT_SUFFIX) + import_task.wait() + + +def check_db_sanity(topology_st): + try: + entries = topology_st.standalone.search_s(DEFAULT_SUFFIX, + ldap.SCOPE_SUBTREE, + '(uid=scarter)') + if entries is None: + log.fatal('Unable to find user uid=scarter. 
DB or indexes are probably corrupted !') + assert False + except ldap.LDAPError as e: + log.fatal('test_basic_acl: Search suffix failed: ' + e.args[0]['desc']) + assert False + + +@pytest.fixture(params=ROOTDSE_DEF_ATTR_LIST) +def rootdse_attr(topology_st, request): + """Adds an attr from the list + as the default attr to the rootDSE + """ + # Ensure the server is started and connected + topology_st.standalone.start() + + RETURN_DEFAULT_OPATTR = "nsslapd-return-default-opattr" + rootdse_attr_name = ensure_bytes(request.param) + + log.info(" Add the %s: %s to rootdse" % (RETURN_DEFAULT_OPATTR, + rootdse_attr_name)) + mod = [(ldap.MOD_ADD, RETURN_DEFAULT_OPATTR, rootdse_attr_name)] + try: + topology_st.standalone.modify_s("", mod) + except ldap.LDAPError as e: + log.fatal('Failed to add attr: error (%s)' % (e.args[0]['desc'])) + assert False + + def fin(): + log.info(" Delete the %s: %s from rootdse" % (RETURN_DEFAULT_OPATTR, + rootdse_attr_name)) + mod = [(ldap.MOD_DELETE, RETURN_DEFAULT_OPATTR, rootdse_attr_name)] + try: + topology_st.standalone.modify_s("", mod) + except ldap.LDAPError as e: + log.fatal('Failed to delete attr: error (%s)' % (e.args[0]['desc'])) + assert False + + request.addfinalizer(fin) + + return rootdse_attr_name + + +def change_conf_attr(topology_st, suffix, attr_name, attr_value): + """Change configuration attribute in the given suffix. + + Returns previous attribute value. + """ + + entry = DSLdapObject(topology_st.standalone, suffix) + + attr_value_bck = entry.get_attr_val_bytes(attr_name) + log.info('Set %s to %s. Previous value - %s. Modified suffix - %s.' 
% ( + attr_name, attr_value, attr_value_bck, suffix)) + if attr_value is None: + entry.remove_all(attr_name) + else: + entry.replace(attr_name, attr_value) + return attr_value_bck + + +@pytest.fixture(scope="function") +def ldapagent_config(topology_st, request): + """Creates agent.conf for snmp agent + """ + + var_dir = topology_st.standalone.get_local_state_dir() + config_file = os.path.join(topology_st.standalone.get_sysconf_dir(), 'dirsrv/config/agent.conf') + config = f"""agentx-supplier {var_dir}/agentx/supplier +agent-logdir {var_dir}/log/dirsrv +server slapd-{topology_st.standalone.serverid} +""" + + with open(config_file, 'w') as agent_config_file: + agent_config_file.write(config) + + def fin(): + os.remove(config_file) + + request.addfinalizer(fin) + + return config_file + + +def test_basic_ops(topology_st, import_example_ldif): + """Tests adds, mods, modrdns, and deletes operations + + :id: 33f97f55-60bf-46c7-b880-6c488517ae19 + + :setup: Standalone instance + + :steps: + 1. Add 3 test users USER1, USER2 and USER3 to database + 2. Modify (ADD, REPLACE and DELETE) description for USER1 in database + 3. Rename USER1, USER2 and USER3 using Modrds + 4. Delete test entries USER1, USER2 and USER3 + + :expectedresults: + 1. Add operation should PASS. + 2. Modify operations should PASS. + 3. Rename operations should PASS. + 4. Delete operations should PASS. 
+ """ + log.info('Running test_basic_ops...') + USER1_NEWDN = 'cn=user1' + USER2_NEWDN = 'cn=user2' + USER3_NEWDN = 'cn=user3' + NEW_SUPERIOR = 'ou=people,' + DEFAULT_SUFFIX + USER1_RDN_DN = 'cn=user1,' + DEFAULT_SUFFIX + USER2_RDN_DN = 'cn=user2,' + DEFAULT_SUFFIX + USER3_RDN_DN = 'cn=user3,' + NEW_SUPERIOR # New superior test + + # + # Adds# + try: + topology_st.standalone.add_s(Entry((USER1_DN, + {'objectclass': "top extensibleObject".split(), + 'sn': '1', + 'cn': 'user1', + 'uid': 'user1', + 'userpassword': 'password'}))) + except ldap.LDAPError as e: + log.error('Failed to add test user' + USER1_DN + ': error ' + e.args[0]['desc']) + assert False + + try: + topology_st.standalone.add_s(Entry((USER2_DN, + {'objectclass': "top extensibleObject".split(), + 'sn': '2', + 'cn': 'user2', + 'uid': 'user2', + 'userpassword': 'password'}))) + except ldap.LDAPError as e: + log.error('Failed to add test user' + USER2_DN + ': error ' + e.args[0]['desc']) + assert False + + try: + topology_st.standalone.add_s(Entry((USER3_DN, + {'objectclass': "top extensibleObject".split(), + 'sn': '3', + 'cn': 'user3', + 'uid': 'user3', + 'userpassword': 'password'}))) + except ldap.LDAPError as e: + log.error('Failed to add test user' + USER3_DN + ': error ' + e.args[0]['desc']) + assert False + + # + # Mods + # + try: + topology_st.standalone.modify_s(USER1_DN, [(ldap.MOD_ADD, 'description', + b'New description')]) + except ldap.LDAPError as e: + log.error('Failed to add description: error ' + e.args[0]['desc']) + assert False + + try: + topology_st.standalone.modify_s(USER1_DN, [(ldap.MOD_REPLACE, 'description', + b'Modified description')]) + except ldap.LDAPError as e: + log.error('Failed to modify description: error ' + e.args[0]['desc']) + assert False + + try: + topology_st.standalone.modify_s(USER1_DN, [(ldap.MOD_DELETE, 'description', + None)]) + except ldap.LDAPError as e: + log.error('Failed to delete description: error ' + e.args[0]['desc']) + assert False + + # + # Modrdns + 
# + try: + topology_st.standalone.rename_s(USER1_DN, USER1_NEWDN, delold=1) + except ldap.LDAPError as e: + log.error('Failed to modrdn user1: error ' + e.args[0]['desc']) + assert False + + try: + topology_st.standalone.rename_s(USER2_DN, USER2_NEWDN, delold=0) + except ldap.LDAPError as e: + log.error('Failed to modrdn user2: error ' + e.args[0]['desc']) + assert False # Modrdn - New superior + + try: + topology_st.standalone.rename_s(USER3_DN, USER3_NEWDN, + newsuperior=NEW_SUPERIOR, delold=1) + except ldap.LDAPError as e: + log.error('Failed to modrdn(new superior) user3: error ' + e.args[0]['desc']) + assert False + # + # Deletes + # + try: + topology_st.standalone.delete_s(USER1_RDN_DN) + except ldap.LDAPError as e: + log.error('Failed to delete test entry1: ' + e.args[0]['desc']) + assert False + + try: + topology_st.standalone.delete_s(USER2_RDN_DN) + except ldap.LDAPError as e: + log.error('Failed to delete test entry2: ' + e.args[0]['desc']) + assert False + + try: + topology_st.standalone.delete_s(USER3_RDN_DN) + except ldap.LDAPError as e: + log.error('Failed to delete test entry3: ' + e.args[0]['desc']) + assert False + check_db_sanity(topology_st) + log.info('test_basic_ops: PASSED') + + +def test_basic_import_export(topology_st, import_example_ldif): + """Test online and offline LDIF import & export + + :id: 3ceeea11-9235-4e20-b80e-7203b2c6e149 + + :setup: Standalone instance + + :steps: + 1. Generate a test ldif (50k entries) + 2. Import test ldif file using Online import. + 3. Import test ldif file using Offline import (ldif2db). + 4. Export test ldif file using Online export. + 5. Export test ldif file using Offline export (db2ldif). + 6. Cleanup - Import the Example LDIF for the other tests in this suite + + :expectedresults: + 1. Test ldif file creation should PASS. + 2. Online import should PASS. + 3. Offline import should PASS. + 4. Online export should PASS. + 5. Offline export should PASS. + 6. Cleanup should PASS. 
+ """ + + log.info('Running test_basic_import_export...') + # + # Test online/offline LDIF imports + # + topology_st.standalone.start() + # topology_st.standalone.config.set('nsslapd-errorlog-level', '1') + + # Generate a test ldif (50k entries) + log.info("Generating LDIF...") + ldif_dir = topology_st.standalone.get_ldif_dir() + import_ldif = ldif_dir + '/basic_import.ldif' + dbgen_users(topology_st.standalone, 50000, import_ldif, DEFAULT_SUFFIX) + + # Online + log.info("Importing LDIF online...") + import_task = ImportTask(topology_st.standalone) + import_task.import_suffix_from_ldif(ldiffile=import_ldif, suffix=DEFAULT_SUFFIX) + + # Wait a bit till the task is created and available for searching + time.sleep(0.5) + + # Good as place as any to quick test the task has some expected attributes + if ds_is_newer('1.4.1.2'): + assert import_task.present('nstaskcreated') + assert import_task.present('nstasklog') + assert import_task.present('nstaskcurrentitem') + assert import_task.present('nstasktotalitems') + assert import_task.present('ttl') + + import_task.wait() + + # Offline + log.info("Importing LDIF offline...") + topology_st.standalone.stop() + if not topology_st.standalone.ldif2db(DEFAULT_BENAME, None, None, None, import_ldif): + log.fatal('test_basic_import_export: Offline import failed') + assert False + topology_st.standalone.start() + + # + # Test online and offline LDIF export + # + + # Online export + log.info("Exporting LDIF online...") + export_ldif = ldif_dir + '/export.ldif' + + export_task = ExportTask(topology_st.standalone) + export_task.export_suffix_to_ldif(ldiffile=export_ldif, suffix=DEFAULT_SUFFIX) + export_task.wait() + + # Offline export + log.info("Exporting LDIF offline...") + topology_st.standalone.stop() + if not topology_st.standalone.db2ldif(DEFAULT_BENAME, (DEFAULT_SUFFIX,), + None, None, None, export_ldif): + log.fatal('test_basic_import_export: Failed to run offline db2ldif') + assert False + + topology_st.standalone.start() + + # 
+ # Cleanup - Import the Example LDIF for the other tests in this suite + # + log.info("Restore datrabase, import initial LDIF...") + ldif = '%s/dirsrv/data/Example.ldif' % topology_st.standalone.get_data_dir() + import_ldif = topology_st.standalone.get_ldif_dir() + "/Example.ldif" + shutil.copyfile(ldif, import_ldif) + + import_task = ImportTask(topology_st.standalone) + import_task.import_suffix_from_ldif(ldiffile=import_ldif, suffix=DEFAULT_SUFFIX) + import_task.wait() + + check_db_sanity(topology_st) + log.info('test_basic_import_export: PASSED') + + +def test_basic_backup(topology_st, import_example_ldif): + """Tests online and offline backup and restore + + :id: 0e9d91f8-8748-40b6-ab03-fbd1998eb985 + + :setup: Standalone instance and import example.ldif + + :steps: + 1. Test online backup using db2bak. + 2. Test config files are backed up + 3. Test online restore using bak2db. + 4. Test offline backup using db2bak. + 5. Test config files are backed up + 6. Test offline restore using bak2db. + + :expectedresults: + 1. Online backup should PASS. + 2. Config files were backed up + 3. Online restore should PASS. + 4. Offline backup should PASS. + 5. Config files were backed up + 6. Offline restore should PASS. 
+ """ + + log.info('Running test_basic_backup...') + + backup_dir = topology_st.standalone.get_bak_dir() + '/backup_test_online' + log.info(f'Backup directory is {backup_dir}') + + # Test online backup + try: + topology_st.standalone.tasks.db2bak(backup_dir=backup_dir, + args={TASK_WAIT: True}) + except ValueError: + log.fatal('test_basic_backup: Online backup failed') + assert False + + # Test config files were backed up + assert os.path.isfile(backup_dir + "/config_files/dse.ldif") + assert os.path.isfile(backup_dir + "/config_files/schema/99user.ldif") + assert os.path.isfile(backup_dir + "/config_files/certmap.conf") + assert os.path.isfile(backup_dir + "/config_files/cert9.db") + + # Test online restore + try: + topology_st.standalone.tasks.bak2db(backup_dir=backup_dir, + args={TASK_WAIT: True}) + except ValueError: + log.fatal('test_basic_backup: Online restore failed') + assert False + + # Test offline backup + backup_dir = topology_st.standalone.get_bak_dir() + '/backup_test_offline' + topology_st.standalone.stop() + if not topology_st.standalone.db2bak(backup_dir): + log.fatal('test_basic_backup: Offline backup failed') + assert False + + # Test config files wre backed up + assert os.path.isfile(backup_dir + "/config_files/dse.ldif") + assert os.path.isfile(backup_dir + "/config_files/schema/99user.ldif") + assert os.path.isfile(backup_dir + "/config_files/certmap.conf") + assert os.path.isfile(backup_dir + "/config_files/cert9.db") + + # Test offline restore + if not topology_st.standalone.bak2db(backup_dir): + log.fatal('test_basic_backup: Offline backup failed') + assert False + topology_st.standalone.start() + + check_db_sanity(topology_st) + log.info('test_basic_backup: PASSED') + + +def test_basic_db2index(topology_st): + """Assert db2index can operate correctly. 
+ + :id: 191fc0fd-9722-46b5-a7c3-e8760effe119 + + :setup: Standalone instance + + :steps: + 1: Call db2index with a single index attribute + 2: Call db2index with multiple index attributes + 3: Call db2index with no index attributes + + :expectedresults: + 1: Index succeeds for single index attribute + 2: Index succeeds for multiple index attributes + 3: Index succeeds for all backend indexes which have been obtained from dseldif + + """ + + # Error log message to confirm a reindex + if get_default_db_lib() == "mdb": + dbprefix = "dbmdb" + else: + dbprefix = "bdb" + info_message = f'INFO - {dbprefix}_db2index - {DEFAULT_BENAME}: Indexing attribute: ' + + log.info('Start the server') + topology_st.standalone.start() + check_db_sanity(topology_st) + + log.info('Offline reindex, stopping the server') + topology_st.standalone.stop() + + log.info('Reindex with a single index attribute') + topology_st.standalone.db2index(bename=DEFAULT_BENAME, attrs=['uid']) + assert topology_st.standalone.searchErrorsLog(info_message + 'uid') + + log.info('Restart the server to clear the logs') + topology_st.standalone.start() + check_db_sanity(topology_st) + topology_st.standalone.stop() + + log.info('Reindex with multiple attributes') + topology_st.standalone.db2index(bename=DEFAULT_BENAME, attrs=['cn','aci','givenname']) + assert topology_st.standalone.searchErrorsLog(info_message + 'cn') + assert topology_st.standalone.searchErrorsLog(info_message + 'aci') + assert topology_st.standalone.searchErrorsLog(info_message + 'givenname') + + log.info('Restart the server to clear the logs') + topology_st.standalone.start() + check_db_sanity(topology_st) + topology_st.standalone.stop() + + log.info('Start the server and get all indexes for specified backend') + topology_st.standalone.start() + check_db_sanity(topology_st) + dse_ldif = DSEldif(topology_st.standalone) + indexes = dse_ldif.get_indexes(DEFAULT_BENAME) + numIndexes = len(indexes) + assert numIndexes > 0 + + log.info('Stop the 
server and reindex with all backend indexes') + topology_st.standalone.stop() + topology_st.standalone.db2index(bename=DEFAULT_BENAME, attrs=indexes) + log.info('Checking the server logs for %d backend indexes INFO' % numIndexes) + for indexNum, index in enumerate(indexes): + if index in "entryrdn": + assert topology_st.standalone.searchErrorsLog( + f'INFO - {dbprefix}_db2index - {DEFAULT_BENAME}: Indexing {index}') + else: + assert topology_st.standalone.searchErrorsLog( + f'INFO - {dbprefix}_db2index - {DEFAULT_BENAME}: Indexing attribute: {index}') + + assert indexNum+1 == numIndexes + + topology_st.standalone.start() + check_db_sanity(topology_st) + log.info('test_basic_db2index: PASSED') + + +def test_basic_acl(topology_st, import_example_ldif): + """Run some basic access control (ACL) tests + + :id: 4f4e705f-32f4-4065-b3a8-2b0c2525798b + + :setup: Standalone instance + + :steps: + 1. Add two test users USER1_DN and USER2_DN. + 2. Add an aci that denies USER1 from doing anything. + 3. Set the default anonymous access for USER2. + 4. Try searching entries using USER1. + 5. Try searching entries using USER2. + 6. Try searching entries using root dn. + 7. Cleanup - delete test users and test ACI. + + :expectedresults: + 1. Test Users should be added. + 2. ACI should be added. + 3. This operation should PASS. + 4. USER1 should not be able to search anything. + 5. USER2 should be able to search everything except password. + 6. RootDN should be allowed to search everything. + 7. Cleanup should PASS. 
+ """ + + """Run some basic access control(ACL) tests""" + log.info('Running test_basic_acl...') + + DENY_ACI = ensure_bytes('(targetattr = "*")(version 3.0;acl "deny user";deny (all)(userdn = "ldap:///%s");)' % USER1_DN) + + # + # Add two users + # + try: + topology_st.standalone.add_s(Entry((USER1_DN, + {'objectclass': "top extensibleObject".split(), + 'sn': '1', + 'cn': 'user 1', + 'uid': 'user1', + 'userpassword': PASSWORD}))) + except ldap.LDAPError as e: + log.fatal('test_basic_acl: Failed to add test user ' + USER1_DN + + ': error ' + e.args[0]['desc']) + assert False + + try: + topology_st.standalone.add_s(Entry((USER2_DN, + {'objectclass': "top extensibleObject".split(), + 'sn': '2', + 'cn': 'user 2', + 'uid': 'user2', + 'userpassword': PASSWORD}))) + except ldap.LDAPError as e: + log.fatal('test_basic_acl: Failed to add test user ' + USER1_DN + + ': error ' + e.args[0]['desc']) + assert False + + # + # Add an aci that denies USER1 from doing anything, + # and also set the default anonymous access + # + try: + topology_st.standalone.modify_s(DEFAULT_SUFFIX, [(ldap.MOD_ADD, 'aci', DENY_ACI)]) + except ldap.LDAPError as e: + log.fatal('test_basic_acl: Failed to add DENY ACI: error ' + e.args[0]['desc']) + assert False + + # + # Make sure USER1_DN can not search anything, but USER2_dn can... + # + try: + topology_st.standalone.simple_bind_s(USER1_DN, PASSWORD) + except ldap.LDAPError as e: + log.fatal('test_basic_acl: Failed to bind as user1, error: ' + e.args[0]['desc']) + assert False + + try: + entries = topology_st.standalone.search_s(DEFAULT_SUFFIX, + ldap.SCOPE_SUBTREE, + '(uid=*)') + if entries: + log.fatal('test_basic_acl: User1 was incorrectly able to search the suffix!') + assert False + except ldap.LDAPError as e: + log.fatal('test_basic_acl: Search suffix failed(as user1): ' + e.args[0]['desc']) + assert False + + # Now try user2... 
Also check that userpassword is stripped out + try: + topology_st.standalone.simple_bind_s(USER2_DN, PASSWORD) + except ldap.LDAPError as e: + log.fatal('test_basic_acl: Failed to bind as user2, error: ' + e.args[0]['desc']) + assert False + + try: + entries = topology_st.standalone.search_s(DEFAULT_SUFFIX, + ldap.SCOPE_SUBTREE, + '(uid=user1)') + if not entries: + log.fatal('test_basic_acl: User1 incorrectly not able to search the suffix') + assert False + if entries[0].hasAttr('userpassword'): + # The default anonymous access aci should have stripped out userpassword + log.fatal('test_basic_acl: User2 was incorrectly able to see userpassword') + assert False + except ldap.LDAPError as e: + log.fatal('test_basic_acl: Search for user1 failed(as user2): ' + e.args[0]['desc']) + assert False + + # Make sure RootDN can also search (this also resets the bind dn to the + # Root DN for future operations) + try: + topology_st.standalone.simple_bind_s(DN_DM, PW_DM) + except ldap.LDAPError as e: + log.fatal('test_basic_acl: Failed to bind as ROotDN, error: ' + e.args[0]['desc']) + assert False + + try: + entries = topology_st.standalone.search_s(DEFAULT_SUFFIX, + ldap.SCOPE_SUBTREE, + '(uid=*)') + if not entries: + log.fatal('test_basic_acl: Root DN incorrectly not able to search the suffix') + assert False + except ldap.LDAPError as e: + log.fatal('test_basic_acl: Search for user1 failed(as user2): ' + e.args[0]['desc']) + assert False + + # + # Cleanup + # + try: + topology_st.standalone.modify_s(DEFAULT_SUFFIX, [(ldap.MOD_DELETE, 'aci', DENY_ACI)]) + except ldap.LDAPError as e: + log.fatal('test_basic_acl: Failed to delete DENY ACI: error ' + e.args[0]['desc']) + assert False + + try: + topology_st.standalone.delete_s(USER1_DN) + except ldap.LDAPError as e: + log.fatal('test_basic_acl: Failed to delete test entry1: ' + e.args[0]['desc']) + assert False + + try: + topology_st.standalone.delete_s(USER2_DN) + except ldap.LDAPError as e: + log.fatal('test_basic_acl: Failed 
to delete test entry2: ' + e.args[0]['desc']) + assert False + + log.info('test_basic_acl: PASSED') + + +def test_basic_searches(topology_st, import_example_ldif): + """Tests basic search operations with filters. + + :id: 426a59ff-49b8-4a70-b377-0c0634a29b6f + + :setup: Standalone instance, add example.ldif to the database + + :steps: + 1. Execute search command while using different filters. + 2. Check number of entries returned by search filters. + + :expectedresults: + 1. Search command should PASS. + 2. Number of result entries returned should match number of the database entries according to the search filter. + """ + + log.info('Running test_basic_searches...') + + filters = (('(uid=scarter)', 1), + ('(uid=tmorris*)', 1), + ('(uid=*hunt*)', 4), + ('(uid=*cope)', 2), + ('(mail=*)', 150), + ('(roomnumber>=4000)', 35), + ('(roomnumber<=4000)', 115), + ('(&(roomnumber>=4000)(roomnumber<=4500))', 18), + ('(!(l=sunnyvale))', 120), + ('(&(uid=t*)(l=santa clara))', 7), + ('(|(uid=k*)(uid=r*))', 18), + ('(|(uid=t*)(l=sunnyvale))', 50), + ('(&(!(uid=r*))(ou=people))', 139), + ('(&(uid=m*)(l=sunnyvale)(ou=people)(mail=*example*)(roomNumber=*))', 3), + ('(&(|(uid=m*)(l=santa clara))(roomNumber=22*))', 5), + ('(&(|(uid=m*)(l=santa clara))(roomNumber=22*)(!(roomnumber=2254)))', 4),) + + for (search_filter, search_result) in filters: + try: + entries = topology_st.standalone.search_s(DEFAULT_SUFFIX, + ldap.SCOPE_SUBTREE, + search_filter) + if len(entries) != search_result: + log.fatal('test_basic_searches: An incorrect number of entries\ + was returned from filter (%s): (%d) expected (%d)' % + (search_filter, len(entries), search_result)) + assert False + except ldap.LDAPError as e: + log.fatal('Search failed: ' + e.args[0]['desc']) + assert False + + log.info('test_basic_searches: PASSED') + + +@pytest.mark.parametrize('limit,resp', + ((('200'), 'PASS'), + (('50'), ldap.ADMINLIMIT_EXCEEDED))) +def test_basic_search_lookthroughlimit(topology_st, limit, resp, 
import_example_ldif): + """ + Tests normal search with lookthroughlimit set high and low. + + :id: b5119970-6c9f-41b7-9649-de9233226fec + + :setup: Standalone instance, add example.ldif to the database, search filter (uid=*). + + :steps: + 1. Import ldif user file. + 2. Change lookthroughlimit to 200. + 3. Bind to server as low priv user + 4. Run search 1 with "high" lookthroughlimit. + 5. Change lookthroughlimit to 50. + 6. Run search 2 with "low" lookthroughlimit. + 7. Delete user from DB. + 8. Reset lookthroughlimit to original. + + :expectedresults: + 1. Success + 2. Success + 3. Success + 4. Success, first search should complete with no error. + 5. Success + 6. Success, second search should return ldap.ADMINLIMIT_EXCEEDED error. + 7. Success + 8. Success + + """ + + log.info('Running test_basic_search_lookthroughlimit...') + + search_filter = "(uid=*)" + + ltl_orig = change_conf_attr(topology_st, 'cn=config,cn=ldbm database,cn=plugins,cn=config', 'nsslapd-lookthroughlimit', limit) + + try: + users = UserAccounts(topology_st.standalone, DEFAULT_SUFFIX, rdn=None) + user = users.create_test_user() + user.replace('userPassword', PASSWORD) + except ldap.LDAPError as e: + log.fatal('Failed to create test user: error ' + e.args[0]['desc']) + assert False + + try: + conn = UserAccount(topology_st.standalone, user.dn).bind(PASSWORD) + except ldap.LDAPError as e: + log.fatal('Failed to bind test user: error ' + e.args[0]['desc']) + assert False + + try: + if resp == ldap.ADMINLIMIT_EXCEEDED: + with pytest.raises(ldap.ADMINLIMIT_EXCEEDED): + searchid = conn.search(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE, search_filter) + rtype, rdata = conn.result(searchid) + else: + searchid = conn.search(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE, search_filter) + rtype, rdata = conn.result(searchid) + assert(len(rdata) == 151) #151 entries in the imported ldif file using "(uid=*)" + except ldap.LDAPError as e: + log.fatal('Failed to perform search: error ' + e.args[0]['desc']) + assert False + + 
finally: + #Cleanup + change_conf_attr(topology_st, 'cn=config,cn=ldbm database,cn=plugins,cn=config', 'nsslapd-lookthroughlimit', ltl_orig) + user.delete() + + log.info('test_basic_search_lookthroughlimit: PASSED') + + +@pytest.fixture(scope="module") +def add_test_entry(topology_st, request): + # Add test entry + topology_st.standalone.add_s(Entry((USER4_DN, + {'objectclass': "top extensibleObject".split(), + 'cn': 'user1', 'uid': 'user1'}))) + + +search_params = [(['1.1'], 'cn', False), + (['1.1', 'cn'], 'cn', True), + (['+'], 'nsUniqueId', True), + (['*'], 'cn', True), + (['cn'], 'cn', True)] +@pytest.mark.skipif(ds_is_older("1.4.2.0"), reason="Not implemented") +@pytest.mark.parametrize("attrs, attr, present", search_params) +def test_search_req_attrs(topology_st, add_test_entry, attrs, attr, present): + """Test requested attributes in search operations. + + :id: 426a59ff-49b8-4a70-b377-0c0634a29b6e + :parametrized: yes + :setup: Standalone instance + :steps: + 1. Test "1.1" does not return any attributes. + 2. Test "1.1" is ignored if there are other requested attributes + 3. Test "+" returns all operational attributes + 4. Test "*" returns all attributes + 5. Test requested attributes + + :expectedresults: + 1. Success + 2. Success + 3. Success + 4. Success + 5. Success + """ + + log.info("Testing attrs: {} attr: {} present: {}".format(attrs, attr, present)) + entry = topology_st.standalone.search_s(USER4_DN, + ldap.SCOPE_BASE, + 'objectclass=top', + attrs) + if present: + assert entry[0].hasAttr(attr) + else: + assert not entry[0].hasAttr(attr) + + +def test_basic_referrals(topology_st, import_example_ldif): + """Test LDAP server in referral mode. + + :id: c586aede-7ac3-4e8d-a1cf-bfa8b8d78cc2 + + :setup: Standalone instance + + :steps: + 1. Set the referral and the backend state + 2. Set backend state to referral mode. + 3. Set server to not follow referral. + 4. Search using referral. + 5. Make sure server can restart in referral mode. + 6. 
Cleanup - Delete referral. + + :expectedresults: + 1. Set the referral, and the backend state should PASS. + 2. Set backend state to referral mode should PASS. + 3. Set server to not follow referral should PASS. + 4. referral error(10) should occur. + 5. Restart should PASS. + 6. Cleanup should PASS. + """ + + log.info('Running test_basic_referrals...') + backends = Backends(topology_st.standalone) + backend = backends.list()[0] + bev = BackendSuffixView(topology_st.standalone, backend) + bev.set('nsslapd-referral', 'ldap://localhost.localdomain:389/o%3dnetscaperoot') + bev.set('nsslapd-state', 'referral') + + log.info('Checking that the settings were applied...') + assert bev.get_attr_val_utf8('nsslapd-referral') == 'ldap://localhost.localdomain:389/o%3dnetscaperoot' + assert bev.get_attr_val_utf8('nsslapd-state') == 'referral' + + log.info('Testing that a referral error is returned...') + topology_st.standalone.set_option(ldap.OPT_REFERRALS, 0) # Do not follow referral + with pytest.raises(ldap.REFERRAL): + topology_st.standalone.search_s(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE, 'objectclass=top') + + # Make sure server can restart in referral mode + log.info('Restarting the server...') + topology_st.standalone.restart(timeout=10) + + log.info('Cleaning up...') + bev.set('nsslapd-state', 'backend') + bev.remove_all('nsslapd-referral') + topology_st.standalone.set_option(ldap.OPT_REFERRALS, 1) + + +def test_basic_systemctl(topology_st, import_example_ldif): + """Tests systemctl/lib389 can stop and start the server. + + :id: a92a7438-ecfa-4583-a89c-5fbfc0220b69 + + :setup: Standalone instance + + :steps: + 1. Stop the server. + 2. Start the server. + 3. Stop the server, break the dse.ldif and dse.ldif.bak, so a start fails. + 4. Verify that systemctl detects the failed start. + 5. Fix the dse.ldif, and make sure the server starts up. + 6. Verify systemctl correctly identifies the successful start. + + :expectedresults: + 1. Server should be stopped. + 2. 
Server should start + 3. Stop should work but start after breaking dse.ldif should fail. + 4. Systemctl should be able to detect the failed start. + 5. Server should start. + 6. Systemctl should be able to detect the successful start. + """ + + log.info('Running test_basic_systemctl...') + + config_dir = topology_st.standalone.get_config_dir() + + # + # Stop the server + # + log.info('Stopping the server...') + topology_st.standalone.stop() + log.info('Stopped the server.') + + # + # Start the server + # + log.info('Starting the server...') + topology_st.standalone.start() + log.info('Started the server.') + + # + # Stop the server, break the dse.ldif so a start fails, + # and verify that systemctl detects the failed start + # + log.info('Stopping the server...') + topology_st.standalone.stop() + log.info('Stopped the server before breaking the dse.ldif.') + + shutil.copy(config_dir + '/dse.ldif', config_dir + '/dse.ldif.correct') + open(config_dir + '/dse.ldif', 'w').close() + # We need to kill the .bak file too, DS is just too smart! 
+ open(config_dir + '/dse.ldif.bak', 'w').close() + + log.info('Attempting to start the server with broken dse.ldif...') + try: + topology_st.standalone.start() + except Exception as e: + log.info('Server failed to start as expected: ' + str(e)) + log.info('Check the status...') + assert (not topology_st.standalone.status()) + log.info('Server failed to start as expected') + time.sleep(5) + + # + # Fix the dse.ldif, and make sure the server starts up, + # and systemctl correctly identifies the successful start + # + shutil.copy(config_dir + '/dse.ldif.correct', config_dir + '/dse.ldif') + log.info('Starting the server with good dse.ldif...') + topology_st.standalone.start() + log.info('Check the status...') + assert (topology_st.standalone.status()) + log.info('Server started after fixing dse.ldif.') + + log.info('test_basic_systemctl: PASSED') + + +def test_basic_ldapagent(topology_st, import_example_ldif, ldapagent_config): + """Tests that the ldap agent starts + + :id: da1d1846-8fc4-4b8c-8e53-4c9c16eff1ba + + :setup: Standalone instance + + :steps: + 1. Start SNMP ldap agent using command. + 2. Cleanup - Kill SNMP agent process. + + :expectedresults: + 1. SNMP agent should start. + 2. SNMP agent process should be successfully killed. + """ + + log.info('Running test_basic_ldapagent...') + + if not os.path.exists(os.path.join(topology_st.standalone.get_sbin_dir(), 'ldap-agent')): + pytest.skip("ldap-agent is not present") + + # Remember, this is *forking* + check_output([os.path.join(topology_st.standalone.get_sbin_dir(), 'ldap-agent'), ldapagent_config]) + # First kill any previous agents .... 
+ run_dir = topology_st.standalone.get_run_dir() + pidpath = os.path.join(run_dir, 'ldap-agent.pid') + pid = None + with open(pidpath, 'r') as pf: + pid = pf.readlines()[0].strip() + if pid: + log.debug('test_basic_ldapagent: Terminating agent %s', pid) + check_output(['kill', pid]) + + log.info('test_basic_ldapagent: PASSED') + + +@pytest.mark.skipif(not get_user_is_ds_owner(), + reason="process ownership permission is required") +def test_basic_dse_survives_kill9(topology_st, import_example_ldif): + """Tests that the dse.ldif is not wiped out after the process is killed (bug 910581) + + :id: 10f141da-9b22-443a-885c-87271dcd7a59 + + :setup: Standalone instance + + :steps: + 1. Check out pid of ns-slapd process and Kill ns-slapd process. + 2. Check the contents of dse.ldif file. + 3. Start server. + + :expectedresults: + 1. ns-slapd process should be killed. + 2. dse.ldif should not be corrupted. + 3. Server should start successfully. + """ + log.info('Running test_basic_dse...') + + dse_file = topology_st.standalone.confdir + '/dse.ldif' + pid = check_output(['pidof', '-s', 'ns-slapd']).strip() + # We can't guarantee we have access to sudo in any environment ... Either + # run py.test with sudo, or as the same user as the dirsrv. + check_output(['kill', '-9', ensure_str(pid)]) + if os.path.getsize(dse_file) == 0: + log.fatal('test_basic_dse: dse.ldif\'s content was incorrectly removed!') + assert False + + topology_st.standalone.start(timeout=60) + log.info('dse.ldif was not corrupted, and the server was restarted') + + log.info('test_basic_dse: PASSED') + # Give the server time to startup, in some conditions this can be racey without systemd notification. + # Only affects this one test though... 
+ time.sleep(10) + + +@pytest.mark.parametrize("rootdse_attr_name", ROOTDSE_DEF_ATTR_LIST) +def test_def_rootdse_attr(topology_st, import_example_ldif, rootdse_attr_name): + """Tests that operational attributes are not returned by default in rootDSE searches + + :id: 4fee33cc-4019-4c27-89e8-998e6c770dc0 + :parametrized: yes + :setup: Standalone instance + + :steps: + 1. Make an ldapsearch for rootdse attribute + 2. Check the returned entries. + + :expectedresults: + 1. Search should not fail + 2. Operational attributes should not be returned. + """ + + topology_st.standalone.start() + + log.info(" Assert rootdse search hasn't %s attr" % rootdse_attr_name) + try: + entry = topology_st.standalone.search_s("", ldap.SCOPE_BASE)[0] + assert not entry.hasAttr(rootdse_attr_name) + + except ldap.LDAPError as e: + log.fatal('Search failed, error: ' + e.args[0]['desc']) + assert False + + +def test_mod_def_rootdse_attr(topology_st, import_example_ldif, rootdse_attr): + """Tests that operational attributes are returned by default in rootDSE searches after config modification + + :id: c7831e04-f458-4e23-83c7-b6f66109f639 + :parametrized: yes + :setup: Standalone instance and we are using rootdse_attr fixture which + adds nsslapd-return-default-opattr attr with value of one operation attribute. + + :steps: + 1. Make an ldapsearch for rootdse attribute + 2. Check the returned entries. + + :expectedresults: + 1. Search should not fail + 2. 
Operational attributes should be returned after the config modification + """ + + log.info(" Assert rootdse search has %s attr" % rootdse_attr) + try: + entry = topology_st.standalone.search_s("", ldap.SCOPE_BASE)[0] + assert entry.hasAttr(rootdse_attr) + + except ldap.LDAPError as e: + log.fatal('Search failed, error: ' + e.args[0]['desc']) + assert False + + +@pytest.fixture(scope="module") +def create_users(topology_st): + """Add users to the default suffix + """ + + users = UserAccounts(topology_st.standalone, DEFAULT_SUFFIX) + user_names = ["Directory", "Server", "389", "lib389", "pytest"] + + log.info('Adding 5 test users') + for name in user_names: + users.create(properties={ + 'uid': name, + 'sn': name, + 'cn': name, + 'uidNumber': '1000', + 'gidNumber': '1000', + 'homeDirectory': '/home/%s' % name, + 'mail': '%s@example.com' % name, + 'userpassword': 'pass%s' % name, + }) + + +def test_basic_anonymous_search(topology_st, create_users): + """Tests basic anonymous search operations + + :id: c7831e04-f458-4e50-83c7-b6f77109f639 + :setup: Standalone instance + Add 5 test users with different user names + :steps: + 1. Execute anonymous search with different filters + :expectedresults: + 1. Search should be successful + """ + + filters = ["uid=Directory", "(|(uid=S*)(uid=3*))", "(&(uid=l*)(mail=l*))", "(&(!(uid=D*))(ou=People))"] + log.info("Execute anonymous search with different filters") + for filtr in filters: + entries = topology_st.standalone.search_s(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE, filtr) + assert len(entries) != 0 + + +@pytest.mark.ds604 +@pytest.mark.bz915801 +def test_search_original_type(topology_st, create_users): + """Test ldapsearch returning original attributes + using nsslapd-search-return-original-type-switch + + :id: d7831d04-f558-4e50-93c7-b6f77109f640 + :setup: Standalone instance + Add some test entries + :steps: + 1. Set nsslapd-search-return-original-type-switch to ON + 2. Check that ldapsearch *does* return unknown attributes + 3. 
Turn off nsslapd-search-return-original-type-switch + 4. Check that ldapsearch doesn't return any unknown attributes + :expectedresults: + 1. nsslapd-search-return-original-type-switch should be set to ON + 2. ldapsearch should return unknown attributes + 3. nsslapd-search-return-original-type-switch should be OFF + 4. ldapsearch should not return any unknown attributes + """ + + log.info("Set nsslapd-search-return-original-type-switch to ON") + topology_st.standalone.config.set('nsslapd-search-return-original-type-switch', 'on') + + log.info("Check that ldapsearch *does* return unknown attributes") + entries = topology_st.standalone.search_s(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE, 'uid=Directory', + ['objectclass overflow', 'unknown']) + assert "objectclass overflow" in entries[0].getAttrs() + + log.info("Set nsslapd-search-return-original-type-switch to Off") + topology_st.standalone.config.set('nsslapd-search-return-original-type-switch', 'off') + log.info("Check that ldapsearch *does not* return unknown attributes") + entries = topology_st.standalone.search_s(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE, 'uid=Directory', + ['objectclass overflow', 'unknown']) + assert "objectclass overflow" not in entries[0].getAttrs() + + +@pytest.mark.bz192901 +def test_search_ou(topology_st): + """Test that DS should not return an entry that does not match the filter + + :id: d7831d05-f117-4e89-93c7-b6f77109f640 + :setup: Standalone instance + :steps: + 1. Create an OU entry without sub entries + 2. Search from the OU with the filter that does not match the OU + :expectedresults: + 1. Creation of OU should be successful + 2. 
Search should not return any results + """ + + log.info("Create a test OU without sub entries") + ou = OrganizationalUnits(topology_st.standalone, DEFAULT_SUFFIX) + ou.create(properties={ + 'ou': 'test_ou', + }) + + search_base = ("ou=test_ou,%s" % DEFAULT_SUFFIX) + log.info("Search from the OU with the filter that does not match the OU, it should not return anything") + entries = topology_st.standalone.search_s(search_base, ldap.SCOPE_SUBTREE, 'uid=*', ['dn']) + assert len(entries) == 0 + + +def test_bind_invalid_entry(topology_st): + """Test the failing bind does not return information about the entry + + :id: 5cd9b083-eea6-426b-84ca-83c26fc49a6f + :customerscenario: True + :setup: Standalone instance + :steps: + 1: bind as non existing entry + 2: check that bind info does not report 'No such entry' + :expectedresults: + 1: pass + 2: pass + """ + + topology_st.standalone.restart() + INVALID_ENTRY="cn=foooo,%s" % DEFAULT_SUFFIX + try: + topology_st.standalone.simple_bind_s(INVALID_ENTRY, PASSWORD) + except ldap.LDAPError as e: + log.info('test_bind_invalid_entry: Failed to bind as %s (expected)' % INVALID_ENTRY) + log.info('exception description: ' + e.args[0]['desc']) + if 'info' in e.args[0]: + log.info('exception info: ' + e.args[0]['info']) + assert e.args[0]['desc'] == 'Invalid credentials' + assert 'info' not in e.args[0] + pass + + log.info('test_bind_invalid_entry: PASSED') + + # reset credentials + topology_st.standalone.simple_bind_s(DN_DM, PW_DM) + + +def test_bind_entry_missing_passwd(topology_st): + """ + :id: af209149-8fb8-48cb-93ea-3e82dd7119d2 + :setup: Standalone Instance + :steps: + 1. Bind as database entry that does not have userpassword set + 2. Bind as database entry that does not exist + 3. Bind as cn=config entry that does not have userpassword set + 4. Bind as cn=config entry that does not exist + :expectedresults: + 1. Fails with error 49 + 2. Fails with error 49 + 3. Fails with error 49 + 4. 
Fails with error 49 + """ + user = UserAccount(topology_st.standalone, DEFAULT_SUFFIX) + with pytest.raises(ldap.INVALID_CREDENTIALS): + # Bind as the suffix root entry which does not have a userpassword + user.bind("some_password") + + user = UserAccount(topology_st.standalone, "cn=not here," + DEFAULT_SUFFIX) + with pytest.raises(ldap.INVALID_CREDENTIALS): + # Bind as the entry which does not exist + user.bind("some_password") + + # Test cn=config since it has its own code path + user = UserAccount(topology_st.standalone, "cn=config") + with pytest.raises(ldap.INVALID_CREDENTIALS): + # Bind as the config entry which does not have a userpassword + user.bind("some_password") + + user = UserAccount(topology_st.standalone, "cn=does not exist,cn=config") + with pytest.raises(ldap.INVALID_CREDENTIALS): + # Bind as an entry under cn=config that does not exist + user.bind("some_password") + + +@pytest.mark.bz1044135 +@pytest.mark.ds47319 +def test_connection_buffer_size(topology_st): + """Test connection buffer size adjustable with different values(valid values and invalid) + + :id: e7831d05-f117-4ec9-1203-b6f77109f117 + :setup: Standalone instance + :steps: + 1. Set nsslapd-connection-buffer to some valid values (2, 0 , 1) + 2. Set nsslapd-connection-buffer to some invalid values (-1, a) + :expectedresults: + 1. This should pass + 2. This should fail + """ + + valid_values = ['2', '0', '1'] + for value in valid_values: + topology_st.standalone.config.replace('nsslapd-connection-buffer', value) + + invalid_values = ['-1', 'a'] + for value in invalid_values: + with pytest.raises(ldap.OPERATIONS_ERROR): + topology_st.standalone.config.replace('nsslapd-connection-buffer', value) + + +@pytest.mark.bz1637439 +def test_critical_msg_on_empty_range_idl(topology_st): + """Doing a range index lookup should not report a critical message even if IDL is empty + + :id: a07a2222-0551-44a6-b113-401d23799364 + :setup: Standalone instance + :steps: + 1. 
Create an index for internationalISDNNumber. (attribute chosen because it is + unlikely that previous tests used it) + 2. telephoneNumber being indexed by default create 20 users without telephoneNumber + 3. add a telephoneNumber value and delete it to trigger an empty index database + 4. Do a search that triggers a range lookup on empty telephoneNumber + 5. Check that the critical message is not logged in error logs + :expectedresults: + 1. This should pass + 2. This should pass + 3. This should pass + 4. This should pass on normal build but could abort a debug build + 5. This should pass + """ + indexedAttr = 'internationalISDNNumber' + + # Step 1 + from lib389.index import Indexes + + indexes = Indexes(topology_st.standalone) + indexes.create(properties={ + 'cn': indexedAttr, + 'nsSystemIndex': 'false', + 'nsIndexType': 'eq' + }) + topology_st.standalone.restart() + + # Step 2 + users = UserAccounts(topology_st.standalone, DEFAULT_SUFFIX) + log.info('Adding 20 users without "%s"' % indexedAttr) + for i in range(20): + name = 'user_%d' % i + last_user = users.create(properties={ + 'uid': name, + 'sn': name, + 'cn': name, + 'uidNumber': '1000', + 'gidNumber': '1000', + 'homeDirectory': '/home/%s' % name, + 'mail': '%s@example.com' % name, + 'userpassword': 'pass%s' % name, + }) + + # Step 3 + # required update to create the indexAttr (i.e. 
'loginShell') database, and then make it empty + topology_st.standalone.modify_s(last_user.dn, [(ldap.MOD_ADD, indexedAttr, b'1234')]) + ent = topology_st.standalone.getEntry(last_user.dn, ldap.SCOPE_BASE,) + assert ent + assert ent.hasAttr(indexedAttr) + topology_st.standalone.modify_s(last_user.dn, [(ldap.MOD_DELETE, indexedAttr, None)]) + ent = topology_st.standalone.getEntry(last_user.dn, ldap.SCOPE_BASE,) + assert ent + assert not ent.hasAttr(indexedAttr) + + # Step 4 + # The first component being not indexed the range on second is evaluated + try: + ents = topology_st.standalone.search_s(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE, '(&(sudoNotAfter=*)(%s>=111))' % indexedAttr) + assert len(ents) == 0 + except ldap.SERVER_DOWN: + log.error('Likely testing against a debug version that asserted') + pass + + # Step 5 + assert not topology_st.standalone.searchErrorsLog('CRIT - list_candidates - NULL idl was recieved from filter_candidates_ext.') + + +@pytest.mark.bz1870624 +@pytest.mark.ds4379 +@pytest.mark.parametrize("case,value", [('positive', ['cn','','']), + ("positive", ['cn', '', '', '', '', '', '', '', '', '', '']), + ("negative", ['cn', '', '', '', '', '', '', '', '', '', '', ''])]) +def test_attr_description_limit(topology_st, case, value): + """Test that up to 10 empty attributeDescription is allowed + + :id: 5afd3dcd-1028-428d-822d-a489ecf4b67e + :customerscenario: True + :parametrized: yes + :setup: Standalone instance + :steps: + 1. Check that 2 empty values are allowed + 2. Check that 10 empty values are allowed + 3. Check that more than 10 empty values are allowed + :expectedresults: + 1. Should succeed + 2. Should succeed + 3. 
Should fail + """ + if case == 'positive': + DSLdapObjects(topology_st.standalone, basedn='').filter("(objectclass=*)", attrlist=value, scope=0) + else: + with pytest.raises(ldap.PROTOCOL_ERROR): + DSLdapObjects(topology_st.standalone, basedn='').filter("(objectclass=*)", attrlist=value, scope=0) + + +@pytest.mark.bz1647099 +@pytest.mark.ds50026 +def test_ldbm_modification_audit_log(topology_st): + """When updating LDBM config attributes, those attributes/values are not listed + in the audit log + + :id: 5bf75c47-a283-430e-a65c-3c5fd8dbadb8 + :setup: Standalone Instance + :steps: + 1. Bind as DM + 2. Enable audit log + 3. Update a set of config attrs in LDBM config + 4. Restart the server + 5. Check that config attrs are listed in the audit log + :expectedresults: + 1. Operation successful + 2. Operation successful + 3. Operation successful + 4. Operation successful + 5. Audit log should contain modification of attrs" + """ + + VALUE = '10001' + + d_manager = DirectoryManager(topology_st.standalone) + conn = d_manager.bind() + config_ldbm = LDBMConfig(conn) + + log.info("Enable audit logging") + conn.config.enable_log('audit') + + attrs = ['nsslapd-lookthroughlimit', 'nsslapd-pagedidlistscanlimit', 'nsslapd-idlistscanlimit', 'nsslapd-db-locks'] + + for attr in attrs: + log.info("Set attribute %s to value %s" % (attr, VALUE)) + config_ldbm.set(attr, VALUE) + + log.info('Restart the server to flush the logs') + conn.restart() + + for attr in attrs: + log.info("Check if attribute %s is replaced in the audit log" % attr) + assert conn.searchAuditLog('replace: %s' % attr) + assert conn.searchAuditLog('%s: %s' % (attr, VALUE)) + + +def test_suffix_case(topology_st): + """Test that the suffix case is preserved when creating a new backend + + :id: 4eff15be-6cde-4312-b492-c88941876bda + :setup: Standalone Instance + :steps: + 1. Create backend with uppercase characters + 2. Create root node entry + 3. 
Search should return suffix with upper case characters + :expectedresults: + 1. Success + 2. Success + 3. Success + """ + + # Start with a clean slate + topology_st.standalone.restart() + + TEST_SUFFIX = 'dc=UPPER_CASE' + + backends = Backends(topology_st.standalone) + backends.create(properties={'nsslapd-suffix': TEST_SUFFIX, + 'name': 'upperCaseRoot', + 'sample_entries': '001004002'}) + + domain = Domain(topology_st.standalone, TEST_SUFFIX) + assert domain.dn == TEST_SUFFIX + + +def test_bind_disconnect_invalid_entry(topology_st, _reset_attr): + """Test close connection on failed bind with invalid entry + + :id: b378543e-32dc-432a-9756-ce318d6d654b + :setup: Standalone instance + :steps: + 1. create/get user + 2. bind and search as user + 3. enable nsslapd-close-on-failed-bind attr + 4. bind as non existing entry to trigger connection closure + 5. verify connection has been closed and server is still running + 6. cleanup + :expectedresults: + 1. success + 2. success + 3. nsslapd-close-on-failed-bind attr set to on + 4. returns INVALID_CREDENTIALS, triggering connection closure + 5. success + 6. 
success + """ + + INVALID_ENTRY="cn=foooo,%s" % DEFAULT_SUFFIX + inst = topology_st.standalone + + dm = DirectoryManager(inst) + + # create/get user + users = UserAccounts(inst, DEFAULT_SUFFIX) + try: + user = users.create_test_user() + user.set("userPassword", PW_DM) + except ldap.ALREADY_EXISTS: + user = users.get('test_user_1000') + + # verify user can bind and search + try: + inst.simple_bind_s(user.dn, PW_DM) + except ldap.LDAPError as e: + log.error('Failed to bind {}'.format(user.dn)) + raise e + try: + inst.search_s(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE, '(objectclass=top)', ['dn']) + except ldap.LDAPError as e: + log.error('Search failed on {}'.format(DEFAULT_SUFFIX)) + raise e + + # enable and verify attr + try: + dm_conn = dm.bind() + dm_conn.config.replace('nsslapd-close-on-failed-bind', 'on') + assert (dm_conn.config.get_attr_val_utf8('nsslapd-close-on-failed-bind')) == 'on' + except ldap.LDAPError as e: + log.error('Failed to replace nsslapd-close-on-failed-bind attr') + raise e + + # bind as non existing entry which triggers connection close + with pytest.raises(ldap.INVALID_CREDENTIALS): + inst.simple_bind_s(INVALID_ENTRY, PW_DM) + + # verify the connection is closed but the server is still running + assert (inst.status()) + with pytest.raises(ldap.SERVER_DOWN): + inst.search_s(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE, '(objectclass=top)', ['dn']) + try: + dm_conn = dm.bind() + except ldap.LDAPError as e: + log.error('DM bind failed') + raise e + + +def test_bind_disconnect_cert_map_failed(topology_st, _reset_attr): + """Test close connection on failed bind with a failed cert mapping + + :id: 0ac60f76-1fd9-4080-a82b-21807e6bc292 + :setup: Standalone Instance + :steps: + 1. enable TLS + 2. create/get a user + 3. get details of ssca key and cert + 4. create 2 user certificates, one good, one bad + 5. configure certmap + 6. check that EXTERNAL is listed in supported mechns. + 7. bind with good cert + 8. bind with bad cert + 9. 
enable nsslapd-close-on-failed-bind attr + 10. bind with bad cert + 11. verify connection has been closed and server is still running + 12. cleanup + :expectedresults: + 1. success + 2. success + 3. success + 4. success + 5. success + 6. success + 7. success + 8. generates INVALID_CREDENTIALS exception + 9. success + 10. generates INVALID_CREDENTIALS exception, triggering connection closure + 11. success + 12. success + """ + + RDN_TEST_USER = 'test_user_1000' + RDN_TEST_USER_WRONG = 'test_user_wrong' + inst = topology_st.standalone + + inst.enable_tls() + dm = DirectoryManager(inst) + + # create/get user + users = UserAccounts(inst, DEFAULT_SUFFIX) + try: + user = users.create_test_user() + user.set("userPassword", PW_DM) + except ldap.ALREADY_EXISTS: + user = users.get(RDN_TEST_USER) + + ssca_dir = inst.get_ssca_dir() + ssca = NssSsl(dbpath=ssca_dir) + + ssca.create_rsa_user(RDN_TEST_USER) + ssca.create_rsa_user(RDN_TEST_USER_WRONG) + + # Get the details of where the key and crt are. + tls_locs = ssca.get_rsa_user(RDN_TEST_USER) + tls_locs_wrong = ssca.get_rsa_user(RDN_TEST_USER_WRONG) + + user.enroll_certificate(tls_locs['crt_der_path']) + + # Turn on the certmap. + cm = CertmapLegacy(inst) + certmaps = cm.list() + certmaps['default']['DNComps'] = '' + certmaps['default']['FilterComps'] = ['cn'] + certmaps['default']['VerifyCert'] = 'off' + cm.set(certmaps) + + # Check that EXTERNAL is listed in supported mechns. + assert(inst.rootdse.supports_sasl_external()) + + # Restart to allow certmaps to be re-read: Note, we CAN NOT use post_open + # here, it breaks on auth. 
see lib389/__init__.py + inst.restart(post_open=False) + + # bind with good cert + try: + inst.open(saslmethod='EXTERNAL', connOnly=True, certdir=ssca_dir, userkey=tls_locs['key'], usercert=tls_locs['crt']) + except ldap.LDAPError as e: + log.error('Bind with good cert failed') + raise e + + inst.restart() + + # bind with bad cert + with pytest.raises(ldap.INVALID_CREDENTIALS): + inst.open(saslmethod='EXTERNAL', connOnly=True, certdir=ssca_dir, userkey=tls_locs_wrong['key'], usercert=tls_locs_wrong['crt']) + + # enable and verify attr + try: + dm_conn = dm.bind() + dm_conn.config.replace('nsslapd-close-on-failed-bind', 'on') + assert (dm_conn.config.get_attr_val_utf8('nsslapd-close-on-failed-bind')) == 'on' + except ldap.LDAPError as e: + log.error('Failed to replace nsslapd-close-on-failed-bind attr') + raise e + + # bind with bad cert + with pytest.raises(ldap.INVALID_CREDENTIALS): + inst.open(saslmethod='EXTERNAL', connOnly=True, certdir=ssca_dir, userkey=tls_locs_wrong['key'], usercert=tls_locs_wrong['crt']) + + # check the connection is closed but the server is still running + assert (inst.status()) + with pytest.raises(ldap.SERVER_DOWN): + inst.search_s(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE, '(objectclass=top)', ['dn']) + try: + dm_conn = dm.bind() + except ldap.LDAPError as e: + log.error('DM bind failed') + raise e + + +def test_bind_disconnect_account_lockout(topology_st, _reset_attr): + """Test close connection on failed bind with user account lockout + + :id: 12e56d79-ce57-4574-a80a-d3b6d1d74d8f + :setup: Standalone Instance + :steps: + 1. configure account lockout + 2. create/get a user + 3. bind and search as user + 4. force account lock out + 5. enable nsslapd-close-on-failed-bind attr + 6. attempt user bind + 7. verify connection has been closed and server is still running + 8. cleanup + :expectedresults: + 1. success + 2. success + 3. success + 4. generates CONSTRAINT_VIOLATION exception + 5. success + 6. 
generates CONSTRAINT_VIOLATION exception, triggering connection closure + 7. success + 8. success + """ + + inst = topology_st.standalone + dm = DirectoryManager(inst) + inst.config.set('passwordlockout', 'on') + inst.config.set('passwordMaxFailure', '2') + + # create/get user + users = UserAccounts(inst, DEFAULT_SUFFIX) + try: + user = users.create_test_user() + user.set("userPassword", PW_DM) + except ldap.ALREADY_EXISTS: + user = users.get('test_user_1000') + + # verify user bind and search + try: + inst.simple_bind_s(user.dn, PW_DM) + except ldap.LDAPError as e: + log.error('Failed to bind {}'.format(user.dn)) + raise e + try: + inst.search_s(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE, '(objectclass=top)', ['dn']) + except ldap.LDAPError as e: + log.error('Search failed on {}'.format(DEFAULT_SUFFIX)) + raise e + + # Force entry to get locked out + with pytest.raises(ldap.INVALID_CREDENTIALS): + inst.simple_bind_s(user.dn, 'whateverlike') + with pytest.raises(ldap.INVALID_CREDENTIALS): + inst.simple_bind_s(user.dn, 'whateverlike') + with pytest.raises(ldap.CONSTRAINT_VIOLATION): + # Should fail with good or bad password + inst.simple_bind_s(user.dn, PW_DM) + + # enable and verify attr + try: + dm_conn = dm.bind() + dm_conn.config.replace('nsslapd-close-on-failed-bind', 'on') + assert (dm_conn.config.get_attr_val_utf8('nsslapd-close-on-failed-bind')) == 'on' + except ldap.LDAPError as e: + log.error('Failed to replace nsslapd-close-on-failed-bind attr') + raise e + + # Should fail with good or bad password + with pytest.raises(ldap.CONSTRAINT_VIOLATION): + inst.simple_bind_s(user.dn, PW_DM) + + # check the connection is closed but the server is still running + assert (inst.status()) + with pytest.raises(ldap.SERVER_DOWN): + inst.search_s(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE, '(objectclass=top)', ['dn']) + try: + dm_conn = dm.bind() + except ldap.LDAPError as e: + log.error('DM bind failed') + raise e + + +def test_dscreate(request): + """Test that dscreate works + + :id: 
5bf75c47-a283-430e-a65c-3c5fd8dbadb9 + :setup: None + :steps: + 1. Create template file for dscreate + 2. Create instance using template file + :expectedresults: + 1. Should succeeds + 2. Should succeeds + """ + + template_file = "/tmp/dssetup.inf" + template_text = """[general] +config_version = 2 +# This invalid hostname ... +full_machine_name = localhost.localdomain +# Means we absolutely require this. +strict_host_checking = False +# In tests, we can be run in containers, NEVER trust +# that systemd is there, or functional in any capacity +systemd = False + +[slapd] +instance_name = test_dscreate +root_dn = cn=directory manager +root_password = someLongPassword_123 +# We do not have access to high ports in containers, +# so default to something higher. +port = 38999 +secure_port = 63699 + + +[backend-userroot] +suffix = dc=example,dc=com +sample_entries = yes +""" + + with open(template_file, "w") as template_fd: + template_fd.write(template_text) + + # Unset PYTHONPATH to avoid mixing old CLI tools and new lib389 + tmp_env = os.environ + if "PYTHONPATH" in tmp_env: + del tmp_env["PYTHONPATH"] + try: + subprocess.check_call([ + 'dscreate', + 'from-file', + template_file + ], env=tmp_env) + except subprocess.CalledProcessError as e: + log.fatal("dscreate failed! Error ({}) {}".format(e.returncode, e.output)) + assert False + + def fin(): + os.remove(template_file) + try: + subprocess.check_call(['dsctl', 'test_dscreate', 'remove', '--do-it']) + except subprocess.CalledProcessError as e: + log.fatal("Failed to remove test instance Error ({}) {}".format(e.returncode, e.output)) + + request.addfinalizer(fin) + + +def test_dscreate_with_replication(request): + """Test dscreate works with replication shortcuts + + :id: 8391ffc4-5158-4141-9312-0f47ae56f1ed + :setup: Standalone Instance + :steps: + 1. Create instance and prepare DirSrv object + 2. Check replication is enabled + 3. Check repl role + 4. Check rid + 5. Check bind dn + 6. 
Changelog trimming settings + :expectedresults: + 1. Success + 2. Success + 3. Success + 4. Success + 5. Success + 6. Success + """ + template_file = "/tmp/dssetup.inf" + template_text = """[general] +config_version = 2 +# This invalid hostname ... +full_machine_name = localhost.localdomain +# Means we absolutely require this. +strict_host_checking = False +# In tests, we can be run in containers, NEVER trust +# that systemd is there, or functional in any capacity +systemd = False + +[slapd] +instance_name = dscreate_repl +root_dn = cn=directory manager +root_password = someLongPassword_123 +# We do not have access to high ports in containers, +# so default to something higher. +port = 38999 +secure_port = 63699 + +[backend-userroot] +suffix = dc=example,dc=com +sample_entries = yes +enable_replication = True +replica_binddn = cn=replication manager,cn=config +replica_bindpw = password +replica_id = 111 +replica_role = supplier +changelog_max_age = 8d +changelog_max_entries = 200000 +""" + + with open(template_file, "w") as template_fd: + template_fd.write(template_text) + + # Unset PYTHONPATH to avoid mixing old CLI tools and new lib389 + tmp_env = os.environ + if "PYTHONPATH" in tmp_env: + del tmp_env["PYTHONPATH"] + try: + subprocess.check_call([ + 'dscreate', + 'from-file', + template_file + ], env=tmp_env) + except subprocess.CalledProcessError as e: + log.fatal("dscreate failed! 
Error ({}) {}".format(e.returncode, e.output)) + assert False + + def fin(): + os.remove(template_file) + try: + subprocess.check_call(['dsctl', 'dscreate_repl', 'remove', '--do-it']) + except subprocess.CalledProcessError as e: + log.fatal("Failed to remove test instance Error ({}) {}".format(e.returncode, e.output)) + + request.addfinalizer(fin) + + # Prepare Dirsrv instance + from lib389 import DirSrv + container_result = subprocess.run(["systemd-detect-virt", "-c"], stdout=subprocess.PIPE) + if container_result.returncode == 0: + ds_instance = DirSrv(False, containerised=True) + else: + ds_instance = DirSrv(False) + args = { + SER_HOST: "localhost.localdomain", + SER_PORT: 38999, + SER_SECURE_PORT: 63699, + SER_SERVERID_PROP: 'dscreate_repl', + SER_ROOT_DN: 'cn=directory manager', + SER_ROOT_PW: 'someLongPassword_123', + SER_LDAPI_ENABLED: 'on', + SER_LDAPI_AUTOBIND: 'on' + } + ds_instance.allocate(args) + ds_instance.start(timeout=60) + + dse_ldif = DSEldif(ds_instance, serverid="dscreate_repl") + socket_path = dse_ldif.get("cn=config", "nsslapd-ldapifilepath") + ldapiuri=f"ldapi://{socket_path[0].replace('/', '%2f')}" + ds_instance.open(uri=ldapiuri) + + # Check replication is enabled + replicas = Replicas(ds_instance) + replica = replicas.get(DEFAULT_SUFFIX) + assert replica + + # Check role + assert replica.get_role() == ReplicaRole.SUPPLIER + + # Check rid + assert replica.get_rid() == '111' + + # Check bind dn is in config + assert replica.get_attr_val_utf8('nsDS5ReplicaBindDN') == 'cn=replication manager,cn=config' + + # Check repl manager entry was created + repl_mgr = UserAccount(ds_instance, 'cn=replication manager,cn=config') + assert repl_mgr.exists() + + # Changelog trimming settings + cl = Changelog(ds_instance, DEFAULT_SUFFIX) + assert cl.get_attr_val_utf8('nsslapd-changelogmaxage') == '8d' + assert cl.get_attr_val_utf8('nsslapd-changelogmaxentries') == '200000' + + +@pytest.fixture(scope="function") +def dscreate_long_instance(request): + 
template_file = "/tmp/dssetup.inf" + longname_serverid = "test-longname-deadbeef-deadbeef-deadbeef-deadbeef-deadbeef" + template_text = """[general] +config_version = 2 +# This invalid hostname ... +full_machine_name = localhost.localdomain +# Means we absolutely require this. +strict_host_checking = False +# In tests, we can be run in containers, NEVER trust +# that systemd is there, or functional in any capacity +systemd = False + +[slapd] +instance_name = %s +root_dn = cn=directory manager +root_password = someLongPassword_123 +# We do not have access to high ports in containers, +# so default to something higher. +port = 38999 +secure_port = 63699 + + +[backend-userroot] +suffix = dc=example,dc=com +sample_entries = yes +""" % longname_serverid + + with open(template_file, "w") as template_fd: + template_fd.write(template_text) + + # Unset PYTHONPATH to avoid mixing old CLI tools and new lib389 + tmp_env = os.environ + if "PYTHONPATH" in tmp_env: + del tmp_env["PYTHONPATH"] + try: + subprocess.check_call([ + 'dscreate', + 'from-file', + template_file + ], env=tmp_env) + except subprocess.CalledProcessError as e: + log.fatal("dscreate failed! 
Error ({}) {}".format(e.returncode, e.output))
+        assert False
+
+    inst = DirSrv(verbose=True, external_log=log)
+    dse_ldif = DSEldif(inst, serverid=longname_serverid)
+
+    socket_path = dse_ldif.get("cn=config", "nsslapd-ldapifilepath")
+    inst.local_simple_allocate(
+        serverid=longname_serverid,
+        ldapuri=f"ldapi://{socket_path[0].replace('/', '%2f')}",
+        password="someLongPassword_123"
+    )
+    inst.ldapi_enabled = 'on'
+    inst.ldapi_socket = socket_path
+    inst.ldapi_autobind = 'off'
+    try:
+        inst.open()
+    except:
+        log.fatal("Failed to connect via ldapi to %s instance" % longname_serverid)
+        os.remove(template_file)
+        try:
+            subprocess.check_call(['dsctl', longname_serverid, 'remove', '--do-it'])
+        except subprocess.CalledProcessError as e:
+            log.fatal("Failed to remove test instance Error ({}) {}".format(e.returncode, e.output))
+
+    def fin():
+        os.remove(template_file)
+        try:
+            subprocess.check_call(['dsctl', longname_serverid, 'remove', '--do-it'])
+        except subprocess.CalledProcessError as e:
+            log.fatal("Failed to remove test instance Error ({}) {}".format(e.returncode, e.output))
+
+    request.addfinalizer(fin)
+
+    return inst
+
+
+@pytest.mark.skipif(not get_user_is_root() or ds_is_older('1.4.2.0'),
+                    reason="This test is only required with new admin cli, and requires root.")
+@pytest.mark.bz1748016
+@pytest.mark.ds50581
+def test_dscreate_ldapi(dscreate_long_instance):
+    """Test that an instance with a long name can
+    handle ldapi connection using a long socket name
+
+    :id: 5d72d955-aff8-4741-8c9a-32c1c707cf1f
+    :setup: None
+    :steps:
+        1. Create an instance with a long serverId name, which opens an ldapi connection
+        2. Connect with ldapi, which hits 50581 and crashes the instance
+    :expectedresults:
+        1. Should succeed
+        2. 
Should succeed
+    """
+
+    root_dse = RootDSE(dscreate_long_instance)
+    log.info(root_dse.get_supported_ctrls())
+
+
+@pytest.mark.skipif(not get_user_is_root() or ds_is_older('1.4.2.0'),
+                    reason="This test is only required with new admin cli, and requires root.")
+@pytest.mark.bz1715406
+@pytest.mark.ds50923
+def test_dscreate_multiple_dashes_name(dscreate_long_instance):
+    """Test that an instance with multiple dashes in the name
+    can be removed with dsctl --remove-all
+
+    :id: 265c3ac7-5ba6-4278-b8f4-4e7692afd1a5
+    :setup: An instance with a few dashes in its name
+    :steps:
+        1. Run 'dsctl --remove-all' command
+        2. Check if the instance exists
+    :expectedresults:
+        1. Should succeed
+        2. Instance doesn't exist
+    """
+
+    p = run(['dsctl', '--remove-all'], stdout=PIPE, input='Yes\n', encoding='ascii')
+    assert not dscreate_long_instance.exists()
+
+
+@pytest.fixture(scope="module", params=('c=uk', 'cn=test_user', 'dc=example,dc=com', 'o=south', 'ou=sales', 'wrong=some_value'))
+def dscreate_test_rdn_value(request):
+    template_file = "/tmp/dssetup.inf"
+    template_text = f"""[general]
+config_version = 2
+# This invalid hostname ...
+full_machine_name = localhost.localdomain
+# Means we absolutely require this.
+strict_host_checking = False
+# In tests, we can be run in containers, NEVER trust
+# that systemd is there, or functional in any capacity
+systemd = False
+
+[slapd]
+instance_name = test_different_rdn
+root_dn = cn=directory manager
+root_password = someLongPassword_123
+# We do not have access to high ports in containers,
+# so default to something higher.
+port = 38999 +secure_port = 63699 + +[backend-userroot] +create_suffix_entry = True +suffix = {request.param} +""" + + with open(template_file, "w") as template_fd: + template_fd.write(template_text) + + # Unset PYTHONPATH to avoid mixing old CLI tools and new lib389 + tmp_env = os.environ + if "PYTHONPATH" in tmp_env: + del tmp_env["PYTHONPATH"] + + def fin(): + os.remove(template_file) + if request.param != "wrong=some_value": + try: + subprocess.check_call(['dsctl', 'test_different_rdn', 'remove', '--do-it']) + except subprocess.CalledProcessError as e: + log.fatal(f"Failed to remove test instance Error ({e.returncode}) {e.output}") + else: + log.info("Wrong RDN is passed, instance not created") + request.addfinalizer(fin) + return template_file, tmp_env, request.param, + + +@pytest.mark.skipif(not get_user_is_root() or ds_is_older('1.4.0.0'), + reason="This test is only required with new admin cli, and requires root.") +@pytest.mark.bz1807419 +@pytest.mark.ds50928 +def test_dscreate_with_different_rdn(dscreate_test_rdn_value): + """Test that dscreate works with different RDN attributes as suffix + + :id: 77ed6300-6a2f-4e79-a862-1f1105f1e3ef + :customerscenario: True + :parametrized: yes + :setup: None + :steps: + 1. Create template file for dscreate with different RDN attributes as suffix + 2. Create instance using template file + 3. Create instance with 'wrong=some_value' as suffix's RDN attribute + :expectedresults: + 1. Should succeeds + 2. Should succeeds + 3. Should fail + """ + try: + subprocess.check_call([ + 'dscreate', + 'from-file', + dscreate_test_rdn_value[0] + ], env=dscreate_test_rdn_value[1]) + except subprocess.CalledProcessError as e: + log.fatal(f"dscreate failed! 
Error ({e.returncode}) {e.output}") + if dscreate_test_rdn_value[2] != "wrong=some_value": + assert False + else: + assert True + + +@pytest.fixture(scope="module") +def dscreate_custom_instance(request): + topo = CustomSetup('custom') + + def fin(): + topo.cleanup() + + request.addfinalizer(fin) + topo.create_instance() + # Return CustomSetup object associated with + # a stopped instance named "custom" + return topo + + obj.create_wrapper(maxfds=150) + log.info("Starting wrapper") + inst.start() + log.info("Server is started.") + log.info("Open connection") + inst.open() + + +@pytest.fixture(scope="module", params=set(range(1,5))) +def dscreate_with_numlistener(request, dscreate_custom_instance): + numlisteners = request.param + dscreate_custom_instance.create_wrapper(maxfds=MAX_FDS) + inst = dscreate_custom_instance.inst + inst.stop() + dse_ldif = DSEldif(inst) + dse_ldif.replace('cn=config', 'nsslapd-numlisteners', str(numlisteners)) + inst.start() + inst.open() + return inst + + +@pytest.mark.skipif(ds_is_older('2.2.0.0'), + reason="This test is only required with multiple listener support.") +def test_conn_limits(dscreate_with_numlistener): + """Check the connections limits for various number of listeners. + + :id: 7be2eb5c-4d8f-11ee-ae3d-482ae39447e5 + :parametrized: yes + :setup: Setup an instance then set nsslapd-numlisteners and maximum file descriptors + :steps: + 1. Loops on: + Open new connection and perform search until timeout expires + 2. Close one of the previously open connections + 3. Loops MAX_FDS times on: + - opening a new connection + - perform a search + - close the connection + 4. Close all open connections + 5. Remove the instance + :expectedresults: + 1. Should get a timeout (because the server has no more any connections) + 2. Should success + 3. Should success (otherwise issue #5924 has probably been hit) + 4. Should success + 5. 
Should success + """ + inst = dscreate_with_numlistener + + conns = [] + timeout_occured = False + for i in range(MAX_FDS): + try: + ldc = ldap.initialize(f'ldap://localhost:{inst.port}') + ldc.set_option(ldap.OPT_TIMEOUT, 5) + ldc.search_s(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE, "(uid=demo)") + conns.append(ldc) + except ldap.TIMEOUT: + timeout_occured = True + break + # Should not be able to open MAX_FDS connections (some file descriptor are + # reserved (for example for the listening socket ) + assert timeout_occured + + conn = random.choice(conns) + conn.unbind() + conns.remove(conn) + + # Should loop enough time so trigger issue #5924 if it is not fixed. + for i in range(MAX_FDS): + ldc = ldap.initialize(f'ldap://localhost:{inst.port}') + # Set a timeout long enough so that the test fails if server is unresponsive + ldc.set_option(ldap.OPT_TIMEOUT, 60) + ldc.search_s(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE, "(uid=demo)") + ldc.unbind() + + # Close all open connections + for c in conns: + c.unbind() + + # Step 6 is done in teardown phase by dscreate_instance finalizer + +if __name__ == '__main__': + # Run isolated + # -s for DEBUG mode + CURRENT_FILE = os.path.realpath(__file__) + pytest.main("-s %s" % CURRENT_FILE) diff --git a/dirsrvtests/tests/suites/basic/ds_entrydn_test.py b/dirsrvtests/tests/suites/basic/ds_entrydn_test.py new file mode 100644 index 0000000..1966e85 --- /dev/null +++ b/dirsrvtests/tests/suites/basic/ds_entrydn_test.py @@ -0,0 +1,97 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2022 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. 
+# --- END COPYRIGHT BLOCK --- + +import logging +import pytest +import os +import time +from lib389.topologies import topology_st as topo +from lib389.idm.user import UserAccount, UserAccounts +from lib389.idm.organizationalunit import OrganizationalUnit + +log = logging.getLogger(__name__) + +SUFFIX = "dc=Example,DC=COM" +SUBTREE = "ou=People,dc=Example,DC=COM" +NEW_SUBTREE = "ou=humans,dc=Example,DC=COM" +USER_DN = "uid=tUser,ou=People,dc=Example,DC=COM" +NEW_USER_DN = "uid=tUser,ou=humans,dc=Example,DC=COM" +NEW_USER_NORM_DN = "uid=tUser,ou=humans,dc=example,dc=com" + + +def test_dsentrydn(topo): + """Test that the dsentrydn attribute is properly maintained and preserves the case of the DN + + :id: f0f2fe6b-c70d-4de1-a9a9-06dda74e7c30 + :setup: Standalone Instance + :steps: + 1. Create user and make sure dsentrydn is set to the correct value/case + 2. Moddn of "ou=people" and check dsentrydn is correct for parent and the children + 3. Check the DN matches dsEntryDN + 4. Disable nsslapd-return-original-entrydn + 5. Check the DN matches normalized DN + :expectedresults: + 1. Success + 2. Success + 3. Success + 4. Success + 5. 
Success + """ + + inst = topo.standalone + inst.config.replace('nsslapd-return-original-entrydn', 'on') + + # Create user and makes sure "dsEntryDN" is set correctly + users = UserAccounts(inst, SUFFIX) + user_properties = { + 'uid': 'tUser', + 'givenname': 'test', + 'cn': 'Test User', + 'sn': 'user', + 'userpassword': 'password', + 'uidNumber': '1000', + 'gidNumber': '2000', + 'homeDirectory': '/home/tUser' + } + user = users.create(properties=user_properties) + assert user.get_attr_val_utf8('dsentrydn') == USER_DN + + # Move subtree ou=people to ou=humans + ou = OrganizationalUnit(inst, SUBTREE) + ou.rename("ou=humans", SUFFIX) # NEW_SUBTREE + + # check dsEntryDN is properly updated to new subtree + ou = OrganizationalUnit(inst, NEW_SUBTREE) + assert ou.get_attr_val_utf8('dsentrydn') == NEW_SUBTREE + + user = UserAccount(inst, NEW_USER_DN) + assert user.get_attr_val_utf8('dsentrydn') == NEW_USER_DN + + # Check DN returned to client matches "dsEntryDN" + users = UserAccounts(inst, SUFFIX, rdn="ou=humans").list() + for user in users: + if user.rdn.startswith("tUser"): + assert user.dn == NEW_USER_DN + break + + # Disable 'nsslapd-return-original-entrydn' and check DN is normalized + inst.config.replace('nsslapd-return-original-entrydn', 'off') + inst.restart() + users = UserAccounts(inst, SUFFIX, rdn="ou=humans").list() + for user in users: + if user.rdn.startswith("tUser"): + assert user.dn == NEW_USER_NORM_DN + break + + +if __name__ == '__main__': + # Run isolated + # -s for DEBUG mode + CURRENT_FILE = os.path.realpath(__file__) + pytest.main(["-s", CURRENT_FILE]) + diff --git a/dirsrvtests/tests/suites/basic/haproxy_test.py b/dirsrvtests/tests/suites/basic/haproxy_test.py new file mode 100644 index 0000000..08db0b5 --- /dev/null +++ b/dirsrvtests/tests/suites/basic/haproxy_test.py @@ -0,0 +1,96 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2023 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). 
+# See LICENSE for details. +# --- END COPYRIGHT BLOCK --- + +import ldap +import logging +import pytest +from lib389._constants import DEFAULT_SUFFIX, PASSWORD +from lib389.topologies import topology_st as topo +from lib389.idm.user import UserAccount, UserAccounts +from lib389.idm.account import Anonymous + +log = logging.getLogger(__name__) +DN = "uid=common,ou=people," + DEFAULT_SUFFIX +HOME_DIR = '/home/common' + +@pytest.fixture(scope="function") +def setup_test(topo, request): + """Setup test environment""" + log.info("Add nsslapd-haproxy-trusted-ip attribute") + topo.standalone.config.set('nsslapd-haproxy-trusted-ip', '192.168.0.1') + assert topo.standalone.config.present('nsslapd-haproxy-trusted-ip', '192.168.0.1') + + log.info("Add a user") + users = UserAccounts(topo.standalone, DEFAULT_SUFFIX) + try: + users.create(properties={ + 'uid': 'common', + 'cn': 'common', + 'sn': 'common', + 'uidNumber': '3000', + 'gidNumber': '4000', + 'homeDirectory': HOME_DIR, + 'description': 'test haproxy with this user', + 'userPassword': PASSWORD + }) + except ldap.ALREADY_EXISTS: + log.info("User already exists") + pass + + +def test_haproxy_trust_ip_attribute(topo, setup_test): + """Test nsslapd-haproxy-trusted-ip attribute set and delete + + :id: 8a0789a6-3ede-40e2-966c-9a2c87eaac05 + :setup: Standalone instance with nsslapd-haproxy-trusted-ip attribute and a user + :steps: + 1. Check that nsslapd-haproxy-trusted-ip attribute is present + 2. Delete nsslapd-haproxy-trusted-ip attribute + 3. Check that nsslapd-haproxy-trusted-ip attribute is not present + :expectedresults: + 1. Success + 2. Success + 3. 
Success + """ + + log.info("Check that nsslapd-haproxy-trusted-ip attribute is present") + assert topo.standalone.config.present('nsslapd-haproxy-trusted-ip', '192.168.0.1') + + log.info("Delete nsslapd-haproxy-trusted-ip attribute") + topo.standalone.config.remove_all('nsslapd-haproxy-trusted-ip') + + log.info("Check that nsslapd-haproxy-trusted-ip attribute is not present") + assert not topo.standalone.config.present('nsslapd-haproxy-trusted-ip', '192.168.0.1') + + +def test_binds_with_haproxy_trust_ip_attribute(topo, setup_test): + """Test that non-proxy binds are not blocked when nsslapd-haproxy-trusted-ip attribute is set + + :id: 14273c16-fed9-497e-8ebb-09e3dabc7914 + :setup: Standalone instance with nsslapd-haproxy-trusted-ip attribute and a user + :steps: + 1. Try to bind as anonymous user + 2. Try to bind as a user + 3. Check that userPassword is correct and we can get it + :expectedresults: + 1. Success + 2. Success + 3. Success + """ + + log.info("Bind as anonymous user") + Anonymous(topo.standalone).bind() + + log.info("Bind as a user") + user_entry = UserAccount(topo.standalone, DN) + user_conn = user_entry.bind(PASSWORD) + + log.info("Check that userPassword is correct and we can get it") + user_entry = UserAccount(user_conn, DN) + home = user_entry.get_attr_val_utf8('homeDirectory') + assert home == HOME_DIR diff --git a/dirsrvtests/tests/suites/basic/vlv.py b/dirsrvtests/tests/suites/basic/vlv.py new file mode 100644 index 0000000..40267da --- /dev/null +++ b/dirsrvtests/tests/suites/basic/vlv.py @@ -0,0 +1,148 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2021 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. 
+# --- END COPYRIGHT BLOCK --- +# +import pytest, time +from lib389.tasks import * +from lib389.utils import * +from lib389.topologies import topology_st +from lib389.replica import * +from lib389._constants import * +from lib389.index import * +from lib389.mappingTree import * +from lib389.backend import * +from lib389.idm.user import UserAccount, UserAccounts +import ldap +from ldap.controls.vlv import VLVRequestControl +from ldap.controls.sss import SSSRequestControl + + +pytestmark = pytest.mark.tier1 + +logging.getLogger(__name__).setLevel(logging.DEBUG) +log = logging.getLogger(__name__) + + +def open_new_ldapi_conn(dsinstance): + ldapurl, certdir = get_ldapurl_from_serverid(dsinstance) + # Only ldapi is handled in this functon + assert 'ldapi://' in ldapurl + conn = ldap.initialize(ldapurl) + # Send SASL bind request for mechanism EXTERNAL + conn.sasl_interactive_bind_s("", ldap.sasl.external()) + return conn + + +def check_vlv_search(conn): + before_count=1 + after_count=3 + offset=3501 + + vlv_control = VLVRequestControl(criticality=True, + before_count=before_count, + after_count=after_count, + offset=offset, + content_count=0, + greater_than_or_equal=None, + context_id=None) + + sss_control = SSSRequestControl(criticality=True, ordering_rules=['cn']) + r = conn.search_ext_s(base='dc=example,dc=com', scope=ldap.SCOPE_SUBTREE, filterstr='(uid=*)', serverctrls=[vlv_control, sss_control]) + imin=offset+999-before_count + if imin < 1000: + imin = 1000 + imax=offset+999+after_count + i=imin + for dn,entry in r: + assert i <= imax + expected_dn = f'uid=testuser{i},ou=People,dc=example,dc=com' + print(f'found {repr(dn)} expected {expected_dn}') + assert dn.lower() == expected_dn.lower() + i=i+1 + + + +def add_users(topology_st, users_num): + users = UserAccounts(topology_st, DEFAULT_SUFFIX) + log.info('Adding %d users' % users_num) + for i in range(0, users_num): + uid = 1000 + i + users.create(properties={ + 'uid': 'testuser%d' % uid, + 'cn': 'testuser%d' % 
uid, + 'sn': 'user', + 'uidNumber': '%d' % uid, + 'gidNumber': '%d' % uid, + 'homeDirectory': '/home/testuser%d' % uid + }) + + +@pytest.mark.DS47966 +def test_vlv(topology_st): + """ + Testing bulk import when the backend with VLV was recreated. + If the test passes without the server crash, 47966 is verified. + + :id: 512963fa-fe02-11e8-b1d3-8c16451d917b + :setup: Replication with two suppliers. + :steps: + 1. Generate vlvSearch entry + 2. Generate vlvIndex entry + 3. Add 5K users + 4. Search users + 5. test a vlv search result + :expectedresults: + 1. Should Success. + 2. Should Success. + 3. Should Success. + 4. Should Success. + 5. Should Success. + """ + inst = topology_st.standalone + + # generate vlvSearch entry + properties_for_search = { + "objectclass": ["top", "vlvSearch"], + "cn": "vlvSrch", + "vlvbase": DEFAULT_SUFFIX, + "vlvfilter": "(uid=*)", + "vlvscope": "2", + } + vlv_searches = VLVSearch(inst) + userroot_vlvsearch = vlv_searches.create( + basedn="cn=userRoot,cn=ldbm database,cn=plugins,cn=config", + properties=properties_for_search, + ) + assert "cn=vlvSrch,cn=userRoot,cn=ldbm database,cn=plugins,cn=config" in inst.getEntry( + "cn=vlvSrch,cn=userRoot,cn=ldbm database,cn=plugins,cn=config").dn + # generate vlvIndex entry + properties_for_index = { + "objectclass": ["top", "vlvIndex"], + "cn": "vlvIdx", + "vlvsort": "cn", + } + vlv_index = VLVIndex(inst) + userroot_index = vlv_index.create( + basedn="cn=vlvSrch,cn=userRoot,cn=ldbm database,cn=plugins,cn=config", + properties=properties_for_index, + ) + assert "cn=vlvIdx,cn=vlvSrch,cn=userRoot,cn=ldbm database,cn=plugins,cn=config" in inst.getEntry( + "cn=vlvIdx,cn=vlvSrch,cn=userRoot,cn=ldbm database,cn=plugins,cn=config").dn + + # opening a new LDAPSimpleObject connection avoid the warning we got when using directly inst + conn = open_new_ldapi_conn(inst.serverid) + add_users(inst, 5000); + entries = conn.search_s(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE, "(cn=*)") + assert len(entries) > 0 + 
check_vlv_search(conn) + + +if __name__ == "__main__": + # Run isolated + # -s for DEBUG mode + CURRENT_FILE = os.path.realpath(__file__) + pytest.main("-s %s" % CURRENT_FILE) diff --git a/dirsrvtests/tests/suites/betxns/__init__.py b/dirsrvtests/tests/suites/betxns/__init__.py new file mode 100644 index 0000000..1c260e0 --- /dev/null +++ b/dirsrvtests/tests/suites/betxns/__init__.py @@ -0,0 +1,3 @@ +""" + :Requirement: 389-ds-base: betxn Plugin +""" diff --git a/dirsrvtests/tests/suites/betxns/betxn_test.py b/dirsrvtests/tests/suites/betxns/betxn_test.py new file mode 100644 index 0000000..b5ca010 --- /dev/null +++ b/dirsrvtests/tests/suites/betxns/betxn_test.py @@ -0,0 +1,364 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2016 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. +# --- END COPYRIGHT BLOCK --- +# +import pytest +import ldap +from lib389.tasks import * +from lib389.utils import * +from lib389.topologies import topology_st +from lib389.plugins import (SevenBitCheckPlugin, AttributeUniquenessPlugin, + MemberOfPlugin, ManagedEntriesPlugin, + ReferentialIntegrityPlugin, MEPTemplates, + MEPConfigs) +from lib389.idm.user import UserAccounts, TEST_USER_PROPERTIES +from lib389.idm.organizationalunit import OrganizationalUnits +from lib389.idm.group import Groups, Group +from lib389.idm.domain import Domain +from lib389._constants import DEFAULT_SUFFIX + +pytestmark = pytest.mark.tier1 + +logging.getLogger(__name__).setLevel(logging.DEBUG) +log = logging.getLogger(__name__) +USER_PASSWORD = 'password' + + +def test_betxt_7bit(topology_st): + """Test that the 7-bit plugin correctly rejects an invalid update + + :id: 9e2ab27b-eda9-4cd9-9968-a1a8513210fd + + :setup: Standalone instance and enabled dynamic plugins + + :steps: 1. Enable PLUGIN_7_BIT_CHECK to "ON" + 2. Add test user + 3. Try to Modify test user's RDN to have 8 bit RDN + 4. Execute search operation for new 8 bit RDN + 5. 
Remove the test user for cleanup + + :expectedresults: + 1. PLUGIN_7_BIT_CHECK should be ON + 2. Test users should be added + 3. Modify RDN for test user should FAIL + 4. Search operation should FAIL + 5. Test user should be removed + """ + + log.info('Running test_betxt_7bit...') + + BAD_RDN = u'uid=Fu\u00c4\u00e8' + + sevenbc = SevenBitCheckPlugin(topology_st.standalone) + sevenbc.enable() + topology_st.standalone.restart() + + users = UserAccounts(topology_st.standalone, basedn=DEFAULT_SUFFIX) + user = users.create(properties=TEST_USER_PROPERTIES) + + # Attempt a modrdn, this should fail + with pytest.raises(ldap.LDAPError): + user.rename(BAD_RDN) + + # Make sure the operation did not succeed, attempt to search for the new RDN + with pytest.raises(ldap.LDAPError): + users.get(u'Fu\u00c4\u00e8') + + # Make sure original entry is present + user_check = users.get("testuser") + assert user_check.dn.lower() == user.dn.lower() + + # Cleanup - remove the user + user.delete() + + log.info('test_betxt_7bit: PASSED') + + +def test_betxn_attr_uniqueness(topology_st): + """Test that we can not add two entries that have the same attr value that is + defined by the plugin + + :id: 42aeb41c-fbb5-4bc6-a97b-56274034d29f + + :setup: Standalone instance and enabled dynamic plugins + + :steps: 1. Enable PLUGIN_ATTR_UNIQUENESS plugin as "ON" + 2. Add a test user + 3. Add another test user having duplicate uid as previous one + 4. Cleanup - disable PLUGIN_ATTR_UNIQUENESS plugin as "OFF" + 5. Cleanup - remove test user entry + + :expectedresults: + 1. PLUGIN_ATTR_UNIQUENESS plugin should be ON + 2. Test user should be added + 3. Add operation should FAIL + 4. PLUGIN_ATTR_UNIQUENESS plugin should be "OFF" + 5. 
Test user entry should be removed + """ + + attruniq = AttributeUniquenessPlugin(topology_st.standalone, dn="cn=attruniq,cn=plugins,cn=config") + attruniq.create(properties={'cn': 'attruniq'}) + attruniq.add_unique_attribute('uid') + attruniq.add_unique_subtree(DEFAULT_SUFFIX) + attruniq.enable_all_subtrees() + attruniq.enable() + topology_st.standalone.restart() + + users = UserAccounts(topology_st.standalone, basedn=DEFAULT_SUFFIX) + user1 = users.create(properties={ + 'uid': 'testuser1', + 'cn': 'testuser1', + 'sn': 'user1', + 'uidNumber': '1001', + 'gidNumber': '2001', + 'homeDirectory': '/home/testuser1' + }) + + with pytest.raises(ldap.LDAPError): + users.create(properties={ + 'uid': ['testuser2', 'testuser1'], + 'cn': 'testuser2', + 'sn': 'user2', + 'uidNumber': '1002', + 'gidNumber': '2002', + 'homeDirectory': '/home/testuser2' + }) + + user1.delete() + + log.info('test_betxn_attr_uniqueness: PASSED') + + +def test_betxn_memberof(topology_st): + """Test PLUGIN_MEMBER_OF plugin + + :id: 70d0b96e-b693-4bf7-bbf5-102a66ac5993 + + :setup: Standalone instance and enabled dynamic plugins + + :steps: 1. Enable and configure memberOf plugin + 2. Set memberofgroupattr="member" and memberofAutoAddOC="referral" + 3. Add two test groups - group1 and group2 + 4. Add group2 to group1 + 5. Add group1 to group2 + + :expectedresults: + 1. memberOf plugin plugin should be ON + 2. Set memberofgroupattr="member" and memberofAutoAddOC="referral" should PASS + 3. Add operation should PASS + 4. Add operation should FAIL + 5. Add operation should FAIL + """ + + memberof = MemberOfPlugin(topology_st.standalone) + memberof.enable() + memberof.set_autoaddoc('referral') + topology_st.standalone.restart() + + groups = Groups(topology_st.standalone, DEFAULT_SUFFIX) + group1 = groups.create(properties={'cn': 'group1'}) + group2 = groups.create(properties={'cn': 'group2'}) + + # We may need to mod groups to not have nsMemberOf ... ? 
+ if not ds_is_older('1.3.7'): + group1.remove('objectClass', 'nsMemberOf') + group2.remove('objectClass', 'nsMemberOf') + + # Add group2 to group1 - it should fail with objectclass violation + with pytest.raises(ldap.OBJECT_CLASS_VIOLATION): + group1.add_member(group2.dn) + + # verify entry cache reflects the current/correct state of group1 + assert not group1.is_member(group2.dn) + + # Done + log.info('test_betxn_memberof: PASSED') + + +def test_betxn_modrdn_memberof_cache_corruption(topology_st): + """Test modrdn operations and memberOf be txn post op failures + + :id: 70d0b96e-b693-4bf7-bbf5-102a66ac5994 + + :setup: Standalone instance + + :steps: 1. Enable and configure memberOf plugin + 2. Set memberofgroupattr="member" and memberofAutoAddOC="nsContainer" + 3. Create group and user outside of memberOf plugin scope + 4. Do modrdn to move group into scope + 5. Do modrdn to move group into scope (again) + + :expectedresults: + 1. memberOf plugin plugin should be ON + 2. Set memberofgroupattr="member" and memberofAutoAddOC="nsContainer" should PASS + 3. Creating group and user should PASS + 4. Modrdn should fail with objectclass violation + 5. 
Second modrdn should also fail with objectclass violation + """ + + peoplebase = 'ou=people,%s' % DEFAULT_SUFFIX + memberof = MemberOfPlugin(topology_st.standalone) + memberof.enable() + memberof.set_autoaddoc('nsContainer') # Bad OC + memberof.set('memberOfEntryScope', peoplebase) + memberof.set('memberOfAllBackends', 'on') + topology_st.standalone.restart() + + groups = Groups(topology_st.standalone, DEFAULT_SUFFIX) + group = groups.create(properties={ + 'cn': 'group', + }) + + # Create user and add it to group + users = UserAccounts(topology_st.standalone, basedn=DEFAULT_SUFFIX) + user = users.ensure_state(properties=TEST_USER_PROPERTIES) + if not ds_is_older('1.3.7'): + user.remove('objectClass', 'nsMemberOf') + + group.add_member(user.dn) + + # Attempt modrdn that should fail, but the original entry should stay in the cache + with pytest.raises(ldap.OBJECT_CLASS_VIOLATION): + group.rename('cn=group_to_people', newsuperior=peoplebase) + + # Should fail, but not with NO_SUCH_OBJECT as the original entry should still be in the cache + with pytest.raises(ldap.OBJECT_CLASS_VIOLATION): + group.rename('cn=group_to_people', newsuperior=peoplebase) + + # Done + log.info('test_betxn_modrdn_memberof: PASSED') + + +def test_ri_and_mep_cache_corruption(topology_st): + """Test RI plugin aborts change after MEP plugin fails. + This is really testing the entry cache for corruption + + :id: 70d0b96e-b693-4bf7-bbf5-102a66ac5995 + + :setup: Standalone instance + + :steps: 1. Enable and configure mep and ri plugins + 2. Add user and add it to a group + 3. Disable MEP plugin and remove MEP group + 4. Delete user + 5. Check that user is still a member of the group + + :expectedresults: + 1. Success + 2. Success + 3. Success + 4. It fails with NO_SUCH_OBJECT + 5. 
Success + + """ + # Add ACI so we can test that non-DM user can't delete managed entry + domain = Domain(topology_st.standalone, DEFAULT_SUFFIX) + ACI_TARGET = f"(target = \"ldap:///{DEFAULT_SUFFIX}\")" + ACI_TARGETATTR = "(targetattr = *)" + ACI_ALLOW = "(version 3.0; acl \"Admin Access\"; allow (all) " + ACI_SUBJECT = "(userdn = \"ldap:///anyone\");)" + ACI_BODY = ACI_TARGET + ACI_TARGETATTR + ACI_ALLOW + ACI_SUBJECT + domain.add('aci', ACI_BODY) + + # Start plugins + topology_st.standalone.config.set('nsslapd-dynamic-plugins', 'on') + mep_plugin = ManagedEntriesPlugin(topology_st.standalone) + mep_plugin.enable() + ri_plugin = ReferentialIntegrityPlugin(topology_st.standalone) + ri_plugin.enable() + + # Add our org units + ous = OrganizationalUnits(topology_st.standalone, DEFAULT_SUFFIX) + ou_people = ous.create(properties={'ou': 'managed_people'}) + ou_groups = ous.create(properties={'ou': 'managed_groups'}) + + # Configure MEP + mep_templates = MEPTemplates(topology_st.standalone, DEFAULT_SUFFIX) + mep_template1 = mep_templates.create(properties={ + 'cn': 'MEP template', + 'mepRDNAttr': 'cn', + 'mepStaticAttr': 'objectclass: groupOfNames|objectclass: extensibleObject'.split('|'), + 'mepMappedAttr': 'cn: $cn|uid: $cn|gidNumber: $uidNumber'.split('|') + }) + mep_configs = MEPConfigs(topology_st.standalone) + mep_configs.create(properties={'cn': 'config', + 'originScope': ou_people.dn, + 'originFilter': 'objectclass=posixAccount', + 'managedBase': ou_groups.dn, + 'managedTemplate': mep_template1.dn}) + + # Add an entry that meets the MEP scope + users = UserAccounts(topology_st.standalone, DEFAULT_SUFFIX, + rdn='ou={}'.format(ou_people.rdn)) + user = users.create(properties={ + 'uid': 'test-user1', + 'cn': 'test-user', + 'sn': 'test-user', + 'uidNumber': '10011', + 'gidNumber': '20011', + 'homeDirectory': '/home/test-user1' + }) + user.reset_password(USER_PASSWORD) + user_bound_conn = user.bind(USER_PASSWORD) + + # Add group + groups = 
Groups(topology_st.standalone, DEFAULT_SUFFIX) + user_group = groups.ensure_state(properties={'cn': 'group', 'member': user.dn}) + + # Check if a managed group entry was created + mep_group = Group(topology_st.standalone, dn='cn={},{}'.format(user.rdn, ou_groups.dn)) + if not mep_group.exists(): + log.fatal("MEP group was not created for the user") + assert False + + # Test MEP be txn pre op failure does not corrupt entry cache + # Should get the same exception for both rename attempts + # Try to remove the entry while bound as Admin (non-DM) + managed_groups_user_conn = Groups(user_bound_conn, ou_groups.dn, rdn=None) + managed_entry_user_conn = managed_groups_user_conn.get(user.rdn) + with pytest.raises(ldap.UNWILLING_TO_PERFORM): + managed_entry_user_conn.rename("cn=modrdn group") + with pytest.raises(ldap.UNWILLING_TO_PERFORM): + managed_entry_user_conn.rename("cn=modrdn group") + + # Mess with MEP so it fails + mep_plugin.disable() + users_mep_group = UserAccounts(topology_st.standalone, mep_group.dn, rdn=None) + users_mep_group.create_test_user(1001) + mep_plugin.enable() + + # Add another group to verify entry cache is not corrupted + test_group = groups.create(properties={'cn': 'test_group'}) + + # Try to delete user - it fails because managed entry can't be deleted + with pytest.raises(ldap.NOT_ALLOWED_ON_NONLEAF): + user.delete() + + # Verify membership is intact + if not user_group.is_member(user.dn): + log.fatal("Member was incorrectly removed from the group!! 
Or so it seems") + + # Restart server and test again in case this was a cache issue + topology_st.standalone.restart() + if user_group.is_member(user.dn): + log.info("The entry cache was corrupted") + assert False + + assert False + + # Verify test group is still found in entry cache by deleting it + test_group.delete() + + # Success + log.info("Test PASSED") + + +if __name__ == '__main__': + # Run isolated + # -s for DEBUG mode + CURRENT_FILE = os.path.realpath(__file__) + pytest.main("-s %s" % CURRENT_FILE) diff --git a/dirsrvtests/tests/suites/chaining_plugin/__init__.py b/dirsrvtests/tests/suites/chaining_plugin/__init__.py new file mode 100644 index 0000000..cb50c7e --- /dev/null +++ b/dirsrvtests/tests/suites/chaining_plugin/__init__.py @@ -0,0 +1,3 @@ +""" + :Requirement: 389-ds-base: Chaining Plugin +""" diff --git a/dirsrvtests/tests/suites/chaining_plugin/anonymous_access_denied_basic.py b/dirsrvtests/tests/suites/chaining_plugin/anonymous_access_denied_basic.py new file mode 100644 index 0000000..1d03340 --- /dev/null +++ b/dirsrvtests/tests/suites/chaining_plugin/anonymous_access_denied_basic.py @@ -0,0 +1,149 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2021 William Brown +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. 
+# --- END COPYRIGHT BLOCK --- + +import ldap +import pytest +import time +import shutil +from lib389.idm.account import Accounts, Account +from lib389.topologies import topology_i2 as topology +from lib389.backend import Backends +from lib389._constants import DEFAULT_SUFFIX +from lib389.plugins import ChainingBackendPlugin +from lib389.chaining import ChainingLinks +from lib389.mappingTree import MappingTrees +from lib389.idm.services import ServiceAccounts, ServiceAccount +from lib389.idm.domain import Domain + +PW = 'thnaoehtnuaoenhtuaoehtnu' + +pytestmark = pytest.mark.tier1 + +def test_chaining_paged_search(topology): + """ Check that when the chaining target has anonymous access + disabled that the ping still functions and allows the search + to continue with an appropriate bind user. + + :id: 00bf31db-d93b-4224-8e70-86abb2d4cd17 + :setup: Two standalones in chaining. + :steps: + 1. Configure chaining between the nodes + 2. Do a chaining search (w anon allow) to assert it works + 3. Configure anonymous access disallowed on st2 + 4. Restart both + 5. Check search still works + + :expectedresults: + 1. Success + 2. Success + 3. Success + 4. Success + 5. Success + """ + st1 = topology.ins["standalone1"] + st2 = topology.ins["standalone2"] + + ### We set up chaining so that st1 -> st2 + + # Setup a chaining user on st2 to authenticate to. + sa = ServiceAccounts(st2, DEFAULT_SUFFIX).create(properties = { + 'cn': 'sa', + 'userPassword': PW + }) + + # Add a proxy user.
+ sproxy = ServiceAccounts(st2, DEFAULT_SUFFIX).create(properties = { + 'cn': 'proxy', + 'userPassword': PW + }) + + # Add the read and proxy ACI + dc = Domain(st2, DEFAULT_SUFFIX) + dc.add('aci', + f"""(targetattr="objectClass || cn || uid")(version 3.0; acl "Enable sa read"; allow (read, search, compare)(userdn="ldap:///{sa.dn}");)""" + ) + # Add the proxy ACI + dc.add('aci', + f"""(targetattr="*")(version 3.0; acl "Enable proxy access"; allow (proxy)(userdn="ldap:///{sproxy.dn}");)""" + ) + + # Clear all the BE in st1 + bes1 = Backends(st1) + for be in bes1.list(): + be.delete() + + # Setup st1 to chain to st2 + chain_plugin_1 = ChainingBackendPlugin(st1) + chain_plugin_1.enable() + + # Chain with the proxy user. + chains = ChainingLinks(st1) + chain = chains.create(properties={ + 'cn': 'demochain', + 'nsfarmserverurl': st2.toLDAPURL(), + 'nsslapd-suffix': DEFAULT_SUFFIX, + 'nsmultiplexorbinddn': sproxy.dn, + 'nsmultiplexorcredentials': PW, + 'nsCheckLocalACI': 'on', + 'nsConnectionLife': '30', + }) + + mts = MappingTrees(st1) + # Due to a bug in lib389, we need to delete and recreate the mt. + for mt in mts.list(): + mt.delete() + mts.ensure_state(properties={ + 'cn': DEFAULT_SUFFIX, + 'nsslapd-state': 'backend', + 'nsslapd-backend': 'demochain', + 'nsslapd-distribution-plugin': 'libreplication-plugin', + 'nsslapd-distribution-funct': 'repl_chain_on_update', + }) + + # Enable pwpolicy (Not sure if part of the issue). + st1.config.set('passwordIsGlobalPolicy', 'on') + st2.config.set('passwordIsGlobalPolicy', 'on') + + # Restart to enable everything. + st1.restart() + + # Get a proxy auth connection. + sa1 = ServiceAccount(st1, sa.dn) + sa1_conn = sa1.bind(password=PW) + + # Now do a search from st1 -> st2 + sa1_dc = Domain(sa1_conn, DEFAULT_SUFFIX) + assert sa1_dc.exists() + + # Now on st2 disable anonymous access. + st2.config.set('nsslapd-allow-anonymous-access', 'rootdse') + + # Stop st2 to force the connection to be dead. 
+ st2.stop() + # Restart st1 - this means it must re-do the ping/keepalive. + st1.restart() + + # do a bind - this should fail, and forces the conn offline. + with pytest.raises(ldap.OPERATIONS_ERROR): + sa1.bind(password=PW) + + # Allow time to attach lldb if needed. + # print("🔥🔥🔥") + # time.sleep(45) + + # Bring st2 online. + st2.start() + + # Wait a bit + time.sleep(5) + + # Get a proxy auth connection (again) + sa1_conn = sa1.bind(password=PW) + # Now do a search from st1 -> st2 + sa1_dc = Domain(sa1_conn, DEFAULT_SUFFIX) + assert sa1_dc.exists() diff --git a/dirsrvtests/tests/suites/chaining_plugin/paged_search_test.py b/dirsrvtests/tests/suites/chaining_plugin/paged_search_test.py new file mode 100644 index 0000000..108f9b7 --- /dev/null +++ b/dirsrvtests/tests/suites/chaining_plugin/paged_search_test.py @@ -0,0 +1,91 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2020 William Brown +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. +# --- END COPYRIGHT BLOCK --- + +import ldap +import pytest +import time +import shutil +from lib389.idm.account import Accounts, Account +from lib389.topologies import topology_i2 as topology +from lib389.backend import Backends +from lib389._constants import DEFAULT_SUFFIX +from lib389.plugins import ChainingBackendPlugin +from lib389.chaining import ChainingLinks +from lib389.mappingTree import MappingTrees + +pytestmark = pytest.mark.tier1 + +def test_chaining_paged_search(topology): + """ Test paged search through the chaining db. This + would cause a SIGSEGV with paged search which could + be triggered by SSSD. + + :id: 7b29b1f5-26cf-49fa-9fe7-ee29a1408633 + :setup: Two standalones in chaining. + :steps: + 1. Configure chaining between the nodes + 2. Do a chaining search (no page) to assert it works + 3. Do a paged search through chaining. + + :expectedresults: + 1. Success + 2. Success + 3. 
Success + """ + st1 = topology.ins["standalone1"] + st2 = topology.ins["standalone2"] + + ### We setup so that st1 -> st2 + + # Clear all the BE in st1 + bes1 = Backends(st1) + for be in bes1.list(): + be.delete() + + # Setup st1 to chain to st2 + chain_plugin_1 = ChainingBackendPlugin(st1) + chain_plugin_1.enable() + + chains = ChainingLinks(st1) + chain = chains.create(properties={ + 'cn': 'demochain', + 'nsslapd-suffix': DEFAULT_SUFFIX, + 'nsmultiplexorbinddn': '', + 'nsmultiplexorcredentials': '', + 'nsfarmserverurl': st2.toLDAPURL(), + }) + + mts = MappingTrees(st1) + # Due to a bug in lib389, we need to delete and recreate the mt. + for mt in mts.list(): + mt.delete() + mts.ensure_state(properties={ + 'cn': DEFAULT_SUFFIX, + 'nsslapd-state': 'backend', + 'nsslapd-backend': 'demochain', + }) + # Restart to enable + st1.restart() + + # Get an anonymous connection. + anon = Account(st1, dn='') + anon_conn = anon.bind(password='') + + # Now do a search from st1 -> st2 + accs_1 = Accounts(anon_conn, DEFAULT_SUFFIX) + assert len(accs_1.list()) > 0 + + # Allow time to attach lldb if needed. 
+ # import time + # print("🔥🔥🔥") + # time.sleep(45) + + # Now do a *paged* search from st1 -> st2 + assert len(accs_1.list(paged_search=2, paged_critical=False)) > 0 + + diff --git a/dirsrvtests/tests/suites/clu/__init__.py b/dirsrvtests/tests/suites/clu/__init__.py new file mode 100644 index 0000000..0388a32 --- /dev/null +++ b/dirsrvtests/tests/suites/clu/__init__.py @@ -0,0 +1,31 @@ +""" + :Requirement: 389-ds-base: Command Line Utility +""" + +import logging + + +log = logging.getLogger(__name__) + +def check_value_in_log_and_reset(topology, content_list=None, content_list2=None, check_value=None, + check_value_not=None): + if content_list2 is not None: + log.info('Check if content is present in output') + for item in content_list + content_list2: + assert topology.logcap.contains(item) + + if content_list is not None: + log.info('Check if content is present in output') + for item in content_list: + assert topology.logcap.contains(item) + + if check_value is not None: + log.info('Check if value is present in output') + assert topology.logcap.contains(check_value) + + if check_value_not is not None: + log.info('Check if value is not present in output') + assert not topology.logcap.contains(check_value_not) + + log.info('Reset the log for next test') + topology.logcap.flush() diff --git a/dirsrvtests/tests/suites/clu/ca_cert_bundle_test.py b/dirsrvtests/tests/suites/clu/ca_cert_bundle_test.py new file mode 100644 index 0000000..1d2ccc5 --- /dev/null +++ b/dirsrvtests/tests/suites/clu/ca_cert_bundle_test.py @@ -0,0 +1,166 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2022 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. 
+# --- END COPYRIGHT BLOCK --- + +import logging +import pytest +import os +from lib389._constants import DEFAULT_SUFFIX +from lib389.topologies import topology_st as topo +from lib389.cli_base import FakeArgs +from lib389.cli_conf.security import cacert_add, cacert_list, cert_del +from lib389.cli_ctl.tls import import_ca +from lib389.cli_base import LogCapture + +log = logging.getLogger(__name__) + +PEM_CONTEXT = """ +-----BEGIN CERTIFICATE----- +MIIFZjCCA06gAwIBAgIFAL18JOowDQYJKoZIhvcNAQELBQAwZTELMAkGA1UEBhMC +QVUxEzARBgNVBAgTClF1ZWVuc2xhbmQxDjAMBgNVBAcTBTM4OWRzMRAwDgYDVQQK +Ewd0ZXN0aW5nMR8wHQYDVQQDExZzc2NhLjM4OWRzLmV4YW1wbGUuY29tMB4XDTIy +MTAyNTE5NDU0M1oXDTI0MTAyNTE5NDU0M1owZTELMAkGA1UEBhMCQVUxEzARBgNV +BAgTClF1ZWVuc2xhbmQxDjAMBgNVBAcTBTM4OWRzMRAwDgYDVQQKEwd0ZXN0aW5n +MR8wHQYDVQQDExZzc2NhLjM4OWRzLmV4YW1wbGUuY29tMIICIjANBgkqhkiG9w0B +AQEFAAOCAg8AMIICCgKCAgEA5E+pd7+8lBsbTKdjHgkSLi2Z5T5G9T+3wziDHhsz +F0nG+IOu5yYVkoj/bMxR3sNNlbDLk5ATyNAfytW3cAUZ3NLqm6bmEZdUjD6YycVk +AvrfY3zVVE9Debfw6JI3ml8JlC3t8dqn2KT7dmSjvr9zPS95HU+RepjzAqJAKY3B +27v0cMetUnxG4pqc7zqnSZJXVP/OXMKSNpujHnK8HyjT8tUJIYQ0YvU2JPJpz3fC +BJrmzgO2xYLgLPu6abhP6PQ6uUU+d4j36lG4J/4OiMY0Lr+mnaBAaD3ULPtN5eZh +fjQ9d+Sh89xHz92icWhkn8c7IHNEZNtMHNTNJiNbWKuU9HpBWNjWHJoxSxXn4Emr +DSfG+lq2UU2m9m+XrDK/7t0W/zC3S+zwcyqM8SJAiZnGEi85058wB0BB1HnnAfFX +gel3uZFhnR4d86O/vO5VUqg5Ko795DPzPa3SU4rR36U3nUF7g5WhEAmYNCj683D3 +DJDPJeCZmis7xtYB5K6Wu6SnFDxBEfhcWSsamWM286KntOiUtqQEzDy4OpZEUsgq +s7uqQSl/dfGdY9hCpXMYhlvMfVv3aIoM5zPuXN2cE1QkTnE1pyo8gZqnPLFZnwc9 +FT+Wjpy0EmsAM/5AIed5h+JgJ304P+wkyjf7APUZyUwf4UJN6aro6N8W23F7dAu5 +uJ0CAwEAAaMdMBswDAYDVR0TBAUwAwEB/zALBgNVHQ8EBAMCAgQwDQYJKoZIhvcN +AQELBQADggIBADFlVdDp1gTF/gl5ysZoo4/4BBSpLx5JDeyPE6BityZ/lwHvBUq3 +VzmIsU6kxAJfk0p9S7G4LgHIC/DVsLTIE5do6tUdyrawvcaanbYn9ScNoFVQ0GDS +C6Ir8PftEvc6bpI4hjkv4goK9fTq5Jtv4LSuRfxFEwoLO+WN8a65IFWlaHJl/Erp +9fzP+JKDo8zeh4qnMkaTSCBmLWa8kErrV462RU+qZktf/V4/gWg6k5Vp+82xNk7f +9/Mrg9KshNux7A4YCd8LgLEeCgsigi4N6zcfjQB0Rh5u9kXu/hzOjh379ki/vqju 
+i+MTVH97LMB47uR1LEl0VvhWSjID0ePUtbPHCJwOsxWyxBCJY6V7A9nj52uXMGuX +xghssZTFvRK6Bb1OiPNYRGqmuymm8rcSFdsY5yemkxJ6kfn40JIRCmVFwqaqu7MC +nxyaWAKpRHKM5IyeVZHkFzL9jR/2tVBbjfCAl6YSwM759VcOsw2SGMQKpGIPEBTa +1NBdlG45aWJBx5jBdVfOskLjxmBjosByJJHRLtrUBvg66ZBsx1k0c9XjsKmC59JP +AzI8zYp/TY/6T5igxM+CSx98DsJFccPBZFFJX+ZYRL7DFN38Yb7jMgIUXYHS28Gc +1c8kz7ylcQB8lKgCgpcBCH5ZSnLVAnH3uqCygxSTgTo+jgJklKc0xFuR +-----END CERTIFICATE----- +-----BEGIN CERTIFICATE----- +MIIFZjCCA06gAwIBAgIFAL2uT7gwDQYJKoZIhvcNAQELBQAwZTELMAkGA1UEBhMC +QVUxEzARBgNVBAgTClF1ZWVuc2xhbmQxDjAMBgNVBAcTBTM4OWRzMRAwDgYDVQQK +Ewd0ZXN0aW5nMR8wHQYDVQQDExZzc2NhLjM4OWRzLmV4YW1wbGUuY29tMB4XDTIy +MTExNDE4MzQzNloXDTI0MTExNDE4MzQzNlowZTELMAkGA1UEBhMCQVUxEzARBgNV +BAgTClF1ZWVuc2xhbmQxDjAMBgNVBAcTBTM4OWRzMRAwDgYDVQQKEwd0ZXN0aW5n +MR8wHQYDVQQDExZzc2NhLjM4OWRzLmV4YW1wbGUuY29tMIICIjANBgkqhkiG9w0B +AQEFAAOCAg8AMIICCgKCAgEA5NwbBn/W/KcZDzfw0fs/JI0+1aWWTu7PfSJxXySt +Z/CagcdKtmSRqWasI4QkdQN8ydiDuJJoWWcrO2UOuJw0m5uRbZTDn29Khr7x8SbT +L8luDi+2cZ0ewrlBdae3C1lx9fRpKxITv40D1KLGPsyy3a5+aiI/vbZqG2JjxYzn +d6DQju5mpch+ATNPp43vLRtET5Zq/QcOELBhVuBqcOf+UmwK/fCQ3GjluyIK71AD +eezafnYyZtCoaVlkyFdSBDOg8/OcwnjeWSQSoV61nwKbmLjVJf1OKgUVXViQZkKD +2vUTaG7CcmNaP/IFHSjFKWDngmCCbPH554B6vDMt7IpVUd13f8+Zl7zzUAqpAsTd +02D+GqrIYPIPI/ONComkoHwaWodWrI3/CXAMMnjelQSmt/uivGkANxWm9bG1YHAW +cqGRYm8Zb5vkUotcHQc17h5SkRoThlVynXD3oVgadHgm+LgPRiB3uCnVrCpfaa56 +5UnRYZu/mB4jWVQlQ1ASA7HS0mZTPxfUvHXBdyYlpGUtvGbklW5RKjTSlM0vUYJl +kj0p2DaN9cjll+Oa4keqgIfa6Boi9rkuMGFE48rk/u6FadGgGhp2Bb19hUl6OQjD +qnWBDrX8dvfQxiz8MbETwd9TKJVHxOCdQzpmS0GgZbDO5wMMaARdntTRg2MUH7e2 +Z+UCAwEAAaMdMBswDAYDVR0TBAUwAwEB/zALBgNVHQ8EBAMCAgQwDQYJKoZIhvcN +AQELBQADggIBAGauYyCzqKiJKVkqXSvUeOxQpFb21sv3dWrUwq1YsyLx8sxJSXwe +Na1YjdymUueh75rLOChZVyYBQQeW5OKpmigUH3j70tQkg6DnZXGBZ532hxTwfm4F +5uHXIfwd4wFbuu8ZDa8DFZVqQpWBAyyQsdmGG2OZaQBp4MH3kk4zLUFG9xgp1qke +KSYsOTmtuPR0Aw7vWJUClZoSC5WG+b3d19oEXVR+3vPPLkL5UJmcJgiS2BPpsbyt +fX+yD6QmRj2XLi5/T8h7ZeSRPzSsccAu/hIpzpyQxa8lSJ4+I0DjQSz3N7xW/0FS 
+b9yKzMgaz1ctEhtNj3paT14C4/uKUdlQLb3UzKmne1iPuJPtBOhBh6ofPPflbI/v +ceNZPSG68XiR7qvgMRYyotop106EwHDUr2YzSpoD+CmjQ4n/+6/zfyrsuSq10XjF +0U3dtQu5ETFakOwB2Mj+T1b5Q01km3FN+p7n0lLhB+F9n3WQ5uQHPXntAsh9w9HH +hTOSIFeJCMPuWd4OoGBsiT7koFQtW5I+e19sxkKuOxoO5fgpCf1IgMAuKYppGJNb +Bbc+LAFMLKxlOy8WzEFjewV2fSBtCrSlyu+aMXBYtXLeW6iqvgYQpmDKOvxLM0j2 +jBqLRMRQN4FvzuCZiMl/DwJv4yhAZ8hylYjRjqjY/fEPvhvJRncPVy8z +-----END CERTIFICATE----- +""" + + +def test_ca_cert_bundle(topo): + """Test we can add a CAS certificate bundle + + :id: b39c98f5-374f-4b40-abee-4dd0a0c41641 + :setup: Standalone Instance + :steps: + 1. Create PEM file + 2. Add PEM file with two CA certs + 3. List the new CA certs + 4. Remove CA certs + 5. Add CA certs using dsctl + 6. List the new CA certs + :expectedresults: + 1. Success + 2. Success + 3. Success + 4. Success + 5. Success + 6. Success + + """ + inst = topo.standalone + lc = LogCapture() + + # Create PEM file with 2 CA certs + pem_file = "/tmp/ca-bundle.pem" + log.info('Write pem file') + with open(pem_file, 'w') as f: + for line in PEM_CONTEXT: + f.write(line) + + # Add PEM file + args = FakeArgs() + args.name = ['CA_CERT_1', 'CA_CERT_2'] + args.file = pem_file + cacert_add(inst, DEFAULT_SUFFIX, log, args) + + # List CA certs + args = FakeArgs() + args.json = False + cacert_list(inst, DEFAULT_SUFFIX, lc.log, args) + assert lc.contains('CA_CERT_1') + assert lc.contains('CA_CERT_2') + + # Test dsctl now, first remove the certs + args = FakeArgs() + args.name = 'CA_CERT_1' + cert_del(inst, DEFAULT_SUFFIX, log, args) + args.name = 'CA_CERT_2' + cert_del(inst, DEFAULT_SUFFIX, log, args) + + # List CA certs + lc.flush() + args = FakeArgs() + args.json = False + cacert_list(inst, DEFAULT_SUFFIX, lc.log, args) + assert not lc.contains('CA_CERT_1') + assert not lc.contains('CA_CERT_2') + + # Add certs using dsctl + args = FakeArgs() + args.nickname = ['CA_CERT_1', 'CA_CERT_2'] + args.cert_path = pem_file + import_ca(inst, log, args) + + # List CA certs + lc.flush() + args 
= FakeArgs() + args.json = False + cacert_list(inst, DEFAULT_SUFFIX, lc.log, args) + assert lc.contains('CA_CERT_1') + assert lc.contains('CA_CERT_2') + + +if __name__ == '__main__': + # Run isolated + # -s for DEBUG mode + CURRENT_FILE = os.path.realpath(__file__) + pytest.main(["-s", CURRENT_FILE]) + diff --git a/dirsrvtests/tests/suites/clu/clu_test.py b/dirsrvtests/tests/suites/clu/clu_test.py new file mode 100644 index 0000000..fec6915 --- /dev/null +++ b/dirsrvtests/tests/suites/clu/clu_test.py @@ -0,0 +1,95 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2016 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. +# --- END COPYRIGHT BLOCK --- +# +import time +import subprocess +import pytest +from lib389.tasks import * +from lib389.utils import * +from lib389.topologies import topology_st + +pytestmark = pytest.mark.tier0 + +logging.getLogger(__name__).setLevel(logging.DEBUG) +log = logging.getLogger(__name__) + + +def test_clu_pwdhash(topology_st): + """Test the pwdhash script output and encrypted password length + + :id: faaafd01-6748-4451-9d2b-f3bd47902447 + + :setup: Standalone instance + + :steps: + 1. Execute /usr/bin/pwdhash -s ssha testpassword command from command line + 2. Check if there is any output + 3. Check the length of the generated output + + :expectedresults: + 1. Execution should PASS + 2. There should be an output from the command + 3. 
Output length should not be less than 20 + """ + + log.info('Running test_clu_pwdhash...') + + cmd = '%s -s ssha testpassword' % os.path.join(topology_st.standalone.get_bin_dir(), 'pwdhash') + p = os.popen(cmd) + result = p.readline() + p.close() + + if not result: + log.fatal('test_clu_pwdhash: Failed to run pwdhash') + assert False + + if len(result) < 20: + log.fatal('test_clu_pwdhash: Encrypted password is too short') + assert False + log.info('pwdhash generated: ' + result) + log.info('test_clu_pwdhash: PASSED') + + +def test_clu_pwdhash_mod(topology_st): + """Test the pwdhash script output with -D configdir + + :id: 874ab5e2-207b-4a95-b4c0-22d97b8ab643 + + :setup: Standalone instance + + :steps: + 1. Set nsslapd-rootpwstoragescheme & passwordStorageScheme to SSHA256 & SSHA384 respectively + 2. Execute /usr/bin/pwdhash -D /etc/dirsrv/slapd-instance_name/ + 3. Check if there is any output + 4. Check if the command returns the hashed string using the algorithm set in nsslapd-rootpwstoragescheme + + :expectedresults: + 1. nsslapd-rootpwstoragescheme & passwordStorageScheme should set to SSHA256 & SSHA384 respectively + 2. Execution should PASS + 3. There should be an output from the command + 4. 
Command should return the hashed string using the algorithm set in nsslapd-rootpwstoragescheme + """ + + log.info('Running test_clu_pwdhash_mod...') + topology_st.standalone.config.set('nsslapd-rootpwstoragescheme', 'SSHA256') + topology_st.standalone.config.set('passwordStorageScheme', 'SSHA384') + cmd = [os.path.join(topology_st.standalone.get_bin_dir(), 'pwdhash'), '-D', '/etc/dirsrv/slapd-standalone1', + 'password'] + result = subprocess.check_output(cmd) + stdout = ensure_str(result) + assert result, "Failed to run pwdhash" + assert 'SSHA256' in stdout + log.info('pwdhash generated: ' + stdout) + log.info('returned the hashed string using the algorithm set in nsslapd-rootpwstoragescheme') + + +if __name__ == '__main__': + # Run isolated + # -s for DEBUG mode + CURRENT_FILE = os.path.realpath(__file__) + pytest.main("-s %s" % CURRENT_FILE) diff --git a/dirsrvtests/tests/suites/clu/dbgen_test.py b/dirsrvtests/tests/suites/clu/dbgen_test.py new file mode 100644 index 0000000..de6020a --- /dev/null +++ b/dirsrvtests/tests/suites/clu/dbgen_test.py @@ -0,0 +1,789 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2020 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. 
+# --- END COPYRIGHT BLOCK --- +# +import time +import subprocess +import pytest + +from lib389.cli_ctl.dbgen import * +from lib389.cos import CosClassicDefinitions, CosPointerDefinitions, CosIndirectDefinitions, CosTemplates +from lib389.idm.account import Accounts +from lib389.idm.group import Groups +from lib389.idm.role import ManagedRoles, FilteredRoles, NestedRoles +from lib389.tasks import * +from lib389.utils import * +from lib389.topologies import topology_st +from lib389.cli_base import FakeArgs + +pytestmark = pytest.mark.tier0 + +LOG_FILE = '/tmp/dbgen.log' +logging.getLogger(__name__).setLevel(logging.DEBUG) +log = logging.getLogger(__name__) + + +@pytest.fixture(scope="function") +def set_log_file_and_ldif(topology_st, request): + global ldif_file + ldif_file = get_ldif_dir(topology_st.standalone) + '/created.ldif' + + fh = logging.FileHandler(LOG_FILE) + fh.setLevel(logging.DEBUG) + log.addHandler(fh) + + def fin(): + log.info('Delete files') + os.remove(LOG_FILE) + os.remove(ldif_file) + + request.addfinalizer(fin) + + +def run_offline_import(instance, ldif_file): + log.info('Stopping the server and running offline import...') + instance.stop() + assert instance.ldif2db(bename=DEFAULT_BENAME, suffixes=[DEFAULT_SUFFIX], encrypt=None, excludeSuffixes=None, + import_file=ldif_file) + instance.start() + + +def run_ldapmodify_from_file(instance, ldif_file, output_to_check=None): + LDAP_MOD = '/usr/bin/ldapmodify' + log.info('Add entries from ldif file with ldapmodify') + result = subprocess.check_output([LDAP_MOD, '-cx', '-D', DN_DM, '-w', PASSWORD, + '-H', f'ldap://{instance.host}:{instance.port}', '-af', ldif_file]) + + if output_to_check is not None: + assert output_to_check in ensure_str(result) + + +def check_value_in_log_and_reset(content_list): + with open(LOG_FILE, 'r+') as f: + file_content = f.read() + log.info('Check if content is present in output') + for item in content_list: + assert item in file_content + + log.info('Reset log file for 
next test') + f.truncate(0) + + +@pytest.mark.ds50545 +@pytest.mark.bz1798394 +@pytest.mark.skipif(ds_is_older("1.4.3"), reason="Not implemented") +def test_dsconf_dbgen_users(topology_st, set_log_file_and_ldif): + """Test ldifgen (formerly dbgen) tool to create ldif with users + + :id: 426b5b94-9923-454d-a736-7e71ca985e98 + :setup: Standalone instance + :steps: + 1. Create DS instance + 2. Run ldifgen to generate ldif with users + 3. Import generated ldif to database + 4. Check it was properly imported + :expectedresults: + 1. Success + 2. Success + 3. Success + 4. Success + """ + + standalone = topology_st.standalone + + args = FakeArgs() + args.suffix = DEFAULT_SUFFIX + args.parent = 'ou=people,dc=example,dc=com' + args.number = 1000 + args.rdn_cn = False + args.generic = True + args.start_idx = 50 + args.localize = False + args.ldif_file = ldif_file + + content_list = ['Generating LDIF with the following options:', + 'suffix={}'.format(args.suffix), + 'parent={}'.format(args.parent), + 'number={}'.format(args.number), + 'rdn-cn={}'.format(args.rdn_cn), + 'generic={}'.format(args.generic), + 'start-idx={}'.format(args.start_idx), + 'localize={}'.format(args.localize), + 'ldif-file={}'.format(args.ldif_file), + 'Writing LDIF', + 'Successfully created LDIF file: {}'.format(args.ldif_file)] + + log.info('Run ldifgen to create users ldif') + dbgen_create_users(standalone, log, args) + + log.info('Check if file exists') + assert os.path.exists(ldif_file) + + check_value_in_log_and_reset(content_list) + + log.info('Get number of accounts before import') + accounts = Accounts(standalone, DEFAULT_SUFFIX) + count_account = len(accounts.filter('(uid=*)')) + + run_offline_import(standalone, ldif_file) + + log.info('Check that accounts are imported') + assert len(accounts.filter('(uid=*)')) > count_account + + +@pytest.mark.ds50545 +@pytest.mark.bz1798394 +@pytest.mark.skipif(ds_is_older("1.4.3"), reason="Not implemented") +def test_dsconf_dbgen_groups(topology_st, 
set_log_file_and_ldif): + """Test ldifgen (formerly dbgen) tool to create ldif with group + + :id: 97207413-9a93-4065-a5ec-63aa93801a3f + :setup: Standalone instance + :steps: + 1. Create DS instance + 2. Run ldifgen to generate ldif with group + 3. Import generated ldif to database + 4. Check it was properly imported + :expectedresults: + 1. Success + 2. Success + 3. Success + 4. Success + """ + LDAP_RESULT = 'adding new entry "cn=myGroup-1,ou=groups,dc=example,dc=com"' + + standalone = topology_st.standalone + + args = FakeArgs() + args.NAME = 'myGroup' + args.parent = 'ou=groups,dc=example,dc=com' + args.suffix = DEFAULT_SUFFIX + args.number = 1 + args.num_members = 1000 + args.create_members = True + args.member_attr = 'uniquemember' + args.member_parent = 'ou=people,dc=example,dc=com' + args.ldif_file = ldif_file + + content_list = ['Generating LDIF with the following options:', + 'NAME={}'.format(args.NAME), + 'number={}'.format(args.number), + 'suffix={}'.format(args.suffix), + 'num-members={}'.format(args.num_members), + 'create-members={}'.format(args.create_members), + 'member-parent={}'.format(args.member_parent), + 'member-attr={}'.format(args.member_attr), + 'ldif-file={}'.format(args.ldif_file), + 'Writing LDIF', + 'Successfully created LDIF file: {}'.format(args.ldif_file)] + + log.info('Run ldifgen to create group ldif') + dbgen_create_groups(standalone, log, args) + + log.info('Check if file exists') + assert os.path.exists(ldif_file) + + check_value_in_log_and_reset(content_list) + + log.info('Get number of accounts before import') + accounts = Accounts(standalone, DEFAULT_SUFFIX) + count_account = len(accounts.filter('(uid=*)')) + + # Groups, COS, Roles and modification ldifs are designed to be used by ldapmodify, not ldif2db + # ldapmodify will complain about already existing parent which causes subprocess to return exit code != 0 + with pytest.raises(subprocess.CalledProcessError): + run_ldapmodify_from_file(standalone, ldif_file, LDAP_RESULT) 
+ + log.info('Check that accounts are imported') + assert len(accounts.filter('(uid=*)')) > count_account + + log.info('Check that group is imported') + groups = Groups(standalone, DEFAULT_SUFFIX) + assert groups.exists(args.NAME + '-1') + new_group = groups.get(args.NAME + '-1') + new_group.present('uniquemember', 'uid=group_entry1-0152,ou=people,dc=example,dc=com') + + +@pytest.mark.ds50545 +@pytest.mark.bz1798394 +@pytest.mark.skipif(ds_is_older("1.4.3"), reason="Not implemented") +def test_dsconf_dbgen_cos_classic(topology_st, set_log_file_and_ldif): + """Test ldifgen (formerly dbgen) tool to create a COS definition + + :id: 8557f994-8a91-4f8a-86f6-9cb826a0b8fd + :setup: Standalone instance + :steps: + 1. Create DS instance + 2. Run ldifgen to generate ldif with classic COS definition + 3. Import generated ldif to database + 4. Check it was properly imported + :expectedresults: + 1. Success + 2. Success + 3. Success + 4. Success + """ + + LDAP_RESULT = 'adding new entry "cn=My_Postal_Def,ou=cos definitions,dc=example,dc=com"' + + standalone = topology_st.standalone + + args = FakeArgs() + args.type = 'classic' + args.NAME = 'My_Postal_Def' + args.parent = 'ou=cos definitions,dc=example,dc=com' + args.create_parent = True + args.cos_specifier = 'businessCategory' + args.cos_attr = ['postalcode', 'telephonenumber'] + args.cos_template = 'cn=sales,cn=classicCoS,dc=example,dc=com' + args.ldif_file = ldif_file + + content_list = ['Generating LDIF with the following options:', + 'NAME={}'.format(args.NAME), + 'type={}'.format(args.type), + 'parent={}'.format(args.parent), + 'create-parent={}'.format(args.create_parent), + 'cos-specifier={}'.format(args.cos_specifier), + 'cos-template={}'.format(args.cos_template), + 'cos-attr={}'.format(args.cos_attr), + 'ldif-file={}'.format(args.ldif_file), + 'Writing LDIF', + 'Successfully created LDIF file: {}'.format(args.ldif_file)] + + log.info('Run ldifgen to create COS definition ldif') + dbgen_create_cos_def(standalone, 
log, args) + + log.info('Check if file exists') + assert os.path.exists(ldif_file) + + check_value_in_log_and_reset(content_list) + + # Groups, COS, Roles and modification ldifs are designed to be used by ldapmodify, not ldif2db + run_ldapmodify_from_file(standalone, ldif_file, LDAP_RESULT) + + log.info('Check that COS definition is imported') + cos_def = CosClassicDefinitions(standalone, args.parent) + assert cos_def.exists(args.NAME) + new_cos = cos_def.get(args.NAME) + assert new_cos.present('cosTemplateDN', args.cos_template) + assert new_cos.present('cosSpecifier', args.cos_specifier) + assert new_cos.present('cosAttribute', args.cos_attr[0]) + assert new_cos.present('cosAttribute', args.cos_attr[1]) + + +@pytest.mark.ds50545 +@pytest.mark.bz1798394 +@pytest.mark.skipif(ds_is_older("1.4.3"), reason="Not implemented") +def test_dsconf_dbgen_cos_pointer(topology_st, set_log_file_and_ldif): + """Test ldifgen (formerly dbgen) tool to create a COS definition + + :id: 6b26ca6d-226a-4f93-925e-faf95cc20214 + :setup: Standalone instance + :steps: + 1. Create DS instance + 2. Run ldifgen to generate ldif with pointer COS definition + 3. Import generated ldif to database + 4. Check it was properly imported + :expectedresults: + 1. Success + 2. Success + 3. Success + 4. 
Success + """ + + LDAP_RESULT = 'adding new entry "cn=My_Postal_Def_pointer,ou=cos pointer definitions,dc=example,dc=com"' + + standalone = topology_st.standalone + + args = FakeArgs() + args.type = 'pointer' + args.NAME = 'My_Postal_Def_pointer' + args.parent = 'ou=cos pointer definitions,dc=example,dc=com' + args.create_parent = True + args.cos_specifier = None + args.cos_attr = ['postalcode', 'telephonenumber'] + args.cos_template = 'cn=sales,cn=pointerCoS,dc=example,dc=com' + args.ldif_file = ldif_file + + content_list = ['Generating LDIF with the following options:', + 'NAME={}'.format(args.NAME), + 'type={}'.format(args.type), + 'parent={}'.format(args.parent), + 'create-parent={}'.format(args.create_parent), + 'cos-template={}'.format(args.cos_template), + 'cos-attr={}'.format(args.cos_attr), + 'ldif-file={}'.format(args.ldif_file), + 'Writing LDIF', + 'Successfully created LDIF file: {}'.format(args.ldif_file)] + + log.info('Run ldifgen to create COS definition ldif') + dbgen_create_cos_def(standalone, log, args) + + log.info('Check if file exists') + assert os.path.exists(ldif_file) + + check_value_in_log_and_reset(content_list) + + # Groups, COS, Roles and modification ldifs are designed to be used by ldapmodify, not ldif2db + run_ldapmodify_from_file(standalone, ldif_file, LDAP_RESULT) + + log.info('Check that COS definition is imported') + cos_def = CosPointerDefinitions(standalone, args.parent) + assert cos_def.exists(args.NAME) + new_cos = cos_def.get(args.NAME) + assert new_cos.present('cosTemplateDN', args.cos_template) + assert new_cos.present('cosAttribute', args.cos_attr[0]) + assert new_cos.present('cosAttribute', args.cos_attr[1]) + + +@pytest.mark.ds50545 +@pytest.mark.bz1798394 +@pytest.mark.skipif(ds_is_older("1.4.3"), reason="Not implemented") +def test_dsconf_dbgen_cos_indirect(topology_st, set_log_file_and_ldif): + """Test ldifgen (formerly dbgen) tool to create a COS definition + + :id: ab4b799e-e801-432a-a61d-badad2628203 + :setup: 
Standalone instance + :steps: + 1. Create DS instance + 2. Run ldifgen to generate ldif with indirect COS definition + 3. Import generated ldif to database + 4. Check it was properly imported + :expectedresults: + 1. Success + 2. Success + 3. Success + 4. Success + """ + + LDAP_RESULT = 'adding new entry "cn=My_Postal_Def_indirect,ou=cos indirect definitions,dc=example,dc=com"' + + standalone = topology_st.standalone + + args = FakeArgs() + args.type = 'indirect' + args.NAME = 'My_Postal_Def_indirect' + args.parent = 'ou=cos indirect definitions,dc=example,dc=com' + args.create_parent = True + args.cos_specifier = 'businessCategory' + args.cos_attr = ['postalcode', 'telephonenumber'] + args.cos_template = None + args.ldif_file = ldif_file + + content_list = ['Generating LDIF with the following options:', + 'NAME={}'.format(args.NAME), + 'type={}'.format(args.type), + 'parent={}'.format(args.parent), + 'create-parent={}'.format(args.create_parent), + 'cos-specifier={}'.format(args.cos_specifier), + 'cos-attr={}'.format(args.cos_attr), + 'ldif-file={}'.format(args.ldif_file), + 'Writing LDIF', + 'Successfully created LDIF file: {}'.format(args.ldif_file)] + + log.info('Run ldifgen to create COS definition ldif') + dbgen_create_cos_def(standalone, log, args) + + log.info('Check if file exists') + assert os.path.exists(ldif_file) + + check_value_in_log_and_reset(content_list) + + # Groups, COS, Roles and modification ldifs are designed to be used by ldapmodify, not ldif2db + run_ldapmodify_from_file(standalone, ldif_file, LDAP_RESULT) + + log.info('Check that COS definition is imported') + cos_def = CosIndirectDefinitions(standalone, args.parent) + assert cos_def.exists(args.NAME) + new_cos = cos_def.get(args.NAME) + assert new_cos.present('cosIndirectSpecifier', args.cos_specifier) + assert new_cos.present('cosAttribute', args.cos_attr[0]) + assert new_cos.present('cosAttribute', args.cos_attr[1]) + + +@pytest.mark.ds50545 +@pytest.mark.bz1798394 
+@pytest.mark.skipif(ds_is_older("1.4.3"), reason="Not implemented") +def test_dsconf_dbgen_cos_template(topology_st, set_log_file_and_ldif): + """Test ldifgen (formerly dbgen) tool to create a COS template + + :id: 544017c7-4a82-4e7d-a047-00b68a28e070 + :setup: Standalone instance + :steps: + 1. Create DS instance + 2. Run ldifgen to generate ldif with COS template + 3. Import generated ldif to database + 4. Check it was properly imported + :expectedresults: + 1. Success + 2. Success + 3. Success + 4. Success + """ + + LDAP_RESULT = 'adding new entry "cn=My_Template,ou=cos templates,dc=example,dc=com"' + + standalone = topology_st.standalone + + args = FakeArgs() + args.NAME = 'My_Template' + args.parent = 'ou=cos templates,dc=example,dc=com' + args.create_parent = True + args.cos_priority = 1 + args.cos_attr_val = 'postalcode:12345' + args.ldif_file = ldif_file + + content_list = ['Generating LDIF with the following options:', + 'NAME={}'.format(args.NAME), + 'parent={}'.format(args.parent), + 'create-parent={}'.format(args.create_parent), + 'cos-priority={}'.format(args.cos_priority), + 'cos-attr-val={}'.format(args.cos_attr_val), + 'ldif-file={}'.format(args.ldif_file), + 'Writing LDIF', + 'Successfully created LDIF file: {}'.format(args.ldif_file)] + + log.info('Run ldifgen to create COS template ldif') + dbgen_create_cos_tmp(standalone, log, args) + + log.info('Check if file exists') + assert os.path.exists(ldif_file) + + check_value_in_log_and_reset(content_list) + + # Groups, COS, Roles and modification ldifs are designed to be used by ldapmodify, not ldif2db + run_ldapmodify_from_file(standalone, ldif_file, LDAP_RESULT) + + log.info('Check that COS template is imported') + cos_temp = CosTemplates(standalone, args.parent) + assert cos_temp.exists(args.NAME) + new_cos = cos_temp.get(args.NAME) + assert new_cos.present('cosPriority', str(args.cos_priority)) + assert new_cos.present('postalcode', '12345') + + +@pytest.mark.ds50545 +@pytest.mark.bz1798394 
+@pytest.mark.skipif(ds_is_older("1.4.3"), reason="Not implemented") +def test_dsconf_dbgen_managed_role(topology_st, set_log_file_and_ldif): + """Test ldifgen (formerly dbgen) tool to create a managed role + + :id: 10e77b41-0bc1-4ad5-a144-2c5107455b92 + :setup: Standalone instance + :steps: + 1. Create DS instance + 2. Run ldifgen to generate ldif with managed role + 3. Import generated ldif to database + 4. Check it was properly imported + :expectedresults: + 1. Success + 2. Success + 3. Success + 4. Success + """ + + LDAP_RESULT = 'adding new entry "cn=My_Managed_Role,ou=managed roles,dc=example,dc=com"' + + standalone = topology_st.standalone + + args = FakeArgs() + + args.NAME = 'My_Managed_Role' + args.parent = 'ou=managed roles,dc=example,dc=com' + args.create_parent = True + args.type = 'managed' + args.filter = None + args.role_dn = None + args.ldif_file = ldif_file + + content_list = ['Generating LDIF with the following options:', + 'NAME={}'.format(args.NAME), + 'parent={}'.format(args.parent), + 'create-parent={}'.format(args.create_parent), + 'type={}'.format(args.type), + 'ldif-file={}'.format(args.ldif_file), + 'Writing LDIF', + 'Successfully created LDIF file: {}'.format(args.ldif_file)] + + log.info('Run ldifgen to create managed role ldif') + dbgen_create_role(standalone, log, args) + + log.info('Check if file exists') + assert os.path.exists(ldif_file) + + check_value_in_log_and_reset(content_list) + + # Groups, COS, Roles and modification ldifs are designed to be used by ldapmodify, not ldif2db + run_ldapmodify_from_file(standalone, ldif_file, LDAP_RESULT) + + log.info('Check that managed role is imported') + roles = ManagedRoles(standalone, DEFAULT_SUFFIX) + assert roles.exists(args.NAME) + + +@pytest.mark.ds50545 +@pytest.mark.bz1798394 +@pytest.mark.skipif(ds_is_older("1.4.3"), reason="Not implemented") +def test_dsconf_dbgen_filtered_role(topology_st, set_log_file_and_ldif): + """Test ldifgen (formerly dbgen) tool to create a filtered role + 
+ :id: cb3c8ea8-4234-40e2-8810-fb6a25973927 + :setup: Standalone instance + :steps: + 1. Create DS instance + 2. Run ldifgen to generate ldif with filtered role + 3. Import generated ldif to database + 4. Check it was properly imported + :expectedresults: + 1. Success + 2. Success + 3. Success + 4. Success + """ + + LDAP_RESULT = 'adding new entry "cn=My_Filtered_Role,ou=filtered roles,dc=example,dc=com"' + + standalone = topology_st.standalone + + args = FakeArgs() + + args.NAME = 'My_Filtered_Role' + args.parent = 'ou=filtered roles,dc=example,dc=com' + args.create_parent = True + args.type = 'filtered' + args.filter = '"objectclass=posixAccount"' + args.role_dn = None + args.ldif_file = ldif_file + + content_list = ['Generating LDIF with the following options:', + 'NAME={}'.format(args.NAME), + 'parent={}'.format(args.parent), + 'create-parent={}'.format(args.create_parent), + 'type={}'.format(args.type), + 'filter={}'.format(args.filter), + 'ldif-file={}'.format(args.ldif_file), + 'Writing LDIF', + 'Successfully created LDIF file: {}'.format(args.ldif_file)] + + log.info('Run ldifgen to create filtered role ldif') + dbgen_create_role(standalone, log, args) + + log.info('Check if file exists') + assert os.path.exists(ldif_file) + + check_value_in_log_and_reset(content_list) + + # Groups, COS, Roles and modification ldifs are designed to be used by ldapmodify, not ldif2db + run_ldapmodify_from_file(standalone, ldif_file, LDAP_RESULT) + + log.info('Check that filtered role is imported') + roles = FilteredRoles(standalone, DEFAULT_SUFFIX) + assert roles.exists(args.NAME) + new_role = roles.get(args.NAME) + assert new_role.present('nsRoleFilter', args.filter) + + +@pytest.mark.ds50545 +@pytest.mark.bz1798394 +@pytest.mark.skipif(ds_is_older("1.4.3"), reason="Not implemented") +def test_dsconf_dbgen_nested_role(topology_st, set_log_file_and_ldif): + """Test ldifgen (formerly dbgen) tool to create a nested role + + :id: 97fff0a8-3103-4adb-be04-2799ff58d8f4 + :setup: 
Standalone instance + :steps: + 1. Create DS instance + 2. Run ldifgen to generate ldif with nested role + 3. Import generated ldif to database + 4. Check it was properly imported + :expectedresults: + 1. Success + 2. Success + 3. Success + 4. Success + """ + + LDAP_RESULT = 'adding new entry "cn=My_Nested_Role,ou=nested roles,dc=example,dc=com"' + + standalone = topology_st.standalone + + args = FakeArgs() + args.NAME = 'My_Nested_Role' + args.parent = 'ou=nested roles,dc=example,dc=com' + args.create_parent = True + args.type = 'nested' + args.filter = None + args.role_dn = ['cn=some_role,ou=roles,dc=example,dc=com'] + args.ldif_file = ldif_file + + content_list = ['Generating LDIF with the following options:', + 'NAME={}'.format(args.NAME), + 'parent={}'.format(args.parent), + 'create-parent={}'.format(args.create_parent), + 'type={}'.format(args.type), + 'role-dn={}'.format(args.role_dn), + 'ldif-file={}'.format(args.ldif_file), + 'Writing LDIF', + 'Successfully created LDIF file: {}'.format(args.ldif_file)] + + log.info('Run ldifgen to create nested role ldif') + dbgen_create_role(standalone, log, args) + + log.info('Check if file exists') + assert os.path.exists(ldif_file) + + check_value_in_log_and_reset(content_list) + + # Groups, COS, Roles and modification ldifs are designed to be used by ldapmodify, not ldif2db + run_ldapmodify_from_file(standalone, ldif_file, LDAP_RESULT) + + log.info('Check that nested role is imported') + roles = NestedRoles(standalone, DEFAULT_SUFFIX) + assert roles.exists(args.NAME) + new_role = roles.get(args.NAME) + assert new_role.present('nsRoleDN', args.role_dn[0]) + + +@pytest.mark.ds50545 +@pytest.mark.bz1798394 +@pytest.mark.skipif(ds_is_older("1.4.3"), reason="Not implemented") +def test_dsconf_dbgen_mod_ldif_mixed(topology_st, set_log_file_and_ldif): + """Test ldifgen (formerly dbgen) tool to create mixed modification ldif + + :id: 4a2e0901-2b48-452e-a4a0-507735132c8d + :setup: Standalone instance + :steps: + 1. 
Create DS instance + 2. Run ldifgen to generate modification ldif + 3. Import generated ldif to database + 4. Check it was properly imported + :expectedresults: + 1. Success + 2. Success + 3. Success + 4. Success + """ + + standalone = topology_st.standalone + + args = FakeArgs() + args.parent = DEFAULT_SUFFIX + args.create_users = True + args.delete_users = True + args.create_parent = False + args.num_users = 1000 + args.add_users = 100 + args.del_users = 999 + args.modrdn_users = 100 + args.mod_users = 10 + args.mod_attrs = ['cn', 'uid', 'sn'] + args.randomize = False + args.ldif_file = ldif_file + + content_list = ['Generating LDIF with the following options:', + 'create-users={}'.format(args.create_users), + 'parent={}'.format(args.parent), + 'create-parent={}'.format(args.create_parent), + 'delete-users={}'.format(args.delete_users), + 'num-users={}'.format(args.num_users), + 'add-users={}'.format(args.add_users), + 'del-users={}'.format(args.del_users), + 'modrdn-users={}'.format(args.modrdn_users), + 'mod-users={}'.format(args.mod_users), + 'mod-attrs={}'.format(args.mod_attrs), + 'randomize={}'.format(args.randomize), + 'ldif-file={}'.format(args.ldif_file), + 'Writing LDIF', + 'Successfully created LDIF file: {}'.format(args.ldif_file)] + + log.info('Run ldifgen to create modification ldif') + dbgen_create_mods(standalone, log, args) + + log.info('Check if file exists') + assert os.path.exists(ldif_file) + + check_value_in_log_and_reset(content_list) + + log.info('Get number of accounts before import') + accounts = Accounts(standalone, DEFAULT_SUFFIX) + count_account = len(accounts.filter('(uid=*)')) + + # Groups, COS, Roles and modification ldifs are designed to be used by ldapmodify, not ldif2db + # ldapmodify will complain about a lot of changes done which causes subprocess to return exit code != 0 + with pytest.raises(subprocess.CalledProcessError): + run_ldapmodify_from_file(standalone, ldif_file) + + log.info('Check that some accounts are imported') 
+ assert len(accounts.filter('(uid=*)')) > count_account + + +@pytest.mark.ds50545 +@pytest.mark.bz1798394 +@pytest.mark.skipif(ds_is_older("1.4.3"), reason="Not implemented") +def test_dsconf_dbgen_nested_ldif(topology_st, set_log_file_and_ldif): + """Test ldifgen (formerly dbgen) tool to create nested ldif + + :id: 9c281c28-4169-45e0-8c07-c5502d9a7585 + :setup: Standalone instance + :steps: + 1. Create DS instance + 2. Run ldifgen to generate nested ldif + 3. Import generated ldif to database + 4. Check it was properly imported + :expectedresults: + 1. Success + 2. Success + 3. Success + 4. Success + """ + + standalone = topology_st.standalone + + args = FakeArgs() + args.suffix = DEFAULT_SUFFIX + args.node_limit = 100 + args.num_users = 600 + args.ldif_file = ldif_file + + content_list = ['Generating LDIF with the following options:', + 'suffix={}'.format(args.suffix), + 'node-limit={}'.format(args.node_limit), + 'num-users={}'.format(args.num_users), + 'ldif-file={}'.format(args.ldif_file), + 'Writing LDIF', + 'Successfully created nested LDIF file ({}) containing 6 nodes/subtrees'.format(args.ldif_file)] + + log.info('Run ldifgen to create nested ldif') + dbgen_create_nested(standalone, log, args) + + log.info('Check if file exists') + assert os.path.exists(ldif_file) + + check_value_in_log_and_reset(content_list) + + log.info('Get number of accounts before import') + accounts = Accounts(standalone, DEFAULT_SUFFIX) + count_account = len(accounts.filter('(uid=*)')) + count_ou = len(accounts.filter('(ou=*)')) + + # Groups, COS, Roles and modification ldifs are designed to be used by ldapmodify, not ldif2db + # ldapmodify will complain about already existing suffix which causes subprocess to return exit code != 0 + with pytest.raises(subprocess.CalledProcessError): + run_ldapmodify_from_file(standalone, ldif_file) + + standalone.restart() + + log.info('Check that accounts are imported') + assert len(accounts.filter('(uid=*)')) > count_account + assert 
len(accounts.filter('(ou=*)')) > count_ou + + +if __name__ == '__main__': + # Run isolated + # -s for DEBUG mode + CURRENT_FILE = os.path.realpath(__file__) + pytest.main("-s %s" % CURRENT_FILE) diff --git a/dirsrvtests/tests/suites/clu/dbgen_test_usan.py b/dirsrvtests/tests/suites/clu/dbgen_test_usan.py new file mode 100644 index 0000000..13d2e35 --- /dev/null +++ b/dirsrvtests/tests/suites/clu/dbgen_test_usan.py @@ -0,0 +1,807 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2020 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. +# --- END COPYRIGHT BLOCK --- +# +import time + +""" + This file contains tests similar to dbgen_test.py + except that paramaters that are number are expressed as string + (to mimic the parameters parser default behavior which returns an + int when parsing "option value" and a string when parsing "option=value" + This file has been generated by usign: +sed ' +9r z1 +s/ test_/ test_usan/ +/args.*= [0-9]/s,[0-9]*$,"&", +/:id:/s/.$/1/ +' dbgen_test.py > dbgen_test_usan.py + ( with z1 file containing this comment ) +""" + + + +import subprocess +import pytest + +from lib389.cli_ctl.dbgen import * +from lib389.cos import CosClassicDefinitions, CosPointerDefinitions, CosIndirectDefinitions, CosTemplates +from lib389.idm.account import Accounts +from lib389.idm.group import Groups +from lib389.idm.role import ManagedRoles, FilteredRoles, NestedRoles +from lib389.tasks import * +from lib389.utils import * +from lib389.topologies import topology_st +from lib389.cli_base import FakeArgs + +pytestmark = pytest.mark.tier0 + +LOG_FILE = '/tmp/dbgen.log' +logging.getLogger(__name__).setLevel(logging.DEBUG) +log = logging.getLogger(__name__) + + +@pytest.fixture(scope="function") +def set_log_file_and_ldif(topology_st, request): + global ldif_file + ldif_file = get_ldif_dir(topology_st.standalone) + '/created.ldif' + + fh = logging.FileHandler(LOG_FILE) + 
fh.setLevel(logging.DEBUG) + log.addHandler(fh) + + def fin(): + log.info('Delete files') + os.remove(LOG_FILE) + os.remove(ldif_file) + + request.addfinalizer(fin) + + +def run_offline_import(instance, ldif_file): + log.info('Stopping the server and running offline import...') + instance.stop() + assert instance.ldif2db(bename=DEFAULT_BENAME, suffixes=[DEFAULT_SUFFIX], encrypt=None, excludeSuffixes=None, + import_file=ldif_file) + instance.start() + + +def run_ldapmodify_from_file(instance, ldif_file, output_to_check=None): + LDAP_MOD = '/usr/bin/ldapmodify' + log.info('Add entries from ldif file with ldapmodify') + result = subprocess.check_output([LDAP_MOD, '-cx', '-D', DN_DM, '-w', PASSWORD, + '-H', f'ldap://{instance.host}:{instance.port}', '-af', ldif_file]) + + if output_to_check is not None: + assert output_to_check in ensure_str(result) + + +def check_value_in_log_and_reset(content_list): + with open(LOG_FILE, 'r+') as f: + file_content = f.read() + log.info('Check if content is present in output') + for item in content_list: + assert item in file_content + + log.info('Reset log file for next test') + f.truncate(0) + + +@pytest.mark.ds50545 +@pytest.mark.bz1798394 +@pytest.mark.skipif(ds_is_older("1.4.3"), reason="Not implemented") +def test_usandsconf_dbgen_users(topology_st, set_log_file_and_ldif): + """Test ldifgen (formerly dbgen) tool to create ldif with users + + :id: 426b5b94-9923-454d-a736-7e71ca985e91 + :setup: Standalone instance + :steps: + 1. Create DS instance + 2. Run ldifgen to generate ldif with users + 3. Import generated ldif to database + 4. Check it was properly imported + :expectedresults: + 1. Success + 2. Success + 3. Success + 4. 
Success + """ + + standalone = topology_st.standalone + + args = FakeArgs() + args.suffix = DEFAULT_SUFFIX + args.parent = 'ou=people,dc=example,dc=com' + args.number = "1000" + args.rdn_cn = False + args.generic = True + args.start_idx = "50" + args.localize = False + args.ldif_file = ldif_file + + content_list = ['Generating LDIF with the following options:', + 'suffix={}'.format(args.suffix), + 'parent={}'.format(args.parent), + 'number={}'.format(args.number), + 'rdn-cn={}'.format(args.rdn_cn), + 'generic={}'.format(args.generic), + 'start-idx={}'.format(args.start_idx), + 'localize={}'.format(args.localize), + 'ldif-file={}'.format(args.ldif_file), + 'Writing LDIF', + 'Successfully created LDIF file: {}'.format(args.ldif_file)] + + log.info('Run ldifgen to create users ldif') + dbgen_create_users(standalone, log, args) + + log.info('Check if file exists') + assert os.path.exists(ldif_file) + + check_value_in_log_and_reset(content_list) + + log.info('Get number of accounts before import') + accounts = Accounts(standalone, DEFAULT_SUFFIX) + count_account = len(accounts.filter('(uid=*)')) + + run_offline_import(standalone, ldif_file) + + log.info('Check that accounts are imported') + assert len(accounts.filter('(uid=*)')) > count_account + + +@pytest.mark.ds50545 +@pytest.mark.bz1798394 +@pytest.mark.skipif(ds_is_older("1.4.3"), reason="Not implemented") +def test_usandsconf_dbgen_groups(topology_st, set_log_file_and_ldif): + """Test ldifgen (formerly dbgen) tool to create ldif with group + + :id: 97207413-9a93-4065-a5ec-63aa93801a31 + :setup: Standalone instance + :steps: + 1. Create DS instance + 2. Run ldifgen to generate ldif with group + 3. Import generated ldif to database + 4. Check it was properly imported + :expectedresults: + 1. Success + 2. Success + 3. Success + 4. 
Success + """ + LDAP_RESULT = 'adding new entry "cn=myGroup-1,ou=groups,dc=example,dc=com"' + + standalone = topology_st.standalone + + args = FakeArgs() + args.NAME = 'myGroup' + args.parent = 'ou=groups,dc=example,dc=com' + args.suffix = DEFAULT_SUFFIX + args.number = "1" + args.num_members = "1000" + args.create_members = True + args.member_attr = 'uniquemember' + args.member_parent = 'ou=people,dc=example,dc=com' + args.ldif_file = ldif_file + + content_list = ['Generating LDIF with the following options:', + 'NAME={}'.format(args.NAME), + 'number={}'.format(args.number), + 'suffix={}'.format(args.suffix), + 'num-members={}'.format(args.num_members), + 'create-members={}'.format(args.create_members), + 'member-parent={}'.format(args.member_parent), + 'member-attr={}'.format(args.member_attr), + 'ldif-file={}'.format(args.ldif_file), + 'Writing LDIF', + 'Successfully created LDIF file: {}'.format(args.ldif_file)] + + log.info('Run ldifgen to create group ldif') + dbgen_create_groups(standalone, log, args) + + log.info('Check if file exists') + assert os.path.exists(ldif_file) + + check_value_in_log_and_reset(content_list) + + log.info('Get number of accounts before import') + accounts = Accounts(standalone, DEFAULT_SUFFIX) + count_account = len(accounts.filter('(uid=*)')) + + # Groups, COS, Roles and modification ldifs are designed to be used by ldapmodify, not ldif2db + # ldapmodify will complain about already existing parent which causes subprocess to return exit code != 0 + with pytest.raises(subprocess.CalledProcessError): + run_ldapmodify_from_file(standalone, ldif_file, LDAP_RESULT) + + log.info('Check that accounts are imported') + assert len(accounts.filter('(uid=*)')) > count_account + + log.info('Check that group is imported') + groups = Groups(standalone, DEFAULT_SUFFIX) + assert groups.exists(args.NAME + '-1') + new_group = groups.get(args.NAME + '-1') + new_group.present('uniquemember', 'uid=group_entry1-0152,ou=people,dc=example,dc=com') + + 
+@pytest.mark.ds50545 +@pytest.mark.bz1798394 +@pytest.mark.skipif(ds_is_older("1.4.3"), reason="Not implemented") +def test_usandsconf_dbgen_cos_classic(topology_st, set_log_file_and_ldif): + """Test ldifgen (formerly dbgen) tool to create a COS definition + + :id: 8557f994-8a91-4f8a-86f6-9cb826a0b8f1 + :setup: Standalone instance + :steps: + 1. Create DS instance + 2. Run ldifgen to generate ldif with classic COS definition + 3. Import generated ldif to database + 4. Check it was properly imported + :expectedresults: + 1. Success + 2. Success + 3. Success + 4. Success + """ + + LDAP_RESULT = 'adding new entry "cn=My_Postal_Def,ou=cos definitions,dc=example,dc=com"' + + standalone = topology_st.standalone + + args = FakeArgs() + args.type = 'classic' + args.NAME = 'My_Postal_Def' + args.parent = 'ou=cos definitions,dc=example,dc=com' + args.create_parent = True + args.cos_specifier = 'businessCategory' + args.cos_attr = ['postalcode', 'telephonenumber'] + args.cos_template = 'cn=sales,cn=classicCoS,dc=example,dc=com' + args.ldif_file = ldif_file + + content_list = ['Generating LDIF with the following options:', + 'NAME={}'.format(args.NAME), + 'type={}'.format(args.type), + 'parent={}'.format(args.parent), + 'create-parent={}'.format(args.create_parent), + 'cos-specifier={}'.format(args.cos_specifier), + 'cos-template={}'.format(args.cos_template), + 'cos-attr={}'.format(args.cos_attr), + 'ldif-file={}'.format(args.ldif_file), + 'Writing LDIF', + 'Successfully created LDIF file: {}'.format(args.ldif_file)] + + log.info('Run ldifgen to create COS definition ldif') + dbgen_create_cos_def(standalone, log, args) + + log.info('Check if file exists') + assert os.path.exists(ldif_file) + + check_value_in_log_and_reset(content_list) + + # Groups, COS, Roles and modification ldifs are designed to be used by ldapmodify, not ldif2db + run_ldapmodify_from_file(standalone, ldif_file, LDAP_RESULT) + + log.info('Check that COS definition is imported') + cos_def = 
CosClassicDefinitions(standalone, args.parent) + assert cos_def.exists(args.NAME) + new_cos = cos_def.get(args.NAME) + assert new_cos.present('cosTemplateDN', args.cos_template) + assert new_cos.present('cosSpecifier', args.cos_specifier) + assert new_cos.present('cosAttribute', args.cos_attr[0]) + assert new_cos.present('cosAttribute', args.cos_attr[1]) + + +@pytest.mark.ds50545 +@pytest.mark.bz1798394 +@pytest.mark.skipif(ds_is_older("1.4.3"), reason="Not implemented") +def test_usandsconf_dbgen_cos_pointer(topology_st, set_log_file_and_ldif): + """Test ldifgen (formerly dbgen) tool to create a COS definition + + :id: 6b26ca6d-226a-4f93-925e-faf95cc20211 + :setup: Standalone instance + :steps: + 1. Create DS instance + 2. Run ldifgen to generate ldif with pointer COS definition + 3. Import generated ldif to database + 4. Check it was properly imported + :expectedresults: + 1. Success + 2. Success + 3. Success + 4. Success + """ + + LDAP_RESULT = 'adding new entry "cn=My_Postal_Def_pointer,ou=cos pointer definitions,dc=example,dc=com"' + + standalone = topology_st.standalone + + args = FakeArgs() + args.type = 'pointer' + args.NAME = 'My_Postal_Def_pointer' + args.parent = 'ou=cos pointer definitions,dc=example,dc=com' + args.create_parent = True + args.cos_specifier = None + args.cos_attr = ['postalcode', 'telephonenumber'] + args.cos_template = 'cn=sales,cn=pointerCoS,dc=example,dc=com' + args.ldif_file = ldif_file + + content_list = ['Generating LDIF with the following options:', + 'NAME={}'.format(args.NAME), + 'type={}'.format(args.type), + 'parent={}'.format(args.parent), + 'create-parent={}'.format(args.create_parent), + 'cos-template={}'.format(args.cos_template), + 'cos-attr={}'.format(args.cos_attr), + 'ldif-file={}'.format(args.ldif_file), + 'Writing LDIF', + 'Successfully created LDIF file: {}'.format(args.ldif_file)] + + log.info('Run ldifgen to create COS definition ldif') + dbgen_create_cos_def(standalone, log, args) + + log.info('Check if file 
exists') + assert os.path.exists(ldif_file) + + check_value_in_log_and_reset(content_list) + + # Groups, COS, Roles and modification ldifs are designed to be used by ldapmodify, not ldif2db + run_ldapmodify_from_file(standalone, ldif_file, LDAP_RESULT) + + log.info('Check that COS definition is imported') + cos_def = CosPointerDefinitions(standalone, args.parent) + assert cos_def.exists(args.NAME) + new_cos = cos_def.get(args.NAME) + assert new_cos.present('cosTemplateDN', args.cos_template) + assert new_cos.present('cosAttribute', args.cos_attr[0]) + assert new_cos.present('cosAttribute', args.cos_attr[1]) + + +@pytest.mark.ds50545 +@pytest.mark.bz1798394 +@pytest.mark.skipif(ds_is_older("1.4.3"), reason="Not implemented") +def test_usandsconf_dbgen_cos_indirect(topology_st, set_log_file_and_ldif): + """Test ldifgen (formerly dbgen) tool to create a COS definition + + :id: ab4b799e-e801-432a-a61d-badad2628201 + :setup: Standalone instance + :steps: + 1. Create DS instance + 2. Run ldifgen to generate ldif with indirect COS definition + 3. Import generated ldif to database + 4. Check it was properly imported + :expectedresults: + 1. Success + 2. Success + 3. Success + 4. 
Success + """ + + LDAP_RESULT = 'adding new entry "cn=My_Postal_Def_indirect,ou=cos indirect definitions,dc=example,dc=com"' + + standalone = topology_st.standalone + + args = FakeArgs() + args.type = 'indirect' + args.NAME = 'My_Postal_Def_indirect' + args.parent = 'ou=cos indirect definitions,dc=example,dc=com' + args.create_parent = True + args.cos_specifier = 'businessCategory' + args.cos_attr = ['postalcode', 'telephonenumber'] + args.cos_template = None + args.ldif_file = ldif_file + + content_list = ['Generating LDIF with the following options:', + 'NAME={}'.format(args.NAME), + 'type={}'.format(args.type), + 'parent={}'.format(args.parent), + 'create-parent={}'.format(args.create_parent), + 'cos-specifier={}'.format(args.cos_specifier), + 'cos-attr={}'.format(args.cos_attr), + 'ldif-file={}'.format(args.ldif_file), + 'Writing LDIF', + 'Successfully created LDIF file: {}'.format(args.ldif_file)] + + log.info('Run ldifgen to create COS definition ldif') + dbgen_create_cos_def(standalone, log, args) + + log.info('Check if file exists') + assert os.path.exists(ldif_file) + + check_value_in_log_and_reset(content_list) + + # Groups, COS, Roles and modification ldifs are designed to be used by ldapmodify, not ldif2db + run_ldapmodify_from_file(standalone, ldif_file, LDAP_RESULT) + + log.info('Check that COS definition is imported') + cos_def = CosIndirectDefinitions(standalone, args.parent) + assert cos_def.exists(args.NAME) + new_cos = cos_def.get(args.NAME) + assert new_cos.present('cosIndirectSpecifier', args.cos_specifier) + assert new_cos.present('cosAttribute', args.cos_attr[0]) + assert new_cos.present('cosAttribute', args.cos_attr[1]) + + +@pytest.mark.ds50545 +@pytest.mark.bz1798394 +@pytest.mark.skipif(ds_is_older("1.4.3"), reason="Not implemented") +def test_usandsconf_dbgen_cos_template(topology_st, set_log_file_and_ldif): + """Test ldifgen (formerly dbgen) tool to create a COS template + + :id: 544017c7-4a82-4e7d-a047-00b68a28e071 + :setup: Standalone 
instance + :steps: + 1. Create DS instance + 2. Run ldifgen to generate ldif with COS template + 3. Import generated ldif to database + 4. Check it was properly imported + :expectedresults: + 1. Success + 2. Success + 3. Success + 4. Success + """ + + LDAP_RESULT = 'adding new entry "cn=My_Template,ou=cos templates,dc=example,dc=com"' + + standalone = topology_st.standalone + + args = FakeArgs() + args.NAME = 'My_Template' + args.parent = 'ou=cos templates,dc=example,dc=com' + args.create_parent = True + args.cos_priority = "1" + args.cos_attr_val = 'postalcode:12345' + args.ldif_file = ldif_file + + content_list = ['Generating LDIF with the following options:', + 'NAME={}'.format(args.NAME), + 'parent={}'.format(args.parent), + 'create-parent={}'.format(args.create_parent), + 'cos-priority={}'.format(args.cos_priority), + 'cos-attr-val={}'.format(args.cos_attr_val), + 'ldif-file={}'.format(args.ldif_file), + 'Writing LDIF', + 'Successfully created LDIF file: {}'.format(args.ldif_file)] + + log.info('Run ldifgen to create COS template ldif') + dbgen_create_cos_tmp(standalone, log, args) + + log.info('Check if file exists') + assert os.path.exists(ldif_file) + + check_value_in_log_and_reset(content_list) + + # Groups, COS, Roles and modification ldifs are designed to be used by ldapmodify, not ldif2db + run_ldapmodify_from_file(standalone, ldif_file, LDAP_RESULT) + + log.info('Check that COS template is imported') + cos_temp = CosTemplates(standalone, args.parent) + assert cos_temp.exists(args.NAME) + new_cos = cos_temp.get(args.NAME) + assert new_cos.present('cosPriority', str(args.cos_priority)) + assert new_cos.present('postalcode', '12345') + + +@pytest.mark.ds50545 +@pytest.mark.bz1798394 +@pytest.mark.skipif(ds_is_older("1.4.3"), reason="Not implemented") +def test_usandsconf_dbgen_managed_role(topology_st, set_log_file_and_ldif): + """Test ldifgen (formerly dbgen) tool to create a managed role + + :id: 10e77b41-0bc1-4ad5-a144-2c5107455b91 + :setup: Standalone 
instance + :steps: + 1. Create DS instance + 2. Run ldifgen to generate ldif with managed role + 3. Import generated ldif to database + 4. Check it was properly imported + :expectedresults: + 1. Success + 2. Success + 3. Success + 4. Success + """ + + LDAP_RESULT = 'adding new entry "cn=My_Managed_Role,ou=managed roles,dc=example,dc=com"' + + standalone = topology_st.standalone + + args = FakeArgs() + + args.NAME = 'My_Managed_Role' + args.parent = 'ou=managed roles,dc=example,dc=com' + args.create_parent = True + args.type = 'managed' + args.filter = None + args.role_dn = None + args.ldif_file = ldif_file + + content_list = ['Generating LDIF with the following options:', + 'NAME={}'.format(args.NAME), + 'parent={}'.format(args.parent), + 'create-parent={}'.format(args.create_parent), + 'type={}'.format(args.type), + 'ldif-file={}'.format(args.ldif_file), + 'Writing LDIF', + 'Successfully created LDIF file: {}'.format(args.ldif_file)] + + log.info('Run ldifgen to create managed role ldif') + dbgen_create_role(standalone, log, args) + + log.info('Check if file exists') + assert os.path.exists(ldif_file) + + check_value_in_log_and_reset(content_list) + + # Groups, COS, Roles and modification ldifs are designed to be used by ldapmodify, not ldif2db + run_ldapmodify_from_file(standalone, ldif_file, LDAP_RESULT) + + log.info('Check that managed role is imported') + roles = ManagedRoles(standalone, DEFAULT_SUFFIX) + assert roles.exists(args.NAME) + + +@pytest.mark.ds50545 +@pytest.mark.bz1798394 +@pytest.mark.skipif(ds_is_older("1.4.3"), reason="Not implemented") +def test_usandsconf_dbgen_filtered_role(topology_st, set_log_file_and_ldif): + """Test ldifgen (formerly dbgen) tool to create a filtered role + + :id: cb3c8ea8-4234-40e2-8810-fb6a25973921 + :setup: Standalone instance + :steps: + 1. Create DS instance + 2. Run ldifgen to generate ldif with filtered role + 3. Import generated ldif to database + 4. Check it was properly imported + :expectedresults: + 1. 
Success + 2. Success + 3. Success + 4. Success + """ + + LDAP_RESULT = 'adding new entry "cn=My_Filtered_Role,ou=filtered roles,dc=example,dc=com"' + + standalone = topology_st.standalone + + args = FakeArgs() + + args.NAME = 'My_Filtered_Role' + args.parent = 'ou=filtered roles,dc=example,dc=com' + args.create_parent = True + args.type = 'filtered' + args.filter = '"objectclass=posixAccount"' + args.role_dn = None + args.ldif_file = ldif_file + + content_list = ['Generating LDIF with the following options:', + 'NAME={}'.format(args.NAME), + 'parent={}'.format(args.parent), + 'create-parent={}'.format(args.create_parent), + 'type={}'.format(args.type), + 'filter={}'.format(args.filter), + 'ldif-file={}'.format(args.ldif_file), + 'Writing LDIF', + 'Successfully created LDIF file: {}'.format(args.ldif_file)] + + log.info('Run ldifgen to create filtered role ldif') + dbgen_create_role(standalone, log, args) + + log.info('Check if file exists') + assert os.path.exists(ldif_file) + + check_value_in_log_and_reset(content_list) + + # Groups, COS, Roles and modification ldifs are designed to be used by ldapmodify, not ldif2db + run_ldapmodify_from_file(standalone, ldif_file, LDAP_RESULT) + + log.info('Check that filtered role is imported') + roles = FilteredRoles(standalone, DEFAULT_SUFFIX) + assert roles.exists(args.NAME) + new_role = roles.get(args.NAME) + assert new_role.present('nsRoleFilter', args.filter) + + +@pytest.mark.ds50545 +@pytest.mark.bz1798394 +@pytest.mark.skipif(ds_is_older("1.4.3"), reason="Not implemented") +def test_usandsconf_dbgen_nested_role(topology_st, set_log_file_and_ldif): + """Test ldifgen (formerly dbgen) tool to create a nested role + + :id: 97fff0a8-3103-4adb-be04-2799ff58d8f1 + :setup: Standalone instance + :steps: + 1. Create DS instance + 2. Run ldifgen to generate ldif with nested role + 3. Import generated ldif to database + 4. Check it was properly imported + :expectedresults: + 1. Success + 2. Success + 3. Success + 4. 
Success + """ + + LDAP_RESULT = 'adding new entry "cn=My_Nested_Role,ou=nested roles,dc=example,dc=com"' + + standalone = topology_st.standalone + + args = FakeArgs() + args.NAME = 'My_Nested_Role' + args.parent = 'ou=nested roles,dc=example,dc=com' + args.create_parent = True + args.type = 'nested' + args.filter = None + args.role_dn = ['cn=some_role,ou=roles,dc=example,dc=com'] + args.ldif_file = ldif_file + + content_list = ['Generating LDIF with the following options:', + 'NAME={}'.format(args.NAME), + 'parent={}'.format(args.parent), + 'create-parent={}'.format(args.create_parent), + 'type={}'.format(args.type), + 'role-dn={}'.format(args.role_dn), + 'ldif-file={}'.format(args.ldif_file), + 'Writing LDIF', + 'Successfully created LDIF file: {}'.format(args.ldif_file)] + + log.info('Run ldifgen to create nested role ldif') + dbgen_create_role(standalone, log, args) + + log.info('Check if file exists') + assert os.path.exists(ldif_file) + + check_value_in_log_and_reset(content_list) + + # Groups, COS, Roles and modification ldifs are designed to be used by ldapmodify, not ldif2db + run_ldapmodify_from_file(standalone, ldif_file, LDAP_RESULT) + + log.info('Check that nested role is imported') + roles = NestedRoles(standalone, DEFAULT_SUFFIX) + assert roles.exists(args.NAME) + new_role = roles.get(args.NAME) + assert new_role.present('nsRoleDN', args.role_dn[0]) + + +@pytest.mark.ds50545 +@pytest.mark.bz1798394 +@pytest.mark.skipif(ds_is_older("1.4.3"), reason="Not implemented") +def test_usandsconf_dbgen_mod_ldif_mixed(topology_st, set_log_file_and_ldif): + """Test ldifgen (formerly dbgen) tool to create mixed modification ldif + + :id: 4a2e0901-2b48-452e-a4a0-507735132c81 + :setup: Standalone instance + :steps: + 1. Create DS instance + 2. Run ldifgen to generate modification ldif + 3. Import generated ldif to database + 4. Check it was properly imported + :expectedresults: + 1. Success + 2. Success + 3. Success + 4. 
Success + """ + + standalone = topology_st.standalone + + args = FakeArgs() + args.parent = DEFAULT_SUFFIX + args.create_users = True + args.delete_users = True + args.create_parent = False + args.num_users = "1000" + args.add_users = "100" + args.del_users = "999" + args.modrdn_users = "100" + args.mod_users = "10" + args.mod_attrs = ['cn', 'uid', 'sn'] + args.randomize = False + args.ldif_file = ldif_file + + content_list = ['Generating LDIF with the following options:', + 'create-users={}'.format(args.create_users), + 'parent={}'.format(args.parent), + 'create-parent={}'.format(args.create_parent), + 'delete-users={}'.format(args.delete_users), + 'num-users={}'.format(args.num_users), + 'add-users={}'.format(args.add_users), + 'del-users={}'.format(args.del_users), + 'modrdn-users={}'.format(args.modrdn_users), + 'mod-users={}'.format(args.mod_users), + 'mod-attrs={}'.format(args.mod_attrs), + 'randomize={}'.format(args.randomize), + 'ldif-file={}'.format(args.ldif_file), + 'Writing LDIF', + 'Successfully created LDIF file: {}'.format(args.ldif_file)] + + log.info('Run ldifgen to create modification ldif') + dbgen_create_mods(standalone, log, args) + + log.info('Check if file exists') + assert os.path.exists(ldif_file) + + check_value_in_log_and_reset(content_list) + + log.info('Get number of accounts before import') + accounts = Accounts(standalone, DEFAULT_SUFFIX) + count_account = len(accounts.filter('(uid=*)')) + + # Groups, COS, Roles and modification ldifs are designed to be used by ldapmodify, not ldif2db + # ldapmodify will complain about a lot of changes done which causes subprocess to return exit code != 0 + with pytest.raises(subprocess.CalledProcessError): + run_ldapmodify_from_file(standalone, ldif_file) + + log.info('Check that some accounts are imported') + assert len(accounts.filter('(uid=*)')) > count_account + + +@pytest.mark.ds50545 +@pytest.mark.bz1798394 +@pytest.mark.skipif(ds_is_older("1.4.3"), reason="Not implemented") +def 
test_usandsconf_dbgen_nested_ldif(topology_st, set_log_file_and_ldif): + """Test ldifgen (formerly dbgen) tool to create nested ldif + + :id: 9c281c28-4169-45e0-8c07-c5502d9a7581 + :setup: Standalone instance + :steps: + 1. Create DS instance + 2. Run ldifgen to generate nested ldif + 3. Import generated ldif to database + 4. Check it was properly imported + :expectedresults: + 1. Success + 2. Success + 3. Success + 4. Success + """ + + standalone = topology_st.standalone + + args = FakeArgs() + args.suffix = DEFAULT_SUFFIX + args.node_limit = "100" + args.num_users = "600" + args.ldif_file = ldif_file + + content_list = ['Generating LDIF with the following options:', + 'suffix={}'.format(args.suffix), + 'node-limit={}'.format(args.node_limit), + 'num-users={}'.format(args.num_users), + 'ldif-file={}'.format(args.ldif_file), + 'Writing LDIF', + 'Successfully created nested LDIF file ({}) containing 6 nodes/subtrees'.format(args.ldif_file)] + + log.info('Run ldifgen to create nested ldif') + dbgen_create_nested(standalone, log, args) + + log.info('Check if file exists') + assert os.path.exists(ldif_file) + + check_value_in_log_and_reset(content_list) + + log.info('Get number of accounts before import') + accounts = Accounts(standalone, DEFAULT_SUFFIX) + count_account = len(accounts.filter('(uid=*)')) + count_ou = len(accounts.filter('(ou=*)')) + + # Groups, COS, Roles and modification ldifs are designed to be used by ldapmodify, not ldif2db + # ldapmodify will complain about already existing suffix which causes subprocess to return exit code != 0 + with pytest.raises(subprocess.CalledProcessError): + run_ldapmodify_from_file(standalone, ldif_file) + + standalone.restart() + + log.info('Check that accounts are imported') + assert len(accounts.filter('(uid=*)')) > count_account + assert len(accounts.filter('(ou=*)')) > count_ou + + +if __name__ == '__main__': + # Run isolated + # -s for DEBUG mode + CURRENT_FILE = os.path.realpath(__file__) + pytest.main("-s %s" % 
CURRENT_FILE) diff --git a/dirsrvtests/tests/suites/clu/dbmon_test.py b/dirsrvtests/tests/suites/clu/dbmon_test.py new file mode 100644 index 0000000..4a82eb0 --- /dev/null +++ b/dirsrvtests/tests/suites/clu/dbmon_test.py @@ -0,0 +1,281 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2020 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. +# --- END COPYRIGHT BLOCK --- +# +import time +import subprocess +import pytest +import json +import glob + +from lib389.tasks import * +from lib389.utils import * +from lib389.topologies import topology_st, topology_m2 +from lib389.cli_conf.monitor import db_monitor +from lib389.monitor import MonitorLDBM +from lib389.cli_base import FakeArgs, LogCapture +from lib389.backend import Backends + +pytestmark = pytest.mark.tier1 + +logging.getLogger(__name__).setLevel(logging.DEBUG) +log = logging.getLogger(__name__) + +OUTPUT_NO_INDEXES = [ + 'DB Monitor Report', + 'Database Cache:', + 'Cache Hit Ratio:', + 'Free Space:', + 'Free Percentage:', + 'RO Page Drops:', + 'Pages In:', + 'Pages Out:', + 'Normalized DN Cache:', + 'Cache Hit Ratio:', + 'Free Space:', + 'Free Percentage:', + 'DN Count:', + 'Evictions:', + 'Backends:', + 'dc=example,dc=com (userRoot):', + 'Entry Cache Hit Ratio:', + 'Entry Cache Count:', + 'Entry Cache Free Space:', + 'Entry Cache Free Percentage:', + 'Entry Cache Average Size:', + 'DN Cache Hit Ratio:', + 'DN Cache Count:', + 'DN Cache Free Space:', + 'DN Cache Free Percentage:', + 'DN Cache Average Size:' + ] + +OUTPUT_INDEXES = [ + 'DB Monitor Report', + 'Database Cache:', + 'Cache Hit Ratio:', + 'Free Space:', + 'Free Percentage:', + 'RO Page Drops:', + 'Pages In:', + 'Pages Out:', + 'Normalized DN Cache:', + 'Cache Hit Ratio:', + 'Free Space:', + 'Free Percentage:', + 'DN Count:', + 'Evictions:', + 'Backends:', + 'dc=example,dc=com (userRoot):', + 'Entry Cache Hit Ratio:', + 'Entry Cache Count:', + 'Entry Cache Free Space:', + 'Entry 
Cache Free Percentage:', + 'Entry Cache Average Size:', + 'DN Cache Hit Ratio:', + 'DN Cache Count:', + 'DN Cache Free Space:', + 'DN Cache Free Percentage:', + 'DN Cache Average Size:', + 'Indexes:', + 'Index: aci.db', + 'Cache Hit:', + 'Cache Miss:', + 'Page In:', + 'Page Out:', + 'Index: id2entry.db', + 'Index: objectclass.db', + 'Index: entryrdn.db' + ] + +JSON_OUTPUT = [ + 'date', + 'dbcache', + 'hit_ratio', + 'free', + 'free_percentage', + 'roevicts', + 'pagein', + 'pageout', + 'ndncache', + 'hit_ratio', + 'free', + 'free_percentage', + 'count', + 'evictions', + 'backends', + 'userRoot', + '"suffix": "dc=example,dc=com"', + 'entry_cache_count', + 'entry_cache_free', + 'entry_cache_free_percentage', + 'entry_cache_size', + 'entry_cache_hit_ratio', + 'dn_cache_count', + 'dn_cache_free', + 'dn_cache_free_percentage', + 'dn_cache_size', + 'dn_cache_hit_ratio', + 'indexes', + 'name', + 'objectclass.db', + 'cachehit', + 'cachemiss', + 'pagein', + 'pageout', + 'entryrdn.db', + 'aci.db', + 'id2entry.db' + ] + + +def clear_log(inst): + log.info('Clear the log') + inst.logcap.flush() + + +def _set_dbsizes(inst, dbpagesize, dbcachesize): + backends = Backends(inst) + backend = backends.get(DEFAULT_BENAME) + dir = backend.get_attr_val_utf8('nsslapd-directory') + inst.stop() + # Export the db to ldif + ldif_file = f'{inst.get_ldif_dir()}/db.ldif' + inst.db2ldif(bename=DEFAULT_BENAME, suffixes=[DEFAULT_SUFFIX], + excludeSuffixes=None, repl_data=False, + outputfile=ldif_file, encrypt=False) + # modify dse.ldif + dse_ldif = DSEldif(inst, serverid=inst.serverid) + bdb = 'cn=bdb,cn=config,cn=ldbm database,cn=plugins,cn=config' + dse_ldif.replace(bdb, 'nsslapd-db-page-size', str(dbpagesize)) + dse_ldif.replace(bdb, 'nsslapd-cache-autosize', '0') + dse_ldif.replace(bdb, 'nsslapd-cache-autosize-split', '0') + dse_ldif.replace(bdb, 'nsslapd-dbcachesize', str(dbcachesize)) + # remove the database files and the database environment files + for d in (dir, inst.ds_paths.db_home_dir): 
+ for f in glob.glob(f'{d}/*'): + if os.path.isfile(f): + os.remove(f) + # Reimport the db + inst.ldif2db(DEFAULT_BENAME, None, None, None, ldif_file) + inst.start() + + +@pytest.mark.ds50545 +@pytest.mark.bz1795943 +@pytest.mark.skipif(ds_is_older("1.4.2"), reason="Not implemented") +@pytest.mark.skipif(get_default_db_lib() == "mdb", reason="Not supported over mdb") +def test_dsconf_dbmon(topology_st): + """Test dbmon tool, that was ported from legacy tools to dsconf + + :id: 4d584ba9-12a9-4e90-ba9a-7e103affdac5 + :setup: Standalone instance + :steps: + 1. Create DS instance + 2. Run dbmon without --indexes + 3. Run dbmon with --indexes + 4. Run dbmon with --json + :expectedresults: + 1. Success + 2. Success + 3. Success + 4. Success + """ + + standalone = topology_st.standalone + + args = FakeArgs() + args.backends = DEFAULT_BENAME + args.indexes = False + args.json = False + + log.info('Sanity check for syntax') + db_monitor(standalone, DEFAULT_SUFFIX, topology_st.logcap.log, args) + for item in OUTPUT_NO_INDEXES: + assert topology_st.logcap.contains(item) + + clear_log(topology_st) + + log.info('Sanity check for --indexes output') + args.indexes = True + db_monitor(standalone, DEFAULT_SUFFIX, topology_st.logcap.log, args) + for index_item in OUTPUT_INDEXES: + assert topology_st.logcap.contains(index_item) + + clear_log(topology_st) + + log.info('Sanity check for --json output') + args.json = True + db_monitor(standalone, DEFAULT_SUFFIX, topology_st.logcap.log, args) + for json_item in JSON_OUTPUT: + assert topology_st.logcap.contains(json_item) + + clear_log(topology_st) + + + +@pytest.mark.skipif(get_default_db_lib() == "mdb", reason="Not supported over mdb") +def test_dbmon_mp_pagesize(topology_st): + """Test dbmon tool, that was ported from legacy tools to dsconf + + :id: 20c1e5b0-75a0-11ed-91de-482ae39447e5 + :setup: Standalone instance + :steps: + 1. Set bdb parameters (pagesize and cachesize) + 2. 
Query ldbm database statistics and extract dbpages and dbcachesize values + 3. Capture dsconf monitor dbmon output and extract dbcache free_percentage value + 4. Check that free_percentage is computed rightly + :expectedresults: + 1. Success + 2. Success + 3. Success + 4. free_percentage computation should be based on file system prefered block size + rather than on db page size. + + """ + + inst = topology_st.standalone + fspath = inst.ds_paths.db_home_dir + os.makedirs(fspath, mode=0o750, exist_ok=True) + fs_pagesize = os.statvfs(fspath).f_bsize + db_pagesize = 1024*64 # Maximum value supported by bdb + if fs_pagesize == db_pagesize: + fs_pagesize = db_pagesize / 2; + _set_dbsizes(inst, db_pagesize, 80960) + + # Now lets check that we are really in the condition + # needed to reproduce RHBZ 2034407 + ldbm_mon = MonitorLDBM(inst).get_status() + dbcachesize = int(ldbm_mon['nsslapd-db-cache-size-bytes'][0]) + dbpages = int(ldbm_mon['nsslapd-db-pages-in-use'][0]) + + args = FakeArgs() + args.backends = DEFAULT_BENAME + args.indexes = False + args.json = True + lc = LogCapture() + db_monitor(inst, DEFAULT_SUFFIX, lc.log, args) + db_mon_as_str = "".join( ( str(rec) for rec in lc.outputs ) ) + db_mon_as_str = re.sub("^[^{]*{", "{", db_mon_as_str)[:-2] + db_mon = json.loads(db_mon_as_str); + + dbmon_free_percentage = int(10 * float(db_mon['dbcache']['free_percentage'])) + real_free_percentage = int(1000 * ( dbcachesize - dbpages * fs_pagesize ) / dbcachesize) + log.info(f'dbcachesize: {dbcachesize}') + log.info(f'dbpages: {dbpages}') + log.info(f'db_pagesize: {db_pagesize}') + log.info(f'fs_pagesize: {fs_pagesize}') + log.info(f'dbmon_free_percentage: {dbmon_free_percentage}') + log.info(f'real_free_percentage: {real_free_percentage}') + assert real_free_percentage == dbmon_free_percentage + + +if __name__ == '__main__': + # Run isolated + # -s for DEBUG mode + CURRENT_FILE = os.path.realpath(__file__) + pytest.main("-s %s" % CURRENT_FILE) diff --git 
a/dirsrvtests/tests/suites/clu/dbverify_test.py b/dirsrvtests/tests/suites/clu/dbverify_test.py new file mode 100644 index 0000000..ecad36d --- /dev/null +++ b/dirsrvtests/tests/suites/clu/dbverify_test.py @@ -0,0 +1,77 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2020 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. +# --- END COPYRIGHT BLOCK --- +# +import time +import subprocess +import pytest + +from lib389.cli_ctl.dbtasks import dbtasks_verify +from lib389.tasks import * +from lib389.utils import * +from lib389.topologies import topology_st +from lib389.cli_base import FakeArgs + +pytestmark = pytest.mark.tier0 + +LOG_FILE = '/tmp/dbverify.log' +logging.getLogger(__name__).setLevel(logging.DEBUG) +log = logging.getLogger(__name__) + + +@pytest.fixture(scope="function") +def set_log_file(request): + fh = logging.FileHandler(LOG_FILE) + fh.setLevel(logging.DEBUG) + log.addHandler(fh) + + def fin(): + log.info('Delete log file') + os.remove(LOG_FILE) + + request.addfinalizer(fin) + + +@pytest.mark.ds50545 +@pytest.mark.bz1739718 +@pytest.mark.skipif(ds_is_older("1.4.2"), reason="Not implemented") +def test_dsctl_dbverify(topology_st, set_log_file): + """Test dbverify tool, that was ported from legacy tools to dsctl + + :id: 1b22b363-a6e5-4922-ad42-ae80446d69fe + :setup: Standalone instance + :steps: + 1. Create DS instance + 2. Run dbverify + 3. Check if dbverify was successful + :expectedresults: + 1. Success + 2. Success + 3. 
Success + """ + + standalone = topology_st.standalone + message = 'dbverify successful' + + args = FakeArgs() + args.backend = DEFAULT_BENAME + + log.info('Run dbverify') + standalone.stop() + dbtasks_verify(standalone, log, args) + + log.info('Check dbverify was successful') + with open(LOG_FILE, 'r+') as f: + file_content = f.read() + assert message in file_content + + +if __name__ == '__main__': + # Run isolated + # -s for DEBUG mode + CURRENT_FILE = os.path.realpath(__file__) + pytest.main("-s %s" % CURRENT_FILE) diff --git a/dirsrvtests/tests/suites/clu/dsconf_pta_add_url_test.py b/dirsrvtests/tests/suites/clu/dsconf_pta_add_url_test.py new file mode 100644 index 0000000..e79b42c --- /dev/null +++ b/dirsrvtests/tests/suites/clu/dsconf_pta_add_url_test.py @@ -0,0 +1,49 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2023 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. +# --- END COPYRIGHT BLOCK --- +# +import pytest +import os +import logging + +from lib389.topologies import topology_st +from lib389.cli_conf.plugins.ldappassthrough import pta_add +from lib389._constants import DEFAULT_SUFFIX +from lib389.cli_base import FakeArgs +from . import check_value_in_log_and_reset + +logging.getLogger(__name__).setLevel(logging.DEBUG) +log = logging.getLogger(__name__) +new_url = "ldap://localhost:7389/o=redhat" + + +def test_dsconf_add_pta_url(topology_st): + """ Test dsconf add a PTA URL + + :id: 38c7331c-b828-4671-a39f-4f57d1742178 + :setup: Standalone instance + :steps: + 1. Try to add new PTA URL + 2. Check if new PTA URL is added. + :expectedresults: + 1. Success + 2. 
Success + """ + + args = FakeArgs() + args.URL = new_url + + log.info("Add new URL.") + pta_add(topology_st.standalone, DEFAULT_SUFFIX, topology_st.logcap.log, args) + check_value_in_log_and_reset(topology_st, check_value="Successfully added URL") + + +if __name__ == '__main__': + # Run isolated + # -s for DEBUG mode + CURRENT_FILE = os.path.realpath(__file__) + pytest.main(["-s", CURRENT_FILE]) diff --git a/dirsrvtests/tests/suites/clu/dsconf_tasks_test.py b/dirsrvtests/tests/suites/clu/dsconf_tasks_test.py new file mode 100644 index 0000000..ac1109b --- /dev/null +++ b/dirsrvtests/tests/suites/clu/dsconf_tasks_test.py @@ -0,0 +1,219 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2023 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. +# --- END COPYRIGHT BLOCK --- +# +import logging +import pytest +import os +from lib389._constants import DEFAULT_SUFFIX +# from lib389.topologies import topology_m1 as topo +from lib389.topologies import topology_st as topo +from lib389.tasks import (ImportTask, ExportTask, BackupTask, RestoreTask, AutomemberRebuildMembershipTask, + AutomemberAbortRebuildTask, MemberUidFixupTask, MemberOfFixupTask, USNTombstoneCleanupTask, + DBCompactTask, EntryUUIDFixupTask, SchemaReloadTask, SyntaxValidateTask, + FixupLinkedAttributesTask, DBCompactTask) +from lib389.plugins import USNPlugin, POSIXWinsyncPlugin, LinkedAttributesPlugin, AutoMembershipPlugin, MemberOfPlugin +from lib389.dbgen import dbgen_users +from lib389.idm.user import UserAccount +from lib389.idm.group import Groups +from lib389.idm.posixgroup import PosixGroups # not sure if this is need yet MARK + +log = logging.getLogger(__name__) + + +def test_task_timeout(topo): + """All thath te timeoutsetting works for all "tasks" + + :id: 6a6f5176-76bf-424d-bc10-d33bdfa529eb + :setup: Standalone Instance + :steps: + 1. Test timeout for import task + 2. Test timeout for export task + 3. 
Test timeout for schema validate task + 4. Test timeout for schema reload task + 5. Test timeout for automember rebuild + 6. Test timeout for automember abort + 7. Test timeout for usn cleanup task + 8. Test timeout for posix group fixup task + 9. Test timeout for member UID fixup task + 10. Test timeout for memberof fixup task + 11. Test timeout for entryuuid fixup task + 12. Test timeout for linked attrs fixup task + 13. test timeout for db compact task + :expectedresults: + 1. Task timed out + 2. Task timed out + 3. Task timed out + 4. Task timed out + 5. Task timed out + 6. Task timed out + 7. Task timed out + 8. Task timed out + 9. Task timed out + 10. Task timed out + 11. Task timed out + 12. Task timed out + 13. Task timed out + """ + + #inst = topo.ms['supplier1'] --> this leads to a deadlock when testing MemberOfFixupTask + inst = topo.standalone + + # Enable plugins + plugins = [USNPlugin, POSIXWinsyncPlugin, LinkedAttributesPlugin, AutoMembershipPlugin, MemberOfPlugin] + for plugin in plugins: + plugin(inst).enable() + inst.restart() + + # Test timeout for import task, first create LDIF + import_ldif = inst.ldifdir + '/import_task_timeout.ldif' + dbgen_users(inst, 100000, import_ldif, DEFAULT_SUFFIX, parent="ou=people," + DEFAULT_SUFFIX, generic=True) + + task = ImportTask(inst) + task.import_suffix_from_ldif(ldiffile=import_ldif, suffix=DEFAULT_SUFFIX) + task.wait(timeout=.5, sleep_interval=.5) + assert task.get_exit_code() is None + task.wait(timeout=0) + + # Test timeout for export task + export_ldif = inst.ldifdir + '/export_task_timeout.ldif' + task = ExportTask(inst) + task.export_suffix_to_ldif(export_ldif, DEFAULT_SUFFIX) + task.wait(timeout=.5, sleep_interval=.5) + assert task.get_exit_code() is None + task.wait(timeout=0) + + # Test timeout for schema validate task + task = SyntaxValidateTask(inst).create(properties={ + 'basedn': DEFAULT_SUFFIX, + 'filter': "objectClass=*" + }) + task.wait(timeout=.5, sleep_interval=.5) + assert 
task.get_exit_code() is None + task.wait(timeout=0) + + # Test timeout for schema reload task (runs too fast) + """ + task = SchemaReloadTask(inst).create(properties={ + 'schemadir': inst.schemadir, + }) + task.wait(timeout=.5, sleep_interval=.5) + assert task.get_exit_code() is None + task.wait(timeout=0) + """ + + # Test timeout for automember rebuild + task = AutomemberRebuildMembershipTask(inst).create(properties={ + 'basedn': DEFAULT_SUFFIX, + 'filter': "objectClass=*" + }) + task.wait(timeout=.5, sleep_interval=.5) + assert task.get_exit_code() is None + task.wait(timeout=0) + + # Test timeout for automember abort (runs too fast) + """ + AutomemberRebuildMembershipTask(inst).create(properties={ + 'basedn': DEFAULT_SUFFIX, + 'filter': "objectClass=*" + }) + task = AutomemberAbortRebuildTask(inst).create() + task.wait(timeout=.5, sleep_interval=.5) + assert task.get_exit_code() is None + task.wait(timeout=0) + """ + + # Test timeout for usn cleanup task, first delete a bunch of users + for idx in range(1, 1001): + entry_idx = str(idx).zfill(6) + dn = f"uid=user{entry_idx},ou=people,{DEFAULT_SUFFIX}" + UserAccount(inst, dn=dn).delete() + task = USNTombstoneCleanupTask(inst).create(properties={ + 'suffix': DEFAULT_SUFFIX, + }) + task.wait(timeout=.5, sleep_interval=.5) + assert task.get_exit_code() is None + task.wait(timeout=0) + + # Test timeout for Posix Group fixup task (runs too fast) + """ + groups = PosixGroups(inst, DEFAULT_SUFFIX) + start_range = 10000 + for idx in range(1, 10): + group_props = { + 'cn': 'test_posix_group_' + str(idx), + 'objectclass': ['posixGroup', 'groupofuniquenames'], + 'gidNumber': str(idx) + } + group = groups.create(properties=group_props) + for user_idx in range(start_range, start_range + 1000): + entry_idx = str(user_idx).zfill(6) + dn = f"uid=user{entry_idx},ou=people,{DEFAULT_SUFFIX}" + group.add('memberuid', dn) + group.add('uniquemember', dn) + start_range += 1000 + + task = MemberUidFixupTask(inst).create(properties={ + 
'basedn': DEFAULT_SUFFIX, + 'filter': "objectClass=*" + }) + task.wait(timeout=.5, sleep_interval=.5) + assert task.get_exit_code() is None + task.wait(timeout=0) + """ + + # Test timeout for memberOf fixup task + groups = Groups(inst, DEFAULT_SUFFIX) + group_props = {'cn': 'test_group'} + group = groups.create(properties=group_props) + for idx in range(5000, 6000): + entry_idx = str(idx).zfill(6) + dn = f"uid=user{entry_idx},ou=people,{DEFAULT_SUFFIX}" + group.add_member(dn) + + task = MemberOfFixupTask(inst).create(properties={ + 'basedn': DEFAULT_SUFFIX, + 'filter': "objectClass=*" + }) + task.wait(timeout=.5, sleep_interval=.5) + assert task.get_exit_code() is None + task.wait(timeout=0) + + # Test timeout for entryuuid fixup task + task = EntryUUIDFixupTask(inst).create(properties={ + 'basedn': DEFAULT_SUFFIX, + 'filter': "objectClass=*" + }) + task.wait(timeout=.5, sleep_interval=.5) + assert task.get_exit_code() is None + task.wait(timeout=0) + + # test timeout for linked attrs fixup (runs too fast) + """ + task = FixupLinkedAttributesTask(inst).create(properties={ + 'basedn': DEFAULT_SUFFIX, + 'filter': "objectClass=*" + }) + task.wait(timeout=.5, sleep_interval=.5) + assert task.get_exit_code() is None + task.wait(timeout=0) + """ + + # Test time out for db compact task (runs too fast) + """ + task = DBCompactTask(inst).create() + task.wait(timeout=.5, sleep_interval=.5) + assert task.get_exit_code() is None + task.wait(timeout=0) + """ + + +if __name__ == '__main__': + # Run isolated + # -s for DEBUG mode + CURRENT_FILE = os.path.realpath(__file__) + pytest.main(["-s", CURRENT_FILE]) diff --git a/dirsrvtests/tests/suites/clu/dsconf_test.py b/dirsrvtests/tests/suites/clu/dsconf_test.py new file mode 100644 index 0000000..43d4794 --- /dev/null +++ b/dirsrvtests/tests/suites/clu/dsconf_test.py @@ -0,0 +1,246 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2021 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). 
+# See LICENSE for details. +# --- END COPYRIGHT BLOCK --- +# +"""Test dsconf CLI with LDAPS""" + +import subprocess +import logging +import os +from lib389.cli_base import LogCapture +import pytest +import ldap +from lib389._constants import DEFAULT_SUFFIX, DN_DM, ReplicaRole +from lib389.topologies import create_topology + + +pytestmark = pytest.mark.tier1 + +log = logging.getLogger(__name__) + + +@pytest.fixture(scope="function") +def enable_config(request, topology_st, config_type): + if config_type == 'ldapfile': + with open('/tmp/ldap_temp.conf', 'w') as f: + f.write("TLS_CACERT /etc/dirsrv/slapd-standalone1/ca.crt\n") + f.close() + + else: + data = ['[localhost]\n', 'tls_cacertdir = /etc/dirsrv/slapd-standalone1\n', + f'uri = {topology_st.standalone.get_ldaps_uri()}\n'] + fd = open(f'{os.environ.get("HOME")}/.dsrc', 'w') + for line in data: + fd.write(line) + fd.close() + + def fin(): + if config_type == 'ldapfile': + os.remove('/tmp/ldap_temp.conf') + else: + os.remove(f'{os.environ.get("HOME")}/.dsrc') + + request.addfinalizer(fin) + + +@pytest.fixture(scope="function") +def topology_st(request): + """Create DS standalone instance""" + + topology = create_topology({ReplicaRole.STANDALONE: 1}) + + topology.logcap = LogCapture() + return topology + +def test_backend_referral(topology_st): + """Test setting and deleting referral in backend + + :id: e65fa6c3-da7c-49f8-be0c-738be46a1180 + :setup: Standalone Instance + :steps: + 1. Set referral using dsconf command + 2. Verify that referral is set correctly + 3. Set nsslapd-state to referral and verify + 4. Test a referral error + 5. Restart the server + 6. Cleanup - delete referral and set nsslapd-state to 'backend' + :expectedresults: + 1. Success + 2. Success + 3. Success + 4. Success + 5. Success + 6. 
Success + """ + dsconf_cmd= ['/usr/sbin/dsconf', topology_st.standalone.serverid, '-D', DN_DM, '-w', 'password'] + # Set referral + log.info("Use dsconf to set referral") + cmdline = dsconf_cmd + ['backend', 'suffix', 'set', '--add-referral', 'ldap://localhost.localdomain:389/o%3dnetscaperoot', 'userRoot'] + log.info(f'Command used: %{cmdline}') + proc = subprocess.Popen(cmdline, stdout=subprocess.PIPE) + msg = proc.communicate() + log.info('output message : %s' % msg[0]) + assert proc.returncode == 0 + + # Check referral + log.info("Verify referral is set correctly") + cmdline = dsconf_cmd + ['backend', 'suffix', 'get', 'userRoot'] + log.info(f'Command used: %{cmdline}') + proc = subprocess.Popen(cmdline, stdout=subprocess.PIPE) + out, _ = proc.communicate() + log.info('output message : %s' % out[0]) + assert 'referral: ldap://localhost.localdomain:389/o%3dnetscaperoot' in out.decode('utf-8') + + # Set nsslapd-state to referral + log.info("Use dsconf to set nsslapd-state to referral") + cmdline = dsconf_cmd + ['backend', 'suffix', 'set', '--state', 'referral', 'userRoot'] + log.info(f'Command used: %{cmdline}') + proc = subprocess.Popen(cmdline, stdout=subprocess.PIPE) + msg = proc.communicate() + log.info('output message : %s' % msg[0]) + assert proc.returncode == 0 + + # Verify nsslapd-state + log.info("Verify nsslapd-state is set to referral") + cmdline = dsconf_cmd + ['backend', 'suffix', 'get', 'userRoot'] + log.info(f'Command used: %{cmdline}') + proc = subprocess.Popen(cmdline, stdout=subprocess.PIPE) + out, _ = proc.communicate() + log.info('output message : %s' % out[0]) + assert 'state: referral' in out.decode('utf-8') + + # Test a referral error + topology_st.standalone.set_option(ldap.OPT_REFERRALS, 0) # Do not follow referral + with pytest.raises(ldap.REFERRAL): + topology_st.standalone.search_s(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE, 'objectclass=top') + + # Restart the server + log.info('Restarting the server...') + 
topology_st.standalone.restart(timeout=10) + + # Cleanup + log.info('Cleaning up...') + cmdline = dsconf_cmd + ['backend', 'suffix', 'set', '--state', 'backend', 'userRoot'] + log.info(f'Command used: %{cmdline}') + proc = subprocess.Popen(cmdline, stdout=subprocess.PIPE) + out, _ = proc.communicate() + log.info('output message : %s' % out[0]) + assert proc.returncode == 0 + + cmdline = dsconf_cmd + ['backend', 'suffix', 'set', '--del-referral', 'ldap://localhost.localdomain:389/o%3dnetscaperoot', 'userRoot'] + log.info(f'Command used: %{cmdline}') + proc = subprocess.Popen(cmdline, stdout=subprocess.PIPE) + out, _ = proc.communicate() + log.info('output message : %s' % out[0]) + assert proc.returncode == 0 + topology_st.standalone.set_option(ldap.OPT_REFERRALS, 1) + + +@pytest.mark.parametrize('config_type', ('ldapfile','dsrfile')) +def test_dsconf_with_ldaps(topology_st, enable_config, config_type): + """Test dsconf CLI with LDAPS + + :id: 5288a288-60f0-4e81-a44b-d2ee2611ca86 + :parametrized: yes + :customerscenario: True + :setup: Standalone Instance + :steps: + 1. Enable TLS + 2. Set only ldap.conf + 3. Verify dsconf command is working correctly on ldaps + 4. Set only ~/.dsrc file + 5. Verify dsconf command is working correctly on ldaps + :expectedresults: + 1. Success + 2. Success + 3. Success + 4. Success + 5. 
Success + """ + log.info("Enable TLS") + topology_st.standalone.enable_tls() + if config_type == 'ldapfile': + log.info("Use dsconf to list certificate") + cmdline=['/usr/sbin/dsconf', topology_st.standalone.get_ldaps_uri(), '-D', + DN_DM, '-w', 'password', 'security', 'certificate', 'list'] + log.info(f'Command used : %{cmdline}') + proc = subprocess.Popen(cmdline, stdout=subprocess.PIPE, env={'LDAPCONF': '/tmp/ldap_temp.conf'}) + else: + log.info("Use dsconf to list certificate") + cmdline=['/usr/sbin/dsconf','standalone1', '-D', DN_DM, '-w', 'password', + 'security', 'certificate', 'list'] + log.info(f'Command used : %{cmdline}') + proc = subprocess.Popen(cmdline, stdout=subprocess.PIPE) + + msg = proc.communicate() + log.info(f'output message : {msg[0]}') + assert proc.returncode == 0 + + +@pytest.mark.parametrize('instance_role', ('consumer', 'hub')) +def test_check_replica_id_rejected_hub_consumer(instance_role): + """Test dsconf CLI does not accept replica-id parameter for comsumer and hubs + + :id: 274b47f8-111a-11ee-8321-98fa9ba19b65 + :parametrized: yes + :customerscenario: True + :setup: Create DS instance + :steps: + 1. Create ldap instance + 2. Use dsconf cli to create replica and specify replica id for a consumer + 3. Verify dsconf command rejects replica_id for consumers + 4. Repeat for a hub use dsconf to create a replica w replica id + 5. Verify dsconf command rejects replica_id for hubs + :expectedresults: + 1. Success + 2. Success + 3. Setting the "replica-id" manually for consumers not allowed. + 4. Success + 5. Setting the "replica-id" manually for hubs is not allowed. 
+ """ + print("DN_DM {}".format(DN_DM)) + cmdline = ['/usr/sbin/dsconf', 'standalone1', '-D', DN_DM, '-w', 'password', 'replication', 'enable', '--suffix', DEFAULT_SUFFIX, '--role', instance_role, '--replica-id=1'] + log.info(f'Command used : {cmdline}') + proc = subprocess.Popen(cmdline, stdout=subprocess.PIPE) + + msg = proc.communicate() + msg = msg[0].decode('utf-8') + log.info(f'output message : {msg}') + assert "Replication successfully enabled for" not in msg, f"Test Failed: --replica-id option is accepted....It shouldn't for {instance_role}" + log.info(f"Test PASSED: --replica-id option is NOT accepted for {instance_role}.") + + +@pytest.mark.parametrize('instance_role, replica_id', + [('consumer', None), ('hub', None), ('consumer', "65535"), ('hub', "65535")]) +def test_check_replica_id_accepted_hub_consumer(topology_st, instance_role, replica_id): + """Test dsconf CLI accepts 65535 replica-id parameter for comsumer and hubs + + :id: e0a1a1e6-11c1-40e6-92fe-cb550fb2170d + :parametrized: yes + :customerscenario: True + :setup: Create DS instance + :steps: + 1. Create ldap instance + 2. Use dsconf cli to create replica and don't specify replica id for a consumer or hub + 3. Use dsconf cli to create replica and specify replica id for a consumer or hub + :expectedresults: + 1. Success + 2. Success + 3. 
Success + """ + print("DN_DM {}".format(DN_DM)) + cmdline = ['/usr/sbin/dsconf', 'standalone1', '-D', DN_DM, '-w', 'password', 'replication', 'enable', '--suffix', DEFAULT_SUFFIX, '--role', instance_role] + if replica_id is not None: + cmdline.append(f'--replica-id={replica_id}') + log.info(f'Command used : {cmdline}') + proc = subprocess.Popen(cmdline, stdout=subprocess.PIPE) + + msg = proc.communicate() + msg = msg[0].decode('utf-8') + log.info(f'output message : {msg}') + assert "Replication successfully enabled for" in msg + log.info(f"Test PASSED: --replica-id option is accepted for {instance_role}.") diff --git a/dirsrvtests/tests/suites/clu/dsctl_acceptance_test.py b/dirsrvtests/tests/suites/clu/dsctl_acceptance_test.py new file mode 100644 index 0000000..7968330 --- /dev/null +++ b/dirsrvtests/tests/suites/clu/dsctl_acceptance_test.py @@ -0,0 +1,63 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2021 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. +# --- END COPYRIGHT BLOCK --- + +import logging +import pytest +import os +import time +from lib389.topologies import topology_st as topo + +log = logging.getLogger(__name__) + +#unstable or unstatus tests, skipped for now +@pytest.mark.flaky(max_runs=2, min_passes=1) +def test_custom_path(topo): + """Test that a custom path, backup directory, is correctly used by lib389 + when the server is stopped. + + :id: 8659e209-ee83-477e-8183-1d2f555669ea + :setup: Standalone Instance + :steps: + 1. Get the LDIF directory + 2. Change the server's backup directory to the LDIF directory + 3. Stop the server, and perform a backup + 4. Backup was written to LDIF directory + :expectedresults: + 1. Success + 2. Success + 3. Success + 4. 
Success + """ + + # Get LDIF dir + ldif_dir = topo.standalone.get_ldif_dir() + bak_dir = topo.standalone.get_bak_dir() + log.info("ldif dir: " + ldif_dir + " items: " + str(len(os.listdir(ldif_dir)))) + log.info("bak dir: " + bak_dir + " items: " + str(len(os.listdir(bak_dir)))) + + # Set backup directory to LDIF directory + topo.standalone.config.replace('nsslapd-bakdir', ldif_dir) + time.sleep(.5) + + # Stop the server and take a backup + topo.standalone.stop() + time.sleep(.5) + topo.standalone.db2bak(None) # Bug, bak dir is being pulled from defaults.inf, and not from config + + # Verify backup was written to LDIF directory + log.info("AFTER: ldif dir (new bak dir): " + ldif_dir + " items: " + str(len(os.listdir(ldif_dir)))) + log.info("AFTER: bak dir: " + bak_dir + " items: " + str(len(os.listdir(bak_dir)))) + + assert len(os.listdir(ldif_dir)) + + +if __name__ == '__main__': + # Run isolated + # -s for DEBUG mode + CURRENT_FILE = os.path.realpath(__file__) + pytest.main(["-s", CURRENT_FILE]) diff --git a/dirsrvtests/tests/suites/clu/dsctl_dblib_test.py b/dirsrvtests/tests/suites/clu/dsctl_dblib_test.py new file mode 100644 index 0000000..44316d2 --- /dev/null +++ b/dirsrvtests/tests/suites/clu/dsctl_dblib_test.py @@ -0,0 +1,119 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2021 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. 
+# --- END COPYRIGHT BLOCK --- + +import logging +import pytest +import ldap +import os +from lib389._constants import DEFAULT_SUFFIX +from lib389.backend import DatabaseConfig +from lib389.cli_ctl.dblib import (FakeArgs, dblib_bdb2mdb, dblib_mdb2bdb, dblib_cleanup) +from lib389.idm.user import UserAccounts +from lib389.replica import ReplicationManager +from lib389.topologies import topology_m2 as topo_m2 + + +log = logging.getLogger(__name__) + + +@pytest.fixture +def init_user(topo_m2, request): + """Initialize a user - Delete and re-add test user + """ + s1 = topo_m2.ms["supplier1"] + users = UserAccounts(s1, DEFAULT_SUFFIX) + try: + user_data = {'uid': 'test entry', + 'cn': 'test entry', + 'sn': 'test entry', + 'uidNumber': '3000', + 'gidNumber': '4000', + 'homeDirectory': '/home/test_entry', + 'userPassword': 'foo'} + test_user = users.create(properties=user_data) + except ldap.ALREADY_EXISTS: + pass + except ldap.SERVER_DOWN: + pass + + def fin(): + try: + test_user.delete() + except (ldap.NO_SUCH_OBJECT, ldap.SERVER_DOWN): + pass + + request.addfinalizer(fin) + + +def _check_db(inst, log, impl): + users = UserAccounts(inst, DEFAULT_SUFFIX) + # Cannot use inst..get_db_lib() because it caches the value + assert DatabaseConfig(inst).get_db_lib() == impl + assert users.get('test entry') + + db_files = os.listdir(inst.dbdir) + if inst.ds_paths.db_home_dir is not None and inst.ds_paths.db_home_dir != inst.dbdir: + db_files.extend(os.listdir(inst.ds_paths.db_home_dir)) + mdb_list = ['data.mdb', 'INFO.mdb', 'lock.mdb'] + bdb_list = ['__db.001', 'DBVERSION', '__db.003', 'userRoot', 'log.0000000001', '__db.002'] + mdb_list.sort() + bdb_list.sort() + db_files = sorted(set(db_files)) + log.debug(f"INFO: _check_db db_home={inst.ds_paths.db_home_dir}") + log.debug(f"INFO: _check_db dbdir={inst.dbdir}") + log.debug(f"INFO: _check_db db_files={db_files}") + log.debug(f"INFO: _check_db mdb_list={mdb_list}") + log.debug(f"INFO: _check_db bdb_list={bdb_list}") + if impl == 
'bdb': + assert db_files == bdb_list + assert db_files != mdb_list + else: + assert db_files != bdb_list + assert db_files == mdb_list + + +def test_dblib_migration(topo_m2, init_user): + """ + Verify dsctl dblib xxxxxxx sub commands (migration between bdb and lmdb) + + :id: 5d327c34-e77a-46e5-a8aa-0a552f9bbdef + :setup: Two suppliers Instance + :steps: + 1. Determine current database + 2. Switch to the other database + 3. Check that replication works + :expectedresults: + 1. Success + 2. Success + 3. Success + """ + s1 = topo_m2.ms["supplier1"] + s2 = topo_m2.ms["supplier2"] + db_lib = s1.get_db_lib() + repl = ReplicationManager(DEFAULT_SUFFIX) + users = UserAccounts(s1, DEFAULT_SUFFIX) + assert users.get('test entry') + args = FakeArgs({'tmpdir': None}) + if db_lib == 'bdb': + dblib_bdb2mdb(s1, log, args) + dblib_cleanup(s1, log, args) + _check_db(s1, log, 'mdb') + repl.test_replication_topology([s1, s2]) + dblib_mdb2bdb(s1, log, args) + dblib_cleanup(s1, log, args) + _check_db(s1, log, 'bdb') + repl.test_replication_topology([s1, s2]) + else: + dblib_mdb2bdb(s1, log, args) + dblib_cleanup(s1, log, args) + _check_db(s1, log, 'bdb') + repl.test_replication_topology([s1, s2]) + dblib_bdb2mdb(s1, log, args) + dblib_cleanup(s1, log, args) + _check_db(s1, log, 'mdb') + repl.test_replication_topology([s1, s2]) diff --git a/dirsrvtests/tests/suites/clu/dsctl_tls_test.py b/dirsrvtests/tests/suites/clu/dsctl_tls_test.py new file mode 100644 index 0000000..22360fa --- /dev/null +++ b/dirsrvtests/tests/suites/clu/dsctl_tls_test.py @@ -0,0 +1,92 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2022 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. 
+# --- END COPYRIGHT BLOCK --- +# +import logging +import pytest +import ssl +import os +from lib389.topologies import topology_st as topo +from lib389.nss_ssl import NssSsl + +log = logging.getLogger(__name__) + + +def test_tls_command_returns_error_text(topo): + """CLI commands that called certutil should return the error text from + certutil when something goes wrong, and not the system error code number. + + :id: 7f0c28d0-6e13-4ca4-bec2-4586d56b73f6 + :setup: Standalone Instance + :steps: + 1. Issue invalid "generate key and cert" command, and error text is returned + 2. Issue invalid "delete cert" command, and error text is returned + 3. Issue invalid "import ca cert" command, and error text is returned + 4. Issue invalid "import server cert" command, and error text is returned + 5. Issue invalid "import key and server cert" command, and error text is returned + :expectedresults: + 1. Success + 2. Success + 3. Success + 4. Success + 5. Success + """ + + # dsctl localhost tls generate-server-cert-csr -s "bad" + tls = NssSsl(dirsrv=topo.standalone) + try: + tls.create_rsa_key_and_csr([], "bad") + assert False + except ValueError as e: + assert '255' not in str(e) + assert 'improperly formatted name' in str(e) + + # dsctl localhost tls remove-cert + try: + tls.del_cert("bad") + assert False + except ValueError as e: + assert '255' not in str(e) + assert 'could not find certificate named' in str(e) + + # dsctl localhost tls import-ca + try: + invalid_file = topo.standalone.confdir + '/dse.ldif' + tls.add_cert(nickname="bad", input_file=invalid_file) + assert False + except ValueError as e: + assert '255' not in str(e) + assert 'Unable to load PEM file' in str(e) + + # dsctl localhost tls import-server-cert + try: + invalid_file = topo.standalone.confdir + '/dse.ldif' + tls.import_rsa_crt(crt=invalid_file) + assert False + except ValueError as e: + assert '255' not in str(e) + assert 'error converting ascii to binary' in str(e) + + # dsctl localhost tls 
import-server-key-cert + try: + invalid_file = topo.standalone.confdir + '/dse.ldif' + tls.add_server_key_and_cert(invalid_file, invalid_file) + assert False + except ValueError as e: + assert '255' not in str(e) + if 'OpenSSL 3' in ssl.OPENSSL_VERSION: + assert 'Could not read private key from' in str(e) + else: + assert 'unable to load private key' in str(e) + + +if __name__ == '__main__': + # Run isolated + # -s for DEBUG mode + CURRENT_FILE = os.path.realpath(__file__) + pytest.main(["-s", CURRENT_FILE]) + diff --git a/dirsrvtests/tests/suites/clu/dsidm_account_test.py b/dirsrvtests/tests/suites/clu/dsidm_account_test.py new file mode 100644 index 0000000..7af0758 --- /dev/null +++ b/dirsrvtests/tests/suites/clu/dsidm_account_test.py @@ -0,0 +1,129 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2023 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. +# --- END COPYRIGHT BLOCK --- +# +import ldap +import time +import subprocess +import pytest +import logging +import os + +from lib389 import DEFAULT_SUFFIX +from lib389.cli_idm.account import list, get_dn, lock, unlock, delete, modify, rename, entry_status, \ + subtree_status, reset_password, change_password +from lib389.topologies import topology_st +from lib389.cli_base import FakeArgs +from lib389.utils import ds_is_older +from lib389.idm.user import nsUserAccounts +from . 
import check_value_in_log_and_reset + +pytestmark = pytest.mark.tier0 + +logging.getLogger(__name__).setLevel(logging.DEBUG) +log = logging.getLogger(__name__) + + +@pytest.fixture(scope="function") +def create_test_user(topology_st, request): + log.info('Create test user') + users = nsUserAccounts(topology_st.standalone, DEFAULT_SUFFIX) + test_user = users.create_test_user() + + def fin(): + log.info('Delete test user') + if test_user.exists(): + test_user.delete() + + request.addfinalizer(fin) + + +@pytest.mark.bz1862971 +@pytest.mark.ds4281 +@pytest.mark.skipif(ds_is_older("1.4.2"), reason="Not implemented") +def test_dsidm_account_entry_status_with_lock(topology_st, create_test_user): + """ Test dsidm account entry-status option with account lock/unlock + + :id: d911bbf2-3a65-42a4-ad76-df1114caa396 + :setup: Standalone instance + :steps: + 1. Create user account + 2. Run dsidm account entry status + 3. Run dsidm account lock + 4. Run dsidm account subtree status + 5. Run dsidm account entry status + 6. Run dsidm account unlock + 7. Run dsidm account subtree status + 8. Run dsidm account entry status + :expectedresults: + 1. Success + 2. The state message should be Entry State: activated + 3. Success + 4. The state message should be Entry State: directly locked through nsAccountLock + 5. Success + 6. The state message should be Entry State: activated + 7. Success + 8. 
The state message should be Entry State: activated + """ + + standalone = topology_st.standalone + users = nsUserAccounts(standalone, DEFAULT_SUFFIX) + test_user = users.get('test_user_1000') + + entry_list = ['Entry DN: {}'.format(test_user.dn), + 'Entry Creation Date', + 'Entry Modification Date'] + + state_lock = 'Entry State: directly locked through nsAccountLock' + state_unlock= 'Entry State: activated' + + lock_msg = 'Entry {} is locked'.format(test_user.dn) + unlock_msg = 'Entry {} is unlocked'.format(test_user.dn) + + args = FakeArgs() + args.dn = test_user.dn + args.json = False + args.basedn = DEFAULT_SUFFIX + args.scope = ldap.SCOPE_SUBTREE + args.filter = "(uid=*)" + args.become_inactive_on = False + args.inactive_only = False + args.json = False + + log.info('Test dsidm account entry-status') + entry_status(standalone, DEFAULT_SUFFIX, topology_st.logcap.log, args) + check_value_in_log_and_reset(topology_st, content_list=entry_list, check_value=state_unlock) + + log.info('Test dsidm account lock') + lock(standalone, DEFAULT_SUFFIX, topology_st.logcap.log, args) + check_value_in_log_and_reset(topology_st, check_value=lock_msg) + + log.info('Test dsidm account subtree-status with locked account') + subtree_status(standalone, DEFAULT_SUFFIX, topology_st.logcap.log, args) + check_value_in_log_and_reset(topology_st, content_list=entry_list, check_value=state_lock) + + log.info('Test dsidm account entry-status with locked account') + entry_status(standalone, DEFAULT_SUFFIX, topology_st.logcap.log, args) + check_value_in_log_and_reset(topology_st, content_list=entry_list, check_value=state_lock) + + log.info('Test dsidm account unlock') + unlock(standalone, DEFAULT_SUFFIX, topology_st.logcap.log, args) + check_value_in_log_and_reset(topology_st, check_value=unlock_msg) + + log.info('Test dsidm account subtree-status with unlocked account') + subtree_status(standalone, DEFAULT_SUFFIX, topology_st.logcap.log, args) + check_value_in_log_and_reset(topology_st, 
content_list=entry_list, check_value=state_unlock) + + log.info('Test dsidm account entry-status with unlocked account') + entry_status(standalone, DEFAULT_SUFFIX, topology_st.logcap.log, args) + check_value_in_log_and_reset(topology_st, content_list=entry_list, check_value=state_unlock) + +if __name__ == '__main__': + # Run isolated + # -s for DEBUG mode + CURRENT_FILE = os.path.realpath(__file__) + pytest.main("-s %s" % CURRENT_FILE) diff --git a/dirsrvtests/tests/suites/clu/dsidm_bulk_update_test.py b/dirsrvtests/tests/suites/clu/dsidm_bulk_update_test.py new file mode 100644 index 0000000..7d63665 --- /dev/null +++ b/dirsrvtests/tests/suites/clu/dsidm_bulk_update_test.py @@ -0,0 +1,92 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2023 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. +# --- END COPYRIGHT BLOCK --- +# +import ldap +import logging +import pytest +import os +from lib389._constants import DEFAULT_SUFFIX +from lib389.topologies import topology_st as topo +from lib389.cli_base import FakeArgs +from lib389.cli_idm.account import bulk_update +from lib389.idm.user import UserAccounts + +logging.getLogger(__name__).setLevel(logging.DEBUG) +log = logging.getLogger(__name__) + + +@pytest.fixture(scope="function") +def create_test_users(topo): + users = UserAccounts(topo.standalone, DEFAULT_SUFFIX) + u_range = list(range(5)) + for idx in u_range: + try: + users.create(properties={ + 'uid': f'testuser{idx}', + 'cn': f'testuser{idx}', + 'sn': f'user{idx}', + 'uidNumber': f'{1000 + idx}', + 'gidNumber': f'{1000 + idx}', + 'homeDirectory': f'/home/testuser{idx}' + }) + except ldap.ALREADY_EXISTS: + pass + + +def test_bulk_operations(topo, create_test_users): + """Testing adding, replacing, an removing attribute/values to a bulk set + of users + + :id: c89ff057-2f44-4070-8d42-850257025b2b + :setup: Standalone Instance + :steps: + 1. Bulk add attribute + 2. Bulk replace attribute + 3. 
Bulk delete attribute + :expectedresults: + 1. Success + 2. Success + 3. Success + """ + inst = topo.standalone + + args = FakeArgs() + args.json = False + args.basedn = DEFAULT_SUFFIX + args.scope = ldap.SCOPE_SUBTREE + args.filter = "(uid=*)" + args.stop = False + args.changes = [] + + # Test ADD + args.changes = ['add:objectclass:extensibleObject'] + bulk_update(inst, DEFAULT_SUFFIX, log, args) + users = UserAccounts(inst, DEFAULT_SUFFIX).list() + for user in users: + assert 'extensibleobject' in user.get_attr_vals_utf8_l('objectclass') + + # Test REPLACE + args.changes = ['replace:cn:hello_new_cn'] + bulk_update(inst, DEFAULT_SUFFIX, log, args) + users = UserAccounts(inst, DEFAULT_SUFFIX).list() + for user in users: + assert user.get_attr_val_utf8_l('cn') == "hello_new_cn" + + # Test DELETE + args.changes = ['delete:objectclass:extensibleObject'] + bulk_update(inst, DEFAULT_SUFFIX, log, args) + users = UserAccounts(inst, DEFAULT_SUFFIX).list() + for user in users: + assert 'extensibleobject' not in user.get_attr_vals_utf8_l('objectclass') + + +if __name__ == '__main__': + # Run isolated + # -s for DEBUG mode + CURRENT_FILE = os.path.realpath(__file__) + pytest.main(["-s", CURRENT_FILE]) diff --git a/dirsrvtests/tests/suites/clu/dsidm_config_test.py b/dirsrvtests/tests/suites/clu/dsidm_config_test.py new file mode 100644 index 0000000..1bf5a34 --- /dev/null +++ b/dirsrvtests/tests/suites/clu/dsidm_config_test.py @@ -0,0 +1,218 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2020 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. 
+# --- END COPYRIGHT BLOCK --- +# +import time +import pytest +import logging +import os + +from lib389 import DEFAULT_SUFFIX +from lib389.cli_idm.client_config import sssd_conf, ldap_conf, display +from lib389.plugins import MemberOfPlugin +from lib389.topologies import topology_st +from lib389.cli_base import FakeArgs +from lib389.idm.group import Groups +from lib389.idm.user import nsUserAccounts +from lib389.utils import ds_is_older + +pytestmark = pytest.mark.tier0 + +LOG_FILE = '/tmp/dsidm.log' +logging.getLogger(__name__).setLevel(logging.DEBUG) +log = logging.getLogger(__name__) + + +@pytest.fixture(scope="function") +def set_log_file(request): + fh = logging.FileHandler(LOG_FILE) + fh.setLevel(logging.DEBUG) + log.addHandler(fh) + + def fin(): + log.info('Delete log file') + os.remove(LOG_FILE) + + request.addfinalizer(fin) + + +def check_value_in_log_and_reset(content_list, content_list2=None, check_value=None): + with open(LOG_FILE, 'r+') as f: + file_content = f.read() + if content_list2 is not None: + log.info('Check if content is present in output') + for item in content_list + content_list2: + assert item.lower() in file_content.lower() + else: + log.info('Check if content is present in output') + for item in content_list: + assert item.lower() in file_content.lower() + + if check_value is not None: + log.info('Check if value is present in output') + assert check_value in file_content + + log.info('Reset log file for next test') + f.truncate(0) + + +@pytest.mark.skipif(ds_is_older("1.4.2"), reason="Not implemented") +def test_dsidm_config_sssd(topology_st, set_log_file): + """ Test dsidm creation of sssd.conf content + + :id: 77812ba6-b133-40f4-91a7-13309618f24d + :setup: Standalone instance + :steps: + 1. Run dsidm client_config sssd.conf + 2. Enable MemberOfPlugin + 3. Run dsidm client_config sssd.conf with allowed group + :expectedresults: + 1. Success + 2. Success + 3. 
Success + """ + + standalone = topology_st.standalone + + sssd_content_list = ['Generated by 389 Directory Server - dsidm', + 'id_provider = ldap', + 'auth_provider = ldap', + 'access_provider = ldap', + 'chpass_provider = ldap', + 'ldap_search_base = ' + DEFAULT_SUFFIX, + 'ldap_uri = ' + standalone.ldapuri, + 'ldap_user_member_of = memberof', + 'ignore_group_members = False', + '[sssd]', + 'services = nss, pam, ssh, sudo', + 'config_file_version = 2', + 'domains = ldap', + '[nss]', + 'homedir_substring = /home'] + + schema = 'ldap_schema = rfc2307' + args = FakeArgs() + args.allowed_group = None + + log.info('Create sssd.conf content') + sssd_conf(standalone, DEFAULT_SUFFIX, log, args) + + log.info('Check if config creation was successful') + check_value_in_log_and_reset(sssd_content_list, check_value=schema) + + log.info('Now we test allowed_group argument') + log.info('Enable MemberOf plugin') + plugin = MemberOfPlugin(standalone) + plugin.enable() + standalone.restart() + + log.info('Create test group') + groups = Groups(standalone, DEFAULT_SUFFIX) + test_group = groups.create(properties={"cn": "new_group", + "description": "testgroup"}) + + log.info('Create sssd.conf content with allowed group') + filter_msg = ['ldap_access_filter = (memberOf={})'.format(test_group.dn), 'ldap_schema = rfc2307bis'] + args.allowed_group = test_group.rdn + sssd_conf(standalone, DEFAULT_SUFFIX, log, args) + + log.info('Check if config creation was successful') + check_value_in_log_and_reset(sssd_content_list, filter_msg) + + +@pytest.mark.skipif(ds_is_older("1.4.2"), reason="Not implemented") +def test_dsidm_config_ldap(topology_st, set_log_file): + """ Test dsidm creation of ldap.conf content + + :id: 29ffcc91-9104-4c90-bcdf-0f6a4082322c + :setup: Standalone instance + :steps: + 1. Create instance + 2. Run dsidm client_config ldap.conf + :expectedresults: + 1. Success + 2. 
Success + """ + + standalone = topology_st.standalone + args = FakeArgs() + + ldap_content_list = ['OpenLDAP client configuration', + 'Generated by 389 Directory Server - dsidm', + 'BASE ' + DEFAULT_SUFFIX, + 'URI ' + standalone.ldapuri, + 'DEREF never', + 'TLS_CACERTDIR /etc/openldap/certs'] + + log.info('Create ldap.conf content') + ldap_conf(standalone, DEFAULT_SUFFIX, log, args) + + log.info('Check if config creation was successful') + check_value_in_log_and_reset(ldap_content_list) + + +@pytest.mark.skipif(ds_is_older("1.4.2"), reason="Not implemented") +def test_dsidm_config_display(topology_st, set_log_file): + """ Test dsidm display option + + :id: 6e888ae2-8835-44d5-846b-e971d76aa461 + :setup: Standalone instance + :steps: + 1. Run dsidm client_config display + 2. Enable MemberOfPlugin + 3. Run dsidm client_config display with MemberOfPlugin + :expectedresults: + 1. Success + 2. Success + 3. Success + """ + + standalone = topology_st.standalone + users = nsUserAccounts(standalone, DEFAULT_SUFFIX) + groups = Groups(standalone, DEFAULT_SUFFIX) + + display_content_list = ['ldap_uri = ' + standalone.ldapuri, + 'ldap_uri = ldaps:///dc%3Dexample%2Cdc%3Dcom', + 'group_basedn = ' + groups._basedn, + 'basedn = ' + DEFAULT_SUFFIX, + 'user_basedn = ' + users._basedn, + 'user_filter = (&(objectclass=nsPerson)(objectclass=nsAccount)(objectclass=nsOrgPerson)' + '(objectclass=posixAccount))', + 'unique id = nsUniqueId', + 'group member attribute = member', + 'user rdn = uid', + 'user identifier = uid', + 'group_filter = (&(objectclass=groupOfNames))', + 'group rdn = cn'] + + schema_type = 'rfc2307' + args = FakeArgs() + + log.info('Test dsidm display option') + display(standalone, DEFAULT_SUFFIX, log, args) + + log.info('Check if display option was successful') + check_value_in_log_and_reset(display_content_list, check_value=schema_type) + + log.info('Enable MemberOf plugin') + plugin = MemberOfPlugin(standalone) + plugin.enable() + standalone.restart() + + 
log.info('Test dsidm display option with MemberOf plugin') + display(standalone, DEFAULT_SUFFIX, log, args) + + log.info('Check if display option was successful with MemberOf plugin enabled') + schema_type = 'rfc2307bis' + check_value_in_log_and_reset(display_content_list, check_value=schema_type) + + +if __name__ == '__main__': + # Run isolated + # -s for DEBUG mode + CURRENT_FILE = os.path.realpath(__file__) + pytest.main("-s %s" % CURRENT_FILE) diff --git a/dirsrvtests/tests/suites/clu/dsidm_init_test.py b/dirsrvtests/tests/suites/clu/dsidm_init_test.py new file mode 100644 index 0000000..a905d11 --- /dev/null +++ b/dirsrvtests/tests/suites/clu/dsidm_init_test.py @@ -0,0 +1,86 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2022 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. +# --- END COPYRIGHT BLOCK --- +# + +import pytest + +from lib389.utils import * +from lib389.topologies import topology_no_sample +from lib389._constants import * +from lib389.cli_base import FakeArgs +from lib389.cli_idm.initialise import initialise +from lib389.cli_base import FakeArgs +from lib389.idm.user import nsUserAccounts +from lib389.idm.group import Groups +from lib389.idm.organizationalunit import OrganizationalUnits + + +pytestmark = pytest.mark.tier0 + + +DEBUGGING = os.getenv("DEBUGGING", default=False) +if DEBUGGING: + logging.getLogger(__name__).setLevel(logging.DEBUG) +else: + logging.getLogger(__name__).setLevel(logging.INFO) +log = logging.getLogger(__name__) + + +@pytest.mark.ds4281 +@pytest.mark.skipif(ds_is_older("1.4.2"), reason="Not implemented") +def test_dsidm_initialise(topology_no_sample): + """Check that keep alive entries are created when initializing a master from another one + + :id: eefb59fc-4fdd-4d68-b6c4-b067eb52e881 + :setup: Standalone instance + :steps: + 1. Create instance without sample entries + 2. Check there are no sample entries + 3. Run dsidm initialise + 4. 
Check there are sample entries created + 5. Run dsidm initialise again + :expectedresults: + 1. Success + 2. Success + 3. Success + 4. Success + 5. Exception should be raised that entries already exist + """ + + standalone = topology_no_sample.standalone + + log.info('Check there are no sample entries before running initialise') + users = nsUserAccounts(standalone, DEFAULT_SUFFIX) + groups = Groups(standalone, DEFAULT_SUFFIX) + ous = OrganizationalUnits(standalone, DEFAULT_SUFFIX) + assert not users.exists('demo_user') + assert not groups.exists('demo_group') + assert not ous.exists('Permissions') + assert not ous.exists('Services') + + log.info('Run dsidm initialise') + args = FakeArgs() + args.version = INSTALL_LATEST_CONFIG + initialise(standalone, DEFAULT_SUFFIX, topology_no_sample.logcap.log, args) + + log.info('Check there are sample entries') + assert users.exists('demo_user') + assert groups.exists('demo_group') + assert ous.exists('Permissions') + assert ous.exists('Services') + + log.info('Try to initialise again and exception should be raised') + with pytest.raises(ldap.ALREADY_EXISTS): + initialise(standalone, DEFAULT_SUFFIX, topology_no_sample.logcap.log, args) + + +if __name__ == '__main__': + # Run isolated + # -s for DEBUG mode + CURRENT_FILE = os.path.realpath(__file__) + pytest.main("-s %s" % CURRENT_FILE) diff --git a/dirsrvtests/tests/suites/clu/dsidm_organizational_unit_test.py b/dirsrvtests/tests/suites/clu/dsidm_organizational_unit_test.py new file mode 100644 index 0000000..e0b08ab --- /dev/null +++ b/dirsrvtests/tests/suites/clu/dsidm_organizational_unit_test.py @@ -0,0 +1,84 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2020 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. 
+# --- END COPYRIGHT BLOCK --- +# +import time +import subprocess +import pytest +import logging +import os + +from lib389 import DEFAULT_SUFFIX +from lib389.cli_idm.organizationalunit import get, get_dn, create, modify, delete, list, rename +from lib389.topologies import topology_st +from lib389.cli_base import FakeArgs +from lib389.utils import ds_is_older +from lib389.idm.organizationalunit import OrganizationalUnits +from . import check_value_in_log_and_reset + +pytestmark = pytest.mark.tier0 + +logging.getLogger(__name__).setLevel(logging.DEBUG) +log = logging.getLogger(__name__) + + +@pytest.fixture(scope="function") +def create_test_ou(topology_st, request): + log.info('Create organizational unit') + ous = OrganizationalUnits(topology_st.standalone, DEFAULT_SUFFIX) + test_ou = ous.create(properties={ + 'ou': 'toDelete', + 'description': 'Test OU', + }) + + def fin(): + log.info('Delete organizational unit') + if test_ou.exists(): + test_ou.delete() + + request.addfinalizer(fin) + + +@pytest.mark.bz1866294 +@pytest.mark.ds4284 +@pytest.mark.skipif(ds_is_older("1.4.2"), reason="Not implemented") +@pytest.mark.xfail(ds_is_older("1.4.3.16"), reason="Might fail because of bz1866294") +def test_dsidm_organizational_unit_delete(topology_st, create_test_ou): + """ Test dsidm organizationalunit delete + + :id: 5d35a5ee-85c2-4b83-9101-938ba7732ccd + :customerscenario: True + :setup: Standalone instance + :steps: + 1. Run dsidm organizationalunit delete + 2. Check the ou is deleted + :expectedresults: + 1. Success + 2. 
Entry is deleted + """ + + standalone = topology_st.standalone + ous = OrganizationalUnits(standalone, DEFAULT_SUFFIX) + test_ou = ous.get('toDelete') + delete_value = 'Successfully deleted {}'.format(test_ou.dn) + + args = FakeArgs() + args.dn = test_ou.dn + + log.info('Test dsidm organizationalunit delete') + delete(standalone, DEFAULT_SUFFIX, topology_st.logcap.log, args, warn=False) + check_value_in_log_and_reset(topology_st, check_value=delete_value) + + log.info('Check the entry is deleted') + assert not test_ou.exists() + + +if __name__ == '__main__': + # Run isolated + # -s for DEBUG mode + CURRENT_FILE = os.path.realpath(__file__) + pytest.main("-s %s" % CURRENT_FILE) diff --git a/dirsrvtests/tests/suites/clu/dsidm_services_test.py b/dirsrvtests/tests/suites/clu/dsidm_services_test.py new file mode 100644 index 0000000..e3a9916 --- /dev/null +++ b/dirsrvtests/tests/suites/clu/dsidm_services_test.py @@ -0,0 +1,407 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2021 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. +# --- END COPYRIGHT BLOCK --- +# +import time +import subprocess +import pytest +import logging +import os + +from lib389 import DEFAULT_SUFFIX +from lib389.cli_idm.service import list, get, get_dn, create, delete, modify, rename +from lib389.topologies import topology_st +from lib389.cli_base import FakeArgs +from lib389.utils import ds_is_older, ensure_str +from lib389.idm.services import ServiceAccounts +from . 
import check_value_in_log_and_reset + +pytestmark = pytest.mark.tier0 + +logging.getLogger(__name__).setLevel(logging.DEBUG) +log = logging.getLogger(__name__) + + +@pytest.fixture(scope="function") +def create_test_service(topology_st, request): + service_name = 'test_service' + services = ServiceAccounts(topology_st.standalone, DEFAULT_SUFFIX) + + log.info('Create test service') + if services.exists(service_name): + test_service = services.get(service_name) + test_service.delete() + else: + test_service = services.create_test_service() + + def fin(): + log.info('Delete test service') + if test_service.exists(): + test_service.delete() + + request.addfinalizer(fin) + + +@pytest.mark.skipif(ds_is_older("2.1.0"), reason="Not implemented") +def test_dsidm_service_list(topology_st, create_test_service): + """ Test dsidm service list option + + :id: 218aa060-51e1-11ec-8a70-3497f624ea11 + :setup: Standalone instance + :steps: + 1. Run dsidm service list option without json + 2. Check the output content is correct + 3. Run dsidm service list option with json + 4. Check the json content is correct + 5. Delete the service + 6. Check the service is not in the list with json + 7. Check the service is not in the list without json + :expectedresults: + 1. Success + 2. Success + 3. Success + 4. Success + 5. Success + 6. Success + 7. 
Success + """ + + standalone = topology_st.standalone + args = FakeArgs() + args.json = False + service_value = 'test_service' + json_list = ['type', + 'list', + 'items'] + + log.info('Empty the log file to prevent false data to check about service') + topology_st.logcap.flush() + + log.info('Test dsidm service list without json') + list(standalone, DEFAULT_SUFFIX, topology_st.logcap.log, args) + check_value_in_log_and_reset(topology_st, check_value=service_value) + + log.info('Test dsidm service list with json') + args.json = True + list(standalone, DEFAULT_SUFFIX, topology_st.logcap.log, args) + check_value_in_log_and_reset(topology_st, content_list=json_list, check_value=service_value) + + log.info('Delete the service') + services = ServiceAccounts(topology_st.standalone, DEFAULT_SUFFIX) + testservice = services.get(service_value) + testservice.delete() + + log.info('Test empty dsidm service list with json') + list(standalone, DEFAULT_SUFFIX, topology_st.logcap.log, args) + check_value_in_log_and_reset(topology_st, content_list=json_list, check_value_not=service_value) + + log.info('Test empty dsidm service list without json') + args.json = False + list(standalone, DEFAULT_SUFFIX, topology_st.logcap.log, args) + check_value_in_log_and_reset(topology_st, check_value_not=service_value) + + +@pytest.mark.skipif(ds_is_older("2.1.0"), reason="Not implemented") +def test_dsidm_service_get_rdn(topology_st, create_test_service): + """ Test dsidm service get option + + :id: 294ef774-51e1-11ec-a2c7-3497f624ea11 + :setup: Standalone instance + :steps: + 1. Run dsidm get option for created service with json + 2. Check the output content is correct + 3. Run dsidm get option for created service without json + 4. Check the json content is correct + :expectedresults: + 1. Success + 2. Success + 3. Success + 4. 
Success + """ + + standalone = topology_st.standalone + services = ServiceAccounts(topology_st.standalone, DEFAULT_SUFFIX) + testservice = services.get('test_service') + + service_content = [f'dn: {testservice.dn}', + f'cn: {testservice.rdn}', + 'description: Test Service', + 'objectClass: top', + 'objectClass: nsAccount', + 'objectClass: nsMemberOf'] + + json_content = ['attrs', + 'objectclass', + 'top', + 'nsAccount', + 'nsMemberOf', + testservice.rdn, + 'cn', + 'description', + 'creatorsname', + 'cn=directory manager', + 'modifiersname', + 'createtimestamp', + 'modifytimestamp', + 'nsuniqueid', + 'parentid', + 'entryid', + 'entrydn', + testservice.dn] + + args = FakeArgs() + args.json = False + args.selector = 'test_service' + + log.info('Empty the log file to prevent false data to check about service') + topology_st.logcap.flush() + + log.info('Test dsidm service get without json') + get(standalone, DEFAULT_SUFFIX, topology_st.logcap.log, args) + check_value_in_log_and_reset(topology_st, content_list=service_content) + + log.info('Test dsidm service get with json') + args.json = True + get(standalone, DEFAULT_SUFFIX, topology_st.logcap.log, args) + check_value_in_log_and_reset(topology_st, content_list=json_content) + + +@pytest.mark.bz1893667 +@pytest.mark.xfail(reason="Will fail because of bz1893667") +@pytest.mark.skipif(ds_is_older("2.1.0"), reason="Not implemented") +def test_dsidm_service_get_dn(topology_st, create_test_service): + """ Test dsidm service get_dn option + + :id: 2e4c8f98-51e1-11ec-b472-3497f624ea11 + :setup: Standalone instance + :steps: + 1. Run dsidm service get_dn for created service + 2. Check the output content is correct + :expectedresults: + 1. Success + 2. 
Success + """ + + standalone = topology_st.standalone + services = ServiceAccounts(standalone, DEFAULT_SUFFIX) + test_service = services.get('test_service') + args = FakeArgs() + args.dn = test_service.dn + + log.info('Empty the log file to prevent false data to check about service') + topology_st.logcap.flush() + + log.info('Test dsidm service get_dn without json') + get_dn(standalone, DEFAULT_SUFFIX, topology_st.logcap.log, args) + # check_value_in_log_and_reset(topology_st, content_list=service_content) + # The check_value_in_log_and_reset will have to be updated accordinly after bz1893667 is fixed + # because now I can't determine the output + + +@pytest.mark.skipif(ds_is_older("2.1.0"), reason="Not implemented") +def test_dsidm_service_create(topology_st): + """ Test dsidm service create option + + :id: 338efbc6-51e1-11ec-a83a-3497f624ea11 + :setup: Standalone instance + :steps: + 1. Run dsidm service create + 2. Check that a message is provided on creation + 3. Check that created service exists + :expectedresults: + 1. Success + 2. Success + 3. Success + """ + + standalone = topology_st.standalone + service_name = 'new_service' + output = f'Successfully created {service_name}' + + args = FakeArgs() + args.cn = service_name + args.description = service_name + + log.info('Test dsidm service create') + create(standalone, DEFAULT_SUFFIX, topology_st.logcap.log, args) + check_value_in_log_and_reset(topology_st, check_value=output) + + log.info('Check that service is present') + services = ServiceAccounts(standalone, DEFAULT_SUFFIX) + new_service = services.get(service_name) + assert new_service.exists() + + log.info('Clean up for next test') + new_service.delete() + + +@pytest.mark.skipif(ds_is_older("2.1.0"), reason="Not implemented") +def test_dsidm_service_delete(topology_st, create_test_service): + """ Test dsidm service delete option + + :id: 3b382a96-51e1-11ec-a1c2-3497f624ea11 + :setup: Standalone instance + :steps: + 1. 
Run dsidm service delete on created service + 2. Check that a message is provided on deletion + 3. Check that service does not exist + :expectedresults: + 1. Success + 2. Success + 3. Success + """ + + standalone = topology_st.standalone + services = ServiceAccounts(standalone, DEFAULT_SUFFIX) + test_service = services.get('test_service') + output = f'Successfully deleted {test_service.dn}' + + args = FakeArgs() + args.dn = test_service.dn + + log.info('Test dsidm service delete') + delete(standalone, DEFAULT_SUFFIX, topology_st.logcap.log, args, warn=False) + check_value_in_log_and_reset(topology_st, check_value=output) + + log.info('Check that service does not exist') + assert not test_service.exists() + + +@pytest.mark.skipif(ds_is_older("2.1.0"), reason="Not implemented") +def test_dsidm_service_modify(topology_st, create_test_service): + """ Test dsidm service modify add, replace, delete option + + :id: 4023ef22-51e1-11ec-93c5-3497f624ea11 + :setup: Standalone instance + :steps: + 1. Run dsidm service modify replace description value + 2. Run dsidm service modify add seeAlso attribute to service + 3. Run dsidm service modify delete for seeAlso attribute + :expectedresults: + 1. description value is replaced with new text + 2. seeAlso attribute is present + 3. 
seeAlso attribute is deleted + """ + + standalone = topology_st.standalone + services = ServiceAccounts(standalone, DEFAULT_SUFFIX) + test_service = services.get('test_service') + output = f'Successfully modified {test_service.dn}' + + args = FakeArgs() + args.selector = 'test_service' + args.changes = ['replace:description:Test Service Modified'] + + log.info('Test dsidm service modify replace') + modify(standalone, DEFAULT_SUFFIX, topology_st.logcap.log, args, warn=False) + check_value_in_log_and_reset(topology_st, check_value=output) + + log.info('Test dsidm service modify add') + args.changes = [f'add:seeAlso:ou=services,{DEFAULT_SUFFIX}'] + modify(standalone, DEFAULT_SUFFIX, topology_st.logcap.log, args, warn=False) + check_value_in_log_and_reset(topology_st, check_value=output) + assert test_service.present('seeAlso', f'ou=services,{DEFAULT_SUFFIX}') + + log.info('Test dsidm service modify delete') + args.changes = [f'delete:seeAlso:ou=services,{DEFAULT_SUFFIX}'] + modify(standalone, DEFAULT_SUFFIX, topology_st.logcap.log, args, warn=False) + check_value_in_log_and_reset(topology_st, check_value=output) + assert not test_service.present('seeAlso', f'ou=services,{DEFAULT_SUFFIX}') + + +@pytest.mark.skipif(ds_is_older("1.4.2"), reason="Not implemented") +def test_dsidm_service_rename_keep_old_rdn(topology_st, create_test_service): + """ Test dsidm service rename option with keep-old-rdn + + :id: 44cc6b08-51e1-11ec-89e7-3497f624ea11 + :setup: Standalone instance + :steps: + 1. Run dsidm service rename option with keep-old-rdn + 2. Check the service does have another cn attribute with the old rdn + 3. Check the old service is deleted + :expectedresults: + 1. Success + 2. Success + 3. 
Success + """ + + standalone = topology_st.standalone + services = ServiceAccounts(standalone, DEFAULT_SUFFIX) + test_service = services.get('test_service') + + args = FakeArgs() + args.selector = test_service.rdn + args.new_name = 'my_service' + args.keep_old_rdn = True + + log.info('Test dsidm service rename') + rename(standalone, DEFAULT_SUFFIX, topology_st.logcap.log, args) + my_service = services.get(args.new_name) + output = f'Successfully renamed to {my_service.dn}' + check_value_in_log_and_reset(topology_st, check_value=output) + + log.info('my_service should have cn attribute with the old rdn') + assert my_service.present('cn', 'test_service') + assert my_service.get_attr_val_utf8('cn') == 'test_service' + assert my_service.get_attr_val_utf8('description') == 'Test Service' + + log.info('Old service dn should not exist') + assert not test_service.exists() + + log.info('Clean up') + my_service.delete() + + +@pytest.mark.skipif(ds_is_older("1.4.2"), reason="Not implemented") +def test_dsidm_service_rename(topology_st, create_test_service): + """ Test dsidm service rename option + + :id: 4a13ea64-51e1-11ec-b3ff-3497f624ea11 + :setup: Standalone instance + :steps: + 1. Run dsidm service rename option on created service + 2. Check the service does not have another cn attribute with the old rdn + 3. Check the old service is deleted + :expectedresults: + 1. Success + 2. Success + 3. 
Success + """ + + standalone = topology_st.standalone + services = ServiceAccounts(standalone, DEFAULT_SUFFIX) + test_service = services.get('test_service') + + args = FakeArgs() + args.selector = test_service.rdn + args.new_name = 'my_service' + args.keep_old_rdn = False + + log.info('Test dsidm service rename') + args.new_name = 'my_service' + rename(standalone, DEFAULT_SUFFIX, topology_st.logcap.log, args) + my_service = services.get(args.new_name) + output = f'Successfully renamed to {my_service.dn}' + check_value_in_log_and_reset(topology_st, check_value=output) + + log.info('New service should not have cn attribute with the old rdn') + assert not my_service.present('cn', 'test_service') + assert my_service.get_attr_val_utf8('cn') == 'my_service' + assert my_service.get_attr_val_utf8('description') == 'Test Service' + + log.info('Old service dn should not exist.') + assert not test_service.exists() + + log.info('Clean up') + my_service.delete() + + +if __name__ == '__main__': + # Run isolated + # -s for DEBUG mode + CURRENT_FILE = os.path.realpath(__file__) + pytest.main("-s %s" % CURRENT_FILE) diff --git a/dirsrvtests/tests/suites/clu/dsidm_user_test.py b/dirsrvtests/tests/suites/clu/dsidm_user_test.py new file mode 100644 index 0000000..ed6114f --- /dev/null +++ b/dirsrvtests/tests/suites/clu/dsidm_user_test.py @@ -0,0 +1,426 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2021 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. +# --- END COPYRIGHT BLOCK --- +# +import time +import subprocess +import pytest +import logging +import os + +from lib389 import DEFAULT_SUFFIX +from lib389.cli_idm.user import list, get, get_dn, create, delete, modify, rename +from lib389.topologies import topology_st +from lib389.cli_base import FakeArgs +from lib389.utils import ds_is_older, ensure_str +from lib389.idm.user import nsUserAccounts +from . 
import check_value_in_log_and_reset + +pytestmark = pytest.mark.tier0 + +logging.getLogger(__name__).setLevel(logging.DEBUG) +log = logging.getLogger(__name__) + + +@pytest.fixture(scope="function") +def create_test_user(topology_st, request): + user_name = 'test_user_1000' + users = nsUserAccounts(topology_st.standalone, DEFAULT_SUFFIX) + + log.info('Create test user') + if users.exists(user_name): + test_user = users.get(user_name) + test_user.delete() + else: + test_user = users.create_test_user() + + def fin(): + log.info('Delete test user') + if test_user.exists(): + test_user.delete() + + request.addfinalizer(fin) + + +@pytest.mark.skipif(ds_is_older("1.4.2"), reason="Not implemented") +def test_dsidm_user_list(topology_st, create_test_user): + """ Test dsidm user list option + + :id: a7400ac2-b629-4507-bc05-c6402a5b437b + :setup: Standalone instance + :steps: + 1. Run dsidm user list option without json + 2. Check the output content is correct + 3. Run dsidm user list option with json + 4. Check the json content is correct + 5. Delete the user + 6. Check the user is not in the list with json + 7. Check the user is not in the list without json + :expectedresults: + 1. Success + 2. Success + 3. Success + 4. Success + 5. Success + 6. Success + 7. 
Success + """ + + standalone = topology_st.standalone + args = FakeArgs() + args.json = False + user_value = 'test_user_1000' + json_list = ['type', + 'list', + 'items'] + + + log.info('Empty the log file to prevent false data to check about user') + topology_st.logcap.flush() + + log.info('Test dsidm user list without json') + list(standalone, DEFAULT_SUFFIX, topology_st.logcap.log, args) + check_value_in_log_and_reset(topology_st, check_value=user_value) + + log.info('Test dsidm user list with json') + args.json = True + list(standalone, DEFAULT_SUFFIX, topology_st.logcap.log, args) + check_value_in_log_and_reset(topology_st, content_list=json_list, check_value=user_value) + + log.info('Delete the user') + users = nsUserAccounts(topology_st.standalone, DEFAULT_SUFFIX) + testuser = users.get(user_value) + testuser.delete() + + log.info('Test empty dsidm user list with json') + list(standalone, DEFAULT_SUFFIX, topology_st.logcap.log, args) + check_value_in_log_and_reset(topology_st, content_list=json_list, check_value_not=user_value) + + log.info('Test empty dsidm user list without json') + args.json = False + list(standalone, DEFAULT_SUFFIX, topology_st.logcap.log, args) + check_value_in_log_and_reset(topology_st, check_value_not=user_value) + + +@pytest.mark.skipif(ds_is_older("1.4.2"), reason="Not implemented") +def test_dsidm_user_get_rdn(topology_st, create_test_user): + """ Test dsidm user get option + + :id: 8c7247cd-7588-45d3-817c-ac5a9f135b32 + :setup: Standalone instance + :steps: + 1. Run dsidm get option for created user with json + 2. Check the output content is correct + 3. Run dsidm get option for created user without json + 4. Check the json content is correct + :expectedresults: + 1. Success + 2. Success + 3. Success + 4. 
Success + """ + + standalone = topology_st.standalone + users = nsUserAccounts(topology_st.standalone, DEFAULT_SUFFIX) + testuser = users.get('test_user_1000') + + user_content = ['dn: {}'.format(testuser.dn), + 'cn: {}'.format(testuser.rdn), + 'displayName: {}'.format(testuser.rdn), + 'gidNumber: 2000', + 'homeDirectory: /home/{}'.format(testuser.rdn), + 'objectClass: top', + 'objectClass: nsPerson', + 'objectClass: nsAccount', + 'objectClass: nsOrgPerson', + 'objectClass: posixAccount', + 'uid: {}'.format(testuser.rdn), + 'uidNumber: 1000'] + + json_content = ['attrs', + 'objectclass', + 'top', + 'nsPerson', + 'nsAccount', + 'nsOrgPerson', + 'posixAccount', + 'uid', + testuser.rdn, + 'cn', + 'displayname', + 'uidnumber', + 'gidnumber', + '2000', + 'homedirectory', + '/home/{}'.format(testuser.rdn), + 'creatorsname', + 'cn=directory manager', + 'modifiersname', + 'createtimestamp', + 'modifytimestamp', + 'nsuniqueid', + 'parentid', + 'entryid', + 'entrydn', + testuser.dn] + + args = FakeArgs() + args.json = False + args.selector = 'test_user_1000' + + log.info('Empty the log file to prevent false data to check about user') + topology_st.logcap.flush() + + log.info('Test dsidm user get without json') + get(standalone, DEFAULT_SUFFIX, topology_st.logcap.log, args) + check_value_in_log_and_reset(topology_st, content_list=user_content) + + log.info('Test dsidm user get with json') + args.json = True + get(standalone, DEFAULT_SUFFIX, topology_st.logcap.log, args) + check_value_in_log_and_reset(topology_st, content_list=json_content) + + +@pytest.mark.bz1893667 +@pytest.mark.xfail(reason="Will fail because of bz1893667") +@pytest.mark.skipif(ds_is_older("1.4.2"), reason="Not implemented") +def test_dsidm_user_get_dn(topology_st, create_test_user): + """ Test dsidm user get_dn option + + :id: 787bf278-87c3-402e-936e-6161799d098d + :setup: Standalone instance + :steps: + 1. Run dsidm user get_dn for created user + 2. 
Check the output content is correct + :expectedresults: + 1. Success + 2. Success + """ + + standalone = topology_st.standalone + users = nsUserAccounts(standalone, DEFAULT_SUFFIX) + test_user = users.get('test_user_1000') + args = FakeArgs() + args.dn = test_user.dn + + log.info('Empty the log file to prevent false data to check about user') + topology_st.logcap.flush() + + log.info('Test dsidm user get_dn without json') + get_dn(standalone, DEFAULT_SUFFIX, topology_st.logcap.log, args) + # check_value_in_log_and_reset(topology_st, content_list=user_content) + # The check_value_in_log_and_reset will have to be updated accordinly after bz1893667 is fixed + # because now I can't determine the output + + +@pytest.mark.skipif(ds_is_older("1.4.2"), reason="Not implemented") +def test_dsidm_user_create(topology_st): + """ Test dsidm user create option + + :id: 862f5875-11fd-4e8e-92c1-397010386eb8 + :setup: Standalone instance + :steps: + 1. Run dsidm user create + 2. Check that a message is provided on creation + 3. Check that created user exists + :expectedresults: + 1. Success + 2. Success + 3. 
Success + """ + + standalone = topology_st.standalone + user_name = 'new_user' + output = 'Successfully created {}'.format(user_name) + + args = FakeArgs() + args.uid = user_name + args.cn = user_name + args.displayName = user_name + args.uidNumber = '1030' + args.gidNumber = '2030' + args.homeDirectory = '/home/{}'.format(user_name) + + log.info('Test dsidm user create') + create(standalone, DEFAULT_SUFFIX, topology_st.logcap.log, args) + check_value_in_log_and_reset(topology_st, check_value=output) + + log.info('Check that user is present') + users = nsUserAccounts(standalone, DEFAULT_SUFFIX) + new_user = users.get(user_name) + assert new_user.exists() + + log.info('Clean up for next test') + new_user.delete() + + +@pytest.mark.skipif(ds_is_older("1.4.2"), reason="Not implemented") +def test_dsidm_user_delete(topology_st, create_test_user): + """ Test dsidm user delete option + + :id: 3704dc3a-9787-4f74-aaa8-45f38e4a6a52 + :setup: Standalone instance + :steps: + 1. Run dsidm user delete on created user + 2. Check that a message is provided on deletion + 3. Check that user does not exist + :expectedresults: + 1. Success + 2. Success + 3. Success + """ + + standalone = topology_st.standalone + users = nsUserAccounts(standalone, DEFAULT_SUFFIX) + test_user = users.get('test_user_1000') + output = 'Successfully deleted {}'.format(test_user.dn) + + args = FakeArgs() + args.dn = test_user.dn + + log.info('Test dsidm user delete') + delete(standalone, DEFAULT_SUFFIX, topology_st.logcap.log, args, warn=False) + check_value_in_log_and_reset(topology_st, check_value=output) + + log.info('Check that user does not exist') + assert not test_user.exists() + + +@pytest.mark.skipif(ds_is_older("1.4.2"), reason="Not implemented") +def test_dsidm_user_modify(topology_st, create_test_user): + """ Test dsidm user modify add, replace, delete option + + :id: 7a27be19-1a63-44d0-b11b-f877e06e1a9b + :setup: Standalone instance + :steps: + 1. Run dsidm user modify replace cn value + 2. 
Run dsidm user modify add telephoneNumber attribute to user + 3. Run dsidm user modify delete for telephoneNumber attribute + :expectedresults: + 1. cn value is replaced with new name + 2. telephoneNumber attribute is present + 3. telephoneNumber attribute is deleted + """ + + standalone = topology_st.standalone + users = nsUserAccounts(standalone, DEFAULT_SUFFIX) + test_user = users.get('test_user_1000') + output = 'Successfully modified {}'.format(test_user.dn) + + args = FakeArgs() + args.selector = 'test_user_1000' + args.changes = ['replace:cn:test'] + + log.info('Test dsidm user modify replace') + modify(standalone, DEFAULT_SUFFIX, topology_st.logcap.log, args, warn=False) + check_value_in_log_and_reset(topology_st, check_value=output) + + log.info('Test dsidm user modify add') + args.changes = ['add:telephoneNumber:1234567890'] + modify(standalone, DEFAULT_SUFFIX, topology_st.logcap.log, args, warn=False) + check_value_in_log_and_reset(topology_st, check_value=output) + assert test_user.present('telephoneNumber', '1234567890') + + log.info('Test dsidm user modify delete') + args.changes = ['delete:telephoneNumber:1234567890'] + modify(standalone, DEFAULT_SUFFIX, topology_st.logcap.log, args, warn=False) + check_value_in_log_and_reset(topology_st, check_value=output) + assert not test_user.present('telephoneNumber', '1234567890') + + +@pytest.mark.skipif(ds_is_older("1.4.2"), reason="Not implemented") +def test_dsidm_user_rename_keep_old_rdn(topology_st, create_test_user): + """ Test dsidm user rename option with keep-old-rdn + + :id: 3fd0827c-ab5e-4586-9493-55bc5076a887 + :setup: Standalone instance + :steps: + 1. Run dsidm user rename option with keep-old-rdn + 2. Check the user does have another uid attribute with the old rdn + 3. Check the old user is deleted + :expectedresults: + 1. Success + 2. Success + 3. 
Success + """ + + standalone = topology_st.standalone + users = nsUserAccounts(standalone, DEFAULT_SUFFIX) + test_user = users.get('test_user_1000') + + args = FakeArgs() + args.selector = test_user.rdn + args.new_name = 'my_user' + args.keep_old_rdn = True + + log.info('Test dsidm user rename') + rename(standalone, DEFAULT_SUFFIX, topology_st.logcap.log, args) + my_user = users.get(args.new_name) + output = 'Successfully renamed to {}'.format(my_user.dn) + check_value_in_log_and_reset(topology_st, check_value=output) + + log.info('my_user should have uid attribute with the old rdn') + assert my_user.present('uid', 'test_user_1000') + assert my_user.get_attr_val_utf8('cn') == 'test_user_1000' + assert my_user.get_attr_val_utf8('displayName') == 'test_user_1000' + + log.info('Old user dn should not exist') + assert not test_user.exists() + + log.info('Clean up') + my_user.delete() + + +@pytest.mark.skipif(ds_is_older("1.4.2"), reason="Not implemented") +def test_dsidm_user_rename(topology_st, create_test_user): + """ Test dsidm user rename option + + :id: fa569966-3954-465f-92b0-331a3a088b1b + :setup: Standalone instance + :steps: + 1. Run dsidm user rename option on created user + 2. Check the user does not have another uid attribute with the old rdn + 3. Check the old user is deleted + :expectedresults: + 1. Success + 2. Success + 3. 
Success + """ + + standalone = topology_st.standalone + users = nsUserAccounts(standalone, DEFAULT_SUFFIX) + test_user = users.get('test_user_1000') + + args = FakeArgs() + args.selector = test_user.rdn + args.new_name = 'my_user' + args.keep_old_rdn = False + + log.info('Test dsidm user rename') + args.new_name = 'my_user' + rename(standalone, DEFAULT_SUFFIX, topology_st.logcap.log, args) + my_user = users.get(args.new_name) + output = 'Successfully renamed to {}'.format(my_user.dn) + check_value_in_log_and_reset(topology_st, check_value=output) + + log.info('New user should not have uid attribute with the old rdn') + assert not my_user.present('uid', 'test_user_1000') + assert my_user.get_attr_val_utf8('cn') == 'test_user_1000' + assert my_user.get_attr_val_utf8('displayName') == 'test_user_1000' + + log.info('Old user dn should not exist.') + assert not test_user.exists() + + log.info('Clean up') + my_user.delete() + + +if __name__ == '__main__': + # Run isolated + # -s for DEBUG mode + CURRENT_FILE = os.path.realpath(__file__) + pytest.main("-s %s" % CURRENT_FILE) diff --git a/dirsrvtests/tests/suites/clu/dsrc_test.py b/dirsrvtests/tests/suites/clu/dsrc_test.py new file mode 100644 index 0000000..c1fb086 --- /dev/null +++ b/dirsrvtests/tests/suites/clu/dsrc_test.py @@ -0,0 +1,263 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2023 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. 
+# --- END COPYRIGHT BLOCK --- +# +import logging +import pytest +import os +from os.path import expanduser +from lib389.cli_base import FakeArgs +from lib389.cli_ctl.dsrc import create_dsrc, modify_dsrc, delete_dsrc, display_dsrc, replmon_dsrc +from lib389._constants import DEFAULT_SUFFIX, DN_DM +from lib389.topologies import topology_st as topo + +log = logging.getLogger(__name__) + + +def get_fake_args(): + # Setup our args + args = FakeArgs() + args.basedn = DEFAULT_SUFFIX + args.groups_rdn = None + args.people_rdn = None + args.binddn = DN_DM + args.json = None + args.uri = None + args.saslmech = None + args.tls_cacertdir = None + args.tls_cert = None + args.tls_key = None + args.tls_reqcert = None + args.starttls = None + args.cancel_starttls = None + args.pwdfile = None + args.do_it = True + args.add_conn = None + args.del_conn = None + args.add_alias = None + args.del_alias = None + + return args + + +@pytest.fixture(scope="function") +def setup(topo, request): + """Preserve any existing .dsrc file""" + + dsrc_file = f'{expanduser("~")}/.dsrc' + backup_file = dsrc_file + ".original" + if os.path.exists(dsrc_file): + os.rename(dsrc_file, backup_file) + + def fin(): + if os.path.exists(backup_file): + os.rename(backup_file, dsrc_file) + + request.addfinalizer(fin) + + +def test_dsrc(topo, setup): + """Test "dsctl dsrc" command + + :id: 0610de6c-e167-4761-bdab-3e677b2d44bb + :setup: Standalone Instance + :steps: + 1. Test creation works + 2. Test creating duplicate section + 3. Test adding an additional inst config works + 4. Test removing an instance works + 5. Test modify works + 6. Test delete works + 7. Test display fails when no file is present + + :expectedresults: + 1. Success + 2. Success + 3. Success + 4. Success + 5. Success + 6. Success + 7. 
Success + """ + + inst = topo.standalone + serverid = inst.serverid + second_inst_name = "Second" + second_inst_basedn = "o=second" + different_suffix = "o=different" + + # Setup our args + args = get_fake_args() + + # Create a dsrc configuration entry + create_dsrc(inst, log, args) + display_dsrc(inst, topo.logcap.log, args) + assert topo.logcap.contains("basedn = " + args.basedn) + assert topo.logcap.contains("binddn = " + args.binddn) + assert topo.logcap.contains("[" + serverid + "]") + topo.logcap.flush() + + # Attempt to add duplicate instance section + with pytest.raises(ValueError): + create_dsrc(inst, log, args) + + # Test adding a second instance works correctly + inst.serverid = second_inst_name + args.basedn = second_inst_basedn + create_dsrc(inst, log, args) + display_dsrc(inst, topo.logcap.log, args) + assert topo.logcap.contains("basedn = " + args.basedn) + assert topo.logcap.contains("[" + second_inst_name + "]") + topo.logcap.flush() + + # Delete second instance + delete_dsrc(inst, log, args) + inst.serverid = serverid # Restore original instance name + display_dsrc(inst, topo.logcap.log, args) + assert not topo.logcap.contains("[" + second_inst_name + "]") + assert not topo.logcap.contains("basedn = " + args.basedn) + # Make sure first instance config is still present + assert topo.logcap.contains("[" + serverid + "]") + assert topo.logcap.contains("binddn = " + args.binddn) + topo.logcap.flush() + + # Modify the config + args.basedn = different_suffix + modify_dsrc(inst, log, args) + display_dsrc(inst, topo.logcap.log, args) + assert topo.logcap.contains(different_suffix) + topo.logcap.flush() + + # Remove an arg from the config + args.basedn = "" + modify_dsrc(inst, log, args) + display_dsrc(inst, topo.logcap.log, args) + assert not topo.logcap.contains(different_suffix) + topo.logcap.flush() + + # Remove the last entry, which should delete the file + delete_dsrc(inst, log, args) + dsrc_file = f'{expanduser("~")}/.dsrc' + assert not 
os.path.exists(dsrc_file) + + # Make sure display fails + with pytest.raises(ValueError): + display_dsrc(inst, log, args) + + +def test_dsrc_repl_mon(topo, setup): + """Test "dsctl dsrc repl-mon" command, add & remove creds and aliases + + :id: 33007d01-f11c-456b-bb16-fcd7920c9fc8 + :setup: Standalone Instance + :steps: + 1. Add connection + 2. Add same connection - should fail + 3. Delete connection + 4. Delete same connection - should fail + 5. Add alias + 6. Add same alias - should fail + 7. Delete alias + 8. Delete same alias again 0 should fail + + :expectedresults: + 1. Success + 2. Success + 3. Success + 4. Success + 5. Success + 6. Success + 7. Success + 8. Success + """ + + inst = topo.standalone + args = get_fake_args() + create_dsrc(inst, log, args) + + # Add replica connection + assert not topo.logcap.contains("repl-monitor-connections") + repl_conn = "replica_1:localhost:5555:cn=directory manager:password" + args.add_conn = [repl_conn,] + replmon_dsrc(inst, log, args) + display_dsrc(inst, topo.logcap.log, args) + assert topo.logcap.contains("repl-monitor-connections") + assert topo.logcap.contains("replica_1 = localhost:5555:cn=directory manager:password") + topo.logcap.flush() + args.add_conn = None + + # Add duplicate replica connection + args.add_conn = [repl_conn, ] + try: + replmon_dsrc(inst, log, args) + assert False + except ValueError: + pass + args.add_conn = None + + # Delete replica connection + args.del_conn = ["replica_1"] + replmon_dsrc(inst, log, args) + display_dsrc(inst, topo.logcap.log, args) + assert not topo.logcap.contains("replica_1 = localhost:5555:cn=directory manager:password") + assert not topo.logcap.contains("repl-monitor-connections") + topo.logcap.flush() + args.del_conn = None + + # Delete replica connection (already deleted) + args.del_conn = ["replica_1"] + try: + replmon_dsrc(inst, log, args) + assert False + except ValueError: + pass + args.del_conn = None + + # Add Alias + assert not 
topo.logcap.contains("repl-monitor-aliases") + repl_alias = "my_alias:localhost:4444" + args.add_alias = [repl_alias,] + replmon_dsrc(inst, log, args) + display_dsrc(inst, topo.logcap.log, args) + assert topo.logcap.contains("repl-monitor-aliases") + assert topo.logcap.contains("my_alias = localhost:4444") + topo.logcap.flush() + args.add_alias = None + + # Add Duplicate Alias + args.add_alias = [repl_alias,] + try: + replmon_dsrc(inst, log, args) + assert False + except ValueError: + pass + args.add_alias = None + + # Delete Alias + args.del_alias = ["my_alias",] + replmon_dsrc(inst, log, args) + display_dsrc(inst, topo.logcap.log, args) + assert not topo.logcap.contains("my_alias = localhost:4444") + assert not topo.logcap.contains("repl-monitor-aliases") + topo.logcap.flush() + args.del_alias = None + + # Delete alias (already deleted) + args.del_alias = ["my_alias", ] + try: + replmon_dsrc(inst, log, args) + assert False + except ValueError: + pass + args.del_alias = None + + +if __name__ == '__main__': + # Run isolated + # -s for DEBUG mode + CURRENT_FILE = os.path.realpath(__file__) + pytest.main(["-s", CURRENT_FILE]) diff --git a/dirsrvtests/tests/suites/clu/fixup_test.py b/dirsrvtests/tests/suites/clu/fixup_test.py new file mode 100644 index 0000000..c37824a --- /dev/null +++ b/dirsrvtests/tests/suites/clu/fixup_test.py @@ -0,0 +1,102 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2020 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. 
+# --- END COPYRIGHT BLOCK --- +# +import time +import subprocess +import pytest + +from lib389.tasks import * +from lib389.utils import * +from lib389.topologies import topology_st +from lib389.cli_base import FakeArgs +from lib389.plugins import POSIXWinsyncPlugin +from lib389.cli_conf.plugins.posix_winsync import do_fixup + +pytestmark = pytest.mark.tier0 + +LOG_FILE = '/tmp/fixup.log' +logging.getLogger(__name__).setLevel(logging.DEBUG) +log = logging.getLogger(__name__) + + +@pytest.fixture(scope="function") +def set_log_file_and_ldif(topology_st, request): + MYLDIF = 'example1k_posix.ldif' + global ldif_file + + fh = logging.FileHandler(LOG_FILE) + fh.setLevel(logging.DEBUG) + log.addHandler(fh) + + data_dir_path = topology_st.standalone.getDir(__file__, DATA_DIR) + ldif_file = f"{data_dir_path}ticket48212/{MYLDIF}" + ldif_dir = topology_st.standalone.get_ldif_dir() + shutil.copy(ldif_file, ldif_dir) + ldif_file = ldif_dir + '/' + MYLDIF + + def fin(): + log.info('Delete files') + os.remove(LOG_FILE) + os.remove(ldif_file) + + request.addfinalizer(fin) + + +@pytest.mark.ds50545 +@pytest.mark.bz1739718 +@pytest.mark.skipif(ds_is_older("1.4.1"), reason="Not implemented") +def test_posix_winsync_fixup(topology_st, set_log_file_and_ldif): + """Test posix-winsync fixup that was ported from legacy tools + + :id: ce691017-cbd2-49ed-ac2d-8c3ea78050f6 + :setup: Standalone instance + :steps: + 1. Create DS instance + 2. Enable PosixWinsync plugin + 3. Run fixup task + 4. Check log for output + :expectedresults: + 1. Success + 2. Success + 3. Success + 4. 
Success + """ + + standalone = topology_st.standalone + output_list = ['Attempting to add task entry', 'Successfully added task entry'] + + log.info('Enable POSIXWinsyncPlugin') + posix = POSIXWinsyncPlugin(standalone) + posix.enable() + + log.info('Stopping the server and importing posix accounts') + standalone.stop() + assert standalone.ldif2db(bename=DEFAULT_BENAME, suffixes=[DEFAULT_SUFFIX], encrypt=None, excludeSuffixes=None, + import_file=ldif_file) + standalone.start() + + args = FakeArgs() + args.DN = DEFAULT_SUFFIX + args.filter = None + args.timeout = 0 + + log.info('Run Fixup task') + do_fixup(standalone, DEFAULT_SUFFIX, log, args) + + log.info('Check log if fixup task was successful') + with open(LOG_FILE, 'r') as f: + file_content = f.read() + for item in output_list: + assert item in file_content + + +if __name__ == '__main__': + # Run isolated + # -s for DEBUG mode + CURRENT_FILE = os.path.realpath(__file__) + pytest.main("-s %s" % CURRENT_FILE) diff --git a/dirsrvtests/tests/suites/clu/repl_monitor_test.py b/dirsrvtests/tests/suites/clu/repl_monitor_test.py new file mode 100644 index 0000000..d834168 --- /dev/null +++ b/dirsrvtests/tests/suites/clu/repl_monitor_test.py @@ -0,0 +1,278 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2020 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. 
+# --- END COPYRIGHT BLOCK --- +# +import time +import subprocess +import pytest +import re + +from lib389.cli_conf.replication import get_repl_monitor_info +from lib389.tasks import * +from lib389.utils import * +from lib389.topologies import topology_m2 +from lib389.cli_base import FakeArgs +from lib389.cli_base.dsrc import dsrc_arg_concat +from lib389.cli_base import connect_instance +from lib389.replica import Replicas + + +pytestmark = pytest.mark.tier0 + +LOG_FILE = '/tmp/monitor.log' +logging.getLogger(__name__).setLevel(logging.DEBUG) +log = logging.getLogger(__name__) + + +@pytest.fixture(scope="function") +def set_log_file(request): + fh = logging.FileHandler(LOG_FILE) + fh.setLevel(logging.DEBUG) + log.addHandler(fh) + + def fin(): + log.info('Delete files') + os.remove(LOG_FILE) + + config = os.path.expanduser(DSRC_HOME) + if os.path.exists(config): + os.remove(config) + + request.addfinalizer(fin) + + +def check_value_in_log_and_reset(content_list, second_list=None, single_value=None, error_list=None): + with open(LOG_FILE, 'r+') as f: + file_content = f.read() + + for item in content_list: + log.info('Check that "{}" is present'.format(item)) + assert item in file_content + + if second_list is not None: + log.info('Check for "{}"'.format(second_list)) + for item in second_list: + assert item in file_content + + if single_value is not None: + log.info('Check for "{}"'.format(single_value)) + assert single_value in file_content + + if error_list is not None: + log.info('Check that "{}" is not present'.format(error_list)) + for item in error_list: + assert item not in file_content + + log.info('Reset log file') + f.truncate(0) + +def get_hostnames_from_log(port1, port2): + # Get the supplier host names as displayed in replication monitor output + with open(LOG_FILE, 'r') as logfile: + logtext = logfile.read() + # search for Supplier :hostname:port + # and use \D to insure there is no more number is after + # the matched port (i.e that 10 is not matching 
101) + regexp = '(Supplier: )([^:]*)(:' + str(port1) + r'\D)' + match=re.search(regexp, logtext) + host_m1 = 'localhost.localdomain' + if (match is not None): + host_m1 = match.group(2) + # Same for supplier 2 + regexp = '(Supplier: )([^:]*)(:' + str(port2) + r'\D)' + match=re.search(regexp, logtext) + host_m2 = 'localhost.localdomain' + if (match is not None): + host_m2 = match.group(2) + return (host_m1, host_m2) + +#unstable or unstatus tests, skipped for now +@pytest.mark.flaky(max_runs=2, min_passes=1) +@pytest.mark.ds50545 +@pytest.mark.bz1739718 +@pytest.mark.skipif(ds_is_older("1.4.0"), reason="Not implemented") +def test_dsconf_replication_monitor(topology_m2, set_log_file): + """Test replication monitor that was ported from legacy tools + + :id: ce48020d-7c30-41b7-8f68-144c9cd757f6 + :setup: 2 MM topology + :steps: + 1. Create DS instance + 2. Run replication monitor with connections option + 3. Run replication monitor with aliases option + 4. Run replication monitor with --json option + 5. Run replication monitor with .dsrc file created + 6. Run replication monitor with connections option as if using dsconf CLI + :expectedresults: + 1. Success + 2. Success + 3. Success + 4. Success + 5. Success + 6. Success + """ + + m1 = topology_m2.ms["supplier1"] + m2 = topology_m2.ms["supplier2"] + + # Enable ldapi if not already done. + for inst in [topology_m2.ms["supplier1"], topology_m2.ms["supplier2"]]: + if not inst.can_autobind(): + # Update ns-slapd instance + inst.config.set('nsslapd-ldapilisten', 'on') + inst.config.set('nsslapd-ldapiautobind', 'on') + inst.restart() + # Ensure that updates have been sent both ways. 
+ replicas = Replicas(m1) + replica = replicas.get(DEFAULT_SUFFIX) + replica.test_replication([m2]) + replicas = Replicas(m2) + replica = replicas.get(DEFAULT_SUFFIX) + replica.test_replication([m1]) + + alias_content = ['Supplier: M1 (' + m1.host + ':' + str(m1.port) + ')', + 'Supplier: M2 (' + m2.host + ':' + str(m2.port) + ')'] + + connection_content = 'Supplier: '+ m1.host + ':' + str(m1.port) + content_list = ['Replica Root: dc=example,dc=com', + 'Replica ID: 1', + 'Replica Status: Online', + 'Max CSN', + 'Status For Agreement: "002" ('+ m2.host + ':' + str(m2.port) + ')', + 'Replica Enabled: on', + 'Update In Progress: FALSE', + 'Last Update Start:', + 'Last Update End:', + 'Number Of Changes Sent:', + 'Number Of Changes Skipped: None', + 'Last Update Status: Error (0) Replica acquired successfully: Incremental update succeeded', + 'Last Init Start:', + 'Last Init End:', + 'Last Init Status:', + 'Reap Active: 0', + 'Replication Status: In Synchronization', + 'Replication Lag Time:', + 'Supplier: ', + m2.host + ':' + str(m2.port), + 'Replica Root: dc=example,dc=com', + 'Replica ID: 2', + 'Status For Agreement: "001" (' + m1.host + ':' + str(m1.port)+')'] + + error_list = ['consumer (Unavailable)', + 'Failed to retrieve database RUV entry from consumer'] + + json_list = ['type', + 'list', + 'items', + 'name', + m1.host + ':' + str(m1.port), + 'data', + '"replica_id": "1"', + '"replica_root": "dc=example,dc=com"', + '"replica_status": "Online"', + 'maxcsn', + 'agmts_status', + 'agmt-name', + '002', + 'replica', + m2.host + ':' + str(m2.port), + 'replica-enabled', + 'update-in-progress', + 'last-update-start', + 'last-update-end', + 'number-changes-sent', + 'number-changes-skipped', + 'last-update-status', + 'Error (0) Replica acquired successfully: Incremental update succeeded', + 'last-init-start', + 'last-init-end', + 'last-init-status', + 'reap-active', + 'replication-status', + 'In Synchronization', + 'replication-lag-time', + '"replica_id": "2"', + '001', + 
m1.host + ':' + str(m1.port)] + + connections = [m1.host + ':' + str(m1.port) + ':' + DN_DM + ':' + PW_DM, + m2.host + ':' + str(m2.port) + ':' + DN_DM + ':' + PW_DM] + + args = FakeArgs() + args.connections = connections + args.aliases = None + args.json = False + + log.info('Run replication monitor with connections option') + get_repl_monitor_info(m1, DEFAULT_SUFFIX, log, args) + (host_m1, host_m2) = get_hostnames_from_log(m1.port, m2.port) + check_value_in_log_and_reset(content_list, connection_content, error_list=error_list) + + # Prepare the data for next tests + aliases = ['M1=' + host_m1 + ':' + str(m1.port), + 'M2=' + host_m2 + ':' + str(m2.port)] + + alias_content = ['Supplier: M1 (' + host_m1 + ':' + str(m1.port) + ')', + 'Supplier: M2 (' + host_m2 + ':' + str(m2.port) + ')'] + + dsrc_content = '[repl-monitor-connections]\n' \ + 'connection1 = ' + m1.host + ':' + str(m1.port) + ':' + DN_DM + ':' + PW_DM + '\n' \ + 'connection2 = ' + m2.host + ':' + str(m2.port) + ':' + DN_DM + ':' + PW_DM + '\n' \ + '\n' \ + '[repl-monitor-aliases]\n' \ + 'M1 = ' + host_m1 + ':' + str(m1.port) + '\n' \ + 'M2 = ' + host_m2 + ':' + str(m2.port) + + log.info('Run replication monitor with aliases option') + args.aliases = aliases + get_repl_monitor_info(m1, DEFAULT_SUFFIX, log, args) + check_value_in_log_and_reset(content_list, alias_content) + + log.info('Run replication monitor with --json option') + args.aliases = None + args.json = True + get_repl_monitor_info(m1, DEFAULT_SUFFIX, log, args) + check_value_in_log_and_reset(json_list) + + with open(os.path.expanduser(DSRC_HOME), 'w+') as f: + f.write(dsrc_content) + + args.connections = None + args.aliases = None + args.json = False + + log.info('Run replication monitor when .dsrc file is present with content') + get_repl_monitor_info(m1, DEFAULT_SUFFIX, log, args) + check_value_in_log_and_reset(content_list, alias_content) + os.remove(os.path.expanduser(DSRC_HOME)) + + log.info('Run replication monitor with connections 
option as if using dsconf CLI') + # Perform same test than steps 2 test but without using directly the topology instance. + # but with an instance similar to those than dsconf cli generates: + # step 2 args + args.connections = connections + args.aliases = None + args.json = False + # args needed to generate an instance with dsrc_arg_concat + args.instance = 'supplier1' + args.basedn = None + args.binddn = None + args.bindpw = None + args.pwdfile = None + args.prompt = False + args.starttls = False + dsrc_inst = dsrc_arg_concat(args, None) + inst = connect_instance(dsrc_inst, True, args) + get_repl_monitor_info(inst, DEFAULT_SUFFIX, log, args) + check_value_in_log_and_reset(content_list, connection_content, error_list=error_list) + + +if __name__ == '__main__': + # Run isolated + # -s for DEBUG mode + CURRENT_FILE = os.path.realpath(__file__) + pytest.main("-s %s" % CURRENT_FILE) diff --git a/dirsrvtests/tests/suites/clu/schema_test.py b/dirsrvtests/tests/suites/clu/schema_test.py new file mode 100644 index 0000000..183e0a2 --- /dev/null +++ b/dirsrvtests/tests/suites/clu/schema_test.py @@ -0,0 +1,144 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2023 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. +# --- END COPYRIGHT BLOCK --- +# +import logging +import pytest +import os +from lib389.topologies import topology_st as topo +from lib389.schema import Schema + +pytestmark = pytest.mark.tier0 +log = logging.getLogger(__name__) + + +def test_origins_with_extra_parenthesis(topo): + """Test the custom schema with extra parenthesis in X-ORIGIN can be parsed + into JSON + + :id: 4230f83b-0dc3-4bc4-a7a8-5ab0826a4f05 + :setup: Standalone Instance + :steps: + 1. Add attribute with X-ORIGIN that contains extra parenthesis + 2. Querying for that attribute with JSON flag + :expectedresults: + 1. Success + 2. 
Success + """ + + ATTR_NAME = 'testAttribute' + X_ORG_VAL = 'test (TEST)' + schema = Schema(topo.standalone) + + # Add new attribute + parameters = { + 'names': [ATTR_NAME], + 'oid': '1.1.1.1.1.1.1.22222', + 'desc': 'Test extra parenthesis in X-ORIGIN', + 'x_origin': [X_ORG_VAL], + 'syntax': '1.3.6.1.4.1.1466.115.121.1.15', + 'syntax_len': None, + 'x_ordered': None, + 'collective': None, + 'obsolete': None, + 'single_value': None, + 'no_user_mod': None, + 'equality': None, + 'substr': None, + 'ordering': None, + 'usage': None, + 'sup': None + } + schema.add_attributetype(parameters) + + # Search for attribute with JSON option + attr_result = schema.query_attributetype(ATTR_NAME, json=True) + + # Verify the x-origin value is correct + assert attr_result['at']['x_origin'][0] == X_ORG_VAL + + +schema_params = [ + ['attr1', '99999.1', None], + ['attr2', '99999.2', 'test-str'], + ['attr3', '99999.3', ['test-list']], + ['attr4', '99999.4', ('test-tuple',)], +] +@pytest.mark.parametrize("name, oid, xorg", schema_params) +def test_origins(topo, name, oid, xorg): + """Test the various possibilities of x-origin + + :id: 3229f6f8-67c1-4558-9be5-71434283086a + :setup: Standalone Instance + :steps: + 1. Add an attribute with different x-origin values/types + :expectedresults: + 1. Success + """ + + schema = Schema(topo.standalone) + + # Add new attribute + parameters = { + 'names': [name], + 'oid': oid, + 'desc': 'Test X-ORIGIN', + 'x_origin': xorg, + 'syntax': '1.3.6.1.4.1.1466.115.121.1.15', + 'syntax_len': None, + 'x_ordered': None, + 'collective': None, + 'obsolete': None, + 'single_value': None, + 'no_user_mod': None, + 'equality': None, + 'substr': None, + 'ordering': None, + 'usage': None, + 'sup': None + } + schema.add_attributetype(parameters) + + +def test_mrs(topo): + """Test an attribute can be added with a matching rule + + :id: e4eb06e0-7f80-41fe-8868-08c2bafc7590 + :setup: Standalone Instance + :steps: + 1. 
Add an attribute with a matching rule + :expectedresults: + 1. Success + """ + schema = Schema(topo.standalone) + + # Add new attribute + parameters = { + 'names': ['test-mr'], + 'oid': '99999999', + 'desc': 'Test matching rule', + 'syntax': '1.3.6.1.4.1.1466.115.121.1.15', + 'syntax_len': None, + 'x_ordered': None, + 'collective': None, + 'obsolete': None, + 'single_value': None, + 'no_user_mod': None, + 'equality': None, + 'substr': 'numericstringsubstringsmatch', + 'ordering': None, + 'usage': None, + 'sup': None + } + schema.add_attributetype(parameters) + + +if __name__ == '__main__': + # Run isolated + # -s for DEBUG mode + CURRENT_FILE = os.path.realpath(__file__) + pytest.main(["-s", CURRENT_FILE]) diff --git a/dirsrvtests/tests/suites/config/__init__.py b/dirsrvtests/tests/suites/config/__init__.py new file mode 100644 index 0000000..bda0655 --- /dev/null +++ b/dirsrvtests/tests/suites/config/__init__.py @@ -0,0 +1,3 @@ +""" + :Requirement: 389-ds-base: Directory Server Configurations +""" diff --git a/dirsrvtests/tests/suites/config/autotuning_test.py b/dirsrvtests/tests/suites/config/autotuning_test.py new file mode 100644 index 0000000..0561714 --- /dev/null +++ b/dirsrvtests/tests/suites/config/autotuning_test.py @@ -0,0 +1,366 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2017 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. 
+# --- END COPYRIGHT BLOCK --- +# +import pytest +from lib389._mapped_object import DSLdapObject +from lib389.utils import * +from lib389.topologies import topology_st as topo + +from lib389._constants import DN_CONFIG_LDBM, DN_CONFIG_LDBM_BDB, DN_USERROOT_LDBM, DEFAULT_SUFFIX + +pytestmark = pytest.mark.tier0 + +DEBUGGING = os.getenv("DEBUGGING", default=False) +if DEBUGGING: + logging.getLogger(__name__).setLevel(logging.DEBUG) +else: + logging.getLogger(__name__).setLevel(logging.INFO) +log = logging.getLogger(__name__) + + +def test_threads_basic(topo): + """Check that a number of threads are able to be autotuned + + :id: 371fb9c4-9607-4a4b-a4a2-6f00809d6257 + :setup: Standalone instance + :steps: + 1. Set nsslapd-threadnumber to -1 + 2. Check that number of threads is positive + :expectedresults: + 1. nsslapd-threadnumber should be successfully set + 2. nsslapd-threadnumber is positive + """ + + log.info("Set nsslapd-threadnumber: -1 to enable autotuning") + topo.standalone.config.set("nsslapd-threadnumber", "-1") + + log.info("Assert nsslapd-threadnumber is equal to the documented expected value") + assert topo.standalone.config.get_attr_val_int("nsslapd-threadnumber") > 0 + + +def test_threads_warning(topo): + """Check that we log a warning if the thread number is too high or low + + :id: db92412b-2812-49de-84b0-00f452cd254f + :setup: Standalone Instance + :steps: + 1. Get autotuned thread number + 2. Set threads way higher than hw threads, and find a warning in the log + 3. Set threads way lower than hw threads, and find a warning in the log + :expectedresults: + 1. Success + 2. Success + 3. 
Success + """ + topo.standalone.config.set("nsslapd-threadnumber", "-1") + autotuned_value = topo.standalone.config.get_attr_val_utf8("nsslapd-threadnumber") + + topo.standalone.config.set("nsslapd-threadnumber", str(int(autotuned_value) * 4)) + time.sleep(.5) + assert topo.standalone.ds_error_log.match('.*higher.*hurt server performance.*') + + if int(autotuned_value) > 1: + # If autotuned is 1, there isn't anything to test here + topo.standalone.config.set("nsslapd-threadnumber", "1") + time.sleep(.5) + assert topo.standalone.ds_error_log.match('.*lower.*hurt server performance.*') + + +@pytest.mark.parametrize("invalid_value", ('-2', '0', 'invalid')) +def test_threads_invalid_value(topo, invalid_value): + """Check nsslapd-threadnumber for an invalid values + + :id: 1979eddf-8222-4c9d-809d-269c26de636e + :parametrized: yes + :setup: Standalone instance + :steps: + 1. Set nsslapd-threadnumber to -2, 0, invalid_str + :expectedresults: + 1. The operation should fail + """ + + log.info("Set nsslapd-threadnumber: {}. Operation should fail".format(invalid_value)) + with pytest.raises(ldap.OPERATIONS_ERROR): + topo.standalone.config.set("nsslapd-threadnumber", invalid_value) + + +def test_threads_back_from_manual_value(topo): + """Check that thread autotuning works after manual tuning + + :id: 4b674016-e5ca-426b-a9c0-a94745a7dd25 + :setup: Standalone instance + :steps: + 1. Set nsslapd-threadnumber to -1 and save the autotuned value + 2. Decrease nsslapd-threadnumber by 2 + 3. Set nsslapd-threadnumber to -1 + 4. Check that nsslapd-threadnumber is back to autotuned value + :expectedresults: + 1. nsslapd-threadnumber should be successfully set + 2. nsslapd-threadnumber should be successfully decreased + 3. nsslapd-threadnumber should be successfully set + 4. 
nsslapd-threadnumber is set back to the autotuned value + """ + + log.info("Set nsslapd-threadnumber: -1 to enable autotuning and save the new value") + topo.standalone.config.set("nsslapd-threadnumber", "-1") + autotuned_value = topo.standalone.config.get_attr_val_utf8("nsslapd-threadnumber") + + log.info("Set nsslapd-threadnumber to the autotuned value decreased by 2") + new_value = str(int(autotuned_value) - 2) + topo.standalone.config.set("nsslapd-threadnumber", new_value) + assert topo.standalone.config.get_attr_val_utf8("nsslapd-threadnumber") == new_value + + log.info("Set nsslapd-threadnumber: -1 to enable autotuning") + topo.standalone.config.set("nsslapd-threadnumber", "-1") + + log.info("Assert nsslapd-threadnumber is back to the autotuned value") + assert topo.standalone.config.get_attr_val_utf8("nsslapd-threadnumber") == autotuned_value + + +@pytest.mark.skipif(get_default_db_lib() == "mdb", reason="Not supported over mdb") +@pytest.mark.parametrize("autosize,autosize_split", (('', ''), ('', '0'), ('10', '40'), ('', '40'), + ('10', ''), ('10', '40'), ('10', '0'))) +def test_cache_autosize_non_zero(topo, autosize, autosize_split): + """Check that autosizing works works properly in different combinations + + :id: 83fa099c-a6c9-457a-82db-0982b67e8598 + :parametrized: yes + :setup: Standalone instance + :steps: + 1. Set in the cn=config,cn=ldbm database,cn=plugins,cn=config: + nsslapd-cache-autosize, nsslapd-cache-autosize-split to the next value pairs: + ('', ''), ('', '0'), ('10', '40'), ('', '40'), + ('10', ''), ('10', '40'), ('10', '0') + '' - for deleting the value (set to default) + 2. Try to modify nsslapd-dbcachesize and nsslapd-cachememsize to + some real value, it should be rejected + 3. Restart the instance + 4. Check nsslapd-dbcachesize and nsslapd-cachememsize + :expectedresults: + 1. nsslapd-cache-autosize, nsslapd-cache-autosize-split are successfully set + 2. Modify operation should be rejected + 3. 
The instance should be successfully restarted + 4. nsslapd-dbcachesize and nsslapd-cachememsize should set + to value greater than 512KB + """ + + config_ldbm = DSLdapObject(topo.standalone, DN_CONFIG_LDBM) + bdb_config_ldbm = DSLdapObject(topo.standalone, DN_CONFIG_LDBM_BDB) + userroot_ldbm = DSLdapObject(topo.standalone, DN_USERROOT_LDBM) + + cachesize = '33333333' + + if ds_is_older('1.4.2'): + dbcachesize_val = config_ldbm.get_attr_val('nsslapd-dbcachesize') + autosize_val = config_ldbm.get_attr_val('nsslapd-cache-autosize') + autosize_split_val = config_ldbm.get_attr_val('nsslapd-cache-autosize-split') + else: + dbcachesize_val = bdb_config_ldbm.get_attr_val('nsslapd-dbcachesize') + autosize_val = bdb_config_ldbm.get_attr_val('nsslapd-cache-autosize') + autosize_split_val = bdb_config_ldbm.get_attr_val('nsslapd-cache-autosize-split') + + cachenensize_val = userroot_ldbm.get_attr_val('nsslapd-cachememsize') + dncachenensize_val = userroot_ldbm.get_attr_val('nsslapd-dncachememsize') + + log.info("Check nsslapd-dbcachesize and nsslapd-cachememsize before the test") + log.info("nsslapd-dbcachesize == {}".format(dbcachesize_val)) + log.info("nsslapd-cachememsize == {}".format(cachenensize_val)) + log.info("nsslapd-dncachememsize == {}".format(dncachenensize_val)) + log.info("nsslapd-cache-autosize == {}".format(autosize_val)) + log.info("nsslapd-cache-autosize-split == {}".format(autosize_split_val)) + + if autosize: + log.info("Set nsslapd-cache-autosize to {}".format(autosize)) + config_ldbm.set('nsslapd-cache-autosize', autosize) + else: + log.info("Delete nsslapd-cache-autosize") + try: + config_ldbm.remove('nsslapd-cache-autosize', autosize_val) + except ValueError: + log.info("nsslapd-cache-autosize wasn't found") + + if autosize_split: + log.info("Set nsslapd-cache-autosize-split to {}".format(autosize_split)) + config_ldbm.set('nsslapd-cache-autosize-split', autosize_split) + else: + log.info("Delete nsslapd-cache-autosize-split") + try: + 
config_ldbm.remove('nsslapd-cache-autosize-split', autosize_split_val) + except ValueError: + log.info("nsslapd-cache-autosize-split wasn't found") + + log.info("Trying to set nsslapd-cachememsize to {}".format(cachesize)) + with pytest.raises(ldap.UNWILLING_TO_PERFORM): + userroot_ldbm.set('nsslapd-cachememsize', cachesize) + log.info("Trying to set nsslapd-dbcachesize to {}".format(cachesize)) + with pytest.raises(ldap.UNWILLING_TO_PERFORM): + config_ldbm.set('nsslapd-dbcachesize ', cachesize) + topo.standalone.restart() + + if ds_is_older('1.4.2'): + dbcachesize_val = config_ldbm.get_attr_val('nsslapd-dbcachesize') + autosize_val = config_ldbm.get_attr_val('nsslapd-cache-autosize') + autosize_split_val = config_ldbm.get_attr_val('nsslapd-cache-autosize-split') + else: + dbcachesize_val = bdb_config_ldbm.get_attr_val('nsslapd-dbcachesize') + autosize_val = bdb_config_ldbm.get_attr_val('nsslapd-cache-autosize') + autosize_split_val = bdb_config_ldbm.get_attr_val('nsslapd-cache-autosize-split') + + cachenensize_val = userroot_ldbm.get_attr_val('nsslapd-cachememsize') + dncachenensize_val = userroot_ldbm.get_attr_val('nsslapd-dncachememsize') + + log.info("Check nsslapd-dbcachesize and nsslapd-cachememsize in the appropriate range.") + log.info("nsslapd-dbcachesize == {}".format(dbcachesize_val)) + log.info("nsslapd-cachememsize == {}".format(cachenensize_val)) + log.info("nsslapd-dncachememsize == {}".format(dncachenensize_val)) + log.info("nsslapd-cache-autosize == {}".format(autosize_val)) + log.info("nsslapd-cache-autosize-split == {}".format(autosize_split_val)) + assert int(dbcachesize_val) >= 512000 + assert int(cachenensize_val) >= 512000 + assert int(dncachenensize_val) >= 512000 + + +@pytest.mark.skipif(get_default_db_lib() == "mdb", reason="Not supported over mdb") +@pytest.mark.parametrize("autosize_split", ('0', '', '40')) +def test_cache_autosize_basic_sane(topo, autosize_split): + """Check that autotuning cachesizes works properly with different 
values + + :id: 9dc363ef-f551-446d-8b83-8ac45dabb8df + :parametrized: yes + :setup: Standalone instance + :steps: + 1. Set in the cn=config,cn=ldbm database,cn=plugins,cn=config: + nsslapd-cache-autosize, nsslapd-cache-autosize-split to the next value pairs: + ('0', '0'), ('0', ''), ('0', '40') + '' - for deleting the value (set to default) + 2. Set in the cn=config,cn=ldbm database,cn=plugins,cn=config: + nsslapd-dbcachesize: 0 and some same value + 3. Set in the cn=UserRoot,cn=ldbm database,cn=plugins,cn=config: + nsslapd-cachememsize: 0 and some same value + 4. Restart the instance + 5. Check nsslapd-dbcachesize and nsslapd-cachememsize + :expectedresults: + 1. nsslapd-cache-autosize, nsslapd-cache-autosize-split are successfully set + 2. nsslapd-dbcachesize are successfully set + 3. nsslapd-cachememsize are successfully set + 4. The instance should be successfully restarted + 5. nsslapd-dbcachesize and nsslapd-cachememsize should set + to value greater than 512KB + """ + + config_ldbm = DSLdapObject(topo.standalone, DN_CONFIG_LDBM) + bdb_config_ldbm = DSLdapObject(topo.standalone, DN_CONFIG_LDBM_BDB) + userroot_ldbm = DSLdapObject(topo.standalone, DN_USERROOT_LDBM) + config_ldbm.set('nsslapd-cache-autosize', '0') + + # Test with caches with both real values and 0 + for cachesize in ('0', '33333333'): + if ds_is_older('1.4.2'): + dbcachesize_val = config_ldbm.get_attr_val('nsslapd-dbcachesize') + autosize_val = config_ldbm.get_attr_val('nsslapd-cache-autosize') + autosize_split_val = config_ldbm.get_attr_val('nsslapd-cache-autosize-split') + else: + dbcachesize_val = bdb_config_ldbm.get_attr_val('nsslapd-dbcachesize') + autosize_val = bdb_config_ldbm.get_attr_val('nsslapd-cache-autosize') + autosize_split_val = bdb_config_ldbm.get_attr_val('nsslapd-cache-autosize-split') + + cachenensize_val = userroot_ldbm.get_attr_val('nsslapd-cachememsize') + dncachenensize_val = userroot_ldbm.get_attr_val('nsslapd-dncachememsize') + + log.info("Check nsslapd-dbcachesize and 
nsslapd-cachememsize before the test") + log.info("nsslapd-dbcachesize == {}".format(dbcachesize_val)) + log.info("nsslapd-cachememsize == {}".format(cachenensize_val)) + log.info("nsslapd-cache-autosize == {}".format(autosize_val)) + log.info("nsslapd-cache-autosize-split == {}".format(autosize_split_val)) + + if autosize_split: + log.info("Set nsslapd-cache-autosize-split to {}".format(autosize_split)) + config_ldbm.set('nsslapd-cache-autosize-split', autosize_split) + else: + log.info("Delete nsslapd-cache-autosize-split") + try: + config_ldbm.remove('nsslapd-cache-autosize-split', autosize_split_val) + except ValueError: + log.info("nsslapd-cache-autosize-split wasn't found") + + log.info("Set nsslapd-dbcachesize to {}".format(cachesize)) + config_ldbm.set('nsslapd-dbcachesize', cachesize) + log.info("Set nsslapd-cachememsize to {}".format(cachesize)) + userroot_ldbm.set('nsslapd-cachememsize', cachesize) + topo.standalone.restart() + + if ds_is_older('1.4.2'): + dbcachesize_val = config_ldbm.get_attr_val('nsslapd-dbcachesize') + autosize_val = config_ldbm.get_attr_val('nsslapd-cache-autosize') + autosize_split_val = config_ldbm.get_attr_val('nsslapd-cache-autosize-split') + else: + dbcachesize_val = bdb_config_ldbm.get_attr_val('nsslapd-dbcachesize') + autosize_val = bdb_config_ldbm.get_attr_val('nsslapd-cache-autosize') + autosize_split_val = bdb_config_ldbm.get_attr_val('nsslapd-cache-autosize-split') + + cachenensize_val = userroot_ldbm.get_attr_val('nsslapd-cachememsize') + dncachenensize_val = userroot_ldbm.get_attr_val('nsslapd-dncachememsize') + + log.info("Check nsslapd-dbcachesize and nsslapd-cachememsize in the appropriate range.") + log.info("nsslapd-dbcachesize == {}".format(dbcachesize_val)) + log.info("nsslapd-cachememsize == {}".format(cachenensize_val)) + log.info("nsslapd-dncachememsize == {}".format(dncachenensize_val)) + log.info("nsslapd-cache-autosize == {}".format(autosize_val)) + log.info("nsslapd-cache-autosize-split == 
{}".format(autosize_split_val)) + assert int(dbcachesize_val) >= 512000 + assert int(cachenensize_val) >= 512000 + assert int(dncachenensize_val) >= 512000 + + +@pytest.mark.skipif(get_default_db_lib() == "mdb", reason="Not supported over mdb") +@pytest.mark.parametrize("invalid_value", ('-2', '102', 'invalid')) +def test_cache_autosize_invalid_values(topo, invalid_value): + """Check that we can't set invalid values to autosize attributes + + :id: 2f0d01b5-ca91-4dc2-97bc-ad0ac8d08633 + :parametrized: yes + :setup: Standalone instance + :steps: + 1. Stop the instance + 2. Set in the cn=config,cn=ldbm database,cn=plugins,cn=config: + nsslapd-cache-autosize and nsslapd-cache-autosize-split + to invalid values like (-2, 102, invalid_str) + 3. Try to start the instance + :expectedresults: + 1. The instance should stop successfully + 2. nsslapd-cache-autosize, nsslapd-cache-autosize-split are successfully set + 3. Starting the instance should fail + """ + + config_ldbm = DSLdapObject(topo.standalone, DN_CONFIG_LDBM) + bdb_config_ldbm = DSLdapObject(topo.standalone, DN_CONFIG_LDBM_BDB) + if ds_is_older('1.4.2'): + autosize_val = config_ldbm.get_attr_val('nsslapd-cache-autosize') + autosize_split_val = config_ldbm.get_attr_val('nsslapd-cache-autosize-split') + else: + autosize_val = bdb_config_ldbm.get_attr_val('nsslapd-cache-autosize') + autosize_split_val = bdb_config_ldbm.get_attr_val('nsslapd-cache-autosize-split') + + log.info("Set nsslapd-cache-autosize-split to {}".format(invalid_value)) + with pytest.raises(ldap.UNWILLING_TO_PERFORM): + config_ldbm.set('nsslapd-cache-autosize-split', invalid_value) + topo.standalone.restart() + config_ldbm.remove('nsslapd-cache-autosize-split', autosize_split_val) + + log.info("Set nsslapd-cache-autosize to {}".format(invalid_value)) + with pytest.raises(ldap.UNWILLING_TO_PERFORM): + config_ldbm.set('nsslapd-cache-autosize', invalid_value) + topo.standalone.restart() + config_ldbm.remove('nsslapd-cache-autosize', autosize_val) + + 
+if __name__ == '__main__': + # Run isolated + # -s for DEBUG mode + CURRENT_FILE = os.path.realpath(__file__) + pytest.main(["-s", CURRENT_FILE]) diff --git a/dirsrvtests/tests/suites/config/compact_test.py b/dirsrvtests/tests/suites/config/compact_test.py new file mode 100644 index 0000000..b0e57dd --- /dev/null +++ b/dirsrvtests/tests/suites/config/compact_test.py @@ -0,0 +1,184 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2022 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. +# --- END COPYRIGHT BLOCK --- +# +import logging +import pytest +import os +import time +import datetime +from lib389.utils import get_default_db_lib +from lib389.tasks import DBCompactTask +from lib389.backend import DatabaseConfig +from lib389.topologies import topology_m1 as topo +from lib389.utils import ldap, ds_is_older + +pytestmark = pytest.mark.tier2 +log = logging.getLogger(__name__) + + +def test_compact_db_task(topo): + """Test creation of dbcompact task is successful + + :id: 1b3222ef-a336-4259-be21-6a52f76e1859 + :customerscenario: True + :setup: Standalone Instance + :steps: + 1. Create task + 2. Check task was successful + 3. Check errors log to show task was run + 4. Create task just for changelog + :expectedresults: + 1. Success + 2. Success + 3. Success + 4. 
Success + """ + inst = topo.ms["supplier1"] + + task = DBCompactTask(inst) + task.create() + task.wait() + assert task.get_exit_code() == 0 + + # Check errors log to make sure task actually compacted db + assert inst.searchErrorsLog("Compacting databases") + inst.deleteErrorLogs() + + # Create new task that only compacts changelog + task = DBCompactTask(inst) + task_properties = {'justChangelog': 'yes'} + task.create(properties=task_properties) + task.wait() + assert task.get_exit_code() == 0 + + # On bdb, check errors log to make sure task only performed changelog compaction + # Note: as mdb contains a single map file (the justChangelog flags has + # no impact (and whole db is compacted)) + if get_default_db_lib() == "bdb": + assert inst.searchErrorsLog("Compacting DB") == False + assert inst.searchErrorsLog("Compacting Replication Changelog") + inst.deleteErrorLogs(restart=False) + + +@pytest.mark.skipif(get_default_db_lib() == "mdb", reason="Not supported over mdb") +def test_compaction_interval_and_time(topo): + """Test dbcompact is successful when nsslapd-db-compactdb-interval and nsslapd-db-compactdb-time is set + + :id: f361bee9-d7e7-4569-9255-d7b60dd9d92e + :customerscenario: True + :setup: Supplier Instance + :steps: + 1. Configure compact interval and time + 2. Check compaction occurs as expected + :expectedresults: + 1. Success + 2. 
Success + """ + + inst = topo.ms["supplier1"] + + # Calculate the compaction time (1 minute from now) + now = datetime.datetime.now() + current_hour = now.hour + current_minute = now.minute + 2 + + if current_minute >= 60: + # handle time wrapping/rollover + current_minute = current_minute - 60 + # Bump to the next hour + current_hour += 1 + + if current_hour < 10: + hour = "0" + str(current_hour) + else: + hour = str(current_hour) + if current_minute < 10: + minute = "0" + str(current_minute) + else: + minute = str(current_minute) + + compact_time = hour + ":" + minute + + # Set compaction TOD + config = DatabaseConfig(inst) + config.set([('nsslapd-db-compactdb-interval', '2'), ('nsslapd-db-compactdb-time', compact_time)]) + inst.deleteErrorLogs(restart=True) + + # Check compaction occurred as expected + time.sleep(45) + assert not inst.searchErrorsLog("Compacting databases") + + time.sleep(90) + assert inst.searchErrorsLog("Compacting databases") + inst.deleteErrorLogs(restart=False) + + +@pytest.mark.ds4778 +@pytest.mark.bz1748441 +@pytest.mark.skipif(ds_is_older("1.4.3.23"), reason="Not implemented") +def test_no_compaction(topo): + """Test there is no compaction when nsslapd-db-compactdb-interval is set to 0 + + :id: 80fdb0e3-a70c-42ad-9841-eebb74287b19 + :customerscenario: True + :setup: Supplier Instance + :steps: + 1. Configure nsslapd-db-compactdb-interval to 0 + 2. Check there is no compaction + :expectedresults: + 1. Success + 2. 
Success + """ + + inst = topo.ms["supplier1"] + config = DatabaseConfig(inst) + config.set([('nsslapd-db-compactdb-interval', '0'), ('nsslapd-db-compactdb-time', '00:01')]) + inst.deleteErrorLogs() + + time.sleep(3) + assert not inst.searchErrorsLog("Compacting databases") + inst.deleteErrorLogs(restart=False) + + +@pytest.mark.ds4778 +@pytest.mark.bz1748441 +@pytest.mark.skipif(ds_is_older("1.4.3.23"), reason="Not implemented") +def test_compaction_interval_invalid(topo): + """Test that invalid value is rejected for nsslapd-db-compactdb-interval + + :id: 408ee3ee-727c-4565-8b08-2e07d0c6f7d7 + :customerscenario: True + :setup: Supplier Instance + :steps: + 1. Set nsslapd-db-compactdb-interval to 2147483650 + 2. Check exception message contains invalid value and no compaction occurred + :expectedresults: + 1. Exception is raised + 2. Success + """ + + inst = topo.ms["supplier1"] + msg = 'value 2147483650 for attr nsslapd-db-compactdb-interval is greater than the maximum 2147483647' + config = DatabaseConfig(inst) + + try: + config.set([('nsslapd-db-compactdb-interval', '2147483650'), ('nsslapd-db-compactdb-time', '00:01')]) + except ldap.UNWILLING_TO_PERFORM as e: + log.info('Got expected error: {}'.format(str(e))) + assert msg in str(e) + time.sleep(3) + assert not inst.searchErrorsLog("Compacting databases") + inst.deleteErrorLogs(restart=False) + + +if __name__ == '__main__': + # Run isolated + # -s for DEBUG mode + CURRENT_FILE = os.path.realpath(__file__) + pytest.main(["-s", CURRENT_FILE]) + diff --git a/dirsrvtests/tests/suites/config/config_delete_attr_test.py b/dirsrvtests/tests/suites/config/config_delete_attr_test.py new file mode 100644 index 0000000..a5a2587 --- /dev/null +++ b/dirsrvtests/tests/suites/config/config_delete_attr_test.py @@ -0,0 +1,156 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2021 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. 
+# --- END COPYRIGHT BLOCK --- +# +import pytest + +from lib389.utils import os, logging, ds_is_older, ldap +from lib389.topologies import topology_st + +logging.getLogger(__name__).setLevel(logging.DEBUG) +log = logging.getLogger(__name__) + +# Skip on older versions +pytestmark = [pytest.mark.tier2, + pytest.mark.skipif(ds_is_older('1.3.6'), reason="Not implemented")] + + +@pytest.mark.ds48961 +def test_delete_storagescheme(topology_st): + """ Test that deletion of passwordStorageScheme is rejected + + :id: 53ab2dbf-e37c-4d30-8cce-0d5f44ed204a + :setup: Standalone instance + :steps: + 1. Create instance + 2. Modify passwordStorageScheme attribute + 3. Remove passwordStorageScheme attribute + 4. Check exception message + :expectedresults: + 1. Success + 2. Success + 3. Removal should be rejected + 4. Message should be about rejected change + """ + + standalone = topology_st.standalone + + log.info('Check we can modify passwordStorageScheme') + standalone.config.set('passwordStorageScheme', 'CLEAR') + assert standalone.config.get_attr_val_utf8('passwordStorageScheme') == 'CLEAR' + + log.info('Check removal of passwordStorageScheme is rejected') + with pytest.raises(ldap.OPERATIONS_ERROR) as excinfo: + standalone.config.remove('passwordStorageScheme', None) + assert "deleting the value is not allowed" in str(excinfo.value) + + +@pytest.mark.ds48961 +def test_reset_attributes(topology_st): + """ Test that we can reset some attributes while others are rejected + + :id: 5f78088f-36d3-4a0b-8c1b-4abc161e996f + :setup: Standalone instance + :steps: + 1. Create instance + 2. Check attributes from attr_to_test can be reset + 3. Check value of that attribute is empty + 4. Check reset of attributes from attr_to_fail is rejected + :expectedresults: + 1. Success + 2. Success + 3. Success + 4. 
Success + """ + + standalone = topology_st.standalone + + # These attributes should not be able to reset + attr_to_fail = [ + 'nsslapd-localuser', + 'nsslapd-defaultnamingcontext', + 'nsslapd-accesslog', + 'nsslapd-auditlog', + 'nsslapd-securitylog', + 'nsslapd-errorlog', + 'nsslapd-tmpdir', + 'nsslapd-rundir', + 'nsslapd-bakdir', + 'nsslapd-certdir', + 'nsslapd-instancedir', + 'nsslapd-ldifdir', + 'nsslapd-lockdir', + 'nsslapd-schemadir', + 'nsslapd-workingdir', + 'nsslapd-localhost', + 'nsslapd-certmap-basedn', + 'nsslapd-port', + 'nsslapd-secureport', + 'nsslapd-rootpw', + 'nsslapd-hash-filters', + 'nsslapd-requiresrestart', + 'nsslapd-plugin', + 'nsslapd-privatenamespaces', + 'nsslapd-allowed-to-delete-attrs', + 'nsslapd-accesslog-list', + 'nsslapd-auditfaillog-list', + 'nsslapd-auditlog-list', + 'nsslapd-errorlog-list', + 'nsslapd-config', + 'nsslapd-versionstring', + 'objectclass', + 'cn', + 'nsslapd-backendconfig', + 'nsslapd-betype', + 'nsslapd-connection-buffer', + 'nsslapd-malloc-mmap-threshold', + 'nsslapd-malloc-mxfast', + 'nsslapd-malloc-trim-threshold', + 'nsslapd-referralmode', + 'nsslapd-saslpath', + 'passwordadmindn' + ] + + attr_to_test = { + 'nsslapd-listenhost': 'localhost', + 'nsslapd-securelistenhost': 'localhost', + 'nsslapd-allowed-sasl-mechanisms': 'GSSAPI', + 'nsslapd-svrtab': 'Some data' + } + + for attr in attr_to_test: + newval = attr_to_test[attr] + + log.info("Change %s value to --> %s" % (attr, newval)) + standalone.config.set(attr, newval) + assert standalone.config.get_attr_val_utf8(attr) == newval + + log.info('Now reset the attribute') + standalone.config.reset(attr) + assert standalone.config.get_attr_val_utf8(attr) == '' + log.info("%s is reset to None" % attr) + + for attr in attr_to_fail: + log.info("Resetting %s" % attr) + try: + standalone.config.reset(attr) + # Shouldn't reach here, the reset should fail! 
+ log.info('Attribute deletion should fail => test failed!') + assert False + except (ldap.UNWILLING_TO_PERFORM, ldap.OPERATIONS_ERROR, ldap.OBJECT_CLASS_VIOLATION): + log.info('Change was rejected, test passed') + pass + except ldap.NO_SUCH_ATTRIBUTE: + log.info("This attribute isn't part of cn=config, so is already default!") + pass + + +if __name__ == '__main__': + # Run isolated + # -s for DEBUG mode + CURRENT_FILE = os.path.realpath(__file__) + pytest.main(["-s", CURRENT_FILE]) diff --git a/dirsrvtests/tests/suites/config/config_test.py b/dirsrvtests/tests/suites/config/config_test.py new file mode 100644 index 0000000..69882d3 --- /dev/null +++ b/dirsrvtests/tests/suites/config/config_test.py @@ -0,0 +1,759 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2020 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. +# --- END COPYRIGHT BLOCK --- +# + +import logging +import pytest +import os +from lib389 import DirSrv, pid_from_file +from lib389.tasks import * +from lib389.topologies import topology_m2, topology_st as topo +from lib389.utils import * +from lib389._constants import DN_CONFIG, DEFAULT_SUFFIX, DEFAULT_BENAME +from lib389._mapped_object import DSLdapObjects +from lib389.cli_base import FakeArgs +from lib389.cli_conf.backend import db_config_set +from lib389.idm.user import UserAccounts, TEST_USER_PROPERTIES +from lib389.idm.group import Groups +from lib389.instance.setup import SetupDs +from lib389.config import LDBMConfig, BDB_LDBMConfig, Config +from lib389.cos import CosPointerDefinitions, CosTemplates +from lib389.backend import Backends, DatabaseConfig +from lib389.monitor import MonitorLDBM, Monitor +from lib389.plugins import ReferentialIntegrityPlugin + +pytestmark = pytest.mark.tier0 + +USER_DN = 'uid=test_user,%s' % DEFAULT_SUFFIX +PSTACK_CMD = '/usr/bin/pstack' + +logging.getLogger(__name__).setLevel(logging.INFO) +log = logging.getLogger(__name__) + 
+@pytest.fixture(scope="module") +def big_file(): + TEMP_BIG_FILE = '' + # 1024*1024=1048576 + # B for 1 MiB + # Big for 3 MiB + # build in one step: a per-character += loop is not guaranteed linear + TEMP_BIG_FILE = '+' * 1048576 + + return TEMP_BIG_FILE + + +@pytest.mark.bz1897248 +@pytest.mark.ds4315 +@pytest.mark.skipif(ds_is_older('1.4.3.16'), reason="This config setting exists in 1.4.3.16 and higher") +def test_nagle_default_value(topo): + """Test that nsslapd-nagle attribute is off by default + + :id: 00361f5d-d638-4d39-8231-66fa52637203 + :setup: Standalone instance + :steps: + 1. Create instance + 2. Check the value of nsslapd-nagle + :expectedresults: + 1. Success + 2. The value of nsslapd-nagle should be off + """ + + log.info('Check the value of nsslapd-nagle attribute is off by default') + assert topo.standalone.config.get_attr_val_utf8('nsslapd-nagle') == 'off' + + +def test_maxbersize_repl(topology_m2, big_file): + """maxbersize is ignored in the replicated operations. + + :id: ad57de60-7d56-4323-bbca-5556e5cdb126 + :setup: MMR with two suppliers, test user, + 1 MiB big value for any attribute + :steps: + 1. Set maxbersize attribute to a small value (20KiB) on supplier2 + 2. Add the big value to supplier2 + 3. Add the big value to supplier1 + 4. Check if the big value was successfully replicated to supplier2 + :expectedresults: + 1. maxbersize should be successfully set + 2. Adding the big value to supplier2 failed + 3. Adding the big value to supplier1 succeed + 4. 
The big value is successfully replicated to supplier2 + """ + + users_m1 = UserAccounts(topology_m2.ms["supplier1"], DEFAULT_SUFFIX) + users_m2 = UserAccounts(topology_m2.ms["supplier2"], DEFAULT_SUFFIX) + + user_m1 = users_m1.create(properties=TEST_USER_PROPERTIES) + time.sleep(2) + user_m2 = users_m2.get(dn=user_m1.dn) + + log.info("Set nsslapd-maxbersize: 20K to supplier2") + topology_m2.ms["supplier2"].config.set('nsslapd-maxbersize', '20480') + + topology_m2.ms["supplier2"].restart() + + log.info('Try to add attribute with a big value to supplier2 - expect to FAIL') + with pytest.raises(ldap.SERVER_DOWN): + user_m2.add('jpegphoto', big_file) + + topology_m2.ms["supplier2"].restart() + topology_m2.ms["supplier1"].restart() + + log.info('Try to add attribute with a big value to supplier1 - expect to PASS') + user_m1.add('jpegphoto', big_file) + + time.sleep(2) + + log.info('Check if a big value was successfully added to supplier1') + + photo_m1 = user_m1.get_attr_vals('jpegphoto') + + log.info('Check if a big value was successfully replicated to supplier2') + photo_m2 = user_m2.get_attr_vals('jpegphoto') + nbtries = 0; + while photo_m2 != photo_m1 and nbtries < 10: + nbtries = nbtries + 1 + photo_m2 = user_m2.get_attr_vals('jpegphoto') + assert photo_m2 == photo_m1 + +def test_config_listen_backport_size(topology_m2): + """Check that nsslapd-listen-backlog-size acted as expected + + :id: a4385d58-a6ab-491e-a604-6df0e8ed91cd + :setup: MMR with two suppliers + :steps: + 1. Search for nsslapd-listen-backlog-size + 2. Set nsslapd-listen-backlog-size to a positive value + 3. Set nsslapd-listen-backlog-size to a negative value + 4. Set nsslapd-listen-backlog-size to an invalid value + 5. Set nsslapd-listen-backlog-size back to a default value + :expectedresults: + 1. Search should be successful + 2. nsslapd-listen-backlog-size should be successfully set + 3. nsslapd-listen-backlog-size should be successfully set + 4. 
Modification with an invalid value should throw an error + 5. nsslapd-listen-backlog-size should be successfully set + """ + + default_val = topology_m2.ms["supplier1"].config.get_attr_val_bytes('nsslapd-listen-backlog-size') + + topology_m2.ms["supplier1"].config.replace('nsslapd-listen-backlog-size', '256') + + topology_m2.ms["supplier1"].config.replace('nsslapd-listen-backlog-size', '-1') + + with pytest.raises(ldap.LDAPError): + topology_m2.ms["supplier1"].config.replace('nsslapd-listen-backlog-size', 'ZZ') + + topology_m2.ms["supplier1"].config.replace('nsslapd-listen-backlog-size', default_val) + + +@pytest.mark.skipif(get_default_db_lib() == "mdb", reason="Not supported over mdb") +def test_config_deadlock_policy(topology_m2): + """Check that nsslapd-db-deadlock-policy acted as expected + + :id: a24e25fd-bc15-47fa-b018-372f6a2ec59c + :setup: MMR with two suppliers + :steps: + 1. Search for nsslapd-db-deadlock-policy and check if + it contains a default value + 2. Set nsslapd-db-deadlock-policy to a positive value + 3. Set nsslapd-db-deadlock-policy to a negative value + 4. Set nsslapd-db-deadlock-policy to an invalid value + 5. Set nsslapd-db-deadlock-policy back to a default value + :expectedresults: + 1. Search should be a successful and should contain a default value + 2. nsslapd-db-deadlock-policy should be successfully set + 3. nsslapd-db-deadlock-policy should be successfully set + 4. Modification with an invalid value should throw an error + 5. 
nsslapd-db-deadlock-policy should be successfully set + """ + + default_val = b'9' + + ldbmconfig = LDBMConfig(topology_m2.ms["supplier1"]) + bdbconfig = BDB_LDBMConfig(topology_m2.ms["supplier1"]) + + if ds_is_older('1.4.2'): + deadlock_policy = ldbmconfig.get_attr_val_bytes('nsslapd-db-deadlock-policy') + else: + deadlock_policy = bdbconfig.get_attr_val_bytes('nsslapd-db-deadlock-policy') + + assert deadlock_policy == default_val + + # Try a range of valid values + for val in (b'0', b'5', b'9'): + ldbmconfig.replace('nsslapd-db-deadlock-policy', val) + if ds_is_older('1.4.2'): + deadlock_policy = ldbmconfig.get_attr_val_bytes('nsslapd-db-deadlock-policy') + else: + deadlock_policy = bdbconfig.get_attr_val_bytes('nsslapd-db-deadlock-policy') + + assert deadlock_policy == val + + # Try a range of invalid values + for val in ('-1', '10'): + with pytest.raises(ldap.LDAPError): + ldbmconfig.replace('nsslapd-db-deadlock-policy', val) + + # Cleanup - undo what we've done + ldbmconfig.replace('nsslapd-db-deadlock-policy', deadlock_policy) + + +@pytest.mark.bz766322 +@pytest.mark.ds26 +def test_defaultnamingcontext(topo): + """Tests configuration attribute defaultNamingContext in the rootdse + + :id: de9a21d3-00f9-4c6d-bb40-56aa1ba36578 + :setup: Standalone instance + :steps: + 1. Check the attribute nsslapd-defaultnamingcontext is present in cn=config + 2. Delete nsslapd-defaultnamingcontext attribute + 3. Add new valid Suffix and modify nsslapd-defaultnamingcontext with new suffix + 4. Add new invalid value at runtime to nsslapd-defaultnamingcontext + 5. Modify nsslapd-defaultnamingcontext with blank value + 6. Add new suffix when nsslapd-defaultnamingcontext is empty + 7. Check the value of the nsslapd-defaultnamingcontext automatically have the new suffix + 8. Adding new suffix when nsslapd-defaultnamingcontext is not empty + 9. Check the value of the nsslapd-defaultnamingcontext has not changed + 10. 
Remove the newly added suffix and check the values of the attribute is not changed + 11. Remove the original suffix which is currently nsslapd-defaultnamingcontext + 12. Check nsslapd-defaultnamingcontext become empty. + :expectedresults: + 1. This should be successful + 2. It should give 'server unwilling to perform' error + 3. It should be successful + 4. It should give 'no such object' error + 5. It should be successful + 6. Add should be successful + 7. nsslapd-defaultnamingcontext should have new suffix + 8. Add should be successful + 9. defaultnamingcontext should not change + 10. Remove should be successful and defaultnamingcontext should not change + 11. Removal should be successful + 12. nsslapd-defaultnamingcontext should be empty + """ + + backends = Backends(topo.standalone) + test_suffix1 = 'dc=test1,dc=com' + test_db1 = 'test1_db' + test_suffix2 = 'dc=test2,dc=com' + test_db2 = 'test2_db' + test_suffix3 = 'dc=test3,dc=com' + test_db3 = 'test3_db' + + log.info("Check the attribute nsslapd-defaultnamingcontext is present in cn=config") + assert topo.standalone.config.present('nsslapd-defaultnamingcontext') + + log.info("Delete nsslapd-defaultnamingcontext attribute") + with pytest.raises(ldap.UNWILLING_TO_PERFORM): + topo.standalone.config.remove_all('nsslapd-defaultnamingcontext') + + b1 = backends.create(properties={'cn': test_db1, + 'nsslapd-suffix': test_suffix1}) + + log.info("modify nsslapd-defaultnamingcontext with new suffix") + topo.standalone.config.replace('nsslapd-defaultnamingcontext', test_suffix1) + + log.info("Add new invalid value at runtime to nsslapd-defaultnamingcontext") + with pytest.raises(ldap.NO_SUCH_OBJECT): + topo.standalone.config.replace('nsslapd-defaultnamingcontext', 'some_invalid_value') + + log.info("Modify nsslapd-defaultnamingcontext with blank value") + topo.standalone.config.replace('nsslapd-defaultnamingcontext', ' ') + + log.info("Add new suffix when nsslapd-defaultnamingcontext is empty") + b2 = 
backends.create(properties={'cn': test_db2, + 'nsslapd-suffix': test_suffix2}) + + log.info("Check the value of the nsslapd-defaultnamingcontext automatically have the new suffix") + assert topo.standalone.config.get_attr_val_utf8('nsslapd-defaultnamingcontext') == test_suffix2 + + log.info("Adding new suffix when nsslapd-defaultnamingcontext is not empty") + b3 = backends.create(properties={'cn': test_db3, + 'nsslapd-suffix': test_suffix3}) + + log.info("Check the value of the nsslapd-defaultnamingcontext has not changed") + assert topo.standalone.config.get_attr_val_utf8('nsslapd-defaultnamingcontext') == test_suffix2 + + log.info("Remove the newly added suffix and check the values of the attribute is not changed") + b3.delete() + assert topo.standalone.config.get_attr_val_utf8('nsslapd-defaultnamingcontext') == test_suffix2 + + log.info("Remove all the suffix at the end") + b1.delete() + b2.delete() + + +@pytest.mark.bz602456 +def test_allow_add_delete_config_attributes(topo): + """Tests configuration attributes are allowed to add and delete + + :id: d9a3f264-4111-406b-9900-a70e5403458a + :setup: Standalone instance + :steps: + 1. Add a new valid attribute at runtime to cn=config + 2. Check if the new valid attribute is present + 3. Delete nsslapd-listenhost to restore the default value + 4. Restart the server + 5. Check nsslapd-listenhost is present with default value + 6. Add new invalid attribute at runtime to cn=config + 7. Make sure the invalid attribute is not added + :expectedresults: + 1. This should be successful + 2. This should be successful + 3. This should be successful + 4. This should be successful + 5. This should be successful + 6. It should give 'server unwilling to perform' error + 7. 
Invalid attribute should not be added + """ + default_listenhost = topo.standalone.config.get_attr_val_utf8('nsslapd-listenhost') + + log.info("Add a new valid attribute at runtime to cn=config") + topo.standalone.config.add('nsslapd-listenhost', 'localhost') + assert topo.standalone.config.present('nsslapd-listenhost', 'localhost') + + log.info("Delete nsslapd-listenhost to restore the default value") + topo.standalone.config.remove('nsslapd-listenhost', 'localhost') + topo.standalone.restart() + assert topo.standalone.config.present('nsslapd-listenhost', default_listenhost) + + log.info("Add new invalid attribute at runtime to cn=config") + with pytest.raises(ldap.UNWILLING_TO_PERFORM): + topo.standalone.config.add('invalid-attribute', 'invalid-value') + + log.info("Make sure the invalid attribute is not added") + assert not topo.standalone.config.present('invalid-attribute', 'invalid-value') + + +@pytest.mark.bz918705 +@pytest.mark.ds511 +def test_ignore_virtual_attrs(topo): + """Test nsslapd-ignore-virtual-attrs configuration attribute + + :id: 9915d71b-2c71-4ac0-91d7-92655d53541b + :customerscenario: True + :setup: Standalone instance + :steps: + 1. Check the attribute nsslapd-ignore-virtual-attrs is present in cn=config + 2. Check the default value of attribute nsslapd-ignore-virtual-attrs should be ON + 3. Set the valid values i.e. on/ON and off/OFF for nsslapd-ignore-virtual-attrs + 4. Set invalid value for attribute nsslapd-ignore-virtual-attrs + 5. Set nsslapd-ignore-virtual-attrs=off + 6. Add cosPointer, cosTemplate and test entry to default suffix, where virtual attribute is postal code + 7. Test if virtual attribute i.e. postal code shown in test entry while nsslapd-ignore-virtual-attrs: off + 8. Set nsslapd-ignore-virtual-attrs=on + 9. Test if virtual attribute i.e. postal code not shown while nsslapd-ignore-virtual-attrs: on + :expectedresults: + 1. This should be successful + 2. This should be successful + 3. This should be successful + 4. 
This should fail + 5. This should be successful + 6. This should be successful + 7. Postal code should be present + 8. This should be successful + 9. Postal code should not be present + """ + + log.info("Check the attribute nsslapd-ignore-virtual-attrs is present in cn=config") + assert topo.standalone.config.present('nsslapd-ignore-virtual-attrs') + + log.info("Check the default value of attribute nsslapd-ignore-virtual-attrs should be ON") + assert topo.standalone.config.get_attr_val_utf8('nsslapd-ignore-virtual-attrs') == "on" + + log.info("Set the valid values i.e. on/ON and off/OFF for nsslapd-ignore-virtual-attrs") + for attribute_value in ['on', 'off', 'ON', 'OFF']: + topo.standalone.config.set('nsslapd-ignore-virtual-attrs', attribute_value) + assert topo.standalone.config.present('nsslapd-ignore-virtual-attrs', attribute_value) + + log.info("Set invalid value for attribute nsslapd-ignore-virtual-attrs") + with pytest.raises(ldap.OPERATIONS_ERROR): + topo.standalone.config.set('nsslapd-ignore-virtual-attrs', 'invalid_value') + + cos_template_properties = { + 'cn': 'cosTemplateExample', + 'postalcode': '117' + } + cos_templates = CosTemplates(topo.standalone, DEFAULT_SUFFIX, 'ou=People') + test_cos_template = cos_templates.create(properties=cos_template_properties) + + log.info("Add cosPointer, cosTemplate and test entry to default suffix, where virtual attribute is postal code") + cos_pointer_properties = { + 'cn': 'cosPointer', + 'description': 'cosPointer example', + 'cosTemplateDn': 'cn=cosTemplateExample,ou=People,dc=example,dc=com', + 'cosAttribute': 'postalcode', + } + cos_pointer_definitions = CosPointerDefinitions(topo.standalone, DEFAULT_SUFFIX, 'ou=People') + test_cos_pointer_definition = cos_pointer_definitions.create(properties=cos_pointer_properties) + + test_users = UserAccounts(topo.standalone, DEFAULT_SUFFIX) + test_user = test_users.create(properties=TEST_USER_PROPERTIES) + + log.info("Test if virtual attribute i.e. 
postal code shown in test entry while nsslapd-ignore-virtual-attrs: off") + assert test_user.present('postalcode', '117') + + log.info("Set nsslapd-ignore-virtual-attrs=on") + topo.standalone.config.set('nsslapd-ignore-virtual-attrs', 'on') + + log.info("Test if virtual attribute i.e. postal code not shown while nsslapd-ignore-virtual-attrs: on") + assert not test_user.present('postalcode', '117') + +def test_ignore_virtual_attrs_after_restart(topo): + """Test nsslapd-ignore-virtual-attrs configuration attribute + The attribute is ON by default. If it set to OFF, it keeps + its value on restart + + :id: ac368649-4fda-473c-9ef8-e0c728b162af + :customerscenario: True + :setup: Standalone instance + :steps: + 1. Check the attribute nsslapd-ignore-virtual-attrs is present in cn=config + 2. Check the default value of attribute nsslapd-ignore-virtual-attrs should be ON + 3. Set nsslapd-ignore-virtual-attrs=off + 4. restart the instance + 5. Check the attribute nsslapd-ignore-virtual-attrs is OFF + :expectedresults: + 1. This should be successful + 2. This should be successful + 3. This should be successful + 4. This should be successful + 5. 
This should be successful + """ + + log.info("Check the attribute nsslapd-ignore-virtual-attrs is present in cn=config") + assert topo.standalone.config.present('nsslapd-ignore-virtual-attrs') + + log.info("Check the default value of attribute nsslapd-ignore-virtual-attrs should be ON") + assert topo.standalone.config.get_attr_val_utf8('nsslapd-ignore-virtual-attrs') == "on" + + log.info("Set nsslapd-ignore-virtual-attrs = off") + topo.standalone.config.set('nsslapd-ignore-virtual-attrs', 'off') + + topo.standalone.restart() + + log.info("Check the default value of attribute nsslapd-ignore-virtual-attrs should be OFF") + assert topo.standalone.config.present('nsslapd-ignore-virtual-attrs', 'off') + +@pytest.mark.bz918694 +@pytest.mark.ds408 +def test_ndn_cache_enabled(topo): + """Test nsslapd-ignore-virtual-attrs configuration attribute + + :id: 2caa3ec0-cd05-458e-9e21-3b73cf4697ff + :setup: Standalone instance + :steps: + 1. Check the attribute nsslapd-ndn-cache-enabled is present in cn=config + 2. Check the attribute nsslapd-ndn-cache-enabled has the default value set as ON + 3. Check the attribute nsslapd-ndn-cache-max-size is present in cn=config + 4. Check the backend monitor output for Normalized DN cache statistics while nsslapd-ndn-cache-enabled is OFF + 5. Set nsslapd-ndn-cache-enabled ON and check the backend monitor output for Normalized DN cache statistics + 6. Set invalid value for nsslapd-ndn-cache-enabled + 7. Set invalid value for nsslapd-ndn-cache-max-size + :expectedresults: + 1. This should be successful + 2. This should be successful + 3. This should be successful + 4. Backend monitor output should not have NDN cache statistics + 5. Backend monitor output should have NDN cache statistics + 6. This should fail + 7. 
This should fail + """ + log.info("Check the attribute nsslapd-ndn-cache-enabled is present in cn=config") + assert topo.standalone.config.present('nsslapd-ndn-cache-enabled') + + log.info("Check the attribute nsslapd-ndn-cache-enabled has the default value set as ON") + assert topo.standalone.config.get_attr_val_utf8('nsslapd-ndn-cache-enabled') == 'on' + + log.info("Check the attribute nsslapd-ndn-cache-max-size is present in cn=config") + assert topo.standalone.config.present('nsslapd-ndn-cache-max-size') + + backends = Backends(topo.standalone) + backend = backends.get(DEFAULT_BENAME) + + log.info("Ticket#49593 : NDN cache stats should be under the global stats - Implemented in 1.4") + log.info("Fetch the monitor value according to the ds version") + if ds_is_older('1.4'): + monitor = backend.get_monitor() + else: + monitor = MonitorLDBM(topo.standalone) + + log.info("Check the backend monitor output for Normalized DN cache statistics, " + "while nsslapd-ndn-cache-enabled is off") + topo.standalone.config.set('nsslapd-ndn-cache-enabled', 'off') + topo.standalone.restart() + assert not monitor.present('normalizedDnCacheHits') + + log.info("Check the backend monitor output for Normalized DN cache statistics, " + "while nsslapd-ndn-cache-enabled is on") + topo.standalone.config.set('nsslapd-ndn-cache-enabled', 'on') + topo.standalone.restart() + assert monitor.present('normalizedDnCacheHits') + + log.info("Set invalid value for nsslapd-ndn-cache-enabled") + with pytest.raises(ldap.OPERATIONS_ERROR): + topo.standalone.config.set('nsslapd-ndn-cache-enabled', 'invalid_value') + + log.info("Set invalid value for nsslapd-ndn-cache-max-size") + with pytest.raises(ldap.OPERATIONS_ERROR): + topo.standalone.config.set('nsslapd-ndn-cache-max-size', 'invalid_value') + + +def test_require_index(topo): + """Test nsslapd-ignore-virtual-attrs configuration attribute + + :id: fb6e31f2-acc2-4e75-a195-5c356faeb803 + :setup: Standalone instance + :steps: + 1. 
Set "nsslapd-require-index" to "on" + 2. Test an unindexed search is rejected + :expectedresults: + 1. Success + 2. Success + """ + + # Set the config + be_insts = Backends(topo.standalone).list() + for be in be_insts: + if be.get_attr_val_utf8_l('nsslapd-suffix') == DEFAULT_SUFFIX: + be.set('nsslapd-require-index', 'on') + + db_cfg = DatabaseConfig(topo.standalone) + db_cfg.set([('nsslapd-idlistscanlimit', '100')]) + + users = UserAccounts(topo.standalone, DEFAULT_SUFFIX) + for i in range(101): + users.create_test_user(uid=i) + + # Issue unindexed search,a nd make sure it is rejected + raw_objects = DSLdapObjects(topo.standalone, basedn=DEFAULT_SUFFIX) + with pytest.raises(ldap.UNWILLING_TO_PERFORM): + raw_objects.filter("(description=test*)") + + + +@pytest.mark.skipif(ds_is_older('1.4.2'), reason="The config setting only exists in 1.4.2 and higher") +def test_require_internal_index(topo): + """Test nsslapd-ignore-virtual-attrs configuration attribute + + :id: 22b94f30-59e3-4f27-89a1-c4f4be036f7f + :setup: Standalone instance + :steps: + 1. Set "nsslapd-require-internalop-index" to "on" + 2. Enable RI plugin, and configure it to use an attribute that is not indexed + 3. Create a user and add it a group + 4. Deleting user should be rejected as the RI plugin issues an unindexed internal search + :expectedresults: + 1. Success + 2. Success + 3. Success + 4. 
Success + """ + # Set the config + be_insts = Backends(topo.standalone).list() + for be in be_insts: + if be.get_attr_val_utf8_l('nsslapd-suffix') == DEFAULT_SUFFIX: + be.set('nsslapd-require-index', 'off') + be.set('nsslapd-require-internalop-index', 'on') + + # Configure RI plugin + rip = ReferentialIntegrityPlugin(topo.standalone) + rip.set('referint-membership-attr', 'description') + rip.enable() + + # Create a bunch of users + db_cfg = DatabaseConfig(topo.standalone) + db_cfg.set([('nsslapd-idlistscanlimit', '100')]) + users = UserAccounts(topo.standalone, DEFAULT_SUFFIX) + for i in range(102, 202): + users.create_test_user(uid=i) + + # Create user and group + user = users.create(properties={ + 'uid': 'indexuser', + 'cn' : 'indexuser', + 'sn' : 'user', + 'uidNumber' : '1010', + 'gidNumber' : '2010', + 'homeDirectory' : '/home/indexuser' + }) + groups = Groups(topo.standalone, DEFAULT_SUFFIX) + group = groups.create(properties={'cn': 'group', + 'member': user.dn}) + + # Restart the server + topo.standalone.restart() + + # Deletion of user should be rejected + with pytest.raises(ldap.UNWILLING_TO_PERFORM): + user.delete() + + +def get_pstack(pid): + """Get a pstack of the pid.""" + res = subprocess.run((PSTACK_CMD, str(pid)), stdout=subprocess.PIPE, + stderr=subprocess.STDOUT, encoding='utf-8') + return str(res.stdout) + +def check_number_of_threads(cfgnbthreads, monitor, pid): + monresults = monitor.get_threads() + # Add waitingthreads and busythreads + waiting = int(monresults[3][0]) + busy = int(monresults[4][0]) + log.info(f'Number of threads: configured={cfgnbthreads} waiting={waiting} busy={busy}') + + monnbthreads = waiting + busy + assert monnbthreads == cfgnbthreads + if os.path.isfile(PSTACK_CMD): + pstackresult = get_pstack(pid) + assert pstackresult.count('connection_threadmain') == cfgnbthreads + else: + log.info('pstack is not installed ==> skipping pstack test.') + +def test_changing_threadnumber(topo): + """Test 
nsslapd-ignore-virtual-attrs configuration attribute + + :id: 11bcf426-061c-11ee-8c22-482ae39447e5 + :setup: Standalone instance + :steps: + 1. Check that feature is supported + 2 Get nsslapd-threadnumber original value + 3. Change nsslapd-threadnumber to 40 + 4. Check that monitoring and pstack shows the same number than configured number of threads + 5. Create a user and add it a group + 6. Change nsslapd-threadnumber to 10 + 7. Check that monitoring and pstack shows the same number than configured number of threads + 8. Set back the number of threads to the original value + 9. Check that monitoring and pstack shows the same number than configured number of threads + :expectedresults: + 1. Skip the test if monitoring result does not have the new attributes. + 2. Success + 3. Success + 4. Success + 5. Success + 6. Success + 7. Success + 8. Success + 9. Success + """ + inst = topo.standalone + pid = pid_from_file(inst.pid_file()) + assert pid != 0 and pid != None + + config = Config(inst) + cfgattr = 'nsslapd-threadnumber' + cfgnbthreads = config.get_attr_vals_utf8(cfgattr)[0] + + monitor = Monitor(inst) + monresults = monitor.get_threads() + if len(monresults) < 5: + pytest.skip("This version does not support dynamic change of nsslapd-threadnumber without restart.") + + config.replace(cfgattr, '40'); + time.sleep(3) + check_number_of_threads(40, monitor, pid) + + config.replace(cfgattr, '10'); + # No need to wait here (threads are closed before config change result is returned) + check_number_of_threads(10, monitor, pid) + + config.replace(cfgattr, cfgnbthreads); + time.sleep(3) + check_number_of_threads(int(cfgnbthreads), monitor, pid) + + +@pytest.fixture(scope="module") +def create_lmdb_instance(request): + verbose = log.level > logging.DEBUG + instname = 'i_lmdb' + assert SetupDs(verbose=True, log=log).create_from_dict( { + 'general' : {}, + 'slapd' : { + 'instance_name': instname, + 'db_lib': 'mdb', + 'mdb_max_size': '0.5 Gb', + }, + 'backend-userroot': { + 
'sample_entries': 'yes', + 'suffix': DEFAULT_SUFFIX, + }, + } ) + inst = DirSrv(verbose=verbose, external_log=log) + inst.local_simple_allocate(instname, binddn=DN_DM, password=PW_DM) + inst.setup_ldapi() + + def fin(): + inst.delete() + + request.addfinalizer(fin) + inst.open() + return inst + + +def set_and_check(inst, db_config, dsconf_attr, ldap_attr, val): + val = str(val) + args = FakeArgs() + setattr(args, dsconf_attr, val) + db_config_set(inst, db_config.dn, log, args) + cfg_vals = db_config.get() + assert ldap_attr in cfg_vals + assert cfg_vals[ldap_attr][0] == val + + +def test_lmdb_config(create_lmdb_instance): + """Test nsslapd-ignore-virtual-attrs configuration attribute + + :id: bca28086-61cf-11ee-a064-482ae39447e5 + :setup: Custom instance named 'i_lmdb' having db_lib=mdb and lmdb_size=0.5 + :steps: + 1. Get dscreate create-template output + 2. Check that 'db_lib' is in output + 3. Check that 'lmdb_size' is in output + 4. Get the database config + 5. Check that nsslapd-backend-implement is mdb + 6. Check that nsslapd-mdb-max-size is 536870912 (i.e 0.5Gb) + 7. Set a value for nsslapd-mdb-max-size and test the value is properly set + 8. Set a value for nsslapd-mdb-max-readers and test the value is properly set + 9. Set a value for nsslapd-mdb-max-dbs and test the value is properly set + :expectedresults: + 1. Success + 2. Success + 3. Success + 4. Success + 5. Success + 6. Success + 7. Success + 8. Success + 9. 
Success + """ + + res = subprocess.run(('dscreate', 'create-template'), stdout=subprocess.PIPE, + stderr=subprocess.STDOUT, encoding='utf-8') + inst = create_lmdb_instance + assert 'db_lib' in res.stdout + assert 'mdb_max_size' in res.stdout + db_config = DatabaseConfig(inst) + cfg_vals = db_config.get() + assert 'nsslapd-backend-implement' in cfg_vals + assert cfg_vals['nsslapd-backend-implement'][0] == 'mdb' + assert 'nsslapd-mdb-max-size' in cfg_vals + assert cfg_vals['nsslapd-mdb-max-size'][0] == '536870912' + set_and_check(inst, db_config, 'mdb_max_size', 'nsslapd-mdb-max-size', parse_size('2G')) + set_and_check(inst, db_config, 'mdb_max_readers', 'nsslapd-mdb-max-readers', 200) + set_and_check(inst, db_config, 'mdb_max_dbs', 'nsslapd-mdb-max-dbs', 200) + + +if __name__ == '__main__': + # Run isolated + # -s for DEBUG mode + CURRENT_FILE = os.path.realpath(__file__) + pytest.main("-s %s" % CURRENT_FILE) + + diff --git a/dirsrvtests/tests/suites/config/regression_test.py b/dirsrvtests/tests/suites/config/regression_test.py new file mode 100644 index 0000000..0000dd8 --- /dev/null +++ b/dirsrvtests/tests/suites/config/regression_test.py @@ -0,0 +1,114 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2017 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. 
+# --- END COPYRIGHT BLOCK --- +# +import logging +import pytest +from lib389.utils import * +from lib389.dseldif import DSEldif +from lib389.config import LDBMConfig +from lib389.backend import Backends +from lib389.topologies import topology_st as topo + +pytestmark = pytest.mark.tier0 + +logging.getLogger(__name__).setLevel(logging.INFO) +log = logging.getLogger(__name__) + +CUSTOM_MEM = '9100100100' + + +# Function to return value of available memory in kb +def get_available_memory(): + with open('/proc/meminfo') as file: + for line in file: + if 'MemAvailable' in line: + free_mem_in_kb = line.split()[1] + return int(free_mem_in_kb) + + +@pytest.mark.skipif(get_available_memory() < (int(CUSTOM_MEM)/1024), reason="available memory is too low") +@pytest.mark.bz1627512 +@pytest.mark.ds49618 +def test_set_cachememsize_to_custom_value(topo): + """Test if value nsslapd-cachememsize remains set + at the custom setting of value above 3805132804 bytes + after changing the value to 9100100100 bytes + + :id: 8a3efc00-65a9-4ee7-b8ee-e35840991ea9 + :setup: Standalone Instance + :steps: + 1. Disable in the cn=config,cn=ldbm database,cn=plugins,cn=config: + nsslapd-cache-autosize by setting it to 0 + 2. Disable in the cn=config,cn=ldbm database,cn=plugins,cn=config: + nsslapd-cache-autosize-split by setting it to 0 + 3. Restart the instance + 4. Set in the cn=UserRoot,cn=ldbm database,cn=plugins,cn=config: + nsslapd-cachememsize: CUSTOM_MEM + :expectedresults: + 1. nsslapd-cache-autosize is successfully disabled + 2. nsslapd-cache-autosize-split is successfully disabled + 3. The instance should be successfully restarted + 4. 
nsslapd-cachememsize is successfully set + """ + + config_ldbm = LDBMConfig(topo.standalone) + backends = Backends(topo.standalone) + userroot_ldbm = backends.get("userroot") + + log.info("Disabling nsslapd-cache-autosize by setting it to 0") + assert config_ldbm.set('nsslapd-cache-autosize', '0') + + log.info("Disabling nsslapd-cache-autosize-split by setting it to 0") + assert config_ldbm.set('nsslapd-cache-autosize-split', '0') + + log.info("Restarting instance") + topo.standalone.restart() + log.info("Instance restarted successfully") + + log.info("Set nsslapd-cachememsize to value {}".format(CUSTOM_MEM)) + assert userroot_ldbm.set('nsslapd-cachememsize', CUSTOM_MEM) + + +def test_maxbersize_repl(topo): + """Check that instance starts when nsslapd-errorlog-maxlogsize + nsslapd-errorlog-logmaxdiskspace are set in certain order + + :id: 743e912c-2be4-4f5f-9c2a-93dcb18f51a0 + :setup: MMR with two suppliers + :steps: + 1. Stop the instance + 2. Set nsslapd-errorlog-maxlogsize before/after + nsslapd-errorlog-logmaxdiskspace + 3. Start the instance + 4. Check the error log for errors + :expectedresults: + 1. Success + 2. Success + 3. Success + 4. 
The error log should contain no errors + """ + + inst = topo.standalone + dse_ldif = DSEldif(inst) + + inst.stop() + log.info("Set nsslapd-errorlog-maxlogsize before nsslapd-errorlog-logmaxdiskspace") + dse_ldif.replace('cn=config', 'nsslapd-errorlog-maxlogsize', '300') + dse_ldif.replace('cn=config', 'nsslapd-errorlog-logmaxdiskspace', '500') + inst.start() + log.info("Assert no init_dse_file errors in the error log") + assert not inst.ds_error_log.match('.*ERR - init_dse_file.*') + + inst.stop() + log.info("Set nsslapd-errorlog-maxlogsize after nsslapd-errorlog-logmaxdiskspace") + dse_ldif.replace('cn=config', 'nsslapd-errorlog-logmaxdiskspace', '500') + dse_ldif.replace('cn=config', 'nsslapd-errorlog-maxlogsize', '300') + inst.start() + log.info("Assert no init_dse_file errors in the error log") + assert not inst.ds_error_log.match('.*ERR - init_dse_file.*') + diff --git a/dirsrvtests/tests/suites/config/removed_config_49298_test.py b/dirsrvtests/tests/suites/config/removed_config_49298_test.py new file mode 100644 index 0000000..7b585b4 --- /dev/null +++ b/dirsrvtests/tests/suites/config/removed_config_49298_test.py @@ -0,0 +1,90 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2017 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. +# --- END COPYRIGHT BLOCK --- +# +import pytest +import os +import logging +import subprocess + +from lib389.topologies import topology_st as topo + +pytestmark = pytest.mark.tier0 + +DEBUGGING = os.getenv("DEBUGGING", default=False) +if DEBUGGING: + logging.getLogger(__name__).setLevel(logging.DEBUG) +else: + logging.getLogger(__name__).setLevel(logging.INFO) +log = logging.getLogger(__name__) + +def test_restore_config(topo): + """ + Check that if a dse.ldif and backup are removed, that the server still starts. + + :id: e1c38fa7-30bc-46f2-a934-f8336f387581 + :setup: Standalone instance + :steps: + 1. Stop the instance + 2. Delete 'dse.ldif' + 3. 
Start the instance + :expectedresults: + 1. Steps 1 and 2 succeed. + 2. Server will succeed to start with restored cfg. + """ + topo.standalone.stop() + + dse_path = topo.standalone.get_config_dir() + + log.info(dse_path) + + for i in ('dse.ldif', 'dse.ldif.startOK'): + p = os.path.join(dse_path, i) + d = os.path.join(dse_path, i + '.49298') + os.rename(p, d) + + # This will pass. + topo.standalone.start() + +def test_removed_config(topo): + """ + Check that if a dse.ldif and backup are removed, that the server + exits better than "segfault". + + :id: b45272d1-c197-473e-872f-07257fcb2ec0 + :setup: Standalone instance + :steps: + 1. Stop the instance + 2. Delete 'dse.ldif', 'dse.ldif.bak', 'dse.ldif.startOK' + 3. Start the instance + :expectedresults: + 1. Steps 1 and 2 succeed. + 2. Server will fail to start, but will not crash. + """ + topo.standalone.stop() + + dse_path = topo.standalone.get_config_dir() + + log.info(dse_path) + + for i in ('dse.ldif', 'dse.ldif.bak', 'dse.ldif.startOK'): + p = os.path.join(dse_path, i) + d = os.path.join(dse_path, i + '.49298') + os.rename(p, d) + + # We actually can't check the log output, because it can't read dse.ldif, + # don't know where to write it yet! All we want is the server fail to + # start here, rather than infinite run + segfault. 
+ with pytest.raises(subprocess.CalledProcessError): + topo.standalone.start() + + # Restore the files so that setup-ds.l can work + for i in ('dse.ldif', 'dse.ldif.bak', 'dse.ldif.startOK'): + p = os.path.join(dse_path, i) + d = os.path.join(dse_path, i + '.49298') + os.rename(d, p) + diff --git a/dirsrvtests/tests/suites/cos/__init__.py b/dirsrvtests/tests/suites/cos/__init__.py new file mode 100644 index 0000000..b16a278 --- /dev/null +++ b/dirsrvtests/tests/suites/cos/__init__.py @@ -0,0 +1,3 @@ +""" + :Requirement: 389-ds-base: Class of Service +""" diff --git a/dirsrvtests/tests/suites/cos/cos_test.py b/dirsrvtests/tests/suites/cos/cos_test.py new file mode 100644 index 0000000..f9bd79c --- /dev/null +++ b/dirsrvtests/tests/suites/cos/cos_test.py @@ -0,0 +1,149 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2019 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. +# --- END COPYRIGHT BLOCK --- + +import logging +import time +import pytest, os, ldap +from lib389.cos import CosClassicDefinition, CosClassicDefinitions, CosTemplate +from lib389._constants import DEFAULT_SUFFIX +from lib389.topologies import topology_st as topo +from lib389.idm.role import FilteredRoles +from lib389.idm.nscontainer import nsContainer +from lib389.idm.user import UserAccount + +logging.getLogger(__name__).setLevel(logging.INFO) +log = logging.getLogger(__name__) + +pytestmark = pytest.mark.tier1 +@pytest.fixture(scope="function") +def reset_ignore_vattr(topo, request): + default_ignore_vattr_value = topo.standalone.config.get_attr_val_utf8('nsslapd-ignore-virtual-attrs') + def fin(): + topo.standalone.config.set('nsslapd-ignore-virtual-attrs', default_ignore_vattr_value) + + request.addfinalizer(fin) + +def test_positive(topo, reset_ignore_vattr): + """CoS positive tests + + :id: a5a74235-597f-4fe8-8c38-826860927472 + :setup: server + :steps: + 1. Add filter role entry + 2. Add ns container + 3. 
Add cos template + 4. Add CosClassic Definition + 5. Cos entries should be added and searchable + 6. employeeType attribute should be there in user entry as per the cos plugin property + :expectedresults: + 1. Operation should success + 2. Operation should success + 3. Operation should success + 4. Operation should success + 5. Operation should success + 6. Operation should success + """ + # Adding ns filter role + roles = FilteredRoles(topo.standalone, DEFAULT_SUFFIX) + roles.create(properties={'cn': 'FILTERROLEENGROLE', + 'nsRoleFilter': 'cn=eng*'}) + # adding ns container + nsContainer(topo.standalone,'cn=cosClassicGenerateEmployeeTypeUsingnsroleTemplates,{}'.format(DEFAULT_SUFFIX))\ + .create(properties={'cn': 'cosTemplates'}) + + # creating cos template + properties = {'employeeType': 'EngType', + 'cn': '"cn=filterRoleEngRole,dc=example,dc=com",cn=cosClassicGenerateEmployeeTypeUsingnsroleTemplates,dc=example,dc=com' + } + CosTemplate(topo.standalone, 'cn="cn=filterRoleEngRole,dc=example,dc=com",cn=cosClassicGenerateEmployeeTypeUsingnsroleTemplates,{}'.format(DEFAULT_SUFFIX))\ + .create(properties=properties) + + # creating CosClassicDefinition + properties = {'cosTemplateDn': 'cn=cosClassicGenerateEmployeeTypeUsingnsroleTemplates,{}'.format(DEFAULT_SUFFIX), + 'cosAttribute': 'employeeType', + 'cosSpecifier': 'nsrole', + 'cn': 'cosClassicGenerateEmployeeTypeUsingnsrole'} + CosClassicDefinition(topo.standalone,'cn=cosClassicGenerateEmployeeTypeUsingnsrole,{}'.format(DEFAULT_SUFFIX))\ + .create(properties=properties) + + # Adding User entry + properties = { + 'uid': 'enguser1', + 'cn': 'enguser1', + 'sn': 'user', + 'uidNumber': '1000', + 'gidNumber': '2000', + 'homeDirectory': '/home/' + 'enguser1' + } + user = UserAccount(topo.standalone, 'cn=enguser1,{}'.format(DEFAULT_SUFFIX)) + user.create(properties=properties) + + # Asserting Cos should be added and searchable + cosdef = CosClassicDefinitions(topo.standalone, 
DEFAULT_SUFFIX).get('cosClassicGenerateEmployeeTypeUsingnsrole') + assert cosdef.dn == 'cn=cosClassicGenerateEmployeeTypeUsingnsrole,dc=example,dc=com' + assert cosdef.get_attr_val_utf8('cn') == 'cosClassicGenerateEmployeeTypeUsingnsrole' + + # CoS definition entry's cosSpecifier attribute specifies the employeeType attribute + assert user.present('employeeType') + cosdef.delete() + +def test_vattr_on_cos_definition(topo, reset_ignore_vattr): + """Test nsslapd-ignore-virtual-attrs configuration attribute + The attribute is ON by default. If a cos definition is + added it is moved to OFF + + :id: e7ef5254-386f-4362-bbb4-9409f3f51b08 + :customerscenario: True + :setup: Standalone instance + :steps: + 1. Check the attribute nsslapd-ignore-virtual-attrs is present in cn=config + 2. Check the default value of attribute nsslapd-ignore-virtual-attrs should be ON + 3. Create a cos definition for employeeType + 4. Check the value of nsslapd-ignore-virtual-attrs should be OFF (with a delay for postop processing) + 5. Check a message "slapi_vattrspi_regattr - Because employeeType,.." in error logs + 6. Check after deleting cos definition value of attribute nsslapd-ignore-virtual-attrs is set back to ON + :expectedresults: + 1. This should be successful + 2. This should be successful + 3. This should be successful + 4. This should be successful + 5. This should be successful + 6. 
This should be successful + """ + + log.info("Check the attribute nsslapd-ignore-virtual-attrs is present in cn=config") + assert topo.standalone.config.present('nsslapd-ignore-virtual-attrs') + + log.info("Check the default value of attribute nsslapd-ignore-virtual-attrs should be ON") + assert topo.standalone.config.get_attr_val_utf8('nsslapd-ignore-virtual-attrs') == "on" + + # creating CosClassicDefinition + log.info("Create a cos definition") + properties = {'cosTemplateDn': 'cn=cosClassicGenerateEmployeeTypeUsingnsroleTemplates,{}'.format(DEFAULT_SUFFIX), + 'cosAttribute': 'employeeType', + 'cosSpecifier': 'nsrole', + 'cn': 'cosClassicGenerateEmployeeTypeUsingnsrole'} + cosdef = CosClassicDefinition(topo.standalone,'cn=cosClassicGenerateEmployeeTypeUsingnsrole,{}'.format(DEFAULT_SUFFIX))\ + .create(properties=properties) + + log.info("Check the default value of attribute nsslapd-ignore-virtual-attrs should be OFF") + time.sleep(2) + assert topo.standalone.config.present('nsslapd-ignore-virtual-attrs', 'off') + + topo.standalone.stop() + assert topo.standalone.searchErrorsLog("slapi_vattrspi_regattr - Because employeeType is a new registered virtual attribute , nsslapd-ignore-virtual-attrs was set to \'off\'") + topo.standalone.start() + log.info("Delete a cos definition") + cosdef.delete() + log.info("Check the default value of attribute nsslapd-ignore-virtual-attrs is back to ON") + topo.standalone.restart() + assert topo.standalone.config.get_attr_val_utf8('nsslapd-ignore-virtual-attrs') == "on" + +if __name__ == "__main__": + CURRENT_FILE = os.path.realpath(__file__) + pytest.main("-s -v %s" % CURRENT_FILE) diff --git a/dirsrvtests/tests/suites/cos/indirect_cos_test.py b/dirsrvtests/tests/suites/cos/indirect_cos_test.py new file mode 100644 index 0000000..62f0071 --- /dev/null +++ b/dirsrvtests/tests/suites/cos/indirect_cos_test.py @@ -0,0 +1,179 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2020 Red Hat, Inc. +# All rights reserved. 
+# +# License: GPL (version 3 or any later version). +# See LICENSE for details. +# --- END COPYRIGHT BLOCK --- + +import logging +import pytest +import os +import ldap +import time +import subprocess + +from lib389 import Entry +from lib389.idm.user import UserAccounts +from lib389.idm.domain import Domain +from lib389.topologies import topology_st as topo +from lib389._constants import (DEFAULT_SUFFIX, DN_DM, PASSWORD, HOST_STANDALONE, + SERVERID_STANDALONE, PORT_STANDALONE) + +pytestmark = pytest.mark.tier1 + +DEBUGGING = os.getenv("DEBUGGING", default=False) +if DEBUGGING: + logging.getLogger(__name__).setLevel(logging.DEBUG) +else: + logging.getLogger(__name__).setLevel(logging.INFO) +log = logging.getLogger(__name__) + +TEST_USER_DN = "uid=test_user,ou=people,dc=example,dc=com" +OU_PEOPLE = 'ou=people,{}'.format(DEFAULT_SUFFIX) + +PW_POLICY_CONT_PEOPLE = 'cn="cn=nsPwPolicyEntry,' \ + 'ou=people,dc=example,dc=com",' \ + 'cn=nsPwPolicyContainer,ou=people,dc=example,dc=com' + + +def check_user(inst): + """Search the test user and make sure it has the expected attrs + """ + try: + entries = inst.search_s('dc=example,dc=com', ldap.SCOPE_SUBTREE, "uid=test_user") + log.debug('user: \n' + str(entries[0])) + assert entries[0].hasAttr('ou'), "Entry is missing ou cos attribute" + assert entries[0].hasAttr('x-department'), "Entry is missing description cos attribute" + assert entries[0].hasAttr('x-en-ou'), "Entry is missing givenname cos attribute" + except ldap.LDAPError as e: + log.fatal('Failed to search for user: ' + str(e)) + raise e + + +def setup_subtree_policy(topo): + """Set up subtree password policy + """ + + topo.standalone.config.set('nsslapd-pwpolicy-local', 'on') + + log.info('Create password policy for subtree {}'.format(OU_PEOPLE)) + try: + subprocess.call(['%s/dsconf' % topo.standalone.get_sbin_dir(), + 'slapd-standalone1', + 'localpwp', + 'addsubtree', + OU_PEOPLE]) + + except subprocess.CalledProcessError as e: + log.error('Failed to create pw policy 
policy for {}: error {}'.format( + OU_PEOPLE, e.message['desc'])) + raise e + + domain = Domain(topo.standalone, DEFAULT_SUFFIX) + domain.replace('pwdpolicysubentry', PW_POLICY_CONT_PEOPLE) + + time.sleep(1) + + +def setup_indirect_cos(topo): + """Setup indirect COS definition and template + """ + cosDef = Entry(('cn=cosDefinition,dc=example,dc=com', + {'objectclass': ['top', 'ldapsubentry', + 'cossuperdefinition', + 'cosIndirectDefinition'], + 'cosAttribute': ['ou merge-schemes', + 'x-department merge-schemes', + 'x-en-ou merge-schemes'], + 'cosIndirectSpecifier': 'seeAlso', + 'cn': 'cosDefinition'})) + + cosTemplate = Entry(('cn=cosTemplate,dc=example,dc=com', + {'objectclass': ['top', + 'extensibleObject', + 'cosTemplate'], + 'ou': 'My COS Org', + 'x-department': 'My COS x-department', + 'x-en-ou': 'my COS x-en-ou', + 'cn': 'cosTemplate'})) + try: + topo.standalone.add_s(cosDef) + topo.standalone.add_s(cosTemplate) + except ldap.LDAPError as e: + log.fatal('Failed to add cos: error ' + str(e)) + raise e + time.sleep(1) + + +@pytest.fixture(scope="module") +def setup(topo, request): + """Add schema, and test user + """ + log.info('Add custom schema...') + try: + ATTR_1 = (b"( 1.3.6.1.4.1.409.389.2.189 NAME 'x-department' SYNTAX 1.3.6.1.4.1.1466.115.121.1.15 X-ORIGIN 'user defined' )") + ATTR_2 = (b"( 1.3.6.1.4.1.409.389.2.187 NAME 'x-en-ou' SYNTAX 1.3.6.1.4.1.1466.115.121.1.15 X-ORIGIN 'user defined' )") + OC = (b"( xPerson-oid NAME 'xPerson' DESC '' SUP person STRUCTURAL MAY ( x-department $ x-en-ou ) X-ORIGIN 'user defined' )") + topo.standalone.modify_s("cn=schema", [(ldap.MOD_ADD, 'attributeTypes', ATTR_1), + (ldap.MOD_ADD, 'attributeTypes', ATTR_2), + (ldap.MOD_ADD, 'objectClasses', OC)]) + except ldap.LDAPError as e: + log.fatal('Failed to add custom schema') + raise e + time.sleep(1) + + log.info('Add test user...') + users = UserAccounts(topo.standalone, DEFAULT_SUFFIX) + + user_properties = { + 'uid': 'test_user', + 'cn': 'test user', + 'sn': 'user', + 
'uidNumber': '1000', + 'gidNumber': '2000', + 'homeDirectory': '/home/test_user', + 'seeAlso': 'cn=cosTemplate,dc=example,dc=com' + } + user = users.create(properties=user_properties) + + user.add('objectClass', 'xPerson') + + # Setup COS + log.info("Setup indirect COS...") + setup_indirect_cos(topo) + + +def test_indirect_cos(topo, setup): + """Test indirect cos + + :id: 890d5929-7d52-4a56-956e-129611b4649a + :setup: standalone + :steps: + 1. Test cos is working for test user + 2. Add subtree password policy + 3. Test cos is working for test user + :expectedresults: + 1. User has expected cos attrs + 2. Substree password policy setup is successful + 3. User still has expected cos attrs + """ + + # Step 1 - Search user and see if the COS attrs are included + log.info('Checking user...') + check_user(topo.standalone) + + # Step 2 - Add subtree password policy (Second COS - operational attribute) + setup_subtree_policy(topo) + + # Step 3 - Check user again now hat we have a mix of vattrs + log.info('Checking user...') + check_user(topo.standalone) + + +if __name__ == '__main__': + # Run isolated + # -s for DEBUG mode + CURRENT_FILE = os.path.realpath(__file__) + pytest.main("-s %s" % CURRENT_FILE) + diff --git a/dirsrvtests/tests/suites/disk_monitoring/__init__.py b/dirsrvtests/tests/suites/disk_monitoring/__init__.py new file mode 100644 index 0000000..2257178 --- /dev/null +++ b/dirsrvtests/tests/suites/disk_monitoring/__init__.py @@ -0,0 +1,3 @@ +""" + :Requirement: 389-ds-base: Disk Monitoring +""" diff --git a/dirsrvtests/tests/suites/disk_monitoring/disk_monitoring_divide_test.py b/dirsrvtests/tests/suites/disk_monitoring/disk_monitoring_divide_test.py new file mode 100644 index 0000000..ec4fa18 --- /dev/null +++ b/dirsrvtests/tests/suites/disk_monitoring/disk_monitoring_divide_test.py @@ -0,0 +1,104 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2021 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). 
+# See LICENSE for details. +# --- END COPYRIGHT BLOCK --- +# +import time +import subprocess +import pytest +from lib389.tasks import * +from lib389.utils import * +from lib389.topologies import topology_st +from lib389._mapped_object import DSLdapObjects + +pytestmark = pytest.mark.tier2 +disk_monitoring_ack = pytest.mark.skipif(not os.environ.get('DISK_MONITORING_ACK', False), reason="Disk monitoring tests may damage system configuration.") + +logging.getLogger(__name__).setLevel(logging.DEBUG) +log = logging.getLogger(__name__) + + +@pytest.fixture(scope="function") +def create_dummy_mount(topology_st, request): + cmds = ['setenforce 0', + 'mkdir /var/log/dirsrv/slapd-{}/tmp'.format(topology_st.standalone.serverid), + 'mount -t tmpfs tmpfs /var/log/dirsrv/slapd-{}/tmp -o size=0'.format(topology_st.standalone.serverid), + 'chown dirsrv: /var/log/dirsrv/slapd-{}/tmp'.format(topology_st.standalone.serverid)] + + log.info('Create dummy mount') + for cmd in cmds: + log.info('Command used : %s' % cmd) + subprocess.Popen(cmd, shell=True) + + def fin(): + cmds = ['umount /var/log/dirsrv/slapd-{}/tmp'.format(topology_st.standalone.serverid), + 'setenforce 1'] + + for cmd in cmds: + log.info('Command used : %s' % cmds) + subprocess.Popen(cmd, shell=True) + + request.addfinalizer(fin) + + +@pytest.fixture(scope="function") +def change_config(topology_st): + topology_st.standalone.config.set('nsslapd-disk-monitoring', 'on') + topology_st.standalone.config.set('nsslapd-disk-monitoring-readonly-on-threshold', 'on') + + +@pytest.mark.ds4414 +@pytest.mark.bz1890118 +@pytest.mark.skipif(ds_is_older("1.4.3.16"), reason="Might fail because of bz1890118") +@disk_monitoring_ack +def test_produce_division_by_zero(topology_st, create_dummy_mount, change_config): + """Test dirsrv will not crash when division by zero occurs + + :id: 51b11093-8851-41bd-86cb-217b1a3339c7 + :customerscenario: True + :setup: Standalone + :steps: + 1. Turn on disk monitoring + 2. Go below the threshold + 3. 
Check DS is up and not entering shutdown mode + :expectedresults: + 1. Success + 2. Success + 3. Success + """ + + standalone = topology_st.standalone + + log.info('Check search works before changing the nsslapd-auditlog attribute') + try: + DSLdapObjects(topology_st.standalone, basedn='cn=disk space,cn=monitor').filter("(objectclass=*)", scope=0) + except ldap.SERVER_DOWN as e: + log.info('Test failed - dirsrv crashed') + assert False + + log.info('Change location of nsslapd-auditlog') + standalone.config.set('nsslapd-auditlog', '/var/log/dirsrv/slapd-{}/tmp/audit'.format(standalone.serverid)) + + log.info('Check search will not fail') + try: + DSLdapObjects(topology_st.standalone, basedn='cn=disk space,cn=monitor').filter("(objectclass=*)", scope=0) + except ldap.SERVER_DOWN as e: + log.info('Test failed - dirsrv crashed') + assert False + + log.info('If passed, run search again just in case') + try: + DSLdapObjects(topology_st.standalone, basedn='cn=disk space,cn=monitor').filter("(objectclass=*)", scope=0) + except ldap.SERVER_DOWN as e: + log.info('Test failed - dirsrv crashed') + assert False + + +if __name__ == '__main__': + # Run isolated + # -s for DEBUG mode + CURRENT_FILE = os.path.realpath(__file__) + pytest.main("-s %s" % CURRENT_FILE) diff --git a/dirsrvtests/tests/suites/disk_monitoring/disk_monitoring_test.py b/dirsrvtests/tests/suites/disk_monitoring/disk_monitoring_test.py new file mode 100644 index 0000000..78d7dd7 --- /dev/null +++ b/dirsrvtests/tests/suites/disk_monitoring/disk_monitoring_test.py @@ -0,0 +1,773 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2018 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. 
+# --- END COPYRIGHT BLOCK --- + + +import os +import subprocess +import re +import time +import pytest +from lib389.tasks import * +from lib389._constants import * +from lib389.utils import ensure_bytes +from lib389.backend import Backends +from lib389.topologies import topology_st as topo +from lib389.paths import * +from lib389.idm.user import UserAccounts + +pytestmark = pytest.mark.tier2 +disk_monitoring_ack = pytest.mark.skipif(not os.environ.get('DISK_MONITORING_ACK', False), reason="Disk monitoring tests may damage system configuration.") + +THRESHOLD = '30' +THRESHOLD_BYTES = '30000000' + + +def _withouterrorlog(topo, condition, maxtimesleep): + timecount = 0 + while eval(condition): + time.sleep(1) + timecount += 1 + if timecount >= maxtimesleep: break + assert not eval(condition) + + +def _witherrorlog(topo, condition, maxtimesleep): + timecount = 0 + with open(topo.standalone.errlog, 'r') as study: study = study.read() + while condition not in study: + time.sleep(1) + timecount += 1 + with open(topo.standalone.errlog, 'r') as study: study = study.read() + if timecount >= maxtimesleep: break + assert condition in study + + +def presetup(topo): + """ + This is function is part of fixture function setup , will setup the environment for this test. 
+ """ + topo.standalone.stop() + if os.path.exists(topo.standalone.ds_paths.log_dir): + subprocess.call(['mount', '-t', 'tmpfs', '-o', 'size=35M', 'tmpfs', topo.standalone.ds_paths.log_dir]) + else: + os.mkdir(topo.standalone.ds_paths.log_dir) + subprocess.call(['mount', '-t', 'tmpfs', '-o', 'size=35M', 'tmpfs', topo.standalone.ds_paths.log_dir]) + subprocess.call('chown {}: -R {}'.format(DEFAULT_USER, topo.standalone.ds_paths.log_dir), shell=True) + subprocess.call('chown {}: -R {}/*'.format(DEFAULT_USER, topo.standalone.ds_paths.log_dir), shell=True) + subprocess.call('restorecon -FvvR {}'.format(topo.standalone.ds_paths.log_dir), shell=True) + topo.standalone.start() + + +def setupthesystem(topo): + """ + This function is part of fixture function setup , will setup the environment for this test. + """ + global TOTAL_SIZE, USED_SIZE, AVAIL_SIZE, HALF_THR_FILL_SIZE, FULL_THR_FILL_SIZE + topo.standalone.start() + topo.standalone.config.set('nsslapd-disk-monitoring-grace-period', '1') + topo.standalone.config.set('nsslapd-accesslog-logbuffering', 'off') + topo.standalone.config.set('nsslapd-disk-monitoring-threshold', ensure_bytes(THRESHOLD_BYTES)) + TOTAL_SIZE = int(re.findall(r'\d+', str(os.statvfs(topo.standalone.ds_paths.log_dir)))[2])*4096/1024/1024 + AVAIL_SIZE = round(int(re.findall(r'\d+', str(os.statvfs(topo.standalone.ds_paths.log_dir)))[3]) * 4096 / 1024 / 1024) + USED_SIZE = TOTAL_SIZE - AVAIL_SIZE + HALF_THR_FILL_SIZE = TOTAL_SIZE - float(THRESHOLD) + 5 - USED_SIZE + FULL_THR_FILL_SIZE = TOTAL_SIZE - 0.5 * float(THRESHOLD) + 5 - USED_SIZE + HALF_THR_FILL_SIZE = round(HALF_THR_FILL_SIZE) + FULL_THR_FILL_SIZE = round(FULL_THR_FILL_SIZE) + topo.standalone.restart() + + +@pytest.fixture(scope="module") +def setup(request, topo): + """ + This is the fixture function , will run before running every test case. 
+ """ + presetup(topo) + setupthesystem(topo) + + def fin(): + topo.standalone.stop() + subprocess.call(['umount', '-fl', topo.standalone.ds_paths.log_dir]) + topo.standalone.start() + + request.addfinalizer(fin) + + +@pytest.fixture(scope="function") +def reset_logs(topo): + """ + Reset the errors log file before the test + """ + open('{}/errors'.format(topo.standalone.ds_paths.log_dir), 'w').close() + + +@disk_monitoring_ack +def test_verify_operation_when_disk_monitoring_is_off(topo, setup, reset_logs): + """Verify operation when Disk monitoring is off + + :id: 73a97536-fe9e-11e8-ba9f-8c16451d917b + :setup: Standalone + :steps: + 1. Turn off disk monitoring + 2. Go below the threshold + 3. Check DS is up and not entering shutdown mode + :expectedresults: + 1. Should Success + 2. Should Success + 3. Should Success + """ + try: + # Turn off disk monitoring + topo.standalone.config.set('nsslapd-disk-monitoring', 'off') + topo.standalone.restart() + # go below the threshold + subprocess.call(['dd', 'if=/dev/zero', 'of={}/foo'.format(topo.standalone.ds_paths.log_dir), 'bs=1M', 'count={}'.format(FULL_THR_FILL_SIZE)]) + subprocess.call(['dd', 'if=/dev/zero', 'of={}/foo1'.format(topo.standalone.ds_paths.log_dir), 'bs=1M', 'count={}'.format(FULL_THR_FILL_SIZE)]) + # Wait for disk monitoring plugin thread to wake up + _withouterrorlog(topo, 'topo.standalone.status() != True', 10) + # Check DS is up and not entering shutdown mode + assert topo.standalone.status() == True + finally: + os.remove('{}/foo'.format(topo.standalone.ds_paths.log_dir)) + os.remove('{}/foo1'.format(topo.standalone.ds_paths.log_dir)) + + +@disk_monitoring_ack +def test_enable_external_libs_debug_log(topo, setup, reset_logs): + """Check that OpenLDAP logs are successfully enabled and disabled when + disk threshold is reached + + :id: 121b2b24-ecba-48e2-9ee2-312d929dc8c6 + :setup: Standalone instance + :steps: 1. Set nsslapd-external-libs-debug-enabled to "on" + 2. 
Go straight below 1/2 of the threshold + 3. Verify that the external libs debug setting is disabled + 4. Go back above 1/2 of the threshold + 5. Verify that the external libs debug setting is enabled back + :expectedresults: 1. Success + 2. Success + 3. Success + 4. Success + 5. Success + """ + try: + # Verify that verbose logging was set to default level + assert topo.standalone.config.set('nsslapd-disk-monitoring', 'on') + assert topo.standalone.config.set('nsslapd-disk-monitoring-logging-critical', 'off') + assert topo.standalone.config.set('nsslapd-external-libs-debug-enabled', 'on') + assert topo.standalone.config.set('nsslapd-errorlog-level', '8') + topo.standalone.restart() + subprocess.call(['dd', 'if=/dev/zero', 'of={}/foo'.format(topo.standalone.ds_paths.log_dir), 'bs=1M', 'count={}'.format(HALF_THR_FILL_SIZE)]) + # Verify that logging is disabled + _withouterrorlog(topo, "topo.standalone.config.get_attr_val_utf8('nsslapd-external-libs-debug-enabled') != 'off'", 31) + finally: + os.remove('{}/foo'.format(topo.standalone.ds_paths.log_dir)) + _withouterrorlog(topo, "topo.standalone.config.get_attr_val_utf8('nsslapd-external-libs-debug-enabled') != 'on'", 31) + assert topo.standalone.config.set('nsslapd-external-libs-debug-enabled', 'off') + + +@disk_monitoring_ack +def test_free_up_the_disk_space_and_change_ds_config(topo, setup, reset_logs): + """Free up the disk space and change DS config + + :id: 7be4d560-fe9e-11e8-a307-8c16451d917b + :setup: Standalone + :steps: + 1. Enabling Disk Monitoring plugin and setting disk monitoring logging to critical + 2. Verify no message about loglevel is present in the error log + 3. Verify no message about disabling logging is present in the error log + 4. Verify no message about removing rotated logs is present in the error log + :expectedresults: + 1. Should Success + 2. Should Success + 3. Should Success + 4. 
Should Success + """ + # Enabling Disk Monitoring plugin and setting disk monitoring logging to critical + assert topo.standalone.config.set('nsslapd-disk-monitoring', 'on') + assert topo.standalone.config.set('nsslapd-disk-monitoring-logging-critical', 'on') + assert topo.standalone.config.set('nsslapd-errorlog-level', '8') + topo.standalone.restart() + # Verify no message about loglevel is present in the error log + # Verify no message about disabling logging is present in the error log + # Verify no message about removing rotated logs is present in the error log + with open(topo.standalone.errlog, 'r') as study: study = study.read() + assert 'temporarily setting error loglevel to zero' not in study + assert 'disabling access and audit logging' not in study + assert 'deleting rotated logs' not in study + + +@disk_monitoring_ack +def test_verify_operation_with_nsslapd_disk_monitoring_logging_critical_off(topo, setup, reset_logs): + """Verify operation with "nsslapd-disk-monitoring-logging-critical: off + + :id: 82363bca-fe9e-11e8-9ae7-8c16451d917b + :setup: Standalone + :steps: + 1. Verify that verbose logging was set to default level + 2. Verify that logging is disabled + 3. Verify that rotated logs were not removed + :expectedresults: + 1. Should Success + 2. Should Success + 3. 
Should Success + """ + try: + # Verify that verbose logging was set to default level + assert topo.standalone.config.set('nsslapd-disk-monitoring', 'on') + assert topo.standalone.config.set('nsslapd-disk-monitoring-logging-critical', 'off') + assert topo.standalone.config.set('nsslapd-errorlog-level', '8') + topo.standalone.restart() + subprocess.call(['dd', 'if=/dev/zero', 'of={}/foo'.format(topo.standalone.ds_paths.log_dir), 'bs=1M', 'count={}'.format(HALF_THR_FILL_SIZE)]) + _witherrorlog(topo, 'temporarily setting error loglevel to the default level', 11) + assert LOG_DEFAULT == int(re.findall(r'nsslapd-errorlog-level: \d+', str( + topo.standalone.search_s('cn=config', ldap.SCOPE_SUBTREE, '(objectclass=*)', ['nsslapd-errorlog-level'])))[ + 0].split(' ')[1]) + # Verify that logging is disabled + _withouterrorlog(topo, "topo.standalone.config.get_attr_val_utf8('nsslapd-accesslog-logging-enabled') != 'off'", 10) + assert topo.standalone.config.get_attr_val_utf8('nsslapd-accesslog-logging-enabled') == 'off' + # Verify that rotated logs were not removed + with open(topo.standalone.errlog, 'r') as study: study = study.read() + assert 'disabling access and audit logging' in study + _witherrorlog(topo, 'deleting rotated logs', 11) + study = open(topo.standalone.errlog).read() + assert "Unable to remove file: {}".format(topo.standalone.ds_paths.log_dir) not in study + assert 'is too far below the threshold' not in study + finally: + os.remove('{}/foo'.format(topo.standalone.ds_paths.log_dir)) + + +@disk_monitoring_ack +def test_operation_with_nsslapd_disk_monitoring_logging_critical_on_below_half_of_the_threshold(topo, setup, reset_logs): + """Verify operation with \"nsslapd-disk-monitoring-logging-critical: on\" below 1/2 of the threshold + Verify recovery + + :id: 8940c502-fe9e-11e8-bcc0-8c16451d917b + :setup: Standalone + :steps: + 1. Verify that DS goes into shutdown mode + 2. Verify that DS exited shutdown mode + :expectedresults: + 1. Should Success + 2. 
Should Success + """ + assert topo.standalone.config.set('nsslapd-disk-monitoring', 'on') + assert topo.standalone.config.set('nsslapd-disk-monitoring-logging-critical', 'on') + topo.standalone.restart() + # Verify that DS goes into shutdown mode + if float(THRESHOLD) > FULL_THR_FILL_SIZE: + FULL_THR_FILL_SIZE_new = FULL_THR_FILL_SIZE + round(float(THRESHOLD) - FULL_THR_FILL_SIZE) + 1 + subprocess.call(['dd', 'if=/dev/zero', 'of={}/foo'.format(topo.standalone.ds_paths.log_dir), 'bs=1M', 'count={}'.format(FULL_THR_FILL_SIZE_new)]) + else: + subprocess.call(['dd', 'if=/dev/zero', 'of={}/foo'.format(topo.standalone.ds_paths.log_dir), 'bs=1M', 'count={}'.format(FULL_THR_FILL_SIZE)]) + _witherrorlog(topo, 'is too far below the threshold', 20) + os.remove('{}/foo'.format(topo.standalone.ds_paths.log_dir)) + # Verify that DS exited shutdown mode + _witherrorlog(topo, 'Available disk space is now acceptable', 25) + + +@disk_monitoring_ack +def test_setting_nsslapd_disk_monitoring_logging_critical_to_off(topo, setup, reset_logs): + """Setting nsslapd-disk-monitoring-logging-critical to "off" + + :id: 93265ec4-fe9e-11e8-af93-8c16451d917b + :setup: Standalone + :steps: + 1. Setting nsslapd-disk-monitoring-logging-critical to "off" + :expectedresults: + 1. Should Success + """ + assert topo.standalone.config.set('nsslapd-disk-monitoring', 'on') + assert topo.standalone.config.set('nsslapd-disk-monitoring-logging-critical', 'off') + assert topo.standalone.config.set('nsslapd-errorlog-level', '8') + topo.standalone.restart() + assert topo.standalone.status() == True + + +@disk_monitoring_ack +def test_operation_with_nsslapd_disk_monitoring_logging_critical_off(topo, setup, reset_logs): + """Verify operation with nsslapd-disk-monitoring-logging-critical: off + + :id: 97985a52-fe9e-11e8-9914-8c16451d917b + :setup: Standalone + :steps: + 1. Verify that logging is disabled + 2. Verify that rotated logs were removed + 3. Verify that verbose logging was set to default level + 4. 
Verify that logging is disabled + 5. Verify that rotated logs were removed + :expectedresults: + 1. Should Success + 2. Should Success + 3. Should Success + 4. Should Success + 5. Should Success + """ + # Verify that logging is disabled + try: + assert topo.standalone.config.set('nsslapd-disk-monitoring', 'on') + assert topo.standalone.config.set('nsslapd-disk-monitoring-logging-critical', 'off') + assert topo.standalone.config.set('nsslapd-errorlog-level', '8') + assert topo.standalone.config.set('nsslapd-accesslog-maxlogsize', '1') + assert topo.standalone.config.set('nsslapd-accesslog-logrotationtimeunit', 'minute') + assert topo.standalone.config.set('nsslapd-accesslog-level', '772') + topo.standalone.restart() + # Verify that rotated logs were removed + users = UserAccounts(topo.standalone, DEFAULT_SUFFIX) + for i in range(10): + user_properties = { + 'uid': 'cn=anuj{}'.format(i), + 'cn': 'cn=anuj{}'.format(i), + 'sn': 'cn=anuj{}'.format(i), + 'userPassword': "Itsme123", + 'uidNumber': '1{}'.format(i), + 'gidNumber': '2{}'.format(i), + 'homeDirectory': '/home/{}'.format(i) + } + users.create(properties=user_properties) + for j in range(100): + for i in [i for i in users.list()]: i.bind('Itsme123') + assert re.findall(r'access.\d+-\d+',str(os.listdir(topo.standalone.ds_paths.log_dir))) + topo.standalone.bind_s(DN_DM, PW_DM) + assert topo.standalone.config.set('nsslapd-accesslog-maxlogsize', '100') + assert topo.standalone.config.set('nsslapd-accesslog-logrotationtimeunit', 'day') + assert topo.standalone.config.set('nsslapd-accesslog-level', '256') + topo.standalone.restart() + subprocess.call(['dd', 'if=/dev/zero', 'of={}/foo2'.format(topo.standalone.ds_paths.log_dir), 'bs=1M', 'count={}'.format(HALF_THR_FILL_SIZE)]) + # Verify that verbose logging was set to default level + _witherrorlog(topo, 'temporarily setting error loglevel to the default level', 10) + assert LOG_DEFAULT == int(re.findall(r'nsslapd-errorlog-level: \d+', str( + 
topo.standalone.search_s('cn=config', ldap.SCOPE_SUBTREE, '(objectclass=*)', ['nsslapd-errorlog-level'])))[0].split(' ')[1]) + # Verify that logging is disabled + _withouterrorlog(topo, "topo.standalone.config.get_attr_val_utf8('nsslapd-accesslog-logging-enabled') != 'off'", 20) + with open(topo.standalone.errlog, 'r') as study: study = study.read() + assert 'disabling access and audit logging' in study + # Verify that rotated logs were removed + _witherrorlog(topo, 'deleting rotated logs', 10) + with open(topo.standalone.errlog, 'r') as study:study = study.read() + assert 'Unable to remove file:' not in study + assert 'is too far below the threshold' not in study + for i in [i for i in users.list()]: i.delete() + finally: + os.remove('{}/foo2'.format(topo.standalone.ds_paths.log_dir)) + + +@disk_monitoring_ack +def test_operation_with_nsslapd_disk_monitoring_logging_critical_off_below_half_of_the_threshold(topo, setup, reset_logs): + """Verify operation with nsslapd-disk-monitoring-logging-critical: off below 1/2 of the threshold + Verify shutdown + Recovery and setup + + :id: 9d4c7d48-fe9e-11e8-b5d6-8c16451d917b + :setup: Standalone + :steps: + 1. Verify that DS goes into shutdown mode + 2. Verifying that DS has been shut down after the grace period + 3. Verify logging enabled + 4. Create rotated logfile + 5. Enable verbose logging + :expectedresults: + 1. Should Success + 2. Should Success + 3. Should Success + 4. Should Success + 5. 
Should Success + """ + assert topo.standalone.config.set('nsslapd-disk-monitoring', 'on') + assert topo.standalone.config.set('nsslapd-disk-monitoring-logging-critical', 'off') + topo.standalone.restart() + # Verify that DS goes into shutdown mode + if float(THRESHOLD) > FULL_THR_FILL_SIZE: + FULL_THR_FILL_SIZE_new = FULL_THR_FILL_SIZE + round(float(THRESHOLD) - FULL_THR_FILL_SIZE) + subprocess.call(['dd', 'if=/dev/zero', 'of={}/foo'.format(topo.standalone.ds_paths.log_dir), 'bs=1M', 'count={}'.format(FULL_THR_FILL_SIZE_new)]) + else: + subprocess.call(['dd', 'if=/dev/zero', 'of={}/foo'.format(topo.standalone.ds_paths.log_dir), 'bs=1M', 'count={}'.format(FULL_THR_FILL_SIZE)]) + # Increased sleep to avoid failure + _witherrorlog(topo, 'is too far below the threshold', 100) + _witherrorlog(topo, 'Signaling slapd for shutdown', 90) + # Verifying that DS has been shut down after the grace period + time.sleep(2) + assert topo.standalone.status() == False + # free_space + os.remove('{}/foo'.format(topo.standalone.ds_paths.log_dir)) + open('{}/errors'.format(topo.standalone.ds_paths.log_dir), 'w').close() + # StartSlapd + topo.standalone.start() + # verify logging enabled + assert topo.standalone.config.get_attr_val_utf8('nsslapd-accesslog-logging-enabled') == 'on' + assert topo.standalone.config.get_attr_val_utf8('nsslapd-errorlog-logging-enabled') == 'on' + with open(topo.standalone.errlog, 'r') as study: study = study.read() + assert 'disabling access and audit logging' not in study + assert topo.standalone.config.set('nsslapd-accesslog-maxlogsize', '1') + assert topo.standalone.config.set('nsslapd-accesslog-logrotationtimeunit', 'minute') + assert topo.standalone.config.set('nsslapd-accesslog-level', '772') + topo.standalone.restart() + # create rotated logfile + users = UserAccounts(topo.standalone, DEFAULT_SUFFIX) + for i in range(10): + user_properties = { + 'uid': 'cn=anuj{}'.format(i), + 'cn': 'cn=anuj{}'.format(i), + 'sn': 'cn=anuj{}'.format(i), + 
'userPassword': "Itsme123", + 'uidNumber': '1{}'.format(i), + 'gidNumber': '2{}'.format(i), + 'homeDirectory': '/home/{}'.format(i) + } + users.create(properties=user_properties) + for j in range(100): + for i in [i for i in users.list()]: i.bind('Itsme123') + assert re.findall(r'access.\d+-\d+',str(os.listdir(topo.standalone.ds_paths.log_dir))) + topo.standalone.bind_s(DN_DM, PW_DM) + # enable verbose logging + assert topo.standalone.config.set('nsslapd-accesslog-maxlogsize', '100') + assert topo.standalone.config.set('nsslapd-accesslog-logrotationtimeunit', 'day') + assert topo.standalone.config.set('nsslapd-accesslog-level', '256') + assert topo.standalone.config.set('nsslapd-errorlog-level', '8') + topo.standalone.restart() + for i in [i for i in users.list()]: i.delete() + + +@disk_monitoring_ack +def test_go_straight_below_half_of_the_threshold(topo, setup, reset_logs): + """Go straight below 1/2 of the threshold + Recovery and setup + + :id: a2a0664c-fe9e-11e8-b220-8c16451d917b + :setup: Standalone + :steps: + 1. Go straight below 1/2 of the threshold + 2. Verify that verbose logging was set to default level + 3. Verify that logging is disabled + 4. Verify DS is in shutdown mode + 5. Verify DS has recovered from shutdown + :expectedresults: + 1. Should Success + 2. Should Success + 3. Should Success + 4. Should Success + 5. 
Should Success + """ + assert topo.standalone.config.set('nsslapd-disk-monitoring', 'on') + assert topo.standalone.config.set('nsslapd-disk-monitoring-logging-critical', 'off') + assert topo.standalone.config.set('nsslapd-errorlog-level', '8') + topo.standalone.restart() + if float(THRESHOLD) > FULL_THR_FILL_SIZE: + FULL_THR_FILL_SIZE_new = FULL_THR_FILL_SIZE + round(float(THRESHOLD) - FULL_THR_FILL_SIZE) + 1 + subprocess.call(['dd', 'if=/dev/zero', 'of={}/foo'.format(topo.standalone.ds_paths.log_dir), 'bs=1M', 'count={}'.format(FULL_THR_FILL_SIZE_new)]) + else: + subprocess.call(['dd', 'if=/dev/zero', 'of={}/foo'.format(topo.standalone.ds_paths.log_dir), 'bs=1M', 'count={}'.format(FULL_THR_FILL_SIZE)]) + _witherrorlog(topo, 'temporarily setting error loglevel to the default level', 11) + # Verify that verbose logging was set to default level + assert LOG_DEFAULT == int(re.findall(r'nsslapd-errorlog-level: \d+', + str(topo.standalone.search_s('cn=config', ldap.SCOPE_SUBTREE, + '(objectclass=*)', + ['nsslapd-errorlog-level'])) + )[0].split(' ')[1]) + # Verify that logging is disabled + _withouterrorlog(topo, "topo.standalone.config.get_attr_val_utf8('nsslapd-accesslog-logging-enabled') != 'off'", 11) + # Verify that rotated logs were removed + _witherrorlog(topo, 'disabling access and audit logging', 2) + _witherrorlog(topo, 'deleting rotated logs', 11) + with open(topo.standalone.errlog, 'r') as study:study = study.read() + assert 'Unable to remove file:' not in study + # Verify DS is in shutdown mode + _withouterrorlog(topo, 'topo.standalone.status() != False', 90) + _witherrorlog(topo, 'is too far below the threshold', 2) + # Verify DS has recovered from shutdown + os.remove('{}/foo'.format(topo.standalone.ds_paths.log_dir)) + open('{}/errors'.format(topo.standalone.ds_paths.log_dir), 'w').close() + topo.standalone.start() + _withouterrorlog(topo, "topo.standalone.config.get_attr_val_utf8('nsslapd-accesslog-logging-enabled') != 'on'", 20) + with 
open(topo.standalone.errlog, 'r') as study: study = study.read() + assert 'disabling access and audit logging' not in study + + +@disk_monitoring_ack +def test_readonly_on_threshold(topo, setup, reset_logs): + """Verify that nsslapd-disk-monitoring-readonly-on-threshold switches the server to read-only mode + + :id: 06814c19-ef3c-4800-93c9-c7c6e76fcbb9 + :customerscenario: True + :setup: Standalone + :steps: + 1. Verify that the backend is in read-only mode + 2. Go back above the threshold + 3. Verify that the backend is in read-write mode + :expectedresults: + 1. Should Success + 2. Should Success + 3. Should Success + """ + file_path = '{}/foo'.format(topo.standalone.ds_paths.log_dir) + backends = Backends(topo.standalone) + backend_name = backends.list()[0].rdn + # Verify that verbose logging was set to default level + topo.standalone.deleteErrorLogs() + assert topo.standalone.config.set('nsslapd-disk-monitoring', 'on') + assert topo.standalone.config.set('nsslapd-disk-monitoring-readonly-on-threshold', 'on') + topo.standalone.restart() + try: + subprocess.call(['dd', 'if=/dev/zero', f'of={file_path}', 'bs=1M', f'count={HALF_THR_FILL_SIZE}']) + _witherrorlog(topo, f"Putting the backend '{backend_name}' to read-only mode", 11) + users = UserAccounts(topo.standalone, DEFAULT_SUFFIX) + try: + user = users.create_test_user() + user.delete() + except ldap.UNWILLING_TO_PERFORM as e: + if 'database is read-only' not in str(e): + raise + os.remove(file_path) + _witherrorlog(topo, f"Putting the backend '{backend_name}' back to read-write mode", 11) + user = users.create_test_user() + assert user.exists() + user.delete() + finally: + if os.path.exists(file_path): + os.remove(file_path) + + +@disk_monitoring_ack +def test_readonly_on_threshold_below_half_of_the_threshold(topo, setup, reset_logs): + """Go below 1/2 of the threshold when readonly on threshold is enabled + + :id: 10262663-b41f-420e-a2d0-9532dd54fa7c + :customerscenario: True + :setup: Standalone + :steps: + 
1. Go straight below 1/2 of the threshold + 2. Verify that the backend is in read-only mode + 3. Go back above the threshold + 4. Verify that the backend is in read-write mode + :expectedresults: + 1. Should Success + 2. Should Success + 3. Should Success + 4. Should Success + """ + file_path = '{}/foo'.format(topo.standalone.ds_paths.log_dir) + backends = Backends(topo.standalone) + backend_name = backends.list()[0].rdn + topo.standalone.deleteErrorLogs() + assert topo.standalone.config.set('nsslapd-disk-monitoring', 'on') + assert topo.standalone.config.set('nsslapd-disk-monitoring-readonly-on-threshold', 'on') + topo.standalone.restart() + try: + if float(THRESHOLD) > FULL_THR_FILL_SIZE: + FULL_THR_FILL_SIZE_new = FULL_THR_FILL_SIZE + round(float(THRESHOLD) - FULL_THR_FILL_SIZE) + 1 + subprocess.call(['dd', 'if=/dev/zero', f'of={file_path}', 'bs=1M', f'count={FULL_THR_FILL_SIZE_new}']) + else: + subprocess.call(['dd', 'if=/dev/zero', f'of={file_path}', 'bs=1M', f'count={FULL_THR_FILL_SIZE}']) + _witherrorlog(topo, f"Putting the backend '{backend_name}' to read-only mode", 11) + users = UserAccounts(topo.standalone, DEFAULT_SUFFIX) + try: + user = users.create_test_user() + user.delete() + except ldap.UNWILLING_TO_PERFORM as e: + if 'database is read-only' not in str(e): + raise + _witherrorlog(topo, 'is too far below the threshold', 51) + # Verify DS has recovered from shutdown + os.remove(file_path) + _witherrorlog(topo, f"Putting the backend '{backend_name}' back to read-write mode", 51) + user = users.create_test_user() + assert user.exists() + user.delete() + finally: + if os.path.exists(file_path): + os.remove(file_path) + + +@disk_monitoring_ack +def test_below_half_of_the_threshold_not_starting_after_shutdown(topo, setup, reset_logs): + """Test that the instance won't start if we are below 1/2 of the threshold + + :id: cceeaefd-9fa4-45c5-9ac6-9887a0671ef8 + :customerscenario: True + :setup: Standalone + :steps: + 1. 
Go straight below 1/2 of the threshold + 2. Try to start the instance + 3. Go back above the threshold + 4. Try to start the instance + :expectedresults: + 1. Should Success + 2. Should Fail + 3. Should Success + 4. Should Success + """ + file_path = '{}/foo'.format(topo.standalone.ds_paths.log_dir) + topo.standalone.deleteErrorLogs() + assert topo.standalone.config.set('nsslapd-disk-monitoring', 'on') + topo.standalone.restart() + try: + if float(THRESHOLD) > FULL_THR_FILL_SIZE: + FULL_THR_FILL_SIZE_new = FULL_THR_FILL_SIZE + round(float(THRESHOLD) - FULL_THR_FILL_SIZE) + 1 + subprocess.call(['dd', 'if=/dev/zero', f'of={file_path}', 'bs=1M', f'count={FULL_THR_FILL_SIZE_new}']) + else: + subprocess.call(['dd', 'if=/dev/zero', f'of={file_path}', 'bs=1M', f'count={FULL_THR_FILL_SIZE}']) + _withouterrorlog(topo, 'topo.standalone.status() == True', 120) + try: + topo.standalone.start() + except (ValueError, subprocess.CalledProcessError): + topo.standalone.log.info("Instance start up has failed as expected") + _witherrorlog(topo, f'is too far below the threshold({THRESHOLD_BYTES} bytes). Exiting now', 2) + # Verify DS has recovered from shutdown + os.remove(file_path) + topo.standalone.start() + finally: + if os.path.exists(file_path): + os.remove(file_path) + + +@disk_monitoring_ack +def test_go_straight_below_4kb(topo, setup, reset_logs): + """Go straight below 4KB + + :id: a855115a-fe9e-11e8-8e91-8c16451d917b + :setup: Standalone + :steps: + 1. Go straight below 4KB + 2. Clean space + :expectedresults: + 1. Should Success + 2. 
Should Success + """ + assert topo.standalone.config.set('nsslapd-disk-monitoring', 'on') + topo.standalone.restart() + subprocess.call(['dd', 'if=/dev/zero', 'of={}/foo'.format(topo.standalone.ds_paths.log_dir), 'bs=1M', 'count={}'.format(FULL_THR_FILL_SIZE)]) + subprocess.call(['dd', 'if=/dev/zero', 'of={}/foo1'.format(topo.standalone.ds_paths.log_dir), 'bs=1M', 'count={}'.format(FULL_THR_FILL_SIZE)]) + _withouterrorlog(topo, 'topo.standalone.status() != False', 11) + os.remove('{}/foo'.format(topo.standalone.ds_paths.log_dir)) + os.remove('{}/foo1'.format(topo.standalone.ds_paths.log_dir)) + topo.standalone.start() + assert topo.standalone.status() == True + + +@disk_monitoring_ack +@pytest.mark.bz982325 +def test_threshold_to_overflow_value(topo, setup, reset_logs): + """Overflow in nsslapd-disk-monitoring-threshold + + :id: ad60ab3c-fe9e-11e8-88dc-8c16451d917b + :setup: Standalone + :steps: + 1. Setting nsslapd-disk-monitoring-threshold to overflow_value + :expectedresults: + 1. Should Success + """ + overflow_value = '3000000000' + # Setting nsslapd-disk-monitoring-threshold to overflow_value + assert topo.standalone.config.set('nsslapd-disk-monitoring-threshold', ensure_bytes(overflow_value)) + assert overflow_value == re.findall(r'nsslapd-disk-monitoring-threshold: \d+', str( + topo.standalone.search_s('cn=config', ldap.SCOPE_SUBTREE, '(objectclass=*)', + ['nsslapd-disk-monitoring-threshold'])))[0].split(' ')[1] + + +@disk_monitoring_ack +@pytest.mark.bz970995 +def test_threshold_is_reached_to_half(topo, setup, reset_logs): + """RHDS not shutting down when disk monitoring threshold is reached to half. + + :id: b2d3665e-fe9e-11e8-b9c0-8c16451d917b + :setup: Standalone + :steps: Standalone + 1. Verify that there is not endless loop of error messages + :expectedresults: + 1. 
Should Success + """ + + assert topo.standalone.config.set('nsslapd-disk-monitoring', 'on') + assert topo.standalone.config.set('nsslapd-disk-monitoring-logging-critical', 'on') + assert topo.standalone.config.set('nsslapd-errorlog-level', '8') + assert topo.standalone.config.set('nsslapd-disk-monitoring-threshold', ensure_bytes(THRESHOLD_BYTES)) + topo.standalone.restart() + subprocess.call(['dd', 'if=/dev/zero', 'of={}/foo'.format(topo.standalone.ds_paths.log_dir), 'bs=1M', 'count={}'.format(HALF_THR_FILL_SIZE)]) + # Verify that there is not endless loop of error messages + _witherrorlog(topo, "temporarily setting error loglevel to the default level", 10) + with open(topo.standalone.errlog, 'r') as study:study = study.read() + assert len(re.findall("temporarily setting error loglevel to the default level", study)) == 1 + os.remove('{}/foo'.format(topo.standalone.ds_paths.log_dir)) + + +@disk_monitoring_ack +@pytest.mark.parametrize("test_input,expected", [ + ("nsslapd-disk-monitoring-threshold", '-2'), + ("nsslapd-disk-monitoring-threshold", '9223372036854775808'), + ("nsslapd-disk-monitoring-threshold", '2047'), + ("nsslapd-disk-monitoring-threshold", '0'), + ("nsslapd-disk-monitoring-threshold", '-1294967296'), + ("nsslapd-disk-monitoring-threshold", 'invalid'), + ("nsslapd-disk-monitoring", 'invalid'), + ("nsslapd-disk-monitoring", '1'), + ("nsslapd-disk-monitoring-grace-period", '0'), + ("nsslapd-disk-monitoring-grace-period", '525 948'), + ("nsslapd-disk-monitoring-grace-period", '-1'), + ("nsslapd-disk-monitoring-logging-critical", 'oninvalid'), + ("nsslapd-disk-monitoring-grace-period", '-1'), + ("nsslapd-disk-monitoring-grace-period", '0'), +]) +def test_negagtive_parameterize(topo, setup, reset_logs, test_input, expected): + """Verify that invalid operations are not permitted + + :id: b88efbf8-fe9e-11e8-8499-8c16451d917b + :parametrized: yes + :setup: Standalone + :steps: + 1. Verify that invalid operations are not permitted. + :expectedresults: + 1. 
Should not success. + """ + with pytest.raises(Exception): + topo.standalone.config.set(test_input, ensure_bytes(expected)) + + +@disk_monitoring_ack +def test_valid_operations_are_permitted(topo, setup, reset_logs): + """Verify that valid operations are permitted + + :id: bd4f83f6-fe9e-11e8-88f4-8c16451d917b + :setup: Standalone + :steps: + 1. Verify that valid operations are permitted + :expectedresults: + 1. Should Success. + """ + assert topo.standalone.config.set('nsslapd-disk-monitoring', 'on') + assert topo.standalone.config.set('nsslapd-disk-monitoring-logging-critical', 'on') + assert topo.standalone.config.set('nsslapd-errorlog-level', '8') + topo.standalone.restart() + # Trying to delete nsslapd-disk-monitoring-threshold + assert topo.standalone.modify_s('cn=config', [(ldap.MOD_DELETE, 'nsslapd-disk-monitoring-threshold', '')]) + # Trying to add another value to nsslapd-disk-monitoring-threshold (check that it is not multivalued) + topo.standalone.config.add('nsslapd-disk-monitoring-threshold', '2000001') + # Trying to delete nsslapd-disk-monitoring + assert topo.standalone.modify_s('cn=config', [(ldap.MOD_DELETE, 'nsslapd-disk-monitoring', ensure_bytes(str( + topo.standalone.search_s('cn=config', ldap.SCOPE_SUBTREE, '(objectclass=*)', ['nsslapd-disk-monitoring'])[ + 0]).split(' ')[2].split('\n\n')[0]))]) + # Trying to add another value to nsslapd-disk-monitoring + topo.standalone.config.add('nsslapd-disk-monitoring', 'off') + # Trying to delete nsslapd-disk-monitoring-grace-period + assert topo.standalone.modify_s('cn=config', [(ldap.MOD_DELETE, 'nsslapd-disk-monitoring-grace-period', '')]) + # Trying to add another value to nsslapd-disk-monitoring-grace-period + topo.standalone.config.add('nsslapd-disk-monitoring-grace-period', '61') + # Trying to delete nsslapd-disk-monitoring-logging-critical + assert topo.standalone.modify_s('cn=config', [(ldap.MOD_DELETE, 'nsslapd-disk-monitoring-logging-critical', + ensure_bytes(str( + 
topo.standalone.search_s('cn=config', ldap.SCOPE_SUBTREE, + '(objectclass=*)', [ + 'nsslapd-disk-monitoring-logging-critical'])[ + 0]).split(' ')[2].split('\n\n')[0]))]) + # Trying to add another value to nsslapd-disk-monitoring-logging-critical + assert topo.standalone.config.set('nsslapd-disk-monitoring-logging-critical', 'on') + + +if __name__ == '__main__': + CURRENT_FILE = os.path.realpath(__file__) + pytest.main("-s -v %s" % CURRENT_FILE) + diff --git a/dirsrvtests/tests/suites/disk_monitoring/disk_space_test.py b/dirsrvtests/tests/suites/disk_monitoring/disk_space_test.py new file mode 100644 index 0000000..623b5e0 --- /dev/null +++ b/dirsrvtests/tests/suites/disk_monitoring/disk_space_test.py @@ -0,0 +1,47 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2019 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. +# --- END COPYRIGHT BLOCK --- +import pytest +from lib389.monitor import MonitorDiskSpace +from lib389.topologies import topology_st as topo + +pytestmark = pytest.mark.tier2 + +def test_basic(topo): + """Test that the cn=disk space,cn=monitor gives at least one value + + :id: f1962762-2c6c-4e50-97af-a00012a7486d + :setup: Standalone + :steps: + 1. Get cn=disk space,cn=monitor entry + 2. Check it has at least one dsDisk attribute + 3. Check dsDisk attribute has the partition and sizes + 4. Check the numbers are valid integers + :expectedresults: + 1. It should succeed + 2. It should succeed + 3. It should succeed + 4. 
It should succeed + """ + + inst = topo.standalone + + # Turn off disk monitoring + disk_space_mon = MonitorDiskSpace(inst) + disk_str = disk_space_mon.get_disks()[0] + + inst.log.info('Check that "partition", "size", "used", "available", "use%" words are present in the string') + words = ["partition", "size", "used", "available", "use%"] + assert all(map(lambda word: word in disk_str, words)) + + inst.log.info("Check that the sizes are numbers") + for word in words[1:]: + number = disk_str.split(f'{word}="')[1].split('"')[0] + try: + int(number) + except ValueError: + raise ValueError(f'A "{word}" value is not a number') diff --git a/dirsrvtests/tests/suites/ds_logs/__init__.py b/dirsrvtests/tests/suites/ds_logs/__init__.py new file mode 100644 index 0000000..feefbd2 --- /dev/null +++ b/dirsrvtests/tests/suites/ds_logs/__init__.py @@ -0,0 +1,3 @@ +""" + :Requirement: 389-ds-base: Directory Server Logs +""" diff --git a/dirsrvtests/tests/suites/ds_logs/audit_log_test.py b/dirsrvtests/tests/suites/ds_logs/audit_log_test.py new file mode 100644 index 0000000..d09ce8b --- /dev/null +++ b/dirsrvtests/tests/suites/ds_logs/audit_log_test.py @@ -0,0 +1,104 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2022 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. +# --- END COPYRIGHT BLOCK --- + +import logging +import pytest +import os +import time +from lib389._constants import DEFAULT_SUFFIX +from lib389.topologies import topology_st as topo +from lib389.idm.user import UserAccounts + +log = logging.getLogger(__name__) + + +def test_auditlog_display_attrs(topo): + """Test "display attributes" feature of the audit log + + :id: 01beaf71-4cb5-4943-9774-3210ae5d68a2 + :setup: Standalone Instance + :steps: + 1. Test "cn" attribute is displayed + 2. Test multiple attributes are displayed + 3. Test modrdn updates log + 4. Test all attributes are displayed + 5. Test delete updates log + :expectedresults: + 1. 
Success + 2. Success + 3. Success + 4. Success + 5. Sucecss + """ + + inst = topo.standalone + inst.config.replace('nsslapd-auditlog-logging-enabled', 'on') + + # Test "cn" attribute + inst.config.replace('nsslapd-auditlog-display-attrs', 'cn') + users = UserAccounts(inst, DEFAULT_SUFFIX) + user = users.ensure_state(properties={ + 'uid': 'test_audit_log', + 'cn': 'test', + 'sn': 'user', + 'uidNumber': '1000', + 'gidNumber': '1000', + 'homeDirectory': '/home/test', + 'userPassword': 'pppppppp' + }) + user2 = users.ensure_state(properties={ + 'uid': 'test_modrdn_delete', + 'cn': 'modrdn_delete', + 'sn': 'modrdn_delete', + 'uidNumber': '1001', + 'gidNumber': '1001', + 'homeDirectory': '/home/modrdn_delete', + 'userPassword': 'pppppppp' + }) + time.sleep(1) + assert inst.ds_audit_log.match("#cn: test") + assert not inst.ds_audit_log.match("#uid: test_audit_log") + + # Test multiple attributes + inst.config.replace('nsslapd-auditlog-display-attrs', 'uidNumber gidNumber, homeDirectory') + user.replace('sn', 'new value') + time.sleep(1) + assert inst.ds_audit_log.match("#uidNumber: 1000") + assert inst.ds_audit_log.match("#gidNumber: 1000") + assert inst.ds_audit_log.match("#homeDirectory: /home/test") + assert not inst.ds_audit_log.match("#uid: test_audit_log") + assert not inst.ds_audit_log.match("#uidNumber: 1001") + assert not inst.ds_audit_log.match("#sn: modrdn_delete") + + # Test modrdn + user2.rename("uid=modrdn_delete", DEFAULT_SUFFIX) + time.sleep(1) + assert inst.ds_audit_log.match("#uidNumber: 1001") + assert inst.ds_audit_log.match("#gidNumber: 1001") + + # Test ALL attributes + inst.config.replace('nsslapd-auditlog-display-attrs', '*') + user.replace('sn', 'new value again') + time.sleep(1) + assert inst.ds_audit_log.match("#uid: test_audit_log") + assert inst.ds_audit_log.match("#cn: test") + assert inst.ds_audit_log.match("#uidNumber: 1000") + assert inst.ds_audit_log.match("#objectClass: top") + + # Test delete + user2.delete() + time.sleep(1) + assert 
inst.ds_audit_log.match("#sn: modrdn_delete") + + +if __name__ == '__main__': + # Run isolated + # -s for DEBUG mode + CURRENT_FILE = os.path.realpath(__file__) + pytest.main(["-s", CURRENT_FILE]) + diff --git a/dirsrvtests/tests/suites/ds_logs/ds_logs_test.py b/dirsrvtests/tests/suites/ds_logs/ds_logs_test.py new file mode 100644 index 0000000..3511d2a --- /dev/null +++ b/dirsrvtests/tests/suites/ds_logs/ds_logs_test.py @@ -0,0 +1,1713 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2020 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. +# --- END COPYRIGHT BLOCK --- +# +from decimal import * +import os +import time +import logging +import pytest +import shutil +from lib389.rootdse import RootDSE +import subprocess +from lib389.backend import Backend +from lib389.mappingTree import MappingTrees +from lib389.idm.domain import Domain +from lib389.configurations.sample import create_base_domain +from lib389._mapped_object import DSLdapObject +from lib389.topologies import topology_st +from lib389.plugins import AutoMembershipPlugin, ReferentialIntegrityPlugin, AutoMembershipDefinitions, MemberOfPlugin +from lib389.idm.user import UserAccounts, UserAccount +from lib389.idm.group import Groups +from lib389.idm.organizationalunit import OrganizationalUnits +from lib389._constants import DEFAULT_SUFFIX, LOG_ACCESS_LEVEL, PASSWORD +from lib389.utils import ds_is_older, ds_is_newer +from lib389.config import RSA +from lib389.dseldif import DSEldif +import ldap +import glob +import re + +pytestmark = pytest.mark.tier1 + +logging.getLogger(__name__).setLevel(logging.DEBUG) +log = logging.getLogger(__name__) + +PLUGIN_TIMESTAMP = 'nsslapd-logging-hr-timestamps-enabled' +PLUGIN_LOGGING = 'nsslapd-plugin-logging' +USER1_DN = 'uid=user1,' + DEFAULT_SUFFIX + +def add_users(topology_st, users_num): + users = UserAccounts(topology_st, DEFAULT_SUFFIX) + log.info('Adding %d users' % users_num) + for i in 
range(0, users_num): + uid = 1000 + i + users.create(properties={ + 'uid': 'testuser%d' % uid, + 'cn': 'testuser%d' % uid, + 'sn': 'user', + 'uidNumber': '%d' % uid, + 'gidNumber': '%d' % uid, + 'homeDirectory': '/home/testuser%d' % uid + }) + + +def search_users(topology_st): + users = UserAccounts(topology_st, DEFAULT_SUFFIX) + entries = users.list() + # We just assert we got some data ... + assert len(entries) > 0 + + +def delete_obj(obj): + if obj.exists(): + obj.delete() + + +def add_group_and_perform_user_operations(topology_st): + topo = topology_st.standalone + + # Add the automember group + groups = Groups(topo, DEFAULT_SUFFIX) + group = groups.create(properties={'cn': 'group'}) + + ous = OrganizationalUnits(topo, DEFAULT_SUFFIX) + branch1 = ous.create(properties={'ou': 'branch1'}) + + # Add the automember config entry + am_configs = AutoMembershipDefinitions(topo) + am_config = am_configs.create(properties={'cn': 'config', + 'autoMemberScope': branch1.dn, + 'autoMemberFilter': 'objectclass=top', + 'autoMemberDefaultGroup': group.dn, + 'autoMemberGroupingAttr': 'member:dn'}) + + # Add a user that should get added to the group + users = UserAccounts(topo, DEFAULT_SUFFIX, rdn='ou={}'.format(branch1.rdn)) + test_user = users.create_test_user(uid=777) + + # Check if created user is group member + assert test_user.dn in group.list_members() + + log.info('Renaming user') + test_user.rename('uid=new_test_user_777', newsuperior=DEFAULT_SUFFIX) + + log.info('Delete the user') + delete_obj(test_user) + + log.info('Delete automember entry, org. 
unit and group for the next test') + delete_obj(am_config) + delete_obj(branch1) + delete_obj(group) + + +@pytest.fixture(scope="module") +def enable_plugins(topology_st): + topo = topology_st.standalone + + log.info("Enable automember plugin") + plugin = AutoMembershipPlugin(topo) + plugin.enable() + + log.info('Enable Referential Integrity plugin') + plugin = ReferentialIntegrityPlugin(topo) + plugin.enable() + + log.info('Set nsslapd-plugin-logging to on') + topo.config.set(PLUGIN_LOGGING, 'ON') + + log.info('Restart the server') + topo.restart() + + +def add_user_log_level(topology_st, loglevel, request): + topo = topology_st.standalone + default_log_level = topo.config.get_attr_val_utf8(LOG_ACCESS_LEVEL) + log.info(f'Configure access log level to {loglevel}') + topo.config.set(LOG_ACCESS_LEVEL, str(loglevel)) + add_group_and_perform_user_operations(topology_st) + + def fin(): + topo.config.set(LOG_ACCESS_LEVEL, default_log_level) + log.info('Delete the previous access logs for the next test') + topo.deleteAccessLogs() + request.addfinalizer(fin) + + +@pytest.fixture(scope="function") +def add_user_log_level_260(topology_st, enable_plugins, request): + access_log_level = 4 + 256 + add_user_log_level(topology_st, access_log_level, request) + + +@pytest.fixture(scope="function") +def add_user_log_level_516(topology_st, enable_plugins, request): + access_log_level = 4 + 512 + add_user_log_level(topology_st, access_log_level, request) + + +@pytest.fixture(scope="function") +def add_user_log_level_131076(topology_st, enable_plugins, request): + access_log_level = 4 + 131072 + add_user_log_level(topology_st, access_log_level, request) + + +@pytest.fixture(scope="function") +def clean_access_logs(topology_st, request): + def _clean_access_logs(): + topo = topology_st.standalone + log.info("Stopping the instance") + topo.stop() + log.info("Deleting the access logs") + topo.deleteAccessLogs() + log.info("Starting the instance") + topo.start() + + 
request.addfinalizer(_clean_access_logs) + + return clean_access_logs + +@pytest.fixture(scope="function") +def remove_users(topology_st, request): + def _remove_users(): + topo = topology_st.standalone + users = UserAccounts(topo, DEFAULT_SUFFIX) + entries = users.list() + assert len(entries) > 0 + + log.info("Removing all added users") + for entry in entries: + delete_obj(entry) + + request.addfinalizer(_remove_users) + + +def set_audit_log_config_values(topology_st, request, enabled, logsize): + topo = topology_st.standalone + + topo.config.set('nsslapd-auditlog-logging-enabled', enabled) + topo.config.set('nsslapd-auditlog-maxlogsize', logsize) + + def fin(): + topo.start() + log.info('Setting audit log config back to default values') + topo.config.set('nsslapd-auditlog-logging-enabled', 'off') + topo.config.set('nsslapd-auditlog-maxlogsize', '100') + + request.addfinalizer(fin) + + +@pytest.fixture(scope="function") +def set_audit_log_config_values_to_rotate(topology_st, request): + set_audit_log_config_values(topology_st, request, 'on', '1') + +@pytest.fixture(scope="function") +def disable_access_log_buffering(topology_st, request): + log.info('Disable access log buffering') + topology_st.standalone.config.set('nsslapd-accesslog-logbuffering', 'off') + def fin(): + log.info('Enable access log buffering') + topology_st.standalone.config.set('nsslapd-accesslog-logbuffering', 'on') + + request.addfinalizer(fin) + + return disable_access_log_buffering + +def create_backend(inst, rdn, suffix): + # We only support dc= in this test. + assert suffix.startswith('dc=') + be1 = Backend(inst) + be1.create(properties={ + 'cn': rdn, + 'nsslapd-suffix': suffix, + }, + create_mapping_tree=False + ) + + # Now we temporarily make the MT for this node so we can add the base entry. 
+ mts = MappingTrees(inst) + mt = mts.create(properties={ + 'cn': suffix, + 'nsslapd-state': 'backend', + 'nsslapd-backend': rdn, + }) + + # Create the domain entry + create_base_domain(inst, suffix) + # Now delete the mt + mt.delete() + + return be1 + +@pytest.mark.bz1273549 +def test_check_default(topology_st): + """Check the default value of nsslapd-logging-hr-timestamps-enabled, + it should be ON + + :id: 2d15002e-9ed3-4796-b0bb-bf04e4e59bd3 + + :setup: Standalone instance + + :steps: + 1. Fetch the value of nsslapd-logging-hr-timestamps-enabled attribute + 2. Test that the attribute value should be "ON" by default + + :expectedresults: + 1. Value should be fetched successfully + 2. Value should be "ON" by default + """ + + # Get the default value of nsslapd-logging-hr-timestamps-enabled attribute + default = topology_st.standalone.config.get_attr_val_utf8(PLUGIN_TIMESTAMP) + + # Now check it should be ON by default + assert default == "on" + log.debug(default) + + +@pytest.mark.bz1273549 +def test_plugin_set_invalid(topology_st): + """Try to set some invalid values for nsslapd-logging-hr-timestamps-enabled + attribute + + :id: c60a68d2-703a-42bf-a5c2-4040736d511a + + :setup: Standalone instance + + :steps: + 1. Set some "JUNK" value of nsslapd-logging-hr-timestamps-enabled attribute + + :expectedresults: + 1. There should be an operation error + """ + + log.info('test_plugin_set_invalid - Expect to fail with junk value') + with pytest.raises(ldap.OPERATIONS_ERROR): + topology_st.standalone.config.set(PLUGIN_TIMESTAMP, 'JUNK') + + +@pytest.mark.bz1273549 +def test_log_plugin_on(topology_st, remove_users): + """Check access logs for millisecond, when + nsslapd-logging-hr-timestamps-enabled=ON + + :id: 65ae4e2a-295f-4222-8d69-12124bc7a872 + + :setup: Standalone instance + + :steps: + 1. To generate big logs, add 100 test users + 2. Search users to generate more access logs + 3. Restart server + 4. 
Parse the logs to check the milliseconds got recorded in logs + + :expectedresults: + 1. Add operation should be successful + 2. Search operation should be successful + 3. Server should be restarted successfully + 4. There should be milliseconds added in the access logs + """ + + log.info('Bug 1273549 - Check access logs for millisecond, when attribute is ON') + log.info('perform any ldap operation, which will trigger the logs') + add_users(topology_st.standalone, 10) + search_users(topology_st.standalone) + + log.info('Restart the server to flush the logs') + topology_st.standalone.restart(timeout=10) + + log.info('parse the access logs') + access_log_lines = topology_st.standalone.ds_access_log.readlines() + assert len(access_log_lines) > 0 + assert topology_st.standalone.ds_access_log.match(r'^\[.+\d{9}.+\].+') + + +@pytest.mark.bz1273549 +def test_log_plugin_off(topology_st, remove_users): + """Milliseconds should be absent from access logs when + nsslapd-logging-hr-timestamps-enabled=OFF + + :id: b3400e46-d940-4574-b399-e3f4b49bc4b5 + + :setup: Standalone instance + + :steps: + 1. Set nsslapd-logging-hr-timestamps-enabled=OFF + 2. Restart the server + 3. Delete old access logs + 4. Do search operations to generate fresh access logs + 5. Restart the server + 6. Check access logs + + :expectedresults: + 1. Attribute nsslapd-logging-hr-timestamps-enabled should be set to "OFF" + 2. Server should restart + 3. Access logs should be deleted + 4. Search operation should PASS + 5. Server should restart + 6. 
There should not be any milliseconds added in the access logs + """ + + log.info('Bug 1273549 - Check access logs for missing millisecond, when attribute is OFF') + + log.info('test_log_plugin_off - set the configuration attribute to OFF') + topology_st.standalone.config.set(PLUGIN_TIMESTAMP, 'OFF') + + log.info('Restart the server to flush the logs') + topology_st.standalone.restart(timeout=10) + + log.info('test_log_plugin_off - delete the previous access logs') + topology_st.standalone.deleteAccessLogs() + + # Now generate some fresh logs + add_users(topology_st.standalone, 10) + search_users(topology_st.standalone) + + log.info('Restart the server to flush the logs') + topology_st.standalone.restart(timeout=10) + + log.info('check access log that microseconds are not present') + access_log_lines = topology_st.standalone.ds_access_log.readlines() + assert len(access_log_lines) > 0 + assert not topology_st.standalone.ds_access_log.match(r'^\[.+\d{9}.+\].+') + + +@pytest.mark.xfail(ds_is_older('1.4.0'), reason="May fail on 1.3.x because of bug 1358706") +@pytest.mark.bz1358706 +@pytest.mark.ds49029 +def test_internal_log_server_level_0(topology_st, clean_access_logs, disable_access_log_buffering): + """Tests server-initiated internal operations + + :id: 798d06fe-92e8-4648-af66-21349c20638e + :setup: Standalone instance + :steps: + 1. Set nsslapd-plugin-logging to on + 2. Configure access log level to only 0 + 3. Check the access logs. + :expectedresults: + 1. Operation should be successful + 2. Operation should be successful + 3. 
Access log should not contain internal operations log formats + """ + + topo = topology_st.standalone + default_log_level = topo.config.get_attr_val_utf8(LOG_ACCESS_LEVEL) + + + log.info('Set nsslapd-plugin-logging to on') + topo.config.set(PLUGIN_LOGGING, 'ON') + + log.info('Configure access log level to 0') + access_log_level = '0' + topo.config.set(LOG_ACCESS_LEVEL, access_log_level) + + log.info('Restart the server to flush the logs') + topo.restart() + + # These comments contain lines we are trying to find without regex (the op numbers are just examples) + log.info("Check if access log does not contain internal log of MOD operation") + # (Internal) op=2(2)(1) SRCH base="cn=config + assert not topo.ds_access_log.match(r'.*\(Internal\) op=[0-9]+\([0-9]+\)\([0-9]+\) SRCH base="cn=config.*') + # (Internal) op=2(2)(1) RESULT err=0 tag=48 nentries=1 + assert not topo.ds_access_log.match(r'.*\(Internal\) op=[0-9]+\([0-9]+\)\([0-9]+\) RESULT err=0 tag=48 nentries=1.*') + + log.info("Check if the other internal operations are not present") + # conn=Internal(0) op=0 + assert not topo.ds_access_log.match(r'.*conn=Internal\([0-9]+\) op=[0-9]+\([0-9]+\)\([0-9]+\).*') + + topo.config.set(LOG_ACCESS_LEVEL, default_log_level) + + +@pytest.mark.xfail(ds_is_older('1.4.0'), reason="May fail on 1.3.x because of bug 1358706") +@pytest.mark.bz1358706 +@pytest.mark.ds49029 +def test_internal_log_server_level_4(topology_st, clean_access_logs, disable_access_log_buffering): + """Tests server-initiated internal operations + + :id: a3500e47-d941-4575-b399-e3f4b49bc4b6 + :setup: Standalone instance + :steps: + 1. Set nsslapd-plugin-logging to on + 2. Configure access log level to only 4 + 3. Check the access logs, it should contain info about MOD operation of cn=config and other + internal operations should have the conn field set to Internal + and all values inside parenthesis set to 0. + :expectedresults: + 1. Operation should be successful + 2. Operation should be successful + 3. 
Access log should contain correct internal log formats with cn=config modification: + "(Internal) op=2(1)(1)" + "conn=Internal(0)" + """ + + topo = topology_st.standalone + default_log_level = topo.config.get_attr_val_utf8(LOG_ACCESS_LEVEL) + + log.info('Set nsslapd-plugin-logging to on') + topo.config.set(PLUGIN_LOGGING, 'ON') + + log.info('Configure access log level to 4') + access_log_level = '4' + topo.config.set(LOG_ACCESS_LEVEL, access_log_level) + + log.info('Restart the server to flush the logs') + topo.restart() + + try: + # These comments contain lines we are trying to find without regex (the op numbers are just examples) + log.info("Check if access log contains internal MOD operation in correct format") + # (Internal) op=2(2)(1) SRCH base="cn=config + assert topo.ds_access_log.match(r'.*\(Internal\) op=[0-9]+\([0-9]+\)\([0-9]+\) SRCH base="cn=config.*') + # (Internal) op=2(2)(1) RESULT err=0 tag=48 nentries= + assert topo.ds_access_log.match(r'.*\(Internal\) op=[0-9]+\([0-9]+\)\([0-9]+\) RESULT err=0 tag=48 nentries=.*') + + log.info("Check if the other internal operations have the correct format") + # conn=Internal(0) op=0 + assert topo.ds_access_log.match(r'.*conn=Internal\([0-9]+\) op=[0-9]+\([0-9]+\)\([0-9]+\).*') + finally: + topo.config.set(LOG_ACCESS_LEVEL, default_log_level) + + +@pytest.mark.xfail(ds_is_older('1.4.0'), reason="May fail on 1.3.x because of bug 1358706") +@pytest.mark.bz1358706 +@pytest.mark.ds49029 +def test_internal_log_level_260(topology_st, add_user_log_level_260, disable_access_log_buffering): + """Tests client initiated operations when automember plugin is enabled + + :id: e68a303e-c037-42b2-a5a0-fbea27c338a9 + :setup: Standalone instance with internal operation + logging on and nsslapd-plugin-logging to on + :steps: + 1. Configure access log level to 260 (4 + 256) + 2. Set nsslapd-plugin-logging to on + 3. Enable Referential Integrity and automember plugins + 4. Restart the server + 5. Add a test group + 6. 
Add a test user and add it as member of the test group
+        7. Rename the test user
+        8. Delete the test user
+        9. Check the access logs for nested internal operation logs
+    :expectedresults:
+        1. Operation should be successful
+        2. Operation should be successful
+        3. Operation should be successful
+        4. Operation should be successful
+        5. Operation should be successful
+        6. Operation should be successful
+        7. Operation should be successful
+        8. Operation should be successful
+        9. Access log should contain internal info about operations of the user
+    """
+
+    topo = topology_st.standalone
+
+    log.info('Restart the server to flush the logs')
+    topo.restart()
+
+    # These comments contain lines we are trying to find without regex (the op numbers are just examples)
+    log.info("Check the access logs for ADD operation of the user")
+    # op=10 ADD dn="uid=test_user_777,ou=branch1,dc=example,dc=com"
+    assert topo.ds_access_log.match(r'.*op=[0-9]+ ADD dn="uid=test_user_777,ou=branch1,dc=example,dc=com".*')
+    # (Internal) op=10(1)(1) MOD dn="cn=group,ou=Groups,dc=example,dc=com"
+    assert topo.ds_access_log.match(r'.*\(Internal\) op=[0-9]+\([0-9]+\)\([0-9]+\) '
+                                    r'MOD dn="cn=group,ou=Groups,dc=example,dc=com".*')
+    # (Internal) op=10(1)(2) SRCH base="cn=group,ou=Groups,dc=example,dc=com"
+    assert topo.ds_access_log.match(r'.*\(Internal\) op=[0-9]+\([0-9]+\)\([0-9]+\) SRCH base="cn=group,'
+                                    r'ou=Groups,dc=example,dc=com".*')
+    # (Internal) op=10(1)(2) RESULT err=0 tag=48 nentries=1
+    assert topo.ds_access_log.match(r'.*\(Internal\) op=[0-9]+\([0-9]+\)\([0-9]+\) RESULT err=0 tag=48 nentries=1*')
+    # (Internal) op=10(1)(1) RESULT err=0 tag=48
+    assert topo.ds_access_log.match(r'.*\(Internal\) op=[0-9]+\([0-9]+\)\([0-9]+\) RESULT err=0 tag=48.*')
+    # op=10 RESULT err=0 tag=105
+    assert topo.ds_access_log.match(r'.*op=[0-9]+ RESULT err=0 tag=105.*')
+
+    log.info("Check the access logs for MOD operation of the user")
+    # op=12 MODRDN
dn="uid=test_user_777,ou=branch1,dc=example,dc=com" ' + # 'newrdn="uid=new_test_user_777" newsuperior="dc=example,dc=com" + assert topo.ds_access_log.match(r'.*op=[0-9]+ MODRDN dn="uid=test_user_777,ou=branch1,dc=example,dc=com" ' + 'newrdn="uid=new_test_user_777" newsuperior="dc=example,dc=com".*') + if ds_is_older(('1.4.3.9', '1.4.4.3')): + # (Internal) op=12(1)(1) SRCH base="uid=test_user_777, ou=branch1,dc=example,dc=com" + assert topo.ds_access_log.match(r'.*\(Internal\) op=[0-9]+\([0-9]+\)\([0-9]+\) SRCH base="uid=test_user_777,' + 'ou=branch1,dc=example,dc=com".*') + # (Internal) op=12(1)(1) RESULT err=0 tag=48 nentries=1 + assert topo.ds_access_log.match(r'.*\(Internal\) op=[0-9]+\([0-9]+\)\([0-9]+\) RESULT err=0 tag=48 nentries=1.*') + # op=12 RESULT err=0 tag=109 + assert topo.ds_access_log.match(r'.*op=[0-9]+ RESULT err=0 tag=109.*') + + log.info("Check the access logs for DEL operation of the user") + # op=15 DEL dn="uid=new_test_user_777,dc=example,dc=com" + assert topo.ds_access_log.match(r'.*op=[0-9]+ DEL dn="uid=new_test_user_777,dc=example,dc=com".*') + if ds_is_older(('1.4.3.9', '1.4.4.3')): + # (Internal) op=15(1)(1) SRCH base="uid=new_test_user_777, dc=example,dc=com" + assert topo.ds_access_log.match(r'.*\(Internal\) op=[0-9]+\([0-9]+\)\([0-9]+\) SRCH base="uid=new_test_user_777,' + 'dc=example,dc=com".*') + # (Internal) op=15(1)(1) RESULT err=0 tag=48 nentries=1 + assert topo.ds_access_log.match(r'.*\(Internal\) op=[0-9]+\([0-9]+\)\([0-9]+\) RESULT err=0 tag=48 nentries=1.*') + # op=15 RESULT err=0 tag=107 + assert topo.ds_access_log.match(r'.*op=[0-9]+ RESULT err=0 tag=107.*') + + log.info("Check if the other internal operations have the correct format") + # conn=Internal(0) op=0 + assert topo.ds_access_log.match(r'.*conn=Internal\([0-9]+\) op=[0-9]+\([0-9]+\)\([0-9]+\).*') + + +@pytest.mark.xfail(ds_is_older('1.4.0'), reason="May fail on 1.3.x because of bug 1358706") +@pytest.mark.bz1358706 +@pytest.mark.ds49029 +def 
test_internal_log_level_131076(topology_st, add_user_log_level_131076, disable_access_log_buffering): + """Tests client-initiated operations while referential integrity plugin is enabled + + :id: 44836ac9-dabd-4a8c-abd5-ecd7c2509739 + :setup: Standalone instance + Configure access log level to - 131072 + 4 + Set nsslapd-plugin-logging to on + :steps: + 1. Configure access log level to 131076 + 2. Set nsslapd-plugin-logging to on + 3. Enable Referential Integrity and automember plugins + 4. Restart the server + 5. Add a test group + 6. Add a test user and add it as member of the test group + 7. Rename the test user + 8. Delete the test user + 9. Check the access logs for nested internal operation logs + :expectedresults: + 1. Operation should be successful + 2. Operation should be successful + 3. Operation should be successful + 4. Operation should be successful + 5. Operation should be successful + 6. Operation should be successful + 7. Operation should be successful + 8. Operation should be successful + 9. 
Access log should contain internal info about operations of the user + """ + + topo = topology_st.standalone + + log.info('Restart the server to flush the logs') + topo.restart() + + # These comments contain lines we are trying to find without regex (the op numbers are just examples) + log.info("Check the access logs for ADD operation of the user") + # op=10 ADD dn="uid=test_user_777,ou=branch1,dc=example,dc=com" + assert not topo.ds_access_log.match(r'.*op=[0-9]+ ADD dn="uid=test_user_777,ou=branch1,dc=example,dc=com".*') + # (Internal) op=10(1)(1) MOD dn="cn=group,ou=Groups,dc=example,dc=com" + assert topo.ds_access_log.match(r'.*\(Internal\) op=[0-9]+\([0-9]+\)\([0-9]+\) ' + r'MOD dn="cn=group,ou=Groups,dc=example,dc=com".*') + # (Internal) op=10(1)(2) SRCH base="cn=group,ou=Groups,dc=example,dc=com" + assert topo.ds_access_log.match(r'.*\(Internal\) op=[0-9]+\([0-9]+\)\([0-9]+\) ' + r'SRCH base="cn=group,ou=Groups,dc=example,dc=com".*') + # (Internal) op=10(1)(2) RESULT err=0 tag=48 nentries=1*') + assert topo.ds_access_log.match(r'.*\(Internal\) op=[0-9]+\([0-9]+\)\([0-9]+\) RESULT err=0 tag=48 nentries=1*') + # (Internal) op=10(1)(1) RESULT err=0 tag=48 + assert topo.ds_access_log.match(r'.*\(Internal\) op=[0-9]+\([0-9]+\)\([0-9]+\) RESULT err=0 tag=48.*') + # op=10 RESULT err=0 tag=105 + assert not topo.ds_access_log.match(r'.*op=[0-9]+ RESULT err=0 tag=105.*') + + log.info("Check the access logs for MOD operation of the user") + # op=12 MODRDN dn="uid=test_user_777,ou=branch1,dc=example,dc=com" ' + # 'newrdn="uid=new_test_user_777" newsuperior="dc=example,dc=com" + assert not topo.ds_access_log.match(r'.*op=[0-9]+ MODRDN dn="uid=test_user_777,ou=branch1,dc=example,dc=com" ' + 'newrdn="uid=new_test_user_777" newsuperior="dc=example,dc=com".*') + if ds_is_older(('1.4.3.9', '1.4.4.3')): + # (Internal) op=12(1)(1) SRCH base="uid=test_user_777, ou=branch1,dc=example,dc=com" + assert topo.ds_access_log.match(r'.*\(Internal\) op=[0-9]+\([0-9]+\)\([0-9]+\) SRCH 
base="uid=test_user_777,' + 'ou=branch1,dc=example,dc=com".*') + # (Internal) op=12(1)(1) RESULT err=0 tag=48 nentries=1 + assert topo.ds_access_log.match(r'.*\(Internal\) op=[0-9]+\([0-9]+\)\([0-9]+\) RESULT err=0 tag=48 nentries=1.*') + # op=12 RESULT err=0 tag=109 + assert not topo.ds_access_log.match(r'.*op=[0-9]+ RESULT err=0 tag=109.*') + + log.info("Check the access logs for DEL operation of the user") + # op=15 DEL dn="uid=new_test_user_777,dc=example,dc=com" + assert not topo.ds_access_log.match(r'.*op=[0-9]+ DEL dn="uid=new_test_user_777,dc=example,dc=com".*') + if ds_is_older(('1.4.3.9', '1.4.4.3')): + # (Internal) op=15(1)(1) SRCH base="uid=new_test_user_777, dc=example,dc=com" + assert topo.ds_access_log.match(r'.*\(Internal\) op=[0-9]+\([0-9]+\)\([0-9]+\) SRCH base="uid=new_test_user_777,' + 'dc=example,dc=com".*') + # (Internal) op=15(1)(1) RESULT err=0 tag=48 nentries=1 + assert topo.ds_access_log.match(r'.*\(Internal\) op=[0-9]+\([0-9]+\)\([0-9]+\) RESULT err=0 tag=48 nentries=1.*') + # op=15 RESULT err=0 tag=107 + assert not topo.ds_access_log.match(r'.*op=[0-9]+ RESULT err=0 tag=107.*') + + log.info("Check if the other internal operations have the correct format") + # conn=Internal(0) op=0 + assert topo.ds_access_log.match(r'.*conn=Internal\([0-9]+\) op=[0-9]+\([0-9]+\)\([0-9]+\).*') + + +@pytest.mark.xfail(ds_is_older('1.4.0'), reason="May fail on 1.3.x because of bug 1358706") +@pytest.mark.bz1358706 +@pytest.mark.ds49029 +def test_internal_log_level_516(topology_st, add_user_log_level_516, disable_access_log_buffering): + """Tests client initiated operations when referential integrity plugin is enabled + + :id: bee1d681-763d-4fa5-aca2-569cf93f8b71 + :setup: Standalone instance + Configure access log level to - 512+4 + Set nsslapd-plugin-logging to on + :steps: + 1. Configure access log level to 516 + 2. Set nsslapd-plugin-logging to on + 3. Enable Referential Integrity and automember plugins + 4. Restart the server + 5. Add a test group + 6. 
Add a test user and add it as member of the test group + 7. Rename the test user + 8. Delete the test user + 9. Check the access logs for nested internal operation logs + :expectedresults: + 1. Operation should be successful + 2. Operation should be successful + 3. Operation should be successful + 4. Operation should be successful + 5. Operation should be successful + 6. Operation should be successful + 7. Operation should be successful + 8. Operation should be successful + 9. Access log should contain internal info about operations of the user + """ + + topo = topology_st.standalone + + log.info('Restart the server to flush the logs') + topo.restart() + + # These comments contain lines we are trying to find without regex (the op numbers are just examples) + log.info("Check the access logs for ADD operation of the user") + # op=10 ADD dn="uid=test_user_777,ou=branch1,dc=example,dc=com" + assert not topo.ds_access_log.match(r'.*op=[0-9]+ ADD dn="uid=test_user_777,ou=branch1,dc=example,dc=com".*') + # (Internal) op=10(1)(1) MOD dn="cn=group,ou=Groups,dc=example,dc=com" + assert topo.ds_access_log.match(r'.*\(Internal\) op=[0-9]+\([0-9]+\)\([0-9]+\) ' + r'MOD dn="cn=group,ou=Groups,dc=example,dc=com".*') + # (Internal) op=10(1)(2) SRCH base="cn=group,ou=Groups,dc=example,dc=com" + assert topo.ds_access_log.match(r'.*\(Internal\) op=[0-9]+\([0-9]+\)\([0-9]+\) ' + r'SRCH base="cn=group,ou=Groups,dc=example,dc=com".*') + # (Internal) op=10(1)(2) ENTRY dn="cn=group,ou=Groups,dc=example,dc=com" + assert topo.ds_access_log.match(r'.*\(Internal\) op=[0-9]+\([0-9]+\)\([0-9]+\) ' + r'ENTRY dn="cn=group,ou=Groups,dc=example,dc=com".*') + # (Internal) op=10(1)(2) RESULT err=0 tag=48 nentries=1*') + assert topo.ds_access_log.match(r'.*\(Internal\) op=[0-9]+\([0-9]+\)\([0-9]+\) RESULT err=0 tag=48 nentries=1*') + # (Internal) op=10(1)(1) RESULT err=0 tag=48 + assert topo.ds_access_log.match(r'.*\(Internal\) op=[0-9]+\([0-9]+\)\([0-9]+\) RESULT err=0 tag=48.*') + + log.info("Check 
the access logs for MOD operation of the user") + # op=12 MODRDN dn="uid=test_user_777,ou=branch1,dc=example,dc=com" ' + # 'newrdn="uid=new_test_user_777" newsuperior="dc=example,dc=com" + assert not topo.ds_access_log.match(r'.*op=[0-9]+ MODRDN dn="uid=test_user_777,ou=branch1,dc=example,dc=com" ' + 'newrdn="uid=new_test_user_777" newsuperior="dc=example,dc=com".*') + if ds_is_older(('1.4.3.9', '1.4.4.3')): + # Internal) op=12(1)(1) SRCH base="uid=test_user_777, ou=branch1,dc=example,dc=com" + assert topo.ds_access_log.match(r'.*\(Internal\) op=[0-9]+\([0-9]+\)\([0-9]+\) SRCH base="uid=test_user_777,' + 'ou=branch1,dc=example,dc=com".*') + # (Internal) op=12(1)(1) ENTRY dn="uid=test_user_777, ou=branch1,dc=example,dc=com" + assert topo.ds_access_log.match(r'.*\(Internal\) op=[0-9]+\([0-9]+\)\([0-9]+\) ENTRY dn="uid=test_user_777,' + 'ou=branch1,dc=example,dc=com".*') + # (Internal) op=12(1)(1) RESULT err=0 tag=48 nentries=1 + assert topo.ds_access_log.match(r'.*\(Internal\) op=[0-9]+\([0-9]+\)\([0-9]+\) RESULT err=0 tag=48 nentries=1.*') + # op=12 RESULT err=0 tag=48 + assert not topo.ds_access_log.match(r'.*op=[0-9]+ RESULT err=0 tag=48.*') + + log.info("Check the access logs for DEL operation of the user") + # op=15 DEL dn="uid=new_test_user_777,dc=example,dc=com" + assert not topo.ds_access_log.match(r'.*op=[0-9]+ DEL dn="uid=new_test_user_777,dc=example,dc=com".*') + if ds_is_older(('1.4.3.9', '1.4.4.3')): + # (Internal) op=15(1)(1) SRCH base="uid=new_test_user_777, dc=example,dc=com" + assert topo.ds_access_log.match(r'.*\(Internal\) op=[0-9]+\([0-9]+\)\([0-9]+\) SRCH base="uid=new_test_user_777,' + 'dc=example,dc=com".*') + # (Internal) op=15(1)(1) ENTRY dn="uid=new_test_user_777, dc=example,dc=com" + assert topo.ds_access_log.match(r'.*\(Internal\) op=[0-9]+\([0-9]+\)\([0-9]+\) ENTRY dn="uid=new_test_user_777,' + 'dc=example,dc=com".*') + # (Internal) op=15(1)(1) RESULT err=0 tag=48 nentries=1 + assert topo.ds_access_log.match(r'.*\(Internal\) 
op=[0-9]+\([0-9]+\)\([0-9]+\) RESULT err=0 tag=48 nentries=1.*') + # op=15 RESULT err=0 tag=107 + assert not topo.ds_access_log.match(r'.*op=[0-9]+ RESULT err=0 tag=107.*') + + log.info("Check if the other internal operations have the correct format") + # conn=Internal(0) op=0 + assert topo.ds_access_log.match(r'.*conn=Internal\([0-9]+\) op=[0-9]+\([0-9]+\)\([0-9]+\).*') + + +@pytest.mark.skipif(ds_is_older('1.4.2.0'), reason="Not implemented") +@pytest.mark.bz1358706 +@pytest.mark.ds49232 +def test_access_log_truncated_search_message(topology_st, clean_access_logs): + """Tests that the access log message is properly truncated when the message is too long + + :id: 0a9af37d-3311-4a2f-ac0a-9a1c631aaf27 + :setup: Standalone instance + :steps: + 1. Make a search with a 2048+ characters basedn, filter and attribute list + 2. Check the access log has the message and it's truncated + :expectedresults: + 1. Operation should be successful + 2. Access log should contain truncated basedn, filter and attribute list + """ + + topo = topology_st.standalone + + large_str_base = "".join("cn=test," for _ in range(512)) + large_str_filter = "".join("(cn=test)" for _ in range(512)) + users = UserAccounts(topo, f'{large_str_base}dc=ending') + users._list_attrlist = [f'cn{i}' for i in range(512)] + log.info("Make a search") + users.filter(f'(|(objectclass=tester){large_str_filter}(cn=ending))') + + log.info('Restart the server to flush the logs') + topo.restart() + + assert topo.ds_access_log.match(r'.*cn=test,cn=test,.*') + assert topo.ds_access_log.match(r'.*objectClass=tester.*') + assert topo.ds_access_log.match(r'.*cn10.*') + assert not topo.ds_access_log.match(r'.*dc=ending.*') + assert not topo.ds_access_log.match(r'.*cn=ending.*') + assert not topo.ds_access_log.match(r'.*cn500.*') + + +@pytest.mark.skipif(ds_is_newer("1.4.3"), reason="rsearch was removed") +@pytest.mark.xfail(ds_is_older('1.4.2.0'), reason="May fail because of bug 1732053") +@pytest.mark.bz1732053 
+@pytest.mark.ds50510 +def test_etime_at_border_of_second(topology_st, clean_access_logs): + """Test that the etime reported in the access log doesn't contain wrong nsec value + + :id: 622be191-235b-4e1f-b581-2627fb10e494 + :setup: Standalone instance + :steps: + 1. Run rsearch + 2. Check access logs + :expectedresults: + 1. Success + 2. No etime with 0.199xxx (everything should be few ms) + """ + topo = topology_st.standalone + + prog = os.path.join(topo.ds_paths.bin_dir, 'rsearch') + + cmd = [prog] + + # base search + cmd.extend(['-s', DN_CONFIG]) + + # scope of the search + cmd.extend(['-S', '0']) + + # host / port + cmd.extend(['-h', HOST_STANDALONE]) + cmd.extend(['-p', str(PORT_STANDALONE)]) + + # bound as DM to make it faster + cmd.extend(['-D', DN_DM]) + cmd.extend(['-w', PASSWORD]) + + # filter + cmd.extend(['-f', "(cn=config)"]) + + # 2 samples SRCH + cmd.extend(['-C', "2"]) + + output = subprocess.check_output(cmd) + topo.stop() + + # No etime with 0.199xxx (everything should be few ms) + invalid_etime = topo.ds_access_log.match(r'.*etime=0\.19.*') + if invalid_etime: + for i in range(len(invalid_etime)): + log.error('It remains invalid or weird etime: %s' % invalid_etime[i]) + assert not invalid_etime + + +@pytest.mark.skipif(ds_is_older('1.3.10.1', '1.4.1'), reason="Fail because of bug 1749236") +@pytest.mark.bz1749236 +def test_etime_order_of_magnitude(topology_st, clean_access_logs, remove_users, disable_access_log_buffering): + """Test that the etime reported in the access log has a correct order of magnitude + + :id: e815cfa0-8136-4932-b50f-c3dfac34b0e6 + :setup: Standalone instance + :steps: + 1. Unset log buffering for the access log + 2. Delete potential existing access logs + 3. Add users + 4. Search users + 5. Restart the server to flush the logs + 6. Parse the access log looking for the SRCH operation log + 7. From the SRCH string get the start time and op number of the operation + 8. 
From the op num find the associated RESULT string in the access log + 9. From the RESULT string get the end time and the etime for the operation + 10. Calculate the ratio between the calculated elapsed time (end time - start time) and the logged etime + :expectedresults: + 1. access log buffering is off + 2. Previously existing access logs are deleted + 3. Users are successfully added + 4. Search operation is successful + 5. Server is restarted and logs are flushed + 6. SRCH operation log string is catched + 7. start time and op number are collected + 8. RESULT string is catched from the access log + 9. end time and etime are collected + 10. ratio between calculated elapsed time and logged etime is less or equal to 1 + """ + + DSLdapObject(topology_st.standalone, DEFAULT_SUFFIX) + + log.info('add_users') + add_users(topology_st.standalone, 30) + + log.info ('search users') + search_users(topology_st.standalone) + + log.info('parse the access logs to get the SRCH string') + # Here we are looking at the whole string logged for the search request with base ou=People,dc=example,dc=com + search_str = str(topology_st.standalone.ds_access_log.match(r'.*SRCH base="ou=People,dc=example,dc=com.*'))[1:-1] + assert len(search_str) > 0 + + # the search_str returned looks like : + # [23/Apr/2020:06:06:14.360857624 -0400] conn=1 op=93 SRCH base="ou=People,dc=example,dc=com" scope=2 filter="(&(objectClass=account)(objectClass=posixaccount)(objectClass=inetOrgPerson)(objectClass=organizationalPerson))" attrs="distinguishedName" + + log.info('get the operation start time from the SRCH string') + # Here we are getting the sec.nanosec part of the date, '14.360857624' in the example above + start_time = (search_str.split()[0]).split(':')[3] + + log.info('get the OP number from the SRCH string') + # Here we are getting the op number, 'op=93' in the above example + op_num = search_str.split()[3] + + log.info('get the RESULT string matching the SRCH OP number') + # Here we are looking at 
the RESULT string for the above search op, 'op=93' in this example + result_str = str(topology_st.standalone.ds_access_log.match(r'.*{} RESULT*'.format(op_num)))[1:-1] + assert len(result_str) > 0 + + # The result_str returned looks like : + # For ds older than 1.4.3.8: [23/Apr/2020:06:06:14.366429900 -0400] conn=1 op=93 RESULT err=0 tag=101 nentries=30 etime=0.005723017 + # For ds newer than 1.4.3.8: [21/Oct/2020:09:27:50.095209871 -0400] conn=1 op=96 RESULT err=0 tag=101 nentries=30 wtime=0.000412584 optime=0.005428971 etime=0.005836077 + + log.info('get the operation end time from the RESULT string') + # Here we are getting the sec.nanosec part of the date, '14.366429900' in the above example + end_time = (result_str.split()[0]).split(':')[3] + + log.info('get the logged etime for the operation from the RESULT string') + # Here we are getting the etime value, '0.005723017' in the example above + if ds_is_older('1.4.3.8'): + etime = result_str.split()[8].split('=')[1][:-3] + else: + etime = result_str.split()[10].split('=')[1][:-3] + + log.info('Calculate the ratio between logged etime for the operation and elapsed time from its start time to its end time - should be around 1') + etime_ratio = (Decimal(end_time) - Decimal(start_time)) // Decimal(etime) + assert etime_ratio <= 1 + + +@pytest.mark.skipif(ds_is_older('1.4.3.8'), reason="Fail because of bug 1850275") +@pytest.mark.bz1850275 +@pytest.mark.bz1924848 +def test_optime_and_wtime_keywords(topology_st, clean_access_logs, remove_users, disable_access_log_buffering): + """Test that the new optime and wtime keywords are present in the access log and have correct values + + :id: dfb4a49d-1cfc-400e-ba43-c107f58d62cf + :customerscenario: True + :setup: Standalone instance + :steps: + 1. Unset log buffering for the access log + 2. Delete potential existing access logs + 3. Add users + 4. Search users + 5. Parse the access log looking for the SRCH operation log + 6. 
From the SRCH string get the op number of the operation + 7. From the op num find the associated RESULT string in the access log + 8. Search for the wtime optime keywords in the RESULT string + 9. From the RESULT string get the wtime, optime and etime values for the operation + :expectedresults: + 1. access log buffering is off + 2. Previously existing access logs are deleted + 3. Users are successfully added + 4. Search operation is successful + 5. SRCH operation log string is catched + 6. op number is collected + 7. RESULT string is catched from the access log + 8. wtime and optime keywords are collected + 9. wtime, optime and etime values are collected + """ + + log.info('add_users') + add_users(topology_st.standalone, 30) + + log.info ('search users') + search_users(topology_st.standalone) + + log.info('parse the access logs to get the SRCH string') + # Here we are looking at the whole string logged for the search request with base ou=People,dc=example,dc=com + search_str = str(topology_st.standalone.ds_access_log.match(r'.*SRCH base="ou=People,dc=example,dc=com.*'))[1:-1] + assert len(search_str) > 0 + + # the search_str returned looks like : + # [22/Oct/2020:09:47:11.951316798 -0400] conn=1 op=96 SRCH base="ou=People,dc=example,dc=com" scope=2 filter="(&(objectClass=account)(objectClass=posixaccount)(objectClass=inetOrgPerson)(objectClass=organizationalPerson))" attrs="distinguishedName" + + log.info('get the OP number from the SRCH string') + # Here we are getting the op number, 'op=96' in the above example + op_num = search_str.split()[3] + + log.info('get the RESULT string matching the SRCH op number') + # Here we are looking at the RESULT string for the above search op, 'op=96' in this example + result_str = str(topology_st.standalone.ds_access_log.match(r'.*{} RESULT*'.format(op_num)))[1:-1] + assert len(result_str) > 0 + + # The result_str returned looks like : + # [22/Oct/2020:09:47:11.963276018 -0400] conn=1 op=96 RESULT err=0 tag=101 nentries=30 
wtime=0.000180294 optime=0.011966632 etime=0.012141311 + log.info('Search for the wtime keyword in the RESULT string') + assert re.search('wtime', result_str) + + log.info('get the wtime value from the RESULT string') + wtime_value = result_str.split()[8].split('=')[1][:-3] + + log.info('Search for the optime keyword in the RESULT string') + assert re.search('optime', result_str) + + log.info('get the optime value from the RESULT string') + optime_value = result_str.split()[9].split('=')[1][:-3] + + log.info('get the etime value from the RESULT string') + etime_value = result_str.split()[10].split('=')[1][:-3] + + log.info('Perform a compare operation') + topology_st.standalone.compare_s('uid=testuser1000,ou=people,dc=example,dc=com','uid', 'testuser1000') + ops = topology_st.standalone.ds_access_log.match('.*CMP dn="uid=testuser1000,ou=people,dc=example,dc=com"') + + log.info('get the wtime and optime values from the RESULT string') + ops_value = topology_st.standalone.ds_access_log.parse_line(ops[0]) + value = topology_st.standalone.ds_access_log.match(f'.*op={ops_value["op"]} RESULT') + time_value = topology_st.standalone.ds_access_log.parse_line(value[0]) + wtime = time_value['rem'].split()[3].split('=')[1] + optime = time_value['rem'].split()[4].split('=')[1] + + log.info('Check that compare operation is not generating negative values for wtime and optime') + if (Decimal(wtime) > 0) and (Decimal(optime) > 0): + assert True + else: + log.info('wtime and optime values are negatives') + assert False + + +@pytest.mark.xfail(ds_is_older('1.3.10.1'), reason="May fail because of bug 1662461") +@pytest.mark.bz1662461 +@pytest.mark.ds50428 +@pytest.mark.ds49969 +def test_log_base_dn_when_invalid_attr_request(topology_st, disable_access_log_buffering): + """Test that DS correctly logs the base dn when a search with invalid attribute request is performed + + :id: 859de962-c261-4ffb-8705-97bceab1ba2c + :setup: Standalone instance + :steps: + 1. 
Disable the accesslog-logbuffering config parameter + 2. Delete the previous access log + 3. Perform a base search on the DEFAULT_SUFFIX, using ten empty attribute requests + 4. Check the access log file for 'invalid attribute request' + 5. Check the access log file for 'SRCH base="\(null\)"' + 6. Check the access log file for 'SRCH base="DEFAULT_SUFFIX"' + :expectedresults: + 1. Operations are visible in the access log in real time + 2. Fresh new access log is created + 3. The search operation raises a Protocol error + 4. The access log should have an 'invalid attribute request' message + 5. The access log should not have "\(null\)" as value for the Search base dn + 6. The access log should have the value of DEFAULT_SUFFIX as Search base dn + """ + + entry = DSLdapObject(topology_st.standalone, DEFAULT_SUFFIX) + + log.info('delete the previous access logs to get a fresh new one') + topology_st.standalone.deleteAccessLogs() + + log.info("Search the default suffix, with invalid '\"\" \"\"' attribute request") + log.info("A Protocol error exception should be raised, see https://github.com/389ds/389-ds-base/issues/3028") + # A ldap.PROTOCOL_ERROR exception is expected after 10 empty values + with pytest.raises(ldap.PROTOCOL_ERROR): + assert entry.get_attrs_vals_utf8(['', '', '', '', '', '', '', '', '', '', '']) + + # Search for appropriate messages in the access log + log.info('Check the access logs for correct messages') + # We should find the 'invalid attribute request' information + assert topology_st.standalone.ds_access_log.match(r'.*invalid attribute request.*') + # We should not find a "(null)" base dn mention + assert not topology_st.standalone.ds_access_log.match(r'.*SRCH base="\(null\)".*') + # We should find the base dn for the search + assert topology_st.standalone.ds_access_log.match(r'.*SRCH base="{}".*'.format(DEFAULT_SUFFIX)) + + +@pytest.mark.xfail(ds_is_older('1.3.8', '1.4.2'), reason="May fail because of bug 1676948") +@pytest.mark.bz1676948 
+@pytest.mark.ds50536 +def test_audit_log_rotate_and_check_string(topology_st, clean_access_logs, set_audit_log_config_values_to_rotate): + """Version string should be logged only once at the top of audit log + after it is rotated. + + :id: 14dffb22-2f9c-11e9-8a03-54e1ad30572c + + :customerscenario: True + + :setup: Standalone instance + + :steps: + 1. Set nsslapd-auditlog-logging-enabled: on + 2. Set nsslapd-auditlog-maxlogsize: 1 + 3. Do modifications to the entry, until audit log file is rotated + 4. Check audit logs + + :expectedresults: + 1. Attribute nsslapd-auditlog-logging-enabled should be set to on + 2. Attribute nsslapd-auditlog-maxlogsize should be set to 1 + 3. Audit file should grow till 1MB and then should be rotated + 4. Audit file log should contain version string only once at the top + """ + + standalone = topology_st.standalone + search_ds = '389-Directory' + + users = UserAccounts(standalone, DEFAULT_SUFFIX) + user = users.create(properties={ + 'uid': 'test_audit_log', + 'cn': 'test', + 'sn': 'user', + 'uidNumber': '1000', + 'gidNumber': '1000', + 'homeDirectory': '/home/test', + }) + + log.info('Doing modifications to rotate audit log') + audit_log = standalone.ds_paths.audit_log + while len(glob.glob(audit_log + '*')) == 2: + user.replace('description', 'test'*100) + + log.info('Doing one more modification just in case') + user.replace('description', 'test2'*100) + + standalone.stop() + + count = 0 + with open(audit_log) as f: + log.info('Check that DS string is present on first line') + assert search_ds in f.readline() + f.seek(0) + + log.info('Check that DS string is present only once') + for line in f.readlines(): + if search_ds in line: + count += 1 + assert count == 1 + + +def test_enable_external_libs_debug_log(topology_st): + """Check that OpenLDAP logs are successfully enabled and disabled + + :id: b04646e3-9a5e-45ae-ad81-2882c1daf23e + :setup: Standalone instance + :steps: 1. Create a user to bind on + 2. 
Set nsslapd-external-libs-debug-enabled to "on" + 3. Clean the error log + 4. Bind as the user to generate OpenLDAP output + 5. Restart the servers to flush the logs + 6. Check the error log for OpenLDAP debug log + 7. Set nsslapd-external-libs-debug-enabled to "on" + 8. Clean the error log + 9. Bind as the user to generate OpenLDAP output + 10. Restart the servers to flush the logs + 11. Check the error log for OpenLDAP debug log + :expectedresults: 1. Success + 2. Success + 3. Success + 4. Success + 5. Success + 6. Logs are present + 7. Success + 8. Success + 9. Success + 10. Success + 11. No logs are present + """ + + standalone = topology_st.standalone + + log.info('Create a user to bind on') + users = UserAccounts(standalone, DEFAULT_SUFFIX) + user = users.ensure_state(properties={ + 'uid': 'test_audit_log', + 'cn': 'test', + 'sn': 'user', + 'uidNumber': '1000', + 'gidNumber': '1000', + 'homeDirectory': '/home/test', + 'userPassword': PASSWORD + }) + + log.info('Set nsslapd-external-libs-debug-enabled to "on"') + standalone.config.set('nsslapd-external-libs-debug-enabled', 'on') + + log.info('Clean the error log') + standalone.deleteErrorLogs() + + log.info('Bind as the user to generate OpenLDAP output') + user.bind(PASSWORD) + + log.info('Restart the servers to flush the logs') + standalone.restart() + + log.info('Check the error log for OpenLDAP debug log') + assert standalone.ds_error_log.match('.*libldap/libber.*') + + log.info('Set nsslapd-external-libs-debug-enabled to "off"') + standalone.config.set('nsslapd-external-libs-debug-enabled', 'off') + + log.info('Clean the error log') + standalone.deleteErrorLogs() + + log.info('Bind as the user to generate OpenLDAP output') + user.bind(PASSWORD) + + log.info('Restart the servers to flush the logs') + standalone.restart() + + log.info('Check the error log for OpenLDAP debug log') + assert not standalone.ds_error_log.match('.*libldap/libber.*') + + +@pytest.mark.skipif(ds_is_older('1.4.3'), reason="Might fail 
because of bug 1895460") +@pytest.mark.bz1895460 +@pytest.mark.ds4593 +def test_cert_personality_log_help(topology_st, request): + """Test changing the nsSSLPersonalitySSL attribute will raise help message in log + + :id: d6f17f64-d784-438e-89b6-8595bdf6defb + :customerscenario: True + :setup: Standalone + :steps: + 1. Create instance + 2. Change nsSSLPersonalitySSL to wrong certificate nickname + 3. Check there is a help message in error log + :expectedresults: + 1. Success + 2. Success + 3. Success + """ + + WRONG_NICK = 'otherNick' + standalone = topology_st.standalone + standalone.enable_tls() + + log.info('Change nsSSLPersonalitySSL to wrong certificate nickname') + config_RSA = RSA(standalone) + config_RSA.set('nsSSLPersonalitySSL', WRONG_NICK) + + with pytest.raises(subprocess.CalledProcessError): + standalone.restart() + + assert standalone.ds_error_log.match(r".*Please, make sure that nsSSLPersonalitySSL value " + r"is correctly set to the certificate from NSS database " + r"\(currently, nsSSLPersonalitySSL attribute " + r"is set to '{}'\)\..*".format(WRONG_NICK)) + def fin(): + log.info('Restore certificate nickname') + dse_ldif = DSEldif(standalone) + dse_ldif.replace("cn=RSA,cn=encryption,cn=config", "nsSSLPersonalitySSL", "Server-Cert") + + request.addfinalizer(fin) + +def test_stat_index(topology_st, request): + """Testing nsslapd-statlog-level with indexing statistics + + :id: fcabab05-f000-468c-8eb4-02ce3c39c902 + :setup: Standalone instance + :steps: + 1. Check that nsslapd-statlog-level is 0 (default) + 2. Create 20 users with 'cn' starting with 'user\_' + 3. Check there is no statistic record in the access log with ADD + 4. Check there is no statistic record in the access log with SRCH + 5. Set nsslapd-statlog-level=LDAP_STAT_READ_INDEX (0x1) to get + statistics when reading indexes + 6. Check there is statistic records in access log with SRCH + :expectedresults: + 1. This should pass + 2. This should pass + 3. This should pass + 4. 
This should pass + 5. This should pass + 6. This should pass + """ + topology_st.standalone.start() + + # Step 1 + log.info("Assert nsslapd-statlog-level is by default 0") + assert topology_st.standalone.config.get_attr_val_int("nsslapd-statlog-level") == 0 + + # Step 2 + users = UserAccounts(topology_st.standalone, DEFAULT_SUFFIX) + users_set = [] + log.info('Adding 20 users') + for i in range(20): + name = 'user_%d' % i + last_user = users.create(properties={ + 'uid': name, + 'sn': name, + 'cn': name, + 'uidNumber': '1000', + 'gidNumber': '1000', + 'homeDirectory': '/home/%s' % name, + 'mail': '%s@example.com' % name, + 'userpassword': 'pass%s' % name, + }) + users_set.append(last_user) + + # Step 3 + assert not topology_st.standalone.ds_access_log.match('.*STAT read index.*') + + # Step 4 + entries = topology_st.standalone.search_s(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE, "cn=user_*") + assert not topology_st.standalone.ds_access_log.match('.*STAT read index.*') + + # Step 5 + log.info("Set nsslapd-statlog-level: 1 to enable indexing statistics") + topology_st.standalone.config.set("nsslapd-statlog-level", "1") + + # Step 6 + entries = topology_st.standalone.search_s(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE, "cn=user_*") + topology_st.standalone.stop() + assert topology_st.standalone.ds_access_log.match('.*STAT read index.*') + assert topology_st.standalone.ds_access_log.match('.*STAT read index: attribute.*') + assert topology_st.standalone.ds_access_log.match('.*STAT read index: duration.*') + topology_st.standalone.start() + + def fin(): + log.info('Deleting users') + for user in users_set: + user.delete() + topology_st.standalone.config.set("nsslapd-statlog-level", "0") + + request.addfinalizer(fin) + +def test_stat_internal_op(topology_st, request): + """Check that statistics can also be collected for internal operations + + :id: 19f393bd-5866-425a-af7a-4dade06d5c77 + :setup: Standalone Instance + :steps: + 1. Check that nsslapd-statlog-level is 0 (default) + 2. 
Enable memberof plugins + 3. Create a user + 4. Remove access log (to only detect new records) + 5. Enable statistic logging nsslapd-statlog-level=1 + 6. Check that on direct SRCH there is no 'Internal' Stat records + 7. Remove access log (to only detect new records) + 8. Add group with the user, so memberof triggers internal search + and check it exists 'Internal' Stat records + :expectedresults: + 1. Success + 2. Success + 3. Success + 4. Success + 5. Success + 6. Success + 7. Success + 8. Success + """ + + inst = topology_st.standalone + + # Step 1 + log.info("Assert nsslapd-statlog-level is by default 0") + assert topology_st.standalone.config.get_attr_val_int("nsslapd-statlog-level") == 0 + + # Step 2 + memberof = MemberOfPlugin(inst) + memberof.enable() + inst.restart() + + # Step 3 Add setup entries + users = UserAccounts(inst, DEFAULT_SUFFIX, rdn=None) + user = users.create(properties={'uid': 'test_1', + 'cn': 'test_1', + 'sn': 'test_1', + 'description': 'member', + 'uidNumber': '1000', + 'gidNumber': '2000', + 'homeDirectory': '/home/testuser'}) + # Step 4 reset accesslog + topology_st.standalone.stop() + lpath = topology_st.standalone.ds_access_log._get_log_path() + os.unlink(lpath) + topology_st.standalone.start() + + # Step 5 enable statistics + log.info("Set nsslapd-statlog-level: 1 to enable indexing statistics") + topology_st.standalone.config.set("nsslapd-statlog-level", "1") + + # Step 6 for direct SRCH only non internal STAT records + entries = topology_st.standalone.search_s(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE, "uid=test_1") + topology_st.standalone.stop() + assert topology_st.standalone.ds_access_log.match('.*STAT read index.*') + assert topology_st.standalone.ds_access_log.match('.*STAT read index: attribute.*') + assert topology_st.standalone.ds_access_log.match('.*STAT read index: duration.*') + assert not topology_st.standalone.ds_access_log.match('.*Internal.*STAT.*') + topology_st.standalone.start() + + # Step 7 reset accesslog + 
topology_st.standalone.stop() + lpath = topology_st.standalone.ds_access_log._get_log_path() + os.unlink(lpath) + topology_st.standalone.start() + + # Step 8 trigger internal searches and check internal stat records + groups = Groups(inst, DEFAULT_SUFFIX, rdn=None) + group = groups.create(properties={'cn': 'mygroup', + 'member': 'uid=test_1,%s' % DEFAULT_SUFFIX, + 'description': 'group'}) + topology_st.standalone.restart() + assert topology_st.standalone.ds_access_log.match('.*Internal.*STAT read index.*') + assert topology_st.standalone.ds_access_log.match('.*Internal.*STAT read index: attribute.*') + assert topology_st.standalone.ds_access_log.match('.*Internal.*STAT read index: duration.*') + + def fin(): + log.info('Deleting user/group') + user.delete() + group.delete() + + request.addfinalizer(fin) + +def test_referral_check(topology_st, request): + """Check that referral detection mechanism works + + :id: ff9b4247-d1fd-4edc-ba74-6ad61e65c0a4 + :setup: Standalone Instance + :steps: + 1. Set nsslapd-referral-check-period=7 to accelerate test + 2. Add a test entry + 3. Remove error log file + 4. Check that no referral entry exist + 5. Create a referral entry + 6. Check that the server detects the referral + 7. Delete the referral entry + 8. Check that the server detects the deletion of the referral + :expectedresults: + 1. Success + 2. Success + 3. Success + 4. Success + 5. Success + 6. Success + 7. Success + 8. 
Success + """ + + inst = topology_st.standalone + + # Step 1 reduce nsslapd-referral-check-period to accelerate test + REFERRAL_CHECK=7 + topology_st.standalone.config.set("nsslapd-referral-check-period", str(REFERRAL_CHECK)) + topology_st.standalone.restart() + + # Step 2 Add a test entry + users = UserAccounts(inst, DEFAULT_SUFFIX, rdn=None) + user = users.create(properties={'uid': 'test_1', + 'cn': 'test_1', + 'sn': 'test_1', + 'description': 'member', + 'uidNumber': '1000', + 'gidNumber': '2000', + 'homeDirectory': '/home/testuser'}) + + # Step 3 Remove error log file + topology_st.standalone.stop() + lpath = topology_st.standalone.ds_error_log._get_log_path() + os.unlink(lpath) + topology_st.standalone.start() + + # Step 4 Check that no referral entry is found (on regular deployment) + entries = topology_st.standalone.search_s(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE, "uid=test_1") + time.sleep(REFERRAL_CHECK + 1) + assert not topology_st.standalone.ds_error_log.match('.*slapd_daemon - New referral entries are detected.*') + + # Step 5 Create a referral entry + REFERRAL_DN = "cn=my_ref,%s" % DEFAULT_SUFFIX + properties = ({'cn': 'my_ref', + 'uid': 'my_ref', + 'sn': 'my_ref', + 'uidNumber': '1000', + 'gidNumber': '2000', + 'homeDirectory': '/home/testuser', + 'description': 'referral entry', + 'objectclass': "top referral extensibleObject".split(), + 'ref': 'ref: ldap://remote/%s' % REFERRAL_DN}) + referral = UserAccount(inst, REFERRAL_DN) + referral.create(properties=properties) + + # Step 6 Check that the server detected the referral + time.sleep(REFERRAL_CHECK + 1) + assert topology_st.standalone.ds_error_log.match('.*slapd_daemon - New referral entries are detected under %s.*' % DEFAULT_SUFFIX) + assert not topology_st.standalone.ds_error_log.match('.*slapd_daemon - No more referral entry under %s' % DEFAULT_SUFFIX) + + # Step 7 Delete the referral entry + referral.delete() + + # Step 8 Check that the server detected the deletion of the referral + 
time.sleep(REFERRAL_CHECK + 1) + assert topology_st.standalone.ds_error_log.match('.*slapd_daemon - No more referral entry under %s' % DEFAULT_SUFFIX) + + def fin(): + log.info('Deleting user/referral') + try: + user.delete() + referral.delete() + except: + pass + + request.addfinalizer(fin) + +def test_referral_subsuffix(topology_st, request): + """Test the results of an inverted parent suffix definition in the configuration. + + For more details see: + https://www.port389.org/docs/389ds/design/mapping_tree_assembly.html + + :id: 4faf210a-4fde-4e4f-8834-865bdc8f4d37 + :setup: Standalone instance + :steps: + 1. First create two Backends, without mapping trees. + 2. create the mapping trees for these backends + 3. reduce nsslapd-referral-check-period to accelerate test + 4. Remove error log file + 5. Create a referral entry on parent suffix + 6. Check that the server detected the referral + 7. Delete the referral entry + 8. Check that the server detected the deletion of the referral + 9. Remove error log file + 10. Create a referral entry on child suffix + 11. Check that the server detected the referral on both parent and child suffixes + 12. Delete the referral entry + 13. Check that the server detected the deletion of the referral on both parent and child suffixes + 14. Remove error log file + 15. Create a referral entry on parent suffix + 16. Check that the server detected the referral on both parent and child suffixes + 17. Delete the child referral entry + 18. Check that the server detected the deletion of the referral on child suffix but not on parent suffix + 19. Delete the parent referral entry + 20. Check that the server detected the deletion of the referral parent suffix + + :expectedresults: + all steps succeeds + """ + inst = topology_st.standalone + # Step 1 First create two Backends, without mapping trees. 
+ PARENT_SUFFIX='dc=parent,dc=com' + CHILD_SUFFIX='dc=child,%s' % PARENT_SUFFIX + be1 = create_backend(inst, 'Parent', PARENT_SUFFIX) + be2 = create_backend(inst, 'Child', CHILD_SUFFIX) + # Step 2 create the mapping trees for these backends + mts = MappingTrees(inst) + mt1 = mts.create(properties={ + 'cn': PARENT_SUFFIX, + 'nsslapd-state': 'backend', + 'nsslapd-backend': 'Parent', + }) + mt2 = mts.create(properties={ + 'cn': CHILD_SUFFIX, + 'nsslapd-state': 'backend', + 'nsslapd-backend': 'Child', + 'nsslapd-parent-suffix': PARENT_SUFFIX, + }) + + dc_ex = Domain(inst, dn=PARENT_SUFFIX) + assert dc_ex.exists() + + dc_st = Domain(inst, dn=CHILD_SUFFIX) + assert dc_st.exists() + + # Step 3 reduce nsslapd-referral-check-period to accelerate test + # requires a restart done on step 4 + REFERRAL_CHECK=7 + topology_st.standalone.config.set("nsslapd-referral-check-period", str(REFERRAL_CHECK)) + + # Check that if we create a referral at parent level + # - referral is detected at parent backend + # - referral is not detected at child backend + + # Step 3 Remove error log file + topology_st.standalone.stop() + lpath = topology_st.standalone.ds_error_log._get_log_path() + os.unlink(lpath) + topology_st.standalone.start() + + # Step 4 Create a referral entry on parent suffix + REFERRAL_DN = "cn=my_ref,%s" % PARENT_SUFFIX + properties = ({'cn': 'my_ref', + 'uid': 'my_ref', + 'sn': 'my_ref', + 'uidNumber': '1000', + 'gidNumber': '2000', + 'homeDirectory': '/home/testuser', + 'description': 'referral entry', + 'objectclass': "top referral extensibleObject".split(), + 'ref': 'ref: ldap://remote/%s' % REFERRAL_DN}) + referral = UserAccount(inst, REFERRAL_DN) + referral.create(properties=properties) + + # Step 5 Check that the server detected the referral + time.sleep(REFERRAL_CHECK + 1) + assert topology_st.standalone.ds_error_log.match('.*slapd_daemon - New referral entries are detected under %s.*' % PARENT_SUFFIX) + assert not 
topology_st.standalone.ds_error_log.match('.*slapd_daemon - New referral entries are detected under %s.*' % CHILD_SUFFIX) + assert not topology_st.standalone.ds_error_log.match('.*slapd_daemon - No more referral entry under %s' % PARENT_SUFFIX) + + # Step 6 Delete the referral entry + referral.delete() + + # Step 7 Check that the server detected the deletion of the referral + time.sleep(REFERRAL_CHECK + 1) + assert topology_st.standalone.ds_error_log.match('.*slapd_daemon - No more referral entry under %s' % PARENT_SUFFIX) + + # Check that if we create a referral at child level + # - referral is detected at parent backend + # - referral is detected at child backend + + # Step 8 Remove error log file + topology_st.standalone.stop() + lpath = topology_st.standalone.ds_error_log._get_log_path() + os.unlink(lpath) + topology_st.standalone.start() + + # Step 9 Create a referral entry on child suffix + REFERRAL_DN = "cn=my_ref,%s" % CHILD_SUFFIX + properties = ({'cn': 'my_ref', + 'uid': 'my_ref', + 'sn': 'my_ref', + 'uidNumber': '1000', + 'gidNumber': '2000', + 'homeDirectory': '/home/testuser', + 'description': 'referral entry', + 'objectclass': "top referral extensibleObject".split(), + 'ref': 'ref: ldap://remote/%s' % REFERRAL_DN}) + referral = UserAccount(inst, REFERRAL_DN) + referral.create(properties=properties) + + # Step 10 Check that the server detected the referral on both parent and child suffixes + time.sleep(REFERRAL_CHECK + 1) + assert topology_st.standalone.ds_error_log.match('.*slapd_daemon - New referral entries are detected under %s.*' % PARENT_SUFFIX) + assert topology_st.standalone.ds_error_log.match('.*slapd_daemon - New referral entries are detected under %s.*' % CHILD_SUFFIX) + assert not topology_st.standalone.ds_error_log.match('.*slapd_daemon - No more referral entry under %s' % CHILD_SUFFIX) + + # Step 11 Delete the referral entry + referral.delete() + + # Step 12 Check that the server detected the deletion of the referral on both parent and 
child suffixes + time.sleep(REFERRAL_CHECK + 1) + assert topology_st.standalone.ds_error_log.match('.*slapd_daemon - No more referral entry under %s' % PARENT_SUFFIX) + assert topology_st.standalone.ds_error_log.match('.*slapd_daemon - No more referral entry under %s' % CHILD_SUFFIX) + + # Check that if we create a referral at child level and parent level + # - referral is detected at parent backend + # - referral is detected at child backend + + # Step 13 Remove error log file + topology_st.standalone.stop() + lpath = topology_st.standalone.ds_error_log._get_log_path() + os.unlink(lpath) + topology_st.standalone.start() + + # Step 14 Create a referral entry on parent suffix + # Create a referral entry on child suffix + REFERRAL_DN = "cn=my_ref,%s" % PARENT_SUFFIX + properties = ({'cn': 'my_ref', + 'uid': 'my_ref', + 'sn': 'my_ref', + 'uidNumber': '1000', + 'gidNumber': '2000', + 'homeDirectory': '/home/testuser', + 'description': 'referral entry', + 'objectclass': "top referral extensibleObject".split(), + 'ref': 'ref: ldap://remote/%s' % REFERRAL_DN}) + referral = UserAccount(inst, REFERRAL_DN) + referral.create(properties=properties) + REFERRAL_DN = "cn=my_ref,%s" % CHILD_SUFFIX + properties = ({'cn': 'my_ref', + 'uid': 'my_ref', + 'sn': 'my_ref', + 'uidNumber': '1000', + 'gidNumber': '2000', + 'homeDirectory': '/home/testuser', + 'description': 'referral entry', + 'objectclass': "top referral extensibleObject".split(), + 'ref': 'ref: ldap://remote/%s' % REFERRAL_DN}) + referral = UserAccount(inst, REFERRAL_DN) + referral.create(properties=properties) + + # Step 15 Check that the server detected the referral on both parent and child suffixes + time.sleep(REFERRAL_CHECK + 1) + assert topology_st.standalone.ds_error_log.match('.*slapd_daemon - New referral entries are detected under %s.*' % PARENT_SUFFIX) + assert topology_st.standalone.ds_error_log.match('.*slapd_daemon - New referral entries are detected under %s.*' % CHILD_SUFFIX) + assert not 
topology_st.standalone.ds_error_log.match('.*slapd_daemon - No more referral entry under %s' % CHILD_SUFFIX) + + # Step 16 Delete the child referral entry + REFERRAL_DN = "cn=my_ref,%s" % CHILD_SUFFIX + referral = UserAccount(inst, REFERRAL_DN) + referral.delete() + + # Step 17 Check that the server detected the deletion of the referral on child suffix but not on parent suffix + time.sleep(REFERRAL_CHECK + 1) + assert topology_st.standalone.ds_error_log.match('.*slapd_daemon - No more referral entry under %s' % CHILD_SUFFIX) + assert not topology_st.standalone.ds_error_log.match('.*slapd_daemon - No more referral entry under %s' % PARENT_SUFFIX) + + # Step 18 Delete the parent referral entry + REFERRAL_DN = "cn=my_ref,%s" % PARENT_SUFFIX + referral = UserAccount(inst, REFERRAL_DN) + referral.delete() + + # Step 19 Check that the server detected the deletion of the referral parent suffix + time.sleep(REFERRAL_CHECK + 1) + assert topology_st.standalone.ds_error_log.match('.*slapd_daemon - No more referral entry under %s' % PARENT_SUFFIX) + + def fin(): + log.info('Deleting referral') + try: + REFERRAL_DN = "cn=my_ref,%s" % PARENT_SUFFIX + referral = UserAccount(inst, REFERRAL_DN) + referral.delete() + REFERRAL_DN = "cn=my_ref,%s" % CHILD_SUFFIX + referral = UserAccount(inst, REFERRAL_DN) + referral.delete() + except: + pass + + request.addfinalizer(fin) + +def test_missing_backend_suffix(topology_st, request): + """Test that the server does not crash if a backend has no suffix + + :id: 427c9780-4875-4a94-a3e4-afa11be7d1a9 + :setup: Standalone instance + :steps: + 1. Stop the instance + 2. remove 'nsslapd-suffix' from the backend (userRoot) + 3. start the instance + 4. 
Check it started successfully with SRCH on rootDSE + :expectedresults: + all steps succeeds + """ + topology_st.standalone.stop() + dse_ldif = topology_st.standalone.confdir + '/dse.ldif' + shutil.copy(dse_ldif, dse_ldif + '.correct') + os.system('sed -e "/nsslapd-suffix/d" %s > %s' % (dse_ldif + '.correct', dse_ldif)) + topology_st.standalone.start() + rdse = RootDSE(topology_st.standalone) + + def fin(): + log.info('Restore dse.ldif') + topology_st.standalone.stop() + shutil.copy(dse_ldif + '.correct', dse_ldif) + + request.addfinalizer(fin) + +if __name__ == '__main__': + # Run isolated + # -s for DEBUG mode + CURRENT_FILE = os.path.realpath(__file__) + pytest.main("-s %s" % CURRENT_FILE) diff --git a/dirsrvtests/tests/suites/ds_logs/regression_test.py b/dirsrvtests/tests/suites/ds_logs/regression_test.py new file mode 100644 index 0000000..e2de21a --- /dev/null +++ b/dirsrvtests/tests/suites/ds_logs/regression_test.py @@ -0,0 +1,79 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2017 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. 
+# --- END COPYRIGHT BLOCK --- +# + +import pytest +from lib389.dseldif import DSEldif +from lib389._constants import DN_CONFIG, LOG_REPLICA, LOG_DEFAULT, LOG_TRACE, LOG_ACL +from lib389.utils import os, logging +from lib389.topologies import topology_st as topo + +pytestmark = pytest.mark.tier2 + +DEBUGGING = os.getenv("DEBUGGING", default=False) +if DEBUGGING: + logging.getLogger(__name__).setLevel(logging.DEBUG) +else: + logging.getLogger(__name__).setLevel(logging.INFO) +log = logging.getLogger(__name__) + + +@pytest.mark.bz1460718 +@pytest.mark.parametrize("log_level", [(LOG_REPLICA + LOG_DEFAULT), (LOG_ACL + LOG_DEFAULT), (LOG_TRACE + LOG_DEFAULT)]) +def test_default_loglevel_stripped(topo, log_level): + """The default log level 16384 is stripped from the log level returned to a client + + :id: c300f8f1-aa11-4621-b124-e2be51930a6b + :parametrized: yes + :setup: Standalone instance + + :steps: 1. Change the error log level to the default and custom value. + 2. Check if the server returns the new value. + + :expectedresults: + 1. Changing the error log level should be successful. + 2. Server should return the new log level. + """ + + assert topo.standalone.config.set('nsslapd-errorlog-level', str(log_level)) + assert topo.standalone.config.get_attr_val_int('nsslapd-errorlog-level') == log_level + + +@pytest.mark.bz1460718 +def test_dse_config_loglevel_error(topo): + """Manually setting nsslapd-errorlog-level to 64 in dse.ldif throws error + + :id: 0eeefa17-ec1c-4208-8e7b-44d8fbc38f10 + + :setup: Standalone instance + + :steps: 1. Stop the server, edit dse.ldif file and change nsslapd-errorlog-level value to 64 + 2. Start the server and observe the error logs. + + :expectedresults: + 1. Server should be successfully stopped and nsslapd-errorlog-level value should be changed. + 2. Server should be successfully started without any errors being reported in the logs. 
+ """ + + topo.standalone.stop(timeout=10) + dse_ldif = DSEldif(topo.standalone) + try: + dse_ldif.replace(DN_CONFIG, 'nsslapd-errorlog-level', 64) + except: + log.error('Failed to replace cn=config values of nsslapd-errorlog-level') + raise + topo.standalone.start(timeout=10) + assert not topo.standalone.ds_error_log.match( + '.*nsslapd-errorlog-level: ignoring 64 \\(since -d 266354688 was given on the command line\\).*') + + +if __name__ == '__main__': + # Run isolated + # -s for DEBUG mode + CURRENT_FILE = os.path.realpath(__file__) + pytest.main("-s %s" % CURRENT_FILE) diff --git a/dirsrvtests/tests/suites/ds_tools/__init__.py b/dirsrvtests/tests/suites/ds_tools/__init__.py new file mode 100644 index 0000000..10d3805 --- /dev/null +++ b/dirsrvtests/tests/suites/ds_tools/__init__.py @@ -0,0 +1,4 @@ + +""" + :Requirement: 389-ds-base: Directory Server Tools +""" diff --git a/dirsrvtests/tests/suites/ds_tools/logpipe_test.py b/dirsrvtests/tests/suites/ds_tools/logpipe_test.py new file mode 100644 index 0000000..4f0e65d --- /dev/null +++ b/dirsrvtests/tests/suites/ds_tools/logpipe_test.py @@ -0,0 +1,78 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2017 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. 
+# --- END COPYRIGHT BLOCK --- +# +import pytest +import subprocess +from lib389.utils import * +from lib389.topologies import topology_st as topo + +pytestmark = pytest.mark.tier1 + +DEBUGGING = os.getenv("DEBUGGING", default=False) +if DEBUGGING: + logging.getLogger(__name__).setLevel(logging.DEBUG) +else: + logging.getLogger(__name__).setLevel(logging.INFO) +log = logging.getLogger(__name__) + +SYS_TEST_USER = 'dirsrv_testuser' + + +@pytest.fixture(scope="module") +def sys_test_user(request): + """Creates and deletes a system test user""" + + cmd = ['/usr/sbin/useradd', SYS_TEST_USER] + + log.info('Add system test user - {}'.format(SYS_TEST_USER)) + try: + subprocess.call(cmd) + except subprocess.CalledProcessError as e: + log.exception('Failed to add user {} error {}'.format(SYS_TEST_USER, e.output)) + + def fin(): + cmd = ['/usr/sbin/userdel', SYS_TEST_USER] + + log.info('Delete system test user - {}'.format(SYS_TEST_USER)) + try: + subprocess.call(cmd) + except subprocess.CalledProcessError as e: + log.exception('Failed to delete user {} error {}'.format(SYS_TEST_USER, e.output)) + + request.addfinalizer(fin) + + +def test_user_permissions(topo, sys_test_user): + """Check permissions for usual user operations in log dir + + :id: 4e423cd5-300c-4df0-ab40-aec7e51c3be8 + :feature: ds-logpipe + :setup: Standalone instance + :steps: 1. Add a new user to the system + 2. Try to create a logpipe in the log directory with '-u' option specifying the user + 3. 
Delete the user + :expectedresults: Permission denied error happens + """ + + ds_logpipe_path = os.path.join(topo.standalone.ds_paths.bin_dir, 'ds-logpipe.py') + fakelogpipe_path = os.path.join(topo.standalone.ds_paths.log_dir, 'fakelog.pipe') + + # I think we need to add a function for this to lib389, when we will port the full test suite + cmd = [ds_logpipe_path, fakelogpipe_path, '-u', SYS_TEST_USER] + + log.info('Try to create a logpipe in the log directory with "-u" option specifying the user') + with pytest.raises(subprocess.CalledProcessError) as cp: + result = subprocess.check_output(cmd) + assert 'Permission denied' in result + + +if __name__ == '__main__': + # Run isolated + # -s for DEBUG mode + CURRENT_FILE = os.path.realpath(__file__) + pytest.main("-s %s" % CURRENT_FILE) diff --git a/dirsrvtests/tests/suites/ds_tools/replcheck_test.py b/dirsrvtests/tests/suites/ds_tools/replcheck_test.py new file mode 100644 index 0000000..f061386 --- /dev/null +++ b/dirsrvtests/tests/suites/ds_tools/replcheck_test.py @@ -0,0 +1,554 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2018 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. 
+# --- END COPYRIGHT BLOCK --- +# +import pytest +import subprocess +from lib389.utils import * +from lib389.replica import Replicas, Replica, ReplicationManager +from lib389._constants import * +from lib389.config import CertmapLegacy +from lib389.idm.nscontainer import nsContainers +from lib389.idm.user import UserAccounts, TEST_USER_PROPERTIES +from lib389.idm.services import ServiceAccounts +from lib389.topologies import topology_m2 as topo + +pytestmark = pytest.mark.tier1 + +DEBUGGING = os.getenv("DEBUGGING", default=False) +if DEBUGGING: + logging.getLogger(__name__).setLevel(logging.DEBUG) +else: + logging.getLogger(__name__).setLevel(logging.INFO) +log = logging.getLogger(__name__) + + +def _create_container(inst, dn, name): + """Creates container entry""" + + conts = nsContainers(inst, dn) + cont = conts.create(properties={'cn': name}) + time.sleep(1) + return cont + + +def _delete_container(cont): + """Deletes container entry""" + + cont.delete() + time.sleep(1) + + +@pytest.fixture(scope="module") +def topo_tls_ldapi(topo): + """Enable TLS on both suppliers and reconfigure both agreements + to use TLS Client auth. Also, setup ldapi and export DB + """ + + m1 = topo.ms["supplier1"] + m2 = topo.ms["supplier2"] + # Create the certmap before we restart for enable_tls + cm_m1 = CertmapLegacy(m1) + cm_m2 = CertmapLegacy(m2) + + # We need to configure the same maps for both .... + certmaps = cm_m1.list() + certmaps['default']['DNComps'] = None + certmaps['default']['CmapLdapAttr'] = 'nsCertSubjectDN' + + cm_m1.set(certmaps) + cm_m2.set(certmaps) + + [i.enable_tls() for i in topo] + + # Create the replication dns + services = ServiceAccounts(m1, DEFAULT_SUFFIX) + repl_m1 = services.get('%s:%s' % (m1.host, m1.sslport)) + repl_m1.set('nsCertSubjectDN', m1.get_server_tls_subject()) + + repl_m2 = services.get('%s:%s' % (m2.host, m2.sslport)) + repl_m2.set('nsCertSubjectDN', m2.get_server_tls_subject()) + + # Check the replication is "done". 
+ repl = ReplicationManager(DEFAULT_SUFFIX) + repl.wait_for_replication(m1, m2) + # Now change the auth type + + replica_m1 = Replicas(m1).get(DEFAULT_SUFFIX) + agmt_m1 = replica_m1.get_agreements().list()[0] + + agmt_m1.replace_many( + ('nsDS5ReplicaBindMethod', 'SSLCLIENTAUTH'), + ('nsDS5ReplicaTransportInfo', 'SSL'), + ('nsDS5ReplicaPort', '%s' % m2.sslport), + ) + agmt_m1.remove_all('nsDS5ReplicaBindDN') + + replica_m2 = Replicas(m2).get(DEFAULT_SUFFIX) + agmt_m2 = replica_m2.get_agreements().list()[0] + + agmt_m2.replace_many( + ('nsDS5ReplicaBindMethod', 'SSLCLIENTAUTH'), + ('nsDS5ReplicaTransportInfo', 'SSL'), + ('nsDS5ReplicaPort', '%s' % m1.sslport), + ) + agmt_m2.remove_all('nsDS5ReplicaBindDN') + + log.info("Export LDAPTLS_CACERTDIR env variable for ds-replcheck") + os.environ["LDAPTLS_CACERTDIR"] = m1.get_ssca_dir() + + for inst in topo: + inst.config.set('nsslapd-ldapilisten', 'on') + inst.config.set('nsslapd-ldapifilepath', '/var/run/slapd-{}.socket'.format(inst.serverid)) + inst.restart() + + repl.test_replication(m1, m2) + repl.test_replication(m2, m1) + + return topo + + +def replcheck_cmd_list(topo_tls_ldapi): + """Check ds-replcheck tool through ldap, ldaps, ldap with StartTLS, ldapi + and compare exported ldif files + """ + + m1 = topo_tls_ldapi.ms["supplier1"] + m2 = topo_tls_ldapi.ms["supplier2"] + + for inst in topo_tls_ldapi: + inst.stop() + inst.db2ldif(bename=DEFAULT_BENAME, suffixes=[DEFAULT_SUFFIX], excludeSuffixes=[], + encrypt=False, repl_data=True, outputfile='/tmp/export_{}.ldif'.format(inst.serverid)) + inst.start() + + ds_replcheck_path = os.path.join(m1.ds_paths.bin_dir, 'ds-replcheck') + + if ds_is_newer("1.4.1.2"): + replcheck_cmd = [[ds_replcheck_path, 'online', '-b', DEFAULT_SUFFIX, '-D', DN_DM, '-w', PW_DM, '-l', '1', + '-m', 'ldap://{}:{}'.format(m1.host, m1.port), '--conflicts', + '-r', 'ldap://{}:{}'.format(m2.host, m2.port)], + [ds_replcheck_path, 'online', '-b', DEFAULT_SUFFIX, '-D', DN_DM, '-w', PW_DM, '-l', '1', + 
'-m', 'ldaps://{}:{}'.format(m1.host, m1.sslport), '--conflicts', + '-r', 'ldaps://{}:{}'.format(m2.host, m2.sslport)], + [ds_replcheck_path, 'online', '-b', DEFAULT_SUFFIX, '-D', DN_DM, '-w', PW_DM, '-l', '1', + '-m', 'ldap://{}:{}'.format(m1.host, m1.port), '-Z', m1.get_ssca_dir(), + '-r', 'ldap://{}:{}'.format(m2.host, m2.port), '--conflicts'], + [ds_replcheck_path, 'online', '-b', DEFAULT_SUFFIX, '-D', DN_DM, '-w', PW_DM, '-l', '1', + '-m', 'ldapi://%2fvar%2frun%2fslapd-{}.socket'.format(m1.serverid), '--conflict', + '-r', 'ldapi://%2fvar%2frun%2fslapd-{}.socket'.format(m2.serverid)], + [ds_replcheck_path, 'offline', '-b', DEFAULT_SUFFIX, '--conflicts', '--rid', '1', + '-m', '/tmp/export_{}.ldif'.format(m1.serverid), + '-r', '/tmp/export_{}.ldif'.format(m2.serverid)]] + else: + replcheck_cmd = [[ds_replcheck_path, '-b', DEFAULT_SUFFIX, '-D', DN_DM, '-w', PW_DM, '-l', '1', + '-m', 'ldap://{}:{}'.format(m1.host, m1.port), '--conflicts', + '-r', 'ldap://{}:{}'.format(m2.host, m2.port)], + [ds_replcheck_path, '-b', DEFAULT_SUFFIX, '-D', DN_DM, '-w', PW_DM, '-l', '1', + '-m', 'ldaps://{}:{}'.format(m1.host, m1.sslport), '--conflicts', + '-r', 'ldaps://{}:{}'.format(m2.host, m2.sslport)], + [ds_replcheck_path, '-b', DEFAULT_SUFFIX, '-D', DN_DM, '-w', PW_DM, '-l', '1', + '-m', 'ldap://{}:{}'.format(m1.host, m1.port), '-Z', m1.get_ssca_dir(), + '-r', 'ldap://{}:{}'.format(m2.host, m2.port), '--conflicts'], + [ds_replcheck_path, '-b', DEFAULT_SUFFIX, '-D', DN_DM, '-w', PW_DM, '-l', '1', + '-m', 'ldapi://%2fvar%2frun%2fslapd-{}.socket'.format(m1.serverid), '--conflict', + '-r', 'ldapi://%2fvar%2frun%2fslapd-{}.socket'.format(m2.serverid)], + [ds_replcheck_path, '-b', DEFAULT_SUFFIX, '-D', DN_DM, '-w', PW_DM, '--conflicts', + '-M', '/tmp/export_{}.ldif'.format(m1.serverid), + '-R', '/tmp/export_{}.ldif'.format(m2.serverid)]] + + return replcheck_cmd + +@pytest.mark.skipif(ds_is_older("1.4.1.2"), reason="Not implemented") +def test_state(topo_tls_ldapi): + """Check "state" 
report + + :id: 1cc6b28b-8a42-45fb-ab50-9552db0ac178 + :customerscenario: True + :setup: Two supplier replication + :steps: + 1. Get the replication state value + 2. The state value is as expected + :expectedresults: + 1. It should be successful + 2. It should be successful + """ + m1 = topo_tls_ldapi.ms["supplier1"] + m2 = topo_tls_ldapi.ms["supplier2"] + ds_replcheck_path = os.path.join(m1.ds_paths.bin_dir, 'ds-replcheck') + + tool_cmd = [ds_replcheck_path, 'state', '-b', DEFAULT_SUFFIX, '-D', DN_DM, '-w', PW_DM, + '-m', 'ldaps://{}:{}'.format(m1.host, m1.sslport), + '-r', 'ldaps://{}:{}'.format(m2.host, m2.sslport)] + result = subprocess.check_output(tool_cmd, encoding='utf-8') + assert (result.rstrip() == "Replication State: Supplier and Replica are in perfect synchronization") + + +def test_check_ruv(topo_tls_ldapi): + """Check that the report has RUV + + :id: 1cc6b28b-8a42-45fb-ab50-9552db0ac179 + :customerscenario: True + :setup: Two supplier replication + :steps: + 1. Get RUV from supplier and replica + 2. Generate the report + 3. Check that the RUV is mentioned in the report + :expectedresults: + 1. It should be successful + 2. It should be successful + 3. The RUV should be mentioned in the report + """ + + m1 = topo_tls_ldapi.ms["supplier1"] + + replicas_m1 = Replica(m1, DEFAULT_SUFFIX) + ruv_entries = replicas_m1.get_attr_vals_utf8('nsds50ruv') + + for tool_cmd in replcheck_cmd_list(topo_tls_ldapi): + result = subprocess.check_output(tool_cmd, encoding='utf-8') + assert all([ruv_entry in result for ruv_entry in ruv_entries]) + + +def test_missing_entries(topo_tls_ldapi): + """Check that the report has missing entries + + :id: f91b6798-6e6e-420a-ad2f-3222bb908b7d + :customerscenario: True + :setup: Two supplier replication + :steps: + 1. Pause replication between supplier and replica + 2. Add two entries to supplier and two entries to replica + 3. Generate the report + 4. Check that the entries DN are mentioned in the report + :expectedresults: + 1. 
It should be successful + 2. It should be successful + 3. It should be successful + 4. The entries DN should be mentioned in the report + """ + + m1 = topo_tls_ldapi.ms["supplier1"] + m2 = topo_tls_ldapi.ms["supplier2"] + + try: + topo_tls_ldapi.pause_all_replicas() + users_m1 = UserAccounts(m1, DEFAULT_SUFFIX) + user0 = users_m1.create_test_user(1000) + user1 = users_m1.create_test_user(1001) + users_m2 = UserAccounts(m2, DEFAULT_SUFFIX) + user2 = users_m2.create_test_user(1002) + user3 = users_m2.create_test_user(1003) + + for tool_cmd in replcheck_cmd_list(topo_tls_ldapi): + result = subprocess.check_output(tool_cmd, encoding='utf-8').lower() + assert user0.dn.lower() in result + assert user1.dn.lower() in result + finally: + user0.delete() + user1.delete() + user2.delete() + user3.delete() + topo_tls_ldapi.resume_all_replicas() + + +def test_tombstones(topo_tls_ldapi): + """Check that the report mentions right number of tombstones + + :id: bd27de78-0046-431c-8240-a93052df1cdc + :customerscenario: True + :setup: Two supplier replication + :steps: + 1. Add an entry to supplier and wait for replication + 2. Pause replication between supplier and replica + 3. Delete the entry from supplier + 4. Generate the report + 5. Check that we have different number of tombstones in the report + :expectedresults: + 1. It should be successful + 2. It should be successful + 3. It should be successful + 4. It should be successful + 5. 
It should be successful + """ + + m1 = topo_tls_ldapi.ms["supplier1"] + + try: + users_m1 = UserAccounts(m1, DEFAULT_SUFFIX) + user_m1 = users_m1.create(properties=TEST_USER_PROPERTIES) + time.sleep(1) + topo_tls_ldapi.pause_all_replicas() + user_m1.delete() + time.sleep(2) + + for tool_cmd in replcheck_cmd_list(topo_tls_ldapi): + result = subprocess.check_output(tool_cmd, encoding='utf-8').lower() + log.debug(result) + finally: + topo_tls_ldapi.resume_all_replicas() + + +def test_conflict_entries(topo_tls_ldapi): + """Check that the report has conflict entries + + :id: 4eda0c5d-0824-4cfd-896e-845faf49ddaf + :customerscenario: True + :setup: Two supplier replication + :steps: + 1. Pause replication between supplier and replica + 2. Add two entries to supplier and two entries to replica + 3. Delete first entry from supplier + 4. Add a child to the first entry + 5. Resume replication between supplier and replica + 6. Generate the report + 7. Check that the entries DN are mentioned in the report + :expectedresults: + 1. It should be successful + 2. It should be successful + 3. It should be successful + 4. It should be successful + 5. It should be successful + 6. It should be successful + 7. 
The entries DN should be mentioned in the report + """ + + m1 = topo_tls_ldapi.ms["supplier1"] + m2 = topo_tls_ldapi.ms["supplier2"] + + topo_tls_ldapi.pause_all_replicas() + + _create_container(m1, DEFAULT_SUFFIX, 'conflict_parent0') + _create_container(m2, DEFAULT_SUFFIX, 'conflict_parent0') + cont_p_m1 = _create_container(m1, DEFAULT_SUFFIX, 'conflict_parent1') + cont_p_m2 = _create_container(m2, DEFAULT_SUFFIX, 'conflict_parent1') + _delete_container(cont_p_m1) + _create_container(m2, cont_p_m2.dn, 'conflict_child0') + + topo_tls_ldapi.resume_all_replicas() + time.sleep(5) + + for tool_cmd in replcheck_cmd_list(topo_tls_ldapi): + result = subprocess.check_output(tool_cmd, encoding='utf-8') + assert 'conflict_parent1' in result + + +def test_inconsistencies(topo_tls_ldapi): + """Check that the report mentions inconsistencies with attributes + + :id: c8fe3e84-b346-4969-8f5d-3462b643a1d2 + :customerscenario: True + :setup: Two supplier replication + :steps: + 1. Add an entry to supplier and wait for replication + 2. Pause replication between supplier and replica + 3. Set different description attr values to supplier and replica + 4. Add telephoneNumber attribute to supplier and not to replica + 5. Generate the report + 6. Check that attribute values are mentioned in the report + 7. Generate the report with -i option to ignore some attributes + 8. Check that attribute values are mentioned in the report + :expectedresults: + 1. It should be successful + 2. It should be successful + 3. It should be successful + 4. It should be successful + 5. It should be successful + 6. The attribute values should be mentioned in the report + 7. It should be successful + 8. 
The attribute values should not be mentioned in the report + """ + + m1 = topo_tls_ldapi.ms["supplier1"] + m2 = topo_tls_ldapi.ms["supplier2"] + attr_m1 = "m1_inconsistency" + attr_m2 = "m2_inconsistency" + attr_first = "first ordered valued" + attr_second = "second ordered valued" + attr_m1_only = "123123123" + + try: + users_m1 = UserAccounts(m1, DEFAULT_SUFFIX) + users_m2 = UserAccounts(m2, DEFAULT_SUFFIX) + user_m1 = users_m1.create(properties=TEST_USER_PROPERTIES) + time.sleep(1) + user_m2 = users_m2.get(user_m1.rdn) + topo_tls_ldapi.pause_all_replicas() + user_m1.set("description", attr_m1) + user_m2.set("description", attr_m2) + user_m1.set("telephonenumber", attr_m1_only) + # Add the same multi-valued attrs, but out of order + user_m1.set("cn", [attr_first, attr_second]) + user_m2.set("cn", [attr_second, attr_first]) + time.sleep(2) + + for tool_cmd in replcheck_cmd_list(topo_tls_ldapi): + result = subprocess.check_output(tool_cmd, encoding='utf-8').lower() + assert attr_m1 in result + assert attr_m2 in result + assert attr_m1_only in result + if ds_is_newer("1.3.9.1", "1.4.1.2"): + assert attr_first not in result + assert attr_second not in result + # Ignore some attributes and check the output + tool_cmd.extend(['-i', '{},{}'.format('description', 'telephonenumber')]) + result = subprocess.check_output(tool_cmd, encoding='utf-8').lower() + assert attr_m1 not in result + assert attr_m2 not in result + assert attr_m1_only not in result + if ds_is_newer("1.3.9.1", "1.4.1.2"): + assert attr_first not in result + assert attr_second not in result + + finally: + topo_tls_ldapi.resume_all_replicas() + user_m1.delete() + + +def test_suffix_exists(topo_tls_ldapi): + """Check if wrong suffix is provided, server is giving Error: Failed + to validate suffix. + + :id: ce75debc-c07f-4e72-8787-8f99cbfaf1e2 + :customerscenario: True + :setup: Two supplier replication + :steps: + 1. Run ds-replcheck with wrong suffix (Non Existing) + :expectedresults: + 1. 
It should be unsuccessful + """ + m1 = topo_tls_ldapi.ms["supplier1"] + m2 = topo_tls_ldapi.ms["supplier2"] + ds_replcheck_path = os.path.join(m1.ds_paths.bin_dir, 'ds-replcheck') + + if ds_is_newer("1.4.1.2"): + tool_cmd = [ds_replcheck_path, 'online', '-b', 'dc=test,dc=com', '-D', DN_DM, '-w', PW_DM, + '-m', 'ldaps://{}:{}'.format(m1.host, m1.sslport), + '-r', 'ldaps://{}:{}'.format(m2.host, m2.sslport)] + else: + tool_cmd = [ds_replcheck_path, '-b', 'dc=test,dc=com', '-D', DN_DM, '-w', PW_DM, + '-m', 'ldaps://{}:{}'.format(m1.host, m1.sslport), + '-r', 'ldaps://{}:{}'.format(m2.host, m2.sslport)] + + result1 = subprocess.Popen(tool_cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, encoding='utf-8') + result = result1.communicate() + assert "Failed to validate suffix" in result[0] + + +def test_check_missing_tombstones(topo_tls_ldapi): + """Check missing tombstone entries is not reported. + + :id: 93067a5a-416e-4243-9418-c4dfcf42e093 + :customerscenario: True + :setup: Two supplier replication + :steps: + 1. Pause replication between supplier and replica + 2. Add and delete an entry on the supplier + 3. Run ds-replcheck + 4. Verify there are NO complaints about missing entries/tombstones + :expectedresults: + 1. It should be successful + 2. It should be successful + 3. It should be successful + 4. It should be successful + """ + m1 = topo_tls_ldapi.ms["supplier1"] + m2 = topo_tls_ldapi.ms["supplier2"] + + try: + topo_tls_ldapi.pause_all_replicas() + users_m1 = UserAccounts(m1, DEFAULT_SUFFIX) + user0 = users_m1.create_test_user(1000) + user0.delete() + for tool_cmd in replcheck_cmd_list(topo_tls_ldapi): + result = subprocess.check_output(tool_cmd, encoding='utf-8').lower() + assert "entries missing on replica" not in result + + finally: + topo_tls_ldapi.resume_all_replicas() + + +def test_dsreplcheck_with_password_file(topo_tls_ldapi, tmpdir): + """Check ds-replcheck works if password file is provided + with -y option. 
+ + :id: 0d847ec7-6eaf-4cb5-a9c6-e4a5a1778f93 + :customerscenario: True + :setup: Two supplier replication + :steps: + 1. Create a password file with the default password of the server. + 2. Run ds-replcheck with -y option (used to pass password file) + :expectedresults: + 1. It should be successful + 2. It should be successful + """ + m1 = topo_tls_ldapi.ms["supplier1"] + m2 = topo_tls_ldapi.ms["supplier2"] + + ds_replcheck_path = os.path.join(m1.ds_paths.bin_dir, 'ds-replcheck') + f = tmpdir.mkdir("my_dir").join("password_file.txt") + f.write(PW_DM) + + if ds_is_newer("1.4.1.2"): + tool_cmd = [ds_replcheck_path, 'online', '-b', DEFAULT_SUFFIX, '-D', DN_DM, '-y', f.strpath, + '-m', 'ldaps://{}:{}'.format(m1.host, m1.sslport), + '-r', 'ldaps://{}:{}'.format(m2.host, m2.sslport)] + else: + tool_cmd = [ds_replcheck_path, '-b', DEFAULT_SUFFIX, '-D', DN_DM, '-y', f.strpath, + '-m', 'ldaps://{}:{}'.format(m1.host, m1.sslport), + '-r', 'ldaps://{}:{}'.format(m2.host, m2.sslport)] + + subprocess.Popen(tool_cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, encoding='utf-8') + + +@pytest.mark.ds51102 +@pytest.mark.bz1836428 +@pytest.mark.skipif(ds_is_older('1.4.1'), reason='Not implemented') +def test_dsreplcheck_timeout_connection_mechanisms(topo_tls_ldapi): + """Check that ds-replcheck timeout option works with various connection mechanisms + + :id: aeeb99c9-09e2-45dc-bd75-9f95409babe7 + :customerscenario: True + :setup: Two supplier replication + :steps: + 1. Create two suppliers with various connection mechanisms configured + 2. Run ds-replcheck with -t option + :expectedresults: + 1. Success + 2. 
Success + """ + + OUTPUT = 'Supplier and Replica are in perfect synchronization' + + m1 = topo_tls_ldapi.ms["supplier1"] + m2 = topo_tls_ldapi.ms["supplier2"] + + ds_replcheck_path = os.path.join(m1.ds_paths.bin_dir, 'ds-replcheck') + + replcheck_cmd = [[ds_replcheck_path, 'online', '-b', DEFAULT_SUFFIX, '-D', DN_DM, '-w', PW_DM, '-l', '1', + '-m', 'ldap://{}:{}'.format(m1.host, m1.port), '--conflicts', + '-r', 'ldap://{}:{}'.format(m2.host, m2.port), '-t', '120'], + [ds_replcheck_path, 'online', '-b', DEFAULT_SUFFIX, '-D', DN_DM, '-w', PW_DM, '-l', '1', + '-m', 'ldaps://{}:{}'.format(m1.host, m1.sslport), '--conflicts', + '-r', 'ldaps://{}:{}'.format(m2.host, m2.sslport), '-t', '120'], + [ds_replcheck_path, 'online', '-b', DEFAULT_SUFFIX, '-D', DN_DM, '-w', PW_DM, '-l', '1', + '-m', 'ldap://{}:{}'.format(m1.host, m1.port), '-Z', m1.get_ssca_dir(), + '-r', 'ldap://{}:{}'.format(m2.host, m2.port), '--conflicts', '-t', '120'], + [ds_replcheck_path, 'online', '-b', DEFAULT_SUFFIX, '-D', DN_DM, '-w', PW_DM, '-l', '1', + '-m', 'ldapi://%2fvar%2frun%2fslapd-{}.socket'.format(m1.serverid), '--conflict', + '-r', 'ldapi://%2fvar%2frun%2fslapd-{}.socket'.format(m2.serverid), '-t', '120']] + + log.info('Run ds-replcheck with -t option') + for connection in replcheck_cmd: + result = subprocess.check_output(connection) + assert OUTPUT in ensure_str(result) + + +if __name__ == '__main__': + # Run isolated + # -s for DEBUG mode + CURRENT_FILE = os.path.realpath(__file__) diff --git a/dirsrvtests/tests/suites/dynamic_plugins/__init__.py b/dirsrvtests/tests/suites/dynamic_plugins/__init__.py new file mode 100644 index 0000000..8041ca2 --- /dev/null +++ b/dirsrvtests/tests/suites/dynamic_plugins/__init__.py @@ -0,0 +1,3 @@ +""" + :Requirement: 389-ds-base: Dynamic Plugins +""" diff --git a/dirsrvtests/tests/suites/dynamic_plugins/dynamic_plugins_test.py b/dirsrvtests/tests/suites/dynamic_plugins/dynamic_plugins_test.py new file mode 100644 index 0000000..7558cc0 --- /dev/null +++ 
b/dirsrvtests/tests/suites/dynamic_plugins/dynamic_plugins_test.py @@ -0,0 +1,445 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2016 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. +# --- END COPYRIGHT BLOCK --- +# +''' +Created on Dec 09, 2014 + +@author: mreynolds +''' +import logging + +import ldap.sasl +import pytest +from lib389.tasks import * +from lib389.replica import ReplicationManager +from lib389.config import LDBMConfig +from lib389._constants import * +from lib389.topologies import topology_m2 +from ..plugins import acceptance_test +from . import stress_tests + +pytestmark = pytest.mark.tier1 + +log = logging.getLogger(__name__) + + +def check_replicas(topology_m2): + """Check that replication is in sync and working""" + + m1 = topology_m2.ms["supplier1"] + m2 = topology_m2.ms["supplier2"] + + log.info('Checking if replication is in sync...') + repl = ReplicationManager(DEFAULT_SUFFIX) + repl.test_replication_topology(topology_m2) + # + # Verify the databases are identical. 
There should not be any "user, entry, employee" entries + # + log.info('Checking if the data is the same between the replicas...') + + # Check the supplier + try: + entries = m1.search_s(DEFAULT_SUFFIX, + ldap.SCOPE_SUBTREE, + "(|(uid=person*)(uid=entry*)(uid=employee*))") + if len(entries) > 0: + log.error('Supplier database has incorrect data set!\n') + assert False + except ldap.LDAPError as e: + log.fatal('Unable to search db on supplier: ' + e.message['desc']) + assert False + + # Check the consumer + try: + entries = m2.search_s(DEFAULT_SUFFIX, + ldap.SCOPE_SUBTREE, + "(|(uid=person*)(uid=entry*)(uid=employee*))") + if len(entries) > 0: + log.error('Consumer database in not consistent with supplier database') + assert False + except ldap.LDAPError as e: + log.fatal('Unable to search db on consumer: ' + e.message['desc']) + assert False + + log.info('Data is consistent across the replicas.\n') + +#unstable or unstatus tests, skipped for now +@pytest.mark.flaky(max_runs=2, min_passes=1) +def test_acceptance(topology_m2): + """Exercise each plugin and its main features, while + changing the configuration without restarting the server. + + :id: 96136538-0151-4b09-9933-0e0cbf2c786c + :setup: 2 Supplier Instances + :steps: + 1. Pause all replication + 2. Set nsslapd-dynamic-plugins to on + 3. Try to update LDBM config entry + 4. Go through all plugin basic functionality + 5. Resume replication + 6. Go through all plugin basic functionality again + 7. Check that data in sync and replication is working + :expectedresults: + 1. Success + 2. Success + 3. Success + 4. Success + 5. Success + 6. Success + 7. 
Success + """ + + m1 = topology_m2.ms["supplier1"] + msg = ' (no replication)' + replication_run = False + + # First part of the test should be without replication + topology_m2.pause_all_replicas() + + # First enable dynamic plugins + m1.config.replace('nsslapd-dynamic-plugins', 'on') + + # Test that critical plugins can be updated even though the change might not be applied + ldbm_config = LDBMConfig(m1) + ldbm_config.replace('description', 'test') + + while True: + # First run the tests with replication disabled, then rerun them with replication set up + + ############################################################################ + # Test plugin functionality + ############################################################################ + + log.info('####################################################################') + log.info('Testing Dynamic Plugins Functionality' + msg + '...') + log.info('####################################################################\n') + + acceptance_test.check_all_plugins(topology_m2) + + log.info('####################################################################') + log.info('Successfully Tested Dynamic Plugins Functionality' + msg + '.') + log.info('####################################################################\n') + + if replication_run: + # We're done. 
+ break + else: + log.info('Resume replication and run everything one more time') + topology_m2.resume_all_replicas() + + replication_run = True + msg = ' (replication enabled)' + time.sleep(1) + + ############################################################################ + # Check replication, and data are in sync + ############################################################################ + check_replicas(topology_m2) + +#unstable or unstatus tests, skipped for now +@pytest.mark.flaky(max_runs=2, min_passes=1) +def test_memory_corruption(topology_m2): + """Check the plugins for memory corruption issues while + dynamic plugins option is enabled + + :id: 96136538-0151-4b09-9933-0e0cbf2c7862 + :setup: 2 Supplier Instances + :steps: + 1. Pause all replication + 2. Set nsslapd-dynamic-plugins to on + 3. Try to update LDBM config entry + 4. Restart the plugin many times in a linked list fashion + restarting previous and preprevious plugins in the list of all plugins + 5. Run the functional test + 6. Repeat 4 and 5 steps for all plugins + 7. Resume replication + 8. Go through 4-6 steps once more + 9. Check that data in sync and replication is working + :expectedresults: + 1. Success + 2. Success + 3. Success + 4. Success + 5. Success + 6. Success + 7. Success + 8. Success + 9. 
Success + """ + + + m1 = topology_m2.ms["supplier1"] + msg = ' (no replication)' + replication_run = False + + # First part of the test should be without replication + topology_m2.pause_all_replicas() + + # First enable dynamic plugins + m1.config.replace('nsslapd-dynamic-plugins', 'on') + + # Test that critical plugins can be updated even though the change might not be applied + ldbm_config = LDBMConfig(m1) + ldbm_config.replace('description', 'test') + + while True: + # First run the tests with replication disabled, then rerun them with replication set up + + ############################################################################ + # Test the stability by exercising the internal lists, callabcks, and task handlers + ############################################################################ + + log.info('####################################################################') + log.info('Testing Dynamic Plugins for Memory Corruption' + msg + '...') + log.info('####################################################################\n') + prev_plugin_test = None + prev_prev_plugin_test = None + + for plugin_test in acceptance_test.func_tests: + # + # Restart the plugin several times (and prev plugins) - work that linked list + # + plugin_test(topology_m2, "restart") + + if prev_prev_plugin_test: + prev_prev_plugin_test(topology_m2, "restart") + + plugin_test(topology_m2, "restart") + + if prev_plugin_test: + prev_plugin_test(topology_m2, "restart") + + plugin_test(topology_m2, "restart") + + # Now run the functional test + plugin_test(topology_m2, "dynamic") + + # Set the previous tests + if prev_plugin_test: + prev_prev_plugin_test = prev_plugin_test + prev_plugin_test = plugin_test + + log.info('####################################################################') + log.info('Successfully Tested Dynamic Plugins for Memory Corruption' + msg + '.') + log.info('####################################################################\n') + + if replication_run: + # 
We're done. + break + else: + log.info('Resume replication and run everything one more time') + topology_m2.resume_all_replicas() + + replication_run = True + msg = ' (replication enabled)' + time.sleep(1) + + ############################################################################ + # Check replication, and data are in sync + ############################################################################ + check_replicas(topology_m2) + +#unstable or unstatus tests, skipped for now +@pytest.mark.flaky(max_runs=2, min_passes=1) +@pytest.mark.tier2 +def test_stress(topology_m2): + """Test plugins while under a big load. Perform the test 5 times + + :id: 96136538-0151-4b09-9933-0e0cbf2c7863 + :setup: 2 Supplier Instances + :steps: + 1. Pause all replication + 2. Set nsslapd-dynamic-plugins to on + 3. Try to update LDBM config entry + 4. Do one run through all tests + 5. Enable Referential integrity and MemberOf plugins + 6. Launch three new threads to add a bunch of users + 7. While we are adding users restart the MemberOf and + Linked Attributes plugins many times + 8. Wait for the 'adding' threads to complete + 9. Now launch three threads to delete the users + 10. Restart both the MemberOf, Referential integrity and + Linked Attributes plugins during these deletes + 11. Wait for the 'deleting' threads to complete + 12. Now make sure both the MemberOf and Referential integrity plugins still work correctly + 13. Cleanup the stress tests (delete the group entry) + 14. Perform 4-13 steps five times + 15. Resume replication + 16. Go through 4-14 steps once more + 17. Check that data in sync and replication is working + :expectedresults: + 1. Success + 2. Success + 3. Success + 4. Success + 5. Success + 6. Success + 7. Success + 8. Success + 9. Success + 10. Success + 11. Success + 12. Success + 13. Success + 14. Success + 15. Success + 16. Success + 17. 
Success + """ + + m1 = topology_m2.ms["supplier1"] + msg = ' (no replication)' + replication_run = False + stress_max_runs = 5 + + # First part of the test should be without replication + topology_m2.pause_all_replicas() + + # First enable dynamic plugins + m1.config.replace('nsslapd-dynamic-plugins', 'on') + + # Test that critical plugins can be updated even though the change might not be applied + ldbm_config = LDBMConfig(m1) + ldbm_config.replace('description', 'test') + + while True: + # First run the tests with replication disabled, then rerun them with replication set up + + log.info('Do one run through all tests ' + msg + '...') + acceptance_test.check_all_plugins(topology_m2) + + log.info('####################################################################') + log.info('Stressing Dynamic Plugins' + msg + '...') + log.info('####################################################################\n') + + stress_tests.configureMO(m1) + stress_tests.configureRI(m1) + + stress_count = 0 + while stress_count < stress_max_runs: + log.info('####################################################################') + log.info('Running stress test' + msg + '. Run (%d/%d)...' 
% (stress_count + 1, stress_max_runs)) + log.info('####################################################################\n') + + # Launch three new threads to add a bunch of users + add_users = stress_tests.AddUsers(m1, 'employee', True) + add_users.start() + add_users2 = stress_tests.AddUsers(m1, 'entry', True) + add_users2.start() + add_users3 = stress_tests.AddUsers(m1, 'person', True) + add_users3.start() + time.sleep(1) + + # While we are adding users restart the MO plugin and an idle plugin + m1.plugins.disable(name=PLUGIN_MEMBER_OF) + m1.plugins.enable(name=PLUGIN_MEMBER_OF) + time.sleep(1) + m1.plugins.disable(name=PLUGIN_MEMBER_OF) + time.sleep(1) + m1.plugins.enable(name=PLUGIN_MEMBER_OF) + m1.plugins.disable(name=PLUGIN_LINKED_ATTRS) + m1.plugins.enable(name=PLUGIN_LINKED_ATTRS) + time.sleep(1) + m1.plugins.disable(name=PLUGIN_MEMBER_OF) + m1.plugins.enable(name=PLUGIN_MEMBER_OF) + time.sleep(2) + m1.plugins.disable(name=PLUGIN_MEMBER_OF) + time.sleep(1) + m1.plugins.enable(name=PLUGIN_MEMBER_OF) + m1.plugins.disable(name=PLUGIN_LINKED_ATTRS) + m1.plugins.enable(name=PLUGIN_LINKED_ATTRS) + m1.plugins.disable(name=PLUGIN_MEMBER_OF) + time.sleep(1) + m1.plugins.enable(name=PLUGIN_MEMBER_OF) + m1.plugins.disable(name=PLUGIN_MEMBER_OF) + m1.plugins.enable(name=PLUGIN_MEMBER_OF) + + # Wait for the 'adding' threads to complete + add_users.join() + add_users2.join() + add_users3.join() + + # Now launch three threads to delete the users + del_users = stress_tests.DelUsers(m1, 'employee') + del_users.start() + del_users2 = stress_tests.DelUsers(m1, 'entry') + del_users2.start() + del_users3 = stress_tests.DelUsers(m1, 'person') + del_users3.start() + time.sleep(1) + + # Restart both the MO, RI plugins during these deletes, and an idle plugin + m1.plugins.disable(name=PLUGIN_REFER_INTEGRITY) + m1.plugins.disable(name=PLUGIN_MEMBER_OF) + m1.plugins.enable(name=PLUGIN_MEMBER_OF) + m1.plugins.enable(name=PLUGIN_REFER_INTEGRITY) + time.sleep(1) + 
m1.plugins.disable(name=PLUGIN_REFER_INTEGRITY) + time.sleep(1) + m1.plugins.disable(name=PLUGIN_MEMBER_OF) + time.sleep(1) + m1.plugins.enable(name=PLUGIN_MEMBER_OF) + time.sleep(1) + m1.plugins.enable(name=PLUGIN_REFER_INTEGRITY) + m1.plugins.disable(name=PLUGIN_LINKED_ATTRS) + m1.plugins.enable(name=PLUGIN_LINKED_ATTRS) + m1.plugins.disable(name=PLUGIN_REFER_INTEGRITY) + m1.plugins.disable(name=PLUGIN_MEMBER_OF) + m1.plugins.enable(name=PLUGIN_MEMBER_OF) + m1.plugins.enable(name=PLUGIN_REFER_INTEGRITY) + time.sleep(2) + m1.plugins.disable(name=PLUGIN_REFER_INTEGRITY) + time.sleep(1) + m1.plugins.disable(name=PLUGIN_MEMBER_OF) + time.sleep(1) + m1.plugins.enable(name=PLUGIN_MEMBER_OF) + time.sleep(1) + m1.plugins.enable(name=PLUGIN_REFER_INTEGRITY) + m1.plugins.disable(name=PLUGIN_LINKED_ATTRS) + m1.plugins.enable(name=PLUGIN_LINKED_ATTRS) + + # Wait for the 'deleting' threads to complete + del_users.join() + del_users2.join() + del_users3.join() + + # Now make sure both the MO and RI plugins still work correctly + acceptance_test.func_tests[8](topology_m2, "dynamic") # RI plugin + acceptance_test.func_tests[5](topology_m2, "dynamic") # MO plugin + + # Cleanup the stress tests + stress_tests.cleanup(m1) + + stress_count += 1 + log.info('####################################################################') + log.info('Successfully Stressed Dynamic Plugins' + msg + + '. Completed (%d/%d)' % (stress_count, stress_max_runs)) + log.info('####################################################################\n') + + if replication_run: + # We're done. 
+ break + else: + log.info('Resume replication and run everything one more time') + topology_m2.resume_all_replicas() + + replication_run = True + msg = ' (replication enabled)' + time.sleep(1) + + ############################################################################ + # Check replication, and data are in sync + ############################################################################ + check_replicas(topology_m2) + + +if __name__ == '__main__': + # Run isolated + # -s for DEBUG mode + CURRENT_FILE = os.path.realpath(__file__) + pytest.main("-s %s" % CURRENT_FILE) diff --git a/dirsrvtests/tests/suites/dynamic_plugins/notice_for_restart_test.py b/dirsrvtests/tests/suites/dynamic_plugins/notice_for_restart_test.py new file mode 100644 index 0000000..d1c8ca7 --- /dev/null +++ b/dirsrvtests/tests/suites/dynamic_plugins/notice_for_restart_test.py @@ -0,0 +1,51 @@ + +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2020 William Brown +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. +# --- END COPYRIGHT BLOCK --- + +import logging +import ldap +import time +import pytest +from lib389.topologies import topology_st as topology +from lib389.utils import ds_is_older +from lib389.paths import Paths +from lib389.plugins import MemberOfPlugin + +default_paths = Paths() +pytestmark = pytest.mark.tier1 + +log = logging.getLogger(__name__) + +@pytest.mark.skipif(ds_is_older('1.4.4.0'), reason="Notice not generated in older versions") +def test_notice_when_dynamic_not_enabled(topology): + """ Test to show the logged noticed when dynamic plugins is disabled. + + :id: e4923789-c187-44b0-8734-34f26cbae06e + + :setup: Standalone instance + + :steps: + 1. Ensure Dynamic Plugins is disabled + 2. Enable a plugin + + :expectedresults: + 1. Success + 2. 
Notice generated + """ + st = topology.standalone + + st.config.set("nsslapd-dynamic-plugins", "off") + st.restart() + + mo = MemberOfPlugin(st) + mo.enable() + # Now check the error log. + pattern = ".*nsslapd-dynamic-plugins is off.*" + assert st.ds_error_log.match(pattern) + + diff --git a/dirsrvtests/tests/suites/dynamic_plugins/stress_tests.py b/dirsrvtests/tests/suites/dynamic_plugins/stress_tests.py new file mode 100644 index 0000000..0f62b4f --- /dev/null +++ b/dirsrvtests/tests/suites/dynamic_plugins/stress_tests.py @@ -0,0 +1,131 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2016 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. +# --- END COPYRIGHT BLOCK --- +# +''' +Created on Dec 16, 2014 + +@author: mreynolds +''' +import logging +import threading + +import ldap +import pytest +from lib389 import Entry +from lib389._constants import * +from lib389.properties import * +from lib389.plugins import ReferentialIntegrityPlugin, MemberOfPlugin +from lib389.utils import * +from lib389.idm.directorymanager import * + +pytestmark = pytest.mark.tier2 + +log = logging.getLogger(__name__) + +NUM_USERS = 250 +GROUP_DN = 'cn=stress-group,' + DEFAULT_SUFFIX + + +# Configure Referential Integrity Plugin for stress test +def configureRI(inst): + plugin = ReferentialIntegrityPlugin(inst) + plugin.enable() + plugin.replace('referint-membership-attr', 'uniquemember') + + +# Configure MemberOf Plugin for stress test +def configureMO(inst): + plugin = MemberOfPlugin(inst) + plugin.enable() + plugin.replace('memberofgroupattr', 'uniquemember') + + +def cleanup(conn): + try: + conn.delete_s(GROUP_DN) + except ldap.LDAPError as e: + log.fatal('cleanup: failed to delete group (' + GROUP_DN + ') error: ' + e.message['desc']) + assert False + + +class DelUsers(threading.Thread): + def __init__(self, inst, rdnval): + threading.Thread.__init__(self) + self.daemon = True + self.inst = inst + self.rdnval = 
rdnval + + def run(self): + dm = DirectoryManager(self.inst) + conn = dm.bind() + idx = 0 + log.info('DelUsers - Deleting ' + str(NUM_USERS) + ' entries (' + self.rdnval + ')...') + while idx < NUM_USERS: + USER_DN = 'uid=' + self.rdnval + str(idx) + ',' + DEFAULT_SUFFIX + try: + conn.delete_s(USER_DN) + except ldap.LDAPError as e: + if e == ldap.UNAVAILABLE or e == ldap.SERVER_DOWN: + log.fatal('DeleteUsers: failed to delete (' + USER_DN + ') error: ' + e.message['desc']) + assert False + + idx += 1 + + conn.close() + log.info('DelUsers - Finished deleting ' + str(NUM_USERS) + ' entries (' + self.rdnval + ').') + + +class AddUsers(threading.Thread): + def __init__(self, inst, rdnval, addToGroup): + threading.Thread.__init__(self) + self.daemon = True + self.inst = inst + self.addToGroup = addToGroup + self.rdnval = rdnval + + def run(self): + # Start adding users + dm = DirectoryManager(self.inst) + conn = dm.bind() + idx = 0 + + if self.addToGroup: + try: + conn.add_s(Entry((GROUP_DN, + {'objectclass': b'top groupOfNames groupOfUniqueNames'.split(), + 'cn': 'stress-group'}))) + except ldap.LDAPError as e: + if e == ldap.UNAVAILABLE or e == ldap.SERVER_DOWN: + log.fatal('AddUsers: failed to add group (' + GROUP_DN + ') error: ' + e.message['desc']) + assert False + + log.info('AddUsers - Adding ' + str(NUM_USERS) + ' entries (' + self.rdnval + ')...') + + while idx < NUM_USERS: + USER_DN = 'uid=' + self.rdnval + str(idx) + ',' + DEFAULT_SUFFIX + try: + conn.add_s(Entry((USER_DN, {'objectclass': b'top nsOrgPerson'.split(), + 'uid': ensure_bytes('user' + str(idx))}))) + except ldap.LDAPError as e: + if e == ldap.UNAVAILABLE or e == ldap.SERVER_DOWN: + log.fatal('AddUsers: failed to add (' + USER_DN + ') error: ' + e.message['desc']) + assert False + + if self.addToGroup: + # Add the user to the group + try: + conn.modify_s(GROUP_DN, [(ldap.MOD_ADD, 'uniquemember', ensure_bytes(USER_DN))]) + except ldap.LDAPError as e: + if e == ldap.UNAVAILABLE or e == 
ldap.SERVER_DOWN: + log.fatal('AddUsers: Failed to add user' + USER_DN + ' to group: error ' + e.message['desc']) + assert False + + idx += 1 + + conn.close() + log.info('AddUsers - Finished adding ' + str(NUM_USERS) + ' entries (' + self.rdnval + ').') diff --git a/dirsrvtests/tests/suites/entryuuid/__init__.py b/dirsrvtests/tests/suites/entryuuid/__init__.py new file mode 100644 index 0000000..f6b9ae6 --- /dev/null +++ b/dirsrvtests/tests/suites/entryuuid/__init__.py @@ -0,0 +1,3 @@ +""" + :Requirement: 389-ds-base: Entry uuid +""" diff --git a/dirsrvtests/tests/suites/entryuuid/basic_test.py b/dirsrvtests/tests/suites/entryuuid/basic_test.py new file mode 100644 index 0000000..2fee0af --- /dev/null +++ b/dirsrvtests/tests/suites/entryuuid/basic_test.py @@ -0,0 +1,337 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2020 William Brown +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. +# --- END COPYRIGHT BLOCK --- + +import ldap +import pytest +import time +import shutil +import uuid +import subprocess +import pytest +import logging +from lib389.idm.user import nsUserAccounts, UserAccounts +from lib389.idm.account import Accounts +from lib389.idm.domain import Domain +from lib389.topologies import topology_st as topology +from lib389.backend import Backends +from lib389.paths import Paths +from lib389.utils import ds_is_older +from lib389._constants import * +from lib389.plugins import EntryUUIDPlugin + +default_paths = Paths() + +pytestmark = pytest.mark.tier1 +log = logging.getLogger(__name__) + +DATADIR1 = os.path.join(os.path.dirname(__file__), '../../data/entryuuid/') +IMPORT_UUID_A = "973e1bbf-ba9c-45d4-b01b-ff7371fd9008" +UUID_BETWEEN = "eeeeeeee-0000-0000-0000-000000000000" +IMPORT_UUID_B = "f6df8fe9-6b30-46aa-aa13-f0bf755371e8" +UUID_MIN = "00000000-0000-0000-0000-000000000000" +UUID_MAX = "ffffffff-ffff-ffff-ffff-ffffffffffff" + +@pytest.mark.skipif(ds_is_older('1.4.3.27'), reason="CLI Entryuuid 
is not available in prior versions") +def test_cli_entryuuid_plugin_fixup(topology): + """Test that dsconf CLI entryuuid attribute is enabled and can execute. + + :id: 91b46be2-ac3f-11ec-a38a-98fa9ba19b65 + :parametrized: yes + :customerscenario: True + :setup: Standalone Instance + :steps: + 1. Create DS Instance + 2. Create a user "jdoe" with a dn + 3. Verify dsconf command is working correctly with plugin entryuuid fixup + + :expectedresults: + 1. Success + 2. Success + 3. Success + + """ + log.info("Use dsconf tool to configure entryuuid plugin") + parent = "ou=People,dc=example,dc=com" + name = 'jdoe' + dn = 'uid=%s,%s' % (name, parent) + log.info('Testing with User created for dn :{} .'.format(dn)) + cmd=['/usr/sbin/dsconf',topology.standalone.get_ldap_uri(),'-D',DN_DM,'-w','password','plugin','entryuuid','fixup',dn] + log.info(f'Dsconf Command used : %{cmd}') + proc = subprocess.Popen(cmd, stdout=subprocess.PIPE) + msg = proc.communicate() + log.info(f'output message : {msg[0]}') + assert proc.returncode == 0 + + +def _entryuuid_import_and_search(topology): + # 1 + ldif_dir = topology.standalone.get_ldif_dir() + target_ldif = os.path.join(ldif_dir, 'localhost-userRoot-2020_03_30_13_14_47.ldif') + import_ldif = os.path.join(DATADIR1, 'localhost-userRoot-2020_03_30_13_14_47.ldif') + shutil.copyfile(import_ldif, target_ldif) + os.chmod(target_ldif, 0o777) + + be = Backends(topology.standalone).get('userRoot') + task = be.import_ldif([target_ldif]) + task.wait() + assert(task.is_complete() and task.get_exit_code() == 0) + + accounts = Accounts(topology.standalone, DEFAULT_SUFFIX) + # 2 - positive eq test + r2 = accounts.filter("(entryUUID=%s)" % IMPORT_UUID_A) + assert(len(r2) == 1) + r3 = accounts.filter("(entryuuid=%s)" % IMPORT_UUID_B) + assert(len(r3) == 1) + # 3 - negative eq test + r4 = accounts.filter("(entryuuid=%s)" % UUID_MAX) + assert(len(r4) == 0) + # 4 - le search + r5 = accounts.filter("(entryuuid<=%s)" % UUID_BETWEEN) + assert(len(r5) == 1) + # 5 
- ge search + r6 = accounts.filter("(entryuuid>=%s)" % UUID_BETWEEN) + assert(len(r6) == 1) + # 6 - le 0 search + r7 = accounts.filter("(entryuuid<=%s)" % UUID_MIN) + assert(len(r7) == 0) + # 7 - ge f search + r8 = accounts.filter("(entryuuid>=%s)" % UUID_MAX) + assert(len(r8) == 0) + # 8 - export db + task = be.export_ldif() + task.wait() + assert(task.is_complete() and task.get_exit_code() == 0) + + +@pytest.mark.skipif(not default_paths.rust_enabled or ds_is_older('1.4.2.0'), reason="Entryuuid is not available in older versions") +def test_entryuuid_indexed_import_and_search(topology): + """ Test that an ldif of entries containing entryUUID's can be indexed and searched + correctly. As https://tools.ietf.org/html/rfc4530 states, the MR's are equality and + ordering, so we check these are correct. + + :id: c98ee6dc-a7ee-4bd4-974d-597ea966dad9 + + :setup: Standalone instance + + :steps: + 1. Import the db from the ldif + 2. EQ search for an entryuuid (match) + 3. EQ search for an entryuuid that does not exist + 4. LE search for an entryuuid lower (1 res) + 5. GE search for an entryuuid greater (1 res) + 6. LE for the 0 uuid (0 res) + 7. GE for the f uuid (0 res) + 8. export the db to ldif + + :expectedresults: + 1. Success + 2. 1 match + 3. 0 match + 4. 1 match + 5. 1 match + 6. 0 match + 7. 0 match + 8. success + """ + # Assert that the index correctly exists. + be = Backends(topology.standalone).get('userRoot') + indexes = be.get_indexes() + indexes.ensure_state(properties={ + 'cn': 'entryUUID', + 'nsSystemIndex': 'false', + 'nsIndexType': ['eq', 'pres'], + }) + _entryuuid_import_and_search(topology) + +@pytest.mark.skipif(not default_paths.rust_enabled or ds_is_older('1.4.2.0'), reason="Entryuuid is not available in older versions") +def test_entryuuid_unindexed_import_and_search(topology): + """ Test that an ldif of entries containing entryUUID's can be UNindexed searched + correctly. 
As https://tools.ietf.org/html/rfc4530 states, the MR's are equality and + ordering, so we check these are correct. + + :id: b652b54d-f009-464b-b5bd-299a33f97243 + + :setup: Standalone instance + + :steps: + 1. Import the db from the ldif + 2. EQ search for an entryuuid (match) + 3. EQ search for an entryuuid that does not exist + 4. LE search for an entryuuid lower (1 res) + 5. GE search for an entryuuid greater (1 res) + 6. LE for the 0 uuid (0 res) + 7. GE for the f uuid (0 res) + 8. export the db to ldif + + :expectedresults: + 1. Success + 2. 1 match + 3. 0 match + 4. 1 match + 5. 1 match + 6. 0 match + 7. 0 match + 8. success + """ + # Assert that the index does NOT exist for this test. + be = Backends(topology.standalone).get('userRoot') + indexes = be.get_indexes() + try: + idx = indexes.get('entryUUID') + idx.delete() + except ldap.NO_SUCH_OBJECT: + # It's already not present, move along, nothing to see here. + pass + _entryuuid_import_and_search(topology) + +# Test entryUUID generation +@pytest.mark.skipif(not default_paths.rust_enabled or ds_is_older('1.4.2.0'), reason="Entryuuid is not available in older versions") +def test_entryuuid_generation_on_add(topology): + """ Test that when an entry is added, the entryuuid is added. + + :id: a7439b0a-dcee-4cd6-b8ef-771476c0b4f6 + + :setup: Standalone instance + + :steps: + 1. Create a new entry in the db + 2. Check it has an entry uuid + + :expectedresults: + 1. Success + 2. An entry uuid is present + """ + # Step one - create a user! + account = nsUserAccounts(topology.standalone, DEFAULT_SUFFIX).create_test_user() + # Step two - does it have an entryuuid? 
+ euuid = account.get_attr_val_utf8('entryUUID') + print(euuid) + assert(euuid is not None) + +# Test fixup task +@pytest.mark.skipif(not default_paths.rust_enabled or ds_is_older('1.4.2.0'), reason="Entryuuid is not available in older versions") +def test_entryuuid_fixup_task(topology): + """Test that when an entries without UUID's can have one generated via + the fixup process. + + :id: ad42bba2-ffb2-4c22-a37d-cbe7bcf73d6b + + :setup: Standalone instance + + :steps: + 1. Disable the entryuuid plugin + 2. Create an entry + 3. Enable the entryuuid plugin + 4. Run the fixup + 5. Assert the entryuuid now exists + 6. Restart and check they persist + + :expectedresults: + 1. Success + 2. Success + 3. Success + 4. Success + 5. Suddenly EntryUUID! + 6. Still has EntryUUID! + """ + # 1. Disable the plugin + plug = EntryUUIDPlugin(topology.standalone) + plug.disable() + topology.standalone.restart() + + # 2. create the account + account = nsUserAccounts(topology.standalone, DEFAULT_SUFFIX).create_test_user(uid=2000) + euuid = account.get_attr_val_utf8('entryUUID') + assert(euuid is None) + + # 3. enable the plugin + plug.enable() + topology.standalone.restart() + + # 4. run the fix up + # For now set the log level to high! + topology.standalone.config.loglevel(vals=(ErrorLog.DEFAULT,ErrorLog.PLUGIN)) + task = plug.fixup(DEFAULT_SUFFIX) + task.wait() + assert(task.is_complete() and task.get_exit_code() == 0) + topology.standalone.config.loglevel(vals=(ErrorLog.DEFAULT,)) + + # 5.1 Assert the uuid on the user. + euuid_user = account.get_attr_val_utf8('entryUUID') + assert(euuid_user is not None) + + # 5.2 Assert it on the domain entry. + domain = Domain(topology.standalone, dn=DEFAULT_SUFFIX) + euuid_domain = domain.get_attr_val_utf8('entryUUID') + assert(euuid_domain is not None) + + # Assert it persists after a restart. + topology.standalone.restart() + # 6.1 Assert the uuid on the use. 
+ euuid_user_2 = account.get_attr_val_utf8('entryUUID') + assert(euuid_user_2 == euuid_user) + + # 6.2 Assert it on the domain entry. + euuid_domain_2 = domain.get_attr_val_utf8('entryUUID') + assert(euuid_domain_2 == euuid_domain) + +@pytest.mark.skipif(not default_paths.rust_enabled or ds_is_older('1.4.2.0'), reason="Entryuuid is not available in older versions") +def test_entryuuid_import_and_fixup_of_invalid_values(topology): + """ Test that when we import a database with an invalid entryuuid + that it is accepted *and* that subsequently we can fix the invalid + entryuuid during a fixup. + + :id: ec8ef3a7-3cd2-4cbd-b6f1-2449fa17be75 + + :setup: Standalone instance + + :steps: + 1. Import the db from the ldif + 2. Check the entryuuid is invalid + 3. Run the fixup + 4. Check the entryuuid is now valid (regenerated) + + :expectedresults: + 1. Success + 2. The entryuuid is invalid + 3. Success + 4. The entryuuid is valid + """ + + # 1. Import the db + ldif_dir = topology.standalone.get_ldif_dir() + target_ldif = os.path.join(ldif_dir, 'localhost-userRoot-invalid.ldif') + import_ldif = os.path.join(DATADIR1, 'localhost-userRoot-invalid.ldif') + shutil.copyfile(import_ldif, target_ldif) + os.chmod(target_ldif, 0o777) + + be = Backends(topology.standalone).get('userRoot') + task = be.import_ldif([target_ldif]) + task.wait() + assert(task.is_complete() and task.get_exit_code() == 0) + + # 2. Check the entryuuid is invalid + account = nsUserAccounts(topology.standalone, DEFAULT_SUFFIX).get("demo_user") + euuid = account.get_attr_val_utf8('entryUUID') + assert(euuid == "INVALID_UUID") + + # 3. Run the fixup + topology.standalone.config.loglevel(vals=(ErrorLog.DEFAULT,ErrorLog.PLUGIN)) + plug = EntryUUIDPlugin(topology.standalone) + task = plug.fixup(DEFAULT_SUFFIX) + task.wait() + assert(task.is_complete() and task.get_exit_code() == 0) + topology.standalone.config.loglevel(vals=(ErrorLog.DEFAULT,)) + + # 4. 
Check the entryuuid is valid + euuid = account.get_attr_val_utf8('entryUUID') + print(f"❄️ account entryUUID -> {euuid}"); + assert(euuid != "INVALID_UUID") + # Raises an error if invalid + uuid.UUID(euuid) + diff --git a/dirsrvtests/tests/suites/entryuuid/replicated_test.py b/dirsrvtests/tests/suites/entryuuid/replicated_test.py new file mode 100644 index 0000000..2e9f799 --- /dev/null +++ b/dirsrvtests/tests/suites/entryuuid/replicated_test.py @@ -0,0 +1,153 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2020 William Brown +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. +# --- END COPYRIGHT BLOCK --- + +import ldap +import pytest +import logging +from lib389.topologies import topology_m2 as topo_m2 +from lib389.idm.user import nsUserAccounts +from lib389.paths import Paths +from lib389.utils import ds_is_older +from lib389._constants import * +from lib389.replica import ReplicationManager +from lib389.plugins import EntryUUIDPlugin +from lib389.tasks import EntryUUIDFixupTask + +default_paths = Paths() + +pytestmark = pytest.mark.tier1 + +@pytest.mark.skipif(not default_paths.rust_enabled or ds_is_older('1.4.2.0'), reason="Entryuuid is not available in older versions") + +def test_entryuuid_with_replication(topo_m2): + """ Check that entryuuid works with replication + + :id: a5f15bf9-7f63-473a-840c-b9037b787024 + + :setup: two node mmr + + :steps: + 1. Create an entry on one server + 2. Wait for replication + 3. Assert it is on the second + + :expectedresults: + 1. Success + 1. Success + 1. 
Success + """ + + server_a = topo_m2.ms["supplier1"] + server_b = topo_m2.ms["supplier2"] + server_a.config.loglevel(vals=(ErrorLog.DEFAULT,ErrorLog.TRACE)) + server_b.config.loglevel(vals=(ErrorLog.DEFAULT,ErrorLog.TRACE)) + + repl = ReplicationManager(DEFAULT_SUFFIX) + + account_a = nsUserAccounts(server_a, DEFAULT_SUFFIX).create_test_user(uid=2000) + euuid_a = account_a.get_attr_vals_utf8('entryUUID') + print("🧩 %s" % euuid_a) + assert(euuid_a is not None) + assert(len(euuid_a) == 1) + + repl.wait_for_replication(server_a, server_b) + + account_b = nsUserAccounts(server_b, DEFAULT_SUFFIX).get("test_user_2000") + euuid_b = account_b.get_attr_vals_utf8('entryUUID') + print("🧩 %s" % euuid_b) + + server_a.config.loglevel(vals=(ErrorLog.DEFAULT,)) + server_b.config.loglevel(vals=(ErrorLog.DEFAULT,)) + + assert(euuid_b is not None) + assert(len(euuid_b) == 1) + assert(euuid_b == euuid_a) + + account_b.set("description", "update") + repl.wait_for_replication(server_b, server_a) + + euuid_c = account_a.get_attr_vals_utf8('entryUUID') + print("🧩 %s" % euuid_c) + assert(euuid_c is not None) + assert(len(euuid_c) == 1) + assert(euuid_c == euuid_a) + +@pytest.mark.skipif(not default_paths.rust_enabled or ds_is_older('1.4.2.0'), reason="Entryuuid is not available in older versions") +def test_entryuuid_fixup_with_replication(topo_m2): + """ Check that entryuuid fixup task works with replication + + :id: 4ff25022-2de8-11ed-b393-482ae39447e5 + :setup: two node mmr + + :steps: + 1. Disable EntryUUID plugin. + 2. Create an user entry. + 3. Enable EntryUUID plugin. + 4. Check that the user entry does not have an entryuuid attribute + 5. Run fixup task + 6. Wait for task completion + 7. Check that the user entry has an entryuuid attribute + 8. Wait until changes get replicated + 9. Check that the user entry on the other supplier has same entryuuid attribute + + :expectedresults: + 1. Success + 2. Success + 3. Success + 4. Success + 5. Success + 6. Success + 7. Success + 8. 
Success + 9. Success + """ + server_a = topo_m2.ms["supplier1"] + server_b = topo_m2.ms["supplier2"] + server_a.config.loglevel(vals=(ErrorLog.DEFAULT,ErrorLog.TRACE)) + server_b.config.loglevel(vals=(ErrorLog.DEFAULT,ErrorLog.TRACE)) + + # 1. Disable EntryUUID plugin. + plugin = EntryUUIDPlugin(server_a) + plugin.disable() + server_a.restart() + + # 2. Create an user entry. + # uid must differ than the test test_entryuuid_with_replication one + # to avoid conflict between the tests. + account_a = nsUserAccounts(server_a, DEFAULT_SUFFIX).create_test_user(uid=3000) + + # 3. Enable EntryUUID plugin. + plugin.enable() + server_a.restart() + + # 4. Check that the user entry does not have an entryuuid attribute + euuid_a = account_a.get_attr_vals_utf8('entryUUID') + assert(not euuid_a) + + # 5. Run fixup task + task = EntryUUIDFixupTask(server_a).create(properties={ + 'basedn': DEFAULT_SUFFIX, + 'filter': "objectClass=*" + }) + + # 6. Wait for task completion + task.wait() + assert task.is_complete() + + # 7. Check that the user entry has an entryuuid attribute + euuid_a = account_a.get_attr_vals_utf8('entryUUID') + assert(euuid_a) + + # 8. Wait until changes get replicated + repl = ReplicationManager(DEFAULT_SUFFIX) + repl.wait_for_replication(server_a, server_b) + + # 9. 
Check that the user entry on the other supplier has same entryuuid attribute + account_b = nsUserAccounts(server_b, DEFAULT_SUFFIX).get("test_user_3000") + euuid_b = account_b.get_attr_vals_utf8('entryUUID') + assert euuid_a == euuid_b diff --git a/dirsrvtests/tests/suites/export/__init__.py b/dirsrvtests/tests/suites/export/__init__.py new file mode 100644 index 0000000..7defef9 --- /dev/null +++ b/dirsrvtests/tests/suites/export/__init__.py @@ -0,0 +1,3 @@ +""" + :Requirement: 389-ds-base: DataBase Export +""" diff --git a/dirsrvtests/tests/suites/export/export_test.py b/dirsrvtests/tests/suites/export/export_test.py new file mode 100644 index 0000000..7d90397 --- /dev/null +++ b/dirsrvtests/tests/suites/export/export_test.py @@ -0,0 +1,140 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2023 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. +# --- END COPYRIGHT BLOCK --- + +import os +import pytest +import subprocess +from lib389.topologies import topology_st as topo +from lib389._constants import DEFAULT_SUFFIX, DEFAULT_BENAME +from lib389.utils import * +from lib389.paths import Paths +from lib389.cli_base import FakeArgs +from lib389.cli_ctl.dbtasks import dbtasks_db2ldif + +pytestmark = pytest.mark.tier1 + + +def run_db2ldif_and_clear_logs(topology, instance, backend, ldif, output_msg, encrypt=False, repl=False): + args = FakeArgs() + args.instance = instance.serverid + args.backend = backend + args.encrypted = encrypt + args.replication = repl + args.ldif = ldif + + dbtasks_db2ldif(instance, topology.logcap.log, args) + + log.info('checking output msg') + if not topology.logcap.contains(output_msg): + log.error('The output message is not the expected one') + assert False + + log.info('Clear the log') + topology.logcap.flush() + + +@pytest.mark.bz1806978 +@pytest.mark.ds51188 +@pytest.mark.skipif(ds_is_older("1.3.10", "1.4.2"), reason="Not implemented") +def 
test_dbtasks_db2ldif_with_non_accessible_ldif_file_path(topo): + """Export with dsctl db2ldif, giving a ldif file path which can't be accessed by the user (dirsrv by default) + + :id: 511e7702-7685-4951-9966-38f402d6214b + :setup: Standalone Instance - entries imported in the db + :steps: + 1. Stop the server + 2. Launch db2ldif with an non accessible ldif file path + 3. Catch the reported error code + 4. Check that an appropriate error was returned + :expectedresults: + 1. Operation successful + 2. Operation properly fails, without crashing + 3. An error code different from 139 (segmentation fault) should be reported + 4. "location does not exist" is returned + """ + export_ldif = '/tmp/nonexistent/export.ldif' + + log.info("Stopping the instance...") + topo.standalone.stop() + + log.info("Performing an offline export to a non accessible ldif file path - should fail properly") + expected_output="location does not exist" + with pytest.raises(ValueError) as e: + run_db2ldif_and_clear_logs(topo, topo.standalone, DEFAULT_BENAME, export_ldif, expected_output) + assert "location does not exist" in str(e.value) + + log.info("Restarting the instance...") + topo.standalone.start() + + +@pytest.mark.bz1806978 +@pytest.mark.ds51188 +@pytest.mark.skipif(ds_is_older("1.4.3.8"), reason="bz1806978 not fixed") +def test_db2ldif_cli_with_non_accessible_ldif_file_path(topo): + """Export with ns-slapd db2ldif, giving a ldif file path which can't be accessed by the user (dirsrv by default) + + :id: ca91eda7-27b1-4750-a013-531a63d3f5b0 + :setup: Standalone Instance - entries imported in the db + :steps: + 1. Stop the server + 2. Launch db2ldif with an non accessible ldif file path + 3. Catch the reported error code + 4. Check that an appropriate error was returned + :expectedresults: + 1. Operation successful + 2. Operation properly fails, without crashing + 3. An error code different from 139 (segmentation fault) should be reported + 4. 
"The LDIF file location does not exist" is returned + """ + export_ldif = '/tmp/nonexistent/export.ldif' + db2ldif_cmd = os.path.join(topo.standalone.ds_paths.sbin_dir, 'dsctl') + + log.info("Stopping the instance...") + topo.standalone.stop() + + log.info("Performing an offline export to a non accessible ldif file path - should fail properly") + try: + subprocess.check_output([db2ldif_cmd, topo.standalone.serverid, 'db2ldif', 'userroot', export_ldif]) + except subprocess.CalledProcessError as e: + if format(e.returncode) == '139': + log.error('db2ldif had a Segmentation fault (core dumped)') + assert False + + log.info("Restarting the instance...") + topo.standalone.start() + + +@pytest.mark.bz1860291 +@pytest.mark.xfail(reason="bug 1860291") +@pytest.mark.skipif(ds_is_older("1.3.10", "1.4.2"), reason="Not implemented") +def test_dbtasks_db2ldif_with_non_accessible_ldif_file_path_output(topo): + """Export with db2ldif, giving a ldif file path which can't be accessed by the user (dirsrv by default) + + :id: fcc63387-e650-40a7-b643-baa68c190037 + :setup: Standalone Instance - entries imported in the db + :steps: + 1. Stop the server + 2. Launch db2ldif with a non accessible ldif file path + 3. check the error reported in the command output + :expectedresults: + 1. Operation successful + 2. Operation properly fails + 3. 
An clear error message is reported as output of the cli + """ + export_ldif = '/tmp/nonexistent/export.ldif' + + log.info("Stopping the instance...") + topo.standalone.stop() + + log.info("Performing an offline export to a non accessible ldif file path - should fail and output a clear error message") + expected_output="No such file or directory" + run_db2ldif_and_clear_logs(topo, topo.standalone, DEFAULT_BENAME, export_ldif, expected_output) + # This test will possibly have to be updated with the error message reported after bz1860291 fix + + log.info("Restarting the instance...") + topo.standalone.start() diff --git a/dirsrvtests/tests/suites/filter/__init__.py b/dirsrvtests/tests/suites/filter/__init__.py new file mode 100644 index 0000000..beccf4b --- /dev/null +++ b/dirsrvtests/tests/suites/filter/__init__.py @@ -0,0 +1,3 @@ +""" + :Requirement: 389-ds-base: LDAP Filters +""" diff --git a/dirsrvtests/tests/suites/filter/basic_filter_test.py b/dirsrvtests/tests/suites/filter/basic_filter_test.py new file mode 100644 index 0000000..b758cf8 --- /dev/null +++ b/dirsrvtests/tests/suites/filter/basic_filter_test.py @@ -0,0 +1,49 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2019 RED Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. +# --- END COPYRIGHT BLOCK ---- + +import pytest, os + +from lib389._constants import DEFAULT_SUFFIX, PW_DM +from lib389.topologies import topology_st as topo + +from lib389.idm.user import UserAccount, UserAccounts +from lib389.idm.account import Accounts + +pytestmark = pytest.mark.tier0 + +def test_search_attr(topo): + """Test filter can search attributes + + :id: 9a1b0a4b-111c-4105-866d-4288f143ee07 + :setup: Standalone instance + :steps: + 1. Add test entry + 2. make search + :expectedresults: + 1. Entry should be added + 2. 
Operation should succeed + """ + user = UserAccounts(topo.standalone, DEFAULT_SUFFIX) + for i in range(1, 5): + user1 = user.create_test_user(uid=i) + user1.set("mail", "AnujBorah{}@ok.com".format(i)) + + # Testing filter is working for any king of attr + + user = Accounts(topo.standalone, DEFAULT_SUFFIX) + + assert len(user.filter('(mail=*)')) == 4 + assert len(user.filter('(uid=*)')) == 5 + + # Testing filter is working for other filters + assert len(user.filter("(objectclass=inetOrgPerson)")) == 4 + + +if __name__ == "__main__": + CURRENT_FILE = os.path.realpath(__file__) + pytest.main("-s -v %s" % CURRENT_FILE) diff --git a/dirsrvtests/tests/suites/filter/bitw_filter_test.py b/dirsrvtests/tests/suites/filter/bitw_filter_test.py new file mode 100644 index 0000000..73aadee --- /dev/null +++ b/dirsrvtests/tests/suites/filter/bitw_filter_test.py @@ -0,0 +1,397 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2019 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. +# --- END COPYRIGHT BLOCK --- + +""" +This script will test different type of Filers. 
+""" + +import os +import ldap +import pytest +from lib389.topologies import topology_st as topo +from lib389._constants import PW_DM +from lib389.idm.user import UserAccounts +from lib389.idm.account import Accounts +from lib389.plugins import BitwisePlugin +from lib389.schema import Schema +from lib389.backend import Backends +from lib389.idm.domain import Domain + +pytestmark = pytest.mark.tier1 + +FILTER_TESTPERSON = "objectclass=testperson" +FILTER_TESTERPERSON = "objectclass=testerperson" +FILTER_CONTROL = f"(& ({FILTER_TESTPERSON}) (testUserAccountControl:1.2.840.113556.1.4.803:=514))" +SUFFIX = 'dc=anuj,dc=com' + + +class CreateUsers(): + """ + Will create users with different testUserAccountControl, testUserStatus + """ + def __init__(self, *args): + self.args = args + + def user_create(self): + """ + Will create users with different testUserAccountControl, testUserStatus + """ + self.args[0].create(properties={ + 'sn': self.args[1], + 'uid': self.args[1], + 'cn': self.args[1], + 'userpassword': PW_DM, + 'givenName': 'bit', + 'mail': '{}@redhat.com'.format(self.args[1]), + 'objectclass': 'top account posixaccount organizationalPerson ' + 'inetOrgPerson testperson'.split(), + 'testUserAccountControl': [i for i in self.args[2]], + 'testUserStatus': [i for i in self.args[3]], + 'uidNumber': str(self.args[4]), + 'gidNumber': str(self.args[4]), + 'homeDirectory': self.args[1] + }) + + def create_users_other(self): + """ + Will create users with different testUserAccountControl(8388608) + """ + self.args[0].create(properties={ + 'telephoneNumber': '98989819{}'.format(self.args[1]), + 'uid': 'anuj_{}'.format(self.args[1]), + 'sn': 'testwise_{}'.format(self.args[1]), + 'cn': 'bit testwise{}'.format(self.args[1]), + 'userpassword': PW_DM, + 'givenName': 'anuj_{}'.format(self.args[1]), + 'mail': 'anuj_{}@example.com'.format(self.args[1]), + 'objectclass': 'top account posixaccount organizationalPerson ' + 'inetOrgPerson testperson'.split(), + 
'testUserAccountControl': '8388608', + 'testUserStatus': 'PasswordExpired', + 'uidNumber': str(self.args[1]), + 'gidNumber': str(self.args[1]), + 'homeDirectory': '/home/' + 'testwise_{}'.format(self.args[1]) + }) + + def user_create_52(self): + """ + Will create users with different testUserAccountControl(16777216) + """ + self.args[0].create(properties={ + 'telephoneNumber': '98989819{}'.format(self.args[1]), + 'uid': 'bditwfilter52_test{}'.format(self.args[1]), + 'sn': 'bditwfilter52_test{}'.format(self.args[1]), + 'cn': 'bit bditwfilter52_test{}'.format(self.args[1]), + 'userpassword': PW_DM, + 'givenName': 'bditwfilter52_test{}'.format(self.args[1]), + 'mail': 'bditwfilter52_test{}@example.com'.format(self.args[1]), + 'objectclass': 'top account posixaccount organizationalPerson ' + 'inetOrgPerson testperson'.split(), + 'testUserAccountControl': '16777216', + 'testUserStatus': 'PasswordExpired', + 'uidNumber': str(self.args[1]), + 'gidNumber': str(self.args[1]), + 'homeDirectory': '/home/' + 'bditwfilter52_test{}'.format(self.args[1]) + }) + + +@pytest.fixture(scope="module") +def _create_schema(request, topo): + Schema(topo.standalone).\ + add('attributetypes', + ["( NAME 'testUserAccountControl' DESC 'Attribute Bitwise filteri-Multi-Valued'" + "SYNTAX 1.3.6.1.4.1.1466.115.121.1.27 )", + "( NAME 'testUserStatus' DESC 'State of User account active/disabled'" + "SYNTAX 1.3.6.1.4.1.1466.115.121.1.15 )"]) + + Schema(topo.standalone).\ + add('objectClasses', "( NAME 'testperson' SUP top STRUCTURAL MUST " + "( sn $ cn $ testUserAccountControl $ " + "testUserStatus )MAY( userPassword $ telephoneNumber $ " + "seeAlso $ description ) X-ORIGIN 'BitWise' )") + + # Creating Backend + backends = Backends(topo.standalone) + backend = backends.create(properties={'nsslapd-suffix': SUFFIX, 'cn': 'AnujRoot'}) + + # Creating suffix + suffix = Domain(topo.standalone, SUFFIX).create(properties={'dc': 'anuj'}) + + # Creating users + users = UserAccounts(topo.standalone, suffix.dn, 
rdn=None) + for user in [('btestuser1', ['514'], ['Disabled'], 100), + ('btestuser2', ['65536'], ['PasswordNeverExpired'], 101), + ('btestuser3', ['8388608'], ['PasswordExpired'], 102), + ('btestuser4', ['256'], ['TempDuplicateAccount'], 103), + ('btestuser5', ['16777216'], ['TrustedAuthDelegation'], 104), + ('btestuser6', ['528'], ['AccountLocked'], 105), + ('btestuser7', ['513'], ['AccountActive'], 106), + ('btestuser11', ['655236'], ['TestStatus1'], 107), + ('btestuser12', ['665522'], ['TestStatus2'], 108), + ('btestuser13', ['266552'], ['TestStatus3'], 109), + ('btestuser8', ['98536', '99512', '99528'], + ['AccountActive', 'PasswordExxpired', 'AccountLocked'], 110), + ('btestuser9', ['87536', '912', ], ['AccountActive', + 'PasswordNeverExpired', ], 111), + ('btestuser10', ['89536', '97546', '96579'], + ['TestVerify1', 'TestVerify2', 'TestVerify3'], 112)]: + CreateUsers(users, user[0], user[1], user[2], user[3]).user_create() + + def fin(): + """ + Deletes entries after the test. + """ + for user in users.list(): + user.delete() + + suffix.delete() + backend.delete() + + request.addfinalizer(fin) + + +def increasesizelimit(topo, size): + """ + Will change nsslapd-sizelimit to desire value + """ + topo.standalone.config.set('nsslapd-sizelimit', str(size)) + + +def test_bitwise_plugin_status(topo, _create_schema): + """Checking bitwise plugin enabled or not, by default it should be enabled. + If disabled, this test case would enable the plugin + + :id: 3ade097e-9ebd-11e8-b2e7-8c16451d917b + :setup: Standalone + :steps: + 1. Create Filter rules. + 2. Try to pass filter rules as per the condition . + :expectedresults: + 1. It should pass + 2. It should pass + """ + # Assert plugin BitwisePlugin is on + assert BitwisePlugin(topo.standalone).status() + + +def test_search_disabled_accounts(topo, _create_schema): + """Searching for integer Disabled Accounts. + Bitwise AND operator should match each integer, so it should return one entry. 
+ + :id: 467ef0ea-9ebd-11e8-a37f-8c16451d917b + :setup: Standalone + :steps: + 1. Create Filter rules. + 2. Try to pass filter rules as per the condition . + :expectedresults: + 1. It should pass + 2. It should pass + """ + assert len(Accounts(topo.standalone, SUFFIX).filter(FILTER_CONTROL)) == 2 + + +def test_plugin_can_be_disabled(topo, _create_schema): + """Verify whether plugin can be disabled + + :id: 4ed21588-9ebd-11e8-b862-8c16451d917b + :setup: Standalone + :steps: + 1. Create Filter rules. + 2. Try to pass filter rules as per the condition . + :expectedresults: + 1. It should pass + 2. It should pass + """ + bitwise = BitwisePlugin(topo.standalone) + assert bitwise.status() + # make BitwisePlugin off + bitwise.disable() + topo.standalone.restart() + assert not bitwise.status() + + +def test_plugin_is_disabled(topo, _create_schema): + """Testing Bitwise search when plugin is disabled + Bitwise search filter should give proper error message + + :id: 54bebbfe-9ebd-11e8-8ca4-8c16451d917b + :setup: Standalone + :steps: + 1. Create Filter rules. + 2. Try to pass filter rules as per the condition . + :expectedresults: + 1. It should pass + 2. It should pass + """ + with pytest.raises(ldap.UNAVAILABLE_CRITICAL_EXTENSION): + Accounts(topo.standalone, SUFFIX).filter(FILTER_CONTROL) + + +def test_enabling_works_fine(topo, _create_schema): + """Enabling the plugin to make sure re-enabling works fine + + :id: 5a2fc2b8-9ebd-11e8-8e18-8c16451d917b + :setup: Standalone + :steps: + 1. Create Filter rules. + 2. Try to pass filter rules as per the condition . + :expectedresults: + 1. It should pass + 2. 
It should pass + """ + # make BitwisePlugin off + bitwise = BitwisePlugin(topo.standalone) + bitwise.disable() + # make BitwisePlugin on again + bitwise.enable() + topo.standalone.restart() + assert bitwise.status() + assert len(Accounts(topo.standalone, SUFFIX).filter(FILTER_CONTROL)) == 2 + + +@pytest.mark.parametrize("filter_name, value", [ + (f"(& ({FILTER_TESTPERSON}) (testUserAccountControl:1.2.840.113556.1.4.803:=513))", 1), + (f"(& ({FILTER_TESTPERSON}) (testUserAccountControl:1.2.840.113556.1.4.803:=16777216))", 1), + (f"(& ({FILTER_TESTPERSON}) (testUserAccountControl:1.2.840.113556.1.4.803:=8388608))", 1), + (f"(& ({FILTER_TESTPERSON}) (testUserAccountControl:1.2.840.113556.1.4.804:=5))", 3), + (f"(& ({FILTER_TESTPERSON}) (testUserAccountControl:1.2.840.113556.1.4.804:=8))", 3), + (f"(& ({FILTER_TESTPERSON}) (testUserAccountControl:1.2.840.113556.1.4.804:=7))", 5), + (f"(& ({FILTER_TESTERPERSON}) (testUserAccountControl:1.2.840.113556.1.4.804:=7))", 0), + (f"(& ({FILTER_TESTPERSON}) (&(testUserAccountControl:1.2.840.113556.1.4.803:=98536)" + "(testUserAccountControl:1.2.840.113556.1.4.803:=912)))", 0), + (f"(& ({FILTER_TESTPERSON}) (&(testUserAccountControl:1.2.840.113556.1.4.804:=87)" + "(testUserAccountControl:1.2.840.113556.1.4.804:=91)))", 8), + (f"(& ({FILTER_TESTPERSON}) (&(testUserAccountControl:1.2.840.113556.1.4.803:=89536)" + "(testUserAccountControl:1.2.840.113556.1.4.804:=79)))", 1), + (f"(& ({FILTER_TESTPERSON}) (|(testUserAccountControl:1.2.840.113556.1.4.803:=89536)" + "(testUserAccountControl:1.2.840.113556.1.4.804:=79)))", 8), + (f"(& ({FILTER_TESTPERSON}) (|(testUserAccountControl:1.2.840.113556.1.4.803:=89)" + "(testUserAccountControl:1.2.840.113556.1.4.803:=536)))", 0), + (f"(& ({FILTER_TESTPERSON}) (testUserAccountControl:1.2.840.113556.1.4.803:=x))", 13), + (f"(& ({FILTER_TESTPERSON}) (testUserAccountControl:1.2.840.113556.1.4.803:=&\\*#$%))", 13), + (f"(& ({FILTER_TESTPERSON}) 
(testUserAccountControl:1.2.840.113556.1.4.803:=-65536))", 0), + (f"(& ({FILTER_TESTPERSON}) (testUserAccountControl:1.2.840.113556.1.4.803:=-1))", 0), + (f"(& ({FILTER_TESTPERSON}) (testUserAccountControl:1.2.840.113556.1.4.803:=-))", 13), + (f"(& ({FILTER_TESTPERSON}) (testUserAccountControl:1.2.840.113556.1.4.803:=))", 13), + (f"(& ({FILTER_TESTPERSON}) (testUserAccountControl:1.2.840.113556.1.4.803:=\\*))", 13), + (f"(& ({FILTER_TESTPERSON}) (testUserAccountControl:1.2.840.113556.1.4.804:=\\*))", 0), + (f"(& ({FILTER_TESTPERSON}) (testUserAccountControl:1.2.840.113556.1.4.803:=6552))", 0), + (f"(& ({FILTER_TESTPERSON}\\))(testUserAccountControl:1.2.840.113556.1.4.804:=6552))", 0), + (f"(& ({FILTER_TESTPERSON}) (testUserAccountControl:1.2.840.113556.1.4.803:=65536))", 5) +]) +def test_all_together(topo, _create_schema, filter_name, value): + """Target_set_with_ldap_instead_of_ldap + + :id: ba7f5106-9ebd-11e8-9ad6-8c16451d917b + :parametrized: yes + :setup: Standalone + :steps: + 1. Create Filter rules. + 2. Try to pass filter rules as per the condition . + :expectedresults: + 1. It should pass + 2. It should pass + """ + assert len(Accounts(topo.standalone, SUFFIX).filter(filter_name)) == value + + +def test_5_entries(topo, _create_schema): + """Bitwise filter test for 5 entries + By default the size limit is 2000 + Inorder to perform stress tests, we need to icrease the nsslapd-sizelimit. + IncrSizeLimit 52000 + + :id: e939aa64-9ebd-11e8-815e-8c16451d917b + :setup: Standalone + :steps: + 1. Create Filter rules. + 2. Try to pass filter rules as per the condition . + :expectedresults: + 1. It should pass + 2. 
It should pass + """ + filter51 = f"(& ({FILTER_TESTPERSON}) (testUserAccountControl:1.2.840.113556.1.4.803:=8388608))" + increasesizelimit(topo, 52000) + users = UserAccounts(topo.standalone, SUFFIX, rdn=None) + for i in range(5): + CreateUsers(users, i).create_users_other() + assert len(Accounts(topo.standalone, SUFFIX).filter(filter51)) == 6 + increasesizelimit(topo, 2000) + + +def test_5_entries1(topo, _create_schema): + """Bitwise filter for 5 entries + By default the size limit is 2000 + Inorder to perform stress tests, we need to icrease the nsslapd-sizelimit. + IncrSizeLimit 52000 + + :id: ef8b050c-9ebd-11e8-979d-8c16451d917b + :setup: Standalone + :steps: + 1. Create Filter rules. + 2. Try to pass filter rules as per the condition . + :expectedresults: + 1. It should pass + 2. It should pass + """ + filter52 = f"(& ({FILTER_TESTPERSON})(testUserAccountControl:1.2.840.113556.1.4.804:=16777216))" + increasesizelimit(topo, 52000) + users = UserAccounts(topo.standalone, SUFFIX, rdn=None) + for i in range(5): + CreateUsers(users, i).user_create_52() + assert len(Accounts(topo.standalone, SUFFIX).filter(filter52)) == 6 + increasesizelimit(topo, 2000) + + +def test_5_entries3(topo, _create_schema): + """Bitwise filter test for entries + By default the size limit is 2000 + Inorder to perform stress tests, we need to icrease the nsslapd-sizelimit. + IncrSizeLimit 52000 + + :id: f5b06648-9ebd-11e8-b08f-8c16451d917b + :setup: Standalone + :steps: + 1. Create Filter rules. + 2. Try to pass filter rules as per the condition . + :expectedresults: + 1. It should pass + 2. 
It should pass + """ + increasesizelimit(topo, 52000) + assert len(Accounts(topo.standalone, SUFFIX).filter( + "(testUserAccountControl:1.2.840.113556.1.4.803:=8388608, " + "['attrlist=cn:sn:uid:testUserAccountControl'])")) == 6 + increasesizelimit(topo, 2000) + + +def test_5_entries4(topo, _create_schema): + """Bitwise filter for entries + By default the size limit is 2000 + Inorder to perform stress tests, we need to icrease the nsslapd-sizelimit. + IncrSizeLimit 52000 + + :id: fa5f7a4e-9ebd-11e8-ad54-8c16451d917b + :setup: Standalone + :steps: + 1. Create Filter rules. + 2. Try to pass filter rules as per the condition . + :expectedresults: + 1. It should pass + 2. It should pass + """ + increasesizelimit(topo, 52000) + assert len(Accounts(topo.standalone, SUFFIX). + filter("(testUserAccountControl:1.2.840.113556.1.4.804:=16777216," + "['attrlist=cn:sn:uid:testUserAccountControl'])")) == 6 + increasesizelimit(topo, 2000) + + +if __name__ == '__main__': + CURRENT_FILE = os.path.realpath(__file__) + pytest.main("-s -v %s" % CURRENT_FILE) diff --git a/dirsrvtests/tests/suites/filter/complex_filters_test.py b/dirsrvtests/tests/suites/filter/complex_filters_test.py new file mode 100644 index 0000000..bdaa2ad --- /dev/null +++ b/dirsrvtests/tests/suites/filter/complex_filters_test.py @@ -0,0 +1,147 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2020 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. 
+# --- END COPYRIGHT BLOCK ---- + +import logging +import pytest +import os +import ldap +from lib389._constants import * +from lib389.topologies import topology_st as topo +from lib389.idm.user import UserAccounts + +pytestmark = pytest.mark.tier1 + +DEBUGGING = os.getenv("DEBUGGING", default=False) +if DEBUGGING: + logging.getLogger(__name__).setLevel(logging.DEBUG) +else: + logging.getLogger(__name__).setLevel(logging.INFO) +log = logging.getLogger(__name__) +ALL_FILTERS = [] + + +# Parameterized filters to test +AND_FILTERS = [("(&(uid=uid1)(sn=last1)(givenname=first1))", 1), + ("(&(uid=uid1)(&(sn=last1)(givenname=first1)))", 1), + ("(&(uid=uid1)(&(&(sn=last1))(&(givenname=first1))))", 1), + ("(&(uid=*)(sn=last3)(givenname=*))", 1), + ("(&(uid=*)(&(sn=last3)(givenname=*)))", 1), + ("(&(uid=uid5)(&(&(sn=*))(&(givenname=*))))", 1), + ("(&(objectclass=*)(uid=*)(sn=last*))", 5), + ("(&(objectclass=*)(uid=*)(sn=last1))", 1)] + +OR_FILTERS = [("(|(uid=uid1)(sn=last1)(givenname=first1))", 1), + ("(|(uid=uid1)(|(sn=last1)(givenname=first1)))", 1), + ("(|(uid=uid1)(|(|(sn=last1))(|(givenname=first1))))", 1), + ("(|(objectclass=*)(sn=last1)(|(givenname=first1)))", 18), + ("(|(&(objectclass=*)(sn=last1))(|(givenname=first1)))", 1), + ("(|(&(objectclass=*)(sn=last))(|(givenname=first1)))", 1)] + +NOT_FILTERS = [("(&(uid=uid1)(!(cn=NULL)))", 1), + ("(&(!(cn=NULL))(uid=uid1))", 1), + ("(&(uid=*)(&(!(uid=1))(!(givenname=first1))))", 5)] + +MIX_FILTERS = [("(&(|(uid=uid1)(uid=NULL))(sn=last1))", 1), + ("(&(|(uid=uid1)(uid=NULL))(!(sn=NULL)))", 1), + ("(&(|(uid=uid1)(sn=last2))(givenname=first1))", 1), + ("(|(&(uid=uid1)(!(uid=NULL)))(sn=last2))", 2), + ("(|(&(uid=uid1)(uid=NULL))(sn=last2))", 1), + ("(&(uid=uid5)(sn=*)(cn=*)(givenname=*)(uid=u*)(sn=la*)" + + "(cn=full*)(givenname=f*)(uid>=u)(!(givenname=NULL)))", 1), + ("(|(&(objectclass=*)(sn=last))(&(givenname=first1)))", 1)] + +ZERO_AND_FILTERS = [("(&(uid=uid1)(sn=last1)(givenname=NULL))", 0), + 
("(&(uid=uid1)(&(sn=last1)(givenname=NULL)))", 0), + ("(&(uid=uid1)(&(&(sn=last1))(&(givenname=NULL))))", 0), + ("(&(uid=uid1)(&(&(sn=last1))(&(givenname=NULL)(sn=*)))(|(sn=NULL)))", 0), + ("(&(uid=uid1)(&(&(sn=last*))(&(givenname=first*)))(&(sn=NULL)))", 0)] + +ZERO_OR_FILTERS = [("(|(uid=NULL)(sn=NULL)(givenname=NULL))", 0), + ("(|(uid=NULL)(|(sn=NULL)(givenname=NULL)))", 0), + ("(|(uid=NULL)(|(|(sn=NULL))(|(givenname=NULL))))", 0)] + +RANGE_FILTERS = [("(uid>=uid3)", 3), + ("(&(uid=*)(uid>=uid3))", 3), + ("(|(uid>=uid3)(uid<=uid5))", 6), + ("(&(uid>=uid3)(uid<=uid5))", 3), + ("(|(&(uid>=uid3)(uid<=uid5))(uid=*))", 6)] + +LONG_FILTERS = [("(|(uid=*)(uid=*)(uid=*)(uid=*)(uid=*)(uid=*)(uid=*)(uid=*)" + + "(uid=*)(uid=*)(uid=*)(uid=*)(uid=*)(uid=*)(uid=*)(uid=*)" + + "(uid=*)(uid=*)(uid=*)(uid=*)(uid=*)(uid=*)(uid=*)(uid=*)" + + "(uid=*)(uid=*)(uid=*)(uid=*)(uid=*)(uid=*)(uid=*)(uid=*)" + + "(uid=*)(uid=*)(uid=*)(uid=*)(uid=*)(uid=*)(uid=*)(uid=*)" + + "(uid=*)(uid=*)(uid=*)(uid=*)(uid=*)(uid=*)(uid=*)(uid=*)" + + "(uid=*)(uid=*)(uid=*)(uid=*)(uid=*)(uid=*)(uid=*)(uid=*)" + + "(uid=*)(uid=*)(uid=*)(uid=*)(uid=*)(uid=*)(uid=*)(uid=*)" + + "(uid=*))", 6)] + + +# Combine all the filters +ALL_FILTERS += AND_FILTERS +ALL_FILTERS += OR_FILTERS +ALL_FILTERS += NOT_FILTERS +ALL_FILTERS += MIX_FILTERS +ALL_FILTERS += ZERO_AND_FILTERS +ALL_FILTERS += ZERO_OR_FILTERS +ALL_FILTERS += LONG_FILTERS +ALL_FILTERS += RANGE_FILTERS + + +@pytest.fixture(scope="module") +def setup(topo, request): + """Add teset users + """ + + users = UserAccounts(topo.standalone, DEFAULT_SUFFIX) + for i in range(1, 6): + users.create(properties={ + 'uid': 'uid%s' % i, + 'cn': 'full%s' % i, + 'sn': 'last%s' % i, + 'givenname': 'first%s' % i, + 'uidNumber': '%s' % i, + 'gidNumber': '%s' % i, + 'homeDirectory': '/home/user%s' % i + }) + + +@pytest.mark.parametrize("myfilter, expected_results", ALL_FILTERS) +def test_filters(topo, setup, myfilter, expected_results): + """Test various complex search 
filters and verify they are returning the + expected number of entries + + :id: ee9ead27-5f63-4aed-844d-c39b99138c8d + :parametrized: yes + :setup: standalone + :steps: + 1. Issue search + 2. Check the number of returned entries against the expected number + :expectedresults: + 1. Search succeeds + 2. The number of returned entries matches the expected number + """ + + log.info("Testing filter \"{}\"...".format(myfilter)) + try: + entries = topo.standalone.search_s(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE, myfilter) + if len(entries) != expected_results: + log.fatal("Search filter \"{}\") returned {} entries, but we expected {}".format( + myfilter, len(entries), expected_results)) + assert False + except ldap.LDAPError as e: + log.fatal("Search filter \"{}\") generated ldap error: {}".format(myfilter, str(e))) + assert False + + +if __name__ == '__main__': + # Run isolated + # -s for DEBUG mode + CURRENT_FILE = os.path.realpath(__file__) + pytest.main("-s %s" % CURRENT_FILE) + diff --git a/dirsrvtests/tests/suites/filter/filter_cert_test.py b/dirsrvtests/tests/suites/filter/filter_cert_test.py new file mode 100644 index 0000000..d682ea1 --- /dev/null +++ b/dirsrvtests/tests/suites/filter/filter_cert_test.py @@ -0,0 +1,70 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2020 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. +# --- END COPYRIGHT BLOCK ---- + + +""" +verify and testing Filter from a search +""" + +import os +import pytest + +from lib389._constants import DEFAULT_SUFFIX +from lib389.topologies import topology_st as topo +from lib389.idm.user import UserAccounts +from lib389.idm.account import Accounts +from lib389.nss_ssl import NssSsl +from lib389.utils import search_filter_escape_bytes + +pytestmark = pytest.mark.tier1 + + +def test_positive(topo): + """Test User certificate field + + :id: e984ac40-63d1-4176-ad1e-0cbe71391b5f + :setup: Standalone + :steps: + 1. 
Create entries with userCertificate field. + 2. Try to search/filter them with userCertificate field. + :expectedresults: + 1. Pass + 2. Pass + """ + # SETUP TLS + topo.standalone.stop() + NssSsl(topo.standalone).reinit() + NssSsl(topo.standalone).create_rsa_ca() + NssSsl(topo.standalone).create_rsa_key_and_cert() + # Create user + NssSsl(topo.standalone).create_rsa_user('testuser1') + NssSsl(topo.standalone).create_rsa_user('testuser2') + # Creating cert users + topo.standalone.start() + users_people = UserAccounts(topo.standalone, DEFAULT_SUFFIX) + for count in range(1, 3): + user = users_people.create_test_user(uid=count, gid=count) + tls_locs = NssSsl(topo.standalone).get_rsa_user(f'testuser{count}') + # {'ca': ca_path, 'key': key_path, 'crt': crt_path} + user.enroll_certificate(tls_locs['crt_der_path']) + + assert Accounts(topo.standalone, DEFAULT_SUFFIX).filter("(usercertificate=*)") + assert Accounts(topo.standalone, DEFAULT_SUFFIX).filter("(userCertificate;binary=*)") + user1_cert = users_people.list()[0].get_attr_val("userCertificate;binary") + assert Accounts(topo.standalone, DEFAULT_SUFFIX).filter( + f'(userCertificate;binary={search_filter_escape_bytes(user1_cert)})')[0].dn.lower() == \ + 'uid=test_user_1,ou=people,dc=example,dc=com' + user2_cert = users_people.list()[1].get_attr_val("userCertificate;binary") + assert Accounts(topo.standalone, DEFAULT_SUFFIX).filter( + f'(userCertificate;binary={search_filter_escape_bytes(user2_cert)})')[0].dn.lower() == \ + 'uid=test_user_2,ou=people,dc=example,dc=com' + + +if __name__ == '__main__': + CURRENT_FILE = os.path.realpath(__file__) + pytest.main("-s -v %s" % CURRENT_FILE) diff --git a/dirsrvtests/tests/suites/filter/filter_index_match_test.py b/dirsrvtests/tests/suites/filter/filter_index_match_test.py new file mode 100644 index 0000000..9e7f7cf --- /dev/null +++ b/dirsrvtests/tests/suites/filter/filter_index_match_test.py @@ -0,0 +1,869 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2019 Red Hat, 
Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. +# --- END COPYRIGHT BLOCK ---- + + +""" +Test the matching rules feature . +""" + +import os +import pytest + +from lib389._constants import DEFAULT_SUFFIX +from lib389.topologies import topology_st +from lib389.cos import CosTemplates +from lib389.index import Indexes +from lib389.schema import Schema + +import ldap + +pytestmark = pytest.mark.tier1 + + +TESTED_MATCHING_RULES = ["bitStringMatch", + "caseExactIA5Match", + "caseExactMatch", + "caseExactOrderingMatch", + "caseExactSubstringsMatch", + "caseExactIA5SubstringsMatch", + "generalizedTimeMatch", + "generalizedTimeOrderingMatch", + "booleanMatch", + "caseIgnoreIA5Match", + "caseIgnoreIA5SubstringsMatch", + "caseIgnoreMatch", + "caseIgnoreOrderingMatch", + "caseIgnoreSubstringsMatch", + "caseIgnoreListMatch", + "caseIgnoreListSubstringsMatch", + "objectIdentifierMatch", + "directoryStringFirstComponentMatch", + "objectIdentifierFirstComponentMatch", + "distinguishedNameMatch", + "integerMatch", + "integerOrderingMatch", + "integerFirstComponentMatch", + "uniqueMemberMatch", + "numericStringMatch", + "numericStringOrderingMatch", + "numericStringSubstringsMatch", + "telephoneNumberMatch", + "telephoneNumberSubstringsMatch", + "octetStringMatch", + "octetStringOrderingMatch"] + + +LIST_CN_INDEX = [('attroctetStringMatch', ['pres', 'eq']), + ('attrbitStringMatch', ['pres', 'eq']), + ('attrcaseExactIA5Match', ['pres', 'eq', 'sub']), + ('attrcaseExactMatch', ['pres', 'eq', 'sub']), + ('attrgeneralizedTimeMatch', ['pres', 'eq']), + ('attrbooleanMatch', ['pres', 'eq']), + ('attrcaseIgnoreIA5Match', ['pres', 'eq', 'sub']), + ('attrcaseIgnoreMatch', ['pres', 'eq', 'sub']), + ('attrcaseIgnoreListMatch', ['pres', 'eq', 'sub']), + ('attrobjectIdentifierMatch', ['pres', 'eq']), + ('attrdistinguishedNameMatch', ['pres', 'eq']), + ('attrintegerMatch', ['pres', 'eq']), + ('attruniqueMemberMatch', ['pres', 'eq']), + 
('attrnumericStringMatch', ['pres', 'eq', 'sub']), + ('attrtelephoneNumberMatch', ['pres', 'eq', 'sub']), + ('attrdirectoryStringFirstComponentMatch', ['pres', 'eq']), + ('attrobjectIdentifierFirstComponentMatch', ['pres', 'eq']), + ('attrintegerFirstComponentMatch', ['pres', 'eq'])] + + +LIST_ATTR_INDEX = [ + {'attr': 'attrbitStringMatch', + 'positive': ["'0010'B", "'0011'B", "'0100'B", "'0101'B", "'0110'B"], + 'negative': ["'0001'B", "'0001'B", "'0010'B", "'0010'B", "'0011'B", + "'0011'B", "'0100'B", "'0100'B", "'0101'B", "'0101'B", + "'0110'B", "'0110'B"]}, + {'attr': 'attrcaseExactIA5Match', + 'positive': ['sPrain', 'spRain', 'sprAin', 'spraIn', 'sprain'], + 'negative': ['Sprain', 'Sprain', 'sPrain', 'sPrain', 'spRain', + 'spRain', 'sprAin', 'sprAin', 'spraIn', 'spraIn', + 'sprain', 'sprain']}, + {'attr': 'attrcaseExactMatch', + 'positive': ['ÇéliNé Ändrè', 'Çéliné ÄndrÈ', 'Çéliné Ändrè', 'çÉliné Ändrè'], + 'negative': ['ÇélIné Ändrè', 'ÇélIné Ändrè', 'ÇéliNé Ändrè', 'ÇéliNé Ändrè', + 'Çéliné ÄndrÈ', 'Çéliné ÄndrÈ', 'Çéliné Ändrè', 'Çéliné Ändrè', + 'çÉliné Ändrè', 'çÉliné Ändrè']}, + {'attr': 'attrgeneralizedTimeMatch', + 'positive': ['20100218171301Z', '20100218171302Z', '20100218171303Z', + '20100218171304Z', '20100218171305Z'], + 'negative': ['20100218171300Z', '20100218171300Z', '20100218171301Z', + '20100218171301Z', '20100218171302Z', '20100218171302Z', + '20100218171303Z', '20100218171303Z', '20100218171304Z', + '20100218171304Z', '20100218171305Z', '20100218171305Z']}, + {'attr': 'attrbooleanMatch', + 'positive': ['FALSE'], + 'negative': ['TRUE', 'TRUE', 'FALSE', 'FALSE']}, + {'attr': 'attrcaseIgnoreIA5Match', + 'positive': ['sprain2', 'sprain3', 'sprain4', 'sprain5', 'sprain6'], + 'negative': ['sprain1', 'sprain1', 'sprain2', 'sprain2', 'sprain3', + 'sprain3', 'sprain4', 'sprain4', 'sprain5', 'sprain5', + 'sprain6', 'sprain6']}, + {'attr': 'attrcaseIgnoreMatch', + 'positive': ['ÇélIné Ändrè2', 'ÇélIné Ändrè3', 'ÇélIné Ändrè4', + 'ÇélIné Ändrè5', 
'ÇélIné Ändrè6'], + 'negative': ['ÇélIné Ändrè1', 'ÇélIné Ändrè1', 'ÇélIné Ändrè2', + 'ÇélIné Ändrè2', 'ÇélIné Ändrè3', 'ÇélIné Ändrè3', + 'ÇélIné Ändrè4', 'ÇélIné Ändrè4', 'ÇélIné Ändrè5', + 'ÇélIné Ändrè5', 'ÇélIné Ändrè6', 'ÇélIné Ändrè6']}, + {'attr': 'attrcaseIgnoreListMatch', + 'positive': ['foo2$bar', 'foo3$bar', 'foo4$bar', 'foo5$bar', 'foo6$bar'], + 'negative': ['foo1$bar', 'foo1$bar', 'foo2$bar', 'foo2$bar', 'foo3$bar', + 'foo3$bar', 'foo4$bar', 'foo4$bar', 'foo5$bar', 'foo5$bar', + 'foo6$bar', 'foo6$bar']}, + {'attr': 'attrobjectIdentifierMatch', + 'positive': ['1.3.6.1.4.1.1466.115.121.1.24', '1.3.6.1.4.1.1466.115.121.1.26', + '1.3.6.1.4.1.1466.115.121.1.40', '1.3.6.1.4.1.1466.115.121.1.41', + '1.3.6.1.4.1.1466.115.121.1.6'], + 'negative': ['1.3.6.1.4.1.1466.115.121.1.15', '1.3.6.1.4.1.1466.115.121.1.15', + '1.3.6.1.4.1.1466.115.121.1.24', '1.3.6.1.4.1.1466.115.121.1.24', + '1.3.6.1.4.1.1466.115.121.1.26', '1.3.6.1.4.1.1466.115.121.1.26', + '1.3.6.1.4.1.1466.115.121.1.40', '1.3.6.1.4.1.1466.115.121.1.40', + '1.3.6.1.4.1.1466.115.121.1.41', '1.3.6.1.4.1.1466.115.121.1.41', + '1.3.6.1.4.1.1466.115.121.1.6', '1.3.6.1.4.1.1466.115.121.1.6']}, + {'attr': 'attrdirectoryStringFirstComponentMatch', + 'positive': ['ÇélIné Ändrè2', 'ÇélIné Ändrè3', 'ÇélIné Ändrè4', 'ÇélIné Ändrè5', + 'ÇélIné Ändrè6'], + 'negative': ['ÇélIné Ändrè1', 'ÇélIné Ändrè1', 'ÇélIné Ändrè2', 'ÇélIné Ändrè2', + 'ÇélIné Ändrè3', 'ÇélIné Ändrè3', 'ÇélIné Ändrè4', 'ÇélIné Ändrè4', + 'ÇélIné Ändrè5', 'ÇélIné Ändrè5', 'ÇélIné Ändrè6', 'ÇélIné Ändrè6']}, + {'attr': 'attrobjectIdentifierFirstComponentMatch', + 'positive': ['1.3.6.1.4.1.1466.115.121.1.24', '1.3.6.1.4.1.1466.115.121.1.26', + '1.3.6.1.4.1.1466.115.121.1.40', '1.3.6.1.4.1.1466.115.121.1.41', + '1.3.6.1.4.1.1466.115.121.1.6'], + 'negative': ['1.3.6.1.4.1.1466.115.121.1.15', '1.3.6.1.4.1.1466.115.121.1.15', + '1.3.6.1.4.1.1466.115.121.1.24', '1.3.6.1.4.1.1466.115.121.1.24', + '1.3.6.1.4.1.1466.115.121.1.26', 
'1.3.6.1.4.1.1466.115.121.1.26', + '1.3.6.1.4.1.1466.115.121.1.40', '1.3.6.1.4.1.1466.115.121.1.40', + '1.3.6.1.4.1.1466.115.121.1.41', '1.3.6.1.4.1.1466.115.121.1.41', + '1.3.6.1.4.1.1466.115.121.1.6', '1.3.6.1.4.1.1466.115.121.1.6']}, + {'attr': 'attrdistinguishedNameMatch', + 'positive': ['cn=foo2,cn=bar', 'cn=foo3,cn=bar', 'cn=foo4,cn=bar', 'cn=foo5,cn=bar', + 'cn=foo6,cn=bar'], + 'negative': ['cn=foo1,cn=bar', 'cn=foo1,cn=bar', 'cn=foo2,cn=bar', 'cn=foo2,cn=bar', + 'cn=foo3,cn=bar', 'cn=foo3,cn=bar', 'cn=foo4,cn=bar', 'cn=foo4,cn=bar', + 'cn=foo5,cn=bar', 'cn=foo5,cn=bar', 'cn=foo6,cn=bar', 'cn=foo6,cn=bar']}, + {'attr': 'attrintegerMatch', + 'positive': ['-1', '0', '1', '2', '3'], + 'negative': ['-2', '-2', '-1', '-1', '0', '0', '1', '1', '2', '2', '3', '3']}, + {'attr': 'attrintegerFirstComponentMatch', + 'positive': ['-1', '0', '1', '2', '3'], + 'negative': ['-2', '-2', '-1', '-1', '0', '0', '1', '1', '2', '2', '3', '3']}, + {'attr': 'attruniqueMemberMatch', + 'positive': ["cn=foo2,cn=bar#'0010'B", "cn=foo3,cn=bar#'0011'B", + "cn=foo4,cn=bar#'0100'B", "cn=foo5,cn=bar#'0101'B", + "cn=foo6,cn=bar#'0110'B"], + 'negative': ["cn=foo1,cn=bar#'0001'B", "cn=foo1,cn=bar#'0001'B", + "cn=foo2,cn=bar#'0010'B", "cn=foo2,cn=bar#'0010'B", + "cn=foo3,cn=bar#'0011'B", "cn=foo3,cn=bar#'0011'B", + "cn=foo4,cn=bar#'0100'B", "cn=foo4,cn=bar#'0100'B", + "cn=foo5,cn=bar#'0101'B", "cn=foo5,cn=bar#'0101'B", + "cn=foo6,cn=bar#'0110'B", "cn=foo6,cn=bar#'0110'B"]}, + {'attr': 'attrnumericStringMatch', + 'positive': ['00002', '00003', '00004', '00005', '00006'], + 'negative': ['00001', '00001', '00002', '00002', '00003', '00003', + '00004', '00004', '00005', '00005', '00006', '00006']}, + {'attr': 'attrtelephoneNumberMatch', + 'positive': ['+1 408 555 5625', '+1 408 555 6201', '+1 408 555 8585', + '+1 408 555 9187', '+1 408 555 9423'], + 'negative': ['+1 408 555 4798', '+1 408 555 4798', '+1 408 555 5625', + '+1 408 555 5625', '+1 408 555 6201', '+1 408 555 6201', + '+1 408 555 8585', 
'+1 408 555 8585', '+1 408 555 9187', + '+1 408 555 9187', '+1 408 555 9423', '+1 408 555 9423']}, + {'attr': 'attroctetStringMatch', + 'positive': ['AAAAAAAAAAAAAAI=', 'AAAAAAAAAAAAAAM=', 'AAAAAAAAAAAAAAQ=', + 'AAAAAAAAAAAAAAU=', 'AAAAAAAAAAAAAAY='], + 'negative': ['AAAAAAAAAAAAAAE=', 'AAAAAAAAAAAAAAE=', 'AAAAAAAAAAAAAAI=', + 'AAAAAAAAAAAAAAI=', 'AAAAAAAAAAAAAAM=', 'AAAAAAAAAAAAAAM=', + 'AAAAAAAAAAAAAAQ=', 'AAAAAAAAAAAAAAQ=', 'AAAAAAAAAAAAAAU=', + 'AAAAAAAAAAAAAAU=', 'AAAAAAAAAAAAAAY=', 'AAAAAAAAAAAAAAY=']}] + + +LIST_MOD_ATTR_ALL = [ + {'attr': 'attrcaseExactMatch', + 'positive': ['ÇélIné Ändrè'], + 'negative': ['ÇélIné Ändrè', 'ÇéliNé Ändrè', 'Çéliné ÄndrÈ', 'Çéliné Ändrè', + 'çÉliné Ändrè']}, + {'attr': 'attrgeneralizedTimeMatch', + 'positive': ['20100218171300Z'], + 'negative': ['20100218171300Z', '20100218171301Z', '20100218171302Z', + '20100218171303Z', '20100218171304Z', '20100218171305Z']}, + {'attr': 'attrbooleanMatch', + 'positive': ['TRUE'], + 'negative': ['TRUE', 'FALSE']}, + {'attr': 'attrcaseIgnoreIA5Match', + 'positive': ['sprain1'], + 'negative': ['sprain1', 'sprain2', 'sprain3', 'sprain4', 'sprain5', 'sprain6']}, + {'attr': 'attrcaseIgnoreMatch', + 'positive': ['ÇélIné Ändrè1'], + 'negative': ['ÇélIné Ändrè1', 'ÇélIné Ändrè2', 'ÇélIné Ändrè3', 'ÇélIné Ändrè4', + 'ÇélIné Ändrè5', 'ÇélIné Ändrè6']}, + {'attr': 'attrcaseIgnoreListMatch', + 'positive': ['foo1$bar'], + 'negative': ['foo1$bar', 'foo2$bar', 'foo3$bar', 'foo4$bar', 'foo5$bar', 'foo6$bar']}, + {'attr': 'attrbitStringMatch', + 'positive': ["'0001'B"], + 'negative': ["'0001'B", "'0010'B", "'0011'B", "'0100'B", "'0101'B", "'0110'B"]}, + {'attr': 'attrcaseExactIA5Match', + 'positive': ['Sprain'], + 'negative': ['Sprain', 'sPrain', 'spRain', 'sprAin', 'spraIn', 'sprain']}, + {'attr': 'attrobjectIdentifierMatch', + 'positive': ['1.3.6.1.4.1.1466.115.121.1.15'], + 'negative': ['1.3.6.1.4.1.1466.115.121.1.15', '1.3.6.1.4.1.1466.115.121.1.24', + '1.3.6.1.4.1.1466.115.121.1.26', 
'1.3.6.1.4.1.1466.115.121.1.40', + '1.3.6.1.4.1.1466.115.121.1.41', '1.3.6.1.4.1.1466.115.121.1.6']}, + {'attr': 'attrdirectoryStringFirstComponentMatch', + 'positive': ['ÇélIné Ändrè1'], + 'negative': ['ÇélIné Ändrè1', 'ÇélIné Ändrè2', 'ÇélIné Ändrè3', 'ÇélIné Ändrè4', + 'ÇélIné Ändrè5', 'ÇélIné Ändrè6']}, + {'attr': 'attrobjectIdentifierFirstComponentMatch', + 'positive': ['1.3.6.1.4.1.1466.115.121.1.15'], + 'negative': ['1.3.6.1.4.1.1466.115.121.1.15', '1.3.6.1.4.1.1466.115.121.1.24', + '1.3.6.1.4.1.1466.115.121.1.26', '1.3.6.1.4.1.1466.115.121.1.40', + '1.3.6.1.4.1.1466.115.121.1.41', '1.3.6.1.4.1.1466.115.121.1.6']}, + {'attr': 'attrdistinguishedNameMatch', + 'positive': ['cn=foo1,cn=bar'], + 'negative': ['cn=foo1,cn=bar', 'cn=foo2,cn=bar', 'cn=foo3,cn=bar', + 'cn=foo4,cn=bar', 'cn=foo5,cn=bar', 'cn=foo6,cn=bar']}, + {'attr': 'attrintegerMatch', + 'positive': ['-2'], + 'negative': ['-2', '-1', '0', '1', '2', '3']}, + {'attr': 'attrintegerFirstComponentMatch', + 'positive': ['-2'], + 'negative': ['-2', '-1', '0', '1', '2', '3']}, + {'attr': 'attruniqueMemberMatch', + 'positive': ["cn=foo1,cn=bar#'0001'B"], + 'negative': ["cn=foo1,cn=bar#'0001'B", "cn=foo2,cn=bar#'0010'B", + "cn=foo3,cn=bar#'0011'B", "cn=foo4,cn=bar#'0100'B", + "cn=foo5,cn=bar#'0101'B", "cn=foo6,cn=bar#'0110'B"]}, + {'attr': 'attrnumericStringMatch', + 'positive': ['00001'], + 'negative': ['00001', '00002', '00003', '00004', '00005', '00006']}, + {'attr': 'attrgeneralizedTimeMatch', + 'positive': ['+1 408 555 4798'], + 'negative': ['+1 408 555 4798', '+1 408 555 5625', '+1 408 555 6201', + '+1 408 555 8585', '+1 408 555 9187', '+1 408 555 9423']}, + {'attr': 'attroctetStringMatch', + 'positive': ['AAAAAAAAAAAAAAE='], + 'negative': ['AAAAAAAAAAAAAAE=', 'AAAAAAAAAAAAAAI=', 'AAAAAAAAAAAAAAM=', + 'AAAAAAAAAAAAAAQ=', 'AAAAAAAAAAAAAAU=', 'AAAAAAAAAAAAAAY=']}] + + +LIST_MOD_REPLACE_ALL = [ + {'attr': 'attrcaseExactIA5Match', + 'positive': ['Sprain', 'sPrain', 'spRain', 'sprAin', 'spraIn', 'sprain'], + 
'negative': ['Sprain', 'Sprain', 'sPrain', 'sPrain', 'spRain', 'spRain', + 'sprAin', 'sprAin', 'spraIn', 'spraIn', 'sprain', 'sprain']}, + {'attr': 'attrcaseExactMatch', + 'positive': ['ÇélIné Ändrè', 'ÇéliNé Ändrè', 'Çéliné ÄndrÈ', + 'Çéliné Ändrè', 'çÉliné Ändrè'], + 'negative': ['ÇélIné Ändrè', 'ÇélIné Ändrè', 'ÇéliNé Ändrè', 'ÇéliNé Ändrè', + 'Çéliné ÄndrÈ', 'Çéliné ÄndrÈ', 'Çéliné Ändrè', 'Çéliné Ändrè', + 'çÉliné Ändrè', 'çÉliné Ändrè']}, + {'attr': 'attrbitStringMatch', + 'positive': ["'0001'B", "'0010'B", "'0011'B", "'0100'B", "'0101'B", "'0110'B"], + 'negative': ["'0001'B", "'0001'B", "'0010'B", "'0010'B", "'0011'B", "'0011'B", + "'0100'B", "'0100'B", "'0101'B", "'0101'B", "'0110'B", "'0110'B"]}, + {'attr': 'attrgeneralizedTimeMatch', + 'positive': ['20100218171300Z', '20100218171301Z', '20100218171302Z', + '20100218171303Z', '20100218171304Z', '20100218171305Z'], + 'negative': ['20100218171300Z', '20100218171300Z', '20100218171301Z', + '20100218171301Z', '20100218171302Z', '20100218171302Z', + '20100218171303Z', '20100218171303Z', '20100218171304Z', + '20100218171304Z', '20100218171305Z', '20100218171305Z']}, + {'attr': 'attrbooleanMatch', + 'positive': ['TRUE', 'FALSE'], + 'negative': ['TRUE', 'TRUE', 'FALSE', 'FALSE']}, + {'attr': 'attrcaseIgnoreIA5Match', + 'positive': ['sprain1', 'sprain2', 'sprain3', 'sprain4', 'sprain5', 'sprain6'], + 'negative': ['sprain1', 'sprain1', 'sprain2', 'sprain2', 'sprain3', 'sprain3', + 'sprain4', 'sprain4', 'sprain5', 'sprain5', 'sprain6', 'sprain6']}, + {'attr': 'attrcaseIgnoreMatch', + 'positive': ['ÇélIné Ändrè1', 'ÇélIné Ändrè2', 'ÇélIné Ändrè3', 'ÇélIné Ändrè4', + 'ÇélIné Ändrè5', 'ÇélIné Ändrè6'], + 'negative': ['ÇélIné Ändrè1', 'ÇélIné Ändrè1', 'ÇélIné Ändrè2', 'ÇélIné Ändrè2', + 'ÇélIné Ändrè3', 'ÇélIné Ändrè3', 'ÇélIné Ändrè4', 'ÇélIné Ändrè4', + 'ÇélIné Ändrè5', 'ÇélIné Ändrè5', 'ÇélIné Ändrè6', 'ÇélIné Ändrè6']}, + {'attr': 'attrcaseIgnoreListMatch', + 'positive': ['foo1$bar', 'foo2$bar', 'foo3$bar', 
'foo4$bar', 'foo5$bar', 'foo6$bar'], + 'negative': ['foo1$bar', 'foo1$bar', 'foo2$bar', 'foo2$bar', 'foo3$bar', 'foo3$bar', + 'foo4$bar', 'foo4$bar', 'foo5$bar', 'foo5$bar', 'foo6$bar', 'foo6$bar']}, + {'attr': 'attrobjectIdentifierMatch', + 'positive': ['1.3.6.1.4.1.1466.115.121.1.15', '1.3.6.1.4.1.1466.115.121.1.24', + '1.3.6.1.4.1.1466.115.121.1.26', '1.3.6.1.4.1.1466.115.121.1.40', + '1.3.6.1.4.1.1466.115.121.1.41', '1.3.6.1.4.1.1466.115.121.1.6'], + 'negative': ['1.3.6.1.4.1.1466.115.121.1.15', '1.3.6.1.4.1.1466.115.121.1.15', + '1.3.6.1.4.1.1466.115.121.1.24', '1.3.6.1.4.1.1466.115.121.1.24', + '1.3.6.1.4.1.1466.115.121.1.26', '1.3.6.1.4.1.1466.115.121.1.26', + '1.3.6.1.4.1.1466.115.121.1.40', '1.3.6.1.4.1.1466.115.121.1.40', + '1.3.6.1.4.1.1466.115.121.1.41', '1.3.6.1.4.1.1466.115.121.1.41', + '1.3.6.1.4.1.1466.115.121.1.6', '1.3.6.1.4.1.1466.115.121.1.6']}, + {'attr': 'attrdirectoryStringFirstComponentMatch', + 'positive': ['ÇélIné Ändrè1', 'ÇélIné Ändrè2', 'ÇélIné Ändrè3', 'ÇélIné Ändrè4', + 'ÇélIné Ändrè5', 'ÇélIné Ändrè6'], + 'negative': ['ÇélIné Ändrè1', 'ÇélIné Ändrè1', 'ÇélIné Ändrè2', 'ÇélIné Ändrè2', + 'ÇélIné Ändrè3', 'ÇélIné Ändrè3', 'ÇélIné Ändrè4', 'ÇélIné Ändrè4', + 'ÇélIné Ändrè5', 'ÇélIné Ändrè5', 'ÇélIné Ändrè6', 'ÇélIné Ändrè6']}, + {'attr': 'attrobjectIdentifierFirstComponentMatch', + 'positive': ['1.3.6.1.4.1.1466.115.121.1.15', '1.3.6.1.4.1.1466.115.121.1.24', + '1.3.6.1.4.1.1466.115.121.1.26', '1.3.6.1.4.1.1466.115.121.1.40', + '1.3.6.1.4.1.1466.115.121.1.41', '1.3.6.1.4.1.1466.115.121.1.6'], + 'negative': ['1.3.6.1.4.1.1466.115.121.1.15', '1.3.6.1.4.1.1466.115.121.1.15', + '1.3.6.1.4.1.1466.115.121.1.24', '1.3.6.1.4.1.1466.115.121.1.24', + '1.3.6.1.4.1.1466.115.121.1.26', '1.3.6.1.4.1.1466.115.121.1.26', + '1.3.6.1.4.1.1466.115.121.1.40', '1.3.6.1.4.1.1466.115.121.1.40', + '1.3.6.1.4.1.1466.115.121.1.41', '1.3.6.1.4.1.1466.115.121.1.41', + '1.3.6.1.4.1.1466.115.121.1.6', '1.3.6.1.4.1.1466.115.121.1.6']}, + {'attr': 
'attrdistinguishedNameMatch', + 'positive': ['cn=foo1,cn=bar', 'cn=foo2,cn=bar', 'cn=foo3,cn=bar', 'cn=foo4,cn=bar', + 'cn=foo5,cn=bar', 'cn=foo6,cn=bar'], + 'negative': ['cn=foo1,cn=bar', 'cn=foo1,cn=bar', 'cn=foo2,cn=bar', 'cn=foo2,cn=bar', + 'cn=foo3,cn=bar', 'cn=foo3,cn=bar', 'cn=foo4,cn=bar', 'cn=foo4,cn=bar', + 'cn=foo5,cn=bar', 'cn=foo5,cn=bar', 'cn=foo6,cn=bar', 'cn=foo6,cn=bar']}, + {'attr': 'attrintegerMatch', + 'positive': ['-2', '-1', '0', '1', '2', '3'], + 'negative': ['-2', '-2', '-1', '-1', '0', '0', '1', '1', '2', '2', '3', '3']}, + {'attr': 'attrintegerFirstComponentMatch', + 'positive': ['-2', '-1', '0', '1', '2', '3'], + 'negative': ['-2', '-2', '-1', '-1', '0', '0', '1', '1', '2', '2', '3', '3']}, + {'attr': 'attruniqueMemberMatch', + 'positive': ["cn=foo1,cn=bar#'0001'B", "cn=foo2,cn=bar#'0010'B", + "cn=foo3,cn=bar#'0011'B", "cn=foo4,cn=bar#'0100'B", + "cn=foo5,cn=bar#'0101'B", "cn=foo6,cn=bar#'0110'B"], + 'negative': ["cn=foo1,cn=bar#'0001'B", "cn=foo1,cn=bar#'0001'B", + "cn=foo2,cn=bar#'0010'B", "cn=foo2,cn=bar#'0010'B", + "cn=foo3,cn=bar#'0011'B", "cn=foo3,cn=bar#'0011'B", + "cn=foo4,cn=bar#'0100'B", "cn=foo4,cn=bar#'0100'B", + "cn=foo5,cn=bar#'0101'B", "cn=foo5,cn=bar#'0101'B", + "cn=foo6,cn=bar#'0110'B", "cn=foo6,cn=bar#'0110'B"]}, + {'attr': 'attrnumericStringMatch', + 'positive': ['00001', '00002', '00003', '00004', '00005', '00006'], + 'negative': ['00001', '00001', '00002', '00002', '00003', '00003', + '00004', '00004', '00005', '00005', '00006', '00006']}, + {'attr': 'attrtelephoneNumberMatch', + 'positive': ['+1 408 555 4798', '+1 408 555 5625', '+1 408 555 6201', + '+1 408 555 8585', '+1 408 555 9187', '+1 408 555 9423'], + 'negative': ['+1 408 555 4798', '+1 408 555 4798', '+1 408 555 5625', + '+1 408 555 5625', '+1 408 555 6201', '+1 408 555 6201', + '+1 408 555 8585', '+1 408 555 8585', '+1 408 555 9187', + '+1 408 555 9187', '+1 408 555 9423', '+1 408 555 9423']}, + {'attr': 'attroctetStringMatch', + 'positive': 
['AAAAAAAAAAAAAAE=', 'AAAAAAAAAAAAAAI=', 'AAAAAAAAAAAAAAM=', + 'AAAAAAAAAAAAAAQ=', 'AAAAAAAAAAAAAAU=', 'AAAAAAAAAAAAAAY='], + 'negative': ['AAAAAAAAAAAAAAE=', 'AAAAAAAAAAAAAAE=', 'AAAAAAAAAAAAAAI=', + 'AAAAAAAAAAAAAAI=', 'AAAAAAAAAAAAAAM=', 'AAAAAAAAAAAAAAM=', + 'AAAAAAAAAAAAAAQ=', 'AAAAAAAAAAAAAAQ=', 'AAAAAAAAAAAAAAU=', + 'AAAAAAAAAAAAAAU=', 'AAAAAAAAAAAAAAY=', 'AAAAAAAAAAAAAAY=']}] + + +LIST_MOD_DEL_ALL = [ + {'attr': 'attrbitStringMatch', + 'positive_negative': ["'0001'B"]}, + {'attr': 'attrcaseExactIA5Match', + 'positive_negative': ['Sprain']}, + {'attr': 'attrbitStringMatch', + 'positive_negative': ["'0001'B"]}, + {'attr': 'attrcaseExactMatch', + 'positive_negative': ['ÇélIné Ändrè']}, + {'attr': 'attrgeneralizedTimeMatch', + 'positive_negative': ['20100218171300Z']}, + {'attr': 'attrbooleanMatch', + 'positive_negative': ['TRUE']}, + {'attr': 'attrcaseIgnoreIA5Match', + 'positive_negative': ['sprain1']}, + {'attr': 'attrcaseIgnoreMatch', + 'positive_negative': ['ÇélIné Ändrè1']}, + {'attr': 'attrcaseIgnoreListMatch', + 'positive_negative': ['foo1$bar']}, + {'attr': 'attrobjectIdentifierMatch', + 'positive_negative': ['1.3.6.1.4.1.1466.115.121.1.15']}, + {'attr': 'attrdirectoryStringFirstComponentMatch', + 'positive_negative': ['ÇélIné Ändrè1']}, + {'attr': 'attrintegerMatch', + 'positive_negative': ['-2']}, + {'attr': 'attrintegerFirstComponentMatch', + 'positive_negative': ['cn=foo1,cn=bar']}, + {'attr': 'attrintegerFirstComponentMatch', + 'positive_negative': ['-2']}, + {'attr': 'attruniqueMemberMatch', + 'positive_negative': ["cn=foo1,cn=bar#'0001'B"]}, + {'attr': 'attrnumericStringMatch', + 'positive_negative': ['00001']}, + {'attr': 'attrtelephoneNumberMatch', + 'positive_negative': ['+1 408 555 4798']}, + {'attr': 'attroctetStringMatch', + 'positive_negative': ['AAAAAAAAAAAAAAE=']}] + + +@pytest.fixture(scope="module") +def _create_index_entry(topology_st): + """Create index entries. 
+ + :id: 9c93aec8-b87d-11e9-93b0-8c16451d917b + :setup: Standalone + :steps: + 1. Test index entries can be created. + :expectedresults: + 1. Pass + """ + indexes = Indexes(topology_st.standalone) + for cn_cn, index_type in LIST_CN_INDEX: + indexes.create(properties={ + 'cn': cn_cn, + 'nsSystemIndex': 'true', + 'nsIndexType': index_type + }) + + +@pytest.mark.parametrize("index", LIST_ATTR_INDEX) +def test_valid_invalid_attributes(topology_st, _create_index_entry, index): + """Test valid and invalid values of attributes + + :id: 93dc9e02-b87d-11e9-b39b-8c16451d917b + :parametrized: yes + :setup: Standalone + :steps: + 1. Create entry with an attribute that uses that matching rule + 2. Delete existing entry + 3. Create entry with an attribute that uses that matching rule providing duplicate + values that are duplicates according to the equality matching rule. + :expectedresults: + 1. Pass + 2. Pass + 3. Fail(ldap.TYPE_OR_VALUE_EXISTS) + """ + cos = CosTemplates(topology_st.standalone, DEFAULT_SUFFIX) + # Entry with extensibleObject + entry = cos.create(properties={'cn': 'addentry' + index['attr'], + index['attr']: index['positive']}) + entry.delete() + with pytest.raises(ldap.TYPE_OR_VALUE_EXISTS): + cos.create(properties={'cn': 'addentry' + index['attr'].split('attr')[1], + index['attr']: index['negative']}) + + +@pytest.mark.parametrize("mod", LIST_MOD_ATTR_ALL) +def test_mods(topology_st, _create_index_entry, mod): + """Test valid and invalid values of attributes mods + + :id: 8c15874c-b87d-11e9-9c5d-8c16451d917b + :parametrized: yes + :setup: Standalone + :steps: + 1. Create entry with an attribute that uses matching mod + 2. Add an attribute that uses that matching mod providing duplicate + values that are duplicates according to the equality matching. + 3. Delete existing entry + :expectedresults: + 1. Pass + 2. Fail(ldap.TYPE_OR_VALUE_EXISTS) + 3. 
Pass + """ + cos = CosTemplates(topology_st.standalone, DEFAULT_SUFFIX) + # Entry with extensibleObject + entry = cos.create(properties={'cn': 'addentry'+mod['attr'], + mod['attr']: mod['positive']}) + with pytest.raises(ldap.TYPE_OR_VALUE_EXISTS): + entry.add(mod['attr'], mod['negative']) + entry.delete() + + +@pytest.mark.parametrize("mode", LIST_MOD_REPLACE_ALL) +def test_mods_replace(topology_st, _create_index_entry, mode): + """Test mods replace + + :id: 2dd46b7a-b928-11e9-91dd-8c16451d917b + :parametrized: yes + :setup: Standalone + :steps: + 1. Create entry with an attribute that uses matching mode + 2. Add an attribute that uses that matching mode providing duplicate + values that are duplicates according to the equality matching. + 3. Delete existing entry + :expectedresults: + 1. Pass + 2. Fail(ldap.TYPE_OR_VALUE_EXISTS) + 3. Pass + """ + cos = CosTemplates(topology_st.standalone, DEFAULT_SUFFIX) + # Entry with extensibleObject + entry = cos.create(properties={'cn': 'addentry'+mode['attr'], + mode['attr']: mode['positive']}) + with pytest.raises(ldap.TYPE_OR_VALUE_EXISTS): + entry.replace(mode['attr'], mode['negative']) + entry.delete() + + +@pytest.mark.parametrize("mode", LIST_MOD_DEL_ALL) +def test_mods_delete(topology_st, _create_index_entry, mode): + """Test mods delete + + :id: 1dda055e-b928-11e9-b5c1-8c16451d917b + :parametrized: yes + :setup: Standalone + :steps: + 1. Create entry with an attribute that uses matching mode + 2. Add an attribute that uses that matching mode providing duplicate + values that are duplicates according to the equality matching. + 3. Delete existing entry + :expectedresults: + 1. Pass + 2. Fail(ldap.NO_SUCH_ATTRIBUTE) + 3. 
Pass + """ + cos = CosTemplates(topology_st.standalone, DEFAULT_SUFFIX) + # Entry with extensibleObject + entry = cos.create(properties={'cn': 'addentry'+mode['attr'], + mode['attr']: mode['positive_negative']}) + entry.remove(mode['attr'], mode['positive_negative'][0]) + with pytest.raises(ldap.NO_SUCH_ATTRIBUTE): + entry.remove(mode['attr'], mode['positive_negative'][0]) + entry.delete() + + +ATTR = ["( 2.16.840.1.113730.3.1.999999.0 NAME 'attroctetStringMatch' " + "DESC 'for testing matching rules' EQUALITY octetStringMatch " + "ORDERING octetStringOrderingMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.40 " + "X-ORIGIN 'matching rule tests' )", + "( 2.16.840.1.113730.3.1.999999.1 NAME 'attrbitStringMatch' DESC " + "'for testing matching rules' EQUALITY bitStringMatch SYNTAX " + "1.3.6.1.4.1.1466.115.121.1.6 X-ORIGIN 'matching rule tests' )", + "( 2.16.840.1.113730.3.1.999999.2 NAME 'attrcaseExactIA5Match' " + "DESC 'for testing matching rules' EQUALITY caseExactIA5Match " + "SUBSTR caseExactIA5SubstringsMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.26 " + "X-ORIGIN 'matching rule tests' )", + "( 2.16.840.1.113730.3.1.999999.3 NAME 'attrcaseExactMatch' DESC " + "'for testing matching rules' EQUALITY caseExactMatch ORDERING " + "caseExactOrderingMatch SUBSTR caseExactSubstringsMatch SYNTAX " + "1.3.6.1.4.1.1466.115.121.1.15 " + "X-ORIGIN 'matching rule tests' )", + "( 2.16.840.1.113730.3.1.999999.4 NAME 'attrgeneralizedTimeMatch' DESC " + "'for testing matching rules' EQUALITY generalizedTimeMatch ORDERING " + "generalizedTimeOrderingMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.24 " + "X-ORIGIN 'matching rule tests' )", + "( 2.16.840.1.113730.3.1.999999.5 NAME 'attrbooleanMatch' DESC " + "'for testing matching rules' EQUALITY booleanMatch SYNTAX " + "1.3.6.1.4.1.1466.115.121.1.7 X-ORIGIN 'matching rule tests' )", + "( 2.16.840.1.113730.3.1.999999.6 NAME 'attrcaseIgnoreIA5Match' DESC " + "'for testing matching rules' EQUALITY caseIgnoreIA5Match SUBSTR " + 
"caseIgnoreIA5SubstringsMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.26 " + "X-ORIGIN 'matching rule tests' )", + "( 2.16.840.1.113730.3.1.999999.7 NAME 'attrcaseIgnoreMatch' DESC " + "'for testing matching rules' EQUALITY caseIgnoreMatch ORDERING " + "caseIgnoreOrderingMatch SUBSTR caseIgnoreSubstringsMatch " + "SYNTAX 1.3.6.1.4.1.1466.115.121.1.15 X-ORIGIN 'matching rule tests' )", + "( 2.16.840.1.113730.3.1.999999.8 NAME 'attrcaseIgnoreListMatch' DESC " + "'for testing matching rules' EQUALITY caseIgnoreListMatch SUBSTR " + "caseIgnoreListSubstringsMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.41 " + "X-ORIGIN 'matching rule tests' )", + "( 2.16.840.1.113730.3.1.999999.9 NAME 'attrobjectIdentifierMatch' DESC " + "'for testing matching rules' EQUALITY objectIdentifierMatch SYNTAX " + "1.3.6.1.4.1.1466.115.121.1.38 X-ORIGIN 'matching rule tests' )", + "( 2.16.840.1.113730.3.1.999999.10 NAME 'attrdistinguishedNameMatch' DESC " + "'for testing matching rules' EQUALITY distinguishedNameMatch SYNTAX " + "1.3.6.1.4.1.1466.115.121.1.12 X-ORIGIN 'matching rule tests' )", + "( 2.16.840.1.113730.3.1.999999.11 NAME 'attrintegerMatch' DESC " + "'for testing matching rules' EQUALITY integerMatch ORDERING " + "integerOrderingMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.27 " + "X-ORIGIN 'matching rule tests' )", + "( 2.16.840.1.113730.3.1.999999.12 NAME 'attruniqueMemberMatch' DESC " + "'for testing matching rules' EQUALITY uniqueMemberMatch SYNTAX " + "1.3.6.1.4.1.1466.115.121.1.34 X-ORIGIN 'matching rule tests' )", + "( 2.16.840.1.113730.3.1.999999.13 NAME 'attrnumericStringMatch' DESC " + "'for testing matching rules' EQUALITY numericStringMatch ORDERING " + "numericStringOrderingMatch SUBSTR numericStringSubstringsMatch " + "SYNTAX 1.3.6.1.4.1.1466.115.121.1.36 X-ORIGIN 'matching rule tests' )", + "( 2.16.840.1.113730.3.1.999999.14 NAME 'attrtelephoneNumberMatch' DESC " + "'for testing matching rules' EQUALITY telephoneNumberMatch SUBSTR " + "telephoneNumberSubstringsMatch SYNTAX 
1.3.6.1.4.1.1466.115.121.1.50 " + "X-ORIGIN 'matching rule tests' )", + "( 2.16.840.1.113730.3.1.999999.15 NAME 'attrdirectoryStringFirstComponentMatch' " + "DESC 'for testing matching rules' EQUALITY directoryStringFirstComponentMatch " + "SYNTAX 1.3.6.1.4.1.1466.115.121.1.15 X-ORIGIN 'matching rule tests' )", + "( 2.16.840.1.113730.3.1.999999.16 NAME 'attrobjectIdentifierFirstComponentMatch' " + "DESC 'for testing matching rules' EQUALITY objectIdentifierFirstComponentMatch " + "SYNTAX 1.3.6.1.4.1.1466.115.121.1.38 X-ORIGIN 'matching rule tests' )", + "( 2.16.840.1.113730.3.1.999999.17 NAME 'attrintegerFirstComponentMatch' " + "DESC 'for testing matching rules' EQUALITY integerFirstComponentMatch SYNTAX " + "1.3.6.1.4.1.1466.115.121.1.27 X-ORIGIN 'matching rule tests' )"] + + +LIST_ATTR_TO_CREATE = [ + ('entryoctetStringMatch0', 'AAAAAAAAAAAAAAE='), + ('entryoctetStringMatch1', 'AAAAAAAAAAAAAAI='), + ('entryoctetStringMatch2', 'AAAAAAAAAAAAAAM='), + ('entryoctetStringMatch3', 'AAAAAAAAAAAAAAQ='), + ('entryoctetStringMatch4', 'AAAAAAAAAAAAAAU='), + ('entryoctetStringMatch5', 'AAAAAAAAAAAAAAY='), + ('entrybitStringMatch0', "'0001'B"), + ('entrybitStringMatch1', "'0010'B"), + ('entrybitStringMatch2', "'0011'B"), + ('entrybitStringMatch3', "'0100'B"), + ('entrybitStringMatch4', "'0101'B"), + ('entrybitStringMatch5', "'0110'B"), + ('entrycaseExactIA5Match0', "Sprain"), + ('entrycaseExactIA5Match1', "sPrain"), + ('entrycaseExactIA5Match2', "spRain"), + ('entrycaseExactIA5Match3', "sprAin"), + ('entrycaseExactIA5Match4', "spraIn"), + ('entrycaseExactIA5Match5', "sprain"), + ('entrycaseExactMatch0', "ÇélIné Ändrè"), + ('entrycaseExactMatch1', "ÇéliNé Ändrè"), + ('entrycaseExactMatch2', "Çéliné ÄndrÈ"), + ('entrycaseExactMatch3', "Çéliné Ändrè"), + ('entrycaseExactMatch4', "çÉliné Ändrè"), + ('entrygeneralizedTimeMatch0', "20100218171300Z"), + ('entrygeneralizedTimeMatch1', "20100218171301Z"), + ('entrygeneralizedTimeMatch2', "20100218171302Z"), + 
('entrygeneralizedTimeMatch3', "20100218171303Z"), + ('entrygeneralizedTimeMatch4', "20100218171304Z"), + ('entrygeneralizedTimeMatch5', "20100218171305Z"), + ('entrybooleanMatch0', "TRUE"), + ('entrybooleanMatch1', "FALSE"), + ('entrycaseIgnoreIA5Match0', "sprain1"), + ('entrycaseIgnoreIA5Match1', "sprain2"), + ('entrycaseIgnoreIA5Match2', "sprain3"), + ('entrycaseIgnoreIA5Match3', "sprain4"), + ('entrycaseIgnoreIA5Match4', "sprain5"), + ('entrycaseIgnoreIA5Match5', "sprain6"), + ('entrycaseIgnoreMatch0', "ÇélIné Ändrè1"), + ('entrycaseIgnoreMatch1', "ÇélIné Ändrè2"), + ('entrycaseIgnoreMatch2', "ÇélIné Ändrè3"), + ('entrycaseIgnoreMatch3', "ÇélIné Ändrè4"), + ('entrycaseIgnoreMatch4', "ÇélIné Ändrè5"), + ('entrycaseIgnoreMatch5', "ÇélIné Ändrè6"), + ('entrycaseIgnoreListMatch0', "foo1$bar"), + ('entrycaseIgnoreListMatch1', "foo2$bar"), + ('entrycaseIgnoreListMatch2', "foo3$bar"), + ('entrycaseIgnoreListMatch3', "foo4$bar"), + ('entrycaseIgnoreListMatch4', "foo5$bar"), + ('entrycaseIgnoreListMatch5', "foo6$bar"), + ('entryobjectIdentifierMatch0', "1.3.6.1.4.1.1466.115.121.1.15"), + ('entryobjectIdentifierMatch1', "1.3.6.1.4.1.1466.115.121.1.24"), + ('entryobjectIdentifierMatch2', "1.3.6.1.4.1.1466.115.121.1.26"), + ('entryobjectIdentifierMatch3', "1.3.6.1.4.1.1466.115.121.1.40"), + ('entryobjectIdentifierMatch4', "1.3.6.1.4.1.1466.115.121.1.41"), + ('entryobjectIdentifierMatch5', "1.3.6.1.4.1.1466.115.121.1.6"), + ('entrydistinguishedNameMatch0', "cn=foo1,cn=bar"), + ('entrydistinguishedNameMatch1', "cn=foo2,cn=bar"), + ('entrydistinguishedNameMatch2', "cn=foo3,cn=bar"), + ('entrydistinguishedNameMatch3', "cn=foo4,cn=bar"), + ('entrydistinguishedNameMatch4', "cn=foo5,cn=bar"), + ('entrydistinguishedNameMatch5', "cn=foo6,cn=bar"), + ('entryintegerMatch0', "-2"), + ('entryintegerMatch1', "-1"), + ('entryintegerMatch2', "0"), + ('entryintegerMatch3', "1"), + ('entryintegerMatch4', "2"), + ('entryintegerMatch5', "3"), + ('entryuniqueMemberMatch0', 
"cn=foo1,cn=bar#'0001'B"), + ('entryuniqueMemberMatch1', "cn=foo2,cn=bar#'0010'B"), + ('entryuniqueMemberMatch2', "cn=foo3,cn=bar#'0011'B"), + ('entryuniqueMemberMatch3', "cn=foo4,cn=bar#'0100'B"), + ('entryuniqueMemberMatch4', "cn=foo5,cn=bar#'0101'B"), + ('entryuniqueMemberMatch5', "cn=foo6,cn=bar#'0110'B"), + ('entrynumericStringMatch0', "00001"), + ('entrynumericStringMatch1', "00002"), + ('entrynumericStringMatch2', "00003"), + ('entrynumericStringMatch3', "00004"), + ('entrynumericStringMatch4', "00005"), + ('entrynumericStringMatch5', "00006"), + ('entrytelephoneNumberMatch0', "+1 408 555 4798"), + ('entrytelephoneNumberMatch1', "+1 408 555 5625"), + ('entrytelephoneNumberMatch2', "+1 408 555 6201"), + ('entrytelephoneNumberMatch3', "+1 408 555 8585"), + ('entrytelephoneNumberMatch4', "+1 408 555 9187"), + ('entrytelephoneNumberMatch5', "+1 408 555 9423"), + ('entrydirectoryStringFirstComponentMatch0', "ÇélIné Ändrè1"), + ('entrydirectoryStringFirstComponentMatch1', "ÇélIné Ändrè2"), + ('entrydirectoryStringFirstComponentMatch2', "ÇélIné Ändrè3"), + ('entrydirectoryStringFirstComponentMatch3', "ÇélIné Ändrè4"), + ('entrydirectoryStringFirstComponentMatch4', "ÇélIné Ändrè5"), + ('entrydirectoryStringFirstComponentMatch5', "ÇélIné Ändrè6"), + ('entryobjectIdentifierFirstComponentMatch0', "1.3.6.1.4.1.1466.115.121.1.15"), + ('entryobjectIdentifierFirstComponentMatch1', "1.3.6.1.4.1.1466.115.121.1.24"), + ('entryobjectIdentifierFirstComponentMatch2', "1.3.6.1.4.1.1466.115.121.1.26"), + ('entryobjectIdentifierFirstComponentMatch3', "1.3.6.1.4.1.1466.115.121.1.40"), + ('entryobjectIdentifierFirstComponentMatch4', "1.3.6.1.4.1.1466.115.121.1.41"), + ('entryobjectIdentifierFirstComponentMatch5', "1.3.6.1.4.1.1466.115.121.1.6"), + ('entryintegerFirstComponentMatch0', "-2"), + ('entryintegerFirstComponentMatch1', "-1"), + ('entryintegerFirstComponentMatch2', "0"), + ('entryintegerFirstComponentMatch3', "1"), + ('entryintegerFirstComponentMatch4', "2"), + 
('entryintegerFirstComponentMatch5', "3")] + + +@pytest.fixture(scope="module") +def _create_entries(topology_st): + """ + Add attribute types to schema and Create filter + entries(Entry with extensibleObject) + """ + for attribute in ATTR: + Schema(topology_st.standalone).add('attributetypes', attribute) + cos = CosTemplates(topology_st.standalone, DEFAULT_SUFFIX) + # Entry with extensibleObject + for attr, value in LIST_ATTR_TO_CREATE: + cos.create(properties={ + 'cn': attr, + 'attr' + attr.split('entry')[1][:-1]: value + }) + + +FILTER_VALUES = [ + ["(attrbitStringMatch='0001'B)", 1, + "(attrbitStringMatch:bitStringMatch:='000100000'B)"], + ["(attrgeneralizedTimeMatch=20100218171300Z)", 1, + "(attrcaseExactIA5Match=SPRAIN)"], + ["(attrcaseExactMatch>=ÇélIné Ändrè)", 5, + "(attrcaseExactMatch=ÇéLINé ÄNDRè)"], + ["(attrcaseExactMatch:caseExactMatch:=ÇélIné Ändrè)", 1, + "(attrcaseExactMatch>=çéliné ändrè)"], + ["(attrcaseExactIA5Match=Sprain)", 1, + "(attrgeneralizedTimeMatch=20300218171300Z)"], + ["(attrbooleanMatch=TRUE)", 1, + "(attrgeneralizedTimeMatch>=20300218171300Z)"], + ["(attrcaseIgnoreIA5Match=sprain1)", 1, + "(attrcaseIgnoreIA5Match=sprain9999)"], + ["(attrcaseIgnoreMatch=ÇélIné Ändrè1)", 1, + "(attrcaseIgnoreMatch=ÇélIné Ändrè9999)"], + ["(attrcaseIgnoreMatch>=ÇélIné Ändrè1)", 6, + "(attrcaseIgnoreMatch>=ÇélIné Ändrè9999)"], + ["(attrcaseIgnoreListMatch=foo1$bar)", 1, + "(attrcaseIgnoreListMatch=foo1$bar$baz$biff)"], + ["(attrobjectIdentifierMatch=1.3.6.1.4.1.1466.115.121.1.15)", 1, + "(attrobjectIdentifierMatch=1.3.6.1.4.1.1466.115.121.1.15.99999)"], + ["(attrgeneralizedTimeMatch>=20100218171300Z)", 6, + "(attrdirectoryStringFirstComponentMatch=ÇélIné Ändrè9999)"], + ["(attrdirectoryStringFirstComponentMatch=ÇélIné Ändrè1)", 1, + "(attrobjectIdentifierFirstComponentMatch=1.3.6.1.4.1.1466.115.121.1.15.99999)"], + ["(attrobjectIdentifierFirstComponentMatch=1.3.6.1.4.1.1466.115.121.1.15)", 1, + "(attrdistinguishedNameMatch=cn=foo1,cn=bar,cn=baz)"], + 
["(attrdistinguishedNameMatch=cn=foo1,cn=bar)", 1, + "(attrintegerMatch=-20)"], + ["(attrintegerMatch=-2)", 1, + "(attrintegerMatch>=20)"], + ["(attrintegerMatch>=-2)", 6, + "(attrintegerFirstComponentMatch=-20)"], + ["(attrintegerFirstComponentMatch=-2)", 1, + "(attruniqueMemberMatch=cn=foo1,cn=bar#'00010000'B)"], + ["(attruniqueMemberMatch=cn=foo1,cn=bar#'0001'B)", 1, + "(attrnumericStringMatch=000000001)"], + ["(attrnumericStringMatch=00001)", 1, + "(attrnumericStringMatch>=01)"], + ["(attrnumericStringMatch>=00001)", 6, + "(attrtelephoneNumberMatch=+2 408 555 4798)"], + ["(attrtelephoneNumberMatch=+1 408 555 4798)", 1, + "(attroctetStringMatch=AAAAAAAAAAAAAAEB)"], + ["(attroctetStringMatch=AAAAAAAAAAAAAAE=)", 1, + "(attroctetStringMatch>=AAAAAAAAAAABAQE=)"], + ["(attroctetStringMatch>=AAAAAAAAAAAAAAE=)", 6, + "(attrdirectoryStringFirstComponentMatch=ÇélIné Ändrè9998)"]] + + +def test_search_positive_negative(topology_st, _create_entries): + """Filters with positive and with no output. + + :id: abe3e6dd-9ecc-12e8-adf0-8c16451d917b + :parametrized: yes + :setup: Standalone + :steps: + 1.For valid filer output should match the exact value given. + 2. For invalid filter there should not be any output. + :expectedresults: + 1. Pass + 2. 
Pass + """ + cos = CosTemplates(topology_st.standalone, DEFAULT_SUFFIX) + for attr, value, negative_filter in FILTER_VALUES: + assert len(cos.filter(attr)) == value + assert not cos.filter(negative_filter) + + +LIST_EXT_ATTR_COUNT = [ + ("(attrbitStringMatch:bitStringMatch:='0001'B)", 1), + ("(attrcaseExactIA5Match:caseExactIA5Match:=Sprain)", 1), + ("(attrcaseExactMatch:caseExactMatch:=ÇélIné Ändrè)", 1), + ("(attrcaseExactMatch:caseExactOrderingMatch:=ÇélIné Ändrè)", 5), + ("(attrgeneralizedTimeMatch:generalizedTimeMatch:=20100218171300Z)", 1), + ("(attrgeneralizedTimeMatch:generalizedTimeOrderingMatch:=20100218171300Z)", 6), + ("(attrbooleanMatch:booleanMatch:=TRUE)", 1), + ("(attrcaseIgnoreIA5Match:caseIgnoreIA5Match:=sprain1)", 1), + ("(attrcaseIgnoreMatch:caseIgnoreMatch:=ÇélIné Ändrè1)", 1), + ("(attrcaseIgnoreMatch:caseIgnoreOrderingMatch:=ÇélIné Ändrè1)", 6), + ("(attrcaseIgnoreListMatch:caseIgnoreListMatch:=foo1$bar)", 1), + ("(attrobjectIdentifierMatch:objectIdentifierMatch:=1.3.6.1.4.1.1466.115.121.1.15)", 1), + ("(attrdirectoryStringFirstComponentMatch:directoryString" + "FirstComponentMatch:=ÇélIné Ändrè1)", 1), + ("(attrobjectIdentifierFirstComponentMatch:objectIdentifier" + "FirstComponentMatch:=1.3.6.1.4.1.1466.115.121.1.15)", 1), + ("(attrdistinguishedNameMatch:distinguishedNameMatch:=cn=foo1,cn=bar)", 1), + ("(attrintegerMatch:integerMatch:=-2)", 1), + ("(attrintegerMatch:integerOrderingMatch:=-2)", 6), + ("(attrintegerFirstComponentMatch:integerFirstComponentMatch:=-2)", 1), + ("(attruniqueMemberMatch:uniqueMemberMatch:=cn=foo1,cn=bar#'0001'B)", 1), + ("(attrnumericStringMatch:numericStringMatch:=00001)", 1), + ("(attrnumericStringMatch:numericStringMatch:=00001)", 1), + ("(attrtelephoneNumberMatch:telephoneNumberMatch:=+1 408 555 4798)", 1), + ("(attroctetStringMatch:octetStringMatch:=AAAAAAAAAAAAAAE=)", 1), + ("(attroctetStringMatch:octetStringOrderingMatch:=AAAAAAAAAAAAAAE=)", 6), + ("(attrcaseExactMatch=*ÇélIné Ändrè*)", 1), + 
("(attrcaseExactMatch=ÇélIné Ändrè*)", 1), + ("(attrcaseExactMatch=*ÇélIné Ändrè)", 1), + ("(attrcaseExactMatch=*é Ä*)", 5), + ("(attrcaseExactIA5Match=*Sprain*)", 1), + ("(attrcaseExactIA5Match=Sprain*)", 1), + ("(attrcaseExactIA5Match=*Sprain)", 1), + ("(attrcaseExactIA5Match=*rai*)", 3), + ("(attrcaseIgnoreIA5Match=*sprain1*)", 1), + ("(attrcaseIgnoreIA5Match=sprain1*)", 1), + ("(attrcaseIgnoreIA5Match=*sprain1)", 1), + ("(attrcaseIgnoreIA5Match=*rai*)", 6), + ("(attrcaseIgnoreMatch=*ÇélIné Ändrè1*)", 1), + ("(attrcaseIgnoreMatch=ÇélIné Ändrè1*)", 1), + ("(attrcaseIgnoreMatch=*ÇélIné Ändrè1)", 1), + ("(attrcaseIgnoreMatch=*é Ä*)", 6), + ("(attrcaseIgnoreListMatch=*foo1$bar*)", 1), + ("(attrcaseIgnoreListMatch=foo1$bar*)", 1), + ("(attrcaseIgnoreListMatch=*foo1$bar)", 1), + ("(attrcaseIgnoreListMatch=*1$b*)", 1), + ("(attrnumericStringMatch=*00001*)", 1), + ("(attrnumericStringMatch=00001*)", 1), + ("(attrnumericStringMatch=*00001)", 1), + ("(attrnumericStringMatch=*000*)", 6), + ("(attrtelephoneNumberMatch=*+1 408 555 4798*)", 1), + ("(attrtelephoneNumberMatch=+1 408 555 4798*)", 1), + ("(attrtelephoneNumberMatch=*+1 408 555 4798)", 1), + ("(attrtelephoneNumberMatch=* 55*)", 6)] + + +@pytest.mark.parametrize("attr, value", LIST_EXT_ATTR_COUNT) +def test_do_extensible_search(topology_st, _create_entries, attr, value): + """Match filter and output. + + :id: 50dd45c4-061f-43ce-843c-19c44da1e9b8 + :parametrized: yes + :setup: Standalone + :steps: + 1. Filer output should match the exact value given. + :expectedresults: + 1. 
Pass + """ + cos = CosTemplates(topology_st.standalone, DEFAULT_SUFFIX) + assert len(cos.filter(attr)) == value + + +if __name__ == '__main__': + CURRENT_FILE = os.path.realpath(__file__) + pytest.main("-s -v %s" % CURRENT_FILE) diff --git a/dirsrvtests/tests/suites/filter/filter_indexing_test.py b/dirsrvtests/tests/suites/filter/filter_indexing_test.py new file mode 100644 index 0000000..9fba289 --- /dev/null +++ b/dirsrvtests/tests/suites/filter/filter_indexing_test.py @@ -0,0 +1,170 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2019 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. +# --- END COPYRIGHT BLOCK ---- + + +""" +verify and testing indexing Filter from a search +""" + +import os +import pytest + +from lib389._constants import DEFAULT_SUFFIX, PW_DM +from lib389.topologies import topology_st as topo +from lib389.idm.user import UserAccounts +from lib389.idm.account import Accounts +from lib389.cos import CosTemplates +from lib389.schema import Schema + +pytestmark = pytest.mark.tier1 + + +FILTERS = ["(|(|(ou=nothing1)(ou=people))(|(ou=nothing2)(ou=nothing3)))", + "(|(|(ou=people)(ou=nothing1))(|(ou=nothing2)(ou=nothing3)))", + "(|(|(ou=nothing1)(ou=nothing2))(|(ou=people)(ou=nothing3)))", + "(|(|(ou=nothing1)(ou=nothing2))(|(ou=nothing3)(ou=people)))", + "(&(sn<=0000000000000000)(givenname>=FFFFFFFFFFFFFFFF))", + "(&(sn>=0000000000000000)(sn<=1111111111111111))", + "(&(sn>=0000000000000000)(givenname<=FFFFFFFFFFFFFFFF))"] + +INDEXES = ["(uidNumber=18446744073709551617)", + "(gidNumber=18446744073709551617)", + "(MYINTATTR=18446744073709551617)", + "(&(uidNumber=*)(!(uidNumber=18446744073709551617)))", + "(&(gidNumber=*)(!(gidNumber=18446744073709551617)))", + "(&(uidNumber=*)(!(gidNumber=18446744073709551617)))", + "(&(myintattr=*)(!(myintattr=18446744073709551617)))", + "(uidNumber>=-18446744073709551617)", + "(gidNumber>=-18446744073709551617)", + 
"(uidNumber<=18446744073709551617)", + "(gidNumber<=18446744073709551617)", + "(myintattr<=18446744073709551617)"] + + +INDEXES_FALSE = ["(gidNumber=54321)", + "(uidNumber=54321)", + "(myintattr=54321)", + "(gidNumber<=-999999999999999999999999999999)", + "(uidNumber<=-999999999999999999999999999999)", + "(myintattr<=-999999999999999999999999999999)", + "(gidNumber>=999999999999999999999999999999)", + "(uidNumber>=999999999999999999999999999999)", + "(myintattr>=999999999999999999999999999999)"] + + +@pytest.fixture(scope="module") +def _create_entries(topo): + """ + Will create necessary users for this script. + """ + # Creating Users + users_people = UserAccounts(topo.standalone, DEFAULT_SUFFIX) + + for count in range(3): + users_people.create(properties={ + 'ou': ['Accounting', 'People'], + 'cn': f'User {count}F', + 'sn': f'{count}' * 16, + 'givenname': 'FFFFFFFFFFFFFFFF', + 'uid': f'user{count}F', + 'mail': f'user{count}F@test.com', + 'manager': f'uid=user{count}F,ou=People,{DEFAULT_SUFFIX}', + 'userpassword': PW_DM, + 'homeDirectory': '/home/' + f'user{count}F', + 'uidNumber': '1000', + 'gidNumber': '2000', + }) + + cos = CosTemplates(topo.standalone, DEFAULT_SUFFIX, rdn='ou=People') + for user, number, des in [('a', '18446744073709551617', '2^64+1'), + ('b', '18446744073709551618', '2^64+1'), + ('c', '-18446744073709551617', '-2^64+1'), + ('d', '-18446744073709551618', '-2^64+1'), + ('e', '0', '0'), + ('f', '2', '2'), + ('g', '-2', '-2')]: + cos.create(properties={ + 'cn': user, + 'uidnumber': number, + 'gidnumber': number, + 'myintattr': number, + 'description': f'uidnumber value {des} - gidnumber is same but not indexed' + }) + + +@pytest.mark.parametrize("real_value", FILTERS) +def test_positive(topo, _create_entries, real_value): + """Test positive filters + + :id: 57243326-91ae-11e9-aca3-8c16451d917b + :parametrized: yes + :setup: Standalone + :steps: + 1. Try to pass filter rules as per the condition . + :expectedresults: + 1. 
Pass + """ + assert Accounts(topo.standalone, DEFAULT_SUFFIX).filter(real_value) + + +def test_indexing_schema(topo, _create_entries): + """Test with schema + + :id: 67a2179a-91ae-11e9-9a33-8c16451d917b + :setup: Standalone + :steps: + 1. Add attribute types to Schema. + 2. Try to pass filter rules as per the condition . + :expectedresults: + 1. Pass + 2. Pass + """ + cos = CosTemplates(topo.standalone, DEFAULT_SUFFIX, rdn='ou=People') + Schema(topo.standalone).add('attributetypes', + "( 8.9.10.11.12.13.14.15 NAME 'myintattr' DESC 'for integer " + "syntax index ordering testing' EQUALITY integerMatch ORDERING " + "integerOrderingMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.27 )") + topo.standalone.restart() + assert cos.filter("(myintattr>=-18446744073709551617)") + + +@pytest.mark.parametrize("real_value", INDEXES) +def test_indexing(topo, _create_entries, real_value): + """Test positive index filters + + :id: 7337589a-91ae-11e9-ad44-8c16451d917b + :parametrized: yes + :setup: Standalone + :steps: + 1. Try to pass filter rules as per the condition . + :expectedresults: + 1. Pass + """ + cos = CosTemplates(topo.standalone, DEFAULT_SUFFIX, rdn='ou=People') + assert cos.filter(real_value) + + +@pytest.mark.parametrize("real_value", INDEXES_FALSE) +def test_indexing_negative(topo, _create_entries, real_value): + """Test negative index filters + + :id: 7e19deae-91ae-11e9-900c-8c16451d917b + :parametrized: yes + :setup: Standalone + :steps: + 1. Try to pass negative filter rules as per the condition . + :expectedresults: + 1. 
Fail + """ + cos = CosTemplates(topo.standalone, DEFAULT_SUFFIX, rdn='ou=People') + assert not cos.filter(real_value) + + +if __name__ == '__main__': + CURRENT_FILE = os.path.realpath(__file__) + pytest.main("-s -v %s" % CURRENT_FILE) diff --git a/dirsrvtests/tests/suites/filter/filter_logic_test.py b/dirsrvtests/tests/suites/filter/filter_logic_test.py new file mode 100644 index 0000000..7995336 --- /dev/null +++ b/dirsrvtests/tests/suites/filter/filter_logic_test.py @@ -0,0 +1,451 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2020 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. +# --- END COPYRIGHT BLOCK --- +# + +import pytest +import ldap + +from lib389.topologies import topology_st +from lib389._constants import DEFAULT_SUFFIX + +from lib389.idm.user import UserAccount, UserAccounts + +pytestmark = pytest.mark.tier1 + +""" +This test case asserts that various logical filters apply correctly and as expected. +This is to assert that we have correct and working search operations, especially related +to indexed content from filterindex.c and idl_sets. + +important to note, some tests check greater than 10 elements to assert that k-way intersect +works, where as most of these actually hit the filtertest threshold so they early return. 
+""" + +USER0_DN = 'uid=user0,ou=people,%s' % DEFAULT_SUFFIX +USER1_DN = 'uid=user1,ou=people,%s' % DEFAULT_SUFFIX +USER2_DN = 'uid=user2,ou=people,%s' % DEFAULT_SUFFIX +USER3_DN = 'uid=user3,ou=people,%s' % DEFAULT_SUFFIX +USER4_DN = 'uid=user4,ou=people,%s' % DEFAULT_SUFFIX +USER5_DN = 'uid=user5,ou=people,%s' % DEFAULT_SUFFIX +USER6_DN = 'uid=user6,ou=people,%s' % DEFAULT_SUFFIX +USER7_DN = 'uid=user7,ou=people,%s' % DEFAULT_SUFFIX +USER8_DN = 'uid=user8,ou=people,%s' % DEFAULT_SUFFIX +USER9_DN = 'uid=user9,ou=people,%s' % DEFAULT_SUFFIX +USER10_DN = 'uid=user10,ou=people,%s' % DEFAULT_SUFFIX +USER11_DN = 'uid=user11,ou=people,%s' % DEFAULT_SUFFIX +USER12_DN = 'uid=user12,ou=people,%s' % DEFAULT_SUFFIX +USER13_DN = 'uid=user13,ou=people,%s' % DEFAULT_SUFFIX +USER14_DN = 'uid=user14,ou=people,%s' % DEFAULT_SUFFIX +USER15_DN = 'uid=user15,ou=people,%s' % DEFAULT_SUFFIX +USER16_DN = 'uid=user16,ou=people,%s' % DEFAULT_SUFFIX +USER17_DN = 'uid=user17,ou=people,%s' % DEFAULT_SUFFIX +USER18_DN = 'uid=user18,ou=people,%s' % DEFAULT_SUFFIX +USER19_DN = 'uid=user19,ou=people,%s' % DEFAULT_SUFFIX + +@pytest.fixture(scope="module") +def topology_st_f(topology_st): + # Add our users to the topology_st + users = UserAccounts(topology_st.standalone, DEFAULT_SUFFIX) + for i in range(0, 20): + users.create(properties={ + 'uid': 'user%s' % i, + 'cn': 'user%s' % i, + 'sn': '%s' % i, + 'uidNumber': '%s' % i, + 'gidNumber': '%s' % i, + 'homeDirectory': '/home/user%s' % i + }) + + + demo_user = UserAccount(topology_st.standalone, "uid=demo_user,ou=people,dc=example,dc=com") + demo_user.delete() + # return it + # print("ATTACH NOW") + # import time + # time.sleep(30) + return topology_st.standalone + +def _check_filter(topology_st_f, filt, expect_len, expect_dns): + # print("checking %s" % filt) + results = topology_st_f.search_s("ou=people,%s" % DEFAULT_SUFFIX, ldap.SCOPE_ONELEVEL, filt, ['uid',]) + assert len(results) == expect_len + result_dns = [result.dn.lower() for result in 
results] + assert set(expect_dns) == set(result_dns) + + +def test_eq(topology_st_f): + """Test filter logic with "equal to" operator + + :id: 1b0b7e59-a5ac-4825-8d36-525f4f0149a9 + :setup: Standalone instance with 20 test users added + from uid=user0 to uid=user20 + :steps: + 1. Search for test users with filter ``(uid=user0)`` + :expectedresults: + 1. There should be 1 user listed user0 + """ + _check_filter(topology_st_f, '(uid=user0)', 1, [USER0_DN]) + + +def test_sub(topology_st_f): + """Test filter logic with "sub" + + :id: 8cfa946d-7ddf-4f8e-9f9f-39da8f35304e + :setup: Standalone instance with 20 test users added + from uid=user0 to uid=user20 + :steps: + 1. Search for test users with filter ``(uid=user*)`` + :expectedresults: + 1. There should be 20 users listed from user0 to user19 + """ + _check_filter(topology_st_f, '(uid=user*)', 20, [ + USER0_DN, USER1_DN, USER2_DN, USER3_DN, USER4_DN, + USER5_DN, USER6_DN, USER7_DN, USER8_DN, USER9_DN, + USER10_DN, USER11_DN, USER12_DN, USER13_DN, USER14_DN, + USER15_DN, USER16_DN, USER17_DN, USER18_DN, USER19_DN + ]) + + +def test_not_eq(topology_st_f): + """Test filter logic with "not equal to" operator + + :id: 1422ec65-421d-473b-89ba-649f8decc1ab + :setup: Standalone instance with 20 test users added + from uid=user0 to uid=user20 + :steps: + 1. Search for test users with filter ``(!(uid=user0))`` + :expectedresults: + 1. There should be 19 users listed from user1 to user19 + """ + _check_filter(topology_st_f, '(!(uid=user0))', 19, [ + USER1_DN, USER2_DN, USER3_DN, USER4_DN, USER5_DN, + USER6_DN, USER7_DN, USER8_DN, USER9_DN, + USER10_DN, USER11_DN, USER12_DN, USER13_DN, USER14_DN, + USER15_DN, USER16_DN, USER17_DN, USER18_DN, USER19_DN + ]) + +# More not cases? + +def test_ranges(topology_st_f): + """Test filter logic with range + + :id: cc7c25f0-6a6e-465b-8d32-7fcc1aec84ee + :setup: Standalone instance with 20 test users added + from uid=user0 to uid=user20 + :steps: + 1. 
Search for test users with filter ``(uid>=user5)`` + 2. Search for test users with filter ``(uid<=user4)`` + 3. Search for test users with filter ``(uid>=ZZZZ)`` + 4. Search for test users with filter ``(uid<=aaaa)`` + :expectedresults: + 1. There should be 5 users listed from user5 to user9 + 2. There should be 15 users listed from user0 to user4 + and from user10 to user19 + 3. There should not be any user listed + 4. There should not be any user listed + """ + + ### REMEMBER: user10 is less than user5 because it's strcmp!!! + _check_filter(topology_st_f, '(uid>=user5)', 5, [ + USER5_DN, USER6_DN, USER7_DN, USER8_DN, USER9_DN, + ]) + _check_filter(topology_st_f, '(uid<=user4)', 15, [ + USER0_DN, USER1_DN, USER2_DN, USER3_DN, USER4_DN, + USER10_DN, USER11_DN, USER12_DN, USER13_DN, USER14_DN, + USER15_DN, USER16_DN, USER17_DN, USER18_DN, USER19_DN + ]) + _check_filter(topology_st_f, '(uid>=ZZZZ)', 0, []) + _check_filter(topology_st_f, '(uid<=aaaa)', 0, []) + + +def test_and_eq(topology_st_f): + """Test filter logic with "AND" operator + + :id: 4721fd7c-8d0b-43e6-b2e8-a5bac7674f99 + :setup: Standalone instance with 20 test users added + from uid=user0 to uid=user20 + :steps: + 1. Search for test users with filter ``(&(uid=user0)(cn=user0))`` + 2. Search for test users with filter ``(&(uid=user0)(cn=user1))`` + 3. Search for test users with filter ``(&(uid=user0)(cn=user0)(sn=0))`` + 4. Search for test users with filter ``(&(uid=user0)(cn=user1)(sn=0))`` + 5. Search for test users with filter ``(&(uid=user0)(cn=user0)(sn=1))`` + :expectedresults: + 1. There should be 1 user listed i.e. user0 + 2. There should not be any user listed + 3. There should be 1 user listed i.e. user0 + 4. There should not be any user listed + 5. 
There should not be any user listed + """ + _check_filter(topology_st_f, '(&(uid=user0)(cn=user0))', 1, [USER0_DN]) + _check_filter(topology_st_f, '(&(uid=user0)(cn=user1))', 0, []) + _check_filter(topology_st_f, '(&(uid=user0)(cn=user0)(sn=0))', 1, [USER0_DN]) + _check_filter(topology_st_f, '(&(uid=user0)(cn=user1)(sn=0))', 0, []) + _check_filter(topology_st_f, '(&(uid=user0)(cn=user0)(sn=1))', 0, []) + + +def test_range(topology_st_f): + """Test filter logic with range + + :id: 617e6290-866e-4b5d-a300-d8f1715ad052 + :setup: Standalone instance with 20 test users added + from uid=user0 to uid=user20 + :steps: + 1. Search for test users with filter ``(&(uid>=user5)(cn<=user7))`` + :expectedresults: + 1. There should be 3 users listed i.e. user5 to user7 + """ + _check_filter(topology_st_f, '(&(uid>=user5)(cn<=user7))', 3, [ + USER5_DN, USER6_DN, USER7_DN + ]) + + +def test_and_allid_shortcut(topology_st_f): + """Test filter logic with "AND" operator + and shortcuts + + :id: f4784752-d269-4ceb-aada-fafe0a5fc14c + :setup: Standalone instance with 20 test users added + from uid=user0 to uid=user20 + :steps: + 1. Search for test users with filter ``(&(objectClass=*)(uid=user0)(cn=user0))`` + 2. Search for test users with filter ``(&(uid=user0)(cn=user0)(objectClass=*))`` + :expectedresults: + 1. There should be 1 user listed i.e. user0 + 2. There should be 1 user listed i.e. user0 + """ + _check_filter(topology_st_f, '(&(objectClass=*)(uid=user0)(cn=user0))', 1, [USER0_DN]) + _check_filter(topology_st_f, '(&(uid=user0)(cn=user0)(objectClass=*))', 1, [USER0_DN]) + + +def test_or_eq(topology_st_f): + """Test filter logic with "or" and "equal to" operators + + :id: a23a4fc9-0f5c-49ce-b1f7-6ac10bcd7763 + :setup: Standalone instance with 20 test users added + from uid=user0 to uid=user20 + :steps: + 1. Search for test users with filter ``|(uid=user0)(cn=user0)`` + 2. Search for test users with filter ``(|(uid=user0)(uid=user1))`` + 3. 
Search for test users with filter ``(|(uid=user0)(cn=user0)(sn=0))`` + 4. Search for test users with filter ``(|(uid=user0)(uid=user1)(sn=0))`` + 5. Search for test users with filter ``(|(uid=user0)(uid=user1)(uid=user2))`` + :expectedresults: + 1. There should be 1 user listed i.e. user0 + 2. There should be 2 users listed i.e. user0 and user1 + 3. There should be 1 user listed i.e. user0 + 4. There should be 2 users listed i.e. user0 and user1 + 5. There should be 3 users listed i.e. user0 to user2 + """ + _check_filter(topology_st_f, '(|(uid=user0)(cn=user0))', 1, [USER0_DN]) + _check_filter(topology_st_f, '(|(uid=user0)(uid=user1))', 2, [USER0_DN, USER1_DN]) + _check_filter(topology_st_f, '(|(uid=user0)(cn=user0)(sn=0))', 1, [USER0_DN]) + _check_filter(topology_st_f, '(|(uid=user0)(uid=user1)(sn=0))', 2, [USER0_DN, USER1_DN]) + _check_filter(topology_st_f, '(|(uid=user0)(uid=user1)(uid=user2))', 3, [USER0_DN, USER1_DN, USER2_DN]) + + +def test_and_not_eq(topology_st_f): + """Test filter logic with "not equal" to operator + + :id: bd00cb2b-35bb-49c0-8387-f60a6ada7c87 + :setup: Standalone instance with 20 test users added + from uid=user0 to uid=user20 + :steps: + 1. Search for test users with filter ``(&(uid=user0)(!(cn=user0)))`` + 2. Search for test users with filter ``(&(uid=*)(!(uid=user0)))`` + :expectedresults: + 1. There should be no users listed + 2. There should be 19 users listed i.e. 
user1 to user19 + """ + _check_filter(topology_st_f, '(&(uid=user0)(!(cn=user0)))', 0, []) + _check_filter(topology_st_f, '(&(uid=*)(!(uid=user0)))', 19, [ + USER1_DN, USER2_DN, USER3_DN, USER4_DN, USER5_DN, + USER6_DN, USER7_DN, USER8_DN, USER9_DN, + USER10_DN, USER11_DN, USER12_DN, USER13_DN, USER14_DN, + USER15_DN, USER16_DN, USER17_DN, USER18_DN, USER19_DN + ]) + + +def test_or_not_eq(topology_st_f): + """Test filter logic with "OR and NOT" operators + + :id: 8f62f339-72c9-49e4-8126-b2a14e61b9c0 + :setup: Standalone instance with 20 test users added + from uid=user0 to uid=user20 + :steps: + 1. Search for test users with filter ``(|(!(uid=user0))(!(uid=user1)))`` + :expectedresults: + 1. There should be 20 users listed i.e. user0 to user19 + """ + _check_filter(topology_st_f, '(|(!(uid=user0))(!(uid=user1)))', 20, [ + USER0_DN, USER1_DN, USER2_DN, USER3_DN, USER4_DN, + USER5_DN, USER6_DN, USER7_DN, USER8_DN, USER9_DN, + USER10_DN, USER11_DN, USER12_DN, USER13_DN, USER14_DN, + USER15_DN, USER16_DN, USER17_DN, USER18_DN, USER19_DN + ]) + + +def test_and_range(topology_st_f): + """Test filter logic with range + + :id: 8e5a0e2a-4ee1-4cd7-b5ec-90ad4d3ace64 + :setup: Standalone instance with 20 test users added + from uid=user0 to uid=user20 + :steps: + 1. Search for test users with filter ``(&(uid>=user5)(uid=user6))`` + 2. Search for test users with filter ``(&(uid>=user5)(uid=user0))`` + 3. Search for test users with filter ``(&(uid>=user5)(uid=user6)(sn=6))`` + 4. Search for test users with filter ``(&(uid>=user5)(uid=user0)(sn=0))`` + 5. Search for test users with filter ``(&(uid>=user5)(uid=user0)(sn=1))`` + 6. Search for test users with filter ``(&(uid>=user5)(uid>=user6))`` + 7. Search for test users with filter ``(&(uid>=user5)(uid>=user6)(uid>=user7))`` + :expectedresults: + 1. There should be 1 user listed i.e. user6 + 2. There should be no users listed + 3. There should be 1 user listed i.e. user6 + 4. There should be no users listed + 5. 
There should be no users listed + 6. There should be 4 users listed i.e. user6 to user9 + 7. There should be 3 users listed i.e. user7 to user9 + """ + # These all hit shortcut cases. + _check_filter(topology_st_f, '(&(uid>=user5)(uid=user6))', 1, [USER6_DN]) + _check_filter(topology_st_f, '(&(uid>=user5)(uid=user0))', 0, []) + _check_filter(topology_st_f, '(&(uid>=user5)(uid=user6)(sn=6))', 1, [USER6_DN]) + _check_filter(topology_st_f, '(&(uid>=user5)(uid=user0)(sn=0))', 0, []) + _check_filter(topology_st_f, '(&(uid>=user5)(uid=user0)(sn=1))', 0, []) + # These all take 2-way or k-way cases. + _check_filter(topology_st_f, '(&(uid>=user5)(uid>=user6))', 4, [ + USER6_DN, USER7_DN, USER8_DN, USER9_DN, + ]) + _check_filter(topology_st_f, '(&(uid>=user5)(uid>=user6)(uid>=user7))', 3, [ + USER7_DN, USER8_DN, USER9_DN, + ]) + + + +def test_or_range(topology_st_f): + """Test filter logic with range + + :id: bc413e74-667a-48b0-8fbd-e9b7d18a01e4 + :setup: Standalone instance with 20 test users added + from uid=user0 to uid=user20 + :steps: + 1. Search for test users with filter ``(|(uid>=user5)(uid=user6))`` + 2. Search for test users with filter ``(|(uid>=user5)(uid=user0))`` + :expectedresults: + 1. There should be 5 users listed i.e. user5 to user9 + 2. There should be 6 users listed i.e. user5 to user9 and user0 + """ + _check_filter(topology_st_f, '(|(uid>=user5)(uid=user6))', 5, [ + USER5_DN, USER6_DN, USER7_DN, USER8_DN, USER9_DN, + ]) + _check_filter(topology_st_f, '(|(uid>=user5)(uid=user0))', 6, [ + USER0_DN, + USER5_DN, USER6_DN, USER7_DN, USER8_DN, USER9_DN, + ]) + + +def test_and_and_eq(topology_st_f): + """Test filter logic with "AND" and "equal to" operators + + :id: 5c66eb38-d01f-459e-81e4-d335f97211c7 + :setup: Standalone instance with 20 test users added + from uid=user0 to uid=user20 + :steps: + 1. Search for test users with filter ``(&(&(uid=user0)(sn=0))(cn=user0))`` + 2. Search for test users with filter ``(&(&(uid=user1)(sn=0))(cn=user0))`` + 3. 
Search for test users with filter ``(&(&(uid=user0)(sn=1))(cn=user0))`` + 4. Search for test users with filter ``(&(&(uid=user0)(sn=0))(cn=user1))`` + :expectedresults: + 1. There should be 1 user listed i.e. user0 + 2. There should be no users listed + 3. There should be no users listed + 4. There should be no users listed + """ + _check_filter(topology_st_f, '(&(&(uid=user0)(sn=0))(cn=user0))', 1, [USER0_DN]) + _check_filter(topology_st_f, '(&(&(uid=user1)(sn=0))(cn=user0))', 0, []) + _check_filter(topology_st_f, '(&(&(uid=user0)(sn=1))(cn=user0))', 0, []) + _check_filter(topology_st_f, '(&(&(uid=user0)(sn=0))(cn=user1))', 0, []) + + +def test_or_or_eq(topology_st_f): + """Test filter logic with "AND" and "equal to" operators + + :id: 0cab4bbd-637c-419d-8069-ad5463ecaa75 + :setup: Standalone instance with 20 test users added + from uid=user0 to uid=user20 + :steps: + 1. Search for test users with filter ``(|(|(uid=user0)(sn=0))(cn=user0))`` + 2. Search for test users with filter ``(|(|(uid=user1)(sn=0))(cn=user0))`` + 3. Search for test users with filter ``(|(|(uid=user0)(sn=1))(cn=user0))`` + 4. Search for test users with filter ``(|(|(uid=user0)(sn=0))(cn=user1))`` + 5. Search for test users with filter ``(|(|(uid=user0)(sn=1))(cn=user2))`` + :expectedresults: + 1. There should be 1 user listed i.e. user0 + 2. There should be 2 users listed i.e. user0, user1 + 3. There should be 2 users listed i.e. user0, user1 + 4. There should be 2 users listed i.e. user0, user1 + 5. There should be 3 users listed i.e. 
user0, user1 and user2 + """ + _check_filter(topology_st_f, '(|(|(uid=user0)(sn=0))(cn=user0))', 1, [USER0_DN]) + _check_filter(topology_st_f, '(|(|(uid=user1)(sn=0))(cn=user0))', 2, [USER0_DN, USER1_DN]) + _check_filter(topology_st_f, '(|(|(uid=user0)(sn=1))(cn=user0))', 2, [USER0_DN, USER1_DN]) + _check_filter(topology_st_f, '(|(|(uid=user0)(sn=0))(cn=user1))', 2, [USER0_DN, USER1_DN]) + _check_filter(topology_st_f, '(|(|(uid=user0)(sn=1))(cn=user2))', 3, [USER0_DN, USER1_DN, USER2_DN]) + + +def test_and_or_eq(topology_st_f): + """Test filter logic with "AND" and "equal to" operators + + :id: 2ce7cc2e-6058-422d-ac3e-e678decf1cc4 + :setup: Standalone instance with 20 test users added + from uid=user0 to uid=user20 + :steps: + 1. Search for test users with filter ``(&(|(uid=user0)(sn=0))(cn=user0))`` + 2. Search for test users with filter ``(&(|(uid=user1)(sn=0))(cn=user0))`` + 3. Search for test users with filter ``(&(|(uid=user0)(sn=1))(cn=user0))`` + 4. Search for test users with filter ``(&(|(uid=user0)(sn=0))(cn=user1))`` + 5. Search for test users with filter ``(&(|(uid=user0)(sn=1))(cn=*))`` + :expectedresults: + 1. There should be 1 user listed i.e. user0 + 2. There should be 1 user listed i.e. user0 + 3. There should be 1 user listed i.e. user0 + 4. There should be no users listed + 5. There should be 2 users listed i.e. 
user0 and user1 + """ + _check_filter(topology_st_f, '(&(|(uid=user0)(sn=0))(cn=user0))', 1, [USER0_DN]) + _check_filter(topology_st_f, '(&(|(uid=user1)(sn=0))(cn=user0))', 1, [USER0_DN]) + _check_filter(topology_st_f, '(&(|(uid=user0)(sn=1))(cn=user0))', 1, [USER0_DN]) + _check_filter(topology_st_f, '(&(|(uid=user0)(sn=0))(cn=user1))', 0, []) + _check_filter(topology_st_f, '(&(|(uid=user0)(sn=1))(cn=*))', 2, [USER0_DN, USER1_DN]) + + +def test_or_and_eq(topology_st_f): + """Test filter logic with "AND" and "equal to" operators + + :id: ee9fb400-451a-479e-852c-f59b4c937a8d + :setup: Standalone instance with 20 test users added + from uid=user0 to uid=user20 + :steps: + 1. Search for test users with filter ``(|(&(uid=user0)(sn=0))(uid=user0))`` + 2. Search for test users with filter ``(|(&(uid=user1)(sn=2))(uid=user0))`` + 3. Search for test users with filter ``(|(&(uid=user0)(sn=1))(uid=user0))`` + 4. Search for test users with filter ``(|(&(uid=user1)(sn=1))(uid=user0))`` + :expectedresults: + 1. There should be 1 user listed i.e. user0 + 2. There should be 1 user listed i.e. user0 + 3. There should be 1 user listed i.e. user0 + 4. There should be 2 user listed i.e. user0 and user1 + """ + _check_filter(topology_st_f, '(|(&(uid=user0)(sn=0))(uid=user0))', 1, [USER0_DN]) + _check_filter(topology_st_f, '(|(&(uid=user1)(sn=2))(uid=user0))', 1, [USER0_DN]) + _check_filter(topology_st_f, '(|(&(uid=user0)(sn=1))(uid=user0))', 1, [USER0_DN]) + _check_filter(topology_st_f, '(|(&(uid=user1)(sn=1))(uid=user0))', 2, [USER0_DN, USER1_DN]) + + diff --git a/dirsrvtests/tests/suites/filter/filter_match_test.py b/dirsrvtests/tests/suites/filter/filter_match_test.py new file mode 100644 index 0000000..b670bcb --- /dev/null +++ b/dirsrvtests/tests/suites/filter/filter_match_test.py @@ -0,0 +1,778 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2019 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. 
+# --- END COPYRIGHT BLOCK ---- + + +""" +Test the matching rules feature . +""" + +import os +import pytest + +from lib389._constants import DEFAULT_SUFFIX +from lib389.topologies import topology_st +from lib389.cos import CosTemplates +from lib389.schema import Schema + +import ldap + +pytestmark = pytest.mark.tier1 + + +ATTR = ["( 2.16.840.1.113730.3.1.999999.0 NAME 'attroctetStringMatch' " + "DESC 'for testing matching rules' EQUALITY octetStringMatch " + "ORDERING octetStringOrderingMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.40 " + "X-ORIGIN 'matching rule tests' )", + "( 2.16.840.1.113730.3.1.999999.1 NAME 'attrbitStringMatch' DESC " + "'for testing matching rules' EQUALITY bitStringMatch SYNTAX " + "1.3.6.1.4.1.1466.115.121.1.6 X-ORIGIN 'matching rule tests' )", + "( 2.16.840.1.113730.3.1.999999.2 NAME 'attrcaseExactIA5Match' " + "DESC 'for testing matching rules' EQUALITY caseExactIA5Match " + "SUBSTR caseExactIA5SubstringsMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.26 " + "X-ORIGIN 'matching rule tests' )", + "( 2.16.840.1.113730.3.1.999999.3 NAME 'attrcaseExactMatch' DESC " + "'for testing matching rules' EQUALITY caseExactMatch ORDERING " + "caseExactOrderingMatch SUBSTR caseExactSubstringsMatch SYNTAX " + "1.3.6.1.4.1.1466.115.121.1.15 " + "X-ORIGIN 'matching rule tests' )", + "( 2.16.840.1.113730.3.1.999999.4 NAME 'attrgeneralizedTimeMatch' DESC " + "'for testing matching rules' EQUALITY generalizedTimeMatch ORDERING " + "generalizedTimeOrderingMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.24 " + "X-ORIGIN 'matching rule tests' )", + "( 2.16.840.1.113730.3.1.999999.5 NAME 'attrbooleanMatch' DESC " + "'for testing matching rules' EQUALITY booleanMatch SYNTAX " + "1.3.6.1.4.1.1466.115.121.1.7 X-ORIGIN 'matching rule tests' )", + "( 2.16.840.1.113730.3.1.999999.6 NAME 'attrcaseIgnoreIA5Match' DESC " + "'for testing matching rules' EQUALITY caseIgnoreIA5Match SUBSTR " + "caseIgnoreIA5SubstringsMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.26 " + "X-ORIGIN 'matching rule 
tests' )", + "( 2.16.840.1.113730.3.1.999999.7 NAME 'attrcaseIgnoreMatch' DESC " + "'for testing matching rules' EQUALITY caseIgnoreMatch ORDERING " + "caseIgnoreOrderingMatch SUBSTR caseIgnoreSubstringsMatch " + "SYNTAX 1.3.6.1.4.1.1466.115.121.1.15 X-ORIGIN 'matching rule tests' )", + "( 2.16.840.1.113730.3.1.999999.8 NAME 'attrcaseIgnoreListMatch' DESC " + "'for testing matching rules' EQUALITY caseIgnoreListMatch SUBSTR " + "caseIgnoreListSubstringsMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.41 " + "X-ORIGIN 'matching rule tests' )", + "( 2.16.840.1.113730.3.1.999999.9 NAME 'attrobjectIdentifierMatch' DESC " + "'for testing matching rules' EQUALITY objectIdentifierMatch SYNTAX " + "1.3.6.1.4.1.1466.115.121.1.38 X-ORIGIN 'matching rule tests' )", + "( 2.16.840.1.113730.3.1.999999.10 NAME 'attrdistinguishedNameMatch' DESC " + "'for testing matching rules' EQUALITY distinguishedNameMatch SYNTAX " + "1.3.6.1.4.1.1466.115.121.1.12 X-ORIGIN 'matching rule tests' )", + "( 2.16.840.1.113730.3.1.999999.11 NAME 'attrintegerMatch' DESC " + "'for testing matching rules' EQUALITY integerMatch ORDERING " + "integerOrderingMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.27 " + "X-ORIGIN 'matching rule tests' )", + "( 2.16.840.1.113730.3.1.999999.12 NAME 'attruniqueMemberMatch' DESC " + "'for testing matching rules' EQUALITY uniqueMemberMatch SYNTAX " + "1.3.6.1.4.1.1466.115.121.1.34 X-ORIGIN 'matching rule tests' )", + "( 2.16.840.1.113730.3.1.999999.13 NAME 'attrnumericStringMatch' DESC " + "'for testing matching rules' EQUALITY numericStringMatch ORDERING " + "numericStringOrderingMatch SUBSTR numericStringSubstringsMatch " + "SYNTAX 1.3.6.1.4.1.1466.115.121.1.36 X-ORIGIN 'matching rule tests' )", + "( 2.16.840.1.113730.3.1.999999.14 NAME 'attrtelephoneNumberMatch' DESC " + "'for testing matching rules' EQUALITY telephoneNumberMatch SUBSTR " + "telephoneNumberSubstringsMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.50 " + "X-ORIGIN 'matching rule tests' )", + "( 
2.16.840.1.113730.3.1.999999.15 NAME 'attrdirectoryStringFirstComponentMatch' " + "DESC 'for testing matching rules' EQUALITY directoryStringFirstComponentMatch " + "SYNTAX 1.3.6.1.4.1.1466.115.121.1.15 X-ORIGIN 'matching rule tests' )", + "( 2.16.840.1.113730.3.1.999999.16 NAME 'attrobjectIdentifierFirstComponentMatch' " + "DESC 'for testing matching rules' EQUALITY objectIdentifierFirstComponentMatch " + "SYNTAX 1.3.6.1.4.1.1466.115.121.1.38 X-ORIGIN 'matching rule tests' )", + "( 2.16.840.1.113730.3.1.999999.17 NAME 'attrintegerFirstComponentMatch' " + "DESC 'for testing matching rules' EQUALITY integerFirstComponentMatch SYNTAX " + "1.3.6.1.4.1.1466.115.121.1.27 X-ORIGIN 'matching rule tests' )"] + +TESTED_MATCHING_RULES = ["bitStringMatch", "caseExactIA5Match", "caseExactMatch", + "caseExactOrderingMatch", "caseExactSubstringsMatch", + "caseExactIA5SubstringsMatch", "generalizedTimeMatch", + "generalizedTimeOrderingMatch", "booleanMatch", "caseIgnoreIA5Match", + "caseIgnoreIA5SubstringsMatch", "caseIgnoreMatch", + "caseIgnoreOrderingMatch", "caseIgnoreSubstringsMatch", + "caseIgnoreListMatch", "caseIgnoreListSubstringsMatch", + "objectIdentifierMatch", "directoryStringFirstComponentMatch", + "objectIdentifierFirstComponentMatch", "distinguishedNameMatch", + "integerMatch", "integerOrderingMatch", "integerFirstComponentMatch", + "uniqueMemberMatch", "numericStringMatch", "numericStringOrderingMatch", + "numericStringSubstringsMatch", "telephoneNumberMatch", + "telephoneNumberSubstringsMatch", "octetStringMatch", + "octetStringOrderingMatch"] + + +MATCHING_RULES = [ + {'attr': 'attrbitStringMatch', + 'positive': ["'0010'B", "'0011'B", "'0100'B", "'0101'B", "'0110'B"], + 'negative': ["'0001'B", "'0001'B", "'0010'B", "'0010'B", "'0011'B", + "'0011'B", "'0100'B", "'0100'B", "'0101'B", + "'0101'B", "'0110'B", "'0110'B"]}, + {'attr': 'attrcaseExactIA5Match', + 'positive': ['sPrain', 'spRain', 'sprAin', 'spraIn', 'sprain'], + 'negative': ['Sprain', 'Sprain', 'Sprain', 
'Sprain', 'SpRain', + 'SpRain', 'SprAin', 'SprAin', 'SpraIn', 'SpraIn', + 'Sprain', 'Sprain']}, + {'attr': 'attrcaseExactMatch', + 'positive': ['ÇéliNé Ändrè', 'Çéliné ÄndrÈ', 'Çéliné Ändrè', 'çÉliné Ändrè'], + 'negative': ['ÇélIné Ändrè', 'ÇélIné Ändrè', 'ÇéliNé Ändrè', 'ÇéliNé Ändrè', + 'Çéliné ÄndrÈ', 'Çéliné ÄndrÈ', 'Çéliné Ändrè', 'Çéliné Ändrè', + 'çÉliné Ändrè', 'çÉliné Ändrè']}, + {'attr': 'attrgeneralizedTimeMatch', + 'positive': ['20100218171301Z', '20100218171302Z', '20100218171303Z', + '20100218171304Z', '20100218171305Z'], + 'negative': ['20100218171300Z', '20100218171300Z', '20100218171301Z', + '20100218171301Z', '20100218171302Z', '20100218171302Z', + '20100218171303Z', '20100218171303Z', '20100218171304Z', + '20100218171304Z', '20100218171305Z', '20100218171305Z']}, + {'attr': 'attrbooleanMatch', + 'positive': ['FALSE'], + 'negative': ['TRUE', 'TRUE', 'FALSE', 'FALSE']}, + {'attr': 'attrcaseIgnoreIA5Match', + 'positive': ['sprain2', 'sprain3', 'sprain4', 'sprain5', 'sprain6'], + 'negative': ['sprain1', 'sprain1', 'sprain2', 'sprain2', 'sprain3', + 'sprain3', 'sprain4', 'sprain4', 'sprain5', 'sprain5', + 'sprain6', 'sprain6']}, + {'attr': 'attrcaseIgnoreMatch', + 'positive': ['ÇélIné Ändrè2', 'ÇélIné Ändrè3', 'ÇélIné Ändrè4', + 'ÇélIné Ändrè5', 'ÇélIné Ändrè6'], + 'negative': ['ÇélIné Ändrè1', 'ÇélIné Ändrè1', 'ÇélIné Ändrè2', + 'ÇélIné Ändrè2', 'ÇélIné Ändrè3', 'ÇélIné Ändrè3', + 'ÇélIné Ändrè4', 'ÇélIné Ändrè4', 'ÇélIné Ändrè5', + 'ÇélIné Ändrè5', 'ÇélIné Ändrè6', 'ÇélIné Ändrè6']}, + {'attr': 'attrcaseIgnoreListMatch', + 'positive': ['foo2$bar', 'foo3$bar', 'foo4$bar', 'foo5$bar', 'foo6$bar'], + 'negative': ['foo1$bar', 'foo1$bar', 'foo2$bar', 'foo2$bar', 'foo3$bar', + 'foo3$bar', 'foo4$bar', 'foo4$bar', 'foo5$bar', 'foo5$bar', + 'foo6$bar', 'foo6$bar']}, + {'attr': 'attrobjectIdentifierMatch', + 'positive': ['1.3.6.1.4.1.1466.115.121.1.24', '1.3.6.1.4.1.1466.115.121.1.26', + '1.3.6.1.4.1.1466.115.121.1.40', '1.3.6.1.4.1.1466.115.121.1.41', + 
'1.3.6.1.4.1.1466.115.121.1.6'], + 'negative': ['1.3.6.1.4.1.1466.115.121.1.15', '1.3.6.1.4.1.1466.115.121.1.15', + '1.3.6.1.4.1.1466.115.121.1.24', '1.3.6.1.4.1.1466.115.121.1.24', + '1.3.6.1.4.1.1466.115.121.1.26', '1.3.6.1.4.1.1466.115.121.1.26', + '1.3.6.1.4.1.1466.115.121.1.40', '1.3.6.1.4.1.1466.115.121.1.40', + '1.3.6.1.4.1.1466.115.121.1.41', '1.3.6.1.4.1.1466.115.121.1.41', + '1.3.6.1.4.1.1466.115.121.1.6', '1.3.6.1.4.1.1466.115.121.1.6']}, + {'attr': 'attrdirectoryStringFirstComponentMatch', + 'positive': ['ÇélIné Ändrè2', 'ÇélIné Ändrè3', 'ÇélIné Ändrè4', 'ÇélIné Ändrè5', + 'ÇélIné Ändrè6'], + 'negative': ['ÇélIné Ändrè1', 'ÇélIné Ändrè1', 'ÇélIné Ändrè2', 'ÇélIné Ändrè2', + 'ÇélIné Ändrè3', 'ÇélIné Ändrè3', 'ÇélIné Ändrè4', 'ÇélIné Ändrè4', + 'ÇélIné Ändrè5', 'ÇélIné Ändrè5', 'ÇélIné Ändrè6', 'ÇélIné Ändrè6']}, + {'attr': 'attrobjectIdentifierFirstComponentMatch', + 'positive': ['1.3.6.1.4.1.1466.115.121.1.24', '1.3.6.1.4.1.1466.115.121.1.26', + '1.3.6.1.4.1.1466.115.121.1.40', '1.3.6.1.4.1.1466.115.121.1.41', + '1.3.6.1.4.1.1466.115.121.1.6'], + 'negative': ['1.3.6.1.4.1.1466.115.121.1.15', '1.3.6.1.4.1.1466.115.121.1.15', + '1.3.6.1.4.1.1466.115.121.1.24', '1.3.6.1.4.1.1466.115.121.1.24', + '1.3.6.1.4.1.1466.115.121.1.26', '1.3.6.1.4.1.1466.115.121.1.26', + '1.3.6.1.4.1.1466.115.121.1.40', '1.3.6.1.4.1.1466.115.121.1.40', + '1.3.6.1.4.1.1466.115.121.1.41', '1.3.6.1.4.1.1466.115.121.1.41', + '1.3.6.1.4.1.1466.115.121.1.6', '1.3.6.1.4.1.1466.115.121.1.6']}, + {'attr': 'attrdistinguishedNameMatch', + 'positive': ['cn=foo2,cn=bar', 'cn=foo3,cn=bar', 'cn=foo4,cn=bar', + 'cn=foo5,cn=bar', 'cn=foo6,cn=bar'], + 'negative': ['cn=foo1,cn=bar', 'cn=foo1,cn=bar', 'cn=foo2,cn=bar', + 'cn=foo2,cn=bar', 'cn=foo3,cn=bar', 'cn=foo3,cn=bar', + 'cn=foo4,cn=bar', 'cn=foo4,cn=bar', 'cn=foo5,cn=bar', + 'cn=foo5,cn=bar', 'cn=foo6,cn=bar', 'cn=foo6,cn=bar']}, + {'attr': 'attrintegerMatch', + 'positive': ['-1', '0', '1', '2', '3'], + 'negative': ['-2', '-2', '-1', '-1', '0', 
'0', '1', '1', '2', '2', '3', '3']}, + {'attr': 'attrintegerFirstComponentMatch', + 'positive': ['-1', '0', '1', '2', '3'], + 'negative': ['-2', '-2', '-1', '-1', '0', '0', '1', '1', '2', '2', '3', '3']}, + {'attr': 'attruniqueMemberMatch', + 'positive': ["cn=foo2,cn=bar#'0010'B", "cn=foo3,cn=bar#'0011'B", + "cn=foo4,cn=bar#'0100'B", "cn=foo5,cn=bar#'0101'B", + "cn=foo6,cn=bar#'0110'B"], + 'negative': ["cn=foo1,cn=bar#'0001'B", "cn=foo1,cn=bar#'0001'B", + "cn=foo2,cn=bar#'0010'B", "cn=foo2,cn=bar#'0010'B", + "cn=foo3,cn=bar#'0011'B", "cn=foo3,cn=bar#'0011'B", + "cn=foo4,cn=bar#'0100'B", "cn=foo4,cn=bar#'0100'B", + "cn=foo5,cn=bar#'0101'B", "cn=foo5,cn=bar#'0101'B", + "cn=foo6,cn=bar#'0110'B", "cn=foo6,cn=bar#'0110'B"]}, + {'attr': 'attrnumericStringMatch', + 'positive': ['00002', '00003', '00004', '00005', '00006'], + 'negative': ['00001', '00001', '00002', '00002', '00003', '00003', '00004', + '00004', '00005', '00005', '00006', '00006']}, + {'attr': 'attrtelephoneNumberMatch', + 'positive': ['+1 408 555 5625', '+1 408 555 6201', '+1 408 555 8585', + '+1 408 555 9187', '+1 408 555 9423'], + 'negative': ['+1 408 555 4798', '+1 408 555 4798', '+1 408 555 5625', + '+1 408 555 5625', '+1 408 555 6201', '+1 408 555 6201', + '+1 408 555 8585', '+1 408 555 8585', '+1 408 555 9187', + '+1 408 555 9187', '+1 408 555 9423', '+1 408 555 9423']}, + {'attr': 'attroctetStringMatch', + 'positive': ['AAAAAAAAAAAAAAI=', 'AAAAAAAAAAAAAAM=', 'AAAAAAAAAAAAAAQ=', + 'AAAAAAAAAAAAAAU=', 'AAAAAAAAAAAAAAY='], + 'negative': ['AAAAAAAAAAAAAAE=', 'AAAAAAAAAAAAAAE=', 'AAAAAAAAAAAAAAI=', + 'AAAAAAAAAAAAAAI=', 'AAAAAAAAAAAAAAM=', 'AAAAAAAAAAAAAAM=', + 'AAAAAAAAAAAAAAQ=', 'AAAAAAAAAAAAAAQ=', 'AAAAAAAAAAAAAAU=', + 'AAAAAAAAAAAAAAU=', 'AAAAAAAAAAAAAAY=', 'AAAAAAAAAAAAAAY=']}] + + +MATCHING_MODES = [ + {'attr': 'attrbitStringMatch', + 'positive': ["'0001'B"], + 'negative': ["'0001'B", "'0010'B", "'0011'B", "'0100'B", "'0101'B", "'0110'B"]}, + {'attr': 'attrcaseExactIA5Match', + 'positive': 
'Sprain', + 'negative': ['Sprain', 'sPrain', 'spRain', 'sprAin', 'spraIn', 'sprain']}, + {'attr': 'attrcaseExactMatch', + 'positive': 'ÇélIné Ändrè', + 'negative': ['ÇélIné Ändrè', 'ÇéliNé Ändrè', 'Çéliné ÄndrÈ', 'Çéliné Ändrè', 'çÉliné Ändrè']}, + {'attr': 'attrgeneralizedTimeMatch', + 'positive': '20100218171300Z', + 'negative': ['20100218171300Z', '20100218171301Z', '20100218171302Z', + '20100218171303Z', '20100218171304Z', '20100218171305Z']}, + {'attr': 'attrbooleanMatch', + 'positive': 'TRUE', + 'negative': ['TRUE', 'FALSE']}, + {'attr': 'attrcaseIgnoreIA5Match', + 'positive': 'sprain1', + 'negative': ['sprain1', 'sprain2', 'sprain3', 'sprain4', 'sprain5', 'sprain6']}, + {'attr': 'attrcaseIgnoreMatch', + 'positive': 'ÇélIné Ändrè1', + 'negative': ['ÇélIné Ändrè1', 'ÇélIné Ändrè2', 'ÇélIné Ändrè3', 'ÇélIné Ändrè4', + 'ÇélIné Ändrè5', 'ÇélIné Ändrè6']}, + {'attr': 'attrcaseIgnoreListMatch', + 'positive': 'foo1$bar', + 'negative': ['foo1$bar', 'foo2$bar', 'foo3$bar', 'foo4$bar', 'foo5$bar', 'foo6$bar']}, + {'attr': 'attrobjectIdentifierMatch', + 'positive': '1.3.6.1.4.1.1466.115.121.1.15', + 'negative': ['1.3.6.1.4.1.1466.115.121.1.15', '1.3.6.1.4.1.1466.115.121.1.24', + '1.3.6.1.4.1.1466.115.121.1.26', '1.3.6.1.4.1.1466.115.121.1.40', + '1.3.6.1.4.1.1466.115.121.1.41', '1.3.6.1.4.1.1466.115.121.1.6']}, + {'attr': 'attrdirectoryStringFirstComponentMatch', + 'positive': 'ÇélIné Ändrè1', + 'negative': ['ÇélIné Ändrè1', 'ÇélIné Ändrè2', 'ÇélIné Ändrè3', 'ÇélIné Ändrè4', + 'ÇélIné Ändrè5', 'ÇélIné Ändrè6']}, + {'attr': 'attrobjectIdentifierFirstComponentMatch', + 'positive': '1.3.6.1.4.1.1466.115.121.1.15', + 'negative': ['1.3.6.1.4.1.1466.115.121.1.15', '1.3.6.1.4.1.1466.115.121.1.24', + '1.3.6.1.4.1.1466.115.121.1.26', '1.3.6.1.4.1.1466.115.121.1.40', + '1.3.6.1.4.1.1466.115.121.1.41', '1.3.6.1.4.1.1466.115.121.1.6']}, + {'attr': 'attrdistinguishedNameMatch', + 'positive': 'cn=foo1,cn=bar', + 'negative': ['cn=foo1,cn=bar', 'cn=foo2,cn=bar', 'cn=foo3,cn=bar', 
'cn=foo4,cn=bar', + 'cn=foo5,cn=bar', 'cn=foo6,cn=bar']}, + {'attr': 'attrintegerMatch', + 'positive': '-2', + 'negative': ['-2', '-1', '0', '1', '2', '3']}, + {'attr': 'attrintegerFirstComponentMatch', + 'positive': '-2', + 'negative': ['-2', '-1', '0', '1', '2', '3']}, + {'attr': 'attruniqueMemberMatch', + 'positive': "cn=foo1,cn=bar#'0001'B", + 'negative': ["cn=foo1,cn=bar#'0001'B", "cn=foo2,cn=bar#'0010'B", "cn=foo3,cn=bar#'0011'B", + "cn=foo4,cn=bar#'0100'B", "cn=foo5,cn=bar#'0101'B", "cn=foo6,cn=bar#'0110'B"]}, + {'attr': 'attrnumericStringMatch', + 'positive': '00001', + 'negative': ['00001', '00002', '00003', '00004', '00005', '00006']}, + {'attr': 'attrtelephoneNumberMatch', + 'positive': '+1 408 555 4798', + 'negative': ['+1 408 555 4798', '+1 408 555 5625', '+1 408 555 6201', '+1 408 555 8585', + '+1 408 555 9187', '+1 408 555 9423']}, + {'attr': 'attroctetStringMatch', + 'positive': 'AAAAAAAAAAAAAAE=', + 'negative': ['AAAAAAAAAAAAAAE=', 'AAAAAAAAAAAAAAI=', 'AAAAAAAAAAAAAAM=', 'AAAAAAAAAAAAAAQ=', + 'AAAAAAAAAAAAAAU=', 'AAAAAAAAAAAAAAY=']}] + +MODE_REPLACE = [ + {'attr': 'attrbitStringMatch', + 'positive': ["'0001'B", "'0010'B", "'0011'B", "'0100'B", "'0101'B", "'0110'B"], + 'negative': ["'0001'B", "'0001'B", "'0010'B", "'0010'B", "'0011'B", "'0011'B", + "'0100'B", "'0100'B", "'0101'B", "'0101'B", "'0110'B", "'0110'B"]}, + {'attr': 'attrcaseExactIA5Match', + 'positive': ['Sprain', 'sPrain', 'spRain', 'sprAin', 'spraIn', 'sprain'], + 'negative': ['Sprain', 'Sprain', 'sPrain', 'sPrain', 'spRain', 'spRain', + 'sprAin', 'sprAin', 'spraIn', 'spraIn', 'sprain', 'sprain']}, + {'attr': 'attrcaseExactMatch', + 'positive': ['ÇélIné Ändrè', 'ÇéliNé Ändrè', 'Çéliné ÄndrÈ', 'Çéliné Ändrè', + 'çÉliné Ändrè'], + 'negative': ['ÇélIné Ändrè', 'ÇélIné Ändrè', 'ÇéliNé Ändrè', 'ÇéliNé Ändrè', + 'Çéliné ÄndrÈ', 'Çéliné ÄndrÈ', 'Çéliné Ändrè', 'Çéliné Ändrè', + 'çÉliné Ändrè', 'çÉliné Ändrè']}, + {'attr': 'attrgeneralizedTimeMatch', + 'positive': ['20100218171300Z', 
'20100218171301Z', '20100218171302Z', '20100218171303Z', + '20100218171304Z', '20100218171305Z'], + 'negative': ['20100218171300Z', '20100218171300Z', '20100218171301Z', '20100218171301Z', + '20100218171302Z', '20100218171302Z', '20100218171303Z', '20100218171303Z', + '20100218171304Z', '20100218171304Z', '20100218171305Z', '20100218171305Z']}, + {'attr': 'attrbooleanMatch', + 'positive': ['TRUE', 'FALSE'], + 'negative': ['TRUE', 'TRUE', 'FALSE', 'FALSE']}, + {'attr': 'attrcaseIgnoreIA5Match', + 'positive': ['sprain1', 'sprain2', 'sprain3', 'sprain4', 'sprain5', 'sprain6'], + 'negative': ['sprain1', 'sprain1', 'sprain2', 'sprain2', 'sprain3', 'sprain3', + 'sprain4', 'sprain4', 'sprain5', 'sprain5', 'sprain6', 'sprain6']}, + {'attr': 'attrcaseIgnoreMatch', + 'positive': ['ÇélIné Ändrè1', 'ÇélIné Ändrè2', 'ÇélIné Ändrè3', 'ÇélIné Ändrè4', + 'ÇélIné Ändrè5', 'ÇélIné Ändrè6'], + 'negative': ['ÇélIné Ändrè1', 'ÇélIné Ändrè1', 'ÇélIné Ändrè2', 'ÇélIné Ändrè2', + 'ÇélIné Ändrè3', 'ÇélIné Ändrè3', 'ÇélIné Ändrè4', 'ÇélIné Ändrè4', + 'ÇélIné Ändrè5', 'ÇélIné Ändrè5', 'ÇélIné Ändrè6', 'ÇélIné Ändrè6']}, + {'attr': 'attrcaseIgnoreListMatch', + 'positive': ['foo1$bar', 'foo2$bar', 'foo3$bar', 'foo4$bar', 'foo5$bar', 'foo6$bar'], + 'negative': ['foo1$bar', 'foo1$bar', 'foo2$bar', 'foo2$bar', 'foo3$bar', 'foo3$bar', + 'foo4$bar', 'foo4$bar', 'foo5$bar', 'foo5$bar', 'foo6$bar', 'foo6$bar']}, + {'attr': 'attrobjectIdentifierFirstComponentMatch', + 'positive': ['1.3.6.1.4.1.1466.115.121.1.15', '1.3.6.1.4.1.1466.115.121.1.24', + '1.3.6.1.4.1.1466.115.121.1.26', '1.3.6.1.4.1.1466.115.121.1.40', + '1.3.6.1.4.1.1466.115.121.1.41', '1.3.6.1.4.1.1466.115.121.1.6'], + 'negative': ['1.3.6.1.4.1.1466.115.121.1.15', '1.3.6.1.4.1.1466.115.121.1.15', + '1.3.6.1.4.1.1466.115.121.1.24', '1.3.6.1.4.1.1466.115.121.1.24', + '1.3.6.1.4.1.1466.115.121.1.26', '1.3.6.1.4.1.1466.115.121.1.26', + '1.3.6.1.4.1.1466.115.121.1.40', '1.3.6.1.4.1.1466.115.121.1.40', + '1.3.6.1.4.1.1466.115.121.1.41', 
'1.3.6.1.4.1.1466.115.121.1.41', + '1.3.6.1.4.1.1466.115.121.1.6', '1.3.6.1.4.1.1466.115.121.1.6']}, + {'attr': 'attrdistinguishedNameMatch', + 'positive': ['cn=foo1,cn=bar', 'cn=foo2,cn=bar', 'cn=foo3,cn=bar', 'cn=foo4,cn=bar', + 'cn=foo5,cn=bar', 'cn=foo6,cn=bar'], + 'negative': ['cn=foo1,cn=bar', 'cn=foo1,cn=bar', 'cn=foo2,cn=bar', 'cn=foo2,cn=bar', + 'cn=foo3,cn=bar', 'cn=foo3,cn=bar', 'cn=foo4,cn=bar', 'cn=foo4,cn=bar', + 'cn=foo5,cn=bar', 'cn=foo5,cn=bar', 'cn=foo6,cn=bar', 'cn=foo6,cn=bar']}, + {'attr': 'attrintegerMatch', + 'positive': ['-2', '-1', '0', '1', '2', '3'], + 'negative': ['-2', '-2', '-1', '-1', '0', '0', '1', '1', '2', '2', '3', '3']}, + {'attr': 'attrintegerFirstComponentMatch', + 'positive': ['-2', '-1', '0', '1', '2', '3'], + 'negative': ['-2', '-2', '-1', '-1', '0', '0', '1', '1', '2', '2', '3', '3']}, + {'attr': 'attruniqueMemberMatch', + 'positive': ["cn=foo1,cn=bar#'0001'B", "cn=foo2,cn=bar#'0010'B", "cn=foo3,cn=bar#'0011'B", + "cn=foo4,cn=bar#'0100'B", "cn=foo5,cn=bar#'0101'B", "cn=foo6,cn=bar#'0110'B"], + 'negative': ["cn=foo1,cn=bar#'0001'B", "cn=foo1,cn=bar#'0001'B", "cn=foo2,cn=bar#'0010'B", + "cn=foo2,cn=bar#'0010'B", "cn=foo3,cn=bar#'0011'B", "cn=foo3,cn=bar#'0011'B", + "cn=foo4,cn=bar#'0100'B", "cn=foo4,cn=bar#'0100'B", "cn=foo5,cn=bar#'0101'B", + "cn=foo5,cn=bar#'0101'B", "cn=foo6,cn=bar#'0110'B", "cn=foo6,cn=bar#'0110'B"]}, + {'attr': 'attrnumericStringMatch', + 'positive': ['00001', '00002', '00003', '00004', '00005', '00006'], + 'negative': ['00001', '00001', '00002', '00002', '00003', '00003', '00004', '00004', '00005', + '00005', '00006', '00006']}, + {'attr': 'attrtelephoneNumberMatch', + 'positive': ['+1 408 555 4798', '+1 408 555 5625', '+1 408 555 6201', '+1 408 555 8585', + '+1 408 555 9187', '+1 408 555 9423'], + 'negative': ['+1 408 555 4798', '+1 408 555 4798', '+1 408 555 5625', '+1 408 555 5625', + '+1 408 555 6201', '+1 408 555 6201', '+1 408 555 8585', '+1 408 555 8585', + '+1 408 555 9187', '+1 408 555 9187', 
'+1 408 555 9423', '+1 408 555 9423']}, + {'attr': 'attroctetStringMatch', + 'positive': ['AAAAAAAAAAAAAAE=', 'AAAAAAAAAAAAAAI=', 'AAAAAAAAAAAAAAM=', 'AAAAAAAAAAAAAAQ=', + 'AAAAAAAAAAAAAAU=', 'AAAAAAAAAAAAAAY='], + 'negative': ['AAAAAAAAAAAAAAE=', 'AAAAAAAAAAAAAAE=', 'AAAAAAAAAAAAAAI=', 'AAAAAAAAAAAAAAI=', + 'AAAAAAAAAAAAAAM=', 'AAAAAAAAAAAAAAM=', 'AAAAAAAAAAAAAAQ=', 'AAAAAAAAAAAAAAQ=', + 'AAAAAAAAAAAAAAU=', 'AAAAAAAAAAAAAAU=', 'AAAAAAAAAAAAAAY=', 'AAAAAAAAAAAAAAY=']}, + {'attr': 'attrobjectIdentifierMatch', + 'positive': ['1.3.6.1.4.1.1466.115.121.1.15', '1.3.6.1.4.1.1466.115.121.1.24', + '1.3.6.1.4.1.1466.115.121.1.26', '1.3.6.1.4.1.1466.115.121.1.40', + '1.3.6.1.4.1.1466.115.121.1.41', '1.3.6.1.4.1.1466.115.121.1.6'], + 'negative': ['1.3.6.1.4.1.1466.115.121.1.15', '1.3.6.1.4.1.1466.115.121.1.15', + '1.3.6.1.4.1.1466.115.121.1.24', '1.3.6.1.4.1.1466.115.121.1.24', + '1.3.6.1.4.1.1466.115.121.1.26', '1.3.6.1.4.1.1466.115.121.1.26', + '1.3.6.1.4.1.1466.115.121.1.40', '1.3.6.1.4.1.1466.115.121.1.40', + '1.3.6.1.4.1.1466.115.121.1.41', '1.3.6.1.4.1.1466.115.121.1.41', + '1.3.6.1.4.1.1466.115.121.1.6', '1.3.6.1.4.1.1466.115.121.1.6']}, + {'attr': 'attrdirectoryStringFirstComponentMatch', + 'positive': ['ÇélIné Ändrè1', 'ÇélIné Ändrè2', 'ÇélIné Ändrè3', 'ÇélIné Ändrè4', + 'ÇélIné Ändrè5', 'ÇélIné Ändrè6'], + 'negative': ['ÇélIné Ändrè1', 'ÇélIné Ändrè1', 'ÇélIné Ändrè2', 'ÇélIné Ändrè2', + 'ÇélIné Ändrè3', 'ÇélIné Ändrè3', 'ÇélIné Ändrè4', 'ÇélIné Ändrè4', + 'ÇélIné Ändrè5', 'ÇélIné Ändrè5', 'ÇélIné Ändrè6', 'ÇélIné Ändrè6']}] + + +LIST_ATTR = [ + ('entryoctetStringMatch0', 'AAAAAAAAAAAAAAE='), + ('entryoctetStringMatch1', 'AAAAAAAAAAAAAAI='), + ('entryoctetStringMatch2', 'AAAAAAAAAAAAAAM='), + ('entryoctetStringMatch3', 'AAAAAAAAAAAAAAQ='), + ('entryoctetStringMatch4', 'AAAAAAAAAAAAAAU='), + ('entryoctetStringMatch5', 'AAAAAAAAAAAAAAY='), + ('entrybitStringMatch0', "'0001'B"), + ('entrybitStringMatch1', "'0010'B"), + ('entrybitStringMatch2', "'0011'B"), + 
('entrybitStringMatch3', "'0100'B"), + ('entrybitStringMatch4', "'0101'B"), + ('entrybitStringMatch5', "'0110'B"), + ('entrycaseExactIA5Match0', "Sprain"), + ('entrycaseExactIA5Match1', "sPrain"), + ('entrycaseExactIA5Match2', "spRain"), + ('entrycaseExactIA5Match3', "sprAin"), + ('entrycaseExactIA5Match4', "spraIn"), + ('entrycaseExactIA5Match5', "sprain"), + ('entrycaseExactMatch0', "ÇélIné Ändrè"), + ('entrycaseExactMatch1', "ÇéliNé Ändrè"), + ('entrycaseExactMatch2', "Çéliné ÄndrÈ"), + ('entrycaseExactMatch3', "Çéliné Ändrè"), + ('entrycaseExactMatch4', "çÉliné Ändrè"), + ('entrygeneralizedTimeMatch0', "20100218171300Z"), + ('entrygeneralizedTimeMatch1', "20100218171301Z"), + ('entrygeneralizedTimeMatch2', "20100218171302Z"), + ('entrygeneralizedTimeMatch3', "20100218171303Z"), + ('entrygeneralizedTimeMatch4', "20100218171304Z"), + ('entrygeneralizedTimeMatch5', "20100218171305Z"), + ('entrybooleanMatch0', "TRUE"), + ('entrybooleanMatch1', "FALSE"), + ('entrycaseIgnoreIA5Match0', "sprain1"), + ('entrycaseIgnoreIA5Match1', "sprain2"), + ('entrycaseIgnoreIA5Match2', "sprain3"), + ('entrycaseIgnoreIA5Match3', "sprain4"), + ('entrycaseIgnoreIA5Match4', "sprain5"), + ('entrycaseIgnoreIA5Match5', "sprain6"), + ('entrycaseIgnoreMatch0', "ÇélIné Ändrè1"), + ('entrycaseIgnoreMatch1', "ÇélIné Ändrè2"), + ('entrycaseIgnoreMatch2', "ÇélIné Ändrè3"), + ('entrycaseIgnoreMatch3', "ÇélIné Ändrè4"), + ('entrycaseIgnoreMatch4', "ÇélIné Ändrè5"), + ('entrycaseIgnoreMatch5', "ÇélIné Ändrè6"), + ('entrycaseIgnoreListMatch0', "foo1$bar"), + ('entrycaseIgnoreListMatch1', "foo2$bar"), + ('entrycaseIgnoreListMatch2', "foo3$bar"), + ('entrycaseIgnoreListMatch3', "foo4$bar"), + ('entrycaseIgnoreListMatch4', "foo5$bar"), + ('entrycaseIgnoreListMatch5', "foo6$bar"), + ('entryobjectIdentifierMatch0', "1.3.6.1.4.1.1466.115.121.1.15"), + ('entryobjectIdentifierMatch1', "1.3.6.1.4.1.1466.115.121.1.24"), + ('entryobjectIdentifierMatch2', "1.3.6.1.4.1.1466.115.121.1.26"), + 
('entryobjectIdentifierMatch3', "1.3.6.1.4.1.1466.115.121.1.40"), + ('entryobjectIdentifierMatch4', "1.3.6.1.4.1.1466.115.121.1.41"), + ('entryobjectIdentifierMatch5', "1.3.6.1.4.1.1466.115.121.1.6"), + ('entrydistinguishedNameMatch0', "cn=foo1,cn=bar"), + ('entrydistinguishedNameMatch1', "cn=foo2,cn=bar"), + ('entrydistinguishedNameMatch2', "cn=foo3,cn=bar"), + ('entrydistinguishedNameMatch3', "cn=foo4,cn=bar"), + ('entrydistinguishedNameMatch4', "cn=foo5,cn=bar"), + ('entrydistinguishedNameMatch5', "cn=foo6,cn=bar"), + ('entryintegerMatch0', "-2"), + ('entryintegerMatch1', "-1"), + ('entryintegerMatch2', "0"), + ('entryintegerMatch3', "1"), + ('entryintegerMatch4', "2"), + ('entryintegerMatch5', "3"), + ('entryuniqueMemberMatch0', "cn=foo1,cn=bar#'0001'B"), + ('entryuniqueMemberMatch1', "cn=foo2,cn=bar#'0010'B"), + ('entryuniqueMemberMatch2', "cn=foo3,cn=bar#'0011'B"), + ('entryuniqueMemberMatch3', "cn=foo4,cn=bar#'0100'B"), + ('entryuniqueMemberMatch4', "cn=foo5,cn=bar#'0101'B"), + ('entryuniqueMemberMatch5', "cn=foo6,cn=bar#'0110'B"), + ('entrynumericStringMatch0', "00001"), + ('entrynumericStringMatch1', "00002"), + ('entrynumericStringMatch2', "00003"), + ('entrynumericStringMatch3', "00004"), + ('entrynumericStringMatch4', "00005"), + ('entrynumericStringMatch5', "00006"), + ('entrytelephoneNumberMatch0', "+1 408 555 4798"), + ('entrytelephoneNumberMatch1', "+1 408 555 5625"), + ('entrytelephoneNumberMatch2', "+1 408 555 6201"), + ('entrytelephoneNumberMatch3', "+1 408 555 8585"), + ('entrytelephoneNumberMatch4', "+1 408 555 9187"), + ('entrytelephoneNumberMatch5', "+1 408 555 9423"), + ('entrydirectoryStringFirstComponentMatch0', "ÇélIné Ändrè1"), + ('entrydirectoryStringFirstComponentMatch1', "ÇélIné Ändrè2"), + ('entrydirectoryStringFirstComponentMatch2', "ÇélIné Ändrè3"), + ('entrydirectoryStringFirstComponentMatch3', "ÇélIné Ändrè4"), + ('entrydirectoryStringFirstComponentMatch4', "ÇélIné Ändrè5"), + ('entrydirectoryStringFirstComponentMatch5', "ÇélIné 
Ändrè6"), + ('entryobjectIdentifierFirstComponentMatch0', "1.3.6.1.4.1.1466.115.121.1.15"), + ('entryobjectIdentifierFirstComponentMatch1', "1.3.6.1.4.1.1466.115.121.1.24"), + ('entryobjectIdentifierFirstComponentMatch2', "1.3.6.1.4.1.1466.115.121.1.26"), + ('entryobjectIdentifierFirstComponentMatch3', "1.3.6.1.4.1.1466.115.121.1.40"), + ('entryobjectIdentifierFirstComponentMatch4', "1.3.6.1.4.1.1466.115.121.1.41"), + ('entryobjectIdentifierFirstComponentMatch5', "1.3.6.1.4.1.1466.115.121.1.6"), + ('entryintegerFirstComponentMatch0', "-2"), + ('entryintegerFirstComponentMatch1', "-1"), + ('entryintegerFirstComponentMatch2', "0"), + ('entryintegerFirstComponentMatch3', "1"), + ('entryintegerFirstComponentMatch4', "2"), + ('entryintegerFirstComponentMatch5', "3")] + + +POSITIVE_NEGATIVE_VALUES = [ + ["(attrbitStringMatch='0001'B)", 1, + "(attrbitStringMatch:bitStringMatch:='000100000'B)"], + ["(attrgeneralizedTimeMatch=20100218171300Z)", 1, + "(attrcaseExactIA5Match=SPRAIN)"], + ["(attrcaseExactMatch>=ÇélIné Ändrè)", 5, + "(attrcaseExactMatch=ÇéLINé ÄNDRè)"], + ["(attrcaseExactMatch:caseExactMatch:=ÇélIné Ändrè)", 1, + "(attrcaseExactMatch>=çéliné ändrè)"], + ["(attrcaseExactIA5Match=Sprain)", 1, + "(attrgeneralizedTimeMatch=20300218171300Z)"], + ["(attrbooleanMatch=TRUE)", 1, + "(attrgeneralizedTimeMatch>=20300218171300Z)"], + ["(attrcaseIgnoreIA5Match=sprain1)", 1, + "(attrcaseIgnoreIA5Match=sprain9999)"], + ["(attrcaseIgnoreMatch=ÇélIné Ändrè1)", 1, + "(attrcaseIgnoreMatch=ÇélIné Ändrè9999)"], + ["(attrcaseIgnoreMatch>=ÇélIné Ändrè1)", 6, + "(attrcaseIgnoreMatch>=ÇélIné Ändrè9999)"], + ["(attrcaseIgnoreListMatch=foo1$bar)", 1, + "(attrcaseIgnoreListMatch=foo1$bar$baz$biff)"], + ["(attrobjectIdentifierMatch=1.3.6.1.4.1.1466.115.121.1.15)", 1, + "(attrobjectIdentifierMatch=1.3.6.1.4.1.1466.115.121.1.15.99999)"], + ["(attrgeneralizedTimeMatch>=20100218171300Z)", 6, + "(attroctetStringMatch>=AAAAAAAAAAABAQQ=)"], + ["(attrdirectoryStringFirstComponentMatch=ÇélIné 
Ändrè1)", 1, + "(attrdirectoryStringFirstComponentMatch=ÇélIné Ändrè9999)"], + ["(attrobjectIdentifierFirstComponentMatch=1.3.6.1.4.1.1466.115.121.1.15)", 1, + "(attrobjectIdentifierFirstComponentMatch=1.3.6.1.4.1.1466.115.121.1.15.99999)"], + ["(attrdistinguishedNameMatch=cn=foo1,cn=bar)", 1, + "(attrdistinguishedNameMatch=cn=foo1,cn=bar,cn=baz)"], + ["(attrintegerMatch=-2)", 1, + "(attrintegerMatch=-20)"], + ["(attrintegerMatch>=-2)", 6, + "(attrintegerMatch>=20)"], + ["(attrintegerFirstComponentMatch=-2)", 1, + "(attrintegerFirstComponentMatch=-20)"], + ["(attruniqueMemberMatch=cn=foo1,cn=bar#'0001'B)", 1, + "(attruniqueMemberMatch=cn=foo1,cn=bar#'00010000'B)"], + ["(attrnumericStringMatch=00001)", 1, + "(attrnumericStringMatch=000000001)"], + ["(attrnumericStringMatch>=00001)", 6, + "(attrnumericStringMatch>=01)"], + ["(attrtelephoneNumberMatch=+1 408 555 4798)", 1, + "(attrtelephoneNumberMatch=+2 408 555 4798)"], + ["(attroctetStringMatch=AAAAAAAAAAAAAAE=)", 1, + "(attroctetStringMatch=AAAAAAAAAAAAAAEB)"], + ["(attroctetStringMatch>=AAAAAAAAAAAAAAE=)", 6, + "(attroctetStringMatch>=AAAAAAAAAAABAQE=)"]] + + +LIST_EXT = [("(attrbitStringMatch:bitStringMatch:='0001'B)", 1), + ("(attrcaseExactIA5Match:caseExactIA5Match:=Sprain)", 1), + ("(attrcaseExactMatch:caseExactMatch:=ÇélIné Ändrè)", 1), + ("(attrcaseExactMatch:caseExactOrderingMatch:=ÇélIné Ändrè)", 5), + ("(attrgeneralizedTimeMatch:generalizedTimeMatch:=20100218171300Z)", 1), + ("(attrgeneralizedTimeMatch:generalizedTimeOrderingMatch:=20100218171300Z)", 6), + ("(attrbooleanMatch:booleanMatch:=TRUE)", 1), + ("(attrcaseIgnoreIA5Match:caseIgnoreIA5Match:=sprain1)", 1), + ("(attrcaseIgnoreMatch:caseIgnoreMatch:=ÇélIné Ändrè1)", 1), + ("(attrcaseIgnoreMatch:caseIgnoreOrderingMatch:=ÇélIné Ändrè1)", 6), + ("(attrcaseIgnoreListMatch:caseIgnoreListMatch:=foo1$bar)", 1), + ("(attrobjectIdentifierMatch:objectIdentifierMatch:=1.3.6.1.4.1.1466.115.121.1.15)", 1), + ("(attrdirectoryStringFirstComponentMatch:directory" + 
"StringFirstComponentMatch:=ÇélIné Ändrè1)", 1), + ("(attrobjectIdentifierFirstComponentMatch:objectIdentifier" + "FirstComponentMatch:=1.3.6.1.4.1.1466.115.121.1.15)", 1), + ("(attrdistinguishedNameMatch:distinguishedNameMatch:=cn=foo1,cn=bar)", 1), + ("(attrintegerMatch:integerMatch:=-2)", 1), + ("(attrintegerMatch:integerOrderingMatch:=-2)", 6), + ("(attrintegerFirstComponentMatch:integerFirstComponentMatch:=-2)", 1), + ("(attruniqueMemberMatch:uniqueMemberMatch:=cn=foo1,cn=bar#'0001'B)", 1), + ("(attrnumericStringMatch:numericStringMatch:=00001)", 1), + ("(attrnumericStringMatch:numericStringMatch:=00001)", 1), + ("(attrtelephoneNumberMatch:telephoneNumberMatch:=+1 408 555 4798)", 1), + ("(attroctetStringMatch:octetStringMatch:=AAAAAAAAAAAAAAE=)", 1), + ("(attroctetStringMatch:octetStringOrderingMatch:=AAAAAAAAAAAAAAE=)", 6), + ("(attrcaseExactMatch=*ÇélIné Ändrè*)", 1), + ("(attrcaseExactMatch=ÇélIné Ändrè*)", 1), + ("(attrcaseExactMatch=*ÇélIné Ändrè)", 1), + ("(attrcaseExactMatch=*é Ä*)", 5), + ("(attrcaseExactIA5Match=*Sprain*)", 1), + ("(attrcaseExactIA5Match=Sprain*)", 1), + ("(attrcaseExactIA5Match=*Sprain)", 1), + ("(attrcaseExactIA5Match=*rai*)", 3), + ("(attrcaseIgnoreIA5Match=*sprain1*)", 1), + ("(attrcaseIgnoreIA5Match=sprain1*)", 1), + ("(attrcaseIgnoreIA5Match=*sprain1)", 1), + ("(attrcaseIgnoreIA5Match=*rai*)", 6), + ("(attrcaseIgnoreMatch=*ÇélIné Ändrè1*)", 1), + ("(attrcaseIgnoreMatch=ÇélIné Ändrè1*)", 1), + ("(attrcaseIgnoreMatch=*ÇélIné Ändrè1)", 1), + ("(attrcaseIgnoreMatch=*é Ä*)", 6), + ("(attrcaseIgnoreListMatch=*foo1$bar*)", 1), + ("(attrcaseIgnoreListMatch=foo1$bar*)", 1), + ("(attrcaseIgnoreListMatch=*foo1$bar)", 1), + ("(attrcaseIgnoreListMatch=*1$b*)", 1), + ("(attrnumericStringMatch=*00001*)", 1), + ("(attrnumericStringMatch=00001*)", 1), + ("(attrnumericStringMatch=*00001)", 1), + ("(attrnumericStringMatch=*000*)", 6), + ("(attrtelephoneNumberMatch=*+1 408 555 4798*)", 1), + ("(attrtelephoneNumberMatch=+1 408 555 4798*)", 1), + 
("(attrtelephoneNumberMatch=*+1 408 555 4798)", 1), + ("(attrtelephoneNumberMatch=* 55*)", 6)] + + +def test_matching_rules(topology_st): + """Test matching rules. + + :id: 8cb6e62a-8cfc-11e9-be9a-8c16451d917b + :setup: Standalone + :steps: + 1. Search for matching rule. + 2. Matching rule should be there in schema. + :expectedresults: + 1. Pass + 2. Pass + """ + matchingrules = Schema(topology_st.standalone).get_matchingrules() + assert matchingrules + rules = set(matchingrule.names for matchingrule in matchingrules) + rules1 = [role[0] for role in rules if len(role) != 0] + for rule in TESTED_MATCHING_RULES: + assert rule in rules1 + + +def test_add_attribute_types(topology_st): + """Test add attribute types to schema + + :id: 84d6dece-8cfc-11e9-89a3-8c16451d917b + :setup: Standalone + :steps: + 1. Add new attribute types to schema. + :expectedresults: + 1. Pass + """ + for attribute in ATTR: + Schema(topology_st.standalone).add('attributetypes', attribute) + + +@pytest.mark.parametrize("rule", MATCHING_RULES) +def test_valid_invalid_attributes(topology_st, rule): + """Delete duplicate attributes + + :id: d0bf3942-ba71-4947-90c8-1bfa9f0b838f + :parametrized: yes + :setup: Standalone + :steps: + 1. Create entry with an attribute that uses that matching rule + 2. Delete existing entry + 3. Create entry with an attribute that uses that matching rule providing duplicate + values that are duplicates according to the equality matching rule. + :expectedresults: + 1. Pass + 2. Pass + 3. 
Fail(ldap.TYPE_OR_VALUE_EXISTS) + """ + # Entry with extensibleObject + cos = CosTemplates(topology_st.standalone, DEFAULT_SUFFIX) + entry = cos.create(properties={'cn': 'addentry'+rule['attr'], + rule['attr']: rule['positive']}) + entry.delete() + with pytest.raises(ldap.TYPE_OR_VALUE_EXISTS): + cos.create(properties={'cn': 'addentry'+rule['attr'].split('attr')[1], + rule['attr']: rule['negative']}) + + +@pytest.mark.parametrize("mode", MATCHING_MODES) +def test_valid_invalid_modes(topology_st, mode): + """Add duplicate attributes + + :id: dec03362-ba26-41da-b479-e2b788403fce + :parametrized: yes + :setup: Standalone + :steps: + 1. Create entry with an attribute that uses matching mode + 2. Add an attribute that uses that matching mode providing duplicate + values that are duplicates according to the equality matching. + 3. Delete existing entry + :expectedresults: + 1. Pass + 2. Fail(ldap.TYPE_OR_VALUE_EXISTS) + 3. Pass + """ + # Entry with extensibleObject + cos = CosTemplates(topology_st.standalone, DEFAULT_SUFFIX) + entry = cos.create(properties={'cn': 'addentry'+mode['attr'], + mode['attr']: mode['positive']}) + with pytest.raises(ldap.TYPE_OR_VALUE_EXISTS): + entry.add(mode['attr'], mode['negative']) + entry.delete() + + +@pytest.mark.parametrize("mode", MODE_REPLACE) +def test_valid_invalid_mode_replace(topology_st, mode): + """Replace and Delete duplicate attribute + + :id: 7ec19eca-8cfc-11e9-a0df-8c16451d917b + :parametrized: yes + :setup: Standalone + :steps: + 1. Create entry with an attribute that uses that matching rule + 2. Replace an attribute that uses that matching rule + 3. Replace an attribute that uses that matching rule providing duplicate + values that are duplicates according to the equality matching mode. + 4. Delete existing attribute + 5. Try to delete the deleted attribute again. + 6. Delete entry + :expectedresults: + 1. Pass + 2. Pass + 3. Fail(ldap.TYPE_OR_VALUE_EXISTS) + 4. Pass + 5. Fail(ldap.NO_SUCH_ATTRIBUTE) + 6. 
Pass + """ + # Entry with extensibleObject + cos = CosTemplates(topology_st.standalone, DEFAULT_SUFFIX) + user = cos.create(properties={'cn': 'addentry'+mode['attr']}) + + # Replace Operation + user.replace(mode['attr'], mode['positive']) + with pytest.raises(ldap.TYPE_OR_VALUE_EXISTS): + user.replace(mode['attr'], mode['negative']) + # Delete Operation + user.remove(mode['attr'], mode['positive'][0]) + with pytest.raises(ldap.NO_SUCH_ATTRIBUTE): + user.remove(mode['attr'], mode['positive'][0]) + user.delete() + + +@pytest.fixture(scope="module") +def _searches(topology_st): + """ + Add attribute types to schema + """ + cos = CosTemplates(topology_st.standalone, DEFAULT_SUFFIX) + for attr, value in LIST_ATTR: + cos.create(properties={ + 'cn': attr, + 'attr' + attr.split('entry')[1][:-1]: value + }) + + +@pytest.mark.parametrize("attr, po_value, ne_attr", POSITIVE_NEGATIVE_VALUES) +def test_match_count(topology_st, _searches, attr, po_value, ne_attr): + """Search for an attribute with that matching rule with an assertion + value that should match + + :id: 00276180-b902-11e9-bff2-8c16451d917b + :parametrized: yes + :setup: Standalone + :steps: + 1. Filter rules as per the condition and assert the no of output. + 2. Negative filter with no outputs. + :expectedresults: + 1. Pass + 2. Pass + """ + cos = CosTemplates(topology_st.standalone, DEFAULT_SUFFIX) + assert len(cos.filter(attr)) == po_value + assert not cos.filter(ne_attr) + + +@pytest.mark.parametrize("attr, value", LIST_EXT) +def test_extensible_search(topology_st, _searches, attr, value): + """Match filter and output. + + :id: abe3e6dd-9ecc-11e8-adf0-8c16451d917c + :parametrized: yes + :setup: Standalone + :steps: + 1. Filer output should match the exact value given. + :expectedresults: + 1. 
Pass + """ + cos = CosTemplates(topology_st.standalone, DEFAULT_SUFFIX) + assert len(cos.filter(attr)) == value + + +if __name__ == '__main__': + CURRENT_FILE = os.path.realpath(__file__) + pytest.main("-s -v %s" % CURRENT_FILE) diff --git a/dirsrvtests/tests/suites/filter/filter_onelevel_aci_test.py b/dirsrvtests/tests/suites/filter/filter_onelevel_aci_test.py new file mode 100644 index 0000000..af9d5ef --- /dev/null +++ b/dirsrvtests/tests/suites/filter/filter_onelevel_aci_test.py @@ -0,0 +1,49 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2022 William Brown +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. +# --- END COPYRIGHT BLOCK ---- + +import pytest, os, ldap + +from lib389._constants import DEFAULT_SUFFIX +from lib389.topologies import topology_st + +from lib389.idm.account import Anonymous +from lib389.idm.user import UserAccount, UserAccounts + +pytestmark = pytest.mark.tier0 + +def test_search_attr(topology_st): + """Test filter can search attributes + + :id: 99104b2d-fe12-40d7-b977-a04fa184cfac + :setup: Standalone instance + :steps: + 1. Add test entry + 2. Search with onelevel + :expectedresults: + 1. Success + 2. Success + """ + users = UserAccounts(topology_st.standalone, DEFAULT_SUFFIX) + user = users.create_test_user(uid=1000) + + # Bind as anonymous + conn = Anonymous(topology_st.standalone).bind() + anon_users = UserAccounts(conn, DEFAULT_SUFFIX) + # Subtree, works. + res1 = anon_users.filter("(uid=test_user_1000)", scope=ldap.SCOPE_SUBTREE, strict=True) + assert len(res1) == 1 + + # Search with a one-level search. + # This previously hit a case with filter optimisation in how parent id values were added. + res2 = anon_users.filter("(uid=test_user_1000)", scope=ldap.SCOPE_ONELEVEL, strict=True) + # We must get at least one result! 
+ assert len(res2) == 1 + +if __name__ == "__main__": + CURRENT_FILE = os.path.realpath(__file__) + pytest.main("-s -v %s" % CURRENT_FILE) diff --git a/dirsrvtests/tests/suites/filter/filter_test.py b/dirsrvtests/tests/suites/filter/filter_test.py new file mode 100644 index 0000000..d6bfa5a --- /dev/null +++ b/dirsrvtests/tests/suites/filter/filter_test.py @@ -0,0 +1,311 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2020 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. +# --- END COPYRIGHT BLOCK --- +# +import logging + +import pytest +from lib389.tasks import * +from lib389.topologies import topology_st +from lib389._constants import PASSWORD, DEFAULT_SUFFIX, DN_DM, SUFFIX +from lib389.utils import * + +pytestmark = pytest.mark.tier1 + +logging.getLogger(__name__).setLevel(logging.DEBUG) +log = logging.getLogger(__name__) + +ENTRY_NAME = 'test_entry' + + +@pytest.mark.bz918686 +@pytest.mark.ds497 +def test_filter_escaped(topology_st): + """Test we can search for an '*' in a attribute value. + + :id: 5c9aa40c-c641-4603-bce3-b19f4c1f2031 + :setup: Standalone instance + :steps: + 1. Add a test user with an '*' in its attribute value + i.e. 'cn=test * me' + 2. Add another similar test user without '*' in its attribute value + 3. Search test user using search filter "cn=*\\**" + :expectedresults: + 1. This should pass + 2. This should pass + 3. 
Test user with 'cn=test * me' only, should be listed + """ + log.info('Running test_filter_escaped...') + + USER1_DN = 'uid=test_entry,' + DEFAULT_SUFFIX + USER2_DN = 'uid=test_entry2,' + DEFAULT_SUFFIX + + try: + topology_st.standalone.add_s(Entry((USER1_DN, {'objectclass': "top extensibleObject".split(), + 'sn': '1', + 'cn': 'test * me', + 'uid': 'test_entry', + 'userpassword': PASSWORD}))) + except ldap.LDAPError as e: + log.fatal('test_filter_escaped: Failed to add test user ' + USER1_DN + ': error ' + + e.message['desc']) + assert False + + try: + topology_st.standalone.add_s(Entry((USER2_DN, {'objectclass': "top extensibleObject".split(), + 'sn': '2', + 'cn': 'test me', + 'uid': 'test_entry2', + 'userpassword': PASSWORD}))) + except ldap.LDAPError as e: + log.fatal('test_filter_escaped: Failed to add test user ' + USER2_DN + ': error ' + e.message['desc']) + assert False + + try: + entry = topology_st.standalone.search_s(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE, 'cn=*\\**') + if not entry or len(entry) > 1: + log.fatal('test_filter_escaped: Entry was not found using "cn=*\\**"') + assert False + except ldap.LDAPError as e: + log.fatal('test_filter_escaped: Failed to search for user(%s), error: %s' % + (USER1_DN, e.message('desc'))) + assert False + + log.info('test_filter_escaped: PASSED') + + +def test_filter_search_original_attrs(topology_st): + """Search and request attributes with extra characters. The returned entry + should not have these extra characters: objectclass EXTRA" + + :id: d30d8a1c-84ac-47ba-95f9-41e3453fbf3a + :setup: Standalone instance + :steps: + 1. Execute a search operation for attributes with extra characters + 2. Check the search result have these extra characters or not + :expectedresults: + 1. Search should pass + 2. 
Search result should not have these extra characters attribute + """ + + log.info('Running test_filter_search_original_attrs...') + + try: + entry = topology_st.standalone.search_s(DEFAULT_SUFFIX, ldap.SCOPE_BASE, + 'objectclass=top', ['objectclass-EXTRA']) + if entry[0].hasAttr('objectclass-EXTRA'): + log.fatal('test_filter_search_original_attrs: Entry does not have the original attribute') + assert False + except ldap.LDAPError as e: + log.fatal('test_filter_search_original_attrs: Failed to search suffix(%s), error: %s' % + (DEFAULT_SUFFIX, e.message('desc'))) + assert False + + log.info('test_filter_search_original_attrs: PASSED') + +@pytest.mark.bz1511462 +def test_filter_scope_one(topology_st): + """Test ldapsearch with scope one gives only single entry + + :id: cf5a6078-bbe6-4d43-ac71-553c45923f91 + :setup: Standalone instance + :steps: + 1. Search ou=services,dc=example,dc=com using ldapsearch with + scope one using base as dc=example,dc=com + 2. Check that search should return only one entry + :expectedresults: + 1. This should pass + 2. This should pass + """ + + log.info('Search user using ldapsearch with scope one') + results = topology_st.standalone.search_s(DEFAULT_SUFFIX, ldap.SCOPE_ONELEVEL,'ou=services',['ou'] ) + log.info(results) + + log.info('Search should only have one entry') + assert len(results) == 1 + +@pytest.mark.ds47313 +def test_filter_with_attribute_subtype(topology_st): + """Adds 2 test entries and Search with + filters including subtype and ! + + :id: 0e69f5f2-6a0a-480e-8282-fbcc50231908 + :setup: Standalone instance + :steps: + 1. Add 2 entries and create 3 filters + 2. Search for entry with filter: (&(cn=test_entry en only)(!(cn=test_entry fr))) + 3. Search for entry with filter: (&(cn=test_entry en only)(!(cn;fr=test_entry fr))) + 4. Search for entry with filter: (&(cn=test_entry en only)(!(cn;en=test_entry en))) + 5. Delete the added entries + :expectedresults: + 1. Operation should be successful + 2. 
Search should be successful + 3. Search should be successful + 4. Search should not be successful + 5. Delete the added entries + """ + + # bind as directory manager + topology_st.standalone.log.info("Bind as %s" % DN_DM) + topology_st.standalone.simple_bind_s(DN_DM, PASSWORD) + + # enable filter error logging + # mod = [(ldap.MOD_REPLACE, 'nsslapd-errorlog-level', '32')] + # topology_st.standalone.modify_s(DN_CONFIG, mod) + + topology_st.standalone.log.info("\n\n######################### ADD ######################\n") + + # Prepare the entry with cn;fr & cn;en + entry_name_fr = '%s fr' % (ENTRY_NAME) + entry_name_en = '%s en' % (ENTRY_NAME) + entry_name_both = '%s both' % (ENTRY_NAME) + entry_dn_both = 'cn=%s, %s' % (entry_name_both, SUFFIX) + entry_both = Entry(entry_dn_both) + entry_both.setValues('objectclass', 'top', 'person') + entry_both.setValues('sn', entry_name_both) + entry_both.setValues('cn', entry_name_both) + entry_both.setValues('cn;fr', entry_name_fr) + entry_both.setValues('cn;en', entry_name_en) + + # Prepare the entry with one member + entry_name_en_only = '%s en only' % (ENTRY_NAME) + entry_dn_en_only = 'cn=%s, %s' % (entry_name_en_only, SUFFIX) + entry_en_only = Entry(entry_dn_en_only) + entry_en_only.setValues('objectclass', 'top', 'person') + entry_en_only.setValues('sn', entry_name_en_only) + entry_en_only.setValues('cn', entry_name_en_only) + entry_en_only.setValues('cn;en', entry_name_en) + + topology_st.standalone.log.info("Try to add Add %s: %r" % (entry_dn_both, entry_both)) + topology_st.standalone.add_s(entry_both) + + topology_st.standalone.log.info("Try to add Add %s: %r" % (entry_dn_en_only, entry_en_only)) + topology_st.standalone.add_s(entry_en_only) + + topology_st.standalone.log.info("\n\n######################### SEARCH ######################\n") + + # filter: (&(cn=test_entry en only)(!(cn=test_entry fr))) + myfilter = '(&(sn=%s)(!(cn=%s)))' % (entry_name_en_only, entry_name_fr) + topology_st.standalone.log.info("Try to 
search with filter %s" % myfilter) + ents = topology_st.standalone.search_s(SUFFIX, ldap.SCOPE_SUBTREE, myfilter) + assert len(ents) == 1 + assert ensure_str(ents[0].sn) == entry_name_en_only + topology_st.standalone.log.info("Found %s" % ents[0].dn) + + # filter: (&(cn=test_entry en only)(!(cn;fr=test_entry fr))) + myfilter = '(&(sn=%s)(!(cn;fr=%s)))' % (entry_name_en_only, entry_name_fr) + topology_st.standalone.log.info("Try to search with filter %s" % myfilter) + ents = topology_st.standalone.search_s(SUFFIX, ldap.SCOPE_SUBTREE, myfilter) + assert len(ents) == 1 + assert ensure_str(ents[0].sn) == entry_name_en_only + topology_st.standalone.log.info("Found %s" % ents[0].dn) + + # filter: (&(cn=test_entry en only)(!(cn;en=test_entry en))) + myfilter = '(&(sn=%s)(!(cn;en=%s)))' % (entry_name_en_only, entry_name_en) + topology_st.standalone.log.info("Try to search with filter %s" % myfilter) + ents = topology_st.standalone.search_s(SUFFIX, ldap.SCOPE_SUBTREE, myfilter) + assert len(ents) == 0 + topology_st.standalone.log.info("Found none") + + topology_st.standalone.log.info("\n\n######################### DELETE ######################\n") + + topology_st.standalone.log.info("Try to delete %s " % entry_dn_both) + topology_st.standalone.delete_s(entry_dn_both) + + topology_st.standalone.log.info("Try to delete %s " % entry_dn_en_only) + topology_st.standalone.delete_s(entry_dn_en_only) + + log.info('Testcase PASSED') + +@pytest.mark.bz1615155 +def test_extended_search(topology_st): + """Test we can search with equality extended matching rule + + :id: 396942ac-467b-435b-8d9f-e80c7ec4ba6c + :setup: Standalone instance + :steps: + 1. Add a test user with 'sn: ext-test-entry' + 2. Search '(cn:de:=ext-test-entry)' + 3. Search '(sn:caseIgnoreIA5Match:=EXT-TEST-ENTRY)' + 4. Search '(sn:caseIgnoreMatch:=EXT-TEST-ENTRY)' + 5. Search '(sn:caseExactMatch:=EXT-TEST-ENTRY)' + 6. Search '(sn:caseExactMatch:=ext-test-entry)' + 7. Search '(sn:caseExactIA5Match:=EXT-TEST-ENTRY)' + 8. 
Search '(sn:caseExactIA5Match:=ext-test-entry)' + :expectedresults: + 1. This should pass + 2. This should return one entry + 3. This should return one entry + 4. This should return one entry + 5. This should return NO entry + 6. This should return one entry + 7. This should return NO entry + 8. This should return one entry + """ + log.info('Running test_filter_escaped...') + + ATTR_VAL = 'ext-test-entry' + USER1_DN = "uid=%s,%s" % (ATTR_VAL, DEFAULT_SUFFIX) + + try: + topology_st.standalone.add_s(Entry((USER1_DN, {'objectclass': "top extensibleObject".split(), + 'sn': ATTR_VAL.encode(), + 'cn': ATTR_VAL.encode(), + 'uid': ATTR_VAL.encode()}))) + except ldap.LDAPError as e: + log.fatal('test_extended_search: Failed to add test user ' + USER1_DN + ': error ' + + e.message['desc']) + assert False + + # filter: '(cn:de:=ext-test-entry)' + myfilter = '(cn:de:=%s)' % ATTR_VAL + topology_st.standalone.log.info("Try to search with filter %s" % myfilter) + ents = topology_st.standalone.search_s(SUFFIX, ldap.SCOPE_SUBTREE, myfilter) + assert len(ents) == 1 + + # filter: '(sn:caseIgnoreIA5Match:=EXT-TEST-ENTRY)' + myfilter = '(cn:caseIgnoreIA5Match:=%s)' % ATTR_VAL.upper() + topology_st.standalone.log.info("Try to search with filter %s" % myfilter) + ents = topology_st.standalone.search_s(SUFFIX, ldap.SCOPE_SUBTREE, myfilter) + assert len(ents) == 1 + + # filter: '(sn:caseIgnoreMatch:=EXT-TEST-ENTRY)' + myfilter = '(cn:caseIgnoreMatch:=%s)' % ATTR_VAL.upper() + topology_st.standalone.log.info("Try to search with filter %s" % myfilter) + ents = topology_st.standalone.search_s(SUFFIX, ldap.SCOPE_SUBTREE, myfilter) + assert len(ents) == 1 + + # filter: '(sn:caseExactMatch:=EXT-TEST-ENTRY)' + myfilter = '(cn:caseExactMatch:=%s)' % ATTR_VAL.upper() + topology_st.standalone.log.info("Try to search with filter %s" % myfilter) + ents = topology_st.standalone.search_s(SUFFIX, ldap.SCOPE_SUBTREE, myfilter) + assert len(ents) == 0 + + # filter: '(sn:caseExactMatch:=ext-test-entry)' + 
myfilter = '(cn:caseExactMatch:=%s)' % ATTR_VAL + topology_st.standalone.log.info("Try to search with filter %s" % myfilter) + ents = topology_st.standalone.search_s(SUFFIX, ldap.SCOPE_SUBTREE, myfilter) + assert len(ents) == 1 + + # filter: '(sn:caseExactIA5Match:=EXT-TEST-ENTRY)' + myfilter = '(cn:caseExactIA5Match:=%s)' % ATTR_VAL.upper() + topology_st.standalone.log.info("Try to search with filter %s" % myfilter) + ents = topology_st.standalone.search_s(SUFFIX, ldap.SCOPE_SUBTREE, myfilter) + assert len(ents) == 0 + + # filter: '(sn:caseExactIA5Match:=ext-test-entry)' + myfilter = '(cn:caseExactIA5Match:=%s)' % ATTR_VAL + topology_st.standalone.log.info("Try to search with filter %s" % myfilter) + ents = topology_st.standalone.search_s(SUFFIX, ldap.SCOPE_SUBTREE, myfilter) + assert len(ents) == 1 + +if __name__ == '__main__': + # Run isolated + # -s for DEBUG mode + CURRENT_FILE = os.path.realpath(__file__) + pytest.main("-s %s" % CURRENT_FILE) diff --git a/dirsrvtests/tests/suites/filter/filter_test_aci_with_optimiser.py b/dirsrvtests/tests/suites/filter/filter_test_aci_with_optimiser.py new file mode 100644 index 0000000..d501b12 --- /dev/null +++ b/dirsrvtests/tests/suites/filter/filter_test_aci_with_optimiser.py @@ -0,0 +1,120 @@ + + +import ldap +import logging +import pytest +import os +from lib389._constants import * +from lib389.topologies import topology_st as topo +from lib389.idm.domain import Domain +from lib389.idm.organizationalunit import OrganizationalUnits +from lib389.idm.account import Anonymous + +log = logging.getLogger(__name__) + + +def test_filter_access(topo): + """Search that compound filters are correctly processed by access control + + :id: ad6a3ffc-2620-4e76-909b-926f94c1a920 + :setup: Standalone Instance + :steps: + 1. Add anonymous aci + 2. Add ou + 2. Test good filters + 4. Test bad filters + :expectedresults: + 1. Success + 2. Success + 3. The good filters return the OU entry + 4. 
The bad filters do not return the OU entry + """ + + # Add aci + ACI_TEXT = ('(targetattr="objectclass || cn")(version 3.0; acl "Anonymous read access"; allow' + + '(read, search, compare) userdn = "ldap:///anyone";)') + domain = Domain(topo.standalone, DEFAULT_SUFFIX) + domain.replace('aci', ACI_TEXT) + + # To remove noise, delete EVERYTHING else. + + ous = OrganizationalUnits(topo.standalone, DEFAULT_SUFFIX) + existing_ous = ous.list() + for eou in existing_ous: + eou.delete(recursive=True) + + # Create restricted entry + OU_PROPS = { + 'ou': 'restricted', + 'description': 'secret data' + } + ou = ous.create(properties=OU_PROPS) + OU_DN = ou.dn + + # Do anonymous search using different filters + GOOD_FILTERS = [ + "(|(objectClass=top)(&(objectClass=organizationalunit)(description=secret data)))", + "(|(&(objectClass=organizationalunit)(description=secret data))(objectClass=top))", + "(|(objectClass=organizationalunit)(description=secret data)(sn=*))", + "(|(description=secret data)(objectClass=organizationalunit)(sn=*))", + "(|(sn=*)(description=secret data)(objectClass=organizationalunit))", + "(objectClass=top)", + ] + BAD_FILTERS = [ + "(|(objectClass=person)(&(objectClass=organizationalunit)(description=secret data)))", + "(&(objectClass=top)(objectClass=organizationalunit)(description=secret data))", + "(|(&(description=*)(objectClass=top))(objectClass=person))", + "(description=secret data)", + "(description=*)", + "(ou=*)", + ] + conn = Anonymous(topo.standalone).bind() + + # These searches should return the OU + for search_filter in GOOD_FILTERS: + entries = conn.search_s(OU_DN, ldap.SCOPE_SUBTREE, search_filter) + log.debug(f"Testing good filter: {search_filter} result: {len(entries)}") + assert len(entries) == 1 + + # These searches should not return the OU + for search_filter in BAD_FILTERS: + entries = conn.search_s(OU_DN, ldap.SCOPE_SUBTREE, search_filter) + log.debug(f"Testing bad filter: {search_filter} result: {len(entries)}") + assert 
len(entries) == 0 + + +def test_base_search_with_substring_filter(topo): + """Test that filter normalization works correctly with base search using + substring filter + + :id: abc24774-4b07-481b-9f1f-9a209e459955 + :setup: Standalone Instance + :steps: + 1. Add ACI allowing search on "description" + 2. Add "description" to root suffix and make it upper case to test normalization + 3. Do base search with substring filter + :expectedresults: + 1. Success + 2. Success + 3. Success + """ + + # Add aci + ACI_TEXT = ('(targetattr="description")(version 3.0; acl "Anonymous read access"; allow' + + '(read, search, compare) userdn = "ldap:///anyone";)') + domain = Domain(topo.standalone, DEFAULT_SUFFIX) + domain.replace('aci', ACI_TEXT) + domain.replace('description', 'ACCESS') # case is important + + # open anonymous connection and do base search with substring filter + conn = Anonymous(topo.standalone).bind() + entries = conn.search_s(DEFAULT_SUFFIX, ldap.SCOPE_BASE, "description=ACCE*") + assert len(entries) == 1 + + +if __name__ == '__main__': + # Run isolated + # -s for DEBUG mode + CURRENT_FILE = os.path.realpath(__file__) + pytest.main(["-s", CURRENT_FILE]) + diff --git a/dirsrvtests/tests/suites/filter/filter_with_non_root_user_test.py b/dirsrvtests/tests/suites/filter/filter_with_non_root_user_test.py new file mode 100644 index 0000000..e7d7727 --- /dev/null +++ b/dirsrvtests/tests/suites/filter/filter_with_non_root_user_test.py @@ -0,0 +1,391 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2019 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. 
+# --- END COPYRIGHT BLOCK ---- + + +""" +verify and testing Filter from a search +""" + +import os +import pytest + +from lib389._constants import DEFAULT_SUFFIX, PW_DM +from lib389.topologies import topology_st as topo +from lib389.idm.domain import Domain +from lib389.idm.user import UserAccounts, UserAccount +from lib389.idm.account import Accounts + +pytestmark = pytest.mark.tier1 + +FILTER_MWARD = "(uid=mward)" +FILTER_L = "(l=sunnyvale)" +FILTER_MAIL = "(mail=jreu*)" +FILTER_EXAM = "(mail=*exam*)" +FILTER_7393 = "(telephonenumber=*7393)" +FILTER_408 = "(telephonenumber=*408*3)" +FILTER_UID = "(uid=*)" +FILTER_PASSWD = "(userpassword=*)" +FILTER_FRED = "(fred=*)" +FILTER_AAA = "(uid:2.16.840.1.113730.3.3.2.15.1:=>AAA)" +FILTER_AAA_ES = "(uid:es:=>AAA)" +FILTER_AAA_UID = "(uid:2.16.840.1.113730.3.3.2.15.1.5:=AAA)" +FILTER_100 = "(uid:2.16.840.1.113730.3.3.2.15.1:=>user100)" +FILTER_ES_100 = "(uid:es:=>user100)" +FILTER_UID_100 = "(uid:2.16.840.1.113730.3.3.2.15.1.5:=user100)" +FILTER_UID_1 = "(uid:2.16.840.1.113730.3.3.2.15.1:=<1)" +FILTER_UID_ES = "(uid:es:=<1)" +FILTER_UID_2 = "(uid:2.16.840.1.113730.3.3.2.15.1.1:=1)" +FILTER_UID_USER1 = "(uid:2.16.840.1.113730.3.3.2.15.1:= +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. 
+# --- END COPYRIGHT BLOCK --- +# + +import pytest +import ldap +import time +from lib389.topologies import topology_st as topology_st_pre +from lib389.dirsrv_log import DirsrvAccessLog +from lib389._mapped_object import DSLdapObjects +from lib389._constants import DEFAULT_SUFFIX +from lib389.extensibleobject import UnsafeExtensibleObjects + +pytestmark = pytest.mark.tier1 + +def _check_value(inst_cfg, value, exvalue=None): + if exvalue is None: + exvalue = value + inst_cfg.set('nsslapd-verify-filter-schema', value) + assert(inst_cfg.get_attr_val_utf8('nsslapd-verify-filter-schema') == exvalue) + +@pytest.fixture(scope="module") +def topology_st(topology_st_pre): + raw_objects = UnsafeExtensibleObjects(topology_st_pre.standalone, basedn=DEFAULT_SUFFIX) + # Add an object that won't be able to be queried due to invalid attrs. + raw_objects.create(properties = { + "cn": "test_obj", + "a": "a", + "b": "b", + "uid": "foo" + }) + return topology_st_pre + + +@pytest.mark.ds50349 +def test_filter_validation_config(topology_st): + """Test that the new on/warn/off setting can be set and read + correctly + + :id: ac14dad5-5bdf-474f-9936-7ce2d20fb8b6 + :setup: Standalone instance + :steps: + 1. Check the default value of nsslapd-verify-filter-schema + 2. Set the value to "on". + 3. Read the value is "on". + 4. Set the value to "warn". + 5. Read the value is "warn". + 6. Set the value to "off". + 7. Read the value is "off". + 8. Delete the value (reset) + 9. Check the reset value matches 1. + :expectedresults: + 1. Value is "on", "off", or "warn". + 2. Success + 3. Value is "on" + 4. Success + 5. Value is "warn" + 6. Success + 7. Value is "off" + 8. Success + 9. Value is same as from 1. 
+ """ + inst_cfg = topology_st.standalone.config + + initial_value = inst_cfg.get_attr_val_utf8('nsslapd-verify-filter-schema') + + # Check legacy values that may have been set + _check_value(inst_cfg, "on", "reject-invalid") + _check_value(inst_cfg, "warn", "process-safe") + _check_value(inst_cfg, "off") + # Check the more descriptive values + _check_value(inst_cfg, "reject-invalid") + _check_value(inst_cfg, "process-safe") + _check_value(inst_cfg, "warn-invalid") + _check_value(inst_cfg, "off") + + # This should fail + + with pytest.raises(ldap.OPERATIONS_ERROR): + _check_value(inst_cfg, "thnaounaou") + + inst_cfg.remove_all('nsslapd-verify-filter-schema') + final_value = inst_cfg.get_attr_val_utf8('nsslapd-verify-filter-schema') + assert(initial_value == final_value) + + +@pytest.mark.ds50349 +def test_filter_validation_enabled(topology_st): + """Test that queries which are invalid, are correctly rejected by the server. + + :id: 05afdbbd-0d7f-4774-958c-2139827fed70 + :setup: Standalone instance + :steps: + 1. Search a well formed query + 2. Search a poorly formed query + 3. Search a poorly formed complex (and/or) query + 4. Test the server can be restarted + :expectedresults: + 1. No warnings + 2. Query is rejected (err) + 3. Query is rejected (err) + 4. Server restarts + """ + inst = topology_st.standalone + + # In case the default has changed, we set the value to warn. + inst.config.set("nsslapd-verify-filter-schema", "reject-invalid") + raw_objects = DSLdapObjects(inst, basedn=DEFAULT_SUFFIX) + + # Check a good query has no errors. + r = raw_objects.filter("(objectClass=*)") + + with pytest.raises(ldap.UNWILLING_TO_PERFORM): + # Check a bad one DOES emit an error. + r = raw_objects.filter("(a=a)") + + with pytest.raises(ldap.UNWILLING_TO_PERFORM): + # Check a bad complex one does emit an error. + raw_objects.filter("(&(a=a)(b=b)(objectClass=*))") + + # Does restart work? 
+ inst.restart() + + +@pytest.mark.ds50349 +def test_filter_validation_warn_safe(topology_st): + """Test that queries which are invalid, are correctly marked as "notes=F" in + the access log, and return no entries or partial sets. + + :id: 7c8b3374-63c7-4201-9032-faae84c86d50 + :setup: Standalone instance + :steps: + 1. Search a well formed query + 2. Search a poorly formed query + 3. Search a poorly formed complex (and/or) query + :expectedresults: + 1. No warnings + 2. notes=F is present + 3. notes=F is present + """ + inst = topology_st.standalone + + # In case the default has changed, we set the value to warn. + inst.config.set("nsslapd-verify-filter-schema", "process-safe") + # Set the access log to un-buffered so we get it immediately. + inst.config.set("nsslapd-accesslog-logbuffering", "off") + time.sleep(.5) + + # Setup the query object. + # Now we don't care if there are any results, we only care about good/bad queries. + # To do this we have to bypass some of the lib389 magic, and just emit raw queries + # to check them. Turns out lib389 is well designed and this just works as expected + # if you use a single DSLdapObjects and filter. :) + raw_objects = DSLdapObjects(inst, basedn=DEFAULT_SUFFIX) + + # Find any initial notes=F + access_log = DirsrvAccessLog(inst) + r_init = access_log.match(".*notes=F.*") + + # Check a good query has no warnings. + r = raw_objects.filter("(objectClass=*)") + time.sleep(.5) + assert(len(r) > 0) + r_s1 = access_log.match(".*notes=F.*") + # Should be the same number of log lines IE 0. + assert(len(r_init) == len(r_s1)) + + # Check a bad one DOES emit a warning. + r = raw_objects.filter("(a=a)") + time.sleep(.5) + assert(len(r) == 0) + r_s2 = access_log.match(".*notes=F.*") + # Should be the greater number of log lines IE +1 + assert(len(r_init) + 1 == len(r_s2)) + + # Check a bad complex one does emit a warning. 
+ r = raw_objects.filter("(&(a=a)(b=b)(objectClass=*))") + time.sleep(.5) + assert(len(r) == 0) + r_s3 = access_log.match(".*notes=F.*") + # Should be the greater number of log lines IE +2 + assert(len(r_init) + 2 == len(r_s3)) + + # Check that we can still get things when partial + r = raw_objects.filter("(|(a=a)(b=b)(uid=foo))") + time.sleep(.5) + assert(len(r) == 1) + r_s4 = access_log.match(".*notes=F.*") + # Should be the greate number of log lines IE +2 + assert(len(r_init) + 3 == len(r_s4)) + + +@pytest.mark.ds50349 +def test_filter_validation_warn_unsafe(topology_st): + """Test that queries which are invalid, are correctly marked as "notes=F" in + the access log, and uses the legacy query behaviour to return unsafe sets. + + :id: 8b2b23fe-d878-435c-bc84-8c298be4ca1f + :setup: Standalone instance + :steps: + 1. Search a well formed query + 2. Search a poorly formed query + 3. Search a poorly formed complex (and/or) query + :expectedresults: + 1. No warnings + 2. notes=F is present + 3. notes=F is present + """ + inst = topology_st.standalone + + # In case the default has changed, we set the value to warn. + inst.config.set("nsslapd-verify-filter-schema", "warn-invalid") + # Set the access log to un-buffered so we get it immediately. + inst.config.set("nsslapd-accesslog-logbuffering", "off") + time.sleep(.5) + + # Setup the query object. + # Now we don't care if there are any results, we only care about good/bad queries. + # To do this we have to bypass some of the lib389 magic, and just emit raw queries + # to check them. Turns out lib389 is well designed and this just works as expected + # if you use a single DSLdapObjects and filter. :) + raw_objects = DSLdapObjects(inst, basedn=DEFAULT_SUFFIX) + + # Find any initial notes=F + access_log = DirsrvAccessLog(inst) + r_init = access_log.match(".*notes=(U,)?F.*") + + # Check a good query has no warnings. 
+ r = raw_objects.filter("(objectClass=*)") + time.sleep(.5) + assert(len(r) > 0) + r_s1 = access_log.match(".*notes=(U,)?F.*") + # Should be the same number of log lines IE 0. + assert(len(r_init) == len(r_s1)) + + # Check a bad one DOES emit a warning. + r = raw_objects.filter("(a=a)") + time.sleep(.5) + assert(len(r) == 1) + # NOTE: Unlike warn-process-safely, these become UNINDEXED and show in the logs. + r_s2 = access_log.match(".*notes=(U,)?F.*") + # Should be the greater number of log lines IE +1 + assert(len(r_init) + 1 == len(r_s2)) + + # Check a bad complex one does emit a warning. + r = raw_objects.filter("(&(a=a)(b=b)(objectClass=*))") + time.sleep(.5) + assert(len(r) == 1) + r_s3 = access_log.match(".*notes=(U,)?F.*") + # Should be the greater number of log lines IE +2 + assert(len(r_init) + 2 == len(r_s3)) + + # Check that we can still get things when partial + r = raw_objects.filter("(|(a=a)(b=b)(uid=foo))") + time.sleep(.5) + assert(len(r) == 1) + r_s4 = access_log.match(".*notes=(U,)?F.*") + # Should be the greater number of log lines IE +2 + assert(len(r_init) + 3 == len(r_s4)) diff --git a/dirsrvtests/tests/suites/filter/vfilter_attribute_test.py b/dirsrvtests/tests/suites/filter/vfilter_attribute_test.py new file mode 100644 index 0000000..ce1185c --- /dev/null +++ b/dirsrvtests/tests/suites/filter/vfilter_attribute_test.py @@ -0,0 +1,219 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2019 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. +# --- END COPYRIGHT BLOCK --- + +""" +This script will test different type of Filters. 
+""" + +import os +import pytest + +from lib389._constants import DEFAULT_SUFFIX, PW_DM +from lib389.topologies import topology_st as topo +from lib389.idm.user import UserAccounts +from lib389.idm.organizationalunit import OrganizationalUnits +from lib389.schema import Schema +from lib389.idm.account import Accounts + +pytestmark = pytest.mark.tier1 + +FILTER_COMBINE = f"(& (| (nsRoleDN=cn=new managed role) (sn=Hall)) (l=sunnyvale))" +FILTER_RJ = "(uid=rjense2)" +FILTER_CN = "(nsRoleDN=cn=new managed *)" +FILTER_CN_MT = f"(& {FILTER_CN} (uid=mtyler))" + +VALUES_POSITIVE = [ + (FILTER_COMBINE, ['*', 'cn'], 'cn'), + (FILTER_COMBINE, ['cn', 'cn', 'cn'], 'cn'), + (FILTER_COMBINE, ['cn', 'Cn', 'CN'], 'cn'), + (FILTER_COMBINE, ['cn', '*'], 'cn'), + (FILTER_COMBINE, ['modifiersName', 'modifyTimestamp'], 'modifiersName'), + (FILTER_COMBINE, ['modifiersName', 'modifyTimestamp'], 'modifyTimestamp'), + (FILTER_COMBINE, ['*', 'modifiersName', 'modifyTimestamp'], 'modifiersName'), + (FILTER_COMBINE, ['*', 'modifiersName', 'modifyTimestamp'], 'modifyTimestamp'), + (FILTER_COMBINE, ['cn', 'modifiersName', 'modifyTimestamp'], 'modifiersName'), + (FILTER_COMBINE, ['cn', 'modifiersName', 'modifyTimestamp'], 'modifyTimestamp'), + (FILTER_COMBINE, ['cn', 'modifiersName', 'modifyTimestamp'], 'cn'), + (FILTER_COMBINE, ['cn', 'modifiersName', 'nsRoleDN'], 'cn'), + (FILTER_COMBINE, ['cn', 'modifiersName', 'nsRoleDN'], 'modifiersName'), + (FILTER_COMBINE, ['cn', 'modifiersName', 'nsRoleDN'], 'nsRoleDN'), + (FILTER_COMBINE, ['cn', '*', 'modifiersName', 'nsRoleDN'], 'cn'), + (FILTER_COMBINE, ['cn', '*', 'modifiersName', 'nsRoleDN'], 'modifiersName'), + (FILTER_COMBINE, ['cn', '*', 'modifiersName', 'nsRoleDN'], 'nsRoleDN'), + (FILTER_RJ, ['*', 'mailquota'], 'mailquota'), + (FILTER_RJ, ['mailquota', '*'], 'mailquota'), + (FILTER_RJ, ['mailquota'], 'mailquota'), + (FILTER_RJ, ['mailquota', 'nsRoleDN'], 'mailquota'), + (FILTER_RJ, ['mailquota', 'nsRoleDN'], 'nsRoleDN'), + (FILTER_CN, ['cn', 
'nsRoleDN'], 'cn'), + (FILTER_CN, ['cn', 'nsRoleDN'], 'nsRoleDN'), + (FILTER_CN_MT, ['mailquota', 'nsRoleDN'], 'mailquota'), + (FILTER_CN_MT, ['mailquota', 'nsRoleDN'], 'nsRoleDN'), + (FILTER_CN_MT, ['mailquota', 'modifiersName', 'nsRoleDN'], 'mailquota'), + (FILTER_CN_MT, ['mailquota', 'modifiersName', 'nsRoleDN'], 'modifiersName'), + (FILTER_CN_MT, ['mailquota', 'modifiersName', 'nsRoleDN'], 'nsRoleDN'), + (FILTER_CN_MT, ['*', 'modifiersName', 'nsRoleDN'], 'nsRoleDN'), + (FILTER_CN_MT, ['*', 'modifiersName', 'nsRoleDN'], 'modifiersName')] + + +LIST_OF_USER = ['scarter', 'tmorris', 'kvaughan', 'abergin', 'dmiller', + 'gfarmer', 'kwinters', 'trigden', 'cschmith', 'jwallace', + 'jwalker', 'tclow', 'rdaugherty', 'jreuter', 'tmason', + 'btalbot', 'mward', 'bjablons', 'jmcFarla', 'llabonte', + 'jcampaig', 'bhal2', 'alutz', 'achassin', 'hmiller', + 'jcampai2', 'lulrich', 'mlangdon', 'striplet', + 'gtriplet', 'jfalena', 'speterso', 'ejohnson', + 'prigden', 'bwalker', 'kjensen', 'mlott', + 'cwallace', 'tpierce', 'rbannist', 'bplante', + 'rmills', 'bschneid', 'skellehe', 'brentz', + 'dsmith', 'scarte2', 'dthorud', 'ekohler', + 'lcampbel', 'tlabonte', 'slee', 'bfree', + 'tschneid', 'prose', 'jhunter', 'ashelton', + 'mmcinnis', 'falbers', 'mschneid', 'pcruse', + 'tkelly', 'gtyler'] + + +@pytest.fixture(scope="module") +def _create_test_entries(topo): + """ + :param topo: + :return: Will create users used for this test script . 
+ """ + users_people = UserAccounts(topo.standalone, DEFAULT_SUFFIX) + for demo1 in LIST_OF_USER: + users_people.create(properties={ + 'uid': demo1, + 'cn': demo1, + 'sn': demo1, + 'uidNumber': str(1000), + 'gidNumber': '2000', + 'homeDirectory': '/home/' + demo1, + 'givenname': demo1, + 'userpassword': PW_DM + }) + + users_people.create(properties={ + 'uid': 'bhall', + 'cn': 'Benjamin Hall', + 'sn': 'Hall', + 'uidNumber': str(1000), + 'gidNumber': '2000', + 'homeDirectory': '/home/' + 'bhall', + 'mail': 'bhall@anuj.com', + 'givenname': 'Benjamin', + 'ou': ['Product Development', 'People'], + 'l': 'sunnyvale', + 'telephonenumber': '+1 408 555 6067', + 'roomnumber': '2511', + 'manager': 'uid=trigden, ou=People, dc=example, dc=com', + 'nsRoleDN': 'cn=new managed role, ou=People, dc=example, dc=com', + 'userpassword': PW_DM, + }) + + ous = OrganizationalUnits(topo.standalone, DEFAULT_SUFFIX) + ou_ou = ous.create(properties={'ou': 'COS'}) + + ous = OrganizationalUnits(topo.standalone, ou_ou.dn) + ous.create(properties={'ou': 'MailSchemeClasses'}) + + Schema(topo.standalone).\ + add('attributetypes', "( 9.9.8.4 NAME 'emailclass' SYNTAX 1.3.6.1.4.1.1466.115.121.1.15 " + "X-ORIGIN 'RFC 2256' )") + Schema(topo.standalone).\ + add('objectclasses', "( 9.9.8.2 NAME 'mailSchemeUser' DESC " + "'User Defined ObjectClass' SUP 'top' MUST " + "( objectclass ) MAY (aci $ emailclass) X-ORIGIN 'RFC 2256' )") + + users_people.create(properties={ + 'cn': 'Randy Jensen', + 'sn': 'Jensen', + 'givenname': 'Randy', + 'objectclass': 'top account person organizationalPerson inetOrgPerson mailSchemeUser ' + 'mailRecipient posixaccount'.split(), + 'l': 'sunnyvale', + 'uid': 'rjense2', + 'uidNumber': str(1000), + 'gidNumber': str(1000), + 'homeDirectory': '/home/' + 'rjense2', + 'mail': 'rjense2@example.com', + 'telephonenumber': '+1 408 555 9045', + 'roomnumber': '1984', + 'manager': 'uid=jwalker, ou=People, dc=example,dc=com', + 'nsRoleDN': 'cn=new managed role, ou=People, dc=example, dc=com', 
+ 'emailclass': 'vpemail', + 'mailquota': '600', + 'userpassword': PW_DM, + }) + + users_people.create(properties={ + 'cn': 'Bjorn Talbot', + 'sn': 'Talbot', + 'givenname': 'Bjorn', + 'objectclass': 'top account person organizationalPerson inetOrgPerson posixaccount'.split(), + 'ou': ['Product Development', 'People'], + 'l': 'Santa Clara', + 'uid': 'btalbo2', + 'mail': 'btalbo2@example.com', + 'telephonenumber': '+1 408 555 4234', + 'roomnumber': '1205', + 'uidNumber': str(1000), + 'gidNumber': str(1000), + 'homeDirectory': '/home/' + 'btalbo2', + 'manager': 'uid=trigden, ou=People, dc=example,dc=com', + 'nsRoleDN': 'cn=new managed role, ou=People, dc=example, dc=com', + 'userpassword': PW_DM + }) + + users_people.create(properties={ + 'objectclass': 'top ' + 'account ' + 'person ' + 'organizationalPerson ' + 'inetOrgPerson ' + 'mailRecipient ' + 'mailSchemeUser ' + 'posixaccount'.split(), + 'cn': 'Matthew Tyler', + 'sn': 'Tyler', + 'givenname': 'Matthew', + 'ou': ['Human Resources', 'People'], + 'l': 'Cupertino', + 'uid': 'mtyler', + 'mail': 'mtyler@example.com', + 'telephonenumber': '+1 408 555 7907', + 'roomnumber': '2701', + 'uidNumber': str(1000), + 'gidNumber': str(1000), + 'homeDirectory': '/home/' + 'mtyler', + 'manager': 'uid=jwalker, ou=People, dc=example,dc=com', + 'nsRoleDN': 'cn=new managed role, ou=People, dc=example, dc=com', + 'mailquota': '600', + 'userpassword': PW_DM}) + + +@pytest.mark.parametrize("filter_test, condition, filter_out", VALUES_POSITIVE) +def test_all_together_positive(topo, _create_test_entries, filter_test, condition, filter_out): + """Test filter with positive results. + + :id: 51924a38-9baa-11e8-b22a-8c16451d917b + :parametrized: yes + :setup: Standalone Server + :steps: + 1. Create Filter rules. + 2. Try to pass filter rules as per the condition . + :expectedresults: + 1. It should pass + 2. 
It should pass + """ + account = Accounts(topo.standalone, DEFAULT_SUFFIX) + assert account.filter(filter_test)[0].get_attrs_vals_utf8(condition)[filter_out] + + +if __name__ == '__main__': + CURRENT_FILE = os.path.realpath(__file__) + pytest.main("-s -v %s" % CURRENT_FILE) diff --git a/dirsrvtests/tests/suites/filter/vfilter_simple_test.py b/dirsrvtests/tests/suites/filter/vfilter_simple_test.py new file mode 100644 index 0000000..43e80b0 --- /dev/null +++ b/dirsrvtests/tests/suites/filter/vfilter_simple_test.py @@ -0,0 +1,556 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2019 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. +# --- END COPYRIGHT BLOCK ---- + +""" +verify and testing Filter from a search +""" + +import os +import pytest + +from lib389._constants import DEFAULT_SUFFIX, PW_DM +from lib389.topologies import topology_st as topo +from lib389.idm.organizationalunit import OrganizationalUnits +from lib389.idm.account import Accounts +from lib389.idm.user import UserAccount, UserAccounts +from lib389.schema import Schema +from lib389.idm.role import ManagedRoles, FilteredRoles + +pytestmark = pytest.mark.tier1 + +FILTER_POSTAL = "(postalCode=99999)" +FILTER_ADDRESS = "(postalAddress=345 California Av., Mountain View, CA)" +FILTER_8888 = "(postalCode:2.16.840.1.113730.3.3.2.7.1:=88888)" +FILTER_6666 = "(postalCode:2.16.840.1.113730.3.3.2.7.1.3:=66666)" +FILTER_VPE = "(emailclass=vpe*)" +FILTER_EMAIL = "(emailclass=*emai*)" +FILTER_EMAILQUATA = "(mailquota=*00)" +FILTER_QUATA = '(mailquota=*6*0)' +FILTER_ROLE = '(nsRole=*)' +FILTER_POST = '(postalAddress=*)' +FILTER_CLASS = "(emailclass:2.16.840.1.113730.3.3.2.15.1:=>AAA)" +FILTER_CLASSES = "(emailclass:es:=>AAA)" +FILTER_AAA = "(emailclass:2.16.840.1.113730.3.3.2.15.1.5:=AAA)" +FILTER_VE = "(emailclass:2.16.840.1.113730.3.3.2.15.1:=>vpemail)" +FILTER_VPEM = "(emailclass:es:=>vpemail)" +FILTER_900 = 
"(mailquota:2.16.840.1.113730.3.3.2.15.1.1:=900)" +FILTER_7777 = "(postalCode:de:==77777)" +FILTER_FRED = '(fred=*)' +FILTER_ECLASS = "(emailclass:2.16.840.1.113730.3.3.2.15.1.5:=vpemail)" +FILTER_ECLASS_1 = "(emailclass:2.16.840.1.113730.3.3.2.15.1:=<1)" +FILTER_ECLASS_2 = "(emailclass:es:=<1)" +FILTER_ECLASS_3 = "(emailclass:2.16.840.1.113730.3.3.2.15.1.1:=1)" +FILTER_ECLASS_4 = "(emailclass:2.16.840.1.113730.3.3.2.15.1:= 0: + return True + return False + + +def _allow_machine_account(inst, name): + # First we need to get the mapping tree dn + mt = inst.mappingtree.list(suffix=DEFAULT_SUFFIX)[0] + inst.modify_s('cn=replica,%s' % mt.dn, [ + (ldap.MOD_REPLACE, 'nsDS5ReplicaBindDN', f"uid={name},ou=Machines,{DEFAULT_SUFFIX}".encode('utf-8')) + ]) + +def _verify_etc_hosts(): + #Check if /etc/hosts is compatible with the test + NEEDED_HOSTS = ( ('ldapkdc.example.com', '127.0.0.1'), + ('ldapkdc1.example.com', '127.0.1.1'), + ('ldapkdc2.example.com', '127.0.2.1')) + found_hosts = {} + with open('/etc/hosts','r') as f: + for l in f: + s = l.split() + if len(s) < 2: + continue + for nh in NEEDED_HOSTS: + if (s[0] == nh[1] and s[1] == nh[0]): + found_hosts[s[1]] = True + return len(found_hosts) == len(NEEDED_HOSTS) + +@pytest.mark.skipif(not _verify_etc_hosts(), reason="/etc/hosts does not contains the needed hosts.") +@pytest.mark.skipif(True, reason="Test disabled because it requires specific kerberos requirement (server principal, keytab, etc ...") +def test_gssapi_repl(topology_m2): + """Test gssapi authenticated replication agreement of two suppliers using KDC + + :id: 552850aa-afc3-473e-9c39-aae802b46f11 + + :setup: MMR with two suppliers + + :steps: + 1. Create the locations on each supplier for the other supplier to bind to + 2. Set on the cn=replica config to accept the other suppliers mapping under mapping tree + 3. Create the replication agreements from M1->M2 and vice versa (M2->M1) + 4. Set the replica bind method to sasl gssapi for both agreements + 5. 
Initialize all the agreements + 6. Create a user on M1 and check if user is created on M2 + 7. Create a user on M2 and check if user is created on M1 + + :expectedresults: + 1. Locations should be added successfully + 2. Configuration should be added successfully + 3. Replication agreements should be added successfully + 4. Bind method should be set to sasl gssapi for both agreements + 5. Agreements should be initialized successfully + 6. Test User should be created on M1 and M2 both + 7. Test User should be created on M1 and M2 both + """ + supplier1 = topology_m2.ms["supplier1"] + supplier2 = topology_m2.ms["supplier2"] + + # Create the locations on each supplier for the other to bind to. + _create_machine_ou(supplier1) + _create_machine_ou(supplier2) + + _create_machine_account(supplier1, 'ldap/%s' % HOST_SUPPLIER_1) + _create_machine_account(supplier1, 'ldap/%s' % HOST_SUPPLIER_2) + _create_machine_account(supplier2, 'ldap/%s' % HOST_SUPPLIER_1) + _create_machine_account(supplier2, 'ldap/%s' % HOST_SUPPLIER_2) + + # Set on the cn=replica config to accept the other suppliers princ mapping under mapping tree + _allow_machine_account(supplier1, 'ldap/%s' % HOST_SUPPLIER_2) + _allow_machine_account(supplier2, 'ldap/%s' % HOST_SUPPLIER_1) + + # + # Create all the agreements + # + # Creating agreement from supplier 1 to supplier 2 + + # Set the replica bind method to sasl gssapi + properties = {RA_NAME: r'meTo_$host:$port', + RA_METHOD: 'SASL/GSSAPI', + RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]} + supplier1.agreement.delete(suffix=SUFFIX, consumer_host=supplier2.host, consumer_port=supplier2.port) + m1_m2_agmt = supplier1.agreement.create(suffix=SUFFIX, host=supplier2.host, port=supplier2.port, properties=properties) + if not m1_m2_agmt: + log.fatal("Fail to create a supplier -> supplier replica agreement") + sys.exit(1) + log.debug("%s created" % m1_m2_agmt) + + # Creating agreement from supplier 2 to supplier 1 + + # Set the replica bind method to 
sasl gssapi + properties = {RA_NAME: r'meTo_$host:$port', + RA_METHOD: 'SASL/GSSAPI', + RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]} + supplier2.agreement.delete(suffix=SUFFIX, consumer_host=supplier1.host, consumer_port=supplier1.port) + m2_m1_agmt = supplier2.agreement.create(suffix=SUFFIX, host=supplier1.host, port=supplier1.port, properties=properties) + if not m2_m1_agmt: + log.fatal("Fail to create a supplier -> supplier replica agreement") + sys.exit(1) + log.debug("%s created" % m2_m1_agmt) + + # Allow the replicas to get situated with the new agreements... + time.sleep(5) + + # + # Initialize all the agreements + # + agmt = Agreement(supplier1, m1_m2_agmt) + agmt.begin_reinit() + agmt.wait_reinit() + + # Check replication is working... + if supplier1.testReplication(DEFAULT_SUFFIX, supplier2): + log.info('Replication is working.') + else: + log.fatal('Replication is not working.') + assert False + + # Add a user to supplier 1 + _create_machine_account(supplier1, 'http/one.example.com') + # Check it's on 2 + time.sleep(5) + assert (_check_machine_account(supplier2, 'http/one.example.com')) + # Add a user to supplier 2 + _create_machine_account(supplier2, 'http/two.example.com') + # Check it's on 1 + time.sleep(5) + assert (_check_machine_account(supplier2, 'http/two.example.com')) + + +if __name__ == '__main__': + # Run isolated + # -s for DEBUG mode + CURRENT_FILE = os.path.realpath(__file__) + pytest.main("-s %s" % CURRENT_FILE) diff --git a/dirsrvtests/tests/suites/healthcheck/__init__.py b/dirsrvtests/tests/suites/healthcheck/__init__.py new file mode 100644 index 0000000..ab645c2 --- /dev/null +++ b/dirsrvtests/tests/suites/healthcheck/__init__.py @@ -0,0 +1,3 @@ +""" + :Requirement: 389-ds-base: HealthCheck +""" \ No newline at end of file diff --git a/dirsrvtests/tests/suites/healthcheck/health_config_test.py b/dirsrvtests/tests/suites/healthcheck/health_config_test.py new file mode 100644 index 0000000..dec3a6c --- /dev/null +++ 
b/dirsrvtests/tests/suites/healthcheck/health_config_test.py @@ -0,0 +1,589 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2023 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. +# --- END COPYRIGHT BLOCK --- +# + +import pytest +import os +import subprocess + +from lib389.backend import Backends, DatabaseConfig +from lib389.cos import CosTemplates, CosPointerDefinitions +from lib389.dbgen import dbgen_users +from lib389.idm.account import Accounts +from lib389.index import Index +from lib389.plugins import ReferentialIntegrityPlugin, MemberOfPlugin +from lib389.utils import * +from lib389._constants import * +from lib389.cli_base import FakeArgs +from lib389.topologies import topology_st +from lib389.cli_ctl.health import health_check_run +from lib389.paths import Paths + +pytestmark = pytest.mark.tier1 + +CMD_OUTPUT = 'No issues found.' +JSON_OUTPUT = '[]' +log = logging.getLogger(__name__) + + +def run_healthcheck_and_flush_log(topology, instance, searched_code, json, searched_code2=None): + args = FakeArgs() + args.instance = instance.serverid + args.verbose = instance.verbose + args.list_errors = False + args.list_checks = False + args.check = ['config', 'refint', 'backends', 'monitor-disk-space', 'logs', 'memberof'] + args.dry_run = False + + if json: + log.info('Use healthcheck with --json option') + args.json = json + health_check_run(instance, topology.logcap.log, args) + assert topology.logcap.contains(searched_code) + log.info('Healthcheck returned searched code: %s' % searched_code) + + if searched_code2 is not None: + assert topology.logcap.contains(searched_code2) + log.info('Healthcheck returned searched code: %s' % searched_code2) + else: + log.info('Use healthcheck without --json option') + args.json = json + health_check_run(instance, topology.logcap.log, args) + assert topology.logcap.contains(searched_code) + log.info('Healthcheck returned searched code: %s' % 
searched_code) + + if searched_code2 is not None: + assert topology.logcap.contains(searched_code2) + log.info('Healthcheck returned searched code: %s' % searched_code2) + + log.info('Clear the log') + topology.logcap.flush() + + +@pytest.fixture(scope="function") +def setup_ldif(topology_st, request): + log.info("Generating LDIF...") + ldif_dir = topology_st.standalone.get_ldif_dir() + global import_ldif + import_ldif = ldif_dir + '/basic_import.ldif' + dbgen_users(topology_st.standalone, 5000, import_ldif, DEFAULT_SUFFIX) + + def fin(): + log.info('Delete file') + os.remove(import_ldif) + + request.addfinalizer(fin) + + +@pytest.mark.ds50873 +@pytest.mark.bz1685160 +@pytest.mark.xfail(ds_is_older("1.4.1"), reason="Not implemented") +def test_healthcheck_logging_format_should_be_revised(topology_st): + """Check if HealthCheck returns DSCLE0001 code + + :id: 277d7980-123b-481b-acba-d90921b9f5ac + :setup: Standalone instance + :steps: + 1. Create DS instance + 2. Set nsslapd-logging-hr-timestamps-enabled to 'off' + 3. Use HealthCheck without --json option + 4. Use HealthCheck with --json option + 5. Set nsslapd-logging-hr-timestamps-enabled to 'on' + 6. Use HealthCheck without --json option + 7. Use HealthCheck with --json option + :expectedresults: + 1. Success + 2. Success + 3. Healthcheck reports DSCLE0001 code and related details + 4. Healthcheck reports DSCLE0001 code and related details + 5. Success + 6. Healthcheck reports no issue found + 7. 
Healthcheck reports no issue found + """ + + RET_CODE = 'DSCLE0001' + + standalone = topology_st.standalone + + log.info('Set nsslapd-logging-hr-timestamps-enabled to off') + standalone.config.set('nsslapd-logging-hr-timestamps-enabled', 'off') + standalone.config.set("nsslapd-accesslog-logbuffering", "on") + + run_healthcheck_and_flush_log(topology_st, standalone, json=False, searched_code=RET_CODE) + run_healthcheck_and_flush_log(topology_st, standalone, json=True, searched_code=RET_CODE) + + log.info('Set nsslapd-logging-hr-timestamps-enabled to off') + standalone.config.set('nsslapd-logging-hr-timestamps-enabled', 'on') + + run_healthcheck_and_flush_log(topology_st, standalone, json=False, searched_code=CMD_OUTPUT) + run_healthcheck_and_flush_log(topology_st, standalone, json=True, searched_code=JSON_OUTPUT) + + +@pytest.mark.ds50873 +@pytest.mark.bz1685160 +@pytest.mark.xfail(ds_is_older("1.4.1"), reason="Not implemented") +def test_healthcheck_RI_plugin_is_misconfigured(topology_st): + """Check if HealthCheck returns DSRILE0001 code + + :id: de2e90a2-89fe-472c-acdb-e13cbca5178d + :setup: Standalone instance + :steps: + 1. Create DS instance + 2. Configure the instance with Integrity Plugin + 3. Set the referint-update-delay attribute of the RI plugin, to a value upper than 0 + 4. Use HealthCheck without --json option + 5. Use HealthCheck with --json option + 6. Set the referint-update-delay attribute to 0 + 7. Use HealthCheck without --json option + 8. Use HealthCheck with --json option + :expectedresults: + 1. Success + 2. Success + 3. Success + 4. Healthcheck reports DSRILE0001 code and related details + 5. Healthcheck reports DSRILE0001 code and related details + 6. Success + 7. Healthcheck reports no issue found + 8. 
Healthcheck reports no issue found + """ + + RET_CODE = 'DSRILE0001' + + standalone = topology_st.standalone + + plugin = ReferentialIntegrityPlugin(standalone) + plugin.disable() + plugin.enable() + + log.info('Set the referint-update-delay attribute to a value upper than 0') + plugin.replace('referint-update-delay', '5') + + run_healthcheck_and_flush_log(topology_st, standalone, json=False, searched_code=RET_CODE) + run_healthcheck_and_flush_log(topology_st, standalone, json=True, searched_code=RET_CODE) + + log.info('Set the referint-update-delay attribute back to 0') + plugin.replace('referint-update-delay', '0') + + run_healthcheck_and_flush_log(topology_st, standalone, json=False, searched_code=CMD_OUTPUT) + run_healthcheck_and_flush_log(topology_st, standalone, json=True, searched_code=JSON_OUTPUT) + + +@pytest.mark.ds50873 +@pytest.mark.bz1685160 +@pytest.mark.xfail(ds_is_older("1.4.1"), reason="Not implemented") +def test_healthcheck_RI_plugin_missing_indexes(topology_st): + """Check if HealthCheck returns DSRILE0002 code + + :id: 05c55e37-bb3e-48d1-bbe8-29c980f94f10 + :setup: Standalone instance + :steps: + 1. Create DS instance + 2. Configure the instance with Integrity Plugin + 3. Change the index type of the member attribute index to ‘approx’ + 4. Use HealthCheck without --json option + 5. Use HealthCheck with --json option + 6. Set the index type of the member attribute index to ‘eq’ + 7. Use HealthCheck without --json option + 8. Use HealthCheck with --json option + :expectedresults: + 1. Success + 2. Success + 3. Success + 4. Healthcheck reports DSRILE0002 code and related details + 5. Healthcheck reports DSRILE0002 code and related details + 6. Success + 7. Healthcheck reports no issue found + 8. 
Healthcheck reports no issue found + """ + + RET_CODE = 'DSRILE0002' + MEMBER_DN = 'cn=member,cn=index,cn=userroot,cn=ldbm database,cn=plugins,cn=config' + + standalone = topology_st.standalone + + log.info('Enable RI plugin') + plugin = ReferentialIntegrityPlugin(standalone) + plugin.disable() + plugin.enable() + + log.info('Change the index type of the member attribute index to approx') + index = Index(topology_st.standalone, MEMBER_DN) + index.replace('nsIndexType', 'approx') + + run_healthcheck_and_flush_log(topology_st, standalone, json=False, searched_code=RET_CODE) + run_healthcheck_and_flush_log(topology_st, standalone, json=True, searched_code=RET_CODE) + + log.info('Set the index type of the member attribute index back to eq') + index.replace('nsIndexType', 'eq') + + run_healthcheck_and_flush_log(topology_st, standalone, json=False, searched_code=CMD_OUTPUT) + run_healthcheck_and_flush_log(topology_st, standalone, json=True, searched_code=JSON_OUTPUT) + + +def test_healthcheck_MO_plugin_missing_indexes(topology_st): + """Check if HealthCheck returns DSMOLE0002 code + + :id: 236b0ec2-13da-48fb-b65a-db7406d56d5d + :setup: Standalone instance + :steps: + 1. Create DS instance + 2. Configure the instance with MO Plugin with two memberOfGroupAttrs + 3. Use HealthCheck without --json option + 4. Use HealthCheck with --json option + 5. Add index for new group attr + 6. Use HealthCheck without --json option + 7. Use HealthCheck with --json option + :expectedresults: + 1. Success + 2. Success + 3. Healthcheck reports DSMOLE0002 code and related details + 4. Healthcheck reports DSMOLE0002 code and related details + 5. Success + 6. Healthcheck reports no issue found + 7. 
Healthcheck reports no issue found + """ + + RET_CODE = 'DSMOLE0001' + MO_GROUP_ATTR = 'creatorsname' + + standalone = topology_st.standalone + + log.info('Enable MO plugin') + plugin = MemberOfPlugin(standalone) + plugin.disable() + plugin.enable() + plugin.add('memberofgroupattr', MO_GROUP_ATTR) + time.sleep(.5) + + run_healthcheck_and_flush_log(topology_st, standalone, json=False, searched_code=RET_CODE) + run_healthcheck_and_flush_log(topology_st, standalone, json=True, searched_code=RET_CODE) + + log.info('Add the missing "eq" index') + be = Backends(standalone).get('userRoot') + be.add_index(MO_GROUP_ATTR, "eq", None) + time.sleep(.5) + + run_healthcheck_and_flush_log(topology_st, standalone, json=False, searched_code=CMD_OUTPUT) + run_healthcheck_and_flush_log(topology_st, standalone, json=True, searched_code=JSON_OUTPUT) + # Restart the intsnce after changing the plugin to avoid breaking the other tests + standalone.restart() + + +@pytest.mark.ds50873 +@pytest.mark.bz1685160 +@pytest.mark.xfail(ds_is_older("1.4.1"), reason="Not implemented") +def test_healthcheck_virtual_attr_incorrectly_indexed(topology_st): + """Check if HealthCheck returns DSVIRTLE0001 code + + :id: 1055173b-21aa-4aaa-9e91-4dc6c5e0c01f + :setup: Standalone instance + :steps: + 1. Create DS instance + 2. Create a CoS definition entry + 3. Create the matching CoS template entry, with postalcode as virtual attribute + 4. Create an index for postalcode + 5. Use HealthCheck without --json option + 6. Use HealthCheck with --json option + :expectedresults: + 1. Success + 2. Success + 3. Success + 4. Success + 5. Healthcheck reports DSVIRTLE0001 code and related details + 6. 
Healthcheck reports DSVIRTLE0001 code and related details + """ + + RET_CODE = 'DSVIRTLE0001' + + standalone = topology_st.standalone + postal_index_properties = { + 'cn': 'postalcode', + 'nsSystemIndex': 'False', + 'nsIndexType': ['eq', 'sub', 'pres'], + } + + log.info('Add cosPointer, cosTemplate and test entry to default suffix, where virtual attribute is postal code') + cos_pointer_properties = { + 'cn': 'cosPointer', + 'description': 'cosPointer example', + 'cosTemplateDn': 'cn=cosTemplateExample,ou=People,dc=example,dc=com', + 'cosAttribute': 'postalcode', + } + cos_pointer_definitions = CosPointerDefinitions(standalone, DEFAULT_SUFFIX, 'ou=People') + cos_pointer_definitions.create(properties=cos_pointer_properties) + + log.info('Create CoS template') + cos_template_properties = { + 'cn': 'cosTemplateExample', + 'postalcode': '117' + } + cos_templates = CosTemplates(standalone, DEFAULT_SUFFIX, 'ou=People') + cos_templates.create(properties=cos_template_properties) + + log.info('Create an index for postalcode') + backends = Backends(topology_st.standalone) + ur_indexes = backends.get('userRoot').get_indexes() + ur_indexes.create(properties=postal_index_properties) + + run_healthcheck_and_flush_log(topology_st, standalone, RET_CODE, json=False) + run_healthcheck_and_flush_log(topology_st, standalone, RET_CODE, json=True) + + +@pytest.mark.ds50873 +@pytest.mark.bz1685160 +@pytest.mark.xfail(ds_is_older("1.4.1"), reason="Not implemented") +@pytest.mark.xfail(ds_is_older("1.4.2.4"), reason="May fail because of bug 1796050") +def test_healthcheck_low_disk_space(topology_st): + """Check if HealthCheck returns DSDSLE0001 code + + :id: 144b335d-077e-430c-9c0e-cd6b0f2f73c1 + :setup: Standalone instance + :steps: + 1. Create DS instance + 2. Get the free disk space for / + 3. Use fallocate to create a file large enough for the use % be up 90% + 4. Use HealthCheck without --json option + 5. Use HealthCheck with --json option + :expectedresults: + 1. Success + 2. 
Success + 3. Success + 4. Healthcheck reports DSDSLE0001 code and related details + 5. Healthcheck reports DSDSLE0001 code and related details + """ + + RET_CODE = 'DSDSLE0001' + + standalone = topology_st.standalone + standalone.config.set("nsslapd-accesslog-logbuffering", "on") + file = '{}/foo'.format(standalone.ds_paths.log_dir) + + log.info('Count the disk space to allocate') + total_size = int(re.findall(r'\d+', str(os.statvfs(standalone.ds_paths.log_dir)))[2]) * 4096 + avail_size = round(int(re.findall(r'\d+', str(os.statvfs(standalone.ds_paths.log_dir)))[3]) * 4096) + used_size = total_size - avail_size + count_total_percent = total_size * 0.92 + final_value = count_total_percent - used_size + + log.info('Create a file large enough for the use % be up 90%') + subprocess.call(['fallocate', '-l', str(round(final_value)), file]) + + run_healthcheck_and_flush_log(topology_st, standalone, RET_CODE, json=False) + run_healthcheck_and_flush_log(topology_st, standalone, RET_CODE, json=True) + + log.info('Remove created file') + os.remove(file) + + +@pytest.mark.flaky(max_runs=2, min_passes=1) +@pytest.mark.ds50791 +@pytest.mark.bz1843567 +@pytest.mark.xfail(ds_is_older("1.4.3.8"), reason="Not implemented") +def test_healthcheck_notes_unindexed_search(topology_st, setup_ldif): + """Check if HealthCheck returns DSLOGNOTES0001 code + + :id: b25f7027-d43f-4ec2-ac49-9c9bb285df1d + :setup: Standalone instance + :steps: + 1. Create DS instance + 2. Set nsslapd-accesslog-logbuffering to off + 3. Import users from created ldif file + 4. Use HealthCheck without --json option + 5. Use HealthCheck with --json option + :expectedresults: + 1. Success + 2. Success + 3. Success + 4. Healthcheck reports DSLOGNOTES0001 + 5. 
Healthcheck reports DSLOGNOTES0001 + """ + + RET_CODE = 'DSLOGNOTES0001' + + standalone = topology_st.standalone + + log.info('Delete the previous access logs') + standalone.deleteAccessLogs() + + log.info('Set nsslapd-accesslog-logbuffering to off') + standalone.config.set("nsslapd-accesslog-logbuffering", "off") + db_cfg = DatabaseConfig(standalone) + db_cfg.set([('nsslapd-idlistscanlimit', '100')]) + + + log.info('Stopping the server and running offline import...') + standalone.stop() + assert standalone.ldif2db(bename=DEFAULT_BENAME, suffixes=[DEFAULT_SUFFIX], encrypt=None, excludeSuffixes=None, + import_file=import_ldif) + standalone.start() + + log.info('Use filters to reproduce "notes=A" in access log') + accounts = Accounts(standalone, DEFAULT_SUFFIX) + accounts.filter('(uid=test*)') + + log.info('Check that access log contains "notes=A"') + assert standalone.ds_access_log.match(r'.*notes=A.*') + + standalone.config.set("nsslapd-accesslog-logbuffering", "on") + + run_healthcheck_and_flush_log(topology_st, standalone, RET_CODE, json=False) + run_healthcheck_and_flush_log(topology_st, standalone, RET_CODE, json=True) + + +@pytest.mark.ds50791 +@pytest.mark.bz1843567 +@pytest.mark.xfail(ds_is_older("1.4.3.8"), reason="Not implemented") +def test_healthcheck_notes_unknown_attribute(topology_st, setup_ldif): + """Check if HealthCheck returns DSLOGNOTES0002 code + + :id: 71ccd1d7-3c71-416b-9d2a-27f9f6633101 + :setup: Standalone instance + :steps: + 1. Create DS instance + 2. Set nsslapd-accesslog-logbuffering to off + 3. Import users from created ldif file + 4. Use HealthCheck without --json option + 5. Use HealthCheck with --json option + :expectedresults: + 1. Success + 2. Success + 3. Success + 4. Healthcheck reports DSLOGNOTES0002 + 5. 
Healthcheck reports DSLOGNOTES0002 + """ + + RET_CODE = 'DSLOGNOTES0002' + + standalone = topology_st.standalone + + log.info('Delete the previous access logs') + topology_st.standalone.deleteAccessLogs() + + log.info('Set nsslapd-accesslog-logbuffering to off') + standalone.config.set("nsslapd-accesslog-logbuffering", "off") + db_cfg = DatabaseConfig(standalone) + db_cfg.set([('nsslapd-idlistscanlimit', '100')]) + + log.info('Stopping the server and running offline import...') + standalone.stop() + assert standalone.ldif2db(bename=DEFAULT_BENAME, suffixes=[DEFAULT_SUFFIX], encrypt=None, excludeSuffixes=None, + import_file=import_ldif) + standalone.start() + + log.info('Use filters to reproduce "notes=F" in access log') + accounts = Accounts(standalone, DEFAULT_SUFFIX) + accounts.filter('(unknown=test)') + + log.info('Check that access log contains "notes=F"') + assert standalone.ds_access_log.match(r'.*notes=F.*') + + standalone.config.set("nsslapd-accesslog-logbuffering", "on") + run_healthcheck_and_flush_log(topology_st, standalone, RET_CODE, json=False) + run_healthcheck_and_flush_log(topology_st, standalone, RET_CODE, json=True) + +def test_healthcheck_unauth_binds(topology_st): + """Check if HealthCheck returns DSCLE0003 code when unauthorized binds are + allowed + + :id: 13b88a3b-0dc5-4ce9-9fbf-058ad072339b + :setup: Standalone instance + :steps: + 1. Create DS instance + 2. Set nsslapd-allow-unauthenticated-binds to on + 3. Use HealthCheck without --json option + 4. Use HealthCheck with --json option + :expectedresults: + 1. Success + 2. Success + 3. Healthcheck reports DSCLE0003 + 4. 
Healthcheck reports DSCLE0003 + """ + + RET_CODE = 'DSCLE0003' + + inst = topology_st.standalone + + log.info('nsslapd-allow-unauthenticated-binds to on') + inst.config.set("nsslapd-allow-unauthenticated-binds", "on") + + run_healthcheck_and_flush_log(topology_st, inst, RET_CODE, json=False) + run_healthcheck_and_flush_log(topology_st, inst, RET_CODE, json=True) + + # reset setting + log.info('Reset nsslapd-allow-unauthenticated-binds to off') + inst.config.set("nsslapd-allow-unauthenticated-binds", "off") + +def test_healthcheck_accesslog_buffering(topology_st): + """Check if HealthCheck returns DSCLE0004 code when acccess log biffering + is disabled + + :id: 5a6512fd-1c7b-4557-9278-45150423148b + :setup: Standalone instance + :steps: + 1. Create DS instance + 2. Set nsslapd-accesslog-logbuffering to off + 3. Use HealthCheck without --json option + 4. Use HealthCheck with --json option + :expectedresults: + 1. Success + 2. Success + 3. Healthcheck reports DSCLE0004 + 4. Healthcheck reports DSCLE0004 + """ + + RET_CODE = 'DSCLE0004' + + inst = topology_st.standalone + + log.info('nsslapd-accesslog-logbuffering to off') + inst.config.set("nsslapd-accesslog-logbuffering", "off") + + run_healthcheck_and_flush_log(topology_st, inst, RET_CODE, json=False) + run_healthcheck_and_flush_log(topology_st, inst, RET_CODE, json=True) + + # reset setting + log.info('Reset nsslapd-accesslog-logbuffering to on') + inst.config.set("nsslapd-accesslog-logbuffering", "on") + +def test_healthcheck_securitylog_buffering(topology_st): + """Check if HealthCheck returns DSCLE0005 code when security log biffering + is disabled + + :id: 9b84287a-e022-4bdc-8c65-2276b37371b5 + :setup: Standalone instance + :steps: + 1. Create DS instance + 2. Set nsslapd-securitylog-logbuffering to off + 3. Use HealthCheck without --json option + 4. Use HealthCheck with --json option + :expectedresults: + 1. Success + 2. Success + 3. Healthcheck reports DSCLE0005 + 4. 
Healthcheck reports DSCLE0005 + """ + + RET_CODE = 'DSCLE0005' + + inst = topology_st.standalone + + log.info('nsslapd-securitylog-logbuffering to off') + inst.config.set("nsslapd-securitylog-logbuffering", "off") + + run_healthcheck_and_flush_log(topology_st, inst, RET_CODE, json=False) + run_healthcheck_and_flush_log(topology_st, inst, RET_CODE, json=True) + + # reset setting + log.info('Reset nnsslapd-securitylog-logbuffering to on') + inst.config.set("nsslapd-securitylog-logbuffering", "on") + + +if __name__ == '__main__': + # Run isolated + # -s for DEBUG mode + CURRENT_FILE = os.path.realpath(__file__) diff --git a/dirsrvtests/tests/suites/healthcheck/health_repl_test.py b/dirsrvtests/tests/suites/healthcheck/health_repl_test.py new file mode 100644 index 0000000..8f91362 --- /dev/null +++ b/dirsrvtests/tests/suites/healthcheck/health_repl_test.py @@ -0,0 +1,300 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2020 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. +# --- END COPYRIGHT BLOCK --- +# + +import pytest +import os +from contextlib import suppress +from lib389.backend import Backend, Backends +from lib389.idm.user import UserAccounts +from lib389.replica import Changelog, ReplicationManager, Replicas +from lib389.utils import * +from lib389._constants import * +from lib389.cli_base import FakeArgs +from lib389.topologies import topology_m2, topology_m3 +from lib389.cli_ctl.health import health_check_run +from lib389.paths import Paths + +CMD_OUTPUT = 'No issues found.' 
+JSON_OUTPUT = '[]' + +ds_paths = Paths() +log = logging.getLogger(__name__) + + +def run_healthcheck_and_flush_log(topology, instance, searched_code, json, searched_code2=None): + args = FakeArgs() + args.instance = instance.serverid + args.verbose = instance.verbose + args.list_errors = False + args.list_checks = False + args.check = ['replication', 'backends:userroot:cl_trimming'] + args.dry_run = False + + if json: + log.info('Use healthcheck with --json option') + args.json = json + health_check_run(instance, topology.logcap.log, args) + assert topology.logcap.contains(searched_code) + log.info('Healthcheck returned searched code: %s' % searched_code) + + if searched_code2 is not None: + assert topology.logcap.contains(searched_code2) + log.info('Healthcheck returned searched code: %s' % searched_code2) + else: + log.info('Use healthcheck without --json option') + args.json = json + health_check_run(instance, topology.logcap.log, args) + assert topology.logcap.contains(searched_code) + log.info('Healthcheck returned searched code: %s' % searched_code) + + if searched_code2 is not None: + assert topology.logcap.contains(searched_code2) + log.info('Healthcheck returned searched code: %s' % searched_code2) + + log.info('Clear the log') + topology.logcap.flush() + + +def set_changelog_trimming(instance): + log.info('Get the changelog enteries') + inst_changelog = Changelog(instance, suffix=DEFAULT_SUFFIX) + + log.info('Set nsslapd-changelogmaxage to 30d') + inst_changelog.set_max_age('30d') + + +@pytest.mark.ds50873 +@pytest.mark.bz1685160 +@pytest.mark.xfail(ds_is_older("1.4.1"), reason="Not implemented") +def test_healthcheck_replication_replica_not_reachable(topology_m2): + """Check if HealthCheck returns DSREPLLE0005 code + + :id: d452a564-7b82-4c1a-b331-a71abbd82a10 + :setup: Replicated topology + :steps: + 1. Create a replicated topology + 2. On M1, set nsds5replicaport for the replication agreement to an unreachable port on the replica + 3. 
Use HealthCheck without --json option + 4. Use HealthCheck with --json option + 5. On M1, set nsds5replicaport for the replication agreement to a reachable port number + 6. Use HealthCheck without --json option + 7. Use HealthCheck with --json option + :expectedresults: + 1. Success + 2. Success + 3. Healthcheck reports DSREPLLE0005 code and related details + 4. Healthcheck reports DSREPLLE0005 code and related details + 5. Success + 6. Healthcheck reports no issue found + 7. Healthcheck reports no issue found + """ + + RET_CODE = 'DSREPLLE0005' + + M1 = topology_m2.ms['supplier1'] + M2 = topology_m2.ms['supplier2'] + + set_changelog_trimming(M1) + + log.info('Set nsds5replicaport for the replication agreement to an unreachable port') + repl = ReplicationManager(DEFAULT_SUFFIX) + repl.wait_for_replication(M1, M2) + + replica_m1 = Replicas(M1).get(DEFAULT_SUFFIX) + agmt_m1 = replica_m1.get_agreements().list()[0] + agmt_m1.replace('nsds5replicaport', '4389') + # Should generate updates here to ensure that we start a new replication session + # and really try to connect to the consumer + with suppress(Exception): + repl.wait_for_replication(M1, M2, timeout=5) + + run_healthcheck_and_flush_log(topology_m2, M1, RET_CODE, json=False) + run_healthcheck_and_flush_log(topology_m2, M1, RET_CODE, json=True) + + log.info('Set nsds5replicaport for the replication agreement to a reachable port') + agmt_m1.replace('nsDS5ReplicaPort', '{}'.format(M2.port)) + repl.wait_for_replication(M1, M2) + + run_healthcheck_and_flush_log(topology_m2, M1, CMD_OUTPUT, json=False) + run_healthcheck_and_flush_log(topology_m2, M1, JSON_OUTPUT, json=True) + + +@pytest.mark.ds50873 +@pytest.mark.bz1685160 +@pytest.mark.xfail(ds_is_older("1.4.1"), reason="Not implemented") +def test_healthcheck_changelog_trimming_not_configured(topology_m2): + """Check if HealthCheck returns DSCLLE0001 code + + :id: c2165032-88ba-4978-a4ca-2fecfd8c35d8 + :setup: Replicated topology + :steps: + 1. 
Create a replicated topology + 2. On M1, check that value of nsslapd-changelogmaxage from cn=changelog5,cn=config is None + 3. Use HealthCheck without --json option + 4. Use HealthCheck with --json option + 5. On M1, set nsslapd-changelogmaxage to 30d + 6. Use HealthCheck without --json option + 7. Use HealthCheck with --json option + :expectedresults: + 1. Success + 2. Success + 3. Healthcheck reports DSCLLE0001 code and related details + 4. Healthcheck reports DSCLLE0001 code and related details (json) + 5. Success + 6. Healthcheck reports no issue found + 7. Healthcheck reports no issue found (json) + """ + + M1 = topology_m2.ms['supplier1'] + + RET_CODE = 'DSCLLE0001' + + log.info('Get the changelog entries for M1') + changelog_m1 = Changelog(M1, suffix=DEFAULT_SUFFIX) + + log.info('Check nsslapd-changelogmaxage value') + if changelog_m1.get_attr_val('nsslapd-changelogmaxage') is not None: + changelog_m1.remove_all('nsslapd-changelogmaxage') + + time.sleep(3) + + run_healthcheck_and_flush_log(topology_m2, M1, RET_CODE, json=False) + run_healthcheck_and_flush_log(topology_m2, M1, RET_CODE, json=True) + + set_changelog_trimming(M1) + + run_healthcheck_and_flush_log(topology_m2, M1, CMD_OUTPUT, json=False) + run_healthcheck_and_flush_log(topology_m2, M1, JSON_OUTPUT, json=True) + + +@pytest.mark.ds50873 +@pytest.mark.bz1685160 +@pytest.mark.xfail(ds_is_older("1.4.1"), reason="Not implemented") +def test_healthcheck_replication_presence_of_conflict_entries(topology_m2): + """Check if HealthCheck returns DSREPLLE0002 code + + :id: 43abc6c6-2075-42eb-8fa3-aa092ff64cba + :setup: Replicated topology + :steps: + 1. Create a replicated topology + 2. Create conflict entries : different entries renamed to the same dn + 3. Use HealthCheck without --json option + 4. Use HealthCheck with --json option + :expectedresults: + 1. Success + 2. Success + 3. Healthcheck reports DSREPLLE0002 code and related details + 4. 
Healthcheck reports DSREPLLE0002 code and related details + """ + + RET_CODE = 'DSREPLLE0002' + + M1 = topology_m2.ms['supplier1'] + M2 = topology_m2.ms['supplier2'] + + repl = ReplicationManager(DEFAULT_SUFFIX) + repl.wait_for_replication(M1, M2) + + topology_m2.pause_all_replicas() + + log.info("Create conflict entries") + test_users_m1 = UserAccounts(M1, DEFAULT_SUFFIX) + test_users_m2 = UserAccounts(M2, DEFAULT_SUFFIX) + user_num = 1000 + test_users_m1.create_test_user(user_num, 2000) + test_users_m2.create_test_user(user_num, 2000) + + topology_m2.resume_all_replicas() + + repl.test_replication_topology(topology_m2) + + run_healthcheck_and_flush_log(topology_m2, M1, RET_CODE, json=False) + run_healthcheck_and_flush_log(topology_m2, M1, RET_CODE, json=True) + + +def test_healthcheck_non_replicated_suffixes(topology_m2): + """Check if backend lint function unexpectedly throws exception + + :id: f922edf8-c527-4802-9f42-0b75bf97098a + :setup: 2 MMR topology + :steps: + 1. Create a new suffix: cn=changelog + 2. Call healthcheck (there should not be any exceptions raised) + :expectedresults: + 1. Success + 2. Success + """ + + inst = topology_m2.ms['supplier1'] + + # Create second suffix + backends = Backends(inst) + backends.create(properties={'nsslapd-suffix': "cn=changelog", + 'name': 'changelog'}) + + # Call healthcheck + args = FakeArgs() + args.instance = inst.serverid + args.verbose = inst.verbose + args.list_errors = False + args.list_checks = False + args.check = ['backends'] + args.dry_run = False + args.json = False + + health_check_run(inst, topology_m2.logcap.log, args) + + +@pytest.mark.ds50873 +@pytest.mark.bz1685160 +@pytest.mark.xfail(ds_is_older("1.4.1"), reason="Not implemented") +def test_healthcheck_replication_out_of_sync_broken(topology_m3): + """Check if HealthCheck returns DSREPLLE0001 code + + :id: b5ae7cae-de0f-4206-95a4-f81538764bea + :setup: 3 MMR topology + :steps: + 1. 
Create a 3 suppliers full-mesh topology, on M2 and M3 don’t set nsds5BeginReplicaRefresh:start + 2. Perform modifications on M1 + 3. Use HealthCheck without --json option + 4. Use HealthCheck with --json option + :expectedresults: + 1. Success + 2. Success + 3. Healthcheck reports DSREPLLE0001 code and related details + 4. Healthcheck reports DSREPLLE0001 code and related details + """ + + RET_CODE = 'DSREPLLE0001' + + M1 = topology_m3.ms['supplier1'] + M2 = topology_m3.ms['supplier2'] + M3 = topology_m3.ms['supplier3'] + + log.info('Break supplier2 and supplier3') + replicas = Replicas(M2) + replica = replicas.list()[0] + replica.replace('nsds5ReplicaBindDNGroup', 'cn=repl') + + replicas = Replicas(M3) + replica = replicas.list()[0] + replica.replace('nsds5ReplicaBindDNGroup', 'cn=repl') + + log.info('Perform update on supplier1') + test_users_m1 = UserAccounts(M1, DEFAULT_SUFFIX) + test_users_m1.create_test_user(1005, 2000) + + run_healthcheck_and_flush_log(topology_m3, M1, RET_CODE, json=False) + run_healthcheck_and_flush_log(topology_m3, M1, RET_CODE, json=True) + + +if __name__ == '__main__': + # Run isolated + # -s for DEBUG mode + CURRENT_FILE = os.path.realpath(__file__) diff --git a/dirsrvtests/tests/suites/healthcheck/health_security_test.py b/dirsrvtests/tests/suites/healthcheck/health_security_test.py new file mode 100644 index 0000000..2967d8d --- /dev/null +++ b/dirsrvtests/tests/suites/healthcheck/health_security_test.py @@ -0,0 +1,344 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2020 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. 
+# --- END COPYRIGHT BLOCK --- +# + +import pytest +import os +import subprocess +import distro +import time +from datetime import * +from lib389.config import Encryption +from lib389.utils import * +from lib389._constants import * +from lib389.cli_base import FakeArgs +from lib389.topologies import topology_st +from lib389.cli_ctl.health import health_check_run +from lib389.paths import Paths + +CMD_OUTPUT = 'No issues found.' +JSON_OUTPUT = '[]' + +ds_paths = Paths() +libfaketime = pytest.importorskip('libfaketime') +libfaketime.reexec_if_needed() + +log = logging.getLogger(__name__) + + +def run_healthcheck_and_flush_log(topology, instance, searched_code, json, searched_code2=None): + args = FakeArgs() + args.instance = instance.serverid + args.verbose = instance.verbose + args.list_errors = False + args.list_checks = False + args.check = ['config', 'encryption', 'tls', 'fschecks'] + args.dry_run = False + + if json: + log.info('Use healthcheck with --json option') + args.json = json + health_check_run(instance, topology.logcap.log, args) + assert topology.logcap.contains(searched_code) + log.info('Healthcheck returned searched code: %s' % searched_code) + + if searched_code2 is not None: + assert topology.logcap.contains(searched_code2) + log.info('Healthcheck returned searched code: %s' % searched_code2) + else: + log.info('Use healthcheck without --json option') + args.json = json + health_check_run(instance, topology.logcap.log, args) + assert topology.logcap.contains(searched_code) + log.info('Healthcheck returned searched code: %s' % searched_code) + + if searched_code2 is not None: + assert topology.logcap.contains(searched_code2) + log.info('Healthcheck returned searched code: %s' % searched_code2) + + log.info('Clear the log') + topology.logcap.flush() + + +@pytest.mark.ds50873 +@pytest.mark.bz1685160 +@pytest.mark.xfail(ds_is_older("1.4.1"), reason="Not implemented") +def test_healthcheck_insecure_pwd_hash_configured(topology_st): + """Check if 
HealthCheck returns DSCLE0002 code + + :id: 6baf949c-a5eb-4f4e-83b4-8302e677758a + :setup: Standalone instance + :steps: + 1. Create DS instance + 2. Configure an insecure passwordStorageScheme (as SHA) for the instance + 3. Use HealthCheck without --json option + 4. Use HealthCheck with --json option + 5. Set passwordStorageScheme and nsslapd-rootpwstoragescheme to PBKDF2_SHA512 + 6. Use HealthCheck without --json option + 7. Use HealthCheck with --json option + :expectedresults: + 1. Success + 2. Success + 3. Healthcheck reports DSCLE0002 code and related details + 4. Healthcheck reports DSCLE0002 code and related details + 5. Success + 6. Healthcheck reports no issue found + 7. Healthcheck reports no issue found + """ + + RET_CODE = 'DSCLE0002' + + standalone = topology_st.standalone + + log.info('Configure an insecure passwordStorageScheme (SHA)') + standalone.config.set('passwordStorageScheme', 'SHA') + + run_healthcheck_and_flush_log(topology_st, standalone, json=False, searched_code=RET_CODE) + run_healthcheck_and_flush_log(topology_st, standalone, json=True, searched_code=RET_CODE) + + if is_fips(): + log.info('Set passwordStorageScheme and nsslapd-rootpwstoragescheme to SSHA512 in FIPS mode') + standalone.config.set('passwordStorageScheme', 'SSHA512') + standalone.config.set('nsslapd-rootpwstoragescheme', 'SSHA512') + else: + log.info('Set passwordStorageScheme and nsslapd-rootpwstoragescheme to PBKDF2-SHA512') + standalone.config.set('passwordStorageScheme', 'PBKDF2-SHA512') + standalone.config.set('nsslapd-rootpwstoragescheme', 'PBKDF2-SHA512') + + run_healthcheck_and_flush_log(topology_st, standalone, json=False, searched_code=CMD_OUTPUT) + run_healthcheck_and_flush_log(topology_st, standalone, json=True, searched_code=JSON_OUTPUT) + + +@pytest.mark.ds50873 +@pytest.mark.bz1685160 +@pytest.mark.xfail(ds_is_older("1.4.1"), reason="Not implemented") +def test_healthcheck_min_allowed_tls_version_too_low(topology_st): + """Check if HealthCheck returns 
DSELE0001 code + + :id: a4be3390-9508-4827-8f82-e4e21081caab + :setup: Standalone instance + :steps: + 1. Create DS instance + 2. Set the TLS minimum version to TLS1.0 + 3. Use HealthCheck without --json option + 4. Use HealthCheck with --json option + 5. Set the TLS minimum version to TLS1.2 + 6. Use HealthCheck without --json option + 7. Use HealthCheck with --json option + :expectedresults: + 1. Success + 2. Success + 3. Healthcheck reports DSELE0001 code and related details + 4. Healthcheck reports DSELE0001 code and related details + 5. Success + 6. Healthcheck reports no issue found + 7. Healthcheck reports no issue found + """ + + RET_CODE = 'DSELE0001' + HIGHER_VS = 'TLS1.2' + SMALL_VS = 'TLS1.0' + RHEL = 'Red Hat Enterprise Linux' + + standalone = topology_st.standalone + + standalone.enable_tls() + + # We have to update-crypto-policies to LEGACY, otherwise we can't set TLS1.0 + log.info('Updating crypto policies') + assert subprocess.check_call(['update-crypto-policies', '--set', 'LEGACY']) == 0 + + log.info('Set the TLS minimum version to TLS1.0') + enc = Encryption(standalone) + enc.replace('sslVersionMin', SMALL_VS) + standalone.restart() + + run_healthcheck_and_flush_log(topology_st, standalone, json=False, searched_code=RET_CODE) + run_healthcheck_and_flush_log(topology_st, standalone, json=True, searched_code=RET_CODE) + + log.info('Set the TLS minimum version to TLS1.2') + enc.replace('sslVersionMin', HIGHER_VS) + standalone.restart() + + run_healthcheck_and_flush_log(topology_st, standalone, json=False, searched_code=CMD_OUTPUT) + run_healthcheck_and_flush_log(topology_st, standalone, json=True, searched_code=JSON_OUTPUT) + + if RHEL in distro.linux_distribution(): + log.info('Set crypto-policies back to DEFAULT') + assert subprocess.check_call(['update-crypto-policies', '--set', 'DEFAULT']) == 0 + + +@pytest.mark.ds50873 +@pytest.mark.bz1685160 +@pytest.mark.xfail(ds_is_older("1.4.1"), reason="Not implemented") +def 
test_healthcheck_resolvconf_bad_file_perm(topology_st): + """Check if HealthCheck returns DSPERMLE0001 code + + :id: 8572b9e9-70e7-49e9-b745-864f6f2468a8 + :setup: Standalone instance + :steps: + 1. Create DS instance + 2. Change the /etc/resolv.conf file permissions to 444 + 3. Use HealthCheck without --json option + 4. Use HealthCheck with --json option + 5. set /etc/resolv.conf permissions to 644 + 6. Use HealthCheck without --json option + 7. Use HealthCheck with --json option + :expectedresults: + 1. Success + 2. Success + 3. Healthcheck reports DSPERMLE0001 code and related details + 4. Healthcheck reports DSPERMLE0001 code and related details + 5. Success + 6. Healthcheck reports no issue found + 7. Healthcheck reports no issue found + """ + + RET_CODE = 'DSPERMLE0001' + + standalone = topology_st.standalone + + log.info('Change the /etc/resolv.conf file permissions to 444') + os.chmod('/etc/resolv.conf', 0o444) + + run_healthcheck_and_flush_log(topology_st, standalone, RET_CODE, json=False) + run_healthcheck_and_flush_log(topology_st, standalone, RET_CODE, json=True) + + log.info('Change the /etc/resolv.conf file permissions to 644') + os.chmod('/etc/resolv.conf', 0o644) + + run_healthcheck_and_flush_log(topology_st, standalone, CMD_OUTPUT, json=False) + run_healthcheck_and_flush_log(topology_st, standalone, JSON_OUTPUT, json=True) + + +@pytest.mark.ds50873 +@pytest.mark.bz1685160 +@pytest.mark.xfail(ds_is_older("1.4.1"), reason="Not implemented") +def test_healthcheck_pwdfile_bad_file_perm(topology_st): + """Check if HealthCheck returns DSPERMLE0002 code + + :id: ec137d66-bad6-4eed-90bd-fc1d572bbe1f + :setup: Standalone instance + :steps: + 1. Create DS instance + 2. Change the /etc/dirsrv/slapd-xxx/pwdfile.txt permissions to 000 + 3. Use HealthCheck without --json option + 4. Use HealthCheck with --json option + 5. Change the /etc/dirsrv/slapd-xxx/pwdfile.txt permissions to 400 + 6. Use HealthCheck without --json option + 7. 
Use HealthCheck with --json option + :expectedresults: + 1. Success + 2. Success + 3. Healthcheck reports DSPERMLE0002 code and related details + 4. Healthcheck reports DSPERMLE0002 code and related details + 5. Success + 6. Healthcheck reports no issue found + 7. Healthcheck reports no issue found + """ + + RET_CODE = 'DSPERMLE0002' + + standalone = topology_st.standalone + cert_dir = standalone.ds_paths.cert_dir + + log.info('Change the /etc/dirsrv/slapd-{}/pwdfile.txt permissions to 000'.format(standalone.serverid)) + os.chmod('{}/pwdfile.txt'.format(cert_dir), 0o000) + + run_healthcheck_and_flush_log(topology_st, standalone, RET_CODE, json=False) + run_healthcheck_and_flush_log(topology_st, standalone, RET_CODE, json=True) + + log.info('Change the /etc/dirsrv/slapd-{}/pwdfile.txt permissions to 400'.format(standalone.serverid)) + os.chmod('{}/pwdfile.txt'.format(cert_dir), 0o400) + + run_healthcheck_and_flush_log(topology_st, standalone, CMD_OUTPUT, json=False) + run_healthcheck_and_flush_log(topology_st, standalone, JSON_OUTPUT, json=True) + + +@pytest.mark.ds50873 +@pytest.mark.bz1685160 +@pytest.mark.xfail(ds_is_older("1.4.1"), reason="Not implemented") +def test_healthcheck_certif_expiring_within_30d(topology_st): + """Check if HealthCheck returns DSCERTLE0001 code + + :id: f30b8115-0fd3-4c1d-9f5a-383bea7ea869 + :setup: Standalone instance + :steps: + 1. Create DS instance + 2. Use libfaketime to tell the process the date is within 30 days before certificate expiration + 3. Use HealthCheck without --json option + 4. Use HealthCheck with --json option + :expectedresults: + 1. Success + 2. Success + 3. Healthcheck reports DSCERTLE0001 code and related details + 4. 
Healthcheck reports DSCERTLE0001 code and related details + """ + + RET_CODE = 'DSCERTLE0001' + + standalone = topology_st.standalone + + standalone.enable_tls() + + # Cert is valid two years from today, so we count the date that is within 30 days before certificate expiration + date_future = datetime.now() + timedelta(days=701) + + with libfaketime.fake_time(date_future): + time.sleep(1) + run_healthcheck_and_flush_log(topology_st, standalone, RET_CODE, json=False) + run_healthcheck_and_flush_log(topology_st, standalone, RET_CODE, json=True) + + # Try again with real time just to make sure no issues were found + run_healthcheck_and_flush_log(topology_st, standalone, CMD_OUTPUT, json=False) + run_healthcheck_and_flush_log(topology_st, standalone, JSON_OUTPUT, json=True) + + +@pytest.mark.ds50873 +@pytest.mark.bz1685160 +@pytest.mark.xfail(ds_is_older("1.4.1"), reason="Not implemented") +def test_healthcheck_certif_expired(topology_st): + """Check if HealthCheck returns DSCERTLE0002 code + + :id: ceff2c22-62c0-4fd9-b737-930a88458d68 + :setup: Standalone instance + :steps: + 1. Create DS instance + 2. Use libfaketime to tell the process the date is after certificate expiration + 3. Use HealthCheck without --json option + 4. Use HealthCheck with --json option + :expectedresults: + 1. Success + 2. Success + 3. Healthcheck reports DSCERTLE0002 code and related details + 4. 
Healthcheck reports DSCERTLE0002 code and related details + """ + + RET_CODE = 'DSCERTLE0002' + + standalone = topology_st.standalone + + standalone.enable_tls() + + # Cert is valid two years from today, so we count the date that is after expiration + date_future = datetime.now() + timedelta(days=731) + + with libfaketime.fake_time(date_future): + time.sleep(1) + run_healthcheck_and_flush_log(topology_st, standalone, RET_CODE, json=False) + run_healthcheck_and_flush_log(topology_st, standalone, RET_CODE, json=True) + + # Try again with real time just to make sure no issues were found + run_healthcheck_and_flush_log(topology_st, standalone, CMD_OUTPUT, json=False) + run_healthcheck_and_flush_log(topology_st, standalone, JSON_OUTPUT, json=True) + + +if __name__ == '__main__': + # Run isolated + # -s for DEBUG mode + CURRENT_FILE = os.path.realpath(__file__) diff --git a/dirsrvtests/tests/suites/healthcheck/health_sync_test.py b/dirsrvtests/tests/suites/healthcheck/health_sync_test.py new file mode 100644 index 0000000..6701f7f --- /dev/null +++ b/dirsrvtests/tests/suites/healthcheck/health_sync_test.py @@ -0,0 +1,132 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2020 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. 
+# --- END COPYRIGHT BLOCK --- +# + +import pytest +import os +import time +from datetime import * +from lib389.idm.user import UserAccounts +from lib389.utils import * +from lib389._constants import * +from lib389.cli_base import FakeArgs +from lib389.topologies import topology_m3 +from lib389.cli_ctl.health import health_check_run +from lib389.paths import Paths + +ds_paths = Paths() +log = logging.getLogger(__name__) + + +def run_healthcheck_and_flush_log(topology, instance, searched_code, json, searched_code2=None): + args = FakeArgs() + args.instance = instance.serverid + args.verbose = instance.verbose + args.list_errors = False + args.list_checks = False + args.check = ['replication'] + args.dry_run = False + + if json: + log.info('Use healthcheck with --json option') + args.json = json + health_check_run(instance, topology.logcap.log, args) + assert topology.logcap.contains(searched_code) + log.info('Healthcheck returned searched code: %s' % searched_code) + + if searched_code2 is not None: + assert topology.logcap.contains(searched_code2) + log.info('Healthcheck returned searched code: %s' % searched_code2) + else: + log.info('Use healthcheck without --json option') + args.json = json + health_check_run(instance, topology.logcap.log, args) + assert topology.logcap.contains(searched_code) + log.info('Healthcheck returned searched code: %s' % searched_code) + + if searched_code2 is not None: + assert topology.logcap.contains(searched_code2) + log.info('Healthcheck returned searched code: %s' % searched_code2) + + log.info('Clear the log') + topology.logcap.flush() + + +# This test is in separate file because it is timeout specific +@pytest.mark.ds50873 +@pytest.mark.bz1685160 +@pytest.mark.xfail(ds_is_older("1.4.1"), reason="Not implemented") +#unstable or unstatus tests, skipped for now +@pytest.mark.flaky(max_runs=2, min_passes=1) +def test_healthcheck_replication_out_of_sync_not_broken(topology_m3): + """Check if HealthCheck returns DSREPLLE0003 code + + 
:id: 8305000d-ba4d-4c00-8331-be0e8bd92150 + :setup: 3 MMR topology + :steps: + 1. Create a 3 suppliers full-mesh topology, all replicas being synchronized + 2. Stop M1 + 3. Perform an update on M2 and M3. + 4. Check M2 and M3 are synchronized. + 5. From M2, reinitialize the M3 agreement + 6. Stop M2 and M3 + 7. Restart M1 + 8. Start M3 + 9. Use HealthCheck without --json option + 10. Use HealthCheck with --json option + :expectedresults: + 1. Success + 2. Success + 3. Success + 4. Success + 5. Success + 6. Success + 7. Success + 8. Success + 9. Healthcheck reports DSREPLLE0003 code and related details + 10. Healthcheck reports DSREPLLE0003 code and related details + """ + + RET_CODE = 'DSREPLLE0003' + + M1 = topology_m3.ms['supplier1'] + M2 = topology_m3.ms['supplier2'] + M3 = topology_m3.ms['supplier3'] + + log.info('Stop supplier1') + M1.stop() + + log.info('Perform update on supplier2 and supplier3') + test_users_m2 = UserAccounts(M2, DEFAULT_SUFFIX) + test_users_m3 = UserAccounts(M3, DEFAULT_SUFFIX) + test_users_m2.create_test_user(1000, 2000) + for user_num in range(1001, 3000): + test_users_m3.create_test_user(user_num, 2000) + time.sleep(2) + + log.info('Stop M2 and M3') + M2.stop() + M3.stop() + + log.info('Start M1 first, then M2, so that M2 acquires M1') + M1.start() + M2.start() + time.sleep(2) + + log.info('Start M3 which should not be able to acquire M1 since M2 is updating it') + M3.start() + time.sleep(2) + + run_healthcheck_and_flush_log(topology_m3, M3, RET_CODE, json=False) + run_healthcheck_and_flush_log(topology_m3, M3, RET_CODE, json=True) + + +if __name__ == '__main__': + # Run isolated + # -s for DEBUG mode + CURRENT_FILE = os.path.realpath(__file__) diff --git a/dirsrvtests/tests/suites/healthcheck/healthcheck_test.py b/dirsrvtests/tests/suites/healthcheck/healthcheck_test.py new file mode 100644 index 0000000..7f6ccad --- /dev/null +++ b/dirsrvtests/tests/suites/healthcheck/healthcheck_test.py @@ -0,0 +1,498 @@ +# --- BEGIN COPYRIGHT BLOCK 
--- +# Copyright (C) 2023 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. +# --- END COPYRIGHT BLOCK --- +# + +import pytest +import os +from lib389.backend import Backends +from lib389.mappingTree import MappingTrees +from lib389.replica import Changelog5, Changelog +from lib389.utils import * +from lib389._constants import * +from lib389.cli_base import FakeArgs +from lib389.topologies import topology_st, topology_no_sample, topology_m2 +from lib389.cli_ctl.health import health_check_run +from lib389.paths import Paths + +CMD_OUTPUT = 'No issues found.' +JSON_OUTPUT = '[]' +CHANGELOG = 'cn=changelog,{}'.format(DN_USERROOT_LDBM) + +ds_paths = Paths() +log = logging.getLogger(__name__) + + +def run_healthcheck_and_flush_log(topology, instance, searched_code=None, json=False, searched_code2=None, + list_checks=False, list_errors=False, check=None, searched_list=None): + args = FakeArgs() + args.instance = instance.serverid + args.verbose = instance.verbose + args.list_errors = list_errors + args.list_checks = list_checks + args.check = check + args.dry_run = False + args.json = json + + log.info('Use healthcheck with --json == {} option'.format(json)) + health_check_run(instance, topology.logcap.log, args) + + if searched_list is not None: + for item in searched_list: + assert topology.logcap.contains(item) + log.info('Healthcheck returned searched item: %s' % item) + else: + assert topology.logcap.contains(searched_code) + log.info('Healthcheck returned searched code: %s' % searched_code) + + if searched_code2 is not None: + assert topology.logcap.contains(searched_code2) + log.info('Healthcheck returned searched code: %s' % searched_code2) + + log.info('Clear the log') + topology.logcap.flush() + + +def set_changelog_trimming(instance): + log.info('Set nsslapd-changelogmaxage to 30d') + + if ds_supports_new_changelog(): + cl = Changelog(instance, DEFAULT_SUFFIX) + else: + cl = 
Changelog5(instance) + cl.replace('nsslapd-changelogmaxage', '30') + + +def test_healthcheck_disabled_suffix(topology_st): + """Test that we report when a suffix is disabled + + :id: 49ebce72-7e7b-4eff-8bd9-8384d12251b4 + :setup: Standalone Instance + :steps: + 1. Disable suffix + 2. Use HealthCheck without --json option + 3. Use HealthCheck with --json option + :expectedresults: + 1. Success + 2. HealthCheck should return code DSBLE0002 + 3. HealthCheck should return code DSBLE0002 + """ + + RET_CODE = 'DSBLE0002' + + mts = MappingTrees(topology_st.standalone) + mt = mts.get(DEFAULT_SUFFIX) + mt.replace("nsslapd-state", "disabled") + topology_st.standalone.config.set("nsslapd-accesslog-logbuffering", "on") + + run_healthcheck_and_flush_log(topology_st, topology_st.standalone, RET_CODE, json=False) + run_healthcheck_and_flush_log(topology_st, topology_st.standalone, RET_CODE, json=True) + + # reset the suffix state + mt.replace("nsslapd-state", "backend") + + +@pytest.mark.ds50873 +@pytest.mark.bz1685160 +@pytest.mark.skipif(ds_is_older("1.4.1"), reason="Not implemented") +def test_healthcheck_standalone(topology_st): + """Check functionality of HealthCheck Tool on standalone instance with no errors + + :id: 4844b446-3939-4fbd-b14b-293b20bb8be0 + :setup: Standalone instance + :steps: + 1. Create DS instance + 2. Use HealthCheck without --json option + 3. Use HealthCheck with --json option + :expectedresults: + 1. Success + 2. Success + 3. 
Success + """ + + standalone = topology_st.standalone + + run_healthcheck_and_flush_log(topology_st, standalone, CMD_OUTPUT,json=False) + run_healthcheck_and_flush_log(topology_st, standalone, JSON_OUTPUT, json=True) + + +@pytest.mark.ds50746 +@pytest.mark.bz1816851 +@pytest.mark.xfail(ds_is_older("1.4.2"), reason="Not implemented") +def test_healthcheck_list_checks(topology_st): + """Check functionality of HealthCheck Tool with --list-checks option + + :id: 44b1d8d3-b94a-4c2d-9233-ebe876802803 + :setup: Standalone instance + :steps: + 1. Create DS instance + 2. Set list_checks to True + 3. Run HealthCheck + :expectedresults: + 1. Success + 2. Success + 3. Success + """ + + output_list = ['config:hr_timestamp', + 'config:passwordscheme', + 'backends:userroot:cl_trimming', + 'backends:userroot:mappingtree', + 'backends:userroot:search', + 'backends:userroot:virt_attrs', + 'encryption:check_tls_version', + 'fschecks:file_perms', + 'refint:attr_indexes', + 'refint:update_delay', + 'monitor-disk-space:disk_space', + 'replication:agmts_status', + 'replication:conflicts', + 'dseldif:nsstate', + 'tls:certificate_expiration', + 'logs:notes'] + + standalone = topology_st.standalone + + run_healthcheck_and_flush_log(topology_st, standalone, json=False, list_checks=True, searched_list=output_list) + + +@pytest.mark.ds50746 +@pytest.mark.bz1816851 +@pytest.mark.xfail(ds_is_older("1.4.2"), reason="Not implemented") +def test_healthcheck_list_errors(topology_st): + """Check functionality of HealthCheck Tool with --list-errors option + + :id: 295c07c0-a939-4d5e-b3a6-b4c9d0da3897 + :setup: Standalone instance + :steps: + 1. Create DS instance + 2. Set list_errors to True + 3. Run HealthCheck + :expectedresults: + 1. Success + 2. Success + 3. 
Success + """ + + output_list = ['DSBLE0001 :: Possibly incorrect mapping tree', + 'DSBLE0002 :: Unable to query backend', + 'DSBLE0003 :: Uninitialized backend database', + 'DSCERTLE0001 :: Certificate about to expire', + 'DSCERTLE0002 :: Certificate expired', + 'DSCLE0001 :: Different log timestamp format', + 'DSCLE0002 :: Weak passwordStorageScheme', + 'DSCLE0003 :: Unauthorized Binds Allowed', + 'DSCLE0004 :: Access Log buffering disabled', + 'DSCLE0005 :: Security Log buffering disabled', + 'DSCLLE0001 :: Changelog trimming not configured', + 'DSDSLE0001 :: Low disk space', + 'DSELE0001 :: Weak TLS protocol version', + 'DSLOGNOTES0001 :: Unindexed Search', + 'DSLOGNOTES0002 :: Unknown Attribute In Filter', + 'DSPERMLE0001 :: Incorrect file permissions', + 'DSPERMLE0002 :: Incorrect security database file permissions', + 'DSREPLLE0001 :: Replication agreement not set to be synchronized', + 'DSREPLLE0002 :: Replication conflict entries found', + 'DSREPLLE0003 :: Unsynchronized replication agreement', + 'DSREPLLE0004 :: Unable to get replication agreement status', + 'DSREPLLE0005 :: Replication consumer not reachable', + 'DSRILE0001 :: Referential integrity plugin may be slower', + 'DSRILE0002 :: Referential integrity plugin configured with unindexed attribute', + 'DSSKEWLE0001 :: Medium time skew', + 'DSSKEWLE0002 :: Major time skew', + 'DSSKEWLE0003 :: Extensive time skew', + 'DSVIRTLE0001 :: Virtual attribute indexed'] + + standalone = topology_st.standalone + + run_healthcheck_and_flush_log(topology_st, standalone, json=False, list_errors=True, searched_list=output_list) + + +@pytest.mark.ds50746 +@pytest.mark.bz1816851 +@pytest.mark.xfail(ds_is_older("1.4.2"), reason="Not implemented") +def test_healthcheck_check_option(topology_st): + """Check functionality of HealthCheck Tool with --check option + + :id: ee382d6f-8bec-4236-ace4-4700d19dc9fd + :setup: Standalone instance + :steps: + 1. Create DS instance + 2. Set check to value from list + 3. 
Run HealthCheck + :expectedresults: + 1. Success + 2. Success + 3. Success + """ + + output_list = ['config:hr_timestamp', + 'config:passwordscheme', + # 'config:accesslog_buffering', Skip test access log buffering is disabled + 'config:securitylog_buffering', + 'config:unauth_binds', + 'backends:userroot:cl_trimming', + 'backends:userroot:mappingtree', + 'backends:userroot:search', + 'backends:userroot:virt_attrs', + 'encryption:check_tls_version', + 'fschecks:file_perms', + 'refint:attr_indexes', + 'refint:update_delay', + 'memberof:member_attr_indexes', + 'monitor-disk-space:disk_space', + 'replication:agmts_status', + 'replication:conflicts', + 'replication:no_ruv', + 'dseldif:nsstate', + 'tls:certificate_expiration', + 'logs:notes'] + + standalone = topology_st.standalone + + for item in output_list: + pattern = 'Checking ' + item + log.info('Check {}'.format(item)) + run_healthcheck_and_flush_log(topology_st, standalone, searched_code=pattern, json=False, check=[item], + searched_code2=CMD_OUTPUT) + run_healthcheck_and_flush_log(topology_st, standalone, searched_code=JSON_OUTPUT, json=True, check=[item]) + + +@pytest.mark.ds50873 +@pytest.mark.bz1685160 +@pytest.mark.skipif(ds_is_older("1.4.1"), reason="Not implemented") +def test_healthcheck_standalone_tls(topology_st): + """Check functionality of HealthCheck Tool on TLS enabled standalone instance with no errors + + :id: 832374e6-6d2c-42af-80c8-d3685dbfa234 + :setup: Standalone instance + :steps: + 1. Create DS instance + 2. Enable TLS + 3. Use HealthCheck without --json option + 4. Use HealthCheck with --json option + :expectedresults: + 1. Success + 2. Success + 3. Success + 4. 
Success + """ + + standalone = topology_st.standalone + standalone.enable_tls() + + run_healthcheck_and_flush_log(topology_st, standalone, CMD_OUTPUT,json=False) + run_healthcheck_and_flush_log(topology_st, standalone, JSON_OUTPUT, json=True) + + +@pytest.mark.ds50873 +@pytest.mark.bz1685160 +@pytest.mark.skipif(ds_is_older("1.4.1"), reason="Not implemented") +def test_healthcheck_replication(topology_m2): + """Check functionality of HealthCheck Tool on replication instance with no errors + + :id: d7751cc3-271c-4c33-b296-8a4c8941233e + :setup: 2 MM topology + :steps: + 1. Create a two suppliers replication topology + 2. Set nsslapd-changelogmaxage to 30d + 3. Use HealthCheck without --json option + 4. Use HealthCheck with --json option + :expectedresults: + 1. Success + 2. Success + 3. Success + 4. Success + """ + + M1 = topology_m2.ms['supplier1'] + M2 = topology_m2.ms['supplier2'] + + # If we don't set changelog trimming, we will get error DSCLLE0001 + set_changelog_trimming(M1) + set_changelog_trimming(M2) + M1.config.set("nsslapd-accesslog-logbuffering", "on") + M2.config.set("nsslapd-accesslog-logbuffering", "on") + + log.info('Run healthcheck for supplier1') + run_healthcheck_and_flush_log(topology_m2, M1, CMD_OUTPUT, json=False) + run_healthcheck_and_flush_log(topology_m2, M1, JSON_OUTPUT, json=True) + + log.info('Run healthcheck for supplier2') + run_healthcheck_and_flush_log(topology_m2, M2, CMD_OUTPUT, json=False) + run_healthcheck_and_flush_log(topology_m2, M2, JSON_OUTPUT, json=True) + + +@pytest.mark.ds50873 +@pytest.mark.bz1685160 +@pytest.mark.skipif(ds_is_older("1.4.1"), reason="Not implemented") +def test_healthcheck_replication_tls(topology_m2): + """Check functionality of HealthCheck Tool on replication instance with no errors + + :id: 9ee6d491-d6d7-4c2c-ac78-70d08f054166 + :setup: 2 MM topology + :steps: + 1. Create a two suppliers replication topology + 2. Enable TLS + 3. Set nsslapd-changelogmaxage to 30d + 4. 
Use HealthCheck without --json option + 5. Use HealthCheck with --json option + :expectedresults: + 1. Success + 2. Success + 3. Success + 4. Success + 5. Success + """ + + M1 = topology_m2.ms['supplier1'] + M2 = topology_m2.ms['supplier2'] + + M1.enable_tls() + M2.enable_tls() + + log.info('Run healthcheck for supplier1') + M1.config.set("nsslapd-accesslog-logbuffering", "on") + M2.config.set("nsslapd-accesslog-logbuffering", "on") + run_healthcheck_and_flush_log(topology_m2, M1, CMD_OUTPUT, json=False) + run_healthcheck_and_flush_log(topology_m2, M1, JSON_OUTPUT, json=True) + + log.info('Run healthcheck for supplier2') + run_healthcheck_and_flush_log(topology_m2, M2, CMD_OUTPUT, json=False) + run_healthcheck_and_flush_log(topology_m2, M2, JSON_OUTPUT, json=True) + + +@pytest.mark.ds50873 +@pytest.mark.bz1685160 +@pytest.mark.skipif(ds_is_older("1.4.1"), reason="Not implemented") +@pytest.mark.xfail(ds_is_older("1.4.3"),reason="Might fail because of bz1835619") +def test_healthcheck_backend_missing_mapping_tree(topology_st): + """Check if HealthCheck returns DSBLE0001 and DSBLE0003 code + + :id: 4c83ffcf-01a4-4ec8-a3d2-01022b566225 + :setup: Standalone instance + :steps: + 1. Create DS instance + 2. Disable the dc=example,dc=com backend suffix entry in the mapping tree + 3. Use HealthCheck without --json option + 4. Use HealthCheck with --json option + 5. Enable the dc=example,dc=com backend suffix entry in the mapping tree + 6. Use HealthCheck without --json option + 7. Use HealthCheck with --json option + :expectedresults: + 1. Success + 2. Success + 3. Healthcheck reports DSBLE0001 and DSBLE0003 codes and related details + 4. Healthcheck reports DSBLE0001 and DSBLE0003 codes and related details + 5. Success + 6. Healthcheck reports no issue found + 7. 
Healthcheck reports no issue found + """ + + RET_CODE1 = 'DSBLE0001' + RET_CODE2 = 'DSBLE0003' + + standalone = topology_st.standalone + + log.info('Delete the dc=example,dc=com backend suffix entry in the mapping tree') + mts = MappingTrees(standalone) + mt = mts.get(DEFAULT_SUFFIX) + mt.delete() + + run_healthcheck_and_flush_log(topology_st, standalone, RET_CODE1, json=False, searched_code2=RET_CODE2) + run_healthcheck_and_flush_log(topology_st, standalone, RET_CODE1, json=True, searched_code2=RET_CODE2) + + log.info('Create the dc=example,dc=com backend suffix entry') + mts.create(properties={ + 'cn': DEFAULT_SUFFIX, + 'nsslapd-state': 'backend', + 'nsslapd-backend': 'USERROOT', + }) + + run_healthcheck_and_flush_log(topology_st, standalone, CMD_OUTPUT, json=False) + run_healthcheck_and_flush_log(topology_st, standalone, JSON_OUTPUT, json=True) + + +@pytest.mark.ds50873 +@pytest.mark.bz1796343 +@pytest.mark.skipif(ds_is_older("1.4.1"), reason="Not implemented") +@pytest.mark.xfail(reason="Will fail because of bz1837315. Set proper version after bug is fixed") +def test_healthcheck_unable_to_query_backend(topology_st): + """Check if HealthCheck returns DSBLE0002 code + + :id: 01de2fe5-079d-4166-b4c9-1f1e00bb091c + :setup: Standalone instance + :steps: + 1. Create DS instance + 2. Create a new root suffix and database + 3. Disable new suffix + 4. Use HealthCheck without --json option + 5. Use HealthCheck with --json option + :expectedresults: + 1. Success + 2. Success + 3. Success + 4. HealthCheck should return code DSBLE0002 + 5. 
HealthCheck should return code DSBLE0002 + """ + + RET_CODE = 'DSBLE0002' + NEW_SUFFIX = 'dc=test,dc=com' + NEW_BACKEND = 'userData' + + standalone = topology_st.standalone + + log.info('Create new suffix') + backends = Backends(standalone) + backends.create(properties={ + 'cn': NEW_BACKEND, + 'nsslapd-suffix': NEW_SUFFIX, + }) + + log.info('Disable the newly created suffix') + mts = MappingTrees(standalone) + mt_new = mts.get(NEW_SUFFIX) + mt_new.replace('nsslapd-state', 'disabled') + + run_healthcheck_and_flush_log(topology_st, standalone, RET_CODE, json=False) + run_healthcheck_and_flush_log(topology_st, standalone, RET_CODE, json=True) + + log.info('Enable the suffix again and check if nothing is broken') + mt_new.replace('nsslapd-state', 'backend') + run_healthcheck_and_flush_log(topology_st, standalone, RET_CODE, json=False) + run_healthcheck_and_flush_log(topology_st, standalone, RET_CODE, json=True) + + +@pytest.mark.ds50873 +@pytest.mark.bz1796343 +@pytest.mark.skipif(ds_is_older("1.4.1"), reason="Not implemented") +def test_healthcheck_database_not_initialized(topology_no_sample): + """Check if HealthCheck returns DSBLE0003 code + + :id: 716b1ff1-94bd-4780-98b8-96ff8ef21e30 + :setup: Standalone instance + :steps: + 1. Create DS instance without example entries + 2. Use HealthCheck without --json option + 3. Use HealthCheck with --json option + :expectedresults: + 1. Success + 2. HealthCheck should return code DSBLE0003 + 3. 
HealthCheck should return code DSBLE0003 + """ + + RET_CODE = 'DSBLE0003' + standalone = topology_no_sample.standalone + + run_healthcheck_and_flush_log(topology_no_sample, standalone, RET_CODE, json=False) + run_healthcheck_and_flush_log(topology_no_sample, standalone, RET_CODE, json=True) + + +if __name__ == '__main__': + # Run isolated + # -s for DEBUG mode + CURRENT_FILE = os.path.realpath(__file__) diff --git a/dirsrvtests/tests/suites/import/__init__.py b/dirsrvtests/tests/suites/import/__init__.py new file mode 100644 index 0000000..8584e71 --- /dev/null +++ b/dirsrvtests/tests/suites/import/__init__.py @@ -0,0 +1,3 @@ +""" + :Requirement: 389-ds-base: DataBase Import +""" diff --git a/dirsrvtests/tests/suites/import/import_test.py b/dirsrvtests/tests/suites/import/import_test.py new file mode 100644 index 0000000..832a27c --- /dev/null +++ b/dirsrvtests/tests/suites/import/import_test.py @@ -0,0 +1,634 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2023 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. 
+# --- END COPYRIGHT BLOCK --- + +""" +Will test Import (Offline/Online) +""" + +import os +import pytest +import time +import glob +import logging +import subprocess +from datetime import datetime +from lib389.topologies import topology_st as topo +from lib389._constants import DEFAULT_SUFFIX, TaskWarning +from lib389.dbgen import dbgen_users +from lib389.tasks import ImportTask +from lib389.index import Indexes +from lib389.monitor import Monitor +from lib389.backend import Backends +from lib389.config import LDBMConfig +from lib389.config import LMDB_LDBMConfig +from lib389.utils import ds_is_newer, get_default_db_lib +from lib389.idm.user import UserAccount +from lib389.idm.account import Accounts +from lib389.cli_ctl.dbtasks import dbtasks_ldif2db +from lib389.cli_base import FakeArgs + +pytestmark = pytest.mark.tier1 + +DEBUGGING = os.getenv("DEBUGGING", default=False) +if DEBUGGING: + logging.getLogger(__name__).setLevel(logging.DEBUG) +else: + logging.getLogger(__name__).setLevel(logging.INFO) +log = logging.getLogger(__name__) + +bdb_values = { + 'wait30': 30 +} + +# Note: I still sometime get failure with a 60s timeout so lets use 90s +mdb_values = { + 'wait30': 90 +} + +if get_default_db_lib() == 'bdb': + values = bdb_values +else: + values = mdb_values + + +def _generate_ldif(topo, no_no): + """ + Will generate the ldifs + """ + ldif_dir = topo.standalone.get_ldif_dir() + import_ldif = ldif_dir + '/basic_import.ldif' + if os.path.isfile(import_ldif): + pass + else: + dbgen_users(topo.standalone, no_no, import_ldif, DEFAULT_SUFFIX) + + +def _check_users_before_test(topo, no_no): + """ + Will check no user before test. 
+ """ + accounts = Accounts(topo.standalone, DEFAULT_SUFFIX) + assert len(accounts.filter('(uid=*)')) < no_no + + +def _search_for_user(topo, no_n0): + """ + Will make sure that users are imported + """ + accounts = Accounts(topo.standalone, DEFAULT_SUFFIX) + assert len(accounts.filter('(uid=*)')) == no_n0 + + +def _import_clean_topo(topo): + """ + Cleanup after import + """ + accounts = Accounts(topo.standalone, DEFAULT_SUFFIX) + for i in accounts.filter('(uid=*)'): + UserAccount(topo.standalone, i.dn).delete() + + ldif_dir = topo.standalone.get_ldif_dir() + import_ldif = ldif_dir + '/basic_import.ldif' + if os.path.exists(import_ldif): + os.remove(import_ldif) + syntax_err_ldif = ldif_dir + '/syntax_err.dif' + if os.path.exists(syntax_err_ldif): + os.remove(syntax_err_ldif) + + +@pytest.fixture(scope="function") +def _import_clean(request, topo): + request.addfinalizer(lambda: _import_clean_topo(topo)) + + +def _import_offline(topo, no_no): + """ + Will import ldifs offline + """ + _check_users_before_test(topo, no_no) + ldif_dir = topo.standalone.get_ldif_dir() + import_ldif = ldif_dir + '/basic_import.ldif' + # Generate ldif + _generate_ldif(topo, no_no) + # Offline import + topo.standalone.stop() + t1 = time.time() + if not topo.standalone.ldif2db('userRoot', None, None, None, import_ldif): + topo.standalone.start() + assert False + total_time = time.time() - t1 + topo.standalone.start() + _search_for_user(topo, no_no) + return total_time + + +def _import_online(topo, no_no): + """ + Will import ldifs online + """ + _check_users_before_test(topo, no_no) + ldif_dir = topo.standalone.get_ldif_dir() + import_ldif = ldif_dir + '/basic_import.ldif' + _generate_ldif(topo, no_no) + # Online + import_task = ImportTask(topo.standalone) + import_task.import_suffix_from_ldif(ldiffile=import_ldif, suffix=DEFAULT_SUFFIX) + + # Wait a bit till the task is created and available for searching + if ds_is_newer('1.4.1.2'): + for x in range(60): + if 
import_task.present('nstaskcreated'): + break + time.sleep(0.5) + assert import_task.present('nstaskcreated') + else: + time.sleep(0.5) + + # Good as place as any to quick test the task has some expected attributes + assert import_task.present('nstasklog') + assert import_task.present('nstaskcurrentitem') + assert import_task.present('nstasktotalitems') + assert import_task.present('ttl') + import_task.wait() + topo.standalone.searchAccessLog('ADD dn="cn=import') + topo.standalone.searchErrorsLog('import userRoot: Import complete.') + _search_for_user(topo, no_no) + + +def _create_bogus_ldif(topo): + """ + Will create bogus ldifs + """ + ldif_dir = topo.standalone.get_ldif_dir() + line1 = r'dn: cn=Eladio \"A\"\, Santabarbara\, (A\, B\, C),ou=Accounting, dc=example,dc=com' + line2 = """objectClass: top + objectClass: person + objectClass: organizationalPerson + objectClass: inetOrgPerson + cn: Eladio "A", Santabarbara, (A, B, C) + cn: Eladio Santabarbara + sn: Santabarbara + givenName: Eladio + ou: Accounting""" + with open(f'{ldif_dir}/bogus.dif', 'w') as out: + out.write(f'{line1}{line2}') + out.close() + import_ldif1 = ldif_dir + '/bogus.ldif' + return import_ldif1 + + +def _create_syntax_err_ldif(topo): + """ + Create an ldif file, which contains an entry that violates syntax check + """ + ldif_dir = topo.standalone.get_ldif_dir() + line1 = """dn: dc=example,dc=com +objectClass: top +objectClass: domain +dc: example + +dn: ou=groups,dc=example,dc=com +objectClass: top +objectClass: organizationalUnit +ou: groups + +dn: uid=JHunt,ou=groups,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectclass: inetUser +cn: James Hunt +sn: Hunt +uid: JHunt +givenName: +""" + with open(f'{ldif_dir}/syntax_err.ldif', 'w') as out: + out.write(f'{line1}') + os.chmod(out.name, 0o777) + out.close() + import_ldif1 = ldif_dir + '/syntax_err.ldif' + return import_ldif1 + + +def _now(): + """ + Get current time 
with the format that _check_for_core requires + """ + now = datetime.now() + return now.strftime("%Y-%m-%d %H:%M:%S") + + +def __check_for_core(now): + """ + Check if ns-slapd generated a core since the provided date by looking in the system logs. + """ + cmd = [ 'journalctl' ,'-S', now, '-t', 'audit', '-g', 'ANOM_ABEND.*ns-slapd' ] + result = subprocess.run(cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, text=True) + if result.returncode != 1: + # journalctl returns 1 if there is no matching records, and 0 if there are records + log.error('journalctl output is:\n%s' % result.stdout) + raise AssertionError(f'journalctl reported that ns-slapd crashes after {now}') + + +def test_import_with_index(topo, _import_clean): + """ + Add an index, then import via cn=tasks + + :id: 9ddaf0df-7298-42bb-bdfa-a889ee68bc09 + :setup: Standalone Instance + :steps: + 1. Creating the room number index + 2. Importing online + 3. Import is done -- verifying that it worked + :expectedresults: + 1. Operation successful + 2. Operation successful + 3. 
        Operation successful
    """
    place = topo.standalone.dbdir
    if topo.standalone.is_dbi_supported():
        assert not topo.standalone.is_dbi('userRoot/roomNumber.db')
    else:
        assert not glob.glob(f'{place}/userRoot/roomNumber.db*', recursive=True)
    # Creating the room number index
    indexes = Indexes(topo.standalone)
    indexes.create(properties={
        'cn': 'roomNumber',
        'nsSystemIndex': 'false',
        'nsIndexType': 'eq'})
    topo.standalone.restart()
    # Importing online
    _import_online(topo, 5)
    # Import is done -- verifying that the new index file was created
    if topo.standalone.is_dbi_supported():
        assert topo.standalone.is_dbi('userRoot/roomNumber.db')
    else:
        assert glob.glob(f'{place}/userRoot/roomNumber.db*', recursive=True)


def test_online_import_with_warning(topo, _import_clean):
    """
    Import an ldif file with syntax errors, verify skipped entry warning code

    :id: 9b44cd0e-9d4b-4ae9-b750-cc7ba58d4529
    :setup: Standalone Instance
    :steps:
        1. Create standalone Instance
        2. Create an ldif file with an entry that violates syntax check (empty givenname)
        3. Online import of troublesome ldif file
    :expectedresults:
        1. Success
        2. Success
        3. Successful import with skipped entry warning
    """
    topo.standalone.restart()

    import_task = ImportTask(topo.standalone)
    import_ldif1 = _create_syntax_err_ldif(topo)

    # Importing the offending ldif file - online
    import_task.import_suffix_from_ldif(ldiffile=import_ldif1, suffix=DEFAULT_SUFFIX)

    # There is just a single entry in this ldif
    import_task.wait(5)

    # Check for the task nsTaskWarning attr, make sure its set to skipped entry code
    assert import_task.present('nstaskwarning')
    assert TaskWarning.WARN_SKIPPED_IMPORT_ENTRY == import_task.get_task_warn()


def test_crash_on_ldif2db(topo, _import_clean):
    """
    Delete the cn=monitor entry for an LDBM backend instance. Doing this will
    cause the DS to re-create that entry the next time it starts up.

    :id: aecad390-9352-11ea-8a31-8c16451d917b
    :setup: Standalone Instance
    :steps:
        1. Delete the cn=monitor entry for an LDBM backend instance
        2. Restart the server and verify that the LDBM monitor entry was re-created.
    :expectedresults:
        1. Operation successful
        2. Operation successful
    """
    # Delete the cn=monitor entry for an LDBM backend instance. Doing this will
    # cause the DS to re-create that entry the next time it starts up.
    monitor = Monitor(topo.standalone)
    monitor.delete()
    # The offline import stops and restarts the server, which exercises the
    # monitor re-creation path (the server must not crash doing so).
    _import_offline(topo, 5)


@pytest.mark.bz185477
def test_ldif2db_allows_entries_without_a_parent_to_be_imported(topo, _import_clean):
    """Should reject import of entries that are missing their parent suffix

    :id: 27195cea-9c0e-11ea-800b-8c16451d917b
    :setup: Standalone Instance
    :steps:
        1. Import the offending LDIF data - offline
        2. Check the errors log for the schema-violation skip message
    :expectedresults:
        1. Operation successful
        2. Operation Fail
    """
    import_ldif1 = _create_bogus_ldif(topo)
    # Import the offending LDIF data - offline
    topo.standalone.stop()
    topo.standalone.ldif2db('userRoot', None, None, None, import_ldif1)
    # The entry violates schema, so the import should log that it was skipped
    topo.standalone.searchErrorsLog('import_producer - import userRoot: Skipping entry '
                                    '"dc=example,dc=com" which violates schema')
    topo.standalone.start()


def test_ldif2db_syntax_check(topo, _import_clean):
    """ldif2db should return a warning when a skipped entry has occurred.

    :id: 85e75670-42c5-4062-9edc-7f117c97a06f
    :setup:
        1. Standalone Instance
        2. Ldif entry that violates syntax check rule (empty givenname)
    :steps:
        1. Create an ldif file which violates the syntax checking rule
        2. Stop the server and import ldif file with ldif2db
    :expectedresults:
        1.
ldif2db import returns a warning to signify skipped entries + """ + import_ldif1 = _create_syntax_err_ldif(topo) + # Import the offending LDIF data - offline + topo.standalone.stop() + ret = topo.standalone.ldif2db('userRoot', None, None, None, import_ldif1) + assert ret == TaskWarning.WARN_SKIPPED_IMPORT_ENTRY + topo.standalone.start() + + +@pytest.mark.skipif(get_default_db_lib() == "mdb", reason="Not cache size over mdb") +def test_issue_a_warning_if_the_cache_size_is_smaller(topo, _import_clean): + """Report during startup if nsslapd-cachememsize is too small + + :id: 1aa8cbda-9c0e-11ea-9297-8c16451d917b + :setup: Standalone Instance + :steps: + 1. Set nsslapd-cache-autosize to 0 + 2. Change cachememsize + 3. Check that cachememsize is sufficiently small + 4. Import some users to make id2entry.db big + 5. Warning message should be there in error logs + :expectedresults: + 1. Operation successful + 2. Operation successful + 3. Operation successful + 4. Operation successful + 5. Operation successful + """ + config = LDBMConfig(topo.standalone) + backend = Backends(topo.standalone).list()[0] + # Set nsslapd-cache-autosize to 0 + config.replace('nsslapd-cache-autosize', '0') + # Change cachememsize + backend.replace('nsslapd-cachememsize', '1') + # Check that cachememsize is sufficiently small + assert int(backend.get_attr_val_utf8('nsslapd-cachememsize')) < 1500000 + # Import some users to make id2entry.db big + _import_offline(topo, 20) + # warning message should look like + assert topo.standalone.searchErrorsLog('INFO - ldbm_instance_config_cachememsize_set - ' + 'force a minimal value 512000') + + +@pytest.fixture(scope="function") +def _toggle_private_import_mem(request, topo): + config = LDBMConfig(topo.standalone) + config.replace_many( + ('nsslapd-db-private-import-mem', 'on'), + ('nsslapd-import-cache-autosize', '0')) + + def finofaci(): + # nsslapd-import-cache-autosize: off and + # nsslapd-db-private-import-mem: off + config.replace_many( + 
('nsslapd-db-private-import-mem', 'off')) + request.addfinalizer(finofaci) + + +#unstable or unstatus tests, skipped for now +#@pytest.mark.flaky(max_runs=2, min_passes=1) +@pytest.mark.skipif(get_default_db_lib() == "mdb", reason="nsslapd-db-private-import-mem and nsslapd-import-cache-autosize parameters are ignored when usign lmdb") +def test_fast_slow_import(topo, _toggle_private_import_mem, _import_clean): + """With nsslapd-db-private-import-mem: on is faster import. + + :id: 3044331c-9c0e-11ea-ac9f-8c16451d917b + :setup: Standalone Instance + :steps: + 1. Let's set nsslapd-db-private-import-mem:on, nsslapd-import-cache-autosize: 0 + 2. Measure offline import time duration total_time1 + 3. Now nsslapd-db-private-import-mem:off + 4. Measure offline import time duration total_time2 + 5. total_time1 < total_time2 + 6. Set nsslapd-db-private-import-mem:on, nsslapd-import-cache-autosize: -1 + 7. Measure offline import time duration total_time1 + 8. Now nsslapd-db-private-import-mem:off + 9. Measure offline import time duration total_time2 + 10. total_time1 < total_time2 + :expectedresults: + 1. Operation successful + 2. Operation successful + 3. Operation successful + 4. Operation successful + 5. Operation successful + 6. Operation successful + 7. Operation successful + 8. Operation successful + 9. Operation successful + 10. 
Operation successful + """ + # Let's set nsslapd-db-private-import-mem:on, nsslapd-import-cache-autosize: 0 + config = LDBMConfig(topo.standalone) + # Measure offline import time duration total_time1 + total_time1 = _import_offline(topo, 1000) + # Now nsslapd-db-private-import-mem:off + config.replace('nsslapd-db-private-import-mem', 'off') + accounts = Accounts(topo.standalone, DEFAULT_SUFFIX) + for i in accounts.filter('(uid=*)'): + UserAccount(topo.standalone, i.dn).delete() + # Measure offline import time duration total_time2 + total_time2 = _import_offline(topo, 1000) + # total_time1 < total_time2 + log.info("total_time1 = %f" % total_time1) + log.info("total_time2 = %f" % total_time2) + assert total_time1 < total_time2 + + # Set nsslapd-db-private-import-mem:on, nsslapd-import-cache-autosize: -1 + config.replace_many( + ('nsslapd-db-private-import-mem', 'on'), + ('nsslapd-import-cache-autosize', '-1')) + for i in accounts.filter('(uid=*)'): + UserAccount(topo.standalone, i.dn).delete() + # Measure offline import time duration total_time1 + total_time1 = _import_offline(topo, 1000) + # Now nsslapd-db-private-import-mem:off + config.replace('nsslapd-db-private-import-mem', 'off') + for i in accounts.filter('(uid=*)'): + UserAccount(topo.standalone, i.dn).delete() + # Measure offline import time duration total_time2 + total_time2 = _import_offline(topo, 1000) + # total_time1 < total_time2 + log.info("total_time1 = %f" % total_time1) + log.info("total_time2 = %f" % total_time2) + assert total_time1 < total_time2 + + +@pytest.mark.bz175063 +def test_entry_with_escaped_characters_fails_to_import_and_index(topo, _import_clean): + """If missing entry_id is found, skip it and continue reading the primary db to be re indexed. + + :id: 358c938c-9c0e-11ea-adbc-8c16451d917b + :setup: Standalone Instance + :steps: + 1. Import the example data from ldif. + 2. Remove some of the other entries that were successfully imported. + 3. Now re-index the database. + 4. 
Should not return error. + :expectedresults: + 1. Operation successful + 2. Operation successful + 3. Operation successful + 4. Operation successful + """ + # Import the example data from ldif + _import_offline(topo, 10) + count = 0 + # Remove some of the other entries that were successfully imported + for user1 in [user for user in Accounts(topo.standalone, DEFAULT_SUFFIX).list() if user.dn.startswith('uid')]: + if count <= 2: + UserAccount(topo.standalone, user1.dn).delete() + count += 1 + # Now re-index the database + topo.standalone.stop() + topo.standalone.db2index() + topo.standalone.start() + # Should not return error. + assert not topo.standalone.searchErrorsLog('error') + assert not topo.standalone.searchErrorsLog('foreman fifo error') + + +def test_import_perf_after_failure(topo): + """Make an import fail by specifying the wrong LDIF file name, then + try the import with the correct name. Make sure the import performance + is what we expect. + + :id: d21dc67f-475e-402a-be9e-3eeb9181c156 + :setup: Standalone Instance + :steps: + 1. Build LDIF file + 2. Import invalid LDIF filename + 3. Import valid LDIF filename + 4. Import completes in a timely manner + :expectedresults: + 1. Success + 2. Success + 3. Success + 4. 
Success + """ + + ldif_dir = topo.standalone.get_ldif_dir() + import_ldif = ldif_dir + '/perf_import.ldif' + bad_import_ldif = ldif_dir + '/perf_import_typo.ldif' + + # Build LDIF file + dbgen_users(topo.standalone, 30000, import_ldif, DEFAULT_SUFFIX) + + # Online import which fails + import_task = ImportTask(topo.standalone) + import_task.import_suffix_from_ldif(ldiffile=bad_import_ldif, suffix=DEFAULT_SUFFIX) + import_task.wait() + + # Valid online import + time.sleep(1) + import_task = ImportTask(topo.standalone) + import_task.import_suffix_from_ldif(ldiffile=import_ldif, suffix=DEFAULT_SUFFIX) + import_task.wait(values['wait30']) # If things go wrong import takes a lot longer than this + assert import_task.is_complete() + + # Restart server + topo.standalone.restart() + + +def test_import_wrong_file_path(topo): + """Make an import fail by specifying the wrong LDIF file name + + :id: 6795a3cd-b95e-4777-bc77-25ab864882a3 + :setup: Standalone Instance + :steps: + 1. Do an import with an invalid file path + 2. Appropriate error is returned + :expectedresults: + 1. Success + 2. Success + """ + import_ldif = '/nope/perf_import.ldif' + args = FakeArgs() + args.instance = topo.standalone.serverid + args.backend = "userroot" + args.encrypted = False + args.replication = False + args.ldif = import_ldif + + with pytest.raises(ValueError) as e: + dbtasks_ldif2db(topo.standalone, log, args) + assert "The LDIF file does not exist" in str(e.value) + + +@pytest.mark.skipif(get_default_db_lib() != "mdb", reason="lmdb specific test") +def test_crash_on_ldif2db_with_lmdb(topo, _import_clean): + """Make an import fail by specifying a too small db size then check that + there is no crash. + + :id: d42585b6-31d0-11ee-8724-482ae39447e5 + :setup: Standalone Instance + :steps: + 1. Configure a small database size + 2. Import an ldif with 1K users + 3. Check that ns-slapd has not aborted + 4. Import an ldif with 500 users + 5. Check that ns-slapd has not aborted + :expectedresults: + 1. 
Success + 2. Success + 3. Success (ns-slapd should not have aborted) + 4. Import should fail + 5. Success (ns-slapd should not have aborted) + + """ + TINY_MAP_SIZE = 16 * 1024 * 1024 + inst = topo.standalone + handler = LMDB_LDBMConfig(inst) + mapsize = TINY_MAP_SIZE + log.info(f'Set lmdb map size to {mapsize}.') + handler.replace('nsslapd-mdb-max-size', str(mapsize)) + inst.stop() + for dbfile in ['data.mdb', 'INFO.mdb', 'lock.mdb']: + try: + os.remove(f'{inst.dbdir}/{dbfile}') + except FileNotFoundError: + pass + inst.start() + now = _now() + _import_offline(topo, 1000) + __check_for_core(now) + _import_clean_topo(topo) + with pytest.raises(AssertionError): + _import_offline(topo, 500_000) + __check_for_core(now) + + +if __name__ == '__main__': + # Run isolated + # -s for DEBUG mode + CURRENT_FILE = os.path.realpath(__file__) + pytest.main("-s %s" % CURRENT_FILE) diff --git a/dirsrvtests/tests/suites/import/import_warning_test.py b/dirsrvtests/tests/suites/import/import_warning_test.py new file mode 100644 index 0000000..255a67c --- /dev/null +++ b/dirsrvtests/tests/suites/import/import_warning_test.py @@ -0,0 +1,123 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2021 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. 
+# --- END COPYRIGHT BLOCK --- +# + +import pytest + +from lib389.utils import * +from lib389.topologies import topology_st +from lib389.cli_conf.backend import * +from lib389.cli_base import FakeArgs +from lib389._constants import DEFAULT_SUFFIX + +pytestmark = pytest.mark.tier1 + + +DEBUGGING = os.getenv("DEBUGGING", default=False) +if DEBUGGING: + logging.getLogger(__name__).setLevel(logging.DEBUG) +else: + logging.getLogger(__name__).setLevel(logging.INFO) +log = logging.getLogger(__name__) + + +def create_example_ldif(topology_st): + ldif_dir = topology_st.standalone.get_ldif_dir() + line1 = """version: 1 + +# entry-id: 1 +dn: dc=example,dc=com +nsUniqueId: e5c4172a-97aa11eb-aaa8e47e-b1e12808 +objectClass: top +objectClass: domain +dc: example +description: dc=example,dc=com +creatorsName: cn=Directory Manager +modifiersName: cn=Directory Manager +createTimestamp: 20210407140942Z +modifyTimestamp: 20210407140942Z +aci: (targetattr="dc || description || objectClass")(targetfilter="(objectClas + s=domain)")(version 3.0; acl "Enable anyone domain read"; allow (read, search + , compare)(userdn="ldap:///anyone");) + +# entry-id: 3 +dn: uid=demo,ou=People,dc=example,dc=com +objectClass: person +objectClass: inetOrgPerson +objectClass: organizationalPerson +objectClass: posixAccount +objectClass: top +uidNumber: 1119 +gidNumber: 1000 +nsUniqueId: 9a0e6603-a1cb11eb-aa2daeeb-95660ab0 +creatorsName: +modifiersName: cn=directory manager +createTimestamp: 20210420112927Z +modifyTimestamp: 20210420113016Z +passwordGraceUserTime: 0 +cn: demo +homeDirectory: /home/demo +uid: demo +sn: demo + +""" + with open(f'{ldif_dir}/warning_parent.ldif', 'w') as out: + out.write(f'{line1}') + os.chmod(out.name, 0o777) + out.close() + import_ldif1 = ldif_dir + '/warning_parent.ldif' + return import_ldif1 + + +@pytest.mark.skipif(ds_is_older('1.4.3.26'), reason="Fail because of bug 1951537") +@pytest.mark.bz1951537 +@pytest.mark.ds4734 +def test_import_warning(topology_st): + """Import 
ldif file with skipped entries to generate a warning message + + :id: 66f9275b-11b4-4718-b401-18fa6011b362 + :setup: Standalone Instance + :steps: + 1. Create LDIF file with skipped entries + 2. Import the LDIF file with backend import + 3. Check the topology logs + 4. Check errors log + :expectedresults: + 1. Success + 2. Success + 3. Result message should contain warning code + 4. Errors log should contain skipped entry message + """ + + standalone = topology_st.standalone + message = 'The import task has finished successfully, with warning code 8, check the logs for more detail' + + args = FakeArgs() + args.be_name = 'userRoot' + args.ldifs = [create_example_ldif(topology_st)] + args.chunks_size = None + args.encrypted = False + args.gen_uniq_id = None + args.only_core = False + args.include_suffixes = 'dc=example,dc=com' + args.exclude_suffixes = None + args.timeout = 0 + + log.info('Import the LDIF file') + backend_import(standalone, DEFAULT_SUFFIX, topology_st.logcap.log, args) + + log.info('Check logs for a warning message') + assert topology_st.logcap.contains(message) + assert standalone.ds_error_log.match('.*Skipping entry "uid=demo,ou=People,dc=example,dc=com" which has no parent.*') + + +if __name__ == '__main__': + # Run isolated + # -s for DEBUG mode + CURRENT_FILE = os.path.realpath(__file__) + pytest.main("-s %s" % CURRENT_FILE) diff --git a/dirsrvtests/tests/suites/import/regression_test.py b/dirsrvtests/tests/suites/import/regression_test.py new file mode 100644 index 0000000..d4b7c99 --- /dev/null +++ b/dirsrvtests/tests/suites/import/regression_test.py @@ -0,0 +1,446 @@ +# Copyright (C) 2020 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. 
+# --- END COPYRIGHT BLOCK --- +# +from decimal import * +import ldap +import logging +import os +import pytest +import threading +import time +from lib389.backend import Backends +from lib389.properties import TASK_WAIT +from lib389.topologies import topology_st as topo +from lib389.dbgen import dbgen_users +from lib389._constants import DEFAULT_SUFFIX +from lib389.tasks import * +from lib389.idm.user import UserAccounts +from lib389.idm.directorymanager import DirectoryManager +from lib389.dbgen import * +from lib389.utils import * +from lib389.config import LMDB_LDBMConfig + +pytestmark = pytest.mark.tier1 + +DEBUGGING = os.getenv("DEBUGGING", default=False) +if DEBUGGING: + logging.getLogger(__name__).setLevel(logging.DEBUG) +else: + logging.getLogger(__name__).setLevel(logging.INFO) +log = logging.getLogger(__name__) + +TEST_SUFFIX1 = "dc=importest1,dc=com" +TEST_BACKEND1 = "importest1" +TEST_SUFFIX2 = "dc=importest2,dc=com" +TEST_BACKEND2 = "importest2" +TEST_DEFAULT_SUFFIX = "dc=default,dc=com" +TEST_DEFAULT_NAME = "default" + +BIG_MAP_SIZE = 35 * 1024 * 1024 * 1024 + +def _check_disk_space(): + if get_default_db_lib() == "mdb": + statvfs = os.statvfs(os.environ.get('PREFIX', "/")) + available = statvfs.f_frsize * statvfs.f_bavail + return available >= BIG_MAP_SIZE + return True + + +@pytest.fixture(scope="function") +def _set_mdb_map_size(request, topo): + if get_default_db_lib() == "mdb": + handler = LMDB_LDBMConfig(topo.standalone) + mapsize = BIG_MAP_SIZE + log.info(f'Set lmdb map size to {mapsize}.') + handler.replace('nsslapd-mdb-max-size', str(mapsize)) + topo.standalone.restart() + +class AddDelUsers(threading.Thread): + def __init__(self, inst): + threading.Thread.__init__(self) + self.daemon = True + self.inst = inst + self._should_stop = False + self._ran = False + + def run(self): + # Add 1000 entries + log.info('Run.') + conn = DirectoryManager(self.inst.standalone).bind() + + time.sleep(30) + log.info('Adding users.') + for i in range(1000): + 
user = UserAccounts(conn, DEFAULT_SUFFIX) + users = user.create_test_user(uid=i) + users.delete() + self._ran = True + if self._should_stop: + break + if not self._should_stop: + raise RuntimeError('We finished too soon.') + conn.close() + + def stop(self): + self._should_stop = True + + def has_started(self): + return self._ran + + +def test_replay_import_operation(topo): + """ Check after certain failed import operation, is it + possible to replay an import operation + + :id: 5f5ca532-8e18-4f7b-86bc-ac585215a473 + :feature: Import + :setup: Standalone instance + :steps: + 1. Export the backend into an ldif file + 2. Perform high load of operation on the server (Add/Del users) + 3. Perform an import operation + 4. Again perform an import operation (same as 3) + :expectedresults: + 1. It should be successful + 2. It should be successful + 3. It should be unsuccessful, should give OPERATIONS_ERROR + 4. It should be successful now + """ + log.info("Exporting LDIF online...") + ldif_dir = topo.standalone.get_ldif_dir() + export_ldif = ldif_dir + '/export.ldif' + + r = ExportTask(topo.standalone) + r.export_suffix_to_ldif(ldiffile=export_ldif, suffix=DEFAULT_SUFFIX) + r.wait() + add_del_users1 = AddDelUsers(topo) + add_del_users1.start() + + log.info("Importing LDIF online, should raise operation error.") + + trials = 0 + while not add_del_users1.has_started() and trials < 10: + trials += 1 + time.sleep(1) + r = ImportTask(topo.standalone) + try: + r.import_suffix_from_ldif(ldiffile=export_ldif, suffix=DEFAULT_SUFFIX) + except ldap.OPERATIONS_ERROR: + break + log.info(f'Looping. Tried {trials} times so far.') + add_del_users1.stop() + add_del_users1.join() + + log.info("Importing LDIF online") + + r = ImportTask(topo.standalone) + r.import_suffix_from_ldif(ldiffile=export_ldif, suffix=DEFAULT_SUFFIX) + + +def test_import_be_default(topo): + """ Create a backend using the name "default". 
previously this name was + used int + + :id: 8e507beb-e917-4330-8cac-1ff0eee10508 + :feature: Import + :setup: Standalone instance + :steps: + 1. Create a test suffix using the be name of "default" + 2. Create an ldif for the "default" backend + 3. Import ldif + 4. Verify all entries were imported + :expectedresults: + 1. Success + 2. Success + 3. Success + 4. Success + """ + log.info('Adding suffix:{} and backend: {}...'.format(TEST_DEFAULT_SUFFIX, + TEST_DEFAULT_NAME)) + backends = Backends(topo.standalone) + backends.create(properties={'nsslapd-suffix': TEST_DEFAULT_SUFFIX, + 'name': TEST_DEFAULT_NAME}) + + log.info('Create LDIF file and import it...') + ldif_dir = topo.standalone.get_ldif_dir() + ldif_file = os.path.join(ldif_dir, 'default.ldif') + dbgen_users(topo.standalone, 5, ldif_file, TEST_DEFAULT_SUFFIX) + + log.info('Stopping the server and running offline import...') + topo.standalone.stop() + assert topo.standalone.ldif2db(TEST_DEFAULT_NAME, None, None, + None, ldif_file) + topo.standalone.start() + + log.info('Verifying entry count after import...') + entries = topo.standalone.search_s(TEST_DEFAULT_SUFFIX, + ldap.SCOPE_SUBTREE, + "(objectclass=*)") + assert len(entries) > 1 + + log.info('Test PASSED') + + +def test_del_suffix_import(topo): + """Adding a database entry fails if the same database was deleted after an import + + :id: 652421ef-738b-47ed-80ec-2ceece6b5d77 + :feature: Import + :setup: Standalone instance + :steps: 1. Create a test suffix and add few entries + 2. Stop the server and do offline import using ldif2db + 3. Delete the suffix backend + 4. Add a new suffix with the same database name + 5. 
Check if adding the same database name is a success + :expectedresults: Adding database with the same name should be successful + """ + + log.info('Adding suffix:{} and backend: {}'.format(TEST_SUFFIX1, TEST_BACKEND1)) + backends = Backends(topo.standalone) + backend = backends.create(properties={'nsslapd-suffix': TEST_SUFFIX1, + 'name': TEST_BACKEND1}) + + log.info('Create LDIF file and import it') + ldif_dir = topo.standalone.get_ldif_dir() + ldif_file = os.path.join(ldif_dir, 'suffix_del1.ldif') + + dbgen_users(topo.standalone, 10, ldif_file, TEST_SUFFIX1) + + log.info('Stopping the server and running offline import') + topo.standalone.stop() + assert topo.standalone.ldif2db(TEST_BACKEND1, TEST_SUFFIX1, None, None, ldif_file) + topo.standalone.start() + + log.info('Deleting suffix-{}'.format(TEST_SUFFIX2)) + backend.delete() + + log.info('Adding the same database-{} after deleting it'.format(TEST_BACKEND1)) + backends.create(properties={'nsslapd-suffix': TEST_SUFFIX1, + 'name': TEST_BACKEND1}) + + +def test_del_suffix_backend(topo): + """Adding a database entry fails if the same database was deleted after an import + + :id: ac702c35-74b6-434e-8e30-316433f3e91a + :feature: Import + :setup: Standalone instance + :steps: 1. Create a test suffix and add entries + 2. Stop the server and do online import using ldif2db + 3. Delete the suffix backend + 4. Add a new suffix with the same database name + 5. 
Restart the server and check the status + :expectedresults: Adding database with the same name should be successful and the server should not hang + """ + + log.info('Adding suffix:{} and backend: {}'.format(TEST_SUFFIX2, TEST_BACKEND2)) + backends = Backends(topo.standalone) + backend = backends.create(properties={'nsslapd-suffix': TEST_SUFFIX2, + 'name': TEST_BACKEND2}) + + log.info('Create LDIF file and import it') + ldif_dir = topo.standalone.get_ldif_dir() + ldif_file = os.path.join(ldif_dir, 'suffix_del2.ldif') + + dbgen_users(topo.standalone, 10, ldif_file, TEST_SUFFIX2) + + topo.standalone.tasks.importLDIF(suffix=TEST_SUFFIX2, input_file=ldif_file, args={TASK_WAIT: True}) + + log.info('Deleting suffix-{}'.format(TEST_SUFFIX2)) + backend.delete() + + log.info('Adding the same database-{} after deleting it'.format(TEST_BACKEND2)) + backends.create(properties={'nsslapd-suffix': TEST_SUFFIX2, + 'name': TEST_BACKEND2}) + log.info('Checking if server can be restarted after re-adding the same database') + topo.standalone.restart() + assert not topo.standalone.detectDisorderlyShutdown() + + +@pytest.mark.bz1406101 +@pytest.mark.ds49071 +def test_import_duplicate_dn(topo): + """Import ldif with duplicate DNs, should not log error "unable to flush" + + :id: dce2b898-119d-42b8-a236-1130f58bff17 + :setup: Standalone instance, ldif file with duplicate entries + :steps: + 1. Create a ldif file with duplicate entries + 2. Import ldif file to DS + 3. Check error log file, it should not log "unable to flush" + 4. Check error log file, it should log "Duplicated DN detected" + :expectedresults: + 1. Success + 2. Success + 3. Success + 4. 
Success + """ + + standalone = topo.standalone + + log.info('Delete the previous error logs') + standalone.deleteErrorLogs() + + log.info('Create import file') + l = """dn: dc=example,dc=com +objectclass: top +objectclass: domain +dc: example + +dn: ou=myDups00001,dc=example,dc=com +objectclass: top +objectclass: organizationalUnit +ou: myDups00001 + +dn: ou=myDups00001,dc=example,dc=com +objectclass: top +objectclass: organizationalUnit +ou: myDups00001 +""" + + ldif_dir = standalone.get_ldif_dir() + ldif_file = os.path.join(ldif_dir, 'data.ldif') + with open(ldif_file, "w") as fd: + fd.write(l) + fd.close() + os.chmod(ldif_file, 0o777) + + log.info('Import ldif with duplicate entry') + assert standalone.tasks.importLDIF(suffix=DEFAULT_SUFFIX, input_file=ldif_file, args={TASK_WAIT: True}) + + log.info('Restart the server to flush the logs') + standalone.restart() + + log.info('Error log should not have "unable to flush" message') + assert not standalone.ds_error_log.match('.*unable to flush.*') + + log.info('Error log should have "Duplicated DN detected" message') + assert standalone.ds_error_log.match('.*Duplicated DN detected.*') + +@pytest.mark.bz1749595 +@pytest.mark.tier2 +@pytest.mark.xfail(not _check_disk_space(), reason="not enough disk space for lmdb map") +@pytest.mark.xfail(ds_is_older("1.3.10.1"), reason="bz1749595 not fixed on versions older than 1.3.10.1") +def test_large_ldif2db_ancestorid_index_creation(topo, _set_mdb_map_size): + """Import with ldif2db a large file - check that the ancestorid index creation phase has a correct performance + + :id: fe7f78f6-6e60-425d-ad47-b39b67e29113 + :setup: Standalone instance + :steps: + 1. Delete the previous errors log to start from a fresh one + 2. Create test suffix and backend + 3. Create a large nested ldif file + 4. Stop the server + 5. Run an offline import + 6. Restart the server + 7. Check in the errors log that an independant ancestorid IDs sorting is done + 8. 
Get the log of the starting of the ancestorid indexing process + 9. Get the log of the end of the ancestorid indexing process + 10. Get the start and end time for ancestorid index creation from these logs + 11. Calculate the duration of the ancestorid indexing process + :expectedresults: + 1. Success + 2. Test suffix and backend successfully created + 3. ldif file successfully created + 4. Success + 5. Import is successfully performed + 6. Success + 7. Log of ancestorid sorting start and end are present + 8. Log of the beginning of gathering ancestorid is found + 9. Log of the final ancestorid index creation is found + 10. Start and end times are successfully extracted + 11. The duration of the ancestorid index creation process should be less than 10s + """ + + ldif_dir = topo.standalone.get_ldif_dir() + ldif_file = os.path.join(topo.standalone.ds_paths.ldif_dir, 'large_nested.ldif') + + # Have a reasonable balance between the need for a large ldif file to import and the time of test execution + # total number of users + num_users = 100000 + + # Choose a limited number of users per node to get as much as possible non-leaf entries + node_limit = 5 + + # top suffix + suffix = 'o=test' + + # backend + backend = 'test' + + log.info('Delete the previous errors logs') + topo.standalone.deleteErrorLogs() + + log.info('Add suffix:{} and backend: {}...'.format(suffix, backend)) + + backends = Backends(topo.standalone) + backends.create(properties={'nsslapd-suffix': suffix, + 'name': backend}) + + props = { + 'numUsers' : num_users, + 'nodeLimit' : node_limit, + 'suffix' : suffix + } + instance = topo.standalone + + log.info('Create a large nested ldif file using dbgen : %s' % ldif_file) + dbgen_nested_ldif(instance, ldif_file, props) + + log.info('Stop the server and run offline import...') + topo.standalone.stop() + assert topo.standalone.ldif2db(backend, None, None, + None, ldif_file) + + log.info('Starting the server') + topo.standalone.start() + + # With lmdb there is 
no more any special phase for ancestorid + # because ancestorsid get updated on the fly while processing the + # entryrdn (by up the parents chain to compute the parentid + # + # But there is still a numSubordinates generation phase + if get_default_db_lib() == "mdb": + log.info('parse the errors logs to check lines with "Generating numSubordinates complete." are present') + end_numsubordinates = str(topo.standalone.ds_error_log.match(r'.*Generating numSubordinates complete.*'))[1:-1] + assert len(end_numsubordinates) > 0 + + else: + log.info('parse the errors logs to check lines with "Starting sort of ancestorid" are present') + start_sort_str = str(topo.standalone.ds_error_log.match(r'.*Starting sort of ancestorid non-leaf IDs*'))[1:-1] + assert len(start_sort_str) > 0 + + log.info('parse the errors logs to check lines with "Finished sort of ancestorid" are present') + end_sort_str = str(topo.standalone.ds_error_log.match(r'.*Finished sort of ancestorid non-leaf IDs*'))[1:-1] + assert len(end_sort_str) > 0 + + log.info('parse the error logs for the line with "Gathering ancestorid non-leaf IDs"') + start_ancestorid_indexing_op_str = str(topo.standalone.ds_error_log.match(r'.*Gathering ancestorid non-leaf IDs*'))[1:-1] + assert len(start_ancestorid_indexing_op_str) > 0 + + log.info('parse the error logs for the line with "Created ancestorid index"') + end_ancestorid_indexing_op_str = str(topo.standalone.ds_error_log.match(r'.*Created ancestorid index*'))[1:-1] + assert len(end_ancestorid_indexing_op_str) > 0 + + log.info('get the ancestorid non-leaf IDs indexing start and end time from the collected strings') + # Collected lines look like : '[15/May/2020:05:30:27.245967313 -0400] - INFO - bdb_get_nonleaf_ids - import userRoot: Gathering ancestorid non-leaf IDs...' 
+ # We are getting the sec.nanosec part of the date, '27.245967313' in the above example + start_time = (start_ancestorid_indexing_op_str.split()[0]).split(':')[3] + end_time = (end_ancestorid_indexing_op_str.split()[0]).split(':')[3] + + log.info('Calculate the elapsed time for the ancestorid non-leaf IDs index creation') + etime = (Decimal(end_time) - Decimal(start_time)) + # The time for the ancestorid index creation should be less than 10s for an offline import of an ldif file with 100000 entries / 5 entries per node + # Should be adjusted if these numbers are modified in the test + assert etime <= 10 + + +if __name__ == '__main__': + # Run isolated + # -s for DEBUG mode + CURRENT_FILE = os.path.realpath(__file__) + pytest.main("-s {}".format(CURRENT_FILE)) diff --git a/dirsrvtests/tests/suites/indexes/__init__.py b/dirsrvtests/tests/suites/indexes/__init__.py new file mode 100644 index 0000000..0444166 --- /dev/null +++ b/dirsrvtests/tests/suites/indexes/__init__.py @@ -0,0 +1,3 @@ +""" + :Requirement: 389-ds-base: Indexes +""" diff --git a/dirsrvtests/tests/suites/indexes/entryrdn_test.py b/dirsrvtests/tests/suites/indexes/entryrdn_test.py new file mode 100644 index 0000000..49535f8 --- /dev/null +++ b/dirsrvtests/tests/suites/indexes/entryrdn_test.py @@ -0,0 +1,149 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2022 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. 
+# --- END COPYRIGHT BLOCK --- +# +import time +import os +import pytest +import ldap +import logging +from lib389._constants import DEFAULT_BENAME, DEFAULT_SUFFIX +from lib389.backend import Backends +from lib389.idm.user import UserAccounts +from lib389.idm.organizationalunit import OrganizationalUnits +from lib389.topologies import topology_m2 as topo_m2 +from lib389.agreement import Agreements +from lib389.utils import ds_is_older, ensure_bytes +from lib389.tasks import Tasks,ExportTask, ImportTask +from lib389.topologies import topology_st as topo + +pytestmark = pytest.mark.tier1 + + +OUNAME = 'NewOU' +OUDN = f'ou={OUNAME},ou=people,{DEFAULT_SUFFIX}' +OUPROPERTIES = { 'ou' : OUNAME } +USERNAME = 'NewUser' +USERID = '100' +USERSDN = f'uid={USERNAME},{OUDN}' +USERPROPERTIES = { + 'uid': USERNAME, + 'sn': USERNAME, + 'cn': USERNAME, + 'uidNumber': USERID, + 'gidNumber': USERID, + 'homeDirectory': f'/home/{USERNAME}' + } + +# 2 tombstone entry + 1 RUV have 2 records in entryrdn index +# Each record have 1 rdn + 1 normalized rdn both containing nsuniqueid +# So 3 * 2 * 2 nsuniqueid substrings are expected +EXPECTED_NB_NSNIQUEID = 12 + +logging.getLogger(__name__).setLevel(logging.INFO) +log = logging.getLogger(__name__) + +def checkdbscancount(inst, pattern, expected_count): + inst.restart() + dbscanOut = inst.dbscan(args=['-f', f'{inst.dbdir}/{DEFAULT_BENAME}/entryrdn.db', '-A'], stopping=False) + count = dbscanOut.count(ensure_bytes(pattern)) + if count != expected_count: + log.info(f"dbscan output is: {dbscanOut}") + assert count == expected_count + + +def test_tombstone(topo_m2): + """ + An internal unindexed search was able to crash the server due to missing logging function. + + :id: a12eacac-4e35-11ed-8625-482ae39447e5 + :setup: 2 Suplier instances + :steps: + 1. Add an OrganizationalUnit + 2. Add an User as child of the new OrganizationalUnit + 3. Modify User's description + 4. Delele User + 5. Delete OrganizationalUnit + 6. 
Dump supplier1 entryrdn index + 7. Check that nsuniqueid appears three times in the dump result + 8. Export supplier1 with replication data + 9. Import supplier2 with previously exported ldif file + 10. Dump entryrdn index + 11. Check that nsuniqueid appears three times in the dump result + 12. Reindex entryrdn on supplier1 + 13. Dump entryrdn index on supplier + 14. Check that nsuniqueid appears three times in the dump result + 15. Perform bulk import from supplier1 to supplier2 + 16. Wait until bulk import is completed + 17. Dump entryrdn index on supplier + 18. Check that nsuniqueid appears three times in the dump result + :expectedresults: + 1. Should succeed + 2. Should succeed + 3. Should succeed + 4. Should succeed + 5. Should succeed + 6. Should succeed + 7. Should succeed + 8. Should succeed + 9. Should succeed + 10. Should succeed + 11. Should succeed + 12. Should succeed + 13. Should succeed + 14. Should succeed + 15. Should succeed + 16. Should succeed + 17. Should succeed + 18. Should succeed + """ + s1 = topo_m2.ms["supplier1"] + s2 = topo_m2.ms["supplier2"] + ldif_dir = s1.get_ldif_dir() + + log.info("Create tombstones...") + ous = OrganizationalUnits(s1, DEFAULT_SUFFIX) + ou = ous.create(properties=OUPROPERTIES) + users = UserAccounts(s1, DEFAULT_SUFFIX, rdn=None) + user = users.create(properties=USERPROPERTIES) + user.replace('description', 'New Description') + user.delete() + ou.delete() + # Need to restart the server otherwise bdb changes may not be up to date. 
+ checkdbscancount(s1, 'nsuniqueid', EXPECTED_NB_NSNIQUEID) + + log.info("Exporting LDIF online...") + export_ldif = ldif_dir + '/export.ldif' + export_task = Backends(s1).export_ldif(be_names=DEFAULT_BENAME, ldif=export_ldif, replication=True) + export_task.wait() + + log.info("Importing LDIF online...") + import_task = ImportTask(s2) + import_task.import_suffix_from_ldif(ldiffile=export_ldif, suffix=DEFAULT_SUFFIX) + import_task.wait() + checkdbscancount(s1, 'nsuniqueid', EXPECTED_NB_NSNIQUEID) + + log.info("Reindex online...") + task = Tasks(s2) + task.reindex(suffix=DEFAULT_SUFFIX, args={'wait': True}) + checkdbscancount(s1, 'nsuniqueid', EXPECTED_NB_NSNIQUEID) + + log.info("Bulk import...") + agmt = Agreements(s1).list()[0] + agmt.begin_reinit() + (done, error) = agmt.wait_reinit() + assert done is True + assert error is False + checkdbscancount(s1, 'nsuniqueid', EXPECTED_NB_NSNIQUEID) + + + +if __name__ == "__main__": + # Run isolated + # -s for DEBUG mode + CURRENT_FILE = os.path.realpath(__file__) + pytest.main("-s %s" % CURRENT_FILE) diff --git a/dirsrvtests/tests/suites/indexes/huge_index_key.py b/dirsrvtests/tests/suites/indexes/huge_index_key.py new file mode 100644 index 0000000..de6f872 --- /dev/null +++ b/dirsrvtests/tests/suites/indexes/huge_index_key.py @@ -0,0 +1,144 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2020 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. 
+# --- END COPYRIGHT BLOCK --- +# +import time +import os +import pytest +import ldap +import logging +from lib389._constants import DEFAULT_BENAME, DEFAULT_SUFFIX +from lib389.index import Indexes +from lib389.backend import Backends, DatabaseConfig +from lib389.idm.user import UserAccounts +from lib389.idm.group import Groups, Group +from lib389.topologies import topology_st as topo +from lib389.utils import ds_is_older, get_default_db_lib +from lib389.plugins import MemberOfPlugin +from lib389 import DirSrv +pytestmark = pytest.mark.tier1 + +DEBUGGING = os.getenv("DEBUGGING", default=False) +if DEBUGGING: + logging.getLogger(__name__).setLevel(logging.DEBUG) +else: + logging.getLogger(__name__).setLevel(logging.INFO) +log = logging.getLogger(__name__) + +users_list = [] + +@pytest.fixture(scope="function") +def add_users(request, topo): + """ + Add users + """ + users_num = 200 + users = UserAccounts(topo.standalone, DEFAULT_SUFFIX, rdn=None) + for num in range(users_num): + USER_NAME = "test_{:0>3d}".format( num ) + user = users.create(properties={ + 'uid': USER_NAME, + 'sn': USER_NAME, + 'cn': USER_NAME, + 'uidNumber': f'{num}', + 'gidNumber': f'{num}', + 'homeDirectory': f'/home/{USER_NAME}' + }) + users_list.append(user) + + def fin(): + """ + Removes users. + """ + if not topo.standalone.status(): + topo.standalone.start() + for user in users_list: + user.delete() + + request.addfinalizer(fin) + + +def test_huge_index_key(topo, add_users): + """ + Test very long indexed attribute values (that should be hashed on mdb) + + :id: 4bbd0ee2-0108-11ec-a5ce-482ae39447e5 + :customerscenario: False + :setup: Standalone instance + :steps: + 1. Add users + 2. Change nsslapd-idlistscanlimit to a smaller value to accelerate the reproducer + 3. Replace sn with a 600 bytes value + 4. equality search for the sn + 5. Range search including the sn + 6. Replace sn back with small value + 7. equality search for the sn + 8. Range search including the sn + :expectedresults: + 1. 
Should succeed + 2. Should succeed + 3. Should succeed + 4. Should succeed and have exactly 1 result + search should be indexed. + 5. Should succeed and have exactly 3 results + on bdb: search should be indexed. + on mdb: search should be unindexed. + 6. Should succeed + 7. Should succeed and have exactly 1 result + search should be indexed. + 8. Should succeed and have exactly 3 results + search should be indexed. + """ + inst = topo.standalone + ldc = super(DirSrv, inst); # ldap connection to be able to use + # the SimpleLDAPObject methods + shortsn='test_020' + test_user=users_list[20] + log.debug(f'Check user {test_user} sn: {test_user.get_attr_val_utf8("sn")}') + assert(test_user) + + users = UserAccounts(topo.standalone, DEFAULT_SUFFIX, rdn=None) + log.debug(f'Check users {users.list()}') + + sn600b = shortsn + \ + "0001abcdefghijklmnopqrstuvwxyz0001abcdefghijklmnopqrstuvwxyz" + \ + "0002abcdefghijklmnopqrstuvwxyz0001abcdefghijklmnopqrstuvwxyz" + \ + "0003abcdefghijklmnopqrstuvwxyz0001abcdefghijklmnopqrstuvwxyz" + \ + "0004abcdefghijklmnopqrstuvwxyz0001abcdefghijklmnopqrstuvwxyz" + \ + "0005abcdefghijklmnopqrstuvwxyz0001abcdefghijklmnopqrstuvwxyz" + \ + "0006abcdefghijklmnopqrstuvwxyz0001abcdefghijklmnopqrstuvwxyz" + \ + "0007abcdefghijklmnopqrstuvwxyz0001abcdefghijklmnopqrstuvwxyz" + \ + "0008abcdefghijklmnopqrstuvwxyz0001abcdefghijklmnopqrstuvwxyz" + \ + "0009abcdefghijklmnopqrstuvwxyz0001abcdefghijklmnopqrstuvwxyz" + \ + "0010abcdefghijklmnopqrstuvwxyz0001abcdefghijklmnopqrstuvwxyz" + + test_user.replace('sn', sn600b); + result = ldc.search_s(base=DEFAULT_SUFFIX, scope=ldap.SCOPE_SUBTREE, filterstr=f'(sn={sn600b})') + assert (len(result) == 1) + assert (not inst.searchAccessLog("notes=U")) + result = ldc.search_s(base=DEFAULT_SUFFIX, scope=ldap.SCOPE_SUBTREE, filterstr=f'(&(sn>=test_019)(sn<=test_021))') + #with pytest.raises(ldap.INVALID_SYNTAX): + assert (len(result) == 3) + if (get_default_db_lib() == "bdb"): + assert (not 
inst.searchAccessLog("notes=U")) + else: + assert (inst.searchAccessLog("notes=U")) + inst.deleteLog(inst.accesslog); + test_user.replace('sn', shortsn); + result = ldc.search_s(base=DEFAULT_SUFFIX, scope=ldap.SCOPE_SUBTREE, filterstr=f'(sn={shortsn})') + assert (len(result) == 1) + assert (not inst.searchAccessLog("notes=U")) + result = ldc.search_s(base=DEFAULT_SUFFIX, scope=ldap.SCOPE_SUBTREE, filterstr=f'(&(sn>=test_019)(sn<=test_021))') + assert (len(result) == 3) + assert (not inst.searchAccessLog("notes=U")) + + +if __name__ == "__main__": + # Run isolated + # -s for DEBUG mode + CURRENT_FILE = os.path.realpath(__file__) + pytest.main("-s %s" % CURRENT_FILE) diff --git a/dirsrvtests/tests/suites/indexes/regression_test.py b/dirsrvtests/tests/suites/indexes/regression_test.py new file mode 100644 index 0000000..3031623 --- /dev/null +++ b/dirsrvtests/tests/suites/indexes/regression_test.py @@ -0,0 +1,268 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2022 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. +# --- END COPYRIGHT BLOCK --- +# +import time +import os +import pytest +import ldap +from lib389._constants import DEFAULT_BENAME, DEFAULT_SUFFIX +from lib389.cos import CosClassicDefinition, CosClassicDefinitions, CosTemplate +from lib389.index import Indexes +from lib389.backend import Backends, DatabaseConfig +from lib389.idm.user import UserAccounts +from lib389.idm.group import Groups, Group +from lib389.topologies import topology_st as topo +from lib389.utils import ds_is_older +from lib389.plugins import MemberOfPlugin +from lib389.idm.nscontainer import nsContainer + +pytestmark = pytest.mark.tier1 + + +@pytest.fixture(scope="function") +def add_a_group_with_users(request, topo): + """ + Add a group and users, which are members of this group. 
+ """ + groups = Groups(topo.standalone, DEFAULT_SUFFIX, rdn=None) + group = groups.create(properties={'cn': 'test_group'}) + users_list = [] + users_num = 100 + users = UserAccounts(topo.standalone, DEFAULT_SUFFIX, rdn=None) + for num in range(users_num): + USER_NAME = f'test_{num}' + user = users.create(properties={ + 'uid': USER_NAME, + 'sn': USER_NAME, + 'cn': USER_NAME, + 'uidNumber': f'{num}', + 'gidNumber': f'{num}', + 'homeDirectory': f'/home/{USER_NAME}' + }) + users_list.append(user) + group.add_member(user.dn) + + def fin(): + """ + Removes group and users. + """ + # If the server crashed, start it again to do the cleanup + if not topo.standalone.status(): + topo.standalone.start() + for user in users_list: + user.delete() + group.delete() + + request.addfinalizer(fin) + + +@pytest.fixture(scope="function") +def set_small_idlistscanlimit(request, topo): + """ + Set nsslapd-idlistscanlimit to a smaller value to accelerate the reproducer + """ + db_cfg = DatabaseConfig(topo.standalone) + old_idlistscanlimit = db_cfg.get_attr_vals_utf8('nsslapd-idlistscanlimit') + db_cfg.set([('nsslapd-idlistscanlimit', '100')]) + topo.standalone.restart() + + def fin(): + """ + Set nsslapd-idlistscanlimit back to the default value + """ + # If the server crashed, start it again to do the cleanup + if not topo.standalone.status(): + topo.standalone.start() + db_cfg.set([('nsslapd-idlistscanlimit', old_idlistscanlimit)]) + topo.standalone.restart() + + request.addfinalizer(fin) + +#unstable or unstatus tests, skipped for now +@pytest.mark.flaky(max_runs=2, min_passes=1) +@pytest.mark.skipif(ds_is_older("1.4.4.4"), reason="Not implemented") +def test_reindex_task_creates_abandoned_index_file(topo): + """ + Recreating an index for the same attribute but changing + the case of for example 1 letter, results in abandoned indexfile + + :id: 07ae5274-481a-4fa8-8074-e0de50d89ac6 + :customerscenario: True + :setup: Standalone instance + :steps: + 1. 
Create a user object with additional attributes: + objectClass: mozillaabpersonalpha + mozillaCustom1: xyz + 2. Add an index entry mozillacustom1 + 3. Reindex the backend + 4. Check the content of the index (after it has been flushed to disk) mozillacustom1.db + 5. Remove the index + 6. Notice the mozillacustom1.db is removed + 7. Recreate the index but now use the exact case as mentioned in the schema + 8. Reindex the backend + 9. Check the content of the index (after it has been flushed to disk) mozillaCustom1.db + 10. Check that an ldapsearch does not return a result (mozillacustom1=xyz) + 11. Check that an ldapsearch returns the results (mozillaCustom1=xyz) + 12. Restart the instance + 13. Notice that an ldapsearch does not return a result(mozillacustom1=xyz) + 14. Check that an ldapsearch does not return a result (mozillacustom1=xyz) + 15. Check that an ldapsearch returns the results (mozillaCustom1=xyz) + 16. Reindex the backend + 17. Notice the second indexfile for this attribute + 18. Check the content of the index (after it has been flushed to disk) no mozillacustom1.db + 19. Check the content of the index (after it has been flushed to disk) mozillaCustom1.db + :expectedresults: + 1. Should Success. + 2. Should Success. + 3. Should Success. + 4. Should Success. + 5. Should Success. + 6. Should Success. + 7. Should Success. + 8. Should Success. + 9. Should Success. + 10. Should Success. + 11. Should Success. + 12. Should Success. + 13. Should Success. + 14. Should Success. + 15. Should Success. + 16. Should Success. + 17. Should Success. + 18. Should Success. + 19. Should Success. 
+ """ + + inst = topo.standalone + attr_name = "mozillaCustom1" + attr_value = "xyz" + + users = UserAccounts(inst, DEFAULT_SUFFIX) + user = users.create_test_user() + user.add("objectClass", "mozillaabpersonalpha") + user.add(attr_name, attr_value) + + backends = Backends(inst) + backend = backends.get(DEFAULT_BENAME) + indexes = backend.get_indexes() + index = indexes.create(properties={ + 'cn': attr_name.lower(), + 'nsSystemIndex': 'false', + 'nsIndexType': ['eq', 'pres'] + }) + + backend.reindex() + time.sleep(3) + assert os.path.exists(f"{inst.ds_paths.db_dir}/{DEFAULT_BENAME}/{attr_name.lower()}.db") + index.delete() + assert not os.path.exists(f"{inst.ds_paths.db_dir}/{DEFAULT_BENAME}/{attr_name.lower()}.db") + + index = indexes.create(properties={ + 'cn': attr_name, + 'nsSystemIndex': 'false', + 'nsIndexType': ['eq', 'pres'] + }) + + backend.reindex() + time.sleep(3) + assert not os.path.exists(f"{inst.ds_paths.db_dir}/{DEFAULT_BENAME}/{attr_name.lower()}.db") + assert os.path.exists(f"{inst.ds_paths.db_dir}/{DEFAULT_BENAME}/{attr_name}.db") + + entries = inst.search_s(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE, f"{attr_name}={attr_value}") + assert len(entries) > 0 + inst.restart() + entries = inst.search_s(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE, f"{attr_name}={attr_value}") + assert len(entries) > 0 + + backend.reindex() + time.sleep(3) + assert not os.path.exists(f"{inst.ds_paths.db_dir}/{DEFAULT_BENAME}/{attr_name.lower()}.db") + assert os.path.exists(f"{inst.ds_paths.db_dir}/{DEFAULT_BENAME}/{attr_name}.db") + + +@pytest.mark.bz1905450 +def test_unindexed_internal_search_crashes_server(topo, add_a_group_with_users, set_small_idlistscanlimit): + """ + An internal unindexed search was able to crash the server due to missing logging function. + + :id: 2d0e4070-96d6-46e5-b2c8-9495925e3e87 + :customerscenario: True + :setup: Standalone instance + :steps: + 1. Add a group with users + 2. Change nsslapd-idlistscanlimit to a smaller value to accelerate the reproducer + 3. 
Enable memberOf plugin + 4. Restart the instance + 5. Run memberOf fixup task + 6. Wait for the task to complete + :expectedresults: + 1. Should succeed + 2. Should succeed + 3. Should succeed + 4. Should succeed + 5. Should succeed + 6. Server should not crash + """ + inst = topo.standalone + memberof = MemberOfPlugin(inst) + memberof.enable() + inst.restart() + task = memberof.fixup(DEFAULT_SUFFIX) + task.wait() + assert inst.status() + + +def test_reject_virtual_attr_for_indexing(topo): + """Reject trying to add an index for a virtual attribute (nsrole and COS) + + :id: 0fffa7a8-aaec-44d6-bdbc-93cf4b197b56 + :customerscenario: True + :setup: Standalone instance + :steps: + 1. Create COS + 2. Adding index for nsRole is rejected + 3. Adding index for COS attribute is rejected + :expectedresults: + 1. Success + 2. Success + 3. Success + """ + # Create COS: add container, create template, and definition + nsContainer(topo.standalone, f'cn=cosClassicTemplates,{DEFAULT_SUFFIX}').create(properties={'cn': 'cosClassicTemplates'}) + properties = {'employeeType': 'EngType', + 'cn': '"cn=filterRoleEngRole,dc=example,dc=com",cn=cosClassicTemplates,dc=example,dc=com' + } + CosTemplate(topo.standalone, + 'cn="cn=filterRoleEngRole,dc=example,dc=com",cn=cosClassicTemplates,{}'.format( + DEFAULT_SUFFIX)) \ + .create(properties=properties) + properties = {'cosTemplateDn': 'cn=cosClassicTemplate,{}'.format(DEFAULT_SUFFIX), + 'cosAttribute': 'employeeType', + 'cosSpecifier': 'nsrole', + 'cn': 'cosClassicGenerateEmployeeTypeUsingnsrole'} + CosClassicDefinition(topo.standalone, 'cn=cosClassicGenerateEmployeeTypeUsingnsrole,{}'.format(DEFAULT_SUFFIX)) \ + .create(properties=properties) + + # Test nsrole and cos attribute + be_insts = Backends(topo.standalone).list() + for be in be_insts: + if be.get_attr_val_utf8_l('nsslapd-suffix') == DEFAULT_SUFFIX: + # Attempt to add nsRole as index + with pytest.raises(ValueError): + be.add_index('nsrole', ['eq']) + # Attempt to add COS attribute 
as index + with pytest.raises(ValueError): + be.add_index('employeeType', ['eq']) + break + + +if __name__ == "__main__": + # Run isolated + # -s for DEBUG mode + CURRENT_FILE = os.path.realpath(__file__) + pytest.main("-s %s" % CURRENT_FILE) diff --git a/dirsrvtests/tests/suites/ldapi/__init__.py b/dirsrvtests/tests/suites/ldapi/__init__.py new file mode 100644 index 0000000..330903c --- /dev/null +++ b/dirsrvtests/tests/suites/ldapi/__init__.py @@ -0,0 +1,3 @@ +""" + :Requirement: 389-ds-base: ldapi +""" diff --git a/dirsrvtests/tests/suites/ldapi/ldapi_test.py b/dirsrvtests/tests/suites/ldapi/ldapi_test.py new file mode 100644 index 0000000..a101f1d --- /dev/null +++ b/dirsrvtests/tests/suites/ldapi/ldapi_test.py @@ -0,0 +1,162 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2022 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. +# --- END COPYRIGHT BLOCK --- +# +import logging +import pytest +import os +import subprocess +from lib389._constants import DEFAULT_SUFFIX, DN_DM +from lib389.idm.user import UserAccounts +from lib389.ldapi import LDAPIMapping, LDAPIFixedMapping +from lib389.topologies import topology_st as topo +from lib389.tasks import LDAPIMappingReloadTask + + +def test_ldapi_authdn_attr_rewrite(topo, request): + """Test LDAPI Authentication DN mapping feature + + :id: e8d68979-4b3d-4e2d-89ed-f9bad827718c + :setup: Standalone Instance + :steps: + 1. Set LDAPI configuration + 2. Create LDAP user + 3. Create OS user + 4. Create entries under cn=config for auto bind subtree and mapping entry + 5. Do an LDAPI ldapsearch as the OS user + 6. OS user was mapped expected LDAP entry + 7. Do search using root & LDAPI + :expectedresults: + 1. Success + 2. Success + 3. Success + 4. Success + 5. Success + 6. Success + 7. 
Success + """ + + LINUX_USER = "ldapi_test_lib389_user" + LINUX_USER2 = "ldapi_test_lib389_user2" + LINUX_USER3 = "ldapi_test_lib389_user3" + LINUX_PWD = "5ecret_137" + LDAP_ENTRY_DN = "uid=test_ldapi,ou=people,dc=example,dc=com" + LDAP_ENTRY_DN2 = "uid=test_ldapi2,ou=people,dc=example,dc=com" + LDAP_ENTRY_DN3 = "uid=test_ldapi3,ou=people,dc=example,dc=com" + LDAPI_AUTH_CONTAINER = "cn=auto_bind,cn=config" + + def fin(): + # Remove the OS users + for user in [LINUX_USER, LINUX_USER2, LINUX_USER3]: + try: + subprocess.run(['userdel', '-r', user]) + except: + pass + request.addfinalizer(fin) + + # Must be root + if os.geteuid() != 0: + return + + # Perform config tasks + topo.standalone.config.set('nsslapd-accesslog-logbuffering', 'off') + topo.standalone.config.set('nsslapd-ldapiDNMappingBase', 'cn=auto_bind,cn=config') + topo.standalone.config.set('nsslapd-ldapimaptoentries', 'on') + topo.standalone.config.set('nsslapd-ldapiuidnumbertype', 'uidNumber') + topo.standalone.config.set('nsslapd-ldapigidnumbertype', 'gidNumber') + ldapi_socket_raw = topo.standalone.config.get_attr_val_utf8('nsslapd-ldapifilepath') + ldapi_socket = ldapi_socket_raw.replace('/', '%2F') + + # Create LDAP users + users = UserAccounts(topo.standalone, DEFAULT_SUFFIX) + user_properties = { + 'uid': 'test_ldapi', + 'cn': 'test_ldapi', + 'sn': 'test_ldapi', + 'uidNumber': '2020', + 'gidNumber': '2020', + 'userpassword': 'password', + 'description': 'userdesc', + 'homeDirectory': '/home/test_ldapi'} + users.create(properties=user_properties) + + user_properties = { + 'uid': 'test_ldapi2', + 'cn': 'test_ldapi2', + 'sn': 'test_ldapi2', + 'uidNumber': '2021', + 'gidNumber': '2021', + 'userpassword': 'password', + 'description': 'userdesc', + 'homeDirectory': '/home/test_ldapi2'} + users.create(properties=user_properties) + + user_properties = { + 'uid': 'test_ldapi3', + 'cn': 'test_ldapi3', + 'sn': 'test_ldapi3', + 'uidNumber': '2023', + 'gidNumber': '2023', + 'userpassword': 'password', + 
'description': 'userdesc', + 'homeDirectory': '/home/test_ldapi3'} + users.create(properties=user_properties) + + # Create OS users + subprocess.run(['useradd', '-u', '5001', '-p', LINUX_PWD, LINUX_USER]) + subprocess.run(['useradd', '-u', '5002', '-p', LINUX_PWD, LINUX_USER2]) + + # Create some mapping entries + ldapi_mapping = LDAPIMapping(topo.standalone, LDAPI_AUTH_CONTAINER) + ldapi_mapping.create_mapping(name='entry_map1', username='dummy1', + ldap_dn='uid=dummy1,dc=example,dc=com') + ldapi_mapping.create_mapping(name='entry_map2', username=LINUX_USER, + ldap_dn=LDAP_ENTRY_DN) + ldapi_mapping.create_mapping(name='entry_map3', username='dummy2', + ldap_dn='uid=dummy3,dc=example,dc=com') + + # Restart server for config to take effect, and clear the access log + topo.standalone.deleteAccessLogs(restart=True) + + # Bind as OS user using ldapsearch + ldapsearch_cmd = f'ldapsearch -b \'\' -s base -Y EXTERNAL -H ldapi://{ldapi_socket}' + os.system(f'su {LINUX_USER} -c "{ldapsearch_cmd}"') + + # Check access log + assert topo.standalone.ds_access_log.match(f'.*AUTOBIND dn="{LDAP_ENTRY_DN}".*') + + # Bind as Root DN just to make sure it still works + assert os.system(ldapsearch_cmd) == 0 + assert topo.standalone.ds_access_log.match(f'.*AUTOBIND dn="{DN_DM}".*') + + # Create some fixed mapping + ldapi_fixed_mapping = LDAPIFixedMapping(topo.standalone, LDAPI_AUTH_CONTAINER) + ldapi_fixed_mapping.create_mapping("fixed", "5002", "5002", ldap_dn=LDAP_ENTRY_DN2) + topo.standalone.deleteAccessLogs(restart=True) + + # Bind as OS user using ldapsearch + os.system(f'su {LINUX_USER2} -c "{ldapsearch_cmd}"') + + # Check access log + assert topo.standalone.ds_access_log.match(f'.*AUTOBIND dn="{LDAP_ENTRY_DN2}".*') + + # Add 3rd user, and test reload task + subprocess.run(['useradd', '-u', '5003', '-p', LINUX_PWD, LINUX_USER3]) + ldapi_fixed_mapping.create_mapping("reload", "5003", "5003", ldap_dn=LDAP_ENTRY_DN3) + + reload_task = LDAPIMappingReloadTask(topo.standalone).create() + 
reload_task.wait(timeout=20) + + os.system(f'su {LINUX_USER3} -c "{ldapsearch_cmd}"') + assert topo.standalone.ds_access_log.match(f'.*AUTOBIND dn="{LDAP_ENTRY_DN3}".*') + + +if __name__ == '__main__': + # Run isolated + # -s for DEBUG mode + CURRENT_FILE = os.path.realpath(__file__) + pytest.main(["-s", CURRENT_FILE]) diff --git a/dirsrvtests/tests/suites/lib389/__init__.py b/dirsrvtests/tests/suites/lib389/__init__.py new file mode 100644 index 0000000..5b9f4c0 --- /dev/null +++ b/dirsrvtests/tests/suites/lib389/__init__.py @@ -0,0 +1,3 @@ +""" + :Requirement: 389-ds-base: Lib389 +""" diff --git a/dirsrvtests/tests/suites/lib389/config_compare_test.py b/dirsrvtests/tests/suites/lib389/config_compare_test.py new file mode 100644 index 0000000..9ed4da6 --- /dev/null +++ b/dirsrvtests/tests/suites/lib389/config_compare_test.py @@ -0,0 +1,49 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2019 William Brown +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. +# --- END COPYRIGHT BLOCK --- +# +import os +import pytest + +from lib389.topologies import topology_i2 +from lib389.config import Config + +pytestmark = pytest.mark.tier1 + +def test_config_compare(topology_i2): + """ + Compare test between cn=config of two different Directory Server instances. + + :id: 7b3e17d6-41ca-4926-bc3b-8173dd912a61 + + :setup: two isolated directory servers + + :steps: 1. Compare if cn=config is the same + + :expectedresults: 1. It should be the same (excluding unique id attrs) + """ + st1_config = topology_i2.ins.get('standalone1').config + st2_config = topology_i2.ins.get('standalone2').config + # 'nsslapd-port' attribute is expected to be same in cn=config comparison, + # but they are different in our testing environment + # as we are using 2 DS instances running, both running simultaneously. + # Hence explicitly adding 'nsslapd-port' to compare_exclude. 
+ st1_config._compare_exclude.append('nsslapd-port') + st2_config._compare_exclude.append('nsslapd-port') + st1_config._compare_exclude.append('nsslapd-secureport') + st2_config._compare_exclude.append('nsslapd-secureport') + st1_config._compare_exclude.append('nsslapd-ldapssotoken-secret') + st2_config._compare_exclude.append('nsslapd-ldapssotoken-secret') + + assert Config.compare(st1_config, st2_config) + + +if __name__ == '__main__': + # Run isolated + # -s for DEBUG mode + CURRENT_FILE = os.path.realpath(__file__) + pytest.main("-s %s" % CURRENT_FILE) diff --git a/dirsrvtests/tests/suites/lib389/dsldapobject/__init__.py b/dirsrvtests/tests/suites/lib389/dsldapobject/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/dirsrvtests/tests/suites/lib389/dsldapobject/dn_construct_test.py b/dirsrvtests/tests/suites/lib389/dsldapobject/dn_construct_test.py new file mode 100644 index 0000000..86d0a0d --- /dev/null +++ b/dirsrvtests/tests/suites/lib389/dsldapobject/dn_construct_test.py @@ -0,0 +1,236 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2019 William Brown +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. +# --- END COPYRIGHT BLOCK --- +# + +import ldap +import pytest + +from lib389._constants import DEFAULT_SUFFIX +from lib389.topologies import topology_st + +from lib389.idm.group import Groups, Group + +pytestmark = pytest.mark.tier1 + +################################################################################# +# This is a series of test cases to assert that various DN construction scenarios +# work as expected in lib389. +# +# DSLdapObjects are designed to allow explicit control, or to "safely assume" +# so that ldap concepts aren't as confusing. 
+# You can thus construct an object with a DN that is: +# * defined by you expliticly +# * derived from properties of the object automatically +# +# There are also two paths to construction: from the pluralised factory style +# builder, or from the singular. The factory style has very few extra parts +# but it's worth testing anyway. +# +# In no case do we derive a multi value rdn due to their complexity. +# + +def test_mul_explicit_rdn(topology_st): + """Test that with multiple cn and an explicit rdn, we use the rdn + + :id: b39ef204-45c0-4a74-9b59-b4ac1199d78c + + :setup: standalone instance + + :steps: 1. Create with mulitple cn and rdn + + :expectedresults: 1. Create success + """ + # Create with an explicit rdn value, given to the properties/rdn + gps = Groups(topology_st.standalone, DEFAULT_SUFFIX) + gp = gps.create('cn=test_mul_explicit_rdn', + properties={ + 'cn': ['test_mul_explicit_rdn', 'other_cn_test_mul_explicit_rdn'], + }) + assert gp.dn.lower() == f'cn=test_mul_explicit_rdn,ou=groups,{DEFAULT_SUFFIX}'.lower() + gp.delete() + +def test_mul_derive_single_dn(topology_st): + """Test that with single cn we derive rdn correctly. + + :id: f34f271a-ca57-4aa0-905a-b5392ce06c79 + + :setup: standalone instance + + :steps: 1. Create with single cn + + :expectedresults: 1. Create success + """ + gps = Groups(topology_st.standalone, DEFAULT_SUFFIX) + gp = gps.create(properties={ + 'cn': ['test_mul_derive_single_dn'], + }) + assert gp.dn.lower() == f'cn=test_mul_derive_single_dn,ou=groups,{DEFAULT_SUFFIX}'.lower() + gp.delete() + +def test_mul_derive_mult_dn(topology_st): + """Test that with multiple cn we derive rdn correctly. + + :id: 1e1f5483-bfad-4f73-9dfb-aec54d08b268 + + :setup: standalone instance + + :steps: 1. Create with multiple cn + + :expectedresults: 1. 
Create success + """ + gps = Groups(topology_st.standalone, DEFAULT_SUFFIX) + gp = gps.create(properties={ + 'cn': ['test_mul_derive_mult_dn', 'test_mul_derive_single_dn'], + }) + assert gp.dn.lower() == f'cn=test_mul_derive_mult_dn,ou=groups,{DEFAULT_SUFFIX}'.lower() + gp.delete() + +def test_sin_explicit_dn(topology_st): + """Test explicit dn with create + + :id: 2d812225-243b-4f87-85ad-d403a4ae0267 + + :setup: standalone instance + + :steps: 1. Create with explicit dn + + :expectedresults: 1. Create success + """ + expect_dn = f'cn=test_sin_explicit_dn,ou=groups,{DEFAULT_SUFFIX}' + gp = Group(topology_st.standalone, dn=expect_dn) + gp.create(properties={ + 'cn': ['test_sin_explicit_dn'], + }) + assert gp.dn.lower() == expect_dn.lower() + gp.delete() + +def test_sin_explicit_rdn(topology_st): + """Test explicit rdn with create. + + :id: a2c14e50-8086-4edb-9088-3f4a8e875c3a + + :setup: standalone instance + + :steps: 1. Create with explicit rdn + + :expectedresults: 1. Create success + """ + gp = Group(topology_st.standalone) + gp.create(rdn='cn=test_sin_explicit_rdn', + basedn=f'ou=groups,{DEFAULT_SUFFIX}', + properties={ + 'cn': ['test_sin_explicit_rdn'], + }) + assert gp.dn.lower() == f'cn=test_sin_explicit_rdn,ou=groups,{DEFAULT_SUFFIX}'.lower() + gp.delete() + +def test_sin_derive_single_dn(topology_st): + """Derive the dn from a single cn + + :id: d7597016-214c-4fbd-8b48-71eb16ea9ede + + :setup: standalone instance + + :steps: 1. Create with a single cn (no dn, no rdn) + + :expectedresults: 1. Create success + """ + gp = Group(topology_st.standalone) + gp.create(basedn=f'ou=groups,{DEFAULT_SUFFIX}', + properties={ + 'cn': ['test_sin_explicit_dn'], + }) + assert gp.dn.lower() == f'cn=test_sin_explicit_dn,ou=groups,{DEFAULT_SUFFIX}'.lower() + gp.delete() + +def test_sin_derive_mult_dn(topology_st): + """Derive the dn from multiple cn + + :id: 0a1a7132-a08f-4b56-ae52-30c8ca59cfaf + + :setup: standalone instance + + :steps: 1. 
Create with multiple cn + + :expectedresults: 1. Create success + """ + gp = Group(topology_st.standalone) + gp.create(basedn=f'ou=groups,{DEFAULT_SUFFIX}', + properties={ + 'cn': ['test_sin_derive_mult_dn', 'other_test_sin_derive_mult_dn'], + }) + assert gp.dn.lower() == f'cn=test_sin_derive_mult_dn,ou=groups,{DEFAULT_SUFFIX}'.lower() + gp.delete() + +def test_sin_invalid_no_basedn(topology_st): + """Test that with insufficient data, create fails. + + :id: a710b81c-cb74-4632-97b3-bdbcccd40954 + + :setup: standalone instance + + :steps: 1. Create with no basedn (no rdn derivation will work) + + :expectedresults: 1. Create fails + """ + gp = Group(topology_st.standalone) + # No basedn, so we can't derive the full dn from this. + with pytest.raises(ldap.UNWILLING_TO_PERFORM): + gp.create(properties={ + 'cn': ['test_sin_invalid_no_basedn'], + }) + +def test_sin_invalid_no_rdn(topology_st): + """Test that with no cn, rdn derivation fails. + + :id: c3bb28f8-db59-4d8a-8920-169879ef702b + + :setup: standalone instance + + :steps: 1. Create with no cn + + :expectedresults: 1. Create fails + """ + gp = Group(topology_st.standalone) + with pytest.raises(ldap.UNWILLING_TO_PERFORM): + # Note lack of rdn derivable type (cn) AND no rdn + gp.create(basedn=f'ou=groups,{DEFAULT_SUFFIX}', + properties={ + 'member': ['test_sin_explicit_dn'], + }) + +def test_sin_non_present_rdn(topology_st): + """Test that with an rdn not present in attributes, create succeeds in some cases. + + :id: a5d9cb24-8907-4622-ac85-90407a66e00a + + :setup: standalone instance + + :steps: 1. Create with an rdn not in properties + + :expectedresults: 1. Create success + """ + # Test that creating something with an rdn not present in the properties works + # NOTE: I think that this is 389-ds making this work, NOT lib389. 
+ gp1 = Group(topology_st.standalone) + gp1.create(rdn='cn=test_sin_non_present_rdn', + basedn=f'ou=groups,{DEFAULT_SUFFIX}', + properties={ + 'cn': ['other_test_sin_non_present_rdn'], + }) + assert gp1.dn.lower() == f'cn=test_sin_non_present_rdn,ou=groups,{DEFAULT_SUFFIX}'.lower() + gp1.delete() + + # Now, test where there is no cn. lib389 is blocking this today, but + # 50259 will change this. + gp2 = Group(topology_st.standalone) + gp2.create(rdn='cn=test_sin_non_present_rdn', + basedn=f'ou=groups,{DEFAULT_SUFFIX}', + properties={}) + assert gp2.dn.lower() == f'cn=test_sin_non_present_rdn,ou=groups,{DEFAULT_SUFFIX}'.lower() + gp2.delete() diff --git a/dirsrvtests/tests/suites/lib389/idm/__init__.py b/dirsrvtests/tests/suites/lib389/idm/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/dirsrvtests/tests/suites/lib389/idm/account_test.py b/dirsrvtests/tests/suites/lib389/idm/account_test.py new file mode 100644 index 0000000..32cbdaf --- /dev/null +++ b/dirsrvtests/tests/suites/lib389/idm/account_test.py @@ -0,0 +1,42 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2021 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. +# --- END COPYRIGHT BLOCK --- +# +import os +import pytest +from lib389.idm.user import UserAccounts, Account +from lib389.topologies import topology_st as topo +from lib389._constants import DEFAULT_SUFFIX + + +def test_account_delete(topo): + """ + Test that delete function is working with Accounts/Account + + :id: 9b036f14-5144-4862-b18c-a6d91b7a1620 + + :setup: Standalone instance + + :steps: + 1. Create a test user. + 2. Delete the test user using Account class object. + + :expectedresults: + 1. Operation should be successful + 2. 
Operation should be successful + """ + users = UserAccounts(topo.standalone, DEFAULT_SUFFIX) + users.create_test_user(uid=1001) + account = Account(topo.standalone, f'uid=test_user_1001,ou=People,{DEFAULT_SUFFIX}') + account.delete() + + +if __name__ == '__main__': + # Run isolated + # -s for DEBUG mode + CURRENT_FILE = os.path.realpath(__file__) + pytest.main("-s %s" % CURRENT_FILE) diff --git a/dirsrvtests/tests/suites/lib389/idm/user_compare_i2_test.py b/dirsrvtests/tests/suites/lib389/idm/user_compare_i2_test.py new file mode 100644 index 0000000..f2e270e --- /dev/null +++ b/dirsrvtests/tests/suites/lib389/idm/user_compare_i2_test.py @@ -0,0 +1,60 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2022 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. +# --- END COPYRIGHT BLOCK --- +# +import os +import pytest +from lib389._constants import DEFAULT_SUFFIX +from lib389.idm.user import UserAccounts, UserAccount +from lib389.topologies import topology_i2 + +pytestmark = pytest.mark.tier1 + +def test_user_compare_i2(topology_i2): + """ + Compare test between users of two different Directory Server instances. + + :id: f0ffaf59-e2c2-41ec-9f26-e9b1ef287463 + + :setup: two isolated directory servers + + :steps: 1. Add an identical user to each server + 2. Compare if the users are "the same" + + :expectedresults: 1. Users are added + 2. 
The users are reported as the same + """ + st1_users = UserAccounts(topology_i2.ins.get('standalone1'), DEFAULT_SUFFIX) + st2_users = UserAccounts(topology_i2.ins.get('standalone2'), DEFAULT_SUFFIX) + + # Create user + user_properties = { + 'uid': 'testuser', + 'cn': 'testuser', + 'sn': 'user', + 'uidNumber': '1000', + 'gidNumber': '2000', + 'homeDirectory': '/home/testuser' + } + + st1_users.create(properties=user_properties) + st1_testuser = st1_users.get('testuser') + + st2_users.create(properties=user_properties) + st2_testuser = st2_users.get('testuser') + + st1_testuser._compare_exclude.append('entryuuid') + st2_testuser._compare_exclude.append('entryuuid') + + assert UserAccount.compare(st1_testuser, st2_testuser) + + +if __name__ == '__main__': + # Run isolated + # -s for DEBUG mode + CURRENT_FILE = os.path.realpath(__file__) + pytest.main("-s %s" % CURRENT_FILE) diff --git a/dirsrvtests/tests/suites/lib389/idm/user_compare_m2Repl_test.py b/dirsrvtests/tests/suites/lib389/idm/user_compare_m2Repl_test.py new file mode 100644 index 0000000..54e1042 --- /dev/null +++ b/dirsrvtests/tests/suites/lib389/idm/user_compare_m2Repl_test.py @@ -0,0 +1,65 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2019 William Brown +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. +# --- END COPYRIGHT BLOCK --- +# +import os +import pytest +from lib389._constants import DEFAULT_SUFFIX +from lib389.replica import ReplicationManager +from lib389.idm.user import UserAccounts, UserAccount +from lib389.topologies import topology_m2 + +pytestmark = pytest.mark.tier1 + +def test_user_compare_m2Repl(topology_m2): + """ + User compare test between users of supplier to supplier replication topology. + + :id: 7c243bea-4075-4304-864d-5b789d364871 + + :setup: 2 supplier MMR + + :steps: 1. Add a user to m1 + 2. Wait for replication + 3. Compare if the user is the same + + :expectedresults: 1. User is added + 2. Replication success + 3. 
The user is the same + """ + rm = ReplicationManager(DEFAULT_SUFFIX) + m1 = topology_m2.ms.get('supplier1') + m2 = topology_m2.ms.get('supplier2') + + m1_users = UserAccounts(m1, DEFAULT_SUFFIX) + m2_users = UserAccounts(m2, DEFAULT_SUFFIX) + + # Create 1st user + user1_properties = { + 'uid': 'testuser', + 'cn': 'testuser', + 'sn': 'user', + 'uidNumber': '1000', + 'gidNumber': '2000', + 'homeDirectory': '/home/testuser' + } + + m1_users.create(properties=user1_properties) + m1_testuser = m1_users.get('testuser') + + rm.wait_for_replication(m1, m2) + + m2_testuser = m2_users.get('testuser') + + assert UserAccount.compare(m1_testuser, m2_testuser) + + +if __name__ == '__main__': + # Run isolated + # -s for DEBUG mode + CURRENT_FILE = os.path.realpath(__file__) + pytest.main("-s %s" % CURRENT_FILE) diff --git a/dirsrvtests/tests/suites/lib389/idm/user_compare_st_test.py b/dirsrvtests/tests/suites/lib389/idm/user_compare_st_test.py new file mode 100644 index 0000000..d1db545 --- /dev/null +++ b/dirsrvtests/tests/suites/lib389/idm/user_compare_st_test.py @@ -0,0 +1,86 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2022 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. +# --- END COPYRIGHT BLOCK --- +# +import os +import pytest +from lib389._constants import DEFAULT_SUFFIX +from lib389.idm.group import Groups +from lib389.idm.user import UserAccounts, UserAccount +from lib389.topologies import topology_st as topology + +pytestmark = pytest.mark.tier1 + +def test_user_compare(topology): + """ + Testing compare function + + :id: 26f2dea9-be1e-48ca-bcea-79592823390c + + :setup: Standalone instance + + :steps: + 1. Testing comparison of two different users. + 2. Testing comparison of 'str' object with itself. + 3. Testing comparison of user with similar user (different object id). + 4. Testing comparison of user with group. + + :expectedresults: + 1. Should fail to compare + 2. 
Should raise value error + 3. Should be the same despite uuid difference + 4. Should fail to compare + """ + users = UserAccounts(topology.standalone, DEFAULT_SUFFIX) + groups = Groups(topology.standalone, DEFAULT_SUFFIX) + # Create 1st user + user1_properties = { + 'uid': 'testuser1', + 'cn': 'testuser1', + 'sn': 'user', + 'uidNumber': '1000', + 'gidNumber': '2000', + 'homeDirectory': '/home/testuser1' + } + + users.create(properties=user1_properties) + testuser1 = users.get('testuser1') + # Create 2nd user + user2_properties = { + 'uid': 'testuser2', + 'cn': 'testuser2', + 'sn': 'user', + 'uidNumber': '1001', + 'gidNumber': '2002', + 'homeDirectory': '/home/testuser2' + } + + users.create(properties=user2_properties) + testuser2 = users.get('testuser2') + # create group + group_properties = { + 'cn' : 'group1', + 'description' : 'testgroup' + } + + testuser1_copy = users.get("testuser1") + group = groups.create(properties=group_properties) + + assert UserAccount.compare(testuser1, testuser2) is False + + with pytest.raises(ValueError): + UserAccount.compare("test_str_object","test_str_object") + + assert UserAccount.compare(testuser1, testuser1_copy) + assert UserAccount.compare(testuser1, group) is False + + +if __name__ == '__main__': + # Run isolated + # -s for DEBUG mode + CURRENT_FILE = os.path.realpath(__file__) + pytest.main("-s %s" % CURRENT_FILE) diff --git a/dirsrvtests/tests/suites/lib389/timeout_test.py b/dirsrvtests/tests/suites/lib389/timeout_test.py new file mode 100644 index 0000000..97e6abc --- /dev/null +++ b/dirsrvtests/tests/suites/lib389/timeout_test.py @@ -0,0 +1,60 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2023 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. 
+# --- END COPYRIGHT BLOCK --- +# +import logging +import pytest +import os +import time +from lib389._constants import * +from lib389.topologies import topology_st as topo, set_timeout + +logging.basicConfig(format='%(asctime)s %(message)s', force=True) +log = logging.getLogger(__name__) +log.setLevel(logging.DEBUG) +# create console handler with a higher log level +ch = logging.StreamHandler() +ch.setLevel(logging.DEBUG) +# create formatter and add it to the handlers +formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s') +ch.setFormatter(formatter) +# add the handlers to logger +log.addHandler(ch) + +TEST_TIMEOUT = 150 + +@pytest.fixture(autouse=True, scope="module") +def init_timeout(): + set_timeout(TEST_TIMEOUT) + +def test_timeout(topo): + """Specify a test case purpose or name here + + :id: 4a2917d2-ad4c-44a7-aa5f-daad26d1d36e + :setup: Standalone Instance + :steps: + 1. Fill in test case steps here + 2. And indent them like this (RST format requirement) + :expectedresults: + 1. Fill in the result that is expected + 2. 
For each test step + """ + + with pytest.raises(TimeoutError): + log.info("Start waiting %d seconds" % TEST_TIMEOUT ) + time.sleep(TEST_TIMEOUT) + log.info("End waiting") + for inst in topo: + assert inst.status() is False + + +if __name__ == '__main__': + # Run isolated + # -s for DEBUG mode + CURRENT_FILE = os.path.realpath(__file__) + pytest.main(["-s", CURRENT_FILE]) + diff --git a/dirsrvtests/tests/suites/logging/__init__.py b/dirsrvtests/tests/suites/logging/__init__.py new file mode 100644 index 0000000..7f812e3 --- /dev/null +++ b/dirsrvtests/tests/suites/logging/__init__.py @@ -0,0 +1,3 @@ +""" + :Requirement: 389-ds-base: Directory Server Logging Configurations +""" diff --git a/dirsrvtests/tests/suites/logging/logging_compression_test.py b/dirsrvtests/tests/suites/logging/logging_compression_test.py new file mode 100644 index 0000000..e30874c --- /dev/null +++ b/dirsrvtests/tests/suites/logging/logging_compression_test.py @@ -0,0 +1,125 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2022 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. 
+# --- END COPYRIGHT BLOCK --- + +import glob +import ldap +import logging +import pytest +import os +import time +from lib389._constants import DEFAULT_SUFFIX +from lib389.dseldif import DSEldif +from lib389.topologies import topology_st as topo +from lib389.idm.domain import Domain +from lib389.idm.directorymanager import DirectoryManager + +log = logging.getLogger(__name__) + +pytestmark = pytest.mark.tier1 + +def log_rotated_count(log_type, log_dir, check_compressed=False): + # Check if the log was rotated + log_file = f'{log_dir}/{log_type}.2*' + if check_compressed: + log_file += ".gz" + return len(glob.glob(log_file)) + + +def update_and_sleep(inst, suffix, sleep=True): + for loop in range(2): + for count in range(10): + suffix.replace('description', str(count)) + suffix.get_attr_val('description') + with pytest.raises(ldap.OBJECT_CLASS_VIOLATION): + suffix.add('doesNotExist', 'error') + # For security log we need binds to populate the log + DirectoryManager(inst).bind() + + if sleep: + # log rotation smallest unit is 1 minute + time.sleep(61) + else: + # should still sleep for a little bit + time.sleep(1) + + +def test_logging_compression(topo): + """Test logging compression works, and log rotation/deletion is still + functional. This also tests a mix of non-compressed/compressed logs. + + :id: 15b5ed0e-628c-48e5-a61e-43908590c9f1 + :setup: Standalone Instance + :steps: + 1. Enable all the logs (audit,and auditfail) + 2. Set an aggressive rotation/deletion policy + 3. Make sure all logs are rotated at least once + 4. Enable log compression on all logs + 5. Make sure all logs are rotated again and are compressed + 6. Make sure log deletion is working + :expectedresults: + 1. Success + 2. Success + 3. Success + 4. Success + 5. Success + 6. 
Success + """ + + inst = topo.standalone + suffix = Domain(topo.standalone, DEFAULT_SUFFIX) + timeunit = "minute" # This is the smallest time unit available + log_dir = inst.get_log_dir() + + # Enable all the logs (audit, and auditfail) + inst.stop() + dse_ldif = DSEldif(inst) + dse_ldif.replace('cn=config', 'nsslapd-auditfaillog', log_dir + "/auditfail") + inst.start() + + inst.config.set('nsslapd-auditlog-logging-enabled', 'on') + inst.config.set('nsslapd-auditfaillog-logging-enabled', 'on') + inst.config.set('nsslapd-accesslog-logbuffering', 'off') + inst.config.set('nsslapd-securitylog-logbuffering', 'off') + inst.config.set('nsslapd-errorlog-level', '64') + + # Set an aggressive rotation/deletion policy for all logs + for ds_log in ['accesslog', 'auditlog', 'auditfaillog', 'errorlog', 'securitylog']: + inst.config.set('nsslapd-' + ds_log + '-logrotationtime', '1') + inst.config.set('nsslapd-' + ds_log + '-logrotationtimeunit', timeunit) + inst.config.set('nsslapd-' + ds_log + '-maxlogsize', '1') + inst.config.set('nsslapd-' + ds_log + '-maxlogsperdir', '3') + + # Perform ops that will write to each log + update_and_sleep(topo.standalone, suffix) + + # Make sure logs are rotated + for log_type in ['access', 'audit', 'auditfail', 'errors', 'security']: + assert log_rotated_count(log_type, log_dir) > 0 + + # Enable log compression on all logs + for ds_log in ['accesslog', 'auditlog', 'auditfaillog', 'errorlog', 'securitylog']: + inst.config.set('nsslapd-' + ds_log + '-compress', 'on') + + # Perform ops that will write to each log + update_and_sleep(topo.standalone, suffix) + + # Make sure all logs were rotated again and are compressed + for log_type in ['access', 'audit', 'auditfail', 'errors', 'security']: + assert log_rotated_count(log_type, log_dir, check_compressed=True) > 0 + + # Make sure log deletion is working + update_and_sleep(topo.standalone, suffix, sleep=False) + for log_type in ['access', 'audit', 'auditfail', 'errors', 'security']: + assert 
@pytest.mark.parametrize("attr, invalid_vals, valid_vals",
                         [
                             ("logexpirationtime", ["-2", "0"], ["1", "-1"]),
                             ("maxlogsize", ["-2", "0"], ["100", "-1"]),
                             ("logmaxdiskspace", ["-2", "0"], ["100", "-1"]),
                             ("logminfreediskspace", ["-2", "0"], ["100", "-1"]),
                             ("mode", ["888", "778", "77", "7777"], ["777", "000", "600"]),
                             ("maxlogsperdir", ["-1", "0"], ["1", "20"]),
                             ("logrotationsynchour", ["-1", "24"], ["0", "23"]),
                             ("logrotationsyncmin", ["-1", "60"], ["0", "59"]),
                             ("logrotationtime", ["-2", "0"], ["100", "-1"])
                         ])
def test_logging_digit_config(topo, attr, invalid_vals, valid_vals):
    """Validate logging config settings

    :id: a0ef30e5-538b-46fa-9762-01a4435a15e9
    :parametrized: yes
    :setup: Standalone Instance
    :steps:
        1. Test log expiration time
        2. Test log max size
        3. Test log max disk space
        4. Test log min disk space
        5. Test log mode
        6. Test log max number of logs
        7. Test log rotation hour
        8. Test log rotation minute
        9. Test log rotation time
    :expectedresults:
        1. Success
        2. Success
        3. Success
        4. Success
        5. Success
        6. Success
        7. Success
        8. Success
        9. Success
    """
    # Build the full attribute name for each of the five server logs
    log_attrs = [f'nsslapd-{prefix}-{attr}'
                 for prefix in ('accesslog', 'auditlog', 'auditfaillog',
                                'errorlog', 'securitylog')]

    for log_attr in log_attrs:
        # Out-of-range values, an absurdly large value, and non-digits
        # must all be rejected by the server
        for bad_val in invalid_vals + [big_value, "abc"]:
            with pytest.raises(ldap.LDAPError):
                topo.standalone.config.set(log_attr, bad_val)

        # In-range values must be accepted
        for good_val in valid_vals:
            topo.standalone.config.set(log_attr, good_val)
+# --- END COPYRIGHT BLOCK --- + +import ldap +import json +import logging +import pytest +import os +import re +import signal +import subprocess +import time +from lib389._constants import DEFAULT_SUFFIX, PASSWORD, DN_DM +from lib389.topologies import topology_st as topo +from lib389.idm.user import UserAccount, UserAccounts +from lib389.dirsrv_log import DirsrvSecurityLog +from lib389.utils import ensure_str +from lib389.idm.domain import Domain +from lib389.idm.account import Anonymous +from lib389.config import CertmapLegacy +from lib389.nss_ssl import NssSsl + + +log = logging.getLogger(__name__) + +DN = "uid=security,ou=people," + DEFAULT_SUFFIX +DN_NO_ENTRY = "uid=fredSomething,ou=people," + DEFAULT_SUFFIX +DN_NO_BACKEND = "uid=not_there,o=nope," + DEFAULT_SUFFIX +DN_QUOATED = "uid=\"cn=mark\",ou=people," + DEFAULT_SUFFIX +DN_QUOATED_ESCAPED = "uid=cn\\3dmark,ou=people," + DEFAULT_SUFFIX +DN_LONG = "uid=" + ("z" * 520) + ",ou=people," + DEFAULT_SUFFIX +DN_LONG_TRUNCATED = "uid=" + ("z" * 508) + "..." 
def check_log(inst, event_id, msg, dn=None, bind_method=None):
    """Assert that the security log contains a matching JSON event.

    :param inst: DirSrv instance whose security log is searched
    :param event_id: expected value of the event's ``event`` field
    :param msg: expected value of the event's ``msg`` field
    :param dn: if given, the event's ``dn`` field must equal this DN
               (compared lowercased, as the server logs normalized DNs)
    :param bind_method: if given, the event's ``bind_method`` field must
                        equal this value
    :raises AssertionError: if no matching event is found

    Bug fix: the original implementation returned success on the FIRST
    log line whenever both *dn* and *bind_method* were None, even if the
    event/msg did not match, so calls such as
    ``check_log(inst, "AUTHZ_ERROR", ...)`` passed trivially.  Every
    caller-supplied criterion is now required to match.
    """
    time.sleep(1)  # give the server a moment to flush the log to disk

    security_log = DirsrvSecurityLog(inst)
    for line in security_log.readlines():
        # Log title/banner lines start with whitespace; they are not JSON
        if re.match(r'[ \t]', line):
            continue

        event = json.loads(line)
        if event['event'] != event_id or event['msg'] != msg:
            continue
        if dn is not None and event['dn'] != dn.lower():
            continue
        if bind_method is not None and event['bind_method'] != bind_method:
            continue
        # All requested criteria matched
        return

    assert False, f"security event ({event_id}, {msg}) not found in log"
def test_invalid_binds(topo, setup_test):
    """Test the various bind scenarios that should be logged in the security log

    :id: b82e3fb9-f1af-4a75-8d96-5e5d284f31c5
    :setup: Standalone Instance
    :steps:
        1. Test successful bind is logged
        2. Test bad password is logged
        3. Test no such entry is logged
        4. Test no such entry is logged (quoated dn)
        5. Test no such entry is logged (truncated dn)
        6. Test no such backend is logged
        7. Test account lockout is logged
    :expectedresults:
        1. Success
        2. Success
        3. Success
        4. Success
        5. Success
        6. Success
        7. Success
    """
    inst = topo.standalone

    # Start with a clean security log
    inst.deleteSecurityLogs()

    # A successful bind is logged as BIND_SUCCESS
    UserAccount(inst, DN).bind(PASSWORD)
    check_log(inst, "BIND_SUCCESS", "", DN)

    # Wrong password on an existing entry
    with pytest.raises(ldap.INVALID_CREDENTIALS):
        UserAccount(inst, DN).bind("wrongpassword")
    check_log(inst, "BIND_FAILED", "INVALID_PASSWORD", DN)

    # Each of these fails with NO_SUCH_ENTRY; verify how the DN appears
    # in the log: quoted RDNs are escaped, overlong DNs are truncated.
    no_entry_cases = [
        (DN_NO_ENTRY, DN_NO_ENTRY),        # plain missing entry
        (DN_QUOATED, DN_QUOATED_ESCAPED),  # quoted RDN logged escaped
        (DN_LONG, DN_LONG_TRUNCATED),      # overlong DN logged truncated
        (DN_NO_BACKEND, DN_NO_BACKEND),    # suffix with no backend
    ]
    for bind_dn, logged_dn in no_entry_cases:
        with pytest.raises(ldap.INVALID_CREDENTIALS):
            UserAccount(inst, bind_dn).bind(PASSWORD)
        check_log(inst, "BIND_FAILED", "NO_SUCH_ENTRY", logged_dn)
def test_account_lockout(topo, setup_test):
    """Test that account locked message is displayed correctly

    :id: b70494f0-7d8e-4d90-8265-9d009bbb08b4
    :setup: Standalone Instance
    :steps:
        1. Configure account lockout
        2. Bind using the wrong password until the account is locked
        3. Check for account lockout event
    :expectedresults:
        1. Success
        2. Success
        3. Success
    """
    inst = topo.standalone

    # Start with a clean security log
    inst.deleteSecurityLogs()

    # Two failed binds are enough to lock the account
    inst.config.set('passwordlockout', 'on')
    inst.config.set('passwordMaxFailure', '2')

    user_entry = UserAccount(inst, DN)
    for _ in range(2):
        with pytest.raises(ldap.INVALID_CREDENTIALS):
            user_entry.bind("wrong")

    # Once locked, even the correct password is refused
    with pytest.raises(ldap.CONSTRAINT_VIOLATION):
        user_entry.bind(PASSWORD)

    # The lockout must be recorded against this DN
    check_log(inst, "BIND_FAILED", "ACCOUNT_LOCKED", DN)
def test_tcp_events(topo, setup_test):
    """Trigger a TCP_ERROR event that should be logged in the security log

    :id: 2f653508-89ae-4325-9fed-a2c4ab304149
    :setup: Standalone Instance
    :steps:
        1. Start ldapmodify in its interactive mode
        2. Kill ldapmodify
        3. Check that a TCP_ERROR is in the security log
    :expectedresults:
        1. Success
        2. Success
        3. Success
    """
    inst = topo.standalone

    # Start with a clean security log
    inst.deleteSecurityLogs()

    # Start interactive ldapmodify so it holds an open connection
    ldap_cmd = ['ldapmodify', '-x', '-D', DN_DM, '-w', PASSWORD,
                '-H', f'ldap://{inst.host}:{inst.port}']
    # Keep the Popen handle: killing via `pidof ldapmodify` (as before)
    # is racy -- it can match an unrelated ldapmodify, and int() crashes
    # outright when pidof returns more than one pid.
    proc = subprocess.Popen(ldap_cmd, stdout=subprocess.PIPE,
                            stderr=subprocess.PIPE, stdin=subprocess.PIPE)
    time.sleep(3)  # need some time for ldapmodify to actually connect

    # Kill the exact process we started, uncleanly closing the connection,
    # then reap it so no zombie is left behind
    os.kill(proc.pid, signal.SIGKILL)
    proc.wait()
    check_log(inst, "TCP_ERROR", "Bad Ber Tag or uncleanly closed connection - B1")
def test_tcp_events_maxbersize(topo, setup_test, big_file):
    """Trigger a TCP_ERROR event B2 that should be logged in the security log

    :id: 85e5ac23-4288-4e55-b8c3-1f8f39e95c2b
    :setup: Standalone Instance
    :testype: Non-functional
    :subtype1: Security
    :subtype2: Penetration
    :subsystemteam: sst_idm_ds
    :steps:
        1. Create test user
        2. Set maxbersize attribute to a small value (20KiB)
        3. Add the big value to instance
        4. Check that a TCP_ERROR is in the security log
    :expectedresults:
        1. Success
        2. Success
        3. Success
        4. Success
    """
    inst = topo.standalone
    user_entry = UserAccount(inst, DN)

    # Start with a clean security log
    inst.deleteSecurityLogs()

    log.info("Set nsslapd-maxbersize to 20K")
    inst.config.set('nsslapd-maxbersize', '20480')
    inst.restart()

    log.info('Try to add attribute with a big value to instance - expect to FAIL')
    # The server drops the connection on an oversized BER, hence SERVER_DOWN
    with pytest.raises(ldap.SERVER_DOWN):
        user_entry.add('jpegphoto', big_file)

    log.info('Check security log')
    check_log(inst, "TCP_ERROR", "Ber Too Big (nsslapd-maxbersize) - B2")

    # Restart the instance so it won't break next tests
    inst.restart()
def test_cert_map_failed_event(topo, setup_test):
    """Trigger a CERT_MAP_FAILED event that should be logged in the security log.
    Also test that BIND_SUCCESS works with TLSCLIENTAUTH

    :id: eb0c638b-4a30-4108-b38b-e75e55ccb6c8
    :setup: Standalone Instance
    :steps:
        1. Enable TLS
        2. Create a user
        3. Create User certificates - one is for the new user, another one is free
        4. Turn on the certmap.
        5. Check that EXTERNAL is listed in supported mechns.
        6. Restart to allow certmaps to be re-read
        7. Attempt a bind with TLS external with the correct credentials
        8. Now attempt a bind with TLS external with a wrong cert
    :expectedresults:
        1. Success
        2. Success
        3. Success
        4. Success
        5. Success
        6. Success
        7. Security log should display BIND_SUCCESS with TLSCLIENTAUTH
        8. Security log should display BIND_FAILED with CERT_MAP_FAILED
    """
    inst = topo.standalone

    inst.enable_tls()

    # The cert's cn must map to this entry through the certmap filter below
    user = UserAccounts(inst, DEFAULT_SUFFIX).create(properties={
        'uid': RDN_TEST_USER,
        'cn': RDN_TEST_USER,
        'sn': RDN_TEST_USER,
        'uidNumber': '1000',
        'gidNumber': '2000',
        'homeDirectory': f'/home/{RDN_TEST_USER}'
    })

    # Two client certs: one enrolled for the user, one that maps to nobody
    ssca_dir = inst.get_ssca_dir()
    ssca = NssSsl(dbpath=ssca_dir)
    ssca.create_rsa_user(RDN_TEST_USER)
    ssca.create_rsa_user(RDN_TEST_USER_WRONG)

    # Get the details of where each key and crt are
    good_cert = ssca.get_rsa_user(RDN_TEST_USER)
    wrong_cert = ssca.get_rsa_user(RDN_TEST_USER_WRONG)

    user.enroll_certificate(good_cert['crt_der_path'])

    # Turn on the certmap: map certs to entries by cn only
    cm = CertmapLegacy(inst)
    certmaps = cm.list()
    certmaps['default']['DNComps'] = ''
    certmaps['default']['FilterComps'] = ['cn']
    certmaps['default']['VerifyCert'] = 'off'
    cm.set(certmaps)

    # EXTERNAL must be advertised before we can use it
    assert inst.rootdse.supports_sasl_external()

    # Restart to allow certmaps to be re-read: Note, we CAN NOT use post_open
    # here, it breaks on auth.  see lib389/__init__.py
    inst.restart(post_open=False)

    # Start with a clean security log
    inst.deleteSecurityLogs()

    # TLS client-auth bind with the enrolled cert must succeed
    inst.open(saslmethod='EXTERNAL', connOnly=True, certdir=ssca_dir,
              userkey=good_cert['key'], usercert=good_cert['crt'])
    inst.restart()
    check_log(inst, "BIND_SUCCESS", "", bind_method="TLSCLIENTAUTH")

    # The unmapped cert is rejected and logged as CERT_MAP_FAILED
    with pytest.raises(ldap.INVALID_CREDENTIALS):
        inst.open(saslmethod='EXTERNAL', connOnly=True, certdir=ssca_dir,
                  userkey=wrong_cert['key'], usercert=wrong_cert['crt'])
    check_log(inst, "BIND_FAILED", "CERT_MAP_FAILED")
def test_invalid_mt(topo):
    """Test that you can not add a new suffix/mapping tree
    that does not already have the backend entry created.

    :id: caabd407-f541-4695-b13f-8f92af1112a0
    :setup: Standalone Instance
    :steps:
        1. Create a new suffix that specifies an existing backend which has a
           different suffix.
        2. Create a suffix that has no backend entry at all.
    :expectedresults:
        1. Should fail with UNWILLING_TO_PERFORM
        1. Should fail with UNWILLING_TO_PERFORM
    """
    mts = MappingTrees(topo.standalone)
    bad_suffix = 'dc=does,dc=not,dc=exist'

    # Neither an existing backend holding a different suffix ('userroot')
    # nor a backend that was never created ('notCreatedRoot') may be
    # referenced by a new mapping tree
    for backend_name in ('userroot', 'notCreatedRoot'):
        with pytest.raises(ldap.UNWILLING_TO_PERFORM):
            mts.create(properties={
                'cn': bad_suffix,
                'nsslapd-state': 'backend',
                'nsslapd-backend': backend_name,
            })
def test_be_delete(topo):
    """Test that we can delete a backend that contains replication
    configuration and encrypted attributes. The default naming
    context should also be updated to reflect the next available suffix

    :id: 5208f897-7c95-4925-bad0-9ceb95fee678
    :setup: Supplier Instance
    :steps:
        1. Create second backend/suffix
        2. Add an encrypted attribute to the default suffix
        3. Delete default suffix
        4. Check the nsslapd-defaultnamingcontext is updated
        5. Delete the last backend
        6. Check the namingcontext has not changed
        7. Add new backend
        8. Set default naming context
        9. Verify the naming context is correct
    :expectedresults:
        1. Success
        2. Success
        3. Success
        4. Success
        5. Success
        6. Success
        7. Success
        8. Success
        9. Success
    """
    inst = topo.ms["supplier1"]
    backends = Backends(inst)

    def naming_context():
        # Current value of the server-wide default naming context
        return inst.config.get_attr_val_utf8('nsslapd-defaultnamingcontext')

    # Create a second suffix alongside the default one
    default_backend = backends.get(DEFAULT_SUFFIX)
    second_backend = backends.create(properties={'nsslapd-suffix': SECOND_SUFFIX,
                                                 'name': 'namingRoot'})

    # Add an encrypted attribute entry under the default suffix
    encrypt_attrs = EncryptedAttrs(inst, basedn=f'cn=encrypted attributes,{default_backend.dn}')
    encrypt_attrs.create(properties={'cn': 'employeeNumber',
                                     'nsEncryptionAlgorithm': 'AES'})

    # Deleting the default suffix moves the default naming context over
    # to the remaining (second) suffix
    default_backend.delete()
    assert naming_context() == SECOND_SUFFIX

    # Deleting the last backend must NOT change the naming context
    second_backend.delete()
    assert naming_context() == SECOND_SUFFIX

    # A manually-set naming context on a fresh backend sticks
    backends.create(properties={'nsslapd-suffix': THIRD_SUFFIX,
                                'name': 'namingRoot2'})
    inst.config.set('nsslapd-defaultnamingcontext', THIRD_SUFFIX)
    assert naming_context() == THIRD_SUFFIX
def create_backend(inst, rdn, suffix):
    """Create a backend with its base domain entry but NO mapping tree.

    A temporary mapping tree is created so the base entry can be added
    under the suffix, then deleted again so the caller can construct
    mapping-tree layouts explicitly.

    :param inst: DirSrv instance
    :param rdn: backend name, e.g. 'userRootA'
    :param suffix: the backend suffix; must be a dc= style DN because
                   create_base_domain() writes a domain entry
    :returns: the created Backend object
    :raises ValueError: if *suffix* is not a dc= style DN
    """
    # Raise instead of assert so the validation survives `python -O`
    if not suffix.startswith('dc='):
        raise ValueError(f"create_backend only supports dc= suffixes, got {suffix!r}")

    be1 = Backend(inst)
    be1.create(properties={
        'cn': rdn,
        'nsslapd-suffix': suffix,
    },
        create_mapping_tree=False
    )

    # Temporary MT for this node so we can add the base entry
    mts = MappingTrees(inst)
    mt = mts.create(properties={
        'cn': suffix,
        'nsslapd-state': 'backend',
        'nsslapd-backend': rdn,
    })

    # Create the domain entry, then drop the temporary mapping tree
    create_base_domain(inst, suffix)
    mt.delete()

    return be1
def test_mapping_tree_inverted(topology):
    """Test the results of an inverted parent suffix definition in the configuration.

    For more details see:
    https://www.port389.org/docs/389ds/design/mapping_tree_assembly.html

    :id: 024c4960-3aac-4d05-bc51-963dfdeb16ca
    :setup: Standalone instance (no backends)
    :steps:
        1. Add two backends without mapping trees.
        2. Add the mapping trees with inverted parent-suffix definitions.
        3. Attempt to search the definitions
    :expectedresults:
        1. Success
        2. Success
        3. The search suceed and can see validly arranged entries.
    """
    inst = topology.standalone

    # Backends first, with no mapping trees yet
    create_backend(inst, 'userRootA', 'dc=example,dc=com')
    create_backend(inst, 'userRootB', 'dc=straya,dc=example,dc=com')

    # Mapping trees with the parent relationship deliberately inverted:
    # the child suffix is created first and the parent declares the child
    # as its nsslapd-parent-suffix
    mts = MappingTrees(inst)
    mts.create(properties={
        'cn': 'dc=straya,dc=example,dc=com',
        'nsslapd-state': 'backend',
        'nsslapd-backend': 'userRootB',
    })
    mts.create(properties={
        'cn': 'dc=example,dc=com',
        'nsslapd-state': 'backend',
        'nsslapd-backend': 'userRootA',
        'nsslapd-parent-suffix': 'dc=straya,dc=example,dc=com'
    })

    domains = [Domain(inst, dn='dc=example,dc=com'),
               Domain(inst, dn='dc=straya,dc=example,dc=com')]
    for dom in domains:
        assert dom.exists()

    # The inverted layout must also survive a restart
    inst.restart()
    for dom in domains:
        assert dom.exists()
def test_mapping_tree_nonexist_parent(topology):
    """Test a backend whos mapping tree definition has a non-existant parent-suffix

    For more details see:
    https://www.port389.org/docs/389ds/design/mapping_tree_assembly.html

    :id: 7a9a09bd-7604-48f7-93cb-abff9e0d0131
    :setup: Standalone instance (no backends)
    :steps:
        1. Add one backend without mapping tree
        2. Configure the mapping tree with a non-existant parent suffix
        3. Attempt to search the backend
    :expectedresults:
        1. Success
        2. Success
        3. The search suceed and can see validly entries.
    """
    inst = topology.standalone
    create_backend(inst, 'userRootC', 'dc=test,dc=com')

    MappingTrees(inst).create(properties={
        'cn': 'dc=test,dc=com',
        'nsslapd-state': 'backend',
        'nsslapd-backend': 'userRootC',
        'nsslapd-parent-suffix': 'dc=com'
    })

    # The parent suffix doesn't exist, so the MT is never joined into the
    # hierarchy and the parent-suffix setting is effectively ignored --
    # the suffix must still be searchable.
    domain = Domain(inst, dn='dc=test,dc=com')
    assert domain.exists()

    # And again after a restart
    inst.restart()
    assert domain.exists()
def test_mapping_tree_same_length(topology):
    """Test mapping tree with backends that have same lengths (dc=example,dc=com and dc=abcdefg,dc=abc)

    :id: 7b9fcffe-e786-4895-b530-00215aaa7e84
    :setup: Standalone instance (no backends)
    :steps:
        1. Add two backends without mapping trees
        2. Create the mapping trees for these backends
        3. Check that domains exist
        4. Restart and check again
    :expectedresults:
        1. Success
        2. Success
        3. Success
        4. Success
    """
    inst = topology.standalone
    layout = [
        ('dc=example,dc=com', 'userRootA'),
        ('dc=abcdefg,dc=hij', 'userRootB'),
    ]

    # Backends first, with no mapping trees yet
    for suffix, bename in layout:
        create_backend(inst, bename, suffix)

    # Now attach a mapping tree to each backend
    mts = MappingTrees(inst)
    for suffix, bename in layout:
        mts.create(properties={
            'cn': suffix,
            'nsslapd-state': 'backend',
            'nsslapd-backend': bename,
        })

    domains = [Domain(inst, dn=suffix) for suffix, _ in layout]
    for dom in domains:
        assert dom.exists()

    # The arrangement must survive a restart
    inst.restart()
    for dom in domains:
        assert dom.exists()
def test_mapping_tree_flipped_components(topology):
    """Test mapping tree with backends that have flipped DC components (dc=example,dc=com and dc=com,dc=example)

    :id: 55264933-2a81-428b-aec9-c8f9a64400d1
    :setup: Standalone instance (no backends)
    :steps:
        1. Add two backends without mapping trees
        2. Create the mapping trees for these backends
        3. Check that domains exist
        4. Restart and check again
    :expectedresults:
        1. Success
        2. Success
        3. Success
        4. Success
    """
    inst = topology.standalone
    # The two suffixes are each other's DC components in reverse order
    layout = [
        ('dc=example,dc=com', 'userRootA'),
        ('dc=com,dc=example', 'userRootB'),
    ]

    # Backends first, with no mapping trees yet
    for suffix, bename in layout:
        create_backend(inst, bename, suffix)

    # Now attach a mapping tree to each backend
    mts = MappingTrees(inst)
    for suffix, bename in layout:
        mts.create(properties={
            'cn': suffix,
            'nsslapd-state': 'backend',
            'nsslapd-backend': bename,
        })

    domains = [Domain(inst, dn=suffix) for suffix, _ in layout]
    for dom in domains:
        assert dom.exists()

    # The arrangement must survive a restart
    inst.restart()
    for dom in domains:
        assert dom.exists()
def test_mapping_tree_weird_nesting(topology):
    """Test mapping tree with backends that have weired nesting (dc=exmaple,dc=com, dc=com,dc=example, dc=com,dc=example,dc=com)

    :id: 02fbfaa5-15ef-43d2-a52f-c011e496e8cd
    :setup: Standalone instance (no backends)
    :steps:
        1. Add 3 backends without mapping trees
        2. Create the mapping trees for these backends
        3. Check that domains exist
        4. Restart and check again
    :expectedresults:
        1. Success
        2. Success
        3. Success
        4. Success
    """
    inst = topology.standalone
    # Suffixes whose DC components overlap and nest in odd ways
    layout = [
        ('dc=example,dc=com', 'userRootA'),
        ('dc=com,dc=example', 'userRootB'),
        ('dc=com,dc=example,dc=com', 'userRootC'),
    ]

    # Backends first, with no mapping trees yet
    for suffix, bename in layout:
        create_backend(inst, bename, suffix)

    # Now attach a mapping tree to each backend
    mts = MappingTrees(inst)
    for suffix, bename in layout:
        mts.create(properties={
            'cn': suffix,
            'nsslapd-state': 'backend',
            'nsslapd-backend': bename,
        })

    domains = [Domain(inst, dn=suffix) for suffix, _ in layout]
    for dom in domains:
        assert dom.exists()

    # The arrangement must survive a restart
    inst.restart()
    for dom in domains:
        assert dom.exists()
def test_mapping_tree_mixed_length(topology):
    """Test mapping tree with backends that have different lengths (dc=myserver, dc=a,dc=b,dc=c,dc=d, dc=example,dc=com)

    :id: ce43abf2-335c-4327-a883-b20a40e5571c
    :setup: Standalone instance (no backends)
    :steps:
        1. Add 5 backends without mapping trees
        2. Create the mapping trees for these backends
        3. Check that domains exist
        4. Restart and check again
    :expectedresults:
        1. Success
        2. Success
        3. Success
        4. Success
    """
    inst = topology.standalone
    # Suffixes of wildly varying depth
    layout = [
        ('dc=myserver', 'userRootA'),
        ('dc=m', 'userRootB'),
        ('dc=a,dc=b,dc=c,dc=d,dc=e', 'userRootC'),
        ('dc=example,dc=com', 'userRootD'),
        ('dc=myldap', 'userRootE'),
    ]

    # Backends first, with no mapping trees yet
    for suffix, bename in layout:
        create_backend(inst, bename, suffix)

    # Now attach a mapping tree to each backend
    mts = MappingTrees(inst)
    for suffix, bename in layout:
        mts.create(properties={
            'cn': suffix,
            'nsslapd-state': 'backend',
            'nsslapd-backend': bename,
        })

    domains = [Domain(inst, dn=suffix) for suffix, _ in layout]
    for dom in domains:
        assert dom.exists()

    # The arrangement must survive a restart
    inst.restart()
    for dom in domains:
        assert dom.exists()
Success + """ + inst = topology.standalone + dcs = [ ('dc=x%s,dc=example,dc=com' % x, 'userRoot%s' % x) for x in range(0,50) ] + + for (dc, bename) in dcs: + create_backend(inst, bename, dc) + + mts = MappingTrees(inst) + for (dc, bename) in dcs: + mts.create(properties={ + 'cn': dc, + 'nsslapd-state': 'backend', + 'nsslapd-backend': bename, + }) + + dc_asserts = [ Domain(inst, dn=dc[0]) for dc in dcs ] + for dc_a in dc_asserts: + assert dc_a.exists() + inst.restart() + for dc_a in dc_asserts: + assert dc_a.exists() + +# 50 suffixes, deeper nesting (dc=example,dc=com, dc=00 -> dc=10 and dc=a,dc=b,dc=c,dc=d,dc=XX,dc=example,dc=com) +def test_mapping_tree_many_deep_nesting(topology): + """Test mapping tree with 50 backends, deep nesting (dc=example,dc=com, dc=00 -> dc=10 and dc=a,dc=b,dc=c,dc=d,dc=XX,dc=example,dc=com) + + :id: 519e5fb7-e8d1-42fe-800c-ba157054a7d9 + :setup: Standalone instance (no backends) + :steps: + 1. Add 50 backends without mapping trees + 2. Create the mapping trees for these backends + 3. Check that domains exist + 4. Restart and check again + :expectedresults: + 1. Success + 2. Success + 3. Success + 4. Success + """ + inst = topology.standalone + be_count = 0 + dcs = [] + for x in range(0, 10): + dcs.append(('dc=x%s,dc=example,dc=com' % x, 'userRoot%s' % be_count)) + be_count += 1 + + # Now add some children. 
+ for x in range(0,10): + dcs.append(('dc=nest,dc=x%s,dc=example,dc=com' % x, 'userRoot%s' % be_count)) + be_count += 1 + + # Now add nested children + for x in range(0,10): + for y in range(0,5): + dcs.append(('dc=y%s,dc=nest,dc=x%s,dc=example,dc=com' % (y, x), 'userRoot%s' % be_count)) + be_count += 1 + + for (dc, bename) in dcs: + create_backend(inst, bename, dc) + + mts = MappingTrees(inst) + for (dc, bename) in dcs: + mts.create(properties={ + 'cn': dc, + 'nsslapd-state': 'backend', + 'nsslapd-backend': bename, + }) + + dc_asserts = [ Domain(inst, dn=dc[0]) for dc in dcs ] + for dc_a in dc_asserts: + assert dc_a.exists() + inst.restart() + for dc_a in dc_asserts: + assert dc_a.exists() + diff --git a/dirsrvtests/tests/suites/mapping_tree/referral_during_tot_init_test.py b/dirsrvtests/tests/suites/mapping_tree/referral_during_tot_init_test.py new file mode 100644 index 0000000..80b3aa1 --- /dev/null +++ b/dirsrvtests/tests/suites/mapping_tree/referral_during_tot_init_test.py @@ -0,0 +1,80 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2020 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. +# --- END COPYRIGHT BLOCK --- +# +import ldap +import pytest +from lib389.topologies import topology_m2 +from lib389._constants import (DEFAULT_SUFFIX) +from lib389.agreement import Agreements +from lib389.idm.user import (TEST_USER_PROPERTIES, UserAccounts) +from lib389.dbgen import dbgen_users +from lib389.utils import ds_is_older + +pytestmark = pytest.mark.tier1 + +@pytest.mark.skipif(ds_is_older("1.4.0.0"), reason="Not implemented") +def test_referral_during_tot(topology_m2): + """Test referrals during total init + + :id: 2a030f15-89ae-4acc-880d-bd2263a6be33 + :setup: 2 suppliers + :steps: + 1. Create test user on supplier2 + 2. Create a bunch of entries in supplier1 + 3. Recreate the user on supplier1 also, so that if the init finishes first we don't lose the user on supplier2 + 4. 
Initialize replica on supplier1 + 5. While that's happening try to bind as a user to supplier2 + :expectedresults: + 1. Success + 2. Success + 3. Success + 4. Success + 5. This should trigger the referral code. + """ + + supplier1 = topology_m2.ms["supplier1"] + supplier2 = topology_m2.ms["supplier2"] + + users = UserAccounts(supplier2, DEFAULT_SUFFIX) + u = users.create(properties=TEST_USER_PROPERTIES) + u.set('userPassword', 'password') + binddn = u.dn + bindpw = 'password' + + # Create a bunch of entries on supplier1 + ldif_dir = supplier1.get_ldif_dir() + import_ldif = ldif_dir + '/ref_during_tot_import.ldif' + dbgen_users(supplier1, 10000, import_ldif, DEFAULT_SUFFIX) + + supplier1.stop() + supplier1.ldif2db(bename=None, excludeSuffixes=None, encrypt=False, suffixes=[DEFAULT_SUFFIX], import_file=import_ldif) + supplier1.start() + # Recreate the user on supplier1 also, so that if the init finishes first we don't lose the user on supplier2 + users = UserAccounts(supplier1, DEFAULT_SUFFIX) + u = users.create(properties=TEST_USER_PROPERTIES) + u.set('userPassword', 'password') + # Now export them to supplier2 + agmts = Agreements(supplier1) + agmts.list()[0].begin_reinit() + + # While that's happening try to bind as a user to supplier 2 + # This should trigger the referral code. + referred = False + for i in range(0, 100): + conn = ldap.initialize(supplier2.toLDAPURL()) + conn.set_option(ldap.OPT_REFERRALS, False) + try: + conn.simple_bind_s(binddn, bindpw) + conn.unbind_s() + except ldap.REFERRAL: + referred = True + break + # Means we never go a referral, should not happen! + assert referred + + # Done. diff --git a/dirsrvtests/tests/suites/mapping_tree/regression_test.py b/dirsrvtests/tests/suites/mapping_tree/regression_test.py new file mode 100644 index 0000000..f4877da --- /dev/null +++ b/dirsrvtests/tests/suites/mapping_tree/regression_test.py @@ -0,0 +1,129 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2022 Red Hat, Inc. +# All rights reserved. 
+# +# License: GPL (version 3 or any later version). +# See LICENSE for details. +# --- END COPYRIGHT BLOCK --- +# + +import ldap +import logging +import os +import pytest +from lib389.backend import Backends, Backend +from lib389.dbgen import dbgen_users +from lib389.mappingTree import MappingTrees +from lib389.topologies import topology_st + +try: + from lib389.backend import BackendSuffixView + has_orphan_attribute = True +except ImportError: + has_orphan_attribute = False + +pytestmark = pytest.mark.tier1 + +DEBUGGING = os.getenv("DEBUGGING", default=False) +if DEBUGGING: + logging.getLogger(__name__).setLevel(logging.DEBUG) +else: + logging.getLogger(__name__).setLevel(logging.INFO) +log = logging.getLogger(__name__) + +BESTRUCT = [ + { "bename" : "parent", "suffix": "dc=parent" }, + { "bename" : "child1", "suffix": "dc=child1,dc=parent" }, + { "bename" : "child2", "suffix": "dc=child2,dc=parent" }, +] + + +@pytest.fixture(scope="function") +def topo(topology_st, request): + bes = [] + + def fin(): + for be in bes: + be.delete() + + if not DEBUGGING: + request.addfinalizer(fin) + + inst = topology_st.standalone + ldif_files = {} + for d in BESTRUCT: + bename = d['bename'] + suffix = d['suffix'] + log.info(f'Adding suffix: {suffix} and backend: {bename}...') + backends = Backends(inst) + try: + be = backends.create(properties={'nsslapd-suffix': suffix, 'name': bename}) + # Insert at list head so that children backends get deleted before parent one. 
+ bes.insert(0, be) + except ldap.UNWILLING_TO_PERFORM as e: + if str(e) == "Mapping tree for this suffix exists!": + pass + else: + raise e + + ldif_dir = inst.get_ldif_dir() + ldif_files[bename] = os.path.join(ldif_dir, f'default_{bename}.ldif') + dbgen_users(inst, 5, ldif_files[bename], suffix) + inst.stop() + for d in BESTRUCT: + bename = d['bename'] + inst.ldif2db(bename, None, None, None, ldif_files[bename]) + inst.start() + return topology_st + +# Parameters for test_change_repl_passwd +EXPECTED_ENTRIES = (("dc=parent", 39), ("dc=child1,dc=parent", 13), ("dc=child2,dc=parent", 13)) +@pytest.mark.parametrize( + "orphan_param", + [ + pytest.param( ( True, { "dc=parent": 2, "dc=child1,dc=parent":1, "dc=child2,dc=parent":1}), id="orphan-is-true" ), + pytest.param( ( False, { "dc=parent": 3, "dc=child1,dc=parent":1, "dc=child2,dc=parent":1}), id="orphan-is-false" ), + pytest.param( ( None, { "dc=parent": 3, "dc=child1,dc=parent":1, "dc=child2,dc=parent":1}), id="no-orphan" ), + ], +) + + +@pytest.mark.bz2083589 +@pytest.mark.skipif(not has_orphan_attribute, reason = "compatibility attribute not yet implemented in this version") +def test_sub_suffixes(topo, orphan_param): + """ check the entries found on suffix/sub-suffix + used int + + :id: 5b4421c2-d851-11ec-a760-482ae39447e5 + :feature: mapping-tree + :setup: Standalone instance with 3 additional backends: + dc=parent, dc=child1,dc=parent, dc=childr21,dc=parent + :steps: + 1. Det orphan attribute mapping tree entry for dc=child1,dc=parent according to orphan_param value + 2. Restart the server to rebuild the mapping tree + 3. For each suffix: search the suffix + :expectedresults: + 1. Success + 2. Success + 3. 
Number of entries should be the expected one + """ + mt = MappingTrees(topo.standalone).get('dc=child1,dc=parent') + orphan = orphan_param[0] + expected_values = orphan_param[1] + if orphan is True: + mt.replace('orphan', 'true') + elif orphan is False: + mt.replace('orphan', 'false') + elif orphan is None: + mt.remove_all('orphan') + topo.standalone.restart() + + for suffix, expected in expected_values.items(): + log.info(f'Verifying domain component entries count for search under {suffix} ...') + entries = topo.standalone.search_s(suffix, ldap.SCOPE_SUBTREE, "(dc=*)") + assert len(entries) == expected + log.info('Found {expected} domain component entries as expected while searching {suffix}') + + log.info('Test PASSED') + + diff --git a/dirsrvtests/tests/suites/memberof_plugin/__init__.py b/dirsrvtests/tests/suites/memberof_plugin/__init__.py new file mode 100644 index 0000000..d5b1467 --- /dev/null +++ b/dirsrvtests/tests/suites/memberof_plugin/__init__.py @@ -0,0 +1,3 @@ +""" + :Requirement: 389-ds-base: Memberof Plugin +""" diff --git a/dirsrvtests/tests/suites/memberof_plugin/fixup_test.py b/dirsrvtests/tests/suites/memberof_plugin/fixup_test.py new file mode 100644 index 0000000..5aac40d --- /dev/null +++ b/dirsrvtests/tests/suites/memberof_plugin/fixup_test.py @@ -0,0 +1,85 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2022 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. 
+# --- END COPYRIGHT BLOCK --- +# +import ldap +import logging +import pytest +import os +from lib389._constants import DEFAULT_SUFFIX +from lib389.topologies import topology_st as topo +from lib389.plugins import MemberOfPlugin +from lib389.idm.user import UserAccounts +from lib389.idm.group import Groups + + +log = logging.getLogger(__name__) + + +def test_fixup_task_limit(topo): + """Test only one fixup task is allowed at one time + + :id: 2bb49a10-fca9-4d89-9a7a-34c2ba4baadc + :setup: Standalone Instance + :steps: + 1. Add some users and groups + 2. Enable memberOf Plugin + 3. Add fixup task + 4. Add second task + 5. Add a third task after first task completes + :expectedresults: + 1. Success + 2. Success + 3. Success + 4. Second task should fail + 5. Success + """ + + # Create group with members + groups = Groups(topo.standalone, DEFAULT_SUFFIX) + group = groups.create(properties={'cn': 'test'}) + + users = UserAccounts(topo.standalone, DEFAULT_SUFFIX) + for idx in range(400): + user = users.create(properties={ + 'uid': 'testuser%s' % idx, + 'cn' : 'testuser%s' % idx, + 'sn' : 'user%s' % idx, + 'uidNumber' : '%s' % (1000 + idx), + 'gidNumber' : '%s' % (1000 + idx), + 'homeDirectory' : '/home/testuser%s' % idx + }) + group.add('member', user.dn) + + # Configure memberOf plugin + memberof = MemberOfPlugin(topo.standalone) + memberof.enable() + topo.standalone.restart() + + # Add first task + task = memberof.fixup(DEFAULT_SUFFIX) + + # Add second task which should fail + with pytest.raises(ldap.UNWILLING_TO_PERFORM): + memberof.fixup(DEFAULT_SUFFIX) + + # Add second task but on different suffix which should be allowed + memberof.fixup("ou=people," + DEFAULT_SUFFIX) + + # Wait for first task to complete + task.wait() + + # Add new task which should be allowed now + memberof.fixup(DEFAULT_SUFFIX) + + +if __name__ == '__main__': + # Run isolated + # -s for DEBUG mode + CURRENT_FILE = os.path.realpath(__file__) + pytest.main(["-s", CURRENT_FILE]) + diff --git 
a/dirsrvtests/tests/suites/memberof_plugin/memberof_include_scopes_test.py b/dirsrvtests/tests/suites/memberof_plugin/memberof_include_scopes_test.py new file mode 100644 index 0000000..b310b15 --- /dev/null +++ b/dirsrvtests/tests/suites/memberof_plugin/memberof_include_scopes_test.py @@ -0,0 +1,127 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2022 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. +# --- END COPYRIGHT BLOCK --- +# +import pytest +import os +import ldap +from lib389.utils import ensure_str +from lib389.topologies import topology_st as topo +from lib389._constants import * +from lib389.plugins import MemberOfPlugin +from lib389.idm.user import UserAccount, UserAccounts +from lib389.idm.group import Group, Groups +from lib389.idm.nscontainer import nsContainers + +SUBTREE_1 = 'cn=sub1,%s' % SUFFIX +SUBTREE_2 = 'cn=sub2,%s' % SUFFIX +SUBTREE_3 = 'cn=sub3,%s' % SUFFIX + +def add_container(inst, dn, name): + """Creates container entry""" + conts = nsContainers(inst, dn) + cont = conts.create(properties={'cn': name}) + return cont + +def add_member_and_group(server, cn, group_cn, subtree): + users = UserAccounts(server, subtree, rdn=None) + users.create(properties={'uid': f'test_{cn}', + 'cn': f'test_{cn}', + 'sn': f'test_{cn}', + 'description': 'member', + 'uidNumber': '1000', + 'gidNumber': '2000', + 'homeDirectory': '/home/testuser'}) + group = Groups(server, subtree, rdn=None) + group.create(properties={'cn': group_cn, + 'member': f'uid=test_{cn},{subtree}', + 'description': 'group'}) + +def check_membership(server, user_dn=None, group_dn=None, find_result=True): + ent = server.getEntry(user_dn, ldap.SCOPE_BASE, "(objectclass=*)", ['memberof']) + found = False + if ent.hasAttr('memberof'): + for val in ent.getValues('memberof'): + if ensure_str(val) == group_dn: + found = True + break + + if find_result: + assert found + else: + assert (not found) + +def 
test_multiple_scopes(topo): + """Specify memberOf works when multiple include scopes are defined + + :id: fbcd70cc-c83d-4c79-bd5b-2d8f017545ae + :setup: Standalone Instance + :steps: + 1. Set multiple include scopes + 2. Test members added to both scopes are correctly updated + 3. Test user outside of scope was not updated + 4. Set exclude scope + 5. Move user into excluded subtree and check the membership is correct + :expectedresults: + 1. Success + 2. Success + 3. Success + 4. Success + 5. Success + """ + + inst = topo.standalone + + # configure plugin + memberof = MemberOfPlugin(inst) + memberof.enable() + memberof.add('memberOfEntryScope', SUBTREE_1) + memberof.add('memberOfEntryScope', SUBTREE_2) + inst.restart() + + # Add setup entries + add_container(inst, SUFFIX, 'sub1') + add_container(inst, SUFFIX, 'sub2') + add_container(inst, SUFFIX, 'sub3') + add_member_and_group(inst, 'm1', 'g1', SUBTREE_1) + add_member_and_group(inst, 'm2', 'g2', SUBTREE_2) + add_member_and_group(inst, 'm3', 'g3', SUBTREE_3) + + # Check users 1 and 2 were correctly updated + check_membership(inst, f'uid=test_m1,{SUBTREE_1}', f'cn=g1,{SUBTREE_1}', True) + check_membership(inst, f'uid=test_m2,{SUBTREE_2}', f'cn=g2,{SUBTREE_2}', True) + + # Check that user3, which is out of scope, was not updated + check_membership(inst, f'uid=test_m3,{SUBTREE_3}', f'cn=g1,{SUBTREE_1}', False) + check_membership(inst, f'uid=test_m3,{SUBTREE_3}', f'cn=g2,{SUBTREE_2}', False) + check_membership(inst, f'uid=test_m3,{SUBTREE_3}', f'cn=g3,{SUBTREE_3}', False) + + # Set exclude scope + EXCLUDED_SUBTREE = 'cn=exclude,%s' % SUFFIX + EXCLUDED_USER = f"uid=test_m1,{EXCLUDED_SUBTREE}" + INCLUDED_USER = f"uid=test_m1,{SUBTREE_1}" + GROUP_DN = f'cn=g1,{SUBTREE_1}' + + add_container(inst, SUFFIX, 'exclude') + memberof.add('memberOfEntryScopeExcludeSubtree', EXCLUDED_SUBTREE) + + # Move user to excluded scope + user = UserAccount(topo.standalone, dn=INCLUDED_USER) + user.rename("uid=test_m1", 
newsuperior=EXCLUDED_SUBTREE) + + # Check memberOf and group are cleaned up + check_membership(inst, EXCLUDED_USER, GROUP_DN, False) + group = Group(topo.standalone, dn=GROUP_DN) + assert not group.present("member", EXCLUDED_USER) + assert not group.present("member", INCLUDED_USER) + + +if __name__ == '__main__': + # Run isolated + # -s for DEBUG mode + CURRENT_FILE = os.path.realpath(__file__) + pytest.main(["-s", CURRENT_FILE]) diff --git a/dirsrvtests/tests/suites/memberof_plugin/regression_test.py b/dirsrvtests/tests/suites/memberof_plugin/regression_test.py new file mode 100644 index 0000000..b75a087 --- /dev/null +++ b/dirsrvtests/tests/suites/memberof_plugin/regression_test.py @@ -0,0 +1,865 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2020 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. +# --- END COPYRIGHT BLOCK --- +# +import logging +import pytest +import os +import time +import ldap +from random import sample +from lib389.utils import ds_is_older, ensure_list_bytes, ensure_bytes, ensure_str +from lib389.topologies import topology_m1h1c1 as topo, topology_st, topology_m2 as topo_m2 +from lib389._constants import * +from lib389.plugins import MemberOfPlugin +from lib389 import Entry +from lib389.idm.user import UserAccount, UserAccounts, TEST_USER_PROPERTIES +from lib389.idm.group import Groups, Group +from lib389.replica import ReplicationManager +from lib389.tasks import * +from lib389.idm.nscontainer import nsContainers + + +# Skip on older versions +pytestmark = [pytest.mark.tier1, + pytest.mark.skipif(ds_is_older('1.3.7'), reason="Not implemented")] + +USER_CN = 'user_' +GROUP_CN = 'group1' +DEBUGGING = os.getenv('DEBUGGING', False) +SUBTREE_1 = 'cn=sub1,%s' % SUFFIX +SUBTREE_2 = 'cn=sub2,%s' % SUFFIX + + +if DEBUGGING: + logging.getLogger(__name__).setLevel(logging.DEBUG) +else: + logging.getLogger(__name__).setLevel(logging.INFO) +log = logging.getLogger(__name__) + + 
+def add_users(topo_m2, users_num, suffix): + """Add users to the default suffix + Return the list of added user DNs. + """ + users_list = [] + users = UserAccounts(topo_m2.ms["supplier1"], suffix, rdn=None) + log.info('Adding %d users' % users_num) + for num in sample(list(range(1000)), users_num): + num_ran = int(round(num)) + USER_NAME = 'test%05d' % num_ran + user = users.create(properties={ + 'uid': USER_NAME, + 'sn': USER_NAME, + 'cn': USER_NAME, + 'uidNumber': '%s' % num_ran, + 'gidNumber': '%s' % num_ran, + 'homeDirectory': '/home/%s' % USER_NAME, + 'mail': '%s@redhat.com' % USER_NAME, + 'userpassword': 'pass%s' % num_ran, + }) + users_list.append(user) + return users_list + + +def config_memberof(server): + # Configure fractional to prevent total init to send memberof + memberof = MemberOfPlugin(server) + memberof.enable() + memberof.set_autoaddoc('nsMemberOf') + server.restart() + ents = server.agreement.list(suffix=DEFAULT_SUFFIX) + for ent in ents: + log.info('update %s to add nsDS5ReplicatedAttributeListTotal' % ent.dn) + server.agreement.setProperties(agmnt_dn=ents[0].dn, + properties={RA_FRAC_EXCLUDE: '(objectclass=*) $ EXCLUDE memberOf', + RA_FRAC_EXCLUDE_TOTAL_UPDATE: '(objectclass=*) $ EXCLUDE '}) + + +def send_updates_now(server): + ents = server.agreement.list(suffix=DEFAULT_SUFFIX) + for ent in ents: + server.agreement.pause(ent.dn) + server.agreement.resume(ent.dn) + + +def _find_memberof(server, member_dn, group_dn): + # To get the specific server's (M1, C1 and H1) user and group + user = UserAccount(server, member_dn) + assert user.exists() + group = Group(server, group_dn) + assert group.exists() + + # test that the user entry should have memberof attribute with specified group dn value + assert group._dn.lower() in user.get_attr_vals_utf8_l('memberOf') + + +@pytest.mark.bz1352121 +def test_memberof_with_repl(topo): + """Test that we allowed to enable MemberOf plugin in dedicated consumer + + :id: ef71cd7c-e792-41bf-a3c0-b3b38391cbe5 + 
:setup: 1 Supplier - 1 Hub - 1 Consumer + :steps: + 1. Configure replication to EXCLUDE memberof + 2. Enable memberof plugin + 3. Create users/groups + 4. Make user_0 member of group_0 + 5. Checks that user_0 is memberof group_0 on M,H,C + 6. Make group_0 member of group_1 (nest group) + 7. Checks that user_0 is memberof group_0 and group_1 on M,H,C + 8. Check group_0 is memberof group_1 on M,H,C + 9. Remove group_0 from group_1 + 10. Check group_0 and user_0 are NOT memberof group_1 on M,H,C + 11. Remove user_0 from group_0 + 12. Check user_0 is not memberof group_0 and group_1 on M,H,C + 13. Disable memberof on C + 14. make user_0 member of group_1 + 15. Checks that user_0 is memberof group_0 on M,H but not on C + 16. Enable memberof on C + 17. Checks that user_0 is memberof group_0 on M,H but not on C + 18. Run memberof fixup task + 19. Checks that user_0 is memberof group_0 on M,H,C + :expectedresults: + 1. Configuration should be successful + 2. Plugin should be enabled + 3. Users and groups should be created + 4. user_0 should be member of group_0 + 5. user_0 should be memberof group_0 on M,H,C + 6. group_0 should be member of group_1 + 7. user_0 should be memberof group_0 and group_1 on M,H,C + 8. group_0 should be memberof group_1 on M,H,C + 9. group_0 from group_1 removal should be successful + 10. group_0 and user_0 should not be memberof group_1 on M,H,C + 11. user_0 from group_0 remove should be successful + 12. user_0 should not be memberof group_0 and group_1 on M,H,C + 13. memberof should be disabled on C + 14. user_0 should be member of group_1 + 15. user_0 should be memberof group_0 on M,H and should not on C + 16. Enable memberof on C should be successful + 17. user_0 should be memberof group_0 on M,H should not on C + 18. memberof fixup task should be successful + 19. 
user_0 should be memberof group_0 on M,H,C + """ + + M1 = topo.ms["supplier1"] + H1 = topo.hs["hub1"] + C1 = topo.cs["consumer1"] + repl = ReplicationManager(DEFAULT_SUFFIX) + + # Step 1 & 2 + M1.config.enable_log('audit') + config_memberof(M1) + M1.restart() + + H1.config.enable_log('audit') + config_memberof(H1) + H1.restart() + + C1.config.enable_log('audit') + config_memberof(C1) + C1.restart() + + #Declare lists of users and groups + test_users = [] + test_groups = [] + + # Step 3 + # In for loop create users and add them in the user list + # it creates user_0 to user_9 (range is fun) + for i in range(10): + CN = '%s%d' % (USER_CN, i) + users = UserAccounts(M1, SUFFIX) + user_props = TEST_USER_PROPERTIES.copy() + user_props.update({'uid': CN, 'cn': CN, 'sn': '_%s' % CN}) + testuser = users.create(properties=user_props) + time.sleep(2) + test_users.append(testuser) + + # In for loop create groups and add them to the group list + # it creates group_0 to group_2 (range is fun) + for i in range(3): + CN = '%s%d' % (GROUP_CN, i) + groups = Groups(M1, SUFFIX) + testgroup = groups.create(properties={'cn': CN}) + time.sleep(2) + test_groups.append(testgroup) + + # Step 4 + # Now start testing by adding differnt user to differn group + if not ds_is_older('1.3.7'): + test_groups[0].remove('objectClass', 'nsMemberOf') + + member_dn = test_users[0].dn + grp0_dn = test_groups[0].dn + grp1_dn = test_groups[1].dn + + test_groups[0].add_member(member_dn) + repl.wait_while_replication_is_progressing(M1, C1) + + # Step 5 + for i in [M1, H1, C1]: + _find_memberof(i, member_dn, grp0_dn) + + # Step 6 + test_groups[1].add_member(test_groups[0].dn) + repl.wait_while_replication_is_progressing(M1, C1) + + # Step 7 + for i in [grp0_dn, grp1_dn]: + for inst in [M1, H1, C1]: + _find_memberof(inst, member_dn, i) + + # Step 8 + for i in [M1, H1, C1]: + _find_memberof(i, grp0_dn, grp1_dn) + + # Step 9 + test_groups[1].remove_member(test_groups[0].dn) + time.sleep(2) + + # Step 10 + # For 
negative testcase, we are using assertionerror + for inst in [M1, H1, C1]: + for i in [grp0_dn, member_dn]: + with pytest.raises(AssertionError): + _find_memberof(inst, i, grp1_dn) + + # Step 11 + test_groups[0].remove_member(member_dn) + time.sleep(2) + + # Step 12 + for inst in [M1, H1, C1]: + for grp in [grp0_dn, grp1_dn]: + with pytest.raises(AssertionError): + _find_memberof(inst, member_dn, grp) + + # Step 13 + C1.plugins.disable(name=PLUGIN_MEMBER_OF) + C1.restart() + + # Step 14 + test_groups[0].add_member(member_dn) + repl.wait_while_replication_is_progressing(M1, C1) + + # Step 15 + for i in [M1, H1]: + _find_memberof(i, member_dn, grp0_dn) + with pytest.raises(AssertionError): + _find_memberof(C1, member_dn, grp0_dn) + + # Step 16 + memberof = MemberOfPlugin(C1) + memberof.enable() + C1.restart() + + # Step 17 + for i in [M1, H1]: + _find_memberof(i, member_dn, grp0_dn) + with pytest.raises(AssertionError): + _find_memberof(C1, member_dn, grp0_dn) + + # Step 18 + memberof.fixup(SUFFIX) + # have to sleep instead of task.wait() because the task opens a thread and exits + time.sleep(5) + + # Step 19 + for i in [M1, H1, C1]: + _find_memberof(i, member_dn, grp0_dn) + + +@pytest.mark.skipif(ds_is_older('1.3.7'), reason="Not implemented") +def test_scheme_violation_errors_logged(topo_m2): + """Check that ERR messages are verbose enough, if a member entry + doesn't have the appropriate objectclass to support 'memberof' attribute + + :id: e2af0aaa-447e-4e85-a5ce-57ae66260d0b + :setup: Standalone instance + :steps: + 1. Enable memberofPlugin and set autoaddoc to nsMemberOf + 2. Restart the instance + 3. Add a user without nsMemberOf attribute + 4. Create a group and add the user to the group + 5. Check that user has memberOf attribute + 6. Check the error log for ".*oc_check_allowed_sv.*USER_DN.*memberOf.*not allowed.*" + and ".*schema violation caught - repair operation.*" patterns + :expectedresults: + 1. Should be successful + 2. Should be successful + 3. 
Should be successful + 4. Should be successful + 5. User should have the attribute + 6. Errors should be logged + """ + + inst = topo_m2.ms["supplier1"] + memberof = MemberOfPlugin(inst) + memberof.enable() + memberof.set_autoaddoc('nsMemberOf') + inst.restart() + + users = UserAccounts(inst, SUFFIX) + user_props = TEST_USER_PROPERTIES.copy() + user_props.update({'uid': USER_CN, 'cn': USER_CN, 'sn': USER_CN}) + testuser = users.create(properties=user_props) + testuser.remove('objectclass', 'nsMemberOf') + + groups = Groups(inst, SUFFIX) + testgroup = groups.create(properties={'cn': GROUP_CN}) + + testgroup.add('member', testuser.dn) + + user_memberof_attr = testuser.get_attr_val_utf8('memberof') + assert user_memberof_attr + log.info('memberOf attr value - {}'.format(user_memberof_attr)) + + pattern = ".*oc_check_allowed_sv.*{}.*memberOf.*not allowed.*".format(testuser.dn) + log.info("pattern = %s" % pattern) + assert inst.ds_error_log.match(pattern) + + pattern = ".*schema violation caught - repair operation.*" + assert inst.ds_error_log.match(pattern) + + +@pytest.mark.bz1192099 +def test_memberof_with_changelog_reset(topo_m2): + """Test that replication does not break, after DS stop-start, due to changelog reset + + :id: 60c11636-55a1-4704-9e09-2c6bcc828de4 + :setup: 2 Suppliers + :steps: + 1. On M1 and M2, Enable memberof + 2. On M1, add 999 entries allowing memberof + 3. On M1, add a group with these 999 entries as members + 4. Stop M1 in between, + when add the group memerof is called and before it is finished the + add, so step 4 should be executed after memberof has started and + before the add has finished + 5. Check that replication is working fine + :expectedresults: + 1. memberof should be enabled + 2. Entries should be added + 3. Add operation should start + 4. M1 should be stopped + 5. 
Replication should be working fine + """ + m1 = topo_m2.ms["supplier1"] + m2 = topo_m2.ms["supplier2"] + + log.info("Configure memberof on M1 and M2") + memberof = MemberOfPlugin(m1) + memberof.enable() + memberof.set_autoaddoc('nsMemberOf') + m1.restart() + + memberof = MemberOfPlugin(m2) + memberof.enable() + memberof.set_autoaddoc('nsMemberOf') + m2.restart() + + log.info("On M1, add 999 test entries allowing memberof") + users_list = add_users(topo_m2, 999, DEFAULT_SUFFIX) + + log.info("On M1, add a group with these 999 entries as members") + dic_of_attributes = {'cn': ensure_bytes('testgroup'), + 'objectclass': ensure_list_bytes(['top', 'groupOfNames'])} + + for user in users_list: + dic_of_attributes.setdefault('member', []) + dic_of_attributes['member'].append(user.dn) + + log.info('Adding the test group using async function') + groupdn = 'cn=testgroup,%s' % DEFAULT_SUFFIX + m1.add(Entry((groupdn, dic_of_attributes))) + + #shutdown the server in-between adding the group + m1.stop() + + #start the server + m1.start() + + log.info("Check the log messages for error") + error_msg = "ERR - NSMMReplicationPlugin - ruv_compare_ruv" + assert not m1.ds_error_log.match(error_msg) + + log.info("Check that the replication is working fine both ways, M1 <-> M2") + repl = ReplicationManager(DEFAULT_SUFFIX) + repl.test_replication_topology(topo_m2) + + +def add_container(inst, dn, name, sleep=False): + """Creates container entry""" + conts = nsContainers(inst, dn) + cont = conts.create(properties={'cn': name}) + if sleep: + time.sleep(1) + return cont + + +def add_member(server, cn, subtree): + dn = subtree + users = UserAccounts(server, dn, rdn=None) + users.create(properties={'uid': 'test_%s' % cn, + 'cn': "%s" % cn, + 'sn': 'SN', + 'description': 'member', + 'uidNumber': '1000', + 'gidNumber': '2000', + 'homeDirectory': '/home/testuser' + }) + + +def add_group(server, cn, subtree): + group = Groups(server, subtree, rdn=None) + group.create(properties={'cn': "%s" % cn, + 
'member': ['uid=test_m1,%s' % SUBTREE_1, 'uid=test_m2,%s' % SUBTREE_1], + 'description': 'group'}) + + +def rename_entry(server, cn, from_subtree, to_subtree): + dn = '%s,%s' % (cn, from_subtree) + nrdn = '%s-new' % cn + log.fatal('Renaming user (%s): new %s' % (dn, nrdn)) + server.rename_s(dn, nrdn, newsuperior=to_subtree, delold=0) + + +def _find_memberof_ext(server, user_dn=None, group_dn=None, find_result=True): + assert (server) + assert (user_dn) + assert (group_dn) + ent = server.getEntry(user_dn, ldap.SCOPE_BASE, "(objectclass=*)", ['memberof']) + found = False + if ent.hasAttr('memberof'): + + for val in ent.getValues('memberof'): + server.log.info("!!!!!!! %s: memberof->%s" % (user_dn, val)) + if ensure_str(val) == group_dn: + found = True + break + + if find_result: + assert found + else: + assert (not found) + + +@pytest.mark.ds49161 +def test_memberof_group(topology_st): + """Test memberof does not fail if group is moved into scope + + :id: d1d276ae-6375-4ad8-9437-6a0afcbee7d2 + + :setup: Single instance + + :steps: + 1. Enable memberof plugin and set memberofentryscope + 2. Restart the server + 3. Add test sub-suffixes + 4. Add test users + 5. Add test groups + 6. Check for memberof attribute added to the test users + 7. Rename the group entry + 8. Check the new name is reflected in memberof attribute of user + + :expectedresults: + 1. memberof plugin should be enabled and memberofentryscope should be set + 2. Server should be restarted + 3. Sub-suffixes should be added + 4. Test users should be added + 5. Test groups should be added + 6. memberof attribute should be present in the test users + 7. Group entry should be renamed + 8. 
New group name should be present in memberof attribute of user + """ + + inst = topology_st.standalone + log.info('Enable memberof plugin and set the scope as cn=sub1,dc=example,dc=com') + memberof = MemberOfPlugin(inst) + memberof.enable() + memberof.replace('memberOfEntryScope', SUBTREE_1) + inst.restart() + + add_container(inst, SUFFIX, 'sub1') + add_container(inst, SUFFIX, 'sub2') + add_member(inst, 'm1', SUBTREE_1) + add_member(inst, 'm2', SUBTREE_1) + add_group(inst, 'g1', SUBTREE_1) + add_group(inst, 'g2', SUBTREE_2) + + # _check_memberof + dn1 = '%s,%s' % ('uid=test_m1', SUBTREE_1) + dn2 = '%s,%s' % ('uid=test_m2', SUBTREE_1) + g1 = '%s,%s' % ('cn=g1', SUBTREE_1) + g2 = '%s,%s' % ('cn=g2', SUBTREE_2) + _find_memberof_ext(inst, dn1, g1, True) + _find_memberof_ext(inst, dn2, g1, True) + _find_memberof_ext(inst, dn1, g2, False) + _find_memberof_ext(inst, dn2, g2, False) + + rename_entry(inst, 'cn=g2', SUBTREE_2, SUBTREE_1) + + g2n = '%s,%s' % ('cn=g2-new', SUBTREE_1) + _find_memberof_ext(inst, dn1, g1, True) + _find_memberof_ext(inst, dn2, g1, True) + _find_memberof_ext(inst, dn1, g2n, True) + _find_memberof_ext(inst, dn2, g2n, True) + + +def _config_memberof_entrycache_on_modrdn_failure(server): + + server.plugins.enable(name=PLUGIN_MEMBER_OF) + peoplebase = 'ou=people,%s' % SUFFIX + MEMBEROF_PLUGIN_DN = ('cn=' + PLUGIN_MEMBER_OF + ',cn=plugins,cn=config') + server.modify_s(MEMBEROF_PLUGIN_DN, [(ldap.MOD_REPLACE, 'memberOfAllBackends', b'on'), + (ldap.MOD_REPLACE, 'memberOfEntryScope', peoplebase.encode()), + (ldap.MOD_REPLACE, 'memberOfAutoAddOC', b'nsMemberOf')]) + + +def _disable_auto_oc_memberof(server): + MEMBEROF_PLUGIN_DN = ('cn=' + PLUGIN_MEMBER_OF + ',cn=plugins,cn=config') + server.modify_s(MEMBEROF_PLUGIN_DN, + [(ldap.MOD_REPLACE, 'memberOfAutoAddOC', b'nsContainer')]) + + +@pytest.mark.ds49967 +def test_entrycache_on_modrdn_failure(topology_st): + """This test checks that when a modrdn fails, the destination entry is not returned by a search + 
This could happen in case the destination entry remains in the entry cache + + :id: a4d8ac0b-2448-406a-9dc2-5a72851e30b6 + :setup: Standalone Instance + :steps: + 1. configure memberof to only scope ou=people,SUFFIX + 2. Creates 10 users + 3. Create groups0 (in peoplebase) that contain user0 and user1 + 4. Check user0 and user1 have memberof=group0.dn + 5. Create group1 (OUT peoplebase) that contain user0 and user1 + 6. Check user0 and user1 have NOT memberof=group1.dn + 7. Move group1 IN peoplebase and check users0 and user1 HAVE memberof=group1.dn + 8. Create group2 (OUT peoplebase) that contain user2 and user3. Group2 contains a specific description value + 9. Check user2 and user3 have NOT memberof=group2.dn + 10. configure memberof so that added objectclass does not allow 'memberof' attribute + 11. Move group2 IN peoplebase and check move failed OPERATIONS_ERROR (because memberof failed) + 12. Search all groups and check that the group, having the specific description value, + has the original DN of group2.dn + :expectedresults: + 1. should succeed + 2. should succeed + 3. should succeed + 4. should succeed + 5. should succeed + 6. should succeed + 7. should succeed + 8. should succeed + 9. should succeed + 10. should succeed + 11. should fail OPERATION_ERROR because memberof plugin fails to add 'memberof' to members. + 12. 
should succeed + + """ + + # only scopes peoplebase + _config_memberof_entrycache_on_modrdn_failure(topology_st.standalone) + topology_st.standalone.restart(timeout=10) + + # create 10 users + peoplebase = 'ou=people,%s' % SUFFIX + for i in range(10): + cn = 'user%d' % i + dn = 'cn=%s,%s' % (cn, peoplebase) + log.fatal('Adding user (%s): ' % dn) + topology_st.standalone.add_s(Entry((dn, {'objectclass': ['top', 'person'], + 'sn': 'user_%s' % cn, + 'description': 'add on standalone'}))) + + # Check that members of group0 (in the scope) have 'memberof + group0_dn = 'cn=group_in0,%s' % peoplebase + topology_st.standalone.add_s(Entry((group0_dn, {'objectclass': ['top', 'groupofnames'], + 'member': [ + 'cn=user0,%s' % peoplebase, + 'cn=user1,%s' % peoplebase, + ], + 'description': 'mygroup'}))) + + # Check the those entries have memberof with group0 + for i in range(2): + user_dn = 'cn=user%d,%s' % (i, peoplebase) + ent = topology_st.standalone.getEntry(user_dn, ldap.SCOPE_BASE, "(objectclass=*)", ['memberof']) + assert ent.hasAttr('memberof') + found = False + for val in ent.getValues('memberof'): + topology_st.standalone.log.info("!!!!!!! %s: memberof->%s (vs %s)" % (user_dn, val, group0_dn.encode().lower())) + if val.lower() == group0_dn.encode().lower(): + found = True + break + assert found + + # Create a group1 out of the scope + group1_dn = 'cn=group_out1,%s' % SUFFIX + topology_st.standalone.add_s(Entry((group1_dn, {'objectclass': ['top', 'groupofnames'], + 'member': [ + 'cn=user0,%s' % peoplebase, + 'cn=user1,%s' % peoplebase, + ], + 'description': 'mygroup'}))) + + # Check the those entries have not memberof with group1 + for i in range(2): + user_dn = 'cn=user%d,%s' % (i, peoplebase) + ent = topology_st.standalone.getEntry(user_dn, ldap.SCOPE_BASE, "(objectclass=*)", ['memberof']) + assert ent.hasAttr('memberof') + found = False + for val in ent.getValues('memberof'): + topology_st.standalone.log.info("!!!!!!! 
%s: memberof->%s (vs %s)" % (user_dn, val, group1_dn.encode().lower())) + if val.lower() == group1_dn.encode().lower(): + found = True + break + assert not found + + # move group1 into the scope and check user0 and user1 are memberof group1 + topology_st.standalone.rename_s(group1_dn, 'cn=group_in1', newsuperior=peoplebase, delold=0) + new_group1_dn = 'cn=group_in1,%s' % peoplebase + for i in range(2): + user_dn = 'cn=user%d,%s' % (i, peoplebase) + ent = topology_st.standalone.getEntry(user_dn, ldap.SCOPE_BASE, "(objectclass=*)", ['memberof']) + assert ent.hasAttr('memberof') + found = False + for val in ent.getValues('memberof'): + topology_st.standalone.log.info("!!!!!!! %s: memberof->%s (vs %s)" % (user_dn, val, new_group1_dn.encode().lower())) + if val.lower() == new_group1_dn.encode().lower(): + found = True + break + assert found + + # Create a group2 out of the scope with a SPECIFIC description value + entry_description = "this is to check that the entry having this description has the appropriate DN" + group2_dn = 'cn=group_out2,%s' % SUFFIX + topology_st.standalone.add_s(Entry((group2_dn, {'objectclass': ['top', 'groupofnames'], + 'member': [ + 'cn=user2,%s' % peoplebase, + 'cn=user3,%s' % peoplebase, + ], + 'description': entry_description}))) + + # Check the those entries have not memberof with group2 + for i in (2, 3): + user_dn = 'cn=user%d,%s' % (i, peoplebase) + ent = topology_st.standalone.getEntry(user_dn, ldap.SCOPE_BASE, "(objectclass=*)", ['memberof']) + assert not ent.hasAttr('memberof') + + # memberof will not add the missing objectclass + _disable_auto_oc_memberof(topology_st.standalone) + topology_st.standalone.restart(timeout=10) + + # move group2 into the scope and check it fails + try: + topology_st.standalone.rename_s(group2_dn, 'cn=group_in2', newsuperior=peoplebase, delold=0) + topology_st.standalone.log.info("This is unexpected, modrdn should fail as the member entry have not the appropriate objectclass") + assert False + except 
ldap.OBJECT_CLASS_VIOLATION: + pass + + # retrieve the entry having the specific description value + # check that the entry DN is the original group2 DN + ents = topology_st.standalone.search_s(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE, '(cn=gr*)') + found = False + for ent in ents: + topology_st.standalone.log.info("retrieve: %s with desc=%s" % (ent.dn, ent.getValue('description'))) + if ent.getValue('description') == entry_description.encode(): + found = True + assert ent.dn == group2_dn + assert found + + +def _config_memberof_silent_memberof_failure(server): + _config_memberof_entrycache_on_modrdn_failure(server) + + +def test_silent_memberof_failure(topology_st): + """This test checks that if during a MODRDN, the memberof plugin fails + then MODRDN also fails + + :id: 095aee01-581c-43dd-a241-71f9631a18bb + :setup: Standalone Instance + :steps: + 1. configure memberof to only scope ou=people,SUFFIX + 2. Do some cleanup and Creates 10 users + 3. Create groups0 (IN peoplebase) that contain user0 and user1 + 4. Check user0 and user1 have memberof=group0.dn + 5. Create group1 (OUT peoplebase) that contain user0 and user1 + 6. Check user0 and user1 have NOT memberof=group1.dn + 7. Move group1 IN peoplebase and check users0 and user1 HAVE memberof=group1.dn + 8. Create group2 (OUT peoplebase) that contain user2 and user3. + 9. Check user2 and user3 have NOT memberof=group2.dn + 10. configure memberof so that added objectclass does not allow 'memberof' attribute + 11. Move group2 IN peoplebase and check move failed OPERATIONS_ERROR (because memberof failed) + 12. Check user2 and user3 have NOT memberof=group2.dn + 13. ADD group3 (IN peoplebase) with user4 and user5 members and check add failed OPERATIONS_ERROR (because memberof failed) + 14. Check user4 and user5 have NOT memberof=group2.dn + :expectedresults: + 1. should succeed + 2. should succeed + 3. should succeed + 4. should succeed + 5. should succeed + 6. should succeed + 7. should succeed + 8. should succeed + 9. 
should succeed + 10. should succeed + 11. should fail OPERATION_ERROR because memberof plugin fails to add 'memberof' to members. + 12. should succeed + 13. should fail OPERATION_ERROR because memberof plugin fails to add 'memberof' to members + 14. should succeed + """ + # only scopes peoplebase + _config_memberof_silent_memberof_failure(topology_st.standalone) + topology_st.standalone.restart(timeout=10) + + # first do some cleanup + peoplebase = 'ou=people,%s' % SUFFIX + for i in range(10): + cn = 'user%d' % i + dn = 'cn=%s,%s' % (cn, peoplebase) + topology_st.standalone.delete_s(dn) + topology_st.standalone.delete_s('cn=group_in0,%s' % peoplebase) + topology_st.standalone.delete_s('cn=group_in1,%s' % peoplebase) + topology_st.standalone.delete_s('cn=group_out2,%s' % SUFFIX) + + # create 10 users + for i in range(10): + cn = 'user%d' % i + dn = 'cn=%s,%s' % (cn, peoplebase) + log.fatal('Adding user (%s): ' % dn) + topology_st.standalone.add_s(Entry((dn, {'objectclass': ['top', 'person'], + 'sn': 'user_%s' % cn, + 'description': 'add on standalone'}))) + + # Check that members of group0 (in the scope) have 'memberof + group0_dn = 'cn=group_in0,%s' % peoplebase + topology_st.standalone.add_s(Entry((group0_dn, {'objectclass': ['top', 'groupofnames'], + 'member': [ + 'cn=user0,%s' % peoplebase, + 'cn=user1,%s' % peoplebase, + ], + 'description': 'mygroup'}))) + + # Check the those entries have memberof with group0 + for i in range(2): + user_dn = 'cn=user%d,%s' % (i, peoplebase) + ent = topology_st.standalone.getEntry(user_dn, ldap.SCOPE_BASE, "(objectclass=*)", ['memberof']) + assert ent.hasAttr('memberof') + found = False + for val in ent.getValues('memberof'): + topology_st.standalone.log.info("!!!!!!! 
%s: memberof->%s (vs %s)" % (user_dn, val, group0_dn.encode().lower())) + if val.lower() == group0_dn.encode().lower(): + found = True + break + assert found + + # Create a group1 out of the scope + group1_dn = 'cn=group_out1,%s' % SUFFIX + topology_st.standalone.add_s(Entry((group1_dn, {'objectclass': ['top', 'groupofnames'], + 'member': [ + 'cn=user0,%s' % peoplebase, + 'cn=user1,%s' % peoplebase, + ], + 'description': 'mygroup'}))) + + # Check the those entries have not memberof with group1 + for i in range(2): + user_dn = 'cn=user%d,%s' % (i, peoplebase) + ent = topology_st.standalone.getEntry(user_dn, ldap.SCOPE_BASE, "(objectclass=*)", ['memberof']) + assert ent.hasAttr('memberof') + found = False + for val in ent.getValues('memberof'): + topology_st.standalone.log.info("!!!!!!! %s: memberof->%s (vs %s)" % (user_dn, val, group1_dn.encode().lower())) + if val.lower() == group1_dn.encode().lower(): + found = True + break + assert not found + + # move group1 into the scope and check user0 and user1 are memberof group1 + topology_st.standalone.rename_s(group1_dn, 'cn=group_in1', newsuperior=peoplebase, delold=0) + new_group1_dn = 'cn=group_in1,%s' % peoplebase + for i in range(2): + user_dn = 'cn=user%d,%s' % (i, peoplebase) + ent = topology_st.standalone.getEntry(user_dn, ldap.SCOPE_BASE, "(objectclass=*)", ['memberof']) + assert ent.hasAttr('memberof') + found = False + for val in ent.getValues('memberof'): + topology_st.standalone.log.info("!!!!!!! 
%s: memberof->%s (vs %s)" % (user_dn, val, new_group1_dn.encode().lower())) + if val.lower() == new_group1_dn.encode().lower(): + found = True + break + assert found + + # Create a group2 out of the scope + group2_dn = 'cn=group_out2,%s' % SUFFIX + topology_st.standalone.add_s(Entry((group2_dn, {'objectclass': ['top', 'groupofnames'], + 'member': [ + 'cn=user2,%s' % peoplebase, + 'cn=user3,%s' % peoplebase, + ], + 'description': 'mygroup'}))) + + # Check the those entries have not memberof with group2 + for i in (2, 3): + user_dn = 'cn=user%d,%s' % (i, peoplebase) + ent = topology_st.standalone.getEntry(user_dn, ldap.SCOPE_BASE, "(objectclass=*)", ['memberof']) + assert not ent.hasAttr('memberof') + + # memberof will not add the missing objectclass + _disable_auto_oc_memberof(topology_st.standalone) + topology_st.standalone.restart(timeout=10) + + # move group2 into the scope and check it fails + try: + topology_st.standalone.rename_s(group2_dn, 'cn=group_in2', newsuperior=peoplebase, delold=0) + topology_st.standalone.log.info("This is unexpected, modrdn should fail as the member entry have not the appropriate objectclass") + assert False + except ldap.OBJECT_CLASS_VIOLATION: + pass + + # Check the those entries have not memberof + for i in (2, 3): + user_dn = 'cn=user%d,%s' % (i, peoplebase) + ent = topology_st.standalone.getEntry(user_dn, ldap.SCOPE_BASE, "(objectclass=*)", ['memberof']) + topology_st.standalone.log.info("Should assert %s has memberof is %s" % (user_dn, ent.hasAttr('memberof'))) + assert not ent.hasAttr('memberof') + + # Create a group3 in the scope + group3_dn = 'cn=group3_in,%s' % peoplebase + try: + topology_st.standalone.add_s(Entry((group3_dn, {'objectclass': ['top', 'groupofnames'], + 'member': [ + 'cn=user4,%s' % peoplebase, + 'cn=user5,%s' % peoplebase, + ], + 'description': 'mygroup'}))) + topology_st.standalone.log.info("This is unexpected, ADD should fail as the member entry have not the appropriate objectclass") + assert False + 
except ldap.OBJECT_CLASS_VIOLATION: + pass + except ldap.OPERATIONS_ERROR: + pass + + # Check the those entries do not have memberof + for i in (4, 5): + user_dn = 'cn=user%d,%s' % (i, peoplebase) + ent = topology_st.standalone.getEntry(user_dn, ldap.SCOPE_BASE, "(objectclass=*)", ['memberof']) + topology_st.standalone.log.info("Should assert %s has memberof is %s" % (user_dn, ent.hasAttr('memberof'))) + assert not ent.hasAttr('memberof') + +if __name__ == '__main__': + # Run isolated + # -s for DEBUG mode + CURRENT_FILE = os.path.realpath(__file__) + pytest.main("-s %s" % CURRENT_FILE) + diff --git a/dirsrvtests/tests/suites/memory_leaks/MMR_double_free_test.py b/dirsrvtests/tests/suites/memory_leaks/MMR_double_free_test.py new file mode 100644 index 0000000..a1724cb --- /dev/null +++ b/dirsrvtests/tests/suites/memory_leaks/MMR_double_free_test.py @@ -0,0 +1,166 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2020 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. 
+# --- END COPYRIGHT BLOCK --- +# +import pytest +from lib389.replica import Replicas, Replica +from lib389.tasks import * +from lib389.utils import * +from lib389.paths import Paths +from lib389.topologies import topology_m2 + +from lib389._constants import (DEFAULT_SUFFIX, DN_CONFIG) +from lib389.properties import (REPLICA_PURGE_DELAY, REPLICA_PURGE_INTERVAL) + +from lib389.idm.user import UserAccounts + +pytestmark = pytest.mark.tier2 + +DEBUGGING = os.getenv("DEBUGGING", default=False) +if DEBUGGING: + logging.getLogger(__name__).setLevel(logging.DEBUG) +else: + logging.getLogger(__name__).setLevel(logging.INFO) +log = logging.getLogger(__name__) + +ds_paths = Paths() + +@pytest.fixture(scope="module") +def topology_setup(topology_m2): + """Configure the topology with purge parameters and enable audit logging + + - configure replica purge delay and interval on supplier1 and supplier2 + - enable audit log on supplier1 and supplier2 + - restart supplier1 and supplier2 + """ + m1 = topology_m2.ms["supplier1"] + m2 = topology_m2.ms["supplier2"] + + replica1 = Replicas(m1).get(DEFAULT_SUFFIX) + replica2 = Replicas(m2).get(DEFAULT_SUFFIX) + + replica1.set('nsDS5ReplicaPurgeDelay','5') + replica2.set('nsDS5ReplicaPurgeDelay','5') + assert replica1.present('nsDS5ReplicaPurgeDelay') + assert replica2.present('nsDS5ReplicaPurgeDelay') + replica1.display_attr('nsDS5ReplicaPurgeDelay') + replica2.display_attr('nsDS5ReplicaPurgeDelay') + + replica1.set('nsDS5ReplicaTombstonePurgeInterval', '5') + replica2.set('nsDS5ReplicaTombstonePurgeInterval', '5') + assert replica1.present('nsDS5ReplicaTombstonePurgeInterval') + assert replica2.present('nsDS5ReplicaTombstonePurgeInterval') + replica1.display_attr('nsDS5ReplicaTombstonePurgeInterval') + replica2.display_attr('nsDS5ReplicaTombstonePurgeInterval') + + + m1.config.set('nsslapd-auditlog-logging-enabled', 'on') + m2.config.set('nsslapd-auditlog-logging-enabled', 'on') + m1.restart() + m2.restart() + + +@pytest.mark.skipif(not 
ds_paths.asan_enabled, reason="Don't run if ASAN is not enabled") +@pytest.mark.ds48226 +@pytest.mark.bz1243970 +@pytest.mark.bz1262363 +def test_MMR_double_free(topology_m2, topology_setup, timeout=5): + """Reproduce conditions where a double free occurs and check it does not make + the server crash + + :id: 91580b1c-ad10-49bc-8aed-402edac59f46 + :setup: replicated topology - purge delay and purge interval are configured + :steps: + 1. create an entry on supplier1 + 2. modify the entry with description add + 3. check the entry is correctly replicated on supplier2 + 4. stop supplier2 + 5. delete the entry's description on supplier1 + 6. stop supplier1 + 7. start supplier2 + 8. delete the entry's description on supplier2 + 9. add an entry's description on supplier2 + 10. wait the purge delay duration + 11. add again an entry's description on supplier2 + :expectedresults: + 1. entry exists on supplier1 + 2. modification is effective + 3. entry exists on supplier2 and modification is effective + 4. supplier2 is stopped + 5. description is removed from entry on supplier1 + 6. supplier1 is stopped + 7. supplier2 is started - not synchronized with supplier1 + 8. description is removed from entry on supplier2 (same op should be performed too by replication mechanism) + 9. description to entry is added on supplier2 + 10. Purge delay has expired - changes are erased + 11.
description to entry is added again on supplier2 + """ + name = 'test_entry' + + entry_m1 = UserAccounts(topology_m2.ms["supplier1"], DEFAULT_SUFFIX) + entry = entry_m1.create(properties={ + 'uid': name, + 'sn': name, + 'cn': name, + 'uidNumber': '1001', + 'gidNumber': '1001', + 'homeDirectory': '/home/test_entry', + 'userPassword': 'test_entry_pwd' + }) + + log.info('First do an update that is replicated') + entry.add('description', '5') + + log.info('Check the update in the replicated entry') + entry_m2 = UserAccounts(topology_m2.ms["supplier2"], DEFAULT_SUFFIX) + + success = 0 + for i in range(0, timeout): + try: + entry_repl = entry_m2.get(name) + out = entry_repl.display_attr('description') + if len(out) > 0: + success = 1 + break + except: + time.sleep(1) + + assert success + + log.info('Stop M2 so that it will not receive the next update') + topology_m2.ms["supplier2"].stop(10) + + log.info('Perform a del operation that is not replicated') + entry.remove('description', '5') + + log.info("Stop M1 so that it will keep del '5' that is unknown from supplier2") + topology_m2.ms["supplier1"].stop(10) + + log.info('start M2 to do the next updates') + topology_m2.ms["supplier2"].start() + + log.info("del 'description' by '5'") + entry_repl.remove('description', '5') + + log.info("add 'description' by '5'") + entry_repl.add('description', '5') + + log.info('sleep of purge delay so that the next update will purge the CSN_7') + time.sleep(6) + + log.info("add 'description' by '6' that purge the state info") + entry_repl.add('description', '6') + + log.info('Restart supplier1') + topology_m2.ms["supplier1"].start(30) + + +if __name__ == '__main__': + # Run isolated + # -s for DEBUG mode + CURRENT_FILE = os.path.realpath(__file__) + pytest.main("-s %s" % CURRENT_FILE) diff --git a/dirsrvtests/tests/suites/memory_leaks/__init__.py b/dirsrvtests/tests/suites/memory_leaks/__init__.py new file mode 100644 index 0000000..c94c077 --- /dev/null +++ 
b/dirsrvtests/tests/suites/memory_leaks/__init__.py @@ -0,0 +1,3 @@ +""" + :Requirement: 389-ds-base: Test Memory Leaks +""" diff --git a/dirsrvtests/tests/suites/memory_leaks/allids_search_test.py b/dirsrvtests/tests/suites/memory_leaks/allids_search_test.py new file mode 100644 index 0000000..70b185f --- /dev/null +++ b/dirsrvtests/tests/suites/memory_leaks/allids_search_test.py @@ -0,0 +1,70 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2023 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. +# --- END COPYRIGHT BLOCK --- +# +import pytest +from lib389.dbgen import dbgen_groups +from lib389.tasks import ImportTask +from lib389.utils import * +from lib389.paths import Paths +from lib389.topologies import topology_st as topo +from lib389._constants import * + +pytestmark = pytest.mark.tier2 + +logging.getLogger(__name__).setLevel(logging.DEBUG) +log = logging.getLogger(__name__) + +ds_paths = Paths() + + +@pytest.mark.skipif(not ds_paths.asan_enabled, reason="Don't run if ASAN is not enabled") +def test_allids_search(topo): + """Add 100 groups, and run a search with a special filter that triggers a memleak. + + :id: 8aeca831-e671-4203-9d50-2bfe9567bec7 + :setup: Standalone instance + :steps: + 1. Add 100 test groups + 2. Issue a search with a special filter + 3. There should be no leak + :expectedresults: + 1. 100 test groups should be added + 2. Search should be successful + 3. 
Success + """ + + inst = topo.standalone + + import_ldif = inst.ldifdir + '/import_100_users.ldif' + props = { + "name": "grp", + "suffix": DEFAULT_SUFFIX, + "parent": DEFAULT_SUFFIX, + "number": 100, + "numMembers": 0, + "createMembers": False, + "memberParent": DEFAULT_SUFFIX, + "membershipAttr": "member", + } + dbgen_groups(inst, import_ldif, props) + + task = ImportTask(inst) + task.import_suffix_from_ldif(ldiffile=import_ldif, suffix=DEFAULT_SUFFIX) + task.wait() + + inst.search_s(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE, + '(&(objectClass=groupOfNames)(!(objectClass=nsTombstone))(member=doesnt_exist))') + + +if __name__ == '__main__': + # Run isolated + # -s for DEBUG mode + CURRENT_FILE = os.path.realpath(__file__) + pytest.main("-s %s" % CURRENT_FILE) + + diff --git a/dirsrvtests/tests/suites/memory_leaks/range_search_test.py b/dirsrvtests/tests/suites/memory_leaks/range_search_test.py new file mode 100644 index 0000000..f228ba2 --- /dev/null +++ b/dirsrvtests/tests/suites/memory_leaks/range_search_test.py @@ -0,0 +1,71 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2016 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. +# --- END COPYRIGHT BLOCK --- +# +import pytest +from lib389.tasks import * +from lib389.utils import * +from lib389.paths import Paths +from lib389.topologies import topology_st +from lib389._constants import * + +pytestmark = pytest.mark.tier2 + +logging.getLogger(__name__).setLevel(logging.DEBUG) +log = logging.getLogger(__name__) + +ds_paths = Paths() + + +@pytest.mark.skipif(not ds_paths.asan_enabled, reason="Don't run if ASAN is not enabled") +def test_range_search(topology_st): + """Add 100 entries, and run a range search. When we encounter an error + we still need to disable valgrind before exiting + + :id: aadccf78-a2a8-48cc-8769-4764c7966189 + :setup: Standalone instance, Retro changelog file, + Enabled Valgrind if the system doesn't have asan + :steps: + 1. 
Add 100 test entries + 2. Issue a range search with a changenumber filter + 3. There should be no leak + :expectedresults: + 1. 100 test entries should be added + 2. Search should be successful + 3. Success + """ + + log.info('Running test_range_search...') + topology_st.standalone.plugins.enable(name=PLUGIN_RETRO_CHANGELOG) + topology_st.standalone.restart() + + success = True + + # Add 100 test entries + for idx in range(1, 100): + idx = str(idx) + USER_DN = 'uid=user' + idx + ',' + DEFAULT_SUFFIX + try: + topology_st.standalone.add_s(Entry((USER_DN, {'objectclass': "top extensibleObject".split(), + 'uid': 'user' + idx}))) + except ldap.LDAPError as e: + log.fatal('test_range_search: Failed to add test user ' + USER_DN + ': error ' + e.message['desc']) + success = False + time.sleep(1) + + # Issue range search + assert success + entries = topology_st.standalone.search_s(RETROCL_SUFFIX, ldap.SCOPE_SUBTREE, + '(&(changenumber>=74)(changenumber<=84))') + assert entries + + +if __name__ == '__main__': + # Run isolated + # -s for DEBUG mode + CURRENT_FILE = os.path.realpath(__file__) + pytest.main("-s %s" % CURRENT_FILE) diff --git a/dirsrvtests/tests/suites/migration/__init__.py b/dirsrvtests/tests/suites/migration/__init__.py new file mode 100644 index 0000000..120786b --- /dev/null +++ b/dirsrvtests/tests/suites/migration/__init__.py @@ -0,0 +1,3 @@ +""" + :Requirement: 389-ds-base: DataBase Import +""" \ No newline at end of file diff --git a/dirsrvtests/tests/suites/migration/export_data_test.py b/dirsrvtests/tests/suites/migration/export_data_test.py new file mode 100644 index 0000000..3ad820a --- /dev/null +++ b/dirsrvtests/tests/suites/migration/export_data_test.py @@ -0,0 +1,82 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2019 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. 
+# --- END COPYRIGHT BLOCK --- +# + +import logging +import pytest +import os + +from lib389._constants import * +from lib389.topologies import topology_st +from lib389.idm.user import UserAccounts, TEST_USER_PROPERTIES + +pytestmark = pytest.mark.tier3 + +logging.getLogger(__name__).setLevel(logging.DEBUG) +log = logging.getLogger(__name__) + + +@pytest.mark.skipif(os.getenv('MIGRATION') is None, reason="This test is meant to execute in specific test environment") +def test_export_data_from_source_host(topology_st): + """Prepare export file for migration using a single instance of Directory Server + + :id: 47f97d87-60f7-4f80-a72b-e7daa1de0061 + :setup: Standalone + :steps: + 1. Add a test user with employeeNumber and telephoneNumber + 2. Add a test user with escaped DN + 3. Create export file + 4. Check if values of searched attributes are present in exported file + :expectedresults: + 1. Success + 2. Success + 3. Success + 4. Success + """ + + standalone = topology_st.standalone + output_file = os.path.join(topology_st.standalone.ds_paths.ldif_dir, "migration_export.ldif") + + log.info("Add a test user") + users = UserAccounts(standalone, DEFAULT_SUFFIX) + test_user = users.create(properties=TEST_USER_PROPERTIES) + test_user.add('employeeNumber', '1000') + test_user.add('telephoneNumber', '1234567890') + + assert test_user.present('employeeNumber', value='1000') + assert test_user.present('telephoneNumber', value='1234567890') + + log.info("Creating user with escaped DN") + users.create(properties={ + 'uid': '\\#\\,\\+"\\\\>:\\=\\<\\<\\>\\;/', + 'cn': 'tuser2', + 'sn': 'user', + 'uidNumber': '1000', + 'gidNumber': '2000', + 'homeDirectory': '/home/tuser2', + }) + + log.info("Exporting LDIF offline...") + standalone.stop() + standalone.db2ldif(bename=DEFAULT_BENAME, suffixes=[DEFAULT_SUFFIX], + excludeSuffixes=None, encrypt=None, repl_data=None, outputfile=output_file) + standalone.start() + + log.info("Check that value of attribute is present in the exported 
file") + with open(output_file, 'r') as ldif_file: + ldif = ldif_file.read() + assert 'employeeNumber: 1000' in ldif + assert 'telephoneNumber: 1234567890' in ldif + assert 'uid: \\#\\,\\+"\\\\>:\\=\\<\\<\\>\\;/' in ldif + + +if __name__ == '__main__': + # Run isolated + # -s for DEBUG mode + CURRENT_FILE = os.path.realpath(__file__) + pytest.main(["-s", CURRENT_FILE]) diff --git a/dirsrvtests/tests/suites/migration/import_data_test.py b/dirsrvtests/tests/suites/migration/import_data_test.py new file mode 100644 index 0000000..0f03051 --- /dev/null +++ b/dirsrvtests/tests/suites/migration/import_data_test.py @@ -0,0 +1,70 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2019 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. +# --- END COPYRIGHT BLOCK --- +# + +import logging +import pytest +import os + +from lib389._constants import * +from lib389.topologies import topology_st +from lib389.idm.user import UserAccounts + +pytestmark = pytest.mark.tier3 + +logging.getLogger(__name__).setLevel(logging.DEBUG) +log = logging.getLogger(__name__) + + +@pytest.mark.skipif(os.getenv('MIGRATION') is None, reason="This test is meant to execute in specific test environment") +def test_import_data_to_target_host(topology_st): + """Import file created in export_data_test.py using a single instance of Directory Server + + :id: 7e896b0c-6838-49c7-8e1d-5e8114f5fb02 + :setup: Standalone + :steps: + 1. Check that attribute values are present in input file + 2. Import input file + 3. Check imported user data + :expectedresults: + 1. Success + 2. Success + 3. 
Success + """ + + standalone = topology_st.standalone + input_file = os.path.join(topology_st.standalone.ds_paths.ldif_dir, "migration_export.ldif") + + log.info("Check that value of attribute is present in the exported file") + with open(input_file, 'r') as ldif_file: + ldif = ldif_file.read() + assert 'employeeNumber: 1000' in ldif + assert 'telephoneNumber: 1234567890' in ldif + assert 'uid: \\#\\,\\+"\\\\>:\\=\\<\\<\\>\\;/' in ldif + + log.info('Stopping the server and running offline import...') + standalone.stop() + assert standalone.ldif2db(bename=DEFAULT_BENAME, suffixes=[DEFAULT_SUFFIX], encrypt=None, excludeSuffixes=None, + import_file=input_file) + standalone.start() + + log.info("Check imported user data") + users = UserAccounts(standalone, DEFAULT_SUFFIX) + test_user = users.get('testuser') + assert test_user.present('employeeNumber', value='1000') + assert test_user.present('telephoneNumber', value='1234567890') + test_user = users.get('\\#\\,\\+"\\\\>:\\=\\<\\<\\>\\;/') + assert test_user.present('cn', value='tuser2') + assert test_user.present('uid', value='\\#\\,\\+"\\\\>:\\=\\<\\<\\>\\;/') + + +if __name__ == '__main__': + # Run isolated + # -s for DEBUG mode + CURRENT_FILE = os.path.realpath(__file__) + pytest.main(["-s", CURRENT_FILE]) diff --git a/dirsrvtests/tests/suites/monitor/__init__.py b/dirsrvtests/tests/suites/monitor/__init__.py new file mode 100644 index 0000000..080d1ac --- /dev/null +++ b/dirsrvtests/tests/suites/monitor/__init__.py @@ -0,0 +1,3 @@ +""" + :Requirement: 389-ds-base: Status - Performance Monitor +""" diff --git a/dirsrvtests/tests/suites/monitor/db_locks_monitor_test.py b/dirsrvtests/tests/suites/monitor/db_locks_monitor_test.py new file mode 100644 index 0000000..033ee1a --- /dev/null +++ b/dirsrvtests/tests/suites/monitor/db_locks_monitor_test.py @@ -0,0 +1,322 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2021 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). 
+# See LICENSE for details. +# --- END COPYRIGHT BLOCK --- +# +import logging +import pytest +import datetime +import subprocess +from multiprocessing import Process, Queue +from lib389 import pid_from_file +from lib389.utils import ldap, os, ds_is_older, get_default_db_lib +from lib389._constants import DEFAULT_SUFFIX, ReplicaRole +from lib389.cli_base import LogCapture +from lib389.idm.user import UserAccounts +from lib389.idm.organizationalunit import OrganizationalUnits +from lib389.tasks import AccessLog +from lib389.backend import Backends +from lib389.ldclt import Ldclt +from lib389.dbgen import dbgen_users +from lib389.tasks import ImportTask +from lib389.index import Indexes +from lib389.plugins import AttributeUniquenessPlugin +from lib389.config import BDB_LDBMConfig +from lib389.monitor import MonitorLDBM +from lib389.topologies import create_topology, _remove_ssca_db + +pytestmark = pytest.mark.tier2 +db_locks_monitoring_ack = pytest.mark.skipif(not os.environ.get('DB_LOCKS_MONITORING_ACK', False), + reason="DB locks monitoring tests may take hours if the feature is not present or another failure exists. 
" + "Also, the feature requires a big amount of space as we set nsslapd-db-locks to 1300000.") + +DEBUGGING = os.getenv('DEBUGGING', default=False) +if DEBUGGING: + logging.getLogger(__name__).setLevel(logging.DEBUG) +else: + logging.getLogger(__name__).setLevel(logging.INFO) +log = logging.getLogger(__name__) + + +def _kill_ns_slapd(inst): + pid = str(pid_from_file(inst.ds_paths.pid_file)) + cmd = ['kill', '-9', pid] + subprocess.Popen(cmd, stdout=subprocess.PIPE) + + +@pytest.fixture(scope="function") +def topology_st_fn(request): + """Create DS standalone instance for each test case""" + + topology = create_topology({ReplicaRole.STANDALONE: 1}) + + def fin(): + # Kill the hanging process at the end of test to prevent failures in the following tests + if DEBUGGING: + [_kill_ns_slapd(inst) for inst in topology] + else: + [_kill_ns_slapd(inst) for inst in topology] + assert _remove_ssca_db(topology) + [inst.stop() for inst in topology if inst.exists()] + [inst.delete() for inst in topology if inst.exists()] + request.addfinalizer(fin) + + topology.logcap = LogCapture() + return topology + + +@pytest.fixture(scope="function") +def setup_attruniq_index_be_import(topology_st_fn): + """Enable Attribute Uniqueness, disable indexes and + import 120000 entries to the default backend + """ + inst = topology_st_fn.standalone + + inst.config.loglevel([AccessLog.DEFAULT, AccessLog.INTERNAL], service='access') + inst.config.set('nsslapd-plugin-logging', 'on') + inst.restart() + + attruniq = AttributeUniquenessPlugin(inst, dn="cn=attruniq,cn=plugins,cn=config") + attruniq.create(properties={'cn': 'attruniq'}) + for cn in ['uid', 'cn', 'sn', 'uidNumber', 'gidNumber', 'homeDirectory', 'givenName', 'description']: + attruniq.add_unique_attribute(cn) + attruniq.add_unique_subtree(DEFAULT_SUFFIX) + attruniq.enable_all_subtrees() + attruniq.enable() + + indexes = Indexes(inst) + for cn in ['uid', 'cn', 'sn', 'uidNumber', 'gidNumber', 'homeDirectory', 'givenName', 'description']: + 
indexes.ensure_state(properties={ + 'cn': cn, + 'nsSystemIndex': 'false', + 'nsIndexType': 'none'}) + + bdb_config = BDB_LDBMConfig(inst) + bdb_config.replace("nsslapd-db-locks", "130000") + inst.restart() + + ldif_dir = inst.get_ldif_dir() + import_ldif = ldif_dir + '/perf_import.ldif' + + # Valid online import + import_task = ImportTask(inst) + dbgen_users(inst, 120000, import_ldif, DEFAULT_SUFFIX, entry_name="userNew") + import_task.import_suffix_from_ldif(ldiffile=import_ldif, suffix=DEFAULT_SUFFIX) + import_task.wait() + assert import_task.is_complete() + + +def create_user_wrapper(q, users): + try: + users.create_test_user() + except Exception as ex: + q.put(ex) + + +def spawn_worker_thread(function, users, log, timeout, info): + log.info(f"Starting the thread - {info}") + q = Queue() + p = Process(target=function, args=(q,users,)) + p.start() + + log.info(f"Waiting for {timeout} seconds for the thread to finish") + p.join(timeout) + + if p.is_alive(): + log.info("Killing the thread as it's still running") + p.terminate() + p.join() + raise RuntimeError(f"Function call was aborted: {info}") + result = q.get() + if isinstance(result, Exception): + raise result + else: + return result + + +@db_locks_monitoring_ack +@pytest.mark.parametrize("lock_threshold", [("70"), ("80"), ("95")]) +def test_exhaust_db_locks_basic(topology_st_fn, setup_attruniq_index_be_import, lock_threshold): + """Test that when all of the locks are exhausted the instance still working + and database is not corrupted + + :id: 299108cc-04d8-4ddc-b58e-99157fccd643 + :customerscenario: True + :parametrized: yes + :setup: Standalone instance with Attr Uniq plugin and user indexes disabled + :steps: 1. Set nsslapd-db-locks to 11000 + 2. Check that we stop acquiring new locks when the threshold is reached + 3. Check that we can regulate a pause interval for DB locks monitoring thread + 4. Make sure the feature works for different backends on the same suffix + :expectedresults: + 1. Success + 2. 
Success + 3. Success + 4. Success + """ + + inst = topology_st_fn.standalone + ADDITIONAL_SUFFIX = 'ou=newpeople,dc=example,dc=com' + + backends = Backends(inst) + backends.create(properties={'nsslapd-suffix': ADDITIONAL_SUFFIX, + 'name': ADDITIONAL_SUFFIX[-3:]}) + ous = OrganizationalUnits(inst, DEFAULT_SUFFIX) + ous.create(properties={'ou': 'newpeople'}) + + bdb_config = BDB_LDBMConfig(inst) + bdb_config.replace("nsslapd-db-locks", "11000") + + # Restart server + inst.restart() + + for lock_enabled in ["on", "off"]: + for lock_pause in ["100", "500", "1000"]: + bdb_config.replace("nsslapd-db-locks-monitoring-enabled", lock_enabled) + bdb_config.replace("nsslapd-db-locks-monitoring-threshold", lock_threshold) + bdb_config.replace("nsslapd-db-locks-monitoring-pause", lock_pause) + inst.restart() + + if lock_enabled == "off": + raised_exception = (RuntimeError, ldap.SERVER_DOWN) + else: + raised_exception = ldap.OPERATIONS_ERROR + + users = UserAccounts(inst, DEFAULT_SUFFIX) + with pytest.raises(raised_exception): + spawn_worker_thread(create_user_wrapper, users, log, 30, + f"Adding user with monitoring enabled='{lock_enabled}'; " + f"threshold='{lock_threshold}'; pause='{lock_pause}'.") + # Restart because we already run out of locks and the next unindexed searches will fail eventually + if lock_enabled == "off": + _kill_ns_slapd(inst) + inst.restart() + + users = UserAccounts(inst, ADDITIONAL_SUFFIX, rdn=None) + with pytest.raises(raised_exception): + spawn_worker_thread(create_user_wrapper, users, log, 30, + f"Adding user with monitoring enabled='{lock_enabled}'; " + f"threshold='{lock_threshold}'; pause='{lock_pause}'.") + # In case feature is disabled - restart for the clean up + if lock_enabled == "off": + _kill_ns_slapd(inst) + inst.restart() + + +@db_locks_monitoring_ack +@pytest.mark.skipif(get_default_db_lib() == "mdb", reason="Not supported over mdb") +def test_exhaust_db_locks_big_pause(topology_st_fn, setup_attruniq_index_be_import): + """Test that DB 
lock pause setting increases the wait interval value for the monitoring thread + + :id: 7d5bf838-5d4e-4ad5-8c03-5716afb84ea6 + :customerscenario: True + :setup: Standalone instance with Attr Uniq plugin and user indexes disabled + :steps: 1. Set nsslapd-db-locks to 20000 while using the default threshold value (95%) + 2. Set nsslapd-db-locks-monitoring-pause to 10000 (10 seconds) + 3. Make sure that the pause is successfully increased a few times in a row + :expectedresults: + 1. Success + 2. Success + 3. Success + """ + + inst = topology_st_fn.standalone + + bdb_config = BDB_LDBMConfig(inst) + bdb_config.replace("nsslapd-db-locks", "20000") + lock_pause = bdb_config.get_attr_val_int("nsslapd-db-locks-monitoring-pause") + assert lock_pause == 500 + lock_pause = "10000" + bdb_config.replace("nsslapd-db-locks-monitoring-pause", lock_pause) + + # Restart server + inst.restart() + + lock_enabled = bdb_config.get_attr_val_utf8_l("nsslapd-db-locks-monitoring-enabled") + lock_threshold = bdb_config.get_attr_val_int("nsslapd-db-locks-monitoring-threshold") + assert lock_enabled == "on" + assert lock_threshold == 90 + + users = UserAccounts(inst, DEFAULT_SUFFIX) + start = datetime.datetime.now() + with pytest.raises(ldap.OPERATIONS_ERROR): + spawn_worker_thread(create_user_wrapper, users, log, 30, + f"Adding user with monitoring enabled='{lock_enabled}'; " + f"threshold='{lock_threshold}'; pause='{lock_pause}'. Expect it to 'Work'") + end = datetime.datetime.now() + time_delta = end - start + if time_delta.seconds < 9: + raise RuntimeError("nsslapd-db-locks-monitoring-pause attribute doesn't function correctly. 
" + f"Finished the execution in {time_delta.seconds} seconds") + # In case something has failed - restart for the clean up + inst.restart() + + +@pytest.mark.ds4623 +@pytest.mark.bz1812286 +@pytest.mark.skipif(ds_is_older("1.4.3.23"), reason="Not implemented") +@pytest.mark.skipif(get_default_db_lib() == "mdb", reason="Not supported over mdb") +@pytest.mark.parametrize("invalid_value", [("0"), ("1"), ("42"), ("68"), ("69"), ("96"), ("120")]) +def test_invalid_threshold_range(topology_st_fn, invalid_value): + """Test that setting nsslapd-db-locks-monitoring-threshold to 60 % is rejected + + :id: e4551de1-8582-4c13-b59d-3d5ec4701457 + :customerscenario: True + :parametrized: yes + :setup: Standalone instance + :steps: 1. Set nsslapd-db-locks-monitoring-threshold to 60 % + 2. Check if exception message contains info about invalid value range + :expectedresults: + 1. Exception is raised + 2. Success + """ + + inst = topology_st_fn.standalone + bdb_config = BDB_LDBMConfig(inst) + msg = 'threshold is indicated as a percentage and it must lie in range of 70 and 95' + + try: + bdb_config.replace("nsslapd-db-locks-monitoring-threshold", invalid_value) + except ldap.OPERATIONS_ERROR as e: + log.info('Got expected error: {}'.format(str(e))) + assert msg in str(e) + + +@pytest.mark.ds4623 +@pytest.mark.bz1812286 +@pytest.mark.skipif(ds_is_older("1.4.3.23"), reason="Not implemented") +@pytest.mark.skipif(get_default_db_lib() == "mdb", reason="Not supported over mdb") +@pytest.mark.parametrize("locks_invalid", [("0"), ("1"), ("9999"), ("10000")]) +def test_invalid_db_locks_value(topology_st_fn, locks_invalid): + """Test that setting nsslapd-db-locks to 0 is rejected + + :id: bbb40279-d622-4f36-a129-c54f963f494a + :customerscenario: True + :parametrized: yes + :setup: Standalone instance + :steps: 1. Set nsslapd-db-locks to 0 + 2. Check if exception message contains info about invalid value + :expectedresults: + 1. Exception is raised + 2. 
Success + """ + + inst = topology_st_fn.standalone + bdb_config = BDB_LDBMConfig(inst) + msg = 'Invalid value for nsslapd-db-locks ({}). Must be greater than 10000'.format(locks_invalid) + + try: + bdb_config.replace("nsslapd-db-locks", locks_invalid) + except ldap.UNWILLING_TO_PERFORM as e: + log.info('Got expected error: {}'.format(str(e))) + assert msg in str(e) + + +if __name__ == '__main__': + # Run isolated + # -s for DEBUG mode + CURRENT_FILE = os.path.realpath(__file__) + pytest.main(["-s", CURRENT_FILE]) diff --git a/dirsrvtests/tests/suites/monitor/monitor_test.py b/dirsrvtests/tests/suites/monitor/monitor_test.py new file mode 100644 index 0000000..0f9d551 --- /dev/null +++ b/dirsrvtests/tests/suites/monitor/monitor_test.py @@ -0,0 +1,189 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2022 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. +# --- END COPYRIGHT BLOCK --- +# +import logging +import pytest +import os +from lib389.monitor import * +from lib389.backend import Backends, DatabaseConfig +from lib389._constants import * +from lib389.topologies import topology_st as topo +from lib389._mapped_object import DSLdapObjects + +pytestmark = pytest.mark.tier1 + +DEBUGGING = os.getenv("DEBUGGING", default=False) +if DEBUGGING: + logging.getLogger(__name__).setLevel(logging.DEBUG) +else: + logging.getLogger(__name__).setLevel(logging.INFO) +log = logging.getLogger(__name__) + + +def test_monitor(topo): + """This test is to display monitor attributes to check the performance + + :id: f7c8a815-07cf-4e67-9574-d26a0937d3db + :setup: Single instance + :steps: + 1. Get the cn=monitor connections attributes + 2. Print connections attributes + 3. Get the cn=monitor version + 4. Print cn=monitor version + 5. Get the cn=monitor threads attributes + 6. Print cn=monitor threads attributes + 7. Get cn=monitor backends attributes + 8. Print cn=monitor backends attributes + 9. 
Get cn=monitor operations attributes + 10. Print cn=monitor operations attributes + 11. Get cn=monitor statistics attributes + 12. Print cn=monitor statistics attributes + :expectedresults: + 1. cn=monitor attributes should be fetched and printed successfully. + """ + + #define the monitor object from Monitor class in lib389 + monitor = Monitor(topo.standalone) + + #get monitor connections + connections = monitor.get_connections() + log.info('connection: {0[0]}, currentconnections: {0[1]}, totalconnections: {0[2]}'.format(connections)) + + #get monitor version + version = monitor.get_version() + log.info('version :: %s' %version) + + #get monitor threads + threads = monitor.get_threads() + log.info('threads: {0[0]},currentconnectionsatmaxthreads: {0[1]},maxthreadsperconnhits: {0[2]}'.format(threads)) + + #get monitor backends + backend = monitor.get_backends() + log.info('nbackends: {0[0]}, backendmonitordn: {0[1]}'.format(backend)) + + #get monitor operations + operations = monitor.get_operations() + log.info('opsinitiated: {0[0]}, opscompleted: {0[1]}'.format(operations)) + + #get monitor stats + stats = monitor.get_statistics() + log.info('dtablesize: {0[0]},readwaiters: {0[1]},entriessent: {0[2]},bytessent: {0[3]},currenttime: {0[4]},starttime: {0[5]}'.format(stats)) + + +def test_monitor_ldbm(topo): + """This test is to check if we are getting the correct monitor entry + + :id: e62ba369-32f5-4b03-8865-f597a5bb6a70 + :setup: Single instance + :steps: + 1. Get the backend library (bdb, ldbm, etc) + 2. Get the database monitor + 3. Check for expected attributes in output + 4. Check for expected DB library specific attributes + :expectedresults: + 1. Success + 2. Success + 3. Success + 4. Success + """ + + # Are we using BDB? 
+ db_config = DatabaseConfig(topo.standalone) + db_lib = db_config.get_db_lib() + + # Get the database monitor entry + monitor = MonitorLDBM(topo.standalone).get_status() + + # Check that known attributes exist (only NDN cache stats) + assert 'normalizeddncachehits' in monitor + + # Check for library specific attributes + if db_lib == 'bdb': + assert 'dbcachehits' in monitor + assert 'nsslapd-db-configured-locks' in monitor + elif db_lib == 'mdb': + pass + else: + # Unknown - the server would probably fail to start but check it anyway + log.fatal(f'Unknown backend library: {db_lib}') + assert False + + +def test_monitor_backend(topo): + """This test is to check if we are getting the correct backend monitor entry + + :id: 27b0534f-a18c-4c95-aa2b-936bc1886a7b + :setup: Single instance + :steps: + 1. Get the backend library (bdb, ldbm, etc) + 2. Get the backend monitor + 3. Check for expected attributes in output + 4. Check for expected DB library specific attributes + :expectedresults: + 1. Success + 2. Success + 3. Success + 4. Success + """ + + # Are we using BDB? + db_lib = topo.standalone.get_db_lib() + + # Get the backend monitor + be = Backends(topo.standalone).list()[0] + monitor = be.get_monitor().get_status() + + # Check for expected attributes + assert 'entrycachehits' in monitor + + # Check for library specific attributes + if db_lib == 'bdb': + assert 'dncachehits' in monitor + assert 'dbfilename-0' in monitor + elif db_lib == 'mdb': + assert 'dbiname-1' in monitor + pass + else: + # Unknown - the server would probably fail to start but check it anyway + log.fatal(f'Unknown backend library: {db_lib}') + assert False + + +@pytest.mark.bz1843550 +@pytest.mark.ds4153 +@pytest.mark.bz1903539 +@pytest.mark.ds4528 +def test_num_subordinates_with_monitor_suffix(topo): + """This test is to compare the numSubordinates value on the root entry with the actual number of direct subordinate(s). 
+ + :id: fdcfe0ac-33c3-4252-bf38-79819ec58a51 + :setup: Single instance + :steps: + 1. Create sample entries and perform a search with basedn as cn=monitor, filter as "(objectclass=*)" and scope as base. + 2. Extract the numSubordinates value. + 3. Perform another search with basedn as cn=monitor, filter as "(\|(objectclass=*)(objectclass=ldapsubentry))" and scope as one. + 4. Compare numSubordinates value with the number of sub-entries. + :expectedresults: + 1. Success + 2. Success + 3. Success + 4. Should be same + """ + + raw_objects = DSLdapObjects(topo.standalone, basedn='cn=monitor') + filter1 = raw_objects.filter("(objectclass=*)", scope=0) + num_subordinates_val = filter1[0].get_attr_val_int('numSubordinates') + filter2 = raw_objects.filter("(|(objectclass=*)(objectclass=ldapsubentry))",scope=1) + assert len(filter2) == num_subordinates_val + + +if __name__ == '__main__': + # Run isolated + # -s for DEBUG mode + CURRENT_FILE = os.path.realpath(__file__) + pytest.main("-s %s" % CURRENT_FILE) diff --git a/dirsrvtests/tests/suites/openldap_2_389/__init__.py b/dirsrvtests/tests/suites/openldap_2_389/__init__.py new file mode 100644 index 0000000..3850ac1 --- /dev/null +++ b/dirsrvtests/tests/suites/openldap_2_389/__init__.py @@ -0,0 +1,3 @@ +""" + :Requirement: 389-ds-base: Test OpenLDAP +""" diff --git a/dirsrvtests/tests/suites/openldap_2_389/migrate_hdb_test.py b/dirsrvtests/tests/suites/openldap_2_389/migrate_hdb_test.py new file mode 100644 index 0000000..9559789 --- /dev/null +++ b/dirsrvtests/tests/suites/openldap_2_389/migrate_hdb_test.py @@ -0,0 +1,49 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2021 William Brown +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. 
+# --- END COPYRIGHT BLOCK --- +# +import pytest +import os +from lib389.topologies import topology_st +from lib389.password_plugins import PBKDF2Plugin +from lib389.utils import ds_is_older +from lib389.migrate.openldap.config import olConfig +from lib389.migrate.openldap.config import olOverlayType +from lib389.migrate.plan import Migration + +pytestmark = pytest.mark.tier1 + +DATADIR1 = os.path.join(os.path.dirname(__file__), '../../data/openldap_2_389/4539/') + +@pytest.mark.skipif(ds_is_older('1.4.3'), reason="Not implemented") +def test_migrate_openldap_hdb(topology_st): + """Attempt a migration with HDB and no overlay configuration folder. + + :id: 377dbdee-7138-47d9-a518-9e0b0f4d8622 + :setup: Data directory with an openldap config with HDB database. + :steps: + 1. Parse the configuration + 2. Execute a full migration plan + + :expectedresults: + 1. Success + 2. Success + """ + + inst = topology_st.standalone + config_path = os.path.join(DATADIR1, 'slapd.d') + config = olConfig(config_path) + ldifs = {} + + migration = Migration(inst, config.schema, config.databases, ldifs) + + print("==== migration plan ====") + print(migration.__unicode__()) + print("==== end migration plan ====") + + migration.execute_plan() + # End test, should succeed with no exceptions. diff --git a/dirsrvtests/tests/suites/openldap_2_389/migrate_memberof_test.py b/dirsrvtests/tests/suites/openldap_2_389/migrate_memberof_test.py new file mode 100644 index 0000000..4092bb3 --- /dev/null +++ b/dirsrvtests/tests/suites/openldap_2_389/migrate_memberof_test.py @@ -0,0 +1,64 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2022 William Brown +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. 
+# --- END COPYRIGHT BLOCK --- +# +import pytest +import os +from lib389.topologies import topology_st +from lib389.password_plugins import PBKDF2Plugin +from lib389.utils import ds_is_older +from lib389.migrate.openldap.config import olConfig +from lib389.migrate.openldap.config import olOverlayType +from lib389.migrate.plan import Migration +from lib389.plugins import MemberOfPlugin + +pytestmark = pytest.mark.tier1 + +DATADIR1 = os.path.join(os.path.dirname(__file__), '../../data/openldap_2_389/memberof/') + +@pytest.mark.skipif(ds_is_older('1.4.3'), reason="Not implemented") +def test_migrate_openldap_memberof(topology_st): + """Attempt a migration with memberof configured, and ensure it migrates + + :id: f59f4c6a-7c85-40d1-91ee-dccbc0bd7ef8 + :setup: Data directory with an openldap config with memberof + :steps: + 1. Parse the configuration + 2. Execute a full migration plan + 3. Assert memberof was configured + + :expectedresults: + 1. Success + 2. Success + 3. Success + """ + + inst = topology_st.standalone + config_path = os.path.join(DATADIR1, 'slapd.d') + config = olConfig(config_path) + + for overlay in config.databases[0].overlays: + print("==================================================") + print("%s" % overlay.otype) + print("==================================================") + assert overlay.otype != olOverlayType.UNKNOWN + + ldifs = {} + + migration = Migration(inst, config.schema, config.databases, ldifs) + + print("==== migration plan ====") + print(migration.__unicode__()) + print("==== end migration plan ====") + + migration.execute_plan() + # End test, should succeed with no exceptions. 
+ + memberof = MemberOfPlugin(inst) + assert memberof.status() + + diff --git a/dirsrvtests/tests/suites/openldap_2_389/migrate_monitor_test.py b/dirsrvtests/tests/suites/openldap_2_389/migrate_monitor_test.py new file mode 100644 index 0000000..bf056f0 --- /dev/null +++ b/dirsrvtests/tests/suites/openldap_2_389/migrate_monitor_test.py @@ -0,0 +1,57 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2022 William Brown +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. +# --- END COPYRIGHT BLOCK --- +# +import pytest +import os +from lib389.topologies import topology_st +from lib389.password_plugins import PBKDF2Plugin +from lib389.utils import ds_is_older +from lib389.migrate.openldap.config import olConfig +from lib389.migrate.openldap.config import olOverlayType +from lib389.migrate.plan import Migration +from lib389.plugins import MemberOfPlugin + +pytestmark = pytest.mark.tier1 + +DATADIR1 = os.path.join(os.path.dirname(__file__), '../../data/openldap_2_389/5323/') + +@pytest.mark.skipif(ds_is_older('1.4.3'), reason="Not implemented") +def test_migrate_openldap_monitor(topology_st): + """Attempt a migration with a monitor database configured. + + :id: 3bf7a7e8-7482-49ee-bc3c-e5a174463844 + :setup: Data directory with an openldap config with monitor db + :steps: + 1. Parse the configuration + 2. Execute a full migration plan + 3. Assert monitor was skipped + + :expectedresults: + 1. Success + 2. Success + 3. Success + """ + + inst = topology_st.standalone + config_path = os.path.join(DATADIR1, 'slapd.d') + config = olConfig(config_path) + + assert len(config.databases) == 1 + + ldifs = {} + + migration = Migration(inst, config.schema, config.databases, ldifs) + + print("==== migration plan ====") + print(migration.__unicode__()) + print("==== end migration plan ====") + + migration.execute_plan() + # End test, should succeed with no exceptions. 
+ + diff --git a/dirsrvtests/tests/suites/openldap_2_389/migrate_test.py b/dirsrvtests/tests/suites/openldap_2_389/migrate_test.py new file mode 100644 index 0000000..492c94f --- /dev/null +++ b/dirsrvtests/tests/suites/openldap_2_389/migrate_test.py @@ -0,0 +1,154 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2020 William Brown +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. +# --- END COPYRIGHT BLOCK --- +# +import pytest +import os +from lib389.topologies import topology_st +from lib389.password_plugins import PBKDF2Plugin +from lib389.utils import ds_is_older +from lib389.migrate.openldap.config import olConfig +from lib389.migrate.openldap.config import olOverlayType +from lib389.migrate.plan import Migration +# from lib389.migrate.plan import * + +pytestmark = pytest.mark.tier1 + +DATADIR1 = os.path.join(os.path.dirname(__file__), '../../data/openldap_2_389/1/') + +@pytest.mark.skipif(ds_is_older('1.4.3'), reason="Not implemented") +def test_parse_openldap_slapdd(): + """Test parsing an example openldap configuration. We should be able to + at least determine the backends, what overlays they have, and some other + minimal amount. + + :id: b0061ab0-fff4-45c6-b6c6-171ca3d2dfbc + :setup: Data directory with an openldap config directory. + :steps: + 1. Parse the openldap configuration + + :expectedresults: + 1. Success + """ + config_path = os.path.join(DATADIR1, 'slapd.d') + config = olConfig(config_path) + + # Do we have databases? + assert len(config.databases) == 2 + # Check that we unpacked uid eq,pres,sub correctly. + assert len(config.databases[0].index) == 4 + assert ('objectClass', 'eq') in config.databases[0].index + assert ('uid', 'eq') in config.databases[0].index + assert ('uid', 'pres') in config.databases[0].index + assert ('uid', 'sub') in config.databases[0].index + + # Did our schema parse? 
+ assert any(['suseModuleConfiguration' in x.names for x in config.schema.classes]) + + +@pytest.mark.skipif(ds_is_older('1.4.3'), reason="Not implemented") +def test_migrate_openldap_slapdd(topology_st): + """ + + :id: e9748040-90a0-4d69-bdde-007104f75cc5 + :setup: Data directory with an openldap config directory. + :steps: + 1. Parse the configuration + 2. Execute a full migration plan + + :expectedresults: + 1. Success + 2. Success + """ + + inst = topology_st.standalone + config_path = os.path.join(DATADIR1, 'slapd.d') + config = olConfig(config_path) + ldifs = { + "dc=example,dc=com": os.path.join(DATADIR1, 'example_com.slapcat.ldif'), + "dc=example,dc=net": os.path.join(DATADIR1, 'example_net.slapcat.ldif'), + } + + migration = Migration(inst, config.schema, config.databases, ldifs) + + print("==== migration plan ====") + print(migration.__unicode__()) + print("==== end migration plan ====") + + migration.execute_plan() + + # Check the BE's are there + # Check plugins + # Check the schema + # Check a user can bind + + +@pytest.mark.skipif(ds_is_older('1.4.3'), reason="Not implemented") +def test_migrate_openldap_slapdd_skip_elements(topology_st): + """ + + :id: d5e16aeb-6810-423b-b5e0-f89e0596292e + :setup: Data directory with an openldap config directory. + :steps: + 1. Parse the configuration + 2. Execute a migration with skipped elements + + :expectedresults: + 1. Success + 2. 
Success + """ + + inst = topology_st.standalone + config_path = os.path.join(DATADIR1, 'slapd.d') + config = olConfig(config_path) + ldifs = { + "dc=example,dc=com": os.path.join(DATADIR1, 'example_com.slapcat.ldif'), + } + + # 1.3.6.1.4.1.5322.13.1.1 is namedObject, so check that isn't there + + migration = Migration(inst, config.schema, config.databases, ldifs, + skip_schema_oids=['1.3.6.1.4.1.5322.13.1.1'], + skip_overlays=[olOverlayType.UNIQUE], + ) + + print("==== migration plan ====") + print(migration.__unicode__()) + print("==== end migration plan ====") + + migration.execute_plan() + + # Check that the overlay ISNT there + # Check the schema that SHOULDNT be there. + + + + + +# # how to convert the config +# +# # How to slapcat +# +# openldap_2_389 --config /etc/openldap/slapd.d --ldif "path" +# +# +# --confirm +# --ignore-overlay=X +# --ignore-schema-oid=X +# --no-overlays +# --no-passwords +# --no-schema +# --no-indexes +# +# +# +# +# Add skip overlay +# Add password Strip +# check userPasswords + + diff --git a/dirsrvtests/tests/suites/openldap_2_389/password_migrate_test.py b/dirsrvtests/tests/suites/openldap_2_389/password_migrate_test.py new file mode 100644 index 0000000..8066457 --- /dev/null +++ b/dirsrvtests/tests/suites/openldap_2_389/password_migrate_test.py @@ -0,0 +1,73 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2020 William Brown +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. +# --- END COPYRIGHT BLOCK --- +# +import pytest +import os +from lib389.topologies import topology_st +from lib389.utils import ds_is_older +from lib389.idm.user import nsUserAccounts +from lib389._constants import DEFAULT_SUFFIX + +pytestmark = pytest.mark.tier1 + +@pytest.mark.skipif(ds_is_older('1.4.3'), reason="Not implemented") +def test_migrate_openldap_password_hash(topology_st): + """Test import of an openldap password value into the directory and assert + it can bind. 
+ + :id: e4898e0d-5d18-4765-9249-84bcbf862fde + :setup: Standalone Instance + :steps: + 1. Import a hash + 2. Attempt a bind + 3. Goto 1 + + :expectedresults: + 1. Success + 2. Success + 3. Success + """ + inst = topology_st.standalone + inst.config.set('nsslapd-allow-hashed-passwords', 'on') + + # You generate these with: + # slappasswd -s password -o module-load=/usr/lib64/openldap/pw-argon2.so -h {ARGON2} + pwds = [ + '{CRYPT}ZZKRwXSu3tt8s', + '{SSHA}jdALDtX0+MVMkRsX0ilHz0O6Uos95D4s', + '{MD5}X03MO1qnZdYdgyfeuILPmQ==', + '{SMD5}RnexgcsjdBHMQ1yhB7+sD+a+qDI=', + '{SHA}W6ph5Mm5Pz8GgiULbPgzG37mj9g=', + '{SHA256}XohImNooBHFR0OVvjcYpJ3NgPQ1qq73WKhHvch0VQtg=', + '{SSHA256}covFryM35UrKB3gMYxtYpQYTHbTn5kFphjcNHewfj581SLJwjA9jew==', + '{SHA384}qLZLq9CsqRpZvbt3YbQh1PK7OCgNOnW6DyHyvrxFWD1EbFmGYMlM5oDEfRnDB4On', + '{SSHA384}kNjTWdmyy2G1IgJF8WrOpq0N//Yc2Ec5TIQYceuiuHQXRXpC1bfnMqyOx0NxrSREjBWDwUpqXjo=', + '{SHA512}sQnzu7wkTrgkQZF+0G1hi5AI3Qmzvv0bXgc5THBqi7mAsdd4Xll27ASbRt9fEyavWi6m0QP9B8lThf+rDKy8hg==', + '{SSHA512}+7A8kA32q4mCBao4Cbatdyzl5imVwJ62ZAE7UOTP4pfrF90E9R2LabOfJFzx6guaYhTmUEVK2wRKC8bToqspdeTluX2d1BX2', + # Need to check -- + '{PBKDF2}10000$IlfapjA351LuDSwYC0IQ8Q$saHqQTuYnjJN/tmAndT.8mJt.6w', + '{PBKDF2-SHA1}10000$ZBEH6B07rgQpJSikyvMU2w$TAA03a5IYkz1QlPsbJKvUsTqNV', + '{PBKDF2-SHA256}10000$henZGfPWw79Cs8ORDeVNrQ$1dTJy73v6n3bnTmTZFghxHXHLsAzKaAy8SksDfZBPIw', + '{PBKDF2-SHA512}10000$Je1Uw19Bfv5lArzZ6V3EPw$g4T/1sqBUYWl9o93MVnyQ/8zKGSkPbKaXXsT8WmysXQJhWy8MRP2JFudSL.N9RklQYgDPxPjnfum/F2f/TrppA', + # '{ARGON2}$argon2id$v=19$m=65536,t=2,p=1$IyTQMsvzB2JHDiWx8fq7Ew$VhYOA7AL0kbRXI5g2kOyyp8St1epkNj7WZyUY4pAIQQ', + ] + + accounts = nsUserAccounts(inst, basedn=DEFAULT_SUFFIX) + account = accounts.create(properties={ + 'uid': 'pw_migrate_test_user', + 'cn': 'pw_migrate_test_user', + 'displayName': 'pw_migrate_test_user', + 'uidNumber': '12345', + 'gidNumber': '12345', + 'homeDirectory': '/var/empty', + }) + + for pwhash in pwds: + inst.log.debug(f"Attempting -> {pwhash}") + 
account.set('userPassword', pwhash) + nconn = account.bind('password') diff --git a/dirsrvtests/tests/suites/paged_results/__init__.py b/dirsrvtests/tests/suites/paged_results/__init__.py new file mode 100644 index 0000000..806f40b --- /dev/null +++ b/dirsrvtests/tests/suites/paged_results/__init__.py @@ -0,0 +1,3 @@ +""" + :Requirement: 389-ds-base: Simple Paged Results +""" diff --git a/dirsrvtests/tests/suites/paged_results/paged_results_test.py b/dirsrvtests/tests/suites/paged_results/paged_results_test.py new file mode 100644 index 0000000..d490c4a --- /dev/null +++ b/dirsrvtests/tests/suites/paged_results/paged_results_test.py @@ -0,0 +1,1198 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2020 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. +# --- END COPYRIGHT BLOCK --- +# +import socket +from random import sample +import pytest +from ldap.controls import SimplePagedResultsControl, GetEffectiveRightsControl +from lib389.tasks import * +from lib389.utils import * +from lib389.topologies import topology_st +from lib389._constants import DN_LDBM, DN_DM, DEFAULT_SUFFIX +from lib389._controls import SSSRequestControl +from lib389.idm.user import UserAccount, UserAccounts +from lib389.idm.organization import Organization +from lib389.idm.organizationalunit import OrganizationalUnit +from lib389.backend import Backends +from lib389._mapped_object import DSLdapObject + +pytestmark = pytest.mark.tier1 + +DEBUGGING = os.getenv('DEBUGGING', False) + +if DEBUGGING: + logging.getLogger(__name__).setLevel(logging.DEBUG) +else: + logging.getLogger(__name__).setLevel(logging.INFO) + +log = logging.getLogger(__name__) + +TEST_USER_PWD = 'simplepaged_test' + +NEW_SUFFIX_1_NAME = 'test_parent' +NEW_SUFFIX_1 = 'o={}'.format(NEW_SUFFIX_1_NAME) +NEW_SUFFIX_2_NAME = 'child' +NEW_SUFFIX_2 = 'ou={},{}'.format(NEW_SUFFIX_2_NAME, NEW_SUFFIX_1) +NEW_BACKEND_1 = 'parent_base' +NEW_BACKEND_2 = 'child_base' + 
# Module-level hostname bookkeeping: the host is temporarily renamed to
# 'localhost' so the DNS/IP-based ACI test resolves predictably; the
# create_user fixture finalizer restores OLD_HOSTNAME on teardown.
OLD_HOSTNAME = socket.gethostname()
socket.sethostname('localhost')
HOSTNAME = socket.gethostname()
IP_ADDRESS = socket.gethostbyname(HOSTNAME)
OLD_IP_ADDRESS = socket.gethostbyname(OLD_HOSTNAME)


@pytest.fixture(scope="module")
def create_user(topology_st, request):
    """User for binding operation"""

    log.info('Adding user simplepaged_test')
    # The instance URI still refers to the original hostname; rewrite it so
    # that later binds target the new (localhost) hostname set above.
    new_uri = topology_st.standalone.ldapuri.replace(OLD_HOSTNAME, HOSTNAME)
    topology_st.standalone.ldapuri = new_uri
    users = UserAccounts(topology_st.standalone, DEFAULT_SUFFIX)
    user = users.create(properties={
        'uid': 'simplepaged_test',
        'cn': 'simplepaged_test',
        'sn': 'simplepaged_test',
        'uidNumber': '1234',
        'gidNumber': '1234',
        'homeDirectory': '/home/simplepaged_test',
        'userPassword': TEST_USER_PWD,
    })

    # Now add the ACI so simplepage_test can read the users ...
    ACI_BODY = ensure_bytes('(targetattr= "uid || sn || dn")(version 3.0; acl "Allow read for user"; allow (read,search,compare) userdn = "ldap:///all";)')
    topology_st.standalone.modify_s(DEFAULT_SUFFIX, [(ldap.MOD_REPLACE, 'aci', ACI_BODY)])

    def fin():
        log.info('Deleting user simplepaged_test')
        user.delete()
        # Undo the module-level hostname change.
        socket.sethostname(OLD_HOSTNAME)

    request.addfinalizer(fin)

    return user


@pytest.fixture(scope="module")
def new_suffixes(topology_st):
    """Add two suffixes with backends, one being the parent
    of the other.
    """

    log.info('Adding suffix:{} and backend: {}'.format(NEW_SUFFIX_1, NEW_BACKEND_1))

    bes = Backends(topology_st.standalone)

    bes.create(properties={
        'cn': 'NEW_BACKEND_1',
        'nsslapd-suffix': NEW_SUFFIX_1,
    })
    # Create the root objects with their ACI
    log.info('Adding ACI to allow our test user to search')
    ACI_TARGET = '(targetattr != "userPassword || aci")'
    ACI_ALLOW = '(version 3.0; acl "Enable anonymous access";allow (read, search, compare)'
    ACI_SUBJECT = '(userdn = "ldap:///anyone");)'
    ACI_BODY = ACI_TARGET + ACI_ALLOW + ACI_SUBJECT

    o_1 = Organization(topology_st.standalone, NEW_SUFFIX_1)
    o_1.create(properties={
        'o': NEW_SUFFIX_1_NAME,
        'aci': ACI_BODY,
    })

    log.info('Adding suffix:{} and backend: {}'.format(NEW_SUFFIX_2, NEW_BACKEND_2))
    be_2 = bes.create(properties={
        'cn': 'NEW_BACKEND_2',
        'nsslapd-suffix': NEW_SUFFIX_2,
    })

    # We have to adjust the MT to say that BE_1 is a parent.
    mt = be_2.get_mapping_tree()
    mt.set_parent(NEW_SUFFIX_1)

    ou_2 = OrganizationalUnit(topology_st.standalone, NEW_SUFFIX_2)
    ou_2.create(properties={
        'ou': NEW_SUFFIX_2_NAME
    })


def add_users(topology_st, users_num, suffix):
    """Add users to the given suffix.

    Return the list of added user account objects.
    """

    users_list = []
    users = UserAccounts(topology_st.standalone, suffix, rdn=None)

    log.info('Adding %d users' % users_num)
    # sample() picks distinct random uid numbers so repeated runs don't clash.
    for num in sample(range(1000), users_num):
        num_ran = int(round(num))
        USER_NAME = 'test%05d' % num_ran

        user = users.create(properties={
            'uid': USER_NAME,
            'sn': USER_NAME,
            'cn': USER_NAME,
            'uidNumber': '%s' % num_ran,
            'gidNumber': '%s' % num_ran,
            'homeDirectory': '/home/%s' % USER_NAME,
            'mail': '%s@redhat.com' % USER_NAME,
            'userpassword': 'pass%s' % num_ran,
        })
        users_list.append(user)
    return users_list


def del_users(users_list):
    """Delete the given user account objects."""

    log.info('Deleting %d users' % len(users_list))
    for user in users_list:
        user.delete()


def change_conf_attr(topology_st, suffix, attr_name, attr_value):
    """Change a configuration attribute on the given entry.

    Passing attr_value=None removes the attribute entirely.
    Returns the previous attribute value (bytes, or None if unset).
    """

    entry = DSLdapObject(topology_st.standalone, suffix)

    attr_value_bck = entry.get_attr_val_bytes(attr_name)
    log.info('Set %s to %s. Previous value - %s. Modified suffix - %s.' % (
        attr_name, attr_value, attr_value_bck, suffix))
    if attr_value is None:
        entry.remove_all(attr_name)
    else:
        entry.replace(attr_name, attr_value)
    return attr_value_bck


def paged_search(conn, suffix, controls, search_flt, searchreq_attrlist):
    """Search under *suffix* with ldap.SCOPE_SUBTREE using the simple
    paged results control (which should be the first item in the
    *controls* list), following response cookies until exhausted.
    Assert that no cookie is left at the end.

    Return the list of results accumulated from all pages.
    """

    pages = 0
    pctrls = []
    all_results = []
    req_pr_ctrl = controls[0]
    log.info('Running simple paged result search with - '
             'search suffix: {}; filter: {}; attr list {}; '
             'page_size = {}; controls: {}.'.format(suffix, search_flt,
                                                    searchreq_attrlist,
                                                    req_pr_ctrl.size,
                                                    str(controls)))
    msgid = conn.search_ext(suffix, ldap.SCOPE_SUBTREE, search_flt, searchreq_attrlist, serverctrls=controls)
    while True:
        log.info('Getting page %d' % (pages,))
        rtype, rdata, rmsgid, rctrls = conn.result3(msgid)
        log.debug('Data: {}'.format(rdata))
        all_results.extend(rdata)
        pages += 1
        # Pick the paged-results response control out of the returned controls.
        pctrls = [
            c
            for c in rctrls
            if c.controlType == SimplePagedResultsControl.controlType
        ]

        if pctrls:
            if pctrls[0].cookie:
                # Copy cookie from response control to request control
                log.debug('Cookie: {}'.format(pctrls[0].cookie))
                req_pr_ctrl.cookie = pctrls[0].cookie
                msgid = conn.search_ext(suffix, ldap.SCOPE_SUBTREE, search_flt, searchreq_attrlist, serverctrls=controls)
            else:
                break  # No more pages available
        else:
            break

    # An empty cookie signals the server considers the paged search complete.
    assert not pctrls[0].cookie
    return all_results


@pytest.mark.parametrize("page_size,users_num", [(6, 5), (5, 5), (5, 25)])
def test_search_success(topology_st, create_user, page_size, users_num):
    """Verify that search with a simple paged results control
    returns all entries it should without errors.

    :id: ddd15b70-64f1-4a85-a793-b24761e50354
    :customerscenario: True
    :parametrized: yes
    :feature: Simple paged results
    :setup: Standalone instance, test user for binding,
            varying number of users for the search base
    :steps:
        1. Bind as test user
        2. Search through added users with a simple paged control
    :expectedresults:
        1. Bind should be successful
        2. All users should be found
    """

    users_list = add_users(topology_st, users_num, DEFAULT_SUFFIX)
    search_flt = r'(uid=test*)'
    searchreq_attrlist = ['dn', 'sn']

    log.info('Set user bind %s ' % create_user)
    conn = create_user.bind(TEST_USER_PWD)

    req_ctrl = SimplePagedResultsControl(True, size=page_size, cookie='')
    all_results = paged_search(conn, DEFAULT_SUFFIX, [req_ctrl], search_flt, searchreq_attrlist)

    log.info('%d results' % len(all_results))
    assert len(all_results) == len(users_list)

    del_users(users_list)


@pytest.mark.parametrize("page_size,users_num,suffix,attr_name,attr_value,expected_err", [
    (50, 200, 'cn=config,%s' % DN_LDBM, 'nsslapd-idlistscanlimit', '100',
     ldap.UNWILLING_TO_PERFORM),
    (5, 15, DN_CONFIG, 'nsslapd-timelimit', '20',
     ldap.UNAVAILABLE_CRITICAL_EXTENSION),
    (21, 50, DN_CONFIG, 'nsslapd-sizelimit', '20',
     ldap.SIZELIMIT_EXCEEDED),
    (21, 50, DN_CONFIG, 'nsslapd-pagedsizelimit', '5',
     ldap.SIZELIMIT_EXCEEDED),
    (5, 50, 'cn=config,%s' % DN_LDBM, 'nsslapd-lookthroughlimit', '20',
     ldap.ADMINLIMIT_EXCEEDED)])
def test_search_limits_fail(topology_st, create_user, page_size, users_num,
                            suffix, attr_name, attr_value, expected_err):
    """Verify that search with a simple paged results control
    throws expected exceptions when corresponding limits are
    exceeded.

    :id: e3067107-bd6d-493d-9989-3e641a9337b0
    :customerscenario: True
    :parametrized: yes
    :setup: Standalone instance, test user for binding,
            varying number of users for the search base
    :steps:
        1. Bind as test user
        2. Set limit attribute to the value that will cause
           an expected exception
        3. Search through added users with a simple paged control
    :expectedresults:
        1. Bind should be successful
        2. Operation should be successful
        3. Should fail with appropriate exception
    """

    users_list = add_users(topology_st, users_num, DEFAULT_SUFFIX)
    attr_value_bck = change_conf_attr(topology_st, suffix, attr_name, attr_value)
    conf_param_dict = {attr_name: attr_value}
    search_flt = r'(uid=test*)'
    searchreq_attrlist = ['dn', 'sn']
    controls = []

    try:
        log.info('Set user bind')
        conn = create_user.bind(TEST_USER_PWD)

        log.info('Create simple paged results control instance')
        req_ctrl = SimplePagedResultsControl(True, size=page_size, cookie='')
        controls.append(req_ctrl)
        # The idlistscanlimit case needs a server-side sort to force an
        # unindexed-style evaluation path.
        if attr_name == 'nsslapd-idlistscanlimit':
            sort_ctrl = SSSRequestControl(True, ['sn'])
            controls.append(sort_ctrl)
        log.info('Initiate ldapsearch with created control instance')
        msgid = conn.search_ext(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE,
                                search_flt, searchreq_attrlist, serverctrls=controls)

        # For the timelimit case, sleep past the limit so the server aborts
        # the paged search.
        time_val = conf_param_dict.get('nsslapd-timelimit')
        if time_val:
            time.sleep(int(time_val) + 10)

        pages = 0
        all_results = []
        pctrls = []
        while True:
            log.info('Getting page %d' % (pages,))
            # NOTE(review): 'nsslapd-pagesizelimit' here never matches the
            # parametrized attribute name 'nsslapd-pagedsizelimit' (note the
            # missing 'd'), so for that case the first page also goes through
            # the pytest.raises() branch. Confirm whether that is intentional
            # before "fixing" the string - SIZELIMIT_EXCEEDED is indeed
            # expected on the very first page when page_size > pagedsizelimit.
            if pages == 0 and (time_val or attr_name == 'nsslapd-pagesizelimit'):
                rtype, rdata, rmsgid, rctrls = conn.result3(msgid)
            else:
                with pytest.raises(expected_err):
                    rtype, rdata, rmsgid, rctrls = conn.result3(msgid)
                    all_results.extend(rdata)
                    pages += 1
                    pctrls = [
                        c
                        for c in rctrls
                        if c.controlType == SimplePagedResultsControl.controlType
                    ]

            if pctrls:
                if pctrls[0].cookie:
                    # Copy cookie from response control to request control
                    req_ctrl.cookie = pctrls[0].cookie
                    msgid = conn.search_ext(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE,
                                            search_flt, searchreq_attrlist, serverctrls=controls)
                else:
                    break  # No more pages available
            else:
                break
    finally:
        del_users(users_list)
        change_conf_attr(topology_st, suffix, attr_name, attr_value_bck)


def test_search_sort_success(topology_st, create_user):
    """Verify that search with a simple paged results control
    and a server side sort control returns all entries
    it should without errors.

    :id: 17d8b150-ed43-41e1-b80f-ee9b4ce45155
    :customerscenario: True
    :setup: Standalone instance, test user for binding,
            varying number of users for the search base
    :steps:
        1. Bind as test user
        2. Search through added users with a simple paged control
           and a server side sort control
    :expectedresults:
        1. Bind should be successful
        2. All users should be found and sorted
    """

    users_num = 50
    page_size = 5
    users_list = add_users(topology_st, users_num, DEFAULT_SUFFIX)
    search_flt = r'(uid=test*)'
    searchreq_attrlist = ['dn', 'sn']

    try:
        conn = create_user.bind(TEST_USER_PWD)

        req_ctrl = SimplePagedResultsControl(True, size=page_size, cookie='')
        sort_ctrl = SSSRequestControl(True, ['sn'])

        log.info('Initiate ldapsearch with created control instance')
        log.info('Collect data with sorting')
        controls = [req_ctrl, sort_ctrl]
        results_sorted = paged_search(conn, DEFAULT_SUFFIX, controls,
                                      search_flt, searchreq_attrlist)

        log.info('Substring numbers from user DNs')
        # r_nums = map(lambda x: int(x[0][8:13]), results_sorted)
        # DNs look like 'uid=testNNNNN,...' - chars [8:13] are the 5 digits.
        r_nums = [int(x[0][8:13]) for x in results_sorted]

        log.info('Assert that list is sorted')
        assert all(r_nums[i] <= r_nums[i + 1] for i in range(len(r_nums) - 1))
    finally:
        del_users(users_list)


def test_search_abandon(topology_st, create_user):
    """Verify that search with simple paged results control
    can be abandoned

    :id: 0008538b-7585-4356-839f-268828066978
    :customerscenario: True
    :setup: Standalone instance, test user for binding,
            varying number of users for the search base
    :steps:
        1. Bind as test user
        2. Search through added users with a simple paged control
        3. Abandon the search
    :expectedresults:
        1. Bind should be successful
        2. Search should be started successfully
        3. It should throw an ldap.TIMEOUT exception
           while trying to get the rest of the search results
    """

    users_num = 10
    page_size = 2
    users_list = add_users(topology_st, users_num, DEFAULT_SUFFIX)
    search_flt = r'(uid=test*)'
    searchreq_attrlist = ['dn', 'sn']

    try:
        log.info('Set user bind')
        conn = create_user.bind(TEST_USER_PWD)

        log.info('Create simple paged results control instance')
        req_ctrl = SimplePagedResultsControl(True, size=page_size, cookie='')
        controls = [req_ctrl]

        log.info('Initiate a search with a paged results control')
        msgid = conn.search_ext(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE,
                                search_flt, searchreq_attrlist, serverctrls=controls)
        log.info('Abandon the search')
        conn.abandon(msgid)

        log.info('Expect an ldap.TIMEOUT exception, while trying to get the search results')
        # After abandon, no result ever arrives; result3 must time out.
        with pytest.raises(ldap.TIMEOUT):
            conn.result3(msgid, timeout=5)
    finally:
        del_users(users_list)


def test_search_with_timelimit(topology_st, create_user):
    """Verify that after performing multiple simple paged searches
    to completion, each with a timelimit, it wouldn't fail, if we sleep
    for a time more than the timelimit.

    :id: 6cd7234b-136c-419f-bf3e-43aa73592cff
    :customerscenario: True
    :setup: Standalone instance, test user for binding,
            varying number of users for the search base
    :steps:
        1. Bind as test user
        2. Search through added users with a simple paged control
           and timelimit set to 5
        3. When the returned cookie is empty, wait 10 seconds
        4. Perform steps 2 and 3 three times in a row
    :expectedresults:
        1. Bind should be successful
        2. No error should happen
        3. 10 seconds should pass
        4. No error should happen
    """

    users_num = 100
    page_size = 50
    timelimit = 5
    users_list = add_users(topology_st, users_num, DEFAULT_SUFFIX)
    search_flt = r'(uid=test*)'
    searchreq_attrlist = ['dn', 'sn']

    try:
        log.info('Set user bind')
        conn = create_user.bind(TEST_USER_PWD)

        log.info('Create simple paged results control instance')
        req_ctrl = SimplePagedResultsControl(True, size=page_size, cookie='')
        controls = [req_ctrl]

        for ii in range(3):
            log.info('Iteration %d' % ii)
            msgid = conn.search_ext(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE, search_flt,
                                    searchreq_attrlist, serverctrls=controls, timeout=timelimit)

            pages = 0
            pctrls = []
            while True:
                log.info('Getting page %d' % (pages,))
                rtype, rdata, rmsgid, rctrls = conn.result3(msgid)
                pages += 1
                pctrls = [
                    c
                    for c in rctrls
                    if c.controlType == SimplePagedResultsControl.controlType
                ]

                if pctrls:
                    if pctrls[0].cookie:
                        # Copy cookie from response control to request control
                        req_ctrl.cookie = pctrls[0].cookie
                        msgid = conn.search_ext(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE, search_flt,
                                                searchreq_attrlist, serverctrls=controls, timeout=timelimit)
                    else:
                        # Sleep past the timelimit to prove a finished paged
                        # search is not affected by it.
                        log.info('Done with this search - sleeping %d seconds' % (
                            timelimit * 2))
                        time.sleep(timelimit * 2)
                        break  # No more pages available
                else:
                    break
    finally:
        del_users(users_list)


def test_search_ip_aci(topology_st, create_user):
    """Verify that after performing multiple simple paged searches
    to completion on the suffix with DNS or IP based ACI

    :id: bbfddc46-a8c8-49ae-8c90-7265d05b22a9
    :customerscenario: True
    :parametrized: yes
    :setup: Standalone instance, test user for binding,
            varying number of users for the search base
    :steps:
        1. Back up and remove all previous ACI from suffix
        2. Add an anonymous ACI for IP check
        3. Bind as test user
        4. Search through added users with a simple paged control
        5. Perform steps 4 three times in a row
        6. Return ACI to the initial state
        7. Go through all steps once again, but use IP subject dn
           instead of DNS
    :expectedresults:
        1. Operation should be successful
        2. Anonymous ACI should be successfully added
        3. Bind should be successful
        4. No error happens, all users should be found and sorted
        5. Results should remain the same
        6. ACI should be successfully returned
        7. Results should be the same with ACI with IP subject dn
    """
    users_num = 20
    page_size = 5
    users_list = add_users(topology_st, users_num, DEFAULT_SUFFIX)
    search_flt = r'(uid=test*)'
    searchreq_attrlist = ['dn', 'sn']

    log.info("test_search_dns_ip_aci: HOSTNAME: " + HOSTNAME)
    log.info("test_search_dns_ip_aci: IP_ADDRESS: " + IP_ADDRESS)

    try:
        log.info('Back up current suffix ACI')
        acis_bck = topology_st.standalone.aci.list(DEFAULT_SUFFIX, ldap.SCOPE_BASE)

        log.info('Add test ACI')
        # Allow both the new (localhost) and the original host addresses,
        # plus the IPv6 loopback.
        bind_rule = 'ip = "{}" or ip = "::1" or ip = "{}"'.format(IP_ADDRESS, OLD_IP_ADDRESS)
        ACI_TARGET = '(targetattr != "userPassword")'
        ACI_ALLOW = '(version 3.0;acl "Anonymous access within domain"; allow (read,compare,search)'
        ACI_SUBJECT = '(userdn = "ldap:///anyone") and (%s);)' % bind_rule
        ACI_BODY = ensure_bytes(ACI_TARGET + ACI_ALLOW + ACI_SUBJECT)
        topology_st.standalone.modify_s(DEFAULT_SUFFIX, [(ldap.MOD_REPLACE, 'aci', ACI_BODY)])
        time.sleep(.5)

        log.info('Set user bind')
        conn = create_user.bind(TEST_USER_PWD, uri=f'ldap://{HOSTNAME}:{topology_st.standalone.port}')

        log.info('Create simple paged results control instance')
        req_ctrl = SimplePagedResultsControl(True, size=page_size, cookie='')
        controls = [req_ctrl]

        log.info('Initiate three searches with a paged results control')
        for ii in range(3):
            log.info('%d search' % (ii + 1))
            all_results = paged_search(conn, DEFAULT_SUFFIX, controls,
                                       search_flt, searchreq_attrlist)
            log.info('%d results' % len(all_results))
            assert len(all_results) == len(users_list)
        log.info('If we are here, then no error has happened. We are good.')

    finally:
        log.info('Restore ACI')
        topology_st.standalone.modify_s(DEFAULT_SUFFIX, [(ldap.MOD_DELETE, 'aci', None)])
        for aci in acis_bck:
            topology_st.standalone.modify_s(DEFAULT_SUFFIX, [(ldap.MOD_ADD, 'aci', aci.getRawAci())])
        time.sleep(1)
        del_users(users_list)


def test_search_multiple_paging(topology_st, create_user):
    """Verify that after performing multiple simple paged searches
    on a single connection without completion, it wouldn't fail.

    :id: 628b29a6-2d47-4116-a88d-00b87405ef7f
    :customerscenario: True
    :setup: Standalone instance, test user for binding,
            varying number of users for the search base
    :steps:
        1. Bind as test user
        2. Initiate the search with a simple paged control
        3. Acquire the returned cookie only one time
        4. Perform steps 2 and 3 three times in a row
    :expectedresults:
        1. Bind should be successful
        2. Search should be successfully initiated
        3. Cookie should be successfully acquired
        4. No error happens
    """

    users_num = 20
    page_size = 5
    users_list = add_users(topology_st, users_num, DEFAULT_SUFFIX)
    search_flt = r'(uid=test*)'
    searchreq_attrlist = ['dn', 'sn']

    try:
        log.info('Set user bind')
        conn = create_user.bind(TEST_USER_PWD)

        log.info('Create simple paged results control instance')
        req_ctrl = SimplePagedResultsControl(True, size=page_size, cookie='')
        controls = [req_ctrl]

        # Deliberately restart a new paged search each iteration without
        # draining the previous one to completion.
        for ii in range(3):
            log.info('Iteration %d' % ii)
            msgid = conn.search_ext(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE,
                                    search_flt, searchreq_attrlist, serverctrls=controls)
            rtype, rdata, rmsgid, rctrls = conn.result3(msgid)
            pctrls = [
                c
                for c in rctrls
                if c.controlType == SimplePagedResultsControl.controlType
            ]

            # Copy cookie from response control to request control
            req_ctrl.cookie = pctrls[0].cookie
            msgid = conn.search_ext(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE,
                                    search_flt, searchreq_attrlist, serverctrls=controls)
    finally:
        del_users(users_list)
+@pytest.mark.parametrize("invalid_cookie", [1000, -1]) +def test_search_invalid_cookie(topology_st, create_user, invalid_cookie): + """Verify that using invalid cookie while performing + search with the simple paged results control throws + a TypeError exception + + :id: 107be12d-4fe4-47fe-ae86-f3e340a56f42 + :customerscenario: True + :parametrized: yes + :setup: Standalone instance, test user for binding, + varying number of users for the search base + :steps: + 1. Bind as test user + 2. Initiate the search with a simple paged control + 3. Put an invalid cookie (-1, 1000) to the control + 4. Continue the search + :expectedresults: + 1. Bind should be successful + 2. Search should be successfully initiated + 3. Cookie should be added + 4. It should throw a TypeError exception + """ + + users_num = 20 + page_size = 5 + users_list = add_users(topology_st, users_num, DEFAULT_SUFFIX) + search_flt = r'(uid=test*)' + searchreq_attrlist = ['dn', 'sn'] + + try: + log.info('Set user bind') + conn = create_user.bind(TEST_USER_PWD) + + log.info('Create simple paged results control instance') + req_ctrl = SimplePagedResultsControl(True, size=page_size, cookie='') + controls = [req_ctrl] + + msgid = conn.search_ext(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE, + search_flt, searchreq_attrlist, serverctrls=controls) + rtype, rdata, rmsgid, rctrls = conn.result3(msgid) + + log.info('Put an invalid cookie (%d) to the control. 
TypeError is expected' % + invalid_cookie) + req_ctrl.cookie = invalid_cookie + with pytest.raises(TypeError): + msgid = conn.search_ext(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE, + search_flt, searchreq_attrlist, serverctrls=controls) + finally: + del_users(users_list) + + +def test_search_abandon_with_zero_size(topology_st, create_user): + """Verify that search with simple paged results control + can be abandon using page_size = 0 + + :id: d2fd9a10-84e1-4b69-a8a7-36ca1427c171 + :customerscenario: True + :setup: Standalone instance, test user for binding, + varying number of users for the search base + :steps: + 1. Bind as test user + 2. Search through added users with a simple paged control + and page_size = 0 + :expectedresults: + 1. Bind should be successful + 2. No cookie should be returned at all + """ + + users_num = 10 + page_size = 0 + users_list = add_users(topology_st, users_num, DEFAULT_SUFFIX) + search_flt = r'(uid=test*)' + searchreq_attrlist = ['dn', 'sn'] + + try: + log.info('Set user bind') + conn = create_user.bind(TEST_USER_PWD) + + log.info('Create simple paged results control instance') + req_ctrl = SimplePagedResultsControl(True, size=page_size, cookie='') + controls = [req_ctrl] + + msgid = conn.search_ext(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE, + search_flt, searchreq_attrlist, serverctrls=controls) + rtype, rdata, rmsgid, rctrls = conn.result3(msgid) + pctrls = [ + c + for c in rctrls + if c.controlType == SimplePagedResultsControl.controlType + ] + assert not pctrls[0].cookie + finally: + del_users(users_list) + + +def test_search_pagedsizelimit_success(topology_st, create_user): + """Verify that search with a simple paged results control + returns all entries it should without errors while + valid value set to nsslapd-pagedsizelimit. + + :id: 88193f10-f6f0-42f5-ae9c-ff34b8f9ee8c + :customerscenario: True + :setup: Standalone instance, test user for binding, + 10 users for the search base + :steps: + 1. Set nsslapd-pagedsizelimit: 20 + 2. 
Bind as test user + 3. Search through added users with a simple paged control + using page_size = 10 + :expectedresults: + 1. nsslapd-pagedsizelimit should be successfully set + 2. Bind should be successful + 3. All users should be found + """ + + users_num = 10 + page_size = 10 + attr_name = 'nsslapd-pagedsizelimit' + attr_value = '20' + attr_value_bck = change_conf_attr(topology_st, DN_CONFIG, attr_name, attr_value) + users_list = add_users(topology_st, users_num, DEFAULT_SUFFIX) + search_flt = r'(uid=test*)' + searchreq_attrlist = ['dn', 'sn'] + + try: + log.info('Set user bind') + conn = create_user.bind(TEST_USER_PWD) + + req_ctrl = SimplePagedResultsControl(True, size=page_size, cookie='') + controls = [req_ctrl] + + all_results = paged_search(conn, DEFAULT_SUFFIX, controls, search_flt, searchreq_attrlist) + + log.info('%d results' % len(all_results)) + assert len(all_results) == len(users_list) + + finally: + del_users(users_list) + change_conf_attr(topology_st, DN_CONFIG, 'nsslapd-pagedsizelimit', attr_value_bck) + + +@pytest.mark.parametrize('conf_attr,user_attr,expected_rs', + (('5', '15', 'PASS'), ('15', '5', ldap.SIZELIMIT_EXCEEDED))) +def test_search_nspagedsizelimit(topology_st, create_user, + conf_attr, user_attr, expected_rs): + """Verify that nsPagedSizeLimit attribute overrides + nsslapd-pagedsizelimit while performing search with + the simple paged results control. + + :id: b08c6ad2-ba28-447a-9f04-5377c3661d0d + :customerscenario: True + :parametrized: yes + :setup: Standalone instance, test user for binding, + 10 users for the search base + :steps: + 1. Set nsslapd-pagedsizelimit: 5 + 2. Set nsPagedSizeLimit: 15 + 3. Bind as test user + 4. Search through added users with a simple paged control + using page_size = 10 + 5. Bind as Directory Manager + 6. Restore all values + 7. Set nsslapd-pagedsizelimit: 15 + 8. Set nsPagedSizeLimit: 5 + 9. Bind as test user + 10. 
Search through added users with a simple paged control + using page_size = 10 + :expectedresults: + 1. nsslapd-pagedsizelimit should be successfully set + 2. nsPagedSizeLimit should be successfully set + 3. Bind should be successful + 4. No error happens, all users should be found + 5. Bind should be successful + 6. All values should be restored + 7. nsslapd-pagedsizelimit should be successfully set + 8. nsPagedSizeLimit should be successfully set + 9. Bind should be successful + 10. It should throw SIZELIMIT_EXCEEDED exception + """ + + users_num = 10 + page_size = 10 + users_list = add_users(topology_st, users_num, DEFAULT_SUFFIX) + search_flt = r'(uid=test*)' + searchreq_attrlist = ['dn', 'sn'] + conf_attr_bck = change_conf_attr(topology_st, DN_CONFIG, 'nsslapd-pagedsizelimit', conf_attr) + user_attr_bck = change_conf_attr(topology_st, create_user.dn, 'nsPagedSizeLimit', user_attr) + + try: + log.info('Set user bind') + conn = create_user.bind(TEST_USER_PWD) + + req_ctrl = SimplePagedResultsControl(True, size=page_size, cookie='') + controls = [req_ctrl] + + if expected_rs == ldap.SIZELIMIT_EXCEEDED: + log.info('Expect to fail with SIZELIMIT_EXCEEDED') + with pytest.raises(expected_rs): + all_results = paged_search(conn, DEFAULT_SUFFIX, controls, search_flt, searchreq_attrlist) + elif expected_rs == 'PASS': + log.info('Expect to pass') + all_results = paged_search(conn, DEFAULT_SUFFIX, controls, search_flt, searchreq_attrlist) + log.info('%d results' % len(all_results)) + assert len(all_results) == len(users_list) + + finally: + del_users(users_list) + change_conf_attr(topology_st, DN_CONFIG, 'nsslapd-pagedsizelimit', conf_attr_bck) + change_conf_attr(topology_st, create_user.dn, 'nsPagedSizeLimit', user_attr_bck) + + +@pytest.mark.parametrize('conf_attr_values,expected_rs', + ((('5000', '100', '100'), ldap.ADMINLIMIT_EXCEEDED), + (('5000', '120', '122'), 'PASS'))) +def test_search_paged_limits(topology_st, create_user, conf_attr_values, expected_rs): + 
"""Verify that nsslapd-idlistscanlimit and + nsslapd-lookthroughlimit can limit the administrator + search abilities. + + :id: e0f8b916-7276-4bd3-9e73-8696a4468811 + :customerscenario: True + :parametrized: yes + :setup: Standalone instance, test user for binding, + 10 users for the search base + :steps: + 1. Set nsslapd-sizelimit and nsslapd-pagedsizelimit to 5000 + 2. Set nsslapd-idlistscanlimit: 120 + 3. Set nsslapd-lookthroughlimit: 122 + 4. Bind as test user + 5. Search through added users with a simple paged control + using page_size = 10 + 6. Bind as Directory Manager + 7. Set nsslapd-idlistscanlimit: 100 + 8. Set nsslapd-lookthroughlimit: 100 + 9. Bind as test user + 10. Search through added users with a simple paged control + using page_size = 10 + :expectedresults: + 1. nsslapd-sizelimit and nsslapd-pagedsizelimit + should be successfully set + 2. nsslapd-idlistscanlimit should be successfully set + 3. nsslapd-lookthroughlimit should be successfully set + 4. Bind should be successful + 5. No error happens, all users should be found + 6. Bind should be successful + 7. nsslapd-idlistscanlimit should be successfully set + 8. nsslapd-lookthroughlimit should be successfully set + 9. Bind should be successful + 10. 
It should throw ADMINLIMIT_EXCEEDED exception + """ + + users_num = 101 + page_size = 10 + users_list = add_users(topology_st, users_num, DEFAULT_SUFFIX) + search_flt = r'(uid=test*)' + searchreq_attrlist = ['dn', 'sn'] + size_attr_bck = change_conf_attr(topology_st, DN_CONFIG, 'nsslapd-sizelimit', conf_attr_values[0]) + pagedsize_attr_bck = change_conf_attr(topology_st, DN_CONFIG, 'nsslapd-pagedsizelimit', conf_attr_values[0]) + idlistscan_attr_bck = change_conf_attr(topology_st, 'cn=config,%s' % DN_LDBM, 'nsslapd-idlistscanlimit', conf_attr_values[1]) + lookthrough_attr_bck = change_conf_attr(topology_st, 'cn=config,%s' % DN_LDBM, 'nsslapd-lookthroughlimit', conf_attr_values[2]) + + try: + log.info('Set user bind') + conn = create_user.bind(TEST_USER_PWD) + + req_ctrl = SimplePagedResultsControl(True, size=page_size, cookie='') + controls = [req_ctrl] + + if expected_rs == ldap.ADMINLIMIT_EXCEEDED: + log.info('Expect to fail with ADMINLIMIT_EXCEEDED') + with pytest.raises(expected_rs): + all_results = paged_search(conn, DEFAULT_SUFFIX, controls, search_flt, searchreq_attrlist) + elif expected_rs == 'PASS': + log.info('Expect to pass') + all_results = paged_search(conn, DEFAULT_SUFFIX, controls, search_flt, searchreq_attrlist) + log.info('%d results' % len(all_results)) + assert len(all_results) == len(users_list) + finally: + del_users(users_list) + change_conf_attr(topology_st, DN_CONFIG, 'nsslapd-sizelimit', size_attr_bck) + change_conf_attr(topology_st, DN_CONFIG, 'nsslapd-pagedsizelimit', pagedsize_attr_bck) + change_conf_attr(topology_st, 'cn=config,%s' % DN_LDBM, 'nsslapd-lookthroughlimit', lookthrough_attr_bck) + change_conf_attr(topology_st, 'cn=config,%s' % DN_LDBM, 'nsslapd-idlistscanlimit', idlistscan_attr_bck) + + +@pytest.mark.parametrize('conf_attr_values,expected_rs', + ((('1000', '100', '100'), ldap.ADMINLIMIT_EXCEEDED), + (('1000', '120', '122'), 'PASS'))) +def test_search_paged_user_limits(topology_st, create_user, conf_attr_values, 
expected_rs): + """Verify that nsPagedIDListScanLimit and nsPagedLookthroughLimit + override nsslapd-idlistscanlimit and nsslapd-lookthroughlimit + while performing search with the simple paged results control. + + :id: 69e393e9-1ab8-4f4e-b4a1-06ca63dc7b1b + :customerscenario: True + :parametrized: yes + :setup: Standalone instance, test user for binding, + 10 users for the search base + :steps: + 1. Set nsslapd-idlistscanlimit: 1000 + 2. Set nsslapd-lookthroughlimit: 1000 + 3. Set nsPagedIDListScanLimit: 120 + 4. Set nsPagedLookthroughLimit: 122 + 5. Bind as test user + 6. Search through added users with a simple paged control + using page_size = 10 + 7. Bind as Directory Manager + 8. Set nsPagedIDListScanLimit: 100 + 9. Set nsPagedLookthroughLimit: 100 + 10. Bind as test user + 11. Search through added users with a simple paged control + using page_size = 10 + :expectedresults: + 1. nsslapd-idlistscanlimit should be successfully set + 2. nsslapd-lookthroughlimit should be successfully set + 3. nsPagedIDListScanLimit should be successfully set + 4. nsPagedLookthroughLimit should be successfully set + 5. Bind should be successful + 6. No error happens, all users should be found + 7. Bind should be successful + 8. nsPagedIDListScanLimit should be successfully set + 9. nsPagedLookthroughLimit should be successfully set + 10. Bind should be successful + 11. 
It should throw ADMINLIMIT_EXCEEDED exception + """ + + users_num = 101 + page_size = 10 + users_list = add_users(topology_st, users_num, DEFAULT_SUFFIX) + search_flt = r'(uid=test*)' + searchreq_attrlist = ['dn', 'sn'] + lookthrough_attr_bck = change_conf_attr(topology_st, 'cn=config,%s' % DN_LDBM, 'nsslapd-lookthroughlimit', conf_attr_values[0]) + idlistscan_attr_bck = change_conf_attr(topology_st, 'cn=config,%s' % DN_LDBM, 'nsslapd-idlistscanlimit', conf_attr_values[0]) + user_idlistscan_attr_bck = change_conf_attr(topology_st, create_user.dn, 'nsPagedIDListScanLimit', conf_attr_values[1]) + user_lookthrough_attr_bck = change_conf_attr(topology_st, create_user.dn, 'nsPagedLookthroughLimit', conf_attr_values[2]) + + try: + log.info('Set user bind') + conn = create_user.bind(TEST_USER_PWD) + + req_ctrl = SimplePagedResultsControl(True, size=page_size, cookie='') + controls = [req_ctrl] + + if expected_rs == ldap.ADMINLIMIT_EXCEEDED: + log.info('Expect to fail with ADMINLIMIT_EXCEEDED') + with pytest.raises(expected_rs): + all_results = paged_search(conn, DEFAULT_SUFFIX, controls, search_flt, searchreq_attrlist) + elif expected_rs == 'PASS': + log.info('Expect to pass') + all_results = paged_search(conn, DEFAULT_SUFFIX, controls, search_flt, searchreq_attrlist) + log.info('%d results' % len(all_results)) + assert len(all_results) == len(users_list) + finally: + del_users(users_list) + change_conf_attr(topology_st, 'cn=config,%s' % DN_LDBM, 'nsslapd-lookthroughlimit', lookthrough_attr_bck) + change_conf_attr(topology_st, 'cn=config,%s' % DN_LDBM, 'nsslapd-idlistscanlimit', idlistscan_attr_bck) + change_conf_attr(topology_st, create_user.dn, 'nsPagedIDListScanLimit', user_idlistscan_attr_bck) + change_conf_attr(topology_st, create_user.dn, 'nsPagedLookthroughLimit', user_lookthrough_attr_bck) + + +def test_ger_basic(topology_st, create_user): + """Verify that search with a simple paged results control + and get effective rights control returns all entries + it should 
without errors. + + :id: 7b0bdfc7-a2f2-4c1a-bcab-f1eb8b330d45 + :customerscenario: True + :setup: Standalone instance, test user for binding, + varying number of users for the search base + :steps: + 1. Search through added users with a simple paged control + and get effective rights control + :expectedresults: + 1. All users should be found, every found entry should have + an 'attributeLevelRights' returned + """ + + users_list = add_users(topology_st, 20, DEFAULT_SUFFIX) + search_flt = r'(uid=test*)' + searchreq_attrlist = ['dn', 'sn'] + page_size = 4 + + try: + spr_ctrl = SimplePagedResultsControl(True, size=page_size, cookie='') + ger_ctrl = GetEffectiveRightsControl(True, ensure_bytes("dn: " + DN_DM)) + + all_results = paged_search(topology_st.standalone, DEFAULT_SUFFIX, [spr_ctrl, ger_ctrl], + search_flt, searchreq_attrlist) + + log.info('{} results'.format(len(all_results))) + assert len(all_results) == len(users_list) + log.info('Check for attributeLevelRights') + assert all(attrs['attributeLevelRights'][0] for dn, attrs in all_results) + finally: + log.info('Remove added users') + del_users(users_list) + + +def test_multi_suffix_search(topology_st, create_user, new_suffixes): + """Verify that page result search returns empty cookie + if there is no returned entry. + + :id: 9712345b-9e38-4df6-8794-05f12c457d39 + :customerscenario: True + :setup: Standalone instance, test user for binding, + two suffixes with backends, one is inserted into another, + 10 users for the search base within each suffix + :steps: + 1. Bind as test user + 2. Search through all 20 added users with a simple paged control + using page_size = 4 + 3. Wait some time for the logs to be updated + 4. Check access log + :expectedresults: + 1. Bind should be successful + 2. All users should be found + 3. Some time should pass + 4. 
The access log should contain the pr_cookie for each page request + and it should be equal 0, except the last one should be equal -1 + """ + + search_flt = r'(uid=test*)' + searchreq_attrlist = ['dn', 'sn'] + page_size = 4 + users_num = 20 + + log.info('Clear the access log') + topology_st.standalone.deleteAccessLogs() + + users_list_1 = add_users(topology_st, 10, NEW_SUFFIX_1) + users_list_2 = add_users(topology_st, 10, NEW_SUFFIX_2) + + try: + req_ctrl = SimplePagedResultsControl(True, size=page_size, cookie='') + + all_results = paged_search(topology_st.standalone, NEW_SUFFIX_1, [req_ctrl], search_flt, searchreq_attrlist) + + log.info('{} results'.format(len(all_results))) + assert len(all_results) == users_num + + log.info('Restart the server to flush the logs') + topology_st.standalone.restart(timeout=10) + + access_log_lines = topology_st.standalone.ds_access_log.match('.*pr_cookie=.*') + pr_cookie_list = ([line.rsplit('=', 1)[-1] for line in access_log_lines]) + pr_cookie_list = [int(pr_cookie) for pr_cookie in pr_cookie_list] + log.info('Assert that last pr_cookie == -1 and others pr_cookie == 0') + pr_cookie_zeros = list(pr_cookie == 0 for pr_cookie in pr_cookie_list[0:-1]) + assert all(pr_cookie_zeros) + assert pr_cookie_list[-1] == -1 + finally: + log.info('Remove added users') + del_users(users_list_1) + del_users(users_list_2) + + +@pytest.mark.parametrize('conf_attr_value', (None, '-1', '1000')) +def test_maxsimplepaged_per_conn_success(topology_st, create_user, conf_attr_value): + """Verify that nsslapd-maxsimplepaged-per-conn acts according design + + :id: 192e2f25-04ee-4ff9-9340-d875dcbe8011 + :customerscenario: True + :parametrized: yes + :setup: Standalone instance, test user for binding, + 20 users for the search base + :steps: + 1. Set nsslapd-maxsimplepaged-per-conn in cn=config + to the next values: no value, -1, some positive + 2. Search through the added users with a simple paged control + using page size = 4 + :expectedresults: + 1. 
nsslapd-maxsimplepaged-per-conn should be successfully set + 2. If no value or value = -1 - all users should be found, + default behaviour; If the value is positive, + the value is the max simple paged results requests per connection. + """ + + users_list = add_users(topology_st, 20, DEFAULT_SUFFIX) + search_flt = r'(uid=test*)' + searchreq_attrlist = ['dn', 'sn'] + page_size = 4 + if conf_attr_value: + max_per_con_bck = change_conf_attr(topology_st, DN_CONFIG, 'nsslapd-maxsimplepaged-per-conn', conf_attr_value) + + try: + log.info('Set user bind') + conn = create_user.bind(TEST_USER_PWD) + + req_ctrl = SimplePagedResultsControl(True, size=page_size, cookie='') + + all_results = paged_search(conn, DEFAULT_SUFFIX, [req_ctrl], search_flt, searchreq_attrlist) + + log.info('{} results'.format(len(all_results))) + assert len(all_results) == len(users_list) + finally: + log.info('Remove added users') + del_users(users_list) + if conf_attr_value: + change_conf_attr(topology_st, DN_CONFIG, 'nsslapd-maxsimplepaged-per-conn', max_per_con_bck) + + +@pytest.mark.parametrize('conf_attr_value', ('0', '1')) +def test_maxsimplepaged_per_conn_failure(topology_st, create_user, conf_attr_value): + """Verify that nsslapd-maxsimplepaged-per-conn acts according design + + :id: eb609e63-2829-4331-8439-a35f99694efa + :customerscenario: True + :parametrized: yes + :setup: Standalone instance, test user for binding, + 20 users for the search base + :steps: + 1. Set nsslapd-maxsimplepaged-per-conn = 0 in cn=config + 2. Search through the added users with a simple paged control + using page size = 4 + 3. Set nsslapd-maxsimplepaged-per-conn = 1 in cn=config + 4. Search through the added users with a simple paged control + using page size = 4 two times, but don't close the connections + :expectedresults: + 1. nsslapd-maxsimplepaged-per-conn should be successfully set + 2. UNWILLING_TO_PERFORM should be thrown + 3. Bind should be successful + 4. 
UNWILLING_TO_PERFORM should be thrown + """ + + users_list = add_users(topology_st, 20, DEFAULT_SUFFIX) + search_flt = r'(uid=test*)' + searchreq_attrlist = ['dn', 'sn'] + page_size = 4 + max_per_con_bck = change_conf_attr(topology_st, DN_CONFIG, 'nsslapd-maxsimplepaged-per-conn', conf_attr_value) + + try: + log.info('Set user bind') + conn = create_user.bind(TEST_USER_PWD) + + log.info('Create simple paged results control instance') + req_ctrl = SimplePagedResultsControl(True, size=page_size, cookie='') + + with pytest.raises(ldap.UNWILLING_TO_PERFORM): + msgid = conn.search_ext(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE, + search_flt, searchreq_attrlist, serverctrls=[req_ctrl]) + rtype, rdata, rmsgid, rctrls = conn.result3(msgid) + + # If nsslapd-maxsimplepaged-per-conn = 1, + # it should pass this point, but failed on the next search + assert conf_attr_value == '1' + msgid = conn.search_ext(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE, + search_flt, searchreq_attrlist, serverctrls=[req_ctrl]) + rtype, rdata, rmsgid, rctrls = conn.result3(msgid) + finally: + log.info('Remove added users') + del_users(users_list) + change_conf_attr(topology_st, DN_CONFIG, 'nsslapd-maxsimplepaged-per-conn', max_per_con_bck) + +if __name__ == '__main__': + # Run isolated + # -s for DEBUG mode + CURRENT_FILE = os.path.realpath(__file__) + pytest.main("-s %s" % CURRENT_FILE) diff --git a/dirsrvtests/tests/suites/password/__init__.py b/dirsrvtests/tests/suites/password/__init__.py new file mode 100644 index 0000000..d48fba6 --- /dev/null +++ b/dirsrvtests/tests/suites/password/__init__.py @@ -0,0 +1,3 @@ +""" + :Requirement: 389-ds-base: Password Policy +""" diff --git a/dirsrvtests/tests/suites/password/password_TPR_policy_test.py b/dirsrvtests/tests/suites/password/password_TPR_policy_test.py new file mode 100644 index 0000000..41ccaa9 --- /dev/null +++ b/dirsrvtests/tests/suites/password/password_TPR_policy_test.py @@ -0,0 +1,521 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2021 Red Hat, Inc. 
+# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. +# --- END COPYRIGHT BLOCK --- + +import logging +import os +import pytest +from lib389.utils import * +from lib389.topologies import topology_st as topo +from lib389.topologies import topology_m2c2 as topo_m2c2 +from lib389.idm.user import UserAccounts, UserAccount +from lib389._constants import DEFAULT_SUFFIX, DN_DM +from lib389.config import Config +from lib389.idm.account import Accounts +from lib389.idm.organizationalunit import OrganizationalUnits, OrganizationalUnit +from lib389.idm.directorymanager import DirectoryManager +from lib389.pwpolicy import PwPolicyManager +from lib389.replica import Replicas, ReplicationManager +from lib389.dseldif import * +import time +import ldap + +pytestmark = pytest.mark.tier1 + +logging.getLogger(__name__).setLevel(logging.DEBUG) +log = logging.getLogger(__name__) + +DN_CONFIG = 'cn=config' +TEST_ENTRY_NAME = 'mmrepl_test' +TEST_ENTRY_DN = 'uid={},{}'.format(TEST_ENTRY_NAME, DEFAULT_SUFFIX) +NEW_SUFFIX_NAME = 'test_repl' +NEW_SUFFIX = 'o={}'.format(NEW_SUFFIX_NAME) +NEW_BACKEND = 'repl_base' +PASSWORD = 'password' +NEW_PASSWORD = 'changed_pass' +USER1_PASS = 'jdoe1_password' +USER2_PASS = 'jdoe2_password' + + +def get_agreement(agmts, consumer): + log.info('Get agreement towards consumer among the agreemment list') + for agmt in agmts.list(): + if (agmt.get_attr_val_utf8('nsDS5ReplicaPort') == str(consumer.port) and + agmt.get_attr_val_utf8('nsDS5ReplicaHost') == consumer.host): + return agmt + return None + + +def _create_user(topo, uid, cn, sn, givenname, userpassword, gid, ou): + user = UserAccounts(topo.standalone, DEFAULT_SUFFIX, rdn=ou).create(properties={ + 'uid': uid, + 'cn': cn, + 'sn': sn, + 'ou': ou, + 'givenname': givenname, + 'mail': f'{uid}@example.com', + 'homeDirectory': f'/home/{uid}', + 'uidNumber': '1000', + 'gidNumber': gid, + 'userpassword': userpassword, + }) + log.info('Creating user {} with UID: 
{}'.format(givenname, uid)) + return user + + +@pytest.fixture(scope="function") +def _add_user(request, topo): + for uid, cn, sn, givenname, userpassword, gid, ou in [ + ('jdoe1', 'John Doe1', 'jdoe1', 'Johnny', USER1_PASS, '10001', 'ou=People'), + ('jdoe2', 'Jane Doe2', 'jdoe2', 'Janie', USER2_PASS, '10002', 'ou=People'), + ]: + user = _create_user(topo, uid, cn, sn, givenname, userpassword, gid, ou) + instance = f'ou=People,{DEFAULT_SUFFIX}' + + def fin(): + + for user1 in UserAccounts(topo.standalone, DEFAULT_SUFFIX, rdn=None).list(): + user1.delete() + + request.addfinalizer(fin) + + +def change_pwp_parameter(topo, pwp, operation, to_do): + """ + Will change password policy parameter + """ + pwp1 = PwPolicyManager(topo.standalone) + user = pwp1.get_pwpolicy_entry(f'{pwp},{DEFAULT_SUFFIX}') + user.replace(operation, to_do) + + +@pytest.fixture(scope="function") +def set_global_TPR_policies(request, topo): + """Sets the required global password policy attributes under + cn=config entry + """ + + attrs = {'passwordMustChange': '', + 'passwordTPRMaxUse': '', + 'passwordTPRDelayExpireAt': '', + 'passwordTPRDelayValidFrom': '', + } + log.info('Get the default values') + entry = topo.standalone.getEntry(DN_CONFIG, ldap.SCOPE_BASE, '(objectClass=*)', attrs.keys()) + for key in attrs.keys(): + attrs[key] = entry.getValue(key) + log.info('Set the Global password policy passwordMustChange on, passwordTPRMaxUse 3') + log.info('passwordTPRDelayExpireAt 600, passwordTPRDelayValidFrom 6') + topo.standalone.config.replace_many(('passwordMustChange', 'on'), + ('passwordTPRMaxUse', '3'), + ('passwordTPRDelayExpireAt', '600'), + ('passwordTPRDelayValidFrom', '6')) + + def fin(): + """Resets the defaults""" + + log.info('Reset the defaults') + topo.standalone.open() + for key in attrs.keys(): + topo.standalone.config.replace(key, attrs[key]) + + request.addfinalizer(fin) + # A short sleep is required after the modifying password policy or cn=config + time.sleep(0.5) + + +def 
_create_local_pwp(topo, instance): + """ + For a subtree entry create a local policy + """ + + policy_props = {} + pwp = PwPolicyManager(topo.standalone) + pwadm_locpol = pwp.create_subtree_policy(instance, policy_props) + for attribute, value in [ + ('pwdmustchange', 'on'), + ('passwordTPRMaxUse', '3'), + ('passwordTPRDelayExpireAt', '1800'), + ('passwordTPRDelayValidFrom', '5'), + ]: + pwadm_locpol.add(attribute, value) + log.info('Creating local policies for subtree {}'.format(instance)) + return pwadm_locpol + + +def test_only_user_can_reset_TPR(topo, _add_user, set_global_TPR_policies): + """ One Time password with expiration + + :id: 07838d5e-db43-11eb-85e5-fa163ead4114 + :customerscenario: True + :setup: Standalone + :steps: + 1. Create DS Instance + 2. Create 2 users with appropriate password + 3. Create Global TPR policy enable passwordMustChange: on + 4. Trigger Temporary password and reset user1 password + 5. Bind as user#2 and attempt to Reset user#1 password as user#2 + 6. Verify admin can reset users#1,2 passwords + + :expectedresults: + 1. Success + 2. Success + 3. Success + 4. Success + 5. Fail(ldap.INSUFFICIENT_ACCESS) + 6. Success + +""" + log.info('Creating 2 users with appropriate password') + user1 = UserAccount(topo.standalone, f'uid=jdoe1,ou=People,{DEFAULT_SUFFIX}') + user2 = UserAccount(topo.standalone, f'uid=jdoe2,ou=People,{DEFAULT_SUFFIX}') + log.info('Setting Local policies...') + conn_user2 = user2.bind(USER2_PASS) + + UserAccount(conn_user2, user2.dn).replace('userpassword', 'reset_pass') + log.info('Attempting to change user#1 password as user#2 ') + + with pytest.raises(ldap.INSUFFICIENT_ACCESS): + UserAccount(conn_user2, user1.dn).replace('userpassword', 'reset_pass') + + +def test_local_TPR_supercedes_global_TPR(topo, _add_user, set_global_TPR_policies): + """ One Time password with expiration + + :id: beb2dac4-e116-11eb-a85e-98fa9ba19b65 + :customerscenario: True + :setup: Standalone + :steps: + 1. Create DS Instance + 2. 
Create user with appropriate password + 3. Configure the Global Password policies with passwordTPRMaxUse 5 + 4. Configure different local password policy for passwordTPRMaxUse 3 + 5. Trigger TPR by resetting the user password above + 6. Attempt an ldap search with an incorrect bind password for user above + 7. Repeat as many times as set by attribute passwordTPRMaxUse + 8. Should lock the account after value is set in the local passwordTPRMaxUse is reached + 9. Try to search with the correct password account will be locked. + + :expectedresults: + 1. Success + 2. Success + 3. Fail(ldap.INSUFFICIENT_ACCESS) + 4. Success + 5. Success + 6. Success + 7. Success + 8. Success + 9. Success + +""" + + user1 = UserAccount(topo.standalone, f'uid=jdoe1,ou=People,{DEFAULT_SUFFIX}') + user2 = UserAccount(topo.standalone, f'uid=jdoe2,ou=People,{DEFAULT_SUFFIX}') + log.info('Setting local password Temporary password reset policies') + + log.info('Setting Global TPR policy attributes') + Config(topo.standalone).replace('passwordMustChange', 'on') + Config(topo.standalone).replace('passwordTPRMaxUse', '5') + Config(topo.standalone).replace('passwordTPRDelayExpireAt', '600') + Config(topo.standalone).replace('passwordTPRDelayValidFrom', '6') + log.info('Resetting {} password to trigger TPR policy'.format(user1)) + user1.replace('userpassword', 'not_allowed_change') + count = 0 + + while count < 4: + if count == 4: + with pytest.raises(ldap.CONSTRAINT_VIOLATION): + user2.bind('badbadbad') + else: + with pytest.raises(ldap.INVALID_CREDENTIALS): + count += 1 + user2.bind('badbadbad') + + +def test_once_TPR_reset_old_passwd_invalid(topo, _add_user, set_global_TPR_policies): + """ Verify that once a password has been reset it cannot be reused + + :id: f3ea4f00-e89c-11eb-b81d-98fa9ba19b65 + :customerscenario: True + :setup: Standalone + :steps: + 1. Create DS Instance + 2. Create user jdoe1 with appropriate password + 3. 
Configure the Global Password policies enable passwordMustChange + 4. Trigger TPR by resetting the user jdoe1 password above + 5. Attempt to login with the old password + 6. Login as jdoe1 with the correct password and update the new password + + + :expectedresults: + 1. Success + 2. Success + 3. Success + 4. Success + 5. Fail(ldap.CONSTRAINT_VIOLATION) + 6. Success + +""" + new_password = 'test_password' + log.info('Creating user jdoe1 with appropriate password') + user1 = UserAccount(topo.standalone, f'uid=jdoe1,ou=People,{DEFAULT_SUFFIX}') + user1.replace('userpassword', new_password) + log.info('Making sure the Global Policy passwordTPRDelayValidFrom is short') + config = Config(topo.standalone) + config.replace_many( + ('passwordLockout', 'off'), + ('passwordMaxFailure', '3'), + ('passwordLegacyPolicy', 'off'), + ('passwordTPRDelayValidFrom', '-1'), + ('nsslapd-pwpolicy-local', 'on'), ) + + log.info(' Attempting to bind as {} with the old password {}'.format(user1, USER1_PASS)) + time.sleep(.5) + with pytest.raises(ldap.INVALID_CREDENTIALS): + user1.bind(USER1_PASS) + log.info('Login as jdoe1 with the correct reset password') + time.sleep(.5) + user1.rebind(new_password) + + +def test_reset_pwd_before_passwordTPRDelayValidFrom(topo, _add_user, set_global_TPR_policies): + """ Verify that user cannot reset pwd + before passwordTPRDelayValidFrom value elapses + + :id: 22987082-e8ae-11eb-a992-98fa9ba19b65 + :customerscenario: True + :setup: Standalone + :steps: + 1. Create DS Instance + 2. Create user jdoe2 with appropriate password + 3. Configure the Global Password policies disable passwordTPRDelayValidFrom to -1 + 4. Trigger TPR by resetting the user jdoe1 password above + 5. Attempt to bind and rebind immediately + 6. Set passwordTPRDelayValidFrom - 5secs elapses and bind rebind before 5 secs elapses + 7. Wait for the passwordTPRDelayValidFrom value to elapse and try to reset passwd + + :expectedresults: + 1. Success + 2. Success + 3. Success + 4. Success + 5. 
Success + 6. Fail(ldap.LDAP_CONSTRAINT_VIOLATION) + 7. Success + + +""" + user2 = UserAccount(topo.standalone, f'uid=jdoe2,ou=People,{DEFAULT_SUFFIX}') + log.info('Creating user {} with appropriate password'.format(user2)) + log.info('Disabling TPR policy passwordTPRDelayValidFrom') + topo.standalone.config.replace_many(('passwordMustChange', 'on'), + ('passwordTPRDelayValidFrom', '10')) + log.info('Triggering TPR and binding immediately after') + user2.replace('userpassword', 'new_password') + time.sleep(.5) + with pytest.raises(ldap.CONSTRAINT_VIOLATION): + user2.bind('new_password') + time.sleep(.5) + topo.standalone.config.replace_many(('passwordMustChange', 'on'), + ('passwordTPRDelayValidFrom', '-1')) + log.info('Triggering TPR and binding immediately after with passwordTPRDelayValidFrom set to -1') + user2.replace('userpassword', 'new_password1') + time.sleep(.5) + user2.rebind('new_password1') + + +def test_admin_resets_pwd_TPR_attrs_reset(topo, _add_user, set_global_TPR_policies): + """Test When the ‘userpassword’ is updated (update_pw_info) by an administrator + and it exists a TPR policy, then the server flags that the entry has a + TPR password with ‘pwdTPRReset: TRUE’, ‘pwdTPRExpTime’ and ‘pwdTPRUseCount’. + + :id: e6a84dc0-f142-11eb-8c96-fa163e1f582c + :customerscenario: True + :setup: Standalone + :steps: + 1. Create DS Instance + 2. Create user jdoe2 with appropriate password + 3. Configure the Global Password policies enable + 4. Trigger TPR by resetting the user jdoe1 password above + 5. Reset the users password ‘userpassword’ + 6. Check that ‘pwdTPRExpTime’ and ‘pwdTPRUseCount’ are updated + :expectedresults: + 1. Success + 2. Success + 3. Success + 4. Success + 5. Success + 6. 
Success + + """ + + user1 = UserAccount(topo.standalone, f'uid=jdoe1,ou=People,{DEFAULT_SUFFIX}') + log.info('Logging current time') + start_time = time.mktime(time.gmtime()) + log.info('Verifying the Global policy are set and attributes are all set to "None"') + for tpr_attrib in ['pwdTPRReset', 'pwdTPRExpTime', 'pwdTPRUseCount']: + assert user1.get_attr_val_utf8(tpr_attrib) is None + config = Config(topo.standalone) + config.replace_many(('pwdmustchange', 'on'), + ('passwordTPRMaxUse', '3'), + ('passwordTPRDelayExpireAt', '1800'), + ('passwordTPRDelayValidFrom', '1')) + assert user1.get_attr_val_utf8('pwdTPRExpTime') is None + log.info('Triggering TPR as Admin') + user1.replace('userpassword', 'new_password') + time.sleep(1) + log.info('Checking that pwdTPRReset, pwdTPRExpTime, pwdTPRUseCount are reset.') + assert user1.get_attr_val_utf8('pwdTPRReset') == 'TRUE' + assert user1.get_attr_val_utf8('pwdTPRExpTime') is None + assert user1.get_attr_val_utf8('pwdTPRUseCount') is '0' + + +def test_user_resets_pwd_TPR_attrs_reset(topo, _add_user, set_global_TPR_policies): + """Test once password is reset attributes are set to FALSE + + :id: 6614068a-ee7d-11eb-b1a3-98fa9ba19b65 + :customerscenario: True + :setup: Standalone + :steps: + 1. Create DS Instance + 2. Create user jdoe2 with appropriate password + 3. Configure the Global Password policies and set passwordMustChange on + 4. Trigger TPR by resetting the user jdoe1 password above + 5. Reset the users password ‘userpassword’ + 6. Check that pwdTPRReset, pwdTPRUseCount, pwdTPRValidFrom, pwdTPRExpireAt are RESET + :expectedresults: + 1. Success + 2. Success + 3. Success + 4. Success + 5. Success + 6. 
Success + + """ + user1 = UserAccount(topo.standalone, f'uid=jdoe1,ou=People,{DEFAULT_SUFFIX}') + log.info('Logging current time') + start_time = time.mktime(time.gmtime()) + log.info('Verifying the Global policy are set and attributes are all set to "None"') + for tpr_attrib in ['pwdTPRReset', 'pwdTPRUseCount', 'pwdTPRValidFrom', 'pwdTPRExpireAt']: + assert user1.get_attr_val_utf8(tpr_attrib) is None + config = Config(topo.standalone) + config.replace_many(('pwdmustchange', 'on'), + ('passwordTPRMaxUse', '3'), + ('passwordTPRDelayExpireAt', '1800'), + ('passwordTPRDelayValidFrom', '1')) + assert user1.get_attr_val_utf8('pwdTPRReset') is None + log.info('Triggering TPR check that pwdTPRReset, pwdTPRUseCount, pwdTPRValidFrom, pwdTPRExpireAt are set') + user1.replace('userpassword', 'new_password') + time.sleep(3) + assert user1.get_attr_val_utf8('pwdTPRReset') == 'TRUE' + assert user1.get_attr_val_utf8('pwdTPRUseCount') == '0' + assert gentime_to_posix_time(user1.get_attr_val_utf8('pwdTPRValidFrom')) > start_time + assert gentime_to_posix_time(user1.get_attr_val_utf8('pwdTPRExpireAt')) > start_time + conn = user1.rebind('new_password') + user1.replace('userpassword', 'extra_new_pass') + log.info('Checking that pwdTPRReset, pwdTPRUseCount, pwdTPRValidFrom, pwdTPRExpireAt are reset to None') + time.sleep(3) + assert user1.get_attr_val_utf8('pwdTPRReset') is None + assert user1.get_attr_val_utf8('pwdTPRUseCount') is None + assert (user1.get_attr_val_utf8('pwdTPRValidFrom')) is None + assert (user1.get_attr_val_utf8('pwdTPRExpireAt')) is None + log.info('Verified that attributes are reset after password is reset') + + +def test_TPR_replication_entry(topo_m2c2): + """Test once password is reset attributes are set to FALSE + + :id: f8b98042-ff07-11eb-b938-98fa9ba19b65 + :customerscenario: True + :setup: Replicated 2 Suppliers 2 Consumers + :steps: + 1. Create Replicated Topology with 2 suppliers and 2 consumers + 2. Create users on each replica + 3. 
Verify that 'pwdTPRReset', 'pwdTPRUseCount', 'pwdTPRValidFrom', 'pwdTPRExpireAt' are set to None + 4. Configure the Global Password policies and set passwordMustChange on supplier1 + 5. Trigger TPR by resetting the users password above + 6. Reset the users password ‘userpassword’ + 7. Check that pwdTPRReset, pwdTPRUseCount, pwdTPRValidFrom, pwdTPRExpireAt are updated on every replica + :expectedresults: + 1. Success + 2. Success + 3. Success + 4. Success + 5. Success + 6. Success + 7. Success + + """ + repl_list = ['supplier1', 'supplier2', 'consumer1', 'consumer2'] + users_list = ['user_supplier1', 'user_supplier2', 'user_consumer1', 'user_consumer2'] + uid = 'jdoe_repl' + cn = 'John Doe1 Repl' + sn = 'jdoe1_repl' + givenname = 'Johnny Replica' + userpassword = 'replica_pass' + gid = '10001' + ou = 'ou=People' + user_obj_list = [] + + for repl in repl_list: + for user in users_list: + obj_user = UserAccounts(topo_m2c2.ms[repl], DEFAULT_SUFFIX, rdn=ou).create(properties={ + 'uid': f'{uid}{user}', + 'cn': cn, + 'sn': sn, + 'ou': ou, + 'givenname': givenname, + 'mail': f'{repl}{user}@example.com', + 'homeDirectory': f'/home/{uid}{user}', + 'uidNumber': '1000', + 'gidNumber': gid, + 'userpassword': userpassword, + }) + user_obj_list.append(obj_user) + log.info('Creating user {} with UID: {} for {}'.format(givenname, uid, repl)) + break + log.info("Created the following objects {}".format(user_obj_list)) + + start_time = time.mktime(time.gmtime()) + log.info('Verifying the Global policy are set and attributes are all set to "None"') + tpr_attrib_list = ['pwdTPRReset', 'pwdTPRUseCount', 'pwdTPRValidFrom', 'pwdTPRExpireAt'] + for tpr_attrib in tpr_attrib_list: + assert user_obj_list[0].get_attr_val_utf8(tpr_attrib) is None + assert user_obj_list[1].get_attr_val_utf8(tpr_attrib) is None + assert user_obj_list[2].get_attr_val_utf8(tpr_attrib) is None + assert user_obj_list[3].get_attr_val_utf8(tpr_attrib) is None + + 
topo_m2c2.ms["supplier1"].config.replace_many(('passwordMustChange', 'on'), + ('passwordTPRMaxUse', '3'), + ('passwordTPRDelayExpireAt', '600'), + ('passwordTPRDelayValidFrom', '1')) + for user in user_obj_list: + user.replace('userpassword', 'changed_pass') + log.info('Triggering TPR by resetting password for entry {}'.format(user)) + time.sleep(3) + log.info("Checking that Global passwordTPRMaxUse is in effect.") + count = 0 + while count < 3: + if count == 3: + with pytest.raises(ldap.CONSTRAINT_VIOLATION): + user.bind('password_fails') + else: + with pytest.raises(ldap.INVALID_CREDENTIALS): + count += 1 + user.bind('password_fails') + + for user in user_obj_list: + assert user.get_attr_val_utf8('pwdTPRReset') == 'TRUE' + assert user.get_attr_val_utf8('pwdTPRUseCount') == '3' + assert gentime_to_posix_time(user.get_attr_val_utf8('pwdTPRValidFrom')) > start_time + assert gentime_to_posix_time(user.get_attr_val_utf8('pwdTPRExpireAt')) > start_time + log.info('Checking TPR attributes are replicated for {}.'.format(user)) + + +if __name__ == "__main__": + CURRENT_FILE = os.path.realpath(__file__) + pytest.main("-s -v %s" % CURRENT_FILE) diff --git a/dirsrvtests/tests/suites/password/password_policy_test.py b/dirsrvtests/tests/suites/password/password_policy_test.py new file mode 100644 index 0000000..f46f959 --- /dev/null +++ b/dirsrvtests/tests/suites/password/password_policy_test.py @@ -0,0 +1,1524 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2020 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. +# --- END COPYRIGHT BLOCK --- + +""" +This test script will test password policy. 
+""" + +import os +import pytest +import time +from lib389.config import Config +from lib389.topologies import topology_st as topo +from lib389.topologies import topology_m1 +from lib389.idm.domain import Domain +from lib389.idm.organizationalunit import OrganizationalUnits +from lib389.idm.user import UserAccounts, UserAccount +from lib389._constants import DEFAULT_SUFFIX +from lib389.pwpolicy import PwPolicyManager, PwPolicyEntries +from lib389.idm.account import Account +from lib389.idm.nscontainer import nsContainers +from lib389.cos import CosPointerDefinitions, CosTemplates +import ldap + + +pytestmark = pytest.mark.tier1 + + +def create_user(inst, uid, cn, sn, givenname, userpasseord, gid, ou): + """ + Will create user + """ + user = UserAccounts(inst, DEFAULT_SUFFIX, rdn=ou).create(properties={ + 'uid': uid, + 'cn': cn, + 'sn': sn, + 'givenname': givenname, + 'mail': f'{uid}@example.com', + 'userpassword': userpasseord, + 'homeDirectory': f'/home/{uid}', + 'uidNumber': gid, + 'gidNumber': gid + }) + return user + + +def create_subtree_policy_custom(instance, dn, properties): + """Creates all entries which are needed for the subtree + password policy + + :param dn: Entry DN for the subtree pwpolicy + :type dn: str + :param properties: A dict with password policy settings + :type properties: dict + + :returns: PwPolicyEntry instance + """ + + # Verify target dn exists before getting started + subtree_entry = Account(instance, dn) + if not subtree_entry.exists(): + raise ValueError('Can not create subtree password policy because the target dn does not exist') + + # Create the pwp container if needed + pwp_containers = nsContainers(instance, basedn=dn) + pwp_container = pwp_containers.ensure_state(properties={'cn': 'nsPwPolicyContainer'}) + + # Create policy entry + pwp_entry = None + properties['cn'] = '"cn=nsPwPolicyEntry_subtree,%s"' % dn + pwp_entries = PwPolicyEntries(instance, pwp_container.dn) + pwp_entry = pwp_entries.create(properties=properties) + 
try: + # The CoS template entry (nsPwTemplateEntry) that has the pwdpolicysubentry + # value pointing to the above (nsPwPolicyEntry) entry + cos_template = None + cos_templates = CosTemplates(instance, pwp_container.dn) + cos_template = cos_templates.create(properties={'cosPriority': '1', + 'pwdpolicysubentry': pwp_entry.dn, + 'cn': 'cn=nsPwTemplateEntry,%s' % dn}) + + # The CoS specification entry at the subtree level + cos_pointer_defs = CosPointerDefinitions(instance, dn) + cos_pointer_defs.create(properties={'cosAttribute': 'pwdpolicysubentry default operational', + 'cosTemplateDn': cos_template.dn, + 'cn': 'nsPwPolicy_CoS'}) + except ldap.LDAPError as e: + # Something went wrong, remove what we have done + if pwp_entry is not None: + pwp_entry.delete() + if cos_template is not None: + cos_template.delete() + raise e + + # make sure that local policies are enabled + config = Config(instance) + config.replace('nsslapd-pwpolicy-local', 'on') + + return pwp_entry + + +@pytest.fixture(scope="function") +def policy_qoutes_setup(topology_m1, request): + inst = topology_m1.ms["supplier1"] + + # Add self user modification and anonymous aci + USER_SELF_MOD_ACI = '(targetattr="userpassword")(version 3.0; acl "pwp test"; allow (all) userdn="ldap:///self";)' + ANON_ACI = "(targetattr=\"*\")(version 3.0; acl \"Anonymous Read access\"; allow (read,search,compare) userdn = \"ldap:///anyone\";)" + suffix = Domain(inst, DEFAULT_SUFFIX) + suffix.add('aci', USER_SELF_MOD_ACI) + suffix.add('aci', ANON_ACI) + + ous = [] + for suffix, ou in [(DEFAULT_SUFFIX, 'dirsec'), (f'ou=people,{DEFAULT_SUFFIX}', 'others')]: + created_ou = OrganizationalUnits(inst, suffix).create(properties={ + 'ou': ou + }) + ous.append(created_ou) + + for uid, cn, sn, givenname, userpasseord, gid, ou in [ + ('dbyers', 'Danny Byers', 'Byers', 'Danny', 'dby3rs1', '10001', 'ou=dirsec'), + ('orla', 'Orla Hegarty', 'Hegarty', 'Orla', '000rla1', '10002', 'ou=dirsec'), + ('joe', 'Joe Rath', 'Rath', 'Joe', '00j0e1', 
'10003', 'ou=people'), + ('jack', 'Jack Rath', 'Rath', 'Jack', '00j6ck1', '10004', 'ou=people'), + ('fred', 'Fred Byers', 'Byers', 'Fred', '00fr3d1', '10005', None), + ('deep', 'Deep Blue', 'Blue', 'Deep', '00de3p1', '10006', 'ou=others, ou=people'), + ('accntlusr', 'AccountControl User', 'ControlUser', 'Account', 'AcControl123', '10007', 'ou=dirsec'), + ('nocntlusr', 'NoAccountControl User', 'ControlUser', 'NoAccount', 'NoControl123', '10008', 'ou=dirsec') + ]: + create_user(inst, uid, cn, sn, givenname, userpasseord, gid, ou) + policy_props = {'passwordexp': 'off', + 'passwordchange': 'off', + 'passwordmustchange': 'off', + 'passwordchecksyntax': 'off', + 'passwordinhistory': '6', + 'passwordhistory': 'off', + 'passwordlockout': 'off', + 'passwordlockoutduration': '3600', + 'passwordmaxage': '8640000', + 'passwordmaxfailure': '3', + 'passwordminage': '0', + 'passwordminlength': '6', + 'passwordresetfailurecount': '600', + 'passwordunlock': 'on', + 'passwordStorageScheme': 'CLEAR', + 'passwordwarning': '86400' + } + pwp = PwPolicyManager(inst) + for dn_dn in (f'uid=orla,ou=dirsec,{DEFAULT_SUFFIX}', + f'uid=joe,ou=People,{DEFAULT_SUFFIX}'): + pwp.create_user_policy(dn_dn, policy_props) + + # The function creates PwPolicyEntry with cn: "" value instead of + create_subtree_policy_custom(inst, f'ou=People,{DEFAULT_SUFFIX}', policy_props) + + def fin(): + # Remove the OrganizationalUnits that was created for this test case + for ou in ous: + inst.delete_branch_s(ou.dn, ldap.SCOPE_SUBTREE, filterstr="(|(objectclass=*)(objectclass=ldapsubentry))") + request.addfinalizer(fin) + + return pwp + +@pytest.fixture(scope="module") +def policy_setup(topo): + """ + Will do pretest setup. 
+ """ + + # Add self user modification and anonymous aci + USER_SELF_MOD_ACI = '(targetattr="userpassword")(version 3.0; acl "pwp test"; allow (all) userdn="ldap:///self";)' + ANON_ACI = "(targetattr=\"*\")(version 3.0; acl \"Anonymous Read access\"; allow (read,search,compare) userdn = \"ldap:///anyone\";)" + suffix = Domain(topo.standalone, DEFAULT_SUFFIX) + suffix.add('aci', USER_SELF_MOD_ACI) + suffix.add('aci', ANON_ACI) + + for suffix, ou in [(DEFAULT_SUFFIX, 'dirsec'), (f'ou=people,{DEFAULT_SUFFIX}', 'others')]: + OrganizationalUnits(topo.standalone, suffix).create(properties={ + 'ou': ou + }) + for uid, cn, sn, givenname, userpasseord, gid, ou in [ + ('dbyers', 'Danny Byers', 'Byers', 'Danny', 'dby3rs1', '10001', 'ou=dirsec'), + ('orla', 'Orla Hegarty', 'Hegarty', 'Orla', '000rla1', '10002', 'ou=dirsec'), + ('joe', 'Joe Rath', 'Rath', 'Joe', '00j0e1', '10003', 'ou=people'), + ('jack', 'Jack Rath', 'Rath', 'Jack', '00j6ck1', '10004', 'ou=people'), + ('fred', 'Fred Byers', 'Byers', 'Fred', '00fr3d1', '10005', None), + ('deep', 'Deep Blue', 'Blue', 'Deep', '00de3p1', '10006', 'ou=others, ou=people'), + ('accntlusr', 'AccountControl User', 'ControlUser', 'Account', 'AcControl123', '10007', 'ou=dirsec'), + ('nocntlusr', 'NoAccountControl User', 'ControlUser', 'NoAccount', 'NoControl123', '10008', 'ou=dirsec') + ]: + create_user(topo.standalone, uid, cn, sn, givenname, userpasseord, gid, ou) + policy_props = {'passwordexp': 'off', + 'passwordchange': 'off', + 'passwordmustchange': 'off', + 'passwordchecksyntax': 'off', + 'passwordinhistory': '6', + 'passwordhistory': 'off', + 'passwordlockout': 'off', + 'passwordlockoutduration': '3600', + 'passwordmaxage': '8640000', + 'passwordmaxfailure': '3', + 'passwordminage': '0', + 'passwordminlength': '6', + 'passwordresetfailurecount': '600', + 'passwordunlock': 'on', + 'passwordStorageScheme': 'CLEAR', + 'passwordwarning': '86400' + } + pwp = PwPolicyManager(topo.standalone) + for dn_dn in 
(f'uid=orla,ou=dirsec,{DEFAULT_SUFFIX}', + f'uid=joe,ou=People,{DEFAULT_SUFFIX}'): + pwp.create_user_policy(dn_dn, policy_props) + pwp.create_subtree_policy(f'ou=People,{DEFAULT_SUFFIX}', policy_props) + + +def change_password(topo, user_password_new_pass_list): + """ + Will change password with self binding. + """ + for user, password, new_pass in user_password_new_pass_list: + real_user = UserAccount(topo.standalone, f'{user},{DEFAULT_SUFFIX}') + conn = real_user.bind(password) + UserAccount(conn, real_user.dn).replace('userpassword', new_pass) + + +def change_password_ultra_new(topo, user_password_new_pass_list): + """ + Will change password with self binding. + """ + for user, password, new_pass, ultra_new_pass in user_password_new_pass_list: + real_user = UserAccount(topo.standalone, f'{user},{DEFAULT_SUFFIX}') + conn = real_user.bind(password) + UserAccount(conn, real_user.dn).replace('userpassword', new_pass) + conn = real_user.bind(new_pass) + UserAccount(conn, real_user.dn).replace('userpassword', ultra_new_pass) + + +def change_password_with_admin(topo, user_password_new_pass_list): + """ + Will change password by root. 
+ """ + for user, password in user_password_new_pass_list: + UserAccount(topo.standalone, f'{user},{DEFAULT_SUFFIX}').replace('userpassword', password) + + +def _do_transaction_for_pwp(topo, attr1, attr2): + """ + Will change pwp parameters + """ + pwp = PwPolicyManager(topo.standalone) + orl = pwp.get_pwpolicy_entry(f'uid=orla,ou=dirsec,{DEFAULT_SUFFIX}') + joe = pwp.get_pwpolicy_entry(f'uid=joe,ou=people,{DEFAULT_SUFFIX}') + people = pwp.get_pwpolicy_entry(f'ou=people,{DEFAULT_SUFFIX}') + for instance in [orl, joe, people]: + instance.replace(attr1, attr2) + for instance in [orl, joe, people]: + assert instance.get_attr_val_utf8(attr1) == attr2 + + +@pytest.fixture(scope="function") +def fixture_for_password_change(request, topo): + pwp = PwPolicyManager(topo.standalone) + orl = pwp.get_pwpolicy_entry(f'uid=orla,ou=dirsec,{DEFAULT_SUFFIX}') + for attribute in ('passwordMustChange', 'passwordmustchange'): + orl.replace(attribute, 'off') + assert orl.get_attr_val_utf8(attribute) == 'off' + + def final_task(): + people = pwp.get_pwpolicy_entry(f'ou=people,{DEFAULT_SUFFIX}') + people.replace('passwordchange', 'on') + assert people.get_attr_val_utf8('passwordchange') == 'on' + # Administrator Reseting to original password + change_password_with_admin(topo, [ + ('uid=joe,ou=people', '00j0e1'), + ('uid=fred', '00fr3d1'), + ('uid=jack,ou=people', '00j6ck1'), + ('uid=deep,ou=others,ou=people', '00de3p1'), + ('uid=orla,ou=dirsec', '000rla1'), + ('uid=dbyers,ou=dirsec', 'Anuj') + ]) + request.addfinalizer(final_task) + + +def test_password_change_section(topo, policy_setup, fixture_for_password_change): + """Password Change Section. + + :id: 5d018c08-9388-11ea-8394-8c16451d917b + :setup: Standalone + :steps: + 1. Confirm that user is not been affected by fine grained password + (As its is not belong to any password policy) + 2. Should be able to change password(As its is not belong to any password policy) + 3. 
Try to change password for user even though pw policy is set to no. +            Should get error message: unwilling to Perform ! +        4. Set Password change to May Change Password. +        5. Administrator Resetting to original password ! +        6. Attempt to Modify password to orla2 with an invalid first pw with error message. +        7. Changing current password from orla1 to orla2 +        8. Changing current password from orla2 to orla1. +        9. Set Password change to Must Not Change After Reset +        10. Change password for joe,jack,deep even though pw policy is set to no with error message. +        11. Fred can change.(Fred does not belong to any pw policy) +        12. Changing pw policy to may change pw +        13. Set Password change to May Change Password +        14. Administrator Resetting to original password +        15. Try to change password with invalid credentials. Should see error message. +        16. Changing current password for joe and fred. +        17. Changing current password for jack and deep with error message.(passwordchange not on) +        18. Changing pw policy to may change pw +        19. Set Password change to May Change Password +        20. Administrator Resetting to original password +        21. Try to change password with invalid credentials. Should see error message. +        22. Changing current password +        23. Set Password change to Must Not Change After Reset +        24. Searching for passwordchange: Off +        25. Administrator Resetting to original password +        26. Try to change password with invalid credentials. Should see error message +        27. Changing current password (('passwordchange', 'off') for joe) +    :expectedresults: +        1. Success(As it does not belong to any password policy) +        2. Success +        3. Fail(pw policy is set to no) +        4. Success +        5. Success +        6. Fail(invalid first pw) +        7. Success +        8. Success +        9. Success +        10. Fail(pw policy is set to no) +        11. Success((Fred does not belong to any pw policy)) +        12. Success +        13. Success +        14. Success +        15. Fail(invalid credentials) +        16. Success((passwordchange on)) +        17. Fail(passwordchange not on) +        18. Success +        19. 
Success + 20. Success + 21. Fail(invalid credentials) + 22. Success + 23. Success + 24. Success + 25. Success + 26. Fail(invalid credentials) + 27. Success + """ + # Confirm that uid=dbyers is not been affected by fine grained password + dbyers = UserAccount(topo.standalone, f'uid=dbyers,ou=dirsec,{DEFAULT_SUFFIX}') + conn = dbyers.bind('dby3rs1') + dbyers_conn = UserAccount(conn, f'uid=dbyers,ou=dirsec,{DEFAULT_SUFFIX}') + # Should be able to change password(As its is not belong to any password policy) + dbyers_conn.replace('userpassword', "Anuj") + # Try to change password for uid=orla even though pw policy is set to no. + # Should get error message: unwilling to Perform ! + orla = UserAccount(topo.standalone, f'uid=orla,ou=dirsec,{DEFAULT_SUFFIX}') + conn = orla.bind('000rla1') + orla_conn = UserAccount(conn, f'uid=orla,ou=dirsec,{DEFAULT_SUFFIX}') + # pw policy is set to no + with pytest.raises(ldap.UNWILLING_TO_PERFORM): + orla_conn.replace('userpassword', "000rla2") + pwp = PwPolicyManager(topo.standalone) + orl = pwp.get_pwpolicy_entry(f'uid=orla,ou=dirsec,{DEFAULT_SUFFIX}') + # Set Password change to May Change Password. + orl.replace('passwordchange', 'on') + assert orl.get_attr_val_utf8('passwordchange') == 'on' + # Administrator Reseting to original password ! + orla.replace('userpassword', '000rla1') + # Attempt to Modify password to orla2 with an invalid first pw with error message. + with pytest.raises(ldap.INVALID_CREDENTIALS): + conn = orla.bind('Invalid_password') + # Changing current password from orla1 to orla2 + orla_conn.replace('userpassword', '000rla2') + # Changing current password from orla2 to orla1. 
+ orla_conn = UserAccount(conn, f'uid=orla,ou=dirsec,{DEFAULT_SUFFIX}') + orla_conn.replace('userpassword', '000rla1') + # Set Password change to Must Not Change After Reset + joe = pwp.get_pwpolicy_entry(f'uid=joe,ou=people,{DEFAULT_SUFFIX}') + people = pwp.get_pwpolicy_entry(f'ou=people,{DEFAULT_SUFFIX}') + joe.replace_many(('passwordmustchange', 'off'), ('passwordchange', 'off')) + people.replace_many(('passwordmustchange', 'off'), ('passwordchange', 'off')) + for attr in ['passwordMustChange', 'passwordchange']: + assert joe.get_attr_val_utf8(attr) == 'off' + for attr in ['passwordMustChange', 'passwordchange']: + assert people.get_attr_val_utf8(attr) == 'off' + # Change password for uid,joe,jack,deep even though pw policy is set to no with error message. + for user, password, pass_to_change in [ + ('joe', '00j0e1', '00j0e2'), + ('jack', '00j6ck1', '00j6ck2'), + ('deep,ou=others', '00de3p1', '00de3p2') + ]: + real_user = UserAccount(topo.standalone, f'uid={user},ou=people,{DEFAULT_SUFFIX}') + conn = real_user.bind(password) + real_conn = UserAccount(conn, real_user.dn) + # pw policy is set to no + with pytest.raises(ldap.UNWILLING_TO_PERFORM): + real_conn.replace('userpassword', pass_to_change) + real_user = UserAccount(topo.standalone, f'uid=fred,{DEFAULT_SUFFIX}') + conn = real_user.bind('00fr3d1') + # Fred can change.(Fred is not belong to any pw policy) + real_conn = UserAccount(conn, real_user.dn) + real_conn.replace('userpassword', '00fr3d2') + # Changing pw policy to may change pw + # Set Password change to May Change Password + joe = pwp.get_pwpolicy_entry(f'uid=joe,ou=people,{DEFAULT_SUFFIX}') + joe.replace('passwordchange', 'on') + assert joe.get_attr_val_utf8('passwordchange') == 'on' + # Administrator Reseting to original password + change_password_with_admin(topo, [ + ('uid=joe,ou=people', '00j0e1'), + ('uid=jack,ou=people', '00j6ck1'), + ('uid=fred', '00fr3d1'), + ('uid=deep,ou=others,ou=people', '00de3p1') + ]) + # Try to change password with 
invalid credentials. Should see error message. + for user in [ + 'uid=joe,ou=people', + 'uid=jack,ou=people', + 'uid=fred', + 'uid=deep,ou=others,ou=people' + ]: + with pytest.raises(ldap.INVALID_CREDENTIALS): + UserAccount(topo.standalone, f'{user},{DEFAULT_SUFFIX}').bind("bad") + # Changing current password for joe and fed. + for user, password, new_pass in [ + ('uid=joe,ou=people', '00j0e1', '00j0e2'), + ('uid=fred', '00fr3d1', '00fr3d2') + ]: + real_user = UserAccount(topo.standalone, f'{user},{DEFAULT_SUFFIX}') + conn = real_user.bind(password) + UserAccount(conn, real_user.dn).replace('userpassword', new_pass) + # Changing current password for jack and deep with error message.(passwordchange not on) + for user, password, new_pass in [ + ('uid=jack,ou=people', '00j6ck1', '00j6ck2'), + ('uid=deep,ou=others,ou=people', '00de3p1', '00de3p2') + ]: + real_user = UserAccount(topo.standalone, f'{user},{DEFAULT_SUFFIX}') + conn = real_user.bind(password) + with pytest.raises(ldap.UNWILLING_TO_PERFORM): + UserAccount(conn, real_user.dn).replace('userpassword', new_pass) + # Changing pw policy to may change pw + # Set Password change to May Change Password + people.replace('passwordchange', 'on') + assert people.get_attr_val_utf8('passwordchange') == 'on' + # Administrator Reseting to original password + change_password_with_admin(topo, [ + ('uid=joe,ou=people', '00j0e1'), + ('uid=jack,ou=people', '00j6ck1'), + ('uid=fred', '00fr3d1'), + ('uid=deep,ou=others,ou=people', '00de3p1') + ]) + # Try to change password with invalid credentials. Should see error message. 
+ for user in [ + 'uid=joe,ou=people', + 'uid=jack,ou=people', + 'uid=fred', + 'uid=deep,ou=others,ou=people' + ]: + with pytest.raises(ldap.INVALID_CREDENTIALS): + UserAccount(topo.standalone, f'{user},{DEFAULT_SUFFIX}').bind("bad") + # Changing current password + change_password(topo, [ + ('uid=joe,ou=people', '00j0e1', '00j0e2'), + ('uid=fred', '00fr3d1', '00fr3d2'), + ('uid=jack,ou=people', '00j6ck1', '00j6ck2'), + ('uid=deep,ou=others,ou=people', '00de3p1', '00de3p2') + ]) + # Set Password change to Must Not Change After Reset + joe.replace('passwordchange', 'off') + assert joe.get_attr_val_utf8('passwordchange') == 'off' + # Administrator Reseting to original password + change_password_with_admin(topo, [ + ('uid=joe,ou=people', '00j0e1'), + ('uid=fred', '00fr3d1'), + ('uid=jack,ou=people', '00j6ck1'), + ('uid=deep,ou=others,ou=people', '00de3p1') + ]) + # Try to change password with invalid credentials. Should see error message + for user in [ + 'uid=joe,ou=people', + 'uid=jack,ou=people', + 'uid=fred', + 'uid=deep,ou=others,ou=people' + ]: + with pytest.raises(ldap.INVALID_CREDENTIALS): + UserAccount(topo.standalone, f'{user},{DEFAULT_SUFFIX}').bind("bad") + # Changing current password + change_password(topo, [ + ('uid=fred', '00fr3d1', '00fr3d2'), + ('uid=jack,ou=people', '00j6ck1', '00j6ck2'), + ('uid=deep,ou=others,ou=people', '00de3p1', '00de3p2') + ]) + # ('passwordchange', 'off') for joe + real_user = UserAccount(topo.standalone, f'uid=joe,ou=people,{DEFAULT_SUFFIX}') + conn = real_user.bind('00j0e1') + with pytest.raises(ldap.UNWILLING_TO_PERFORM): + UserAccount(conn, real_user.dn).replace('userpassword', '00j0e2') + + +@pytest.fixture(scope="function") +def _fixture_for_syntax_section(request, topo): + change_password_with_admin(topo, [ + ('uid=joe,ou=people', '00j0e1'), + ('uid=fred', '00fr3d1'), + ('uid=jack,ou=people', '00j6ck1'), + ('uid=deep,ou=others,ou=people', '00de3p1'), + ('uid=orla,ou=dirsec', '000rla1'), + ('uid=dbyers,ou=dirsec', 'Anuj') 
+ ]) + pwp = PwPolicyManager(topo.standalone) + orl = pwp.get_pwpolicy_entry(f'uid=orla,ou=dirsec,{DEFAULT_SUFFIX}') + joe = pwp.get_pwpolicy_entry(f'uid=joe,ou=people,{DEFAULT_SUFFIX}') + people = pwp.get_pwpolicy_entry(f'ou=people,{DEFAULT_SUFFIX}') + for instance in [orl, joe, people]: + instance.replace('passwordchecksyntax', 'on') + instance.replace('passwordChange', 'on') + assert instance.get_attr_val_utf8('passwordchecksyntax') == 'on' + + def final_step(): + for instance1 in [orl, joe, people]: + instance1.replace('passwordminlength', '6') + change_password_with_admin(topo, [ + ('uid=orla,ou=dirsec', '000rLb1'), + ('uid=joe,ou=people', '00J0e1'), + ('uid=jack,ou=people', '00J6ck1'), + ('uid=deep,ou=others,ou=people', '00De3p1'), + ('uid=dbyers,ou=dirsec', 'dby3rs1'), + ('uid=fred', '00fr3d1') + ]) + + request.addfinalizer(final_step) + + +def test_password_syntax_section(topo, policy_setup, _fixture_for_syntax_section): + """Password Syntax Section. + + :id: 7bf1cb46-9388-11ea-9019-8c16451d917b + :setup: Standalone + :steps: + 1. Try to change password with invalid credentials. Should get error (invalid cred). + 2. Try to change to a password that violates length. Should get error (constaint viol.). + 3. Attempt to Modify password to db which is in error to policy + 4. Changing password minimum length to 5 to check triviality + 5. Try to change password to the value of uid, which is trivial. Should get error. + 6. Try to change password to givenname which is trivial. Should get error + 7. Try to change password to sn which is trivial. Should get error + 8. Changing password minimum length back to 6 + 9. Changing current password from ``*1`` to ``*2`` + 10. Changing current password from ``*2`` to ``*1`` + 11. Changing current password to the evil password + 12. Resetting to original password as cn=directory manager + 13. Setting policy to NOT Check Password Syntax + 14. Test that when checking syntax is off, you can use small passwords + 15. 
Test that when checking syntax is off, trivial passwords can be used +        16. Resetting to original password as cn=directory manager +        17. Changing password minimum length from 6 to 10 +        18. Setting policy to Check Password Syntax again +        19. Try to change to a password that violates length +        20. Change to a password that meets length requirement +    :expectedresults: +        1. Fail(invalid cred) +        2. Fail(constraint viol.) +        3. Fail(Syntax error) +        4. Success +        5. Fail(trivial) +        6. Fail(password to givenname) +        7. Fail(password to sn) +        8. Success +        9. Success +        10. Success +        11. Fail(evil password) +        12. Success +        13. Success +        14. Success +        15. Success +        16. Success +        17. Success +        18. Success +        19. Fail(violates length) +        20. Success +    """ +    # Try to change password with invalid credentials. Should get error (invalid cred). +    for user in [ +        'uid=joe,ou=people', +        'uid=jack,ou=people', +        'uid=fred', +        'uid=deep,ou=others,ou=people', +        'uid=dbyers,ou=dirsec', +        'uid=orla,ou=dirsec' +    ]: +        with pytest.raises(ldap.INVALID_CREDENTIALS): +            UserAccount(topo.standalone, f'{user},{DEFAULT_SUFFIX}').bind("bad") +    # Try to change to a password that violates length. Should get error (constraint viol.). 
+ with pytest.raises(ldap.CONSTRAINT_VIOLATION): + change_password(topo, [ + ('uid=orla,ou=dirsec', '000rla1', 'db'), + ('uid=joe,ou=people', '00j0e1', 'db'), + ('uid=jack,ou=people', '00j6ck1', 'db'), + ('uid=deep,ou=others,ou=people', '00de3p1', 'db') + ]) + # Attempt to Modify password to db which is in error to policy(Syntax error) + change_password_ultra_new(topo, [ + ('uid=dbyers,ou=dirsec', 'Anuj', 'db', 'dby3rs1'), + ('uid=fred', '00fr3d1', 'db', '00fr3d1') + ]) + # Changing password minimum length to 5 to check triviality + pwp = PwPolicyManager(topo.standalone) + orl = pwp.get_pwpolicy_entry(f'uid=orla,ou=dirsec,{DEFAULT_SUFFIX}') + joe = pwp.get_pwpolicy_entry(f'uid=joe,ou=people,{DEFAULT_SUFFIX}') + people = pwp.get_pwpolicy_entry(f'ou=people,{DEFAULT_SUFFIX}') + for instance in [orl, joe, people]: + instance.replace('passwordminlength', '5') + # Try to change password to the value of uid, which is trivial. Should get error. + with pytest.raises(ldap.CONSTRAINT_VIOLATION): + change_password(topo, [ + ('uid=orla,ou=dirsec', '000rla1', 'orla'), + ('uid=joe,ou=people', '00j0e1', 'joe'), + ('uid=jack,ou=people', '00j6ck1', 'jack'), + ('uid=deep,ou=others,ou=people', '00de3p1', 'deep') + ]) + # dbyers and fred can change + change_password_ultra_new(topo, [ + ('uid=dbyers,ou=dirsec', 'dby3rs1', 'dbyers', 'dby3rs1'), + ('uid=fred', '00fr3d1', 'fred', '00fr3d1') + ]) + # Try to change password to givenname which is trivial. Should get error + with pytest.raises(ldap.CONSTRAINT_VIOLATION): + change_password(topo, [ + ('uid=orla,ou=dirsec', '000rla1', 'orla'), + ('uid=joe,ou=people', '00j0e1', 'joe'), + ('uid=jack,ou=people', '00j6ck1', 'jack'), + ('uid=deep,ou=others,ou=people', '00de3p1', 'deep') + ]) + # dbyers and fred can change + change_password_ultra_new(topo, [ + ('uid=dbyers,ou=dirsec', 'dby3rs1', 'danny', 'dby3rs1'), + ('uid=fred', '00fr3d1', 'fred', '00fr3d1') + ]) + # Try to change password to sn which is trivial. 
Should get error + with pytest.raises(ldap.CONSTRAINT_VIOLATION): + change_password(topo, [ + ('uid=orla,ou=dirsec', '000rla1', 'Hegarty'), + ('uid=joe,ou=people', '00j0e1', 'Rath'), + ('uid=jack,ou=people', '00j6ck1', 'Rath'), + ('uid=deep,ou=others,ou=people', '00de3p1', 'Blue') + ]) + # dbyers and fred can change + change_password_ultra_new(topo, [ + ('uid=dbyers,ou=dirsec', 'dby3rs1', 'Byers', 'dby3rs1'), + ('uid=fred', '00fr3d1', 'Byers', '00fr3d1') + ]) + # Changing password minimum length back to 6 + for instance1 in [orl, joe, people]: + instance1.replace('passwordminlength', '6') + # Changing current password from *1 to *2 + change_password(topo, [ + ('uid=orla,ou=dirsec', '000rla1', '000rLb2'), + ('uid=dbyers,ou=dirsec', 'dby3rs1', 'dby3rs2'), + ('uid=fred', '00fr3d1', '00fr3d2'), + ('uid=joe,ou=people', '00j0e1', '00J0e2'), + ('uid=jack,ou=people', '00j6ck1', '00J6ck2'), + ('uid=deep,ou=others,ou=people', '00de3p1', '00De3p2') + ]) + # Changing current password from *2 to *1 + change_password(topo, [ + ('uid=orla,ou=dirsec', '000rLb2', '000rLb1'), + ('uid=dbyers,ou=dirsec', 'dby3rs2', 'dby3rs1'), + ('uid=fred', '00fr3d2', '00fr3d1'), + ('uid=joe,ou=people', '00J0e2', '00J0e1'), + ('uid=jack,ou=people', '00J6ck2', '00J6ck1'), + ('uid=deep,ou=others,ou=people', '00De3p2', '00De3p1') + ]) + # Changing current password to the evil password + with pytest.raises(ldap.CONSTRAINT_VIOLATION): + change_password(topo, [ + ('uid=orla,ou=dirsec', '000rLb1', r'{\;\\].'), + ('uid=joe,ou=people', '00J0e1', r'{\;\\].'), + ('uid=jack,ou=people', '00J6ck1', r'{\;\\].'), + ('uid=deep,ou=others,ou=people', '00De3p1', r'{\;\\].') + ]) + # dbyers and fred can change + change_password(topo, [ + ('uid=dbyers,ou=dirsec', 'dby3rs1', r'{\;\\].'), + ('uid=fred', '00fr3d1', r'{\;\\].') + ]) + # Resetting to original password as cn=directory manager + change_password_with_admin(topo, [ + ('uid=orla,ou=dirsec', '000rLb1'), + ('uid=joe,ou=people', '00J0e1'), + ('uid=jack,ou=people', 
'00J6ck1'), + ('uid=deep,ou=others,ou=people', '00De3p1'), + ('uid=dbyers,ou=dirsec', 'dby3rs1'), + ('uid=fred', '00fr3d1') + ]) + # Setting policy to NOT Check Password Syntax + # Searching for passwordminlength + for instance in [orl, joe, people]: + instance.replace('passwordchecksyntax', 'off') + for instance in [orl, joe, people]: + assert instance.get_attr_val_utf8('passwordchecksyntax') == 'off' + assert instance.get_attr_val_utf8('passwordminlength') == '6' + # Test that when checking syntax is off, you can use small passwords + change_password(topo, [ + ('uid=orla,ou=dirsec', '000rLb1', 'db'), + ('uid=joe,ou=people', '00J0e1', 'db'), + ('uid=jack,ou=people', '00J6ck1', 'db'), + ('uid=deep,ou=others,ou=people', '00De3p1', 'db'), + ('uid=dbyers,ou=dirsec', 'dby3rs1', 'db'), + ('uid=fred', '00fr3d1', 'db') + ]) + # Test that when checking syntax is off, trivial passwords can be used + change_password(topo, [ + ('uid=orla,ou=dirsec', 'db', 'orla'), + ('uid=joe,ou=people', 'db', 'joe'), + ('uid=jack,ou=people', 'db', 'jack'), + ('uid=deep,ou=others,ou=people', 'db', 'deep'), + ('uid=dbyers,ou=dirsec', 'db', 'dbyers'), + ('uid=fred', 'db', 'fred') + ]) + # Resetting to original password as cn=directory manager + change_password_with_admin(topo, [ + ('uid=orla,ou=dirsec', '000rLb1'), + ('uid=joe,ou=people', '00J0e1'), + ('uid=jack,ou=people', '00J6ck1'), + ('uid=deep,ou=others,ou=people', '00De3p1'), + ('uid=dbyers,ou=dirsec', 'dby3rs1'), + ('uid=fred', '00fr3d1') + ]) + # Changing password minimum length from 6 to 10 + # Setting policy to Check Password Syntax again + for instance in [orl, joe, people]: + instance.replace_many( + ('passwordchecksyntax', 'on'), + ('passwordminlength', '10')) + # Try to change to a password that violates length + with pytest.raises(ldap.CONSTRAINT_VIOLATION): + change_password(topo, [ + ('uid=orla,ou=dirsec', '000rLb1', 'db'), + ('uid=joe,ou=people', '00J0e1', 'db'), + ('uid=jack,ou=people', '00J6ck1', 'db'), + 
('uid=deep,ou=others,ou=people', '00De3p1', 'db') + ]) + # dbyers and fred can change as it does not belong to any pw policy + change_password(topo, [ + ('uid=dbyers,ou=dirsec', 'dby3rs1', 'db'), + ('uid=fred', '00fr3d1', 'db') + ]) + # Change to a password that meets length requirement + change_password(topo, [ + ('uid=orla,ou=dirsec', '000rLb1', 'This_IS_a_very_very_long_password'), + ('uid=joe,ou=people', '00J0e1', 'This_IS_a_very_very_long_password'), + ('uid=jack,ou=people', '00J6ck1', 'This_IS_a_very_very_long_password'), + ('uid=deep,ou=others,ou=people', '00De3p1', 'This_IS_a_very_very_long_password'), + ('uid=dbyers,ou=dirsec', 'db', 'This_IS_a_very_very_long_password'), + ('uid=fred', 'db', 'This_IS_a_very_very_long_password') + ]) + + +@pytest.fixture(scope="function") +def _fixture_for_password_history(request, topo): + pwp = PwPolicyManager(topo.standalone) + orl = pwp.get_pwpolicy_entry(f'uid=orla,ou=dirsec,{DEFAULT_SUFFIX}') + joe = pwp.get_pwpolicy_entry(f'uid=joe,ou=people,{DEFAULT_SUFFIX}') + people = pwp.get_pwpolicy_entry(f'ou=people,{DEFAULT_SUFFIX}') + change_password_with_admin(topo, [ + ('uid=orla,ou=dirsec', '000rLb1'), + ('uid=joe,ou=people', '00J0e1'), + ('uid=jack,ou=people', '00J6ck1'), + ('uid=deep,ou=others,ou=people', '00De3p1') + ]) + for instance in [orl, joe, people]: + instance.replace_many( + ('passwordhistory', 'on'), + ('passwordinhistory', '3'), + ('passwordChange', 'on')) + for instance in [orl, joe, people]: + assert instance.get_attr_val_utf8('passwordhistory') == 'on' + assert instance.get_attr_val_utf8('passwordinhistory') == '3' + assert instance.get_attr_val_utf8('passwordChange') == 'on' + + def final_step(): + for instance1 in [orl, joe, people]: + instance1.replace('passwordhistory', 'off') + change_password_with_admin(topo, [ + ('uid=orla,ou=dirsec', '000rLb1'), + ('uid=joe,ou=people', '00J0e1'), + ('uid=jack,ou=people', '00J6ck1'), + ('uid=deep,ou=others,ou=people', '00De3p1') + ]) + 
request.addfinalizer(final_step) + + +def test_password_history_section(topo, policy_setup, _fixture_for_password_history): +    """Password History Section. + +    :id: 51f459a0-a0ba-11ea-ade7-8c16451d917b +    :setup: Standalone +    :steps: +        1. Changing current password for orla,joe,jack and deep +        2. Checking that the passwordhistory attribute has been added ! +        3. Try to change the password back which should fail +        4. Change the passwords for all four test users to something new +        5. Try to change passwords back to the first password +        6. Change to a fourth password not in password history +        7. Try to change all the passwords back to the first password +        8. Change the password to one more new password as root dn +        9. Now try to change the password back to the first password +        10. Checking that password history still contains the previous 3 passwords +        11. Add a password test for long long password (more than 490 bytes). +        12. Changing password : LONGPASSWORD goes in history +        13. Setting policy to NOT keep password histories +        14. Changing current password from ``*2`` to ``*2`` +        15. Try to change ``*2`` to ``*1``, should succeed +    :expectedresults: +        1. Success +        2. Success +        3. Fail(ldap.CONSTRAINT_VIOLATION) +        4. Success +        5. Fail(ldap.CONSTRAINT_VIOLATION) +        6. Success +        7. Fail(ldap.CONSTRAINT_VIOLATION) +        8. Success +        9. Success +        10. Success +        11. Success +        12. Success +        13. Success +        14. Success +        15. Success +    """ +    # Changing current password for orla,joe,jack and deep +    change_password_with_admin(topo, [ +        ('uid=orla,ou=dirsec', '000rLb2'), +        ('uid=joe,ou=people', '00J0e2'), +        ('uid=jack,ou=people', '00J6ck2'), +        ('uid=deep,ou=others,ou=people', '00De3p2'), +    ]) +    time.sleep(1) +    # Checking that the password history attribute has been added ! 
+ for user, password in [ + ('uid=orla,ou=dirsec', '000rLb1'), + ('uid=joe,ou=people', '00J0e1'), + ('uid=jack,ou=people', '00J6ck1'), + ('uid=deep,ou=others,ou=people', '00De3p1'), + ]: + assert password in UserAccount(topo.standalone, + f'{user},{DEFAULT_SUFFIX}').get_attr_val_utf8("passwordhistory") + # Try to change the password back which should fail + with pytest.raises(ldap.CONSTRAINT_VIOLATION): + change_password(topo, [ + ('uid=orla,ou=dirsec', '000rLb2', '000rLb1'), + ('uid=joe,ou=people', '00J0e2', '00J0e1'), + ('uid=jack,ou=people', '00J6ck2', '00J6ck1'), + ('uid=deep,ou=others,ou=people', '00De3p2', '00De3p1'), + ]) + # Change the passwords for all four test users to something new + change_password_with_admin(topo, [ + ('uid=orla,ou=dirsec', '000rLb3'), + ('uid=joe,ou=people', '00J0e3'), + ('uid=jack,ou=people', '00J6ck3'), + ('uid=deep,ou=others,ou=people', '00De3p3') + ]) + # Try to change passwords back to the first password + time.sleep(1) + with pytest.raises(ldap.CONSTRAINT_VIOLATION): + change_password(topo, [ + ('uid=orla,ou=dirsec', '000rLb3', '000rLb1'), + ('uid=joe,ou=people', '00J0e3', '00J0e1'), + ('uid=jack,ou=people', '00J6ck3', '00J6ck1'), + ('uid=deep,ou=others,ou=people', '00De3p3', '00De3p1'), + ]) + # Change to a fourth password not in password history + change_password_with_admin(topo, [ + ('uid=orla,ou=dirsec', '000rLb4'), + ('uid=joe,ou=people', '00J0e4'), + ('uid=jack,ou=people', '00J6ck4'), + ('uid=deep,ou=others,ou=people', '00De3p4') + ]) + time.sleep(1) + # Try to change all the passwords back to the first password + with pytest.raises(ldap.CONSTRAINT_VIOLATION): + change_password(topo, [ + ('uid=orla,ou=dirsec', '000rLb4', '000rLb1'), + ('uid=joe,ou=people', '00J0e4', '00J0e1'), + ('uid=jack,ou=people', '00J6ck4', '00J6ck1'), + ('uid=deep,ou=others,ou=people', '00De3p4', '00De3p1') + ]) + # change the password to one more new password as root dn + change_password_with_admin(topo, [ + ('uid=orla,ou=dirsec', '000rLb5'), + 
('uid=joe,ou=people', '00J0e5'), + ('uid=jack,ou=people', '00J6ck5'), + ('uid=deep,ou=others,ou=people', '00De3p5') + ]) + time.sleep(1) + # Now try to change the password back to the first password + change_password(topo, [ + ('uid=orla,ou=dirsec', '000rLb5', '000rLb1'), + ('uid=joe,ou=people', '00J0e5', '00J0e1'), + ('uid=jack,ou=people', '00J6ck5', '00J6ck1'), + ('uid=deep,ou=others,ou=people', '00De3p5', '00De3p1') + ]) + time.sleep(1) + # checking that password history does still containt the previous 3 passwords + for user, password3, password2, password1 in [ + ('uid=orla,ou=dirsec', '000rLb5', '000rLb4', '000rLb3'), + ('uid=joe,ou=people', '00J0e5', '00J0e4', '00J0e3'), + ('uid=jack,ou=people', '00J6ck5', '00J6ck4', '00J6ck3'), + ('uid=deep,ou=others,ou=people', '00De3p5', '00De3p4', '00De3p3') + ]: + user1 = UserAccount(topo.standalone, f'{user},{DEFAULT_SUFFIX}') + pass_list = ''.join(user1.get_attr_vals_utf8("passwordhistory")) + assert password1 in pass_list + assert password2 in pass_list + assert password3 in pass_list + # Add a password test for long long password (more than 490 bytes). 
+ long = '01234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901' \ + '23456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456' \ + '789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012' \ + '345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678' \ + '901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234' \ + '5678901234567890123456789LENGTH=510' + change_password(topo, [ + ('uid=orla,ou=dirsec', '000rLb1', long), + ('uid=joe,ou=people', '00J0e1', long), + ('uid=jack,ou=people', '00J6ck1', long), + ('uid=deep,ou=others,ou=people', '00De3p1', long) + ]) + time.sleep(1) + # Changing password : LONGPASSWORD goes in history + change_password(topo, [ + ('uid=orla,ou=dirsec', long, '000rLb2'), + ('uid=joe,ou=people', long, '00J0e2'), + ('uid=jack,ou=people', long, '00J6ck2'), + ('uid=deep,ou=others,ou=people', long, '00De3p2') + ]) + time.sleep(1) + for user, password in [ + ('uid=orla,ou=dirsec', '000rLb2'), + ('uid=joe,ou=people', '00J0e2'), + ('uid=jack,ou=people', '00J6ck2'), + ('uid=deep,ou=others,ou=people', '00De3p2') + ]: + real_user = UserAccount(topo.standalone, f'{user},{DEFAULT_SUFFIX}') + conn = real_user.bind(password) + assert long in ''.join(UserAccount(conn, + f'{user},{DEFAULT_SUFFIX}').get_attr_vals_utf8("passwordhistory")) + # Setting policy to NOT keep password histories + _do_transaction_for_pwp(topo, 'passwordhistory', 'off') + time.sleep(1) + # Changing current password from *2 to *2 + change_password(topo, [ + ('uid=orla,ou=dirsec', '000rLb2', '000rLb2'), + ('uid=joe,ou=people', '00J0e2', '00J0e2'), + ('uid=jack,ou=people', '00J6ck2', '00J6ck2'), + ('uid=deep,ou=others,ou=people', '00De3p2', '00De3p2') + ]) + # Try to change *2 to *1, should succeed + change_password(topo, [ + ('uid=orla,ou=dirsec', '000rLb2', '000rLb1'), + 
('uid=joe,ou=people', '00J0e2', '00J0e1'), + ('uid=jack,ou=people', '00J6ck2', '00J6ck1'), + ('uid=deep,ou=others,ou=people', '00De3p2', '00De3p1') + ]) + + +@pytest.fixture(scope="function") +def _fixture_for_password_min_age(request, topo): + pwp = PwPolicyManager(topo.standalone) + orl = pwp.get_pwpolicy_entry(f'uid=orla,ou=dirsec,{DEFAULT_SUFFIX}') + joe = pwp.get_pwpolicy_entry(f'uid=joe,ou=people,{DEFAULT_SUFFIX}') + people = pwp.get_pwpolicy_entry(f'ou=people,{DEFAULT_SUFFIX}') + change_password_with_admin(topo, [ + ('uid=orla,ou=dirsec', '000rLb1'), + ('uid=joe,ou=people', '00J0e1'), + ('uid=jack,ou=people', '00J6ck1'), + ('uid=deep,ou=others,ou=people', '00De3p1') + ]) + for pwp1 in [orl, joe, people]: + assert pwp1.get_attr_val_utf8('passwordminage') == '0' + pwp1.replace_many( + ('passwordminage', '10'), + ('passwordChange', 'on')) + + def final_step(): + for pwp2 in [orl, joe, people]: + pwp2.replace('passwordminage', '0') + request.addfinalizer(final_step) + + +def test_password_minimum_age_section(topo, policy_setup, _fixture_for_password_min_age): + """Password History Section. + + :id: 470f5b2a-a0ba-11ea-ab2d-8c16451d917b + :setup: Standalone + :steps: + 1. Searching for password minimum age, should be 0 per defaults set + 2. Change current password from ``*1`` to ``*2`` + 3. Wait 5 secs and try to change again. Should fail. + 4. Wait more time to complete password min age + 5. Now user can change password + :expectedresults: + 1. Success + 2. Success + 3. Fail(ldap.CONSTRAINT_VIOLATION) + 4. Success + 5. Success + """ + # Change current password from *1 to *2 + change_password(topo, [ + ('uid=orla,ou=dirsec', '000rLb1', '000rLb2'), + ('uid=joe,ou=people', '00J0e1', '00J0e2'), + ('uid=jack,ou=people', '00J6ck1', '00J6ck2'), + ('uid=deep,ou=others,ou=people', '00De3p1', '00De3p2') + ]) + # Wait 5 secs and try to change again. Should fail. 
+ count = 0 + while count < 5: + with pytest.raises(ldap.CONSTRAINT_VIOLATION): + change_password(topo, [ + ('uid=orla,ou=dirsec', '000rLb2', '000rLb1'), + ('uid=joe,ou=people', '00J0e2', '00J0e1'), + ('uid=jack,ou=people', '00J6ck2', '00J6ck1'), + ('uid=deep,ou=others,ou=people', '00De3p2', '00De3p1') + ]) + time.sleep(1) + count += 1 + # Wait more time to complete password min age + time.sleep(6) + # Now user can change password + change_password(topo, [ + ('uid=orla,ou=dirsec', '000rLb2', '000rLb1'), + ('uid=joe,ou=people', '00J0e2', '00J0e1'), + ('uid=jack,ou=people', '00J6ck2', '00J6ck1'), + ('uid=deep,ou=others,ou=people', '00De3p2', '00De3p1') + ]) + + +@pytest.fixture(scope="function") +def _fixture_for_password_lock_out(request, topo): + pwp = PwPolicyManager(topo.standalone) + orl = pwp.get_pwpolicy_entry(f'uid=orla,ou=dirsec,{DEFAULT_SUFFIX}') + joe = pwp.get_pwpolicy_entry(f'uid=joe,ou=people,{DEFAULT_SUFFIX}') + people = pwp.get_pwpolicy_entry(f'ou=people,{DEFAULT_SUFFIX}') + change_password_with_admin(topo, [ + ('uid=orla,ou=dirsec', '000rLb1'), + ('uid=joe,ou=people', '00J0e1'), + ('uid=jack,ou=people', '00J6ck1'), + ('uid=deep,ou=others,ou=people', '00De3p1') + ]) + for pwp1 in [orl, joe, people]: + assert pwp1.get_attr_val_utf8('passwordlockout') == 'off' + pwp1.replace_many( + ('passwordlockout', 'on'), + ('passwordlockoutduration', '3'), + ('passwordresetfailurecount', '3'), + ('passwordChange', 'on')) + + def final_step(): + for instance in [orl, joe, people]: + instance.replace('passwordlockout', 'off') + instance.replace('passwordunlock', 'off') + assert instance.get_attr_val_utf8('passwordlockout') == 'off' + assert instance.get_attr_val_utf8('passwordunlock') == 'off' + request.addfinalizer(final_step) + + +def test_account_lockout_and_lockout_duration_section(topo, policy_setup, _fixture_for_password_lock_out): + """Account Lockout and Lockout Duration Section + + :id: 1ff0b7a4-b560-11ea-9ece-8c16451d917b + :setup: Standalone + :steps: + 1. 
Try to bind with invalid credentials + 2. Try to bind with valid pw, should give lockout error + 3. After 3 seconds Try to bind with valid pw, should work + 4. Try to bind with invalid credentials + 5. Attempt to bind with valid pw after timeout is up + 6. Resetting with root can break lockout + :expectedresults: + 1. Fail(ldap.INVALID_CREDENTIALS) + 2. Fail(ldap.CONSTRAINT_VIOLATION) + 3. Success + 4. Fail(ldap.INVALID_CREDENTIALS)) + 5. Success + 6. Success + """ + # Try to bind with invalid credentials + for count1 in range(3): + with pytest.raises(ldap.INVALID_CREDENTIALS): + change_password(topo, [ + ('uid=orla,ou=dirsec', 'Invalid', 'Invalid'), + ('uid=joe,ou=people', 'Invalid', 'Invalid'), + ('uid=jack,ou=people', 'Invalid', 'Invalid'), + ('uid=deep,ou=others,ou=people', 'Invalid', 'Invalid') + ]) + # Try to bind with valid pw, should give lockout error + with pytest.raises(ldap.CONSTRAINT_VIOLATION): + change_password(topo, [ + ('uid=orla,ou=dirsec', '000rLb1', '000rLb1'), + ('uid=joe,ou=people', '00J0e1', '00J0e1'), + ('uid=jack,ou=people', '00J6ck1', '00J6ck1'), + ('uid=deep,ou=others,ou=people', '00De3p1', '00De3p1') + ]) + # Try to bind with valid pw, should work + time.sleep(3) + change_password(topo, [ + ('uid=orla,ou=dirsec', '000rLb1', '000rLb2'), + ('uid=joe,ou=people', '00J0e1', '00J0e2'), + ('uid=jack,ou=people', '00J6ck1', '00J6ck2'), + ('uid=deep,ou=others,ou=people', '00De3p1', '00De3p2') + ]) + # Try to bind with invalid credentials + for count1 in range(2): + with pytest.raises(ldap.INVALID_CREDENTIALS): + change_password(topo, [ + ('uid=orla,ou=dirsec', 'Invalid', 'Invalid'), + ('uid=joe,ou=people', 'Invalid', 'Invalid'), + ('uid=jack,ou=people', 'Invalid', 'Invalid'), + ('uid=deep,ou=others,ou=people', 'Invalid', 'Invalid') + ]) + # Attempt to bind with valid pw after timeout is up + time.sleep(3) + change_password(topo, [ + ('uid=orla,ou=dirsec', '000rLb2', '000rLb1'), + ('uid=joe,ou=people', '00J0e2', '00J0e1'), + ('uid=jack,ou=people', 
'00J6ck2', '00J6ck1'), + ('uid=deep,ou=others,ou=people', '00De3p2', '00De3p1') + ]) + # Resetting with root can break lockout + for count1 in range(3): + with pytest.raises(ldap.INVALID_CREDENTIALS): + change_password(topo, [ + ('uid=orla,ou=dirsec', 'Invalid', 'Invalid'), + ('uid=joe,ou=people', 'Invalid', 'Invalid'), + ('uid=jack,ou=people', 'Invalid', 'Invalid'), + ('uid=deep,ou=others,ou=people', 'Invalid', 'Invalid') + ]) + with pytest.raises(ldap.CONSTRAINT_VIOLATION): + change_password(topo, [ + ('uid=orla,ou=dirsec', '000rLb1', '000rLb1'), + ('uid=joe,ou=people', '00J0e1', '00J0e1'), + ('uid=jack,ou=people', '00J6ck1', '00J6ck1'), + ('uid=deep,ou=others,ou=people', '00De3p1', '00De3p1') + ]) + change_password_with_admin(topo, [ + ('uid=orla,ou=dirsec', '000rLb1'), + ('uid=joe,ou=people', '00J0e1'), + ('uid=jack,ou=people', '00J6ck1'), + ('uid=deep,ou=others,ou=people', '00De3p1') + ]) + change_password(topo, [ + ('uid=orla,ou=dirsec', '000rLb1', '000rLb1'), + ('uid=joe,ou=people', '00J0e1', '00J0e1'), + ('uid=jack,ou=people', '00J6ck1', '00J6ck1'), + ('uid=deep,ou=others,ou=people', '00De3p1', '00De3p1') + ]) + + +@pytest.fixture(scope="function") +def _fixture_for_grace_limit(topo): + pwp = PwPolicyManager(topo.standalone) + orl = pwp.get_pwpolicy_entry(f'uid=orla,ou=dirsec,{DEFAULT_SUFFIX}') + joe = pwp.get_pwpolicy_entry(f'uid=joe,ou=people,{DEFAULT_SUFFIX}') + people = pwp.get_pwpolicy_entry(f'ou=people,{DEFAULT_SUFFIX}') + change_password_with_admin(topo, [ + ('uid=orla,ou=dirsec', '000rLb1'), + ('uid=joe,ou=people', '00J0e1'), + ('uid=jack,ou=people', '00J6ck1'), + ('uid=deep,ou=others,ou=people', '00De3p1'), + ('uid=fred', '00fr3d1') + ]) + for instance in [orl, joe, people]: + instance.replace_many(('passwordMaxAge', '3'), + ('passwordGraceLimit', '7'), + ('passwordexp', 'on'), + ('passwordwarning', '30'), + ('passwordChange', 'on')) + + +def _bind_self(topo, user_password_new_pass_list): + """ + Will bind password with self. 
+ """ + for user, password in user_password_new_pass_list: + real_user = UserAccount(topo.standalone, f'{user},{DEFAULT_SUFFIX}') + conn = real_user.bind(password) + + +def test_grace_limit_section(topo, policy_setup, _fixture_for_grace_limit): + """Account Lockout and Lockout Duration Section + + :id: 288e3756-b560-11ea-9390-8c16451d917b + :setup: Standalone + :steps: + 1. Check users have 7 grace login attempts after their password expires + 2. Wait for password expiration + 3. The the 8th should fail except fred who defaults to global password policy + 4. Now try resetting the password before the grace login attempts run out + 5. Wait for password expiration + 6. Now change the password as the 7th attempt + 7. Wait for password expiration + 8. First 7 good attempts + 9. The the 8th should fail except fred who defaults to global password policy + 10. Changing the paswordMaxAge to 0 so expiration is immediate test + 11. Modify the users passwords to start the clock of zero + 12. PasswordGraceLimit to 0, passwordMaxAge to 3 seconds + 13. Modify the users passwords to start the clock + 14. Users should be blocked + 15. Removing the passwordgracelimit attribute should make it default to 0 + :expectedresults: + 1. Success + 2. Success + 3. Fail(ldap.INVALID_CREDENTIALS) + 4. Success + 5. Success + 6. Success + 7. Success + 8. Success + 9. Fail(ldap.INVALID_CREDENTIALS) + 10. Success + 11. Success + 12. Success + 13. Success + 14. Success + 15. 
Success + """ + # Check users have 7 grace login attempts after their password expires + change_password_with_admin(topo, [ + ('uid=orla,ou=dirsec', '000rLb2'), + ('uid=joe,ou=people', '00J0e2'), + ('uid=jack,ou=people', '00J6ck2'), + ('uid=deep,ou=others,ou=people', '00De3p2'), + ('uid=fred', '00fr3d2') + ]) + # Wait for password expiration + time.sleep(3) + # The the 8th should fail except fred who defaults to global password policy + for _ in range(7): + _bind_self(topo, [ + ('uid=orla,ou=dirsec', '000rLb2'), + ('uid=joe,ou=people', '00J0e2'), + ('uid=jack,ou=people', '00J6ck2'), + ('uid=deep,ou=others,ou=people', '00De3p2'), + ('uid=fred', '00fr3d2') + ]) + with pytest.raises(ldap.INVALID_CREDENTIALS): + _bind_self(topo, [ + ('uid=orla,ou=dirsec', '000rLb2'), + ('uid=joe,ou=people', '00J0e2'), + ('uid=jack,ou=people', '00J6ck2'), + ('uid=deep,ou=others,ou=people', '00De3p2') + ]) + _bind_self(topo, [ + ('uid=fred', '00fr3d2') + ]) + # Now try resetting the password before the grace login attempts run out + change_password_with_admin(topo, [ + ('uid=orla,ou=dirsec', '000rLb1'), + ('uid=joe,ou=people', '00J0e1'), + ('uid=jack,ou=people', '00J6ck1'), + ('uid=deep,ou=others,ou=people', '00De3p1'), + ('uid=fred', '00fr3d1') + ]) + # Wait for password expiration + time.sleep(3) + # first 6 good attempts + for _ in range(6): + _bind_self(topo, [ + ('uid=orla,ou=dirsec', '000rLb1'), + ('uid=joe,ou=people', '00J0e1'), + ('uid=jack,ou=people', '00J6ck1'), + ('uid=deep,ou=others,ou=people', '00De3p1'), + ('uid=fred', '00fr3d1') + ]) + # now change the password as the 7th attempt + change_password(topo, [ + ('uid=orla,ou=dirsec', '000rLb1', '000rLb2'), + ('uid=joe,ou=people', '00J0e1', '00J0e2'), + ('uid=jack,ou=people', '00J6ck1', '00J6ck2'), + ('uid=deep,ou=others,ou=people', '00De3p1', '00De3p2'), + ('uid=fred', '00fr3d1', '00fr3d2') + ]) + # Wait for password expiration + time.sleep(3) + # first 7 good attempts + for _ in range(7): + _bind_self(topo, [ + 
('uid=orla,ou=dirsec', '000rLb2'), + ('uid=joe,ou=people', '00J0e2'), + ('uid=jack,ou=people', '00J6ck2'), + ('uid=deep,ou=others,ou=people', '00De3p2'), + ('uid=fred', '00fr3d2') + ]) + # Then the 8th should fail except fred who defaults to global password policy + with pytest.raises(ldap.INVALID_CREDENTIALS): + _bind_self(topo, [ + ('uid=orla,ou=dirsec', '000rLb2'), + ('uid=joe,ou=people', '00J0e2'), + ('uid=jack,ou=people', '00J6ck2'), + ('uid=deep,ou=others,ou=people', '00De3p2') + ]) + _bind_self(topo, [ + ('uid=fred', '00fr3d2') + ]) + # Changing the passwordMaxAge to 0 so expiration is immediate test to see + # that the user still has 7 grace login attempts before locked out + for att1 in ['passwordMaxAge', 'passwordwarning']: + _do_transaction_for_pwp(topo, att1, '0') + # Modify the users passwords to start the clock of zero + change_password_with_admin(topo, [ + ('uid=orla,ou=dirsec', '000rLb1'), + ('uid=joe,ou=people', '00J0e1'), + ('uid=jack,ou=people', '00J6ck1'), + ('uid=deep,ou=others,ou=people', '00De3p1'), + ('uid=fred', '00fr3d1') + ]) + # first 7 good attempts + for _ in range(7): + _bind_self(topo, [ + ('uid=orla,ou=dirsec', '000rLb1'), + ('uid=joe,ou=people', '00J0e1'), + ('uid=jack,ou=people', '00J6ck1'), + ('uid=deep,ou=others,ou=people', '00De3p1'), + ('uid=fred', '00fr3d1') + ]) + # Then the 8th should fail .... 
+ # except fred who defaults to global password policy + with pytest.raises(ldap.INVALID_CREDENTIALS): + _bind_self(topo, [ + ('uid=orla,ou=dirsec', '000rLb1'), + ('uid=joe,ou=people', '00J0e1'), + ('uid=jack,ou=people', '00J6ck1'), + ('uid=deep,ou=others,ou=people', '00De3p1') + ]) + _bind_self(topo, [ + ('uid=fred', '00fr3d1') + ]) + # setting the passwordMaxAge to 3 seconds once more + # and the passwordGraceLimit to 0 + for att1, att2 in [('passwordMaxAge', '3'), ('passwordGraceLimit', '0')]: + _do_transaction_for_pwp(topo, att1, att2) + # modify the users passwords to start the clock + change_password_with_admin(topo, [ + ('uid=orla,ou=dirsec', '000rLb1'), + ('uid=joe,ou=people', '00J0e1'), + ('uid=jack,ou=people', '00J6ck1'), + ('uid=deep,ou=others,ou=people', '00De3p1'), + ('uid=fred', '00fr3d1') + ]) + # Users should be blocked + time.sleep(3) + with pytest.raises(ldap.INVALID_CREDENTIALS): + _bind_self(topo, [ + ('uid=orla,ou=dirsec', '000rLb1'), + ('uid=joe,ou=people', '00J0e1'), + ('uid=jack,ou=people', '00J6ck1'), + ('uid=deep,ou=others,ou=people', '00De3p1') + ]) + _bind_self(topo, [ + ('uid=fred', '00fr3d1') + ]) + for att1, att2 in [('passwordGraceLimit', '10')]: + _do_transaction_for_pwp(topo, att1, att2) + # removing the passwordgracelimit attribute should make it default to 0 + for att1, att2 in [('passwordGraceLimit', ' ')]: + _do_transaction_for_pwp(topo, att1, att2) + change_password_with_admin(topo, [ + ('uid=orla,ou=dirsec', '000rLb1'), + ('uid=joe,ou=people', '00J0e1'), + ('uid=jack,ou=people', '00J6ck1'), + ('uid=deep,ou=others,ou=people', '00De3p1'), + ('uid=fred', '00fr3d1') + ]) + time.sleep(3) + with pytest.raises(ldap.INVALID_CREDENTIALS): + _bind_self(topo, [ + ('uid=orla,ou=dirsec', '000rLb1'), + ('uid=joe,ou=people', '00J0e1'), + ('uid=jack,ou=people', '00J6ck1'), + ('uid=deep,ou=others,ou=people', '00De3p1') + ]) + _bind_self(topo, [ + ('uid=fred', '00fr3d1') + ]) + + +@pytest.fixture(scope="function") +def 
_fixture_for_additional_cases(topo): + pwp = PwPolicyManager(topo.standalone) + orl = pwp.get_pwpolicy_entry(f'uid=orla,ou=dirsec,{DEFAULT_SUFFIX}') + joe = pwp.get_pwpolicy_entry(f'uid=joe,ou=people,{DEFAULT_SUFFIX}') + people = pwp.get_pwpolicy_entry(f'ou=people,{DEFAULT_SUFFIX}') + change_password_with_admin(topo, [ + ('uid=orla,ou=dirsec', '000rLb1'), + ('uid=joe,ou=people', '00J0e1'), + ('uid=jack,ou=people', '00J6ck1'), + ('uid=deep,ou=others,ou=people', '00De3p1'), + ('uid=fred', '00fr3d1'), + ('uid=dbyers,ou=dirsec', 'dby3rs1') + ]) + for instance in [orl, joe, people]: + instance.replace_many(('passwordChange', 'on'), + ('passwordwarning', '86400'), + ('passwordGraceLimit', '0'), + ('passwordexp', 'off'), + ('passwordMaxAge', '8640000'), + ('passwordchecksyntax', 'off')) + + +def test_additional_corner_cases(topo, policy_setup, _fixture_for_additional_cases): + """Additional corner cases + + :id: 2f6cec66-b560-11ea-9d7c-8c16451d917b + :setup: Standalone + :steps: + 1. Try to change password to one containing spaces + 2. Setting password policy to Check password syntax + 3. Try to change password to the value of mail, which is trivial. Should get error. + 4. No error for fred and dbyers as they are not included in PW policy. + 5. Revert changes for fred and dbyers + 6. Try to change password to the value of ou, which is trivial. Should get error. + 7. No error for fred and dbyers as they are not included in PW policy. + 8. Revert changes for fred and dbyers + :expectedresults: + 1. Success + 2. Success + 3. Fail(CONSTRAINT_VIOLATION) + 4. Success + 5. Success + 6. Fail(CONSTRAINT_VIOLATION) + 7. Success + 8. 
Success + """ + # Try to change password to one containing spaces + change_password(topo, [ + ('uid=orla,ou=dirsec', '000rLb1', 'This Password has spaces.'), + ('uid=joe,ou=people', '00J0e1', 'This Password has spaces.'), + ('uid=jack,ou=people', '00J6ck1', 'This Password has spaces.'), + ('uid=fred', '00fr3d1', 'This Password has spaces.'), + ('uid=deep,ou=others,ou=people', '00De3p1', 'This Password has spaces.'), + ('uid=dbyers,ou=dirsec', 'dby3rs1', 'This Password has spaces.') + ]) + change_password(topo, [ + ('uid=orla,ou=dirsec', 'This Password has spaces.', '000rLb1'), + ('uid=joe,ou=people', 'This Password has spaces.', '00j0e1'), + ('uid=jack,ou=people', 'This Password has spaces.', '00j6ck1'), + ('uid=fred', 'This Password has spaces.', '00fr3d1'), + ('uid=deep,ou=others,ou=people', 'This Password has spaces.', '00de3p1'), + ('uid=dbyers,ou=dirsec', 'This Password has spaces.', 'dby3rs1') + ]) + # Setting password policy to Check password syntax + for attr, para in [('passwordchecksyntax', 'on'), ('passwordminlength', '5')]: + _do_transaction_for_pwp(topo, attr, para) + # Try to change password to the value of mail, which is trivial. Should get error. + with pytest.raises(ldap.CONSTRAINT_VIOLATION): + change_password(topo, [ + ('uid=orla,ou=dirsec', '000rLb1', 'orla@example.com'), + ('uid=joe,ou=people', '00j0e1', 'joe@example.com'), + ('uid=jack,ou=people', '00j6ck1', 'jack@example.com'), + ('uid=deep,ou=others,ou=people', '00de3p1', 'deep@example.com') + ]) + # No error for fred and dbyers as they are not included in PW policy. + change_password(topo, [ + ('uid=fred', '00fr3d1', 'fred@example.com'), + ('uid=dbyers,ou=dirsec', 'dby3rs1', 'dbyers@example.com') + ]) + # Revert changes for fred and dbyers + change_password(topo, [ + ('uid=fred', 'fred@example.com', '00fr3d1'), + ('uid=dbyers,ou=dirsec', 'dbyers@example.com', 'dby3rs1') + ]) + # Creating OUs. 
+ for user, new_ou in [ + ('uid=orla,ou=dirsec', 'dirsec'), + ('uid=joe,ou=people', 'people'), + ('uid=jack,ou=people', 'people'), + ('uid=deep,ou=others,ou=people', 'others'), + ('uid=dbyers,ou=dirsec', 'dirsec') + ]: + UserAccount(topo.standalone, f'{user},{DEFAULT_SUFFIX}').add('ou', new_ou) + # Try to change password to the value of ou, which is trivial. Should get error. + with pytest.raises(ldap.CONSTRAINT_VIOLATION): + change_password(topo, [ + ('uid=orla,ou=dirsec', '000rLb1', 'dirsec'), + ('uid=joe,ou=people', '00j0e1', 'people'), + ('uid=jack,ou=people', '00j6ck1', 'people'), + ('uid=deep,ou=others,ou=people', '00de3p1', 'others') + ]) + # No error for byers as it is not included in PW policy. + change_password(topo, [('uid=dbyers,ou=dirsec', 'dby3rs1', 'dirsec')]) + # Revert changes for dbyers + change_password_with_admin(topo, [ + ('uid=fred', '00fr3d1'), + ('uid=dbyers,ou=dirsec', 'dby3rs1') + ]) + + +def test_get_pwpolicy_cn_with_quotes(topology_m1, policy_qoutes_setup): + """Test that that we can get pwpolicy when + cn attr includes quotes + + :id: 5d360c40-2466-4042-bf99-14d2f68f9d66 + :setup: Standalone + :steps: + 1. Configure a custom subtree pwpolicy + 2. Try to get the unusual subtree pwpolicy + :expectedresults: + 1. Success + 2. Success + """ + + # Try to get the unusual subtree pwpolicy + people = policy_qoutes_setup.get_pwpolicy_entry(f'ou=people,{DEFAULT_SUFFIX}') + people.replace('passwordhistory', 'off') + assert people.get_attr_val_utf8('passwordhistory') == 'off' + +if __name__ == "__main__": + CURRENT_FILE = os.path.realpath(__file__) + pytest.main("-s -v %s" % CURRENT_FILE) diff --git a/dirsrvtests/tests/suites/password/password_test.py b/dirsrvtests/tests/suites/password/password_test.py new file mode 100644 index 0000000..3807947 --- /dev/null +++ b/dirsrvtests/tests/suites/password/password_test.py @@ -0,0 +1,72 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2016 Red Hat, Inc. +# All rights reserved. 
+# +# License: GPL (version 3 or any later version). +# See LICENSE for details. +# --- END COPYRIGHT BLOCK --- +# +import logging + +import pytest +from lib389.tasks import * +from lib389.topologies import topology_st +from lib389._constants import PASSWORD, DEFAULT_SUFFIX + +from lib389.idm.user import UserAccounts, TEST_USER_PROPERTIES + +pytestmark = pytest.mark.tier1 + +logging.getLogger(__name__).setLevel(logging.DEBUG) +log = logging.getLogger(__name__) + + +@pytest.mark.bz918684 +@pytest.mark.ds394 +def test_password_delete_specific_password(topology_st): + """Delete a specific userPassword, and make sure + it is actually deleted from the entry + + :id: 800f432a-52ab-4661-ac66-a2bdd9b984d6 + :setup: Standalone instance + :steps: + 1. Add a user with userPassword attribute in cleartext + 2. Delete the added value of userPassword attribute + 3. Check if the userPassword attribute is deleted + 4. Delete the user + :expectedresults: + 1. The user with userPassword in cleartext should be added successfully + 2. Operation should be successful + 3. UserPassword should be deleted + 4. 
The user should be successfully deleted + """ + + log.info('Running test_password_delete_specific_password...') + + users = UserAccounts(topology_st.standalone, DEFAULT_SUFFIX) + + user = users.create(properties=TEST_USER_PROPERTIES) + + # + # Add a test user with a password + # + user.set('userpassword', PASSWORD) + + # + # Delete the exact password + # + user.remove('userpassword', PASSWORD) + + # + # Check the password is actually deleted + # + assert not user.present('userPassword') + + log.info('test_password_delete_specific_password: PASSED') + + +if __name__ == '__main__': + # Run isolated + # -s for DEBUG mode + CURRENT_FILE = os.path.realpath(__file__) + pytest.main("-s %s" % CURRENT_FILE) diff --git a/dirsrvtests/tests/suites/password/pbkdf2_upgrade_plugin_test.py b/dirsrvtests/tests/suites/password/pbkdf2_upgrade_plugin_test.py new file mode 100644 index 0000000..90dae36 --- /dev/null +++ b/dirsrvtests/tests/suites/password/pbkdf2_upgrade_plugin_test.py @@ -0,0 +1,52 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2018 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. +# --- END COPYRIGHT BLOCK --- +# +import pytest +from lib389.topologies import topology_st +from lib389.password_plugins import PBKDF2Plugin +from lib389.utils import ds_is_older + +pytestmark = pytest.mark.tier1 + +@pytest.mark.skipif(ds_is_older('1.4.1'), reason="Not implemented") +def test_pbkdf2_upgrade(topology_st): + """On upgrade pbkdf2 doesn't ship. We need to be able to + provide this on upgrade to make sure default hashes work. + However, password plugins are special - they need really + early bootstap so that setting the default has specs work. + + This tests that the removal of the pbkdf2 plugin causes + it to be re-bootstrapped and added. + + :id: c2198692-7c02-433b-af5b-3be54920571a + :setup: Single instance + :steps: 1. Remove the PBKDF2 plugin + 2. Restart the server + 3. 
Restart the server + :expectedresults: + 1. Plugin is removed (IE pre-upgrade state) + 2. The plugin is bootstrapped and added + 3. No change (already bootstrapped) + + """ + # Remove the pbkdf2 plugin config + p1 = PBKDF2Plugin(topology_st.standalone) + assert(p1.exists()) + p1._protected = False + p1.delete() + # Restart + topology_st.standalone.restart() + # check it's been readded. + p2 = PBKDF2Plugin(topology_st.standalone) + assert(p2.exists()) + # Now restart to make sure we still work from the non-bootstrap form + topology_st.standalone.restart() + p3 = PBKDF2Plugin(topology_st.standalone) + assert(p3.exists()) + + diff --git a/dirsrvtests/tests/suites/password/pw_expired_access_test.py b/dirsrvtests/tests/suites/password/pw_expired_access_test.py new file mode 100644 index 0000000..c83b6a4 --- /dev/null +++ b/dirsrvtests/tests/suites/password/pw_expired_access_test.py @@ -0,0 +1,70 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2022 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. +# --- END COPYRIGHT BLOCK --- +# +import ldap +import logging +import pytest +import os +import time +from lib389._constants import DEFAULT_SUFFIX, PASSWORD +from lib389.idm.domain import Domain +from lib389.idm.user import UserAccounts +from lib389.topologies import topology_st as topo + +log = logging.getLogger(__name__) + +def test_expired_user_has_no_privledge(topo): + """Specify a test case purpose or name here + + :id: 3df86b45-9929-414b-9bf6-06c25301d207 + :setup: Standalone Instance + :steps: + 1. Set short password expiration time + 2. Add user and wait for expiration time to run out + 3. Set one aci that allows authenticated users full access + 4. Bind as user (password should be expired) + 5. Attempt modify + :expectedresults: + 1. Success + 2. Success + 3. Success + 4. Success + 5. 
Success + """ + + # Configured password expiration + topo.standalone.config.replace_many(('passwordexp', 'on'), ('passwordmaxage', '1')) + + # Set aci + suffix = Domain(topo.standalone, DEFAULT_SUFFIX) + ACI_TEXT = '(targetattr="*")(version 3.0; acl "test aci"; allow (all) (userdn="ldap:///all");)' + suffix.replace('aci', ACI_TEXT) + + # Add user + user = UserAccounts(topo.standalone, DEFAULT_SUFFIX, rdn=None).create_test_user() + user.replace('userpassword', PASSWORD) + time.sleep(2) + + # Bind as user with expired password. Need to use raw ldap calls because + # lib389 will close the connection when an error 49 is encountered. + ldap_object = ldap.initialize(topo.standalone.toLDAPURL()) + with pytest.raises(ldap.INVALID_CREDENTIALS): + res_type, res_data, res_msgid, res_ctrls = ldap_object.simple_bind_s( + user.dn, PASSWORD) + + # Try modify + with pytest.raises(ldap.INSUFFICIENT_ACCESS): + modlist = [ (ldap.MOD_REPLACE, 'description', b'Should not work!') ] + ldap_object.modify_ext_s(DEFAULT_SUFFIX, modlist) + + +if __name__ == '__main__': + # Run isolated + # -s for DEBUG mode + CURRENT_FILE = os.path.realpath(__file__) + pytest.main(["-s", CURRENT_FILE]) diff --git a/dirsrvtests/tests/suites/password/pwdAdmin_test.py b/dirsrvtests/tests/suites/password/pwdAdmin_test.py new file mode 100644 index 0000000..3a2924b --- /dev/null +++ b/dirsrvtests/tests/suites/password/pwdAdmin_test.py @@ -0,0 +1,443 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2016 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. 
+# --- END COPYRIGHT BLOCK --- +# +import pytest +from lib389.tasks import * +from lib389.utils import * +from lib389.topologies import topology_st + +from lib389.idm.user import UserAccounts +from lib389.idm.group import Groups +from lib389.idm.domain import Domain + +from lib389._constants import SUFFIX, DN_DM, PASSWORD, DEFAULT_SUFFIX + +pytestmark = pytest.mark.tier1 + +logging.getLogger(__name__).setLevel(logging.DEBUG) +log = logging.getLogger(__name__) + +CONFIG_DN = 'cn=config' +ADMIN_NAME = 'passwd_admin' +ADMIN_DN = 'cn=%s,%s' % (ADMIN_NAME, SUFFIX) +ADMIN2_NAME = 'passwd_admin2' +ADMIN2_DN = 'cn=%s,%s' % (ADMIN2_NAME, SUFFIX) +ADMIN_PWD = 'ntaheonusheoasuhoau_9' +ADMIN_GROUP_DN = 'cn=password admin group,%s' % (SUFFIX) +ENTRY_NAME = 'Joe Schmo' +ENTRY_DN = 'cn=%s,%s' % (ENTRY_NAME, SUFFIX) +INVALID_PWDS = ('2_Short', 'No_Number', 'N0Special', '{SSHA}bBy8UdtPZwu8uZna9QOYG3Pr41RpIRVDl8wddw==') + + +@pytest.fixture(scope="module") +def password_policy(topology_st): + """Set up password policy + Create a Password Admin entry; + Set up password policy attributes in config; + Add an aci to give everyone full access; + Test that the setup works + """ + + log.info('test_pwdAdmin_init: Creating Password Administrator entries...') + + users = UserAccounts(topology_st.standalone, DEFAULT_SUFFIX) + groups = Groups(topology_st.standalone, DEFAULT_SUFFIX) + + # Add Password Admin 1 + admin1_user = users.create(properties={ + 'uid': 'admin1', + 'cn' : 'admin1', + 'sn' : 'strator', + 'uidNumber' : '1000', + 'gidNumber' : '2000', + 'homeDirectory' : '/home/admin1', + 'userPassword': ADMIN_PWD + }) + + # Add Password Admin 2 + admin2_user = users.create(properties={ + 'uid': 'admin2', + 'cn' : 'admin2', + 'sn' : 'strator', + 'uidNumber' : '1000', + 'gidNumber' : '2000', + 'homeDirectory' : '/home/admin2', + 'userPassword': ADMIN_PWD + }) + + # Add Password Admin Group + admin_group = groups.create(properties={ + 'cn': 'password admin group' + }) + + 
admin_group.add_member(admin1_user.dn) + admin_group.add_member(admin2_user.dn) + + # Configure password policy + log.info('test_pwdAdmin_init: Configuring password policy...') + + topology_st.standalone.config.replace_many( + ('nsslapd-pwpolicy-local', 'on'), + ('passwordCheckSyntax', 'on'), + ('passwordMinCategories', '1'), + ('passwordMinTokenLength', '2'), + ('passwordExp', 'on'), + ('passwordMinDigits', '1'), + ('passwordMinSpecials', '1'), + ('passwordHistory', 'on'), + ('passwordStorageScheme', 'clear'), + ('nsslapd-enable-upgrade-hash', 'off') + ) + + # + # Add an aci to allow everyone all access (just makes things easier) + # + log.info('Add aci to allow password admin to add/update entries...') + + domain = Domain(topology_st.standalone, DEFAULT_SUFFIX) + + ACI_TARGET = "(target = \"ldap:///%s\")" % SUFFIX + ACI_TARGETATTR = "(targetattr = *)" + ACI_ALLOW = "(version 3.0; acl \"Password Admin Access\"; allow (all) " + ACI_SUBJECT = "(userdn = \"ldap:///anyone\");)" + ACI_BODY = ACI_TARGET + ACI_TARGETATTR + ACI_ALLOW + ACI_SUBJECT + + domain.add('aci', ACI_BODY) + + # + # Bind as the future Password Admin + # + log.info('test_pwdAdmin_init: Bind as the Password Administrator (before activating)...') + admin_conn = admin1_user.bind(ADMIN_PWD) + + # + # Setup our test entry, and test password policy is working + # + + # Connect up an admin authed users connection. 
+ admin_users = UserAccounts(admin_conn, DEFAULT_SUFFIX) + + # + # Start by attempting to add an entry with an invalid password + # + log.info('test_pwdAdmin_init: Attempt to add entries with invalid passwords, these adds should fail...') + for passwd in INVALID_PWDS: + with pytest.raises(ldap.CONSTRAINT_VIOLATION): + admin_users.create(properties={ + 'uid': 'example', + 'cn' : 'example', + 'sn' : 'example', + 'uidNumber' : '1000', + 'gidNumber' : '2000', + 'homeDirectory' : '/home/example', + 'userPassword': passwd + }) + + return (admin_group, admin1_user, admin2_user) + + +def test_pwdAdmin_bypass(topology_st, password_policy): + """Test that password administrators/root DN can + bypass password syntax/policy + + :id: 743bfe33-a1f7-482b-8807-efeb7aa57348 + :setup: Standalone instance, Password Admin entry, + Password policy configured as below: + nsslapd-pwpolicy-local: on + passwordCheckSyntax: on + passwordMinCategories: 1 + passwordMinTokenLength: 2 + passwordExp: on + passwordMinDigits: 1 + passwordMinSpecials: 1 + :steps: + 1: Add users with invalid passwords + :expectedresults: + 1: Users should be added successfully. + """ + + # + # Now activate a password administator, bind as root dn to do the config + # update, then rebind as the password admin + # + log.info('test_pwdAdmin: Activate the Password Administator...') + + # Extract our fixture data. + + (admin_group, admin1_user, admin2_user) = password_policy + + # Set the password admin + + topology_st.standalone.config.set('passwordAdminDN', admin1_user.dn) + + # + # Get our test entry + # + + admin_conn = admin1_user.bind(ADMIN_PWD) + admin_users = UserAccounts(admin_conn, DEFAULT_SUFFIX) + + # + # Start adding entries with invalid passwords, delete the entry after each pass. 
+ # + for passwd in INVALID_PWDS: + u1 = admin_users.create(properties={ + 'uid': 'example', + 'cn' : 'example', + 'sn' : 'example', + 'uidNumber' : '1000', + 'gidNumber' : '2000', + 'homeDirectory' : '/home/example', + 'userPassword': passwd + }) + u1.delete() + + +def test_pwdAdmin_no_admin(topology_st, password_policy): + """Test that password administrators/root DN can + bypass password syntax/policy + + :id: 74347798-7cc7-4ce7-ad5c-06387ffde02c + :setup: Standalone instance, Password Admin entry, + Password policy configured as below: + nsslapd-pwpolicy-local: on + passwordCheckSyntax: on + passwordMinCategories: 1 + passwordMinTokenLength: 2 + passwordExp: on + passwordMinDigits: 1 + passwordMinSpecials: 1 + :steps: + 1: Create a user + 2: Attempt to set passwords on the user that are invalid + :expectedresults: + 1: Success + 2: The passwords should NOT be set + """ + (admin_group, admin1_user, admin2_user) = password_policy + + # Remove password admin + + # Can't use pytest.raises. because this may or may not exist + try: + topology_st.standalone.config.remove_all('passwordAdminDN') + except ldap.NO_SUCH_ATTRIBUTE: + pass + + # + # Add the entry for the next round of testing (modify password) + # + admin_conn = admin1_user.bind(ADMIN_PWD) + admin_users = UserAccounts(admin_conn, DEFAULT_SUFFIX) + + u2 = admin_users.create(properties={ + 'uid': 'example', + 'cn' : 'example', + 'sn' : 'example', + 'uidNumber' : '1000', + 'gidNumber' : '2000', + 'homeDirectory' : '/home/example', + 'userPassword': ADMIN_PWD + }) + + # + # Make invalid password updates that should fail + # + for passwd in INVALID_PWDS: + with pytest.raises(ldap.CONSTRAINT_VIOLATION): + u2.replace('userPassword', passwd) + + +def test_pwdAdmin_modify(topology_st, password_policy): + """Test that password administrators/root DN can modify + passwords rather than adding them. 
+ + :id: 85326527-8eeb-401f-9d1b-4ef55dee45a4 + :setup: Standalone instance, Password Admin entry, + Password policy configured as below: + nsslapd-pwpolicy-local: on + passwordCheckSyntax: on + passwordMinCategories: 1 + passwordMinTokenLength: 2 + passwordExp: on + passwordMinDigits: 1 + passwordMinSpecials: 1 + :steps: + 1: Retrieve the user + 2: Replace the password with invalid content + :expectedresults: + 1: Success + 2: The password should be set + """ + (admin_group, admin1_user, admin2_user) = password_policy + + # Update config - set the password admin + topology_st.standalone.config.set('passwordAdminDN', admin1_user.dn) + + admin_conn = admin1_user.bind(ADMIN_PWD) + admin_users = UserAccounts(admin_conn, DEFAULT_SUFFIX) + + u3 = admin_users.get('example') + # + # Make the same password updates, but this time they should succeed + # + for passwd in INVALID_PWDS: + u3.replace('userPassword', passwd) + + +def test_pwdAdmin_group(topology_st, password_policy): + """Test that password admin group can bypass policy. + + :id: 4d62ae34-0f25-486e-b823-afd2b431e9b0 + :setup: Standalone instance, Password Admin entry, + Password policy configured as below: + nsslapd-pwpolicy-local: on + passwordCheckSyntax: on + passwordMinCategories: 1 + passwordMinTokenLength: 2 + passwordExp: on + passwordMinDigits: 1 + passwordMinSpecials: 1 + :steps: + 1: Add group to passwordadmin dn + 2: Attempt to set invalid passwords. + :expectedresults: + 1: Success. + 2: Password should be set. + """ + (admin_group, admin1_user, admin2_user) = password_policy + + # Update config - set the password admin group + topology_st.standalone.config.set('passwordAdminDN', admin_group.dn) + + # Bind as admin2, who is in the group. 
+ + admin2_conn = admin2_user.bind(ADMIN_PWD) + admin2_users = UserAccounts(admin2_conn, DEFAULT_SUFFIX) + + u4 = admin2_users.get('example') + + # Make some invalid password updates, but they should succeed + for passwd in INVALID_PWDS: + u4.replace('userPassword', passwd) + + +def test_pwdAdmin_config_validation(topology_st, password_policy): + """Check passwordAdminDN for valid and invalid values + + :id: f7049482-41e8-438b-ae18-cdd2612c783a + :setup: Standalone instance, Password Admin entry, + Password policy configured as below: + nsslapd-pwpolicy-local: on + passwordCheckSyntax: on + passwordMinCategories: 1 + passwordMinTokenLength: 1 + passwordExp: on + passwordMinDigits: 1 + passwordMinSpecials: 1 + :steps: + 1. Add multiple attributes - one already exists so just try and add the second one + 2. Set passwordAdminDN attribute to an invalid value (ZZZZZ) + :expectedresults: + 1. The operation should fail + 2. The operation should fail + """ + + (admin_group, admin1_user, admin2_user) = password_policy + # Add multiple attributes - one already exists so just try and add the second one + topology_st.standalone.config.set('passwordAdminDN', admin_group.dn) + with pytest.raises(ldap.OBJECT_CLASS_VIOLATION): + topology_st.standalone.config.add('passwordAdminDN', admin1_user.dn) + + # Attempt to set invalid DN + with pytest.raises(ldap.INVALID_SYNTAX): + topology_st.standalone.config.set('passwordAdminDN', 'zzzzzzzzzzzz') + + +def test_pwd_admin_config_test_skip_updates(topology_st, password_policy): + """Check passwordAdminDN does not update entry password state attributes + + :id: 964f1430-795b-4f4d-85b2-abaffe66ddcb + + :setup: Standalone instance + :steps: + 1. Add test entry + 2. Update password + 3. Password history updated + 4. Enable "skip info update" + 5. Update password again + 6. New password not in history + :expectedresults: + 1. Success + 2. Success + 3. Success + 4. Success + 5. Success + 6. 
Success + """ + + inst = topology_st.standalone + passwd_in_history = "Secret123" + password_not_in_history = "ShouldNotBeInHistory" + (admin_group, admin1_user, admin2_user) = password_policy + + # Update config + inst.config.set('passwordAdminDN', admin_group.dn) + + # Add test entry + admin_conn = admin1_user.bind(ADMIN_PWD) + admin_users = UserAccounts(admin_conn, DEFAULT_SUFFIX) + admin_users.create(properties={ + 'uid': 'skipInfoUpdate', + 'cn': 'skipInfoUpdate', + 'sn': 'skipInfoUpdate', + 'uidNumber': '1001', + 'gidNumber': '2002', + 'homeDirectory': '/home/skipInfoUpdate', + 'userPassword': "abdcefghijk" + }) + + # Update password to populate history + user = admin_users.get('skipInfoUpdate') + user.replace('userPassword', passwd_in_history) + user.replace('userPassword', passwd_in_history) + time.sleep(1) + + # Check password history was updated + passwords = user.get_attr_vals_utf8('passwordHistory') + log.debug(f"passwords in history for {user.dn}: {str(passwords)}") + found = False + for passwd in passwords: + if passwd_in_history in passwd: + found = True + assert found + + # Disable password state info updates + inst.config.set('passwordAdminSkipInfoUpdate', 'on') + time.sleep(1) + + # Update password + user.replace('userPassword', password_not_in_history) + user.replace('userPassword', password_not_in_history) + time.sleep(1) + + # Check it is not in password history + passwords = user.get_attr_vals_utf8('passwordHistory') + log.debug(f"Part 2: passwords in history for {user.dn}: {str(passwords)}") + found = False + for passwd in passwords: + if password_not_in_history in passwd: + found = True + assert not found + + +if __name__ == '__main__': + # Run isolated + # -s for DEBUG mode + CURRENT_FILE = os.path.realpath(__file__) + pytest.main("-s %s" % CURRENT_FILE) diff --git a/dirsrvtests/tests/suites/password/pwdModify_test.py b/dirsrvtests/tests/suites/password/pwdModify_test.py new file mode 100644 index 0000000..9e32823 --- /dev/null +++ 
b/dirsrvtests/tests/suites/password/pwdModify_test.py @@ -0,0 +1,282 @@ +# Copyright (C) 2018 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. +# --- END COPYRIGHT BLOCK --- +# +import pytest +import re +from ldap.controls import LDAPControl +from lib389._constants import * +from lib389.tasks import * +from lib389.utils import * +from lib389.topologies import topology_st as topo +from lib389.idm.user import UserAccounts, TEST_USER_PROPERTIES +from lib389.idm.organizationalunit import OrganizationalUnits +from lib389.pwpolicy import PwPolicyManager + +pytestmark = pytest.mark.tier1 + +DEBUGGING = os.getenv("DEBUGGING", default=False) +if DEBUGGING: + logging.getLogger(__name__).setLevel(logging.DEBUG) +else: + logging.getLogger(__name__).setLevel(logging.INFO) +log = logging.getLogger(__name__) + + +OLD_PASSWD = 'password' +NEW_PASSWD = 'newpassword' +SHORT_PASSWD = 'wd' +TESTPEOPLE_OU = "TestPeople_bug834047" +USER_ACI = '(targetattr="userpassword")(version 3.0; acl "pwp test"; allow (all) userdn="ldap:///self";)' + + +@pytest.fixture(scope="function") +def pwd_policy_setup(topo, request): + """ + Setup to set passwordStorageScheme as CLEAR + passwordHistory to on + passwordStorageScheme to SSHA + passwordHistory off + """ + log.info("Change the pwd storage type to clear and change the password once to refresh it(for the rest of tests") + topo.standalone.simple_bind_s(DN_DM, PASSWORD) + topo.standalone.config.set('passwordStorageScheme', 'CLEAR') + assert topo.standalone.passwd_s(user_2.dn, OLD_PASSWD, NEW_PASSWD) + topo.standalone.config.set('passwordHistory', 'on') + + def fin(): + topo.standalone.simple_bind_s(DN_DM, PASSWORD) + topo.standalone.config.set('passwordStorageScheme', 'SSHA') + topo.standalone.config.set('passwordHistory', 'off') + request.addfinalizer(fin) + + +def test_pwd_modify_with_different_operation(topo): + """Performing various password modify operation, + make sure that 
password is actually modified + + :id: e36d68a8-0960-48e4-932c-6c2f64abaebc + :setup: Standalone instance and TLS enabled + :steps: + 1. Attempt for Password change for an entry that does not exists + 2. Attempt for Password change for an entry that exists + 3. Attempt for Password change to old for an entry that exists + 4. Attempt for Password Change with Binddn as testuser but with wrong old password + 5. Attempt for Password Change with Binddn as testuser + 6. Attempt for Password Change without giving newpassword + 7. Checking password change Operation using a Non-Secure connection + 8. Testuser attempts to change password for testuser2(userPassword attribute is Set) + 9. Directory Manager attempts to change password for testuser2(userPassword attribute is Set) + 10. Create a password syntax policy. Attempt to change to password that violates that policy + 11. userPassword mod with control results in ber decode error + + :expectedresults: + 1. Operation should be successful + 2. Operation should be successful + 3. Operation should be successful + 4. Operation should not be successful + 5. Operation should be successful + 6. Operation should be successful + 7. Operation should not be successful + 8. Operation should not be successful + 9. Operation should be successful + 10. Operation should violates the policy + 11. 
Operation should be successful + """ + + topo.standalone.enable_tls() + os.environ["LDAPTLS_CACERTDIR"] = topo.standalone.get_ssca_dir() + users = UserAccounts(topo.standalone, DEFAULT_SUFFIX) + TEST_USER_PROPERTIES['userpassword'] = OLD_PASSWD + global user + user = users.create(properties=TEST_USER_PROPERTIES) + ous = OrganizationalUnits(topo.standalone, DEFAULT_SUFFIX) + ou = ous.get('people') + ou.add('aci', USER_ACI) + + with pytest.raises(ldap.NO_SUCH_OBJECT): + log.info("Attempt for Password change for an entry that does not exists") + assert topo.standalone.passwd_s('uid=testuser1,ou=People,dc=example,dc=com', OLD_PASSWD, NEW_PASSWD) + log.info("Attempt for Password change for an entry that exists") + assert topo.standalone.passwd_s(user.dn, OLD_PASSWD, NEW_PASSWD) + log.info("Attempt for Password change to old for an entry that exists") + assert topo.standalone.passwd_s(user.dn, NEW_PASSWD, OLD_PASSWD) + log.info("Attempt for Password Change with Binddn as testuser but with wrong old password") + topo.standalone.simple_bind_s(user.dn, OLD_PASSWD) + with pytest.raises(ldap.INVALID_CREDENTIALS): + topo.standalone.passwd_s(user.dn, NEW_PASSWD, NEW_PASSWD) + log.info("Attempt for Password Change with Binddn as testuser") + assert topo.standalone.passwd_s(user.dn, OLD_PASSWD, NEW_PASSWD) + log.info("Attempt for Password Change without giving newpassword") + assert topo.standalone.passwd_s(user.dn, None, OLD_PASSWD) + assert user.get_attr_val_utf8('uid') == 'testuser' + log.info("Change password to NEW_PASSWD i.e newpassword") + assert topo.standalone.passwd_s(user.dn, None, NEW_PASSWD) + assert topo.standalone.passwd_s(user.dn, NEW_PASSWD, None) + log.info("Check binding with old/new password") + password = [OLD_PASSWD, NEW_PASSWD] + for pass_val in password: + with pytest.raises(ldap.INVALID_CREDENTIALS): + topo.standalone.simple_bind_s(user.dn, pass_val) + log.info("Change password back to OLD_PASSWD i.e password") + topo.standalone.simple_bind_s(DN_DM, 
PASSWORD) + assert topo.standalone.passwd_s(user.dn, None, NEW_PASSWD) + log.info("Checking password change Operation using a Non-Secure connection") + conn = ldap.initialize("ldap://%s:%s" % (HOST_STANDALONE, PORT_STANDALONE)) + with pytest.raises(ldap.CONFIDENTIALITY_REQUIRED): + conn.passwd_s(user.dn, NEW_PASSWD, OLD_PASSWD) + log.info("Testuser attempts to change password for testuser2(userPassword attribute is Set)") + global user_2 + users = UserAccounts(topo.standalone, DEFAULT_SUFFIX) + user_2 = users.create(properties={ + 'uid': 'testuser2', + 'cn': 'testuser2', + 'sn': 'testuser2', + 'uidNumber': '3000', + 'gidNumber': '4000', + 'homeDirectory': '/home/testuser2', + 'userPassword': OLD_PASSWD + }) + + topo.standalone.simple_bind_s(user.dn, NEW_PASSWD) + with pytest.raises(ldap.INSUFFICIENT_ACCESS): + assert topo.standalone.passwd_s(user_2.dn, OLD_PASSWD, NEW_PASSWD) + log.info("Directory Manager attempts to change password for testuser2(userPassword attribute is Set)") + topo.standalone.simple_bind_s(DN_DM, PASSWORD) + assert topo.standalone.passwd_s(user_2.dn, OLD_PASSWD, NEW_PASSWD) + log.info("Changing userPassword attribute to Undefined for testuser2") + topo.standalone.modify_s(user_2.dn, [(ldap.MOD_REPLACE, 'userPassword', None)]) + log.info("Testuser attempts to change password for testuser2(userPassword attribute is Undefined)") + with pytest.raises(ldap.INSUFFICIENT_ACCESS): + topo.standalone.simple_bind_s(user.dn, NEW_PASSWD) + assert topo.standalone.passwd_s(user_2.dn, None, NEW_PASSWD) + log.info("Directory Manager attempts to change password for testuser2(userPassword attribute is Undefined)") + topo.standalone.simple_bind_s(DN_DM, PASSWORD) + assert topo.standalone.passwd_s(user_2.dn, None, OLD_PASSWD) + log.info("Create a password syntax policy. 
Attempt to change to password that violates that policy") + topo.standalone.config.set('PasswordCheckSyntax', 'on') + with pytest.raises(ldap.CONSTRAINT_VIOLATION): + assert topo.standalone.passwd_s(user_2.dn, OLD_PASSWD, SHORT_PASSWD) + log.info("Reset password syntax policy") + topo.standalone.config.set('PasswordCheckSyntax', 'off') + log.info("userPassword mod with control results in ber decode error") + topo.standalone.simple_bind_s(DN_DM, PASSWORD) + assert topo.standalone.modify_ext_s(user.dn, [(ldap.MOD_REPLACE, 'userpassword', b'abcdefg')], + serverctrls=[LDAPControl('2.16.840.1.113730.3.4.2', 1, None)]) + log.info("Reseting the testuser's password") + topo.standalone.passwd_s(user.dn, 'abcdefg', NEW_PASSWD) + + +def test_pwd_modify_with_password_policy(topo, pwd_policy_setup): + """Performing various password modify operation, + with passwordStorageScheme as CLEAR + passwordHistory to on + + :id: 200bf0fd-20ab-4dde-849e-54067e98b917 + :setup: Standalone instance (TLS enabled) with pwd_policy_setup + :steps: + 1. Change the password and check that a new entry has been added to the history + 2. Try changing password to one stored in history + 3. Change the password several times in a row, and try binding after each change + 4. Try to bind using short password + + :expectedresults: + 1. Operation should be successful + 2. Operation should be unsuccessful + 3. Operation should be successful + 4. Operation should be unsuccessful + """ + log.info("Change the password and check that a new entry has been added to the history") + topo.standalone.passwd_s(user_2.dn, NEW_PASSWD, OLD_PASSWD) + regex = re.search('Z(.+)', user_2.get_attr_val_utf8('passwordhistory')) + assert NEW_PASSWD == regex.group(1) + log.info("Try changing password to one stored in history. 
Should fail") + with pytest.raises(ldap.CONSTRAINT_VIOLATION): + assert topo.standalone.passwd_s(user_2.dn, OLD_PASSWD, NEW_PASSWD) + log.info("Change the password several times in a row, and try binding after each change") + topo.standalone.passwd_s(user.dn, NEW_PASSWD, OLD_PASSWD) + assert topo.standalone.simple_bind_s(user.dn, OLD_PASSWD) + topo.standalone.passwd_s(user.dn, OLD_PASSWD, SHORT_PASSWD) + assert topo.standalone.simple_bind_s(user.dn, SHORT_PASSWD) + with pytest.raises(ldap.CONSTRAINT_VIOLATION): + topo.standalone.passwd_s(user.dn, SHORT_PASSWD, OLD_PASSWD) + + +def test_pwd_modify_with_subsuffix(topo): + """Performing various password modify operation. + + :id: 2255b4e6-3546-4ec5-84a5-cd8b3d894ac5 + :setup: Standalone instance (TLS enabled) + :steps: + 1. Add a new SubSuffix & password policy + 2. Add two New users under the SubEntry + 3. Change password of uid=test_user0,ou=TestPeople_bug834047,dc=example,dc=com to newpassword + 4. Try to delete password- case when password is specified + 5. Try to delete password- case when password is not specified + + :expectedresults: + 1. Operation should be successful + 2. Operation should be successful + 3. Operation should be successful + 4. Operation should be successful + 5. 
Operation should be successful + """ + + log.info("Add a new SubSuffix") + topo.standalone.simple_bind_s(DN_DM, PASSWORD) + + ous = OrganizationalUnits(topo.standalone, DEFAULT_SUFFIX) + ou_temp = ous.create(properties={'ou': TESTPEOPLE_OU}) + ou_temp.add('aci', USER_ACI) + + log.info("Add the container & create password policies") + policy = PwPolicyManager(topo.standalone) + policy.create_subtree_policy(ou_temp.dn, properties={ + 'passwordHistory': 'on', + 'passwordInHistory': '6', + 'passwordChange': 'on', + 'passwordStorageScheme': 'CLEAR'}) + + log.info("Add two New users under the SubEntry") + user = UserAccounts(topo.standalone, DEFAULT_SUFFIX, rdn='ou=TestPeople_bug834047') + test_user0 = user.create(properties={ + 'uid': 'test_user0', + 'cn': 'test0', + 'sn': 'test0', + 'uidNumber': '3002', + 'gidNumber': '4002', + 'homeDirectory': '/home/test_user0', + 'userPassword': OLD_PASSWD + }) + + test_user1 = user.create(properties={ + 'uid': 'test_user1', + 'cn': 'test1', + 'sn': 'test1', + 'uidNumber': '3003', + 'gidNumber': '4003', + 'homeDirectory': '/home/test_user3', + 'userPassword': OLD_PASSWD + }) + + log.info("Changing password of {} to newpassword".format(test_user0.dn)) + test_user0.rebind(OLD_PASSWD) + test_user0.reset_password(NEW_PASSWD) + test_user0.rebind(NEW_PASSWD) + + log.info("Try to delete password- case when password is specified") + test_user0.remove('userPassword', NEW_PASSWD) + + test_user1.rebind(OLD_PASSWD) + log.info("Try to delete password- case when password is not specified") + test_user1.remove_all('userPassword') + + +if __name__ == '__main__': + # Run isolated + # -s for DEBUG mode + CURRENT_FILE = os.path.realpath(__file__) + pytest.main(["-s", CURRENT_FILE]) diff --git a/dirsrvtests/tests/suites/password/pwdPolicy_attribute_test.py b/dirsrvtests/tests/suites/password/pwdPolicy_attribute_test.py new file mode 100644 index 0000000..b68b14c --- /dev/null +++ b/dirsrvtests/tests/suites/password/pwdPolicy_attribute_test.py @@ -0,0 
+1,312 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2016 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. +# --- END COPYRIGHT BLOCK --- +# +import pytest +from lib389.tasks import * +from lib389.utils import * +import pdb +from lib389.topologies import topology_st +from lib389.pwpolicy import PwPolicyManager +from lib389.idm.user import UserAccount, UserAccounts, TEST_USER_PROPERTIES +from lib389.idm.organizationalunit import OrganizationalUnits +from lib389._constants import (DEFAULT_SUFFIX, DN_DM, PASSWORD) + +pytestmark = pytest.mark.tier1 + +OU_PEOPLE = 'ou=people,{}'.format(DEFAULT_SUFFIX) +TEST_USER_NAME = 'simplepaged_test' +TEST_USER_DN = 'uid={},{}'.format(TEST_USER_NAME, OU_PEOPLE) +TEST_USER_PWD = 'simplepaged_test' +PW_POLICY_CONT_USER = 'cn="cn=nsPwPolicyEntry,uid=simplepaged_test,' \ + 'ou=people,dc=example,dc=com",' \ + 'cn=nsPwPolicyContainer,ou=people,dc=example,dc=com' +PW_POLICY_CONT_PEOPLE = 'cn="cn=nsPwPolicyEntry,' \ + 'ou=people,dc=example,dc=com",' \ + 'cn=nsPwPolicyContainer,ou=people,dc=example,dc=com' + +logging.getLogger(__name__).setLevel(logging.INFO) +log = logging.getLogger(__name__) + + +@pytest.fixture(scope="module") +def add_test_user(topology_st, request): + """User for binding operation""" + topology_st.standalone.config.set('nsslapd-auditlog-logging-enabled', 'on') + log.info('Adding test user {}') + users = UserAccounts(topology_st.standalone, OU_PEOPLE, rdn=None) + user_props = TEST_USER_PROPERTIES.copy() + user_props.update({'uid': TEST_USER_NAME, 'userpassword': TEST_USER_PWD}) + try: + user = users.create(properties=user_props) + except: + pass # debug only + + USER_ACI = '(targetattr="*")(version 3.0; acl "pwp test"; allow (all) userdn="ldap:///%s";)' % user.dn + ous = OrganizationalUnits(topology_st.standalone, DEFAULT_SUFFIX) + ou_people = ous.get('people') + ou_people.add('aci', USER_ACI) + + def fin(): + log.info('Deleting user 
{}'.format(user.dn)) + topology_st.standalone.simple_bind_s(DN_DM, PASSWORD) + + request.addfinalizer(fin) + return user + + +@pytest.fixture(scope="module") +def password_policy(topology_st, add_test_user): + """Set up password policy for subtree and user""" + + pwp = PwPolicyManager(topology_st.standalone) + policy_props = {} + log.info('Create password policy for subtree {}'.format(OU_PEOPLE)) + pwp.create_subtree_policy(OU_PEOPLE, policy_props) + + log.info('Create password policy for user {}'.format(TEST_USER_DN)) + pwp.create_user_policy(TEST_USER_DN, policy_props) + +@pytest.mark.bz1845094 +@pytest.mark.skipif(ds_is_older('1.4.3.3'), reason="Not implemented") +def test_pwdReset_by_user_DM(topology_st, add_test_user): + """Test new password policy attribute "pwdReset" + + :id: 232bc7dc-8cb6-11eb-9791-98fa9ba19b65 + :customerscenario: True + :setup: Standalone instance, Add a new user with a password + :steps: + 1. Enable passwordMustChange + 2. Bind as the user and change the password + 3. Check that the pwdReset attribute is set to TRUE + 4. Bind as the Directory manager and attempt to change the pwdReset to FALSE + 5. Check that pwdReset is NOT SET to FALSE + :expectedresults: + 1. Success + 2. Success + 3. Successful bind as DS user, pwdReset as DS user fails w UNWILLING_TO_PERFORM + 4. Success + 5. 
Success + """ + + # Reset user's password + our_user = UserAccount(topology_st.standalone, TEST_USER_DN) + log.info('Set password policy passwordMustChange on') + topology_st.standalone.config.replace('passwordMustChange', 'on') + our_user.replace('userpassword', PASSWORD) + time.sleep(5) + + # Check that pwdReset is TRUE + assert our_user.get_attr_val_utf8('pwdReset') == 'TRUE' + + log.info('Binding as the Directory manager and attempt to change the pwdReset to FALSE') + topology_st.standalone.simple_bind_s(DN_DM, PASSWORD) + with pytest.raises(ldap.UNWILLING_TO_PERFORM): + topology_st.standalone.config.replace('pwdReset', 'FALSE') + + log.info('Check that pwdReset is NOT SET to FALSE') + assert our_user.get_attr_val_utf8('pwdReset') == 'TRUE' + + log.info('Resetting password for {}'.format(TEST_USER_PWD)) + our_user.reset_password(TEST_USER_PWD) + + +@pytest.mark.skipif(ds_is_older('1.4.3.3'), reason="Not implemented") +def test_pwd_reset(topology_st, add_test_user): + """Test new password policy attribute "pwdReset" + + :id: 03db357b-4800-411e-a36e-28a534293004 + :customerscenario: True + :setup: Standalone instance + :steps: + 1. Enable passwordMustChange + 2. Reset user's password + 3. Check that the pwdReset attribute is set to TRUE + 4. Bind as the user and change its password + 5. Check that pwdReset is now set to FALSE + 6. Reset password policy configuration + :expectedresults: + 1. Success + 2. Success + 3. Success + 4. Success + 5. Success + 6. 
Success + """ + + # Set password policy config + topology_st.standalone.config.replace('passwordMustChange', 'on') + time.sleep(.5) + + # Reset user's password + our_user = UserAccount(topology_st.standalone, TEST_USER_DN) + our_user.replace('userpassword', PASSWORD) + time.sleep(.5) + + # Check that pwdReset is TRUE + assert our_user.get_attr_val_utf8('pwdReset') == 'TRUE' + + # Bind as user and change its own password + our_user.rebind(PASSWORD) + our_user.replace('userpassword', PASSWORD) + time.sleep(.5) + + # Check that pwdReset is FALSE + topology_st.standalone.simple_bind_s(DN_DM, PASSWORD) + assert our_user.get_attr_val_utf8('pwdReset') == 'FALSE' + + # Reset password policy config + topology_st.standalone.config.replace('passwordMustChange', 'off') + + # Reset user's password + our_user.replace('userpassword', TEST_USER_PWD) + + +@pytest.mark.parametrize('subtree_pwchange,user_pwchange,exception', + [('on', 'off', ldap.UNWILLING_TO_PERFORM), + ('off', 'off', ldap.UNWILLING_TO_PERFORM), + ('off', 'on', False), ('on', 'on', False)]) +def test_change_pwd(topology_st, add_test_user, password_policy, + subtree_pwchange, user_pwchange, exception): + """Verify that 'passwordChange' attr works as expected + User should have a priority over a subtree. + + :id: 2c884432-2ba1-4662-8e5d-2cd49f77e5fa + :parametrized: yes + :setup: Standalone instance, a test user, + password policy entries for a user and a subtree + :steps: + 1. Set passwordChange on the user and the subtree + to various combinations + 2. Bind as test user + 3. Try to change password + 4. Clean up - change the password to default while bound as DM + :expectedresults: + 1. passwordChange should be successfully set + 2. Bind should be successful + 3. Subtree/User passwordChange - result, accordingly: + off/on, on/on - success; + on/off, off/off - UNWILLING_TO_PERFORM + 4. 
Operation should be successful + """ + + users = UserAccounts(topology_st.standalone, OU_PEOPLE, rdn=None) + user = users.get(TEST_USER_NAME) + + log.info('Set passwordChange to "{}" - {}'.format(subtree_pwchange, OU_PEOPLE)) + pwp = PwPolicyManager(topology_st.standalone) + subtree_policy = pwp.get_pwpolicy_entry(OU_PEOPLE) + subtree_policy.set('passwordChange', subtree_pwchange) + + time.sleep(1) + + log.info('Set passwordChange to "{}" - {}'.format(user_pwchange, TEST_USER_DN)) + pwp2 = PwPolicyManager(topology_st.standalone) + user_policy = pwp2.get_pwpolicy_entry(TEST_USER_DN) + user_policy.set('passwordChange', user_pwchange) + user_policy.set('passwordExp', 'on') + + time.sleep(1) + + try: + log.info('Bind as user and modify userPassword') + user.rebind(TEST_USER_PWD) + if exception: + with pytest.raises(exception): + user.reset_password('new_pass') + else: + user.reset_password('new_pass') + except ldap.LDAPError as e: + log.error('Failed to change userpassword for {}: error {}'.format( + TEST_USER_DN, e.args[0]['info'])) + raise e + finally: + log.info('Bind as DM') + topology_st.standalone.simple_bind_s(DN_DM, PASSWORD) + user.reset_password(TEST_USER_PWD) + + +def test_pwd_min_age(topology_st, add_test_user, password_policy): + """If we set passwordMinAge to some value, for example to 10, then it + should not allow the user to change the password within 10 seconds after + his previous change. + + :id: 85b98516-8c82-45bd-b9ec-90bd1245e09c + :setup: Standalone instance, a test user, + password policy entries for a user and a subtree + :steps: + 1. Set passwordMinAge to 10 on the user pwpolicy entry + 2. Set passwordMinAge to 10 on the subtree pwpolicy entry + 3. Set passwordMinAge to 10 on the cn=config entry + 4. Bind as test user + 5. Try to change the password two times in a row + 6. Wait 12 seconds + 7. Try to change the password + 8. Clean up - change the password to default while bound as DM + :expectedresults: + 1. 
passwordMinAge should be successfully set on the user pwpolicy entry + 2. passwordMinAge should be successfully set on the subtree pwpolicy entry + 3. passwordMinAge should be successfully set on the cn=config entry + 4. Bind should be successful + 5. The password should be successfully changed + 6. 12 seconds have passed + 7. Constraint Violation error should be raised + 8. Operation should be successful + """ + + num_seconds = '10' + users = UserAccounts(topology_st.standalone, OU_PEOPLE, rdn=None) + user = users.get(TEST_USER_NAME) + + log.info('Set passwordminage to "{}" - {}'.format(num_seconds, OU_PEOPLE)) + pwp = PwPolicyManager(topology_st.standalone) + subtree_policy = pwp.get_pwpolicy_entry(OU_PEOPLE) + subtree_policy.set('passwordminage', num_seconds) + + log.info('Set passwordminage to "{}" - {}'.format(num_seconds, TEST_USER_DN)) + user_policy = pwp.get_pwpolicy_entry(TEST_USER_DN) + user_policy.set('passwordminage', num_seconds) + + log.info('Set passwordminage to "{}" - {}'.format(num_seconds, DN_CONFIG)) + topology_st.standalone.config.set('passwordminage', num_seconds) + + time.sleep(1) + + log.info('Bind as user and modify userPassword') + user.rebind(TEST_USER_PWD) + user.reset_password('new_pass') + + time.sleep(1) + + log.info('Bind as user and modify userPassword straight away after previous change') + user.rebind('new_pass') + with pytest.raises(ldap.CONSTRAINT_VIOLATION): + user.reset_password('new_new_pass') + + log.info('Wait {} second'.format(int(num_seconds) + 2)) + time.sleep(int(num_seconds) + 2) + + try: + log.info('Bind as user and modify userPassword') + user.rebind('new_pass') + user.reset_password(TEST_USER_PWD) + except ldap.LDAPError as e: + log.error('Failed to change userpassword for {}: error {}'.format( + TEST_USER_DN, e.args[0]['info'])) + raise e + finally: + log.info('Bind as DM') + topology_st.standalone.simple_bind_s(DN_DM, PASSWORD) + user.reset_password(TEST_USER_PWD) + pwp.delete_local_policy(TEST_USER_DN) + 
pwp.delete_local_policy(OU_PEOPLE) + +if __name__ == '__main__': + # Run isolated + # -s for DEBUG mode + CURRENT_FILE = os.path.realpath(__file__) + pytest.main("-s %s" % CURRENT_FILE) diff --git a/dirsrvtests/tests/suites/password/pwdPolicy_controls_sequence_test.py b/dirsrvtests/tests/suites/password/pwdPolicy_controls_sequence_test.py new file mode 100644 index 0000000..ec85b61 --- /dev/null +++ b/dirsrvtests/tests/suites/password/pwdPolicy_controls_sequence_test.py @@ -0,0 +1,133 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2021 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. +# --- END COPYRIGHT BLOCK --- +# + +import logging +import pytest +import os +import ldap +import time +import ast + +from ldap.controls.ppolicy import PasswordPolicyControl +from ldap.controls.pwdpolicy import PasswordExpiredControl +from lib389.topologies import topology_st as topo +from lib389.idm.user import UserAccounts +from lib389._constants import (DN_DM, PASSWORD, DEFAULT_SUFFIX) + +pytestmark = pytest.mark.tier1 + +DEBUGGING = os.getenv("DEBUGGING", default=False) +if DEBUGGING: + logging.getLogger(__name__).setLevel(logging.DEBUG) +else: + logging.getLogger(__name__).setLevel(logging.INFO) +log = logging.getLogger(__name__) + + +USER_DN = 'uid=test entry,ou=people,dc=example,dc=com' +USER_PW = b'password123' + + +@pytest.fixture +def init_user(topo, request): + users = UserAccounts(topo.standalone, DEFAULT_SUFFIX) + user_data = {'uid': 'test entry', + 'cn': 'test entry', + 'sn': 'test entry', + 'uidNumber': '3000', + 'gidNumber': '4000', + 'homeDirectory': '/home/test_entry', + 'userPassword': USER_PW} + test_user = users.create(properties=user_data) + + def fin(): + log.info('Delete test user') + if test_user.exists(): + test_user.delete() + + request.addfinalizer(fin) + + +def bind_and_get_control(topo): + log.info('Bind as the user, and return any controls') + res_type = res_data = res_msgid = 
res_ctrls = None + result_id = '' + + try: + result_id = topo.standalone.simple_bind(USER_DN, USER_PW, + serverctrls=[PasswordPolicyControl()]) + res_type, res_data, res_msgid, res_ctrls = topo.standalone.result3(result_id) + except ldap.LDAPError as e: + log.info('Got expected error: {}'.format(str(e))) + res_ctrls = ast.literal_eval(str(e)) + pass + + topo.standalone.simple_bind(DN_DM, PASSWORD) + return res_ctrls + + +def change_passwd(topo): + log.info('Reset user password as the user, then re-bind as Directory Manager') + users = UserAccounts(topo.standalone, DEFAULT_SUFFIX) + user = users.get('test entry') + user.rebind(USER_PW) + user.reset_password(USER_PW) + topo.standalone.simple_bind(DN_DM, PASSWORD) + + +@pytest.mark.bz1724914 +@pytest.mark.ds3585 +def test_controltype_expired_grace_limit(topo, init_user): + """Test for expiration control when password is expired with available and exhausted grace login + + :id: 0392a73c-6467-49f9-bdb6-3648f6971896 + :setup: Standalone instance, a user for testing + :steps: + 1. Configure password policy, reset password and allow it to expire + 2. Bind and check sequence of controlType + 3. Bind (one grace login remaining) and check sequence of controlType + 4. Bind (grace login exhausted) and check sequence of controlType + :expectedresults: + 1. Config update and password reset are successful + 2. ControlType sequence is in correct order + 3. ControlType sequence is in correct order + 4. 
ControlType sequence is in correct order + """ + + log.info('Configure password policy with grace limit set to 2') + topo.standalone.config.set('passwordExp', 'on') + topo.standalone.config.set('passwordMaxAge', '5') + topo.standalone.config.set('passwordGraceLimit', '2') + + log.info('Change password and wait for it to expire') + change_passwd(topo) + time.sleep(6) + + log.info('Bind and use up one grace login (only one left)') + controls = bind_and_get_control(topo) + assert (controls[0].controlType == "1.3.6.1.4.1.42.2.27.8.5.1") + assert (controls[1].controlType == "2.16.840.1.113730.3.4.4") + + log.info('Bind again and check the sequence') + controls = bind_and_get_control(topo) + assert (controls[0].controlType == "1.3.6.1.4.1.42.2.27.8.5.1") + assert (controls[1].controlType == "2.16.840.1.113730.3.4.4") + + log.info('Bind with expired grace login and check the sequence') + # No grace login available, bind should fail, controls will be returned in error message + controls = bind_and_get_control(topo) + assert (controls['ctrls'][0][0] == "1.3.6.1.4.1.42.2.27.8.5.1") + assert (controls['ctrls'][1][0] == "2.16.840.1.113730.3.4.4") + + +if __name__ == '__main__': + # Run isolated + # -s for DEBUG mode + CURRENT_FILE = os.path.realpath(__file__) + pytest.main("-s %s" % CURRENT_FILE) diff --git a/dirsrvtests/tests/suites/password/pwdPolicy_controls_test.py b/dirsrvtests/tests/suites/password/pwdPolicy_controls_test.py new file mode 100644 index 0000000..0015a1f --- /dev/null +++ b/dirsrvtests/tests/suites/password/pwdPolicy_controls_test.py @@ -0,0 +1,300 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2022 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. 
+# --- END COPYRIGHT BLOCK --- +# +import logging +import pytest +import os +import ldap +import time +from ldap.controls.ppolicy import PasswordPolicyControl +from lib389.topologies import topology_st as topo +from lib389.idm.user import UserAccounts +from lib389._constants import (DN_DM, PASSWORD, DEFAULT_SUFFIX) +from lib389.idm.organizationalunit import OrganizationalUnits + +pytestmark = pytest.mark.tier1 + +DEBUGGING = os.getenv("DEBUGGING", default=False) +if DEBUGGING: + logging.getLogger(__name__).setLevel(logging.DEBUG) +else: + logging.getLogger(__name__).setLevel(logging.INFO) +log = logging.getLogger(__name__) + +USER_DN = 'uid=test entry,ou=people,dc=example,dc=com' +USER_PW = b'password123' +USER_ACI = '(targetattr="userpassword")(version 3.0; acl "pwp test"; allow (all) userdn="ldap:///self";)' + + +@pytest.fixture +def init_user(topo, request): + """Initialize a user - Delete and re-add test user + """ + try: + topo.standalone.simple_bind_s(DN_DM, PASSWORD) + users = UserAccounts(topo.standalone, DEFAULT_SUFFIX) + user = users.get('test entry') + user.delete() + except ldap.NO_SUCH_OBJECT: + pass + except ldap.LDAPError as e: + log.error("Failed to delete user, error: {}".format(e.message['desc'])) + assert False + + user_data = {'uid': 'test entry', + 'cn': 'test entry', + 'sn': 'test entry', + 'uidNumber': '3000', + 'gidNumber': '4000', + 'homeDirectory': '/home/test_entry', + 'userPassword': USER_PW} + users.create(properties=user_data) + + +def change_passwd(topo): + """Reset users password as the user, then re-bind as Directory Manager + """ + users = UserAccounts(topo.standalone, DEFAULT_SUFFIX) + user = users.get('test entry') + user.rebind(USER_PW) + user.reset_password(USER_PW) + topo.standalone.simple_bind_s(DN_DM, PASSWORD) + + +def bind_and_get_control(topo, err=0): + """Bind as the user, and return any controls + """ + res_type = res_data = res_msgid = res_ctrls = None + result_id = '' + + try: + result_id = 
topo.standalone.simple_bind(USER_DN, USER_PW, + serverctrls=[PasswordPolicyControl()]) + res_type, res_data, res_msgid, res_ctrls = topo.standalone.result3(result_id) + if err: + log.fatal('Expected an error, but bind succeeded') + assert False + except ldap.LDAPError as e: + if err: + log.debug('Got expected error: {}'.format(str(e))) + pass + else: + log.fatal('Did not expect an error: {}'.format(str(e))) + assert False + + if DEBUGGING and res_ctrls and len(res_ctrls) > 0: + for ctl in res_ctrls: + if ctl.timeBeforeExpiration: + log.debug('control time before expiration: {}'.format(ctl.timeBeforeExpiration)) + if ctl.graceAuthNsRemaining: + log.debug('control grace login remaining: {}'.format(ctl.graceAuthNsRemaining)) + if ctl.error is not None and ctl.error >= 0: + log.debug('control error: {}'.format(ctl.error)) + + topo.standalone.simple_bind_s(DN_DM, PASSWORD) + return res_ctrls + + +def test_pwd_must_change(topo, init_user): + """Test for expiration control when password must be changed because an + admin reset the password + + :id: a3d99be5-0b69-410d-b72f-04eda8821a56 + :setup: Standalone instance, a user for testing + :steps: + 1. Configure password policy and reset password as admin + 2. Bind, and check for expired control withthe proper error code "2" + :expectedresults: + 1. Config update succeeds, adn the password is reset + 2. 
The EXPIRED control is returned, and we the expected error code "2" + """ + + log.info('Configure password policy with paswordMustChange set to "on"') + topo.standalone.config.set('passwordExp', 'on') + topo.standalone.config.set('passwordMaxAge', '200') + topo.standalone.config.set('passwordGraceLimit', '0') + topo.standalone.config.set('passwordWarning', '199') + topo.standalone.config.set('passwordMustChange', 'on') + + ous = OrganizationalUnits(topo.standalone, DEFAULT_SUFFIX) + ou = ous.get('people') + ou.add('aci', USER_ACI) + + log.info('Reset userpassword as Directory Manager') + users = UserAccounts(topo.standalone, DEFAULT_SUFFIX) + user = users.get('test entry') + user.reset_password(USER_PW) + + log.info('Bind should return ctrl with error code 2 (changeAfterReset)') + time.sleep(2) + ctrls = bind_and_get_control(topo) + if ctrls and len(ctrls) > 0: + if ctrls[0].error is None: + log.fatal("Response ctrl error code not set") + assert False + elif ctrls[0].error != 2: + log.fatal("Got unexpected error code: {}".format(ctrls[0].error)) + assert False + else: + log.fatal("We did not get a response ctrl") + assert False + + +def test_pwd_expired_grace_limit(topo, init_user): + """Test for expiration control when password is expired, but there are + remaining grace logins + + :id: a3d99be5-0b69-410d-b72f-04eda8821a51 + :setup: Standalone instance, a user for testing + :steps: + 1. Configure password policy and reset password,adn allow it to expire + 2. Bind, and check for expired control, and grace limit + 3. Bind again, consuming the last grace login, control should be returned + 4. Bind again, it should fail, and no control returned + :expectedresults: + 1. Config update and password reset are successful + 2. The EXPIRED control is returned, and we get the expected number + of grace logins in the control + 3. The response control has the expected value for grace logins + 4. 
The bind fails with error 49, and no control is returned + """ + + log.info('Configure password policy with grace limit set tot 2') + topo.standalone.config.set('passwordExp', 'on') + topo.standalone.config.set('passwordMaxAge', '5') + topo.standalone.config.set('passwordGraceLimit', '2') + + log.info('Change password and wait for it to expire') + change_passwd(topo) + time.sleep(6) + + log.info('Bind and use up one grace login (only one left)') + ctrls = bind_and_get_control(topo) + if ctrls is None or len(ctrls) == 0: + log.fatal('Did not get EXPIRED control in resposne') + assert False + else: + if int(ctrls[0].graceAuthNsRemaining) != 1: + log.fatal('Got unexpected value for grace logins: {}'.format(ctrls[0].graceAuthNsRemaining)) + assert False + + log.info('Use up last grace login, should get control') + ctrls = bind_and_get_control(topo) + if ctrls is None or len(ctrls) == 0: + log.fatal('Did not get control in response') + assert False + + log.info('No grace login available, bind should fail, and no control should be returned') + ctrls = bind_and_get_control(topo, err=49) + if ctrls and len(ctrls) > 0: + log.fatal('Incorrectly got control in response') + assert False + + +def test_pwd_expiring_with_warning(topo, init_user): + """Test expiring control response before and after warning is sent + + :id: 3594431f-e681-4a04-8edb-33ad2d9dad5b + :setup: Standalone instance, a user for testing + :steps: + 1. Configure password policy, and reset password + 2. Check for EXPIRING control, and the "time to expire" + 3. Bind again, as a warning has now been sent, and check the "time to expire" + :expectedresults: + 1. Configuration update and password reset are successful + 2. Get the EXPIRING control, and the expected "time to expire" values + 3. 
Get the EXPIRING control, and the expected "time to expire" values + """ + + log.info('Configure password policy') + topo.standalone.config.set('passwordExp', 'on') + topo.standalone.config.set('passwordMaxAge', '50') + topo.standalone.config.set('passwordWarning', '50') + + log.info('Change password and get controls') + change_passwd(topo) + ctrls = bind_and_get_control(topo) + if ctrls is None or len(ctrls) == 0: + log.fatal('Did not get EXPIRING control in response') + assert False + + if int(ctrls[0].timeBeforeExpiration) < 50: + log.fatal('Got unexpected value for timeBeforeExpiration: {}'.format(ctrls[0].timeBeforeExpiration)) + assert False + + log.info('Warning has been sent, try the bind again, and recheck the expiring time') + time.sleep(5) + ctrls = bind_and_get_control(topo) + if ctrls is None or len(ctrls) == 0: + log.fatal('Did not get EXPIRING control in resposne') + assert False + + if int(ctrls[0].timeBeforeExpiration) > 50: + log.fatal('Got unexpected value for timeBeforeExpiration: {}'.format(ctrls[0].timeBeforeExpiration)) + assert False + + +def test_pwd_expiring_with_no_warning(topo, init_user): + """Test expiring control response when no warning is sent + + :id: a3d99be5-0b69-410d-b72f-04eda8821a54 + :setup: Standalone instance, a user for testing + :steps: + 1. Configure password policy, and reset password + 2. Bind, and check that no controls are returned + 3. Set passwordSendExpiringTime to "on", bind, and check that the + EXPIRING control is returned + :expectedresults: + 1. Configuration update and password reset are successful + 2. No control is returned from bind + 3. 
A control is returned after setting "passwordSendExpiringTime" + """ + + log.info('Configure password policy') + topo.standalone.config.set('passwordExp', 'on') + topo.standalone.config.set('passwordMaxAge', '50') + topo.standalone.config.set('passwordWarning', '5') + + log.info('When the warning is less than the max age, we never send expiring control response') + change_passwd(topo) + ctrls = bind_and_get_control(topo) + if len(ctrls) > 0: + log.fatal('Incorrectly got a response control: {}'.format(ctrls)) + assert False + + log.info('Turn on sending expiring control regardless of warning') + topo.standalone.config.set('passwordSendExpiringTime', 'on') + + ctrls = bind_and_get_control(topo) + if ctrls is None or len(ctrls) == 0: + log.fatal('Did not get EXPIRED control in response') + assert False + + if int(ctrls[0].timeBeforeExpiration) < 49: + log.fatal('Got unexpected value for time before expiration: {}'.format(ctrls[0].timeBeforeExpiration)) + assert False + + log.info('Check expiring time again') + time.sleep(6) + ctrls = bind_and_get_control(topo) + if ctrls is None or len(ctrls) == 0: + log.fatal('Did not get EXPIRED control in resposne') + assert False + + if int(ctrls[0].timeBeforeExpiration) > 51: + log.fatal('Got unexpected value for time before expiration: {}'.format(ctrls[0].timeBeforeExpiration)) + assert False + + log.info('Turn off sending expiring control (restore the default setting)') + topo.standalone.config.set('passwordSendExpiringTime', 'off') + +if __name__ == '__main__': + # Run isolated + # -s for DEBUG mode + CURRENT_FILE = os.path.realpath(__file__) + pytest.main("-s %s" % CURRENT_FILE) diff --git a/dirsrvtests/tests/suites/password/pwdPolicy_inherit_global_test.py b/dirsrvtests/tests/suites/password/pwdPolicy_inherit_global_test.py new file mode 100644 index 0000000..1fe57c3 --- /dev/null +++ b/dirsrvtests/tests/suites/password/pwdPolicy_inherit_global_test.py @@ -0,0 +1,212 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2019 
Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. +# --- END COPYRIGHT BLOCK --- +# +import logging +import time + +import ldap +import pytest +from lib389.utils import * +from lib389._constants import * +from lib389.pwpolicy import PwPolicyManager +from lib389.topologies import topology_st +from lib389.idm.organizationalunit import OrganizationalUnits +from lib389.idm.user import UserAccounts, TEST_USER_PROPERTIES + +pytestmark = pytest.mark.tier1 + +logging.getLogger(__name__).setLevel(logging.INFO) +log = logging.getLogger(__name__) + +OU_PEOPLE = 'ou=People,' + DEFAULT_SUFFIX +ATTR_INHERIT_GLOBAL = 'nsslapd-pwpolicy-inherit-global' +ATTR_CHECK_SYNTAX = 'passwordCheckSyntax' + +BN = 'uid=buser,' + OU_PEOPLE +TEMP_USER = 'cn=test{}' +TEMP_USER_DN = '%s,%s' % (TEMP_USER, OU_PEOPLE) + + +@pytest.fixture(scope="module") +def create_user(topology_st, request): + """User for binding operation""" + + log.info('Adding user {}'.format(BN)) + + users = UserAccounts(topology_st.standalone, OU_PEOPLE, rdn=None) + user_props = TEST_USER_PROPERTIES.copy() + user_props.update({'uid': 'buser', 'cn': 'buser', 'userpassword': PASSWORD}) + user = users.create(properties=user_props) + + log.info('Adding an aci for the bind user') + BN_ACI = '(targetattr="*")(version 3.0; acl "pwp test"; allow (all) userdn="ldap:///%s";)' % user.dn + ous = OrganizationalUnits(topology_st.standalone, DEFAULT_SUFFIX) + ou_people = ous.get('people') + ou_people.add('aci', BN_ACI) + + def fin(): + log.info('Deleting user {}'.format(BN)) + user.delete() + ous = OrganizationalUnits(topology_st.standalone, DEFAULT_SUFFIX) + ou_people = ous.get('people') + ou_people.remove('aci', BN_ACI) + + request.addfinalizer(fin) + + +@pytest.fixture(scope="module") +def password_policy(topology_st, create_user): + """Set global password policy. + Then, set fine-grained subtree level password policy + to ou=People with no password syntax. 
+ + Note: do not touch nsslapd-pwpolicy-inherit-global -- off by default + """ + + log.info('Enable fine-grained policy') + pwp = PwPolicyManager(topology_st.standalone) + policy_props = { + 'passwordMustChange': 'off', + 'passwordExp': 'off', + 'passwordMinAge': '0', + 'passwordChange': 'off', + 'passwordStorageScheme': 'ssha' + } + pwp.create_subtree_policy(OU_PEOPLE, policy_props) + check_attr_val(topology_st.standalone, ATTR_INHERIT_GLOBAL, 'off') + check_attr_val(topology_st.standalone, ATTR_CHECK_SYNTAX, 'off') + + +def check_attr_val(inst, attr, expected): + """Check that entry has the value""" + + val = inst.config.get_attr_val_utf8(attr) + assert val == expected, 'Default value of %s is not %s, but %s' % ( + attr, expected, val) + + log.info('Default value of %s is %s' % (attr, expected)) + + +@pytest.mark.parametrize('inherit_value,checksyntax_value', + [('off', 'off'), ('on', 'off'), ('off', 'on')]) +def test_entry_has_no_restrictions(topology_st, password_policy, create_user, + inherit_value, checksyntax_value): + """Make sure an entry added to ou=people has no password syntax restrictions + + :id: 2f07ff40-76ca-45a9-a556-331c94084945 + :parametrized: yes + :setup: Standalone instance, test user, + password policy entries for a subtree + :steps: + 1. Bind as test user + 2. Set 'nsslapd-pwpolicy-inherit-global' and + 'passwordCheckSyntax' accordingly: + 'off' and 'off'; 'on' and 'off'; 'off' and 'on' + 3. Try to add user with a short password + 4. Cleanup - remove temp user bound as DM + :expectedresults: + 1. Bind should be successful + 2. Attributes should be successfully set + 3. No exceptions should occur + 4. 
Operation should be successful + """ + + log.info('Set {} to {}'.format(ATTR_INHERIT_GLOBAL, inherit_value)) + log.info('Set {} to {}'.format(ATTR_CHECK_SYNTAX, checksyntax_value)) + topology_st.standalone.config.set(ATTR_INHERIT_GLOBAL, inherit_value) + topology_st.standalone.config.set(ATTR_CHECK_SYNTAX, checksyntax_value) + + # Wait a second for cn=config to apply + time.sleep(1) + check_attr_val(topology_st.standalone, ATTR_INHERIT_GLOBAL, inherit_value) + check_attr_val(topology_st.standalone, ATTR_CHECK_SYNTAX, checksyntax_value) + + log.info('Bind as test user') + topology_st.standalone.simple_bind_s(BN, PASSWORD) + + log.info('Make sure an entry added to ou=people has ' + 'no password syntax restrictions.') + + users = UserAccounts(topology_st.standalone, OU_PEOPLE, rdn=None) + user_props = TEST_USER_PROPERTIES.copy() + user_props.update({'cn': 'test0', 'userpassword': 'short'}) + user = users.create(properties=user_props) + + topology_st.standalone.simple_bind_s(DN_DM, PASSWORD) + + # Remove test user + user.delete() + + +def test_entry_has_restrictions(topology_st, password_policy, create_user): + """Set 'nsslapd-pwpolicy-inherit-global: on' and 'passwordCheckSyntax: on'. + Make sure that syntax rules work, if set them at both: cn=config and + ou=people policy container. + + :id: 4bb0f474-17c1-40f7-aab4-4ddc17d019e8 + :setup: Standalone instance, test user, + password policy entries for a subtree + :steps: + 1. Bind as test user + 2. Switch 'nsslapd-pwpolicy-inherit-global: on' + 3. Switch 'passwordCheckSyntax: on' + 4. Set 'passwordMinLength: 9' to: + cn=config and ou=people policy container + 5. Try to add user with a short password (<9) + 6. Try to add user with a long password (>9) + 7. Cleanup - remove temp users bound as DM + :expectedresults: + 1. Bind should be successful + 2. nsslapd-pwpolicy-inherit-global should be successfully set + 3. passwordCheckSyntax should be successfully set + 4. passwordMinLength should be successfully set + 5. 
User should be rejected + 6. User should be rejected + 7. Operation should be successful + """ + + log.info('Set {} to {}'.format(ATTR_INHERIT_GLOBAL, 'on')) + log.info('Set {} to {}'.format(ATTR_CHECK_SYNTAX, 'on')) + topology_st.standalone.config.set(ATTR_INHERIT_GLOBAL, 'on') + topology_st.standalone.config.set(ATTR_CHECK_SYNTAX, 'on') + + pwp = PwPolicyManager(topology_st.standalone) + policy = pwp.get_pwpolicy_entry(OU_PEOPLE) + policy.set('passwordMinLength', '9') + + # Wait a second for cn=config to apply + time.sleep(1) + check_attr_val(topology_st.standalone, ATTR_INHERIT_GLOBAL, 'on') + check_attr_val(topology_st.standalone, ATTR_CHECK_SYNTAX, 'on') + + log.info('Bind as test user') + topology_st.standalone.simple_bind_s(BN, PASSWORD) + users = UserAccounts(topology_st.standalone, OU_PEOPLE, rdn=None) + user_props = TEST_USER_PROPERTIES.copy() + + log.info('Try to add user with a short password (<9)') + with pytest.raises(ldap.CONSTRAINT_VIOLATION): + user_props.update({'cn': 'test0', 'userpassword': 'short'}) + user = users.create(properties=user_props) + + log.info('Try to add user with a long password (>9)') + user_props.update({'cn': 'test1', 'userpassword': 'Reallylong1'}) + user = users.create(properties=user_props) + + log.info('Bind as DM user') + topology_st.standalone.simple_bind_s(DN_DM, PASSWORD) + + # Remove test user 1 + user.delete() + + +if __name__ == '__main__': + # Run isolated + # -s for DEBUG mode + CURRENT_FILE = os.path.realpath(__file__) + pytest.main("-s %s" % CURRENT_FILE) diff --git a/dirsrvtests/tests/suites/password/pwdPolicy_logging_test.py b/dirsrvtests/tests/suites/password/pwdPolicy_logging_test.py new file mode 100644 index 0000000..cd19cc9 --- /dev/null +++ b/dirsrvtests/tests/suites/password/pwdPolicy_logging_test.py @@ -0,0 +1,160 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2022 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. 
+# --- END COPYRIGHT BLOCK --- +# +import ldap +import logging +import pytest +import os +import time +from lib389._constants import DEFAULT_SUFFIX, PASSWORD +from lib389.topologies import topology_st as topo +from lib389.pwpolicy import PwPolicyManager +from lib389.idm.organizationalunit import OrganizationalUnits +from lib389.idm.user import UserAccount, UserAccounts, TEST_USER_PROPERTIES +from lib389.idm.domain import Domain + +log = logging.getLogger(__name__) + +LOCAL_RDN = 'ou=People' +GLOBAL_RDN = 'ou=Global' +LOCAL_BIND_DN ="uid=local_user,ou=people," + DEFAULT_SUFFIX +GLOBAL_BIND_DN ="uid=global_user,ou=global," + DEFAULT_SUFFIX + + +def create_entries(inst): + # Create local user + users = UserAccounts(inst, DEFAULT_SUFFIX, rdn=LOCAL_RDN) + user_props = TEST_USER_PROPERTIES.copy() + user_props.update({'uid': 'local_user', 'cn': 'local_user', 'userpassword': PASSWORD}) + users.create(properties=user_props) + + # Create new OU + ou_global = OrganizationalUnits(inst, DEFAULT_SUFFIX).create(properties={'ou': 'Global'}) + + # Create global user + users = UserAccounts(inst, DEFAULT_SUFFIX, rdn=GLOBAL_RDN) + user_props = TEST_USER_PROPERTIES.copy() + user_props.update({'uid': 'global_user', 'cn': 'global_user', 'userpassword': PASSWORD}) + users.create(properties=user_props) + + # Add aci + aci = '(targetattr="*")(version 3.0; acl "pwp test"; allow (all) userdn="ldap:///self";)' + suffix = Domain(inst, DEFAULT_SUFFIX) + suffix.add('aci', aci) + + +def create_policies(inst): + # Configure subtree policy + pwp = PwPolicyManager(inst) + subtree_policy_props = { + 'passwordCheckSyntax': 'on', + 'passwordMinLength': '6', + 'passwordChange': 'on', + 'passwordLockout': 'on', + 'passwordMaxFailure': '2', + } + pwp.create_subtree_policy(f'{LOCAL_RDN},{DEFAULT_SUFFIX}', subtree_policy_props) + + # Configure global policy + inst.config.replace('nsslapd-pwpolicy-local', 'on') + inst.config.replace('passwordCheckSyntax', 'on') + inst.config.replace('passwordMinLength', '8') 
+ inst.config.replace('passwordChange', 'on') + inst.config.replace('passwordLockout', 'on') + inst.config.replace('passwordMaxFailure', '5') + time.sleep(1) + + +def test_debug_logging(topo): + """Enable password policy logging + + :id: cc152c65-94e0-4716-a77c-abdd2deec00d + :setup: Standalone Instance + :steps: + 1. Set password policy logging level + 2. Add database entries + 3. Configure local and global policies + 4. Test syntax checking on local policy + 5. Test syntax checking on global policy + 6. Test account lockout on local policy + 7. Test account lockout on global policy + + :expectedresults: + 1. Success + 2. Success + 3. Success + 4. Success + 5. Success + 6. Success + 7. Success + """ + + inst = topo.standalone + + # Enable password policy debug logging + inst.config.replace('nsslapd-errorlog-level', '1048576') + + # Create entries and password policies + create_entries(inst) + create_policies(inst) + + # Setup bind connections + + local_conn = UserAccounts(inst, DEFAULT_SUFFIX, rdn=LOCAL_RDN).get('local_user').bind(PASSWORD) + local_user = UserAccount(local_conn, LOCAL_BIND_DN) + global_conn = UserAccounts(inst, DEFAULT_SUFFIX, rdn=GLOBAL_RDN).get('global_user').bind(PASSWORD) + global_user = UserAccount(global_conn, GLOBAL_BIND_DN) + + # Test syntax checking on local policy + passwd_val = "passw" # len 5 which is less than configured 6 + with pytest.raises(ldap.CONSTRAINT_VIOLATION): + local_user.replace('userpassword', passwd_val) + time.sleep(1) + + err_msg = "PWDPOLICY_DEBUG - invalid password syntax - password must be at least 6 characters long: Entry " + \ + "\\(uid=local_user,ou=people,dc=example,dc=com\\) Policy \\(cn=" + assert inst.searchErrorsLog(err_msg) + + # Test syntax checking on global policy + passwd_val = "passwod" # len 7 which is less than configured 8 + with pytest.raises(ldap.CONSTRAINT_VIOLATION): + global_user.replace('userpassword', passwd_val) + time.sleep(1) + + err_msg = "PWDPOLICY_DEBUG - invalid password syntax - 
password must be at least 8 characters long: Entry " + \ + "\\(uid=global_user,ou=global,dc=example,dc=com\\) Policy \\(Global\\)" + assert inst.searchErrorsLog(err_msg) + + # Test account lock is logging for local policy + for i in range(2): + with pytest.raises(ldap.INVALID_CREDENTIALS): + local_user.bind("bad") + with pytest.raises(ldap.CONSTRAINT_VIOLATION): + local_user.bind("bad") + + err_msg = "PWDPOLICY_DEBUG - Account is locked: Entry " + \ + "\\(uid=local_user,ou=people,dc=example,dc=com\\) Policy \\(cn=" + assert inst.searchErrorsLog(err_msg) + + # Test account lock is logging for global policy + for i in range(5): + with pytest.raises(ldap.INVALID_CREDENTIALS): + global_user.bind("bad") + with pytest.raises(ldap.CONSTRAINT_VIOLATION): + global_user.bind("bad") + + err_msg = "PWDPOLICY_DEBUG - Account is locked: Entry " + \ + "\\(uid=global_user,ou=global,dc=example,dc=com\\) Policy \\(Global\\)" + assert inst.searchErrorsLog(err_msg) + + +if __name__ == '__main__': + # Run isolated + # -s for DEBUG mode + CURRENT_FILE = os.path.realpath(__file__) + pytest.main(["-s", CURRENT_FILE]) diff --git a/dirsrvtests/tests/suites/password/pwdPolicy_syntax_test.py b/dirsrvtests/tests/suites/password/pwdPolicy_syntax_test.py new file mode 100644 index 0000000..3d72e87 --- /dev/null +++ b/dirsrvtests/tests/suites/password/pwdPolicy_syntax_test.py @@ -0,0 +1,381 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2020 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. 
+# --- END COPYRIGHT BLOCK --- +# +import logging + +import pytest +from lib389.tasks import * +from lib389.utils import * +from lib389.topologies import topology_st +from lib389._constants import DEFAULT_SUFFIX, PASSWORD, DN_DM +from lib389.idm.domain import Domain +from lib389.idm.user import UserAccounts +from lib389.idm.organizationalunit import OrganizationalUnits + +pytestmark = pytest.mark.tier1 + +USER_DN = 'uid=user,ou=People,%s' % DEFAULT_SUFFIX +USER_RDN = 'user' +USER_ACI = '(targetattr="userpassword")(version 3.0; acl "pwp test"; allow (all) userdn="ldap:///self";)' + +logging.getLogger(__name__).setLevel(logging.INFO) +log = logging.getLogger(__name__) + + +@pytest.fixture(scope="module") +def password_policy(topology_st): + """Set global password policy""" + + log.info('Enable global password policy. Check for syntax.') + topology_st.standalone.config.set('passwordCheckSyntax', 'on') + topology_st.standalone.config.set('nsslapd-pwpolicy-local', 'off') + topology_st.standalone.config.set('passwordMinCategories', '1') + + # Add self user modification and anonymous aci + USER_SELF_MOD_ACI = '(targetattr="userpassword")(version 3.0; acl "pwp test"; allow (all) userdn="ldap:///self";)' + ANON_ACI = "(targetattr=\"*\")(version 3.0; acl \"Anonymous Read access\"; allow (read,search,compare) userdn = \"ldap:///anyone\";)" + suffix = Domain(topology_st.standalone, DEFAULT_SUFFIX) + suffix.add('aci', USER_SELF_MOD_ACI) + suffix.add('aci', ANON_ACI) + + +@pytest.fixture(scope="module") +def create_user(topology_st): + """Create the test user.""" + users = UserAccounts(topology_st.standalone, DEFAULT_SUFFIX) + users.create(properties={ + 'uid': USER_RDN, + 'cn': USER_RDN, + 'sn': USER_RDN, + 'uidNumber': '3000', + 'gidNumber': '4000', + 'homeDirectory': '/home/user', + 'description': 'd_e_s_c', + 'loginShell': USER_RDN, + 'userPassword': PASSWORD + }) + + +def setPolicy(inst, attr, value): + """Bind as Root DN, set policy, and then bind as user""" + + 
inst.simple_bind_s(DN_DM, PASSWORD) + + # Set the policy value + value = str(value) + inst.config.set(attr, value) + + policy = inst.config.get_attr_val_utf8(attr) + assert policy == value + + +def resetPasswd(inst): + """Reset the user password for the next test""" + + # First, bind as the ROOT DN so we can set the password + inst.simple_bind_s(DN_DM, PASSWORD) + + # Now set the password + users = UserAccounts(inst, DEFAULT_SUFFIX) + user = users.get(USER_RDN) + user.reset_password(PASSWORD) + + +def tryPassword(inst, policy_attr, value, reset_value, pw_bad, pw_good, msg): + """Attempt to change the users password + inst: DirSrv Object + password: password + msg - error message if failure + """ + + setPolicy(inst, policy_attr, value) + inst.simple_bind_s(USER_DN, PASSWORD) + users = UserAccounts(inst, DEFAULT_SUFFIX) + user = users.get(USER_RDN) + try: + user.reset_password(pw_bad) + log.fatal('Invalid password was unexpectedly accepted (%s)' % + (policy_attr)) + assert False + except ldap.CONSTRAINT_VIOLATION: + log.info('Invalid password correctly rejected by %s: %s' % + (policy_attr, msg)) + pass + except ldap.LDAPError as e: + log.fatal("Failed to change password: " + str(e)) + assert False + + # Change password that is allowed + user.reset_password(pw_good) + + # Reset for the next test + resetPasswd(inst) + setPolicy(inst, policy_attr, reset_value) + + +def test_basic(topology_st, create_user, password_policy): + """Ensure that on a password change, the policy syntax + is enforced correctly. + + :id: e8de7029-7fa6-4e96-9eb6-4a121f4c8fb3 + :customerscenario: True + :setup: Standalone instance, a test user, + global password policy with: + passwordCheckSyntax - on; nsslapd-pwpolicy-local - off; + passwordMinCategories - 1 + :steps: + 1. Set passwordMinLength to 10 in cn=config + 2. Set userPassword to 'passwd' in cn=config + 3. Set userPassword to 'password123' in cn=config + 4. Set passwordMinLength to 2 in cn=config + 5. 
Set passwordMinDigits to 2 in cn=config + 6. Set userPassword to 'passwd' in cn=config + 7. Set userPassword to 'password123' in cn=config + 8. Set passwordMinDigits to 0 in cn=config + 9. Set passwordMinAlphas to 2 in cn=config + 10. Set userPassword to 'p123456789' in cn=config + 11. Set userPassword to 'password123' in cn=config + 12. Set passwordMinAlphas to 0 in cn=config + 13. Set passwordMaxRepeats to 2 in cn=config + 14. Set userPassword to 'password' in cn=config + 15. Set userPassword to 'password123' in cn=config + 16. Set passwordMaxRepeats to 0 in cn=config + 17. Set passwordMinSpecials to 2 in cn=config + 18. Set userPassword to 'passwd' in cn=config + 19. Set userPassword to 'password_#$' in cn=config + 20. Set passwordMinSpecials to 0 in cn=config + 21. Set passwordMinLowers to 2 in cn=config + 22. Set userPassword to 'PASSWORD123' in cn=config + 23. Set userPassword to 'password123' in cn=config + 24. Set passwordMinLowers to 0 in cn=config + 25. Set passwordMinUppers to 2 in cn=config + 26. Set userPassword to 'password' in cn=config + 27. Set userPassword to 'PASSWORD' in cn=config + 28. Set passwordMinUppers to 0 in cn=config + 29. Test passwordDictCheck + 30. Test passwordPalindrome + 31. Test passwordMaxSequence for forward number sequence + 32. Test passwordMaxSequence for backward number sequence + 33. Test passwordMaxSequence for forward alpha sequence + 34. Test passwordMaxSequence for backward alpha sequence + 35. Test passwordMaxClassChars for digits + 36. Test passwordMaxClassChars for specials + 37. Test passwordMaxClassChars for lowers + 38. Test passwordMaxClassChars for uppers + 39. Test passwordBadWords using 'redhat' and 'fedora' + 40. Test passwordUserAttrs using description attribute + + :expectedresults: + 1. passwordMinLength should be successfully set + 2. Password should be rejected because length too short + 3. Password should be accepted + 4. passwordMinLength should be successfully set + 5. 
passwordMinDigits should be successfully set + 6. Password should be rejected because + it does not contain minimum number of digits + 7. Password should be accepted + 8. passwordMinDigits should be successfully set + 9. passwordMinAlphas should be successfully set + 10. Password should be rejected because + it does not contain minimum number of alphas + 11. Password should be accepted + 12. passwordMinAlphas should be successfully set + 13. passwordMaxRepeats should be successfully set + 14. Password should be rejected because too many repeating characters + 15. Password should be accepted + 16. passwordMaxRepeats should be successfully set + 17. passwordMinSpecials should be successfully set + 18. Password should be rejected because + it does not contain minimum number of special characters + 19. Password should be accepted + 20. passwordMinSpecials should be successfully set + 21. passwordMinLowers should be successfully set + 22. Password should be rejected because + it does not contain minimum number of lowercase characters + 23. Password should be accepted + 24. passwordMinLowers should be successfully set + 25. passwordMinUppers should be successfully set + 26. Password should be rejected because + it does not contain minimum number of lowercase characters + 27. Password should be accepted + 28. passwordMinUppers should be successfully set + 29. The passwordDictCheck test succeeds + 30. The passwordPalindrome test succeeds + 31. Test passwordMaxSequence for forward number sequence succeeds + 32. Test passwordMaxSequence for backward number sequence succeeds + 33. Test passwordMaxSequence for forward alpha sequence succeeds + 34. Test passwordMaxSequence for backward alpha sequence succeeds + 35. Test passwordMaxClassChars for digits succeeds + 36. Test passwordMaxClassChars for specials succeeds + 37. Test passwordMaxClassChars for lowers succeeds + 38. Test passwordMaxClassChars for uppers succeeds + 39. The passwordBadWords test succeeds + 40. 
The passwordUserAttrs test succeeds + """ + + # + # Test each syntax category + # + ous = OrganizationalUnits(topology_st.standalone, DEFAULT_SUFFIX) + ou = ous.get('people') + ou.add('aci', USER_ACI) + + # Min Length + tryPassword(topology_st.standalone, 'passwordMinLength', 10, 2, 'passwd', + 'password123', 'length too short') + # Min Digit + tryPassword(topology_st.standalone, 'passwordMinDigits', 2, 0, 'passwd', + 'password123', 'does not contain minimum number of digits') + # Min Alphas + tryPassword(topology_st.standalone, 'passwordMinAlphas', 2, 0, 'p123456789', + 'password123', 'does not contain minimum number of alphas') + # Max Repeats + tryPassword(topology_st.standalone, 'passwordMaxRepeats', 2, 0, 'passsword', + 'password123', 'too many repeating characters') + # Min Specials + tryPassword(topology_st.standalone, 'passwordMinSpecials', 2, 0, 'passwd', + 'password_#$', + 'does not contain minimum number of special characters') + # Min Lowers + tryPassword(topology_st.standalone, 'passwordMinLowers', 2, 0, 'PASSWORD123', + 'password123', + 'does not contain minimum number of lowercase characters') + # Min Uppers + tryPassword(topology_st.standalone, 'passwordMinUppers', 2, 0, 'password', + 'PASSWORD', + 'does not contain minimum number of lowercase characters') + # Min 8-bits - "ldap" package only accepts ascii strings at the moment + + if ds_is_newer('1.4.0.13'): + # Dictionary check + tryPassword(topology_st.standalone, 'passwordDictCheck', 'on', 'on', 'PASSWORD', + '13_#Kad472h', 'Password found in dictionary') + + # Palindromes + tryPassword(topology_st.standalone, 'passwordPalindrome', 'on', 'on', 'Za12_#_21aZ', + '13_#Kad472h', 'Password is palindrome') + + # Sequences + tryPassword(topology_st.standalone, 'passwordMaxSequence', 3, 0, 'Za1_1234', + '13_#Kad472h', 'Max monotonic sequence is not allowed') + tryPassword(topology_st.standalone, 'passwordMaxSequence', 3, 0, 'Za1_4321', + '13_#Kad472h', 'Max monotonic sequence is not allowed') + 
tryPassword(topology_st.standalone, 'passwordMaxSequence', 3, 0, 'Za1_abcd', + '13_#Kad472h', 'Max monotonic sequence is not allowed') + tryPassword(topology_st.standalone, 'passwordMaxSequence', 3, 0, 'Za1_dcba', + '13_#Kad472h', 'Max monotonic sequence is not allowed') + + # Sequence Sets + tryPassword(topology_st.standalone, 'passwordMaxSeqSets', 2, 0, 'Za1_123--123', + '13_#Kad472h', 'Max monotonic sequence is not allowed') + + # Max characters in a character class + tryPassword(topology_st.standalone, 'passwordMaxClassChars', 3, 0, 'Za1_9376', + '13_#Kad472h', 'Too may consecutive characters from the same class') + tryPassword(topology_st.standalone, 'passwordMaxClassChars', 3, 0, 'Za1_#$&!', + '13_#Kad472h', 'Too may consecutive characters from the same class') + tryPassword(topology_st.standalone, 'passwordMaxClassChars', 3, 0, 'Za1_ahtf', + '13_#Kad472h', 'Too may consecutive characters from the same class') + tryPassword(topology_st.standalone, 'passwordMaxClassChars', 3, 0, 'Za1_HTSE', + '13_#Kad472h', 'Too may consecutive characters from the same class') + + # Bad words + tryPassword(topology_st.standalone, 'passwordBadWords', 'redhat', 'none', 'Za1_redhat', + '13_#Kad472h', 'Too may consecutive characters from the same class') + + # User Attributes + tryPassword(topology_st.standalone, 'passwordUserAttributes', 'description', 0, 'Za1_d_e_s_c', + '13_#Kad472h', 'Password found in user entry') + + +@pytest.mark.bz1816857 +@pytest.mark.ds50875 +@pytest.mark.skipif(ds_is_older("1.4.1.18"), reason="Not implemented") +def test_config_set_few_user_attributes(topology_st, create_user, password_policy): + """Test that we can successfully set multiple values to passwordUserAttributes + + :id: 188e0aee-6e29-4857-910c-27d5606f8c08 + :setup: Standalone instance + :steps: + 1. Set passwordUserAttributes to "description loginShell" + 2. Verify passwordUserAttributes has the values + 3. Verify passwordUserAttributes enforced the policy + :expectedresults: + 1. 
Operation should be successful + 2. Operation should be successful + 3. Operation should be successful + """ + + standalone = topology_st.standalone + standalone.simple_bind_s(DN_DM, PASSWORD) + standalone.log.info('Set passwordUserAttributes to "description loginShell"') + standalone.config.set('passwordUserAttributes', 'description loginshell') + standalone.restart() + + standalone.log.info("Verify passwordUserAttributes has the values") + user_attrs = standalone.config.get_attr_val_utf8('passwordUserAttributes') + assert "description" in user_attrs + assert "loginshell" in user_attrs + standalone.log.info("Reset passwordUserAttributes") + standalone.config.remove_all('passwordUserAttributes') + + standalone.log.info("Verify passwordUserAttributes enforced the policy") + attributes = ['description, loginShell', 'description,loginShell', 'description loginShell'] + values = ['Za1_d_e_s_c', f'Za1_{USER_RDN}', f'Za1_d_e_s_c{USER_RDN}'] + for attr in attributes: + for value in values: + tryPassword(standalone, 'passwordUserAttributes', attr, 0, value, + '13_#Kad472h', 'Password found in user entry') + + +@pytest.mark.bz1816857 +@pytest.mark.ds50875 +@pytest.mark.skipif(ds_is_older("1.4.1.18"), reason="Not implemented") +def test_config_set_few_bad_words(topology_st, create_user, password_policy): + """Test that we can successfully set multiple values to passwordBadWords + + :id: 2977094c-921c-4b2f-af91-4c7a45ded48b + :setup: Standalone instance + :steps: + 1. Set passwordBadWords to "fedora redhat" + 2. Verify passwordBadWords has the values + 3. Verify passwordBadWords enforced the policy + :expectedresults: + 1. Operation should be successful + 2. Operation should be successful + 3. 
Operation should be successful + """ + + standalone = topology_st.standalone + standalone.simple_bind_s(DN_DM, PASSWORD) + standalone.log.info('Set passwordBadWords to "fedora redhat"') + standalone.config.set('passwordBadWords', 'fedora redhat') + + standalone.restart() + + standalone.log.info("Verify passwordBadWords has the values") + user_attrs = standalone.config.get_attr_val_utf8('passwordBadWords') + assert "fedora" in user_attrs + assert "redhat" in user_attrs + standalone.log.info("Reset passwordBadWords") + standalone.config.remove_all('passwordBadWords') + + standalone.log.info("Verify passwordBadWords enforced the policy") + attributes = ['redhat, fedora', 'redhat,fedora', 'redhat fedora'] + values = ['Za1_redhat_fedora', 'Za1_fedora', 'Za1_redhat'] + for attr in attributes: + for value in values: + tryPassword(standalone, 'passwordBadWords', attr, 'none', value, + '13_#Kad472h', 'Too may consecutive characters from the same class') + + +if __name__ == '__main__': + # Run isolated + # -s for DEBUG mode + CURRENT_FILE = os.path.realpath(__file__) + pytest.main("-s %s" % CURRENT_FILE) diff --git a/dirsrvtests/tests/suites/password/pwdPolicy_temporary_password.py b/dirsrvtests/tests/suites/password/pwdPolicy_temporary_password.py new file mode 100644 index 0000000..b000b97 --- /dev/null +++ b/dirsrvtests/tests/suites/password/pwdPolicy_temporary_password.py @@ -0,0 +1,1154 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2016 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. 
+# --- END COPYRIGHT BLOCK --- +# +import pytest +from lib389.tasks import * +from lib389.utils import * +import pdb +from lib389.topologies import topology_st +from lib389.pwpolicy import PwPolicyManager +from lib389.idm.user import UserAccount, UserAccounts, TEST_USER_PROPERTIES +from lib389.idm.organizationalunit import OrganizationalUnits +from lib389._constants import (DEFAULT_SUFFIX, DN_DM, PASSWORD) + +pytestmark = pytest.mark.tier1 + +OU_PEOPLE = 'ou=people,{}'.format(DEFAULT_SUFFIX) +TEST_USER_NAME = 'simplepaged_test' +TEST_USER_DN = 'uid={},{}'.format(TEST_USER_NAME, OU_PEOPLE) +TEST_USER_PWD = 'simplepaged_test' +PW_POLICY_CONT_USER = 'cn="cn=nsPwPolicyEntry,uid=simplepaged_test,' \ + 'ou=people,dc=example,dc=com",' \ + 'cn=nsPwPolicyContainer,ou=people,dc=example,dc=com' +PW_POLICY_CONT_PEOPLE = 'cn="cn=nsPwPolicyEntry,' \ + 'ou=people,dc=example,dc=com",' \ + 'cn=nsPwPolicyContainer,ou=people,dc=example,dc=com' + +logging.getLogger(__name__).setLevel(logging.INFO) +log = logging.getLogger(__name__) + + +@pytest.fixture(scope="module") +def test_user(topology_st, request): + """User for binding operation""" + topology_st.standalone.config.set('nsslapd-auditlog-logging-enabled', 'on') + log.info('Adding test user {}') + users = UserAccounts(topology_st.standalone, OU_PEOPLE, rdn=None) + user_props = TEST_USER_PROPERTIES.copy() + user_props.update({'uid': TEST_USER_NAME, 'userpassword': TEST_USER_PWD}) + try: + user = users.create(properties=user_props) + except: + pass # debug only + + USER_ACI = '(targetattr="*")(version 3.0; acl "pwp test"; allow (all) userdn="ldap:///%s";)' % user.dn + ous = OrganizationalUnits(topology_st.standalone, DEFAULT_SUFFIX) + ou_people = ous.get('people') + ou_people.add('aci', USER_ACI) + + def fin(): + log.info('Deleting user {}'.format(user.dn)) + topology_st.standalone.simple_bind_s(DN_DM, PASSWORD) + + request.addfinalizer(fin) + return user + +def test_global_tpr_maxuse_1(topology_st, test_user, request): + """Test 
global TPR policy : passwordTPRMaxUse + Test that after passwordTPRMaxUse failures to bind + additional bind with valid password are failing with CONSTRAINT_VIOLATION + + :id: d1b38436-806c-4671-8ccf-c8fdad21f034 + :customerscenario: False + :setup: Standalone instance + :steps: + 1. Enable passwordMustChange + 2. Set passwordTPRMaxUse=5 + 3. Set passwordMaxFailure to a higher value to not disturb the test + 4. Bind with a wrong password passwordTPRMaxUse times and check INVALID_CREDENTIALS + 5. Check that passwordTPRRetryCount got to the limit (5) + 6. Bind with a wrong password (CONSTRAINT_VIOLATION) + and check passwordTPRRetryCount overpass the limit by 1 (6) + 7. Bind with a valid password 5 times and check CONSTRAINT_VIOLATION + and check passwordTPRRetryCount overpass the limit by 1 (6) + 8. Reset password policy configuration + :expectedresults: + 1. Success + 2. Success + 3. Success + 4. Success + 5. Success + 6. Success + 7. Success + 8. Success + """ + + try_tpr_failure = 5 + # Set password policy config, passwordMaxFailure being higher than + # passwordTPRMaxUse so that TPR is enforced first + topology_st.standalone.config.replace('passwordMustChange', 'on') + topology_st.standalone.config.replace('passwordMaxFailure', str(try_tpr_failure + 20)) + topology_st.standalone.config.replace('passwordTPRMaxUse', str(try_tpr_failure)) + time.sleep(.5) + + # Reset user's password + our_user = UserAccount(topology_st.standalone, TEST_USER_DN) + our_user.replace('userpassword', PASSWORD) + time.sleep(.5) + + # look up to passwordTPRMaxUse with failing + # bind to check that the limits of TPR are enforced + for i in range(try_tpr_failure): + # Bind as user with a wrong password + with pytest.raises(ldap.INVALID_CREDENTIALS): + our_user.rebind('wrong password') + time.sleep(.5) + + # Check that pwdReset is TRUE + topology_st.standalone.simple_bind_s(DN_DM, PASSWORD) + #assert our_user.get_attr_val_utf8('pwdReset') == 'TRUE' + + # Check that pwdTPRReset is TRUE + 
assert our_user.get_attr_val_utf8('pwdTPRReset') == 'TRUE' + assert our_user.get_attr_val_utf8('pwdTPRUseCount') == str(i+1) + log.info("%dth failing bind (INVALID_CREDENTIALS) => pwdTPRUseCount = %d" % (i+1, i+1)) + + + # Now the #failures reached passwordTPRMaxUse + # Check that pwdReset is TRUE + topology_st.standalone.simple_bind_s(DN_DM, PASSWORD) + #assert our_user.get_attr_val_utf8('pwdReset') == 'TRUE' + + # Check that pwdTPRReset is TRUE + assert our_user.get_attr_val_utf8('pwdTPRReset') == 'TRUE' + assert our_user.get_attr_val_utf8('pwdTPRUseCount') == str(try_tpr_failure) + log.info("last failing bind (INVALID_CREDENTIALS) => pwdTPRUseCount = %d" % (try_tpr_failure)) + + # Bind as user with wrong password --> ldap.CONSTRAINT_VIOLATION + with pytest.raises(ldap.CONSTRAINT_VIOLATION): + our_user.rebind("wrong password") + time.sleep(.5) + + # Check that pwdReset is TRUE + topology_st.standalone.simple_bind_s(DN_DM, PASSWORD) + #assert our_user.get_attr_val_utf8('pwdReset') == 'TRUE' + + # Check that pwdTPRReset is TRUE + assert our_user.get_attr_val_utf8('pwdTPRReset') == 'TRUE' + assert our_user.get_attr_val_utf8('pwdTPRUseCount') == str(try_tpr_failure + 1) + log.info("failing bind (CONSTRAINT_VIOLATION) => pwdTPRUseCount = %d" % (try_tpr_failure + i)) + + # Now check that all next attempts with correct password are all in LDAP_CONSTRAINT_VIOLATION + # and passwordTPRRetryCount remains unchanged + # account is now similar to locked + for i in range(10): + # Bind as user with valid password + with pytest.raises(ldap.CONSTRAINT_VIOLATION): + our_user.rebind(PASSWORD) + time.sleep(.5) + + # Check that pwdReset is TRUE + topology_st.standalone.simple_bind_s(DN_DM, PASSWORD) + #assert our_user.get_attr_val_utf8('pwdReset') == 'TRUE' + + # Check that pwdTPRReset is TRUE + # pwdTPRUseCount keeps increasing + assert our_user.get_attr_val_utf8('pwdTPRReset') == 'TRUE' + assert our_user.get_attr_val_utf8('pwdTPRUseCount') == str(try_tpr_failure + i + 2) + 
log.info("Rejected bind (CONSTRAINT_VIOLATION) => pwdTPRUseCount = %d" % (try_tpr_failure + i + 2)) + + + def fin(): + topology_st.standalone.restart() + # Reset password policy config + topology_st.standalone.simple_bind_s(DN_DM, PASSWORD) + topology_st.standalone.config.replace('passwordMustChange', 'off') + + # Reset user's password + our_user.replace('userpassword', TEST_USER_PWD) + + + request.addfinalizer(fin) + +def test_global_tpr_maxuse_2(topology_st, test_user, request): + """Test global TPR policy : passwordTPRMaxUse + Test that after less than passwordTPRMaxUse failures to bind + additional bind with valid password are successfull + + :id: bd18bf8e-f3c3-4612-9009-500cf558317e + :customerscenario: False + :setup: Standalone instance + :steps: + 1. Enable passwordMustChange + 2. Set passwordTPRMaxUse=5 + 3. Set passwordMaxFailure to a higher value to not disturb the test + 4. Bind with a wrong password less than passwordTPRMaxUse times and check INVALID_CREDENTIALS + 7. Bind successfully with a valid password 10 times + and check passwordTPRRetryCount returns to 0 + 8. Reset password policy configuration + :expectedresults: + 1. Success + 2. Success + 3. Success + 4. Success + 5. Success + 6. Success + 7. Success + 8. 
Success + """ + + try_tpr_failure = 5 + # Set password policy config, passwordMaxFailure being higher than + # passwordTPRMaxUse so that TPR is enforced first + topology_st.standalone.config.replace('passwordMustChange', 'on') + topology_st.standalone.config.replace('passwordMaxFailure', str(try_tpr_failure + 20)) + topology_st.standalone.config.replace('passwordTPRMaxUse', str(try_tpr_failure)) + time.sleep(.5) + + # Reset user's password + our_user = UserAccount(topology_st.standalone, TEST_USER_DN) + our_user.replace('userpassword', PASSWORD) + time.sleep(.5) + + # Do less than passwordTPRMaxUse failing bind + try_tpr_failure = try_tpr_failure - 2 + for i in range(try_tpr_failure): + # Bind as user with a wrong password + with pytest.raises(ldap.INVALID_CREDENTIALS): + our_user.rebind('wrong password') + time.sleep(.5) + + # Check that pwdReset is TRUE + topology_st.standalone.simple_bind_s(DN_DM, PASSWORD) + #assert our_user.get_attr_val_utf8('pwdReset') == 'TRUE' + + # Check that pwdTPRReset is TRUE + assert our_user.get_attr_val_utf8('pwdTPRReset') == 'TRUE' + assert our_user.get_attr_val_utf8('pwdTPRUseCount') == str(i+1) + log.info("%dth failing bind (INVALID_CREDENTIALS) => pwdTPRUseCount = %d" % (i+1, i+1)) + + + # Now the #failures has not reached passwordTPRMaxUse + # Check that pwdReset is TRUE + topology_st.standalone.simple_bind_s(DN_DM, PASSWORD) + assert our_user.get_attr_val_utf8('pwdReset') == 'TRUE' + + # Check that pwdTPRReset is TRUE + assert our_user.get_attr_val_utf8('pwdTPRReset') == 'TRUE' + assert our_user.get_attr_val_utf8('pwdTPRUseCount') == str(try_tpr_failure) + log.info("last failing bind (INVALID_CREDENTIALS) => pwdTPRUseCount = %d" % (try_tpr_failure)) + + our_user.rebind(PASSWORD) + our_user.replace('userpassword', PASSWORD) + # give time to update the pwp attributes in the entry + time.sleep(.5) + # Now check that all next attempts with correct password are successfull + # and passwordTPRRetryCount reset to 0 + for i in 
range(10): + # Bind as user with valid password + our_user.rebind(PASSWORD) + time.sleep(.5) + + # Check that pwdReset is TRUE + topology_st.standalone.simple_bind_s(DN_DM, PASSWORD) + #assert our_user.get_attr_val_utf8('pwdReset') == 'TRUE' + + # Check that pwdTPRReset is FALSE + assert our_user.get_attr_val_utf8('pwdTPRReset') == 'FALSE' + #pdb.set_trace() + assert not our_user.present('pwdTPRUseCount') + + + def fin(): + topology_st.standalone.restart() + # Reset password policy config + topology_st.standalone.simple_bind_s(DN_DM, PASSWORD) + topology_st.standalone.config.replace('passwordMustChange', 'off') + + # Reset user's password + our_user.replace('userpassword', TEST_USER_PWD) + + request.addfinalizer(fin) + +def test_global_tpr_maxuse_3(topology_st, test_user, request): + """Test global TPR policy : passwordTPRMaxUse + Test that after less than passwordTPRMaxUse failures to bind + A bind with valid password is successfull but passwordMustChange + does not allow to do a search. + Changing the password allows to do a search + + :id: 7fd0301a-781e-4db8-a4bd-7b44e0f04bb6 + :customerscenario: False + :setup: Standalone instance + :steps: + 1. Enable passwordMustChange + 2. Set passwordTPRMaxUse=5 + 3. Set passwordMaxFailure to a higher value to not disturb the test + 4. Bind with a wrong password less then passwordTPRMaxUse times and check INVALID_CREDENTIALS + 5. Bind with the valid password and check SRCH fail (ldap.UNWILLING_TO_PERFORM) + because of passwordMustChange + 6. check passwordTPRRetryCount reset to 0 + 7. Bindd with valid password and reset the password + 8. Check we can bind again and SRCH succeeds + 9. Reset password policy configuration + :expectedresults: + 1. Success + 2. Success + 3. Success + 4. Success + 5. Success + 6. Success + 7. Success + 8. Success + 9. 
Success + """ + + try_tpr_failure = 5 + # Set password policy config, passwordMaxFailure being higher than + # passwordTPRMaxUse so that TPR is enforced first + topology_st.standalone.config.replace('passwordMustChange', 'on') + topology_st.standalone.config.replace('passwordMaxFailure', str(try_tpr_failure + 20)) + topology_st.standalone.config.replace('passwordTPRMaxUse', str(try_tpr_failure)) + time.sleep(.5) + + # Reset user's password + our_user = UserAccount(topology_st.standalone, TEST_USER_DN) + our_user.replace('userpassword', PASSWORD) + # give time to update the pwp attributes in the entry + time.sleep(.5) + + # Do less than passwordTPRMaxUse failing bind + try_tpr_failure = try_tpr_failure - 2 + for i in range(try_tpr_failure): + # Bind as user with a wrong password + with pytest.raises(ldap.INVALID_CREDENTIALS): + our_user.rebind('wrong password') + time.sleep(.5) + + # Check that pwdReset is TRUE + topology_st.standalone.simple_bind_s(DN_DM, PASSWORD) + #assert our_user.get_attr_val_utf8('pwdReset') == 'TRUE' + + # Check that pwdTPRReset is TRUE + assert our_user.get_attr_val_utf8('pwdTPRReset') == 'TRUE' + assert our_user.get_attr_val_utf8('pwdTPRUseCount') == str(i+1) + log.info("%dth failing bind (INVALID_CREDENTIALS) => pwdTPRUseCount = %d" % (i+1, i+1)) + + + # Now the #failures has not reached passwordTPRMaxUse + # Check that pwdReset is TRUE + topology_st.standalone.simple_bind_s(DN_DM, PASSWORD) + assert our_user.get_attr_val_utf8('pwdReset') == 'TRUE' + + # Check that pwdTPRReset is TRUE + assert our_user.get_attr_val_utf8('pwdTPRReset') == 'TRUE' + assert our_user.get_attr_val_utf8('pwdTPRUseCount') == str(try_tpr_failure) + log.info("last failing bind (INVALID_CREDENTIALS) => pwdTPRUseCount = %d" % (try_tpr_failure)) + + # Bind as user with valid password + our_user.rebind(PASSWORD) + time.sleep(.5) + + # We can not do anything else that reset password + users = UserAccounts(topology_st.standalone, OU_PEOPLE, rdn=None) + with 
pytest.raises(ldap.UNWILLING_TO_PERFORM): + user = users.get(TEST_USER_NAME) + + # Check that pwdReset is TRUE + topology_st.standalone.simple_bind_s(DN_DM, PASSWORD) + assert our_user.get_attr_val_utf8('pwdReset') == 'TRUE' + + # Check that pwdTPRReset is FALSE + assert our_user.get_attr_val_utf8('pwdTPRReset') == 'TRUE' + assert our_user.get_attr_val_utf8('pwdTPRUseCount') == str(try_tpr_failure + 1) + + # Now reset the password and check we can do fully use the account + our_user.rebind(PASSWORD) + our_user.reset_password(TEST_USER_PWD) + # give time to update the pwp attributes in the entry + time.sleep(.5) + our_user.rebind(TEST_USER_PWD) + time.sleep(.5) + user = users.get(TEST_USER_NAME) + + + def fin(): + topology_st.standalone.restart() + # Reset password policy config + topology_st.standalone.simple_bind_s(DN_DM, PASSWORD) + topology_st.standalone.config.replace('passwordMustChange', 'off') + + # Reset user's password + our_user.replace('userpassword', TEST_USER_PWD) + + request.addfinalizer(fin) + +def test_global_tpr_maxuse_4(topology_st, test_user, request): + """Test global TPR policy : passwordTPRMaxUse + Test that a TPR attribute passwordTPRMaxUse + can be updated by DM but not the by user itself + + :id: ee698277-9c4e-4f58-8f57-158a6d966fe6 + :customerscenario: False + :setup: Standalone instance + :steps: + 1. Enable passwordMustChange + 2. Set passwordTPRMaxUse=5 + 3. Set passwordMaxFailure to a higher value to not disturb the test + 4. Create a user without specific rights to update passwordTPRMaxUse + 5. Reset user password + 6. Do 3 failing (bad password) user authentication -> INVALID_CREDENTIALS + 7. Check that pwdTPRUseCount==3 + 8. Bind as user and reset its password + 9. Check that user can not update pwdTPRUseCount => INSUFFICIENT_ACCESS + 10. Check that DM can update pwdTPRUseCount + :expectedresults: + 1. Success + 2. Success + 3. Success + 4. Success + 5. Success + 6. INVALID_CREDENTIALS + 7. Success + 8. Success + 9. 
INSUFFICIENT_ACCESS + 10. Success + """ + + try_tpr_failure = 5 + USER_NO_ACI_NAME = 'user_no_aci' + USER_NO_ACI_DN = 'uid={},{}'.format(USER_NO_ACI_NAME, OU_PEOPLE) + USER_NO_ACI_PWD = 'user_no_aci' + # Set password policy config, passwordMaxFailure being higher than + # passwordTPRMaxUse so that TPR is enforced first + topology_st.standalone.config.replace('passwordMustChange', 'on') + topology_st.standalone.config.replace('passwordMaxFailure', str(try_tpr_failure + 20)) + topology_st.standalone.config.replace('passwordTPRMaxUse', str(try_tpr_failure)) + time.sleep(.5) + + # create user account (without aci granting write rights) + users = UserAccounts(topology_st.standalone, OU_PEOPLE, rdn=None) + user_props = TEST_USER_PROPERTIES.copy() + user_props.update({'uid': USER_NO_ACI_NAME, 'userpassword': USER_NO_ACI_PWD}) + try: + user = users.create(properties=user_props) + except: + pass # debug only + + # Reset user's password + user.replace('userpassword', PASSWORD) + time.sleep(.5) + + # Do less than passwordTPRMaxUse failing bind + try_tpr_failure = try_tpr_failure - 2 + for i in range(try_tpr_failure): + # Bind as user with a wrong password + with pytest.raises(ldap.INVALID_CREDENTIALS): + user.rebind('wrong password') + time.sleep(.5) + + # Check that pwdReset is TRUE + topology_st.standalone.simple_bind_s(DN_DM, PASSWORD) + #assert user.get_attr_val_utf8('pwdReset') == 'TRUE' + + # Check that pwdTPRReset is TRUE + assert user.get_attr_val_utf8('pwdTPRReset') == 'TRUE' + assert user.get_attr_val_utf8('pwdTPRUseCount') == str(i+1) + log.info("%dth failing bind (INVALID_CREDENTIALS) => pwdTPRUseCount = %d" % (i+1, i+1)) + + + # Now the #failures has not reached passwordTPRMaxUse + # Check that pwdReset is TRUE + topology_st.standalone.simple_bind_s(DN_DM, PASSWORD) + assert user.get_attr_val_utf8('pwdReset') == 'TRUE' + + # Check that pwdTPRReset is TRUE + assert user.get_attr_val_utf8('pwdTPRReset') == 'TRUE' + assert user.get_attr_val_utf8('pwdTPRUseCount') == 
str(try_tpr_failure) + log.info("last failing bind (INVALID_CREDENTIALS) => pwdTPRUseCount = %d" % (try_tpr_failure)) + + # Bind as user with valid password, reset the password + # and do simple search + user.rebind(PASSWORD) + user.reset_password(USER_NO_ACI_PWD) + time.sleep(.5) + user.rebind(USER_NO_ACI_PWD) + assert user.get_attr_val_utf8('uid') + time.sleep(.5) + + # Fail to update pwdTPRUseCount being USER_NO_ACI + with pytest.raises(ldap.INSUFFICIENT_ACCESS): + user.replace('pwdTPRUseCount', '100') + assert user.get_attr_val_utf8('pwdTPRUseCount') != '100' + + # Succeeds to update pwdTPRUseCount being DM + topology_st.standalone.simple_bind_s(DN_DM, PASSWORD) + user.replace('pwdTPRUseCount', '100') + assert user.get_attr_val_utf8('pwdTPRUseCount') == '100' + + def fin(): + topology_st.standalone.restart() + # Reset password policy config + topology_st.standalone.simple_bind_s(DN_DM, PASSWORD) + topology_st.standalone.config.replace('passwordMustChange', 'off') + + # Reset user's password + user.delete() + + request.addfinalizer(fin) + +def test_local_tpr_maxuse_5(topology_st, test_user, request): + """Test TPR local policy overpass global one: passwordTPRMaxUse + Test that after passwordTPRMaxUse failures to bind + additional bind with valid password are failing with CONSTRAINT_VIOLATION + + :id: c3919707-d804-445a-8754-8385b1072c42 + :customerscenario: False + :setup: Standalone instance + :steps: + 1. Global password policy Enable passwordMustChange + 2. Global password policy Set passwordTPRMaxUse=5 + 3. Global password policy Set passwordMaxFailure to a higher value to not disturb the test + 4. Local password policy Enable passwordMustChange + 5. Local password policy Set passwordTPRMaxUse=10 (higher than global) + 6. Bind with a wrong password 10 times and check INVALID_CREDENTIALS + 7. Check that passwordTPRUseCount got to the limit (5) + 8. 
Bind with a wrong password (CONSTRAINT_VIOLATION) + and check passwordTPRUseCount overpass the limit by 1 (11) + 9. Bind with a valid password 10 times and check CONSTRAINT_VIOLATION + and check passwordTPRUseCount increases + 10. Reset password policy configuration and remove local password from user + :expectedresults: + 1. Success + 2. Success + 3. Success + 4. Success + 5. Success + 6. Success + 7. Success + 8. Success + 9. Success + 10. Success + """ + + global_tpr_maxuse = 5 + # Set global password policy config, passwordMaxFailure being higher than + # passwordTPRMaxUse so that TPR is enforced first + topology_st.standalone.config.replace('passwordMustChange', 'on') + topology_st.standalone.config.replace('passwordMaxFailure', str(global_tpr_maxuse + 20)) + topology_st.standalone.config.replace('passwordTPRMaxUse', str(global_tpr_maxuse)) + time.sleep(.5) + + local_tpr_maxuse = global_tpr_maxuse + 5 + # Reset user's password with a local password policy + # that has passwordTPRMaxUse higher than global + #our_user = UserAccount(topology_st.standalone, TEST_USER_DN) + subprocess.call(['%s/dsconf' % topology_st.standalone.get_sbin_dir(), + '-D', + '%s' % DN_DM, + '-w', + '%s' % PASSWORD, + 'slapd-standalone1', + 'localpwp', + 'adduser', + test_user.dn]) + subprocess.call(['%s/dsconf' % topology_st.standalone.get_sbin_dir(), + '-D', + '%s' % DN_DM, + '-w', + '%s' % PASSWORD, + 'slapd-standalone1', + 'localpwp', + 'set', + '--pwptprmaxuse', + str(local_tpr_maxuse), + '--pwdmustchange', + 'on', + test_user.dn]) + test_user.replace('userpassword', PASSWORD) + time.sleep(.5) + + # look up to passwordTPRMaxUse with failing + # bind to check that the limits of TPR are enforced + for i in range(local_tpr_maxuse): + # Bind as user with a wrong password + with pytest.raises(ldap.INVALID_CREDENTIALS): + test_user.rebind('wrong password') + time.sleep(.5) + + # Check that pwdReset is TRUE + topology_st.standalone.simple_bind_s(DN_DM, PASSWORD) + #assert 
test_user.get_attr_val_utf8('pwdReset') == 'TRUE' + + # Check that pwdTPRReset is TRUE + assert test_user.get_attr_val_utf8('pwdTPRReset') == 'TRUE' + assert test_user.get_attr_val_utf8('pwdTPRUseCount') == str(i+1) + log.info("%dth failing bind (INVALID_CREDENTIALS) => pwdTPRUseCount = %d" % (i+1, i+1)) + + + # Now the #failures reached passwordTPRMaxUse + # Check that pwdReset is TRUE + topology_st.standalone.simple_bind_s(DN_DM, PASSWORD) + + # Check that pwdTPRReset is TRUE + assert test_user.get_attr_val_utf8('pwdTPRReset') == 'TRUE' + assert test_user.get_attr_val_utf8('pwdTPRUseCount') == str(local_tpr_maxuse) + log.info("last failing bind (INVALID_CREDENTIALS) => pwdTPRUseCount = %d" % (local_tpr_maxuse)) + + # Bind as user with wrong password --> ldap.CONSTRAINT_VIOLATION + with pytest.raises(ldap.CONSTRAINT_VIOLATION): + test_user.rebind("wrong password") + time.sleep(.5) + + # Check that pwdReset is TRUE + topology_st.standalone.simple_bind_s(DN_DM, PASSWORD) + + # Check that pwdTPRReset is TRUE + assert test_user.get_attr_val_utf8('pwdTPRReset') == 'TRUE' + assert test_user.get_attr_val_utf8('pwdTPRUseCount') == str(local_tpr_maxuse + 1) + log.info("failing bind (CONSTRAINT_VIOLATION) => pwdTPRUseCount = %d" % (local_tpr_maxuse + i)) + + # Now check that all next attempts with correct password are all in LDAP_CONSTRAINT_VIOLATION + # and passwordTPRRetryCount remains unchanged + # account is now similar to locked + for i in range(10): + # Bind as user with valid password + with pytest.raises(ldap.CONSTRAINT_VIOLATION): + test_user.rebind(PASSWORD) + time.sleep(.5) + + # Check that pwdReset is TRUE + topology_st.standalone.simple_bind_s(DN_DM, PASSWORD) + + # Check that pwdTPRReset is TRUE + # pwdTPRUseCount keeps increasing + assert test_user.get_attr_val_utf8('pwdTPRReset') == 'TRUE' + assert test_user.get_attr_val_utf8('pwdTPRUseCount') == str(local_tpr_maxuse + i + 2) + log.info("Rejected bind (CONSTRAINT_VIOLATION) => pwdTPRUseCount = %d" % 
(local_tpr_maxuse + i + 2)) + + + def fin(): + topology_st.standalone.restart() + # Reset password policy config + topology_st.standalone.simple_bind_s(DN_DM, PASSWORD) + topology_st.standalone.config.replace('passwordMustChange', 'off') + + # Remove local password policy from that entry + subprocess.call(['%s/dsconf' % topology_st.standalone.get_sbin_dir(), + '-D', + '%s' % DN_DM, + '-w', + '%s' % PASSWORD, + 'slapd-standalone1', + 'localpwp', + 'remove', + test_user.dn]) + + # Reset user's password + test_user.replace('userpassword', TEST_USER_PWD) + + + request.addfinalizer(fin) + +def test_global_tpr_delayValidFrom_1(topology_st, test_user, request): + """Test global TPR policy : passwordTPRDelayValidFrom + Test that a TPR password is not valid before reset time + + passwordTPRDelayValidFrom + + :id: 8420a348-e765-43ec-82c7-7f75cb4bf913 + :customerscenario: False + :setup: Standalone instance + :steps: + 1. Enable passwordMustChange + 2. Set passwordTPRDelayValidFrom=10s + 3. Create a account user + 5. Reset the password + 6. Check that Validity is not reached yet + pwdTPRValidFrom >= now + passwordTPRDelayValidFrom - 2 (safety) + 7. Bind with valid password, Fails because of CONSTRAINT_VIOLATION + :expectedresults: + 1. Success + 2. Success + 3. Success + 4. Success + 5. Success + 6. Success + 7. 
Success + """ + + ValidFrom = 10 + # Set password policy config, passwordMaxFailure being higher than + # passwordTPRMaxUse so that TPR is enforced first + topology_st.standalone.simple_bind_s(DN_DM, PASSWORD) + topology_st.standalone.config.replace('passwordMustChange', 'on') + topology_st.standalone.config.replace('passwordTPRDelayValidFrom', str(ValidFrom)) + time.sleep(.5) + + # Reset user's password + our_user = UserAccount(topology_st.standalone, TEST_USER_DN) + our_user.replace('userpassword', PASSWORD) + # give time to update the pwp attributes in the entry + time.sleep(.5) + + # Check that pwdReset is TRUE + topology_st.standalone.simple_bind_s(DN_DM, PASSWORD) + assert our_user.get_attr_val_utf8('pwdReset') == 'TRUE' + + # Check that pwdTPRReset is TRUE + assert our_user.get_attr_val_utf8('pwdTPRReset') == 'TRUE' + now = time.mktime(time.gmtime()) + log.info("compare pwdTPRValidFrom (%s) vs now (%s)" % (our_user.get_attr_val_utf8('pwdTPRValidFrom'), time.gmtime())) + assert (gentime_to_posix_time(our_user.get_attr_val_utf8('pwdTPRValidFrom'))) >= (now + ValidFrom - 2) + + # Bind as user with valid password + # But too early compare to ValidFrom + with pytest.raises(ldap.CONSTRAINT_VIOLATION): + our_user.rebind(PASSWORD) + + def fin(): + topology_st.standalone.restart() + # Reset password policy config + topology_st.standalone.simple_bind_s(DN_DM, PASSWORD) + topology_st.standalone.config.replace('passwordMustChange', 'off') + + # Reset user's password + our_user.replace('userpassword', TEST_USER_PWD) + + request.addfinalizer(fin) + +def test_global_tpr_delayValidFrom_2(topology_st, test_user, request): + """Test global TPR policy : passwordTPRDelayValidFrom + Test that a TPR password is valid after reset time + + passwordTPRDelayValidFrom + + :id: 8fa9f6f7-9be2-47c0-bf92-d9fe78ddbc34 + :customerscenario: False + :setup: Standalone instance + :steps: + 1. Enable passwordMustChange + 2. Set passwordTPRDelayValidFrom=6s + 3. Create a account user + 5. 
Reset the password + 6. Wait for passwordTPRDelayValidFrom=6s + 7. Bind with valid password, reset password + to allow further searches + 8. Check bound user can search attribute ('uid') + :expectedresults: + 1. Success + 2. Success + 3. Success + 4. Success + 5. Success + 6. Success + 7. Success + 8. Success + """ + + ValidFrom = 6 + # Set password policy config, passwordMaxFailure being higher than + # passwordTPRMaxUse so that TPR is enforced first + topology_st.standalone.simple_bind_s(DN_DM, PASSWORD) + topology_st.standalone.config.replace('passwordMustChange', 'on') + topology_st.standalone.config.replace('passwordTPRDelayValidFrom', str(ValidFrom)) + time.sleep(.5) + + # Reset user's password + our_user = UserAccount(topology_st.standalone, TEST_USER_DN) + our_user.replace('userpassword', PASSWORD) + # give time to update the pwp attributes in the entry + time.sleep(.5) + + # Check that pwdReset is TRUE + topology_st.standalone.simple_bind_s(DN_DM, PASSWORD) + assert our_user.get_attr_val_utf8('pwdReset') == 'TRUE' + + # Check that pwdTPRReset is TRUE + assert our_user.get_attr_val_utf8('pwdTPRReset') == 'TRUE' + now = time.mktime(time.gmtime()) + log.info("compare pwdTPRValidFrom (%s) vs now (%s)" % (our_user.get_attr_val_utf8('pwdTPRValidFrom'), time.gmtime())) + assert (gentime_to_posix_time(our_user.get_attr_val_utf8('pwdTPRValidFrom'))) >= (now + ValidFrom - 2) + + # wait for pwdTPRValidFrom + time.sleep(ValidFrom + 1) + + # Bind as user with valid password, reset the password + # and do simple search + our_user.rebind(PASSWORD) + our_user.reset_password(TEST_USER_PWD) + our_user.rebind(TEST_USER_PWD) + assert our_user.get_attr_val_utf8('uid') + + def fin(): + topology_st.standalone.restart() + # Reset password policy config + topology_st.standalone.simple_bind_s(DN_DM, PASSWORD) + topology_st.standalone.config.replace('passwordMustChange', 'off') + + # Reset user's password + our_user.replace('userpassword', TEST_USER_PWD) + + 
request.addfinalizer(fin) + +def test_global_tpr_delayValidFrom_3(topology_st, test_user, request): + """Test global TPR policy : passwordTPRDelayValidFrom + Test that a TPR attribute passwordTPRDelayValidFrom + can be updated by DM but not the by user itself + + :id: c599aea2-bbad-4158-b32e-307e5c6fca2d + :customerscenario: False + :setup: Standalone instance + :steps: + 1. Enable passwordMustChange + 2. Set passwordTPRDelayValidFrom=6s + 3. Create a account user + 5. Reset the password + 6. Check pwdReset/pwdTPRReset/pwdTPRValidFrom + 7. wait for 6s to let the new TPR password being valid + 8. Bind with valid password, reset password + to allow further searches + 9. Check bound user can search attribute ('uid') + 10. Bound as user, check user has not the rights to + modify pwdTPRValidFrom + 11. Bound as DM, check user has the right to + modify pwdTPRValidFrom + + :expectedresults: + 1. Success + 2. Success + 3. Success + 4. Success + 5. Success + 6. Success + 7. Success + 8. Success + 9. Success + 10. ldap.INSUFFICIENT_ACCESS + 11. 
Success + """ + + ValidFrom = 6 + USER_NO_ACI_NAME = 'user_no_aci' + USER_NO_ACI_DN = 'uid={},{}'.format(USER_NO_ACI_NAME, OU_PEOPLE) + USER_NO_ACI_PWD = 'user_no_aci' + # Set password policy config + topology_st.standalone.simple_bind_s(DN_DM, PASSWORD) + topology_st.standalone.config.replace('passwordMustChange', 'on') + topology_st.standalone.config.replace('passwordTPRDelayValidFrom', str(ValidFrom)) + time.sleep(.5) + + # create user account (without aci granting write rights) + users = UserAccounts(topology_st.standalone, OU_PEOPLE, rdn=None) + user_props = TEST_USER_PROPERTIES.copy() + user_props.update({'uid': USER_NO_ACI_NAME, 'userpassword': USER_NO_ACI_PWD}) + try: + user = users.create(properties=user_props) + except: + pass # debug only + + # Reset user's password + #our_user = UserAccount(topology_st.standalone, USER_NO_ACI_DN) + user.replace('userpassword', PASSWORD) + # give time to update the pwp attributes in the entry + time.sleep(.5) + + # Check that pwdReset is TRUE + topology_st.standalone.simple_bind_s(DN_DM, PASSWORD) + assert user.get_attr_val_utf8('pwdReset') == 'TRUE' + + # Check that pwdTPRReset is TRUE + assert user.get_attr_val_utf8('pwdTPRReset') == 'TRUE' + now = time.mktime(time.gmtime()) + log.info("compare pwdTPRValidFrom (%s) vs now (%s)" % (user.get_attr_val_utf8('pwdTPRValidFrom'), time.gmtime())) + assert (gentime_to_posix_time(user.get_attr_val_utf8('pwdTPRValidFrom'))) >= (now + ValidFrom - 2) + + # wait for pwdTPRValidFrom + time.sleep(ValidFrom + 1) + + # Bind as user with valid password, reset the password + # and do simple search + user.rebind(PASSWORD) + user.reset_password(USER_NO_ACI_PWD) + user.rebind(USER_NO_ACI_PWD) + assert user.get_attr_val_utf8('uid') + + # Fail to update pwdTPRValidFrom being USER_NO_ACI + with pytest.raises(ldap.INSUFFICIENT_ACCESS): + user.replace('pwdTPRValidFrom', '1234567890Z') + assert user.get_attr_val_utf8('pwdTPRValidFrom') != '1234567890Z' + + # Succeeds to update pwdTPRValidFrom 
being DM + topology_st.standalone.simple_bind_s(DN_DM, PASSWORD) + user.replace('pwdTPRValidFrom', '1234567890Z') + assert user.get_attr_val_utf8('pwdTPRValidFrom') == '1234567890Z' + + def fin(): + topology_st.standalone.restart() + # Reset password policy config + topology_st.standalone.simple_bind_s(DN_DM, PASSWORD) + topology_st.standalone.config.replace('passwordMustChange', 'off') + + # delete the no aci entry + user.delete() + + request.addfinalizer(fin) + +def test_global_tpr_delayExpireAt_1(topology_st, test_user, request): + """Test global TPR policy : passwordTPRDelayExpireAt + Test that a TPR password is not valid after reset time + + passwordTPRDelayExpireAt + + :id: b98def32-4e30-49fd-893b-8f959ba72b98 + :customerscenario: False + :setup: Standalone instance + :steps: + 1. Enable passwordMustChange + 2. Set passwordTPRDelayExpireAt=6s + 3. Create a account user + 5. Reset the password + 6. Wait for passwordTPRDelayExpireAt=6s + 2s (safety) + 7. Bind with valid password should fail with ldap.CONSTRAINT_VIOLATION + :expectedresults: + 1. Success + 2. Success + 3. Success + 4. Success + 5. Success + 6. Success + 7. 
Success + """ + + ExpireAt = 6 + # Set password policy config, passwordMaxFailure being higher than + # passwordTPRMaxUse so that TPR is enforced first + topology_st.standalone.simple_bind_s(DN_DM, PASSWORD) + topology_st.standalone.config.replace('passwordMustChange', 'on') + topology_st.standalone.config.replace('passwordTPRMaxUse', str(-1)) + topology_st.standalone.config.replace('passwordTPRDelayValidFrom', str(-1)) + topology_st.standalone.config.replace('passwordTPRDelayExpireAt', str(ExpireAt)) + time.sleep(.5) + + # Reset user's password + our_user = UserAccount(topology_st.standalone, TEST_USER_DN) + our_user.replace('userpassword', PASSWORD) + # give time to update the pwp attributes in the entry + time.sleep(.5) + + # Check that pwdReset is TRUE + topology_st.standalone.simple_bind_s(DN_DM, PASSWORD) + assert our_user.get_attr_val_utf8('pwdReset') == 'TRUE' + + # Check that pwdTPRReset is TRUE + assert our_user.get_attr_val_utf8('pwdTPRReset') == 'TRUE' + now = time.mktime(time.gmtime()) + log.info("compare pwdTPRExpireAt (%s) vs now (%s)" % (our_user.get_attr_val_utf8('pwdTPRExpireAt'), time.gmtime())) + assert (gentime_to_posix_time(our_user.get_attr_val_utf8('pwdTPRExpireAt'))) >= (now + ExpireAt - 2) + + # wait for pwdTPRExpireAt + time.sleep(ExpireAt + 2) + + # Bind as user with valid password but too late + # for pwdTPRExpireAt + # and do simple search + with pytest.raises(ldap.CONSTRAINT_VIOLATION): + our_user.rebind(PASSWORD) + + def fin(): + topology_st.standalone.restart() + # Reset password policy config + topology_st.standalone.simple_bind_s(DN_DM, PASSWORD) + topology_st.standalone.config.replace('passwordMustChange', 'off') + + # Reset user's password + our_user.replace('userpassword', TEST_USER_PWD) + + request.addfinalizer(fin) + +def test_global_tpr_delayExpireAt_2(topology_st, test_user, request): + """Test global TPR policy : passwordTPRDelayExpireAt + Test that a TPR password is valid before reset time + + passwordTPRDelayExpireAt + + 
:id: 9df320de-ebf6-4ed0-a619-51b1a05a560c + :customerscenario: False + :setup: Standalone instance + :steps: + 1. Enable passwordMustChange + 2. Set passwordTPRDelayExpireAt=6s + 3. Create a account user + 5. Reset the password + 6. Wait for 1s + 7. Bind with valid password should succeeds + :expectedresults: + 1. Success + 2. Success + 3. Success + 4. Success + 5. Success + 6. Success + 7. Success + """ + + ExpireAt = 6 + # Set password policy config, passwordMaxFailure being higher than + # passwordTPRMaxUse so that TPR is enforced first + topology_st.standalone.simple_bind_s(DN_DM, PASSWORD) + topology_st.standalone.config.replace('passwordMustChange', 'on') + topology_st.standalone.config.replace('passwordTPRMaxUse', str(-1)) + topology_st.standalone.config.replace('passwordTPRDelayValidFrom', str(-1)) + topology_st.standalone.config.replace('passwordTPRDelayExpireAt', str(ExpireAt)) + time.sleep(.5) + + # Reset user's password + our_user = UserAccount(topology_st.standalone, TEST_USER_DN) + our_user.replace('userpassword', PASSWORD) + # give time to update the pwp attributes in the entry + time.sleep(.5) + + # Check that pwdReset is TRUE + topology_st.standalone.simple_bind_s(DN_DM, PASSWORD) + assert our_user.get_attr_val_utf8('pwdReset') == 'TRUE' + + # Check that pwdTPRReset is TRUE + assert our_user.get_attr_val_utf8('pwdTPRReset') == 'TRUE' + now = time.mktime(time.gmtime()) + log.info("compare pwdTPRExpireAt (%s) vs now (%s)" % (our_user.get_attr_val_utf8('pwdTPRExpireAt'), time.gmtime())) + assert (gentime_to_posix_time(our_user.get_attr_val_utf8('pwdTPRExpireAt'))) >= (now + ExpireAt - 2) + + # wait for 1s + time.sleep(1) + + # Bind as user with valid password, reset the password + # and do simple search + our_user.rebind(PASSWORD) + our_user.reset_password(TEST_USER_PWD) + time.sleep(.5) + our_user.rebind(TEST_USER_PWD) + assert our_user.get_attr_val_utf8('uid') + + def fin(): + topology_st.standalone.restart() + # Reset password policy config + 
topology_st.standalone.simple_bind_s(DN_DM, PASSWORD) + topology_st.standalone.config.replace('passwordMustChange', 'off') + + # Reset user's password + our_user.replace('userpassword', TEST_USER_PWD) + + request.addfinalizer(fin) + +def test_global_tpr_delayExpireAt_3(topology_st, test_user, request): + """Test global TPR policy : passwordTPRDelayExpireAt + Test that a TPR attribute passwordTPRDelayExpireAt + can be updated by DM but not by the user itself + + :id: 22bb5dd8-d8f6-4484-988e-6de0ef704391 + :customerscenario: False + :setup: Standalone instance + :steps: + 1. Enable passwordMustChange + 2. Set passwordTPRDelayExpireAt=6s + 3. Create an account user + 5. Reset the password + 6. Check pwdReset/pwdTPRReset/pwdTPRValidFrom + 7. Wait for 1s so that TPR has not expired + 8. Bind with valid password, reset password + to allow further searches + 9. Check bound user can search attribute ('uid') + 10. Bound as user, check user does not have the right to + modify pwdTPRExpireAt + 11. Bound as DM, check user has the right to + modify pwdTPRExpireAt + + :expectedresults: + 1. Success + 2. Success + 3. Success + 4. Success + 5. Success + 6. Success + 7. Success + 8. Success + 9. Success + 10. ldap.INSUFFICIENT_ACCESS + 11. 
Success + """ + + ExpireAt = 6 + USER_NO_ACI_NAME = 'user_no_aci' + USER_NO_ACI_DN = 'uid={},{}'.format(USER_NO_ACI_NAME, OU_PEOPLE) + USER_NO_ACI_PWD = 'user_no_aci' + # Set password policy config + topology_st.standalone.simple_bind_s(DN_DM, PASSWORD) + topology_st.standalone.config.replace('passwordMustChange', 'on') + topology_st.standalone.config.replace('passwordTPRDelayValidFrom', str(-1)) + topology_st.standalone.config.replace('passwordTPRDelayExpireAt', str(ExpireAt)) + topology_st.standalone.config.replace('passwordTPRDelayValidFrom', str(-1)) + time.sleep(.5) + + # create user account (without aci granting write rights) + users = UserAccounts(topology_st.standalone, OU_PEOPLE, rdn=None) + user_props = TEST_USER_PROPERTIES.copy() + user_props.update({'uid': USER_NO_ACI_NAME, 'userpassword': USER_NO_ACI_PWD}) + try: + user = users.create(properties=user_props) + except: + pass # debug only + + # Reset user's password + user.replace('userpassword', PASSWORD) + # give time to update the pwp attributes in the entry + time.sleep(.5) + + # Check that pwdReset is TRUE + topology_st.standalone.simple_bind_s(DN_DM, PASSWORD) + assert user.get_attr_val_utf8('pwdReset') == 'TRUE' + + # Check that pwdTPRReset is TRUE + assert user.get_attr_val_utf8('pwdTPRReset') == 'TRUE' + now = time.mktime(time.gmtime()) + log.info("compare pwdTPRExpireAt (%s) vs now (%s)" % (user.get_attr_val_utf8('pwdTPRExpireAt'), time.gmtime())) + assert (gentime_to_posix_time(user.get_attr_val_utf8('pwdTPRExpireAt'))) >= (now + ExpireAt - 2) + + # wait for 1s + time.sleep(1) + + # Bind as user with valid password, reset the password + # and do simple search + user.rebind(PASSWORD) + user.reset_password(USER_NO_ACI_PWD) + time.sleep(.5) + user.rebind(USER_NO_ACI_PWD) + assert user.get_attr_val_utf8('uid') + time.sleep(.5) + + # Fail to update pwdTPRExpireAt being USER_NO_ACI + with pytest.raises(ldap.INSUFFICIENT_ACCESS): + user.replace('pwdTPRExpireAt', '1234567890Z') + assert 
user.get_attr_val_utf8('pwdTPRExpireAt') != '1234567890Z' + + # Succeeds to update pwdTPRExpireAt being DM + topology_st.standalone.simple_bind_s(DN_DM, PASSWORD) + user.replace('pwdTPRExpireAt', '1234567890Z') + assert user.get_attr_val_utf8('pwdTPRExpireAt') == '1234567890Z' + + def fin(): + topology_st.standalone.restart() + # Reset password policy config + topology_st.standalone.simple_bind_s(DN_DM, PASSWORD) + topology_st.standalone.config.replace('passwordMustChange', 'off') + + # delete the no aci entry + user.delete() + + request.addfinalizer(fin) + +if __name__ == '__main__': + # Run isolated + # -s for DEBUG mode + CURRENT_FILE = os.path.realpath(__file__) + pytest.main("-s %s" % CURRENT_FILE) diff --git a/dirsrvtests/tests/suites/password/pwdPolicy_token_test.py b/dirsrvtests/tests/suites/password/pwdPolicy_token_test.py new file mode 100644 index 0000000..ae4eb30 --- /dev/null +++ b/dirsrvtests/tests/suites/password/pwdPolicy_token_test.py @@ -0,0 +1,91 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2022 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. 
+# --- END COPYRIGHT BLOCK --- +# +import logging +import pytest +import os +import time +import ldap +from lib389._constants import * +from lib389.idm.user import UserAccounts +from lib389.idm.organizationalunit import OrganizationalUnits +from lib389.topologies import topology_st as topo + +pytestmark = pytest.mark.tier1 + +DEBUGGING = os.getenv("DEBUGGING", default=False) +if DEBUGGING: + logging.getLogger(__name__).setLevel(logging.DEBUG) +else: + logging.getLogger(__name__).setLevel(logging.INFO) +log = logging.getLogger(__name__) + +USER_DN = 'uid=Test_user1,ou=People,dc=example,dc=com' +USER_ACI = '(targetattr="userpassword")(version 3.0; acl "pwp test"; allow (all) userdn="ldap:///self";)' +TOKEN = 'test_user1' + +user_properties = { + 'uid': 'Test_user1', + 'cn': 'test_user1', + 'sn': 'test_user1', + 'uidNumber': '1001', + 'gidNumber': '2001', + 'userpassword': PASSWORD, + 'description': 'userdesc', + 'homeDirectory': '/home/{}'.format('test_user')} + + +def pwd_setup(topo): + ous = OrganizationalUnits(topo.standalone, DEFAULT_SUFFIX) + ou = ous.get('people') + ou.add('aci', USER_ACI) + + topo.standalone.config.replace_many(('passwordCheckSyntax', 'on'), + ('passwordMinLength', '4'), + ('passwordMinCategories', '1')) + users = UserAccounts(topo.standalone, DEFAULT_SUFFIX) + return users.create(properties=user_properties) + + +def test_token_lengths(topo): + """Test that password token length is enforced for various lengths including + the same length as the attribute being checked by the policy. + + :id: dae9d916-2a03-4707-b454-9e901d295b13 + :setup: Standalone instance + :steps: + 1. Test token length rejects password of the same length as rdn value + :expectedresults: + 1. 
Passwords are rejected + """ + user = pwd_setup(topo) + for length in ['4', '6', '10']: + topo.standalone.simple_bind_s(DN_DM, PASSWORD) + topo.standalone.config.set('passwordMinTokenLength', length) + topo.standalone.simple_bind_s(USER_DN, PASSWORD) + time.sleep(1) + + try: + passwd = TOKEN[:int(length)] + log.info("Testing password len {} token ({})".format(length, passwd)) + user.replace('userpassword', passwd) + log.fatal('Password incorrectly allowed!') + assert False + except ldap.CONSTRAINT_VIOLATION as e: + log.info('Password correctly rejected: ' + str(e)) + except ldap.LDAPError as e: + log.fatal('Unexpected failure ' + str(e)) + assert False + + +if __name__ == '__main__': + # Run isolated + # -s for DEBUG mode + CURRENT_FILE = os.path.realpath(__file__) + pytest.main("-s %s" % CURRENT_FILE) + diff --git a/dirsrvtests/tests/suites/password/pwdPolicy_warning_test.py b/dirsrvtests/tests/suites/password/pwdPolicy_warning_test.py new file mode 100644 index 0000000..9efc4eb --- /dev/null +++ b/dirsrvtests/tests/suites/password/pwdPolicy_warning_test.py @@ -0,0 +1,600 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2020 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. 
+# --- END COPYRIGHT BLOCK --- +# +import pytest +import subprocess +from ldap.controls.ppolicy import PasswordPolicyControl +from lib389.tasks import * +from lib389.utils import * +from lib389.topologies import topology_st +from lib389.idm.user import UserAccounts +from lib389.idm.organizationalunit import OrganizationalUnits +from lib389._constants import (DEFAULT_SUFFIX, DN_CONFIG, PASSWORD, DN_DM) +from dateutil.parser import parse as dt_parse +from lib389.config import Config +import datetime + +pytestmark = pytest.mark.tier1 + +CONFIG_ATTR = 'passwordSendExpiringTime' +USER_DN = 'uid=tuser,ou=people,{}'.format(DEFAULT_SUFFIX) +USER_RDN = 'tuser' +USER_PASSWD = 'secret123' +USER_ACI = '(targetattr="userpassword")(version 3.0; acl "pwp test"; allow (all) userdn="ldap:///self";)' + +logging.getLogger(__name__).setLevel(logging.INFO) +log = logging.getLogger(__name__) + + +@pytest.fixture +def global_policy(topology_st, request): + """Sets the required global + password policy attributes under + cn=config entry + """ + + attrs = {'passwordExp': '', + 'passwordMaxAge': '', + 'passwordWarning': '', + CONFIG_ATTR: ''} + + log.info('Get the default values') + entry = topology_st.standalone.getEntry(DN_CONFIG, ldap.SCOPE_BASE, + '(objectClass=*)', attrs.keys()) + + for key in attrs.keys(): + attrs[key] = entry.getValue(key) + + log.info('Set the new values') + topology_st.standalone.config.replace_many(('passwordExp', 'on'), + ('passwordMaxAge', '172800'), + ('passwordWarning', '86400'), + (CONFIG_ATTR, 'on')) + + def fin(): + """Resets the defaults""" + + log.info('Reset the defaults') + topology_st.standalone.simple_bind_s(DN_DM, PASSWORD) + for key in attrs.keys(): + topology_st.standalone.config.replace(key, attrs[key]) + + request.addfinalizer(fin) + # A short sleep is required after the modifying password policy or cn=config + time.sleep(0.5) + + +@pytest.fixture +def global_policy_default(topology_st, request): + """Sets the required global password policy + 
attributes for testing the default behavior + of password expiry warning time + """ + + attrs = {'passwordExp': '', + 'passwordMaxAge': '', + 'passwordWarning': '', + CONFIG_ATTR: ''} + + log.info('Get the default values') + entry = topology_st.standalone.getEntry(DN_CONFIG, ldap.SCOPE_BASE, + '(objectClass=*)', attrs.keys()) + for key in attrs.keys(): + attrs[key] = entry.getValue(key) + + log.info('Set the new values') + topology_st.standalone.config.replace_many( + ('passwordExp', 'on'), + ('passwordMaxAge', '8640000'), + ('passwordWarning', '86400'), + (CONFIG_ATTR, 'off')) + + def fin(): + """Resets the defaults""" + log.info('Reset the defaults') + topology_st.standalone.simple_bind_s(DN_DM, PASSWORD) + for key in attrs.keys(): + topology_st.standalone.config.replace(key, attrs[key]) + + request.addfinalizer(fin) + # A short sleep is required after modifying password policy or cn=config + time.sleep(0.5) + + +@pytest.fixture +def add_user(topology_st, request): + """Adds a user for binding""" + + log.info('Add the user') + + users = UserAccounts(topology_st.standalone, DEFAULT_SUFFIX) + user = users.create(properties={ + 'uid': USER_RDN, + 'cn': USER_RDN, + 'sn': USER_RDN, + 'uidNumber': '3000', + 'gidNumber': '4000', + 'homeDirectory': '/home/user', + 'description': 'd_e_s_c', + 'userPassword': USER_PASSWD + }) + + def fin(): + """Removes the user entry""" + + log.info('Remove the user entry') + topology_st.standalone.simple_bind_s(DN_DM, PASSWORD) + user.delete() + + request.addfinalizer(fin) + + +@pytest.fixture +def local_policy(topology_st, add_user): + """Sets fine grained policy for user entry""" + + log.info("Setting fine grained policy for user ({})".format(USER_DN)) + + subprocess.call(['%s/dsconf' % topology_st.standalone.get_sbin_dir(), + 'slapd-standalone1', + 'localpwp', + 'adduser', + USER_DN]) + # A short sleep is required after modifying password policy + time.sleep(0.5) + + +def get_password_warning(topology_st): + """Gets the password 
expiry warning time for the user""" + + res_type = res_data = res_msgid = res_ctrls = None + result_id = '' + + log.info('Bind with the user and request the password expiry warning time') + + result_id = topology_st.standalone.simple_bind(USER_DN, USER_PASSWD, + serverctrls=[PasswordPolicyControl()]) + res_type, res_data, res_msgid, res_ctrls = \ + topology_st.standalone.result3(result_id) + # Return the control + return res_ctrls + + +def set_conf_attr(topology_st, attr, val): + """Sets the value of a given attribute under cn=config""" + + log.info("Setting {} to {}".format(attr, val)) + topology_st.standalone.config.set(attr, val) + # A short sleep is required after modifying cn=config + time.sleep(0.5) + + +def get_conf_attr(topology_st, attr): + """Gets the value of a given attribute under cn=config entry + """ + return topology_st.standalone.config.get_attr_val_utf8(attr) + + +@pytest.mark.parametrize("value", (' ', 'junk123', 'on', 'off')) +def test_different_values(topology_st, value): + """Try to set passwordSendExpiringTime attribute + to various values both valid and invalid + + :id: 3e6d79fb-b4c8-4860-897e-5b207815a75d + :parametrized: yes + :setup: Standalone instance + :steps: + 1. Try to set passwordSendExpiringTime to 'on' and 'off' + under cn=config entry + 2. Try to set passwordSendExpiringTime to ' ' and 'junk123' + under cn=config entry + 3. Run the search command to check the + value of passwordSendExpiringTime attribute + :expectedresults: + 1. Valid values should be accepted and saved + 2. Should be rejected with an OPERATIONS_ERROR + 3. 
The attribute should be changed for valid values + and unchanged for invalid + """ + + log.info('Get the default value') + defval = get_conf_attr(topology_st, CONFIG_ATTR) + + if value not in ('on', 'off'): + log.info('An invalid value is being tested') + with pytest.raises(ldap.OPERATIONS_ERROR): + set_conf_attr(topology_st, CONFIG_ATTR, value) + + log.info('Now check the value is unchanged') + assert get_conf_attr(topology_st, CONFIG_ATTR) == defval + + log.info("Invalid value {} was rejected correctly".format(value)) + else: + log.info('A valid value is being tested') + set_conf_attr(topology_st, CONFIG_ATTR, value) + + log.info('Now check that the value has been changed') + assert str(get_conf_attr(topology_st, CONFIG_ATTR)) == value + + log.info("{} is now set to {}".format(CONFIG_ATTR, value)) + + log.info('Set passwordSendExpiringTime back to the default value') + set_conf_attr(topology_st, CONFIG_ATTR, defval) + + +def test_expiry_time(topology_st, global_policy, add_user): + """Test whether the password expiry warning + time for a user is returned appropriately + + :id: 7adfd395-9b25-4cc0-9b71-14710dc1a28c + :setup: Standalone instance, a user entry, + Global password policy configured as below: + passwordExp: on + passwordMaxAge: 172800 + passwordWarning: 86400 + passwordSendExpiringTime: on + :steps: + 1. Bind as the normal user + 2. Request password policy control for the user + 3. Bind as DM + :expectedresults: + 1. Bind should be successful + 2. The password expiry warning time for the user should be returned + 3. 
Bind should be successful + """ + + res_ctrls = None + + ous = OrganizationalUnits(topology_st.standalone, DEFAULT_SUFFIX) + ou = ous.get('people') + ou.add('aci', USER_ACI) + + log.info('Get the password expiry warning time') + log.info("Binding with ({}) and requesting the password expiry warning time" + .format(USER_DN)) + res_ctrls = get_password_warning(topology_st) + + log.info('Check whether the time is returned') + assert res_ctrls + + log.info("user's password will expire in {:d} seconds" + .format(res_ctrls[0].timeBeforeExpiration)) + + log.info("Rebinding as DM") + topology_st.standalone.simple_bind_s(DN_DM, PASSWORD) + + +@pytest.mark.parametrize("attr,val", [(CONFIG_ATTR, 'off'), + ('passwordWarning', '3600')]) +def test_password_warning(topology_st, global_policy, add_user, attr, val): + """Test password expiry warning time by setting passwordSendExpiringTime to off + and setting passwordWarning to a short value + + :id: 39f54b3c-8c80-43ca-856a-174d81c56ce8 + :parametrized: yes + :setup: Standalone instance, a test user, + Global password policy configured as below: + passwordExp: on + passwordMaxAge: 172800 + passwordWarning: 86400 + passwordSendExpiringTime: on + :steps: + 1. Set passwordSendExpiringTime attribute to off or + to on and passwordWarning to a small value (3600) + 2. Bind as the normal user + 3. Request the password expiry warning time + 4. Bind as DM + :expectedresults: + 1. passwordSendExpiringTime and passwordWarning are set successfully + 2. Bind should be successful + 3. Password expiry warning time should be returned for the small value + and should not be returned when passwordSendExpiringTime is off + 4. 
Bind should be successful + """ + + log.info('Set configuration parameter') + set_conf_attr(topology_st, attr, val) + + log.info("Binding with ({}) and requesting password expiry warning time" + .format(USER_DN)) + res_ctrls = get_password_warning(topology_st) + + log.info('Check the state of the control') + if not res_ctrls: + log.info("Password Expiry warning time is not returned as {} is set to {}" + .format(attr, val)) + else: + log.info("({}) password will expire in {:d} seconds" + .format(USER_DN, res_ctrls[0].timeBeforeExpiration)) + + log.info("Rebinding as DM") + topology_st.standalone.simple_bind_s(DN_DM, PASSWORD) + + +def test_with_different_password_states(topology_st, global_policy, add_user): + """Test the control with different password states + + :id: d297fb1a-661f-4d52-bb43-2a2a340b8b0e + :setup: Standalone instance, a user entry, + Global password policy configured as below: + passwordExp: on + passwordMaxAge: 172800 + passwordWarning: 86400 + passwordSendExpiringTime: on + :steps: + 1. Expire user's password by changing + passwordExpirationTime timestamp + 2. Try to bind to the server with the user entry + 3. Revert back user's passwordExpirationTime + 4. Try to bind with the user entry and request + the control + 5. Bind as DM + :expectedresults: + 1. Operation should be successful + 2. Operation should fail because of Invalid Credentials + 3. passwordExpirationTime is successfully changed + 4. Bind should be successful and the password expiry + warning time should be returned + 5. 
Bind should be successful + """ + + res_ctrls = None + + log.info("Expire user's password by changing passwordExpirationTime timestamp") + users = UserAccounts(topology_st.standalone, DEFAULT_SUFFIX) + user = users.get(USER_RDN) + old_ts = user.get_attr_val_utf8('passwordExpirationTime') + log.info("Old passwordExpirationTime: {}".format(old_ts)) + + new_ts = (dt_parse(old_ts) - datetime.timedelta(31)).strftime('%Y%m%d%H%M%SZ') + log.info("New passwordExpirationTime: {}".format(new_ts)) + user.replace('passwordExpirationTime', new_ts) + + log.info("Attempting to bind with user {} and retrive the password expiry warning time".format(USER_DN)) + with pytest.raises(ldap.INVALID_CREDENTIALS) as ex: + res_ctrls = get_password_warning(topology_st) + + log.info("Bind Failed, error: {}".format(str(ex))) + + log.info("Rebinding as DM") + topology_st.standalone.simple_bind_s(DN_DM, PASSWORD) + + log.info("Reverting back user's passwordExpirationTime") + + user.replace('passwordExpirationTime', old_ts) + + log.info("Rebinding with {} and retrieving the password expiry warning time".format(USER_DN)) + res_ctrls = get_password_warning(topology_st) + + log.info('Check that the control is returned') + assert res_ctrls + + log.info("user's password will expire in {:d} seconds" + .format(res_ctrls[0].timeBeforeExpiration)) + + log.info("Rebinding as DM") + topology_st.standalone.simple_bind_s(DN_DM, PASSWORD) + + +def test_default_behavior(topology_st, global_policy_default, add_user): + """Test the default behavior of password expiry warning time + + :id: c47fa824-ee08-4b78-885f-bca4c42bb655 + :setup: Standalone instance, a user entry, + Global password policy configured as below: + passwordExp: on + passwordMaxAge: 8640000 + passwordWarning: 86400 + passwordSendExpiringTime: off + :steps: + 1. Bind as the normal user + 2. Request the control for the user + 3. Bind as DM + :expectedresults: + 1. Bind should be successful + 2. No control should be returned + 3. 
Bind should be successful + """ + + res_ctrls = None + + log.info("Binding with {} and requesting the password expiry warning time" + .format(USER_DN)) + res_ctrls = get_password_warning(topology_st) + + log.info('Check that no control is returned') + assert not res_ctrls + + log.info("Rebinding as DM") + topology_st.standalone.simple_bind_s(DN_DM, PASSWORD) + + +def test_when_maxage_and_warning_are_the_same(topology_st, global_policy_default, add_user): + """Test the warning expiry when passwordMaxAge and + passwordWarning are set to the same value. + + :id: e57a1b1c-96fc-11e7-a91b-28d244694824 + :setup: Standalone instance, a user entry, + Global password policy configured as below: + passwordExp: on + passwordMaxAge: 86400 + passwordWarning: 86400 + passwordSendExpiringTime: off + :steps: + 1. Bind as the normal user + 2. Change user's password to reset its password expiration time + 3. Request the control for the user + 4. Bind as DM + :expectedresults: + 1. Bind should be successful + 2. Password should be changed and password's expiration time reset + 3. Password expiry warning time should be returned by the + server since passwordMaxAge and passwordWarning are set + to the same value + 4. 
Bind should be successful + """ + + log.info('Set the new values') + topology_st.standalone.config.set('passwordMaxAge', '86400') + res_ctrls = None + + log.info("First change user's password to reset its password expiration time") + users = UserAccounts(topology_st.standalone, DEFAULT_SUFFIX) + user = users.get(USER_RDN) + user.rebind(USER_PASSWD) + user.reset_password(USER_PASSWD) + + time.sleep(2) + log.info("Binding with {} and requesting the password expiry warning time" + .format(USER_DN)) + res_ctrls = get_password_warning(topology_st) + + log.info('Check that control is returned even' + 'if passwordSendExpiringTime is set to off') + assert res_ctrls + + log.info("user's password will expire in {:d} seconds".format(res_ctrls[0].timeBeforeExpiration)) + + log.info("Rebinding as DM") + topology_st.standalone.simple_bind_s(DN_DM, PASSWORD) + + +def test_with_local_policy(topology_st, global_policy, local_policy): + """Test the attribute with fine grained policy set for the user + + :id: ab7d9f86-8cfe-48c3-8baa-739e599f006a + :setup: Standalone instance, a user entry, + Global password policy configured as below: + passwordExp: on + passwordMaxAge: 172800 + passwordWarning: 86400 + passwordSendExpiringTime: on + Fine grained password policy for the user using: dsconf INST localpwp + :steps: + 1. Bind as the normal user + 2. Request the control for the user + 3. Bind as DM + :expectedresults: + 1. Bind should be successful + 2. Password expiry warning time should not be returned for the user + 3. 
Bind should be successful + """ + + res_ctrls = None + + log.info("Attempting to get password expiry warning time for user {}".format(USER_DN)) + res_ctrls = get_password_warning(topology_st) + + log.info('Check that the control is not returned') + assert not res_ctrls + + log.info("Password expiry warning time is not returned") + + log.info("Rebinding as DM") + topology_st.standalone.simple_bind_s(DN_DM, PASSWORD) + + +@pytest.mark.bz1589144 +@pytest.mark.ds50091 +def test_search_shadowWarning_when_passwordWarning_is_lower(topology_st, global_policy): + """Test if value shadowWarning is present with global password policy + when passwordWarning is set with lower value. + + :id: c1e82de6-1aa3-42c3-844a-9720172158a3 + :setup: Standalone Instance + :steps: + 1. Bind as Directory Manager + 2. Set global password policy + 3. Add test user to instance. + 4. Modify passwordWarning to have smaller value than 86400 + 5. Bind as the new user + 6. Search for shadowWarning attribute + 7. Rebind as Directory Manager + :expectedresults: + 1. Binding should be successful + 2. Setting password policy should be successful + 3. Adding test user should be successful + 4. Modifying passwordWarning should be successful + 5. Binding should be successful + 6. Attribute shadowWarning should be found + 7. 
Binding should be successful + """ + + users = UserAccounts(topology_st.standalone, DEFAULT_SUFFIX) + + log.info("Bind as %s" % DN_DM) + assert topology_st.standalone.simple_bind_s(DN_DM, PASSWORD) + + log.info("Creating test user") + testuser = users.create_test_user(1004) + testuser.add('objectclass', 'shadowAccount') + testuser.set('userPassword', USER_PASSWD) + + log.info("Setting passwordWarning to smaller value than 86400") + assert topology_st.standalone.config.set('passwordWarning', '86399') + + log.info("Bind as test user") + assert topology_st.standalone.simple_bind_s(testuser.dn, USER_PASSWD) + + log.info("Check if attribute shadowWarning is present") + topology_st.standalone.simple_bind_s(DN_DM, PASSWORD) + assert testuser.present('shadowWarning') + + +@pytest.mark.bug624080 +def test_password_expire_works(topology_st): + """Regression test for bug624080. If passwordMaxAge is set to a + value and a new user is added, if the passwordMaxAge is changed + to a shorter expiration time and the new user's password + is then changed, the passwordExpirationTime for the + new user should be changed too. There was a bug in DS 6.2 + where the expiration time remained unchanged. + + :id: 1ead6052-4636-11ea-b5af-8c16451d917b + :setup: Standalone + :steps: + 1. Set the Global password policy and a passwordMaxAge to 5 days + 2. Add the new user + 3. Check the user's password expiration time now + 4. Decrease global passwordMaxAge to 2 days + 5. Modify the user's password + 6. Modify the user one more time to make sure the time has been reset + 7. Turn off the password policy + :expectedresults: + 1. Success + 2. Success + 3. Success + 4. Success + 5. Success + 6. Success + 7. 
Success + """ + config = Config(topology_st.standalone) + config.replace_many(('passwordMaxAge', '432000'), + ('passwordExp', 'on')) + user = UserAccounts(topology_st.standalone, DEFAULT_SUFFIX, rdn=None).create_test_user() + user.set('userPassword', 'anuj') + time.sleep(0.5) + expire_time = user.get_attr_val_utf8('passwordExpirationTime') + config.replace('passwordMaxAge', '172800') + user.set('userPassword', 'borah') + time.sleep(0.5) + expire_time2 = user.get_attr_val_utf8('passwordExpirationTime') + config.replace('passwordMaxAge', '604800') + user.set('userPassword', 'anujagaiin') + time.sleep(0.5) + expire_time3 = user.get_attr_val_utf8('passwordExpirationTime') + assert expire_time != expire_time2 != expire_time3 + config.replace('passwordExp', 'off') + + +if __name__ == '__main__': + # Run isolated + # -s for DEBUG mode + CURRENT_FILE = os.path.realpath(__file__) + pytest.main("-s %s" % CURRENT_FILE) diff --git a/dirsrvtests/tests/suites/password/pwd_algo_test.py b/dirsrvtests/tests/suites/password/pwd_algo_test.py new file mode 100644 index 0000000..2eb0ff3 --- /dev/null +++ b/dirsrvtests/tests/suites/password/pwd_algo_test.py @@ -0,0 +1,190 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2019 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. 
+# --- END COPYRIGHT BLOCK --- +# +import pytest +from lib389.tasks import * +from lib389.utils import * +from lib389.topologies import topology_st +from lib389._constants import DEFAULT_SUFFIX, HOST_STANDALONE, PORT_STANDALONE +from lib389.idm.user import UserAccounts, TEST_USER_PROPERTIES +from lib389.paths import Paths + +default_paths = Paths() + +pytestmark = pytest.mark.tier1 + +DEBUGGING = os.getenv('DEBUGGING', False) +USER_DN = 'uid=user,ou=People,%s' % DEFAULT_SUFFIX + +logging.getLogger(__name__).setLevel(logging.INFO) +log = logging.getLogger(__name__) + + +def _test_bind(user, password): + result = True + try: + userconn = user.bind(password) + userconn.unbind_s() + except ldap.INVALID_CREDENTIALS: + result = False + return result + + +def _test_algo(inst, algo_name): + inst.config.set('passwordStorageScheme', algo_name) + + users = UserAccounts(inst, DEFAULT_SUFFIX) + user_props = TEST_USER_PROPERTIES.copy() + user_props.update({'uid': 'user', 'cn': 'buser', 'userpassword': 'Secret123'}) + user = users.create(properties=user_props) + + # Make sure when we read the userPassword field, it is the correct ALGO + pw_field = user.get_attr_val_utf8('userPassword') + + if algo_name != 'CLEAR' and algo_name != 'DEFAULT': + assert (algo_name[:5].lower() in pw_field.lower()) + # Now make sure a bind works + assert (_test_bind(user, 'Secret123')) + # Bind with a wrong shorter password, should fail + assert (not _test_bind(user, 'Wrong')) + # Bind with a wrong longer password, should fail + assert (not _test_bind(user, 'This is even more wrong')) + # Bind with a wrong exact length password. 
+ assert (not _test_bind(user, 'Alsowrong')) + # Bind with a subset password, should fail + assert (not _test_bind(user, 'Secret')) + if not algo_name.startswith('CRYPT'): + # Bind with a subset password that is 1 char shorter, to detect off by 1 in clear + assert (not _test_bind(user, 'Secret12')) + # Bind with a superset password, should fail + assert (not _test_bind(user, 'Secret123456')) + + # Delete the user + user.delete() + + +def _test_bind_for_pbkdf2_algo(inst, password): + result = True + userconn = ldap.initialize("ldap://%s:%s" % (HOST_STANDALONE, PORT_STANDALONE)) + try: + userconn.simple_bind_s(USER_DN, password) + userconn.unbind_s() + except ldap.INVALID_CREDENTIALS: + result = False + return result + + +def _test_algo_for_pbkdf2(inst, algo_name): + inst.config.set('passwordStorageScheme', algo_name) + + if DEBUGGING: + print('Testing %s' % algo_name) + + # Create the user with a password + users = UserAccounts(inst, DEFAULT_SUFFIX) + user_props = TEST_USER_PROPERTIES.copy() + user_props.update({'uid': 'user', 'cn': 'buser', 'userpassword': 'Secret123'}) + user = users.create(properties=user_props) + + # Make sure when we read the userPassword field, it is the correct ALGO + pw_field = user.get_attr_val_utf8_l('userPassword') + + if DEBUGGING: + print(pw_field) + + if algo_name != 'CLEAR': + lalgo_name = algo_name.lower() + assert (pw_field.startswith('{' + lalgo_name + '}')) + + # Now make sure a bind works + assert (_test_bind_for_pbkdf2_algo(inst, 'Secret123')) + # Bind with a wrong shorter password, should fail + assert (not _test_bind_for_pbkdf2_algo(inst, 'Wrong')) + # Bind with a wrong longer password, should fail + assert (not _test_bind_for_pbkdf2_algo(inst, 'This is even more wrong')) + # Bind with a password that has the algo in the name + assert (not _test_bind_for_pbkdf2_algo(inst, '{%s}SomeValues....' % algo_name)) + # Bind with a wrong exact length password. 
+ assert (not _test_bind_for_pbkdf2_algo(inst, 'Alsowrong')) + # Bind with a subset password, should fail + assert (not _test_bind_for_pbkdf2_algo(inst, 'Secret')) + if algo_name != 'CRYPT': + # Bind with a subset password that is 1 char shorter, to detect off by 1 in clear + assert (not _test_bind_for_pbkdf2_algo(inst, 'Secret12')) + # Bind with a superset password, should fail + assert (not _test_bind_for_pbkdf2_algo(inst, 'Secret123456')) + + # Delete the user + inst.delete_s(USER_DN) + + +ALGO_SET = ('CLEAR', 'CRYPT', 'CRYPT-MD5', 'CRYPT-SHA256', 'CRYPT-SHA512', + 'MD5', 'SHA', 'SHA256', 'SHA384', 'SHA512', 'SMD5', 'SSHA', + 'SSHA256', 'SSHA384', 'SSHA512', 'PBKDF2_SHA256', 'DEFAULT', + 'GOST_YESCRYPT', +) + +if default_paths.rust_enabled and ds_is_newer('1.4.3.0'): + ALGO_SET = ('CLEAR', 'CRYPT', 'CRYPT-MD5', 'CRYPT-SHA256', 'CRYPT-SHA512', + 'MD5', 'SHA', 'SHA256', 'SHA384', 'SHA512', 'SMD5', 'SSHA', + 'SSHA256', 'SSHA384', 'SSHA512', 'PBKDF2_SHA256', 'DEFAULT', + 'PBKDF2-SHA1', 'PBKDF2-SHA256', 'PBKDF2-SHA512', 'GOST_YESCRYPT', + ) + +@pytest.mark.parametrize("algo", ALGO_SET) +def test_pwd_algo_test(topology_st, algo): + """Assert that all of our password algorithms correctly PASS and FAIL varying + password conditions. + + :id: fbb308a8-8374-4abd-b786-1f88e56f7650 + :parametrized: yes + """ + if algo == 'DEFAULT': + if ds_is_older('1.4.0'): + pytest.skip("Not implemented") + _test_algo(topology_st.standalone, algo) + log.info('Test %s PASSED' % algo) + + +@pytest.mark.ds397 +def test_pbkdf2_algo(topology_st): + """Changing password storage scheme to PBKDF2_SHA256 + and trying to bind with different password combination + + :id: 112e265b-f468-4758-b8fa-ed8742de0182 + :setup: Standalone instance + :steps: + 1. Change password storage scheme to PBKDF2_SHA256 + 2. Add a test user entry + 3. Bind with correct password + 4. Bind with incorrect password combination(brute-force) + :expectedresults: + 1. Operation should be successful + 2. 
Operation should be successful + 3. Bind should be successful + 4. Should not allow to bind with incorrect password + """ + if DEBUGGING: + # Add debugging steps(if any)... + log.info("ATTACH NOW") + time.sleep(30) + + # Merge this to the password suite in the future + + for algo in ('PBKDF2_SHA256',): + for i in range(0, 10): + _test_algo_for_pbkdf2(topology_st.standalone, algo) + + log.info('Test PASSED') + + +if __name__ == '__main__': + # Run isolated + # -s for DEBUG mode + CURRENT_FILE = os.path.realpath(__file__) + pytest.main("-s %s" % CURRENT_FILE) diff --git a/dirsrvtests/tests/suites/password/pwd_crypt_asterisk_test.py b/dirsrvtests/tests/suites/password/pwd_crypt_asterisk_test.py new file mode 100644 index 0000000..d76614d --- /dev/null +++ b/dirsrvtests/tests/suites/password/pwd_crypt_asterisk_test.py @@ -0,0 +1,50 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2021 William Brown +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. +# --- END COPYRIGHT BLOCK --- +# +import ldap +import pytest +from lib389.topologies import topology_st +from lib389.idm.user import UserAccounts +from lib389._constants import (DEFAULT_SUFFIX, PASSWORD) + +pytestmark = pytest.mark.tier1 + +def test_password_crypt_asterisk_is_rejected(topology_st): + """It was reported that {CRYPT}* was allowing all passwords to be + valid in the bind process. This checks that we should be rejecting + these as they should represent locked accounts. Similar, {CRYPT}! + + :id: 0b8f1a6a-f3eb-4443-985e-da14d0939dc3 + :setup: Single instance + :steps: 1. Set a password hash in with CRYPT and the content * + 2. Test a bind + 3. Set a password hash in with CRYPT and the content ! + 4. Test a bind + :expectedresults: + 1. Successfully set the values + 2. The bind fails + 3. Successfully set the values + 4. 
The bind fails + """ + topology_st.standalone.config.set('nsslapd-allow-hashed-passwords', 'on') + topology_st.standalone.config.set('nsslapd-enable-upgrade-hash', 'off') + + users = UserAccounts(topology_st.standalone, DEFAULT_SUFFIX) + user = users.create_test_user() + + user.set('userPassword', "{CRYPT}*") + + # Attempt to bind with incorrect password. + with pytest.raises(ldap.INVALID_CREDENTIALS): + badconn = user.bind('badpassword') + + user.set('userPassword', "{CRYPT}!") + # Attempt to bind with incorrect password. + with pytest.raises(ldap.INVALID_CREDENTIALS): + badconn = user.bind('badpassword') + diff --git a/dirsrvtests/tests/suites/password/pwd_lockout_bypass_test.py b/dirsrvtests/tests/suites/password/pwd_lockout_bypass_test.py new file mode 100644 index 0000000..a4e0094 --- /dev/null +++ b/dirsrvtests/tests/suites/password/pwd_lockout_bypass_test.py @@ -0,0 +1,82 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2017 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. +# --- END COPYRIGHT BLOCK --- +# +import pytest +from lib389.tasks import * +from lib389.utils import * +from lib389.topologies import topology_st +from lib389.idm.user import UserAccounts, TEST_USER_PROPERTIES +import ldap + +pytestmark = pytest.mark.tier1 + +# The irony of these names is not lost on me. +GOOD_PASSWORD = 'password' +BAD_PASSWORD = 'aontseunao' + +logging.getLogger(__name__).setLevel(logging.INFO) +log = logging.getLogger(__name__) + + +def test_lockout_bypass(topology_st): + """Check basic password lockout functionality + + :id: 2482a992-1719-495c-b75b-78fe5c48c873 + :setup: Standalone instance + :steps: + 1. Set passwordMaxFailure to 1 + 2. Set passwordLockDuration to 7 + 3. Set passwordLockout to 'on' + 4. Create a user + 5. Set a userPassword attribute + 6. Bind as the user with a bad credentials + 7. Bind as the user with a bad credentials + 8. 
Bind as the user with a good credentials + :expectedresults: + 1. passwordMaxFailure should be successfully set + 2. passwordLockDuration should be successfully set + 3. passwordLockout should be successfully set + 4. User should be created + 5. userPassword should be successfully set + 6. Should throw an invalid credentials error + 7. Should throw a constraint violation error + 8. Should throw a constraint violation error + """ + + inst = topology_st.standalone + + # Configure the lock policy + inst.config.set('passwordMaxFailure', '1') + inst.config.set('passwordLockoutDuration', '99999') + inst.config.set('passwordLockout', 'on') + + # Create the account + users = UserAccounts(inst, DEFAULT_SUFFIX) + testuser = users.create(properties=TEST_USER_PROPERTIES) + testuser.set('userPassword', GOOD_PASSWORD) + + conn = testuser.bind(GOOD_PASSWORD) + assert conn != None + conn.unbind_s() + + # Bind with bad creds twice + # This is the failure. + with pytest.raises(ldap.INVALID_CREDENTIALS): + conn = testuser.bind(BAD_PASSWORD) + # Now we should not be able to ATTEMPT the bind. It doesn't matter that + # we disclose that we have hit the rate limit here, what matters is that + # it exists. + with pytest.raises(ldap.CONSTRAINT_VIOLATION): + conn = testuser.bind(BAD_PASSWORD) + + # now bind with good creds + # Should be error 19 still. + with pytest.raises(ldap.CONSTRAINT_VIOLATION): + conn = testuser.bind(GOOD_PASSWORD) + + diff --git a/dirsrvtests/tests/suites/password/pwd_log_test.py b/dirsrvtests/tests/suites/password/pwd_log_test.py new file mode 100644 index 0000000..55ef415 --- /dev/null +++ b/dirsrvtests/tests/suites/password/pwd_log_test.py @@ -0,0 +1,87 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2019 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. 
+# --- END COPYRIGHT BLOCK --- +# + +import logging +import pytest +from lib389.tasks import * +from lib389.topologies import topology_st +from lib389.idm.user import UserAccounts, TEST_USER_PROPERTIES +from lib389._constants import DEFAULT_SUFFIX + +pytestmark = pytest.mark.tier1 + +logging.getLogger(__name__).setLevel(logging.DEBUG) +log = logging.getLogger(__name__) + + +@pytest.mark.ds365 +def test_hide_unhashed_pwd(topology_st): + """Change userPassword, enable hiding of un-hashed + password and check the audit logs. + + :id: c4a5d08d-f525-459b-82b9-3f68dae6fc71 + :setup: Standalone instance + :steps: + 1. Add a test user entry + 2. Set a new password for user and nsslapd-auditlog-logging-enabled to 'on' + 3. Disable nsslapd-auditlog-logging-hide-unhashed-pw + 4. Check the audit logs + 5. Set a new password for user and nsslapd-auditlog-logging-hide-unhashed-pw to 'on' + 6. Check the audit logs + :expectedresults: + 1. User addition should be successful + 2. New password should be set and audit logs should be enabled + 3. Operation should be successful + 4. Audit logs should show password without hash + 5. Operation should be successful + 6. 
Audit logs should hide password which is un-hashed + """ + + users = UserAccounts(topology_st.standalone, DEFAULT_SUFFIX) + user_props = TEST_USER_PROPERTIES.copy() + user_props.update({'uid': 'user', 'cn': 'buser', 'userpassword': 'Secret123'}) + user = users.create(properties=user_props) + + # Enable the audit log + topology_st.standalone.config.set('nsslapd-auditlog-logging-enabled','on') + + # Allow the unhashed password to be written to audit log + topology_st.standalone.config.set('nsslapd-auditlog-logging-hide-unhashed-pw', 'off') + topology_st.standalone.config.set('nsslapd-unhashed-pw-switch', 'on') + + # Set new password, and check the audit log + user.reset_password('mypassword') + + # Check audit log + time.sleep(1) + if not topology_st.standalone.searchAuditLog('unhashed#user#password: mypassword'): + log.fatal('failed to find unhashed password in auditlog') + assert False + + # Hide unhashed password in audit log + topology_st.standalone.config.set('nsslapd-auditlog-logging-hide-unhashed-pw', 'on') + + # Modify password, and check the audit log + user.reset_password('hidepassword') + + # Check audit log + time.sleep(1) + if topology_st.standalone.searchAuditLog('unhashed#user#password: hidepassword'): + log.fatal('Found unhashed password in auditlog') + assert False + + log.info('Test complete') + + +if __name__ == '__main__': + # Run isolated + # -s for DEBUG mode + CURRENT_FILE = os.path.realpath(__file__) + pytest.main("-s %s" % CURRENT_FILE) + diff --git a/dirsrvtests/tests/suites/password/pwd_upgrade_on_bind_test.py b/dirsrvtests/tests/suites/password/pwd_upgrade_on_bind_test.py new file mode 100644 index 0000000..4612650 --- /dev/null +++ b/dirsrvtests/tests/suites/password/pwd_upgrade_on_bind_test.py @@ -0,0 +1,231 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2019 William Brown +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. 
+# --- END COPYRIGHT BLOCK --- +# +import ldap +import pytest +from lib389.utils import * +from lib389.topologies import topology_st +from lib389.idm.user import UserAccounts +from lib389._constants import (DEFAULT_SUFFIX, DN_CONFIG, PASSWORD, DN_DM) + +pytestmark = pytest.mark.tier1 + +CONFIG_ATTR = 'passwordSendExpiringTime' +USER_DN = 'uid=tuser,ou=people,{}'.format(DEFAULT_SUFFIX) +USER_RDN = 'tuser' +USER_PASSWD = 'secret123' +USER_ACI = '(targetattr="userpassword")(version 3.0; acl "pwp test"; allow (all) userdn="ldap:///self";)' + +@pytest.fixture +def add_user(topology_st, request): + """Adds a user for binding""" + + log.info('Add the user') + + users = UserAccounts(topology_st.standalone, DEFAULT_SUFFIX) + user = users.create(properties={ + 'uid': USER_RDN, + 'cn': USER_RDN, + 'sn': USER_RDN, + 'uidNumber': '3000', + 'gidNumber': '4000', + 'homeDirectory': '/home/user', + 'description': 'd_e_s_c', + 'userPassword': USER_PASSWD + }) + + def fin(): + """Removes the user entry""" + + log.info('Remove the user entry') + topology_st.standalone.simple_bind_s(DN_DM, PASSWORD) + user.delete() + + request.addfinalizer(fin) + +@pytest.fixture +def global_policy(topology_st, request): + """Sets the required global + password policy attributes under + cn=config entry + """ + + attrs = {'passwordExp': '', + 'passwordMaxAge': '', + 'passwordWarning': '', + CONFIG_ATTR: ''} + + log.info('Get the default values') + entry = topology_st.standalone.getEntry(DN_CONFIG, ldap.SCOPE_BASE, + '(objectClass=*)', attrs.keys()) + + for key in attrs.keys(): + attrs[key] = entry.getValue(key) + + log.info('Set the new values') + topology_st.standalone.config.replace_many(('passwordExp', 'on'), + ('passwordMaxAge', '172800'), + ('passwordWarning', '86400'), + (CONFIG_ATTR, 'on')) + + def fin(): + """Resets the defaults""" + + log.info('Reset the defaults') + topology_st.standalone.simple_bind_s(DN_DM, PASSWORD) + for key in attrs.keys(): + topology_st.standalone.config.replace(key, 
attrs[key]) + + request.addfinalizer(fin) + # A short sleep is required after modifying the password policy or cn=config + time.sleep(0.5) + +def test_password_hash_on_upgrade(topology_st, global_policy, add_user): + """If a legacy password hash is present, assert that on a correct bind + the hash is "upgraded" to the latest-and-greatest hash format on the + server. + + Assert also that password FAILURE does not alter the password. + Assert that the password expiration date, history, etc is not modified + as password hash upgrade on bind should be invisible to the user. + + + :id: 42cf99e6-454d-46f5-8f1c-8bb699864a07 + :setup: Single instance + :steps: 1. Set a password hash in SSHA256, and hash to pbkdf2 statically + 2. Get initial passwordExpirationtime + 3. Test a faulty bind + 4. Assert the PW is SSHA256 + 5. Test a correct bind + 6. Assert the PW is PBKDF2 + 7. Assert the passwordExpirationtime hasn't changed after upgrade on bind + :expectedresults: + 1. Successfully set the values + 2. Successfully get the passwordExpirationtime + 3. The bind fails + 4. The PW is SSHA256 + 5. The bind succeeds + 6. The PW is PBKDF2 + 7. pwd expiration time hasn't been modified + + """ + # Make sure the server is set to PBKDF2 + topology_st.standalone.config.set('passwordStorageScheme', 'PBKDF2_SHA256') + topology_st.standalone.config.set('nsslapd-allow-hashed-passwords', 'on') + topology_st.standalone.config.set('nsslapd-enable-upgrade-hash', 'on') + + users = UserAccounts(topology_st.standalone, DEFAULT_SUFFIX) + user = users.get(USER_RDN) + + # Static version of "password" in SSHA256. + user.set('userPassword', "{SSHA256}9eliEQgjfc4Fcj1IXZtc/ne1GRF+OIjz/NfSTX4f7HByGMQrWHLMLA==") + ts1 = user.get_attr_val_utf8('passwordExpirationTime') + + # Attempt to bind with incorrect password. 
+ with pytest.raises(ldap.INVALID_CREDENTIALS): + badconn = user.bind('badpassword') + + # Check the pw is SSHA256 + up = user.get_attr_val_utf8('userPassword') + assert up.startswith('{SSHA256}') + + # Bind with correct, trigger update on bind + time.sleep(1) + conn = user.bind(PASSWORD) + + # Check the pw is now PBKDF2! + up = user.get_attr_val_utf8('userPassword') + assert up.startswith('{PBKDF2_SHA256}') + + # Verify passwordExpirationtime has not been reset after hash upgrade + ts2 = user.get_attr_val_utf8('passwordExpirationTime') + assert ts1 == ts2 + +def test_password_hash_on_upgrade_clearcrypt(topology_st): + """In some deployments, some passwords MAY be in clear or crypt which have + specific possible application integrations allowing the read value to be + processed by other entities. We avoid upgrading these two, to prevent + breaking these integrations. + + :id: 27712492-a4bf-4ea9-977b-b4850ddfb628 + :setup: Single instance + :steps: 1. Set a password hash in CLEAR, and hash to pbkdf2 statically + 2. Test a correct bind + 3. Assert the PW is CLEAR + 4. Set the password to CRYPT + 5. Test a correct bind + 6. Assert the PW is CRYPT + :expectedresults: + 1. Successfully set the values + 2. The bind succeeds + 3. The PW is CLEAR + 4. The set succeeds + 5. The bind succeeds + 6. 
The PW is CRYPT + """ + # Make sure the server is set to pkbdf + topology_st.standalone.config.set('nsslapd-allow-hashed-passwords', 'on') + topology_st.standalone.config.set('nsslapd-enable-upgrade-hash', 'on') + + users = UserAccounts(topology_st.standalone, DEFAULT_SUFFIX) + user = users.create_test_user(1001) + + topology_st.standalone.config.set('passwordStorageScheme', 'CLEAR') + user.set('userPassword', "password") + topology_st.standalone.config.set('passwordStorageScheme', 'PBKDF2_SHA256') + + conn = user.bind(PASSWORD) + up = user.get_attr_val_utf8('userPassword') + assert up.startswith('password') + + user.set('userPassword', "{crypt}I0S3Ry62CSoFg") + conn = user.bind(PASSWORD) + up = user.get_attr_val_utf8('userPassword') + assert up.startswith('{crypt}') + +def test_password_hash_on_upgrade_disable(topology_st): + """If a legacy password hash is present, assert that on a correct bind + the hash is "upgraded" to the latest-and-greatest hash format on the + server. But some people may not like this, so test that we can disable + the feature too! + + :id: ed315145-a3d1-4f17-b04c-73d3638e7ade + :setup: Single instance + :steps: 1. Set a password hash in SSHA256, and hash to pbkdf2 statically + 2. Test a faulty bind + 3. Assert the PW is SSHA256 + 4. Test a correct bind + 5. Assert the PW is SSHA256 + :expectedresults: + 1. Successfully set the values + 2. The bind fails + 3. The PW is SSHA256 + 4. The bind succeeds + 5. The PW is SSHA256 + """ + # Make sure the server is set to pkbdf + topology_st.standalone.config.set('passwordStorageScheme', 'PBKDF2_SHA256') + topology_st.standalone.config.set('nsslapd-allow-hashed-passwords', 'on') + topology_st.standalone.config.set('nsslapd-enable-upgrade-hash', 'off') + + users = UserAccounts(topology_st.standalone, DEFAULT_SUFFIX) + user = users.create_test_user(1002) + # Static version of "password" in SSHA256. 
+ user.set('userPassword', "{SSHA256}9eliEQgjfc4Fcj1IXZtc/ne1GRF+OIjz/NfSTX4f7HByGMQrWHLMLA==") + # Attempt to bind with incorrect password. + with pytest.raises(ldap.INVALID_CREDENTIALS): + badconn = user.bind('badpassword') + # Check the pw is SSHA256 + up = user.get_attr_val_utf8('userPassword') + assert up.startswith('{SSHA256}') + + # Bind with correct. + conn = user.bind(PASSWORD) + # Check the pw is NOT upgraded! + up = user.get_attr_val_utf8('userPassword') + assert up.startswith('{SSHA256}') diff --git a/dirsrvtests/tests/suites/password/pwp_gracel_test.py b/dirsrvtests/tests/suites/password/pwp_gracel_test.py new file mode 100644 index 0000000..af4d4b8 --- /dev/null +++ b/dirsrvtests/tests/suites/password/pwp_gracel_test.py @@ -0,0 +1,123 @@ +""" +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2020 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. +# --- END COPYRIGHT BLOCK --- +""" + +import os +import pytest +from lib389.topologies import topology_st as topo +from lib389.idm.user import UserAccounts, UserAccount +from lib389._constants import DEFAULT_SUFFIX +from lib389.config import Config +import ldap +import time + +pytestmark = pytest.mark.tier1 + + +def test_password_gracelimit_section(topo): + """Password grace limit section. + + :id: d6f4a7fa-473b-11ea-8766-8c16451d917c + :setup: Standalone + :steps: + 1. Resets the default password policy + 2. Turning on password expiration, passwordMaxAge: 30 and passwordGraceLimit: 7 + 3. Check users have 7 grace login attempts after their password expires + 4. Reset the user passwords to start the clock + 5. The the 8th should fail + 6. Now try resetting the password before the grace login attempts run out + 7. Bind 6 times, and on the 7th change the password + 8. Setting passwordMaxAge: 1 and passwordGraceLimit: 7 + 9. Modify the users passwords to start the clock of zero + 10. First 7 good attempts, 8th should fail + 11. 
Setting the passwordMaxAge to 3 seconds once more and the passwordGraceLimit to 0 + 12. Modify the users passwords to start the clock + 13. Users should be blocked automatically after 3 second + :expectedresults: + 1. Success + 2. Success + 3. Success + 4. Success + 5. Success + 6. Success + 7. Success + 8. Success + 9. Success + 10. Success + 11. Success + 12. Success + 13. Success + """ + config = Config(topo.standalone) + # Resets the default password policy + config.replace_many( + ('passwordmincategories', '1'), + ('passwordStorageScheme', 'CLEAR')) + user = UserAccounts(topo.standalone, DEFAULT_SUFFIX, rdn=None).create_test_user() + # Turning on password expiration, passwordMaxAge: 30 and passwordGraceLimit: 7 + config.replace_many( + ('passwordMaxAge', '3'), + ('passwordGraceLimit', '7'), + ('passwordexp', 'on'), + ('passwordwarning', '30')) + # Reset the user passwords to start the clock + # Check users have 7 grace login attempts after their password expires + user.replace('userpassword', '00fr3d1') + for _ in range(3): + time.sleep(1) + user_account = UserAccount(topo.standalone, user.dn) + for _ in range(7): + conn = user_account.bind('00fr3d1') + # The the 8th should fail + with pytest.raises(ldap.INVALID_CREDENTIALS): + conn = user_account.bind('00fr3d1') + # Now try resetting the password before the grace login attempts run out + user.replace('userpassword', '00fr3d2') + for _ in range(3): + time.sleep(1) + user_account = UserAccount(topo.standalone, user.dn) + # Bind 6 times, and on the 7th change the password + for _ in range(6): + conn = user_account.bind('00fr3d2') + user.replace('userpassword', '00fr3d1') + for _ in range(3): + time.sleep(1) + for _ in range(7): + conn = user_account.bind('00fr3d1') + with pytest.raises(ldap.INVALID_CREDENTIALS): + conn = user_account.bind('00fr3d1') + # Setting passwordMaxAge: 1 and passwordGraceLimit: 7 + config.replace_many( + ('passwordMaxAge', '1'), + ('passwordwarning', '1')) + # Modify the users passwords 
to start the clock of zero + user.replace('userpassword', '00fr3d2') + time.sleep(1) + # First 7 good attempts, 8th should fail + user_account = UserAccount(topo.standalone, user.dn) + for _ in range(7): + conn = user_account.bind('00fr3d2') + with pytest.raises(ldap.INVALID_CREDENTIALS): + conn = user_account.bind('00fr3d2') + # Setting the passwordMaxAge to 3 seconds once more and the passwordGraceLimit to 0 + config.replace_many( + ('passwordMaxAge', '3'), + ('passwordGraceLimit', '0')) + # Modify the users passwords to start the clock + # Users should be blocked automatically after 3 second + user.replace('userpassword', '00fr3d1') + for _ in range(3): + time.sleep(1) + with pytest.raises(ldap.INVALID_CREDENTIALS): + conn = user_account.bind('00fr3d1') + + +if __name__ == "__main__": + CURRENT_FILE = os.path.realpath(__file__) + pytest.main("-s -v %s" % CURRENT_FILE) \ No newline at end of file diff --git a/dirsrvtests/tests/suites/password/pwp_history_test.py b/dirsrvtests/tests/suites/password/pwp_history_test.py new file mode 100644 index 0000000..ba5449d --- /dev/null +++ b/dirsrvtests/tests/suites/password/pwp_history_test.py @@ -0,0 +1,333 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2016 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. 
+# --- END COPYRIGHT BLOCK --- +# +import pytest +import time +import logging +from lib389.tasks import * +from lib389.utils import ds_is_newer +from lib389.topologies import topology_st +from lib389.idm.user import UserAccounts, TEST_USER_PROPERTIES +from lib389.idm.directorymanager import DirectoryManager +from lib389.idm.organizationalunit import OrganizationalUnits +from lib389._constants import DEFAULT_SUFFIX + +pytestmark = pytest.mark.tier1 + +logging.getLogger(__name__).setLevel(logging.DEBUG) +log = logging.getLogger(__name__) + +USER_PWD = 'password' + + +@pytest.fixture(scope="function") +def user(topology_st, request): + """Add and remove a test user""" + + dm = DirectoryManager(topology_st.standalone) + + # Add aci so users can change their own password + USER_ACI = '(targetattr="userpassword || passwordHistory")(version 3.0; acl "pwp test"; allow (all) userdn="ldap:///self";)' + ous = OrganizationalUnits(topology_st.standalone, DEFAULT_SUFFIX) + ou = ous.get('people') + ou.add('aci', USER_ACI) + + # Create a user + users = UserAccounts(topology_st.standalone, DEFAULT_SUFFIX) + user = users.create_test_user() + user.set('userpassword', USER_PWD) + def fin(): + dm.rebind() + user.delete() + ou.remove('aci', USER_ACI) + request.addfinalizer(fin) + return user + + +def test_history_is_not_overwritten(topology_st, user): + """Test that passwordHistory user attribute is not overwritten + + :id: 1b311532-dd55-4072-88a9-1f960cb371bd + :setup: Standalone instance, a test user + :steps: + 1. Configure password history policy as bellow: + passwordHistory: on + passwordInHistory: 3 + 2. Change the password 3 times + 3. Try to change the password 2 more times to see + if it rewrites passwordHistory even on a failure attempt + 4. Try to change the password to the initial value (it should be + still in history) + :expectedresults: + 1. Password history policy should be configured successfully + 2. Success + 3. 
Password changes should be correctly rejected + with Constrant Violation error + 4. Password change should be correctly rejected + with Constrant Violation error + """ + + topology_st.standalone.config.replace_many(('passwordHistory', 'on'), + ('passwordInHistory', '3')) + log.info('Configured password policy.') + time.sleep(1) + + # Bind as the test user + user.rebind(USER_PWD) + time.sleep(.5) + + # Change the password 3 times + user.set('userpassword', 'password1') + user.rebind('password1') + time.sleep(.5) + user.set('userpassword', 'password2') + user.rebind('password2') + time.sleep(.5) + user.set('userpassword', 'password3') + user.rebind('password3') + time.sleep(.5) + + # Try to change the password 2 more times to see + # if it rewrites passwordHistory even on a failure attempt + with pytest.raises(ldap.CONSTRAINT_VIOLATION): + user.set('userpassword', 'password2') + time.sleep(.5) + with pytest.raises(ldap.CONSTRAINT_VIOLATION): + user.set('userpassword', 'password1') + time.sleep(.5) + + # Try to change the password to the initial value (it should be still in history) + with pytest.raises(ldap.CONSTRAINT_VIOLATION): + user.set('userpassword', USER_PWD) + + +def test_basic(topology_st, user): + """Test basic password policy history feature functionality + + :id: 83d74f7d-3036-4944-8839-1b40bbf265ff + :setup: Standalone instance, a test user + :steps: + 1. Configure password history policy as bellow: + passwordHistory: on + passwordInHistory: 3 + passwordChange: on + passwordStorageScheme: CLEAR + 2. Attempt to change password to the same password + 3. Change password four times + 4. Check that we only have 3 passwords stored in history + 5. Attempt to change the password to previous passwords + 6. Reset password by Directory Manager (admin reset) + 7. Try and change the password to the previous password before the reset + 8. Test passwordInHistory set to "0" rejects only the current password + 9. 
Test passwordInHistory set to "2" rejects previous passwords + :expectedresults: + 1. Password history policy should be configured successfully + 2. Password change should be correctly rejected + with Constrant Violation error + 3. Password should be successfully changed + 4. Only 3 passwords should be stored in history + 5. Password changes should be correctly rejected + with Constrant Violation error + 6. Password should be successfully reset + 7. Password change should be correctly rejected + with Constrant Violation error + 8. Success + 9. Success + """ + + # + # Configure password history policy and add a test user + # + try: + topology_st.standalone.config.replace_many(('passwordHistory', 'on'), + ('passwordInHistory', '3'), + ('passwordChange', 'on'), + ('passwordStorageScheme', 'CLEAR'), + ('nsslapd-auditlog-logging-enabled', 'on')) + log.info('Configured password policy.') + except ldap.LDAPError as e: + log.fatal('Failed to configure password policy: ' + str(e)) + assert False + time.sleep(1) + + # Bind as the test user + user.rebind(USER_PWD) + + # + # Test that password history is enforced. 
+ # + # Attempt to change password to the same password + try: + user.set('userpassword', 'password') + log.info('Incorrectly able to to set password to existing password.') + assert False + except ldap.CONSTRAINT_VIOLATION: + log.info('Password change correctly rejected') + except ldap.LDAPError as e: + log.fatal('Failed to attempt to change password: ' + str(e)) + assert False + + # + # Keep changing password until we fill the password history (3) + # + user.set('userpassword', 'password1') + user.rebind('password1') + time.sleep(.5) + user.set('userpassword', 'password2') + user.rebind('password2') + time.sleep(.5) + user.set('userpassword', 'password3') + user.rebind('password3') + time.sleep(.5) + user.set('userpassword', 'password4') + user.rebind('password4') + time.sleep(.5) + + # + # Check that we only have 3 passwords stored in history + # + pwds = user.get_attr_vals('passwordHistory') + if len(pwds) != 3: + log.fatal('Incorrect number of passwords stored in history: %d' % + len(pwds)) + log.error('password history: ' + str(pwds)) + assert False + else: + log.info('Correct number of passwords found in history.') + + # + # Attempt to change the password to previous passwords + # + try: + user.set('userpassword', 'password1') + log.fatal('Incorrectly able to to set password to previous password1.') + log.fatal('password history: ' + str(user.get_attr_vals('passwordhistory'))) + assert False + except ldap.CONSTRAINT_VIOLATION: + log.info('Password change correctly rejected') + except ldap.LDAPError as e: + log.fatal('Failed to attempt to change password: ' + str(e)) + assert False + try: + user.set('userpassword', 'password2') + log.fatal('Incorrectly able to to set password to previous password2.') + log.fatal('password history: ' + str(user.get_attr_vals('passwordhistory'))) + assert False + except ldap.CONSTRAINT_VIOLATION: + log.info('Password change correctly rejected') + except ldap.LDAPError as e: + log.fatal('Failed to attempt to change password: ' + 
str(e)) + assert False + try: + user.set('userpassword', 'password3') + log.fatal('Incorrectly able to to set password to previous password3.') + log.fatal('password history: ' + str(user.get_attr_vals('passwordhistory'))) + assert False + except ldap.CONSTRAINT_VIOLATION: + log.info('Password change correctly rejected') + except ldap.LDAPError as e: + log.fatal('Failed to attempt to change password: ' + str(e)) + assert False + + # + # Reset password by Directory Manager(admin reset) + # + dm = DirectoryManager(topology_st.standalone) + dm.rebind() + time.sleep(.5) + user.set('userpassword', 'password-reset') + time.sleep(1) + + # Try and change the password to the previous password before the reset + try: + user.rebind('password-reset') + user.set('userpassword', 'password4') + log.fatal('Incorrectly able to to set password to previous password4.') + log.fatal('password history: ' + str(user.get_attr_vals('passwordhistory'))) + assert False + except ldap.CONSTRAINT_VIOLATION: + log.info('Password change correctly rejected') + except ldap.LDAPError as e: + log.fatal('Failed to attempt to change password: ' + str(e)) + assert False + + if ds_is_newer("1.4.1.2"): + # + # Test passwordInHistory to 0 + # + dm = DirectoryManager(topology_st.standalone) + dm.rebind() + try: + topology_st.standalone.config.replace('passwordInHistory', '0') + log.info('Configured passwordInHistory to 0.') + except ldap.LDAPError as e: + log.fatal('Failed to configure password policy (passwordInHistory to 0): ' + str(e)) + assert False + time.sleep(1) + + # Verify the older passwords in the entry (passwordhistory) are ignored + user.rebind('password-reset') + user.set('userpassword', 'password4') + time.sleep(.5) + try: + user.set('userpassword', 'password4') + log.fatal('Incorrectly able to to set password to current password4.') + log.fatal('password history: ' + str(user.get_attr_vals('passwordhistory'))) + assert False + except ldap.CONSTRAINT_VIOLATION: + log.info('Password change 
correctly rejected') + except ldap.LDAPError as e: + log.fatal('Failed to attempt to change password: ' + str(e)) + assert False + + # Need to make one successful update so history list is reset + user.set('userpassword', 'password5') + + # + # Set the history count back to a positive value and make sure things still work + # as expected + # + dm = DirectoryManager(topology_st.standalone) + dm.rebind() + try: + topology_st.standalone.config.replace('passwordInHistory', '2') + log.info('Configured passwordInHistory to 2.') + except ldap.LDAPError as e: + log.fatal('Failed to configure password policy (passwordInHistory to 2): ' + str(e)) + assert False + time.sleep(1) + + try: + user.rebind('password5') + user.set('userpassword', 'password5') + log.fatal('Incorrectly able to to set password to current password5.') + log.fatal('password history: ' + str(user.get_attr_vals('passwordhistory'))) + assert False + except ldap.CONSTRAINT_VIOLATION: + log.info('Password change correctly rejected') + except ldap.LDAPError as e: + log.fatal('Failed to attempt to change password: ' + str(e)) + assert False + + # Test that old password that was in history is not being checked + try: + user.set('userpassword', 'password1') + except ldap.LDAPError as e: + log.fatal('Failed to attempt to change password: ' + str(e)) + log.fatal('password history: ' + str(user.get_attr_vals('passwordhistory'))) + assert False + + # Done + log.info('Test suite PASSED.') + + +if __name__ == '__main__': + # Run isolated + # -s for DEBUG mode + CURRENT_FILE = os.path.realpath(__file__) + pytest.main("-s %s" % CURRENT_FILE) diff --git a/dirsrvtests/tests/suites/password/pwp_test.py b/dirsrvtests/tests/suites/password/pwp_test.py new file mode 100644 index 0000000..cb80dc8 --- /dev/null +++ b/dirsrvtests/tests/suites/password/pwp_test.py @@ -0,0 +1,520 @@ +""" +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2020 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). 
+# See LICENSE for details. +# --- END COPYRIGHT BLOCK --- +""" + +import os +import pytest +from lib389.topologies import topology_st as topo +from lib389.idm.user import UserAccounts, UserAccount +from lib389._constants import DEFAULT_SUFFIX +from lib389.config import Config +from lib389.idm.group import Group +from lib389.utils import ds_is_older, is_fips +import ldap +import time + +pytestmark = pytest.mark.tier1 + +if ds_is_older('1.4'): + DEFAULT_PASSWORD_STORAGE_SCHEME = 'SSHA512' +else: + if is_fips(): + DEFAULT_PASSWORD_STORAGE_SCHEME = 'SSHA512' + else: + DEFAULT_PASSWORD_STORAGE_SCHEME = 'PBKDF2-SHA512' + + +def _create_user(topo, uid, cn, uidNumber, userpassword): + """ + Will Create user + """ + user = UserAccounts(topo.standalone, DEFAULT_SUFFIX).create(properties={ + 'uid': uid, + 'sn': cn.split(' ')[-1], + 'cn': cn, + 'givenname': cn.split(' ')[0], + 'uidNumber': uidNumber, + 'gidNumber': uidNumber, + 'mail': f'{uid}@example.com', + 'userpassword': userpassword, + 'homeDirectory': f'/home/{uid}' + }) + return user + + +def _change_password_with_own(topo, user_dn, password, new_password): + """ + Change user password with user self + """ + conn = UserAccount(topo.standalone, user_dn).bind(password) + real_user = UserAccount(conn, user_dn) + real_user.replace('userpassword', new_password) + + +def _change_password_with_root(topo, user_dn, new_password): + """ + Root will change user password + """ + UserAccount(topo.standalone, user_dn).replace('userpassword', new_password) + + +@pytest.fixture(scope="function") +def _fix_password(topo, request): + user = _create_user(topo, 'dbyers', 'Danny Byers', '1001', 'dbyers1') + user.replace('userpassword', 'dbyers1') + + def fin(): + user.delete() + request.addfinalizer(fin) + + +def test_passwordchange_to_no(topo, _fix_password): + """Change password fo a user even password even though pw policy is set to no + + :id: 16c64ef0-5a20-11ea-a902-8c16451d917b + :setup: Standalone + :steps: + 1. 
def test_passwordchange_to_no(topo, _fix_password):
    """Try to change a user's password even though the password policy forbids it

    :id: 16c64ef0-5a20-11ea-a902-8c16451d917b
    :setup: Standalone
    :steps:
        1. Add a user with uid=dbyers
        2. Set passwordmustchange to off (Must Not Change After Reset)
        3. Set passwordchange to off (May Not Change Password)
        4. Try to change the user's own password while the policy forbids it
           (expected rejection)
        5. Set passwordchange to on (May Change Password)
        6. Change the user's own password
        7. Try to change password with invalid credentials. Should see error message.
    :expectedresults:
        1. Success
        2. Success
        3. Success
        4. Success
        5. Success
        6. Success
        7. Success
    """
    # The user created by the _fix_password fixture
    user = f'uid=dbyers,ou=People,{DEFAULT_SUFFIX}'
    config = Config(topo.standalone)
    # Forbid user-initiated password changes
    config.replace_many(
        ('passwordmustchange', 'off'),
        ('passwordchange', 'off'))
    # Self-change must be rejected while passwordchange is off
    with pytest.raises(ldap.UNWILLING_TO_PERFORM):
        _change_password_with_own(topo, user, 'dbyers1', 'AB')
    # Allow changes again; the user can now change its own password
    config.replace('passwordchange', 'on')
    _change_password_with_own(topo, user, 'dbyers1', 'dbyers1')
    # Binding with a wrong password must fail with INVALID_CREDENTIALS
    with pytest.raises(ldap.INVALID_CREDENTIALS):
        _change_password_with_own(topo, f'uid=dbyers,ou=People,{DEFAULT_SUFFIX}', 'AB', 'dbyers1')
def test_password_check_syntax(topo, _fix_password):
    """Exercise passwordchecksyntax with varying minimum lengths and trivial words

    :id: 1e6fcc9e-5a20-11ea-9659-8c16451d917b
    :setup: Standalone
    :steps:
        1. Set passwordchecksyntax to on
        2. Try to change to a password that violates the policy. Should get error
        3. Attempt to modify password to 'db', which violates the policy
        4. Change minimum password length to 5
        5. Attempt passwords that match uid/givenname/sn (trivial words)
        6. Change minimum password length to 6
        7. Attempt passwords containing the uid/sn/givenname values
        8. Turn passwordchecksyntax off; short and trivial passwords now work
        9. Raise minimum length to 10 and re-enable syntax checking
        10. Try to change to a password that violates the length
        11. Reset the password as Directory Manager
    :expectedresults:
        1. Success
        2. Fail (CONSTRAINT_VIOLATION)
        3. Fail (CONSTRAINT_VIOLATION)
        4. Success
        5. Fail (CONSTRAINT_VIOLATION)
        6. Success
        7. Fail (CONSTRAINT_VIOLATION)
        8. Success
        9. Success
        10. Fail (CONSTRAINT_VIOLATION)
        11. Success
    """
    dbyers_dn = f'uid=dbyers,ou=People,{DEFAULT_SUFFIX}'
    config = Config(topo.standalone)
    # Turn syntax checking on
    config.replace('passwordchecksyntax', 'on')
    # Both of these violate the active policy
    with pytest.raises(ldap.CONSTRAINT_VIOLATION):
        _change_password_with_own(topo, dbyers_dn, 'dbyers1', 'dbyers2')
    with pytest.raises(ldap.CONSTRAINT_VIOLATION):
        _change_password_with_own(topo, dbyers_dn, 'dbyers1', 'db')
    # With minimum length 5, trivial words (uid/givenname/sn) are still rejected
    config.replace('passwordminlength', '5')
    for candidate in ['dbyers', 'Danny', 'byers']:
        with pytest.raises(ldap.CONSTRAINT_VIOLATION):
            _change_password_with_own(topo, dbyers_dn, 'dbyers1', candidate)
    # With minimum length 6, passwords containing entry values are rejected too
    config.replace('passwordminlength', '6')
    for candidate in ['dby3rs1', 'dbyers2', '67Danny89', 'YAByers8']:
        with pytest.raises(ldap.CONSTRAINT_VIOLATION):
            _change_password_with_own(topo, dbyers_dn, 'dbyers1', candidate)
    # With syntax checking off, short and trivial passwords are accepted
    config.replace('passwordchecksyntax', 'off')
    for current, replacement in [('dbyers1', 'db'), ('db', 'dbyers'), ('dbyers', 'dbyers1')]:
        _change_password_with_own(topo, dbyers_dn, current, replacement)
    # Re-enable checking with a 10-character minimum
    config.replace_many(
        ('passwordminlength', '10'),
        ('passwordchecksyntax', 'on'))
    with pytest.raises(ldap.CONSTRAINT_VIOLATION):
        _change_password_with_own(topo, dbyers_dn, 'dbyers1', 'db')
    # Reset the password as Directory Manager so later tests start clean
    UserAccount(topo.standalone, dbyers_dn).replace('userpassword', 'dbyers1')
def test_too_big_password(topo, _fix_password):
    """Password history bookkeeping, including a very long (510 char) password

    :id: 299a3fb4-5a20-11ea-bba8-8c16451d917b
    :setup: Standalone
    :steps:
        1. Set policy to keep password histories
        2. Change number of passwords in history to 3
        3. Modify password from dbyers1 to dbyers2
        4. Check that the passwordhistory attribute has been added
        5. Add a password test for a very long password
        6. Restore passwordinhistory to 6 and turn passwordhistory off
    :expectedresults:
        1. Success
        2. Success
        3. Success
        4. Success
        5. Success
        6. Success
    """
    dbyers_dn = f'uid=dbyers,ou=People,{DEFAULT_SUFFIX}'
    config = Config(topo.standalone)
    # Keep password histories (syntax checking off so any value is allowed)
    config.replace_many(
        ('passwordchecksyntax', 'off'),
        ('passwordhistory', 'on'))
    assert config.get_attr_val_utf8('passwordinhistory') == '6'
    config.replace('passwordinhistory', '3')
    # Change the password, then verify reuse of the old one is refused
    _change_password_with_own(topo, dbyers_dn, 'dbyers1', 'dbyers2')
    with pytest.raises(ldap.CONSTRAINT_VIOLATION):
        _change_password_with_own(topo, dbyers_dn, 'dbyers2', 'dbyers1')
    # History attribute must now exist on the entry
    assert UserAccount(topo.standalone, dbyers_dn).get_attr_val_utf8('passwordhistory')
    # A 510-character password must round-trip through the history checks too
    long_pass = '0123456789' * 50 + 'LENGTH=510'
    _change_password_with_own(topo, dbyers_dn, 'dbyers2', long_pass)
    with pytest.raises(ldap.CONSTRAINT_VIOLATION):
        _change_password_with_own(topo, dbyers_dn, long_pass, long_pass)
    _change_password_with_root(topo, dbyers_dn, 'dbyers1')
    # Restore the defaults
    config.replace_many(('passwordhistory', 'off'),
                        ('passwordinhistory', '6'))
def test_pwminage(topo, _fix_password):
    """Verify passwordminage blocks rapid successive password changes

    :id: 2df7bf32-5a20-11ea-ad23-8c16451d917b
    :setup: Standalone
    :steps:
        1. Get passwordminage; should be 0 currently
        2. Set passwordminage to 3 seconds
        3. Change the current password
        4. Try to change the password again immediately
        5. Retry after the 3 seconds are up; should work
    :expectedresults:
        1. Success
        2. Success
        3. Success
        4. Fail (CONSTRAINT_VIOLATION)
        5. Success
    """
    dbyers_dn = f'uid=dbyers,ou=People,{DEFAULT_SUFFIX}'
    config = Config(topo.standalone)
    # Default minimum age is 0 (no restriction)
    assert config.get_attr_val_utf8('passwordminage') == '0'
    config.replace('passwordminage', '3')
    _change_password_with_own(topo, dbyers_dn, 'dbyers1', 'dbyers2')
    # A second change within the minimum age window must be refused
    with pytest.raises(ldap.CONSTRAINT_VIOLATION):
        _change_password_with_own(topo, dbyers_dn, 'dbyers2', 'dbyers1')
    # Wait out the 3-second minimum age
    for _ in range(3):
        time.sleep(1)
    _change_password_with_own(topo, dbyers_dn, 'dbyers2', 'dbyers1')
    # Restore the default
    config.replace('passwordminage', '0')
def test_invalid_credentials(topo, _fix_password):
    """Account lockout after repeated bad binds, with and without auto-unlock

    :id: 3233ca78-5a20-11ea-8d35-8c16451d917b
    :setup: Standalone
    :steps:
        1. Check that passwordlockout is off
        2. Turn passwordlockout on, lockout duration 3s, failure-count reset 3s,
           minimum length 10
        3. Bind with invalid credentials until the account locks
        4. After the lockout duration, a valid bind works again
        5. Set passwordunlock off (lock forever until reset)
        6. Lock the account again with invalid binds
        7. A valid bind is still rejected (locked forever)
        8. Reset the password as Directory Manager and bind again
        9. Restore the server defaults
    :expectedresults:
        1. Success
        2. Success
        3. Fail (INVALID_CREDENTIALS then CONSTRAINT_VIOLATION)
        4. Success
        5. Success
        6. Success
        7. Success
        8. Success
        9. Success
    """
    dbyers_dn = f'uid=dbyers,ou=People,{DEFAULT_SUFFIX}'
    config = Config(topo.standalone)
    # Lockout is off by default
    assert config.get_attr_val_utf8('passwordlockout') == 'off'
    # Enable lockout with a short (3s) duration and reset interval
    config.replace_many(
        ('passwordlockout', 'on'),
        ('passwordlockoutduration', '3'),
        ('passwordresetfailurecount', '3'),
        ('passwordminlength', '10'))
    # Three bad binds, then the account is locked even for a bad bind
    for _ in range(3):
        with pytest.raises(ldap.INVALID_CREDENTIALS):
            _change_password_with_own(topo, dbyers_dn, 'Invalid', 'dbyers1')
    with pytest.raises(ldap.CONSTRAINT_VIOLATION):
        _change_password_with_own(topo, dbyers_dn, 'Invalid', 'dbyers1')
    # After the lockout duration expires, a valid bind succeeds
    for _ in range(3):
        time.sleep(1)
    _change_password_with_own(topo, dbyers_dn, 'dbyers1', 'dbyers1')
    # Now lock forever: no automatic unlock
    config.replace('passwordunlock', 'off')
    for _ in range(3):
        with pytest.raises(ldap.INVALID_CREDENTIALS):
            _change_password_with_own(topo, dbyers_dn, 'Invalid', 'dbyers1')
    with pytest.raises(ldap.CONSTRAINT_VIOLATION):
        _change_password_with_own(topo, dbyers_dn, 'Invalid', 'dbyers1')
    for _ in range(3):
        time.sleep(1)
    # Even with the right password the account stays locked
    with pytest.raises(ldap.CONSTRAINT_VIOLATION):
        _change_password_with_own(topo, dbyers_dn, 'dbyers1', 'dbyers1')
    # Admin reset unlocks the account
    _change_password_with_root(topo, dbyers_dn, 'dbyers1')
    time.sleep(1)
    _change_password_with_own(topo, dbyers_dn, 'dbyers1', 'dbyers1')
    # Restore the server defaults
    config.replace_many(
        ('passwordinhistory', '6'),
        ('passwordlockout', 'off'),
        ('passwordlockoutduration', '3600'),
        ('passwordminlength', '6'),
        ('passwordresetfailurecount', '600'),
        ('passwordunlock', 'on'))
def test_expiration_date(topo, _fix_password):
    """Check the password expiration date survives password changes

    :id: 3691739a-5a20-11ea-8712-8c16451d917b
    :setup: Standalone
    :steps:
        1. Add a user with a password expiration date in the future
        2. Modify their password
        3. Check the expiration date is unchanged (still in the future)
        4. Modify the password expiration date
        5. Check the new expiration date took effect
        6. Change policy so that users can change passwords
        7. Recreate the user with password history on
        8. Modify the password once; verify the storage scheme
        9. Repeatedly try to change the password to itself
    :expectedresults:
        1. Success
        2. Success
        3. Success
        4. Success
        5. Success
        6. Success
        7. Success
        8. Success
        9. Fail (CONSTRAINT_VIOLATION)
    """
    users = UserAccounts(topo.standalone, DEFAULT_SUFFIX)
    # A user with an explicit (far-future) expiration date
    user = users.create_test_user()
    user.replace_many(
        ('userpassword', 'bind4now'),
        ('passwordExpirationTime', '20380119031404Z'))
    # Changing the password must not clobber the expiration time
    user.replace('userPassword', 'secreter')
    assert user.get_attr_val_utf8('passwordExpirationTime') == '20380119031404Z'
    # The expiration time itself can be updated
    user.replace('passwordExpirationTime', '20380119031405Z')
    assert user.get_attr_val_utf8('passwordExpirationTime') == '20380119031405Z'
    config = Config(topo.standalone)
    # Let users change their own passwords
    config.replace('passwordchange', 'on')
    # Recreate the test user with password history enabled
    user.delete()
    user = users.create_test_user()
    config.replace('passwordhistory', 'on')
    user.replace('userPassword', 'secreter')
    time.sleep(1)
    # The stored hash must use the expected default scheme
    assert DEFAULT_PASSWORD_STORAGE_SCHEME in user.get_attr_val_utf8('userPassword')
    # Re-using the current password is always refused while history is on
    for _ in range(3):
        with pytest.raises(ldap.CONSTRAINT_VIOLATION):
            _change_password_with_own(topo, user.dn, 'secreter', 'secreter')
    user.delete()
def test_passwordlockout(topo, _fix_password):
    """Account lockout must be cleared on a password-admin reset

    :id: 3ffcffda-5a20-11ea-a3af-8c16451d917b
    :setup: Standalone
    :steps:
        1. Add admin user diradmin
        2. Add diradmin to the password-reset permission group
        3. Turn passwordlockout on: duration 30s, reset-count duration 30s,
           max failures 3; reset the user's retry count to 0
        4. Bind with invalid credentials (3 times)
        5. Bind with a valid password; should get lockout error
        6. Reset the password via the admin login
        7. Log in as the user to confirm the account was unlocked, and change
           the password back
        8. Set passwordunlock off (lock forever until reset) and repeat 4-7
    :expectedresults:
        1. Success
        2. Success
        3. Success
        4. Fail (INVALID_CREDENTIALS)
        5. Fail (CONSTRAINT_VIOLATION)
        6. Success
        7. Success
        8. Same outcomes as 4-7
    """
    config = Config(topo.standalone)
    # Target user and the admin who may reset passwords
    user = UserAccounts(topo.standalone, DEFAULT_SUFFIX).create_test_user()
    user.replace('userpassword', 'dby3rs2')
    admin = _create_user(topo, 'diradmin', 'Anuj Borah', '1002', 'diradmin')
    Group(topo.standalone, f'cn=user_passwd_reset,ou=permissions,{DEFAULT_SUFFIX}').add('member', admin.dn)
    # Lockout after 3 failures, 30s duration / failure-count window
    config.replace_many(
        ('passwordlockout', 'on'),
        ('passwordlockoutduration', '30'),
        ('passwordresetfailurecount', '30'),
        ('passwordmaxfailure', '3'),
        ('passwordhistory', 'off'))
    user.replace('passwordretrycount', '0')
    # Three bad binds lock the account...
    for _ in range(3):
        with pytest.raises(ldap.INVALID_CREDENTIALS):
            _change_password_with_own(topo, user.dn, 'Invalid', 'secreter')
    # ...after which even a valid bind is refused
    with pytest.raises(ldap.CONSTRAINT_VIOLATION):
        _change_password_with_own(topo, user.dn, 'Invalid', 'secreter')
    # Admin reset clears the lockout
    conn = admin.bind('diradmin')
    UserAccount(conn, user.dn).replace('userpassword', 'dby3rs2')
    time.sleep(1)
    # The user can log in again; change the password back to the original
    _change_password_with_own(topo, user.dn, 'dby3rs2', 'secreter')
    # Same scenario with permanent lockout (no automatic unlock)
    config.replace('passwordunlock', 'off')
    user.replace('passwordretrycount', '0')
    for _ in range(3):
        with pytest.raises(ldap.INVALID_CREDENTIALS):
            _change_password_with_own(topo, user.dn, 'Invalid', 'secreter')
    with pytest.raises(ldap.CONSTRAINT_VIOLATION):
        _change_password_with_own(topo, user.dn, 'Invalid', 'secreter')
    # Only an admin reset can clear a permanent lockout
    UserAccount(conn, user.dn).replace('userpassword', 'dby3rs2')
    time.sleep(1)
    _change_password_with_own(topo, user.dn, 'dby3rs2', 'secreter')


if __name__ == "__main__":
    # Run isolated: -s for DEBUG mode
    CURRENT_FILE = os.path.realpath(__file__)
    pytest.main("-s -v %s" % CURRENT_FILE)
+# --- END COPYRIGHT BLOCK --- +""" + +import os +import pytest +from lib389.topologies import topology_st as topo +from lib389.idm.user import UserAccounts, UserAccount +from lib389._constants import DEFAULT_SUFFIX, DN_DM +from lib389.config import Config +from lib389.idm.domain import Domain +from lib389.idm.group import UniqueGroups, UniqueGroup +from lib389.idm.organizationalunit import OrganizationalUnits, OrganizationalUnit +from lib389.pwpolicy import PwPolicyManager +import time +import ldap + +pytestmark = pytest.mark.tier1 + + +def _create_user(topo, uid, ou): + user = UserAccounts(topo.standalone, DEFAULT_SUFFIX, rdn=ou).create(properties={ + 'uid': uid, + 'cn': uid, + 'sn': uid, + 'mail': f'{uid}@example.com', + 'homeDirectory': f'/home/{uid}', + 'uidNumber': '1000', + 'gidNumber': '1000' + }) + return user + + +def change_pwp_parameter(topo, pwp, operation, to_do): + """ + Will change password policy parameter + """ + pwp1 = PwPolicyManager(topo.standalone) + user = pwp1.get_pwpolicy_entry(f'{pwp},{DEFAULT_SUFFIX}') + user.replace(operation, to_do) + + +def _create_pwp(topo, instance): + """ + Will create pwp + """ + policy_props = {} + pwp = PwPolicyManager(topo.standalone) + pwadm_locpol = pwp.create_subtree_policy(instance, policy_props) + for attribute, value in [ + ('passwordexp', 'off'), + ('passwordchange', 'off'), + ('passwordmustchange', 'off'), + ('passwordchecksyntax', 'off'), + ('passwordinhistory', '6'), + ('passwordhistory', 'off'), + ('passwordlockout', 'off'), + ('passwordlockoutduration', '3600'), + ('passwordmaxage', '8640000'), + ('passwordmaxfailure', '3'), + ('passwordminage', '0'), + ('passwordminlength', '6'), + ('passwordresetfailurecount', '600'), + ('passwordunlock', 'on'), + ('passwordStorageScheme', 'CLEAR'), + ('passwordwarning', '86400'), + ('passwordTPRMaxUse', '-1'), + ('passwordTPRDelayExpireAt', '-1'), + ('passwordTPRDelayValidFrom', '-1') + ]: + pwadm_locpol.add(attribute, value) + return pwadm_locpol + + +def 
def change_password_of_user(topo, user_password_new_pass_list, pass_to_change):
    """Bind as each (rdn, password) pair and change *pass_to_change*'s password.

    Each element of *user_password_new_pass_list* is a (user_rdn, password,
    new_pass) tuple; the user binds with its own credentials and then writes
    the new password onto the *pass_to_change* entry.
    """
    for user, password, new_pass in user_password_new_pass_list:
        bound = UserAccount(topo.standalone, f'{user},{DEFAULT_SUFFIX}').bind(password)
        UserAccount(bound, pass_to_change).replace('userpassword', new_pass)


@pytest.fixture(scope="function")
def _add_user(request, topo):
    """Create the pwadm_* users (admins get password Secret123); delete all on teardown."""
    for uid, ou_ou in [('pwadm_user_1', None), ('pwadm_user_2', 'ou=People')]:
        _create_user(topo, uid, ou_ou)
    for uid, ou_ou in [('pwadm_admin_2', 'ou=People'),
                       ('pwadm_admin_3', 'ou=People'),
                       ('pwadm_admin_4', 'ou=People')]:
        _create_user(topo, uid, ou_ou).replace('userpassword', 'Secret123')

    def fin():
        # Remove users both under ou=People and directly under the suffix
        for entry in UserAccounts(topo.standalone, DEFAULT_SUFFIX).list():
            entry.delete()
        for entry in UserAccounts(topo.standalone, DEFAULT_SUFFIX, rdn=None).list():
            entry.delete()
    request.addfinalizer(fin)
@pytest.mark.bz1044164
def test_local_password_policy(topo, _add_user):
    """Regression test for bz1044164 part 1.

    :id: d6f4a7fa-473b-11ea-8766-8c16451d917b
    :setup: Standalone
    :steps:
        1. Create a password admin user entry
        2. Add an ACI to allow this user all rights
        3. Configure the password admin
        4. Enable local password policy with passwordMustChange off
        5. Add another generic user but do not include the password (userpassword)
        6. Use the admin user to perform a password update on the generic user
        7. We don't need this ACI anymore. Delete it
    :expectedresults:
        1. Success
        2. Success
        3. Success
        4. Success
        5. Success
        6. Success
        7. Success
    """
    # Create a password admin user entry
    user = _create_user(topo, 'pwadm_admin_1', None)
    user.replace('userpassword', 'Secret123')
    domain = Domain(topo.standalone, DEFAULT_SUFFIX)
    # Keep the ACI text in one place so set() and remove() always match exactly
    admin_aci = (f'(targetattr ="userpassword")'
                 f'(version 3.0;acl "Allow password admin to write user '
                 f'passwords";allow (write)(userdn = "ldap:///{user.dn}");)')
    # Add an ACI to allow this user all rights
    domain.set("aci", admin_aci)
    # Configure password admin and enable local password policy
    Config(topo.standalone).replace_many(
        ('passwordAdminDN', user.dn),
        ('passwordMustChange', 'off'),
        ('nsslapd-pwpolicy-local', 'on'))
    # Use the admin user to perform a password update on the generic user
    # (pwadm_user_1 was created by the _add_user fixture without a password)
    admin_conn = UserAccount(topo.standalone, f'uid=pwadm_admin_1,{DEFAULT_SUFFIX}').bind('Secret123')
    UserAccount(admin_conn, f'uid=pwadm_user_1,{DEFAULT_SUFFIX}').replace('userpassword', 'hello')
    # We don't need this ACI anymore. Delete it
    domain.remove("aci", admin_aci)
@pytest.mark.bz1118006
def test_passwordexpirationtime_attribute(topo, _add_user):
    """Regression test for bz1118006.

    :id: 867472d2-473c-11ea-b583-8c16451d917b
    :setup: Standalone
    :steps:
        1. Check that the passwordExpirationTime attribute is set to the epoch date
    :expectedresults:
        1. Success
    """
    config = Config(topo.standalone)
    # With passwordMustChange on, a reset password expires immediately,
    # which the server records as the Unix epoch
    config.replace('passwordMustChange', 'on')
    epoch_date = "19700101000000Z"
    time.sleep(1)
    user = UserAccount(topo.standalone, f'uid=pwadm_user_1,{DEFAULT_SUFFIX}')
    user.replace('userpassword', 'Secret123')
    time.sleep(1)
    # The expiration time must be the epoch date
    assert user.get_attr_val_utf8('passwordExpirationTime') == epoch_date
    # Restore the default for subsequent tests
    config.replace('passwordMustChange', 'off')
    time.sleep(1)
Remove group from password admin configuration + 28. Change admins + 29. Change user's password by ex-admin user + 30. Change admin user's password by ex-admin user + :expectedresults: + 1. Success + 2. Success + 3. Success + 4. Success + 5. Success + 6. Fail(ldap.INSUFFICIENT_ACCESS) + 7. Success + 8. Success + 9. Success + 10. Success + 11. Success + 12. Success + 13. Success + 14. Success + 15. Success + 16. Success + 17. Success + 18. Success + 19. Fail + 20. Fail + 21. Success + 22. Success + 23. Success + 24. Success + 25. Success + 26. Success + 27. Success + 28. Success + 29. Fail + 30. Fail + """ + # create unique members of admin group + admin_grp = UniqueGroups(topo.standalone, DEFAULT_SUFFIX).create(properties={ + 'cn': 'pwadm_group_adm', + 'description': 'pwadm_group_adm', + 'uniqueMember': [f'uid=pwadm_admin_2,ou=People,{DEFAULT_SUFFIX}', + f'uid=pwadm_admin_3,ou=People,{DEFAULT_SUFFIX}'] + }) + # Edit ACIs for admin group + Domain(topo.standalone, + f"ou=People,{DEFAULT_SUFFIX}").set('aci', f'(targetattr ="userpassword")' + f'(version 3.0;acl "Allow passwords admin to write user ' + f'passwords";allow (write)(groupdn = "ldap:///{admin_grp.dn}");)') + # Add group as password admin + Config(topo.standalone).replace('passwordAdminDN', admin_grp.dn) + # Test password admin group to modify password of another admin user + change_password_of_user(topo, [ + ('uid=pwadm_admin_2,ou=People', 'Secret123', 'hello')], + f'uid=pwadm_admin_3,ou=people,{DEFAULT_SUFFIX}') + # Use admin user to perform a password update on Directory Manager user + with pytest.raises(ldap.INSUFFICIENT_ACCESS): + change_password_of_user(topo, [('uid=pwadm_admin_2,ou=People', 'Secret123', 'hello')], + f'{DN_DM},{DEFAULT_SUFFIX}') + # Add top level container + ou = OrganizationalUnits(topo.standalone, DEFAULT_SUFFIX).create(properties={'ou': 'pwadm_locpol'}) + # Change user's password by admin user. 
Break the global policy rule + # Add new user in password admin group + user = _create_user(topo, 'pwadm_locpol_user', 'ou=pwadm_locpol') + user.replace('userpassword', 'Secret123') + # Create local policy configuration entry + _create_pwp(topo, ou.dn) + # Set parameter for pwp + for para_meter, op_op in [ + ('passwordLockout', 'on'), + ('passwordMaxFailure', '4'), + ('passwordLockoutDuration', '10'), + ('passwordResetFailureCount', '100'), + ('passwordMinLength', '8'), + ('passwordAdminDN', f'cn=pwadm_group_adm,ou=Groups,{DEFAULT_SUFFIX}')]: + change_pwp_parameter(topo, 'ou=pwadm_locpol', para_meter, op_op) + # Set ACI + OrganizationalUnit(topo.standalone, + ou.dn).set('aci', + f'(targetattr ="userpassword")' + f'(version 3.0;acl "Allow passwords admin to write user ' + f'passwords";allow (write)' + f'(groupdn = "ldap:///cn=pwadm_group_adm,ou=Groups,{DEFAULT_SUFFIX}");)') + # Change password with new admin + change_password_of_user(topo, [('uid=pwadm_admin_2,ou=People', 'Secret123', 'Sec')], user.dn) + # Set global parameter + Config(topo.standalone).replace_many( + ('passwordTrackUpdateTime', 'on'), + ('passwordGraceLimit', '4'), + ('passwordHistory', 'on'), + ('passwordInHistory', '4')) + # Test password admin group for global password policy + change_password_of_user(topo, [('uid=pwadm_admin_2,ou=People', 'Secret123', 'Sec')], + f'uid=pwadm_user_2,ou=People,{DEFAULT_SUFFIX}') + # Adding admin group for local policy + grp = UniqueGroup(topo.standalone, f'cn=pwadm_group_adm,ou=Groups,{DEFAULT_SUFFIX}') + grp.add('uniqueMember', f'uid=pwadm_admin_4,ou=People,{DEFAULT_SUFFIX}') + # Modify ordinary user's password + change_password_of_user(topo, [('uid=pwadm_admin_4,ou=People', 'Secret123', 'Secret')], + f'uid=pwadm_user_2,ou=People,{DEFAULT_SUFFIX}') + # Modify user DN using modrdn of a user in password admin group + UserAccount(topo.standalone, f'uid=pwadm_admin_4,ou=People,{DEFAULT_SUFFIX}').rename('uid=pwadm_admin_4_new') + # Remove admin + 
grp.remove('uniqueMember', f'uid=pwadm_admin_4,ou=People,{DEFAULT_SUFFIX}') + # Add Admin + grp.add('uniqueMember', f'uid=pwadm_admin_4_new,ou=People,{DEFAULT_SUFFIX}') + # Test the group pwp again + with pytest.raises(ldap.INVALID_CREDENTIALS): + change_password_of_user(topo, [(f'uid=pwadm_admin_4,ou=People', 'Secret123', 'Secret1')], + f'uid=pwadm_user_2,ou=People,{DEFAULT_SUFFIX}') + change_password_of_user(topo, [(f'uid=pwadm_admin_4_new,ou=People', 'Secret123', 'Secret1')], + f'uid=pwadm_user_2,ou=People,{DEFAULT_SUFFIX}') + with pytest.raises(ldap.INVALID_SYNTAX): + Config(topo.standalone).replace('passwordAdminDN', "Invalid") + # Test assigning invalid value to password admin attribute + # Try to add more than one Password Admin attribute to config file + with pytest.raises(ldap.OBJECT_CLASS_VIOLATION): + Config(topo.standalone).replace('passwordAdminDN', + [f'uid=pwadm_admin_2,ou=people,{DEFAULT_SUFFIX}', + f'uid=pwadm_admin_3,ou=people,{DEFAULT_SUFFIX}']) + # Use admin group setup from previous, but delete ACI from that + people = Domain(topo.standalone, f"ou=People,{DEFAULT_SUFFIX}") + people.remove('aci', + f'(targetattr ="userpassword")(version 3.0;acl ' + f'"Allow passwords admin to write user ' + f'passwords";allow (write)' + f'(groupdn = "ldap:///cn=pwadm_group_adm,ou=Groups,{DEFAULT_SUFFIX}");)') + # Try to change user's password by admin user + with pytest.raises(ldap.INSUFFICIENT_ACCESS): + change_password_of_user(topo, [('uid=pwadm_admin_2,ou=People', 'Secret123', 'Sec')], + f'uid=pwadm_user_2,ou=People,{DEFAULT_SUFFIX}') + # Restore ACI + people.set('aci', + f'(targetattr ="userpassword")(version 3.0;acl ' + f'"Allow passwords admin to write user ' + f'passwords";allow (write)(groupdn = "ldap:///cn=pwadm_group_adm,ou=Groups,{DEFAULT_SUFFIX}");)') + # Edit ACIs for admin group + people.add('aci', + f'(targetattr ="userpassword")(version 3.0;acl ' + f'"Allow passwords admin to add user ' + f'passwords";allow (add)(groupdn = 
"ldap:///cn=pwadm_group_adm,ou=Groups,{DEFAULT_SUFFIX}");)') + UserAccount(topo.standalone, f'uid=pwadm_user_2,ou=people,{DEFAULT_SUFFIX}').replace('userpassword', 'Secret') + real_user = UserAccount(topo.standalone, f'uid=pwadm_user_2,ou=people,{DEFAULT_SUFFIX}') + conn = real_user.bind('Secret') + # Test new aci + with pytest.raises(ldap.INSUFFICIENT_ACCESS): + UserAccounts(conn, DEFAULT_SUFFIX, rdn='ou=People').create(properties={ + 'uid': 'ok', + 'cn': 'ok', + 'sn': 'ok', + 'uidNumber': '1000', + 'gidNumber': 'ok', + 'homeDirectory': '/home/ok'}) + UserAccounts(topo.standalone, DEFAULT_SUFFIX).list() + real_user = UserAccount(topo.standalone, f'uid=pwadm_admin_2,ou=People,{DEFAULT_SUFFIX}') + conn = real_user.bind('Secret123') + # Test new aci which has new rights + for uid, cn, password in [ + ('pwadm_user_3', 'pwadm_user_1', 'U2VjcmV0MTIzCg=='), + ('pwadm_user_4', 'pwadm_user_2', 'U2VjcmV0MTIzCg==')]: + UserAccounts(conn, DEFAULT_SUFFIX, rdn='ou=People').create(properties={ + 'uid': uid, + 'cn': cn, + 'sn': cn, + 'uidNumber': '1000', + 'gidNumber': '1001', + 'homeDirectory': f'/home/{uid}', + 'userpassword': password}) + # Remove ACI + Domain(topo.standalone, + f"ou=People,{DEFAULT_SUFFIX}").remove('aci', + f'(targetattr ="userpassword")' + f'(version 3.0;acl ' + f'"Allow passwords admin to add user ' + f'passwords";allow ' + f'(add)(groupdn = ' + f'"ldap:///cn=pwadm_group_adm,ou=Groups,{DEFAULT_SUFFIX}");)') + # Delete a user from password admin group + grp = UniqueGroup(topo.standalone, f'cn=pwadm_group_adm,ou=Groups,{DEFAULT_SUFFIX}') + grp.remove('uniqueMember', f'uid=pwadm_admin_2,ou=People,{DEFAULT_SUFFIX}') + # Change users password by ex-admin user + with pytest.raises(ldap.INSUFFICIENT_ACCESS): + change_password_of_user(topo, [('uid=pwadm_admin_2,ou=People', 'Secret123', 'Secret')], + f'uid=pwadm_user_2,ou=People,{DEFAULT_SUFFIX}') + # Set aci for only user + people = Domain(topo.standalone, f"ou=People,{DEFAULT_SUFFIX}") + people.remove('aci', + 
f'(targetattr ="userpassword")(version 3.0;acl ' + f'"Allow passwords admin to write user ' + f'passwords";allow (write)(groupdn = "ldap:///cn=pwadm_group_adm,ou=Groups,{DEFAULT_SUFFIX}");)') + people.set('aci', + f'(targetattr ="userpassword")(version 3.0;acl "Allow passwords admin ' + f'to write user passwords";allow (write)(groupdn = "ldap:///uid=pwadm_admin_1,{DEFAULT_SUFFIX}");)') + # Remove group from password admin configuration + Config(topo.standalone).replace('passwordAdminDN', f"uid=pwadm_admin_1,{DEFAULT_SUFFIX}") + # Change user's password by ex-admin user + with pytest.raises(ldap.INSUFFICIENT_ACCESS): + change_password_of_user(topo, [('uid=pwadm_admin_2,ou=People', 'Secret123', 'hellso')], + f'uid=pwadm_user_2,ou=People,{DEFAULT_SUFFIX}') + with pytest.raises(ldap.INSUFFICIENT_ACCESS): + change_password_of_user(topo, [('uid=pwadm_admin_2,ou=People', 'Secret123', 'hellso')], + f'uid=pwadm_admin_1,{DEFAULT_SUFFIX}') + + +@pytest.mark.bz834060 +def test_password_max_failure_should_lockout_password(topo): + """Regression test for bz834060. + + :id: f2064efa-52d9-11ea-8037-8c16451d917b + :setup: Standalone + :steps: + 1. passwordMaxFailure should lockout password one sooner + 2. Setting passwordLockout to \"on\" + 3. Set maximum number of login tries to 3 + 4. Turn off passwordLegacyPolicy + 5. Turn off local password policy, so that global is applied + :expectedresults: + 1. Success + 2. Success + 3. Success + 4. Success + 5. 
Success + """ + config = Config(topo.standalone) + config.replace_many( + ('passwordLockout', 'on'), + ('passwordMaxFailure', '3'), + ('passwordLegacyPolicy', 'off'), + ('nsslapd-pwpolicy-local', 'off')) + user = _create_user(topo, 'tuser', 'ou=people') + user.replace('userpassword', 'password') + for _ in range(2): + with pytest.raises(ldap.INVALID_CREDENTIALS): + user.bind('Invalid') + with pytest.raises(ldap.CONSTRAINT_VIOLATION): + user.bind("Invalid") + config.replace('nsslapd-pwpolicy-local', 'on') + + +@pytest.mark.bz834063 +def test_pwd_update_time_attribute(topo): + """Regression test for bz834063 + + :id: ec2b1d4e-52d9-11ea-b13e-8c16451d917b + :setup: Standalone + :steps: + 1. Add the attribute passwordTrackUpdateTime to cn=config + 2. Add a test entry while passwordTrackUpdateTime is on + 3. Check if new attribute pwdUpdateTime added automatically after changing the pwd + 4. Modify User pwd + 5. check for the pwdupdatetime attribute added to the test entry as passwordTrackUpdateTime is on + 6. Set passwordTrackUpdateTime to OFF and modify test entry's pwd + 7. Check passwordUpdateTime should not be changed + 8. Record last pwdUpdateTime before changing the password + 9. Modify Pwd + 10. Set passwordTrackUpdateTime to ON and modify test entry's pwd, + check passwordUpdateTime should be changed + 11. Try setting Invalid value for passwordTrackUpdateTime + 12. Try setting Invalid value for pwdupdatetime + :expectedresults: + 1. Success + 2. Success + 3. Success + 4. Success + 5. Success + 6. Success + 7. Success + 8. Success + 9. Success + 10. Success + 11. Fail + 12. 
Fail + """ + config = Config(topo.standalone) + # Add the attribute passwordTrackUpdateTime to cn=config + config.replace('passwordTrackUpdateTime', 'on') + # Add a test entry while passwordTrackUpdateTime is on + user = _create_user(topo, 'test_bz834063', None) + user.set('userpassword', 'Unknown') + # Modify User pwd + user.replace('userpassword', 'Unknown1') + # Check if new attribute pwdUpdateTime added automatically after changing the pwd + assert user.get_attr_val_utf8('pwdUpdateTime') + # Set passwordTrackUpdateTime to OFF and modify test entry's pwd + config.replace('passwordTrackUpdateTime', 'off') + # Record last pwdUpdateTime before changing the password + update_time = user.get_attr_val_utf8('pwdUpdateTime') + time.sleep(1) + user.replace('userpassword', 'Unknown') + # Check passwordUpdateTime should not be changed + update_time_again = user.get_attr_val_utf8('pwdUpdateTime') + assert update_time == update_time_again + # Set passwordTrackUpdateTime to ON and modify test entry's pwd, + # check passwordUpdateTime should be changed + time.sleep(1) + config.replace('passwordTrackUpdateTime', 'on') + user.replace('userpassword', 'Unknown') + time.sleep(1) + update_time_1 = user.get_attr_val_utf8('pwdUpdateTime') + assert update_time_again != update_time_1 + with pytest.raises(ldap.OPERATIONS_ERROR): + config.replace('passwordTrackUpdateTime', "invalid") + with pytest.raises(ldap.UNWILLING_TO_PERFORM): + config.replace('pwdupdatetime', 'Invalid') + + +def test_password_track_update_time(topo): + """passwordTrackUpdateTime stops working with subtree password policies + + :id: e5d3e4c6-52d9-11ea-a65e-8c16451d917b + :setup: Standalone + :steps: + 1. Add users + 2. Create local policy configuration entry for subsuffix + 3. Enable passwordTrackUpdateTime to local policy configuration entry + 4. Check that attribute passwordUpdate was added to entries + 5. check for the pwdupdatetime attribute added to the test entry as passwordTrackUpdateTime is on + 6. 
Set passwordTrackUpdateTime to OFF and modify test entry's pwd, + check passwordUpdateTime should not be changed + 7. Record last pwdUpdateTime before changing the password + 8. Modify Pwd + 9. Check current pwdUpdateTime + 10. Set passwordTrackUpdateTime to ON and modify test entry's pwd, + check passwordUpdateTime should be changed + :expectedresults: + 1. Success + 2. Success + 3. Success + 4. Success + 5. Success + 6. Success + 7. Success + 8. Success + 9. Success + 10. Success + """ + # Add users + user1 = _create_user(topo, 'trac478_user1', None) + user2 = _create_user(topo, 'trac478_user2', None) + # Create local policy configuration entry for subsuffix + pwp_for_sufix = _create_pwp(topo, DEFAULT_SUFFIX) + pwp_for_user2 = _create_pwp(topo, user2.dn) + # Enable passwordTrackUpdateTime to local policy configuration entry + for instance in [pwp_for_user2, pwp_for_sufix]: + instance.replace('passwordTrackUpdateTime', 'on') + # Check that attribute passwordUpdate was added to entries + # check for the pwdupdatetime attribute added to the test entry as passwordTrackUpdateTime is on + for user in [user1, user2]: + user.replace('userpassword', 'pwd') + time.sleep(1) + assert user.get_attr_val_utf8('pwdUpdateTime') + # Set passwordTrackUpdateTime to OFF and modify test entry's pwd, + # check passwordUpdateTime should not be changed + pwp_for_sufix.replace('passwordTrackUpdateTime', 'off') + # Record last pwdUpdateTime before changing the password + last_login_time_user1 = user1.get_attr_val_utf8('pwdUpdateTime') + last_login_time_user2 = user2.get_attr_val_utf8('pwdUpdateTime') + time.sleep(1) + # Modify Pwd + user1.replace('userpassword', 'pwd1') + # Check current pwdUpdateTime + last_login_time_user1_last = user1.get_attr_val_utf8('pwdUpdateTime') + assert last_login_time_user1 == last_login_time_user1_last + # Set passwordTrackUpdateTime to ON and modify test entry's pwd, + # check passwordUpdateTime should be changed + 
pwp_for_user2.replace('passwordTrackUpdateTime', 'off') + time.sleep(1) + user2.replace('userpassword', 'pwd1') + last_login_time_user2_last = user2.get_attr_val_utf8('pwdUpdateTime') + assert last_login_time_user1 == last_login_time_user1_last + assert last_login_time_user2 == last_login_time_user2_last + pwp_for_sufix.replace('passwordTrackUpdateTime', 'on') + user1.replace('userpassword', 'pwd1') + time.sleep(1) + last_login_time_user1_last = user1.get_attr_val_utf8('pwdUpdateTime') + assert last_login_time_user1 != last_login_time_user1_last + pwp_for_user2.replace('passwordTrackUpdateTime', 'on') + time.sleep(1) + user2.replace('userpassword', 'pwd1') + time.sleep(1) + last_login_time_user2_last = user2.get_attr_val_utf8('pwdUpdateTime') + assert last_login_time_user2 != last_login_time_user2_last + + +@pytest.mark.bz834063 +def test_signal_11(topo): + """ns-slapd instance crashed with signal 11 SIGSEGV + + :id: d757b9ae-52d9-11ea-802f-8c16451d917b + :setup: Standalone + :steps: + 1. Adding new user + 2. Modifying user password of uid=bz973583 + :expectedresults: + 1. Success + 2. Success + """ + user = _create_user(topo, 'bz973583', None) + user.set('userpassword', 'Secret123') + user.remove('userpassword', 'Secret123') + user.set('userpassword', 'new') + assert topo.standalone.status() + + +if __name__ == "__main__": + CURRENT_FILE = os.path.realpath(__file__) + pytest.main("-s -v %s" % CURRENT_FILE) diff --git a/dirsrvtests/tests/suites/password/regression_test.py b/dirsrvtests/tests/suites/password/regression_test.py new file mode 100644 index 0000000..a0d3c2c --- /dev/null +++ b/dirsrvtests/tests/suites/password/regression_test.py @@ -0,0 +1,329 @@ +# Copyright (C) 2017 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. 
+# --- END COPYRIGHT BLOCK --- +# +import pytest +import time +import glob +from lib389._constants import PASSWORD, DN_DM, DEFAULT_SUFFIX +from lib389._constants import SUFFIX, PASSWORD, DN_DM, DN_CONFIG, PLUGIN_RETRO_CHANGELOG, DEFAULT_SUFFIX, DEFAULT_CHANGELOG_DB, DEFAULT_BENAME +from lib389 import Entry +from lib389.topologies import topology_m1 as topo_supplier +from lib389.idm.user import UserAccounts +from lib389.utils import ldap, os, logging, ensure_bytes, ds_is_newer, ds_supports_new_changelog +from lib389.topologies import topology_st as topo +from lib389.idm.organizationalunit import OrganizationalUnits + +pytestmark = pytest.mark.tier1 + +DEBUGGING = os.getenv("DEBUGGING", default=False) +if DEBUGGING: + logging.getLogger(__name__).setLevel(logging.DEBUG) +else: + logging.getLogger(__name__).setLevel(logging.INFO) +log = logging.getLogger(__name__) + +user_data = {'cn': 'CNpwtest1', 'sn': 'SNpwtest1', 'uid': 'UIDpwtest1', 'mail': 'MAILpwtest1@redhat.com', + 'givenname': 'GNpwtest1'} + +TEST_PASSWORDS = list(user_data.values()) +# Add substring/token values of "CNpwtest1" +TEST_PASSWORDS += ['CNpwtest1ZZZZ', 'ZZZZZCNpwtest1', + 'ZCNpwtest1', 'CNpwtest1Z', 'ZCNpwtest1Z', + 'ZZCNpwtest1', 'CNpwtest1ZZ', 'ZZCNpwtest1ZZ', + 'ZZZCNpwtest1', 'CNpwtest1ZZZ', 'ZZZCNpwtest1ZZZ', + 'ZZZZZZCNpwtest1ZZZZZZZZ'] + +TEST_PASSWORDS2 = ( + 'CN12pwtest31', 'SN3pwtest231', 'UID1pwtest123', 'MAIL2pwtest12@redhat.com', '2GN1pwtest123', 'People123') + +def _check_unhashed_userpw(inst, user_dn, is_present=False): + """Check if unhashed#user#password attribute is present or not in the changelog""" + unhashed_pwd_attribute = 'unhashed#user#password' + + if ds_supports_new_changelog(): + dbscanOut = inst.dbscan(DEFAULT_BENAME, 'replication_changelog') + else: + changelog_dbdir = os.path.join(os.path.dirname(inst.dbdir), DEFAULT_CHANGELOG_DB) + for changelog_dbfile in glob.glob(f'{changelog_dbdir}*/*.db*'): + log.info('Changelog dbfile file exist: {}'.format(changelog_dbfile)) + 
dbscanOut = inst.dbscan(DEFAULT_CHANGELOG_DB, changelog_dbfile) + + for entry in dbscanOut.split(b'dbid: '): + if ensure_bytes('operation: modify') in entry and ensure_bytes(user_dn) in entry and ensure_bytes('userPassword') in entry: + if is_present: + assert ensure_bytes(unhashed_pwd_attribute) in entry + else: + assert ensure_bytes(unhashed_pwd_attribute) not in entry + +@pytest.fixture(scope="module") +def passw_policy(topo, request): + """Configure password policy with PasswordCheckSyntax attribute set to on""" + + log.info('Configure Pwpolicy with PasswordCheckSyntax and nsslapd-pwpolicy-local set to on') + topo.standalone.simple_bind_s(DN_DM, PASSWORD) + topo.standalone.config.set('PasswordExp', 'on') + topo.standalone.config.set('PasswordCheckSyntax', 'off') + topo.standalone.config.set('nsslapd-pwpolicy-local', 'on') + + subtree = 'ou=people,{}'.format(DEFAULT_SUFFIX) + log.info('Configure subtree password policy for {}'.format(subtree)) + topo.standalone.subtreePwdPolicy(subtree, {'passwordchange': b'on', + 'passwordCheckSyntax': b'on', + 'passwordLockout': b'on', + 'passwordResetFailureCount': b'3', + 'passwordLockoutDuration': b'3', + 'passwordMaxFailure': b'2'}) + time.sleep(1) + + def fin(): + log.info('Reset pwpolicy configuration settings') + topo.standalone.simple_bind_s(DN_DM, PASSWORD) + topo.standalone.config.set('PasswordExp', 'off') + topo.standalone.config.set('PasswordCheckSyntax', 'off') + topo.standalone.config.set('nsslapd-pwpolicy-local', 'off') + + request.addfinalizer(fin) + + +@pytest.fixture(scope="module") +def create_user(topo, request): + """Add test users using UserAccounts""" + + log.info('Adding user-uid={},ou=people,{}'.format(user_data['uid'], DEFAULT_SUFFIX)) + users = UserAccounts(topo.standalone, DEFAULT_SUFFIX) + user_properties = { + 'uidNumber': '1001', + 'gidNumber': '2001', + 'cn': 'pwtest1', + 'userpassword': PASSWORD, + 'homeDirectory': '/home/pwtest1'} + user_properties.update(user_data) + tuser = 
users.create(properties=user_properties) + + def fin(): + log.info('Deleting user-{}'.format(tuser.dn)) + tuser.delete() + + request.addfinalizer(fin) + return tuser + + +def test_pwp_local_unlock(topo, passw_policy, create_user): + """Test subtree policies use the same global default for passwordUnlock + + :id: 741a8417-5f65-4012-b9ed-87987ce3ca1b + :setup: Standalone instance + :steps: + 1. Test user can bind + 2. Bind with bad passwords to lockout account, and verify account is locked + 3. Wait for lockout interval, and bind with valid password + :expectedresults: + 1. Bind successful + 2. Entry is locked + 3. Entry can bind with correct password + """ + # Add aci so users can change their own password + USER_ACI = '(targetattr="userpassword")(version 3.0; acl "pwp test"; allow (all) userdn="ldap:///self";)' + ous = OrganizationalUnits(topo.standalone, DEFAULT_SUFFIX) + ou = ous.get('people') + ou.add('aci', USER_ACI) + + log.info("Verify user can bind...") + create_user.bind(PASSWORD) + + log.info('Test passwordUnlock default - user should be able to reset password after lockout') + for i in range(0, 2): + try: + create_user.bind("bad-password") + except ldap.INVALID_CREDENTIALS: + # expected + pass + except ldap.LDAPError as e: + log.fatal("Got unexpected failure: " + str(e)) + raise e + + log.info('Verify account is locked') + with pytest.raises(ldap.CONSTRAINT_VIOLATION): + create_user.bind(PASSWORD) + + log.info('Wait for lockout duration...') + time.sleep(4) + + log.info('Check if user can now bind with correct password') + create_user.bind(PASSWORD) + + +@pytest.mark.bz1465600 +@pytest.mark.parametrize("user_pasw", TEST_PASSWORDS) +def test_trivial_passw_check(topo, passw_policy, create_user, user_pasw): + """PasswordCheckSyntax attribute fails to validate cn, sn, uid, givenname, ou and mail attributes + + :id: bf9fe1ef-56cb-46a3-a6f8-5530398a06dc + :parametrized: yes + :setup: Standalone instance. + :steps: + 1. 
Configure local password policy with PasswordCheckSyntax set to on. + 2. Add users with cn, sn, uid, givenname, mail and userPassword attributes. + 3. Configure subtree password policy for ou=people subtree. + 4. Reset userPassword with trivial values like cn, sn, uid, givenname, ou and mail attributes. + :expectedresults: + 1. Enabling PasswordCheckSyntax should PASS. + 2. Add users should PASS. + 3. Configure subtree password policy should PASS. + 4. Resetting userPassword to cn, sn, uid and mail should be rejected. + """ + + create_user.rebind(PASSWORD) + log.info('Replace userPassword attribute with {}'.format(user_pasw)) + with pytest.raises(ldap.CONSTRAINT_VIOLATION) as excinfo: + create_user.reset_password(user_pasw) + log.fatal('Failed: Userpassword with {} is accepted'.format(user_pasw)) + assert 'password based off of user entry' in str(excinfo.value) + + # reset password + topo.standalone.simple_bind_s(DN_DM, PASSWORD) + create_user.set('userPassword', PASSWORD) + + +@pytest.mark.parametrize("user_pasw", TEST_PASSWORDS) +def test_global_vs_local(topo, passw_policy, create_user, user_pasw): + """Passwords rejected if its similar to uid, cn, sn, givenname, ou and mail attributes + + :id: dfd6cf5d-8bcd-4895-a691-a43ad9ec1be8 + :parametrized: yes + :setup: Standalone instance + :steps: + 1. Configure global password policy with PasswordCheckSyntax set to off + 2. Add users with cn, sn, uid, mail, givenname and userPassword attributes + 3. Replace userPassword similar to cn, sn, uid, givenname, ou and mail attributes + :expectedresults: + 1. Disabling the local policy should PASS. + 2. Add users should PASS. + 3. Resetting userPasswords similar to cn, sn, uid, givenname, ou and mail attributes should PASS. 
+ """ + + log.info('Configure Pwpolicy with PasswordCheckSyntax and nsslapd-pwpolicy-local set to off') + topo.standalone.simple_bind_s(DN_DM, PASSWORD) + topo.standalone.config.set('nsslapd-pwpolicy-local', 'off') + + create_user.rebind(PASSWORD) + log.info('Replace userPassword attribute with {}'.format(user_pasw)) + create_user.reset_password(user_pasw) + + # reset password + create_user.set('userPassword', PASSWORD) + +@pytest.mark.ds49789 +def test_unhashed_pw_switch(topo_supplier): + """Check that nsslapd-unhashed-pw-switch works correctly + + :id: e5aba180-d174-424d-92b0-14fe7bb0b92a + :setup: Supplier Instance + :steps: + 1. A Supplier is created, enable retrocl (not used here) + 2. Create a set of users + 3. update userpassword of user1 and check that unhashed#user#password is not logged (default) + 4. update userpassword of user2 and check that unhashed#user#password is not logged ('nolog') + 5. update userpassword of user3 and check that unhashed#user#password is logged ('on') + :expectedresults: + 1. Success + 2. Success + 3. Success (unhashed#user#password is not logged in the replication changelog) + 4. Success (unhashed#user#password is not logged in the replication changelog) + 5. Success (unhashed#user#password is logged in the replication changelog) + """ + MAX_USERS = 10 + PEOPLE_DN = ("ou=people," + DEFAULT_SUFFIX) + + inst = topo_supplier.ms["supplier1"] + inst.modify_s("cn=Retro Changelog Plugin,cn=plugins,cn=config", + [(ldap.MOD_REPLACE, 'nsslapd-changelogmaxage', b'2m'), + (ldap.MOD_REPLACE, 'nsslapd-changelog-trim-interval', b"5s"), + (ldap.MOD_REPLACE, 'nsslapd-logAccess', b'on')]) + inst.config.loglevel(vals=[256 + 4], service='access') + inst.restart() + # If you need any test suite initialization, + # please, write additional fixture for that (including finalizer). + # Topology for suites are predefined in lib389/topologies.py. 
+ + # enable dynamic plugins, memberof and retro cl plugin + # + log.info('Enable plugins...') + try: + inst.modify_s(DN_CONFIG, + [(ldap.MOD_REPLACE, + 'nsslapd-dynamic-plugins', + b'on')]) + except ldap.LDAPError as e: + ldap.error('Failed to enable dynamic plugins! ' + e.message['desc']) + assert False + + #topology_st.standalone.plugins.enable(name=PLUGIN_MEMBER_OF) + inst.plugins.enable(name=PLUGIN_RETRO_CHANGELOG) + #topology_st.standalone.modify_s("cn=changelog,cn=ldbm database,cn=plugins,cn=config", [(ldap.MOD_REPLACE, 'nsslapd-cachememsize', str(100000))]) + inst.restart() + + log.info('create users and group...') + for idx in range(1, MAX_USERS): + try: + USER_DN = ("uid=member%d,%s" % (idx, PEOPLE_DN)) + inst.add_s(Entry((USER_DN, + {'objectclass': 'top extensibleObject'.split(), + 'uid': 'member%d' % (idx)}))) + except ldap.LDAPError as e: + log.fatal('Failed to add user (%s): error %s' % (USER_DN, e.message['desc'])) + assert False + + # Check default is that unhashed#user#password is not logged on 1.4.1.6+ + user = "uid=member1,%s" % (PEOPLE_DN) + inst.modify_s(user, [(ldap.MOD_REPLACE, + 'userpassword', + PASSWORD.encode())]) + inst.stop() + if ds_is_newer('1.4.1.6'): + _check_unhashed_userpw(inst, user, is_present=False) + else: + _check_unhashed_userpw(inst, user, is_present=True) + inst.start() + + # Check with nolog that unhashed#user#password is not logged + inst.modify_s(DN_CONFIG, + [(ldap.MOD_REPLACE, + 'nsslapd-unhashed-pw-switch', + b'nolog')]) + inst.restart() + user = "uid=member2,%s" % (PEOPLE_DN) + inst.modify_s(user, [(ldap.MOD_REPLACE, + 'userpassword', + PASSWORD.encode())]) + inst.stop() + _check_unhashed_userpw(inst, user, is_present=False) + inst.start() + + # Check with value 'on' that unhashed#user#password is logged + inst.modify_s(DN_CONFIG, + [(ldap.MOD_REPLACE, + 'nsslapd-unhashed-pw-switch', + b'on')]) + inst.restart() + user = "uid=member3,%s" % (PEOPLE_DN) + inst.modify_s(user, [(ldap.MOD_REPLACE, + 'userpassword', + 
PASSWORD.encode())]) + inst.stop() + _check_unhashed_userpw(inst, user, is_present=True) + + if DEBUGGING: + # Add debugging steps(if any)... + pass + + +if __name__ == '__main__': + # Run isolated + # -s for DEBUG mode + CURRENT_FILE = os.path.realpath(__file__) + pytest.main(["-s", CURRENT_FILE]) diff --git a/dirsrvtests/tests/suites/plugins/__init__.py b/dirsrvtests/tests/suites/plugins/__init__.py new file mode 100644 index 0000000..fe45a34 --- /dev/null +++ b/dirsrvtests/tests/suites/plugins/__init__.py @@ -0,0 +1,3 @@ +""" + :Requirement: 389-ds-base: Directory Server Plugins +""" diff --git a/dirsrvtests/tests/suites/plugins/acceptance_test.py b/dirsrvtests/tests/suites/plugins/acceptance_test.py new file mode 100644 index 0000000..f3aa89f --- /dev/null +++ b/dirsrvtests/tests/suites/plugins/acceptance_test.py @@ -0,0 +1,1810 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2016 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. 
+# --- END COPYRIGHT BLOCK --- +# +''' +Created on Dec 09, 2014 + +@author: mreynolds +''' +import logging +import threading +from ldap.syncrepl import SyncreplConsumer +from ldap.ldapobject import ReconnectLDAPObject +import subprocess +import pytest +from lib389.utils import * +from lib389.plugins import * +from lib389._constants import * +from lib389.dseldif import DSEldif +from lib389.idm.user import UserAccounts +from lib389.idm.group import Groups +from lib389.idm.organizationalunit import OrganizationalUnits +from lib389.idm.domain import Domain +from lib389.topologies import create_topology, topology_i2 as topo + +pytestmark = pytest.mark.tier1 + +log = logging.getLogger(__name__) + +USER_DN = 'uid=test_user_1001,ou=people,dc=example,dc=com' +USER_PW = 'password' +GROUP_DN = 'cn=group,' + DEFAULT_SUFFIX +CONFIG_AREA = 'nsslapd-pluginConfigArea' + +if ds_is_older('1.3.7'): + MEMBER_ATTR = 'member' +else: + MEMBER_ATTR = 'memberOf' + +''' + Functional tests for each plugin + + Test: + plugin restarts (test when on and off) + plugin config validation + plugin dependencies + plugin functionality (including plugin tasks) +''' + + +def check_dependency(inst, plugin, online=True): + """Set the "account usability" plugin to depend on this plugin. 
+ This plugin is generic, always enabled, and perfect for our testing + """ + + acct_usability = AccountUsabilityPlugin(inst) + acct_usability.replace('nsslapd-plugin-depends-on-named', plugin.rdn) + + if online: + with pytest.raises(ldap.UNWILLING_TO_PERFORM): + plugin.disable() + # Now undo the change + acct_usability.remove('nsslapd-plugin-depends-on-named', plugin.rdn) + else: + plugin.disable() + with pytest.raises((subprocess.CalledProcessError, ValueError)): + inst.restart() + dse_ldif = DSEldif(inst) + dse_ldif.delete(acct_usability.dn, 'nsslapd-plugin-depends-on-named') + dse_ldif.replace(plugin.dn, 'nsslapd-pluginEnabled', 'on') + inst.start() + + +def test_acctpolicy(topo, args=None): + """Test Account policy basic functionality + + :id: 9b87493b-0493-46f9-8364-6099d0e5d829 + :setup: Standalone Instance + :steps: + 1. Enable the plugin + 2. Restart the instance + 3. Add a config entry for 'lastLoginTime' + 4. Add a user + 5. Bind as the user + 6. Check testLastLoginTime was added to the user + 7. Replace 'stateattrname': 'testLastLoginTime' + 8. Bind as the user + 9. Check testLastLoginTime was added to the user + 10. Check nsslapd-plugin-depends-on-named for the plugin + 11. Clean up + :expectedresults: + 1. Success + 2. Success + 3. Success + 4. Success + 5. Success + 6. Success + 7. Success + 8. Success + 9. Success + 10. Success + 11. 
Success + """ + + inst = topo[0] + + # stop the plugin, and start it + plugin = AccountPolicyPlugin(inst) + plugin.disable() + plugin.enable() + + if args == "restart": + return True + + # If args is None then we run the test suite as pytest standalone and it's not dynamic + if args is None: + inst.restart() + + log.info('Testing {}'.format(PLUGIN_ACCT_POLICY)) + + ############################################################################ + # Configure plugin + ############################################################################ + # Add the config entry + ap_configs = AccountPolicyConfigs(inst) + try: + ap_config = ap_configs.create(properties={'cn': 'config', + 'alwaysrecordlogin': 'yes', + 'stateattrname': 'lastLoginTime'}) + except ldap.ALREADY_EXISTS: + ap_config = ap_configs.get('config') + ap_config.replace_many(('alwaysrecordlogin', 'yes'), + ('stateattrname', 'lastLoginTime')) + + ############################################################################ + # Test plugin + ############################################################################ + # Add an entry + users = UserAccounts(inst, DEFAULT_SUFFIX) + user = users.create_test_user(1000, 2000) + user.add('objectclass', 'extensibleObject') + user.replace('userPassword', USER_PW) + + # Bind as user + user.bind(USER_PW) + time.sleep(1) + + # Check lastLoginTime of USER1 + entries = inst.search_s(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE, 'lastLoginTime=*') + assert entries + + ############################################################################ + # Change config - change the stateAttrName to a new attribute + ############################################################################ + test_attribute = "( 2.16.840.1.113719.1.1.4.1.35999 \ + NAME 'testLastLoginTime' DESC 'Test Last login time' \ + SYNTAX 1.3.6.1.4.1.1466.115.121.1.24 SINGLE-VALUE USAGE \ + directoryOperation X-ORIGIN 'dirsrvtests' )" + Schema(inst).add('attributetypes', test_attribute) + ap_config.replace('stateattrname', 
def test_attruniq(topo, args=None):
    """Test Attribute uniqueness basic functionality

    :id: 9b87493b-0493-46f9-8364-6099d0e5d801
    :setup: Standalone Instance
    :steps:
        1. Enable the plugin
        2. Restart the instance
        3. Add a user: with 'mail' and 'mailAlternateAddress' attributes
        4. Replace 'uniqueness-attribute-name': 'cn'
        5. Try to add a user with the same 'cn'
        6. Replace 'uniqueness-attribute-name': 'mail'
        7. Try to add a user with the same 'mail'
        8. Add 'uniqueness-attribute-name': 'mailAlternateAddress'
        9. Try to add a user with the same 'mailAlternateAddress'
        10. Check nsslapd-plugin-depends-on-named for the plugin
        11. Clean up
    :expectedresults:
        1. Success
        2. Success
        3. Success
        4. Success
        5. Should fail
        6. Success
        7. Should fail
        8. Success
        9. Should fail
        10. Success
        11. Success
    """

    inst = topo[0]

    # stop the plugin, and start it
    plugin = AttributeUniquenessPlugin(inst, dn="cn=attribute uniqueness,cn=plugins,cn=config")
    plugin.disable()
    plugin.enable()

    if args == "restart":
        return

    # If args is None then we run the test suite as pytest standalone and it's not dynamic
    if args is None:
        inst.restart()

    log.info('Testing {}'.format(PLUGIN_ATTR_UNIQUENESS))
    # user1 carries both 'mail' and 'mailAlternateAddress' so later sections can
    # provoke uniqueness conflicts against either attribute
    user1_dict = {'objectclass': 'extensibleObject',
                  'uid': 'testuser1',
                  'cn': 'testuser1',
                  'sn': 'user1',
                  'uidNumber': '1001',
                  'gidNumber': '2001',
                  'mail': 'user1@example.com',
                  'mailAlternateAddress': 'user1@alt.example.com',
                  'homeDirectory': '/home/testuser1',
                  'userpassword': 'password'}
    # user2 is re-used for every conflicting add below; each section mutates the
    # relevant attribute before the (expected to fail) create
    user2_dict = {'objectclass': 'extensibleObject',
                  'uid': 'testuser2',
                  'cn': 'testuser2',
                  'sn': 'user2',
                  'uidNumber': '1000',
                  'gidNumber': '2000',
                  'homeDirectory': '/home/testuser2',
                  'userpassword': 'password'}

    ############################################################################
    # Configure plugin
    ############################################################################
    plugin.replace('uniqueness-attribute-name', 'cn')
    if args is None:
        inst.restart()

    ############################################################################
    # Test plugin
    ############################################################################
    # Add an entry
    users = UserAccounts(inst, DEFAULT_SUFFIX)
    user1 = users.create(properties=user1_dict)

    # Add an entry with a duplicate "cn"
    with pytest.raises(ldap.CONSTRAINT_VIOLATION):
        user2_dict['cn'] = 'testuser1'
        users.create(properties=user2_dict)

    ############################################################################
    # Change config to use "mail" instead of "cn"
    ############################################################################

    plugin.replace('uniqueness-attribute-name', 'mail')

    ############################################################################
    # Test plugin - Add an entry, that has a duplicate "mail" value
    ############################################################################
    with pytest.raises(ldap.CONSTRAINT_VIOLATION):
        user2_dict['mail'] = 'user1@example.com'
        users.create(properties=user2_dict)

    ############################################################################
    # Reconfigure plugin for mail and mailAlternateAddress
    ############################################################################
    plugin.add('uniqueness-attribute-name', 'mailAlternateAddress')

    ############################################################################
    # Test plugin - Add an entry, that has a duplicate "mail" value
    ############################################################################
    with pytest.raises(ldap.CONSTRAINT_VIOLATION):
        user2_dict['mail'] = 'user1@example.com'
        users.create(properties=user2_dict)

    ############################################################################
    # Test plugin - Add an entry, that has a duplicate "mailAlternateAddress" value
    ############################################################################
    with pytest.raises(ldap.CONSTRAINT_VIOLATION):
        user2_dict['mailAlternateAddress'] = 'user1@alt.example.com'
        users.create(properties=user2_dict)

    ############################################################################
    # Test plugin - Add an entry, that has a duplicate "mail" value conflicting mailAlternateAddress
    ############################################################################
    with pytest.raises(ldap.CONSTRAINT_VIOLATION):
        user2_dict['mail'] = 'user1@alt.example.com'
        users.create(properties=user2_dict)

    ############################################################################
    # Test plugin - Add an entry, that has a duplicate "mailAlternateAddress" conflicting mail
    ############################################################################
    with pytest.raises(ldap.CONSTRAINT_VIOLATION):
        user2_dict['mailAlternateAddress'] = 'user1@example.com'
        users.create(properties=user2_dict)

    ############################################################################
    # Test plugin dependency
    ############################################################################
    check_dependency(inst, plugin, online=isinstance(args, str))

    ############################################################################
    # Cleanup - only user1 exists; every user2 create above was rejected
    ############################################################################
    user1.delete()

    ############################################################################
    # Test passed
    ############################################################################
    log.info('test_attruniq: PASS\n')
    return
def test_automember(topo, args=None):
    """Test Auto Membership basic functionality

    :id: 9b87493b-0493-46f9-8364-6099d0e5d802
    :setup: Standalone Instance
    :steps:
        1. Enable the plugin
        2. Restart the instance
        3. Add a group
        4. Add two Organisation Units entries
        5. Add a config entry for the group and one branch
        6. Add a user that should get added to the group
        7. Check the entry is in group
        8. Set groupattr to 'uniquemember:dn' and scope to branch2
        9. Add a user that should get added to the group
        10. Check the group
        11. Disable plugin and restart
        12. Add an entry that should be picked up by automember
        13. Verify that the entry is not picked up by automember (yet)
        14. Check the group - uniquemember should not exist
        15. Enable plugin and restart
        16. Verify the fixup task worked
        17. Check nsslapd-plugin-depends-on-named for the plugin
        18. Clean up
    :expectedresults:
        1. Success
        2. Success
        3. Success
        4. Success
        5. Success
        6. Success
        7. Success
        8. Success
        9. Success
        10. Success
        11. Success
        12. Success
        13. Success
        14. Success
        15. Success
        16. Success
        17. Success
        18. Success
    """

    inst = topo[0]

    # stop the plugin, and start it
    plugin = AutoMembershipPlugin(inst)
    plugin.disable()
    plugin.enable()

    if args == "restart":
        return

    # If args is None then we run the test suite as pytest standalone and it's not dynamic
    if args is None:
        inst.restart()

    log.info('Testing ' + PLUGIN_AUTOMEMBER + '...')

    ############################################################################
    # Configure plugin
    ############################################################################

    # Add the automember group
    groups = Groups(inst, DEFAULT_SUFFIX)
    group = groups.create(properties={'cn': 'group'})

    ous = OrganizationalUnits(inst, DEFAULT_SUFFIX)
    branch1 = ous.create(properties={'ou': 'branch1'})
    branch2 = ous.create(properties={'ou': 'branch2'})

    # Add the automember config entry: any entry under branch1 is added to
    # "group" via MEMBER_ATTR
    am_configs = AutoMembershipDefinitions(inst)
    am_config = am_configs.create(properties={'cn': 'config',
                                              'autoMemberScope': branch1.dn,
                                              'autoMemberFilter': 'objectclass=top',
                                              'autoMemberDefaultGroup': group.dn,
                                              'autoMemberGroupingAttr': '{}:dn'.format(MEMBER_ATTR)})

    ############################################################################
    # Test the plugin
    ############################################################################

    users = UserAccounts(inst, DEFAULT_SUFFIX, rdn='ou={}'.format(branch1.rdn))
    # Add a user that should get added to the group
    user1 = users.create_test_user(uid=1001)

    # Check the group
    group_members = group.get_attr_vals_utf8(MEMBER_ATTR)
    assert user1.dn in group_members

    ############################################################################
    # Change config - now match entries under branch2 and record them with
    # 'uniquemember' instead
    ############################################################################
    group.add('objectclass', 'groupOfUniqueNames')
    am_config.set_groupattr('uniquemember:dn')
    am_config.set_scope(branch2.dn)

    ############################################################################
    # Test plugin
    ############################################################################
    # Add a user that should get added to the group (note: "users" is rebound
    # to branch2 so the new user lands inside the updated scope)
    users = UserAccounts(inst, DEFAULT_SUFFIX, rdn='ou={}'.format(branch2.rdn))
    user2 = users.create_test_user(uid=1002)

    # Check the group
    group_members = group.get_attr_vals_utf8('uniquemember')
    assert user2.dn in group_members

    ############################################################################
    # Test Task
    ############################################################################

    # Disable plugin
    plugin.disable()

    # If args is None then we run the test suite as pytest standalone and it's not dynamic
    if args is None:
        inst.restart()

    # Add an entry that should be picked up by automember - verify it is not(yet)
    user3 = users.create_test_user(uid=1003)

    # Check the group - uniquemember should not exist
    group_members = group.get_attr_vals_utf8('uniquemember')
    assert user3.dn not in group_members

    # Enable plugin
    plugin.enable()

    # If args is None then we run the test suite as pytest standalone and it's not dynamic
    if args is None:
        inst.restart()
    # Run the fixup task over branch2 to retroactively add the missed member
    task = plugin.fixup(branch2.dn, _filter='objectclass=top')
    task.wait()

    # Verify the fixup task worked
    group_members = group.get_attr_vals_utf8('uniquemember')
    assert user3.dn in group_members

    ############################################################################
    # Test plugin dependency
    ############################################################################
    check_dependency(inst, plugin, online=isinstance(args, str))

    ############################################################################
    # Cleanup
    ############################################################################
    user1.delete()
    user2.delete()
    user3.delete()
    branch1.delete()
    branch2.delete()
    group.delete()
    am_config.delete()

    ############################################################################
    # Test passed
    ############################################################################
    log.info('test_automember: PASS\n')
    return
def test_dna(topo, args=None):
    """Test DNA basic functionality

    :id: 9b87493b-0493-46f9-8364-6099d0e5d803
    :setup: Standalone Instance
    :steps:
        1. Enable the plugin
        2. Restart the instance
        3. Configure plugin for uidNumber
        4. Add a user
        5. See if the entry now has the new uidNumber assignment - uidNumber=1
        6. Test the magic regen value
        7. See if the entry now has the new uidNumber assignment - uidNumber=2
        8. Set 'dnaMagicRegen': '-2'
        9. Test the magic regen value
        10. See if the entry now has the new uidNumber assignment - uidNumber=3
        11. Check nsslapd-plugin-depends-on-named for the plugin
        12. Clean up
    :expectedresults:
        1. Success
        2. Success
        3. Success
        4. Success
        5. Success
        6. Success
        7. Success
        8. Success
        9. Success
        10. Success
        11. Success
        12. Success
    """

    inst = topo[0]

    # stop the plugin, and start it
    plugin = DNAPlugin(inst)
    plugin.disable()
    plugin.enable()

    if args == "restart":
        return

    # If args is None then we run the test suite as pytest standalone and it's not dynamic
    if args is None:
        inst.restart()

    log.info('Testing ' + PLUGIN_DNA + '...')

    ############################################################################
    # Configure plugin
    ############################################################################
    # Create the DNA config; if a previous run left it behind, reset the
    # counter and magic value instead
    dna_configs = DNAPluginConfigs(inst, plugin.dn)
    try:
        dna_config = dna_configs.create(properties={'cn': 'config',
                                                    'dnatype': 'uidNumber',
                                                    'dnafilter': '(objectclass=top)',
                                                    'dnascope': DEFAULT_SUFFIX,
                                                    'dnaMagicRegen': '-1',
                                                    'dnaMaxValue': '50000',
                                                    'dnaNextValue': '1'})
    except ldap.ALREADY_EXISTS:
        dna_config = dna_configs.get('config')
        dna_config.replace_many(('dnaNextValue', '1'), ('dnaMagicRegen', '-1'))

    ############################################################################
    # Test plugin
    ############################################################################
    users = UserAccounts(inst, DEFAULT_SUFFIX)
    user1 = users.create_test_user(uid=1)

    # See if the entry now has the new uidNumber assignment - uidNumber=1
    entries = inst.search_s(user1.dn, ldap.SCOPE_BASE, '(uidNumber=1)')
    assert entries

    # Test the magic regen value: writing the magic value ('-1') should make
    # the plugin replace it with the next generated number
    user1.replace('uidNumber', '-1')

    # See if the entry now has the new uidNumber assignment - uidNumber=2
    entries = inst.search_s(user1.dn, ldap.SCOPE_BASE, '(uidNumber=2)')
    assert entries

    ################################################################################
    # Change the config
    ################################################################################
    dna_config.replace('dnaMagicRegen', '-2')

    ################################################################################
    # Test plugin
    ################################################################################

    # Test the (new) magic regen value
    user1.replace('uidNumber', '-2')

    # See if the entry now has the new uidNumber assignment - uidNumber=3
    entries = inst.search_s(user1.dn, ldap.SCOPE_BASE, '(uidNumber=3)')
    assert entries

    ############################################################################
    # Test plugin dependency
    ############################################################################
    check_dependency(inst, plugin, online=isinstance(args, str))

    ############################################################################
    # Cleanup
    ############################################################################
    user1.delete()
    dna_config.delete()
    plugin.disable()

    # If args is None then we run the test suite as pytest standalone and it's not dynamic
    if args is None:
        inst.restart()

    ############################################################################
    # Test passed
    ############################################################################
    log.info('test_dna: PASS\n')
    return
def test_linkedattrs(topo, args=None):
    """Test Linked Attributes basic functionality

    :id: 9b87493b-0493-46f9-8364-6099d0e5d804
    :setup: Standalone Instance
    :steps:
        1. Enable the plugin
        2. Restart the instance
        3. Add a config entry for directReport
        4. Add test entries
        5. Add the linked attrs config entry
        6. User1 - Set "directReport" to user2
        7. See if manager was added to the other entry
        8. User1 - Remove "directReport"
        9. See if manager was removed
        10. Change the config - using linkType "indirectReport" now
        11. Make sure the old linkType(directManager) is not working
        12. See if manager was added to the other entry, better not be...
        13. Now, set the new linkType "indirectReport", which should add "manager" to the other entry
        14. See if manager was added to the other entry
        15. Remove "indirectReport" should remove "manager" to the other entry
        16. See if manager was removed
        17. Disable plugin and make some updates that would have triggered the plugin
        18. The entry should not have a manager attribute
        19. Enable the plugin and rerun the task entry
        20. Add the task again
        21. Check if user2 now has a manager attribute now
        22. Check nsslapd-plugin-depends-on-named for the plugin
        23. Clean up
    :expectedresults:
        1. Success
        2. Success
        3. Success
        4. Success
        5. Success
        6. Success
        7. Success
        8. Success
        9. Success
        10. Success
        11. Success
        12. Success
        13. Success
        14. Success
        15. Success
        16. Success
        17. Success
        18. Success
        19. Success
        20. Success
        21. Success
        22. Success
        23. Success
    """

    inst = topo[0]

    # stop the plugin, and start it
    plugin = LinkedAttributesPlugin(inst)
    plugin.disable()
    plugin.enable()

    if args == "restart":
        return

    # If args is None then we run the test suite as pytest standalone and it's not dynamic
    if args is None:
        inst.restart()

    log.info('Testing ' + PLUGIN_LINKED_ATTRS + '...')

    ############################################################################
    # Configure plugin
    ############################################################################

    # Add test entries
    users = UserAccounts(inst, DEFAULT_SUFFIX)
    user1 = users.create_test_user(uid=1001)
    user1.add('objectclass', 'extensibleObject')
    user2 = users.create_test_user(uid=1002)
    user2.add('objectclass', 'extensibleObject')

    # Add the linked attrs config entry
    la_configs = LinkedAttributesConfigs(inst)
    la_config = la_configs.create(properties={'cn': 'config',
                                              'linkType': 'directReport',
                                              'managedType': 'manager'})

    ############################################################################
    # Test plugin
    ############################################################################
    # Set "directReport" should add "manager" to the other entry
    user1.replace('directReport', user2.dn)

    # See if manager was added to the other entry
    entries = inst.search_s(user2.dn, ldap.SCOPE_BASE, '(manager=*)')
    assert entries

    # Remove "directReport" should remove "manager" from the other entry
    user1.remove_all('directReport')

    # See if manager was removed
    entries = inst.search_s(user2.dn, ldap.SCOPE_BASE, '(manager=*)')
    assert not entries

    ############################################################################
    # Change the config - using linkType "indirectReport" now
    ############################################################################
    la_config.replace('linkType', 'indirectReport')

    ############################################################################
    # Test plugin
    ############################################################################
    # Make sure the old linkType(directManager) is not working
    user1.replace('directReport', user2.dn)

    # See if manager was added to the other entry, better not be...
    entries = inst.search_s(user2.dn, ldap.SCOPE_BASE, '(manager=*)')
    assert not entries

    # Now, set the new linkType "indirectReport", which should add "manager" to the other entry
    user1.replace('indirectReport', user2.dn)

    # See if manager was added to the other entry - it should be there now
    entries = inst.search_s(user2.dn, ldap.SCOPE_BASE, '(manager=*)')
    assert entries

    # Remove "indirectReport" should remove "manager" from the other entry
    user1.remove_all('indirectReport')

    # See if manager was removed
    entries = inst.search_s(user2.dn, ldap.SCOPE_BASE, '(manager=*)')
    assert not entries

    ############################################################################
    # Test Fixup Task
    ############################################################################
    # Disable plugin and make some updates that would have triggered the plugin
    plugin.disable()

    # If args is None then we run the test suite as pytest standalone and it's not dynamic
    if args is None:
        inst.restart()

    user1.replace('indirectReport', user2.dn)

    # The entry should not have a manager attribute
    entries = inst.search_s(user2.dn, ldap.SCOPE_BASE, '(manager=*)')
    assert not entries

    # Enable the plugin and rerun the task entry
    plugin.enable()

    # If args is None then we run the test suite as pytest standalone and it's not dynamic
    if args is None:
        inst.restart()

    # Add the task again
    task = plugin.fixup(la_config.dn)
    task.wait()

    # Check if user2 now has a manager attribute now
    entries = inst.search_s(user2.dn, ldap.SCOPE_BASE, '(manager=*)')
    assert entries

    ############################################################################
    # Test plugin dependency
    ############################################################################
    check_dependency(inst, plugin, online=isinstance(args, str))

    ############################################################################
    # Cleanup
    ############################################################################
    user1.delete()
    user2.delete()
    la_config.delete()

    ############################################################################
    # Test passed
    ############################################################################
    log.info('test_linkedattrs: PASS\n')
    return
def test_memberof(topo, args=None):
    """Test MemberOf basic functionality

    :id: 9b87493b-0493-46f9-8364-6099d0e5d805
    :setup: Standalone Instance
    :steps:
        1. Enable the plugin
        2. Restart the instance
        3. Replace groupattr with 'member'
        4. Add our test entries
        5. Check if the user now has a "memberOf" attribute
        6. Remove "member" should remove "memberOf" from the entry
        7. Check that "memberOf" was removed
        8. Replace 'memberofgroupattr': 'uniquemember'
        9. Replace 'uniquemember': user1
        10. Check if the user now has a "memberOf" attribute
        11. Remove "uniquemember" should remove "memberOf" from the entry
        12. Check that "memberOf" was removed
        13. The shared config entry uses "member" - the above test uses "uniquemember"
        14. Delete the test entries then re-add them to start with a clean slate
        15. Check if the user now has a "memberOf" attribute
        16. Check that "memberOf" was removed
        17. Replace 'memberofgroupattr': 'uniquemember'
        18. Check if the user now has a "memberOf" attribute
        19. Remove "uniquemember" should remove "memberOf" from the entry
        20. Check that "memberOf" was removed
        21. Replace 'memberofgroupattr': 'member'
        22. Remove shared config from plugin
        23. Check if the user now has a "memberOf" attribute
        24. Remove "member" should remove "memberOf" from the entry
        25. Check that "memberOf" was removed
        26. First change the plugin to use uniquemember
        27. Add uniquemember, should not update user1
        28. Check for "memberOf"
        29. Enable memberof plugin
        30. Run the task and validate that it worked
        31. Check for "memberOf"
        32. Check nsslapd-plugin-depends-on-named for the plugin
        33. Clean up
    :expectedresults:
        1. Success
        2. Success
        3. Success
        4. Success
        5. Success
        6. Success
        7. Success
        8. Success
        9. Success
        10. Success
        11. Success
        12. Success
        13. Success
        14. Success
        15. Success
        16. Success
        17. Success
        18. Success
        19. Success
        20. Success
        21. Success
        22. Success
        23. Success
        24. Success
        25. Success
        26. Success
        27. Success
        28. Success
        29. Success
        30. Success
        31. Success
        32. Success
        33. Success
    """

    inst = topo[0]

    # stop the plugin, and start it
    plugin = MemberOfPlugin(inst)
    plugin.disable()
    plugin.enable()

    if args == "restart":
        return

    # If args is None then we run the test suite as pytest standalone and it's not dynamic
    if args is None:
        inst.restart()

    log.info('Testing ' + PLUGIN_MEMBER_OF + '...')

    ############################################################################
    # Configure plugin
    ############################################################################
    plugin.replace_groupattr('member')

    ############################################################################
    # Test plugin
    ############################################################################
    # Add our test entries
    users = UserAccounts(inst, DEFAULT_SUFFIX)
    user1 = users.create_test_user(uid=1001)

    groups = Groups(inst, DEFAULT_SUFFIX)
    group = groups.create(properties={'cn': 'group',
                                      'member': user1.dn})
    group.add('objectclass', 'groupOfUniqueNames')

    # Shared config entry - not wired into the plugin until set_configarea()
    # is called further below
    memberof_config = MemberOfSharedConfig(inst, 'cn=memberOf config,{}'.format(DEFAULT_SUFFIX))
    memberof_config.create(properties={'cn': 'memberOf config',
                                       'memberOfGroupAttr': 'member',
                                       'memberOfAttr': MEMBER_ATTR})

    # Check if the user now has a "memberOf" attribute
    entries = inst.search_s(user1.dn, ldap.SCOPE_BASE, '({}=*)'.format(MEMBER_ATTR))
    assert entries

    # Remove "member" should remove "memberOf" from the entry
    group.remove_all('member')

    # Check that "memberOf" was removed
    entries = inst.search_s(user1.dn, ldap.SCOPE_BASE, '({}=*)'.format(MEMBER_ATTR))
    assert not entries

    ############################################################################
    # Change the config
    ############################################################################
    plugin.replace('memberofgroupattr', 'uniquemember')

    ############################################################################
    # Test plugin
    ############################################################################
    group.replace('uniquemember', user1.dn)

    # Check if the user now has a "memberOf" attribute
    entries = inst.search_s(user1.dn, ldap.SCOPE_BASE, '({}=*)'.format(MEMBER_ATTR))
    assert entries

    # Remove "uniquemember" should remove "memberOf" from the entry
    group.remove_all('uniquemember')

    # Check that "memberOf" was removed
    entries = inst.search_s(user1.dn, ldap.SCOPE_BASE, '({}=*)'.format(MEMBER_ATTR))
    assert not entries

    ############################################################################
    # Set the shared config entry and test the plugin
    ############################################################################
    # The shared config entry uses "member" - the above test uses "uniquemember"
    plugin.set_configarea(memberof_config.dn)
    if args is None:
        inst.restart()

    # Delete the test entries then re-add them to start with a clean slate
    user1.delete()
    group.delete()

    user1 = users.create_test_user(uid=1001)
    group = groups.create(properties={'cn': 'group',
                                      'member': user1.dn})
    group.add('objectclass', 'groupOfUniqueNames')

    # Test the shared config
    # Check if the user now has a "memberOf" attribute
    entries = inst.search_s(user1.dn, ldap.SCOPE_BASE, '({}=*)'.format(MEMBER_ATTR))
    assert entries

    group.remove_all('member')

    # Check that "memberOf" was removed
    entries = inst.search_s(user1.dn, ldap.SCOPE_BASE, '({}=*)'.format(MEMBER_ATTR))
    assert not entries

    ############################################################################
    # Change the shared config entry to use 'uniquemember' and test the plugin
    ############################################################################
    memberof_config.replace('memberofgroupattr', 'uniquemember')

    group.replace('uniquemember', user1.dn)

    # Check if the user now has a "memberOf" attribute
    entries = inst.search_s(user1.dn, ldap.SCOPE_BASE, '({}=*)'.format(MEMBER_ATTR))
    assert entries

    # Remove "uniquemember" should remove "memberOf" from the entry
    group.remove_all('uniquemember')

    # Check that "memberOf" was removed
    entries = inst.search_s(user1.dn, ldap.SCOPE_BASE, '({}=*)'.format(MEMBER_ATTR))
    assert not entries

    ############################################################################
    # Remove shared config from plugin, and retest
    ############################################################################
    # First change the plugin to use member before we move the shared config that uses uniquemember
    plugin.replace('memberofgroupattr', 'member')

    # Remove shared config from plugin
    plugin.remove_configarea()

    group.replace('member', user1.dn)

    # Check if the user now has a "memberOf" attribute
    entries = inst.search_s(user1.dn, ldap.SCOPE_BASE, '({}=*)'.format(MEMBER_ATTR))
    assert entries

    # Remove "member" should remove "memberOf" from the entry
    group.remove_all('member')

    # Check that "memberOf" was removed
    entries = inst.search_s(user1.dn, ldap.SCOPE_BASE, '({}=*)'.format(MEMBER_ATTR))
    assert not entries

    ############################################################################
    # Test Fixup Task
    ############################################################################
    plugin.disable()

    # If args is None then we run the test suite as pytest standalone and it's not dynamic
    if args is None:
        inst.restart()

    # First change the plugin to use uniquemember
    plugin.replace('memberofgroupattr', 'uniquemember')

    # Add uniquemember, should not update user1 (the plugin is disabled)
    group.replace('uniquemember', user1.dn)

    # Check for "memberOf"
    entries = inst.search_s(user1.dn, ldap.SCOPE_BASE, '({}=*)'.format(MEMBER_ATTR))
    assert not entries

    # Enable memberof plugin
    plugin.enable()

    # If args is None then we run the test suite as pytest standalone and it's not dynamic
    if args is None:
        inst.restart()

    #############################################################
    # Test memberOf fixup arg validation: Test the DN and filter
    #############################################################
    # NOTE: "search_filter" avoids shadowing the builtin filter()
    for basedn, search_filter in (('{}bad'.format(DEFAULT_SUFFIX), 'objectclass=top'),
                                  ("bad", 'objectclass=top'),
                                  (DEFAULT_SUFFIX, '(objectclass=top')):
        task = plugin.fixup(basedn, search_filter)
        task.wait()
        exitcode = task.get_exit_code()
        # The task must report failure.  Accept either an int or a string exit
        # code: the old check (exitcode != "0") compared against the *string*
        # "0", which an integer exit code of 0 would never equal, so the assert
        # could never catch a wrongly-successful task.
        assert exitcode not in (0, "0"), \
            'test_memberof: Task with invalid DN still reported success'

    ####################################################
    # Test fixup works
    ####################################################
    # Run the task and validate that it worked
    task = plugin.fixup(DEFAULT_SUFFIX, 'objectclass=top')
    task.wait()

    # Check for "memberOf"
    entries = inst.search_s(user1.dn, ldap.SCOPE_BASE, '({}=*)'.format(MEMBER_ATTR))
    assert entries

    ############################################################################
    # Test plugin dependency
    ############################################################################
    check_dependency(inst, plugin, online=isinstance(args, str))

    ############################################################################
    # Cleanup
    ############################################################################
    user1.delete()
    group.delete()
    memberof_config.delete()

    ############################################################################
    # Test passed
    ############################################################################
    log.info('test_memberof: PASS\n')
    return
def test_mep(topo, args=None):
    """Test Managed Entries basic functionality

    :id: 9b87493b-0493-46f9-8364-6099d0e5d806
    :setup: Standalone Instance
    :steps:
        1. Enable the plugin
        2. Restart the instance
        3. Add our org units
        4. Set up config entry and template entry for the org units
        5. Add an entry that meets the MEP scope
        6. Check if a managed group entry was created
        7. Add a new template entry
        8. Add an entry that meets the MEP scope
        9. Check if a managed group entry was created
        10. Check nsslapd-plugin-depends-on-named for the plugin
        11. Clean up
    :expectedresults:
        1. Success
        2. Success
        3. Success
        4. Success
        5. Success
        6. Success
        7. Success
        8. Success
        9. Success
        10. Success
        11. Success
    """

    inst = topo[0]

    # stop the plugin, and start it
    plugin = ManagedEntriesPlugin(inst)
    plugin.disable()
    plugin.enable()

    if args == "restart":
        return

    # If args is None then we run the test suite as pytest standalone and it's not dynamic
    if args is None:
        inst.restart()

    log.info('Testing ' + PLUGIN_MANAGED_ENTRY + '...')

    ############################################################################
    # Configure plugin
    ############################################################################
    # Add our org units
    ous = OrganizationalUnits(inst, DEFAULT_SUFFIX)
    ou_people = ous.create(properties={'ou': 'managed_people'})
    ou_groups = ous.create(properties={'ou': 'managed_groups'})

    # Template 1 names the managed entry by 'cn' (mepRDNAttr)
    mep_templates = MEPTemplates(inst, DEFAULT_SUFFIX)
    mep_template1 = mep_templates.create(properties={
        'cn': 'MEP template',
        'mepRDNAttr': 'cn',
        'mepStaticAttr': 'objectclass: posixGroup|objectclass: extensibleObject'.split('|'),
        'mepMappedAttr': 'cn: $cn|uid: $cn|gidNumber: $uidNumber'.split('|')
    })
    mep_configs = MEPConfigs(inst)
    mep_config = mep_configs.create(properties={'cn': 'config',
                                                'originScope': ou_people.dn,
                                                'originFilter': 'objectclass=posixAccount',
                                                'managedBase': ou_groups.dn,
                                                'managedTemplate': mep_template1.dn})
    if args is None:
        inst.restart()

    ############################################################################
    # Test plugin
    ############################################################################
    # Add an entry that meets the MEP scope
    test_users_m1 = UserAccounts(inst, DEFAULT_SUFFIX, rdn='ou={}'.format(ou_people.rdn))
    test_user1 = test_users_m1.create_test_user(1001)

    # Check if a managed group entry was created (template1 => RDN is cn=...)
    entries = inst.search_s('cn={},{}'.format(test_user1.rdn, ou_groups.dn), ldap.SCOPE_BASE, '(objectclass=top)')
    assert len(entries) == 1

    ############################################################################
    # Change the config
    ############################################################################
    # Add a new template entry - this one names the managed entry by 'uid'
    mep_template2 = mep_templates.create(properties={
        'cn': 'MEP template2',
        'mepRDNAttr': 'uid',
        'mepStaticAttr': 'objectclass: posixGroup|objectclass: extensibleObject'.split('|'),
        'mepMappedAttr': 'cn: $cn|uid: $cn|gidNumber: $uidNumber'.split('|')
    })
    mep_config.replace('managedTemplate', mep_template2.dn)

    ############################################################################
    # Test plugin
    ############################################################################
    # Add an entry that meets the MEP scope
    test_user2 = test_users_m1.create_test_user(1002)

    # Check if a managed group entry was created (template2 => RDN is uid=...)
    entries = inst.search_s('uid={},{}'.format(test_user2.rdn, ou_groups.dn), ldap.SCOPE_BASE, '(objectclass=top)')
    assert len(entries) == 1

    ############################################################################
    # Test plugin dependency
    ############################################################################
    check_dependency(inst, plugin, online=isinstance(args, str))

    ############################################################################
    # Cleanup
    ############################################################################
    test_user1.delete()
    test_user2.delete()
    ou_people.delete()
    ou_groups.delete()
    mep_config.delete()
    mep_template1.delete()
    mep_template2.delete()

    ############################################################################
    # Test passed
    ############################################################################
    log.info('test_mep: PASS\n')
    return
online=isinstance(args, str)) + + ############################################################################ + # Cleanup + ############################################################################ + test_user1.delete() + test_user2.delete() + ou_people.delete() + ou_groups.delete() + mep_config.delete() + mep_template1.delete() + mep_template2.delete() + + ############################################################################ + # Test passed + ############################################################################ + log.info('test_mep: PASS\n') + return + + +def test_passthru(topo, args=None): + """Test Passthrough Authentication basic functionality + + :id: 9b87493b-0493-46f9-8364-6099d0e5d807 + :setup: Standalone Instance + :steps: + 1. Stop the plugin + 2. Restart the instance + 3. Create a second backend + 4. Create the top of the tree + 5. Add user to suffix1 + 6. Configure and start plugin + 7. Login as user + 8. Login as root DN + 9. Replace 'nsslapd-pluginarg0': ldap uri for second instance + 10. Login as user + 11. Login as root DN + 12. Check nsslapd-plugin-depends-on-named for the plugin + 13. Clean up + :expectedresults: + 1. Success + 2. Success + 3. Success + 4. Success + 5. Success + 6. Success + 7. Success + 8. Success + 9. Success + 10. Success + 11. Success + 12. Success + 13. 
Success + """ + + inst1 = topo[0] + inst2 = topo[1] + + # Passthru is a bit picky about the state of the entry - we can't just restart it + if args == "restart": + return + + # stop the plugin + plugin = PassThroughAuthenticationPlugin(inst1) + plugin.disable() + + # If args is None then we run the test suite as pytest standalone and it's not dynamic + if args is None: + inst1.restart() + + PASS_SUFFIX1 = 'dc=pass1,dc=thru' + PASS_SUFFIX2 = 'dc=pass2,dc=thru' + PASS_BE1 = 'PASS1' + PASS_BE2 = 'PASS2' + + log.info('Testing ' + PLUGIN_PASSTHRU + '...') + + ############################################################################ + # Use a new "remote" instance, and a user for auth + ############################################################################ + # Create a second backend + backend1 = inst2.backends.create(properties={'cn': PASS_BE1, + 'nsslapd-suffix': PASS_SUFFIX1}) + backend2 = inst2.backends.create(properties={'cn': PASS_BE2, + 'nsslapd-suffix': PASS_SUFFIX2}) + + # Create the top of the tree + suffix = Domain(inst2, PASS_SUFFIX1) + pass1 = suffix.create(properties={'dc': 'pass1'}) + suffix = Domain(inst2, PASS_SUFFIX2) + pass2 = suffix.create(properties={'dc': 'pass2'}) + + # Add user to suffix1 + users = UserAccounts(inst2, pass1.dn, None) + test_user1 = users.create_test_user(1001) + test_user1.replace('userpassword', 'password') + + users = UserAccounts(inst2, pass2.dn, None) + test_user2 = users.create_test_user(1002) + test_user2.replace('userpassword', 'password') + + ############################################################################ + # Configure and start plugin + ############################################################################ + plugin.replace('nsslapd-pluginarg0', + 'ldap://{}:{}/{}'.format(inst2.host, inst2.port, pass1.dn)) + plugin.enable() + + # If args is None then we run the test suite as pytest standalone and it's not dynamic + if args is None: + inst1.restart() + + 
############################################################################ + # Test plugin + ############################################################################ + # login as user + inst1.simple_bind_s(test_user1.dn, "password") + + ############################################################################ + # Change the config + ############################################################################ + # login as root DN + inst1.simple_bind_s(DN_DM, PASSWORD) + + plugin.replace('nsslapd-pluginarg0', + 'ldap://{}:{}/{}'.format(inst2.host, inst2.port, pass2.dn)) + if args is None: + inst1.restart() + + ############################################################################ + # Test plugin + ############################################################################ + + # login as user + inst1.simple_bind_s(test_user2.dn, "password") + + # login as root DN + inst1.simple_bind_s(DN_DM, PASSWORD) + + # Clean up + backend1.delete() + backend2.delete() + + ############################################################################ + # Test plugin dependency + ############################################################################ + check_dependency(inst1, plugin, online=isinstance(args, str)) + + ############################################################################ + # Test passed + ############################################################################ + log.info('test_passthru: PASS\n') + return + + +def test_referint(topo, args=None): + """Test Referential Integrity basic functionality + + :id: 9b87493b-0493-46f9-8364-6099d0e5d808 + :setup: Standalone Instance + :steps: + 1. Enable the plugin + 2. Restart the instance + 3. Replace 'referint-membership-attr': 'member' + 4. Add some users and a group + 5. Grab the referint log file from the plugin + 6. Add shared config entry + 7. Delete one user + 8. Check for integrity + 9. Replace 'referint-membership-attr': 'uniquemember' + 10. Delete second user + 11. 
Check for integrity + 12. The shared config entry uses "member" - the above test used "uniquemember" + 13. Recreate users and a group + 14. Delete one user + 15. Check for integrity + 16. Change the shared config entry to use 'uniquemember' and test the plugin + 17. Delete second user + 18. Check for integrity + 19. First change the plugin to use member before we move the shared config that uses uniquemember + 20. Remove shared config from plugin + 21. Add test user + 22. Add user to group + 23. Delete a user + 24. Check for integrity + 25. Check nsslapd-plugin-depends-on-named for the plugin + 26. Clean up + :expectedresults: + 1. Success + 2. Success + 3. Success + 4. Success + 5. Success + 6. Success + 7. Success + 8. Success + 9. Success + 10. Success + 11. Success + 12. Success + 13. Success + 14. Success + 15. Success + 16. Success + 17. Success + 18. Success + 19. Success + 20. Success + 21. Success + 22. Success + 23. Success + 24. Success + 25. Success + 26. Success + """ + + inst = topo[0] + + # stop the plugin, and start it + plugin = ReferentialIntegrityPlugin(inst) + plugin.disable() + plugin.enable() + + if args == "restart": + return + + # If args is None then we run the test suite as pytest standalone and it's not dynamic + if args is None: + inst.restart() + + log.info('Testing ' + PLUGIN_REFER_INTEGRITY + '...') + + ############################################################################ + # Configure plugin + ############################################################################ + plugin.replace('referint-membership-attr', 'member') + + ############################################################################ + # Test plugin + ############################################################################ + # Add some users and a group + users = UserAccounts(inst, DEFAULT_SUFFIX, None) + user1 = users.create_test_user(uid=1001) + user2 = users.create_test_user(uid=1002) + + groups = Groups(inst, DEFAULT_SUFFIX, None) + group = 
groups.create(properties={'cn': 'group', + MEMBER_ATTR: user1.dn}) + group.add('objectclass', 'groupOfUniqueNames') + group.add('uniquemember', user2.dn) + + # Grab the referint log file from the plugin + referin_logfile = plugin.get_attr_val_utf8('referint-logfile') + + # Add shared config entry + referin_config = ReferentialIntegrityConfig(inst, 'cn=RI config,{}'.format(DEFAULT_SUFFIX)) + referin_config.create(properties={'cn': 'RI config', + 'referint-membership-attr': 'member', + 'referint-update-delay': '0', + 'referint-logfile': referin_logfile}) + + user1.delete() + + # Check for integrity + entry = inst.search_s(group.dn, ldap.SCOPE_BASE, '(member={})'.format(user1.dn)) + assert not entry + + ############################################################################ + # Change the config + ############################################################################ + plugin.replace('referint-membership-attr', 'uniquemember') + + ############################################################################ + # Test plugin + ############################################################################ + + user2.delete() + + # Check for integrity + entry = inst.search_s(group.dn, ldap.SCOPE_BASE, '(uniquemember={})'.format(user2.dn)) + assert not entry + + ############################################################################ + # Set the shared config entry and test the plugin + ############################################################################ + # The shared config entry uses "member" - the above test used "uniquemember" + plugin.set_configarea(referin_config.dn) + group.delete() + + user1 = users.create_test_user(uid=1001) + user2 = users.create_test_user(uid=1002) + group = groups.create(properties={'cn': 'group', + MEMBER_ATTR: user1.dn}) + group.add('objectclass', 'groupOfUniqueNames') + group.add('uniquemember', user2.dn) + + # Delete a user + user1.delete() + + # Check for integrity + entry = inst.search_s(group.dn, ldap.SCOPE_BASE, 
'(member={})'.format(user1.dn))
    assert not entry

    ############################################################################
    # Change the shared config entry to use 'uniquemember' and test the plugin
    ############################################################################

    referin_config.replace('referint-membership-attr', 'uniquemember')

    # Delete a user
    user2.delete()

    # Check for integrity
    entry = inst.search_s(group.dn, ldap.SCOPE_BASE, '(uniquemember={})'.format(user2.dn))
    assert not entry

    ############################################################################
    # Remove shared config from plugin, and retest
    ############################################################################
    # First change the plugin to use member before we move the shared config that uses uniquemember
    plugin.replace('referint-membership-attr', 'member')

    # Remove shared config from plugin
    plugin.remove_configarea()

    # Add test user
    user1 = users.create_test_user(uid=1001)

    # Add user to group
    group.replace('member', user1.dn)

    # Delete a user
    user1.delete()

    # Check for integrity
    entry = inst.search_s(group.dn, ldap.SCOPE_BASE, '(member={})'.format(user1.dn))
    assert not entry

    ############################################################################
    # Test plugin dependency
    ############################################################################
    check_dependency(inst, plugin, online=isinstance(args, str))

    ############################################################################
    # Cleanup
    ############################################################################
    group.delete()
    referin_config.delete()

    ############################################################################
    # Test passed
    ############################################################################
    log.info('test_referint: PASS\n')
    return


def test_retrocl(topo, args=None):
    """Test Retro Changelog basic functionality

    :id: 9b87493b-0493-46f9-8364-6099d0e5d810
    :setup: Standalone Instance
    :steps:
        1. Enable the plugin
        2. Restart the instance
        3. Gather the current change count (it's not 1 once we start the stability tests)
        4. Add a user
        5. Check we logged this in the retro cl
        6. Change the config - disable plugin
        7. Delete the user
        8. Check we didn't log this in the retro cl
        9. Check nsslapd-plugin-depends-on-named for the plugin
        10. Clean up
    :expectedresults:
        1. Success
        2. Success
        3. Success
        4. Success
        5. Success
        6. Success
        7. Success
        8. Success
        9. Success
        10. Success
    """

    inst = topo[0]

    # stop the plugin, and start it
    plugin = RetroChangelogPlugin(inst)
    plugin.disable()
    plugin.enable()

    if args == "restart":
        return

    # If args is None then we run the test suite as pytest standalone and it's not dynamic
    if args is None:
        inst.restart()

    log.info('Testing ' + PLUGIN_RETRO_CHANGELOG + '...')

    ############################################################################
    # Configure plugin
    ############################################################################

    # Gather the current change count (it's not 1 once we start the stability tests)
    entry = inst.search_s(RETROCL_SUFFIX, ldap.SCOPE_SUBTREE, '(changenumber=*)')
    entry_count = len(entry)

    ############################################################################
    # Test plugin
    ############################################################################

    # Add a user
    users = UserAccounts(inst, DEFAULT_SUFFIX)
    user1 = users.create_test_user(uid=1001)

    # Check we logged this in the retro cl
    entry = inst.search_s(RETROCL_SUFFIX, ldap.SCOPE_SUBTREE, '(changenumber=*)')
    assert entry
    assert len(entry) != entry_count

    entry_count += 1

    ############################################################################
    # Change the config - disable plugin
    ############################################################################
    plugin.disable()

    # If args is None then we run the test suite as pytest standalone and it's not dynamic
    if args is None:
        inst.restart()

    ############################################################################
    # Test plugin
    ############################################################################
    user1.delete()

    # Check we didn't log this in the retro cl
    entry = inst.search_s(RETROCL_SUFFIX, ldap.SCOPE_SUBTREE, '(changenumber=*)')
    assert len(entry) == entry_count

    plugin.enable()
    if args is None:
        inst.restart()

    ############################################################################
    # Test plugin dependency
    ############################################################################
    check_dependency(inst, plugin, online=isinstance(args, str))

    ############################################################################
    # Test passed
    ############################################################################
    log.info('test_retrocl: PASS\n')
    return


def _rootdn_restart(inst):
    """Special restart wrapper function for rootDN plugin"""

    with pytest.raises(ldap.LDAPError):
        inst.restart()
    # Bind as the user who can make updates to the config
    inst.simple_bind_s(USER_DN, USER_PW)
    # We need it online for other operations to work
    inst.state = DIRSRV_STATE_ONLINE


def test_rootdn(topo, args=None):
    """Test Root DN Access control basic functionality

    :id: 9b87493b-0493-46f9-8364-6099d0e5d811
    :setup: Standalone Instance
    :steps:
        1. Enable the plugin
        2. Restart the instance
        3. Add an user and aci to open up cn=config
        4. Set an aci so we can modify the plugin after we deny the root dn
        5. Set allowed IP to an unknown host - blocks root dn
        6. Bind as Root DN
        7. Bind as the user who can make updates to the config
        8. Test that invalid plugin changes are rejected
        9. Remove the restriction
        10. Bind as Root DN
        11. Check nsslapd-plugin-depends-on-named for the plugin
        12. Clean up
    :expectedresults:
        1. Success
        2. Success
        3. Success
        4. Success
        5. Success
        6. Success
        7. Success
        8. Success
        9. Success
        10. Success
        11. Success
        12. Success
    """

    inst = topo[0]

    # stop the plugin, and start it
    plugin = RootDNAccessControlPlugin(inst)
    plugin.disable()
    plugin.enable()

    if args == "restart":
        return

    # If args is None then we run the test suite as pytest standalone and it's not dynamic
    if args is None:
        inst.restart()

    log.info('Testing ' + PLUGIN_ROOTDN_ACCESS + '...')

    ############################################################################
    # Configure plugin
    ############################################################################

    # Add an user and aci to open up cn=config
    users = UserAccounts(inst, DEFAULT_SUFFIX)
    user1 = users.create_test_user(uid=1001)
    user1.replace('userpassword', USER_PW)

    # Set an aci so we can modify the plugin after we deny the root dn
    ACI = ('(target ="ldap:///cn=config")(targetattr = "*")(version 3.0;acl ' +
           '"all access";allow (all)(userdn="ldap:///anyone");)')
    inst.config.add('aci', ACI)

    # Set allowed IP to an unknown host - blocks root dn
    plugin.replace('rootdn-allow-ip', '10.10.10.10')

    ############################################################################
    # Test plugin
    ############################################################################
    # Bind as Root DN
    if args is None:
        _rootdn_restart(inst)
    else:
        with pytest.raises(ldap.LDAPError):
            inst.simple_bind_s(DN_DM, PASSWORD)
        # Bind as the user who can make updates to the config
        inst.simple_bind_s(USER_DN, USER_PW)

    ############################################################################
    # Change the config
    ############################################################################
    # First, test that invalid plugin changes are rejected
    if args is None:
        plugin.replace('rootdn-deny-ip', '12.12.ZZZ.12')
        with pytest.raises((subprocess.CalledProcessError, ValueError)):
            inst.restart()
        dse_ldif = DSEldif(inst)
        dse_ldif.delete(plugin.dn, 'rootdn-deny-ip')
        _rootdn_restart(inst)

        plugin.replace('rootdn-allow-host', 'host._.com')
        with pytest.raises((subprocess.CalledProcessError, ValueError)):
            inst.restart()
        dse_ldif = DSEldif(inst)
        dse_ldif.delete(plugin.dn, 'rootdn-allow-host')
        _rootdn_restart(inst)
    else:
        with pytest.raises(ldap.LDAPError):
            plugin.replace('rootdn-deny-ip', '12.12.ZZZ.12')

        with pytest.raises(ldap.LDAPError):
            plugin.replace('rootdn-allow-host', 'host._.com')

    # Remove the restriction
    plugin.remove_all('rootdn-allow-ip')
    if args is None:
        inst.restart()

    ############################################################################
    # Test plugin
    ############################################################################
    # Bind as Root DN
    inst.simple_bind_s(DN_DM, PASSWORD)

    ############################################################################
    # Test plugin dependency
    ############################################################################
    check_dependency(inst, plugin, online=isinstance(args, str))

    ############################################################################
    # Cleanup - remove ACI from cn=config and test user
    ############################################################################
    inst.config.remove('aci', ACI)
    user1.delete()

    ############################################################################
    # Test passed
    ############################################################################
    log.info('test_rootdn: PASS\n')
    return


# Array of test functions
func_tests = [test_acctpolicy, test_attruniq, test_automember, test_dna,
              test_linkedattrs, test_memberof, test_mep, test_passthru,
              test_referint, test_retrocl, test_rootdn]


def check_all_plugins(topo, args="online"):
    """Run every plugin test function in func_tests against the topology."""
    for func in func_tests:
        func(topo, args)

    return
diff --git a/dirsrvtests/tests/suites/plugins/accpol_check_all_state_attrs_test.py b/dirsrvtests/tests/suites/plugins/accpol_check_all_state_attrs_test.py
new file mode 100644
index 0000000..971620a
--- /dev/null
+++ b/dirsrvtests/tests/suites/plugins/accpol_check_all_state_attrs_test.py
@@ -0,0 +1,119 @@
# --- BEGIN COPYRIGHT BLOCK ---
# Copyright (C) 2023 Red Hat, Inc.
# All rights reserved.
#
# License: GPL (version 3 or any later version).
# See LICENSE for details.
# --- END COPYRIGHT BLOCK ---
#
import ldap
import logging
import pytest
import os
import time
from lib389.topologies import topology_st as topo
from lib389._constants import DN_CONFIG, DEFAULT_SUFFIX, PLUGIN_ACCT_POLICY, DN_PLUGIN, PASSWORD
from lib389.idm.user import (UserAccount, UserAccounts)
from lib389.plugins import (AccountPolicyPlugin, AccountPolicyConfig)
from lib389.idm.domain import Domain

log = logging.getLogger(__name__)

ACCPOL_DN = "cn={},{}".format(PLUGIN_ACCT_POLICY, DN_PLUGIN)
ACCP_CONF = "{},{}".format(DN_CONFIG, ACCPOL_DN)
TEST_ENTRY_NAME = 'actpol_test'
TEST_ENTRY_DN = 'uid={},{}'.format(TEST_ENTRY_NAME, DEFAULT_SUFFIX)
NEW_PASSWORD = 'password123'
USER_SELF_MOD_ACI = '(targetattr="userpassword")(version 3.0; acl "pwp test"; allow (all) userdn="ldap:///self";)'
ANON_ACI = "(targetattr=\"*\")(version 3.0; acl \"Anonymous Read access\"; allow (read,search,compare) userdn = \"ldap:///anyone\";)"


def test_inactivty_and_expiration(topo):
    """Test account expiration works when we are checking all state attributes

    :id: 704310de-a2eb-4ee7-baf3-9770c0fbf07c
    :setup: Standalone Instance
    :steps:
        1. Configure instance for password expiration
        2. Add ACI to allow users to update themselves
        3. Create test user
        4. Reset users password to set passwordExpirationtime
        5. Configure account policy plugin and restart
        6. Bind as test user to reset lastLoginTime
        7. Sleep, then bind as user which triggers error

    :expectedresults:
        1. Success
        2. Success
        3. Success
        4. Success
        5. Success
        6. Success
        7. Success
    """

    # Configure instance
    inst = topo.standalone
    inst.config.set('passwordexp', 'on')
    inst.config.set('passwordmaxage', '2')
    inst.config.set('passwordGraceLimit', '5')

    # Add aci so user can update their own password
    suffix = Domain(inst, DEFAULT_SUFFIX)
    suffix.add('aci', USER_SELF_MOD_ACI)
    suffix.add('aci', ANON_ACI)

    # Create the test user
    test_user = UserAccount(inst, TEST_ENTRY_DN)
    test_user.create(properties={
        'uid': TEST_ENTRY_NAME,
        'cn': TEST_ENTRY_NAME,
        'sn': TEST_ENTRY_NAME,
        'userPassword': PASSWORD,
        'uidNumber': '1000',
        'gidNumber': '2000',
        'homeDirectory': '/home/test',
    })

    # Reset test user password to reset passwordExpirationtime
    conn = test_user.bind(PASSWORD)
    test_user = UserAccount(conn, TEST_ENTRY_DN)
    test_user.replace('userpassword', NEW_PASSWORD)

    # Sleep a little bit, we'll sleep the remaining 10 seconds later
    time.sleep(3)

    # Configure account policy plugin
    plugin = AccountPolicyPlugin(inst)
    plugin.enable()
    plugin.set('nsslapd-pluginarg0', ACCP_CONF)
    accp = AccountPolicyConfig(inst, dn=ACCP_CONF)
    accp.set('alwaysrecordlogin', 'yes')
    accp.set('stateattrname', 'lastLoginTime')
    accp.set('altstateattrname', 'passwordexpirationtime')
    accp.set('specattrname', 'acctPolicySubentry')
    accp.set('limitattrname', 'accountInactivityLimit')
    accp.set('accountInactivityLimit', '10')
    accp.set('checkAllStateAttrs', 'on')
    inst.restart()

    # Bind as test user to reset lastLoginTime
    conn = test_user.bind(NEW_PASSWORD)
    test_user = UserAccount(conn, TEST_ENTRY_DN)

    # Sleep to exceed passwordExpirationTime over 10 seconds, but less than
    # 10 seconds for lastLoginTime
    time.sleep(7)

    # Try to bind, but password expiration should reject this as lastLoginTime
    # has not exceeded the inactivity limit
    with pytest.raises(ldap.CONSTRAINT_VIOLATION):
        test_user.bind(NEW_PASSWORD)


if __name__ == '__main__':
    # Run isolated
    # -s for DEBUG mode
    CURRENT_FILE = os.path.realpath(__file__)
    pytest.main(["-s", CURRENT_FILE])

diff --git a/dirsrvtests/tests/suites/plugins/accpol_test.py b/dirsrvtests/tests/suites/plugins/accpol_test.py
new file mode 100644
index 0000000..964d98e
--- /dev/null
+++ b/dirsrvtests/tests/suites/plugins/accpol_test.py
@@ -0,0 +1,1306 @@
# --- BEGIN COPYRIGHT BLOCK ---
# Copyright (C) 2017 Red Hat, Inc.
# All rights reserved.
#
# License: GPL (version 3 or any later version).
# See LICENSE for details.
# --- END COPYRIGHT BLOCK ---
#
import pytest
import subprocess
from lib389.tasks import *
from lib389.utils import *
from lib389.topologies import topology_st
from lib389.idm.user import (UserAccount, UserAccounts)
from lib389.plugins import (AccountPolicyPlugin, AccountPolicyConfig, AccountPolicyConfigs)
from lib389.cos import (CosTemplate, CosPointerDefinition)
from lib389._constants import (PLUGIN_ACCT_POLICY, DN_PLUGIN, DN_DM, PASSWORD, DEFAULT_SUFFIX,
                               DN_CONFIG, SERVERID_STANDALONE)

pytestmark = pytest.mark.tier1

LOCL_CONF = 'cn=AccountPolicy1,ou=people,dc=example,dc=com'
TEMPL_COS = 'cn=TempltCoS,ou=people,dc=example,dc=com'
DEFIN_COS = 'cn=DefnCoS,ou=people,dc=example,dc=com'
ACCPOL_DN = "cn={},{}".format(PLUGIN_ACCT_POLICY, DN_PLUGIN)
ACCP_CONF = "{},{}".format(DN_CONFIG, ACCPOL_DN)
USER_PASW = 'Secret1234'
INVL_PASW = 'Invalid234'


@pytest.fixture(scope="module")
def accpol_global(topology_st, request):
    """Configure Global account policy plugin and restart the server"""

    log.info('Configuring Global account policy plugin, pwpolicy attributes and restarting the server')
    plugin = AccountPolicyPlugin(topology_st.standalone)
    try:
        if DEBUGGING:
            topology_st.standalone.config.set('nsslapd-auditlog-logging-enabled', 'on')
        plugin.enable()
        plugin.set('nsslapd-pluginarg0', ACCP_CONF)
        accp 
= AccountPolicyConfig(topology_st.standalone, dn=ACCP_CONF) + accp.set('alwaysrecordlogin', 'yes') + accp.set('stateattrname', 'lastLoginTime') + accp.set('altstateattrname', 'createTimestamp') + accp.set('specattrname', 'acctPolicySubentry') + accp.set('limitattrname', 'accountInactivityLimit') + accp.set('accountInactivityLimit', '12') + topology_st.standalone.config.set('passwordexp', 'on') + topology_st.standalone.config.set('passwordmaxage', '400') + topology_st.standalone.config.set('passwordwarning', '1') + topology_st.standalone.config.set('passwordlockout', 'on') + topology_st.standalone.config.set('passwordlockoutduration', '5') + topology_st.standalone.config.set('passwordmaxfailure', '3') + topology_st.standalone.config.set('passwordunlock', 'on') + except ldap.LDAPError as e: + log.error('Failed to enable Global Account Policy Plugin and Password policy attributes') + raise e + topology_st.standalone.restart(timeout=10) + + def fin(): + log.info('Disabling Global accpolicy plugin and removing pwpolicy attrs') + try: + plugin = AccountPolicyPlugin(topology_st.standalone) + plugin.disable() + topology_st.standalone.config.set('passwordexp', 'off') + topology_st.standalone.config.set('passwordlockout', 'off') + except ldap.LDAPError as e: + log.error('Failed to disable Global accpolicy plugin, {}'.format(e.message['desc'])) + assert False + topology_st.standalone.restart(timeout=10) + + request.addfinalizer(fin) + + +@pytest.fixture(scope="module") +def accpol_local(topology_st, accpol_global, request): + """Configure Local account policy plugin for ou=people subtree and restart the server""" + + log.info('Adding Local account policy plugin configuration entries') + try: + topology_st.standalone.config.set('passwordmaxage', '400') + accp = AccountPolicyConfig(topology_st.standalone, dn=ACCP_CONF) + accp.remove_all('accountInactivityLimit') + locl_conf = AccountPolicyConfig(topology_st.standalone, dn=LOCL_CONF) + locl_conf.create(properties={'cn': 
'AccountPolicy1', 'accountInactivityLimit': '10'}) + cos_template = CosTemplate(topology_st.standalone, dn=TEMPL_COS) + cos_template.create(properties={'cn': 'TempltCoS', 'acctPolicySubentry': LOCL_CONF}) + cos_def = CosPointerDefinition(topology_st.standalone, dn=DEFIN_COS) + cos_def.create(properties={ + 'cn': 'DefnCoS', + 'cosTemplateDn': TEMPL_COS, + 'cosAttribute': 'acctPolicySubentry default operational-default'}) + except ldap.LDAPError as e: + log.error('Failed to configure Local account policy plugin') + log.error('Failed to add entry {}, {}, {}:'.format(LOCL_CONF, TEMPL_COS, DEFIN_COS)) + raise e + topology_st.standalone.restart(timeout=10) + + def fin(): + log.info('Disabling Local accpolicy plugin and removing pwpolicy attrs') + try: + topology_st.standalone.plugins.disable(name=PLUGIN_ACCT_POLICY) + for entry_dn in [LOCL_CONF, TEMPL_COS, DEFIN_COS]: + entry = UserAccount(topology_st.standalone, dn=entry_dn) + entry.delete() + except ldap.LDAPError as e: + log.error('Failed to disable Local accpolicy plugin, {}'.format(e.message['desc'])) + assert False + topology_st.standalone.restart(timeout=10) + + request.addfinalizer(fin) + + +@pytest.fixture(scope="module") +def setup_account_policy_plugin(topology_st): + inst = topology_st[0] + + # Enable plugin and restart + plugin = AccountPolicyPlugin(inst) + plugin.disable() + plugin.enable() + inst.restart() + + # Add config entry, set alwaysrecordlogin to yes (lastLoginHistorySize defaults to 5) + ap_configs = AccountPolicyConfigs(inst) + try: + ap_config = ap_configs.create(properties={'cn': 'config', 'alwaysrecordlogin': 'yes', }) + except ldap.ALREADY_EXISTS: + ap_config = ap_configs.get('config') + ap_config.replace('alwaysrecordlogin', 'yes') + + return ap_config + + +@pytest.fixture(scope="module") +def setup_test_user(topology_st, setup_account_policy_plugin): + inst = topology_st[0] + USER_PW = 'password' + + # Add a test user entry + users = UserAccounts(inst, DEFAULT_SUFFIX) + user = 
users.create_test_user(uid=1000, gid=2000) + user.replace('userPassword', USER_PW) + + return user + + +def pwacc_lock(topology_st, suffix, subtree, userid, nousrs): + """Lockout user account by attempting invalid password binds""" + + log.info('Lockout user account by attempting invalid password binds') + while (nousrs > 0): + usrrdn = '{}{}'.format(userid, nousrs) + userdn = 'uid={},{},{}'.format(usrrdn, subtree, suffix) + user = UserAccount(topology_st.standalone, dn=userdn) + for i in range(3): + with pytest.raises(ldap.INVALID_CREDENTIALS): + user.bind(INVL_PASW) + log.error('No invalid credentials error for User {}'.format(userdn)) + with pytest.raises(ldap.CONSTRAINT_VIOLATION): + user.bind(USER_PASW) + log.error('User {} is not locked, expected error 19'.format(userdn)) + nousrs = nousrs - 1 + time.sleep(1) + + +def userpw_reset(topology_st, suffix, subtree, userid, nousrs, bindusr, bindpw, newpasw): + """Reset user password""" + + while (nousrs > 0): + usrrdn = '{}{}'.format(userid, nousrs) + userdn = 'uid={},{},{}'.format(usrrdn, subtree, suffix) + user = UserAccount(topology_st.standalone, dn=userdn) + log.info('Reset user password for user-{}'.format(userdn)) + if (bindusr == "DirMgr"): + try: + user.replace('userPassword', newpasw) + except ldap.LDAPError as e: + log.error('Unable to reset userPassword for user-{}'.format(userdn)) + raise e + elif (bindusr == "RegUsr"): + user_conn = user.bind(bindpw) + try: + user_conn.replace('userPassword', newpasw) + except ldap.LDAPError as e: + log.error('Unable to reset userPassword for user-{}'.format(userdn)) + raise e + nousrs = nousrs - 1 + time.sleep(1) + + +def nsact_inact(topology_st, suffix, subtree, userid, nousrs, command, expected): + """Account activate/in-activate/status using dsidm""" + + log.info('Account activate/in-activate/status using dsidm') + while (nousrs > 0): + usrrdn = '{}{}'.format(userid, nousrs) + userdn = 'uid={},{},{}'.format(usrrdn, subtree, suffix) + log.info('Running {} for user 
{}'.format(command, userdn)) + + dsidm_cmd = ['%s/dsidm' % topology_st.standalone.get_sbin_dir(), + 'slapd-standalone1', + '-b', DEFAULT_SUFFIX, + 'account', command, + userdn] + + log.info('Running {} for user {}'.format(dsidm_cmd, userdn)) + try: + output = subprocess.check_output(dsidm_cmd) + except subprocess.CalledProcessError as err: + output = err.output + + log.info('output: {}'.format(output)) + assert ensure_bytes(expected) in output + nousrs = nousrs - 1 + time.sleep(1) + + +def modify_attr(topology_st, base_dn, attr_name, attr_val): + """Modify attribute value for a given DN""" + + log.info('Modify attribute value for a given DN') + try: + entry = UserAccount(topology_st.standalone, dn=base_dn) + entry.replace(attr_name, attr_val) + except ldap.LDAPError as e: + log.error('Failed to replace lastLoginTime attribute for user-{} {}'.format(userdn, e.message['desc'])) + assert False + time.sleep(1) + + +def check_attr(topology_st, suffix, subtree, userid, nousrs, attr_name): + """Check ModifyTimeStamp attribute present for user""" + + log.info('Check ModifyTimeStamp attribute present for user') + while (nousrs > 0): + usrrdn = '{}{}'.format(userid, nousrs) + userdn = 'uid={},{},{}'.format(usrrdn, subtree, suffix) + user = UserAccount(topology_st.standalone, dn=userdn) + try: + user.get_attr_val(attr_name) + except ldap.LDAPError as e: + log.error('ModifyTimeStamp attribute is not present for user-{} {}'.format(userdn, e.message['desc'])) + assert False + nousrs = nousrs - 1 + + +def add_time_attr(topology_st, suffix, subtree, userid, nousrs, attr_name): + """Enable account by replacing lastLoginTime/createTimeStamp/ModifyTimeStamp attribute""" + + new_attr_val = time.strftime("%Y%m%d%H%M%S", time.gmtime()) + 'Z' + log.info('Enable account by replacing lastLoginTime/createTimeStamp/ModifyTimeStamp attribute') + while (nousrs > 0): + usrrdn = '{}{}'.format(userid, nousrs) + userdn = 'uid={},{},{}'.format(usrrdn, subtree, suffix) + user = 
def modusr_attr(topology_st, suffix, subtree, userid, nousrs, attr_name, attr_value):
    """Enable account by replacing cn attribute value, value of modifyTimeStamp changed"""

    log.info('Enable account by replacing cn attribute value, value of modifyTimeStamp changed')
    for seq in range(nousrs, 0, -1):
        userdn = 'uid={}{},{},{}'.format(userid, seq, subtree, suffix)
        account = UserAccount(topology_st.standalone, dn=userdn)
        try:
            account.replace(attr_name, attr_value)
        except ldap.LDAPError as e:
            log.error('Failed to add/replace {} attribute to-{}, for user-{}'.format(attr_name, attr_value, userdn))
            raise e
        time.sleep(1)


def del_time_attr(topology_st, suffix, subtree, userid, nousrs, attr_name):
    """Delete lastLoginTime/createTimeStamp/ModifyTimeStamp attribute from user account"""

    log.info('Delete lastLoginTime/createTimeStamp/ModifyTimeStamp attribute from user account')
    for seq in range(nousrs, 0, -1):
        userdn = 'uid={}{},{},{}'.format(userid, seq, subtree, suffix)
        account = UserAccount(topology_st.standalone, dn=userdn)
        try:
            account.remove_all(attr_name)
        except ldap.LDAPError as e:
            log.error('Failed to delete {} attribute for user-{}'.format(attr_name, userdn))
            raise e
        time.sleep(1)


def add_users(topology_st, suffix, subtree, userid, nousrs, ulimit):
    """Add users to default test instance with given suffix, subtree, userid and nousrs"""

    log.info('add_users: Pass all of these as parameters suffix, subtree, userid and nousrs')
    users = UserAccounts(topology_st.standalone, suffix, rdn=subtree)
    # Users are created counting down from nousrs until ulimit is reached
    for seq in range(nousrs, ulimit, -1):
        rdn_val = '{}{}'.format(userid, seq)
        users.create(properties={
            'uid': rdn_val,
            'cn': rdn_val,
            'sn': rdn_val,
            'uidNumber': '1001',
            'gidNumber': '2001',
            'userpassword': USER_PASW,
            'homeDirectory': '/home/{}'.format(rdn_val),
        })


def del_users(topology_st, suffix, subtree, userid, nousrs):
    """Delete users from default test instance with given suffix, subtree, userid and nousrs"""

    log.info('del_users: Pass all of these as parameters suffix, subtree, userid and nousrs')
    users = UserAccounts(topology_st.standalone, suffix, rdn=subtree)
    for seq in range(nousrs, 0, -1):
        users.get('{}{}'.format(userid, seq)).delete()


def account_status(topology_st, suffix, subtree, userid, nousrs, ulimit, tochck):
    """Check account status for the given suffix, subtree, userid and nousrs"""

    for seq in range(nousrs, ulimit, -1):
        userdn = 'uid={}{},{},{}'.format(userid, seq, subtree, suffix)
        account = UserAccount(topology_st.standalone, dn=userdn)
        if (tochck == "Enabled"):
            try:
                account.bind(USER_PASW)
            except ldap.LDAPError as e:
                log.error('User {} failed to login, expected 0'.format(userdn))
                raise e
        elif (tochck == "Expired"):
            with pytest.raises(ldap.INVALID_CREDENTIALS):
                account.bind(USER_PASW)
                log.error('User {} password not expired , expected error 49'.format(userdn))
        elif (tochck == "Disabled"):
            with pytest.raises(ldap.CONSTRAINT_VIOLATION):
                account.bind(USER_PASW)
                log.error('User {} is not inactivated, expected error 19'.format(userdn))
        time.sleep(1)


def user_binds(user, user_pw, num_binds):
    """ Bind as user a number of times """
    for _ in range(num_binds):
        conn = user.bind(user_pw)
        time.sleep(1)
        conn.unbind()


def verify_last_login_entries(inst, dn, expected):
    """ Search for lastLoginHistory attribute and verify the number and order of entries """
    result = inst.search_s(dn, ldap.SCOPE_SUBTREE, "(objectclass=*)", ['lastLoginHistory'])
    timestamps = [raw.decode() for raw in result[0].getValues('lastLoginHistory')]
    assert len(timestamps) == expected
    # Consecutive timestamps must be non-decreasing (chronological order)
    assert all(a <= b for a, b in zip(timestamps, timestamps[1:]))
def test_glact_inact(topology_st, accpol_global):
    """Verify if user account is inactivated when accountInactivityLimit is exceeded.

    :id: 342af084-0ad0-442f-b6f6-5a8b8e5e4c28
    :setup: Standalone instance, Global account policy plugin configuration,
            set accountInactivityLimit to few secs.
    :steps:
        1. Add few users to ou=people subtree in the default suffix
        2. Check if users are active just before it reaches accountInactivityLimit.
        3. User accounts should not be inactivated, expected 0
        4. Check if users are inactivated when accountInactivityLimit is exceeded.
        5. User accounts should be inactivated, expected error 19.
    :expectedresults:
        1. Success
        2. Success
        3. Success
        4. Success
        5. Should return error code 19
    """

    suffix, subtree = DEFAULT_SUFFIX, "ou=people"
    uid_prefix, num_users = "glinactusr", 3

    log.info('AccountInactivityLimit set to 12. Account will be inactivated if not accessed in 12 secs')
    add_users(topology_st, suffix, subtree, uid_prefix, num_users, 0)

    log.info('Sleep for 10 secs to check if account is not inactivated, expected value 0')
    time.sleep(10)
    log.info('Account should not be inactivated since AccountInactivityLimit not exceeded')
    account_status(topology_st, suffix, subtree, uid_prefix, 3, 2, "Enabled")

    log.info('Sleep for 3 more secs to check if account is inactivated')
    time.sleep(3)
    account_status(topology_st, suffix, subtree, uid_prefix, 2, 0, "Disabled")

    log.info('Sleep +10 secs to check if account {}3 is inactivated'.format(uid_prefix))
    time.sleep(10)
    account_status(topology_st, suffix, subtree, uid_prefix, 3, 2, "Disabled")
    del_users(topology_st, suffix, subtree, uid_prefix, num_users)


def test_glremv_lastlogin(topology_st, accpol_global):
    """Verify if user account is inactivated by createTimeStamp, if lastLoginTime attribute is missing.

    :id: 8ded5d8e-ed93-4c22-9c8e-78c479189f84
    :setup: Standalone instance, Global account policy plugin configuration,
            set accountInactivityLimit to few secs.
    :steps:
        1. Add few users to ou=people subtree in the default suffix
        2. Wait for few secs and bind as user to create lastLoginTime attribute.
        3. Remove the lastLoginTime attribute from the user.
        4. Wait till accountInactivityLimit exceeded based on createTimeStamp value
        5. Check if users are inactivated, expected error 19.
        6. Replace lastLoginTime attribute and check if account is activated
        7. User should be activated based on lastLoginTime attribute, expected 0
    :expectedresults:
        1. Success
        2. Success
        3. Success
        4. Success
        5. Success
        6. Success
        7. Should return error code 19
    """

    suffix, subtree = DEFAULT_SUFFIX, "ou=people"
    uid_prefix, num_users = "nologtimeusr", 1

    log.info('AccountInactivityLimit set to 12. Account will be inactivated if not accessed in 12 secs')
    add_users(topology_st, suffix, subtree, uid_prefix, num_users, 0)
    log.info('Sleep for 6 secs to check if account is not inactivated, expected value 0')
    time.sleep(6)
    log.info('Account should not be inactivated since AccountInactivityLimit not exceeded')
    account_status(topology_st, suffix, subtree, uid_prefix, num_users, 0, "Enabled")
    del_time_attr(topology_st, suffix, subtree, uid_prefix, num_users, 'lastLoginTime')
    log.info('Sleep for 7 more secs to check if account is inactivated')
    time.sleep(7)
    account_status(topology_st, suffix, subtree, uid_prefix, num_users, 0, "Disabled")
    add_time_attr(topology_st, suffix, subtree, uid_prefix, num_users, 'lastLoginTime')
    log.info('Check if account is activated, expected 0')
    account_status(topology_st, suffix, subtree, uid_prefix, num_users, 0, "Enabled")
    del_users(topology_st, suffix, subtree, uid_prefix, num_users)
def test_login_history_valid_values(topology_st, setup_test_user, setup_account_policy_plugin):
    """Verify a user account with attr alwaysrecordlogin=yes returns no more
    than the last login history size and that the timestamps are in chronological order.

    :id: 34725a73-c2ba-4b18-9329-532c1514327f
    :setup: Standalone instance, Global account policy plugin configuration,
            set alwaysrecordlogin to yes.
    :steps:
        1. Bind as test user more times than lastLoginHistorySize.
        2. Search on the test user DN for lastLoginTimeHistory attribute.
        3. Verify returned entry contains only LOGIN_HIST_SIZE_FIVE timestamps in chronological order.
        4. Modify plugin config entry, setting lastLoginHistorySize to LOGIN_HIST_SIZE_TWO
        5. Bind as test user more times than lastLoginHistorySize.
        6. Search on the test user DN for lastLoginTimeHistory attribute.
        7. Verify returned entry contains only LOGIN_HIST_SIZE_TWO timestamps in chronological order.
        8. Modify plugin config entry, setting lastLoginHistorySize to LOGIN_HIST_SIZE_FIVE
        9. Search on the test user DN for lastLoginTimeHistory attribute.
        10. Verify returned entry contains only LOGIN_HIST_SIZE_FIVE timestamps in chronological order.
    :expectedresults:
        1. Success
        2. Success
        3. Success
        4. Success
        5. Success
        6. Success
        7. Success
        8. Success
        9. Success
        10. Success
    """

    user_dn = 'uid=test_user_1000,ou=people,dc=example,dc=com'
    user_pw = 'password'
    num_binds_seven = 7
    hist_size_five = 5
    hist_size_two = 2

    inst = topology_st[0]
    user = setup_test_user
    ap_config = setup_account_policy_plugin

    # More binds than the history size: history must be capped at five
    user_binds(user, user_pw, num_binds_seven)
    verify_last_login_entries(inst, user_dn, hist_size_five)

    # Shrink the history window to two and repeat
    ap_config.replace('lastLoginHistorySize', str(hist_size_two))
    user_binds(user, user_pw, num_binds_seven)
    verify_last_login_entries(inst, user_dn, hist_size_two)

    # Grow it back to five and repeat once more
    ap_config.replace('lastLoginHistorySize', str(hist_size_five))
    user_binds(user, user_pw, num_binds_seven)
    verify_last_login_entries(inst, user_dn, hist_size_five)


def test_lastlogin_history_size_zero(topology_st, setup_test_user, setup_account_policy_plugin):
    """Verify that when lastLoginHistorySize is set to zero, no login history is recorded.

    :id: c1169b98-ebd9-4fe9-8402-c95f6f80a184
    :setup: Standalone instance, Global account policy plugin configuration,
            set alwaysrecordlogin to yes, and a test user.
    :steps:
        1. Set the lastLoginHistorySize to 0.
        2. Bind as the test user more times than the lastLoginHistorySize.
        3. Search for the lastLoginTimeHistory attribute on the test user DN.
    :expectedresults:
        1. The lastLoginHistorySize is successfully set to 0.
        2. Success.
        3. The returned entry has no timestamps, as the size was set to zero.
    """

    user_dn = 'uid=test_user_1000,ou=people,dc=example,dc=com'
    user_pw = 'password'
    num_binds_three = 3
    hist_size_zero = 0

    inst = topology_st[0]
    user = setup_test_user
    ap_config = setup_account_policy_plugin

    # With a zero-sized window no history may be recorded at all
    ap_config.replace('lastLoginHistorySize', str(hist_size_zero))
    user_binds(user, user_pw, num_binds_three)
    verify_last_login_entries(inst, user_dn, hist_size_zero)
def test_lastlogin_history_size_negative(topology_st, setup_account_policy_plugin):
    """Verify that setting the lastLoginHistorySize to a negative number raises an error.

    :id: 3e3252e0-ad66-4d49-b9a1-3e097f88f2c4
    :setup: Standalone instance, Global account policy plugin configuration,
            set alwaysrecordlogin to yes.
    :steps:
        1. Try to set the lastLoginHistorySize to a negative number.
    :expectedresults:
        1. A warning message has been written to the error logs.
    """

    LOGIN_HIST_SIZE_NEGATIVE = -1

    inst = topology_st[0]
    ap_config = setup_account_policy_plugin

    # Try to set lastLoginHistorySize to negative; the server accepts the
    # mod but logs a warning (removed an unused AC_POL_CFG_DN local here)
    ap_config.replace('lastLoginHistorySize', str(LOGIN_HIST_SIZE_NEGATIVE))

    assert inst.searchErrorsLog("Invalid value for login-history-size: -1")


def test_lastlogin_history_size_non_integer(topology_st, setup_account_policy_plugin):
    """Verify that setting the lastLoginHistorySize to a non-integer value raises an error.

    :id: 460e17a0-4d76-4c1e-94e8-a09e185b4dca
    :setup: Standalone instance, Global account policy plugin configuration,
            set alwaysrecordlogin to yes.
    :steps:
        1. Try to set the lastLoginHistorySize to a non-integer value.
    :expectedresults:
        1. An ldap.INVALID_SYNTAX error is raised.
    """

    LOGIN_HIST_SIZE_NON_INTEGER = 'five'
    ap_config = setup_account_policy_plugin

    # Try to set lastLoginHistorySize to a non-integer
    with pytest.raises(ldap.INVALID_SYNTAX):
        ap_config.replace('lastLoginHistorySize', str(LOGIN_HIST_SIZE_NON_INTEGER))


def test_glact_login(topology_st, accpol_global):
    """Verify if user account can be activated by replacing the lastLoginTime attribute.

    :id: f89897cc-c13e-4824-af08-3dd1039bab3c
    :setup: Standalone instance, Global account policy plugin configuration,
            set accountInactivityLimit to few secs.
    :steps:
        1. Add few users to ou=groups subtree in the default suffix
        2. Wait till accountInactivityLimit exceeded
        3. Run ldapsearch as normal user, expected error 19.
        4. Replace the lastLoginTime attribute and check if account is activated
        5. Run ldapsearch as normal user, expected 0.
    :expectedresults:
        1. Success
        2. Success
        3. Success
        4. Success
        5. Success
    """

    suffix = DEFAULT_SUFFIX
    subtree = "ou=groups"
    userid = "glactusr"
    nousrs = 3
    log.info('AccountInactivityLimit set to 12. Account will be inactivated if not accessed in 12 secs')
    add_users(topology_st, suffix, subtree, userid, nousrs, 0)
    log.info('Sleep for 13 secs to check if account is inactivated, expected error 19')
    time.sleep(13)
    account_status(topology_st, suffix, subtree, userid, nousrs, 0, "Disabled")
    add_time_attr(topology_st, suffix, subtree, userid, nousrs, 'lastLoginTime')
    log.info('Check if account is activated, expected 0')
    account_status(topology_st, suffix, subtree, userid, nousrs, 0, "Enabled")
    del_users(topology_st, suffix, subtree, userid, nousrs)
def test_glinact_limit(topology_st, accpol_global):
    """Verify if account policy plugin functions well when changing accountInactivityLimit value.

    :id: 7fbc373f-a3d7-4774-8d34-89b057c5e74b
    :setup: Standalone instance, Global account policy plugin configuration,
            set accountInactivityLimit to few secs.
    :steps:
        1. Add few users to ou=groups subtree in the default suffix
        2. Check if users are active just before reaching accountInactivityLimit
        3. Modify AccountInactivityLimit to a bigger value
        4. Wait for additional few secs, but check users before it reaches accountInactivityLimit
        5. Wait till accountInactivityLimit exceeded and check users, expected error 19
        6. Modify accountInactivityLimit to use the min value.
        7. Add few users to ou=groups subtree in the default suffix
        8. Wait till it reaches accountInactivityLimit and check users, expected error 19
        9. Modify accountInactivityLimit to 10 times(30 secs) bigger than the initial value.
        10. Add few users to ou=groups subtree in the default suffix
        11. Wait for 90 secs and check if account is not inactivated, expected 0
        12. Wait for +27 secs and check if account is not inactivated, expected 0
        13. Wait for +30 secs and check if account is inactivated, error 19
        14. Replace the lastLoginTime attribute and check if account is activated
        15. Modify accountInactivityLimit to 12 secs, which is the default
        16. Run ldapsearch as normal user, expected 0.
    :expectedresults:
        1. Success
        2. Success
        3. Success
        4. Success
        5. Success
        6. Success
        7. Success
        8. Success
        9. Success
        10. Success
        11. Success
        12. Success
        13. Success
        14. Success
        15. Success
        16. Success
    """

    suffix, subtree = DEFAULT_SUFFIX, "ou=groups"
    uid_prefix, num_users = "inactestusr", 3

    log.info('AccountInactivityLimit set to 12. Account will be inactivated if not accessed in 12 secs')
    add_users(topology_st, suffix, subtree, uid_prefix, num_users, 2)
    log.info('Sleep for 9 secs to check if account is not inactivated, expected 0')
    time.sleep(9)
    account_status(topology_st, suffix, subtree, uid_prefix, num_users, 2, "Enabled")

    # Raise the limit to 20s: still enabled at 17s, disabled after 37s total
    modify_attr(topology_st, ACCP_CONF, 'accountInactivityLimit', '20')
    time.sleep(17)
    account_status(topology_st, suffix, subtree, uid_prefix, num_users, 2, "Enabled")
    time.sleep(20)
    account_status(topology_st, suffix, subtree, uid_prefix, num_users, 2, "Disabled")

    # Minimum limit: a fresh user is disabled almost immediately
    modify_attr(topology_st, ACCP_CONF, 'accountInactivityLimit', '1')
    add_users(topology_st, suffix, subtree, uid_prefix, 2, 1)
    time.sleep(2)
    account_status(topology_st, suffix, subtree, uid_prefix, 2, 1, "Disabled")

    # 30s limit: still enabled at 27s, disabled after 57s total
    modify_attr(topology_st, ACCP_CONF, 'accountInactivityLimit', '30')
    add_users(topology_st, suffix, subtree, uid_prefix, 1, 0)
    time.sleep(27)
    account_status(topology_st, suffix, subtree, uid_prefix, 1, 0, "Enabled")
    time.sleep(30)
    account_status(topology_st, suffix, subtree, uid_prefix, 1, 0, "Disabled")

    log.info('Check if account is activated, expected 0')
    add_time_attr(topology_st, suffix, subtree, uid_prefix, num_users, 'lastLoginTime')
    account_status(topology_st, suffix, subtree, uid_prefix, num_users, 0, "Enabled")

    # Restore the default limit before cleanup
    modify_attr(topology_st, ACCP_CONF, 'accountInactivityLimit', '12')
    del_users(topology_st, suffix, subtree, uid_prefix, num_users)


#unstable or unstatus tests, skipped for now
@pytest.mark.flaky(max_runs=2, min_passes=1)
def test_glnologin_attr(topology_st, accpol_global):
    """Verify if user account is inactivated based on createTimeStamp attribute, no lastLoginTime attribute present

    :id: 3032f670-705d-4f69-96f5-d75445cffcfb
    :setup: Standalone instance, Local account policy plugin configuration,
            set accountInactivityLimit to few secs.
    :steps:
        1. Configure Global account policy plugin with createTimestamp as stateattrname
        2. lastLoginTime attribute will not be effective.
        3. Add few users to ou=groups subtree in the default suffix
        4. Wait for 10 secs and check if account is not inactivated, expected 0
        5. Modify AccountInactivityLimit to 20 secs
        6. Wait for +9 secs and check if account is not inactivated, expected 0
        7. Wait for +3 secs and check if account is inactivated, error 19
        8. Modify accountInactivityLimit to 3 secs
        9. Add few users to ou=groups subtree in the default suffix
        10. Wait for 3 secs and check if account is inactivated, error 19
        11. Modify accountInactivityLimit to 30 secs
        12. Add few users to ou=groups subtree in the default suffix
        13. Wait for 90 secs and check if account is not inactivated, expected 0
        14. Wait for +28 secs and check if account is not inactivated, expected 0
        15. Wait for +2 secs and check if account is inactivated, error 19
        16. Replace the lastLoginTime attribute and check if account is activated
        17. Modify accountInactivityLimit to 12 secs, which is the default
        18. Run ldapsearch as normal user, expected 0.
    :expectedresults:
        1. Success
        2. Success
        3. Success
        4. Success
        5. Success
        6. Success
        7. Success
        8. Success
        9. Success
        10. Success
        11. Success
        12. Success
        13. Success
        14. Success
        15. Success
        16. Success
        17. Success
        18. Success
    """

    suffix, subtree = DEFAULT_SUFFIX, "ou=groups"
    uid_prefix, num_users = "nologinusr", 3

    log.info('AccountInactivityLimit set to 12. Account will be inactivated if not accessed in 12 secs')
    log.info('Set attribute StateAttrName to createTimestamp, loginTime attr wont be considered')
    modify_attr(topology_st, ACCP_CONF, 'stateattrname', 'createTimestamp')
    topology_st.standalone.restart(timeout=10)
    add_users(topology_st, suffix, subtree, uid_prefix, num_users, 2)
    log.info('Sleep for 9 secs to check if account is not inactivated, expected 0')
    time.sleep(9)
    account_status(topology_st, suffix, subtree, uid_prefix, num_users, 2, "Enabled")

    modify_attr(topology_st, ACCP_CONF, 'accountInactivityLimit', '20')
    time.sleep(9)
    account_status(topology_st, suffix, subtree, uid_prefix, num_users, 2, "Enabled")
    time.sleep(3)
    account_status(topology_st, suffix, subtree, uid_prefix, num_users, 2, "Disabled")

    modify_attr(topology_st, ACCP_CONF, 'accountInactivityLimit', '3')
    add_users(topology_st, suffix, subtree, uid_prefix, 2, 1)
    time.sleep(2)
    account_status(topology_st, suffix, subtree, uid_prefix, 2, 1, "Enabled")
    time.sleep(2)
    account_status(topology_st, suffix, subtree, uid_prefix, 2, 1, "Disabled")

    modify_attr(topology_st, ACCP_CONF, 'accountInactivityLimit', '30')
    add_users(topology_st, suffix, subtree, uid_prefix, 1, 0)
    time.sleep(28)
    account_status(topology_st, suffix, subtree, uid_prefix, 1, 0, "Enabled")
    time.sleep(2)
    account_status(topology_st, suffix, subtree, uid_prefix, 1, 0, "Disabled")

    # Restore the default configuration and re-activate the accounts
    modify_attr(topology_st, ACCP_CONF, 'accountInactivityLimit', '12')
    log.info('Set attribute StateAttrName to lastLoginTime, the default')
    modify_attr(topology_st, ACCP_CONF, 'stateattrname', 'lastLoginTime')
    topology_st.standalone.restart(timeout=10)
    add_time_attr(topology_st, suffix, subtree, uid_prefix, num_users, 'lastLoginTime')
    log.info('Check if account is activated, expected 0')
    account_status(topology_st, suffix, subtree, uid_prefix, num_users, 0, "Enabled")
    del_users(topology_st, suffix, subtree, uid_prefix, num_users)
"""Verify if user account is inactivated based on createTimeStamp attribute, no lastLoginTime attribute present + + :id: 3032f670-705d-4f69-96f5-d75445cffcfb + :setup: Standalone instance, Local account policy plugin configuration, + set accountInactivityLimit to few secs. + :steps: + 1. Configure Global account policy plugin with createTimestamp as stateattrname + 2. lastLoginTime attribute will not be effective. + 3. Add few users to ou=groups subtree in the default suffix + 4. Wait for 10 secs and check if account is not inactivated, expected 0 + 5. Modify AccountInactivityLimit to 20 secs + 6. Wait for +9 secs and check if account is not inactivated, expected 0 + 7. Wait for +3 secs and check if account is inactivated, error 19 + 8. Modify accountInactivityLimit to 3 secs + 9. Add few users to ou=groups subtree in the default suffix + 10. Wait for 3 secs and check if account is inactivated, error 19 + 11. Modify accountInactivityLimit to 30 secs + 12. Add few users to ou=groups subtree in the default suffix + 13. Wait for 90 secs and check if account is not inactivated, expected 0 + 14. Wait for +28 secs and check if account is not inactivated, expected 0 + 15. Wait for +2 secs and check if account is inactivated, error 19 + 16. Replace the lastLoginTime attribute and check if account is activated + 17. Modify accountInactivityLimit to 12 secs, which is the default + 18. Run ldapsearch as normal user, expected 0. + :expectedresults: + 1. Success + 2. Success + 3. Success + 4. Success + 5. Success + 6. Success + 7. Success + 8. Success + 9. Success + 10. Success + 11. Success + 12. Success + 13. Success + 14. Success + 15. Success + 16. Success + 17. Success + 18. Success + """ + + suffix = DEFAULT_SUFFIX + subtree = "ou=groups" + userid = "nologinusr" + nousrs = 3 + + log.info('AccountInactivityLimit set to 12. 
Account will be inactivated if not accessed in 12 secs') + log.info('Set attribute StateAttrName to createTimestamp, loginTime attr wont be considered') + modify_attr(topology_st, ACCP_CONF, 'stateattrname', 'createTimestamp') + topology_st.standalone.restart(timeout=10) + add_users(topology_st, suffix, subtree, userid, nousrs, 2) + log.info('Sleep for 9 secs to check if account is not inactivated, expected 0') + time.sleep(9) + account_status(topology_st, suffix, subtree, userid, nousrs, 2, "Enabled") + + modify_attr(topology_st, ACCP_CONF, 'accountInactivityLimit', '20') + time.sleep(9) + account_status(topology_st, suffix, subtree, userid, nousrs, 2, "Enabled") + time.sleep(3) + account_status(topology_st, suffix, subtree, userid, nousrs, 2, "Disabled") + + modify_attr(topology_st, ACCP_CONF, 'accountInactivityLimit', '3') + add_users(topology_st, suffix, subtree, userid, 2, 1) + time.sleep(2) + account_status(topology_st, suffix, subtree, userid, 2, 1, "Enabled") + time.sleep(2) + account_status(topology_st, suffix, subtree, userid, 2, 1, "Disabled") + + modify_attr(topology_st, ACCP_CONF, 'accountInactivityLimit', '30') + add_users(topology_st, suffix, subtree, userid, 1, 0) + time.sleep(28) + account_status(topology_st, suffix, subtree, userid, 1, 0, "Enabled") + time.sleep(2) + account_status(topology_st, suffix, subtree, userid, 1, 0, "Disabled") + + modify_attr(topology_st, ACCP_CONF, 'accountInactivityLimit', '12') + log.info('Set attribute StateAttrName to lastLoginTime, the default') + modify_attr(topology_st, ACCP_CONF, 'stateattrname', 'lastLoginTime') + topology_st.standalone.restart(timeout=10) + add_time_attr(topology_st, suffix, subtree, userid, nousrs, 'lastLoginTime') + log.info('Check if account is activated, expected 0') + account_status(topology_st, suffix, subtree, userid, nousrs, 0, "Enabled") + del_users(topology_st, suffix, subtree, userid, nousrs) + +#unstable or unstatus tests, skipped for now +@pytest.mark.flaky(max_runs=2, 
min_passes=1) +def test_glnoalt_stattr(topology_st, accpol_global): + """Verify if user account can be inactivated based on lastLoginTime attribute, altstateattrname set to 1.1 + + :id: 8dcc3540-578f-422a-bb44-28c2cf20dbcd + :setup: Standalone instance, Global account policy plugin configuration, + set accountInactivityLimit to few secs. + :steps: + 1. Configure Global account policy plugin with altstateattrname to 1.1 + 2. Add few users to ou=groups subtree in the default suffix + 3. Wait till it reaches accountInactivityLimit + 4. Remove lastLoginTime attribute from the user entry + 5. Run ldapsearch as normal user, expected 0. no lastLoginTime attribute present + 6. Wait till it reaches accountInactivityLimit and check users, expected error 19 + :expectedresults: + 1. Success + 2. Success + 3. Success + 4. Success + 5. Success + 6. Success + """ + + suffix = DEFAULT_SUFFIX + subtree = "ou=groups" + userid = "nologinusr" + nousrs = 3 + log.info('Set attribute altStateAttrName to 1.1') + modify_attr(topology_st, ACCP_CONF, 'altstateattrname', '1.1') + topology_st.standalone.restart(timeout=10) + add_users(topology_st, suffix, subtree, userid, nousrs, 0) + log.info('Sleep for 13 secs to check if account is not inactivated, expected 0') + time.sleep(13) + account_status(topology_st, suffix, subtree, userid, nousrs, 0, "Enabled") + log.info('lastLoginTime attribute is added from the above ldap bind by userdn') + time.sleep(13) + account_status(topology_st, suffix, subtree, userid, nousrs, 0, "Disabled") + del_time_attr(topology_st, suffix, subtree, userid, nousrs, 'lastLoginTime') + account_status(topology_st, suffix, subtree, userid, nousrs, 0, "Enabled") + modify_attr(topology_st, ACCP_CONF, 'altstateattrname', 'createTimestamp') + topology_st.standalone.restart(timeout=10) + add_time_attr(topology_st, suffix, subtree, userid, nousrs, 'lastLoginTime') + account_status(topology_st, suffix, subtree, userid, nousrs, 0, "Enabled") + del_users(topology_st, suffix, 
subtree, userid, nousrs) + + +#unstable or unstatus tests, skipped for now +@pytest.mark.flaky(max_runs=2, min_passes=1) +def test_glattr_modtime(topology_st, accpol_global): + """Verify if user account can be inactivated based on modifyTimeStamp attribute + + :id: 67380839-2966-45dc-848a-167a954153e1 + :setup: Standalone instance, Global account policy plugin configuration, + set accountInactivityLimit to few secs. + :steps: + 1. Configure Global account policy plugin with altstateattrname to modifyTimestamp + 2. Add few users to ou=groups subtree in the default suffix + 3. Wait till the accountInactivityLimit exceeded and check users, expected error 19 + 4. Modify cn attribute for user, ModifyTimeStamp is updated. + 5. Check if user is activated based on ModifyTimeStamp attribute, expected 0 + 6. Change the plugin to use createTimeStamp and remove lastLoginTime attribute + 7. Check if account is inactivated, expected error 19 + :expectedresults: + 1. Success + 2. Success + 3. Success + 4. Success + 5. Success + 6. Success + 7. 
#unstable or unstatus tests, skipped for now
@pytest.mark.flaky(max_runs=2, min_passes=1)
def test_glnoalt_nologin(topology_st, accpol_global):
    """Verify if account policy plugin works if we set altstateattrname set to 1.1 and alwaysrecordlogin to NO

    :id: 49eda7db-84de-47ba-8f81-ac5e4de3a500
    :setup: Standalone instance, Global account policy plugin configuration,
            set accountInactivityLimit to few secs.
    :steps:
        1. Configure Global account policy plugin with altstateattrname to 1.1
        2. Set alwaysrecordlogin to NO.
        3. Add few users to ou=groups subtree in the default suffix
        4. Wait till accountInactivityLimit exceeded and check users, expected 0
        5. Check for lastLoginTime attribute, it should not be present
        6. Wait for few more secs and check if account is not inactivated, expected 0
        7. Run ldapsearch as normal user, expected 0. no lastLoginTime attribute present
        8. Set altstateattrname to createTimeStamp
        9. Check if user account is inactivated based on createTimeStamp attribute.
        10. Account should be inactivated, expected error 19
    :expectedresults:
        1. Success
        2. Success
        3. Success
        4. Success
        5. Success
        6. Success
        7. Success
        8. Success
        9. Success
        10. Success
    """

    suffix, subtree = DEFAULT_SUFFIX, "ou=groups"
    uid_prefix, num_users = "norecrodlogusr", 3

    log.info('Set attribute altStateAttrName to 1.1')
    modify_attr(topology_st, ACCP_CONF, 'altstateattrname', '1.1')
    log.info('Set attribute alwaysrecordlogin to No')
    modify_attr(topology_st, ACCP_CONF, 'alwaysrecordlogin', 'no')
    topology_st.standalone.restart(timeout=10)
    add_users(topology_st, suffix, subtree, uid_prefix, num_users, 0)
    log.info('Sleep for 13 secs to check if account is not inactivated, expected 0')
    time.sleep(13)
    account_status(topology_st, suffix, subtree, uid_prefix, num_users, 0, "Enabled")
    time.sleep(3)
    account_status(topology_st, suffix, subtree, uid_prefix, num_users, 0, "Enabled")
    log.info('Set attribute altStateAttrName to createTimestamp')
    modify_attr(topology_st, ACCP_CONF, 'altstateattrname', 'createTimestamp')
    topology_st.standalone.restart(timeout=10)
    time.sleep(2)
    account_status(topology_st, suffix, subtree, uid_prefix, num_users, 0, "Disabled")
    log.info('Reset the default attribute values')
    modify_attr(topology_st, ACCP_CONF, 'alwaysrecordlogin', 'yes')
    topology_st.standalone.restart(timeout=10)
    add_time_attr(topology_st, suffix, subtree, uid_prefix, num_users, 'lastLoginTime')
    account_status(topology_st, suffix, subtree, uid_prefix, num_users, 0, "Enabled")
    del_users(topology_st, suffix, subtree, uid_prefix, num_users)


#unstable or unstatus tests, skipped for now
@pytest.mark.flaky(max_runs=2, min_passes=1)
def test_glinact_nsact(topology_st, accpol_global):
    """Verify if user account can be activated using dsidm.

    :id: 876a7a7c-0b3f-4cd2-9b45-1dc80846e334
    :setup: Standalone instance, Global account policy plugin configuration,
            set accountInactivityLimit to few secs.
    :steps:
        1. Configure Global account policy plugin
        2. Add few users to ou=groups subtree in the default suffix
        3. Wait for few secs and inactivate user using dsidm
        4. Wait till accountInactivityLimit exceeded.
        5. Run ldapsearch as normal user, expected error 19.
        6. Activate user using ns-activate.pl script
        7. Check if account is activated, expected error 19
        8. Replace the lastLoginTime attribute and check if account is activated
        9. Run ldapsearch as normal user, expected 0.
    :expectedresults:
        1. Success
        2. Success
        3. Success
        4. Success
        5. Success
        6. Success
        7. Success
        8. Success
        9. Success
    """

    suffix, subtree = DEFAULT_SUFFIX, "ou=groups"
    uid_prefix, num_users = "nsactusr", 1

    log.info('AccountInactivityLimit set to 12. Account will be inactivated if not accessed in 12 secs')
    add_users(topology_st, suffix, subtree, uid_prefix, num_users, 0)
    log.info('Sleep for 3 secs to check if account is not inactivated, expected value 0')
    time.sleep(3)
    nsact_inact(topology_st, suffix, subtree, uid_prefix, num_users, "unlock", "")
    log.info('Sleep for 10 secs to check if account is inactivated, expected value 19')
    time.sleep(10)
    nsact_inact(topology_st, suffix, subtree, uid_prefix, num_users, "unlock", "")
    account_status(topology_st, suffix, subtree, uid_prefix, num_users, 0, "Disabled")
    nsact_inact(topology_st, suffix, subtree, uid_prefix, num_users, "entry-status",
                "inactivity limit exceeded")
    add_time_attr(topology_st, suffix, subtree, uid_prefix, num_users, 'lastLoginTime')
    account_status(topology_st, suffix, subtree, uid_prefix, num_users, 0, "Enabled")
    nsact_inact(topology_st, suffix, subtree, uid_prefix, num_users, "entry-status", "activated")
    del_users(topology_st, suffix, subtree, uid_prefix, num_users)
min_passes=1) +def test_glinact_nsact(topology_st, accpol_global): + """Verify if user account can be activated using dsidm. + + :id: 876a7a7c-0b3f-4cd2-9b45-1dc80846e334 + :setup: Standalone instance, Global account policy plugin configuration, + set accountInactivityLimit to few secs. + :steps: + 1. Configure Global account policy plugin + 2. Add few users to ou=groups subtree in the default suffix + 3. Wait for few secs and inactivate user using dsidm + 4. Wait till accountInactivityLimit exceeded. + 5. Run ldapsearch as normal user, expected error 19. + 6. Activate user using ns-activate.pl script + 7. Check if account is activated, expected error 19 + 8. Replace the lastLoginTime attribute and check if account is activated + 9. Run ldapsearch as normal user, expected 0. + :expectedresults: + 1. Success + 2. Success + 3. Success + 4. Success + 5. Success + 6. Success + 7. Success + 8. Success + 9. Success + """ + + suffix = DEFAULT_SUFFIX + subtree = "ou=groups" + userid = "nsactusr" + nousrs = 1 + + log.info('AccountInactivityLimit set to 12. 
Account will be inactivated if not accessed in 12 secs') + add_users(topology_st, suffix, subtree, userid, nousrs, 0) + log.info('Sleep for 3 secs to check if account is not inactivated, expected value 0') + time.sleep(3) + nsact_inact(topology_st, suffix, subtree, userid, nousrs, "unlock", "") + log.info('Sleep for 10 secs to check if account is inactivated, expected value 19') + time.sleep(10) + nsact_inact(topology_st, suffix, subtree, userid, nousrs, "unlock", "") + account_status(topology_st, suffix, subtree, userid, nousrs, 0, "Disabled") + nsact_inact(topology_st, suffix, subtree, userid, nousrs, "entry-status", + "inactivity limit exceeded") + add_time_attr(topology_st, suffix, subtree, userid, nousrs, 'lastLoginTime') + account_status(topology_st, suffix, subtree, userid, nousrs, 0, "Enabled") + nsact_inact(topology_st, suffix, subtree, userid, nousrs, "entry-status", "activated") + del_users(topology_st, suffix, subtree, userid, nousrs) + + +#unstable or unstatus tests, skipped for now +@pytest.mark.flaky(max_runs=2, min_passes=1) +def test_glinact_acclock(topology_st, accpol_global): + """Verify if user account is activated when account is unlocked by passwordlockoutduration. + + :id: 43601a61-065c-4c80-a7c2-e4f6ae17beb8 + :setup: Standalone instance, Global account policy plugin configuration, + set accountInactivityLimit to few secs. + :steps: + 1. Add few users to ou=groups subtree in the default suffix + 2. Wait for few secs and attempt invalid binds for user + 3. User account should be locked based on Account Lockout policy. + 4. Wait till accountInactivityLimit exceeded and check users, expected error 19 + 5. Wait for passwordlockoutduration and check if account is active + 6. Check if account is unlocked, expected error 19, since account is inactivated + 7. Replace the lastLoginTime attribute and check users, expected 0 + :expectedresults: + 1. Success + 2. Success + 3. Success + 4. Success + 5. Success + 6. Success + 7. 
Success + """ + + suffix = DEFAULT_SUFFIX + subtree = "ou=groups" + userid = "pwlockusr" + nousrs = 1 + log.info('AccountInactivityLimit set to 12. Account will be inactivated if not accessed in 12 secs') + add_users(topology_st, suffix, subtree, userid, nousrs, 0) + log.info('Sleep for 3 secs and try invalid binds to lockout the user') + time.sleep(3) + + pwacc_lock(topology_st, suffix, subtree, userid, nousrs) + log.info('Sleep for 10 secs to check if account is inactivated, expected value 19') + time.sleep(10) + account_status(topology_st, suffix, subtree, userid, nousrs, 0, "Disabled") + + log.info('Add lastLoginTime to activate the user account') + add_time_attr(topology_st, suffix, subtree, userid, nousrs, 'lastLoginTime') + account_status(topology_st, suffix, subtree, userid, nousrs, 0, "Enabled") + + log.info('Checking if account is unlocked after passwordlockoutduration, but inactivated after accountInactivityLimit') + pwacc_lock(topology_st, suffix, subtree, userid, nousrs) + account_status(topology_st, suffix, subtree, userid, nousrs, 0, "Disabled") + + log.info('Account is expected to be unlocked after 5 secs of passwordlockoutduration') + time.sleep(5) + account_status(topology_st, suffix, subtree, userid, nousrs, 0, "Enabled") + + log.info('Sleep 13s and check if account inactivated based on accountInactivityLimit, expected 19') + time.sleep(13) + account_status(topology_st, suffix, subtree, userid, nousrs, 0, "Disabled") + del_users(topology_st, suffix, subtree, userid, nousrs) + + +#unstable or unstatus tests, skipped for now +@pytest.mark.flaky(max_runs=2, min_passes=1) +def test_glnact_pwexp(topology_st, accpol_global): + """Verify if user account is activated when password is reset after password is expired + + :id: 3bb97992-101a-4e5a-b60a-4cc21adcc76e + :setup: Standalone instance, Global account policy plugin configuration, + set accountInactivityLimit to few secs. + :steps: + 1. Add few users to ou=groups subtree in the default suffix + 2. 
Set passwordmaxage to few secs + 3. Wait for passwordmaxage to reach and check if password expired + 4. Run ldapsearch as normal user, expected error 19. + 5. Reset the password for user account + 6. Wait till accountInactivityLimit exceeded and check users + 7. Run ldapsearch as normal user, expected error 19. + 8. Replace the lastLoginTime attribute and check if account is activated + 9. Run ldapsearch as normal user, expected 0. + :expectedresults: + 1. Success + 2. Success + 3. Success + 4. Success + 5. Success + 6. Success + 7. Success + 8. Success + 9. Success + """ + + suffix = DEFAULT_SUFFIX + subtree = "ou=groups" + userid = "pwexpusr" + nousrs = 1 + try: + topology_st.standalone.config.set('passwordmaxage', '9') + except ldap.LDAPError as e: + log.error('Failed to change the value of passwordmaxage to 9') + raise e + log.info('AccountInactivityLimit set to 12. Account will be inactivated if not accessed in 12 secs') + log.info('Passwordmaxage is set to 9. Password will expire in 9 secs') + add_users(topology_st, suffix, subtree, userid, nousrs, 0) + + log.info('Sleep for 9 secs and check if password expired') + time.sleep(9) + account_status(topology_st, suffix, subtree, userid, nousrs, 0, "Expired") + time.sleep(4) # Passed inactivity + account_status(topology_st, suffix, subtree, userid, nousrs, 0, "Disabled") + + log.info('Add lastLoginTime to activate the user account') + add_time_attr(topology_st, suffix, subtree, userid, nousrs, 'lastLoginTime') + account_status(topology_st, suffix, subtree, userid, nousrs, 0, "Expired") + userpw_reset(topology_st, suffix, subtree, userid, nousrs, "DirMgr", PASSWORD, USER_PASW) + account_status(topology_st, suffix, subtree, userid, nousrs, 0, "Enabled") + + # Allow password to expire again, but inactivity continues + time.sleep(7) + + # reset password to counter expiration, we will test expiration again later + userpw_reset(topology_st, suffix, subtree, userid, nousrs, "DirMgr", PASSWORD, USER_PASW) + 
log.info('Sleep for 4 secs and check if account is now inactivated, expected error 19') + time.sleep(4) + account_status(topology_st, suffix, subtree, userid, nousrs, 0, "Disabled") + userpw_reset(topology_st, suffix, subtree, userid, nousrs, "DirMgr", PASSWORD, USER_PASW) + account_status(topology_st, suffix, subtree, userid, nousrs, 0, "Disabled") + + # Reset inactivity and check for expiration + add_time_attr(topology_st, suffix, subtree, userid, nousrs, 'lastLoginTime') + account_status(topology_st, suffix, subtree, userid, nousrs, 0, "Enabled") + time.sleep(8) + account_status(topology_st, suffix, subtree, userid, nousrs, 0, "Expired") + + # Reset account + userpw_reset(topology_st, suffix, subtree, userid, nousrs, "DirMgr", PASSWORD, USER_PASW) + account_status(topology_st, suffix, subtree, userid, nousrs, 0, "Enabled") + + # Reset maxage + try: + topology_st.standalone.config.set('passwordmaxage', '400') + except ldap.LDAPError as e: + log.error('Failed to change the value of passwordmaxage to 400') + raise e + del_users(topology_st, suffix, subtree, userid, nousrs) + + +#unstable or unstatus tests, skipped for now +@pytest.mark.flaky(max_runs=2, min_passes=1) +def test_locact_inact(topology_st, accpol_local): + """Verify if user account is inactivated when accountInactivityLimit is exceeded. + + :id: 02140e36-79eb-4d88-ba28-66478689289b + :setup: Standalone instance, ou=people subtree configured for Local account + policy plugin configuration, set accountInactivityLimit to few secs. + :steps: + 1. Add few users to ou=people subtree in the default suffix + 2. Wait for few secs before it reaches accountInactivityLimit and check users. + 3. Run ldapsearch as normal user, expected 0 + 4. Wait till accountInactivityLimit is exceeded + 5. Run ldapsearch as normal user and check if its inactivated, expected error 19. + 6. Replace user's lastLoginTime attribute and check if its activated, expected 0 + :expectedresults: + 1. Success + 2. Success + 3. Success + 4. 
Success + 5. Success + 6. Should return error code 19 + """ + + suffix = DEFAULT_SUFFIX + subtree = "ou=people" + userid = "inactusr" + nousrs = 3 + log.info('AccountInactivityLimit set to 10. Account will be inactivated if not accessed in 10 secs') + add_users(topology_st, suffix, subtree, userid, nousrs, 0) + log.info('Sleep for 9 secs to check if account is not inactivated, expected value 0') + time.sleep(9) + log.info('Account should not be inactivated since AccountInactivityLimit not exceeded') + account_status(topology_st, suffix, subtree, userid, 3, 2, "Enabled") + log.info('Sleep for 2 more secs to check if account is inactivated') + time.sleep(2) + account_status(topology_st, suffix, subtree, userid, 2, 0, "Disabled") + log.info('Sleep +9 secs to check if account {}3 is inactivated'.format(userid)) + time.sleep(9) + account_status(topology_st, suffix, subtree, userid, 3, 2, "Disabled") + log.info('Add lastLoginTime attribute to all users and check if its activated') + add_time_attr(topology_st, suffix, subtree, userid, nousrs, 'lastLoginTime') + account_status(topology_st, suffix, subtree, userid, nousrs, 0, "Enabled") + del_users(topology_st, suffix, subtree, userid, nousrs) + + +#unstable or unstatus tests, skipped for now +@pytest.mark.flaky(max_runs=2, min_passes=1) +def test_locinact_modrdn(topology_st, accpol_local): + """Verify if user account is inactivated when moved from ou=groups to ou=people subtree. + + :id: 5f25bea3-fab0-4db4-b43d-2d47cc6e5ad1 + :setup: Standalone instance, ou=people subtree configured for Local account + policy plugin configuration, set accountInactivityLimit to few secs. + :steps: + 1. Add few users to ou=groups subtree in the default suffix + 2. Plugin configured to ou=people subtree only. + 3. Wait for few secs before it reaches accountInactivityLimit and check users. + 4. Run ldapsearch as normal user, expected 0 + 5. Wait till accountInactivityLimit exceeded + 6. 
Move users from ou=groups subtree to ou=people subtree + 7. Check if users are inactivated, expected error 19 + :expectedresults: + 1. Success + 2. Success + 3. Success + 4. Success + 5. Success + 6. Success + 7. Should return error code 0 and 19 + """ + + suffix = DEFAULT_SUFFIX + subtree = "ou=groups" + userid = "nolockusr" + nousrs = 1 + log.info('Account should not be inactivated since the subtree is not configured') + add_users(topology_st, suffix, subtree, userid, nousrs, 0) + log.info('Sleep for 11 secs to check if account is not inactivated, expected value 0') + time.sleep(11) + account_status(topology_st, suffix, subtree, userid, nousrs, 0, "Enabled") + log.info('Moving users from ou=groups to ou=people subtree') + user = UserAccount(topology_st.standalone, dn='uid=nolockusr1,ou=groups,dc=example,dc=com') + try: + user.rename('uid=nolockusr1', newsuperior='ou=people,dc=example,dc=com') + except ldap.LDAPError as e: + log.error('Failed to move user uid=nolockusr1 from ou=groups to ou=people') + raise e + subtree = "ou=people" + log.info('Then wait for 11 secs and check if entries are inactivated') + time.sleep(11) + account_status(topology_st, suffix, subtree, userid, nousrs, 0, "Disabled") + add_time_attr(topology_st, suffix, subtree, userid, nousrs, 'lastLoginTime') + account_status(topology_st, suffix, subtree, userid, nousrs, 0, "Enabled") + del_users(topology_st, suffix, subtree, userid, nousrs) + + +def test_locact_modrdn(topology_st, accpol_local): + """Verify if user account is inactivated when users moved from ou=people to ou=groups subtree. + + :id: e821cbae-bfc3-40d3-947d-b228c809987f + :setup: Standalone instance, ou=people subtree configured for Local account + policy plugin configuration, set accountInactivityLimit to few secs. + :steps: + 1. Add few users to ou=people subtree in the default suffix + 2. Wait for few secs and check if users not inactivated, expected 0. + 3. Move users from ou=people to ou=groups subtree + 4. 
Wait till accountInactivityLimit is exceeded + 5. Check if users are active in ou=groups subtree, expected 0 + :expectedresults: + 1. Success + 2. Success + 3. Success + 4. Success + 5. Success + """ + + suffix = DEFAULT_SUFFIX + subtree = "ou=people" + userid = "lockusr" + nousrs = 1 + log.info('Account should be inactivated since the subtree is configured') + add_users(topology_st, suffix, subtree, userid, nousrs, 0) + log.info('Sleep for 11 secs to check if account is inactivated, expected value 19') + time.sleep(11) + account_status(topology_st, suffix, subtree, userid, nousrs, 0, "Disabled") + log.info('Moving users from ou=people to ou=groups subtree') + user = UserAccount(topology_st.standalone, dn='uid=lockusr1,ou=people,dc=example,dc=com') + try: + user.rename('uid=lockusr1', newsuperior='ou=groups,dc=example,dc=com') + except ldap.LDAPError as e: + log.error('Failed to move user uid=lockusr1 from ou=people to ou=groups') + raise e + log.info('Sleep for +2 secs and check users from both ou=people and ou=groups subtree') + time.sleep(2) + subtree = "ou=groups" + account_status(topology_st, suffix, subtree, userid, 1, 0, "Enabled") + del_users(topology_st, suffix, subtree, userid, nousrs) + + +if __name__ == '__main__': + # Run isolated + # -s for DEBUG mode + CURRENT_FILE = os.path.realpath(__file__) + pytest.main("-s {}".format(CURRENT_FILE)) diff --git a/dirsrvtests/tests/suites/plugins/alias_entries_test.py b/dirsrvtests/tests/suites/plugins/alias_entries_test.py new file mode 100644 index 0000000..0cab21d --- /dev/null +++ b/dirsrvtests/tests/suites/plugins/alias_entries_test.py @@ -0,0 +1,111 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2023 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. 
+# --- END COPYRIGHT BLOCK --- +# +import logging +import pytest +import os +import ldap +import time +from lib389.utils import ensure_str +from lib389.plugins import AliasEntriesPlugin +from lib389.idm.user import UserAccount +from lib389._constants import DEFAULT_SUFFIX +from lib389.topologies import topology_st as topo + +log = logging.getLogger(__name__) + +TEST_ENTRY_NAME = "entry" +TEST_ENTRY_DN = "cn=entry," + DEFAULT_SUFFIX +TEST_ALIAS_NAME = "alias entry" +TEST_ALIAS_DN = "cn=alias_entry," + DEFAULT_SUFFIX +TEST_ALIAS_DN_WRONG = "cn=alias_entry_not_there," + DEFAULT_SUFFIX +EXPECTED_UIDNUM = "1000" + + +def test_entry_alias(topo): + """Test that deref search for alias entry works + + :id: 454e85af-0e20-4a36-9b3a-02562b1db53d + :setup: Standalone Instance + :steps: + 1. Enable alias entry plugin + 2. Create entry and alias entry + 3. Set deref option and do a base search + 4. Test non-base scope ssearch returns error + 5. Test invalid alias DN returns error + :expectedresults: + 1. Success + 2. Success + 3. Success + 4. Success + 5. 
Success + """ + inst = topo.standalone + + # Enable Alias Entries plugin + alias_plugin = AliasEntriesPlugin(inst) + alias_plugin.enable() + inst.restart() + + # Add entry + test_user = UserAccount(inst, TEST_ENTRY_DN) + test_user.create(properties={ + 'uid': TEST_ENTRY_NAME, + 'cn': TEST_ENTRY_NAME, + 'sn': TEST_ENTRY_NAME, + 'userPassword': TEST_ENTRY_NAME, + 'uidNumber': '1000', + 'gidNumber': '2000', + 'homeDirectory': '/home/alias_test', + }) + + # Add entry that has an alias set to the first entry + test_alias = UserAccount(inst, TEST_ALIAS_DN) + test_alias.create(properties={ + 'uid': TEST_ALIAS_NAME, + 'cn': TEST_ALIAS_NAME, + 'sn': TEST_ALIAS_NAME, + 'userPassword': TEST_ALIAS_NAME, + 'uidNumber': '1001', + 'gidNumber': '2001', + 'homeDirectory': '/home/alias_test', + 'objectclass': ['alias', 'extensibleObject'], + 'aliasedObjectName': TEST_ENTRY_DN, + }) + + # Set the deref "finding" option + inst.set_option(ldap.OPT_DEREF, ldap.DEREF_FINDING) + + # Do base search which could map entry to the aliased one + log.info("Test alias") + deref_user = UserAccount(inst, TEST_ALIAS_DN) + result = deref_user.search(scope="base") + assert result[0].dn == TEST_ENTRY_DN + assert ensure_str(result[0].getValue('uidNumber')) == EXPECTED_UIDNUM + + # Do non-base search which could raise an error + log.info("Test unsupported search scope") + with pytest.raises(ldap.UNWILLING_TO_PERFORM): + deref_user.search(scope="subtree") + + # Reset the aliasObjectname to a DN that does not exist, and try again + log.info("Test invalid alias") + test_alias.replace('aliasedObjectName', TEST_ALIAS_DN_WRONG) + try: + deref_user.search(scope="base") + assert False + except ldap.LDAPError as e: + msg = e.args[0]['info'] + assert msg.startswith("Failed to dereference alias object") + + +if __name__ == '__main__': + # Run isolated + # -s for DEBUG mode + CURRENT_FILE = os.path.realpath(__file__) + pytest.main(["-s", CURRENT_FILE]) diff --git 
a/dirsrvtests/tests/suites/plugins/attr_nsslapd-pluginarg_test.py b/dirsrvtests/tests/suites/plugins/attr_nsslapd-pluginarg_test.py new file mode 100644 index 0000000..2afaa3d --- /dev/null +++ b/dirsrvtests/tests/suites/plugins/attr_nsslapd-pluginarg_test.py @@ -0,0 +1,211 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2016 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. +# --- END COPYRIGHT BLOCK --- +# +import pytest +from lib389.tasks import * +from lib389.utils import * +from lib389.topologies import topology_st + +from lib389._constants import DEFAULT_SUFFIX, DN_PLUGIN, SUFFIX, PLUGIN_7_BIT_CHECK + +# Skip on older versions +pytestmark = [pytest.mark.tier1, + pytest.mark.skipif(ds_is_older('1.3'), reason="Not implemented")] + +logging.getLogger(__name__).setLevel(logging.DEBUG) +log = logging.getLogger(__name__) + +DN_7BITPLUGIN = "cn=7-bit check,%s" % DN_PLUGIN +ATTRS = ["uid", "mail", "userpassword", ",", SUFFIX, None] + + +@pytest.fixture(scope="module") +def enable_plugin(topology_st): + """Enabling the 7-bit plugin for the + environment setup""" + log.info("Ticket 47431 - 0: Enable 7bit plugin...") + topology_st.standalone.plugins.enable(name=PLUGIN_7_BIT_CHECK) + + +@pytest.mark.ds47431 +def test_duplicate_values(topology_st, enable_plugin): + """Check 26 duplicate values are treated as one + + :id: b23e04f1-2757-42cc-b3a2-26426c903f6d + :setup: Standalone instance, enable 7bit plugin + :steps: + 1. Modify the entry for cn=7-bit check,cn=plugins,cn=config as : + nsslapd-pluginarg0 : uid + nsslapd-pluginarg1 : mail + nsslapd-pluginarg2 : userpassword + nsslapd-pluginarg3 : , + nsslapd-pluginarg4 : dc=example,dc=com + 2. Set nsslapd-pluginarg2 to 'userpassword' for multiple time (ideally 27) + 3. Check whether duplicate values are treated as one + :expectedresults: + 1. It should be modified successfully + 2. It should be successful + 3. 
It should be successful + """ + + log.info("Ticket 47431 - 1: Check 26 duplicate values are treated as one...") + expected = "str2entry_dupcheck.* duplicate values for attribute type nsslapd-pluginarg2 detected in entry cn=7-bit check,cn=plugins,cn=config." + + log.debug('modify_s %s' % DN_7BITPLUGIN) + topology_st.standalone.modify_s(DN_7BITPLUGIN, + [(ldap.MOD_REPLACE, 'nsslapd-pluginarg0', b"uid"), + (ldap.MOD_REPLACE, 'nsslapd-pluginarg1', b"mail"), + (ldap.MOD_REPLACE, 'nsslapd-pluginarg2', b"userpassword"), + (ldap.MOD_REPLACE, 'nsslapd-pluginarg3', b","), + (ldap.MOD_REPLACE, 'nsslapd-pluginarg4', ensure_bytes(SUFFIX))]) + + arg2 = "nsslapd-pluginarg2: userpassword" + topology_st.standalone.stop() + dse_ldif = topology_st.standalone.confdir + '/dse.ldif' + os.system('mv %s %s.47431' % (dse_ldif, dse_ldif)) + os.system( + 'sed -e "s/\\(%s\\)/\\1\\n\\1\\n\\1\\n\\1\\n\\1\\n\\1\\n\\1\\n\\1\\n\\1\\n\\1\\n\\1\\n\\1\\n\\1\\n\\1\\n\\1\\n\\1\\n\\1\\n\\1\\n\\1\\n\\1\\n\\1\\n\\1\\n\\1\\n\\1\\n\\1\\n\\1\\n\\1/" %s.47431 > %s' % ( + arg2, dse_ldif, dse_ldif)) + topology_st.standalone.start() + + cmdline = 'egrep -i "%s" %s' % (expected, topology_st.standalone.errlog) + p = os.popen(cmdline, "r") + line = p.readline() + if line == "": + log.error('Expected error "%s" not logged in %s' % (expected, topology_st.standalone.errlog)) + assert False + else: + log.debug('line: %s' % line) + log.info('Expected error "%s" logged in %s' % (expected, topology_st.standalone.errlog)) + + log.info("Ticket 47431 - 1: done") + + +@pytest.mark.ds47431 +def test_multiple_value(topology_st, enable_plugin): + """Check two values belonging to one arg is fixed + + :id: 20c802bc-332f-4e8d-bcfb-8cd28123d695 + :setup: Standalone instance, enable 7bit plugin + :steps: + 1. 
Modify the entry for cn=7-bit check,cn=plugins,cn=config as : + nsslapd-pluginarg0 : uid + nsslapd-pluginarg0 : mail + nsslapd-pluginarg1 : userpassword + nsslapd-pluginarg2 : , + nsslapd-pluginarg3 : dc=example,dc=com + nsslapd-pluginarg4 : None + (Note : While modifying add two attributes entries for nsslapd-pluginarg0) + + 2. Check two values belonging to one arg is fixed + :expectedresults: + 1. Entries should be modified successfully + 2. Operation should be successful + """ + + log.info("Ticket 47431 - 2: Check two values belonging to one arg is fixed...") + + topology_st.standalone.modify_s(DN_7BITPLUGIN, + [(ldap.MOD_REPLACE, 'nsslapd-pluginarg0', b"uid"), + (ldap.MOD_ADD, 'nsslapd-pluginarg0', b"mail"), + (ldap.MOD_REPLACE, 'nsslapd-pluginarg1', b"userpassword"), + (ldap.MOD_REPLACE, 'nsslapd-pluginarg2', b","), + (ldap.MOD_REPLACE, 'nsslapd-pluginarg3', ensure_bytes(SUFFIX)), + (ldap.MOD_DELETE, 'nsslapd-pluginarg4', None)]) + + # PLUGIN LOG LEVEL + topology_st.standalone.modify_s(DN_CONFIG, [(ldap.MOD_REPLACE, 'nsslapd-errorlog-level', b'65536')]) + + topology_st.standalone.restart() + + cmdline = 'egrep -i %s %s' % ("NS7bitAttr_Init", topology_st.standalone.errlog) + p = os.popen(cmdline, "r") + i = 0 + while ATTRS[i]: + line = p.readline() + log.debug('line - %s' % line) + log.debug('ATTRS[%d] %s' % (i, ATTRS[i])) + if line == "": + break + elif line.find(ATTRS[i]) >= 0: + log.debug('%s was logged' % ATTRS[i]) + else: + log.error('%s was not logged.' % ATTRS[i]) + assert False + i = i + 1 + + log.info("Ticket 47431 - 2: done") + + +@pytest.mark.ds47431 +def test_missing_args(topology_st, enable_plugin): + """Check missing args are fixed + + :id: b2814399-7ed2-4fe0-981d-b0bdbbe31cfb + :setup: Standalone instance, enable 7bit plugin + :steps: + 1. 
Modify the entry for cn=7-bit check,cn=plugins,cn=config as : + nsslapd-pluginarg0 : None + nsslapd-pluginarg1 : uid + nsslapd-pluginarg2 : None + nsslapd-pluginarg3 : mail + nsslapd-pluginarg5 : userpassword + nsslapd-pluginarg7 : , + nsslapd-pluginarg9 : dc=example,dc=com + (Note: While modifying add 2 entries as None) + + 2. Change the nsslapd-errorlog-level to 65536 + 3. Check missing agrs are fixed + :expectedresults: + 1. Entries should be modified successfully + 2. Operation should be successful + 3. Operation should be successful + """ + + log.info("Ticket 47431 - 3: Check missing args are fixed...") + + topology_st.standalone.modify_s(DN_7BITPLUGIN, + [(ldap.MOD_DELETE, 'nsslapd-pluginarg0', None), + (ldap.MOD_REPLACE, 'nsslapd-pluginarg1', b"uid"), + (ldap.MOD_DELETE, 'nsslapd-pluginarg2', None), + (ldap.MOD_REPLACE, 'nsslapd-pluginarg3', b"mail"), + (ldap.MOD_REPLACE, 'nsslapd-pluginarg5', b"userpassword"), + (ldap.MOD_REPLACE, 'nsslapd-pluginarg7', b","), + (ldap.MOD_REPLACE, 'nsslapd-pluginarg9', ensure_bytes(SUFFIX))]) + + # PLUGIN LOG LEVEL + topology_st.standalone.modify_s(DN_CONFIG, [(ldap.MOD_REPLACE, 'nsslapd-errorlog-level', b'65536')]) + + topology_st.standalone.stop() + os.system('mv %s %s.47431' % (topology_st.standalone.errlog, topology_st.standalone.errlog)) + os.system('touch %s' % (topology_st.standalone.errlog)) + topology_st.standalone.start() + + cmdline = 'egrep -i %s %s' % ("NS7bitAttr_Init", topology_st.standalone.errlog) + p = os.popen(cmdline, "r") + i = 0 + while ATTRS[i]: + line = p.readline() + if line == "": + break + elif line.find(ATTRS[i]) >= 0: + log.debug('%s was logged' % ATTRS[i]) + else: + log.error('%s was not logged.' 
% ATTRS[i]) + assert False + i = i + 1 + + log.info("Ticket 47431 - 3: done") + log.info('Test complete') + + +if __name__ == '__main__': + # Run isolated + # -s for DEBUG mode + CURRENT_FILE = os.path.realpath(__file__) + pytest.main("-s %s" % CURRENT_FILE) diff --git a/dirsrvtests/tests/suites/plugins/attruniq_test.py b/dirsrvtests/tests/suites/plugins/attruniq_test.py new file mode 100644 index 0000000..b190e0e --- /dev/null +++ b/dirsrvtests/tests/suites/plugins/attruniq_test.py @@ -0,0 +1,83 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2021 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. +# --- END COPYRIGHT BLOCK --- +# +import pytest +import ldap +import logging +from lib389.plugins import AttributeUniquenessPlugin +from lib389.idm.user import UserAccounts +from lib389.idm.group import Groups +from lib389._constants import DEFAULT_SUFFIX +from lib389.topologies import topology_st + +pytestmark = pytest.mark.tier1 + +logging.getLogger(__name__).setLevel(logging.DEBUG) +log = logging.getLogger(__name__) +MAIL_ATTR_VALUE = 'non-uniq@value.net' + + +def test_modrdn_attr_uniqueness(topology_st): + """Test that we can not add two entries that have the same attr value that is + defined by the plugin + + :id: dd763830-78b8-452e-888d-1d83d2e623f1 + + :setup: Standalone instance + + :steps: 1. Create two groups + 2. Setup PLUGIN_ATTR_UNIQUENESS plugin for 'mail' attribute for the group2 + 3. Enable PLUGIN_ATTR_UNIQUENESS plugin as "ON" + 4. Add two test users at group1 and add not uniq 'mail' attribute to each of them + 5. Move user1 to group2 + 6. Move user2 to group2 + 7. Move user2 back to group1 + + :expectedresults: + 1. Success + 2. Success + 3. Success + 4. Success + 5. Success + 6. Modrdn operation should FAIL + 7. 
Success + """ + log.debug('Create two groups') + groups = Groups(topology_st.standalone, DEFAULT_SUFFIX) + group1 = groups.create(properties={'cn': 'group1'}) + group2 = groups.create(properties={'cn': 'group2'}) + + attruniq = AttributeUniquenessPlugin(topology_st.standalone, dn="cn=attruniq,cn=plugins,cn=config") + log.debug(f'Setup PLUGIN_ATTR_UNIQUENESS plugin for {MAIL_ATTR_VALUE} attribute for the group2') + attruniq.create(properties={'cn': 'attruniq'}) + attruniq.add_unique_attribute('mail') + attruniq.add_unique_subtree(group2.dn) + attruniq.enable_all_subtrees() + log.debug(f'Enable PLUGIN_ATTR_UNIQUENESS plugin as "ON"') + attruniq.enable() + topology_st.standalone.restart() + + log.debug(f'Add two test users at group1 and add not uniq {MAIL_ATTR_VALUE} attribute to each of them') + users = UserAccounts(topology_st.standalone, basedn=group1.dn, rdn=None) + user1 = users.create_test_user(1) + user2 = users.create_test_user(2) + user1.add('mail', MAIL_ATTR_VALUE) + user2.add('mail', MAIL_ATTR_VALUE) + + log.debug('Move user1 to group2') + user1.rename(f'uid={user1.rdn}', group2.dn) + + log.debug('Move user2 to group2') + with pytest.raises(ldap.CONSTRAINT_VIOLATION) as excinfo: + user2.rename(f'uid={user2.rdn}', group2.dn) + log.fatal(f'Failed: Attribute "mail" with {MAIL_ATTR_VALUE} is accepted') + assert 'attribute value already exist' in str(excinfo.value) + log.debug(excinfo.value) + + log.debug('Move user2 to group1') + user2.rename(f'uid={user2.rdn}', group1.dn) \ No newline at end of file diff --git a/dirsrvtests/tests/suites/plugins/cos_test.py b/dirsrvtests/tests/suites/plugins/cos_test.py new file mode 100644 index 0000000..82e29a0 --- /dev/null +++ b/dirsrvtests/tests/suites/plugins/cos_test.py @@ -0,0 +1,220 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2016 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. 
+# --- END COPYRIGHT BLOCK --- +# +''' +Created on Nov 27th, 2018 + +@author: tbordaz +''' +import logging +import subprocess +import pytest +from lib389 import Entry +from lib389.utils import * +from lib389.plugins import * +from lib389._constants import * +from lib389.topologies import topology_st as topo + +pytestmark = pytest.mark.tier1 + +def add_user(server, uid, testbase, locality=None, tel=None, title=None): + dn = 'uid=%s,%s' % (uid, testbase) + log.fatal('Adding user (%s): ' % dn) + server.add_s(Entry((dn, {'objectclass': ['top', 'person', 'organizationalPerson', 'inetOrgPerson'], + 'cn': 'user_%s' % uid, + 'sn': 'user_%s' % uid, + 'uid': uid, + 'l': locality, + 'title': title, + 'telephoneNumber': tel, + 'description': 'description real'}))) + +@pytest.mark.ds50053 +def test_cos_operational_default(topo): + """operational-default cosAttribute should not overwrite an existing value + + :id: 12fadff9-e14a-4c64-a3ee-51152cb8fcfb + :setup: Standalone Instance + :steps: + 1. Create a user entry with attribute 'l' and 'telephonenumber' (real attribute with real value) + 2. Create cos that defines 'l' as operational-default (virt. attr. with value != real value) + 3. Create cos that defines 'telephone' as default (virt. attr. with value != real value) + 4. Check that telephone is retrieved with real value + 5. Check that 'l' is retrieved with real value + :expectedresults: + 1. should succeed + 2. should succeed + 3. 
should succeed + """ + + REAL = 'real' + VIRTUAL = 'virtual' + TEL_REAL = '1234 is %s' % REAL + TEL_VIRT = '4321 is %s' % VIRTUAL + + LOC_REAL = 'here is %s' % REAL + LOC_VIRT = 'there is %s' % VIRTUAL + + TITLE_REAL = 'title is %s' % REAL + + inst = topo[0] + + PEOPLE = 'ou=people,%s' % SUFFIX + add_user(inst, 'user_0', PEOPLE, locality=LOC_REAL, tel=TEL_REAL, title=TITLE_REAL) + + # locality cos operational-default + LOC_COS_TEMPLATE = "cn=locality_template,%s" % PEOPLE + LOC_COS_DEFINITION = "cn=locality_definition,%s" % PEOPLE + inst.add_s(Entry((LOC_COS_TEMPLATE, { + 'objectclass': ['top', 'extensibleObject', 'costemplate', + 'ldapsubentry'], + 'l': LOC_VIRT}))) + + inst.add_s(Entry((LOC_COS_DEFINITION, { + 'objectclass': ['top', 'LdapSubEntry', 'cosSuperDefinition', + 'cosPointerDefinition'], + 'cosTemplateDn': LOC_COS_TEMPLATE, + 'cosAttribute': 'l operational-default'}))) + + # telephone cos default + TEL_COS_TEMPLATE = "cn=telephone_template,%s" % PEOPLE + TEL_COS_DEFINITION = "cn=telephone_definition,%s" % PEOPLE + inst.add_s(Entry((TEL_COS_TEMPLATE, { + 'objectclass': ['top', 'extensibleObject', 'costemplate', + 'ldapsubentry'], + 'telephonenumber': TEL_VIRT}))) + + inst.add_s(Entry((TEL_COS_DEFINITION, { + 'objectclass': ['top', 'LdapSubEntry', 'cosSuperDefinition', + 'cosPointerDefinition'], + 'cosTemplateDn': TEL_COS_TEMPLATE, + 'cosAttribute': 'telephonenumber default'}))) + + # seeAlso cos operational + SEEALSO_VIRT = "dc=%s,dc=example,dc=com" % VIRTUAL + SEEALSO_COS_TEMPLATE = "cn=seealso_template,%s" % PEOPLE + SEEALSO_COS_DEFINITION = "cn=seealso_definition,%s" % PEOPLE + inst.add_s(Entry((SEEALSO_COS_TEMPLATE, { + 'objectclass': ['top', 'extensibleObject', 'costemplate', + 'ldapsubentry'], + 'seealso': SEEALSO_VIRT}))) + + inst.add_s(Entry((SEEALSO_COS_DEFINITION, { + 'objectclass': ['top', 'LdapSubEntry', 'cosSuperDefinition', + 'cosPointerDefinition'], + 'cosTemplateDn': SEEALSO_COS_TEMPLATE, + 'cosAttribute': 'seealso operational'}))) + + # 
description cos override + DESC_VIRT = "desc is %s" % VIRTUAL + DESC_COS_TEMPLATE = "cn=desc_template,%s" % PEOPLE + DESC_COS_DEFINITION = "cn=desc_definition,%s" % PEOPLE + inst.add_s(Entry((DESC_COS_TEMPLATE, { + 'objectclass': ['top', 'extensibleObject', 'costemplate', + 'ldapsubentry'], + 'description': DESC_VIRT}))) + + inst.add_s(Entry((DESC_COS_DEFINITION, { + 'objectclass': ['top', 'LdapSubEntry', 'cosSuperDefinition', + 'cosPointerDefinition'], + 'cosTemplateDn': DESC_COS_TEMPLATE, + 'cosAttribute': 'description override'}))) + + # title cos override + TITLE_VIRT = [] + for i in range(2): + TITLE_VIRT.append("title is %s %d" % (VIRTUAL, i)) + TITLE_COS_TEMPLATE = "cn=title_template,%s" % PEOPLE + TITLE_COS_DEFINITION = "cn=title_definition,%s" % PEOPLE + inst.add_s(Entry((TITLE_COS_TEMPLATE, { + 'objectclass': ['top', 'extensibleObject', 'costemplate', + 'ldapsubentry'], + 'title': TITLE_VIRT}))) + + inst.add_s(Entry((TITLE_COS_DEFINITION, { + 'objectclass': ['top', 'LdapSubEntry', 'cosSuperDefinition', + 'cosPointerDefinition'], + 'cosTemplateDn': TITLE_COS_TEMPLATE, + 'cosAttribute': 'title merge-schemes'}))) + + # note that the search requests both attributes (it is required for operational*) + ents = inst.search_s(SUFFIX, ldap.SCOPE_SUBTREE, "uid=user_0", ["telephonenumber", "l"]) + assert len(ents) == 1 + ent = ents[0] + + # Check telephonenumber (specifier default) with real value => real + assert ent.hasAttr('telephonenumber') + value = ent.getValue('telephonenumber') + log.info('Returned telephonenumber (exp. real): %s' % value) + log.info('Returned telephonenumber: %d' % value.find(REAL.encode())) + assert value.find(REAL.encode()) != -1 + + # Check 'locality' (specifier operational-default) with real value => real + assert ent.hasAttr('l') + value = ent.getValue('l') + log.info('Returned l (exp. 
real): %s ' % value) + log.info('Returned l: %d' % value.find(REAL.encode())) + assert value.find(REAL.encode()) != -1 + + # Check 'seealso' (specifier operational) without real value => virtual + assert not ent.hasAttr('seealso') + ents = inst.search_s(SUFFIX, ldap.SCOPE_SUBTREE, "uid=user_0", ["seealso"]) + assert len(ents) == 1 + ent = ents[0] + value = ent.getValue('seealso') + log.info('Returned seealso (exp. virtual): %s' % value) + log.info('Returned seealso: %d' % value.find(VIRTUAL.encode())) + assert value.find(VIRTUAL.encode()) != -1 + + # Check 'description' (specifier override) with real value => virtual + assert not ent.hasAttr('description') + ents = inst.search_s(SUFFIX, ldap.SCOPE_SUBTREE, "uid=user_0") + assert len(ents) == 1 + ent = ents[0] + value = ent.getValue('description') + log.info('Returned description (exp. virtual): %s' % value) + log.info('Returned description: %d' % value.find(VIRTUAL.encode())) + assert value.find(VIRTUAL.encode()) != -1 + + # Check 'title' (specifier merge-schemes) with real value => real value returned + ents = inst.search_s(SUFFIX, ldap.SCOPE_SUBTREE, "uid=user_0") + assert len(ents) == 1 + ent = ents[0] + found_real = False + found_virtual = False + for value in ent.getValues('title'): + log.info('Returned title (exp. real): %s' % value) + if value.find(VIRTUAL.encode()) != -1: + found_virtual = True + if value.find(REAL.encode()) != -1: + found_real = True + assert not found_virtual + assert found_real + + # Check 'title ((specifier merge-schemes) without real value => real value returned + ents = inst.search_s(SUFFIX, ldap.SCOPE_SUBTREE, "uid=user_0") + assert len(ents) == 1 + inst.modify_s(ents[0].dn,[(ldap.MOD_DELETE, 'title', None)]) + + inst.restart() + ents = inst.search_s(SUFFIX, ldap.SCOPE_SUBTREE, "uid=user_0") + assert len(ents) == 1 + ent = ents[0] + found_real = False + found_virtual = False + count = 0 + for value in ent.getValues('title'): + log.info('Returned title(exp. 
# --- BEGIN COPYRIGHT BLOCK ---
# Copyright (C) 2022 Red Hat, Inc.
# All rights reserved.
#
# License: GPL (version 3 or any later version).
# See LICENSE for details.
# --- END COPYRIGHT BLOCK ---
#
import os
import logging
import pytest
import ldap
from lib389._constants import DEFAULT_SUFFIX, PASSWORD
from lib389.idm.organizationalunit import OrganizationalUnits
from lib389.idm.user import UserAccounts, TEST_USER_PROPERTIES
from lib389.idm.group import Groups
from lib389.topologies import topology_st as topo

pytestmark = pytest.mark.tier1

DEBUGGING = os.getenv("DEBUGGING", default=None)
if DEBUGGING:
    logging.getLogger(__name__).setLevel(logging.DEBUG)
else:
    logging.getLogger(__name__).setLevel(logging.INFO)
log = logging.getLogger(__name__)

# Static DNs of the IPA-like layout that the test builds below
ACCTS_DN = "ou=accounts,dc=example,dc=com"
USERS_DN = "ou=users,ou=accounts,dc=example,dc=com"
GROUPS_DN = "ou=groups,ou=accounts,dc=example,dc=com"
ADMIN_GROUP_DN = "cn=admins,ou=groups,ou=accounts,dc=example,dc=com"
ADMIN_DN = "uid=admin,ou=users,ou=accounts,dc=example,dc=com"

# ACIs mimicking a generic IPA deployment: userPassword is searchable but
# never readable by ordinary binds.
ACCTS_ACI = ('(targetattr="userPassword")(version 3.0; acl "allow password ' +
             'search"; allow(search) userdn = "ldap:///all";)')
USERS_ACI = ('(targetattr = "cn || createtimestamp || description || displayname || entryusn || gecos ' +
             '|| gidnumber || givenname || homedirectory || initials || ' +
             'loginshell || manager || modifytimestamp || objectclass || sn || title || uid || uidnumber")' +
             '(targetfilter = "(objectclass=posixaccount)")' +
             '(version 3.0;acl "Read Attributes";allow (compare,read,search) userdn = "ldap:///anyone";)')
# NOTE(review): the concatenation below yields 'acl"permission' with no space
# before the quote — presumably accepted by the ACI parser; confirm intended.
GROUPS_ACIS = [
    (
        '(targetattr = "businesscategory || cn || createtimestamp || description |' +
        '| entryusn || gidnumber || mepmanagedby || modifytimestamp || o || objectclass || ou || own' +
        'er || seealso")(targetfilter = "(objectclass=posixgroup)")(version 3.0;acl' +
        '"permission:System: Read Groups";allow (compare,re' +
        'ad,search) userdn = "ldap:///anyone";)'
    ),
    (
        '(targetattr = "member || memberof || memberuid")(targetfilter = ' +
        '"(objectclass=posixgroup)")(version 3.0;acl' +
        '"permission:System: Read Group Membership";allow (compare,read' +
        ',search) userdn = "ldap:///all";)'
    )
]


def test_deref_and_access_control(topo):
    """Test that the deref plugin honors access control rules correctly

    The setup mimics a generic IPA DIT with its ACI's. The userpassword
    attribute should not be returned

    :id: bedb6af2-b765-479d-808c-df0348e0ec95
    :setup: Standalone Instance
    :steps:
        1. Create container entries with aci's
        2. Perform deref search and make sure userpassword is not returned
    :expectedresults:
        1. Success
        2. Success
    """

    topo.standalone.config.set('nsslapd-schemacheck', 'off')
    if DEBUGGING:
        topo.standalone.config.enable_log('audit')
        topo.standalone.config.set('nsslapd-errorlog-level', '128')

    # Accounts
    ou1 = OrganizationalUnits(topo.standalone, DEFAULT_SUFFIX)
    ou1.create(properties={
        'ou': 'accounts',
        'aci': ACCTS_ACI
    })

    # Users
    ou2 = OrganizationalUnits(topo.standalone, ACCTS_DN)
    ou2.create(properties={
        'ou': 'users',
        'aci': USERS_ACI
    })

    # Groups
    ou3 = OrganizationalUnits(topo.standalone, ACCTS_DN)
    ou3.create(properties={
        'ou': 'groups',
        'aci': GROUPS_ACIS
    })

    # Create User
    users = UserAccounts(topo.standalone, USERS_DN, rdn=None)
    user_props = TEST_USER_PROPERTIES.copy()
    user_props.update(
        {
            'uid': 'user',
            'objectclass': ['posixAccount', 'extensibleObject'],
            'userpassword': PASSWORD
        }
    )
    user = users.create(properties=user_props)

    # Create Admin user
    user_props = TEST_USER_PROPERTIES.copy()
    user_props.update(
        {
            'uid': 'admin',
            'objectclass': ['posixAccount', 'extensibleObject', 'inetuser'],
            'userpassword': PASSWORD,
            'memberOf': ADMIN_GROUP_DN
        }
    )
    users.create(properties=user_props)

    # Create Admin group
    groups = Groups(topo.standalone, GROUPS_DN, rdn=None)
    group_props = {
        'cn': 'admins',
        'gidNumber': '123',
        'objectclass': ['posixGroup', 'extensibleObject'],
        'member': ADMIN_DN
    }
    groups.create(properties=group_props)

    # Bind as the unprivileged user, then dereference the admin group's
    # "member" attribute asking for cn and userpassword
    user.rebind(PASSWORD)
    result, control_response = topo.standalone.dereference(
        'member:cn,userpassword',
        base=ADMIN_GROUP_DN,
        scope=ldap.SCOPE_BASE)

    log.info('Check, that the dereference search result does not have userpassword')
    # Fix: scan every dereferenced attribute, not only attrVals[0] — the
    # original assertion would miss a userpassword value returned anywhere
    # after the first attribute in the response.
    for attr_val in result[0][2][0].entry[0]['attrVals']:
        assert attr_val['type'] != 'userpassword'


if __name__ == '__main__':
    # Run isolated
    # -s for DEBUG mode
    CURRENT_FILE = os.path.realpath(__file__)
    pytest.main(["-s", CURRENT_FILE])
# --- BEGIN COPYRIGHT BLOCK ---
# Copyright (C) 2021 Red Hat, Inc.
# All rights reserved.
#
# License: GPL (version 3 or any later version).
# See LICENSE for details.
# --- END COPYRIGHT BLOCK ---
#
"""Test DNA plugin functionality"""

import logging
import ldap  # Fix: was only available via the lib389.utils wildcard import
import pytest
from lib389._constants import DEFAULT_SUFFIX
from lib389.plugins import DNAPlugin, DNAPluginConfigs
from lib389.idm.organizationalunit import OrganizationalUnits
from lib389.idm.user import UserAccounts
from lib389.topologies import topology_st
from lib389.utils import *

pytestmark = pytest.mark.tier1

log = logging.getLogger(__name__)


@pytest.fixture(scope="function")
def dna_plugin(topology_st, request):
    """Configure the DNA plugin on uidNumber (magic regen -1, interval 10)
    under ou=People, and remove the config again on teardown."""
    inst = topology_st.standalone
    plugin = DNAPlugin(inst)
    ous = OrganizationalUnits(inst, DEFAULT_SUFFIX)
    ou_people = ous.get("People")

    log.info("Add dna plugin config entry...")
    configs = DNAPluginConfigs(inst, plugin.dn)
    dna_config = configs.create(properties={'cn': 'dna config',
                                            'dnaType': 'uidNumber',
                                            'dnaMaxValue': '1000',
                                            'dnaMagicRegen': '-1',
                                            'dnaFilter': '(objectclass=top)',
                                            'dnaScope': ou_people.dn,
                                            'dnaNextValue': '10',
                                            'dnaInterval': '10'})

    log.info("Enable the DNA plugin and restart...")
    plugin.enable()
    inst.restart()

    def fin():
        # Remove the config offline (via dse.ldif) so that a broken config
        # left behind by a test cannot prevent a clean restart.
        inst.stop()
        dse_ldif = DSEldif(inst)  # DSEldif comes from the lib389.utils wildcard import
        dse_ldif.delete_dn(f'cn=dna config,{plugin.dn}')
        inst.start()
    request.addfinalizer(fin)

    return dna_config


def test_dna_interval(topology_st, dna_plugin):
    """Test the dna interval works

    :id: 3982d698-e16b-4945-9eb4-eecaa4bac5f7
    :customerscenario: True
    :setup: Standalone Instance
    :steps:
        1. Set DNA interval to 10
        2. Create user that triggers DNA to assign a value
        3. Verify DNA is working
        4. Make update to entry that triggers DNA again
        5. Verify interval is applied as expected
    :expectedresults:
        1. Success
        2. Success
        3. Success
        4. Success
        5. Success
    """

    # Create user and check interval
    log.info("Test DNA is working...")
    users = UserAccounts(topology_st.standalone, DEFAULT_SUFFIX)
    log.info('Adding user1')
    user = users.create(properties={
        'sn': 'interval',
        'cn': 'interval',
        'uid': 'interval',
        'uidNumber': '-1',  # Magic regen value
        'gidNumber': '111',
        'givenname': 'interval',
        'homePhone': '0861234567',
        'carLicense': '131D16674',
        'mail': 'interval@example.com',
        'homeDirectory': '/home/interval'})

    # Verify DNA works
    assert user.get_attr_val_utf8_l('uidNumber') == '10'

    # Make update and verify interval was applied
    log.info("Test DNA interval assignment is working...")
    user.replace('uidNumber', '-1')
    assert user.get_attr_val_utf8_l('uidNumber') == '20'


def test_dna_max_value(topology_st, dna_plugin):
    """Test the dna max value works with dna interval

    :id: cc979ea8-3cd0-4d52-af35-9cea7cf8cb5f
    :customerscenario: True
    :setup: Standalone Instance
    :steps:
        1. Set dnaMaxValue, dnaNextValue, dnaInterval values to 100, 90, 50 respectively
        2. Create user that triggers DNA to assign a value
        3. Try to make an update to entry that triggers DNA again
    :expectedresults:
        1. Success
        2. Success
        3. Operation should fail with OPERATIONS_ERROR
    """
    log.info("Make the config changes needed to test dnaMaxValue")
    dna_plugin.replace_many(('dnaMaxValue', '100'), ('dnaNextValue', '90'), ('dnaInterval', '50'))
    # Create user2
    users = UserAccounts(topology_st.standalone, DEFAULT_SUFFIX)
    log.info('Adding user2')
    user = users.create(properties={
        'sn': 'interval2',
        'cn': 'interval2',
        'uid': 'interval2',
        'uidNumber': '-1',  # Magic regen value
        'gidNumber': '222',
        'givenname': 'interval2',
        'homePhone': '0861234567',
        'carLicense': '131D16674',
        'mail': 'interval2@example.com',
        'homeDirectory': '/home/interval2'})

    # Verify DNA works
    assert user.get_attr_val_utf8_l('uidNumber') == '90'

    log.info("Make an update and verify it raises error as the new interval value is more than dnaMaxValue")
    with pytest.raises(ldap.OPERATIONS_ERROR):
        user.replace('uidNumber', '-1')


@pytest.mark.parametrize('attr_value', ('0', 'abc', '2000'))
def test_dna_interval_with_different_values(topology_st, dna_plugin, attr_value):
    """Test the dna interval with different values

    :id: 1a3f69fd-1d8d-4046-ba68-b6aa7cafbd37
    :customerscenario: True
    :parametrized: yes
    :setup: Standalone Instance
    :steps:
        1. Set dnaInterval value to 0
        2. Set dnaInterval value to 'abc'
        3. Set dnaInterval value to 2000 and dnaMaxValue to 1000
        4. Create user that triggers DNA to assign a value
        5. Try to make an update to entry that triggers DNA again when dnaInterval is greater than dnaMaxValue
    :expectedresults:
        1. Success
        2. Operation should fail with INVALID_SYNTAX
        3. Success
        4. Success
        5. Operation should fail with OPERATIONS_ERROR
    """
    log.info("Make the config changes needed to test dnaInterval")
    if attr_value == '0':
        dna_plugin.replace('dnaInterval', attr_value)
    elif attr_value == 'abc':
        with pytest.raises(ldap.INVALID_SYNTAX):
            dna_plugin.replace('dnaInterval', attr_value)
    else:
        dna_plugin.replace_many(('dnaInterval', attr_value), ('dnaMaxValue', '1000'))
        # Create user3
        users = UserAccounts(topology_st.standalone, DEFAULT_SUFFIX)
        log.info('Adding user3')
        user = users.create(properties={
            'sn': 'interval3',
            'cn': 'interval3',
            'uid': 'interval3',
            'uidNumber': '-1',  # Magic regen value
            'gidNumber': '333',
            'givenname': 'interval3',
            'homePhone': '0861234567',
            'carLicense': '131D16674',
            'mail': 'interval3@example.com',
            'homeDirectory': '/home/interval3'})

        # Verify DNA works
        assert user.get_attr_val_utf8_l('uidNumber') == '10'

        log.info("Make an update and verify it raises error as the new interval value is more than dnaMaxValue")
        with pytest.raises(ldap.OPERATIONS_ERROR):
            user.replace('uidNumber', '-1')

    # Check that the instance did not crash
    assert topology_st.standalone.status()
# --- END COPYRIGHT BLOCK ---
#
"""Test DNA plugin functionality"""

import logging
import pytest
from lib389._constants import DEFAULT_SUFFIX
from lib389.plugins import DNAPlugin, DNAPluginSharedConfigs, DNAPluginConfigs
from lib389.idm.organizationalunit import OrganizationalUnits
from lib389.idm.user import UserAccounts
from lib389.topologies import topology_st
import ldap

pytestmark = pytest.mark.tier1

log = logging.getLogger(__name__)


@pytest.mark.ds47937
def test_dnatype_only_valid(topology_st):
    """Test that DNA plugin only accepts valid attributes for "dnaType"

    :id: 0878ecff-5fdc-47d7-8c8f-edf4556f9746
    :setup: Standalone Instance
    :steps:
        1. Create a use entry
        2. Create DNA shared config entry container
        3. Create DNA shared config entry
        4. Add DNA plugin config entry
        5. Enable DNA plugin
        6. Restart the instance
        7. Replace dnaType with invalid value
    :expectedresults:
        1. Successful
        2. Successful
        3. Successful
        4. Successful
        5. Successful
        6. Successful
        7. Unwilling to perform exception should be raised
    """

    standalone = topology_st.standalone
    dna_plugin = DNAPlugin(standalone)

    log.info("Creating an entry...")
    user_accounts = UserAccounts(standalone, DEFAULT_SUFFIX)
    user_accounts.create_test_user(uid=1)

    log.info("Creating \"ou=ranges\"...")
    org_units = OrganizationalUnits(standalone, DEFAULT_SUFFIX)
    ou_ranges = org_units.create(properties={'ou': 'ranges'})
    ou_people = org_units.get("People")

    log.info("Creating DNA shared config entry...")
    # Shared config advertises this host/port and its remaining range
    shared_cfg_props = {'dnaHostname': str(standalone.host),
                        'dnaPortNum': str(standalone.port),
                        'dnaRemainingValues': '9501'}
    DNAPluginSharedConfigs(standalone, ou_ranges.dn).create(properties=shared_cfg_props)

    log.info("Add dna plugin config entry...")
    # A valid config to start from; dnaType is mutated to an invalid value below
    dna_cfg_props = {'cn': 'dna config',
                     'dnaType': 'description',
                     'dnaMaxValue': '10000',
                     'dnaMagicRegen': '0',
                     'dnaFilter': '(objectclass=top)',
                     'dnaScope': ou_people.dn,
                     'dnaNextValue': '500',
                     'dnaSharedCfgDN': ou_ranges.dn}
    dna_config = DNAPluginConfigs(standalone, dna_plugin.dn).create(properties=dna_cfg_props)

    log.info("Enable the DNA plugin...")
    dna_plugin.enable()

    log.info("Restarting the server...")
    standalone.restart()

    log.info("Apply an invalid attribute to the DNA config(dnaType: foo)...")
    with pytest.raises(ldap.UNWILLING_TO_PERFORM):
        dna_config.replace('dnaType', 'foo')
# --- END COPYRIGHT BLOCK ---
#
import os
import ldap
import logging
import pytest
import time
from lib389._constants import DEFAULT_SUFFIX
from lib389.config import Config
from lib389.plugins import USNPlugin, MemberOfPlugin
from lib389.idm.group import Groups
from lib389.idm.user import UserAccounts
from lib389.idm.organizationalunit import OrganizationalUnit
from lib389.tombstone import Tombstones
from lib389.rootdse import RootDSE
from lib389.topologies import topology_st, topology_m2

pytestmark = pytest.mark.tier1

log = logging.getLogger(__name__)

USER_NUM = 10
GROUP_NUM = 3


def check_entryusn_no_duplicates(entryusn_list):
    """Check that all values in the list are unique"""

    # Duplicate detection via set collapse
    if len(entryusn_list) > len(set(entryusn_list)):
        # Fix: was an f-string with no placeholders
        raise AssertionError("EntryUSN values have duplicates, please, check logs")


def check_lastusn_after_restart(inst):
    """Check that last usn is the same after restart"""

    root_dse = RootDSE(inst)
    last_usn_before = root_dse.get_attr_val_int("lastusn;userroot")
    inst.restart()
    last_usn_after = root_dse.get_attr_val_int("lastusn;userroot")
    assert last_usn_after == last_usn_before


@pytest.fixture(scope="module")
def setup(topology_st, request):
    """
    Enable USN plug-in
    Enable MEMBEROF plugin
    Add test entries
    """

    inst = topology_st.standalone

    log.info("Enable the USN plugin...")
    plugin = USNPlugin(inst)
    plugin.enable()

    log.info("Enable the MEMBEROF plugin...")
    plugin = MemberOfPlugin(inst)
    plugin.enable()

    inst.restart()

    users_list = []
    log.info("Adding test entries...")
    users = UserAccounts(inst, DEFAULT_SUFFIX)
    # Fix: loop variable renamed from "id" (shadowed the builtin)
    for idx in range(USER_NUM):
        user = users.create_test_user(uid=idx)
        users_list.append(user)

    groups_list = []
    log.info("Adding test groups...")
    groups = Groups(inst, DEFAULT_SUFFIX)
    for idx in range(GROUP_NUM):
        group = groups.create(properties={'cn': f'test_group{idx}'})
        groups_list.append(group)

    def fin():
        # Best-effort cleanup: entries may already be gone
        for user in users_list:
            try:
                user.delete()
            except ldap.NO_SUCH_OBJECT:
                pass
        for group in groups_list:
            try:
                group.delete()
            except ldap.NO_SUCH_OBJECT:
                pass
    request.addfinalizer(fin)

    return {"users": users_list,
            "groups": groups_list}


def test_entryusn_no_duplicates(topology_st, setup):
    """Verify that entryUSN is not duplicated after memberOf operation

    :id: 1a7d382d-1214-4d56-b9c2-9c4ed57d1683
    :setup: Standalone instance, Groups and Users, USN and memberOf are enabled
    :steps:
        1. Add a member to group 1
        2. Add a member to group 1 and 2
        3. Check that entryUSNs are different
        4. Check that lastusn before and after a restart are the same
    :expectedresults:
        1. Success
        2. Success
        3. Success
        4. Success
    """

    inst = topology_st.standalone
    config = Config(inst)
    config.replace('nsslapd-accesslog-level', '260')  # Internal op
    config.replace('nsslapd-errorlog-level', '65536')
    config.replace('nsslapd-plugin-logging', 'on')
    entryusn_list = []

    users = setup["users"]
    groups = setup["groups"]

    groups[0].replace('member', users[0].dn)
    entryusn_list.append(users[0].get_attr_val_int('entryusn'))
    log.info(f"{users[0].dn}_1: {entryusn_list[-1:]}")
    entryusn_list.append(groups[0].get_attr_val_int('entryusn'))
    log.info(f"{groups[0].dn}_1: {entryusn_list[-1:]}")
    check_entryusn_no_duplicates(entryusn_list)

    groups[1].replace('member', [users[0].dn, users[1].dn])
    entryusn_list.append(users[0].get_attr_val_int('entryusn'))
    log.info(f"{users[0].dn}_2: {entryusn_list[-1:]}")
    entryusn_list.append(users[1].get_attr_val_int('entryusn'))
    log.info(f"{users[1].dn}_2: {entryusn_list[-1:]}")
    entryusn_list.append(groups[1].get_attr_val_int('entryusn'))
    log.info(f"{groups[1].dn}_2: {entryusn_list[-1:]}")
    check_entryusn_no_duplicates(entryusn_list)

    check_lastusn_after_restart(inst)


def test_entryusn_is_same_after_failure(topology_st, setup):
    """Verify that entryUSN is the same after failed operation

    :id: 1f227533-370a-48c1-b920-9b3b0bcfc32e
    :setup: Standalone instance, Groups and Users, USN and memberOf are enabled
    :steps:
        1. Get current group's entryUSN value
        2. Try to modify the group with an invalid syntax
        3. Get new group's entryUSN value and compare with old
        4. Check that lastusn before and after a restart are the same
    :expectedresults:
        1. Success
        2. Invalid Syntax error
        3. Should be the same
        4. Success
    """

    inst = topology_st.standalone
    users = setup["users"]

    # We need this update so we get the latest USN pointed to our entry
    users[0].replace('description', 'update')

    entryusn_before = users[0].get_attr_val_int('entryusn')
    users[0].replace('description', 'update')
    try:
        users[0].replace('uid', 'invalid update')
    except ldap.NOT_ALLOWED_ON_RDN:
        pass
    users[0].replace('description', 'second update')
    entryusn_after = users[0].get_attr_val_int('entryusn')

    # entryUSN should be OLD + 2 (only two user updates)
    assert entryusn_after == (entryusn_before + 2)

    check_lastusn_after_restart(inst)


def test_entryusn_after_repl_delete(topology_m2):
    """Verify that entryUSN is incremented on 1 after delete operation which creates a tombstone

    :id: 1704cf65-41bc-4347-bdaf-20fc2431b218
    :setup: An instance with replication, Users, USN enabled
    :steps:
        1. Try to delete a user
        2. Check the tombstone has the incremented USN
        3. Try to delete ou=People with users
        4. Check the entry has a not incremented entryUSN
    :expectedresults:
        1. Success
        2. Success
        3. Should fail with Not Allowed On Non-leaf error
        4. Success
    """

    inst = topology_m2.ms["supplier1"]
    plugin = USNPlugin(inst)
    plugin.enable()
    inst.restart()
    users = UserAccounts(inst, DEFAULT_SUFFIX)

    try:
        user_1 = users.create_test_user()
        user_rdn = user_1.rdn
        tombstones = Tombstones(inst, DEFAULT_SUFFIX)

        user_1.replace('description', 'update_ts')
        user_usn = user_1.get_attr_val_int('entryusn')

        user_1.delete()
        time.sleep(1)  # Gives a little time for tombstone creation to complete

        ts = tombstones.get(user_rdn)
        ts_usn = ts.get_attr_val_int('entryusn')

        assert (user_usn + 1) == ts_usn

        user_1 = users.create_test_user()
        org = OrganizationalUnit(inst, f"ou=People,{DEFAULT_SUFFIX}")
        org.replace('description', 'update_ts')
        ou_usn_before = org.get_attr_val_int('entryusn')
        try:
            org.delete()
        except ldap.NOT_ALLOWED_ON_NONLEAF:
            pass
        ou_usn_after = org.get_attr_val_int('entryusn')
        assert ou_usn_before == ou_usn_after

    finally:
        try:
            user_1.delete()
        except ldap.NO_SUCH_OBJECT:
            pass


if __name__ == '__main__':
    # Run isolated
    # -s for DEBUG mode
    CURRENT_FILE = os.path.realpath(__file__)
    # Fix: pytest.main() requires a list of args (the string form was
    # removed in modern pytest); also matches the sibling test modules.
    pytest.main(["-s", CURRENT_FILE])
# --- END COPYRIGHT BLOCK ---
#
import pytest
import time
from lib389.topologies import topology_st as topo
from lib389.idm.user import UserAccounts
from lib389.idm.account import Account
from lib389._constants import DEFAULT_SUFFIX
from lib389.idm.group import Groups
from lib389.config import Config
from lib389.idm.organizationalunit import OrganizationalUnits, OrganizationalUnit
from lib389.plugins import MEPTemplates, MEPConfigs, ManagedEntriesPlugin, MEPTemplate
from lib389.idm.nscontainer import nsContainers
from lib389.idm.domain import Domain
import ldap
import logging

logging.getLogger(__name__).setLevel(logging.DEBUG)
log = logging.getLogger(__name__)

pytestmark = pytest.mark.tier1
USER_PASSWORD = 'password'


def _create_ou(inst, name):
    """Create ou=<name> directly under the default suffix."""
    OrganizationalUnits(inst, DEFAULT_SUFFIX).create(properties={'ou': name})


def _create_template(inst, name):
    """Create a MEP template mapping users to posixGroup entries; return its DN."""
    templates = MEPTemplates(inst, DEFAULT_SUFFIX)
    temp = templates.create(properties={
        'cn': f"{name}_template",
        'mepRDNAttr': 'cn',
        'mepStaticAttr': ['objectclass: posixGroup', 'objectclass: top', f'description: {name}'],
        'mepMappedAttr': ['cn: $uid', 'memberUid: $uid', 'gidNumber: $uidNumber']
    })
    return temp.dn


def _config_plugin(inst, name, template):
    """Add a MEP config scoped to ou=People that manages entries under ou=<name>."""
    configs = MEPConfigs(inst)
    configs.create(properties={
        'cn': f"{name}_config",
        'originScope': f"ou=People,{DEFAULT_SUFFIX}",
        'originFilter': 'objectClass=posixAccount',
        'managedBase': f"ou={name},{DEFAULT_SUFFIX}",
        'managedTemplate': template})


def _create_user(inst, name):
    """Create a plain posixAccount user under the default suffix."""
    users = UserAccounts(inst, DEFAULT_SUFFIX)
    users.create(properties={
        'uid': name,
        'cn': name,
        'sn': name,
        'uidNumber': '100',
        'gidNumber': '100',
        'homeDirectory': f"/home/{name}",
    })


@pytest.mark.ds1870
@pytest.mark.xfail(reason='https://github.com/389ds/389-ds-base/issues/1870')
def test_overlapping_scope(topo):
    """Test overlapping scopes in Managed Entries

    :id: 7038ab53-89c8-4fce-897a-76d42ec85063
    :setup: Standalone Instance
    :steps:
        1. Make the two subtrees for targets (Create organizational units `oua`, and `oub`).
        2. Add the template A (`oua`).
        3. Enable the plugin A (`oua`).
        4. Add the template B (`oub`).
        5. Enable the plugin B (`oub`).
        6. Add a user (`user1`).
        7. Search for the user to ensure it's present in both A and B.
    :expectedresults:
        1. Success
        2. Success
        3. Success
        4. Success
        5. Success
        6. Success
        7. Success
    """

    inst = topo.standalone

    log.info("Creating organizational units")
    _create_ou(inst, 'oua')
    _create_ou(inst, 'oub')

    log.info("Creating template for 'oua'")
    dn = _create_template(inst, 'oua')

    log.info("Configuring plugin for 'oua'")
    _config_plugin(inst, 'oua', dn)

    log.info("Creating template for 'oub'")
    dn = _create_template(inst, 'oub')

    log.info("Configuring plugin for 'oub'")
    _config_plugin(inst, 'oub', dn)

    log.info("Creating user 'user1'")
    _create_user(inst, 'user1')

    log.info("Searching for user in both A and B")
    results = inst.search_s(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE, '(&(cn=user1)(objectClass=posixGroup))', ['cn'])

    log.info(f"Found {len(results)} results for user search")
    assert(len(results) == 2)


# NOTE: fixture name keeps the historical misspelling ("inital") because
# other tests in this module request it by name.
@pytest.fixture(scope="module")
def _create_inital(topo):
    """
    Will create entries for this module
    """
    meps = MEPTemplates(topo.standalone, DEFAULT_SUFFIX)
    mep_template1 = meps.create(
        properties={'cn': 'UPG Template', 'mepRDNAttr': 'cn', 'mepStaticAttr': 'objectclass: posixGroup',
                    'mepMappedAttr': 'cn: $uid|gidNumber: $gidNumber|description: User private group for $uid'.split(
                        '|')})
    conf_mep = MEPConfigs(topo.standalone)
    conf_mep.create(properties={'cn': 'UPG Definition1', 'originScope': f'cn=Users,{DEFAULT_SUFFIX}',
                                'originFilter': 'objectclass=posixaccount', 'managedBase': f'cn=Groups,{DEFAULT_SUFFIX}',
                                'managedTemplate': mep_template1.dn})
    container = nsContainers(topo.standalone, DEFAULT_SUFFIX)
    for cn in ['Users', 'Groups']:
        container.create(properties={'cn': cn})


def test_managed_entry_betxn(topo):
    """Test if failure to create a managed entry rolls back the transaction.

    :id: 7aa74994-f89b-11ec-9821-98fa9ba19b65
    :setup: Standalone Instance
    :customerscenario: True
    :steps:
        1. Check that plugin active if not activate it
        2. Create a Template entry
        3. Create a definition entry
        4. Attempt to create a user
        5. Verify that transaction is aborted and user not created
    :expectedresults:
        1. Success
        2. Success
        3. Success
        4. Success
        5. Success
    """

    log.info("Make sure the plugin is active")
    me_plugn = ManagedEntriesPlugin(topo.standalone)
    log.info("Stopping and starting the Managed Entry plugin.")
    me_plugn.disable()
    me_plugn.enable()
    assert me_plugn.status()
    log.info("Plugin Restarted.")
    log.info("Adding organization units")
    ous1 = OrganizationalUnits(topo.standalone, DEFAULT_SUFFIX)
    ou1_people = ous1.create(properties={'ou': 'tst_people'})
    ou1_groups = ous1.create(properties={'ou': 'tst_groups'})

    log.info("Create the template entry")
    mep_templates1 = MEPTemplates(topo.standalone, DEFAULT_SUFFIX)
    mep_temp1 = mep_templates1.create(properties={
        'cn': 'MEP template1',
        'mepRDNAttr': 'cn',
        'mepStaticAttr': 'objectclass: groupOfNames|objectclass: extensibleObject'.split('|'),
        'mepMappedAttr': 'cn: $cn|uid: $cn|gidNumber: $uidNumber'.split('|')
    })
    conf_mep = MEPConfigs(topo.standalone)
    log.info("Create definition entry.")
    conf_mep.create(properties={
        'cn': 'cn=config',
        'originScope': ou1_people.dn,
        'originFilter': 'objectclass=posixAccount',
        'managedBase': ou1_groups.dn,
        'managedTemplate': mep_temp1.dn})
    topo.standalone.restart()
    log.info("Attempt to add a user that doesn't fit the template.")
    users = UserAccounts(topo.standalone, DEFAULT_SUFFIX, rdn='ou={}'.format(ou1_people.rdn))
    with pytest.raises(ldap.UNWILLING_TO_PERFORM):
        mgd_entry = users.create(properties={
            'uid': 'test_uid',
            'cn': 'test_uid',
            'sn': 'test_sn',
        })
        # Only reached if the add unexpectedly succeeded; cleans up before
        # pytest.raises reports the missing exception.
        mgd_entry.delete()
    log.info("Cleaning up")


def test_binddn_tracking(topo, _create_inital):
    """Test Managed Entries basic functionality

    :id: ea2ddfd4-aaec-11ea-8416-8c16451d917b
    :setup: Standalone Instance
    :steps:
        1. Set nsslapd-plugin-binddn-tracking attribute under cn=config
        2. Add user
        3. Managed Entry Plugin runs against managed entries upon any update without validating
        4. verify creation of User Private Group with its time stamp value
        5. Modify the SN attribute which is not mapped with managed entry
        6. run ModRDN operation and check the User Private group
        7. Check the time stamp of UPG should be changed now
        8. Check the creatorsname should be user dn and internalCreatorsname should be plugin name
        9. Check if a managed group entry was created
    :expectedresults:
        1. Success
        2. Success
        3. Success
        4. Success
        5. Success
        6. Success
        7. Success
        8. Success
        9. Success
    """
    config = Config(topo.standalone)
    # set nsslapd-plugin-binddn-tracking attribute under cn=config
    config.replace('nsslapd-plugin-binddn-tracking', 'on')
    # Add user
    user = UserAccounts(topo.standalone, f'cn=Users,{DEFAULT_SUFFIX}', rdn=None).create_test_user()
    assert user.get_attr_val_utf8('mepManagedEntry') == f'cn=test_user_1000,cn=Groups,{DEFAULT_SUFFIX}'
    entry = Account(topo.standalone, f'cn=test_user_1000,cn=Groups,{DEFAULT_SUFFIX}')
    # Managed Entry Plugin runs against managed entries upon any update without validating
    # verify creation of User Private Group with its time stamp value
    stamp1 = entry.get_attr_val_utf8('modifyTimestamp')
    user.replace('sn', 'NewSN_modified')
    stamp2 = entry.get_attr_val_utf8('modifyTimestamp')
    # Modify the SN attribute which is not mapped with managed entry
    # Check the time stamp of UPG should not be changed
    assert stamp1 == stamp2
    time.sleep(1)
    # run ModRDN operation and check the User Private group
    # Fix: use DEFAULT_SUFFIX instead of a hardcoded dc=example,dc=com
    # (same value, consistent with the rest of this module)
    user.rename(new_rdn='uid=UserNewRDN', newsuperior=f'cn=Users,{DEFAULT_SUFFIX}')
    assert user.get_attr_val_utf8('mepManagedEntry') == f'cn=UserNewRDN,cn=Groups,{DEFAULT_SUFFIX}'
    entry = Account(topo.standalone, f'cn=UserNewRDN,cn=Groups,{DEFAULT_SUFFIX}')
    stamp3 = entry.get_attr_val_utf8('modifyTimestamp')
    # Check the time stamp of UPG should be changed now
    assert stamp2 != stamp3
    time.sleep(1)
    user.replace('gidNumber', '1')
    stamp4 = entry.get_attr_val_utf8('modifyTimestamp')
    assert stamp4 != stamp3
    # Check the creatorsname should be user dn and internalCreatorsname should be plugin name
    assert entry.get_attr_val_utf8('creatorsname') == 'cn=directory manager'
    assert entry.get_attr_val_utf8('internalCreatorsname') == 'cn=Managed Entries,cn=plugins,cn=config'
    assert entry.get_attr_val_utf8('modifiersname') == 'cn=directory manager'
    user.delete()
    config.replace('nsslapd-plugin-binddn-tracking', 'off')


class WithObjectClass(Account):
    # Minimal Account subclass used to add the posixAccount objectclass to
    # an existing inetorgperson later in this module's tests.
    def __init__(self, instance, dn=None):
        super(WithObjectClass, self).__init__(instance, dn)
        self._rdn_attribute = 'uid'
        self._create_objectclasses = ['top', 'person', 'inetorgperson']
Success + """ + # Check the plug-in status + mana = ManagedEntriesPlugin(topo.standalone) + assert mana.status() + + # Add Template and definition entry + org1 = OrganizationalUnits(topo.standalone, DEFAULT_SUFFIX).create(properties={'ou': 'Users'}) + org2 = OrganizationalUnit(topo.standalone, f'ou=Groups,{DEFAULT_SUFFIX}') + meps = MEPTemplates(topo.standalone, DEFAULT_SUFFIX) + mep_template1 = meps.create(properties={ + 'cn': 'UPG Template1', + 'mepRDNAttr': 'cn', + 'mepStaticAttr': 'objectclass: posixGroup', + 'mepMappedAttr': 'cn: $uid|gidNumber: $gidNumber|description: User private group for $uid'.split('|')}) + conf_mep = MEPConfigs(topo.standalone) + conf_mep.create(properties={ + 'cn': 'UPG Definition2', + 'originScope': org1.dn, + 'originFilter': 'objectclass=posixaccount', + 'managedBase': org2.dn, + 'managedTemplate': mep_template1.dn}) + + # Add users with PosixAccount ObjectClass and verify creation of User Private Group + user = UserAccounts(topo.standalone, f'ou=Users,{DEFAULT_SUFFIX}', rdn=None).create_test_user() + assert user.get_attr_val_utf8('mepManagedEntry') == f'cn=test_user_1000,ou=Groups,{DEFAULT_SUFFIX}' + + # Disable the plug-in and check the status + mana.disable() + user.delete() + topo.standalone.restart() + + # Add users with PosixAccount ObjectClass when the plug-in is disabled and creation of UPG should fail + user = UserAccounts(topo.standalone, f'ou=Users,{DEFAULT_SUFFIX}', rdn=None).create_test_user() + assert not user.get_attr_val_utf8('mepManagedEntry') + + # Enable the plug-in and check the status + mana.enable() + user.delete() + topo.standalone.restart() + + # Add users with PosixAccount ObjectClass and verify creation of User Private Group + user = UserAccounts(topo.standalone, f'ou=Users,{DEFAULT_SUFFIX}', rdn=None).create_test_user() + assert user.get_attr_val_utf8('mepManagedEntry') == f'cn=test_user_1000,ou=Groups,{DEFAULT_SUFFIX}' + + # Add users, run ModRDN operation and check the User Private group + # Add users, run 
LDAPMODIFY to change the gidNumber and check the User Private group + user.rename(new_rdn='uid=UserNewRDN', newsuperior='ou=Users,dc=example,dc=com') + assert user.get_attr_val_utf8('mepManagedEntry') == f'cn=UserNewRDN,ou=Groups,{DEFAULT_SUFFIX}' + user.replace('gidNumber', '20209') + entry = Account(topo.standalone, f'cn=UserNewRDN,ou=Groups,{DEFAULT_SUFFIX}') + assert entry.get_attr_val_utf8('gidNumber') == '20209' + user.replace_many(('sn', 'new_modified_sn'), ('gidNumber', '31309')) + assert entry.get_attr_val_utf8('gidNumber') == '31309' + user.delete() + + # Checking whether creation of User Private group fails for existing group entry + Groups(topo.standalone, f'ou=Groups,{DEFAULT_SUFFIX}', rdn=None).create(properties={'cn': 'MENTRY_14'}) + user = UserAccounts(topo.standalone, f'ou=Users,{DEFAULT_SUFFIX}', rdn=None).create_test_user() + with pytest.raises(ldap.NO_SUCH_OBJECT): + entry.status() + user.delete() + + # Checking whether adding of posixAccount objectClass to existing user creates UPG + # Add Users without posixAccount objectClass + users = WithObjectClass(topo.standalone, f'uid=test_test, ou=Users,{DEFAULT_SUFFIX}') + user_properties1 = {'uid': 'test_test', 'cn': 'test', 'sn': 'test', 'mail': 'sasa@sasa.com', 'telephoneNumber': '123'} + user = users.create(properties=user_properties1) + assert not user.get_attr_val_utf8('mepManagedEntry') + + # Add posixAccount objectClass + user.replace_many(('objectclass', ['top', 'person', 'inetorgperson', 'posixAccount']), + ('homeDirectory', '/home/ok'), + ('uidNumber', '61603'), ('gidNumber', '61603')) + assert not user.get_attr_val_utf8('mepManagedEntry') + user = UserAccounts(topo.standalone, f'ou=Users,{DEFAULT_SUFFIX}', rdn=None).create_test_user() + entry = Account(topo.standalone, 'cn=test_user_1000,ou=Groups,dc=example,dc=com') + + # Add inetuser objectClass + user.replace_many( + ('objectclass', ['top', 'account', 'posixaccount', 'inetOrgPerson', + 'organizationalPerson', 'nsMemberOf', 'nsAccount', 
+ 'person', 'mepOriginEntry', 'inetuser']), + ('memberOf', entry.dn)) + assert entry.status() + user.delete() + user = UserAccounts(topo.standalone, f'ou=Users,{DEFAULT_SUFFIX}', rdn=None).create_test_user() + entry = Account(topo.standalone, 'cn=test_user_1000,ou=Groups,dc=example,dc=com') + + # Add groupofNames objectClass + user.replace_many( + ('objectclass', ['top', 'account', 'posixaccount', 'inetOrgPerson', + 'organizationalPerson', 'nsMemberOf', 'nsAccount', + 'person', 'mepOriginEntry', 'groupofNames']), + ('memberOf', user.dn)) + assert entry.status() + + # Running ModRDN operation and checking the user private groups mepManagedBy + # attribute was also reset because the modrdn on the origin will do a modrdn + # on checkManagedEntry to match the new rdn value of the origin entry + checkManagedEntry = UserAccounts(topo.standalone, f'ou=Groups,{DEFAULT_SUFFIX}', rdn=None) + check_entry = checkManagedEntry.create(properties={ + 'objectclass': ['top', 'extensibleObject'], + 'uid': 'CheckModRDN', + 'uidNumber': '12', + 'gidNumber': '12', + 'homeDirectory': '/home', + 'sn': 'tmp', + 'cn': 'tmp', + }) + user.replace('mepManagedEntry', check_entry.dn) + user.rename(new_rdn='uid=UserNewRDN', newsuperior='ou=Users,dc=example,dc=com') + assert user.get_attr_val_utf8_l('mepManagedEntry') == f'cn=UserNewRDN,ou=Groups,{DEFAULT_SUFFIX}'.lower() + + # Deleting mepManagedBy attribute and running ModRDN operation to check if it creates a new UPG + user.remove('mepManagedEntry', f'cn=UserNewRDN,ou=Groups,{DEFAULT_SUFFIX}') + user.rename(new_rdn='uid=UserNewRDN1', newsuperior='ou=Users,dc=example,dc=com') + assert user.get_attr_val_utf8('mepManagedEntry') == f'cn=UserNewRDN1,ou=Groups,{DEFAULT_SUFFIX}' + + # Change the RDN of template entry, DSA Unwilling to perform error expected + mep = MEPTemplate(topo.standalone, f'cn=UPG Template,{DEFAULT_SUFFIX}') + with pytest.raises(ldap.UNWILLING_TO_PERFORM): + mep.rename(new_rdn='cn=UPG Template2', newsuperior='dc=example,dc=com') 
+ + # Change the RDN of cn=Users to cn=TestUsers and check UPG are deleted + before = user.get_attr_val_utf8('mepManagedEntry') + user.rename(new_rdn='uid=Anuj', newsuperior='ou=Users,dc=example,dc=com') + assert user.get_attr_val_utf8('mepManagedEntry') != before + + +def test_managed_entry_removal(topo): + """Check that we can't remove managed entry manually + + :id: cf9c5be5-97ef-46fc-b199-8346acf4c296 + :setup: Standalone Instance + :steps: + 1. Enable the plugin + 2. Restart the instance + 3. Add our org units + 4. Set up config entry and template entry for the org units + 5. Add an entry that meets the MEP scope + 6. Check if a managed group entry was created + 7. Try to remove the entry while bound as Admin (non-DM) + 8. Remove the entry while bound as DM + 9. Check that the managing entry can be deleted too + :expectedresults: + 1. Success + 2. Success + 3. Success + 4. Success + 5. Success + 6. Success + 7. Should fail + 8. Success + 9. Success + """ + + inst = topo.standalone + + # Add ACI so we can test that non-DM user can't delete managed entry + domain = Domain(inst, DEFAULT_SUFFIX) + ACI_TARGET = f"(target = \"ldap:///{DEFAULT_SUFFIX}\")" + ACI_TARGETATTR = "(targetattr = *)" + ACI_ALLOW = "(version 3.0; acl \"Admin Access\"; allow (all) " + ACI_SUBJECT = "(userdn = \"ldap:///anyone\");)" + ACI_BODY = ACI_TARGET + ACI_TARGETATTR + ACI_ALLOW + ACI_SUBJECT + domain.add('aci', ACI_BODY) + + # stop the plugin, and start it + plugin = ManagedEntriesPlugin(inst) + plugin.disable() + plugin.enable() + + # Add our org units + ous = OrganizationalUnits(inst, DEFAULT_SUFFIX) + ou_people = ous.create(properties={'ou': 'managed_people'}) + ou_groups = ous.create(properties={'ou': 'managed_groups'}) + + mep_templates = MEPTemplates(inst, DEFAULT_SUFFIX) + mep_template1 = mep_templates.create(properties={ + 'cn': 'MEP template', + 'mepRDNAttr': 'cn', + 'mepStaticAttr': 'objectclass: groupOfNames|objectclass: extensibleObject'.split('|'), + 'mepMappedAttr': 'cn: 
$cn|uid: $cn|gidNumber: $uidNumber'.split('|') + }) + mep_configs = MEPConfigs(inst) + mep_configs.create(properties={'cn': 'config', + 'originScope': ou_people.dn, + 'originFilter': 'objectclass=posixAccount', + 'managedBase': ou_groups.dn, + 'managedTemplate': mep_template1.dn}) + inst.restart() + + # Add an entry that meets the MEP scope + test_users_m1 = UserAccounts(inst, DEFAULT_SUFFIX, rdn='ou={}'.format(ou_people.rdn)) + managing_entry = test_users_m1.create_test_user(1001) + managing_entry.reset_password(USER_PASSWORD) + user_bound_conn = managing_entry.bind(USER_PASSWORD) + + # Get the managed entry + managed_groups = Groups(inst, ou_groups.dn, rdn=None) + managed_entry = managed_groups.get(managing_entry.rdn) + + # Check that the managed entry was created + assert managed_entry.exists() + + # Try to remove the entry while bound as Admin (non-DM) + managed_groups_user_conn = Groups(user_bound_conn, ou_groups.dn, rdn=None) + managed_entry_user_conn = managed_groups_user_conn.get(managed_entry.rdn) + with pytest.raises(ldap.UNWILLING_TO_PERFORM): + managed_entry_user_conn.delete() + assert managed_entry_user_conn.exists() + + # Remove the entry while bound as DM + managed_entry.delete() + assert not managed_entry.exists() + + # Check that the managing entry can be deleted too + managing_entry.delete() + assert not managing_entry.exists() + + +if __name__ == '__main__': + # Run isolated + # -s for DEBUG mode + CURRENT_FILE = os.path.realpath(__file__) + pytest.main("-s %s" % CURRENT_FILE) diff --git a/dirsrvtests/tests/suites/plugins/memberof_test.py b/dirsrvtests/tests/suites/plugins/memberof_test.py new file mode 100644 index 0000000..40853c2 --- /dev/null +++ b/dirsrvtests/tests/suites/plugins/memberof_test.py @@ -0,0 +1,2831 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2016 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. 
+# --- END COPYRIGHT BLOCK --- +# +import pytest +from lib389.tasks import * +from lib389.utils import * +from lib389.topologies import topology_st +from lib389._constants import PLUGIN_MEMBER_OF, SUFFIX + +pytestmark = pytest.mark.tier1 + +DEBUGGING = os.getenv('DEBUGGING', False) + +logging.getLogger(__name__).setLevel(logging.DEBUG) +log = logging.getLogger(__name__) + +MEMBEROF_PLUGIN_DN = ('cn=' + PLUGIN_MEMBER_OF + ',cn=plugins,cn=config') +USER1_DN = 'uid=user1,' + DEFAULT_SUFFIX +USER2_DN = 'uid=user2,' + DEFAULT_SUFFIX +GROUP_DN = 'cn=group,' + DEFAULT_SUFFIX + +PLUGIN_TYPE = 'nsslapd-pluginType' +PLUGIN_MEMBEROF_GRP_ATTR = 'memberofgroupattr' +PLUGIN_ENABLED = 'nsslapd-pluginEnabled' + +USER_RDN = "user" +USERS_CONTAINER = "ou=people,%s" % SUFFIX + +GROUP_RDN = "group" +GROUPS_CONTAINER = "ou=groups,%s" % SUFFIX + + +def _set_memberofgroupattr_add(topology_st, values): + topology_st.standalone.modify_s(MEMBEROF_PLUGIN_DN, + [(ldap.MOD_ADD, + PLUGIN_MEMBEROF_GRP_ATTR, + ensure_bytes(values))]) + + +def _get_user_rdn(ext): + return ensure_bytes("uid=%s_%s" % (USER_RDN, ext)) + + +def _get_user_dn(ext): + return ensure_bytes("%s,%s" % (ensure_str(_get_user_rdn(ext)), USERS_CONTAINER)) + + +def _get_group_rdn(ext): + return ensure_bytes("cn=%s_%s" % (GROUP_RDN, ext)) + + +def _get_group_dn(ext): + return ensure_bytes("%s,%s" % (ensure_str(_get_group_rdn(ext)), GROUPS_CONTAINER)) + + +def _create_user(topology_st, ext): + user_dn = ensure_str(_get_user_dn(ext)) + topology_st.standalone.add_s(Entry((user_dn, { + 'objectclass': 'top extensibleObject'.split(), + 'uid': ensure_str(_get_user_rdn(ext)) + }))) + log.info("Create user %s" % user_dn) + return ensure_bytes(user_dn) + + +def _delete_user(topology_st, ext): + user_dn = ensure_str(_get_user_dn(ext)) + topology_st.standalone.delete_s(user_dn) + log.info("Delete user %s" % user_dn) + + +def _create_group(topology_st, ext): + group_dn = ensure_str(_get_group_dn(ext)) + 
topology_st.standalone.add_s(Entry((group_dn, { + 'objectclass': 'top groupOfNames groupOfUniqueNames extensibleObject'.split(), + 'ou': ensure_str(_get_group_rdn(ext)) + }))) + log.info("Create group %s" % group_dn) + return ensure_bytes(group_dn) + + +def _delete_group(topology_st, ext): + group_dn = ensure_str(_get_group_dn(ext)) + topology_st.standalone.delete_s(group_dn) + log.info("Delete group %s" % group_dn) + + +def _check_memberattr(topology_st, entry, memberattr, value): + log.info("Check %s.%s = %s" % (entry, memberattr, value)) + entry = topology_st.standalone.getEntry(ensure_str(entry), ldap.SCOPE_BASE, '(objectclass=*)', [memberattr]) + if not entry.hasAttr(ensure_str(memberattr)): + return False + + found = False + for val in entry.getValues(ensure_str(memberattr)): + log.info("%s: %s" % (memberattr, ensure_str(val))) + if ensure_str(value.lower()) == ensure_str(val.lower()): + found = True + break + return found + + +def _check_memberof(topology_st, member, group): + log.info("Lookup memberof from %s" % member) + entry = topology_st.standalone.getEntry(ensure_str(member), ldap.SCOPE_BASE, '(objectclass=*)', ['memberof']) + if not entry.hasAttr('memberof'): + return False + + found = False + for val in entry.getValues('memberof'): + log.info("memberof: %s" % ensure_str(val)) + if ensure_str(group.lower()) == ensure_str(val.lower()): + found = True + log.info("--> membership verified") + break + return found + + +def test_betxnpostoperation_replace(topology_st): + """Test modify the memberof plugin operation to use the new type + + :id: d222af17-17a6-48a0-8f22-a38306726a91 + :setup: Standalone instance + :steps: + 1. Set plugin type to betxnpostoperation + 2. Check is was changed + :expectedresults: + 1. Success + 2. 
Success + """ + + topology_st.standalone.modify_s(MEMBEROF_PLUGIN_DN, + [(ldap.MOD_REPLACE, + PLUGIN_TYPE, + b'betxnpostoperation')]) + topology_st.standalone.restart() + ent = topology_st.standalone.getEntry(MEMBEROF_PLUGIN_DN, ldap.SCOPE_BASE, "(objectclass=*)", [PLUGIN_TYPE]) + assert ent.hasAttr(PLUGIN_TYPE) + assert ent.getValue(PLUGIN_TYPE) == b'betxnpostoperation' + + +def test_memberofgroupattr_add(topology_st): + """Check multiple grouping attributes supported + + :id: d222af17-17a6-48a0-8f22-a38306726a92 + :setup: Standalone instance + :steps: + 1. Add memberofgroupattr - 'uniqueMember' + 2. Check we have 'uniqueMember' and 'member' values + :expectedresults: + 1. Success + 2. Success + """ + + _set_memberofgroupattr_add(topology_st, 'uniqueMember') + ent = topology_st.standalone.getEntry(MEMBEROF_PLUGIN_DN, ldap.SCOPE_BASE, "(objectclass=*)", + [PLUGIN_MEMBEROF_GRP_ATTR]) + assert ent.hasAttr(PLUGIN_MEMBEROF_GRP_ATTR) + assert b'member'.lower() in [x.lower() for x in ent.getValues(PLUGIN_MEMBEROF_GRP_ATTR)] + assert b'uniqueMember'.lower() in [x.lower() for x in ent.getValues(PLUGIN_MEMBEROF_GRP_ATTR)] + + +def test_enable(topology_st): + """Check the plug-in is started + + :id: d222af17-17a6-48a0-8f22-a38306726a93 + :setup: Standalone instance + :steps: + 1. Enable the plugin + 2. Restart the instance + :expectedresults: + 1. Success + 2. Server should start and plugin should be on + """ + + log.info("Enable MemberOf plugin") + topology_st.standalone.plugins.enable(name=PLUGIN_MEMBER_OF) + topology_st.standalone.restart() + ent = topology_st.standalone.getEntry(MEMBEROF_PLUGIN_DN, ldap.SCOPE_BASE, "(objectclass=*)", [PLUGIN_ENABLED]) + assert ent.hasAttr(PLUGIN_ENABLED) + assert ent.getValue(PLUGIN_ENABLED).lower() == b'on' + + +def test_member_add(topology_st): + """MemberOf attribute should be successfully added to both the users + + :id: d222af17-17a6-48a0-8f22-a38306726a94 + :setup: Standalone instance + :steps: + 1. Create user and groups + 2. 
Add the users as members to the groups + 3. Check the membership + :expectedresults: + 1. Success + 2. Success + 3. Success + """ + + memofenh1 = _create_user(topology_st, 'memofenh1') + memofenh2 = _create_user(topology_st, 'memofenh2') + + memofegrp1 = _create_group(topology_st, 'memofegrp1') + memofegrp2 = _create_group(topology_st, 'memofegrp2') + + mods = [(ldap.MOD_ADD, 'member', memofenh1), (ldap.MOD_ADD, 'uniqueMember', memofenh2)] + log.info("Update %s is memberof %s (member)" % (memofenh1, memofegrp1)) + log.info("Update %s is memberof %s (uniqueMember)" % (memofenh2, memofegrp1)) + topology_st.standalone.modify_s(ensure_str(memofegrp1), mods) + + log.info("Update %s is memberof %s (member)" % (memofenh1, memofegrp2)) + log.info("Update %s is memberof %s (uniqueMember)" % (memofenh2, memofegrp2)) + topology_st.standalone.modify_s(ensure_str(memofegrp2), mods) + + # assert enh1 is member of grp1 and grp2 + assert _check_memberof(topology_st, member=memofenh1, group=memofegrp1) + assert _check_memberof(topology_st, member=memofenh1, group=memofegrp2) + + # assert enh2 is member of grp1 and grp2 + assert _check_memberof(topology_st, member=memofenh2, group=memofegrp1) + assert _check_memberof(topology_st, member=memofenh2, group=memofegrp2) + + +def test_member_delete_gr1(topology_st): + """Partial removal of memberofgroupattr: removing member attribute from Group1 + + :id: d222af17-17a6-48a0-8f22-a38306726a95 + :setup: Standalone instance + :steps: + 1. Delete a member: enh1 in grp1 + 2. Check the states of the members were changed accordingly + :expectedresults: + 1. Success + 2. 
Success + """ + + memofenh1 = _get_user_dn('memofenh1') + memofenh2 = _get_user_dn('memofenh2') + + memofegrp1 = _get_group_dn('memofegrp1') + memofegrp2 = _get_group_dn('memofegrp2') + log.info("Update %s is no longer memberof %s (member)" % (memofenh1, memofegrp1)) + mods = [(ldap.MOD_DELETE, 'member', memofenh1)] + topology_st.standalone.modify_s(ensure_str(memofegrp1), mods) + + # assert enh1 is NOT member of grp1 and is member of grp2 + assert not _check_memberof(topology_st, member=memofenh1, group=memofegrp1) + assert _check_memberof(topology_st, member=memofenh1, group=memofegrp2) + + # assert enh2 is member of grp1 and is member of grp2 + assert _check_memberof(topology_st, member=memofenh2, group=memofegrp1) + assert _check_memberof(topology_st, member=memofenh2, group=memofegrp2) + + +def test_member_delete_gr2(topology_st): + """Partial removal of memberofgroupattr: removing uniqueMember attribute from Group2 + + :id: d222af17-17a6-48a0-8f22-a38306726a96 + :setup: Standalone instance + :steps: + 1. Delete a uniqueMember: enh2 in grp2 + 2. Check the states of the members were changed accordingly + :expectedresults: + 1. Success + 2. 
Success + """ + + memofenh1 = _get_user_dn('memofenh1') + memofenh2 = _get_user_dn('memofenh2') + + memofegrp1 = _get_group_dn('memofegrp1') + memofegrp2 = _get_group_dn('memofegrp2') + + log.info("Update %s is no longer memberof %s (uniqueMember)" % (memofenh1, memofegrp1)) + mods = [(ldap.MOD_DELETE, 'uniqueMember', memofenh2)] + topology_st.standalone.modify_s(ensure_str(memofegrp2), mods) + + # assert enh1 is NOT member of grp1 and is member of grp2 + assert not _check_memberof(topology_st, member=memofenh1, group=memofegrp1) + assert _check_memberof(topology_st, member=memofenh1, group=memofegrp2) + + # assert enh2 is member of grp1 and is NOT member of grp2 + assert _check_memberof(topology_st, member=memofenh2, group=memofegrp1) + assert not _check_memberof(topology_st, member=memofenh2, group=memofegrp2) + + +def test_member_delete_all(topology_st): + """Complete removal of memberofgroupattr + + :id: d222af17-17a6-48a0-8f22-a38306726a97 + :setup: Standalone instance + :steps: + 1. Delete the rest of the members + 2. Check the states of the members were changed accordingly + :expectedresults: + 1. Success + 2. 
Success + """ + + memofenh1 = _get_user_dn('memofenh1') + memofenh2 = _get_user_dn('memofenh2') + + memofegrp1 = _get_group_dn('memofegrp1') + memofegrp2 = _get_group_dn('memofegrp2') + + log.info("Update %s is no longer memberof %s (uniqueMember)" % (memofenh2, memofegrp1)) + mods = [(ldap.MOD_DELETE, 'uniqueMember', memofenh2)] + topology_st.standalone.modify_s(ensure_str(memofegrp1), mods) + + log.info("Update %s is no longer memberof %s (member)" % (memofenh1, memofegrp2)) + mods = [(ldap.MOD_DELETE, 'member', memofenh1)] + topology_st.standalone.modify_s(ensure_str(memofegrp2), mods) + + # assert enh1 is NOT member of grp1 and is member of grp2 + assert not _check_memberof(topology_st, member=memofenh1, group=memofegrp1) + assert not _check_memberof(topology_st, member=memofenh1, group=memofegrp2) + + # assert enh2 is member of grp1 and is NOT member of grp2 + assert not _check_memberof(topology_st, member=memofenh2, group=memofegrp1) + assert not _check_memberof(topology_st, member=memofenh2, group=memofegrp2) + + +def test_member_after_restart(topology_st): + """MemberOf attribute should be present on both the users + + :id: d222af17-17a6-48a0-8f22-a38306726a98 + :setup: Standalone instance + :steps: + 1. Add a couple of members to the groups + 2. Restart the instance + 3. Check the states of the members were changed accordingly + :expectedresults: + 1. Success + 2. Success + 3. 
Success + """ + + memofenh1 = _get_user_dn('memofenh1') + memofenh2 = _get_user_dn('memofenh2') + + memofegrp1 = _get_group_dn('memofegrp1') + memofegrp2 = _get_group_dn('memofegrp2') + + mods = [(ldap.MOD_ADD, 'member', memofenh1)] + log.info("Update %s is memberof %s (member)" % (memofenh1, memofegrp1)) + topology_st.standalone.modify_s(ensure_str(memofegrp1), mods) + + mods = [(ldap.MOD_ADD, 'uniqueMember', memofenh2)] + log.info("Update %s is memberof %s (uniqueMember)" % (memofenh2, memofegrp2)) + topology_st.standalone.modify_s(ensure_str(memofegrp2), mods) + + # assert enh1 is member of grp1 and is NOT member of grp2 + assert _check_memberof(topology_st, member=memofenh1, group=memofegrp1) + assert not _check_memberof(topology_st, member=memofenh1, group=memofegrp2) + + # assert enh2 is NOT member of grp1 and is member of grp2 + assert not _check_memberof(topology_st, member=memofenh2, group=memofegrp1) + assert _check_memberof(topology_st, member=memofenh2, group=memofegrp2) + + log.info("Remove uniqueMember as a memberofgrpattr") + topology_st.standalone.modify_s(MEMBEROF_PLUGIN_DN, + [(ldap.MOD_DELETE, + PLUGIN_MEMBEROF_GRP_ATTR, + [b'uniqueMember'])]) + topology_st.standalone.restart() + + log.info("Assert that this change of configuration did change the already set values") + # assert enh1 is member of grp1 and is NOT member of grp2 + assert _check_memberof(topology_st, member=memofenh1, group=memofegrp1) + assert not _check_memberof(topology_st, member=memofenh1, group=memofegrp2) + + # assert enh2 is NOT member of grp1 and is member of grp2 + assert not _check_memberof(topology_st, member=memofenh2, group=memofegrp1) + assert _check_memberof(topology_st, member=memofenh2, group=memofegrp2) + + _set_memberofgroupattr_add(topology_st, 'uniqueMember') + topology_st.standalone.restart() + + +def test_memberofgroupattr_uid(topology_st): + """MemberOf attribute should not be added to the user since memberuid is not a DN syntax attribute + + :id: 
d222af17-17a6-48a0-8f22-a38306726a99 + :setup: Standalone instance + :steps: + 1. Try to add memberUid to the group + :expectedresults: + 1. It should fail with Unwilling to perform error + """ + + try: + _set_memberofgroupattr_add(topology_st, 'memberUid') + log.error("Setting 'memberUid' as memberofgroupattr should be rejected") + assert False + except ldap.UNWILLING_TO_PERFORM: + log.error("Setting 'memberUid' as memberofgroupattr is rejected (expected)") + assert True + + +def test_member_add_duplicate_usr1(topology_st): + """Duplicate member attribute to groups + + :id: d222af17-17a6-48a0-8f22-a38306726a10 + :setup: Standalone instance + :steps: + 1. Try to add a member: enh1 which already exists + :expectedresults: + 1. It should fail with Type of value exists error + """ + + memofenh1 = _get_user_dn('memofenh1') + memofegrp1 = _get_group_dn('memofegrp1') + + # assert enh1 is member of grp1 + assert _check_memberof(topology_st, member=memofenh1, group=memofegrp1) + + mods = [(ldap.MOD_ADD, 'member', memofenh1)] + log.info("Try %s is memberof %s (member)" % (memofenh1, memofegrp1)) + try: + topology_st.standalone.modify_s(ensure_str(memofegrp1), mods) + log.error( + "Should not be allowed to add %s member of %s (because it was already member)" % (memofenh1, memofegrp1)) + assert False + except ldap.TYPE_OR_VALUE_EXISTS: + log.error("%s already member of %s --> fail (expected)" % (memofenh1, memofegrp1)) + assert True + + +def test_member_add_duplicate_usr2(topology_st): + """Duplicate uniqueMember attributes to groups + + :id: d222af17-17a6-48a0-8f22-a38306726a11 + :setup: Standalone instance + :steps: + 1. Try to add a uniqueMember: enh2 which already exists + :expectedresults: + 1. 
It should fail with Type of value exists error + """ + + memofenh1 = _get_user_dn('memofenh1') + memofenh2 = _get_user_dn('memofenh2') + + memofegrp1 = _get_group_dn('memofegrp1') + memofegrp2 = _get_group_dn('memofegrp2') + + log.info("Check initial status") + # assert enh1 is member of grp1 and is NOT member of grp2 + assert _check_memberof(topology_st, member=memofenh1, group=memofegrp1) + assert not _check_memberof(topology_st, member=memofenh1, group=memofegrp2) + + # assert enh2 is NOT member of grp1 and is member of grp2 + assert not _check_memberof(topology_st, member=memofenh2, group=memofegrp1) + assert _check_memberof(topology_st, member=memofenh2, group=memofegrp2) + + mods = [(ldap.MOD_ADD, 'uniqueMember', memofenh2)] + log.info("Try %s is memberof %s (member)" % (memofenh2, memofegrp2)) + try: + topology_st.standalone.modify_s(ensure_str(memofegrp2), mods) + log.error( + "Should not be allowed to add %s member of %s (because it was already member)" % (memofenh2, memofegrp2)) + assert False + except ldap.TYPE_OR_VALUE_EXISTS: + log.error("%s already member of %s --> fail (expected)" % (memofenh2, memofegrp2)) + assert True + + log.info("Check final status") + # assert enh1 is member of grp1 and is NOT member of grp2 + assert _check_memberof(topology_st, member=memofenh1, group=memofegrp1) + assert not _check_memberof(topology_st, member=memofenh1, group=memofegrp2) + + # assert enh2 is NOT member of grp1 and is member of grp2 + assert not _check_memberof(topology_st, member=memofenh2, group=memofegrp1) + assert _check_memberof(topology_st, member=memofenh2, group=memofegrp2) + + +#def test_memberof_MultiGrpAttr_012(topology_st): +# """ +# MemberURL attritbute should reflect the modrdn changes in the group. 
+# +# This test has been covered in MODRDN test suite +# +# At the beginning: +# memofenh1 is memberof memofegrp1 +# memofenh2 is memberof memofegrp2 +# +# At the end +# memofenh1 is memberof memofegrp1 +# memofenh2 is memberof memofegrp2 +# """ +# pass + + +#def test_memberof_MultiGrpAttr_013(topology_st): +# """ +# MemberURL attritbute should reflect the modrdn changes in the group. +# +# This test has been covered in MODRDN test suite +# +# At the beginning: +# memofenh1 is memberof memofegrp1 +# memofenh2 is memberof memofegrp2 +# +# At the end +# memofenh1 is memberof memofegrp1 +# memofenh2 is memberof memofegrp2 +# """ +# pass + + +def test_member_uniquemember_same_user(topology_st): + """Check the situation when both member and uniqueMember + pointing to the same user + + :id: d222af17-17a6-48a0-8f22-a38306726a13 + :setup: Standalone instance, grp3, + enh1 is member of + - grp1 (member) + - not grp2 + enh2 is member of + - not grp1 + - grp2 (uniquemember) + :steps: + 1. Add member: enh1 and uniqueMember: enh1 to grp3 + 2. Assert enh1 is member of + - grp1 (member) + - not grp2 + - grp3 (member uniquemember) + 3. Delete member: enh1 from grp3 + 4. Add member: enh2 to grp3 + 5. Assert enh1 is member of + - grp1 (member) + - not grp2 + - grp3 (uniquemember) + 6. Assert enh2 is member of + - not grp1 + - grp2 (uniquemember) + - grp3 (member) + :expectedresults: + 1. Success + 2. Success + 3. Success + 4. Success + 5. Success + 6. 
Success + """ + + memofenh1 = _get_user_dn('memofenh1') + memofenh2 = _get_user_dn('memofenh2') + + memofegrp1 = _get_group_dn('memofegrp1') + memofegrp2 = _get_group_dn('memofegrp2') + + log.info("Check initial status") + # assert enh1 is member of grp1 and is NOT member of grp2 + assert _check_memberof(topology_st, member=memofenh1, group=memofegrp1) + assert not _check_memberof(topology_st, member=memofenh1, group=memofegrp2) + + # assert enh2 is NOT member of grp1 and is member of grp2 + assert not _check_memberof(topology_st, member=memofenh2, group=memofegrp1) + assert _check_memberof(topology_st, member=memofenh2, group=memofegrp2) + + memofegrp3 = _create_group(topology_st, 'memofegrp3') + + mods = [(ldap.MOD_ADD, 'member', memofenh1), (ldap.MOD_ADD, 'uniqueMember', memofenh1)] + log.info("Update %s is memberof %s (member)" % (memofenh1, memofegrp3)) + log.info("Update %s is memberof %s (uniqueMember)" % (memofenh1, memofegrp3)) + topology_st.standalone.modify_s(ensure_str(memofegrp3), mods) + + # assert enh1 is member of + # - grp1 (member) + # - not grp2 + # - grp3 (member uniquemember) + assert _check_memberof(topology_st, member=memofenh1, group=memofegrp1) + assert not _check_memberof(topology_st, member=memofenh1, group=memofegrp2) + assert _check_memberof(topology_st, member=memofenh1, group=memofegrp3) + + mods = [(ldap.MOD_DELETE, 'member', memofenh1)] + log.info("Update %s is not memberof %s (member)" % (memofenh1, memofegrp3)) + topology_st.standalone.modify_s(ensure_str(memofegrp3), mods) + + mods = [(ldap.MOD_ADD, 'member', memofenh2)] + log.info("Update %s is memberof %s (member)" % (memofenh2, memofegrp3)) + topology_st.standalone.modify_s(ensure_str(memofegrp3), mods) + + # assert enh1 is member of + # - grp1 (member) + # - not grp2 + # - grp3 (uniquemember) + assert _check_memberof(topology_st, member=memofenh1, group=memofegrp1) + assert not _check_memberof(topology_st, member=memofenh1, group=memofegrp2) + assert 
_check_memberof(topology_st, member=memofenh1, group=memofegrp3) + + # assert enh2 is member of + # - not grp1 + # - grp2 (uniquemember) + # - grp3 (member) + assert not _check_memberof(topology_st, member=memofenh2, group=memofegrp1) + assert _check_memberof(topology_st, member=memofenh2, group=memofegrp2) + assert _check_memberof(topology_st, member=memofenh2, group=memofegrp3) + + ent = topology_st.standalone.getEntry(ensure_str(memofegrp3), ldap.SCOPE_BASE, "(objectclass=*)", ['member', 'uniqueMember']) + assert ent.hasAttr('member') + assert ensure_bytes(memofenh1) not in ent.getValues('member') + assert ensure_bytes(memofenh2) in ent.getValues('member') + assert ent.hasAttr('uniqueMember') + assert ensure_bytes(memofenh1) in ent.getValues('uniqueMember') + assert ensure_bytes(memofenh2) not in ent.getValues('uniqueMember') + + log.info("Checking final status") + # assert enh1 is member of + # - grp1 (member) + # - not grp2 + # - grp3 (uniquemember) + assert _check_memberof(topology_st, member=memofenh1, group=memofegrp1) + assert not _check_memberof(topology_st, member=memofenh1, group=memofegrp2) + assert _check_memberof(topology_st, member=memofenh1, group=memofegrp3) + + # assert enh2 is member of + # - not grp1 + # - grp2 (uniquemember) + # - grp3 (member) + assert not _check_memberof(topology_st, member=memofenh2, group=memofegrp1) + assert _check_memberof(topology_st, member=memofenh2, group=memofegrp2) + assert _check_memberof(topology_st, member=memofenh2, group=memofegrp3) + + +def test_member_not_exists(topology_st): + """Check the situation when we add non-existing users to member attribute + + :id: d222af17-17a6-48a0-8f22-a38306726a14 + :setup: Standalone instance, grp015, + enh1 is member of + - grp1 (member) + - not grp2 + - grp3 (uniquemember) + enh2 is member of + - not grp1 + - grp2 (uniquemember) + - grp3 (member) + :steps: + 1. Add member: dummy1 and uniqueMember: dummy2 to grp015 + 2. 
Assert enh1 is member of + - grp1 (member) + - not grp2 + - grp3 (uniquemember) + - not grp015 + 3. Assert enh2 is member of + - not grp1 + - grp2 (uniquemember) + - grp3 (member) + - not grp015 + :expectedresults: + 1. Success + 2. Success + 3. Success + """ + + memofenh1 = _get_user_dn('memofenh1') + memofenh2 = _get_user_dn('memofenh2') + dummy1 = _get_user_dn('dummy1') + dummy2 = _get_user_dn('dummy2') + + memofegrp1 = _get_group_dn('memofegrp1') + memofegrp2 = _get_group_dn('memofegrp2') + memofegrp3 = _get_group_dn('memofegrp3') + + log.info("Checking Initial status") + # assert enh1 is member of + # - grp1 (member) + # - not grp2 + # - grp3 (uniquemember) + assert _check_memberof(topology_st, member=memofenh1, group=memofegrp1) + assert not _check_memberof(topology_st, member=memofenh1, group=memofegrp2) + assert _check_memberof(topology_st, member=memofenh1, group=memofegrp3) + + # assert enh2 is member of + # - not grp1 + # - grp2 (uniquemember) + # - grp3 (member) + assert not _check_memberof(topology_st, member=memofenh2, group=memofegrp1) + assert _check_memberof(topology_st, member=memofenh2, group=memofegrp2) + assert _check_memberof(topology_st, member=memofenh2, group=memofegrp3) + + memofegrp015 = _create_group(topology_st, 'memofegrp015') + + mods = [(ldap.MOD_ADD, 'member', dummy1), (ldap.MOD_ADD, 'uniqueMember', dummy2)] + log.info("Update %s is memberof %s (member)" % (dummy1, memofegrp015)) + log.info("Update %s is memberof %s (uniqueMember)" % (dummy2, memofegrp015)) + topology_st.standalone.modify_s(ensure_str(memofegrp015), mods) + + ent = topology_st.standalone.getEntry(ensure_str(memofegrp015), ldap.SCOPE_BASE, "(objectclass=*)", ['member', 'uniqueMember']) + assert ent.hasAttr('member') + assert ensure_bytes(dummy1) in ent.getValues('member') + assert ensure_bytes(dummy2) not in ent.getValues('member') + assert ent.hasAttr('uniqueMember') + assert ensure_bytes(dummy1) not in ent.getValues('uniqueMember') + assert ensure_bytes(dummy2) in 
ent.getValues('uniqueMember') + + # assert enh1 is member of + # - grp1 (member) + # - not grp2 + # - grp3 (uniquemember) + assert _check_memberof(topology_st, member=memofenh1, group=memofegrp1) + assert not _check_memberof(topology_st, member=memofenh1, group=memofegrp2) + assert _check_memberof(topology_st, member=memofenh1, group=memofegrp3) + assert not _check_memberof(topology_st, member=memofenh1, group=memofegrp015) + + # assert enh2 is member of + # - not grp1 + # - grp2 (uniquemember) + # - grp3 (member) + # + assert not _check_memberof(topology_st, member=memofenh2, group=memofegrp1) + assert _check_memberof(topology_st, member=memofenh2, group=memofegrp2) + assert _check_memberof(topology_st, member=memofenh2, group=memofegrp3) + assert not _check_memberof(topology_st, member=memofenh2, group=memofegrp015) + + +def test_member_not_exists_complex(topology_st): + """Check the situation when we modify non-existing users member attribute + + :id: d222af17-17a6-48a0-8f22-a38306726a15 + :setup: Standalone instance, + enh1 is member of + - grp1 (member) + - not grp2 + - grp3 (uniquemember) + - not grp015 + enh2 is member of + - not grp1 + - grp2 (uniquemember) + - grp3 (member) + - not grp015 + :steps: + 1. Add member: enh1 and uniqueMember: enh1 to grp016 + 2. Assert enh1 is member of + - grp1 (member) + - not grp2 + - grp3 (uniquemember) + - not grp15 + - grp16 (member uniquemember) + 3. Assert enh2 is member of + - not grp1 + - grp2 (uniquemember) + - grp3 (member) + - not grp15 + - not grp16 + 4. Add member: dummy1 and uniqueMember: dummy2 to grp016 + 5. Assert enh1 is member of + - grp1 (member) + - not grp2 + - grp3 (uniquemember) + - not grp15 + - grp16 (member uniquemember) + 6. Assert enh2 is member of + - not grp1 + - grp2 (uniquemember) + - grp3 (member) + - not grp15 + - not grp16 + :expectedresults: + 1. Success + 2. Success + 3. Success + 4. Success + 5. Success + 6. 
Success + """ + + memofenh1 = _get_user_dn('memofenh1') + memofenh2 = _get_user_dn('memofenh2') + dummy1 = _get_user_dn('dummy1') + + memofegrp1 = _get_group_dn('memofegrp1') + memofegrp2 = _get_group_dn('memofegrp2') + memofegrp3 = _get_group_dn('memofegrp3') + memofegrp015 = _get_group_dn('memofegrp015') + + # assert enh1 is member of + # - grp1 (member) + # - not grp2 + # - grp3 (uniquemember) + # - not grp15 + assert _check_memberof(topology_st, member=memofenh1, group=memofegrp1) + assert not _check_memberof(topology_st, member=memofenh1, group=memofegrp2) + assert _check_memberof(topology_st, member=memofenh1, group=memofegrp3) + assert not _check_memberof(topology_st, member=memofenh1, group=memofegrp015) + + # assert enh2 is member of + # - not grp1 + # - grp2 (uniquemember) + # - grp3 (member) + # - not grp15 + assert not _check_memberof(topology_st, member=memofenh2, group=memofegrp1) + assert _check_memberof(topology_st, member=memofenh2, group=memofegrp2) + assert _check_memberof(topology_st, member=memofenh2, group=memofegrp3) + assert not _check_memberof(topology_st, member=memofenh2, group=memofegrp015) + + memofegrp016 = _create_group(topology_st, 'memofegrp016') + + mods = [(ldap.MOD_ADD, 'member', memofenh1), (ldap.MOD_ADD, 'uniqueMember', memofenh1)] + log.info("Update %s is memberof %s (member)" % (memofenh1, memofegrp016)) + log.info("Update %s is memberof %s (uniqueMember)" % (memofenh1, memofegrp016)) + topology_st.standalone.modify_s(ensure_str(memofegrp016), mods) + + # assert enh1 is member of + # - grp1 (member) + # - not grp2 + # - grp3 (uniquemember) + # - not grp15 + # - grp16 (member uniquemember) + assert _check_memberof(topology_st, member=memofenh1, group=memofegrp1) + assert not _check_memberof(topology_st, member=memofenh1, group=memofegrp2) + assert _check_memberof(topology_st, member=memofenh1, group=memofegrp3) + assert not _check_memberof(topology_st, member=memofenh1, group=memofegrp015) + assert _check_memberof(topology_st, 
member=memofenh1, group=memofegrp016) + + # assert enh2 is member of + # - not grp1 + # - grp2 (uniquemember) + # - grp3 (member) + # - not grp15 + # - not grp16 + assert not _check_memberof(topology_st, member=memofenh2, group=memofegrp1) + assert _check_memberof(topology_st, member=memofenh2, group=memofegrp2) + assert _check_memberof(topology_st, member=memofenh2, group=memofegrp3) + assert not _check_memberof(topology_st, member=memofenh2, group=memofegrp015) + assert not _check_memberof(topology_st, member=memofenh2, group=memofegrp016) + + mods = [(ldap.MOD_ADD, 'member', dummy1), ] + log.info("Update %s is memberof %s (member)" % (dummy1, memofegrp016)) + topology_st.standalone.modify_s(ensure_str(memofegrp016), mods) + + ent = topology_st.standalone.getEntry(ensure_str(memofegrp016), ldap.SCOPE_BASE, "(objectclass=*)", ['member', 'uniqueMember']) + assert ent.hasAttr('member') + assert ensure_bytes(dummy1) in ent.getValues('member') + assert ent.hasAttr('uniqueMember') + assert ensure_bytes(dummy1) not in ent.getValues('uniqueMember') + + mods = [(ldap.MOD_ADD, 'uniqueMember', dummy1), ] + log.info("Update %s is memberof %s (uniqueMember)" % (dummy1, memofegrp016)) + topology_st.standalone.modify_s(ensure_str(memofegrp016), mods) + + ent = topology_st.standalone.getEntry(ensure_str(memofegrp016), ldap.SCOPE_BASE, "(objectclass=*)", ['member', 'uniqueMember']) + assert ent.hasAttr('member') + assert ensure_bytes(dummy1) in ent.getValues('member') + assert ent.hasAttr('uniqueMember') + assert ensure_bytes(dummy1) in ent.getValues('uniqueMember') + + # assert enh1 is member of + # - grp1 (member) + # - not grp2 + # - grp3 (uniquemember) + # - not grp15 + # - grp16 (member uniquemember) + assert _check_memberof(topology_st, member=memofenh1, group=memofegrp1) + assert not _check_memberof(topology_st, member=memofenh1, group=memofegrp2) + assert _check_memberof(topology_st, member=memofenh1, group=memofegrp3) + assert not _check_memberof(topology_st, 
member=memofenh1, group=memofegrp015) + assert _check_memberof(topology_st, member=memofenh1, group=memofegrp016) + + # assert enh2 is member of + # - not grp1 + # - grp2 (uniquemember) + # - grp3 (member) + # - not grp15 + # - not grp16 + assert not _check_memberof(topology_st, member=memofenh2, group=memofegrp1) + assert _check_memberof(topology_st, member=memofenh2, group=memofegrp2) + assert _check_memberof(topology_st, member=memofenh2, group=memofegrp3) + assert not _check_memberof(topology_st, member=memofenh2, group=memofegrp015) + assert not _check_memberof(topology_st, member=memofenh2, group=memofegrp016) + + +def test_complex_group_scenario_1(topology_st): + """Check the situation when user1 and user2 are memberof grp017 + user2 is member of grp017 but not with a memberof attribute (memberUid) + + :id: d222af17-17a6-48a0-8f22-a38306726a16 + :setup: Standalone instance, grp017, + enh1 is member of + - grp1 (member) + - not grp2 + - grp3 (uniquemember) + - not grp015 + - grp016 (member uniquemember) + enh2 is member of + - not grp1 + - grp2 (uniquemember) + - grp3 (member) + - not grp015 + - not grp016 + :steps: + 1. Create user1 as grp17 (member) + 2. Create user2 as grp17 (uniqueMember) + 3. Create user3 as grp17 (memberuid) (not memberof attribute) + 4. Assert enh1 is member of + - grp1 (member) + - not grp2 + - grp3 (uniquemember) + - not grp15 + - grp16 (member uniquemember) + - not grp17 + 5. Assert enh2 is member of + - not grp1 + - grp2 (uniquemember) + - grp3 (member) + - not grp15 + - not grp16 + - not grp17 + 6. Assert user1 is member of + - not grp1 + - not grp2 + - not grp3 + - not grp15 + - not grp16 + - grp17 (member) + 7. Assert user2 is member of + - not grp1 + - not grp2 + - not grp3 + - not grp15 + - not grp16 + - grp17 (uniqueMember) + 8. Assert user3 is member of + - not grp1 + - not grp2 + - not grp3 + - not grp15 + - not grp16 + - NOT grp17 (memberuid) + :expectedresults: + 1. Success + 2. Success + 3. Success + 4. Success + 5. 
Success + 6. Success + 7. Success + 8. Success + """ + + memofenh1 = _get_user_dn('memofenh1') + memofenh2 = _get_user_dn('memofenh2') + + memofegrp1 = _get_group_dn('memofegrp1') + memofegrp2 = _get_group_dn('memofegrp2') + memofegrp3 = _get_group_dn('memofegrp3') + memofegrp015 = _get_group_dn('memofegrp015') + memofegrp016 = _get_group_dn('memofegrp016') + + # assert enh1 is member of + # - grp1 (member) + # - not grp2 + # - grp3 (uniquemember) + # - not grp15 + # - grp16 (member uniquemember) + assert _check_memberof(topology_st, member=memofenh1, group=memofegrp1) + assert not _check_memberof(topology_st, member=memofenh1, group=memofegrp2) + assert _check_memberof(topology_st, member=memofenh1, group=memofegrp3) + assert not _check_memberof(topology_st, member=memofenh1, group=memofegrp015) + assert _check_memberof(topology_st, member=memofenh1, group=memofegrp016) + + # assert enh2 is member of + # - not grp1 + # - grp2 (uniquemember) + # - grp3 (member) + # - not grp15 + # - not grp16 + assert not _check_memberof(topology_st, member=memofenh2, group=memofegrp1) + assert _check_memberof(topology_st, member=memofenh2, group=memofegrp2) + assert _check_memberof(topology_st, member=memofenh2, group=memofegrp3) + assert not _check_memberof(topology_st, member=memofenh2, group=memofegrp015) + assert not _check_memberof(topology_st, member=memofenh2, group=memofegrp016) + + # + # create user1 + # - not grp1 + # - not grp2 + # - not grp3 + # - not grp15 + # - not grp16 + # - grp17 (member) + # + # create user2 + # - not grp1 + # - not grp2 + # - not grp3 + # - not grp15 + # - not grp16 + # - grp17 (uniqueMember) + # + # create user3 + # - not grp1 + # - not grp2 + # - not grp3 + # - not grp15 + # - not grp16 + # - grp17 (memberuid) (not memberof attribute) + memofuser1 = _create_user(topology_st, 'memofuser1') + memofuser2 = _create_user(topology_st, 'memofuser2') + memofuser3 = _create_user(topology_st, 'memofuser3') + memofegrp017 = _create_group(topology_st, 
'memofegrp017') + + mods = [(ldap.MOD_ADD, 'member', memofuser1), (ldap.MOD_ADD, 'uniqueMember', memofuser2), + (ldap.MOD_ADD, 'memberuid', memofuser3)] + log.info("Update %s is memberof %s (member)" % (memofuser1, memofegrp017)) + log.info("Update %s is memberof %s (uniqueMember)" % (memofuser2, memofegrp017)) + log.info("Update %s is memberof %s (memberuid)" % (memofuser3, memofegrp017)) + topology_st.standalone.modify_s(ensure_str(memofegrp017), mods) + + # assert enh1 is member of + # - grp1 (member) + # - not grp2 + # - grp3 (uniquemember) + # - not grp15 + # - grp16 (member uniquemember) + # - not grp17 + assert _check_memberof(topology_st, member=memofenh1, group=memofegrp1) + assert not _check_memberof(topology_st, member=memofenh1, group=memofegrp2) + assert _check_memberof(topology_st, member=memofenh1, group=memofegrp3) + assert not _check_memberof(topology_st, member=memofenh1, group=memofegrp015) + assert _check_memberof(topology_st, member=memofenh1, group=memofegrp016) + assert not _check_memberof(topology_st, member=memofenh1, group=memofegrp017) + + # assert enh2 is member of + # - not grp1 + # - grp2 (uniquemember) + # - grp3 (member) + # - not grp15 + # - not grp16 + # - not grp17 + assert not _check_memberof(topology_st, member=memofenh2, group=memofegrp1) + assert _check_memberof(topology_st, member=memofenh2, group=memofegrp2) + assert _check_memberof(topology_st, member=memofenh2, group=memofegrp3) + assert not _check_memberof(topology_st, member=memofenh2, group=memofegrp015) + assert not _check_memberof(topology_st, member=memofenh2, group=memofegrp016) + assert not _check_memberof(topology_st, member=memofenh2, group=memofegrp017) + + # assert user1 is member of + # - not grp1 + # - not grp2 + # - not grp3 + # - not grp15 + # - not grp16 + # - grp17 (member) + assert not _check_memberof(topology_st, member=memofuser1, group=memofegrp1) + assert not _check_memberof(topology_st, member=memofuser1, group=memofegrp2) + assert not 
_check_memberof(topology_st, member=memofuser1, group=memofegrp3) + assert not _check_memberof(topology_st, member=memofuser1, group=memofegrp015) + assert not _check_memberof(topology_st, member=memofuser1, group=memofegrp016) + assert _check_memberof(topology_st, member=memofuser1, group=memofegrp017) + + # assert user2 is member of + # - not grp1 + # - not grp2 + # - not grp3 + # - not grp15 + # - not grp16 + # - grp17 (uniqueMember) + assert not _check_memberof(topology_st, member=memofuser2, group=memofegrp1) + assert not _check_memberof(topology_st, member=memofuser2, group=memofegrp2) + assert not _check_memberof(topology_st, member=memofuser2, group=memofegrp3) + assert not _check_memberof(topology_st, member=memofuser2, group=memofegrp015) + assert not _check_memberof(topology_st, member=memofuser2, group=memofegrp016) + assert _check_memberof(topology_st, member=memofuser2, group=memofegrp017) + + # assert user3 is member of + # - not grp1 + # - not grp2 + # - not grp3 + # - not grp15 + # - not grp16 + # - NOT grp17 (memberuid) + assert not _check_memberof(topology_st, member=memofuser3, group=memofegrp1) + assert not _check_memberof(topology_st, member=memofuser3, group=memofegrp2) + assert not _check_memberof(topology_st, member=memofuser3, group=memofegrp3) + assert not _check_memberof(topology_st, member=memofuser3, group=memofegrp015) + assert not _check_memberof(topology_st, member=memofuser3, group=memofegrp016) + assert not _check_memberof(topology_st, member=memofuser3, group=memofegrp017) + + +def test_complex_group_scenario_2(topology_st): + """Check the situation when user1 and user2 are memberof grp018 + user2 is member of grp018 but not with a memberof attribute (memberUid) + + :id: d222af17-17a6-48a0-8f22-a38306726a17 + :setup: Standalone instance, grp018, + enh1 is member of + - grp1 (member) + - not grp2 + - grp3 (uniquemember) + - not grp015 + - grp016 (member uniquemember) + - not grp17 + enh2 is member of + - not grp1 + - grp2 
(uniquemember) + - grp3 (member) + - not grp015 + - not grp016 + - not grp017 + user1 is member of + - not grp1 + - not grp2 + - not grp3 + - not grp015 + - not grp016 + - grp017 (member) + user2 is member of + - not grp1 + - not grp2 + - not grp3 + - not grp015 + - not grp016 + - grp017 (uniquemember) + user3 is member of + - not grp1 + - not grp2 + - not grp3 + - not grp015 + - not grp016 + - not grp017 (memberuid) + :steps: + 1. Add user1 as a member of grp18 (member, uniquemember) + 2. Assert user1 is member of + - not grp1 + - not grp2 + - not grp3 + - not grp15 + - not grp16 + - grp17 (member) + - grp18 (member, uniquemember) + 3. Delete user1 member/uniquemember attributes from grp018 + 4. Assert user1 is member of + - not grp1 + - not grp2 + - not grp3 + - not grp15 + - not grp16 + - grp17 (member) + - NOT grp18 (memberUid) + 5. Delete user1, user2, user3, grp17 entries + 6. Assert enh1 is member of + - grp1 (member) + - not grp2 + - grp3 (uniquemember) + - not grp15 + - grp16 (member uniquemember) + - not grp018 + 7. Assert enh2 is member of + - not grp1 + - grp2 (uniquemember) + - grp3 (member) + - not grp15 + - not grp16 + - not grp018 + :expectedresults: + 1. Success + 2. Success + 3. Success + 4. Success + 5. Success + 6. Success + 7. 
Success + """ + + memofenh1 = _get_user_dn('memofenh1') + memofenh2 = _get_user_dn('memofenh2') + memofuser1 = _get_user_dn('memofuser1') + memofuser2 = _get_user_dn('memofuser2') + memofuser3 = _get_user_dn('memofuser3') + + memofegrp1 = _get_group_dn('memofegrp1') + memofegrp2 = _get_group_dn('memofegrp2') + memofegrp3 = _get_group_dn('memofegrp3') + memofegrp015 = _get_group_dn('memofegrp015') + memofegrp016 = _get_group_dn('memofegrp016') + memofegrp017 = _get_group_dn('memofegrp017') + + # assert enh1 is member of + # - grp1 (member) + # - not grp2 + # - grp3 (uniquemember) + # - not grp15 + # - grp16 (member uniquemember) + # - not grp17 + assert _check_memberof(topology_st, member=memofenh1, group=memofegrp1) + assert not _check_memberof(topology_st, member=memofenh1, group=memofegrp2) + assert _check_memberof(topology_st, member=memofenh1, group=memofegrp3) + assert not _check_memberof(topology_st, member=memofenh1, group=memofegrp015) + assert _check_memberof(topology_st, member=memofenh1, group=memofegrp016) + assert not _check_memberof(topology_st, member=memofenh1, group=memofegrp017) + + # assert enh2 is member of + # - not grp1 + # - grp2 (uniquemember) + # - grp3 (member) + # - not grp15 + # - not grp16 + # - not grp17 + assert not _check_memberof(topology_st, member=memofenh2, group=memofegrp1) + assert _check_memberof(topology_st, member=memofenh2, group=memofegrp2) + assert _check_memberof(topology_st, member=memofenh2, group=memofegrp3) + assert not _check_memberof(topology_st, member=memofenh2, group=memofegrp015) + assert not _check_memberof(topology_st, member=memofenh2, group=memofegrp016) + assert not _check_memberof(topology_st, member=memofenh2, group=memofegrp017) + + # assert user1 is member of + # - not grp1 + # - not grp2 + # - not grp3 + # - not grp15 + # - not grp16 + # - grp17 (member) + assert not _check_memberof(topology_st, member=memofuser1, group=memofegrp1) + assert not _check_memberof(topology_st, member=memofuser1, 
group=memofegrp2) + assert not _check_memberof(topology_st, member=memofuser1, group=memofegrp3) + assert not _check_memberof(topology_st, member=memofuser1, group=memofegrp015) + assert not _check_memberof(topology_st, member=memofuser1, group=memofegrp016) + assert _check_memberof(topology_st, member=memofuser1, group=memofegrp017) + + # assert user2 is member of + # - not grp1 + # - not grp2 + # - not grp3 + # - not grp15 + # - not grp16 + # - grp17 (uniqueMember) + assert not _check_memberof(topology_st, member=memofuser2, group=memofegrp1) + assert not _check_memberof(topology_st, member=memofuser2, group=memofegrp2) + assert not _check_memberof(topology_st, member=memofuser2, group=memofegrp3) + assert not _check_memberof(topology_st, member=memofuser2, group=memofegrp015) + assert not _check_memberof(topology_st, member=memofuser2, group=memofegrp016) + assert _check_memberof(topology_st, member=memofuser2, group=memofegrp017) + + # assert user3 is member of + # - not grp1 + # - not grp2 + # - not grp3 + # - not grp15 + # - not grp16 + # - NOT grp17 (memberuid) + assert not _check_memberof(topology_st, member=memofuser3, group=memofegrp1) + assert not _check_memberof(topology_st, member=memofuser3, group=memofegrp2) + assert not _check_memberof(topology_st, member=memofuser3, group=memofegrp3) + assert not _check_memberof(topology_st, member=memofuser3, group=memofegrp015) + assert not _check_memberof(topology_st, member=memofuser3, group=memofegrp016) + assert not _check_memberof(topology_st, member=memofuser3, group=memofegrp017) + + # + # Create a group grp018 with user1 member/uniquemember + memofegrp018 = _create_group(topology_st, 'memofegrp018') + + mods = [(ldap.MOD_ADD, 'member', memofuser1), (ldap.MOD_ADD, 'uniqueMember', memofuser1), + (ldap.MOD_ADD, 'memberuid', memofuser1)] + log.info("Update %s is memberof %s (member)" % (memofuser1, memofegrp017)) + log.info("Update %s is memberof %s (uniqueMember)" % (memofuser1, memofegrp017)) + 
log.info("Update %s is memberof %s (memberuid)" % (memofuser1, memofegrp017)) + topology_st.standalone.modify_s(ensure_str(memofegrp018), mods) + + # assert user1 is member of + # - not grp1 + # - not grp2 + # - not grp3 + # - not grp15 + # - not grp16 + # - grp17 (member) + # - grp18 (member, uniquemember) + assert not _check_memberof(topology_st, member=memofuser1, group=memofegrp1) + assert not _check_memberof(topology_st, member=memofuser1, group=memofegrp2) + assert not _check_memberof(topology_st, member=memofuser1, group=memofegrp3) + assert not _check_memberof(topology_st, member=memofuser1, group=memofegrp015) + assert not _check_memberof(topology_st, member=memofuser1, group=memofegrp016) + assert _check_memberof(topology_st, member=memofuser1, group=memofegrp017) + assert _check_memberof(topology_st, member=memofuser1, group=memofegrp018) + + mods = [(ldap.MOD_DELETE, 'member', memofuser1), (ldap.MOD_DELETE, 'uniqueMember', memofuser1)] + log.info("Update %s is no longer memberof %s (member)" % (memofuser1, memofegrp018)) + log.info("Update %s is no longer memberof %s (uniqueMember)" % (memofuser1, memofegrp018)) + topology_st.standalone.modify_s(ensure_str(memofegrp018), mods) + + # assert user1 is member of + # - not grp1 + # - not grp2 + # - not grp3 + # - not grp15 + # - not grp16 + # - grp17 (member) + # - NOT grp18 (memberUid) + assert not _check_memberof(topology_st, member=memofuser1, group=memofegrp1) + assert not _check_memberof(topology_st, member=memofuser1, group=memofegrp2) + assert not _check_memberof(topology_st, member=memofuser1, group=memofegrp3) + assert not _check_memberof(topology_st, member=memofuser1, group=memofegrp015) + assert not _check_memberof(topology_st, member=memofuser1, group=memofegrp016) + assert _check_memberof(topology_st, member=memofuser1, group=memofegrp017) + assert not _check_memberof(topology_st, member=memofuser1, group=memofegrp018) + + # DEL user1, user2, user3, grp17 + 
topology_st.standalone.delete_s(ensure_str(memofuser1)) + topology_st.standalone.delete_s(ensure_str(memofuser2)) + topology_st.standalone.delete_s(ensure_str(memofuser3)) + topology_st.standalone.delete_s(ensure_str(memofegrp017)) + + # assert enh1 is member of + # - grp1 (member) + # - not grp2 + # - grp3 (uniquemember) + # - not grp15 + # - grp16 (member uniquemember) + # - not grp018 + assert _check_memberof(topology_st, member=memofenh1, group=memofegrp1) + assert not _check_memberof(topology_st, member=memofenh1, group=memofegrp2) + assert _check_memberof(topology_st, member=memofenh1, group=memofegrp3) + assert not _check_memberof(topology_st, member=memofenh1, group=memofegrp015) + assert _check_memberof(topology_st, member=memofenh1, group=memofegrp016) + assert not _check_memberof(topology_st, member=memofenh1, group=memofegrp018) + + # assert enh2 is member of + # - not grp1 + # - grp2 (uniquemember) + # - grp3 (member) + # - not grp15 + # - not grp16 + # - not grp018 + assert not _check_memberof(topology_st, member=memofenh2, group=memofegrp1) + assert _check_memberof(topology_st, member=memofenh2, group=memofegrp2) + assert _check_memberof(topology_st, member=memofenh2, group=memofegrp3) + assert not _check_memberof(topology_st, member=memofenh2, group=memofegrp015) + assert not _check_memberof(topology_st, member=memofenh2, group=memofegrp016) + assert not _check_memberof(topology_st, member=memofenh2, group=memofegrp018) + + +def test_complex_group_scenario_3(topology_st): + """Test a complex memberOf case: + Add user2 to grp19_2, + Add user3 to grp19_3, + Add grp19_2 and grp_19_3 to grp19_1 + + :id: d222af17-17a6-48a0-8f22-a38306726a18 + :setup: Standalone instance, + enh1 is member of + - grp1 (member) + - not grp2 + - grp3 (uniquemember) + - not grp015 + - grp016 (member uniquemember) + - not grp018 + enh2 is member of + - not grp1 + - grp2 (uniquemember) + - grp3 (member) + - not grp015 + - not grp016 + - not grp018 + :steps: + 1. 
Create user2 and user3 + 2. Create a group grp019_2 with user2 member + 3. Create a group grp019_3 with user3 member + 4. Create a group grp019_1 with memofegrp019_2, memofegrp019_3 member + 5. Assert memofegrp019_1 is member of + - not grp1 + - not grp2 + - not grp3 + - not grp15 + - not grp16 + - not grp018 + - not grp19_1 + - not grp019_2 + - not grp019_3 + 6. Assert memofegrp019_2 is member of + - not grp1 + - not grp2 + - not grp3 + - not grp15 + - not grp16 + - not grp018 + - grp19_1 + - not grp019_2 + - not grp019_3 + 7. Assert memofegrp019_3 is member of + - not grp1 + - not grp2 + - not grp3 + - not grp15 + - not grp16 + - not grp018 + - grp19_1 + - not grp019_2 + - not grp019_3 + 8. Assert memofuser2 is member of + - not grp1 + - not grp2 + - not grp3 + - not grp15 + - not grp16 + - not grp018 + - grp19_1 + - grp019_2 + - not grp019_3 + 9. Assert memofuser3 is member of + - not grp1 + - not grp2 + - not grp3 + - not grp15 + - not grp16 + - not grp018 + - grp19_1 + - not grp019_2 + - grp019_3 + 10. Delete user2, user3, and all grp19* entries + 11. Assert enh1 is member of + - grp1 (member) + - not grp2 + - grp3 (uniquemember) + - not grp15 + - grp16 (member uniquemember) + - not grp018 + 12. Assert enh2 is member of + - not grp1 + - grp2 (uniquemember) + - grp3 (member) + - not grp15 + - not grp16 + - not grp018 + :expectedresults: + 1. Success + 2. Success + 3. Success + 4. Success + 5. Success + 6. Success + 7. Success + 8. Success + 9. Success + 10. Success + 11. Success + 12. 
Success + """ + + memofenh1 = _get_user_dn('memofenh1') + memofenh2 = _get_user_dn('memofenh2') + + memofegrp1 = _get_group_dn('memofegrp1') + memofegrp2 = _get_group_dn('memofegrp2') + memofegrp3 = _get_group_dn('memofegrp3') + memofegrp015 = _get_group_dn('memofegrp015') + memofegrp016 = _get_group_dn('memofegrp016') + memofegrp018 = _get_group_dn('memofegrp018') + + # assert enh1 is member of + # - grp1 (member) + # - not grp2 + # - grp3 (uniquemember) + # - not grp15 + # - grp16 (member uniquemember) + # - not grp018 + assert _check_memberof(topology_st, member=memofenh1, group=memofegrp1) + assert not _check_memberof(topology_st, member=memofenh1, group=memofegrp2) + assert _check_memberof(topology_st, member=memofenh1, group=memofegrp3) + assert not _check_memberof(topology_st, member=memofenh1, group=memofegrp015) + assert _check_memberof(topology_st, member=memofenh1, group=memofegrp016) + assert not _check_memberof(topology_st, member=memofenh1, group=memofegrp018) + + # assert enh2 is member of + # - not grp1 + # - grp2 (uniquemember) + # - grp3 (member) + # - not grp15 + # - not grp16 + # - not grp018 + assert not _check_memberof(topology_st, member=memofenh2, group=memofegrp1) + assert _check_memberof(topology_st, member=memofenh2, group=memofegrp2) + assert _check_memberof(topology_st, member=memofenh2, group=memofegrp3) + assert not _check_memberof(topology_st, member=memofenh2, group=memofegrp015) + assert not _check_memberof(topology_st, member=memofenh2, group=memofegrp016) + assert not _check_memberof(topology_st, member=memofenh2, group=memofegrp018) + + memofuser2 = _create_user(topology_st, 'memofuser2') + memofuser3 = _create_user(topology_st, 'memofuser3') + + # Create a group grp019_2 with user2 member + memofegrp019_2 = _create_group(topology_st, 'memofegrp019_2') + mods = [(ldap.MOD_ADD, 'member', memofuser2)] + topology_st.standalone.modify_s(ensure_str(memofegrp019_2), mods) + + # Create a group grp019_3 with user3 member + 
memofegrp019_3 = _create_group(topology_st, 'memofegrp019_3') + mods = [(ldap.MOD_ADD, 'member', memofuser3)] + topology_st.standalone.modify_s(ensure_str(memofegrp019_3), mods) + + mods = [(ldap.MOD_ADD, 'objectClass', b'inetUser')] + topology_st.standalone.modify_s(ensure_str(memofegrp019_2), mods) + topology_st.standalone.modify_s(ensure_str(memofegrp019_3), mods) + + # Create a group grp019_1 with memofegrp019_2, memofegrp019_3 member + memofegrp019_1 = _create_group(topology_st, 'memofegrp019_1') + mods = [(ldap.MOD_ADD, 'member', memofegrp019_2), (ldap.MOD_ADD, 'member', memofegrp019_3)] + topology_st.standalone.modify_s(ensure_str(memofegrp019_1), mods) + + # assert memofegrp019_1 is member of + # - not grp1 + # - not grp2 + # - not grp3 + # - not grp15 + # - not grp16 + # - not grp018 + # - not grp19_1 + # - not grp019_2 + # - not grp019_3 + assert not _check_memberof(topology_st, member=memofegrp019_1, group=memofegrp1) + assert not _check_memberof(topology_st, member=memofegrp019_1, group=memofegrp2) + assert not _check_memberof(topology_st, member=memofegrp019_1, group=memofegrp3) + assert not _check_memberof(topology_st, member=memofegrp019_1, group=memofegrp015) + assert not _check_memberof(topology_st, member=memofegrp019_1, group=memofegrp016) + assert not _check_memberof(topology_st, member=memofegrp019_1, group=memofegrp018) + assert not _check_memberof(topology_st, member=memofegrp019_1, group=memofegrp019_1) + assert not _check_memberof(topology_st, member=memofegrp019_1, group=memofegrp019_2) + assert not _check_memberof(topology_st, member=memofegrp019_1, group=memofegrp019_3) + + # assert memofegrp019_2 is member of + # - not grp1 + # - not grp2 + # - not grp3 + # - not grp15 + # - not grp16 + # - not grp018 + # - grp19_1 + # - not grp019_2 + # - not grp019_3 + assert not _check_memberof(topology_st, member=memofegrp019_2, group=memofegrp1) + assert not _check_memberof(topology_st, member=memofegrp019_2, group=memofegrp2) + assert not 
_check_memberof(topology_st, member=memofegrp019_2, group=memofegrp3) + assert not _check_memberof(topology_st, member=memofegrp019_2, group=memofegrp015) + assert not _check_memberof(topology_st, member=memofegrp019_2, group=memofegrp016) + assert not _check_memberof(topology_st, member=memofegrp019_2, group=memofegrp018) + assert _check_memberof(topology_st, member=memofegrp019_2, group=memofegrp019_1) + assert not _check_memberof(topology_st, member=memofegrp019_2, group=memofegrp019_2) + assert not _check_memberof(topology_st, member=memofegrp019_2, group=memofegrp019_3) + + # assert memofegrp019_3 is member of + # - not grp1 + # - not grp2 + # - not grp3 + # - not grp15 + # - not grp16 + # - not grp018 + # - grp19_1 + # - not grp019_2 + # - not grp019_3 + assert not _check_memberof(topology_st, member=memofegrp019_3, group=memofegrp1) + assert not _check_memberof(topology_st, member=memofegrp019_3, group=memofegrp2) + assert not _check_memberof(topology_st, member=memofegrp019_3, group=memofegrp3) + assert not _check_memberof(topology_st, member=memofegrp019_3, group=memofegrp015) + assert not _check_memberof(topology_st, member=memofegrp019_3, group=memofegrp016) + assert not _check_memberof(topology_st, member=memofegrp019_3, group=memofegrp018) + assert _check_memberof(topology_st, member=memofegrp019_3, group=memofegrp019_1) + assert not _check_memberof(topology_st, member=memofegrp019_3, group=memofegrp019_2) + assert not _check_memberof(topology_st, member=memofegrp019_3, group=memofegrp019_3) + + # assert memofuser2 is member of + # - not grp1 + # - not grp2 + # - not grp3 + # - not grp15 + # - not grp16 + # - not grp018 + # - grp19_1 + # - grp019_2 + # - not grp019_3 + assert not _check_memberof(topology_st, member=memofuser2, group=memofegrp1) + assert not _check_memberof(topology_st, member=memofuser2, group=memofegrp2) + assert not _check_memberof(topology_st, member=memofuser2, group=memofegrp3) + assert not _check_memberof(topology_st, 
member=memofuser2, group=memofegrp015) + assert not _check_memberof(topology_st, member=memofuser2, group=memofegrp016) + assert not _check_memberof(topology_st, member=memofuser2, group=memofegrp018) + assert _check_memberof(topology_st, member=memofuser2, group=memofegrp019_1) + assert _check_memberof(topology_st, member=memofuser2, group=memofegrp019_2) + assert not _check_memberof(topology_st, member=memofuser2, group=memofegrp019_3) + + # assert memofuser3 is member of + # - not grp1 + # - not grp2 + # - not grp3 + # - not grp15 + # - not grp16 + # - not grp018 + # - grp19_1 + # - not grp019_2 + # - grp019_3 + assert not _check_memberof(topology_st, member=memofuser3, group=memofegrp1) + assert not _check_memberof(topology_st, member=memofuser3, group=memofegrp2) + assert not _check_memberof(topology_st, member=memofuser3, group=memofegrp3) + assert not _check_memberof(topology_st, member=memofuser3, group=memofegrp015) + assert not _check_memberof(topology_st, member=memofuser3, group=memofegrp016) + assert not _check_memberof(topology_st, member=memofuser3, group=memofegrp018) + assert _check_memberof(topology_st, member=memofuser3, group=memofegrp019_1) + assert not _check_memberof(topology_st, member=memofuser3, group=memofegrp019_2) + assert _check_memberof(topology_st, member=memofuser3, group=memofegrp019_3) + + # DEL user2, user3, grp19* + topology_st.standalone.delete_s(ensure_str(memofuser2)) + topology_st.standalone.delete_s(ensure_str(memofuser3)) + topology_st.standalone.delete_s(ensure_str(memofegrp019_1)) + topology_st.standalone.delete_s(ensure_str(memofegrp019_2)) + topology_st.standalone.delete_s(ensure_str(memofegrp019_3)) + + # assert enh1 is member of + # - grp1 (member) + # - not grp2 + # - grp3 (uniquemember) + # - not grp15 + # - grp16 (member uniquemember) + # - not grp018 + assert _check_memberof(topology_st, member=memofenh1, group=memofegrp1) + assert not _check_memberof(topology_st, member=memofenh1, group=memofegrp2) + assert 
_check_memberof(topology_st, member=memofenh1, group=memofegrp3) + assert not _check_memberof(topology_st, member=memofenh1, group=memofegrp015) + assert _check_memberof(topology_st, member=memofenh1, group=memofegrp016) + assert not _check_memberof(topology_st, member=memofenh1, group=memofegrp018) + + # assert enh2 is member of + # - not grp1 + # - grp2 (uniquemember) + # - grp3 (member) + # - not grp15 + # - not grp16 + # - not grp018 + assert not _check_memberof(topology_st, member=memofenh2, group=memofegrp1) + assert _check_memberof(topology_st, member=memofenh2, group=memofegrp2) + assert _check_memberof(topology_st, member=memofenh2, group=memofegrp3) + assert not _check_memberof(topology_st, member=memofenh2, group=memofegrp015) + assert not _check_memberof(topology_st, member=memofenh2, group=memofegrp016) + assert not _check_memberof(topology_st, member=memofenh2, group=memofegrp018) + + +def test_complex_group_scenario_4(topology_st): + """Test a complex memberOf case: + Add user1 and grp[1-5] + Add user1 member of grp[1-4] + Add grp[1-4] member of grp5 + Check user1 is member of grp[1-5] + + :id: d223af17-17a6-48a0-8f22-a38306726a19 + :setup: Standalone instance, + enh1 is member of + - grp1 (member) + - not grp2 + - grp3 (uniquemember) + - not grp015 + - grp016 (member uniquemember) + - not grp018 + enh2 is member of + - not grp1 + - grp2 (uniquemember) + - grp3 (member) + - not grp015 + - not grp016 + - not grp018 + :steps: + 1. Create user1 + 2. Create grp[1-5] that can be inetUser (having memberof) + 3. Add user1 to grp[1-4] (uniqueMember) + 4. Create grp5 with grp[1-4] as member + 5. Assert user1 is a member grp[1-5] + 6. Delete user1 and all grp20 entries + :expectedresults: + 1. Success + 2. Success + 3. Success + 4. Success + 5. Success + 6. 
Success + """ + + memofenh1 = _get_user_dn('memofenh1') + memofenh2 = _get_user_dn('memofenh2') + + memofegrp1 = _get_group_dn('memofegrp1') + memofegrp2 = _get_group_dn('memofegrp2') + memofegrp3 = _get_group_dn('memofegrp3') + memofegrp015 = _get_group_dn('memofegrp015') + memofegrp016 = _get_group_dn('memofegrp016') + memofegrp018 = _get_group_dn('memofegrp018') + + # assert enh1 is member of + # - grp1 (member) + # - not grp2 + # - grp3 (uniquemember) + # - not grp15 + # - grp16 (member uniquemember) + # - not grp018 + assert _check_memberof(topology_st, member=memofenh1, group=memofegrp1) + assert not _check_memberof(topology_st, member=memofenh1, group=memofegrp2) + assert _check_memberof(topology_st, member=memofenh1, group=memofegrp3) + assert not _check_memberof(topology_st, member=memofenh1, group=memofegrp015) + assert _check_memberof(topology_st, member=memofenh1, group=memofegrp016) + assert not _check_memberof(topology_st, member=memofenh1, group=memofegrp018) + + # assert enh2 is member of + # - not grp1 + # - grp2 (uniquemember) + # - grp3 (member) + # - not grp15 + # - not grp16 + # - not grp018 + assert not _check_memberof(topology_st, member=memofenh2, group=memofegrp1) + assert _check_memberof(topology_st, member=memofenh2, group=memofegrp2) + assert _check_memberof(topology_st, member=memofenh2, group=memofegrp3) + assert not _check_memberof(topology_st, member=memofenh2, group=memofegrp015) + assert not _check_memberof(topology_st, member=memofenh2, group=memofegrp016) + assert not _check_memberof(topology_st, member=memofenh2, group=memofegrp018) + + # create user1 + memofuser1 = _create_user(topology_st, 'memofuser1') + + # create grp[1-5] that can be inetUser (having memberof) + memofegrp020_1 = _create_group(topology_st, 'memofegrp020_1') + memofegrp020_2 = _create_group(topology_st, 'memofegrp020_2') + memofegrp020_3 = _create_group(topology_st, 'memofegrp020_3') + memofegrp020_4 = _create_group(topology_st, 'memofegrp020_4') + 
def test_complex_group_scenario_5(topology_st):
    """Test a complex memberOf case:
    Add user[1-4] and Grp[1-4]
    Add userX as uniquemember of GrpX
    Add Grp5
        Grp[1-4] as members of Grp5
        user1 as member of Grp5
    Check that user1 is member of Grp1 and Grp5
    Check that user* are members of Grp5

    :id: d222af17-17a6-48a0-8f22-a38306726a20
    :setup: Standalone instance,
        enh1 is member of
            - grp1 (member)
            - not grp2
            - grp3 (uniquemember)
            - not grp015
            - grp016 (member uniquemember)
            - not grp018
        enh2 is member of
            - not grp1
            - grp2 (uniquemember)
            - grp3 (member)
            - not grp015
            - not grp016
            - not grp018
    :steps:
        1. Create user1-4
        2. Create grp[1-4] that can be inetUser (having memberof)
        3. Add userX (uniquemember) to grpX
        4. Create grp5 with grp[1-4] as member + user1
        5. Assert user[1-4] are member of grp20_5
        6. Assert userX is uniqueMember of grpX
        7. Check that user[1-4] is only 'uniqueMember' of the grp20_[1-4]
        8. Check that grp20_[1-4] are only 'member' of grp20_5
        9. Check that user1 are only 'member' of grp20_5
        10. Assert enh1 is member of grp1, grp3, grp016 and of no grp20*
        11. Assert enh2 is member of grp2, grp3 and of no grp20*
    :expectedresults:
        1. Success
        2. Success
        3. Success
        4. Success
        5. Success
        6. Success
        7. Success
        8. Success
        9. Success
        10. Success
        11. Success
    """

    memofenh1 = _get_user_dn('memofenh1')
    memofenh2 = _get_user_dn('memofenh2')

    memofegrp1 = _get_group_dn('memofegrp1')
    memofegrp2 = _get_group_dn('memofegrp2')
    memofegrp3 = _get_group_dn('memofegrp3')
    memofegrp015 = _get_group_dn('memofegrp015')
    memofegrp016 = _get_group_dn('memofegrp016')
    memofegrp018 = _get_group_dn('memofegrp018')

    def expect_memberof(member, in_groups, out_groups):
        # Assert membership / non-membership in one pass
        for grp in in_groups:
            assert _check_memberof(topology_st, member=member, group=grp)
        for grp in out_groups:
            assert not _check_memberof(topology_st, member=member, group=grp)

    # Baseline: enh1 belongs to grp1 (member), grp3 (uniquemember),
    # grp016 (member + uniquemember) only
    expect_memberof(memofenh1,
                    in_groups=[memofegrp1, memofegrp3, memofegrp016],
                    out_groups=[memofegrp2, memofegrp015, memofegrp018])
    # Baseline: enh2 belongs to grp2 (uniquemember) and grp3 (member) only
    expect_memberof(memofenh2,
                    in_groups=[memofegrp2, memofegrp3],
                    out_groups=[memofegrp1, memofegrp015, memofegrp016, memofegrp018])

    # create user1-4
    users = [_create_user(topology_st, 'memofuser%d' % i) for i in range(1, 5)]
    memofuser1, memofuser2, memofuser3, memofuser4 = users

    # create grp[1-4] that can be inetUser (having memberof) and add
    # userX (uniquemember) to grpX
    groups = [_create_group(topology_st, 'memofegrp020_%d' % i) for i in range(1, 5)]
    for grp, user in zip(groups, users):
        mods = [(ldap.MOD_ADD, 'objectClass', b'inetUser'),
                (ldap.MOD_ADD, 'uniqueMember', user)]
        topology_st.standalone.modify_s(ensure_str(grp), mods)

    # create grp5 with grp[1-4] as member + user1
    memofegrp020_5 = _create_group(topology_st, 'memofegrp020_5')
    mods = [(ldap.MOD_ADD, 'member', memofuser1)]
    mods.extend((ldap.MOD_ADD, 'member', grp) for grp in groups)
    topology_st.standalone.modify_s(ensure_str(memofegrp020_5), mods)

    # user[1-4] are all (direct or nested) members of grp20_5
    for user in users:
        assert _check_memberof(topology_st, member=user, group=memofegrp020_5)

    # userX is a member of grpX (through uniqueMember)
    for grp, user in zip(groups, users):
        assert _check_memberof(topology_st, member=user, group=grp)

    # user[1-4] appear only as 'uniqueMember' (never 'member') of grp20_[1-4]
    for grp, user in zip(groups, users):
        assert _check_memberattr(topology_st, grp, 'uniqueMember', user)
        assert not _check_memberattr(topology_st, grp, 'member', user)

    # grp20_[1-4] and user1 appear only as 'member' of grp20_5
    for entry in [memofuser1] + groups:
        assert _check_memberattr(topology_st, memofegrp020_5, 'member', entry)
        assert not _check_memberattr(topology_st, memofegrp020_5, 'uniqueMember', entry)

    # user2-4 are not direct members of grp20_5 at all
    for user in (memofuser2, memofuser3, memofuser4):
        assert not _check_memberattr(topology_st, memofegrp020_5, 'member', user)
        assert not _check_memberattr(topology_st, memofegrp020_5, 'uniqueMember', user)

    # enh1/enh2 are untouched: baseline memberships, none of the grp20*
    grp020_all = groups + [memofegrp020_5]
    expect_memberof(memofenh1,
                    in_groups=[memofegrp1, memofegrp3, memofegrp016],
                    out_groups=[memofegrp2, memofegrp015, memofegrp018] + grp020_all)
    expect_memberof(memofenh2,
                    in_groups=[memofegrp2, memofegrp3],
                    out_groups=[memofegrp1, memofegrp015, memofegrp016, memofegrp018] + grp020_all)
def test_complex_group_scenario_6(topology_st):
    """Test a complex memberOf case:
    add userX as member/uniqueMember of GrpX
    add Grp5 as uniquemember of GrpX (this create a loop)

    :id: d222af17-17a6-48a0-8f22-a38306726a21
    :setup: Standalone instance
        enh1 is member of
            - grp1 (member)
            - not grp2
            - grp3 (uniquemember)
            - not grp15
            - grp16 (member uniquemember)
            - not grp018
            - not grp20*
        enh2 is member of
            - not grp1
            - grp2 (uniquemember)
            - grp3 (member)
            - not grp15
            - not grp16
            - not grp018
            - not grp20*
        user1 is member of grp20_5
        userX is uniquemember of grp20_X
        grp[1-4] are member of grp20_5
    :steps:
        1. Add user[1-4] (member) to grp020_[1-4]
        2. Check that user[1-4] are 'member' and 'uniqueMember' of the grp20_[1-4]
        3. Add Grp[1-4] (uniqueMember) to grp5
        4. Assert user[1-4] are member of grp20_[1-4]
        5. Assert that all groups are members of each others because Grp5 is
           member of all grp20_[1-4]
        6. Assert user[1-5] is uniqueMember of grp[1-5]
        7. Assert enh1 memberships are unchanged (no grp20*)
        8. Assert enh2 memberships are unchanged (no grp20*)
    :expectedresults:
        1. Success
        2. Success
        3. Success
        4. Success
        5. Success
        6. Success
        7. Success
        8. Success
    """

    memofenh1 = _get_user_dn('memofenh1')
    memofenh2 = _get_user_dn('memofenh2')

    memofegrp1 = _get_group_dn('memofegrp1')
    memofegrp2 = _get_group_dn('memofegrp2')
    memofegrp3 = _get_group_dn('memofegrp3')
    memofegrp015 = _get_group_dn('memofegrp015')
    memofegrp016 = _get_group_dn('memofegrp016')
    memofegrp018 = _get_group_dn('memofegrp018')

    users = [_get_user_dn('memofuser%d' % i) for i in range(1, 5)]
    memofuser1, memofuser2, memofuser3, memofuser4 = users
    groups = [_get_group_dn('memofegrp020_%d' % i) for i in range(1, 5)]
    memofegrp020_5 = _get_group_dn('memofegrp020_5')
    grp020_all = groups + [memofegrp020_5]

    def expect_memberof(member, in_groups, out_groups):
        # Assert membership / non-membership in one pass
        for grp in in_groups:
            assert _check_memberof(topology_st, member=member, group=grp)
        for grp in out_groups:
            assert not _check_memberof(topology_st, member=member, group=grp)

    # state from scenario 5: user[1-4] are members of grp20_5 ...
    for user in users:
        assert _check_memberof(topology_st, member=user, group=memofegrp020_5)
    # ... and userX is a member of grpX
    for grp, user in zip(groups, users):
        assert _check_memberof(topology_st, member=user, group=grp)

    # enh1/enh2 baselines, none of the grp20*
    expect_memberof(memofenh1,
                    in_groups=[memofegrp1, memofegrp3, memofegrp016],
                    out_groups=[memofegrp2, memofegrp015, memofegrp018] + grp020_all)
    expect_memberof(memofenh2,
                    in_groups=[memofegrp2, memofegrp3],
                    out_groups=[memofegrp1, memofegrp015, memofegrp016, memofegrp018] + grp020_all)

    # user[1-4] are only 'uniqueMember' (not 'member') of grp20_[1-4]
    for grp, user in zip(groups, users):
        assert _check_memberattr(topology_st, grp, 'uniqueMember', user)
        assert not _check_memberattr(topology_st, grp, 'member', user)

    # grp20_[1-4] and user1 are only 'member' of grp20_5
    for entry in [memofuser1] + groups:
        assert _check_memberattr(topology_st, memofegrp020_5, 'member', entry)
        assert not _check_memberattr(topology_st, memofegrp020_5, 'uniqueMember', entry)

    # user2-4 are neither 'member' nor 'uniquemember' of grp20_5
    for user in (memofuser2, memofuser3, memofuser4):
        assert not _check_memberattr(topology_st, memofegrp020_5, 'member', user)
        assert not _check_memberattr(topology_st, memofegrp020_5, 'uniqueMember', user)

    # add userX (member) to grpX
    for grp, user in zip(groups, users):
        mods = [(ldap.MOD_ADD, 'member', user)]
        topology_st.standalone.modify_s(ensure_str(grp), mods)

    # user[1-4] are now both 'member' and 'uniqueMember' of grp20_[1-4]
    for grp, user in zip(groups, users):
        assert _check_memberattr(topology_st, grp, 'uniqueMember', user)
        assert _check_memberattr(topology_st, grp, 'member', user)

    # add Grp5 (uniqueMember) to grp[1-4]
    # it creates a membership loop !!!
    mods = [(ldap.MOD_ADD, 'uniqueMember', memofegrp020_5)]
    for grp in groups:
        topology_st.standalone.modify_s(ensure_str(grp), mods)

    # give the memberof plugin time to fix up the loop
    time.sleep(5)

    # every user is now a nested member of every grp20_*
    for user in users:
        for grp in grp020_all:
            assert _check_memberof(topology_st, member=user, group=grp)

    # all groups are members of each others because Grp5 is member of all
    # grp20_[1-4]; a group is never a member of itself
    for inner in groups:
        for outer in groups:
            if inner == outer:
                # no member of itself
                assert not _check_memberof(topology_st, member=inner, group=outer)
            else:
                assert _check_memberof(topology_st, member=inner, group=outer)
    for grp in groups:
        assert _check_memberof(topology_st, member=grp, group=memofegrp020_5)

    # userX is a member of grpX; user4 is also a (nested) member of grp5
    for grp, user in zip(groups, users):
        assert _check_memberof(topology_st, member=user, group=grp)
    assert _check_memberof(topology_st, member=memofuser4, group=memofegrp020_5)

    # enh1/enh2 are still unaffected by the grp20 loop
    expect_memberof(memofenh1,
                    in_groups=[memofegrp1, memofegrp3, memofegrp016],
                    out_groups=[memofegrp2, memofegrp015, memofegrp018] + grp020_all)
    expect_memberof(memofenh2,
                    in_groups=[memofegrp2, memofegrp3],
                    out_groups=[memofegrp1, memofegrp015, memofegrp016, memofegrp018] + grp020_all)
def verify_post_023(topology_st, memofegrp020_1, memofegrp020_2, memofegrp020_3, memofegrp020_4, memofegrp020_5,
                    memofuser1, memofuser2, memofuser3, memofuser4):
    """Verify the membership topology expected after scenario 7 removed
    user1 ('member') and grp20_5 ('uniqueMember') from grp20_1:

    ::

         /----member ---> G1 ---uniqueMember -------\\
        /                                            V
       G5 ------------------------>member ---------- --->U1
        |
        |----member ---> G2 ---member/uniqueMember -> U2
        |<--uniquemember-/
        |
        |----member ---> G3 ---member/uniqueMember -> U3
        |<--uniquemember-/
        |----member ---> G4 ---member/uniqueMember -> U4
        |<--uniquemember-/
    """
    grp020_1to4 = (memofegrp020_1, memofegrp020_2, memofegrp020_3, memofegrp020_4)

    # G5 holds G1..G4 through 'member' only
    for grp in grp020_1to4:
        assert _check_memberattr(topology_st, memofegrp020_5, 'member', grp)
        assert not _check_memberattr(topology_st, memofegrp020_5, 'uniqueMember', grp)

    # G2..G4 point back at G5 through 'uniqueMember' only
    for grp in (memofegrp020_2, memofegrp020_3, memofegrp020_4):
        assert not _check_memberattr(topology_st, grp, 'member', memofegrp020_5)
        assert _check_memberattr(topology_st, grp, 'uniqueMember', memofegrp020_5)

    # U2..U4 are both 'member' and 'uniqueMember' of their own group
    for grp, user in ((memofegrp020_2, memofuser2),
                      (memofegrp020_3, memofuser3),
                      (memofegrp020_4, memofuser4)):
        assert _check_memberattr(topology_st, grp, 'uniqueMember', user)
        assert _check_memberattr(topology_st, grp, 'member', user)

    # U1 is only 'uniqueMember' of G1, and G5 no longer appears in G1
    assert _check_memberattr(topology_st, memofegrp020_1, 'uniqueMember', memofuser1)
    assert not _check_memberattr(topology_st, memofegrp020_1, 'member', memofuser1)
    assert not _check_memberattr(topology_st, memofegrp020_1, 'uniqueMember', memofegrp020_5)
    assert not _check_memberattr(topology_st, memofegrp020_1, 'member', memofegrp020_5)

    # every group and every user is a (nested) member of G5
    for entry in grp020_1to4 + (memofuser1, memofuser2, memofuser3, memofuser4):
        assert _check_memberof(topology_st, member=entry, group=memofegrp020_5)
    # and G5 is a nested member of G2..G4 (loop through their uniqueMember)
    for grp in (memofegrp020_2, memofegrp020_3, memofegrp020_4):
        assert _check_memberof(topology_st, member=memofegrp020_5, group=grp)

    # every user is a member of G2..G5; user1 additionally of G1
    for user in (memofuser1, memofuser2, memofuser3, memofuser4):
        for grp in (memofegrp020_2, memofegrp020_3, memofegrp020_4, memofegrp020_5):
            assert _check_memberof(topology_st, member=user, group=grp)
    for grp in grp020_1to4 + (memofegrp020_5,):
        assert _check_memberof(topology_st, member=memofuser1, group=grp)
member=user, group=memofegrp020_3) + assert _check_memberof(topology_st, member=user, group=memofegrp020_2) + for grp in [memofegrp020_1, memofegrp020_2, memofegrp020_3, memofegrp020_4, memofegrp020_5]: + assert _check_memberof(topology_st, member=memofuser1, group=grp) + + +def test_complex_group_scenario_7(topology_st): + """Check the user removal from the complex membership topology + + :id: d222af17-17a6-48a0-8f22-a38306726a22 + :setup: Standalone instance, + enh1 is member of + - grp1 (member) + - not grp2 + - grp3 (uniquemember) + - not grp15 + - grp16 (member uniquemember) + - not grp018 + - not grp20* + + enh2 is member of + - not grp1 + - grp2 (uniquemember) + - grp3 (member) + - not grp15 + - not grp16 + - not grp018 + - not grp20* + + grp[1-4] are member of grp20_5 + user1 is member (member) of group_5 + grp5 is uniqueMember of grp20_[1-4] + user[1-4] is member/uniquemember of grp20_[1-4] + :steps: + 1. Delete user1 as 'member' of grp20_1 + 2. Delete grp020_5 as 'uniqueMember' of grp20_1 + 3. Check the result membership + :expectedresults: + 1. Success + 2. Success + 3. 
The result should be like this + + :: + + /----member ---> G1 ---uniqueMember -------\ + / V + G5 ------------------------>member ---------- --->U1 + | + |----member ---> G2 ---member/uniqueMember -> U2 + |<--uniquemember-/ + | + |----member ---> G3 ---member/uniqueMember -> U3 + |<--uniquemember-/ + |----member ---> G4 ---member/uniqueMember -> U4 + |<--uniquemember-/ + + """ + + memofenh1 = _get_user_dn('memofenh1') + memofenh2 = _get_user_dn('memofenh2') + + memofegrp1 = _get_group_dn('memofegrp1') + memofegrp2 = _get_group_dn('memofegrp2') + memofegrp3 = _get_group_dn('memofegrp3') + memofegrp015 = _get_group_dn('memofegrp015') + memofegrp016 = _get_group_dn('memofegrp016') + memofegrp018 = _get_group_dn('memofegrp018') + + memofuser1 = _get_user_dn('memofuser1') + memofuser2 = _get_user_dn('memofuser2') + memofuser3 = _get_user_dn('memofuser3') + memofuser4 = _get_user_dn('memofuser4') + + memofegrp020_1 = _get_group_dn('memofegrp020_1') + memofegrp020_2 = _get_group_dn('memofegrp020_2') + memofegrp020_3 = _get_group_dn('memofegrp020_3') + memofegrp020_4 = _get_group_dn('memofegrp020_4') + memofegrp020_5 = _get_group_dn('memofegrp020_5') + + # assert user[1-4] are member of grp20_[1-4] + for user in [memofuser1, memofuser2, memofuser3, memofuser4]: + assert _check_memberof(topology_st, member=user, group=memofegrp020_5) + assert _check_memberof(topology_st, member=user, group=memofegrp020_4) + assert _check_memberof(topology_st, member=user, group=memofegrp020_3) + assert _check_memberof(topology_st, member=user, group=memofegrp020_2) + assert _check_memberof(topology_st, member=user, group=memofegrp020_1) + + # assert that all groups are members of each others because Grp5 + # is member of all grp20_[1-4] + for grp in [memofegrp020_1, memofegrp020_2, memofegrp020_3, memofegrp020_4]: + for owner in [memofegrp020_1, memofegrp020_2, memofegrp020_3, memofegrp020_4]: + if grp == owner: + # no member of itself + assert not _check_memberof(topology_st, member=grp, 
group=owner) + else: + assert _check_memberof(topology_st, member=grp, group=owner) + for grp in [memofegrp020_1, memofegrp020_2, memofegrp020_3, memofegrp020_4]: + assert _check_memberof(topology_st, member=grp, group=memofegrp020_5) + + # assert userX is uniqueMember of grpX + assert _check_memberof(topology_st, member=memofuser1, group=memofegrp020_1) + assert _check_memberof(topology_st, member=memofuser2, group=memofegrp020_2) + assert _check_memberof(topology_st, member=memofuser3, group=memofegrp020_3) + assert _check_memberof(topology_st, member=memofuser4, group=memofegrp020_4) + assert _check_memberof(topology_st, member=memofuser4, group=memofegrp020_5) + + # assert enh1 is member of + # - grp1 (member) + # - not grp2 + # - grp3 (uniquemember) + # - not grp15 + # - grp16 (member uniquemember) + # - not grp018 + # - not grp20* + assert _check_memberof(topology_st, member=memofenh1, group=memofegrp1) + assert not _check_memberof(topology_st, member=memofenh1, group=memofegrp2) + assert _check_memberof(topology_st, member=memofenh1, group=memofegrp3) + assert not _check_memberof(topology_st, member=memofenh1, group=memofegrp015) + assert _check_memberof(topology_st, member=memofenh1, group=memofegrp016) + assert not _check_memberof(topology_st, member=memofenh1, group=memofegrp018) + assert not _check_memberof(topology_st, member=memofenh1, group=memofegrp020_1) + assert not _check_memberof(topology_st, member=memofenh1, group=memofegrp020_2) + assert not _check_memberof(topology_st, member=memofenh1, group=memofegrp020_3) + assert not _check_memberof(topology_st, member=memofenh1, group=memofegrp020_4) + assert not _check_memberof(topology_st, member=memofenh1, group=memofegrp020_5) + + # assert enh2 is member of + # - not grp1 + # - grp2 (uniquemember) + # - grp3 (member) + # - not grp15 + # - not grp16 + # - not grp018 + # - not grp20* + assert not _check_memberof(topology_st, member=memofenh2, group=memofegrp1) + assert _check_memberof(topology_st, 
member=memofenh2, group=memofegrp2) + assert _check_memberof(topology_st, member=memofenh2, group=memofegrp3) + assert not _check_memberof(topology_st, member=memofenh2, group=memofegrp015) + assert not _check_memberof(topology_st, member=memofenh2, group=memofegrp016) + assert not _check_memberof(topology_st, member=memofenh2, group=memofegrp018) + assert not _check_memberof(topology_st, member=memofenh2, group=memofegrp020_1) + assert not _check_memberof(topology_st, member=memofenh2, group=memofegrp020_2) + assert not _check_memberof(topology_st, member=memofenh2, group=memofegrp020_3) + assert not _check_memberof(topology_st, member=memofenh2, group=memofegrp020_4) + assert not _check_memberof(topology_st, member=memofenh2, group=memofegrp020_5) + + # check that user[1-4] is only 'uniqueMember' of the grp20_[1-4] + for x in [(memofegrp020_1, memofuser1), + (memofegrp020_2, memofuser2), + (memofegrp020_3, memofuser3), + (memofegrp020_4, memofuser4)]: + assert _check_memberattr(topology_st, x[0], 'uniqueMember', x[1]) + assert _check_memberattr(topology_st, x[0], 'member', x[1]) + + # check that grp20_[1-4] are 'uniqueMember' and 'member' of grp20_5 + # check that user1 is only 'member' of grp20_5 + for x in [memofegrp020_1, memofegrp020_2, memofegrp020_3, memofegrp020_4]: + assert _check_memberattr(topology_st, memofegrp020_5, 'member', x) + assert not _check_memberattr(topology_st, memofegrp020_5, 'uniqueMember', x) + assert _check_memberattr(topology_st, memofegrp020_5, 'member', memofuser1) + assert not _check_memberattr(topology_st, memofegrp020_5, 'uniqueMember', memofuser1) + + # DEL user1 as 'member' of grp20_1 + mods = [(ldap.MOD_DELETE, 'member', memofuser1)] + topology_st.standalone.modify_s(ensure_str(memofegrp020_1), mods) + + mods = [(ldap.MOD_DELETE, 'uniqueMember', memofegrp020_5)] + topology_st.standalone.modify_s(ensure_str(memofegrp020_1), mods) + + """ + /----member ---> G1 ---uniqueMember -------\ + / V + G5 ------------------------>member 
---------- --->U1 + | + |----member ---> G2 ---member/uniqueMember -> U2 + |<--uniquemember-/ + | + |----member ---> G3 ---member/uniqueMember -> U3 + |<--uniquemember-/ + |----member ---> G4 ---member/uniqueMember -> U4 + |<--uniquemember-/ + """ + verify_post_023(topology_st, memofegrp020_1, memofegrp020_2, memofegrp020_3, memofegrp020_4, memofegrp020_5, + memofuser1, memofuser2, memofuser3, memofuser4) + + +def verify_post_024(topology_st, memofegrp020_1, memofegrp020_2, memofegrp020_3, memofegrp020_4, memofegrp020_5, + memofuser1, memofuser2, memofuser3, memofuser4): + """ + /----member ---> G1 ---member/uniqueMember -\ + / V + G5 ------------------------>member ---------- --->U1 + | + |----member ---> G2 ---member/uniqueMember -> U2 + |<--uniquemember-/ + | + |----member ---> G3 ---member/uniqueMember -> U3 + |<--uniquemember-/ + |----member ---> G4 ---member/uniqueMember -> U4 + |<--uniquemember-/ + """ + for x in [memofegrp020_1, memofegrp020_2, memofegrp020_3, memofegrp020_4]: + assert _check_memberattr(topology_st, memofegrp020_5, 'member', x) + assert not _check_memberattr(topology_st, memofegrp020_5, 'uniqueMember', x) + for x in [memofegrp020_2, memofegrp020_3, memofegrp020_4]: + assert not _check_memberattr(topology_st, x, 'member', memofegrp020_5) + assert _check_memberattr(topology_st, x, 'uniqueMember', memofegrp020_5) + # check that user[1-4] is only 'uniqueMember' of the grp20_[1-4] + for x in [(memofegrp020_1, memofuser1), + (memofegrp020_2, memofuser2), + (memofegrp020_3, memofuser3), + (memofegrp020_4, memofuser4)]: + assert _check_memberattr(topology_st, x[0], 'uniqueMember', x[1]) + assert _check_memberattr(topology_st, x[0], 'member', x[1]) + assert not _check_memberattr(topology_st, memofegrp020_1, 'uniqueMember', memofegrp020_5) + assert not _check_memberattr(topology_st, memofegrp020_1, 'member', memofegrp020_5) + + for x in [memofegrp020_1, memofegrp020_2, memofegrp020_3, memofegrp020_4, memofuser1, memofuser2, memofuser3, + memofuser4]: 
+ assert _check_memberof(topology_st, member=x, group=memofegrp020_5) + for x in [memofegrp020_2, memofegrp020_3, memofegrp020_4]: + assert _check_memberof(topology_st, member=memofegrp020_5, group=x) + + for user in [memofuser1, memofuser2, memofuser3, memofuser4]: + assert _check_memberof(topology_st, member=user, group=memofegrp020_5) + assert _check_memberof(topology_st, member=user, group=memofegrp020_4) + assert _check_memberof(topology_st, member=user, group=memofegrp020_3) + assert _check_memberof(topology_st, member=user, group=memofegrp020_2) + for grp in [memofegrp020_1, memofegrp020_2, memofegrp020_3, memofegrp020_4, memofegrp020_5]: + assert _check_memberof(topology_st, member=memofuser1, group=grp) + + +def test_complex_group_scenario_8(topology_st): + """Check the user add operation to the complex membership topology + + :id: d222af17-17a6-48a0-8f22-a38306726a23 + :setup: Standalone instance, + + :: + + /----member ---> G1 ---uniqueMember -------\ + / V + G5 ------------------------>member ---------- --->U1 + | + |----member ---> G2 ---member/uniqueMember -> U2 + |<--uniquemember-/ + | + |----member ---> G3 ---member/uniqueMember -> U3 + |<--uniquemember-/ + |----member ---> G4 ---member/uniqueMember -> U4 + |<--uniquemember-/ + + :steps: + 1. Add user1 to grp020_1 + 2. Check the result membership + :expectedresults: + 1. Success + 2. 
The result should be like this + + :: + + /----member ---> G1 ---member/uniqueMember -\ + / V + G5 ------------------------>member ---------- --->U1 + | + |----member ---> G2 ---member/uniqueMember -> U2 + |<--uniquemember-/ + | + |----member ---> G3 ---member/uniqueMember -> U3 + |<--uniquemember-/ + |----member ---> G4 ---member/uniqueMember -> U4 + |<--uniquemember-/ + + """ + + memofuser1 = _get_user_dn('memofuser1') + memofuser2 = _get_user_dn('memofuser2') + memofuser3 = _get_user_dn('memofuser3') + memofuser4 = _get_user_dn('memofuser4') + + memofegrp020_1 = _get_group_dn('memofegrp020_1') + memofegrp020_2 = _get_group_dn('memofegrp020_2') + memofegrp020_3 = _get_group_dn('memofegrp020_3') + memofegrp020_4 = _get_group_dn('memofegrp020_4') + memofegrp020_5 = _get_group_dn('memofegrp020_5') + verify_post_023(topology_st, memofegrp020_1, memofegrp020_2, memofegrp020_3, memofegrp020_4, memofegrp020_5, + memofuser1, memofuser2, memofuser3, memofuser4) + + # ADD user1 as 'member' of grp20_1 + mods = [(ldap.MOD_ADD, 'member', memofuser1)] + topology_st.standalone.modify_s(ensure_str(memofegrp020_1), mods) + verify_post_024(topology_st, memofegrp020_1, memofegrp020_2, memofegrp020_3, memofegrp020_4, memofegrp020_5, + memofuser1, memofuser2, memofuser3, memofuser4) + + +def verify_post_025(topology_st, memofegrp020_1, memofegrp020_2, memofegrp020_3, memofegrp020_4, memofegrp020_5, + memofuser1, memofuser2, memofuser3, memofuser4): + """ + /----member ---> G1 + / + G5 ------------------------>member ---------- --->U1 + | + |----member ---> G2 + |----member ---> G3 + |----member ---> G4 + + """ + for x in [memofegrp020_1, memofegrp020_2, memofegrp020_3, memofegrp020_4]: + assert _check_memberattr(topology_st, memofegrp020_5, 'member', x) + assert not _check_memberattr(topology_st, memofegrp020_5, 'uniqueMember', x) + for x in [memofegrp020_1, memofegrp020_2, memofegrp020_3, memofegrp020_4]: + assert not _check_memberattr(topology_st, x, 'member', memofegrp020_5) + 
assert not _check_memberattr(topology_st, x, 'uniqueMember', memofegrp020_5) + # check that user[1-4] is only 'uniqueMember' of the grp20_[1-4] + for x in [(memofegrp020_1, memofuser1), + (memofegrp020_2, memofuser2), + (memofegrp020_3, memofuser3), + (memofegrp020_4, memofuser4)]: + assert not _check_memberattr(topology_st, x[0], 'uniqueMember', x[1]) + assert not _check_memberattr(topology_st, x[0], 'member', x[1]) + + for x in [memofegrp020_1, memofegrp020_2, memofegrp020_3, memofegrp020_4, memofuser1]: + assert _check_memberof(topology_st, member=x, group=memofegrp020_5) + for x in [memofuser2, memofuser3, memofuser4]: + assert not _check_memberof(topology_st, member=x, group=memofegrp020_5) + assert _check_memberof(topology_st, member=memofuser1, group=memofegrp020_5) + for user in [memofuser1, memofuser2, memofuser3, memofuser4]: + for grp in [memofegrp020_1, memofegrp020_2, memofegrp020_3, memofegrp020_4]: + assert not _check_memberof(topology_st, member=user, group=grp) + + +def test_complex_group_scenario_9(topology_st): + """Check the massive user deletion from the complex membership topology + + :id: d222af17-17a6-48a0-8f22-a38306726a24 + :setup: Standalone instance, + + :: + + /----member ---> G1 ---member/uniqueMember -\ + / V + G5 ------------------------>member ---------- --->U1 + | + |----member ---> G2 ---member/uniqueMember -> U2 + |<--uniquemember-/ + | + |----member ---> G3 ---member/uniqueMember -> U3 + |<--uniquemember-/ + |----member ---> G4 ---member/uniqueMember -> U4 + |<--uniquemember-/ + + :steps: + 1. Delete user[1-5] as 'member' and 'uniqueMember' from grp20_[1-5] + 2. Check the result membership + :expectedresults: + 1. Success + 2. 
The result should be like this + + :: + + /----member ---> G1 + / + G5 ------------------------>member ---------- --->U1 + | + |----member ---> G2 + |----member ---> G3 + |----member ---> G4 + + """ + + memofuser1 = _get_user_dn('memofuser1') + memofuser2 = _get_user_dn('memofuser2') + memofuser3 = _get_user_dn('memofuser3') + memofuser4 = _get_user_dn('memofuser4') + + memofegrp020_1 = _get_group_dn('memofegrp020_1') + memofegrp020_2 = _get_group_dn('memofegrp020_2') + memofegrp020_3 = _get_group_dn('memofegrp020_3') + memofegrp020_4 = _get_group_dn('memofegrp020_4') + memofegrp020_5 = _get_group_dn('memofegrp020_5') + verify_post_024(topology_st, memofegrp020_1, memofegrp020_2, memofegrp020_3, memofegrp020_4, memofegrp020_5, + memofuser1, memofuser2, memofuser3, memofuser4) + + # ADD inet + # for user in [memofuser1, memofuser2, memofuser3, memofuser4]: + # mods = [(ldap.MOD_ADD, 'objectClass', 'inetUser')] + # topology_st.standalone.modify_s(user, mods) + for x in [(memofegrp020_1, memofuser1), + (memofegrp020_2, memofuser2), + (memofegrp020_3, memofuser3), + (memofegrp020_4, memofuser4)]: + mods = [(ldap.MOD_DELETE, 'member', x[1]), + (ldap.MOD_DELETE, 'uniqueMember', x[1])] + topology_st.standalone.modify_s(ensure_str(x[0]), mods) + """ + /----member ---> G1 + / + G5 ------------------------>member ---------- --->U1 + | + |----member ---> G2 + |<--uniquemember-/ + | + |----member ---> G3 + |<--uniquemember-/ + |----member ---> G4 + |<--uniquemember-/ + """ + + for x in [memofegrp020_2, memofegrp020_3, memofegrp020_4]: + mods = [(ldap.MOD_DELETE, 'uniqueMember', memofegrp020_5)] + topology_st.standalone.modify_s(ensure_str(x), mods) + """ + /----member ---> G1 + / + G5 ------------------------>member ---------- --->U1 + | + |----member ---> G2 + |----member ---> G3 + |----member ---> G4 + + """ + + verify_post_025(topology_st, memofegrp020_1, memofegrp020_2, memofegrp020_3, memofegrp020_4, memofegrp020_5, + memofuser1, memofuser2, memofuser3, memofuser4) + 
+#unstable or untriaged tests, skipped for now +@pytest.mark.flaky(max_runs=2, min_passes=1) +def test_memberof_auto_add_oc(topology_st): + """Test the auto add objectclass (OC) feature. The plugin should add a predefined + objectclass that will allow memberOf to be added to an entry. + + :id: d222af17-17a6-48a0-8f22-a38306726a25 + :setup: Standalone instance + :steps: + 1. Enable dynamic plugins + 2. Enable memberOf plugin + 3. Test that the default add OC works. + 4. Add a group that already includes one user + 5. Assert memberOf on user1 + 6. Delete user1 and the group + 7. Test invalid value (config validation) + 8. Add valid objectclass + 9. Add two users + 10. Add a group that already includes one user + 11. Add a user to the group + :expectedresults: + 1. Success + 2. Success + 3. Success + 4. Success + 5. Success + 6. Success + 7. Success + 8. Success + 9. Success + 10. Success + 11. Success + """ + + # enable dynamic plugins + try: + topology_st.standalone.modify_s(DN_CONFIG, + [(ldap.MOD_REPLACE, + 'nsslapd-dynamic-plugins', + b'on')]) + except ldap.LDAPError as e: + ldap.error('Failed to enable dynamic plugins! ' + e.message['desc']) + assert False + + # Enable the plugin + topology_st.standalone.plugins.enable(name=PLUGIN_MEMBER_OF) + + # Test that the default add OC works. 
+ + try: + topology_st.standalone.add_s(Entry((USER1_DN, + {'objectclass': 'top', + 'objectclass': 'person', + 'objectclass': 'organizationalPerson', + 'objectclass': 'inetorgperson', + 'sn': 'last', + 'cn': 'full', + 'givenname': 'user1', + 'uid': 'user1' + }))) + except ldap.LDAPError as e: + log.fatal('Failed to add user1 entry, error: ' + e.message['desc']) + assert False + + # Add a group(that already includes one user + try: + topology_st.standalone.add_s(Entry((GROUP_DN, + {'objectclass': 'top', + 'objectclass': 'groupOfNames', + 'cn': 'group', + 'member': USER1_DN + }))) + except ldap.LDAPError as e: + log.fatal('Failed to add group entry, error: ' + e.message['desc']) + assert False + + # Assert memberOf on user1 + _check_memberof(topology_st, USER1_DN, GROUP_DN) + + # Reset for the next test .... + topology_st.standalone.delete_s(USER1_DN) + topology_st.standalone.delete_s(GROUP_DN) + + # Test invalid value (config validation) + topology_st.standalone.plugins.enable(name=PLUGIN_MEMBER_OF) + try: + topology_st.standalone.modify_s(MEMBEROF_PLUGIN_DN, + [(ldap.MOD_REPLACE, + 'memberofAutoAddOC', + b'invalid123')]) + log.fatal('Incorrectly added invalid objectclass!') + assert False + except ldap.UNWILLING_TO_PERFORM: + log.info('Correctly rejected invalid objectclass') + except ldap.LDAPError as e: + ldap.error('Unexpected error adding invalid objectclass - error: ' + e.message['desc']) + assert False + + + # Add valid objectclass + topology_st.standalone.plugins.enable(name=PLUGIN_MEMBER_OF) + try: + topology_st.standalone.modify_s(MEMBEROF_PLUGIN_DN, + [(ldap.MOD_REPLACE, + 'memberofAutoAddOC', + b'inetuser')]) + except ldap.LDAPError as e: + log.fatal('Failed to configure memberOf plugin: error ' + e.message['desc']) + assert False + + # Add two users + try: + topology_st.standalone.add_s(Entry((USER1_DN, + {'objectclass': 'top', + 'objectclass': 'person', + 'objectclass': 'organizationalPerson', + 'objectclass': 'inetorgperson', + 'sn': 'last', + 'cn': 
'full', + 'givenname': 'user1', + 'uid': 'user1' + }))) + except ldap.LDAPError as e: + log.fatal('Failed to add user1 entry, error: ' + e.message['desc']) + assert False + + try: + topology_st.standalone.add_s(Entry((USER2_DN, + {'objectclass': 'top', + 'objectclass': 'person', + 'objectclass': 'organizationalPerson', + 'objectclass': 'inetorgperson', + 'sn': 'last', + 'cn': 'full', + 'givenname': 'user2', + 'uid': 'user2' + }))) + except ldap.LDAPError as e: + log.fatal('Failed to add user2 entry, error: ' + e.message['desc']) + assert False + + # Add a group(that already includes one user + try: + topology_st.standalone.add_s(Entry((GROUP_DN, + {'objectclass': 'top', + 'objectclass': 'groupOfNames', + 'cn': 'group', + 'member': USER1_DN + }))) + except ldap.LDAPError as e: + log.fatal('Failed to add group entry, error: ' + e.message['desc']) + assert False + + # Add a user to the group + try: + topology_st.standalone.modify_s(GROUP_DN, + [(ldap.MOD_ADD, + 'member', + ensure_bytes(USER2_DN))]) + except ldap.LDAPError as e: + log.fatal('Failed to add user2 to group: error ' + e.message['desc']) + assert False + + log.info('Test complete.') + + +if __name__ == '__main__': + # Run isolated + # -s for DEBUG mode + CURRENT_FILE = os.path.realpath(__file__) + pytest.main("-s %s" % CURRENT_FILE) diff --git a/dirsrvtests/tests/suites/plugins/pluginpath_validation_test.py b/dirsrvtests/tests/suites/plugins/pluginpath_validation_test.py new file mode 100644 index 0000000..660ceac --- /dev/null +++ b/dirsrvtests/tests/suites/plugins/pluginpath_validation_test.py @@ -0,0 +1,111 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2016 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. 
+# --- END COPYRIGHT BLOCK --- +# +import pytest +from lib389.tasks import * +from lib389.utils import * +from lib389.topologies import topology_st +from lib389.plugins import WhoamiPlugin + +pytestmark = pytest.mark.tier1 + +logging.getLogger(__name__).setLevel(logging.DEBUG) +log = logging.getLogger(__name__) + + +@pytest.mark.ds47384 +@pytest.mark.ds47601 +def test_pluginpath_validation(topology_st): + """Test pluginpath validation: relative and absolute paths + With the inclusion of ticket 47601 - we do allow plugin paths + outside the default location + + :id: 99f1fb2f-051d-4fd9-93d0-592dcd9b4c22 + :setup: Standalone instance + :steps: + 1. Copy the library to a temporary directory + 2. Add valid plugin paths + * using the absolute path to the current library + * using new remote location + 3. Set plugin path back to the default + 4. Check invalid path (no library present) + 5. Check invalid relative path (no library present) + + :expectedresults: + 1. This should pass + 2. This should pass + 3. This should pass + 4. This should fail + 5. 
This should fail + """ + + inst = topology_st.standalone + whoami = WhoamiPlugin(inst) + # /tmp nowadays comes with noexec bit set on some systems + # so instead let's write somewhere where dirsrv user has access + tmp_dir = inst.get_bak_dir() + plugin_dir = inst.get_plugin_dir() + + # Copy the library to our tmp directory + try: + shutil.copy('%s/libwhoami-plugin.so' % plugin_dir, tmp_dir) + except IOError as e: + log.fatal('Failed to copy %s/libwhoami-plugin.so to the tmp directory %s, error: %s' % ( + plugin_dir, tmp_dir, e.strerror)) + assert False + + # + # Test adding valid plugin paths + # + # Try using the absolute path to the current library + whoami.replace('nsslapd-pluginPath', '%s/libwhoami-plugin' % plugin_dir) + + # Try using new remote location + # If SELinux is enabled, plugin can't be loaded as it's not labeled properly + if selinux_present(): + import selinux + if selinux.is_selinux_enabled(): + with pytest.raises(ldap.UNWILLING_TO_PERFORM): + whoami.replace('nsslapd-pluginPath', '%s/libwhoami-plugin' % tmp_dir) + # Label it with lib_t, so it can be executed + # We can't use selinux.setfilecon() here, because py.test needs to have mac_admin capability + # Instead we can call chcon directly: + subprocess.check_call(['/usr/bin/chcon', '-t', 'lib_t', '%s/libwhoami-plugin.so' % tmp_dir]) + # And try to change the path again + whoami.replace('nsslapd-pluginPath', '%s/libwhoami-plugin' % tmp_dir) + else: + whoami.replace('nsslapd-pluginPath', '%s/libwhoami-plugin' % tmp_dir) + + # Set plugin path back to the default + whoami.replace('nsslapd-pluginPath', 'libwhoami-plugin') + + # + # Test invalid path (no library present) + # + with pytest.raises(ldap.UNWILLING_TO_PERFORM): + whoami.replace('nsslapd-pluginPath', '/bin/libwhoami-plugin') + # No exception?! 
This is an error + log.error('Invalid plugin path was incorrectly accepted by the server!') + + # + # Test invalid relative path (no library present) + # + with pytest.raises(ldap.UNWILLING_TO_PERFORM): + whoami.replace('nsslapd-pluginPath', '../libwhoami-plugin') + # No exception?! This is an error + log.error('Invalid plugin path was incorrectly accepted by the server!') + + log.info('Test complete') + + +if __name__ == '__main__': + # Run isolated + # -s for DEBUG mode + CURRENT_FILE = os.path.realpath(__file__) + pytest.main("-s %s" % CURRENT_FILE) + diff --git a/dirsrvtests/tests/suites/plugins/referint_test.py b/dirsrvtests/tests/suites/plugins/referint_test.py new file mode 100644 index 0000000..fda6025 --- /dev/null +++ b/dirsrvtests/tests/suites/plugins/referint_test.py @@ -0,0 +1,149 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2021 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. +# --- END COPYRIGHT BLOCK --- +# +''' +Created on Dec 12, 2019 + +@author: tbordaz +''' +import logging +import pytest +from lib389 import Entry +from lib389.plugins import ReferentialIntegrityPlugin +from lib389._constants import DEFAULT_SUFFIX +from lib389.idm.user import UserAccounts +from lib389.idm.group import Groups +from lib389.topologies import topology_st as topo + +pytestmark = pytest.mark.tier1 + +log = logging.getLogger(__name__) + +ESCAPED_RDN_BASE = "foo\\,oo" +def _user_get_dn(no): + uid = '%s%d' % (ESCAPED_RDN_BASE, no) + dn = 'uid=%s,%s' % (uid, DEFAULT_SUFFIX) + return (uid, dn) + +def add_escaped_user(server, no): + (uid, dn) = _user_get_dn(no) + log.fatal('Adding user (%s): ' % dn) + users = UserAccounts(server, DEFAULT_SUFFIX, None) + user_properties = { + 'objectclass': ['top', 'person', 'organizationalPerson', 'inetOrgPerson', 'posixAccount'], + 'uid': uid, + 'cn' : uid, + 'sn' : uid, + 'uidNumber' : '1000', + 'gidNumber' : '2000', + 'homeDirectory' : '/home/testuser', + } + 
users.create(properties=user_properties) + return dn + +def test_referential_false_failure(topo): + """On MODRDN referential integrity can erroneously fail + + :id: f77aeb80-c4c4-471b-8c1b-4733b714778b + :setup: Standalone Instance + :steps: + 1. Configure the plugin + 2. Create a group + - first member is the one that will be moved + - more than 128 members + - last member is a DN containing escaped char + 3. Rename the first member + :expectedresults: + 1. should succeed + 2. should succeed + 3. should succeed + """ + + inst = topo[0] + + # stop the plugin, and start it + plugin = ReferentialIntegrityPlugin(inst) + plugin.disable() + plugin.enable() + + ############################################################################ + # Configure plugin + ############################################################################ + GROUP_CONTAINER = "ou=groups,%s" % DEFAULT_SUFFIX + plugin.replace('referint-membership-attr', 'member') + plugin.replace('nsslapd-plugincontainerscope', GROUP_CONTAINER) + + ############################################################################ + # Creates a group with members having escaped DN + ############################################################################ + # Add some users and a group + users = UserAccounts(inst, DEFAULT_SUFFIX, None) + user1 = users.create_test_user(uid=1001) + user2 = users.create_test_user(uid=1002) + + groups = Groups(inst, GROUP_CONTAINER, None) + group = groups.create(properties={'cn': 'group'}) + group.add('member', user2.dn) + group.add('member', user1.dn) + + # Add more than 128 members so that referint follows the buggy path + for i in range(130): + escaped_user = add_escaped_user(inst, i) + group.add('member', escaped_user) + + ############################################################################ + # Check that the MODRDN succeeds + ########################################################################### + # Here we need to restart so that member values are taken in the right order 
+ # the last value is the escaped one + inst.restart() + + # Here if the bug is fixed, referential is able to update the member value + user1.rename('uid=new_test_user_1001', newsuperior=DEFAULT_SUFFIX, deloldrdn=False) + + +def test_invalid_referint_log(topo): + """If there is an invalid log line in the referint log, make sure the server + does not crash at startup + + :id: 34807b5a-ab17-4281-ae48-4e3513e19145 + :setup: Standalone Instance + :steps: + 1. Set the referint log delay + 2. Create invalid log + 3. Start the server (no crash) + :expectedresults: + 1. Success + 2. Success + 3. Success + """ + + inst = topo.standalone + + # Set delay - required for log parsing at server startup + plugin = ReferentialIntegrityPlugin(inst) + plugin.enable() + plugin.set_update_delay('2') + logfile = plugin.get_log_file() + inst.restart() + + # Create invalid log + inst.stop() + with open(logfile, 'w') as log_fh: + log_fh.write("CRASH\n") + + # Start the instance + inst.start() + assert inst.status() + + +if __name__ == '__main__': + # Run isolated + # -s for DEBUG mode + CURRENT_FILE = os.path.realpath(__file__) + pytest.main("-s %s" % CURRENT_FILE) diff --git a/dirsrvtests/tests/suites/plugins/rootdn_plugin_test.py b/dirsrvtests/tests/suites/plugins/rootdn_plugin_test.py new file mode 100644 index 0000000..7664625 --- /dev/null +++ b/dirsrvtests/tests/suites/plugins/rootdn_plugin_test.py @@ -0,0 +1,744 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2019 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. 
+# --- END COPYRIGHT BLOCK --- +# +import logging +import socket +import ldap +import pytest +import uuid +import time +from lib389 import DirSrv +from lib389.utils import * +from lib389.tasks import * +from lib389.tools import DirSrvTools +from lib389.topologies import topology_st +from lib389.idm.directorymanager import DirectoryManager +from lib389.plugins import RootDNAccessControlPlugin + + +pytestmark = pytest.mark.tier1 + +logging.getLogger(__name__).setLevel(logging.DEBUG) +log = logging.getLogger(__name__) + +localhost = DirSrvTools.getLocalhost() +hostname = socket.gethostname() + + +@pytest.fixture(scope="function") +def rootdn_cleanup(topology_st): + """Do a cleanup of the config area before the test """ + log.info('Cleaning up the config area') + plugin = RootDNAccessControlPlugin(topology_st.standalone) + plugin.remove_all_allow_host() + plugin.remove_all_deny_host() + plugin.remove_all_allow_ip() + plugin.remove_all_deny_ip() + + +@pytest.fixture(scope="module") +def rootdn_setup(topology_st): + """Initialize our setup to test the Root DN Access Control Plugin + + Test the following access control type: + + - Allowed IP address * + - Denied IP address * + - Specific time window + - Days allowed access + - Allowed host * + - Denied host * + + * means multiple valued + """ + + log.info('Initializing root DN test suite...') + + # Enable dynamic plugins + topology_st.standalone.config.set('nsslapd-dynamic-plugins', 'on') + + # Enable the plugin + global plugin + plugin = RootDNAccessControlPlugin(topology_st.standalone) + plugin.enable() + + log.info('test_rootdn_init: Initialized root DN test suite.') + + +def rootdn_bind(inst, uri=None, fail=False): + """Helper function to test root DN bind + """ + newinst = DirSrv(verbose=False) + args = {SER_PORT: inst.port, + SER_SERVERID_PROP: inst.serverid} + newinst.allocate(args) + newinst.open(uri=uri, connOnly=True) # This binds as root dn + + +def test_rootdn_access_specific_time(topology_st, rootdn_setup, 
rootdn_cleanup, timeout=5): + """Test binding inside and outside of a specific time + + :id: a0ef30e5-538b-46fa-9762-01a4435a15e8 + :setup: Standalone instance, rootdn plugin set up + :steps: + 1. Get the current time, and bump it ahead twohours + 2. Bind as Root DN + 3. Set config to allow the entire day + 4. Bind as Root DN + 5. Cleanup + :expectedresults: + 1. Success + 2. Should fail + 3. Success + 4. Success + 5. Success + """ + + log.info('Running test_rootdn_access_specific_time...') + dm = DirectoryManager(topology_st.standalone) + + # Get the current time, and bump it ahead twohours + current_hour = time.strftime("%H") + if int(current_hour) > 12: + open_time = '0200' + close_time = '0400' + else: + open_time = '1600' + close_time = '1800' + + assert plugin.replace_many(('rootdn-open-time', open_time), + ('rootdn-close-time', close_time)) + + attr_updated = 0 + for i in range(0, timeout): + if (plugin.get_attr_val_utf8('rootdn-open-time') == open_time) and (plugin.get_attr_val_utf8('rootdn-close-time') == close_time): + attr_updated = 1 + break + else: + time.sleep(.5) + + if not attr_updated : + raise Exception ("rootdn-open-time and rootdn-close-time were not updated") + + # Bind as Root DN - should fail + for i in range(0, timeout): + try: + dm.bind() + except ldap.UNWILLING_TO_PERFORM: + break + else: + time.sleep(.5) + + + # Set config to allow the entire day + open_time = '0000' + close_time = '2359' + assert plugin.replace_many(('rootdn-open-time', open_time), + ('rootdn-close-time', close_time)) + + attr_updated = 0 + for i in range(0, timeout): + if (plugin.get_attr_val_utf8('rootdn-open-time') == open_time) and (plugin.get_attr_val_utf8('rootdn-close-time') == close_time): + attr_updated = 1 + break + else: + time.sleep(.5) + + if not attr_updated : + raise Exception ("rootdn-open-time and rootdn-close-time were not updated") + + # Bind as Root DN - should succeed + for i in range(0, timeout): + try: + dm.bind() + break + except: + time.sleep(.5) 
+ + # Cleanup - undo the changes we made so the next test has a clean slate + assert plugin.apply_mods([(ldap.MOD_DELETE, 'rootdn-open-time'), + (ldap.MOD_DELETE, 'rootdn-close-time')]) + + +def test_rootdn_access_day_of_week(topology_st, rootdn_setup, rootdn_cleanup, timeout=5): + """Test the days of week feature + + :id: a0ef30e5-538b-46fa-9762-01a4435a15e1 + :setup: Standalone instance, rootdn plugin set up + :steps: + 1. Set the deny days + 2. Bind as Root DN + 3. Set the allow days + 4. Bind as Root DN + :expectedresults: + 1. Success + 2. Should fail + 3. Success + 4. Success + """ + + log.info('Running test_rootdn_access_day_of_week...') + dm = DirectoryManager(topology_st.standalone) + + days = ('Sun', 'Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat') + day = int(time.strftime("%w", time.gmtime())) + + if day == 6: + # Handle the roll over from Saturday into Sunday + deny_days = days[1] + ', ' + days[2] + allow_days = days[6] + ',' + days[0] + elif day > 3: + deny_days = days[0] + ', ' + days[1] + allow_days = days[day] + ',' + days[day - 1] + else: + deny_days = days[4] + ',' + days[5] + allow_days = days[day] + ',' + days[day + 1] + + log.info('Today: ' + days[day]) + log.info('Allowed days: ' + allow_days) + log.info('Deny days: ' + deny_days) + + # Set the deny days + plugin.set_days_allowed(deny_days) + + attr_updated = 0 + for i in range(0, timeout): + if (str(plugin.get_days_allowed()) == deny_days): + attr_updated = 1 + break + else: + time.sleep(.5) + + if not attr_updated : + raise Exception ("rootdn-days-allowed was not updated") + + # Bind as Root DN - should fail + for i in range(0, timeout): + try: + dm.bind() + except ldap.UNWILLING_TO_PERFORM: + break + else: + time.sleep(.5) + + # Set the allow days + plugin.set_days_allowed(allow_days) + + attr_updated = 0 + for i in range(0, timeout): + if (str(plugin.get_days_allowed()) == allow_days): + attr_updated = 1 + break + else: + time.sleep(.5) + + if not attr_updated : + raise Exception 
("rootdn-days-allowed was not updated") + + # Bind as Root DN - should succeed + for i in range(0, timeout): + try: + dm.bind() + break + except: + time.sleep(.5) + +def test_rootdn_access_denied_ip(topology_st, rootdn_setup, rootdn_cleanup, timeout=5): + """Test denied IP feature - we can just test denying 127.0.0.1 + + :id: a0ef30e5-538b-46fa-9762-01a4435a15e2 + :setup: Standalone instance, rootdn plugin set up + :steps: + 1. Set rootdn-deny-ip to '127.0.0.1' and '::1' + 2. Bind as Root DN + 3. Change the denied IP so root DN succeeds + 4. Bind as Root DN + :expectedresults: + 1. Success + 2. Should fail + 3. Success + 4. Success + """ + + log.info('Running test_rootdn_access_denied_ip...') + plugin.add_deny_ip('127.0.0.1') + plugin.add_deny_ip('::1') + + attr_updated = 0 + for i in range(0, timeout): + if ('127.0.0.1' in str(plugin.get_deny_ip())): + attr_updated = 1 + break + else: + time.sleep(.5) + + if not attr_updated : + raise Exception ("rootdn-deny-ip was not updated") + + # Bind as Root DN - should fail + uri = 'ldap://{}:{}'.format('127.0.0.1', topology_st.standalone.port) + for i in range(0, timeout): + try: + rootdn_bind(topology_st.standalone, uri=uri) + except ldap.UNWILLING_TO_PERFORM: + break + else: + time.sleep(.5) + + # Change the denied IP so root DN succeeds + plugin.apply_mods([(ldap.MOD_REPLACE, 'rootdn-deny-ip', '255.255.255.255')]) + + attr_updated = 0 + for i in range(0, timeout): + if ('255.255.255.255' in str(plugin.get_deny_ip())): + attr_updated = 1 + break + else: + time.sleep(.5) + + if not attr_updated : + raise Exception ("rootdn-deny-ip was not updated") + + # Bind as Root DN - should succeed + for i in range(0, timeout): + try: + rootdn_bind(topology_st.standalone, uri=uri) + break + except: + time.sleep(.5) + + +def test_rootdn_access_denied_host(topology_st, rootdn_setup, rootdn_cleanup, timeout=5): + """Test denied Host feature - we can just test denying localhost + + :id: a0ef30e5-538b-46fa-9762-01a4435a15e3 + :setup: 
Standalone instance, rootdn plugin set up + :steps: + 1. Set rootdn-deny-host to hostname (localhost if not accessable) + 2. Bind as Root DN + 3. Change the denied host so root DN succeeds + 4. Bind as Root DN + :expectedresults: + 1. Success + 2. Should fail + 3. Success + 4. Success + """ + + log.info('Running test_rootdn_access_denied_host...') + hostname = socket.gethostname() + plugin.add_deny_host(hostname) + if localhost != hostname: + plugin.add_deny_host(localhost) + + attr_updated = 0 + for i in range(0, timeout): + if (str(plugin.get_deny_host()) == hostname) or (str(plugin.get_deny_host()) == localhost): + attr_updated = 1 + break + else: + time.sleep(.5) + + if not attr_updated : + raise Exception ("rootdn-deny-host was not updated") + + # Bind as Root DN - should fail + uri = 'ldap://{}:{}'.format(localhost, topology_st.standalone.port) + for i in range(0, timeout): + try: + rootdn_bind(topology_st.standalone, uri=uri) + except ldap.UNWILLING_TO_PERFORM: + break + else: + time.sleep(.5) + + # Change the denied host so root DN bind succeeds + rand_host = 'i.dont.exist.{}'.format(uuid.uuid4()) + plugin.apply_mods([(ldap.MOD_REPLACE, 'rootdn-deny-host', rand_host)]) + + attr_updated = 0 + for i in range(0, timeout): + if (plugin.get_deny_host() == rand_host): + attr_updated = 1 + break + else: + time.sleep(.5) + + if not attr_updated : + raise Exception ("rootdn-deny-host was not updated") + + # Bind as Root DN - should succeed + for i in range(0, timeout): + try: + rootdn_bind(topology_st.standalone, uri=uri) + break + except: + time.sleep(.5) + +def test_rootdn_access_allowed_ip(topology_st, rootdn_setup, rootdn_cleanup, timeout=5): + """Test allowed ip feature + + :id: a0ef30e5-538b-46fa-9762-01a4435a15e4 + :setup: Standalone instance, rootdn plugin set up + :steps: + 1. Set allowed ip to 255.255.255.255 - blocks the Root DN + 2. Bind as Root DN + 3. Allow localhost + 4. Bind as Root DN + :expectedresults: + 1. Success + 2. Should fail + 3. 
Success + 4. Success + """ + + log.info('Running test_rootdn_access_allowed_ip...') + + # Set allowed ip to 255.255.255.255 - blocks the Root DN + plugin.add_allow_ip('255.255.255.255') + + attr_updated = 0 + for i in range(0, timeout): + if ('255.255.255.255' in plugin.get_allow_ip()): + attr_updated = 1 + break + else: + time.sleep(.5) + + if not attr_updated : + raise Exception ("rootdn-allow-ip was not updated") + + # Bind as Root DN - should fail + uri = 'ldap://{}:{}'.format(localhost, topology_st.standalone.port) + for i in range(0, timeout): + try: + rootdn_bind(topology_st.standalone, uri=uri) + except ldap.UNWILLING_TO_PERFORM: + break + else: + time.sleep(.5) + + # Allow localhost + plugin.add_allow_ip('127.0.0.1') + plugin.add_allow_ip('::1') + + attr_updated = 0 + for i in range(0, timeout): + if ('127.0.0.1' in plugin.get_allow_ip()): + attr_updated = 1 + break + else: + time.sleep(.5) + + if not attr_updated : + raise Exception ("rootdn-allow-ip was not updated") + + # Bind as Root DN - should succeed + for i in range(0, timeout): + try: + rootdn_bind(topology_st.standalone, uri=uri) + break + except: + time.sleep(.5) + +def test_rootdn_access_allowed_host(topology_st, rootdn_setup, rootdn_cleanup, timeout=5): + """Test allowed host feature + + :id: a0ef30e5-538b-46fa-9762-01a4435a15e5 + :setup: Standalone instance, rootdn plugin set up + :steps: + 1. Set allowed host to an unknown host - blocks the Root DN + 2. Bind as Root DN + 3. Allow localhost + 4. Bind as Root DN + :expectedresults: + 1. Success + 2. Should fail + 3. Success + 4. 
Success + """ + + log.info('Running test_rootdn_access_allowed_host...') + + # Set allowed host to an unknown host - blocks the Root DN + rand_host = 'i.dont.exist.{}'.format(uuid.uuid4()) + plugin.add_allow_host(rand_host) + + attr_updated = 0 + for i in range(0, timeout): + if (str(plugin.get_allow_host()) == rand_host): + attr_updated = 1 + break + else: + time.sleep(.5) + + if not attr_updated : + raise Exception ("rootdn-allow-host was not updated") + + # Bind as Root DN - should fail + uri = 'ldap://{}:{}'.format(localhost, topology_st.standalone.port) + for i in range(0, timeout): + try: + rootdn_bind(topology_st.standalone, uri=uri) + except ldap.UNWILLING_TO_PERFORM: + break + else: + time.sleep(.5) + + # Allow localhost + plugin.remove_all_allow_host() + plugin.add_allow_host(localhost) + if hostname != localhost: + plugin.add_allow_host(hostname) + + attr_updated = 0 + for i in range(0, timeout): + if (str(plugin.get_allow_host()) == hostname) or (str(plugin.get_allow_host()) == localhost): + attr_updated = 1 + break + else: + time.sleep(.5) + + if not attr_updated : + raise Exception ("rootdn-allow-host was not updated") + + # Bind as Root DN - should succeed + for i in range(0, timeout): + try: + rootdn_bind(topology_st.standalone, uri=uri) + break + except: + time.sleep(.5) + +def test_rootdn_config_validate(topology_st, rootdn_setup, rootdn_cleanup): + """Test plugin configuration validation + + :id: a0ef30e5-538b-46fa-9762-01a4435a15e6 + :setup: Standalone instance, rootdn plugin set up + :steps: + 1. Replace 'rootdn-open-time' with '0000' + 2. Add 'rootdn-open-time': '0000' and 'rootdn-open-time': '0001' + 3. Replace 'rootdn-open-time' with '-1' and 'rootdn-close-time' with '0000' + 4. Replace 'rootdn-open-time' with '2400' and 'rootdn-close-time' with '0000' + 5. Replace 'rootdn-open-time' with 'aaaaa' and 'rootdn-close-time' with '0000' + 6. Replace 'rootdn-close-time' with '0000' + 7. 
Add 'rootdn-close-time': '0000' and 'rootdn-close-time': '0001' + 8. Replace 'rootdn-open-time' with '0000' and 'rootdn-close-time' with '-1' + 9. Replace 'rootdn-open-time' with '0000' and 'rootdn-close-time' with '2400' + 10. Replace 'rootdn-open-time' with '0000' and 'rootdn-close-time' with 'aaaaa' + 11. Add 'rootdn-days-allowed': 'Mon' and 'rootdn-days-allowed': 'Tue' + 12. Replace 'rootdn-days-allowed' with 'Mon1' + 13. Replace 'rootdn-days-allowed' with 'Tue, Mon1' + 14. Replace 'rootdn-days-allowed' with 'm111m' + 15. Replace 'rootdn-days-allowed' with 'Gur' + 16. Replace 'rootdn-allow-ip' with '12.12.Z.12' + 17. Replace 'rootdn-allow-ip' with '123.234.345.456' + 18. Replace 'rootdn-allow-ip' with ':::' + 19. Replace 'rootdn-deny-ip' with '12.12.Z.12' + 20. Replace 'rootdn-deny-ip' with '123.234.345.456' + 21. Replace 'rootdn-deny-ip' with ':::' + 22. Replace 'rootdn-allow-host' with 'host._.com' + 23. Replace 'rootdn-deny-host' with 'host.####.com' + :expectedresults: + 1. Should fail + 2. Should fail + 3. Should fail + 4. Should fail + 5. Should fail + 6. Should fail + 7. Should fail + 8. Should fail + 9. Should fail + 10. Should fail + 11. Should fail + 12. Should fail + 13. Should fail + 14. Should fail + 15. Should fail + 16. Should fail + 17. Should fail + 18. Should fail + 19. Should fail + 20. Should fail + 21. Should fail + 22. Should fail + 23. 
Should fail + """ + + # Test invalid values for all settings + with pytest.raises(ldap.UNWILLING_TO_PERFORM): + log.info('Add just "rootdn-open-time"') + plugin.apply_mods([(ldap.MOD_REPLACE, 'rootdn-open-time', '0000')]) + + log.info('Add multiple "rootdn-open-time"') + plugin.apply_mods([(ldap.MOD_ADD, 'rootdn-open-time', '0000'), + (ldap.MOD_ADD, 'rootdn-open-time', '0001')]) + + log.info('Add invalid "rootdn-open-time" -1 ') + plugin.apply_mods([(ldap.MOD_REPLACE, 'rootdn-open-time', '-1'), + (ldap.MOD_REPLACE, 'rootdn-close-time', '0000')]) + + log.info('Add invalid "rootdn-open-time" 2400') + plugin.apply_mods([(ldap.MOD_REPLACE, 'rootdn-open-time', '2400'), + (ldap.MOD_REPLACE, 'rootdn-close-time', '0000')]) + + log.info('Add invalid "rootdn-open-time" aaaaa') + plugin.apply_mods([(ldap.MOD_REPLACE, 'rootdn-open-time','aaaaa'), + (ldap.MOD_REPLACE, 'rootdn-close-time', '0000')]) + + # Test rootdn-close-time + log.info('Add just "rootdn-close-time"') + plugin.apply_mods([(ldap.MOD_REPLACE, 'rootdn-close-time', '0000')]) + + log.info('Add multiple "rootdn-close-time"') + plugin.apply_mods([(ldap.MOD_ADD, 'rootdn-close-time', '0000'), + (ldap.MOD_ADD, 'rootdn-close-time', '0001')]) + + log.info('Add invalid "rootdn-close-time" -1 ') + plugin.apply_mods([(ldap.MOD_REPLACE, 'rootdn-open-time', '0000'), + (ldap.MOD_REPLACE, 'rootdn-close-time', '-1')]) + + log.info('Add invalid "rootdn-close-time" 2400') + plugin.apply_mods([(ldap.MOD_REPLACE, 'rootdn-open-time', '0000'), + (ldap.MOD_REPLACE, 'rootdn-close-time', '2400')]) + + log.info('Add invalid "rootdn-open-time" aaaaa') + plugin.apply_mods([(ldap.MOD_REPLACE, 'rootdn-open-time','0000'), + (ldap.MOD_REPLACE, 'rootdn-close-time','aaaaa')]) + + # Test days allowed + log.info('Add multiple "rootdn-days-allowed"') + plugin.apply_mods([(ldap.MOD_ADD, 'rootdn-days-allowed', 'Mon'), + (ldap.MOD_ADD, 'rootdn-days-allowed', 'Tue')]) + + log.info('Add invalid "rootdn-days-allowed"') + 
plugin.apply_mods([(ldap.MOD_REPLACE, 'rootdn-days-allowed', 'Mon1')]) + plugin.apply_mods([(ldap.MOD_REPLACE, 'rootdn-days-allowed', 'Tue, Mon1')]) + plugin.apply_mods([(ldap.MOD_REPLACE, 'rootdn-days-allowed', 'm111m')]) + plugin.apply_mods([(ldap.MOD_REPLACE, 'rootdn-days-allowed', 'Gur')]) + + # Test allow ips + log.info('Add invalid "rootdn-allow-ip"') + plugin.apply_mods([(ldap.MOD_REPLACE, 'rootdn-allow-ip', '12.12.Z.12')]) + plugin.apply_mods([(ldap.MOD_REPLACE, 'rootdn-allow-ip', '123.234.345.456')]) + plugin.apply_mods([(ldap.MOD_REPLACE, 'rootdn-allow-ip', ':::')]) + + # Test deny ips + log.info('Add invalid "rootdn-deny-ip"') + plugin.apply_mods([(ldap.MOD_REPLACE, 'rootdn-deny-ip', '12.12.Z.12')]) + plugin.apply_mods([(ldap.MOD_REPLACE, 'rootdn-deny-ip', '123.234.345.456')]) + plugin.apply_mods([(ldap.MOD_REPLACE, 'rootdn-deny-ip', ':::')]) + + # Test allow hosts + log.info('Add invalid "rootdn-allow-host"') + plugin.apply_mods([(ldap.MOD_REPLACE, 'rootdn-allow-host', 'host._.com')]) + + # Test deny hosts + log.info('Add invalid "rootdn-deny-host"') + plugin.apply_mods([(ldap.MOD_REPLACE, 'rootdn-deny-host', 'host.####.com')]) + + +@pytest.mark.ds50800 +@pytest.mark.bz1807537 +@pytest.mark.xfail(ds_is_older('1.3.11', '1.4.3.5'), reason="May fail because of bz1807537") +def test_rootdn_access_denied_ip_wildcard(topology_st, rootdn_setup, rootdn_cleanup, timeout=5): + """Test denied IP feature with a wildcard + + :id: 73c74f62-9ac2-4bb6-8a63-bacc8d8bbf93 + :setup: Standalone instance, rootdn plugin set up + :steps: + 1. Set rootdn-deny-ip to '127.*' + 2. Bind as Root DN + 3. Change the denied IP so root DN succeeds + 4. Bind as Root DN + :expectedresults: + 1. Success + 2. Should fail + 3. Success + 4. 
Success + """ + + log.info('Running test_rootdn_access_denied_ip_wildcard...') + + plugin.add_deny_ip('127.*') + + # Bind as Root DN - should fail + uri = 'ldap://{}:{}'.format('127.0.0.1', topology_st.standalone.port) + for i in range(0, timeout): + try: + rootdn_bind(topology_st.standalone, uri=uri) + except ldap.UNWILLING_TO_PERFORM: + break + else: + time.sleep(.5) + + # Change the denied IP so root DN succeeds + plugin.apply_mods([(ldap.MOD_REPLACE, 'rootdn-deny-ip', '255.255.255.255')]) + + # Bind as Root DN - should succeed + for i in range(0, timeout): + try: + rootdn_bind(topology_st.standalone, uri=uri) + break + except: + time.sleep(.5) + + +@pytest.mark.ds50800 +@pytest.mark.bz1807537 +@pytest.mark.xfail(ds_is_older('1.3.11', '1.4.3.5'), reason="May fail because of bz1807537") +def test_rootdn_access_allowed_ip_wildcard(topology_st, rootdn_setup, rootdn_cleanup, timeout=5): + """Test allowed ip feature + + :id: c3e22c61-9ed2-4e89-8243-6ff686ecad9b + :setup: Standalone instance, rootdn plugin set up + :steps: + 1. Set allowed ip to 255.255.255.255 - blocks the Root DN + 2. Bind as Root DN + 3. Allow 127.* + 4. Bind as Root DN + :expectedresults: + 1. Success + 2. Should fail + 3. Success + 4. 
Success + """ + + log.info('Running test_rootdn_access_allowed_ip...') + + # Set allowed ip to 255.255.255.255 - blocks the Root DN + plugin.add_allow_ip('255.255.255.255') + time.sleep(.5) + + # Bind as Root DN - should fail + uri = 'ldap://{}:{}'.format('127.0.0.1', topology_st.standalone.port) + for i in range(0, timeout): + try: + rootdn_bind(topology_st.standalone, uri=uri) + except ldap.UNWILLING_TO_PERFORM: + break + else: + time.sleep(.5) + + # Allow localhost + plugin.add_allow_ip('127.*') + + # Bind as Root DN - should succeed + for i in range(0, timeout): + try: + rootdn_bind(topology_st.standalone, uri=uri) + break + except: + time.sleep(.5) + + +if __name__ == '__main__': + # Run isolated + # -s for DEBUG mode + CURRENT_FILE = os.path.realpath(__file__) + pytest.main("-s %s" % CURRENT_FILE) diff --git a/dirsrvtests/tests/suites/psearch/__init__.py b/dirsrvtests/tests/suites/psearch/__init__.py new file mode 100644 index 0000000..a928609 --- /dev/null +++ b/dirsrvtests/tests/suites/psearch/__init__.py @@ -0,0 +1,3 @@ +""" + :Requirement: 389-ds-base: Persistent Search control +""" diff --git a/dirsrvtests/tests/suites/psearch/psearch_test.py b/dirsrvtests/tests/suites/psearch/psearch_test.py new file mode 100644 index 0000000..8fe8ff7 --- /dev/null +++ b/dirsrvtests/tests/suites/psearch/psearch_test.py @@ -0,0 +1,75 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2021 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. 
+# --- END COPYRIGHT BLOCK --- +# +import ldap +import os +import pytest +from lib389._constants import DEFAULT_SUFFIX +from lib389.topologies import topology_st +from lib389.idm.group import Groups +from ldap.controls.psearch import PersistentSearchControl,EntryChangeNotificationControl + +pytestmark = pytest.mark.tier1 + +def _run_psearch(inst, msg_id): + """Run a search with EntryChangeNotificationControl""" + + results = [] + while True: + try: + _, data, _, _, _, _ = inst.result4(msgid=msg_id, all=0, timeout=1.0, add_ctrls=1, add_intermediates=1, + resp_ctrl_classes={EntryChangeNotificationControl.controlType:EntryChangeNotificationControl}) + # See if there are any entry changes + for dn, entry, srv_ctrls in data: + ecn_ctrls = filter(lambda c: c.controlType == EntryChangeNotificationControl.controlType, srv_ctrls) + if ecn_ctrls: + inst.log.info('%s has changed!' % dn) + results.append(dn) + except ldap.TIMEOUT: + # There are no more results, so we timeout. + inst.log.info('No more results') + return results + + +def test_psearch(topology_st): + """Check basic Persistent Search control functionality + + :id: 4b395ef4-c3ff-49d1-a680-b9fdffa633bd + :setup: Standalone instance + :steps: + 1. Run an extended search with a Persistent Search control + 2. Create a new group (could be any entry) + 3. Run an extended search with a Persistent Search control again + 4. Check that entry DN is in the result + :expectedresults: + 1. Operation should be successful + 2. Group should be successfully created + 3. Operation should be successful + 4. 
Entry DN should be in the result + """ + + # Create the search control + psc = PersistentSearchControl() + # do a search extended with the control + msg_id = topology_st.standalone.search_ext(base=DEFAULT_SUFFIX, scope=ldap.SCOPE_SUBTREE, attrlist=['*'], serverctrls=[psc]) + # Get the result for the message id with result4 + _run_psearch(topology_st.standalone, msg_id) + # Change an entry / add one + groups = Groups(topology_st.standalone, DEFAULT_SUFFIX) + group = groups.create(properties={'cn': 'group1', 'description': 'testgroup'}) + # Now run the result again and see what's there. + results = _run_psearch(topology_st.standalone, msg_id) + # assert our group is in the changeset. + assert(group.dn == results[0]) + + +if __name__ == '__main__': + # Run isolated + # -s for DEBUG mode + CURRENT_FILE = os.path.realpath(__file__) + pytest.main("-s %s" % CURRENT_FILE) diff --git a/dirsrvtests/tests/suites/pwp_storage/__init__.py b/dirsrvtests/tests/suites/pwp_storage/__init__.py new file mode 100644 index 0000000..314d179 --- /dev/null +++ b/dirsrvtests/tests/suites/pwp_storage/__init__.py @@ -0,0 +1,3 @@ +""" + :Requirement: 389-ds-base: Password Storage Scheme +""" diff --git a/dirsrvtests/tests/suites/pwp_storage/storage_test.py b/dirsrvtests/tests/suites/pwp_storage/storage_test.py new file mode 100644 index 0000000..ed0dd9e --- /dev/null +++ b/dirsrvtests/tests/suites/pwp_storage/storage_test.py @@ -0,0 +1,165 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2019 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. 
+# --- END COPYRIGHT BLOCK ---- + + +""" +This file contains the test for password storage scheme +""" + +import os +import subprocess +import shutil +import pytest + +from lib389.topologies import topology_st as topo +from lib389.idm.user import UserAccounts, UserAccount +from lib389._constants import DEFAULT_SUFFIX +from lib389.config import Config +from lib389.password_plugins import PBKDF2Plugin, SSHA512Plugin +from lib389.utils import ds_is_older + +pytestmark = pytest.mark.tier1 + + +def user_config(topo, field_value): + """ + Will set storage schema and create user. + """ + Config(topo.standalone).replace("passwordStorageScheme", field_value) + user = UserAccounts(topo.standalone, DEFAULT_SUFFIX).create_test_user() + user.set('userpassword', 'ItsMeAnuj') + return user + + +LIST_FOR_PARAMETERIZATION = ["CRYPT", "SHA", "SSHA", "SHA256", "SSHA256", + "SHA384", "SSHA384", "SHA512", "SSHA512", "MD5", "PBKDF2_SHA256"] + + +@pytest.mark.parametrize("value", LIST_FOR_PARAMETERIZATION, ids=LIST_FOR_PARAMETERIZATION) +def test_check_password_scheme(topo, value): + """Check all password scheme. + + :id: 196bccfc-33a6-11ea-a2a5-8c16451d917b + :parametrized: yes + :setup: Standalone + :steps: + 1. Change password scheme and create user with password. + 2. check password scheme is set . + 3. Delete user + :expectedresults: + 1. Pass + 2. Pass + 3. Pass + """ + user = user_config(topo, value) + assert '{' + f'{value.lower()}' + '}' in \ + UserAccount(topo.standalone, user.dn).get_attr_val_utf8('userpassword').lower() + user.delete() + + +def test_clear_scheme(topo): + """Check clear password scheme. + + :id: 2420aadc-33a6-11ea-b59a-8c16451d917b + :setup: Standalone + :steps: + 1. Change password scheme and create user with password. + 2. check password scheme is set . + 3. Delete user + :expectedresults: + 1. Pass + 2. Pass + 3. 
Pass + """ + user = user_config(topo, "CLEAR") + assert "ItsMeAnuj" in UserAccount(topo.standalone, user.dn).get_attr_val_utf8('userpassword') + user.delete() + + +def test_check_two_scheme(topo): + """Check password scheme SHA and CRYPT + + :id: 2b677f1e-33a6-11ea-a371-8c16451d917b + :setup: Standalone + :steps: + 1. Change password scheme and create user with password. + 2. check password scheme is set . + 3. Delete user + :expectedresults: + 1. Pass + 2. Pass + 3. Pass + """ + for schema, value in [("nsslapd-rootpwstoragescheme", "SHA"), + ("passwordStorageScheme", "CRYPT")]: + Config(topo.standalone).replace(schema, value) + topo.standalone.restart() + user = UserAccounts(topo.standalone, DEFAULT_SUFFIX).create_test_user() + user.set('userpassword', 'ItsMeAnuj') + assert '{' + f'{"CRYPT".lower()}' + '}' \ + in UserAccount(topo.standalone, user.dn).get_attr_val_utf8('userpassword').lower() + user.delete() + +@pytest.mark.skipif(ds_is_older('1.4'), reason="Not implemented") +def test_check_pbkdf2_sha256(topo): + """Check password scheme PBKDF2_SHA256. + + :id: 31612e7e-33a6-11ea-a750-8c16451d917b + :setup: Standalone + :steps: + 1. Try to delete PBKDF2_SHA256. + 2. Should not deleted PBKDF2_SHA256 and server should up. + :expectedresults: + 1. Pass + 2. Pass + """ + value = 'PBKDF2_SHA256' + user = user_config(topo, value) + assert '{' + f'{value.lower()}' + '}' in \ + UserAccount(topo.standalone, user.dn).get_attr_val_utf8('userpassword').lower() + plg = PBKDF2Plugin(topo.standalone) + plg._protected = False + plg.delete() + topo.standalone.restart() + assert Config(topo.standalone).get_attr_val_utf8('passwordStorageScheme') == 'PBKDF2_SHA256' + assert topo.standalone.status() + user.delete() + + +def test_check_ssha512(topo): + """Check password scheme SSHA512. + + :id: 9db023d2-33a1-11ea-b68c-8c16451d917b + :setup: Standalone + :steps: + 1. Try to delete SSHA512Plugin. + 2. Should deleted SSHA512Plugin and server should not up. + 3. 
Restore dse file to recover + :expectedresults: + 1. Pass + 2. Pass + 3. Pass + """ + value = 'SSHA512' + config_dir = topo.standalone.get_config_dir() + user = user_config(topo, value) + assert '{' + f'{value.lower()}' + '}' in \ + UserAccount(topo.standalone, user.dn).get_attr_val_utf8('userpassword').lower() + plg = SSHA512Plugin(topo.standalone) + plg._protected = False + plg.delete() + with pytest.raises(subprocess.CalledProcessError): + topo.standalone.restart() + shutil.copy(config_dir + '/dse.ldif.startOK', config_dir + '/dse.ldif') + topo.standalone.restart() + user.delete() + + +if __name__ == "__main__": + CURRENT_FILE = os.path.realpath(__file__) + pytest.main("-s -v %s" % CURRENT_FILE) diff --git a/dirsrvtests/tests/suites/referint_plugin/__init__.py b/dirsrvtests/tests/suites/referint_plugin/__init__.py new file mode 100644 index 0000000..00e6e90 --- /dev/null +++ b/dirsrvtests/tests/suites/referint_plugin/__init__.py @@ -0,0 +1,3 @@ +""" + :Requirement: 389-ds-base: Referential Integrity Plugin +""" diff --git a/dirsrvtests/tests/suites/referint_plugin/rename_test.py b/dirsrvtests/tests/suites/referint_plugin/rename_test.py new file mode 100644 index 0000000..0a5f7fb --- /dev/null +++ b/dirsrvtests/tests/suites/referint_plugin/rename_test.py @@ -0,0 +1,182 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2022 William Brown +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. 
+# --- END COPYRIGHT BLOCK --- +# + +import pytest +import time +from lib389._constants import DEFAULT_SUFFIX +from lib389.topologies import topology_m2 + +from lib389.replica import ReplicationManager +from lib389.idm.group import Groups +from lib389.idm.user import nsUserAccounts +from lib389.idm.organizationalunit import OrganizationalUnit as OrganisationalUnit + +from lib389.plugins import AutoMembershipPlugin, ReferentialIntegrityPlugin, AutoMembershipDefinitions, MemberOfPlugin + +pytestmark = pytest.mark.tier2 + +UCOUNT = 400 + +def _enable_plugins(inst, group_dn): + # Enable automember + amp = AutoMembershipPlugin(inst) + amp.enable() + + # Create the automember definition + automembers = AutoMembershipDefinitions(inst) + + automember = automembers.create(properties={ + 'cn': 'testgroup_definition', + 'autoMemberScope': DEFAULT_SUFFIX, + 'autoMemberFilter': 'objectclass=nsAccount', + 'autoMemberDefaultGroup': group_dn, + 'autoMemberGroupingAttr': 'member:dn', + }) + + # Enable MemberOf + mop = MemberOfPlugin(inst) + mop.enable() + + # Enable referint + rip = ReferentialIntegrityPlugin(inst) + # We only need to enable the plugin, the default configuration is sane and + # correctly coveres member as an enforced attribute. + rip.enable() + + # Restart to make sure it's enabled and good to go. + inst.restart() + +def test_rename_large_subtree(topology_m2): + """ + A report stated that the following configuration would lead + to an operation failure: + + ou=int,ou=account,dc=... + ou=s1,ou=int,ou=account,dc=... + ou=s2,ou=int,ou=account,dc=... + + rename ou=s1 to re-parent to ou=account, leaving: + + ou=int,ou=account,dc=... + ou=s1,ou=account,dc=... + ou=s2,ou=account,dc=... + + The ou=s1 if it has < 100 entries below, is able to be reparented. + + If ou=s1 has > 400 entries, it fails. + + Other conditions was the presence of referential integrity - so one would + assume that all users under s1 are a member of some group external to this. 
+ + :id: 5915c38d-b3c2-4b7c-af76-8a1e002e27f7 + + :setup: standalone instance + + :steps: 1. Enable automember plugin + 2. Add UCOUNT users, and ensure they are members of a group. + 3. Enable refer-int plugin + 4. Move ou=s1 to a new parent + + :expectedresults: + 1. The plugin is enabled + 2. The users are members of the group + 3. The plugin is enabled + 4. The rename operation of ou=s1 succeeds + """ + + st = topology_m2.ms["supplier1"] + m2 = topology_m2.ms["supplier2"] + + # Create a default group + gps = Groups(st, DEFAULT_SUFFIX) + # Keep the group so we can get it's DN out. + group = gps.create(properties={ + 'cn': 'default_group' + }) + + _enable_plugins(st, group.dn) + _enable_plugins(m2, group.dn) + + # Now unlike normal, we bypass the plural-create method, because we need control + # over the exact DN of the OU to create. + # Create the ou=account + + # We don't need to set a DN here because ... + ou_account = OrganisationalUnit(st) + + # It's set in the .create step. + ou_account.create( + basedn = DEFAULT_SUFFIX, + properties={ + 'ou': 'account' + }) + # create the ou=int,ou=account + ou_int = OrganisationalUnit(st) + ou_int.create( + basedn = ou_account.dn, + properties={ + 'ou': 'int' + }) + # Create the ou=s1,ou=int,ou=account + ou_s1 = OrganisationalUnit(st) + ou_s1.create( + basedn = ou_int.dn, + properties={ + 'ou': 's1' + }) + + # Pause replication + repl = ReplicationManager(DEFAULT_SUFFIX) + repl.disable_to_supplier(m2, [st, ]) + + # Create the users 1 -> UCOUNT in ou=s1 + nsu = nsUserAccounts(st, basedn=ou_s1.dn, rdn=None) + for i in range(1000, 1000 + UCOUNT): + nsu.create_test_user(uid=i) + + # Enable replication + + repl.enable_to_supplier(m2, [st, ]) + + # Assert they are in the group as we expect + members = group.get_attr_vals_utf8('member') + assert len(members) == UCOUNT + + # Wait for replication + repl.wait_for_replication(st, m2, timeout=60) + + for i in range(0, 5): + # Move ou=s1 to ou=account as parent. 
We have to provide the rdn, + # even though it's not changing. + ou_s1.rename('ou=s1', newsuperior=ou_account.dn) + time.sleep(2) + + members = group.get_attr_vals_utf8('member') + assert len(members) == UCOUNT + # Check that we really did refer-int properly, and ou=int is not in the members. + for member in members: + assert 'ou=int' not in member + + # Now move it back + ou_s1.rename('ou=s1', newsuperior=ou_int.dn) + time.sleep(2) + members = group.get_attr_vals_utf8('member') + assert len(members) == UCOUNT + for member in members: + assert 'ou=int' in member + + # Check everythig on the other side is good. + repl.wait_for_replication(st, m2, timeout=60) + + group2 = Groups(m2, DEFAULT_SUFFIX).get('default_group') + + members = group2.get_attr_vals_utf8('member') + assert len(members) == UCOUNT + for member in members: + assert 'ou=int' in member diff --git a/dirsrvtests/tests/suites/replication/__init__.py b/dirsrvtests/tests/suites/replication/__init__.py new file mode 100644 index 0000000..39c4723 --- /dev/null +++ b/dirsrvtests/tests/suites/replication/__init__.py @@ -0,0 +1,21 @@ +""" + :Requirement: 389-ds-base: Replication +""" +import time +import ldap +from lib389._constants import DEFAULT_SUFFIX + + +def get_repl_entries(topo, entry_name, attr_list): + """Get a list of test entries from all suppliers""" + + entries_list = [] + + time.sleep(10) + + for inst in topo.all_insts.values(): + entries = inst.search_s(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE, "uid={}".format(entry_name), attr_list) + entries_list += entries + + return entries_list + diff --git a/dirsrvtests/tests/suites/replication/acceptance_test.py b/dirsrvtests/tests/suites/replication/acceptance_test.py new file mode 100644 index 0000000..ecfba2e --- /dev/null +++ b/dirsrvtests/tests/suites/replication/acceptance_test.py @@ -0,0 +1,717 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2017 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). 
+# See LICENSE for details. +# --- END COPYRIGHT BLOCK --- +# +import pytest +import logging +import time +from lib389.replica import Replicas +from lib389.tasks import * +from lib389.utils import * +from lib389.topologies import topology_m4 as topo_m4 +from lib389.topologies import topology_m2 as topo_m2 +from . import get_repl_entries +from lib389.idm.user import UserAccount +from lib389.replica import ReplicationManager, Changelog +from lib389._constants import * + +pytestmark = pytest.mark.tier0 + +TEST_ENTRY_NAME = 'mmrepl_test' +TEST_ENTRY_DN = 'uid={},{}'.format(TEST_ENTRY_NAME, DEFAULT_SUFFIX) +NEW_SUFFIX_NAME = 'test_repl' +NEW_SUFFIX = 'o={}'.format(NEW_SUFFIX_NAME) +NEW_BACKEND = 'repl_base' + +DEBUGGING = os.getenv("DEBUGGING", default=False) +if DEBUGGING: + logging.getLogger(__name__).setLevel(logging.DEBUG) +else: + logging.getLogger(__name__).setLevel(logging.INFO) +log = logging.getLogger(__name__) + + +@pytest.fixture(scope="function") +def create_entry(topo_m4, request): + """Add test entry to supplier1""" + + log.info('Adding entry {}'.format(TEST_ENTRY_DN)) + + test_user = UserAccount(topo_m4.ms["supplier1"], TEST_ENTRY_DN) + if test_user.exists(): + log.info('Deleting entry {}'.format(TEST_ENTRY_DN)) + test_user.delete() + test_user.create(properties={ + 'uid': TEST_ENTRY_NAME, + 'cn': TEST_ENTRY_NAME, + 'sn': TEST_ENTRY_NAME, + 'userPassword': TEST_ENTRY_NAME, + 'uidNumber' : '1000', + 'gidNumber' : '2000', + 'homeDirectory' : '/home/mmrepl_test', + }) + +@pytest.fixture(scope="function") +def new_suffix(topo_m4, request): + """Add a new suffix and enable a replication on it""" + + for num in range(1, 5): + log.info('Adding suffix:{} and backend: {} to supplier{}'.format(NEW_SUFFIX, NEW_BACKEND, num)) + topo_m4.ms["supplier{}".format(num)].backend.create(NEW_SUFFIX, {BACKEND_NAME: NEW_BACKEND}) + topo_m4.ms["supplier{}".format(num)].mappingtree.create(NEW_SUFFIX, NEW_BACKEND) + + try: + 
topo_m4.ms["supplier{}".format(num)].add_s(Entry((NEW_SUFFIX, { + 'objectclass': 'top', + 'objectclass': 'organization', + 'o': NEW_SUFFIX_NAME, + 'description': NEW_SUFFIX_NAME + }))) + except ldap.LDAPError as e: + log.error('Failed to add suffix ({}): error ({})'.format(NEW_SUFFIX, e.message['desc'])) + raise + + def fin(): + for num in range(1, 5): + log.info('Deleting suffix:{} and backend: {} from supplier{}'.format(NEW_SUFFIX, NEW_BACKEND, num)) + topo_m4.ms["supplier{}".format(num)].mappingtree.delete(NEW_SUFFIX) + topo_m4.ms["supplier{}".format(num)].backend.delete(NEW_SUFFIX) + + request.addfinalizer(fin) + + +def test_add_entry(topo_m4, create_entry): + """Check that entries are replicated after add operation + + :id: 024250f1-5f7e-4f3b-a9f5-27741e6fd405 + :setup: Four suppliers replication setup, an entry + :steps: + 1. Check entry on all other suppliers + :expectedresults: + 1. The entry should be replicated to all suppliers + """ + + entries = get_repl_entries(topo_m4, TEST_ENTRY_NAME, ["uid"]) + assert all(entries), "Entry {} wasn't replicated successfully".format(TEST_ENTRY_DN) + + +def test_modify_entry(topo_m4, create_entry): + """Check that entries are replicated after modify operation + + :id: 36764053-622c-43c2-a132-d7a3ab7d9aaa + :setup: Four suppliers replication setup, an entry + :steps: + 1. Modify the entry on supplier1 - add attribute + 2. Wait for replication to happen + 3. Check entry on all other suppliers + 4. Modify the entry on supplier1 - replace attribute + 5. Wait for replication to happen + 6. Check entry on all other suppliers + 7. Modify the entry on supplier1 - delete attribute + 8. Wait for replication to happen + 9. Check entry on all other suppliers + :expectedresults: + 1. Attribute should be successfully added + 2. Some time should pass + 3. The change should be present on all suppliers + 4. Attribute should be successfully replaced + 5. Some time should pass + 6. The change should be present on all suppliers + 7. 
Attribute should be successfully deleted + 8. Some time should pass + 9. The change should be present on all suppliers + """ + if DEBUGGING: + sleep_time = 8 + else: + sleep_time = 2 + + log.info('Modifying entry {} - add operation'.format(TEST_ENTRY_DN)) + + m1 = topo_m4.ms["supplier1"] + m2 = topo_m4.ms["supplier2"] + m3 = topo_m4.ms["supplier3"] + m4 = topo_m4.ms["supplier4"] + repl = ReplicationManager(DEFAULT_SUFFIX) + + test_user = UserAccount(topo_m4.ms["supplier1"], TEST_ENTRY_DN) + test_user.add('mail', '{}@redhat.com'.format(TEST_ENTRY_NAME)) + repl.wait_for_replication(m1, m2) + repl.wait_for_replication(m1, m3) + repl.wait_for_replication(m1, m4) + + all_user = topo_m4.all_get_dsldapobject(TEST_ENTRY_DN, UserAccount) + for u in all_user: + assert "{}@redhat.com".format(TEST_ENTRY_NAME) in u.get_attr_vals_utf8('mail') + + log.info('Modifying entry {} - replace operation'.format(TEST_ENTRY_DN)) + test_user.replace('mail', '{}@greenhat.com'.format(TEST_ENTRY_NAME)) + repl.wait_for_replication(m1, m2) + repl.wait_for_replication(m1, m3) + repl.wait_for_replication(m1, m4) + + all_user = topo_m4.all_get_dsldapobject(TEST_ENTRY_DN, UserAccount) + for u in all_user: + assert "{}@greenhat.com".format(TEST_ENTRY_NAME) in u.get_attr_vals_utf8('mail') + + log.info('Modifying entry {} - delete operation'.format(TEST_ENTRY_DN)) + test_user.remove('mail', '{}@greenhat.com'.format(TEST_ENTRY_NAME)) + repl.wait_for_replication(m1, m2) + repl.wait_for_replication(m1, m3) + repl.wait_for_replication(m1, m4) + + all_user = topo_m4.all_get_dsldapobject(TEST_ENTRY_DN, UserAccount) + for u in all_user: + assert "{}@greenhat.com".format(TEST_ENTRY_NAME) not in u.get_attr_vals_utf8('mail') + + +def test_delete_entry(topo_m4, create_entry): + """Check that entry deletion is replicated after delete operation + + :id: 18437262-9d6a-4b98-a47a-6182501ab9bc + :setup: Four suppliers replication setup, an entry + :steps: + 1. Delete the entry from supplier1 + 2. 
Check entry on all other suppliers + :expectedresults: + 1. The entry should be deleted + 2. The change should be present on all suppliers + """ + + log.info('Deleting entry {} during the test'.format(TEST_ENTRY_DN)) + topo_m4.ms["supplier1"].delete_s(TEST_ENTRY_DN) + if DEBUGGING: + time.sleep(8) + else: + time.sleep(1) + entries = get_repl_entries(topo_m4, TEST_ENTRY_NAME, ["uid"]) + assert not entries, "Entry deletion {} wasn't replicated successfully".format(TEST_ENTRY_DN) + + +@pytest.mark.parametrize("delold", [0, 1]) +def test_modrdn_entry(topo_m4, create_entry, delold): + """Check that entries are replicated after modrdn operation + + :id: 02558e6d-a745-45ae-8d88-34fe9b16adc9 + :parametrized: yes + :setup: Four suppliers replication setup, an entry + :steps: + 1. Make modrdn operation on entry on supplier1 with both delold 1 and 0 + 2. Check entry on all other suppliers + :expectedresults: + 1. Modrdn operation should be successful + 2. The change should be present on all suppliers + """ + + newrdn_name = 'newrdn' + newrdn_dn = 'uid={},{}'.format(newrdn_name, DEFAULT_SUFFIX) + log.info('Modify entry RDN {}'.format(TEST_ENTRY_DN)) + try: + topo_m4.ms["supplier1"].modrdn_s(TEST_ENTRY_DN, 'uid={}'.format(newrdn_name), delold) + except ldap.LDAPError as e: + log.error('Failed to modrdn entry (%s): error (%s)' % (TEST_ENTRY_DN, + e.message['desc'])) + raise e + + try: + entries_new = get_repl_entries(topo_m4, newrdn_name, ["uid"]) + assert all(entries_new), "Entry {} wasn't replicated successfully".format(newrdn_name) + if delold == 0: + entries_old = get_repl_entries(topo_m4, TEST_ENTRY_NAME, ["uid"]) + assert all(entries_old), "Entry with old rdn {} wasn't replicated successfully".format(TEST_ENTRY_DN) + else: + entries_old = get_repl_entries(topo_m4, TEST_ENTRY_NAME, ["uid"]) + assert not entries_old, "Entry with old rdn {} wasn't removed in replicas successfully".format( + TEST_ENTRY_DN) + finally: + log.info('Remove entry with new RDN {}'.format(newrdn_dn)) 
+ topo_m4.ms["supplier1"].delete_s(newrdn_dn) + + +def test_modrdn_after_pause(topo_m4): + """Check that changes are properly replicated after replica pause + + :id: 6271dc9c-a993-4a9e-9c6d-05650cdab282 + :setup: Four suppliers replication setup, an entry + :steps: + 1. Pause all replicas + 2. Make modrdn operation on entry on supplier1 + 3. Resume all replicas + 4. Wait for replication to happen + 5. Check entry on all other suppliers + :expectedresults: + 1. Replicas should be paused + 2. Modrdn operation should be successful + 3. Replicas should be resumed + 4. Some time should pass + 5. The change should be present on all suppliers + """ + + if DEBUGGING: + sleep_time = 8 + else: + sleep_time = 3 + + newrdn_name = 'newrdn' + newrdn_dn = 'uid={},{}'.format(newrdn_name, DEFAULT_SUFFIX) + + log.info('Adding entry {}'.format(TEST_ENTRY_DN)) + try: + topo_m4.ms["supplier1"].add_s(Entry((TEST_ENTRY_DN, { + 'objectclass': 'top person'.split(), + 'objectclass': 'organizationalPerson', + 'objectclass': 'inetorgperson', + 'cn': TEST_ENTRY_NAME, + 'sn': TEST_ENTRY_NAME, + 'uid': TEST_ENTRY_NAME + }))) + except ldap.LDAPError as e: + log.error('Failed to add entry (%s): error (%s)' % (TEST_ENTRY_DN, + e.message['desc'])) + raise e + + log.info('Pause all replicas') + topo_m4.pause_all_replicas() + + log.info('Modify entry RDN {}'.format(TEST_ENTRY_DN)) + try: + topo_m4.ms["supplier1"].modrdn_s(TEST_ENTRY_DN, 'uid={}'.format(newrdn_name)) + except ldap.LDAPError as e: + log.error('Failed to modrdn entry (%s): error (%s)' % (TEST_ENTRY_DN, + e.message['desc'])) + raise e + + log.info('Resume all replicas') + topo_m4.resume_all_replicas() + + log.info('Wait for replication to happen') + time.sleep(sleep_time) + + try: + entries_new = get_repl_entries(topo_m4, newrdn_name, ["uid"]) + assert all(entries_new), "Entry {} wasn't replicated successfully".format(newrdn_name) + finally: + log.info('Remove entry with new RDN {}'.format(newrdn_dn)) + 
topo_m4.ms["supplier1"].delete_s(newrdn_dn) + + +@pytest.mark.bz842441 +def test_modify_stripattrs(topo_m4): + """Check that we can modify nsds5replicastripattrs + + :id: f36abed8-e262-4f35-98aa-71ae55611aaa + :setup: Four suppliers replication setup + :steps: + 1. Modify nsds5replicastripattrs attribute on any agreement + 2. Search for the modified attribute + :expectedresults: It should be contain the value + 1. nsds5replicastripattrs should be successfully set + 2. The modified attribute should be the one we set + """ + + m1 = topo_m4.ms["supplier1"] + agreement = m1.agreement.list(suffix=DEFAULT_SUFFIX)[0].dn + attr_value = b'modifiersname modifytimestamp' + + log.info('Modify nsds5replicastripattrs with {}'.format(attr_value)) + m1.modify_s(agreement, [(ldap.MOD_REPLACE, 'nsds5replicastripattrs', [attr_value])]) + + log.info('Check nsds5replicastripattrs for {}'.format(attr_value)) + entries = m1.search_s(agreement, ldap.SCOPE_BASE, "objectclass=*", ['nsds5replicastripattrs']) + assert attr_value in entries[0].data['nsds5replicastripattrs'] + + +def test_new_suffix(topo_m4, new_suffix): + """Check that we can enable replication on a new suffix + + :id: d44a9ed4-26b0-4189-b0d0-b2b336ddccbd + :setup: Four suppliers replication setup, a new suffix + :steps: + 1. Enable replication on the new suffix + 2. Check if replication works + 3. Disable replication on the new suffix + :expectedresults: + 1. Replication on the new suffix should be enabled + 2. Replication should work + 3. 
Replication on the new suffix should be disabled + """ + m1 = topo_m4.ms["supplier1"] + m2 = topo_m4.ms["supplier2"] + + repl = ReplicationManager(NEW_SUFFIX) + + repl.create_first_supplier(m1) + + repl.join_supplier(m1, m2) + + repl.test_replication(m1, m2) + repl.test_replication(m2, m1) + + repl.remove_supplier(m1) + repl.remove_supplier(m2) + +def test_many_attrs(topo_m4, create_entry): + """Check a replication with many attributes (add and delete) + + :id: d540b358-f67a-43c6-8df5-7c74b3cb7523 + :setup: Four suppliers replication setup, a test entry + :steps: + 1. Add 10 new attributes to the entry + 2. Delete few attributes: one from the beginning, + two from the middle and one from the end + 3. Check that the changes were replicated in the right order + :expectedresults: + 1. The attributes should be successfully added + 2. Delete operations should be successful + 3. The changes should be replicated in the right order + """ + + m1 = topo_m4.ms["supplier1"] + add_list = ensure_list_bytes(map(lambda x: "test{}".format(x), range(10))) + delete_list = ensure_list_bytes(map(lambda x: "test{}".format(x), [0, 4, 7, 9])) + test_user = UserAccount(topo_m4.ms["supplier1"], TEST_ENTRY_DN) + + log.info('Modifying entry {} - 10 add operations'.format(TEST_ENTRY_DN)) + for add_name in add_list: + test_user.add('description', add_name) + + if DEBUGGING: + time.sleep(10) + else: + time.sleep(1) + + log.info('Check that everything was properly replicated after an add operation') + entries = get_repl_entries(topo_m4, TEST_ENTRY_NAME, ["description"]) + for entry in entries: + assert all(entry.getValues("description")[i] == add_name for i, add_name in enumerate(add_list)) + + log.info('Modifying entry {} - 4 delete operations for {}'.format(TEST_ENTRY_DN, str(delete_list))) + for delete_name in delete_list: + test_user.remove('description', delete_name) + + if DEBUGGING: + time.sleep(10) + else: + time.sleep(1) + + log.info('Check that everything was properly replicated after a 
delete operation') + entries = get_repl_entries(topo_m4, TEST_ENTRY_NAME, ["description"]) + for entry in entries: + for i, value in enumerate(entry.getValues("description")): + assert value == [name for name in add_list if name not in delete_list][i] + assert value not in delete_list + + +def test_double_delete(topo_m4, create_entry): + """Check that double delete of the entry doesn't crash server + + :id: 5b85a5af-df29-42c7-b6cb-965ec5aa478e + :feature: Multi supplier replication + :setup: Four suppliers replication setup, a test entry + :steps: 1. Delete the entry + 2. Delete the entry on the second supplier + 3. Check that server is alive + :expectedresults: Server hasn't crash + """ + + log.info('Deleting entry {} from supplier1'.format(TEST_ENTRY_DN)) + topo_m4.ms["supplier1"].delete_s(TEST_ENTRY_DN) + + if DEBUGGING: + time.sleep(5) + else: + time.sleep(1) + + log.info('Deleting entry {} from supplier2'.format(TEST_ENTRY_DN)) + try: + topo_m4.ms["supplier2"].delete_s(TEST_ENTRY_DN) + except ldap.NO_SUCH_OBJECT: + log.info("Entry {} wasn't found supplier2. It is expected.".format(TEST_ENTRY_DN)) + + if DEBUGGING: + time.sleep(5) + else: + time.sleep(1) + + log.info('Make searches to check if server is alive') + entries = get_repl_entries(topo_m4, TEST_ENTRY_NAME, ["uid"]) + assert not entries, "Entry deletion {} wasn't replicated successfully".format(TEST_ENTRY_DN) + + +def test_password_repl_error(topo_m4, create_entry): + """Check that error about userpassword replication is properly logged + + :id: d4f12dc0-cd2c-4b92-9b8d-d764a60f0698 + :feature: Multi supplier replication + :setup: Four suppliers replication setup, a test entry + :steps: 1. Change userpassword on supplier 1 + 2. Restart the servers to flush the logs + 3. 
Check the error log for an replication error + :expectedresults: We don't have a replication error in the error log + """ + + m1 = topo_m4.ms["supplier1"] + m2 = topo_m4.ms["supplier2"] + TEST_ENTRY_NEW_PASS = 'new_{}'.format(TEST_ENTRY_NAME) + + log.info('Clean the error log') + m2.deleteErrorLogs() + + log.info('Set replication loglevel') + m2.config.loglevel((ErrorLog.REPLICA,)) + + log.info('Modifying entry {} - change userpassword on supplier 2'.format(TEST_ENTRY_DN)) + test_user_m1 = UserAccount(topo_m4.ms["supplier1"], TEST_ENTRY_DN) + test_user_m2 = UserAccount(topo_m4.ms["supplier2"], TEST_ENTRY_DN) + test_user_m3 = UserAccount(topo_m4.ms["supplier3"], TEST_ENTRY_DN) + test_user_m4 = UserAccount(topo_m4.ms["supplier4"], TEST_ENTRY_DN) + + test_user_m1.set('userpassword', TEST_ENTRY_NEW_PASS) + + log.info('Restart the servers to flush the logs') + for num in range(1, 5): + topo_m4.ms["supplier{}".format(num)].restart(timeout=10) + + m1_conn = test_user_m1.bind(TEST_ENTRY_NEW_PASS) + m2_conn = test_user_m2.bind(TEST_ENTRY_NEW_PASS) + m3_conn = test_user_m3.bind(TEST_ENTRY_NEW_PASS) + m4_conn = test_user_m4.bind(TEST_ENTRY_NEW_PASS) + + if DEBUGGING: + time.sleep(5) + else: + time.sleep(1) + + log.info('Check the error log for the error with {}'.format(TEST_ENTRY_DN)) + assert not m2.ds_error_log.match('.*can.t add a change for uid={}.*'.format(TEST_ENTRY_NAME)) + + +def test_invalid_agmt(topo_m4): + """Test adding that an invalid agreement is properly rejected and does not crash the server + + :id: 92f10f46-1be1-49ca-9358-784359397bc2 + :setup: MMR with four suppliers + :steps: + 1. Add invalid agreement (nsds5ReplicaEnabled set to invalid value) + 2. Verify the server is still running + :expectedresults: + 1. Invalid repl agreement should be rejected + 2. 
Server should be still running + """ + m1 = topo_m4.ms["supplier1"] + + # Add invalid agreement (nsds5ReplicaEnabled set to invalid value) + AGMT_DN = 'cn=whatever,cn=replica,cn="dc=example,dc=com",cn=mapping tree,cn=config' + try: + invalid_props = {RA_ENABLED: 'True', # Invalid value + RA_SCHEDULE: '0001-2359 0123456'} + m1.agreement.create(suffix=DEFAULT_SUFFIX, host='localhost', port=389, properties=invalid_props) + except ldap.UNWILLING_TO_PERFORM: + m1.log.info('Invalid repl agreement correctly rejected') + except ldap.LDAPError as e: + m1.log.fatal('Got unexpected error adding invalid agreement: ' + str(e)) + assert False + else: + m1.log.fatal('Invalid agreement was incorrectly accepted by the server') + assert False + + # Verify the server is still running + try: + m1.simple_bind_s(DN_DM, PASSWORD) + except ldap.LDAPError as e: + m1.log.fatal('Failed to bind: ' + str(e)) + assert False + + +def test_warining_for_invalid_replica(topo_m4): + """Testing logs to indicate the inconsistency when configuration is performed. + + :id: dd689d03-69b8-4bf9-a06e-2acd19d5e2c8 + :setup: MMR with four suppliers + :steps: + 1. Setup nsds5ReplicaBackoffMin to 20 + 2. Setup nsds5ReplicaBackoffMax to 10 + :expectedresults: + 1. nsds5ReplicaBackoffMin should set to 20 + 2. An error should be generated and also logged in the error logs. 
+ """ + replicas = Replicas(topo_m4.ms["supplier1"]) + replica = replicas.list()[0] + log.info('Set nsds5ReplicaBackoffMin to 20') + replica.set('nsds5ReplicaBackoffMin', '20') + with pytest.raises(ldap.UNWILLING_TO_PERFORM): + log.info('Set nsds5ReplicaBackoffMax to 10') + replica.set('nsds5ReplicaBackoffMax', '10') + log.info('Resetting configuration: nsds5ReplicaBackoffMin') + replica.remove_all('nsds5ReplicaBackoffMin') + log.info('Check the error log for the error') + assert topo_m4.ms["supplier1"].ds_error_log.match('.*nsds5ReplicaBackoffMax.*10.*invalid.*') + +@pytest.mark.ds51082 +def test_csnpurge_large_valueset(topo_m2): + """Test csn generator test + + :id: 63e2bdb2-0a8f-4660-9465-7b80a9f72a74 + :setup: MMR with 2 suppliers + :steps: + 1. Create a test_user + 2. add a large set of values (more than 10) + 3. delete all the values (more than 10) + 4. configure the replica to purge those values (purgedelay=5s) + 5. Waiting for 6 second + 6. do a series of update + :expectedresults: + 1. Should succeeds + 2. Should succeeds + 3. Should succeeds + 4. Should succeeds + 5. Should succeeds + 6. 
Should not crash + """ + m1 = topo_m2.ms["supplier2"] + + test_user = UserAccount(m1, TEST_ENTRY_DN) + if test_user.exists(): + log.info('Deleting entry {}'.format(TEST_ENTRY_DN)) + test_user.delete() + test_user.create(properties={ + 'uid': TEST_ENTRY_NAME, + 'cn': TEST_ENTRY_NAME, + 'sn': TEST_ENTRY_NAME, + 'userPassword': TEST_ENTRY_NAME, + 'uidNumber' : '1000', + 'gidNumber' : '2000', + 'homeDirectory' : '/home/mmrepl_test', + }) + + # create a large value set so that it is sorted + for i in range(1,20): + test_user.add('description', 'value {}'.format(str(i))) + + # delete all values of the valueset + for i in range(1,20): + test_user.remove('description', 'value {}'.format(str(i))) + + # set purging delay to 5 second and wait more that 5second + replicas = Replicas(m1) + replica = replicas.list()[0] + log.info('nsds5ReplicaPurgeDelay to 5') + replica.set('nsds5ReplicaPurgeDelay', '5') + time.sleep(10) + + # add some new values to the valueset containing entries that should be purged + for i in range(21,25): + test_user.add('description', 'value {}'.format(str(i))) + +@pytest.mark.ds51244 +def test_urp_trigger_substring_search(topo_m2): + """Test that a ADD of a entry with a '*' in its DN, triggers + an internal search with a escaped DN + + :id: 9869bb39-419f-42c3-a44b-c93eb0b77667 + :customerscenario: True + :setup: MMR with 2 suppliers + :steps: + 1. enable internal operation loggging for plugins + 2. Create on M1 a test_user with a '*' in its DN + 3. Check the test_user is replicated + 4. Check in access logs that the internal search does not contain '*' + :expectedresults: + 1. Should succeeds + 2. Should succeeds + 3. Should succeeds + 4. 
Should succeeds + """ + m1 = topo_m2.ms["supplier1"] + m2 = topo_m2.ms["supplier2"] + + # Enable loggging of internal operation logging to capture URP intop + log.info('Set nsslapd-plugin-logging to on') + for inst in (m1, m2): + inst.config.loglevel([AccessLog.DEFAULT, AccessLog.INTERNAL], service='access') + inst.config.set('nsslapd-plugin-logging', 'on') + inst.restart() + + # add a user with a DN containing '*' + test_asterisk_uid = 'asterisk_*_in_value' + test_asterisk_dn = 'uid={},{}'.format(test_asterisk_uid, DEFAULT_SUFFIX) + + test_user = UserAccount(m1, test_asterisk_dn) + if test_user.exists(): + log.info('Deleting entry {}'.format(test_asterisk_dn)) + test_user.delete() + test_user.create(properties={ + 'uid': test_asterisk_uid, + 'cn': test_asterisk_uid, + 'sn': test_asterisk_uid, + 'userPassword': test_asterisk_uid, + 'uidNumber' : '1000', + 'gidNumber' : '2000', + 'homeDirectory' : '/home/asterisk', + }) + + # check that the ADD was replicated on M2 + test_user_m2 = UserAccount(m2, test_asterisk_dn) + for i in range(1,5): + if test_user_m2.exists(): + break + else: + log.info('Entry not yet replicated on M2, wait a bit') + time.sleep(3) + + # check that M2 access logs does not "(&(objectclass=nstombstone)(nscpentrydn=uid=asterisk_*_in_value,dc=example,dc=com))" + log.info('Check that on M2, URP as not triggered such internal search') + pattern = ".*\(Internal\).*SRCH.*\(&\(objectclass=nstombstone\)\(nscpentrydn=uid=asterisk_\*_in_value,dc=example,dc=com.*" + found = m2.ds_access_log.match(pattern) + log.info("found line: %s" % found) + assert not found + + +@pytest.mark.skipif(ds_is_older('1.4.4'), reason="Not implemented") +def test_csngen_task(topo_m2): + """Test csn generator test + + :id: b976849f-dbed-447e-91a7-c877d5d71fd0 + :setup: MMR with 2 suppliers + :steps: + 1. Create a csngen_test task + 2. Check that debug messages "_csngen_gen_tester_main" are in errors logs + :expectedresults: + 1. Should succeeds + 2. 
Should succeeds + """ + m1 = topo_m2.ms["supplier1"] + csngen_task = csngenTestTask(m1) + csngen_task.create(properties={ + 'ttl': '300' + }) + time.sleep(10) + log.info('Check the error log contains strings showing csn generator is tested') + assert m1.searchErrorsLog("_csngen_gen_tester_main") + + +def test_default_cl_trimming_enabled(topo_m2): + """Check that changelog trimming was enabled by default + + :id: c37b9a28-f961-4867-b8a1-e81edd7f9bf3 + :setup: Supplier Instance + :steps: + 1. Check changelog has trimming set up by default + :expectedresults: + 1. Success + """ + + # Set up changelog trimming by default + cl = Changelog(topo_m2.ms["supplier1"], DEFAULT_SUFFIX) + assert cl.get_attr_val_utf8("nsslapd-changelogmaxage") == "7d" + + +if __name__ == '__main__': + # Run isolated + # -s for DEBUG mode + CURRENT_FILE = os.path.realpath(__file__) + pytest.main("-s %s" % CURRENT_FILE) diff --git a/dirsrvtests/tests/suites/replication/cascading_test.py b/dirsrvtests/tests/suites/replication/cascading_test.py new file mode 100644 index 0000000..5fe4c61 --- /dev/null +++ b/dirsrvtests/tests/suites/replication/cascading_test.py @@ -0,0 +1,152 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2018 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. 
+# --- END COPYRIGHT BLOCK --- +# +import logging +import pytest +import os +import ldap +from lib389._constants import * +from lib389.replica import ReplicationManager +from lib389.plugins import MemberOfPlugin +from lib389.agreement import Agreements +from lib389.idm.user import UserAccount, TEST_USER_PROPERTIES +from lib389.idm.group import Groups +from lib389.topologies import topology_m1h1c1 as topo + +pytestmark = pytest.mark.tier1 + +DEBUGGING = os.getenv("DEBUGGING", default=False) +if DEBUGGING: + logging.getLogger(__name__).setLevel(logging.DEBUG) +else: + logging.getLogger(__name__).setLevel(logging.INFO) +log = logging.getLogger(__name__) + +BIND_DN = 'uid=tuser1,ou=People,dc=example,dc=com' +BIND_RDN = 'tuser1' + + +def config_memberof(server): + """Configure memberOf plugin and configure fractional + to prevent total init to send memberof + """ + + memberof = MemberOfPlugin(server) + memberof.enable() + memberof.set_autoaddoc('nsMemberOf') + server.restart() + agmts = Agreements(server) + for agmt in agmts.list(): + log.info('update %s to add nsDS5ReplicatedAttributeListTotal' % agmt.dn) + agmt.replace_many(('nsDS5ReplicatedAttributeListTotal', '(objectclass=*) $ EXCLUDE '), + ('nsDS5ReplicatedAttributeList', '(objectclass=*) $ EXCLUDE memberOf')) + + +def test_basic_with_hub(topo): + """Check that basic operations work in cascading replication, this includes + testing plugins that perform internal operatons, and replicated password + policy state attributes. + + :id: 4ac85552-45bc-477b-89a4-226dfff8c6cc + :setup: 1 supplier, 1 hub, 1 consumer + :steps: + 1. Enable memberOf plugin and set password account lockout settings + 2. Restart the instance + 3. Add a user + 4. Add a group + 5. Test that the replication works + 6. Add the user as a member to the group + 7. Test that the replication works + 8. Issue bad binds to update passwordRetryCount + 9. Test that replicaton works + 10. Check that passwordRetyCount was replicated + :expectedresults: + 1. 
Should be a success + 2. Should be a success + 3. Should be a success + 4. Should be a success + 5. Should be a success + 6. Should be a success + 7. Should be a success + 8. Should be a success + 9. Should be a success + 10. Should be a success + """ + + repl_manager = ReplicationManager(DEFAULT_SUFFIX) + supplier = topo.ms["supplier1"] + consumer = topo.cs["consumer1"] + hub = topo.hs["hub1"] + + for inst in topo: + config_memberof(inst) + inst.config.set('passwordlockout', 'on') + inst.config.set('passwordlockoutduration', '60') + inst.config.set('passwordmaxfailure', '3') + inst.config.set('passwordIsGlobalPolicy', 'on') + + # Create user + user1 = UserAccount(supplier, BIND_DN) + user_props = TEST_USER_PROPERTIES.copy() + user_props.update({'sn': BIND_RDN, + 'cn': BIND_RDN, + 'uid': BIND_RDN, + 'inetUserStatus': '1', + 'objectclass': 'extensibleObject', + 'userpassword': PASSWORD}) + user1.create(properties=user_props, basedn=SUFFIX) + + # Create group + groups = Groups(supplier, DEFAULT_SUFFIX) + group = groups.create(properties={'cn': 'group'}) + + # Test replication + repl_manager.test_replication(supplier, consumer) + + # Trigger memberOf plugin by adding user to group + group.replace('member', user1.dn) + + # Test replication once more + repl_manager.test_replication(supplier, consumer) + + # Issue bad password to update passwordRetryCount + try: + supplier.simple_bind_s(user1.dn, "badpassword") + except: + pass + + # Test replication one last time + supplier.simple_bind_s(DN_DM, PASSWORD) + repl_manager.test_replication(supplier, consumer) + + # Finally check if passwordRetyCount was replicated to the hub and consumer + user1 = UserAccount(hub, BIND_DN) + count = user1.get_attr_val_int('passwordRetryCount') + if count is None: + log.fatal('PasswordRetyCount was not replicated to hub') + assert False + if int(count) != 1: + log.fatal('PasswordRetyCount has unexpected value: {}'.format(count)) + assert False + + user1 = UserAccount(consumer, BIND_DN) + 
count = user1.get_attr_val_int('passwordRetryCount') + if count is None: + log.fatal('PasswordRetyCount was not replicated to consumer') + assert False + if int(count) != 1: + log.fatal('PasswordRetyCount has unexpected value: {}'.format(count)) + assert False + + +if __name__ == '__main__': + # Run isolated + # -s for DEBUG mode + CURRENT_FILE = os.path.realpath(__file__) + pytest.main(["-s", CURRENT_FILE]) + diff --git a/dirsrvtests/tests/suites/replication/changelog_encryption_test.py b/dirsrvtests/tests/suites/replication/changelog_encryption_test.py new file mode 100644 index 0000000..dc16fce --- /dev/null +++ b/dirsrvtests/tests/suites/replication/changelog_encryption_test.py @@ -0,0 +1,91 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2022 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. +# --- END COPYRIGHT BLOCK --- +# +import logging +import pytest +import os +import time +from lib389._constants import DEFAULT_SUFFIX, DN_CHANGELOG, DN_USERROOT_LDBM +from lib389.topologies import topology_m1c1 as topo +from lib389.dseldif import DSEldif +from lib389.utils import ds_supports_new_changelog +from lib389.replica import Replicas + +pytestmark = pytest.mark.tier1 + +DEBUGGING = os.getenv("DEBUGGING", default=False) +if DEBUGGING: + logging.getLogger(__name__).setLevel(logging.DEBUG) +else: + logging.getLogger(__name__).setLevel(logging.INFO) +log = logging.getLogger(__name__) + + +def test_cl_encryption_setup_process(topo): + """Take an already working replication deployment, and setup changelog + encryption + + :id: 1a1b7d29-69f5-4f0e-91c4-e7f66140ff17 + :setup: Supplier Instance, Consumer Instance + :steps: + 1. Enable TLS for the server + 2. Export changelog + 3. Enable changelog encryption + 4. Import changelog + 5. Verify replication is still working + :expectedresults: + 1. Success + 2. Success + 3. Success + 4. Success + 5. 
Success + """ + + supplier = topo.ms['supplier1'] + consumer = topo.cs['consumer1'] + + # Enable TLS + log.info('Enable TLS ...') + supplier.enable_tls() + consumer.enable_tls() + + # Export changelog + log.info('Export changelog ...') + replicas = Replicas(supplier) + replica = replicas.get(DEFAULT_SUFFIX) + replica.begin_task_cl2ldif() + replica.task_finished() + + # Enable changelog encryption + log.info('Enable changelog encryption ...') + dse_ldif = DSEldif(supplier) + supplier.stop() + if ds_supports_new_changelog(): + changelog = 'cn=changelog,{}'.format(DN_USERROOT_LDBM) + else: + changelog = DN_CHANGELOG + dse_ldif.replace(changelog, 'nsslapd-encryptionalgorithm', 'AES') + if dse_ldif.get(changelog, 'nsSymmetricKey'): + dse_ldif.delete(changelog, 'nsSymmetricKey') + supplier.start() + + # Import changelog + log.info('Import changelog ...') + replica.begin_task_ldif2cl() + replica.task_finished() + + # Verify replication is still working + log.info('Test replication is still working ...') + assert replica.test_replication([consumer]) + +if __name__ == '__main__': + # Run isolated + # -s for DEBUG mode + CURRENT_FILE = os.path.realpath(__file__) + pytest.main(["-s", CURRENT_FILE]) + diff --git a/dirsrvtests/tests/suites/replication/changelog_test.py b/dirsrvtests/tests/suites/replication/changelog_test.py new file mode 100644 index 0000000..b125217 --- /dev/null +++ b/dirsrvtests/tests/suites/replication/changelog_test.py @@ -0,0 +1,783 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2017 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. 
+# --- END COPYRIGHT BLOCK --- +# +import os +import logging +import ldap +import ldif +import pytest +import time +import subprocess +import glob +from lib389.properties import TASK_WAIT +from lib389.replica import Replicas +from lib389.idm.user import UserAccounts +from lib389.topologies import topology_m2 as topo +from lib389._constants import * +from lib389.plugins import RetroChangelogPlugin +from lib389.dseldif import DSEldif +from lib389.tasks import * +from lib389.utils import * +from lib389.utils import ensure_bytes, ds_supports_new_changelog + +pytestmark = pytest.mark.tier1 + +TEST_ENTRY_NAME = 'replusr' +NEW_RDN_NAME = 'cl5usr' +if ds_supports_new_changelog(): + CHANGELOG = 'cn=changelog,{}'.format(DN_USERROOT_LDBM) +else: + CHANGELOG = 'cn=changelog5,cn=config' +RETROCHANGELOG = 'cn=Retro Changelog Plugin,cn=plugins,cn=config' +MAXAGE = 'nsslapd-changelogmaxage' +TRIMINTERVAL = 'nsslapd-changelogtrim-interval' +COMPACTDBINTERVAL = 'nsslapd-changelogcompactdb-interval' +FILTER = '(cn=*)' + +DEBUGGING = os.getenv('DEBUGGING', default=False) +if DEBUGGING: + logging.getLogger(__name__).setLevel(logging.DEBUG) +else: + logging.getLogger(__name__).setLevel(logging.INFO) +log = logging.getLogger(__name__) + +def _check_repl_changelog_backup(instance, backup_dir): + # Note: there is no way to check dbi on lmdb backup + # That said dbscan may perhaps do it ... 
+ if instance.get_db_lib() is 'bdb': + if ds_supports_new_changelog(): + backup_checkdir = os.path.join(backup_dir, DEFAULT_BENAME, BDB_CL_FILENAME) + else: + backup_checkdir = os.path.join(backup_dir, '.repl_changelog_backup', DEFAULT_CHANGELOG_DB) + if glob.glob(f'{backup_checkdir}*'): + log.info('Database backup is created successfully') + else: + log.fatal('test_changelog5: backup directory does not exist : {}*'.format(backup_checkdir)) + assert False + +def _perform_ldap_operations(topo): + """Add a test user, modify description, modrdn user and delete it""" + + log.info('Adding user {}'.format(TEST_ENTRY_NAME)) + users = UserAccounts(topo.ms['supplier1'], DEFAULT_SUFFIX) + user_properties = { + 'uid': TEST_ENTRY_NAME, + 'cn': TEST_ENTRY_NAME, + 'sn': TEST_ENTRY_NAME, + 'uidNumber': '1001', + 'gidNumber': '2001', + 'userpassword': PASSWORD, + 'description': 'userdesc', + 'homeDirectory': '/home/{}'.format(TEST_ENTRY_NAME)} + tuser = users.create(properties=user_properties) + tuser.replace('description', 'newdesc') + log.info('Modify RDN of user {}'.format(tuser.dn)) + try: + topo.ms['supplier1'].modrdn_s(tuser.dn, 'uid={}'.format(NEW_RDN_NAME), 0) + except ldap.LDAPError as e: + log.fatal('Failed to modrdn entry {}'.format(tuser.dn)) + raise e + tuser = users.get(NEW_RDN_NAME) + log.info('Deleting user: {}'.format(tuser.dn)) + tuser.delete() + + +def _create_changelog_dump(topo): + """Dump changelog using nss5task and check if ldap operations are logged""" + + log.info('Dump changelog using nss5task and check if ldap operations are logged') + if ds_supports_new_changelog(): + changelog_dir = topo.ms['supplier1'].get_ldif_dir() + changelog_end = '_cl.ldif' + else: + changelog_dir = topo.ms['supplier1'].get_changelog_dir() + changelog_end = '.ldif' + replicas = Replicas(topo.ms["supplier1"]) + replica = replicas.get(DEFAULT_SUFFIX) + log.info('Remove ldif files, if present in: {}'.format(changelog_dir)) + for files in os.listdir(changelog_dir): + if 
files.endswith(changelog_end): + changelog_file = os.path.join(changelog_dir, files) + try: + os.remove(changelog_file) + except OSError as e: + log.fatal('Failed to remove ldif file: {}'.format(changelog_file)) + raise e + log.info('Existing changelog ldif file: {} removed'.format(changelog_file)) + else: + log.info('No existing changelog ldif files present') + + log.info('Running nsds5task to dump changelog database to a file') + replica.begin_task_cl2ldif() + + log.info('Check if changelog ldif file exist in: {}'.format(changelog_dir)) + for files in os.listdir(changelog_dir): + if files.endswith(changelog_end): + changelog_ldif = os.path.join(changelog_dir, files) + log.info('Changelog ldif file exist: {}'.format(changelog_ldif)) + return changelog_ldif + else: + log.fatal('Changelog ldif file does not exist in: {}'.format(changelog_dir)) + assert False + + +def _check_changelog_ldif(topo, changelog_ldif): + """Check changelog ldif file for required ldap operations""" + + log.info('Checking changelog ldif file for ldap operations') + assert os.stat(changelog_ldif).st_size > 0, 'Changelog file has no contents' + with open(changelog_ldif, 'r') as fh: + content = fh.read() + ldap_operations = set() + log.info('Checking if all required changetype operations are present') + for entry_ldif in content.split('\n\n'): + for line in entry_ldif.split('\n'): + if line.startswith('changetype: '): + ldap_operations.add(line.split(': ')[1]) + valid_operations = set(ldif.valid_changetype_dict.keys()) + log.info('Valid ldap operations: {}'.format(valid_operations)) + log.info('Ldap operations found: {}'.format(ldap_operations)) + assert ldap_operations == valid_operations, 'Changelog ldif file does not contain all \ + changetype operations' + + +def get_ldap_error_msg(e, type): + return e.args[0][type] + + +@pytest.fixture(scope="module") +def changelog_init(topo): + """ changlog dir is not configuarable, just + enable cn=Retro Changelog Plugin,cn=plugins,cn=config + """ + 
log.info('Testing Ticket 47669 - Test duration syntax in the changelogs') + + # bind as directory manager + topo.ms["supplier1"].log.info("Bind as %s" % DN_DM) + topo.ms["supplier1"].simple_bind_s(DN_DM, PASSWORD) + + if not ds_supports_new_changelog(): + try: + changelogdir = os.path.join(os.path.dirname(topo.ms["supplier1"].dbdir), 'changelog') + topo.ms["supplier1"].modify_s(CHANGELOG, [(ldap.MOD_REPLACE, 'nsslapd-changelogdir', + ensure_bytes(changelogdir))]) + except ldap.LDAPError as e: + log.error('Failed to modify ' + CHANGELOG + ': error {}'.format(get_ldap_error_msg(e,'desc'))) + assert False + + try: + topo.ms["supplier1"].modify_s(RETROCHANGELOG, [(ldap.MOD_REPLACE, 'nsslapd-pluginEnabled', b'on')]) + except ldap.LDAPError as e: + log.error('Failed to enable ' + RETROCHANGELOG + ': error {}'.format(get_ldap_error_msg(e, 'desc'))) + assert False + + # restart the server + topo.ms["supplier1"].restart(timeout=10) + + +def add_and_check(topo, plugin, attr, val, isvalid): + """ + Helper function to add/replace attr: val and check the added value + """ + if isvalid: + log.info('Test %s: %s -- valid' % (attr, val)) + try: + topo.ms["supplier1"].modify_s(plugin, [(ldap.MOD_REPLACE, attr, ensure_bytes(val))]) + except ldap.LDAPError as e: + log.error('Failed to add ' + attr + ': ' + val + ' to ' + plugin + ': error {}'.format(get_ldap_error_msg(e,'desc'))) + assert False + else: + log.info('Test %s: %s -- invalid' % (attr, val)) + if plugin == CHANGELOG: + try: + topo.ms["supplier1"].modify_s(plugin, [(ldap.MOD_REPLACE, attr, ensure_bytes(val))]) + except ldap.LDAPError as e: + log.error('Expectedly failed to add ' + attr + ': ' + val + + ' to ' + plugin + ': error {}'.format(get_ldap_error_msg(e,'desc'))) + else: + try: + topo.ms["supplier1"].modify_s(plugin, [(ldap.MOD_REPLACE, attr, ensure_bytes(val))]) + except ldap.LDAPError as e: + log.error('Failed to add ' + attr + ': ' + val + ' to ' + plugin + ': error {}'.format(get_ldap_error_msg(e,'desc'))) + + 
try: + entries = topo.ms["supplier1"].search_s(plugin, ldap.SCOPE_BASE, FILTER, [attr]) + if isvalid: + if not entries[0].hasValue(attr, val): + log.fatal('%s does not have expected (%s: %s)' % (plugin, attr, val)) + assert False + else: + if plugin == CHANGELOG: + if entries[0].hasValue(attr, val): + log.fatal('%s has unexpected (%s: %s)' % (plugin, attr, val)) + assert False + else: + if not entries[0].hasValue(attr, val): + log.fatal('%s does not have expected (%s: %s)' % (plugin, attr, val)) + assert False + except ldap.LDAPError as e: + log.fatal('Unable to search for entry %s: error %s' % (plugin, get_ldap_error_msg(e, 'desc'))) + assert False + +def remove_ldif_files_from_changelogdir(topo, extension): + """ + Remove existing ldif files from changelog dir + """ + if ds_supports_new_changelog(): + changelog_dir = topo.ms['supplier1'].get_ldif_dir() + else: + changelog_dir = topo.ms['supplier1'].get_changelog_dir() + + log.info('Remove %s files, if present in: %s' % (extension, changelog_dir)) + for files in os.listdir(changelog_dir): + if files.endswith(extension): + changelog_file = os.path.join(changelog_dir, files) + try: + os.remove(changelog_file) + except OSError as e: + log.fatal('Failed to remove %s file: %s' % (extension,changelog_file)) + raise e + else: + log.info('Existing changelog %s file: %s removed' % (extension,changelog_file)) + + +@pytest.mark.xfail(ds_is_older('1.3.10.1', '1.4.3'), reason="bug bz1685059") +@pytest.mark.skip(reason="does not work for prefix builds") +@pytest.mark.bz1685059 +@pytest.mark.ds50498 +@pytest.mark.bz1769296 +def test_cldump_files_removed(topo): + """Verify bz1685059 : cl-dump generated ldif files are removed at the end, -l option is the way to keep them + + :id: fbb2f2a3-167b-4bc6-b513-9e0318b09edc + :setup: Replication with two supplier, nsslapd-changelogdir is '/var/lib/dirsrv/slapd-supplier1/changelog' + retrochangelog plugin disabled + :steps: + 1. Clean the changelog directory, removing .ldif files present, if any + 2. 
Clean the changelog directory, removing .done files present, if any + 3. Perform ldap operations to record replication changes + 4. Try a cl-dump call with invalid arguments to secure the next steps and to check bz1769296 + 5. Launch cl-dump cli without -l option + 6. Wait so that all cl-dump tasks be finished + 7. Check that all .ldif.done generated files have been removed from the changelog dir + 8. Launch cl-dump cli with -l option + 9. Wait so that all cl-dump tasks be finished + 10. Check that the generated .ldif.done files are present in the changelog dir + + :expectedresults: + 1. No remaining .ldif file in the changelog directory + 2. No remaining .ldif.done file in the changelog directory + 3. ldap operations are replicated and recorded in changelog + 4. A result code different from 0 is raised + 5. cl-dump is successfully executed + 6. cl-dump process has finished + 7. No .ldif.done files in the changelog dir + 8. cl-dump is successfully executed + 9. cl-dump process has finished + 10. .ldif.done generated files are present in the changelog dir + """ + + changelog_dir = topo.ms['supplier1'].get_changelog_dir() + + # Remove existing .ldif files in changelog dir + remove_ldif_files_from_changelogdir(topo, '.ldif') + + # Remove existing .ldif.done files in changelog dir + remove_ldif_files_from_changelogdir(topo, '.done') + + _perform_ldap_operations(topo) + + # This part to make sure that an error in the cl-dump script execution will be detected, + # primary condition before executing the core goal of this case : management of cl-dump generated files. 
+ # As of today the returned code by cl-dump.pl is incorrect when run with invalid arguments (bz1769296) + # This piece of code will serve as reproducer and verification mean for bz1769296 + + log.info("Use cl-dump perl script without -l option : no generated ldif files should remain in %s " % changelog_dir) + cmdline=['/usr/bin/cl-dump', '-h', HOST_SUPPLIER_1, '-p', 'invalid port', '-D', DN_DM, '-w', PASSWORD] + log.info('Command used : %s' % cmdline) + proc = subprocess.Popen(cmdline, stdout=subprocess.PIPE) + msg = proc.communicate() + log.info('output message : %s' % msg[0]) + assert proc.returncode != 0 + + # Now the core goal of the test case + # Using cl-dump without -l option + log.info("Use cl-dump perl script without -l option : no generated ldif files should remain in %s " % changelog_dir) + cmdline=['/usr/bin/cl-dump', '-h', HOST_SUPPLIER_1, '-p', str(PORT_SUPPLIER_1), '-D', DN_DM, '-w', PASSWORD] + log.info('Command used : %s' % cmdline) + proc = subprocess.Popen(cmdline, stdout=subprocess.PIPE) + proc.communicate() + assert proc.returncode == 0 + + log.info('Wait for all cl-dump files to be generated') + time.sleep(1) + + log.info('Check if cl-dump generated .ldif.done files are present - should not') + for files in os.listdir(changelog_dir): + if files.endswith('.done'): + log.fatal('cl-dump generated .ldif.done files are present in %s - they should not' % changelog_dir) + assert False + else: + log.info('All cl-dump generated .ldif files have been successfully removed from %s ' % changelog_dir) + + + # Using cl-dump with -l option + log.info("Use cl-dump perl script with -l option : generated ldif files should be kept in %s " % changelog_dir) + cmdline=['/usr/bin/cl-dump', '-h', HOST_SUPPLIER_1, '-p', str(PORT_SUPPLIER_1), '-D', DN_DM, '-w', PASSWORD, '-l'] + log.info('Command used : %s' % cmdline) + proc = subprocess.Popen(cmdline, stdout=subprocess.PIPE) + msg = proc.communicate() + assert proc.returncode == 0 + + log.info('Wait for all cl-dump 
files to be generated') + time.sleep(1) + + log.info('Check if cl-dump generated .ldif.done files are present - should be') + for files in os.listdir(changelog_dir): + if files.endswith('.done'): + cldump_file = os.path.join(changelog_dir, files) + log.info('Success : ldif file %s is present' % cldump_file) + break + else: + log.fatal('.ldif.done files are not present in %s - they should be' % changelog_dir) + assert False + +@pytest.mark.skipif(ds_is_older("1.4.3"), reason="Not implemented") +def test_dsconf_dump_changelog_files_removed(topo): + """Verify that the python counterpart of cl-dump (using dsconf) has a correct management of generated files + + :id: e41dcf90-098a-4386-acb5-789384579bf7 + :setup: Replication with two supplier, nsslapd-changelogdir is '/var/lib/dirsrv/slapd-supplier1/changelog' + retrochangelog plugin disabled + :steps: + 1. Clean the changelog directory, removing .ldif files present, if any + 2. Clean the changelog directory, removing .ldif.done files present, if any + 3. Perform ldap operations to record replication changes + 4. Try a dsconf call with invalid arguments to secure the next steps + 5. Launch dsconf export-changelog cli without -l option + 6. Wait so that all dsconf tasks be finished + 7. Check that all .ldif.done generated files have been removed from the changelog dir + 8. Launch dsconf export-changelog cli with -l option + 9. Wait so that all dsconf tasks be finished + 10. Check that the generated .ldif.done files are present in the changelog dir + + :expectedresults: + 1. No remaining .ldif file in the changelog directory + 2. No remaining .ldif.done file in the changelog directory + 3. ldap operations are replicated and recorded in changelog + 4. A result code different from 0 is raised + 5. dsconf export-changelog is successfully executed + 6. dsconf process has finished + 7. No .ldif.done files in the changelog dir + 8. dsconf export-changelog is successfully executed + 9. dsconf process has finished + 10. 
.ldif.done generated files are present in the changelog dir + """ + + if ds_supports_new_changelog(): + changelog_dir = topo.ms['supplier1'].get_ldif_dir() + else: + changelog_dir = topo.ms['supplier1'].get_changelog_dir() + instance = topo.ms['supplier1'] + instance_url = 'ldap://%s:%s' % (HOST_SUPPLIER_1, PORT_SUPPLIER_1) + + # Remove existing .ldif files in changelog dir + remove_ldif_files_from_changelogdir(topo, '.ldif') + + # Remove existing .ldif.done files from changelog dir + remove_ldif_files_from_changelogdir(topo, '.done') + + _perform_ldap_operations(topo) + + # This part to make sure that an error in the python dsconf export-changelog execution will be detected, + # primary condition before executing the core goal of this case : management of generated files. + + log.info("Use dsconf export-changelog with invalid parameters") + cmdline=['/usr/sbin/dsconf', instance_url, '-D', DN_DM, '-w', 'badpasswd', 'replication', 'export-changelog'] + log.info('Command used : %s' % cmdline) + proc = subprocess.Popen(cmdline, stdout=subprocess.PIPE) + msg = proc.communicate() + log.info('output message : %s' % msg[0]) + assert proc.returncode != 0 + + # Now the core goal of the test case + # Using dsconf replication changelog without -l option + log.info('Use dsconf replication changelog without -l option: no generated ldif files should be present in %s ' % changelog_dir) + cmdline=['/usr/sbin/dsconf', instance_url, '-D', DN_DM, '-w', PASSWORD, 'replication', 'export-changelog', + 'default', '-r', DEFAULT_SUFFIX] + log.info('Command used : %s' % cmdline) + proc = subprocess.Popen(cmdline, stdout=subprocess.PIPE) + proc.communicate() + assert proc.returncode == 0 + + log.info('Wait for all dsconf export-changelog files to be generated') + time.sleep(1) + + log.info('Check if dsconf export-changelog generated .ldif.done files are present - should not') + for files in os.listdir(changelog_dir): + if files.endswith('.done'): + log.fatal('export-changelog generated 
.ldif.done files are present in %s - they should not' % changelog_dir) + assert False + else: + log.info('All dsconf export-changelog generated .ldif files have been successfully removed from %s ' % changelog_dir) + + # Using dsconf replication changelog without -l option + log.info('Use dsconf replication changelog with -l option: generated ldif files should be kept in %s ' % changelog_dir) + cmdline=['/usr/sbin/dsconf', instance_url, '-D', DN_DM, '-w', PASSWORD, 'replication', 'export-changelog', + 'to-ldif', '-o', changelog_dir + '/test.ldif', '-r', DEFAULT_SUFFIX, '-l'] + log.info('Command used : %s' % cmdline) + proc = subprocess.Popen(cmdline, stdout=subprocess.PIPE) + proc.communicate() + assert proc.returncode == 0 + + log.info('Wait for all dsconf export-changelog files to be generated') + time.sleep(1) + + log.info('Check if dsconf export-changelog generated .ldif.done files are present - should be') + for files in os.listdir(changelog_dir): + if files.endswith('.done'): + cldump_file = os.path.join(changelog_dir, files) + log.info('Success : ldif file %s is present' % cldump_file) + break + else: + log.fatal('.ldif.done files are not present in %s - they should be' % changelog_dir) + assert False + + +def test_verify_changelog(topo): + """Check if changelog dump file contains required ldap operations + + :id: 15ead076-8c18-410b-90eb-c2fe9eab966b + :setup: Replication with two suppliers. + :steps: 1. Add user to server. + 2. Perform ldap modify, modrdn and delete operations. + 3. Dump the changelog to a file using nsds5task. + 4. Check if changelog is updated with ldap operations. + :expectedresults: + 1. Add user should PASS. + 2. Ldap operations should PASS. + 3. Changelog should be dumped successfully. + 4. 
Changelog dump file should contain ldap operations + """ + + log.info('LDAP operations add, modify, modrdn and delete') + _perform_ldap_operations(topo) + changelog_ldif = _create_changelog_dump(topo) + _check_changelog_ldif(topo, changelog_ldif) + + +def test_verify_changelog_online_backup(topo): + """Check ldap operations in changelog dump file after online backup + + :id: 4001c34f-35b4-439e-8c2d-fa7e30375219 + :setup: Replication with two suppliers. + :steps: 1. Add user to server. + 2. Take online backup using db2bak task. + 3. Restore the database using bak2db task. + 4. Perform ldap modify, modrdn and delete operations. + 5. Dump the changelog to a file using nsds5task. + 6. Check if changelog is updated with ldap operations. + :expectedresults: + 1. Add user should PASS. + 2. Backup of database should PASS. + 3. Restore of database should PASS. + 4. Ldap operations should PASS. + 5. Changelog should be dumped successfully. + 6. Changelog dump file should contain ldap operations + """ + + backup_dir = os.path.join(topo.ms['supplier1'].get_bak_dir(), 'online_backup') + log.info('Run db2bak script to take database backup') + try: + topo.ms['supplier1'].tasks.db2bak(backup_dir=backup_dir, args={TASK_WAIT: True}) + except ValueError: + log.fatal('test_changelog5: Online backup failed') + assert False + + # Note: there is no way to check dbi on lmdb backup + # That said dbscan may perhaps do it ... 
+ _check_repl_changelog_backup(topo.ms['supplier1'], backup_dir); + + log.info('Run bak2db to restore directory server') + try: + topo.ms['supplier1'].tasks.bak2db(backup_dir=backup_dir, args={TASK_WAIT: True}) + except ValueError: + log.fatal('test_changelog5: Online restore failed') + assert False + + log.info('LDAP operations add, modify, modrdn and delete') + _perform_ldap_operations(topo) + changelog_ldif = _create_changelog_dump(topo) + _check_changelog_ldif(topo, changelog_ldif) + + +def test_verify_changelog_offline_backup(topo): + """Check ldap operations in changelog dump file after offline backup + + :id: feed290d-57dd-46e4-9ab3-422c77589867 + :setup: Replication with two suppliers. + :steps: 1. Add user to server. + 2. Stop server and take offline backup using db2bak. + 3. Restore the database using bak2db. + 4. Perform ldap modify, modrdn and delete operations. + 5. Start the server and dump the changelog using nsds5task. + 6. Check if changelog is updated with ldap operations. + :expectedresults: + 1. Add user should PASS. + 2. Backup of database should PASS. + 3. Restore of database should PASS. + 4. Ldap operations should PASS. + 5. Changelog should be dumped successfully. + 6. 
Changelog dump file should contain ldap operations + """ + + backup_dir = os.path.join(topo.ms['supplier1'].get_bak_dir(), 'offline_backup') + + topo.ms['supplier1'].stop() + log.info('Run db2bak to take database backup') + try: + topo.ms['supplier1'].db2bak(backup_dir) + except ValueError: + log.fatal('test_changelog5: Offline backup failed') + assert False + + log.info('Run bak2db to restore directory server') + try: + topo.ms['supplier1'].bak2db(backup_dir) + except ValueError: + log.fatal('test_changelog5: Offline restore failed') + assert False + topo.ms['supplier1'].start() + + _check_repl_changelog_backup(topo.ms['supplier1'], backup_dir); + + log.info('LDAP operations add, modify, modrdn and delete') + _perform_ldap_operations(topo) + changelog_ldif = _create_changelog_dump(topo) + _check_changelog_ldif(topo, changelog_ldif) + + +@pytest.mark.ds47669 +def test_changelog_maxage(topo, changelog_init): + """Check nsslapd-changelog max age values + + :id: d284ff27-03b2-412c-ac74-ac4f2d2fae3b + :setup: Replication with two supplier, change nsslapd-changelogdir to + '/var/lib/dirsrv/slapd-supplier1/changelog' and + set cn=Retro Changelog Plugin,cn=plugins,cn=config to 'on' + :steps: + 1. Set nsslapd-changelogmaxage in cn=changelog5,cn=config to values - '12345','10s','30M','12h','2D','4w' + 2. Set nsslapd-changelogmaxage in cn=changelog5,cn=config to values - '-123','xyz' + + :expectedresults: + 1. Operation should be successful + 2. Operation should be unsuccessful + """ + log.info('1. 
Test nsslapd-changelogmaxage in cn=changelog5,cn=config') + + # bind as directory manager + topo.ms["supplier1"].log.info("Bind as %s" % DN_DM) + topo.ms["supplier1"].simple_bind_s(DN_DM, PASSWORD) + + add_and_check(topo, CHANGELOG, MAXAGE, '12345', True) + add_and_check(topo, CHANGELOG, MAXAGE, '10s', True) + add_and_check(topo, CHANGELOG, MAXAGE, '30M', True) + add_and_check(topo, CHANGELOG, MAXAGE, '12h', True) + add_and_check(topo, CHANGELOG, MAXAGE, '2D', True) + add_and_check(topo, CHANGELOG, MAXAGE, '4w', True) + add_and_check(topo, CHANGELOG, MAXAGE, '-123', False) + add_and_check(topo, CHANGELOG, MAXAGE, 'xyz', False) + + +@pytest.mark.ds47669 +def test_ticket47669_changelog_triminterval(topo, changelog_init): + """Check nsslapd-changelog triminterval values + + :id: 8f850c37-7e7c-49dd-a4e0-9344638616d6 + :setup: Replication with two supplier, change nsslapd-changelogdir to + '/var/lib/dirsrv/slapd-supplier1/changelog' and + set cn=Retro Changelog Plugin,cn=plugins,cn=config to 'on' + :steps: + 1. Set nsslapd-changelogtrim-interval in cn=changelog5,cn=config to values - + '12345','10s','30M','12h','2D','4w' + 2. Set nsslapd-changelogtrim-interval in cn=changelog5,cn=config to values - '-123','xyz' + + :expectedresults: + 1. Operation should be successful + 2. Operation should be unsuccessful + """ + log.info('2. 
Test nsslapd-changelogtrim-interval in cn=changelog5,cn=config') + + # bind as directory manager + topo.ms["supplier1"].log.info("Bind as %s" % DN_DM) + topo.ms["supplier1"].simple_bind_s(DN_DM, PASSWORD) + + add_and_check(topo, CHANGELOG, TRIMINTERVAL, '12345', True) + add_and_check(topo, CHANGELOG, TRIMINTERVAL, '10s', True) + add_and_check(topo, CHANGELOG, TRIMINTERVAL, '30M', True) + add_and_check(topo, CHANGELOG, TRIMINTERVAL, '12h', True) + add_and_check(topo, CHANGELOG, TRIMINTERVAL, '2D', True) + add_and_check(topo, CHANGELOG, TRIMINTERVAL, '4w', True) + add_and_check(topo, CHANGELOG, TRIMINTERVAL, '-123', False) + add_and_check(topo, CHANGELOG, TRIMINTERVAL, 'xyz', False) + + +@pytest.mark.ds47669 +@pytest.mark.skipif(ds_supports_new_changelog(), reason="changelog compaction is done by the backend itself, with id2entry as well, nsslapd-changelogcompactdb-interval is no longer supported") +def test_changelog_compactdbinterval(topo, changelog_init): + """Check nsslapd-changelog compactdbinterval values + + :id: 0f4b3118-9dfa-4c2a-945c-72847b42a48c + :setup: Replication with two supplier, change nsslapd-changelogdir to + '/var/lib/dirsrv/slapd-supplier1/changelog' and + set cn=Retro Changelog Plugin,cn=plugins,cn=config to 'on' + :steps: + 1. Set nsslapd-changelogcompactdb-interval in cn=changelog5,cn=config to values - + '12345','10s','30M','12h','2D','4w' + 2. Set nsslapd-changelogcompactdb-interval in cn=changelog5,cn=config to values - + '-123','xyz' + + :expectedresults: + 1. Operation should be successful + 2. Operation should be unsuccessful + """ + log.info('3. 
Test nsslapd-changelogcompactdb-interval in cn=changelog5,cn=config') + + # bind as directory manager + topo.ms["supplier1"].log.info("Bind as %s" % DN_DM) + topo.ms["supplier1"].simple_bind_s(DN_DM, PASSWORD) + + add_and_check(topo, CHANGELOG, COMPACTDBINTERVAL, '12345', True) + add_and_check(topo, CHANGELOG, COMPACTDBINTERVAL, '10s', True) + add_and_check(topo, CHANGELOG, COMPACTDBINTERVAL, '30M', True) + add_and_check(topo, CHANGELOG, COMPACTDBINTERVAL, '12h', True) + add_and_check(topo, CHANGELOG, COMPACTDBINTERVAL, '2D', True) + add_and_check(topo, CHANGELOG, COMPACTDBINTERVAL, '4w', True) + add_and_check(topo, CHANGELOG, COMPACTDBINTERVAL, '-123', False) + add_and_check(topo, CHANGELOG, COMPACTDBINTERVAL, 'xyz', False) + + +@pytest.mark.ds47669 +def test_retrochangelog_maxage(topo, changelog_init): + """Check nsslapd-retrochangelog max age values + + :id: 0cb84d81-3e86-4dbf-84a2-66aefd8281db + :setup: Replication with two supplier, change nsslapd-changelogdir to + '/var/lib/dirsrv/slapd-supplier1/changelog' and + set cn=Retro Changelog Plugin,cn=plugins,cn=config to 'on' + :steps: + 1. Set nsslapd-changelogmaxage in cn=Retro Changelog Plugin,cn=plugins,cn=config to values - + '12345','10s','30M','12h','2D','4w' + 2. Set nsslapd-changelogmaxage in cn=Retro Changelog Plugin,cn=plugins,cn=config to values - + '-123','xyz' + + :expectedresults: + 1. Operation should be successful + 2. Operation should be unsuccessful + """ + log.info('4. 
Test nsslapd-changelogmaxage in cn=Retro Changelog Plugin,cn=plugins,cn=config') + + # bind as directory manager + topo.ms["supplier1"].log.info("Bind as %s" % DN_DM) + topo.ms["supplier1"].simple_bind_s(DN_DM, PASSWORD) + + add_and_check(topo, RETROCHANGELOG, MAXAGE, '12345', True) + add_and_check(topo, RETROCHANGELOG, MAXAGE, '10s', True) + add_and_check(topo, RETROCHANGELOG, MAXAGE, '30M', True) + add_and_check(topo, RETROCHANGELOG, MAXAGE, '12h', True) + add_and_check(topo, RETROCHANGELOG, MAXAGE, '2D', True) + add_and_check(topo, RETROCHANGELOG, MAXAGE, '4w', True) + add_and_check(topo, RETROCHANGELOG, MAXAGE, '-123', False) + add_and_check(topo, RETROCHANGELOG, MAXAGE, 'xyz', False) + + topo.ms["supplier1"].log.info("ticket47669 was successfully verified.") + +@pytest.mark.ds50736 +def test_retrochangelog_trimming_crash(topo, changelog_init): + """Check that when retroCL nsslapd-retrocthangelog contains invalid + value, then the instance does not crash at shutdown + + :id: 5d9bd7ca-e9bf-4be9-8fc8-902aa5513052 + :customerscenario: True + :setup: Replication with two supplier, change nsslapd-changelogdir to + '/var/lib/dirsrv/slapd-supplier1/changelog' and + set cn=Retro Changelog Plugin,cn=plugins,cn=config to 'on' + :steps: + 1. Set nsslapd-changelogmaxage in cn=Retro Changelog Plugin,cn=plugins,cn=config to value '-1' + This value is invalid. To disable retroCL trimming it should be set to 0 + 2. Do several restart + 3. check there is no 'Detected Disorderly Shutdown' message (crash) + 4. restore valid value for nsslapd-changelogmaxage '1w' + + :expectedresults: + 1. Operation should be successful + 2. Operation should be successful + 3. Operation should be successful + 4. Operation should be successful + """ + log.info('1. 
Test retroCL trimming crash in cn=Retro Changelog Plugin,cn=plugins,cn=config') + + # set the nsslapd-changelogmaxage directly on dse.ldif + # because the set value is invalid + topo.ms["supplier1"].log.info("ticket50736 start verification") + topo.ms["supplier1"].stop() + retroPlugin = RetroChangelogPlugin(topo.ms["supplier1"]) + dse_ldif = DSEldif(topo.ms["supplier1"]) + dse_ldif.replace(retroPlugin.dn, 'nsslapd-changelogmaxage', '-1') + topo.ms["supplier1"].start() + + # The crash should be systematic, but just in case do several restart + # with a delay to let all plugin init + for i in range(5): + time.sleep(1) + topo.ms["supplier1"].stop() + topo.ms["supplier1"].start() + + assert not topo.ms["supplier1"].detectDisorderlyShutdown() + + topo.ms["supplier1"].log.info("ticket 50736 was successfully verified.") + + +@pytest.mark.bz2034407 +@pytest.mark.skipif(not os.path.isfile("/usr/bin/db_stat"), reason="libdb-utils package is not installed") +def test_changelog_pagesize(topo): + """Test that changelog page size is set properly + + :id: 584a9a82-756d-11ed-8b38-482ae39447e5 + :setup: Replication with two suppliers + :steps: + 1. Check that file system preferred block size is 4K + 2. Run db_stat -e -h db_home_dir + 3. Check for 4K page size in db_stat output + :expectedresults: + 1. Mark the test as skipped if block size is not 4K + 2. Success + 3. 
Should not have any 4K page size in db_stat output + """ + + s1=topo.ms["supplier1"] + fs_pagesize = os.statvfs(s1.ds_paths.db_home_dir).f_bsize + if fs_pagesize != 4096: + pytest.skip("This test requires that database filesystem prefered block size is 4K.") + return + try: + cmd = ["/usr/bin/db_stat", "-h", s1.ds_paths.db_home_dir, "-e"] + log.debug(f"DEBUG: Running {cmd}") + output = subprocess.check_output(cmd, universal_newlines=True, stderr=subprocess.STDOUT) + except subprocess.CalledProcessError as e: + log.error(f'Failed to gather db statistics {cmd}: "{e.output}"') + log.error(e) + raise e + assert not re.search("^4096 *Page size", output, flags=re.MULTILINE) + + +if __name__ == '__main__': + # Run isolated + # -s for DEBUG mode + CURRENT_FILE = os.path.realpath(__file__) + pytest.main("-s %s" % CURRENT_FILE) diff --git a/dirsrvtests/tests/suites/replication/changelog_trimming_test.py b/dirsrvtests/tests/suites/replication/changelog_trimming_test.py new file mode 100644 index 0000000..ea8a688 --- /dev/null +++ b/dirsrvtests/tests/suites/replication/changelog_trimming_test.py @@ -0,0 +1,176 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2022 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. 
+# --- END COPYRIGHT BLOCK --- +# +import logging +import pytest +import os +import ldap +import time +from lib389._constants import * +from lib389.properties import * +from lib389.topologies import topology_m1 as topo +from lib389.replica import Changelog5 +from lib389.idm.domain import Domain +from lib389.utils import ensure_bytes, ds_supports_new_changelog + +pytestmark = pytest.mark.tier1 + +DEBUGGING = os.getenv("DEBUGGING", default=False) +if DEBUGGING: + logging.getLogger(__name__).setLevel(logging.DEBUG) +else: + logging.getLogger(__name__).setLevel(logging.INFO) +log = logging.getLogger(__name__) + +CHANGELOG = 'cn=changelog,{}'.format(DN_USERROOT_LDBM) +MAXAGE = 'nsslapd-changelogmaxage' +MAXENTRIES = 'nsslapd-changelogmaxentries' +TRIMINTERVAL = 'nsslapd-changelogtrim-interval' + +def do_mods(supplier, num): + """Perform a num of mods on the default suffix + """ + domain = Domain(supplier, DEFAULT_SUFFIX) + for i in range(num): + domain.replace('description', 'change %s' % i) + +def set_value(supplier, attr, val): + """ + Helper function to add/replace attr: val and check the added value + """ + try: + supplier.modify_s(CHANGELOG, [(ldap.MOD_REPLACE, attr, ensure_bytes(val))]) + except ldap.LDAPError as e: + log.error('Failed to add ' + attr + ': ' + val + ' to ' + CHANGELOG + ': error {}'.format(str(e))) + assert False + +@pytest.fixture(scope="module") +def setup_max_entries(topo, request): + """Configure logging and changelog max entries + """ + supplier = topo.ms["supplier1"] + + supplier.config.loglevel((ErrorLog.REPLICA,), 'error') + + if ds_supports_new_changelog(): + set_value(supplier, MAXENTRIES, '2') + set_value(supplier, TRIMINTERVAL, '300') + else: + cl = Changelog5(supplier) + cl.set_trim_interval('300') + +@pytest.fixture(scope="module") +def setup_max_age(topo, request): + """Configure logging and changelog max age + """ + supplier = topo.ms["supplier1"] + supplier.config.loglevel((ErrorLog.REPLICA,), 'error') + + if 
ds_supports_new_changelog(): + set_value(supplier, MAXAGE, '5') + set_value(supplier, TRIMINTERVAL, '300') + else: + cl = Changelog5(supplier) + cl.set_max_age('5') + cl.set_trim_interval('300') + +def test_max_age(topo, setup_max_age): + """Test changing the trimming interval works with max age + + :id: b5de04a5-4d92-49ea-a725-1d278a1c647c + :setup: single supplier + :steps: + 1. Perform modification to populate changelog + 2. Adjust the changelog trimming interval + 3. Check is trimming occurrs within the new interval + + :expectedresults: + 1. Modifications are successful + 2. The changelog trimming interval is correctly lowered + 3. Trimming occurs + + """ + log.info("Testing changelog trimming interval with max age...") + + supplier = topo.ms["supplier1"] + if not ds_supports_new_changelog(): + cl = Changelog5(supplier) + + # Do mods to build if cl entries + do_mods(supplier, 10) + + time.sleep(1) # Trimming should not have occurred + if supplier.searchErrorsLog("Trimmed") is True: + log.fatal('Trimming event unexpectedly occurred') + assert False + + if ds_supports_new_changelog(): + set_value(supplier, TRIMINTERVAL, '5') + else: + cl.set_trim_interval('5') + + time.sleep(3) # Trimming should not have occurred + if supplier.searchErrorsLog("Trimmed") is True: + log.fatal('Trimming event unexpectedly occurred') + assert False + + time.sleep(3) # Trimming should have occurred + if supplier.searchErrorsLog("Trimmed") is False: + log.fatal('Trimming event did not occur') + assert False + + +def test_max_entries(topo, setup_max_entries): + """Test changing the trimming interval works with max entries + + :id: b5de04a5-4d92-49ea-a725-1d278a1c647d + :setup: single supplier + :steps: + 1. Perform modification to populate changelog + 2. Adjust the changelog trimming interval + 3. Check is trimming occurrs within the new interval + + :expectedresults: + 1. Modifications are successful + 2. The changelog trimming interval is correctly lowered + 3. 
Trimming occurs + + """ + + log.info("Testing changelog triming interval with max entries...") + supplier = topo.ms["supplier1"] + if not ds_supports_new_changelog(): + cl = Changelog5(supplier) + + # reset errors log + supplier.deleteErrorLogs() + + # Do mods to build if cl entries + do_mods(supplier, 10) + + time.sleep(1) # Trimming should have occurred + if supplier.searchErrorsLog("Trimmed") is True: + log.fatal('Trimming event unexpectedly occurred') + assert False + + if ds_supports_new_changelog(): + set_value(supplier, TRIMINTERVAL, '5') + else: + cl.set_trim_interval('5') + + time.sleep(6) # Trimming should have occurred + if supplier.searchErrorsLog("Trimmed") is False: + log.fatal('Trimming event did not occur') + assert False + + +if __name__ == '__main__': + # Run isolated + # -s for DEBUG mode + CURRENT_FILE = os.path.realpath(__file__) + pytest.main("-s %s" % CURRENT_FILE) diff --git a/dirsrvtests/tests/suites/replication/cleanallruv_abort_certify_test.py b/dirsrvtests/tests/suites/replication/cleanallruv_abort_certify_test.py new file mode 100644 index 0000000..603693b --- /dev/null +++ b/dirsrvtests/tests/suites/replication/cleanallruv_abort_certify_test.py @@ -0,0 +1,136 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2022 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. +# --- END COPYRIGHT BLOCK --- +# +import logging +import pytest +import os +import time +from lib389._constants import DEFAULT_SUFFIX +from lib389.topologies import topology_m4 +from lib389.tasks import CleanAllRUVTask +from lib389.replica import ReplicationManager, Replicas + +log = logging.getLogger(__name__) + + +def remove_supplier4_agmts(msg, topology_m4): + """Remove all the repl agmts to supplier4. """ + + log.info('%s: remove all the agreements to supplier 4...' % msg) + repl = ReplicationManager(DEFAULT_SUFFIX) + # This will delete m4 from the topo *and* remove all incoming agreements + # to m4. 
+ repl.remove_supplier(topology_m4.ms["supplier4"], + [topology_m4.ms["supplier1"], topology_m4.ms["supplier2"], topology_m4.ms["supplier3"]]) + +def task_done(topology_m4, task_dn, timeout=60): + """Check if the task is complete""" + + attrlist = ['nsTaskLog', 'nsTaskStatus', 'nsTaskExitCode', + 'nsTaskCurrentItem', 'nsTaskTotalItems'] + done = False + count = 0 + + while not done and count < timeout: + try: + entry = topology_m4.ms["supplier1"].getEntry(task_dn, attrlist=attrlist) + if entry is not None: + if entry.hasAttr('nsTaskExitCode'): + done = True + break + else: + done = True + break + except ldap.NO_SUCH_OBJECT: + done = True + break + except ldap.LDAPError: + break + time.sleep(1) + count += 1 + + return done + +@pytest.mark.flaky(max_runs=2, min_passes=1) +def test_abort_certify(topology_m4): + """Test the abort task with a replica-certify-all option + + :id: 78959966-d644-44a8-b98c-1fcf21b45eb0 + :setup: Replication setup with four suppliers + :steps: + 1. Disable replication on supplier 4 + 2. Remove agreements to supplier 4 from other suppliers + 3. Stop supplier 2 + 4. Run a cleanallruv task on supplier 1 + 5. Run a cleanallruv abort task on supplier 1 with a replica-certify-all option + :expectedresults: No hanging tasks left + 1. Replication on supplier 4 should be disabled + 2. Agreements to supplier 4 should be removed + 3. Supplier 2 should be stopped + 4. Operation should be successful + 5. 
Operation should be successful + """ + + log.info('Running test_abort_certify...') + + # Remove the agreements from the other suppliers that point to supplier 4 + repl = ReplicationManager(DEFAULT_SUFFIX) + m4rid = repl.get_rid(topology_m4.ms["supplier4"]) + remove_supplier4_agmts("test_abort_certify", topology_m4) + + # Stop supplier 2 + log.info('test_abort_certify: stop supplier 2 to freeze the cleanAllRUV task...') + topology_m4.ms["supplier2"].stop() + + # Run the task + log.info('test_abort_certify: add the cleanAllRUV task...') + cruv_task = CleanAllRUVTask(topology_m4.ms["supplier1"]) + cruv_task.create(properties={ + 'replica-id': m4rid, + 'replica-base-dn': DEFAULT_SUFFIX, + 'replica-force-cleaning': 'no', + 'replica-certify-all': 'yes' + }) + # Wait a bit + time.sleep(2) + + # Abort the task + log.info('test_abort_certify: abort the cleanAllRUV task...') + abort_task = cruv_task.abort(certify=True) + + # Wait a while and make sure the abort task is still running + log.info('test_abort_certify...') + + if task_done(topology_m4, abort_task.dn, 10): + log.fatal('test_abort_certify: abort task incorrectly finished') + assert False + + # Now start supplier 2 so it can be aborted + log.info('test_abort_certify: start supplier 2 to allow the abort task to finish...') + topology_m4.ms["supplier2"].start() + + # Wait for the abort task to stop + if not task_done(topology_m4, abort_task.dn, 90): + log.fatal('test_abort_certify: The abort CleanAllRUV task was not aborted') + assert False + + # Check supplier 1 does not have the clean task running + log.info('test_abort_certify: check supplier 1 no longer has a cleanAllRUV task...') + if not task_done(topology_m4, cruv_task.dn): + log.fatal('test_abort_certify: CleanAllRUV task was not aborted') + assert False + + log.info('test_abort_certify PASSED') + + +if __name__ == '__main__': + # Run isolated + # -s for DEBUG mode + CURRENT_FILE = os.path.realpath(__file__) + pytest.main(["-s", CURRENT_FILE]) + diff --git 
a/dirsrvtests/tests/suites/replication/cleanallruv_abort_restart_test.py b/dirsrvtests/tests/suites/replication/cleanallruv_abort_restart_test.py new file mode 100644 index 0000000..1406c65 --- /dev/null +++ b/dirsrvtests/tests/suites/replication/cleanallruv_abort_restart_test.py @@ -0,0 +1,146 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2022 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. +# --- END COPYRIGHT BLOCK --- +# +import logging +import pytest +import os +import time +from lib389._constants import DEFAULT_SUFFIX +from lib389.topologies import topology_m4 +from lib389.tasks import CleanAllRUVTask +from lib389.replica import ReplicationManager + +log = logging.getLogger(__name__) + + +def remove_supplier4_agmts(msg, topology_m4): + """Remove all the repl agmts to supplier4. """ + + log.info('%s: remove all the agreements to supplier 4...' % msg) + repl = ReplicationManager(DEFAULT_SUFFIX) + # This will delete m4 from the topo *and* remove all incoming agreements + # to m4. 
+ repl.remove_supplier(topology_m4.ms["supplier4"], + [topology_m4.ms["supplier1"], topology_m4.ms["supplier2"], topology_m4.ms["supplier3"]]) + +def task_done(topology_m4, task_dn, timeout=60): + """Check if the task is complete""" + + attrlist = ['nsTaskLog', 'nsTaskStatus', 'nsTaskExitCode', + 'nsTaskCurrentItem', 'nsTaskTotalItems'] + done = False + count = 0 + + while not done and count < timeout: + try: + entry = topology_m4.ms["supplier1"].getEntry(task_dn, attrlist=attrlist) + if entry is not None: + if entry.hasAttr('nsTaskExitCode'): + done = True + break + else: + done = True + break + except ldap.NO_SUCH_OBJECT: + done = True + break + except ldap.LDAPError: + break + time.sleep(1) + count += 1 + + return done + + +@pytest.mark.flaky(max_runs=2, min_passes=1) +def test_abort_restart(topology_m4): + """Test the abort task can handle a restart, and then resume + + :id: b66e33d4-fe85-4e1c-b882-75da80f70ab3 + :setup: Replication setup with four suppliers + :steps: + 1. Disable replication on supplier 4 + 2. Remove agreements to supplier 4 from other suppliers + 3. Stop supplier 3 + 4. Run a cleanallruv task on supplier 1 + 5. Run a cleanallruv abort task on supplier 1 + 6. Restart supplier 1 + 7. Make sure that no crash happened + 8. Start supplier 3 + 9. Check supplier 1 does not have the clean task running + 10. Check that errors log doesn't have 'Aborting abort task' message + :expectedresults: + 1. Replication on supplier 4 should be disabled + 2. Agreements to supplier 4 should be removed + 3. Supplier 3 should be stopped + 4. Operation should be successful + 5. Operation should be successful + 6. Supplier 1 should be restarted + 7. No crash should happened + 8. Supplier 3 should be started + 9. Check supplier 1 shouldn't have the clean task running + 10. 
Errors log shouldn't have 'Aborting abort task' message + """ + + log.info('Running test_abort_restart...') + # Remove the agreements from the other suppliers that point to supplier 4 + repl = ReplicationManager(DEFAULT_SUFFIX) + m4rid = repl.get_rid(topology_m4.ms["supplier4"]) + remove_supplier4_agmts("test_abort", topology_m4) + + # Stop supplier 3 + log.info('test_abort_restart: stop supplier 3 to freeze the cleanAllRUV task...') + topology_m4.ms["supplier3"].stop() + + # Run the task + log.info('test_abort_restart: add the cleanAllRUV task...') + cruv_task = CleanAllRUVTask(topology_m4.ms["supplier1"]) + cruv_task.create(properties={ + 'replica-id': m4rid, + 'replica-base-dn': DEFAULT_SUFFIX, + 'replica-force-cleaning': 'no', + 'replica-certify-all': 'yes' + }) + # Wait a bit + time.sleep(2) + + # Abort the task + cruv_task.abort(certify=True) + + # Check supplier 1 does not have the clean task running + log.info('test_abort_abort: check supplier 1 no longer has a cleanAllRUV task...') + if not task_done(topology_m4, cruv_task.dn): + log.fatal('test_abort_restart: CleanAllRUV task was not aborted') + assert False + + # Now restart supplier 1, and make sure the abort process completes + topology_m4.ms["supplier1"].restart() + if topology_m4.ms["supplier1"].detectDisorderlyShutdown(): + log.fatal('test_abort_restart: Supplier 1 previously crashed!') + assert False + + # Start supplier 3 + topology_m4.ms["supplier3"].start() + + # Need to wait 5 seconds before server processes any leftover tasks + time.sleep(6) + + # Check supplier 1 tried to run abort task. We expect the abort task to be aborted. 
+ if not topology_m4.ms["supplier1"].searchErrorsLog('Aborting abort task'): + log.fatal('test_abort_restart: Abort task did not restart') + assert False + + log.info('test_abort_restart PASSED') + + +if __name__ == '__main__': + # Run isolated + # -s for DEBUG mode + CURRENT_FILE = os.path.realpath(__file__) + pytest.main(["-s", CURRENT_FILE]) + diff --git a/dirsrvtests/tests/suites/replication/cleanallruv_abort_test.py b/dirsrvtests/tests/suites/replication/cleanallruv_abort_test.py new file mode 100644 index 0000000..f891881 --- /dev/null +++ b/dirsrvtests/tests/suites/replication/cleanallruv_abort_test.py @@ -0,0 +1,123 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2022 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. +# --- END COPYRIGHT BLOCK --- +# +import logging +import pytest +import os +import time +from lib389._constants import DEFAULT_SUFFIX +from lib389.topologies import topology_m4 +from lib389.tasks import CleanAllRUVTask +from lib389.replica import ReplicationManager + +log = logging.getLogger(__name__) + + +def remove_supplier4_agmts(msg, topology_m4): + """Remove all the repl agmts to supplier4. """ + + log.info('%s: remove all the agreements to supplier 4...' % msg) + repl = ReplicationManager(DEFAULT_SUFFIX) + # This will delete m4 from the topo *and* remove all incoming agreements + # to m4. 
+ repl.remove_supplier(topology_m4.ms["supplier4"], + [topology_m4.ms["supplier1"], topology_m4.ms["supplier2"], topology_m4.ms["supplier3"]]) + +def task_done(topology_m4, task_dn, timeout=60): + """Check if the task is complete""" + + attrlist = ['nsTaskLog', 'nsTaskStatus', 'nsTaskExitCode', + 'nsTaskCurrentItem', 'nsTaskTotalItems'] + done = False + count = 0 + + while not done and count < timeout: + try: + entry = topology_m4.ms["supplier1"].getEntry(task_dn, attrlist=attrlist) + if entry is not None: + if entry.hasAttr('nsTaskExitCode'): + done = True + break + else: + done = True + break + except ldap.NO_SUCH_OBJECT: + done = True + break + except ldap.LDAPError: + break + time.sleep(1) + count += 1 + + return done + + +@pytest.mark.flaky(max_runs=2, min_passes=1) +def test_abort(topology_m4): + """Test the abort task basic functionality + + :id: b09a6887-8de0-4fac-8e41-73ccbaaf7a08 + :setup: Replication setup with four suppliers + :steps: + 1. Disable replication on supplier 4 + 2. Remove agreements to supplier 4 from other suppliers + 3. Stop supplier 2 + 4. Run a cleanallruv task on supplier 1 + 5. Run a cleanallruv abort task on supplier 1 + :expectedresults: No hanging tasks left + 1. Replication on supplier 4 should be disabled + 2. Agreements to supplier 4 should be removed + 3. Supplier 2 should be stopped + 4. Operation should be successful + 5. 
Operation should be successful + """ + + log.info('Running test_abort...') + # Remove the agreements from the other suppliers that point to supplier 4 + repl = ReplicationManager(DEFAULT_SUFFIX) + m4rid = repl.get_rid(topology_m4.ms["supplier4"]) + remove_supplier4_agmts("test_abort", topology_m4) + + # Stop supplier 2 + log.info('test_abort: stop supplier 2 to freeze the cleanAllRUV task...') + topology_m4.ms["supplier2"].stop() + + # Run the task + log.info('test_abort: add the cleanAllRUV task...') + cruv_task = CleanAllRUVTask(topology_m4.ms["supplier1"]) + cruv_task.create(properties={ + 'replica-id': m4rid, + 'replica-base-dn': DEFAULT_SUFFIX, + 'replica-force-cleaning': 'no', + 'replica-certify-all': 'yes' + }) + # Wait a bit + time.sleep(2) + + # Abort the task + cruv_task.abort() + + # Check supplier 1 does not have the clean task running + log.info('test_abort: check supplier 1 no longer has a cleanAllRUV task...') + if not task_done(topology_m4, cruv_task.dn): + log.fatal('test_abort: CleanAllRUV task was not aborted') + assert False + + # Start supplier 2 + log.info('test_abort: start supplier 2 to begin the restore process...') + topology_m4.ms["supplier2"].start() + + log.info('test_abort PASSED') + + +if __name__ == '__main__': + # Run isolated + # -s for DEBUG mode + CURRENT_FILE = os.path.realpath(__file__) + pytest.main(["-s", CURRENT_FILE]) + diff --git a/dirsrvtests/tests/suites/replication/cleanallruv_force_test.py b/dirsrvtests/tests/suites/replication/cleanallruv_force_test.py new file mode 100644 index 0000000..d5b9305 --- /dev/null +++ b/dirsrvtests/tests/suites/replication/cleanallruv_force_test.py @@ -0,0 +1,187 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2022 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. 
+# --- END COPYRIGHT BLOCK --- +# +import logging +import pytest +import os +import time +import random +import threading +from lib389._constants import DEFAULT_SUFFIX +from lib389.topologies import topology_m4 +from lib389.tasks import CleanAllRUVTask +from lib389.replica import Replicas, ReplicationManager +from lib389.idm.directorymanager import DirectoryManager +from lib389.idm.user import UserAccounts + +log = logging.getLogger(__name__) + + +class AddUsers(threading.Thread): + def __init__(self, inst, num_users): + threading.Thread.__init__(self) + self.daemon = True + self.inst = inst + self.num_users = num_users + + def run(self): + """Start adding users""" + + dm = DirectoryManager(self.inst) + conn = dm.bind() + + users = UserAccounts(conn, DEFAULT_SUFFIX) + + u_range = list(range(self.num_users)) + random.shuffle(u_range) + + for idx in u_range: + try: + users.create(properties={ + 'uid': 'testuser%s' % idx, + 'cn' : 'testuser%s' % idx, + 'sn' : 'user%s' % idx, + 'uidNumber' : '%s' % (1000 + idx), + 'gidNumber' : '%s' % (1000 + idx), + 'homeDirectory' : '/home/testuser%s' % idx + }) + # One of the suppliers was probably put into read only mode - just break out + except ldap.UNWILLING_TO_PERFORM: + break + except ldap.ALREADY_EXISTS: + pass + conn.close() + +def remove_some_supplier4_agmts(msg, topology_m4): + """Remove all the repl agmts to supplier4 except from supplier3. Used by + the force tests.""" + + log.info('%s: remove the agreements to supplier 4...' % msg) + repl = ReplicationManager(DEFAULT_SUFFIX) + # This will delete m4 from the topo *and* remove all incoming agreements + # to m4. 
+ repl.remove_supplier(topology_m4.ms["supplier4"], + [topology_m4.ms["supplier1"], topology_m4.ms["supplier2"]]) + +def task_done(topology_m4, task_dn, timeout=60): + """Check if the task is complete""" + + attrlist = ['nsTaskLog', 'nsTaskStatus', 'nsTaskExitCode', + 'nsTaskCurrentItem', 'nsTaskTotalItems'] + done = False + count = 0 + + while not done and count < timeout: + try: + entry = topology_m4.ms["supplier1"].getEntry(task_dn, attrlist=attrlist) + if entry is not None: + if entry.hasAttr('nsTaskExitCode'): + done = True + break + else: + done = True + break + except ldap.NO_SUCH_OBJECT: + done = True + break + except ldap.LDAPError: + break + time.sleep(1) + count += 1 + + return done + +def check_ruvs(msg, topology_m4, m4rid): + """Check suppliers 1-3 for supplier 4's rid.""" + for inst in (topology_m4.ms["supplier1"], topology_m4.ms["supplier2"], topology_m4.ms["supplier3"]): + clean = False + replicas = Replicas(inst) + replica = replicas.get(DEFAULT_SUFFIX) + log.info('check_ruvs for replica %s:%s (suffix:rid)' % (replica.get_suffix(), replica.get_rid())) + + count = 0 + while not clean and count < 20: + ruv = replica.get_ruv() + if m4rid in ruv._rids: + time.sleep(5) + count = count + 1 + else: + clean = True + if not clean: + raise Exception("Supplier %s was not cleaned in time." % inst.serverid) + return True + +def test_clean_force(topology_m4): + """Check that multiple tasks with a 'force' option work properly + + :id: f8810dfe-d2d2-4dd9-ba03-5fc14896fabe + :setup: Replication setup with four suppliers + :steps: + 1. Stop supplier 3 + 2. Add a bunch of updates to supplier 4 + 3. Disable replication on supplier 4 + 4. Start supplier 3 + 5. Remove agreements to supplier 4 from other suppliers + 6. Run a cleanallruv task on supplier 1 with a 'force' option 'on' + 7. Check that everything was cleaned + :expectedresults: + 1. Supplier 3 should be stopped + 2. Operation should be successful + 3. Replication on supplier 4 should be disabled + 4. 
Supplier 3 should be started + 5. Agreements to supplier 4 should be removed + 6. Operation should be successful + 7. Everything should be cleaned + """ + + log.info('Running test_clean_force...') + + # Stop supplier 3, while we update supplier 4, so that 3 is behind the other suppliers + topology_m4.ms["supplier3"].stop() + + # Add a bunch of updates to supplier 4 + m4_add_users = AddUsers(topology_m4.ms["supplier4"], 10) + m4_add_users.start() + m4_add_users.join() + + # Remove the agreements from the other suppliers that point to supplier 4 + repl = ReplicationManager(DEFAULT_SUFFIX) + m4rid = repl.get_rid(topology_m4.ms["supplier4"]) + remove_some_supplier4_agmts("test_clean_force", topology_m4) + + # Start supplier 3, it should be out of sync with the other replicas... + topology_m4.ms["supplier3"].start() + + # Remove the agreement to replica 4 + replica = Replicas(topology_m4.ms["supplier3"]).get(DEFAULT_SUFFIX) + replica.get_agreements().get("004").delete() + + # Run the task, use "force" because supplier 3 is not in sync with the other replicas + # in regards to the replica 4 RUV + log.info('test_clean: run the cleanAllRUV task...') + cruv_task = CleanAllRUVTask(topology_m4.ms["supplier1"]) + cruv_task.create(properties={ + 'replica-id': m4rid, + 'replica-base-dn': DEFAULT_SUFFIX, + 'replica-force-cleaning': 'yes' + }) + cruv_task.wait() + + # Check the other supplier's RUV for 'replica 4' + log.info('test_clean_force: check all the suppliers have been cleaned...') + clean = check_ruvs("test_clean_force", topology_m4, m4rid) + assert clean + + log.info('test_clean_force PASSED') + + +if __name__ == '__main__': + # Run isolated + # -s for DEBUG mode + CURRENT_FILE = os.path.realpath(__file__) + pytest.main(["-s", CURRENT_FILE]) diff --git a/dirsrvtests/tests/suites/replication/cleanallruv_max_tasks_test.py b/dirsrvtests/tests/suites/replication/cleanallruv_max_tasks_test.py new file mode 100644 index 0000000..0a88995 --- /dev/null +++ 
b/dirsrvtests/tests/suites/replication/cleanallruv_max_tasks_test.py @@ -0,0 +1,72 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2019 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. +# --- END COPYRIGHT BLOCK --- +# +import threading +import pytest +import random +from lib389 import DirSrv +from lib389.tasks import * +from lib389.utils import * +from lib389.topologies import topology_m4, topology_m2 +from lib389._constants import * + +pytestmark = pytest.mark.tier1 + +@pytest.mark.skipif(ds_is_older("1.4.1.6"), reason="Not implemented") +def test_max_tasks(topology_m4): + """Test we can not create more than 64 cleaning tasks + + This test needs to be a standalone test becuase there is no easy way to + "restore" the instance after running this test + + :id: c34d0b40-3c3e-4f53-8656-5e4c2a310a1f + :setup: Replication setup with four suppliers + :steps: + 1. Stop suppliers 3 & 4 + 2. Create over 64 tasks between m1 and m2 + 3. Check logs to see if (>64) tasks were rejected + + :expectedresults: + 1. Success + 2. Success + 3. 
Success + """ + + # Stop suppliers 3 & 4 + m1 = topology_m4.ms["supplier1"] + m2 = topology_m4.ms["supplier2"] + m3 = topology_m4.ms["supplier3"] + m4 = topology_m4.ms["supplier4"] + m3.stop() + m4.stop() + + # Add over 64 tasks between supplier1 & 2 to try to exceed the 64 task limit + for i in range(1, 64): + cruv_task = CleanAllRUVTask(m1) + cruv_task.create(properties={ + 'replica-id': str(i), + 'replica-base-dn': DEFAULT_SUFFIX, + 'replica-force-cleaning': 'no', # This forces these tasks to stick around + }) + cruv_task = CleanAllRUVTask(m2) + cruv_task.create(properties={ + 'replica-id': "10" + str(i), + 'replica-base-dn': DEFAULT_SUFFIX, + 'replica-force-cleaning': 'yes', # This allows the tasks to propagate + }) + + # Check the errors log for our error message in supplier 1 + assert m1.searchErrorsLog('Exceeded maximum number of active CLEANALLRUV tasks') + + +if __name__ == '__main__': + # Run isolated + # -s for DEBUG mode + CURRENT_FILE = os.path.realpath(__file__) + pytest.main("-s %s" % CURRENT_FILE) + diff --git a/dirsrvtests/tests/suites/replication/cleanallruv_multiple_force_test.py b/dirsrvtests/tests/suites/replication/cleanallruv_multiple_force_test.py new file mode 100644 index 0000000..0a0848b --- /dev/null +++ b/dirsrvtests/tests/suites/replication/cleanallruv_multiple_force_test.py @@ -0,0 +1,214 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2022 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. 
+# --- END COPYRIGHT BLOCK --- +# +import ldap +import logging +import os +import pytest +import random +import time +import threading +from lib389._constants import DEFAULT_SUFFIX +from lib389.topologies import topology_m4 +from lib389.tasks import CleanAllRUVTask +from lib389.idm.directorymanager import DirectoryManager +from lib389.idm.user import UserAccounts +from lib389.replica import ReplicationManager, Replicas + +log = logging.getLogger(__name__) + + +class AddUsers(threading.Thread): + def __init__(self, inst, num_users): + threading.Thread.__init__(self) + self.daemon = True + self.inst = inst + self.num_users = num_users + + def run(self): + """Start adding users""" + + dm = DirectoryManager(self.inst) + conn = dm.bind() + + users = UserAccounts(conn, DEFAULT_SUFFIX) + + u_range = list(range(self.num_users)) + random.shuffle(u_range) + + for idx in u_range: + try: + users.create(properties={ + 'uid': 'testuser%s' % idx, + 'cn' : 'testuser%s' % idx, + 'sn' : 'user%s' % idx, + 'uidNumber' : '%s' % (1000 + idx), + 'gidNumber' : '%s' % (1000 + idx), + 'homeDirectory' : '/home/testuser%s' % idx + }) + # One of the suppliers was probably put into read only mode - just break out + except ldap.UNWILLING_TO_PERFORM: + break + except ldap.ALREADY_EXISTS: + pass + conn.close() + +def remove_some_supplier4_agmts(msg, topology_m4): + """Remove all the repl agmts to supplier4 except from supplier3. Used by + the force tests.""" + + log.info('%s: remove the agreements to supplier 4...' % msg) + repl = ReplicationManager(DEFAULT_SUFFIX) + # This will delete m4 from the topo *and* remove all incoming agreements + # to m4. 
+ repl.remove_supplier(topology_m4.ms["supplier4"], + [topology_m4.ms["supplier1"], topology_m4.ms["supplier2"]]) + +def task_done(topology_m4, task_dn, timeout=60): + """Check if the task is complete""" + + attrlist = ['nsTaskLog', 'nsTaskStatus', 'nsTaskExitCode', + 'nsTaskCurrentItem', 'nsTaskTotalItems'] + done = False + count = 0 + + while not done and count < timeout: + try: + entry = topology_m4.ms["supplier1"].getEntry(task_dn, attrlist=attrlist) + if entry is not None: + if entry.hasAttr('nsTaskExitCode'): + done = True + break + else: + done = True + break + except ldap.NO_SUCH_OBJECT: + done = True + break + except ldap.LDAPError: + break + time.sleep(1) + count += 1 + + return done + +def check_ruvs(msg, topology_m4, m4rid): + """Check suppliers 1-3 for supplier 4's rid.""" + for inst in (topology_m4.ms["supplier1"], topology_m4.ms["supplier2"], topology_m4.ms["supplier3"]): + clean = False + replicas = Replicas(inst) + replica = replicas.get(DEFAULT_SUFFIX) + log.info('check_ruvs for replica %s:%s (suffix:rid)' % (replica.get_suffix(), replica.get_rid())) + + count = 0 + while not clean and count < 20: + ruv = replica.get_ruv() + if m4rid in ruv._rids: + time.sleep(5) + count = count + 1 + else: + clean = True + if not clean: + raise Exception("Supplier %s was not cleaned in time." % inst.serverid) + return True + + +def test_multiple_tasks_with_force(topology_m4): + """Check that multiple tasks with a 'force' option work properly + + :id: eb76a93d-8d1c-405e-9f25-6e8d5a781098 + :setup: Replication setup with four suppliers + :steps: + 1. Stop supplier 3 + 2. Add a bunch of updates to supplier 4 + 3. Disable replication on supplier 4 + 4. Start supplier 3 + 5. Remove agreements to supplier 4 from other suppliers + 6. Run a cleanallruv task on supplier 1 with a 'force' option 'on' + 7. Run one more cleanallruv task on supplier 1 with a 'force' option 'off' + 8. Check that everything was cleaned + :expectedresults: + 1. Supplier 3 should be stopped + 2. 
Operation should be successful + 3. Replication on supplier 4 should be disabled + 4. Supplier 3 should be started + 5. Agreements to supplier 4 should be removed + 6. Operation should be successful + 7. Operation should be successful + 8. Everything should be cleaned + """ + + log.info('Running test_multiple_tasks_with_force...') + + # Stop supplier 3, while we update supplier 4, so that 3 is behind the other suppliers + topology_m4.ms["supplier3"].stop() + repl = ReplicationManager(DEFAULT_SUFFIX) + m4rid = repl.get_rid(topology_m4.ms["supplier4"]) + + # Add a bunch of updates to supplier 4 + m4_add_users = AddUsers(topology_m4.ms["supplier4"], 10) + m4_add_users.start() + m4_add_users.join() + + # Disable supplier 4 + # Remove the agreements from the other suppliers that point to supplier 4 + remove_some_supplier4_agmts("test_multiple_tasks_with_force", topology_m4) + + # Start supplier 3, it should be out of sync with the other replicas... + topology_m4.ms["supplier3"].start() + + # Remove the agreement to replica 4 + replica = Replicas(topology_m4.ms["supplier3"]).get(DEFAULT_SUFFIX) + replica.get_agreements().get("004").delete() + + # Run the task, use "force" because supplier 3 is not in sync with the other replicas + # in regards to the replica 4 RUV + log.info('test_multiple_tasks_with_force: run the cleanAllRUV task with "force" on...') + cruv_task = CleanAllRUVTask(topology_m4.ms["supplier1"]) + cruv_task.create(properties={ + 'replica-id': m4rid, + 'replica-base-dn': DEFAULT_SUFFIX, + 'replica-force-cleaning': 'yes', + 'replica-certify-all': 'no' + }) + + log.info('test_multiple_tasks_with_force: run the cleanAllRUV task with "force" off...') + + # NOTE: This must be try not py.test raises, because the above may or may + # not have completed yet .... 
+ try: + cruv_task_fail = CleanAllRUVTask(topology_m4.ms["supplier1"]) + cruv_task_fail.create(properties={ + 'replica-id': m4rid, + 'replica-base-dn': DEFAULT_SUFFIX, + 'replica-force-cleaning': 'no', + 'replica-certify-all': 'no' + }) + cruv_task_fail.wait() + except ldap.UNWILLING_TO_PERFORM: + pass + # Wait for the force task .... + cruv_task.wait() + + # Check the other supplier's RUV for 'replica 4' + log.info('test_multiple_tasks_with_force: check all the suppliers have been cleaned...') + clean = check_ruvs("test_clean_force", topology_m4, m4rid) + assert clean + # Check supplier 1 does not have the clean task running + log.info('test_abort: check supplier 1 no longer has a cleanAllRUV task...') + if not task_done(topology_m4, cruv_task.dn): + log.fatal('test_abort: CleanAllRUV task was not aborted') + assert False + + +if __name__ == '__main__': + # Run isolated + # -s for DEBUG mode + CURRENT_FILE = os.path.realpath(__file__) + pytest.main(["-s", CURRENT_FILE]) + diff --git a/dirsrvtests/tests/suites/replication/cleanallruv_restart_test.py b/dirsrvtests/tests/suites/replication/cleanallruv_restart_test.py new file mode 100644 index 0000000..2e8d7e4 --- /dev/null +++ b/dirsrvtests/tests/suites/replication/cleanallruv_restart_test.py @@ -0,0 +1,161 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2022 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. +# --- END COPYRIGHT BLOCK --- +# +import logging +import pytest +import os +import time +from lib389._constants import DEFAULT_SUFFIX +from lib389.topologies import topology_m4 +from lib389.tasks import CleanAllRUVTask +from lib389.replica import ReplicationManager, Replicas + +log = logging.getLogger(__name__) + + +def remove_supplier4_agmts(msg, topology_m4): + """Remove all the repl agmts to supplier4. """ + + log.info('%s: remove all the agreements to supplier 4...' 
% msg) + repl = ReplicationManager(DEFAULT_SUFFIX) + # This will delete m4 from the topo *and* remove all incoming agreements + # to m4. + repl.remove_supplier(topology_m4.ms["supplier4"], + [topology_m4.ms["supplier1"], topology_m4.ms["supplier2"], topology_m4.ms["supplier3"]]) + +def task_done(topology_m4, task_dn, timeout=60): + """Check if the task is complete""" + + attrlist = ['nsTaskLog', 'nsTaskStatus', 'nsTaskExitCode', + 'nsTaskCurrentItem', 'nsTaskTotalItems'] + done = False + count = 0 + + while not done and count < timeout: + try: + entry = topology_m4.ms["supplier1"].getEntry(task_dn, attrlist=attrlist) + if entry is not None: + if entry.hasAttr('nsTaskExitCode'): + done = True + break + else: + done = True + break + except ldap.NO_SUCH_OBJECT: + done = True + break + except ldap.LDAPError: + break + time.sleep(1) + count += 1 + + return done + + +def check_ruvs(msg, topology_m4, m4rid): + """Check suppliers 1-3 for supplier 4's rid.""" + for inst in (topology_m4.ms["supplier1"], topology_m4.ms["supplier2"], topology_m4.ms["supplier3"]): + clean = False + replicas = Replicas(inst) + replica = replicas.get(DEFAULT_SUFFIX) + log.info('check_ruvs for replica %s:%s (suffix:rid)' % (replica.get_suffix(), replica.get_rid())) + + count = 0 + while not clean and count < 20: + ruv = replica.get_ruv() + if m4rid in ruv._rids: + time.sleep(5) + count = count + 1 + else: + clean = True + if not clean: + raise Exception("Supplier %s was not cleaned in time." % inst.serverid) + return True + + +@pytest.mark.flaky(max_runs=2, min_passes=1) +def test_clean_restart(topology_m4): + """Check that cleanallruv task works properly after a restart + + :id: c6233bb3-092c-4919-9ac9-80dd02cc6e02 + :setup: Replication setup with four suppliers + :steps: + 1. Disable replication on supplier 4 + 2. Remove agreements to supplier 4 from other suppliers + 3. Stop supplier 3 + 4. Run a cleanallruv task on supplier 1 + 5. Stop supplier 1 + 6. Start supplier 3 + 7. 
Make sure that no crash happened + 8. Start supplier 1 + 9. Make sure that no crash happened + 10. Check that everything was cleaned + :expectedresults: + 1. Operation should be successful + 2. Agreements to supplier 4 should be removed + 3. Supplier 3 should be stopped + 4. Cleanallruv task should be successfully executed + 5. Supplier 1 should be stopped + 6. Supplier 3 should be started + 7. No crash should happened + 8. Supplier 1 should be started + 9. No crash should happened + 10. Everything should be cleaned + """ + log.info('Running test_clean_restart...') + + # Disable supplier 4 + log.info('test_clean: disable supplier 4...') + + # Remove the agreements from the other suppliers that point to supplier 4 + repl = ReplicationManager(DEFAULT_SUFFIX) + m4rid = repl.get_rid(topology_m4.ms["supplier4"]) + remove_supplier4_agmts("test_clean", topology_m4) + + # Stop supplier 3 to keep the task running, so we can stop supplier 1... + topology_m4.ms["supplier3"].stop() + + # Run the task + log.info('test_clean: run the cleanAllRUV task...') + cruv_task = CleanAllRUVTask(topology_m4.ms["supplier1"]) + cruv_task.create(properties={ + 'replica-id': m4rid, + 'replica-base-dn': DEFAULT_SUFFIX, + 'replica-force-cleaning': 'no', + 'replica-certify-all': 'yes' + }) + + # Sleep a bit, then stop supplier 1 + time.sleep(5) + topology_m4.ms["supplier1"].stop() + + # Now start supplier 3 & 1, and make sure we didn't crash + topology_m4.ms["supplier3"].start() + if topology_m4.ms["supplier3"].detectDisorderlyShutdown(): + log.fatal('test_clean_restart: Supplier 3 previously crashed!') + assert False + + topology_m4.ms["supplier1"].start(timeout=30) + if topology_m4.ms["supplier1"].detectDisorderlyShutdown(): + log.fatal('test_clean_restart: Supplier 1 previously crashed!') + assert False + + # Check the other supplier's RUV for 'replica 4' + log.info('test_clean_restart: check all the suppliers have been cleaned...') + clean = check_ruvs("test_clean_restart", topology_m4, 
m4rid) + assert clean + + log.info('test_clean_restart PASSED, restoring supplier 4...') + + +if __name__ == '__main__': + # Run isolated + # -s for DEBUG mode + CURRENT_FILE = os.path.realpath(__file__) + pytest.main(["-s", CURRENT_FILE]) + diff --git a/dirsrvtests/tests/suites/replication/cleanallruv_shutdown_crash_test.py b/dirsrvtests/tests/suites/replication/cleanallruv_shutdown_crash_test.py new file mode 100644 index 0000000..b4b74e3 --- /dev/null +++ b/dirsrvtests/tests/suites/replication/cleanallruv_shutdown_crash_test.py @@ -0,0 +1,123 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2022 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. +# --- END COPYRIGHT BLOCK --- +# +import logging +import pytest +import os +import time +from lib389._constants import DEFAULT_SUFFIX +from lib389.topologies import topology_m4 +from lib389.tasks import CleanAllRUVTask +from lib389.replica import ReplicationManager, Replicas +from lib389.config import CertmapLegacy +from lib389.idm.services import ServiceAccounts + +log = logging.getLogger(__name__) + + +def test_clean_shutdown_crash(topology_m2): + """Check that server didn't crash after shutdown when running CleanAllRUV task + + :id: c34d0b40-3c3e-4f53-8656-5e4c2a310aaf + :setup: Replication setup with two suppliers + :steps: + 1. Enable TLS on both suppliers + 2. Reconfigure both agreements to use TLS Client auth + 3. Stop supplier2 + 4. Run the CleanAllRUV task + 5. Restart supplier1 + 6. Check if supplier1 didn't crash + 7. Restart supplier1 again + 8. Check if supplier1 didn't crash + + :expectedresults: + 1. Success + 2. Success + 3. Success + 4. Success + 5. Success + 6. Success + 7. Success + 8. 
Success + """ + + m1 = topology_m2.ms["supplier1"] + m2 = topology_m2.ms["supplier2"] + + repl = ReplicationManager(DEFAULT_SUFFIX) + + cm_m1 = CertmapLegacy(m1) + cm_m2 = CertmapLegacy(m2) + + certmaps = cm_m1.list() + certmaps['default']['DNComps'] = None + certmaps['default']['CmapLdapAttr'] = 'nsCertSubjectDN' + + cm_m1.set(certmaps) + cm_m2.set(certmaps) + + log.info('Enabling TLS') + [i.enable_tls() for i in topology_m2] + + log.info('Creating replication dns') + services = ServiceAccounts(m1, DEFAULT_SUFFIX) + repl_m1 = services.get('%s:%s' % (m1.host, m1.sslport)) + repl_m1.set('nsCertSubjectDN', m1.get_server_tls_subject()) + + repl_m2 = services.get('%s:%s' % (m2.host, m2.sslport)) + repl_m2.set('nsCertSubjectDN', m2.get_server_tls_subject()) + + log.info('Changing auth type') + replica_m1 = Replicas(m1).get(DEFAULT_SUFFIX) + agmt_m1 = replica_m1.get_agreements().list()[0] + agmt_m1.replace_many( + ('nsDS5ReplicaBindMethod', 'SSLCLIENTAUTH'), + ('nsDS5ReplicaTransportInfo', 'SSL'), + ('nsDS5ReplicaPort', '%s' % m2.sslport), + ) + + agmt_m1.remove_all('nsDS5ReplicaBindDN') + + replica_m2 = Replicas(m2).get(DEFAULT_SUFFIX) + agmt_m2 = replica_m2.get_agreements().list()[0] + + agmt_m2.replace_many( + ('nsDS5ReplicaBindMethod', 'SSLCLIENTAUTH'), + ('nsDS5ReplicaTransportInfo', 'SSL'), + ('nsDS5ReplicaPort', '%s' % m1.sslport), + ) + agmt_m2.remove_all('nsDS5ReplicaBindDN') + + log.info('Stopping supplier2') + m2.stop() + + log.info('Run the cleanAllRUV task') + cruv_task = CleanAllRUVTask(m1) + cruv_task.create(properties={ + 'replica-id': repl.get_rid(m1), + 'replica-base-dn': DEFAULT_SUFFIX, + 'replica-force-cleaning': 'no', + 'replica-certify-all': 'yes' + }) + + m1.restart() + + log.info('Check if supplier1 crashed') + assert not m1.detectDisorderlyShutdown() + + log.info('Repeat') + m1.restart() + assert not m1.detectDisorderlyShutdown() + + +if __name__ == '__main__': + # Run isolated + # -s for DEBUG mode + CURRENT_FILE = os.path.realpath(__file__) + 
class AddUsers(threading.Thread):
    """Background worker thread that adds *num_users* test users to *inst*."""

    def __init__(self, inst, num_users):
        super().__init__()
        # Daemon thread: never blocks interpreter shutdown
        self.daemon = True
        self.inst = inst
        self.num_users = num_users

    def run(self):
        """Start adding users"""

        conn = DirectoryManager(self.inst).bind()
        users = UserAccounts(conn, DEFAULT_SUFFIX)

        # Randomize creation order so concurrent workers collide on entries
        indexes = list(range(self.num_users))
        random.shuffle(indexes)

        for idx in indexes:
            try:
                users.create(properties={
                    'uid': 'testuser%s' % idx,
                    'cn': 'testuser%s' % idx,
                    'sn': 'user%s' % idx,
                    'uidNumber': '%s' % (1000 + idx),
                    'gidNumber': '%s' % (1000 + idx),
                    'homeDirectory': '/home/testuser%s' % idx
                })
            except ldap.UNWILLING_TO_PERFORM:
                # One of the suppliers was probably put into read only mode - just break out
                break
            except ldap.ALREADY_EXISTS:
                pass
        conn.close()
def task_done(topology_m4, task_dn, timeout=60):
    """Poll *task_dn* on supplier1 until the task finishes or *timeout* seconds pass.

    Returns True when the task entry carries nsTaskExitCode or has been removed,
    False on timeout or an unexpected LDAP error.
    """
    attrlist = ['nsTaskLog', 'nsTaskStatus', 'nsTaskExitCode',
                'nsTaskCurrentItem', 'nsTaskTotalItems']

    for _ in range(timeout):
        try:
            entry = topology_m4.ms["supplier1"].getEntry(task_dn, attrlist=attrlist)
        except ldap.NO_SUCH_OBJECT:
            # Task entry already cleaned up -> finished
            return True
        except ldap.LDAPError:
            # Unexpected failure -> report not done
            return False
        # A missing entry or a recorded exit code both mean the task ended
        if entry is None or entry.hasAttr('nsTaskExitCode'):
            return True
        time.sleep(1)
    return False


def check_ruvs(msg, topology_m4, m4rid):
    """Check suppliers 1-3 for supplier 4's rid, retrying for up to ~100s each."""
    for inst in (topology_m4.ms["supplier1"], topology_m4.ms["supplier2"], topology_m4.ms["supplier3"]):
        replica = Replicas(inst).get(DEFAULT_SUFFIX)
        log.info('check_ruvs for replica %s:%s (suffix:rid)' % (replica.get_suffix(), replica.get_rid()))

        # Up to 20 polls, 5s apart, matching the original retry budget
        for _ in range(20):
            if m4rid not in replica.get_ruv()._rids:
                break
            time.sleep(5)
        else:
            raise Exception("Supplier %s was not cleaned in time." % inst.serverid)
    return True
@pytest.mark.flaky(max_runs=2, min_passes=1)
def test_stress_clean(topology_m4):
    """Put each server(m1 - m4) under a stress, and perform the entire clean process

    :id: a8263cd6-f068-4357-86e0-e7c34504c8c5
    :setup: Replication setup with four suppliers
    :steps:
        1. Add a bunch of updates to all suppliers
        2. Put supplier 4 to read-only mode
        3. Disable replication on supplier 4
        4. Remove agreements to supplier 4 from other suppliers
        5. Run a cleanallruv task on supplier 1
        6. Check that everything was cleaned
    :expectedresults:
        1. Operation should be successful
        2. Supplier 4 should be put to read-only mode
        3. Replication on supplier 4 should be disabled
        4. Agreements to supplier 4 should be removed
        5. Operation should be successful
        6. Everything should be cleaned
    """

    log.info('Running test_stress_clean...')
    log.info('test_stress_clean: put all the suppliers under load...')

    ldbm_config = LDBMConfig(topology_m4.ms["supplier4"])

    # Put all the suppliers under load
    # not too high load else it takes a long time to converge and
    # the test result becomes unstable
    m1_add_users = AddUsers(topology_m4.ms["supplier1"], 200)
    m1_add_users.start()
    m2_add_users = AddUsers(topology_m4.ms["supplier2"], 200)
    m2_add_users.start()
    m3_add_users = AddUsers(topology_m4.ms["supplier3"], 200)
    m3_add_users.start()
    m4_add_users = AddUsers(topology_m4.ms["supplier4"], 200)
    m4_add_users.start()

    # Allow sometime to get replication flowing in all directions
    log.info('test_stress_clean: allow some time for replication to get flowing...')
    time.sleep(5)

    # Put supplier 4 into read only mode
    ldbm_config.set('nsslapd-readonly', 'on')
    # We need to wait for supplier 4 to push its changes out
    log.info('test_stress_clean: allow some time for supplier 4 to push changes out (60 seconds)...')
    time.sleep(60)

    # Remove the agreements from the other suppliers that point to supplier 4
    repl = ReplicationManager(DEFAULT_SUFFIX)
    m4rid = repl.get_rid(topology_m4.ms["supplier4"])
    remove_supplier4_agmts("test_stress_clean", topology_m4)

    # Run the task
    cruv_task = CleanAllRUVTask(topology_m4.ms["supplier1"])
    cruv_task.create(properties={
        'replica-id': m4rid,
        'replica-base-dn': DEFAULT_SUFFIX,
        'replica-force-cleaning': 'no'
    })
    cruv_task.wait()

    # Wait for the update to finish
    log.info('test_stress_clean: wait for all the updates to finish...')
    m1_add_users.join()
    m2_add_users.join()
    m3_add_users.join()
    m4_add_users.join()

    # Check the other supplier's RUV for 'replica 4'
    log.info('test_stress_clean: check if all the replicas have been cleaned...')
    clean = check_ruvs("test_stress_clean", topology_m4, m4rid)
    assert clean

    log.info('test_stress_clean: PASSED, restoring supplier 4...')

    # Wait (up to 120s) for the surviving suppliers to converge again.
    # Reuse the ReplicationManager created above instead of building a second one.
    log.info("Sleep for 120 seconds to allow replication to complete...")
    repl.test_replication_topology([
        topology_m4.ms["supplier1"],
        topology_m4.ms["supplier2"],
        topology_m4.ms["supplier3"],
    ], timeout=120)

    # Turn off readonly mode
    ldbm_config.set('nsslapd-readonly', 'off')
m1_add_users.join() + m2_add_users.join() + m3_add_users.join() + m4_add_users.join() + + # Check the other supplier's RUV for 'replica 4' + log.info('test_stress_clean: check if all the replicas have been cleaned...') + clean = check_ruvs("test_stress_clean", topology_m4, m4rid) + assert clean + + log.info('test_stress_clean: PASSED, restoring supplier 4...') + + # Sleep for a bit to replication complete + log.info("Sleep for 120 seconds to allow replication to complete...") + repl = ReplicationManager(DEFAULT_SUFFIX) + repl.test_replication_topology([ + topology_m4.ms["supplier1"], + topology_m4.ms["supplier2"], + topology_m4.ms["supplier3"], + ], timeout=120) + + # Turn off readonly mode + ldbm_config.set('nsslapd-readonly', 'off') + + +if __name__ == '__main__': + # Run isolated + # -s for DEBUG mode + CURRENT_FILE = os.path.realpath(__file__) + pytest.main(["-s", CURRENT_FILE]) + diff --git a/dirsrvtests/tests/suites/replication/cleanallruv_test.py b/dirsrvtests/tests/suites/replication/cleanallruv_test.py new file mode 100644 index 0000000..adb2319 --- /dev/null +++ b/dirsrvtests/tests/suites/replication/cleanallruv_test.py @@ -0,0 +1,149 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2022 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. +# --- END COPYRIGHT BLOCK --- +# +import pytest +from lib389 import DirSrv +from lib389.tasks import * +from lib389.utils import * +from lib389.monitor import Monitor +from lib389.topologies import topology_m4, topology_m2, topology_m2c2 +from lib389._constants import DEFAULT_SUFFIX +from lib389.replica import ReplicationManager, Replicas +from lib389.tasks import CleanAllRUVTask + + +pytestmark = pytest.mark.tier1 + +logging.getLogger(__name__).setLevel(logging.DEBUG) +log = logging.getLogger(__name__) + + +def remove_supplier4_agmts(msg, topology_m4): + """Remove all the repl agmts to supplier4. 
""" + + log.info('%s: remove all the agreements to supplier 4...' % msg) + repl = ReplicationManager(DEFAULT_SUFFIX) + # This will delete m4 from the topo *and* remove all incoming agreements + # to m4. + repl.remove_supplier(topology_m4.ms["supplier4"], + [topology_m4.ms["supplier1"], topology_m4.ms["supplier2"], topology_m4.ms["supplier3"]]) + +def check_ruvs(msg, topology_m4, m4rid): + """Check suppliers 1-3 for supplier 4's rid.""" + for inst in (topology_m4.ms["supplier1"], topology_m4.ms["supplier2"], topology_m4.ms["supplier3"]): + clean = False + replicas = Replicas(inst) + replica = replicas.get(DEFAULT_SUFFIX) + log.info('check_ruvs for replica %s:%s (suffix:rid)' % (replica.get_suffix(), replica.get_rid())) + + count = 0 + while not clean and count < 20: + ruv = replica.get_ruv() + if m4rid in ruv._rids: + time.sleep(5) + count = count + 1 + else: + clean = True + if not clean: + raise Exception("Supplier %s was not cleaned in time." % inst.serverid) + return True + + +def test_clean(topology_m4): + """Check that cleanallruv task works properly + + :id: e9b3ce5c-e17c-409e-aafc-e97d630f2878 + :setup: Replication setup with four suppliers + :steps: + 1. Check that replication works on all suppliers + 2. Disable replication on supplier 4 + 3. Remove agreements to supplier 4 from other suppliers + 4. Run a cleanallruv task on supplier 1 with a 'force' option 'on' + 5. Check that everything was cleaned + :expectedresults: + 1. Replication should work properly on all suppliers + 2. Operation should be successful + 3. Agreements to supplier 4 should be removed + 4. Cleanallruv task should be successfully executed + 5. 
def test_clean(topology_m4):
    """Check that cleanallruv task works properly

    :id: e9b3ce5c-e17c-409e-aafc-e97d630f2878
    :setup: Replication setup with four suppliers
    :steps:
        1. Check that replication works on all suppliers
        2. Disable replication on supplier 4
        3. Remove agreements to supplier 4 from other suppliers
        4. Run a cleanallruv task on supplier 1 without forced cleaning
        5. Check that everything was cleaned
    :expectedresults:
        1. Replication should work properly on all suppliers
        2. Operation should be successful
        3. Agreements to supplier 4 should be removed
        4. Cleanallruv task should be successfully executed
        5. Everything should be cleaned
    """
    # NOTE: the docstring previously claimed a 'force' option 'on', but the
    # task below runs with 'replica-force-cleaning': 'no'.

    log.info('Running test_clean...')
    # Disable supplier 4
    # Remove the agreements from the other suppliers that point to supplier 4
    log.info('test_clean: disable supplier 4...')
    repl = ReplicationManager(DEFAULT_SUFFIX)
    m4rid = repl.get_rid(topology_m4.ms["supplier4"])
    remove_supplier4_agmts("test_clean", topology_m4)

    # Run the task
    log.info('test_clean: run the cleanAllRUV task...')
    cruv_task = CleanAllRUVTask(topology_m4.ms["supplier1"])
    cruv_task.create(properties={
        'replica-id': m4rid,
        'replica-base-dn': DEFAULT_SUFFIX,
        'replica-force-cleaning': 'no'
    })
    cruv_task.wait()

    # Check the other supplier's RUV for 'replica 4'
    log.info('test_clean: check all the suppliers have been cleaned...')
    clean = check_ruvs("test_clean", topology_m4, m4rid)
    assert clean

    log.info('test_clean PASSED, restoring supplier 4...')
def test_cleanallruv_consumer(topology_m2c2):
    """Check that cleanallruv task works properly on consumer

    :id: 2c0e7e83-314d-4df1-b1b7-07b7ab242976
    :setup: Replication setup with 2 suppliers and 2 consumers
    :steps:
        1. Run a cleanallruv task on supplier 2
        2. Waits until completion
        3. Retrieve monitoring info from all servers
           convenient way to check the server did not crash
    :expectedresults:
        1. pass
        2. pass
        3. pass
    """

    log.info('test_cleanallruv_consumer: Starts ...')
    supplier1 = topology_m2c2.ms["supplier1"]
    supplier2 = topology_m2c2.ms["supplier2"]
    consumer1 = topology_m2c2.cs["consumer1"]
    consumer2 = topology_m2c2.cs["consumer2"]
    log.info('Running ...')

    # Run the task
    log.info('test_cleanallruv_consumer: run the cleanAllRUV task...')
    repl = ReplicationManager(DEFAULT_SUFFIX)
    m1rid = repl.get_rid(supplier1)
    cruv_task = CleanAllRUVTask(supplier2)
    cruv_task.create(properties={
        'replica-id': m1rid,
        'replica-base-dn': DEFAULT_SUFFIX,
        'replica-force-cleaning': 'no'
    })
    cruv_task.wait()

    # Check that all servers are still alive: a monitor read succeeding on
    # each instance is a convenient crash check
    for server in (supplier1, supplier2, consumer1, consumer2):
        monitor = Monitor(server)
        version = monitor.get_version()
        assert version

    log.info('test_cleanallruv_consumer PASSED..')


if __name__ == '__main__':
    # Run isolated
    # -s for DEBUG mode
    CURRENT_FILE = os.path.realpath(__file__)
    # pytest.main() no longer accepts a plain string (removed in pytest 4);
    # pass an argument list, consistent with the sibling test modules.
    pytest.main(["-s", CURRENT_FILE])
+# --- END COPYRIGHT BLOCK --- +# +import time +import logging +import ldap +import pytest +import re +from itertools import permutations +from lib389._constants import * +from lib389.idm.nscontainer import nsContainers +from lib389.idm.user import UserAccounts, UserAccount +from lib389.idm.group import Groups +from lib389.idm.organizationalunit import OrganizationalUnits +from lib389.replica import ReplicationManager, Replicas +from lib389.agreement import Agreements +from lib389.plugins import MemberOfPlugin +from lib389.dirsrv_log import DirsrvErrorLog + +pytestmark = pytest.mark.tier1 + +DEBUGGING = os.getenv("DEBUGGING", default=False) +if DEBUGGING: + logging.getLogger(__name__).setLevel(logging.DEBUG) +else: + logging.getLogger(__name__).setLevel(logging.INFO) +log = logging.getLogger(__name__) + + +def _create_user(users, user_num, group_num=2000, sleep=False): + """Creates user entry""" + + user = users.create_test_user(user_num, group_num) + if sleep: + time.sleep(1) + return user + + +def _rename_user(users, user_num, new_num, sleep=False): + """Rename user entry""" + + assert user_num != new_num, "New user number should not be the same as the old one" + + user = users.get('test_user_{}'.format(user_num)) + user.rename('uid=test_user_{}'.format(new_num)) + if sleep: + time.sleep(1) + + +def _modify_user(users, user_num, sleep=False): + """Modify user entry""" + + user = users.get('test_user_{}'.format(user_num)) + user.replace("homeDirectory", "/home/test_user0{}".format(user_num)) + if sleep: + time.sleep(1) + time.sleep(1) + + +def _delete_user(users, user_num, sleep=False): + """Delete user entry""" + + user = users.get('test_user_{}'.format(user_num)) + user.delete() + if sleep: + time.sleep(1) + time.sleep(1) + + +def _create_group(groups, num, member, sleep=False): + """Creates group entry""" + + group_props = {'cn': 'test_group_{}'.format(num), + 'member': member} + group = groups.create(properties=group_props) + if sleep: + time.sleep(1) + return 
group + + +def _delete_group(groups, num, sleep=False): + """Delete group entry""" + + group = groups.get('test_group_{}'.format(num)) + group.delete() + if sleep: + time.sleep(1) + + +def _create_container(inst, dn, name, sleep=False): + """Creates container entry""" + + conts = nsContainers(inst, dn) + cont = conts.create(properties={'cn': name}) + if sleep: + time.sleep(1) + return cont + + +def _delete_container(cont, sleep=False): + """Deletes container entry""" + + cont.delete() + if sleep: + time.sleep(1) + + +def _test_base(topology): + """Add test container for entries, enable plugin logging, + audit log, error log for replica and access log for internal + """ + + M1 = topology.ms["supplier1"] + + conts = nsContainers(M1, SUFFIX) + base_m2 = conts.ensure_state(properties={'cn': 'test_container'}) + + for inst in topology: + inst.config.loglevel([ErrorLog.DEFAULT, ErrorLog.REPLICA], service='error') + inst.config.loglevel([AccessLog.DEFAULT, AccessLog.INTERNAL], service='access') + inst.config.set('nsslapd-plugin-logging', 'on') + inst.config.enable_log('audit') + inst.restart() + + return base_m2 + + +def _dump_logs(topology): + """ Logs instances error logs""" + for inst in topology: + errlog = DirsrvErrorLog(inst) + log.info(f'{inst.serverid} errorlog:') + for l in errlog.readlines(): + log.info(l.strip()) + + +def _delete_test_base(inst, base_m2_dn): + """Delete test container with entries and entry conflicts""" + + try: + ents = inst.search_s(base_m2_dn, ldap.SCOPE_SUBTREE, filterstr="(|(objectclass=*)(objectclass=ldapsubentry))") + for ent in sorted(ents, key=lambda e: len(e.dn), reverse=True): + log.debug("Delete entry children {}".format(ent.dn)) + try: + inst.delete_ext_s(ent.dn) + except ldap.NO_SUCH_OBJECT: # For the case with objectclass: glue entries + pass + except ldap.NO_SUCH_OBJECT: # Subtree is already removed. 
def _resume_agmts(inst):
    """Resume all agreements in the instance"""

    replica = Replicas(inst).get(DEFAULT_SUFFIX)
    for agmt in replica.get_agreements().list():
        agmt.resume()


@pytest.fixture
def base_m2(topology_m2, request):
    """Create the shared test container and tear it down on both suppliers."""
    container = _test_base(topology_m2)

    def fin():
        if not DEBUGGING:
            _delete_test_base(topology_m2.ms["supplier1"], container.dn)
            _delete_test_base(topology_m2.ms["supplier2"], container.dn)
            # Replication may break while deleting the container because naming
            # conflict entries still exists on the other side
            # Note IMHO there a bug in the entryrdn handling of replicated delete operation
            # ( children naming conflict or glue entries older than the parent delete operation
            # should be deleted when the parent is deleted )
            # So let restarts the agmt once everything is deleted.
            topology_m2.pause_all_replicas()
            topology_m2.resume_all_replicas()

    request.addfinalizer(fin)
    return container


@pytest.fixture
def base_m3(topology_m3, request):
    """Same as base_m2 for the three-supplier topology (cleanup on supplier1 only)."""
    container = _test_base(topology_m3)

    def fin():
        if not DEBUGGING:
            _delete_test_base(topology_m3.ms["supplier1"], container.dn)
    request.addfinalizer(fin)
    return container
Check that the entries on both suppliers are the same and replication is working + :expectedresults: + 1. It should pass + 2. It should pass + 3. It should pass + 4. It should pass + 5. It should pass + 6. It should pass + 7. It should pass + 8. It should pass + """ + + M1 = topology_m2.ms["supplier1"] + M2 = topology_m2.ms["supplier2"] + test_users_m1 = UserAccounts(M1, base_m2.dn, rdn=None) + test_users_m2 = UserAccounts(M2, base_m2.dn, rdn=None) + repl = ReplicationManager(SUFFIX) + + for user_num in range(1000, 1005): + _create_user(test_users_m1, user_num) + + repl.test_replication(M1, M2) + topology_m2.pause_all_replicas() + + log.info("Test create - modrdn") + user_num += 1 + _create_user(test_users_m1, user_num, sleep=True) + _create_user(test_users_m2, user_num, sleep=True) + + user_num += 1 + _create_user(test_users_m1, user_num, sleep=True) + _rename_user(test_users_m2, 1000, user_num, sleep=True) + + user_num += 1 + _rename_user(test_users_m1, 1001, user_num, sleep=True) + _create_user(test_users_m2, user_num, sleep=True) + + user_num += 1 + _rename_user(test_users_m1, 1002, user_num, sleep=True) + _rename_user(test_users_m2, 1002, user_num, sleep=True) + + user_num += 1 + _rename_user(test_users_m1, 1003, user_num, sleep=True) + _rename_user(test_users_m2, 1004, user_num) + + topology_m2.resume_all_replicas() + + repl.test_replication_topology(topology_m2) + + user_dns_m1 = [user.dn for user in test_users_m1.list()] + user_dns_m2 = [user.dn for user in test_users_m2.list()] + assert set(user_dns_m1) == set(user_dns_m2) + + def test_complex_add_modify_modrdn_delete(self, topology_m2, base_m2): + """Check that conflict properly resolved for complex operations + which involve add, modify, modrdn and delete + + :id: 77f09b18-03d1-45da-940b-1ad2c2908eb1 + :customerscenario: True + :setup: Two supplier replication, test container for entries, enable plugin logging, + audit log, error log for replica and access log for internal + :steps: + 1. 
    def test_complex_add_modify_modrdn_delete(self, topology_m2, base_m2):
        """Check that conflict properly resolved for complex operations
        which involve add, modify, modrdn and delete

        :id: 77f09b18-03d1-45da-940b-1ad2c2908eb1
        :customerscenario: True
        :setup: Two supplier replication, test container for entries, enable plugin logging,
                audit log, error log for replica and access log for internal
        :steps:
            1. Add ten users to m1 and wait for replication to happen
            2. Pause replication
            3. Test add-del on m1 and add on m2
            4. Test add-mod on m1 and add on m2
            5. Test add-modrdn on m1 and add on m2
            6. Test multiple add, modrdn
            7. Test Add-del on both suppliers
            8. Test modrdn-modrdn
            9. Test modrdn-del
            10. Resume replication
            11. Check that the entries on both suppliers are the same and replication is working
        :expectedresults:
            1. It should pass
            2. It should pass
            3. It should pass
            4. It should pass
            5. It should pass
            6. It should pass
            7. It should pass
            8. It should pass
            9. It should pass
            10. It should pass
            11. It should pass
        """

        M1 = topology_m2.ms["supplier1"]
        M2 = topology_m2.ms["supplier2"]

        test_users_m1 = UserAccounts(M1, base_m2.dn, rdn=None)
        test_users_m2 = UserAccounts(M2, base_m2.dn, rdn=None)
        repl = ReplicationManager(SUFFIX)

        # Seed entries 1100-1109; user_num keeps counting past the loop so each
        # conflict scenario below works on a fresh entry number.
        for user_num in range(1100, 1110):
            _create_user(test_users_m1, user_num)

        repl.test_replication(M1, M2)
        topology_m2.pause_all_replicas()

        log.info("Test add-del on M1 and add on M2")
        user_num += 1
        _create_user(test_users_m1, user_num)
        _delete_user(test_users_m1, user_num, sleep=True)
        _create_user(test_users_m2, user_num, sleep=True)

        user_num += 1
        _create_user(test_users_m1, user_num, sleep=True)
        _create_user(test_users_m2, user_num, sleep=True)
        _delete_user(test_users_m1, user_num, sleep=True)

        user_num += 1
        _create_user(test_users_m2, user_num, sleep=True)
        _create_user(test_users_m1, user_num)
        _delete_user(test_users_m1, user_num)

        log.info("Test add-mod on M1 and add on M2")
        user_num += 1
        _create_user(test_users_m1, user_num)
        _modify_user(test_users_m1, user_num, sleep=True)
        _create_user(test_users_m2, user_num, sleep=True)

        user_num += 1
        _create_user(test_users_m1, user_num, sleep=True)
        _create_user(test_users_m2, user_num, sleep=True)
        _modify_user(test_users_m1, user_num, sleep=True)

        user_num += 1
        _create_user(test_users_m2, user_num, sleep=True)
        _create_user(test_users_m1, user_num)
        _modify_user(test_users_m1, user_num)

        log.info("Test add-modrdn on M1 and add on M2")
        user_num += 1
        _create_user(test_users_m1, user_num)
        _rename_user(test_users_m1, user_num, user_num+20, sleep=True)
        _create_user(test_users_m2, user_num, sleep=True)

        user_num += 1
        _create_user(test_users_m1, user_num, sleep=True)
        _create_user(test_users_m2, user_num, sleep=True)
        _rename_user(test_users_m1, user_num, user_num+20, sleep=True)

        user_num += 1
        _create_user(test_users_m2, user_num, sleep=True)
        _create_user(test_users_m1, user_num)
        _rename_user(test_users_m1, user_num, user_num+20)

        log.info("Test multiple add, modrdn")
        user_num += 1
        _create_user(test_users_m1, user_num, sleep=True)
        _create_user(test_users_m2, user_num, sleep=True)
        _rename_user(test_users_m1, user_num, user_num+20)
        _create_user(test_users_m1, user_num, sleep=True)
        _modify_user(test_users_m2, user_num, sleep=True)

        log.info("Add - del on both suppliers")
        user_num += 1
        _create_user(test_users_m1, user_num)
        _delete_user(test_users_m1, user_num, sleep=True)
        _create_user(test_users_m2, user_num)
        _delete_user(test_users_m2, user_num, sleep=True)

        log.info("Test modrdn - modrdn")
        user_num += 1
        _rename_user(test_users_m1, 1109, 1129, sleep=True)
        _rename_user(test_users_m2, 1109, 1129, sleep=True)

        log.info("Test modrdn - del")
        user_num += 1
        _rename_user(test_users_m1, 1100, 1120, sleep=True)
        _delete_user(test_users_m2, 1100)

        user_num += 1
        _delete_user(test_users_m2, 1101, sleep=True)
        _rename_user(test_users_m1, 1101, 1121)

        topology_m2.resume_all_replicas()

        repl.test_replication_topology(topology_m2)

        # After convergence both suppliers must expose the same entry set
        user_dns_m1 = [user.dn for user in test_users_m1.list()]
        user_dns_m2 = [user.dn for user in test_users_m2.list()]
        assert set(user_dns_m1) == set(user_dns_m2)
    def test_memberof_groups(self, topology_m2, base_m2):
        """Check that conflict properly resolved for operations
        with memberOf and groups

        :id: 77f09b18-03d1-45da-940b-1ad2c2908eb3
        :setup: Two supplier replication, test container for entries, enable plugin logging,
                audit log, error log for replica and access log for internal
        :steps:
            1. Enable memberOf plugin
            2. Add 30 users to m1 and wait for replication to happen
            3. Pause replication
            4. Create a group on m1 and m2
            5. Create a group on m1 and m2, delete from m1
            6. Create a group on m1, delete from m1, and create on m2,
            7. Create a group on m2 and m1, delete from m1
            8. Create two different groups on m2
            9. Resume replication
            10. Check that the entries on both suppliers are the same and replication is working
        :expectedresults:
            1. It should pass
            2. It should pass
            3. It should pass
            4. It should pass
            5. It should pass
            6. It should pass
            7. It should pass
            8. It should pass
            9. It should pass
            10. It should pass
        """

        # xfail() raises immediately, so everything below is currently skipped
        # until issue 49591 is resolved.
        pytest.xfail("Issue 49591 - work in progress")

        M1 = topology_m2.ms["supplier1"]
        M2 = topology_m2.ms["supplier2"]
        test_users_m1 = UserAccounts(M1, base_m2.dn, rdn=None)
        test_groups_m1 = Groups(M1, base_m2.dn, rdn=None)
        test_groups_m2 = Groups(M2, base_m2.dn, rdn=None)

        repl = ReplicationManager(SUFFIX)

        # Enable memberOf on both suppliers and exclude memberOf from the
        # incremental replicated attribute list (it stays in total update)
        for inst in topology_m2.ms.values():
            memberof = MemberOfPlugin(inst)
            memberof.enable()
            agmt = Agreements(inst).list()[0]
            agmt.replace_many(('nsDS5ReplicatedAttributeListTotal',
                               '(objectclass=*) $ EXCLUDE '),
                              ('nsDS5ReplicatedAttributeList',
                               '(objectclass=*) $ EXCLUDE memberOf'))
            inst.restart()
        # Build 10 trios of users (1200+n, 1210+n, 1220+n) to use as members
        user_dns = []
        for user_num in range(10):
            user_trio = []
            for num in range(0, 30, 10):
                user = _create_user(test_users_m1, 1200 + user_num + num)
                user_trio.append(user.dn)
            user_dns.append(user_trio)

        repl.test_replication(M1, M2)
        topology_m2.pause_all_replicas()

        log.info("Check a simple conflict")
        group_num = 0
        _create_group(test_groups_m1, group_num, user_dns[group_num], sleep=True)
        _create_group(test_groups_m2, group_num, user_dns[group_num], sleep=True)

        log.info("Check a add - del")
        group_num += 1
        _create_group(test_groups_m1, group_num, user_dns[group_num], sleep=True)
        _create_group(test_groups_m2, group_num, user_dns[group_num], sleep=True)
        _delete_group(test_groups_m1, group_num)

        group_num += 1
        _create_group(test_groups_m1, group_num, user_dns[group_num])
        _delete_group(test_groups_m1, group_num, sleep=True)
        _create_group(test_groups_m2, group_num, user_dns[group_num])

        group_num += 1
        _create_group(test_groups_m2, group_num, user_dns[group_num], sleep=True)
        _create_group(test_groups_m1, group_num, user_dns[group_num])
        _delete_group(test_groups_m1, group_num, sleep=True)

        # Two groups that exist only on m2
        group_num += 1
        _create_group(test_groups_m2, group_num, user_dns[group_num])
        group_num += 1
        _create_group(test_groups_m2, group_num, user_dns[group_num])

        topology_m2.resume_all_replicas()

        repl.test_replication_topology(topology_m2)

        # After convergence both suppliers must expose the same group set
        group_dns_m1 = [group.dn for group in test_groups_m1.list()]
        group_dns_m2 = [group.dn for group in test_groups_m2.list()]
        assert set(group_dns_m1) == set(group_dns_m2)
sleep=True) + _create_group(test_groups_m2, group_num, user_dns[group_num], sleep=True) + + log.info("Check a add - del") + group_num += 1 + _create_group(test_groups_m1, group_num, user_dns[group_num], sleep=True) + _create_group(test_groups_m2, group_num, user_dns[group_num], sleep=True) + _delete_group(test_groups_m1, group_num) + + group_num += 1 + _create_group(test_groups_m1, group_num, user_dns[group_num]) + _delete_group(test_groups_m1, group_num, sleep=True) + _create_group(test_groups_m2, group_num, user_dns[group_num]) + + group_num += 1 + _create_group(test_groups_m2, group_num, user_dns[group_num], sleep=True) + _create_group(test_groups_m1, group_num, user_dns[group_num]) + _delete_group(test_groups_m1, group_num, sleep=True) + + group_num += 1 + _create_group(test_groups_m2, group_num, user_dns[group_num]) + group_num += 1 + _create_group(test_groups_m2, group_num, user_dns[group_num]) + + topology_m2.resume_all_replicas() + + repl.test_replication_topology(topology_m2) + + group_dns_m1 = [group.dn for group in test_groups_m1.list()] + group_dns_m2 = [group.dn for group in test_groups_m2.list()] + assert set(group_dns_m1) == set(group_dns_m2) + + def test_managed_entries(self, topology_m2): + """Check that conflict properly resolved for operations + with managed entries + + :id: 77f09b18-03d1-45da-940b-1ad2c2908eb4 + :setup: Two supplier replication, test container for entries, enable plugin logging, + audit log, error log for replica and access log for internal + :steps: + 1. Create ou=managed_users and ou=managed_groups under test container + 2. Configure managed entries plugin and add a template to test container + 3. Add a user to m1 and wait for replication to happen + 4. Pause replication + 5. Create a user on m1 and m2 with a same group ID on both supplier + 6. Create a user on m1 and m2 with a different group ID on both supplier + 7. Resume replication + 8. 
    def test_managed_entries(self, topology_m2):
        """Check that conflict properly resolved for operations
        with managed entries

        :id: 77f09b18-03d1-45da-940b-1ad2c2908eb4
        :setup: Two supplier replication, test container for entries, enable plugin logging,
                audit log, error log for replica and access log for internal
        :steps:
            1. Create ou=managed_users and ou=managed_groups under test container
            2. Configure managed entries plugin and add a template to test container
            3. Add a user to m1 and wait for replication to happen
            4. Pause replication
            5. Create a user on m1 and m2 with a same group ID on both supplier
            6. Create a user on m1 and m2 with a different group ID on both supplier
            7. Resume replication
            8. Check that the entries on both suppliers are the same and replication is working
        :expectedresults:
            1. It should pass
            2. It should pass
            3. It should pass
            4. It should pass
            5. It should pass
            6. It should pass
            7. It should pass
            8. It should pass
        """

        # xfail() raises immediately, so everything below is currently skipped
        # until issue 49591 is resolved.
        pytest.xfail("Issue 49591 - work in progress")

        M1 = topology_m2.ms["supplier1"]
        M2 = topology_m2.ms["supplier2"]
        repl = ReplicationManager(SUFFIX)

        ous = OrganizationalUnits(M1, DEFAULT_SUFFIX)
        ou_people = ous.create(properties={'ou': 'managed_people'})
        ou_groups = ous.create(properties={'ou': 'managed_groups'})

        test_users_m1 = UserAccounts(M1, DEFAULT_SUFFIX, rdn='ou={}'.format(ou_people.rdn))
        test_users_m2 = UserAccounts(M2, DEFAULT_SUFFIX, rdn='ou={}'.format(ou_people.rdn))

        # TODO: Refactor ManagedPlugin class functionality (also add configs and templates)
        conts = nsContainers(M1, SUFFIX)
        template = conts.create(properties={
            'objectclass': 'top mepTemplateEntry extensibleObject'.split(),
            'cn': 'MEP Template',
            'mepRDNAttr': 'cn',
            'mepStaticAttr': ['objectclass: posixGroup', 'objectclass: extensibleObject'],
            'mepMappedAttr': ['cn: $uid', 'uid: $cn', 'gidNumber: $uidNumber']
        })
        repl.test_replication(M1, M2)

        # Point the managed entries plugin at the people/groups OUs on both suppliers
        for inst in topology_m2.ms.values():
            conts = nsContainers(inst, "cn={},{}".format(PLUGIN_MANAGED_ENTRY, DN_PLUGIN))
            conts.create(properties={'objectclass': 'top extensibleObject'.split(),
                                     'cn': 'config',
                                     'originScope': ou_people.dn,
                                     'originFilter': 'objectclass=posixAccount',
                                     'managedBase': ou_groups.dn,
                                     'managedTemplate': template.dn})
            inst.restart()

        _create_user(test_users_m1, 1, 1)

        topology_m2.pause_all_replicas()

        # Same user number and group id created on both suppliers
        _create_user(test_users_m1, 2, 2, sleep=True)
        _create_user(test_users_m2, 2, 2, sleep=True)

        # Same user number but different group ids (3 vs 33)
        _create_user(test_users_m1, 3, 3, sleep=True)
        _create_user(test_users_m2, 3, 33)

        topology_m2.resume_all_replicas()

        repl.test_replication_topology(topology_m2)

        # After convergence both suppliers must expose the same entry set
        user_dns_m1 = [user.dn for user in test_users_m1.list()]
        user_dns_m2 = [user.dn for user in test_users_m2.list()]
        assert set(user_dns_m1) == set(user_dns_m2)
test_users_m1.list()] + user_dns_m2 = [user.dn for user in test_users_m2.list()] + assert set(user_dns_m1) == set(user_dns_m2) + + def test_nested_entries_with_children(self, topology_m2, base_m2): + """Check that conflict properly resolved for operations + with nested entries with children + + :id: 77f09b18-03d1-45da-940b-1ad2c2908eb5 + :setup: Two supplier replication, test container for entries, enable plugin logging, + audit log, error log for replica and access log for internal + :steps: + 1. Add 15 containers to m1 and wait for replication to happen + 2. Pause replication + 3. Create parent-child on supplier2 and supplier1 + 4. Create parent-child on supplier1 and supplier2 + 5. Create parent-child on supplier1 and supplier2 different child rdn + 6. Create parent-child on supplier1 and delete parent on supplier2 + 7. Create parent on supplier1, delete it and parent-child on supplier2, delete them + 8. Create parent on supplier1, delete it and parent-two children on supplier2 + 9. Create parent-two children on supplier1 and parent-child on supplier2, delete them + 10. Create three subsets inside existing container entry, applying only part of changes on m2 + 11. Create more combinations of the subset with parent-child on m1 and parent on m2 + 12. Delete container on m1, modify user1 on m1, create parent on m2 and modify user2 on m2 + 13. Resume replication + 14. Check that the entries on both suppliers are the same and replication is working + :expectedresults: + 1. It should pass + 2. It should pass + 3. It should pass + 4. It should pass + 5. It should pass + 6. It should pass + 7. It should pass + 8. It should pass + 9. It should pass + 10. It should pass + 11. It should pass + 12. It should pass + 13. It should pass + 14. 
It should pass + """ + + pytest.xfail("Issue 49591 - work in progress") + + M1 = topology_m2.ms["supplier1"] + M2 = topology_m2.ms["supplier2"] + repl = ReplicationManager(SUFFIX) + test_users_m1 = UserAccounts(M1, base_m2.dn, rdn=None) + test_users_m2 = UserAccounts(M2, base_m2.dn, rdn=None) + _create_user(test_users_m1, 4000) + _create_user(test_users_m1, 4001) + + cont_list = [] + for num in range(15): + cont = _create_container(M1, base_m2.dn, 'sub{}'.format(num)) + cont_list.append(cont) + + repl.test_replication(M1, M2) + + topology_m2.pause_all_replicas() + + log.info("Create parent-child on supplier2 and supplier1") + _create_container(M2, base_m2.dn, 'p0', sleep=True) + cont_p = _create_container(M1, base_m2.dn, 'p0', sleep=True) + _create_container(M1, cont_p.dn, 'c0', sleep=True) + _create_container(M2, cont_p.dn, 'c0', sleep=True) + + log.info("Create parent-child on supplier1 and supplier2") + cont_p = _create_container(M1, base_m2.dn, 'p1', sleep=True) + _create_container(M2, base_m2.dn, 'p1', sleep=True) + _create_container(M1, cont_p.dn, 'c1', sleep=True) + _create_container(M2, cont_p.dn, 'c1', sleep=True) + + log.info("Create parent-child on supplier1 and supplier2 different child rdn") + cont_p = _create_container(M1, base_m2.dn, 'p2', sleep=True) + _create_container(M2, base_m2.dn, 'p2', sleep=True) + _create_container(M1, cont_p.dn, 'c2', sleep=True) + _create_container(M2, cont_p.dn, 'c3', sleep=True) + + log.info("Create parent-child on supplier1 and delete parent on supplier2") + cont_num = 0 + cont_p_m1 = _create_container(M1, cont_list[cont_num].dn, 'p0', sleep=True) + cont_p_m2 = _create_container(M2, cont_list[cont_num].dn, 'p0', sleep=True) + _create_container(M1, cont_p_m1.dn, 'c0', sleep=True) + _delete_container(cont_p_m2) + + cont_num += 1 + cont_p_m2 = _create_container(M2, cont_list[cont_num].dn, 'p0', sleep=True) + cont_p_m1 = _create_container(M1, cont_list[cont_num].dn, 'p0') + _create_container(M1, cont_p_m1.dn, 'c0', 
sleep=True) + _delete_container(cont_p_m2, sleep=True) + + log.info("Create parent on supplier1, delete it and parent-child on supplier2, delete them") + cont_num += 1 + cont_p_m1 = _create_container(M1, cont_list[cont_num].dn, 'p0') + _delete_container(cont_p_m1, sleep=True) + + cont_p_m2 = _create_container(M2, cont_list[cont_num].dn, 'p0') + cont_c_m2 = _create_container(M2, cont_p_m2.dn, 'c0') + _delete_container(cont_c_m2) + _delete_container(cont_p_m2) + + cont_num += 1 + cont_p_m2 = _create_container(M2, cont_list[cont_num].dn, 'p0') + cont_c_m2 = _create_container(M2, cont_p_m2.dn, 'c0') + _delete_container(cont_c_m2) + _delete_container(cont_p_m2, sleep=True) + + cont_p_m1 = _create_container(M1, cont_list[cont_num].dn, 'p0') + _delete_container(cont_p_m1) + + log.info("Create parent on supplier1, delete it and parent-two children on supplier2") + cont_num += 1 + cont_p_m1 = _create_container(M1, cont_list[cont_num].dn, 'p0') + _delete_container(cont_p_m1, sleep=True) + + cont_p_m2 = _create_container(M2, cont_list[cont_num].dn, 'p0') + _create_container(M2, cont_p_m2.dn, 'c0') + _create_container(M2, cont_p_m2.dn, 'c1') + + cont_num += 1 + cont_p_m2 = _create_container(M2, cont_list[cont_num].dn, 'p0') + _create_container(M2, cont_p_m2.dn, 'c0') + _create_container(M2, cont_p_m2.dn, 'c1', sleep=True) + + cont_p_m1 = _create_container(M1, cont_list[cont_num].dn, 'p0') + _delete_container(cont_p_m1, sleep=True) + + log.info("Create parent-two children on supplier1 and parent-child on supplier2, delete them") + cont_num += 1 + cont_p_m2 = _create_container(M2, cont_list[cont_num].dn, 'p0') + cont_c_m2 = _create_container(M2, cont_p_m2.dn, 'c0') + _delete_container(cont_c_m2) + _delete_container(cont_p_m2, sleep=True) + + cont_p_m1 = _create_container(M1, cont_list[cont_num].dn, 'p0') + _create_container(M1, cont_p_m1.dn, 'c0') + _create_container(M1, cont_p_m1.dn, 'c1') + + cont_num += 1 + cont_p_m1 = _create_container(M1, cont_list[cont_num].dn, 'p0') + 
_create_container(M1, cont_p_m1.dn, 'c0') + _create_container(M1, cont_p_m1.dn, 'c1', sleep=True) + + cont_p_m2 = _create_container(M2, cont_list[cont_num].dn, 'p0') + cont_c_m2 = _create_container(M2, cont_p_m2.dn, 'c0') + _delete_container(cont_c_m2) + _delete_container(cont_p_m2, sleep=True) + + log.info("Create three subsets inside existing container entry, applying only part of changes on m2") + cont_num += 1 + cont_p_m1 = _create_container(M1, cont_list[cont_num].dn, 'p0') + _create_container(M1, cont_p_m1.dn, 'c0') + _create_container(M1, cont_p_m1.dn, 'c1', sleep=True) + _create_container(M2, cont_list[cont_num].dn, 'p0', sleep=True) + + cont_num += 1 + cont_p_m1 = _create_container(M1, cont_list[cont_num].dn, 'p0') + _create_container(M1, cont_p_m1.dn, 'c0') + _create_container(M1, cont_p_m1.dn, 'c1', sleep=True) + cont_p_m2 = _create_container(M2, cont_list[cont_num].dn, 'p0') + _create_container(M2, cont_p_m2.dn, 'c0', sleep=True) + + cont_num += 1 + cont_p_m1 = _create_container(M1, cont_list[cont_num].dn, 'p0') + _create_container(M1, cont_p_m1.dn, 'c0') + _create_container(M1, cont_p_m1.dn, 'c1', sleep=True) + cont_p_m2 = _create_container(M2, cont_list[cont_num].dn, 'p0') + cont_c_m2 = _create_container(M2, cont_p_m2.dn, 'c0') + _delete_container(cont_c_m2, sleep=True) + + log.info("Create more combinations of the subset with parent-child on m1 and parent on m2") + cont_num += 1 + cont_p_m1 = _create_container(M1, cont_list[cont_num].dn, 'p0', sleep=True) + cont_p_m2 = _create_container(M2, cont_list[cont_num].dn, 'p0', sleep=True) + _delete_container(cont_p_m1, sleep=True) + cont_c_m2 = _create_container(M2, cont_p_m2.dn, 'c0') + _delete_container(cont_c_m2) + _delete_container(cont_p_m2, sleep=True) + + cont_num += 1 + cont_p_m1 = _create_container(M1, cont_list[cont_num].dn, 'p0', sleep=True) + cont_p_m2 = _create_container(M2, cont_list[cont_num].dn, 'p0', sleep=True) + _delete_container(cont_p_m1, sleep=True) + _create_container(M2, 
cont_p_m2.dn, 'c0', sleep=True) + + cont_num += 1 + cont_p_m1 = _create_container(M1, cont_list[cont_num].dn, 'p0', sleep=True) + cont_p_m2 = _create_container(M2, cont_list[cont_num].dn, 'p0', sleep=True) + cont_c_m1 = _create_container(M1, cont_p_m1.dn, 'c0', sleep=True) + _create_container(M2, cont_p_m2.dn, 'c0', sleep=True) + _delete_container(cont_c_m1, sleep=True) + _create_container(M2, cont_p_m2.dn, 'c1', sleep=True) + _delete_container(cont_p_m1, sleep=True) + + log.info("Delete container on m1, modify user1 on m1, create parent on m2 and modify user2 on m2") + cont_num += 1 + _delete_container(cont_list[cont_num]) + _modify_user(test_users_m1, 4000, sleep=True) + _create_container(M2, cont_list[cont_num].dn, 'p0') + _modify_user(test_users_m2, 4001) + + topology_m2.resume_all_replicas() + + repl.test_replication_topology(topology_m2, timeout=60) + + conts_dns = {} + for num in range(1, 3): + inst = topology_m2.ms["supplier{}".format(num)] + conts_dns[inst.serverid] = [] + conts = nsContainers(inst, base_m2.dn) + for cont in conts.list(): + conts_p = nsContainers(inst, cont.dn) + for cont_p in conts_p.list(): + conts_c = nsContainers(inst, cont_p.dn) + conts_dns[inst.serverid].extend([cont_c.dn for cont_c in conts_c.list()]) + conts_dns[inst.serverid].extend([cont_p.dn for cont_p in conts_p.list()]) + conts_dns[inst.serverid].extend([cont.dn for cont in conts.list()]) + + assert set(conts_dns[M1.serverid]) == set(conts_dns[M2.serverid]) + + user_dns_m1 = [user.dn for user in test_users_m1.list()] + user_dns_m2 = [user.dn for user in test_users_m2.list()] + assert set(user_dns_m1) == set(user_dns_m2) + + def test_conflict_attribute_multi_valued(self, topology_m2, base_m2): + """A RDN attribute being multi-valued, checks that after several operations + MODRDN and MOD_REPL its RDN values are the same on both servers + + :id: 225b3522-8ed7-4256-96f9-5fab9b7044a5 + :setup: Two supplier replication, + audit log, error log for replica and access log for internal 
+ :steps: + 1. Create a test entry uid=user_test_1000,... + 2. Pause all replication agreements + 3. On M1 rename it into uid=foo1,... + 4. On M2 rename it into uid=foo2,... + 5. On M1 MOD_REPL uid:foo1 + 6. Resume all replication agreements + 7. Check that entry on M1 has uid=foo1, foo2 + 8. Check that entry on M2 has uid=foo1, foo2 + 9. Check that entry on M1 and M2 has the same uid values + :expectedresults: + 1. It should pass + 2. It should pass + 3. It should pass + 4. It should pass + 5. It should pass + 6. It should pass + 7. It should pass + 8. It should pass + 9. It should pass + """ + + M1 = topology_m2.ms["supplier1"] + M2 = topology_m2.ms["supplier2"] + repl = ReplicationManager(SUFFIX) + + # add a test user + test_users_m1 = UserAccounts(M1, base_m2.dn, rdn=None) + user_1 = test_users_m1.create_test_user(uid=1000) + test_users_m2 = UserAccount(M2, user_1.dn) + # Waiting fo the user to be replicated + for i in range(0,60): + time.sleep(1) + if test_users_m2.exists(): + break + try: + assert(test_users_m2.exists()) + except AssertionError as e: + _dump_logs(topology_m2) + raise e from None + + # Stop replication agreements + topology_m2.pause_all_replicas() + + # On M1 rename test entry in uid=foo1 + original_dn = user_1.dn + user_1.rename('uid=foo1') + time.sleep(1) + + # On M2 rename test entry in uid=foo2 + M2.rename_s(original_dn, 'uid=foo2') + time.sleep(2) + + # on M1 MOD_REPL uid into foo1 + user_1.replace('uid', 'foo1') + + # resume replication agreements + topology_m2.resume_all_replicas() + repl.test_replication_topology(topology_m2) + + # check that on M1, the entry 'uid' has two values 'foo1' and 'foo2' + final_dn = re.sub('^.*1000,', 'uid=foo2,', original_dn) + final_user_m1 = UserAccount(M1, final_dn) + for val in final_user_m1.get_attr_vals_utf8('uid'): + log.info("Check %s is on M1" % val) + assert(val in ['foo1', 'foo2']) + + # check that on M2, the entry 'uid' has two values 'foo1' and 'foo2' + final_user_m2 = UserAccount(M2, final_dn) 
+ for val in final_user_m2.get_attr_vals_utf8('uid'): + log.info("Check %s is on M1" % val) + assert(val in ['foo1', 'foo2']) + + # check that the entry have the same uid values + for val in final_user_m1.get_attr_vals_utf8('uid'): + log.info("Check M1.uid %s is also on M2" % val) + assert(val in final_user_m2.get_attr_vals_utf8('uid')) + + for val in final_user_m2.get_attr_vals_utf8('uid'): + log.info("Check M2.uid %s is also on M1" % val) + assert(val in final_user_m1.get_attr_vals_utf8('uid')) + + def test_conflict_attribute_single_valued(self, topology_m2, base_m2): + """A RDN attribute being signle-valued, checks that after several operations + MODRDN and MOD_REPL its RDN values are the same on both servers + + :id: c38ae613-5d1e-47cf-b051-c7284e64b817 + :setup: Two supplier replication, test container for entries, enable plugin logging, + audit log, error log for replica and access log for internal + :steps: + 1. Create a test entry uid=user_test_1000,... + 2. Pause all replication agreements + 3. On M1 rename it into employeenumber=foo1,... + 4. On M2 rename it into employeenumber=foo2,... + 5. On M1 MOD_REPL employeenumber:foo1 + 6. Resume all replication agreements + 7. Check that entry on M1 has employeenumber=foo1 + 8. Check that entry on M2 has employeenumber=foo1 + 9. Check that entry on M1 and M2 has the same employeenumber values + :expectedresults: + 1. It should pass + 2. It should pass + 3. It should pass + 4. It should pass + 5. It should pass + 6. It should pass + 7. It should pass + 8. It should pass + 9. 
It should pass + """ + + M1 = topology_m2.ms["supplier1"] + M2 = topology_m2.ms["supplier2"] + repl = ReplicationManager(SUFFIX) + + # add a test user with a dummy 'uid' extra value because modrdn removes + # uid that conflict with 'account' objectclass + test_users_m1 = UserAccounts(M1, base_m2.dn, rdn=None) + user_1 = test_users_m1.create_test_user(uid=1000) + user_1.add('objectclass', 'extensibleobject') + user_1.add('uid', 'dummy') + test_users_m2 = UserAccount(M2, user_1.dn) + + # Waiting fo the user to be replicated + for i in range(0,60): + time.sleep(1) + if test_users_m2.exists(): + break + try: + assert(test_users_m2.exists()) + except AssertionError as e: + _dump_logs(topology_m2) + raise e from None + + # Stop replication agreements + topology_m2.pause_all_replicas() + + # On M1 rename test entry in employeenumber=foo1 + original_dn = user_1.dn + user_1.rename('employeenumber=foo1') + time.sleep(1) + + # On M2 rename test entry in employeenumber=foo2 + M2.rename_s(original_dn, 'employeenumber=foo2') + time.sleep(2) + + # on M1 MOD_REPL uid into foo1 + user_1.replace('employeenumber', 'foo1') + + # resume replication agreements + topology_m2.resume_all_replicas() + repl.test_replication_topology(topology_m2) + + # check that on M1, the entry 'employeenumber' has value 'foo1' + final_dn = re.sub('^.*1000,', 'employeenumber=foo2,', original_dn) + final_user_m1 = UserAccount(M1, final_dn) + for val in final_user_m1.get_attr_vals_utf8('employeenumber'): + log.info("Check %s is on M1" % val) + assert(val in ['foo1']) + + # check that on M2, the entry 'employeenumber' has values 'foo1' + final_user_m2 = UserAccount(M2, final_dn) + for val in final_user_m2.get_attr_vals_utf8('employeenumber'): + log.info("Check %s is on M2" % val) + assert(val in ['foo1']) + + # check that the entry have the same uid values + for val in final_user_m1.get_attr_vals_utf8('employeenumber'): + log.info("Check M1.uid %s is also on M2" % val) + assert(val in 
class TestThreeSuppliers:
    def test_nested_entries(self, topology_m3, base_m3):
        """Check that conflict properly resolved for operations
        with nested entries with children

        :id: 77f09b18-03d1-45da-940b-1ad2c2908eb6
        :setup: Three supplier replication, test container for entries, enable plugin logging,
                audit log, error log for replica and access log for internal
        :steps:
            1. Add 11 containers to m1 and wait for replication to happen
            2. Pause replication
            3. Create two child entries under each of two entries
            4. Create three child entries under each of three entries
            5. Create two parents on m1 and m2, then on m1 - create a child and delete one parent,
               on m2 - delete one parent and create a child
            6. Test a few more parent-child combinations with three instances
            7. Resume replication
            8. Check that the entries on both suppliers are the same and replication is working
        :expectedresults:
            1. It should pass
            2. It should pass
            3. It should pass
            4. It should pass
            5. It should pass
            6. It should pass
            7. It should pass
            8. It should pass
        """

        pytest.xfail("Issue 49591 - work in progress")

        M1 = topology_m3.ms["supplier1"]
        M2 = topology_m3.ms["supplier2"]
        M3 = topology_m3.ms["supplier3"]
        repl = ReplicationManager(SUFFIX)

        # Note: the code creates 11 containers (docstring previously said 15)
        cont_list = []
        for num in range(11):
            cont = _create_container(M1, base_m3.dn, 'sub{}'.format(num))
            cont_list.append(cont)

        repl.test_replication(M1, M2)
        repl.test_replication(M1, M3)

        topology_m3.pause_all_replicas()

        log.info("Create two child entries under each of two entries")
        cont_num = -1
        for num in range(2):
            cont_num += 1
            _create_container(M1, cont_list[cont_num].dn, 'p0', sleep=True)
            _create_container(M2, cont_list[cont_num].dn, 'p1', sleep=True)

        log.info("Create three child entries under each of three entries")
        for num in range(3):
            cont_num += 1
            _create_container(M1, cont_list[cont_num].dn, 'p0', sleep=True)
            _create_container(M2, cont_list[cont_num].dn, 'p1', sleep=True)
            _create_container(M3, cont_list[cont_num].dn, 'p2', sleep=True)

        log.info("Create two parents on m1 and m2, then on m1 - create a child and delete one parent,"
                 "on m2 - delete one parent and create a child")
        for inst1, inst2 in ((M1, M2), (M2, M1)):
            cont_num += 1
            cont_p_m1_1 = _create_container(inst1, cont_list[cont_num].dn, 'p0')
            cont_p_m1_2 = _create_container(inst1, cont_list[cont_num].dn, 'p1', sleep=True)
            cont_p_m2_1 = _create_container(inst2, cont_list[cont_num].dn, 'p0')
            cont_p_m2_2 = _create_container(inst2, cont_list[cont_num].dn, 'p1', sleep=True)
            _create_container(inst1, cont_p_m1_1.dn, 'c0', sleep=True)
            _delete_container(cont_p_m2_1, sleep=True)
            _delete_container(cont_p_m1_2, sleep=True)
            _create_container(inst2, cont_p_m2_2.dn, 'c0', sleep=True)

        log.info("Test a few more parent-child combinations on three instances")
        for inst1, inst2, inst3 in ((M1, M2, M3), (M2, M1, M3), (M3, M1, M2)):
            cont_num += 1
            cont_p_m1 = _create_container(inst1, cont_list[cont_num].dn, 'p0')
            _delete_container(cont_p_m1, sleep=True)

            cont_p_m2 = _create_container(inst2, cont_list[cont_num].dn, 'p0')
            cont_c_m2 = _create_container(inst2, cont_p_m2.dn, 'c0')
            _delete_container(cont_c_m2)
            _delete_container(cont_p_m2, sleep=True)

            cont_p_m3 = _create_container(inst3, cont_list[cont_num].dn, 'p0')
            _create_container(inst3, cont_p_m3.dn, 'c0')
            _create_container(inst3, cont_p_m3.dn, 'c1', sleep=True)

        topology_m3.resume_all_replicas()

        repl.test_replication_topology(topology_m3)

        # Collect all container DNs (three levels deep) from each supplier
        conts_dns = {}
        for num in range(1, 4):
            inst = topology_m3.ms["supplier{}".format(num)]
            conts_dns[inst.serverid] = []
            conts = nsContainers(inst, base_m3.dn)
            for cont in conts.list():
                conts_p = nsContainers(inst, cont.dn)
                for cont_p in conts_p.list():
                    conts_c = nsContainers(inst, cont_p.dn)
                    conts_dns[inst.serverid].extend([cont_c.dn for cont_c in conts_c.list()])
                conts_dns[inst.serverid].extend([cont_p.dn for cont_p in conts_p.list()])
            conts_dns[inst.serverid].extend([cont.dn for cont in conts.list()])

        # Every supplier must hold exactly the same set of DNs
        for conts1, conts2 in permutations(conts_dns.values(), 2):
            assert set(conts1) == set(conts2)


if __name__ == '__main__':
    # Run isolated
    # -s for DEBUG mode
    CURRENT_FILE = os.path.realpath(__file__)
    # pytest.main() requires a list of arguments, not a formatted string
    pytest.main(["-s", CURRENT_FILE])
# --- END COPYRIGHT BLOCK ---
#
import os
import logging
import pytest
from lib389.topologies import create_topology
from lib389._constants import ReplicaRole

# NOTE(review): os.getenv returns a string, so any non-empty value
# (including "0") enables debugging — confirm that is intended.
DEBUGGING = os.getenv('DEBUGGING', default=False)
if DEBUGGING:
    logging.getLogger(__name__).setLevel(logging.DEBUG)
else:
    logging.getLogger(__name__).setLevel(logging.INFO)
log = logging.getLogger(__name__)


# Redefine some fixtures so we can use them with class scope
@pytest.fixture(scope="class")
def topology_m2(request):
    """Create Replication Deployment with two suppliers"""

    topology = create_topology({ReplicaRole.SUPPLIER: 2})

    def fin():
        # Keep the instances around for debugging, otherwise remove them.
        # (plain for loops: list comprehensions should not be used for side effects)
        if DEBUGGING:
            for inst in topology:
                inst.stop()
        else:
            for inst in topology:
                inst.delete()
    request.addfinalizer(fin)

    return topology


@pytest.fixture(scope="class")
def topology_m3(request):
    """Create Replication Deployment with three suppliers"""

    topology = create_topology({ReplicaRole.SUPPLIER: 3})

    def fin():
        # Keep the instances around for debugging, otherwise remove them.
        if DEBUGGING:
            for inst in topology:
                inst.stop()
        else:
            for inst in topology:
                inst.delete()
    request.addfinalizer(fin)

    return topology
# --- END COPYRIGHT BLOCK ---
#
# Fix: this module uses os.getenv() and glob.glob() below, but previously
# imported neither 'os' nor 'glob'.
import glob
import logging
import os
import pytest
import pdb
from lib389.utils import ensure_bytes, ds_supports_new_changelog
from lib389.replica import ReplicationManager
from lib389.dseldif import DSEldif
from lib389.idm.user import UserAccounts, TEST_USER_PROPERTIES
from lib389.topologies import topology_m2
from lib389._constants import *

pytestmark = pytest.mark.tier1

# Changelog attribute that must (not) appear in clear text
ATTRIBUTE = 'unhashed#user#password'

DEBUGGING = os.getenv("DEBUGGING", default=False)
if DEBUGGING:
    logging.getLogger(__name__).setLevel(logging.DEBUG)
else:
    logging.getLogger(__name__).setLevel(logging.INFO)
log = logging.getLogger(__name__)


@pytest.fixture(scope="module")
def topology_with_tls(topology_m2):
    """Enable TLS on all suppliers"""

    # Plain loop instead of a side-effect list comprehension
    for inst in topology_m2:
        inst.enable_tls()

    repl = ReplicationManager(DEFAULT_SUFFIX)
    repl.test_replication(topology_m2.ms['supplier1'], topology_m2.ms['supplier2'])

    return topology_m2


def _enable_changelog_encryption(inst, encrypt_algorithm):
    """Configure changelog encryption for supplier

    :param inst: the supplier instance to configure
    :param encrypt_algorithm: value for nsslapd-encryptionalgorithm (e.g. 'AES')
    """

    dse_ldif = DSEldif(inst)
    log.info('Configuring changelog encryption:{} for: {}'.format(inst.serverid, encrypt_algorithm))
    # The dse.ldif can only be edited safely while the server is down
    inst.stop()
    if ds_supports_new_changelog():
        changelog = 'cn=changelog,{}'.format(DN_USERROOT_LDBM)
    else:
        changelog = DN_CHANGELOG

    dse_ldif.replace(changelog, 'nsslapd-encryptionalgorithm', encrypt_algorithm)
    # Drop any stale symmetric key so a fresh one is generated on startup
    if dse_ldif.get(changelog, 'nsSymmetricKey'):
        dse_ldif.delete(changelog, 'nsSymmetricKey')
    inst.start()


def _check_unhashed_userpw_encrypted(inst, change_type, user_dn, user_pw, is_encrypted):
    """Check if unhashed#user#password attribute value is encrypted or not

    :param inst: instance whose changelog is scanned with dbscan
    :param change_type: changelog operation to look for ('add' or 'modify')
    :param user_dn: DN of the entry expected in the changelog
    :param user_pw: clear text password to look for
    :param is_encrypted: True if the password must NOT appear in clear text
    """

    if ds_supports_new_changelog():
        log.info('Running dbscan -f to check {} attr'.format(ATTRIBUTE))
        dbscanOut = inst.dbscan(DEFAULT_BENAME, 'replication_changelog')
    else:
        changelog_dbdir = os.path.join(os.path.dirname(inst.dbdir), DEFAULT_CHANGELOG_DB)
        for changelog_dbfile in glob.glob(f'{changelog_dbdir}*/*.db*'):
            log.info('Changelog dbfile file exist: {}'.format(changelog_dbfile))
            log.info('Running dbscan -f to check {} attr'.format(ATTRIBUTE))
            dbscanOut = inst.dbscan(DEFAULT_CHANGELOG_DB, changelog_dbfile)

    count = 0
    for entry in dbscanOut.split(b'dbid: '):
        if ensure_bytes('operation: {}'.format(change_type)) in entry and\
           ensure_bytes(ATTRIBUTE) in entry and ensure_bytes(user_dn.lower()) in entry.lower():
            count += 1
            user_pw_attr = ensure_bytes('{}: {}'.format(ATTRIBUTE, user_pw))
            if is_encrypted:
                assert user_pw_attr not in entry, 'Changelog entry contains clear text password'
            else:
                assert user_pw_attr in entry, 'Changelog entry does not contain clear text password'
    assert count, 'Operation type and DN of the entry not matched in changelog'
# Unstable/flaky test, automatically retried for now
@pytest.mark.flaky(max_runs=2, min_passes=1)
def test_algorithm_unhashed(topology_with_tls):
    """Check encryption algorithm AES
    and check unhashed#user#password attribute for encryption.

    :id: b7a37bf8-4b2e-4dbd-9891-70117d67558c
    :parametrized: yes
    :setup: Replication with two suppliers and SSL configured.
    :steps:
        1. Enable changelog encryption on supplier1
        2. Add a user to supplier1/supplier2
        3. Run dbscan -f on m1 to check unhashed#user#password
           attribute is encrypted.
        4. Run dbscan -f on m2 to check unhashed#user#password
           attribute is in cleartext.
        5. Modify password in supplier2/supplier1
        6. Run dbscan -f on m1 to check unhashed#user#password
           attribute is encrypted.
        7. Run dbscan -f on m2 to check unhashed#user#password
           attribute is in cleartext.
    :expectedresults:
        1. It should pass
        2. It should pass
        3. It should pass
        4. It should pass
        5. It should pass
        6. It should pass
        7. It should pass
    """
    encryption = 'AES'
    m1 = topology_with_tls.ms['supplier1']
    m2 = topology_with_tls.ms['supplier2']
    m1.config.set('nsslapd-unhashed-pw-switch', 'on')
    m2.config.set('nsslapd-unhashed-pw-switch', 'on')
    test_passw = 'm2Test199'

    _enable_changelog_encryption(m1, encryption)

    for inst1, inst2 in ((m1, m2), (m2, m1)):
        # Need to create a user specific to the encryption,
        # else the two runs would hit the same user
        user_props = {
            'uid': 'testuser_%s' % encryption,
            'cn': 'testuser_%s' % encryption,
            'sn': 'user',
            'uidNumber': '1000',
            'gidNumber': '1000',
            'homeDirectory': '/home/testuser_%s' % encryption
        }
        user_props["userPassword"] = PASSWORD
        users = UserAccounts(inst1, DEFAULT_SUFFIX)
        tuser = users.create(properties=user_props)

        _check_unhashed_userpw_encrypted(m1, 'add', tuser.dn, PASSWORD, True)
        _check_unhashed_userpw_encrypted(m2, 'add', tuser.dn, PASSWORD, False)

        users = UserAccounts(inst2, DEFAULT_SUFFIX)
        tuser = users.get(tuser.rdn)
        tuser.set('userPassword', test_passw)
        _check_unhashed_userpw_encrypted(m1, 'modify', tuser.dn, test_passw, True)
        _check_unhashed_userpw_encrypted(m2, 'modify', tuser.dn, test_passw, False)
        tuser.delete()


if __name__ == '__main__':
    # Run isolated
    # -s for DEBUG mode
    CURRENT_FILE = os.path.realpath(__file__)
    # pytest.main() requires a list of arguments, not a formatted string
    pytest.main(["-s", CURRENT_FILE])
# --- END COPYRIGHT BLOCK ---
#
import ldap
import logging
import pytest
import os
import threading
import time
from lib389._constants import *
from lib389.topologies import topology_m1c1 as topo
from lib389.idm.directorymanager import DirectoryManager
from lib389.idm.domain import Domain
from lib389.backend import Backend
from lib389.replica import Replicas, ReplicationManager
from lib389.config import LDBMConfig

log = logging.getLogger(__name__)

SECOND_SUFFIX = 'dc=second_suffix'
MOD_COUNT = 50


class DoMods(threading.Thread):
    """Background thread that repeatedly modifies the suffix entry."""

    def __init__(self, inst, task):
        """Initialize the thread

        :param inst: instance to run the modifies against
        :param task: "import" (failures are tolerated) or "export" (failures are fatal)
        """
        threading.Thread.__init__(self)
        self.daemon = True
        self.inst = inst
        self.name = inst.serverid
        self.task = task

    def run(self):
        """Apply MOD_COUNT modifies to the suffix entry"""
        idx = 0
        conn = DirectoryManager(self.inst).bind()
        domain = Domain(conn, DEFAULT_SUFFIX)
        while idx < MOD_COUNT:
            try:
                domain.replace('description', str(idx))
            # Fixed: was a bare 'except:' which also swallowed SystemExit /
            # KeyboardInterrupt; keep it broad but not bare.
            except Exception:
                if self.task == "import":
                    # Failures are expected during an import
                    pass
                else:
                    # export, should not fail
                    log.fatal('Updates should not fail during an export')
                    assert False
            idx += 1
def test_multiple_changelogs(topo):
    """Test that multiple suffixes can be replicated with the new per backend
    changelog.

    :id: eafcdb57-4ea2-4887-a0a8-9e4d295f4f4d
    :setup: Supplier Instance, Consumer Instance
    :steps:
        1. Create a second suffix
        2. Enable replication for second backend
        3. Perform some updates on both backends and make sure replication is
           working for both backends
    :expectedresults:
        1. Success
        2. Success
        3. Success
    """
    supplier = topo.ms['supplier1']
    consumer = topo.cs['consumer1']

    # Create second suffix dc=second_suffix on both replicas
    for inst in [supplier, consumer]:
        # Create the backends
        props = {'cn': 'secondRoot', 'nsslapd-suffix': SECOND_SUFFIX}
        be = Backend(inst)
        be.create(properties=props)
        be.create_sample_entries('001004002')

    # Setup replication for second suffix
    repl = ReplicationManager(SECOND_SUFFIX)
    repl.create_first_supplier(supplier)
    repl.join_consumer(supplier, consumer)

    # Test replication works for each backend
    for suffix in [DEFAULT_SUFFIX, SECOND_SUFFIX]:
        replicas = Replicas(supplier)
        replica = replicas.get(suffix)
        log.info("Testing replication for: " + suffix)
        assert replica.test_replication([consumer])
def test_multiple_changelogs_export_import(topo):
    """Test that we can export and import the replication changelog

    :id: b74fcaaf-a13f-4ee0-98f9-248b281f8700
    :setup: Supplier Instance, Consumer Instance
    :steps:
        1. Create a second suffix
        2. Enable replication for second backend
        3. Perform some updates on a backend, and export the changelog
        4. Do an export and import while the server is idle
        5. Do an import while the server is under load
    :expectedresults:
        1. Success
        2. Success
        3. Success
        4. Success
        5. Success
    """
    # Fixed: removed a local SECOND_SUFFIX that shadowed the identical
    # module-level constant.
    supplier = topo.ms['supplier1']
    consumer = topo.cs['consumer1']
    supplier.config.set('nsslapd-errorlog-level', '0')

    # Create second suffix dc=second_suffix on both replicas;
    # tolerate reruns where the backend already exists
    for inst in [supplier, consumer]:
        props = {'cn': 'secondRoot', 'nsslapd-suffix': SECOND_SUFFIX}
        be = Backend(inst)
        try:
            be.create(properties=props)
            be.create_sample_entries('001004002')
        except ldap.UNWILLING_TO_PERFORM:
            pass

    # Setup replication for second suffix
    try:
        repl = ReplicationManager(SECOND_SUFFIX)
        repl.create_first_supplier(supplier)
        repl.join_consumer(supplier, consumer)
    except ldap.ALREADY_EXISTS:
        pass

    # Put the replica under load, and export the changelog
    replicas = Replicas(supplier)
    replica = replicas.get(DEFAULT_SUFFIX)
    doMods1 = DoMods(supplier, task="export")
    doMods1.start()
    replica.begin_task_cl2ldif()
    doMods1.join()
    replica.task_finished()

    supplier.restart()
    assert replica.test_replication([consumer])

    # While idle, do an export and import, and make sure replication still works
    log.info("Testing idle server with CL export and import...")
    ldbm_config = LDBMConfig(supplier)
    ldbm_config.set('nsslapd-readonly', 'on')  # prevent keep alive updates
    replica.begin_task_cl2ldif()
    replica.task_finished()
    replica.begin_task_ldif2cl()
    replica.task_finished()
    ldbm_config.set('nsslapd-readonly', 'off')
    assert replica.test_replication([consumer])

    # Stability test: put the replica under load, import the changelog, and
    # make sure the server did not crash.
    log.info("Testing busy server with CL import...")
    doMods2 = DoMods(supplier, task="import")
    doMods2.start()
    replica.begin_task_ldif2cl()
    doMods2.join()
    replica.task_finished()
    # Replication will be broken so no need to test it.  This just makes sure
    # the import works, and the server is stable
    assert supplier.status()
    assert consumer.status()


if __name__ == '__main__':
    # Run isolated
    # -s for DEBUG mode
    CURRENT_FILE = os.path.realpath(__file__)
    pytest.main(["-s", CURRENT_FILE])
import logging
import pytest
import os
from lib389._constants import DEFAULT_SUFFIX, ReplicaRole
from lib389.topologies import topology_m1h1c1 as topo
from lib389.replica import Replicas, ReplicationManager, Agreements

pytestmark = pytest.mark.tier1

log = logging.getLogger(__name__)


def test_promote_demote(topo):
    """Test promoting and demoting a replica

    :id: 75edff64-f987-4ed5-a03d-9bee73c0fbf0
    :setup: Supplier, Hub and Consumer instances (m1h1c1 topology)
    :steps:
        1. Promote Hub to a Supplier
        2. Test replication works
        3. Demote the hub replica to a consumer
        4. Test replication works
        5. Promote it back to a supplier and reinit
        6. Test replication works
    :expectedresults:
        1. Success
        2. Success
        3. Success
        4. Success
        5. Success
        6. Success
    """

    supplier = topo.ms["supplier1"]
    supplier_replica = Replicas(supplier).get(DEFAULT_SUFFIX)
    bind_dn = supplier_replica.get_attr_val_utf8('nsDS5ReplicaBindDN')
    hub = topo.hs["hub1"]
    hub_replica = Replicas(hub).get(DEFAULT_SUFFIX)
    consumer = topo.cs["consumer1"]

    repl = ReplicationManager(DEFAULT_SUFFIX)

    # Promote the hub replica to a supplier and verify end-to-end replication
    hub_replica.promote(ReplicaRole.SUPPLIER, binddn=bind_dn, rid='55')
    repl.test_replication(supplier, consumer)

    # Demote the hub replica back to a consumer
    hub_replica.demote(ReplicaRole.CONSUMER)
    repl.test_replication(supplier, hub)

    # Promote the replica again and init it
    hub_replica.promote(ReplicaRole.SUPPLIER, binddn=bind_dn, rid='56')
    agmt = Agreements(supplier).list()[0]
    agmt.begin_reinit()
    agmt.wait_reinit()

    # Init the consumer
    agmt = Agreements(hub).list()[0]
    agmt.begin_reinit()
    agmt.wait_reinit()
    repl.test_replication(supplier, consumer)


if __name__ == '__main__':
    # Run isolated
    # -s for DEBUG mode
    CURRENT_FILE = os.path.realpath(__file__)
    pytest.main(["-s", CURRENT_FILE])
# --- END COPYRIGHT BLOCK ---
#
import os
import logging
import pytest
from lib389.utils import *
from lib389._constants import *
from lib389.replica import Replicas, ReplicationManager
from lib389.dseldif import *
from lib389.topologies import topology_i2 as topo_i2


pytestmark = pytest.mark.tier1

NEW_SUFFIX_NAME = 'test_repl'
NEW_SUFFIX = 'o={}'.format(NEW_SUFFIX_NAME)
NEW_BACKEND = 'repl_base'
CHANGELOG = 'cn=changelog,{}'.format(DN_USERROOT_LDBM)
MAXAGE_ATTR = 'nsslapd-changelogmaxage'
MAXAGE_STR = '30'
TRIMINTERVAL_STR = '5'
TRIMINTERVAL = 'nsslapd-changelogtrim-interval'

DEBUGGING = os.getenv("DEBUGGING", default=False)
if DEBUGGING:
    logging.getLogger(__name__).setLevel(logging.DEBUG)
else:
    logging.getLogger(__name__).setLevel(logging.INFO)
log = logging.getLogger(__name__)


def test_special_symbol_replica_agreement(topo_i2):
    """Check that an agreement whose cn starts with "-\\3..." is not
    removed by an online upgrade.

    :id: 68aa0072-4dd4-4e33-b107-cb383a439125
    :setup: two standalone instances
    :steps:
        1. Create and Enable Replication on standalone2 and role as consumer
        2. Create and Enable Replication on standalone1 and role as supplier
        3. Create a Replication agreement starting with "cn=-\\3..."
        4. Perform an upgrade operation over the supplier
        5. Check if the agreement is still present or not.
    :expectedresults:
        1. It should be successful
        2. It should be successful
        3. It should be successful
        4. It should be successful
        5. It should be successful
    """

    supplier = topo_i2.ins["standalone1"]
    consumer = topo_i2.ins["standalone2"]
    consumer.replica.enableReplication(suffix=DEFAULT_SUFFIX, role=ReplicaRole.CONSUMER,
                                       replicaId=CONSUMER_REPLICAID)
    repl = ReplicationManager(DEFAULT_SUFFIX)
    repl.create_first_supplier(supplier)

    properties = {RA_NAME: '-\\3meTo_{}:{}'.format(consumer.host, str(consumer.port)),
                  RA_BINDDN: defaultProperties[REPLICATION_BIND_DN],
                  RA_BINDPW: defaultProperties[REPLICATION_BIND_PW],
                  RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD],
                  RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]}

    supplier.agreement.create(suffix=SUFFIX,
                              host=consumer.host,
                              port=consumer.port,
                              properties=properties)

    supplier.agreement.init(SUFFIX, consumer.host, consumer.port)

    replica_server = Replicas(supplier).get(DEFAULT_SUFFIX)

    supplier.upgrade('online')

    # The specially-named agreement must have survived the upgrade
    agmt = replica_server.get_agreements().list()[0]

    assert agmt.get_attr_val_utf8('cn') == '-\\3meTo_{}:{}'.format(consumer.host, str(consumer.port))


if __name__ == '__main__':
    # Run isolated
    # -s for DEBUG mode
    CURRENT_FILE = os.path.realpath(__file__)
    # pytest.main() requires a list of arguments, not a formatted string
    pytest.main(["-s", CURRENT_FILE])
+# --- END COPYRIGHT BLOCK --- +# +import os +import re +import time +import logging +import ldif +import ldap +import pytest +import subprocess +import time +from lib389.idm.user import TEST_USER_PROPERTIES, UserAccount, UserAccounts +from lib389.pwpolicy import PwPolicyManager +from lib389.utils import * +from lib389._constants import * +from lib389.idm.domain import Domain +from lib389.idm.organizationalunit import OrganizationalUnits +from lib389.idm.group import Groups, Group +from lib389.idm.domain import Domain +from lib389.idm.directorymanager import DirectoryManager +from lib389.idm.services import ServiceAccounts, ServiceAccount +from lib389.replica import Replicas, ReplicationManager, ReplicaRole +from lib389.agreement import Agreements +from lib389 import pid_from_file +from lib389.dseldif import * +from lib389.topologies import topology_m2 as topo_m2, TopologyMain, create_topology, _remove_ssca_db + + +pytestmark = pytest.mark.tier1 + +NEW_SUFFIX_NAME = 'test_repl' +NEW_SUFFIX = 'o={}'.format(NEW_SUFFIX_NAME) +NEW_BACKEND = 'repl_base' +CHANGELOG = 'cn=changelog,{}'.format(DN_USERROOT_LDBM) +MAXAGE_ATTR = 'nsslapd-changelogmaxage' +MAXAGE_STR = '30' +TRIMINTERVAL_STR = '5' +TRIMINTERVAL = 'nsslapd-changelogtrim-interval' + +DEBUGGING = os.getenv("DEBUGGING", default=False) +if DEBUGGING: + logging.getLogger(__name__).setLevel(logging.DEBUG) +else: + logging.getLogger(__name__).setLevel(logging.INFO) +log = logging.getLogger(__name__) + + +class _AgmtHelper: + """test_change_repl_passwd helper (Easy access to bind and agmt entries)""" + + def __init__(self, from_inst, to_inst, cn = None): + self.from_inst = from_inst + self.to_inst = to_inst + if cn: + self.usedn = True + self.cn = cn + self.binddn = f'cn={cn},cn=config' + else: + self.usedn = False + self.cn = f'{self.from_inst.host}:{self.from_inst.sslport}' + self.binddn = f'cn={self.cn}, ou=Services, {DEFAULT_SUFFIX}' + self.original_state = [] + self._pass = False + + def _save_vals(self, entry, 
attrslist, name): + """Get current property value for cn and requested attributes""" + repl_prop = [] + del_prop = [] + for attr in attrslist: + try: + val = entry.get_attr_val_utf8(attr) + if val is None: + del_prop.append((attr,)) + else: + repl_prop.append((attr,val)) + except ldap.NO_SUCH_OBJECT: + del_prop.append((attr,)) + self.original_state.append((entry, repl_prop, del_prop)) + + def init(self, request): + """Initialize the _AgmtHelper""" + agmt = self.get_agmt() + replica = Replicas(self.to_inst).get(DEFAULT_SUFFIX) + bind_entry = self.get_bind_entry() + # Preserve current configuartion + self._save_vals(agmt, ('nsds5ReplicaCredentials', 'nsds5ReplicaBindDN'), 'agmt') + self._save_vals(replica, ('nsds5ReplicaBindDN', 'nsDS5ReplicaBindDNGroup'), 'replica') + if not self.usedn: + self._save_vals(bind_entry, ('userPassword',), 'bind_entry') + + if self.usedn: + # if using bind group, the topology is already initted (by topo_m2) + # if using bind dn, should create the bind entry and configure the agmt and replica + passwd='replrepl' + # Creates the bind entry + bind_entry.ensure_state(properties={ + 'cn' : self.cn, + 'userPassword': passwd + }) + # Modify the replica + replica.replace('nsds5ReplicaBindDN', self.binddn) + replica.remove_all('nsds5ReplicaBindDNGroup') + # Modify the agmt + agmt.replace_many( ('nsds5ReplicaCredentials', passwd), + ('nsds5ReplicaBindDN', self.binddn)) + # Add a finalizer to restore the original configuration + def fin(): + if not self._pass and "-x" in sys.argv: + # Keep config as is if debugging a failed test + return + # remove the added bind entry + if self.usedn: + bind_entry.delete() + # Restore the original entries + for entry, repl_prop, del_prop, in self.original_state: + log.debug(f"dn: {entry.dn} repl_prop={repl_prop} del_prop={del_prop}") + if repl_prop: + entry.replace_many(*repl_prop) + if del_prop: + for attr, in del_prop: + entry.remove_all(attr) + request.addfinalizer(fin) + + + def get_bind_entry(self): + """Get 
bind entry (on consumer)""" + return ServiceAccount(self.to_inst, dn=self.binddn) + + def get_agmt(self): + """Get agmt entry (on supplier)""" + agmts = Agreements(self.from_inst) + for agmt in agmts.list(): + port = agmt.get_attr_val_utf8('nsDS5ReplicaPort') + if port == str(self.to_inst.port) or port == str(self.to_inst.sslport): + return agmt + raise AssertionError(f'no agmt toward {self.to_inst.serverid} found on {self.from_inst.serverid}') + + def change_pw(self, passwd): + """Change bind entry and agmt entry password""" + self.get_bind_entry().replace('userPassword', passwd) + self.get_agmt().replace('nsds5ReplicaCredentials', passwd) + + def testok(self): + self._pass = True + + +def find_start_location(file, no): + log_pattern = re.compile("slapd_daemon - slapd started.") + count = 0 + while True: + line = file.readline() + log.debug("_pattern_errorlog: [%d] %s" % (file.tell(), line)) + found = log_pattern.search(line) + if (found): + count = count + 1 + if (count == no): + return file.tell() + if (line == ''): + break + return -1 + + +def pattern_errorlog(file, log_pattern, start_location=0): + + count = 0 + log.debug("_pattern_errorlog: start from the beginning") + file.seek(start_location) + + # Use a while true iteration because 'for line in file: hit a + # python bug that break file.tell() + while True: + line = file.readline() + log.debug("_pattern_errorlog: [%d] %s" % (file.tell(), line)) + found = log_pattern.search(line) + if (found): + count = count + 1 + if (line == ''): + break + + log.debug("_pattern_errorlog: complete (count=%d)" % count) + return count + + +def _move_ruv(ldif_file): + """ Move RUV entry in an ldif file to the top""" + + with open(ldif_file) as f: + parser = ldif.LDIFRecordList(f) + parser.parse() + + ldif_list = parser.all_records + for dn in ldif_list: + if dn[0].startswith('nsuniqueid=ffffffff-ffffffff-ffffffff-ffffffff'): + ruv_index = ldif_list.index(dn) + ldif_list.insert(0, ldif_list.pop(ruv_index)) + break + + with 
open(ldif_file, 'w') as f: + ldif_writer = ldif.LDIFWriter(f) + for dn, entry in ldif_list: + ldif_writer.unparse(dn, entry) + + +def _remove_replication_data(ldif_file): + """ Remove the replication data from ldif file: + db2lif without -r includes some of the replica data like + - nsUniqueId + - keepalive entries + This function filters the ldif fil to remove these data + """ + + with open(ldif_file) as f: + parser = ldif.LDIFRecordList(f) + parser.parse() + + ldif_list = parser.all_records + # Iterate on a copy of the ldif entry list + for dn, entry in ldif_list[:]: + if dn.startswith('cn=repl keep alive'): + ldif_list.remove((dn, entry)) + else: + entry.pop('nsUniqueId') + with open(ldif_file, 'w') as f: + ldif_writer = ldif.LDIFWriter(f) + for dn, entry in ldif_list: + ldif_writer.unparse(dn, entry) + + +@pytest.fixture(scope="function") +def topo_with_sigkill(request): + """Create Replication Deployment with two suppliers""" + + topology = create_topology({ReplicaRole.SUPPLIER: 2}) + + def _kill_ns_slapd(inst): + pid = str(pid_from_file(inst.ds_paths.pid_file)) + cmd = ['kill', '-9', pid] + subprocess.Popen(cmd, stdout=subprocess.PIPE) + + def fin(): + # Kill the hanging process at the end of test to prevent failures in the following tests + if DEBUGGING: + [_kill_ns_slapd(inst) for inst in topology] + else: + [_kill_ns_slapd(inst) for inst in topology] + assert _remove_ssca_db(topology) + [inst.stop() for inst in topology if inst.exists()] + [inst.delete() for inst in topology if inst.exists()] + request.addfinalizer(fin) + + return topology + + +@pytest.fixture() +def create_entry(topo_m2, request): + """Add test entry using UserAccounts""" + + log.info('Adding a test entry user') + users = UserAccounts(topo_m2.ms["supplier1"], DEFAULT_SUFFIX) + tuser = users.ensure_state(properties=TEST_USER_PROPERTIES) + return tuser + + +def add_ou_entry(server, idx, parent): + ous = OrganizationalUnits(server, parent) + name = 'OU%d' % idx + ous.create(properties={'ou': 
'%s' % name}) + + +def add_user_entry(server, idx, parent): + users = UserAccounts(server, DEFAULT_SUFFIX, rdn=parent) + user_properties = { + 'uid': 'tuser%d' % idx, + 'givenname': 'test', + 'cn': 'Test User%d' % idx, + 'sn': 'user%d' % idx, + 'userpassword': PW_DM, + 'uidNumber': '1000%d' % idx, + 'gidNumber': '2000%d' % idx, + 'homeDirectory': '/home/{}'.format('tuser%d' % idx) + } + users.create(properties=user_properties) + + +def del_user_entry(server, idx, parent): + users = UserAccounts(server, DEFAULT_SUFFIX, rdn=parent) + test_user = users.get('tuser%d' % idx) + test_user.delete() + + +def rename_entry(server, idx, ou_name, new_parent): + users = UserAccounts(server, DEFAULT_SUFFIX, rdn=ou_name) + name = 'tuser%d' % idx + rdn = 'uid=%s' % name + test_user = users.get(name) + test_user.rename(new_rdn=rdn, newsuperior=new_parent) + + +def add_ldapsubentry(server, parent): + pwp = PwPolicyManager(server) + policy_props = {'passwordStorageScheme': 'ssha', + 'passwordCheckSyntax': 'on', + 'passwordInHistory': '6', + 'passwordChange': 'on', + 'passwordMinAge': '0', + 'passwordExp': 'off', + 'passwordMustChange': 'off',} + log.info('Create password policy for subtree {}'.format(parent)) + pwp.create_subtree_policy(parent, policy_props) + + +def test_double_delete(topo_m2, create_entry): + """Check that double delete of the entry doesn't crash server + + :id: 3496c82d-636a-48c9-973c-2455b12164cc + :setup: Two suppliers replication setup, a test entry + :steps: + 1. Delete the entry on the first supplier + 2. Delete the entry on the second supplier + 3. Check that server is alive + :expectedresults: + 1. Entry should be successfully deleted from first supplier + 2. Entry should be successfully deleted from second aster + 3. 
Server should me alive + """ + + m1 = topo_m2.ms["supplier1"] + m2 = topo_m2.ms["supplier2"] + + repl = ReplicationManager(DEFAULT_SUFFIX) + repl.disable_to_supplier(m1, [m2]) + repl.disable_to_supplier(m2, [m1]) + + log.info('Deleting entry {} from supplier1'.format(create_entry.dn)) + topo_m2.ms["supplier1"].delete_s(create_entry.dn) + + try: + log.info('Deleting entry {} from supplier2'.format(create_entry.dn)) + topo_m2.ms["supplier2"].delete_s(create_entry.dn) + except ldap.NO_SUCH_OBJECT: + # replication was too fast (DEBUGGING is probably set) + pass + + repl.enable_to_supplier(m2, [m1]) + repl.enable_to_supplier(m1, [m2]) + + repl.test_replication(m1, m2) + repl.test_replication(m2, m1) + + +@pytest.mark.bz1506831 +def test_repl_modrdn(topo_m2): + """Test that replicated MODRDN does not break replication + + :id: a3e17698-9eb4-41e0-b537-8724b9915fa6 + :setup: Two suppliers replication setup + :steps: + 1. Add 3 test OrganizationalUnits A, B and C + 2. Add 1 test user under OU=A + 3. Add same test user under OU=B + 4. Stop Replication + 5. Apply modrdn to M1 - move test user from OU A -> C + 6. Apply modrdn on M2 - move test user from OU B -> C + 7. Start Replication + 8. Check that there should be only one test entry under ou=C on both suppliers + 9. Check that the replication is working fine both ways M1 <-> M2 + :expectedresults: + 1. This should pass + 2. This should pass + 3. This should pass + 4. This should pass + 5. This should pass + 6. This should pass + 7. This should pass + 8. This should pass + 9. 
This should pass + """ + + supplier1 = topo_m2.ms["supplier1"] + supplier2 = topo_m2.ms["supplier2"] + + repl = ReplicationManager(DEFAULT_SUFFIX) + + log.info("Add test entries - Add 3 OUs and 2 same users under 2 different OUs") + OUs = OrganizationalUnits(supplier1, DEFAULT_SUFFIX) + OU_A = OUs.create(properties={ + 'ou': 'A', + 'description': 'A', + }) + OU_B = OUs.create(properties={ + 'ou': 'B', + 'description': 'B', + }) + OU_C = OUs.create(properties={ + 'ou': 'C', + 'description': 'C', + }) + + users = UserAccounts(supplier1, DEFAULT_SUFFIX, rdn='ou={}'.format(OU_A.rdn)) + tuser_A = users.create(properties=TEST_USER_PROPERTIES) + + users = UserAccounts(supplier1, DEFAULT_SUFFIX, rdn='ou={}'.format(OU_B.rdn)) + tuser_B = users.create(properties=TEST_USER_PROPERTIES) + + repl.test_replication(supplier1, supplier2) + repl.test_replication(supplier2, supplier1) + + log.info("Stop Replication") + topo_m2.pause_all_replicas() + + log.info("Apply modrdn to M1 - move test user from OU A -> C") + supplier1.rename_s(tuser_A.dn, 'uid=testuser1', newsuperior=OU_C.dn, delold=1) + + log.info("Apply modrdn on M2 - move test user from OU B -> C") + supplier2.rename_s(tuser_B.dn, 'uid=testuser1', newsuperior=OU_C.dn, delold=1) + + log.info("Start Replication") + topo_m2.resume_all_replicas() + + log.info("Wait for sometime for repl to resume") + repl.test_replication(supplier1, supplier2) + repl.test_replication(supplier2, supplier1) + + log.info("Check that there should be only one test entry under ou=C on both suppliers") + users = UserAccounts(supplier1, DEFAULT_SUFFIX, rdn='ou={}'.format(OU_C.rdn)) + assert len(users.list()) == 1 + + users = UserAccounts(supplier2, DEFAULT_SUFFIX, rdn='ou={}'.format(OU_C.rdn)) + assert len(users.list()) == 1 + + log.info("Check that the replication is working fine both ways, M1 <-> M2") + repl.test_replication(supplier1, supplier2) + repl.test_replication(supplier2, supplier1) + + +def test_password_repl_error(topo_m2, create_entry): + 
"""Check that error about userpassword replication is properly logged + + :id: 714130ff-e4f0-4633-9def-c1f4b24abfef + :setup: Four suppliers replication setup, a test entry + :steps: + 1. Change userpassword on the first supplier + 2. Restart the servers to flush the logs + 3. Check the error log for an replication error + :expectedresults: + 1. Password should be successfully changed + 2. Server should be successfully restarted + 3. There should be no replication errors in the error log + """ + + m1 = topo_m2.ms["supplier1"] + m2 = topo_m2.ms["supplier2"] + TEST_ENTRY_NEW_PASS = 'new_pass' + + log.info('Clean the error log') + m2.deleteErrorLogs() + + log.info('Set replication loglevel') + m2.config.loglevel((ErrorLog.REPLICA,)) + + log.info('Modifying entry {} - change userpassword on supplier 1'.format(create_entry.dn)) + + create_entry.set('userpassword', TEST_ENTRY_NEW_PASS) + + repl = ReplicationManager(DEFAULT_SUFFIX) + repl.wait_for_replication(m1, m2) + + log.info('Restart the servers to flush the logs') + for num in range(1, 3): + topo_m2.ms["supplier{}".format(num)].restart() + + try: + log.info('Check that password works on supplier 2') + create_entry_m2 = UserAccount(m2, create_entry.dn) + create_entry_m2.bind(TEST_ENTRY_NEW_PASS) + + log.info('Check the error log for the error with {}'.format(create_entry.dn)) + assert not m2.ds_error_log.match('.*can.t add a change for {}.*'.format(create_entry.dn)) + finally: + log.info('Set the default loglevel') + m2.config.loglevel((ErrorLog.DEFAULT,)) + + +def test_invalid_agmt(topo_m2): + """Test adding that an invalid agreement is properly rejected and does not crash the server + + :id: 6c3b2a7e-edcd-4327-a003-6bd878ff722b + :setup: Four suppliers replication setup + :steps: + 1. Add invalid agreement (nsds5ReplicaEnabled set to invalid value) + 2. Verify the server is still running + :expectedresults: + 1. Invalid repl agreement should be rejected + 2. 
Server should be still running + """ + + m1 = topo_m2.ms["supplier1"] + m2 = topo_m2.ms["supplier2"] + + repl = ReplicationManager(DEFAULT_SUFFIX) + + replicas = Replicas(m1) + replica = replicas.get(DEFAULT_SUFFIX) + agmts = replica.get_agreements() + + # Add invalid agreement (nsds5ReplicaEnabled set to invalid value) + with pytest.raises(ldap.UNWILLING_TO_PERFORM): + agmts.create(properties={ + 'cn': 'whatever', + 'nsDS5ReplicaRoot': DEFAULT_SUFFIX, + 'nsDS5ReplicaBindDN': 'cn=replication manager,cn=config', + 'nsDS5ReplicaBindMethod': 'simple', + 'nsDS5ReplicaTransportInfo': 'LDAP', + 'nsds5replicaTimeout': '5', + 'description': "test agreement", + 'nsDS5ReplicaHost': m2.host, + 'nsDS5ReplicaPort': str(m2.port), + 'nsDS5ReplicaCredentials': 'whatever', + 'nsds5ReplicaEnabled': 'YEAH MATE, LETS REPLICATE' + }) + + # Verify the server is still running + repl = ReplicationManager(DEFAULT_SUFFIX) + repl.test_replication(m1, m2) + repl.test_replication(m2, m1) + + +def test_fetch_bindDnGroup(topo_m2): + """Check the bindDNGroup is fetched on first replication session + + :id: 5f1b1f59-6744-4260-b091-c82d22130025 + :setup: 2 Supplier Instances + :steps: + 1. Create a replication bound user and group, but the user *not* member of the group + 2. Check that replication is working + 3. Some preparation is required because of lib389 magic that already define a replication via group + - define the group as groupDN for replication and 60sec as fetch interval + - pause RA in both direction + - Define the user as bindDn of the RAs + 4. restart servers. + It sets the fetch time to 0, so next session will refetch the group + 5. Before resuming RA, add user to groupDN (on both side as replication is not working at that time) + 6. trigger an update and check replication is working and + there is no failure logged on supplier side 'does not have permission to supply replication updates to the replica' + :expectedresults: + 1. Success + 2. Success + 3. Success + 4. Success + 5. 
Success + 6. Success + """ + + # If you need any test suite initialization, + # please, write additional fixture for that (including finalizer). + # Topology for suites are predefined in lib389/topologies.py. + + # If you need host, port or any other data about instance, + # Please, use the instance object attributes for that (for example, topo.ms["supplier1"].serverid) + M1 = topo_m2.ms['supplier1'] + M2 = topo_m2.ms['supplier2'] + + # Enable replication log level. Not really necessary + M1.modify_s('cn=config', [(ldap.MOD_REPLACE, 'nsslapd-errorlog-level', b'8192')]) + M2.modify_s('cn=config', [(ldap.MOD_REPLACE, 'nsslapd-errorlog-level', b'8192')]) + + # Create a group and a user + PEOPLE = "ou=People,%s" % SUFFIX + PASSWD = 'password' + REPL_MGR_BOUND_DN = 'repl_mgr_bound_dn' + + uid = REPL_MGR_BOUND_DN.encode() + users = UserAccounts(M1, PEOPLE, rdn=None) + user_props = TEST_USER_PROPERTIES.copy() + user_props.update({'uid': uid, 'cn': uid, 'sn': '_%s' % uid, 'userpassword': PASSWD.encode(), 'description': b'value creation'}) + create_user = users.create(properties=user_props) + + groups_M1 = Groups(M1, DEFAULT_SUFFIX) + group_properties = { + 'cn': 'group1', + 'description': 'testgroup'} + group_M1 = groups_M1.create(properties=group_properties) + group_M2 = Group(M2, group_M1.dn) + assert(not group_M1.is_member(create_user.dn)) + + # Check that M1 and M2 are in sync + repl = ReplicationManager(DEFAULT_SUFFIX) + repl.wait_for_replication(M1, M2, timeout=20) + + save_values = [] # Save original value to restore them at the end of the test + # Define the group as the replication manager and fetch interval as 60sec + replicas = Replicas(M1) + replica = replicas.list()[0] + save_values.append((replica, 'nsDS5ReplicaBindDnGroupCheckInterval', replica.get_attr_val_utf8('nsDS5ReplicaBindDnGroupCheckInterval'))) + save_values.append((replica, 'nsDS5ReplicaBindDnGroup', replica.get_attr_val_utf8('nsDS5ReplicaBindDnGroup'))) + replica.apply_mods([(ldap.MOD_REPLACE, 
'nsDS5ReplicaBindDnGroupCheckInterval', '60'), + (ldap.MOD_REPLACE, 'nsDS5ReplicaBindDnGroup', group_M1.dn)]) + + replicas = Replicas(M2) + replica = replicas.list()[0] + save_values.append((replica, 'nsDS5ReplicaBindDnGroupCheckInterval', replica.get_attr_val_utf8('nsDS5ReplicaBindDnGroupCheckInterval'))) + save_values.append((replica, 'nsDS5ReplicaBindDnGroup', replica.get_attr_val_utf8('nsDS5ReplicaBindDnGroup'))) + replica.apply_mods([(ldap.MOD_REPLACE, 'nsDS5ReplicaBindDnGroupCheckInterval', '60'), + (ldap.MOD_REPLACE, 'nsDS5ReplicaBindDnGroup', group_M1.dn)]) + + # Then pause the replication agreement to prevent them trying to acquire + # while the user is not member of the group + topo_m2.pause_all_replicas() + + # Define the user as the bindDN of the RAs + for inst in (M1, M2): + agmts = Agreements(inst) + agmt = agmts.list()[0] + save_values.append((agmt, 'nsDS5ReplicaBindDN', agmt.get_attr_val_utf8('nsDS5ReplicaBindDN'))) + save_values.append((agmt, 'nsds5ReplicaCredentials', agmt.get_attr_val_utf8('nsds5ReplicaCredentials'))) + agmt.replace('nsDS5ReplicaBindDN', create_user.dn.encode()) + agmt.replace('nsds5ReplicaCredentials', PASSWD.encode()) + + # Key step + # The restart will fetch the group/members define in the replica + # + # The user NOT member of the group replication will not work until bindDNcheckInterval + # + # With the fix, the first fetch is not taken into account (fetch time=0) + # so on the first session, the group will be fetched + M1.restart() + M2.restart() + + # Replication being broken here we need to directly do the same update. 
+ # Sorry not found another solution except total update + group_M1.add_member(create_user.dn) + group_M2.add_member(create_user.dn) + + topo_m2.resume_all_replicas() + + # trigger updates to be sure to have a replication session, giving some time + M1.modify_s(create_user.dn, [(ldap.MOD_ADD, 'description', b'value_1_1')]) + M2.modify_s(create_user.dn, [(ldap.MOD_ADD, 'description', b'value_2_2')]) + time.sleep(10) + + # Check replication is working + ents = M1.search_s(create_user.dn, ldap.SCOPE_BASE, '(objectclass=*)') + for ent in ents: + assert (ent.hasAttr('description')) + found = 0 + for val in ent.getValues('description'): + if (val == b'value_1_1'): + found = found + 1 + elif (val == b'value_2_2'): + found = found + 1 + assert (found == 2) + + ents = M2.search_s(create_user.dn, ldap.SCOPE_BASE, '(objectclass=*)') + for ent in ents: + assert (ent.hasAttr('description')) + found = 0 + for val in ent.getValues('description'): + if (val == b'value_1_1'): + found = found + 1 + elif (val == b'value_2_2'): + found = found + 1 + assert (found == 2) + + # Check in the logs that the member was detected in the group although + # at startup it was not member of the group + regex = re.compile("does not have permission to supply replication updates to the replica.") + errorlog_M1 = open(M1.errlog, "r") + errorlog_M2 = open(M1.errlog, "r") + + # Find the last restart position + restart_location_M1 = find_start_location(errorlog_M1, 2) + assert (restart_location_M1 != -1) + restart_location_M2 = find_start_location(errorlog_M2, 2) + assert (restart_location_M2 != -1) + + # Then check there is no failure to authenticate + count = pattern_errorlog(errorlog_M1, regex, start_location=restart_location_M1) + assert(count <= 1) + count = pattern_errorlog(errorlog_M2, regex, start_location=restart_location_M2) + assert(count <= 1) + + # Restore the agmt values to avoid impacting the other tests. 
+ for entry, attr, val in save_values: + entry.replace(attr, val) + + +def test_plugin_bind_dn_tracking_and_replication(topo_m2): + """Testing nsslapd-plugin-binddn-tracking does not cause issues around + access control and reconfiguring replication/repl agmt. + + :id: dd689d03-69b8-4bf9-a06e-2acd19d5e2c9 + :setup: 2 supplier topology + :steps: + 1. Turn on plugin binddn tracking + 2. Add some users + 3. Make an update as a user + 4. Make an update to the replica config + 5. Make an update to the repliocation agreement + :expectedresults: + 1. Success + 2. Success + 3. Success + 4. Success + 5. Success + """ + + m1 = topo_m2.ms["supplier1"] + + # Turn on bind dn tracking + m1.config.set('nsslapd-plugin-binddn-tracking', 'on') + + # Add two users + users = UserAccounts(m1, DEFAULT_SUFFIX) + user1 = users.create_test_user(uid=1011) + user1.set('userpassword', PASSWORD) + user2 = users.create_test_user(uid=1012) + + # Add an aci + acival = '(targetattr ="cn")(version 3.0;acl "Test bind dn tracking"' + \ + ';allow (all) (userdn = "ldap:///{}");)'.format(user1.dn) + Domain(m1, DEFAULT_SUFFIX).add('aci', acival) + + # Bind as user and make an update + user1.rebind(PASSWORD) + user2.set('cn', 'new value') + dm = DirectoryManager(m1) + dm.rebind() + + # modify replica + replica = Replicas(m1).get(DEFAULT_SUFFIX) + replica.set(REPL_PROTOCOL_TIMEOUT, "30") + + # modify repl agmt + agmt = replica.get_agreements().list()[0] + agmt.set(REPL_PROTOCOL_TIMEOUT, "20") + + +@pytest.mark.bz1314956 +@pytest.mark.ds48755 +def test_moving_entry_make_online_init_fail(topo_m2): + """ + Moving an entry could make the online init fail + + :id: e3895be7-884a-4e9f-80e3-24e9a5167c9e + :setup: Two suppliers replication setup + :steps: + 1. Generate DIT_0 + 2. Generate password policy for DIT_0 + 3. Create users for DIT_0 + 4. Turn idx % 2 == 0 users into tombstones + 5. Generate DIT_1 + 6. Move 'ou=OU0,ou=OU0,dc=example,dc=com' to DIT_1 + 7. Move 'ou=OU0,dc=example,dc=com' to DIT_1 + 8. 
Move idx % 2 == 1 users to 'ou=OU0,ou=OU0,ou=OU1,dc=example,dc=com' + 9. Init replicas + 10. Number of entries should match on both suppliers + + :expectedresults: + 1. Success + 2. Success + 3. Success + 4. Success + 5. Success + 6. Success + 7. Success + 8. Success + 9. Success + 10. Success + """ + + M1 = topo_m2.ms["supplier1"] + M2 = topo_m2.ms["supplier2"] + + log.info("Generating DIT_0") + idx = 0 + add_ou_entry(M1, idx, DEFAULT_SUFFIX) + log.info("Created entry: ou=OU0, dc=example, dc=com") + + ou0 = 'ou=OU%d' % idx + first_parent = '%s,%s' % (ou0, DEFAULT_SUFFIX) + add_ou_entry(M1, idx, first_parent) + log.info("Created entry: ou=OU0, ou=OU0, dc=example, dc=com") + + add_ldapsubentry(M1, first_parent) + + ou_name = 'ou=OU%d,ou=OU%d' % (idx, idx) + second_parent = 'ou=OU%d,%s' % (idx, first_parent) + for idx in range(0, 9): + add_user_entry(M1, idx, ou_name) + if idx % 2 == 0: + log.info("Turning tuser%d into a tombstone entry" % idx) + del_user_entry(M1, idx, ou_name) + + log.info('%s => %s => %s => 10 USERS' % (DEFAULT_SUFFIX, first_parent, second_parent)) + + log.info("Generating DIT_1") + idx = 1 + add_ou_entry(M1, idx, DEFAULT_SUFFIX) + log.info("Created entry: ou=OU1,dc=example,dc=com") + + third_parent = 'ou=OU%d,%s' % (idx, DEFAULT_SUFFIX) + add_ou_entry(M1, idx, third_parent) + log.info("Created entry: ou=OU1, ou=OU1, dc=example, dc=com") + + add_ldapsubentry(M1, third_parent) + + log.info("Moving %s to DIT_1" % second_parent) + OrganizationalUnits(M1, second_parent).get('OU0').rename(ou0, newsuperior=third_parent) + + log.info("Moving %s to DIT_1" % first_parent) + fourth_parent = '%s,%s' % (ou0, third_parent) + OrganizationalUnits(M1, first_parent).get('OU0').rename(ou0, newsuperior=fourth_parent) + + fifth_parent = '%s,%s' % (ou0, fourth_parent) + + ou_name = 'ou=OU0,ou=OU1' + log.info("Moving USERS to %s" % fifth_parent) + for idx in range(0, 9): + if idx % 2 == 1: + rename_entry(M1, idx, ou_name, fifth_parent) + + log.info('%s => %s => %s => 
%s => 10 USERS' % (DEFAULT_SUFFIX, third_parent, fourth_parent, fifth_parent)) + + log.info("Run Initialization.") + repl = ReplicationManager(DEFAULT_SUFFIX) + repl.wait_for_replication(M1, M2, timeout=5) + + m1entries = M1.search_s(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE, + '(|(objectclass=ldapsubentry)(objectclass=nstombstone)(nsuniqueid=*))') + m2entries = M2.search_s(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE, + '(|(objectclass=ldapsubentry)(objectclass=nstombstone)(nsuniqueid=*))') + + log.info("m1entry count - %d", len(m1entries)) + log.info("m2entry count - %d", len(m2entries)) + + assert len(m1entries) == len(m2entries) + + +def get_keepalive_entries(instance, replica): + # Returns the keep alive entries that exists with the suffix of the server instance + try: + entries = instance.search_s(replica.get_suffix(), ldap.SCOPE_ONELEVEL, + "(&(objectclass=ldapsubentry)(cn=repl keep alive*))", + ['cn', 'keepalivetimestamp', 'nsUniqueId', 'modifierTimestamp']) + except ldap.LDAPError as e: + log.fatal('Failed to retrieve keepalive entry (%s) on instance %s: error %s' % (dn, instance, str(e))) + assert False + # No error, so lets log the keepalive entries + if log.isEnabledFor(logging.DEBUG): + for ret in entries: + log.debug("Found keepalive entry:\n"+str(ret)); + return entries + + +def verify_keepalive_entries(topo, expected): + # Check that keep alive entries exists (or not exists) for every suppliers on every suppliers + # Note: The testing method is quite basic: counting that there is one keepalive entry per supplier. + # that is ok for simple test cases like test_online_init_should_create_keepalive_entries but + # not for the general case as keep alive associated with no more existing supplier may exists + # (for example after: db2ldif / demote a supplier / ldif2db / init other suppliers) + # ==> if the function is somehow pushed in lib389, a check better than simply counting the entries + # should be done. 
+ entries = [] + for supplierId in topo.ms: + supplier = topo.ms[supplierId] + for replica in Replicas(supplier).list(): + if (replica.get_role() != ReplicaRole.SUPPLIER): + continue + replica_info = f'supplier: {supplierId} RID: {replica.get_rid()} suffix: {replica.get_suffix()}' + log.debug(f'Checking keepAliveEntries on {replica_info}') + keepaliveEntries = get_keepalive_entries(supplier, replica); + expectedCount = len(topo.ms) if expected else 0 + foundCount = len(keepaliveEntries) + entries += keepaliveEntries + if (foundCount == expectedCount): + log.debug(f'Found {foundCount} keepalive entries as expected on {replica_info}.') + else: + log.error(f'{foundCount} Keepalive entries are found ' + f'while {expectedCount} were expected on {replica_info}.') + assert False + + return entries + + +def test_keepalive_entries(topo_m2): + """Check that keep alive entries are created + + :id: d5940e71-d18a-4b71-aaf7-b9185361fffe + :setup: Two suppliers replication setup + :steps: + 1. Keep alives entries are present + 2. Keep alive entries are updated every 60 seconds + :expectedresults: + 1. Success + 2. 
Success + + """ + + # default interval is 1 hour, too long for test, set it to the minimum of + # 60 seconds + for supplierId in topo_m2.ms: + supplier = topo_m2.ms[supplierId] + replica = Replicas(supplier).get(DEFAULT_SUFFIX) + replica.replace('nsds5ReplicaKeepAliveUpdateInterval', '60') + supplier.restart() + + # verify entries exist + entries = verify_keepalive_entries(topo_m2, True); + + # Get current time from keep alive entry + keep_alive_s1 = str(entries[0].data['keepalivetimestamp']) + keep_alive_s2 = str(entries[1].data['keepalivetimestamp']) + + # Wait for event interval (60 secs) to pass, but first update doesn't + # start until 30 seconds after startup + time.sleep(91) + + # Check keep alives entries have been updated + entries = verify_keepalive_entries(topo_m2, True); + assert keep_alive_s1 != str(entries[0].data['keepalivetimestamp']) + assert keep_alive_s2 != str(entries[1].data['keepalivetimestamp']) + + # Test replication + supplier = topo_m2.ms['supplier1'] + replica = Replicas(supplier).get(DEFAULT_SUFFIX) + assert replica.test_replication([topo_m2.ms['supplier2']]) + + +# Parameters for test_change_repl_passwd +@pytest.mark.parametrize( + "bind_cn", + [ + pytest.param( None, id="using-bind-group"), + pytest.param( "replMgr", id="using-bind-dn"), + ], +) +@pytest.mark.bz1956987 +def test_change_repl_passwd(topo_m2, request, bind_cn): + """Replication may break after changing password. + Testing when agmt bind group are used. + + :id: a305913a-cc76-11ec-b324-482ae39447e5 + :setup: 2 Supplier Instances + :steps: + 1. Insure agmt from supplier1 to supplier2 is properly set to use bind group + 2. Insure agmt from supplier2 to supplier1 is properly set to use bind group + 3. Check that replication is working + 4. Modify supplier1 agreement password and the associated bind entry + 5. Modify supplier2 agreement password and the associated bind entry + 6. Check that replication is working + :expectedresults: + 1. 
Step should run sucessfully without errors + 2. Step should run sucessfully without errors + 3. Replication should work + 4. Step should run sucessfully without errors + 5. Step should run sucessfully without errors + 6. Replication should work + """ + + m1 = topo_m2.ms["supplier1"] + m2 = topo_m2.ms["supplier2"] + # Step 1 + a1 = _AgmtHelper(m1, m2, cn=bind_cn) + a1.init(request) + # Step 2 + a2 = _AgmtHelper(m2, m1, cn=bind_cn) + a2.init(request) + # Step 3 + repl = ReplicationManager(DEFAULT_SUFFIX) + repl.wait_for_replication(m1, m2) + # Step 4 + TEST_ENTRY_NEW_PASS = 'new_pass2' + a1.change_pw(TEST_ENTRY_NEW_PASS) + # Step 5 + a2.change_pw(TEST_ENTRY_NEW_PASS) + # Step 6 + repl.wait_for_replication(m1, m2) + # Mark test as successul before exiting + a1.testok() + a2.testok() + + +@pytest.mark.ds49915 +@pytest.mark.bz1626375 +def test_online_reinit_may_hang(topo_with_sigkill): + """Online reinitialization may hang when the first + entry of the DB is RUV entry instead of the suffix + + :id: cded6afa-66c0-4c65-9651-993ba3f7a49c + :setup: 2 Supplier Instances + :steps: + 1. Export the database + 2. Move RUV entry to the top in the ldif file + 3. Import the ldif file + 4. Check that replication is still working + 5. Online replica initializaton + :expectedresults: + 1. Ldif file should be created successfully + 2. RUV entry should be on top in the ldif file + 3. Import should be successful + 4. Replication should work + 5. 
Server should not hang and consume 100% CPU + """ + M1 = topo_with_sigkill.ms["supplier1"] + M2 = topo_with_sigkill.ms["supplier2"] + M1.stop() + ldif_file = '%s/supplier1.ldif' % M1.get_ldif_dir() + M1.db2ldif(bename=DEFAULT_BENAME, suffixes=[DEFAULT_SUFFIX], + excludeSuffixes=None, repl_data=True, + outputfile=ldif_file, encrypt=False) + _move_ruv(ldif_file) + M1.ldif2db(DEFAULT_BENAME, None, None, None, ldif_file) + M1.start() + # After this server may hang + # Exporting idle server with replication data and reimporting + # should not break replication (Unless we hit issue 5098) + # So let check that replication is still working. + repl = ReplicationManager(DEFAULT_SUFFIX) + repl.test_replication_topology(topo_with_sigkill) + agmt = Agreements(M1).list()[0] + agmt.begin_reinit() + (done, error) = agmt.wait_reinit() + assert done is True + assert error is False + repl = ReplicationManager(DEFAULT_SUFFIX) + repl.test_replication_topology(topo_with_sigkill) + + if DEBUGGING: + # Add debugging steps(if any)... + pass + + +############################################################################## +#### WARNING ! New tests must be added before test_online_reinit_may_hang #### +#### because topo_with_sigkill and topo_m2 fixtures are not compatible as #### +#### topo_with_sigkill stops and destroy topo_m2 instances. #### +############################################################################## + + +if __name__ == '__main__': + # Run isolated + # -s for DEBUG mode + CURRENT_FILE = os.path.realpath(__file__) + pytest.main("-s %s" % CURRENT_FILE) diff --git a/dirsrvtests/tests/suites/replication/regression_m2c2_test.py b/dirsrvtests/tests/suites/replication/regression_m2c2_test.py new file mode 100644 index 0000000..b43369d --- /dev/null +++ b/dirsrvtests/tests/suites/replication/regression_m2c2_test.py @@ -0,0 +1,331 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2021 Red Hat, Inc. +# All rights reserved. 
+# +# License: GPL (version 3 or any later version). +# See LICENSE for details. +# --- END COPYRIGHT BLOCK --- +# +import os +import logging +import pytest +from lib389.utils import * +from lib389._constants import * +from lib389.replica import Replicas, ReplicationManager +from lib389.agreement import Agreements +from lib389.dseldif import * +from lib389.topologies import topology_m2c2 as topo_m2c2 + + +pytestmark = pytest.mark.tier1 + +NEW_SUFFIX_NAME = 'test_repl' +NEW_SUFFIX = 'o={}'.format(NEW_SUFFIX_NAME) +NEW_BACKEND = 'repl_base' +CHANGELOG = 'cn=changelog,{}'.format(DN_USERROOT_LDBM) +MAXAGE_ATTR = 'nsslapd-changelogmaxage' +MAXAGE_STR = '30' +TRIMINTERVAL_STR = '5' +TRIMINTERVAL = 'nsslapd-changelogtrim-interval' + +DEBUGGING = os.getenv("DEBUGGING", default=False) +if DEBUGGING: + logging.getLogger(__name__).setLevel(logging.DEBUG) +else: + logging.getLogger(__name__).setLevel(logging.INFO) +log = logging.getLogger(__name__) + + +def get_agreement(agmts, consumer): + # Get agreement towards consumer among the agremment list + for agmt in agmts.list(): + if (agmt.get_attr_val_utf8('nsDS5ReplicaPort') == str(consumer.port) and + agmt.get_attr_val_utf8('nsDS5ReplicaHost') == consumer.host): + return agmt + return None + + +def test_ruv_url_not_added_if_different_uuid(topo_m2c2): + """Check that RUV url is not updated if RUV generation uuid are different + + :id: 7cc30a4e-0ffd-4758-8f00-e500279af344 + :setup: Two suppliers + two consumers replication setup + :steps: + 1. Generate ldif without replication data + 2. Init both suppliers from that ldif + (to clear the ruvs and generates different generation uuid) + 3. Perform on line init from supplier1 to consumer1 + and from supplier2 to consumer2 + 4. Perform update on both suppliers + 5. Check that c1 RUV does not contains URL towards m2 + 6. Check that c2 RUV does contains URL towards m2 + 7. Perform on line init from supplier1 to supplier2 + 8. Perform update on supplier2 + 9. 
Check that c1 RUV does contains URL towards m2 + :expectedresults: + 1. No error while generating ldif + 2. No error while importing the ldif file + 3. No error and Initialization done. + 4. No error + 5. supplier2 replicaid should not be in the consumer1 RUV + 6. supplier2 replicaid should be in the consumer2 RUV + 7. No error and Initialization done. + 8. No error + 9. supplier2 replicaid should be in the consumer1 RUV + + """ + + # Variables initialization + repl = ReplicationManager(DEFAULT_SUFFIX) + + m1 = topo_m2c2.ms["supplier1"] + m2 = topo_m2c2.ms["supplier2"] + c1 = topo_m2c2.cs["consumer1"] + c2 = topo_m2c2.cs["consumer2"] + + replica_m1 = Replicas(m1).get(DEFAULT_SUFFIX) + replica_m2 = Replicas(m2).get(DEFAULT_SUFFIX) + replica_c1 = Replicas(c1).get(DEFAULT_SUFFIX) + replica_c2 = Replicas(c2).get(DEFAULT_SUFFIX) + + replicid_m2 = replica_m2.get_rid() + + agmts_m1 = Agreements(m1, replica_m1.dn) + agmts_m2 = Agreements(m2, replica_m2.dn) + + m1_m2 = get_agreement(agmts_m1, m2) + m1_c1 = get_agreement(agmts_m1, c1) + m1_c2 = get_agreement(agmts_m1, c2) + m2_m1 = get_agreement(agmts_m2, m1) + m2_c1 = get_agreement(agmts_m2, c1) + m2_c2 = get_agreement(agmts_m2, c2) + + # Step 1: Generate ldif without replication data + m1.stop() + m2.stop() + ldif_file = '%s/norepl.ldif' % m1.get_ldif_dir() + m1.db2ldif(bename=DEFAULT_BENAME, suffixes=[DEFAULT_SUFFIX], + excludeSuffixes=None, repl_data=False, + outputfile=ldif_file, encrypt=False) + # Remove replication metadata that are still in the ldif + # _remove_replication_data(ldif_file) + + # Step 2: Init both suppliers from that ldif + m1.ldif2db(DEFAULT_BENAME, None, None, None, ldif_file) + m2.ldif2db(DEFAULT_BENAME, None, None, None, ldif_file) + m1.start() + m2.start() + + # Step 3: Perform on line init from supplier1 to consumer1 + # and from supplier2 to consumer2 + m1_c1.begin_reinit() + m2_c2.begin_reinit() + (done, error) = m1_c1.wait_reinit() + assert done is True + assert error is False + (done, error) 
= m2_c2.wait_reinit() + assert done is True + assert error is False + + # Step 4: Perform update on both suppliers + repl.test_replication(m1, c1) + repl.test_replication(m2, c2) + + # Step 5: Check that c1 RUV does not contains URL towards m2 + ruv = replica_c1.get_ruv() + log.debug(f"c1 RUV: {ruv}") + url = ruv._rid_url.get(replica_m2.get_rid()) + if url is None: + log.debug(f"No URL for RID {replica_m2.get_rid()} in RUV") + else: + log.debug(f"URL for RID {replica_m2.get_rid()} in RUV is {url}") + log.error(f"URL for RID {replica_m2.get_rid()} found in RUV") + # Note: this assertion fails if issue 2054 is not fixed. + assert False + + # Step 6: Check that c2 RUV does contains URL towards m2 + ruv = replica_c2.get_ruv() + log.debug(f"c1 RUV: {ruv} {ruv._rids} ") + url = ruv._rid_url.get(replica_m2.get_rid()) + if url is None: + log.error(f"No URL for RID {replica_m2.get_rid()} in RUV") + assert False + else: + log.debug(f"URL for RID {replica_m2.get_rid()} in RUV is {url}") + + # Step 7: Perform on line init from supplier1 to supplier2 + m1_m2.begin_reinit() + (done, error) = m1_m2.wait_reinit() + assert done is True + assert error is False + + # Step 8: Perform update on supplier2 + repl.test_replication(m2, c1) + + # Step 9: Check that c1 RUV does contains URL towards m2 + ruv = replica_c1.get_ruv() + log.debug(f"c1 RUV: {ruv} {ruv._rids} ") + url = ruv._rid_url.get(replica_m2.get_rid()) + if url is None: + log.error(f"No URL for RID {replica_m2.get_rid()} in RUV") + assert False + else: + log.debug(f"URL for RID {replica_m2.get_rid()} in RUV is {url}") + + +def test_csngen_state_not_updated_if_different_uuid(topo_m2c2): + """Check that csngen remote offset is not updated if RUV generation uuid are different + + :id: 77694b8e-22ae-11eb-89b2-482ae39447e5 + :setup: Two suppliers + two consumers replication setup + :steps: + 1. Disable m1<->m2 agreement to avoid propagate timeSkew + 2. Generate ldif without replication data + 3. 
Increase time skew on supplier2 + 4. Init both suppliers from that ldif + (to clear the ruvs and generates different generation uuid) + 5. Perform on line init from supplier1 to consumer1 and supplier2 to consumer2 + 6. Perform update on both suppliers + 7. Check that c1 has no time skew + 8. Check that c2 has time skew + 9. Init supplier2 from supplier1 + 10. Perform update on supplier2 + 11. Check that c1 has time skew + :expectedresults: + 1. No error + 2. No error while generating ldif + 3. No error + 4. No error while importing the ldif file + 5. No error and Initialization done. + 6. No error + 7. c1 time skew should be lesser than threshold + 8. c2 time skew should be higher than threshold + 9. No error and Initialization done. + 10. No error + 11. c1 time skew should be higher than threshold + + """ + + # Variables initialization + repl = ReplicationManager(DEFAULT_SUFFIX) + + m1 = topo_m2c2.ms["supplier1"] + m2 = topo_m2c2.ms["supplier2"] + c1 = topo_m2c2.cs["consumer1"] + c2 = topo_m2c2.cs["consumer2"] + + replica_m1 = Replicas(m1).get(DEFAULT_SUFFIX) + replica_m2 = Replicas(m2).get(DEFAULT_SUFFIX) + replica_c1 = Replicas(c1).get(DEFAULT_SUFFIX) + replica_c2 = Replicas(c2).get(DEFAULT_SUFFIX) + + replicid_m2 = replica_m2.get_rid() + + agmts_m1 = Agreements(m1, replica_m1.dn) + agmts_m2 = Agreements(m2, replica_m2.dn) + + m1_m2 = get_agreement(agmts_m1, m2) + m1_c1 = get_agreement(agmts_m1, c1) + m1_c2 = get_agreement(agmts_m1, c2) + m2_m1 = get_agreement(agmts_m2, m1) + m2_c1 = get_agreement(agmts_m2, c1) + m2_c2 = get_agreement(agmts_m2, c2) + + # Step 1: Disable m1<->m2 agreement to avoid propagate timeSkew + m1_m2.pause() + m2_m1.pause() + + # Step 2: Generate ldif without replication data + m1.stop() + m2.stop() + ldif_file = '%s/norepl.ldif' % m1.get_ldif_dir() + m1.db2ldif(bename=DEFAULT_BENAME, suffixes=[DEFAULT_SUFFIX], + excludeSuffixes=None, repl_data=False, + outputfile=ldif_file, encrypt=False) + # Remove replication metadata that are still in 
the ldif + # _remove_replication_data(ldif_file) + + # Step 3: Increase time skew on supplier2 + timeSkew = 6*3600 + # We can modify supplier2 time skew + # But the time skew on the consumer may be smaller + # depending on when the cnsgen generation time is updated + # and when first csn get replicated. + # Since we use timeSkew has threshold value to detect + # whether there are time skew or not, + # lets add a significative margin (longer than the test duration) + # to avoid any risk of erroneous failure + timeSkewMargin = 300 + DSEldif(m2)._increaseTimeSkew(DEFAULT_SUFFIX, timeSkew+timeSkewMargin) + + # Step 4: Init both suppliers from that ldif + m1.ldif2db(DEFAULT_BENAME, None, None, None, ldif_file) + m2.ldif2db(DEFAULT_BENAME, None, None, None, ldif_file) + m1.start() + m2.start() + + # Step 5: Perform on line init from supplier1 to consumer1 + # and from supplier2 to consumer2 + m1_c1.begin_reinit() + m2_c2.begin_reinit() + (done, error) = m1_c1.wait_reinit() + assert done is True + assert error is False + (done, error) = m2_c2.wait_reinit() + assert done is True + assert error is False + + # Step 6: Perform update on both suppliers + repl.test_replication(m1, c1) + repl.test_replication(m2, c2) + + # Step 7: Check that c1 has no time skew + # Stop server to insure that dse.ldif is uptodate + c1.stop() + c1_nsState = DSEldif(c1).readNsState(DEFAULT_SUFFIX)[0] + c1_timeSkew = int(c1_nsState['time_skew']) + log.debug(f"c1 time skew: {c1_timeSkew}") + if (c1_timeSkew >= timeSkew): + log.error(f"c1 csngen state has unexpectedly been synchronized with m2: time skew {c1_timeSkew}") + assert False + c1.start() + time.sleep(5) + + # Step 8: Check that c2 has time skew + # Stop server to insure that dse.ldif is uptodate + c2.stop() + c2_nsState = DSEldif(c2).readNsState(DEFAULT_SUFFIX)[0] + c2_timeSkew = int(c2_nsState['time_skew']) + log.debug(f"c2 time skew: {c2_timeSkew}") + if (c2_timeSkew < timeSkew): + log.error(f"c2 csngen state has not been synchronized with 
m2: time skew {c2_timeSkew}") + assert False + c2.start() + + # Step 9: Perform on line init from supplier1 to supplier2 + m1_c1.pause() + m1_m2.resume() + m1_m2.begin_reinit() + (done, error) = m1_m2.wait_reinit() + assert done is True + assert error is False + + # Step 10: Perform update on supplier2 + repl.test_replication(m2, c1) + + # Step 11: Check that c1 has time skew + # Stop server to insure that dse.ldif is uptodate + c1.stop() + c1_nsState = DSEldif(c1).readNsState(DEFAULT_SUFFIX)[0] + c1_timeSkew = int(c1_nsState['time_skew']) + log.debug(f"c1 time skew: {c1_timeSkew}") + if (c1_timeSkew < timeSkew): + log.error(f"c1 csngen state has not been synchronized with m2: time skew {c1_timeSkew}") + assert False + + +if __name__ == '__main__': + # Run isolated + # -s for DEBUG mode + CURRENT_FILE = os.path.realpath(__file__) + pytest.main("-s %s" % CURRENT_FILE) diff --git a/dirsrvtests/tests/suites/replication/regression_m3_test.py b/dirsrvtests/tests/suites/replication/regression_m3_test.py new file mode 100644 index 0000000..8474210 --- /dev/null +++ b/dirsrvtests/tests/suites/replication/regression_m3_test.py @@ -0,0 +1,172 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2021 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. 
+# --- END COPYRIGHT BLOCK --- +# +import os +import time +import logging +import ldap +import pytest +from lib389.idm.user import TEST_USER_PROPERTIES, UserAccounts +from lib389.utils import * +from lib389._constants import * +from lib389.replica import Changelog5 +from lib389.dseldif import * +from lib389.topologies import topology_m3 as topo_m3 + + +pytestmark = pytest.mark.tier1 + +NEW_SUFFIX_NAME = 'test_repl' +NEW_SUFFIX = 'o={}'.format(NEW_SUFFIX_NAME) +NEW_BACKEND = 'repl_base' +CHANGELOG = 'cn=changelog,{}'.format(DN_USERROOT_LDBM) +MAXAGE_ATTR = 'nsslapd-changelogmaxage' +MAXAGE_STR = '30' +TRIMINTERVAL_STR = '5' +TRIMINTERVAL = 'nsslapd-changelogtrim-interval' + +DEBUGGING = os.getenv("DEBUGGING", default=False) +if DEBUGGING: + logging.getLogger(__name__).setLevel(logging.DEBUG) +else: + logging.getLogger(__name__).setLevel(logging.INFO) +log = logging.getLogger(__name__) + + +def test_cleanallruv_repl(topo_m3): + """Test that cleanallruv could not break replication if anchor csn in ruv originated + in deleted replica + + :id: 46faba9a-897e-45b8-98dc-aec7fa8cec9a + :setup: 3 Suppliers + :steps: + 1. Configure error log level to 8192 in all suppliers + 2. Modify nsslapd-changelogmaxage=30 and nsslapd-changelogtrim-interval=5 for M1 and M2 + 3. Add test users to 3 suppliers + 4. Launch ClearRuv but withForce + 5. Check the users after CleanRUV, because of changelog trimming, it will effect the CLs + :expectedresults: + 1. Error logs should be configured successfully + 2. Modify should be successful + 3. Test users should be added successfully + 4. ClearRuv should be launched successfully + 5. 
Users should be present according to the changelog trimming effect + """ + + M1 = topo_m3.ms["supplier1"] + M2 = topo_m3.ms["supplier2"] + M3 = topo_m3.ms["supplier3"] + + log.info("Change the error log levels for all suppliers") + for s in (M1, M2, M3): + s.config.replace('nsslapd-errorlog-level', "8192") + + log.info("Get the replication agreements for all 3 suppliers") + m1_m2 = M1.agreement.list(suffix=SUFFIX, consumer_host=M2.host, consumer_port=M2.port) + m1_m3 = M1.agreement.list(suffix=SUFFIX, consumer_host=M3.host, consumer_port=M3.port) + m3_m1 = M3.agreement.list(suffix=SUFFIX, consumer_host=M1.host, consumer_port=M1.port) + + log.info("Modify nsslapd-changelogmaxage=30 and nsslapd-changelogtrim-interval=5 for M1 and M2") + if ds_supports_new_changelog(): + CHANGELOG = 'cn=changelog,{}'.format(DN_USERROOT_LDBM) + + # set_value(M1, MAXAGE_ATTR, MAXAGE_STR) + try: + M1.modify_s(CHANGELOG, [(ldap.MOD_REPLACE, MAXAGE_ATTR, ensure_bytes(MAXAGE_STR))]) + except ldap.LDAPError as e: + log.error('Failed to add ' + MAXAGE_ATTR, + ': ' + MAXAGE_STR + ' to ' + CHANGELOG + ': error {}'.format(get_ldap_error_msg(e,'desc'))) + assert False + + # set_value(M2, TRIMINTERVAL, TRIMINTERVAL_STR) + try: + M2.modify_s(CHANGELOG, [(ldap.MOD_REPLACE, TRIMINTERVAL, ensure_bytes(TRIMINTERVAL_STR))]) + except ldap.LDAPError as e: + log.error('Failed to add ' + TRIMINTERVAL, + ': ' + TRIMINTERVAL_STR + ' to ' + CHANGELOG + ': error {}'.format(get_ldap_error_msg(e,'desc'))) + assert False + else: + log.info("Get the changelog enteries for M1 and M2") + changelog_m1 = Changelog5(M1) + changelog_m1.set_max_age(MAXAGE_STR) + changelog_m1.set_trim_interval(TRIMINTERVAL_STR) + + log.info("Add test users to 3 suppliers") + users_m1 = UserAccounts(M1, DEFAULT_SUFFIX) + users_m2 = UserAccounts(M2, DEFAULT_SUFFIX) + users_m3 = UserAccounts(M3, DEFAULT_SUFFIX) + user_props = TEST_USER_PROPERTIES.copy() + + user_props.update({'uid': "testuser10"}) + user10 = 
users_m1.create(properties=user_props) + + user_props.update({'uid': "testuser20"}) + user20 = users_m2.create(properties=user_props) + + user_props.update({'uid': "testuser30"}) + user30 = users_m3.create(properties=user_props) + + # ::important:: the testuser31 is the oldest csn in M2, + # because it will be cleared by changelog trimming + user_props.update({'uid': "testuser31"}) + user31 = users_m3.create(properties=user_props) + + user_props.update({'uid': "testuser11"}) + user11 = users_m1.create(properties=user_props) + + user_props.update({'uid': "testuser21"}) + user21 = users_m2.create(properties=user_props) + # this is to trigger changelog trim and interval values + time.sleep(40) + + # Here M1, M2, M3 should have 11,21,31 and 10,20,30 are CL cleared + M2.stop() + M1.agreement.pause(m1_m2[0].dn) + user_props.update({'uid': "testuser32"}) + user32 = users_m3.create(properties=user_props) + + user_props.update({'uid': "testuser33"}) + user33 = users_m3.create(properties=user_props) + + user_props.update({'uid': "testuser12"}) + user12 = users_m1.create(properties=user_props) + + M3.agreement.pause(m3_m1[0].dn) + M3.agreement.resume(m3_m1[0].dn) + time.sleep(40) + + # Here because of changelog trimming testusers 31 and 32 are CL cleared + # ClearRuv is launched but with Force + M3.stop() + M1.tasks.cleanAllRUV(suffix=SUFFIX, replicaid='3', + force=True, args={TASK_WAIT: False}) + + # here M1 should clear 31 + M2.start() + M1.agreement.pause(m1_m2[0].dn) + M1.agreement.resume(m1_m2[0].dn) + time.sleep(10) + + # Check the users after CleanRUV + expected_m1_users = [user31.dn, user11.dn, user21.dn, user32.dn, user33.dn, user12.dn] + expected_m1_users = [x.lower() for x in expected_m1_users] + expected_m2_users = [user31.dn, user11.dn, user21.dn, user12.dn] + expected_m2_users = [x.lower() for x in expected_m2_users] + + current_m1_users = [user.dn for user in users_m1.list()] + current_m1_users = [x.lower() for x in current_m1_users] + current_m2_users = 
[user.dn for user in users_m2.list()] + current_m2_users = [x.lower() for x in current_m2_users] + + assert set(expected_m1_users).issubset(current_m1_users) + assert set(expected_m2_users).issubset(current_m2_users) + + +if __name__ == '__main__': + # Run isolated + # -s for DEBUG mode + CURRENT_FILE = os.path.realpath(__file__) + pytest.main("-s %s" % CURRENT_FILE) diff --git a/dirsrvtests/tests/suites/replication/repl_agmt_bootstrap_test.py b/dirsrvtests/tests/suites/replication/repl_agmt_bootstrap_test.py new file mode 100644 index 0000000..0697cdc --- /dev/null +++ b/dirsrvtests/tests/suites/replication/repl_agmt_bootstrap_test.py @@ -0,0 +1,129 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2022 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. +# --- END COPYRIGHT BLOCK --- +# +import logging +import pytest +import os +import time +from lib389._constants import DEFAULT_SUFFIX +from lib389.topologies import topology_m2 as topo +from lib389.replica import BootstrapReplicationManager, Replicas +from lib389.idm.user import TEST_USER_PROPERTIES, UserAccounts, UserAccount +from lib389.idm.group import Group + +pytestmark = pytest.mark.tier1 + +DEBUGGING = os.getenv("DEBUGGING", default=False) +if DEBUGGING: + logging.getLogger(__name__).setLevel(logging.DEBUG) +else: + logging.getLogger(__name__).setLevel(logging.INFO) +log = logging.getLogger(__name__) + +BOOTSTRAP_MGR_DN = 'uid=replication manager,cn=config' +BOOTSTRAP_MGR_PWD = 'boostrap_manager_password' +BIND_GROUP_DN = 'cn=replication_managers,' + DEFAULT_SUFFIX + + +def test_repl_agmt_bootstrap_credentials(topo): + """Test that the agreement bootstrap credentials works if the default + credentials fail for some reason. + + :id: 38c8095c-d958-415a-b602-74854b7882b3 + :customerscenario: True + :setup: 2 Supplier Instances + :steps: + 1. Change the bind dn group member passwords + 2. Verify replication is not working + 3. 
Create a new repl manager on supplier 2 for bootstrapping + 4. Add bootstrap credentials to agmt on supplier 1 + 5. Verify replication is now working with bootstrap creds + 6. Trigger new repl session and default credentials are used first + :expectedresults: + 1. Success + 2. Success + 3. Success + 4. Success + 5. Success + 6. Success + """ + + # Gather all of our objects for the test + m1 = topo.ms["supplier1"] + m2 = topo.ms["supplier2"] + supplier1_replica = Replicas(m1).get(DEFAULT_SUFFIX) + supplier2_replica = Replicas(m2).get(DEFAULT_SUFFIX) + supplier2_users = UserAccounts(m2, DEFAULT_SUFFIX) + m1_agmt = supplier1_replica.get_agreements().list()[0] + num_of_original_users = len(supplier2_users.list()) + + # Change the member's passwords which should break replication + bind_group = Group(m2, dn=BIND_GROUP_DN) + members = bind_group.list_members() + for member_dn in members: + member = UserAccount(m2, dn=member_dn) + member.replace('userPassword', 'not_right') + time.sleep(3) + m1_agmt.pause() + m1_agmt.resume() + + # Verify replication is not working, a new user should not be replicated + users = UserAccounts(m1, DEFAULT_SUFFIX) + test_user = users.ensure_state(properties=TEST_USER_PROPERTIES) + time.sleep(3) + assert len(supplier2_users.list()) == num_of_original_users + + # Create a repl manager on replica + repl_mgr = BootstrapReplicationManager(m2, dn=BOOTSTRAP_MGR_DN) + mgr_properties = { + 'uid': 'replication manager', + 'cn': 'replication manager', + 'userPassword': BOOTSTRAP_MGR_PWD, + } + repl_mgr.create(properties=mgr_properties) + + # Update supplier 2 config + supplier2_replica.remove_all('nsDS5ReplicaBindDNGroup') + supplier2_replica.remove_all('nsDS5ReplicaBindDnGroupCheckInterval') + supplier2_replica.replace('nsDS5ReplicaBindDN', BOOTSTRAP_MGR_DN) + + # Add bootstrap credentials to supplier1 agmt, and restart agmt + m1_agmt.replace('nsds5ReplicaBootstrapTransportInfo', 'LDAP') + m1_agmt.replace('nsds5ReplicaBootstrapBindMethod', 'SIMPLE') + 
m1_agmt.replace('nsds5ReplicaBootstrapCredentials', BOOTSTRAP_MGR_PWD) + m1_agmt.replace('nsds5ReplicaBootstrapBindDN', BOOTSTRAP_MGR_DN) + m1_agmt.pause() + m1_agmt.resume() + + # Verify replication is working. The user should have been replicated + time.sleep(3) + assert len(supplier2_users.list()) > num_of_original_users + + # Finally check if the default credentials are used on the next repl + # session. Clear out the logs, and disable log buffering. Then + # trigger a replication update/session. + m1_agmt.pause() + m2.stop() + m2.deleteLog(m2.accesslog) # Clear out the logs + m2.start() + m2.config.set('nsslapd-accesslog-logbuffering', 'off') + m1_agmt.resume() + test_user.delete() + time.sleep(3) + + # We know if the default credentials are used it will fail (err=49) + results = m2.ds_access_log.match('.* err=49 .*') + assert len(results) > 0 + + +if __name__ == '__main__': + # Run isolated + # -s for DEBUG mode + CURRENT_FILE = os.path.realpath(__file__) + pytest.main(["-s", CURRENT_FILE]) + diff --git a/dirsrvtests/tests/suites/replication/replica_config_test.py b/dirsrvtests/tests/suites/replication/replica_config_test.py new file mode 100644 index 0000000..9e8146f --- /dev/null +++ b/dirsrvtests/tests/suites/replication/replica_config_test.py @@ -0,0 +1,308 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2022 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. 
+# --- END COPYRIGHT BLOCK --- +# +import logging +import pytest +import copy +import os +import ldap +from lib389._constants import * +from lib389.topologies import topology_st as topo + +from lib389.replica import Replicas +from lib389.agreement import Agreements +from lib389.utils import ds_is_older + +pytestmark = pytest.mark.tier1 + +DEBUGGING = os.getenv("DEBUGGING", default=False) +if DEBUGGING: + logging.getLogger(__name__).setLevel(logging.DEBUG) +else: + logging.getLogger(__name__).setLevel(logging.INFO) +log = logging.getLogger(__name__) + +notnum = 'invalid' +too_big = '9223372036854775807' +overflow = '9999999999999999999999999999999999999999999999999999999999999999999' + +replica_dict = {'nsDS5ReplicaRoot': 'dc=example,dc=com', + 'nsDS5ReplicaType': '3', + 'nsDS5Flags': '1', + 'nsDS5ReplicaId': '65534', + 'nsds5ReplicaPurgeDelay': '604800', + 'nsDS5ReplicaBindDN': 'cn=u', + 'cn': 'replica'} + +agmt_dict = {'cn': 'test_agreement', + 'nsDS5ReplicaRoot': 'dc=example,dc=com', + 'nsDS5ReplicaHost': 'localhost.localdomain', + 'nsDS5ReplicaPort': '5555', + 'nsDS5ReplicaBindDN': 'uid=tester', + 'nsds5ReplicaCredentials': 'password', + 'nsDS5ReplicaTransportInfo': 'LDAP', + 'nsDS5ReplicaBindMethod': 'SIMPLE'} + + +repl_add_attrs = [('nsDS5ReplicaType', '-1', '4', overflow, notnum, '1'), + ('nsDS5Flags', '-1', '2', overflow, notnum, '1'), + ('nsDS5ReplicaId', '0', '65536', overflow, notnum, '1'), + ('nsds5ReplicaPurgeDelay', '-2', too_big, overflow, notnum, '1'), + ('nsDS5ReplicaBindDnGroupCheckInterval', '-2', too_big, overflow, notnum, '1'), + ('nsds5ReplicaTombstonePurgeInterval', '-2', too_big, overflow, notnum, '1'), + ('nsds5ReplicaProtocolTimeout', '-1', too_big, overflow, notnum, '1'), + ('nsds5ReplicaReleaseTimeout', '-1', too_big, overflow, notnum, '1'), + ('nsds5ReplicaBackoffMin', '0', too_big, overflow, notnum, '3'), + ('nsds5ReplicaBackoffMax', '0', too_big, overflow, notnum, '6'), + ('nsds5ReplicaKeepAliveUpdateInterval', '59', too_big, overflow, 
notnum, '60'),] + +repl_mod_attrs = [('nsDS5Flags', '-1', '2', overflow, notnum, '1'), + ('nsds5ReplicaPurgeDelay', '-2', too_big, overflow, notnum, '1'), + ('nsDS5ReplicaBindDnGroupCheckInterval', '-2', too_big, overflow, notnum, '1'), + ('nsds5ReplicaTombstonePurgeInterval', '-2', too_big, overflow, notnum, '1'), + ('nsds5ReplicaProtocolTimeout', '-1', too_big, overflow, notnum, '1'), + ('nsds5ReplicaReleaseTimeout', '-1', too_big, overflow, notnum, '1'), + ('nsds5ReplicaBackoffMin', '0', too_big, overflow, notnum, '3'), + ('nsds5ReplicaBackoffMax', '0', too_big, overflow, notnum, '6'), + ('nsds5ReplicaKeepAliveUpdateInterval', '59', too_big, overflow, notnum, '60'),] + +agmt_attrs = [ + ('nsds5ReplicaPort', '0', '65535', overflow, notnum, '389'), + ('nsds5ReplicaTimeout', '-1', too_big, overflow, notnum, '6'), + ('nsds5ReplicaBusyWaitTime', '-1', too_big, overflow, notnum, '6'), + ('nsds5ReplicaSessionPauseTime', '-1', too_big, overflow, notnum, '6'), + ('nsds5ReplicaFlowControlWindow', '-1', too_big, overflow, notnum, '6'), + ('nsds5ReplicaFlowControlPause', '-1', too_big, overflow, notnum, '6'), + ('nsds5ReplicaProtocolTimeout', '-1', too_big, overflow, notnum, '6') + ] + +def replica_reset(topo): + """Purge all existing replica details""" + replicas = Replicas(topo.standalone) + for r in replicas.list(): + r.delete() + +def replica_setup(topo): + """Add a valid replica config entry to modify + """ + replicas = Replicas(topo.standalone) + for r in replicas.list(): + r.delete() + return replicas.create(properties=replica_dict) + +def agmt_reset(topo): + """Purge all existing agreements for testing""" + agmts = Agreements(topo.standalone) + for a in agmts.list(): + a.delete() + +def agmt_setup(topo): + """Add a valid replica config entry to modify + """ + # Reset the agreements too. 
+ replica = replica_setup(topo) + agmts = Agreements(topo.standalone, basedn=replica.dn) + for a in agmts.list(): + a.delete() + return agmts.create(properties=agmt_dict) + +def perform_invalid_create(many, properties, attr, value): + my_properties = copy.deepcopy(properties) + my_properties[attr] = value + with pytest.raises(ldap.LDAPError) as ei: + many.create(properties=my_properties) + return ei.value + +def perform_invalid_modify(o, attr, value): + with pytest.raises(ldap.LDAPError) as ei: + o.replace(attr, value) + return ei.value + +@pytest.mark.parametrize("attr, too_small, too_big, overflow, notnum, valid", repl_add_attrs) +def test_replica_num_add(topo, attr, too_small, too_big, overflow, notnum, valid): + """Test all the number values you can set for a replica config entry + + :id: a8b47d4a-a089-4d70-8070-e6181209bf92 + :parametrized: yes + :setup: standalone instance + :steps: + 1. Use a value that is too small + 2. Use a value that is too big + 3. Use a value that overflows the int + 4. Use a value with character value (not a number) + 5. Use a valid value + :expectedresults: + 1. Add is rejected + 2. Add is rejected + 3. Add is rejected + 4. Add is rejected + 5. 
Add is allowed + """ + replica_reset(topo) + + replicas = Replicas(topo.standalone) + + # Test too small + perform_invalid_create(replicas, replica_dict, attr, too_small) + # Test too big + perform_invalid_create(replicas, replica_dict, attr, too_big) + # Test overflow + perform_invalid_create(replicas, replica_dict, attr, overflow) + # test not a number + perform_invalid_create(replicas, replica_dict, attr, notnum) + # Test valid value + my_replica = copy.deepcopy(replica_dict) + my_replica[attr] = valid + replicas.create(properties=my_replica) + +@pytest.mark.parametrize("attr, too_small, too_big, overflow, notnum, valid", repl_mod_attrs) +def test_replica_num_modify(topo, attr, too_small, too_big, overflow, notnum, valid): + """Test all the number values you can set for a replica config entry + + :id: a8b47d4a-a089-4d70-8070-e6181209bf93 + :parametrized: yes + :setup: standalone instance + :steps: + 1. Replace a value that is too small + 2. Repalce a value that is too big + 3. Replace a value that overflows the int + 4. Replace a value with character value (not a number) + 5. Replace a vlue with a valid value + :expectedresults: + 1. Value is rejected + 2. Value is rejected + 3. Value is rejected + 4. Value is rejected + 5. 
Value is allowed + """ + replica = replica_setup(topo) + + # Value too small + perform_invalid_modify(replica, attr, too_small) + # Value too big + perform_invalid_modify(replica, attr, too_big) + # Value overflow + perform_invalid_modify(replica, attr, overflow) + # Value not a number + perform_invalid_modify(replica, attr, notnum) + # Value is valid + replica.replace(attr, valid) + + +@pytest.mark.xfail(reason="Agreement validation current does not work.") +@pytest.mark.parametrize("attr, too_small, too_big, overflow, notnum, valid", agmt_attrs) +def test_agmt_num_add(topo, attr, too_small, too_big, overflow, notnum, valid): + """Test all the number values you can set for a replica config entry + + :id: a8b47d4a-a089-4d70-8070-e6181209bf94 + :parametrized: yes + :setup: standalone instance + :steps: + 1. Use a value that is too small + 2. Use a value that is too big + 3. Use a value that overflows the int + 4. Use a value with character value (not a number) + 5. Use a valid value + :expectedresults: + 1. Add is rejected + 2. Add is rejected + 3. Add is rejected + 4. Add is rejected + 5. 
Add is allowed + """ + + agmt_reset(topo) + replica = replica_setup(topo) + + agmts = Agreements(topo.standalone, basedn=replica.dn) + + # Test too small + perform_invalid_create(agmts, agmt_dict, attr, too_small) + # Test too big + perform_invalid_create(agmts, agmt_dict, attr, too_big) + # Test overflow + perform_invalid_create(agmts, agmt_dict, attr, overflow) + # test not a number + perform_invalid_create(agmts, agmt_dict, attr, notnum) + # Test valid value + my_agmt = copy.deepcopy(agmt_dict) + my_agmt[attr] = valid + agmts.create(properties=my_agmt) + + +@pytest.mark.xfail(reason="Agreement validation current does not work.") +@pytest.mark.parametrize("attr, too_small, too_big, overflow, notnum, valid", agmt_attrs) +def test_agmt_num_modify(topo, attr, too_small, too_big, overflow, notnum, valid): + """Test all the number values you can set for a replica config entry + + :id: a8b47d4a-a089-4d70-8070-e6181209bf95 + :parametrized: yes + :setup: standalone instance + :steps: + 1. Replace a value that is too small + 2. Replace a value that is too big + 3. Replace a value that overflows the int + 4. Replace a value with character value (not a number) + 5. Replace a vlue with a valid value + :expectedresults: + 1. Value is rejected + 2. Value is rejected + 3. Value is rejected + 4. Value is rejected + 5. 
Value is allowed + """ + + agmt = agmt_setup(topo) + + # Value too small + perform_invalid_modify(agmt, attr, too_small) + # Value too big + perform_invalid_modify(agmt, attr, too_big) + # Value overflow + perform_invalid_modify(agmt, attr, overflow) + # Value not a number + perform_invalid_modify(agmt, attr, notnum) + # Value is valid + agmt.replace(attr, valid) + + +@pytest.mark.skipif(ds_is_older('1.4.1.4'), reason="Not implemented") +@pytest.mark.bz1546739 +def test_same_attr_yields_same_return_code(topo): + """Test that various operations with same incorrect attribute value yield same return code + + :id: 4bae88d7-0da8-4a71-b062-9d0ff4e472cf + :setup: standalone instance + :steps: + 1. Purge all replica details + 2. Perform an invalid create operation + 3. Setup replica + 4. Perform an invalid modify operation + :expectedresults: + 1. Success + 2. Value is rejected + 3. Success + 4. Value is rejected + """ + attr = 'nsDS5ReplicaId' + + replica_reset(topo) + replicas = Replicas(topo.standalone) + e = perform_invalid_create(replicas, replica_dict, attr, too_big) + assert type(e) is ldap.UNWILLING_TO_PERFORM + + replica = replica_setup(topo) + e = perform_invalid_modify(replica, attr, too_big) + assert type(e) is ldap.UNWILLING_TO_PERFORM + + +if __name__ == '__main__': + # Run isolated + # -s for DEBUG mode + CURRENT_FILE = os.path.realpath(__file__) + pytest.main(["-s", CURRENT_FILE]) diff --git a/dirsrvtests/tests/suites/replication/replica_roles_test.py b/dirsrvtests/tests/suites/replication/replica_roles_test.py new file mode 100644 index 0000000..5f70488 --- /dev/null +++ b/dirsrvtests/tests/suites/replication/replica_roles_test.py @@ -0,0 +1,125 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2023 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. 
# --- END COPYRIGHT BLOCK ---
#
import logging
import os
import itertools
import pytest
import ldap
from lib389._constants import SUFFIX
from lib389.topologies import topology_st as topo
from lib389.replica import Replicas


log = logging.getLogger(__name__)


# Expected replica configuration attributes for each role.
# "None" means no replica entry should exist for the suffix at all.
ROLE_TO_CONFIG = {
    "None": {},
    "supplier": {
        "nsDS5Flags": 1,
        "nsDS5ReplicaType": 3,
        "nsDS5ReplicaId": 1,
    },
    "hub": {
        "nsDS5Flags": 1,
        "nsDS5ReplicaType": 2,
        "nsDS5ReplicaId": 65535,
    },
    "consumer": {
        "nsDS5Flags": 0,
        "nsDS5ReplicaType": 2,
        "nsDS5ReplicaId": 65535,
    },
}

# Role-independent properties of the replica entry.
REPLICA_PROPERTIES = {
    'cn': 'replica',
    'nsDS5ReplicaRoot': SUFFIX,
    'nsDS5ReplicaBindDN': 'cn=replmgr,cn=config',
}


def verify_role(replicas, role):
    """Verify that the instance has the replica attributes expected for *role*."""
    log.info("Verify role '%s'", role)
    expected = ROLE_TO_CONFIG[role]
    rep = {}
    try:
        replica = replicas.get(SUFFIX)
        rep["nsDS5Flags"] = replica.get_attr_val_int("nsDS5Flags")
        rep["nsDS5ReplicaType"] = replica.get_attr_val_int("nsDS5ReplicaType")
        rep["nsDS5ReplicaId"] = replica.get_attr_val_int("nsDS5ReplicaId")
    except ldap.NO_SUCH_OBJECT:
        # No replica entry: rep stays empty, which matches the "None" role.
        pass
    log.info('verify_role: role: %s expected: %s found: %s', role, expected, rep)
    assert rep == expected


def config_role(replicas, role):
    """Configure the replica entry so the instance has the requested *role*."""
    log.info("Set role to: '%s'", role)
    try:
        replica = replicas.get(SUFFIX)
    except ldap.NO_SUCH_OBJECT:
        replica = None
    # Merge static and role-specific properties; LDAP values must be strings.
    merged = {**REPLICA_PROPERTIES, **ROLE_TO_CONFIG[role]}
    properties = {key: str(val) for key, val in merged.items()}
    if replica:
        if role == "None":
            replica.delete()
        else:
            # Cannot use replica.ensure_state here because:
            #   lib389 complains if nsDS5ReplicaRoot is not set
            #   389ds complains if nsDS5ReplicaRoot it is set
            # replica.ensure_state(rdn='cn=replica', properties=properties)
            mods = [(key, str(val))
                    for key, val in ROLE_TO_CONFIG[role].items()
                    if str(val).lower() != replica.get_attr_val_utf8_l(key)]
            log.debug(f'replica.replace_many({mods})')
            replica.replace_many(*mods)
    elif role != "None":
        replicas.create(properties=properties)


@pytest.mark.parametrize(
    "from_role,to_role",
    itertools.permutations(("None", "supplier", "hub", "consumer"), 2)
)
def test_switching_roles(topo, from_role, to_role):
    """Test all transitions between the replica roles consumer/hub/supplier/none

    :id: 6e9a697b-d5a0-45ff-b9c7-5fa14ea0c102
    :setup: Standalone Instance
    :steps:
        1. Set initial replica role
        2. Verify initial replica role
        3. Set final replica role
        4. Verify final replica role
    :expectedresults:
        1. No error
        2. No error
        3. No error
        4. No error
    """

    inst = topo.standalone
    replicas = Replicas(inst)
    inst.start()
    config_role(replicas, from_role)
    verify_role(replicas, from_role)
    config_role(replicas, to_role)
    verify_role(replicas, to_role)


if __name__ == '__main__':
    # Run isolated
    # -s for DEBUG mode
    CURRENT_FILE = os.path.realpath(__file__)
    pytest.main(["-s", CURRENT_FILE])


# --- BEGIN COPYRIGHT BLOCK ---
# Copyright (C) 2017 Red Hat, Inc.
# All rights reserved.
#
# License: GPL (version 3 or any later version).
# See LICENSE for details.
# --- END COPYRIGHT BLOCK ---
#
import os
import logging
import ldap
import pytest
from ldif import LDIFParser
from lib389.cli_base import LogCapture
from lib389.dbgen import dbgen_users
from lib389.replica import Replicas, ReplicationManager
from lib389.backend import Backends
from lib389.idm.domain import Domain
from lib389.idm.user import UserAccounts
from lib389.tasks import ImportTask
from lib389.topologies import create_topology
from lib389._constants import *

pytestmark = pytest.mark.tier1

TEST_ENTRY_NAME = 'rep2lusr'
NEW_RDN_NAME = 'ruvusr'
# Attributes that must be present on the RUV tombstone entry in a backup.
ATTRIBUTES = ['objectClass', 'nsUniqueId', 'nsds50ruv', 'nsruvReplicaLastModified']
USER_PROPERTIES = {
    'uid': TEST_ENTRY_NAME,
    'cn': TEST_ENTRY_NAME,
    'sn': TEST_ENTRY_NAME,
    'uidNumber': '1001',
    'gidNumber': '2001',
    'userpassword': PASSWORD,
    'description': 'userdesc',
    'homeDirectory': '/home/testuser'
}

DEBUGGING = os.getenv('DEBUGGING', default=False)
if DEBUGGING:
    logging.getLogger(__name__).setLevel(logging.DEBUG)
else:
    logging.getLogger(__name__).setLevel(logging.INFO)
log = logging.getLogger(__name__)


@pytest.fixture(scope="function")
def topo(request):
    """Create Replication Deployment with two suppliers"""

    topology = create_topology({ReplicaRole.SUPPLIER: 2}, request=request)

    topology.logcap = LogCapture()
    return topology


class MyLDIF(LDIFParser):
    """LDIF parser that asserts the RUV entry carries all required attributes."""

    def __init__(self, fp):
        LDIFParser.__init__(self, fp)

    def handle(self, dn, entry):
        # Only inspect the RUV tombstone entry.
        if 'nsuniqueid=' + REPLICA_RUV_UUID in dn:
            for attr in ATTRIBUTES:
                assert entry.get(attr), 'Failed to find attribute: {}'.format(attr)
                log.info('Attribute found in RUV: {}'.format(attr))


def _perform_ldap_operations(topo):
    """Add a test user, modify description, modrdn user and delete it"""

    users = UserAccounts(topo.ms['supplier1'], DEFAULT_SUFFIX)
    log.info('Adding user to supplier1')
    tuser = users.create(properties=USER_PROPERTIES)
    tuser.replace('description', 'newdesc')

    log.info('Modify RDN of user: {}'.format(tuser.dn))
    try:
        topo.ms['supplier1'].modrdn_s(tuser.dn, 'uid={}'.format(NEW_RDN_NAME), 0)
    except ldap.LDAPError as e:
        log.fatal('Failed to modrdn entry: {}'.format(tuser.dn))
        raise e
    tuser = users.get(NEW_RDN_NAME)
    log.info('Deleting user: {}'.format(tuser.dn))
    tuser.delete()


def _compare_memoryruv_and_databaseruv(topo, operation_type):
    """Compare the memoryruv and databaseruv for ldap operations"""

    log.info('Checking memory ruv for ldap: {} operation'.format(operation_type))
    replicas = Replicas(topo.ms['supplier1'])
    replica = replicas.list()[0]
    memory_ruv = replica.get_attr_val_utf8('nsds50ruv')

    log.info('Checking database ruv for ldap: {} operation'.format(operation_type))
    entry = replicas.get_ruv_entry(DEFAULT_SUFFIX)
    database_ruv = entry.getValues('nsds50ruv')[0]
    assert memory_ruv == database_ruv


def test_ruv_entry_backup(topo):
    """Check if db2ldif stores the RUV details in the backup file

    :id: cbe2c473-8578-4caf-ac0a-841140e41e66
    :setup: Replication with two suppliers.
    :steps: 1. Add user to server.
            2. Perform ldap modify, modrdn and delete operations.
            3. Stop the server and backup the database using db2ldif task.
            4. Start the server and check if correct RUV is stored in the backup file.
    :expectedresults:
            1. Add user should PASS.
            2. Ldap operations should PASS.
            3. Database backup using db2ldif task should PASS.
            4. Backup file should contain the correct RUV details.
    """

    log.info('LDAP operations add, modify, modrdn and delete')
    _perform_ldap_operations(topo)

    output_file = os.path.join(topo.ms['supplier1'].get_ldif_dir(), 'supplier1.ldif')
    log.info('Stopping the server instance to run db2ldif task to create backup file')
    topo.ms['supplier1'].stop()
    topo.ms['supplier1'].db2ldif(bename=DEFAULT_BENAME, suffixes=[DEFAULT_SUFFIX], excludeSuffixes=[],
                                 encrypt=False, repl_data=True, outputfile=output_file)
    log.info('Starting the server after backup')
    topo.ms['supplier1'].start()

    log.info('Checking if backup file contains RUV and required attributes')
    with open(output_file, 'r') as ldif_file:
        parser = MyLDIF(ldif_file)
        parser.parse()


@pytest.mark.xfail(reason="No method to safety access DB ruv currently exists online.")
def test_memoryruv_sync_with_databaseruv(topo):
    """Check if memory ruv and database ruv are synced

    :id: 5f38ac5f-6353-460d-bf60-49cafffda5b3
    :setup: Replication with two suppliers.
    :steps: 1. Add user to server and compare memory ruv and database ruv.
            2. Modify description of user and compare memory ruv and database ruv.
            3. Modrdn of user and compare memory ruv and database ruv.
            4. Delete user and compare memory ruv and database ruv.
    :expectedresults:
            1. For add user, the memory ruv and database ruv should be the same.
            2. For modify operation, the memory ruv and database ruv should be the same.
            3. For modrdn operation, the memory ruv and database ruv should be the same.
            4. For delete operation, the memory ruv and database ruv should be the same.
    """

    log.info('Adding user: {} to supplier1'.format(TEST_ENTRY_NAME))
    users = UserAccounts(topo.ms['supplier1'], DEFAULT_SUFFIX)
    tuser = users.create(properties=USER_PROPERTIES)
    _compare_memoryruv_and_databaseruv(topo, 'add')

    log.info('Modify user: {} description'.format(TEST_ENTRY_NAME))
    tuser.replace('description', 'newdesc')
    _compare_memoryruv_and_databaseruv(topo, 'modify')

    log.info('Modify RDN of user: {}'.format(tuser.dn))
    try:
        topo.ms['supplier1'].modrdn_s(tuser.dn, 'uid={}'.format(NEW_RDN_NAME), 0)
    except ldap.LDAPError as e:
        log.fatal('Failed to modrdn entry: {}'.format(tuser.dn))
        raise e
    _compare_memoryruv_and_databaseruv(topo, 'modrdn')

    tuser = users.get(NEW_RDN_NAME)
    log.info('Delete user: {}'.format(tuser.dn))
    tuser.delete()
    _compare_memoryruv_and_databaseruv(topo, 'delete')


def test_ruv_after_reindex(topo):
    """Test that the tombstone RUV entry is not corrupted after a reindex task

    :id: 988c0fab-1905-4dc5-a45d-fbf195843a33
    :setup: 2 suppliers
    :steps:
        1. Reindex database
        2. Perform some updates
        3. Check error log does not have "_entryrdn_insert_key" errors
    :expectedresults:
        1. Success
        2. Success
        3. Success
    """

    inst = topo.ms['supplier1']
    suffix = Domain(inst, "ou=people," + DEFAULT_SUFFIX)
    backends = Backends(inst)
    backend = backends.get(DEFAULT_BENAME)

    # Reindex nsuniqueid
    backend.reindex(attrs=['nsuniqueid'], wait=True)

    # Do some updates
    for idx in range(0, 5):
        suffix.replace('description', str(idx))

    # Check error log for RUV entryrdn errors.  Stopping instance forces RUV
    # to be written and quickly exposes the error
    inst.stop()
    assert not inst.searchErrorsLog("entryrdn_insert_key")


@pytest.mark.ds1317
@pytest.mark.xfail(reason='https://github.com/389ds/389-ds-base/issues/1317')
def test_ruv_after_import(topo):
    """Test the RUV behavior after an LDIF import operation.

    :id: 6843ab56-0291-425c-954b-3002b8352025
    :setup: 2 suppliers
    :steps:
        1. Export LDIF from supplier 1.
        2. Create 1000 test users in supplier 1.
        3. Wait for replication to complete from supplier 1 to supplier 2.
        4. Pause all replicas.
        5. Import LDIF back to supplier 1.
        6. Resume all replicas.
        7. Perform attribute updates.
    :expectedresults:
        1. LDIF export should complete successfully.
        2. Test users should be created successfully.
        3. Replication should complete successfully.
        4. All replicas should be paused.
        5. LDIF import should complete successfully.
        6. All replicas should be resumed.
        7. Attribute updates should complete successfully.
    """

    log.info('Getting supplier instances')
    s1 = topo.ms['supplier1']
    s2 = topo.ms['supplier2']

    log.info('Performing LDIF export on supplier 1')
    ldif_dir = s1.get_ldif_dir()
    export_ldif = ldif_dir + '/export.ldif'
    export_task = Backends(s1).export_ldif(be_names=DEFAULT_BENAME, ldif=export_ldif, replication=True)
    export_task.wait()

    log.info('Creating 1000 test users on supplier 1')
    users = UserAccounts(s1, DEFAULT_SUFFIX)
    for idx in range(0, 1000):
        users.create_test_user(uid=idx)

    log.info('Waiting for replication to complete')
    repl = ReplicationManager(DEFAULT_SUFFIX)
    repl.wait_for_replication(s2, s1)

    log.info('Performing LDIF import on supplier 1')
    r = Backends(s1).get(DEFAULT_BENAME).import_ldif([export_ldif])
    s2.stop()
    r.wait()

    s2.start()

    log.info('Performing attribute updates')
    suffix = Domain(s1, "ou=people," + DEFAULT_SUFFIX)
    for idx in range(0, 5):
        suffix.replace('description', str(idx))

    repl.wait_for_replication(s1, s2)
    repl.wait_for_replication(s2, s1)


if __name__ == '__main__':
    # Run isolated
    # -s for DEBUG mode
    # NOTE: pytest.main() requires a list of arguments; a formatted string
    # is rejected by current pytest releases.
    CURRENT_FILE = os.path.realpath(__file__)
    pytest.main(["-s", CURRENT_FILE])
# --- BEGIN COPYRIGHT BLOCK ---
# Copyright (C) 2022 Red Hat, Inc.
# All rights reserved.
#
# License: GPL (version 3 or any later version).
# See LICENSE for details.
# --- END COPYRIGHT BLOCK ---
#
import logging
import os
import pytest
import ldap
import uuid
from lib389.utils import ds_is_older, valgrind_enable, valgrind_disable, valgrind_get_results_file, valgrind_check_file

from lib389.idm.services import ServiceAccounts
from lib389.idm.group import Groups
from lib389.config import CertmapLegacy, Config
from lib389._constants import DEFAULT_SUFFIX
from lib389.agreement import Agreements
from lib389._mapped_object import DSLdapObject
from lib389.replica import ReplicationManager, Replicas, BootstrapReplicationManager
from lib389.topologies import topology_m2 as topo_m2

pytestmark = pytest.mark.tier1

DEBUGGING = os.getenv("DEBUGGING", default=False)
if DEBUGGING:
    logging.getLogger(__name__).setLevel(logging.DEBUG)
else:
    logging.getLogger(__name__).setLevel(logging.INFO)
log = logging.getLogger(__name__)


def set_sasl_md5_client_auth(inst, to):
    """Configure the agreement from *inst* to *to* to bind with SASL DIGEST-MD5."""
    # Create the certmap before we restart
    cm = CertmapLegacy(to)
    certmaps = cm.list()
    certmaps['default']['nsSaslMapRegexString'] = '^dn:\\(.*\\)'
    certmaps['default']['nsSaslMapBaseDNTemplate'] = 'cn=config'
    certmaps['default']['nsSaslMapFilterTemplate'] = '(objectclass=*)'
    cm.set(certmaps)

    # DIGEST-MD5 needs access to the clear-text password on the server side.
    Config(to).replace("passwordStorageScheme", 'CLEAR')

    # Create a repl manager on the replica
    replication_manager_pwd = 'secret12'
    brm = BootstrapReplicationManager(to)
    try:
        brm.delete()
    except ldap.NO_SUCH_OBJECT:
        pass
    brm.create(properties={
        'cn': brm.common_name,
        'userPassword': replication_manager_pwd
    })
    replication_manager_dn = brm.dn

    replica = Replicas(inst).get(DEFAULT_SUFFIX)
    replica.set('nsDS5ReplicaBindDN', brm.dn)
    replica.remove_all('nsDS5ReplicaBindDNgroup')
    agmt = replica.get_agreements().list()[0]
    agmt.replace_many(
        ('nsDS5ReplicaBindMethod', 'SASL/DIGEST-MD5'),
        ('nsDS5ReplicaTransportInfo', 'LDAP'),
        ('nsDS5ReplicaPort', str(to.port)),
        ('nsDS5ReplicaBindDN', replication_manager_dn),
        ('nsDS5ReplicaCredentials', replication_manager_pwd),
    )


def gen_valgrind_wrapper(sbin_dir):
    """Write a valgrind wrapper script around ns-slapd and return its path."""
    name = f"{sbin_dir}/VALGRIND"
    with open(name, 'w') as f:
        f.write('#!/bin/sh\n')
        # Deliberately break SASL so the leak path gets exercised.
        f.write('export SASL_PATH=foo\n')
        f.write(f'valgrind -q --tool=memcheck --leak-check=yes --leak-resolution=high --num-callers=50 --log-file=/var/tmp/slapd.vg.$$ {sbin_dir}/ns-slapd.original "$@"\n')
    os.chmod(name, 0o755)
    return name


@pytest.fixture
def use_valgrind(topo_m2, request):
    """Run both suppliers' ns-slapd under valgrind for the duration of a test."""

    log.info("Enable valgrind")
    m1 = topo_m2.ms['supplier1']
    m2 = topo_m2.ms['supplier2']
    if m1.has_asan():
        # valgrind and ASAN cannot coexist (pytest.skip raises, ending setup).
        pytest.skip('Testcase using valgrind cannot run on asan enabled build')
    set_sasl_md5_client_auth(m1, m2)
    set_sasl_md5_client_auth(m2, m1)
    m1.stop()
    m2.stop()
    m1.systemd_override = False
    m2.systemd_override = False
    valgrind_enable(m1.ds_paths.sbin_dir, gen_valgrind_wrapper(m1.ds_paths.sbin_dir))

    def fin():
        log.info("Disable valgrind")
        valgrind_disable(m1.ds_paths.sbin_dir)

    request.addfinalizer(fin)


def test_repl_sasl_md5_auth(topo_m2):
    """Test replication with SASL digest-md5 authentication

    :id: 922d16f8-662a-4915-a39e-0aecd7c8e6e2
    :setup: Two supplier replication
    :steps:
        1. Set sasl digest/md4 on both suppliers
        2. Restart the instance
        3. Check that replication works
    :expectedresults:
        1. Success
        2. Success
        3. Replication works
    """

    m1 = topo_m2.ms['supplier1']
    m2 = topo_m2.ms['supplier2']

    set_sasl_md5_client_auth(m1, m2)
    set_sasl_md5_client_auth(m2, m1)

    m1.restart()
    m2.restart()

    repl = ReplicationManager(DEFAULT_SUFFIX)
    repl.test_replication_topology(topo_m2)


@pytest.mark.skipif(not os.path.exists('/usr/bin/valgrind'), reason="valgrind is not installed.")
def test_repl_sasl_leak(topo_m2, use_valgrind):
    """Test replication with SASL digest-md5 authentication

    :id: 180e088e-841c-11ec-af4f-482ae39447e5
    :setup: Two supplier replication, valgrind
    :steps:
        1. Set sasl digest/md4 on both suppliers
        2. Break sasl by setting invalid PATH
        3. Restart the instances
        4. Perform a change
        5. Poke replication 100 times
        6. Stop server
        7. Check presence of "SASL(-4): no mechanism available: No worthy mechs found" message in error log
        8. Check that there is no leak about slapi_ldap_get_lderrno
    :expectedresults:
        1. Success
        2. Success
        3. Success
        4. Success
        5. Success
        6. Success
        7. Success
        8. Success
    """

    m1 = topo_m2.ms['supplier1']
    m2 = topo_m2.ms['supplier2']

    # Break SASL so every replication bind attempt fails.
    os.environ["SASL_PATH"] = 'foo'

    m1.start()
    m2.start()

    resfile = valgrind_get_results_file(m1)

    # Perform a change
    from_groups = Groups(m1, basedn=DEFAULT_SUFFIX, rdn=None)
    from_group = from_groups.get('replication_managers')
    change = str(uuid.uuid4())
    from_group.replace('description', change)

    # Poke replication to trigger the leak
    replica = Replicas(m1).get(DEFAULT_SUFFIX)
    agmt = Agreements(m1, replica.dn).list()[0]
    for i in range(0, 100):
        agmt.pause()
        agmt.resume()

    m1.stop()
    assert m1.searchErrorsLog("worthy")
    assert not valgrind_check_file(resfile, 'slapi_ldap_get_lderrno')


# --- BEGIN COPYRIGHT BLOCK ---
# Copyright (C) 2020 Red Hat, Inc.
# All rights reserved.
#
# License: GPL (version 3 or any later version).
# See LICENSE for details.
+# --- END COPYRIGHT BLOCK --- +# +import pytest +from lib389.tasks import * +from lib389.utils import * +from lib389.topologies import topology_m2 as topo_m2 +from lib389.topologies import topology_m1c1 as m1c1 +from lib389.topologies import topology_st as topo +from lib389.idm.user import UserAccount, UserAccounts +from lib389.plugins import USNPlugin +from lib389.replica import ReplicationManager +from lib389.tombstone import Tombstones +from lib389.agreement import Agreements +from lib389._constants import * + + +pytestmark = pytest.mark.tier1 + + +@pytest.fixture(scope="function") +def _delete_after(request, topo_m2): + def last(): + m1 = topo_m2.ms["supplier1"] + if UserAccounts(m1, DEFAULT_SUFFIX, rdn=None).list(): + for user in UserAccounts(m1, DEFAULT_SUFFIX, rdn=None).list(): + user.delete() + + request.addfinalizer(last) + + +@pytest.mark.bz830337 +def test_deletions_are_not_replicated(topo_m2): + """usn + mmr = deletions are not replicated + + :id: aa4f67ce-a64c-11ea-a6fd-8c16451d917b + :setup: MMR with 2 suppliers + :steps: + 1. Enable USN plugin on both servers + 2. Enable USN plugin on Supplier 2 + 3. Add user + 4. Check that user propagated to Supplier 2 + 5. Check user`s USN on Supplier 1 + 6. Check user`s USN on Supplier 2 + 7. Delete user + 8. Check that deletion of user propagated to Supplier 1 + :expectedresults: + 1. Should succeeds + 2. Should succeeds + 3. Should succeeds + 4. Should succeeds + 5. Should succeeds + 6. Should succeeds + 7. Should succeeds + 8. 
Should succeeds + """ + m1 = topo_m2.ms["supplier1"] + m2 = topo_m2.ms["supplier2"] + # Enable USN plugin on both servers + usn1 = USNPlugin(m1) + usn2 = USNPlugin(m2) + for usn_usn in [usn1, usn2]: + usn_usn.enable() + for instance in [m1, m2]: + instance.restart() + # Add user + user = UserAccounts(m1, DEFAULT_SUFFIX, rdn=None).create_test_user(uid=1, gid=1) + repl_manager = ReplicationManager(DEFAULT_SUFFIX) + repl_manager.wait_for_replication(m1, m2, timeout=100) + # Check that user propagated to Supplier 2 + assert user.dn in [i.dn for i in UserAccounts(m2, DEFAULT_SUFFIX, rdn=None).list()] + user2 = UserAccount(m2, f'uid=test_user_1,{DEFAULT_SUFFIX}') + # Check user`s USN on Supplier 1 + assert user.get_attr_val_utf8('entryusn') + # Check user`s USN on Supplier 2 + assert user2.get_attr_val_utf8('entryusn') + # Delete user + user2.delete() + repl_manager.wait_for_replication(m1, m2, timeout=100) + # Check that deletion of user propagated to Supplier 1 + with pytest.raises(ldap.NO_SUCH_OBJECT): + user.status() + + +@pytest.mark.bz891866 +def test_error_20(topo_m2, _delete_after): + """DS returns error 20 when replacing values of a multi-valued attribute (only when replication is enabled) + + :id: a55bccc6-a64c-11ea-bac8-8c16451d917b + :setup: MMR with 2 suppliers + :steps: + 1. Add user + 2. Change multivalue attribute + :expectedresults: + 1. Should succeeds + 2. Should succeeds + """ + m1 = topo_m2.ms["supplier1"] + m2 = topo_m2.ms["supplier2"] + # Add user + user = UserAccounts(m1, DEFAULT_SUFFIX, rdn=None).create_test_user(uid=1, gid=1) + repl_manager = ReplicationManager(DEFAULT_SUFFIX) + repl_manager.wait_for_replication(m1, m2, timeout=100) + # Change multivalue attribute + assert user.replace_many(('cn', 'BUG 891866'), ('cn', 'Test')) + + +@pytest.mark.bz1955658 +def test_enable_repl_w_master(topo): + """Check that enabling replication with the role "master" succeeds. 
+ + :id: 074fbb38-069e-11ec-98ca-fa163ec212ff + :customerscenario: True + :setup: Create DS standalone instance + :steps: + 1. Create DS standalone instance + 2. Enable replication on supplier with role='master' attribute OR Display appropriate message. + 3. Disable role created above if it was created. + 4. Re-enable replication on supplier with role='supplier' attribute + + :expectedresults: + 1. Success + 2. Success + 3. Success + 4. Success + """ + _err_unknown_role = 'Error: Unknown replication role (master), you must use "supplier", "hub", or "consumer"' + log.info("Enabling replication on supplier with role='master' attribute") + cmd = ('dsconf -D "' + DN_DM + '" standalone1 ' + ' -w ' + PW_DM + ' replication enable --suffix="' + DEFAULT_SUFFIX + + '" --role="master" --replica-id=1 ') + if os.system(cmd) == 0: + log.info("Replication role enabled successfully") + cmd = ('dsconf -D "' + DN_DM + '" standalone1 ' + ' -w ' + PW_DM + ' replication disable --suffix="' + DEFAULT_SUFFIX+' "') + os.system(cmd) + log.info("Disabling replication on supplier with role='master' attribute") + time.sleep(.5) + elif topo.logcap.contains(_err_unknown_role): + log.info("Replication role provided is not supported") + log.info("Enabling replication on supplier with role='supplier' attribute") + cmd = ('dsconf -D "' + DN_DM + '" standalone1 ' + ' -w ' + PW_DM + ' replication enable --suffix="' + DEFAULT_SUFFIX + + '" --role="supplier" --replica-id=1 ') + assert os.system(cmd) == 0 + + +@pytest.mark.bz914305 +def test_segfaults(topo_m2, _delete_after): + """ns-slapd segfaults while trying to delete a tombstone entry + + :id: 9f8f7388-a64c-11ea-b5f7-8c16451d917b + :setup: MMR with 2 suppliers + :steps: + 1. Add new user + 2. Delete user - should leave tombstone entry + 3. Search for tombstone entry + 4. Try to delete tombstone entry + 5. Check if server is still alive + :expectedresults: + 1. Should succeeds + 2. Should succeeds + 3. Should succeeds + 4. Should succeeds + 5. 
Should succeeds + """ + m1 = topo_m2.ms["supplier1"] + # Add user + user = UserAccounts(m1, DEFAULT_SUFFIX, rdn=None).create_test_user(uid=10, gid=1) + # Delete user - should leave tombstone entry + user.delete() + tombstones = Tombstones(m1, DEFAULT_SUFFIX) + # Search for tombstone entry + fil = tombstones.filter("(&(objectClass=nstombstone)(uid=test_user_10))") + assert fil + # Try to delete tombstone entry + for user in fil: + user.delete() + # Check if server is still alive + assert m1.status() + + +def test_adding_deleting(topo_m2, _delete_after): + """Adding attribute with 11 values to entry + + :id: 99842b1e-a64c-11ea-b8e3-8c16451d917b + :setup: MMR with 2 suppliers + :steps: + 1. Adding entry + 2. Adding attribute with 11 values to entry + 3. Removing 4 values from the attribute in the entry + :expectedresults: + 1. Should succeeds + 2. Should succeeds + 3. Should succeeds + """ + m1 = topo_m2.ms["supplier1"] + # Adding entry + user = UserAccounts(m1, DEFAULT_SUFFIX, rdn=None).create_test_user(uid=1, gid=1) + # Adding attribute with 11 values to entry + for val1, val2 in [('description', 'first description'), + ('description', 'second description'), + ('description', 'third description'), + ('description', 'fourth description'), + ('description', 'fifth description'), + ('description', 'sixth description'), + ('description', 'seventh description'), + ('description', 'eighth description'), + ('description', 'nineth description'), + ('description', 'tenth description'), + ('description', 'eleventh description')]: + user.add(val1, val2) + # Removing 4 values from the attribute in the entry + for val1, val2 in [('description', 'first description'), + ('description', 'second description'), + ('description', 'third description'), + ('description', 'fourth description')]: + user.remove(val1, val2) + + +def test_deleting_twice(topo_m2): + """Deleting entry twice crashed a server + + :id: 94045560-a64c-11ea-93d6-8c16451d917b + :setup: MMR with 2 suppliers + :steps: 
+ 1. Adding entry + 2. Deleting the same entry from s1 + 3. Deleting the same entry from s2 after some seconds + :expectedresults: + 1. Should succeeds + 2. Should succeeds + 3. Should succeeds + """ + m1 = topo_m2.ms["supplier1"] + m2 = topo_m2.ms["supplier2"] + # Adding entry + user1 = UserAccounts(m1, DEFAULT_SUFFIX, rdn=None).create_test_user(uid=1, gid=1) + repl_manager = ReplicationManager(DEFAULT_SUFFIX) + repl_manager.wait_for_replication(m1, m2, timeout=100) + user2 = UserAccount(m2, f'uid=test_user_1,{DEFAULT_SUFFIX}') + assert user2.status() + # Deleting the same entry from s1 + user1.delete() + repl_manager.wait_for_replication(m1, m2, timeout=100) + # Deleting the same entry from s2 after some seconds + with pytest.raises(ldap.NO_SUCH_OBJECT): + user2.delete() + assert m1.status() + assert m2.status() + + +def test_rename_entry(topo_m2, _delete_after): + """Rename entry crashed a server + + :id: 3866f9d6-a946-11ea-a3f8-8c16451d917b + :setup: MMR with 2 suppliers + :steps: + 1. Adding entry + 2. Stop Agreement for both + 3. Change description + 4. Change will not reflect on other supplier + 5. Turn on agreement on both + 6. Change will reflect on other supplier + :expectedresults: + 1. Should succeeds + 2. Should succeeds + 3. Should succeeds + 4. Should succeeds + 5. Should succeeds + 6. 
Should succeeds + """ + m1 = topo_m2.ms["supplier1"] + m2 = topo_m2.ms["supplier2"] + # Adding entry + user1 = UserAccounts(m1, DEFAULT_SUFFIX, rdn=None).create_test_user(uid=1, gid=1) + repl_manager = ReplicationManager(DEFAULT_SUFFIX) + repl_manager.wait_for_replication(m1, m2, timeout=100) + user2 = UserAccount(m2, user1.dn) + assert user2.status() + # Stop Agreement for both + agree1 = Agreements(m1).list()[0] + agree2 = Agreements(m2).list()[0] + for agree in [agree1, agree2]: + agree.pause() + # change description + user1.replace('description', 'New Des') + assert user1.get_attr_val_utf8('description') + # Change will not reflect on other supplier + with pytest.raises(AssertionError): + assert user2.get_attr_val_utf8('description') + # Turn on agreement on both + for agree in [agree1, agree2]: + agree.resume() + repl_manager.wait_for_replication(m1, m2, timeout=100) + for instance in [user1, user2]: + assert instance.get_attr_val_utf8('description') + + +def test_userpassword_attribute(topo_m2, _delete_after): + """Modifications of userpassword attribute in an MMR environment were successful + however a error message was displayed in the error logs which was curious. + + :id: bdcf0464-a947-11ea-9f0d-8c16451d917b + :setup: MMR with 2 suppliers + :steps: + 1. Add the test user to S1 + 2. Check that user's has been propogated to Supplier 2 + 3. modify user's userpassword attribute on supplier 2 + 4. check the error logs on suppler 1 to make sure the error message is not there + :expectedresults: + 1. Should succeeds + 2. Should succeeds + 3. Should succeeds + 4. 
Should succeeds + """ + m1 = topo_m2.ms["supplier1"] + m2 = topo_m2.ms["supplier2"] + # Add the test user to S1 + user1 = UserAccounts(m1, DEFAULT_SUFFIX, rdn=None).create_test_user(uid=1, gid=1) + repl_manager = ReplicationManager(DEFAULT_SUFFIX) + repl_manager.wait_for_replication(m1, m2, timeout=100) + # Check that user's has been propogated to Supplier 2 + user2 = UserAccount(m2, user1.dn) + assert user2.status() + # modify user's userpassword attribute on supplier 2 + user2.replace('userpassword', 'fred1') + repl_manager.wait_for_replication(m1, m2, timeout=100) + assert user1.get_attr_val_utf8('userpassword') + # check the error logs on suppler 1 to make sure the error message is not there + assert not m1.searchErrorsLog("can\'t add a change for uid=") + + +def _create_and_delete_tombstone(topo_m2, id): + m1 = topo_m2.ms["supplier1"] + # Add new user + user1 = UserAccounts(m1, DEFAULT_SUFFIX, rdn=None).create_test_user(uid=id, gid=id) + # Delete user - should leave tombstone entry + user1.delete() + tombstones = Tombstones(m1, DEFAULT_SUFFIX) + # Search for tombstone entry + fil = tombstones.filter("(&(objectClass=nstombstone)(uid=test_user_{}*))".format(id))[0] + assert fil + fil.rename("uid=engineer") + assert m1 + + +def test_tombstone_modrdn(topo_m2): + """rhds90 crash on tombstone modrdn + + :id: 846f5042-a948-11ea-ade2-8c16451d917b + :setup: MMR with 2 suppliers + :steps: + 1. Add new user + 2. Delete user - should leave tombstone entry + 3. Search for tombstone entry + 4. Try to modrdn with deleteoldrdn + :expectedresults: + 1. Should succeeds + 2. Should succeeds + 3. Should succeeds + 4. 
def _create_and_delete_tombstone(topo_m2, uid):
    """Create test_user_<uid>, delete it, then modrdn the resulting tombstone.

    Exercises the rhds90 crash: a modrdn with deleteoldrdn on a tombstone
    entry used to bring the server down.
    """
    m1 = topo_m2.ms["supplier1"]
    # Add new user
    user = UserAccounts(m1, DEFAULT_SUFFIX, rdn=None).create_test_user(uid=uid, gid=uid)
    # Delete user - should leave a tombstone entry
    user.delete()
    tombstones = Tombstones(m1, DEFAULT_SUFFIX)
    # Search for the tombstone entry.  Assert on the result list *before*
    # indexing, so a missing tombstone fails the assertion instead of
    # raising a bare IndexError.
    matches = tombstones.filter("(&(objectClass=nstombstone)(uid=test_user_{}*))".format(uid))
    assert matches
    # Try to modrdn with deleteoldrdn - must not crash the server
    matches[0].rename("uid=engineer")


def test_tombstone_modrdn(topo_m2):
    """rhds90 crash on tombstone modrdn

    :id: 846f5042-a948-11ea-ade2-8c16451d917b
    :setup: MMR with 2 suppliers
    :steps:
        1. Add new user
        2. Delete user - should leave tombstone entry
        3. Search for tombstone entry
        4. Try to modrdn with deleteoldrdn
    :expectedresults:
        1. Should succeed
        2. Should succeed
        3. Should succeed
        4. Should succeed
    """
    for uid in [11, 12, 13, 14]:
        _create_and_delete_tombstone(topo_m2, uid)


if __name__ == '__main__':
    # Run isolated
    # -s for DEBUG mode
    CURRENT_FILE = os.path.realpath(__file__)
    # pytest.main() takes a list of argument strings; the old single-string
    # form is rejected by modern pytest.
    pytest.main(["-s", CURRENT_FILE])
@pytest.mark.skipif(get_default_db_lib() != "bdb", reason="Test requires bdb files")
def test_mail_attr_repl(topo_r):
    """Check that no crash happens during mail attribute replication

    :id: 959edc84-05be-4bf9-a541-53afae482052
    :customerscenario: True
    :setup: Replication setup with supplier and consumer instances,
        test user on supplier
    :steps:
        1. Check that user was replicated to consumer
        2. Back up mail database file
        3. Remove mail attribute from the user entry
        4. Restore mail database
        5. Search for the entry with a substring 'mail=user*'
        6. Search for the entry once again to make sure that server is alive
    :expectedresults:
        1. The user should be replicated to consumer
        2. Operation should be successful
        3. The mail attribute should be removed
        4. Operation should be successful
        5. Search should be successful
        6. No crash should happen
    """

    supplier = topo_r.ms["supplier1"]
    consumer = topo_r.cs["consumer1"]
    repl = ReplicationManager(DEFAULT_SUFFIX)

    m_users = UserAccounts(topo_r.ms["supplier1"], DEFAULT_SUFFIX)
    m_user = m_users.ensure_state(properties=TEST_USER_PROPERTIES)
    m_user.ensure_present('mail', 'testuser@redhat.com')

    log.info("Check that replication is working")
    repl.wait_for_replication(supplier, consumer)
    c_users = UserAccounts(topo_r.cs["consumer1"], DEFAULT_SUFFIX)
    c_user = c_users.get('testuser')

    c_bes = Backends(consumer)
    c_be = c_bes.get(DEFAULT_SUFFIX)

    db_dir = c_be.get_attr_val_utf8('nsslapd-directory')

    mail_db = list(filter(lambda fl: fl.startswith("mail"), os.listdir(db_dir)))
    # BUG FIX: the assertion message contained an unfilled '{}' placeholder;
    # include the directory that was actually searched.
    assert mail_db, "mail.* wasn't found in {}".format(db_dir)
    mail_db_path = os.path.join(db_dir, mail_db[0])
    backup_path = os.path.join(DEFAULT_BACKUPDIR, mail_db[0])

    # Copy the db file while the server is down so it is quiescent
    consumer.stop()
    log.info("Back up {} to {}".format(mail_db_path, backup_path))
    shutil.copyfile(mail_db_path, backup_path)
    consumer.start()

    log.info("Remove 'mail' attr from supplier")
    m_user.remove_all('mail')

    log.info("Wait for the replication to happen")
    repl.wait_for_replication(supplier, consumer)

    # Put back the stale mail index/db - this is the crash trigger
    consumer.stop()
    log.info("Restore {} to {}".format(backup_path, mail_db_path))
    shutil.copyfile(backup_path, mail_db_path)
    consumer.start()

    log.info("Make a search for mail attribute in attempt to crash server")
    c_user.get_attr_val("mail")

    log.info("Make sure that server hasn't crashed")
    repl.test_replication(supplier, consumer)
def test_lastupdate_attr_before_init(topo_nr):
    """Check that LastUpdate replica attributes show right values

    :id: bc8ce431-ff65-41f5-9331-605cbcaaa887
    :customerscenario: True
    :setup: Replication setup with supplier and consumer instances
        without initialization
    :steps:
        1. Check nsds5replicaLastUpdateStart value
        2. Check nsds5replicaLastUpdateEnd value
        3. Check nsds5replicaLastUpdateStatus value
        4. Check nsds5replicaLastUpdateStatusJSON is parsable
    :expectedresults:
        1. nsds5replicaLastUpdateStart should be equal to 0
        2. nsds5replicaLastUpdateEnd should be equal to 0
        3. nsds5replicaLastUpdateStatus should not be equal
           to "Replica acquired successfully: Incremental update started"
        4. Success
    """

    supplier = topo_nr.ins["standalone1"]
    consumer = topo_nr.ins["standalone2"]

    repl = ReplicationManager(DEFAULT_SUFFIX)
    repl.create_first_supplier(supplier)

    # Manually create an un-synced consumer (never initialized from the
    # supplier), so the agreement can never acquire the replica.
    consumer_replicas = Replicas(consumer)
    consumer_replicas.create(properties={
        'cn': 'replica',
        'nsDS5ReplicaRoot': DEFAULT_SUFFIX,
        'nsDS5ReplicaId': '65535',
        'nsDS5Flags': '0',
        'nsDS5ReplicaType': '2',
    })

    agmt = repl.ensure_agreement(supplier, consumer)
    # Replication can not work against the un-initialized consumer
    with pytest.raises(Exception):
        repl.wait_for_replication(supplier, consumer, timeout=5)

    # "Never updated" is represented as the Unix epoch in GeneralizedTime
    assert agmt.get_attr_val_utf8('nsds5replicaLastUpdateStart') == "19700101000000Z"
    assert agmt.get_attr_val_utf8("nsds5replicaLastUpdateEnd") == "19700101000000Z"
    assert "replica acquired successfully" not in agmt.get_attr_val_utf8_l("nsds5replicaLastUpdateStatus")

    # make sure the JSON attribute is parsable
    json_status = agmt.get_attr_val_utf8("nsds5replicaLastUpdateStatusJSON")
    if json_status is not None:
        json_obj = json.loads(json_status)
        log.debug("JSON status message: {}".format(json_obj))


if __name__ == '__main__':
    # Run isolated
    # -s for DEBUG mode
    CURRENT_FILE = os.path.realpath(__file__)
    # pytest.main() takes a list of argument strings; the old single-string
    # form is rejected by modern pytest.
    pytest.main(["-s", CURRENT_FILE])
@pytest.fixture(scope="module")
def tls_client_auth(topo_m2):
    """Enable TLS on both suppliers and reconfigure
    both agreements to use TLS Client auth
    """

    m1 = topo_m2.ms['supplier1']
    m2 = topo_m2.ms['supplier2']

    # Older servers spell the secure transport 'SSL', newer ones 'LDAPS'
    if ds_is_older('1.4.0.6'):
        transport = 'SSL'
    else:
        transport = 'LDAPS'

    # Create the certmap before we restart for enable_tls
    cm_m1 = CertmapLegacy(m1)
    cm_m2 = CertmapLegacy(m2)

    # We need to configure the same maps for both ....
    certmaps = cm_m1.list()
    certmaps['default']['DNComps'] = None
    certmaps['default']['CmapLdapAttr'] = 'nsCertSubjectDN'

    cm_m1.set(certmaps)
    cm_m2.set(certmaps)

    # IDIOM FIX: use a plain for-loop instead of a list comprehension
    # executed only for its side effects.
    for inst in topo_m2:
        inst.enable_tls()

    # Create the replication dns
    services = ServiceAccounts(m1, DEFAULT_SUFFIX)
    repl_m1 = services.get('%s:%s' % (m1.host, m1.sslport))
    repl_m1.set('nsCertSubjectDN', m1.get_server_tls_subject())

    repl_m2 = services.get('%s:%s' % (m2.host, m2.sslport))
    repl_m2.set('nsCertSubjectDN', m2.get_server_tls_subject())

    # Check the replication is "done".
    repl = ReplicationManager(DEFAULT_SUFFIX)
    repl.wait_for_replication(m1, m2)
    # Now change the auth type

    replica_m1 = Replicas(m1).get(DEFAULT_SUFFIX)
    agmt_m1 = replica_m1.get_agreements().list()[0]

    agmt_m1.replace_many(
        ('nsDS5ReplicaBindMethod', 'SSLCLIENTAUTH'),
        ('nsDS5ReplicaTransportInfo', transport),
        ('nsDS5ReplicaPort', str(m2.sslport)),
    )
    # Client-cert auth needs no bind DN
    agmt_m1.remove_all('nsDS5ReplicaBindDN')

    replica_m2 = Replicas(m2).get(DEFAULT_SUFFIX)
    agmt_m2 = replica_m2.get_agreements().list()[0]

    agmt_m2.replace_many(
        ('nsDS5ReplicaBindMethod', 'SSLCLIENTAUTH'),
        ('nsDS5ReplicaTransportInfo', transport),
        ('nsDS5ReplicaPort', str(m1.sslport)),
    )
    agmt_m2.remove_all('nsDS5ReplicaBindDN')

    repl.test_replication_topology(topo_m2)

    return topo_m2
def test_ssl_transport(tls_client_auth):
    """Test different combinations for nsDS5ReplicaTransportInfo values

    :id: a3157108-cb98-43e9-ba16-8fb21a4a03e9
    :setup: Two supplier replication, enabled TLS client auth
    :steps:
        1. Set nsDS5ReplicaTransportInfoCheck: SSL or StartTLS or TLS
        2. Restart the instance
        3. Check that replication works
        4. Set nsDS5ReplicaTransportInfoCheck: LDAPS back
    :expectedresults:
        1. Success
        2. Success
        3. Replication works
        4. Success
    """

    m1 = tls_client_auth.ms['supplier1']
    m2 = tls_client_auth.ms['supplier2']
    repl = ReplicationManager(DEFAULT_SUFFIX)
    agmt_m1 = Replicas(m1).get(DEFAULT_SUFFIX).get_agreements().list()[0]
    agmt_m2 = Replicas(m2).get(DEFAULT_SUFFIX).get_agreements().list()[0]

    # Each pair is (transport keyword, whether the secure port is used)
    if ds_is_older('1.4.0.6'):
        check_list = (('TLS', False),)
    else:
        check_list = (('SSL', True), ('StartTLS', False), ('TLS', False))

    for proto, on_secure_port in check_list:
        port_to_m2 = m2.sslport if on_secure_port else m2.port
        port_to_m1 = m1.sslport if on_secure_port else m1.port
        agmt_m1.replace_many(('nsDS5ReplicaTransportInfo', proto),
                             ('nsDS5ReplicaPort', str(port_to_m2)))
        agmt_m2.replace_many(('nsDS5ReplicaTransportInfo', proto),
                             ('nsDS5ReplicaPort', str(port_to_m1)))
        repl.test_replication_topology(tls_client_auth)

    # Restore the secure transport expected by the other tests
    reset_proto = 'SSL' if ds_is_older('1.4.0.6') else 'LDAPS'
    agmt_m1.replace_many(('nsDS5ReplicaTransportInfo', reset_proto),
                         ('nsDS5ReplicaPort', str(m2.sslport)))
    agmt_m2.replace_many(('nsDS5ReplicaTransportInfo', reset_proto),
                         ('nsDS5ReplicaPort', str(m1.sslport)))
    repl.test_replication_topology(tls_client_auth)
def test_extract_pemfiles(tls_client_auth):
    """Test TLS client authentication between two suppliers operates
    as expected with 'on' and 'off' options of nsslapd-extract-pemfiles

    :id: 922d16f8-662a-4915-a39e-0aecd7c8e6e1
    :setup: Two supplier replication, enabled TLS client auth
    :steps:
        1. Check that nsslapd-extract-pemfiles default value is right
        2. Check that replication works with both 'on' and 'off' values
    :expectedresults:
        1. Success
        2. Replication works
    """

    m1 = tls_client_auth.ms['supplier1']
    m2 = tls_client_auth.ms['supplier2']
    repl = ReplicationManager(DEFAULT_SUFFIX)

    # The default flipped from 'off' to 'on' in 1.3.7
    if ds_is_older('1.3.7'):
        default_val = 'off'
    else:
        default_val = 'on'
    attr_val = m1.config.get_attr_val_utf8('nsslapd-extract-pemfiles')
    log.info("Check that nsslapd-extract-pemfiles is {}".format(default_val))
    assert attr_val == default_val

    for extract_pemfiles in ('on', 'off'):
        # BUG FIX: the original message had an unfilled '{}' placeholder and
        # a stray ')' inside the string; log the actual value lazily.
        log.info("Set nsslapd-extract-pemfiles = '%s' and check replication works",
                 extract_pemfiles)
        m1.config.set('nsslapd-extract-pemfiles', extract_pemfiles)
        m2.config.set('nsslapd-extract-pemfiles', extract_pemfiles)
        repl.test_replication_topology(tls_client_auth)
def test_precise_tombstone_purging(topology_m1):
    """ Test precise tombstone purging

    :id: adb86f50-ae76-4ed6-82b4-3cdc30ccab79
    :setup: supplier1 instance
    :steps:
        1. Create and Delete entry to create a tombstone
        2. export ldif, edit, and import ldif
        3. Check tombstones do not contain nsTombstoneCSN
        4. Run fixup task, and verify tombstones now have nsTombstone CSN
        5. Configure tombstone purging
        6. Verify tombstones are purged
    :expectedresults:
        1. Success
        2. Success
        3. Success
        4. Success
        5. Success
        6. Success
    """

    m1 = topology_m1.ms['supplier1']
    m1_tasks = Tasks(m1)
    m1_tasks.log = log

    # Create tombstone entry
    users = UserAccounts(m1, DEFAULT_SUFFIX)
    user = users.create_test_user(uid=1001)
    user.delete()

    # Verify tombstone was created
    tombstones = Tombstones(m1, DEFAULT_SUFFIX)
    assert len(tombstones.list()) == 1

    # Export db, strip nsTombstoneCSN, and import it
    ldif_file = "{}/export.ldif".format(m1.get_ldif_dir())
    args = {EXPORT_REPL_INFO: True,
            TASK_WAIT: True}
    m1_tasks.exportLDIF(DEFAULT_SUFFIX, None, ldif_file, args)
    m1.restart()  # harden test case

    # IDIOM FIX: use context managers so the LDIF file is closed even if
    # an exception fires mid-way (the original used bare open/close pairs).
    with open(ldif_file, "r") as ldif:
        lines = ldif.readlines()

    with open(ldif_file, "w") as ldif:
        for line in lines:
            # Drop every nsTombstoneCSN attribute line
            if not line.lower().startswith('nstombstonecsn'):
                ldif.write(line)

    # import the new ldif file
    log.info('Import replication LDIF file...')
    args = {TASK_WAIT: True}
    m1_tasks.importLDIF(DEFAULT_SUFFIX, None, ldif_file, args)

    # Search for the tombstone again
    tombstones = Tombstones(m1, DEFAULT_SUFFIX)
    assert len(tombstones.list()) == 1

    #
    # Part 3 - test fixup task using the strip option.
    #
    args = {TASK_WAIT: True,
            TASK_TOMB_STRIP: True}
    m1_tasks.fixupTombstones(DEFAULT_BENAME, args)

    # Search for tombstones with nsTombstoneCSN - better not find any
    for ts in tombstones.list():
        assert not ts.present("nsTombstoneCSN")

    # Now run the fixup task
    args = {TASK_WAIT: True}
    m1_tasks.fixupTombstones(DEFAULT_BENAME, args)

    # Search for tombstones with nsTombstoneCSN - better find some
    tombstones = Tombstones(m1, DEFAULT_SUFFIX)
    assert len(tombstones.list()) == 1

    # Verify that all tombstones have a nsTombstoneCSN
    for ts in tombstones.list():
        log.info(f'Checking nsTombstoneCSN on tombstone {ts}')
        assert ts.present("nsTombstoneCSN")

    #
    # Part 4 - Test tombstone purging
    #
    args = {REPLICA_PRECISE_PURGING: b'on',
            REPLICA_PURGE_DELAY: b'5',
            REPLICA_PURGE_INTERVAL: b'5'}
    m1.replica.setProperties(DEFAULT_SUFFIX, None, None, args)

    # Wait for the interval to pass
    log.info('Wait for tombstone purge interval to pass...')
    time.sleep(6)

    # Add an entry to trigger replication
    users.create_test_user(uid=1002)

    # Wait for the interval to pass again
    log.info('Wait for tombstone purge interval to pass again...')
    time.sleep(6)

    # search for tombstones, there should be none
    tombstones = Tombstones(m1, DEFAULT_SUFFIX)
    assert len(tombstones.list()) == 0
def test_replication_with_mod_delete_and_modrdn_operations(topology_m2):
    """ Test replication with modifications

    :id: d7798eb7-8b04-486a-95ea-4cd1a5031fdb
    :setup: Two supplier instances (S1 and S2) with initial users
    :steps:
        1. Pause all replication agreements
        2. Perform a delete operation on S1 (e.g., delete a user)
        3. Perform a modify operation on S1 (e.g., change the description of a test user)
        4. Sleep for 1 second to ensure CSNs are different
        5. Perform a modrdn operation on S2 (e.g., rename a user)
        6. Perform a modify operation on S2 (e.g., change the description of a test user)
        7. Resume all replication agreements
        8. Sleep for 5 seconds to allow replication to propagate
        9. Validate that replication is working
    :expectedresults:
        1. All replication agreements should be paused successfully
        2. User should be deleted on S1
        3. Description should be modified for a test user on S1
        4. 1 second should elapse
        5. User should be renamed on S2
        6. Description should be modified for a test user on S2
        7. All replication agreements should be resumed
        8. Sufficient time should pass to allow replication to propagate
        9. Entries should be in the expected state on both servers
    """

    S1 = topology_m2.ms["supplier1"]
    S2 = topology_m2.ms["supplier2"]
    repl = ReplicationManager(DEFAULT_SUFFIX)

    # Add entries for the test (both on S1, then replicated to S2)
    users_s1 = UserAccounts(S1, DEFAULT_SUFFIX)
    user1 = users_s1.create_test_user(uid=USER1_UID)
    test1 = users_s1.create_test_user(uid=USER2_UID)
    repl.wait_for_replication(S2, S1)

    # Pause so the conflicting operations accumulate in each changelog
    topology_m2.pause_all_replicas()

    # Handles for the *same* entries as seen through S2
    users_s2 = UserAccounts(S2, DEFAULT_SUFFIX)
    user2 = users_s2.get(f"test_user_{USER1_UID}")
    test2 = users_s2.get(f"test_user_{USER2_UID}")

    # Delete operation on S1
    user1.delete()

    # Modify operation on S1
    test1.replace("description", "modified on S1")

    # Ensure CSN different (S2's operations get strictly later CSNs)
    time.sleep(1)

    # modrdn operation on S2 - renames the entry S1 just deleted
    user2.rename(f"uid={NEW_USER2_UID}")

    # Modify operation on S2
    test2.replace("description", "modified on S2")

    # Resume all replication
    topology_m2.resume_all_replicas()

    # Check if replication is working
    repl.wait_for_replication(S2, S1)

    # The delete/modrdn conflict must converge: neither the old nor the
    # renamed entry remains live on either server
    assert not users_s1.exists(f"test_user_{USER1_UID}")
    assert not users_s2.exists(f"test_user_{USER1_UID}")
    assert not users_s1.exists(NEW_USER2_UID)
    assert not users_s2.exists(NEW_USER2_UID)

    # Each server keeps a tombstone under the uid it last saw for the entry
    tombstones = Tombstones(S1, DEFAULT_SUFFIX)
    assert tombstones.filter(f"(&(objectClass=nstombstone)(uid=test_user_{USER1_UID}))")
    tombstones = Tombstones(S2, DEFAULT_SUFFIX)
    assert tombstones.filter(f"(&(objectClass=nstombstone)(uid={NEW_USER2_UID}))")

    # S2's modify carries the later CSN, so it wins on both servers
    assert test1.get_attr_val_utf8("description") == "modified on S2"
    assert test2.get_attr_val_utf8("description") == "modified on S2"
def test_purge_success(topology_m1):
    """Verify that tombstones are created successfully

    :id: adb86f50-ae76-4ed6-82b4-3cdc30ccab78
    :setup: Standalone instance
    :steps:
        1. Enable replication to unexisting instance
        2. Add an entry to the replicated suffix
        3. Delete the entry
        4. Check that tombstone entry exists (objectclass=nsTombstone)
    :expectedresults:
        1. Operation should be successful
        2. The entry should be successfully added
        3. The entry should be successfully deleted
        4. Tombstone entry should exist
    """
    m1 = topology_m1.ms['supplier1']

    users = UserAccounts(m1, DEFAULT_SUFFIX)
    user = users.create(properties=TEST_USER_PROPERTIES)

    tombstones = Tombstones(m1, DEFAULT_SUFFIX)

    # No tombstone until something is deleted
    assert len(tombstones.list()) == 0

    user.delete()

    # The delete leaves exactly one tombstone and no live user
    assert len(tombstones.list()) == 1
    assert len(users.list()) == 0

    ts = tombstones.get('testuser')
    assert ts.exists()

    if not ds_is_older('1.4.0'):
        # Reviving the tombstone brings the user back
        ts.revive()

        assert len(users.list()) == 1
        # BUG FIX: the revived entry was fetched into an unused local;
        # assert that it actually exists instead.
        assert users.get('testuser').exists()


if __name__ == '__main__':
    # Run isolated
    # -s for DEBUG mode
    CURRENT_FILE = os.path.realpath(__file__)
    # pytest.main() takes a list of argument strings; the old single-string
    # form is rejected by modern pytest.
    pytest.main(["-s", CURRENT_FILE])
@pytest.fixture(scope="function")
def reset_ignore_vattr(topo, request):
    """Restore nsslapd-ignore-virtual-attrs on supplier and consumer after the test."""
    s = topo.ms['supplier1']
    c = topo.cs['consumer1']
    # BUG FIX: the original captured the supplier's default and then
    # immediately overwrote it with the consumer's value, so teardown
    # restored the consumer's default onto the supplier.  Keep them separate.
    supplier_default = s.config.get_attr_val_utf8('nsslapd-ignore-virtual-attrs')
    consumer_default = c.config.get_attr_val_utf8('nsslapd-ignore-virtual-attrs')

    def fin():
        # Put each instance back to its own pre-test value
        s.config.set('nsslapd-ignore-virtual-attrs', supplier_default)
        c.config.set('nsslapd-ignore-virtual-attrs', consumer_default)

    request.addfinalizer(fin)
def test_vattr_on_cos_definition_with_replication(topo, reset_ignore_vattr):
    """Test nsslapd-ignore-virtual-attrs configuration attribute
    The attribute is ON by default. If a cos definition is
    added it is moved to OFF in replication scenario

    :id: c1fd8fa1-bd13-478b-9b33-e33b49c587bd
    :customerscenario: True
    :setup: Supplier Consumer
    :steps:
        1. Check the attribute nsslapd-ignore-virtual-attrs is present in cn=config over consumer
        2. Check the default value of attribute nsslapd-ignore-virtual-attrs should be ON over consumer
        3. Create a cos definition for employeeType in supplier
        4. Check the value of nsslapd-ignore-virtual-attrs should be OFF (with a delay for postop processing) over consumer
        5. Check a message "slapi_vattrspi_regattr - Because employeeType,.." in error logs of consumer
        6. Check after deleting cos definition value of attribute nsslapd-ignore-virtual-attrs is set back to ON over consumer
    :expectedresults:
        1. This should be successful
        2. This should be successful
        3. This should be successful
        4. This should be successful
        5. This should be successful
        6. This should be successful
    """
    s = topo.ms['supplier1']
    c = topo.cs['consumer1']
    log.info("Check the attribute nsslapd-ignore-virtual-attrs is present in cn=config over consumer")
    assert c.config.present('nsslapd-ignore-virtual-attrs')

    log.info("Check the default value of attribute nsslapd-ignore-virtual-attrs should be ON over consumer")
    assert c.config.get_attr_val_utf8('nsslapd-ignore-virtual-attrs') == "on"

    # creating CosClassicDefinition in supplier
    log.info("Create a cos definition")
    properties = {'cosTemplateDn': 'cn=cosClassicGenerateEmployeeTypeUsingnsroleTemplates,{}'.format(DEFAULT_SUFFIX),
                  'cosAttribute': 'employeeType',
                  'cosSpecifier': 'nsrole',
                  'cn': 'cosClassicGenerateEmployeeTypeUsingnsrole'}
    cosdef = CosClassicDefinition(s,'cn=cosClassicGenerateEmployeeTypeUsingnsrole,{}'.format(DEFAULT_SUFFIX))\
        .create(properties=properties)

    log.info("Check the default value of attribute nsslapd-ignore-virtual-attrs is OFF over consumer")
    # Delay so the replicated add and its postop processing can flip the flag
    time.sleep(2)
    assert c.config.present('nsslapd-ignore-virtual-attrs', 'off')

    # stop both supplier and consumer
    # NOTE(review): despite the comment above, only the consumer is stopped
    # here (to scan its error log safely) - confirm intent
    c.stop()
    assert c.searchErrorsLog("slapi_vattrspi_regattr - Because employeeType is a new registered virtual attribute , nsslapd-ignore-virtual-attrs was set to \'off\'")
    c.start()
    log.info("Delete a cos definition")
    cosdef.delete()
    repl = ReplicationManager(DEFAULT_SUFFIX)
    log.info("Check Delete was propagated")
    repl.wait_for_replication(s, c)

    log.info("Check the default value of attribute nsslapd-ignore-virtual-attrs is back to ON over consumer")
    # A restart re-evaluates the flag after the cos definition is gone
    s.restart()
    c.restart()
    assert c.config.get_attr_val_utf8('nsslapd-ignore-virtual-attrs') == "on"
def test_vattr_on_filtered_role_with_replication(topo, request):
    """Test nsslapd-ignore-virtual-attrs configuration attribute
    The attribute is ON by default. If a filtered role is
    added it is moved to OFF in replication scenario

    :id: 7b29be88-c8ca-409b-bbb7-ce3962f73f91
    :customerscenario: True
    :setup: Supplier Consumer
    :steps:
        1. Check the attribute nsslapd-ignore-virtual-attrs is present in cn=config over consumer
        2. Check the default value of attribute nsslapd-ignore-virtual-attrs should be ON over consumer
        3. Create a filtered role in supplier
        4. Check the value of nsslapd-ignore-virtual-attrs should be OFF over consumer
        5. Check a message "roles_cache_trigger_update_role - Because of virtual attribute.." in error logs of consumer
        6. Check after deleting role definition value of attribute nsslapd-ignore-virtual-attrs is set back to ON over consumer
    :expectedresults:
        1. This should be successful
        2. This should be successful
        3. This should be successful
        4. This should be successful
        5. This should be successful
        6. This should be successful
    """
    s = topo.ms['supplier1']
    c = topo.cs['consumer1']

    log.info("Check the attribute nsslapd-ignore-virtual-attrs is present in cn=config over consumer")
    assert c.config.present('nsslapd-ignore-virtual-attrs')

    log.info("Check the default value of attribute nsslapd-ignore-virtual-attrs should be ON over consumer")
    assert c.config.get_attr_val_utf8('nsslapd-ignore-virtual-attrs') == "on"

    log.info("Create a filtered role")
    try:
        Organization(s).create(properties={"o": "acivattr"}, basedn=DEFAULT_SUFFIX)
    except Exception:
        # ROBUSTNESS FIX: narrowed the bare 'except:' (which also swallowed
        # KeyboardInterrupt/SystemExit).  Best effort - the organization may
        # already exist from a previous run.
        pass
    roles = FilteredRoles(s, DNBASE)
    roles.create(properties={'cn': 'FILTERROLEENGROLE', 'nsRoleFilter': 'cn=eng*'})

    log.info("Check the default value of attribute nsslapd-ignore-virtual-attrs should be OFF over consumer")
    time.sleep(5)
    assert c.config.present('nsslapd-ignore-virtual-attrs', 'off')

    # Stop the consumer so its error log can be scanned safely
    c.stop()
    assert c.searchErrorsLog("roles_cache_trigger_update_role - Because of virtual attribute definition \(role\), nsslapd-ignore-virtual-attrs was set to \'off\'")

    def fin():
        s.restart()
        c.restart()
        try:
            filtered_roles = FilteredRoles(s, DEFAULT_SUFFIX)
            for i in filtered_roles.list():
                i.delete()
        except Exception:
            # ROBUSTNESS FIX: narrowed bare 'except:' - cleanup stays best effort
            pass
        log.info("Check the default value of attribute nsslapd-ignore-virtual-attrs is back to ON over consumer")
        s.restart()
        c.restart()
        assert c.config.get_attr_val_utf8('nsslapd-ignore-virtual-attrs') == "on"

    request.addfinalizer(fin)
def test_vattr_on_managed_role_replication(topo, request):
    """Test nsslapd-ignore-virtual-attrs configuration attribute
    The attribute is ON by default. If a managed role is
    added it is moved to OFF in replication scenario

    :id: 446f2fc3-bbb2-4835-b14a-cb855db78c6f
    :customerscenario: True
    :setup: Supplier Consumer
    :steps:
        1. Check the attribute nsslapd-ignore-virtual-attrs is present in cn=config over consumer
        2. Check the default value of attribute nsslapd-ignore-virtual-attrs should be ON over consumer
        3. Create a managed role in supplier
        4. Check the value of nsslapd-ignore-virtual-attrs should be OFF over consumer
        5. Check a message "roles_cache_trigger_update_role - Because of virtual attribute.." in error logs of consumer
        6. Check after deleting role definition value of attribute nsslapd-ignore-virtual-attrs is set back to ON over consumer
    :expectedresults:
        1. This should be successful
        2. This should be successful
        3. This should be successful
        4. This should be successful
        5. This should be successful
        6. This should be successful
    """
    s = topo.ms['supplier1']
    c = topo.cs['consumer1']
    log.info("Check the attribute nsslapd-ignore-virtual-attrs is present in cn=config")
    assert c.config.present('nsslapd-ignore-virtual-attrs')

    log.info("Check the default value of attribute nsslapd-ignore-virtual-attrs should be ON")
    assert c.config.get_attr_val_utf8('nsslapd-ignore-virtual-attrs') == "on"

    log.info("Create a managed role")
    roles = ManagedRoles(s, DEFAULT_SUFFIX)
    # CLEANUP FIX: the created role object was bound to an unused variable
    roles.create(properties={"cn": 'ROLE1'})

    log.info("Check the default value of attribute nsslapd-ignore-virtual-attrs should be OFF")
    time.sleep(5)
    assert c.config.present('nsslapd-ignore-virtual-attrs', 'off')

    # Stop the consumer so its error log can be scanned safely
    c.stop()
    assert c.searchErrorsLog("roles_cache_trigger_update_role - Because of virtual attribute definition \(role\), nsslapd-ignore-virtual-attrs was set to \'off\'")

    def fin():
        s.restart()
        c.restart()
        try:
            managed_roles = ManagedRoles(s, DEFAULT_SUFFIX)
            for i in managed_roles.list():
                i.delete()
        except Exception:
            # ROBUSTNESS FIX: narrowed bare 'except:' - cleanup stays best effort
            pass
        log.info("Check the default value of attribute nsslapd-ignore-virtual-attrs is back to ON")
        s.restart()
        c.restart()
        assert c.config.get_attr_val_utf8('nsslapd-ignore-virtual-attrs') == "on"

    request.addfinalizer(fin)


if __name__ == "__main__":
    CURRENT_FILE = os.path.realpath(__file__)
    # pytest.main() takes a list of argument strings; the old single-string
    # form is rejected by modern pytest.
    pytest.main(["-s", "-v", CURRENT_FILE])
@pytest.fixture(params=[(None, (4, 11)),
                        ('2000', (0, 2)),
                        ('0', (4, 11)),
                        ('-5', (4, 11))])
def waitfor_async_attr(topology_m2, request):
    """Sets attribute on all replicas"""

    attr_value, expected_result = request.param

    # Apply the value to the first agreement of every supplier
    for supplier in topology_m2.ms.values():
        agmt = Agreements(supplier).list()[0]

        if attr_value:
            agmt.set_wait_for_async_results(attr_value)
        else:
            try:
                agmt.remove_wait_for_async_results()
            except ldap.NO_SUCH_ATTRIBUTE:
                # The attribute may already be absent - a double removal is fine
                pass

    return (attr_value, expected_result)


@pytest.fixture
def entries(topology_m2, request):
    """Adds entries to the supplier1"""

    supplier1 = topology_m2.ms["supplier1"]

    created_ous = []

    log.info("Add 100 nested entries under replicated suffix on %s" % supplier1.serverid)
    ous = OrganizationalUnits(supplier1, DEFAULT_SUFFIX)
    for idx in range(100):
        created_ous.append(ous.create(properties={
            'ou' : 'test_ou_%s' % idx,
        }))

    log.info("Delete created entries")
    for entry in created_ous:
        entry.delete()

    def fin():
        log.info("Clear the errors log in the end of the test case")
        with open(supplier1.errlog, 'w') as errlog:
            errlog.writelines("")

    request.addfinalizer(fin)


def test_not_int_value(topology_m2):
    """Tests not integer value

    :id: 67c9994f-9251-425a-8197-8d12ad9beafc
    :setup: Replication with two suppliers
    :steps:
        1. Try to set some string value
           to nsDS5ReplicaWaitForAsyncResults
    :expectedresults:
        1. Invalid syntax error should be raised
    """
    first_agmt = Agreements(topology_m2.ms["supplier1"]).list()[0]

    with pytest.raises(ldap.INVALID_SYNTAX):
        first_agmt.set_wait_for_async_results("ws2")
Object class violation error should be raised + """ + + supplier1 = topology_m2.ms["supplier1"] + agmt = Agreements(supplier1).list()[0] + + agmt.set_wait_for_async_results('100') + with pytest.raises(ldap.OBJECT_CLASS_VIOLATION): + agmt.add('nsDS5ReplicaWaitForAsyncResults', '101') + +def test_value_check(topology_m2, waitfor_async_attr): + """Checks that value has been set correctly + + :id: 3e81afe9-5130-410d-a1bb-d798d8ab8519 + :parametrized: yes + :setup: Replication with two suppliers, + wait for async set on all suppliers, try: + None, '2000', '0', '-5' + :steps: + 1. Search for nsDS5ReplicaWaitForAsyncResults on supplier 1 + 2. Search for nsDS5ReplicaWaitForAsyncResults on supplier 2 + :expectedresults: + 1. nsDS5ReplicaWaitForAsyncResults should be set correctly + 2. nsDS5ReplicaWaitForAsyncResults should be set correctly + """ + + attr_value = waitfor_async_attr[0] + + for supplier in topology_m2.ms.values(): + agmt = Agreements(supplier).list()[0] + + server_value = agmt.get_wait_for_async_results_utf8() + assert server_value == attr_value + +def test_behavior_with_value(topology_m2, waitfor_async_attr, entries): + """Tests replication behavior with valid + nsDS5ReplicaWaitForAsyncResults attribute values + + :id: 117b6be2-cdab-422e-b0c7-3b88bbeec036 + :parametrized: yes + :setup: Replication with two suppliers, + wait for async set on all suppliers, try: + None, '2000', '0', '-5' + :steps: + 1. Set Replication Debugging loglevel for the errorlog + 2. Set nsslapd-logging-hr-timestamps-enabled to 'off' on both suppliers + 3. Gather all sync attempts, group by timestamp + 4. Take the most common timestamp and assert it has appeared + in the set range + :expectedresults: + 1. Replication Debugging loglevel should be set + 2. nsslapd-logging-hr-timestamps-enabled should be set + 3. Operation should be successful + 4. 
Errors log should have all timestamp appear + """ + + supplier1 = topology_m2.ms["supplier1"] + supplier2 = topology_m2.ms["supplier2"] + + log.info("Set Replication Debugging loglevel for the errorlog") + supplier1.config.loglevel((ErrorLog.REPLICA,)) + supplier2.config.loglevel((ErrorLog.REPLICA,)) + + sync_dict = Counter() + min_ap = waitfor_async_attr[1][0] + max_ap = waitfor_async_attr[1][1] + + time.sleep(20) + + log.info("Gather all sync attempts within Counter dict, group by timestamp") + with open(supplier1.errlog, 'r') as errlog: + errlog_filtered = filter(lambda x: "waitfor_async_results" in x, errlog) + + # Watch only over unsuccessful sync attempts + for line in errlog_filtered: + if line.split()[3] != line.split()[4]: + # A timestamp looks like: + # [03/Jan/2018:14:35:15.806396035 +1000] LOGMESSAGE HERE + # We want to assert a range of "seconds", so we need to reduce + # this to a reasonable amount. IE: + # [03/Jan/2018:14:35:15 + # So to achieve this we split on ] and . IE. 
+ # [03/Jan/2018:14:35:15.806396035 +1000] LOGMESSAGE HERE + # ^ split here first + # ^ now split here + # [03/Jan/2018:14:35:15 + # ^ final result + timestamp = line.split(']')[0].split('.')[0] + sync_dict[timestamp] += 1 + + log.info("Take the most common timestamp and assert it has appeared " \ + "in the range from %s to %s times" % (min_ap, max_ap)) + most_common_val = sync_dict.most_common(1)[0][1] + log.debug("%s <= %s <= %s" % (min_ap, most_common_val, max_ap)) + assert min_ap <= most_common_val <= max_ap + + +if __name__ == '__main__': + # Run isolated + # -s for DEBUG mode + CURRENT_FILE = os.path.realpath(__file__) + pytest.main("-s %s" % CURRENT_FILE) diff --git a/dirsrvtests/tests/suites/resource_limits/__init__.py b/dirsrvtests/tests/suites/resource_limits/__init__.py new file mode 100644 index 0000000..6e1d204 --- /dev/null +++ b/dirsrvtests/tests/suites/resource_limits/__init__.py @@ -0,0 +1,3 @@ +""" + :Requirement: 389-ds-base: Resource Limits +""" diff --git a/dirsrvtests/tests/suites/resource_limits/fdlimits_test.py b/dirsrvtests/tests/suites/resource_limits/fdlimits_test.py new file mode 100644 index 0000000..2d8cd9a --- /dev/null +++ b/dirsrvtests/tests/suites/resource_limits/fdlimits_test.py @@ -0,0 +1,129 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2022 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. 
+# --- END COPYRIGHT BLOCK --- +# +import logging +import pytest +import os +import ldap +import resource +from lib389.backend import Backends +from lib389._constants import * +from lib389.topologies import topology_st +from lib389.utils import ds_is_older, ensure_str +from subprocess import check_output + +pytestmark = pytest.mark.tier1 + +logging.getLogger(__name__).setLevel(logging.INFO) +log = logging.getLogger(__name__) + +FD_ATTR = "nsslapd-maxdescriptors" +RESRV_FD_ATTR = "nsslapd-reservedescriptors" +GLOBAL_LIMIT = resource.getrlimit(resource.RLIMIT_NOFILE)[1] +SYSTEMD_LIMIT = ensure_str(check_output("systemctl show -p LimitNOFILE dirsrv@standalone1".split(" ")).strip()).split('=')[1] +CUSTOM_VAL = str(int(SYSTEMD_LIMIT) - 10) +RESRV_DESC_VAL = str(10) +TOO_HIGH_VAL = str(GLOBAL_LIMIT * 2) +TOO_HIGH_VAL2 = str(int(SYSTEMD_LIMIT) * 2) +TOO_LOW_VAL = "0" + +@pytest.mark.skipif(ds_is_older("1.4.1.2"), reason="Not implemented") +def test_fd_limits(topology_st): + """Test the default limits, and custom limits + + :id: fa0a5106-612f-428f-84c0-9c85c34d0433 + :setup: Standalone Instance + :steps: + 1. Check default limit + 2. Change default limit + 3. Check invalid/too high limits are rejected + 4. Check invalid/too low limit is rejected + :expectedresults: + 1. Success + 2. Success + 3. Success + 4. 
Success + """ + + # Check systemd default + max_fd = topology_st.standalone.config.get_attr_val_utf8(FD_ATTR) + assert max_fd == SYSTEMD_LIMIT + + # Check custom value is applied + topology_st.standalone.config.set(FD_ATTR, CUSTOM_VAL) + max_fd = topology_st.standalone.config.get_attr_val_utf8(FD_ATTR) + assert max_fd == CUSTOM_VAL + + # # Attempt to use value that is higher than the global system limit + with pytest.raises(ldap.UNWILLING_TO_PERFORM): + topology_st.standalone.config.set(FD_ATTR, TOO_HIGH_VAL) + max_fd = topology_st.standalone.config.get_attr_val_utf8(FD_ATTR) + assert max_fd == CUSTOM_VAL + + # Attempt to use value that is higher than the value defined in the systemd service + with pytest.raises(ldap.UNWILLING_TO_PERFORM): + topology_st.standalone.config.set(FD_ATTR, TOO_HIGH_VAL2) + max_fd = topology_st.standalone.config.get_attr_val_utf8(FD_ATTR) + assert max_fd == CUSTOM_VAL + + # Attempt to use val that is too low + with pytest.raises(ldap.OPERATIONS_ERROR): + topology_st.standalone.config.set(FD_ATTR, TOO_LOW_VAL) + max_fd = topology_st.standalone.config.get_attr_val_utf8(FD_ATTR) + assert max_fd == CUSTOM_VAL + + log.info("test_fd_limits PASSED") + +@pytest.mark.skipif(ds_is_older("1.4.1.2"), reason="Not implemented") +def test_reserve_descriptor_validation(topology_st): + """Test the reserve descriptor self check + + :id: TODO + :setup: Standalone Instance + :steps: + 1. Set attr nsslapd-reservedescriptors to a low value of RESRV_DESC_VAL (10) + 2. Verify low value has been set + 3. Restart instance (On restart the reservedescriptor attr will be validated) + 4. Check updated value for nsslapd-reservedescriptors attr + :expectedresults: + 1. Success + 2. A value of RESRV_DESC_VAL (10) is returned + 3. Success + 4. 
A value of STANDALONE_INST_RESRV_DESCS (55) is returned + """ + + # Set nsslapd-reservedescriptors to a low value (RESRV_DESC_VAL:10) + topology_st.standalone.config.set(RESRV_FD_ATTR, RESRV_DESC_VAL) + resrv_fd = topology_st.standalone.config.get_attr_val_utf8(RESRV_FD_ATTR) + assert resrv_fd == RESRV_DESC_VAL + + # An instance restart triggers a validation of the configured nsslapd-reservedescriptors attribute + topology_st.standalone.restart() + + """ + A standalone instance contains a single backend with default indexes + so we only check these. TODO add tests for repl, chaining, PTA, SSL + """ + STANDALONE_INST_RESRV_DESCS = 20 # 20 = Reserve descriptor constant + backends = Backends(topology_st.standalone) + STANDALONE_INST_RESRV_DESCS += (len(backends.list()) * 4) # 4 = Backend descriptor constant + for be in backends.list() : + STANDALONE_INST_RESRV_DESCS += len(be.get_indexes().list()) + + # Varify reservedescriptors has been updated + resrv_fd = topology_st.standalone.config.get_attr_val_utf8(RESRV_FD_ATTR) + assert resrv_fd == str(STANDALONE_INST_RESRV_DESCS) + + log.info("test_reserve_descriptor_validation PASSED") + + +if __name__ == '__main__': + # Run isolated + # -s for DEBUG mode + CURRENT_FILE = os.path.realpath(__file__) + pytest.main(["-s", CURRENT_FILE]) diff --git a/dirsrvtests/tests/suites/retrocl/__init__.py b/dirsrvtests/tests/suites/retrocl/__init__.py new file mode 100644 index 0000000..404a59b --- /dev/null +++ b/dirsrvtests/tests/suites/retrocl/__init__.py @@ -0,0 +1,3 @@ +""" + :Requirement: 389-ds-base: Retro Changelog plugin +""" diff --git a/dirsrvtests/tests/suites/retrocl/basic_test.py b/dirsrvtests/tests/suites/retrocl/basic_test.py new file mode 100644 index 0000000..85d6b08 --- /dev/null +++ b/dirsrvtests/tests/suites/retrocl/basic_test.py @@ -0,0 +1,426 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2021 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. 
+# --- END COPYRIGHT BLOCK --- + +import logging +import ldap +import pytest +from lib389.topologies import topology_st +from lib389.plugins import RetroChangelogPlugin +from lib389._constants import * +from lib389.utils import * +from lib389.tasks import * +from lib389.cli_base import FakeArgs, connect_instance, disconnect_instance +from lib389.cli_base.dsrc import dsrc_arg_concat +from lib389.cli_conf.plugins.retrochangelog import retrochangelog_add +from lib389.idm.user import UserAccount, UserAccounts +from lib389.idm.domain import Domain +from lib389._mapped_object import DSLdapObjects + +pytestmark = pytest.mark.tier1 + +USER1_DN = 'uid=user1,ou=people,'+ DEFAULT_SUFFIX +USER2_DN = 'uid=user2,ou=people,'+ DEFAULT_SUFFIX +USER_PW = 'password' +ATTR_HOMEPHONE = 'homePhone' +ATTR_CARLICENSE = 'carLicense' + +log = logging.getLogger(__name__) + +#unstable or unstatus tests, skipped for now +@pytest.mark.flaky(max_runs=2, min_passes=1) +def test_retrocl_exclude_attr_add(topology_st): + """ Test exclude attribute feature of the retrocl plugin for add operation + + :id: 3481650f-2070-45ef-9600-2500cfc51559 + + :setup: Standalone instance + + :steps: + 1. Enable dynamic plugins + 2. Confige retro changelog plugin + 3. Add an entry + 4. Ensure entry attrs are in the changelog + 5. Exclude an attr + 6. Add another entry + 7. Ensure excluded attr is not in the changelog + + :expectedresults: + 1. Success + 2. Success + 3. Success + 4. Success + 5. Success + 6. Success + 7. 
Success + """ + + st = topology_st.standalone + + log.info('Configure retrocl plugin') + rcl = RetroChangelogPlugin(st) + rcl.disable() + rcl.enable() + rcl.replace('nsslapd-attribute', 'nsuniqueid:targetUniqueId') + + log.info('Restarting instance') + try: + st.restart() + except ldap.LDAPError as e: + ldap.error('Failed to restart instance ' + e.args[0]['desc']) + assert False + + users = UserAccounts(st, DEFAULT_SUFFIX) + + log.info('Adding user1') + try: + users.create(properties={ + 'sn': '1', + 'cn': 'user 1', + 'uid': 'user1', + 'uidNumber': '11', + 'gidNumber': '111', + 'givenname': 'user1', + 'homePhone': '0861234567', + 'carLicense': '131D16674', + 'mail': 'user1@whereever.com', + 'homeDirectory': '/home/user1', + 'userpassword': USER_PW}) + except ldap.ALREADY_EXISTS: + pass + except ldap.LDAPError as e: + log.error("Failed to add user1: " + str(e)) + + log.info('Verify homePhone and carLicense attrs are in the changelog changestring') + try: + retro_changelog_suffix = DSLdapObjects(st, basedn=RETROCL_SUFFIX) + cllist = retro_changelog_suffix.filter(f'(targetDn={USER1_DN})') + except ldap.LDAPError as e: + log.fatal("Changelog search failed, error: " + str(e)) + assert False + assert len(cllist) > 0 + if cllist[0].present('changes'): + clstr = str(cllist[0].get_attr_vals_utf8('changes')) + assert ATTR_HOMEPHONE in clstr + assert ATTR_CARLICENSE in clstr + + log.info('Excluding attribute ' + ATTR_HOMEPHONE) + args = FakeArgs() + args.connections = [st.host + ':' + str(st.port) + ':' + DN_DM + ':' + PW_DM] + args.instance = 'standalone1' + args.basedn = None + args.binddn = None + args.starttls = False + args.pwdfile = None + args.bindpw = None + args.prompt = False + args.exclude_attrs = ATTR_HOMEPHONE + args.func = retrochangelog_add + dsrc_inst = dsrc_arg_concat(args, None) + inst = connect_instance(dsrc_inst, False, args) + result = args.func(inst, None, log, args) + disconnect_instance(inst) + assert result is None + + log.info('Restarting instance') 
+ try: + st.restart() + except ldap.LDAPError as e: + ldap.error('Failed to restart instance ' + e.args[0]['desc']) + assert False + + log.info('Adding user2') + try: + users.create(properties={ + 'sn': '2', + 'cn': 'user 2', + 'uid': 'user2', + 'uidNumber': '22', + 'gidNumber': '222', + 'givenname': 'user2', + 'homePhone': '0879088363', + 'carLicense': '04WX11038', + 'mail': 'user2@whereever.com', + 'homeDirectory': '/home/user2', + 'userpassword': USER_PW}) + except ldap.ALREADY_EXISTS: + pass + except ldap.LDAPError as e: + log.error("Failed to add user2: " + str(e)) + + log.info('Verify homePhone attr is not in the changelog changestring') + try: + cllist = retro_changelog_suffix.filter(f'(targetDn={USER2_DN})') + assert len(cllist) > 0 + if cllist[0].present('changes'): + clstr = str(cllist[0].get_attr_vals_utf8('changes')) + assert ATTR_HOMEPHONE not in clstr + assert ATTR_CARLICENSE in clstr + except ldap.LDAPError as e: + log.fatal("Changelog search failed, error: " + str(e)) + assert False + +#unstable or unstatus tests, skipped for now +@pytest.mark.flaky(max_runs=2, min_passes=1) +def test_retrocl_exclude_attr_mod(topology_st): + """ Test exclude attribute feature of the retrocl plugin for mod operation + + :id: f6bef689-685b-4f86-a98d-f7e6b1fcada3 + + :setup: Standalone instance + + :steps: + 1. Enable dynamic plugins + 2. Confige retro changelog plugin + 3. Add user1 entry + 4. Ensure entry attrs are in the changelog + 5. Exclude an attr + 6. Modify user1 entry + 7. Ensure excluded attr is not in the changelog + + :expectedresults: + 1. Success + 2. Success + 3. Success + 4. Success + 5. Success + 6. Success + 7. 
Success + """ + + st = topology_st.standalone + + log.info('Configure retrocl plugin') + rcl = RetroChangelogPlugin(st) + rcl.disable() + rcl.enable() + rcl.replace('nsslapd-attribute', 'nsuniqueid:targetUniqueId') + + log.info('Restarting instance') + try: + st.restart() + except ldap.LDAPError as e: + ldap.error('Failed to restart instance ' + e.args[0]['desc']) + assert False + + users = UserAccounts(st, DEFAULT_SUFFIX) + + log.info('Adding user1') + try: + user1 = users.create(properties={ + 'sn': '1', + 'cn': 'user 1', + 'uid': 'user1', + 'uidNumber': '11', + 'gidNumber': '111', + 'givenname': 'user1', + 'homePhone': '0861234567', + 'carLicense': '131D16674', + 'mail': 'user1@whereever.com', + 'homeDirectory': '/home/user1', + 'userpassword': USER_PW}) + except ldap.ALREADY_EXISTS: + user1 = UserAccount(st, dn=USER1_DN) + except ldap.LDAPError as e: + log.error("Failed to add user1: " + str(e)) + + log.info('Verify homePhone and carLicense attrs are in the changelog changestring') + try: + retro_changelog_suffix = DSLdapObjects(st, basedn=RETROCL_SUFFIX) + cllist = retro_changelog_suffix.filter(f'(targetDn={USER1_DN})') + except ldap.LDAPError as e: + log.fatal("Changelog search failed, error: " + str(e)) + assert False + assert len(cllist) > 0 + if cllist[0].present('changes'): + clstr = str(cllist[0].get_attr_vals_utf8('changes')) + assert ATTR_HOMEPHONE in clstr + assert ATTR_CARLICENSE in clstr + + log.info('Excluding attribute ' + ATTR_CARLICENSE) + args = FakeArgs() + args.connections = [st.host + ':' + str(st.port) + ':' + DN_DM + ':' + PW_DM] + args.instance = 'standalone1' + args.basedn = None + args.binddn = None + args.starttls = False + args.pwdfile = None + args.bindpw = None + args.prompt = False + args.exclude_attrs = ATTR_CARLICENSE + args.func = retrochangelog_add + dsrc_inst = dsrc_arg_concat(args, None) + inst = connect_instance(dsrc_inst, False, args) + result = args.func(inst, None, log, args) + disconnect_instance(inst) + assert result is 
None + + log.info('Restarting instance') + try: + st.restart() + except ldap.LDAPError as e: + ldap.error('Failed to restart instance ' + e.args[0]['desc']) + assert False + + log.info('Modify user1 carLicense attribute') + try: + user1.replace(ATTR_CARLICENSE, "123WX321") + except ldap.LDAPError as e: + log.fatal('test_retrocl_exclude_attr_mod: Failed to update user1 attribute: error ' + e.message['desc']) + assert False + + log.info('Verify carLicense attr is not in the changelog changestring') + try: + cllist = retro_changelog_suffix.filter(f'(targetDn={USER1_DN})') + assert len(cllist) > 0 + # There will be 2 entries in the changelog for this user, we are only + #interested in the second one, the modify operation. + if cllist[1].present('changes'): + clstr = str(cllist[1].get_attr_vals_utf8('changes')) + assert ATTR_CARLICENSE not in clstr + except ldap.LDAPError as e: + log.fatal("Changelog search failed, error: " + str(e)) + assert False + + +def test_retrocl_trimming(topology_st): + """Test retrocl trimming works + + :id: 54c6747f-6772-43b7-8b03-09e13fa0c205 + :setup: Standalone Instance + :steps: + 1. Enable Retro changelog + 2. Add a bunch of entries + 3. Configure trimming + 4. Verify trimming occurred + :expectedresults: + 1. Success + 2. Success + 3. Success + 4. 
Success + """ + + inst = topology_st.standalone + # Configure plugin + log.info('Configure retrocl plugin') + rcl = RetroChangelogPlugin(inst) + rcl.enable() + inst.restart() + + # Do some updates + suffix = Domain(inst, DEFAULT_SUFFIX) + for idx in range(0, 10): + suffix.replace('description', str(idx)) + + # Setup trimming + rcl.replace('nsslapd-changelog-trim-interval', '2') + rcl.replace('nsslapd-changelogmaxage', '5s') + inst.config.set('nsslapd-errorlog-level', '65536') # plugin logging + inst.restart() + + # Verify trimming occurs + time.sleep(5) + assert inst.searchErrorsLog("trim_changelog: removed ") + + # Clean up + inst.config.set('nsslapd-errorlog-level', '0') + +def test_retrocl_trimming_interval(topology_st, request): + """Test retrocl trimming interval works + + :id: 261a951d-8fe3-4b94-b933-c21806f795e7 + :setup: Standalone Instance + :steps: + 1. Enable Retro changelog + 2. Configure trimming (trimming records older than 20sec) + 3. Do a dummy update and wait 5sec + 4. Do a extra updates + 5. wait for 10sec and restart (after restart the dummy update will soon be trimmed) + 6. wait 15sec so that the extra updates are trimmed + 7. stop the instance + 8. check that dummy/extra updates have been trimmed + :expectedresults: + 1. Success + 2. Success + 3. Success + 4. Success + 5. Success + 6. Success + 7. Success + 8. Success + """ + + inst = topology_st.standalone + # Configure plugin + log.info('Configure retrocl plugin') + rcl = RetroChangelogPlugin(inst) + rcl.replace('nsslapd-include-suffix', DEFAULT_SUFFIX) + rcl.enable() + inst.restart() + + # Do some updates + suffix = Domain(inst, DEFAULT_SUFFIX) + + # Configure trimming + rcl.replace('nsslapd-changelog-trim-interval', '2') + rcl.replace('nsslapd-changelogmaxage', '20s') + inst.restart() + + # this update will be the first one to be trimmed + # and will initialize in the last_trim time. 
+ # This initialization is important to trigger the bug + # if not initialized it will trim immediately + suffix.replace('description', 'dummy') + + # retrieve the index of the update in retroCL as + # starting point for the extra updates + try: + retro_changelog_suffix = DSLdapObjects(inst, basedn=RETROCL_SUFFIX) + cl_index = int(str(retro_changelog_suffix.filter(f'(targetDn={DEFAULT_SUFFIX})')[0].get_attr_vals_utf8('changeNumber')[0])) + except ldap.LDAPError as e: + log.fatal("Changelog search failed, error: " + str(e)) + assert False + time.sleep(5) + + # Those extra updates should be trimmed 5 sec after the previous one + for idx in range(0, 10): + suffix.replace('description', str(idx)) + + # wait for ~14s (sleep+stop+start), so the trimming + # of the first update occurs soon after the restart + time.sleep(10) + inst.config.set('nsslapd-plugin-logging', 'on') + inst.config.set('nsslapd-accesslog-level','260') + inst.stop() + inst.start() + + # wait for an additional 15s. + # at that time we should have trim the extra updates + # checking both error logs and DEL operations in access logs + inst.config.set('nsslapd-errorlog-level', '65536') # plugin logging + time.sleep(15) + inst.stop() + assert inst.searchErrorsLog("trim_changelog: removed 9 change records") + for idx in range(cl_index+1,cl_index+10): + pattern="changenumber=%d,cn=changelog" % idx + assert inst.searchAccessLog(pattern) + + # Clean up + inst.restart() + + def fin(): + inst.config.set('nsslapd-errorlog-level', '0') + inst.config.set('nsslapd-plugin-logging', 'off') + inst.config.set('nsslapd-accesslog-level','256') + + request.addfinalizer(fin) + +if __name__ == '__main__': + # Run isolated + # -s for DEBUG mode + CURRENT_FILE = os.path.realpath(__file__) + pytest.main("-s %s" % CURRENT_FILE) diff --git a/dirsrvtests/tests/suites/retrocl/retrocl_indexing_test.py b/dirsrvtests/tests/suites/retrocl/retrocl_indexing_test.py new file mode 100644 index 0000000..1e9ee27 --- /dev/null +++ 
b/dirsrvtests/tests/suites/retrocl/retrocl_indexing_test.py @@ -0,0 +1,76 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2022 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. +# --- END COPYRIGHT BLOCK --- +# +import logging +import pytest +import os +from lib389._constants import RETROCL_SUFFIX, DEFAULT_SUFFIX +from lib389.topologies import topology_st as topo +from lib389.plugins import RetroChangelogPlugin +from lib389.idm.user import UserAccounts +from lib389._mapped_object import DSLdapObjects +log = logging.getLogger(__name__) + + +def test_indexing_is_online(topo): + """Test that the changenmumber index is online right after enabling the plugin + + :id: 16f4c001-9e0c-4448-a2b3-08ac1e85d40f + :setup: Standalone Instance + :steps: + 1. Enable retro cl + 2. Perform some updates + 3. Search for "(changenumber>=-1)", and it is not partially unindexed + 4. Search for "(&(changenumber>=-1)(targetuniqueid=*))", and it is not partially unindexed + :expectedresults: + 1. Success + 2. Success + 3. Success + 4. 
Success + """ + + # Enable plugin + topo.standalone.config.set('nsslapd-accesslog-logbuffering', 'off') + plugin = RetroChangelogPlugin(topo.standalone) + plugin.enable() + topo.standalone.restart() + + # Do a bunch of updates + users = UserAccounts(topo.standalone, DEFAULT_SUFFIX) + user_entry = users.create(properties={ + 'sn': '1', + 'cn': 'user 1', + 'uid': 'user1', + 'uidNumber': '11', + 'gidNumber': '111', + 'givenname': 'user1', + 'homePhone': '0861234567', + 'carLicense': '131D16674', + 'mail': 'user1@whereever.com', + 'homeDirectory': '/home' + }) + for count in range(0, 10): + user_entry.replace('mail', f'test{count}@test.com') + + # Search the retro cl, and check for error messages + filter_simple = '(changenumber>=-1)' + filter_compound = '(&(changenumber>=-1)(targetuniqueid=*))' + retro_changelog_suffix = DSLdapObjects(topo.standalone, basedn=RETROCL_SUFFIX) + retro_changelog_suffix.filter(filter_simple) + assert not topo.standalone.searchAccessLog('Partially Unindexed Filter') + + # Search the retro cl again with compound filter + retro_changelog_suffix.filter(filter_compound) + assert not topo.standalone.searchAccessLog('Partially Unindexed Filter') + + +if __name__ == '__main__': + # Run isolated + # -s for DEBUG mode + CURRENT_FILE = os.path.realpath(__file__) + pytest.main(["-s", CURRENT_FILE]) diff --git a/dirsrvtests/tests/suites/rewriters/__init__.py b/dirsrvtests/tests/suites/rewriters/__init__.py new file mode 100644 index 0000000..fb2e3b9 --- /dev/null +++ b/dirsrvtests/tests/suites/rewriters/__init__.py @@ -0,0 +1,3 @@ +""" + :Requirement: 389-ds-base: Rewriters +""" diff --git a/dirsrvtests/tests/suites/rewriters/adfilter_test.py b/dirsrvtests/tests/suites/rewriters/adfilter_test.py new file mode 100644 index 0000000..369bdfe --- /dev/null +++ b/dirsrvtests/tests/suites/rewriters/adfilter_test.py @@ -0,0 +1,197 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2022 Red Hat, Inc. +# All rights reserved. 
+# +# License: GPL (version 3 or any later version). +# See LICENSE for details. +# --- END COPYRIGHT BLOCK --- +# +import pytest +import glob +import base64 +import re +from lib389.tasks import * +from lib389.rewriters import * +from lib389.idm.user import UserAccounts +from lib389.utils import * +from lib389.topologies import topology_st + +from lib389._constants import DEFAULT_SUFFIX, HOST_STANDALONE, PORT_STANDALONE + +samba_missing = False +try: + from samba.dcerpc import security + from samba.ndr import ndr_pack, ndr_unpack +except: + samba_missing = True + pass + +log = logging.getLogger(__name__) +# Skip on versions 1.4.2 and before. Rewriters are expected in 1.4.3 +pytestmark = [pytest.mark.tier2, + pytest.mark.skipif(ds_is_older('1.4.3'), reason="Not implemented")] + +PW = 'password' + +# +# Necessary because objectcategory relies on cn=xxx RDN +# while userAccount creates uid=xxx RDN +# +def _create_user(inst, schema_container, name, salt): + dn = 'cn=%s,%s' % (name, schema_container) + inst.add_s(Entry(( + dn, { + 'objectClass': 'top person extensibleobject'.split(), + 'cn': name, + 'sn': name, + 'objectcategory': dn, + "description" : salt, + 'userpassword': PW + }))) + + +def test_adfilter_objectCategory(topology_st): + """Test adfilter objectCategory rewriter function + + :id: 9c6493c9-1ee0-4bc2-b5db-7253a04fb6f8 + :setup: Standalone instance + :steps: + 1. Add a objectsid rewriter (from librewriters.so) + 2. Add a dummy schema definition of objectsid to prevent nsslapd-verify-filter-schema + 3. Restart the server (to load the rewriter) + 4. Check EQUALITY filter rewrite + 5. Check SUBSTRING search is not replaced + 6. Check PRESENCE search is not replaced so it selects all entries having objectCategory + :expectedresults: + 1. Add operation should PASS. + 2. Add operations should PASS. + 3. Restart should PASS + 4. Search returns only one entry + 5. Search returns zero entries + 6. 
Search returns all (20) entries + """ + + librewriters = os.path.join( topology_st.standalone.ds_paths.lib_dir, 'dirsrv/librewriters.so') + assert librewriters + + rewriters = AdRewriters(topology_st.standalone) + ad_rewriter = rewriters.ensure_state(properties={"cn": "adfilter", "nsslapd-libpath": librewriters}) + ad_rewriter.add('nsslapd-filterrewriter', "adfilter_rewrite_objectCategory") + ad_rewriter.create_containers(DEFAULT_SUFFIX) + schema_container = ad_rewriter.get_schema_dn() + + objectcategory_attr = '( NAME \'objectCategory\' DESC \'test of objectCategory\' SYNTAX 1.3.6.1.4.1.1466.115.121.1.15 )' + topology_st.standalone.schema.add_schema('attributetypes', [ensure_bytes(objectcategory_attr)]) + + topology_st.standalone.restart(60) + + # Add a user + for i in range(0, 20): + _create_user(topology_st.standalone, schema_container, "user_%d" % i, str(i)) + + # Check EQUALITY filter rewrite => it should match only one entry + for i in range(0, 20): + ents = topology_st.standalone.search_s(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE, '(objectCategory=user_%d)' % i) + assert len(ents) == 1 + + # Check SUBSTRING search is not replaced + ents = topology_st.standalone.search_s(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE, '(objectCategory=user_*)') + assert len(ents) == 0 + + # Check PRESENCE search is not replaced so it selects all entries having objectCategory + ents = topology_st.standalone.search_s(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE, '(objectCategory=*)') + assert len(ents) == 20 + + log.info('Test PASSED') + +def sid_to_objectsid(sid): + return base64.b64encode(ndr_pack(security.dom_sid(sid))).decode('utf-8') + +def objectsid_to_sid(objectsid): + sid = ndr_unpack(security.dom_sid, base64.b64decode(objectsid)) + return str(sid) + +@pytest.mark.skipif(samba_missing, reason="It is missing samba python bindings") +def test_adfilter_objectSid(topology_st): + """Test adfilter objectCategory rewriter function with samba container/users + + :id: fc5880ff-4305-47ba-84fb-38429e264e9e + 
:setup: Standalone instance + :steps: + 1. Add a objectsid rewriter (from librewriters.so) + 2. Add a dummy schema definition of objectsid to prevent nsslapd-verify-filter-schema + 3. Restart the server (to load the rewriter) + 4. Add "samba" container/users + 5. Searches using objectsid in string format + :expectedresults: + 1. Add operation should PASS. + 2. Add operations should PASS. + 3. Restart should PASS + 4. Add "samba" users should PASS + 5. Search returns only one entry + """ + librewriters = os.path.join( topology_st.standalone.ds_paths.lib_dir, 'dirsrv/librewriters.so') + assert librewriters + + rewriters = AdRewriters(topology_st.standalone) + ad_rewriter = rewriters.ensure_state(properties={"cn": "adfilter", "nsslapd-libpath": librewriters}) + ad_rewriter.add('nsslapd-filterrewriter', "adfilter_rewrite_objectsid") + ad_rewriter.create_containers(DEFAULT_SUFFIX) + schema_container = ad_rewriter.get_schema_dn() + + # to prevent nsslapd-verify-filter-schema to reject searches with objectsid + objectcategory_attr = '( NAME \'objectsid\' DESC \'test of objectsid\' SYNTAX 1.3.6.1.4.1.1466.115.121.1.40 )' + topology_st.standalone.schema.add_schema('attributetypes', [ensure_bytes(objectcategory_attr)]) + + topology_st.standalone.restart() + + # Contains a list of b64encoded SID from https://github.com/SSSD/sssd/blob/supplier/src/tests/intg/data/ad_data.ldif + SIDs = ["AQUAAAAAAAUVAAAADcfLTVzC66zo0l8EUAQAAA==", + "AQUAAAAAAAUVAAAADcfLTVzC66zo0l8E9gEAAA==", + "AQUAAAAAAAUVAAAADcfLTVzC66zo0l8EAwIAAA==", + "AQUAAAAAAAUVAAAADcfLTVzC66zo0l8EBAIAAA==", + "AQUAAAAAAAUVAAAADcfLTVzC66zo0l8EBgIAAA==", + "AQUAAAAAAAUVAAAADcfLTVzC66zo0l8EBwIAAA==", + "AQUAAAAAAAUVAAAADcfLTVzC66zo0l8EBQIAAA==", + "AQUAAAAAAAUVAAAADcfLTVzC66zo0l8EAAIAAA==", + "AQUAAAAAAAUVAAAADcfLTVzC66zo0l8EAQIAAA==", + "AQUAAAAAAAUVAAAADcfLTVzC66zo0l8EAgIAAA==", + "AQUAAAAAAAUVAAAADcfLTVzC66zo0l8ECAIAAA==", + "AQUAAAAAAAUVAAAADcfLTVzC66zo0l8EKQIAAA==", + "AQUAAAAAAAUVAAAADcfLTVzC66zo0l8EOwIAAA==", + 
"AQUAAAAAAAUVAAAADcfLTVzC66zo0l8EPAIAAA==", + "AQUAAAAAAAUVAAAADcfLTVzC66zo0l8ECQIAAA==", + "AQUAAAAAAAUVAAAADcfLTVzC66zo0l8E8gEAAA==", + "AQUAAAAAAAUVAAAADcfLTVzC66zo0l8ETQQAAA==", + "AQUAAAAAAAUVAAAADcfLTVzC66zo0l8ETgQAAA==", + "AQUAAAAAAAUVAAAADcfLTVzC66zo0l8EeUMBAA==", + "AQUAAAAAAAUVAAAADcfLTVzC66zo0l8EekMBAA==", + "AQUAAAAAAAUVAAAADcfLTVzC66zo0l8Ee0MBAA==", + "AQUAAAAAAAUVAAAADcfLTVzC66zo0l8EfEMBAA==", + "AQUAAAAAAAUVAAAADcfLTVzC66zo0l8ETwQAAA==", + "AQUAAAAAAAUVAAAADcfLTVzC66zo0l8EUQQAAA==", + "AQUAAAAAAAUVAAAADcfLTVzC66zo0l8ESUMBAA==", + "AQUAAAAAAAUVAAAADcfLTVzC66zo0l8ESkMBAA==", + "AQUAAAAAAAUVAAAADcfLTVzC66zo0l8ES0MBAA==", + "AQUAAAAAAAUVAAAADcfLTVzC66zo0l8ETEMBAA==", + "AQUAAAAAAAUVAAAADcfLTVzC66zo0l8E9AEAAA==", + "AQUAAAAAAAUVAAAADcfLTVzC66zo0l8E9QEAAA=="] + + # Add a container and "samba" like users containing objectsid + users = UserAccounts(topology_st.standalone, schema_container, rdn=None) + i = 0 + for sid in SIDs: + decoded = base64.b64decode(sid) + user = users.create_test_user(uid=i) + user.add('objectclass', 'extensibleobject') + user.replace('objectsid', decoded) + user.replace('objectSidString', objectsid_to_sid(sid)) + i = i + 1 + + # Check that objectsid rewrite can retrieve the "samba" user + # using either a string objectsid (i.e. S-1-5...) or a blob objectsid + for sid_blob in SIDs: + sid_string = objectsid_to_sid(sid_blob) + ents_sid_string = topology_st.standalone.search_s(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE, '(objectsid=%s)' % sid_string) + assert len(ents_sid_string) == 1 + diff --git a/dirsrvtests/tests/suites/rewriters/basic_test.py b/dirsrvtests/tests/suites/rewriters/basic_test.py new file mode 100644 index 0000000..9e0b5e4 --- /dev/null +++ b/dirsrvtests/tests/suites/rewriters/basic_test.py @@ -0,0 +1,80 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2022 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. 
+# --- END COPYRIGHT BLOCK --- +# +import pytest +import glob +from lib389.tasks import * +from lib389.utils import * +from lib389.topologies import topology_st + +from lib389._constants import DEFAULT_SUFFIX, HOST_STANDALONE, PORT_STANDALONE + +log = logging.getLogger(__name__) +# Skip on versions 1.4.2 and before. Rewriters are expected in 1.4.3 +pytestmark = [pytest.mark.tier2, + pytest.mark.skipif(ds_is_older('1.4.3'), reason="Not implemented")] + + +rewriters_container = "cn=rewriters,cn=config" + +def test_rewriters_container(topology_st): + """Test checks that rewriters container exists + + :id: 5514ae43-546b-4165-beac-896f7f0c9197 + :setup: Standalone instance + :steps: + 1. Check container of rewriteers + :expectedresults: + 1. Search returns only one entry + """ + + # Check container of rewriters + ents = topology_st.standalone.search_s(rewriters_container, ldap.SCOPE_BASE, '(objectclass=*)') + assert len(ents) == 1 + + log.info('Test PASSED') + +def test_foo_filter_rewriter(topology_st): + """Test that example filter rewriter 'foo' is register and search use it + + :id: d16cf7e9-4973-4747-8694-65818156a28e + :setup: Standalone instance + :steps: + 1. Register foo filter rewriters + 2. Restart the server + 3. Check that the filter 'foo=foo' is rewritten into 'cn=foo' + :expectedresults: + 1. Success + 2. Success + 3. 
Success + """ + + libslapd = os.path.join( topology_st.standalone.ds_paths.lib_dir, 'dirsrv/libslapd.so') + if not os.path.exists(libslapd): + libslapd = os.path.join( topology_st.standalone.ds_paths.lib_dir, 'dirsrv/libslapd.so.0') + # register foo filter rewriters + topology_st.standalone.add_s(Entry(( + "cn=foo_filter,cn=rewriters,cn=config", { + "objectClass": "top", + "objectClass": "extensibleObject", + "cn": "foo_filter", + "nsslapd-libpath": libslapd, + "nsslapd-filterrewriter": "example_foo2cn_filter_rewriter", + } + ))) + + + topology_st.standalone.restart(60) + + # Check that the filter 'foo=foo' is rewritten into 'cn=foo' + ents = topology_st.standalone.search_s(rewriters_container, ldap.SCOPE_SUBTREE, '(foo=foo_filter)') + assert len(ents) > 0 + assert ents[0].dn == "cn=foo_filter,cn=rewriters,cn=config" + + log.info('Test PASSED') + diff --git a/dirsrvtests/tests/suites/roles/__init__.py b/dirsrvtests/tests/suites/roles/__init__.py new file mode 100644 index 0000000..1981985 --- /dev/null +++ b/dirsrvtests/tests/suites/roles/__init__.py @@ -0,0 +1,3 @@ +""" + :Requirement: 389-ds-base: Roles +""" diff --git a/dirsrvtests/tests/suites/roles/basic_test.py b/dirsrvtests/tests/suites/roles/basic_test.py new file mode 100644 index 0000000..7c83a32 --- /dev/null +++ b/dirsrvtests/tests/suites/roles/basic_test.py @@ -0,0 +1,774 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2020 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. +# --- END COPYRIGHT BLOCK --- + + +""" +Importing necessary Modules. 
+""" + +import logging +import time +import ldap +import os +import pytest + +from lib389._constants import ErrorLog, PW_DM, DEFAULT_SUFFIX, DEFAULT_BENAME +from lib389.idm.user import UserAccount, UserAccounts +from lib389.idm.organization import Organization +from lib389.idm.organizationalunit import OrganizationalUnit +from lib389.topologies import topology_st as topo +from lib389.idm.role import FilteredRoles, ManagedRoles, NestedRoles +from lib389.idm.domain import Domain +from lib389.dbgen import dbgen_users +from lib389.tasks import ImportTask +from lib389.utils import get_default_db_lib +from lib389.rewriters import * +from lib389.backend import Backends + +logging.getLogger(__name__).setLevel(logging.INFO) +log = logging.getLogger(__name__) + +pytestmark = pytest.mark.tier1 + +DNBASE = "o=acivattr,{}".format(DEFAULT_SUFFIX) +ENG_USER = "cn=enguser1,ou=eng,{}".format(DNBASE) +SALES_UESER = "cn=salesuser1,ou=sales,{}".format(DNBASE) +ENG_MANAGER = "cn=engmanager1,ou=eng,{}".format(DNBASE) +SALES_MANAGER = "cn=salesmanager1,ou=sales,{}".format(DNBASE) +SALES_OU = "ou=sales,{}".format(DNBASE) +ENG_OU = "ou=eng,{}".format(DNBASE) +FILTERROLESALESROLE = "cn=FILTERROLESALESROLE,{}".format(DNBASE) +FILTERROLEENGROLE = "cn=FILTERROLEENGROLE,{}".format(DNBASE) + + +def test_filterrole(topo, request): + """Test Filter Role + + :id: 8ada4064-786b-11e8-8634-8c16451d917b + :setup: server + :steps: + 1. Add test entry + 2. Add ACI + 3. Search nsconsole role + :expectedresults: + 1. Entry should be added + 2. Operation should succeed + 3. 
Operation should succeed + """ + Organization(topo.standalone).create(properties={"o": "acivattr"}, basedn=DEFAULT_SUFFIX) + properties = { + 'ou': 'eng', + } + + ou_ou = OrganizationalUnit(topo.standalone, "ou=eng,o=acivattr,{}".format(DEFAULT_SUFFIX)) + ou_ou.create(properties=properties) + properties = {'ou': 'sales'} + ou_ou = OrganizationalUnit(topo.standalone, "ou=sales,o=acivattr,{}".format(DEFAULT_SUFFIX)) + ou_ou.create(properties=properties) + + roles = FilteredRoles(topo.standalone, DNBASE) + roles.create(properties={'cn': 'FILTERROLEENGROLE', 'nsRoleFilter': 'cn=eng*'}) + roles.create(properties={'cn': 'FILTERROLESALESROLE', 'nsRoleFilter': 'cn=sales*'}) + + properties = { + 'uid': 'salesuser1', + 'cn': 'salesuser1', + 'sn': 'user', + 'uidNumber': '1000', + 'gidNumber': '2000', + 'homeDirectory': '/home/' + 'salesuser1', + 'userPassword': PW_DM + } + user = UserAccount(topo.standalone, + 'cn=salesuser1,ou=sales,o=acivattr,{}'.format(DEFAULT_SUFFIX)) + user.create(properties=properties) + + properties = { + 'uid': 'salesmanager1', + 'cn': 'salesmanager1', + 'sn': 'user', + 'uidNumber': '1000', + 'gidNumber': '2000', + 'homeDirectory': '/home/' + 'salesmanager1', + 'userPassword': PW_DM, + } + user = UserAccount(topo.standalone, + 'cn=salesmanager1,ou=sales,o=acivattr,{}'.format(DEFAULT_SUFFIX)) + user.create(properties=properties) + + properties = { + 'uid': 'enguser1', + 'cn': 'enguser1', + 'sn': 'user', + 'uidNumber': '1000', + 'gidNumber': '2000', + 'homeDirectory': '/home/' + 'enguser1', + 'userPassword': PW_DM + } + user = UserAccount(topo.standalone, + 'cn=enguser1,ou=eng,o=acivattr,{}'.format(DEFAULT_SUFFIX)) + user.create(properties=properties) + + properties = { + 'uid': 'engmanager1', + 'cn': 'engmanager1', + 'sn': 'user', + 'uidNumber': '1000', + 'gidNumber': '2000', + 'homeDirectory': '/home/' + 'engmanager1', + 'userPassword': PW_DM + } + user = UserAccount(topo.standalone, + 'cn=engmanager1,ou=eng,o=acivattr,{}'.format(DEFAULT_SUFFIX)) + 
user.create(properties=properties) + + # user with cn=sales* will automatically memeber of nsfilterrole + # cn=filterrolesalesrole,o=acivattr,dc=example,dc=com + assert UserAccount(topo.standalone, + 'cn=salesuser1,ou=sales,o=acivattr,dc=example,dc=com').\ + get_attr_val_utf8('nsrole') == 'cn=filterrolesalesrole,o=acivattr,dc=example,dc=com' + # same goes to SALES_MANAGER + assert UserAccount(topo.standalone, SALES_MANAGER).get_attr_val_utf8( + 'nsrole') == 'cn=filterrolesalesrole,o=acivattr,dc=example,dc=com' + # user with cn=eng* will automatically memeber of nsfilterrole + # cn=filterroleengrole,o=acivattr,dc=example,dc=com + assert UserAccount(topo.standalone, 'cn=enguser1,ou=eng,o=acivattr,dc=example,dc=com').\ + get_attr_val_utf8('nsrole') == 'cn=filterroleengrole,o=acivattr,dc=example,dc=com' + # same goes to ENG_MANAGER + assert UserAccount(topo.standalone, ENG_MANAGER).get_attr_val_utf8( + 'nsrole') == 'cn=filterroleengrole,o=acivattr,dc=example,dc=com' + for dn_dn in [ENG_USER, SALES_UESER, ENG_MANAGER, SALES_MANAGER, + FILTERROLESALESROLE, FILTERROLEENGROLE, ENG_OU, + SALES_OU, DNBASE]: + UserAccount(topo.standalone, dn_dn).delete() + + def fin(): + topo.standalone.restart() + try: + filtered_roles = FilteredRoles(topo.standalone, DEFAULT_SUFFIX) + for i in filtered_roles.list(): + i.delete() + except: + pass + topo.standalone.config.set('nsslapd-ignore-virtual-attrs', 'on') + + request.addfinalizer(fin) + + +def test_managedrole(topo, request): + """Test Managed Role + + :id: d52a9c00-3bf6-11e9-9b7b-8c16451d917b + :setup: server + :steps: + 1. Add test entry + 2. Add ACI + 3. Search managed role entries + :expectedresults: + 1. Entry should be added + 2. Operation should succeed + 3. 
Operation should succeed + """ + # Create Managed role entry + roles = ManagedRoles(topo.standalone, DEFAULT_SUFFIX) + role = roles.create(properties={"cn": 'ROLE1'}) + + # Create user and Assign the role to the entry + uas = UserAccounts(topo.standalone, DEFAULT_SUFFIX, rdn=None) + uas.create(properties={ + 'uid': 'Fail', + 'cn': 'Fail', + 'sn': 'user', + 'uidNumber': '1000', + 'gidNumber': '2000', + 'homeDirectory': '/home/' + 'Fail', + 'nsRoleDN': role.dn, + 'userPassword': PW_DM + }) + + # Create user and do not Assign any role to the entry + uas.create( + properties={ + 'uid': 'Success', + 'cn': 'Success', + 'sn': 'user', + 'uidNumber': '1000', + 'gidNumber': '2000', + 'homeDirectory': '/home/' + 'Success', + 'userPassword': PW_DM + }) + + # Assert that Manage role entry is created and its searchable + assert ManagedRoles(topo.standalone, DEFAULT_SUFFIX).list()[0].dn \ + == 'cn=ROLE1,dc=example,dc=com' + + # Set an aci that will deny ROLE1 manage role + Domain(topo.standalone, DEFAULT_SUFFIX).\ + add('aci', '(targetattr="*")(version 3.0; aci "role aci";' + ' deny(all) roledn="ldap:///{}";)'.format(role.dn),) + # Add self user modification and anonymous aci + ANON_ACI = "(targetattr=\"*\")(version 3.0; acl \"Anonymous Read access\"; allow (read,search,compare) userdn = \"ldap:///anyone\";)" + suffix = Domain(topo.standalone, DEFAULT_SUFFIX) + suffix.add('aci', ANON_ACI) + + # Crate a connection with cn=Fail which is member of ROLE1 + conn = UserAccount(topo.standalone, "uid=Fail,{}".format(DEFAULT_SUFFIX)).bind(PW_DM) + # Access denied to ROLE1 members + assert not ManagedRoles(conn, DEFAULT_SUFFIX).list() + + # Now create a connection with cn=Success which is not a member of ROLE1 + conn = UserAccount(topo.standalone, "uid=Success,{}".format(DEFAULT_SUFFIX)).bind(PW_DM) + # Access allowed here + assert ManagedRoles(conn, DEFAULT_SUFFIX).list() + + for i in uas.list(): + i.delete() + + for i in roles.list(): + i.delete() + + def fin(): + 
topo.standalone.restart() + try: + role = ManagedRoles(topo.standalone, DEFAULT_SUFFIX).get('ROLE1') + role.delete() + except: + pass + topo.standalone.config.set('nsslapd-ignore-virtual-attrs', 'on') + + request.addfinalizer(fin) + +@pytest.fixture(scope="function") +def _final(request, topo): + """ + Removes and Restores ACIs after the test. + """ + aci_list = Domain(topo.standalone, DEFAULT_SUFFIX).get_attr_vals('aci') + + def finofaci(): + """ + Removes and Restores ACIs and other users after the test. + And restore nsslapd-ignore-virtual-attrs to default + """ + domain = Domain(topo.standalone, DEFAULT_SUFFIX) + domain.remove_all('aci') + + managed_roles = ManagedRoles(topo.standalone, DEFAULT_SUFFIX) + nested_roles = NestedRoles(topo.standalone, DEFAULT_SUFFIX) + users = UserAccounts(topo.standalone, DEFAULT_SUFFIX) + + for i in managed_roles.list() + nested_roles.list() + users.list(): + i.delete() + + for i in aci_list: + domain.add("aci", i) + + topo.standalone.config.set('nsslapd-ignore-virtual-attrs', 'on') + + request.addfinalizer(finofaci) + + +def test_nestedrole(topo, _final): + """Test Nested Role + + :id: 867b40c0-7fcf-4332-afc7-bd01025b77f2 + :setup: Standalone server + :steps: + 1. Add test entry + 2. Add ACI + 3. Search managed role entries + :expectedresults: + 1. Entry should be added + 2. Operation should succeed + 3. 
Operation should succeed + """ + # Create Managed role entry + managed_roles = ManagedRoles(topo.standalone, DEFAULT_SUFFIX) + managed_role1 = managed_roles.create(properties={"cn": 'managed_role1'}) + managed_role2 = managed_roles.create(properties={"cn": 'managed_role2'}) + + # Create nested role entry + nested_roles = NestedRoles(topo.standalone, DEFAULT_SUFFIX) + nested_role = nested_roles.create(properties={"cn": 'nested_role', + "nsRoleDN": [managed_role1.dn, managed_role2.dn]}) + + # Create user and assign managed role to it + users = UserAccounts(topo.standalone, DEFAULT_SUFFIX) + user1 = users.create_test_user(uid=1, gid=1) + user1.set('nsRoleDN', managed_role1.dn) + user1.set('userPassword', PW_DM) + + # Create another user and assign managed role to it + user2 = users.create_test_user(uid=2, gid=2) + user2.set('nsRoleDN', managed_role2.dn) + user2.set('userPassword', PW_DM) + + # Create another user and do not assign any role to it + user3 = users.create_test_user(uid=3, gid=3) + user3.set('userPassword', PW_DM) + + # Create a ACI with deny access to nested role entry + Domain(topo.standalone, DEFAULT_SUFFIX).\ + add('aci', f'(targetattr="*")(version 3.0; aci ' + f'"role aci"; deny(all) roledn="ldap:///{nested_role.dn}";)') + + # Create connection with 'uid=test_user_1,ou=People,dc=example,dc=com' member of managed_role1 + # and search while bound as the user + conn = users.get('test_user_1').bind(PW_DM) + assert not UserAccounts(conn, DEFAULT_SUFFIX).list() + + # Create connection with 'uid=test_user_2,ou=People,dc=example,dc=com' member of managed_role2 + # and search while bound as the user + conn = users.get('test_user_2').bind(PW_DM) + assert not UserAccounts(conn, DEFAULT_SUFFIX).list() + + # Create connection with 'uid=test_user_3,ou=People,dc=example,dc=com' and + # search while bound as the user + conn = users.get('test_user_3').bind(PW_DM) + assert UserAccounts(conn, DEFAULT_SUFFIX).list() + +def test_vattr_on_filtered_role(topo, request): + 
"""Test nsslapd-ignore-virtual-attrs configuration attribute + The attribute is ON by default. If a filtered role is + added it is moved to OFF + + :id: 88b3ad3c-f39a-4eb7-a8c9-07c685f11908 + :customerscenario: True + :setup: Standalone instance + :steps: + 1. Check the attribute nsslapd-ignore-virtual-attrs is present in cn=config + 2. Check the default value of attribute nsslapd-ignore-virtual-attrs should be ON + 3. Create a filtered role + 4. Check the value of nsslapd-ignore-virtual-attrs should be OFF + 5. Check a message "roles_cache_trigger_update_role - Because of virtual attribute.." in error logs + 6. Check after deleting role definition value of attribute nsslapd-ignore-virtual-attrs is set back to ON + :expectedresults: + 1. This should be successful + 2. This should be successful + 3. This should be successful + 4. This should be successful + 5. This should be successful + 6. This should be successful + """ + + log.info("Check the attribute nsslapd-ignore-virtual-attrs is present in cn=config") + assert topo.standalone.config.present('nsslapd-ignore-virtual-attrs') + + log.info("Check the default value of attribute nsslapd-ignore-virtual-attrs should be ON") + assert topo.standalone.config.get_attr_val_utf8('nsslapd-ignore-virtual-attrs') == "on" + + log.info("Create a filtered role") + try: + Organization(topo.standalone).create(properties={"o": "acivattr"}, basedn=DEFAULT_SUFFIX) + except: + pass + roles = FilteredRoles(topo.standalone, DNBASE) + roles.create(properties={'cn': 'FILTERROLEENGROLE', 'nsRoleFilter': 'cn=eng*'}) + + log.info("Check the default value of attribute nsslapd-ignore-virtual-attrs should be OFF") + assert topo.standalone.config.present('nsslapd-ignore-virtual-attrs', 'off') + + topo.standalone.stop() + assert topo.standalone.searchErrorsLog("roles_cache_trigger_update_role - Because of virtual attribute definition \(role\), nsslapd-ignore-virtual-attrs was set to \'off\'") + + def fin(): + topo.standalone.restart() + try: + 
filtered_roles = FilteredRoles(topo.standalone, DEFAULT_SUFFIX) + for i in filtered_roles.list(): + i.delete() + except: + pass + log.info("Check the default value of attribute nsslapd-ignore-virtual-attrs is back to ON") + topo.standalone.restart() + assert topo.standalone.config.get_attr_val_utf8('nsslapd-ignore-virtual-attrs') == "on" + + request.addfinalizer(fin) + +def test_vattr_on_filtered_role_restart(topo, request): + """Test nsslapd-ignore-virtual-attrs configuration attribute + If it exists a filtered role definition at restart then + nsslapd-ignore-virtual-attrs should be set to 'off' + + :id: 972183f7-d18f-40e0-94ab-580e7b7d78d0 + :customerscenario: True + :setup: Standalone instance + :steps: + 1. Check the attribute nsslapd-ignore-virtual-attrs is present in cn=config + 2. Check the default value of attribute nsslapd-ignore-virtual-attrs should be ON + 3. Create a filtered role + 4. Check the value of nsslapd-ignore-virtual-attrs should be OFF + 5. restart the instance + 6. Check the presence of virtual attribute is detected + 7. Check the value of nsslapd-ignore-virtual-attrs should be OFF + :expectedresults: + 1. This should be successful + 2. This should be successful + 3. This should be successful + 4. This should be successful + 5. This should be successful + 6. This should be successful + 7. 
This should be successful + """ + + log.info("Check the attribute nsslapd-ignore-virtual-attrs is present in cn=config") + assert topo.standalone.config.present('nsslapd-ignore-virtual-attrs') + + log.info("Check the default value of attribute nsslapd-ignore-virtual-attrs should be ON") + assert topo.standalone.config.get_attr_val_utf8('nsslapd-ignore-virtual-attrs') == "on" + + log.info("Create a filtered role") + try: + Organization(topo.standalone).create(properties={"o": "acivattr"}, basedn=DEFAULT_SUFFIX) + except: + pass + roles = FilteredRoles(topo.standalone, DNBASE) + roles.create(properties={'cn': 'FILTERROLEENGROLE', 'nsRoleFilter': 'cn=eng*'}) + + log.info("Check the default value of attribute nsslapd-ignore-virtual-attrs should be OFF") + assert topo.standalone.config.present('nsslapd-ignore-virtual-attrs', 'off') + + + log.info("Check the virtual attribute definition is found (after a required delay)") + topo.standalone.restart() + time.sleep(5) + assert topo.standalone.searchErrorsLog("Found a role/cos definition in") + assert topo.standalone.searchErrorsLog("roles_cache_trigger_update_role - Because of virtual attribute definition \(role\), nsslapd-ignore-virtual-attrs was set to \'off\'") + + log.info("Check the default value of attribute nsslapd-ignore-virtual-attrs should be OFF") + assert topo.standalone.config.present('nsslapd-ignore-virtual-attrs', 'off') + + def fin(): + topo.standalone.restart() + try: + filtered_roles = FilteredRoles(topo.standalone, DEFAULT_SUFFIX) + for i in filtered_roles.list(): + i.delete() + except: + pass + topo.standalone.config.set('nsslapd-ignore-virtual-attrs', 'on') + + request.addfinalizer(fin) + + +def test_vattr_on_managed_role(topo, request): + """Test nsslapd-ignore-virtual-attrs configuration attribute + The attribute is ON by default. If a managed role is + added it is moved to OFF + + :id: 664b722d-c1ea-41e4-8f6c-f9c87a212346 + :customerscenario: True + :setup: Standalone instance + :steps: + 1. 
Check the attribute nsslapd-ignore-virtual-attrs is present in cn=config + 2. Check the default value of attribute nsslapd-ignore-virtual-attrs should be ON + 3. Create a managed role + 4. Check the value of nsslapd-ignore-virtual-attrs should be OFF + 5. Check a message "roles_cache_trigger_update_role - Because of virtual attribute.." in error logs + 6. Check after deleting role definition value of attribute nsslapd-ignore-virtual-attrs is set back to ON + :expectedresults: + 1. This should be successful + 2. This should be successful + 3. This should be successful + 4. This should be successful + 5. This should be successful + 6. This should be successful + """ + + log.info("Check the attribute nsslapd-ignore-virtual-attrs is present in cn=config") + assert topo.standalone.config.present('nsslapd-ignore-virtual-attrs') + + log.info("Check the default value of attribute nsslapd-ignore-virtual-attrs should be ON") + assert topo.standalone.config.get_attr_val_utf8('nsslapd-ignore-virtual-attrs') == "on" + + log.info("Create a managed role") + roles = ManagedRoles(topo.standalone, DEFAULT_SUFFIX) + role = roles.create(properties={"cn": 'ROLE1'}) + + log.info("Check the default value of attribute nsslapd-ignore-virtual-attrs should be OFF") + assert topo.standalone.config.present('nsslapd-ignore-virtual-attrs', 'off') + + topo.standalone.stop() + assert topo.standalone.searchErrorsLog("roles_cache_trigger_update_role - Because of virtual attribute definition \(role\), nsslapd-ignore-virtual-attrs was set to \'off\'") + + def fin(): + topo.standalone.restart() + try: + filtered_roles = ManagedRoles(topo.standalone, DEFAULT_SUFFIX) + for i in filtered_roles.list(): + i.delete() + except: + pass + log.info("Check the default value of attribute nsslapd-ignore-virtual-attrs is back to ON") + topo.standalone.restart() + assert topo.standalone.config.get_attr_val_utf8('nsslapd-ignore-virtual-attrs') == "on" + + request.addfinalizer(fin) + +def 
test_managed_and_filtered_role_rewrite(topo, request): + """Test that filter components containing 'nsrole=xxx' + are reworked if xxx is either a filtered role or a managed + role. + + :id: e30ff5ed-4f8b-48db-bb88-66f150fca31f + :setup: server + :steps: + 1. Setup nsrole rewriter + 2. Add a 'nsroleDN' indexes for managed roles + 3. Create an 90K ldif files + This is large so that unindex search will last long + 4. import/restart the instance + 5. Create a managed role and add 4 entries in that role + 6. Check that a search 'nsrole=managed_role' is fast + 7. Create a filtered role that use an indexed attribute (givenName) + 8. Check that a search 'nsrole=filtered_role' is fast + :expectedresults: + 1. Operation should succeed + 2. Operation should succeed + 3. Operation should succeed + 4. Operation should succeed + 5. Operation should succeed + 6. Operation should succeed + 7. Operation should succeed + 8. Operation should succeed + """ + # Setup nsrole rewriter + rewriters = Rewriters(topo.standalone) + rewriter = rewriters.ensure_state(properties={"cn": "nsrole", "nsslapd-libpath": 'libroles-plugin'}) + try: + rewriter.add('nsslapd-filterrewriter', "role_nsRole_filter_rewriter") + except: + pass + + # Create an index for nsRoleDN that is used by managed role + attrname = 'nsRoleDN' + backends = Backends(topo.standalone) + backend = backends.get(DEFAULT_BENAME) + indexes = backend.get_indexes() + try: + index = indexes.create(properties={ + 'cn': attrname, + 'nsSystemIndex': 'false', + 'nsIndexType': ['eq', 'pres'] + }) + except: + pass + + # Build LDIF file + bdb_values = { + 'wait30': 30 + } + + # Note: I still sometime get failure with a 60s timeout so lets use 90s + mdb_values = { + 'wait30': 90 + } + + if get_default_db_lib() == 'bdb': + values = bdb_values + else: + values = mdb_values + + ldif_dir = topo.standalone.get_ldif_dir() + import_ldif = ldif_dir + '/perf_import.ldif' + + RDN="userNew" + PARENT="ou=people,%s" % DEFAULT_SUFFIX + 
dbgen_users(topo.standalone, 90000, import_ldif, DEFAULT_SUFFIX, entry_name=RDN, generic=True, parent=PARENT) + + # online import + import_task = ImportTask(topo.standalone) + import_task.import_suffix_from_ldif(ldiffile=import_ldif, suffix=DEFAULT_SUFFIX) + # Check for up to 120sec that the completion + for i in range(1, 12): + if len(topo.standalone.ds_error_log.match('.*import userRoot: Import complete. Processed 9000.*')) > 0: + break + time.sleep(10) + import_complete = topo.standalone.ds_error_log.match('.*import userRoot: Import complete. Processed 9000.*') + assert (len(import_complete) == 1) + + # Restart server + topo.standalone.restart() + + # Create Managed role entry + managed_roles = ManagedRoles(topo.standalone, DEFAULT_SUFFIX) + role = managed_roles.create(properties={"cn": 'MANAGED_ROLE'}) + + # Assign managed role to 4 entries out of the 90K + for i in range(1, 5): + dn = "uid=%s0000%d,%s" % (RDN, i, PARENT) + topo.standalone.modify_s(dn, [(ldap.MOD_REPLACE, 'nsRoleDN', [role.dn.encode()])]) + + + # Now check that search is fast, evaluating only 4 entries + search_start = time.time() + entries = topo.standalone.search_s(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE, "(nsrole=%s)" % role.dn) + duration = time.time() - search_start + log.info("Duration of the search was %f", duration) + assert(len(entries) == 4) + assert (duration < 1) + + # Restart server to refresh entrycache + topo.standalone.restart() + + # Create Filtered Role entry + # it uses 'givenName' attribute that is indexed (eq) by default + filtered_roles = FilteredRoles(topo.standalone, DEFAULT_SUFFIX) + role = filtered_roles.create(properties={'cn': 'FILTERED_ROLE', 'nsRoleFilter': 'givenName=Technical'}) + + # Now check that search is fast + search_start = time.time() + entries = topo.standalone.search_s(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE, "(nsrole=%s)" % role.dn) + duration = time.time() - search_start + log.info("Duration of the search was %f", duration) + assert (duration < 1) + + def 
fin(): + topo.standalone.restart() + try: + managed_roles = ManagedRoles(topo.standalone, DEFAULT_SUFFIX) + for i in managed_roles.list(): + i.delete() + filtered_roles = FilteredRoles(topo.standalone, DEFAULT_SUFFIX) + for i in filtered_roles.list(): + i.delete() + except: + pass + os.remove(import_ldif) + + request.addfinalizer(fin) + +def test_not_such_entry_role_rewrite(topo, request): + """Test that filter components containing 'nsrole=xxx' + ,where xxx does not refer to any role definition, + replace the component by 'nsuniqueid=-1' + + :id: b098dda5-fc77-46c4-84a7-5d0c7035bb77 + :setup: server + :steps: + 1. Setup nsrole rewriter + 2. Add a 'nsroleDN' indexes for managed roles + 3. Create an 90K ldif files + This is large so that unindex search will last long + 4. import/restart the instance + 5. Create a managed role and add 4 entries in that role + 6. Enable plugin log level to capture role plugin message + 7. Check that a search is fast "(OR(nsrole=managed_role)(nsrole=not_existing_role))" + 8. Stop the instance + 9. Check that a message like this was logged: replace (nsrole=not_existing_role) by (nsuniqueid=-1) + :expectedresults: + 1. Operation should succeed + 2. Operation should succeed + 3. Operation should succeed + 4. Operation should succeed + 5. Operation should succeed + 6. Operation should succeed + 7. Operation should succeed + 8. Operation should succeed + 9. 
Operation should succeed + """ + # Setup nsrole rewriter + rewriters = Rewriters(topo.standalone) + rewriter = rewriters.ensure_state(properties={"cn": "nsrole", "nsslapd-libpath": 'libroles-plugin'}) + try: + rewriter.add('nsslapd-filterrewriter', "role_nsRole_filter_rewriter") + except: + pass + + # Create an index for nsRoleDN that is used by managed role + attrname = 'nsRoleDN' + backends = Backends(topo.standalone) + backend = backends.get(DEFAULT_BENAME) + indexes = backend.get_indexes() + try: + index = indexes.create(properties={ + 'cn': attrname, + 'nsSystemIndex': 'false', + 'nsIndexType': ['eq', 'pres'] + }) + except: + pass + + # Build LDIF file + bdb_values = { + 'wait60': 60 + } + + # Note: I still sometime get failure with a 60s timeout so lets use 90s + mdb_values = { + 'wait60': 90 + } + + if get_default_db_lib() == 'bdb': + values = bdb_values + else: + values = mdb_values + + ldif_dir = topo.standalone.get_ldif_dir() + import_ldif = ldif_dir + '/perf_import.ldif' + + RDN="userNew" + PARENT="ou=people,%s" % DEFAULT_SUFFIX + dbgen_users(topo.standalone, 91000, import_ldif, DEFAULT_SUFFIX, entry_name=RDN, generic=True, parent=PARENT) + + # online import + import_task = ImportTask(topo.standalone) + import_task.import_suffix_from_ldif(ldiffile=import_ldif, suffix=DEFAULT_SUFFIX) + # Check for up to 120sec that the completion + for i in range(1, 12): + if len(topo.standalone.ds_error_log.match('.*import userRoot: Import complete. Processed 9100.*')) > 0: + break + time.sleep(10) + import_complete = topo.standalone.ds_error_log.match('.*import userRoot: Import complete. 
Processed 9100.*') + assert (len(import_complete) == 1) + + # Restart server + topo.standalone.restart() + + # Create Managed role entry + managed_roles = ManagedRoles(topo.standalone, DEFAULT_SUFFIX) + role = managed_roles.create(properties={"cn": 'MANAGED_ROLE'}) + + # Assign managed role to 4 entries out of the 90K + for i in range(1, 5): + dn = "uid=%s0000%d,%s" % (RDN, i, PARENT) + topo.standalone.modify_s(dn, [(ldap.MOD_REPLACE, 'nsRoleDN', [role.dn.encode()])]) + + # Enable plugin level to check message + topo.standalone.config.loglevel(vals=(ErrorLog.DEFAULT,ErrorLog.PLUGIN)) + + # Now check that search is fast, evaluating only 4 entries + search_start = time.time() + entries = topo.standalone.search_s(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE, "(|(nsrole=%s)(nsrole=cn=not_such_entry_role,%s))" % (role.dn, DEFAULT_SUFFIX)) + duration = time.time() - search_start + log.info("Duration of the search was %f", duration) + assert(len(entries) == 4) + assert (duration < 1) + + # Restart server to refresh entrycache + topo.standalone.stop() + + # Check that when the role does not exist it is translated into 'nsuniqueid=-1' + pattern = ".*replace \(nsRole=cn=not_such_entry_role,dc=example,dc=com\) by \(nsuniqueid=-1\).*" + assert topo.standalone.ds_error_log.match(pattern) + + def fin(): + topo.standalone.restart() + try: + managed_roles = ManagedRoles(topo.standalone, DEFAULT_SUFFIX) + for i in managed_roles.list(): + i.delete() + except: + pass + os.remove(import_ldif) + + request.addfinalizer(fin) + +if __name__ == "__main__": + CURRENT_FILE = os.path.realpath(__file__) + pytest.main("-s -v %s" % CURRENT_FILE) diff --git a/dirsrvtests/tests/suites/sasl/__init__.py b/dirsrvtests/tests/suites/sasl/__init__.py new file mode 100644 index 0000000..78c30ff --- /dev/null +++ b/dirsrvtests/tests/suites/sasl/__init__.py @@ -0,0 +1,3 @@ +""" + :Requirement: 389-ds-base: SASL Mechanism +""" diff --git a/dirsrvtests/tests/suites/sasl/allowed_mechs_test.py 
b/dirsrvtests/tests/suites/sasl/allowed_mechs_test.py new file mode 100644 index 0000000..352cd62 --- /dev/null +++ b/dirsrvtests/tests/suites/sasl/allowed_mechs_test.py @@ -0,0 +1,211 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2017 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. +# --- END COPYRIGHT BLOCK --- +# + +import pytest +import os +from lib389.topologies import topology_st +from lib389.utils import ds_is_older + +pytestmark = pytest.mark.tier1 + +def test_basic_feature(topology_st): + """Test the alloweed sasl mechanism feature + + :id: b0453b91-9955-4e8f-9d2f-a6bf440022b1 + :setup: Standalone instance + :steps: + 1. Get the default list of mechanisms + 2. Set allowed mechanism PLAIN + 3. Verify the list + 4. Restart the server + 5. Verify that list is still correct + 6. Edit mechanisms to allow just PLAIN and EXTERNAL + 7. Verify the list + 8. Edit mechanisms to allow just PLAIN and GSSAPI + 9. Verify the list + 10. Restart the server + 11. Verify that list is still correct + 12. Edit mechanisms to allow just PLAIN, GSSAPI, and ANONYMOUS + 13. Verify the list + 14. Restart the server + 15. Verify that list is still correct + 16. Edit mechanisms to allow just PLAIN and ANONYMOUS + 17. Verify the list + 18. Restart the server + 19. Verify that list is still correct + 20. Reset the allowed list to nothing, + 21. Verify that the returned mechanisms are the default ones + 22. Restart the server + 23. Verify that list is still correct + :expectedresults: + 1. GSSAPI, PLAIN and EXTERNAL mechanisms should be acquired + 2. Operation should be successful + 3. List should have - PLAIN, EXTERNAL; shouldn't have - GSSAPI + 4. Server should be restarted + 5. List should have - PLAIN, EXTERNAL; shouldn't have - GSSAPI + 6. Operation should be successful + 7. List should have - PLAIN, EXTERNAL; shouldn't have - GSSAPI + 8. Operation should be successful + 9. 
List should have - PLAIN, EXTERNAL, GSSAPI + 10. Server should be restarted + 11. List should have - PLAIN, EXTERNAL, GSSAPI + 12. Operation should be successful + 13. List should have - PLAIN, EXTERNAL, GSSAPI, ANONYMOUS + 14. Server should be restarted + 15. List should have - PLAIN, EXTERNAL, GSSAPI, ANONYMOUS + 16. Operation should be successful + 17. List should have - PLAIN, EXTERNAL, ANONYMOUS; shouldn't have - GSSAPI + 18. Server should be restarted + 19. List should have - PLAIN, EXTERNAL, ANONYMOUS; shouldn't have - GSSAPI + 20. Operation should be successful + 21. List should have - PLAIN, EXTERNAL, GSSAPI + 22. Server should be restarted + 23. List should have - PLAIN, EXTERNAL, GSSAPI + """ + + standalone = topology_st.standalone + + # Get the supported mechanisms. This should contain PLAIN, GSSAPI, EXTERNAL at least + standalone.log.info("Test we have some of the default mechanisms") + orig_mechs = standalone.rootdse.supported_sasl() + print(orig_mechs) + assert('GSSAPI' in orig_mechs) + assert('PLAIN' in orig_mechs) + assert('EXTERNAL' in orig_mechs) + + # Now edit the supported mechanisms. Check them again. + standalone.log.info("Edit mechanisms to allow just PLAIN") + standalone.config.set('nsslapd-allowed-sasl-mechanisms', 'PLAIN') + limit_mechs = standalone.rootdse.supported_sasl() + assert('PLAIN' in limit_mechs) + assert('EXTERNAL' in limit_mechs) # Should always be in the allowed list, even if not set. + assert('GSSAPI' not in limit_mechs) # Should not be there! 
+ + # Restart the server a few times and make sure nothing changes + standalone.log.info("Restart server and make sure we still have correct allowed mechs") + standalone.restart() + standalone.restart() + limit_mechs = standalone.rootdse.supported_sasl() + assert('PLAIN' in limit_mechs) + assert('EXTERNAL' in limit_mechs) + assert('GSSAPI' not in limit_mechs) + + # Set EXTERNAL, even though its always supported + standalone.log.info("Edit mechanisms to allow just PLAIN and EXTERNAL") + standalone.config.set('nsslapd-allowed-sasl-mechanisms', 'PLAIN, EXTERNAL') + limit_mechs = standalone.rootdse.supported_sasl() + assert('PLAIN' in limit_mechs) + assert('EXTERNAL' in limit_mechs) + assert('GSSAPI' not in limit_mechs) + + # Now edit the supported mechanisms. Check them again. + standalone.log.info("Edit mechanisms to allow just PLAIN and GSSAPI") + standalone.config.set('nsslapd-allowed-sasl-mechanisms', 'PLAIN, GSSAPI') + limit_mechs = standalone.rootdse.supported_sasl() + assert('PLAIN' in limit_mechs) + assert('EXTERNAL' in limit_mechs) + assert('GSSAPI' in limit_mechs) + assert(len(limit_mechs) == 3) + + # Restart server twice and make sure the allowed list is the same + standalone.restart() + standalone.restart() # For ticket 49379 (test double restart) + limit_mechs = standalone.rootdse.supported_sasl() + assert('PLAIN' in limit_mechs) + assert('EXTERNAL' in limit_mechs) + assert('GSSAPI' in limit_mechs) + assert(len(limit_mechs) == 3) + + # Add ANONYMOUS to the supported mechanisms and test again. 
+ standalone.log.info("Edit mechanisms to allow just PLAIN, GSSAPI, and ANONYMOUS") + standalone.config.set('nsslapd-allowed-sasl-mechanisms', 'PLAIN, GSSAPI, ANONYMOUS') + limit_mechs = standalone.rootdse.supported_sasl() + assert('PLAIN' in limit_mechs) + assert('EXTERNAL' in limit_mechs) + assert('GSSAPI' in limit_mechs) + assert('ANONYMOUS' in limit_mechs) + assert(len(limit_mechs) == 4) + + # Restart server and make sure the allowed list is the same + standalone.restart() + standalone.restart() # For ticket 49379 (test double restart) + limit_mechs = standalone.rootdse.supported_sasl() + assert('PLAIN' in limit_mechs) + assert('EXTERNAL' in limit_mechs) + assert('GSSAPI' in limit_mechs) + assert('ANONYMOUS' in limit_mechs) + assert(len(limit_mechs) == 4) + + # Remove GSSAPI + standalone.log.info("Edit mechanisms to allow just PLAIN and ANONYMOUS") + standalone.config.set('nsslapd-allowed-sasl-mechanisms', 'PLAIN, ANONYMOUS') + limit_mechs = standalone.rootdse.supported_sasl() + assert('PLAIN' in limit_mechs) + assert('EXTERNAL' in limit_mechs) + assert('GSSAPI' not in limit_mechs) + assert('ANONYMOUS' in limit_mechs) + assert(len(limit_mechs) == 3) + + # Restart server and make sure the allowed list is the same + standalone.restart() + limit_mechs = standalone.rootdse.supported_sasl() + assert('PLAIN' in limit_mechs) + assert('EXTERNAL' in limit_mechs) + assert('GSSAPI' not in limit_mechs) + assert('ANONYMOUS' in limit_mechs) + assert(len(limit_mechs) == 3) + + # Do a config reset + standalone.log.info("Reset allowed mechaisms") + standalone.config.reset('nsslapd-allowed-sasl-mechanisms') + + # check the supported list is the same as our first check. 
+ standalone.log.info("Check that we have the original set of mechanisms") + final_mechs = standalone.rootdse.supported_sasl() + assert(set(final_mechs) == set(orig_mechs)) + + # Check it after a restart + standalone.log.info("Check that we have the original set of mechanisms after a restart") + standalone.restart() + final_mechs = standalone.rootdse.supported_sasl() + assert(set(final_mechs) == set(orig_mechs)) + + +@pytest.mark.bz1816854 +@pytest.mark.ds50869 +@pytest.mark.xfail(ds_is_older('1.3.11', '1.4.3.6'), reason="May fail because of bz1816854") +def test_config_set_few_mechs(topology_st): + """Test that we can successfully set multiple values to nsslapd-allowed-sasl-mechanisms + + :id: d7c3c58b-4fbe-42ab-a8d4-9dd362916d5f + :setup: Standalone instance + :steps: + 1. Set nsslapd-allowed-sasl-mechanisms to "PLAIN GSSAPI" + 2. Verify nsslapd-allowed-sasl-mechanisms has the values + :expectedresults: + 1. Operation should be successful + 2. Operation should be successful + """ + + standalone = topology_st.standalone + + standalone.log.info("Set nsslapd-allowed-sasl-mechanisms to 'PLAIN GSSAPI'") + standalone.config.set('nsslapd-allowed-sasl-mechanisms', 'PLAIN GSSAPI') + + standalone.log.info("Verify nsslapd-allowed-sasl-mechanisms has the values") + allowed_mechs = standalone.config.get_attr_val_utf8('nsslapd-allowed-sasl-mechanisms') + assert('PLAIN' in allowed_mechs) + assert('GSSAPI' in allowed_mechs) + + +if __name__ == '__main__': + # Run isolated + # -s for DEBUG mode + CURRENT_FILE = os.path.realpath(__file__) + pytest.main("-s %s" % CURRENT_FILE) diff --git a/dirsrvtests/tests/suites/sasl/plain_test.py b/dirsrvtests/tests/suites/sasl/plain_test.py new file mode 100644 index 0000000..c7e672f --- /dev/null +++ b/dirsrvtests/tests/suites/sasl/plain_test.py @@ -0,0 +1,95 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2017 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. 
+# --- END COPYRIGHT BLOCK --- +# + +import pytest +import ldap + +from lib389.topologies import topology_st +# This pulls in logging I think +from lib389.utils import * +from lib389._constants import DEFAULT_SUFFIX, DEFAULT_SECURE_PORT +from lib389.sasl import PlainSASL +from lib389.idm.services import ServiceAccounts, ServiceAccount + +pytestmark = pytest.mark.tier1 + +log = logging.getLogger(__name__) + + +def test_basic_feature(topology_st): + """Check basic SASL functionality for PLAIN mechanism + + :id: 75ddc6fa-aa5a-4025-9c71-1abad20c91fc + :setup: Standalone instance + :steps: + 1. Stop the instance + 2. Clean up confdir from previous cert and key files + 3. Create RSA files: CA, key and cert + 4. Start the instance + 5. Create RSA entry + 6. Set nsslapd-secureport to 636 and nsslapd-security to 'on' + 7. Restart the instance + 8. Create a user + 9. Check we can bind + 10. Check that PLAIN is listed in supported mechs + 11. Set up Plain SASL credentials + 12. Try to open a connection without TLS + 13. Try to open a connection with TLS + 14. Try to open a connection with a wrong password + :expectedresults: + 1. The instance should stop + 2. Confdir should be clean + 3. RSA files should be created + 4. The instance should start + 5. RSA entry should be created + 6. nsslapd-secureport and nsslapd-security should be set successfully + 7. The instance should be restarted + 8. User should be created + 9. Bind should be successful + 10. PLAIN should be listed in supported mechs + 11. Plain SASL should be successfully set + 12. AUTH_UNKNOWN exception should be raised + 13. The connection should open + 14. INVALID_CREDENTIALS exception should be raised + """ + + standalone = topology_st.standalone + standalone.enable_tls() + + # Create a user + sas = ServiceAccounts(standalone, DEFAULT_SUFFIX) + sas._basedn = DEFAULT_SUFFIX + sa = sas.create(properties={'cn': 'testaccount', 'userPassword': 'password'}) + # Check we can bind. This will raise exceptions if it fails. 
+ sa.bind('password') + + # Check that PLAIN is listed in supported mechns. + assert(standalone.rootdse.supports_sasl_plain()) + + # The sasl parameters don't change, so set them up now. + # Do we need the sasl map dn:? + auth_tokens = PlainSASL("dn:%s" % sa.dn, 'password') + + # Check that it fails without TLS + with pytest.raises(ldap.AUTH_UNKNOWN): + conn = sa.sasl_bind(uri=standalone.get_ldap_uri(), saslmethod='PLAIN', sasltoken=auth_tokens, connOnly=True) + + # We *have* to use REQCERT NEVER here because python ldap fails cert verification for .... some reason that even + # I can not solve. I think it's leaking state across connections in start_tls_s? + + # Check that it works with TLS + conn = sa.sasl_bind(uri=standalone.get_ldaps_uri(), saslmethod='PLAIN', sasltoken=auth_tokens, connOnly=True) + conn.close() + + # Check that it correct fails our bind if we don't have the password. + auth_tokens = PlainSASL("dn:%s" % sa.dn, 'password-wrong') + with pytest.raises(ldap.INVALID_CREDENTIALS): + conn = sa.sasl_bind(uri=standalone.get_ldaps_uri(), saslmethod='PLAIN', sasltoken=auth_tokens, connOnly=True) + + # Done! diff --git a/dirsrvtests/tests/suites/sasl/regression_test.py b/dirsrvtests/tests/suites/sasl/regression_test.py new file mode 100644 index 0000000..c2d5d6f --- /dev/null +++ b/dirsrvtests/tests/suites/sasl/regression_test.py @@ -0,0 +1,181 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2020 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. 
+# --- END COPYRIGHT BLOCK --- +# + +import os +import pytest +from lib389.tasks import * +from lib389.utils import * +from lib389.topologies import topology_m2 +from lib389._constants import * +from lib389.replica import ReplicationManager + +pytestmark = [pytest.mark.tier1, + pytest.mark.skipif(ds_is_older('1.3.5'), reason="Not implemented")] + +logging.getLogger(__name__).setLevel(logging.DEBUG) +log = logging.getLogger(__name__) + +ISSUER = 'cn=CAcert' +CACERT = 'CAcertificate' +M1SERVERCERT = 'Server-Cert1' +M2SERVERCERT = 'Server-Cert2' +M1LDAPSPORT = '41636' +M2LDAPSPORT = '42636' +M1SUBJECT = 'CN=' + os.uname()[1] + ',OU=389 Directory Server' +M2SUBJECT = 'CN=' + os.uname()[1] + ',OU=390 Directory Server' + + +def add_entry(server, name, rdntmpl, start, num): + log.info("\n######################### Adding %d entries to %s ######################\n" % (num, name)) + + for i in range(num): + ii = start + i + dn = '%s%d,%s' % (rdntmpl, ii, DEFAULT_SUFFIX) + server.add_s(Entry((dn, {'objectclass': 'top person extensibleObject'.split(), + 'uid': '%s%d' % (rdntmpl, ii), + 'cn': '%s user%d' % (name, ii), + 'sn': 'user%d' % (ii)}))) + + +def check_pems(confdir, mycacert, myservercert, myserverkey, notexist): + log.info("\n######################### Check PEM files (%s, %s, %s)%s in %s ######################\n" + % (mycacert, myservercert, myserverkey, notexist, confdir)) + global cacert + cacert = f"{mycacert}.pem" + if os.path.isfile(cacert): + if notexist == "": + log.info('%s is successfully generated.' % cacert) + else: + log.info('%s is incorrecly generated.' % cacert) + assert False + else: + if notexist == "": + log.fatal('%s is not generated.' % cacert) + assert False + else: + log.info('%s is correctly not generated.' % cacert) + servercert = f"{myservercert}.pem" + if os.path.isfile(servercert): + if notexist == "": + log.info('%s is successfully generated.' % servercert) + else: + log.info('%s is incorrecly generated.' 
% servercert) + assert False + else: + if notexist == "": + log.fatal('%s was not generated.' % servercert) + assert False + else: + log.info('%s is correctly not generated.' % servercert) + serverkey = f"{myserverkey}.pem" + if os.path.isfile(serverkey): + if notexist == "": + log.info('%s is successfully generated.' % serverkey) + else: + log.info('%s is incorrectly generated.' % serverkey) + assert False + else: + if notexist == "": + log.fatal('%s was not generated.' % serverkey) + assert False + else: + log.info('%s is correctly not generated.' % serverkey) + + +def relocate_pem_files(topology_m2): + log.info("######################### Relocate PEM files on supplier1 ######################") + certdir_prefix = "/dev/shm" + mycacert = os.path.join(certdir_prefix, "MyCA") + topology_m2.ms["supplier1"].encryption.set('CACertExtractFile', mycacert) + myservercert = os.path.join(certdir_prefix, "MyServerCert1") + myserverkey = os.path.join(certdir_prefix, "MyServerKey1") + topology_m2.ms["supplier1"].rsa.apply_mods([(ldap.MOD_REPLACE, 'ServerCertExtractFile', myservercert), + (ldap.MOD_REPLACE, 'ServerKeyExtractFile', myserverkey)]) + log.info("##### restart supplier1") + topology_m2.ms["supplier1"].restart() + check_pems(certdir_prefix, mycacert, myservercert, myserverkey, "") + +@pytest.mark.ds47536 +def test_openldap_no_nss_crypto(topology_m2): + """Check that we allow usage of OpenLDAP libraries + that don't use NSS for crypto + + :id: 0a622f3d-8ba5-4df2-a1de-1fb2237da40a + :setup: Replication with two suppliers: + supplier_1 ----- startTLS -----> supplier_2; + supplier_1 <-- TLS_clientAuth -- supplier_2; + nsslapd-extract-pemfiles set to 'on' on both suppliers + without specifying cert names + :steps: + 1. Add 5 users to supplier 1 and 2 + 2. Check that the users were successfully replicated + 3. Relocate PEM files on supplier 1 + 4. Check PEM files in supplier 1 config directory + 5. Add 5 users more to supplier 1 and 2 + 6. 
Check that the users were successfully replicated + 7. Export userRoot on supplier 1 + :expectedresults: + 1. Users should be successfully added + 2. Users should be successfully replicated + 3. Operation should be successful + 4. PEM files should be found + 5. Users should be successfully added + 6. Users should be successfully replicated + 7. Operation should be successful + """ + + log.info("Ticket 47536 - Allow usage of OpenLDAP libraries that don't use NSS for crypto") + + m1 = topology_m2.ms["supplier1"] + m2 = topology_m2.ms["supplier2"] + [i.enable_tls() for i in topology_m2] + repl = ReplicationManager(DEFAULT_SUFFIX) + repl.test_replication(m1, m2) + + add_entry(m1, 'supplier1', 'uid=m1user', 0, 5) + add_entry(m2, 'supplier2', 'uid=m2user', 0, 5) + repl.wait_for_replication(m1, m2) + repl.wait_for_replication(m2, m1) + + log.info('##### Searching for entries on supplier1...') + entries = m1.search_s(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE, '(uid=*)') + assert 11 == len(entries) + + log.info('##### Searching for entries on supplier2...') + entries = m2.search_s(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE, '(uid=*)') + assert 11 == len(entries) + + relocate_pem_files(topology_m2) + + add_entry(m1, 'supplier1', 'uid=m1user', 10, 5) + add_entry(m2, 'supplier2', 'uid=m2user', 10, 5) + + repl.wait_for_replication(m1, m2) + repl.wait_for_replication(m2, m1) + + log.info('##### Searching for entries on supplier1...') + entries = m1.search_s(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE, '(uid=*)') + assert 21 == len(entries) + + log.info('##### Searching for entries on supplier2...') + entries = m2.search_s(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE, '(uid=*)') + assert 21 == len(entries) + + output_file = os.path.join(m1.get_ldif_dir(), "supplier1.ldif") + m1.tasks.exportLDIF(benamebase='userRoot', output_file=output_file, args={'wait': True}) + + log.info("Ticket 47536 - PASSED") + + +if __name__ == '__main__': + # Run isolated + # -s for DEBUG mode + + CURRENT_FILE = 
os.path.realpath(__file__) + pytest.main("-s %s" % CURRENT_FILE) diff --git a/dirsrvtests/tests/suites/schema/__init__.py b/dirsrvtests/tests/suites/schema/__init__.py new file mode 100644 index 0000000..fe69a45 --- /dev/null +++ b/dirsrvtests/tests/suites/schema/__init__.py @@ -0,0 +1,3 @@ +""" + :Requirement: 389-ds-base: Directory Server Schema +""" diff --git a/dirsrvtests/tests/suites/schema/eduperson_test.py b/dirsrvtests/tests/suites/schema/eduperson_test.py new file mode 100644 index 0000000..1ddcc63 --- /dev/null +++ b/dirsrvtests/tests/suites/schema/eduperson_test.py @@ -0,0 +1,90 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2017 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. +# --- END COPYRIGHT BLOCK --- +# + + +import os +import logging +import pytest +import ldap + +from lib389.idm.user import UserAccounts +from lib389.topologies import topology_st as topology +from lib389._constants import DEFAULT_SUFFIX + +pytestmark = pytest.mark.tier1 + +DEBUGGING = os.getenv('DEBUGGING', False) + +if DEBUGGING is not False: + DEBUGGING = True + +if DEBUGGING: + logging.getLogger(__name__).setLevel(logging.DEBUG) +else: + logging.getLogger(__name__).setLevel(logging.INFO) + +log = logging.getLogger(__name__) + + +def test_account_locking(topology): + """Test the eduperson schema works + + :id: f2f15449-a822-4ec6-b4ea-bd6db6240a6c + + :setup: Standalone instance + + :steps: + 1. Add a common user + 2. Extend the user with eduPerson objectClass + 3. Add attributes in eduPerson + + :expectedresults: + 1. User should be added with its properties + 2. User should be extended with eduPerson as the objectClass + 3. eduPerson should be added + """ + + if DEBUGGING: + # Add debugging steps(if any)... 
+ pass + + users = UserAccounts(topology.standalone, DEFAULT_SUFFIX) + + user_properties = { + 'uid': 'testuser', + 'cn' : 'testuser', + 'sn' : 'user', + 'uidNumber' : '1000', + 'gidNumber' : '2000', + 'homeDirectory' : '/home/testuser', + } + testuser = users.create(properties=user_properties) + + # Extend the user with eduPerson + testuser.add('objectClass', 'eduPerson') + + # now add eduPerson attrs + testuser.add('eduPersonAffiliation', 'value') # From 2002 + testuser.add('eduPersonNickName', 'value') # From 2002 + testuser.add('eduPersonOrgDN', 'ou=People,%s' % DEFAULT_SUFFIX) # From 2002 + testuser.add('eduPersonOrgUnitDN', 'ou=People,%s' % DEFAULT_SUFFIX) # From 2002 + testuser.add('eduPersonPrimaryAffiliation', 'value') # From 2002 + testuser.add('eduPersonPrincipalName', 'value') # From 2002 + testuser.add('eduPersonEntitlement', 'value') # From 2002 + testuser.add('eduPersonPrimaryOrgUnitDN', 'ou=People,%s' % DEFAULT_SUFFIX) # From 2002 + testuser.add('eduPersonScopedAffiliation', 'value') # From 2003 + testuser.add('eduPersonTargetedID', 'value') # From 2003 + testuser.add('eduPersonAssurance', 'value') # From 2008 + testuser.add('eduPersonPrincipalNamePrior', 'value') # From 2012 + testuser.add('eduPersonUniqueId', 'value') # From 2013 + testuser.add('eduPersonOrcid', 'value') # From 2016 + + log.info('Test PASSED') + + diff --git a/dirsrvtests/tests/suites/schema/schema_reload_test.py b/dirsrvtests/tests/suites/schema/schema_reload_test.py new file mode 100644 index 0000000..232a497 --- /dev/null +++ b/dirsrvtests/tests/suites/schema/schema_reload_test.py @@ -0,0 +1,293 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2017 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. 
+# --- END COPYRIGHT BLOCK --- + +import logging +import pytest +import time, ldap, re, os +from lib389.schema import Schema +from lib389.utils import ensure_bytes +from lib389.topologies import topology_st as topo +from lib389._constants import DEFAULT_SUFFIX, DN_DM, PW_DM +from lib389._mapped_object import DSLdapObjects +from lib389.idm.user import UserAccounts + +pytestmark = pytest.mark.tier1 + +DEBUGGING = os.getenv("DEBUGGING", default=False) +if DEBUGGING: + logging.getLogger(__name__).setLevel(logging.DEBUG) +else: + logging.getLogger(__name__).setLevel(logging.INFO) +log = logging.getLogger(__name__) + +INVALID_SCHEMA = 'givenName $ cn $ MoZiLLaATTRiBuTe' + + +def test_schema_reload_with_searches(topo): + """Test that during the schema reload task there is a small window where the new schema is not loaded + into the asi hashtables - this results in searches not returning entries. + + :id: 375f1fdc-a9ef-45de-984d-0b79a40ff219 + :setup: Standalone instance + :steps: + 1. Create a test user + 2. Run a schema_reload task while searching for our user + 3. While we wait for the task to complete search for our user + 4. Check the user is still being returned and if task is complete + :expectedresults: + 1. Operation should be successful + 2. Operation should be successful + 3. Operation should be successful + 4. Operation should be successful + """ + + log.info('Test the searches still work as expected during schema reload tasks') + + # Add a user + users = UserAccounts(topo.standalone, DEFAULT_SUFFIX) + user = users.create_test_user(uid=1) + + # Run a schema_reload tasks while searching for our user.Since + # this is a race condition, run it several times. 
+ schema = Schema(topo.standalone) + task = schema.reload(schema_dir=topo.standalone.schemadir) + + # While we wait for the task to complete search for our user + search_count = 0 + while search_count < 10: + # Now check the user is still being returned + # Check if task is complete + assert user.exists() + if task.get_exit_code() == 0: + break + time.sleep(1) + search_count += 1 + + +def test_schema_operation(topo): + """Test that the cases in original schema are preserved. + Test that duplicated schema except cases are not loaded + Test to use a custom schema + + :id: e7448863-ac62-4b49-b013-4efa412c0455 + :setup: Standalone instance + :steps: + 1. Create a test schema with cases + 2. Run a schema_reload task + 3. Check the attribute is present + 4. Case 2: Check duplicated schema except cases are not loaded + 5. Case 2-1: Use the custom schema + + :expectedresults: + 1. Operation should be successful + 2. Operation should be successful + 3. Operation should be successful + 4. Operation should be successful + 5. 
Operation should be successful + """ + + log.info('case 1: Test the cases in the original schema are preserved.') + + schema_filename = topo.standalone.schemadir + '/98test.ldif' + try: + with open(schema_filename, "w") as schema_file: + schema_file.write("dn: cn=schema\n") + schema_file.write("attributetypes: ( 8.9.10.11.12.13.14 NAME " + + "'MoZiLLaaTTRiBuTe' SYNTAX 1.3.6.1.4.1.1466.115.121.1.15 " + + " X-ORIGIN 'Mozilla Dummy Schema' )\n") + schema_file.write("objectclasses: ( 1.2.3.4.5.6.7 NAME 'MozillaObject' " + + "SUP top MUST ( objectclass $ cn ) MAY ( MoZiLLaaTTRiBuTe )" + + " X-ORIGIN 'user defined' )')\n") + os.chmod(schema_filename, 0o777) + except OSError as e: + log.fatal("Failed to create schema file: " + + "{} Error: {}".format(schema_filename, str(e))) + + + # run the schema reload task with the default schemadir + schema = Schema(topo.standalone) + task = schema.reload(schema_dir=topo.standalone.schemadir) + task.wait() + + subschema = topo.standalone.schema.get_subschema() + at_obj = subschema.get_obj(ldap.schema.AttributeType, 'MoZiLLaaTTRiBuTe') + + assert at_obj is not None, "The attribute was not found on server" + + log.info('Case 2: Duplicated schema except cases are not loaded.') + + schema_filename = topo.standalone.schemadir + '/97test.ldif' + try: + with open(schema_filename, "w") as schema_file: + Mozattr1 = "MOZILLAATTRIBUTE" + schema_file.write("dn: cn=schema\n") + schema_file.write("attributetypes: ( 8.9.10.11.12.13.14 NAME " + + "'MOZILLAATTRIBUTE' SYNTAX 1.3.6.1.4.1.1466.115.121.1.15 " + + "X-ORIGIN 'Mozilla Dummy Schema' )\n") + schema_file.write("objectclasses: ( 1.2.3.4.5.6.7 NAME 'MozillaObject' "+ + "SUP top MUST ( objectclass $ cn ) MAY ( MOZILLAATTRIBUTE ) "+ + "X-ORIGIN 'user defined' )')\n") + os.chmod(schema_filename, 0o777) + except OSError as e: + log.fatal("Failed to create schema file: " + + "{} Error: {}".format(schema_filename, str(e))) + + # run the schema reload task with the default schemadir + task = 
schema.reload(schema_dir=topo.standalone.schemadir) + task.wait() + + subschema_duplicate = topo.standalone.schema.get_subschema() + at_obj_duplicate = subschema_duplicate.get_obj(ldap.schema.AttributeType, 'MOZILLAATTRIBUTE') + + moz = re.findall('MOZILLAATTRIBUTE',str(at_obj_duplicate)) + if moz: + log.error('case 2: MOZILLAATTRIBUTE is in the objectclasses list -- FAILURE') + assert False + else: + log.info('case 2: MOZILLAATTRIBUTE is not in the objectclasses list -- PASS') + + Mozattr2 = "mozillaattribute" + log.info(f'Case 2-1: Use the custom schema with {Mozattr2}') + name = "test_user" + ld = ldap.initialize(topo.standalone.get_ldap_uri()) + ld.simple_bind_s(DN_DM,PW_DM) + ld.add_s(f"cn={name},{DEFAULT_SUFFIX}",[('objectclass', [b'top', b'person', b'MozillaObject']), + ('sn', [ensure_bytes(name)]), + ('cn', [ensure_bytes(name)]), + (Mozattr2, [ensure_bytes(name)]) + ]) + + mozattrval = DSLdapObjects(topo.standalone,DEFAULT_SUFFIX).filter('(objectclass=mozillaobject)')[0] + assert mozattrval.get_attr_val_utf8('mozillaattribute') == name + + +def test_valid_schema(topo): + """Test schema-reload task with valid schema + + :id: 2ab304c0-3e58-4d34-b23b-a14b5997c7a8 + :setup: Standalone instance + :steps: + 1. Create schema file with valid schema + 2. Run schema-reload.pl script + 3. Run ldapsearch and check if schema was added + :expectedresults: + 1. File creation should work + 2. The schema reload task should be successful + 3. 
Searching the server should return the new schema + """ + + log.info("Test schema-reload task with valid schema") + + # Step 1 - Create schema file + log.info("Create valid schema file (99user.ldif)...") + schema_filename = (topo.standalone.schemadir + "/99user.ldif") + try: + with open(schema_filename, 'w') as schema_file: + schema_file.write("dn: cn=schema\n") + schema_file.write("attributetypes: ( 8.9.10.11.12.13.13 NAME " + + "'ValidAttribute' SYNTAX 1.3.6.1.4.1.1466.115.121.1.15" + + " X-ORIGIN 'Mozilla Dummy Schema' )\n") + schema_file.write("objectclasses: ( 1.2.3.4.5.6.7.8 NAME 'TestObject' " + + "SUP top MUST ( objectclass $ cn ) MAY ( givenName $ " + + "sn $ ValidAttribute ) X-ORIGIN 'user defined' )')\n") + os.chmod(schema_filename, 0o777) + except OSError as e: + log.fatal("Failed to create schema file: " + + "{} Error: {}".format(schema_filename, str(e))) + + # Step 2 - Run the schema-reload task + log.info("Run the schema-reload task...") + schema = Schema(topo.standalone) + task = schema.reload(schema_dir=topo.standalone.schemadir) + task.wait() + assert task.get_exit_code() == 0, "The schema reload task failed" + + # Step 3 - Verify valid schema was added to the server + log.info("Check cn=schema to verify the valid schema was added") + subschema = topo.standalone.schema.get_subschema() + + oc_obj = subschema.get_obj(ldap.schema.ObjectClass, 'TestObject') + assert oc_obj is not None, "The new objectclass was not found on server" + + at_obj = subschema.get_obj(ldap.schema.AttributeType, 'ValidAttribute') + assert at_obj is not None, "The new attribute was not found on server" + + +def test_invalid_schema(topo): + """Test schema-reload task with invalid schema + + :id: 2ab304c0-3e58-4d34-b23b-a14b5997c7a9 + :setup: Standalone instance + :steps: + 1. Create schema files with invalid schema + 2. Run schema-reload.pl script + 3. Run ldapsearch and check if schema was added + :expectedresults: + 1. File creation should work + 2. 
The schema reload task should return an error + 3. Searching the server should not return the invalid schema + """ + log.info("Test schema-reload task with invalid schema") + + # Step 1 - Create schema files: one valid, one invalid + log.info("Create valid schema file (98user.ldif)...") + schema_filename = (topo.standalone.schemadir + "/98user.ldif") + try: + with open(schema_filename, 'w') as schema_file: + schema_file.write("dn: cn=schema\n") + schema_file.write("attributetypes: ( 8.9.10.11.12.13.14 NAME " + + "'MozillaAttribute' SYNTAX 1.3.6.1.4.1.1466.115.121.1.15" + + " X-ORIGIN 'Mozilla Dummy Schema' )\n") + schema_file.write("objectclasses: ( 1.2.3.4.5.6.7 NAME 'MoZiLLaOBJeCT' " + + "SUP top MUST ( objectclass $ cn ) MAY ( givenName $ " + + "sn $ MozillaAttribute ) X-ORIGIN 'user defined' )')\n") + os.chmod(schema_filename, 0o777) + except OSError as e: + log.fatal("Failed to create schema file: " + + "{} Error: {}".format(schema_filename, str(e))) + + log.info("Create invalid schema file (99user.ldif)...") + schema_filename = (topo.standalone.schemadir + "/99user.ldif") + try: + with open(schema_filename, 'w') as schema_file: + schema_file.write("dn: cn=schema\n") + # Same attribute/objclass names, but different OIDs and MAY attributes + schema_file.write("attributetypes: ( 8.9.10.11.12.13.140 NAME " + + "'MozillaAttribute' SYNTAX 1.3.6.1.4.1.1466.115.121.1.15" + + " X-ORIGIN 'Mozilla Dummy Schema' )\n") + schema_file.write("objectclasses: ( 1.2.3.4.5.6.70 NAME 'MoZiLLaOBJeCT' " + + "SUP top MUST ( objectclass $ cn ) MAY ( givenName $ " + + "cn $ MoZiLLaATTRiBuTe ) X-ORIGIN 'user defined' )')\n") + os.chmod(schema_filename, 0o777) + except OSError as e: + log.fatal("Failed to create schema file: " + + "{} Error: {}".format(schema_filename, str(e))) + + # Step 2 - Run the schema-reload task + log.info("Run the schema-reload task, it should fail...") + schema = Schema(topo.standalone) + task = schema.reload(schema_dir=topo.standalone.schemadir) + task.wait() 
+ assert task.get_exit_code() != 0, f"The schema reload task incorectly reported success{task.get_exit_code()}" + + # Step 3 - Verify invalid schema was not added to the server + log.info("Check cn=schema to verify the invalid schema was not added") + subschema = topo.standalone.schema.get_subschema() + oc_obj = subschema.get_obj(ldap.schema.ObjectClass, 'MoZiLLaOBJeCT') + if oc_obj is not None and INVALID_SCHEMA in str(oc_obj): + log.fatal("The invalid schema was returned from the server: " + str(oc_obj)) + assert False + else: + log.info("The invalid schema is not present on the server") + + +if __name__ == '__main__': + # Run isolated + # -s for DEBUG mode + CURRENT_FILE = os.path.realpath(__file__) + pytest.main("-s %s" % CURRENT_FILE) diff --git a/dirsrvtests/tests/suites/schema/schema_replication_origin_test.py b/dirsrvtests/tests/suites/schema/schema_replication_origin_test.py new file mode 100644 index 0000000..9e4ce49 --- /dev/null +++ b/dirsrvtests/tests/suites/schema/schema_replication_origin_test.py @@ -0,0 +1,235 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2023 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. 
+# --- END COPYRIGHT BLOCK --- +# +import json +import logging +import re +import time +import ldap +import pytest +from lib389._constants import SUFFIX, ReplicaRole, DEFAULT_SUFFIX +from lib389.topologies import create_topology +from lib389.replica import Agreements, ReplicationManager +from lib389.schema import Schema +from lib389.idm.user import UserAccounts +from lib389.cli_base import LogCapture +from lib389.utils import * + +logging.getLogger(__name__).setLevel(logging.DEBUG) +log = logging.getLogger(__name__) + +def pattern_errorlog(file, log_pattern): + """Check for a pattern in the error log file.""" + + try: + pattern_errorlog.last_pos += 1 + except AttributeError: + pattern_errorlog.last_pos = 0 + + found = None + log.debug("_pattern_errorlog: start at offset %d" % pattern_errorlog.last_pos) + file.seek(pattern_errorlog.last_pos) + + # Use a while true iteration because 'for line in file: hit a + # python bug that break file.tell() + while True: + line = file.readline() + log.debug("_pattern_errorlog: [%d] %s" % (file.tell(), line)) + found = log_pattern.search(line) + if ((line == '') or (found)): + break + + log.debug("_pattern_errorlog: end at offset %d" % file.tell()) + pattern_errorlog.last_pos = file.tell() + return found + + +def trigger_update(topology, user_rdn, num): + """It triggers an update on the supplier. This will start a replication + session and a schema push + """ + + users_s = UserAccounts(topology.ms["supplier1"], DEFAULT_SUFFIX) + user = users_s.get(user_rdn) + user.replace('telephonenumber', str(num)) + + # wait until the update is replicated (until up to x seconds) + users_c = UserAccounts(topology.cs["consumer1"], DEFAULT_SUFFIX) + for _ in range(30): + try: + user = users_c.get(user_rdn) + val = user.get_attr_val_int('telephonenumber') + if val == num: + return + # the expected value is not yet replicated. 
try again + time.sleep(1) + log.debug(f"trigger_update: receive {val} (expected {num})") + except ldap.NO_SUCH_OBJECT: + time.sleep(1) + + +def trigger_schema_push(topology, user_rdn, num): + """Triggers a schema push from the supplier to the consumer or hub.""" + + supplier = topology['topology'].ms["supplier1"] + if topology['type'] == "m1h1c1": + consumer = topology['topology'].hs["hub1"] + else: + consumer = topology['topology'].cs["consumer1"] + + agreements = supplier.agreement.list(suffix=SUFFIX, + consumer_host=consumer.host, + consumer_port=consumer.port) + assert (len(agreements) == 1) + ra = agreements[0] + trigger_update(topology['topology'], user_rdn, num) + supplier.agreement.pause(ra.dn) + supplier.agreement.resume(ra.dn) + trigger_update(topology['topology'], user_rdn, num) + + +def add_attributetype(inst, num, at_name, x_origin): + """Adds a new attribute type to the schema.""" + + schema = Schema(inst) + # Add new attribute + parameters = { + 'names': [at_name], + 'oid': str(9000 + num), + 'desc': 'Test extra parenthesis in X-ORIGIN', + # 'x_origin': [x_origin], + 'x_origin': None, + 'syntax': '1.3.6.1.4.1.1466.115.121.1.15', + 'syntax_len': None, + 'x_ordered': None, + 'collective': None, + 'obsolete': None, + 'single_value': None, + 'no_user_mod': None, + 'equality': None, + 'substr': None, + 'ordering': None, + 'usage': None, + 'sup': None + } + schema.add_attributetype(parameters) + + +@pytest.fixture(scope="function", params=["m1c1", "m1h1c1"]) +def topology(request): + """Create Replication Deployment based on the params""" + + if request.param == "m1c1": + topo_roles = {ReplicaRole.SUPPLIER: 1, ReplicaRole.CONSUMER: 1} + elif request.param == "m1h1c1": + topo_roles = {ReplicaRole.SUPPLIER: 1, ReplicaRole.HUB: 1, ReplicaRole.CONSUMER: 1} + + topology = create_topology(topo_roles, request=request) + + topology.logcap = LogCapture() + return { + 'topology': topology, + 'type': request.param + } + + +@pytest.fixture(scope="function") +def 
schema_replication_init(topology): + """Initialize the test environment """ + + supplier = topology['topology'].ms["supplier1"] + supplier.errorlog_file = open(supplier.errlog, "r") + users = UserAccounts(supplier, DEFAULT_SUFFIX) + user = users.create_test_user() + user.replace('telephonenumber', '0') + + return user + + +@pytest.mark.parametrize("xorigin", ['user defined', 'custom xorigin']) +def test_schema_xorigin_repl(topology, schema_replication_init, xorigin): + """Check consumer schema is a superset (one extra OC) of supplier schema, then + schema is pushed and there is a message in the error log + + :id: 2b29823b-3e83-4b25-954a-8a081dbc15ee + :setup: Supplier and consumer topology, with one user entry; + Supplier, hub and consumer topology, with one user entry + :steps: + 1. Push the schema from the supplier to the consumer (an error should not be generated) + 2. Update the schema of the consumer, so it will be a superset of the supplier's schema + 3. Update the schema of the supplier to make its nsSchemaCSN larger than the consumer's + 4. Push the schema from the supplier to the consumer (an error should be generated) + 5. Check if the supplier learns the missing definition + 6. Check the error logs for any issues + 7. Check the startup and final state of the schema replication process + :expectedresults: + 1. The supplier's schema update should be successful + 2. The consumer's schema update should be successful + 3. The supplier's schema update should be successful + 4. The schema push operation should be successful + 5. The supplier should successfully learn the missing definition + 6. There should be no error messages in the logs + 7. 
The startup and final state of the schema replication process should be as expected + """ + + repl = ReplicationManager(DEFAULT_SUFFIX) + user = schema_replication_init + hub = None + supplier = topology['topology'].ms["supplier1"] + consumer = topology['topology'].cs["consumer1"] + if topology['type'] == "m1h1c1": + hub = topology['topology'].hs["hub1"] + + add_attributetype(supplier, 1, 'testAttribute', xorigin) + + # Search for attribute with JSON option + schema = Schema(supplier) + attr_result = schema.query_attributetype('testAttribute', json=True) + # Verify the x-origin value is correct + assert attr_result['at']['x_origin'][0] == "user defined" + + trigger_schema_push(topology, user.rdn, 1) + repl.wait_for_replication(supplier, consumer) + supplier_schema_csn = supplier.schema.get_schema_csn() + consumer_schema_csn = consumer.schema.get_schema_csn() + assert supplier_schema_csn == consumer_schema_csn + + # Check the error log of the supplier does not contain an error + regex = re.compile(r"must not be overwritten \(set replication log for additional info\)") + res = pattern_errorlog(supplier.errorlog_file, regex) + if res is not None: + assert False + + # add a new OC on the supplier so that its nsSchemaCSN is larger than the consumer (wait 2s) + add_attributetype(consumer, 2, 'testAttributeCA', xorigin) + time.sleep(2) + add_attributetype(supplier, 3, 'testAttributeSA', xorigin) + + # now push the scheam + trigger_schema_push(topology, user.rdn, 2) + repl.wait_for_replication(supplier, consumer) + supplier_schema_csn = supplier.schema.get_schema_csn() + consumer_schema_csn = consumer.schema.get_schema_csn() + assert supplier_schema_csn == consumer_schema_csn + + # Check the error log of the supplier does not contain an error + # This message may happen during the learning phase + regex = re.compile(r"must not be overwritten \(set replication log for additional info\)") + pattern_errorlog(supplier.errorlog_file, regex) + + # Check that standard schema was 
not rewritten to be "user defined' on the consumer + cn_attrs = json.loads(consumer.schema.query_attributetype("cn", json=True)) + cn_attr = cn_attrs['at'] + assert cn_attr['x_origin'][0].lower() != "user defined" + if len(cn_attr['x_origin']) > 1: + assert cn_attr['x_origin'][1].lower() != "user defined" + + # Check that the new OC "supplierNewOCB" was written to be "user defined' on the consumer + ocs = json.loads(consumer.schema.query_attributetype("testAttributeSA", json=True)) + new_oc = ocs['at'] + assert new_oc['x_origin'][0].lower() == "user defined" diff --git a/dirsrvtests/tests/suites/schema/schema_replication_test.py b/dirsrvtests/tests/suites/schema/schema_replication_test.py new file mode 100644 index 0000000..226957c --- /dev/null +++ b/dirsrvtests/tests/suites/schema/schema_replication_test.py @@ -0,0 +1,712 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2023 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. 
+# --- END COPYRIGHT BLOCK --- +# +""" +Created on Nov 7, 2013 + +@author: tbordaz +""" +import json +import logging +import re +import time +import ldap +import pytest +from lib389 import Entry +from lib389._constants import DN_CONFIG, SUFFIX +from lib389.topologies import topology_m1c1 + +from lib389.utils import * + +# Skip on older versions +pytestmark = [pytest.mark.tier1, + pytest.mark.skipif(ds_is_older('1.3'), reason="Not implemented")] +logging.getLogger(__name__).setLevel(logging.DEBUG) +log = logging.getLogger(__name__) + +TEST_REPL_DN = "cn=test_repl, %s" % SUFFIX +ENTRY_DN = "cn=test_entry, %s" % SUFFIX +MUST_OLD = "(postalAddress $ preferredLocale)" +MUST_NEW = "(postalAddress $ preferredLocale $ telexNumber)" +MAY_OLD = "(postalCode $ street)" +MAY_NEW = "(postalCode $ street $ postOfficeBox)" + + +def _header(topology_m1c1, label): + topology_m1c1.ms["supplier1"].log.info("\n\n###############################################") + topology_m1c1.ms["supplier1"].log.info("#######") + topology_m1c1.ms["supplier1"].log.info("####### %s" % label) + topology_m1c1.ms["supplier1"].log.info("#######") + topology_m1c1.ms["supplier1"].log.info("###################################################") + + +def pattern_errorlog(file, log_pattern): + try: + pattern_errorlog.last_pos += 1 + except AttributeError: + pattern_errorlog.last_pos = 0 + + found = None + log.debug("_pattern_errorlog: start at offset %d" % pattern_errorlog.last_pos) + file.seek(pattern_errorlog.last_pos) + + # Use a while true iteration because 'for line in file: hit a + # python bug that break file.tell() + while True: + line = file.readline() + log.debug("_pattern_errorlog: [%d] %s" % (file.tell(), line)) + found = log_pattern.search(line) + if ((line == '') or (found)): + break + + log.debug("_pattern_errorlog: end at offset %d" % file.tell()) + pattern_errorlog.last_pos = file.tell() + return found + + +def _oc_definition(oid_ext, name, must=None, may=None): + oid = "1.2.3.4.5.6.7.8.9.10.%d" 
% oid_ext + desc = 'To test ticket 47490' + sup = 'person' + if not must: + must = MUST_OLD + if not may: + may = MAY_OLD + + new_oc = "( %s NAME '%s' DESC '%s' SUP %s AUXILIARY MUST %s MAY %s )" % (oid, name, desc, sup, must, may) + return new_oc + + +def add_OC(instance, oid_ext, name): + new_oc = _oc_definition(oid_ext, name) + instance.schema.add_schema('objectClasses', ensure_bytes(new_oc)) + + +def mod_OC(instance, oid_ext, name, old_must=None, old_may=None, new_must=None, new_may=None): + old_oc = _oc_definition(oid_ext, name, old_must, old_may) + new_oc = _oc_definition(oid_ext, name, new_must, new_may) + instance.schema.del_schema('objectClasses', ensure_bytes(old_oc)) + instance.schema.add_schema('objectClasses', ensure_bytes(new_oc)) + + +def support_schema_learning(topology_m1c1): + """ + with https://fedorahosted.org/389/ticket/47721, the supplier and consumer can learn + schema definitions when a replication occurs. + Before that ticket: replication of the schema fails requiring administrative operation + In the test the schemaCSN (supplier consumer) differs + + After that ticket: replication of the schema succeeds (after an initial phase of learning) + In the test the schema CSN (supplier consumer) are in sync + + This function returns True if 47721 is fixed in the current release + False else + """ + ent = topology_m1c1.cs["consumer1"].getEntry(DN_CONFIG, ldap.SCOPE_BASE, "(cn=config)", ['nsslapd-versionstring']) + if ent.hasAttr('nsslapd-versionstring'): + val = ent.getValue('nsslapd-versionstring') + version = ensure_str(val).split('/')[1].split('.') # something like ['1', '3', '1', '23', 'final_fix'] + major = int(version[0]) + minor = int(version[1]) + if major > 1: + return True + if minor > 3: + # version is 1.4 or after + return True + if minor == 3: + if version[2].isdigit(): + if int(version[2]) >= 3: + return True + return False + + +def trigger_update(topology_m1c1): + """ + It triggers an update on the supplier. 
This will start a replication + session and a schema push + """ + try: + trigger_update.value += 1 + except AttributeError: + trigger_update.value = 1 + replace = [(ldap.MOD_REPLACE, 'telephonenumber', ensure_bytes(str(trigger_update.value)))] + topology_m1c1.ms["supplier1"].modify_s(ENTRY_DN, replace) + + # wait 10 seconds that the update is replicated + loop = 0 + while loop <= 10: + try: + ent = topology_m1c1.cs["consumer1"].getEntry(ENTRY_DN, ldap.SCOPE_BASE, "(objectclass=*)", + ['telephonenumber']) + val = ent.telephonenumber or "0" + if int(val) == trigger_update.value: + return + # the expected value is not yet replicated. try again + time.sleep(1) + loop += 1 + log.debug("trigger_update: receive %s (expected %d)" % (val, trigger_update.value)) + except ldap.NO_SUCH_OBJECT: + time.sleep(1) + loop += 1 + + +def trigger_schema_push(topology_m1c1): + ''' + Trigger update to create a replication session. + In case of 47721 is fixed and the replica needs to learn the missing definition, then + the first replication session learn the definition and the second replication session + push the schema (and the schemaCSN. 
This is why there are two updates and the replica agreement is stopped/started (to create a second session)
State at startup: + - supplier default schema + - consumer default schema + Final state + - supplier +supplierNewOCA + - consumer +supplierNewOCA + """ + + _header(topology_m1c1, "Extra OC Schema is pushed - no error") + + log.debug("test_schema_replication_one topology_m1c1 %r (supplier %r, consumer %r" % ( + topology_m1c1, topology_m1c1.ms["supplier1"], topology_m1c1.cs["consumer1"])) + # update the schema of the supplier so that it is a superset of + # consumer. Schema should be pushed + add_OC(topology_m1c1.ms["supplier1"], 2, 'supplierNewOCA') + + trigger_schema_push(topology_m1c1) + supplier_schema_csn = topology_m1c1.ms["supplier1"].schema.get_schema_csn() + consumer_schema_csn = topology_m1c1.cs["consumer1"].schema.get_schema_csn() + + # Check the schemaCSN was updated on the consumer + log.debug("test_schema_replication_one supplier_schema_csn=%s", supplier_schema_csn) + log.debug("ctest_schema_replication_one onsumer_schema_csn=%s", consumer_schema_csn) + assert supplier_schema_csn == consumer_schema_csn + + # Check the error log of the supplier does not contain an error + regex = re.compile(r"must not be overwritten \(set replication log for additional info\)") + res = pattern_errorlog(topology_m1c1.ms["supplier1"].errorlog_file, regex) + if res is not None: + assert False + + +@pytest.mark.ds47490 +def test_schema_replication_two(topology_m1c1, schema_replication_init): + """Check consumer schema is a superset (one extra OC) of supplier schema, then + schema is pushed and there is a message in the error log + + :id: b5db9b75-a9a7-458e-86ec-2a8e7bd1c014 + :setup: Supplier Consumer, check if a warning message is logged in the + error log of the supplier and add a test entry to trigger attempt of schema push. + :steps: + 1. Update the schema of consumer, so it will be superset of supplier + 2. Update the schema of supplier so ti make it's nsSchemaCSN larger than consumer + 3. Push the Schema (error should be generated) + 4. 
Check supplier learns the missing definition + 5. Check the error logs + 6. Check the startup/final state + :expectedresults: + 1. Operation should be successful + 2. Operation should be successful + 3. Operation should be successful + 4. Operation should be successful + 5. Operation should be successful + 6. State at startup + - supplier +supplierNewOCA + - consumer +supplierNewOCA + Final state + - supplier +supplierNewOCA +supplierNewOCB + - consumer +supplierNewOCA +consumerNewOCA + """ + + _header(topology_m1c1, "Extra OC Schema is pushed - (ticket 47721 allows to learn missing def)") + + # add this OC on consumer. Supplier will no push the schema + add_OC(topology_m1c1.cs["consumer1"], 1, 'consumerNewOCA') + + # add a new OC on the supplier so that its nsSchemaCSN is larger than the consumer (wait 2s) + time.sleep(2) + add_OC(topology_m1c1.ms["supplier1"], 3, 'supplierNewOCB') + + # now push the scheam + trigger_schema_push(topology_m1c1) + supplier_schema_csn = topology_m1c1.ms["supplier1"].schema.get_schema_csn() + consumer_schema_csn = topology_m1c1.cs["consumer1"].schema.get_schema_csn() + + # Check the schemaCSN was NOT updated on the consumer + # with 47721, supplier learns the missing definition + log.debug("test_schema_replication_two supplier_schema_csn=%s", supplier_schema_csn) + log.debug("test_schema_replication_two consumer_schema_csn=%s", consumer_schema_csn) + if support_schema_learning(topology_m1c1): + assert supplier_schema_csn == consumer_schema_csn + else: + assert supplier_schema_csn != consumer_schema_csn + + # Check the error log of the supplier does contain an error + # This message may happen during the learning phase + regex = re.compile(r"must not be overwritten \(set replication log for additional info\)") + pattern_errorlog(topology_m1c1.ms["supplier1"].errorlog_file, regex) + + # Check that standard schema was not rewritten to be "user defined' on the consumer + cn_attrs = 
json.loads(topology_m1c1.cs["consumer1"].schema.query_attributetype("cn", json=True)) + cn_attr = cn_attrs['at'] + assert cn_attr['x_origin'][0].lower() != "user defined" + if len(cn_attr['x_origin']) > 1: + assert cn_attr['x_origin'][1].lower() != "user defined" + + # Check that the new OC "supplierNewOCB" was written to be "user defined' on the consumer + ocs = json.loads(topology_m1c1.cs["consumer1"].schema.query_objectclass("supplierNewOCB", json=True)) + new_oc = ocs['oc'] + assert new_oc['x_origin'][0].lower() == "user defined" + + +@pytest.mark.ds47490 +def test_schema_replication_three(topology_m1c1, schema_replication_init): + """Check supplier schema is again a superset (one extra OC), then + schema is pushed and there is no message in the error log + + :id: 45888895-76bc-4cc3-9f90-33a69d027116 + :setup: Supplier Consumer, check if a warning message is logged in the + error log of the supplier and add a test entry to trigger attempt of schema push. + :steps: + 1. Update the schema of supplier + 2. Push the Schema (no error) + 3. Check the schemaCSN was NOT updated on the consumer + 4. Check the error logs for no errors + 5. Check the startup/final state + :expectedresults: + 1. Operation should be successful + 2. Operation should be successful + 3. Operation should be successful + 4. Operation should be successful + 5. State at startup + - supplier +supplierNewOCA +supplierNewOCB + - consumer +supplierNewOCA +consumerNewOCA + Final state + - supplier +supplierNewOCA +supplierNewOCB +consumerNewOCA + - consumer +supplierNewOCA +supplierNewOCB +consumerNewOCA + """ + _header(topology_m1c1, "Extra OC Schema is pushed - no error") + + # Do an upate to trigger the schema push attempt + # add this OC on consumer. 
Supplier will no push the schema + add_OC(topology_m1c1.ms["supplier1"], 1, 'consumerNewOCA') + + # now push the scheam + trigger_schema_push(topology_m1c1) + supplier_schema_csn = topology_m1c1.ms["supplier1"].schema.get_schema_csn() + consumer_schema_csn = topology_m1c1.cs["consumer1"].schema.get_schema_csn() + + # Check the schemaCSN was NOT updated on the consumer + log.debug("test_schema_replication_three supplier_schema_csn=%s", supplier_schema_csn) + log.debug("test_schema_replication_three consumer_schema_csn=%s", consumer_schema_csn) + assert supplier_schema_csn == consumer_schema_csn + + # Check the error log of the supplier does not contain an error + regex = re.compile(r"must not be overwritten \(set replication log for additional info\)") + res = pattern_errorlog(topology_m1c1.ms["supplier1"].errorlog_file, regex) + if res is not None: + assert False + + +@pytest.mark.ds47490 +def test_schema_replication_four(topology_m1c1, schema_replication_init): + """Check supplier schema is again a superset (OC with more MUST), then + schema is pushed and there is no message in the error log + + :id: 39304242-2641-4eb8-a9fb-5ff0cf80718f + :setup: Supplier Consumer, check if a warning message is logged in the + error log of the supplier and add a test entry to trigger attempt of schema push. + :steps: + 1. Add telenumber to 'supplierNewOCA' on the supplier + 2. Push the Schema (no error) + 3. Check the schemaCSN was updated on the consumer + 4. Check the error log of the supplier does not contain an error + 5. Check the startup/final state + :expectedresults: + 1. Operation should be successful + 2. Operation should be successful + 3. Operation should be successful + 4. Operation should be successful + 5. 
State at startup + - supplier +supplierNewOCA +supplierNewOCB +consumerNewOCA + - consumer +supplierNewOCA +supplierNewOCB +consumerNewOCA + Final state + - supplier +supplierNewOCA +supplierNewOCB +consumerNewOCA + +must=telexnumber + - consumer +supplierNewOCA +supplierNewOCB +consumerNewOCA + +must=telexnumber + """ + _header(topology_m1c1, "Same OC - extra MUST: Schema is pushed - no error") + + mod_OC(topology_m1c1.ms["supplier1"], 2, 'supplierNewOCA', old_must=MUST_OLD, new_must=MUST_NEW, old_may=MAY_OLD, + new_may=MAY_OLD) + + trigger_schema_push(topology_m1c1) + supplier_schema_csn = topology_m1c1.ms["supplier1"].schema.get_schema_csn() + consumer_schema_csn = topology_m1c1.cs["consumer1"].schema.get_schema_csn() + + # Check the schemaCSN was updated on the consumer + log.debug("test_schema_replication_four supplier_schema_csn=%s", supplier_schema_csn) + log.debug("ctest_schema_replication_four onsumer_schema_csn=%s", consumer_schema_csn) + assert supplier_schema_csn == consumer_schema_csn + + # Check the error log of the supplier does not contain an error + regex = re.compile(r"must not be overwritten \(set replication log for additional info\)") + res = pattern_errorlog(topology_m1c1.ms["supplier1"].errorlog_file, regex) + if res is not None: + assert False + + +@pytest.mark.ds47490 +def test_schema_replication_five(topology_m1c1, schema_replication_init): + """Check consumer schema is a superset (OC with more MUST), then + schema is pushed (fix for 47721) and there is a message in the error log + + :id: 498527df-28c8-4e1a-bc9e-799fd2b7b2bb + :setup: Supplier Consumer, check if a warning message is logged in the + error log of the supplier and add a test entry to trigger attempt of schema push. + :steps: + 1. Add telenumber to 'consumerNewOCA' on the consumer + 2. Add a new OC on the supplier so that its nsSchemaCSN is larger than the consumer + 3. Push the Schema + 4. Check the schemaCSN was NOT updated on the consumer + 5. 
Check the error log of the supplier contain an error + 6. Check the startup/final state + :expectedresults: + 1. Operation should be successful + 2. Operation should be successful + 3. Operation should be successful + 4. Operation should be successful + 5. Operation should be successful + 6. State at startup + - supplier +supplierNewOCA +supplierNewOCB +consumerNewOCA + +must=telexnumber + - consumer +supplierNewOCA +supplierNewOCB +consumerNewOCA + +must=telexnumber + Final state + - supplier +supplierNewOCA +supplierNewOCB +consumerNewOCA +supplierNewOCC + +must=telexnumber + - consumer +supplierNewOCA +supplierNewOCB +consumerNewOCA + +must=telexnumber +must=telexnumber + + Note: replication log is enabled to get more details + """ + _header(topology_m1c1, "Same OC - extra MUST: Schema is pushed - (fix for 47721)") + + # get more detail why it fails + topology_m1c1.ms["supplier1"].enableReplLogging() + + # add telenumber to 'consumerNewOCA' on the consumer + mod_OC(topology_m1c1.cs["consumer1"], 1, 'consumerNewOCA', old_must=MUST_OLD, new_must=MUST_NEW, old_may=MAY_OLD, + new_may=MAY_OLD) + # add a new OC on the supplier so that its nsSchemaCSN is larger than the consumer (wait 2s) + time.sleep(2) + add_OC(topology_m1c1.ms["supplier1"], 4, 'supplierNewOCC') + + trigger_schema_push(topology_m1c1) + supplier_schema_csn = topology_m1c1.ms["supplier1"].schema.get_schema_csn() + consumer_schema_csn = topology_m1c1.cs["consumer1"].schema.get_schema_csn() + + # Check the schemaCSN was NOT updated on the consumer + # with 47721, supplier learns the missing definition + log.debug("test_schema_replication_five supplier_schema_csn=%s", supplier_schema_csn) + log.debug("ctest_schema_replication_five consumer_schema_csn=%s", consumer_schema_csn) + if support_schema_learning(topology_m1c1): + assert supplier_schema_csn == consumer_schema_csn + else: + assert supplier_schema_csn != consumer_schema_csn + + # Check the error log of the supplier does contain an error + # This 
message may happen during the learning phase + regex = re.compile(r"must not be overwritten \(set replication log for additional info\)") + res = pattern_errorlog(topology_m1c1.ms["supplier1"].errorlog_file, regex) + + +@pytest.mark.ds47490 +def test_schema_replication_six(topology_m1c1, schema_replication_init): + """Check supplier schema is again a superset (OC with more MUST), then + schema is pushed and there is no message in the error log + + :id: ed57b0cc-6a10-4f89-94ae-9f18542b1954 + :setup: Supplier Consumer, check if a warning message is logged in the + error log of the supplier and add a test entry to trigger attempt of schema push. + :steps: + 1. Add telenumber to 'consumerNewOCA' on the supplier + 2. Push the Schema (no error) + 3. Check the schemaCSN was NOT updated on the consumer + 4. Check the error log of the supplier does not contain an error + 5. Check the startup/final state + :expectedresults: + 1. Operation should be successful + 2. Operation should be successful + 3. Operation should be successful + 4. Operation should be successful + 5. 
State at startup + - supplier +supplierNewOCA +supplierNewOCB +consumerNewOCA +supplierNewOCC + +must=telexnumber + - consumer +supplierNewOCA +supplierNewOCB +consumerNewOCA + +must=telexnumber +must=telexnumber + Final state + - supplier +supplierNewOCA +supplierNewOCB +consumerNewOCA +supplierNewOCC + +must=telexnumber +must=telexnumber + - consumer +supplierNewOCA +supplierNewOCB +consumerNewOCA +supplierNewOCC + +must=telexnumber +must=telexnumber + + Note: replication log is enabled to get more details + """ + _header(topology_m1c1, "Same OC - extra MUST: Schema is pushed - no error") + + # add telenumber to 'consumerNewOCA' on the consumer + mod_OC(topology_m1c1.ms["supplier1"], 1, 'consumerNewOCA', old_must=MUST_OLD, new_must=MUST_NEW, old_may=MAY_OLD, + new_may=MAY_OLD) + + trigger_schema_push(topology_m1c1) + supplier_schema_csn = topology_m1c1.ms["supplier1"].schema.get_schema_csn() + consumer_schema_csn = topology_m1c1.cs["consumer1"].schema.get_schema_csn() + + # Check the schemaCSN was NOT updated on the consumer + log.debug("test_schema_replication_six supplier_schema_csn=%s", supplier_schema_csn) + log.debug("ctest_schema_replication_six onsumer_schema_csn=%s", consumer_schema_csn) + assert supplier_schema_csn == consumer_schema_csn + + # Check the error log of the supplier does not contain an error + # This message may happen during the learning phase + regex = re.compile(r"must not be overwritten \(set replication log for additional info\)") + res = pattern_errorlog(topology_m1c1.ms["supplier1"].errorlog_file, regex) + if res is not None: + assert False + + +@pytest.mark.ds47490 +def test_schema_replication_seven(topology_m1c1, schema_replication_init): + """Check supplier schema is again a superset (OC with more MAY), then + schema is pushed and there is no message in the error log + + :id: 8725055a-b3f8-4d1d-a4d6-bb7dccf644d0 + :setup: Supplier Consumer, check if a warning message is logged in the + error log of the supplier and add a test entry 
to trigger attempt of schema push. + :steps: + 1. Add telenumber to 'supplierNewOCA' on the supplier + 2. Push the Schema (no error) + 3. Check the schemaCSN was updated on the consumer + 4. Check the error log of the supplier does not contain an error + 5. Check the startup/final state + :expectedresults: + 1. Operation should be successful + 2. Operation should be successful + 3. Operation should be successful + 4. Operation should be successful + 5. State at startup + - supplier +supplierNewOCA +supplierNewOCB +consumerNewOCA +supplierNewOCC + +must=telexnumber +must=telexnumber + - consumer +supplierNewOCA +supplierNewOCB +consumerNewOCA +supplierNewOCC + +must=telexnumber +must=telexnumber + Final state + - supplier +supplierNewOCA +supplierNewOCB +consumerNewOCA +supplierNewOCC + +must=telexnumber +must=telexnumber + +may=postOfficeBox + - consumer +supplierNewOCA +supplierNewOCB +consumerNewOCA +supplierNewOCC + +must=telexnumber +must=telexnumber + +may=postOfficeBox + """ + _header(topology_m1c1, "Same OC - extra MAY: Schema is pushed - no error") + + mod_OC(topology_m1c1.ms["supplier1"], 2, 'supplierNewOCA', old_must=MUST_NEW, new_must=MUST_NEW, old_may=MAY_OLD, + new_may=MAY_NEW) + + trigger_schema_push(topology_m1c1) + supplier_schema_csn = topology_m1c1.ms["supplier1"].schema.get_schema_csn() + consumer_schema_csn = topology_m1c1.cs["consumer1"].schema.get_schema_csn() + + # Check the schemaCSN was updated on the consumer + log.debug("test_schema_replication_seven supplier_schema_csn=%s", supplier_schema_csn) + log.debug("ctest_schema_replication_seven consumer_schema_csn=%s", consumer_schema_csn) + assert supplier_schema_csn == consumer_schema_csn + + # Check the error log of the supplier does not contain an error + regex = re.compile(r"must not be overwritten \(set replication log for additional info\)") + res = pattern_errorlog(topology_m1c1.ms["supplier1"].errorlog_file, regex) + if res is not None: + assert False + + +@pytest.mark.ds47490 +def 
test_schema_replication_eight(topology_m1c1, schema_replication_init): + """Check consumer schema is a superset (OC with more MAY), then + schema is pushed (fix for 47721) and there is message in the error log + + :id: 2310d150-a71a-498d-add8-4056beeb58c6 + :setup: Supplier Consumer, check if a warning message is logged in the + error log of the supplier and add a test entry to trigger attempt of schema push. + :steps: + 1. Add telenumber to 'consumerNewOCA' on the consumer + 2. Modify OC on the supplier so that its nsSchemaCSN is larger than the consumer + 3. Push the Schema (no error) + 4. Check the schemaCSN was updated on the consumer + 5. Check the error log of the supplier does not contain an error + 6. Check the startup/final state + :expectedresults: + 1. Operation should be successful + 2. Operation should be successful + 3. Operation should be successful + 4. Operation should be successful + 5. Operation should be successful + 6. State at startup + - supplier +supplierNewOCA +supplierNewOCB +consumerNewOCA +supplierNewOCC + +must=telexnumber +must=telexnumber + +may=postOfficeBox + - consumer +supplierNewOCA +supplierNewOCB +consumerNewOCA +supplierNewOCC + +must=telexnumber +must=telexnumber + +may=postOfficeBox + Final state + - supplier +supplierNewOCA +supplierNewOCB +consumerNewOCA +supplierNewOCC + +must=telexnumber +must=telexnumber + +may=postOfficeBox +may=postOfficeBox + - consumer +supplierNewOCA +supplierNewOCB +consumerNewOCA +supplierNewOCC + +must=telexnumber +must=telexnumber + +may=postOfficeBox +may=postOfficeBox + """ + _header(topology_m1c1, "Same OC - extra MAY: Schema is pushed (fix for 47721)") + + mod_OC(topology_m1c1.cs["consumer1"], 1, 'consumerNewOCA', old_must=MUST_NEW, new_must=MUST_NEW, old_may=MAY_OLD, + new_may=MAY_NEW) + + # modify OC on the supplier so that its nsSchemaCSN is larger than the consumer (wait 2s) + time.sleep(2) + mod_OC(topology_m1c1.ms["supplier1"], 4, 'supplierNewOCC', old_must=MUST_OLD, 
new_must=MUST_OLD, old_may=MAY_OLD, + new_may=MAY_NEW) + + trigger_schema_push(topology_m1c1) + supplier_schema_csn = topology_m1c1.ms["supplier1"].schema.get_schema_csn() + consumer_schema_csn = topology_m1c1.cs["consumer1"].schema.get_schema_csn() + + # Check the schemaCSN was not updated on the consumer + # with 47721, supplier learns the missing definition + log.debug("test_schema_replication_eight supplier_schema_csn=%s", supplier_schema_csn) + log.debug("ctest_schema_replication_eight onsumer_schema_csn=%s", consumer_schema_csn) + if support_schema_learning(topology_m1c1): + assert supplier_schema_csn == consumer_schema_csn + else: + assert supplier_schema_csn != consumer_schema_csn + + # Check the error log of the supplier does contain an error + # This message may happen during the learning phase + regex = re.compile(r"must not be overwritten \(set replication log for additional info\)") + res = pattern_errorlog(topology_m1c1.ms["supplier1"].errorlog_file, regex) + + +@pytest.mark.ds47490 +def test_schema_replication_nine(topology_m1c1, schema_replication_init): + """Check consumer schema is a superset (OC with more MAY), then + schema is not pushed and there is message in the error log + + :id: 851b24c6-b1e0-466f-9714-aa2940fbfeeb + :setup: Supplier Consumer, check if a warning message is logged in the + error log of the supplier and add a test entry to trigger attempt of schema push. + :steps: + 1. Add postOfficeBox to 'consumerNewOCA' on the supplier + 3. Push the Schema + 4. Check the schemaCSN was updated on the consumer + 5. Check the error log of the supplier does contain an error + 6. Check the startup/final state + :expectedresults: + 1. Operation should be successful + 2. Operation should be successful + 3. Operation should be successful + 4. Operation should be successful + 5. Operation should be successful + 6. 
@pytest.mark.ds47490
def test_schema_replication_nine(topology_m1c1, schema_replication_init):
    """Check that adding an extra MAY to an OC the consumer already knows is
    pushed cleanly: the schema replicates and no error is logged

    :id: 851b24c6-b1e0-466f-9714-aa2940fbfeeb
    :setup: Supplier Consumer, check if a warning message is logged in the
        error log of the supplier and add a test entry to trigger attempt of schema push.
    :steps:
        1. Add postOfficeBox to 'consumerNewOCA' on the supplier
        2. Push the Schema
        3. Check the schemaCSN was updated on the consumer
        4. Check the error log of the supplier does not contain an error
        5. Check the startup/final state
    :expectedresults:
        1. Operation should be successful
        2. Operation should be successful
        3. Operation should be successful
        4. Operation should be successful
        5. Both sides end with supplierNewOCA/B/C + consumerNewOCA,
           must=telexnumber and may=postOfficeBox on the modified OCs
    """
    _header(topology_m1c1, "Same OC - extra MAY: Schema is pushed - no error")

    mod_OC(topology_m1c1.ms["supplier1"], 1, 'consumerNewOCA', old_must=MUST_NEW,
           new_must=MUST_NEW, old_may=MAY_OLD, new_may=MAY_NEW)

    trigger_schema_push(topology_m1c1)
    supplier_schema_csn = topology_m1c1.ms["supplier1"].schema.get_schema_csn()
    consumer_schema_csn = topology_m1c1.cs["consumer1"].schema.get_schema_csn()

    # Check the schemaCSN was updated on the consumer.
    # (Fixed log messages: the originals said "ctest_..." / "onsumer_...".)
    log.debug("test_schema_replication_nine supplier_schema_csn=%s", supplier_schema_csn)
    log.debug("test_schema_replication_nine consumer_schema_csn=%s", consumer_schema_csn)
    assert supplier_schema_csn == consumer_schema_csn

    # Check the error log of the supplier does not contain an error
    # (direct assertion instead of `if res is not None: assert False`).
    regex = re.compile(r"must not be overwritten \(set replication log for additional info\)")
    res = pattern_errorlog(topology_m1c1.ms["supplier1"].errorlog_file, regex)
    assert res is None

    log.info('Testcase PASSED')
def mycmp(v1, v2):
    """Loose equality used for schema field comparison.

    Scalars are wrapped in one-element lists; lists/tuples are compared as
    case-folded sets (so ordering, case and duplicates are ignored).  Only
    elements that are strings are actually compared — elements of any other
    type (including None) are treated as equal.
    """
    if isinstance(v1, (list, tuple)):
        # Case-insensitive, order-insensitive comparison for multi-valued fields.
        left = list({item.lower() for item in v1})
        right = list({item.lower() for item in v2})
    else:
        left, right = [v1], [v2]

    if len(left) != len(right):
        return False

    for a, b in zip(left, right):
        if not isinstance(a, str):
            # Non-string values are not compared (matches original behavior).
            continue
        if len(a) != len(b) or a != b:
            return False
    return True
def test_gecos_directoryString(topology_st):
    """Check that gecos supports directoryString value

    :id: aee422bb-6299-4124-b5cd-d7393dac19d3

    :setup: Standalone instance

    :steps:
        1. Add a common user
        2. Replace gecos with a directoryString (non-IA5, UTF-8) value

    :expectedresults:
        1. Success
        2. Success
    """

    users = UserAccounts(topology_st.standalone, DEFAULT_SUFFIX)

    user_properties = {
        'uid': 'testuser',
        'cn': 'testuser',
        'sn': 'user',
        'uidNumber': '1000',
        'gidNumber': '2000',
        'homeDirectory': '/home/testuser',
    }
    testuser = users.create(properties=user_properties)

    # An accented value is only legal when gecos uses the DirectoryString
    # syntax; it would raise INVALID_SYNTAX with the IA5 definition.
    testuser.replace('gecos', 'Hélène')
    # Verify the value was actually stored (the original never checked).
    assert testuser.get_attr_val_utf8('gecos') == 'Hélène'
os.chmod(schema_filename, 0o777) + except OSError as e: + log.fatal("Failed to update schema file: " + + "{} Error: {}".format(schema_filename, str(e))) + + # start the instances + m1.start() + m2.start() + + # Check that gecos is IA5 on M2 + schema = SchemaLegacy(m2) + attributetypes = schema.query_attributetype('gecos') + assert attributetypes[0].syntax == "1.3.6.1.4.1.1466.115.121.1.26" + + + # Add a gecos UTF value on M1 + testuser.replace('gecos', 'Hélène') + + # Check replication is still working + testuser.replace('displayName', 'ascii value') + repl.wait_for_replication(m1, m2) + testuser_m2 = UserAccount(m2, testuser_dn) + assert testuser_m2.exists() + assert testuser_m2.get_attr_val_utf8('displayName') == 'ascii value' + + def fin(): + m1.start() + m2.start() + testuser.delete() + repl.wait_for_replication(m1, m2) + + # on M2 restore a default 99user.ldif + m2.stop() + os.remove(m2.schemadir + "/99user.ldif") + schema_filename = (m2.schemadir + "/99user.ldif") + try: + with open(schema_filename, 'w') as schema_file: + schema_file.write("dn: cn=schema\n") + os.chmod(schema_filename, 0o777) + except OSError as e: + log.fatal("Failed to update schema file: " + + "{} Error: {}".format(schema_filename, str(e))) + m2.start() + m1.start() + + request.addfinalizer(fin) + +def test_gecos_directoryString_wins_M1(topo_m2, request): + """Check that if inital syntax are IA5(M2) and DirectoryString(M1) + Then directoryString wins when nsSchemaCSN M1 is the greatest + + :id: ad119fa5-7671-45c8-b2ef-0b28ffb68fdb + :setup: Two suppliers replication setup + :steps: + 1. Create a testuser on M1 + 2 Stop M1 and M2 + 3 Change gecos def on M2 to be IA5 + 4 Start M1 and M2 + 5 Update M1 schema so that M1 has greatest nsSchemaCSN + 6 Update testuser with gecos directoryString value + 7 Check replication is still working + 8 Check gecos is DirectoryString on M1 and M2 + :expectedresults: + 1. success + 2. success + 3. success + 4. success + 5. success + 6. success + 7. 
success + 8. success + + """ + + repl = ReplicationManager(DEFAULT_SUFFIX) + m1 = topo_m2.ms["supplier1"] + m2 = topo_m2.ms["supplier2"] + + + # create a test user + testuser_dn = 'uid={},{}'.format('testuser', DEFAULT_SUFFIX) + testuser = UserAccount(m1, testuser_dn) + try: + testuser.create(properties={ + 'uid': 'testuser', + 'cn': 'testuser', + 'sn': 'testuser', + 'uidNumber' : '1000', + 'gidNumber' : '2000', + 'homeDirectory' : '/home/testuser', + }) + except ldap.ALREADY_EXISTS: + pass + repl.wait_for_replication(m1, m2) + + # Stop suppliers to update the schema + m1.stop() + m2.stop() + + # on M1: gecos is DirectoryString (default) + # on M2: gecos is IA5 + schema_filename = (m2.schemadir + "/99user.ldif") + try: + with open(schema_filename, 'w') as schema_file: + schema_file.write("dn: cn=schema\n") + schema_file.write("attributetypes: ( 1.3.6.1.1.1.1.2 NAME " + + "'gecos' DESC 'The GECOS field; the common name' " + + "EQUALITY caseIgnoreIA5Match " + + "SUBSTR caseIgnoreIA5SubstringsMatch " + + "SYNTAX 1.3.6.1.4.1.1466.115.121.1.26 " + + "SINGLE-VALUE )\n") + os.chmod(schema_filename, 0o777) + except OSError as e: + log.fatal("Failed to update schema file: " + + "{} Error: {}".format(schema_filename, str(e))) + + # start the instances + m1.start() + m2.start() + + # Check that gecos is IA5 on M2 + schema = SchemaLegacy(m2) + attributetypes = schema.query_attributetype('gecos') + assert attributetypes[0].syntax == "1.3.6.1.4.1.1466.115.121.1.26" + + + # update M1 schema to increase its nsschemaCSN + new_at = "( dummy-oid NAME 'dummy' DESC 'dummy attribute' SYNTAX 1.3.6.1.4.1.1466.115.121.1.15 SINGLE-VALUE X-ORIGIN 'RFC 2307' )" + m1.schema.add_schema('attributetypes', ensure_bytes(new_at)) + + # Add a gecos UTF value on M1 + testuser.replace('gecos', 'Hélène') + + # Check replication is still working + testuser.replace('displayName', 'ascii value') + repl.wait_for_replication(m1, m2) + testuser_m2 = UserAccount(m2, testuser_dn) + assert testuser_m2.exists() + 
assert testuser_m2.get_attr_val_utf8('displayName') == 'ascii value' + + # Check that gecos is DirectoryString on M1 + schema = SchemaLegacy(m1) + attributetypes = schema.query_attributetype('gecos') + assert attributetypes[0].syntax == "1.3.6.1.4.1.1466.115.121.1.15" + + # Check that gecos is DirectoryString on M2 + schema = SchemaLegacy(m2) + attributetypes = schema.query_attributetype('gecos') + assert attributetypes[0].syntax == "1.3.6.1.4.1.1466.115.121.1.15" + + def fin(): + m1.start() + m2.start() + testuser.delete() + m1.schema.del_schema('attributetypes', ensure_bytes(new_at)) + repl.wait_for_replication(m1, m2) + + # on M2 restore a default 99user.ldif + m2.stop() + os.remove(m2.schemadir + "/99user.ldif") + schema_filename = (m2.schemadir + "/99user.ldif") + try: + with open(schema_filename, 'w') as schema_file: + schema_file.write("dn: cn=schema\n") + os.chmod(schema_filename, 0o777) + except OSError as e: + log.fatal("Failed to update schema file: " + + "{} Error: {}".format(schema_filename, str(e))) + m2.start() + m1.start() + + request.addfinalizer(fin) + +def test_gecos_directoryString_wins_M2(topo_m2, request): + """Check that if inital syntax are IA5(M2) and DirectoryString(M1) + Then directoryString wins when nsSchemaCSN M2 is the greatest + + :id: 2da7f1b1-f86d-4072-a940-ba56d4bc8348 + :setup: Two suppliers replication setup + :steps: + 1. Create a testuser on M1 + 2 Stop M1 and M2 + 3 Change gecos def on M2 to be IA5 + 4 Start M1 and M2 + 5 Update M2 schema so that M2 has greatest nsSchemaCSN + 6 Update testuser on M2 and trigger replication to M1 + 7 Update testuser on M2 with gecos directoryString value + 8 Check replication is still working + 9 Check gecos is DirectoryString on M1 and M2 + :expectedresults: + 1. success + 2. success + 3. success + 4. success + 5. success + 6. success + 7. success + 8. success + 9. 
success + + """ + + repl = ReplicationManager(DEFAULT_SUFFIX) + m1 = topo_m2.ms["supplier1"] + m2 = topo_m2.ms["supplier2"] + + + # create a test user + testuser_dn = 'uid={},{}'.format('testuser', DEFAULT_SUFFIX) + testuser = UserAccount(m1, testuser_dn) + try: + testuser.create(properties={ + 'uid': 'testuser', + 'cn': 'testuser', + 'sn': 'testuser', + 'uidNumber' : '1000', + 'gidNumber' : '2000', + 'homeDirectory' : '/home/testuser', + }) + except ldap.ALREADY_EXISTS: + pass + testuser.replace('displayName', 'to trigger replication M1-> M2') + repl.wait_for_replication(m1, m2) + + # Stop suppliers to update the schema + m1.stop() + m2.stop() + + # on M1: gecos is DirectoryString (default) + # on M2: gecos is IA5 + schema_filename = (m2.schemadir + "/99user.ldif") + try: + with open(schema_filename, 'w') as schema_file: + schema_file.write("dn: cn=schema\n") + schema_file.write("attributetypes: ( 1.3.6.1.1.1.1.2 NAME " + + "'gecos' DESC 'The GECOS field; the common name' " + + "EQUALITY caseIgnoreIA5Match " + + "SUBSTR caseIgnoreIA5SubstringsMatch " + + "SYNTAX 1.3.6.1.4.1.1466.115.121.1.26 " + + "SINGLE-VALUE )\n") + os.chmod(schema_filename, 0o777) + except OSError as e: + log.fatal("Failed to update schema file: " + + "{} Error: {}".format(schema_filename, str(e))) + + # start the instances + m1.start() + m2.start() + + # Check that gecos is IA5 on M2 + schema = SchemaLegacy(m2) + attributetypes = schema.query_attributetype('gecos') + assert attributetypes[0].syntax == "1.3.6.1.4.1.1466.115.121.1.26" + + # update M2 schema to increase its nsschemaCSN + new_at = "( dummy-oid NAME 'dummy' DESC 'dummy attribute' SYNTAX 1.3.6.1.4.1.1466.115.121.1.15 SINGLE-VALUE X-ORIGIN 'RFC 2307' )" + m2.schema.add_schema('attributetypes', ensure_bytes(new_at)) + + # update just to trigger replication M2->M1 + # and update of M2 schema + testuser_m2 = UserAccount(m2, testuser_dn) + testuser_m2.replace('displayName', 'to trigger replication M2-> M1') + + # Add a gecos UTF value 
def test_definition_with_sharp(topology_st, request):
    """Check that a schema definition whose continuation line starts with " #"
    is parsed correctly and does not prevent the server from starting

    :id: 94aa18ca-752f-11ec-a5ad-482ae39447e5
    :setup: A single instance
    :steps:
        1. Stop instance
        2. Add schema definition with a line starting with " #"
        3. Start instance
        4. Check that instance is really running
    :expectedresults:
        1. success
        2. success
        3. success
        4. success
    """

    inst = topology_st.standalone
    inst.stop()

    def fin():
        # Restore a default (empty) 99user.ldif so later tests start clean.
        inst.stop()
        os.remove(inst.schemadir + "/99user.ldif")
        schema_filename = (inst.schemadir + "/99user.ldif")
        try:
            with open(schema_filename, 'w') as schema_file:
                schema_file.write("dn: cn=schema\n")
            os.chmod(schema_filename, 0o777)
        except OSError as e:
            log.fatal("Failed to update schema file: "
                      "{} Error: {}".format(schema_filename, str(e)))
        inst.start()

    request.addfinalizer(fin)

    # Write an attribute definition split so that a continuation line begins
    # with " #" — historically this tripped the LDIF parser.
    schema_filename = (inst.schemadir + "/99user.ldif")
    try:
        with open(schema_filename, 'w') as schema_file:
            schema_file.write("dn: cn=schema\n")
            schema_file.write("attributeTypes: ( 2.16.840.1.113730.3.8.11.61 NAME 'ipaWrappingKey' DESC 'PKCS\n")
            schema_file.write(" #11 URI of the wrapping key' EQUALITY caseExactMatch SYNTAX 1.3.6.1.4.1.1466.\n")
            schema_file.write(" 115.121.1.15 SINGLE-VALUE X-ORIGIN ( 'IPA v4.1' 'user defined' ) )\n")
        os.chmod(schema_filename, 0o777)
    except OSError as e:
        log.fatal("Failed to update schema file: "
                  "{} Error: {}".format(schema_filename, str(e)))

    # start the instance
    inst.start()

    # Check that server is really running.
    # (The original had a stray "i" before this comment — "i# Check..." —
    # which raised NameError at runtime.)
    assert inst.status()
+# --- END COPYRIGHT BLOCK --- +# + + +import os +import logging +import pytest +import ldap + +from lib389.topologies import topology_st as topology +from lib389._constants import DEFAULT_SUFFIX +from lib389.schema import Schema + +pytestmark = pytest.mark.tier1 + +def test_x_descr_oid(topology): + """Test import of an attribute using descr-oid format that starts + with an X-. This should "fail" with a descriptive error message. + + :id: 9308bdbd-363c-45a9-8223-9a6c925dba37 + + :setup: Standalone instance + + :steps: + 1. Add invalid x-attribute + 2. Add valid x-attribute + 3. Add invalid x-object + 4. Add valid x-object + + :expectedresults: + 1. raises INVALID_SYNTAX + 2. success + 3. raises INVALID_SYNTAX + 4. success + """ + inst = topology.standalone + + schema = Schema(inst) + + with pytest.raises(ldap.INVALID_SYNTAX): + schema.add('attributeTypes', "( x-attribute-oid NAME 'x-attribute' DESC 'desc' EQUALITY caseIgnoreIA5Match SYNTAX 1.3.6.1.4.1.1466.115.121.1.26 X-ORIGIN 'user defined' )") + schema.add('attributeTypes', "( 1.2.3.4.5.6.7.8.9.10 NAME 'x-attribute' DESC 'desc' EQUALITY caseIgnoreIA5Match SYNTAX 1.3.6.1.4.1.1466.115.121.1.26 X-ORIGIN 'user defined' )") + + with pytest.raises(ldap.INVALID_SYNTAX): + schema.add('objectClasses', "( x-object-oid NAME 'x-object' DESC 'desc' SUP TOP AUXILIARY MAY ( x-attribute ) X-ORIGIN 'user defined' )") + schema.add('objectClasses', "( 1.2.3.4.5.6.7.8.9.11 NAME 'x-object' DESC 'desc' SUP TOP AUXILIARY MAY ( x-attribute ) X-ORIGIN 'user defined' )") + diff --git a/dirsrvtests/tests/suites/setup_ds/__init__.py b/dirsrvtests/tests/suites/setup_ds/__init__.py new file mode 100644 index 0000000..80ce751 --- /dev/null +++ b/dirsrvtests/tests/suites/setup_ds/__init__.py @@ -0,0 +1,12 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2019 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. 
+# --- END COPYRIGHT BLOCK --- + +""" + :Requirement: 389-ds-base: Basic Directory Server Operations +""" + diff --git a/dirsrvtests/tests/suites/setup_ds/db_home_test.py b/dirsrvtests/tests/suites/setup_ds/db_home_test.py new file mode 100755 index 0000000..3cf03f8 --- /dev/null +++ b/dirsrvtests/tests/suites/setup_ds/db_home_test.py @@ -0,0 +1,221 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2022 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. +# --- END COPYRIGHT BLOCK --- +# + +import pytest + +from lib389.utils import * +from lib389.topologies import topology_st as topo +from lib389._constants import * +from lib389.dseldif import * +from lib389.cli_conf.backend import * +from lib389.config import BDB_LDBMConfig +from .... conftest import get_rpm_version +from lib389.paths import DEFAULTS_PATH + +# Check if we are in a container +container_result = subprocess.run(["systemd-detect-virt", "-c"], stdout=subprocess.PIPE) + +pytestmark = [pytest.mark.tier1, + pytest.mark.skipif(get_rpm_version("selinux-policy") <= "3.14.3-79" or + get_rpm_version("selinux-policy") <= "34.1.19-1", + reason="Will fail because of incorrect selinux labels"), + pytest.mark.skipif(ds_is_older('1.4.3.28'), reason='Not implemented'), + pytest.mark.skipif(container_result.returncode == 0, reason='db_home_dir is in old location in container')] + +DEBUGGING = os.getenv("DEBUGGING", default=False) +if DEBUGGING: + logging.getLogger(__name__).setLevel(logging.DEBUG) +else: + logging.getLogger(__name__).setLevel(logging.INFO) +log = logging.getLogger(__name__) + + +@pytest.mark.ds2790 +@pytest.mark.bz1780842 +def test_check_db_home_dir_in_config(topo): + """Test to check nsslapd-db-home-directory is set to /dev/shm/slapd-instance in cn=config + + :id: 9a1d0fcf-ca31-4f60-8b31-4de495b0b3ce + :customerscenario: True + :setup: Standalone Instance + :steps: + 1. Create instance + 2. 
Check nsslapd-db-home-directory is set to /dev/shm/slapd-instance in cn=config + :expectedresults: + 1. Success + 2. Success + """ + + standalone = topo.standalone + + if standalone.is_in_container(): + dbhome_value = standalone.db_dir + else: + dbhome_value = '/dev/shm/slapd-{}'.format(standalone.serverid) + bdb_ldbmconfig = BDB_LDBMConfig(standalone) + + log.info('Check the config value of nsslapd-db-home-directory') + assert bdb_ldbmconfig.get_attr_val_utf8('nsslapd-db-home-directory') == dbhome_value + + +@pytest.mark.ds2790 +@pytest.mark.bz1780842 +def test_check_db_home_dir_contents(topo): + """Test to check contents of /dev/shm/slapd-instance + + :id: a2d36990-2bb6-46af-99ca-f0cb30e68460 + :customerscenario: True + :setup: Standalone Instance + :steps: + 1. Create instance + 2. Check the directory /dev/shm/slapd-instance exists + 3. Check the contents of /dev/shm/slapd-instance/ + 4. Check the contents of /dev/shm/slapd-instance/ are not present in var/lib/dirsrv/slapd-instance/db + :expectedresults: + 1. Success + 2. Success + 3. Success + 4. 
Success + """ + + standalone = topo.standalone + file_list = ['__db.001', '__db.002', '__db.003', 'DBVERSION'] + if standalone.is_in_container(): + dbhome_value = standalone.db_dir + else: + dbhome_value = '/dev/shm/slapd-{}/'.format(standalone.serverid) + old_dbhome = '/var/lib/dirsrv/slapd-{}/db'.format(standalone.serverid) + existing_files = list(next(os.walk(dbhome_value))[2]) + old_location_files = list(next(os.walk(old_dbhome))[2]) + + log.info('Check the directory exists') + assert os.path.exists(dbhome_value) + + log.info('Check the files are present in /dev/shm/slapd-instance/') + for item in file_list: + assert item in existing_files + + log.info('Check these files are not present in old location') + for item in file_list: + assert item not in old_location_files + + +@pytest.mark.ds2790 +@pytest.mark.bz1780842 +def test_check_db_home_dir_in_dse(topo): + """Test to check nsslapd-db-home-directory is set to /dev/shm/slapd-instance in dse.ldif + + :id: f25befd2-a57c-4365-8eaf-70ea5fb987ea + :customerscenario: True + :setup: Standalone Instance + :steps: + 1. Create instance + 2. Check nsslapd-db-home-directory is set to /dev/shm/slapd-instance in dse.ldif + :expectedresults: + 1. Success + 2. Success + """ + + standalone = topo.standalone + bdb_ldbmconfig = BDB_LDBMConfig(standalone) + if standalone.is_in_container(): + dbhome_value = standalone.db_dir + else: + dbhome_value = '/dev/shm/slapd-{}'.format(standalone.serverid) + dse_ldif = DSEldif(standalone) + + log.info('Check value of nsslapd-db-home-directory in dse.ldif') + dse_value = dse_ldif.get(bdb_ldbmconfig.dn, 'nsslapd-db-home-directory', True) + assert dse_value == dbhome_value + + +@pytest.mark.ds2790 +@pytest.mark.bz1780842 +def test_check_db_home_dir_in_defaults(topo): + """Test to check nsslapd-db-home-directory is set to /dev/shm/slapd-instance in defaults.inf file + + :id: 995ef963-acb1-4210-887e-803fc63e716c + :customerscenario: True + :setup: Standalone Instance + :steps: + 1. 
@pytest.mark.ds2790
@pytest.mark.bz1780842
def test_delete_db_home_dir(topo):
    """Test to check behaviour when deleting contents of /dev/shm/slapd-instance/ and restarting the instance

    :id: 07764487-4cb1-438f-a327-bba7d762fea3
    :customerscenario: True
    :setup: Standalone Instance
    :steps:
        1. Create instance
        2. Delete contents of /dev/shm/slapd-instance
        3. Restart instance
        4. Check the contents of /dev/shm/slapd-instance are recreated
    :expectedresults:
        1. Success
        2. Success
        3. Success
        4. Success
    """

    standalone = topo.standalone
    file_list = ['__db.001', '__db.002', '__db.003', 'DBVERSION']
    if standalone.is_in_container():
        dbhome_value = standalone.db_dir
    else:
        dbhome_value = '/dev/shm/slapd-{}/'.format(standalone.serverid)

    log.info('Stop the instance')
    standalone.stop()

    log.info('Remove contents of /dev/shm/slapd-instance/')
    for f in os.listdir(dbhome_value):
        os.remove(os.path.join(dbhome_value, f))

    log.info('Check there are no files')
    assert len(os.listdir(dbhome_value)) == 0

    log.info('Restart the instance')
    standalone.restart()

    # Re-read the directory AFTER the restart.  The original captured the
    # listing before deletion, so its final check compared against a stale
    # snapshot and could never detect a missing recreated file.
    recreated_files = os.listdir(dbhome_value)

    log.info('Check number of files')
    assert len(recreated_files) == 4

    log.info('Check the filenames')
    for item in file_list:
        assert item in recreated_files
+# --- END COPYRIGHT BLOCK --- +import sys +import pytest +import subprocess +import logging +import grp +import pwd +import re +from tempfile import TemporaryDirectory +from lib389 import DirSrv +from lib389.cli_base import LogCapture +from lib389.instance.setup import SetupDs +from lib389.instance.remove import remove_ds_instance +from lib389.instance.options import General2Base, Slapd2Base +from lib389._constants import * +from lib389.utils import ds_is_older, selinux_label_file, ensure_list_str, ensure_str +from shutil import rmtree + +pytestmark = [pytest.mark.tier0, + pytest.mark.skipif(ds_is_older('1.4.1.2'), reason="Needs a compatible systemd unit, see PR#50213")] + +DEBUGGING = os.getenv('DEBUGGING', False) +if DEBUGGING: + logging.getLogger(__name__).setLevel(logging.DEBUG) +else: + logging.getLogger(__name__).setLevel(logging.INFO) +log = logging.getLogger(__name__) + +INSTANCE_PORT = 54321 +INSTANCE_SECURE_PORT = 54322 +INSTANCE_SERVERID = 'standalone' +#DEBUGGING = True + +MAJOR, MINOR, _, _, _ = sys.version_info + +CUSTOM_DIR = f'{os.getenv("PREFIX", "")}/var/lib/dirsrv_pytest_test_setup_ds_custom_db_dir' +CUSTOM_DB_DIR = f'{CUSTOM_DIR}/db' + +class TopologyInstance(object): + def __init__(self, standalone): + # For these tests, we don't want to open the instance. + # instance.open() + self.standalone = standalone + +# Need a teardown to destroy the instance. 
+@pytest.fixture +def topology(request): + instance = DirSrv(verbose=DEBUGGING) + instance.log.debug("Instance allocated") + args = {SER_PORT: INSTANCE_PORT, + SER_SERVERID_PROP: INSTANCE_SERVERID} + instance.allocate(args) + if instance.exists(): + instance.delete() + # Cleanup custom dir + selinux_label_file(CUSTOM_DB_DIR, None) + rmtree(CUSTOM_DIR, ignore_errors=True) + + def fin(): + if not DEBUGGING: + if instance.exists(): + instance.delete() + selinux_label_file(CUSTOM_DB_DIR, None) + rmtree(CUSTOM_DIR, ignore_errors=True) + request.addfinalizer(fin) + + return TopologyInstance(instance) + + +def run_cmd(cmd): + result = subprocess.run(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE) + args = ' '.join(ensure_list_str(result.args)) + stdout = ensure_str(result.stdout) + stderr = ensure_str(result.stderr) + log.info(f"CMD: {args} returned {result.returncode} STDOUT: {stdout} STDERR: {stderr}") + return stdout + + +def test_setup_ds_minimal_dry(topology): + """Test minimal DS setup - dry run + + :id: 82637910-e279-11ec-a785-3497f624ea11 + :setup: standalone instance + :steps: + 1. Create the setupDS + 2. Give it the right types + 3. Get the dicts from Type2Base, as though they were from _validate_ds_2_config + 4. Override instance name, root password, port and secure port + 5. Assert we did not change the system + :expectedresults: + 1. Success + 2. Success + 3. Success + 4. Success + 5. Success + """ + # Unset PYTHONPATH to avoid mixing old CLI tools and new lib389 + tmp_env = os.environ + if "PYTHONPATH" in tmp_env: + del tmp_env["PYTHONPATH"] + + # Create the setupDs + lc = LogCapture() + # Give it the right types. + sds = SetupDs(verbose=DEBUGGING, dryrun=True, log=lc.log) + + # Get the dicts from Type2Base, as though they were from _validate_ds_2_config + # IE get the defaults back just from Slapd2Base.collect + # Override instance name, root password, port and secure port. 
+ + general_options = General2Base(lc.log) + general_options.verify() + general = general_options.collect() + + slapd_options = Slapd2Base(lc.log) + slapd_options.set('instance_name', INSTANCE_SERVERID) + slapd_options.set('port', INSTANCE_PORT) + slapd_options.set('secure_port', INSTANCE_SECURE_PORT) + slapd_options.set('root_password', PW_DM) + slapd_options.verify() + slapd = slapd_options.collect() + + sds.create_from_args(general, slapd, {}, None) + + insts = topology.standalone.list(serverid=INSTANCE_SERVERID) + # Assert we did not change the system. + assert(len(insts) == 0) + +def test_setup_ds_minimal(topology): + """Test minimal DS setup + + :id: 563c3ec4-e27b-11ec-970e-3497f624ea11 + :setup: standalone instance + :steps: + 1. Create the setupDS + 2. Give it the right types + 3. Get the dicts from Type2Base, as though they were from _validate_ds_2_config + 4. Override instance name, root password, port and secure port + 5. Assert we did change the system + 6. Make sure we can connect + 7. Make sure we can start stop. + 8. Remove the instance + :expectedresults: + 1. Success + 2. Success + 3. Success + 4. Success + 5. Success + 6. Success + 7. Success + 8. Success + """ + # Create the setupDs + lc = LogCapture() + # Give it the right types. + sds = SetupDs(verbose=DEBUGGING, dryrun=False, log=lc.log) + + # Get the dicts from Type2Base, as though they were from _validate_ds_2_config + # IE get the defaults back just from Slapd2Base.collect + # Override instance name, root password, port and secure port. 
@pytest.mark.skipif(not os.path.exists('/usr/sbin/semanage'), reason="semanage is not installed. Please run dnf install policycoreutils-python-utils -y")
@pytest.mark.skipif(os.getuid() != 0, reason="pytest non run by root user")
def test_setup_ds_custom_db_dir(topology):
    """Test DS setup using custom uid,gid and db_dir path

    :id: 5a596887-cabb-4862-a91c-5eedafe222cd
    :setup: standalone instance
    :steps:
        1. Create the user that will run ns-slapd
        2. Create the setupDS
        3. Give it the right types
        4. Get the dicts from Type2Base, as though they were from _validate_ds_2_config
        5. Override instance name, root password, port, secure port, user, group and dir_path
        6. Assert we did change the system
        7. Make sure we can connect
        8. Make sure we can start stop.
        9. Remove the instance
        10. Check that there is not any dirsrv_* label in the file local selinux customizations
        11. Check that there is not any ldap_port_t label in the port local selinux customizations
    :expectedresults:
        1. Success
        2. Success
        3. Success
        4. Success
        5. Success
        6. Success
        7. Success
        8. Success
        9. Success
        10. Success
        11. Success
    """
    # Add linux user CUSTOM_USER if it does not already exist
    CUSTOM_USER = 'ldapsrv1'
    try:
        pwd_cu = pwd.getpwnam(CUSTOM_USER)
    except KeyError:
        subprocess.run(('/usr/sbin/useradd', CUSTOM_USER), check=True)
        pwd_cu = pwd.getpwnam(CUSTOM_USER)
    grp_cu = grp.getgrgid(pwd_cu.pw_gid)
    log.info(f'Custom user: {pwd_cu} {grp_cu}')

    # Create the setupDs
    lc = LogCapture()
    # Give it the right types.
    sds = SetupDs(verbose=DEBUGGING, dryrun=False, log=lc.log)

    # Get the dicts from Type2Base, as though they were from _validate_ds_2_config
    # IE get the defaults back just from Slapd2Base.collect
    # Override instance name, root password, port, secure port, user, group and db_dir.
    general_options = General2Base(lc.log)
    general_options.verify()
    general = general_options.collect()

    slapd_options = Slapd2Base(lc.log)
    slapd_options.set('instance_name', INSTANCE_SERVERID)
    slapd_options.set('port', INSTANCE_PORT)
    slapd_options.set('secure_port', INSTANCE_SECURE_PORT)
    slapd_options.set('root_password', PW_DM)
    slapd_options.set('user', pwd_cu.pw_name)
    slapd_options.set('group', grp_cu.gr_name)
    slapd_options.set('db_dir', CUSTOM_DB_DIR)
    slapd_options.verify()
    slapd = slapd_options.collect()

    sds.create_from_args(general, slapd, {}, None)
    insts = topology.standalone.list(serverid=INSTANCE_SERVERID)
    # Assert we did change the system.
    assert len(insts) == 1
    # Make sure we can connect
    topology.standalone.open()
    # Make sure we can start stop.
    topology.standalone.stop()
    topology.standalone.start()
    # Okay, actually remove the instance
    insts = topology.standalone.list(all=True)
    remove_ds_instance(topology.standalone)
    # Only when the last instance has been removed should the local selinux
    # customizations be gone as well.
    if len(insts) == 1:
        res = run_cmd(["semanage", "fcontext", "--list", "-C"])
        assert "dirsrv_" not in res
        res = run_cmd(["semanage", "port", "--list", "-C"])
        assert "ldap_port_t" not in res
class UserEnv:
    """Helper that prepares per-test work directories and runs generated
    shell scripts as a dedicated (non root when possible) user."""

    def __init__(self, user):
        # Running as root: make sure the target user exists (create it on
        # demand).  Otherwise fall back to the current (non root) user.
        if os.geteuid() == 0:
            try:
                pw = pwd.getpwnam(user)
            except KeyError:
                subprocess.run(('/usr/sbin/useradd', user), check=True)
                pw = pwd.getpwnam(user)
        else:
            pw = pwd.getpwuid(os.geteuid())
            user = pw.pw_name
        self.user = user
        self.pw = pw
        self.dir = None          # current per-test work directory
        self.runid = 1           # suffix for generated run scripts
        self._instances = {}     # dir -> serverids to clean up
        self._dirs = []          # every work directory ever created

    def setdir(self, testname):
        """Switch to a fresh work directory named after *testname*."""
        # Stop instance from current dir
        if self.dir in self._instances:
            lines = [f'dsctl {i} stop' for i in self._instances[self.dir]]
            self.run(lines)
        # Then create new dir
        dir = f'{self.pw.pw_dir}/{testname}'
        self.dir = dir
        if os.path.isdir(dir):
            rmtree(dir)
        self._dirs.append(dir)
        os.makedirs(dir)
        if os.geteuid() == 0:
            os.chown(dir, self.pw.pw_uid, self.pw.pw_gid)

    def write_file(self, fname, is_runnable=False, lines=()):
        """Write *lines* to *fname*, owned by self.user; chmod 755 if runnable."""
        log.debug(f'Creating file {fname} with:')
        with open(fname, 'wt') as f:
            for line in lines:
                log.debug(line)
                f.write(line + '\n')
        os.chown(fname, self.pw.pw_uid, self.pw.pw_gid)
        if is_runnable:
            os.chmod(fname, 0o755)
        log.debug(f'End of file: {fname}')

    def add_instance_for_cleanup(self, serverid):
        """Register *serverid* for teardown from the current work directory."""
        if not self.dir in self._instances:
            self._instances[self.dir] = []
        self._instances[self.dir].append(serverid)

    def run(self, lines, exit_on_error=True):
        """Write *lines* into a shell script and execute it as self.user."""
        # Prepare test script
        # Export current path and python path in test script
        # to ensure that the right binary/libraries are used
        path = os.environ['PATH']
        pythonpath = os.getenv('PYTHONPATH', '')
        if exit_on_error:
            eoe = ("set -e # Exit on error",)
        else:
            eoe = ()
        name = f'{self.dir}/run.{self.runid}'
        self.runid = self.runid + 1
        self.write_file(name, is_runnable=True, lines=(
            '#!/usr/bin/bash',
            'set -x',
            f'export PATH="{self.dir}/bin:{path}"',
            f'export PYTHONPATH="{pythonpath}:$PYTHONPATH"',
            *eoe,
            *lines,
            'exit 0',
        ))
        # Run the script as self.user
        log.debug(f'Run script {name} as user {self.user}')
        if os.geteuid() == 0:
            return subprocess.run(['/usr/bin/su', '-', self.user, name], capture_output=True, text=True)
        else:
            return subprocess.run([name], capture_output=True, text=True)

    def cleanup(self):
        """Stop (DEBUGGING) or remove every registered instance, then
        delete the work directories."""
        path = os.environ['PATH']
        lines = []
        for k, v in self._instances.items():
            lines.append(f'export PATH="{k}/bin:{path}"')
            for i in v:
                if DEBUGGING:
                    lines.append(f'dsctl {i} stop')
                else:
                    lines.append(f'dsctl {i} remove --do-it')
        log.debug(f'CLEANUP LINES: {lines}')
        if lines:
            self.run(list(lines))
        if DEBUGGING:
            log.debug(f'CLEANUP DIRS: {self._dirs}')
        # NOTE(review): work directories are removed even in DEBUGGING mode
        # (only the instances are kept, stopped) - confirm this is intended.
        for d in self._dirs:
            if os.path.isdir(d):
                rmtree(d)


@pytest.fixture(scope="module")
def nru(request):
    # Generate a non root user environment shared by this module's tests.
    env = UserEnv('user1')

    def fin():
        # Delete the UserEnv resources and remove the temporary directories.
        if env:
            env.cleanup()

    request.addfinalizer(fin)
    return env


def test_setup_ds_as_non_root(nru, request):
    """Test creating an instance as a non root user

    :id: c727998e-a960-11ec-898e-482ae39447e5
    :setup: no instance
    :steps:
        1. Create a dscreate template file
        2. Create and run a test script that
           Run dscreate ds-root
           Run dscreate from-file
           Add a backend
           Search users in backend and store output in a file
           Stop the instance
        3. Check that pid file exists and kill the associated process
        4. Check demo_user is in the search result
        5. Check that test.sh returned 0
    :expectedresults:
        1. No error.
        2. No error.
        3. Should fail to kill the process (That is supposed to be stopped)
        4. demo_user should be in search result
        5. return code should be 0
    """

    nru.setdir(testname=request.node.name)
    # Prepare dscreate template
    nru.write_file(f'{nru.dir}/ds.tmpl', lines=(
        '[general]',
        '[slapd]',
        f'port = {INSTANCE_PORT}',
        f'instance_name = {INSTANCE_SERVERID}',
        f'root_password = {PW_DM}',
        f'secure_port = {INSTANCE_SECURE_PORT}',
        '[backend-userroot]',
        'create_suffix_entry = True',
        'require_index = True',
        'sample_entries = yes',
        'suffix = dc=example,dc=com',
    ))
    # Create the test script and run it as nru.user
    nru.add_instance_for_cleanup(INSTANCE_SERVERID)
    result = nru.run((
        'type dscreate',
        f'dscreate ds-root {nru.dir}/root {nru.dir}/bin',
        'hash -d dscreate # Remove dscreate from hash to use the new one',
        'type dscreate',
        f'dscreate from-file {nru.dir}/ds.tmpl',
        f'dsconf {INSTANCE_SERVERID} backend create --suffix dc=foo,dc=bar --be-name=foo --create-entries',
        f'ldapsearch -x -H ldap://localhost:{INSTANCE_PORT} -D "cn=directory manager" -w {PW_DM} -b dc=foo,dc=bar "uid=*" | tee {nru.dir}/search.out',
        f'dsctl {INSTANCE_SERVERID} stop',
    ))
    log.info(f'test.sh stdout is: {str(result.stdout)}')
    log.info(f'test.sh stderr is: {str(result.stderr)}')

    # Let check that demo_user is in the search result
    with open(f'{nru.dir}/search.out', 'rt') as f:
        assert re.findall('demo_user', f.read())
    log.debug(f'Check that test script finished successfully.')
    assert result.returncode == 0
demo_user should be in search result + 5. return code should be 0 + + """ + + nru.setdir(testname=request.node.name) + # Prepare dscreate template + nru.write_file(f'{nru.dir}/ds.tmpl', lines=( + '[general]', + '[slapd]', + f'port = {INSTANCE_PORT}', + f'instance_name = {INSTANCE_SERVERID}', + f'root_password = {PW_DM}', + f'secure_port = {INSTANCE_SECURE_PORT}', + '[backend-userroot]', + 'create_suffix_entry = True', + 'require_index = True', + 'sample_entries = yes', + 'suffix = dc=example,dc=com', + )) + # Create the test script and run it as nru.user + nru.add_instance_for_cleanup(INSTANCE_SERVERID) + result = nru.run(( + 'type dscreate', + f'dscreate ds-root {nru.dir}/root {nru.dir}/bin', + 'hash -d dscreate # Remove dscreate from hash to use the new one', + 'type dscreate', + f'dscreate from-file {nru.dir}/ds.tmpl', + f'dsconf {INSTANCE_SERVERID} backend create --suffix dc=foo,dc=bar --be-name=foo --create-entries', + f'ldapsearch -x -H ldap://localhost:{INSTANCE_PORT} -D "cn=directory manager" -w {PW_DM} -b dc=foo,dc=bar "uid=*" | tee {nru.dir}/search.out', + f'dsctl {INSTANCE_SERVERID} stop', + )) + log.info(f'test.sh stdout is: {str(result.stdout)}') + log.info(f'test.sh stderr is: {str(result.stderr)}') + + # Let check that demo_user is in the search result + with open(f'{nru.dir}/search.out', 'rt') as f: + assert(re.findall('demo_user', f.read())) + log.debug(f'Check that test script finished successfully.') + assert(result.returncode == 0) + +def test_setup_ds_as_non_root_with_non_canonic_paths(nru, request): + """Test creating an instance as a non root user + + :id: db8e1ca0-98ce-11ed-89b9-482ae39447e5 + :setup: no instance + :steps: + 1. Create a dscreate template file + 2. Create an run a test script that + Run dscreate ds-root using non canonic paths + Run dscreate from-file + Add a backend + Search users in backend and store output in a file + Stop the instance + 3. Check that pid file exists and kill the associated process + 4. 
Check demo_user is in the search result + 5. Check that test.sh returned 0 + + + :expectedresults: + 1. No error. + 2. No error. + 3. Should fail to kill the process (That is supposed to be stopped) + 4. demo_user should be in search result + 5. return code should be 0 + + """ + + nru.setdir(testname=request.node.name) + # Prepare dscreate template + nru.write_file(f'{nru.dir}/ds.tmpl', lines=( + '[general]', + '[slapd]', + f'port = {INSTANCE_PORT}', + f'instance_name = {INSTANCE_SERVERID}', + f'root_password = {PW_DM}', + f'secure_port = {INSTANCE_SECURE_PORT}', + '[backend-userroot]', + 'create_suffix_entry = True', + 'require_index = True', + 'sample_entries = yes', + 'suffix = dc=example,dc=com', + )) + # Create the test script and run it as nru.user + nru.add_instance_for_cleanup(INSTANCE_SERVERID) + result = nru.run(( + 'type dscreate', + f'dscreate ds-root {nru.dir}/root/. {nru.dir}/bin/', + 'hash -d dscreate # Remove dscreate from hash to use the new one', + 'type dscreate', + f'dscreate from-file {nru.dir}/ds.tmpl', + f'dsconf {INSTANCE_SERVERID} backend create --suffix dc=foo,dc=bar --be-name=foo --create-entries', + f'ldapsearch -x -H ldap://localhost:{INSTANCE_PORT} -D "cn=directory manager" -w {PW_DM} -b dc=foo,dc=bar "uid=*" | tee {nru.dir}/search.out', + f'dsctl {INSTANCE_SERVERID} stop', + )) + log.info(f'test.sh stdout is: {str(result.stdout)}') + log.info(f'test.sh stderr is: {str(result.stderr)}') + + # Let check that demo_user is in the search result + with open(f'{nru.dir}/search.out', 'rt') as f: + assert(re.findall('demo_user', f.read())) + log.debug(f'Check that test script finished successfully.') + assert(result.returncode == 0) + +def test_setup_ds_as_non_root_with_default_options(nru, request): + """Test creating an instance as a non root user + + :id: 160e3eaa-7cb9-11ed-9b2b-482ae39447e5 + :setup: Create a non root user environment + :steps: + 1. Create a dscreate template file + 2. 
Create an run a test script that + Run dscreate ds-root + Run dscreate from-file without specifying any ports + Add a backend + Search users in backend and store output in a file + Stop the instance + 3. Check demo_user is in the search result + 4. Check that test.sh returned 0 + + + :expectedresults: + 1. No error. + 2. No error. + 3. Should fail to kill the process (That is supposed to be stopped) + 4. demo_user should be in search result + 5. return code should be 0 + + """ + + nru.setdir(testname=request.node.name) + # Prepare dscreate template + nru.write_file(f'{nru.dir}/ds.tmpl', lines=( + '[general]', + '[slapd]', + f'instance_name = {INSTANCE_SERVERID}', + f'root_password = {PW_DM}', + )) + # Remove instance if test fails + nru.add_instance_for_cleanup(INSTANCE_SERVERID) + # Create the test script and run it as nru.user + result = nru.run(( + f'dscreate ds-root {nru.dir}/root {nru.dir}/bin', + 'hash -d dscreate # Remove dscreate from hash to use the new one', + 'type dscreate', + f'dscreate from-file {nru.dir}/ds.tmpl', + f'dsconf {INSTANCE_SERVERID} backend create --suffix dc=foo,dc=bar --be-name=foo --create-entries', + "port=`awk '/nsslapd-port/ { print $2; }' " + f"{nru.dir}/root/etc/dirsrv/slapd-{INSTANCE_SERVERID}/dse.ldif`", + f'ldapsearch -x -H ldap://localhost:$port -D "cn=directory manager" -w {PW_DM} -b dc=foo,dc=bar "uid=*" | tee {nru.dir}/search.out', + f'dsctl {INSTANCE_SERVERID} stop', + )) + log.info(f'test.sh stdout is: {str(result.stdout)}') + log.info(f'test.sh stderr is: {str(result.stderr)}') + + # Let check that demo_user is in the search result + with open(f'{nru.dir}/search.out', 'rt') as f: + assert(re.findall('demo_user', f.read())) + log.debug(f'Check that test script finished successfully.') + assert(result.returncode == 0) + +def test_dscreate_non_root_defaults(nru, request): + """Test creating an instance as a non root user + + :id: 98174234-7cb9-11ed-9be5-482ae39447e5 + :setup: Create a non root user environment + :steps: + 
1. Run dscreate create-template --advanced + 2. Checks that we got expected default values + + + :expectedresults: + 1. No error. + 2. Check that: + selinux=False + systemd=False + port != 389 + secure_port != 636 + + """ + + nru.setdir(testname=request.node.name) + # Prepare dscreate template + # Create the test script and run it as nru.user + result = nru.run(("dscreate create-template --advanced",)) + stdout = ensure_str(result.stdout) + assert(result.returncode == 0) + log.debug(f"stdout={stdout}") + assert ";selinux = False" in stdout + assert ";systemd = False" in stdout + assert not ";secure_port = 636" in stdout + assert not ";port = 389" in stdout diff --git a/dirsrvtests/tests/suites/setup_ds/remove_test.py b/dirsrvtests/tests/suites/setup_ds/remove_test.py new file mode 100644 index 0000000..78e5c0a --- /dev/null +++ b/dirsrvtests/tests/suites/setup_ds/remove_test.py @@ -0,0 +1,69 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2018 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. 
+# --- END COPYRIGHT BLOCK --- + +import os +import subprocess +import pytest +import logging +from lib389 import DirSrv +from lib389.instance.remove import remove_ds_instance +from lib389._constants import ReplicaRole +from lib389.topologies import create_topology +from lib389.utils import ds_is_older + +pytestmark = pytest.mark.tier0 + + +@pytest.fixture(scope="function") +def topology_st(request): + """Create DS standalone instance""" + + topology = create_topology({ReplicaRole.STANDALONE: 1}) + + def fin(): + if topology.standalone.exists(): + topology.standalone.delete() + request.addfinalizer(fin) + + return topology + +@pytest.mark.skipif(ds_is_older('1.4.3'), reason="Backend split, lib389 supports only cn=bdb,cn=config...") +@pytest.mark.parametrize("simple_allocate", (True, False)) +def test_basic(topology_st, simple_allocate): + """Check that all DS directories and systemd items were removed + + :id: 9e8bbcda-358d-4e9c-a38c-9b4c3b63308e + :parametrized: yes + """ + + inst = topology_st.standalone + + # FreeIPA uses local_simple_allocate for the removal process + if simple_allocate: + inst = DirSrv(verbose=inst.verbose) + inst.local_simple_allocate(topology_st.standalone.serverid) + + remove_ds_instance(inst) + + paths = [inst.ds_paths.backup_dir, + inst.ds_paths.cert_dir, + inst.ds_paths.config_dir, + inst.ds_paths.db_dir, + inst.get_changelog_dir(), + inst.ds_paths.ldif_dir, + inst.ds_paths.lock_dir, + inst.ds_paths.log_dir] + for path in paths: + assert not os.path.exists(path) + + try: + subprocess.check_output(['systemctl', 'is-enabled', 'dirsrv@{}'.format(inst.serverid)], encoding='utf-8') + except subprocess.CalledProcessError as ex: + assert "disabled" in ex.output + + diff --git a/dirsrvtests/tests/suites/slapi_memberof/basic_interface_test.py b/dirsrvtests/tests/suites/slapi_memberof/basic_interface_test.py new file mode 100644 index 0000000..c5ecf52 --- /dev/null +++ b/dirsrvtests/tests/suites/slapi_memberof/basic_interface_test.py @@ -0,0 
# --- BEGIN COPYRIGHT BLOCK ---
# Copyright (C) 2023 Red Hat, Inc.
# All rights reserved.
#
# License: GPL (version 3 or any later version).
# See LICENSE for details.
# --- END COPYRIGHT BLOCK ---

import os
import logging
import pytest
import ldap
from lib389.backend import Backends, Backend
from lib389.mappingTree import MappingTrees
from lib389.configurations.sample import create_base_domain
# BUG FIX: the `ldap.extop` and `pyasn1` imports were each duplicated in the
# original file; import them once.
from ldap.extop import ExtendedRequest, ExtendedResponse
from pyasn1.type import namedtype, univ
from pyasn1.codec.ber import encoder, decoder
from lib389.utils import ensure_bytes, get_plugin_dir
from lib389 import Entry

from lib389._constants import DEFAULT_SUFFIX, PW_DM
from lib389.topologies import topology_st as topo
from lib389.plugins import MemberOfPlugin

from lib389.idm.user import UserAccount, UserAccounts
from lib389.idm.account import Accounts

pytestmark = pytest.mark.tier0
log = logging.getLogger(__name__)


class SlapiMemberofRequestValue(univ.Sequence):
    """Empty BER sequence used as the extended operation request value."""
    pass


class SlapiMemberofRequest(ExtendedRequest):
    """Extended request targeting the test_slapi_memberof plugin extop."""

    def __init__(self, requestValidLifeTime=0):
        # NOTE(review): requestValidLifeTime is accepted but never used -
        # confirm whether it should be encoded into the request value.
        self.requestName = '2.3.4.5.113730.6.7.1'

    def encodedRequestValue(self):
        v = SlapiMemberofRequestValue()
        return encoder.encode(v)


@pytest.fixture(scope="module")
def install_test_plugin(topo):
    """Build test_slapi_memberof.c from the source tree and install the
    resulting shared library into the server plugin directory.

    Crafts slapi-plugin.h / slapi-private.h with the slapi_memberof
    definitions when they are not present yet (PR not merged)."""
    import subprocess
    import shutil
    import sys
    import re
    import random

    current_dir = os.getcwd()
    # create a build directory
    build_dir = "/tmp/build.%d" % (random.randint(1, 10000))
    os.makedirs(build_dir, exist_ok=True)
    cmd_str = "chmod 755 %s" % build_dir
    subprocess.run(cmd_str, shell=True)
    os.chdir(build_dir)

    # Retrieve the path of the workspace (from the path of the test)
    workspace = None
    for arg in sys.argv:
        if arg.find("slapi_memberof") > -1:
            log.info("Workspace is: %s" % arg)
            workspace = re.sub("dirsrvtest.*$", "", arg)
        else:
            log.info("Workspace is not: %s" % arg)

    if not workspace:
        log.info("Fail to Retrieve from the repos containing slapi_memberof test plugin source")
        log.info("using the current directory as workspace")
        workspace = current_dir

    # Gather the include files from 'ldap/include' and 'ldap/servers/slapd'
    for the_include in ["portable.h", "avl.h", "ldaprot.h"]:
        include_file = "%s/ldap/include/%s" % (workspace, the_include)
        log.info("Retrieved from the repos: %s" % include_file)
        file_path = "%s/%s" % (build_dir, the_include)
        shutil.copy(include_file, file_path)
        cmd_str = "chmod 666 %s" % file_path
        subprocess.run(cmd_str, shell=True)

    for the_include in ["slap.h", "slapi-private.h", "slapi-plugin.h", "haproxy.h",
                        "slapi_pal.h", "csngen.h", "uuid.h", "disconnect_errors.h",
                        "pw.h", "filter.h", "proto-slap.h", "intrinsics.h", "slapi-plugin-compat4.h"]:
        include_file = "%s/ldap/servers/slapd/%s" % (workspace, the_include)
        log.info("Retrieve from the repos: %s" % include_file)
        file_path = "%s/%s" % (build_dir, the_include)
        shutil.copy(include_file, file_path)
        cmd_str = "chmod 666 %s" % file_path
        subprocess.run(cmd_str, shell=True)

    # retrieve the test plugin source
    log.info("use the default location")
    src_file = "%s/ldap/servers/slapd/test-plugins/test_slapi_memberof.c" % (workspace)
    dst_file = "%s/%s" % (build_dir, "test_slapi_memberof.c")
    log.info("Retrieve from the repos: %s" % src_file)
    shutil.copy(src_file, dst_file)
    cmd_str = "chmod 666 %s" % dst_file
    subprocess.run(cmd_str, shell=True)
    test_plugin_location = dst_file

    #
    # If needed (if PR not pushed yet) to craft slapi-plugin.h
    #
    file_path_old = "%s/slapi-plugin.h" % (build_dir)
    file_path_new = "%s/slapi-plugin.h.new" % (build_dir)
    slapi_plugin_old = open(file_path_old)

    # before crafting check if slapi_memberof defs are present
    need_to_craft = True
    for line in slapi_plugin_old:
        if "Slapi_MemberOfConfig" in line:
            need_to_craft = False
            break

    if need_to_craft:
        log.info("Need to craft slapi-plugin.h")
        slapi_plugin_old.seek(0, 0)
        slapi_plugin_new = open(file_path_new, "w")

        # definitions that were missing, add them
        struct_slapi_memberof = """

#include
typedef enum {
    MEMBEROF_REUSE_ONLY,
    MEMBEROF_REUSE_IF_POSSIBLE,
    MEMBEROF_RECOMPUTE
} memberof_flag_t;

typedef struct _slapi_memberofresult {
    Slapi_ValueSet *nsuniqueid_vals;
    Slapi_ValueSet *dn_vals;
    PRBool maxgroups_reached; /* flag is true if the number of groups hit the max limit */
} Slapi_MemberOfResult;

typedef struct _slapi_memberofconfig
{
    char **groupattrs;
    PRBool subtree_search;
    int allBackends;
    Slapi_DN **entryScopes;
    Slapi_DN **entryScopeExcludeSubtrees;
    PRBool recurse;
    int maxgroups;
    memberof_flag_t flag;
    char *error_msg;
    int errot_msg_lenght;
    int entryScopeCount; /* private to slapi_memberof */
    int entryExcludeScopeCount; /* private to slapi_memberof */
    PRBool maxgroups_reached; /* private to slapi_memberof */
    const char *memberof_attr; /* private to slapi_memberof */
    Slapi_Attr *dn_syntax_attr; /* private to slapi_memberof */
    PLHashTable *ancestors_cache; /* private to slapi_memberof */
    int current_maxgroup; /* private to slapi_memberof */
} Slapi_MemberOfConfig;

"""
        # Inject the definitions just before the closing include guard.
        for line in slapi_plugin_old:
            if re.search(r"^#endif.*SLAPIPLUGIN_H_.*$", line):
                slapi_plugin_new.write(struct_slapi_memberof)
                slapi_plugin_new.write("\n")
            slapi_plugin_new.write(line)

        slapi_plugin_old.close()
        slapi_plugin_new.close()
        os.remove(file_path_old)
        shutil.move(file_path_new, file_path_old)

    #
    # If needed (if PR not pushed yet) to craft slapi-private.h
    #
    file_path_old = "%s/slapi-private.h" % (build_dir)
    file_path_new = "%s/slapi-private.h.new" % (build_dir)
    slapi_private_old = open(file_path_old)

    # before crafting check if slapi_memberof defs are present
    need_to_craft = True
    for line in slapi_private_old:
        if "slapi_memberof" in line:
            need_to_craft = False
            break

    if need_to_craft:
        log.info("Need to craft slapi-private.h")
        slapi_private_old.seek(0, 0)
        slapi_private_new = open(file_path_new, "w")

        # definitions that were missing, add them
        struct_slapi_memberof = """
int slapi_memberof(Slapi_MemberOfConfig *config, Slapi_DN *member_sdn, Slapi_MemberOfResult *result);
void slapi_memberof_free_memberof_plugin_config();
int slapi_memberof_load_memberof_plugin_config();

"""

        # Inject the prototypes just before the dup_ldif_line declaration.
        for line in slapi_private_old:
            if re.search(r"^void dup_ldif_line.*$", line):
                slapi_private_new.write(struct_slapi_memberof)
                slapi_private_new.write("\n")
            slapi_private_new.write(line)

        slapi_private_old.close()
        slapi_private_new.close()
        os.remove(file_path_old)
        shutil.move(file_path_new, file_path_old)

    # build the plugin into a shared library
    test_plugin_object = "%s/test_slapi_memberof.o" % build_dir
    test_plugin_sharedlib = "%s/libtest_slapi_memberof-plugin.so" % build_dir
    cmd_str = "/usr/bin/gcc -I./ldap/include -I./ldap/servers/slapd -I./include -I. -I/usr/include -I/usr/include/nss3 -I%s -I/usr/include/nspr4 -g -O2 -Wall -c %s -fPIC -DPIC -o %s" % (build_dir, test_plugin_location, test_plugin_object)
    subprocess.run(cmd_str, shell=True)
    cmd_str = "/usr/bin/gcc -shared -fPIC -DPIC %s -Wl,-rpath -Wl,/usr/lib64/dirsrv -L/usr/lib64/dirsrv/ /usr/lib64/dirsrv/libslapd.so.0 -lldap -llber -lc -Wl,-z,now -g -O2 -O2 -m64 -Wl,-z -Wl,relro -Wl,--as-needed -Wl,-z -Wl,now -Wl,-soname -Wl,libtest_slapi_memberof-plugin.so -o %s" % (test_plugin_object, test_plugin_sharedlib)
    subprocess.run(cmd_str, shell=True)

    # install the test plugin
    cmd_str = "chmod 755 %s" % test_plugin_sharedlib
    subprocess.run(cmd_str, shell=True)
    shutil.copy(test_plugin_sharedlib, topo.standalone.get_plugin_dir())


def _check_res_vs_expected(msg, res, expected):
    """Assert that *res* and *expected* contain the same DNs, ignoring case."""
    log.info("Checking %s expecting %d entries" % (msg, len(expected)))
    assert len(expected) == len(res)
    expected_str_lower = [str(i).lower() for i in expected]
    res_str_lower = [str(i).lower() for i in res]
    for i in expected_str_lower:
        log.info("Check that %s is present" % (i))
        assert i in res_str_lower


EMPTY_RESULT = "no error msg"


def _extop_test_slapi_member(server, dn, relation):
    """Send the slapi_memberof extop for *dn* and return the decoded DNs."""
    value = univ.OctetString(dn)
    value_encoded = encoder.encode(value)

    extop = ExtendedRequest(requestName='2.3.4.5.113730.6.7.1', requestValue=value_encoded)
    (oid_response, res) = server.extop_s(extop)
    d1, d2 = decoder.decode(res)
    log.info("The entries refering to %s as %s are:" % (dn, relation))
    for i in d1:
        log.info(" - %s" % i)
    return d1


def replace_manager(server, dn, managers):
    """Replace the 'manager' attribute of *dn* with *managers*."""
    mod = [(ldap.MOD_REPLACE, 'manager', managers)]
    server.modify_s(dn, mod)


def add_entry(server, uid, manager=None, subtree=None):
    """Add a person entry (optionally under ou=<subtree>) and set its manager.

    Returns the DN of the new entry."""
    if (subtree):
        dn = 'uid=%s,ou=%s,ou=People,%s' % (uid, subtree, DEFAULT_SUFFIX)
    else:
        dn = 'uid=%s,ou=People,%s' % (uid, DEFAULT_SUFFIX)
    server.add_s(Entry((dn, {'objectclass': 'top person extensibleObject'.split(),
                             'uid': uid,
                             'cn': uid,
                             'sn': uid})))
    if manager:
        replace_manager(server, dn, manager)
    return dn
def test_slapi_memberof_simple(topo, request, install_test_plugin):
    """
    Test that management hierarchy (manager) is computed with slapi_member
    with following parameters
     - membership attribute: 'manager'
     - span over all backends: 'on'
     - skip nesting membership: 'off'
     - computation mode: recompute
     - Scope: DEFAULT_SUFFIX
     - ExcludeScope: None
     - Maximum return entries: None

    :id: 4c2595eb-a947-4c0b-996c-e499db67d11a
    :setup: Standalone instance
    :steps:
        1. provision a set of entry
        2. configure test_slapi_memberof as described above
        3. check computed membership vs expected result
    :expectedresults:
        1. Operation should succeed
        2. Operation should succeed
        3. Operation should succeed

    DIT is :
    e_1_parent_0
    - e_1_parent_1_0
    -- e_1_parent_1_1_0
    --- e_1_parent_1_1_1_0
    --- e_2_parent_1_1_1_0
    --- e_3_parent_1_1_1_0
    --- e_4_parent_1_1_1_0
    --- e_5_parent_1_1_1_0
    -- e_2_parent_1_1_0
    - e_2_parent_1_0
    -- e_1_parent_2_1_0
    -- e_2_parent_2_1_0
    --- e_1_parent_2_2_1_0
    -- e_3_parent_2_1_0
    -- e_4_parent_2_1_0
    e_2_parent_0
    - e_1_parent_2_0
    - e_2_parent_2_0
    - e_3_parent_2_0
    - e_4_parent_2_0
    e_3_parent_0
    - e_1_parent_3_0
    -- e_1_parent_1_3_0
    --- e_1_parent_1_1_3_0
    ---- e_1_parent_1_1_1_3_0
    """
    # NOTE(review): removed dead code — the original created an unused
    # UserAccounts(topo.standalone, DEFAULT_SUFFIX) instance here.

    # First subtree
    e_1_parent_0 = add_entry(topo.standalone, uid="e_1_parent_0")

    e_1_parent_1_0 = add_entry(topo.standalone, uid="e_1_parent_1_0", manager=[ensure_bytes(e_1_parent_0)])

    e_1_parent_1_1_0 = add_entry(topo.standalone, uid="e_1_parent_1_1_0", manager=[ensure_bytes(e_1_parent_1_0)])

    e_1_parent_1_1_1_0 = add_entry(topo.standalone, uid="e_1_parent_1_1_1_0", manager=[ensure_bytes(e_1_parent_1_1_0)])
    e_2_parent_1_1_1_0 = add_entry(topo.standalone, uid="e_2_parent_1_1_1_0", manager=[ensure_bytes(e_1_parent_1_1_0)])
    e_3_parent_1_1_1_0 = add_entry(topo.standalone, uid="e_3_parent_1_1_1_0", manager=[ensure_bytes(e_1_parent_1_1_0)])
    e_4_parent_1_1_1_0 = add_entry(topo.standalone, uid="e_4_parent_1_1_1_0", manager=[ensure_bytes(e_1_parent_1_1_0)])
    e_5_parent_1_1_1_0 = add_entry(topo.standalone, uid="e_5_parent_1_1_1_0", manager=[ensure_bytes(e_1_parent_1_1_0)])

    e_2_parent_1_1_0 = add_entry(topo.standalone, uid="e_2_parent_1_1_0", manager=[ensure_bytes(e_1_parent_1_0)])

    e_2_parent_1_0 = add_entry(topo.standalone, uid="e_2_parent_1_0", manager=[ensure_bytes(e_1_parent_0)])

    e_1_parent_2_1_0 = add_entry(topo.standalone, uid="e_1_parent_2_1_0", manager=[ensure_bytes(e_2_parent_1_0)])
    e_2_parent_2_1_0 = add_entry(topo.standalone, uid="e_2_parent_2_1_0", manager=[ensure_bytes(e_2_parent_1_0)])
    e_1_parent_2_2_1_0 = add_entry(topo.standalone, uid="e_1_parent_2_2_1_0", manager=[ensure_bytes(e_2_parent_2_1_0)])
    e_3_parent_2_1_0 = add_entry(topo.standalone, uid="e_3_parent_2_1_0", manager=[ensure_bytes(e_2_parent_1_0)])
    e_4_parent_2_1_0 = add_entry(topo.standalone, uid="e_4_parent_2_1_0", manager=[ensure_bytes(e_2_parent_1_0)])

    # 2nd subtree
    e_2_parent_0 = add_entry(topo.standalone, uid="e_2_parent_0")

    e_1_parent_2_0 = add_entry(topo.standalone, uid="e_1_parent_2_0", manager=[ensure_bytes(e_2_parent_0)])
    e_2_parent_2_0 = add_entry(topo.standalone, uid="e_2_parent_2_0", manager=[ensure_bytes(e_2_parent_0)])
    e_3_parent_2_0 = add_entry(topo.standalone, uid="e_3_parent_2_0", manager=[ensure_bytes(e_2_parent_0)])
    e_4_parent_2_0 = add_entry(topo.standalone, uid="e_4_parent_2_0", manager=[ensure_bytes(e_2_parent_0)])

    # third subtree
    e_3_parent_0 = add_entry(topo.standalone, uid="e_3_parent_0")

    e_1_parent_3_0 = add_entry(topo.standalone, uid="e_1_parent_3_0", manager=[ensure_bytes(e_3_parent_0)])

    e_1_parent_1_3_0 = add_entry(topo.standalone, uid="e_1_parent_1_3_0", manager=[ensure_bytes(e_1_parent_3_0)])

    e_1_parent_1_1_3_0 = add_entry(topo.standalone, uid="e_1_parent_1_1_3_0", manager=[ensure_bytes(e_1_parent_1_3_0)])

    e_1_parent_1_1_1_3_0 = add_entry(topo.standalone, uid="e_1_parent_1_1_1_3_0", manager=[ensure_bytes(e_1_parent_1_1_3_0)])

    # Register the test extended-operation plugin with the parameters under test
    dn_config = 'cn=test_slapi_memberof,cn=plugins,cn=config'
    topo.standalone.add_s(Entry((dn_config, {'objectclass': 'top nsSlapdPlugin extensibleObject'.split(),
                                             'cn': 'test_slapi_memberof',
                                             'nsslapd-pluginPath': 'libtest_slapi_memberof-plugin',
                                             'nsslapd-pluginInitfunc': 'test_slapi_memberof_init',
                                             'nsslapd-pluginType': 'extendedop',
                                             'nsslapd-pluginEnabled': 'on',
                                             'nsslapd-plugin-depends-on-type': 'database',
                                             'nsslapd-pluginId': 'test_slapi_memberof-plugin',
                                             'slapimemberOfMemberDN': 'uid=test_user_11,ou=People,dc=example,dc=com',
                                             'slapimemberOfGroupAttr': 'manager',
                                             'slapimemberOfAttr': 'memberof',
                                             'slapimemberOfAllBackends': 'on',
                                             'slapimemberOfSkipNested': 'off',
                                             'slapimemberOfEntryScope': DEFAULT_SUFFIX,
                                             'slapimemberOfMaxGroup': '0',
                                             'nsslapd-pluginVersion': '2.3.2.202302131418git0e190fc3d',
                                             'nsslapd-pluginVendor': '389 Project',
                                             'nsslapd-pluginDescription': 'test_slapi_memberof extended operation plugin'})))
    topo.standalone.restart()

    # Check the first subtree
    expected = [e_1_parent_1_0, e_1_parent_1_1_0, e_1_parent_1_1_1_0, e_2_parent_1_1_1_0, e_3_parent_1_1_1_0, e_4_parent_1_1_1_0, e_5_parent_1_1_1_0, e_2_parent_1_1_0, e_2_parent_1_0, e_1_parent_2_1_0, e_2_parent_2_1_0, e_1_parent_2_2_1_0, e_3_parent_2_1_0, e_4_parent_2_1_0]
    res = _extop_test_slapi_member(server=topo.standalone, dn=e_1_parent_0, relation="manager")
    _check_res_vs_expected("first subtree", res, expected)

    # Check the second subtree
    expected = [e_1_parent_2_0, e_2_parent_2_0, e_3_parent_2_0, e_4_parent_2_0]
    res = _extop_test_slapi_member(server=topo.standalone, dn=e_2_parent_0, relation="manager")
    _check_res_vs_expected("second subtree", res, expected)

    # Check the third subtree
    expected = [e_1_parent_3_0, e_1_parent_1_3_0, e_1_parent_1_1_3_0, e_1_parent_1_1_1_3_0]
    res = _extop_test_slapi_member(server=topo.standalone, dn=e_3_parent_0, relation="manager")
    _check_res_vs_expected("third subtree", res, expected)

    # check e_1_parent_1_0
    expected = [e_1_parent_1_1_0, e_1_parent_1_1_1_0, e_2_parent_1_1_1_0, e_3_parent_1_1_1_0, e_4_parent_1_1_1_0, e_5_parent_1_1_1_0, e_2_parent_1_1_0]
    res = _extop_test_slapi_member(server=topo.standalone, dn=e_1_parent_1_0, relation="manager")
    _check_res_vs_expected("organisation reporting to e_1_parent_1_0", res, expected)

    # check e_1_parent_1_1_0
    expected = [e_1_parent_1_1_1_0, e_2_parent_1_1_1_0, e_3_parent_1_1_1_0, e_4_parent_1_1_1_0, e_5_parent_1_1_1_0]
    res = _extop_test_slapi_member(server=topo.standalone, dn=e_1_parent_1_1_0, relation="manager")
    _check_res_vs_expected("organisation reporting to e_1_parent_1_1_0", res, expected)

    # check e_2_parent_1_1_0 (leaf entry: nobody reports to it)
    expected = [EMPTY_RESULT]
    res = _extop_test_slapi_member(server=topo.standalone, dn=e_2_parent_1_1_0, relation="manager")
    _check_res_vs_expected("organisation reporting to e_2_parent_1_1_0", res, expected)

    # check e_2_parent_1_0
    expected = [e_1_parent_2_1_0, e_2_parent_2_1_0, e_1_parent_2_2_1_0, e_3_parent_2_1_0, e_4_parent_2_1_0]
    res = _extop_test_slapi_member(server=topo.standalone, dn=e_2_parent_1_0, relation="manager")
    _check_res_vs_expected("organisation reporting to e_2_parent_1_0", res, expected)

    # check e_2_parent_2_1_0
    expected = [e_1_parent_2_2_1_0]
    res = _extop_test_slapi_member(server=topo.standalone, dn=e_2_parent_2_1_0, relation="manager")
    _check_res_vs_expected("organisation reporting to e_2_parent_2_1_0", res, expected)

    # Check e_1_parent_3_0
    expected = [e_1_parent_1_3_0, e_1_parent_1_1_3_0, e_1_parent_1_1_1_3_0]
    res = _extop_test_slapi_member(server=topo.standalone, dn=e_1_parent_3_0, relation="manager")
    _check_res_vs_expected("organisation reporting to e_1_parent_3_0", res, expected)

    # Check e_1_parent_1_3_0
    expected = [e_1_parent_1_1_3_0, e_1_parent_1_1_1_3_0]
    res = _extop_test_slapi_member(server=topo.standalone, dn=e_1_parent_1_3_0, relation="manager")
    _check_res_vs_expected("organisation reporting to e_1_parent_1_3_0", res, expected)

    # Check e_1_parent_1_1_3_0
    expected = [e_1_parent_1_1_1_3_0]
    res = _extop_test_slapi_member(server=topo.standalone, dn=e_1_parent_1_1_3_0, relation="manager")
    _check_res_vs_expected("organisation reporting to e_1_parent_1_1_3_0", res, expected)

    # Check e_1_parent_1_1_1_3_0 (leaf entry: nobody reports to it)
    expected = [EMPTY_RESULT]
    res = _extop_test_slapi_member(server=topo.standalone, dn=e_1_parent_1_1_1_3_0, relation="manager")
    _check_res_vs_expected("organisation reporting to e_1_parent_1_1_1_3_0", res, expected)

    def fin():
        # Tear down: remove all provisioned entries and the plugin configuration
        entries = [e_1_parent_0, e_1_parent_1_0, e_1_parent_1_1_0, e_1_parent_1_1_1_0, e_2_parent_1_1_1_0, e_3_parent_1_1_1_0, e_4_parent_1_1_1_0, e_5_parent_1_1_1_0, e_2_parent_1_1_0, e_2_parent_1_0, e_1_parent_2_1_0, e_2_parent_2_1_0, e_1_parent_2_2_1_0, e_3_parent_2_1_0, e_4_parent_2_1_0, e_2_parent_0, e_1_parent_2_0, e_2_parent_2_0, e_3_parent_2_0, e_4_parent_2_0, e_3_parent_0, e_1_parent_3_0, e_1_parent_1_3_0, e_1_parent_1_1_3_0, e_1_parent_1_1_1_3_0]
        for entry in entries:
            topo.standalone.delete_s(entry)
        topo.standalone.delete_s(dn_config)

    request.addfinalizer(fin)
def test_slapi_memberof_allbackends_on(topo, request, install_test_plugin):
    """
    Test that management hierarchy (manager) is computed with slapi_member
    It exists several backends and manager relationship cross those backends
    with following parameters
     - membership attribute: 'manager'
     - span over all backends: 'on' <----
     - skip nesting membership: 'off'
     - computation mode: recompute
     - Scope: DEFAULT_SUFFIX
     - ExcludeScope: None
     - Maximum return entries: None

    :id: 910c43a0-04ae-48f1-9e3c-6d97ba5bcb71
    :setup: Standalone instance
    :steps:
        1. create a second backend with foo_bar entry
        2. provision a set of entries in default backend with foo_bar being
           manager of entry e_1_parent_1_1_1_3_0 that is in default backend
        3. configure test_slapi_memberof as described above
        4. check computed membership vs expected result
           slapi_memberof(foo_bar, "manager") -> e_1_parent_1_1_1_3_0
    :expectedresults:
        1. Operation should succeed
        2. Operation should succeed
        3. Operation should succeed
        4. Operation should succeed

    DIT is :
    e_1_parent_0
    - e_1_parent_1_0
    -- e_1_parent_1_1_0
    --- e_1_parent_1_1_1_0
    --- e_2_parent_1_1_1_0
    --- e_3_parent_1_1_1_0
    --- e_4_parent_1_1_1_0
    --- e_5_parent_1_1_1_0
    -- e_2_parent_1_1_0
    - e_2_parent_1_0
    -- e_1_parent_2_1_0
    -- e_2_parent_2_1_0
    --- e_1_parent_2_2_1_0
    -- e_3_parent_2_1_0
    -- e_4_parent_2_1_0
    e_2_parent_0
    - e_1_parent_2_0
    - e_2_parent_2_0
    - e_3_parent_2_0
    - e_4_parent_2_0
    e_3_parent_0
    - e_1_parent_3_0
    -- e_1_parent_1_3_0
    --- e_1_parent_1_1_3_0
    ---- e_1_parent_1_1_1_3_0
    """
    # create a second backend
    second_suffix = 'dc=foo,dc=bar'
    be_name = 'fooBar'
    be1 = Backend(topo.standalone)
    be1.create(properties={
        'cn': be_name,
        'nsslapd-suffix': second_suffix,
    },
    )
    # Create the domain entry
    create_base_domain(topo.standalone, second_suffix)
    rdn = 'foo_bar'
    dn_entry_foo_bar = 'uid=%s,%s' % (rdn, second_suffix)
    topo.standalone.add_s(Entry((dn_entry_foo_bar, {'objectclass': 'top person extensibleObject'.split(),
                                                    'uid': rdn,
                                                    'cn': rdn,
                                                    'sn': rdn})))

    # NOTE(review): removed dead code — the original created an unused
    # UserAccounts(topo.standalone, DEFAULT_SUFFIX) instance here.

    # First subtree
    e_1_parent_0 = add_entry(topo.standalone, uid="e_1_parent_0")

    e_1_parent_1_0 = add_entry(topo.standalone, uid="e_1_parent_1_0", manager=[ensure_bytes(e_1_parent_0)])

    e_1_parent_1_1_0 = add_entry(topo.standalone, uid="e_1_parent_1_1_0", manager=[ensure_bytes(e_1_parent_1_0)])

    e_1_parent_1_1_1_0 = add_entry(topo.standalone, uid="e_1_parent_1_1_1_0", manager=[ensure_bytes(e_1_parent_1_1_0)])
    e_2_parent_1_1_1_0 = add_entry(topo.standalone, uid="e_2_parent_1_1_1_0", manager=[ensure_bytes(e_1_parent_1_1_0)])
    e_3_parent_1_1_1_0 = add_entry(topo.standalone, uid="e_3_parent_1_1_1_0", manager=[ensure_bytes(e_1_parent_1_1_0)])
    e_4_parent_1_1_1_0 = add_entry(topo.standalone, uid="e_4_parent_1_1_1_0", manager=[ensure_bytes(e_1_parent_1_1_0)])
    e_5_parent_1_1_1_0 = add_entry(topo.standalone, uid="e_5_parent_1_1_1_0", manager=[ensure_bytes(e_1_parent_1_1_0)])

    e_2_parent_1_1_0 = add_entry(topo.standalone, uid="e_2_parent_1_1_0", manager=[ensure_bytes(e_1_parent_1_0)])

    e_2_parent_1_0 = add_entry(topo.standalone, uid="e_2_parent_1_0", manager=[ensure_bytes(e_1_parent_0)])

    e_1_parent_2_1_0 = add_entry(topo.standalone, uid="e_1_parent_2_1_0", manager=[ensure_bytes(e_2_parent_1_0)])
    e_2_parent_2_1_0 = add_entry(topo.standalone, uid="e_2_parent_2_1_0", manager=[ensure_bytes(e_2_parent_1_0)])
    e_1_parent_2_2_1_0 = add_entry(topo.standalone, uid="e_1_parent_2_2_1_0", manager=[ensure_bytes(e_2_parent_2_1_0)])
    e_3_parent_2_1_0 = add_entry(topo.standalone, uid="e_3_parent_2_1_0", manager=[ensure_bytes(e_2_parent_1_0)])
    e_4_parent_2_1_0 = add_entry(topo.standalone, uid="e_4_parent_2_1_0", manager=[ensure_bytes(e_2_parent_1_0)])

    # 2nd subtree
    e_2_parent_0 = add_entry(topo.standalone, uid="e_2_parent_0")

    e_1_parent_2_0 = add_entry(topo.standalone, uid="e_1_parent_2_0", manager=[ensure_bytes(e_2_parent_0)])
    e_2_parent_2_0 = add_entry(topo.standalone, uid="e_2_parent_2_0", manager=[ensure_bytes(e_2_parent_0)])
    e_3_parent_2_0 = add_entry(topo.standalone, uid="e_3_parent_2_0", manager=[ensure_bytes(e_2_parent_0)])
    e_4_parent_2_0 = add_entry(topo.standalone, uid="e_4_parent_2_0", manager=[ensure_bytes(e_2_parent_0)])

    # third subtree
    e_3_parent_0 = add_entry(topo.standalone, uid="e_3_parent_0")

    e_1_parent_3_0 = add_entry(topo.standalone, uid="e_1_parent_3_0", manager=[ensure_bytes(e_3_parent_0)])

    e_1_parent_1_3_0 = add_entry(topo.standalone, uid="e_1_parent_1_3_0", manager=[ensure_bytes(e_1_parent_3_0)])

    e_1_parent_1_1_3_0 = add_entry(topo.standalone, uid="e_1_parent_1_1_3_0", manager=[ensure_bytes(e_1_parent_1_3_0)])

    e_1_parent_1_1_1_3_0 = add_entry(topo.standalone, uid="e_1_parent_1_1_1_3_0", manager=[ensure_bytes(e_1_parent_1_1_3_0)])

    # make foo_bar entry manager of e_1_parent_1_1_1_3_0
    replace_manager(topo.standalone, e_1_parent_1_1_1_3_0, [ensure_bytes(dn_entry_foo_bar)])

    # Register the test extended-operation plugin; scope spans both suffixes
    dn_config = 'cn=test_slapi_memberof,cn=plugins,cn=config'
    topo.standalone.add_s(Entry((dn_config, {'objectclass': 'top nsSlapdPlugin extensibleObject'.split(),
                                             'cn': 'test_slapi_memberof',
                                             'nsslapd-pluginPath': 'libtest_slapi_memberof-plugin',
                                             'nsslapd-pluginInitfunc': 'test_slapi_memberof_init',
                                             'nsslapd-pluginType': 'extendedop',
                                             'nsslapd-pluginEnabled': 'on',
                                             'nsslapd-plugin-depends-on-type': 'database',
                                             'nsslapd-pluginId': 'test_slapi_memberof-plugin',
                                             'slapimemberOfMemberDN': 'uid=test_user_11,ou=People,dc=example,dc=com',
                                             'slapimemberOfGroupAttr': 'manager',
                                             'slapimemberOfAttr': 'memberof',
                                             'slapimemberOfAllBackends': 'on',
                                             'slapimemberOfSkipNested': 'off',
                                             'slapimemberOfEntryScope': [DEFAULT_SUFFIX, second_suffix],
                                             'slapimemberOfMaxGroup': '0',
                                             'nsslapd-pluginVersion': '2.3.2.202302131418git0e190fc3d',
                                             'nsslapd-pluginVendor': '389 Project',
                                             'nsslapd-pluginDescription': 'test_slapi_memberof extended operation plugin'})))
    topo.standalone.restart()

    # Check the first subtree
    expected = [e_1_parent_1_0, e_1_parent_1_1_0, e_1_parent_1_1_1_0, e_2_parent_1_1_1_0, e_3_parent_1_1_1_0, e_4_parent_1_1_1_0, e_5_parent_1_1_1_0, e_2_parent_1_1_0, e_2_parent_1_0, e_1_parent_2_1_0, e_2_parent_2_1_0, e_1_parent_2_2_1_0, e_3_parent_2_1_0, e_4_parent_2_1_0]
    res = _extop_test_slapi_member(server=topo.standalone, dn=e_1_parent_0, relation="manager")
    _check_res_vs_expected("first subtree", res, expected)

    # Check the second subtree
    expected = [e_1_parent_2_0, e_2_parent_2_0, e_3_parent_2_0, e_4_parent_2_0]
    res = _extop_test_slapi_member(server=topo.standalone, dn=e_2_parent_0, relation="manager")
    _check_res_vs_expected("second subtree", res, expected)

    # Check the third subtree: e_1_parent_1_1_1_3_0 now reports to foo_bar,
    # so it is no longer part of e_3_parent_0's organisation
    expected = [e_1_parent_3_0, e_1_parent_1_3_0, e_1_parent_1_1_3_0]
    res = _extop_test_slapi_member(server=topo.standalone, dn=e_3_parent_0, relation="manager")
    _check_res_vs_expected("third subtree", res, expected)

    # check e_1_parent_1_0
    expected = [e_1_parent_1_1_0, e_1_parent_1_1_1_0, e_2_parent_1_1_1_0, e_3_parent_1_1_1_0, e_4_parent_1_1_1_0, e_5_parent_1_1_1_0, e_2_parent_1_1_0]
    res = _extop_test_slapi_member(server=topo.standalone, dn=e_1_parent_1_0, relation="manager")
    _check_res_vs_expected("organisation reporting to e_1_parent_1_0", res, expected)

    # check e_1_parent_1_1_0
    expected = [e_1_parent_1_1_1_0, e_2_parent_1_1_1_0, e_3_parent_1_1_1_0, e_4_parent_1_1_1_0, e_5_parent_1_1_1_0]
    res = _extop_test_slapi_member(server=topo.standalone, dn=e_1_parent_1_1_0, relation="manager")
    _check_res_vs_expected("organisation reporting to e_1_parent_1_1_0", res, expected)

    # check e_2_parent_1_1_0 (leaf entry: nobody reports to it)
    expected = [EMPTY_RESULT]
    res = _extop_test_slapi_member(server=topo.standalone, dn=e_2_parent_1_1_0, relation="manager")
    _check_res_vs_expected("organisation reporting to e_2_parent_1_1_0", res, expected)

    # check e_2_parent_1_0
    expected = [e_1_parent_2_1_0, e_2_parent_2_1_0, e_1_parent_2_2_1_0, e_3_parent_2_1_0, e_4_parent_2_1_0]
    res = _extop_test_slapi_member(server=topo.standalone, dn=e_2_parent_1_0, relation="manager")
    _check_res_vs_expected("organisation reporting to e_2_parent_1_0", res, expected)

    # check e_2_parent_2_1_0
    expected = [e_1_parent_2_2_1_0]
    res = _extop_test_slapi_member(server=topo.standalone, dn=e_2_parent_2_1_0, relation="manager")
    _check_res_vs_expected("organisation reporting to e_2_parent_2_1_0", res, expected)

    # Check e_1_parent_3_0
    expected = [e_1_parent_1_3_0, e_1_parent_1_1_3_0]
    res = _extop_test_slapi_member(server=topo.standalone, dn=e_1_parent_3_0, relation="manager")
    _check_res_vs_expected("organisation reporting to e_1_parent_3_0", res, expected)

    # Check e_1_parent_1_3_0
    expected = [e_1_parent_1_1_3_0]
    res = _extop_test_slapi_member(server=topo.standalone, dn=e_1_parent_1_3_0, relation="manager")
    _check_res_vs_expected("organisation reporting to e_1_parent_1_3_0", res, expected)

    # Check e_1_parent_1_1_3_0 (its only report moved under foo_bar)
    expected = [EMPTY_RESULT]
    res = _extop_test_slapi_member(server=topo.standalone, dn=e_1_parent_1_1_3_0, relation="manager")
    _check_res_vs_expected("organisation reporting to e_1_parent_1_1_3_0", res, expected)

    # Check e_1_parent_1_1_1_3_0
    expected = [EMPTY_RESULT]
    res = _extop_test_slapi_member(server=topo.standalone, dn=e_1_parent_1_1_1_3_0, relation="manager")
    _check_res_vs_expected("organisation reporting to e_1_parent_1_1_1_3_0", res, expected)

    # Check dn_entry_foo_bar: with AllBackends=on the cross-backend
    # manager relationship is taken into account
    expected = [e_1_parent_1_1_1_3_0]
    res = _extop_test_slapi_member(server=topo.standalone, dn=dn_entry_foo_bar, relation="manager")
    _check_res_vs_expected("organisation reporting to dn_entry_foo_bar", res, expected)


    def fin():
        # Tear down: entries, plugin config, foo_bar entry and the extra backend
        entries = [e_1_parent_0, e_1_parent_1_0, e_1_parent_1_1_0, e_1_parent_1_1_1_0, e_2_parent_1_1_1_0, e_3_parent_1_1_1_0, e_4_parent_1_1_1_0, e_5_parent_1_1_1_0, e_2_parent_1_1_0, e_2_parent_1_0, e_1_parent_2_1_0, e_2_parent_2_1_0, e_1_parent_2_2_1_0, e_3_parent_2_1_0, e_4_parent_2_1_0, e_2_parent_0, e_1_parent_2_0, e_2_parent_2_0, e_3_parent_2_0, e_4_parent_2_0, e_3_parent_0, e_1_parent_3_0, e_1_parent_1_3_0, e_1_parent_1_1_3_0, e_1_parent_1_1_1_3_0]
        for entry in entries:
            topo.standalone.delete_s(entry)
        topo.standalone.delete_s(dn_config)
        topo.standalone.delete_s(dn_entry_foo_bar)
        be1.delete()

    request.addfinalizer(fin)
def test_slapi_memberof_allbackends_off(topo, request, install_test_plugin):
    """
    Test that management hierarchy (manager) is computed with slapi_member
    It exists several backends and manager relationship cross those backends
    with following parameters
     - membership attribute: 'manager'
     - span over all backends: 'off' <----
     - skip nesting membership: 'off'
     - computation mode: recompute
     - Scope: DEFAULT_SUFFIX
     - ExcludeScope: None
     - Maximum return entries: None

    :id: 56fb0c16-8086-429b-adf0-fff0eb8e121e
    :setup: Standalone instance
    :steps:
        1. create a second backend with foo_bar entry
        2. provision a set of entries in default backend with foo_bar being
           manager of entry e_1_parent_1_1_1_3_0 that is in default backend
        3. configure test_slapi_memberof as described above
        4. check computed membership vs expected result
           slapi_memberof(foo_bar, "manager") NOT -> e_1_parent_1_1_1_3_0
    :expectedresults:
        1. Operation should succeed
        2. Operation should succeed
        3. Operation should succeed
        4. Operation should succeed

    DIT is :
    e_1_parent_0
    - e_1_parent_1_0
    -- e_1_parent_1_1_0
    --- e_1_parent_1_1_1_0
    --- e_2_parent_1_1_1_0
    --- e_3_parent_1_1_1_0
    --- e_4_parent_1_1_1_0
    --- e_5_parent_1_1_1_0
    -- e_2_parent_1_1_0
    - e_2_parent_1_0
    -- e_1_parent_2_1_0
    -- e_2_parent_2_1_0
    --- e_1_parent_2_2_1_0
    -- e_3_parent_2_1_0
    -- e_4_parent_2_1_0
    e_2_parent_0
    - e_1_parent_2_0
    - e_2_parent_2_0
    - e_3_parent_2_0
    - e_4_parent_2_0
    e_3_parent_0
    - e_1_parent_3_0
    -- e_1_parent_1_3_0
    --- e_1_parent_1_1_3_0
    ---- e_1_parent_1_1_1_3_0
    """
    # Create second backend
    second_suffix = 'dc=foo,dc=bar'
    be_name = 'fooBar'
    be1 = Backend(topo.standalone)
    be1.create(properties={
        'cn': be_name,
        'nsslapd-suffix': second_suffix,
    },
    )
    # Create the domain entry
    create_base_domain(topo.standalone, second_suffix)
    rdn = 'foo_bar'
    dn_entry_foo_bar = 'uid=%s,%s' % (rdn, second_suffix)
    topo.standalone.add_s(Entry((dn_entry_foo_bar, {'objectclass': 'top person extensibleObject'.split(),
                                                    'uid': rdn,
                                                    'cn': rdn,
                                                    'sn': rdn})))

    # NOTE(review): removed dead code — the original created an unused
    # UserAccounts(topo.standalone, DEFAULT_SUFFIX) instance here.

    # First subtree
    e_1_parent_0 = add_entry(topo.standalone, uid="e_1_parent_0")

    e_1_parent_1_0 = add_entry(topo.standalone, uid="e_1_parent_1_0", manager=[ensure_bytes(e_1_parent_0)])

    e_1_parent_1_1_0 = add_entry(topo.standalone, uid="e_1_parent_1_1_0", manager=[ensure_bytes(e_1_parent_1_0)])

    e_1_parent_1_1_1_0 = add_entry(topo.standalone, uid="e_1_parent_1_1_1_0", manager=[ensure_bytes(e_1_parent_1_1_0)])
    e_2_parent_1_1_1_0 = add_entry(topo.standalone, uid="e_2_parent_1_1_1_0", manager=[ensure_bytes(e_1_parent_1_1_0)])
    e_3_parent_1_1_1_0 = add_entry(topo.standalone, uid="e_3_parent_1_1_1_0", manager=[ensure_bytes(e_1_parent_1_1_0)])
    e_4_parent_1_1_1_0 = add_entry(topo.standalone, uid="e_4_parent_1_1_1_0", manager=[ensure_bytes(e_1_parent_1_1_0)])
    e_5_parent_1_1_1_0 = add_entry(topo.standalone, uid="e_5_parent_1_1_1_0", manager=[ensure_bytes(e_1_parent_1_1_0)])

    e_2_parent_1_1_0 = add_entry(topo.standalone, uid="e_2_parent_1_1_0", manager=[ensure_bytes(e_1_parent_1_0)])

    e_2_parent_1_0 = add_entry(topo.standalone, uid="e_2_parent_1_0", manager=[ensure_bytes(e_1_parent_0)])

    e_1_parent_2_1_0 = add_entry(topo.standalone, uid="e_1_parent_2_1_0", manager=[ensure_bytes(e_2_parent_1_0)])
    e_2_parent_2_1_0 = add_entry(topo.standalone, uid="e_2_parent_2_1_0", manager=[ensure_bytes(e_2_parent_1_0)])
    e_1_parent_2_2_1_0 = add_entry(topo.standalone, uid="e_1_parent_2_2_1_0", manager=[ensure_bytes(e_2_parent_2_1_0)])
    e_3_parent_2_1_0 = add_entry(topo.standalone, uid="e_3_parent_2_1_0", manager=[ensure_bytes(e_2_parent_1_0)])
    e_4_parent_2_1_0 = add_entry(topo.standalone, uid="e_4_parent_2_1_0", manager=[ensure_bytes(e_2_parent_1_0)])

    # 2nd subtree
    e_2_parent_0 = add_entry(topo.standalone, uid="e_2_parent_0")

    e_1_parent_2_0 = add_entry(topo.standalone, uid="e_1_parent_2_0", manager=[ensure_bytes(e_2_parent_0)])
    e_2_parent_2_0 = add_entry(topo.standalone, uid="e_2_parent_2_0", manager=[ensure_bytes(e_2_parent_0)])
    e_3_parent_2_0 = add_entry(topo.standalone, uid="e_3_parent_2_0", manager=[ensure_bytes(e_2_parent_0)])
    e_4_parent_2_0 = add_entry(topo.standalone, uid="e_4_parent_2_0", manager=[ensure_bytes(e_2_parent_0)])

    # third subtree
    e_3_parent_0 = add_entry(topo.standalone, uid="e_3_parent_0")

    e_1_parent_3_0 = add_entry(topo.standalone, uid="e_1_parent_3_0", manager=[ensure_bytes(e_3_parent_0)])

    e_1_parent_1_3_0 = add_entry(topo.standalone, uid="e_1_parent_1_3_0", manager=[ensure_bytes(e_1_parent_3_0)])

    e_1_parent_1_1_3_0 = add_entry(topo.standalone, uid="e_1_parent_1_1_3_0", manager=[ensure_bytes(e_1_parent_1_3_0)])

    e_1_parent_1_1_1_3_0 = add_entry(topo.standalone, uid="e_1_parent_1_1_1_3_0", manager=[ensure_bytes(e_1_parent_1_1_3_0)])

    # make foo_bar entry manager of e_1_parent_1_1_1_3_0
    replace_manager(topo.standalone, e_1_parent_1_1_1_3_0, [ensure_bytes(dn_entry_foo_bar)])

    # Register the test extended-operation plugin; AllBackends is 'off' here
    dn_config = 'cn=test_slapi_memberof,cn=plugins,cn=config'
    topo.standalone.add_s(Entry((dn_config, {'objectclass': 'top nsSlapdPlugin extensibleObject'.split(),
                                             'cn': 'test_slapi_memberof',
                                             'nsslapd-pluginPath': 'libtest_slapi_memberof-plugin',
                                             'nsslapd-pluginInitfunc': 'test_slapi_memberof_init',
                                             'nsslapd-pluginType': 'extendedop',
                                             'nsslapd-pluginEnabled': 'on',
                                             'nsslapd-plugin-depends-on-type': 'database',
                                             'nsslapd-pluginId': 'test_slapi_memberof-plugin',
                                             'slapimemberOfMemberDN': 'uid=test_user_11,ou=People,dc=example,dc=com',
                                             'slapimemberOfGroupAttr': 'manager',
                                             'slapimemberOfAttr': 'memberof',
                                             'slapimemberOfAllBackends': 'off',
                                             'slapimemberOfSkipNested': 'off',
                                             'slapimemberOfEntryScope': [DEFAULT_SUFFIX, second_suffix],
                                             'slapimemberOfMaxGroup': '0',
                                             'nsslapd-pluginVersion': '2.3.2.202302131418git0e190fc3d',
                                             'nsslapd-pluginVendor': '389 Project',
                                             'nsslapd-pluginDescription': 'test_slapi_memberof extended operation plugin'})))
    topo.standalone.restart()

    # Check the first subtree
    expected = [e_1_parent_1_0, e_1_parent_1_1_0, e_1_parent_1_1_1_0, e_2_parent_1_1_1_0, e_3_parent_1_1_1_0, e_4_parent_1_1_1_0, e_5_parent_1_1_1_0, e_2_parent_1_1_0, e_2_parent_1_0, e_1_parent_2_1_0, e_2_parent_2_1_0, e_1_parent_2_2_1_0, e_3_parent_2_1_0, e_4_parent_2_1_0]
    res = _extop_test_slapi_member(server=topo.standalone, dn=e_1_parent_0, relation="manager")
    _check_res_vs_expected("first subtree", res, expected)

    # Check the second subtree
    expected = [e_1_parent_2_0, e_2_parent_2_0, e_3_parent_2_0, e_4_parent_2_0]
    res = _extop_test_slapi_member(server=topo.standalone, dn=e_2_parent_0, relation="manager")
    _check_res_vs_expected("second subtree", res, expected)

    # Check the third subtree: e_1_parent_1_1_1_3_0 reports to foo_bar,
    # so it is no longer part of e_3_parent_0's organisation
    expected = [e_1_parent_3_0, e_1_parent_1_3_0, e_1_parent_1_1_3_0]
    res = _extop_test_slapi_member(server=topo.standalone, dn=e_3_parent_0, relation="manager")
    _check_res_vs_expected("third subtree", res, expected)

    # check e_1_parent_1_0
    expected = [e_1_parent_1_1_0, e_1_parent_1_1_1_0, e_2_parent_1_1_1_0, e_3_parent_1_1_1_0, e_4_parent_1_1_1_0, e_5_parent_1_1_1_0, e_2_parent_1_1_0]
    res = _extop_test_slapi_member(server=topo.standalone, dn=e_1_parent_1_0, relation="manager")
    _check_res_vs_expected("organisation reporting to e_1_parent_1_0", res, expected)

    # check e_1_parent_1_1_0
    expected = [e_1_parent_1_1_1_0, e_2_parent_1_1_1_0, e_3_parent_1_1_1_0, e_4_parent_1_1_1_0, e_5_parent_1_1_1_0]
    res = _extop_test_slapi_member(server=topo.standalone, dn=e_1_parent_1_1_0, relation="manager")
    _check_res_vs_expected("organisation reporting to e_1_parent_1_1_0", res, expected)

    # check e_2_parent_1_1_0 (leaf entry: nobody reports to it)
    expected = [EMPTY_RESULT]
    res = _extop_test_slapi_member(server=topo.standalone, dn=e_2_parent_1_1_0, relation="manager")
    _check_res_vs_expected("organisation reporting to e_2_parent_1_1_0", res, expected)

    # check e_2_parent_1_0
    expected = [e_1_parent_2_1_0, e_2_parent_2_1_0, e_1_parent_2_2_1_0, e_3_parent_2_1_0, e_4_parent_2_1_0]
    res = _extop_test_slapi_member(server=topo.standalone, dn=e_2_parent_1_0, relation="manager")
    _check_res_vs_expected("organisation reporting to e_2_parent_1_0", res, expected)

    # check e_2_parent_2_1_0
    expected = [e_1_parent_2_2_1_0]
    res = _extop_test_slapi_member(server=topo.standalone, dn=e_2_parent_2_1_0, relation="manager")
    _check_res_vs_expected("organisation reporting to e_2_parent_2_1_0", res, expected)

    # Check e_1_parent_3_0
    expected = [e_1_parent_1_3_0, e_1_parent_1_1_3_0]
    res = _extop_test_slapi_member(server=topo.standalone, dn=e_1_parent_3_0, relation="manager")
    _check_res_vs_expected("organisation reporting to e_1_parent_3_0", res, expected)

    # Check e_1_parent_1_3_0
    expected = [e_1_parent_1_1_3_0]
    res = _extop_test_slapi_member(server=topo.standalone, dn=e_1_parent_1_3_0, relation="manager")
    _check_res_vs_expected("organisation reporting to e_1_parent_1_3_0", res, expected)

    # Check e_1_parent_1_1_3_0 (its only report moved under foo_bar)
    expected = [EMPTY_RESULT]
    res = _extop_test_slapi_member(server=topo.standalone, dn=e_1_parent_1_1_3_0, relation="manager")
    _check_res_vs_expected("organisation reporting to e_1_parent_1_1_3_0", res, expected)

    # Check e_1_parent_1_1_1_3_0
    expected = [EMPTY_RESULT]
    res = _extop_test_slapi_member(server=topo.standalone, dn=e_1_parent_1_1_1_3_0, relation="manager")
    _check_res_vs_expected("organisation reporting to e_1_parent_1_1_1_3_0", res, expected)

    # Check dn_entry_foo_bar is not manager of e_1_parent_1_1_1_3_0 because slapimemberOfAllBackends=off
    expected = [EMPTY_RESULT]
    res = _extop_test_slapi_member(server=topo.standalone, dn=dn_entry_foo_bar, relation="manager")
    _check_res_vs_expected("organisation reporting to dn_entry_foo_bar", res, expected)


    def fin():
        # Tear down: entries, plugin config, foo_bar entry and the extra backend
        entries = [e_1_parent_0, e_1_parent_1_0, e_1_parent_1_1_0, e_1_parent_1_1_1_0, e_2_parent_1_1_1_0, e_3_parent_1_1_1_0, e_4_parent_1_1_1_0, e_5_parent_1_1_1_0, e_2_parent_1_1_0, e_2_parent_1_0, e_1_parent_2_1_0, e_2_parent_2_1_0, e_1_parent_2_2_1_0, e_3_parent_2_1_0, e_4_parent_2_1_0, e_2_parent_0, e_1_parent_2_0, e_2_parent_2_0, e_3_parent_2_0, e_4_parent_2_0, e_3_parent_0, e_1_parent_3_0, e_1_parent_1_3_0, e_1_parent_1_1_3_0, e_1_parent_1_1_1_3_0]
        for entry in entries:
            topo.standalone.delete_s(entry)
        topo.standalone.delete_s(dn_config)
        topo.standalone.delete_s(dn_entry_foo_bar)
        be1.delete()

    request.addfinalizer(fin)
e_1_parent_2_2_1_0, e_3_parent_2_1_0, e_4_parent_2_1_0, e_2_parent_0, e_1_parent_2_0, e_2_parent_2_0, e_3_parent_2_0, e_4_parent_2_0, e_3_parent_0, e_1_parent_3_0, e_1_parent_1_3_0, e_1_parent_1_1_3_0, e_1_parent_1_1_1_3_0] + for entry in entries: + topo.standalone.delete_s(entry) + topo.standalone.delete_s(dn_config) + topo.standalone.delete_s(dn_entry_foo_bar) + be1.delete() + + request.addfinalizer(fin) + + +def test_slapi_memberof_memberattr(topo, request, install_test_plugin): + """ + Test that membership hierarchy (member) is computed with slapi_member + the membership is done with 'manager' attribute but slapi_memberof + called with 'member' attribute. As there is no 'member' then + membership returns empty_results + with following parameters + - membership attribute: 'member' <---- + - span over all backends: 'on' + - skip nesting membership: 'off' + - computation mode: recompute + - Scope: DEFAULT_SUFFIX + - ExcludeScope: None + - Maximum return entries: None + + :id: 373f7f65-185f-4b06-a0a5-3e23692b87f1 + :setup: Standalone instance + :steps: + 1. provision a set of entries in default backend + with membership using 'manager' + 2. configure test_slapi_memberof as described above + so checking membership using 'member' + 3. check computed membership vs expected result + all empty_result because no entry has 'member' + :expectedresults: + 1. Operation should succeed + 2. Operation should succeed + 3. 
Operation should succeed + + DIT is : + e_1_parent_0 + - e_1_parent_1_0 + -- e_1_parent_1_1_0 + --- e_1_parent_1_1_1_0 + --- e_2_parent_1_1_1_0 + --- e_3_parent_1_1_1_0 + --- e_4_parent_1_1_1_0 + --- e_5_parent_1_1_1_0 + -- e_2_parent_1_1_0 + - e_2_parent_1_0 + -- e_1_parent_2_1_0 + -- e_2_parent_2_1_0 + --- e_1_parent_2_2_1_0 + -- e_3_parent_2_1_0 + -- e_4_parent_2_1_0 + e_2_parent_0 + - e_1_parent_2_0 + - e_2_parent_2_0 + - e_3_parent_2_0 + - e_4_parent_2_0 + e_3_parent_0 + - e_1_parent_3_0 + -- e_1_parent_1_3_0 + --- e_1_parent_1_1_3_0 + ---- e_1_parent_1_1_1_3_0 + """ + user = UserAccounts(topo.standalone, DEFAULT_SUFFIX) + + # First subtree + e_1_parent_0 = add_entry(topo.standalone, uid="e_1_parent_0") + + e_1_parent_1_0 = add_entry(topo.standalone, uid="e_1_parent_1_0", manager=[ensure_bytes(e_1_parent_0)]) + + e_1_parent_1_1_0 = add_entry(topo.standalone, uid="e_1_parent_1_1_0", manager=[ensure_bytes(e_1_parent_1_0)]) + + e_1_parent_1_1_1_0 = add_entry(topo.standalone, uid="e_1_parent_1_1_1_0", manager=[ensure_bytes(e_1_parent_1_1_0)]) + e_2_parent_1_1_1_0 = add_entry(topo.standalone, uid="e_2_parent_1_1_1_0", manager=[ensure_bytes(e_1_parent_1_1_0)]) + e_3_parent_1_1_1_0 = add_entry(topo.standalone, uid="e_3_parent_1_1_1_0", manager=[ensure_bytes(e_1_parent_1_1_0)]) + e_4_parent_1_1_1_0 = add_entry(topo.standalone, uid="e_4_parent_1_1_1_0", manager=[ensure_bytes(e_1_parent_1_1_0)]) + e_5_parent_1_1_1_0 = add_entry(topo.standalone, uid="e_5_parent_1_1_1_0", manager=[ensure_bytes(e_1_parent_1_1_0)]) + + e_2_parent_1_1_0 = add_entry(topo.standalone, uid="e_2_parent_1_1_0", manager=[ensure_bytes(e_1_parent_1_0)]) + + e_2_parent_1_0 = add_entry(topo.standalone, uid="e_2_parent_1_0", manager=[ensure_bytes(e_1_parent_0)]) + + e_1_parent_2_1_0 = add_entry(topo.standalone, uid="e_1_parent_2_1_0", manager=[ensure_bytes(e_2_parent_1_0)]) + e_2_parent_2_1_0 = add_entry(topo.standalone, uid="e_2_parent_2_1_0", manager=[ensure_bytes(e_2_parent_1_0)]) + 
e_1_parent_2_2_1_0 = add_entry(topo.standalone, uid="e_1_parent_2_2_1_0", manager=[ensure_bytes(e_2_parent_2_1_0)]) + e_3_parent_2_1_0 = add_entry(topo.standalone, uid="e_3_parent_2_1_0", manager=[ensure_bytes(e_2_parent_1_0)]) + e_4_parent_2_1_0 = add_entry(topo.standalone, uid="e_4_parent_2_1_0", manager=[ensure_bytes(e_2_parent_1_0)]) + + # 2nd subtree + e_2_parent_0 = add_entry(topo.standalone, uid="e_2_parent_0") + + e_1_parent_2_0 = add_entry(topo.standalone, uid="e_1_parent_2_0", manager=[ensure_bytes(e_2_parent_0)]) + e_2_parent_2_0 = add_entry(topo.standalone, uid="e_2_parent_2_0", manager=[ensure_bytes(e_2_parent_0)]) + e_3_parent_2_0 = add_entry(topo.standalone, uid="e_3_parent_2_0", manager=[ensure_bytes(e_2_parent_0)]) + e_4_parent_2_0 = add_entry(topo.standalone, uid="e_4_parent_2_0", manager=[ensure_bytes(e_2_parent_0)]) + + # third subtree + e_3_parent_0 = add_entry(topo.standalone, uid="e_3_parent_0") + + e_1_parent_3_0 = add_entry(topo.standalone, uid="e_1_parent_3_0", manager=[ensure_bytes(e_3_parent_0)]) + + e_1_parent_1_3_0 = add_entry(topo.standalone, uid="e_1_parent_1_3_0", manager=[ensure_bytes(e_1_parent_3_0)]) + + e_1_parent_1_1_3_0 = add_entry(topo.standalone, uid="e_1_parent_1_1_3_0", manager=[ensure_bytes(e_1_parent_1_3_0)]) + + e_1_parent_1_1_1_3_0 = add_entry(topo.standalone, uid="e_1_parent_1_1_1_3_0", manager=[ensure_bytes(e_1_parent_1_1_3_0)]) + + dn_config = 'cn=test_slapi_memberof,cn=plugins,cn=config' + topo.standalone.add_s(Entry((dn_config, {'objectclass': 'top nsSlapdPlugin extensibleObject'.split(), + 'cn': 'test_slapi_memberof', + 'nsslapd-pluginPath': 'libtest_slapi_memberof-plugin', + 'nsslapd-pluginInitfunc': 'test_slapi_memberof_init', + 'nsslapd-pluginType': 'extendedop', + 'nsslapd-pluginEnabled': 'on', + 'nsslapd-plugin-depends-on-type': 'database', + 'nsslapd-pluginId': 'test_slapi_memberof-plugin', + 'slapimemberOfMemberDN': 'uid=test_user_11,ou=People,dc=example,dc=com', + 'slapimemberOfGroupAttr': 'member', + 
'slapimemberOfAttr': 'memberof', + 'slapimemberOfAllBackends': 'on', + 'slapimemberOfSkipNested': 'off', + 'slapimemberOfEntryScope': DEFAULT_SUFFIX, + 'slapimemberOfMaxGroup': '0', + 'nsslapd-pluginVersion': '2.3.2.202302131418git0e190fc3d', + 'nsslapd-pluginVendor': '389 Project', + 'nsslapd-pluginDescription': 'test_slapi_memberof extended operation plugin'}))) + topo.standalone.restart() + + # Check the first subtree + expected = [ EMPTY_RESULT ] + res = _extop_test_slapi_member(server=topo.standalone, dn=e_1_parent_0, relation="manager") + _check_res_vs_expected("first subtree", res, expected) + + # Check the second subtree + expected = [ EMPTY_RESULT ] + res = _extop_test_slapi_member(server=topo.standalone, dn=e_2_parent_0, relation="manager") + _check_res_vs_expected("second subtree", res, expected) + + # Check the third subtree + expected = [ EMPTY_RESULT ] + res = _extop_test_slapi_member(server=topo.standalone, dn=e_3_parent_0, relation="manager") + _check_res_vs_expected("third subtree", res, expected) + + # check e_1_parent_1_0 + expected = [ EMPTY_RESULT ] + res = _extop_test_slapi_member(server=topo.standalone, dn=e_1_parent_1_0, relation="manager") + _check_res_vs_expected("organisation reporting to e_1_parent_1_0", res, expected) + + # check e_1_parent_1_1_0 + expected = [ EMPTY_RESULT ] + res = _extop_test_slapi_member(server=topo.standalone, dn=e_1_parent_1_1_0, relation="manager") + _check_res_vs_expected("organisation reporting to e_1_parent_1_1_0", res, expected) + + # check e_2_parent_1_1_0 + expected = [EMPTY_RESULT] + res = _extop_test_slapi_member(server=topo.standalone, dn=e_2_parent_1_1_0, relation="manager") + _check_res_vs_expected("organisation reporting to e_2_parent_1_1_0", res, expected) + + # check e_2_parent_1_0 + expected = [ EMPTY_RESULT ] + res = _extop_test_slapi_member(server=topo.standalone, dn=e_2_parent_1_0, relation="manager") + _check_res_vs_expected("organisation reporting to e_2_parent_1_0", res, expected) + + # check 
e_2_parent_2_1_0 + expected = [ EMPTY_RESULT ] + res = _extop_test_slapi_member(server=topo.standalone, dn=e_2_parent_2_1_0, relation="manager") + _check_res_vs_expected("organisation reporting to e_2_parent_2_1_0", res, expected) + + # Check e_1_parent_3_0 + expected = [ EMPTY_RESULT ] + res = _extop_test_slapi_member(server=topo.standalone, dn=e_1_parent_3_0, relation="manager") + _check_res_vs_expected("organisation reporting to e_1_parent_3_0", res, expected) + + # Check e_1_parent_1_3_0 + expected = [ EMPTY_RESULT ] + res = _extop_test_slapi_member(server=topo.standalone, dn=e_1_parent_1_3_0, relation="manager") + _check_res_vs_expected("organisation reporting to e_1_parent_1_3_0", res, expected) + + # Check e_1_parent_1_1_3_0 + expected = [ EMPTY_RESULT ] + res = _extop_test_slapi_member(server=topo.standalone, dn=e_1_parent_1_1_3_0, relation="manager") + _check_res_vs_expected("organisation reporting to e_1_parent_1_1_3_0", res, expected) + + # Check e_1_parent_1_1_1_3_0 + expected = [EMPTY_RESULT] + res = _extop_test_slapi_member(server=topo.standalone, dn=e_1_parent_1_1_1_3_0, relation="manager") + _check_res_vs_expected("organisation reporting to e_1_parent_1_1_1_3_0", res, expected) + + def fin(): + entries = [e_1_parent_0, e_1_parent_1_0, e_1_parent_1_1_0, e_1_parent_1_1_1_0, e_2_parent_1_1_1_0, e_3_parent_1_1_1_0, e_4_parent_1_1_1_0, e_5_parent_1_1_1_0, e_2_parent_1_1_0, e_2_parent_1_0, e_1_parent_2_1_0, e_2_parent_2_1_0, e_1_parent_2_2_1_0, e_3_parent_2_1_0, e_4_parent_2_1_0, e_2_parent_0, e_1_parent_2_0, e_2_parent_2_0, e_3_parent_2_0, e_4_parent_2_0, e_3_parent_0, e_1_parent_3_0, e_1_parent_1_3_0, e_1_parent_1_1_3_0, e_1_parent_1_1_1_3_0] + for entry in entries: + topo.standalone.delete_s(entry) + topo.standalone.delete_s(dn_config) + + request.addfinalizer(fin) + + +def test_slapi_memberof_scope(topo, request, install_test_plugin): + """ + Test that membership hierarchy (member) is computed with slapi_member + Only entries in the subtree scope 
(e_2_parent_1_0) gets valid + computation of the membership + with following parameters + - membership attribute: 'manager' + - span over all backends: 'on' + - skip nesting membership: 'off' + - computation mode: recompute + - Scope: ou=subtree,ou=People,dc=example,dc=com <---- + - ExcludeScope: None + - Maximum return entries: None + + :id: 6c7587e0-0bc4-4847-b403-773d7314aa31 + :setup: Standalone instance + :steps: + 1. provision a set of entries in default backend + 2. configure test_slapi_memberof as described above + so only entries under e_2_parent_1_0 are taken into + consideration + 3. check computed membership vs expected result + Only entries under e_2_parent_1_0 get no empty results + :expectedresults: + 1. Operation should succeed + 2. Operation should succeed + 3. Operation should succeed + + DIT is : + e_1_parent_0 + - e_1_parent_1_0 + -- e_1_parent_1_1_0 + --- e_1_parent_1_1_1_0 + --- e_2_parent_1_1_1_0 + --- e_3_parent_1_1_1_0 + --- e_4_parent_1_1_1_0 + --- e_5_parent_1_1_1_0 + -- e_2_parent_1_1_0 + - e_2_parent_1_0 (subtree) <---- + -- e_1_parent_2_1_0 (subtree) <---- + -- e_2_parent_2_1_0 (subtree) <---- + --- e_1_parent_2_2_1_0 (subtree) <---- + -- e_3_parent_2_1_0 (subtree) <---- + -- e_4_parent_2_1_0 (subtree) <---- + e_2_parent_0 + - e_1_parent_2_0 + - e_2_parent_2_0 + - e_3_parent_2_0 + - e_4_parent_2_0 + e_3_parent_0 + - e_1_parent_3_0 + -- e_1_parent_1_3_0 + --- e_1_parent_1_1_3_0 + ---- e_1_parent_1_1_1_3_0 + """ + + subtree="subtree" + dn_subtree = 'ou=%s,ou=People,%s' % (subtree, DEFAULT_SUFFIX) + topo.standalone.add_s(Entry((dn_subtree, {'objectclass': 'top organizationalunit'.split(), + 'ou': subtree}))) + # First subtree + e_1_parent_0 = add_entry(topo.standalone, uid="e_1_parent_0") + + e_1_parent_1_0 = add_entry(topo.standalone, uid="e_1_parent_1_0", manager=[ensure_bytes(e_1_parent_0)]) + + e_1_parent_1_1_0 = add_entry(topo.standalone, uid="e_1_parent_1_1_0", manager=[ensure_bytes(e_1_parent_1_0)]) + + e_1_parent_1_1_1_0 = 
add_entry(topo.standalone, uid="e_1_parent_1_1_1_0", manager=[ensure_bytes(e_1_parent_1_1_0)]) + e_2_parent_1_1_1_0 = add_entry(topo.standalone, uid="e_2_parent_1_1_1_0", manager=[ensure_bytes(e_1_parent_1_1_0)]) + e_3_parent_1_1_1_0 = add_entry(topo.standalone, uid="e_3_parent_1_1_1_0", manager=[ensure_bytes(e_1_parent_1_1_0)]) + e_4_parent_1_1_1_0 = add_entry(topo.standalone, uid="e_4_parent_1_1_1_0", manager=[ensure_bytes(e_1_parent_1_1_0)]) + e_5_parent_1_1_1_0 = add_entry(topo.standalone, uid="e_5_parent_1_1_1_0", manager=[ensure_bytes(e_1_parent_1_1_0)]) + + e_2_parent_1_1_0 = add_entry(topo.standalone, uid="e_2_parent_1_1_0", manager=[ensure_bytes(e_1_parent_1_0)]) + + e_2_parent_1_0 = add_entry(topo.standalone, uid="e_2_parent_1_0", manager=[ensure_bytes(e_1_parent_0)], subtree=subtree) + + e_1_parent_2_1_0 = add_entry(topo.standalone, uid="e_1_parent_2_1_0", manager=[ensure_bytes(e_2_parent_1_0)], subtree=subtree) + e_2_parent_2_1_0 = add_entry(topo.standalone, uid="e_2_parent_2_1_0", manager=[ensure_bytes(e_2_parent_1_0)], subtree=subtree) + e_1_parent_2_2_1_0 = add_entry(topo.standalone, uid="e_1_parent_2_2_1_0", manager=[ensure_bytes(e_2_parent_2_1_0)], subtree=subtree) + e_3_parent_2_1_0 = add_entry(topo.standalone, uid="e_3_parent_2_1_0", manager=[ensure_bytes(e_2_parent_1_0)], subtree=subtree) + e_4_parent_2_1_0 = add_entry(topo.standalone, uid="e_4_parent_2_1_0", manager=[ensure_bytes(e_2_parent_1_0)], subtree=subtree) + + # 2nd subtree + e_2_parent_0 = add_entry(topo.standalone, uid="e_2_parent_0") + + e_1_parent_2_0 = add_entry(topo.standalone, uid="e_1_parent_2_0", manager=[ensure_bytes(e_2_parent_0)]) + e_2_parent_2_0 = add_entry(topo.standalone, uid="e_2_parent_2_0", manager=[ensure_bytes(e_2_parent_0)]) + e_3_parent_2_0 = add_entry(topo.standalone, uid="e_3_parent_2_0", manager=[ensure_bytes(e_2_parent_0)]) + e_4_parent_2_0 = add_entry(topo.standalone, uid="e_4_parent_2_0", manager=[ensure_bytes(e_2_parent_0)]) + + # third subtree + 
e_3_parent_0 = add_entry(topo.standalone, uid="e_3_parent_0") + + e_1_parent_3_0 = add_entry(topo.standalone, uid="e_1_parent_3_0", manager=[ensure_bytes(e_3_parent_0)]) + + e_1_parent_1_3_0 = add_entry(topo.standalone, uid="e_1_parent_1_3_0", manager=[ensure_bytes(e_1_parent_3_0)]) + + e_1_parent_1_1_3_0 = add_entry(topo.standalone, uid="e_1_parent_1_1_3_0", manager=[ensure_bytes(e_1_parent_1_3_0)]) + + e_1_parent_1_1_1_3_0 = add_entry(topo.standalone, uid="e_1_parent_1_1_1_3_0", manager=[ensure_bytes(e_1_parent_1_1_3_0)]) + + dn_config = 'cn=test_slapi_memberof,cn=plugins,cn=config' + topo.standalone.add_s(Entry((dn_config, {'objectclass': 'top nsSlapdPlugin extensibleObject'.split(), + 'cn': 'test_slapi_memberof', + 'nsslapd-pluginPath': 'libtest_slapi_memberof-plugin', + 'nsslapd-pluginInitfunc': 'test_slapi_memberof_init', + 'nsslapd-pluginType': 'extendedop', + 'nsslapd-pluginEnabled': 'on', + 'nsslapd-plugin-depends-on-type': 'database', + 'nsslapd-pluginId': 'test_slapi_memberof-plugin', + 'slapimemberOfMemberDN': 'uid=test_user_11,ou=People,dc=example,dc=com', + 'slapimemberOfGroupAttr': 'manager', + 'slapimemberOfAttr': 'memberof', + 'slapimemberOfAllBackends': 'on', + 'slapimemberOfSkipNested': 'off', + 'slapimemberOfEntryScope': dn_subtree, + 'slapimemberOfMaxGroup': '0', + 'nsslapd-pluginVersion': '2.3.2.202302131418git0e190fc3d', + 'nsslapd-pluginVendor': '389 Project', + 'nsslapd-pluginDescription': 'test_slapi_memberof extended operation plugin'}))) + topo.standalone.restart() + + # Check the first subtree + expected = [ EMPTY_RESULT ] + res = _extop_test_slapi_member(server=topo.standalone, dn=e_1_parent_0, relation="manager") + _check_res_vs_expected("first subtree", res, expected) + + # Check the second subtree + expected = [ EMPTY_RESULT ] + res = _extop_test_slapi_member(server=topo.standalone, dn=e_2_parent_0, relation="manager") + _check_res_vs_expected("second subtree", res, expected) + + # Check the third subtree + expected = [ EMPTY_RESULT 
] + res = _extop_test_slapi_member(server=topo.standalone, dn=e_3_parent_0, relation="manager") + _check_res_vs_expected("third subtree", res, expected) + + # check e_1_parent_1_0 + expected = [ EMPTY_RESULT ] + res = _extop_test_slapi_member(server=topo.standalone, dn=e_1_parent_1_0, relation="manager") + _check_res_vs_expected("organisation reporting to e_1_parent_1_0", res, expected) + + # check e_1_parent_1_1_0 + expected = [ EMPTY_RESULT ] + res = _extop_test_slapi_member(server=topo.standalone, dn=e_1_parent_1_1_0, relation="manager") + _check_res_vs_expected("organisation reporting to e_1_parent_1_1_0", res, expected) + + # check e_2_parent_1_1_0 + expected = [ EMPTY_RESULT ] + res = _extop_test_slapi_member(server=topo.standalone, dn=e_2_parent_1_1_0, relation="manager") + _check_res_vs_expected("organisation reporting to e_2_parent_1_1_0", res, expected) + + # check e_2_parent_1_0 + expected = [e_1_parent_2_1_0, e_2_parent_2_1_0, e_1_parent_2_2_1_0, e_3_parent_2_1_0, e_4_parent_2_1_0] + res = _extop_test_slapi_member(server=topo.standalone, dn=e_2_parent_1_0, relation="manager") + _check_res_vs_expected("organisation reporting to e_2_parent_1_0", res, expected) + + # Check e_1_parent_2_1_0 + expected = [ EMPTY_RESULT ] + res = _extop_test_slapi_member(server=topo.standalone, dn=e_1_parent_2_1_0, relation="manager") + _check_res_vs_expected("organisation reporting to e_1_parent_2_1_0", res, expected) + + # check e_2_parent_2_1_0 + expected = [ e_1_parent_2_2_1_0 ] + res = _extop_test_slapi_member(server=topo.standalone, dn=e_2_parent_2_1_0, relation="manager") + _check_res_vs_expected("organisation reporting to e_2_parent_2_1_0", res, expected) + + # Check e_1_parent_3_0 + expected = [ EMPTY_RESULT ] + res = _extop_test_slapi_member(server=topo.standalone, dn=e_1_parent_3_0, relation="manager") + _check_res_vs_expected("organisation reporting to e_1_parent_3_0", res, expected) + + # Check e_1_parent_1_3_0 + expected = [ EMPTY_RESULT ] + res = 
_extop_test_slapi_member(server=topo.standalone, dn=e_1_parent_1_3_0, relation="manager") + _check_res_vs_expected("organisation reporting to e_1_parent_1_3_0", res, expected) + + # Check e_1_parent_1_1_3_0 + expected = [ EMPTY_RESULT ] + res = _extop_test_slapi_member(server=topo.standalone, dn=e_1_parent_1_1_3_0, relation="manager") + _check_res_vs_expected("organisation reporting to e_1_parent_1_1_3_0", res, expected) + + # Check e_1_parent_1_1_1_3_0 + expected = [EMPTY_RESULT] + res = _extop_test_slapi_member(server=topo.standalone, dn=e_1_parent_1_1_1_3_0, relation="manager") + _check_res_vs_expected("organisation reporting to e_1_parent_1_1_1_3_0", res, expected) + + def fin(): + entries = [e_1_parent_0, e_1_parent_1_0, e_1_parent_1_1_0, e_1_parent_1_1_1_0, e_2_parent_1_1_1_0, e_3_parent_1_1_1_0, e_4_parent_1_1_1_0, e_5_parent_1_1_1_0, e_2_parent_1_1_0, e_2_parent_1_0, e_1_parent_2_1_0, e_2_parent_2_1_0, e_1_parent_2_2_1_0, e_3_parent_2_1_0, e_4_parent_2_1_0, e_2_parent_0, e_1_parent_2_0, e_2_parent_2_0, e_3_parent_2_0, e_4_parent_2_0, e_3_parent_0, e_1_parent_3_0, e_1_parent_1_3_0, e_1_parent_1_1_3_0, e_1_parent_1_1_1_3_0] + for entry in entries: + topo.standalone.delete_s(entry) + topo.standalone.delete_s(dn_config) + topo.standalone.delete_s(dn_subtree) + + request.addfinalizer(fin) + +def test_slapi_memberof_excludescope(topo, request, install_test_plugin): + """ + Test that membership hierarchy (member) is computed with slapi_member + Entries in the subtree excludeescope (e_2_parent_1_0) are ignored + computation of the membership + with following parameters + - membership attribute: 'manager' + - span over all backends: 'on' + - skip nesting membership: 'off' + - computation mode: recompute + - Scope: DEFAULT_SUFFIX + - ExcludeScope: ou=subtree,ou=People,dc=example,dc=com <---- + - Maximum return entries: None + + :id: bdb17e7e-289c-4b56-83d5-0eb54d0c660e + :setup: Standalone instance + :steps: + 1. provision a set of entries in default backend + 2. 
configure test_slapi_memberof as described above + so entries under e_2_parent_1_0 are ignored + 3. check computed membership vs expected result + :expectedresults: + 1. Operation should succeed + 2. Operation should succeed + 3. Operation should succeed + + DIT is : + e_1_parent_0 + - e_1_parent_1_0 + -- e_1_parent_1_1_0 + --- e_1_parent_1_1_1_0 + --- e_2_parent_1_1_1_0 + --- e_3_parent_1_1_1_0 + --- e_4_parent_1_1_1_0 + --- e_5_parent_1_1_1_0 + -- e_2_parent_1_1_0 + - e_2_parent_1_0 (subtree) <---- + -- e_1_parent_2_1_0 (subtree) <---- + -- e_2_parent_2_1_0 (subtree) <---- + --- e_1_parent_2_2_1_0 (subtree) <---- + -- e_3_parent_2_1_0 (subtree) <---- + -- e_4_parent_2_1_0 (subtree) <---- + e_2_parent_0 + - e_1_parent_2_0 + - e_2_parent_2_0 + - e_3_parent_2_0 + - e_4_parent_2_0 + e_3_parent_0 + - e_1_parent_3_0 + -- e_1_parent_1_3_0 + --- e_1_parent_1_1_3_0 + ---- e_1_parent_1_1_1_3_0 + """ + + subtree="subtree" + dn_subtree = 'ou=%s,ou=People,%s' % (subtree, DEFAULT_SUFFIX) + topo.standalone.add_s(Entry((dn_subtree, {'objectclass': 'top organizationalunit'.split(), + 'ou': subtree}))) + # First subtree + e_1_parent_0 = add_entry(topo.standalone, uid="e_1_parent_0") + + e_1_parent_1_0 = add_entry(topo.standalone, uid="e_1_parent_1_0", manager=[ensure_bytes(e_1_parent_0)]) + + e_1_parent_1_1_0 = add_entry(topo.standalone, uid="e_1_parent_1_1_0", manager=[ensure_bytes(e_1_parent_1_0)]) + + e_1_parent_1_1_1_0 = add_entry(topo.standalone, uid="e_1_parent_1_1_1_0", manager=[ensure_bytes(e_1_parent_1_1_0)]) + e_2_parent_1_1_1_0 = add_entry(topo.standalone, uid="e_2_parent_1_1_1_0", manager=[ensure_bytes(e_1_parent_1_1_0)]) + e_3_parent_1_1_1_0 = add_entry(topo.standalone, uid="e_3_parent_1_1_1_0", manager=[ensure_bytes(e_1_parent_1_1_0)]) + e_4_parent_1_1_1_0 = add_entry(topo.standalone, uid="e_4_parent_1_1_1_0", manager=[ensure_bytes(e_1_parent_1_1_0)]) + e_5_parent_1_1_1_0 = add_entry(topo.standalone, uid="e_5_parent_1_1_1_0", manager=[ensure_bytes(e_1_parent_1_1_0)]) 
+ + e_2_parent_1_1_0 = add_entry(topo.standalone, uid="e_2_parent_1_1_0", manager=[ensure_bytes(e_1_parent_1_0)]) + + e_2_parent_1_0 = add_entry(topo.standalone, uid="e_2_parent_1_0", manager=[ensure_bytes(e_1_parent_0)], subtree=subtree) + + e_1_parent_2_1_0 = add_entry(topo.standalone, uid="e_1_parent_2_1_0", manager=[ensure_bytes(e_2_parent_1_0)], subtree=subtree) + e_2_parent_2_1_0 = add_entry(topo.standalone, uid="e_2_parent_2_1_0", manager=[ensure_bytes(e_2_parent_1_0)], subtree=subtree) + e_1_parent_2_2_1_0 = add_entry(topo.standalone, uid="e_1_parent_2_2_1_0", manager=[ensure_bytes(e_2_parent_2_1_0)], subtree=subtree) + e_3_parent_2_1_0 = add_entry(topo.standalone, uid="e_3_parent_2_1_0", manager=[ensure_bytes(e_2_parent_1_0)], subtree=subtree) + e_4_parent_2_1_0 = add_entry(topo.standalone, uid="e_4_parent_2_1_0", manager=[ensure_bytes(e_2_parent_1_0)], subtree=subtree) + + # 2nd subtree + e_2_parent_0 = add_entry(topo.standalone, uid="e_2_parent_0") + + e_1_parent_2_0 = add_entry(topo.standalone, uid="e_1_parent_2_0", manager=[ensure_bytes(e_2_parent_0)]) + e_2_parent_2_0 = add_entry(topo.standalone, uid="e_2_parent_2_0", manager=[ensure_bytes(e_2_parent_0)]) + e_3_parent_2_0 = add_entry(topo.standalone, uid="e_3_parent_2_0", manager=[ensure_bytes(e_2_parent_0)]) + e_4_parent_2_0 = add_entry(topo.standalone, uid="e_4_parent_2_0", manager=[ensure_bytes(e_2_parent_0)]) + + # third subtree + e_3_parent_0 = add_entry(topo.standalone, uid="e_3_parent_0") + + e_1_parent_3_0 = add_entry(topo.standalone, uid="e_1_parent_3_0", manager=[ensure_bytes(e_3_parent_0)]) + + e_1_parent_1_3_0 = add_entry(topo.standalone, uid="e_1_parent_1_3_0", manager=[ensure_bytes(e_1_parent_3_0)]) + + e_1_parent_1_1_3_0 = add_entry(topo.standalone, uid="e_1_parent_1_1_3_0", manager=[ensure_bytes(e_1_parent_1_3_0)]) + + e_1_parent_1_1_1_3_0 = add_entry(topo.standalone, uid="e_1_parent_1_1_1_3_0", manager=[ensure_bytes(e_1_parent_1_1_3_0)]) + + dn_config = 
'cn=test_slapi_memberof,cn=plugins,cn=config' + topo.standalone.add_s(Entry((dn_config, {'objectclass': 'top nsSlapdPlugin extensibleObject'.split(), + 'cn': 'test_slapi_memberof', + 'nsslapd-pluginPath': 'libtest_slapi_memberof-plugin', + 'nsslapd-pluginInitfunc': 'test_slapi_memberof_init', + 'nsslapd-pluginType': 'extendedop', + 'nsslapd-pluginEnabled': 'on', + 'nsslapd-plugin-depends-on-type': 'database', + 'nsslapd-pluginId': 'test_slapi_memberof-plugin', + 'slapimemberOfMemberDN': 'uid=test_user_11,ou=People,dc=example,dc=com', + 'slapimemberOfGroupAttr': 'manager', + 'slapimemberOfAttr': 'memberof', + 'slapimemberOfAllBackends': 'on', + 'slapimemberOfSkipNested': 'off', + 'slapimemberOfEntryScopeExcludeSubtree': dn_subtree, + 'slapimemberOfEntryScope': DEFAULT_SUFFIX, + 'slapimemberOfMaxGroup': '0', + 'nsslapd-pluginVersion': '2.3.2.202302131418git0e190fc3d', + 'nsslapd-pluginVendor': '389 Project', + 'nsslapd-pluginDescription': 'test_slapi_memberof extended operation plugin'}))) + topo.standalone.restart() + + # Check the first subtree + expected = [ e_1_parent_1_0, e_1_parent_1_1_0, e_1_parent_1_1_1_0, e_2_parent_1_1_1_0, e_3_parent_1_1_1_0, e_4_parent_1_1_1_0, e_5_parent_1_1_1_0, e_2_parent_1_1_0] + res = _extop_test_slapi_member(server=topo.standalone, dn=e_1_parent_0, relation="manager") + _check_res_vs_expected("first subtree", res, expected) + + # Check the second subtree + expected = [e_1_parent_2_0, e_2_parent_2_0, e_3_parent_2_0, e_4_parent_2_0] + res = _extop_test_slapi_member(server=topo.standalone, dn=e_2_parent_0, relation="manager") + _check_res_vs_expected("second subtree", res, expected) + + # Check the third subtree + expected = [e_1_parent_3_0, e_1_parent_1_3_0, e_1_parent_1_1_3_0, e_1_parent_1_1_1_3_0] + res = _extop_test_slapi_member(server=topo.standalone, dn=e_3_parent_0, relation="manager") + _check_res_vs_expected("third subtree", res, expected) + + # check e_1_parent_1_0 + expected = [e_1_parent_1_1_0, e_1_parent_1_1_1_0, 
e_2_parent_1_1_1_0, e_3_parent_1_1_1_0, e_4_parent_1_1_1_0, e_5_parent_1_1_1_0, e_2_parent_1_1_0] + res = _extop_test_slapi_member(server=topo.standalone, dn=e_1_parent_1_0, relation="manager") + _check_res_vs_expected("organisation reporting to e_1_parent_1_0", res, expected) + + # check e_1_parent_1_1_0 + expected = [e_1_parent_1_1_1_0, e_2_parent_1_1_1_0, e_3_parent_1_1_1_0, e_4_parent_1_1_1_0, e_5_parent_1_1_1_0] + res = _extop_test_slapi_member(server=topo.standalone, dn=e_1_parent_1_1_0, relation="manager") + _check_res_vs_expected("organisation reporting to e_1_parent_1_1_0", res, expected) + + # check e_2_parent_1_1_0 + expected = [ EMPTY_RESULT ] + res = _extop_test_slapi_member(server=topo.standalone, dn=e_2_parent_1_1_0, relation="manager") + _check_res_vs_expected("organisation reporting to e_2_parent_1_1_0", res, expected) + + # check e_2_parent_1_0 + expected = [ EMPTY_RESULT ] + res = _extop_test_slapi_member(server=topo.standalone, dn=e_2_parent_1_0, relation="manager") + _check_res_vs_expected("organisation reporting to e_2_parent_1_0", res, expected) + + # Check e_1_parent_2_1_0 + expected = [ EMPTY_RESULT ] + res = _extop_test_slapi_member(server=topo.standalone, dn=e_1_parent_2_1_0, relation="manager") + _check_res_vs_expected("organisation reporting to e_1_parent_2_1_0", res, expected) + + # check e_2_parent_2_1_0 + expected = [ EMPTY_RESULT ] + res = _extop_test_slapi_member(server=topo.standalone, dn=e_2_parent_2_1_0, relation="manager") + _check_res_vs_expected("organisation reporting to e_2_parent_2_1_0", res, expected) + + # Check e_1_parent_3_0 + expected = [ e_1_parent_1_3_0, e_1_parent_1_1_3_0, e_1_parent_1_1_1_3_0 ] + res = _extop_test_slapi_member(server=topo.standalone, dn=e_1_parent_3_0, relation="manager") + _check_res_vs_expected("organisation reporting to e_1_parent_3_0", res, expected) + + # Check e_1_parent_1_3_0 + expected = [ e_1_parent_1_1_3_0, e_1_parent_1_1_1_3_0 ] + res = _extop_test_slapi_member(server=topo.standalone, 
dn=e_1_parent_1_3_0, relation="manager") + _check_res_vs_expected("organisation reporting to e_1_parent_1_3_0", res, expected) + + # Check e_1_parent_1_1_3_0 + expected = [ e_1_parent_1_1_1_3_0 ] + res = _extop_test_slapi_member(server=topo.standalone, dn=e_1_parent_1_1_3_0, relation="manager") + _check_res_vs_expected("organisation reporting to e_1_parent_1_1_3_0", res, expected) + + # Check e_1_parent_1_1_1_3_0 + expected = [EMPTY_RESULT] + res = _extop_test_slapi_member(server=topo.standalone, dn=e_1_parent_1_1_1_3_0, relation="manager") + _check_res_vs_expected("organisation reporting to e_1_parent_1_1_1_3_0", res, expected) + + def fin(): + entries = [e_1_parent_0, e_1_parent_1_0, e_1_parent_1_1_0, e_1_parent_1_1_1_0, e_2_parent_1_1_1_0, e_3_parent_1_1_1_0, e_4_parent_1_1_1_0, e_5_parent_1_1_1_0, e_2_parent_1_1_0, e_2_parent_1_0, e_1_parent_2_1_0, e_2_parent_2_1_0, e_1_parent_2_2_1_0, e_3_parent_2_1_0, e_4_parent_2_1_0, e_2_parent_0, e_1_parent_2_0, e_2_parent_2_0, e_3_parent_2_0, e_4_parent_2_0, e_3_parent_0, e_1_parent_3_0, e_1_parent_1_3_0, e_1_parent_1_1_3_0, e_1_parent_1_1_1_3_0] + for entry in entries: + topo.standalone.delete_s(entry) + topo.standalone.delete_s(dn_config) + topo.standalone.delete_s(dn_subtree) + + request.addfinalizer(fin) + +def test_slapi_memberof_skip_nested(topo, request, install_test_plugin): + """ + When searching the management (manager) hierarchy it stops at the first level + no recursion + Test that management hierarchy is computed with slapi_member + It is done stopping at the first level, so the direct subordinate + with following parameters + - membership attribute: 'manager' + - span over all backends: 'on' + - skip nesting membership: 'on' <---- + - computation mode: recompute + - Scope: DEFAULT_SUFFIX + - ExcludeScope: ou=subtree,ou=People,dc=example,dc=com + - Maximum return entries: None + + :id: c9b5617f-9058-40f5-bdd6-a560bc67b30d + :setup: Standalone instance + :steps: + 1. 
provision a set of entries in default backend + 2. configure test_slapi_memberof as described above + 3. check computed membership vs expected result + only direct subordinate are returned + :expectedresults: + 1. Operation should succeed + 2. Operation should succeed + 3. Operation should succeed + + DIT is : + e_1_parent_0 + - e_1_parent_1_0 + -- e_1_parent_1_1_0 + --- e_1_parent_1_1_1_0 + --- e_2_parent_1_1_1_0 + --- e_3_parent_1_1_1_0 + --- e_4_parent_1_1_1_0 + --- e_5_parent_1_1_1_0 + -- e_2_parent_1_1_0 + - e_2_parent_1_0 + -- e_1_parent_2_1_0 + -- e_2_parent_2_1_0 + --- e_1_parent_2_2_1_0 + -- e_3_parent_2_1_0 + -- e_4_parent_2_1_0 + e_2_parent_0 + - e_1_parent_2_0 + - e_2_parent_2_0 + - e_3_parent_2_0 + - e_4_parent_2_0 + e_3_parent_0 + - e_1_parent_3_0 + -- e_1_parent_1_3_0 + --- e_1_parent_1_1_3_0 + ---- e_1_parent_1_1_1_3_0 + """ + + subtree="subtree" + dn_subtree = 'ou=%s,ou=People,%s' % (subtree, DEFAULT_SUFFIX) + topo.standalone.add_s(Entry((dn_subtree, {'objectclass': 'top organizationalunit'.split(), + 'ou': subtree}))) + # First subtree + e_1_parent_0 = add_entry(topo.standalone, uid="e_1_parent_0") + + e_1_parent_1_0 = add_entry(topo.standalone, uid="e_1_parent_1_0", manager=[ensure_bytes(e_1_parent_0)]) + + e_1_parent_1_1_0 = add_entry(topo.standalone, uid="e_1_parent_1_1_0", manager=[ensure_bytes(e_1_parent_1_0)]) + + e_1_parent_1_1_1_0 = add_entry(topo.standalone, uid="e_1_parent_1_1_1_0", manager=[ensure_bytes(e_1_parent_1_1_0)]) + e_2_parent_1_1_1_0 = add_entry(topo.standalone, uid="e_2_parent_1_1_1_0", manager=[ensure_bytes(e_1_parent_1_1_0)]) + e_3_parent_1_1_1_0 = add_entry(topo.standalone, uid="e_3_parent_1_1_1_0", manager=[ensure_bytes(e_1_parent_1_1_0)]) + e_4_parent_1_1_1_0 = add_entry(topo.standalone, uid="e_4_parent_1_1_1_0", manager=[ensure_bytes(e_1_parent_1_1_0)]) + e_5_parent_1_1_1_0 = add_entry(topo.standalone, uid="e_5_parent_1_1_1_0", manager=[ensure_bytes(e_1_parent_1_1_0)]) + + e_2_parent_1_1_0 = add_entry(topo.standalone, 
uid="e_2_parent_1_1_0", manager=[ensure_bytes(e_1_parent_1_0)]) + + e_2_parent_1_0 = add_entry(topo.standalone, uid="e_2_parent_1_0", manager=[ensure_bytes(e_1_parent_0)]) + + e_1_parent_2_1_0 = add_entry(topo.standalone, uid="e_1_parent_2_1_0", manager=[ensure_bytes(e_2_parent_1_0)]) + e_2_parent_2_1_0 = add_entry(topo.standalone, uid="e_2_parent_2_1_0", manager=[ensure_bytes(e_2_parent_1_0)]) + e_1_parent_2_2_1_0 = add_entry(topo.standalone, uid="e_1_parent_2_2_1_0", manager=[ensure_bytes(e_2_parent_2_1_0)]) + e_3_parent_2_1_0 = add_entry(topo.standalone, uid="e_3_parent_2_1_0", manager=[ensure_bytes(e_2_parent_1_0)]) + e_4_parent_2_1_0 = add_entry(topo.standalone, uid="e_4_parent_2_1_0", manager=[ensure_bytes(e_2_parent_1_0)]) + + # 2nd subtree + e_2_parent_0 = add_entry(topo.standalone, uid="e_2_parent_0") + + e_1_parent_2_0 = add_entry(topo.standalone, uid="e_1_parent_2_0", manager=[ensure_bytes(e_2_parent_0)]) + e_2_parent_2_0 = add_entry(topo.standalone, uid="e_2_parent_2_0", manager=[ensure_bytes(e_2_parent_0)]) + e_3_parent_2_0 = add_entry(topo.standalone, uid="e_3_parent_2_0", manager=[ensure_bytes(e_2_parent_0)]) + e_4_parent_2_0 = add_entry(topo.standalone, uid="e_4_parent_2_0", manager=[ensure_bytes(e_2_parent_0)]) + + # third subtree + e_3_parent_0 = add_entry(topo.standalone, uid="e_3_parent_0") + + e_1_parent_3_0 = add_entry(topo.standalone, uid="e_1_parent_3_0", manager=[ensure_bytes(e_3_parent_0)]) + + e_1_parent_1_3_0 = add_entry(topo.standalone, uid="e_1_parent_1_3_0", manager=[ensure_bytes(e_1_parent_3_0)]) + + e_1_parent_1_1_3_0 = add_entry(topo.standalone, uid="e_1_parent_1_1_3_0", manager=[ensure_bytes(e_1_parent_1_3_0)]) + + e_1_parent_1_1_1_3_0 = add_entry(topo.standalone, uid="e_1_parent_1_1_1_3_0", manager=[ensure_bytes(e_1_parent_1_1_3_0)]) + + dn_config = 'cn=test_slapi_memberof,cn=plugins,cn=config' + topo.standalone.add_s(Entry((dn_config, {'objectclass': 'top nsSlapdPlugin extensibleObject'.split(), + 'cn': 'test_slapi_memberof', + 
'nsslapd-pluginPath': 'libtest_slapi_memberof-plugin', + 'nsslapd-pluginInitfunc': 'test_slapi_memberof_init', + 'nsslapd-pluginType': 'extendedop', + 'nsslapd-pluginEnabled': 'on', + 'nsslapd-plugin-depends-on-type': 'database', + 'nsslapd-pluginId': 'test_slapi_memberof-plugin', + 'slapimemberOfMemberDN': 'uid=test_user_11,ou=People,dc=example,dc=com', + 'slapimemberOfGroupAttr': 'manager', + 'slapimemberOfAttr': 'memberof', + 'slapimemberOfAllBackends': 'on', + 'slapimemberOfSkipNested': 'on', + 'slapimemberOfEntryScope': DEFAULT_SUFFIX, + 'slapimemberOfMaxGroup': '0', + 'nsslapd-pluginVersion': '2.3.2.202302131418git0e190fc3d', + 'nsslapd-pluginVendor': '389 Project', + 'nsslapd-pluginDescription': 'test_slapi_memberof extended operation plugin'}))) + topo.standalone.restart() + # Check the first subtree + expected = [ e_1_parent_1_0, e_2_parent_1_0] + res = _extop_test_slapi_member(server=topo.standalone, dn=e_1_parent_0, relation="manager") + _check_res_vs_expected("first subtree", res, expected) + + # Check the second subtree + expected = [e_1_parent_2_0, e_2_parent_2_0, e_3_parent_2_0, e_4_parent_2_0] + res = _extop_test_slapi_member(server=topo.standalone, dn=e_2_parent_0, relation="manager") + _check_res_vs_expected("second subtree", res, expected) + + # Check the third subtree + expected = [e_1_parent_3_0] + res = _extop_test_slapi_member(server=topo.standalone, dn=e_3_parent_0, relation="manager") + _check_res_vs_expected("third subtree", res, expected) + + # check e_1_parent_1_0 + expected = [e_1_parent_1_1_0, e_2_parent_1_1_0] + res = _extop_test_slapi_member(server=topo.standalone, dn=e_1_parent_1_0, relation="manager") + _check_res_vs_expected("organisation reporting to e_1_parent_1_0", res, expected) + + # check e_1_parent_1_1_0 + expected = [e_1_parent_1_1_1_0, e_2_parent_1_1_1_0, e_3_parent_1_1_1_0, e_4_parent_1_1_1_0, e_5_parent_1_1_1_0] + res = _extop_test_slapi_member(server=topo.standalone, dn=e_1_parent_1_1_0, relation="manager") + 
_check_res_vs_expected("organisation reporting to e_1_parent_1_1_0", res, expected) + + # check e_2_parent_1_1_0 + expected = [EMPTY_RESULT] + res = _extop_test_slapi_member(server=topo.standalone, dn=e_2_parent_1_1_0, relation="manager") + _check_res_vs_expected("organisation reporting to e_2_parent_1_1_0", res, expected) + + # check e_2_parent_1_0 + expected = [e_1_parent_2_1_0, e_2_parent_2_1_0, e_3_parent_2_1_0, e_4_parent_2_1_0] + res = _extop_test_slapi_member(server=topo.standalone, dn=e_2_parent_1_0, relation="manager") + _check_res_vs_expected("organisation reporting to e_2_parent_1_0", res, expected) + + # check e_2_parent_2_1_0 + expected = [e_1_parent_2_2_1_0] + res = _extop_test_slapi_member(server=topo.standalone, dn=e_2_parent_2_1_0, relation="manager") + _check_res_vs_expected("organisation reporting to e_2_parent_2_1_0", res, expected) + + # Check e_1_parent_3_0 + expected = [e_1_parent_1_3_0] + res = _extop_test_slapi_member(server=topo.standalone, dn=e_1_parent_3_0, relation="manager") + _check_res_vs_expected("organisation reporting to e_1_parent_3_0", res, expected) + + # Check e_1_parent_1_3_0 + expected = [e_1_parent_1_1_3_0] + res = _extop_test_slapi_member(server=topo.standalone, dn=e_1_parent_1_3_0, relation="manager") + _check_res_vs_expected("organisation reporting to e_1_parent_1_3_0", res, expected) + + # Check e_1_parent_1_1_3_0 + expected = [e_1_parent_1_1_1_3_0] + res = _extop_test_slapi_member(server=topo.standalone, dn=e_1_parent_1_1_3_0, relation="manager") + _check_res_vs_expected("organisation reporting to e_1_parent_1_1_3_0", res, expected) + + # Check e_1_parent_1_1_1_3_0 + expected = [EMPTY_RESULT] + res = _extop_test_slapi_member(server=topo.standalone, dn=e_1_parent_1_1_1_3_0, relation="manager") + _check_res_vs_expected("organisation reporting to e_1_parent_1_1_1_3_0", res, expected) + + def fin(): + entries = [e_1_parent_0, e_1_parent_1_0, e_1_parent_1_1_0, e_1_parent_1_1_1_0, e_2_parent_1_1_1_0, e_3_parent_1_1_1_0, 
e_4_parent_1_1_1_0, e_5_parent_1_1_1_0, e_2_parent_1_1_0, e_2_parent_1_0, e_1_parent_2_1_0, e_2_parent_2_1_0, e_1_parent_2_2_1_0, e_3_parent_2_1_0, e_4_parent_2_1_0, e_2_parent_0, e_1_parent_2_0, e_2_parent_2_0, e_3_parent_2_0, e_4_parent_2_0, e_3_parent_0, e_1_parent_3_0, e_1_parent_1_3_0, e_1_parent_1_1_3_0, e_1_parent_1_1_1_3_0] + for entry in entries: + topo.standalone.delete_s(entry) + topo.standalone.delete_s(dn_config) + topo.standalone.delete_s(dn_subtree) + + request.addfinalizer(fin) + +def test_slapi_memberof_maxgroup(topo, request, install_test_plugin): + """ + When searching the management (manager) hierarchy it stops when + a maximum subordinates are retrieved + Test that management hierarchy is computed with slapi_member + with following parameters + - membership attribute: 'manager' + - span over all backends: 'on' + - skip nesting membership: 'off' <---- + - computation mode: recompute + - Scope: DEFAULT_SUFFIX + - ExcludeScope: ou=subtree,ou=People,dc=example,dc=com + - Maximum return entries: 3 <-- + + :id: 83a4c668-99d0-4f47-ac89-a7f7fc620340 + :setup: Standalone instance + :steps: + 1. provision a set of entries in default backend + 2. configure test_slapi_memberof as described above + 3. check computed membership vs expected result + only direct subordinate are returned + :expectedresults: + 1. Operation should succeed + 2. Operation should succeed + 3. 
Operation should succeed + + DIT is : + e_1_parent_0 + - e_1_parent_1_0 + -- e_1_parent_1_1_0 + --- e_1_parent_1_1_1_0 + --- e_2_parent_1_1_1_0 + --- e_3_parent_1_1_1_0 + --- e_4_parent_1_1_1_0 + --- e_5_parent_1_1_1_0 + -- e_2_parent_1_1_0 + - e_2_parent_1_0 + -- e_1_parent_2_1_0 + -- e_2_parent_2_1_0 + --- e_1_parent_2_2_1_0 + -- e_3_parent_2_1_0 + -- e_4_parent_2_1_0 + e_2_parent_0 + - e_1_parent_2_0 + - e_2_parent_2_0 + - e_3_parent_2_0 + - e_4_parent_2_0 + e_3_parent_0 + - e_1_parent_3_0 + -- e_1_parent_1_3_0 + --- e_1_parent_1_1_3_0 + ---- e_1_parent_1_1_1_3_0 + """ + user = UserAccounts(topo.standalone, DEFAULT_SUFFIX) + + # First subtree + e_1_parent_0 = add_entry(topo.standalone, uid="e_1_parent_0") + + e_1_parent_1_0 = add_entry(topo.standalone, uid="e_1_parent_1_0", manager=[ensure_bytes(e_1_parent_0)]) + + e_1_parent_1_1_0 = add_entry(topo.standalone, uid="e_1_parent_1_1_0", manager=[ensure_bytes(e_1_parent_1_0)]) + + e_1_parent_1_1_1_0 = add_entry(topo.standalone, uid="e_1_parent_1_1_1_0", manager=[ensure_bytes(e_1_parent_1_1_0)]) + e_2_parent_1_1_1_0 = add_entry(topo.standalone, uid="e_2_parent_1_1_1_0", manager=[ensure_bytes(e_1_parent_1_1_0)]) + e_3_parent_1_1_1_0 = add_entry(topo.standalone, uid="e_3_parent_1_1_1_0", manager=[ensure_bytes(e_1_parent_1_1_0)]) + e_4_parent_1_1_1_0 = add_entry(topo.standalone, uid="e_4_parent_1_1_1_0", manager=[ensure_bytes(e_1_parent_1_1_0)]) + e_5_parent_1_1_1_0 = add_entry(topo.standalone, uid="e_5_parent_1_1_1_0", manager=[ensure_bytes(e_1_parent_1_1_0)]) + + e_2_parent_1_1_0 = add_entry(topo.standalone, uid="e_2_parent_1_1_0", manager=[ensure_bytes(e_1_parent_1_0)]) + + e_2_parent_1_0 = add_entry(topo.standalone, uid="e_2_parent_1_0", manager=[ensure_bytes(e_1_parent_0)]) + + e_1_parent_2_1_0 = add_entry(topo.standalone, uid="e_1_parent_2_1_0", manager=[ensure_bytes(e_2_parent_1_0)]) + e_2_parent_2_1_0 = add_entry(topo.standalone, uid="e_2_parent_2_1_0", manager=[ensure_bytes(e_2_parent_1_0)]) + 
e_1_parent_2_2_1_0 = add_entry(topo.standalone, uid="e_1_parent_2_2_1_0", manager=[ensure_bytes(e_2_parent_2_1_0)]) + e_3_parent_2_1_0 = add_entry(topo.standalone, uid="e_3_parent_2_1_0", manager=[ensure_bytes(e_2_parent_1_0)]) + e_4_parent_2_1_0 = add_entry(topo.standalone, uid="e_4_parent_2_1_0", manager=[ensure_bytes(e_2_parent_1_0)]) + + # 2nd subtree + e_2_parent_0 = add_entry(topo.standalone, uid="e_2_parent_0") + + e_1_parent_2_0 = add_entry(topo.standalone, uid="e_1_parent_2_0", manager=[ensure_bytes(e_2_parent_0)]) + e_2_parent_2_0 = add_entry(topo.standalone, uid="e_2_parent_2_0", manager=[ensure_bytes(e_2_parent_0)]) + e_3_parent_2_0 = add_entry(topo.standalone, uid="e_3_parent_2_0", manager=[ensure_bytes(e_2_parent_0)]) + e_4_parent_2_0 = add_entry(topo.standalone, uid="e_4_parent_2_0", manager=[ensure_bytes(e_2_parent_0)]) + + # third subtree + e_3_parent_0 = add_entry(topo.standalone, uid="e_3_parent_0") + + e_1_parent_3_0 = add_entry(topo.standalone, uid="e_1_parent_3_0", manager=[ensure_bytes(e_3_parent_0)]) + + e_1_parent_1_3_0 = add_entry(topo.standalone, uid="e_1_parent_1_3_0", manager=[ensure_bytes(e_1_parent_3_0)]) + + e_1_parent_1_1_3_0 = add_entry(topo.standalone, uid="e_1_parent_1_1_3_0", manager=[ensure_bytes(e_1_parent_1_3_0)]) + + e_1_parent_1_1_1_3_0 = add_entry(topo.standalone, uid="e_1_parent_1_1_1_3_0", manager=[ensure_bytes(e_1_parent_1_1_3_0)]) + + dn_config = 'cn=test_slapi_memberof,cn=plugins,cn=config' + topo.standalone.add_s(Entry((dn_config, {'objectclass': 'top nsSlapdPlugin extensibleObject'.split(), + 'cn': 'test_slapi_memberof', + 'nsslapd-pluginPath': 'libtest_slapi_memberof-plugin', + 'nsslapd-pluginInitfunc': 'test_slapi_memberof_init', + 'nsslapd-pluginType': 'extendedop', + 'nsslapd-pluginEnabled': 'on', + 'nsslapd-plugin-depends-on-type': 'database', + 'nsslapd-pluginId': 'test_slapi_memberof-plugin', + 'slapimemberOfMemberDN': 'uid=test_user_11,ou=People,dc=example,dc=com', + 'slapimemberOfGroupAttr': 'manager', + 
'slapimemberOfAttr': 'memberof', + 'slapimemberOfAllBackends': 'on', + 'slapimemberOfSkipNested': 'off', + 'slapimemberOfEntryScope': DEFAULT_SUFFIX, + 'slapimemberOfMaxGroup': '3', + 'nsslapd-pluginVersion': '2.3.2.202302131418git0e190fc3d', + 'nsslapd-pluginVendor': '389 Project', + 'nsslapd-pluginDescription': 'test_slapi_memberof extended operation plugin'}))) + topo.standalone.restart() + + # Check the first subtree + expected = [ e_1_parent_1_0, e_1_parent_1_1_0, e_1_parent_1_1_1_0] + res = _extop_test_slapi_member(server=topo.standalone, dn=e_1_parent_0, relation="manager") + _check_res_vs_expected("first subtree", res, expected) + + # Check the second subtree + expected = [e_1_parent_2_0, e_2_parent_2_0, e_3_parent_2_0] + res = _extop_test_slapi_member(server=topo.standalone, dn=e_2_parent_0, relation="manager") + _check_res_vs_expected("second subtree", res, expected) + + # Check the third subtree + expected = [e_1_parent_3_0, e_1_parent_1_3_0, e_1_parent_1_1_3_0] + res = _extop_test_slapi_member(server=topo.standalone, dn=e_3_parent_0, relation="manager") + _check_res_vs_expected("third subtree", res, expected) + + # check e_1_parent_1_0 + expected = [e_1_parent_1_1_0, e_1_parent_1_1_1_0, e_2_parent_1_1_1_0] + res = _extop_test_slapi_member(server=topo.standalone, dn=e_1_parent_1_0, relation="manager") + _check_res_vs_expected("organisation reporting to e_1_parent_1_0", res, expected) + + # check e_1_parent_1_1_0 + expected = [e_1_parent_1_1_1_0, e_2_parent_1_1_1_0, e_3_parent_1_1_1_0] + res = _extop_test_slapi_member(server=topo.standalone, dn=e_1_parent_1_1_0, relation="manager") + _check_res_vs_expected("organisation reporting to e_1_parent_1_1_0", res, expected) + + # check e_2_parent_1_1_0 + expected = [EMPTY_RESULT] + res = _extop_test_slapi_member(server=topo.standalone, dn=e_2_parent_1_1_0, relation="manager") + _check_res_vs_expected("organisation reporting to e_2_parent_1_1_0", res, expected) + + # check e_2_parent_1_0 + expected = 
[e_1_parent_2_1_0, e_2_parent_2_1_0, e_1_parent_2_2_1_0] + res = _extop_test_slapi_member(server=topo.standalone, dn=e_2_parent_1_0, relation="manager") + _check_res_vs_expected("organisation reporting to e_2_parent_1_0", res, expected) + + # check e_2_parent_2_1_0 + expected = [e_1_parent_2_2_1_0] + res = _extop_test_slapi_member(server=topo.standalone, dn=e_2_parent_2_1_0, relation="manager") + _check_res_vs_expected("organisation reporting to e_2_parent_2_1_0", res, expected) + + # Check e_1_parent_3_0 + expected = [e_1_parent_1_3_0, e_1_parent_1_1_3_0, e_1_parent_1_1_1_3_0] + res = _extop_test_slapi_member(server=topo.standalone, dn=e_1_parent_3_0, relation="manager") + _check_res_vs_expected("organisation reporting to e_1_parent_3_0", res, expected) + + # Check e_1_parent_1_3_0 + expected = [e_1_parent_1_1_3_0, e_1_parent_1_1_1_3_0] + res = _extop_test_slapi_member(server=topo.standalone, dn=e_1_parent_1_3_0, relation="manager") + _check_res_vs_expected("organisation reporting to e_1_parent_1_3_0", res, expected) + + # Check e_1_parent_1_1_3_0 + expected = [e_1_parent_1_1_1_3_0] + res = _extop_test_slapi_member(server=topo.standalone, dn=e_1_parent_1_1_3_0, relation="manager") + _check_res_vs_expected("organisation reporting to e_1_parent_1_1_3_0", res, expected) + + # Check e_1_parent_1_1_1_3_0 + expected = [EMPTY_RESULT] + res = _extop_test_slapi_member(server=topo.standalone, dn=e_1_parent_1_1_1_3_0, relation="manager") + _check_res_vs_expected("organisation reporting to e_1_parent_1_1_1_3_0", res, expected) + + def fin(): + entries = [e_1_parent_0, e_1_parent_1_0, e_1_parent_1_1_0, e_1_parent_1_1_1_0, e_2_parent_1_1_1_0, e_3_parent_1_1_1_0, e_4_parent_1_1_1_0, e_5_parent_1_1_1_0, e_2_parent_1_1_0, e_2_parent_1_0, e_1_parent_2_1_0, e_2_parent_2_1_0, e_1_parent_2_2_1_0, e_3_parent_2_1_0, e_4_parent_2_1_0, e_2_parent_0, e_1_parent_2_0, e_2_parent_2_0, e_3_parent_2_0, e_4_parent_2_0, e_3_parent_0, e_1_parent_3_0, e_1_parent_1_3_0, e_1_parent_1_1_3_0, 
e_1_parent_1_1_1_3_0] + for entry in entries: + topo.standalone.delete_s(entry) + topo.standalone.delete_s(dn_config) + + request.addfinalizer(fin) + +def test_slapi_memberof_reuse_if_possible_1(topo, request, install_test_plugin): + """ + Test that management hierarchy (manager) is computed with slapi_memberof + It requires slapi_memberof to reuse IF POSSIBLE the computed values + from memberof plugins. As memberof plugin is not enabled, it falls back + to regular computation (recompute) + with following parameters + - membership attribute: 'manager' + - span over all backends: 'on' + - skip nesting membership: 'off' + - computation mode: MEMBEROF_REUSE_IF_POSSIBLE <-- + - Scope: DEFAULT_SUFFIX + - ExcludeScope: None + - Maximum return entries: None + + :id: 8f75e4c9-60d4-41b8-8b25-df9fe4b0231d + :setup: Standalone instance + :steps: + 1. provision a set of entry + 2. configure test_slapi_memberof as described above + 3. check computed membership vs expected result + :expectedresults: + 1. Operation should succeed + 2. Operation should succeed + 3. 
Operation should succeed + + DIT is : + e_1_parent_0 + - e_1_parent_1_0 + -- e_1_parent_1_1_0 + --- e_1_parent_1_1_1_0 + --- e_2_parent_1_1_1_0 + --- e_3_parent_1_1_1_0 + --- e_4_parent_1_1_1_0 + --- e_5_parent_1_1_1_0 + -- e_2_parent_1_1_0 + - e_2_parent_1_0 + -- e_1_parent_2_1_0 + -- e_2_parent_2_1_0 + --- e_1_parent_2_2_1_0 + -- e_3_parent_2_1_0 + -- e_4_parent_2_1_0 + e_2_parent_0 + - e_1_parent_2_0 + - e_2_parent_2_0 + - e_3_parent_2_0 + - e_4_parent_2_0 + e_3_parent_0 + - e_1_parent_3_0 + -- e_1_parent_1_3_0 + --- e_1_parent_1_1_3_0 + ---- e_1_parent_1_1_1_3_0 + """ + user = UserAccounts(topo.standalone, DEFAULT_SUFFIX) + + # First subtree + e_1_parent_0 = add_entry(topo.standalone, uid="e_1_parent_0") + + e_1_parent_1_0 = add_entry(topo.standalone, uid="e_1_parent_1_0", manager=[ensure_bytes(e_1_parent_0)]) + + e_1_parent_1_1_0 = add_entry(topo.standalone, uid="e_1_parent_1_1_0", manager=[ensure_bytes(e_1_parent_1_0)]) + + e_1_parent_1_1_1_0 = add_entry(topo.standalone, uid="e_1_parent_1_1_1_0", manager=[ensure_bytes(e_1_parent_1_1_0)]) + e_2_parent_1_1_1_0 = add_entry(topo.standalone, uid="e_2_parent_1_1_1_0", manager=[ensure_bytes(e_1_parent_1_1_0)]) + e_3_parent_1_1_1_0 = add_entry(topo.standalone, uid="e_3_parent_1_1_1_0", manager=[ensure_bytes(e_1_parent_1_1_0)]) + e_4_parent_1_1_1_0 = add_entry(topo.standalone, uid="e_4_parent_1_1_1_0", manager=[ensure_bytes(e_1_parent_1_1_0)]) + e_5_parent_1_1_1_0 = add_entry(topo.standalone, uid="e_5_parent_1_1_1_0", manager=[ensure_bytes(e_1_parent_1_1_0)]) + + e_2_parent_1_1_0 = add_entry(topo.standalone, uid="e_2_parent_1_1_0", manager=[ensure_bytes(e_1_parent_1_0)]) + + e_2_parent_1_0 = add_entry(topo.standalone, uid="e_2_parent_1_0", manager=[ensure_bytes(e_1_parent_0)]) + + e_1_parent_2_1_0 = add_entry(topo.standalone, uid="e_1_parent_2_1_0", manager=[ensure_bytes(e_2_parent_1_0)]) + e_2_parent_2_1_0 = add_entry(topo.standalone, uid="e_2_parent_2_1_0", manager=[ensure_bytes(e_2_parent_1_0)]) + 
e_1_parent_2_2_1_0 = add_entry(topo.standalone, uid="e_1_parent_2_2_1_0", manager=[ensure_bytes(e_2_parent_2_1_0)]) + e_3_parent_2_1_0 = add_entry(topo.standalone, uid="e_3_parent_2_1_0", manager=[ensure_bytes(e_2_parent_1_0)]) + e_4_parent_2_1_0 = add_entry(topo.standalone, uid="e_4_parent_2_1_0", manager=[ensure_bytes(e_2_parent_1_0)]) + + # 2nd subtree + e_2_parent_0 = add_entry(topo.standalone, uid="e_2_parent_0") + + e_1_parent_2_0 = add_entry(topo.standalone, uid="e_1_parent_2_0", manager=[ensure_bytes(e_2_parent_0)]) + e_2_parent_2_0 = add_entry(topo.standalone, uid="e_2_parent_2_0", manager=[ensure_bytes(e_2_parent_0)]) + e_3_parent_2_0 = add_entry(topo.standalone, uid="e_3_parent_2_0", manager=[ensure_bytes(e_2_parent_0)]) + e_4_parent_2_0 = add_entry(topo.standalone, uid="e_4_parent_2_0", manager=[ensure_bytes(e_2_parent_0)]) + + # third subtree + e_3_parent_0 = add_entry(topo.standalone, uid="e_3_parent_0") + + e_1_parent_3_0 = add_entry(topo.standalone, uid="e_1_parent_3_0", manager=[ensure_bytes(e_3_parent_0)]) + + e_1_parent_1_3_0 = add_entry(topo.standalone, uid="e_1_parent_1_3_0", manager=[ensure_bytes(e_1_parent_3_0)]) + + e_1_parent_1_1_3_0 = add_entry(topo.standalone, uid="e_1_parent_1_1_3_0", manager=[ensure_bytes(e_1_parent_1_3_0)]) + + e_1_parent_1_1_1_3_0 = add_entry(topo.standalone, uid="e_1_parent_1_1_1_3_0", manager=[ensure_bytes(e_1_parent_1_1_3_0)]) + + dn_config = 'cn=test_slapi_memberof,cn=plugins,cn=config' + topo.standalone.add_s(Entry((dn_config, {'objectclass': 'top nsSlapdPlugin extensibleObject'.split(), + 'cn': 'test_slapi_memberof', + 'nsslapd-pluginPath': 'libtest_slapi_memberof-plugin', + 'nsslapd-pluginInitfunc': 'test_slapi_memberof_init', + 'nsslapd-pluginType': 'extendedop', + 'nsslapd-pluginEnabled': 'on', + 'nsslapd-plugin-depends-on-type': 'database', + 'nsslapd-pluginId': 'test_slapi_memberof-plugin', + 'slapimemberOfMemberDN': 'uid=test_user_11,ou=People,dc=example,dc=com', + 'slapimemberOfGroupAttr': 'manager', + 
'slapimemberOfAttr': 'memberof', + 'slapimemberOfFlag': 'MEMBEROF_REUSE_IF_POSSIBLE', + 'slapimemberOfAllBackends': 'on', + 'slapimemberOfSkipNested': 'off', + 'slapimemberOfEntryScope': DEFAULT_SUFFIX, + 'slapimemberOfMaxGroup': '0', + 'nsslapd-pluginVersion': '2.3.2.202302131418git0e190fc3d', + 'nsslapd-pluginVendor': '389 Project', + 'nsslapd-pluginDescription': 'test_slapi_memberof extended operation plugin'}))) + topo.standalone.restart() + + # Check the first subtree + expected = [ e_1_parent_1_0, e_1_parent_1_1_0, e_1_parent_1_1_1_0, e_2_parent_1_1_1_0, e_3_parent_1_1_1_0, e_4_parent_1_1_1_0, e_5_parent_1_1_1_0, e_2_parent_1_1_0, e_2_parent_1_0, e_1_parent_2_1_0, e_2_parent_2_1_0, e_1_parent_2_2_1_0, e_3_parent_2_1_0, e_4_parent_2_1_0] + res = _extop_test_slapi_member(server=topo.standalone, dn=e_1_parent_0, relation="manager") + _check_res_vs_expected("first subtree", res, expected) + + # Check the second subtree + expected = [e_1_parent_2_0, e_2_parent_2_0, e_3_parent_2_0, e_4_parent_2_0] + res = _extop_test_slapi_member(server=topo.standalone, dn=e_2_parent_0, relation="manager") + _check_res_vs_expected("second subtree", res, expected) + + # Check the third subtree + expected = [e_1_parent_3_0, e_1_parent_1_3_0, e_1_parent_1_1_3_0, e_1_parent_1_1_1_3_0] + res = _extop_test_slapi_member(server=topo.standalone, dn=e_3_parent_0, relation="manager") + _check_res_vs_expected("third subtree", res, expected) + + # check e_1_parent_1_0 + expected = [e_1_parent_1_1_0, e_1_parent_1_1_1_0, e_2_parent_1_1_1_0, e_3_parent_1_1_1_0, e_4_parent_1_1_1_0, e_5_parent_1_1_1_0, e_2_parent_1_1_0] + res = _extop_test_slapi_member(server=topo.standalone, dn=e_1_parent_1_0, relation="manager") + _check_res_vs_expected("organisation reporting to e_1_parent_1_0", res, expected) + + # check e_1_parent_1_1_0 + expected = [e_1_parent_1_1_1_0, e_2_parent_1_1_1_0, e_3_parent_1_1_1_0, e_4_parent_1_1_1_0, e_5_parent_1_1_1_0] + res = _extop_test_slapi_member(server=topo.standalone, 
dn=e_1_parent_1_1_0, relation="manager") + _check_res_vs_expected("organisation reporting to e_1_parent_1_1_0", res, expected) + + # check e_2_parent_1_1_0 + expected = [EMPTY_RESULT] + res = _extop_test_slapi_member(server=topo.standalone, dn=e_2_parent_1_1_0, relation="manager") + _check_res_vs_expected("organisation reporting to e_2_parent_1_1_0", res, expected) + + # check e_2_parent_1_0 + expected = [e_1_parent_2_1_0, e_2_parent_2_1_0, e_1_parent_2_2_1_0, e_3_parent_2_1_0, e_4_parent_2_1_0] + res = _extop_test_slapi_member(server=topo.standalone, dn=e_2_parent_1_0, relation="manager") + _check_res_vs_expected("organisation reporting to e_2_parent_1_0", res, expected) + + # check e_2_parent_2_1_0 + expected = [e_1_parent_2_2_1_0] + res = _extop_test_slapi_member(server=topo.standalone, dn=e_2_parent_2_1_0, relation="manager") + _check_res_vs_expected("organisation reporting to e_2_parent_2_1_0", res, expected) + + # Check e_1_parent_3_0 + expected = [e_1_parent_1_3_0, e_1_parent_1_1_3_0, e_1_parent_1_1_1_3_0] + res = _extop_test_slapi_member(server=topo.standalone, dn=e_1_parent_3_0, relation="manager") + _check_res_vs_expected("organisation reporting to e_1_parent_3_0", res, expected) + + # Check e_1_parent_1_3_0 + expected = [e_1_parent_1_1_3_0, e_1_parent_1_1_1_3_0] + res = _extop_test_slapi_member(server=topo.standalone, dn=e_1_parent_1_3_0, relation="manager") + _check_res_vs_expected("organisation reporting to e_1_parent_1_3_0", res, expected) + + # Check e_1_parent_1_1_3_0 + expected = [e_1_parent_1_1_1_3_0] + res = _extop_test_slapi_member(server=topo.standalone, dn=e_1_parent_1_1_3_0, relation="manager") + _check_res_vs_expected("organisation reporting to e_1_parent_1_1_3_0", res, expected) + + # Check e_1_parent_1_1_1_3_0 + expected = [EMPTY_RESULT] + res = _extop_test_slapi_member(server=topo.standalone, dn=e_1_parent_1_1_1_3_0, relation="manager") + _check_res_vs_expected("organisation reporting to e_1_parent_1_1_1_3_0", res, expected) + + def 
fin(): + entries = [e_1_parent_0, e_1_parent_1_0, e_1_parent_1_1_0, e_1_parent_1_1_1_0, e_2_parent_1_1_1_0, e_3_parent_1_1_1_0, e_4_parent_1_1_1_0, e_5_parent_1_1_1_0, e_2_parent_1_1_0, e_2_parent_1_0, e_1_parent_2_1_0, e_2_parent_2_1_0, e_1_parent_2_2_1_0, e_3_parent_2_1_0, e_4_parent_2_1_0, e_2_parent_0, e_1_parent_2_0, e_2_parent_2_0, e_3_parent_2_0, e_4_parent_2_0, e_3_parent_0, e_1_parent_3_0, e_1_parent_1_3_0, e_1_parent_1_1_3_0, e_1_parent_1_1_1_3_0] + for entry in entries: + topo.standalone.delete_s(entry) + topo.standalone.delete_s(dn_config) + + request.addfinalizer(fin) + +def test_slapi_memberof_reuse_if_possible_2(topo, request, install_test_plugin): + """ + Test that management hierarchy (manager) is computed with slapi_memberof + It requires slapi_memberof to reuse IF POSSIBLE the computed values + from memberof plugins. + Memberof plugin is enabled, but with a different 'membership attribute' + it falls back to regular computation (recompute) + with following parameters + - membership attribute: 'manager' <-- + - span over all backends: 'off' + - skip nesting membership: 'off' + - computation mode: MEMBEROF_REUSE_IF_POSSIBLE <-- + - Scope: DEFAULT_SUFFIX + - ExcludeScope: None + - Maximum return entries: None + + :id: 2175578b-7f12-4f36-a4fe-eb401422643d + :setup: Standalone instance + :steps: + 1. Configure memberof with 'uniquemember' memberOfGroupAttr <-- + 2. provision a set of entry + 3. configure test_slapi_memberof as described above + 4. check computed membership vs expected result + :expectedresults: + 1. Operation should succeed + 2. Operation should succeed + 3. Operation should succeed + 4. 
Operation should succeed + + DIT is : + e_1_parent_0 + - e_1_parent_1_0 + -- e_1_parent_1_1_0 + --- e_1_parent_1_1_1_0 + --- e_2_parent_1_1_1_0 + --- e_3_parent_1_1_1_0 + --- e_4_parent_1_1_1_0 + --- e_5_parent_1_1_1_0 + -- e_2_parent_1_1_0 + - e_2_parent_1_0 + -- e_1_parent_2_1_0 + -- e_2_parent_2_1_0 + --- e_1_parent_2_2_1_0 + -- e_3_parent_2_1_0 + -- e_4_parent_2_1_0 + e_2_parent_0 + - e_1_parent_2_0 + - e_2_parent_2_0 + - e_3_parent_2_0 + - e_4_parent_2_0 + e_3_parent_0 + - e_1_parent_3_0 + -- e_1_parent_1_3_0 + --- e_1_parent_1_1_3_0 + ---- e_1_parent_1_1_1_3_0 + """ + memberof = MemberOfPlugin(topo.standalone) + memberof.enable() + memberof.replace('memberOfGroupAttr', 'uniquemember') + topo.standalone.restart() + + user = UserAccounts(topo.standalone, DEFAULT_SUFFIX) + + # First subtree + e_1_parent_0 = add_entry(topo.standalone, uid="e_1_parent_0") + + e_1_parent_1_0 = add_entry(topo.standalone, uid="e_1_parent_1_0", manager=[ensure_bytes(e_1_parent_0)]) + + e_1_parent_1_1_0 = add_entry(topo.standalone, uid="e_1_parent_1_1_0", manager=[ensure_bytes(e_1_parent_1_0)]) + + e_1_parent_1_1_1_0 = add_entry(topo.standalone, uid="e_1_parent_1_1_1_0", manager=[ensure_bytes(e_1_parent_1_1_0)]) + e_2_parent_1_1_1_0 = add_entry(topo.standalone, uid="e_2_parent_1_1_1_0", manager=[ensure_bytes(e_1_parent_1_1_0)]) + e_3_parent_1_1_1_0 = add_entry(topo.standalone, uid="e_3_parent_1_1_1_0", manager=[ensure_bytes(e_1_parent_1_1_0)]) + e_4_parent_1_1_1_0 = add_entry(topo.standalone, uid="e_4_parent_1_1_1_0", manager=[ensure_bytes(e_1_parent_1_1_0)]) + e_5_parent_1_1_1_0 = add_entry(topo.standalone, uid="e_5_parent_1_1_1_0", manager=[ensure_bytes(e_1_parent_1_1_0)]) + + e_2_parent_1_1_0 = add_entry(topo.standalone, uid="e_2_parent_1_1_0", manager=[ensure_bytes(e_1_parent_1_0)]) + + e_2_parent_1_0 = add_entry(topo.standalone, uid="e_2_parent_1_0", manager=[ensure_bytes(e_1_parent_0)]) + + e_1_parent_2_1_0 = add_entry(topo.standalone, uid="e_1_parent_2_1_0", 
manager=[ensure_bytes(e_2_parent_1_0)]) + e_2_parent_2_1_0 = add_entry(topo.standalone, uid="e_2_parent_2_1_0", manager=[ensure_bytes(e_2_parent_1_0)]) + e_1_parent_2_2_1_0 = add_entry(topo.standalone, uid="e_1_parent_2_2_1_0", manager=[ensure_bytes(e_2_parent_2_1_0)]) + e_3_parent_2_1_0 = add_entry(topo.standalone, uid="e_3_parent_2_1_0", manager=[ensure_bytes(e_2_parent_1_0)]) + e_4_parent_2_1_0 = add_entry(topo.standalone, uid="e_4_parent_2_1_0", manager=[ensure_bytes(e_2_parent_1_0)]) + + # 2nd subtree + e_2_parent_0 = add_entry(topo.standalone, uid="e_2_parent_0") + + e_1_parent_2_0 = add_entry(topo.standalone, uid="e_1_parent_2_0", manager=[ensure_bytes(e_2_parent_0)]) + e_2_parent_2_0 = add_entry(topo.standalone, uid="e_2_parent_2_0", manager=[ensure_bytes(e_2_parent_0)]) + e_3_parent_2_0 = add_entry(topo.standalone, uid="e_3_parent_2_0", manager=[ensure_bytes(e_2_parent_0)]) + e_4_parent_2_0 = add_entry(topo.standalone, uid="e_4_parent_2_0", manager=[ensure_bytes(e_2_parent_0)]) + + # third subtree + e_3_parent_0 = add_entry(topo.standalone, uid="e_3_parent_0") + + e_1_parent_3_0 = add_entry(topo.standalone, uid="e_1_parent_3_0", manager=[ensure_bytes(e_3_parent_0)]) + + e_1_parent_1_3_0 = add_entry(topo.standalone, uid="e_1_parent_1_3_0", manager=[ensure_bytes(e_1_parent_3_0)]) + + e_1_parent_1_1_3_0 = add_entry(topo.standalone, uid="e_1_parent_1_1_3_0", manager=[ensure_bytes(e_1_parent_1_3_0)]) + + e_1_parent_1_1_1_3_0 = add_entry(topo.standalone, uid="e_1_parent_1_1_1_3_0", manager=[ensure_bytes(e_1_parent_1_1_3_0)]) + + dn_config = 'cn=test_slapi_memberof,cn=plugins,cn=config' + topo.standalone.add_s(Entry((dn_config, {'objectclass': 'top nsSlapdPlugin extensibleObject'.split(), + 'cn': 'test_slapi_memberof', + 'nsslapd-pluginPath': 'libtest_slapi_memberof-plugin', + 'nsslapd-pluginInitfunc': 'test_slapi_memberof_init', + 'nsslapd-pluginType': 'extendedop', + 'nsslapd-pluginEnabled': 'on', + 'nsslapd-plugin-depends-on-type': 'database', + 
'nsslapd-pluginId': 'test_slapi_memberof-plugin', + 'slapimemberOfMemberDN': 'uid=test_user_11,ou=People,dc=example,dc=com', + 'slapimemberOfGroupAttr': 'manager', + 'slapimemberOfAttr': 'memberof', + 'slapimemberOfFlag': 'MEMBEROF_REUSE_IF_POSSIBLE', + 'slapimemberOfAllBackends': 'off', + 'slapimemberOfSkipNested': 'off', + 'slapimemberOfEntryScope': DEFAULT_SUFFIX, + 'slapimemberOfMaxGroup': '0', + 'nsslapd-pluginVersion': '2.3.2.202302131418git0e190fc3d', + 'nsslapd-pluginVendor': '389 Project', + 'nsslapd-pluginDescription': 'test_slapi_memberof extended operation plugin'}))) + topo.standalone.restart() + + # Check the first subtree + expected = [ e_1_parent_1_0, e_1_parent_1_1_0, e_1_parent_1_1_1_0, e_2_parent_1_1_1_0, e_3_parent_1_1_1_0, e_4_parent_1_1_1_0, e_5_parent_1_1_1_0, e_2_parent_1_1_0, e_2_parent_1_0, e_1_parent_2_1_0, e_2_parent_2_1_0, e_1_parent_2_2_1_0, e_3_parent_2_1_0, e_4_parent_2_1_0] + res = _extop_test_slapi_member(server=topo.standalone, dn=e_1_parent_0, relation="manager") + _check_res_vs_expected("first subtree", res, expected) + + # Check the second subtree + expected = [e_1_parent_2_0, e_2_parent_2_0, e_3_parent_2_0, e_4_parent_2_0] + res = _extop_test_slapi_member(server=topo.standalone, dn=e_2_parent_0, relation="manager") + _check_res_vs_expected("second subtree", res, expected) + + # Check the third subtree + expected = [e_1_parent_3_0, e_1_parent_1_3_0, e_1_parent_1_1_3_0, e_1_parent_1_1_1_3_0] + res = _extop_test_slapi_member(server=topo.standalone, dn=e_3_parent_0, relation="manager") + _check_res_vs_expected("third subtree", res, expected) + + # check e_1_parent_1_0 + expected = [e_1_parent_1_1_0, e_1_parent_1_1_1_0, e_2_parent_1_1_1_0, e_3_parent_1_1_1_0, e_4_parent_1_1_1_0, e_5_parent_1_1_1_0, e_2_parent_1_1_0] + res = _extop_test_slapi_member(server=topo.standalone, dn=e_1_parent_1_0, relation="manager") + _check_res_vs_expected("organisation reporting to e_1_parent_1_0", res, expected) + + # check e_1_parent_1_1_0 + expected 
= [e_1_parent_1_1_1_0, e_2_parent_1_1_1_0, e_3_parent_1_1_1_0, e_4_parent_1_1_1_0, e_5_parent_1_1_1_0] + res = _extop_test_slapi_member(server=topo.standalone, dn=e_1_parent_1_1_0, relation="manager") + _check_res_vs_expected("organisation reporting to e_1_parent_1_1_0", res, expected) + + # check e_2_parent_1_1_0 + expected = [EMPTY_RESULT] + res = _extop_test_slapi_member(server=topo.standalone, dn=e_2_parent_1_1_0, relation="manager") + _check_res_vs_expected("organisation reporting to e_2_parent_1_1_0", res, expected) + + # check e_2_parent_1_0 + expected = [e_1_parent_2_1_0, e_2_parent_2_1_0, e_1_parent_2_2_1_0, e_3_parent_2_1_0, e_4_parent_2_1_0] + res = _extop_test_slapi_member(server=topo.standalone, dn=e_2_parent_1_0, relation="manager") + _check_res_vs_expected("organisation reporting to e_2_parent_1_0", res, expected) + + # check e_2_parent_2_1_0 + expected = [e_1_parent_2_2_1_0] + res = _extop_test_slapi_member(server=topo.standalone, dn=e_2_parent_2_1_0, relation="manager") + _check_res_vs_expected("organisation reporting to e_2_parent_2_1_0", res, expected) + + # Check e_1_parent_3_0 + expected = [e_1_parent_1_3_0, e_1_parent_1_1_3_0, e_1_parent_1_1_1_3_0] + res = _extop_test_slapi_member(server=topo.standalone, dn=e_1_parent_3_0, relation="manager") + _check_res_vs_expected("organisation reporting to e_1_parent_3_0", res, expected) + + # Check e_1_parent_1_3_0 + expected = [e_1_parent_1_1_3_0, e_1_parent_1_1_1_3_0] + res = _extop_test_slapi_member(server=topo.standalone, dn=e_1_parent_1_3_0, relation="manager") + _check_res_vs_expected("organisation reporting to e_1_parent_1_3_0", res, expected) + + # Check e_1_parent_1_1_3_0 + expected = [e_1_parent_1_1_1_3_0] + res = _extop_test_slapi_member(server=topo.standalone, dn=e_1_parent_1_1_3_0, relation="manager") + _check_res_vs_expected("organisation reporting to e_1_parent_1_1_3_0", res, expected) + + # Check e_1_parent_1_1_1_3_0 + expected = [EMPTY_RESULT] + res = 
_extop_test_slapi_member(server=topo.standalone, dn=e_1_parent_1_1_1_3_0, relation="manager") + _check_res_vs_expected("organisation reporting to e_1_parent_1_1_1_3_0", res, expected) + + def fin(): + entries = [e_1_parent_0, e_1_parent_1_0, e_1_parent_1_1_0, e_1_parent_1_1_1_0, e_2_parent_1_1_1_0, e_3_parent_1_1_1_0, e_4_parent_1_1_1_0, e_5_parent_1_1_1_0, e_2_parent_1_1_0, e_2_parent_1_0, e_1_parent_2_1_0, e_2_parent_2_1_0, e_1_parent_2_2_1_0, e_3_parent_2_1_0, e_4_parent_2_1_0, e_2_parent_0, e_1_parent_2_0, e_2_parent_2_0, e_3_parent_2_0, e_4_parent_2_0, e_3_parent_0, e_1_parent_3_0, e_1_parent_1_3_0, e_1_parent_1_1_3_0, e_1_parent_1_1_1_3_0] + for entry in entries: + topo.standalone.delete_s(entry) + topo.standalone.delete_s(dn_config) + + request.addfinalizer(fin) + +def test_slapi_memberof_reuse_if_possible_3(topo, request, install_test_plugin): + """ + Test that management hierarchy (manager) is computed with slapi_memberof + It requires slapi_memberof to reuse IF POSSIBLE the computed values + from memberof plugins. + Memberof plugin is enabled, but with a different 'memberOfAllBackends attribute' + it falls back to regular computation (recompute) + with following parameters + - membership attribute: 'manager' + - span over all backends: 'off' <-- + - skip nesting membership: 'off' + - computation mode: MEMBEROF_REUSE_IF_POSSIBLE <-- + - Scope: DEFAULT_SUFFIX + - ExcludeScope: None + - Maximum return entries: None + + :id: 11615fc6-67e8-4c4a-be76-d57baf0e1706 + :setup: Standalone instance + :steps: + 1. Configure memberof with 'memberOfAllBackends: on' <-- + 2. provision a set of entry + 3. configure test_slapi_memberof as described above + 4. check computed membership vs expected result + :expectedresults: + 1. Operation should succeed + 2. Operation should succeed + 3. Operation should succeed + 4. 
Operation should succeed + + DIT is : + e_1_parent_0 + - e_1_parent_1_0 + -- e_1_parent_1_1_0 + --- e_1_parent_1_1_1_0 + --- e_2_parent_1_1_1_0 + --- e_3_parent_1_1_1_0 + --- e_4_parent_1_1_1_0 + --- e_5_parent_1_1_1_0 + -- e_2_parent_1_1_0 + - e_2_parent_1_0 + -- e_1_parent_2_1_0 + -- e_2_parent_2_1_0 + --- e_1_parent_2_2_1_0 + -- e_3_parent_2_1_0 + -- e_4_parent_2_1_0 + e_2_parent_0 + - e_1_parent_2_0 + - e_2_parent_2_0 + - e_3_parent_2_0 + - e_4_parent_2_0 + e_3_parent_0 + - e_1_parent_3_0 + -- e_1_parent_1_3_0 + --- e_1_parent_1_1_3_0 + ---- e_1_parent_1_1_1_3_0 + """ + memberof = MemberOfPlugin(topo.standalone) + memberof.enable() + memberof.replace('memberOfGroupAttr', 'manager') + memberof.replace('memberOfAllBackends', 'on') + topo.standalone.restart() + + user = UserAccounts(topo.standalone, DEFAULT_SUFFIX) + + # First subtree + e_1_parent_0 = add_entry(topo.standalone, uid="e_1_parent_0") + + e_1_parent_1_0 = add_entry(topo.standalone, uid="e_1_parent_1_0", manager=[ensure_bytes(e_1_parent_0)]) + + e_1_parent_1_1_0 = add_entry(topo.standalone, uid="e_1_parent_1_1_0", manager=[ensure_bytes(e_1_parent_1_0)]) + + e_1_parent_1_1_1_0 = add_entry(topo.standalone, uid="e_1_parent_1_1_1_0", manager=[ensure_bytes(e_1_parent_1_1_0)]) + e_2_parent_1_1_1_0 = add_entry(topo.standalone, uid="e_2_parent_1_1_1_0", manager=[ensure_bytes(e_1_parent_1_1_0)]) + e_3_parent_1_1_1_0 = add_entry(topo.standalone, uid="e_3_parent_1_1_1_0", manager=[ensure_bytes(e_1_parent_1_1_0)]) + e_4_parent_1_1_1_0 = add_entry(topo.standalone, uid="e_4_parent_1_1_1_0", manager=[ensure_bytes(e_1_parent_1_1_0)]) + e_5_parent_1_1_1_0 = add_entry(topo.standalone, uid="e_5_parent_1_1_1_0", manager=[ensure_bytes(e_1_parent_1_1_0)]) + + e_2_parent_1_1_0 = add_entry(topo.standalone, uid="e_2_parent_1_1_0", manager=[ensure_bytes(e_1_parent_1_0)]) + + e_2_parent_1_0 = add_entry(topo.standalone, uid="e_2_parent_1_0", manager=[ensure_bytes(e_1_parent_0)]) + + e_1_parent_2_1_0 = add_entry(topo.standalone, 
uid="e_1_parent_2_1_0", manager=[ensure_bytes(e_2_parent_1_0)]) + e_2_parent_2_1_0 = add_entry(topo.standalone, uid="e_2_parent_2_1_0", manager=[ensure_bytes(e_2_parent_1_0)]) + e_1_parent_2_2_1_0 = add_entry(topo.standalone, uid="e_1_parent_2_2_1_0", manager=[ensure_bytes(e_2_parent_2_1_0)]) + e_3_parent_2_1_0 = add_entry(topo.standalone, uid="e_3_parent_2_1_0", manager=[ensure_bytes(e_2_parent_1_0)]) + e_4_parent_2_1_0 = add_entry(topo.standalone, uid="e_4_parent_2_1_0", manager=[ensure_bytes(e_2_parent_1_0)]) + + # 2nd subtree + e_2_parent_0 = add_entry(topo.standalone, uid="e_2_parent_0") + + e_1_parent_2_0 = add_entry(topo.standalone, uid="e_1_parent_2_0", manager=[ensure_bytes(e_2_parent_0)]) + e_2_parent_2_0 = add_entry(topo.standalone, uid="e_2_parent_2_0", manager=[ensure_bytes(e_2_parent_0)]) + e_3_parent_2_0 = add_entry(topo.standalone, uid="e_3_parent_2_0", manager=[ensure_bytes(e_2_parent_0)]) + e_4_parent_2_0 = add_entry(topo.standalone, uid="e_4_parent_2_0", manager=[ensure_bytes(e_2_parent_0)]) + + # third subtree + e_3_parent_0 = add_entry(topo.standalone, uid="e_3_parent_0") + + e_1_parent_3_0 = add_entry(topo.standalone, uid="e_1_parent_3_0", manager=[ensure_bytes(e_3_parent_0)]) + + e_1_parent_1_3_0 = add_entry(topo.standalone, uid="e_1_parent_1_3_0", manager=[ensure_bytes(e_1_parent_3_0)]) + + e_1_parent_1_1_3_0 = add_entry(topo.standalone, uid="e_1_parent_1_1_3_0", manager=[ensure_bytes(e_1_parent_1_3_0)]) + + e_1_parent_1_1_1_3_0 = add_entry(topo.standalone, uid="e_1_parent_1_1_1_3_0", manager=[ensure_bytes(e_1_parent_1_1_3_0)]) + + dn_config = 'cn=test_slapi_memberof,cn=plugins,cn=config' + topo.standalone.add_s(Entry((dn_config, {'objectclass': 'top nsSlapdPlugin extensibleObject'.split(), + 'cn': 'test_slapi_memberof', + 'nsslapd-pluginPath': 'libtest_slapi_memberof-plugin', + 'nsslapd-pluginInitfunc': 'test_slapi_memberof_init', + 'nsslapd-pluginType': 'extendedop', + 'nsslapd-pluginEnabled': 'on', + 'nsslapd-plugin-depends-on-type': 
'database', + 'nsslapd-pluginId': 'test_slapi_memberof-plugin', + 'slapimemberOfMemberDN': 'uid=test_user_11,ou=People,dc=example,dc=com', + 'slapimemberOfGroupAttr': 'manager', + 'slapimemberOfAttr': 'memberof', + 'slapimemberOfFlag': 'MEMBEROF_REUSE_IF_POSSIBLE', + 'slapimemberOfAllBackends': 'off', + 'slapimemberOfSkipNested': 'off', + 'slapimemberOfEntryScope': DEFAULT_SUFFIX, + 'slapimemberOfMaxGroup': '0', + 'nsslapd-pluginVersion': '2.3.2.202302131418git0e190fc3d', + 'nsslapd-pluginVendor': '389 Project', + 'nsslapd-pluginDescription': 'test_slapi_memberof extended operation plugin'}))) + topo.standalone.restart() + + # Check the first subtree + expected = [ e_1_parent_1_0, e_1_parent_1_1_0, e_1_parent_1_1_1_0, e_2_parent_1_1_1_0, e_3_parent_1_1_1_0, e_4_parent_1_1_1_0, e_5_parent_1_1_1_0, e_2_parent_1_1_0, e_2_parent_1_0, e_1_parent_2_1_0, e_2_parent_2_1_0, e_1_parent_2_2_1_0, e_3_parent_2_1_0, e_4_parent_2_1_0] + res = _extop_test_slapi_member(server=topo.standalone, dn=e_1_parent_0, relation="manager") + _check_res_vs_expected("first subtree", res, expected) + + # Check the second subtree + expected = [e_1_parent_2_0, e_2_parent_2_0, e_3_parent_2_0, e_4_parent_2_0] + res = _extop_test_slapi_member(server=topo.standalone, dn=e_2_parent_0, relation="manager") + _check_res_vs_expected("second subtree", res, expected) + + # Check the third subtree + expected = [e_1_parent_3_0, e_1_parent_1_3_0, e_1_parent_1_1_3_0, e_1_parent_1_1_1_3_0] + res = _extop_test_slapi_member(server=topo.standalone, dn=e_3_parent_0, relation="manager") + _check_res_vs_expected("third subtree", res, expected) + + # check e_1_parent_1_0 + expected = [e_1_parent_1_1_0, e_1_parent_1_1_1_0, e_2_parent_1_1_1_0, e_3_parent_1_1_1_0, e_4_parent_1_1_1_0, e_5_parent_1_1_1_0, e_2_parent_1_1_0] + res = _extop_test_slapi_member(server=topo.standalone, dn=e_1_parent_1_0, relation="manager") + _check_res_vs_expected("organisation reporting to e_1_parent_1_0", res, expected) + + # check 
e_1_parent_1_1_0 + expected = [e_1_parent_1_1_1_0, e_2_parent_1_1_1_0, e_3_parent_1_1_1_0, e_4_parent_1_1_1_0, e_5_parent_1_1_1_0] + res = _extop_test_slapi_member(server=topo.standalone, dn=e_1_parent_1_1_0, relation="manager") + _check_res_vs_expected("organisation reporting to e_1_parent_1_1_0", res, expected) + + # check e_2_parent_1_1_0 + expected = [EMPTY_RESULT] + res = _extop_test_slapi_member(server=topo.standalone, dn=e_2_parent_1_1_0, relation="manager") + _check_res_vs_expected("organisation reporting to e_2_parent_1_1_0", res, expected) + + # check e_2_parent_1_0 + expected = [e_1_parent_2_1_0, e_2_parent_2_1_0, e_1_parent_2_2_1_0, e_3_parent_2_1_0, e_4_parent_2_1_0] + res = _extop_test_slapi_member(server=topo.standalone, dn=e_2_parent_1_0, relation="manager") + _check_res_vs_expected("organisation reporting to e_2_parent_1_0", res, expected) + + # check e_2_parent_2_1_0 + expected = [e_1_parent_2_2_1_0] + res = _extop_test_slapi_member(server=topo.standalone, dn=e_2_parent_2_1_0, relation="manager") + _check_res_vs_expected("organisation reporting to e_2_parent_2_1_0", res, expected) + + # Check e_1_parent_3_0 + expected = [e_1_parent_1_3_0, e_1_parent_1_1_3_0, e_1_parent_1_1_1_3_0] + res = _extop_test_slapi_member(server=topo.standalone, dn=e_1_parent_3_0, relation="manager") + _check_res_vs_expected("organisation reporting to e_1_parent_3_0", res, expected) + + # Check e_1_parent_1_3_0 + expected = [e_1_parent_1_1_3_0, e_1_parent_1_1_1_3_0] + res = _extop_test_slapi_member(server=topo.standalone, dn=e_1_parent_1_3_0, relation="manager") + _check_res_vs_expected("organisation reporting to e_1_parent_1_3_0", res, expected) + + # Check e_1_parent_1_1_3_0 + expected = [e_1_parent_1_1_1_3_0] + res = _extop_test_slapi_member(server=topo.standalone, dn=e_1_parent_1_1_3_0, relation="manager") + _check_res_vs_expected("organisation reporting to e_1_parent_1_1_3_0", res, expected) + + # Check e_1_parent_1_1_1_3_0 + expected = [EMPTY_RESULT] + res = 
_extop_test_slapi_member(server=topo.standalone, dn=e_1_parent_1_1_1_3_0, relation="manager") + _check_res_vs_expected("organisation reporting to e_1_parent_1_1_1_3_0", res, expected) + + def fin(): + entries = [e_1_parent_0, e_1_parent_1_0, e_1_parent_1_1_0, e_1_parent_1_1_1_0, e_2_parent_1_1_1_0, e_3_parent_1_1_1_0, e_4_parent_1_1_1_0, e_5_parent_1_1_1_0, e_2_parent_1_1_0, e_2_parent_1_0, e_1_parent_2_1_0, e_2_parent_2_1_0, e_1_parent_2_2_1_0, e_3_parent_2_1_0, e_4_parent_2_1_0, e_2_parent_0, e_1_parent_2_0, e_2_parent_2_0, e_3_parent_2_0, e_4_parent_2_0, e_3_parent_0, e_1_parent_3_0, e_1_parent_1_3_0, e_1_parent_1_1_3_0, e_1_parent_1_1_1_3_0] + for entry in entries: + topo.standalone.delete_s(entry) + topo.standalone.delete_s(dn_config) + + request.addfinalizer(fin) + + +def test_slapi_memberof_reuse_if_possible_4(topo, request, install_test_plugin): + """ + Test that management hierarchy (manager) is computed with slapi_memberof + It requires slapi_memberof to reuse IF POSSIBLE the computed values + from memberof plugins. + Memberof plugin is enabled, but with a different 'memberOfSkipNested' attr + it falls back to regular computation (recompute) + with following parameters + - membership attribute: 'manager' + - span over all backends: 'off' + - skip nesting membership: 'off' <-- + - computation mode: MEMBEROF_REUSE_IF_POSSIBLE <-- + - Scope: DEFAULT_SUFFIX + - ExcludeScope: None + - Maximum return entries: None + + :id: 305c99ba-5835-4b8c-bfb7-11deeea5eedc + :setup: Standalone instance + :steps: + 1. Configure memberof with 'memberOfSkipNested: on' <-- + 2. provision a set of entry + 3. configure test_slapi_memberof as described above + 4. check computed membership vs expected result + :expectedresults: + 1. Operation should succeed + 2. Operation should succeed + 3. Operation should succeed + 4. 
Operation should succeed + + DIT is : + e_1_parent_0 + - e_1_parent_1_0 + -- e_1_parent_1_1_0 + --- e_1_parent_1_1_1_0 + --- e_2_parent_1_1_1_0 + --- e_3_parent_1_1_1_0 + --- e_4_parent_1_1_1_0 + --- e_5_parent_1_1_1_0 + -- e_2_parent_1_1_0 + - e_2_parent_1_0 + -- e_1_parent_2_1_0 + -- e_2_parent_2_1_0 + --- e_1_parent_2_2_1_0 + -- e_3_parent_2_1_0 + -- e_4_parent_2_1_0 + e_2_parent_0 + - e_1_parent_2_0 + - e_2_parent_2_0 + - e_3_parent_2_0 + - e_4_parent_2_0 + e_3_parent_0 + - e_1_parent_3_0 + -- e_1_parent_1_3_0 + --- e_1_parent_1_1_3_0 + ---- e_1_parent_1_1_1_3_0 + """ + memberof = MemberOfPlugin(topo.standalone) + memberof.enable() + memberof.replace('memberOfGroupAttr', 'manager') + memberof.replace('memberOfSkipNested', 'on') + topo.standalone.restart() + + user = UserAccounts(topo.standalone, DEFAULT_SUFFIX) + + # First subtree + e_1_parent_0 = add_entry(topo.standalone, uid="e_1_parent_0") + + e_1_parent_1_0 = add_entry(topo.standalone, uid="e_1_parent_1_0", manager=[ensure_bytes(e_1_parent_0)]) + + e_1_parent_1_1_0 = add_entry(topo.standalone, uid="e_1_parent_1_1_0", manager=[ensure_bytes(e_1_parent_1_0)]) + + e_1_parent_1_1_1_0 = add_entry(topo.standalone, uid="e_1_parent_1_1_1_0", manager=[ensure_bytes(e_1_parent_1_1_0)]) + e_2_parent_1_1_1_0 = add_entry(topo.standalone, uid="e_2_parent_1_1_1_0", manager=[ensure_bytes(e_1_parent_1_1_0)]) + e_3_parent_1_1_1_0 = add_entry(topo.standalone, uid="e_3_parent_1_1_1_0", manager=[ensure_bytes(e_1_parent_1_1_0)]) + e_4_parent_1_1_1_0 = add_entry(topo.standalone, uid="e_4_parent_1_1_1_0", manager=[ensure_bytes(e_1_parent_1_1_0)]) + e_5_parent_1_1_1_0 = add_entry(topo.standalone, uid="e_5_parent_1_1_1_0", manager=[ensure_bytes(e_1_parent_1_1_0)]) + + e_2_parent_1_1_0 = add_entry(topo.standalone, uid="e_2_parent_1_1_0", manager=[ensure_bytes(e_1_parent_1_0)]) + + e_2_parent_1_0 = add_entry(topo.standalone, uid="e_2_parent_1_0", manager=[ensure_bytes(e_1_parent_0)]) + + e_1_parent_2_1_0 = add_entry(topo.standalone, 
uid="e_1_parent_2_1_0", manager=[ensure_bytes(e_2_parent_1_0)]) + e_2_parent_2_1_0 = add_entry(topo.standalone, uid="e_2_parent_2_1_0", manager=[ensure_bytes(e_2_parent_1_0)]) + e_1_parent_2_2_1_0 = add_entry(topo.standalone, uid="e_1_parent_2_2_1_0", manager=[ensure_bytes(e_2_parent_2_1_0)]) + e_3_parent_2_1_0 = add_entry(topo.standalone, uid="e_3_parent_2_1_0", manager=[ensure_bytes(e_2_parent_1_0)]) + e_4_parent_2_1_0 = add_entry(topo.standalone, uid="e_4_parent_2_1_0", manager=[ensure_bytes(e_2_parent_1_0)]) + + # 2nd subtree + e_2_parent_0 = add_entry(topo.standalone, uid="e_2_parent_0") + + e_1_parent_2_0 = add_entry(topo.standalone, uid="e_1_parent_2_0", manager=[ensure_bytes(e_2_parent_0)]) + e_2_parent_2_0 = add_entry(topo.standalone, uid="e_2_parent_2_0", manager=[ensure_bytes(e_2_parent_0)]) + e_3_parent_2_0 = add_entry(topo.standalone, uid="e_3_parent_2_0", manager=[ensure_bytes(e_2_parent_0)]) + e_4_parent_2_0 = add_entry(topo.standalone, uid="e_4_parent_2_0", manager=[ensure_bytes(e_2_parent_0)]) + + # third subtree + e_3_parent_0 = add_entry(topo.standalone, uid="e_3_parent_0") + + e_1_parent_3_0 = add_entry(topo.standalone, uid="e_1_parent_3_0", manager=[ensure_bytes(e_3_parent_0)]) + + e_1_parent_1_3_0 = add_entry(topo.standalone, uid="e_1_parent_1_3_0", manager=[ensure_bytes(e_1_parent_3_0)]) + + e_1_parent_1_1_3_0 = add_entry(topo.standalone, uid="e_1_parent_1_1_3_0", manager=[ensure_bytes(e_1_parent_1_3_0)]) + + e_1_parent_1_1_1_3_0 = add_entry(topo.standalone, uid="e_1_parent_1_1_1_3_0", manager=[ensure_bytes(e_1_parent_1_1_3_0)]) + + dn_config = 'cn=test_slapi_memberof,cn=plugins,cn=config' + topo.standalone.add_s(Entry((dn_config, {'objectclass': 'top nsSlapdPlugin extensibleObject'.split(), + 'cn': 'test_slapi_memberof', + 'nsslapd-pluginPath': 'libtest_slapi_memberof-plugin', + 'nsslapd-pluginInitfunc': 'test_slapi_memberof_init', + 'nsslapd-pluginType': 'extendedop', + 'nsslapd-pluginEnabled': 'on', + 'nsslapd-plugin-depends-on-type': 
'database', + 'nsslapd-pluginId': 'test_slapi_memberof-plugin', + 'slapimemberOfMemberDN': 'uid=test_user_11,ou=People,dc=example,dc=com', + 'slapimemberOfGroupAttr': 'manager', + 'slapimemberOfAttr': 'memberof', + 'slapimemberOfFlag': 'MEMBEROF_REUSE_IF_POSSIBLE', + 'slapimemberOfAllBackends': 'off', + 'slapimemberOfSkipNested': 'off', + 'slapimemberOfEntryScope': DEFAULT_SUFFIX, + 'slapimemberOfMaxGroup': '0', + 'nsslapd-pluginVersion': '2.3.2.202302131418git0e190fc3d', + 'nsslapd-pluginVendor': '389 Project', + 'nsslapd-pluginDescription': 'test_slapi_memberof extended operation plugin'}))) + topo.standalone.restart() + + # Check the first subtree + expected = [ e_1_parent_1_0, e_1_parent_1_1_0, e_1_parent_1_1_1_0, e_2_parent_1_1_1_0, e_3_parent_1_1_1_0, e_4_parent_1_1_1_0, e_5_parent_1_1_1_0, e_2_parent_1_1_0, e_2_parent_1_0, e_1_parent_2_1_0, e_2_parent_2_1_0, e_1_parent_2_2_1_0, e_3_parent_2_1_0, e_4_parent_2_1_0] + res = _extop_test_slapi_member(server=topo.standalone, dn=e_1_parent_0, relation="manager") + _check_res_vs_expected("first subtree", res, expected) + + # Check the second subtree + expected = [e_1_parent_2_0, e_2_parent_2_0, e_3_parent_2_0, e_4_parent_2_0] + res = _extop_test_slapi_member(server=topo.standalone, dn=e_2_parent_0, relation="manager") + _check_res_vs_expected("second subtree", res, expected) + + # Check the third subtree + expected = [e_1_parent_3_0, e_1_parent_1_3_0, e_1_parent_1_1_3_0, e_1_parent_1_1_1_3_0] + res = _extop_test_slapi_member(server=topo.standalone, dn=e_3_parent_0, relation="manager") + _check_res_vs_expected("third subtree", res, expected) + + # check e_1_parent_1_0 + expected = [e_1_parent_1_1_0, e_1_parent_1_1_1_0, e_2_parent_1_1_1_0, e_3_parent_1_1_1_0, e_4_parent_1_1_1_0, e_5_parent_1_1_1_0, e_2_parent_1_1_0] + res = _extop_test_slapi_member(server=topo.standalone, dn=e_1_parent_1_0, relation="manager") + _check_res_vs_expected("organisation reporting to e_1_parent_1_0", res, expected) + + # check 
e_1_parent_1_1_0 + expected = [e_1_parent_1_1_1_0, e_2_parent_1_1_1_0, e_3_parent_1_1_1_0, e_4_parent_1_1_1_0, e_5_parent_1_1_1_0] + res = _extop_test_slapi_member(server=topo.standalone, dn=e_1_parent_1_1_0, relation="manager") + _check_res_vs_expected("organisation reporting to e_1_parent_1_1_0", res, expected) + + # check e_2_parent_1_1_0 + expected = [EMPTY_RESULT] + res = _extop_test_slapi_member(server=topo.standalone, dn=e_2_parent_1_1_0, relation="manager") + _check_res_vs_expected("organisation reporting to e_2_parent_1_1_0", res, expected) + + # check e_2_parent_1_0 + expected = [e_1_parent_2_1_0, e_2_parent_2_1_0, e_1_parent_2_2_1_0, e_3_parent_2_1_0, e_4_parent_2_1_0] + res = _extop_test_slapi_member(server=topo.standalone, dn=e_2_parent_1_0, relation="manager") + _check_res_vs_expected("organisation reporting to e_2_parent_1_0", res, expected) + + # check e_2_parent_2_1_0 + expected = [e_1_parent_2_2_1_0] + res = _extop_test_slapi_member(server=topo.standalone, dn=e_2_parent_2_1_0, relation="manager") + _check_res_vs_expected("organisation reporting to e_2_parent_2_1_0", res, expected) + + # Check e_1_parent_3_0 + expected = [e_1_parent_1_3_0, e_1_parent_1_1_3_0, e_1_parent_1_1_1_3_0] + res = _extop_test_slapi_member(server=topo.standalone, dn=e_1_parent_3_0, relation="manager") + _check_res_vs_expected("organisation reporting to e_1_parent_3_0", res, expected) + + # Check e_1_parent_1_3_0 + expected = [e_1_parent_1_1_3_0, e_1_parent_1_1_1_3_0] + res = _extop_test_slapi_member(server=topo.standalone, dn=e_1_parent_1_3_0, relation="manager") + _check_res_vs_expected("organisation reporting to e_1_parent_1_3_0", res, expected) + + # Check e_1_parent_1_1_3_0 + expected = [e_1_parent_1_1_1_3_0] + res = _extop_test_slapi_member(server=topo.standalone, dn=e_1_parent_1_1_3_0, relation="manager") + _check_res_vs_expected("organisation reporting to e_1_parent_1_1_3_0", res, expected) + + # Check e_1_parent_1_1_1_3_0 + expected = [EMPTY_RESULT] + res = 
_extop_test_slapi_member(server=topo.standalone, dn=e_1_parent_1_1_1_3_0, relation="manager") + _check_res_vs_expected("organisation reporting to e_1_parent_1_1_1_3_0", res, expected) + + def fin(): + entries = [e_1_parent_0, e_1_parent_1_0, e_1_parent_1_1_0, e_1_parent_1_1_1_0, e_2_parent_1_1_1_0, e_3_parent_1_1_1_0, e_4_parent_1_1_1_0, e_5_parent_1_1_1_0, e_2_parent_1_1_0, e_2_parent_1_0, e_1_parent_2_1_0, e_2_parent_2_1_0, e_1_parent_2_2_1_0, e_3_parent_2_1_0, e_4_parent_2_1_0, e_2_parent_0, e_1_parent_2_0, e_2_parent_2_0, e_3_parent_2_0, e_4_parent_2_0, e_3_parent_0, e_1_parent_3_0, e_1_parent_1_3_0, e_1_parent_1_1_3_0, e_1_parent_1_1_1_3_0] + for entry in entries: + topo.standalone.delete_s(entry) + topo.standalone.delete_s(dn_config) + + request.addfinalizer(fin) + + +def test_slapi_memberof_reuse_if_possible_5(topo, request, install_test_plugin): + """ + Test that management hierarchy (manager) is computed with slapi_memberof + It requires slapi_memberof to reuse IF POSSIBLE the computed values + from memberof plugins. + Memberof plugin is enabled, but with a different 'memberOfAttr' attr + it falls back to regular computation (recompute) + with following parameters + - member attribute: memberof <-- + - membership attribute: 'manager' + - span over all backends: 'off' + - skip nesting membership: 'off' + - computation mode: MEMBEROF_REUSE_IF_POSSIBLE <-- + - Scope: DEFAULT_SUFFIX + - ExcludeScope: None + - Maximum return entries: None + + :id: 66d2ed29-5d14-487a-b28a-5660962c7c6c + :setup: Standalone instance + :steps: + 1. Configure memberof with 'memberOfAttr: member' <-- + 2. provision a set of entry + 3. configure test_slapi_memberof as described above + 4. check computed membership vs expected result + :expectedresults: + 1. Operation should succeed + 2. Operation should succeed + 3. Operation should succeed + 4. 
Operation should succeed + + DIT is : + e_1_parent_0 + - e_1_parent_1_0 + -- e_1_parent_1_1_0 + --- e_1_parent_1_1_1_0 + --- e_2_parent_1_1_1_0 + --- e_3_parent_1_1_1_0 + --- e_4_parent_1_1_1_0 + --- e_5_parent_1_1_1_0 + -- e_2_parent_1_1_0 + - e_2_parent_1_0 + -- e_1_parent_2_1_0 + -- e_2_parent_2_1_0 + --- e_1_parent_2_2_1_0 + -- e_3_parent_2_1_0 + -- e_4_parent_2_1_0 + e_2_parent_0 + - e_1_parent_2_0 + - e_2_parent_2_0 + - e_3_parent_2_0 + - e_4_parent_2_0 + e_3_parent_0 + - e_1_parent_3_0 + -- e_1_parent_1_3_0 + --- e_1_parent_1_1_3_0 + ---- e_1_parent_1_1_1_3_0 + """ + memberof = MemberOfPlugin(topo.standalone) + memberof.enable() + memberof.replace('memberOfGroupAttr', 'manager') + + # For the test memberOfAttr should differ from 'memberof' that is + # used in slapi_memberof call. We can not use a dummy attribute + # because it requires to be a DN syntax. Let's use 'member' + memberof.replace('memberOfAttr', 'member') + topo.standalone.restart() + + user = UserAccounts(topo.standalone, DEFAULT_SUFFIX) + + # First subtree + e_1_parent_0 = add_entry(topo.standalone, uid="e_1_parent_0") + + e_1_parent_1_0 = add_entry(topo.standalone, uid="e_1_parent_1_0", manager=[ensure_bytes(e_1_parent_0)]) + + e_1_parent_1_1_0 = add_entry(topo.standalone, uid="e_1_parent_1_1_0", manager=[ensure_bytes(e_1_parent_1_0)]) + + e_1_parent_1_1_1_0 = add_entry(topo.standalone, uid="e_1_parent_1_1_1_0", manager=[ensure_bytes(e_1_parent_1_1_0)]) + e_2_parent_1_1_1_0 = add_entry(topo.standalone, uid="e_2_parent_1_1_1_0", manager=[ensure_bytes(e_1_parent_1_1_0)]) + e_3_parent_1_1_1_0 = add_entry(topo.standalone, uid="e_3_parent_1_1_1_0", manager=[ensure_bytes(e_1_parent_1_1_0)]) + e_4_parent_1_1_1_0 = add_entry(topo.standalone, uid="e_4_parent_1_1_1_0", manager=[ensure_bytes(e_1_parent_1_1_0)]) + e_5_parent_1_1_1_0 = add_entry(topo.standalone, uid="e_5_parent_1_1_1_0", manager=[ensure_bytes(e_1_parent_1_1_0)]) + + e_2_parent_1_1_0 = add_entry(topo.standalone, uid="e_2_parent_1_1_0", 
manager=[ensure_bytes(e_1_parent_1_0)]) + + e_2_parent_1_0 = add_entry(topo.standalone, uid="e_2_parent_1_0", manager=[ensure_bytes(e_1_parent_0)]) + + e_1_parent_2_1_0 = add_entry(topo.standalone, uid="e_1_parent_2_1_0", manager=[ensure_bytes(e_2_parent_1_0)]) + e_2_parent_2_1_0 = add_entry(topo.standalone, uid="e_2_parent_2_1_0", manager=[ensure_bytes(e_2_parent_1_0)]) + e_1_parent_2_2_1_0 = add_entry(topo.standalone, uid="e_1_parent_2_2_1_0", manager=[ensure_bytes(e_2_parent_2_1_0)]) + e_3_parent_2_1_0 = add_entry(topo.standalone, uid="e_3_parent_2_1_0", manager=[ensure_bytes(e_2_parent_1_0)]) + e_4_parent_2_1_0 = add_entry(topo.standalone, uid="e_4_parent_2_1_0", manager=[ensure_bytes(e_2_parent_1_0)]) + + # 2nd subtree + e_2_parent_0 = add_entry(topo.standalone, uid="e_2_parent_0") + + e_1_parent_2_0 = add_entry(topo.standalone, uid="e_1_parent_2_0", manager=[ensure_bytes(e_2_parent_0)]) + e_2_parent_2_0 = add_entry(topo.standalone, uid="e_2_parent_2_0", manager=[ensure_bytes(e_2_parent_0)]) + e_3_parent_2_0 = add_entry(topo.standalone, uid="e_3_parent_2_0", manager=[ensure_bytes(e_2_parent_0)]) + e_4_parent_2_0 = add_entry(topo.standalone, uid="e_4_parent_2_0", manager=[ensure_bytes(e_2_parent_0)]) + + # third subtree + e_3_parent_0 = add_entry(topo.standalone, uid="e_3_parent_0") + + e_1_parent_3_0 = add_entry(topo.standalone, uid="e_1_parent_3_0", manager=[ensure_bytes(e_3_parent_0)]) + + e_1_parent_1_3_0 = add_entry(topo.standalone, uid="e_1_parent_1_3_0", manager=[ensure_bytes(e_1_parent_3_0)]) + + e_1_parent_1_1_3_0 = add_entry(topo.standalone, uid="e_1_parent_1_1_3_0", manager=[ensure_bytes(e_1_parent_1_3_0)]) + + e_1_parent_1_1_1_3_0 = add_entry(topo.standalone, uid="e_1_parent_1_1_1_3_0", manager=[ensure_bytes(e_1_parent_1_1_3_0)]) + + dn_config = 'cn=test_slapi_memberof,cn=plugins,cn=config' + topo.standalone.add_s(Entry((dn_config, {'objectclass': 'top nsSlapdPlugin extensibleObject'.split(), + 'cn': 'test_slapi_memberof', + 'nsslapd-pluginPath': 
'libtest_slapi_memberof-plugin', + 'nsslapd-pluginInitfunc': 'test_slapi_memberof_init', + 'nsslapd-pluginType': 'extendedop', + 'nsslapd-pluginEnabled': 'on', + 'nsslapd-plugin-depends-on-type': 'database', + 'nsslapd-pluginId': 'test_slapi_memberof-plugin', + 'slapimemberOfMemberDN': 'uid=test_user_11,ou=People,dc=example,dc=com', + 'slapimemberOfGroupAttr': 'manager', + 'slapimemberOfAttr': 'memberof', + 'slapimemberOfFlag': 'MEMBEROF_REUSE_IF_POSSIBLE', + 'slapimemberOfAllBackends': 'off', + 'slapimemberOfSkipNested': 'off', + 'slapimemberOfEntryScope': DEFAULT_SUFFIX, + 'slapimemberOfMaxGroup': '0', + 'nsslapd-pluginVersion': '2.3.2.202302131418git0e190fc3d', + 'nsslapd-pluginVendor': '389 Project', + 'nsslapd-pluginDescription': 'test_slapi_memberof extended operation plugin'}))) + topo.standalone.restart() + + # Check the first subtree + expected = [ e_1_parent_1_0, e_1_parent_1_1_0, e_1_parent_1_1_1_0, e_2_parent_1_1_1_0, e_3_parent_1_1_1_0, e_4_parent_1_1_1_0, e_5_parent_1_1_1_0, e_2_parent_1_1_0, e_2_parent_1_0, e_1_parent_2_1_0, e_2_parent_2_1_0, e_1_parent_2_2_1_0, e_3_parent_2_1_0, e_4_parent_2_1_0] + res = _extop_test_slapi_member(server=topo.standalone, dn=e_1_parent_0, relation="manager") + _check_res_vs_expected("first subtree", res, expected) + + # Check the second subtree + expected = [e_1_parent_2_0, e_2_parent_2_0, e_3_parent_2_0, e_4_parent_2_0] + res = _extop_test_slapi_member(server=topo.standalone, dn=e_2_parent_0, relation="manager") + _check_res_vs_expected("second subtree", res, expected) + + # Check the third subtree + expected = [e_1_parent_3_0, e_1_parent_1_3_0, e_1_parent_1_1_3_0, e_1_parent_1_1_1_3_0] + res = _extop_test_slapi_member(server=topo.standalone, dn=e_3_parent_0, relation="manager") + _check_res_vs_expected("third subtree", res, expected) + + # check e_1_parent_1_0 + expected = [e_1_parent_1_1_0, e_1_parent_1_1_1_0, e_2_parent_1_1_1_0, e_3_parent_1_1_1_0, e_4_parent_1_1_1_0, e_5_parent_1_1_1_0, e_2_parent_1_1_0] + res = 
_extop_test_slapi_member(server=topo.standalone, dn=e_1_parent_1_0, relation="manager") + _check_res_vs_expected("organisation reporting to e_1_parent_1_0", res, expected) + + # check e_1_parent_1_1_0 + expected = [e_1_parent_1_1_1_0, e_2_parent_1_1_1_0, e_3_parent_1_1_1_0, e_4_parent_1_1_1_0, e_5_parent_1_1_1_0] + res = _extop_test_slapi_member(server=topo.standalone, dn=e_1_parent_1_1_0, relation="manager") + _check_res_vs_expected("organisation reporting to e_1_parent_1_1_0", res, expected) + + # check e_2_parent_1_1_0 + expected = [EMPTY_RESULT] + res = _extop_test_slapi_member(server=topo.standalone, dn=e_2_parent_1_1_0, relation="manager") + _check_res_vs_expected("organisation reporting to e_2_parent_1_1_0", res, expected) + + # check e_2_parent_1_0 + expected = [e_1_parent_2_1_0, e_2_parent_2_1_0, e_1_parent_2_2_1_0, e_3_parent_2_1_0, e_4_parent_2_1_0] + res = _extop_test_slapi_member(server=topo.standalone, dn=e_2_parent_1_0, relation="manager") + _check_res_vs_expected("organisation reporting to e_2_parent_1_0", res, expected) + + # check e_2_parent_2_1_0 + expected = [e_1_parent_2_2_1_0] + res = _extop_test_slapi_member(server=topo.standalone, dn=e_2_parent_2_1_0, relation="manager") + _check_res_vs_expected("organisation reporting to e_2_parent_2_1_0", res, expected) + + # Check e_1_parent_3_0 + expected = [e_1_parent_1_3_0, e_1_parent_1_1_3_0, e_1_parent_1_1_1_3_0] + res = _extop_test_slapi_member(server=topo.standalone, dn=e_1_parent_3_0, relation="manager") + _check_res_vs_expected("organisation reporting to e_1_parent_3_0", res, expected) + + # Check e_1_parent_1_3_0 + expected = [e_1_parent_1_1_3_0, e_1_parent_1_1_1_3_0] + res = _extop_test_slapi_member(server=topo.standalone, dn=e_1_parent_1_3_0, relation="manager") + _check_res_vs_expected("organisation reporting to e_1_parent_1_3_0", res, expected) + + # Check e_1_parent_1_1_3_0 + expected = [e_1_parent_1_1_1_3_0] + res = _extop_test_slapi_member(server=topo.standalone, dn=e_1_parent_1_1_3_0, 
relation="manager") + _check_res_vs_expected("organisation reporting to e_1_parent_1_1_3_0", res, expected) + + # Check e_1_parent_1_1_1_3_0 + expected = [EMPTY_RESULT] + res = _extop_test_slapi_member(server=topo.standalone, dn=e_1_parent_1_1_1_3_0, relation="manager") + _check_res_vs_expected("organisation reporting to e_1_parent_1_1_1_3_0", res, expected) + + def fin(): + entries = [e_1_parent_0, e_1_parent_1_0, e_1_parent_1_1_0, e_1_parent_1_1_1_0, e_2_parent_1_1_1_0, e_3_parent_1_1_1_0, e_4_parent_1_1_1_0, e_5_parent_1_1_1_0, e_2_parent_1_1_0, e_2_parent_1_0, e_1_parent_2_1_0, e_2_parent_2_1_0, e_1_parent_2_2_1_0, e_3_parent_2_1_0, e_4_parent_2_1_0, e_2_parent_0, e_1_parent_2_0, e_2_parent_2_0, e_3_parent_2_0, e_4_parent_2_0, e_3_parent_0, e_1_parent_3_0, e_1_parent_1_3_0, e_1_parent_1_1_3_0, e_1_parent_1_1_1_3_0] + for entry in entries: + topo.standalone.delete_s(entry) + topo.standalone.delete_s(dn_config) + + request.addfinalizer(fin) + +def test_slapi_memberof_reuse_if_possible_6(topo, request, install_test_plugin): + """ + Test that management hierarchy (manager) is computed with slapi_memberof + It requires slapi_memberof to reuse IF POSSIBLE the computed values + from memberof plugins. + Memberof plugin is enabled, but with a different 'slapimemberOfEntryScope' attr + it falls back to regular computation (recompute) + with following parameters + - member attribute: memberof + - membership attribute: 'manager' + - span over all backends: 'off' + - skip nesting membership: 'off' + - computation mode: MEMBEROF_REUSE_IF_POSSIBLE <-- + - Scope: ou=people,dc=example,dc=com <-- + - ExcludeScope: None + - Maximum return entries: None + + :id: 4fbefa39-6c06-47c4-8818-a102944b7f29 + :setup: Standalone instance + :steps: + 1. Configure memberof without 'memberOfEntryScope' <-- + 2. provision a set of entry + 3. configure test_slapi_memberof as described above + 4. check computed membership vs expected result + :expectedresults: + 1. Operation should succeed + 2. 
Operation should succeed + 3. Operation should succeed + 4. Operation should succeed + + DIT is : + e_1_parent_0 + - e_1_parent_1_0 + -- e_1_parent_1_1_0 + --- e_1_parent_1_1_1_0 + --- e_2_parent_1_1_1_0 + --- e_3_parent_1_1_1_0 + --- e_4_parent_1_1_1_0 + --- e_5_parent_1_1_1_0 + -- e_2_parent_1_1_0 + - e_2_parent_1_0 + -- e_1_parent_2_1_0 + -- e_2_parent_2_1_0 + --- e_1_parent_2_2_1_0 + -- e_3_parent_2_1_0 + -- e_4_parent_2_1_0 + e_2_parent_0 + - e_1_parent_2_0 + - e_2_parent_2_0 + - e_3_parent_2_0 + - e_4_parent_2_0 + e_3_parent_0 + - e_1_parent_3_0 + -- e_1_parent_1_3_0 + --- e_1_parent_1_1_3_0 + ---- e_1_parent_1_1_1_3_0 + """ + memberof = MemberOfPlugin(topo.standalone) + memberof.enable() + memberof.replace('memberOfGroupAttr', 'manager') + topo.standalone.restart() + + user = UserAccounts(topo.standalone, DEFAULT_SUFFIX) + + # First subtree + e_1_parent_0 = add_entry(topo.standalone, uid="e_1_parent_0") + + e_1_parent_1_0 = add_entry(topo.standalone, uid="e_1_parent_1_0", manager=[ensure_bytes(e_1_parent_0)]) + + e_1_parent_1_1_0 = add_entry(topo.standalone, uid="e_1_parent_1_1_0", manager=[ensure_bytes(e_1_parent_1_0)]) + + e_1_parent_1_1_1_0 = add_entry(topo.standalone, uid="e_1_parent_1_1_1_0", manager=[ensure_bytes(e_1_parent_1_1_0)]) + e_2_parent_1_1_1_0 = add_entry(topo.standalone, uid="e_2_parent_1_1_1_0", manager=[ensure_bytes(e_1_parent_1_1_0)]) + e_3_parent_1_1_1_0 = add_entry(topo.standalone, uid="e_3_parent_1_1_1_0", manager=[ensure_bytes(e_1_parent_1_1_0)]) + e_4_parent_1_1_1_0 = add_entry(topo.standalone, uid="e_4_parent_1_1_1_0", manager=[ensure_bytes(e_1_parent_1_1_0)]) + e_5_parent_1_1_1_0 = add_entry(topo.standalone, uid="e_5_parent_1_1_1_0", manager=[ensure_bytes(e_1_parent_1_1_0)]) + + e_2_parent_1_1_0 = add_entry(topo.standalone, uid="e_2_parent_1_1_0", manager=[ensure_bytes(e_1_parent_1_0)]) + + e_2_parent_1_0 = add_entry(topo.standalone, uid="e_2_parent_1_0", manager=[ensure_bytes(e_1_parent_0)]) + + e_1_parent_2_1_0 = 
add_entry(topo.standalone, uid="e_1_parent_2_1_0", manager=[ensure_bytes(e_2_parent_1_0)]) + e_2_parent_2_1_0 = add_entry(topo.standalone, uid="e_2_parent_2_1_0", manager=[ensure_bytes(e_2_parent_1_0)]) + e_1_parent_2_2_1_0 = add_entry(topo.standalone, uid="e_1_parent_2_2_1_0", manager=[ensure_bytes(e_2_parent_2_1_0)]) + e_3_parent_2_1_0 = add_entry(topo.standalone, uid="e_3_parent_2_1_0", manager=[ensure_bytes(e_2_parent_1_0)]) + e_4_parent_2_1_0 = add_entry(topo.standalone, uid="e_4_parent_2_1_0", manager=[ensure_bytes(e_2_parent_1_0)]) + + # 2nd subtree + e_2_parent_0 = add_entry(topo.standalone, uid="e_2_parent_0") + + e_1_parent_2_0 = add_entry(topo.standalone, uid="e_1_parent_2_0", manager=[ensure_bytes(e_2_parent_0)]) + e_2_parent_2_0 = add_entry(topo.standalone, uid="e_2_parent_2_0", manager=[ensure_bytes(e_2_parent_0)]) + e_3_parent_2_0 = add_entry(topo.standalone, uid="e_3_parent_2_0", manager=[ensure_bytes(e_2_parent_0)]) + e_4_parent_2_0 = add_entry(topo.standalone, uid="e_4_parent_2_0", manager=[ensure_bytes(e_2_parent_0)]) + + # third subtree + e_3_parent_0 = add_entry(topo.standalone, uid="e_3_parent_0") + + e_1_parent_3_0 = add_entry(topo.standalone, uid="e_1_parent_3_0", manager=[ensure_bytes(e_3_parent_0)]) + + e_1_parent_1_3_0 = add_entry(topo.standalone, uid="e_1_parent_1_3_0", manager=[ensure_bytes(e_1_parent_3_0)]) + + e_1_parent_1_1_3_0 = add_entry(topo.standalone, uid="e_1_parent_1_1_3_0", manager=[ensure_bytes(e_1_parent_1_3_0)]) + + e_1_parent_1_1_1_3_0 = add_entry(topo.standalone, uid="e_1_parent_1_1_1_3_0", manager=[ensure_bytes(e_1_parent_1_1_3_0)]) + + dn_config = 'cn=test_slapi_memberof,cn=plugins,cn=config' + topo.standalone.add_s(Entry((dn_config, {'objectclass': 'top nsSlapdPlugin extensibleObject'.split(), + 'cn': 'test_slapi_memberof', + 'nsslapd-pluginPath': 'libtest_slapi_memberof-plugin', + 'nsslapd-pluginInitfunc': 'test_slapi_memberof_init', + 'nsslapd-pluginType': 'extendedop', + 'nsslapd-pluginEnabled': 'on', + 
'nsslapd-plugin-depends-on-type': 'database', + 'nsslapd-pluginId': 'test_slapi_memberof-plugin', + 'slapimemberOfMemberDN': 'uid=test_user_11,ou=People,dc=example,dc=com', + 'slapimemberOfGroupAttr': 'manager', + 'slapimemberOfAttr': 'memberof', + 'slapimemberOfFlag': 'MEMBEROF_REUSE_IF_POSSIBLE', + 'slapimemberOfAllBackends': 'off', + 'slapimemberOfSkipNested': 'off', + 'slapimemberOfEntryScope': "ou=People,%s" % DEFAULT_SUFFIX, + 'slapimemberOfMaxGroup': '0', + 'nsslapd-pluginVersion': '2.3.2.202302131418git0e190fc3d', + 'nsslapd-pluginVendor': '389 Project', + 'nsslapd-pluginDescription': 'test_slapi_memberof extended operation plugin'}))) + topo.standalone.restart() + + # Check the first subtree + expected = [ e_1_parent_1_0, e_1_parent_1_1_0, e_1_parent_1_1_1_0, e_2_parent_1_1_1_0, e_3_parent_1_1_1_0, e_4_parent_1_1_1_0, e_5_parent_1_1_1_0, e_2_parent_1_1_0, e_2_parent_1_0, e_1_parent_2_1_0, e_2_parent_2_1_0, e_1_parent_2_2_1_0, e_3_parent_2_1_0, e_4_parent_2_1_0] + res = _extop_test_slapi_member(server=topo.standalone, dn=e_1_parent_0, relation="manager") + _check_res_vs_expected("first subtree", res, expected) + + # Check the second subtree + expected = [e_1_parent_2_0, e_2_parent_2_0, e_3_parent_2_0, e_4_parent_2_0] + res = _extop_test_slapi_member(server=topo.standalone, dn=e_2_parent_0, relation="manager") + _check_res_vs_expected("second subtree", res, expected) + + # Check the third subtree + expected = [e_1_parent_3_0, e_1_parent_1_3_0, e_1_parent_1_1_3_0, e_1_parent_1_1_1_3_0] + res = _extop_test_slapi_member(server=topo.standalone, dn=e_3_parent_0, relation="manager") + _check_res_vs_expected("third subtree", res, expected) + + # check e_1_parent_1_0 + expected = [e_1_parent_1_1_0, e_1_parent_1_1_1_0, e_2_parent_1_1_1_0, e_3_parent_1_1_1_0, e_4_parent_1_1_1_0, e_5_parent_1_1_1_0, e_2_parent_1_1_0] + res = _extop_test_slapi_member(server=topo.standalone, dn=e_1_parent_1_0, relation="manager") + _check_res_vs_expected("organisation reporting to 
e_1_parent_1_0", res, expected) + + # check e_1_parent_1_1_0 + expected = [e_1_parent_1_1_1_0, e_2_parent_1_1_1_0, e_3_parent_1_1_1_0, e_4_parent_1_1_1_0, e_5_parent_1_1_1_0] + res = _extop_test_slapi_member(server=topo.standalone, dn=e_1_parent_1_1_0, relation="manager") + _check_res_vs_expected("organisation reporting to e_1_parent_1_1_0", res, expected) + + # check e_2_parent_1_1_0 + expected = [EMPTY_RESULT] + res = _extop_test_slapi_member(server=topo.standalone, dn=e_2_parent_1_1_0, relation="manager") + _check_res_vs_expected("organisation reporting to e_2_parent_1_1_0", res, expected) + + # check e_2_parent_1_0 + expected = [e_1_parent_2_1_0, e_2_parent_2_1_0, e_1_parent_2_2_1_0, e_3_parent_2_1_0, e_4_parent_2_1_0] + res = _extop_test_slapi_member(server=topo.standalone, dn=e_2_parent_1_0, relation="manager") + _check_res_vs_expected("organisation reporting to e_2_parent_1_0", res, expected) + + # check e_2_parent_2_1_0 + expected = [e_1_parent_2_2_1_0] + res = _extop_test_slapi_member(server=topo.standalone, dn=e_2_parent_2_1_0, relation="manager") + _check_res_vs_expected("organisation reporting to e_2_parent_2_1_0", res, expected) + + # Check e_1_parent_3_0 + expected = [e_1_parent_1_3_0, e_1_parent_1_1_3_0, e_1_parent_1_1_1_3_0] + res = _extop_test_slapi_member(server=topo.standalone, dn=e_1_parent_3_0, relation="manager") + _check_res_vs_expected("organisation reporting to e_1_parent_3_0", res, expected) + + # Check e_1_parent_1_3_0 + expected = [e_1_parent_1_1_3_0, e_1_parent_1_1_1_3_0] + res = _extop_test_slapi_member(server=topo.standalone, dn=e_1_parent_1_3_0, relation="manager") + _check_res_vs_expected("organisation reporting to e_1_parent_1_3_0", res, expected) + + # Check e_1_parent_1_1_3_0 + expected = [e_1_parent_1_1_1_3_0] + res = _extop_test_slapi_member(server=topo.standalone, dn=e_1_parent_1_1_3_0, relation="manager") + _check_res_vs_expected("organisation reporting to e_1_parent_1_1_3_0", res, expected) + + # Check e_1_parent_1_1_1_3_0 + 
def test_slapi_memberof_reuse_if_possible_7(topo, request, install_test_plugin):
    """Check that the management hierarchy ('manager') is computed by slapi_memberof.

    slapi_memberof is asked to reuse IF POSSIBLE the values computed by the
    memberof plugin.  The memberof plugin is enabled but with a different
    'memberOfEntryScope', so slapi_memberof must fall back to a regular
    computation (recompute), with the following parameters:
         - member attribute: memberof
         - membership attribute: 'manager'
         - span over all backends: 'off'
         - skip nesting membership: 'off'
         - computation mode: MEMBEROF_REUSE_IF_POSSIBLE <--
         - Scope: ou=people,dc=example,dc=com <--
         - ExcludeScope: None
         - Maximum return entries: None

    :id: b8cfca23-742f-44f0-8bb7-f93371954d40
    :setup: Standalone instance
    :steps:
        1. Configure memberof with 'memberOfEntryScope: ou=groups,dc=example,dc=com' <--
        2. provision a set of entry
        3. configure test_slapi_memberof as described above
        4. check computed membership vs expected result
    :expectedresults:
        1. Operation should succeed
        2. Operation should succeed
        3. Operation should succeed
        4. Operation should succeed

    DIT is :
    e_1_parent_0
    - e_1_parent_1_0
    -- e_1_parent_1_1_0
    --- e_1_parent_1_1_1_0
    --- e_2_parent_1_1_1_0
    --- e_3_parent_1_1_1_0
    --- e_4_parent_1_1_1_0
    --- e_5_parent_1_1_1_0
    -- e_2_parent_1_1_0
    - e_2_parent_1_0
    -- e_1_parent_2_1_0
    -- e_2_parent_2_1_0
    --- e_1_parent_2_2_1_0
    -- e_3_parent_2_1_0
    -- e_4_parent_2_1_0
    e_2_parent_0
    - e_1_parent_2_0
    - e_2_parent_2_0
    - e_3_parent_2_0
    - e_4_parent_2_0
    e_3_parent_0
    - e_1_parent_3_0
    -- e_1_parent_1_3_0
    --- e_1_parent_1_1_3_0
    ---- e_1_parent_1_1_1_3_0
    """
    # Enable memberof with a scope (ou=groups) that does NOT match the
    # slapi_memberof scope (ou=People), so reuse is impossible.
    memberof = MemberOfPlugin(topo.standalone)
    memberof.enable()
    memberof.replace('memberOfGroupAttr', 'manager')
    memberof.replace('memberOfAllBackends', 'off')
    memberof.replace('memberOfSkipNested', 'off')
    memberof.replace('memberOfEntryScope', 'ou=groups,dc=example,dc=com')
    topo.standalone.restart()

    # NOTE(review): the original also built an unused UserAccounts(topo.standalone,
    # DEFAULT_SUFFIX) handle here; it had no side effect and was dropped.

    # First subtree
    e_1_parent_0 = add_entry(topo.standalone, uid="e_1_parent_0")
    e_1_parent_1_0 = add_entry(topo.standalone, uid="e_1_parent_1_0", manager=[ensure_bytes(e_1_parent_0)])
    e_1_parent_1_1_0 = add_entry(topo.standalone, uid="e_1_parent_1_1_0", manager=[ensure_bytes(e_1_parent_1_0)])
    e_1_parent_1_1_1_0 = add_entry(topo.standalone, uid="e_1_parent_1_1_1_0", manager=[ensure_bytes(e_1_parent_1_1_0)])
    e_2_parent_1_1_1_0 = add_entry(topo.standalone, uid="e_2_parent_1_1_1_0", manager=[ensure_bytes(e_1_parent_1_1_0)])
    e_3_parent_1_1_1_0 = add_entry(topo.standalone, uid="e_3_parent_1_1_1_0", manager=[ensure_bytes(e_1_parent_1_1_0)])
    e_4_parent_1_1_1_0 = add_entry(topo.standalone, uid="e_4_parent_1_1_1_0", manager=[ensure_bytes(e_1_parent_1_1_0)])
    e_5_parent_1_1_1_0 = add_entry(topo.standalone, uid="e_5_parent_1_1_1_0", manager=[ensure_bytes(e_1_parent_1_1_0)])
    e_2_parent_1_1_0 = add_entry(topo.standalone, uid="e_2_parent_1_1_0", manager=[ensure_bytes(e_1_parent_1_0)])
    e_2_parent_1_0 = add_entry(topo.standalone, uid="e_2_parent_1_0", manager=[ensure_bytes(e_1_parent_0)])
    e_1_parent_2_1_0 = add_entry(topo.standalone, uid="e_1_parent_2_1_0", manager=[ensure_bytes(e_2_parent_1_0)])
    e_2_parent_2_1_0 = add_entry(topo.standalone, uid="e_2_parent_2_1_0", manager=[ensure_bytes(e_2_parent_1_0)])
    e_1_parent_2_2_1_0 = add_entry(topo.standalone, uid="e_1_parent_2_2_1_0", manager=[ensure_bytes(e_2_parent_2_1_0)])
    e_3_parent_2_1_0 = add_entry(topo.standalone, uid="e_3_parent_2_1_0", manager=[ensure_bytes(e_2_parent_1_0)])
    e_4_parent_2_1_0 = add_entry(topo.standalone, uid="e_4_parent_2_1_0", manager=[ensure_bytes(e_2_parent_1_0)])

    # 2nd subtree
    e_2_parent_0 = add_entry(topo.standalone, uid="e_2_parent_0")
    e_1_parent_2_0 = add_entry(topo.standalone, uid="e_1_parent_2_0", manager=[ensure_bytes(e_2_parent_0)])
    e_2_parent_2_0 = add_entry(topo.standalone, uid="e_2_parent_2_0", manager=[ensure_bytes(e_2_parent_0)])
    e_3_parent_2_0 = add_entry(topo.standalone, uid="e_3_parent_2_0", manager=[ensure_bytes(e_2_parent_0)])
    e_4_parent_2_0 = add_entry(topo.standalone, uid="e_4_parent_2_0", manager=[ensure_bytes(e_2_parent_0)])

    # third subtree
    e_3_parent_0 = add_entry(topo.standalone, uid="e_3_parent_0")
    e_1_parent_3_0 = add_entry(topo.standalone, uid="e_1_parent_3_0", manager=[ensure_bytes(e_3_parent_0)])
    e_1_parent_1_3_0 = add_entry(topo.standalone, uid="e_1_parent_1_3_0", manager=[ensure_bytes(e_1_parent_3_0)])
    e_1_parent_1_1_3_0 = add_entry(topo.standalone, uid="e_1_parent_1_1_3_0", manager=[ensure_bytes(e_1_parent_1_3_0)])
    e_1_parent_1_1_1_3_0 = add_entry(topo.standalone, uid="e_1_parent_1_1_1_3_0", manager=[ensure_bytes(e_1_parent_1_1_3_0)])

    # Register the test extended-operation plugin with the parameters under test.
    dn_config = 'cn=test_slapi_memberof,cn=plugins,cn=config'
    topo.standalone.add_s(Entry((dn_config, {
        'objectclass': 'top nsSlapdPlugin extensibleObject'.split(),
        'cn': 'test_slapi_memberof',
        'nsslapd-pluginPath': 'libtest_slapi_memberof-plugin',
        'nsslapd-pluginInitfunc': 'test_slapi_memberof_init',
        'nsslapd-pluginType': 'extendedop',
        'nsslapd-pluginEnabled': 'on',
        'nsslapd-plugin-depends-on-type': 'database',
        'nsslapd-pluginId': 'test_slapi_memberof-plugin',
        'slapimemberOfMemberDN': 'uid=test_user_11,ou=People,dc=example,dc=com',
        'slapimemberOfGroupAttr': 'manager',
        'slapimemberOfAttr': 'memberof',
        'slapimemberOfFlag': 'MEMBEROF_REUSE_IF_POSSIBLE',
        'slapimemberOfAllBackends': 'off',
        'slapimemberOfSkipNested': 'off',
        'slapimemberOfEntryScope': "ou=People,%s" % DEFAULT_SUFFIX,
        'slapimemberOfMaxGroup': '0',
        'nsslapd-pluginVersion': '2.3.2.202302131418git0e190fc3d',
        'nsslapd-pluginVendor': '389 Project',
        'nsslapd-pluginDescription': 'test_slapi_memberof extended operation plugin'})))
    topo.standalone.restart()

    # (label, root DN, expected transitive reports) — one row per checked entry,
    # replacing the original run of identical copy/pasted stanzas.
    checks = [
        ("first subtree", e_1_parent_0,
         [e_1_parent_1_0, e_1_parent_1_1_0, e_1_parent_1_1_1_0, e_2_parent_1_1_1_0,
          e_3_parent_1_1_1_0, e_4_parent_1_1_1_0, e_5_parent_1_1_1_0, e_2_parent_1_1_0,
          e_2_parent_1_0, e_1_parent_2_1_0, e_2_parent_2_1_0, e_1_parent_2_2_1_0,
          e_3_parent_2_1_0, e_4_parent_2_1_0]),
        ("second subtree", e_2_parent_0,
         [e_1_parent_2_0, e_2_parent_2_0, e_3_parent_2_0, e_4_parent_2_0]),
        ("third subtree", e_3_parent_0,
         [e_1_parent_3_0, e_1_parent_1_3_0, e_1_parent_1_1_3_0, e_1_parent_1_1_1_3_0]),
        ("organisation reporting to e_1_parent_1_0", e_1_parent_1_0,
         [e_1_parent_1_1_0, e_1_parent_1_1_1_0, e_2_parent_1_1_1_0, e_3_parent_1_1_1_0,
          e_4_parent_1_1_1_0, e_5_parent_1_1_1_0, e_2_parent_1_1_0]),
        ("organisation reporting to e_1_parent_1_1_0", e_1_parent_1_1_0,
         [e_1_parent_1_1_1_0, e_2_parent_1_1_1_0, e_3_parent_1_1_1_0,
          e_4_parent_1_1_1_0, e_5_parent_1_1_1_0]),
        ("organisation reporting to e_2_parent_1_1_0", e_2_parent_1_1_0,
         [EMPTY_RESULT]),
        ("organisation reporting to e_2_parent_1_0", e_2_parent_1_0,
         [e_1_parent_2_1_0, e_2_parent_2_1_0, e_1_parent_2_2_1_0,
          e_3_parent_2_1_0, e_4_parent_2_1_0]),
        ("organisation reporting to e_2_parent_2_1_0", e_2_parent_2_1_0,
         [e_1_parent_2_2_1_0]),
        ("organisation reporting to e_1_parent_3_0", e_1_parent_3_0,
         [e_1_parent_1_3_0, e_1_parent_1_1_3_0, e_1_parent_1_1_1_3_0]),
        ("organisation reporting to e_1_parent_1_3_0", e_1_parent_1_3_0,
         [e_1_parent_1_1_3_0, e_1_parent_1_1_1_3_0]),
        ("organisation reporting to e_1_parent_1_1_3_0", e_1_parent_1_1_3_0,
         [e_1_parent_1_1_1_3_0]),
        ("organisation reporting to e_1_parent_1_1_1_3_0", e_1_parent_1_1_1_3_0,
         [EMPTY_RESULT]),
    ]
    for label, dn, expected in checks:
        res = _extop_test_slapi_member(server=topo.standalone, dn=dn, relation="manager")
        _check_res_vs_expected(label, res, expected)

    def fin():
        # Remove every provisioned entry and the test plugin config entry.
        entries = [e_1_parent_0, e_1_parent_1_0, e_1_parent_1_1_0, e_1_parent_1_1_1_0,
                   e_2_parent_1_1_1_0, e_3_parent_1_1_1_0, e_4_parent_1_1_1_0,
                   e_5_parent_1_1_1_0, e_2_parent_1_1_0, e_2_parent_1_0,
                   e_1_parent_2_1_0, e_2_parent_2_1_0, e_1_parent_2_2_1_0,
                   e_3_parent_2_1_0, e_4_parent_2_1_0, e_2_parent_0, e_1_parent_2_0,
                   e_2_parent_2_0, e_3_parent_2_0, e_4_parent_2_0, e_3_parent_0,
                   e_1_parent_3_0, e_1_parent_1_3_0, e_1_parent_1_1_3_0,
                   e_1_parent_1_1_1_3_0]
        for entry in entries:
            topo.standalone.delete_s(entry)
        topo.standalone.delete_s(dn_config)

    request.addfinalizer(fin)
def test_slapi_memberof_reuse_if_possible_8(topo, request, install_test_plugin):
    """Check that the management hierarchy ('manager') is computed by slapi_memberof.

    slapi_memberof is asked to reuse IF POSSIBLE the values computed by the
    memberof plugin.  The memberof plugin is enabled but with a different
    'memberOfEntryScope' set (two values, neither matching exactly), so
    slapi_memberof must fall back to a regular computation (recompute), with
    the following parameters:
         - member attribute: memberof
         - membership attribute: 'manager'
         - span over all backends: 'off'
         - skip nesting membership: 'off'
         - computation mode: MEMBEROF_REUSE_IF_POSSIBLE <--
         - Scope: ou=people,dc=example,dc=com <--
         - ExcludeScope: None
         - Maximum return entries: None

    :id: 5c990df5-8aa6-44c6-a9e1-f161c3e01d1e
    :setup: Standalone instance
    :steps:
        1. Configure memberof with
           - 'memberOfEntryScope: ou=groups,dc=example,dc=com' <--
           - 'memberOfEntryScope: ou=people,dc=example,dc=com' <--
        2. provision a set of entry
        3. configure test_slapi_memberof as described above
        4. check computed membership vs expected result
    :expectedresults:
        1. Operation should succeed
        2. Operation should succeed
        3. Operation should succeed
        4. Operation should succeed

    DIT is :
    e_1_parent_0
    - e_1_parent_1_0
    -- e_1_parent_1_1_0
    --- e_1_parent_1_1_1_0
    --- e_2_parent_1_1_1_0
    --- e_3_parent_1_1_1_0
    --- e_4_parent_1_1_1_0
    --- e_5_parent_1_1_1_0
    -- e_2_parent_1_1_0
    - e_2_parent_1_0
    -- e_1_parent_2_1_0
    -- e_2_parent_2_1_0
    --- e_1_parent_2_2_1_0
    -- e_3_parent_2_1_0
    -- e_4_parent_2_1_0
    e_2_parent_0
    - e_1_parent_2_0
    - e_2_parent_2_0
    - e_3_parent_2_0
    - e_4_parent_2_0
    e_3_parent_0
    - e_1_parent_3_0
    -- e_1_parent_1_3_0
    --- e_1_parent_1_1_3_0
    ---- e_1_parent_1_1_1_3_0
    """
    memberof = MemberOfPlugin(topo.standalone)
    memberof.enable()
    memberof.replace('memberOfGroupAttr', 'manager')
    memberof.replace('memberOfAllBackends', 'off')
    memberof.replace('memberOfSkipNested', 'off')
    # The original first replaced memberOfEntryScope with DEFAULT_SUFFIX and
    # immediately overwrote it on the next line; that dead write is dropped.
    memberof.replace('memberOfEntryScope', 'ou=groups,dc=example,dc=com')
    memberof.add('memberOfEntryScope', 'ou=people,dc=example,dc=com')
    topo.standalone.restart()

    # First subtree
    e_1_parent_0 = add_entry(topo.standalone, uid="e_1_parent_0")
    e_1_parent_1_0 = add_entry(topo.standalone, uid="e_1_parent_1_0", manager=[ensure_bytes(e_1_parent_0)])
    e_1_parent_1_1_0 = add_entry(topo.standalone, uid="e_1_parent_1_1_0", manager=[ensure_bytes(e_1_parent_1_0)])
    e_1_parent_1_1_1_0 = add_entry(topo.standalone, uid="e_1_parent_1_1_1_0", manager=[ensure_bytes(e_1_parent_1_1_0)])
    e_2_parent_1_1_1_0 = add_entry(topo.standalone, uid="e_2_parent_1_1_1_0", manager=[ensure_bytes(e_1_parent_1_1_0)])
    e_3_parent_1_1_1_0 = add_entry(topo.standalone, uid="e_3_parent_1_1_1_0", manager=[ensure_bytes(e_1_parent_1_1_0)])
    e_4_parent_1_1_1_0 = add_entry(topo.standalone, uid="e_4_parent_1_1_1_0", manager=[ensure_bytes(e_1_parent_1_1_0)])
    e_5_parent_1_1_1_0 = add_entry(topo.standalone, uid="e_5_parent_1_1_1_0", manager=[ensure_bytes(e_1_parent_1_1_0)])
    e_2_parent_1_1_0 = add_entry(topo.standalone, uid="e_2_parent_1_1_0", manager=[ensure_bytes(e_1_parent_1_0)])
    e_2_parent_1_0 = add_entry(topo.standalone, uid="e_2_parent_1_0", manager=[ensure_bytes(e_1_parent_0)])
    e_1_parent_2_1_0 = add_entry(topo.standalone, uid="e_1_parent_2_1_0", manager=[ensure_bytes(e_2_parent_1_0)])
    e_2_parent_2_1_0 = add_entry(topo.standalone, uid="e_2_parent_2_1_0", manager=[ensure_bytes(e_2_parent_1_0)])
    e_1_parent_2_2_1_0 = add_entry(topo.standalone, uid="e_1_parent_2_2_1_0", manager=[ensure_bytes(e_2_parent_2_1_0)])
    e_3_parent_2_1_0 = add_entry(topo.standalone, uid="e_3_parent_2_1_0", manager=[ensure_bytes(e_2_parent_1_0)])
    e_4_parent_2_1_0 = add_entry(topo.standalone, uid="e_4_parent_2_1_0", manager=[ensure_bytes(e_2_parent_1_0)])

    # 2nd subtree
    e_2_parent_0 = add_entry(topo.standalone, uid="e_2_parent_0")
    e_1_parent_2_0 = add_entry(topo.standalone, uid="e_1_parent_2_0", manager=[ensure_bytes(e_2_parent_0)])
    e_2_parent_2_0 = add_entry(topo.standalone, uid="e_2_parent_2_0", manager=[ensure_bytes(e_2_parent_0)])
    e_3_parent_2_0 = add_entry(topo.standalone, uid="e_3_parent_2_0", manager=[ensure_bytes(e_2_parent_0)])
    e_4_parent_2_0 = add_entry(topo.standalone, uid="e_4_parent_2_0", manager=[ensure_bytes(e_2_parent_0)])

    # third subtree
    e_3_parent_0 = add_entry(topo.standalone, uid="e_3_parent_0")
    e_1_parent_3_0 = add_entry(topo.standalone, uid="e_1_parent_3_0", manager=[ensure_bytes(e_3_parent_0)])
    e_1_parent_1_3_0 = add_entry(topo.standalone, uid="e_1_parent_1_3_0", manager=[ensure_bytes(e_1_parent_3_0)])
    e_1_parent_1_1_3_0 = add_entry(topo.standalone, uid="e_1_parent_1_1_3_0", manager=[ensure_bytes(e_1_parent_1_3_0)])
    e_1_parent_1_1_1_3_0 = add_entry(topo.standalone, uid="e_1_parent_1_1_1_3_0", manager=[ensure_bytes(e_1_parent_1_1_3_0)])

    # Register the test extended-operation plugin with the parameters under test.
    dn_config = 'cn=test_slapi_memberof,cn=plugins,cn=config'
    topo.standalone.add_s(Entry((dn_config, {
        'objectclass': 'top nsSlapdPlugin extensibleObject'.split(),
        'cn': 'test_slapi_memberof',
        'nsslapd-pluginPath': 'libtest_slapi_memberof-plugin',
        'nsslapd-pluginInitfunc': 'test_slapi_memberof_init',
        'nsslapd-pluginType': 'extendedop',
        'nsslapd-pluginEnabled': 'on',
        'nsslapd-plugin-depends-on-type': 'database',
        'nsslapd-pluginId': 'test_slapi_memberof-plugin',
        'slapimemberOfMemberDN': 'uid=test_user_11,ou=People,dc=example,dc=com',
        'slapimemberOfGroupAttr': 'manager',
        'slapimemberOfAttr': 'memberof',
        'slapimemberOfFlag': 'MEMBEROF_REUSE_IF_POSSIBLE',
        'slapimemberOfAllBackends': 'off',
        'slapimemberOfSkipNested': 'off',
        'slapimemberOfEntryScope': "ou=People,%s" % DEFAULT_SUFFIX,
        'slapimemberOfMaxGroup': '0',
        'nsslapd-pluginVersion': '2.3.2.202302131418git0e190fc3d',
        'nsslapd-pluginVendor': '389 Project',
        'nsslapd-pluginDescription': 'test_slapi_memberof extended operation plugin'})))
    topo.standalone.restart()

    # (label, root DN, expected transitive reports) — one row per checked entry,
    # replacing the original run of identical copy/pasted stanzas.
    checks = [
        ("first subtree", e_1_parent_0,
         [e_1_parent_1_0, e_1_parent_1_1_0, e_1_parent_1_1_1_0, e_2_parent_1_1_1_0,
          e_3_parent_1_1_1_0, e_4_parent_1_1_1_0, e_5_parent_1_1_1_0, e_2_parent_1_1_0,
          e_2_parent_1_0, e_1_parent_2_1_0, e_2_parent_2_1_0, e_1_parent_2_2_1_0,
          e_3_parent_2_1_0, e_4_parent_2_1_0]),
        ("second subtree", e_2_parent_0,
         [e_1_parent_2_0, e_2_parent_2_0, e_3_parent_2_0, e_4_parent_2_0]),
        ("third subtree", e_3_parent_0,
         [e_1_parent_3_0, e_1_parent_1_3_0, e_1_parent_1_1_3_0, e_1_parent_1_1_1_3_0]),
        ("organisation reporting to e_1_parent_1_0", e_1_parent_1_0,
         [e_1_parent_1_1_0, e_1_parent_1_1_1_0, e_2_parent_1_1_1_0, e_3_parent_1_1_1_0,
          e_4_parent_1_1_1_0, e_5_parent_1_1_1_0, e_2_parent_1_1_0]),
        ("organisation reporting to e_1_parent_1_1_0", e_1_parent_1_1_0,
         [e_1_parent_1_1_1_0, e_2_parent_1_1_1_0, e_3_parent_1_1_1_0,
          e_4_parent_1_1_1_0, e_5_parent_1_1_1_0]),
        ("organisation reporting to e_2_parent_1_1_0", e_2_parent_1_1_0,
         [EMPTY_RESULT]),
        ("organisation reporting to e_2_parent_1_0", e_2_parent_1_0,
         [e_1_parent_2_1_0, e_2_parent_2_1_0, e_1_parent_2_2_1_0,
          e_3_parent_2_1_0, e_4_parent_2_1_0]),
        ("organisation reporting to e_2_parent_2_1_0", e_2_parent_2_1_0,
         [e_1_parent_2_2_1_0]),
        ("organisation reporting to e_1_parent_3_0", e_1_parent_3_0,
         [e_1_parent_1_3_0, e_1_parent_1_1_3_0, e_1_parent_1_1_1_3_0]),
        ("organisation reporting to e_1_parent_1_3_0", e_1_parent_1_3_0,
         [e_1_parent_1_1_3_0, e_1_parent_1_1_1_3_0]),
        ("organisation reporting to e_1_parent_1_1_3_0", e_1_parent_1_1_3_0,
         [e_1_parent_1_1_1_3_0]),
        ("organisation reporting to e_1_parent_1_1_1_3_0", e_1_parent_1_1_1_3_0,
         [EMPTY_RESULT]),
    ]
    for label, dn, expected in checks:
        res = _extop_test_slapi_member(server=topo.standalone, dn=dn, relation="manager")
        _check_res_vs_expected(label, res, expected)

    def fin():
        # Remove every provisioned entry and the test plugin config entry.
        entries = [e_1_parent_0, e_1_parent_1_0, e_1_parent_1_1_0, e_1_parent_1_1_1_0,
                   e_2_parent_1_1_1_0, e_3_parent_1_1_1_0, e_4_parent_1_1_1_0,
                   e_5_parent_1_1_1_0, e_2_parent_1_1_0, e_2_parent_1_0,
                   e_1_parent_2_1_0, e_2_parent_2_1_0, e_1_parent_2_2_1_0,
                   e_3_parent_2_1_0, e_4_parent_2_1_0, e_2_parent_0, e_1_parent_2_0,
                   e_2_parent_2_0, e_3_parent_2_0, e_4_parent_2_0, e_3_parent_0,
                   e_1_parent_3_0, e_1_parent_1_3_0, e_1_parent_1_1_3_0,
                   e_1_parent_1_1_1_3_0]
        for entry in entries:
            topo.standalone.delete_s(entry)
        topo.standalone.delete_s(dn_config)

    request.addfinalizer(fin)
def test_slapi_memberof_reuse_if_possible_9(topo, request, install_test_plugin):
    """Check that the management hierarchy ('manager') is computed by slapi_memberof.

    slapi_memberof is asked to reuse IF POSSIBLE the values computed by the
    memberof plugin.  The memberof plugin is enabled but slapi_memberof is
    configured with a 'slapimemberOfEntryScopeExcludeSubtree' that memberof
    does not have, so it falls back to a regular computation (recompute), with
    the following parameters:
         - member attribute: memberof
         - membership attribute: 'manager'
         - span over all backends: 'off'
         - skip nesting membership: 'off'
         - computation mode: MEMBEROF_REUSE_IF_POSSIBLE <--
         - Scope: None
         - ExcludeScope: ou=groups,dc=example,dc=com <--
         - Maximum return entries: None

    :id: 55ffe094-482b-4e2f-9d34-8b0a1ebd5248
    :setup: Standalone instance
    :steps:
        1. Configure memberof without 'memberOfEntryScopeExcludeSubtree' <--
        2. provision a set of entry
        3. configure test_slapi_memberof as described above
        4. check computed membership vs expected result
    :expectedresults:
        1. Operation should succeed
        2. Operation should succeed
        3. Operation should succeed
        4. Operation should succeed

    DIT is :
    e_1_parent_0
    - e_1_parent_1_0
    -- e_1_parent_1_1_0
    --- e_1_parent_1_1_1_0
    --- e_2_parent_1_1_1_0
    --- e_3_parent_1_1_1_0
    --- e_4_parent_1_1_1_0
    --- e_5_parent_1_1_1_0
    -- e_2_parent_1_1_0
    - e_2_parent_1_0
    -- e_1_parent_2_1_0
    -- e_2_parent_2_1_0
    --- e_1_parent_2_2_1_0
    -- e_3_parent_2_1_0
    -- e_4_parent_2_1_0
    e_2_parent_0
    - e_1_parent_2_0
    - e_2_parent_2_0
    - e_3_parent_2_0
    - e_4_parent_2_0
    e_3_parent_0
    - e_1_parent_3_0
    -- e_1_parent_1_3_0
    --- e_1_parent_1_1_3_0
    ---- e_1_parent_1_1_1_3_0
    """
    # memberof covers the whole suffix but defines no exclude subtree, so it
    # cannot be reused by a slapi_memberof config that excludes ou=Groups.
    memberof = MemberOfPlugin(topo.standalone)
    memberof.enable()
    memberof.replace('memberOfGroupAttr', 'manager')
    memberof.replace('memberOfAllBackends', 'off')
    memberof.replace('memberOfSkipNested', 'off')
    memberof.replace('memberOfEntryScope', DEFAULT_SUFFIX)
    topo.standalone.restart()

    # First subtree
    e_1_parent_0 = add_entry(topo.standalone, uid="e_1_parent_0")
    e_1_parent_1_0 = add_entry(topo.standalone, uid="e_1_parent_1_0", manager=[ensure_bytes(e_1_parent_0)])
    e_1_parent_1_1_0 = add_entry(topo.standalone, uid="e_1_parent_1_1_0", manager=[ensure_bytes(e_1_parent_1_0)])
    e_1_parent_1_1_1_0 = add_entry(topo.standalone, uid="e_1_parent_1_1_1_0", manager=[ensure_bytes(e_1_parent_1_1_0)])
    e_2_parent_1_1_1_0 = add_entry(topo.standalone, uid="e_2_parent_1_1_1_0", manager=[ensure_bytes(e_1_parent_1_1_0)])
    e_3_parent_1_1_1_0 = add_entry(topo.standalone, uid="e_3_parent_1_1_1_0", manager=[ensure_bytes(e_1_parent_1_1_0)])
    e_4_parent_1_1_1_0 = add_entry(topo.standalone, uid="e_4_parent_1_1_1_0", manager=[ensure_bytes(e_1_parent_1_1_0)])
    e_5_parent_1_1_1_0 = add_entry(topo.standalone, uid="e_5_parent_1_1_1_0", manager=[ensure_bytes(e_1_parent_1_1_0)])
    e_2_parent_1_1_0 = add_entry(topo.standalone, uid="e_2_parent_1_1_0", manager=[ensure_bytes(e_1_parent_1_0)])
    e_2_parent_1_0 = add_entry(topo.standalone, uid="e_2_parent_1_0", manager=[ensure_bytes(e_1_parent_0)])
    e_1_parent_2_1_0 = add_entry(topo.standalone, uid="e_1_parent_2_1_0", manager=[ensure_bytes(e_2_parent_1_0)])
    e_2_parent_2_1_0 = add_entry(topo.standalone, uid="e_2_parent_2_1_0", manager=[ensure_bytes(e_2_parent_1_0)])
    e_1_parent_2_2_1_0 = add_entry(topo.standalone, uid="e_1_parent_2_2_1_0", manager=[ensure_bytes(e_2_parent_2_1_0)])
    e_3_parent_2_1_0 = add_entry(topo.standalone, uid="e_3_parent_2_1_0", manager=[ensure_bytes(e_2_parent_1_0)])
    e_4_parent_2_1_0 = add_entry(topo.standalone, uid="e_4_parent_2_1_0", manager=[ensure_bytes(e_2_parent_1_0)])

    # 2nd subtree
    e_2_parent_0 = add_entry(topo.standalone, uid="e_2_parent_0")
    e_1_parent_2_0 = add_entry(topo.standalone, uid="e_1_parent_2_0", manager=[ensure_bytes(e_2_parent_0)])
    e_2_parent_2_0 = add_entry(topo.standalone, uid="e_2_parent_2_0", manager=[ensure_bytes(e_2_parent_0)])
    e_3_parent_2_0 = add_entry(topo.standalone, uid="e_3_parent_2_0", manager=[ensure_bytes(e_2_parent_0)])
    e_4_parent_2_0 = add_entry(topo.standalone, uid="e_4_parent_2_0", manager=[ensure_bytes(e_2_parent_0)])

    # third subtree
    e_3_parent_0 = add_entry(topo.standalone, uid="e_3_parent_0")
    e_1_parent_3_0 = add_entry(topo.standalone, uid="e_1_parent_3_0", manager=[ensure_bytes(e_3_parent_0)])
    e_1_parent_1_3_0 = add_entry(topo.standalone, uid="e_1_parent_1_3_0", manager=[ensure_bytes(e_1_parent_3_0)])
    e_1_parent_1_1_3_0 = add_entry(topo.standalone, uid="e_1_parent_1_1_3_0", manager=[ensure_bytes(e_1_parent_1_3_0)])
    e_1_parent_1_1_1_3_0 = add_entry(topo.standalone, uid="e_1_parent_1_1_1_3_0", manager=[ensure_bytes(e_1_parent_1_1_3_0)])

    # Register the test extended-operation plugin; note the exclude subtree
    # that differentiates it from the memberof plugin config above.
    dn_config = 'cn=test_slapi_memberof,cn=plugins,cn=config'
    topo.standalone.add_s(Entry((dn_config, {
        'objectclass': 'top nsSlapdPlugin extensibleObject'.split(),
        'cn': 'test_slapi_memberof',
        'nsslapd-pluginPath': 'libtest_slapi_memberof-plugin',
        'nsslapd-pluginInitfunc': 'test_slapi_memberof_init',
        'nsslapd-pluginType': 'extendedop',
        'nsslapd-pluginEnabled': 'on',
        'nsslapd-plugin-depends-on-type': 'database',
        'nsslapd-pluginId': 'test_slapi_memberof-plugin',
        'slapimemberOfMemberDN': 'uid=test_user_11,ou=People,dc=example,dc=com',
        'slapimemberOfGroupAttr': 'manager',
        'slapimemberOfAttr': 'memberof',
        'slapimemberOfFlag': 'MEMBEROF_REUSE_IF_POSSIBLE',
        'slapimemberOfAllBackends': 'off',
        'slapimemberOfSkipNested': 'off',
        'slapimemberOfEntryScope': DEFAULT_SUFFIX,
        'slapimemberOfEntryScopeExcludeSubtree': "ou=Groups,%s" % DEFAULT_SUFFIX,
        'slapimemberOfMaxGroup': '0',
        'nsslapd-pluginVersion': '2.3.2.202302131418git0e190fc3d',
        'nsslapd-pluginVendor': '389 Project',
        'nsslapd-pluginDescription': 'test_slapi_memberof extended operation plugin'})))
    topo.standalone.restart()

    # (label, root DN, expected transitive reports) — one row per checked entry,
    # replacing the original run of identical copy/pasted stanzas.
    checks = [
        ("first subtree", e_1_parent_0,
         [e_1_parent_1_0, e_1_parent_1_1_0, e_1_parent_1_1_1_0, e_2_parent_1_1_1_0,
          e_3_parent_1_1_1_0, e_4_parent_1_1_1_0, e_5_parent_1_1_1_0, e_2_parent_1_1_0,
          e_2_parent_1_0, e_1_parent_2_1_0, e_2_parent_2_1_0, e_1_parent_2_2_1_0,
          e_3_parent_2_1_0, e_4_parent_2_1_0]),
        ("second subtree", e_2_parent_0,
         [e_1_parent_2_0, e_2_parent_2_0, e_3_parent_2_0, e_4_parent_2_0]),
        ("third subtree", e_3_parent_0,
         [e_1_parent_3_0, e_1_parent_1_3_0, e_1_parent_1_1_3_0, e_1_parent_1_1_1_3_0]),
        ("organisation reporting to e_1_parent_1_0", e_1_parent_1_0,
         [e_1_parent_1_1_0, e_1_parent_1_1_1_0, e_2_parent_1_1_1_0, e_3_parent_1_1_1_0,
          e_4_parent_1_1_1_0, e_5_parent_1_1_1_0, e_2_parent_1_1_0]),
        ("organisation reporting to e_1_parent_1_1_0", e_1_parent_1_1_0,
         [e_1_parent_1_1_1_0, e_2_parent_1_1_1_0, e_3_parent_1_1_1_0,
          e_4_parent_1_1_1_0, e_5_parent_1_1_1_0]),
        ("organisation reporting to e_2_parent_1_1_0", e_2_parent_1_1_0,
         [EMPTY_RESULT]),
        ("organisation reporting to e_2_parent_1_0", e_2_parent_1_0,
         [e_1_parent_2_1_0, e_2_parent_2_1_0, e_1_parent_2_2_1_0,
          e_3_parent_2_1_0, e_4_parent_2_1_0]),
        ("organisation reporting to e_2_parent_2_1_0", e_2_parent_2_1_0,
         [e_1_parent_2_2_1_0]),
        ("organisation reporting to e_1_parent_3_0", e_1_parent_3_0,
         [e_1_parent_1_3_0, e_1_parent_1_1_3_0, e_1_parent_1_1_1_3_0]),
        ("organisation reporting to e_1_parent_1_3_0", e_1_parent_1_3_0,
         [e_1_parent_1_1_3_0, e_1_parent_1_1_1_3_0]),
        ("organisation reporting to e_1_parent_1_1_3_0", e_1_parent_1_1_3_0,
         [e_1_parent_1_1_1_3_0]),
        ("organisation reporting to e_1_parent_1_1_1_3_0", e_1_parent_1_1_1_3_0,
         [EMPTY_RESULT]),
    ]
    for label, dn, expected in checks:
        res = _extop_test_slapi_member(server=topo.standalone, dn=dn, relation="manager")
        _check_res_vs_expected(label, res, expected)

    def fin():
        # Remove every provisioned entry and the test plugin config entry.
        entries = [e_1_parent_0, e_1_parent_1_0, e_1_parent_1_1_0, e_1_parent_1_1_1_0,
                   e_2_parent_1_1_1_0, e_3_parent_1_1_1_0, e_4_parent_1_1_1_0,
                   e_5_parent_1_1_1_0, e_2_parent_1_1_0, e_2_parent_1_0,
                   e_1_parent_2_1_0, e_2_parent_2_1_0, e_1_parent_2_2_1_0,
                   e_3_parent_2_1_0, e_4_parent_2_1_0, e_2_parent_0, e_1_parent_2_0,
                   e_2_parent_2_0, e_3_parent_2_0, e_4_parent_2_0, e_3_parent_0,
                   e_1_parent_3_0, e_1_parent_1_3_0, e_1_parent_1_1_3_0,
                   e_1_parent_1_1_1_3_0]
        for entry in entries:
            topo.standalone.delete_s(entry)
        topo.standalone.delete_s(dn_config)

    request.addfinalizer(fin)
_extop_test_slapi_member(server=topo.standalone, dn=e_2_parent_2_1_0, relation="manager") + _check_res_vs_expected("organisation reporting to e_2_parent_2_1_0", res, expected) + + # Check e_1_parent_3_0 + expected = [e_1_parent_1_3_0, e_1_parent_1_1_3_0, e_1_parent_1_1_1_3_0] + res = _extop_test_slapi_member(server=topo.standalone, dn=e_1_parent_3_0, relation="manager") + _check_res_vs_expected("organisation reporting to e_1_parent_3_0", res, expected) + + # Check e_1_parent_1_3_0 + expected = [e_1_parent_1_1_3_0, e_1_parent_1_1_1_3_0] + res = _extop_test_slapi_member(server=topo.standalone, dn=e_1_parent_1_3_0, relation="manager") + _check_res_vs_expected("organisation reporting to e_1_parent_1_3_0", res, expected) + + # Check e_1_parent_1_1_3_0 + expected = [e_1_parent_1_1_1_3_0] + res = _extop_test_slapi_member(server=topo.standalone, dn=e_1_parent_1_1_3_0, relation="manager") + _check_res_vs_expected("organisation reporting to e_1_parent_1_1_3_0", res, expected) + + # Check e_1_parent_1_1_1_3_0 + expected = [EMPTY_RESULT] + res = _extop_test_slapi_member(server=topo.standalone, dn=e_1_parent_1_1_1_3_0, relation="manager") + _check_res_vs_expected("organisation reporting to e_1_parent_1_1_1_3_0", res, expected) + + def fin(): + entries = [e_1_parent_0, e_1_parent_1_0, e_1_parent_1_1_0, e_1_parent_1_1_1_0, e_2_parent_1_1_1_0, e_3_parent_1_1_1_0, e_4_parent_1_1_1_0, e_5_parent_1_1_1_0, e_2_parent_1_1_0, e_2_parent_1_0, e_1_parent_2_1_0, e_2_parent_2_1_0, e_1_parent_2_2_1_0, e_3_parent_2_1_0, e_4_parent_2_1_0, e_2_parent_0, e_1_parent_2_0, e_2_parent_2_0, e_3_parent_2_0, e_4_parent_2_0, e_3_parent_0, e_1_parent_3_0, e_1_parent_1_3_0, e_1_parent_1_1_3_0, e_1_parent_1_1_1_3_0] + for entry in entries: + topo.standalone.delete_s(entry) + topo.standalone.delete_s(dn_config) + + request.addfinalizer(fin) + + +def test_slapi_memberof_reuse_if_possible_10(topo, request, install_test_plugin): + """ + Test that management hierarchy (manager) is computed with slapi_memberof + 
It requires slapi_memberof to reuse IF POSSIBLE the computed values + from memberof plugins. + Memberof plugin is enabled, but with a different 'slapimemberOfEntryScopeExcludeSubtree' attr + it falls back to regular computation (recompute) + with following parameters + - member attribute: memberof + - membership attribute: 'manager' + - span over all backends: 'off' + - skip nesting membership: 'off' + - computation mode: MEMBEROF_REUSE_IF_POSSIBLE <-- + - Scope: DEFAULT_SUFFIX + - ExcludeScope: ou=foo,dc=example,dc=com <-- + - Maximum return entries: None + + :id: 6c26619d-e0e2-4b3e-938b-4cab817e928f + :setup: Standalone instance + :steps: + 1. Configure memberof with 'memberOfEntryScopeExcludeSubtree: ou=groups,dc=example,dc=com' <-- + 2. provision a set of entry + 3. configure test_slapi_memberof as described above + 4. check computed membership vs expected result + :expectedresults: + 1. Operation should succeed + 2. Operation should succeed + 3. Operation should succeed + 4. Operation should succeed + + DIT is : + e_1_parent_0 + - e_1_parent_1_0 + -- e_1_parent_1_1_0 + --- e_1_parent_1_1_1_0 + --- e_2_parent_1_1_1_0 + --- e_3_parent_1_1_1_0 + --- e_4_parent_1_1_1_0 + --- e_5_parent_1_1_1_0 + -- e_2_parent_1_1_0 + - e_2_parent_1_0 + -- e_1_parent_2_1_0 + -- e_2_parent_2_1_0 + --- e_1_parent_2_2_1_0 + -- e_3_parent_2_1_0 + -- e_4_parent_2_1_0 + e_2_parent_0 + - e_1_parent_2_0 + - e_2_parent_2_0 + - e_3_parent_2_0 + - e_4_parent_2_0 + e_3_parent_0 + - e_1_parent_3_0 + -- e_1_parent_1_3_0 + --- e_1_parent_1_1_3_0 + ---- e_1_parent_1_1_1_3_0 + """ + memberof = MemberOfPlugin(topo.standalone) + memberof.enable() + memberof.replace('memberOfGroupAttr', 'manager') + memberof.replace('memberOfAllBackends', 'off') + memberof.replace('memberOfSkipNested', 'off') + memberof.replace('memberOfEntryScope', DEFAULT_SUFFIX) + memberof.replace('memberOfEntryScopeExcludeSubtree', 'ou=groups,dc=example,dc=com') + topo.standalone.restart() + + user = UserAccounts(topo.standalone, 
DEFAULT_SUFFIX) + + # First subtree + e_1_parent_0 = add_entry(topo.standalone, uid="e_1_parent_0") + + e_1_parent_1_0 = add_entry(topo.standalone, uid="e_1_parent_1_0", manager=[ensure_bytes(e_1_parent_0)]) + + e_1_parent_1_1_0 = add_entry(topo.standalone, uid="e_1_parent_1_1_0", manager=[ensure_bytes(e_1_parent_1_0)]) + + e_1_parent_1_1_1_0 = add_entry(topo.standalone, uid="e_1_parent_1_1_1_0", manager=[ensure_bytes(e_1_parent_1_1_0)]) + e_2_parent_1_1_1_0 = add_entry(topo.standalone, uid="e_2_parent_1_1_1_0", manager=[ensure_bytes(e_1_parent_1_1_0)]) + e_3_parent_1_1_1_0 = add_entry(topo.standalone, uid="e_3_parent_1_1_1_0", manager=[ensure_bytes(e_1_parent_1_1_0)]) + e_4_parent_1_1_1_0 = add_entry(topo.standalone, uid="e_4_parent_1_1_1_0", manager=[ensure_bytes(e_1_parent_1_1_0)]) + e_5_parent_1_1_1_0 = add_entry(topo.standalone, uid="e_5_parent_1_1_1_0", manager=[ensure_bytes(e_1_parent_1_1_0)]) + + e_2_parent_1_1_0 = add_entry(topo.standalone, uid="e_2_parent_1_1_0", manager=[ensure_bytes(e_1_parent_1_0)]) + + e_2_parent_1_0 = add_entry(topo.standalone, uid="e_2_parent_1_0", manager=[ensure_bytes(e_1_parent_0)]) + + e_1_parent_2_1_0 = add_entry(topo.standalone, uid="e_1_parent_2_1_0", manager=[ensure_bytes(e_2_parent_1_0)]) + e_2_parent_2_1_0 = add_entry(topo.standalone, uid="e_2_parent_2_1_0", manager=[ensure_bytes(e_2_parent_1_0)]) + e_1_parent_2_2_1_0 = add_entry(topo.standalone, uid="e_1_parent_2_2_1_0", manager=[ensure_bytes(e_2_parent_2_1_0)]) + e_3_parent_2_1_0 = add_entry(topo.standalone, uid="e_3_parent_2_1_0", manager=[ensure_bytes(e_2_parent_1_0)]) + e_4_parent_2_1_0 = add_entry(topo.standalone, uid="e_4_parent_2_1_0", manager=[ensure_bytes(e_2_parent_1_0)]) + + # 2nd subtree + e_2_parent_0 = add_entry(topo.standalone, uid="e_2_parent_0") + + e_1_parent_2_0 = add_entry(topo.standalone, uid="e_1_parent_2_0", manager=[ensure_bytes(e_2_parent_0)]) + e_2_parent_2_0 = add_entry(topo.standalone, uid="e_2_parent_2_0", manager=[ensure_bytes(e_2_parent_0)]) 
+ e_3_parent_2_0 = add_entry(topo.standalone, uid="e_3_parent_2_0", manager=[ensure_bytes(e_2_parent_0)]) + e_4_parent_2_0 = add_entry(topo.standalone, uid="e_4_parent_2_0", manager=[ensure_bytes(e_2_parent_0)]) + + # third subtree + e_3_parent_0 = add_entry(topo.standalone, uid="e_3_parent_0") + + e_1_parent_3_0 = add_entry(topo.standalone, uid="e_1_parent_3_0", manager=[ensure_bytes(e_3_parent_0)]) + + e_1_parent_1_3_0 = add_entry(topo.standalone, uid="e_1_parent_1_3_0", manager=[ensure_bytes(e_1_parent_3_0)]) + + e_1_parent_1_1_3_0 = add_entry(topo.standalone, uid="e_1_parent_1_1_3_0", manager=[ensure_bytes(e_1_parent_1_3_0)]) + + e_1_parent_1_1_1_3_0 = add_entry(topo.standalone, uid="e_1_parent_1_1_1_3_0", manager=[ensure_bytes(e_1_parent_1_1_3_0)]) + + dn_config = 'cn=test_slapi_memberof,cn=plugins,cn=config' + topo.standalone.add_s(Entry((dn_config, {'objectclass': 'top nsSlapdPlugin extensibleObject'.split(), + 'cn': 'test_slapi_memberof', + 'nsslapd-pluginPath': 'libtest_slapi_memberof-plugin', + 'nsslapd-pluginInitfunc': 'test_slapi_memberof_init', + 'nsslapd-pluginType': 'extendedop', + 'nsslapd-pluginEnabled': 'on', + 'nsslapd-plugin-depends-on-type': 'database', + 'nsslapd-pluginId': 'test_slapi_memberof-plugin', + 'slapimemberOfMemberDN': 'uid=test_user_11,ou=People,dc=example,dc=com', + 'slapimemberOfGroupAttr': 'manager', + 'slapimemberOfAttr': 'memberof', + 'slapimemberOfFlag': 'MEMBEROF_REUSE_IF_POSSIBLE', + 'slapimemberOfAllBackends': 'off', + 'slapimemberOfSkipNested': 'off', + 'slapimemberOfEntryScope': DEFAULT_SUFFIX, + 'slapimemberOfEntryScopeExcludeSubtree': "ou=Foo,%s" % DEFAULT_SUFFIX, + 'slapimemberOfMaxGroup': '0', + 'nsslapd-pluginVersion': '2.3.2.202302131418git0e190fc3d', + 'nsslapd-pluginVendor': '389 Project', + 'nsslapd-pluginDescription': 'test_slapi_memberof extended operation plugin'}))) + topo.standalone.restart() + + # Check the first subtree + expected = [ e_1_parent_1_0, e_1_parent_1_1_0, e_1_parent_1_1_1_0, 
e_2_parent_1_1_1_0, e_3_parent_1_1_1_0, e_4_parent_1_1_1_0, e_5_parent_1_1_1_0, e_2_parent_1_1_0, e_2_parent_1_0, e_1_parent_2_1_0, e_2_parent_2_1_0, e_1_parent_2_2_1_0, e_3_parent_2_1_0, e_4_parent_2_1_0] + res = _extop_test_slapi_member(server=topo.standalone, dn=e_1_parent_0, relation="manager") + _check_res_vs_expected("first subtree", res, expected) + + # Check the second subtree + expected = [e_1_parent_2_0, e_2_parent_2_0, e_3_parent_2_0, e_4_parent_2_0] + res = _extop_test_slapi_member(server=topo.standalone, dn=e_2_parent_0, relation="manager") + _check_res_vs_expected("second subtree", res, expected) + + # Check the third subtree + expected = [e_1_parent_3_0, e_1_parent_1_3_0, e_1_parent_1_1_3_0, e_1_parent_1_1_1_3_0] + res = _extop_test_slapi_member(server=topo.standalone, dn=e_3_parent_0, relation="manager") + _check_res_vs_expected("third subtree", res, expected) + + # check e_1_parent_1_0 + expected = [e_1_parent_1_1_0, e_1_parent_1_1_1_0, e_2_parent_1_1_1_0, e_3_parent_1_1_1_0, e_4_parent_1_1_1_0, e_5_parent_1_1_1_0, e_2_parent_1_1_0] + res = _extop_test_slapi_member(server=topo.standalone, dn=e_1_parent_1_0, relation="manager") + _check_res_vs_expected("organisation reporting to e_1_parent_1_0", res, expected) + + # check e_1_parent_1_1_0 + expected = [e_1_parent_1_1_1_0, e_2_parent_1_1_1_0, e_3_parent_1_1_1_0, e_4_parent_1_1_1_0, e_5_parent_1_1_1_0] + res = _extop_test_slapi_member(server=topo.standalone, dn=e_1_parent_1_1_0, relation="manager") + _check_res_vs_expected("organisation reporting to e_1_parent_1_1_0", res, expected) + + # check e_2_parent_1_1_0 + expected = [EMPTY_RESULT] + res = _extop_test_slapi_member(server=topo.standalone, dn=e_2_parent_1_1_0, relation="manager") + _check_res_vs_expected("organisation reporting to e_2_parent_1_1_0", res, expected) + + # check e_2_parent_1_0 + expected = [e_1_parent_2_1_0, e_2_parent_2_1_0, e_1_parent_2_2_1_0, e_3_parent_2_1_0, e_4_parent_2_1_0] + res = 
_extop_test_slapi_member(server=topo.standalone, dn=e_2_parent_1_0, relation="manager") + _check_res_vs_expected("organisation reporting to e_2_parent_1_0", res, expected) + + # check e_2_parent_2_1_0 + expected = [e_1_parent_2_2_1_0] + res = _extop_test_slapi_member(server=topo.standalone, dn=e_2_parent_2_1_0, relation="manager") + _check_res_vs_expected("organisation reporting to e_2_parent_2_1_0", res, expected) + + # Check e_1_parent_3_0 + expected = [e_1_parent_1_3_0, e_1_parent_1_1_3_0, e_1_parent_1_1_1_3_0] + res = _extop_test_slapi_member(server=topo.standalone, dn=e_1_parent_3_0, relation="manager") + _check_res_vs_expected("organisation reporting to e_1_parent_3_0", res, expected) + + # Check e_1_parent_1_3_0 + expected = [e_1_parent_1_1_3_0, e_1_parent_1_1_1_3_0] + res = _extop_test_slapi_member(server=topo.standalone, dn=e_1_parent_1_3_0, relation="manager") + _check_res_vs_expected("organisation reporting to e_1_parent_1_3_0", res, expected) + + # Check e_1_parent_1_1_3_0 + expected = [e_1_parent_1_1_1_3_0] + res = _extop_test_slapi_member(server=topo.standalone, dn=e_1_parent_1_1_3_0, relation="manager") + _check_res_vs_expected("organisation reporting to e_1_parent_1_1_3_0", res, expected) + + # Check e_1_parent_1_1_1_3_0 + expected = [EMPTY_RESULT] + res = _extop_test_slapi_member(server=topo.standalone, dn=e_1_parent_1_1_1_3_0, relation="manager") + _check_res_vs_expected("organisation reporting to e_1_parent_1_1_1_3_0", res, expected) + + def fin(): + entries = [e_1_parent_0, e_1_parent_1_0, e_1_parent_1_1_0, e_1_parent_1_1_1_0, e_2_parent_1_1_1_0, e_3_parent_1_1_1_0, e_4_parent_1_1_1_0, e_5_parent_1_1_1_0, e_2_parent_1_1_0, e_2_parent_1_0, e_1_parent_2_1_0, e_2_parent_2_1_0, e_1_parent_2_2_1_0, e_3_parent_2_1_0, e_4_parent_2_1_0, e_2_parent_0, e_1_parent_2_0, e_2_parent_2_0, e_3_parent_2_0, e_4_parent_2_0, e_3_parent_0, e_1_parent_3_0, e_1_parent_1_3_0, e_1_parent_1_1_3_0, e_1_parent_1_1_1_3_0] + for entry in entries: + 
# DN of the ad-hoc extended-operation plugin used to exercise slapi_memberof.
TEST_SLAPI_MEMBEROF_DN = 'cn=test_slapi_memberof,cn=plugins,cn=config'


def _provision_hierarchy(server):
    """Create the management ('manager') hierarchy used by the reuse tests.

    DIT is (indentation shows the manager chain, not the DIT tree —
    every entry is a sibling user entry):
      e_1_parent_0
      - e_1_parent_1_0
      -- e_1_parent_1_1_0
      --- e_1_parent_1_1_1_0
      --- e_2_parent_1_1_1_0
      --- e_3_parent_1_1_1_0
      --- e_4_parent_1_1_1_0
      --- e_5_parent_1_1_1_0
      -- e_2_parent_1_1_0
      - e_2_parent_1_0
      -- e_1_parent_2_1_0
      -- e_2_parent_2_1_0
      --- e_1_parent_2_2_1_0
      -- e_3_parent_2_1_0
      -- e_4_parent_2_1_0
      e_2_parent_0
      - e_1_parent_2_0
      - e_2_parent_2_0
      - e_3_parent_2_0
      - e_4_parent_2_0
      e_3_parent_0
      - e_1_parent_3_0
      -- e_1_parent_1_3_0
      --- e_1_parent_1_1_3_0
      ---- e_1_parent_1_1_1_3_0

    :param server: DirSrv instance the entries are added to
    :returns: dict mapping uid -> entry DN, in creation order
    """
    dns = {}

    def add(uid, manager_uid=None):
        # add_entry returns the DN of the created entry; the manager
        # attribute points at the DN of the previously created manager.
        if manager_uid is None:
            dns[uid] = add_entry(server, uid=uid)
        else:
            dns[uid] = add_entry(server, uid=uid,
                                 manager=[ensure_bytes(dns[manager_uid])])

    # First subtree
    add("e_1_parent_0")
    add("e_1_parent_1_0", "e_1_parent_0")
    add("e_1_parent_1_1_0", "e_1_parent_1_0")
    add("e_1_parent_1_1_1_0", "e_1_parent_1_1_0")
    add("e_2_parent_1_1_1_0", "e_1_parent_1_1_0")
    add("e_3_parent_1_1_1_0", "e_1_parent_1_1_0")
    add("e_4_parent_1_1_1_0", "e_1_parent_1_1_0")
    add("e_5_parent_1_1_1_0", "e_1_parent_1_1_0")
    add("e_2_parent_1_1_0", "e_1_parent_1_0")
    add("e_2_parent_1_0", "e_1_parent_0")
    add("e_1_parent_2_1_0", "e_2_parent_1_0")
    add("e_2_parent_2_1_0", "e_2_parent_1_0")
    add("e_1_parent_2_2_1_0", "e_2_parent_2_1_0")
    add("e_3_parent_2_1_0", "e_2_parent_1_0")
    add("e_4_parent_2_1_0", "e_2_parent_1_0")

    # 2nd subtree
    add("e_2_parent_0")
    add("e_1_parent_2_0", "e_2_parent_0")
    add("e_2_parent_2_0", "e_2_parent_0")
    add("e_3_parent_2_0", "e_2_parent_0")
    add("e_4_parent_2_0", "e_2_parent_0")

    # third subtree
    add("e_3_parent_0")
    add("e_1_parent_3_0", "e_3_parent_0")
    add("e_1_parent_1_3_0", "e_1_parent_3_0")
    add("e_1_parent_1_1_3_0", "e_1_parent_1_3_0")
    add("e_1_parent_1_1_1_3_0", "e_1_parent_1_1_3_0")
    return dns


def _add_slapi_memberof_config(server, flag, exclude_scope=None):
    """Add the test_slapi_memberof plugin config entry and restart.

    :param server: DirSrv instance
    :param flag: slapimemberOfFlag value, e.g. 'MEMBEROF_REUSE_ONLY'
    :param exclude_scope: optional slapimemberOfEntryScopeExcludeSubtree DN
    """
    attrs = {'objectclass': 'top nsSlapdPlugin extensibleObject'.split(),
             'cn': 'test_slapi_memberof',
             'nsslapd-pluginPath': 'libtest_slapi_memberof-plugin',
             'nsslapd-pluginInitfunc': 'test_slapi_memberof_init',
             'nsslapd-pluginType': 'extendedop',
             'nsslapd-pluginEnabled': 'on',
             'nsslapd-plugin-depends-on-type': 'database',
             'nsslapd-pluginId': 'test_slapi_memberof-plugin',
             'slapimemberOfMemberDN': 'uid=test_user_11,ou=People,dc=example,dc=com',
             'slapimemberOfGroupAttr': 'manager',
             'slapimemberOfAttr': 'memberof',
             'slapimemberOfFlag': flag,
             'slapimemberOfAllBackends': 'off',
             'slapimemberOfSkipNested': 'off',
             'slapimemberOfEntryScope': DEFAULT_SUFFIX,
             'slapimemberOfMaxGroup': '0',
             'nsslapd-pluginVersion': '2.3.2.202302131418git0e190fc3d',
             'nsslapd-pluginVendor': '389 Project',
             'nsslapd-pluginDescription': 'test_slapi_memberof extended operation plugin'}
    if exclude_scope is not None:
        attrs['slapimemberOfEntryScopeExcludeSubtree'] = exclude_scope
    server.add_s(Entry((TEST_SLAPI_MEMBEROF_DN, attrs)))
    server.restart()


# (label, uid of the entry queried, uids of the expected subordinates or
# None when the query is expected to return an empty result)
_HIERARCHY_CHECKS = [
    ("first subtree", "e_1_parent_0",
     ["e_1_parent_1_0", "e_1_parent_1_1_0", "e_1_parent_1_1_1_0",
      "e_2_parent_1_1_1_0", "e_3_parent_1_1_1_0", "e_4_parent_1_1_1_0",
      "e_5_parent_1_1_1_0", "e_2_parent_1_1_0", "e_2_parent_1_0",
      "e_1_parent_2_1_0", "e_2_parent_2_1_0", "e_1_parent_2_2_1_0",
      "e_3_parent_2_1_0", "e_4_parent_2_1_0"]),
    ("second subtree", "e_2_parent_0",
     ["e_1_parent_2_0", "e_2_parent_2_0", "e_3_parent_2_0", "e_4_parent_2_0"]),
    ("third subtree", "e_3_parent_0",
     ["e_1_parent_3_0", "e_1_parent_1_3_0", "e_1_parent_1_1_3_0",
      "e_1_parent_1_1_1_3_0"]),
    ("organisation reporting to e_1_parent_1_0", "e_1_parent_1_0",
     ["e_1_parent_1_1_0", "e_1_parent_1_1_1_0", "e_2_parent_1_1_1_0",
      "e_3_parent_1_1_1_0", "e_4_parent_1_1_1_0", "e_5_parent_1_1_1_0",
      "e_2_parent_1_1_0"]),
    ("organisation reporting to e_1_parent_1_1_0", "e_1_parent_1_1_0",
     ["e_1_parent_1_1_1_0", "e_2_parent_1_1_1_0", "e_3_parent_1_1_1_0",
      "e_4_parent_1_1_1_0", "e_5_parent_1_1_1_0"]),
    ("organisation reporting to e_2_parent_1_1_0", "e_2_parent_1_1_0", None),
    ("organisation reporting to e_2_parent_1_0", "e_2_parent_1_0",
     ["e_1_parent_2_1_0", "e_2_parent_2_1_0", "e_1_parent_2_2_1_0",
      "e_3_parent_2_1_0", "e_4_parent_2_1_0"]),
    ("organisation reporting to e_2_parent_2_1_0", "e_2_parent_2_1_0",
     ["e_1_parent_2_2_1_0"]),
    ("organisation reporting to e_1_parent_3_0", "e_1_parent_3_0",
     ["e_1_parent_1_3_0", "e_1_parent_1_1_3_0", "e_1_parent_1_1_1_3_0"]),
    ("organisation reporting to e_1_parent_1_3_0", "e_1_parent_1_3_0",
     ["e_1_parent_1_1_3_0", "e_1_parent_1_1_1_3_0"]),
    ("organisation reporting to e_1_parent_1_1_3_0", "e_1_parent_1_1_3_0",
     ["e_1_parent_1_1_1_3_0"]),
    ("organisation reporting to e_1_parent_1_1_1_3_0", "e_1_parent_1_1_1_3_0",
     None),
]


def _check_recomputed_hierarchy(server, entries):
    """Check that every sampled entry returns its full computed membership.

    :param server: DirSrv instance
    :param entries: uid -> DN mapping from _provision_hierarchy
    """
    for label, uid, expected_uids in _HIERARCHY_CHECKS:
        if expected_uids is None:
            expected = [EMPTY_RESULT]
        else:
            expected = [entries[e] for e in expected_uids]
        res = _extop_test_slapi_member(server=server, dn=entries[uid],
                                       relation="manager")
        _check_res_vs_expected(label, res, expected)


def _check_empty_hierarchy(server, entries):
    """Check that every sampled entry returns an empty membership result.

    Used when slapi_memberof runs in MEMBEROF_REUSE_ONLY mode and no
    memberof values are available for reuse.

    :param server: DirSrv instance
    :param entries: uid -> DN mapping from _provision_hierarchy
    """
    for label, uid, _unused in _HIERARCHY_CHECKS:
        res = _extop_test_slapi_member(server=server, dn=entries[uid],
                                       relation="manager")
        _check_res_vs_expected(label, res, [EMPTY_RESULT])


def _register_cleanup(server, request, entries):
    """Register a finalizer deleting the test entries and the plugin config.

    :param server: DirSrv instance
    :param request: pytest request fixture
    :param entries: uid -> DN mapping from _provision_hierarchy
    """
    def fin():
        for dn in entries.values():
            server.delete_s(dn)
        server.delete_s(TEST_SLAPI_MEMBEROF_DN)

    request.addfinalizer(fin)


def test_slapi_memberof_reuse_if_possible_11(topo, request, install_test_plugin):
    """Test that management hierarchy (manager) is computed with slapi_memberof

    It requires slapi_memberof to reuse IF POSSIBLE the computed values
    from the memberof plugin.  The memberof plugin is enabled but with
    different 'memberOfEntryScopeExcludeSubtree' values, so slapi_memberof
    falls back to regular computation (recompute) with the following
    parameters:
     - member attribute: memberof
     - membership attribute: 'manager'
     - span over all backends: 'off'
     - skip nesting membership: 'off'
     - computation mode: MEMBEROF_REUSE_IF_POSSIBLE <--
     - Scope: DEFAULT_SUFFIX
     - ExcludeScope: ou=foo1,dc=example,dc=com <--
     - Maximum return entries: None

    :id: 4c15995e-21a9-4443-8027-a0908345be50
    :setup: Standalone instance
    :steps:
        1. Configure memberof with
           - 'memberOfEntryScopeExcludeSubtree: ou=foo1,dc=example,dc=com' <--
           - 'memberOfEntryScopeExcludeSubtree: ou=foo2,dc=example,dc=com' <--
        2. provision a set of entries (see _provision_hierarchy for the DIT)
        3. configure test_slapi_memberof as described above
        4. check computed membership vs expected result
    :expectedresults:
        1. Operation should succeed
        2. Operation should succeed
        3. Operation should succeed
        4. Operation should succeed
    """
    memberof = MemberOfPlugin(topo.standalone)
    memberof.enable()
    memberof.replace('memberOfGroupAttr', 'manager')
    memberof.replace('memberOfAllBackends', 'off')
    memberof.replace('memberOfSkipNested', 'off')
    memberof.replace('memberOfEntryScope', DEFAULT_SUFFIX)
    memberof.replace('memberOfEntryScopeExcludeSubtree', 'ou=foo1,dc=example,dc=com')
    memberof.add('memberOfEntryScopeExcludeSubtree', 'ou=foo2,dc=example,dc=com')
    topo.standalone.restart()

    entries = _provision_hierarchy(topo.standalone)
    _add_slapi_memberof_config(topo.standalone, 'MEMBEROF_REUSE_IF_POSSIBLE',
                               exclude_scope="ou=foo1,%s" % DEFAULT_SUFFIX)

    # The exclude scopes differ between memberof and slapi_memberof, so
    # the memberof values are not reusable: membership is recomputed and
    # the full hierarchy is returned.
    _check_recomputed_hierarchy(topo.standalone, entries)
    _register_cleanup(topo.standalone, request, entries)


def test_slapi_memberof_reuse_only_1(topo, request, install_test_plugin):
    """Test that management hierarchy (manager) is computed with slapi_memberof

    It requires slapi_memberof to ONLY reuse the computed values from the
    memberof plugin.  As the memberof plugin is not enabled, there are no
    memberof values to reuse and every lookup returns no memberof, with
    the following parameters:
     - membership attribute: 'manager'
     - span over all backends: 'off'
     - skip nesting membership: 'off'
     - computation mode: MEMBEROF_REUSE_ONLY <--
     - Scope: DEFAULT_SUFFIX
     - ExcludeScope: None
     - Maximum return entries: None

    :id: 7be9b188-2e84-4454-b6db-9e176014582a
    :setup: Standalone instance
    :steps:
        1. provision a set of entries (see _provision_hierarchy for the DIT)
        2. configure test_slapi_memberof as described above
        3. check computed membership vs expected result
    :expectedresults:
        1. Operation should succeed
        2. Operation should succeed
        3. Operation should succeed
    """
    entries = _provision_hierarchy(topo.standalone)
    _add_slapi_memberof_config(topo.standalone, 'MEMBEROF_REUSE_ONLY')

    # No memberof plugin -> nothing to reuse -> every result is empty.
    _check_empty_hierarchy(topo.standalone, entries)
    _register_cleanup(topo.standalone, request, entries)


def test_slapi_memberof_reuse_only_2(topo, request, install_test_plugin):
    """Test that management hierarchy (manager) is computed with slapi_memberof

    It requires slapi_memberof to ONLY reuse the computed values from the
    memberof plugin.  As the memberof plugin is enabled with a matching
    configuration, slapi_memberof reuses its values and returns the full
    hierarchy, with the following parameters:
     - member attribute: memberof
     - membership attribute: 'manager'
     - span over all backends: 'off'
     - skip nesting membership: 'off'
     - computation mode: MEMBEROF_REUSE_ONLY <--
     - Scope: DEFAULT_SUFFIX
     - ExcludeScope: None
     - Maximum return entries: None

    :id: fb4f8c86-aa39-4252-90e0-36cfd7b3dd80
    :setup: Standalone instance
    :steps:
        1. Configure and enable the memberof plugin
        2. provision a set of entries (see _provision_hierarchy for the DIT)
        3. configure test_slapi_memberof as described above
        4. check computed membership vs expected result
    :expectedresults:
        1. Operation should succeed
        2. Operation should succeed
        3. Operation should succeed
        4. Operation should succeed
    """
    memberof = MemberOfPlugin(topo.standalone)
    memberof.enable()
    memberof.replace('memberOfAttr', 'memberof')
    memberof.replace('memberOfGroupAttr', 'manager')
    memberof.replace('memberOfAllBackends', 'off')
    memberof.replace('memberOfSkipNested', 'off')
    memberof.replace('memberOfEntryScope', DEFAULT_SUFFIX)
    topo.standalone.restart()

    entries = _provision_hierarchy(topo.standalone)
    _add_slapi_memberof_config(topo.standalone, 'MEMBEROF_REUSE_ONLY')

    # memberof values exist and match the slapi_memberof configuration,
    # so reuse succeeds and the full hierarchy is returned.
    _check_recomputed_hierarchy(topo.standalone, entries)
    _register_cleanup(topo.standalone, request, entries)


if __name__ == "__main__":
    CURRENT_FILE = os.path.realpath(__file__)
    pytest.main("-s -v %s" % CURRENT_FILE)
a/dirsrvtests/tests/suites/snmp/__init__.py b/dirsrvtests/tests/suites/snmp/__init__.py new file mode 100644 index 0000000..da86e48 --- /dev/null +++ b/dirsrvtests/tests/suites/snmp/__init__.py @@ -0,0 +1,3 @@ +""" + :Requirement: 389-ds-base: SNMP +""" diff --git a/dirsrvtests/tests/suites/state/__init__.py b/dirsrvtests/tests/suites/state/__init__.py new file mode 100644 index 0000000..d8e230b --- /dev/null +++ b/dirsrvtests/tests/suites/state/__init__.py @@ -0,0 +1,3 @@ +""" + :Requirement: 389-ds-base: Operational Attributes +""" diff --git a/dirsrvtests/tests/suites/state/mmt_state_test.py b/dirsrvtests/tests/suites/state/mmt_state_test.py new file mode 100644 index 0000000..7f10b4b --- /dev/null +++ b/dirsrvtests/tests/suites/state/mmt_state_test.py @@ -0,0 +1,374 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2022 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. +# --- END COPYRIGHT BLOCK --- +# +import os +import logging +import ldap +import pytest +from lib389.idm.user import UserAccounts +from lib389.topologies import topology_m2 as topo +from lib389._constants import * + +pytestmark = pytest.mark.tier1 + +DEBUGGING = os.getenv("DEBUGGING", default=False) +if DEBUGGING: + logging.getLogger(__name__).setLevel(logging.DEBUG) +else: + logging.getLogger(__name__).setLevel(logging.INFO) +log = logging.getLogger(__name__) + +BINVALUE1 = 'thedeadbeef1' +BINVALUE2 = 'thedeadbeef2' +BINVALUE3 = 'thedeadbeef3' + +USER_PROPERTIES = { + 'uid': 'state1usr', + 'cn': 'state1usr', + 'sn': 'state1usr', + 'uidNumber': '1001', + 'gidNumber': '2001', + 'userpassword': PASSWORD, + 'homeDirectory': '/home/testuser' +} + + +def _check_user_oper_attrs(topo, tuser, attr_name, attr_value, oper_type, exp_values, oper_attr): + """Check if list of operational attributes present for a given entry""" + + log.info('Checking if operational attrs vucsn, adcsn and vdcsn present for: {}'.format(tuser))
+ entry = topo.ms["supplier1"].search_s(tuser.dn, ldap.SCOPE_BASE, 'objectclass=*',['nscpentrywsi']) + if oper_attr: + match = False + for line in str(entry).split('\n'): + if attr_name.lower() + ';' in line.lower(): + match = True + if not 'DELETE' in oper_type: + assert any(attr in line for attr in exp_values) and oper_attr in line + else: + assert 'deleted' in line and oper_attr in line + + # If we didn't look at a single attribute then something went wrong + assert match + + +@pytest.mark.parametrize("attr_name, attr_value, oper_type, exp_values, oper_attr", + [('description', 'Test1usr1', 'ldap.MOD_ADD', ['Test1usr1'], 'vucsn'), + ('description', 'Test1usr2', 'ldap.MOD_ADD', ['Test1usr1', + 'Test1usr2'], 'vucsn'), + ('description', 'Test1usr3', 'ldap.MOD_ADD', + ['Test1usr1', 'Test1usr2', 'Test1usr3'], 'vucsn'), + ('description', 'Test1usr4', 'ldap.MOD_REPLACE', ['Test1usr4'], + 'adcsn'), + ('description', 'Test1usr4', 'ldap.MOD_DELETE', [], 'vdcsn')]) +def test_check_desc_attr_state(topo, attr_name, attr_value, oper_type, exp_values, oper_attr): + """Modify user's description attribute and check if description attribute is + added/modified/deleted and operational attributes vucsn, adcsn and vdcsn are present. + + :id: f0830538-02cf-11e9-8be0-8c16451d917b + :parametrized: yes + :setup: Replication with two suppliers. + :steps: 1. Add user to Supplier1 without description attribute. + 2. Add description attribute to user. + 3. Check if only one description attribute exist. + 4. Check if operational attribute vucsn exist. + 5. Add second description attribute to user. + 6. Check if two description attributes exist. + 7. Check if operational attribute vucsn exist. + 8. Add third description attribute to user. + 9. Check if three description attributes exist. + 10. Check if operational attribute vucsn exist. + 11. Replace description attribute for the user. + 12. Check if only one description attribute exist. + 13. Check if operational attribute adcsn exist. + 14. 
Delete description attribute for the user. + 15. Check if no description attribute exist. + 16. Check if no operational attribute vdcsn exist. + :expectedresults: + 1. Add user to M1 should PASS. + 2. Adding description attribute should PASS + 3. Only one description attribute should be present. + 4. Vucsn attribute should be present. + 5. Adding a new description attribute should PASS + 6. Two description attribute should be present. + 7. Vucsn attribute should be present. + 8. Adding a new description attribute should PASS + 9. Three description attribute should be present. + 10. Vucsn attribute should be present. + 11. Replacing new description attribute should PASS + 12. Only one description attribute should be present. + 13. Adcsn attribute should be present. + 14. Deleting description attribute should PASS + 15. No description attribute should be present. + 16. Vdcsn attribute should be present. + """ + + test_entry = 'state1test' + log.info('Add user: {}'.format(test_entry)) + users = UserAccounts(topo.ms['supplier1'], DEFAULT_SUFFIX) + try: + tuser = users.get(test_entry) + except ldap.NO_SUCH_OBJECT: + USER_PROPERTIES.update(dict.fromkeys(['uid', 'cn'], test_entry)) + tuser = users.create(properties=USER_PROPERTIES) + tuser.set(attr_name, attr_value, eval(oper_type)) + log.info('Check if list of description attrs present for: {}'.format(test_entry)) + assert sorted([i.decode() for i in tuser.get_attr_vals(attr_name)]) == sorted(exp_values) + + log.info('Checking for operational attributes') + _check_user_oper_attrs(topo, tuser, attr_name, attr_value, oper_type, exp_values, oper_attr) + + +@pytest.mark.parametrize("attr_name, attr_value, oper_type, exp_values, oper_attr", + [('cn', 'TestCN1', 'ldap.MOD_ADD', ['TestCN1', 'TestCNusr1'], 'vucsn'), + ('cn', 'TestCN2', 'ldap.MOD_ADD', ['TestCN1', + 'TestCN2', 'TestCNusr1'], 'vucsn'), + ('cn', 'TestnewCN3', 'ldap.MOD_REPLACE', ['TestnewCN3'], 'adcsn'), + ('cn', 'TestnewCN3', 'ldap.MOD_DELETE', None, None)]) +def 
test_check_cn_attr_state(topo, attr_name, attr_value, oper_type, exp_values, oper_attr): + """Modify user's cn attribute and check if cn attribute is added/modified/deleted and + operational attributes vucsn, adcsn and vdcsn are present. + + :id: 19614bae-02d0-11e9-a295-8c16451d917b + :parametrized: yes + :setup: Replication with two suppliers. + :steps: 1. Add user to Supplier1 with cn attribute. + 2. Add a new cn attribute to user. + 3. Check if two cn attributes exist. + 4. Check if operational attribute vucsn exist for each cn attribute. + 5. Add a new cn attribute to user. + 6. Check if three cn attributes exist. + 7. Check if operational attribute vucsn exist for each cn attribute. + 8. Replace cn attribute for the user. + 9. Check if only one cn attribute exist. + 10. Check if operational attribute adcsn exist. + 11. Delete cn attribute from user and check if it fails. + :expectedresults: + 1. Add user to M1 should PASS. + 2. Adding a new cn attribute should PASS + 3. Two cn attribute should be present. + 4. Vucsn attribute should be present. + 5. Adding a new cn attribute should PASS + 6. Three cn attribute should be present. + 7. Vucsn attribute should be present. + 8. Replacing new cn attribute should PASS + 9. Only one cn attribute should be present. + 10. Operational attribute adcsn should be present. + 11. Deleting cn attribute should fail with ObjectClass violation error. 
+ """ + + test_entry = 'TestCNusr1' + log.info('Add user: {}'.format(test_entry)) + users = UserAccounts(topo.ms['supplier1'], DEFAULT_SUFFIX) + try: + tuser = users.get(test_entry) + except ldap.NO_SUCH_OBJECT: + USER_PROPERTIES.update(dict.fromkeys(['uid', 'cn'], test_entry)) + tuser = users.create(properties=USER_PROPERTIES) + + if 'MOD_DELETE' in oper_type: + with pytest.raises(ldap.OBJECT_CLASS_VIOLATION): + tuser.set(attr_name, attr_value, eval(oper_type)) + else: + tuser.set(attr_name, attr_value, eval(oper_type)) + log.info('Check if list of cn attrs present for: {}'.format(test_entry)) + assert sorted([i.decode() for i in tuser.get_attr_vals(attr_name)]) == sorted(exp_values) + log.info('Checking for operational attributes') + _check_user_oper_attrs(topo, tuser, attr_name, attr_value, oper_type, exp_values, oper_attr) + + +@pytest.mark.parametrize("attr_name, attr_value, oper_type, exp_values, oper_attr", + [('preferredlanguage', 'Chinese', 'ldap.MOD_REPLACE', ['Chinese'], + 'vucsn'), + ('preferredlanguage', 'French', 'ldap.MOD_ADD', None, None), + ('preferredlanguage', 'German', 'ldap.MOD_REPLACE', ['German'], 'adcsn'), + ('preferredlanguage', 'German', 'ldap.MOD_DELETE', [], 'vdcsn')]) +def test_check_single_value_attr_state(topo, attr_name, attr_value, oper_type, + exp_values, oper_attr): + """Modify user's preferredlanguage attribute and check if preferredlanguage attribute is + added/modified/deleted and operational attributes vucsn, adcsn and vdcsn are present. + + :id: 22fd645e-02d0-11e9-a9e4-8c16451d917b + :parametrized: yes + :setup: Replication with two suppliers. + :steps: 1. Add user to Supplier1 without preferredlanguage attribute. + 2. Add a new preferredlanguage attribute to user. + 3. Check if one preferredlanguage attributes exist. + 4. Check if operational attribute vucsn exist. + 5. Add a new preferredlanguage attribute for the user and check if its rejected. + 6. Replace preferredlanguage attribute for the user. + 7. 
Check if only one preferredlanguage attribute exist. + 8. Check if operational attribute adcsn exist with preferredlanguage. + :expectedresults: + 1. Add user to M1 should PASS. + 2. Adding a new preferredlanguage attribute should PASS + 3. Only one preferredlanguage attribute should be present. + 4. Vucsn attribute should be present. + 5. Adding a new preferredlanguage should fail with ObjectClass violation error. + 6. Replace preferredlanguage should PASS. + 7. Only one preferredlanguage attribute should be present. + 8. Operational attribute adcsn should be present with preferredlanguage. + """ + + test_entry = 'Langusr1' + log.info('Add user: {}'.format(test_entry)) + users = UserAccounts(topo.ms['supplier1'], DEFAULT_SUFFIX) + try: + tuser = users.get(test_entry) + except ldap.NO_SUCH_OBJECT: + USER_PROPERTIES.update(dict.fromkeys(['uid', 'cn'], test_entry)) + tuser = users.create(properties=USER_PROPERTIES) + + if 'MOD_ADD' in oper_type: + with pytest.raises(ldap.OBJECT_CLASS_VIOLATION): + tuser.set(attr_name, attr_value, eval(oper_type)) + else: + tuser.set(attr_name, attr_value, eval(oper_type)) + log.info('Check if list of cn attrs present for: {}'.format(test_entry)) + assert sorted([i.decode() for i in tuser.get_attr_vals(attr_name)]) == sorted(exp_values) + log.info('Checking for operational attributes') + _check_user_oper_attrs(topo, tuser, attr_name, attr_value, oper_type, exp_values, oper_attr) + + +@pytest.mark.parametrize("attr_name, attr_value, oper_type, exp_values, oper_attr", + [('roomnumber;office', 'Tower1', 'ldap.MOD_ADD', ['Tower1'], 'vucsn'), + ('roomnumber;office', 'Tower2', 'ldap.MOD_ADD', ['Tower1', 'Tower2'], + 'vucsn'), + ('roomnumber;office', 'Tower3', 'ldap.MOD_ADD', ['Tower1', 'Tower2', + 'Tower3'], 'vucsn'), + ('roomnumber;office', 'Tower4', 'ldap.MOD_REPLACE', ['Tower4'], 'adcsn'), + ('roomnumber;office', 'Tower4', 'ldap.MOD_DELETE', [], 'vucsn')]) +def test_check_subtype_attr_state(topo, attr_name, attr_value, oper_type, 
exp_values, oper_attr): + """Modify user's roomnumber;office attribute subtype and check if roomnumber;office attribute + is added/modified/deleted and operational attributes vucsn, adcsn and vdcsn are present. + + :id: 29ab87a4-02d0-11e9-b104-8c16451d917b + :parametrized: yes + :setup: Replication with two suppliers. + :steps: 1. Add user to Supplier1 without roomnumber;office attribute. + 2. Add roomnumber;office attribute to user. + 3. Check if only one roomnumber;office attribute exist. + 4. Check if operational attribute vucsn exist. + 5. Add second roomnumber;office attribute to user. + 6. Check if two roomnumber;office attributes exist. + 7. Check if operational attribute vucsn exist. + 8. Add third roomnumber;office attribute to user. + 9. Check if three roomnumber;office attributes exist. + 10. Check if operational attribute vucsn exist. + 11. Replace roomnumber;office attribute for the user. + 12. Check if only one roomnumber;office attribute exist. + 13. Check if operational attribute adcsn exist. + 14. Delete roomnumber;office attribute for the user. + 15. Check if no roomnumber;office attribute exist. + 16. Check if no operational attribute vdcsn exist. + :expectedresults: + 1. Add user to M1 should PASS. + 2. Adding roomnumber;office attribute should PASS + 3. Only one roomnumber;office attribute should be present. + 4. Vucsn attribute should be present. + 5. Adding a new roomnumber;office attribute should PASS + 6. Two roomnumber;office attribute should be present. + 7. Vucsn attribute should be present. + 8. Adding a new roomnumber;office attribute should PASS + 9. Three roomnumber;office attribute should be present. + 10. Vucsn attribute should be present. + 11. Replacing new roomnumber;office attribute should PASS + 12. Only one roomnumber;office attribute should be present. + 13. Adcsn attribute should be present. + 14. Deleting roomnumber;office attribute should PASS + 15. No roomnumber;office attribute should be present. + 16. 
Vdcsn attribute should be present. + """ + + test_entry = 'roomoffice1usr' + log.info('Add user: {}'.format(test_entry)) + users = UserAccounts(topo.ms['supplier1'], DEFAULT_SUFFIX) + try: + tuser = users.get(test_entry) + except ldap.NO_SUCH_OBJECT: + USER_PROPERTIES.update(dict.fromkeys(['uid', 'cn'], test_entry)) + tuser = users.create(properties=USER_PROPERTIES) + + tuser.set(attr_name, attr_value, eval(oper_type)) + log.info('Check if list of roomnumber;office attributes are present for a given entry') + assert sorted([i.decode() for i in tuser.get_attr_vals(attr_name)]) == sorted(exp_values) + log.info('Checking if operational attributes are present for cn') + _check_user_oper_attrs(topo, tuser, attr_name, attr_value, oper_type, exp_values, oper_attr) + + +@pytest.mark.parametrize("attr_name, attr_value, oper_type, exp_values, oper_attr", + [('jpegphoto', BINVALUE1, 'ldap.MOD_ADD', [BINVALUE1], 'vucsn'), + ('jpegphoto', BINVALUE2, 'ldap.MOD_ADD', [BINVALUE1, BINVALUE2], + 'vucsn'), + ('jpegphoto', BINVALUE3, 'ldap.MOD_ADD', [BINVALUE1, BINVALUE2, + BINVALUE3], 'vucsn'), + ('jpegphoto', BINVALUE2, 'ldap.MOD_REPLACE', [BINVALUE2], 'adcsn'), + ('jpegphoto', BINVALUE2, 'ldap.MOD_DELETE', [], 'vdcsn')]) +def test_check_jpeg_attr_state(topo, attr_name, attr_value, oper_type, exp_values, oper_attr): + """Modify user's jpegphoto attribute and check if jpegphoto attribute is added/modified/deleted + and operational attributes vucsn, adcsn and vdcsn are present. + + :id: 312ac0d0-02d0-11e9-9d34-8c16451d917b + :parametrized: yes + :setup: Replication with two suppliers. + :steps: 1. Add user to Supplier1 without jpegphoto attribute. + 2. Add jpegphoto attribute to user. + 3. Check if only one jpegphoto attribute exist. + 4. Check if operational attribute vucsn exist. + 5. Add second jpegphoto attribute to user. + 6. Check if two jpegphoto attributes exist. + 7. Check if operational attribute vucsn exist. + 8. Add third jpegphoto attribute to user. + 9. 
Check if three jpegphoto attributes exist. + 10. Check if operational attribute vucsn exist. + 11. Replace jpegphoto attribute for the user. + 12. Check if only one jpegphoto attribute exist. + 13. Check if operational attribute adcsn exist. + 14. Delete jpegphoto attribute for the user. + 15. Check if no jpegphoto attribute exist. + 16. Check if no operational attribute vdcsn exist. + :expectedresults: + 1. Add user to M1 should PASS. + 2. Adding jpegphoto attribute should PASS + 3. Only one jpegphoto attribute should be present. + 4. Vucsn attribute should be present. + 5. Adding a new jpegphoto attribute should PASS + 6. Two jpegphoto attribute should be present. + 7. Vucsn attribute should be present. + 8. Adding a new jpegphoto attribute should PASS + 9. Three jpegphoto attribute should be present. + 10. Vucsn attribute should be present. + 11. Replacing new jpegphoto attribute should PASS + 12. Only one jpegphoto attribute should be present. + 13. Adcsn attribute should be present. + 14. Deleting jpegphoto attribute should PASS + 15. No jpegphoto attribute should be present. + 16. Vdcsn attribute should be present. 
+ """ + + test_entry = 'testJpeg1usr' + log.info('Add user: {}'.format(test_entry)) + users = UserAccounts(topo.ms['supplier1'], DEFAULT_SUFFIX) + try: + tuser = users.get(test_entry) + except ldap.NO_SUCH_OBJECT: + USER_PROPERTIES.update(dict.fromkeys(['uid', 'cn'], test_entry)) + tuser = users.create(properties=USER_PROPERTIES) + + tuser.set(attr_name, attr_value, eval(oper_type)) + log.info('Check if list of jpeg attributes are present for a given entry') + assert sorted([i.decode() for i in tuser.get_attr_vals(attr_name)]) == sorted(exp_values) + log.info('Checking if operational attributes are present for cn') + _check_user_oper_attrs(topo, tuser, attr_name, attr_value, oper_type, exp_values, oper_attr) + + +if __name__ == "__main__": + # Run isolated + # -s for DEBUG mode + CURRENT_FILE = os.path.realpath(__file__) + pytest.main("-s -v %s" % CURRENT_FILE) diff --git a/dirsrvtests/tests/suites/subentries/__init__.py b/dirsrvtests/tests/suites/subentries/__init__.py new file mode 100644 index 0000000..8f2fa23 --- /dev/null +++ b/dirsrvtests/tests/suites/subentries/__init__.py @@ -0,0 +1,3 @@ +""" + :Requirement: 389-ds-base: LDAP Subentries +""" diff --git a/dirsrvtests/tests/suites/subentries/subentries_test.py b/dirsrvtests/tests/suites/subentries/subentries_test.py new file mode 100644 index 0000000..f522b93 --- /dev/null +++ b/dirsrvtests/tests/suites/subentries/subentries_test.py @@ -0,0 +1,169 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2021 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. 
+# --- END COPYRIGHT BLOCK ---- + +# Author: Anton Bobrov + +import logging +import pytest +import os +import ldap +from ldap.controls import LDAPControl +from lib389 import DirSrv +from lib389.rootdse import RootDSE +from lib389.utils import * +from lib389._constants import * +from lib389.topologies import create_topology +from lib389.topologies import topology_st as topo +from lib389.idm.user import UserAccount, UserAccounts + +log = logging.getLogger(__name__) + +pytestmark = pytest.mark.tier0 + +""" +This BooleanControl class is taken from python-ldap, +see https://www.python-ldap.org/ for details. +The reason is python-ldap standard class has not been +updated for Python 3 properly and behaves incorrectly. +When python-ldap is fixed this class can be removed in +exchange for from ldap.controls import BooleanControl +""" +class BooleanControl(LDAPControl): + """ + Base class for simple request controls with boolean control value. + Constructor argument and class attribute: + booleanValue + Boolean (True/False or 1/0) which is the boolean controlValue. + """ + boolean2ber = { 1:b'\x01\x01\xFF', 0:b'\x01\x01\x00' } + ber2boolean = { b'\x01\x01\xFF':1, b'\x01\x01\x00':0 } + + def __init__(self,controlType=None,criticality=False,booleanValue=False): + self.controlType = controlType + self.criticality = criticality + self.booleanValue = booleanValue + + def encodeControlValue(self): + return self.boolean2ber[int(self.booleanValue)] + + def decodeControlValue(self,encodedControlValue): + self.booleanValue = self.ber2boolean[encodedControlValue] + +def has_subentries_control(topo): + """ + Checks if server supports LDAP Subentries control + """ + rdse = RootDSE(topo.standalone) + return "1.3.6.1.4.1.4203.1.10.1" in rdse.get_supported_ctrls() + + +@pytest.fixture(scope="module") +def setup_test_entries(topo, request): + """ + Add nentries entries and nentries subentries. + """ + users = [] + # Add normal entries. 
+ user = UserAccounts(topo.standalone, DEFAULT_SUFFIX) + for i in range(0, nentries): + user1 = user.create_test_user(uid=i) + users.append(user1) + + # Add subentries. + for i in range(nentries, nentries * 2): + user1 = user.create_test_user(uid=i) + user1.add("objectclass", "ldapsubentry") + users.append(user1) + + def fin(): + for user in users: + user.delete() + + + request.addfinalizer(fin) + + +nentries = 5 +search_entries = '(objectclass=inetorgperson)' +search_subentries = '(objectclass=ldapsubentry)' + +# Test matrix parameters +searches = [ + # Search with subentries control visibility TRUE + (search_entries, True, True, nentries, True), + # Search with subentries control visibility FALSE + (search_entries, True, False, nentries, False), + # Search for normal entries + (search_entries, False, None, nentries, False), + # Search for subentries + (search_subentries, False, None, nentries, True), + # Search for normal entries and subentries + (f'(|{search_entries}{search_subentries})', False, None, + nentries * 2, None) + ] + +@pytest.mark.parametrize('search_filter, use_control, controlValue,'\ + 'expected_nentries, expected_subentries', searches) +def test_subentries(topo, setup_test_entries, search_filter, use_control, + controlValue, expected_nentries, expected_subentries): + """Test LDAP Subentries control (RFC 3672) + + :id: 5cdb72eb-d227-49c8-9f7a-89314c717a85 + :setup: Standalone Instance + :parametrized: yes + :steps: + 1. Add test entries and subentries + 2. Search with subentries control visibility TRUE + 3. Search with subentries control visibility FALSE + 4. Search for normal entries + 5. Search for subentries + 6. Search for normal entries and subentries + :expectedresults: + 1. Entries and subentries should be added + 2. Only subentries are visible + 3. Only normal entries are visible + 4. Only normal entries are visible + 5. Only subentries are visible + 6. 
Both normal entries and subentries are visible + """ + if use_control and not has_subentries_control(topo): + pytest.skip("This test is only required when LDAP Subentries "\ + "control is supported.") + + request_control = BooleanControl( + controlType="1.3.6.1.4.1.4203.1.10.1", + criticality=True, booleanValue=controlValue) + + if use_control: + request_ctrl = [request_control] + else: + request_ctrl = None + + entries = topo.standalone.search_ext_s("ou=people," + + DEFAULT_SUFFIX, + ldap.SCOPE_SUBTREE, + search_filter, + serverctrls=request_ctrl) + + assert len(entries) == expected_nentries + if expected_subentries is not None: + if expected_subentries: + for entry in entries: + assert ensure_bytes("ldapsubentry") in \ + entry.getValues("objectclass") + else: + for entry in entries: + assert ensure_bytes("ldapsubentry") not in \ + entry.getValues("objectclass") + + +if __name__ == '__main__': + # Run isolated + # -s for DEBUG mode + CURRENT_FILE = os.path.realpath(__file__) + pytest.main(["-s", CURRENT_FILE]) diff --git a/dirsrvtests/tests/suites/syncrepl_plugin/__init__.py b/dirsrvtests/tests/suites/syncrepl_plugin/__init__.py new file mode 100644 index 0000000..2429f71 --- /dev/null +++ b/dirsrvtests/tests/suites/syncrepl_plugin/__init__.py @@ -0,0 +1,282 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2020 William Brown +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. 
+# --- END COPYRIGHT BLOCK --- +""" + :Requirement: 389-ds-base: Sync Replication Plugin +""" +import logging +import ldap +import time +from ldap.syncrepl import SyncreplConsumer +from lib389 import DirSrv +from lib389.idm.user import nsUserAccounts +from lib389.topologies import topology_st as topology +from lib389._constants import DEFAULT_SUFFIX + +log = logging.getLogger(__name__) + +OU_PEOPLE = "ou=people,%s" % DEFAULT_SUFFIX + +class ISyncRepl(DirSrv, SyncreplConsumer): + """ + This implements a test harness for checking syncrepl, and allowing us to check various actions or + behaviours. During a "run" it stores the results in its instance, so that they can be inspected + later to ensure that syncrepl worked as expected. + """ + def __init__(self, inst, openldap=False): + ### 🚧 WARNING 🚧 + # There are bugs with python ldap sync repl in ALL VERSIONS below 3.3.1. + # These tests WILL FAIL unless you have version 3.3.1 or higher! + assert ldap.__version__ >= '3.3.1' + + self.inst = inst + self.msgid = None + + self.last_cookie = None + self.next_cookie = None + self.cookie = None + self.openldap = openldap + if self.openldap: + # In openldap mode, our initial cookie needs to be a rid. + self.cookie = "rid=123" + self.delete = [] + self.present = [] + self.entries = {} + + super().__init__() + + def result4(self, *args, **kwargs): + return self.inst.result4(*args, **kwargs, escapehatch='i am sure') + + def search_ext(self, *args, **kwargs): + return self.inst.search_ext(*args, **kwargs, escapehatch='i am sure') + + def syncrepl_search(self, base=DEFAULT_SUFFIX, scope=ldap.SCOPE_SUBTREE, mode='refreshOnly', cookie=None, **search_args): + # Wipe the last result set. + self.delete = [] + self.present = [] + self.entries = {} + self.refdel = False + self.next_cookie = None + # Start the sync + # If cookie is none, will call "get_cookie" we have.
+ self.msgid = super().syncrepl_search(base, scope, mode, cookie, **search_args) + log.debug(f'syncrepl_search -> {self.msgid}') + assert self.msgid is not None + + def syncrepl_complete(self): + log.debug(f'syncrepl_complete -> {self.msgid}') + assert self.msgid is not None + # Loop until the operation is complete. + time.sleep(1) + while super().syncrepl_poll(msgid=self.msgid) is True: + pass + assert self.next_cookie is not None + self.last_cookie = self.cookie + self.cookie = self.next_cookie + + def check_cookie(self): + assert self.last_cookie != self.cookie + + def syncrepl_set_cookie(self, cookie): + log.debug(f'set_cookie -> {cookie}') + if self.openldap: + assert self.cookie.startswith("rid=123") + self.next_cookie = cookie + + def syncrepl_get_cookie(self): + log.debug('get_cookie -> %s' % self.cookie) + if self.openldap: + assert self.cookie.startswith("rid=123") + return self.cookie + + def syncrepl_present(self, uuids, refreshDeletes=False): + log.debug(f'=====> refdel -> {refreshDeletes} uuids -> {uuids}') + if refreshDeletes: + # Indicate we received a refdel in the process. + self.refdel = True + if uuids is not None: + self.present = self.present + uuids + + def syncrepl_delete(self, uuids): + log.debug(f'delete -> {uuids}') + self.delete = uuids + + def syncrepl_entry(self, dn, attrs, uuid): + log.debug(f'entry -> {dn}') + self.entries[dn] = (uuid, attrs) + + def syncrepl_refreshdone(self): + log.debug('refreshdone') + +def syncstate_assert(st, sync): + # How many entries do we have? + # We setup sync under ou=people so we can modrdn out of the scope.
+ r = st.search_ext_s( + base=OU_PEOPLE, + scope=ldap.SCOPE_SUBTREE, + filterstr='(objectClass=*)', + attrsonly=1, + escapehatch='i am sure' + ) + + # Initial sync + log.debug("*test* initial") + sync.syncrepl_search(base=OU_PEOPLE) + sync.syncrepl_complete() + # check we caught them all + assert len(r) == len(sync.entries.keys()) + assert len(r) == len(sync.present) + assert 0 == len(sync.delete) + if sync.openldap: + assert True == sync.refdel + else: + assert False == sync.refdel + + # Add a new entry + account = nsUserAccounts(st, DEFAULT_SUFFIX).create_test_user() + + # Find the primary uuid we expect to see in syncrepl. + # This will be None if not present. + acc_uuid = account.get_attr_val_utf8('entryuuid') + if not sync.openldap: + nsid = account.get_attr_val_utf8('nsuniqueid') + # nsunique has a diff format, so we change it up. + # 431cf081-b44311ea-83fdb082-f24d490e + # Add a hyphen V + # 431cf081-b443-11ea-83fdb082-f24d490e + nsid_a = nsid[:13] + '-' + nsid[13:] + # Add a hyphen V + # 431cf081-b443-11ea-83fd-b082-f24d490e + nsid_b = nsid_a[:23] + '-' + nsid_a[23:] + # Remove a hyphen V + # 431cf081-b443-11ea-83fd-b082-f24d490e + acc_uuid = nsid_b[:28] + nsid_b[29:] + # Tada! 
+ # 431cf081-b443-11ea-83fd-b082f24d490e + log.debug(f"--> expected sync uuid (from nsuniqueid): {acc_uuid}") + else: + log.debug(f"--> expected sync uuid (from entryuuid): {acc_uuid}") + + # Check + log.debug("*test* add") + sync.syncrepl_search(base=OU_PEOPLE) + sync.syncrepl_complete() + sync.check_cookie() + log.debug(f"sd: {sync.delete}, sp: {sync.present} sek: {sync.entries.keys()}") + + assert 1 == len(sync.entries.keys()) + assert 1 == len(sync.present) + #################################### + assert sync.present == [acc_uuid] + assert 0 == len(sync.delete) + if sync.openldap: + assert True == sync.refdel + else: + assert False == sync.refdel + + # Mod + account.replace('description', 'change') + # Check + log.debug("*test* mod") + sync.syncrepl_search(base=OU_PEOPLE) + sync.syncrepl_complete() + sync.check_cookie() + log.debug(f"sd: {sync.delete}, sp: {sync.present} sek: {sync.entries.keys()}") + assert 1 == len(sync.entries.keys()) + assert 1 == len(sync.present) + #################################### + assert sync.present == [acc_uuid] + assert 0 == len(sync.delete) + if sync.openldap: + assert True == sync.refdel + else: + assert False == sync.refdel + + ## ModRdn (remain in scope) + account.rename('uid=test1_modrdn') + # newsuperior=None + # Check + log.debug("*test* modrdn (in scope)") + sync.syncrepl_search(base=OU_PEOPLE) + sync.syncrepl_complete() + sync.check_cookie() + log.debug(f"sd: {sync.delete}, sp: {sync.present} sek: {sync.entries.keys()}") + assert 1 == len(sync.entries.keys()) + assert 1 == len(sync.present) + #################################### + assert sync.present == [acc_uuid] + assert 0 == len(sync.delete) + if sync.openldap: + assert True == sync.refdel + else: + assert False == sync.refdel + + # import time + # print("attach now ....") + # time.sleep(45) + + ## Modrdn (out of scope, then back into scope) + account.rename('uid=test1_modrdn', newsuperior=DEFAULT_SUFFIX) + + # Check it's gone. 
+ log.debug("*test* modrdn (move out of scope)") + sync.syncrepl_search(base=OU_PEOPLE) + sync.syncrepl_complete() + sync.check_cookie() + log.debug(f"sd: {sync.delete}, sp: {sync.present} sek: {sync.entries.keys()}") + assert 0 == len(sync.entries.keys()) + assert 0 == len(sync.present) + ## WARNING: This test MAY FAIL here if you do not have a new enough python-ldap + # due to an ASN.1 parsing bug. You require at least python-ldap 3.3.1 + assert 1 == len(sync.delete) + assert sync.delete == [acc_uuid] + if sync.openldap: + assert True == sync.refdel + else: + assert False == sync.refdel + + # Put it back + account.rename('uid=test1_modrdn', newsuperior=OU_PEOPLE) + log.debug("*test* modrdn (move in to scope)") + sync.syncrepl_search(base=OU_PEOPLE) + sync.syncrepl_complete() + sync.check_cookie() + log.debug(f"sd: {sync.delete}, sp: {sync.present} sek: {sync.entries.keys()}") + assert 1 == len(sync.entries.keys()) + assert 1 == len(sync.present) + #################################### + assert sync.present == [acc_uuid] + assert 0 == len(sync.delete) + if sync.openldap: + assert True == sync.refdel + else: + assert False == sync.refdel + + ## Delete + account.delete() + + # Check + log.debug("*test* del") + sync.syncrepl_search(base=OU_PEOPLE) + sync.syncrepl_complete() + # In a delete, the cookie isn't updated (?) 
+ sync.check_cookie() + log.debug(f'{sync.entries.keys()}') + log.debug(f'{sync.present}') + log.debug(f'{sync.delete}') + log.debug(f"sd: {sync.delete}, sp: {sync.present} sek: {sync.entries.keys()}") + assert 0 == len(sync.entries.keys()) + assert 0 == len(sync.present) + assert 1 == len(sync.delete) + assert sync.delete == [acc_uuid] + #################################### + if sync.openldap: + assert True == sync.refdel + else: + assert False == sync.refdel + diff --git a/dirsrvtests/tests/suites/syncrepl_plugin/basic_test.py b/dirsrvtests/tests/suites/syncrepl_plugin/basic_test.py new file mode 100644 index 0000000..c22331e --- /dev/null +++ b/dirsrvtests/tests/suites/syncrepl_plugin/basic_test.py @@ -0,0 +1,734 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2020 William Brown +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. +# --- END COPYRIGHT BLOCK --- + +import logging +import ldap +import time +import threading +from ldap.syncrepl import SyncreplConsumer +from ldap.ldapobject import ReconnectLDAPObject +import pytest +from lib389 import DirSrv +from lib389.idm.organizationalunit import OrganizationalUnits, OrganizationalUnit +from lib389.idm.user import nsUserAccounts, UserAccounts +from lib389.idm.group import Groups +from lib389.topologies import topology_st as topology +from lib389.topologies import topology_m2 as topo_m2 +from lib389.paths import Paths +from lib389.utils import ds_is_older +from lib389.plugins import RetroChangelogPlugin, ContentSyncPlugin, AutoMembershipPlugin, MemberOfPlugin, MemberOfSharedConfig, AutoMembershipDefinitions, MEPTemplates, MEPConfigs, ManagedEntriesPlugin, MEPTemplate +from lib389._constants import * + +from . 
import ISyncRepl, syncstate_assert + +default_paths = Paths() +pytestmark = pytest.mark.tier1 + +log = logging.getLogger(__name__) + +@pytest.fixture(scope="function") +def init_sync_repl_plugins(topology, request): + """Prepare test environment (retroCL/sync_repl/ + automember/memberof) and cleanup at the end of the test + 1.: enable retroCL + 2.: configure retroCL to log nsuniqueid as targetUniqueId + 3.: enable content_sync plugin + 4.: enable automember + 5.: create (2) groups. Few groups can help to reproduce the concurrent updates problem. + 6.: configure automember to provision those groups with 'member' + 7.: enable and configure memberof plugin + 8.: enable plugin log level + 9.: restart the server + """ + inst = topology[0] + inst.restart() + + # Enable/configure retroCL + plugin = RetroChangelogPlugin(inst) + plugin.disable() + plugin.enable() + plugin.set('nsslapd-attribute', 'nsuniqueid:targetuniqueid') + + # Enable sync plugin + plugin = ContentSyncPlugin(inst) + plugin.enable() + + # Enable automember + plugin = AutoMembershipPlugin(inst) + plugin.disable() + plugin.enable() + + # Add the automember group + groups = Groups(inst, DEFAULT_SUFFIX) + group = [] + for i in range(1,5): + group.append(groups.create(properties={'cn': 'group%d' % i})) + + # Add the automember config entry + am_configs = AutoMembershipDefinitions(inst) + am_configs_cleanup = [] + for g in group: + am_config = am_configs.create(properties={'cn': 'config %s' % g.get_attr_val_utf8('cn'), + 'autoMemberScope': DEFAULT_SUFFIX, + 'autoMemberFilter': 'uid=*', + 'autoMemberDefaultGroup': g.dn, + 'autoMemberGroupingAttr': 'member:dn'}) + am_configs_cleanup.append(am_config) + + # Enable and configure memberof plugin + plugin = MemberOfPlugin(inst) + plugin.disable() + plugin.enable() + + plugin.replace_groupattr('member') + + memberof_config = MemberOfSharedConfig(inst, 'cn=memberOf config,{}'.format(DEFAULT_SUFFIX)) + try: + memberof_config.create(properties={'cn': 'memberOf config', + 
'memberOfGroupAttr': 'member', + 'memberOfAttr': 'memberof'}) + except ldap.ALREADY_EXISTS: + pass + + # Enable plugin log level (usefull for debug) + inst.setLogLevel(65536) + inst.restart() + + def fin(): + inst.restart() + for am_config in am_configs_cleanup: + am_config.delete() + for g in group: + try: + g.delete() + except: + pass + request.addfinalizer(fin) + +#unstable or unstatus tests, skipped for now +#it fails, let's say 1 time out of 10, while decoding asn1 response +@pytest.mark.flaky(max_runs=2, min_passes=1) +@pytest.mark.skipif(ldap.__version__ < '3.3.1', + reason="python ldap versions less that 3.3.1 have bugs in sync repl that will cause this to fail!") +def test_syncrepl_basic(topology): + """ Test basic functionality of the SyncRepl interface + + :id: f9fea826-8ae2-412a-8e88-b8e0ba939b06 + + :setup: Standalone instance + + :steps: + 1. Enable Retro Changelog + 2. Enable Syncrepl + 3. Run the syncstate test to check refresh, add, delete, mod. + + :expectedresults: + 1. Success + 1. Success + 1. Success + """ + st = topology.standalone + # Enable RetroChangelog. 
+ rcl = RetroChangelogPlugin(st) + rcl.enable() + # Set the default targetid + rcl.replace('nsslapd-attribute', 'nsuniqueid:targetUniqueId') + # Enable sync repl + csp = ContentSyncPlugin(st) + csp.enable() + # Restart DS + st.restart() + # Setup the syncer + sync = ISyncRepl(st) + # Run the checks + syncstate_assert(st, sync) + +class TestSyncer(ReconnectLDAPObject, SyncreplConsumer): + def __init__(self, *args, **kwargs): + self.cookie = None + self.cookies = [] + ldap.ldapobject.ReconnectLDAPObject.__init__(self, *args, **kwargs) + + def syncrepl_set_cookie(self, cookie): + # extract the changenumber from the cookie + self.cookie = cookie + self.cookies.append(cookie.split('#')[2]) + log.info("XXXX Set cookie: %s" % cookie) + + def syncrepl_get_cookie(self): + log.info("XXXX Get cookie: %s" % self.cookie) + return self.cookie + + def syncrepl_present(self, uuids, refreshDeletes=False): + log.info("XXXX syncrepl_present uuids %s %s" % ( uuids, refreshDeletes)) + + def syncrepl_delete(self, uuids): + log.info("XXXX syncrepl_delete uuids %s" % uuids) + + def syncrepl_entry(self, dn, attrs, uuid): + log.info("XXXX syncrepl_entry dn %s" % dn) + + def syncrepl_refreshdone(self): + log.info("XXXX syncrepl_refreshdone") + + def get_cookies(self): + return self.cookies + +class Sync_persist(threading.Thread, ReconnectLDAPObject, SyncreplConsumer): + # This runs a sync_repl client in background + # it registers a result that contain a list of the change numbers (from the cookie) + # that are list as they are received + def __init__(self, inst): + threading.Thread.__init__(self) + self.daemon = True + self.inst = inst + self.cookie = None + self.conn = inst.clone({SER_ROOT_DN: 'cn=directory manager', SER_ROOT_PW: 'password'}) + self.filterstr = '(|(objectClass=groupofnames)(objectClass=person))' + self.attrs = [ + 'objectclass', + 'cn', + 'displayname', + 'gidnumber', + 'givenname', + 'homedirectory', + 'mail', + 'member', + 'memberof', + 'sn', + 'uid', + 'uidnumber', + ] 
+ self.conn.open() + self.result = [] + + def get_result(self): + # used to return the cookies list to the requestor + return self.result + + def run(self): + """Start a sync repl client""" + ldap_connection = TestSyncer(self.inst.toLDAPURL()) + ldap_connection.simple_bind_s('cn=directory manager', 'password') + ldap_search = ldap_connection.syncrepl_search( + "dc=example,dc=com", + ldap.SCOPE_SUBTREE, + mode='refreshAndPersist', + attrlist=self.attrs, + filterstr=self.filterstr, + cookie=None + ) + + try: + while ldap_connection.syncrepl_poll(all=1, msgid=ldap_search): + pass + except (ldap.SERVER_DOWN, ldap.CONNECT_ERROR) as e: + print('syncrepl_poll: LDAP error (%s)', e) + self.result = ldap_connection.get_cookies() + log.info("ZZZ result = %s" % self.result) + ldap_connection.unbind() + +def test_sync_repl_mep(topology, request): + """Test sync repl with MEP plugin that triggers several + updates on the same entry + + :id: d9515930-293e-42da-9835-9f255fa6111b + :setup: Standalone Instance + :steps: + 1. enable retro/sync_repl/mep + 2. Add mep Template and definition entry + 3. start sync_repl client + 4. Add users with PosixAccount ObjectClass (mep will update it several times) + 5. Check that the received cookie are progressing + :expectedresults: + 1. Success + 2. Success + 3. Success + 4. Success + 5. 
Success + """ + inst = topology[0] + + # Enable/configure retroCL + plugin = RetroChangelogPlugin(inst) + plugin.disable() + plugin.enable() + plugin.set('nsslapd-attribute', 'nsuniqueid:targetuniqueid') + + # Enable sync plugin + plugin = ContentSyncPlugin(inst) + plugin.enable() + + # Check the plug-in status + mana = ManagedEntriesPlugin(inst) + plugin.enable() + + # Add Template and definition entry + org1 = OrganizationalUnits(inst, DEFAULT_SUFFIX).create(properties={'ou': 'Users'}) + org2 = OrganizationalUnit(inst, f'ou=Groups,{DEFAULT_SUFFIX}') + meps = MEPTemplates(inst, DEFAULT_SUFFIX) + mep_template1 = meps.create(properties={ + 'cn': 'UPG Template1', + 'mepRDNAttr': 'cn', + 'mepStaticAttr': 'objectclass: posixGroup', + 'mepMappedAttr': 'cn: $uid|gidNumber: $gidNumber|description: User private group for $uid'.split('|')}) + conf_mep = MEPConfigs(inst) + mep_config = conf_mep.create(properties={ + 'cn': 'UPG Definition2', + 'originScope': org1.dn, + 'originFilter': 'objectclass=posixaccount', + 'managedBase': org2.dn, + 'managedTemplate': mep_template1.dn}) + + # Enable plugin log level (usefull for debug) + inst.setLogLevel(65536) + inst.restart() + + # create a sync repl client and wait 5 seconds to be sure it is running + sync_repl = Sync_persist(inst) + sync_repl.start() + time.sleep(5) + + # Add users with PosixAccount ObjectClass and verify creation of User Private Group + user = UserAccounts(inst, f'ou=Users,{DEFAULT_SUFFIX}', rdn=None).create_test_user() + assert user.get_attr_val_utf8('mepManagedEntry') == f'cn=test_user_1000,ou=Groups,{DEFAULT_SUFFIX}' + + # stop the server to get the sync_repl result set (exit from while loop). + # Only way I found to acheive that. + # and wait a bit to let sync_repl thread time to set its result before fetching it. 
+ inst.stop() + time.sleep(10) + cookies = sync_repl.get_result() + + # checking that the cookie are in increasing and in an acceptable range (0..1000) + assert len(cookies) > 0 + prev = -1 + for cookie in cookies: + log.info('Check cookie %s' % cookie) + + assert int(cookie) >= 0 + assert int(cookie) < 1000 + assert int(cookie) > prev + prev = int(cookie) + sync_repl.join() + log.info('test_sync_repl_map: PASS\n') + +def test_sync_repl_cookie(topology, init_sync_repl_plugins, request): + """Test sync_repl cookie are progressing is an increasing order + when there are nested updates + + :id: d7fbde25-5702-46ac-b38e-169d7a68e97c + :setup: Standalone Instance + :steps: + 1.: initialization/cleanup done by init_sync_repl_plugins fixture + 2.: create a thread dedicated to run a sync repl client + 3.: Create (9) users that will generate nested updates (automember/memberof) + 4.: stop sync repl client and collect the list of cookie.change_no + 5.: check that cookies.change_no are in increasing order + :expectedresults: + 1.: succeeds + 2.: succeeds + 3.: succeeds + 4.: succeeds + 5.: succeeds + """ + inst = topology[0] + + # create a sync repl client and wait 5 seconds to be sure it is running + sync_repl = Sync_persist(inst) + sync_repl.start() + time.sleep(5) + + # create users, that automember/memberof will generate nested updates + users = UserAccounts(inst, DEFAULT_SUFFIX) + users_set = [] + for i in range(10001, 10010): + users_set.append(users.create_test_user(uid=i)) + + # stop the server to get the sync_repl result set (exit from while loop). + # Only way I found to acheive that. + # and wait a bit to let sync_repl thread time to set its result before fetching it. 
+ inst.stop() + time.sleep(10) + cookies = sync_repl.get_result() + + # checking that the cookie are in increasing and in an acceptable range (0..1000) + assert len(cookies) > 0 + prev = -1 + for cookie in cookies: + log.info('Check cookie %s' % cookie) + + assert int(cookie) >= 0 + assert int(cookie) < 1000 + assert int(cookie) > prev + prev = int(cookie) + sync_repl.join() + log.info('test_sync_repl_cookie: PASS\n') + + def fin(): + inst.restart() + for user in users_set: + try: + user.delete() + except: + pass + + request.addfinalizer(fin) + + return + +def test_sync_repl_cookie_add_del(topology, init_sync_repl_plugins, request): + """Test sync_repl cookie are progressing is an increasing order + when there add and del + + :id: 83e11038-6ed0-4a5b-ac77-e44887ab11e3 + :setup: Standalone Instance + :steps: + 1.: initialization/cleanup done by init_sync_repl_plugins fixture + 2.: create a thread dedicated to run a sync repl client + 3.: Create (3) users that will generate nested updates (automember/memberof) + 4.: Delete (3) users + 5.: stop sync repl client and collect the list of cookie.change_no + 6.: check that cookies.change_no are in increasing order + :expectedresults: + 1.: succeeds + 2.: succeeds + 3.: succeeds + 4.: succeeds + 5.: succeeds + 6.: succeeds + """ + inst = topology[0] + # create a sync repl client and wait 5 seconds to be sure it is running + sync_repl = Sync_persist(inst) + sync_repl.start() + time.sleep(5) + # create users, that automember/memberof will generate nested updates + users = UserAccounts(inst, DEFAULT_SUFFIX) + users_set = [] + for i in range(10001, 10004): + users_set.append(users.create_test_user(uid=i)) + + time.sleep(10) + # delete users, that automember/memberof will generate nested updates + for user in users_set: + user.delete() + # stop the server to get the sync_repl result set (exit from while loop). + # Only way I found to acheive that. 
+ # and wait a bit to let sync_repl thread time to set its result before fetching it. + inst.stop() + cookies = sync_repl.get_result() + # checking that the cookie are in increasing and in an acceptable range (0..1000) + assert len(cookies) > 0 + prev = -1 + for cookie in cookies: + log.info('Check cookie %s' % cookie) + + assert int(cookie) >= 0 + assert int(cookie) < 1000 + assert int(cookie) > prev + prev = int(cookie) + sync_repl.join() + log.info('test_sync_repl_cookie_add_del: PASS\n') + + def fin(): + pass + + request.addfinalizer(fin) + + return + +def test_sync_repl_cookie_with_failure(topology, init_sync_repl_plugins, request): + """Test sync_repl cookie are progressing is the right order + when there is a failure in nested updates + + :id: e0103448-170e-4080-8f22-c34606447ce2 + :setup: Standalone Instance + :steps: + 1. initialization/cleanup done by init_sync_repl_plugins fixture + 2. update group2 so that it will not accept 'member' attribute (set by memberof) + 3. create a thread dedicated to run a sync repl client + 4. Create a group that will be the only update received by sync repl client + 5. Create (9) users that will generate nested updates (automember/memberof). Creation will fail because 'member' attribute is not allowed in group2 + 6. stop sync repl client and collect the list of cookie.change_no + 7. check that the list of cookie.change_no contains only the group 'step 11' + :expectedresults: + 1. succeeds + 2. succeeds + 3. succeeds + 4. succeeds + 5. Fails (expected) + 6. succeeds + 7. 
succeeds + """ + inst = topology[0] + + # Set group2 as a groupOfUniqueNames so that automember will fail to update that group + # This will trigger a failure in internal MOD and a failure to add member + group2 = Groups(inst, DEFAULT_SUFFIX).get('group2') + group2.replace('objectclass', 'groupOfUniqueNames') + + + # create a sync repl client and wait 5 seconds to be sure it is running + sync_repl = Sync_persist(inst) + sync_repl.start() + time.sleep(5) + + # Add a test group just to check that sync_repl receives that SyncControlInfo cookie + groups = Groups(inst, DEFAULT_SUFFIX) + testgroup = groups.create(properties={'cn': 'group%d' % 10}) + + # create users, that automember/memberof will generate nested updates + users = UserAccounts(inst, DEFAULT_SUFFIX) + users_set = [] + for i in range(1000,1010): + try: + users_set.append(users.create_test_user(uid=i)) + # Automember should fail to add uid=1000 in group2 + assert(False) + except ldap.UNWILLING_TO_PERFORM: + pass + + # stop the server to get the sync_repl result set (exit from while loop). + # Only way I found to acheive that. + # and wait a bit to let sync_repl thread time to set its result before fetching it. 
+ inst.stop() + time.sleep(10) + cookies = sync_repl.get_result() + + # checking that the cookie list contains only two entries + # the one from the SyncInfo/RefreshDelete that indicates the end of the refresh + # the the one from SyncStateControl related to the only updated entry (group10) + assert len(cookies) == 2 + prev = -1 + for cookie in cookies: + log.info('Check cookie %s' % cookie) + + assert int(cookie) >= 0 + assert int(cookie) < 1000 + assert int(cookie) > prev + prev = int(cookie) + sync_repl.join() + log.info('test_sync_repl_cookie_with_failure: PASS\n') + + def fin(): + inst.restart() + for user in users_set: + try: + user.delete() + except: + pass + testgroup.delete() + + request.addfinalizer(fin) + +def test_sync_repl_cenotaph(topo_m2, request): + """Test the creation of a cenotaph while a + sync repl client is running + + :id: 8ca1724a-cf42-4880-bf0f-be451f9bd3b4 + :setup: MMR with 2 suppliers + :steps: + 1. Enable retroCL/content_sync + 2. Run a sync repl client + 3. create users + 4. do a MODRDN of a user entry => creation of cenotaph + 5. stop sync repl client + :expectedresults: + 1. Should succeeds + 2. Should succeeds + 3. Should succeeds + 4. Should succeeds + 5. 
Should succeeds + """ + m1 = topo_m2.ms["supplier1"] + # Enable/configure retroCL + plugin = RetroChangelogPlugin(m1) + plugin.disable() + plugin.enable() + plugin.set('nsslapd-attribute', 'nsuniqueid:targetuniqueid') + + # Enable sync plugin + plugin = ContentSyncPlugin(m1) + plugin.enable() + # Restart DS + m1.restart() + + # create a sync repl client and wait 5 seconds to be sure it is running + sync_repl = Sync_persist(m1) + sync_repl.start() + time.sleep(5) + + # create users + users = UserAccounts(m1, DEFAULT_SUFFIX) + users_set = [] + for i in range(10001, 10003): + users_set.append(users.create_test_user(uid=i)) + + # rename the entry that would trigger the creation of a cenotaph + users_set[0].rename("uid=foo") + + # stop the server to get the sync_repl result set (exit from while loop). + # Only way I found to acheive that. + # and wait a bit to let sync_repl thread time to set its result before fetching it. + m1.stop() + time.sleep(2) + + def fin(): + m1.restart() + for user in users_set: + try: + user.delete() + except: + pass + + request.addfinalizer(fin) + +def test_sync_repl_dynamic_plugin(topology, request): + """Test sync_repl with dynamic plugin + + :id: d4f84913-c18a-459f-8525-110f610ca9e6 + :setup: install a standalone instance + :steps: + 1. reset instance to standard (no retroCL, no sync_repl, no dynamic plugin) + 2. Enable dynamic plugin + 3. Enable retroCL/content_sync + 4. Establish a sync_repl req + :expectedresults: + 1. Should succeeds + 2. Should succeeds + 3. Should succeeds + 4. 
Should succeeds + """ + + # Reset the instance in a default config + # Disable content sync plugin + topology.standalone.plugins.disable(name=PLUGIN_REPL_SYNC) + + # Disable retro changelog + topology.standalone.plugins.disable(name=PLUGIN_RETRO_CHANGELOG) + + # Disable dynamic plugins + topology.standalone.modify_s(DN_CONFIG, [(ldap.MOD_REPLACE, 'nsslapd-dynamic-plugins', b'off')]) + topology.standalone.restart() + + # Now start the test + # Enable dynamic plugins + try: + topology.standalone.modify_s(DN_CONFIG, [(ldap.MOD_REPLACE, 'nsslapd-dynamic-plugins', b'on')]) + except ldap.LDAPError as e: + log.error('Failed to enable dynamic plugin! {}'.format(e.args[0]['desc'])) + assert False + + # Enable retro changelog + topology.standalone.plugins.enable(name=PLUGIN_RETRO_CHANGELOG) + + # Enbale content sync plugin + topology.standalone.plugins.enable(name=PLUGIN_REPL_SYNC) + + # create a sync repl client and wait 5 seconds to be sure it is running + sync_repl = Sync_persist(topology.standalone) + sync_repl.start() + time.sleep(5) + + # create users + users = UserAccounts(topology.standalone, DEFAULT_SUFFIX) + users_set = [] + for i in range(10001, 10004): + users_set.append(users.create_test_user(uid=i)) + + time.sleep(10) + # delete users, that automember/memberof will generate nested updates + for user in users_set: + user.delete() + # stop the server to get the sync_repl result set (exit from while loop). + # Only way I found to acheive that. + # and wait a bit to let sync_repl thread time to set its result before fetching it. + topology.standalone.stop() + sync_repl.get_result() + sync_repl.join() + log.info('test_sync_repl_dynamic_plugin: PASS\n') + + # Success + log.info('Test complete') + +def test_sync_repl_invalid_cookie(topology, request): + """Test sync_repl with invalid cookie + + :id: 8fa4a8f8-acf4-42a5-90f1-6ba1d8080e46 + :setup: install a standalone instance + :steps: + 1. reset instance to standard (no retroCL, no sync_repl, no dynamic plugin) + 2. 
Enable retroCL/content_sync + 3. Establish a sync_repl connection + 4. Tests servers results to search with invalid cookie + 5. Add/delete an user entry to check the server is up and running + :expectedresults: + 1. Should succeeds + 2. Should succeeds + 3. Should succeeds + 4. Should succeeds + 5. Should succeeds + """ + + # Reset the instance in a default config + # Disable content sync plugin + topology.standalone.restart() + topology.standalone.plugins.disable(name=PLUGIN_REPL_SYNC) + + # Disable retro changelog + topology.standalone.plugins.disable(name=PLUGIN_RETRO_CHANGELOG) + + # Disable dynamic plugins + topology.standalone.modify_s(DN_CONFIG, [(ldap.MOD_REPLACE, 'nsslapd-dynamic-plugins', b'off')]) + topology.standalone.restart() + + # Enable retro changelog + topology.standalone.plugins.enable(name=PLUGIN_RETRO_CHANGELOG) + + # Enbale content sync plugin + topology.standalone.plugins.enable(name=PLUGIN_REPL_SYNC) + topology.standalone.restart() + + # Setup the syncer + sync = ISyncRepl(topology.standalone) + + # Test invalid cookies + cookies = ('#', '##', 'a#a#a', 'a#a#1', 'foo') + for invalid_cookie in cookies: + log.info('Testing cookie: %s' % invalid_cookie) + try: + ldap_search = sync.syncrepl_search(base=DEFAULT_SUFFIX, + scope=ldap.SCOPE_SUBTREE, + attrlist=['objectclass', 'cn', 'homedirectory', 'sn','uid'], + filterstr='(|(objectClass=groupofnames)(objectClass=person))', + mode='refreshOnly', + cookie=invalid_cookie) + poll_result = sync.syncrepl_poll(all=1) + + log.fatal('Invalid cookie accepted!') + assert False + except Exception as e: + log.info('Invalid cookie correctly rejected: {}'.format(e.args[0]['info'])) + pass + + # check that the server is still up and running + users = UserAccounts(topology.standalone, DEFAULT_SUFFIX) + user = users.create_test_user(uid=1000) + + # Success + log.info('Test complete') + + def fin(): + topology.standalone.restart() + try: + user.delete() + except: + pass + + request.addfinalizer(fin) diff --git 
a/dirsrvtests/tests/suites/syncrepl_plugin/openldap_test.py b/dirsrvtests/tests/suites/syncrepl_plugin/openldap_test.py new file mode 100644 index 0000000..25a5ed3 --- /dev/null +++ b/dirsrvtests/tests/suites/syncrepl_plugin/openldap_test.py @@ -0,0 +1,70 @@ +# Copyright (C) 2020 William Brown +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. +# --- END COPYRIGHT BLOCK --- + +import logging +import ldap +import time +import pytest +from lib389.topologies import topology_st as topology +from lib389.paths import Paths +from lib389.utils import ds_is_older +from lib389.plugins import RetroChangelogPlugin, ContentSyncPlugin +from lib389._constants import ErrorLog, DEFAULT_SUFFIX +from lib389.plugins import EntryUUIDPlugin + +from . import ISyncRepl, syncstate_assert + +default_paths = Paths() +pytestmark = pytest.mark.tier1 + +log = logging.getLogger(__name__) + +@pytest.mark.skipif(ldap.__version__ < '3.3.1' or not default_paths.rust_enabled or ds_is_older('1.4.4.0'), + reason="Sync repl does not support openldap compat in older versions, and without entryuuid") +def test_syncrepl_openldap(topology): + """ Test basic functionality of the openldap syncrepl + compatability handler. + + :id: 03039178-2cc6-40bd-b32c-7d6de108828b + + :setup: Standalone instance + + :steps: + 1. Enable Retro Changelog + 2. Enable Syncrepl + 3. Run the syncstate test to check refresh, add, delete, mod. + + :expectedresults: + 1. Success + 1. Success + 1. Success + """ + st = topology.standalone + # Ensure entryuuid is setup + plug = EntryUUIDPlugin(st) + task = plug.fixup(DEFAULT_SUFFIX) + task.wait() + st.config.loglevel(vals=(ErrorLog.DEFAULT,ErrorLog.PLUGIN)) + assert(task.is_complete() and task.get_exit_code() == 0) + + # Enable RetroChangelog. 
+ rcl = RetroChangelogPlugin(st) + rcl.enable() + # Set the default targetid + rcl.add('nsslapd-attribute', 'nsuniqueid:targetUniqueId') + rcl.add('nsslapd-attribute', 'entryuuid:targetEntryUUID') + # Enable sync repl + csp = ContentSyncPlugin(st) + csp.add('syncrepl-allow-openldap', 'on') + csp.enable() + # Restart DS + st.restart() + # Setup the syncer + sync = ISyncRepl(st, openldap=True) + # Run the checks + syncstate_assert(st, sync) + diff --git a/dirsrvtests/tests/suites/syntax/__init__.py b/dirsrvtests/tests/suites/syntax/__init__.py new file mode 100644 index 0000000..c083413 --- /dev/null +++ b/dirsrvtests/tests/suites/syntax/__init__.py @@ -0,0 +1,3 @@ +""" + :Requirement: 389-ds-base: Syntax +""" diff --git a/dirsrvtests/tests/suites/syntax/acceptance_test.py b/dirsrvtests/tests/suites/syntax/acceptance_test.py new file mode 100644 index 0000000..8079368 --- /dev/null +++ b/dirsrvtests/tests/suites/syntax/acceptance_test.py @@ -0,0 +1,248 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2023 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. +# --- END COPYRIGHT BLOCK --- + +import ldap +import pytest +import os +from lib389.schema import Schema +from lib389.config import Config +from lib389.idm.user import UserAccounts +from lib389.idm.group import Group, Groups +from lib389._constants import DEFAULT_SUFFIX +from lib389.topologies import log, topology_st as topo + +pytestmark = pytest.mark.tier0 + +log = log.getChild(__name__) + + +@pytest.fixture(scope="function") +def validate_syntax_off(topo, request): + config = Config(topo.standalone) + config.replace("nsslapd-syntaxcheck", "off") + + def fin(): + config.replace("nsslapd-syntaxcheck", "on") + request.addfinalizer(fin) + + +def test_valid(topo, validate_syntax_off): + """Test syntax-validate task with valid entries + + :id: ec402a5b-bfb1-494d-b751-71b0d31a4d83 + :setup: Standalone instance + :steps: + 1. 
Set nsslapd-syntaxcheck to off + 2. Clean error log + 3. Run syntax validate task + 4. Assert that there are no errors in the error log + 5. Set nsslapd-syntaxcheck to on + :expectedresults: + 1. It should succeed + 2. It should succeed + 3. It should succeed + 4. It should succeed + 5. It should succeed + """ + + inst = topo.standalone + + log.info('Clean the error log') + inst.deleteErrorLogs() + + schema = Schema(inst) + log.info('Attempting to add task entry...') + validate_task = schema.validate_syntax(DEFAULT_SUFFIX) + validate_task.wait() + exitcode = validate_task.get_exit_code() + assert exitcode == 0 + error_lines = inst.ds_error_log.match('.*Found 0 invalid entries.*') + assert (len(error_lines) == 1) + log.info('Found 0 invalid entries - Success') + + +def test_invalid_uidnumber(topo, validate_syntax_off): + """Test syntax-validate task with invalid uidNumber attribute value + + :id: 30fdcae6-ffa6-4ec4-8da9-6fb138fc1828 + :setup: Standalone instance + :steps: + 1. Set nsslapd-syntaxcheck to off + 2. Clean error log + 3. Add a user with uidNumber attribute set to an invalid value (string) + 4. Run syntax validate task + 5. Assert that there is corresponding error in the error log + 6. Set nsslapd-syntaxcheck to on + :expectedresults: + 1. It should succeed + 2. It should succeed + 3. It should succeed + 4. It should succeed + 5. It should succeed + 6. 
It should succeed + """ + + inst = topo.standalone + + log.info('Clean the error log') + inst.deleteErrorLogs() + + users = UserAccounts(inst, DEFAULT_SUFFIX) + users.create_test_user(uid="invalid_value") + + schema = Schema(inst) + log.info('Attempting to add task entry...') + validate_task = schema.validate_syntax(DEFAULT_SUFFIX) + validate_task.wait() + exitcode = validate_task.get_exit_code() + assert exitcode == 0 + error_lines = inst.ds_error_log.match('.*uidNumber: value #0 invalid per syntax.*') + assert (len(error_lines) == 1) + log.info('Found an invalid entry with wrong uidNumber - Success') + + +def test_invalid_dn_syntax_crash(topo): + """Add an entry with an escaped space, restart the server, and try to delete + it. In this case the DN is not correctly parsed and causes cache revert to + to dereference a NULL pointer. So the delete can fail as long as the server + does not crash. + + :id: 62d87272-dfb8-4627-9ca1-dbe33082caf8 + :setup: Standalone Instance + :steps: + 1. Add entry with leading escaped space in the RDN + 2. Restart the server so the entry is rebuilt from the database + 3. Delete the entry + 4. The server should still be running + :expectedresults: + 1. Success + 2. Success + 3. Success + 4. Success + """ + + # Create group + groups = Groups(topo.standalone, DEFAULT_SUFFIX) + group = groups.create(properties={'cn': ' test'}) + + # Restart the server + topo.standalone.restart() + + # Delete group + try: + group.delete() + except ldap.NO_SUCH_OBJECT: + # This is okay in this case as we are only concerned about a crash + pass + + # Make sure server is still running + groups.list() + + +@pytest.mark.parametrize("props, rawdn", [ + ({'cn': ' leadingSpace'}, "cn=\\20leadingSpace,ou=Groups,dc=example,dc=com"), + ({'cn': 'trailingSpace '}, "cn=trailingSpace\\20,ou=Groups,dc=example,dc=com")]) +def test_dn_syntax_spaces_delete(topo, props, rawdn): + """Test that an entry with a space as the first character in the DN can be + deleted without error. 
We also want to make sure the indexes are properly + updated by repeatedly adding and deleting the entry, and that the entry cache + is properly maintained. + + :id: b993f37c-c2b0-4312-992c-a9048ff98965 + :customerscenario: True + :parametrized: yes + :setup: Standalone Instance + :steps: + 1. Create a group with a DN that has a space as the first/last + character. + 2. Delete group + 3. Add group + 4. Modify group + 5. Restart server and modify entry + 6. Delete group + 7. Add group back + 8. Delete group using specific DN + :expectedresults: + 1. Success + 2. Success + 3. Success + 4. Success + 5. Success + 6. Success + 7. Success + 8. Success + """ + + # Create group + groups = Groups(topo.standalone, DEFAULT_SUFFIX) + group = groups.create(properties=props.copy()) + + # Delete group (verifies DN/RDN parsing works and cache is correct) + group.delete() + + # Add group again (verifies entryrdn index was properly updated) + groups = Groups(topo.standalone, DEFAULT_SUFFIX) + group = groups.create(properties=props.copy()) + + # Modify the group (verifies dn/rdn parsing is correct) + group.replace('description', 'escaped space group') + + # Restart the server. This will pull the entry from the database and + # convert it into a cache entry, which is different than how a client + # first adds an entry and is put into the cache before being written to + # disk. + topo.standalone.restart() + + # Make sure we can modify the entry (verifies cache entry was created + # correctly) + group.replace('description', 'escaped space group after restart') + + # Make sure it can still be deleted (verifies cache again). + group.delete() + + # Add it back so we can delete it using a specific DN (sanity test to verify + # another DN/RDN parsing variation). 
+ groups = Groups(topo.standalone, DEFAULT_SUFFIX) + group = groups.create(properties=props.copy()) + group = Group(topo.standalone, dn=rawdn) + group.delete() + + +def test_boolean_case(topo): + """Test that we can a boolean value in any case + + :id: 56777c1d-b058-41e1-abd5-87a6f1512db2 + :customerscenario: True + :setup: Standalone Instance + :steps: + 1. Create test user + 2. Add boolean attribute value that is lowercase "false" + :expectedresults: + 1. Success + 2. Success + """ + inst = topo.standalone + users = UserAccounts(inst, DEFAULT_SUFFIX) + user = users.create_test_user(uid=1011) + + user.add('objectclass', 'extensibleObject') + user.add('pamsecure', 'false') + user.replace('pamsecure', 'FALSE') + user.replace('pamsecure', 'true') + user.replace('pamsecure', 'TRUE') + + # Test some invalid syntax + with pytest.raises(ldap.INVALID_SYNTAX): + user.replace('pamsecure', 'blah') + + +if __name__ == '__main__': + # Run isolated + # -s for DEBUG mode + CURRENT_FILE = os.path.realpath(__file__) + pytest.main("-s %s" % CURRENT_FILE) diff --git a/dirsrvtests/tests/suites/syntax/mr_test.py b/dirsrvtests/tests/suites/syntax/mr_test.py new file mode 100644 index 0000000..a7fa5a9 --- /dev/null +++ b/dirsrvtests/tests/suites/syntax/mr_test.py @@ -0,0 +1,70 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2020 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. 
+# --- END COPYRIGHT BLOCK --- +# + +import logging +import pytest +import os +import ldap +from lib389.dbgen import dbgen_users +from lib389._constants import * +from lib389.topologies import topology_st as topo +from lib389._controls import SSSRequestControl + +pytestmark = pytest.mark.tier1 + +DEBUGGING = os.getenv("DEBUGGING", default=False) +if DEBUGGING: + logging.getLogger(__name__).setLevel(logging.DEBUG) +else: + logging.getLogger(__name__).setLevel(logging.INFO) +log = logging.getLogger(__name__) + + +def test_sss_mr(topo): + """Test matching rule/server side sort does not crash DS + + :id: 48c73d76-1694-420f-ab55-187135f2d260 + :setup: Standalone Instance + :steps: + 1. Add sample entries to the database + 2. Perform search using server side control (uid:2.5.13.3) + :expectedresults: + 1. Success + 2. Success + """ + + log.info("Creating LDIF...") + ldif_dir = topo.standalone.get_ldif_dir() + ldif_file = os.path.join(ldif_dir, 'mr-crash.ldif') + dbgen_users(topo.standalone, 5, ldif_file, DEFAULT_SUFFIX) + + log.info("Importing LDIF...") + topo.standalone.stop() + assert topo.standalone.ldif2db(DEFAULT_BENAME, None, None, None, ldif_file) + topo.standalone.start() + + log.info('Search using server side sorting using undefined mr in the attr...') + sort_ctrl = SSSRequestControl(True, ['uid:2.5.13.3']) + controls = [sort_ctrl] + msg_id = topo.standalone.search_ext(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE, + "objectclass=*", serverctrls=controls) + try: + rtype, rdata, rmsgid, response_ctrl = topo.standalone.result3(msg_id) + except ldap.OPERATIONS_ERROR: + pass + + log.info("Test PASSED") + + +if __name__ == '__main__': + # Run isolated + # -s for DEBUG mode + CURRENT_FILE = os.path.realpath(__file__) + pytest.main(["-s", CURRENT_FILE]) + diff --git a/dirsrvtests/tests/suites/tls/__init__.py b/dirsrvtests/tests/suites/tls/__init__.py new file mode 100644 index 0000000..6846c00 --- /dev/null +++ b/dirsrvtests/tests/suites/tls/__init__.py @@ -0,0 +1,3 @@ +""" + 
:Requirement: 389-ds-base: Transport Layer Security +""" diff --git a/dirsrvtests/tests/suites/tls/cipher_test.py b/dirsrvtests/tests/suites/tls/cipher_test.py new file mode 100644 index 0000000..523e2bd --- /dev/null +++ b/dirsrvtests/tests/suites/tls/cipher_test.py @@ -0,0 +1,60 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2022 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. +# --- END COPYRIGHT BLOCK --- +# +import pytest +import os +from lib389.config import Encryption +from lib389.topologies import topology_st as topo + +pytestmark = pytest.mark.tier1 + +def test_long_cipher_list(topo): + """Test a long cipher list, and makre sure it is not truncated + + :id: bc400f54-3966-49c8-b640-abbf4fb2377d + :setup: Standalone Instance + :steps: + 1. Set nsSSL3Ciphers to a very long list of ciphers + 2. Ciphers are applied correctly + :expectedresults: + 1. Success + 2. Success + """ + ENABLED_CIPHER = "TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384::AES-GCM::AEAD::256" + DISABLED_CIPHER = "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256::AES-GCM::AEAD::128" + CIPHER_LIST = ( + "-all,-SSL_CK_RC4_128_WITH_MD5,-SSL_CK_RC4_128_EXPORT40_WITH_MD5,-SSL_CK_RC2_128_CBC_WITH_MD5," + "-SSL_CK_RC2_128_CBC_EXPORT40_WITH_MD5,-SSL_CK_DES_64_CBC_WITH_MD5,-SSL_CK_DES_192_EDE3_CBC_WITH_MD5," + "-TLS_RSA_WITH_RC4_128_MD5,-TLS_RSA_WITH_RC4_128_SHA,-TLS_RSA_WITH_3DES_EDE_CBC_SHA," + "-TLS_RSA_WITH_DES_CBC_SHA,-SSL_RSA_FIPS_WITH_3DES_EDE_CBC_SHA,-SSL_RSA_FIPS_WITH_DES_CBC_SHA," + "-TLS_RSA_EXPORT_WITH_RC4_40_MD5,-TLS_RSA_EXPORT_WITH_RC2_CBC_40_MD5,-TLS_RSA_WITH_NULL_MD5," + "-TLS_RSA_WITH_NULL_SHA,-TLS_RSA_EXPORT1024_WITH_DES_CBC_SHA,-SSL_FORTEZZA_DMS_WITH_FORTEZZA_CBC_SHA," + "-SSL_FORTEZZA_DMS_WITH_RC4_128_SHA,-SSL_FORTEZZA_DMS_WITH_NULL_SHA,-TLS_DHE_DSS_WITH_DES_CBC_SHA," + "-TLS_DHE_DSS_WITH_3DES_EDE_CBC_SHA,-TLS_DHE_RSA_WITH_DES_CBC_SHA,-TLS_DHE_RSA_WITH_3DES_EDE_CBC_SHA," + 
"+TLS_RSA_WITH_AES_128_CBC_SHA,-TLS_DHE_DSS_WITH_AES_128_CBC_SHA,-TLS_DHE_RSA_WITH_AES_128_CBC_SHA," + "+TLS_RSA_WITH_AES_256_CBC_SHA,-TLS_DHE_DSS_WITH_AES_256_CBC_SHA,-TLS_DHE_RSA_WITH_AES_256_CBC_SHA," + "-TLS_RSA_EXPORT1024_WITH_RC4_56_SHA,-TLS_DHE_DSS_WITH_RC4_128_SHA,-TLS_ECDHE_RSA_WITH_RC4_128_SHA," + "-TLS_RSA_WITH_NULL_SHA,-TLS_RSA_EXPORT1024_WITH_DES_CBC_SHA,-SSL_CK_DES_192_EDE3_CBC_WITH_MD5," + "-TLS_RSA_WITH_RC4_128_MD5,-TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,-TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA," + "-TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,+TLS_AES_128_GCM_SHA256,+TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384" + ) + + topo.standalone.enable_tls() + enc = Encryption(topo.standalone) + enc.set('nsSSL3Ciphers', CIPHER_LIST) + topo.standalone.restart() + enabled_ciphers = enc.get_attr_vals_utf8('nssslenabledciphers') + assert ENABLED_CIPHER in enabled_ciphers + assert DISABLED_CIPHER not in enabled_ciphers + + +if __name__ == '__main__': + # Run isolated + # -s for DEBUG mode + CURRENT_FILE = os.path.realpath(__file__) + pytest.main(["-s", CURRENT_FILE]) diff --git a/dirsrvtests/tests/suites/tls/ecdsa_test.py b/dirsrvtests/tests/suites/tls/ecdsa_test.py new file mode 100644 index 0000000..df88977 --- /dev/null +++ b/dirsrvtests/tests/suites/tls/ecdsa_test.py @@ -0,0 +1,214 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2022 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. 
+# --- END COPYRIGHT BLOCK --- +# +import logging +import pytest +import os +import subprocess +from lib389.utils import ds_is_older +from lib389._constants import DN_DM, PW_DM +from lib389.topologies import topology_st as topo +from tempfile import TemporaryDirectory + +pytestmark = pytest.mark.tier1 + +DEBUGGING = os.getenv("DEBUGGING", default=False) +if DEBUGGING: + logging.getLogger(__name__).setLevel(logging.DEBUG) +else: + logging.getLogger(__name__).setLevel(logging.INFO) +log = logging.getLogger(__name__) + +script_content=""" +#!/bin/bash +set -e # Exit if a command fails +set -x # Log the commands + +cd {dir} +inst={instname} +url={url} +rootdn="{rootdn}" +rootpw="{rootpw}" + +################################ +###### GENERATE CA CERT ######## +################################ + +echo " +[ req ] +distinguished_name = req_distinguished_name +policy = policy_match +x509_extensions = v3_ca + +# For the CA policy +[ policy_match ] +countryName = optional +stateOrProvinceName = optional +organizationName = optional +organizationalUnitName = optional +commonName = supplied +emailAddress = optional + +[ req_distinguished_name ] +countryName = Country Name (2 letter code) +countryName_default = FR +countryName_min = 2 +countryName_max = 2 + +stateOrProvinceName = State or Province Name (full name) +stateOrProvinceName_default = test + +localityName = Locality Name (eg, city) + +0.organizationName = Organization Name (eg, company) +0.organizationName_default = test-ECDSA-CA + +organizationalUnitName = Organizational Unit Name (eg, section) +#organizationalUnitName_default = + +commonName = Common Name (e.g. 
server FQDN or YOUR name) +commonName_max = 64 + + +[ v3_ca ] +subjectKeyIdentifier = hash +authorityKeyIdentifier = keyid:always,issuer +basicConstraints = critical,CA:true +#nsComment = "OpenSSL Generated Certificate" +keyUsage=critical, keyCertSign +" >ca.conf + + +openssl ecparam -genkey -name prime256v1 -out ca.key +openssl req -x509 -new -sha256 -key ca.key -nodes -days 3650 -config ca.conf -subj "/CN=`hostname`/O=test-ECDSA-CA/C=FR" -out ca.pem -keyout ca.key +openssl x509 -outform der -in ca.pem -out ca.crt + +openssl x509 -text -in ca.pem + +#################################### +###### GENERATE SERVER CERT ######## +#################################### + +echo " +[ req ] +distinguished_name = req_distinguished_name +policy = policy_match +x509_extensions = v3_cert + +# For the cert policy +[ policy_match ] +countryName = optional +stateOrProvinceName = optional +organizationName = optional +organizationalUnitName = optional +commonName = supplied +emailAddress = optional + +[ req_distinguished_name ] +countryName = Country Name (2 letter code) +countryName_default = FR +countryName_min = 2 +countryName_max = 2 + +stateOrProvinceName = State or Province Name (full name) + +localityName = Locality Name (eg, city) + +0.organizationName = Organization Name (eg, company) +0.organizationName_default = test-ECDSA + +organizationalUnitName = Organizational Unit Name (eg, section) +#organizationalUnitName_default = + +commonName = Common Name (e.g. 
server FQDN or YOUR name) +commonName_max = 64 + + +[ v3_cert ] +basicConstraints = critical,CA:false +subjectAltName=DNS:`hostname` +keyUsage=digitalSignature, nonRepudiation, keyEncipherment, dataEncipherment +#nsComment = "OpenSSL Generated Certificate" +extendedKeyUsage=clientAuth, serverAuth +nsCertType=client, server +" >cert.conf + +openssl ecparam -genkey -name prime256v1 -out cert.key +openssl req -new -sha256 -key cert.key -nodes -config cert.conf -subj "/CN=`hostname`/O=test-ECDSA/C=FR" -out cert.csr +openssl x509 -req -sha256 -days 3650 -extensions v3_cert -extfile cert.conf -in cert.csr -CA ca.pem -CAkey ca.key -CAcreateserial -out cert.pem +openssl pkcs12 -export -inkey cert.key -in cert.pem -name ecdsacert -out cert.p12 -passout pass:secret12 + +openssl x509 -text -in cert.pem + + +############################# +###### INSTALL CERTS ######## +############################# + +certdbdir=$PREFIX/etc/dirsrv/slapd-$inst +rm -f $certdbdir/cert9.db $certdbdir/key4.db +certutil -N -d $certdbdir -f $certdbdir/pwdfile.txt + +certutil -A -n Self-Signed-CA -t CT,, -f $certdbdir/pwdfile.txt -d $certdbdir -a -i ca.pem + +dsctl $inst tls import-server-key-cert cert.pem cert.key + +dsctl $inst restart + + +######################### +###### TEST CERT ######## +######################### +LDAPTLS_CACERT=$PWD/ca.pem ldapsearch -x -H $url -D "$rootdn" -w "$rootpw" -b "" -s base +""" + + +def test_ecdsa(topo): + """Specify a test case purpose or name here + + :id: 7902f37c-01d3-11ed-b65c-482ae39447e5 + :setup: Standalone Instance + :steps: + 1. Generate the test script + 2. Run the test script + 3. Check that ldapsearch returned the namingcontext + :expectedresults: + 1. No error + 2. No error and exit code should be 0 + 3. 
namingcontext should be in the script output + """ + + inst=topo.standalone + inst.enable_tls() + with TemporaryDirectory() as dir: + scriptname = f"{dir}/doit" + scriptname = "/tmp/doit" + d = { + 'dir': dir, + 'instname': inst.serverid, + 'url': f"ldaps://localhost:{inst.sslport}", + 'rootdn': DN_DM, + 'rootpw': PW_DM, + } + with open(scriptname, 'w') as f: + f.write(script_content.format(**d)) + res = subprocess.run(('/bin/bash', scriptname), stdout=subprocess.PIPE, stderr=subprocess.STDOUT, encoding='utf-8') + assert res + log.info(res.stdout) + res.check_returncode() + # If ldapsearch is successful then defaultnamingcontext should be in res.stdout + assert "defaultnamingcontext" in res.stdout + + + + +if __name__ == '__main__': + # Run isolated + # -s for DEBUG mode + CURRENT_FILE = os.path.realpath(__file__) + pytest.main(["-s", CURRENT_FILE]) diff --git a/dirsrvtests/tests/suites/tls/ssl_version_test.py b/dirsrvtests/tests/suites/tls/ssl_version_test.py new file mode 100644 index 0000000..f0dde08 --- /dev/null +++ b/dirsrvtests/tests/suites/tls/ssl_version_test.py @@ -0,0 +1,89 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2022 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. +# --- END COPYRIGHT BLOCK --- +# +import logging +import pytest +import os +from lib389.config import Encryption +from lib389.utils import ds_is_older +from lib389.topologies import topology_st as topo + +pytestmark = pytest.mark.tier1 + +DEBUGGING = os.getenv("DEBUGGING", default=False) +if DEBUGGING: + logging.getLogger(__name__).setLevel(logging.DEBUG) +else: + logging.getLogger(__name__).setLevel(logging.INFO) +log = logging.getLogger(__name__) + + +def test_ssl_version_range(topo): + """Specify a test case purpose or name here + + :id: bc400f54-3966-49c8-b640-abbf4fb2377e + :customerscenario: True + 1. Get current default range + 2. Set sslVersionMin and verify it is applied after a restart + 3. 
Set sslVersionMax and verify it is applied after a restart + 4. Sanity test all the min/max versions + :expectedresults: + 1. Success + 2. Success + 3. Success + 4. Success + """ + + topo.standalone.enable_tls() + enc = Encryption(topo.standalone) + default_min = enc.get_attr_val_utf8('sslVersionMin') + default_max = enc.get_attr_val_utf8('sslVersionMax') + log.info(f"default min: {default_min} max: {default_max}") + if DEBUGGING: + topo.standalone.config.set('nsslapd-auditlog-logging-enabled', 'on') + + # Test that setting the min version is applied after a restart + enc.replace('sslVersionMin', default_max) + enc.replace('sslVersionMax', default_max) + topo.standalone.restart() + min = enc.get_attr_val_utf8('sslVersionMin') + assert min == default_max + + # Test that setting the max version is applied after a restart + enc.replace('sslVersionMin', default_min) + enc.replace('sslVersionMax', default_min) + topo.standalone.restart() + max = enc.get_attr_val_utf8('sslVersionMax') + assert max == default_min + + # 389-ds-base-1.4.3 == Fedora 32, 389-ds-base-1.4.4 == Fedora 33 + # Starting from Fedora 33, cryptographic protocols (TLS 1.0 and TLS 1.1) were moved to LEGACY + # So we should not check for the policies with our DEFAULT crypro setup + # https://fedoraproject.org/wiki/Changes/StrongCryptoSettings2 + if ds_is_older('1.4.4'): + ssl_versions = [('sslVersionMin', ['TLS1.0', 'TLS1.1', 'TLS1.2', 'TLS1.0']), + ('sslVersionMax', ['TLS1.0', 'TLS1.1', 'TLS1.2'])] + else: + ssl_versions = [('sslVersionMin', ['TLS1.2']), + ('sslVersionMax', ['TLS1.2', 'TLS1.3'])] + + # Sanity test all the min/max versions + for attr, versions in ssl_versions: + for version in versions: + # Test that the setting is correctly applied after a restart + enc.replace(attr, version) + topo.standalone.restart() + current_val = enc.get_attr_val_utf8(attr) + assert current_val == version + + +if __name__ == '__main__': + # Run isolated + # -s for DEBUG mode + CURRENT_FILE = 
os.path.realpath(__file__) + pytest.main(["-s", CURRENT_FILE]) diff --git a/dirsrvtests/tests/suites/tls/tls_cert_namespace_test.py b/dirsrvtests/tests/suites/tls/tls_cert_namespace_test.py new file mode 100644 index 0000000..cee3686 --- /dev/null +++ b/dirsrvtests/tests/suites/tls/tls_cert_namespace_test.py @@ -0,0 +1,133 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2020 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. +# --- END COPYRIGHT BLOCK --- +# +import time +import subprocess +import pytest + +from glob import glob +from lib389.utils import * +from lib389.topologies import topology_st +from lib389.paths import Paths + +pytestmark = pytest.mark.tier1 + +logging.getLogger(__name__).setLevel(logging.DEBUG) +log = logging.getLogger(__name__) + +p = Paths() + + +@pytest.mark.ds50889 +@pytest.mark.bz1638875 +@pytest.mark.skipif(p.with_systemd == False, reason='Will not run without systemd') +@pytest.mark.skipif(ds_is_older("1.4.3"), reason="Not implemented") +def test_pem_cert_in_private_namespace(topology_st): + """Test if certificates are present in private /tmp namespace + + :id: 01bc27d0-6368-496a-9724-7fe1e8fb239b + :customerscenario: True + :setup: Standalone instance + :steps: + 1. Create DS instance + 2. Enable TLS + 3. Check if value of PrivateTmp == yes + 4. Check if pem certificates are present in private /tmp + 5. Check if pem certificates are not present in /etc/dirsrv/instance + :expectedresults: + 1. Success + 2. Success + 3. Success + 4. Success + 5. 
Success + """ + + PEM_CHECK = ['Self-Signed-CA.pem', 'Server-Cert-Key.pem', 'Server-Cert.pem'] + PRIVATE_TMP = 'PrivateTmp=yes' + + standalone = topology_st.standalone + + log.info('Enable TLS') + standalone.enable_tls() + + log.info('Checking PrivateTmp value') + cmdline = ['systemctl', 'show', '-p', 'PrivateTmp', 'dirsrv@{}.service'.format(standalone.serverid)] + log.info('Command used : %s' % format_cmd_list(cmdline)) + result = subprocess.check_output(cmdline) + assert PRIVATE_TMP in ensure_str(result) + + log.info('Check files in private /tmp') + cert_path = glob('/tmp/systemd-private-*-dirsrv@{}.service-*/tmp/slapd-{}/'.format(standalone.serverid, + standalone.serverid)) + assert os.path.exists(cert_path[0]) + for item in PEM_CHECK: + log.info('Check that {} is present in private /tmp'.format(item)) + assert os.path.exists(cert_path[0] + item) + + log.info('Check instance cert directory') + cert_path = '/etc/dirsrv/slapd-{}/'.format(standalone.serverid) + assert os.path.exists(cert_path) + for item in PEM_CHECK: + log.info('Check that {} is not present in /etc/dirsrv/slapd-{}/ directory'.format(item, standalone.serverid)) + assert not os.path.exists(cert_path + item) + + +@pytest.mark.ds50952 +@pytest.mark.bz1809279 +@pytest.mark.xfail(ds_is_older("1.4.3"), reason="Might fail because of bz1809279") +@pytest.mark.skipif(ds_is_older("1.4.0"), reason="Not implemented") +def test_cert_category_authority(topology_st): + """Test that certificate generated by instance has category: authority + + :id: b7e816e9-2786-4d76-9c5b-bb111b0870f2 + :setup: Standalone instance + :steps: + 1. Create DS instance + 2. Enable TLS + 3. Check if Self-Signed-CA.pem is present + 4. Trust the certificate + 5. Search if the certificate has category: authority + :expectedresults: + 1. Success + 2. Success + 3. Success + 4. Success + 5. 
Success + """ + + PEM_FILE = 'Self-Signed-CA.pem' + + standalone = topology_st.standalone + + log.info('Enable TLS') + standalone.enable_tls() + + log.info('Get certificate path') + if ds_is_older('1.4.3'): + cert_path = glob('/etc/dirsrv/slapd-{}/'.format(standalone.serverid)) + else: + cert_path = glob('/tmp/systemd-private-*-dirsrv@{}.service-*/tmp/slapd-{}/'.format(standalone.serverid, + standalone.serverid)) + log.info('Check that {} is present'.format(PEM_FILE)) + signed_cert = cert_path[0] + PEM_FILE + assert os.path.exists(signed_cert) + + log.info('Trust the certificate') + subprocess.check_output(['trust', 'anchor', signed_cert]) + + log.info('Search if our certificate has category: authority') + result = subprocess.check_output(['trust', 'list']) + assert re.search(r'^(.*)label: ssca[.]389ds[.]example[.]com\n(.*).*\n.*category: authority$', ensure_str(result), + re.MULTILINE) + + +if __name__ == '__main__': + # Run isolated + # -s for DEBUG mode + CURRENT_FILE = os.path.realpath(__file__) + pytest.main("-s %s" % CURRENT_FILE) \ No newline at end of file diff --git a/dirsrvtests/tests/suites/tls/tls_check_crl_test.py b/dirsrvtests/tests/suites/tls/tls_check_crl_test.py new file mode 100644 index 0000000..eb55985 --- /dev/null +++ b/dirsrvtests/tests/suites/tls/tls_check_crl_test.py @@ -0,0 +1,54 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2018 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. +# --- END COPYRIGHT BLOCK --- +# + + +import pytest +import ldap +from lib389.topologies import topology_st + +pytestmark = pytest.mark.tier1 + +def test_tls_check_crl(topology_st): + """Test that TLS check_crl configurations work as expected. + + :id: 9dfc6c62-dcae-44a9-83e8-b15c8e61c609 + :steps: + 1. Enable TLS + 2. Set invalid value + 3. Set valid values + 4. Check config reset + :expectedresults: + 1. TlS is setup + 2. The invalid value is rejected + 3. The valid values are used + 4. 
The value can be reset + """ + standalone = topology_st.standalone + # Enable TLS + standalone.enable_tls() + # Check all the valid values. + assert(standalone.config.get_attr_val_utf8('nsslapd-tls-check-crl') == 'none') + with pytest.raises(ldap.OPERATIONS_ERROR): + standalone.config.set('nsslapd-tls-check-crl', 'tnhoeutnoeutn') + assert(standalone.config.get_attr_val_utf8('nsslapd-tls-check-crl') == 'none') + + standalone.config.set('nsslapd-tls-check-crl', 'peer') + assert(standalone.config.get_attr_val_utf8('nsslapd-tls-check-crl') == 'peer') + + standalone.config.set('nsslapd-tls-check-crl', 'none') + assert(standalone.config.get_attr_val_utf8('nsslapd-tls-check-crl') == 'none') + + standalone.config.set('nsslapd-tls-check-crl', 'all') + assert(standalone.config.get_attr_val_utf8('nsslapd-tls-check-crl') == 'all') + + standalone.config.remove_all('nsslapd-tls-check-crl') + assert(standalone.config.get_attr_val_utf8('nsslapd-tls-check-crl') == 'none') + + + diff --git a/dirsrvtests/tests/suites/tls/tls_import_ca_chain_test.py b/dirsrvtests/tests/suites/tls/tls_import_ca_chain_test.py new file mode 100644 index 0000000..0be3a9a --- /dev/null +++ b/dirsrvtests/tests/suites/tls/tls_import_ca_chain_test.py @@ -0,0 +1,67 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2022, William Brown +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. 
+# --- END COPYRIGHT BLOCK --- +# + +import pytest +import ldap +import os + +from lib389.nss_ssl import NssSsl +from lib389.topologies import topology_st + +pytestmark = pytest.mark.tier1 + +CA_CHAIN_FILE = os.path.join(os.path.dirname(__file__), '../../data/tls/tls_import_ca_chain.pem') +CRT_CHAIN_FILE = os.path.join(os.path.dirname(__file__), '../../data/tls/tls_import_crt_chain.pem') +KEY_CHAIN_FILE = os.path.join(os.path.dirname(__file__), '../../data/tls/tls_import_key_chain.pem') +KEY_FILE = os.path.join(os.path.dirname(__file__), '../../data/tls/tls_import_key.pem') + +def test_tls_import_chain(topology_st): + """Test that TLS import will correct report errors when there are multiple + files in a chain. + + :id: b7ba71bd-112a-44a1-8a7e-8968249da419 + + :steps: + 1. Attempt to import a ca chain + + :expectedresults: + 1. The chain is rejected + """ + topology_st.standalone.stop() + tls = NssSsl(dirsrv=topology_st.standalone) + tls.reinit() + + with pytest.raises(ValueError): + tls.add_cert(nickname='CA_CHAIN_1', input_file=CA_CHAIN_FILE) + + with pytest.raises(ValueError): + tls.import_rsa_crt(crt=CRT_CHAIN_FILE) + with pytest.raises(ValueError): + tls.import_rsa_crt(ca=CA_CHAIN_FILE) + +def test_tls_import_chain_pk12util(topology_st): + """Test that importing certificate chain files via pk12util does not report + any errors + + :id: c38b2cf9-93f0-4168-ab23-c74ac21ad59f + + :steps: + 1. Attempt to import a ca chain + + :expectedresults: + 1. 
Chain is successfully imported, no errors raised + """ + + topology_st.standalone.stop() + tls = NssSsl(dirsrv=topology_st.standalone) + tls.reinit() + + tls.add_server_key_and_cert(KEY_FILE, CRT_CHAIN_FILE) + tls.add_server_key_and_cert(KEY_CHAIN_FILE, CRT_CHAIN_FILE) + tls.add_server_key_and_cert(KEY_FILE, KEY_CHAIN_FILE) diff --git a/dirsrvtests/tests/suites/tls/tls_ldaps_only_test.py b/dirsrvtests/tests/suites/tls/tls_ldaps_only_test.py new file mode 100644 index 0000000..4bb5989 --- /dev/null +++ b/dirsrvtests/tests/suites/tls/tls_ldaps_only_test.py @@ -0,0 +1,46 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2020 William Brown 0 + entries = M2.search_s(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE, "(objectclass=*)") + entries = M2.search_s(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE, "(objectclass=*)") + + +def test_vlv_recreation_reindex(topology_st): + """Test VLV recreation and reindexing. + + :id: 29f4567f-4ac0-410f-bc99-a32e217a939f + :setup: Standalone instance. + :steps: + 1. Create new VLVs and do the reindex. + 2. Test the new VLVs. + 3. Remove the existing VLVs. + 4. Create new VLVs (with the same name). + 5. Perform online re-indexing of the new VLVs. + 6. Test the new VLVs. + :expectedresults: + 1. Should Success. + 2. Should Success. + 3. Should Success. + 4. Should Success. + 5. Should Success. + 6. Should Success. 
+ """ + + inst = topology_st.standalone + reindex_task = Tasks(inst) + + # Create and test VLVs + vlv_search, vlv_index = create_vlv_search_and_index(inst) + assert reindex_task.reindex( + suffix=DEFAULT_SUFFIX, + attrname=vlv_index.rdn, + args={TASK_WAIT: True}, + vlv=True + ) == 0 + + add_users(inst, 5000) + + conn = open_new_ldapi_conn(inst.serverid) + assert len(conn.search_s(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE, "(cn=*)")) > 0 + check_vlv_search(conn) + + # Remove and recreate VLVs + vlv_index.delete() + vlv_search.delete() + + vlv_search, vlv_index = create_vlv_search_and_index(inst) + assert reindex_task.reindex( + suffix=DEFAULT_SUFFIX, + attrname=vlv_index.rdn, + args={TASK_WAIT: True}, + vlv=True + ) == 0 + + conn = open_new_ldapi_conn(inst.serverid) + assert len(conn.search_s(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE, "(cn=*)")) > 0 + check_vlv_search(conn) + + +if __name__ == "__main__": + # Run isolated + # -s for DEBUG mode + CURRENT_FILE = os.path.realpath(__file__) + pytest.main("-s %s" % CURRENT_FILE) diff --git a/dirsrvtests/tests/suites/webui/README b/dirsrvtests/tests/suites/webui/README new file mode 100644 index 0000000..2481a21 --- /dev/null +++ b/dirsrvtests/tests/suites/webui/README @@ -0,0 +1 @@ +To run locally you need to set WEBUI=1 and PASSWD= env variables. You also need to remove "root" from /etc/cockpit/disallowed-users diff --git a/dirsrvtests/tests/suites/webui/__init__.py b/dirsrvtests/tests/suites/webui/__init__.py new file mode 100644 index 0000000..f92b756 --- /dev/null +++ b/dirsrvtests/tests/suites/webui/__init__.py @@ -0,0 +1,218 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2023 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. 
# --- END COPYRIGHT BLOCK ---
#

import pytest
import distro
import os
import time

from lib389.utils import *
from lib389.topologies import topology_st

pytest.importorskip('playwright')

RHEL = 'Red Hat Enterprise Linux'


def _installed_cockpit_version():
    """Return the version string of the installed cockpit package ('' if unknown)."""
    with os.popen("rpm -q --queryformat '%{VERSION}' cockpit") as f:
        return f.readline().strip()


def _version_key(version):
    """Turn a dotted version string into a list of ints for numeric comparison.

    Plain string comparison is wrong for multi-digit components
    (lexicographically '9' > '10'), so compare component-wise as integers.
    Non-numeric components are ignored.
    """
    return [int(part) for part in version.split('.') if part.isdigit()]


# in some cockpit versions the selectors got renamed, these functions help to check the versions
def check_cockpit_version_is_higher(version):
    """Return True if the installed cockpit version is >= *version* (numeric compare)."""
    return _version_key(_installed_cockpit_version()) >= _version_key(version)


def check_cockpit_version_is_lower(version):
    """Return True if the installed cockpit version is <= *version* (numeric compare)."""
    return _version_key(_installed_cockpit_version()) <= _version_key(version)


# the iframe selection differs for chromium and firefox browser
def determine_frame_selection(page, browser_name):
    """Return the 389-console iframe for the given browser, or None if not present yet."""
    if browser_name == 'firefox':
        # query_selector returns None while the iframe is not yet in the DOM;
        # guard it so the caller's retry loop sees None instead of an AttributeError
        element = page.query_selector('iframe[name="cockpit1:localhost/389-console"]')
        frame = element.content_frame() if element is not None else None
    else:
        frame = page.frame('cockpit1:localhost/389-console')

    return frame


# sometimes on a slow machine the iframe is not loaded yet, so we check for it until
# it is available or the timeout is exhausted
def check_frame_assignment(page, browser_name):
    """Poll for the console iframe every 0.5s for up to ~80 seconds; return it (or None)."""
    timeout = 80
    count = 0
    frame = determine_frame_selection(page, browser_name)

    while (frame is None) and (count < timeout):
        log.info('Waiting 0.5 seconds for iframe availability')
        time.sleep(0.5)
        count += 0.5
        frame = determine_frame_selection(page, browser_name)

    return frame


def remove_instance_through_lib(topology):
    """Delete the standalone instance via lib389 if it exists (pre-test cleanup)."""
    log.info('Check and remove instance before starting tests')
    if topology.standalone.exists():
        topology.standalone.delete()
        time.sleep(1)


def remove_instance_through_webui(topology, page, browser_name):
    """Delete the standalone instance through the WebUI remove-instance dialog."""
    frame = check_frame_assignment(page, browser_name)

    log.info('Check if instance exists')
    if topology.standalone.exists():
        log.info('Delete instance')
        frame.wait_for_selector('#ds-action')
        frame.click('#ds-action')
        frame.click('#remove-ds')
        frame.check('#modalChecked')
        frame.click('//button[normalize-space(.)=\'Remove Instance\']')
        # the console reloads after removal; re-acquire the iframe
        frame = check_frame_assignment(page, browser_name)
        frame.is_visible("#no-inst-create-btn")
        time.sleep(1)
        log.info('Instance deleted')


def setup_login(page):
    """Log into cockpit as root (password from $PASSWD) and open the DS console page."""
    password = ensure_str(os.getenv('PASSWD'))
    page.set_viewport_size({"width": 1920, "height": 1080})

    # increase default timeout to wait enough time on a slow machine for selector availability
    # (it will wait just enough time for the selector to be available,
    # it won't stop for 80 000 milliseconds each time it is called)
    page.set_default_timeout(65000)

    page.goto("http://localhost:9090/")

    # We are at login page
    page.fill('#login-user-input', 'root')
    page.fill('#login-password-input', password)
    page.click("#login-button")
    time.sleep(2)

    # distro.linux_distribution() was removed in distro >= 1.9; distro.name()
    # returns the OS name, and a substring check matches versioned names too
    if RHEL in distro.name():
        page.wait_for_selector('text=Red Hat Directory Server')
        page.click('text=Red Hat Directory Server')
    else:
        page.wait_for_selector('text=389 Directory Server')
        page.click('text=389 Directory Server')


@pytest.fixture(scope="function")
def setup_page(topology_st, page, browser_name, request):
    """Per-test fixture: ensure a clean state, log in, and remove the instance afterwards."""
    # remove instance if it exists before starting tests
    remove_instance_through_lib(topology_st)
    setup_login(page)

    def fin():
        remove_instance_through_webui(topology_st, page, browser_name)

    request.addfinalizer(fin)


def enable_replication(frame):
    """Enable replication through the WebUI if it is not enabled yet."""
    log.info('Check if replication is enabled, if not enable it in order to proceed further with test.')
    frame.get_by_role('tab', name='Replication').click()
    time.sleep(2)
    if frame.get_by_role('button', name='Enable Replication').is_visible():
        frame.get_by_role('button', name='Enable Replication').click()
        frame.fill('#enableBindPW', 'redhat')
        frame.fill('#enableBindPWConfirm', 'redhat')
        frame.get_by_role("dialog", name="Enable Replication").get_by_role("button",
                                                                          name="Enable Replication").click()
        frame.get_by_role('button', name='Add Replication Manager').wait_for()
        assert frame.get_by_role('button', name='Add Replication Manager').is_visible()


def load_ldap_browser_tab(frame):
    """Open the LDAP Browser tab and expand the dc=example,dc=com suffix."""
    frame.get_by_role('tab', name='LDAP Browser', exact=True).click()
    frame.get_by_role('button').filter(has_text='dc=example,dc=com').click()
    frame.get_by_role('columnheader', name='Attribute').wait_for()
    time.sleep(1)


def prepare_page_for_entry(frame, entry_type):
    """Open the 'New ...' wizard and select the requested entry type."""
    frame.get_by_role("tabpanel", name="Tree View").get_by_role("button", name="Actions").click()
    frame.get_by_role("menuitem", name="New ...").click()
    frame.get_by_label(f"Create a new {entry_type}").check()
    frame.get_by_role("button", name="Next").click()


def finish_entry_creation(frame, entry_type, entry_data):
    """Drive the wizard's final confirmation pages and wait for the new tree entry."""
    frame.get_by_role("button", name="Next").click()
    # the confirmation button label differs per entry type
    if entry_type == "User":
        frame.get_by_role("contentinfo").get_by_role("button", name="Create User").click()
    elif entry_type == "custom Entry":
        frame.get_by_role("button", name="Create Entry").click()
    else:
        frame.get_by_role("button", name="Create", exact=True).click()
    frame.get_by_role("button", name="Finish").click()
    frame.get_by_role("button").filter(has_text=entry_data['suffixTreeEntry']).wait_for()


def create_entry(frame, entry_type, entry_data):
    """Create an entry of *entry_type* through the LDAP Browser wizard.

    *entry_data* is one of the per-type dicts (see callers); its
    'suffixTreeEntry' value is the tree label waited for on success.
    """
    prepare_page_for_entry(frame, entry_type)

    if entry_type == 'User':
        frame.get_by_role("button", name="Options menu").click()
        frame.get_by_role("option", name="Posix Account").click()
        frame.get_by_role("button", name="Next", exact=True).click()
        frame.get_by_role("button", name="Next", exact=True).click()

        # fill the first six attribute rows in dict insertion order;
        # the trailing 'suffixTreeEntry' key is not an attribute row
        for row, value in enumerate(entry_data.values()):
            if row > 5:
                break
            frame.get_by_role("button", name=f"Place row {row} in edit mode").click()
            frame.get_by_role("textbox", name="_").fill(value)
            frame.get_by_role("button", name=f"Save row edits for row {row}").click()

    elif entry_type == 'Group':
        frame.get_by_role("button", name="Next").click()
        frame.locator("#groupName").fill(entry_data["group_name"])
        frame.get_by_role("button", name="Next").click()

    elif entry_type == 'Organizational Unit':
        frame.get_by_role("button", name="Next", exact=True).click()
        frame.get_by_role("button", name="Place row 0 in edit mode").click()
        frame.get_by_role("textbox", name="_").fill(entry_data['ou_name'])
        frame.get_by_role("button", name="Save row edits for row 0").click()

    elif entry_type == 'Role':
        frame.locator("#namingVal").fill(entry_data['role_name'])
        frame.get_by_role("button", name="Next").click()
        frame.get_by_role("button", name="Next", exact=True).click()

    elif entry_type == 'custom Entry':
        frame.get_by_role("checkbox", name="Select row 0").check()
        frame.get_by_role("button", name="Next", exact=True).click()
        frame.get_by_role("checkbox", name="Select row 1").check()
        frame.get_by_role("button", name="Next", exact=True).click()
        frame.get_by_role("button", name="Place row 0 in edit mode").click()
        frame.get_by_role("textbox", name="_").fill(entry_data['uid'])
        frame.get_by_role("button", name="Save row edits for row 0").click()
        frame.get_by_role("button", name="Place row 1 in edit mode").click()
        frame.get_by_role("textbox", name="_").fill(entry_data['entry_name'])
        frame.get_by_role("button", name="Save row edits for row 1").click()

    finish_entry_creation(frame, entry_type, entry_data)


def delete_entry(frame):
    """Delete the currently selected tree entry through the Delete wizard."""
    frame.get_by_role("tabpanel", name="Tree View").get_by_role("button", name="Actions").click()
    frame.get_by_role("menuitem", name="Delete ...").click()
    frame.get_by_role("button", name="Next").click()
    frame.get_by_role("button", name="Next").click()
    # flip the "No, don't delete." toggle to confirm deletion
    frame.get_by_text("No, don't delete.").click()
    frame.get_by_role("button", name="Delete").click()
    frame.get_by_role("button", name="Finish").click()
    time.sleep(1)
# --- file: dirsrvtests/tests/suites/webui/backup/__init__.py ---
"""
    :Requirement: WebUI: Backup
"""

# --- file: dirsrvtests/tests/suites/webui/backup/backup_test.py ---
# --- BEGIN COPYRIGHT BLOCK ---
# Copyright (C) 2023 Red Hat, Inc.
# All rights reserved.
#
# License: GPL (version 3 or any later version).
# See LICENSE for details.
# --- END COPYRIGHT BLOCK ---
#
import time
import subprocess
import pytest

from lib389.cli_idm.account import *
from lib389.tasks import *
from lib389.utils import *
from lib389.topologies import topology_st
from .. import setup_page, check_frame_assignment, setup_login

pytestmark = pytest.mark.skipif(os.getenv('WEBUI') is None, reason="These tests are only for WebUI environment")
pytest.importorskip('playwright')

SERVER_ID = 'standalone1'


@pytest.mark.xfail(reason="Will fail because of bz2189181")
def test_no_backup_dir(topology_st, page, browser_name):
    """ Test that instance is able to load when backup directory doesn't exist.

    :id: a1fb9e70-c110-4578-ba1f-4b593cc0a047
    :setup: Standalone instance
    :steps:
        1. Set Backup Directory (nsslapd-bakdir) to non existing directory.
        2. Check if element on Server tab is loaded.
    :expectedresults:
        1. Success
        2. Element is visible.
    """

    topology_st.standalone.config.set('nsslapd-bakdir', '/DOES_NOT_EXIST')

    setup_login(page)
    time.sleep(1)
    frame = check_frame_assignment(page, browser_name)

    log.info('Check if server settings tabs are loaded.')
    frame.get_by_role('tab', name='General Settings', exact=True).wait_for()
    assert frame.get_by_role('tab', name='General Settings').is_visible()


if __name__ == '__main__':
    # Run isolated
    # -s for DEBUG mode
    CURRENT_FILE = os.path.realpath(__file__)
    # pytest.main() takes a list of CLI arguments; the command-string form is not supported
    pytest.main(["-s", CURRENT_FILE])

# --- file: dirsrvtests/tests/suites/webui/create/__init__.py ---
"""
    :Requirement: WebUI: Create instance
"""

# --- file: dirsrvtests/tests/suites/webui/create/create_instance_test.py ---
# --- BEGIN COPYRIGHT BLOCK ---
# Copyright (C) 2021 Red Hat, Inc.
# All rights reserved.
#
# License: GPL (version 3 or any later version).
# See LICENSE for details.
# --- END COPYRIGHT BLOCK ---
#
import time
import subprocess
import pytest

from lib389.cli_idm.account import *
from lib389.tasks import *
from lib389.utils import *
from lib389.topologies import topology_st
from .. import setup_page, check_frame_assignment

pytestmark = pytest.mark.skipif(os.getenv('WEBUI') is None, reason="These tests are only for WebUI environment")
pytest.importorskip('playwright')

SERVER_ID = 'standalone1'


def test_no_instance(topology_st, page, browser_name, setup_page):
    """ Test page of Red Hat Directory Server when no instance is created

    :id: 04c962b1-df5e-470d-8e19-aa6b77988c75
    :setup: Standalone instance
    :steps:
        1. Go to Red Hat Directory server side tab page
        2. Check there is Create New Instance button when no instance exists
    :expectedresults:
        1. Success
        2. Button is visible
    """
    time.sleep(1)
    frame = check_frame_assignment(page, browser_name)
    log.info('Check the Create New instance button is present')
    frame.wait_for_selector('#noInsts')
    assert frame.is_visible('#noInsts')


def test_instance_button_disabled_passwd_short(topology_st, page, browser_name, setup_page):
    """ Test Create Instance button is disabled when password is too short

    :id: 9d413b70-7746-45ef-b389-9b67fcbc945a
    :setup: Standalone instance
    :steps:
        1. Click on Create New Instance button
        2. Fill serverID
        3. Fill password shorter than eight characters
        4. Fill passwordConfirm shorter than eight characters
        5. Check the Create Instance button is disabled when password is too short
    :expectedresults:
        1. A pop-up window should appear with create instance details
        2. Success
        3. Success
        4. Success
        5. Button is disabled
    """
    time.sleep(1)
    frame = check_frame_assignment(page, browser_name)

    log.info('Click on Create New Instance button')
    frame.click("#no-inst-create-btn")
    frame.wait_for_selector('#createServerId')

    log.info('Fill serverID and short password')
    frame.fill('#createServerId', SERVER_ID)
    frame.fill('#createDMPassword', 'redhat')
    frame.fill('#createDMPasswordConfirm', 'redhat')

    log.info('Check Create Instance button is disabled')
    assert frame.is_disabled("text=Create Instance")


def test_create_instance_without_database(topology_st, page, browser_name, setup_page):
    """ Test create instance without database

    :id: 7390c009-cb0d-406a-962a-4a1f0f02cfe6
    :setup: Standalone instance
    :steps:
        1. Click on Create New Instance button
        2. Fill serverID
        3. Fill password longer than eight characters
        4. Fill passwordConfirm longer than eight characters
        5. Click on the Create Instance button
    :expectedresults:
        1. A pop-up window should appear with create instance details
        2. Success
        3. Success
        4. Success
        5. Page redirection successful and instance is created
    """
    time.sleep(1)
    frame = check_frame_assignment(page, browser_name)

    log.info('Click on Create New Instance button')
    frame.click("#no-inst-create-btn")
    frame.wait_for_selector('#createServerId')

    log.info('Fill serverID and password longer than eight characters')
    frame.fill('#createServerId', SERVER_ID)
    frame.fill('#createDMPassword', 'password')
    frame.fill('#createDMPasswordConfirm', 'password')

    log.info('Click Create Instance button')
    frame.click("text=Create Instance")
    frame.wait_for_selector("#serverId")

    log.info('Check that created serverID is present')
    assert frame.is_visible("#serverId")


def test_create_instance_database_suffix_entry(topology_st, page, browser_name, setup_page):
    """ Test create instance with database and suffix entry

    :id: 52703fc9-2f5b-49f9-b80b-bd0703008118
    :setup: Standalone instance
    :steps:
        1. Click on Create New Instance button
        2. Fill serverID, user and password
        3. Check Create Database checkbox
        4. Fill database suffix and name
        5. Select Create Suffix Entry from drop-down list
        6. Click on the Create Instance button
    :expectedresults:
        1. A pop-up window should appear with create instance details
        2. Success
        3. Success
        4. Success
        5. Success
        6. Page redirection successful and instance is created
    """
    time.sleep(1)
    frame = check_frame_assignment(page, browser_name)

    log.info('Click on Create New Instance button')
    frame.click("#no-inst-create-btn")
    frame.wait_for_selector('#createServerId')

    log.info('Fill serverID and password longer than eight characters')
    frame.fill('#createServerId', SERVER_ID)
    frame.fill('#createDMPassword', 'password')
    frame.fill('#createDMPasswordConfirm', 'password')

    log.info('Choose to create database with suffix entry')
    # the create-database form selectors changed in 389-ds-base 2.0.10
    if ds_is_older('2.0.10'):
        frame.check('text="Create Database" >> input[type="checkbox"]')
        frame.fill('input[placeholder="e.g. dc=example,dc=com"]', 'dc=example,dc=com')
        frame.fill('input[placeholder="e.g. userRoot"]', 'userRoot')
    else:
        frame.check('#createDBCheckbox')
        frame.fill('#createDBSuffix', 'dc=example,dc=com')
        frame.fill('#createDBName', 'userRoot')

    frame.select_option('#createInitDB', 'createSuffix')

    frame.click("text=Create Instance")
    frame.wait_for_selector("#serverId")

    log.info('Check that created serverID is present')
    assert frame.is_visible("#serverId")


def test_create_instance_database_sample_entries(topology_st, page, browser_name, setup_page):
    """ Test create instance with database and sample entries

    :id: d6d8cb10-8a5f-428d-b9a7-1c65b85986b7
    :setup: Standalone instance
    :steps:
        1. Click on Create New Instance button
        2. Fill serverID, user and password
        3. Check Create Database checkbox
        4. Fill database suffix and name
        5. Select Create Sample Entries from drop-down list
        6. Click on the Create Instance button
    :expectedresults:
        1. A pop-up window should appear with create instance details
        2. Success
        3. Success
        4. Success
        5. Success
        6. Page redirection successful and instance is created
    """
    time.sleep(1)
    frame = check_frame_assignment(page, browser_name)

    log.info('Click on Create New Instance button')
    frame.click("#no-inst-create-btn")
    frame.wait_for_selector('#createServerId')

    log.info('Fill serverID and password longer than eight characters')
    frame.fill('#createServerId', SERVER_ID)
    frame.fill('#createDMPassword', 'password')
    frame.fill('#createDMPasswordConfirm', 'password')

    log.info('Choose to create database with sample entries')
    # the create-database form selectors changed in 389-ds-base 2.0.10
    if ds_is_older('2.0.10'):
        frame.check('text="Create Database" >> input[type="checkbox"]')
        frame.fill('input[placeholder="e.g. dc=example,dc=com"]', 'dc=example,dc=com')
        frame.fill('input[placeholder="e.g. userRoot"]', 'userRoot')
    else:
        frame.check('#createDBCheckbox')
        frame.fill('#createDBSuffix', 'dc=example,dc=com')
        frame.fill('#createDBName', 'userRoot')

    frame.select_option('#createInitDB', 'createSample')

    log.info('Click Create Instance button')
    frame.click("text=Create Instance")
    frame.wait_for_selector("#serverId")

    log.info('Check that created serverID is present')
    assert frame.is_visible("#serverId")


if __name__ == '__main__':
    # Run isolated
    # -s for DEBUG mode
    CURRENT_FILE = os.path.realpath(__file__)
    # pytest.main() takes a list of CLI arguments; the command-string form is not supported
    pytest.main(["-s", CURRENT_FILE])

# --- file: dirsrvtests/tests/suites/webui/database/__init__.py ---
"""
    :Requirement: WebUI: Database
"""

# --- file: dirsrvtests/tests/suites/webui/database/database_test.py (header) ---
# --- BEGIN COPYRIGHT BLOCK ---
# Copyright (C) 2023 Red Hat, Inc.
# All rights reserved.
#
# License: GPL (version 3 or any later version).
# See LICENSE for details.
# --- END COPYRIGHT BLOCK ---
#
import logging
import time
import subprocess
import pytest

from lib389.cli_idm.account import *
from lib389.tasks import *
from lib389.utils import *
from lib389.pwpolicy import PwPolicyManager
from lib389.topologies import topology_st
from .. import setup_page, check_frame_assignment, setup_login

pytestmark = pytest.mark.skipif(os.getenv('WEBUI') is None, reason="These tests are only for WebUI environment")
pytest.importorskip('playwright')

SERVER_ID = 'standalone1'


def test_database_tab_availability(topology_st, page, browser_name):
    """ Test Database tab visibility

    :id: 863863e0-4ba7-4309-8f56-e6719cdf2bbe
    :setup: Standalone instance
    :steps:
        1. Click on Database tab.
        2. Check if Limits tab under Global Database Configuration is visible.
    :expectedresults:
        1. Success
        2. Element is visible
    """
    setup_login(page)
    time.sleep(1)
    frame = check_frame_assignment(page, browser_name)

    log.info('Check if database tab contents are loaded.')
    frame.get_by_role('tab', name='Database', exact=True).click()
    frame.get_by_role('tab', name='Limits').wait_for()
    assert frame.get_by_role('tab', name='Limits').is_visible()


def test_global_database_configuration_availability(topology_st, page, browser_name):
    """ Test Global Database Configuration tabs visibility

    :id: d0efda45-4e8e-4703-b9c0-ab53249dafc3
    :setup: Standalone instance
    :steps:
        1. Click on Database tab and check if ID List Scan Limit label is visible.
        2. Click on Database Cache tab and check if Automatic Cache Tuning checkbox is visible.
        3. Click on Import Cache tab and check if Automatic Import Cache Tuning checkbox is visible.
        4. Click on NDN Cache tab and check if Normalized DN Cache Max Size label is visible.
        5. Click on Database Locks tab and check if Enable DB Lock Monitoring checkbox is visible.
        6. Click on Advanced Settings and check if Transaction Logs Directory input field is visible.
    :expectedresults:
        1. Element is visible
        2. Element is visible
        3. Element is visible
        4. Element is visible
        5. Element is visible
        6. Element is visible
    """
    setup_login(page)
    time.sleep(1)
    frame = check_frame_assignment(page, browser_name)

    log.info('Check if element on Limits tab is loaded.')
    frame.get_by_role('tab', name='Database', exact=True).click()
    frame.get_by_text('ID List Scan Limit', exact=True).wait_for()
    assert frame.get_by_text('ID List Scan Limit', exact=True).is_visible()

    log.info('Click on Database Cache tab and check if element is loaded')
    frame.get_by_role('tab', name='Database Cache', exact=True).click()
    assert frame.locator('#db_cache_auto').is_visible()

    log.info('Click on Import Cache tab and check if element is loaded')
    frame.get_by_role('tab', name='Import Cache', exact=True).click()
    assert frame.locator('#import_cache_auto').is_visible()

    log.info('Click on NDN Cache tab and check if element is loaded')
    frame.get_by_role('tab', name='NDN Cache', exact=True).click()
    assert frame.get_by_text('Normalized DN Cache Max Size').is_visible()

    log.info('Click on Database Locks tab and check if element is loaded')
    frame.get_by_role('tab', name='Database Locks', exact=True).click()
    assert frame.locator('#dblocksMonitoring').is_visible()

    log.info('Click on Advanced Settings tab and check if element is loaded')
    frame.get_by_role('tab', name='Advanced Settings', exact=True).click()
    assert frame.locator('#txnlogdir').is_visible()


def test_chaining_configuration_availability(topology_st, page, browser_name):
    """ Test Chaining Configuration settings visibility

    :id: 1f936968-d2fc-4fee-beeb-caeeb5df8c3f
    :setup: Standalone instance
    :steps:
        1. Click on Database tab, click on Chaining Configuration button on the side panel.
        2. Check if Size Limit input field is visible.
        3. Click on Controls & Components tab and check if Forwarded LDAP Controls heading is visible.
    :expectedresults:
        1. Success
        2. Element is visible
        3. Element is visible
    """
    setup_login(page)
    time.sleep(1)
    frame = check_frame_assignment(page, browser_name)

    log.info('Click on Chaining Configuration and check if element is loaded.')
    frame.get_by_role('tab', name='Database', exact=True).click()
    frame.locator('#chaining-config').click()
    frame.locator('#defSizeLimit').wait_for()
    assert frame.locator('#defSizeLimit').is_visible()

    log.info('Click on Controls & Components tab and check if element is loaded')
    frame.get_by_role('tab', name='Controls & Components').click()
    assert frame.get_by_role('heading', name='Forwarded LDAP Controls').is_visible()


def test_backups_and_ldifs_availability(topology_st, page, browser_name):
    """ Test Backups & LDIFs settings visibility.

    :id: 90571e96-f3c9-4bec-83d6-04c61e8a0e78
    :setup: Standalone instance
    :steps:
        1. Click on Database tab, click on Backups & LDIFs button on the side panel.
        2. Check if Create Backup button is visible.
        3. Click on LDIFs tab and check if Create LDIF button is visible.
    :expectedresults:
        1. Success
        2. Element is visible
        3. Element is visible
    """
    setup_login(page)
    time.sleep(1)
    frame = check_frame_assignment(page, browser_name)

    log.info('Click on Backups & LDIFs button and check if element is loaded.')
    frame.get_by_role('tab', name='Database', exact=True).click()
    frame.locator('#backups').click()
    assert frame.get_by_role('button', name='Create Backup').is_visible()

    log.info('Click on LDIFs tab and check if element is loaded.')
    frame.get_by_role('tab', name='LDIFs').click()
    assert frame.get_by_role('button', name='Create LDIF').is_visible()


def test_global_policy_availability(topology_st, page, browser_name):
    """ Check if Global Policy settings is visible

    :id: 2bdd219d-c28d-411d-9758-18386f472ad2
    :setup: Standalone instance
    :steps:
        1. Click on Database tab, click on Global Policy button on the side panel.
        2. Check if Password Minimum Age input field is visible.
        3. Click on Expiration tab and click on Enforce Password Expiration checkbox.
        4. Check if Allowed Logins After Password Expires input field is visible.
        5. Click on Account Lockout tab and click on Enable Account Lockout checkbox.
        6. Check if Number of Failed Logins That Locks Out Account input field is visible.
        7. Click on Syntax Checking tab and click on Enable Password Syntax Checking checkbox.
        8. Check if Minimum Length input field is visible.
        9. Click on Temporary Password Rules tab and check if Password Max Use input field is visible.
    :expectedresults:
        1. Success
        2. Element is visible
        3. Success
        4. Element is visible
        5. Success
        6. Element is visible
        7. Success
        8. Element is visible
        9. Element is visible
    """
    setup_login(page)
    time.sleep(1)
    frame = check_frame_assignment(page, browser_name)

    log.info('Click on Global Policy button and check if element is loaded.')
    frame.get_by_role('tab', name='Database', exact=True).click()
    frame.locator('#pwpolicy').click()
    frame.locator('#passwordminage').wait_for()
    assert frame.locator('#passwordminage').is_visible()

    log.info('Click on Expiration tab and check if element is loaded.')
    frame.get_by_role('tab', name='Expiration').click()
    frame.get_by_text('Enforce Password Expiration').click()
    assert frame.locator('#passwordgracelimit').is_visible()

    log.info('Click on Account Lockout tab and check if element is loaded.')
    frame.get_by_role('tab', name='Account Lockout').click()
    frame.get_by_text('Enable Account Lockout').click()
    assert frame.locator('#passwordmaxfailure').is_visible()

    log.info('Click on Syntax Checking tab and check if element is loaded.')
    frame.get_by_role('tab', name='Syntax Checking').click()
    frame.get_by_text('Enable Password Syntax Checking').click()
    assert frame.locator('#passwordminlength').is_visible()

    log.info('Click on Temporary Password Rules tab and check if element is loaded.')
    frame.get_by_role('tab', name='Temporary Password Rules').click()
    assert frame.locator('#passwordtprmaxuse').is_visible()


def test_local_policy_availability(topology_st, page, browser_name):
    """ Test Local Policies settings visibility

    :id: f540e0fa-a4c6-4c88-b97a-d21ada68f627
    :setup: Standalone instance
    :steps:
        1. Click on Database tab, click on Local Policies button on side panel.
        2. Check if Local Password Policies columnheader is visible.
        3. Click on Edit Policy tab and check if Please choose a policy from the Local Policy Table heading is visible.
        4. Click on Create A Policy tab and check if Target DN input field is visible.
    :expectedresults:
        1. Success
        2. Element is visible
        3. Element is visible
        4. Element is visible
    """
    setup_login(page)
    time.sleep(1)
    frame = check_frame_assignment(page, browser_name)

    log.info('Click on Local Policies button and check if element is loaded.')
    frame.get_by_role('tab', name='Database', exact=True).click()
    frame.locator('#localpwpolicy').click()
    frame.get_by_role('columnheader', name='Local Password Policies').wait_for()
    assert frame.get_by_role('columnheader', name='Local Password Policies').is_visible()

    log.info('Click on Edit Policy tab and check if element is loaded.')
    frame.get_by_role('tab', name='Edit Policy').click()
    assert frame.get_by_role('heading', name='Please choose a policy from the Local Policy Table.').is_visible()

    log.info('Click on Create A Policy tab and check if element is loaded.')
    frame.get_by_role('tab', name='Create A Policy').click()
    assert frame.locator('#policyDN').is_visible()


def test_suffixes_policy_availability(topology_st, page, browser_name):
    """ Test Suffixes settings visibility

    :id: b8399229-3b98-46d7-af15-f5ff0bcc6be9
    :setup: Standalone instance
    :steps:
        1. Click on Database tab, click on dc=example,dc=com button.
        2. Check if Entry Cache Size input field is visible.
        3. Click on Referrals tab and check if Referrals columnheader is visible.
        4. Click on Indexes tab and check if Database Indexes-sub tab is visible.
        5. Click on VLV Indexes and check if VLV Indexes columnheader is visible.
        6. Click on Encrypted Attributes and check if Encrypted Attribute columnheader is visible.
    :expectedresults:
        1. Success
        2. Element is visible
        3. Element is visible
        4. Element is visible
        5. Element is visible
        6. Element is visible
    """
    setup_login(page)
    time.sleep(1)
    frame = check_frame_assignment(page, browser_name)

    log.info('Click on Suffixes and check if element is loaded.')
    frame.get_by_role('tab', name='Database', exact=True).click()
    # raw string: the backslashes are CSS escapes for '=' and ',' in the element id,
    # not Python escape sequences
    frame.locator(r'#dc\=example\,dc\=com').click()
    frame.locator('#cachememsize').wait_for()
    assert frame.locator('#cachememsize').is_visible()

    log.info('Click on Referrals tab and check if element is loaded.')
    frame.get_by_role('tab', name='Referrals').click()
    frame.get_by_role('columnheader', name='Referrals').wait_for()
    assert frame.get_by_role('columnheader', name='Referrals').is_visible()

    log.info('Click on Indexes tab and check if element is loaded.')
    frame.get_by_role('tab', name='Indexes', exact=True).click()
    frame.get_by_role('tab', name='Database Indexes').wait_for()
    assert frame.get_by_role('tab', name='Database Indexes').is_visible()

    log.info('Click on VLV Indexes tab and check if element is loaded.')
    frame.get_by_role('tab', name='VLV Indexes').click()
    frame.get_by_role('columnheader', name='VLV Indexes').wait_for()
    assert frame.get_by_role('columnheader', name='VLV Indexes').is_visible()

    log.info('Click on Encrypted Attributes tab and check if element is loaded.')
    frame.get_by_role('tab', name='Encrypted Attributes').click()
    frame.get_by_role('columnheader', name='Encrypted Attribute').wait_for()
    assert frame.get_by_role('columnheader', name='Encrypted Attribute').is_visible()


def test_dictionary_check_checkbox(topology_st, page, browser_name):
    """ Test that Dictionary Check checkbox in WebUI is changed after cli command

    :id: e1dcac6d-df45-4a89-a1f2-b18c65dfecba
    :setup: Standalone instance
    :steps:
        1. Enable PasswordDictCheck through cli.
        2. Open Database tab, Global Password Policies and click on Syntax Checking tab.
        3. Check that Dictionary Check checkbox is checked.
        4. Disable PasswordDictCheck through cli.
        5. Reload Syntax Checking tab.
        6. Check that Dictionary Check checkbox is unchecked.
    :expectedresults:
        1. Success
        2. Success
        3. Dictionary Check checkbox is checked
        4. Success
        5. Success
        6. Dictionary Check checkbox is unchecked
    """
    log.info('Enable password syntax checking and enable dictionary check.')
    ppm = PwPolicyManager(topology_st.standalone)
    ppm.set_global_policy({"passworddictcheck": "on"})

    setup_login(page)
    time.sleep(1)
    frame = check_frame_assignment(page, browser_name)

    log.info('Click on Database tab, click on Global Policy, '
             'click on Syntax Checking and check that Dictionary Check checkbox is checked.')
    frame.get_by_role('tab', name='Database', exact=True).click()
    frame.locator('#pwpolicy').click()
    frame.get_by_role('tab', name='Syntax Checking').click()
    frame.get_by_text('Enable Password Syntax Checking').click()
    assert frame.get_by_text('Dictionary Check').is_checked()

    log.info('Disable dictionary check, reload tab and check that Dictionary Check checkbox is unchecked.')
    ppm.set_global_policy({"passworddictcheck": "off"})
    # keep syntax checking itself enabled so the checkbox stays visible after refresh
    ppm.set_global_policy({"passwordchecksyntax": "on"})
    frame.get_by_role('img', name="Refresh global password policy settings").click()
    assert not frame.get_by_text('Dictionary Check').is_checked()


if __name__ == '__main__':
    # Run isolated
    # -s for DEBUG mode
    CURRENT_FILE = os.path.realpath(__file__)
    # pytest.main() takes a list of CLI arguments; the command-string form is not supported
    pytest.main(["-s", CURRENT_FILE])
b/dirsrvtests/tests/suites/webui/ldap_browser/ldap_browser_test.py @@ -0,0 +1,308 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2023 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. +# --- END COPYRIGHT BLOCK --- +# +import time +import subprocess +import pytest + +from lib389.cli_idm.account import * +from lib389.tasks import * +from lib389.utils import * +from lib389.topologies import topology_st +from .. import setup_page, check_frame_assignment, setup_login, create_entry, delete_entry, load_ldap_browser_tab + +pytestmark = pytest.mark.skipif(os.getenv('WEBUI') is None, reason="These tests are only for WebUI environment") +pytest.importorskip('playwright') + +SERVER_ID = 'standalone1' + +entry_data = {'User': {'cn': 'John Smith', + 'displayName': 'John Smith', + 'gidNumber': '1204', + 'homeDirectory': 'user/jsmith', + 'uid': '1204', + 'uidNumber': '1204', + 'suffixTreeEntry': 'cn=John Smith'}, + 'Group': {'group_name': 'testgroup', + 'suffixTreeEntry': 'cn=testgroup'}, + 'Organizational Unit': {'ou_name': 'testou', + 'suffixTreeEntry': 'ou=testou'}, + 'Role': {'role_name': 'testrole', + 'suffixTreeEntry': 'cn=testrole'}, + 'custom Entry': {'uid': '1234', + 'entry_name': 'test_entry', + 'suffixTreeEntry': 'uid=1234'}} + + +def test_ldap_browser_tab_visibility(topology_st, page, browser_name): + """ Test LDAP Browser tab visibility + + :id: cb5f04dc-99ff-4ef6-928c-5f41272c51af + :setup: Standalone instance + :steps: + 1. Click on LDAP Browser tab. + 2. Check if Tree View tab is visible. + 3. Click on dc=example,dc=com button. + 4. Check if Attribute columnheader is visible. + 5. Click on Table View tab. + 6. Check if Database Suffixes columnheader is visible. + 7. Click on Search tab and click on Show Search Criteria button. + 8. Check if Search Base text input field is visible. + :expectedresults: + 1. Success + 2. Element is visible + 3. Success + 4. Element is visible + 5. Success + 6. 
Element is visible + 7. Success + 8. Element is visible + """ + setup_login(page) + time.sleep(1) + frame = check_frame_assignment(page, browser_name) + + log.info('Click on LDAP Browser tab and check if element is loaded.') + frame.get_by_role('tab', name='LDAP Browser', exact=True).click() + frame.get_by_role('tab', name='Tree View').wait_for() + assert frame.get_by_role('tab', name='Tree View').is_visible() + + log.info('Click on dc=example,dc=com button and check if element is loaded.') + frame.get_by_role('button').filter(has_text='dc=example,dc=com').click() + frame.get_by_role('columnheader', name='Attribute').wait_for() + assert frame.get_by_role('columnheader', name='Attribute').is_visible() + + log.info('Click on Table View tab and check if element is loaded') + frame.get_by_role('tab', name='Table View').click() + assert frame.get_by_role('columnheader', name='Database Suffixes').is_visible() + + log.info('Click on Search tab and check if element is loaded') + frame.get_by_role('tab', name='Search').click() + frame.get_by_text('Show Search Criteria').click() + assert frame.locator('#searchBase').is_visible() + + +def test_create_and_delete_user(topology_st, page, browser_name): + """ Test to create and delete user + + :id: eb08c1d7-cbee-4a37-b724-429e1cdfe092 + :setup: Standalone instance + :steps: + 1. Call load_LDAP_browser_tab function. + 2. Click on ou=people. + 3. Call create_entry function to create new user. + 4. Check that new user is successfully created. + 5. Click on newly created user. + 6. Call delete_entry function to delete user. + 7. Check that newly created user is deleted. + :expectedresults: + 1. Success + 2. Success + 3. Success + 4. User is created + 5. Success + 6. Success + 7. 
User is deleted + """ + + setup_login(page) + time.sleep(1) + frame = check_frame_assignment(page, browser_name) + + entry_type = 'User' + test_data = entry_data.get(entry_type) + load_ldap_browser_tab(frame) + + log.info('Click on ou=people.') + frame.get_by_role('button').filter(has_text='ou=people').click() + frame.get_by_role('columnheader', name='Attribute').wait_for() + time.sleep(1) + + log.info('Create a new user named John Smith by calling create_entry function.') + create_entry(frame, entry_type, test_data) + assert frame.get_by_role("button").filter(has_text=f"cn={test_data['displayName']}").is_visible() + + log.info('Click on cn=John Smith and call delete_entry function to delete it.') + frame.get_by_role("button").filter(has_text=f"cn={test_data['displayName']}").click() + time.sleep(1) + delete_entry(frame) + assert frame.get_by_role("button").filter(has_text=f"cn={test_data['displayName']}").count() == 0 + + +def test_create_and_delete_group(topology_st, page, browser_name): + """ Test to create and delete group + + :id: dcd61b3a-b6bc-4255-8a38-c1f98b435ad9 + :setup: Standalone instance + :steps: + 1. Call load_LDAP_browser_tab function. + 2. Click on ou=groups. + 3. Call create_entry function to create new group. + 4. Check that new group is successfully created. + 5. Click on newly created group. + 6. Call delete_entry function to delete group. + 7. Check that newly created group is deleted. + :expectedresults: + 1. Success + 2. Success + 3. Success + 4. Group is created + 5. Success + 6. Success + 7. 
Group is deleted + """ + + setup_login(page) + time.sleep(1) + frame = check_frame_assignment(page, browser_name) + + entry_type = 'Group' + test_data = entry_data.get(entry_type) + load_ldap_browser_tab(frame) + + log.info('Click on groups.') + frame.get_by_role('button').filter(has_text='ou=groups').click() + frame.get_by_role('columnheader', name='Attribute').wait_for() + time.sleep(1) + + log.info('Call create_entry function to create a new group.') + create_entry(frame, entry_type, test_data) + assert frame.get_by_role("button").filter(has_text=f"cn={test_data['group_name']}").is_visible() + + log.info('Click on cn=testgroup and call delete_entry function to delete it.') + frame.get_by_role("button").filter(has_text=f"cn={test_data['group_name']}").click() + time.sleep(1) + delete_entry(frame) + assert frame.get_by_role("button").filter(has_text=f"cn={test_data['group_name']}").count() == 0 + + +def test_create_and_delete_organizational_unit(topology_st, page, browser_name): + """ Test to create and delete organizational unit + + :id: ce42b85d-6eab-459b-a61d-b77e7979be73 + :setup: Standalone instance + :steps: + 1. Call load_LDAP_browser_tab function. + 2. Call create_entry function to create new organizational unit. + 3. Check that new ou is successfully created. + 4. Click on newly created ou. + 5. Call delete_entry function to delete ou. + 6. Check that newly created ou is deleted. + :expectedresults: + 1. Success + 2. Success + 3. New organizational unit is created + 4. Success + 5. Success + 6. New organizational unit is deleted. 
+ """ + + setup_login(page) + time.sleep(1) + frame = check_frame_assignment(page, browser_name) + + entry_type = 'Organizational Unit' + test_data = entry_data.get(entry_type) + load_ldap_browser_tab(frame) + + log.info('Call create_entry function to create new organizational unit named testou.') + create_entry(frame, entry_type, test_data) + assert frame.get_by_role("button").filter(has_text=f"ou={test_data['ou_name']}").is_visible() + + log.info('Click on ou=testou and call delete_entry function to delete it.') + frame.get_by_role("button").filter(has_text=f"ou={test_data['ou_name']}").click() + time.sleep(1) + delete_entry(frame) + assert frame.get_by_role("button").filter(has_text=f"ou={test_data['ou_name']}").count() == 0 + + +def test_create_and_delete_role(topology_st, page, browser_name): + """ Test to create and delete role + + :id: 39d54c08-5841-403c-9d88-0179f57c27b1 + :setup: Standalone instance + :steps: + 1. Call load_LDAP_browser_tab function. + 2. Call create_entry function to create new role. + 3. Check that new role is successfully created. + 4. Click on newly created role. + 5. Call delete_entry function to delete role. + 6. Check that newly created role is deleted. + :expectedresults: + 1. Success + 2. Success + 3. New role is created + 4. Success + 5. Success + 6. 
New role is deleted + """ + + setup_login(page) + time.sleep(1) + frame = check_frame_assignment(page, browser_name) + + entry_type = 'Role' + test_data = entry_data.get(entry_type) + load_ldap_browser_tab(frame) + + log.info('Call create_entry function to create a new role named testrole.') + create_entry(frame, entry_type, test_data) + assert frame.get_by_role("button").filter(has_text=f"cn={test_data['role_name']}").is_visible() + + log.info('Click on cn=testrole and call delete_entry function to delete it.') + frame.get_by_role("button").filter(has_text=f"cn={test_data['role_name']}").click() + time.sleep(1) + delete_entry(frame) + assert frame.get_by_role("button").filter(has_text=f"cn={test_data['role_name']}").count() == 0 + + +def test_create_and_delete_custom_entry(topology_st, page, browser_name): + """ Test to create and delete custom entry + + :id: 21906d0f-f097-4f30-8308-16085519159a + :setup: Standalone instance + :steps: + 1. Call load_LDAP_browser_tab function. + 2. Call create_entry function to create new custom entry. + 3. Check that new custom entry is successfully created. + 4. Click on newly created custom entry. + 5. Call delete_entry function to delete custom entry. + 6. Check that newly created custom entry is deleted. + :expectedresults: + 1. Success + 2. Success + 3. New custom entry is created + 4. Success + 5. Success + 6. 
New custom entry is deleted + """ + + setup_login(page) + time.sleep(1) + frame = check_frame_assignment(page, browser_name) + + entry_type = 'custom Entry' + test_data = entry_data.get(entry_type) + load_ldap_browser_tab(frame) + + log.info('Call create_entry function to create new custom entry.') + create_entry(frame, entry_type, test_data) + assert frame.get_by_role("button").filter(has_text=f"uid={test_data['uid']}").is_visible() + + log.info('Click on uid=1234 and call delete_entry function to delete it.') + frame.get_by_role("button").filter(has_text=f"uid={test_data['uid']}").click() + time.sleep(1) + delete_entry(frame) + assert frame.get_by_role("button").filter(has_text=f"uid={test_data['uid']}").count() == 0 + + +if __name__ == '__main__': + # Run isolated + # -s for DEBUG mode + CURRENT_FILE = os.path.realpath(__file__) + pytest.main("-s %s" % CURRENT_FILE) diff --git a/dirsrvtests/tests/suites/webui/login/__init__.py b/dirsrvtests/tests/suites/webui/login/__init__.py new file mode 100644 index 0000000..968aff7 --- /dev/null +++ b/dirsrvtests/tests/suites/webui/login/__init__.py @@ -0,0 +1,3 @@ +""" + :Requirement: WebUI: Login +""" diff --git a/dirsrvtests/tests/suites/webui/login/login_test.py b/dirsrvtests/tests/suites/webui/login/login_test.py new file mode 100644 index 0000000..08c92df --- /dev/null +++ b/dirsrvtests/tests/suites/webui/login/login_test.py @@ -0,0 +1,132 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2023 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. +# --- END COPYRIGHT BLOCK --- +# +import time +import subprocess +import pytest +import distro + +from lib389.cli_idm.account import * +from lib389.tasks import * +from lib389.utils import * +from lib389.topologies import topology_st +from ..
import setup_page, remove_instance_through_lib, check_cockpit_version_is_higher, check_frame_assignment, \ + setup_login, check_cockpit_version_is_lower + +pytestmark = pytest.mark.skipif(os.getenv('WEBUI') is None, reason="These tests are only for WebUI environment") +pytest.importorskip('playwright') + +RHEL = 'Red Hat Enterprise Linux' + + +def test_login_no_instance(topology_st, page, browser_name): + """ Test login to WebUI is successful + + :id: 0a85c1ec-20f3-41ae-8203-3951e74f34e7 + :setup: Standalone instance + :steps: + 1. Go to cockpit login page + 2. Fill user and password + 3. Click on login button + 4. Go to Red Hat Directory server side tab page + 5. Check there is Create New Instance button when no instance exists + :expectedresults: + 1. Page redirection to login page successful + 2. Success + 3. Login successful + 4. Page redirection successful + 5. Button is visible + """ + + remove_instance_through_lib(topology_st) + password = ensure_str(os.getenv('PASSWD')) + + # if we use setup_page from __init__.py we would be logged in already + page.set_viewport_size({"width": 1920, "height": 1080}) + + # increase default timeout to wait enough time on a slow machine for selector availability + # (it will wait just enough time for the selector to be available, + # it won't stop for 60 000 miliseconds each time it is called) + page.set_default_timeout(60000) + + page.goto("http://localhost:9090/") + assert page.url == 'http://localhost:9090/' + page.wait_for_selector('#login-user-input') + + # We are at login page + log.info('Let us log in') + page.fill('#login-user-input', 'root') + page.fill('#login-password-input', password) + page.click('#login-button') + time.sleep(2) + + if RHEL in distro.linux_distribution(): + page.wait_for_selector('text=Red Hat Directory Server') + assert page.is_visible('text=Red Hat Directory Server') + log.info('Let us go to RHDS side tab page') + page.click('text=Red Hat Directory Server') + else: + 
page.wait_for_selector('text=389 Directory Server') + assert page.is_visible('text=389 Directory Server') + log.info('Let us go to RHDS side tab page') + page.click('text=389 Directory Server') + + log.info('Login successful') + assert page.url == 'http://localhost:9090/389-console' + + log.info('Check there is Create New Instance button') + frame = check_frame_assignment(page, browser_name) + frame.wait_for_selector('#no-inst-create-btn') + assert frame.is_visible("#no-inst-create-btn") + + +def test_logout(topology_st, page): + """ Test logout from WebUI is successful + + :id: a7e71179-3ef0-4e4e-baca-a36beeef71b6 + :setup: Standalone instance + :steps: + 1. Go to cockpit login page + 2. Fill user and password + 3. Click on login button + 4. Click on root user and choose Log Out option + 5. Check we have been redirected to the login page + :expectedresults: + 1. Page redirection to login page successful + 2. Success + 3. Login successful + 4. Page redirection successful + 5. We are at login page + """ + + setup_login(page) + assert page.url == "http://localhost:9090/389-console" + + # checking cockpit versions because selector ids got renamed in between + log.info('Let us log out') + if check_cockpit_version_is_higher('258'): + page.click('#toggle-menu') + page.click('#logout') + elif check_cockpit_version_is_higher('250') and check_cockpit_version_is_lower('257'): + page.click('#navbar-dropdown') + page.click('#go-logout') + else: + page.click('#content-user-name') + page.click('#go-logout') + + page.wait_for_selector('#login-user-input') + assert page.is_visible('#login-user-input') + log.info('Log out successful') + + +if __name__ == '__main__': + # Run isolated + # -s for DEBUG mode + CURRENT_FILE = os.path.realpath(__file__) + pytest.main("-s %s" % CURRENT_FILE) + diff --git a/dirsrvtests/tests/suites/webui/monitoring/__init__.py b/dirsrvtests/tests/suites/webui/monitoring/__init__.py new file mode 100644 index 0000000..ae9b152 --- /dev/null +++ 
b/dirsrvtests/tests/suites/webui/monitoring/__init__.py @@ -0,0 +1,3 @@ +""" + :Requirement: WebUI: Monitoring +""" diff --git a/dirsrvtests/tests/suites/webui/monitoring/monitoring_test.py b/dirsrvtests/tests/suites/webui/monitoring/monitoring_test.py new file mode 100644 index 0000000..40de3ba --- /dev/null +++ b/dirsrvtests/tests/suites/webui/monitoring/monitoring_test.py @@ -0,0 +1,270 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2023 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. +# --- END COPYRIGHT BLOCK --- +# +import time +import subprocess +import pytest + +from lib389.cli_idm.account import * +from lib389.tasks import * +from lib389.utils import * +from lib389.topologies import topology_st +from .. import setup_page, check_frame_assignment, setup_login, enable_replication + +pytestmark = pytest.mark.skipif(os.getenv('WEBUI') is None, reason="These tests are only for WebUI environment") +pytest.importorskip('playwright') + +SERVER_ID = 'standalone1' + + +def test_monitoring_tab_visibility(topology_st, page, browser_name): + """ Test Monitoring tab visibility + + :id: e16be05a-4465-4a2b-bfe2-7c5aafb55c91 + :setup: Standalone instance + :steps: + 1. Click on Monitoring tab. + 2. Check if Resource Charts tab is visible. + :expectedresults: + 1. Success + 2. Element is visible + """ + setup_login(page) + time.sleep(1) + frame = check_frame_assignment(page, browser_name) + + log.info('Check if Monitoring tab is loaded.') + frame.get_by_role('tab', name='Monitoring', exact=True).click() + frame.get_by_role('tab', name='Resource Charts').wait_for() + assert frame.get_by_role('tab', name='Resource Charts').is_visible() + + +def test_server_statistics_visibility(topology_st, page, browser_name): + """ Test Server Statistics monitoring visibility + + :id: 90e964e8-99d7-45e5-ad20-520099db054e + :setup: Standalone instance + :steps: + 1. 
Click on Monitoring tab and check if Connections heading is visible. + 2. Click on Server Stats tab and check if Server Instance label is visible. + 3. Click on Connection Table tab and check if Client Connections heading is visible. + 4. Click on Disk Space tab and check if Refresh button is visible. + 5. Click on SNMP Counters and check if Bytes Sent label is visible. + :expectedresults: + 1. Element is visible + 2. Element is visible + 3. Element is visible + 4. Element is visible + 5. Element is visible + """ + setup_login(page) + time.sleep(1) + frame = check_frame_assignment(page, browser_name) + + log.info('Click on Monitoring tab and check if element in Server Statistics is loaded.') + frame.get_by_role('tab', name='Monitoring', exact=True).click() + frame.get_by_role('heading', name='Connections').wait_for() + assert frame.get_by_role('heading', name='Connections').is_visible() + + log.info('Click on Server Stats tab and check if element is loaded.') + frame.get_by_role('tab', name='Server Stats').click() + assert frame.get_by_text('Server Instance').is_visible() + + log.info('Click on Connection Table tab and check if element is loaded.') + frame.get_by_role('tab', name='Connection Table').click() + assert frame.get_by_role('heading', name='Client Connections').is_visible() + + log.info('Click on Disk Space tab and check if element is loaded.') + frame.get_by_role('tab', name='Disk Space').click() + assert frame.get_by_role('button', name='Refresh').is_visible() + + log.info('Click on SNMP Counters tab and check if element is loaded.') + frame.get_by_role('tab', name='SNMP Counters').click() + assert frame.get_by_text('Bytes Sent', exact=True).is_visible() + + +def test_replication_visibility(topology_st, page, browser_name): + """ Test Replication monitoring visibility + + :id: 65b271e5-a172-461b-ad36-605706d68780 + :setup: Standalone instance + :steps: + 1. Click on Replication Tab, Click on Enable Replication. + 2. Fill Password and Confirm password. 
+ 3. Click on Enable Replication button and wait until Add Replication Manager is visible. + 4. Click on Monitoring tab, click on Replication button on the side panel. + 5. Check if Generate Report button is visible. + 6. Click on Agreements tab and check if Replication Agreements columnheader is visible. + 7. Click on Winsync tab and check if Winsync Agreements columnheader is visible. + 8. Click on Tasks tab and check if CleanAllRUV Tasks columnheader is visible. + 9. Click on Conflict Entries tab and check if Replication Conflict Entries columnheader is visible. + :expectedresults: + 1. Success + 2. Success + 3. Success + 4. Success + 5. Element is visible + 6. Element is visible + 7. Element is visible + 8. Element is visible + 9. Element is visible + """ + setup_login(page) + time.sleep(1) + frame = check_frame_assignment(page, browser_name) + enable_replication(frame) + + log.info('Click on Monitoring tab and then on Replication in the menu and check if element is loaded.') + frame.get_by_role('tab', name='Monitoring', exact=True).click() + frame.locator('#replication-monitor').click() + frame.get_by_role('button', name='Synchronization Report').wait_for() + frame.locator('#sync-report').click() + frame.get_by_role('tab', name='Prepare New Report').click() + frame.get_by_role('button', name='Generate Report').wait_for() + assert frame.get_by_role('button', name='Generate Report').is_visible() + + log.info('Click on Agreements tab and check if element is loaded.') + assert frame.locator('#replication-suffix-dc\\=example\\,dc\\=com').is_visible() + + +def test_database_visibility(topology_st, page, browser_name): + """ Test Database monitoring visibility + + :id: bf3f3e42-e748-41b8-bda2-a1856343a995 + :setup: Standalone instance + :steps: + 1. Click on Monitoring tab, click on dc=example,dc=com button on the side panel. + 2. Check if Entry Cache Hit Ratio label is visible. + 3. Click on DN Cache tab and check if DN Cache Hit Ratio label is visible. 
+ :expectedresults: + 1. Success + 2. Element is visible + 3. Element is visible + """ + setup_login(page) + time.sleep(1) + frame = check_frame_assignment(page, browser_name) + + log.info('Click on Monitoring tab, then click on database button and check if element is loaded.') + frame.get_by_role('tab', name='Monitoring', exact=True).click() + frame.locator('#dc\\=example\\,dc\\=com').click() + frame.get_by_text('Entry Cache Hit Ratio').wait_for() + assert frame.get_by_text('Entry Cache Hit Ratio').is_visible() + + log.info('Click on DN Cache tab and check if element is loaded.') + frame.get_by_role('tab', name='DN Cache').click() + assert frame.get_by_text('DN Cache Hit Ratio').is_visible() + + +def test_logging_visibility(topology_st, page, browser_name): + """ Test Logging monitoring visibility + + :id: c3e91cd4-569e-45e2-adc7-cbffb4ee7b6c + :setup: Standalone instance + :steps: + 1. Click on Monitoring tab, click on Access Log button on side panel. + 2. Check if Access Log text field is visible. + 3. Click on Audit Log button on side panel. + 4. Check if Audit Log text field is visible. + 5. Click on Audit Failure Log button on side panel. + 6. Check if Audit Failure Log text field is visible. + 7. Click on Errors Log button on side panel. + 8. Check if Errors Log text field is visible. + 9. Click on Security Log button on side panel. + 10. Check if Security Log text field is visible. + :expectedresults: + 1. Success + 2. Element is visible + 3. Success + 4. Element is visible + 5. Success + 6. Element is visible + 7. Success + 8. Element is visible + 9. Success + 10. 
Element is visible + """ + setup_login(page) + time.sleep(1) + frame = check_frame_assignment(page, browser_name) + + log.info('Click on Monitoring tab, then click on Access Log button and check if element is loaded.') + frame.get_by_role('tab', name='Monitoring', exact=True).click() + frame.locator('#access-log-monitor').click() + frame.locator('#accesslog-area').wait_for() + assert frame.locator('#accesslog-area').is_visible() + + log.info('Click on Audit Log button and check if element is loaded.') + frame.locator('#audit-log-monitor').click() + frame.locator('#auditlog-area').wait_for() + assert frame.locator('#auditlog-area').is_visible() + + log.info('Click on Audit Failure Log button and check if element is loaded.') + frame.locator('#auditfail-log-monitor').click() + frame.locator('#auditfaillog-area').wait_for() + assert frame.locator('#auditfaillog-area').is_visible() + + log.info('Click on Errors Log button and check if element is loaded.') + frame.locator('#error-log-monitor').click() + frame.locator('#errorslog-area').wait_for() + assert frame.locator('#errorslog-area').is_visible() + + log.info('Click on Security Log button and check if element is loaded.') + frame.locator('#security-log-monitor').click() + frame.locator('#securitylog-area').wait_for() + assert frame.locator('#securitylog-area').is_visible() + + +def test_create_credential_and_alias(topology_st, page, browser_name): + """ Test check that you are able to give input to input field in pop up windows when creating credential or alias + + :id: 8908405c-47b9-470e-a906-42790b131e9f + :setup: Standalone instance + :steps: + 1. Check if replication is enabled, if not enable it. + 2. Click on Monitoring tab, click on Replication Log button on side panel. + 3. Click on Add Credentials button and fill Hostname and Password, then click on save. + 4. Check if new credential appeared in the credentials list. + 5. Click on Add Alias button and fill alias name and alias hostname, click on Save button. 
+ 6. Check if new alias appeared in the alias list. + :expectedresults: + 1. Success + 2. Success + 3. Success + 4. Element is visible + 5. Success + 6. Element is visible + """ + setup_login(page) + time.sleep(1) + frame = check_frame_assignment(page, browser_name) + enable_replication(frame) + + log.info('Click on Monitoring tab, click on replication button, create new credential and check if it is created') + frame.get_by_role('tab', name='Monitoring', exact=True).click() + frame.locator('#replication-monitor').click() + frame.locator('#pf-tab-1-prepare-new-report').click() + frame.get_by_role('button', name='Add Credentials').click() + frame.locator('#credsHostname').fill('credential.test') + frame.locator('#credsBindpw').fill('redhat') + frame.get_by_role('button', name='Save', exact=True).click() + assert frame.get_by_role("gridcell", name="credential.test:389").is_visible() + + log.info('Click on Add Alias, create new alias, check if new alias is created.') + frame.get_by_role('button', name='Add Alias').click() + frame.locator('#aliasName').fill('alias.test') + frame.locator('#aliasHostname').fill('example.com') + frame.get_by_role('button', name='Save', exact=True).click() + assert frame.get_by_role("gridcell", name="alias.test").is_visible() + + +if __name__ == '__main__': + # Run isolated + # -s for DEBUG mode + CURRENT_FILE = os.path.realpath(__file__) + pytest.main("-s %s" % CURRENT_FILE) diff --git a/dirsrvtests/tests/suites/webui/plugins/__init__.py b/dirsrvtests/tests/suites/webui/plugins/__init__.py new file mode 100644 index 0000000..fc5adce --- /dev/null +++ b/dirsrvtests/tests/suites/webui/plugins/__init__.py @@ -0,0 +1,3 @@ +""" + :Requirement: WebUI: Plugins +""" diff --git a/dirsrvtests/tests/suites/webui/plugins/plugins_test.py b/dirsrvtests/tests/suites/webui/plugins/plugins_test.py new file mode 100644 index 0000000..849dedc --- /dev/null +++ b/dirsrvtests/tests/suites/webui/plugins/plugins_test.py @@ -0,0 +1,423 @@ +# --- BEGIN COPYRIGHT 
BLOCK --- +# Copyright (C) 2023 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. +# --- END COPYRIGHT BLOCK --- +# +import time +import subprocess +import pytest + +from lib389.cli_idm.account import * +from lib389.tasks import * +from lib389.utils import * +from lib389.topologies import topology_st +from .. import setup_page, check_frame_assignment, setup_login + +pytestmark = pytest.mark.skipif(os.getenv('WEBUI') is None, reason="These tests are only for WebUI environment") +pytest.importorskip('playwright') + +SERVER_ID = 'standalone1' + + +def test_plugins_tab_visibility(topology_st, page, browser_name): + """ Test visibility of Plugins tab. + + :id: 5b80bd5d-9294-4521-af0e-cd37ce9264a6 + :setup: Standalone instance + :steps: + 1. Click on Plugins tab. + 2. Check if search input is visible + :expectedresults: + 1. Success + 2. Element is visible + """ + setup_login(page) + time.sleep(1) + frame = check_frame_assignment(page, browser_name) + + log.info('Check if Plugins tab is loaded.') + frame.get_by_role('tab', name='Plugins', exact=True).click() + frame.get_by_placeholder('Search Plugins').wait_for() + assert frame.get_by_placeholder('Search Plugins').is_visible() + + +def test_account_policy_plugin_visibility(topology_st, page, browser_name): + """ Test Account Policy Plugin visibility. + + :id: 6e8a27cb-32a2-46f1-918e-4c3f91c8f34e + :setup: Standalone instance + :steps: + 1. Click on Plugins tab. + 2. Click on Account Policy button on the side panel. + 3. Check if Shared Config Entry text input field is visible. + :expectedresults: + 1. Success + 2. Success + 3. 
Element is visible + """ + setup_login(page) + time.sleep(1) + frame = check_frame_assignment(page, browser_name) + + log.info('Click on Plugins tab, click on Account Policy plugin and check if element is loaded.') + frame.get_by_role('tab', name='Plugins', exact=True).click() + frame.get_by_text('Plugin is disabledAccount Policy', exact=True).wait_for() + frame.get_by_text('Plugin is disabledAccount Policy', exact=True).click() + frame.locator('#configArea').wait_for() + assert frame.locator('#configArea').is_visible() + + +def test_attribute_uniqueness_plugin_visibility(topology_st, page, browser_name): + """ Test Attribute Uniqueness plugin visibility. + + :id: f6e49e13-7820-40fa-b2ae-d5e48dd03d2c + :setup: Standalone instance + :steps: + 1. Click on Plugins tab. + 2. Click on Attribute Uniqueness button on the side panel. + 3. Check if Add Config button is visible. + :expectedresults: + 1. Success + 2. Success + 3. Element is visible + """ + setup_login(page) + time.sleep(1) + frame = check_frame_assignment(page, browser_name) + + log.info('Click on Plugins tab, click on Attribute Uniqueness plugin and check if element is loaded.') + frame.get_by_role('tab', name='Plugins', exact=True).click() + frame.get_by_text('Plugin is disabledAttribute Uniqueness', exact=True).wait_for() + frame.get_by_text('Plugin is disabledAttribute Uniqueness', exact=True).click() + frame.get_by_role('button', name='Add Config').wait_for() + assert frame.get_by_role('button', name='Add Config').is_visible() + + +def test_auto_membership_plugin_visibility(topology_st, page, browser_name): + """ Test Auto Membership plugin visibility + + :id: 5c05617a-8a23-46cb-83ce-3bdd30388e0b + :setup: Standalone instance + :steps: + 1. Click on Plugins tab. + 2. Click on Auto Membership button on the side panel. + 3. Check if Add Definition button is visible. + :expectedresults: + 1. Success + 2. Success + 3. 
Element is visible + """ + setup_login(page) + time.sleep(1) + frame = check_frame_assignment(page, browser_name) + + log.info('Click on Plugins tab, click on Auto Membership plugin and check if element is loaded.') + frame.get_by_role('tab', name='Plugins', exact=True).click() + frame.get_by_text('Plugin is enabledAuto Membership').wait_for() + frame.get_by_text('Plugin is enabledAuto Membership').click() + frame.get_by_role('button', name='Add Definition').wait_for() + assert frame.get_by_role('button', name='Add Definition').is_visible() + + +def test_dna_plugin_visibility(topology_st, page, browser_name): + """ Test DNA plugin visibility. + + :id: b246682b-c41d-4ae9-9c64-38bd8e665a71 + :setup: Standalone instance + :steps: + 1. Click on Plugins tab. + 2. Click on DNA button on the side panel. + 3. Check if Add Config button is visible. + :expectedresults: + 1. Success + 2. Success + 3. Element is visible + """ + setup_login(page) + time.sleep(1) + frame = check_frame_assignment(page, browser_name) + + log.info('Click on Plugins tab, click on DNA plugin and check if element is loaded.') + frame.get_by_role('tab', name='Plugins', exact=True).click() + frame.get_by_text('Plugin is disabledDNA').wait_for() + frame.get_by_text('Plugin is disabledDNA').click() + frame.get_by_role('button', name='Add Config').wait_for() + assert frame.get_by_role('button', name='Add Config').is_visible() + + +def test_linked_attributes_plugin_visibility(topology_st, page, browser_name): + """ Test Linked Attributes plugin visibility + + :id: 21cb6021-dc6f-4f26-a6a3-f4311c9afe2e + :setup: Standalone instance + :steps: + 1. Click on Plugins tab. + 2. Click on Linked Attributes button on the side panel. + 3. Check if Add Config button is visible. + :expectedresults: + 1. Success + 2. Success + 3. 
Element is visible + """ + setup_login(page) + time.sleep(1) + frame = check_frame_assignment(page, browser_name) + + log.info('Click on Plugins tab, click on Linked Attributes plugin and check if element is loaded.') + frame.get_by_role('tab', name='Plugins', exact=True).click() + frame.get_by_text('Linked Attributes').wait_for() + frame.get_by_text('Linked Attributes').click() + frame.get_by_role('button', name='Add Config').wait_for() + assert frame.get_by_role('button', name='Add Config').is_visible() + + +def test_managed_entries_plugin_visibility(topology_st, page, browser_name): + """ Test Managed Entries plugin visibility + + :id: fd2dcaf9-422b-4d17-85f2-bc12427adc1c + :setup: Standalone instance + :steps: + 1. Click on Plugins tab. + 2. Click on Managed Entries button on the side panel. + 3. Check if Create Template button is visible. + 4. Click on Definitions tab. + 5. Check if Add Definition button is visible. + :expectedresults: + 1. Success + 2. Success + 3. Element is visible + 4. Success + 5. Element is visible + """ + setup_login(page) + time.sleep(1) + frame = check_frame_assignment(page, browser_name) + + log.info('Click on Plugins tab, click on Managed Entries plugin and check if element is loaded.') + frame.get_by_role('tab', name='Plugins', exact=True).click() + frame.get_by_text('Plugin is enabledManaged Entries').wait_for() + frame.get_by_text('Plugin is enabledManaged Entries').click() + frame.get_by_role('button', name='Create Template').wait_for() + assert frame.get_by_role('button', name='Create Template').is_visible() + + log.info('Click on Definitions tab and check if element is loaded.') + frame.get_by_role('tab', name='Definitions').click() + assert frame.get_by_role('button', name='Add Definition').is_visible() + + +def test_memberof_plugin_visibility(topology_st, page, browser_name): + """ Test MemberOf plugin visibility + + :id: 865db69f-6e6b-4beb-b456-8e055fc0b14b + :setup: Standalone instance + :steps: + 1. Click on Plugins tab. 
+ 2. Click on MemberOf button on the side panel. + 3. Check if Shared Config Entry text input field is visible. + :expectedresults: + 1. Success + 2. Success + 3. Element is visible + """ + setup_login(page) + time.sleep(1) + frame = check_frame_assignment(page, browser_name) + + log.info('Click on Plugins tab, click on MemberOf plugin and check if element is loaded.') + frame.get_by_role('tab', name='Plugins', exact=True).click() + frame.get_by_text('Plugin is disabledMemberOf').wait_for() + frame.get_by_text('Plugin is disabledMemberOf').click() + frame.locator('#memberOfConfigEntry').wait_for() + assert frame.locator('#memberOfConfigEntry').is_visible() + + +def test_ldap_pass_through_auth_plugin_visibility(topology_st, page, browser_name): + """ test LDAP Pass Through Auth plugin visibility + + :id: a47c4054-233f-4398-aaf6-eddfb442e53d + :setup: Standalone instance + :steps: + 1. Click on Plugins tab. + 2. Click on LDAP Pass Through Auth button on the side panel. + 3. Check if Add URL button is visible. + :expectedresults: + 1. Success + 2. Success + 3. Element is visible + """ + setup_login(page) + time.sleep(1) + frame = check_frame_assignment(page, browser_name) + + log.info('Click on Plugins tab, click on LDAP Pass Through Auth plugin and check if element is loaded.') + frame.get_by_role('tab', name='Plugins', exact=True).click() + frame.get_by_text('LDAP Pass Through Auth').wait_for() + frame.get_by_text('LDAP Pass Through Auth').click() + frame.get_by_role('button', name='Add URL').wait_for() + assert frame.get_by_role('button', name='Add URL').is_visible() + + +def test_pam_pass_through_auth_plugin_visibility(topology_st, page, browser_name): + """ Test PAM Pass Through Auth visibility. + + :id: 99c72177-6c86-4b24-b754-38f9698bd70c + :setup: Standalone instance + :steps: + 1. Click on Plugins tab. + 2. Click on PAM Pass Through Auth button on the side panel. + 3. Check if Add Config button is visible. + :expectedresults: + 1. Success + 2. Success + 3. 
Element is visible + """ + setup_login(page) + time.sleep(1) + frame = check_frame_assignment(page, browser_name) + + log.info('Click on Plugins tab, click on PAM Pass Through Auth plugin and check if element is loaded.') + frame.get_by_role('tab', name='Plugins', exact=True).click() + frame.get_by_text('PAM Pass Through Auth').wait_for() + frame.get_by_text('PAM Pass Through Auth').click() + frame.get_by_role('button', name='Add Config').wait_for() + assert frame.get_by_role('button', name='Add Config').is_visible() + + +def test_posix_winsync_plugin_visibility(topology_st, page, browser_name): + """ Test Posix Winsync plugin visibility. + + :id: 9998d23c-d550-4605-bcbe-d501f26d8a66 + :setup: Standalone instance + :steps: + 1. Click on Plugins tab. + 2. Click on Posix Winsync button on the side panel. + 3. Check if Create MemberOf Task checkbox is visible. + :expectedresults: + 1. Success + 2. Success + 3. Element is visible + """ + setup_login(page) + time.sleep(1) + frame = check_frame_assignment(page, browser_name) + + log.info('Click on Plugins tab, click on Posix Winsync plugin and check if element is loaded.') + frame.get_by_role('tab', name='Plugins', exact=True).click() + frame.get_by_text('Posix Winsync').wait_for() + frame.get_by_text('Posix Winsync').click() + frame.locator('#posixWinsyncCreateMemberOfTask').wait_for() + assert frame.locator('#posixWinsyncCreateMemberOfTask').is_visible() + + +def test_referential_integrity_plugin_visibility(topology_st, page, browser_name): + """ Test Referential Integrity plugin visibility. + + :id: e868f520-a409-4ec8-b086-c86cf1f8855b + :setup: Standalone instance + :steps: + 1. Click on Plugins tab. + 2. Click on Referential Integrity button on the side panel. + 3. Check if Entry Scope text input field is visible. + :expectedresults: + 1. Success + 2. Success + 3. 
Element is visible + """ + setup_login(page) + time.sleep(1) + frame = check_frame_assignment(page, browser_name) + + log.info('Click on Plugins tab, click on Referential Integrity plugin and check if element is loaded.') + frame.get_by_role('tab', name='Plugins', exact=True).click() + frame.get_by_text('Referential Integrity').wait_for() + frame.get_by_text('Referential Integrity').click() + frame.locator('#entryScope').wait_for() + assert frame.locator('#entryScope').is_visible() + + +def test_retro_changelog_plugin_visibility(topology_st, page, browser_name): + """ Test Retro Changelog plugin visibility. + + :id: b1813138-25d9-4b73-ab99-1e27d51d0c53 + :setup: Standalone instance + :steps: + 1. Click on Plugins tab. + 2. Click on Retro Changelog button on the side panel. + 3. Check if Is Replicated checkbox is visible. + :expectedresults: + 1. Success + 2. Success + 3. Element is visible + """ + setup_login(page) + time.sleep(1) + frame = check_frame_assignment(page, browser_name) + + log.info('Click on Plugins tab, click on Retro Changelog plugin and check if element is loaded.') + frame.get_by_role('tab', name='Plugins', exact=True).click() + frame.get_by_text('Retro Changelog').wait_for() + frame.get_by_text('Retro Changelog').click() + frame.locator('#isReplicated').wait_for() + assert frame.locator('#isReplicated').is_visible() + + +def test_rootdn_access_control_plugin_visibility(topology_st, page, browser_name): + """ Test RootDN Access Control plugin visibility + + :id: 3d57131f-a23e-4030-b51b-1dd3ebac95c9 + :setup: Standalone instance + :steps: + 1. Click on Plugins tab. + 2. Click on RootDN Access Control button on the side panel. + 3. Check if Monday checkbox is visible. + :expectedresults: + 1. Success + 2. Success + 3. 
Element is visible + """ + setup_login(page) + time.sleep(1) + frame = check_frame_assignment(page, browser_name) + + log.info('Click on Plugins tab, click on RootDN Access Control plugin and check if element is loaded.') + frame.get_by_role('tab', name='Plugins', exact=True).click() + frame.get_by_text('RootDN Access Control').wait_for() + frame.get_by_text('RootDN Access Control').click() + frame.locator('#allowMon').wait_for() + assert frame.locator('#allowMon').is_visible() + + +def test_usn_plugin_visibility(topology_st, page, browser_name): + """ Test USN plugin visibility + + :id: e1a60298-694e-4d04-ace9-164290a3786b + :setup: Standalone instance + :steps: + 1. Click on Plugins tab. + 2. Click on USN button on the side panel. + 3. Check if USN Global label is visible. + :expectedresults: + 1. Success + 2. Success + 3. Element is visible + """ + setup_login(page) + time.sleep(1) + frame = check_frame_assignment(page, browser_name) + + log.info('Click on Plugins tab, click on USN Access Control plugin and check if element is loaded.') + frame.get_by_role('tab', name='Plugins', exact=True).click() + frame.get_by_text('Plugin is disabledUSN').wait_for() + frame.get_by_text('Plugin is disabledUSN').click() + frame.get_by_text('USN Global').wait_for() + assert frame.get_by_text('USN Global').is_visible() + + +if __name__ == '__main__': + # Run isolated + # -s for DEBUG mode + CURRENT_FILE = os.path.realpath(__file__) + pytest.main("-s %s" % CURRENT_FILE) diff --git a/dirsrvtests/tests/suites/webui/replication/__init__.py b/dirsrvtests/tests/suites/webui/replication/__init__.py new file mode 100644 index 0000000..0ffda68 --- /dev/null +++ b/dirsrvtests/tests/suites/webui/replication/__init__.py @@ -0,0 +1,3 @@ +""" + :Requirement: WebUI: Replication +""" diff --git a/dirsrvtests/tests/suites/webui/replication/replication_test.py b/dirsrvtests/tests/suites/webui/replication/replication_test.py new file mode 100644 index 0000000..97df56a --- /dev/null +++ 
b/dirsrvtests/tests/suites/webui/replication/replication_test.py @@ -0,0 +1,146 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2023 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. +# --- END COPYRIGHT BLOCK --- +# +import time +import subprocess +import pytest + +from lib389.cli_idm.account import * +from lib389.tasks import * +from lib389.utils import * +from lib389.topologies import topology_st +from .. import setup_page, check_frame_assignment, setup_login + +pytestmark = pytest.mark.skipif(os.getenv('WEBUI') is None, reason="These tests are only for WebUI environment") +pytest.importorskip('playwright') + +SERVER_ID = 'standalone1' + + +def test_replication_availability(topology_st, page, browser_name): + """ Test replication tab of Red Hat Directory Server when instance is created + + :id: f3451124-9764-4da1-8efb-4e3d2749e465 + :setup: Standalone instance + :steps: + 1. Go to Red Hat Directory server side tab page + 2. Click on replication tab + 3. Check there is Enable Replication button + :expectedresults: + 1. Success + 2. Success + 3. Button is visible + """ + setup_login(page) + time.sleep(1) + frame = check_frame_assignment(page, browser_name) + + log.info('Click on Replication tab and check if Enable Replication button is visible') + frame.get_by_role('tab', name='Replication').click() + frame.get_by_role('button', name='Enable Replication').wait_for() + assert frame.get_by_role('button', name='Enable Replication').is_visible() + + +def test_enable_replication(topology_st, page, browser_name): + """ Test functionality of Enable Replication button + + :id: 87d8f3c0-1dae-4240-826c-f633abb85cda + :setup: Standalone instance + :steps: + 1. Go to Red Hat Directory server side tab page + 2. Click on replication tab + 3. Click on Enable Replication button + 4. Fill password and confirm password + 5. Click on cancel + 6. Click on Enable Replication button and fill passwords again. + 7. 
Click on Enable Replication button and wait until Add Replication Manager is visible. + :expectedresults: + 1. Success + 2. Success + 3. Success + 4. Success + 5. Success + 6. Success + 7. Success + """ + setup_login(page) + time.sleep(1) + frame = check_frame_assignment(page, browser_name) + + log.info('Click on Replication tab and click on Enable Replication button ') + frame.get_by_role('tab', name='Replication').click() + frame.get_by_role('button', name='Enable Replication').wait_for() + frame.get_by_role('button', name='Enable Replication').click() + + log.info('Fill password, fill confirm password and click on cancel.') + frame.fill('#enableBindPW', 'redhat') + frame.fill('#enableBindPWConfirm', 'redhat') + frame.get_by_role('button', name='Cancel').click() + + assert frame.get_by_role('button', name='Enable Replication').is_visible() + + log.info('Fill password, fill confirm password, click on enable replication' + ' and check Add Replication Manager button is visible.') + frame.get_by_role('button', name='Enable Replication').click() + frame.fill('#enableBindPW', 'redhat') + frame.fill('#enableBindPWConfirm', 'redhat') + frame.get_by_role("dialog", name="Enable Replication").get_by_role("button", name="Enable Replication").click() + frame.get_by_role('button', name='Add Replication Manager').wait_for() + + assert frame.get_by_role('button', name='Add Replication Manager').is_visible() + + +def test_suffixes_visibility(topology_st, page, browser_name): + """ Test visibility of created suffixes in replication tab + + :id: 47141eaa-a506-4a60-a3ae-8e960f692faa + :setup: Standalone instance + :steps: + 1. Click on Replication tab check if Add Replication Manager Button is visible. + 2. Click on Agreements tab check if column header with Replication Agreement text is visible. + 3. Click on Winsync Agreements tab and check if column header with Winsync Agreements text is visible. + 4. 
Click on Change Log tab and check if label with Changelog Maximum Entries text is visible. + 5. Click on RUV's & Tasks tab and check if Export Changelog button is visible. + :expectedresults: + 1. Element is visible. + 2. Element is visible. + 3. Element is visible. + 4. Element is visible. + 5. Element is visible. + """ + setup_login(page) + time.sleep(1) + frame = check_frame_assignment(page, browser_name) + + log.info('Click on Replication tab and check if element is loaded.') + frame.get_by_role('tab', name='Replication').click() + frame.get_by_role('button', name='Add Replication Manager').wait_for() + assert frame.get_by_role('button', name='Add Replication Manager').is_visible() + + log.info('Click on Agreements tab and check if element is loaded.') + frame.get_by_role('tab', name='Agreements (0)', exact=True).click() + assert frame.get_by_role('columnheader', name='Replication Agreements').is_visible() + + log.info('Click on Winsync Agreements tab and check if element is loaded.') + frame.get_by_role('tab', name='Winsync Agreements').click() + assert frame.get_by_role('columnheader', name='Replication Agreements').is_visible() + + log.info('Click on Change Log tab and check if element is loaded.') + frame.get_by_role('tab', name='Change Log').click() + assert frame.get_by_text('Changelog Maximum Entries').is_visible() + + log.info("Click on RUV'S & Tasks tab and check if element is loaded.") + frame.get_by_role('tab', name="RUV'S & Tasks").click() + assert frame.get_by_role('button', name='Export Changelog') + + +if __name__ == '__main__': + # Run isolated + # -s for DEBUG mode + CURRENT_FILE = os.path.realpath(__file__) + pytest.main("-s %s" % CURRENT_FILE) diff --git a/dirsrvtests/tests/suites/webui/schema/__init__.py b/dirsrvtests/tests/suites/webui/schema/__init__.py new file mode 100644 index 0000000..f00925f --- /dev/null +++ b/dirsrvtests/tests/suites/webui/schema/__init__.py @@ -0,0 +1,3 @@ +""" + :Requirement: WebUI: Schema +""" diff --git 
a/dirsrvtests/tests/suites/webui/schema/schema_test.py b/dirsrvtests/tests/suites/webui/schema/schema_test.py new file mode 100644 index 0000000..fe80f9e --- /dev/null +++ b/dirsrvtests/tests/suites/webui/schema/schema_test.py @@ -0,0 +1,64 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2023 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. +# --- END COPYRIGHT BLOCK --- +# +import logging +import time +import subprocess +import pytest + +from lib389.cli_idm.account import * +from lib389.tasks import * +from lib389.utils import * +from lib389.topologies import topology_st +from .. import setup_page, check_frame_assignment, setup_login + +pytestmark = pytest.mark.skipif(os.getenv('WEBUI') is None, reason="These tests are only for WebUI environment") +pytest.importorskip('playwright') + +SERVER_ID = 'standalone1' + + +def test_schema_tab_visibility(topology_st, page, browser_name): + """ Test Schema tab visibility + + :id: 4cbca624-b7be-49db-93f6-f9a9df79a9b2 + :setup: Standalone instance + :steps: + 1. Click on Schema tab and check if Add Object Class button is visible. + 2. Click on Attributes tab and check if Add Attribute button is visible. + 3. Click on Matching Rules tab and check if Matching Rule columnheader is visible. + :expectedresults: + 1. Element is visible + 2. Element is visible + 3. 
Element is visible + """ + setup_login(page) + time.sleep(1) + frame = check_frame_assignment(page, browser_name) + + log.info('Click on Schema tab and check if element is loaded.') + frame.get_by_role('tab', name='Schema', exact=True).click() + frame.get_by_role('button', name='Add ObjectClass').wait_for() + assert frame.get_by_role('button', name='Add ObjectClass').is_visible() + + log.info('Click on Attributes tab and check if element is loaded.') + frame.get_by_role('tab', name='Attributes').click() + frame.get_by_role('button', name='Add Attribute').wait_for() + assert frame.get_by_role('button', name='Add Attribute').is_visible() + + log.info('Click on Matching Rules tab and check if element is loaded.') + frame.get_by_role('tab', name='Matching Rules').click() + frame.get_by_role('columnheader', name='Matching Rule').wait_for() + assert frame.get_by_role('columnheader', name='Matching Rule').is_visible() + + +if __name__ == '__main__': + # Run isolated + # -s for DEBUG mode + CURRENT_FILE = os.path.realpath(__file__) + pytest.main("-s %s" % CURRENT_FILE) diff --git a/dirsrvtests/tests/suites/webui/server/__init__.py b/dirsrvtests/tests/suites/webui/server/__init__.py new file mode 100644 index 0000000..2dfbaeb --- /dev/null +++ b/dirsrvtests/tests/suites/webui/server/__init__.py @@ -0,0 +1,3 @@ +""" + :Requirement: WebUI: Server +""" diff --git a/dirsrvtests/tests/suites/webui/server/server_test.py b/dirsrvtests/tests/suites/webui/server/server_test.py new file mode 100644 index 0000000..9732d3d --- /dev/null +++ b/dirsrvtests/tests/suites/webui/server/server_test.py @@ -0,0 +1,351 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2023 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. 
+# --- END COPYRIGHT BLOCK --- +# +import time +import subprocess +import pytest + +from lib389.cli_idm.account import * +from lib389.tasks import * +from lib389.utils import * +from lib389.topologies import topology_st +from .. import setup_page, check_frame_assignment, setup_login + +pytestmark = pytest.mark.skipif(os.getenv('WEBUI') is None, reason="These tests are only for WebUI environment") +pytest.importorskip('playwright') + +SERVER_ID = 'standalone1' + + +def test_server_settings_availability(topology_st, page, browser_name): + """ Test visibility of Server Settings in server tab + + :id: e87a3c6f-3fda-49fa-91c4-a8ca418f32c2 + :setup: Standalone instance + :steps: + 1. Check if General Settings tab is visible. + :expectedresults: + 1. Element is visible + """ + setup_login(page) + time.sleep(1) + frame = check_frame_assignment(page, browser_name) + + log.info('Check if server settings tabs are loaded.') + frame.get_by_role('tab', name='General Settings', exact=True).wait_for() + assert frame.get_by_role('tab', name='General Settings').is_visible() + + +def test_server_settings_tabs_availability(topology_st, page, browser_name): + """ Test visibility of individual tabs under Server Settings + + :id: 08cd0f84-e233-4a94-8230-a0cc54636595 + :setup: Standalone instance + :steps: + 1. Check if Server Hostname is visible + 2. Click on Directory manager tab and check if Directory Manager DN is visible. + 3. Click on Disk Monitoring tab, click on checkbox and check if Disk Monitoring Threshold label is visible. + 4. Click on Advanced Settings tab and check if Anonymous Resource Limits DN text input is visible. + :expectedresults: + 1. Element is visible. + 2. Element is visible. + 3. Element is visible. + 4. Element is visible. 
+ """ + setup_login(page) + time.sleep(1) + frame = check_frame_assignment(page, browser_name) + + log.info('Check if General Settings tab is loaded.') + frame.locator('#nsslapd-localhost').wait_for() + assert frame.locator('#nsslapd-localhost').is_visible() + + log.info('Click on Directory Manager tab and check if element is loaded.') + frame.get_by_role('tab', name='Directory Manager').click() + assert frame.locator('#nsslapd-rootdn').is_visible() + + log.info('Click on Disk Monitoring tab and check if element is loaded.') + frame.get_by_role('tab', name='Disk Monitoring').click() + frame.locator('#nsslapd-disk-monitoring').click() + assert frame.get_by_text('Disk Monitoring Threshold').is_visible() + + log.info('Click on Advanced Settings tab and check if element is loaded.') + frame.get_by_role('tab', name='Advanced Settings').click() + assert frame.locator('#nsslapd-anonlimitsdn').is_visible() + + +def test_tuning_and_limits_availability(topology_st, page, browser_name): + """ Test visibility of Tuning & Limits settings + + :id: c09af833-0359-46ad-a701-52b67f315f70 + :setup: Standalone instance + :steps: + 1. Click on Tuning & Limits button on the side panel and check if Number Of Worker Threads is visible. + 2. Click on Show Advanced Settings button. + 3. Check if Outbound IO Timeout label is visible. + :expectedresults: + 1. Element is visible + 2. Success + 3. 
Element is visible + """ + setup_login(page) + time.sleep(1) + frame = check_frame_assignment(page, browser_name) + + log.info('Click on Tuning & Limits button and check if element is loaded.') + frame.locator('#tuning-config').click() + frame.get_by_text("Number Of Worker Threads").wait_for() + assert frame.get_by_text("Number Of Worker Threads").is_visible() + + log.info('Open expandable section and check if element is loaded.') + frame.get_by_role('button', name='Show Advanced Settings').click() + frame.get_by_text('Outbound IO Timeout').wait_for() + assert frame.get_by_text('Outbound IO Timeout').is_visible() + + +def test_security_availability(topology_st, page, browser_name): + """ Test Security Settings tabs visibility + + :id: 6cd72564-798c-4524-89d3-aa2691535905 + :setup: Standalone instance + :steps: + 1. Click on Security button on the side panel and check if Security Configuration tab is visible. + 2. Click on Certificate Management tab and check if Add CA Certificate button is visible. + 3. Click on Cipher Preferences and check if Enabled Ciphers heading is visible. + :expectedresults: + 1. Element is visible + 2. Element is visible + 3. 
Element is visible + """ + setup_login(page) + time.sleep(1) + frame = check_frame_assignment(page, browser_name) + + log.info('Click on Security button and check if element is loaded.') + frame.locator('#security-config').click() + frame.get_by_role('tab', name='Security Configuration').wait_for() + assert frame.get_by_role('tab', name='Security Configuration').is_visible() + + log.info('Click on Certificate Management tab and check if element is loaded.') + frame.get_by_role('tab', name='Certificate Management').click() + assert frame.get_by_role('button', name='Add CA Certificate').is_visible() + + log.info('Click on Cipher Preferences tab and check if element is loaded.') + frame.get_by_role('tab', name='Cipher Preferences').click() + assert frame.get_by_role('heading', name='Enabled Ciphers').is_visible() + + +def test_sasl_settings_and_mappings_availability(topology_st, page, browser_name): + """ Test SASL Settings & Mappings visibility + + :id: 88954828-7533-4ac9-bfc0-e9c68f95278f + :setup: Standalone instance + :steps: + 1. Click on SASL Settings & Mappings button on the side panel. + 2. Check if Max SASL Buffer size text input field is visible. + :expectedresults: + 1. Success + 2. Element is visible + """ + setup_login(page) + time.sleep(1) + frame = check_frame_assignment(page, browser_name) + + log.info('Click on SASL Settings & Mappings and check if element is loaded.') + frame.locator('#sasl-config').click() + frame.locator('#maxBufSize').wait_for() + assert frame.locator('#maxBufSize').is_visible() + + +def test_ldapi_and_autobind_availability(topology_st, page, browser_name): + """ Test LDAPI & AutoBind settings visibility + + :id: 505f1e3b-5d84-4734-8c64-fbb8b2805d6b + :setup: Standalone instance + :steps: + 1. Click on LDAPI & Autobind button on the side panel. + 2. Check if LDAPI Socket File Path is visible. + :expectedresults: + 1. Success + 2. 
Element is visible + """ + setup_login(page) + time.sleep(1) + frame = check_frame_assignment(page, browser_name) + + log.info('Click on LDAPI & Autobind and check if element is loaded.') + frame.locator('#ldapi-config').click() + frame.locator('#nsslapd-ldapifilepath').wait_for() + assert frame.locator('#nsslapd-ldapifilepath').is_visible() + + +def test_access_log_availability(topology_st, page, browser_name): + """ Test Access Log tabs visibility + + :id: 48f8e778-b28b-45e1-8946-29456a53cf58 + :setup: Standalone instance + :steps: + 1. Click on Access Log button on the side panel and check if Access Log Location input field is visible. + 2. Click on Rotation Policy tab and check if Maximum Number Of Logs label is visible. + 3. Click on Deletion Policy and check if Log Archive Exceeds label is visible. + :expectedresults: + 1. Element is visible + 2. Element is visible + 3. Element is visible + """ + setup_login(page) + time.sleep(1) + frame = check_frame_assignment(page, browser_name) + + log.info('Click on Access Log button and check if element is loaded.') + frame.locator('#access-log-config').click() + frame.locator('#nsslapd-accesslog').wait_for() + assert frame.locator('#nsslapd-accesslog').is_visible() + + log.info('Click on Rotation Policy tab and check if element is loaded.') + frame.get_by_role('tab', name='Rotation Policy').click() + assert frame.get_by_text('Maximum Number Of Logs').is_visible() + + log.info('Click on Deletion Policy tab and check if element is loaded.') + frame.get_by_role('tab', name='Deletion Policy').click() + assert frame.get_by_text('Log Archive Exceeds (in MB)').is_visible() + + +def test_audit_log_availability(topology_st, page, browser_name): + """ Test Audit Log tabs visibility + + :id: a1539010-22b8-4e6b-b377-666a10c20573 + :setup: Standalone instance + :steps: + 1. Click on Audit Log button on the side panel and check if Audit Log Location input field is visible. + 2. 
Click on Rotation Policy tab and check if Maximum Number Of Logs label is visible. + 3. Click on Deletion Policy and check if Log Archive Exceeds label is visible. + :expectedresults: + 1. Element is visible + 2. Element is visible + 3. Element is visible + """ + setup_login(page) + time.sleep(1) + frame = check_frame_assignment(page, browser_name) + + log.info('Click on Audit Log button and check if element is loaded.') + frame.locator('#audit-log-config').click() + frame.locator('#nsslapd-auditlog').wait_for() + assert frame.locator('#nsslapd-auditlog').is_visible() + + log.info('Click on Rotation Policy tab and check if element is loaded.') + frame.get_by_role('tab', name='Rotation Policy').click() + assert frame.get_by_text('Maximum Number Of Logs').is_visible() + + log.info('Click on Deletion Policy tab and check if element is loaded.') + frame.get_by_role('tab', name='Deletion Policy').click() + assert frame.get_by_text('Log Archive Exceeds (in MB)').is_visible() + + +def test_audit_failure_log_availability(topology_st, page, browser_name): + """ Test Audit Failure Log tabs visibility + + :id: 0adcd31f-98a0-4b70-9efa-e810bc971f77 + :setup: Standalone instance + :steps: + 1. Click on Audit Failure Log button on the side panel and check if Audit Log Location input field is visible. + 2. Click on Rotation Policy tab and check if Maximum Number Of Logs label is visible. + 3. Click on Deletion Policy and check if Log Archive Exceeds label is visible. + :expectedresults: + 1. Element is visible + 2. Element is visible + 3. 
Element is visible + """ + setup_login(page) + time.sleep(1) + frame = check_frame_assignment(page, browser_name) + + log.info('Click on Audit Failure Log button and check if element is loaded.') + frame.locator('#auditfail-log-config').click() + frame.locator('#nsslapd-auditfaillog').wait_for() + assert frame.locator('#nsslapd-auditfaillog').is_visible() + + log.info('Click on Rotation Policy tab and check if element is loaded.') + frame.get_by_role('tab', name='Rotation Policy').click() + assert frame.get_by_text('Maximum Number Of Logs').is_visible() + + log.info('Click on Deletion Policy tab and check if element is loaded.') + frame.get_by_role('tab', name='Deletion Policy').click() + assert frame.get_by_text('Log Archive Exceeds (in MB)').is_visible() + + +def test_errors_log_availability(topology_st, page, browser_name): + """ Test Errors Log tabs visibility + + :id: 52cac1fd-a0cd-4c6e-8963-16d764955b86 + :setup: Standalone instance + :steps: + 1. Click on Errors Log button in the side panel and check if Errors Log Location input field is visible. + 2. Click on Rotation Policy tab and check if Maximum Number Of Logs label is visible. + 3. Click on Deletion Policy and check if Log Archive Exceeds label is visible. + :expectedresults: + 1. Element is visible + 2. Element is visible + 3. 
Element is visible + """ + setup_login(page) + time.sleep(1) + frame = check_frame_assignment(page, browser_name) + + log.info('Click on Errors Log button and check if element is loaded.') + frame.locator('#error-log-config').click() + frame.locator('#nsslapd-errorlog').wait_for() + assert frame.locator('#nsslapd-errorlog').is_visible() + + log.info('Click on Rotation Policy tab and check if element is loaded.') + frame.get_by_role('tab', name='Rotation Policy').click() + assert frame.get_by_text('Maximum Number Of Logs').is_visible() + + log.info('Click on Deletion Policy tab and check if element is loaded.') + frame.get_by_role('tab', name='Deletion Policy').click() + assert frame.get_by_text('Log Archive Exceeds (in MB)').is_visible() + + +def test_security_log_availability(topology_st, page, browser_name): + """ Test Security Log tabs visibility + + :id: 1b851fa2-38c9-4865-9e24-f762ef80825f + :setup: Standalone instance + :steps: + 1. Click on Security Log button in the side panel and check if Security Log Location input field is visible. + 2. Click on Rotation Policy tab and check if Maximum Number Of Logs label is visible. + 3. Click on Deletion Policy and check if Log Archive Exceeds label is visible. + :expectedresults: + 1. Element is visible + 2. Element is visible + 3. 
Element is visible + """ + setup_login(page) + time.sleep(1) + frame = check_frame_assignment(page, browser_name) + + log.info('Click on Security Log button and check if element is loaded.') + frame.locator('#security-log-config').click() + frame.locator('#nsslapd-securitylog').wait_for() + assert frame.locator('#nsslapd-securitylog').is_visible() + + log.info('Click on Rotation Policy tab and check if element is loaded.') + frame.get_by_role('tab', name='Rotation Policy').click() + assert frame.get_by_text('Maximum Number Of Logs').is_visible() + + log.info('Click on Deletion Policy tab and check if element is loaded.') + frame.get_by_role('tab', name='Deletion Policy').click() + assert frame.get_by_text('Log Archive Exceeds (in MB)').is_visible() + + +if __name__ == '__main__': + # Run isolated + # -s for DEBUG mode + CURRENT_FILE = os.path.realpath(__file__) + pytest.main("-s %s" % CURRENT_FILE) diff --git a/dirsrvtests/tests/tickets/__init__.py b/dirsrvtests/tests/tickets/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/dirsrvtests/tests/tickets/ticket47560_test.py b/dirsrvtests/tests/tickets/ticket47560_test.py new file mode 100644 index 0000000..38479bd --- /dev/null +++ b/dirsrvtests/tests/tickets/ticket47560_test.py @@ -0,0 +1,191 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2016 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. 
+# --- END COPYRIGHT BLOCK --- +# +import logging +import time + +import ldap +import pytest +from lib389 import Entry +from lib389._constants import * +from lib389.properties import * +from lib389.topologies import topology_st +from lib389.utils import * + +pytestmark = pytest.mark.tier2 + +log = logging.getLogger(__name__) + + +def test_ticket47560(topology_st): + """ + This test case does the following: + SETUP + - Create entry cn=group,SUFFIX + - Create entry cn=member,SUFFIX + - Update 'cn=member,SUFFIX' to add "memberOf: cn=group,SUFFIX" + - Enable Memberof Plugins + + # Here the cn=member entry has a 'memberOf' but + # cn=group entry does not contain 'cn=member' in its member + + TEST CASE + - start the fixupmemberof task + - read the cn=member entry + - check 'memberOf is now empty + + TEARDOWN + - Delete entry cn=group,SUFFIX + - Delete entry cn=member,SUFFIX + - Disable Memberof Plugins + """ + + def _enable_disable_mbo(value): + """ + Enable or disable mbo plugin depending on 'value' ('on'/'off') + """ + # enable/disable the mbo plugin + if value == 'on': + topology_st.standalone.plugins.enable(name=PLUGIN_MEMBER_OF) + else: + topology_st.standalone.plugins.disable(name=PLUGIN_MEMBER_OF) + + log.debug("-------------> _enable_disable_mbo(%s)" % value) + + topology_st.standalone.stop(timeout=120) + time.sleep(1) + topology_st.standalone.start(timeout=120) + time.sleep(3) + + # need to reopen a connection toward the instance + topology_st.standalone.open() + + def _test_ticket47560_setup(): + """ + - Create entry cn=group,SUFFIX + - Create entry cn=member,SUFFIX + - Update 'cn=member,SUFFIX' to add "memberOf: cn=group,SUFFIX" + - Enable Memberof Plugins + """ + log.debug("-------- > _test_ticket47560_setup\n") + + # + # By default the memberof plugin is disabled create + # - create a group entry + # - create a member entry + # - set the member entry as memberof the group entry + # + entry = Entry(group_DN) + entry.setValues('objectclass', 'top', 
'groupOfNames', 'inetUser') + entry.setValues('cn', 'group') + try: + topology_st.standalone.add_s(entry) + except ldap.ALREADY_EXISTS: + log.debug("Entry %s already exists" % (group_DN)) + + entry = Entry(member_DN) + entry.setValues('objectclass', 'top', 'person', 'organizationalPerson', 'inetorgperson', 'inetUser') + entry.setValues('uid', 'member') + entry.setValues('cn', 'member') + entry.setValues('sn', 'member') + try: + topology_st.standalone.add_s(entry) + except ldap.ALREADY_EXISTS: + log.debug("Entry %s already exists" % (member_DN)) + + replace = [(ldap.MOD_REPLACE, 'memberof', ensure_bytes(group_DN))] + topology_st.standalone.modify_s(member_DN, replace) + + # + # enable the memberof plugin and restart the instance + # + _enable_disable_mbo('on') + + # + # check memberof attribute is still present + # + filt = 'uid=member' + ents = topology_st.standalone.search_s(member_DN, ldap.SCOPE_BASE, filt) + assert len(ents) == 1 + ent = ents[0] + # print ent + value = ensure_str(ent.getValue('memberof')) + # print "memberof: %s" % (value) + assert value == group_DN + + def _test_ticket47560_teardown(): + """ + - Delete entry cn=group,SUFFIX + - Delete entry cn=member,SUFFIX + - Disable Memberof Plugins + """ + log.debug("-------- > _test_ticket47560_teardown\n") + # remove the entries group_DN and member_DN + try: + topology_st.standalone.delete_s(group_DN) + except: + log.warning("Entry %s fail to delete" % (group_DN)) + try: + topology_st.standalone.delete_s(member_DN) + except: + log.warning("Entry %s fail to delete" % (member_DN)) + # + # disable the memberof plugin and restart the instance + # + _enable_disable_mbo('off') + + group_DN = "cn=group,%s" % (SUFFIX) + member_DN = "uid=member,%s" % (SUFFIX) + + # + # Initialize the test case + # + _test_ticket47560_setup() + + # + # start the test + # - start the fixup task + # - check the entry is fixed (no longer memberof the group) + # + log.debug("-------- > Start ticket tests\n") + + filt = 'uid=member' + 
ents = topology_st.standalone.search_s(member_DN, ldap.SCOPE_BASE, filt) + assert len(ents) == 1 + ent = ents[0] + log.debug("Unfixed entry %r\n" % ent) + + # run the fixup task + topology_st.standalone.tasks.fixupMemberOf(suffix=SUFFIX, args={TASK_WAIT: True}) + + ents = topology_st.standalone.search_s(member_DN, ldap.SCOPE_BASE, filt) + assert len(ents) == 1 + ent = ents[0] + log.debug("Fixed entry %r\n" % ent) + + if ensure_str(ent.getValue('memberof')) == group_DN: + log.warning("Error the fixupMemberOf did not fix %s" % (member_DN)) + result_successful = False + else: + result_successful = True + + # + # cleanup up the test case + # + _test_ticket47560_teardown() + + assert result_successful is True + + log.info('Testcase PASSED') + + +if __name__ == '__main__': + # Run isolated + # -s for DEBUG mode + CURRENT_FILE = os.path.realpath(__file__) + pytest.main("-s %s" % CURRENT_FILE) diff --git a/dirsrvtests/tests/tickets/ticket47573_test.py b/dirsrvtests/tests/tickets/ticket47573_test.py new file mode 100644 index 0000000..b453a78 --- /dev/null +++ b/dirsrvtests/tests/tickets/ticket47573_test.py @@ -0,0 +1,235 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2016 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. 
+# --- END COPYRIGHT BLOCK --- +# +''' +Created on Nov 7, 2013 + +@author: tbordaz +''' +import logging +import re +import time + +import ldap +import pytest +from lib389 import Entry +from lib389._constants import * +from lib389.topologies import topology_m1c1 +from lib389.utils import * + +pytestmark = pytest.mark.tier2 + +logging.getLogger(__name__).setLevel(logging.DEBUG) +log = logging.getLogger(__name__) + +TEST_REPL_DN = "cn=test_repl, %s" % SUFFIX +ENTRY_DN = "cn=test_entry, %s" % SUFFIX + +MUST_OLD = "(postalAddress $ preferredLocale $ telexNumber)" +MAY_OLD = "(postalCode $ street)" + +MUST_NEW = "(postalAddress $ preferredLocale)" +MAY_NEW = "(telexNumber $ postalCode $ street)" + + +def pattern_errorlog(file, log_pattern): + try: + pattern_errorlog.last_pos += 1 + except AttributeError: + pattern_errorlog.last_pos = 0 + + found = None + log.debug("_pattern_errorlog: start at offset %d" % pattern_errorlog.last_pos) + file.seek(pattern_errorlog.last_pos) + + # Use a while true iteration because 'for line in file: hit a + # python bug that break file.tell() + while True: + line = file.readline() + log.debug("_pattern_errorlog: [%d] %s" % (file.tell(), line)) + found = log_pattern.search(line) + if ((line == '') or (found)): + break + + log.debug("_pattern_errorlog: end at offset %d" % file.tell()) + pattern_errorlog.last_pos = file.tell() + return found + + +def _oc_definition(oid_ext, name, must=None, may=None): + oid = "1.2.3.4.5.6.7.8.9.10.%d" % oid_ext + desc = 'To test ticket 47573' + sup = 'person' + if not must: + must = MUST_OLD + if not may: + may = MAY_OLD + + new_oc = "( %s NAME '%s' DESC '%s' SUP %s AUXILIARY MUST %s MAY %s )" % (oid, name, desc, sup, must, may) + return ensure_bytes(new_oc) + + +def add_OC(instance, oid_ext, name): + new_oc = _oc_definition(oid_ext, name) + instance.schema.add_schema('objectClasses', new_oc) + + +def mod_OC(instance, oid_ext, name, old_must=None, old_may=None, new_must=None, new_may=None): + old_oc = 
_oc_definition(oid_ext, name, old_must, old_may) + new_oc = _oc_definition(oid_ext, name, new_must, new_may) + instance.schema.del_schema('objectClasses', old_oc) + instance.schema.add_schema('objectClasses', new_oc) + + +def trigger_schema_push(topology_m1c1): + """ + It triggers an update on the supplier. This will start a replication + session and a schema push + """ + try: + trigger_schema_push.value += 1 + except AttributeError: + trigger_schema_push.value = 1 + replace = [(ldap.MOD_REPLACE, 'telephonenumber', ensure_bytes(str(trigger_schema_push.value)))] + topology_m1c1.ms["supplier1"].modify_s(ENTRY_DN, replace) + + # wait 10 seconds that the update is replicated + loop = 0 + while loop <= 10: + try: + ent = topology_m1c1.cs["consumer1"].getEntry(ENTRY_DN, ldap.SCOPE_BASE, "(objectclass=*)", + ['telephonenumber']) + val = ent.telephonenumber or "0" + if int(val) == trigger_schema_push.value: + return + # the expected value is not yet replicated. try again + time.sleep(1) + loop += 1 + log.debug("trigger_schema_push: receive %s (expected %d)" % (val, trigger_schema_push.value)) + except ldap.NO_SUCH_OBJECT: + time.sleep(1) + loop += 1 + + +def test_ticket47573_init(topology_m1c1): + """ + Initialize the test environment + """ + log.debug("test_ticket47573_init topology_m1c1 %r (supplier %r, consumer %r" % + (topology_m1c1, topology_m1c1.ms["supplier1"], topology_m1c1.cs["consumer1"])) + # the test case will check if a warning message is logged in the + # error log of the supplier + topology_m1c1.ms["supplier1"].errorlog_file = open(topology_m1c1.ms["supplier1"].errlog, "r") + + # This entry will be used to trigger attempt of schema push + topology_m1c1.ms["supplier1"].add_s(Entry((ENTRY_DN, { + 'objectclass': "top person".split(), + 'sn': 'test_entry', + 'cn': 'test_entry'}))) + + +def test_ticket47573_one(topology_m1c1): + """ + Summary: Add a custom OC with MUST and MAY + MUST = postalAddress $ preferredLocale + MAY = telexNumber $ postalCode $ street + + 
Final state + - supplier +OCwithMayAttr + - consumer +OCwithMayAttr + + """ + log.debug("test_ticket47573_one topology_m1c1 %r (supplier %r, consumer %r" % ( + topology_m1c1, topology_m1c1.ms["supplier1"], topology_m1c1.cs["consumer1"])) + # update the schema of the supplier so that it is a superset of + # consumer. Schema should be pushed + new_oc = _oc_definition(2, 'OCwithMayAttr', + must=MUST_OLD, + may=MAY_OLD) + topology_m1c1.ms["supplier1"].schema.add_schema('objectClasses', new_oc) + + trigger_schema_push(topology_m1c1) + supplier_schema_csn = topology_m1c1.ms["supplier1"].schema.get_schema_csn() + consumer_schema_csn = topology_m1c1.cs["consumer1"].schema.get_schema_csn() + + # Check the schemaCSN was updated on the consumer + log.debug("test_ticket47573_one supplier_schema_csn=%s", supplier_schema_csn) + log.debug("ctest_ticket47573_one onsumer_schema_csn=%s", consumer_schema_csn) + assert supplier_schema_csn == consumer_schema_csn + + # Check the error log of the supplier does not contain an error + regex = re.compile("must not be overwritten \(set replication log for additional info\)") + res = pattern_errorlog(topology_m1c1.ms["supplier1"].errorlog_file, regex) + assert res is None + + +def test_ticket47573_two(topology_m1c1): + """ + Summary: Change OCwithMayAttr to move a MAY attribute to a MUST attribute + + + Final state + - supplier OCwithMayAttr updated + - consumer OCwithMayAttr updated + + """ + + # Update the objectclass so that a MAY attribute is moved to MUST attribute + mod_OC(topology_m1c1.ms["supplier1"], 2, 'OCwithMayAttr', old_must=MUST_OLD, new_must=MUST_NEW, old_may=MAY_OLD, + new_may=MAY_NEW) + + # now push the scheam + trigger_schema_push(topology_m1c1) + supplier_schema_csn = topology_m1c1.ms["supplier1"].schema.get_schema_csn() + consumer_schema_csn = topology_m1c1.cs["consumer1"].schema.get_schema_csn() + + # Check the schemaCSN was NOT updated on the consumer + log.debug("test_ticket47573_two supplier_schema_csn=%s", 
supplier_schema_csn) + log.debug("test_ticket47573_two consumer_schema_csn=%s", consumer_schema_csn) + assert supplier_schema_csn == consumer_schema_csn + + # Check the error log of the supplier does not contain an error + regex = re.compile("must not be overwritten \(set replication log for additional info\)") + res = pattern_errorlog(topology_m1c1.ms["supplier1"].errorlog_file, regex) + assert res is None + + +def test_ticket47573_three(topology_m1c1): + ''' + Create a entry with OCwithMayAttr OC + ''' + # Check replication is working fine + dn = "cn=ticket47573, %s" % SUFFIX + topology_m1c1.ms["supplier1"].add_s(Entry((dn, + {'objectclass': "top person OCwithMayAttr".split(), + 'sn': 'test_repl', + 'cn': 'test_repl', + 'postalAddress': 'here', + 'preferredLocale': 'en', + 'telexNumber': '12$us$21', + 'postalCode': '54321'}))) + loop = 0 + ent = None + while loop <= 10: + try: + ent = topology_m1c1.cs["consumer1"].getEntry(dn, ldap.SCOPE_BASE, "(objectclass=*)") + break + except ldap.NO_SUCH_OBJECT: + time.sleep(1) + loop += 1 + if ent is None: + assert False + + log.info('Testcase PASSED') + + +if __name__ == '__main__': + # Run isolated + # -s for DEBUG mode + CURRENT_FILE = os.path.realpath(__file__) + pytest.main("-s %s" % CURRENT_FILE) diff --git a/dirsrvtests/tests/tickets/ticket47619_test.py b/dirsrvtests/tests/tickets/ticket47619_test.py new file mode 100644 index 0000000..d95d865 --- /dev/null +++ b/dirsrvtests/tests/tickets/ticket47619_test.py @@ -0,0 +1,97 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2016 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. 
+# --- END COPYRIGHT BLOCK --- +# +''' +Created on Nov 7, 2013 + +@author: tbordaz +''' +import logging +import time + +import ldap +import pytest +from lib389 import Entry +from lib389._constants import * +from lib389.properties import * +from lib389.topologies import topology_m1c1 + +pytestmark = pytest.mark.tier2 + +logging.getLogger(__name__).setLevel(logging.DEBUG) +log = logging.getLogger(__name__) + +TEST_REPL_DN = "cn=test_repl, %s" % SUFFIX +ENTRY_DN = "cn=test_entry, %s" % SUFFIX + +OTHER_NAME = 'other_entry' +MAX_OTHERS = 100 + +ATTRIBUTES = ['street', 'countryName', 'description', 'postalAddress', 'postalCode', 'title', 'l', 'roomNumber'] + + +def test_ticket47619_init(topology_m1c1): + """ + Initialize the test environment + """ + topology_m1c1.ms["supplier1"].plugins.enable(name=PLUGIN_RETRO_CHANGELOG) + # topology_m1c1.ms["supplier1"].plugins.enable(name=PLUGIN_MEMBER_OF) + # topology_m1c1.ms["supplier1"].plugins.enable(name=PLUGIN_REFER_INTEGRITY) + topology_m1c1.ms["supplier1"].stop(timeout=10) + topology_m1c1.ms["supplier1"].start(timeout=10) + + topology_m1c1.ms["supplier1"].log.info("test_ticket47619_init topology_m1c1 %r" % (topology_m1c1)) + # the test case will check if a warning message is logged in the + # error log of the supplier + topology_m1c1.ms["supplier1"].errorlog_file = open(topology_m1c1.ms["supplier1"].errlog, "r") + + # add dummy entries + for cpt in range(MAX_OTHERS): + name = "%s%d" % (OTHER_NAME, cpt) + topology_m1c1.ms["supplier1"].add_s(Entry(("cn=%s,%s" % (name, SUFFIX), { + 'objectclass': "top person".split(), + 'sn': name, + 'cn': name}))) + + topology_m1c1.ms["supplier1"].log.info( + "test_ticket47619_init: %d entries ADDed %s[0..%d]" % (MAX_OTHERS, OTHER_NAME, MAX_OTHERS - 1)) + + # Check the number of entries in the retro changelog + time.sleep(2) + ents = topology_m1c1.ms["supplier1"].search_s(RETROCL_SUFFIX, ldap.SCOPE_ONELEVEL, "(objectclass=*)") + assert len(ents) == MAX_OTHERS + + +def 
test_ticket47619_create_index(topology_m1c1): + args = {INDEX_TYPE: 'eq'} + for attr in ATTRIBUTES: + topology_m1c1.ms["supplier1"].index.create(suffix=RETROCL_SUFFIX, attr=attr, args=args) + topology_m1c1.ms["supplier1"].restart(timeout=10) + + +def test_ticket47619_reindex(topology_m1c1): + ''' + Reindex all the attributes in ATTRIBUTES + ''' + args = {TASK_WAIT: True} + for attr in ATTRIBUTES: + rc = topology_m1c1.ms["supplier1"].tasks.reindex(suffix=RETROCL_SUFFIX, attrname=attr, args=args) + assert rc == 0 + + +def test_ticket47619_check_indexed_search(topology_m1c1): + for attr in ATTRIBUTES: + ents = topology_m1c1.ms["supplier1"].search_s(RETROCL_SUFFIX, ldap.SCOPE_SUBTREE, "(%s=hello)" % attr) + assert len(ents) == 0 + + +if __name__ == '__main__': + # Run isolated + # -s for DEBUG mode + CURRENT_FILE = os.path.realpath(__file__) + pytest.main("-s %s" % CURRENT_FILE) diff --git a/dirsrvtests/tests/tickets/ticket47640_test.py b/dirsrvtests/tests/tickets/ticket47640_test.py new file mode 100644 index 0000000..996735f --- /dev/null +++ b/dirsrvtests/tests/tickets/ticket47640_test.py @@ -0,0 +1,82 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2016 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. 
+# --- END COPYRIGHT BLOCK --- +# +import pytest +from lib389.tasks import * +from lib389.utils import * +from lib389.topologies import topology_st + +from lib389._constants import PLUGIN_LINKED_ATTRS, DEFAULT_SUFFIX + +# Skip on older versions +pytestmark = [pytest.mark.tier2, + pytest.mark.skipif(ds_is_older('1.3.4'), reason="Not implemented")] + +logging.getLogger(__name__).setLevel(logging.DEBUG) +log = logging.getLogger(__name__) + + +def test_ticket47640(topology_st): + ''' + Linked Attrs Plugins - verify that if the plugin fails to update the link entry + that the entire operation is aborted + ''' + + # Enable Dynamic plugins, and the linked Attrs plugin + try: + topology_st.standalone.modify_s(DN_CONFIG, [(ldap.MOD_REPLACE, 'nsslapd-dynamic-plugins', b'on')]) + except ldap.LDAPError as e: + log.fatal('Failed to enable dynamic plugin!' + e.message['desc']) + assert False + + try: + topology_st.standalone.plugins.enable(name=PLUGIN_LINKED_ATTRS) + except ValueError as e: + log.fatal('Failed to enable linked attributes plugin!' 
+ e.message['desc']) + assert False + + # Add the plugin config entry + try: + topology_st.standalone.add_s(Entry(('cn=manager link,cn=Linked Attributes,cn=plugins,cn=config', { + 'objectclass': 'top extensibleObject'.split(), + 'cn': 'Manager Link', + 'linkType': 'seeAlso', + 'managedType': 'seeAlso' + }))) + except ldap.LDAPError as e: + log.fatal('Failed to add linked attr config entry: error ' + e.message['desc']) + assert False + + # Add an entry who has a link to an entry that does not exist + OP_REJECTED = False + try: + topology_st.standalone.add_s(Entry(('uid=manager,' + DEFAULT_SUFFIX, { + 'objectclass': 'top extensibleObject'.split(), + 'uid': 'manager', + 'seeAlso': 'uid=user,dc=example,dc=com' + }))) + except ldap.UNWILLING_TO_PERFORM: + # Success + log.info('Add operation correctly rejected.') + OP_REJECTED = True + except ldap.LDAPError as e: + log.fatal('Add operation incorrectly rejected: error %s - ' + + 'expected "unwilling to perform"' % e.message['desc']) + assert False + if not OP_REJECTED: + log.fatal('Add operation incorrectly allowed') + assert False + + log.info('Test complete') + + +if __name__ == '__main__': + # Run isolated + # -s for DEBUG mode + CURRENT_FILE = os.path.realpath(__file__) + pytest.main("-s %s" % CURRENT_FILE) diff --git a/dirsrvtests/tests/tickets/ticket47653MMR_test.py b/dirsrvtests/tests/tickets/ticket47653MMR_test.py new file mode 100644 index 0000000..3fdb7f9 --- /dev/null +++ b/dirsrvtests/tests/tickets/ticket47653MMR_test.py @@ -0,0 +1,348 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2016 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. 
+# --- END COPYRIGHT BLOCK --- +# +''' +Created on Nov 7, 2013 + +@author: tbordaz +''' +import logging +import time + +import ldap +import pytest +from lib389 import Entry +from lib389._constants import * +from lib389.topologies import topology_m2 + +logging.getLogger(__name__).setLevel(logging.DEBUG) +from lib389.utils import * + +# Skip on older versions +pytestmark =[pytest.mark.tier2, + pytest.mark.skipif(ds_is_older('1.3.2'), reason="Not implemented")] +log = logging.getLogger(__name__) + +DEBUGGING = os.getenv("DEBUGGING", default=False) + +TEST_REPL_DN = "cn=test_repl, %s" % SUFFIX +OC_NAME = 'OCticket47653' +MUST = "(postalAddress $ postalCode)" +MAY = "(member $ street)" + +OTHER_NAME = 'other_entry' +MAX_OTHERS = 10 + +BIND_NAME = 'bind_entry' +BIND_DN = 'cn=%s, %s' % (BIND_NAME, SUFFIX) +BIND_PW = 'password' + +ENTRY_NAME = 'test_entry' +ENTRY_DN = 'cn=%s, %s' % (ENTRY_NAME, SUFFIX) +ENTRY_OC = "top person %s" % OC_NAME + + +def _oc_definition(oid_ext, name, must=None, may=None): + oid = "1.2.3.4.5.6.7.8.9.10.%d" % oid_ext + desc = 'To test ticket 47490' + sup = 'person' + if not must: + must = MUST + if not may: + may = MAY + + new_oc = "( %s NAME '%s' DESC '%s' SUP %s AUXILIARY MUST %s MAY %s )" % (oid, name, desc, sup, must, may) + return ensure_bytes(new_oc) + + +def test_ticket47653_init(topology_m2): + """ + It adds + - Objectclass with MAY 'member' + - an entry ('bind_entry') with which we bind to test the 'SELFDN' operation + It deletes the anonymous aci + + """ + + topology_m2.ms["supplier1"].log.info("Add %s that allows 'member' attribute" % OC_NAME) + new_oc = _oc_definition(2, OC_NAME, must=MUST, may=MAY) + topology_m2.ms["supplier1"].schema.add_schema('objectClasses', new_oc) + + # entry used to bind with + topology_m2.ms["supplier1"].log.info("Add %s" % BIND_DN) + topology_m2.ms["supplier1"].add_s(Entry((BIND_DN, { + 'objectclass': "top person".split(), + 'sn': BIND_NAME, + 'cn': BIND_NAME, + 'userpassword': BIND_PW}))) + + if DEBUGGING: + 
# enable acl error logging + mod = [(ldap.MOD_REPLACE, 'nsslapd-errorlog-level', ensure_bytes(str(128 + 8192)))] # ACL + REPL + topology_m2.ms["supplier1"].modify_s(DN_CONFIG, mod) + topology_m2.ms["supplier2"].modify_s(DN_CONFIG, mod) + + # remove all aci's and start with a clean slate + mod = [(ldap.MOD_DELETE, 'aci', None)] + topology_m2.ms["supplier1"].modify_s(SUFFIX, mod) + topology_m2.ms["supplier2"].modify_s(SUFFIX, mod) + + # add dummy entries + for cpt in range(MAX_OTHERS): + name = "%s%d" % (OTHER_NAME, cpt) + topology_m2.ms["supplier1"].add_s(Entry(("cn=%s,%s" % (name, SUFFIX), { + 'objectclass': "top person".split(), + 'sn': name, + 'cn': name}))) + + +def test_ticket47653_add(topology_m2): + ''' + This test ADD an entry on SUPPLIER1 where 47653 is fixed. Then it checks that entry is replicated + on SUPPLIER2 (even if on SUPPLIER2 47653 is NOT fixed). Then update on SUPPLIER2 and check the update on SUPPLIER1 + + It checks that, bound as bind_entry, + - we can not ADD an entry without the proper SELFDN aci. 
+ - with the proper ACI we can not ADD with 'member' attribute + - with the proper ACI and 'member' it succeeds to ADD + ''' + topology_m2.ms["supplier1"].log.info("\n\n######################### ADD ######################\n") + + # bind as bind_entry + topology_m2.ms["supplier1"].log.info("Bind as %s" % BIND_DN) + topology_m2.ms["supplier1"].simple_bind_s(BIND_DN, BIND_PW) + + # Prepare the entry with multivalued members + entry_with_members = Entry(ENTRY_DN) + entry_with_members.setValues('objectclass', 'top', 'person', 'OCticket47653') + entry_with_members.setValues('sn', ENTRY_NAME) + entry_with_members.setValues('cn', ENTRY_NAME) + entry_with_members.setValues('postalAddress', 'here') + entry_with_members.setValues('postalCode', '1234') + members = [] + for cpt in range(MAX_OTHERS): + name = "%s%d" % (OTHER_NAME, cpt) + members.append("cn=%s,%s" % (name, SUFFIX)) + members.append(BIND_DN) + entry_with_members.setValues('member', members) + + # Prepare the entry with only one member value + entry_with_member = Entry(ENTRY_DN) + entry_with_member.setValues('objectclass', 'top', 'person', 'OCticket47653') + entry_with_member.setValues('sn', ENTRY_NAME) + entry_with_member.setValues('cn', ENTRY_NAME) + entry_with_member.setValues('postalAddress', 'here') + entry_with_member.setValues('postalCode', '1234') + member = [] + member.append(BIND_DN) + entry_with_member.setValues('member', member) + + # entry to add WITH member being BIND_DN but WITHOUT the ACI -> ldap.INSUFFICIENT_ACCESS + try: + topology_m2.ms["supplier1"].log.info("Try to add Add %s (aci is missing): %r" % (ENTRY_DN, entry_with_member)) + + topology_m2.ms["supplier1"].add_s(entry_with_member) + except Exception as e: + topology_m2.ms["supplier1"].log.info("Exception (expected): %s" % type(e).__name__) + assert isinstance(e, ldap.INSUFFICIENT_ACCESS) + + # Ok Now add the proper ACI + topology_m2.ms["supplier1"].log.info("Bind as %s and add the ADD SELFDN aci" % DN_DM) + 
topology_m2.ms["supplier1"].simple_bind_s(DN_DM, PASSWORD) + + ACI_TARGET = "(target = \"ldap:///cn=*,%s\")" % SUFFIX + ACI_TARGETFILTER = "(targetfilter =\"(objectClass=%s)\")" % OC_NAME + ACI_ALLOW = "(version 3.0; acl \"SelfDN add\"; allow (add)" + ACI_SUBJECT = " userattr = \"member#selfDN\";)" + ACI_BODY = ACI_TARGET + ACI_TARGETFILTER + ACI_ALLOW + ACI_SUBJECT + mod = [(ldap.MOD_ADD, 'aci', ensure_bytes(ACI_BODY))] + topology_m2.ms["supplier1"].modify_s(SUFFIX, mod) + time.sleep(1) + + # bind as bind_entry + topology_m2.ms["supplier1"].log.info("Bind as %s" % BIND_DN) + topology_m2.ms["supplier1"].simple_bind_s(BIND_DN, BIND_PW) + + # entry to add WITHOUT member and WITH the ACI -> ldap.INSUFFICIENT_ACCESS + try: + topology_m2.ms["supplier1"].log.info("Try to add Add %s (member is missing)" % ENTRY_DN) + topology_m2.ms["supplier1"].add_s(Entry((ENTRY_DN, { + 'objectclass': ENTRY_OC.split(), + 'sn': ENTRY_NAME, + 'cn': ENTRY_NAME, + 'postalAddress': 'here', + 'postalCode': '1234'}))) + except Exception as e: + topology_m2.ms["supplier1"].log.info("Exception (expected): %s" % type(e).__name__) + assert isinstance(e, ldap.INSUFFICIENT_ACCESS) + time.sleep(1) + + # entry to add WITH memberS and WITH the ACI -> ldap.INSUFFICIENT_ACCESS + # member should contain only one value + try: + topology_m2.ms["supplier1"].log.info("Try to add Add %s (with several member values)" % ENTRY_DN) + topology_m2.ms["supplier1"].add_s(entry_with_members) + except Exception as e: + topology_m2.ms["supplier1"].log.info("Exception (expected): %s" % type(e).__name__) + assert isinstance(e, ldap.INSUFFICIENT_ACCESS) + time.sleep(2) + + topology_m2.ms["supplier1"].log.info("Try to add Add %s should be successful" % ENTRY_DN) + try: + topology_m2.ms["supplier1"].add_s(entry_with_member) + except ldap.LDAPError as e: + topology_m2.ms["supplier1"].log.info("Failed to add entry, error: " + e.message['desc']) + assert False + + # + # Now check the entry as been replicated + # + 
topology_m2.ms["supplier2"].simple_bind_s(DN_DM, PASSWORD) + topology_m2.ms["supplier1"].log.info("Try to retrieve %s from Supplier2" % ENTRY_DN) + loop = 0 + while loop <= 10: + try: + ent = topology_m2.ms["supplier2"].getEntry(ENTRY_DN, ldap.SCOPE_BASE, "(objectclass=*)") + break + except ldap.NO_SUCH_OBJECT: + time.sleep(1) + loop += 1 + assert loop <= 10 + + # Now update the entry on Supplier2 (as DM because 47653 is possibly not fixed on M2) + topology_m2.ms["supplier1"].log.info("Update %s on M2" % ENTRY_DN) + mod = [(ldap.MOD_REPLACE, 'description', b'test_add')] + topology_m2.ms["supplier2"].modify_s(ENTRY_DN, mod) + time.sleep(1) + + topology_m2.ms["supplier1"].simple_bind_s(DN_DM, PASSWORD) + loop = 0 + while loop <= 10: + try: + ent = topology_m2.ms["supplier1"].getEntry(ENTRY_DN, ldap.SCOPE_BASE, "(objectclass=*)") + if ent.hasAttr('description') and (ensure_str(ent.getValue('description')) == 'test_add'): + break + except ldap.NO_SUCH_OBJECT: + time.sleep(1) + loop += 1 + + assert ensure_str(ent.getValue('description')) == 'test_add' + + +def test_ticket47653_modify(topology_m2): + ''' + This test MOD an entry on SUPPLIER1 where 47653 is fixed. Then it checks that update is replicated + on SUPPLIER2 (even if on SUPPLIER2 47653 is NOT fixed). Then update on SUPPLIER2 (bound as BIND_DN). + This update may fail whether or not 47653 is fixed on SUPPLIER2 + + It checks that, bound as bind_entry, + - we can not modify an entry without the proper SELFDN aci. 
+ - adding the ACI, we can modify the entry + ''' + # bind as bind_entry + topology_m2.ms["supplier1"].log.info("Bind as %s" % BIND_DN) + topology_m2.ms["supplier1"].simple_bind_s(BIND_DN, BIND_PW) + + topology_m2.ms["supplier1"].log.info("\n\n######################### MODIFY ######################\n") + + # entry to modify WITH member being BIND_DN but WITHOUT the ACI -> ldap.INSUFFICIENT_ACCESS + try: + topology_m2.ms["supplier1"].log.info("Try to modify %s (aci is missing)" % ENTRY_DN) + mod = [(ldap.MOD_REPLACE, 'postalCode', b'9876')] + topology_m2.ms["supplier1"].modify_s(ENTRY_DN, mod) + except Exception as e: + topology_m2.ms["supplier1"].log.info("Exception (expected): %s" % type(e).__name__) + assert isinstance(e, ldap.INSUFFICIENT_ACCESS) + + # Ok Now add the proper ACI + topology_m2.ms["supplier1"].log.info("Bind as %s and add the WRITE SELFDN aci" % DN_DM) + topology_m2.ms["supplier1"].simple_bind_s(DN_DM, PASSWORD) + + ACI_TARGET = "(target = \"ldap:///cn=*,%s\")" % SUFFIX + ACI_TARGETATTR = "(targetattr = *)" + ACI_TARGETFILTER = "(targetfilter =\"(objectClass=%s)\")" % OC_NAME + ACI_ALLOW = "(version 3.0; acl \"SelfDN write\"; allow (write)" + ACI_SUBJECT = " userattr = \"member#selfDN\";)" + ACI_BODY = ACI_TARGET + ACI_TARGETATTR + ACI_TARGETFILTER + ACI_ALLOW + ACI_SUBJECT + mod = [(ldap.MOD_ADD, 'aci', ensure_bytes(ACI_BODY))] + topology_m2.ms["supplier1"].modify_s(SUFFIX, mod) + time.sleep(2) + + # bind as bind_entry + topology_m2.ms["supplier1"].log.info("M1: Bind as %s" % BIND_DN) + topology_m2.ms["supplier1"].simple_bind_s(BIND_DN, BIND_PW) + time.sleep(1) + + # modify the entry and checks the value + topology_m2.ms["supplier1"].log.info("M1: Try to modify %s. 
It should succeeds" % ENTRY_DN) + mod = [(ldap.MOD_REPLACE, 'postalCode', b'1928')] + topology_m2.ms["supplier1"].modify_s(ENTRY_DN, mod) + + topology_m2.ms["supplier1"].log.info("M1: Bind as %s" % DN_DM) + topology_m2.ms["supplier1"].simple_bind_s(DN_DM, PASSWORD) + + topology_m2.ms["supplier1"].log.info("M1: Check the update of %s" % ENTRY_DN) + ents = topology_m2.ms["supplier1"].search_s(ENTRY_DN, ldap.SCOPE_BASE, 'objectclass=*') + assert len(ents) == 1 + assert ensure_str(ents[0].postalCode) == '1928' + + # Now check the update has been replicated on M2 + topology_m2.ms["supplier1"].log.info("M2: Bind as %s" % DN_DM) + topology_m2.ms["supplier2"].simple_bind_s(DN_DM, PASSWORD) + topology_m2.ms["supplier1"].log.info("M2: Try to retrieve %s" % ENTRY_DN) + loop = 0 + while loop <= 10: + try: + ent = topology_m2.ms["supplier2"].getEntry(ENTRY_DN, ldap.SCOPE_BASE, "(objectclass=*)") + if ent.hasAttr('postalCode') and (ensure_str(ent.getValue('postalCode')) == '1928'): + break + except ldap.NO_SUCH_OBJECT: + time.sleep(1) + loop += 1 + assert loop <= 10 + assert ensure_str(ent.getValue('postalCode')) == '1928' + + # Now update the entry on Supplier2 bound as BIND_DN (update may fail if 47653 is not fixed on M2) + topology_m2.ms["supplier1"].log.info("M2: Update %s (bound as %s)" % (ENTRY_DN, BIND_DN)) + topology_m2.ms["supplier2"].simple_bind_s(BIND_DN, PASSWORD) + time.sleep(1) + fail = False + try: + mod = [(ldap.MOD_REPLACE, 'postalCode', b'1929')] + topology_m2.ms["supplier2"].modify_s(ENTRY_DN, mod) + fail = False + except ldap.INSUFFICIENT_ACCESS: + topology_m2.ms["supplier1"].log.info( + "M2: Exception (INSUFFICIENT_ACCESS): that is fine the bug is possibly not fixed on M2") + fail = True + except Exception as e: + topology_m2.ms["supplier1"].log.info("M2: Exception (not expected): %s" % type(e).__name__) + assert 0 + + if not fail: + # Check the update has been replicaed on M1 + topology_m2.ms["supplier1"].log.info("M1: Bind as %s" % DN_DM) + 
topology_m2.ms["supplier1"].simple_bind_s(DN_DM, PASSWORD) + topology_m2.ms["supplier1"].log.info("M1: Check %s.postalCode=1929)" % (ENTRY_DN)) + loop = 0 + while loop <= 10: + try: + ent = topology_m2.ms["supplier1"].getEntry(ENTRY_DN, ldap.SCOPE_BASE, "(objectclass=*)") + if ent.hasAttr('postalCode') and (ensure_str(ent.getValue('postalCode')) == '1929'): + break + except ldap.NO_SUCH_OBJECT: + time.sleep(1) + loop += 1 + assert ensure_str(ent.getValue('postalCode')) == '1929' + + +if __name__ == '__main__': + # Run isolated + # -s for DEBUG mode + CURRENT_FILE = os.path.realpath(__file__) + pytest.main("-s %s" % CURRENT_FILE) diff --git a/dirsrvtests/tests/tickets/ticket47676_test.py b/dirsrvtests/tests/tickets/ticket47676_test.py new file mode 100644 index 0000000..a217a66 --- /dev/null +++ b/dirsrvtests/tests/tickets/ticket47676_test.py @@ -0,0 +1,252 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2016 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. 
+# --- END COPYRIGHT BLOCK --- +# +''' +Created on Nov 7, 2013 + +@author: tbordaz +''' +import logging +import time + +import ldap +import pytest +from lib389 import Entry +from lib389._constants import * +from lib389.topologies import topology_m2 +from lib389.replica import ReplicationManager + +logging.getLogger(__name__).setLevel(logging.DEBUG) +from lib389.utils import * + +# Skip on older versions +pytestmark = [pytest.mark.tier2, + pytest.mark.skipif(ds_is_older('1.3.2'), reason="Not implemented")] +log = logging.getLogger(__name__) + +SCHEMA_DN = "cn=schema" +TEST_REPL_DN = "cn=test_repl, %s" % SUFFIX +OC_NAME = 'OCticket47676' +OC_OID_EXT = 2 +MUST = "(postalAddress $ postalCode)" +MAY = "(member $ street)" + +OC2_NAME = 'OC2ticket47676' +OC2_OID_EXT = 3 +MUST_2 = "(postalAddress $ postalCode)" +MAY_2 = "(member $ street)" + +REPL_SCHEMA_POLICY_CONSUMER = "cn=consumerUpdatePolicy,cn=replSchema,cn=config" +REPL_SCHEMA_POLICY_SUPPLIER = "cn=supplierUpdatePolicy,cn=replSchema,cn=config" + +OTHER_NAME = 'other_entry' +MAX_OTHERS = 10 + +BIND_NAME = 'bind_entry' +BIND_DN = 'cn=%s, %s' % (BIND_NAME, SUFFIX) +BIND_PW = 'password' + +ENTRY_NAME = 'test_entry' +ENTRY_DN = 'cn=%s, %s' % (ENTRY_NAME, SUFFIX) +ENTRY_OC = "top person %s" % OC_NAME + +BASE_OID = "1.2.3.4.5.6.7.8.9.10" + + +def _oc_definition(oid_ext, name, must=None, may=None): + oid = "%s.%d" % (BASE_OID, oid_ext) + desc = 'To test ticket 47490' + sup = 'person' + if not must: + must = MUST + if not may: + may = MAY + + new_oc = "( %s NAME '%s' DESC '%s' SUP %s AUXILIARY MUST %s MAY %s )" % (oid, name, desc, sup, must, may) + return ensure_bytes(new_oc) + +def replication_check(topology_m2): + repl = ReplicationManager(SUFFIX) + supplier1 = topology_m2.ms["supplier1"] + supplier2 = topology_m2.ms["supplier2"] + return repl.test_replication(supplier1, supplier2) + +def test_ticket47676_init(topology_m2): + """ + It adds + - Objectclass with MAY 'member' + - an entry ('bind_entry') with which we bind to 
test the 'SELFDN' operation + It deletes the anonymous aci + + """ + + topology_m2.ms["supplier1"].log.info("Add %s that allows 'member' attribute" % OC_NAME) + new_oc = _oc_definition(OC_OID_EXT, OC_NAME, must=MUST, may=MAY) + topology_m2.ms["supplier1"].schema.add_schema('objectClasses', new_oc) + + # entry used to bind with + topology_m2.ms["supplier1"].log.info("Add %s" % BIND_DN) + topology_m2.ms["supplier1"].add_s(Entry((BIND_DN, { + 'objectclass': "top person".split(), + 'sn': BIND_NAME, + 'cn': BIND_NAME, + 'userpassword': BIND_PW}))) + + # enable acl error logging + mod = [(ldap.MOD_REPLACE, 'nsslapd-errorlog-level', ensure_bytes(str(128 + 8192)))] # ACL + REPL + topology_m2.ms["supplier1"].modify_s(DN_CONFIG, mod) + topology_m2.ms["supplier2"].modify_s(DN_CONFIG, mod) + + # add dummy entries + for cpt in range(MAX_OTHERS): + name = "%s%d" % (OTHER_NAME, cpt) + topology_m2.ms["supplier1"].add_s(Entry(("cn=%s,%s" % (name, SUFFIX), { + 'objectclass': "top person".split(), + 'sn': name, + 'cn': name}))) + + +def test_ticket47676_skip_oc_at(topology_m2): + ''' + This test ADD an entry on SUPPLIER1 where 47676 is fixed. Then it checks that entry is replicated + on SUPPLIER2 (even if on SUPPLIER2 47676 is NOT fixed). Then update on SUPPLIER2. 
+ If the schema has successfully been pushed, updating Supplier2 should succeed + ''' + topology_m2.ms["supplier1"].log.info("\n\n######################### ADD ######################\n") + + # bind as 'cn=Directory manager' + topology_m2.ms["supplier1"].log.info("Bind as %s and add the add the entry with specific oc" % DN_DM) + topology_m2.ms["supplier1"].simple_bind_s(DN_DM, PASSWORD) + + # Prepare the entry with multivalued members + entry = Entry(ENTRY_DN) + entry.setValues('objectclass', 'top', 'person', 'OCticket47676') + entry.setValues('sn', ENTRY_NAME) + entry.setValues('cn', ENTRY_NAME) + entry.setValues('postalAddress', 'here') + entry.setValues('postalCode', '1234') + members = [] + for cpt in range(MAX_OTHERS): + name = "%s%d" % (OTHER_NAME, cpt) + members.append("cn=%s,%s" % (name, SUFFIX)) + members.append(BIND_DN) + entry.setValues('member', members) + + topology_m2.ms["supplier1"].log.info("Try to add Add %s should be successful" % ENTRY_DN) + topology_m2.ms["supplier1"].add_s(entry) + + # + # Now check the entry as been replicated + # + topology_m2.ms["supplier2"].simple_bind_s(DN_DM, PASSWORD) + topology_m2.ms["supplier1"].log.info("Try to retrieve %s from Supplier2" % ENTRY_DN) + replication_check(topology_m2) + ent = topology_m2.ms["supplier2"].getEntry(ENTRY_DN, ldap.SCOPE_BASE, "(objectclass=*)") + assert ent + # Now update the entry on Supplier2 (as DM because 47676 is possibly not fixed on M2) + topology_m2.ms["supplier1"].log.info("Update %s on M2" % ENTRY_DN) + mod = [(ldap.MOD_REPLACE, 'description', b'test_add')] + topology_m2.ms["supplier2"].modify_s(ENTRY_DN, mod) + + topology_m2.ms["supplier1"].simple_bind_s(DN_DM, PASSWORD) + replication_check(topology_m2) + ent = topology_m2.ms["supplier1"].getEntry(ENTRY_DN, ldap.SCOPE_BASE, "(objectclass=*)") + assert ensure_str(ent.getValue('description')) == 'test_add' + + +def test_ticket47676_reject_action(topology_m2): + topology_m2.ms["supplier1"].log.info("\n\n######################### 
REJECT ACTION ######################\n") + + topology_m2.ms["supplier1"].simple_bind_s(DN_DM, PASSWORD) + topology_m2.ms["supplier2"].simple_bind_s(DN_DM, PASSWORD) + + # make supplier1 to refuse to push the schema if OC_NAME is present in consumer schema + mod = [(ldap.MOD_ADD, 'schemaUpdateObjectclassReject', ensure_bytes('%s' % (OC_NAME)))] # ACL + REPL + topology_m2.ms["supplier1"].modify_s(REPL_SCHEMA_POLICY_SUPPLIER, mod) + + # Restart is required to take into account that policy + topology_m2.ms["supplier1"].stop(timeout=10) + topology_m2.ms["supplier1"].start(timeout=10) + + # Add a new OC on M1 so that schema CSN will change and M1 will try to push the schema + topology_m2.ms["supplier1"].log.info("Add %s on M1" % OC2_NAME) + new_oc = _oc_definition(OC2_OID_EXT, OC2_NAME, must=MUST, may=MAY) + topology_m2.ms["supplier1"].schema.add_schema('objectClasses', new_oc) + + # Safety checking that the schema has been updated on M1 + topology_m2.ms["supplier1"].log.info("Check %s is in M1" % OC2_NAME) + ent = topology_m2.ms["supplier1"].getEntry(SCHEMA_DN, ldap.SCOPE_BASE, "(objectclass=*)", ["objectclasses"]) + assert ent.hasAttr('objectclasses') + found = False + for objectclass in ent.getValues('objectclasses'): + if str(objectclass).find(OC2_NAME) >= 0: + found = True + break + assert found + + # Do an update of M1 so that M1 will try to push the schema + topology_m2.ms["supplier1"].log.info("Update %s on M1" % ENTRY_DN) + mod = [(ldap.MOD_REPLACE, 'description', b'test_reject')] + topology_m2.ms["supplier1"].modify_s(ENTRY_DN, mod) + + # Check the replication occured and so also M1 attempted to push the schema + topology_m2.ms["supplier1"].log.info("Check updated %s on M2" % ENTRY_DN) + + replication_check(topology_m2) + ent = topology_m2.ms["supplier2"].getEntry(ENTRY_DN, ldap.SCOPE_BASE, "(objectclass=*)", ['description']) + assert ensure_str(ent.getValue('description')) == 'test_reject' + + # Check that the schema has not been pushed + 
topology_m2.ms["supplier1"].log.info("Check %s is not in M2" % OC2_NAME) + ent = topology_m2.ms["supplier2"].getEntry(SCHEMA_DN, ldap.SCOPE_BASE, "(objectclass=*)", ["objectclasses"]) + assert ent.hasAttr('objectclasses') + found = False + for objectclass in ent.getValues('objectclasses'): + if str(objectclass).find(OC2_NAME) >= 0: + found = True + break + assert not found + + topology_m2.ms["supplier1"].log.info("\n\n######################### NO MORE REJECT ACTION ######################\n") + + # make supplier1 to do no specific action on OC_NAME + mod = [(ldap.MOD_DELETE, 'schemaUpdateObjectclassReject', ensure_bytes('%s' % (OC_NAME)))] # ACL + REPL + topology_m2.ms["supplier1"].modify_s(REPL_SCHEMA_POLICY_SUPPLIER, mod) + + # Restart is required to take into account that policy + topology_m2.ms["supplier1"].stop(timeout=10) + topology_m2.ms["supplier1"].start(timeout=10) + + # Do an update of M1 so that M1 will try to push the schema + topology_m2.ms["supplier1"].log.info("Update %s on M1" % ENTRY_DN) + mod = [(ldap.MOD_REPLACE, 'description', b'test_no_more_reject')] + topology_m2.ms["supplier1"].modify_s(ENTRY_DN, mod) + + # Check the replication occured and so also M1 attempted to push the schema + topology_m2.ms["supplier1"].log.info("Check updated %s on M2" % ENTRY_DN) + + replication_check(topology_m2) + ent = topology_m2.ms["supplier2"].getEntry(ENTRY_DN, ldap.SCOPE_BASE, "(objectclass=*)", ['description']) + assert ensure_str(ent.getValue('description')) == 'test_no_more_reject' + # Check that the schema has been pushed + topology_m2.ms["supplier1"].log.info("Check %s is in M2" % OC2_NAME) + ent = topology_m2.ms["supplier2"].getEntry(SCHEMA_DN, ldap.SCOPE_BASE, "(objectclass=*)", ["objectclasses"]) + assert ent.hasAttr('objectclasses') + found = False + for objectclass in ent.getValues('objectclasses'): + if str(objectclass).find(OC2_NAME) >= 0: + found = True + break + assert found + + +if __name__ == '__main__': + # Run isolated + # -s for DEBUG mode + 
CURRENT_FILE = os.path.realpath(__file__) + pytest.main("-s %s" % CURRENT_FILE) diff --git a/dirsrvtests/tests/tickets/ticket47714_test.py b/dirsrvtests/tests/tickets/ticket47714_test.py new file mode 100644 index 0000000..7a6a564 --- /dev/null +++ b/dirsrvtests/tests/tickets/ticket47714_test.py @@ -0,0 +1,213 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2016 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. +# --- END COPYRIGHT BLOCK --- +# +import logging +import time + +import ldap +import pytest +from lib389 import Entry +from lib389._constants import * +from lib389.topologies import topology_st + +log = logging.getLogger(__name__) + +from lib389.utils import * + +# Skip on older versions +pytestmark = [pytest.mark.tier2, + pytest.mark.skipif(ds_is_older('1.3.3'), reason="Not implemented")] +ACCT_POLICY_CONFIG_DN = ('cn=config,cn=%s,cn=plugins,cn=config' % + PLUGIN_ACCT_POLICY) +ACCT_POLICY_DN = 'cn=Account Inactivation Policy,%s' % SUFFIX +# Set inactivty high to prevent timing issues with debug options or gdb on test runs. +INACTIVITY_LIMIT = '3000' +SEARCHFILTER = '(objectclass=*)' + +TEST_USER = 'ticket47714user' +TEST_USER_DN = 'uid=%s,%s' % (TEST_USER, SUFFIX) +TEST_USER_PW = '%s' % TEST_USER + + +def _header(topology_st, label): + topology_st.standalone.log.info("\n\n###############################################") + topology_st.standalone.log.info("#######") + topology_st.standalone.log.info("####### %s" % label) + topology_st.standalone.log.info("#######") + topology_st.standalone.log.info("###############################################") + + +def test_ticket47714_init(topology_st): + """ + 1. Add account policy entry to the DB + 2. 
Add a test user to the DB + """ + _header(topology_st, + 'Testing Ticket 47714 - [RFE] Update lastLoginTime also in Account Policy plugin if account lockout is based on passwordExpirationTime.') + + topology_st.standalone.simple_bind_s(DN_DM, PASSWORD) + + log.info("\n######################### Adding Account Policy entry: %s ######################\n" % ACCT_POLICY_DN) + topology_st.standalone.add_s( + Entry((ACCT_POLICY_DN, {'objectclass': "top ldapsubentry extensibleObject accountpolicy".split(), + 'accountInactivityLimit': INACTIVITY_LIMIT}))) + + log.info("\n######################### Adding Test User entry: %s ######################\n" % TEST_USER_DN) + topology_st.standalone.add_s( + Entry((TEST_USER_DN, {'objectclass': "top person organizationalPerson inetOrgPerson".split(), + 'cn': TEST_USER, + 'sn': TEST_USER, + 'givenname': TEST_USER, + 'userPassword': TEST_USER_PW, + 'acctPolicySubentry': ACCT_POLICY_DN}))) + + +def test_ticket47714_run_0(topology_st): + """ + Check this change has no inpact to the existing functionality. + 1. Set account policy config without the new attr alwaysRecordLoginAttr + 2. Bind as a test user + 3. Bind as the test user again and check the lastLoginTime is updated + 4. Waint longer than the accountInactivityLimit time and bind as the test user, + which should fail with CONSTANT_VIOLATION. 
+ """ + _header(topology_st, 'Account Policy - No new attr alwaysRecordLoginAttr in config') + + topology_st.standalone.simple_bind_s(DN_DM, PASSWORD) + + # Modify Account Policy config entry + topology_st.standalone.modify_s(ACCT_POLICY_CONFIG_DN, [(ldap.MOD_REPLACE, 'alwaysrecordlogin', b'yes'), + (ldap.MOD_REPLACE, 'stateattrname', b'lastLoginTime'), + (ldap.MOD_REPLACE, 'altstateattrname', b'createTimestamp'), + (ldap.MOD_REPLACE, 'specattrname', b'acctPolicySubentry'), + (ldap.MOD_REPLACE, 'limitattrname', + b'accountInactivityLimit')]) + + # Enable the plugins + topology_st.standalone.plugins.enable(name=PLUGIN_ACCT_POLICY) + + topology_st.standalone.restart() + + log.info("\n######################### Bind as %s ######################\n" % TEST_USER_DN) + try: + topology_st.standalone.simple_bind_s(TEST_USER_DN, TEST_USER_PW) + except ldap.CONSTRAINT_VIOLATION as e: + log.error('CONSTRAINT VIOLATION {}'.format(e.args[0]['desc'])) + + time.sleep(2) + + topology_st.standalone.simple_bind_s(DN_DM, PASSWORD) + entry = topology_st.standalone.search_s(TEST_USER_DN, ldap.SCOPE_BASE, SEARCHFILTER, ['lastLoginTime']) + + lastLoginTime0 = entry[0].lastLoginTime + + log.info("\n######################### Bind as %s again ######################\n" % TEST_USER_DN) + try: + topology_st.standalone.simple_bind_s(TEST_USER_DN, TEST_USER_PW) + except ldap.CONSTRAINT_VIOLATION as e: + log.error('CONSTRAINT VIOLATION {}'.format(e.args[0]['desc'])) + + time.sleep(2) + + topology_st.standalone.simple_bind_s(DN_DM, PASSWORD) + entry = topology_st.standalone.search_s(TEST_USER_DN, ldap.SCOPE_BASE, SEARCHFILTER, ['lastLoginTime']) + + lastLoginTime1 = entry[0].lastLoginTime + + log.info("First lastLoginTime: %s, Second lastLoginTime: %s" % (lastLoginTime0, lastLoginTime1)) + assert lastLoginTime0 < lastLoginTime1 + + topology_st.standalone.simple_bind_s(DN_DM, PASSWORD) + + # Now, change the inactivity limit, because that should trigger the account to now be locked. 
This is possible because the check is "delayed" until the usage of the account. + + topology_st.standalone.modify_s(ACCT_POLICY_DN, [(ldap.MOD_REPLACE, 'accountInactivityLimit', b'1'),]) + time.sleep(2) + + entry = topology_st.standalone.search_s(ACCT_POLICY_DN, ldap.SCOPE_BASE, SEARCHFILTER) + log.info("\n######################### %s ######################\n" % ACCT_POLICY_CONFIG_DN) + log.info("accountInactivityLimit: %s" % entry[0].accountInactivityLimit) + log.info("\n######################### %s DONE ######################\n" % ACCT_POLICY_CONFIG_DN) + + log.info("\n######################### Bind as %s again to fail ######################\n" % TEST_USER_DN) + try: + topology_st.standalone.simple_bind_s(TEST_USER_DN, TEST_USER_PW) + except ldap.CONSTRAINT_VIOLATION as e: + log.info('CONSTRAINT VIOLATION {}'.format(e.args[0]['desc'])) + log.info("%s was successfully inactivated." % TEST_USER_DN) + pass + + # Now reset the value high to prevent issues with the next test. + topology_st.standalone.simple_bind_s(DN_DM, PASSWORD) + topology_st.standalone.modify_s(ACCT_POLICY_DN, [(ldap.MOD_REPLACE, 'accountInactivityLimit', ensure_bytes(INACTIVITY_LIMIT)),]) + + +def test_ticket47714_run_1(topology_st): + """ + Verify a new config attr alwaysRecordLoginAttr + 1. Set account policy config with the new attr alwaysRecordLoginAttr: lastLoginTime + Note: bogus attr is set to stateattrname. + altstateattrname type value is used for checking whether the account is idle or not. + 2. Bind as a test user + 3. 
Bind as the test user again and check the alwaysRecordLoginAttr: lastLoginTime is updated + """ + _header(topology_st, 'Account Policy - With new attr alwaysRecordLoginAttr in config') + + topology_st.standalone.simple_bind_s(DN_DM, PASSWORD) + topology_st.standalone.modify_s(TEST_USER_DN, [(ldap.MOD_DELETE, 'lastLoginTime', None)]) + + # Modify Account Policy config entry + topology_st.standalone.modify_s(ACCT_POLICY_CONFIG_DN, [(ldap.MOD_REPLACE, 'alwaysrecordlogin', b'yes'), + (ldap.MOD_REPLACE, 'stateattrname', b'bogus'), + (ldap.MOD_REPLACE, 'altstateattrname', b'modifyTimestamp'), + ( + ldap.MOD_REPLACE, 'alwaysRecordLoginAttr', b'lastLoginTime'), + (ldap.MOD_REPLACE, 'specattrname', b'acctPolicySubentry'), + (ldap.MOD_REPLACE, 'limitattrname', + b'accountInactivityLimit')]) + + # Enable the plugins + topology_st.standalone.plugins.enable(name=PLUGIN_ACCT_POLICY) + + topology_st.standalone.restart() + + log.info("\n######################### Bind as %s ######################\n" % TEST_USER_DN) + try: + topology_st.standalone.simple_bind_s(TEST_USER_DN, TEST_USER_PW) + except ldap.CONSTRAINT_VIOLATION as e: + log.error('CONSTRAINT VIOLATION {}'.format(e.args[0]['desc'])) + + time.sleep(1) + + topology_st.standalone.simple_bind_s(DN_DM, PASSWORD) + entry = topology_st.standalone.search_s(TEST_USER_DN, ldap.SCOPE_BASE, SEARCHFILTER, ['lastLoginTime']) + lastLoginTime0 = entry[0].lastLoginTime + + log.info("\n######################### Bind as %s again ######################\n" % TEST_USER_DN) + try: + topology_st.standalone.simple_bind_s(TEST_USER_DN, TEST_USER_PW) + except ldap.CONSTRAINT_VIOLATION as e: + log.error('CONSTRAINT VIOLATION {}'.format(e.args[0]['desc'])) + + time.sleep(1) + + topology_st.standalone.simple_bind_s(DN_DM, PASSWORD) + entry = topology_st.standalone.search_s(TEST_USER_DN, ldap.SCOPE_BASE, SEARCHFILTER, ['lastLoginTime']) + lastLoginTime1 = entry[0].lastLoginTime + + log.info("First lastLoginTime: %s, Second lastLoginTime: %s" % 
(lastLoginTime0, lastLoginTime1)) + assert lastLoginTime0 < lastLoginTime1 + + topology_st.standalone.log.info("ticket47714 was successfully verified.") + + +if __name__ == '__main__': + # Run isolated + # -s for DEBUG mode + CURRENT_FILE = os.path.realpath(__file__) + pytest.main("-s %s" % CURRENT_FILE) diff --git a/dirsrvtests/tests/tickets/ticket47721_test.py b/dirsrvtests/tests/tickets/ticket47721_test.py new file mode 100644 index 0000000..e22f1ed --- /dev/null +++ b/dirsrvtests/tests/tickets/ticket47721_test.py @@ -0,0 +1,293 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2016 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. +# --- END COPYRIGHT BLOCK --- +# +''' +Created on Nov 7, 2013 + +@author: tbordaz +''' +import logging +import time + +import ldap +import pytest +from lib389 import Entry +from lib389._constants import * +from lib389.topologies import topology_m2 +from lib389.replica import ReplicationManager +from lib389.utils import * + +pytestmark = pytest.mark.tier2 + +logging.getLogger(__name__).setLevel(logging.DEBUG) +log = logging.getLogger(__name__) + +SCHEMA_DN = "cn=schema" +TEST_REPL_DN = "cn=test_repl, %s" % SUFFIX +OC_NAME = 'OCticket47721' +OC_OID_EXT = 2 +MUST = "(postalAddress $ postalCode)" +MAY = "(member $ street)" + +OC2_NAME = 'OC2ticket47721' +OC2_OID_EXT = 3 +MUST_2 = "(postalAddress $ postalCode)" +MAY_2 = "(member $ street)" + +REPL_SCHEMA_POLICY_CONSUMER = "cn=consumerUpdatePolicy,cn=replSchema,cn=config" +REPL_SCHEMA_POLICY_SUPPLIER = "cn=supplierUpdatePolicy,cn=replSchema,cn=config" + +OTHER_NAME = 'other_entry' +MAX_OTHERS = 10 + +BIND_NAME = 'bind_entry' +BIND_DN = 'cn=%s, %s' % (BIND_NAME, SUFFIX) +BIND_PW = 'password' + +ENTRY_NAME = 'test_entry' +ENTRY_DN = 'cn=%s, %s' % (ENTRY_NAME, SUFFIX) +ENTRY_OC = "top person %s" % OC_NAME + +BASE_OID = "1.2.3.4.5.6.7.8.9.10" + +SLEEP_INTERVAL = 60 + + +def _add_custom_at_definition(name='ATticket47721'): 
+ new_at = "( %s-oid NAME '%s' DESC 'test AT ticket 47721' SYNTAX 1.3.6.1.4.1.1466.115.121.1.15 X-ORIGIN ( 'Test 47721' 'user defined' ) )" % ( + name, name) + return ensure_bytes(new_at) + + +def _chg_std_at_defintion(): + new_at = "( 2.16.840.1.113730.3.1.569 NAME 'cosPriority' DESC 'Netscape defined attribute type' SYNTAX 1.3.6.1.4.1.1466.115.121.1.27 X-ORIGIN 'Netscape Directory Server' )" + return ensure_bytes(new_at) + + +def _add_custom_oc_defintion(name='OCticket47721'): + new_oc = "( %s-oid NAME '%s' DESC 'An group of related automount objects' SUP top STRUCTURAL MUST ou X-ORIGIN 'draft-howard-rfc2307bis' )" % ( + name, name) + return ensure_bytes(new_oc) + + +def _chg_std_oc_defintion(): + new_oc = "( 5.3.6.1.1.1.2.0 NAME 'trustAccount' DESC 'Sets trust accounts information' SUP top AUXILIARY MUST trustModel MAY ( accessTo $ ou ) X-ORIGIN 'nss_ldap/pam_ldap' )" + return ensure_bytes(new_oc) + +def replication_check(topology_m2): + repl = ReplicationManager(SUFFIX) + supplier1 = topology_m2.ms["supplier1"] + supplier2 = topology_m2.ms["supplier2"] + return repl.test_replication(supplier1, supplier2) + +def test_ticket47721_init(topology_m2): + """ + It adds + - Objectclass with MAY 'member' + - an entry ('bind_entry') with which we bind to test the 'SELFDN' operation + It deletes the anonymous aci + + """ + + # entry used to bind with + topology_m2.ms["supplier1"].log.info("Add %s" % BIND_DN) + topology_m2.ms["supplier1"].add_s(Entry((BIND_DN, { + 'objectclass': "top person".split(), + 'sn': BIND_NAME, + 'cn': BIND_NAME, + 'userpassword': BIND_PW}))) + + # enable repl error logging + mod = [(ldap.MOD_REPLACE, 'nsslapd-errorlog-level', ensure_bytes(str(8192)))] # REPL logging + topology_m2.ms["supplier1"].modify_s(DN_CONFIG, mod) + topology_m2.ms["supplier2"].modify_s(DN_CONFIG, mod) + + # add dummy entries + for cpt in range(MAX_OTHERS): + name = "%s%d" % (OTHER_NAME, cpt) + topology_m2.ms["supplier1"].add_s(Entry(("cn=%s,%s" % (name, SUFFIX), { + 
'objectclass': "top person".split(), + 'sn': name, + 'cn': name}))) + + +def test_ticket47721_0(topology_m2): + dn = "cn=%s0,%s" % (OTHER_NAME, SUFFIX) + replication_check(topology_m2) + ent = topology_m2.ms["supplier2"].getEntry(dn, ldap.SCOPE_BASE, "(objectclass=*)") + assert ent + + +def test_ticket47721_1(topology_m2): + log.info('Running test 1...') + # topology_m2.ms["supplier1"].log.info("Attach debugger\n\n") + # time.sleep(30) + + new = _add_custom_at_definition() + topology_m2.ms["supplier1"].log.info("Add (M2) %s " % new) + topology_m2.ms["supplier2"].schema.add_schema('attributetypes', new) + + new = _chg_std_at_defintion() + topology_m2.ms["supplier1"].log.info("Chg (M2) %s " % new) + topology_m2.ms["supplier2"].schema.add_schema('attributetypes', new) + + new = _add_custom_oc_defintion() + topology_m2.ms["supplier1"].log.info("Add (M2) %s " % new) + topology_m2.ms["supplier2"].schema.add_schema('objectClasses', new) + + new = _chg_std_oc_defintion() + topology_m2.ms["supplier1"].log.info("Chg (M2) %s " % new) + topology_m2.ms["supplier2"].schema.add_schema('objectClasses', new) + + mod = [(ldap.MOD_REPLACE, 'description', b'Hello world 1')] + dn = "cn=%s0,%s" % (OTHER_NAME, SUFFIX) + topology_m2.ms["supplier2"].modify_s(dn, mod) + + replication_check(topology_m2) + ent = topology_m2.ms["supplier1"].getEntry(dn, ldap.SCOPE_BASE, "(objectclass=*)") + assert ensure_str(ent.getValue('description')) == 'Hello world 1' + + time.sleep(2) + schema_csn_supplier1 = topology_m2.ms["supplier1"].schema.get_schema_csn() + schema_csn_supplier2 = topology_m2.ms["supplier2"].schema.get_schema_csn() + log.debug('Supplier 1 schemaCSN: %s' % schema_csn_supplier1) + log.debug('Supplier 2 schemaCSN: %s' % schema_csn_supplier2) + + +def test_ticket47721_2(topology_m2): + log.info('Running test 2...') + + mod = [(ldap.MOD_REPLACE, 'description', b'Hello world 2')] + dn = "cn=%s0,%s" % (OTHER_NAME, SUFFIX) + topology_m2.ms["supplier1"].modify_s(dn, mod) + + 
replication_check(topology_m2) + ent = topology_m2.ms["supplier2"].getEntry(dn, ldap.SCOPE_BASE, "(objectclass=*)") + assert ensure_str(ent.getValue('description')) == 'Hello world 2' + + time.sleep(2) + schema_csn_supplier1 = topology_m2.ms["supplier1"].schema.get_schema_csn() + schema_csn_supplier2 = topology_m2.ms["supplier2"].schema.get_schema_csn() + log.debug('Supplier 1 schemaCSN: %s' % schema_csn_supplier1) + log.debug('Supplier 2 schemaCSN: %s' % schema_csn_supplier2) + if schema_csn_supplier1 != schema_csn_supplier2: + # We need to give the server a little more time, then check it again + log.info('Schema CSNs are not in sync yet: m1 (%s) vs m2 (%s), wait a little...' + % (schema_csn_supplier1, schema_csn_supplier2)) + time.sleep(SLEEP_INTERVAL) + schema_csn_supplier1 = topology_m2.ms["supplier1"].schema.get_schema_csn() + schema_csn_supplier2 = topology_m2.ms["supplier2"].schema.get_schema_csn() + + assert schema_csn_supplier1 is not None + assert schema_csn_supplier1 == schema_csn_supplier2 + + +def test_ticket47721_3(topology_m2): + ''' + Check that the supplier can update its schema from consumer schema + Update M2 schema, then trigger a replication M1->M2 + ''' + log.info('Running test 3...') + + # stop RA M2->M1, so that M1 can only learn being a supplier + ents = topology_m2.ms["supplier2"].agreement.list(suffix=SUFFIX) + assert len(ents) == 1 + topology_m2.ms["supplier2"].agreement.pause(ents[0].dn) + + new = _add_custom_at_definition('ATtest3') + topology_m2.ms["supplier1"].log.info("Update schema (M2) %s " % new) + topology_m2.ms["supplier2"].schema.add_schema('attributetypes', new) + time.sleep(1) + + new = _add_custom_oc_defintion('OCtest3') + topology_m2.ms["supplier1"].log.info("Update schema (M2) %s " % new) + topology_m2.ms["supplier2"].schema.add_schema('objectClasses', new) + time.sleep(1) + + mod = [(ldap.MOD_REPLACE, 'description', b'Hello world 3')] + dn = "cn=%s0,%s" % (OTHER_NAME, SUFFIX) + topology_m2.ms["supplier1"].modify_s(dn, 
mod) + + replication_check(topology_m2) + ent = topology_m2.ms["supplier2"].getEntry(dn, ldap.SCOPE_BASE, "(objectclass=*)") + assert ensure_str(ent.getValue('description')) == 'Hello world 3' + + time.sleep(5) + schema_csn_supplier1 = topology_m2.ms["supplier1"].schema.get_schema_csn() + schema_csn_supplier2 = topology_m2.ms["supplier2"].schema.get_schema_csn() + log.debug('Supplier 1 schemaCSN: %s' % schema_csn_supplier1) + log.debug('Supplier 2 schemaCSN: %s' % schema_csn_supplier2) + if schema_csn_supplier1 == schema_csn_supplier2: + # We need to give the server a little more time, then check it again + log.info('Schema CSNs are not in sync yet: m1 (%s) vs m2 (%s), wait a little...' + % (schema_csn_supplier1, schema_csn_supplier2)) + time.sleep(SLEEP_INTERVAL) + schema_csn_supplier1 = topology_m2.ms["supplier1"].schema.get_schema_csn() + schema_csn_supplier2 = topology_m2.ms["supplier2"].schema.get_schema_csn() + + assert schema_csn_supplier1 is not None + # schema csn on M2 is larger that on M1. M1 only took the new definitions + assert schema_csn_supplier1 != schema_csn_supplier2 + + +def test_ticket47721_4(topology_m2): + ''' + Here M2->M1 agreement is disabled. + with test_ticket47721_3, M1 schema and M2 should be identical BUT + the nsschemacsn is M2>M1. But as the RA M2->M1 is disabled, M1 keeps its schemacsn. + Update schema on M2 (nsschemaCSN update), update M2. 
Check they have the same schemacsn + ''' + log.info('Running test 4...') + + new = _add_custom_at_definition('ATtest4') + topology_m2.ms["supplier1"].log.info("Update schema (M1) %s " % new) + topology_m2.ms["supplier1"].schema.add_schema('attributetypes', new) + + new = _add_custom_oc_defintion('OCtest4') + topology_m2.ms["supplier1"].log.info("Update schema (M1) %s " % new) + topology_m2.ms["supplier1"].schema.add_schema('objectClasses', new) + + topology_m2.ms["supplier1"].log.info("trigger replication M1->M2: to update the schema") + mod = [(ldap.MOD_REPLACE, 'description', b'Hello world 4')] + dn = "cn=%s0,%s" % (OTHER_NAME, SUFFIX) + topology_m2.ms["supplier1"].modify_s(dn, mod) + + replication_check(topology_m2) + ent = topology_m2.ms["supplier2"].getEntry(dn, ldap.SCOPE_BASE, "(objectclass=*)") + assert ensure_str(ent.getValue('description')) == 'Hello world 4' + + topology_m2.ms["supplier1"].log.info("trigger replication M1->M2: to push the schema") + mod = [(ldap.MOD_REPLACE, 'description', b'Hello world 5')] + dn = "cn=%s0,%s" % (OTHER_NAME, SUFFIX) + topology_m2.ms["supplier1"].modify_s(dn, mod) + + replication_check(topology_m2) + ent = topology_m2.ms["supplier2"].getEntry(dn, ldap.SCOPE_BASE, "(objectclass=*)") + assert ensure_str(ent.getValue('description')) == 'Hello world 5' + + time.sleep(2) + schema_csn_supplier1 = topology_m2.ms["supplier1"].schema.get_schema_csn() + schema_csn_supplier2 = topology_m2.ms["supplier2"].schema.get_schema_csn() + log.debug('Supplier 1 schemaCSN: %s' % schema_csn_supplier1) + log.debug('Supplier 2 schemaCSN: %s' % schema_csn_supplier2) + if schema_csn_supplier1 != schema_csn_supplier2: + # We need to give the server a little more time, then check it again + log.info('Schema CSNs are incorrectly in sync, wait a little...') + time.sleep(SLEEP_INTERVAL) + schema_csn_supplier1 = topology_m2.ms["supplier1"].schema.get_schema_csn() + schema_csn_supplier2 = topology_m2.ms["supplier2"].schema.get_schema_csn() + + assert 
schema_csn_supplier1 is not None + assert schema_csn_supplier1 == schema_csn_supplier2 + + +if __name__ == '__main__': + # Run isolated + # -s for DEBUG mode + CURRENT_FILE = os.path.realpath(__file__) + pytest.main("-s %s" % CURRENT_FILE) diff --git a/dirsrvtests/tests/tickets/ticket47781_test.py b/dirsrvtests/tests/tickets/ticket47781_test.py new file mode 100644 index 0000000..ffb9a5e --- /dev/null +++ b/dirsrvtests/tests/tickets/ticket47781_test.py @@ -0,0 +1,104 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2016 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. +# --- END COPYRIGHT BLOCK --- +# +import logging + +import pytest +from lib389.tasks import * +from lib389.topologies import topology_st +from lib389.replica import ReplicationManager + +from lib389._constants import (defaultProperties, DEFAULT_SUFFIX, ReplicaRole, + REPLICAID_SUPPLIER_1, REPLICATION_BIND_DN, REPLICATION_BIND_PW, + REPLICATION_BIND_METHOD, REPLICATION_TRANSPORT, RA_NAME, + RA_BINDDN, RA_BINDPW, RA_METHOD, RA_TRANSPORT_PROT) + +pytestmark = pytest.mark.tier2 + +log = logging.getLogger(__name__) + + +def test_ticket47781(topology_st): + """ + Testing for a deadlock after doing an online import of an LDIF with + replication data. The replication agreement should be invalid. 
+ """ + + log.info('Testing Ticket 47781 - Testing for deadlock after importing LDIF with replication data') + + supplier = topology_st.standalone + repl = ReplicationManager(DEFAULT_SUFFIX) + repl.create_first_supplier(supplier) + + properties = {RA_NAME: r'meTo_$host:$port', + RA_BINDDN: defaultProperties[REPLICATION_BIND_DN], + RA_BINDPW: defaultProperties[REPLICATION_BIND_PW], + RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD], + RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]} + # The agreement should point to a server that does NOT exist (invalid port) + repl_agreement = supplier.agreement.create(suffix=DEFAULT_SUFFIX, + host=supplier.host, + port=5555, + properties=properties) + + # + # add two entries + # + log.info('Adding two entries...') + + supplier.add_s(Entry(('cn=entry1,dc=example,dc=com', { + 'objectclass': 'top person'.split(), + 'sn': 'user', + 'cn': 'entry1'}))) + + supplier.add_s(Entry(('cn=entry2,dc=example,dc=com', { + 'objectclass': 'top person'.split(), + 'sn': 'user', + 'cn': 'entry2'}))) + + # + # export the replication ldif + # + log.info('Exporting replication ldif...') + args = {EXPORT_REPL_INFO: True} + exportTask = Tasks(supplier) + exportTask.exportLDIF(DEFAULT_SUFFIX, None, "/tmp/export.ldif", args) + + # + # Restart the server + # + log.info('Restarting server...') + supplier.stop() + supplier.start() + + # + # Import the ldif + # + log.info('Import replication LDIF file...') + importTask = Tasks(supplier) + args = {TASK_WAIT: True} + importTask.importLDIF(DEFAULT_SUFFIX, None, "/tmp/export.ldif", args) + os.remove("/tmp/export.ldif") + + # + # Search for tombstones - we should not hang/timeout + # + log.info('Search for tombstone entries(should find one and not hang)...') + supplier.set_option(ldap.OPT_NETWORK_TIMEOUT, 5) + supplier.set_option(ldap.OPT_TIMEOUT, 5) + entries = supplier.search_s(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE, 'objectclass=nsTombstone') + if not entries: + log.fatal('Search failed to find any 
entries.') + assert PR_False + + +if __name__ == '__main__': + # Run isolated + # -s for DEBUG mode + CURRENT_FILE = os.path.realpath(__file__) + pytest.main("-s %s" % CURRENT_FILE) diff --git a/dirsrvtests/tests/tickets/ticket47787_test.py b/dirsrvtests/tests/tickets/ticket47787_test.py new file mode 100644 index 0000000..38f11d1 --- /dev/null +++ b/dirsrvtests/tests/tickets/ticket47787_test.py @@ -0,0 +1,428 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2016 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. +# --- END COPYRIGHT BLOCK --- +# +''' +Created on April 14, 2014 + +@author: tbordaz +''' +import logging +import re +import time + +import ldap +import pytest +from lib389 import Entry +from lib389._constants import * +from lib389.topologies import topology_m2 +from lib389.utils import * + +pytestmark = pytest.mark.tier2 + +logging.getLogger(__name__).setLevel(logging.DEBUG) +log = logging.getLogger(__name__) + +# set this flag to False so that it will assert on failure _status_entry_both_server +DEBUG_FLAG = False + +TEST_REPL_DN = "cn=test_repl, %s" % SUFFIX + +STAGING_CN = "staged user" +PRODUCTION_CN = "accounts" +EXCEPT_CN = "excepts" + +STAGING_DN = "cn=%s,%s" % (STAGING_CN, SUFFIX) +PRODUCTION_DN = "cn=%s,%s" % (PRODUCTION_CN, SUFFIX) +PROD_EXCEPT_DN = "cn=%s,%s" % (EXCEPT_CN, PRODUCTION_DN) + +STAGING_PATTERN = "cn=%s*,%s" % (STAGING_CN[:2], SUFFIX) +PRODUCTION_PATTERN = "cn=%s*,%s" % (PRODUCTION_CN[:2], SUFFIX) +BAD_STAGING_PATTERN = "cn=bad*,%s" % (SUFFIX) +BAD_PRODUCTION_PATTERN = "cn=bad*,%s" % (SUFFIX) + +BIND_CN = "bind_entry" +BIND_DN = "cn=%s,%s" % (BIND_CN, SUFFIX) +BIND_PW = "password" + +NEW_ACCOUNT = "new_account" +MAX_ACCOUNTS = 20 + +CONFIG_MODDN_ACI_ATTR = "nsslapd-moddn-aci" + + +def _bind_manager(server): + server.log.info("Bind as %s " % DN_DM) + server.simple_bind_s(DN_DM, PASSWORD) + + +def _bind_normal(server): + server.log.info("Bind as %s " % BIND_DN) + 
server.simple_bind_s(BIND_DN, BIND_PW) + + +def _header(topology_m2, label): + topology_m2.ms["supplier1"].log.info("\n\n###############################################") + topology_m2.ms["supplier1"].log.info("#######") + topology_m2.ms["supplier1"].log.info("####### %s" % label) + topology_m2.ms["supplier1"].log.info("#######") + topology_m2.ms["supplier1"].log.info("###############################################") + + +def _status_entry_both_server(topology_m2, name=None, desc=None, debug=True): + if not name: + return + topology_m2.ms["supplier1"].log.info("\n\n######################### Tombstone on M1 ######################\n") + attr = 'description' + found = False + attempt = 0 + while not found and attempt < 10: + ent_m1 = _find_tombstone(topology_m2.ms["supplier1"], SUFFIX, 'sn', name) + if attr in ent_m1.getAttrs(): + found = True + else: + time.sleep(1) + attempt = attempt + 1 + assert ent_m1 + + topology_m2.ms["supplier1"].log.info("\n\n######################### Tombstone on M2 ######################\n") + ent_m2 = _find_tombstone(topology_m2.ms["supplier2"], SUFFIX, 'sn', name) + assert ent_m2 + + topology_m2.ms["supplier1"].log.info("\n\n######################### Description ######################\n%s\n" % desc) + topology_m2.ms["supplier1"].log.info("M1 only\n") + for attr in ent_m1.getAttrs(): + + if not debug: + assert attr in ent_m2.getAttrs() + + if not attr in ent_m2.getAttrs(): + topology_m2.ms["supplier1"].log.info(" %s" % attr) + for val in ent_m1.getValues(attr): + topology_m2.ms["supplier1"].log.info(" %s" % val) + + topology_m2.ms["supplier1"].log.info("M2 only\n") + for attr in ent_m2.getAttrs(): + + if not debug: + assert attr in ent_m1.getAttrs() + + if not attr in ent_m1.getAttrs(): + topology_m2.ms["supplier1"].log.info(" %s" % attr) + for val in ent_m2.getValues(attr): + topology_m2.ms["supplier1"].log.info(" %s" % val) + + topology_m2.ms["supplier1"].log.info("M1 differs M2\n") + + if not debug: + assert ent_m1.dn == ent_m2.dn + + 
if ent_m1.dn != ent_m2.dn: + topology_m2.ms["supplier1"].log.info(" M1[dn] = %s\n M2[dn] = %s" % (ent_m1.dn, ent_m2.dn)) + + for attr1 in ent_m1.getAttrs(): + if attr1 in ent_m2.getAttrs(): + for val1 in ent_m1.getValues(attr1): + found = False + for val2 in ent_m2.getValues(attr1): + if val1 == val2: + found = True + break + + if not debug: + assert found + + if not found: + topology_m2.ms["supplier1"].log.info(" M1[%s] = %s" % (attr1, val1)) + + for attr2 in ent_m2.getAttrs(): + if attr2 in ent_m1.getAttrs(): + for val2 in ent_m2.getValues(attr2): + found = False + for val1 in ent_m1.getValues(attr2): + if val2 == val1: + found = True + break + + if not debug: + assert found + + if not found: + topology_m2.ms["supplier1"].log.info(" M2[%s] = %s" % (attr2, val2)) + + +def _pause_RAs(topology_m2): + topology_m2.ms["supplier1"].log.info("\n\n######################### Pause RA M1<->M2 ######################\n") + ents = topology_m2.ms["supplier1"].agreement.list(suffix=SUFFIX) + assert len(ents) == 1 + topology_m2.ms["supplier1"].agreement.pause(ents[0].dn) + + ents = topology_m2.ms["supplier2"].agreement.list(suffix=SUFFIX) + assert len(ents) == 1 + topology_m2.ms["supplier2"].agreement.pause(ents[0].dn) + + +def _resume_RAs(topology_m2): + topology_m2.ms["supplier1"].log.info("\n\n######################### resume RA M1<->M2 ######################\n") + ents = topology_m2.ms["supplier1"].agreement.list(suffix=SUFFIX) + assert len(ents) == 1 + topology_m2.ms["supplier1"].agreement.resume(ents[0].dn) + + ents = topology_m2.ms["supplier2"].agreement.list(suffix=SUFFIX) + assert len(ents) == 1 + topology_m2.ms["supplier2"].agreement.resume(ents[0].dn) + + +def _find_tombstone(instance, base, attr, value): + # + # we can not use a filter with a (&(objeclass=nsTombstone)(sn=name)) because + # tombstone are not index in 'sn' so 'sn=name' will return NULL + # and even if tombstone are indexed for objectclass the '&' will set + # the candidate list to NULL + # + filt = 
'(objectclass=%s)' % REPLICA_OC_TOMBSTONE + ents = instance.search_s(base, ldap.SCOPE_SUBTREE, filt) + # found = False + for ent in ents: + if ent.hasAttr(attr): + for val in ent.getValues(attr): + if ensure_str(val) == value: + instance.log.debug("tombstone found: %r" % ent) + return ent + return None + + +def _delete_entry(instance, entry_dn, name): + instance.log.info("\n\n######################### DELETE %s (M1) ######################\n" % name) + + # delete the entry + instance.delete_s(entry_dn) + ent = _find_tombstone(instance, SUFFIX, 'sn', name) + assert ent is not None + + +def _mod_entry(instance, entry_dn, attr, value): + instance.log.info("\n\n######################### MOD %s (M2) ######################\n" % entry_dn) + mod = [(ldap.MOD_REPLACE, attr, ensure_bytes(value))] + instance.modify_s(entry_dn, mod) + + +def _modrdn_entry(instance=None, entry_dn=None, new_rdn=None, del_old=0, new_superior=None): + assert instance is not None + assert entry_dn is not None + + if not new_rdn: + pattern = 'cn=(.*),(.*)' + rdnre = re.compile(pattern) + match = rdnre.match(entry_dn) + old_value = match.group(1) + new_rdn_val = "%s_modrdn" % old_value + new_rdn = "cn=%s" % new_rdn_val + + instance.log.info("\n\n######################### MODRDN %s (M2) ######################\n" % new_rdn) + if new_superior: + instance.rename_s(entry_dn, new_rdn, newsuperior=new_superior, delold=del_old) + else: + instance.rename_s(entry_dn, new_rdn, delold=del_old) + + +def _check_entry_exists(instance, entry_dn): + loop = 0 + ent = None + while loop <= 10: + try: + ent = instance.getEntry(entry_dn, ldap.SCOPE_BASE, "(objectclass=*)") + break + except ldap.NO_SUCH_OBJECT: + time.sleep(1) + loop += 1 + if ent is None: + assert False + + +def _check_mod_received(instance, base, filt, attr, value): + instance.log.info( + "\n\n######################### Check MOD replicated on %s ######################\n" % instance.serverid) + loop = 0 + while loop <= 10: + ent = instance.getEntry(base, 
ldap.SCOPE_SUBTREE, filt) + if ent.hasAttr(attr) and ent.getValue(attr) == value: + break + time.sleep(1) + loop += 1 + assert loop <= 10 + + +def _check_replication(topology_m2, entry_dn): + # prepare the filter to retrieve the entry + filt = entry_dn.split(',')[0] + + topology_m2.ms["supplier1"].log.info("\n######################### Check replicat M1->M2 ######################\n") + loop = 0 + while loop <= 10: + attr = 'description' + value = 'test_value_%d' % loop + mod = [(ldap.MOD_REPLACE, attr, ensure_bytes(value))] + topology_m2.ms["supplier1"].modify_s(entry_dn, mod) + _check_mod_received(topology_m2.ms["supplier2"], SUFFIX, filt, attr, value) + loop += 1 + + topology_m2.ms["supplier1"].log.info("\n######################### Check replicat M2->M1 ######################\n") + loop = 0 + while loop <= 10: + attr = 'description' + value = 'test_value_%d' % loop + mod = [(ldap.MOD_REPLACE, attr, ensure_bytes(value))] + topology_m2.ms["supplier2"].modify_s(entry_dn, mod) + _check_mod_received(topology_m2.ms["supplier1"], SUFFIX, filt, attr, value) + loop += 1 + + +def test_ticket47787_init(topology_m2): + """ + Creates + - a staging DIT + - a production DIT + - add accounts in staging DIT + + """ + + topology_m2.ms["supplier1"].log.info("\n\n######################### INITIALIZATION ######################\n") + + # entry used to bind with + topology_m2.ms["supplier1"].log.info("Add %s" % BIND_DN) + topology_m2.ms["supplier1"].add_s(Entry((BIND_DN, { + 'objectclass': "top person".split(), + 'sn': BIND_CN, + 'cn': BIND_CN, + 'userpassword': BIND_PW}))) + + # DIT for staging + topology_m2.ms["supplier1"].log.info("Add %s" % STAGING_DN) + topology_m2.ms["supplier1"].add_s(Entry((STAGING_DN, { + 'objectclass': "top organizationalRole".split(), + 'cn': STAGING_CN, + 'description': "staging DIT"}))) + + # DIT for production + topology_m2.ms["supplier1"].log.info("Add %s" % PRODUCTION_DN) + topology_m2.ms["supplier1"].add_s(Entry((PRODUCTION_DN, { + 'objectclass': "top 
organizationalRole".split(), + 'cn': PRODUCTION_CN, + 'description': "production DIT"}))) + + # enable replication error logging + mod = [(ldap.MOD_REPLACE, 'nsslapd-errorlog-level', b'8192')] + topology_m2.ms["supplier1"].modify_s(DN_CONFIG, mod) + topology_m2.ms["supplier2"].modify_s(DN_CONFIG, mod) + + # add dummy entries in the staging DIT + for cpt in range(MAX_ACCOUNTS): + name = "%s%d" % (NEW_ACCOUNT, cpt) + topology_m2.ms["supplier1"].add_s(Entry(("cn=%s,%s" % (name, STAGING_DN), { + 'objectclass': "top person".split(), + 'sn': name, + 'cn': name}))) + + +def test_ticket47787_2(topology_m2): + ''' + Disable replication so that updates are not replicated + Delete an entry on M1. Modrdn it on M2 (chg rdn + delold=0 + same superior). + update a test entry on M2 + Reenable the RA. + checks that entry was deleted on M2 (with the modified RDN) + checks that test entry was replicated on M1 (replication M2->M1 not broken by modrdn) + ''' + + _header(topology_m2, "test_ticket47787_2") + _bind_manager(topology_m2.ms["supplier1"]) + _bind_manager(topology_m2.ms["supplier2"]) + + # entry to test the replication is still working + name = "%s%d" % (NEW_ACCOUNT, MAX_ACCOUNTS - 1) + test_rdn = "cn=%s" % (name) + testentry_dn = "%s,%s" % (test_rdn, STAGING_DN) + + name = "%s%d" % (NEW_ACCOUNT, MAX_ACCOUNTS - 2) + test2_rdn = "cn=%s" % (name) + testentry2_dn = "%s,%s" % (test2_rdn, STAGING_DN) + + # value of updates to test the replication both ways + attr = 'description' + value = 'test_ticket47787_2' + + # entry for the modrdn + name = "%s%d" % (NEW_ACCOUNT, 1) + rdn = "cn=%s" % (name) + entry_dn = "%s,%s" % (rdn, STAGING_DN) + + # created on M1, wait the entry exists on M2 + _check_entry_exists(topology_m2.ms["supplier2"], entry_dn) + _check_entry_exists(topology_m2.ms["supplier2"], testentry_dn) + + _pause_RAs(topology_m2) + + # Delete 'entry_dn' on M1. 
+ # dummy update is only have a first CSN before the DEL + # else the DEL will be in min_csn RUV and make diagnostic a bit more complex + _mod_entry(topology_m2.ms["supplier1"], testentry2_dn, attr, 'dummy') + _delete_entry(topology_m2.ms["supplier1"], entry_dn, name) + _mod_entry(topology_m2.ms["supplier1"], testentry2_dn, attr, value) + + time.sleep(1) # important to have MOD.csn != DEL.csn + + # MOD 'entry_dn' on M1. + # dummy update is only have a first CSN before the MOD entry_dn + # else the DEL will be in min_csn RUV and make diagnostic a bit more complex + _mod_entry(topology_m2.ms["supplier2"], testentry_dn, attr, 'dummy') + _mod_entry(topology_m2.ms["supplier2"], entry_dn, attr, value) + _mod_entry(topology_m2.ms["supplier2"], testentry_dn, attr, value) + + _resume_RAs(topology_m2) + + topology_m2.ms["supplier1"].log.info( + "\n\n######################### Check DEL replicated on M2 ######################\n") + loop = 0 + while loop <= 10: + ent = _find_tombstone(topology_m2.ms["supplier2"], SUFFIX, 'sn', name) + if ent: + break + time.sleep(1) + loop += 1 + assert loop <= 10 + assert ent + + # the following checks are not necessary + # as this bug is only for failing replicated MOD (entry_dn) on M1 + # _check_mod_received(topology_m2.ms["supplier1"], SUFFIX, "(%s)" % (test_rdn), attr, value) + # _check_mod_received(topology_m2.ms["supplier2"], SUFFIX, "(%s)" % (test2_rdn), attr, value) + # + # _check_replication(topology_m2, testentry_dn) + + _status_entry_both_server(topology_m2, name=name, desc="DEL M1 - MOD M2", debug=DEBUG_FLAG) + + topology_m2.ms["supplier1"].log.info( + "\n\n######################### Check MOD replicated on M1 ######################\n") + loop = 0 + while loop <= 10: + ent = _find_tombstone(topology_m2.ms["supplier1"], SUFFIX, 'sn', name) + if ent: + break + time.sleep(1) + loop += 1 + assert loop <= 10 + assert ent + assert ent.hasAttr(attr) + assert ensure_str(ent.getValue(attr)) == value + + +if __name__ == '__main__': + # Run 
isolated + # -s for DEBUG mode + CURRENT_FILE = os.path.realpath(__file__) + pytest.main("-s %s" % CURRENT_FILE) diff --git a/dirsrvtests/tests/tickets/ticket47808_test.py b/dirsrvtests/tests/tickets/ticket47808_test.py new file mode 100644 index 0000000..8ca75eb --- /dev/null +++ b/dirsrvtests/tests/tickets/ticket47808_test.py @@ -0,0 +1,101 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2016 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. +# --- END COPYRIGHT BLOCK --- +# +import logging + +import ldap +import pytest +from lib389 import Entry +from lib389._constants import * +from lib389.topologies import topology_st +from lib389.utils import * + +pytestmark = pytest.mark.tier2 + +log = logging.getLogger(__name__) + +ATTRIBUTE_UNIQUENESS_PLUGIN = 'cn=attribute uniqueness,cn=plugins,cn=config' +ENTRY_NAME = 'test_entry' + + +def test_ticket47808_run(topology_st): + """ + It enables attribute uniqueness plugin with sn as a unique attribute + Add an entry 1 with sn = ENTRY_NAME + Add an entry 2 with sn = ENTRY_NAME + If the second add does not crash the server and the following search found none, + the bug is fixed. 
+ """ + + # bind as directory manager + topology_st.standalone.log.info("Bind as %s" % DN_DM) + topology_st.standalone.simple_bind_s(DN_DM, PASSWORD) + + topology_st.standalone.log.info("\n\n######################### SETUP ATTR UNIQ PLUGIN ######################\n") + + # enable attribute uniqueness plugin + mod = [(ldap.MOD_REPLACE, 'nsslapd-pluginEnabled', b'on'), (ldap.MOD_REPLACE, 'nsslapd-pluginarg0', b'sn'), + (ldap.MOD_REPLACE, 'nsslapd-pluginarg1', ensure_bytes(SUFFIX))] + topology_st.standalone.modify_s(ATTRIBUTE_UNIQUENESS_PLUGIN, mod) + + topology_st.standalone.log.info("\n\n######################### ADD USER 1 ######################\n") + + # Prepare entry 1 + entry_name = '%s 1' % (ENTRY_NAME) + entry_dn_1 = 'cn=%s, %s' % (entry_name, SUFFIX) + entry_1 = Entry(entry_dn_1) + entry_1.setValues('objectclass', 'top', 'person') + entry_1.setValues('sn', ENTRY_NAME) + entry_1.setValues('cn', entry_name) + topology_st.standalone.log.info("Try to add Add %s: %r" % (entry_1, entry_1)) + topology_st.standalone.add_s(entry_1) + + topology_st.standalone.log.info("\n\n######################### Restart Server ######################\n") + topology_st.standalone.stop(timeout=10) + topology_st.standalone.start(timeout=10) + + topology_st.standalone.log.info("\n\n######################### ADD USER 2 ######################\n") + + # Prepare entry 2 having the same sn, which crashes the server + entry_name = '%s 2' % (ENTRY_NAME) + entry_dn_2 = 'cn=%s, %s' % (entry_name, SUFFIX) + entry_2 = Entry(entry_dn_2) + entry_2.setValues('objectclass', 'top', 'person') + entry_2.setValues('sn', ENTRY_NAME) + entry_2.setValues('cn', entry_name) + topology_st.standalone.log.info("Try to add Add %s: %r" % (entry_2, entry_2)) + try: + topology_st.standalone.add_s(entry_2) + except: + topology_st.standalone.log.warning("Adding %s failed" % entry_dn_2) + pass + + topology_st.standalone.log.info("\n\n######################### IS SERVER UP? 
######################\n") + ents = topology_st.standalone.search_s(entry_dn_1, ldap.SCOPE_BASE, '(objectclass=*)') + assert len(ents) == 1 + topology_st.standalone.log.info("Yes, it's up.") + + topology_st.standalone.log.info("\n\n######################### CHECK USER 2 NOT ADDED ######################\n") + topology_st.standalone.log.info("Try to search %s" % entry_dn_2) + try: + ents = topology_st.standalone.search_s(entry_dn_2, ldap.SCOPE_BASE, '(objectclass=*)') + except ldap.NO_SUCH_OBJECT: + topology_st.standalone.log.info("Found none") + + topology_st.standalone.log.info("\n\n######################### DELETE USER 1 ######################\n") + + topology_st.standalone.log.info("Try to delete %s " % entry_dn_1) + topology_st.standalone.delete_s(entry_dn_1) + log.info('Testcase PASSED') + + +if __name__ == '__main__': + # Run isolated + # -s for DEBUG mode + CURRENT_FILE = os.path.realpath(__file__) + pytest.main("-s %s" % CURRENT_FILE) diff --git a/dirsrvtests/tests/tickets/ticket47815_test.py b/dirsrvtests/tests/tickets/ticket47815_test.py new file mode 100644 index 0000000..4263ab7 --- /dev/null +++ b/dirsrvtests/tests/tickets/ticket47815_test.py @@ -0,0 +1,116 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2016 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. +# --- END COPYRIGHT BLOCK --- +# +import logging +import time + +import ldap +import pytest +from lib389 import Entry +from lib389._constants import * +from lib389.topologies import topology_st + +log = logging.getLogger(__name__) +from lib389.utils import * + +# Skip on older versions +pytestmark = [pytest.mark.tier2, + pytest.mark.skipif(ds_is_older('1.3.3') or ds_is_newer('1.3.7'), + reason="Not implemented, or invalid by nsMemberOf")] + +def test_ticket47815(topology_st): + """ + Test betxn plugins reject an invalid option, and make sure that the rejected entry + is not in the entry cache. 
+ + Enable memberOf, automember, and retrocl plugins + Add the automember config entry + Add the automember group + Add a user that will be rejected by a betxn plugin - result error 53 + Attempt the same add again, and it should result in another error 53 (not error 68) + """ + result = 0 + result2 = 0 + + log.info( + 'Testing Ticket 47815 - Add entries that should be rejected by the betxn plugins, and are not left in the entry cache') + + # Enabled the plugins + topology_st.standalone.plugins.enable(name=PLUGIN_MEMBER_OF) + topology_st.standalone.plugins.enable(name=PLUGIN_AUTOMEMBER) + topology_st.standalone.plugins.enable(name=PLUGIN_RETRO_CHANGELOG) + + # configure automember config entry + log.info('Adding automember config') + try: + topology_st.standalone.add_s(Entry(('cn=group cfg,cn=Auto Membership Plugin,cn=plugins,cn=config', { + 'objectclass': 'top autoMemberDefinition'.split(), + 'autoMemberScope': 'dc=example,dc=com', + 'autoMemberFilter': 'cn=user', + 'autoMemberDefaultGroup': 'cn=group,dc=example,dc=com', + 'autoMemberGroupingAttr': 'member:dn', + 'cn': 'group cfg'}))) + except: + log.error('Failed to add automember config') + exit(1) + + topology_st.standalone.restart() + + # need to reopen a connection toward the instance + topology_st.standalone.open() + + # add automember group + log.info('Adding automember group') + try: + topology_st.standalone.add_s(Entry(('cn=group,dc=example,dc=com', { + 'objectclass': 'top groupOfNames'.split(), + 'cn': 'group'}))) + except: + log.error('Failed to add automember group') + exit(1) + + # add user that should result in an error 53 + log.info('Adding invalid entry') + + try: + topology_st.standalone.add_s(Entry(('cn=user,dc=example,dc=com', { + 'objectclass': 'top person'.split(), + 'sn': 'user', + 'cn': 'user'}))) + except ldap.UNWILLING_TO_PERFORM: + log.debug('Adding invalid entry failed as expected') + result = 53 + except ldap.LDAPError as e: + log.error('Unexpected result ' + e.message['desc']) + assert 
False + if result == 0: + log.error('Add operation unexpectedly succeeded') + assert False + + # Attempt to add user again, should result in error 53 again + try: + topology_st.standalone.add_s(Entry(('cn=user,dc=example,dc=com', { + 'objectclass': 'top person'.split(), + 'sn': 'user', + 'cn': 'user'}))) + except ldap.UNWILLING_TO_PERFORM: + log.debug('2nd add of invalid entry failed as expected') + result2 = 53 + except ldap.LDAPError as e: + log.error('Unexpected result ' + e.message['desc']) + assert False + if result2 == 0: + log.error('2nd Add operation unexpectedly succeeded') + assert False + + +if __name__ == '__main__': + # Run isolated + # -s for DEBUG mode + CURRENT_FILE = os.path.realpath(__file__) + pytest.main("-s %s" % CURRENT_FILE) diff --git a/dirsrvtests/tests/tickets/ticket47823_test.py b/dirsrvtests/tests/tickets/ticket47823_test.py new file mode 100644 index 0000000..07a3d90 --- /dev/null +++ b/dirsrvtests/tests/tickets/ticket47823_test.py @@ -0,0 +1,965 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2016 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. 
+# --- END COPYRIGHT BLOCK --- +# +import logging +import re +import shutil +import subprocess +import time + +import ldap +import pytest +from lib389 import Entry +from lib389._constants import * +from lib389.topologies import topology_st + +log = logging.getLogger(__name__) +from lib389.utils import * + +# Skip on older versions +pytestmark = [pytest.mark.tier2, + pytest.mark.skipif(ds_is_older('1.3.3'), reason="Not implemented")] +PROVISIONING_CN = "provisioning" +PROVISIONING_DN = "cn=%s,%s" % (PROVISIONING_CN, SUFFIX) + +ACTIVE_CN = "accounts" +STAGE_CN = "staged users" +DELETE_CN = "deleted users" +ACTIVE_DN = "cn=%s,%s" % (ACTIVE_CN, SUFFIX) +STAGE_DN = "cn=%s,%s" % (STAGE_CN, PROVISIONING_DN) +DELETE_DN = "cn=%s,%s" % (DELETE_CN, PROVISIONING_DN) + +STAGE_USER_CN = "stage guy" +STAGE_USER_DN = "cn=%s,%s" % (STAGE_USER_CN, STAGE_DN) + +ACTIVE_USER_CN = "active guy" +ACTIVE_USER_DN = "cn=%s,%s" % (ACTIVE_USER_CN, ACTIVE_DN) + +ACTIVE_USER_1_CN = "test_1" +ACTIVE_USER_1_DN = "cn=%s,%s" % (ACTIVE_USER_1_CN, ACTIVE_DN) +ACTIVE_USER_2_CN = "test_2" +ACTIVE_USER_2_DN = "cn=%s,%s" % (ACTIVE_USER_2_CN, ACTIVE_DN) + +STAGE_USER_1_CN = ACTIVE_USER_1_CN +STAGE_USER_1_DN = "cn=%s,%s" % (STAGE_USER_1_CN, STAGE_DN) +STAGE_USER_2_CN = ACTIVE_USER_2_CN +STAGE_USER_2_DN = "cn=%s,%s" % (STAGE_USER_2_CN, STAGE_DN) + +ALL_CONFIG_ATTRS = ['nsslapd-pluginarg0', 'nsslapd-pluginarg1', 'nsslapd-pluginarg2', + 'uniqueness-attribute-name', 'uniqueness-subtrees', 'uniqueness-across-all-subtrees'] + + +def _header(topology_st, label): + topology_st.standalone.log.info("\n\n###############################################") + topology_st.standalone.log.info("#######") + topology_st.standalone.log.info("####### %s" % label) + topology_st.standalone.log.info("#######") + topology_st.standalone.log.info("###############################################") + + +def _uniqueness_config_entry(topology_st, name=None): + if not name: + return None + + ent = topology_st.standalone.getEntry("cn=%s,%s" 
% (PLUGIN_ATTR_UNIQUENESS, DN_PLUGIN), ldap.SCOPE_BASE, + "(objectclass=nsSlapdPlugin)", + ['objectClass', 'cn', 'nsslapd-pluginPath', 'nsslapd-pluginInitfunc', + 'nsslapd-pluginType', 'nsslapd-pluginEnabled', + 'nsslapd-plugin-depends-on-type', + 'nsslapd-pluginId', 'nsslapd-pluginVersion', 'nsslapd-pluginVendor', + 'nsslapd-pluginDescription']) + ent.dn = "cn=%s uniqueness,%s" % (name, DN_PLUGIN) + return ent + + +def _build_config(topology_st, attr_name='cn', subtree_1=None, subtree_2=None, type_config='old', + across_subtrees=False): + assert topology_st + assert attr_name + assert subtree_1 + + if type_config == 'old': + # enable the 'cn' uniqueness on Active + config = _uniqueness_config_entry(topology_st, attr_name) + config.setValue('nsslapd-pluginarg0', attr_name) + config.setValue('nsslapd-pluginarg1', subtree_1) + if subtree_2: + config.setValue('nsslapd-pluginarg2', subtree_2) + else: + # prepare the config entry + config = _uniqueness_config_entry(topology_st, attr_name) + config.setValue('uniqueness-attribute-name', attr_name) + config.setValue('uniqueness-subtrees', subtree_1) + if subtree_2: + config.setValue('uniqueness-subtrees', subtree_2) + if across_subtrees: + config.setValue('uniqueness-across-all-subtrees', 'on') + return config + + +def _active_container_invalid_cfg_add(topology_st): + ''' + Check uniqueness is not enforced with ADD (invalid config) + ''' + topology_st.standalone.add_s(Entry((ACTIVE_USER_1_DN, { + 'objectclass': "top person".split(), + 'sn': ACTIVE_USER_1_CN, + 'cn': ACTIVE_USER_1_CN}))) + + topology_st.standalone.add_s(Entry((ACTIVE_USER_2_DN, { + 'objectclass': "top person".split(), + 'sn': ACTIVE_USER_2_CN, + 'cn': [ACTIVE_USER_1_CN, ACTIVE_USER_2_CN]}))) + + topology_st.standalone.delete_s(ACTIVE_USER_1_DN) + topology_st.standalone.delete_s(ACTIVE_USER_2_DN) + + +def _active_container_add(topology_st, type_config='old'): + ''' + Check uniqueness in a single container (Active) + Add an entry with a given 'cn', then check 
we can not add an entry with the same 'cn' value + + ''' + config = _build_config(topology_st, attr_name='cn', subtree_1=ACTIVE_DN, subtree_2=None, type_config=type_config, + across_subtrees=False) + + # remove the 'cn' uniqueness entry + try: + topology_st.standalone.delete_s(config.dn) + + except ldap.NO_SUCH_OBJECT: + pass + topology_st.standalone.restart(timeout=120) + + topology_st.standalone.log.info('Uniqueness not enforced: create the entries') + + topology_st.standalone.add_s(Entry((ACTIVE_USER_1_DN, { + 'objectclass': "top person".split(), + 'sn': ACTIVE_USER_1_CN, + 'cn': ACTIVE_USER_1_CN}))) + + topology_st.standalone.add_s(Entry((ACTIVE_USER_2_DN, { + 'objectclass': "top person".split(), + 'sn': ACTIVE_USER_2_CN, + 'cn': [ACTIVE_USER_1_CN, ACTIVE_USER_2_CN]}))) + + topology_st.standalone.delete_s(ACTIVE_USER_1_DN) + topology_st.standalone.delete_s(ACTIVE_USER_2_DN) + + topology_st.standalone.log.info('Uniqueness enforced: checks second entry is rejected') + + # enable the 'cn' uniqueness on Active + topology_st.standalone.add_s(config) + topology_st.standalone.restart(timeout=120) + topology_st.standalone.add_s(Entry((ACTIVE_USER_1_DN, { + 'objectclass': "top person".split(), + 'sn': ACTIVE_USER_1_CN, + 'cn': ACTIVE_USER_1_CN}))) + + try: + topology_st.standalone.add_s(Entry((ACTIVE_USER_2_DN, { + 'objectclass': "top person".split(), + 'sn': ACTIVE_USER_2_CN, + 'cn': [ACTIVE_USER_1_CN, ACTIVE_USER_2_CN]}))) + except ldap.CONSTRAINT_VIOLATION: + # yes it is expected + pass + + # cleanup the stuff now + topology_st.standalone.delete_s(config.dn) + topology_st.standalone.delete_s(ACTIVE_USER_1_DN) + + +def _active_container_mod(topology_st, type_config='old'): + ''' + Check uniqueness in a single container (active) + Add and entry with a given 'cn', then check we can not modify an entry with the same 'cn' value + + ''' + + config = _build_config(topology_st, attr_name='cn', subtree_1=ACTIVE_DN, subtree_2=None, type_config=type_config, + 
across_subtrees=False) + + # enable the 'cn' uniqueness on Active + topology_st.standalone.add_s(config) + topology_st.standalone.restart(timeout=120) + + topology_st.standalone.log.info('Uniqueness enforced: checks MOD ADD entry is rejected') + topology_st.standalone.add_s(Entry((ACTIVE_USER_1_DN, { + 'objectclass': "top person".split(), + 'sn': ACTIVE_USER_1_CN, + 'cn': ACTIVE_USER_1_CN}))) + + topology_st.standalone.add_s(Entry((ACTIVE_USER_2_DN, { + 'objectclass': "top person".split(), + 'sn': ACTIVE_USER_2_CN, + 'cn': ACTIVE_USER_2_CN}))) + + try: + topology_st.standalone.modify_s(ACTIVE_USER_2_DN, [(ldap.MOD_ADD, 'cn', ensure_bytes(ACTIVE_USER_1_CN))]) + except ldap.CONSTRAINT_VIOLATION: + # yes it is expected + pass + + topology_st.standalone.log.info('Uniqueness enforced: checks MOD REPLACE entry is rejected') + try: + topology_st.standalone.modify_s(ACTIVE_USER_2_DN, + [(ldap.MOD_REPLACE, 'cn', [ensure_bytes(ACTIVE_USER_1_CN), ensure_bytes(ACTIVE_USER_2_CN)])]) + except ldap.CONSTRAINT_VIOLATION: + # yes it is expected + pass + + # cleanup the stuff now + topology_st.standalone.delete_s(config.dn) + topology_st.standalone.delete_s(ACTIVE_USER_1_DN) + topology_st.standalone.delete_s(ACTIVE_USER_2_DN) + + +def _active_container_modrdn(topology_st, type_config='old'): + ''' + Check uniqueness in a single container + Add and entry with a given 'cn', then check we can not modrdn an entry with the same 'cn' value + + ''' + config = _build_config(topology_st, attr_name='cn', subtree_1=ACTIVE_DN, subtree_2=None, type_config=type_config, + across_subtrees=False) + + # enable the 'cn' uniqueness on Active + topology_st.standalone.add_s(config) + topology_st.standalone.restart(timeout=120) + + topology_st.standalone.log.info('Uniqueness enforced: checks MODRDN entry is rejected') + + topology_st.standalone.add_s(Entry((ACTIVE_USER_1_DN, { + 'objectclass': "top person".split(), + 'sn': ACTIVE_USER_1_CN, + 'cn': [ACTIVE_USER_1_CN, 'dummy']}))) + + 
topology_st.standalone.add_s(Entry((ACTIVE_USER_2_DN, { + 'objectclass': "top person".split(), + 'sn': ACTIVE_USER_2_CN, + 'cn': ACTIVE_USER_2_CN}))) + + try: + topology_st.standalone.rename_s(ACTIVE_USER_2_DN, 'cn=dummy', delold=0) + except ldap.CONSTRAINT_VIOLATION: + # yes it is expected + pass + + # cleanup the stuff now + topology_st.standalone.delete_s(config.dn) + topology_st.standalone.delete_s(ACTIVE_USER_1_DN) + topology_st.standalone.delete_s(ACTIVE_USER_2_DN) + + +def _active_stage_containers_add(topology_st, type_config='old', across_subtrees=False): + ''' + Check uniqueness in several containers + Add an entry on a container with a given 'cn' + with across_subtrees=False check we CAN add an entry with the same 'cn' value on the other container + with across_subtrees=True check we CAN NOT add an entry with the same 'cn' value on the other container + + ''' + config = _build_config(topology_st, attr_name='cn', subtree_1=ACTIVE_DN, subtree_2=STAGE_DN, + type_config=type_config, across_subtrees=False) + + topology_st.standalone.add_s(config) + topology_st.standalone.restart(timeout=120) + topology_st.standalone.add_s(Entry((ACTIVE_USER_1_DN, { + 'objectclass': "top person".split(), + 'sn': ACTIVE_USER_1_CN, + 'cn': ACTIVE_USER_1_CN}))) + try: + + # adding an entry on a separated contains with the same 'cn' + topology_st.standalone.add_s(Entry((STAGE_USER_1_DN, { + 'objectclass': "top person".split(), + 'sn': STAGE_USER_1_CN, + 'cn': ACTIVE_USER_1_CN}))) + except ldap.CONSTRAINT_VIOLATION: + assert across_subtrees + + # cleanup the stuff now + topology_st.standalone.delete_s(config.dn) + topology_st.standalone.delete_s(ACTIVE_USER_1_DN) + topology_st.standalone.delete_s(STAGE_USER_1_DN) + + +def _active_stage_containers_mod(topology_st, type_config='old', across_subtrees=False): + ''' + Check uniqueness in a several containers + Add an entry on a container with a given 'cn', then check we CAN mod an entry with the same 'cn' value on the other container + + 
''' + config = _build_config(topology_st, attr_name='cn', subtree_1=ACTIVE_DN, subtree_2=STAGE_DN, + type_config=type_config, across_subtrees=False) + + topology_st.standalone.add_s(config) + topology_st.standalone.restart(timeout=120) + # adding an entry on active with a different 'cn' + topology_st.standalone.add_s(Entry((ACTIVE_USER_1_DN, { + 'objectclass': "top person".split(), + 'sn': ACTIVE_USER_1_CN, + 'cn': ACTIVE_USER_2_CN}))) + + # adding an entry on a stage with a different 'cn' + topology_st.standalone.add_s(Entry((STAGE_USER_1_DN, { + 'objectclass': "top person".split(), + 'sn': STAGE_USER_1_CN, + 'cn': STAGE_USER_1_CN}))) + + try: + + # modify add same value + topology_st.standalone.modify_s(STAGE_USER_1_DN, [(ldap.MOD_ADD, 'cn', [ensure_bytes(ACTIVE_USER_2_CN)])]) + except ldap.CONSTRAINT_VIOLATION: + assert across_subtrees + + topology_st.standalone.delete_s(STAGE_USER_1_DN) + topology_st.standalone.add_s(Entry((STAGE_USER_1_DN, { + 'objectclass': "top person".split(), + 'sn': STAGE_USER_1_CN, + 'cn': STAGE_USER_2_CN}))) + try: + # modify replace same value + topology_st.standalone.modify_s(STAGE_USER_1_DN, + [(ldap.MOD_REPLACE, 'cn', [ensure_bytes(STAGE_USER_2_CN), ensure_bytes(ACTIVE_USER_1_CN)])]) + except ldap.CONSTRAINT_VIOLATION: + assert across_subtrees + + # cleanup the stuff now + topology_st.standalone.delete_s(config.dn) + topology_st.standalone.delete_s(ACTIVE_USER_1_DN) + topology_st.standalone.delete_s(STAGE_USER_1_DN) + + +def _active_stage_containers_modrdn(topology_st, type_config='old', across_subtrees=False): + ''' + Check uniqueness in a several containers + Add and entry with a given 'cn', then check we CAN modrdn an entry with the same 'cn' value on the other container + + ''' + + config = _build_config(topology_st, attr_name='cn', subtree_1=ACTIVE_DN, subtree_2=STAGE_DN, + type_config=type_config, across_subtrees=False) + + # enable the 'cn' uniqueness on Active and Stage + topology_st.standalone.add_s(config) + 
topology_st.standalone.restart(timeout=120) + topology_st.standalone.add_s(Entry((ACTIVE_USER_1_DN, { + 'objectclass': "top person".split(), + 'sn': ACTIVE_USER_1_CN, + 'cn': [ACTIVE_USER_1_CN, 'dummy']}))) + + topology_st.standalone.add_s(Entry((STAGE_USER_1_DN, { + 'objectclass': "top person".split(), + 'sn': STAGE_USER_1_CN, + 'cn': STAGE_USER_1_CN}))) + + try: + + topology_st.standalone.rename_s(STAGE_USER_1_DN, 'cn=dummy', delold=0) + + # check stage entry has 'cn=dummy' + stage_ent = topology_st.standalone.getEntry("cn=dummy,%s" % (STAGE_DN), ldap.SCOPE_BASE, "objectclass=*", + ['cn']) + assert stage_ent.hasAttr('cn') + found = False + for value in stage_ent.getValues('cn'): + if ensure_str(value) == 'dummy': + found = True + assert found + + # check active entry has 'cn=dummy' + active_ent = topology_st.standalone.getEntry(ACTIVE_USER_1_DN, ldap.SCOPE_BASE, "objectclass=*", ['cn']) + assert active_ent.hasAttr('cn') + found = False + for value in stage_ent.getValues('cn'): + if ensure_str(value) == 'dummy': + found = True + assert found + + topology_st.standalone.delete_s("cn=dummy,%s" % (STAGE_DN)) + except ldap.CONSTRAINT_VIOLATION: + assert across_subtrees + topology_st.standalone.delete_s(STAGE_USER_1_DN) + + # cleanup the stuff now + topology_st.standalone.delete_s(config.dn) + topology_st.standalone.delete_s(ACTIVE_USER_1_DN) + + +def _config_file(topology_st, action='save'): + dse_ldif = topology_st.standalone.confdir + '/dse.ldif' + sav_file = topology_st.standalone.confdir + '/dse.ldif.ticket47823' + if action == 'save': + shutil.copy(dse_ldif, sav_file) + else: + shutil.copy(sav_file, dse_ldif) + time.sleep(1) + + +def _pattern_errorlog(file, log_pattern): + try: + _pattern_errorlog.last_pos += 1 + except AttributeError: + _pattern_errorlog.last_pos = 0 + + found = None + log.debug("_pattern_errorlog: start at offset %d" % _pattern_errorlog.last_pos) + file.seek(_pattern_errorlog.last_pos) + + # Use a while true iteration because 'for line in file: 
hit a + # python bug that break file.tell() + while True: + line = file.readline() + log.debug("_pattern_errorlog: [%d] %s" % (file.tell(), line)) + found = log_pattern.search(line) + if ((line == '') or (found)): + break + + log.debug("_pattern_errorlog: end at offset %d" % file.tell()) + _pattern_errorlog.last_pos = file.tell() + return found + + +def test_ticket47823_init(topology_st): + """ + + """ + + # Enabled the plugins + topology_st.standalone.plugins.enable(name=PLUGIN_ATTR_UNIQUENESS) + topology_st.standalone.restart(timeout=120) + + topology_st.standalone.add_s(Entry((PROVISIONING_DN, {'objectclass': "top nscontainer".split(), + 'cn': PROVISIONING_CN}))) + topology_st.standalone.add_s(Entry((ACTIVE_DN, {'objectclass': "top nscontainer".split(), + 'cn': ACTIVE_CN}))) + topology_st.standalone.add_s(Entry((STAGE_DN, {'objectclass': "top nscontainer".split(), + 'cn': STAGE_CN}))) + topology_st.standalone.add_s(Entry((DELETE_DN, {'objectclass': "top nscontainer".split(), + 'cn': DELETE_CN}))) + topology_st.standalone.errorlog_file = open(topology_st.standalone.errlog, "r") + + topology_st.standalone.stop(timeout=120) + time.sleep(1) + topology_st.standalone.start(timeout=120) + time.sleep(3) + + +def test_ticket47823_one_container_add(topology_st): + ''' + Check uniqueness in a single container + Add and entry with a given 'cn', then check we can not add an entry with the same 'cn' value + + ''' + _header(topology_st, "With former config (args), check attribute uniqueness with 'cn' (ADD) ") + + _active_container_add(topology_st, type_config='old') + + _header(topology_st, "With new config (args), check attribute uniqueness with 'cn' (ADD) ") + + _active_container_add(topology_st, type_config='new') + + +def test_ticket47823_one_container_mod(topology_st): + ''' + Check uniqueness in a single container + Add and entry with a given 'cn', then check we can not modify an entry with the same 'cn' value + + ''' + _header(topology_st, "With former config (args), 
check attribute uniqueness with 'cn' (MOD)") + + _active_container_mod(topology_st, type_config='old') + + _header(topology_st, "With new config (args), check attribute uniqueness with 'cn' (MOD)") + + _active_container_mod(topology_st, type_config='new') + + +def test_ticket47823_one_container_modrdn(topology_st): + ''' + Check uniqueness in a single container + Add and entry with a given 'cn', then check we can not modrdn an entry with the same 'cn' value + + ''' + _header(topology_st, "With former config (args), check attribute uniqueness with 'cn' (MODRDN)") + + _active_container_modrdn(topology_st, type_config='old') + + _header(topology_st, "With former config (args), check attribute uniqueness with 'cn' (MODRDN)") + + _active_container_modrdn(topology_st, type_config='new') + + +def test_ticket47823_multi_containers_add(topology_st): + ''' + Check uniqueness in a several containers + Add and entry with a given 'cn', then check we can not add an entry with the same 'cn' value + + ''' + _header(topology_st, "With former config (args), check attribute uniqueness with 'cn' (ADD) ") + + _active_stage_containers_add(topology_st, type_config='old', across_subtrees=False) + + _header(topology_st, "With new config (args), check attribute uniqueness with 'cn' (ADD) ") + + _active_stage_containers_add(topology_st, type_config='new', across_subtrees=False) + + +def test_ticket47823_multi_containers_mod(topology_st): + ''' + Check uniqueness in a several containers + Add an entry on a container with a given 'cn', then check we CAN mod an entry with the same 'cn' value on the other container + + ''' + _header(topology_st, "With former config (args), check attribute uniqueness with 'cn' (MOD) on separated container") + + topology_st.standalone.log.info( + 'Uniqueness not enforced: if same \'cn\' modified (add/replace) on separated containers') + _active_stage_containers_mod(topology_st, type_config='old', across_subtrees=False) + + _header(topology_st, "With new config 
(args), check attribute uniqueness with 'cn' (MOD) on separated container") + + topology_st.standalone.log.info( + 'Uniqueness not enforced: if same \'cn\' modified (add/replace) on separated containers') + _active_stage_containers_mod(topology_st, type_config='new', across_subtrees=False) + + +def test_ticket47823_multi_containers_modrdn(topology_st): + ''' + Check uniqueness in a several containers + Add and entry with a given 'cn', then check we CAN modrdn an entry with the same 'cn' value on the other container + + ''' + _header(topology_st, + "With former config (args), check attribute uniqueness with 'cn' (MODRDN) on separated containers") + + topology_st.standalone.log.info('Uniqueness not enforced: checks MODRDN entry is accepted on separated containers') + _active_stage_containers_modrdn(topology_st, type_config='old', across_subtrees=False) + + topology_st.standalone.log.info('Uniqueness not enforced: checks MODRDN entry is accepted on separated containers') + _active_stage_containers_modrdn(topology_st, type_config='old') + + +def test_ticket47823_across_multi_containers_add(topology_st): + ''' + Check uniqueness across several containers, uniquely with the new configuration + Add and entry with a given 'cn', then check we can not add an entry with the same 'cn' value + + ''' + _header(topology_st, "With new config (args), check attribute uniqueness with 'cn' (ADD) across several containers") + + _active_stage_containers_add(topology_st, type_config='old', across_subtrees=True) + + +def test_ticket47823_across_multi_containers_mod(topology_st): + ''' + Check uniqueness across several containers, uniquely with the new configuration + Add and entry with a given 'cn', then check we can not modifiy an entry with the same 'cn' value + + ''' + _header(topology_st, "With new config (args), check attribute uniqueness with 'cn' (MOD) across several containers") + + _active_stage_containers_mod(topology_st, type_config='old', across_subtrees=True) + + +def 
test_ticket47823_across_multi_containers_modrdn(topology_st): + ''' + Check uniqueness across several containers, uniquely with the new configuration + Add and entry with a given 'cn', then check we can not modrdn an entry with the same 'cn' value + + ''' + _header(topology_st, + "With new config (args), check attribute uniqueness with 'cn' (MODRDN) across several containers") + + _active_stage_containers_modrdn(topology_st, type_config='old', across_subtrees=True) + + +def test_ticket47823_invalid_config_1(topology_st): + ''' + Check that an invalid config is detected. No uniqueness enforced + Using old config: arg0 is missing + ''' + _header(topology_st, "Invalid config (old): arg0 is missing") + + _config_file(topology_st, action='save') + + # create an invalid config without arg0 + config = _build_config(topology_st, attr_name='cn', subtree_1=ACTIVE_DN, subtree_2=None, type_config='old', + across_subtrees=False) + + del config.data['nsslapd-pluginarg0'] + # replace 'cn' uniqueness entry + try: + topology_st.standalone.delete_s(config.dn) + + except ldap.NO_SUCH_OBJECT: + pass + topology_st.standalone.add_s(config) + + topology_st.standalone.getEntry(config.dn, ldap.SCOPE_BASE, "(objectclass=nsSlapdPlugin)", ALL_CONFIG_ATTRS) + + # Check the server did not restart + topology_st.standalone.modify_s(DN_CONFIG, [(ldap.MOD_REPLACE, 'nsslapd-errorlog-level', b'65536')]) + try: + topology_st.standalone.restart() + ent = topology_st.standalone.getEntry(config.dn, ldap.SCOPE_BASE, "(objectclass=nsSlapdPlugin)", + ALL_CONFIG_ATTRS) + if ent: + # be sure to restore a valid config before assert + _config_file(topology_st, action='restore') + assert not ent + except: + pass + + # Check the expected error message + regex = re.compile("[U|u]nable to parse old style") + res = _pattern_errorlog(topology_st.standalone.errorlog_file, regex) + if not res: + # be sure to restore a valid config before assert + _config_file(topology_st, action='restore') + assert res + + # Check we 
can restart the server + _config_file(topology_st, action='restore') + topology_st.standalone.start() + try: + topology_st.standalone.getEntry(config.dn, ldap.SCOPE_BASE, "(objectclass=nsSlapdPlugin)", ALL_CONFIG_ATTRS) + except ldap.NO_SUCH_OBJECT: + pass + + +def test_ticket47823_invalid_config_2(topology_st): + ''' + Check that an invalid config is detected. No uniqueness enforced + Using old config: arg1 is missing + ''' + _header(topology_st, "Invalid config (old): arg1 is missing") + + _config_file(topology_st, action='save') + + # create an invalid config without arg0 + config = _build_config(topology_st, attr_name='cn', subtree_1=ACTIVE_DN, subtree_2=None, type_config='old', + across_subtrees=False) + + del config.data['nsslapd-pluginarg1'] + # replace 'cn' uniqueness entry + try: + topology_st.standalone.delete_s(config.dn) + + except ldap.NO_SUCH_OBJECT: + pass + topology_st.standalone.add_s(config) + + topology_st.standalone.getEntry(config.dn, ldap.SCOPE_BASE, "(objectclass=nsSlapdPlugin)", ALL_CONFIG_ATTRS) + + # Check the server did not restart + try: + topology_st.standalone.restart() + ent = topology_st.standalone.getEntry(config.dn, ldap.SCOPE_BASE, "(objectclass=nsSlapdPlugin)", + ALL_CONFIG_ATTRS) + if ent: + # be sure to restore a valid config before assert + _config_file(topology_st, action='restore') + assert not ent + except: + pass + + # Check the expected error message + regex = re.compile("No valid subtree is defined") + res = _pattern_errorlog(topology_st.standalone.errorlog_file, regex) + if not res: + # be sure to restore a valid config before assert + _config_file(topology_st, action='restore') + assert res + + # Check we can restart the server + _config_file(topology_st, action='restore') + topology_st.standalone.start() + try: + topology_st.standalone.getEntry(config.dn, ldap.SCOPE_BASE, "(objectclass=nsSlapdPlugin)", ALL_CONFIG_ATTRS) + except ldap.NO_SUCH_OBJECT: + pass + + +def test_ticket47823_invalid_config_3(topology_st): + ''' 
+ Check that an invalid config is detected. No uniqueness enforced + Using old config: arg0 is missing + ''' + _header(topology_st, "Invalid config (old): arg0 is missing but new config attrname exists") + + _config_file(topology_st, action='save') + + # create an invalid config without arg0 + config = _build_config(topology_st, attr_name='cn', subtree_1=ACTIVE_DN, subtree_2=None, type_config='old', + across_subtrees=False) + + del config.data['nsslapd-pluginarg0'] + config.data['uniqueness-attribute-name'] = 'cn' + # replace 'cn' uniqueness entry + try: + topology_st.standalone.delete_s(config.dn) + + except ldap.NO_SUCH_OBJECT: + pass + topology_st.standalone.add_s(config) + + topology_st.standalone.getEntry(config.dn, ldap.SCOPE_BASE, "(objectclass=nsSlapdPlugin)", ALL_CONFIG_ATTRS) + + # Check the server did not restart + topology_st.standalone.modify_s(DN_CONFIG, [(ldap.MOD_REPLACE, 'nsslapd-errorlog-level', b'65536')]) + try: + topology_st.standalone.restart() + ent = topology_st.standalone.getEntry(config.dn, ldap.SCOPE_BASE, "(objectclass=nsSlapdPlugin)", + ALL_CONFIG_ATTRS) + if ent: + # be sure to restore a valid config before assert + _config_file(topology_st, action='restore') + assert not ent + except: + pass + + # Check the expected error message + regex = re.compile("[U|u]nable to parse old style") + res = _pattern_errorlog(topology_st.standalone.errorlog_file, regex) + if not res: + # be sure to restore a valid config before assert + _config_file(topology_st, action='restore') + assert res + + # Check we can restart the server + _config_file(topology_st, action='restore') + topology_st.standalone.start() + try: + topology_st.standalone.getEntry(config.dn, ldap.SCOPE_BASE, "(objectclass=nsSlapdPlugin)", ALL_CONFIG_ATTRS) + except ldap.NO_SUCH_OBJECT: + pass + + +def test_ticket47823_invalid_config_4(topology_st): + ''' + Check that an invalid config is detected. 
No uniqueness enforced + Using old config: arg1 is missing + ''' + _header(topology_st, "Invalid config (old): arg1 is missing but new config exist") + + _config_file(topology_st, action='save') + + # create an invalid config without arg0 + config = _build_config(topology_st, attr_name='cn', subtree_1=ACTIVE_DN, subtree_2=None, type_config='old', + across_subtrees=False) + + del config.data['nsslapd-pluginarg1'] + config.data['uniqueness-subtrees'] = ACTIVE_DN + # replace 'cn' uniqueness entry + try: + topology_st.standalone.delete_s(config.dn) + + except ldap.NO_SUCH_OBJECT: + pass + topology_st.standalone.add_s(config) + + topology_st.standalone.getEntry(config.dn, ldap.SCOPE_BASE, "(objectclass=nsSlapdPlugin)", ALL_CONFIG_ATTRS) + + # Check the server did not restart + try: + topology_st.standalone.restart() + ent = topology_st.standalone.getEntry(config.dn, ldap.SCOPE_BASE, "(objectclass=nsSlapdPlugin)", + ALL_CONFIG_ATTRS) + if ent: + # be sure to restore a valid config before assert + _config_file(topology_st, action='restore') + assert not ent + except: + pass + + # Check the expected error message + regex = re.compile("No valid subtree is defined") + res = _pattern_errorlog(topology_st.standalone.errorlog_file, regex) + if not res: + # be sure to restore a valid config before assert + _config_file(topology_st, action='restore') + assert res + + # Check we can restart the server + _config_file(topology_st, action='restore') + topology_st.standalone.start() + try: + topology_st.standalone.getEntry(config.dn, ldap.SCOPE_BASE, "(objectclass=nsSlapdPlugin)", ALL_CONFIG_ATTRS) + except ldap.NO_SUCH_OBJECT: + pass + + +def test_ticket47823_invalid_config_5(topology_st): + ''' + Check that an invalid config is detected. 
No uniqueness enforced + Using new config: uniqueness-attribute-name is missing + ''' + _header(topology_st, "Invalid config (new): uniqueness-attribute-name is missing") + + _config_file(topology_st, action='save') + + # create an invalid config without arg0 + config = _build_config(topology_st, attr_name='cn', subtree_1=ACTIVE_DN, subtree_2=None, type_config='new', + across_subtrees=False) + + del config.data['uniqueness-attribute-name'] + # replace 'cn' uniqueness entry + try: + topology_st.standalone.delete_s(config.dn) + + except ldap.NO_SUCH_OBJECT: + pass + topology_st.standalone.add_s(config) + + topology_st.standalone.getEntry(config.dn, ldap.SCOPE_BASE, "(objectclass=nsSlapdPlugin)", ALL_CONFIG_ATTRS) + + # Check the server did not restart + try: + topology_st.standalone.restart() + ent = topology_st.standalone.getEntry(config.dn, ldap.SCOPE_BASE, "(objectclass=nsSlapdPlugin)", + ALL_CONFIG_ATTRS) + if ent: + # be sure to restore a valid config before assert + _config_file(topology_st, action='restore') + assert not ent + except: + pass + + # Check the expected error message + regex = re.compile("[A|a]ttribute name not defined") + res = _pattern_errorlog(topology_st.standalone.errorlog_file, regex) + if not res: + # be sure to restore a valid config before assert + _config_file(topology_st, action='restore') + assert res + + # Check we can restart the server + _config_file(topology_st, action='restore') + topology_st.standalone.start() + try: + topology_st.standalone.getEntry(config.dn, ldap.SCOPE_BASE, "(objectclass=nsSlapdPlugin)", ALL_CONFIG_ATTRS) + except ldap.NO_SUCH_OBJECT: + pass + + +def test_ticket47823_invalid_config_6(topology_st): + ''' + Check that an invalid config is detected. 
No uniqueness enforced + Using new config: uniqueness-subtrees is missing + ''' + _header(topology_st, "Invalid config (new): uniqueness-subtrees is missing") + + _config_file(topology_st, action='save') + + # create an invalid config without arg0 + config = _build_config(topology_st, attr_name='cn', subtree_1=ACTIVE_DN, subtree_2=None, type_config='new', + across_subtrees=False) + + del config.data['uniqueness-subtrees'] + # replace 'cn' uniqueness entry + try: + topology_st.standalone.delete_s(config.dn) + + except ldap.NO_SUCH_OBJECT: + pass + topology_st.standalone.add_s(config) + + topology_st.standalone.getEntry(config.dn, ldap.SCOPE_BASE, "(objectclass=nsSlapdPlugin)", ALL_CONFIG_ATTRS) + + # Check the server did not restart + try: + topology_st.standalone.restart() + ent = topology_st.standalone.getEntry(config.dn, ldap.SCOPE_BASE, "(objectclass=nsSlapdPlugin)", + ALL_CONFIG_ATTRS) + if ent: + # be sure to restore a valid config before assert + _config_file(topology_st, action='restore') + assert not ent + except: + pass + + # Check the expected error message + regex = re.compile("[O|o]bjectclass for subtree entries is not defined") + res = _pattern_errorlog(topology_st.standalone.errorlog_file, regex) + if not res: + # be sure to restore a valid config before assert + _config_file(topology_st, action='restore') + assert res + + # Check we can restart the server + _config_file(topology_st, action='restore') + topology_st.standalone.start() + try: + topology_st.standalone.getEntry(config.dn, ldap.SCOPE_BASE, "(objectclass=nsSlapdPlugin)", ALL_CONFIG_ATTRS) + except ldap.NO_SUCH_OBJECT: + pass + + +def test_ticket47823_invalid_config_7(topology_st): + ''' + Check that an invalid config is detected. 
No uniqueness enforced + Using new config: uniqueness-subtrees is missing + ''' + _header(topology_st, "Invalid config (new): uniqueness-subtrees are invalid") + + _config_file(topology_st, action='save') + + # create an invalid config without arg0 + config = _build_config(topology_st, attr_name='cn', subtree_1="this_is dummy DN", subtree_2="an other=dummy DN", + type_config='new', across_subtrees=False) + + topology_st.standalone.modify_s(DN_CONFIG, [(ldap.MOD_REPLACE, 'nsslapd-errorlog-level', b'65536')]) + # replace 'cn' uniqueness entry + try: + topology_st.standalone.delete_s(config.dn) + + except ldap.NO_SUCH_OBJECT: + pass + topology_st.standalone.add_s(config) + + topology_st.standalone.getEntry(config.dn, ldap.SCOPE_BASE, "(objectclass=nsSlapdPlugin)", ALL_CONFIG_ATTRS) + + # Check the server did not restart + try: + topology_st.standalone.restart() + ent = topology_st.standalone.getEntry(config.dn, ldap.SCOPE_BASE, "(objectclass=nsSlapdPlugin)", + ALL_CONFIG_ATTRS) + if ent: + # be sure to restore a valid config before assert + _config_file(topology_st, action='restore') + assert not ent + except: + pass + + # Check the expected error message + regex = re.compile("No valid subtree is defined") + res = _pattern_errorlog(topology_st.standalone.errorlog_file, regex) + if not res: + # be sure to restore a valid config before assert + _config_file(topology_st, action='restore') + assert res + + # Check we can restart the server + _config_file(topology_st, action='restore') + topology_st.standalone.start() + try: + topology_st.standalone.getEntry(config.dn, ldap.SCOPE_BASE, "(objectclass=nsSlapdPlugin)", ALL_CONFIG_ATTRS) + except ldap.NO_SUCH_OBJECT: + pass + + +if __name__ == '__main__': + # Run isolated + # -s for DEBUG mode + CURRENT_FILE = os.path.realpath(__file__) + pytest.main("-s %s" % CURRENT_FILE) diff --git a/dirsrvtests/tests/tickets/ticket47828_test.py b/dirsrvtests/tests/tickets/ticket47828_test.py new file mode 100644 index 0000000..e20f753 --- 
/dev/null +++ b/dirsrvtests/tests/tickets/ticket47828_test.py @@ -0,0 +1,652 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2016 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. +# --- END COPYRIGHT BLOCK --- +# +import logging + +import ldap +import pytest +from lib389 import Entry +from lib389._constants import * +from lib389.topologies import topology_st + +log = logging.getLogger(__name__) + +ACCT_POLICY_CONFIG_DN = 'cn=config,cn=%s,cn=plugins,cn=config' % PLUGIN_ACCT_POLICY +ACCT_POLICY_DN = 'cn=Account Inactivation Policy,%s' % SUFFIX +from lib389.utils import * + +# Skip on older versions +pytestmark = [pytest.mark.tier2, + pytest.mark.skipif(ds_is_older('1.3.3'), reason="Not implemented")] +INACTIVITY_LIMIT = '9' +SEARCHFILTER = '(objectclass=*)' + +DUMMY_CONTAINER = 'cn=dummy container,%s' % SUFFIX +PROVISIONING = 'cn=provisioning,%s' % SUFFIX +ACTIVE_USER1_CN = 'active user1' +ACTIVE_USER1_DN = 'cn=%s,%s' % (ACTIVE_USER1_CN, SUFFIX) +STAGED_USER1_CN = 'staged user1' +STAGED_USER1_DN = 'cn=%s,%s' % (STAGED_USER1_CN, PROVISIONING) +DUMMY_USER1_CN = 'dummy user1' +DUMMY_USER1_DN = 'cn=%s,%s' % (DUMMY_USER1_CN, DUMMY_CONTAINER) + +ALLOCATED_ATTR = 'employeeNumber' + + +def _header(topology_st, label): + topology_st.standalone.log.info("\n\n###############################################") + topology_st.standalone.log.info("#######") + topology_st.standalone.log.info("####### %s" % label) + topology_st.standalone.log.info("#######") + topology_st.standalone.log.info("###############################################") + + +def test_ticket47828_init(topology_st): + """ + Enable DNA + """ + topology_st.standalone.plugins.enable(name=PLUGIN_DNA) + + topology_st.standalone.add_s(Entry((PROVISIONING, {'objectclass': "top nscontainer".split(), + 'cn': 'provisioning'}))) + topology_st.standalone.add_s(Entry((DUMMY_CONTAINER, {'objectclass': "top nscontainer".split(), + 'cn': 'dummy 
container'}))) + + dn_config = "cn=excluded scope, cn=%s, %s" % (PLUGIN_DNA, DN_PLUGIN) + topology_st.standalone.add_s(Entry((dn_config, {'objectclass': "top extensibleObject".split(), + 'cn': 'excluded scope', + 'dnaType': ALLOCATED_ATTR, + 'dnaNextValue': str(1000), + 'dnaMaxValue': str(2000), + 'dnaMagicRegen': str(-1), + 'dnaFilter': '(&(objectClass=person)(objectClass=organizationalPerson)(objectClass=inetOrgPerson))', + 'dnaScope': SUFFIX}))) + topology_st.standalone.restart(timeout=10) + + +def test_ticket47828_run_0(topology_st): + """ + NO exclude scope: Add an active entry and check its ALLOCATED_ATTR is set + """ + _header(topology_st, 'NO exclude scope: Add an active entry and check its ALLOCATED_ATTR is set') + + topology_st.standalone.add_s( + Entry((ACTIVE_USER1_DN, {'objectclass': "top person organizationalPerson inetOrgPerson".split(), + 'cn': ACTIVE_USER1_CN, + 'sn': ACTIVE_USER1_CN, + ALLOCATED_ATTR: str(-1)}))) + ent = topology_st.standalone.getEntry(ACTIVE_USER1_DN, ldap.SCOPE_BASE, "(objectclass=*)") + assert ent.hasAttr(ALLOCATED_ATTR) + assert ensure_str(ent.getValue(ALLOCATED_ATTR)) != str(-1) + topology_st.standalone.log.debug('%s.%s=%s' % (ACTIVE_USER1_CN, ALLOCATED_ATTR, ensure_str(ent.getValue(ALLOCATED_ATTR)))) + topology_st.standalone.delete_s(ACTIVE_USER1_DN) + + +def test_ticket47828_run_1(topology_st): + """ + NO exclude scope: Add an active entry and check its ALLOCATED_ATTR is unchanged (!= magic) + """ + _header(topology_st, 'NO exclude scope: Add an active entry and check its ALLOCATED_ATTR is unchanged (!= magic)') + + topology_st.standalone.add_s( + Entry((ACTIVE_USER1_DN, {'objectclass': "top person organizationalPerson inetOrgPerson".split(), + 'cn': ACTIVE_USER1_CN, + 'sn': ACTIVE_USER1_CN, + ALLOCATED_ATTR: str(20)}))) + ent = topology_st.standalone.getEntry(ACTIVE_USER1_DN, ldap.SCOPE_BASE, "(objectclass=*)") + assert ent.hasAttr(ALLOCATED_ATTR) + assert ensure_str(ent.getValue(ALLOCATED_ATTR)) == str(20) + 
topology_st.standalone.log.debug('%s.%s=%s' % (ACTIVE_USER1_CN, ALLOCATED_ATTR, ensure_str(ent.getValue(ALLOCATED_ATTR)))) + topology_st.standalone.delete_s(ACTIVE_USER1_DN) + + +def test_ticket47828_run_2(topology_st): + """ + NO exclude scope: Add a staged entry and check its ALLOCATED_ATTR is set + """ + _header(topology_st, 'NO exclude scope: Add a staged entry and check its ALLOCATED_ATTR is set') + + topology_st.standalone.add_s( + Entry((STAGED_USER1_DN, {'objectclass': "top person organizationalPerson inetOrgPerson".split(), + 'cn': STAGED_USER1_CN, + 'sn': STAGED_USER1_CN, + ALLOCATED_ATTR: str(-1)}))) + ent = topology_st.standalone.getEntry(STAGED_USER1_DN, ldap.SCOPE_BASE, "(objectclass=*)") + assert ent.hasAttr(ALLOCATED_ATTR) + assert ensure_str(ent.getValue(ALLOCATED_ATTR)) != str(-1) + topology_st.standalone.log.debug('%s.%s=%s' % (STAGED_USER1_CN, ALLOCATED_ATTR, ensure_str(ent.getValue(ALLOCATED_ATTR)))) + topology_st.standalone.delete_s(STAGED_USER1_DN) + + +def test_ticket47828_run_3(topology_st): + """ + NO exclude scope: Add a staged entry and check its ALLOCATED_ATTR is unchanged (!= magic) + """ + _header(topology_st, 'NO exclude scope: Add a staged entry and check its ALLOCATED_ATTR is unchanged (!= magic)') + + topology_st.standalone.add_s( + Entry((STAGED_USER1_DN, {'objectclass': "top person organizationalPerson inetOrgPerson".split(), + 'cn': STAGED_USER1_CN, + 'sn': STAGED_USER1_CN, + ALLOCATED_ATTR: str(20)}))) + ent = topology_st.standalone.getEntry(STAGED_USER1_DN, ldap.SCOPE_BASE, "(objectclass=*)") + assert ent.hasAttr(ALLOCATED_ATTR) + assert ensure_str(ent.getValue(ALLOCATED_ATTR)) == str(20) + topology_st.standalone.log.debug('%s.%s=%s' % (STAGED_USER1_CN, ALLOCATED_ATTR, ensure_str(ent.getValue(ALLOCATED_ATTR)))) + topology_st.standalone.delete_s(STAGED_USER1_DN) + + +def test_ticket47828_run_4(topology_st): + ''' + Exclude the provisioning container + ''' + _header(topology_st, 'Exclude the provisioning container') + + 
dn_config = "cn=excluded scope, cn=%s, %s" % (PLUGIN_DNA, DN_PLUGIN) + mod = [(ldap.MOD_REPLACE, 'dnaExcludeScope', ensure_bytes(PROVISIONING))] + topology_st.standalone.modify_s(dn_config, mod) + + +def test_ticket47828_run_5(topology_st): + """ + Provisioning excluded scope: Add an active entry and check its ALLOCATED_ATTR is set + """ + _header(topology_st, 'Provisioning excluded scope: Add an active entry and check its ALLOCATED_ATTR is set') + + topology_st.standalone.add_s( + Entry((ACTIVE_USER1_DN, {'objectclass': "top person organizationalPerson inetOrgPerson".split(), + 'cn': ACTIVE_USER1_CN, + 'sn': ACTIVE_USER1_CN, + ALLOCATED_ATTR: str(-1)}))) + ent = topology_st.standalone.getEntry(ACTIVE_USER1_DN, ldap.SCOPE_BASE, "(objectclass=*)") + assert ent.hasAttr(ALLOCATED_ATTR) + assert ensure_str(ensure_str(ent.getValue(ALLOCATED_ATTR))) != str(-1) + topology_st.standalone.log.debug('%s.%s=%s' % (ACTIVE_USER1_CN, ALLOCATED_ATTR, ensure_str(ensure_str(ent.getValue(ALLOCATED_ATTR))))) + topology_st.standalone.delete_s(ACTIVE_USER1_DN) + + +def test_ticket47828_run_6(topology_st): + """ + Provisioning excluded scope: Add an active entry and check its ALLOCATED_ATTR is unchanged (!= magic) + """ + _header(topology_st, + 'Provisioning excluded scope: Add an active entry and check its ALLOCATED_ATTR is unchanged (!= magic)') + + topology_st.standalone.add_s( + Entry((ACTIVE_USER1_DN, {'objectclass': "top person organizationalPerson inetOrgPerson".split(), + 'cn': ACTIVE_USER1_CN, + 'sn': ACTIVE_USER1_CN, + ALLOCATED_ATTR: str(20)}))) + ent = topology_st.standalone.getEntry(ACTIVE_USER1_DN, ldap.SCOPE_BASE, "(objectclass=*)") + assert ent.hasAttr(ALLOCATED_ATTR) + assert ensure_str(ensure_str(ent.getValue(ALLOCATED_ATTR))) == str(20) + topology_st.standalone.log.debug('%s.%s=%s' % (ACTIVE_USER1_CN, ALLOCATED_ATTR, ensure_str(ensure_str(ent.getValue(ALLOCATED_ATTR))))) + topology_st.standalone.delete_s(ACTIVE_USER1_DN) + + +def test_ticket47828_run_7(topology_st): + 
""" + Provisioning excluded scope: Add a staged entry and check its ALLOCATED_ATTR is not set + """ + _header(topology_st, 'Provisioning excluded scope: Add a staged entry and check its ALLOCATED_ATTR is not set') + + topology_st.standalone.add_s( + Entry((STAGED_USER1_DN, {'objectclass': "top person organizationalPerson inetOrgPerson".split(), + 'cn': STAGED_USER1_CN, + 'sn': STAGED_USER1_CN, + ALLOCATED_ATTR: str(-1)}))) + ent = topology_st.standalone.getEntry(STAGED_USER1_DN, ldap.SCOPE_BASE, "(objectclass=*)") + assert ent.hasAttr(ALLOCATED_ATTR) + assert ensure_str(ent.getValue(ALLOCATED_ATTR)) == str(-1) + topology_st.standalone.log.debug('%s.%s=%s' % (STAGED_USER1_CN, ALLOCATED_ATTR, ensure_str(ent.getValue(ALLOCATED_ATTR)))) + topology_st.standalone.delete_s(STAGED_USER1_DN) + + +def test_ticket47828_run_8(topology_st): + """ + Provisioning excluded scope: Add a staged entry and check its ALLOCATED_ATTR is unchanged (!= magic) + """ + _header(topology_st, + 'Provisioning excluded scope: Add a staged entry and check its ALLOCATED_ATTR is unchanged (!= magic)') + + topology_st.standalone.add_s( + Entry((STAGED_USER1_DN, {'objectclass': "top person organizationalPerson inetOrgPerson".split(), + 'cn': STAGED_USER1_CN, + 'sn': STAGED_USER1_CN, + ALLOCATED_ATTR: str(20)}))) + ent = topology_st.standalone.getEntry(STAGED_USER1_DN, ldap.SCOPE_BASE, "(objectclass=*)") + assert ent.hasAttr(ALLOCATED_ATTR) + assert ensure_str(ent.getValue(ALLOCATED_ATTR)) == str(20) + topology_st.standalone.log.debug('%s.%s=%s' % (STAGED_USER1_CN, ALLOCATED_ATTR, ensure_str(ent.getValue(ALLOCATED_ATTR)))) + topology_st.standalone.delete_s(STAGED_USER1_DN) + + +def test_ticket47828_run_9(topology_st): + """ + Provisioning excluded scope: Add an dummy entry and check its ALLOCATED_ATTR is set + """ + _header(topology_st, 'Provisioning excluded scope: Add an dummy entry and check its ALLOCATED_ATTR is set') + + topology_st.standalone.add_s( + Entry((DUMMY_USER1_DN, {'objectclass': "top 
person organizationalPerson inetOrgPerson".split(), + 'cn': DUMMY_USER1_CN, + 'sn': DUMMY_USER1_CN, + ALLOCATED_ATTR: str(-1)}))) + ent = topology_st.standalone.getEntry(DUMMY_USER1_DN, ldap.SCOPE_BASE, "(objectclass=*)") + assert ent.hasAttr(ALLOCATED_ATTR) + assert ensure_str(ent.getValue(ALLOCATED_ATTR)) != str(-1) + topology_st.standalone.log.debug('%s.%s=%s' % (DUMMY_USER1_CN, ALLOCATED_ATTR, ensure_str(ent.getValue(ALLOCATED_ATTR)))) + topology_st.standalone.delete_s(DUMMY_USER1_DN) + + +def test_ticket47828_run_10(topology_st): + """ + Provisioning excluded scope: Add an dummy entry and check its ALLOCATED_ATTR is unchanged (!= magic) + """ + _header(topology_st, + 'Provisioning excluded scope: Add an dummy entry and check its ALLOCATED_ATTR is unchanged (!= magic)') + + topology_st.standalone.add_s( + Entry((DUMMY_USER1_DN, {'objectclass': "top person organizationalPerson inetOrgPerson".split(), + 'cn': DUMMY_USER1_CN, + 'sn': DUMMY_USER1_CN, + ALLOCATED_ATTR: str(20)}))) + ent = topology_st.standalone.getEntry(DUMMY_USER1_DN, ldap.SCOPE_BASE, "(objectclass=*)") + assert ent.hasAttr(ALLOCATED_ATTR) + assert ensure_str(ent.getValue(ALLOCATED_ATTR)) == str(20) + topology_st.standalone.log.debug('%s.%s=%s' % (DUMMY_USER1_CN, ALLOCATED_ATTR, ensure_str(ent.getValue(ALLOCATED_ATTR)))) + topology_st.standalone.delete_s(DUMMY_USER1_DN) + + +def test_ticket47828_run_11(topology_st): + ''' + Exclude (in addition) the dummy container + ''' + _header(topology_st, 'Exclude (in addition) the dummy container') + + dn_config = "cn=excluded scope, cn=%s, %s" % (PLUGIN_DNA, DN_PLUGIN) + mod = [(ldap.MOD_ADD, 'dnaExcludeScope', ensure_bytes(DUMMY_CONTAINER))] + topology_st.standalone.modify_s(dn_config, mod) + + +def test_ticket47828_run_12(topology_st): + """ + Provisioning/Dummy excluded scope: Add an active entry and check its ALLOCATED_ATTR is set + """ + _header(topology_st, 'Provisioning/Dummy excluded scope: Add an active entry and check its ALLOCATED_ATTR is set') + 
+ topology_st.standalone.add_s( + Entry((ACTIVE_USER1_DN, {'objectclass': "top person organizationalPerson inetOrgPerson".split(), + 'cn': ACTIVE_USER1_CN, + 'sn': ACTIVE_USER1_CN, + ALLOCATED_ATTR: str(-1)}))) + ent = topology_st.standalone.getEntry(ACTIVE_USER1_DN, ldap.SCOPE_BASE, "(objectclass=*)") + assert ent.hasAttr(ALLOCATED_ATTR) + assert ensure_str(ent.getValue(ALLOCATED_ATTR)) != str(-1) + topology_st.standalone.log.debug('%s.%s=%s' % (ACTIVE_USER1_CN, ALLOCATED_ATTR, ensure_str(ent.getValue(ALLOCATED_ATTR)))) + topology_st.standalone.delete_s(ACTIVE_USER1_DN) + + +def test_ticket47828_run_13(topology_st): + """ + Provisioning/Dummy excluded scope: Add an active entry and check its ALLOCATED_ATTR is unchanged (!= magic) + """ + _header(topology_st, + 'Provisioning/Dummy excluded scope: Add an active entry and check its ALLOCATED_ATTR is unchanged (!= magic)') + + topology_st.standalone.add_s( + Entry((ACTIVE_USER1_DN, {'objectclass': "top person organizationalPerson inetOrgPerson".split(), + 'cn': ACTIVE_USER1_CN, + 'sn': ACTIVE_USER1_CN, + ALLOCATED_ATTR: str(20)}))) + ent = topology_st.standalone.getEntry(ACTIVE_USER1_DN, ldap.SCOPE_BASE, "(objectclass=*)") + assert ent.hasAttr(ALLOCATED_ATTR) + assert ensure_str(ent.getValue(ALLOCATED_ATTR)) == str(20) + topology_st.standalone.log.debug('%s.%s=%s' % (ACTIVE_USER1_CN, ALLOCATED_ATTR, ensure_str(ent.getValue(ALLOCATED_ATTR)))) + topology_st.standalone.delete_s(ACTIVE_USER1_DN) + + +def test_ticket47828_run_14(topology_st): + """ + Provisioning/Dummy excluded scope: Add a staged entry and check its ALLOCATED_ATTR is not set + """ + _header(topology_st, + 'Provisioning/Dummy excluded scope: Add a staged entry and check its ALLOCATED_ATTR is not set') + + topology_st.standalone.add_s( + Entry((STAGED_USER1_DN, {'objectclass': "top person organizationalPerson inetOrgPerson".split(), + 'cn': STAGED_USER1_CN, + 'sn': STAGED_USER1_CN, + ALLOCATED_ATTR: str(-1)}))) + ent = 
topology_st.standalone.getEntry(STAGED_USER1_DN, ldap.SCOPE_BASE, "(objectclass=*)") + assert ent.hasAttr(ALLOCATED_ATTR) + assert ensure_str(ent.getValue(ALLOCATED_ATTR)) == str(-1) + topology_st.standalone.log.debug('%s.%s=%s' % (STAGED_USER1_CN, ALLOCATED_ATTR, ensure_str(ent.getValue(ALLOCATED_ATTR)))) + topology_st.standalone.delete_s(STAGED_USER1_DN) + + +def test_ticket47828_run_15(topology_st): + """ + Provisioning/Dummy excluded scope: Add a staged entry and check its ALLOCATED_ATTR is unchanged (!= magic) + """ + _header(topology_st, + 'Provisioning/Dummy excluded scope: Add a staged entry and check its ALLOCATED_ATTR is unchanged (!= magic)') + + topology_st.standalone.add_s( + Entry((STAGED_USER1_DN, {'objectclass': "top person organizationalPerson inetOrgPerson".split(), + 'cn': STAGED_USER1_CN, + 'sn': STAGED_USER1_CN, + ALLOCATED_ATTR: str(20)}))) + ent = topology_st.standalone.getEntry(STAGED_USER1_DN, ldap.SCOPE_BASE, "(objectclass=*)") + assert ent.hasAttr(ALLOCATED_ATTR) + assert ensure_str(ent.getValue(ALLOCATED_ATTR)) == str(20) + topology_st.standalone.log.debug('%s.%s=%s' % (STAGED_USER1_CN, ALLOCATED_ATTR, ensure_str(ent.getValue(ALLOCATED_ATTR)))) + topology_st.standalone.delete_s(STAGED_USER1_DN) + + +def test_ticket47828_run_16(topology_st): + """ + Provisioning/Dummy excluded scope: Add an dummy entry and check its ALLOCATED_ATTR is not set + """ + _header(topology_st, + 'Provisioning/Dummy excluded scope: Add an dummy entry and check its ALLOCATED_ATTR not is set') + + topology_st.standalone.add_s( + Entry((DUMMY_USER1_DN, {'objectclass': "top person organizationalPerson inetOrgPerson".split(), + 'cn': DUMMY_USER1_CN, + 'sn': DUMMY_USER1_CN, + ALLOCATED_ATTR: str(-1)}))) + ent = topology_st.standalone.getEntry(DUMMY_USER1_DN, ldap.SCOPE_BASE, "(objectclass=*)") + assert ent.hasAttr(ALLOCATED_ATTR) + assert ensure_str(ent.getValue(ALLOCATED_ATTR)) == str(-1) + topology_st.standalone.log.debug('%s.%s=%s' % (DUMMY_USER1_CN, ALLOCATED_ATTR, 
ensure_str(ent.getValue(ALLOCATED_ATTR)))) + topology_st.standalone.delete_s(DUMMY_USER1_DN) + + +def test_ticket47828_run_17(topology_st): + """ + Provisioning/Dummy excluded scope: Add an dummy entry and check its ALLOCATED_ATTR is unchanged (!= magic) + """ + _header(topology_st, + 'Provisioning/Dummy excluded scope: Add an dummy entry and check its ALLOCATED_ATTR is unchanged (!= magic)') + + topology_st.standalone.add_s( + Entry((DUMMY_USER1_DN, {'objectclass': "top person organizationalPerson inetOrgPerson".split(), + 'cn': DUMMY_USER1_CN, + 'sn': DUMMY_USER1_CN, + ALLOCATED_ATTR: str(20)}))) + ent = topology_st.standalone.getEntry(DUMMY_USER1_DN, ldap.SCOPE_BASE, "(objectclass=*)") + assert ent.hasAttr(ALLOCATED_ATTR) + assert ensure_str(ent.getValue(ALLOCATED_ATTR)) == str(20) + topology_st.standalone.log.debug('%s.%s=%s' % (DUMMY_USER1_CN, ALLOCATED_ATTR, ensure_str(ent.getValue(ALLOCATED_ATTR)))) + topology_st.standalone.delete_s(DUMMY_USER1_DN) + + +def test_ticket47828_run_18(topology_st): + ''' + Exclude PROVISIONING and a wrong container + ''' + _header(topology_st, 'Exclude PROVISIONING and a wrong container') + + dn_config = "cn=excluded scope, cn=%s, %s" % (PLUGIN_DNA, DN_PLUGIN) + mod = [(ldap.MOD_REPLACE, 'dnaExcludeScope', ensure_bytes(PROVISIONING))] + topology_st.standalone.modify_s(dn_config, mod) + try: + mod = [(ldap.MOD_ADD, 'dnaExcludeScope', ensure_bytes("invalidDN,%s" % SUFFIX))] + topology_st.standalone.modify_s(dn_config, mod) + raise ValueError("invalid dnaExcludeScope value (not a DN)") + except ldap.INVALID_SYNTAX: + pass + + +def test_ticket47828_run_19(topology_st): + """ + Provisioning+wrong container excluded scope: Add an active entry and check its ALLOCATED_ATTR is set + """ + _header(topology_st, + 'Provisioning+wrong container excluded scope: Add an active entry and check its ALLOCATED_ATTR is set') + + topology_st.standalone.add_s( + Entry((ACTIVE_USER1_DN, {'objectclass': "top person organizationalPerson 
inetOrgPerson".split(), + 'cn': ACTIVE_USER1_CN, + 'sn': ACTIVE_USER1_CN, + ALLOCATED_ATTR: str(-1)}))) + ent = topology_st.standalone.getEntry(ACTIVE_USER1_DN, ldap.SCOPE_BASE, "(objectclass=*)") + assert ent.hasAttr(ALLOCATED_ATTR) + assert ensure_str(ent.getValue(ALLOCATED_ATTR)) != str(-1) + topology_st.standalone.log.debug('%s.%s=%s' % (ACTIVE_USER1_CN, ALLOCATED_ATTR, ensure_str(ent.getValue(ALLOCATED_ATTR)))) + topology_st.standalone.delete_s(ACTIVE_USER1_DN) + + +def test_ticket47828_run_20(topology_st): + """ + Provisioning+wrong container excluded scope: Add an active entry and check its ALLOCATED_ATTR is unchanged (!= magic) + """ + _header(topology_st, + 'Provisioning+wrong container excluded scope: Add an active entry and check its ALLOCATED_ATTR is unchanged (!= magic)') + + topology_st.standalone.add_s( + Entry((ACTIVE_USER1_DN, {'objectclass': "top person organizationalPerson inetOrgPerson".split(), + 'cn': ACTIVE_USER1_CN, + 'sn': ACTIVE_USER1_CN, + ALLOCATED_ATTR: str(20)}))) + ent = topology_st.standalone.getEntry(ACTIVE_USER1_DN, ldap.SCOPE_BASE, "(objectclass=*)") + assert ent.hasAttr(ALLOCATED_ATTR) + assert ensure_str(ent.getValue(ALLOCATED_ATTR)) == str(20) + topology_st.standalone.log.debug('%s.%s=%s' % (ACTIVE_USER1_CN, ALLOCATED_ATTR, ensure_str(ent.getValue(ALLOCATED_ATTR)))) + topology_st.standalone.delete_s(ACTIVE_USER1_DN) + + +def test_ticket47828_run_21(topology_st): + """ + Provisioning+wrong container excluded scope: Add a staged entry and check its ALLOCATED_ATTR is not set + """ + _header(topology_st, + 'Provisioning+wrong container excluded scope: Add a staged entry and check its ALLOCATED_ATTR is not set') + + topology_st.standalone.add_s( + Entry((STAGED_USER1_DN, {'objectclass': "top person organizationalPerson inetOrgPerson".split(), + 'cn': STAGED_USER1_CN, + 'sn': STAGED_USER1_CN, + ALLOCATED_ATTR: str(-1)}))) + ent = topology_st.standalone.getEntry(STAGED_USER1_DN, ldap.SCOPE_BASE, "(objectclass=*)") + assert 
ent.hasAttr(ALLOCATED_ATTR) + assert ensure_str(ent.getValue(ALLOCATED_ATTR)) == str(-1) + topology_st.standalone.log.debug('%s.%s=%s' % (STAGED_USER1_CN, ALLOCATED_ATTR, ensure_str(ent.getValue(ALLOCATED_ATTR)))) + topology_st.standalone.delete_s(STAGED_USER1_DN) + + +def test_ticket47828_run_22(topology_st): + """ + Provisioning+wrong container excluded scope: Add a staged entry and check its ALLOCATED_ATTR is unchanged (!= magic) + """ + _header(topology_st, + 'Provisioning+wrong container excluded scope: Add a staged entry and check its ALLOCATED_ATTR is unchanged (!= magic)') + + topology_st.standalone.add_s( + Entry((STAGED_USER1_DN, {'objectclass': "top person organizationalPerson inetOrgPerson".split(), + 'cn': STAGED_USER1_CN, + 'sn': STAGED_USER1_CN, + ALLOCATED_ATTR: str(20)}))) + ent = topology_st.standalone.getEntry(STAGED_USER1_DN, ldap.SCOPE_BASE, "(objectclass=*)") + assert ent.hasAttr(ALLOCATED_ATTR) + assert ensure_str(ent.getValue(ALLOCATED_ATTR)) == str(20) + topology_st.standalone.log.debug('%s.%s=%s' % (STAGED_USER1_CN, ALLOCATED_ATTR, ensure_str(ent.getValue(ALLOCATED_ATTR)))) + topology_st.standalone.delete_s(STAGED_USER1_DN) + + +def test_ticket47828_run_23(topology_st): + """ + Provisioning+wrong container excluded scope: Add an dummy entry and check its ALLOCATED_ATTR is set + """ + _header(topology_st, + 'Provisioning+wrong container excluded scope: Add an dummy entry and check its ALLOCATED_ATTR is set') + + topology_st.standalone.add_s( + Entry((DUMMY_USER1_DN, {'objectclass': "top person organizationalPerson inetOrgPerson".split(), + 'cn': DUMMY_USER1_CN, + 'sn': DUMMY_USER1_CN, + ALLOCATED_ATTR: str(-1)}))) + ent = topology_st.standalone.getEntry(DUMMY_USER1_DN, ldap.SCOPE_BASE, "(objectclass=*)") + assert ent.hasAttr(ALLOCATED_ATTR) + assert ensure_str(ent.getValue(ALLOCATED_ATTR)) != str(-1) + topology_st.standalone.log.debug('%s.%s=%s' % (DUMMY_USER1_CN, ALLOCATED_ATTR, ensure_str(ent.getValue(ALLOCATED_ATTR)))) + 
topology_st.standalone.delete_s(DUMMY_USER1_DN) + + +def test_ticket47828_run_24(topology_st): + """ + Provisioning+wrong container excluded scope: Add an dummy entry and check its ALLOCATED_ATTR is unchanged (!= magic) + """ + _header(topology_st, + 'Provisioning+wrong container excluded scope: Add an dummy entry and check its ALLOCATED_ATTR is unchanged (!= magic)') + + topology_st.standalone.add_s( + Entry((DUMMY_USER1_DN, {'objectclass': "top person organizationalPerson inetOrgPerson".split(), + 'cn': DUMMY_USER1_CN, + 'sn': DUMMY_USER1_CN, + ALLOCATED_ATTR: str(20)}))) + ent = topology_st.standalone.getEntry(DUMMY_USER1_DN, ldap.SCOPE_BASE, "(objectclass=*)") + assert ent.hasAttr(ALLOCATED_ATTR) + assert ensure_str(ent.getValue(ALLOCATED_ATTR)) == str(20) + topology_st.standalone.log.debug('%s.%s=%s' % (DUMMY_USER1_CN, ALLOCATED_ATTR, ensure_str(ent.getValue(ALLOCATED_ATTR)))) + topology_st.standalone.delete_s(DUMMY_USER1_DN) + + +def test_ticket47828_run_25(topology_st): + ''' + Exclude a wrong container + ''' + _header(topology_st, 'Exclude a wrong container') + + dn_config = "cn=excluded scope, cn=%s, %s" % (PLUGIN_DNA, DN_PLUGIN) + + try: + mod = [(ldap.MOD_REPLACE, 'dnaExcludeScope', ensure_bytes("invalidDN,%s" % SUFFIX))] + topology_st.standalone.modify_s(dn_config, mod) + raise ValueError("invalid dnaExcludeScope value (not a DN)") + except ldap.INVALID_SYNTAX: + pass + + +def test_ticket47828_run_26(topology_st): + """ + Wrong container excluded scope: Add an active entry and check its ALLOCATED_ATTR is set + """ + _header(topology_st, 'Wrong container excluded scope: Add an active entry and check its ALLOCATED_ATTR is set') + + topology_st.standalone.add_s( + Entry((ACTIVE_USER1_DN, {'objectclass': "top person organizationalPerson inetOrgPerson".split(), + 'cn': ACTIVE_USER1_CN, + 'sn': ACTIVE_USER1_CN, + ALLOCATED_ATTR: str(-1)}))) + ent = topology_st.standalone.getEntry(ACTIVE_USER1_DN, ldap.SCOPE_BASE, "(objectclass=*)") + assert 
ent.hasAttr(ALLOCATED_ATTR) + assert ensure_str(ent.getValue(ALLOCATED_ATTR)) != str(-1) + topology_st.standalone.log.debug('%s.%s=%s' % (ACTIVE_USER1_CN, ALLOCATED_ATTR, ensure_str(ent.getValue(ALLOCATED_ATTR)))) + topology_st.standalone.delete_s(ACTIVE_USER1_DN) + + +def test_ticket47828_run_27(topology_st): + """ + Wrong container excluded scope: Add an active entry and check its ALLOCATED_ATTR is unchanged (!= magic) + """ + _header(topology_st, + 'Wrong container excluded scope: Add an active entry and check its ALLOCATED_ATTR is unchanged (!= magic)') + + topology_st.standalone.add_s( + Entry((ACTIVE_USER1_DN, {'objectclass': "top person organizationalPerson inetOrgPerson".split(), + 'cn': ACTIVE_USER1_CN, + 'sn': ACTIVE_USER1_CN, + ALLOCATED_ATTR: str(20)}))) + ent = topology_st.standalone.getEntry(ACTIVE_USER1_DN, ldap.SCOPE_BASE, "(objectclass=*)") + assert ent.hasAttr(ALLOCATED_ATTR) + assert ensure_str(ent.getValue(ALLOCATED_ATTR)) == str(20) + topology_st.standalone.log.debug('%s.%s=%s' % (ACTIVE_USER1_CN, ALLOCATED_ATTR, ensure_str(ent.getValue(ALLOCATED_ATTR)))) + topology_st.standalone.delete_s(ACTIVE_USER1_DN) + + +def test_ticket47828_run_28(topology_st): + """ + Wrong container excluded scope: Add a staged entry and check its ALLOCATED_ATTR is not set + """ + _header(topology_st, 'Wrong container excluded scope: Add a staged entry and check its ALLOCATED_ATTR is not set') + + topology_st.standalone.add_s( + Entry((STAGED_USER1_DN, {'objectclass': "top person organizationalPerson inetOrgPerson".split(), + 'cn': STAGED_USER1_CN, + 'sn': STAGED_USER1_CN, + ALLOCATED_ATTR: str(-1)}))) + ent = topology_st.standalone.getEntry(STAGED_USER1_DN, ldap.SCOPE_BASE, "(objectclass=*)") + assert ent.hasAttr(ALLOCATED_ATTR) + assert ensure_str(ent.getValue(ALLOCATED_ATTR)) == str(-1) + topology_st.standalone.log.debug('%s.%s=%s' % (STAGED_USER1_CN, ALLOCATED_ATTR, ensure_str(ent.getValue(ALLOCATED_ATTR)))) + topology_st.standalone.delete_s(STAGED_USER1_DN) + + 
+def test_ticket47828_run_29(topology_st): + """ + Wrong container excluded scope: Add a staged entry and check its ALLOCATED_ATTR is unchanged (!= magic) + """ + _header(topology_st, + 'Wrong container excluded scope: Add a staged entry and check its ALLOCATED_ATTR is unchanged (!= magic)') + + topology_st.standalone.add_s( + Entry((STAGED_USER1_DN, {'objectclass': "top person organizationalPerson inetOrgPerson".split(), + 'cn': STAGED_USER1_CN, + 'sn': STAGED_USER1_CN, + ALLOCATED_ATTR: str(20)}))) + ent = topology_st.standalone.getEntry(STAGED_USER1_DN, ldap.SCOPE_BASE, "(objectclass=*)") + assert ent.hasAttr(ALLOCATED_ATTR) + assert ensure_str(ent.getValue(ALLOCATED_ATTR)) == str(20) + topology_st.standalone.log.debug('%s.%s=%s' % (STAGED_USER1_CN, ALLOCATED_ATTR, ensure_str(ent.getValue(ALLOCATED_ATTR)))) + topology_st.standalone.delete_s(STAGED_USER1_DN) + + +def test_ticket47828_run_30(topology_st): + """ + Wrong container excluded scope: Add an dummy entry and check its ALLOCATED_ATTR is set + """ + _header(topology_st, 'Wrong container excluded scope: Add an dummy entry and check its ALLOCATED_ATTR is set') + + topology_st.standalone.add_s( + Entry((DUMMY_USER1_DN, {'objectclass': "top person organizationalPerson inetOrgPerson".split(), + 'cn': DUMMY_USER1_CN, + 'sn': DUMMY_USER1_CN, + ALLOCATED_ATTR: str(-1)}))) + ent = topology_st.standalone.getEntry(DUMMY_USER1_DN, ldap.SCOPE_BASE, "(objectclass=*)") + assert ent.hasAttr(ALLOCATED_ATTR) + assert ensure_str(ent.getValue(ALLOCATED_ATTR)) != str(-1) + topology_st.standalone.log.debug('%s.%s=%s' % (DUMMY_USER1_CN, ALLOCATED_ATTR, ensure_str(ent.getValue(ALLOCATED_ATTR)))) + topology_st.standalone.delete_s(DUMMY_USER1_DN) + + +def test_ticket47828_run_31(topology_st): + """ + Wrong container excluded scope: Add an dummy entry and check its ALLOCATED_ATTR is unchanged (!= magic) + """ + _header(topology_st, + 'Wrong container excluded scope: Add an dummy entry and check its ALLOCATED_ATTR is unchanged (!= 
magic)') + + topology_st.standalone.add_s( + Entry((DUMMY_USER1_DN, {'objectclass': "top person organizationalPerson inetOrgPerson".split(), + 'cn': DUMMY_USER1_CN, + 'sn': DUMMY_USER1_CN, + ALLOCATED_ATTR: str(20)}))) + ent = topology_st.standalone.getEntry(DUMMY_USER1_DN, ldap.SCOPE_BASE, "(objectclass=*)") + assert ent.hasAttr(ALLOCATED_ATTR) + assert ensure_str(ent.getValue(ALLOCATED_ATTR)) == str(20) + topology_st.standalone.log.debug('%s.%s=%s' % (DUMMY_USER1_CN, ALLOCATED_ATTR, ensure_str(ent.getValue(ALLOCATED_ATTR)))) + topology_st.standalone.delete_s(DUMMY_USER1_DN) + + +if __name__ == '__main__': + # Run isolated + # -s for DEBUG mode + CURRENT_FILE = os.path.realpath(__file__) + pytest.main("-s %s" % CURRENT_FILE) diff --git a/dirsrvtests/tests/tickets/ticket47829_test.py b/dirsrvtests/tests/tickets/ticket47829_test.py new file mode 100644 index 0000000..64aee67 --- /dev/null +++ b/dirsrvtests/tests/tickets/ticket47829_test.py @@ -0,0 +1,629 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2016 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. 
+# --- END COPYRIGHT BLOCK --- +# +import logging +import time + +import ldap +import pytest +from lib389 import Entry +from lib389._constants import * +from lib389.topologies import topology_st +from lib389.utils import * + +# Skip on older versions +pytestmark = [pytest.mark.tier2, + pytest.mark.skipif(ds_is_older('1.3.4'), reason="Not implemented")] +SCOPE_IN_CN = 'in' +SCOPE_OUT_CN = 'out' +SCOPE_IN_DN = 'cn=%s,%s' % (SCOPE_IN_CN, SUFFIX) +SCOPE_OUT_DN = 'cn=%s,%s' % (SCOPE_OUT_CN, SUFFIX) + +PROVISIONING_CN = "provisioning" +PROVISIONING_DN = "cn=%s,%s" % (PROVISIONING_CN, SCOPE_IN_DN) + +ACTIVE_CN = "accounts" +STAGE_CN = "staged users" +DELETE_CN = "deleted users" +ACTIVE_DN = "cn=%s,%s" % (ACTIVE_CN, SCOPE_IN_DN) +STAGE_DN = "cn=%s,%s" % (STAGE_CN, PROVISIONING_DN) +DELETE_DN = "cn=%s,%s" % (DELETE_CN, PROVISIONING_DN) + +STAGE_USER_CN = "stage guy" +STAGE_USER_DN = "cn=%s,%s" % (STAGE_USER_CN, STAGE_DN) + +ACTIVE_USER_CN = "active guy" +ACTIVE_USER_DN = "cn=%s,%s" % (ACTIVE_USER_CN, ACTIVE_DN) + +OUT_USER_CN = "out guy" +OUT_USER_DN = "cn=%s,%s" % (OUT_USER_CN, SCOPE_OUT_DN) + +STAGE_GROUP_CN = "stage group" +STAGE_GROUP_DN = "cn=%s,%s" % (STAGE_GROUP_CN, STAGE_DN) + +ACTIVE_GROUP_CN = "active group" +ACTIVE_GROUP_DN = "cn=%s,%s" % (ACTIVE_GROUP_CN, ACTIVE_DN) + +OUT_GROUP_CN = "out group" +OUT_GROUP_DN = "cn=%s,%s" % (OUT_GROUP_CN, SCOPE_OUT_DN) + +INDIRECT_ACTIVE_GROUP_CN = "indirect active group" +INDIRECT_ACTIVE_GROUP_DN = "cn=%s,%s" % (INDIRECT_ACTIVE_GROUP_CN, ACTIVE_DN) + +log = logging.getLogger(__name__) + + +def _header(topology_st, label): + topology_st.standalone.log.info("\n\n###############################################") + topology_st.standalone.log.info("#######") + topology_st.standalone.log.info("####### %s" % label) + topology_st.standalone.log.info("#######") + topology_st.standalone.log.info("###############################################") + + +def _add_user(topology_st, type='active'): + if type == 'active': + 
topology_st.standalone.add_s(Entry((ACTIVE_USER_DN, { + 'objectclass': "top person inetuser".split(), + 'sn': ACTIVE_USER_CN, + 'cn': ACTIVE_USER_CN}))) + elif type == 'stage': + topology_st.standalone.add_s(Entry((STAGE_USER_DN, { + 'objectclass': "top person inetuser".split(), + 'sn': STAGE_USER_CN, + 'cn': STAGE_USER_CN}))) + else: + topology_st.standalone.add_s(Entry((OUT_USER_DN, { + 'objectclass': "top person inetuser".split(), + 'sn': OUT_USER_CN, + 'cn': OUT_USER_CN}))) + + +def _find_memberof(topology_st, user_dn=None, group_dn=None, find_result=True): + assert (topology_st) + assert (user_dn) + assert (group_dn) + ent = topology_st.standalone.getEntry(user_dn, ldap.SCOPE_BASE, "(objectclass=*)", ['memberof']) + found = False + if ent.hasAttr('memberof'): + + for val in ent.getValues('memberof'): + topology_st.standalone.log.info("!!!!!!! %s: memberof->%s" % (user_dn, val)) + if ensure_str(val) == group_dn: + found = True + break + + if find_result: + assert (found) + else: + assert (not found) + + +def _find_member(topology_st, user_dn=None, group_dn=None, find_result=True): + assert (topology_st) + assert (user_dn) + assert (group_dn) + ent = topology_st.standalone.getEntry(group_dn, ldap.SCOPE_BASE, "(objectclass=*)", ['member']) + found = False + if ent.hasAttr('member'): + + for val in ent.getValues('member'): + topology_st.standalone.log.info("!!!!!!! 
%s: member ->%s" % (group_dn, val)) + if ensure_str(val) == user_dn: + found = True + break + + if find_result: + assert (found) + else: + assert (not found) + + +def _modrdn_entry(topology_st=None, entry_dn=None, new_rdn=None, del_old=0, new_superior=None): + assert topology_st is not None + assert entry_dn is not None + assert new_rdn is not None + + topology_st.standalone.log.info("\n\n######################### MODRDN %s ######################\n" % new_rdn) + try: + if new_superior: + topology_st.standalone.rename_s(entry_dn, new_rdn, newsuperior=new_superior, delold=del_old) + else: + topology_st.standalone.rename_s(entry_dn, new_rdn, delold=del_old) + except ldap.NO_SUCH_ATTRIBUTE: + topology_st.standalone.log.info("accepted failure due to 47833: modrdn reports error.. but succeeds") + attempt = 0 + if new_superior: + dn = "%s,%s" % (new_rdn, new_superior) + base = new_superior + else: + base = ','.join(entry_dn.split(",")[1:]) + dn = "%s, %s" % (new_rdn, base) + myfilter = entry_dn.split(',')[0] + + while attempt < 10: + try: + ent = topology_st.standalone.getEntry(dn, ldap.SCOPE_BASE, myfilter) + break + except ldap.NO_SUCH_OBJECT: + topology_st.standalone.log.info("Accept failure due to 47833: unable to find (base) a modrdn entry") + attempt += 1 + time.sleep(1) + if attempt == 10: + ent = topology_st.standalone.getEntry(base, ldap.SCOPE_SUBTREE, myfilter) + ent = topology_st.standalone.getEntry(dn, ldap.SCOPE_BASE, myfilter) + + +def _check_memberof(topology_st=None, action=None, user_dn=None, group_dn=None, find_result=None): + assert (topology_st) + assert (user_dn) + assert (group_dn) + if action == ldap.MOD_ADD: + txt = 'add' + elif action == ldap.MOD_DELETE: + txt = 'delete' + else: + txt = 'replace' + topology_st.standalone.log.info('\n%s entry %s' % (txt, user_dn)) + topology_st.standalone.log.info('to group %s' % group_dn) + + topology_st.standalone.modify_s(group_dn, [(action, 'member', ensure_bytes(user_dn))]) + time.sleep(1) + 
_find_memberof(topology_st, user_dn=user_dn, group_dn=group_dn, find_result=find_result) + + +def test_ticket47829_init(topology_st): + topology_st.standalone.add_s(Entry((SCOPE_IN_DN, { + 'objectclass': "top nscontainer".split(), + 'cn': SCOPE_IN_DN}))) + topology_st.standalone.add_s(Entry((SCOPE_OUT_DN, { + 'objectclass': "top nscontainer".split(), + 'cn': SCOPE_OUT_DN}))) + topology_st.standalone.add_s(Entry((PROVISIONING_DN, { + 'objectclass': "top nscontainer".split(), + 'cn': PROVISIONING_CN}))) + topology_st.standalone.add_s(Entry((ACTIVE_DN, { + 'objectclass': "top nscontainer".split(), + 'cn': ACTIVE_CN}))) + topology_st.standalone.add_s(Entry((STAGE_DN, { + 'objectclass': "top nscontainer".split(), + 'cn': STAGE_DN}))) + topology_st.standalone.add_s(Entry((DELETE_DN, { + 'objectclass': "top nscontainer".split(), + 'cn': DELETE_CN}))) + + # add groups + topology_st.standalone.add_s(Entry((ACTIVE_GROUP_DN, { + 'objectclass': "top groupOfNames inetuser".split(), + 'cn': ACTIVE_GROUP_CN}))) + topology_st.standalone.add_s(Entry((STAGE_GROUP_DN, { + 'objectclass': "top groupOfNames inetuser".split(), + 'cn': STAGE_GROUP_CN}))) + topology_st.standalone.add_s(Entry((OUT_GROUP_DN, { + 'objectclass': "top groupOfNames inetuser".split(), + 'cn': OUT_GROUP_CN}))) + topology_st.standalone.add_s(Entry((INDIRECT_ACTIVE_GROUP_DN, { + 'objectclass': "top groupOfNames".split(), + 'cn': INDIRECT_ACTIVE_GROUP_CN}))) + + # add users + _add_user(topology_st, 'active') + _add_user(topology_st, 'stage') + _add_user(topology_st, 'out') + + # enable memberof of with scope IN except provisioning + topology_st.standalone.plugins.enable(name=PLUGIN_MEMBER_OF) + dn = "cn=%s,%s" % (PLUGIN_MEMBER_OF, DN_PLUGIN) + topology_st.standalone.modify_s(dn, [(ldap.MOD_REPLACE, 'memberOfEntryScope', ensure_bytes(SCOPE_IN_DN))]) + topology_st.standalone.modify_s(dn, [(ldap.MOD_REPLACE, 'memberOfEntryScopeExcludeSubtree', ensure_bytes(PROVISIONING_DN))]) + + # enable RI with scope IN except 
provisioning + topology_st.standalone.plugins.enable(name=PLUGIN_REFER_INTEGRITY) + dn = "cn=%s,%s" % (PLUGIN_REFER_INTEGRITY, DN_PLUGIN) + topology_st.standalone.modify_s(dn, [(ldap.MOD_REPLACE, 'nsslapd-pluginentryscope', ensure_bytes(SCOPE_IN_DN))]) + topology_st.standalone.modify_s(dn, [(ldap.MOD_REPLACE, 'nsslapd-plugincontainerscope', ensure_bytes(SCOPE_IN_DN))]) + topology_st.standalone.modify_s(dn, [(ldap.MOD_REPLACE, 'nsslapd-pluginExcludeEntryScope', ensure_bytes(PROVISIONING_DN))]) + + topology_st.standalone.restart(timeout=10) + + +def test_ticket47829_mod_active_user_1(topology_st): + _header(topology_st, 'MOD: add an active user to an active group') + + # add active user to active group + _check_memberof(topology_st, action=ldap.MOD_ADD, user_dn=ACTIVE_USER_DN, group_dn=ACTIVE_GROUP_DN, + find_result=True) + _find_member(topology_st, user_dn=ACTIVE_USER_DN, group_dn=ACTIVE_GROUP_DN, find_result=True) + + # remove active user to active group + _check_memberof(topology_st, action=ldap.MOD_DELETE, user_dn=ACTIVE_USER_DN, group_dn=ACTIVE_GROUP_DN, + find_result=False) + + +def test_ticket47829_mod_active_user_2(topology_st): + _header(topology_st, 'MOD: add an Active user to a Stage group') + + # add active user to stage group + _check_memberof(topology_st, action=ldap.MOD_ADD, user_dn=ACTIVE_USER_DN, group_dn=STAGE_GROUP_DN, + find_result=False) + _find_member(topology_st, user_dn=ACTIVE_USER_DN, group_dn=STAGE_GROUP_DN, find_result=True) + + # remove active user to stage group + _check_memberof(topology_st, action=ldap.MOD_DELETE, user_dn=ACTIVE_USER_DN, group_dn=STAGE_GROUP_DN, + find_result=False) + + +def test_ticket47829_mod_active_user_3(topology_st): + _header(topology_st, 'MOD: add an Active user to a out of scope group') + + # add active user to out of scope group + _check_memberof(topology_st, action=ldap.MOD_ADD, user_dn=ACTIVE_USER_DN, group_dn=OUT_GROUP_DN, find_result=False) + _find_member(topology_st, user_dn=ACTIVE_USER_DN, 
group_dn=OUT_GROUP_DN, find_result=True) + + # remove active user to out of scope group + _check_memberof(topology_st, action=ldap.MOD_DELETE, user_dn=ACTIVE_USER_DN, group_dn=OUT_GROUP_DN, + find_result=False) + + +def test_ticket47829_mod_stage_user_1(topology_st): + _header(topology_st, 'MOD: add an Stage user to a Active group') + + # add stage user to active group + _check_memberof(topology_st, action=ldap.MOD_ADD, user_dn=STAGE_USER_DN, group_dn=ACTIVE_GROUP_DN, + find_result=False) + _find_member(topology_st, user_dn=STAGE_USER_DN, group_dn=ACTIVE_GROUP_DN, find_result=True) + + # remove stage user to active group + _check_memberof(topology_st, action=ldap.MOD_DELETE, user_dn=STAGE_USER_DN, group_dn=ACTIVE_GROUP_DN, + find_result=False) + + +def test_ticket47829_mod_stage_user_2(topology_st): + _header(topology_st, 'MOD: add an Stage user to a Stage group') + + # add stage user to stage group + _check_memberof(topology_st, action=ldap.MOD_ADD, user_dn=STAGE_USER_DN, group_dn=STAGE_GROUP_DN, find_result=False) + _find_member(topology_st, user_dn=STAGE_USER_DN, group_dn=STAGE_GROUP_DN, find_result=True) + + # remove stage user to stage group + _check_memberof(topology_st, action=ldap.MOD_DELETE, user_dn=STAGE_USER_DN, group_dn=STAGE_GROUP_DN, + find_result=False) + + +def test_ticket47829_mod_stage_user_3(topology_st): + _header(topology_st, 'MOD: add an Stage user to a out of scope group') + + # add stage user to an out of scope group + _check_memberof(topology_st, action=ldap.MOD_ADD, user_dn=STAGE_USER_DN, group_dn=OUT_GROUP_DN, find_result=False) + _find_member(topology_st, user_dn=STAGE_USER_DN, group_dn=OUT_GROUP_DN, find_result=True) + + # remove stage user to out of scope group + _check_memberof(topology_st, action=ldap.MOD_DELETE, user_dn=STAGE_USER_DN, group_dn=OUT_GROUP_DN, + find_result=False) + + +def test_ticket47829_mod_out_user_1(topology_st): + _header(topology_st, 'MOD: add an out of scope user to an active group') + + # add out of scope user 
to active group + _check_memberof(topology_st, action=ldap.MOD_ADD, user_dn=OUT_USER_DN, group_dn=ACTIVE_GROUP_DN, find_result=False) + _find_member(topology_st, user_dn=OUT_USER_DN, group_dn=ACTIVE_GROUP_DN, find_result=True) + + # remove out of scope user to active group + _check_memberof(topology_st, action=ldap.MOD_DELETE, user_dn=OUT_USER_DN, group_dn=ACTIVE_GROUP_DN, + find_result=False) + + +def test_ticket47829_mod_out_user_2(topology_st): + _header(topology_st, 'MOD: add an out of scope user to a Stage group') + + # add out of scope user to stage group + _check_memberof(topology_st, action=ldap.MOD_ADD, user_dn=OUT_USER_DN, group_dn=STAGE_GROUP_DN, find_result=False) + _find_member(topology_st, user_dn=OUT_USER_DN, group_dn=STAGE_GROUP_DN, find_result=True) + + # remove out of scope user to stage group + _check_memberof(topology_st, action=ldap.MOD_DELETE, user_dn=OUT_USER_DN, group_dn=STAGE_GROUP_DN, + find_result=False) + + +def test_ticket47829_mod_out_user_3(topology_st): + _header(topology_st, 'MOD: add an out of scope user to an out of scope group') + + # add out of scope user to stage group + _check_memberof(topology_st, action=ldap.MOD_ADD, user_dn=OUT_USER_DN, group_dn=OUT_GROUP_DN, find_result=False) + _find_member(topology_st, user_dn=OUT_USER_DN, group_dn=OUT_GROUP_DN, find_result=True) + + # remove out of scope user to stage group + _check_memberof(topology_st, action=ldap.MOD_DELETE, user_dn=OUT_USER_DN, group_dn=OUT_GROUP_DN, find_result=False) + + +def test_ticket47829_mod_active_user_modrdn_active_user_1(topology_st): + _header(topology_st, 'add an Active user to a Active group. 
Then move Active user to Active') + + # add Active user to active group + _check_memberof(topology_st, action=ldap.MOD_ADD, user_dn=ACTIVE_USER_DN, group_dn=ACTIVE_GROUP_DN, + find_result=True) + _find_member(topology_st, user_dn=ACTIVE_USER_DN, group_dn=ACTIVE_GROUP_DN, find_result=True) + + # move the Active entry to active, expect 'member' and 'memberof' + _modrdn_entry(topology_st, entry_dn=ACTIVE_USER_DN, new_rdn="cn=x%s" % ACTIVE_USER_CN, new_superior=ACTIVE_DN) + _find_memberof(topology_st, user_dn="cn=x%s,%s" % (ACTIVE_USER_CN, ACTIVE_DN), group_dn=ACTIVE_GROUP_DN, + find_result=True) + _find_member(topology_st, user_dn="cn=x%s,%s" % (ACTIVE_USER_CN, ACTIVE_DN), group_dn=ACTIVE_GROUP_DN, + find_result=True) + + # move the Active entry to active, expect 'member' and no 'memberof' + _modrdn_entry(topology_st, entry_dn="cn=x%s,%s" % (ACTIVE_USER_CN, ACTIVE_DN), new_rdn="cn=%s" % ACTIVE_USER_CN, + new_superior=ACTIVE_DN) + _find_memberof(topology_st, user_dn="cn=%s,%s" % (ACTIVE_USER_CN, ACTIVE_DN), group_dn=ACTIVE_GROUP_DN, + find_result=True) + _find_member(topology_st, user_dn="cn=%s,%s" % (ACTIVE_USER_CN, ACTIVE_DN), group_dn=ACTIVE_GROUP_DN, + find_result=True) + + # remove active user to active group + _check_memberof(topology_st, action=ldap.MOD_DELETE, user_dn=ACTIVE_USER_DN, group_dn=ACTIVE_GROUP_DN, + find_result=False) + + +def test_ticket47829_mod_active_user_modrdn_stage_user_1(topology_st): + _header(topology_st, 'add an Active user to a Active group. 
Then move Active user to Stage') + + # add Active user to active group + _check_memberof(topology_st, action=ldap.MOD_ADD, user_dn=ACTIVE_USER_DN, group_dn=ACTIVE_GROUP_DN, + find_result=True) + _find_member(topology_st, user_dn=ACTIVE_USER_DN, group_dn=ACTIVE_GROUP_DN, find_result=True) + + # move the Active entry to stage, expect no 'member' and 'memberof' + _modrdn_entry(topology_st, entry_dn=ACTIVE_USER_DN, new_rdn="cn=%s" % ACTIVE_USER_CN, new_superior=STAGE_DN) + _find_memberof(topology_st, user_dn="cn=%s,%s" % (ACTIVE_USER_CN, STAGE_DN), group_dn=ACTIVE_GROUP_DN, + find_result=False) + _find_member(topology_st, user_dn="cn=%s,%s" % (ACTIVE_USER_CN, STAGE_DN), group_dn=ACTIVE_GROUP_DN, + find_result=False) + + # move the Active entry to Stage, expect 'member' and no 'memberof' + _modrdn_entry(topology_st, entry_dn="cn=%s,%s" % (ACTIVE_USER_CN, STAGE_DN), new_rdn="cn=%s" % ACTIVE_USER_CN, + new_superior=ACTIVE_DN) + _find_memberof(topology_st, user_dn="cn=%s,%s" % (ACTIVE_USER_CN, ACTIVE_DN), group_dn=ACTIVE_GROUP_DN, + find_result=False) + _find_member(topology_st, user_dn="cn=%s,%s" % (ACTIVE_USER_CN, ACTIVE_DN), group_dn=ACTIVE_GROUP_DN, + find_result=False) + + +def test_ticket47829_mod_active_user_modrdn_out_user_1(topology_st): + _header(topology_st, 'add an Active user to a Active group. 
Then move Active user to out of scope') + + # add Active user to active group + _check_memberof(topology_st, action=ldap.MOD_ADD, user_dn=ACTIVE_USER_DN, group_dn=ACTIVE_GROUP_DN, + find_result=True) + _find_member(topology_st, user_dn=ACTIVE_USER_DN, group_dn=ACTIVE_GROUP_DN, find_result=True) + + # move the Active entry to out of scope, expect no 'member' and no 'memberof' + _modrdn_entry(topology_st, entry_dn=ACTIVE_USER_DN, new_rdn="cn=%s" % ACTIVE_USER_CN, new_superior=OUT_GROUP_DN) + _find_memberof(topology_st, user_dn="cn=%s,%s" % (ACTIVE_USER_CN, OUT_GROUP_DN), group_dn=ACTIVE_GROUP_DN, + find_result=False) + _find_member(topology_st, user_dn="cn=%s,%s" % (ACTIVE_USER_CN, OUT_GROUP_DN), group_dn=ACTIVE_GROUP_DN, + find_result=False) + + # move the Active entry to out of scope, expect no 'member' and no 'memberof' + _modrdn_entry(topology_st, entry_dn="cn=%s,%s" % (ACTIVE_USER_CN, OUT_GROUP_DN), new_rdn="cn=%s" % ACTIVE_USER_CN, + new_superior=ACTIVE_DN) + _find_memberof(topology_st, user_dn="cn=%s,%s" % (ACTIVE_USER_CN, ACTIVE_DN), group_dn=ACTIVE_GROUP_DN, + find_result=False) + _find_member(topology_st, user_dn="cn=%s,%s" % (ACTIVE_USER_CN, ACTIVE_DN), group_dn=ACTIVE_GROUP_DN, + find_result=False) + + +def test_ticket47829_mod_modrdn_1(topology_st): + _header(topology_st, 'add an Stage user to a Active group. 
Then move Stage user to Active') + + # add Stage user to active group + _check_memberof(topology_st, action=ldap.MOD_ADD, user_dn=STAGE_USER_DN, group_dn=ACTIVE_GROUP_DN, + find_result=False) + _find_member(topology_st, user_dn=STAGE_USER_DN, group_dn=ACTIVE_GROUP_DN, find_result=True) + + # move the Stage entry to active, expect 'member' and 'memberof' + _modrdn_entry(topology_st, entry_dn=STAGE_USER_DN, new_rdn="cn=%s" % STAGE_USER_CN, new_superior=ACTIVE_DN) + _find_memberof(topology_st, user_dn="cn=%s,%s" % (STAGE_USER_CN, ACTIVE_DN), group_dn=ACTIVE_GROUP_DN, + find_result=True) + _find_member(topology_st, user_dn="cn=%s,%s" % (STAGE_USER_CN, ACTIVE_DN), group_dn=ACTIVE_GROUP_DN, + find_result=True) + + # move the Active entry to Stage, expect no 'member' and no 'memberof' + _modrdn_entry(topology_st, entry_dn="cn=%s,%s" % (STAGE_USER_CN, ACTIVE_DN), new_rdn="cn=%s" % STAGE_USER_CN, + new_superior=STAGE_DN) + _find_memberof(topology_st, user_dn="cn=%s,%s" % (STAGE_USER_CN, STAGE_DN), group_dn=ACTIVE_GROUP_DN, + find_result=False) + _find_member(topology_st, user_dn="cn=%s,%s" % (STAGE_USER_CN, ACTIVE_DN), group_dn=ACTIVE_GROUP_DN, + find_result=False) + + +def test_ticket47829_mod_stage_user_modrdn_active_user_1(topology_st): + _header(topology_st, 'add an Stage user to a Active group. 
Then move Stage user to Active') + + stage_user_dn = STAGE_USER_DN + stage_user_rdn = "cn=%s" % STAGE_USER_CN + active_user_dn = "cn=%s,%s" % (STAGE_USER_CN, ACTIVE_DN) + + # add Stage user to active group + _check_memberof(topology_st, action=ldap.MOD_ADD, user_dn=stage_user_dn, group_dn=ACTIVE_GROUP_DN, + find_result=False) + _find_member(topology_st, user_dn=stage_user_dn, group_dn=ACTIVE_GROUP_DN, find_result=True) + + # move the Stage entry to Actve, expect 'member' and 'memberof' + _modrdn_entry(topology_st, entry_dn=stage_user_dn, new_rdn=stage_user_rdn, new_superior=ACTIVE_DN) + _find_memberof(topology_st, user_dn=active_user_dn, group_dn=ACTIVE_GROUP_DN, find_result=True) + _find_member(topology_st, user_dn=active_user_dn, group_dn=ACTIVE_GROUP_DN, find_result=True) + + # move the Active entry to Stage, expect no 'member' and no 'memberof' + _modrdn_entry(topology_st, entry_dn=active_user_dn, new_rdn=stage_user_rdn, new_superior=STAGE_DN) + _find_memberof(topology_st, user_dn=stage_user_dn, group_dn=ACTIVE_GROUP_DN, find_result=False) + _find_member(topology_st, user_dn=stage_user_dn, group_dn=ACTIVE_GROUP_DN, find_result=False) + + +def test_ticket47829_mod_stage_user_modrdn_stage_user_1(topology_st): + _header(topology_st, 'add an Stage user to a Active group. 
Then move Stage user to Stage') + + _header(topology_st, 'Return because it requires a fix for 47833') + return + + old_stage_user_dn = STAGE_USER_DN + old_stage_user_rdn = "cn=%s" % STAGE_USER_CN + new_stage_user_rdn = "cn=x%s" % STAGE_USER_CN + new_stage_user_dn = "%s,%s" % (new_stage_user_rdn, STAGE_DN) + + # add Stage user to active group + _check_memberof(topology_st, action=ldap.MOD_ADD, user_dn=old_stage_user_dn, group_dn=ACTIVE_GROUP_DN, + find_result=False) + _find_member(topology_st, user_dn=old_stage_user_dn, group_dn=ACTIVE_GROUP_DN, find_result=True) + + # move the Stage entry to Stage, expect no 'member' and 'memberof' + _modrdn_entry(topology_st, entry_dn=old_stage_user_dn, new_rdn=new_stage_user_rdn, new_superior=STAGE_DN) + _find_memberof(topology_st, user_dn=new_stage_user_dn, group_dn=ACTIVE_GROUP_DN, find_result=False) + _find_member(topology_st, user_dn=new_stage_user_dn, group_dn=ACTIVE_GROUP_DN, find_result=False) + + # move the Stage entry to Stage, expect no 'member' and no 'memberof' + _modrdn_entry(topology_st, entry_dn=new_stage_user_dn, new_rdn=old_stage_user_rdn, new_superior=STAGE_DN) + _find_memberof(topology_st, user_dn=old_stage_user_dn, group_dn=ACTIVE_GROUP_DN, find_result=False) + _find_member(topology_st, user_dn=old_stage_user_dn, group_dn=ACTIVE_GROUP_DN, find_result=False) + + +def test_ticket47829_indirect_active_group_1(topology_st): + _header(topology_st, 'add an Active group (G1) to an active group (G0). Then add active user to G1') + + topology_st.standalone.modify_s(INDIRECT_ACTIVE_GROUP_DN, [(ldap.MOD_ADD, 'member', ensure_bytes(ACTIVE_GROUP_DN))]) + + # add an active user to G1. 
Checks that user is memberof G1 + _check_memberof(topology_st, action=ldap.MOD_ADD, user_dn=ACTIVE_USER_DN, group_dn=ACTIVE_GROUP_DN, + find_result=True) + _find_memberof(topology_st, user_dn=ACTIVE_USER_DN, group_dn=INDIRECT_ACTIVE_GROUP_DN, find_result=True) + + # remove G1 from G0 + topology_st.standalone.modify_s(INDIRECT_ACTIVE_GROUP_DN, [(ldap.MOD_DELETE, 'member', ensure_bytes(ACTIVE_GROUP_DN))]) + _find_memberof(topology_st, user_dn=ACTIVE_USER_DN, group_dn=INDIRECT_ACTIVE_GROUP_DN, find_result=False) + _find_memberof(topology_st, user_dn=ACTIVE_USER_DN, group_dn=ACTIVE_GROUP_DN, find_result=True) + + # remove active user from G1 + _check_memberof(topology_st, action=ldap.MOD_DELETE, user_dn=ACTIVE_USER_DN, group_dn=ACTIVE_GROUP_DN, + find_result=False) + + +def test_ticket47829_indirect_active_group_2(topology_st): + _header(topology_st, + 'add an Active group (G1) to an active group (G0). Then add active user to G1. Then move active user to stage') + + topology_st.standalone.modify_s(INDIRECT_ACTIVE_GROUP_DN, [(ldap.MOD_ADD, 'member', ensure_bytes(ACTIVE_GROUP_DN))]) + + # add an active user to G1. 
Checks that user is memberof G1 + _check_memberof(topology_st, action=ldap.MOD_ADD, user_dn=ACTIVE_USER_DN, group_dn=ACTIVE_GROUP_DN, + find_result=True) + _find_memberof(topology_st, user_dn=ACTIVE_USER_DN, group_dn=INDIRECT_ACTIVE_GROUP_DN, find_result=True) + + # remove G1 from G0 + topology_st.standalone.modify_s(INDIRECT_ACTIVE_GROUP_DN, [(ldap.MOD_DELETE, 'member', ensure_bytes(ACTIVE_GROUP_DN))]) + _find_memberof(topology_st, user_dn=ACTIVE_USER_DN, group_dn=INDIRECT_ACTIVE_GROUP_DN, find_result=False) + _find_memberof(topology_st, user_dn=ACTIVE_USER_DN, group_dn=ACTIVE_GROUP_DN, find_result=True) + + # move active user to stage + _modrdn_entry(topology_st, entry_dn=ACTIVE_USER_DN, new_rdn="cn=%s" % ACTIVE_USER_CN, new_superior=STAGE_DN) + + # stage user is no long member of active group and indirect active group + _find_memberof(topology_st, user_dn="cn=%s,%s" % (ACTIVE_USER_CN, STAGE_DN), group_dn=ACTIVE_GROUP_DN, + find_result=False) + _find_memberof(topology_st, user_dn="cn=%s,%s" % (ACTIVE_USER_CN, STAGE_DN), group_dn=INDIRECT_ACTIVE_GROUP_DN, + find_result=False) + + # active group and indirect active group do no longer have stage user as member + _find_member(topology_st, user_dn="cn=%s,%s" % (ACTIVE_USER_CN, STAGE_DN), group_dn=ACTIVE_GROUP_DN, + find_result=False) + _find_member(topology_st, user_dn="cn=%s,%s" % (ACTIVE_USER_CN, STAGE_DN), group_dn=INDIRECT_ACTIVE_GROUP_DN, + find_result=False) + + # return back the entry to active. 
It remains not member + _modrdn_entry(topology_st, entry_dn="cn=%s,%s" % (ACTIVE_USER_CN, STAGE_DN), new_rdn="cn=%s" % ACTIVE_USER_CN, + new_superior=ACTIVE_DN) + _find_member(topology_st, user_dn="cn=%s,%s" % (ACTIVE_USER_CN, ACTIVE_DN), group_dn=ACTIVE_GROUP_DN, + find_result=False) + _find_member(topology_st, user_dn="cn=%s,%s" % (ACTIVE_USER_CN, ACTIVE_DN), group_dn=INDIRECT_ACTIVE_GROUP_DN, + find_result=False) + + +def test_ticket47829_indirect_active_group_3(topology_st): + _header(topology_st, + 'add an Active group (G1) to an active group (G0). Then add active user to G1. Then move active user to out of the scope') + + topology_st.standalone.modify_s(INDIRECT_ACTIVE_GROUP_DN, [(ldap.MOD_ADD, 'member', ensure_bytes(ACTIVE_GROUP_DN))]) + + # add an active user to G1. Checks that user is memberof G1 + _check_memberof(topology_st, action=ldap.MOD_ADD, user_dn=ACTIVE_USER_DN, group_dn=ACTIVE_GROUP_DN, + find_result=True) + _find_memberof(topology_st, user_dn=ACTIVE_USER_DN, group_dn=INDIRECT_ACTIVE_GROUP_DN, find_result=True) + + # remove G1 from G0 + topology_st.standalone.modify_s(INDIRECT_ACTIVE_GROUP_DN, [(ldap.MOD_DELETE, 'member', ensure_bytes(ACTIVE_GROUP_DN))]) + _find_memberof(topology_st, user_dn=ACTIVE_USER_DN, group_dn=INDIRECT_ACTIVE_GROUP_DN, find_result=False) + _find_memberof(topology_st, user_dn=ACTIVE_USER_DN, group_dn=ACTIVE_GROUP_DN, find_result=True) + + # move active user to out of the scope + _modrdn_entry(topology_st, entry_dn=ACTIVE_USER_DN, new_rdn="cn=%s" % ACTIVE_USER_CN, new_superior=SCOPE_OUT_DN) + + # stage user is no long member of active group and indirect active group + _find_memberof(topology_st, user_dn="cn=%s,%s" % (ACTIVE_USER_CN, SCOPE_OUT_DN), group_dn=ACTIVE_GROUP_DN, + find_result=False) + _find_memberof(topology_st, user_dn="cn=%s,%s" % (ACTIVE_USER_CN, SCOPE_OUT_DN), group_dn=INDIRECT_ACTIVE_GROUP_DN, + find_result=False) + + # active group and indirect active group do no longer have stage user as member + 
_find_member(topology_st, user_dn="cn=%s,%s" % (ACTIVE_USER_CN, SCOPE_OUT_DN), group_dn=ACTIVE_GROUP_DN, + find_result=False) + _find_member(topology_st, user_dn="cn=%s,%s" % (ACTIVE_USER_CN, SCOPE_OUT_DN), group_dn=INDIRECT_ACTIVE_GROUP_DN, + find_result=False) + + # return back the entry to active. It remains not member + _modrdn_entry(topology_st, entry_dn="cn=%s,%s" % (ACTIVE_USER_CN, SCOPE_OUT_DN), new_rdn="cn=%s" % ACTIVE_USER_CN, + new_superior=ACTIVE_DN) + _find_member(topology_st, user_dn="cn=%s,%s" % (ACTIVE_USER_CN, ACTIVE_DN), group_dn=ACTIVE_GROUP_DN, + find_result=False) + _find_member(topology_st, user_dn="cn=%s,%s" % (ACTIVE_USER_CN, ACTIVE_DN), group_dn=INDIRECT_ACTIVE_GROUP_DN, + find_result=False) + + +def test_ticket47829_indirect_active_group_4(topology_st): + _header(topology_st, + 'add an Active group (G1) to an active group (G0). Then add stage user to G1. Then move user to active. Then move it back') + + topology_st.standalone.modify_s(INDIRECT_ACTIVE_GROUP_DN, [(ldap.MOD_ADD, 'member', ensure_bytes(ACTIVE_GROUP_DN))]) + + # add stage user to active group + _check_memberof(topology_st, action=ldap.MOD_ADD, user_dn=STAGE_USER_DN, group_dn=ACTIVE_GROUP_DN, + find_result=False) + _find_member(topology_st, user_dn=STAGE_USER_DN, group_dn=ACTIVE_GROUP_DN, find_result=True) + _find_member(topology_st, user_dn=STAGE_USER_DN, group_dn=INDIRECT_ACTIVE_GROUP_DN, find_result=False) + _find_memberof(topology_st, user_dn=STAGE_USER_DN, group_dn=INDIRECT_ACTIVE_GROUP_DN, find_result=False) + _find_memberof(topology_st, user_dn=STAGE_USER_DN, group_dn=ACTIVE_GROUP_DN, find_result=False) + + # move stage user to active + _modrdn_entry(topology_st, entry_dn=STAGE_USER_DN, new_rdn="cn=%s" % STAGE_USER_CN, new_superior=ACTIVE_DN) + renamed_stage_dn = "cn=%s,%s" % (STAGE_USER_CN, ACTIVE_DN) + _find_member(topology_st, user_dn=renamed_stage_dn, group_dn=ACTIVE_GROUP_DN, find_result=True) + _find_member(topology_st, user_dn=renamed_stage_dn, 
group_dn=INDIRECT_ACTIVE_GROUP_DN, find_result=False) + _find_memberof(topology_st, user_dn=renamed_stage_dn, group_dn=INDIRECT_ACTIVE_GROUP_DN, find_result=True) + _find_memberof(topology_st, user_dn=renamed_stage_dn, group_dn=ACTIVE_GROUP_DN, find_result=True) + + # move back active to stage + _modrdn_entry(topology_st, entry_dn=renamed_stage_dn, new_rdn="cn=%s" % STAGE_USER_CN, new_superior=STAGE_DN) + _find_member(topology_st, user_dn=STAGE_USER_DN, group_dn=ACTIVE_GROUP_DN, find_result=False) + _find_member(topology_st, user_dn=STAGE_USER_DN, group_dn=INDIRECT_ACTIVE_GROUP_DN, find_result=False) + _find_memberof(topology_st, user_dn=STAGE_USER_DN, group_dn=INDIRECT_ACTIVE_GROUP_DN, find_result=False) + _find_memberof(topology_st, user_dn=STAGE_USER_DN, group_dn=ACTIVE_GROUP_DN, find_result=False) + + +if __name__ == '__main__': + # Run isolated + # -s for DEBUG mode + CURRENT_FILE = os.path.realpath(__file__) + pytest.main("-s %s" % CURRENT_FILE) diff --git a/dirsrvtests/tests/tickets/ticket47833_test.py b/dirsrvtests/tests/tickets/ticket47833_test.py new file mode 100644 index 0000000..bb6e3fb --- /dev/null +++ b/dirsrvtests/tests/tickets/ticket47833_test.py @@ -0,0 +1,220 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2016 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. 
# --- END COPYRIGHT BLOCK ---
#
import pytest
from lib389.tasks import *
from lib389.utils import *
from lib389.topologies import topology_st

from lib389._constants import SUFFIX, DEFAULT_SUFFIX, PLUGIN_MEMBER_OF, DN_PLUGIN

SCOPE_IN_CN = 'in'
SCOPE_OUT_CN = 'out'
SCOPE_IN_DN = 'cn=%s,%s' % (SCOPE_IN_CN, SUFFIX)
SCOPE_OUT_DN = 'cn=%s,%s' % (SCOPE_OUT_CN, SUFFIX)

PROVISIONING_CN = "provisioning"
PROVISIONING_DN = "cn=%s,%s" % (PROVISIONING_CN, SCOPE_IN_DN)

# Skip on older versions
pytestmark = [pytest.mark.tier2,
              pytest.mark.skipif(ds_is_older('1.3.3'), reason="Not implemented")]

ACTIVE_CN = "accounts"
STAGE_CN = "staged users"
DELETE_CN = "deleted users"
ACTIVE_DN = "cn=%s,%s" % (ACTIVE_CN, SCOPE_IN_DN)
STAGE_DN = "cn=%s,%s" % (STAGE_CN, PROVISIONING_DN)
DELETE_DN = "cn=%s,%s" % (DELETE_CN, PROVISIONING_DN)

STAGE_USER_CN = "stage guy"
STAGE_USER_DN = "cn=%s,%s" % (STAGE_USER_CN, STAGE_DN)

ACTIVE_USER_CN = "active guy"
ACTIVE_USER_DN = "cn=%s,%s" % (ACTIVE_USER_CN, ACTIVE_DN)

OUT_USER_CN = "out guy"
OUT_USER_DN = "cn=%s,%s" % (OUT_USER_CN, SCOPE_OUT_DN)

STAGE_GROUP_CN = "stage group"
STAGE_GROUP_DN = "cn=%s,%s" % (STAGE_GROUP_CN, STAGE_DN)

ACTIVE_GROUP_CN = "active group"
ACTIVE_GROUP_DN = "cn=%s,%s" % (ACTIVE_GROUP_CN, ACTIVE_DN)

OUT_GROUP_CN = "out group"
OUT_GROUP_DN = "cn=%s,%s" % (OUT_GROUP_CN, SCOPE_OUT_DN)

logging.getLogger(__name__).setLevel(logging.DEBUG)
log = logging.getLogger(__name__)


def _header(topology_st, label):
    """Log a prominent banner so test phases are easy to spot in the log."""
    topology_st.standalone.log.info("\n\n###############################################")
    topology_st.standalone.log.info("#######")
    topology_st.standalone.log.info("####### %s" % label)
    topology_st.standalone.log.info("#######")
    topology_st.standalone.log.info("###############################################")


def _add_user(topology_st, type='active'):
    """Add the fixture user for the given lifecycle state.

    type -- 'active', 'stage', or anything else for the out-of-scope user.
    """
    if type == 'active':
        topology_st.standalone.add_s(Entry((ACTIVE_USER_DN, {
            'objectclass': "top person inetuser".split(),
            'sn': ACTIVE_USER_CN,
            'cn': ACTIVE_USER_CN})))
    elif type == 'stage':
        topology_st.standalone.add_s(Entry((STAGE_USER_DN, {
            'objectclass': "top person inetuser".split(),
            'sn': STAGE_USER_CN,
            'cn': STAGE_USER_CN})))
    else:
        topology_st.standalone.add_s(Entry((OUT_USER_DN, {
            'objectclass': "top person inetuser".split(),
            'sn': OUT_USER_CN,
            'cn': OUT_USER_CN})))


def _find_memberof(topology_st, user_dn=None, group_dn=None, find_result=True):
    """Assert that user_dn does (find_result=True) or does not carry a
    'memberof' value equal to group_dn."""
    assert (topology_st)
    assert (user_dn)
    assert (group_dn)
    ent = topology_st.standalone.getEntry(user_dn, ldap.SCOPE_BASE, "(objectclass=*)", ['memberof'])
    found = False
    if ent.hasAttr('memberof'):
        for val in ent.getValues('memberof'):
            topology_st.standalone.log.info("!!!!!!! %s: memberof->%s" % (user_dn, val))
            # BUG FIX: getValues() returns bytes under python3 while group_dn
            # is str, so the raw comparison could never match.  Decode first,
            # exactly as _find_member() below already does.
            if ensure_str(val) == group_dn:
                found = True
                break

    if find_result:
        assert (found)
    else:
        assert (not found)


def _find_member(topology_st, user_dn=None, group_dn=None, find_result=True):
    """Assert that group_dn does (find_result=True) or does not carry a
    'member' value equal to user_dn."""
    assert (topology_st)
    assert (user_dn)
    assert (group_dn)
    ent = topology_st.standalone.getEntry(group_dn, ldap.SCOPE_BASE, "(objectclass=*)", ['member'])
    found = False
    if ent.hasAttr('member'):
        for val in ent.getValues('member'):
            topology_st.standalone.log.info("!!!!!!! %s: member ->%s" % (group_dn, val))
            if ensure_str(val) == user_dn:
                found = True
                break

    if find_result:
        assert (found)
    else:
        assert (not found)


def _modrdn_entry(topology_st=None, entry_dn=None, new_rdn=None, del_old=0, new_superior=None):
    """MODRDN entry_dn to new_rdn, optionally under new_superior."""
    assert topology_st is not None
    assert entry_dn is not None
    assert new_rdn is not None

    topology_st.standalone.log.info("\n\n######################### MODRDN %s ######################\n" % new_rdn)
    if new_superior:
        topology_st.standalone.rename_s(entry_dn, new_rdn, newsuperior=new_superior, delold=del_old)
    else:
        topology_st.standalone.rename_s(entry_dn, new_rdn, delold=del_old)


def _check_memberof(topology_st=None, action=None, user_dn=None, group_dn=None, find_result=None):
    """Apply 'action' (MOD_ADD/MOD_DELETE/MOD_REPLACE) on the group's 'member'
    attribute, then verify the user's 'memberof' matches find_result."""
    assert (topology_st)
    assert (user_dn)
    assert (group_dn)
    if action == ldap.MOD_ADD:
        txt = 'add'
    elif action == ldap.MOD_DELETE:
        txt = 'delete'
    else:
        txt = 'replace'
    topology_st.standalone.log.info('\n%s entry %s' % (txt, user_dn))
    topology_st.standalone.log.info('to group %s' % group_dn)

    topology_st.standalone.modify_s(group_dn, [(action, 'member', ensure_bytes(user_dn))])
    # give the memberOf plugin time to run its fixup
    time.sleep(1)
    _find_memberof(topology_st, user_dn=user_dn, group_dn=group_dn, find_result=find_result)


def test_ticket47829_init(topology_st):
    """Build the container/group/user fixture tree and enable the memberOf
    plugin scoped to ACTIVE_DN only."""
    # FIX: 'cn' must hold the RDN value (matching ACTIVE_CN/DELETE_CN usage),
    # not the full DN, for the container entries below.
    topology_st.standalone.add_s(Entry((SCOPE_IN_DN, {
        'objectclass': "top nscontainer".split(),
        'cn': SCOPE_IN_CN})))
    topology_st.standalone.add_s(Entry((SCOPE_OUT_DN, {
        'objectclass': "top nscontainer".split(),
        'cn': SCOPE_OUT_CN})))
    topology_st.standalone.add_s(Entry((PROVISIONING_DN, {
        'objectclass': "top nscontainer".split(),
        'cn': PROVISIONING_CN})))
    topology_st.standalone.add_s(Entry((ACTIVE_DN, {
        'objectclass': "top nscontainer".split(),
        'cn': ACTIVE_CN})))
    topology_st.standalone.add_s(Entry((STAGE_DN, {
        'objectclass': "top nscontainer".split(),
        'cn': STAGE_CN})))
    topology_st.standalone.add_s(Entry((DELETE_DN, {
        'objectclass': "top nscontainer".split(),
        'cn': DELETE_CN})))

    # add groups
    topology_st.standalone.add_s(Entry((ACTIVE_GROUP_DN, {
        'objectclass': "top groupOfNames".split(),
        'cn': ACTIVE_GROUP_CN})))
    topology_st.standalone.add_s(Entry((STAGE_GROUP_DN, {
        'objectclass': "top groupOfNames".split(),
        'cn': STAGE_GROUP_CN})))
    topology_st.standalone.add_s(Entry((OUT_GROUP_DN, {
        'objectclass': "top groupOfNames".split(),
        'cn': OUT_GROUP_CN})))

    # add users
    _add_user(topology_st, 'active')
    _add_user(topology_st, 'stage')
    _add_user(topology_st, 'out')

    # enable memberof plugin, scoped to the active subtree only
    topology_st.standalone.plugins.enable(name=PLUGIN_MEMBER_OF)
    dn = "cn=%s,%s" % (PLUGIN_MEMBER_OF, DN_PLUGIN)
    topology_st.standalone.modify_s(dn, [(ldap.MOD_REPLACE, 'memberOfEntryScope', ensure_bytes(ACTIVE_DN))])

    topology_st.standalone.restart(timeout=10)


def test_ticket47829_mod_stage_user_modrdn_stage_user_1(topology_st):
    """Add a staged user to an active group, then rename it within Stage:
    no 'member' and no 'memberof' must remain."""
    _header(topology_st, 'add an Stage user to a Active group. Then move Stage user to Stage')

    old_stage_user_dn = STAGE_USER_DN
    old_stage_user_rdn = "cn=%s" % STAGE_USER_CN
    new_stage_user_rdn = "cn=x%s" % STAGE_USER_CN
    new_stage_user_dn = "%s,%s" % (new_stage_user_rdn, STAGE_DN)

    # add Stage user to active group: 'member' appears but no 'memberof'
    _check_memberof(topology_st, action=ldap.MOD_ADD, user_dn=old_stage_user_dn, group_dn=ACTIVE_GROUP_DN,
                    find_result=False)
    _find_member(topology_st, user_dn=old_stage_user_dn, group_dn=ACTIVE_GROUP_DN, find_result=True)

    # rename the Stage entry within Stage, expect no 'member' and no 'memberof'
    _modrdn_entry(topology_st, entry_dn=old_stage_user_dn, new_rdn=new_stage_user_rdn, new_superior=STAGE_DN)
    _find_memberof(topology_st, user_dn=new_stage_user_dn, group_dn=ACTIVE_GROUP_DN, find_result=False)
    _find_member(topology_st, user_dn=new_stage_user_dn, group_dn=ACTIVE_GROUP_DN, find_result=False)


if __name__ == '__main__':
    # Run isolated
    # -s for DEBUG mode
    CURRENT_FILE = os.path.realpath(__file__)
    pytest.main("-s %s" % CURRENT_FILE)


# === new file: dirsrvtests/tests/tickets/ticket47869MMR_test.py ===
# --- BEGIN COPYRIGHT BLOCK ---
# Copyright (C) 2016 Red Hat, Inc.
# All rights reserved.
#
# License: GPL (version 3 or any later version).
# See LICENSE for details.
# --- END COPYRIGHT BLOCK ---
#
import logging
import time

import ldap
import pytest
from lib389 import Entry
from lib389._constants import *
from lib389.topologies import topology_m2
from lib389.replica import ReplicationManager
from lib389.utils import *

pytestmark = pytest.mark.tier2

logging.getLogger(__name__).setLevel(logging.DEBUG)
log = logging.getLogger(__name__)

TEST_REPL_DN = "cn=test_repl, %s" % SUFFIX
ENTRY_NAME = 'test_entry'
MAX_ENTRIES = 10

BIND_NAME = 'bind_entry'
BIND_DN = 'cn=%s, %s' % (BIND_NAME, SUFFIX)
BIND_PW = 'password'


def replication_check(topology_m2):
    """Verify that supplier1 -> supplier2 replication is working."""
    repl = ReplicationManager(SUFFIX)
    supplier1 = topology_m2.ms["supplier1"]
    supplier2 = topology_m2.ms["supplier2"]
    return repl.test_replication(supplier1, supplier2)


def test_ticket47869_init(topology_m2):
    """
    It adds an entry ('bind_entry') and 10 test entries
    It sets the anonymous aci

    """
    # enable acl error logging
    mod = [(ldap.MOD_REPLACE, 'nsslapd-errorlog-level', ensure_bytes(str(8192)))]  # REPL
    topology_m2.ms["supplier1"].modify_s(DN_CONFIG, mod)
    topology_m2.ms["supplier2"].modify_s(DN_CONFIG, mod)

    # entry used to bind with
    topology_m2.ms["supplier1"].log.info("Add %s" % BIND_DN)
    topology_m2.ms["supplier1"].add_s(Entry((BIND_DN, {
        'objectclass': "top person".split(),
        'sn': BIND_NAME,
        'cn': BIND_NAME,
        'userpassword': BIND_PW})))
    replication_check(topology_m2)
    ent = topology_m2.ms["supplier2"].getEntry(BIND_DN, ldap.SCOPE_BASE, "(objectclass=*)")
    assert ent
    # keep anonymous ACI for use 'read-search' aci in SEARCH test
    ACI_ANONYMOUS = "(targetattr!=\"userPassword\")(version 3.0; acl \"Enable anonymous access\"; allow (read, search, compare) userdn=\"ldap:///anyone\";)"
    mod = [(ldap.MOD_REPLACE, 'aci', ensure_bytes(ACI_ANONYMOUS))]
    topology_m2.ms["supplier1"].modify_s(SUFFIX, mod)
    topology_m2.ms["supplier2"].modify_s(SUFFIX, mod)

    # add entries and verify each replicated to supplier2
    for cpt in range(MAX_ENTRIES):
        name = "%s%d" % (ENTRY_NAME, cpt)
        mydn = "cn=%s,%s" % (name, SUFFIX)
        topology_m2.ms["supplier1"].add_s(Entry((mydn,
                                                 {'objectclass': "top person".split(),
                                                  'sn': name,
                                                  'cn': name})))
        replication_check(topology_m2)
        ent = topology_m2.ms["supplier2"].getEntry(mydn, ldap.SCOPE_BASE, "(objectclass=*)")
        assert ent


def _count_nscpentrywsi(supplier, label):
    """Search the whole suffix requesting 'nscpentrywsi' and return the number
    of entries that expose it for the currently-bound identity."""
    supplier.log.info("%s: Calling search_ext..." % label)
    msgid = supplier.search_ext(SUFFIX, ldap.SCOPE_SUBTREE, 'objectclass=*', ['nscpentrywsi'])
    rtype, rdata, rmsgid = supplier.result2(msgid)
    supplier.log.info("%d results" % len(rdata))
    count = 0
    supplier.log.info("Results:")
    for dn, attrs in rdata:
        supplier.log.info("dn: %s" % dn)
        if 'nscpentrywsi' in attrs:
            count += 1
    supplier.log.info("%s: count of nscpentrywsi: %d" % (label, count))
    return count


def test_ticket47869_check(topology_m2):
    '''
    On Supplier 1 and 2:
    Bind as Directory Manager.
    Search all specifying nscpEntryWsi in the attribute list.
    Check nscpEntryWsi is returned.
    On Supplier 1 and 2:
    Bind as Bind Entry.
    Search all specifying nscpEntryWsi in the attribute list.
    Check nscpEntryWsi is not returned.
    On Supplier 1 and 2:
    Bind as anonymous.
    Search all specifying nscpEntryWsi in the attribute list.
    Check nscpEntryWsi is not returned.
    '''
    suppliers = (("Supplier1", topology_m2.ms["supplier1"]),
                 ("Supplier2", topology_m2.ms["supplier2"]))

    topology_m2.ms["supplier1"].log.info("\n\n######################### CHECK nscpentrywsi ######################\n")

    # Directory Manager must see the operational attribute.
    # FIX: the original only logged the count for DM; assert it is non-zero
    # as the docstring requires.
    for label, supplier in suppliers:
        supplier.log.info("##### %s: Bind as %s #####" % (label, DN_DM))
        supplier.simple_bind_s(DN_DM, PASSWORD)
        assert _count_nscpentrywsi(supplier, label) > 0

    # an ordinary bound entry must NOT see it
    for label, supplier in suppliers:
        supplier.log.info("##### %s: Bind as %s #####" % (label, BIND_DN))
        supplier.simple_bind_s(BIND_DN, BIND_PW)
        assert _count_nscpentrywsi(supplier, label) == 0

    # anonymous must NOT see it
    for label, supplier in suppliers:
        supplier.log.info("##### %s: Bind as anonymous #####" % label)
        supplier.simple_bind_s("", "")
        assert _count_nscpentrywsi(supplier, label) == 0

    topology_m2.ms["supplier1"].log.info("##### ticket47869 was successfully verified. #####")


if __name__ == '__main__':
    # Run isolated
    # -s for DEBUG mode
    CURRENT_FILE = os.path.realpath(__file__)
    pytest.main("-s %s" % CURRENT_FILE)


# === new file: dirsrvtests/tests/tickets/ticket47871_test.py ===
# --- BEGIN COPYRIGHT BLOCK ---
# Copyright (C) 2016 Red Hat, Inc.
# All rights reserved.
#
# License: GPL (version 3 or any later version).
# See LICENSE for details.
# --- END COPYRIGHT BLOCK ---
#
'''
Created on Nov 7, 2013

@author: tbordaz
'''
import logging
import time

import ldap
import pytest
from lib389 import Entry
from lib389._constants import *
from lib389.topologies import topology_m1c1

logging.getLogger(__name__).setLevel(logging.DEBUG)
from lib389.utils import *

# Skip on older versions
pytestmark = [pytest.mark.tier2,
              pytest.mark.skipif(ds_is_older('1.3.2'), reason="Not implemented")]
log = logging.getLogger(__name__)

TEST_REPL_DN = "cn=test_repl, %s" % SUFFIX
ENTRY_DN = "cn=test_entry, %s" % SUFFIX

OTHER_NAME = 'other_entry'
MAX_OTHERS = 10

ATTRIBUTES = ['street', 'countryName', 'description', 'postalAddress', 'postalCode', 'title', 'l', 'roomNumber']


def test_ticket47871_init(topology_m1c1):
    """Enable the retro changelog with aggressive trimming on the supplier."""
    supplier = topology_m1c1.ms["supplier1"]
    supplier.plugins.enable(name=PLUGIN_RETRO_CHANGELOG)
    trim_mods = [(ldap.MOD_REPLACE, 'nsslapd-changelogmaxage', b"10s"),  # 10 second triming
                 (ldap.MOD_REPLACE, 'nsslapd-changelog-trim-interval', b"5s")]
    supplier.modify_s("cn=%s,%s" % (PLUGIN_RETRO_CHANGELOG, DN_PLUGIN), trim_mods)
    # topology_m1c1.ms["supplier1"].plugins.enable(name=PLUGIN_MEMBER_OF)
    # topology_m1c1.ms["supplier1"].plugins.enable(name=PLUGIN_REFER_INTEGRITY)
    supplier.stop(timeout=10)
    supplier.start(timeout=10)

    supplier.log.info("test_ticket47871_init topology_m1c1 %r" % (topology_m1c1))
    # the test case will check if a warning message is logged in the
    # error log of the supplier
    supplier.errorlog_file = open(supplier.errlog, "r")


def test_ticket47871_1(topology_m1c1):
    """ADD entries and check they are all present in the retro changelog."""
    supplier = topology_m1c1.ms["supplier1"]

    # add dummy entries
    for idx in range(MAX_OTHERS):
        name = "%s%d" % (OTHER_NAME, idx)
        supplier.add_s(Entry(("cn=%s,%s" % (name, SUFFIX), {
            'objectclass': "top person".split(),
            'sn': name,
            'cn': name})))

    supplier.log.info(
        "test_ticket47871_init: %d entries ADDed %s[0..%d]" % (MAX_OTHERS, OTHER_NAME, MAX_OTHERS - 1))

    # Check the number of entries in the retro changelog
    time.sleep(1)
    ents = supplier.search_s(RETROCL_SUFFIX, ldap.SCOPE_ONELEVEL, "(objectclass=*)")
    assert len(ents) == MAX_OTHERS
    supplier.log.info("Added entries are")
    for ent in ents:
        supplier.log.info("%s" % ent.dn)


def test_ticket47871_2(topology_m1c1):
    """Poll until trimming leaves at most one entry in the retro changelog."""
    MAX_TRIES = 10
    supplier = topology_m1c1.ms["supplier1"]
    ents = []
    for attempt in range(1, MAX_TRIES + 1):
        time.sleep(6)  # at least 1 trimming occurred
        ents = supplier.search_s(RETROCL_SUFFIX, ldap.SCOPE_ONELEVEL, "(objectclass=*)")
        assert len(ents) <= MAX_OTHERS
        supplier.log.info("\nTry no %d it remains %d entries" % (attempt, len(ents)))
        for ent in ents:
            supplier.log.info("%s" % ent.dn)
        if len(ents) <= 1:
            break
    # fails when trimming never brought the changelog down to <= 1 entry
    assert len(ents) <= 1


if __name__ == '__main__':
    # Run isolated
    # -s for DEBUG mode
    CURRENT_FILE = os.path.realpath(__file__)
    pytest.main("-s %s" % CURRENT_FILE)


# === new file: dirsrvtests/tests/tickets/ticket47900_test.py ===
# --- BEGIN COPYRIGHT BLOCK ---
# Copyright (C) 2016 Red Hat, Inc.
# All rights reserved.
#
# License: GPL (version 3 or any later version).
# See LICENSE for details.
def test_ticket47900(topology_st):
    """Verify that a password administrator (and the root DN) can bypass
    password syntax/policy checks.

    We need to test how passwords are modified in existing entries,
    and when adding new entries.

    Create the Password Admin entry, but do not set it as an admin yet.
    Use the entry to verify invalid passwords are caught.  Then activate
    the password admin and make sure it can bypass password policy.

    :param topology_st: standalone topology fixture
    """

    # Prepare the Password Administator entry
    entry = Entry(ADMIN_DN)
    entry.setValues('objectclass', 'top', 'person')
    entry.setValues('sn', ADMIN_NAME)
    entry.setValues('cn', ADMIN_NAME)
    entry.setValues('userpassword', ADMIN_PWD)

    topology_st.standalone.log.info("Creating Password Administator entry %s..."
                                    % ADMIN_DN)
    try:
        topology_st.standalone.add_s(entry)
    except ldap.LDAPError as e:
        # Bug fix: the original handler had a second, unreachable
        # log.error/assert pair after the first "assert False"; the more
        # specific message is kept and the dead code removed.
        topology_st.standalone.log.error("Failed to add Password Administator %s, error: %s "
                                         % (ADMIN_DN, e.args[0]['desc']))
        assert False

    topology_st.standalone.log.info("Configuring password policy...")
    topology_st.standalone.config.replace_many(('nsslapd-pwpolicy-local', 'on'),
                                               ('passwordCheckSyntax', 'on'),
                                               ('passwordMinCategories', '1'),
                                               ('passwordMinTokenLength', '1'),
                                               ('passwordExp', 'on'),
                                               ('passwordMinDigits', '1'),
                                               ('passwordMinSpecials', '1'))

    #
    # Add an aci to allow everyone all access (just makes things easier)
    #
    topology_st.standalone.log.info("Add aci to allow password admin to add/update entries...")

    ACI_TARGET = "(target = \"ldap:///%s\")" % SUFFIX
    ACI_TARGETATTR = "(targetattr = *)"
    ACI_ALLOW = "(version 3.0; acl \"Password Admin Access\"; allow (all) "
    ACI_SUBJECT = "(userdn = \"ldap:///anyone\");)"
    ACI_BODY = ACI_TARGET + ACI_TARGETATTR + ACI_ALLOW + ACI_SUBJECT
    mod = [(ldap.MOD_ADD, 'aci', ensure_bytes(ACI_BODY))]
    topology_st.standalone.modify_s(SUFFIX, mod)

    #
    # Bind as the Password Admin (not activated yet)
    #
    topology_st.standalone.log.info("Bind as the Password Administator (before activating)...")
    topology_st.standalone.simple_bind_s(ADMIN_DN, ADMIN_PWD)

    #
    # Setup our test entry, and test password policy is working
    #
    entry = Entry(ENTRY_DN)
    entry.setValues('objectclass', 'top', 'person')
    entry.setValues('sn', ENTRY_NAME)
    entry.setValues('cn', ENTRY_NAME)

    #
    # Start by attempting to add an entry with an invalid password
    #
    topology_st.standalone.log.info("Attempt to add entries with invalid passwords, these adds should fail...")
    for passwd in INVALID_PWDS:
        failed_as_expected = False
        entry.setValues('userpassword', passwd)
        topology_st.standalone.log.info("Create a regular user entry %s with password (%s)..." % (ENTRY_DN, passwd))
        try:
            topology_st.standalone.add_s(entry)
        except ldap.LDAPError as e:
            # We failed as expected
            failed_as_expected = True
            topology_st.standalone.log.info('Add failed as expected: password (%s) result (%s)'
                                            % (passwd, e.args[0]['desc']))

        if not failed_as_expected:
            topology_st.standalone.log.error("We were incorrectly able to add an entry " +
                                             "with an invalid password (%s)" % (passwd))
            assert False

    #
    # Now activate a password administrator, bind as root dn to do the config
    # update, then rebind as the password admin
    #
    topology_st.standalone.log.info("Activate the Password Administator...")

    # Bind as Root DN
    topology_st.standalone.simple_bind_s(DN_DM, PASSWORD)

    # Update config
    topology_st.standalone.modify_s(CONFIG_DN, [(ldap.MOD_REPLACE, 'passwordAdminDN', ensure_bytes(ADMIN_DN))])

    # Bind as Password Admin
    topology_st.standalone.simple_bind_s(ADMIN_DN, ADMIN_PWD)

    #
    # Start adding entries with invalid passwords, delete the entry after each pass.
    #
    for passwd in INVALID_PWDS:
        entry.setValues('userpassword', passwd)
        topology_st.standalone.log.info("Create a regular user entry %s with password (%s)..."
                                        % (ENTRY_DN, passwd))
        topology_st.standalone.add_s(entry)

        topology_st.standalone.log.info('Succesfully added entry (%s)' % ENTRY_DN)

        # Delete entry for the next pass
        topology_st.standalone.delete_s(ENTRY_DN)

    #
    # Add the entry for the next round of testing (modify password)
    #
    entry.setValues('userpassword', ADMIN_PWD)
    topology_st.standalone.add_s(entry)

    #
    # Deactivate the password admin and make sure invalid password updates fail
    #
    topology_st.standalone.log.info("Deactivate Password Administator and try invalid password updates...")

    # Bind as root DN
    topology_st.standalone.simple_bind_s(DN_DM, PASSWORD)

    # Update config
    topology_st.standalone.modify_s(CONFIG_DN, [(ldap.MOD_DELETE, 'passwordAdminDN', None)])

    # Bind as Password Admin
    topology_st.standalone.simple_bind_s(ADMIN_DN, ADMIN_PWD)

    #
    # Make invalid password updates that should fail
    #
    for passwd in INVALID_PWDS:
        failed_as_expected = False
        entry.setValues('userpassword', passwd)
        try:
            topology_st.standalone.modify_s(ENTRY_DN, [(ldap.MOD_REPLACE, 'userpassword', ensure_bytes(passwd))])
        except ldap.LDAPError as e:
            # We failed as expected
            failed_as_expected = True
            topology_st.standalone.log.info('Password update failed as expected: password (%s) result (%s)'
                                            % (passwd, e.args[0]['desc']))

        if not failed_as_expected:
            topology_st.standalone.log.error("We were incorrectly able to add an invalid password (%s)"
                                             % (passwd))
            assert False

    #
    # Reactivate the password administrator and try the updates again
    #
    topology_st.standalone.log.info("Activate Password Administator and try updates again...")

    # Bind as root DN (comment fixed: was "root D")
    topology_st.standalone.simple_bind_s(DN_DM, PASSWORD)

    # Update config
    topology_st.standalone.modify_s(CONFIG_DN, [(ldap.MOD_REPLACE, 'passwordAdminDN', ensure_bytes(ADMIN_DN))])

    # Bind as Password Admin
    topology_st.standalone.simple_bind_s(ADMIN_DN, ADMIN_PWD)

    #
    # Make the same password updates, but this time they should succeed
    #
    for passwd in INVALID_PWDS:
        entry.setValues('userpassword', passwd)
        topology_st.standalone.modify_s(ENTRY_DN, [(ldap.MOD_REPLACE, 'userpassword', ensure_bytes(passwd))])
        topology_st.standalone.log.info('Password update succeeded (%s)' % passwd)
def format_time(local_datetime):
    """Render *local_datetime* in the access-log timestamp style used by
    logconv.pl -S/-E, e.g. ``[08/Jan/2024:18:04:08]``."""
    return local_datetime.strftime("[%d/%b/%Y:%H:%M:%S]")
def test_ticket47910_logconv_start_end_positive(topology_st, log_dir):
    """logconv.pl with a -S (start) earlier than -E (end) must succeed (exit 0)."""
    log.info('Running test_ticket47910 - Execute logconv.pl -S -E with random values')

    log.info("taking current time with offset of 2 mins and formatting it to feed -S")
    start_stamp = format_time(datetime.now() - timedelta(minutes=2))

    log.info("taking current time with offset of 2 mins and formatting it to feed -E")
    end_stamp = format_time(datetime.now() + timedelta(minutes=2))

    log.info("Executing logconv.pl with -S and -E")
    assert execute_logconv(topology_st.standalone, start_stamp, end_stamp, log_dir) == 0


def test_ticket47910_logconv_start_end_negative(topology_st, log_dir):
    """logconv.pl must fail (exit 1) when the -S timestamp is later than -E."""
    log.info('Running test_ticket47910 - Execute logconv.pl -S -E with starttime>endtime')

    log.info("taking current time with offset of 2 mins and formatting it to feed -S")
    start_stamp = format_time(datetime.now() + timedelta(minutes=2))

    log.info("taking current time with offset of 2 mins and formatting it to feed -E")
    end_stamp = format_time(datetime.now() - timedelta(minutes=2))

    log.info("Executing logconv.pl with -S and -E")
    assert execute_logconv(topology_st.standalone, start_stamp, end_stamp, log_dir) == 1


def test_ticket47910_logconv_start_end_invalid(topology_st, log_dir):
    """logconv.pl must fail (exit 1) when -S/-E are not parseable timestamps."""
    log.info('Running test_ticket47910 - Execute logconv.pl -S -E with invalid timestamp')
    log.info("Set start time and end time to invalid values")
    bogus_stamp = "invalid"

    log.info("Executing logconv.pl with -S and -E")
    assert execute_logconv(topology_st.standalone, bogus_stamp, bogus_stamp, log_dir) == 1


def test_ticket47910_logconv_noaccesslogs(topology_st, log_dir):
    """logconv.pl must fail (exit 1) when no access-log path is supplied."""
    log.info('Running test_ticket47910 - Execute logconv.pl without access logs')

    log.info("taking current time with offset of 2 mins and formatting it to feed -S")
    stamp = format_time(datetime.now() - timedelta(minutes=2))
    log.info("Executing logconv.pl with -S current time")
    cmd = [os.path.join(topology_st.standalone.get_bin_dir(), 'logconv.pl'), '-S', stamp]
    log.info(" ".join(cmd))
    proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    stdout, stderr = proc.communicate()
    log.info("standard output" + ensure_str(stdout))
    log.info("standard errors" + ensure_str(stderr))

    assert proc.returncode == 1
def _header(topology_st, label):
    """Print a banner around *label* in the instance log for readability."""
    inst_log = topology_st.standalone.log
    inst_log.info("\n\n###############################################")
    inst_log.info("#######")
    inst_log.info("####### %s" % label)
    inst_log.info("#######")
    inst_log.info("###############################################")


def _add_user(topology_st, type='active'):
    """Add one test user; *type* selects the active, staged, or
    out-of-scope location (default: active, with an initial description)."""
    if type == 'active':
        dn, cn, extra = ACTIVE_USER_DN, ACTIVE_USER_CN, {'description': INITIAL_DESC}
    elif type == 'stage':
        dn, cn, extra = STAGE_USER_DN, STAGE_USER_CN, {}
    else:
        dn, cn, extra = OUT_USER_DN, OUT_USER_CN, {}
    attrs = {'objectclass': "top person inetuser".split(),
             'sn': cn,
             'cn': cn}
    attrs.update(extra)
    topology_st.standalone.add_s(Entry((dn, attrs)))


def test_ticket47920_init(topology_st):
    """Create the in-scope container tree and the active user used below."""
    for dn, cn in ((SCOPE_IN_DN, SCOPE_IN_DN),
                   (ACTIVE_DN, ACTIVE_CN)):
        topology_st.standalone.add_s(Entry((dn, {
            'objectclass': "top nscontainer".split(),
            'cn': cn})))

    # add users
    _add_user(topology_st, 'active')


def test_ticket47920_mod_readentry_ctrl(topology_st):
    """A modify with a PostRead control must return the post-modification
    entry (cn + description) in the response control."""
    _header(topology_st, 'MOD: with a readentry control')

    topology_st.standalone.log.info("Check the initial value of the entry")
    ent = topology_st.standalone.getEntry(ACTIVE_USER_DN, ldap.SCOPE_BASE,
                                          "(objectclass=*)", ['description'])
    assert ent.hasAttr('description')
    assert ensure_str(ent.getValue('description')) == INITIAL_DESC

    pr = PostReadControl(criticality=True, attrList=['cn', 'description'])
    _, _, _, resp_ctrls = topology_st.standalone.modify_ext_s(
        ACTIVE_USER_DN,
        [(ldap.MOD_REPLACE, 'description', [ensure_bytes(FINAL_DESC)])],
        serverctrls=[pr])

    post_read = resp_ctrls[0]
    assert post_read.dn == ACTIVE_USER_DN
    for attr in ('description', 'cn'):
        assert attr in post_read.entry
    print(post_read.entry['description'])

    ent = topology_st.standalone.getEntry(ACTIVE_USER_DN, ldap.SCOPE_BASE,
                                          "(objectclass=*)", ['description'])
    assert ent.hasAttr('description')
    assert ensure_str(ent.getValue('description')) == FINAL_DESC
def test_ticket47921(topology_st):
    '''
    Test that indirect cos reflects the current value of the indirect entry
    '''
    INDIRECT_COS_DN = 'cn=cos definition,' + DEFAULT_SUFFIX
    MANAGER_DN = 'uid=my manager,ou=people,' + DEFAULT_SUFFIX
    USER_DN = 'uid=user,ou=people,' + DEFAULT_SUFFIX

    # Add COS definition (roomnumber follows the entry named in "manager")
    topology_st.standalone.add_s(Entry((INDIRECT_COS_DN, {
        'objectclass': 'top cosSuperDefinition cosIndirectDefinition ldapSubEntry'.split(),
        'cosIndirectSpecifier': 'manager',
        'cosAttribute': 'roomnumber',
    })))

    # Add manager entry (the indirect target of the COS)
    topology_st.standalone.add_s(Entry((MANAGER_DN, {
        'objectclass': 'top extensibleObject'.split(),
        'uid': 'my manager',
        'roomnumber': '1',
    })))

    # Add user entry pointing at the manager
    topology_st.standalone.add_s(Entry((USER_DN, {
        'objectclass': 'top person organizationalPerson inetorgperson'.split(),
        'sn': 'last',
        'cn': 'full',
        'givenname': 'mark',
        'uid': 'user',
        'manager': MANAGER_DN,
    })))

    def _room_of_user():
        # Fetch the virtual roomnumber the COS assigns to uid=user.
        found = topology_st.standalone.search_s(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE,
                                                "uid=user",
                                                ['roomnumber'])
        if not found:
            log.fatal('Failed to find user entry')
            assert False
        return ensure_str(found[0].getValue('roomnumber'))

    # Test COS is working
    if _room_of_user() != '1':
        log.fatal('COS is not working.')
        assert False

    # Modify manager entry
    topology_st.standalone.modify_s(MANAGER_DN, [(ldap.MOD_REPLACE, 'roomnumber', b'2')])

    # Confirm COS is returning the new value
    if _room_of_user() != '2':
        log.fatal('COS is not working after manager update.')
        assert False

    log.info('Test complete')
def test_ticket47927_one(topology_st):
    '''
    Check that uniqueness is enforce on all SUFFIX
    '''
    UNIQUE_VALUE = b'1234'
    try:
        topology_st.standalone.modify_s(USER_1_DN,
                                        [(ldap.MOD_REPLACE, 'telephonenumber', UNIQUE_VALUE)])
    except ldap.LDAPError as e:
        log.fatal('test_ticket47927_one: Failed to set the telephonenumber for %s: %s' % (USER_1_DN, e.args[0]['desc']))
        assert False

    # we expect to fail because user1 is in the scope of the plugin
    try:
        topology_st.standalone.modify_s(USER_2_DN,
                                        [(ldap.MOD_REPLACE, 'telephonenumber', UNIQUE_VALUE)])
        log.fatal('test_ticket47927_one: unexpected success to set the telephonenumber for %s' % (USER_2_DN))
        assert False
    except ldap.LDAPError as e:
        # Bug fix: expected failures were logged at fatal level; use info
        log.info('test_ticket47927_one: Failed (expected) to set the telephonenumber for %s: %s' % (
            USER_2_DN, e.args[0]['desc']))

    # we expect to fail because user3 is also in the scope of the plugin
    try:
        topology_st.standalone.modify_s(USER_3_DN,
                                        [(ldap.MOD_REPLACE, 'telephonenumber', UNIQUE_VALUE)])
        log.fatal('test_ticket47927_one: unexpected success to set the telephonenumber for %s' % (USER_3_DN))
        assert False
    except ldap.LDAPError as e:
        log.info('test_ticket47927_one: Failed (expected) to set the telephonenumber for %s: %s' % (
            USER_3_DN, e.args[0]['desc']))


def test_ticket47927_two(topology_st):
    '''
    Exclude the EXCLUDED_CONTAINER_DN from the uniqueness plugin
    '''
    try:
        topology_st.standalone.modify_s('cn=' + PLUGIN_ATTR_UNIQUENESS + ',cn=plugins,cn=config',
                                        [(ldap.MOD_REPLACE, 'uniqueness-exclude-subtrees', ensure_bytes(EXCLUDED_CONTAINER_DN))])
    except ldap.LDAPError as e:
        log.fatal('test_ticket47927_two: Failed to configure plugin for to exclude %s: error %s' % (
            EXCLUDED_CONTAINER_DN, e.args[0]['desc']))
        assert False
    topology_st.standalone.restart(timeout=120)


def test_ticket47927_three(topology_st):
    '''
    Check that uniqueness is enforced on full SUFFIX except EXCLUDED_CONTAINER_DN
    First case: it exists an entry (with the same attribute value) in the scope
    of the plugin and we set the value in an entry that is in an excluded scope
    '''
    UNIQUE_VALUE = b'9876'
    try:
        topology_st.standalone.modify_s(USER_1_DN,
                                        [(ldap.MOD_REPLACE, 'telephonenumber', UNIQUE_VALUE)])
    except ldap.LDAPError as e:
        log.fatal('test_ticket47927_three: Failed to set the telephonenumber ' + e.args[0]['desc'])
        assert False

    # we should not be allowed to set this value (because user1 is in the scope)
    try:
        topology_st.standalone.modify_s(USER_2_DN,
                                        [(ldap.MOD_REPLACE, 'telephonenumber', UNIQUE_VALUE)])
        log.fatal('test_ticket47927_three: unexpected success to set the telephonenumber for %s' % (USER_2_DN))
        assert False
    except ldap.LDAPError as e:
        log.info('test_ticket47927_three: Failed (expected) to set the telephonenumber for %s: %s' % (
            USER_2_DN, e.args[0]['desc']))

    # USER_3_DN is in EXCLUDED_CONTAINER_DN so update should be successful
    try:
        topology_st.standalone.modify_s(USER_3_DN,
                                        [(ldap.MOD_REPLACE, 'telephonenumber', UNIQUE_VALUE)])
        # Bug fix: a successful update was logged with log.fatal(); use info
        log.info('test_ticket47927_three: success to set the telephonenumber for %s' % (USER_3_DN))
    except ldap.LDAPError as e:
        log.fatal('test_ticket47927_three: Failed (unexpected) to set the telephonenumber for %s: %s' % (
            USER_3_DN, e.args[0]['desc']))
        assert False


def test_ticket47927_four(topology_st):
    '''
    Check that uniqueness is enforced on full SUFFIX except EXCLUDED_CONTAINER_DN
    Second case: it exists an entry (with the same attribute value) in an excluded scope
    of the plugin and we set the value in an entry is in the scope
    '''
    UNIQUE_VALUE = b'1111'
    # USER_3_DN is in EXCLUDED_CONTAINER_DN so update should be successful
    try:
        topology_st.standalone.modify_s(USER_3_DN,
                                        [(ldap.MOD_REPLACE, 'telephonenumber', UNIQUE_VALUE)])
        log.info('test_ticket47927_four: success to set the telephonenumber for %s' % USER_3_DN)
    except ldap.LDAPError as e:
        log.fatal('test_ticket47927_four: Failed (unexpected) to set the telephonenumber for %s: %s' % (
            USER_3_DN, e.args[0]['desc']))
        assert False

    # we should be allowed to set this value (because user3 is excluded from scope)
    try:
        topology_st.standalone.modify_s(USER_1_DN,
                                        [(ldap.MOD_REPLACE, 'telephonenumber', UNIQUE_VALUE)])
    except ldap.LDAPError as e:
        log.fatal(
            'test_ticket47927_four: Failed to set the telephonenumber for %s: %s' % (USER_1_DN, e.args[0]['desc']))
        assert False

    # we should not be allowed to set this value (because user1 is in the scope)
    try:
        topology_st.standalone.modify_s(USER_2_DN,
                                        [(ldap.MOD_REPLACE, 'telephonenumber', UNIQUE_VALUE)])
        log.fatal('test_ticket47927_four: unexpected success to set the telephonenumber %s' % USER_2_DN)
        assert False
    except ldap.LDAPError as e:
        log.info('test_ticket47927_four: Failed (expected) to set the telephonenumber for %s: %s' % (
            USER_2_DN, e.args[0]['desc']))


def test_ticket47927_five(topology_st):
    '''
    Exclude the EXCLUDED_BIS_CONTAINER_DN from the uniqueness plugin
    '''
    try:
        topology_st.standalone.modify_s('cn=' + PLUGIN_ATTR_UNIQUENESS + ',cn=plugins,cn=config',
                                        [(ldap.MOD_ADD, 'uniqueness-exclude-subtrees', ensure_bytes(EXCLUDED_BIS_CONTAINER_DN))])
    except ldap.LDAPError as e:
        log.fatal('test_ticket47927_five: Failed to configure plugin for to exclude %s: error %s' % (
            EXCLUDED_BIS_CONTAINER_DN, e.args[0]['desc']))
        assert False
    topology_st.standalone.restart(timeout=120)
    topology_st.standalone.getEntry('cn=' + PLUGIN_ATTR_UNIQUENESS + ',cn=plugins,cn=config', ldap.SCOPE_BASE)


def test_ticket47927_six(topology_st):
    '''
    Check that uniqueness is enforced on full SUFFIX except EXCLUDED_CONTAINER_DN
    and EXCLUDED_BIS_CONTAINER_DN
    First case: it exists an entry (with the same attribute value) in the scope
    of the plugin and we set the value in an entry that is in an excluded scope
    '''
    UNIQUE_VALUE = b'222'
    try:
        topology_st.standalone.modify_s(USER_1_DN,
                                        [(ldap.MOD_REPLACE, 'telephonenumber', UNIQUE_VALUE)])
    except ldap.LDAPError as e:
        log.fatal('test_ticket47927_six: Failed to set the telephonenumber ' + e.args[0]['desc'])
        assert False

    # we should not be allowed to set this value (because user1 is in the scope)
    try:
        topology_st.standalone.modify_s(USER_2_DN,
                                        [(ldap.MOD_REPLACE, 'telephonenumber', UNIQUE_VALUE)])
        log.fatal('test_ticket47927_six: unexpected success to set the telephonenumber for %s' % (USER_2_DN))
        assert False
    except ldap.LDAPError as e:
        log.info('test_ticket47927_six: Failed (expected) to set the telephonenumber for %s: %s' % (
            USER_2_DN, e.args[0]['desc']))

    # USER_3_DN is in EXCLUDED_CONTAINER_DN so update should be successful
    try:
        topology_st.standalone.modify_s(USER_3_DN,
                                        [(ldap.MOD_REPLACE, 'telephonenumber', UNIQUE_VALUE)])
        log.info('test_ticket47927_six: success to set the telephonenumber for %s' % (USER_3_DN))
    except ldap.LDAPError as e:
        log.fatal('test_ticket47927_six: Failed (unexpected) to set the telephonenumber for %s: %s' % (
            USER_3_DN, e.args[0]['desc']))
        assert False

    # USER_4_DN is in EXCLUDED_BIS_CONTAINER_DN so update should be successful
    # (comment fixed: it previously claimed EXCLUDED_CONTAINER_DN)
    try:
        topology_st.standalone.modify_s(USER_4_DN,
                                        [(ldap.MOD_REPLACE, 'telephonenumber', UNIQUE_VALUE)])
        log.info('test_ticket47927_six: success to set the telephonenumber for %s' % (USER_4_DN))
    except ldap.LDAPError as e:
        log.fatal('test_ticket47927_six: Failed (unexpected) to set the telephonenumber for %s: %s' % (
            USER_4_DN, e.args[0]['desc']))
        assert False
class modifySecondBackendThread(threading.Thread):
    """Daemon thread that repeatedly modifies SECOND_SUFFIX while the main
    thread updates the group on the first backend (deadlock reproduction)."""

    def __init__(self, inst, timeout):
        """
        :param inst: DirSrv instance whose connection is cloned for this thread
        :param timeout: LDAP operation timeout (seconds)
        """
        threading.Thread.__init__(self)
        self.daemon = True
        self.inst = inst
        self.timeout = timeout
        # Bug fix: an "assert False" inside run() cannot fail the test (the
        # AssertionError dies with the worker thread and leaked the cloned
        # connection).  Failures are recorded here and must be checked by
        # the caller after join().
        self.error = False

    def run(self):
        conn = self.inst.clone()
        conn.set_option(ldap.OPT_TIMEOUT, self.timeout)
        log.info('Modify second suffix...')
        for _ in range(5000):
            try:
                conn.modify_s(SECOND_SUFFIX,
                              [(ldap.MOD_REPLACE,
                                'description',
                                b'new description')])
            except ldap.LDAPError as e:
                log.fatal('Failed to modify second suffix - error: %s' %
                          (e.args[0]['desc']))
                self.error = True
                break
        conn.close()
        log.info('Finished modifying second suffix')


def test_ticket47931(topology_st):
    """Test Retro Changelog and MemberOf deadlock fix.
    Verification steps:
    - Enable retro cl and memberOf.
    - Create two backends: A & B.
    - Configure retrocl scoping for backend A.
    - Configure memberOf plugin for uniquemember
    - Create group in backend A.
    - In parallel, add members to the group on A, and make modifications
      to entries in backend B.
    - Make sure the server does not hang during the updates to both
      backends.
    """

    # Enable dynamic plugins to make plugin configuration easier
    try:
        topology_st.standalone.modify_s(DN_CONFIG,
                                        [(ldap.MOD_REPLACE,
                                          'nsslapd-dynamic-plugins',
                                          b'on')])
    except ldap.LDAPError as e:
        log.error('Failed to enable dynamic plugins! ' + e.args[0]['desc'])
        assert False

    # Enable the plugins
    topology_st.standalone.plugins.enable(name=PLUGIN_MEMBER_OF)
    topology_st.standalone.plugins.enable(name=PLUGIN_RETRO_CHANGELOG)

    # Create second backend
    topology_st.standalone.backend.create(SECOND_SUFFIX, {BACKEND_NAME: SECOND_BACKEND})
    topology_st.standalone.mappingtree.create(SECOND_SUFFIX, bename=SECOND_BACKEND)

    # Create the root node of the second backend
    try:
        topology_st.standalone.add_s(Entry((SECOND_SUFFIX,
                                            {'objectclass': 'top domain'.split(),
                                             'dc': 'deadlock'})))
    except ldap.LDAPError as e:
        log.fatal('Failed to create suffix entry: error ' + e.args[0]['desc'])
        assert False

    # Configure retrocl scope
    try:
        topology_st.standalone.modify_s(RETROCL_PLUGIN_DN,
                                        [(ldap.MOD_REPLACE,
                                          'nsslapd-include-suffix',
                                          ensure_bytes(DEFAULT_SUFFIX))])
    except ldap.LDAPError as e:
        log.error('Failed to configure retrocl plugin: ' + e.args[0]['desc'])
        assert False

    # Configure memberOf group attribute
    try:
        topology_st.standalone.modify_s(MEMBEROF_PLUGIN_DN,
                                        [(ldap.MOD_REPLACE,
                                          'memberofgroupattr',
                                          b'uniquemember')])
    except ldap.LDAPError as e:
        log.fatal('Failed to configure memberOf plugin: error ' + e.args[0]['desc'])
        assert False
    time.sleep(1)

    # Create group
    try:
        topology_st.standalone.add_s(Entry((GROUP_DN,
                                            {'objectclass': 'top extensibleObject'.split(),
                                             'cn': 'group'})))
    except ldap.LDAPError as e:
        # Bug fix: message typo "grouo" corrected
        log.fatal('Failed to add group: error ' + e.args[0]['desc'])
        assert False

    # Create 1499 entries (uid=member1 .. uid=member1499), future group members
    for idx in range(1, 1500):
        try:
            USER_DN = ("uid=member%d,%s" % (idx, DEFAULT_SUFFIX))
            topology_st.standalone.add_s(Entry((USER_DN,
                                                {'objectclass': 'top extensibleObject'.split(),
                                                 'uid': 'member%d' % (idx)})))
        except ldap.LDAPError as e:
            log.fatal('Failed to add user (%s): error %s' % (USER_DN, e.args[0]['desc']))
            assert False

    # Modify second backend (separate thread)
    mod_backend_thrd = modifySecondBackendThread(topology_st.standalone, TIME_OUT)
    mod_backend_thrd.start()
    time.sleep(1)

    # Add members to the group - set timeout
    log.info('Adding members to the group...')
    topology_st.standalone.set_option(ldap.OPT_TIMEOUT, TIME_OUT)
    for idx in range(1, 1500):
        try:
            MEMBER_VAL = ("uid=member%d,%s" % (idx, DEFAULT_SUFFIX))
            topology_st.standalone.modify_s(GROUP_DN,
                                            [(ldap.MOD_ADD,
                                              'uniquemember',
                                              ensure_bytes(MEMBER_VAL))])
        except ldap.TIMEOUT:
            log.fatal('Deadlock! Bug verification failed.')
            assert False
        except ldap.LDAPError as e:
            log.fatal('Failed to update group(not a deadlock) member (%s) - error: %s' %
                      (MEMBER_VAL, e.args[0]['desc']))
            assert False
    log.info('Finished adding members to the group.')

    # Wait for the thread to finish and surface any failure it recorded
    mod_backend_thrd.join()
    assert not mod_backend_thrd.error

    # No timeout, test passed!
    log.info('Test complete\n')
+# --- END COPYRIGHT BLOCK --- +# +import logging +import shutil + +import pytest +from lib389.tasks import * +from lib389.utils import * +from lib389.topologies import topology_st + +from lib389._constants import DATA_DIR, DEFAULT_SUFFIX + +pytestmark = pytest.mark.tier2 + +log = logging.getLogger(__name__) + + +def test_ticket47953(topology_st): + """ + Test that we can delete an aci that has an invalid syntax. + Sart by importing an ldif with a "bad" aci, then simply try + to remove that value without error. + """ + + log.info('Testing Ticket 47953 - Test we can delete aci that has invalid syntax') + + # + # Import an invalid ldif + # + ldif_file = (topology_st.standalone.getDir(__file__, DATA_DIR) + + "ticket47953/ticket47953.ldif") + try: + ldif_dir = topology_st.standalone.get_ldif_dir() + shutil.copy(ldif_file, ldif_dir) + ldif_file = ldif_dir + '/ticket47953.ldif' + except: + log.fatal('Failed to copy ldif to instance ldif dir') + assert False + importTask = Tasks(topology_st.standalone) + args = {TASK_WAIT: True} + try: + importTask.importLDIF(DEFAULT_SUFFIX, None, ldif_file, args) + except ValueError: + assert False + + time.sleep(2) + + # + # Delete the invalid aci + # + acival = '(targetattr ="fffff")(version 3.0;acl "Directory Administrators Group"' + \ + ';allow (all) (groupdn = "ldap:///cn=Directory Administrators, dc=example,dc=com");)' + + log.info('Attempting to remove invalid aci...') + try: + topology_st.standalone.modify_s(DEFAULT_SUFFIX, [(ldap.MOD_DELETE, 'aci', ensure_bytes(acival))]) + log.info('Removed invalid aci.') + except ldap.LDAPError as e: + log.error('Failed to remove invalid aci: ' + e.args[0]['desc']) + assert False + + +if __name__ == '__main__': + # Run isolated + # -s for DEBUG mode + CURRENT_FILE = os.path.realpath(__file__) + pytest.main("-s %s" % CURRENT_FILE) diff --git a/dirsrvtests/tests/tickets/ticket47963_test.py b/dirsrvtests/tests/tickets/ticket47963_test.py new file mode 100644 index 0000000..8736511 --- /dev/null 
+++ b/dirsrvtests/tests/tickets/ticket47963_test.py @@ -0,0 +1,152 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2016 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. +# --- END COPYRIGHT BLOCK --- +# +import logging + +import pytest +from lib389.tasks import * +from lib389.utils import * +from lib389.topologies import topology_st + +from lib389._constants import DEFAULT_SUFFIX, PLUGIN_MEMBER_OF + +pytestmark = pytest.mark.tier2 + +logging.getLogger(__name__).setLevel(logging.DEBUG) +log = logging.getLogger(__name__) + + +def test_ticket47963(topology_st): + ''' + Test that the memberOf plugin works correctly after setting: + + memberofskipnested: on + + ''' + PLUGIN_DN = 'cn=' + PLUGIN_MEMBER_OF + ',cn=plugins,cn=config' + USER_DN = 'uid=test_user,' + DEFAULT_SUFFIX + GROUP_DN1 = 'cn=group1,' + DEFAULT_SUFFIX + GROUP_DN2 = 'cn=group2,' + DEFAULT_SUFFIX + GROUP_DN3 = 'cn=group3,' + DEFAULT_SUFFIX + + # + # Enable the plugin and configure the skiop nest attribute, then restart the server + # + topology_st.standalone.plugins.enable(name=PLUGIN_MEMBER_OF) + try: + topology_st.standalone.modify_s(PLUGIN_DN, [(ldap.MOD_REPLACE, 'memberofskipnested', b'on')]) + except ldap.LDAPError as e: + log.error('test_automember: Failed to modify config entry: error ' + e.args[0]['desc']) + assert False + + topology_st.standalone.restart(timeout=10) + + # + # Add our groups, users, memberships, etc + # + try: + topology_st.standalone.add_s(Entry((USER_DN, { + 'objectclass': 'top extensibleObject'.split(), + 'uid': 'test_user' + }))) + except ldap.LDAPError as e: + log.error('Failed to add teset user: error ' + e.args[0]['desc']) + assert False + + try: + topology_st.standalone.add_s(Entry((GROUP_DN1, { + 'objectclass': 'top groupOfNames groupOfUniqueNames extensibleObject'.split(), + 'cn': 'group1', + 'member': USER_DN + }))) + except ldap.LDAPError as e: + log.error('Failed to add group1: error ' + 
e.args[0]['desc']) + assert False + + try: + topology_st.standalone.add_s(Entry((GROUP_DN2, { + 'objectclass': 'top groupOfNames groupOfUniqueNames extensibleObject'.split(), + 'cn': 'group2', + 'member': USER_DN + }))) + except ldap.LDAPError as e: + log.error('Failed to add group2: error ' + e.args[0]['desc']) + assert False + + # Add group with no member(yet) + try: + topology_st.standalone.add_s(Entry((GROUP_DN3, { + 'objectclass': 'top groupOfNames groupOfUniqueNames extensibleObject'.split(), + 'cn': 'group' + }))) + except ldap.LDAPError as e: + log.error('Failed to add group3: error ' + e.args[0]['desc']) + assert False + time.sleep(1) + + # + # Test we have the correct memberOf values in the user entry + # + try: + member_filter = ('(&(memberOf=' + GROUP_DN1 + ')(memberOf=' + GROUP_DN2 + '))') + entries = topology_st.standalone.search_s(USER_DN, ldap.SCOPE_BASE, member_filter) + if not entries: + log.fatal('User is missing expected memberOf attrs') + assert False + except ldap.LDAPError as e: + log.fatal('Search for user1 failed: ' + e.args[0]['desc']) + assert False + + # Add the user to the group + try: + topology_st.standalone.modify_s(GROUP_DN3, [(ldap.MOD_ADD, 'member', ensure_bytes(USER_DN))]) + except ldap.LDAPError as e: + log.error('Failed to member to group: error ' + e.args[0]['desc']) + assert False + time.sleep(1) + + # Check that the test user is a "memberOf" all three groups + try: + member_filter = ('(&(memberOf=' + GROUP_DN1 + ')(memberOf=' + GROUP_DN2 + + ')(memberOf=' + GROUP_DN3 + '))') + entries = topology_st.standalone.search_s(USER_DN, ldap.SCOPE_BASE, member_filter) + if not entries: + log.fatal('User is missing expected memberOf attrs') + assert False + except ldap.LDAPError as e: + log.fatal('Search for user1 failed: ' + e.args[0]['desc']) + assert False + + # + # Delete group2, and check memberOf values in the user entry + # + try: + topology_st.standalone.delete_s(GROUP_DN2) + except ldap.LDAPError as e: + log.error('Failed to 
delete test group2: ' + e.args[0]['desc']) + assert False + time.sleep(1) + + try: + member_filter = ('(&(memberOf=' + GROUP_DN1 + ')(memberOf=' + GROUP_DN3 + '))') + entries = topology_st.standalone.search_s(USER_DN, ldap.SCOPE_BASE, member_filter) + if not entries: + log.fatal('User incorrect memberOf attrs') + assert False + except ldap.LDAPError as e: + log.fatal('Search for user1 failed: ' + e.args[0]['desc']) + assert False + + log.info('Test complete') + + +if __name__ == '__main__': + # Run isolated + # -s for DEBUG mode + CURRENT_FILE = os.path.realpath(__file__) + pytest.main("-s %s" % CURRENT_FILE) diff --git a/dirsrvtests/tests/tickets/ticket47970_test.py b/dirsrvtests/tests/tickets/ticket47970_test.py new file mode 100644 index 0000000..f59405d --- /dev/null +++ b/dirsrvtests/tests/tickets/ticket47970_test.py @@ -0,0 +1,89 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2016 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. 
+# --- END COPYRIGHT BLOCK --- +# +import logging + +import ldap.sasl +import pytest +from lib389.tasks import * +from lib389.topologies import topology_st + +pytestmark = pytest.mark.tier2 + +log = logging.getLogger(__name__) + +USER1_DN = "uid=user1,%s" % DEFAULT_SUFFIX +USER2_DN = "uid=user2,%s" % DEFAULT_SUFFIX + + +def test_ticket47970(topology_st): + """ + Testing that a failed SASL bind does not trigger account lockout - + which would attempt to update the passwordRetryCount on the root dse entry + """ + + log.info('Testing Ticket 47970 - Testing that a failed SASL bind does not trigger account lockout') + + # + # Enable account lockout + # + try: + topology_st.standalone.modify_s("cn=config", [(ldap.MOD_REPLACE, 'passwordLockout', b'on')]) + log.info('account lockout enabled.') + except ldap.LDAPError as e: + log.error('Failed to enable account lockout: ' + e.args[0]['desc']) + assert False + + try: + topology_st.standalone.modify_s("cn=config", [(ldap.MOD_REPLACE, 'passwordMaxFailure', b'5')]) + log.info('passwordMaxFailure set.') + except ldap.LDAPError as e: + log.error('Failed to to set passwordMaxFailure: ' + e.args[0]['desc']) + assert False + + # + # Perform SASL bind that should fail + # + failed_as_expected = False + try: + user_name = "mark" + pw = "secret" + auth_tokens = ldap.sasl.digest_md5(user_name, pw) + topology_st.standalone.sasl_interactive_bind_s("", auth_tokens) + except ldap.INVALID_CREDENTIALS as e: + log.info("SASL Bind failed as expected") + failed_as_expected = True + + if not failed_as_expected: + log.error("SASL bind unexpectedly succeeded!") + assert False + + # + # Check that passwordRetryCount was not set on the root dse entry + # + try: + entry = topology_st.standalone.search_s("", ldap.SCOPE_BASE, + "passwordRetryCount=*", + ['passwordRetryCount']) + except ldap.LDAPError as e: + log.error('Failed to search Root DSE entry: ' + e.args[0]['desc']) + assert False + + if entry: + log.error('Root DSE was incorrectly updated') + 
assert False + + # We passed + log.info('Root DSE was correctly not updated') + + +if __name__ == '__main__': + # Run isolated + # -s for DEBUG mode + CURRENT_FILE = os.path.realpath(__file__) + pytest.main("-s %s" % CURRENT_FILE) diff --git a/dirsrvtests/tests/tickets/ticket47976_test.py b/dirsrvtests/tests/tickets/ticket47976_test.py new file mode 100644 index 0000000..efdaeb1 --- /dev/null +++ b/dirsrvtests/tests/tickets/ticket47976_test.py @@ -0,0 +1,168 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2022 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. +# --- END COPYRIGHT BLOCK --- +# +import pytest +from lib389.tasks import * +from lib389.utils import * +from lib389.topologies import topology_st + +from lib389._constants import SUFFIX, DEFAULT_SUFFIX, PLUGIN_MANAGED_ENTRY, DN_LDBM + +pytestmark = pytest.mark.tier2 + +PEOPLE_OU = 'people' +PEOPLE_DN = "ou=%s,%s" % (PEOPLE_OU, SUFFIX) +GROUPS_OU = 'groups' +GROUPS_DN = "ou=%s,%s" % (GROUPS_OU, SUFFIX) +DEFINITIONS_CN = 'definitions' +DEFINITIONS_DN = "cn=%s,%s" % (DEFINITIONS_CN, SUFFIX) +TEMPLATES_CN = 'templates' +TEMPLATES_DN = "cn=%s,%s" % (TEMPLATES_CN, SUFFIX) +MANAGED_GROUP_TEMPLATES_CN = 'managed group templates' +MANAGED_GROUP_TEMPLATES_DN = 'cn=%s,%s' % (MANAGED_GROUP_TEMPLATES_CN, TEMPLATES_DN) +MANAGED_GROUP_MEP_TMPL_CN = 'UPG' +MANAGED_GROUP_MEP_TMPL_DN = 'cn=%s,%s' % (MANAGED_GROUP_MEP_TMPL_CN, MANAGED_GROUP_TEMPLATES_DN) +MANAGED_GROUP_DEF_CN = 'managed group definition' +MANAGED_GROUP_DEF_DN = 'cn=%s,%s' % (MANAGED_GROUP_DEF_CN, DEFINITIONS_DN) + +MAX_ACCOUNTS = 2 + +logging.getLogger(__name__).setLevel(logging.DEBUG) +log = logging.getLogger(__name__) + + +def test_ticket47976_init(topology_st): + """Create mep definitions and templates""" + + try: + topology_st.standalone.add_s(Entry((PEOPLE_DN, { + 'objectclass': "top extensibleObject".split(), + 'ou': 'people'}))) + except ldap.ALREADY_EXISTS: + pass + try: + 
topology_st.standalone.add_s(Entry((GROUPS_DN, { + 'objectclass': "top extensibleObject".split(), + 'ou': GROUPS_OU}))) + except ldap.ALREADY_EXISTS: + pass + topology_st.standalone.add_s(Entry((DEFINITIONS_DN, { + 'objectclass': "top nsContainer".split(), + 'cn': DEFINITIONS_CN}))) + topology_st.standalone.add_s(Entry((TEMPLATES_DN, { + 'objectclass': "top nsContainer".split(), + 'cn': TEMPLATES_CN}))) + topology_st.standalone.add_s(Entry((MANAGED_GROUP_DEF_DN, { + 'objectclass': "top extensibleObject".split(), + 'cn': MANAGED_GROUP_DEF_CN, + 'originScope': PEOPLE_DN, + 'originFilter': '(objectclass=posixAccount)', + 'managedBase': GROUPS_DN, + 'managedTemplate': MANAGED_GROUP_MEP_TMPL_DN}))) + + topology_st.standalone.add_s(Entry((MANAGED_GROUP_TEMPLATES_DN, { + 'objectclass': "top nsContainer".split(), + 'cn': MANAGED_GROUP_TEMPLATES_CN}))) + + topology_st.standalone.add_s(Entry((MANAGED_GROUP_MEP_TMPL_DN, { + 'objectclass': "top mepTemplateEntry".split(), + 'cn': MANAGED_GROUP_MEP_TMPL_CN, + 'mepRDNAttr': 'cn', + 'mepStaticAttr': ['objectclass: posixGroup', + 'objectclass: extensibleObject'], + 'mepMappedAttr': ['cn: $cn|uid: $cn', + 'gidNumber: $uidNumber']}))) + + topology_st.standalone.plugins.enable(name=PLUGIN_MANAGED_ENTRY) + topology_st.standalone.restart(timeout=10) + + +def test_ticket47976_1(topology_st): + mod = [(ldap.MOD_REPLACE, 'nsslapd-pluginConfigArea', ensure_bytes(DEFINITIONS_DN))] + topology_st.standalone.modify_s('cn=%s,cn=plugins,cn=config' % PLUGIN_MANAGED_ENTRY, mod) + topology_st.standalone.stop(timeout=10) + topology_st.standalone.start(timeout=10) + for cpt in range(MAX_ACCOUNTS): + name = "user%d" % (cpt) + topology_st.standalone.add_s(Entry(("uid=%s,%s" % (name, PEOPLE_DN), { + 'objectclass': 'top posixAccount extensibleObject'.split(), + 'uid': name, + 'cn': name, + 'uidNumber': '1', + 'gidNumber': '1', + 'homeDirectory': '/home/%s' % name + }))) + + +def test_ticket47976_2(topology_st): + """It reimports the database with a very 
large page size + so all the entries (user and its private group). + """ + + log.info('Test complete') + mod = [(ldap.MOD_REPLACE, 'nsslapd-db-page-size', ensure_bytes(str(128 * 1024)))] + topology_st.standalone.modify_s(DN_LDBM, mod) + + # Get the the full path and name for our LDIF we will be exporting + log.info('Export LDIF file...') + ldif_dir = topology_st.standalone.get_ldif_dir() + ldif_file = ldif_dir + "/export.ldif" + args = {EXPORT_REPL_INFO: False, + TASK_WAIT: True} + exportTask = Tasks(topology_st.standalone) + try: + exportTask.exportLDIF(DEFAULT_SUFFIX, None, ldif_file, args) + except ValueError: + assert False + # import the new ldif file + log.info('Import LDIF file...') + importTask = Tasks(topology_st.standalone) + args = {TASK_WAIT: True} + try: + importTask.importLDIF(DEFAULT_SUFFIX, None, ldif_file, args) + os.remove(ldif_file) + except ValueError: + os.remove(ldif_file) + assert False + + +def test_ticket47976_3(topology_st): + """A single delete of a user should hit 47976, because mep post op will + delete its related group. + """ + + log.info('Testing if the delete will hang or not') + # log.info("\n\nAttach\n\n debugger") + # time.sleep(60) + topology_st.standalone.set_option(ldap.OPT_TIMEOUT, 5) + try: + for cpt in range(MAX_ACCOUNTS): + name = "user%d" % (cpt) + topology_st.standalone.delete_s("uid=%s,%s" % (name, PEOPLE_DN)) + except ldap.TIMEOUT as e: + log.fatal('Timeout... 
likely it hangs (47976)') + assert False + + # check the entry has been deleted + for cpt in range(MAX_ACCOUNTS): + try: + name = "user%d" % (cpt) + topology_st.standalone.getEntry("uid=%s,%s" % (name, PEOPLE_DN), ldap.SCOPE_BASE, 'objectclass=*') + assert False + except ldap.NO_SUCH_OBJECT: + log.info('%s was correctly deleted' % name) + pass + + assert cpt == (MAX_ACCOUNTS - 1) + + +if __name__ == '__main__': + # Run isolated + # -s for DEBUG mode + CURRENT_FILE = os.path.realpath(__file__) + pytest.main("-s %s" % CURRENT_FILE) diff --git a/dirsrvtests/tests/tickets/ticket47980_test.py b/dirsrvtests/tests/tickets/ticket47980_test.py new file mode 100644 index 0000000..61cda2f --- /dev/null +++ b/dirsrvtests/tests/tickets/ticket47980_test.py @@ -0,0 +1,595 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2016 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. +# --- END COPYRIGHT BLOCK --- +# +import logging + +import ldap.sasl +import pytest +from lib389.tasks import * +from lib389.topologies import topology_st + +from lib389._constants import DN_CONFIG, DEFAULT_SUFFIX + +pytestmark = pytest.mark.tier2 + +log = logging.getLogger(__name__) + +BRANCH1 = 'ou=level1,' + DEFAULT_SUFFIX +BRANCH2 = 'ou=level2,ou=level1,' + DEFAULT_SUFFIX +BRANCH3 = 'ou=level3,ou=level2,ou=level1,' + DEFAULT_SUFFIX +BRANCH4 = 'ou=people,' + DEFAULT_SUFFIX +BRANCH5 = 'ou=lower,ou=people,' + DEFAULT_SUFFIX +BRANCH6 = 'ou=lower,ou=lower,ou=people,' + DEFAULT_SUFFIX +USER1_DN = 'uid=user1,%s' % (BRANCH1) +USER2_DN = 'uid=user2,%s' % (BRANCH2) +USER3_DN = 'uid=user3,%s' % (BRANCH3) +USER4_DN = 'uid=user4,%s' % (BRANCH4) +USER5_DN = 'uid=user5,%s' % (BRANCH5) +USER6_DN = 'uid=user6,%s' % (BRANCH6) + +BRANCH1_CONTAINER = 'cn=nsPwPolicyContainer,ou=level1,dc=example,dc=com' +BRANCH1_PWP = 'cn=cn\3DnsPwPolicyEntry\2Cou\3Dlevel1\2Cdc\3Dexample\2Cdc\3Dcom,' + \ + 'cn=nsPwPolicyContainer,ou=level1,dc=example,dc=com' 
+BRANCH1_COS_TMPL = 'cn=cn\3DnsPwTemplateEntry\2Cou\3Dlevel1\2Cdc\3Dexample\2Cdc\3Dcom,' + \ + 'cn=nsPwPolicyContainer,ou=level1,dc=example,dc=com' +BRANCH1_COS_DEF = 'cn=nsPwPolicy_CoS,ou=level1,dc=example,dc=com' + +BRANCH2_CONTAINER = 'cn=nsPwPolicyContainer,ou=level2,ou=level1,dc=example,dc=com' +BRANCH2_PWP = 'cn=cn\3DnsPwPolicyEntry\2Cou\3Dlevel2\2Cou\3Dlevel1\2Cdc\3Dexample\2Cdc\3Dcom,' + \ + 'cn=nsPwPolicyContainer,ou=level2,ou=level1,dc=example,dc=com' +BRANCH2_COS_TMPL = 'cn=cn\3DnsPwTemplateEntry\2Cou\3Dlevel2\2Cou\3Dlevel1\2Cdc\3Dexample\2Cdc\3Dcom,' + \ + 'cn=nsPwPolicyContainer,ou=level2,ou=level1,dc=example,dc=com' +BRANCH2_COS_DEF = 'cn=nsPwPolicy_CoS,ou=level2,ou=level1,dc=example,dc=com' + +BRANCH3_CONTAINER = 'cn=nsPwPolicyContainer,ou=level3,ou=level2,ou=level1,dc=example,dc=com' +BRANCH3_PWP = 'cn=cn\3DnsPwPolicyEntry\2Cou\3Dlevel3\2Cou\3Dlevel2\2Cou\3Dlevel1\2Cdc\3Dexample\2Cdc\3Dcom,' + \ + 'cn=nsPwPolicyContainer,ou=level3,ou=level2,ou=level1,dc=example,dc=com' +BRANCH3_COS_TMPL = 'cn=cn\3DnsPwTemplateEntry\2Cou\3Dlevel3\2Cou\3Dlevel2\2Cou\3Dlevel1\2Cdc\3Dexample\2Cdc\3Dcom,' + \ + 'cn=nsPwPolicyContainer,ou=level3,ou=level2,ou=level1,dc=example,dc=com' +BRANCH3_COS_DEF = 'cn=nsPwPolicy_CoS,ou=level3,ou=level2,ou=level1,dc=example,dc=com' + +BRANCH4_CONTAINER = 'cn=nsPwPolicyContainer,ou=people,dc=example,dc=com' +BRANCH4_PWP = 'cn=cn\3DnsPwPolicyEntry\2Cou\3DPeople\2Cdc\3Dexample\2Cdc\3Dcom,' + \ + 'cn=nsPwPolicyContainer,ou=People,dc=example,dc=com' +BRANCH4_COS_TMPL = 'cn=cn\3DnsPwTemplateEntry\2Cou\3DPeople\2Cdc\3Dexample\2Cdc\3Dcom,' + \ + 'cn=nsPwPolicyContainer,ou=People,dc=example,dc=com' +BRANCH4_COS_DEF = 'cn=nsPwPolicy_CoS,ou=people,dc=example,dc=com' + +BRANCH5_CONTAINER = 'cn=nsPwPolicyContainer,ou=lower,ou=people,dc=example,dc=com' +BRANCH5_PWP = 'cn=cn\3DnsPwPolicyEntry\2Cou\3Dlower\2Cou\3DPeople\2Cdc\3Dexample\2Cdc\3Dcom,' + \ + 'cn=nsPwPolicyContainer,ou=lower,ou=People,dc=example,dc=com' +BRANCH5_COS_TMPL = 
'cn=cn\3DnsPwTemplateEntry\2Cou\3Dlower\2Cou\3DPeople\2Cdc\3Dexample\2Cdc\3Dcom,' + \ + 'cn=nsPwPolicyContainer,ou=lower,ou=People,dc=example,dc=com' +BRANCH5_COS_DEF = 'cn=nsPwPolicy_CoS,ou=lower,ou=People,dc=example,dc=com' + +BRANCH6_CONTAINER = 'cn=nsPwPolicyContainer,ou=lower,ou=lower,ou=People,dc=example,dc=com' +BRANCH6_PWP = 'cn=cn\3DnsPwPolicyEntry\2Cou\3Dlower\2Cou\3Dlower\2Cou\3DPeople\2Cdc\3Dexample\2Cdc\3Dcom,' + \ + 'cn=nsPwPolicyContainer,ou=lower,ou=lower,ou=People,dc=example,dc=com' +BRANCH6_COS_TMPL = 'cn=cn\3DnsPwTemplateEntry\2Cou\3Dlower\2Cou\3Dlower\2Cou\3DPeople\2Cdc\3Dexample\2Cdc\3Dcom,' + \ + 'cn=nsPwPolicyContainer,ou=lower,ou=lower,ou=People,dc=example,dc=com' +BRANCH6_COS_DEF = 'cn=nsPwPolicy_CoS,ou=lower,ou=lower,ou=People,dc=example,dc=com' + + +def test_ticket47980(topology_st): + """ + Multiple COS pointer definitions that use the same attribute are not correctly ordered. + The cos plugin was incorrectly sorting the attribute indexes based on subtree, which lead + to the wrong cos attribute value being applied to the entry. 
+ """ + + log.info('Testing Ticket 47980 - Testing multiple nested COS pointer definitions are processed correctly') + + # Add our nested branches + try: + topology_st.standalone.add_s(Entry((BRANCH1, { + 'objectclass': 'top extensibleObject'.split(), + 'ou': 'level1' + }))) + except ldap.LDAPError as e: + log.error('Failed to add level1: error ' + e.args[0]['desc']) + assert False + + try: + topology_st.standalone.add_s(Entry((BRANCH2, { + 'objectclass': 'top extensibleObject'.split(), + 'ou': 'level2' + }))) + except ldap.LDAPError as e: + log.error('Failed to add level2: error ' + e.args[0]['desc']) + assert False + + try: + topology_st.standalone.add_s(Entry((BRANCH3, { + 'objectclass': 'top extensibleObject'.split(), + 'uid': 'level3' + }))) + except ldap.LDAPError as e: + log.error('Failed to add level3: error ' + e.args[0]['desc']) + assert False + + # People branch, might already exist + try: + topology_st.standalone.add_s(Entry((BRANCH4, { + 'objectclass': 'top extensibleObject'.split(), + 'ou': 'level4' + }))) + except ldap.ALREADY_EXISTS: + pass + except ldap.LDAPError as e: + log.error('Failed to add level4: error ' + e.args[0]['desc']) + assert False + + try: + topology_st.standalone.add_s(Entry((BRANCH5, { + 'objectclass': 'top extensibleObject'.split(), + 'ou': 'level5' + }))) + except ldap.LDAPError as e: + log.error('Failed to add level5: error ' + e.args[0]['desc']) + assert False + + try: + topology_st.standalone.add_s(Entry((BRANCH6, { + 'objectclass': 'top extensibleObject'.split(), + 'uid': 'level6' + }))) + except ldap.LDAPError as e: + log.error('Failed to add level6: error ' + e.args[0]['desc']) + assert False + + # Add users to each branch + try: + topology_st.standalone.add_s(Entry((USER1_DN, { + 'objectclass': 'top extensibleObject'.split(), + 'uid': 'user1' + }))) + except ldap.LDAPError as e: + log.error('Failed to add user1: error ' + e.args[0]['desc']) + assert False + + try: + topology_st.standalone.add_s(Entry((USER2_DN, { + 
'objectclass': 'top extensibleObject'.split(), + 'uid': 'user2' + }))) + except ldap.LDAPError as e: + log.error('Failed to add user2: error ' + e.args[0]['desc']) + assert False + + try: + topology_st.standalone.add_s(Entry((USER3_DN, { + 'objectclass': 'top extensibleObject'.split(), + 'uid': 'user3' + }))) + except ldap.LDAPError as e: + log.error('Failed to add user3: error ' + e.args[0]['desc']) + assert False + + try: + topology_st.standalone.add_s(Entry((USER4_DN, { + 'objectclass': 'top extensibleObject'.split(), + 'uid': 'user4' + }))) + except ldap.LDAPError as e: + log.error('Failed to add user4: error ' + e.args[0]['desc']) + assert False + + try: + topology_st.standalone.add_s(Entry((USER5_DN, { + 'objectclass': 'top extensibleObject'.split(), + 'uid': 'user5' + }))) + except ldap.LDAPError as e: + log.error('Failed to add user5: error ' + e.args[0]['desc']) + assert False + + try: + topology_st.standalone.add_s(Entry((USER6_DN, { + 'objectclass': 'top extensibleObject'.split(), + 'uid': 'user6' + }))) + except ldap.LDAPError as e: + log.error('Failed to add user6: error ' + e.args[0]['desc']) + assert False + + # Enable password policy + try: + topology_st.standalone.modify_s(DN_CONFIG, [(ldap.MOD_REPLACE, 'nsslapd-pwpolicy-local', b'on')]) + except ldap.LDAPError as e: + log.error('Failed to set pwpolicy-local: error ' + e.args[0]['desc']) + assert False + + # + # Add subtree policy to branch 1 + # + # Add the container + try: + topology_st.standalone.add_s(Entry((BRANCH1_CONTAINER, { + 'objectclass': 'top nsContainer'.split(), + 'cn': 'nsPwPolicyContainer' + }))) + except ldap.LDAPError as e: + log.error('Failed to add subtree container for level1: error ' + e.args[0]['desc']) + assert False + + # Add the password policy subentry + try: + topology_st.standalone.add_s(Entry((BRANCH1_PWP, { + 'objectclass': 'top ldapsubentry passwordpolicy'.split(), + 'cn': 'cn=nsPwPolicyEntry,ou=level1,dc=example,dc=com', + 'passwordMustChange': 'off', + 
'passwordExp': 'off', + 'passwordHistory': 'off', + 'passwordMinAge': '0', + 'passwordChange': 'off', + 'passwordStorageScheme': 'ssha' + }))) + except ldap.LDAPError as e: + log.error('Failed to add passwordpolicy for level1: error ' + e.args[0]['desc']) + assert False + + # Add the COS template + try: + topology_st.standalone.add_s(Entry((BRANCH1_COS_TMPL, { + 'objectclass': 'top ldapsubentry costemplate extensibleObject'.split(), + 'cn': 'cn=nsPwPolicyEntry,ou=level1,dc=example,dc=com', + 'cosPriority': '1', + 'cn': 'cn=nsPwTemplateEntry,ou=level1,dc=example,dc=com', + 'pwdpolicysubentry': BRANCH1_PWP + }))) + except ldap.LDAPError as e: + log.error('Failed to add COS template for level1: error ' + e.args[0]['desc']) + assert False + + # Add the COS definition + try: + topology_st.standalone.add_s(Entry((BRANCH1_COS_DEF, { + 'objectclass': 'top ldapsubentry cosSuperDefinition cosPointerDefinition'.split(), + 'cn': 'cn=nsPwPolicyEntry,ou=level1,dc=example,dc=com', + 'costemplatedn': BRANCH1_COS_TMPL, + 'cosAttribute': 'pwdpolicysubentry default operational-default' + }))) + except ldap.LDAPError as e: + log.error('Failed to add COS def for level1: error ' + e.args[0]['desc']) + assert False + + # + # Add subtree policy to branch 2 + # + # Add the container + try: + topology_st.standalone.add_s(Entry((BRANCH2_CONTAINER, { + 'objectclass': 'top nsContainer'.split(), + 'cn': 'nsPwPolicyContainer' + }))) + except ldap.LDAPError as e: + log.error('Failed to add subtree container for level2: error ' + e.args[0]['desc']) + assert False + + # Add the password policy subentry + try: + topology_st.standalone.add_s(Entry((BRANCH2_PWP, { + 'objectclass': 'top ldapsubentry passwordpolicy'.split(), + 'cn': 'cn=nsPwPolicyEntry,ou=level2,dc=example,dc=com', + 'passwordMustChange': 'off', + 'passwordExp': 'off', + 'passwordHistory': 'off', + 'passwordMinAge': '0', + 'passwordChange': 'off', + 'passwordStorageScheme': 'ssha' + }))) + except ldap.LDAPError as e: + log.error('Failed 
to add passwordpolicy for level2: error ' + e.args[0]['desc']) + assert False + + # Add the COS template + try: + topology_st.standalone.add_s(Entry((BRANCH2_COS_TMPL, { + 'objectclass': 'top ldapsubentry costemplate extensibleObject'.split(), + 'cn': 'cn=nsPwPolicyEntry,ou=level2,dc=example,dc=com', + 'cosPriority': '1', + 'cn': 'cn=nsPwTemplateEntry,ou=level2,dc=example,dc=com', + 'pwdpolicysubentry': BRANCH2_PWP + }))) + except ldap.LDAPError as e: + log.error('Failed to add COS template for level2: error ' + e.args[0]['desc']) + assert False + + # Add the COS definition + try: + topology_st.standalone.add_s(Entry((BRANCH2_COS_DEF, { + 'objectclass': 'top ldapsubentry cosSuperDefinition cosPointerDefinition'.split(), + 'cn': 'cn=nsPwPolicyEntry,ou=level2,dc=example,dc=com', + 'costemplatedn': BRANCH2_COS_TMPL, + 'cosAttribute': 'pwdpolicysubentry default operational-default' + }))) + except ldap.LDAPError as e: + log.error('Failed to add COS def for level2: error ' + e.args[0]['desc']) + assert False + + # + # Add subtree policy to branch 3 + # + # Add the container + try: + topology_st.standalone.add_s(Entry((BRANCH3_CONTAINER, { + 'objectclass': 'top nsContainer'.split(), + 'cn': 'nsPwPolicyContainer' + }))) + except ldap.LDAPError as e: + log.error('Failed to add subtree container for level3: error ' + e.args[0]['desc']) + assert False + + # Add the password policy subentry + try: + topology_st.standalone.add_s(Entry((BRANCH3_PWP, { + 'objectclass': 'top ldapsubentry passwordpolicy'.split(), + 'cn': 'cn=nsPwPolicyEntry,ou=level3,dc=example,dc=com', + 'passwordMustChange': 'off', + 'passwordExp': 'off', + 'passwordHistory': 'off', + 'passwordMinAge': '0', + 'passwordChange': 'off', + 'passwordStorageScheme': 'ssha' + }))) + except ldap.LDAPError as e: + log.error('Failed to add passwordpolicy for level3: error ' + e.args[0]['desc']) + assert False + + # Add the COS template + try: + topology_st.standalone.add_s(Entry((BRANCH3_COS_TMPL, { + 'objectclass': 'top 
ldapsubentry costemplate extensibleObject'.split(), + 'cn': 'cn=nsPwPolicyEntry,ou=level3,dc=example,dc=com', + 'cosPriority': '1', + 'cn': 'cn=nsPwTemplateEntry,ou=level3,dc=example,dc=com', + 'pwdpolicysubentry': BRANCH3_PWP + }))) + except ldap.LDAPError as e: + log.error('Failed to add COS template for level3: error ' + e.args[0]['desc']) + assert False + + # Add the COS definition + try: + topology_st.standalone.add_s(Entry((BRANCH3_COS_DEF, { + 'objectclass': 'top ldapsubentry cosSuperDefinition cosPointerDefinition'.split(), + 'cn': 'cn=nsPwPolicyEntry,ou=level3,dc=example,dc=com', + 'costemplatedn': BRANCH3_COS_TMPL, + 'cosAttribute': 'pwdpolicysubentry default operational-default' + }))) + except ldap.LDAPError as e: + log.error('Failed to add COS def for level3: error ' + e.args[0]['desc']) + assert False + + # + # Add subtree policy to branch 4 + # + # Add the container + try: + topology_st.standalone.add_s(Entry((BRANCH4_CONTAINER, { + 'objectclass': 'top nsContainer'.split(), + 'cn': 'nsPwPolicyContainer' + }))) + except ldap.LDAPError as e: + log.error('Failed to add subtree container for level3: error ' + e.args[0]['desc']) + assert False + + # Add the password policy subentry + try: + topology_st.standalone.add_s(Entry((BRANCH4_PWP, { + 'objectclass': 'top ldapsubentry passwordpolicy'.split(), + 'cn': 'cn=nsPwPolicyEntry,ou=people,dc=example,dc=com', + 'passwordMustChange': 'off', + 'passwordExp': 'off', + 'passwordHistory': 'off', + 'passwordMinAge': '0', + 'passwordChange': 'off', + 'passwordStorageScheme': 'ssha' + }))) + except ldap.LDAPError as e: + log.error('Failed to add passwordpolicy for branch4: error ' + e.args[0]['desc']) + assert False + + # Add the COS template + try: + topology_st.standalone.add_s(Entry((BRANCH4_COS_TMPL, { + 'objectclass': 'top ldapsubentry costemplate extensibleObject'.split(), + 'cn': 'cn=nsPwPolicyEntry,ou=people,dc=example,dc=com', + 'cosPriority': '1', + 'cn': 'cn=nsPwTemplateEntry,ou=people,dc=example,dc=com', 
+ 'pwdpolicysubentry': BRANCH4_PWP + }))) + except ldap.LDAPError as e: + log.error('Failed to add COS template for level3: error ' + e.args[0]['desc']) + assert False + + # Add the COS definition + try: + topology_st.standalone.add_s(Entry((BRANCH4_COS_DEF, { + 'objectclass': 'top ldapsubentry cosSuperDefinition cosPointerDefinition'.split(), + 'cn': 'cn=nsPwPolicyEntry,ou=people,dc=example,dc=com', + 'costemplatedn': BRANCH4_COS_TMPL, + 'cosAttribute': 'pwdpolicysubentry default operational-default' + }))) + except ldap.LDAPError as e: + log.error('Failed to add COS def for branch4: error ' + e.args[0]['desc']) + assert False + + # + # Add subtree policy to branch 5 + # + # Add the container + try: + topology_st.standalone.add_s(Entry((BRANCH5_CONTAINER, { + 'objectclass': 'top nsContainer'.split(), + 'cn': 'nsPwPolicyContainer' + }))) + except ldap.LDAPError as e: + log.error('Failed to add subtree container for branch5: error ' + e.args[0]['desc']) + assert False + + # Add the password policy subentry + try: + topology_st.standalone.add_s(Entry((BRANCH5_PWP, { + 'objectclass': 'top ldapsubentry passwordpolicy'.split(), + 'cn': 'cn=nsPwPolicyEntry,ou=lower,ou=people,dc=example,dc=com', + 'passwordMustChange': 'off', + 'passwordExp': 'off', + 'passwordHistory': 'off', + 'passwordMinAge': '0', + 'passwordChange': 'off', + 'passwordStorageScheme': 'ssha' + }))) + except ldap.LDAPError as e: + log.error('Failed to add passwordpolicy for branch5: error ' + e.args[0]['desc']) + assert False + + # Add the COS template + try: + topology_st.standalone.add_s(Entry((BRANCH5_COS_TMPL, { + 'objectclass': 'top ldapsubentry costemplate extensibleObject'.split(), + 'cn': 'cn=nsPwPolicyEntry,ou=lower,ou=people,dc=example,dc=com', + 'cosPriority': '1', + 'cn': 'cn=nsPwTemplateEntry,ou=lower,ou=people,dc=example,dc=com', + 'pwdpolicysubentry': BRANCH5_PWP + }))) + except ldap.LDAPError as e: + log.error('Failed to add COS template for branch5: error ' + e.args[0]['desc']) + assert 
False + + # Add the COS definition + try: + topology_st.standalone.add_s(Entry((BRANCH5_COS_DEF, { + 'objectclass': 'top ldapsubentry cosSuperDefinition cosPointerDefinition'.split(), + 'cn': 'cn=nsPwPolicyEntry,ou=lower,ou=people,dc=example,dc=com', + 'costemplatedn': BRANCH5_COS_TMPL, + 'cosAttribute': 'pwdpolicysubentry default operational-default' + }))) + except ldap.LDAPError as e: + log.error('Failed to add COS def for level3: error ' + e.args[0]['desc']) + assert False + + # + # Add subtree policy to branch 6 + # + # Add the container + try: + topology_st.standalone.add_s(Entry((BRANCH6_CONTAINER, { + 'objectclass': 'top nsContainer'.split(), + 'cn': 'nsPwPolicyContainer' + }))) + except ldap.LDAPError as e: + log.error('Failed to add subtree container for branch6: error ' + e.args[0]['desc']) + assert False + + # Add the password policy subentry + try: + topology_st.standalone.add_s(Entry((BRANCH6_PWP, { + 'objectclass': 'top ldapsubentry passwordpolicy'.split(), + 'cn': 'cn=nsPwPolicyEntry,ou=level3,dc=example,dc=com', + 'passwordMustChange': 'off', + 'passwordExp': 'off', + 'passwordHistory': 'off', + 'passwordMinAge': '0', + 'passwordChange': 'off', + 'passwordStorageScheme': 'ssha' + }))) + except ldap.LDAPError as e: + log.error('Failed to add passwordpolicy for branch6: error ' + e.args[0]['desc']) + assert False + + # Add the COS template + try: + topology_st.standalone.add_s(Entry((BRANCH6_COS_TMPL, { + 'objectclass': 'top ldapsubentry costemplate extensibleObject'.split(), + 'cn': 'cn=nsPwPolicyEntry,ou=lower,ou=lower,ou=people,dc=example,dc=com', + 'cosPriority': '1', + 'cn': 'cn=nsPwTemplateEntry,ou=lower,ou=lower,ou=people,dc=example,dc=com', + 'pwdpolicysubentry': BRANCH6_PWP + }))) + except ldap.LDAPError as e: + log.error('Failed to add COS template for branch6: error ' + e.args[0]['desc']) + assert False + + # Add the COS definition + try: + topology_st.standalone.add_s(Entry((BRANCH6_COS_DEF, { + 'objectclass': 'top ldapsubentry 
cosSuperDefinition cosPointerDefinition'.split(), + 'cn': 'cn=nsPwPolicyEntry,ou=lower,ou=lower,ou=people,dc=example,dc=com', + 'costemplatedn': BRANCH6_COS_TMPL, + 'cosAttribute': 'pwdpolicysubentry default operational-default' + }))) + except ldap.LDAPError as e: + log.error('Failed to add COS def for branch6: error ' + e.args[0]['desc']) + assert False + + time.sleep(2) + + # + # Now check that each user has its expected passwordPolicy subentry + # + try: + entries = topology_st.standalone.search_s(USER1_DN, ldap.SCOPE_BASE, '(objectclass=top)', ['pwdpolicysubentry']) + if not entries[0].hasValue('pwdpolicysubentry', BRANCH1_PWP): + log.fatal('User %s does not have expected pwdpolicysubentry!') + assert False + except ldap.LDAPError as e: + log.fatal('Unable to search for entry %s: error %s' % (USER1_DN, e.args[0]['desc'])) + assert False + + try: + entries = topology_st.standalone.search_s(USER2_DN, ldap.SCOPE_BASE, '(objectclass=top)', ['pwdpolicysubentry']) + if not entries[0].hasValue('pwdpolicysubentry', BRANCH2_PWP): + log.fatal('User %s does not have expected pwdpolicysubentry!' % USER2_DN) + assert False + except ldap.LDAPError as e: + log.fatal('Unable to search for entry %s: error %s' % (USER2_DN, e.args[0]['desc'])) + assert False + + try: + entries = topology_st.standalone.search_s(USER3_DN, ldap.SCOPE_BASE, '(objectclass=top)', ['pwdpolicysubentry']) + if not entries[0].hasValue('pwdpolicysubentry', BRANCH3_PWP): + log.fatal('User %s does not have expected pwdpolicysubentry!' % USER3_DN) + assert False + except ldap.LDAPError as e: + log.fatal('Unable to search for entry %s: error %s' % (USER3_DN, e.args[0]['desc'])) + assert False + + try: + entries = topology_st.standalone.search_s(USER4_DN, ldap.SCOPE_BASE, '(objectclass=top)', ['pwdpolicysubentry']) + if not entries[0].hasValue('pwdpolicysubentry', BRANCH4_PWP): + log.fatal('User %s does not have expected pwdpolicysubentry!' 
% USER4_DN) + assert False + except ldap.LDAPError as e: + log.fatal('Unable to search for entry %s: error %s' % (USER4_DN, e.args[0]['desc'])) + assert False + + try: + entries = topology_st.standalone.search_s(USER5_DN, ldap.SCOPE_BASE, '(objectclass=top)', ['pwdpolicysubentry']) + if not entries[0].hasValue('pwdpolicysubentry', BRANCH5_PWP): + log.fatal('User %s does not have expected pwdpolicysubentry!' % USER5_DN) + assert False + except ldap.LDAPError as e: + log.fatal('Unable to search for entry %s: error %s' % (USER5_DN, e.args[0]['desc'])) + assert False + + try: + entries = topology_st.standalone.search_s(USER6_DN, ldap.SCOPE_BASE, '(objectclass=top)', ['pwdpolicysubentry']) + if not entries[0].hasValue('pwdpolicysubentry', BRANCH6_PWP): + log.fatal('User %s does not have expected pwdpolicysubentry!' % USER6_DN) + assert False + except ldap.LDAPError as e: + log.fatal('Unable to search for entry %s: error %s' % (USER6_DN, e.args[0]['desc'])) + assert False + + +if __name__ == '__main__': + # Run isolated + # -s for DEBUG mode + CURRENT_FILE = os.path.realpath(__file__) + pytest.main("-s %s" % CURRENT_FILE) diff --git a/dirsrvtests/tests/tickets/ticket47981_test.py b/dirsrvtests/tests/tickets/ticket47981_test.py new file mode 100644 index 0000000..e8ab9d6 --- /dev/null +++ b/dirsrvtests/tests/tickets/ticket47981_test.py @@ -0,0 +1,228 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2016 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. 
# --- END COPYRIGHT BLOCK ---
#
import logging

import ldap.sasl
import pytest
from lib389.tasks import *
from lib389.topologies import topology_st

from lib389._constants import DEFAULT_SUFFIX, BACKEND_NAME, DN_CONFIG

pytestmark = pytest.mark.tier2

log = logging.getLogger(__name__)

BRANCH = 'ou=people,' + DEFAULT_SUFFIX
USER_DN = 'uid=user1,%s' % (BRANCH)
BRANCH_CONTAINER = 'cn=nsPwPolicyContainer,ou=people,dc=example,dc=com'
BRANCH_COS_DEF = 'cn=nsPwPolicy_CoS,ou=people,dc=example,dc=com'
BRANCH_PWP = 'cn=cn\\3DnsPwPolicyEntry\\2Cou\\3DPeople\\2Cdc\\3Dexample\\2Cdc\\3Dcom,' + \
             'cn=nsPwPolicyContainer,ou=People,dc=example,dc=com'
BRANCH_COS_TMPL = 'cn=cn\\3DnsPwTemplateEntry\\2Cou\\3DPeople\\2Cdc\\3Dexample\\2Cdc\\3Dcom,' + \
                  'cn=nsPwPolicyContainer,ou=People,dc=example,dc=com'
SECOND_SUFFIX = 'o=netscaperoot'
BE_NAME = 'netscaperoot'


def addSubtreePwPolicy(inst):
    """Add a subtree password policy under ou=people.

    Creates, in order: the nsPwPolicyContainer, the password policy
    subentry, the CoS template pointing at that subentry, and the CoS
    definition.  Fails the test (assert False) on any LDAP error.
    """
    # Container that holds the policy entries
    try:
        inst.add_s(Entry((BRANCH_CONTAINER, {
            'objectclass': 'top nsContainer'.split(),
            'cn': 'nsPwPolicyContainer'
        })))
    except ldap.LDAPError as e:
        log.error('Failed to add subtree container for ou=people: error ' + e.args[0]['desc'])
        assert False

    # Add the password policy subentry
    try:
        inst.add_s(Entry((BRANCH_PWP, {
            'objectclass': 'top ldapsubentry passwordpolicy'.split(),
            'cn': 'cn=nsPwPolicyEntry,ou=people,dc=example,dc=com',
            'passwordMustChange': 'off',
            'passwordExp': 'off',
            'passwordHistory': 'off',
            'passwordMinAge': '0',
            'passwordChange': 'off',
            'passwordStorageScheme': 'ssha'
        })))
    except ldap.LDAPError as e:
        log.error('Failed to add passwordpolicy: error ' + e.args[0]['desc'])
        assert False

    # Add the COS template that maps users to the policy subentry.
    # FIX: the original dict literal listed the 'cn' key twice; Python keeps
    # only the last duplicate, so the effective value was the
    # nsPwTemplateEntry one — spell that single value out explicitly.
    try:
        inst.add_s(Entry((BRANCH_COS_TMPL, {
            'objectclass': 'top ldapsubentry costemplate extensibleObject'.split(),
            'cosPriority': '1',
            'cn': 'cn=nsPwTemplateEntry,ou=people,dc=example,dc=com',
            'pwdpolicysubentry': BRANCH_PWP
        })))
    except ldap.LDAPError as e:
        log.error('Failed to add COS template: error ' + e.args[0]['desc'])
        assert False

    # Add the COS definition
    try:
        inst.add_s(Entry((BRANCH_COS_DEF, {
            'objectclass': 'top ldapsubentry cosSuperDefinition cosPointerDefinition'.split(),
            'cn': 'cn=nsPwPolicyEntry,ou=people,dc=example,dc=com',
            'costemplatedn': BRANCH_COS_TMPL,
            'cosAttribute': 'pwdpolicysubentry default operational-default'
        })))
    except ldap.LDAPError as e:
        log.error('Failed to add COS def: error ' + e.args[0]['desc'])
        assert False
    # Give the server a moment to notice the new CoS definition
    time.sleep(1)


def delSubtreePwPolicy(inst):
    """Remove the entries added by addSubtreePwPolicy, children first so
    no non-leaf delete is attempted.  Fails the test on any LDAP error."""
    try:
        inst.delete_s(BRANCH_COS_DEF)
    except ldap.LDAPError as e:
        log.error('Failed to delete COS def: error ' + e.args[0]['desc'])
        assert False

    try:
        inst.delete_s(BRANCH_COS_TMPL)
    except ldap.LDAPError as e:
        log.error('Failed to delete COS template: error ' + e.args[0]['desc'])
        assert False

    try:
        inst.delete_s(BRANCH_PWP)
    except ldap.LDAPError as e:
        log.error('Failed to delete COS password policy: error ' + e.args[0]['desc'])
        assert False

    try:
        inst.delete_s(BRANCH_CONTAINER)
    except ldap.LDAPError as e:
        log.error('Failed to delete COS container: error ' + e.args[0]['desc'])
        assert False
    time.sleep(1)


def test_ticket47981(topology_st):
    """
    If there are multiple suffixes, and the last suffix checked does not contain any COS entries,
    while other suffixes do, then the vattr cache is not invalidated as it should be.  Then any
    cached entries will still contain the old COS attributes/values.
    """

    log.info('Testing Ticket 47981 - Test that COS def changes are correctly reflected in affected users')

    #
    # Create a second backend that does not have any COS entries
    #
    log.info('Adding second suffix that will not contain any COS entries...\n')

    topology_st.standalone.backend.create(SECOND_SUFFIX, {BACKEND_NAME: BE_NAME})
    topology_st.standalone.mappingtree.create(SECOND_SUFFIX, bename=BE_NAME)
    try:
        topology_st.standalone.add_s(Entry((SECOND_SUFFIX, {
            'objectclass': 'top organization'.split(),
            'o': BE_NAME})))
    except ldap.ALREADY_EXISTS:
        pass
    except ldap.LDAPError as e:
        log.error('Failed to create suffix entry: error ' + e.args[0]['desc'])
        assert False

    #
    # Add People branch, it might already exist
    #
    log.info('Add our test entries to the default suffix, and proceed with the test...')

    try:
        topology_st.standalone.add_s(Entry((BRANCH, {
            'objectclass': 'top extensibleObject'.split(),
            'ou': 'level4'
        })))
    except ldap.ALREADY_EXISTS:
        pass
    except ldap.LDAPError as e:
        log.error('Failed to add ou=people: error ' + e.args[0]['desc'])
        assert False

    #
    # Add a user to the branch
    #
    try:
        topology_st.standalone.add_s(Entry((USER_DN, {
            'objectclass': 'top extensibleObject'.split(),
            'uid': 'user1'
        })))
    except ldap.LDAPError as e:
        log.error('Failed to add user1: error ' + e.args[0]['desc'])
        assert False

    #
    # Enable password policy and add the subtree policy
    #
    try:
        topology_st.standalone.modify_s(DN_CONFIG, [(ldap.MOD_REPLACE, 'nsslapd-pwpolicy-local', b'on')])
    except ldap.LDAPError as e:
        log.error('Failed to set pwpolicy-local: error ' + e.args[0]['desc'])
        assert False

    addSubtreePwPolicy(topology_st.standalone)

    #
    # Now check the user has its expected passwordPolicy subentry
    #
    try:
        entries = topology_st.standalone.search_s(USER_DN,
                                                  ldap.SCOPE_BASE,
                                                  '(objectclass=top)',
                                                  ['pwdpolicysubentry', 'dn'])
        if not entries[0].hasAttr('pwdpolicysubentry'):
            log.fatal('User does not have expected pwdpolicysubentry!')
            assert False
    except ldap.LDAPError as e:
        log.fatal('Unable to search for entry %s: error %s' % (USER_DN, e.args[0]['desc']))
        assert False

    #
    # Delete the password policy and make sure it is removed from the same user
    #
    delSubtreePwPolicy(topology_st.standalone)
    try:
        entries = topology_st.standalone.search_s(USER_DN, ldap.SCOPE_BASE, '(objectclass=top)', ['pwdpolicysubentry'])
        if entries[0].hasAttr('pwdpolicysubentry'):
            log.fatal('User unexpectedly does have the pwdpolicysubentry!')
            assert False
    except ldap.LDAPError as e:
        log.fatal('Unable to search for entry %s: error %s' % (USER_DN, e.args[0]['desc']))
        assert False

    #
    # Add the subtree policy back and see if the user now has it
    #
    addSubtreePwPolicy(topology_st.standalone)
    try:
        entries = topology_st.standalone.search_s(USER_DN, ldap.SCOPE_BASE, '(objectclass=top)', ['pwdpolicysubentry'])
        if not entries[0].hasAttr('pwdpolicysubentry'):
            log.fatal('User does not have expected pwdpolicysubentry!')
            assert False
    except ldap.LDAPError as e:
        log.fatal('Unable to search for entry %s: error %s' % (USER_DN, e.args[0]['desc']))
        assert False


if __name__ == '__main__':
    # Run isolated
    # -s for DEBUG mode
    CURRENT_FILE = os.path.realpath(__file__)
    pytest.main("-s %s" % CURRENT_FILE)
# --- END COPYRIGHT BLOCK ---
#
'''
Created on Nov 7, 2013

@author: tbordaz
'''
import logging
import shutil
import stat
import tarfile
import time
from random import randint

import ldap
import pytest
from lib389 import Entry
from lib389._constants import *
from lib389.topologies import topology_m2
from lib389.utils import *

pytestmark = pytest.mark.tier2

logging.getLogger(__name__).setLevel(logging.DEBUG)
log = logging.getLogger(__name__)

TEST_REPL_DN = "cn=test_repl, %s" % SUFFIX
OC_NAME = 'OCticket47988'
MUST = "(postalAddress $ postalCode)"
MAY = "(member $ street)"

OTHER_NAME = 'other_entry'
MAX_OTHERS = 10

BIND_NAME = 'bind_entry'
BIND_DN = 'cn=%s, %s' % (BIND_NAME, SUFFIX)
BIND_PW = 'password'

ENTRY_NAME = 'test_entry'
ENTRY_DN = 'cn=%s, %s' % (ENTRY_NAME, SUFFIX)
ENTRY_OC = "top person %s" % OC_NAME


def _oc_definition(oid_ext, name, must=None, may=None):
    """Build an AUXILIARY objectclass definition string under a test OID arc.

    ``must``/``may`` default to the module-level MUST/MAY attribute lists.
    """
    oid = "1.2.3.4.5.6.7.8.9.10.%d" % oid_ext
    desc = 'To test ticket 47490'
    sup = 'person'
    if not must:
        must = MUST
    if not may:
        may = MAY

    new_oc = "( %s NAME '%s' DESC '%s' SUP %s AUXILIARY MUST %s MAY %s )" % (oid, name, desc, sup, must, may)
    return new_oc


def _header(topology_m2, label):
    """Log a banner so each test phase is easy to spot in the server log."""
    topology_m2.ms["supplier1"].log.info("\n\n###############################################")
    topology_m2.ms["supplier1"].log.info("#######")
    topology_m2.ms["supplier1"].log.info("####### %s" % label)
    topology_m2.ms["supplier1"].log.info("#######")
    topology_m2.ms["supplier1"].log.info("###################################################")


def _install_schema(server, tarFile):
    """Stop *server* and install the schema files shipped in the gzipped
    tarball *tarFile*, staging them through /tmp/schema_47988.

    Existing standard schema files are kept; only 99user.ldif is replaced,
    and files not already present are added.
    """
    server.stop(timeout=10)

    tmpSchema = '/tmp/schema_47988'
    if not os.path.isdir(tmpSchema):
        os.mkdir(tmpSchema)

    # Start from an empty staging directory
    for the_file in os.listdir(tmpSchema):
        file_path = os.path.join(tmpSchema, the_file)
        if os.path.isfile(file_path):
            os.unlink(file_path)

    os.chdir(tmpSchema)
    # FIX: use a context manager so the tarball is closed even if extraction
    # raises (the original leaked the file object on error).
    # NOTE(review): the identity extraction_filter disables tarfile's
    # path-safety checks (PEP 706); acceptable only because this tarball is
    # part of the checked-in test data — confirm it must stay that way.
    with tarfile.open(tarFile, 'r:gz') as tar:
        tar.extraction_filter = (lambda member, path: member)
        for member in tar.getmembers():
            tar.extract(member.name)

    st = os.stat(server.schemadir)
    os.chmod(server.schemadir, st.st_mode | stat.S_IWUSR | stat.S_IXUSR | stat.S_IRUSR)
    for the_file in os.listdir(tmpSchema):
        schemaFile = os.path.join(server.schemadir, the_file)
        if os.path.isfile(schemaFile):
            if the_file.startswith('99user.ldif'):
                # only replace 99user.ldif, the other standard definition are kept
                os.chmod(schemaFile, stat.S_IWUSR | stat.S_IRUSR)
                server.log.info("replace %s" % schemaFile)
                shutil.copy(the_file, schemaFile)
        else:
            server.log.info("add %s" % schemaFile)
            shutil.copy(the_file, schemaFile)
            os.chmod(schemaFile, stat.S_IRUSR | stat.S_IRGRP)
    os.chmod(server.schemadir, st.st_mode | stat.S_IRUSR | stat.S_IRGRP)


def test_ticket47988_init(topology_m2):
    """
    It adds
        - Objectclass with MAY 'member'
        - an entry ('bind_entry') with which we bind to test the 'SELFDN' operation
    It deletes the anonymous aci
    """

    _header(topology_m2, 'test_ticket47988_init')

    # enable acl error logging
    mod = [(ldap.MOD_REPLACE, 'nsslapd-errorlog-level', ensure_bytes(str(8192)))]  # REPL
    topology_m2.ms["supplier1"].modify_s(DN_CONFIG, mod)
    topology_m2.ms["supplier2"].modify_s(DN_CONFIG, mod)

    mod = [(ldap.MOD_REPLACE, 'nsslapd-accesslog-level', ensure_bytes(str(260)))]  # Internal op
    topology_m2.ms["supplier1"].modify_s(DN_CONFIG, mod)
    topology_m2.ms["supplier2"].modify_s(DN_CONFIG, mod)

    # add dummy entries
    for cpt in range(MAX_OTHERS):
        name = "%s%d" % (OTHER_NAME, cpt)
        topology_m2.ms["supplier1"].add_s(Entry(("cn=%s,%s" % (name, SUFFIX), {
            'objectclass': "top person".split(),
            'sn': name,
            'cn': name})))

    # check that entry 0 is replicated before going any further
    loop = 0
    entryDN = "cn=%s0,%s" % (OTHER_NAME, SUFFIX)
    while loop <= 10:
        try:
            ent = topology_m2.ms["supplier2"].getEntry(entryDN, ldap.SCOPE_BASE, "(objectclass=*)", ['telephonenumber'])
            break
        except ldap.NO_SUCH_OBJECT:
            time.sleep(1)
            loop += 1
    assert (loop <= 10)

    topology_m2.ms["supplier1"].stop(timeout=10)
    topology_m2.ms["supplier2"].stop(timeout=10)

    # install the specific schema M1: ipa3.3, M2: ipa4.1
    schema_file = os.path.join(topology_m2.ms["supplier1"].getDir(__file__, DATA_DIR), "ticket47988/schema_ipa3.3.tar.gz")
    _install_schema(topology_m2.ms["supplier1"], schema_file)
    schema_file = os.path.join(topology_m2.ms["supplier1"].getDir(__file__, DATA_DIR), "ticket47988/schema_ipa4.1.tar.gz")
    _install_schema(topology_m2.ms["supplier2"], schema_file)

    topology_m2.ms["supplier1"].start(timeout=10)
    topology_m2.ms["supplier2"].start(timeout=10)


def test_ticket47988_init_helpers():
    """Placeholder intentionally absent — see helpers below."""


def _do_update_schema(server, range=3999):
    '''
    Update the schema of the M2 (IPA4.1) to generate a nsSchemaCSN.

    NOTE(review): the ``range`` parameter shadows the builtin of the same
    name; kept for interface compatibility with existing keyword callers.
    '''
    postfix = str(randint(range, range + 1000))
    OID = '2.16.840.1.113730.3.8.12.%s' % postfix
    NAME = 'thierry%s' % postfix
    value = '( %s NAME \'%s\' DESC \'Override for Group Attributes\' STRUCTURAL MUST ( cn ) MAY sn X-ORIGIN ( \'IPA v4.1.2\' \'user defined\' ) )' % (
        OID, NAME)
    mod = [(ldap.MOD_ADD, 'objectclasses', ensure_bytes(value))]
    server.modify_s('cn=schema', mod)


def _do_update_entry(supplier=None, consumer=None, attempts=10):
    '''
    Do an update on *supplier* and check (polling up to *attempts* times,
    5s apart) that the update has been propagated to *consumer*.
    '''
    assert (supplier)
    assert (consumer)
    entryDN = "cn=%s0,%s" % (OTHER_NAME, SUFFIX)
    value = str(randint(100, 200))
    mod = [(ldap.MOD_REPLACE, 'telephonenumber', ensure_bytes(value))]
    supplier.modify_s(entryDN, mod)

    loop = 0
    while loop <= attempts:
        ent = consumer.getEntry(entryDN, ldap.SCOPE_BASE, "(objectclass=*)", ['telephonenumber'])
        read_val = ensure_str(ent.telephonenumber) or "0"
        if read_val == value:
            break
        # the expected value is not yet replicated. try again
        time.sleep(5)
        loop += 1
        supplier.log.debug("test_do_update: receive %s (expected %s)" % (read_val, value))
    assert (loop <= attempts)


def _set_agreement(topology_m2, owner, label, pause):
    """Pause (pause=True) or resume the single replication agreement owned
    by supplier *owner*; *label* is the exact banner text to log."""
    topology_m2.ms["supplier1"].log.info(
        "\n\n######################### %s ######################\n" % label)
    ents = topology_m2.ms[owner].agreement.list(suffix=SUFFIX)
    assert len(ents) == 1
    if pause:
        topology_m2.ms[owner].agreement.pause(ents[0].dn)
    else:
        topology_m2.ms[owner].agreement.resume(ents[0].dn)


def _pause_M2_to_M1(topology_m2):
    _set_agreement(topology_m2, "supplier2", "Pause RA M2->M1", True)


def _resume_M1_to_M2(topology_m2):
    _set_agreement(topology_m2, "supplier1", "resume RA M1->M2", False)


def _pause_M1_to_M2(topology_m2):
    _set_agreement(topology_m2, "supplier1", "Pause RA M1->M2", True)


def _resume_M2_to_M1(topology_m2):
    _set_agreement(topology_m2, "supplier2", "resume RA M2->M1", False)


def test_ticket47988_1(topology_m2):
    '''
    Check that replication is working and pause replication M2->M1
    '''
    _header(topology_m2, 'test_ticket47988_1')

    topology_m2.ms["supplier1"].log.debug("\n\nCheck that replication is working and pause replication M2->M1\n")
    _do_update_entry(supplier=topology_m2.ms["supplier2"], consumer=topology_m2.ms["supplier1"], attempts=5)
    _pause_M2_to_M1(topology_m2)


def test_ticket47988_2(topology_m2):
    '''
    Update M1 schema and trigger update M1->M2
    So M1 should learn new/extended definitions that are in M2 schema
    '''
    _header(topology_m2, 'test_ticket47988_2')

    topology_m2.ms["supplier1"].log.debug("\n\nUpdate M1 schema and an entry on M1\n")
    supplier1_schema_csn = topology_m2.ms["supplier1"].schema.get_schema_csn()
    supplier2_schema_csn = topology_m2.ms["supplier2"].schema.get_schema_csn()
    topology_m2.ms["supplier1"].log.debug("\nBefore updating the schema on M1\n")
    topology_m2.ms["supplier1"].log.debug("Supplier1 nsschemaCSN: %s" % supplier1_schema_csn)
    topology_m2.ms["supplier1"].log.debug("Supplier2 nsschemaCSN: %s" % supplier2_schema_csn)

    # Update M1's schema, forcing the next replication session to compare
    # schemas so the peer can learn the new definition
    _do_update_schema(topology_m2.ms["supplier1"])
    supplier1_schema_csn = topology_m2.ms["supplier1"].schema.get_schema_csn()
    supplier2_schema_csn = topology_m2.ms["supplier2"].schema.get_schema_csn()
    topology_m2.ms["supplier1"].log.debug("\nAfter updating the schema on M1\n")
    topology_m2.ms["supplier1"].log.debug("Supplier1 nsschemaCSN: %s" % supplier1_schema_csn)
    topology_m2.ms["supplier1"].log.debug("Supplier2 nsschemaCSN: %s" % supplier2_schema_csn)
    assert (supplier1_schema_csn)

    # to avoid linger effect where a replication session is reused without checking the schema
    _pause_M1_to_M2(topology_m2)
    _resume_M1_to_M2(topology_m2)

    # topo.supplier1.log.debug("\n\nSleep.... attach the debugger dse_modify")
    # time.sleep(60)
    _do_update_entry(supplier=topology_m2.ms["supplier1"], consumer=topology_m2.ms["supplier2"], attempts=15)
    supplier1_schema_csn = topology_m2.ms["supplier1"].schema.get_schema_csn()
    supplier2_schema_csn = topology_m2.ms["supplier2"].schema.get_schema_csn()
    topology_m2.ms["supplier1"].log.debug("\nAfter a full replication session\n")
    topology_m2.ms["supplier1"].log.debug("Supplier1 nsschemaCSN: %s" % supplier1_schema_csn)
    topology_m2.ms["supplier1"].log.debug("Supplier2 nsschemaCSN: %s" % supplier2_schema_csn)
    assert (supplier1_schema_csn)
    assert (supplier2_schema_csn)


def test_ticket47988_3(topology_m2):
    '''
    Resume replication M2->M1 and check replication is still working
    '''
    _header(topology_m2, 'test_ticket47988_3')

    _resume_M2_to_M1(topology_m2)
    _do_update_entry(supplier=topology_m2.ms["supplier1"], consumer=topology_m2.ms["supplier2"], attempts=5)
    _do_update_entry(supplier=topology_m2.ms["supplier2"], consumer=topology_m2.ms["supplier1"], attempts=5)


def test_ticket47988_4(topology_m2):
    '''
    Check schemaCSN is identical on both server
    And save the nsschemaCSN to later check they do not change unexpectedly
    '''
    _header(topology_m2, 'test_ticket47988_4')

    supplier1_schema_csn = topology_m2.ms["supplier1"].schema.get_schema_csn()
    supplier2_schema_csn = topology_m2.ms["supplier2"].schema.get_schema_csn()
    topology_m2.ms["supplier1"].log.debug("\n\nSupplier1 nsschemaCSN: %s" % supplier1_schema_csn)
    topology_m2.ms["supplier1"].log.debug("\n\nSupplier2 nsschemaCSN: %s" % supplier2_schema_csn)
    assert (supplier1_schema_csn)
    assert (supplier2_schema_csn)
    assert (supplier1_schema_csn == supplier2_schema_csn)

    topology_m2.ms["supplier1"].saved_schema_csn = supplier1_schema_csn
    topology_m2.ms["supplier2"].saved_schema_csn = supplier2_schema_csn


def test_ticket47988_5(topology_m2):
    '''
    Check schemaCSN do not change unexpectedly
    '''
    _header(topology_m2, 'test_ticket47988_5')

    _do_update_entry(supplier=topology_m2.ms["supplier1"], consumer=topology_m2.ms["supplier2"], attempts=5)
    _do_update_entry(supplier=topology_m2.ms["supplier2"], consumer=topology_m2.ms["supplier1"], attempts=5)
    supplier1_schema_csn = topology_m2.ms["supplier1"].schema.get_schema_csn()
    supplier2_schema_csn = topology_m2.ms["supplier2"].schema.get_schema_csn()
    topology_m2.ms["supplier1"].log.debug("\n\nSupplier1 nsschemaCSN: %s" % supplier1_schema_csn)
    topology_m2.ms["supplier1"].log.debug("\n\nSupplier2 nsschemaCSN: %s" % supplier2_schema_csn)
    assert (supplier1_schema_csn)
    assert (supplier2_schema_csn)
    assert (supplier1_schema_csn == supplier2_schema_csn)

    assert (topology_m2.ms["supplier1"].saved_schema_csn == supplier1_schema_csn)
    assert (topology_m2.ms["supplier2"].saved_schema_csn == supplier2_schema_csn)


def test_ticket47988_6(topology_m2):
    '''
    Update M1 schema and trigger update M2->M1
    So M2 should learn new/extended definitions that are in M1 schema
    '''

    _header(topology_m2, 'test_ticket47988_6')

    topology_m2.ms["supplier1"].log.debug("\n\nUpdate M1 schema and an entry on M1\n")
    supplier1_schema_csn = topology_m2.ms["supplier1"].schema.get_schema_csn()
    supplier2_schema_csn = topology_m2.ms["supplier2"].schema.get_schema_csn()
    topology_m2.ms["supplier1"].log.debug("\nBefore updating the schema on M1\n")
    topology_m2.ms["supplier1"].log.debug("Supplier1 nsschemaCSN: %s" % supplier1_schema_csn)
    topology_m2.ms["supplier1"].log.debug("Supplier2 nsschemaCSN: %s" % supplier2_schema_csn)

    # Update M1's schema (different OID range than test 2) so M2 has to
    # check M1's schema and learn the new definition
    _do_update_schema(topology_m2.ms["supplier1"], range=5999)
    supplier1_schema_csn = topology_m2.ms["supplier1"].schema.get_schema_csn()
    supplier2_schema_csn = topology_m2.ms["supplier2"].schema.get_schema_csn()
    topology_m2.ms["supplier1"].log.debug("\nAfter updating the schema on M1\n")
    topology_m2.ms["supplier1"].log.debug("Supplier1 nsschemaCSN: %s" % supplier1_schema_csn)
    topology_m2.ms["supplier1"].log.debug("Supplier2 nsschemaCSN: %s" % supplier2_schema_csn)
    assert (supplier1_schema_csn)

    # to avoid linger effect where a replication session is reused without checking the schema
    _pause_M1_to_M2(topology_m2)
    _resume_M1_to_M2(topology_m2)

    # topo.supplier1.log.debug("\n\nSleep.... attach the debugger dse_modify")
    # time.sleep(60)
    _do_update_entry(supplier=topology_m2.ms["supplier2"], consumer=topology_m2.ms["supplier1"], attempts=15)
    supplier1_schema_csn = topology_m2.ms["supplier1"].schema.get_schema_csn()
    supplier2_schema_csn = topology_m2.ms["supplier2"].schema.get_schema_csn()
    topology_m2.ms["supplier1"].log.debug("\nAfter a full replication session\n")
    topology_m2.ms["supplier1"].log.debug("Supplier1 nsschemaCSN: %s" % supplier1_schema_csn)
    topology_m2.ms["supplier1"].log.debug("Supplier2 nsschemaCSN: %s" % supplier2_schema_csn)
    assert (supplier1_schema_csn)
    assert (supplier2_schema_csn)


if __name__ == '__main__':
    # Run isolated
    # -s for DEBUG mode
    CURRENT_FILE = os.path.realpath(__file__)
    pytest.main("-s %s" % CURRENT_FILE)
# --- END COPYRIGHT BLOCK ---
#
import logging
import re

import pytest
from lib389.tasks import *
from lib389.topologies import topology_st

from lib389._constants import (DEFAULT_SUFFIX, SUFFIX, PLUGIN_REFER_INTEGRITY, PLUGIN_AUTOMEMBER,
                               PLUGIN_MEMBER_OF, PLUGIN_USN)

pytestmark = pytest.mark.tier2

logging.getLogger(__name__).setLevel(logging.DEBUG)
log = logging.getLogger(__name__)

# Where rescued core files are parked for post-mortem analysis
MYTMP = '/tmp'


def _check_no_core(topology_st, label, prefix):
    """Fail the test if the (stopped) server left a core file.

    Any core found next to the error log is moved to MYTMP/core.<label>
    (*prefix* is used in the log message) and the test is failed.
    FIX: the original copies of this check called the undefined name
    ``s.system`` (NameError) instead of ``os.system``.
    """
    logdir = re.sub('errors', '', topology_st.standalone.errlog)
    cmdline = 'ls ' + logdir + 'core*'
    with os.popen(cmdline, "r") as p:
        lcore = p.readline()
    if lcore != "":
        os.system('mv %score* %s/core.%s' % (logdir, MYTMP, label))
        log.error('%s: Moved core file(s) to %s; Test failed' % (prefix, MYTMP))
        assert False
    log.info('No core files are found')


def test_ticket48005_setup(topology_st):
    '''
    allow dump core
    generate a test ldif file using dbgen.pl
    import the ldif
    '''
    log.info("Ticket 48005 setup...")
    sysconfig_dirsrv = os.path.join(topology_st.standalone.get_initconfig_dir(), 'dirsrv')
    cmdline = 'egrep "ulimit -c unlimited" %s' % sysconfig_dirsrv
    with os.popen(cmdline, "r") as p:
        ulimitc = p.readline()
    if ulimitc == "":
        log.info('No ulimit -c in %s' % sysconfig_dirsrv)
        log.info('Adding it')
        # NOTE(review): this command string is built but never executed (no
        # os.system call in the original either), so the sysconfig file is
        # not actually modified — confirm whether that is intended.
        cmdline = 'echo "ulimit -c unlimited" >> %s' % sysconfig_dirsrv

    sysconfig_dirsrv_systemd = sysconfig_dirsrv + ".systemd"
    cmdline = 'egrep LimitCORE=infinity %s' % sysconfig_dirsrv_systemd
    with os.popen(cmdline, "r") as p:
        lcore = p.readline()
    if lcore == "":
        log.info('No LimitCORE in %s' % sysconfig_dirsrv_systemd)
        log.info('Adding it')
        # NOTE(review): likewise built but never executed — confirm intent.
        cmdline = 'echo LimitCORE=infinity >> %s' % sysconfig_dirsrv_systemd

    topology_st.standalone.restart(timeout=10)

    # Generate a 10000-entry ldif with dbgen.pl and import it
    ldif_file = topology_st.standalone.get_ldif_dir() + "/ticket48005.ldif"
    os.system('ls %s' % ldif_file)
    os.system('rm -f %s' % ldif_file)
    if hasattr(topology_st.standalone, 'prefix'):
        prefix = topology_st.standalone.prefix
    else:
        prefix = ""
    dbgen_prog = prefix + '/bin/dbgen.pl'
    log.info('dbgen_prog: %s' % dbgen_prog)
    os.system('%s -s %s -o %s -u -n 10000' % (dbgen_prog, SUFFIX, ldif_file))
    cmdline = 'egrep dn: %s | wc -l' % ldif_file
    with os.popen(cmdline, "r") as p:
        dnnumstr = p.readline()
    num = int(dnnumstr)
    log.info("We have %d entries.\n", num)

    importTask = Tasks(topology_st.standalone)
    args = {TASK_WAIT: True}
    importTask.importLDIF(SUFFIX, None, ldif_file, args)
    log.info('Importing %s complete.' % ldif_file)


def test_ticket48005_memberof(topology_st):
    '''
    Enable memberof and referint plugin
    Run fixmemberof task without waiting
    Shutdown the server
    Check if a core file was generated or not
    If no core was found, this test case was successful.
    '''
    log.info("Ticket 48005 memberof test...")
    topology_st.standalone.plugins.enable(name=PLUGIN_MEMBER_OF)
    topology_st.standalone.plugins.enable(name=PLUGIN_REFER_INTEGRITY)

    topology_st.standalone.restart(timeout=10)

    try:
        # run the fixup task
        topology_st.standalone.tasks.fixupMemberOf(suffix=SUFFIX, args={TASK_WAIT: False})
    except ValueError:
        log.error('Some problem occured with a value that was provided')
        assert False

    topology_st.standalone.stop(timeout=10)

    _check_no_core(topology_st, 'ticket48005_memberof', 'FixMemberof')

    topology_st.standalone.start(timeout=10)

    topology_st.standalone.plugins.disable(name=PLUGIN_REFER_INTEGRITY)
    topology_st.standalone.plugins.disable(name=PLUGIN_MEMBER_OF)

    topology_st.standalone.restart(timeout=10)

    log.info("Ticket 48005 memberof test complete")


def test_ticket48005_automember(topology_st):
    '''
    Enable automember and referint plugin
    1. Run automember rebuild membership task without waiting
       Shutdown the server
       Check if a core file was generated or not
       If no core was found, this test case was successful.
    2. Run automember export updates task without waiting
       Shutdown the server
       Check if a core file was generated or not
       If no core was found, this test case was successful.
    3. Run automember map updates task without waiting
       Shutdown the server
       Check if a core file was generated or not
       If no core was found, this test case was successful.
    '''
    log.info("Ticket 48005 automember test...")
    topology_st.standalone.plugins.enable(name=PLUGIN_AUTOMEMBER)
    topology_st.standalone.plugins.enable(name=PLUGIN_REFER_INTEGRITY)

    # configure automember config entry
    log.info('Adding automember config')
    try:
        topology_st.standalone.add_s(Entry(('cn=group cfg,cn=Auto Membership Plugin,cn=plugins,cn=config', {
            'objectclass': 'top autoMemberDefinition'.split(),
            'autoMemberScope': 'dc=example,dc=com',
            'autoMemberFilter': 'objectclass=inetorgperson',
            'autoMemberDefaultGroup': 'cn=group0,dc=example,dc=com',
            'autoMemberGroupingAttr': 'uniquemember:dn',
            'cn': 'group cfg'})))
    except ValueError:
        log.error('Failed to add automember config')
        assert False

    topology_st.standalone.restart(timeout=10)

    try:
        # run the automember rebuild task
        topology_st.standalone.tasks.automemberRebuild(suffix=SUFFIX, args={TASK_WAIT: False})
    except ValueError:
        log.error('Automember rebuild task failed.')
        assert False

    topology_st.standalone.stop(timeout=10)

    _check_no_core(topology_st, 'ticket48005_automember_rebuild', 'Automember_rebuild')

    topology_st.standalone.start(timeout=10)

    ldif_out_file = MYTMP + "/ticket48005_automember_exported.ldif"
    try:
        # run the automember export task
        topology_st.standalone.tasks.automemberExport(suffix=SUFFIX, ldif_out=ldif_out_file, args={TASK_WAIT: False})
    except ValueError:
        log.error('Automember Export task failed.')
        assert False

    topology_st.standalone.stop(timeout=10)

    _check_no_core(topology_st, 'ticket48005_automember_export', 'Automember_export')

    topology_st.standalone.start(timeout=10)

    ldif_in_file = topology_st.standalone.get_ldif_dir() + "/ticket48005.ldif"
    ldif_out_file = MYTMP + "/ticket48005_automember_map.ldif"
    try:
        # run the automember map task
        topology_st.standalone.tasks.automemberMap(ldif_in=ldif_in_file, ldif_out=ldif_out_file,
                                                   args={TASK_WAIT: False})
    except ValueError:
        log.error('Automember Map task failed.')
        assert False

    topology_st.standalone.stop(timeout=10)

    _check_no_core(topology_st, 'ticket48005_automember_map', 'Automember_map')

    topology_st.standalone.start(timeout=10)

    topology_st.standalone.plugins.disable(name=PLUGIN_REFER_INTEGRITY)
    # FIX: the original re-ENABLED the automember plugin here; the cleanup is
    # clearly meant to be symmetric with the memberof test and disable it.
    topology_st.standalone.plugins.disable(name=PLUGIN_AUTOMEMBER)

    topology_st.standalone.restart(timeout=10)

    log.info("Ticket 48005 automember test complete")
+ ''' + log.info("Ticket 48005 syntax validate test...") + + try: + # run the fixup task + topology_st.standalone.tasks.syntaxValidate(suffix=SUFFIX, args={TASK_WAIT: False}) + except ValueError: + log.error('Some problem occured with a value that was provided') + assert False + + topology_st.standalone.stop(timeout=10) + + mytmp = '/tmp' + logdir = re.sub('errors', '', topology_st.standalone.errlog) + cmdline = 'ls ' + logdir + 'core*' + p = os.popen(cmdline, "r") + lcore = p.readline() + if lcore != "": + s.system('mv %score* %s/core.ticket48005_syntaxvalidate' % (logdir, mytmp)) + log.error('SyntaxValidate: Moved core file(s) to %s; Test failed' % mytmp) + assert False + log.info('No core files are found') + + topology_st.standalone.start(timeout=10) + + log.info("Ticket 48005 syntax validate test complete") + + +def test_ticket48005_usn(topology_st): + ''' + Enable entryusn + Delete all user entries. + Run USN tombstone cleanup task + Shutdown the server + Check if a core file was generated or not + If no core was found, this test case was successful. + ''' + log.info("Ticket 48005 usn test...") + topology_st.standalone.plugins.enable(name=PLUGIN_USN) + + topology_st.standalone.restart(timeout=10) + + try: + entries = topology_st.standalone.search_s(SUFFIX, ldap.SCOPE_SUBTREE, "(objectclass=inetorgperson)") + if len(entries) == 0: + log.info("No user entries.") + else: + for i in range(len(entries)): + # log.info('Deleting %s' % entries[i].dn) + try: + topology_st.standalone.delete_s(entries[i].dn) + except ValueError: + log.error('delete_s %s failed.' 
% entries[i].dn) + assert False + except ValueError: + log.error('search_s failed.') + assert False + + try: + # run the usn tombstone cleanup + topology_st.standalone.tasks.usnTombstoneCleanup(suffix=SUFFIX, bename="userRoot", args={TASK_WAIT: False}) + except ValueError: + log.error('Some problem occured with a value that was provided') + assert False + + topology_st.standalone.stop(timeout=10) + + mytmp = '/tmp' + logdir = re.sub('errors', '', topology_st.standalone.errlog) + cmdline = 'ls ' + logdir + 'core*' + p = os.popen(cmdline, "r") + lcore = p.readline() + if lcore != "": + s.system('mv %score* %s/core.ticket48005_usn' % (logdir, mytmp)) + log.error('usnTombstoneCleanup: Moved core file(s) to %s; Test failed' % mytmp) + assert False + log.info('No core files are found') + + topology_st.standalone.start(timeout=10) + + topology_st.standalone.plugins.disable(name=PLUGIN_USN) + + topology_st.standalone.restart(timeout=10) + + log.info("Ticket 48005 usn test complete") + + +def test_ticket48005_schemareload(topology_st): + ''' + Run schema reload task without waiting + Shutdown the server + Check if a core file was generated or not + If no core was found, this test case was successful. 
+ ''' + log.info("Ticket 48005 schema reload test...") + + try: + # run the schema reload task + topology_st.standalone.tasks.schemaReload(args={TASK_WAIT: False}) + except ValueError: + log.error('Schema Reload task failed.') + assert False + + topology_st.standalone.stop(timeout=10) + + logdir = re.sub('errors', '', topology_st.standalone.errlog) + cmdline = 'ls ' + logdir + 'core*' + p = os.popen(cmdline, "r") + lcore = p.readline() + if lcore != "": + mytmp = '/tmp' + s.system('mv %score* %s/core.ticket48005_schema_reload' % (logdir, mytmp)) + log.error('Schema reload: Moved core file(s) to %s; Test failed' % mytmp) + assert False + log.info('No core files are found') + + topology_st.standalone.start(timeout=10) + + log.info("Ticket 48005 schema reload test complete") + + +if __name__ == '__main__': + # Run isolated + # -s for DEBUG mode + CURRENT_FILE = os.path.realpath(__file__) + pytest.main("-s %s" % CURRENT_FILE) diff --git a/dirsrvtests/tests/tickets/ticket48013_test.py b/dirsrvtests/tests/tickets/ticket48013_test.py new file mode 100644 index 0000000..915d589 --- /dev/null +++ b/dirsrvtests/tests/tickets/ticket48013_test.py @@ -0,0 +1,95 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2016 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. 
+# --- END COPYRIGHT BLOCK --- +# +import ldapurl +import pytest +from ldap.ldapobject import SimpleLDAPObject +from ldap.syncrepl import SyncreplConsumer +from lib389.utils import * +from lib389.topologies import topology_st + +from lib389._constants import (PLUGIN_RETRO_CHANGELOG, DEFAULT_SUFFIX, DN_CONFIG, + DN_DM, PASSWORD, PLUGIN_REPL_SYNC, HOST_STANDALONE, + PORT_STANDALONE) + + +# Skip on older versions +pytestmark = [pytest.mark.tier2, + pytest.mark.skipif(ds_is_older('1.3.4'), reason="Not implemented")] + +logging.getLogger(__name__).setLevel(logging.DEBUG) +log = logging.getLogger(__name__) + + +class SyncObject(SimpleLDAPObject, SyncreplConsumer): + def __init__(self, uri): + # Init the ldap connection + SimpleLDAPObject.__init__(self, uri) + + def sync_search(self, test_cookie): + self.syncrepl_search('dc=example,dc=com', ldap.SCOPE_SUBTREE, + filterstr='(objectclass=*)', mode='refreshOnly', + cookie=test_cookie) + + def poll(self): + self.syncrepl_poll(all=1) + + +def test_ticket48013(topology_st): + ''' + Content Synchonization: Test that invalid cookies are caught + ''' + + cookies = ('#', '##', 'a#a#a', 'a#a#1') + + # Enable dynamic plugins + try: + topology_st.standalone.modify_s(DN_CONFIG, [(ldap.MOD_REPLACE, 'nsslapd-dynamic-plugins', b'on')]) + except ldap.LDAPError as e: + log.error('Failed to enable dynamic plugin! 
{}'.format(e.args[0]['desc'])) + assert False + + # Enable retro changelog + topology_st.standalone.plugins.enable(name=PLUGIN_RETRO_CHANGELOG) + + # Enbale content sync plugin + topology_st.standalone.plugins.enable(name=PLUGIN_REPL_SYNC) + + # Set everything up + ldap_url = ldapurl.LDAPUrl('ldap://%s:%s' % (HOST_STANDALONE, + PORT_STANDALONE)) + ldap_connection = SyncObject(ldap_url.initializeUrl()) + + # Authenticate + try: + ldap_connection.simple_bind_s(DN_DM, PASSWORD) + except ldap.LDAPError as e: + log.error('Login to LDAP server failed: {}'.format(e.args[0]['desc'])) + assert False + + # Test invalid cookies + for invalid_cookie in cookies: + log.info('Testing cookie: %s' % invalid_cookie) + try: + ldap_connection.sync_search(invalid_cookie) + ldap_connection.poll() + log.fatal('Invalid cookie accepted!') + assert False + except Exception as e: + log.info('Invalid cookie correctly rejected: {}'.format(e.args[0]['info'])) + pass + + # Success + log.info('Test complete') + + +if __name__ == '__main__': + # Run isolated + # -s for DEBUG mode + CURRENT_FILE = os.path.realpath(__file__) + pytest.main("-s %s" % CURRENT_FILE) diff --git a/dirsrvtests/tests/tickets/ticket48026_test.py b/dirsrvtests/tests/tickets/ticket48026_test.py new file mode 100644 index 0000000..f00d3d5 --- /dev/null +++ b/dirsrvtests/tests/tickets/ticket48026_test.py @@ -0,0 +1,121 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2016 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. 
+# --- END COPYRIGHT BLOCK --- +# +import pytest +from lib389.tasks import * +from lib389.utils import * +from lib389.topologies import topology_st + +from lib389._constants import PLUGIN_ATTR_UNIQUENESS, DEFAULT_SUFFIX + +# Skip on older versions +pytestmark = [pytest.mark.tier2, + pytest.mark.skipif(ds_is_older('1.3.4'), reason="Not implemented")] + +logging.getLogger(__name__).setLevel(logging.DEBUG) +log = logging.getLogger(__name__) + +USER1_DN = 'uid=user1,' + DEFAULT_SUFFIX +USER2_DN = 'uid=user2,' + DEFAULT_SUFFIX + + +def test_ticket48026(topology_st): + ''' + Test that multiple attribute uniqueness works correctly. + ''' + # Configure the plugin + inst = topology_st.standalone + inst.plugins.enable(name=PLUGIN_ATTR_UNIQUENESS) + + try: + # This plugin enable / disable doesn't seem to create the nsslapd-pluginId correctly? + inst.modify_s('cn=' + PLUGIN_ATTR_UNIQUENESS + ',cn=plugins,cn=config', + [(ldap.MOD_REPLACE, 'uniqueness-attribute-name', b'mail'), + (ldap.MOD_ADD, 'uniqueness-attribute-name', + b'mailAlternateAddress'), + ]) + except ldap.LDAPError as e: + log.fatal('test_ticket48026: Failed to configure plugin for "mail": error {}'.format(e.args[0]['desc'])) + assert False + + inst.restart(timeout=30) + + # Add an entry + try: + inst.add_s(Entry((USER1_DN, {'objectclass': "top extensibleObject".split(), + 'sn': '1', + 'cn': 'user 1', + 'uid': 'user1', + 'mail': 'user1@example.com', + 'mailAlternateAddress': 'user1@alt.example.com', + 'userpassword': 'password'}))) + except ldap.LDAPError as e: + log.fatal('test_ticket48026: Failed to add test user' + USER1_DN + ': error {}'.format(e.args[0]['desc'])) + assert False + + try: + inst.add_s(Entry((USER2_DN, {'objectclass': "top extensibleObject".split(), + 'sn': '2', + 'cn': 'user 2', + 'uid': 'user2', + 'mail': 'user1@example.com', + 'userpassword': 'password'}))) + except ldap.CONSTRAINT_VIOLATION: + pass + else: + log.error('test_ticket48026: Adding of 1st entry(mail v mail) incorrectly succeeded') 
+ assert False + + try: + inst.add_s(Entry((USER2_DN, {'objectclass': "top extensibleObject".split(), + 'sn': '2', + 'cn': 'user 2', + 'uid': 'user2', + 'mailAlternateAddress': 'user1@alt.example.com', + 'userpassword': 'password'}))) + except ldap.CONSTRAINT_VIOLATION: + pass + else: + log.error( + 'test_ticket48026: Adding of 2nd entry(mailAlternateAddress v mailAlternateAddress) incorrectly succeeded') + assert False + + try: + inst.add_s(Entry((USER2_DN, {'objectclass': "top extensibleObject".split(), + 'sn': '2', + 'cn': 'user 2', + 'uid': 'user2', + 'mail': 'user1@alt.example.com', + 'userpassword': 'password'}))) + except ldap.CONSTRAINT_VIOLATION: + pass + else: + log.error('test_ticket48026: Adding of 3rd entry(mail v mailAlternateAddress) incorrectly succeeded') + assert False + + try: + inst.add_s(Entry((USER2_DN, {'objectclass': "top extensibleObject".split(), + 'sn': '2', + 'cn': 'user 2', + 'uid': 'user2', + 'mailAlternateAddress': 'user1@example.com', + 'userpassword': 'password'}))) + except ldap.CONSTRAINT_VIOLATION: + pass + else: + log.error('test_ticket48026: Adding of 4th entry(mailAlternateAddress v mail) incorrectly succeeded') + assert False + + log.info('Test complete') + + +if __name__ == '__main__': + # Run isolated + # -s for DEBUG mode + CURRENT_FILE = os.path.realpath(__file__) + pytest.main("-s %s" % CURRENT_FILE) diff --git a/dirsrvtests/tests/tickets/ticket48109_test.py b/dirsrvtests/tests/tickets/ticket48109_test.py new file mode 100644 index 0000000..5f9b657 --- /dev/null +++ b/dirsrvtests/tests/tickets/ticket48109_test.py @@ -0,0 +1,338 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2016 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. 
+# --- END COPYRIGHT BLOCK --- +# +import pytest +from lib389.tasks import * +from lib389.utils import * +from lib389.topologies import topology_st + +from lib389._constants import SUFFIX, DEFAULT_SUFFIX + +pytestmark = pytest.mark.tier2 + +logging.getLogger(__name__).setLevel(logging.DEBUG) +log = logging.getLogger(__name__) + +UID_INDEX = 'cn=uid,cn=index,cn=userRoot,cn=ldbm database,cn=plugins,cn=config' + + +def test_ticket48109(topology_st): + ''' + Set SubStr lengths to cn=uid,cn=index,... + objectClass: extensibleObject + nsIndexType: sub + nsSubStrBegin: 2 + nsSubStrEnd: 2 + ''' + log.info('Test case 0') + # add substr setting to UID_INDEX + try: + topology_st.standalone.modify_s(UID_INDEX, + [(ldap.MOD_ADD, 'objectClass', b'extensibleObject'), + (ldap.MOD_ADD, 'nsIndexType', b'sub'), + (ldap.MOD_ADD, 'nsSubStrBegin', b'2'), + (ldap.MOD_ADD, 'nsSubStrEnd', b'2')]) + except ldap.LDAPError as e: + log.error('Failed to add substr lengths: error {}'.format(e.args[0]['desc'])) + assert False + + # restart the server to apply the indexing + topology_st.standalone.restart(timeout=10) + + # add a test user + UID = 'auser0' + USER_DN = 'uid=%s,%s' % (UID, SUFFIX) + try: + topology_st.standalone.add_s(Entry((USER_DN, { + 'objectclass': 'top person organizationalPerson inetOrgPerson'.split(), + 'cn': 'a user0', + 'sn': 'user0', + 'givenname': 'a', + 'mail': UID}))) + except ldap.LDAPError as e: + log.error('Failed to add ' + USER_DN + ': error {}'.format(e.args[0]['desc'])) + assert False + + entries = topology_st.standalone.search_s(SUFFIX, ldap.SCOPE_SUBTREE, '(uid=a*)') + assert len(entries) == 1 + + # restart the server to check the access log + topology_st.standalone.restart(timeout=10) + + cmdline = 'egrep %s %s | egrep "uid=a\*"' % (SUFFIX, topology_st.standalone.accesslog) + p = os.popen(cmdline, "r") + l0 = p.readline() + if l0 == "": + log.error('Search with "(uid=a*)" is not logged in ' + topology_st.standalone.accesslog) + assert False + else: + # regex = 
re.compile('\(conn=[0-9]* op=[0-9]*\) SRCH .*') + regex = re.compile(r'.*\s+(conn=\d+ op=\d+)\s+SRCH .*') + match = regex.match(l0) + log.info('match: %s' % match.group(1)) + cmdline = 'egrep "%s" %s | egrep "RESULT"' % (match.group(1), topology_st.standalone.accesslog) + p = os.popen(cmdline, "r") + l1 = p.readline() + if l1 == "": + log.error('Search result of "(uid=a*)" is not logged in ' + topology_st.standalone.accesslog) + assert False + else: + log.info('l1: %s' % l1) + regex = re.compile(r'.*nentries=(\d+)\s+.*') + match = regex.match(l1) + log.info('match: nentires=%s' % match.group(1)) + if match.group(1) == "0": + log.error('Entry uid=a* not found.') + assert False + else: + log.info('Entry uid=a* found.') + regex = re.compile(r'.*(notes=[AU]).*') + match = regex.match(l1) + if match: + log.error('%s - substr index was not used' % match.group(1)) + assert False + else: + log.info('Test case 0 - OK - substr index used') + + # clean up substr setting to UID_INDEX + try: + topology_st.standalone.modify_s(UID_INDEX, + [(ldap.MOD_DELETE, 'objectClass', b'extensibleObject'), + (ldap.MOD_DELETE, 'nsIndexType', b'sub'), + (ldap.MOD_DELETE, 'nsSubStrBegin', b'2'), + (ldap.MOD_DELETE, 'nsSubStrEnd', b'2')]) + except ldap.LDAPError as e: + log.error('Failed to delete substr lengths: error {}'.format(e.args[0]['desc'])) + assert False + + ''' + Set SubStr lengths to cn=uid,cn=index,... 
+ nsIndexType: sub + nsMatchingRule: nsSubStrBegin=2 + nsMatchingRule: nsSubStrEnd=2 + ''' + log.info('Test case 1') + # add substr setting to UID_INDEX + try: + topology_st.standalone.modify_s(UID_INDEX, + [(ldap.MOD_ADD, 'nsIndexType', b'sub'), + (ldap.MOD_ADD, 'nsMatchingRule', b'nssubstrbegin=2'), + (ldap.MOD_ADD, 'nsMatchingRule', b'nssubstrend=2')]) + except ldap.LDAPError as e: + log.error('Failed to add substr lengths: error {}'.format(e.args[0]['desc'])) + assert False + + # restart the server to apply the indexing + topology_st.standalone.restart(timeout=10) + + # add a test user + UID = 'buser1' + USER_DN = 'uid=%s,%s' % (UID, SUFFIX) + try: + topology_st.standalone.add_s(Entry((USER_DN, { + 'objectclass': 'top person organizationalPerson inetOrgPerson'.split(), + 'cn': 'b user1', + 'sn': 'user1', + 'givenname': 'b', + 'mail': UID}))) + except ldap.LDAPError as e: + log.error('Failed to add ' + USER_DN + ': error {}'.format(e.args[0]['desc'])) + assert False + + entries = topology_st.standalone.search_s(SUFFIX, ldap.SCOPE_SUBTREE, '(uid=b*)') + assert len(entries) == 1 + + # restart the server to check the access log + topology_st.standalone.restart(timeout=10) + + cmdline = 'egrep %s %s | egrep "uid=b\*"' % (SUFFIX, topology_st.standalone.accesslog) + p = os.popen(cmdline, "r") + l0 = p.readline() + if l0 == "": + log.error('Search with "(uid=b*)" is not logged in ' + topology_st.standalone.accesslog) + assert False + else: + # regex = re.compile('\(conn=[0-9]* op=[0-9]*\) SRCH .*') + regex = re.compile(r'.*\s+(conn=\d+ op=\d+)\s+SRCH .*') + match = regex.match(l0) + log.info('match: %s' % match.group(1)) + cmdline = 'egrep "%s" %s | egrep "RESULT"' % (match.group(1), topology_st.standalone.accesslog) + p = os.popen(cmdline, "r") + l1 = p.readline() + if l1 == "": + log.error('Search result of "(uid=*b)" is not logged in ' + topology_st.standalone.accesslog) + assert False + else: + log.info('l1: %s' % l1) + regex = re.compile(r'.*nentries=(\d+)\s+.*') 
+ match = regex.match(l1) + log.info('match: nentires=%s' % match.group(1)) + if match.group(1) == "0": + log.error('Entry uid=*b not found.') + assert False + else: + log.info('Entry uid=*b found.') + regex = re.compile(r'.*(notes=[AU]).*') + match = regex.match(l1) + if match: + log.error('%s - substr index was not used' % match.group(1)) + assert False + else: + log.info('Test case 1 - OK - substr index used') + + # clean up substr setting to UID_INDEX + try: + topology_st.standalone.modify_s(UID_INDEX, + [(ldap.MOD_DELETE, 'nsIndexType', b'sub'), + (ldap.MOD_DELETE, 'nsMatchingRule', b'nssubstrbegin=2'), + (ldap.MOD_DELETE, 'nsMatchingRule', b'nssubstrend=2')]) + except ldap.LDAPError as e: + log.error('Failed to delete substr lengths: error {}'.format(e.args[0]['desc'])) + assert False + + ''' + Set SubStr conflict formats/lengths to cn=uid,cn=index,... + objectClass: extensibleObject + nsIndexType: sub + nsMatchingRule: nsSubStrBegin=3 + nsMatchingRule: nsSubStrEnd=3 + nsSubStrBegin: 2 + nsSubStrEnd: 2 + nsSubStr{Begin,End} are honored. 
+ ''' + log.info('Test case 2') + + # add substr setting to UID_INDEX + try: + topology_st.standalone.modify_s(UID_INDEX, + [(ldap.MOD_ADD, 'nsIndexType', b'sub'), + (ldap.MOD_ADD, 'nsMatchingRule', b'nssubstrbegin=3'), + (ldap.MOD_ADD, 'nsMatchingRule', b'nssubstrend=3'), + (ldap.MOD_ADD, 'objectClass', b'extensibleObject'), + (ldap.MOD_ADD, 'nsSubStrBegin', b'2'), + (ldap.MOD_ADD, 'nsSubStrEnd', b'2')]) + except ldap.LDAPError as e: + log.error('Failed to add substr lengths: error {}'.format(e.args[0]['desc'])) + assert False + + # restart the server to apply the indexing + topology_st.standalone.restart(timeout=10) + + # add a test user + UID = 'cuser2' + USER_DN = 'uid=%s,%s' % (UID, SUFFIX) + try: + topology_st.standalone.add_s(Entry((USER_DN, { + 'objectclass': 'top person organizationalPerson inetOrgPerson'.split(), + 'cn': 'c user2', + 'sn': 'user2', + 'givenname': 'c', + 'mail': UID}))) + except ldap.LDAPError as e: + log.error('Failed to add ' + USER_DN + ': error {}'.format(e.args[0]['desc'])) + assert False + + entries = topology_st.standalone.search_s(SUFFIX, ldap.SCOPE_SUBTREE, '(uid=c*)') + assert len(entries) == 1 + + entries = topology_st.standalone.search_s(SUFFIX, ldap.SCOPE_SUBTREE, '(uid=*2)') + assert len(entries) == 1 + + # restart the server to check the access log + topology_st.standalone.restart(timeout=10) + + cmdline = 'egrep %s %s | egrep "uid=c\*"' % (SUFFIX, topology_st.standalone.accesslog) + p = os.popen(cmdline, "r") + l0 = p.readline() + if l0 == "": + log.error('Search with "(uid=c*)" is not logged in ' + topology_st.standalone.accesslog) + assert False + else: + # regex = re.compile('\(conn=[0-9]* op=[0-9]*\) SRCH .*') + regex = re.compile(r'.*\s+(conn=\d+ op=\d+)\s+SRCH .*') + match = regex.match(l0) + log.info('match: %s' % match.group(1)) + cmdline = 'egrep "%s" %s | egrep "RESULT"' % (match.group(1), topology_st.standalone.accesslog) + p = os.popen(cmdline, "r") + l1 = p.readline() + if l1 == "": + log.error('Search result 
of "(uid=c*)" is not logged in ' + topology_st.standalone.accesslog) + assert False + else: + log.info('l1: %s' % l1) + regex = re.compile(r'.*nentries=(\d+)\s+.*') + match = regex.match(l1) + log.info('match: nentires=%s' % match.group(1)) + if match.group(1) == "0": + log.error('Entry uid=c* not found.') + assert False + else: + log.info('Entry uid=c* found.') + regex = re.compile(r'.*(notes=[AU]).*') + match = regex.match(l1) + if match: + log.error('%s - substr index was not used' % match.group(1)) + assert False + else: + log.info('Test case 2-1 - OK - correct substr index used') + + cmdline = 'egrep %s %s | egrep "uid=\*2"' % (SUFFIX, topology_st.standalone.accesslog) + p = os.popen(cmdline, "r") + l0 = p.readline() + if l0 == "": + log.error('Search with "(uid=*2)" is not logged in ' + topology_st.standalone.accesslog) + assert False + else: + # regex = re.compile('\(conn=[0-9]* op=[0-9]*\) SRCH .*') + regex = re.compile(r'.*\s+(conn=\d+ op=\d+)\s+SRCH .*') + match = regex.match(l0) + log.info('match: %s' % match.group(1)) + cmdline = 'egrep "%s" %s | egrep "RESULT"' % (match.group(1), topology_st.standalone.accesslog) + p = os.popen(cmdline, "r") + l1 = p.readline() + if l1 == "": + log.error('Search result of "(uid=*2)" is not logged in ' + topology_st.standalone.accesslog) + assert False + else: + log.info('l1: %s' % l1) + regex = re.compile(r'.*nentries=(\d+)\s+.*') + match = regex.match(l1) + log.info('match: nentires=%s' % match.group(1)) + if match.group(1) == "0": + log.error('Entry uid=*2 not found.') + assert False + else: + log.info('Entry uid=*2 found.') + regex = re.compile(r'.*(notes=[AU]).*') + match = regex.match(l1) + if match: + log.error('%s - substr index was not used' % match.group(1)) + assert False + else: + log.info('Test case 2-2 - OK - correct substr index used') + + # clean up substr setting to UID_INDEX + try: + topology_st.standalone.modify_s(UID_INDEX, + [(ldap.MOD_DELETE, 'nsIndexType', b'sub'), + (ldap.MOD_DELETE, 
'nsMatchingRule', b'nssubstrbegin=3'), + (ldap.MOD_DELETE, 'nsMatchingRule', b'nssubstrend=3'), + (ldap.MOD_DELETE, 'objectClass', b'extensibleObject'), + (ldap.MOD_DELETE, 'nsSubStrBegin', b'2'), + (ldap.MOD_DELETE, 'nsSubStrEnd', b'2')]) + except ldap.LDAPError as e: + log.error('Failed to delete substr lengths: error {}'.format(e.args[0]['desc'])) + assert False + log.info('Testcase PASSED') + + +if __name__ == '__main__': + # Run isolated + # -s for DEBUG mode + CURRENT_FILE = os.path.realpath(__file__) + pytest.main("-s %s" % CURRENT_FILE) diff --git a/dirsrvtests/tests/tickets/ticket48170_test.py b/dirsrvtests/tests/tickets/ticket48170_test.py new file mode 100644 index 0000000..e3c8a27 --- /dev/null +++ b/dirsrvtests/tests/tickets/ticket48170_test.py @@ -0,0 +1,43 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2016 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. +# --- END COPYRIGHT BLOCK --- +# +import pytest +from lib389.utils import * +from lib389.topologies import topology_st + +pytestmark = pytest.mark.tier2 + +logging.getLogger(__name__).setLevel(logging.DEBUG) +log = logging.getLogger(__name__) + + +def test_ticket48170(topology_st): + ''' + Attempt to add a nsIndexType wikth an invalid value: "eq,pres" + ''' + + INDEX_DN = 'cn=cn,cn=index,cn=userroot,cn=ldbm database,cn=plugins,cn=config' + REJECTED = False + try: + topology_st.standalone.modify_s(INDEX_DN, [(ldap.MOD_ADD, 'nsINdexType', b'eq,pres')]) + except ldap.UNWILLING_TO_PERFORM: + log.info('Index update correctly rejected') + REJECTED = True + + if not REJECTED: + log.fatal('Invalid nsIndexType value was incorrectly accepted.') + assert False + + log.info('Test complete') + + +if __name__ == '__main__': + # Run isolated + # -s for DEBUG mode + CURRENT_FILE = os.path.realpath(__file__) + pytest.main("-s %s" % CURRENT_FILE) diff --git a/dirsrvtests/tests/tickets/ticket48194_test.py 
b/dirsrvtests/tests/tickets/ticket48194_test.py new file mode 100644 index 0000000..4431ebd --- /dev/null +++ b/dirsrvtests/tests/tickets/ticket48194_test.py @@ -0,0 +1,352 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2016 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. +# --- END COPYRIGHT BLOCK --- +# +import logging +import subprocess +import time + +import ldap +import pytest +from lib389 import Entry +from lib389._constants import * +from lib389.topologies import topology_st +from lib389.nss_ssl import NssSsl + +log = logging.getLogger(__name__) + +CONFIG_DN = 'cn=config' +from lib389.utils import * + +# Skip on older versions +pytestmark = [pytest.mark.tier2, + pytest.mark.skipif(ds_is_older('1.3.4'), reason="Not implemented")] +ENCRYPTION_DN = 'cn=encryption,%s' % CONFIG_DN +RSA = 'RSA' +RSA_DN = 'cn=%s,%s' % (RSA, ENCRYPTION_DN) +LDAPSPORT = str(SECUREPORT_STANDALONE) +SERVERCERT = 'Server-Cert' +plus_all_ecount = 0 +plus_all_dcount = 0 +plus_all_ecount_noweak = 0 +plus_all_dcount_noweak = 0 + + +def _header(topology_st, label): + topology_st.standalone.log.info("\n\n###############################################") + topology_st.standalone.log.info("####### %s" % label) + topology_st.standalone.log.info("###############################################") + + +def test_init(topology_st): + """ + Generate self signed cert and import it to the DS cert db. 
+ Enable SSL + """ + _header(topology_st, 'Testing Ticket 48194 - harden the list of ciphers available by default') + + nss_ssl = NssSsl(dbpath=topology_st.standalone.get_cert_dir()) + nss_ssl.reinit() + nss_ssl.create_rsa_ca() + nss_ssl.create_rsa_key_and_cert() + + log.info("\n######################### enable SSL in the directory server with all ciphers ######################\n") + topology_st.standalone.simple_bind_s(DN_DM, PASSWORD) + topology_st.standalone.modify_s(ENCRYPTION_DN, [(ldap.MOD_REPLACE, 'nsSSL3', b'off'), + (ldap.MOD_REPLACE, 'nsTLS1', b'on'), + (ldap.MOD_REPLACE, 'nsSSLClientAuth', b'allowed'), + (ldap.MOD_REPLACE, 'allowWeakCipher', b'on'), + (ldap.MOD_REPLACE, 'nsSSL3Ciphers', b'+all')]) + + topology_st.standalone.modify_s(CONFIG_DN, [(ldap.MOD_REPLACE, 'nsslapd-security', b'on'), + (ldap.MOD_REPLACE, 'nsslapd-ssl-check-hostname', b'off'), + (ldap.MOD_REPLACE, 'nsslapd-secureport', ensure_bytes(LDAPSPORT))]) + + if ds_is_older('1.4.0'): + topology_st.standalone.add_s(Entry((RSA_DN, {'objectclass': "top nsEncryptionModule".split(), + 'cn': RSA, + 'nsSSLPersonalitySSL': SERVERCERT, + 'nsSSLToken': 'internal (software)', + 'nsSSLActivation': 'on'}))) + + +def connectWithOpenssl(topology_st, cipher, expect): + """ + Connect with the given cipher + Condition: + If expect is True, the handshake should be successful. + If expect is False, the handshake should be refused with + access log: "Cannot communicate securely with peer: + no common encryption algorithm(s)." 
+ """ + log.info("Testing %s -- expect to handshake %s", cipher, "successfully" if expect else "failed") + + myurl = 'localhost:%s' % LDAPSPORT + cmdline = ['/usr/bin/openssl', 's_client', '-connect', myurl, '-cipher', cipher] + + strcmdline = " ".join(cmdline) + log.info("Running cmdline: %s", strcmdline) + + try: + proc = subprocess.Popen(cmdline, stdout=subprocess.PIPE, stdin=subprocess.PIPE, stderr=subprocess.STDOUT) + except ValueError: + log.info("%s failed: %s", cmdline, ValueError) + proc.kill() + + while True: + l = proc.stdout.readline() + if l == b"": + break + if b'Cipher is' in l: + log.info("Found: %s", l) + if expect: + if b'(NONE)' in l: + assert False + else: + proc.stdin.close() + assert True + else: + if b'(NONE)' in l: + assert True + else: + proc.stdin.close() + assert False + + +def test_run_0(topology_st): + """ + Check nsSSL3Ciphers: +all + All ciphers are enabled except null. + Note: allowWeakCipher: on + """ + _header(topology_st, 'Test Case 1 - Check the ciphers availability for "+all"; allowWeakCipher: on') + + topology_st.standalone.simple_bind_s(DN_DM, PASSWORD) + topology_st.standalone.modify_s(CONFIG_DN, [(ldap.MOD_REPLACE, 'nsslapd-errorlog-level', b'64')]) + + log.info("\n######################### Restarting the server ######################\n") + topology_st.standalone.restart(timeout=120) + + connectWithOpenssl(topology_st, 'DES-CBC3-SHA', True) + connectWithOpenssl(topology_st, 'AES256-SHA256', True) + + +def test_run_1(topology_st): + """ + Check nsSSL3Ciphers: +all + All ciphers are enabled except null. + Note: default allowWeakCipher (i.e., off) for +all + """ + _header(topology_st, 'Test Case 2 - Check the ciphers availability for "+all" with default allowWeakCiphers') + + topology_st.standalone.simple_bind_s(DN_DM, PASSWORD) + topology_st.standalone.modify_s(CONFIG_DN, [(ldap.MOD_REPLACE, 'nsslapd-errorlog-level', b'64')]) + # Make sure allowWeakCipher is not set. 
+ topology_st.standalone.modify_s(ENCRYPTION_DN, [(ldap.MOD_DELETE, 'allowWeakCipher', None)]) + + log.info("\n######################### Restarting the server ######################\n") + topology_st.standalone.stop(timeout=10) + os.system('mv %s %s.48194_0' % (topology_st.standalone.errlog, topology_st.standalone.errlog)) + os.system('touch %s' % (topology_st.standalone.errlog)) + time.sleep(2) + topology_st.standalone.start(timeout=120) + + connectWithOpenssl(topology_st, 'DES-CBC3-SHA', False) + connectWithOpenssl(topology_st, 'AES256-SHA256', True) + + +def test_run_2(topology_st): + """ + Check nsSSL3Ciphers: +rsa_aes_128_sha,+rsa_aes_256_sha + rsa_aes_128_sha, tls_rsa_aes_128_sha, rsa_aes_256_sha, tls_rsa_aes_256_sha are enabled. + default allowWeakCipher + """ + _header(topology_st, + 'Test Case 3 - Check the ciphers availability for "+rsa_aes_128_sha,+rsa_aes_256_sha" with default allowWeakCipher') + + topology_st.standalone.simple_bind_s(DN_DM, PASSWORD) + topology_st.standalone.modify_s(ENCRYPTION_DN, + [(ldap.MOD_REPLACE, 'nsSSL3Ciphers', b'+rsa_aes_128_sha,+rsa_aes_256_sha')]) + + log.info("\n######################### Restarting the server ######################\n") + topology_st.standalone.stop(timeout=10) + os.system('mv %s %s.48194_1' % (topology_st.standalone.errlog, topology_st.standalone.errlog)) + os.system('touch %s' % (topology_st.standalone.errlog)) + time.sleep(2) + topology_st.standalone.start(timeout=120) + + connectWithOpenssl(topology_st, 'DES-CBC3-SHA', False) + connectWithOpenssl(topology_st, 'AES256-SHA256', False) + connectWithOpenssl(topology_st, 'AES128-SHA', True) + connectWithOpenssl(topology_st, 'AES256-SHA', True) + + +def test_run_3(topology_st): + """ + Check nsSSL3Ciphers: -all + All ciphers are disabled. 
+ default allowWeakCipher + """ + _header(topology_st, 'Test Case 4 - Check the ciphers availability for "-all"') + + topology_st.standalone.simple_bind_s(DN_DM, PASSWORD) + topology_st.standalone.modify_s(ENCRYPTION_DN, [(ldap.MOD_REPLACE, 'nsSSL3Ciphers', b'-all')]) + + log.info("\n######################### Restarting the server ######################\n") + topology_st.standalone.stop(timeout=10) + os.system('mv %s %s.48194_2' % (topology_st.standalone.errlog, topology_st.standalone.errlog)) + os.system('touch %s' % (topology_st.standalone.errlog)) + time.sleep(1) + topology_st.standalone.start(timeout=120) + + connectWithOpenssl(topology_st, 'DES-CBC3-SHA', False) + connectWithOpenssl(topology_st, 'AES256-SHA256', False) + + +def test_run_4(topology_st): + """ + Check no nsSSL3Ciphers + Default ciphers are enabled. + default allowWeakCipher + """ + _header(topology_st, 'Test Case 5 - Check no nsSSL3Ciphers (-all) with default allowWeakCipher') + + topology_st.standalone.simple_bind_s(DN_DM, PASSWORD) + topology_st.standalone.modify_s(ENCRYPTION_DN, [(ldap.MOD_DELETE, 'nsSSL3Ciphers', b'-all')]) + + log.info("\n######################### Restarting the server ######################\n") + topology_st.standalone.stop(timeout=10) + os.system('mv %s %s.48194_3' % (topology_st.standalone.errlog, topology_st.standalone.errlog)) + os.system('touch %s' % (topology_st.standalone.errlog)) + time.sleep(2) + topology_st.standalone.start(timeout=120) + + connectWithOpenssl(topology_st, 'DES-CBC3-SHA', False) + connectWithOpenssl(topology_st, 'AES256-SHA256', True) + + +def test_run_5(topology_st): + """ + Check nsSSL3Ciphers: default + Default ciphers are enabled. 
+ default allowWeakCipher
+ """
+ _header(topology_st, 'Test Case 6 - Check default nsSSL3Ciphers (default setting) with default allowWeakCipher')
+
+ topology_st.standalone.simple_bind_s(DN_DM, PASSWORD)
+ topology_st.standalone.modify_s(ENCRYPTION_DN, [(ldap.MOD_REPLACE, 'nsSSL3Ciphers', b'default')])
+
+ log.info("\n######################### Restarting the server ######################\n")
+ topology_st.standalone.stop(timeout=10)
+ os.system('mv %s %s.48194_4' % (topology_st.standalone.errlog, topology_st.standalone.errlog))
+ os.system('touch %s' % (topology_st.standalone.errlog))
+ time.sleep(2)
+ topology_st.standalone.start(timeout=120)
+
+ connectWithOpenssl(topology_st, 'DES-CBC3-SHA', False)
+ connectWithOpenssl(topology_st, 'AES256-SHA256', True)
+
+
+def test_run_6(topology_st):
+ """
+ Check nsSSL3Ciphers: +all,-TLS_RSA_WITH_AES_256_CBC_SHA256
+ All ciphers except TLS_RSA_WITH_AES_256_CBC_SHA256 are enabled.
+ default allowWeakCipher
+ """
+ _header(topology_st,
+ 'Test Case 7 - Check nsSSL3Ciphers: +all,-TLS_RSA_WITH_AES_256_CBC_SHA256 with default allowWeakCipher')
+
+ topology_st.standalone.simple_bind_s(DN_DM, PASSWORD)
+ topology_st.standalone.modify_s(ENCRYPTION_DN,
+ [(ldap.MOD_REPLACE, 'nsSSL3Ciphers', b'+all,-TLS_RSA_WITH_AES_256_CBC_SHA256')])
+
+ log.info("\n######################### Restarting the server ######################\n")
+ topology_st.standalone.stop(timeout=10)
+ os.system('mv %s %s.48194_5' % (topology_st.standalone.errlog, topology_st.standalone.errlog))
+ os.system('touch %s' % (topology_st.standalone.errlog))
+ time.sleep(2)
+ topology_st.standalone.start(timeout=120)
+
+ connectWithOpenssl(topology_st, 'DES-CBC3-SHA', False)
+ connectWithOpenssl(topology_st, 'AES256-SHA256', False)
+ connectWithOpenssl(topology_st, 'AES128-SHA', True)
+
+
+def test_run_8(topology_st):
+ """
+ Check nsSSL3Ciphers: default
+ allowWeakCipher: off
+ Strong Default ciphers are enabled.
+ """ + _header(topology_st, 'Test Case 9 - Check default nsSSL3Ciphers (default setting + allowWeakCipher: off)') + + topology_st.standalone.simple_bind_s(DN_DM, PASSWORD) + topology_st.standalone.modify_s(ENCRYPTION_DN, [(ldap.MOD_REPLACE, 'nsSSL3Ciphers', b'default'), + (ldap.MOD_REPLACE, 'allowWeakCipher', b'off')]) + + log.info("\n######################### Restarting the server ######################\n") + topology_st.standalone.stop(timeout=10) + os.system('mv %s %s.48194_7' % (topology_st.standalone.errlog, topology_st.standalone.errlog)) + os.system('touch %s' % (topology_st.standalone.errlog)) + time.sleep(2) + topology_st.standalone.start(timeout=120) + + connectWithOpenssl(topology_st, 'DES-CBC3-SHA', False) + connectWithOpenssl(topology_st, 'AES256-SHA256', True) + + +def test_run_9(topology_st): + """ + Check no nsSSL3Ciphers + Default ciphers are enabled. + allowWeakCipher: on + nsslapd-errorlog-level: 0 + """ + _header(topology_st, + 'Test Case 10 - Check no nsSSL3Ciphers (default setting) with no errorlog-level & allowWeakCipher on') + + topology_st.standalone.simple_bind_s(DN_DM, PASSWORD) + topology_st.standalone.modify_s(ENCRYPTION_DN, [(ldap.MOD_REPLACE, 'nsSSL3Ciphers', None), + (ldap.MOD_REPLACE, 'allowWeakCipher', b'on')]) + topology_st.standalone.modify_s(CONFIG_DN, [(ldap.MOD_REPLACE, 'nsslapd-errorlog-level', None)]) + + log.info("\n######################### Restarting the server ######################\n") + topology_st.standalone.stop(timeout=10) + os.system('mv %s %s.48194_8' % (topology_st.standalone.errlog, topology_st.standalone.errlog)) + os.system('touch %s' % (topology_st.standalone.errlog)) + time.sleep(2) + topology_st.standalone.start(timeout=120) + + connectWithOpenssl(topology_st, 'DES-CBC3-SHA', True) + connectWithOpenssl(topology_st, 'AES256-SHA256', True) + + +def test_run_11(topology_st): + """ + Check nsSSL3Ciphers: +fortezza + SSL_GetImplementedCiphers does not return this as a secuire cipher suite + """ + 
_header(topology_st, 'Test Case 12 - Check nsSSL3Ciphers: +fortezza, which is not supported') + + topology_st.standalone.simple_bind_s(DN_DM, PASSWORD) + topology_st.standalone.modify_s(ENCRYPTION_DN, [(ldap.MOD_REPLACE, 'nsSSL3Ciphers', b'+fortezza')]) + + log.info("\n######################### Restarting the server ######################\n") + topology_st.standalone.stop(timeout=10) + os.system('mv %s %s.48194_10' % (topology_st.standalone.errlog, topology_st.standalone.errlog)) + os.system('touch %s' % (topology_st.standalone.errlog)) + time.sleep(1) + topology_st.standalone.start(timeout=120) + + connectWithOpenssl(topology_st, 'DES-CBC3-SHA', False) + connectWithOpenssl(topology_st, 'AES256-SHA256', False) + + +if __name__ == '__main__': + # Run isolated + # -s for DEBUG mode + CURRENT_FILE = os.path.realpath(__file__) + pytest.main("-s %s" % CURRENT_FILE) diff --git a/dirsrvtests/tests/tickets/ticket48212_test.py b/dirsrvtests/tests/tickets/ticket48212_test.py new file mode 100644 index 0000000..3746859 --- /dev/null +++ b/dirsrvtests/tests/tickets/ticket48212_test.py @@ -0,0 +1,142 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2022 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. 
+# --- END COPYRIGHT BLOCK --- +# +import pytest +from lib389.tasks import * +from lib389.utils import * +from lib389.topologies import topology_st + +from lib389._constants import DN_DM, PASSWORD, DEFAULT_SUFFIX, DATA_DIR + +pytestmark = pytest.mark.tier2 + +log = logging.getLogger(__name__) + +MYSUFFIX = 'dc=example,dc=com' +MYSUFFIXBE = 'userRoot' +_MYLDIF = 'example1k_posix.ldif' +UIDNUMBERDN = "cn=uidnumber,cn=index,cn=userroot,cn=ldbm database,cn=plugins,cn=config" + + +def runDbVerify(topology_st): + topology_st.standalone.log.info("\n\n +++++ dbverify +++++\n") + sbin_dir = get_sbin_dir() + dbverifyCMD = sbin_dir + "/dbverify -Z " + topology_st.standalone.serverid + " -V" + dbverifyOUT = os.popen(dbverifyCMD, "r") + topology_st.standalone.log.info("Running %s" % dbverifyCMD) + running = True + error = False + while running: + l = dbverifyOUT.readline() + if l == "": + running = False + elif "libdb:" in l: + running = False + error = True + topology_st.standalone.log.info("%s" % l) + elif "verify failed" in l: + error = True + running = False + topology_st.standalone.log.info("%s" % l) + + if error: + topology_st.standalone.log.fatal("dbverify failed") + assert False + else: + topology_st.standalone.log.info("dbverify passed") + + +def reindexUidNumber(topology_st): + topology_st.standalone.log.info("\n\n +++++ reindex uidnumber +++++\n") + try: + args = {TASK_WAIT: True} + topology_st.standalone.tasks.reindex(suffix=MYSUFFIX, attrname='uidNumber', args=args) + except: + topology_st.standalone.log.fatal("Reindexing failed") + assert False + + +def test_ticket48212(topology_st): + """ + Import posixAccount entries. + Index uidNumber + add nsMatchingRule: integerOrderingMatch + run dbverify to see if it reports the db corruption or not + delete nsMatchingRule: integerOrderingMatch + run dbverify to see if it reports the db corruption or not + if no corruption is reported, the bug fix was verified. 
+ """ + log.info( + 'Testing Ticket 48212 - Dynamic nsMatchingRule changes had no effect on the attrinfo thus following reindexing, as well.') + + # bind as directory manager + topology_st.standalone.log.info("Bind as %s" % DN_DM) + topology_st.standalone.simple_bind_s(DN_DM, PASSWORD) + + data_dir_path = topology_st.standalone.getDir(__file__, DATA_DIR) + ldif_file = f"{data_dir_path}ticket48212/{_MYLDIF}" + try: + ldif_dir = topology_st.standalone.get_ldif_dir() + shutil.copy(ldif_file, ldif_dir) + ldif_file = ldif_dir + '/' + _MYLDIF + except: + log.fatal('Failed to copy ldif to instance ldif dir') + assert False + + topology_st.standalone.log.info( + "\n\n######################### Import Test data (%s) ######################\n" % ldif_file) + args = {TASK_WAIT: True} + importTask = Tasks(topology_st.standalone) + importTask.importLDIF(MYSUFFIX, MYSUFFIXBE, ldif_file, args) + args = {TASK_WAIT: True} + + runDbVerify(topology_st) + + topology_st.standalone.log.info("\n\n######################### Add index by uidnumber ######################\n") + try: + topology_st.standalone.add_s(Entry((UIDNUMBERDN, {'objectclass': "top nsIndex".split(), + 'cn': 'uidnumber', + 'nsSystemIndex': 'false', + 'nsIndexType': "pres eq".split()}))) + except ValueError: + topology_st.standalone.log.fatal("add_s failed: %s", ValueError) + + topology_st.standalone.log.info("\n\n######################### reindexing... ######################\n") + reindexUidNumber(topology_st) + + runDbVerify(topology_st) + + topology_st.standalone.log.info("\n\n######################### Add nsMatchingRule ######################\n") + try: + topology_st.standalone.modify_s(UIDNUMBERDN, [(ldap.MOD_ADD, 'nsMatchingRule', b'integerOrderingMatch')]) + except ValueError: + topology_st.standalone.log.fatal("modify_s failed: %s", ValueError) + + topology_st.standalone.log.info("\n\n######################### reindexing... 
######################\n") + reindexUidNumber(topology_st) + + runDbVerify(topology_st) + + topology_st.standalone.log.info("\n\n######################### Delete nsMatchingRule ######################\n") + try: + topology_st.standalone.modify_s(UIDNUMBERDN, [(ldap.MOD_DELETE, 'nsMatchingRule', b'integerOrderingMatch')]) + except ValueError: + topology_st.standalone.log.fatal("modify_s failed: %s", ValueError) + + reindexUidNumber(topology_st) + + runDbVerify(topology_st) + + log.info('Testcase PASSED') + + +if __name__ == '__main__': + # Run isolated + # -s for DEBUG mode + CURRENT_FILE = os.path.realpath(__file__) + pytest.main("-s %s" % CURRENT_FILE) diff --git a/dirsrvtests/tests/tickets/ticket48214_test.py b/dirsrvtests/tests/tickets/ticket48214_test.py new file mode 100644 index 0000000..1d15239 --- /dev/null +++ b/dirsrvtests/tests/tickets/ticket48214_test.py @@ -0,0 +1,113 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2022 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. 
+# --- END COPYRIGHT BLOCK --- +# +import logging + +import pytest +from lib389.tasks import * +from lib389.topologies import topology_st + +from lib389._constants import DEFAULT_SUFFIX, DN_DM, PASSWORD + +pytestmark = pytest.mark.tier2 + +log = logging.getLogger(__name__) + +MYSUFFIX = 'dc=example,dc=com' +MYSUFFIXBE = 'userRoot' + + +def getMaxBerSizeFromDseLdif(topology_st): + topology_st.standalone.log.info(" +++++ Get maxbersize from dse.ldif +++++\n") + dse_ldif = topology_st.standalone.confdir + '/dse.ldif' + grepMaxBerCMD = "egrep nsslapd-maxbersize " + dse_ldif + topology_st.standalone.log.info(" Run CMD: %s\n" % grepMaxBerCMD) + grepMaxBerOUT = os.popen(grepMaxBerCMD, "r") + running = True + maxbersize = -1 + while running: + l = grepMaxBerOUT.readline() + if l == "": + topology_st.standalone.log.info(" Empty: %s\n" % l) + running = False + elif "nsslapd-maxbersize:" in l.lower(): + running = False + fields = l.split() + if len(fields) >= 2: + maxbersize = fields[1] + topology_st.standalone.log.info(" Right format - %s %s\n" % (fields[0], fields[1])) + else: + topology_st.standalone.log.info(" Wrong format - %s\n" % l) + else: + topology_st.standalone.log.info(" Else?: %s\n" % l) + return maxbersize + + +def checkMaxBerSize(topology_st): + topology_st.standalone.log.info(" +++++ Check Max Ber Size +++++\n") + maxbersizestr = getMaxBerSizeFromDseLdif(topology_st) + maxbersize = int(maxbersizestr) + isdefault = True + defaultvalue = 2097152 + if maxbersize < 0: + topology_st.standalone.log.info(" No nsslapd-maxbersize found in dse.ldif\n") + elif maxbersize == 0: + topology_st.standalone.log.info(" nsslapd-maxbersize: %d\n" % maxbersize) + else: + isdefault = False + topology_st.standalone.log.info(" nsslapd-maxbersize: %d\n" % maxbersize) + + try: + entry = topology_st.standalone.search_s('cn=config', ldap.SCOPE_BASE, + "(cn=*)", + ['nsslapd-maxbersize']) + if entry: + searchedsize = entry[0].getValue('nsslapd-maxbersize') + 
topology_st.standalone.log.info(" ldapsearch returned nsslapd-maxbersize: %s\n" % searchedsize) + else: + topology_st.standalone.log.fatal('ERROR: cn=config is not found?') + assert False + except ldap.LDAPError as e: + topology_st.standalone.log.error('ERROR: Failed to search for user entry: ' + e.message['desc']) + assert False + + if isdefault: + topology_st.standalone.log.info(" Checking %d vs %d\n" % (int(searchedsize), defaultvalue)) + assert int(searchedsize) == defaultvalue + + +def test_ticket48214_run(topology_st): + """ + Check ldapsearch returns the correct maxbersize when it is not explicitly set. + """ + log.info('Testing Ticket 48214 - ldapsearch on nsslapd-maxbersize returns 0 instead of current value') + + # bind as directory manager + topology_st.standalone.log.info("Bind as %s" % DN_DM) + topology_st.standalone.simple_bind_s(DN_DM, PASSWORD) + + topology_st.standalone.log.info("\n\n######################### Out of Box ######################\n") + checkMaxBerSize(topology_st) + + topology_st.standalone.log.info("\n\n######################### Add nsslapd-maxbersize: 0 ######################\n") + topology_st.standalone.modify_s('cn=config', [(ldap.MOD_REPLACE, 'nsslapd-maxbersize', b'0')]) + checkMaxBerSize(topology_st) + + topology_st.standalone.log.info( + "\n\n######################### Add nsslapd-maxbersize: 10000 ######################\n") + topology_st.standalone.modify_s('cn=config', [(ldap.MOD_REPLACE, 'nsslapd-maxbersize', b'10000')]) + checkMaxBerSize(topology_st) + + topology_st.standalone.log.info("ticket48214 was successfully verified.") + + +if __name__ == '__main__': + # Run isolated + # -s for DEBUG mode + CURRENT_FILE = os.path.realpath(__file__) + pytest.main("-s %s" % CURRENT_FILE) diff --git a/dirsrvtests/tests/tickets/ticket48228_test.py b/dirsrvtests/tests/tickets/ticket48228_test.py new file mode 100644 index 0000000..530cd51 --- /dev/null +++ b/dirsrvtests/tests/tickets/ticket48228_test.py @@ -0,0 +1,274 @@ +# --- BEGIN 
COPYRIGHT BLOCK --- +# Copyright (C) 2016 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. +# --- END COPYRIGHT BLOCK --- +# +import logging +import time + +import pytest +from lib389.tasks import * +from lib389.topologies import topology_st + +from lib389._constants import DEFAULT_SUFFIX, DN_DM, PASSWORD, DN_CONFIG + +pytestmark = pytest.mark.tier2 + +log = logging.getLogger(__name__) + +# Assuming DEFAULT_SUFFIX is "dc=example,dc=com", otherwise it does not work... :( +SUBTREE_CONTAINER = 'cn=nsPwPolicyContainer,' + DEFAULT_SUFFIX +SUBTREE_PWPDN = 'cn=nsPwPolicyEntry,' + DEFAULT_SUFFIX +SUBTREE_PWP = 'cn=cn\3DnsPwPolicyEntry\2Cdc\3Dexample\2Cdc\3Dcom,' + SUBTREE_CONTAINER +SUBTREE_COS_TMPLDN = 'cn=nsPwTemplateEntry,' + DEFAULT_SUFFIX +SUBTREE_COS_TMPL = 'cn=cn\3DnsPwTemplateEntry\2Cdc\3Dexample\2Cdc\3Dcom,' + SUBTREE_CONTAINER +SUBTREE_COS_DEF = 'cn=nsPwPolicy_CoS,' + DEFAULT_SUFFIX + +USER1_DN = 'uid=user1,' + DEFAULT_SUFFIX +USER2_DN = 'uid=user2,' + DEFAULT_SUFFIX + + +def set_global_pwpolicy(topology_st, inhistory): + log.info(" +++++ Enable global password policy +++++\n") + topology_st.standalone.simple_bind_s(DN_DM, PASSWORD) + # Enable password policy + try: + topology_st.standalone.modify_s(DN_CONFIG, [(ldap.MOD_REPLACE, 'nsslapd-pwpolicy-local', b'on')]) + except ldap.LDAPError as e: + log.error('Failed to set pwpolicy-local: error ' + e.message['desc']) + assert False + + log.info(" Set global password history on\n") + try: + topology_st.standalone.modify_s(DN_CONFIG, [(ldap.MOD_REPLACE, 'passwordHistory', b'on')]) + except ldap.LDAPError as e: + log.error('Failed to set passwordHistory: error ' + e.message['desc']) + assert False + + log.info(" Set global passwords in history\n") + try: + count = "%d" % inhistory + topology_st.standalone.modify_s(DN_CONFIG, [(ldap.MOD_REPLACE, 'passwordInHistory', count.encode())]) + except ldap.LDAPError as e: + log.error('Failed to set 
passwordInHistory: error ' + e.message['desc']) + assert False + time.sleep(1) + + +def set_subtree_pwpolicy(topology_st): + log.info(" +++++ Enable subtree level password policy +++++\n") + topology_st.standalone.simple_bind_s(DN_DM, PASSWORD) + log.info(" Add the container") + try: + topology_st.standalone.add_s(Entry((SUBTREE_CONTAINER, {'objectclass': 'top nsContainer'.split(), + 'cn': 'nsPwPolicyContainer'}))) + except ldap.LDAPError as e: + log.error('Failed to add subtree container: error ' + e.message['desc']) + assert False + + log.info(" Add the password policy subentry {passwordHistory: on, passwordInHistory: 6}") + try: + topology_st.standalone.add_s(Entry((SUBTREE_PWP, {'objectclass': 'top ldapsubentry passwordpolicy'.split(), + 'cn': SUBTREE_PWPDN, + 'passwordMustChange': 'off', + 'passwordExp': 'off', + 'passwordHistory': 'on', + 'passwordInHistory': '6', + 'passwordMinAge': '0', + 'passwordChange': 'on', + 'passwordStorageScheme': 'clear'}))) + except ldap.LDAPError as e: + log.error('Failed to add passwordpolicy: error ' + e.message['desc']) + assert False + + log.info(" Add the COS template") + try: + topology_st.standalone.add_s( + Entry((SUBTREE_COS_TMPL, {'objectclass': 'top ldapsubentry costemplate extensibleObject'.split(), + 'cn': SUBTREE_PWPDN, + 'cosPriority': '1', + 'cn': SUBTREE_COS_TMPLDN, + 'pwdpolicysubentry': SUBTREE_PWP}))) + except ldap.LDAPError as e: + log.error('Failed to add COS template: error ' + e.message['desc']) + assert False + + log.info(" Add the COS definition") + try: + topology_st.standalone.add_s( + Entry((SUBTREE_COS_DEF, {'objectclass': 'top ldapsubentry cosSuperDefinition cosPointerDefinition'.split(), + 'cn': SUBTREE_PWPDN, + 'costemplatedn': SUBTREE_COS_TMPL, + 'cosAttribute': 'pwdpolicysubentry default operational-default'}))) + except ldap.LDAPError as e: + log.error('Failed to add COS def: error ' + e.message['desc']) + assert False + time.sleep(1) + + +def check_passwd_inhistory(topology_st, user, cpw, 
passwd): + + inhistory = 0 + log.info(" Bind as {%s,%s}" % (user, cpw)) + topology_st.standalone.simple_bind_s(user, cpw) + time.sleep(1) + try: + topology_st.standalone.modify_s(user, [(ldap.MOD_REPLACE, 'userpassword', passwd.encode())]) + except ldap.LDAPError as e: + log.info(' The password ' + passwd + ' of user' + USER1_DN + ' in history: error {0}'.format(e)) + inhistory = 1 + time.sleep(1) + return inhistory + + +def update_passwd(topology_st, user, passwd, times): + # Set the default value + cpw = passwd + for i in range(times): + log.info(" Bind as {%s,%s}" % (user, cpw)) + topology_st.standalone.simple_bind_s(user, cpw) + # Now update the value for this iter. + cpw = 'password%d' % i + try: + topology_st.standalone.modify_s(user, [(ldap.MOD_REPLACE, 'userpassword', cpw.encode())]) + except ldap.LDAPError as e: + log.fatal( + 'test_ticket48228: Failed to update the password ' + cpw + ' of user ' + user + ': error ' + e.message[ + 'desc']) + assert False + + # checking the first password, which is supposed to be in history + inhistory = check_passwd_inhistory(topology_st, user, cpw, passwd) + assert inhistory == 1 + + +def test_ticket48228_test_global_policy(topology_st): + """ + Check global password policy + """ + log.info(' Set inhistory = 6') + set_global_pwpolicy(topology_st, 6) + + log.info(' Bind as directory manager') + log.info("Bind as %s" % DN_DM) + topology_st.standalone.simple_bind_s(DN_DM, PASSWORD) + + log.info(' Add an entry' + USER1_DN) + try: + topology_st.standalone.add_s( + Entry((USER1_DN, {'objectclass': "top person organizationalPerson inetOrgPerson".split(), + 'sn': '1', + 'cn': 'user 1', + 'uid': 'user1', + 'givenname': 'user', + 'mail': 'user1@example.com', + 'userpassword': 'password'}))) + except ldap.LDAPError as e: + log.fatal('test_ticket48228: Failed to add user' + USER1_DN + ': error ' + e.message['desc']) + assert False + + log.info(' Update the password of ' + USER1_DN + ' 6 times') + update_passwd(topology_st, USER1_DN, 
'password', 6) + + log.info(' Set inhistory = 4') + set_global_pwpolicy(topology_st, 4) + + log.info(' checking the first password, which is supposed NOT to be in history any more') + cpw = 'password%d' % 5 + tpw = 'password' + inhistory = check_passwd_inhistory(topology_st, USER1_DN, cpw, tpw) + assert inhistory == 0 + + log.info(' checking the second password, which is supposed NOT to be in history any more') + cpw = tpw + tpw = 'password%d' % 0 + inhistory = check_passwd_inhistory(topology_st, USER1_DN, cpw, tpw) + assert inhistory == 0 + + log.info(' checking the third password, which is supposed NOT to be in history any more') + cpw = tpw + tpw = 'password%d' % 1 + inhistory = check_passwd_inhistory(topology_st, USER1_DN, cpw, tpw) + assert inhistory == 0 + + log.info(' checking the sixth password, which is supposed to be in history') + cpw = tpw + tpw = 'password%d' % 5 + inhistory = check_passwd_inhistory(topology_st, USER1_DN, cpw, tpw) + assert inhistory == 1 + + log.info("Global policy was successfully verified.") + + +def text_ticket48228_text_subtree_policy(topology_st): + """ + Check subtree level password policy + """ + + log.info(' Set inhistory = 6') + set_subtree_pwpolicy(topology_st) + + log.info(' Bind as directory manager') + log.info("Bind as %s" % DN_DM) + topology_st.standalone.simple_bind_s(DN_DM, PASSWORD) + + log.info(' Add an entry' + USER2_DN) + try: + topology_st.standalone.add_s( + Entry((USER2_DN, {'objectclass': "top person organizationalPerson inetOrgPerson".split(), + 'sn': '2', + 'cn': 'user 2', + 'uid': 'user2', + 'givenname': 'user', + 'mail': 'user2@example.com', + 'userpassword': 'password'}))) + except ldap.LDAPError as e: + log.fatal('test_ticket48228: Failed to add user' + USER2_DN + ': error ' + e.message['desc']) + assert False + + log.info(' Update the password of ' + USER2_DN + ' 6 times') + update_passwd(topology_st, USER2_DN, 'password', 6) + + log.info(' Set inhistory = 4') + 
topology_st.standalone.simple_bind_s(DN_DM, PASSWORD) + try: + topology_st.standalone.modify_s(SUBTREE_PWP, [(ldap.MOD_REPLACE, 'passwordInHistory', b'4')]) + except ldap.LDAPError as e: + log.error('Failed to set pwpolicy-local: error ' + e.message['desc']) + assert False + + log.info(' checking the first password, which is supposed NOT to be in history any more') + cpw = 'password%d' % 5 + tpw = 'password' + inhistory = check_passwd_inhistory(topology_st, USER2_DN, cpw, tpw) + assert inhistory == 0 + + log.info(' checking the second password, which is supposed NOT to be in history any more') + cpw = tpw + tpw = 'password%d' % 1 + inhistory = check_passwd_inhistory(topology_st, USER2_DN, cpw, tpw) + assert inhistory == 0 + + log.info(' checking the third password, which is supposed NOT to be in history any more') + cpw = tpw + tpw = 'password%d' % 2 + inhistory = check_passwd_inhistory(topology_st, USER2_DN, cpw, tpw) + assert inhistory == 0 + + log.info(' checking the six password, which is supposed to be in history') + cpw = tpw + tpw = 'password%d' % 5 + inhistory = check_passwd_inhistory(topology_st, USER2_DN, cpw, tpw) + assert inhistory == 1 + + log.info("Subtree level policy was successfully verified.") + + +if __name__ == '__main__': + # Run isolated + # -s for DEBUG mode + CURRENT_FILE = os.path.realpath(__file__) + pytest.main("-s %s" % CURRENT_FILE) diff --git a/dirsrvtests/tests/tickets/ticket48233_test.py b/dirsrvtests/tests/tickets/ticket48233_test.py new file mode 100644 index 0000000..6c6c504 --- /dev/null +++ b/dirsrvtests/tests/tickets/ticket48233_test.py @@ -0,0 +1,69 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2022 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. 
+# --- END COPYRIGHT BLOCK --- +# +import pytest +from lib389.utils import * +from lib389.topologies import topology_st + +from lib389._constants import DEFAULT_SUFFIX + +pytestmark = pytest.mark.tier2 + +logging.getLogger(__name__).setLevel(logging.DEBUG) +log = logging.getLogger(__name__) + + +def test_ticket48233(topology_st): + """Test that ACI's that use IP restrictions do not crash the server at + shutdown + """ + + # Add aci to restrict access my ip + aci_text = ('(targetattr != "userPassword")(version 3.0;acl ' + + '"Enable anonymous access - IP"; allow (read,compare,search)' + + '(userdn = "ldap:///anyone") and (ip="127.0.0.1");)') + + try: + topology_st.standalone.modify_s(DEFAULT_SUFFIX, [(ldap.MOD_ADD, 'aci', ensure_bytes(aci_text))]) + except ldap.LDAPError as e: + log.error('Failed to add aci: ({}) error {}'.format(aci_text,e.args[0]['desc'])) + assert False + time.sleep(1) + + # Anonymous search to engage the aci + try: + topology_st.standalone.simple_bind_s("", "") + except ldap.LDAPError as e: + log.error('Failed to anonymously bind -error {}'.format(e.args[0]['desc'])) + assert False + + try: + entries = topology_st.standalone.search_s(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE, 'objectclass=*') + if not entries: + log.fatal('Failed return an entries from search') + assert False + except ldap.LDAPError as e: + log.fatal('Search failed: ' + e.message['desc']) + assert False + + # Restart the server + topology_st.standalone.restart(timeout=10) + + # Check for crash + if topology_st.standalone.detectDisorderlyShutdown(): + log.fatal('Server crashed!') + assert False + + log.info('Test complete') + + +if __name__ == '__main__': + # Run isolated + # -s for DEBUG mode + CURRENT_FILE = os.path.realpath(__file__) + pytest.main("-s %s" % CURRENT_FILE) diff --git a/dirsrvtests/tests/tickets/ticket48252_test.py b/dirsrvtests/tests/tickets/ticket48252_test.py new file mode 100644 index 0000000..05419ba --- /dev/null +++ b/dirsrvtests/tests/tickets/ticket48252_test.py 
@@ -0,0 +1,120 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2016 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. +# --- END COPYRIGHT BLOCK --- +# +import logging + +import pytest +from lib389.tasks import * +from lib389.utils import * +from lib389.topologies import topology_st +from lib389.idm.user import UserAccounts + +from lib389._constants import DEFAULT_SUFFIX, SUFFIX, DEFAULT_BENAME, PLUGIN_USN + +pytestmark = pytest.mark.tier2 + +log = logging.getLogger(__name__) + +# Assuming DEFAULT_SUFFIX is "dc=example,dc=com", otherwise it does not work... :( +USER_NUM = 10 +TEST_USER = "test_user" + + +def test_ticket48252_setup(topology_st): + """ + Enable USN plug-in for enabling tombstones + Add test entries + """ + + log.info("Enable the USN plugin...") + try: + topology_st.standalone.plugins.enable(name=PLUGIN_USN) + except e: + log.error("Failed to enable USN Plugin: error " + e.message['desc']) + assert False + + log.info("Adding test entries...") + ua = UserAccounts(topology_st.standalone, DEFAULT_SUFFIX) + for i in range(USER_NUM): + ua.create(properties={ + 'uid': "%s%d" % (TEST_USER, i), + 'cn' : "%s%d" % (TEST_USER, i), + 'sn' : 'user', + 'uidNumber' : '1000', + 'gidNumber' : '2000', + 'homeDirectory' : '/home/testuser' + }) + + +def in_index_file(topology_st, id, index): + key = "%s%s" % (TEST_USER, id) + log.info(" dbscan - checking %s is in index file %s..." 
% (key, index)) + dbscanOut = topology_st.standalone.dbscan(DEFAULT_BENAME, index) + if ensure_bytes(key) in ensure_bytes(dbscanOut): + found = True + topology_st.standalone.log.info("Found key %s in dbscan output" % key) + else: + found = False + topology_st.standalone.log.info("Did not found key %s in dbscan output" % key) + + return found + + +def test_ticket48252_run_0(topology_st): + """ + Delete an entry cn=test_entry0 + Check it is not in the 'cn' index file + """ + log.info("Case 1 - Check deleted entry is not in the 'cn' index file") + uas = UserAccounts(topology_st.standalone, DEFAULT_SUFFIX) + del_rdn = "uid=%s0" % TEST_USER + del_entry = uas.get('%s0' % TEST_USER) + log.info(" Deleting a test entry %s..." % del_entry) + del_entry.delete() + + assert in_index_file(topology_st, 0, 'cn') is False + log.info(" db2index - reindexing %s ..." % 'cn') + topology_st.standalone.stop() + assert topology_st.standalone.db2index(suffixes=[DEFAULT_SUFFIX], attrs=['cn']) + topology_st.standalone.start() + assert in_index_file(topology_st, 0, 'cn') is False + log.info(" entry %s is not in the cn index file after reindexed." % del_rdn) + log.info('Case 1 - PASSED') + + +def test_ticket48252_run_1(topology_st): + """ + Delete an entry cn=test_entry1 + Check it is in the 'objectclass' index file as a tombstone entry + """ + log.info("Case 2 - Check deleted entry is in the 'objectclass' index file as a tombstone entry") + uas = UserAccounts(topology_st.standalone, DEFAULT_SUFFIX) + del_rdn = "uid=%s1" % TEST_USER + del_entry = uas.get('%s1' % TEST_USER) + log.info(" Deleting a test entry %s..." % del_rdn) + del_entry.delete() + + entry = topology_st.standalone.search_s(SUFFIX, ldap.SCOPE_SUBTREE, '(&(objectclass=nstombstone)(%s))' % del_rdn) + assert len(entry) == 1 + log.info(" entry %s is in the objectclass index file." % del_rdn) + + log.info(" db2index - reindexing %s ..." 
% 'objectclass') + topology_st.standalone.stop() + assert topology_st.standalone.db2index(suffixes=[DEFAULT_SUFFIX], attrs=['objectclass']) + topology_st.standalone.start() + entry = topology_st.standalone.search_s(SUFFIX, ldap.SCOPE_SUBTREE, '(&(objectclass=nstombstone)(%s))' % del_rdn) + assert len(entry) == 1 + log.info(" entry %s is in the objectclass index file after reindexed." % del_rdn) + log.info('Case 2 - PASSED') + + +if __name__ == '__main__': + # Run isolated + # -s for DEBUG mode + CURRENT_FILE = os.path.realpath(__file__) + pytest.main("-s %s" % CURRENT_FILE) diff --git a/dirsrvtests/tests/tickets/ticket48265_test.py b/dirsrvtests/tests/tickets/ticket48265_test.py new file mode 100644 index 0000000..1652b7a --- /dev/null +++ b/dirsrvtests/tests/tickets/ticket48265_test.py @@ -0,0 +1,76 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2016 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. +# --- END COPYRIGHT BLOCK --- +# +import pytest +from lib389.tasks import * +from lib389.utils import * +from lib389.topologies import topology_st + +from lib389._constants import SUFFIX, DEFAULT_SUFFIX + +pytestmark = pytest.mark.tier2 + +logging.getLogger(__name__).setLevel(logging.DEBUG) +log = logging.getLogger(__name__) + +USER_NUM = 20 +TEST_USER = 'test_user' + + +def test_ticket48265_test(topology_st): + """ + Complex filter issues + Ticket 47521 type complex filter: + (&(|(uid=tuser*)(cn=Test user*))(&(givenname=test*3))(mail=tuser@example.com)(&(description=*))) + Ticket 48264 type complex filter: + (&(&(|(l=EU)(l=AP)(l=NA))(|(c=SE)(c=DE)))(|(uid=*test*)(cn=*test*))(l=eu)) + """ + + log.info("Adding %d test entries..." 
% USER_NUM) + for id in range(USER_NUM): + name = "%s%d" % (TEST_USER, id) + mail = "%s@example.com" % name + secretary = "cn=%s,ou=secretary,%s" % (name, SUFFIX) + topology_st.standalone.add_s(Entry(("cn=%s,%s" % (name, SUFFIX), { + 'objectclass': "top person organizationalPerson inetOrgPerson".split(), + 'sn': name, + 'cn': name, + 'uid': name, + 'givenname': 'test', + 'mail': mail, + 'description': 'description', + 'secretary': secretary, + 'l': 'MV', + 'title': 'Engineer'}))) + + log.info("Search with Ticket 47521 type complex filter") + for id in range(USER_NUM): + name = "%s%d" % (TEST_USER, id) + mail = "%s@example.com" % name + filter47521 = '(&(|(uid=%s*)(cn=%s*))(&(givenname=test))(mail=%s)(&(description=*)))' % ( + TEST_USER, TEST_USER, mail) + entry = topology_st.standalone.search_s(SUFFIX, ldap.SCOPE_SUBTREE, filter47521) + assert len(entry) == 1 + + log.info("Search with Ticket 48265 type complex filter") + for id in range(USER_NUM): + name = "%s%d" % (TEST_USER, id) + mail = "%s@example.com" % name + filter48265 = '(&(&(|(l=AA)(l=BB)(l=MV))(|(title=admin)(title=engineer)))(|(uid=%s)(mail=%s))(description=description))' % ( + name, mail) + entry = topology_st.standalone.search_s(SUFFIX, ldap.SCOPE_SUBTREE, filter48265) + assert len(entry) == 1 + + log.info('Test 48265 complete\n') + + +if __name__ == '__main__': + # Run isolated + # -s for DEBUG mode + CURRENT_FILE = os.path.realpath(__file__) + pytest.main("-s %s" % CURRENT_FILE) diff --git a/dirsrvtests/tests/tickets/ticket48266_test.py b/dirsrvtests/tests/tickets/ticket48266_test.py new file mode 100644 index 0000000..5c033c0 --- /dev/null +++ b/dirsrvtests/tests/tickets/ticket48266_test.py @@ -0,0 +1,288 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2022 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. 
+# --- END COPYRIGHT BLOCK --- +# +import pytest +from lib389.tasks import * +from lib389.utils import * +from lib389.topologies import topology_m2 +from lib389.replica import ReplicationManager + +from lib389._constants import SUFFIX, DEFAULT_SUFFIX, HOST_SUPPLIER_2, PORT_SUPPLIER_2 + +pytestmark = pytest.mark.tier2 + +logging.getLogger(__name__).setLevel(logging.DEBUG) +log = logging.getLogger(__name__) + +NEW_ACCOUNT = "new_account" +MAX_ACCOUNTS = 20 + + +@pytest.fixture(scope="module") +def entries(topology_m2): + # add dummy entries in the staging DIT + for cpt in range(MAX_ACCOUNTS): + name = "%s%d" % (NEW_ACCOUNT, cpt) + topology_m2.ms["supplier1"].add_s(Entry(("cn=%s,%s" % (name, SUFFIX), { + 'objectclass': "top person".split(), + 'sn': name, + 'cn': name}))) + topology_m2.ms["supplier1"].config.set('nsslapd-accesslog-logbuffering', 'off') + topology_m2.ms["supplier1"].config.set('nsslapd-errorlog-level', '8192') + # 256 + 4 + topology_m2.ms["supplier1"].config.set('nsslapd-accesslog-level', '260') + + topology_m2.ms["supplier2"].config.set('nsslapd-accesslog-logbuffering', 'off') + topology_m2.ms["supplier2"].config.set('nsslapd-errorlog-level', '8192') + # 256 + 4 + topology_m2.ms["supplier2"].config.set('nsslapd-accesslog-level', '260') + + +def test_ticket48266_fractional(topology_m2, entries): + ents = topology_m2.ms["supplier1"].agreement.list(suffix=SUFFIX) + assert len(ents) == 1 + + mod = [(ldap.MOD_REPLACE, 'nsDS5ReplicatedAttributeList', [b'(objectclass=*) $ EXCLUDE telephonenumber']), + (ldap.MOD_REPLACE, 'nsds5ReplicaStripAttrs', [b'modifiersname modifytimestamp'])] + ents = topology_m2.ms["supplier1"].agreement.list(suffix=SUFFIX) + assert len(ents) == 1 + m1_m2_agmt = ents[0].dn + topology_m2.ms["supplier1"].modify_s(ents[0].dn, mod) + + ents = topology_m2.ms["supplier2"].agreement.list(suffix=SUFFIX) + assert len(ents) == 1 + topology_m2.ms["supplier2"].modify_s(ents[0].dn, mod) + + topology_m2.ms["supplier1"].restart() + 
topology_m2.ms["supplier2"].restart() + + repl = ReplicationManager(DEFAULT_SUFFIX) + repl.ensure_agreement(topology_m2.ms["supplier1"], topology_m2.ms["supplier2"]) + repl.test_replication(topology_m2.ms["supplier1"], topology_m2.ms["supplier2"]) + + +def test_ticket48266_check_repl_desc(topology_m2, entries): + name = "cn=%s1,%s" % (NEW_ACCOUNT, SUFFIX) + value = 'check repl. description' + mod = [(ldap.MOD_REPLACE, 'description', ensure_bytes(value))] + topology_m2.ms["supplier1"].modify_s(name, mod) + + loop = 0 + while loop <= 10: + ent = topology_m2.ms["supplier2"].getEntry(name, ldap.SCOPE_BASE, "(objectclass=*)") + if ent.hasAttr('description') and ent.getValue('description') == ensure_bytes(value): + break + time.sleep(1) + loop += 1 + assert loop <= 10 + + +# will use this CSN as a starting point on error log +# after this is one 'Skipped' then the first csn _get_first_not_replicated_csn +# should no longer be Skipped in the error log +def _get_last_not_replicated_csn(topology_m2): + name = "cn=%s5,%s" % (NEW_ACCOUNT, SUFFIX) + + # read the first CSN that will not be replicated + mod = [(ldap.MOD_REPLACE, 'telephonenumber', ensure_bytes('123456'))] + topology_m2.ms["supplier1"].modify_s(name, mod) + msgid = topology_m2.ms["supplier1"].search_ext(name, ldap.SCOPE_SUBTREE, 'objectclass=*', ['nscpentrywsi']) + rtype, rdata, rmsgid = topology_m2.ms["supplier1"].result2(msgid) + attrs = None + for dn, raw_attrs in rdata: + topology_m2.ms["supplier1"].log.info("dn: %s" % dn) + if 'nscpentrywsi' in raw_attrs: + attrs = raw_attrs['nscpentrywsi'] + assert attrs + for attr in attrs: + if ensure_str(attr.lower()).startswith('telephonenumber'): + break + assert attr + + log.info("############# %s " % name) + # now retrieve the CSN of the operation we are looking for + csn = None + found_ops = topology_m2.ms['supplier1'].ds_access_log.match(".*MOD dn=\"%s\".*" % name) + assert(len(found_ops) > 0) + found_op = 
topology_m2.ms['supplier1'].ds_access_log.parse_line(found_ops[-1]) + log.info(found_op) + + # Now look for the related CSN + found_csns = topology_m2.ms['supplier1'].ds_access_log.match(".*conn=%s op=%s RESULT.*" % (found_op['conn'], found_op['op'])) + assert(len(found_csns) > 0) + found_csn = topology_m2.ms['supplier1'].ds_access_log.parse_line(found_csns[-1]) + log.info(found_csn) + return found_csn['csn'] + + +def _get_first_not_replicated_csn(topology_m2): + name = "cn=%s2,%s" % (NEW_ACCOUNT, SUFFIX) + + # read the first CSN that will not be replicated + mod = [(ldap.MOD_REPLACE, 'telephonenumber', ensure_bytes('123456'))] + topology_m2.ms["supplier1"].modify_s(name, mod) + msgid = topology_m2.ms["supplier1"].search_ext(name, ldap.SCOPE_SUBTREE, 'objectclass=*', ['nscpentrywsi']) + rtype, rdata, rmsgid = topology_m2.ms["supplier1"].result2(msgid) + attrs = None + for dn, raw_attrs in rdata: + topology_m2.ms["supplier1"].log.info("dn: %s" % dn) + if 'nscpentrywsi' in raw_attrs: + attrs = raw_attrs['nscpentrywsi'] + assert attrs + for attr in attrs: + if ensure_str(attr.lower()).startswith('telephonenumber'): + break + assert attr + + log.info("############# %s " % name) + # now retrieve the CSN of the operation we are looking for + csn = None + found_ops = topology_m2.ms['supplier1'].ds_access_log.match(".*MOD dn=\"%s\".*" % name) + assert(len(found_ops) > 0) + found_op = topology_m2.ms['supplier1'].ds_access_log.parse_line(found_ops[-1]) + log.info(found_op) + + # Now look for the related CSN + found_csns = topology_m2.ms['supplier1'].ds_access_log.match(".*conn=%s op=%s RESULT.*" % (found_op['conn'], found_op['op'])) + assert(len(found_csns) > 0) + found_csn = topology_m2.ms['supplier1'].ds_access_log.parse_line(found_csns[-1]) + log.info(found_csn) + return found_csn['csn'] + + +def _count_full_session(topology_m2): + # + # compute the number of 'No more updates' + # + file_obj = open(topology_m2.ms["supplier1"].errlog, "r") + # pattern to find + pattern = 
".*No more updates to send.*" + regex = re.compile(pattern) + no_more_updates = 0 + + # check initiation number of 'No more updates + while True: + line = file_obj.readline() + found = regex.search(line) + if (found): + no_more_updates = no_more_updates + 1 + if (line == ''): + break + file_obj.close() + + return no_more_updates + + +def test_ticket48266_count_csn_evaluation(topology_m2, entries): + ents = topology_m2.ms["supplier1"].agreement.list(suffix=SUFFIX) + assert len(ents) == 1 + first_csn = _get_first_not_replicated_csn(topology_m2) + name = "cn=%s3,%s" % (NEW_ACCOUNT, SUFFIX) + NB_SESSION = 102 + + no_more_update_cnt = _count_full_session(topology_m2) + topology_m2.ms["supplier1"].agreement.pause(ents[0].dn) + # now do a set of updates that will NOT be replicated + for telNumber in range(NB_SESSION): + mod = [(ldap.MOD_REPLACE, 'telephonenumber', ensure_bytes(str(telNumber)))] + topology_m2.ms["supplier1"].modify_s(name, mod) + + topology_m2.ms["supplier1"].agreement.resume(ents[0].dn) + + # let's wait all replication session complete + MAX_LOOP = 10 + cnt = 0 + current_no_more_update = _count_full_session(topology_m2) + while (current_no_more_update == no_more_update_cnt): + cnt = cnt + 1 + if (cnt > MAX_LOOP): + break + time.sleep(5) + current_no_more_update = _count_full_session(topology_m2) + + log.info('after %d MODs we have completed %d replication sessions' % ( + NB_SESSION, (current_no_more_update - no_more_update_cnt))) + no_more_update_cnt = current_no_more_update + + # At this point, with the fix a dummy update was made BUT may be not sent it + # make sure it was sent so that the consumer CSN will be updated + last_csn = _get_last_not_replicated_csn(topology_m2) + + # let's wait all replication session complete + MAX_LOOP = 10 + cnt = 0 + current_no_more_update = _count_full_session(topology_m2) + while (current_no_more_update == no_more_update_cnt): + cnt = cnt + 1 + if (cnt > MAX_LOOP): + break + time.sleep(5) + current_no_more_update = 
_count_full_session(topology_m2) + + log.info('This MODs %s triggered the send of the dummy update completed %d replication sessions' % ( + last_csn, (current_no_more_update - no_more_update_cnt))) + no_more_update_cnt = current_no_more_update + + # so we should no longer see the first_csn in the log + # Let's create a new csn (last_csn) and check there is no longer first_csn + topology_m2.ms["supplier1"].agreement.pause(ents[0].dn) + last_csn = _get_last_not_replicated_csn(topology_m2) + topology_m2.ms["supplier1"].agreement.resume(ents[0].dn) + + # let's wait for the session to complete + MAX_LOOP = 10 + cnt = 0 + while (current_no_more_update == no_more_update_cnt): + cnt = cnt + 1 + if (cnt > MAX_LOOP): + break + time.sleep(5) + current_no_more_update = _count_full_session(topology_m2) + + log.info('This MODs %s completed in %d replication sessions, should be sent without evaluating %s' % ( + last_csn, (current_no_more_update - no_more_update_cnt), first_csn)) + no_more_update_cnt = current_no_more_update + + # Now determine how many times we have skipped 'csn' + # no need to stop the server to check the error log + file_obj = open(topology_m2.ms["supplier1"].errlog, "r") + + # find where the last_csn operation was processed + pattern = ".*ruv_add_csn_inprogress: successfully inserted csn %s.*" % last_csn + regex = re.compile(pattern) + cnt = 0 + + while True: + line = file_obj.readline() + found = regex.search(line) + if ((line == '') or (found)): + break + if (found): + log.info('last operation was found at %d' % file_obj.tell()) + log.info(line) + log.info('Now check the we can not find the first csn %s in the log' % first_csn) + + pattern = ".*Skipping update operation.*CSN %s.*" % first_csn + regex = re.compile(pattern) + found = False + while True: + line = file_obj.readline() + found = regex.search(line) + if ((line == '') or (found)): + break + if (found): + log.info('Unexpected found %s' % line) + assert not found + + +if __name__ == '__main__': + # 
Run isolated + # -s for DEBUG mode + + CURRENT_FILE = os.path.realpath(__file__) + pytest.main("-s %s" % CURRENT_FILE) diff --git a/dirsrvtests/tests/tickets/ticket48270_test.py b/dirsrvtests/tests/tickets/ticket48270_test.py new file mode 100644 index 0000000..3d1e7ff --- /dev/null +++ b/dirsrvtests/tests/tickets/ticket48270_test.py @@ -0,0 +1,126 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2022 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. +# --- END COPYRIGHT BLOCK --- +# +import pytest +from lib389.tasks import * +from lib389.utils import * +from lib389.topologies import topology_st + +from lib389._constants import SUFFIX, DEFAULT_SUFFIX + +pytestmark = pytest.mark.tier2 + +logging.getLogger(__name__).setLevel(logging.DEBUG) +log = logging.getLogger(__name__) + +NEW_ACCOUNT = "new_account" +MAX_ACCOUNTS = 20 + +MIXED_VALUE = "/home/mYhOmEdIrEcToRy" +LOWER_VALUE = "/home/myhomedirectory" +HOMEDIRECTORY_INDEX = 'cn=homeDirectory,cn=index,cn=userRoot,cn=ldbm database,cn=plugins,cn=config' +HOMEDIRECTORY_CN = "homedirectory" +MATCHINGRULE = 'nsMatchingRule' +UIDNUMBER_INDEX = 'cn=uidnumber,cn=index,cn=userRoot,cn=ldbm database,cn=plugins,cn=config' +UIDNUMBER_CN = "uidnumber" + + +def test_ticket48270_init(topology_st): + log.info("Initialization: add dummy entries for the tests") + for cpt in range(MAX_ACCOUNTS): + name = "%s%d" % (NEW_ACCOUNT, cpt) + topology_st.standalone.add_s(Entry(("uid=%s,%s" % (name, SUFFIX), { + 'objectclass': "top posixAccount".split(), + 'uid': name, + 'cn': name, + 'uidnumber': str(111), + 'gidnumber': str(222), + 'homedirectory': "/home/tbordaz_%d" % cpt}))) + + +def test_ticket48270_homeDirectory_indexed_cis(topology_st): + log.info("\n\nindex homeDirectory in caseIgnoreIA5Match and caseExactIA5Match") + try: + ent = topology_st.standalone.getEntry(HOMEDIRECTORY_INDEX, ldap.SCOPE_BASE) + except ldap.NO_SUCH_OBJECT: + 
topology_st.standalone.add_s(Entry((HOMEDIRECTORY_INDEX, { + 'objectclass': "top nsIndex".split(), + 'cn': HOMEDIRECTORY_CN, + 'nsSystemIndex': 'false', + 'nsIndexType': 'eq'}))) + # log.info("attach debugger") + # time.sleep(60) + + IGNORE_MR_NAME = b'caseIgnoreIA5Match' + EXACT_MR_NAME = b'caseExactIA5Match' + mod = [(ldap.MOD_REPLACE, MATCHINGRULE, (IGNORE_MR_NAME, EXACT_MR_NAME))] + topology_st.standalone.modify_s(HOMEDIRECTORY_INDEX, mod) + + # topology_st.standalone.stop(timeout=10) + log.info("successfully checked that filter with exact mr , a filter with lowercase eq is failing") + # assert topology_st.standalone.db2index(bename=DEFAULT_BENAME, suffixes=None, attrs=['homeDirectory']) + # topology_st.standalone.start(timeout=10) + args = {TASK_WAIT: True} + topology_st.standalone.tasks.reindex(suffix=SUFFIX, attrname='homeDirectory', args=args) + + log.info("Check indexing succeeded with a specified matching rule") + file_obj = open(topology_st.standalone.errlog, "r") + + # Check if the MR configuration failure occurs + regex = re.compile("unknown or invalid matching rule") + while True: + line = file_obj.readline() + found = regex.search(line) + if ((line == '') or (found)): + break + + if (found): + log.info("The configuration of a specific MR fails") + log.info(line) + # assert not found + + +def test_ticket48270_homeDirectory_mixed_value(topology_st): + # Set a homedirectory value with mixed case + name = "uid=%s1,%s" % (NEW_ACCOUNT, SUFFIX) + mod = [(ldap.MOD_REPLACE, 'homeDirectory', ensure_bytes(MIXED_VALUE))] + topology_st.standalone.modify_s(name, mod) + + +def test_ticket48270_extensible_search(topology_st): + name = "uid=%s1,%s" % (NEW_ACCOUNT, SUFFIX) + + # check with the exact stored value + log.info("Default: can retrieve an entry filter syntax with exact stored value") + ent = topology_st.standalone.getEntry(name, ldap.SCOPE_BASE, "(homeDirectory=%s)" % MIXED_VALUE) + log.info("Default: can retrieve an entry filter caseExactIA5Match with exact 
stored value") + ent = topology_st.standalone.getEntry(name, ldap.SCOPE_BASE, "(homeDirectory:caseExactIA5Match:=%s)" % MIXED_VALUE) + + # check with a lower case value that is different from the stored value + log.info("Default: can not retrieve an entry filter syntax match with lowered stored value") + try: + ent = topology_st.standalone.getEntry(name, ldap.SCOPE_BASE, "(homeDirectory=%s)" % LOWER_VALUE) + assert ent is None + except ldap.NO_SUCH_OBJECT: + pass + log.info("Default: can not retrieve an entry filter caseExactIA5Match with lowered stored value") + try: + ent = topology_st.standalone.getEntry(name, ldap.SCOPE_BASE, + "(homeDirectory:caseExactIA5Match:=%s)" % LOWER_VALUE) + assert ent is None + except ldap.NO_SUCH_OBJECT: + pass + log.info("Default: can retrieve an entry filter caseIgnoreIA5Match with lowered stored value") + ent = topology_st.standalone.getEntry(name, ldap.SCOPE_BASE, "(homeDirectory:caseIgnoreIA5Match:=%s)" % LOWER_VALUE) + + +if __name__ == '__main__': + # Run isolated + # -s for DEBUG mode + CURRENT_FILE = os.path.realpath(__file__) + pytest.main("-s %s" % CURRENT_FILE) diff --git a/dirsrvtests/tests/tickets/ticket48272_test.py b/dirsrvtests/tests/tickets/ticket48272_test.py new file mode 100644 index 0000000..35aba7e --- /dev/null +++ b/dirsrvtests/tests/tickets/ticket48272_test.py @@ -0,0 +1,144 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2022 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. 
+# --- END COPYRIGHT BLOCK --- +# +import pytest +from lib389.tasks import * +from lib389.utils import * +from lib389.topologies import topology_st + +from lib389._constants import DEFAULT_SUFFIX, HOST_STANDALONE, PORT_STANDALONE + +DEBUGGING = os.getenv('DEBUGGING', False) + +if DEBUGGING: + logging.getLogger(__name__).setLevel(logging.DEBUG) +else: + logging.getLogger(__name__).setLevel(logging.INFO) + +log = logging.getLogger(__name__) +# Skip on older versions +pytestmark = [pytest.mark.tier2, + pytest.mark.skipif(ds_is_older('1.3.6'), reason="Not implemented")] + + +USER1 = 'user1' +USER1_DOMAIN = 'user1@example.com' +PW = 'password' +USER1_DN = 'uid=user1,ou=People,%s' % DEFAULT_SUFFIX +USER1_CONFLICT_DN = 'uid=user1,%s' % DEFAULT_SUFFIX + + +def _create_user(inst, name, dn): + inst.add_s(Entry(( + dn, { + 'objectClass': 'top account simplesecurityobject'.split(), + 'uid': name, + 'userpassword': PW + }))) + + +def _bind(name, cred): + # Returns true or false if it worked. + if DEBUGGING: + print('test 48272 BINDING AS %s:%s' % (name, cred)) + status = True + conn = ldap.initialize("ldap://%s:%s" % (HOST_STANDALONE, PORT_STANDALONE)) + try: + conn.simple_bind_s(name, cred) + conn.unbind_s() + except ldap.INVALID_CREDENTIALS: + status = False + return status + + +def test_ticket48272(topology_st): + """ + Test the functionality of the addn bind plugin. This should allow users + of the type "name" or "name@domain.com" to bind. + """ + + # There will be a better way to do this in the future. 
+ topology_st.standalone.add_s(Entry(( + "cn=addn,cn=plugins,cn=config", { + "objectClass": "top nsSlapdPlugin extensibleObject".split(), + "cn": "addn", + "nsslapd-pluginPath": "libaddn-plugin", + "nsslapd-pluginInitfunc": "addn_init", + "nsslapd-pluginType": "preoperation", + "nsslapd-pluginEnabled": "on", + "nsslapd-pluginId": "addn", + "nsslapd-pluginVendor": "389 Project", + "nsslapd-pluginVersion": "1.3.6.0", + "nsslapd-pluginDescription": "Allow AD DN style bind names to LDAP", + "addn_default_domain": "example.com", + } + ))) + + topology_st.standalone.add_s(Entry(( + "cn=example.com,cn=addn,cn=plugins,cn=config", { + "objectClass": "top extensibleObject".split(), + "cn": "example.com", + "addn_base": "ou=People,%s" % DEFAULT_SUFFIX, + "addn_filter": "(&(objectClass=account)(uid=%s))", + } + ))) + + topology_st.standalone.restart(60) + + # Add a user + _create_user(topology_st.standalone, USER1, USER1_DN) + + if DEBUGGING is not False: + print("Attach now") + time.sleep(20) + + # Make sure our binds still work. + assert (_bind(USER1_DN, PW)) + # Test an anonymous bind + for i in range(0, 10): + # Test bind as name + assert (_bind(USER1, PW)) + + # Make sure that name@fakedom fails + assert (_bind(USER1_DOMAIN, PW)) + + # Add a conflicting user to an alternate subtree + _create_user(topology_st.standalone, USER1, USER1_CONFLICT_DN) + # Change the plugin to search from the rootdn instead + # This means we have a conflicting user in scope now! + + topology_st.standalone.modify_s("cn=example.com,cn=addn,cn=plugins,cn=config", + [(ldap.MOD_REPLACE, 'addn_base', ensure_bytes(DEFAULT_SUFFIX))]) + topology_st.standalone.restart(60) + + # Make sure our binds still work. 
+ assert (_bind(USER1_DN, PW)) + assert (_bind(USER1_CONFLICT_DN, PW)) + for i in range(0, 10): + + # Test bind as name fails + try: + _bind(USER1, PW) + assert (False) + except: + pass + # Test bind as name@domain fails too + try: + _bind(USER1_DOMAIN, PW) + assert (False) + except: + pass + + log.info('Test PASSED') + + +if __name__ == '__main__': + # Run isolated + # -s for DEBUG mode + CURRENT_FILE = os.path.realpath(__file__) + pytest.main("-s %s" % CURRENT_FILE) diff --git a/dirsrvtests/tests/tickets/ticket48294_test.py b/dirsrvtests/tests/tickets/ticket48294_test.py new file mode 100644 index 0000000..73df896 --- /dev/null +++ b/dirsrvtests/tests/tickets/ticket48294_test.py @@ -0,0 +1,220 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2016 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. +# --- END COPYRIGHT BLOCK --- +# +import logging +import time + +import ldap +import pytest +from lib389 import Entry +from lib389._constants import * +from lib389.topologies import topology_st +from lib389.utils import * + +pytestmark = pytest.mark.tier2 + +log = logging.getLogger(__name__) + +LINKEDATTR_PLUGIN = 'cn=Linked Attributes,cn=plugins,cn=config' +MANAGER_LINK = 'cn=Manager Link,' + LINKEDATTR_PLUGIN +OU_PEOPLE = 'ou=People,' + DEFAULT_SUFFIX +LINKTYPE = 'directReport' +MANAGEDTYPE = 'manager' + + +def _header(topology_st, label): + topology_st.standalone.log.info("###############################################") + topology_st.standalone.log.info("####### %s" % label) + topology_st.standalone.log.info("###############################################") + + +def check_attr_val(topology_st, dn, attr, expected): + try: + centry = topology_st.standalone.search_s(dn, ldap.SCOPE_BASE, 'uid=*') + if centry: + val = centry[0].getValue(attr) + if val.lower() == expected.lower(): + log.info('Value of %s is %s' % (attr, expected)) + else: + log.info('Value of %s is not %s, but %s' % (attr, expected, 
val)) + assert False + else: + log.fatal('Failed to get %s' % dn) + assert False + except ldap.LDAPError as e: + log.fatal('Failed to search ' + dn + ': ' + e.args[0]['desc']) + assert False + + +def _modrdn_entry(topology_st=None, entry_dn=None, new_rdn=None, del_old=0, new_superior=None): + assert topology_st is not None + assert entry_dn is not None + assert new_rdn is not None + + topology_st.standalone.log.info("\n\n######################### MODRDN %s ######################\n" % new_rdn) + try: + if new_superior: + topology_st.standalone.rename_s(entry_dn, new_rdn, newsuperior=new_superior, delold=del_old) + else: + topology_st.standalone.rename_s(entry_dn, new_rdn, delold=del_old) + except ldap.NO_SUCH_ATTRIBUTE: + topology_st.standalone.log.info("accepted failure due to 47833: modrdn reports error.. but succeeds") + attempt = 0 + if new_superior: + dn = "%s,%s" % (new_rdn, new_superior) + base = new_superior + else: + base = ','.join(entry_dn.split(",")[1:]) + dn = "%s, %s" % (new_rdn, base) + myfilter = entry_dn.split(',')[0] + + while attempt < 10: + try: + ent = topology_st.standalone.getEntry(dn, ldap.SCOPE_BASE, myfilter) + break + except ldap.NO_SUCH_OBJECT: + topology_st.standalone.log.info("Accept failure due to 47833: unable to find (base) a modrdn entry") + attempt += 1 + time.sleep(1) + if attempt == 10: + ent = topology_st.standalone.getEntry(base, ldap.SCOPE_SUBTREE, myfilter) + ent = topology_st.standalone.getEntry(dn, ldap.SCOPE_BASE, myfilter) + + +def test_48294_init(topology_st): + """ + Set up Linked Attribute + """ + _header(topology_st, + 'Testing Ticket 48294 - Linked Attributes plug-in - won\'t update links after MODRDN operation') + + log.info('Enable Dynamic plugins, and the linked Attrs plugin') + try: + topology_st.standalone.modify_s(DN_CONFIG, [(ldap.MOD_REPLACE, 'nsslapd-dynamic-plugins', b'on')]) + except ldap.LDAPError as e: + log.fatal('Failed to enable dynamic plugin!' 
+ e.args[0]['desc']) + assert False + + try: + topology_st.standalone.plugins.enable(name=PLUGIN_LINKED_ATTRS) + except ValueError as e: + log.fatal('Failed to enable linked attributes plugin!' + e.args[0]['desc']) + assert False + + log.info('Add the plugin config entry') + try: + topology_st.standalone.add_s(Entry((MANAGER_LINK, { + 'objectclass': 'top extensibleObject'.split(), + 'cn': 'Manager Link', + 'linkType': LINKTYPE, + 'managedType': MANAGEDTYPE + }))) + except ldap.LDAPError as e: + log.fatal('Failed to add linked attr config entry: error ' + e.args[0]['desc']) + assert False + + log.info('Add 2 entries: manager1 and employee1') + try: + topology_st.standalone.add_s(Entry(('uid=manager1,%s' % OU_PEOPLE, { + 'objectclass': 'top extensibleObject'.split(), + 'uid': 'manager1'}))) + except ldap.LDAPError as e: + log.fatal('Add manager1 failed: error ' + e.args[0]['desc']) + assert False + + try: + topology_st.standalone.add_s(Entry(('uid=employee1,%s' % OU_PEOPLE, { + 'objectclass': 'top extensibleObject'.split(), + 'uid': 'employee1'}))) + except ldap.LDAPError as e: + log.fatal('Add employee1 failed: error ' + e.args[0]['desc']) + assert False + + log.info('Add linktype to manager1') + topology_st.standalone.modify_s('uid=manager1,%s' % OU_PEOPLE, + [(ldap.MOD_ADD, LINKTYPE, ensure_bytes('uid=employee1,%s' % OU_PEOPLE))]) + + log.info('Check managed attribute') + check_attr_val(topology_st, 'uid=employee1,%s' % OU_PEOPLE, MANAGEDTYPE, ensure_bytes('uid=manager1,%s' % OU_PEOPLE)) + + log.info('PASSED') + + +def test_48294_run_0(topology_st): + """ + Rename employee1 to employee2 and adjust the value of directReport by replace + """ + _header(topology_st, 'Case 0 - Rename employee1 and adjust the link type value by replace') + + log.info('Rename employee1 to employee2') + _modrdn_entry(topology_st, entry_dn='uid=employee1,%s' % OU_PEOPLE, new_rdn='uid=employee2') + + log.info('Modify the value of directReport to uid=employee2') + try: + 
topology_st.standalone.modify_s('uid=manager1,%s' % OU_PEOPLE, + [(ldap.MOD_REPLACE, LINKTYPE, ensure_bytes('uid=employee2,%s' % OU_PEOPLE))]) + except ldap.LDAPError as e: + log.fatal('Failed to replace uid=employee1 with employee2: ' + e.args[0]['desc']) + assert False + + log.info('Check managed attribute') + check_attr_val(topology_st, 'uid=employee2,%s' % OU_PEOPLE, MANAGEDTYPE, ensure_bytes('uid=manager1,%s' % OU_PEOPLE)) + + log.info('PASSED') + + +def test_48294_run_1(topology_st): + """ + Rename employee2 to employee3 and adjust the value of directReport by delete and add + """ + _header(topology_st, 'Case 1 - Rename employee2 and adjust the link type value by delete and add') + + log.info('Rename employee2 to employee3') + _modrdn_entry(topology_st, entry_dn='uid=employee2,%s' % OU_PEOPLE, new_rdn='uid=employee3') + + log.info('Modify the value of directReport to uid=employee3') + try: + topology_st.standalone.modify_s('uid=manager1,%s' % OU_PEOPLE, + [(ldap.MOD_DELETE, LINKTYPE, ensure_bytes('uid=employee2,%s' % OU_PEOPLE))]) + except ldap.LDAPError as e: + log.fatal('Failed to delete employee2: ' + e.args[0]['desc']) + assert False + + try: + topology_st.standalone.modify_s('uid=manager1,%s' % OU_PEOPLE, + [(ldap.MOD_ADD, LINKTYPE, ensure_bytes('uid=employee3,%s' % OU_PEOPLE))]) + except ldap.LDAPError as e: + log.fatal('Failed to add employee3: ' + e.args[0]['desc']) + assert False + + log.info('Check managed attribute') + check_attr_val(topology_st, 'uid=employee3,%s' % OU_PEOPLE, MANAGEDTYPE, ensure_bytes('uid=manager1,%s' % OU_PEOPLE)) + + log.info('PASSED') + + +def test_48294_run_2(topology_st): + """ + Rename manager1 to manager2 and make sure the managed attribute value is updated + """ + _header(topology_st, 'Case 2 - Rename manager1 to manager2 and make sure the managed attribute value is updated') + + log.info('Rename manager1 to manager2') + _modrdn_entry(topology_st, entry_dn='uid=manager1,%s' % OU_PEOPLE, new_rdn='uid=manager2') + + 
log.info('Check managed attribute') + check_attr_val(topology_st, 'uid=employee3,%s' % OU_PEOPLE, MANAGEDTYPE, ensure_bytes('uid=manager2,%s' % OU_PEOPLE)) + + log.info('PASSED') + + +if __name__ == '__main__': + # Run isolated + # -s for DEBUG mode + CURRENT_FILE = os.path.realpath(__file__) + pytest.main("-s %s" % CURRENT_FILE) diff --git a/dirsrvtests/tests/tickets/ticket48295_test.py b/dirsrvtests/tests/tickets/ticket48295_test.py new file mode 100644 index 0000000..c175b21 --- /dev/null +++ b/dirsrvtests/tests/tickets/ticket48295_test.py @@ -0,0 +1,144 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2016 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. +# --- END COPYRIGHT BLOCK --- +# +import logging + +import ldap +import pytest +from lib389 import Entry +from lib389._constants import * +from lib389.topologies import topology_st +from lib389.utils import * + +pytestmark = pytest.mark.tier2 + +log = logging.getLogger(__name__) + +LINKEDATTR_PLUGIN = 'cn=Linked Attributes,cn=plugins,cn=config' +MANAGER_LINK = 'cn=Manager Link,' + LINKEDATTR_PLUGIN +OU_PEOPLE = 'ou=People,' + DEFAULT_SUFFIX +LINKTYPE = 'directReport' +MANAGEDTYPE = 'manager' + + +def _header(topology_st, label): + topology_st.standalone.log.info("###############################################") + topology_st.standalone.log.info("####### %s" % label) + topology_st.standalone.log.info("###############################################") + + +def check_attr_val(topology_st, dn, attr, expected, revert): + try: + centry = topology_st.standalone.search_s(dn, ldap.SCOPE_BASE, 'uid=*') + if centry: + val = centry[0].getValue(attr) + if val: + if val.lower() == expected.lower(): + if revert: + log.info('Value of %s %s exists, which should not.' 
% (attr, expected)) + assert False + else: + log.info('Value of %s is %s' % (attr, expected)) + else: + if revert: + log.info('NEEDINFO: Value of %s is not %s, but %s' % (attr, expected, val)) + else: + log.info('Value of %s is not %s, but %s' % (attr, expected, val)) + assert False + else: + if revert: + log.info('Value of %s does not expectedly exist' % attr) + else: + log.info('Value of %s does not exist' % attr) + assert False + else: + log.fatal('Failed to get %s' % dn) + assert False + except ldap.LDAPError as e: + log.fatal('Failed to search ' + dn + ': ' + e.args[0]['desc']) + assert False + + +def test_48295_init(topology_st): + """ + Set up Linked Attribute + """ + _header(topology_st, + 'Testing Ticket 48295 - Entry cache is not rolled back -- Linked Attributes plug-in - wrong behaviour when adding valid and broken links') + + log.info('Enable Dynamic plugins, and the linked Attrs plugin') + try: + topology_st.standalone.modify_s(DN_CONFIG, [(ldap.MOD_REPLACE, 'nsslapd-dynamic-plugins', b'on')]) + except ldap.LDAPError as e: + log.fatal('Failed to enable dynamic plugin!' + e.args[0]['desc']) + assert False + + try: + topology_st.standalone.plugins.enable(name=PLUGIN_LINKED_ATTRS) + except ValueError as e: + log.fatal('Failed to enable linked attributes plugin!' 
+ e.args[0]['desc']) + assert False + + log.info('Add the plugin config entry') + try: + topology_st.standalone.add_s(Entry((MANAGER_LINK, { + 'objectclass': 'top extensibleObject'.split(), + 'cn': 'Manager Link', + 'linkType': LINKTYPE, + 'managedType': MANAGEDTYPE + }))) + except ldap.LDAPError as e: + log.fatal('Failed to add linked attr config entry: error ' + e.args[0]['desc']) + assert False + + log.info('Add 2 entries: manager1 and employee1') + try: + topology_st.standalone.add_s(Entry(('uid=manager1,%s' % OU_PEOPLE, { + 'objectclass': 'top extensibleObject'.split(), + 'uid': 'manager1'}))) + except ldap.LDAPError as e: + log.fatal('Add manager1 failed: error ' + e.args[0]['desc']) + assert False + + try: + topology_st.standalone.add_s(Entry(('uid=employee1,%s' % OU_PEOPLE, { + 'objectclass': 'top extensibleObject'.split(), + 'uid': 'employee1'}))) + except ldap.LDAPError as e: + log.fatal('Add employee1 failed: error ' + e.args[0]['desc']) + assert False + + log.info('PASSED') + + +def test_48295_run(topology_st): + """ + Add 2 linktypes - one exists, another does not + """ + + _header(topology_st, + 'Add 2 linktypes to manager1 - one exists, another does not to make sure the managed entry does not have managed type.') + try: + topology_st.standalone.modify_s('uid=manager1,%s' % OU_PEOPLE, + [(ldap.MOD_ADD, LINKTYPE, ensure_bytes('uid=employee1,%s' % OU_PEOPLE)), + (ldap.MOD_ADD, LINKTYPE, ensure_bytes('uid=doNotExist,%s' % OU_PEOPLE))]) + except ldap.UNWILLING_TO_PERFORM: + log.info('Add uid=employee1 and uid=doNotExist expectedly failed.') + pass + + log.info('Check managed attribute does not exist.') + check_attr_val(topology_st, 'uid=employee1,%s' % OU_PEOPLE, MANAGEDTYPE, ensure_bytes('uid=manager1,%s' % OU_PEOPLE), True) + + log.info('PASSED') + + +if __name__ == '__main__': + # Run isolated + # -s for DEBUG mode + CURRENT_FILE = os.path.realpath(__file__) + pytest.main("-s %s" % CURRENT_FILE) diff --git 
a/dirsrvtests/tests/tickets/ticket48312_test.py b/dirsrvtests/tests/tickets/ticket48312_test.py new file mode 100644 index 0000000..0403a02 --- /dev/null +++ b/dirsrvtests/tests/tickets/ticket48312_test.py @@ -0,0 +1,132 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2022 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. +# --- END COPYRIGHT BLOCK --- +# +import pytest +from lib389.tasks import * +from lib389.utils import * +from lib389.topologies import topology_st + +from lib389._constants import DEFAULT_SUFFIX, PLUGIN_MANAGED_ENTRY, DN_CONFIG + +pytestmark = pytest.mark.tier2 + +logging.getLogger(__name__).setLevel(logging.DEBUG) +log = logging.getLogger(__name__) + + +def test_ticket48312(topology_st): + """ + Configure managed entries plugins(tempalte/definition), then perform a + modrdn(deleteoldrdn 1), and make sure the server does not crash. + """ + + GROUP_OU = 'ou=groups,' + DEFAULT_SUFFIX + PEOPLE_OU = 'ou=people,' + DEFAULT_SUFFIX + USER_DN = 'uid=user1,ou=people,' + DEFAULT_SUFFIX + CONFIG_DN = 'cn=config,cn=' + PLUGIN_MANAGED_ENTRY + ',cn=plugins,cn=config' + TEMPLATE_DN = 'cn=MEP Template,' + DEFAULT_SUFFIX + USER_NEWRDN = 'uid=\+user1' + + # + # First enable dynamic plugins + # + try: + topology_st.standalone.modify_s(DN_CONFIG, [(ldap.MOD_REPLACE, 'nsslapd-dynamic-plugins', b'on')]) + except ldap.LDAPError as e: + log.fatal('Failed to enable dynamic plugin!' 
+ e.args[0]['desc']) + assert False + topology_st.standalone.plugins.enable(name=PLUGIN_MANAGED_ENTRY) + + # + # Add our org units (they should already exist, but do it just in case) + # + try: + topology_st.standalone.add_s(Entry((PEOPLE_OU, { + 'objectclass': 'top extensibleObject'.split(), + 'ou': 'people'}))) + except ldap.ALREADY_EXISTS: + pass + except ldap.LDAPError as e: + log.fatal('test_mep: Failed to add people org unit: error ' + e.args[0]['desc']) + assert False + + try: + topology_st.standalone.add_s(Entry((GROUP_OU, { + 'objectclass': 'top extensibleObject'.split(), + 'ou': 'people'}))) + except ldap.ALREADY_EXISTS: + pass + except ldap.LDAPError as e: + log.fatal('test_mep: Failed to add people org unit: error ' + e.args[0]['desc']) + assert False + + # + # Add the template entry + # + try: + topology_st.standalone.add_s(Entry((TEMPLATE_DN, { + 'objectclass': 'top mepTemplateEntry extensibleObject'.split(), + 'cn': 'MEP Template', + 'mepRDNAttr': 'cn', + 'mepStaticAttr': ['objectclass: posixGroup', 'objectclass: extensibleObject'], + 'mepMappedAttr': ['cn: $uid', 'uid: $cn', 'gidNumber: $uidNumber'] + }))) + except ldap.LDAPError as e: + log.fatal('test_mep: Failed to add template entry: error ' + e.args[0]['desc']) + assert False + + # + # Add the definition entry + # + try: + topology_st.standalone.add_s(Entry((CONFIG_DN, { + 'objectclass': 'top extensibleObject'.split(), + 'cn': 'config', + 'originScope': PEOPLE_OU, + 'originFilter': 'objectclass=posixAccount', + 'managedBase': GROUP_OU, + 'managedTemplate': TEMPLATE_DN + }))) + except ldap.LDAPError as e: + log.fatal('test_mep: Failed to add config entry: error ' + e.args[0]['desc']) + assert False + + # + # Add an entry that meets the MEP scope + # + try: + topology_st.standalone.add_s(Entry((USER_DN, { + 'objectclass': 'top posixAccount extensibleObject'.split(), + 'uid': 'user1', + 'cn': 'user1', + 'uidNumber': '1', + 'gidNumber': '1', + 'homeDirectory': '/home/user1', + 'description': 'uiser 
description' + }))) + except ldap.LDAPError as e: + log.fatal('test_mep: Failed to user1: error ' + e.args[0]['desc']) + assert False + + # + # Perform a modrdn on USER_DN + # + try: + topology_st.standalone.rename_s(USER_DN, USER_NEWRDN, delold=1) + except ldap.LDAPError as e: + log.error('Failed to modrdn: error ' + e.args[0]['desc']) + assert False + + log.info('Test complete') + + +if __name__ == '__main__': + # Run isolated + # -s for DEBUG mode + CURRENT_FILE = os.path.realpath(__file__) + pytest.main("-s %s" % CURRENT_FILE) diff --git a/dirsrvtests/tests/tickets/ticket48325_test.py b/dirsrvtests/tests/tickets/ticket48325_test.py new file mode 100644 index 0000000..a1d89cd --- /dev/null +++ b/dirsrvtests/tests/tickets/ticket48325_test.py @@ -0,0 +1,140 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2022 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. +# --- END COPYRIGHT BLOCK --- +# +import pytest +from lib389.utils import * +from lib389.tasks import * +from lib389.topologies import topology_m1h1c1 +from lib389.replica import ReplicationManager + +from lib389._constants import (DEFAULT_SUFFIX, REPLICA_RUV_FILTER, defaultProperties, + REPLICATION_BIND_DN, REPLICATION_BIND_PW, REPLICATION_BIND_METHOD, + REPLICATION_TRANSPORT, RA_NAME, RA_BINDDN, RA_BINDPW, + RA_METHOD, RA_TRANSPORT_PROT, SUFFIX) + +pytestmark = pytest.mark.tier2 + +logging.getLogger(__name__).setLevel(logging.DEBUG) +log = logging.getLogger(__name__) + + +def checkFirstElement(ds, rid): + """ + Return True if the first RUV element is for the specified rid + """ + try: + entry = ds.search_s(DEFAULT_SUFFIX, + ldap.SCOPE_SUBTREE, + REPLICA_RUV_FILTER, + ['nsds50ruv']) + assert entry + entry = entry[0] + except ldap.LDAPError as e: + log.fatal('Failed to retrieve RUV entry: %s' % str(e)) + assert False + + ruv_elements = entry.getValues('nsds50ruv') + if ('replica %s ' % rid) in ensure_str(ruv_elements[1]): + return True 
+ else: + return False + + +def test_ticket48325(topology_m1h1c1): + """ + Test that the RUV element order is correctly maintained when promoting + a hub or consumer. + """ + + # + # Promote consumer to supplier + # + C1 = topology_m1h1c1.cs["consumer1"] + M1 = topology_m1h1c1.ms["supplier1"] + H1 = topology_m1h1c1.hs["hub1"] + repl = ReplicationManager(DEFAULT_SUFFIX) + repl._ensure_changelog(C1) + DN = topology_m1h1c1.cs["consumer1"].replica._get_mt_entry(DEFAULT_SUFFIX) + topology_m1h1c1.cs["consumer1"].modify_s(DN, [(ldap.MOD_REPLACE, + 'nsDS5ReplicaType', + b'3'), + (ldap.MOD_REPLACE, + 'nsDS5ReplicaID', + b'1234'), + (ldap.MOD_REPLACE, + 'nsDS5Flags', + b'1')]) + time.sleep(1) + + # + # Check ruv has been reordered + # + if not checkFirstElement(topology_m1h1c1.cs["consumer1"], '1234'): + log.fatal('RUV was not reordered') + assert False + + topology_m1h1c1.ms["supplier1"].add_s(Entry((defaultProperties[REPLICATION_BIND_DN], + {'objectclass': 'top netscapeServer'.split(), + 'cn': 'replication manager', + 'userPassword': 'password'}))) + + DN = topology_m1h1c1.ms["supplier1"].replica._get_mt_entry(DEFAULT_SUFFIX) + topology_m1h1c1.ms["supplier1"].modify_s(DN, [(ldap.MOD_REPLACE, + 'nsDS5ReplicaBindDN', ensure_bytes(defaultProperties[REPLICATION_BIND_DN]))]) + # + # Create repl agreement from the newly promoted supplier to supplier1 + + properties = {RA_NAME: 'meTo_{}:{}'.format(topology_m1h1c1.ms["supplier1"].host, + str(topology_m1h1c1.ms["supplier1"].port)), + RA_BINDDN: defaultProperties[REPLICATION_BIND_DN], + RA_BINDPW: defaultProperties[REPLICATION_BIND_PW], + RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD], + RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]} + new_agmt = topology_m1h1c1.cs["consumer1"].agreement.create(suffix=SUFFIX, + host=topology_m1h1c1.ms["supplier1"].host, + port=topology_m1h1c1.ms["supplier1"].port, + properties=properties) + + if not new_agmt: + log.fatal("Fail to create new agmt from old consumer to the 
supplier") + assert False + + # Test replication is working + repl.test_replication(C1, M1) + + # + # Promote hub to supplier + # + DN = topology_m1h1c1.hs["hub1"].replica._get_mt_entry(DEFAULT_SUFFIX) + topology_m1h1c1.hs["hub1"].modify_s(DN, [(ldap.MOD_REPLACE, + 'nsDS5ReplicaType', + b'3'), + (ldap.MOD_REPLACE, + 'nsDS5ReplicaID', + b'5678')]) + time.sleep(1) + + # + # Check ruv has been reordered + # + if not checkFirstElement(topology_m1h1c1.hs["hub1"], '5678'): + log.fatal('RUV was not reordered') + assert False + + # Test replication is working + repl.test_replication(M1, H1) + + # Done + log.info('Test complete') + + +if __name__ == '__main__': + # Run isolated + # -s for DEBUG mode + CURRENT_FILE = os.path.realpath(__file__) + pytest.main("-s %s" % CURRENT_FILE) diff --git a/dirsrvtests/tests/tickets/ticket48342_test.py b/dirsrvtests/tests/tickets/ticket48342_test.py new file mode 100644 index 0000000..3fbab26 --- /dev/null +++ b/dirsrvtests/tests/tickets/ticket48342_test.py @@ -0,0 +1,150 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2022 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. 
+# --- END COPYRIGHT BLOCK --- +# +import pytest +from lib389.tasks import * +from lib389.utils import * +from lib389.topologies import topology_m3 + +from lib389._constants import SUFFIX, DEFAULT_SUFFIX, PLUGIN_DNA + +pytestmark = pytest.mark.tier2 + +logging.getLogger(__name__).setLevel(logging.DEBUG) +log = logging.getLogger(__name__) + +PEOPLE_OU = 'people' +PEOPLE_DN = "ou=%s,%s" % (PEOPLE_OU, SUFFIX) +MAX_ACCOUNTS = 5 + + +def _dna_config(server, nextValue=500, maxValue=510): + log.info("Add dna plugin config entry...%s" % server) + + try: + server.add_s(Entry(('cn=dna config,cn=Distributed Numeric Assignment Plugin,cn=plugins,cn=config', { + 'objectclass': 'top dnaPluginConfig'.split(), + 'dnaType': 'description', + 'dnaMagicRegen': '-1', + 'dnaFilter': '(objectclass=posixAccount)', + 'dnaScope': 'ou=people,%s' % SUFFIX, + 'dnaNextValue': str(nextValue), + 'dnaMaxValue': str(nextValue + maxValue), + 'dnaSharedCfgDN': 'ou=ranges,%s' % SUFFIX + }))) + + except ldap.LDAPError as e: + log.error('Failed to add DNA config entry: error ' + e.args[0]['desc']) + assert False + + log.info("Enable the DNA plugin...") + try: + server.plugins.enable(name=PLUGIN_DNA) + except e: + log.error("Failed to enable DNA Plugin: error " + e.args[0]['desc']) + assert False + + log.info("Restarting the server...") + server.stop(timeout=120) + time.sleep(1) + server.start(timeout=120) + time.sleep(3) + + +def test_ticket4026(topology_m3): + """Write your replication testcase here. + + To access each DirSrv instance use: topology_m3.ms["supplier1"], topology_m3.ms["supplier2"], + ..., topology_m3.hub1, ..., topology_m3.consumer1, ... + + Also, if you need any testcase initialization, + please, write additional fixture for that(include finalizer). 
+ """ + + try: + topology_m3.ms["supplier1"].add_s(Entry((PEOPLE_DN, { + 'objectclass': "top extensibleObject".split(), + 'ou': 'people'}))) + except ldap.ALREADY_EXISTS: + pass + + topology_m3.ms["supplier1"].add_s(Entry(('ou=ranges,' + SUFFIX, { + 'objectclass': 'top organizationalunit'.split(), + 'ou': 'ranges' + }))) + for cpt in range(MAX_ACCOUNTS): + name = "user%d" % (cpt) + topology_m3.ms["supplier1"].add_s(Entry(("uid=%s,%s" % (name, PEOPLE_DN), { + 'objectclass': 'top posixAccount extensibleObject'.split(), + 'uid': name, + 'cn': name, + 'uidNumber': '1', + 'gidNumber': '1', + 'homeDirectory': '/home/%s' % name + }))) + + # make supplier3 having more free slots that supplier2 + # so supplier1 will contact supplier3 + _dna_config(topology_m3.ms["supplier1"], nextValue=100, maxValue=10) + _dna_config(topology_m3.ms["supplier2"], nextValue=200, maxValue=10) + _dna_config(topology_m3.ms["supplier3"], nextValue=300, maxValue=3000) + + # Turn on lots of error logging now. + + mod = [(ldap.MOD_REPLACE, 'nsslapd-errorlog-level', b'16384')] + # mod = [(ldap.MOD_REPLACE, 'nsslapd-errorlog-level', '1')] + topology_m3.ms["supplier1"].modify_s('cn=config', mod) + topology_m3.ms["supplier2"].modify_s('cn=config', mod) + topology_m3.ms["supplier3"].modify_s('cn=config', mod) + + # We need to wait for the event in dna.c to fire to start the servers + # see dna.c line 899 + time.sleep(60) + + # add on supplier1 users with description DNA + for cpt in range(10): + name = "user_with_desc1_%d" % (cpt) + topology_m3.ms["supplier1"].add_s(Entry(("uid=%s,%s" % (name, PEOPLE_DN), { + 'objectclass': 'top posixAccount extensibleObject'.split(), + 'uid': name, + 'cn': name, + 'description': '-1', + 'uidNumber': '1', + 'gidNumber': '1', + 'homeDirectory': '/home/%s' % name + }))) + # give time to negociate supplier1 <--> supplier3 + time.sleep(10) + # add on supplier1 users with description DNA + for cpt in range(11, 20): + name = "user_with_desc1_%d" % (cpt) + 
topology_m3.ms["supplier1"].add_s(Entry(("uid=%s,%s" % (name, PEOPLE_DN), { + 'objectclass': 'top posixAccount extensibleObject'.split(), + 'uid': name, + 'cn': name, + 'description': '-1', + 'uidNumber': '1', + 'gidNumber': '1', + 'homeDirectory': '/home/%s' % name + }))) + log.info('Test complete') + # add on supplier1 users with description DNA + mod = [(ldap.MOD_REPLACE, 'nsslapd-errorlog-level', b'16384')] + # mod = [(ldap.MOD_REPLACE, 'nsslapd-errorlog-level', '1')] + topology_m3.ms["supplier1"].modify_s('cn=config', mod) + topology_m3.ms["supplier2"].modify_s('cn=config', mod) + topology_m3.ms["supplier3"].modify_s('cn=config', mod) + + log.info('Test complete') + + +if __name__ == '__main__': + # Run isolated + # -s for DEBUG mode + CURRENT_FILE = os.path.realpath(__file__) + pytest.main("-s %s" % CURRENT_FILE) diff --git a/dirsrvtests/tests/tickets/ticket48354_test.py b/dirsrvtests/tests/tickets/ticket48354_test.py new file mode 100644 index 0000000..34f3c41 --- /dev/null +++ b/dirsrvtests/tests/tickets/ticket48354_test.py @@ -0,0 +1,65 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2022 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. 
+# --- END COPYRIGHT BLOCK --- +# +import pytest +from lib389.utils import * +from lib389.topologies import topology_st + +from lib389._constants import DEFAULT_SUFFIX, HOST_STANDALONE, PORT_STANDALONE + +pytestmark = pytest.mark.tier2 + +DEBUGGING = os.getenv('DEBUGGING', False) + +if DEBUGGING: + logging.getLogger(__name__).setLevel(logging.DEBUG) +else: + logging.getLogger(__name__).setLevel(logging.INFO) + +log = logging.getLogger(__name__) + + +def _attr_present(conn, name): + results = conn.search_s(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE, '(%s=*)' % name, [name, ]) + if DEBUGGING: + print(results) + if len(results) > 0: + return True + return False + + +def test_ticket48354(topology_st): + """ + Test that we cannot view ACIs, userPassword, or certain other attributes as anonymous. + """ + + if DEBUGGING: + # Add debugging steps(if any)... + pass + + # Do an anonymous bind + conn = ldap.initialize("ldap://%s:%s" % (HOST_STANDALONE, PORT_STANDALONE)) + conn.simple_bind_s() + + # Make sure that we cannot see: + # * userPassword + assert (not _attr_present(conn, 'userPassword')) + # * aci + assert (not _attr_present(conn, 'aci')) + # * anything else? 
+ + conn.unbind_s() + + log.info('Test PASSED') + + +if __name__ == '__main__': + # Run isolated + # -s for DEBUG mode + CURRENT_FILE = os.path.realpath(__file__) + pytest.main("-s %s" % CURRENT_FILE) diff --git a/dirsrvtests/tests/tickets/ticket48362_test.py b/dirsrvtests/tests/tickets/ticket48362_test.py new file mode 100644 index 0000000..2eeb618 --- /dev/null +++ b/dirsrvtests/tests/tickets/ticket48362_test.py @@ -0,0 +1,169 @@ +import pytest +from lib389.tasks import * +from lib389.utils import * +from lib389.topologies import topology_m2 + +from lib389._constants import SUFFIX, DEFAULT_SUFFIX, PLUGIN_DNA + +# Skip on older versions +pytestmark = [pytest.mark.tier2, + pytest.mark.skipif(ds_is_older('1.3.4'), reason="Not implemented")] + +logging.getLogger(__name__).setLevel(logging.DEBUG) +log = logging.getLogger(__name__) + +PEOPLE_OU = 'people' +PEOPLE_DN = "ou=%s,%s" % (PEOPLE_OU, SUFFIX) +MAX_ACCOUNTS = 5 + +BINDMETHOD_ATTR = 'dnaRemoteBindMethod' +BINDMETHOD_VALUE = b'SASL/GSSAPI' +PROTOCOLE_ATTR = 'dnaRemoteConnProtocol' +PROTOCOLE_VALUE = b'LDAP' + +SHARE_CFG_BASE = 'ou=ranges,' + SUFFIX + + +def _dna_config(server, nextValue=500, maxValue=510): + log.info("Add dna plugin config entry...%s" % server) + + cfg_base_dn = 'cn=dna config,cn=Distributed Numeric Assignment Plugin,cn=plugins,cn=config' + + try: + server.add_s(Entry((cfg_base_dn, { + 'objectclass': 'top dnaPluginConfig'.split(), + 'dnaType': 'description', + 'dnaMagicRegen': '-1', + 'dnaFilter': '(objectclass=posixAccount)', + 'dnaScope': 'ou=people,%s' % SUFFIX, + 'dnaNextValue': str(nextValue), + 'dnaMaxValue': str(nextValue + maxValue), + 'dnaSharedCfgDN': 'ou=ranges,%s' % SUFFIX + }))) + + except ldap.LDAPError as e: + log.error('Failed to add DNA config entry: error ' + e.message['desc']) + assert False + + log.info("Enable the DNA plugin...") + try: + server.plugins.enable(name=PLUGIN_DNA) + except e: + log.error("Failed to enable DNA Plugin: error " + e.message['desc']) + assert False + + 
log.info("Restarting the server...") + server.stop(timeout=120) + time.sleep(1) + server.start(timeout=120) + time.sleep(3) + + +def _wait_shared_cfg_servers(server, expected): + attempts = 0 + ents = [] + try: + ents = server.search_s(SHARE_CFG_BASE, ldap.SCOPE_ONELEVEL, "(objectclass=*)") + except ldap.NO_SUCH_OBJECT: + pass + except lib389.NoSuchEntryError: + pass + while (len(ents) != expected): + assert attempts < 10 + time.sleep(5) + try: + ents = server.search_s(SHARE_CFG_BASE, ldap.SCOPE_ONELEVEL, "(objectclass=*)") + except ldap.NO_SUCH_OBJECT: + pass + except lib389.NoSuchEntryError: + pass + + +def _shared_cfg_server_update(server, method=BINDMETHOD_VALUE, transport=PROTOCOLE_VALUE): + log.info('\n======================== Update dnaPortNum=%d ============================\n' % server.port) + try: + ent = server.getEntry(SHARE_CFG_BASE, ldap.SCOPE_ONELEVEL, "(dnaPortNum=%d)" % server.port) + mod = [(ldap.MOD_REPLACE, BINDMETHOD_ATTR, ensure_bytes(method)), + (ldap.MOD_REPLACE, PROTOCOLE_ATTR, ensure_bytes(transport))] + server.modify_s(ent.dn, mod) + + log.info('\n======================== Update done\n') + ent = server.getEntry(SHARE_CFG_BASE, ldap.SCOPE_ONELEVEL, "(dnaPortNum=%d)" % server.port) + except ldap.NO_SUCH_OBJECT: + log.fatal("Unknown host") + assert False + + +def test_ticket48362(topology_m2): + """Write your replication testcase here. + + To access each DirSrv instance use: topology_m2.ms["supplier1"], topology_m2.ms["supplier2"], + ..., topology_m2.hub1, ..., topology_m2.consumer1, ... + + Also, if you need any testcase initialization, + please, write additional fixture for that(include finalizer). 
+ """ + + try: + topology_m2.ms["supplier1"].add_s(Entry((PEOPLE_DN, { + 'objectclass': "top extensibleObject".split(), + 'ou': 'people'}))) + except ldap.ALREADY_EXISTS: + pass + + topology_m2.ms["supplier1"].add_s(Entry((SHARE_CFG_BASE, { + 'objectclass': 'top organizationalunit'.split(), + 'ou': 'ranges' + }))) + # supplier 1 will have a valid remaining range (i.e. 101) + # supplier 2 will not have a valid remaining range (i.e. 0) so dna servers list on supplier2 + # will not contain supplier 2. So at restart, supplier 2 is recreated without the method/protocol attribute + _dna_config(topology_m2.ms["supplier1"], nextValue=1000, maxValue=100) + _dna_config(topology_m2.ms["supplier2"], nextValue=2000, maxValue=-1) + + # check we have all the servers available + _wait_shared_cfg_servers(topology_m2.ms["supplier1"], 2) + _wait_shared_cfg_servers(topology_m2.ms["supplier2"], 2) + + # now force the method/transport on the servers entry + _shared_cfg_server_update(topology_m2.ms["supplier1"]) + _shared_cfg_server_update(topology_m2.ms["supplier2"]) + + log.info('\n======================== BEFORE RESTART ============================\n') + ent = topology_m2.ms["supplier1"].getEntry(SHARE_CFG_BASE, ldap.SCOPE_ONELEVEL, + "(dnaPortNum=%d)" % topology_m2.ms["supplier1"].port) + log.info('\n======================== BEFORE RESTART ============================\n') + assert (ent.hasAttr(BINDMETHOD_ATTR) and ent.getValue(BINDMETHOD_ATTR) == BINDMETHOD_VALUE) + assert (ent.hasAttr(PROTOCOLE_ATTR) and ent.getValue(PROTOCOLE_ATTR) == PROTOCOLE_VALUE) + + ent = topology_m2.ms["supplier2"].getEntry(SHARE_CFG_BASE, ldap.SCOPE_ONELEVEL, + "(dnaPortNum=%d)" % topology_m2.ms["supplier2"].port) + log.info('\n======================== BEFORE RESTART ============================\n') + assert (ent.hasAttr(BINDMETHOD_ATTR) and ent.getValue(BINDMETHOD_ATTR) == BINDMETHOD_VALUE) + assert (ent.hasAttr(PROTOCOLE_ATTR) and ent.getValue(PROTOCOLE_ATTR) == PROTOCOLE_VALUE) + 
topology_m2.ms["supplier1"].restart(10) + topology_m2.ms["supplier2"].restart(10) + + # to allow DNA plugin to recreate the local host entry + time.sleep(40) + + log.info('\n=================== AFTER RESTART =================================\n') + ent = topology_m2.ms["supplier1"].getEntry(SHARE_CFG_BASE, ldap.SCOPE_ONELEVEL, + "(dnaPortNum=%d)" % topology_m2.ms["supplier1"].port) + log.info('\n=================== AFTER RESTART =================================\n') + assert (ent.hasAttr(BINDMETHOD_ATTR) and ent.getValue(BINDMETHOD_ATTR) == BINDMETHOD_VALUE) + assert (ent.hasAttr(PROTOCOLE_ATTR) and ent.getValue(PROTOCOLE_ATTR) == PROTOCOLE_VALUE) + + ent = topology_m2.ms["supplier2"].getEntry(SHARE_CFG_BASE, ldap.SCOPE_ONELEVEL, + "(dnaPortNum=%d)" % topology_m2.ms["supplier2"].port) + log.info('\n=================== AFTER RESTART =================================\n') + assert (ent.hasAttr(BINDMETHOD_ATTR) and ent.getValue(BINDMETHOD_ATTR) == BINDMETHOD_VALUE) + assert (ent.hasAttr(PROTOCOLE_ATTR) and ent.getValue(PROTOCOLE_ATTR) == PROTOCOLE_VALUE) + log.info('Test complete') + + +if __name__ == '__main__': + # Run isolated + # -s for DEBUG mode + CURRENT_FILE = os.path.realpath(__file__) + pytest.main("-s %s" % CURRENT_FILE) diff --git a/dirsrvtests/tests/tickets/ticket48366_test.py b/dirsrvtests/tests/tickets/ticket48366_test.py new file mode 100644 index 0000000..d30697d --- /dev/null +++ b/dirsrvtests/tests/tickets/ticket48366_test.py @@ -0,0 +1,148 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2016 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. 
+# --- END COPYRIGHT BLOCK --- +# +import logging + +import ldap +import pytest +from ldap.controls.simple import ProxyAuthzControl +from lib389 import Entry +from lib389._constants import * +from lib389.topologies import topology_st + +log = logging.getLogger(__name__) + +from lib389.utils import * + +# Skip on older versions +pytestmark = [pytest.mark.tier2, + pytest.mark.skipif(ds_is_older('1.3.5'), reason="Not implemented")] +PROXY_USER_DN = 'cn=proxy,ou=people,%s' % SUFFIX +TEST_USER_DN = 'cn=test,ou=people,%s' % SUFFIX +USER_PW = 'password' + +# subtrees used in test +SUBTREE_GREEN = "ou=green,%s" % SUFFIX +SUBTREE_RED = "ou=red,%s" % SUFFIX +SUBTREES = (SUBTREE_GREEN, SUBTREE_RED) + + +def test_ticket48366_init(topology_st): + """ + It creates identical entries in 3 subtrees + It creates aci which allow access to a set of attrs + in two of these subtrees for bound users + It creates a user to be used for test + + """ + + topology_st.standalone.log.info("Add subtree: %s" % SUBTREE_GREEN) + topology_st.standalone.add_s(Entry((SUBTREE_GREEN, { + 'objectclass': "top organizationalunit".split(), + 'ou': "green_one"}))) + topology_st.standalone.log.info("Add subtree: %s" % SUBTREE_RED) + topology_st.standalone.add_s(Entry((SUBTREE_RED, { + 'objectclass': "top organizationalunit".split(), + 'ou': "red"}))) + + # add proxy user and test user + topology_st.standalone.log.info("Add %s" % TEST_USER_DN) + topology_st.standalone.add_s(Entry((TEST_USER_DN, { + 'objectclass': "top person".split(), + 'sn': 'test', + 'cn': 'test', + 'userpassword': USER_PW}))) + topology_st.standalone.log.info("Add %s" % PROXY_USER_DN) + topology_st.standalone.add_s(Entry((PROXY_USER_DN, { + 'objectclass': "top person".split(), + 'sn': 'proxy', + 'cn': 'proxy', + 'userpassword': USER_PW}))) + + # enable acl error logging + # mod = [(ldap.MOD_REPLACE, 'nsslapd-errorlog-level', '128')] + # topology_st.standalone.modify_s(DN_CONFIG, mod) + + # get rid of default ACIs + mod = [(ldap.MOD_DELETE, 
'aci', None)] + topology_st.standalone.modify_s(SUFFIX, mod) + + # Ok Now add the proper ACIs + ACI_TARGET = "(target = \"ldap:///%s\")" % SUBTREE_GREEN + ACI_TARGETATTR = "(targetattr = \"objectclass || cn || sn || uid || givenname \")" + ACI_ALLOW = "(version 3.0; acl \"Allow search-read to green subtree\"; allow (read, search, compare)" + ACI_SUBJECT = " userdn = \"ldap:///%s\";)" % TEST_USER_DN + ACI_BODY = ACI_TARGET + ACI_TARGETATTR + ACI_ALLOW + ACI_SUBJECT + mod = [(ldap.MOD_ADD, 'aci', ensure_bytes(ACI_BODY))] + topology_st.standalone.modify_s(SUFFIX, mod) + + ACI_ALLOW = "(version 3.0; acl \"Allow use pf proxy auth to green subtree\"; allow (proxy)" + ACI_SUBJECT = " userdn = \"ldap:///%s\";)" % PROXY_USER_DN + ACI_BODY = ACI_TARGET + ACI_TARGETATTR + ACI_ALLOW + ACI_SUBJECT + mod = [(ldap.MOD_ADD, 'aci', ensure_bytes(ACI_BODY))] + topology_st.standalone.modify_s(SUFFIX, mod) + + log.info("Adding %d test entries...") + for id in range(2): + name = "%s%d" % ('test', id) + mail = "%s@example.com" % name + for subtree in SUBTREES: + topology_st.standalone.add_s(Entry(("cn=%s,%s" % (name, subtree), { + 'objectclass': "top person organizationalPerson inetOrgPerson".split(), + 'sn': name, + 'cn': name, + 'uid': name, + 'givenname': 'test', + 'mail': mail, + 'description': 'description', + 'employeenumber': "%d" % id, + 'telephonenumber': "%d%d%d" % (id, id, id), + 'mobile': "%d%d%d" % (id, id, id), + 'l': 'MV', + 'title': 'Engineer'}))) + + +def test_ticket48366_search_user(topology_st): + proxy_ctrl = ProxyAuthzControl(criticality=True, authzId=ensure_bytes("dn: " + TEST_USER_DN)) + # searching as test user should return one entry from the green subtree + topology_st.standalone.simple_bind_s(TEST_USER_DN, PASSWORD) + ents = topology_st.standalone.search_s(SUFFIX, ldap.SCOPE_SUBTREE, 'uid=test1') + assert (len(ents) == 1) + + # searching as proxy user should return no entry + topology_st.standalone.simple_bind_s(PROXY_USER_DN, PASSWORD) + ents = 
topology_st.standalone.search_s(SUFFIX, ldap.SCOPE_SUBTREE, 'uid=test1') + assert (len(ents) == 0) + + # serching as proxy user, authorizing as test user should return 1 entry + ents = topology_st.standalone.search_ext_s(SUFFIX, ldap.SCOPE_SUBTREE, 'uid=test1', serverctrls=[proxy_ctrl]) + assert (len(ents) == 1) + + +def test_ticket48366_search_dm(topology_st): + # searching as directory manager should return one entries from both subtrees + topology_st.standalone.simple_bind_s(DN_DM, PASSWORD) + ents = topology_st.standalone.search_s(SUFFIX, ldap.SCOPE_SUBTREE, 'uid=test1') + assert (len(ents) == 2) + + # searching as directory manager proxying test user should return one entry + proxy_ctrl = ProxyAuthzControl(criticality=True, authzId=ensure_bytes("dn: " + TEST_USER_DN)) + ents = topology_st.standalone.search_ext_s(SUFFIX, ldap.SCOPE_SUBTREE, 'uid=test1', serverctrls=[proxy_ctrl]) + assert (len(ents) == 1) + + # searching as directory manager proxying proxy user should return no entry + proxy_ctrl = ProxyAuthzControl(criticality=True, authzId=ensure_bytes("dn: " + PROXY_USER_DN)) + ents = topology_st.standalone.search_ext_s(SUFFIX, ldap.SCOPE_SUBTREE, 'uid=test1', serverctrls=[proxy_ctrl]) + assert (len(ents) == 0) + + +if __name__ == '__main__': + # Run isolated + # -s for DEBUG mode + CURRENT_FILE = os.path.realpath(__file__) + pytest.main("-s %s" % CURRENT_FILE) diff --git a/dirsrvtests/tests/tickets/ticket48370_test.py b/dirsrvtests/tests/tickets/ticket48370_test.py new file mode 100644 index 0000000..3b5d89e --- /dev/null +++ b/dirsrvtests/tests/tickets/ticket48370_test.py @@ -0,0 +1,202 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2022 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. 
+# --- END COPYRIGHT BLOCK --- +# +import pytest +from lib389.tasks import * +from lib389.utils import * +from lib389.topologies import topology_st + +pytestmark = pytest.mark.tier2 + +logging.getLogger(__name__).setLevel(logging.DEBUG) +log = logging.getLogger(__name__) + + +def test_ticket48370(topology_st): + """ + Deleting attirbute values and readding a value does not properly update + the pres index. The values are not actually deleted from the index + """ + + DN = 'uid=user0099,' + DEFAULT_SUFFIX + + # + # Add an entry + # + topology_st.standalone.add_s(Entry((DN, { + 'objectclass': ['top', 'person', + 'organizationalPerson', + 'inetorgperson', + 'posixAccount'], + 'givenname': 'test', + 'sn': 'user', + 'loginshell': '/bin/bash', + 'uidNumber': '10099', + 'gidNumber': '10099', + 'gecos': 'Test User', + 'mail': ['user0099@dev.null', + 'alias@dev.null', + 'user0099@redhat.com'], + 'cn': 'Test User', + 'homeDirectory': '/home/user0099', + 'uid': 'admin2', + 'userpassword': 'password'}))) + + # + # Perform modify (delete & add mail attributes) + # + try: + topology_st.standalone.modify_s(DN, [(ldap.MOD_DELETE, + 'mail', + b'user0099@dev.null'), + (ldap.MOD_DELETE, + 'mail', + b'alias@dev.null'), + (ldap.MOD_ADD, + 'mail', b'user0099@dev.null')]) + except ldap.LDAPError as e: + log.fatal('Failedto modify user: ' + str(e)) + assert False + + # + # Search using deleted attribute value- no entries should be returned + # + try: + entry = topology_st.standalone.search_s(DEFAULT_SUFFIX, + ldap.SCOPE_SUBTREE, + 'mail=alias@dev.null') + if entry: + log.fatal('Entry incorrectly returned') + assert False + except ldap.LDAPError as e: + log.fatal('Failed to search for user: ' + str(e)) + assert False + + # + # Search using existing attribute value - the entry should be returned + # + try: + entry = topology_st.standalone.search_s(DEFAULT_SUFFIX, + ldap.SCOPE_SUBTREE, + 'mail=user0099@dev.null') + if entry is None: + log.fatal('Entry not found, but it should have been') + 
assert False + except ldap.LDAPError as e: + log.fatal('Failed to search for user: ' + str(e)) + assert False + + # + # Delete the last values + # + try: + topology_st.standalone.modify_s(DN, [(ldap.MOD_DELETE, + 'mail', + b'user0099@dev.null'), + (ldap.MOD_DELETE, + 'mail', + b'user0099@redhat.com') + ]) + except ldap.LDAPError as e: + log.fatal('Failed to modify user: ' + str(e)) + assert False + + # + # Search using deleted attribute value - no entries should be returned + # + try: + entry = topology_st.standalone.search_s(DEFAULT_SUFFIX, + ldap.SCOPE_SUBTREE, + 'mail=user0099@redhat.com') + if entry: + log.fatal('Entry incorrectly returned') + assert False + except ldap.LDAPError as e: + log.fatal('Failed to search for user: ' + str(e)) + assert False + + # + # Make sure presence index is correctly updated - no entries should be + # returned + # + try: + entry = topology_st.standalone.search_s(DEFAULT_SUFFIX, + ldap.SCOPE_SUBTREE, + 'mail=*') + if entry: + log.fatal('Entry incorrectly returned') + assert False + except ldap.LDAPError as e: + log.fatal('Failed to search for user: ' + str(e)) + assert False + + # + # Now add the attributes back, and lets run a different set of tests with + # a different number of attributes + # + try: + topology_st.standalone.modify_s(DN, [(ldap.MOD_ADD, + 'mail', + [b'user0099@dev.null', + b'alias@dev.null'])]) + except ldap.LDAPError as e: + log.fatal('Failedto modify user: ' + str(e)) + assert False + + # + # Remove and readd some attibutes + # + try: + topology_st.standalone.modify_s(DN, [(ldap.MOD_DELETE, + 'mail', + b'alias@dev.null'), + (ldap.MOD_DELETE, + 'mail', + b'user0099@dev.null'), + (ldap.MOD_ADD, + 'mail', b'user0099@dev.null')]) + except ldap.LDAPError as e: + log.fatal('Failedto modify user: ' + str(e)) + assert False + + # + # Search using deleted attribute value - no entries should be returned + # + try: + entry = topology_st.standalone.search_s(DEFAULT_SUFFIX, + ldap.SCOPE_SUBTREE, + 'mail=alias@dev.null') + 
if entry: + log.fatal('Entry incorrectly returned') + assert False + except ldap.LDAPError as e: + log.fatal('Failed to search for user: ' + str(e)) + assert False + + # + # Search using existing attribute value - the entry should be returned + # + try: + entry = topology_st.standalone.search_s(DEFAULT_SUFFIX, + ldap.SCOPE_SUBTREE, + 'mail=user0099@dev.null') + if entry is None: + log.fatal('Entry not found, but it should have been') + assert False + except ldap.LDAPError as e: + log.fatal('Failed to search for user: ' + str(e)) + assert False + + log.info('Test PASSED') + + +if __name__ == '__main__': + # Run isolated + # -s for DEBUG mode + CURRENT_FILE = os.path.realpath(__file__) + pytest.main("-s %s" % CURRENT_FILE) diff --git a/dirsrvtests/tests/tickets/ticket48383_test.py b/dirsrvtests/tests/tickets/ticket48383_test.py new file mode 100644 index 0000000..3ae53eb --- /dev/null +++ b/dirsrvtests/tests/tickets/ticket48383_test.py @@ -0,0 +1,103 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2022 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. +# --- END COPYRIGHT BLOCK --- +# +import random +import string + +import pytest +from lib389.tasks import * +from lib389.utils import * +from lib389.topologies import topology_st + +from lib389._constants import DEFAULT_SUFFIX, SERVERID_STANDALONE + +pytestmark = pytest.mark.tier2 + +logging.getLogger(__name__).setLevel(logging.DEBUG) +log = logging.getLogger(__name__) + + +def test_ticket48383(topology_st): + """ + This test case will check that we re-alloc buffer sizes on import.c + + We achieve this by setting the servers dbcachesize to a stupid small value + and adding huge objects to ds. + + Then when we run db2index, either: + * If we are not using the re-alloc code, it will FAIL (Bad) + * If we re-alloc properly, it all works regardless. 
+ """ + + topology_st.standalone.config.set('nsslapd-maxbersize', '200000000') + topology_st.standalone.restart() + + # Create some stupid huge objects / attributes in DS. + # seeAlso is indexed by default. Lets do that! + # This will take a while ... + data = [random.choice(string.ascii_letters) for x in range(10000000)] + s = "".join(data) + + # This was here for an iteration test. + i = 1 + USER_DN = 'uid=user%s,ou=people,%s' % (i, DEFAULT_SUFFIX) + padding = ['%s' % n for n in range(400)] + + user = Entry((USER_DN, { + 'objectclass': 'top posixAccount person extensibleObject'.split(), + 'uid': 'user%s' % (i), + 'cn': 'user%s' % (i), + 'uidNumber': '%s' % (i), + 'gidNumber': '%s' % (i), + 'homeDirectory': '/home/user%s' % (i), + 'description': 'user description', + 'sn': s, + 'padding': padding, + })) + + topology_st.standalone.add_s(user) + + # Set the dbsize really low. + try: + topology_st.standalone.modify_s(DEFAULT_BENAME, [(ldap.MOD_REPLACE, + 'nsslapd-cachememsize', b'1')]) + except ldap.LDAPError as e: + log.fatal('Failed to change nsslapd-cachememsize {}'.format(e.args[0]['desc'])) + + ## Does ds try and set a minimum possible value for this? + ## Yes: [16/Feb/2016:16:39:18 +1000] - WARNING: cache too small, increasing to 500K bytes + # Given the formula, by default, this means DS will make the buffsize 400k + # So an object with a 1MB attribute should break indexing + + ldifpath = os.path.join(topology_st.standalone.get_ldif_dir(), "%s.ldif" % SERVERID_STANDALONE) + + # stop the server + topology_st.standalone.stop() + # Now export and import the DB. It's easier than db2index ... + topology_st.standalone.db2ldif(bename=DEFAULT_BENAME, suffixes=[DEFAULT_SUFFIX], excludeSuffixes=[], + encrypt=False, repl_data=True, outputfile=ldifpath) + + result = topology_st.standalone.ldif2db(DEFAULT_BENAME, None, None, False, ldifpath) + + assert (result) + topology_st.standalone.start() + + # see if user1 exists at all .... 
+ + result_user = topology_st.standalone.search_s(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE, '(uid=user1)') + + assert (len(result_user) > 0) + + log.info('Test complete') + + +if __name__ == '__main__': + # Run isolated + # -s for DEBUG mode + CURRENT_FILE = os.path.realpath(__file__) + pytest.main("-s %s" % CURRENT_FILE) diff --git a/dirsrvtests/tests/tickets/ticket48497_test.py b/dirsrvtests/tests/tickets/ticket48497_test.py new file mode 100644 index 0000000..0e27232 --- /dev/null +++ b/dirsrvtests/tests/tickets/ticket48497_test.py @@ -0,0 +1,122 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2022 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. +# --- END COPYRIGHT BLOCK --- +# +import pytest +from lib389.tasks import * +from lib389.utils import * +from lib389.topologies import topology_st + +from lib389._constants import DEFAULT_SUFFIX, SUFFIX + +pytestmark = pytest.mark.tier2 + +logging.getLogger(__name__).setLevel(logging.DEBUG) +log = logging.getLogger(__name__) + +NEW_ACCOUNT = "new_account" +MAX_ACCOUNTS = 20 + +MIXED_VALUE = "/home/mYhOmEdIrEcToRy" +LOWER_VALUE = "/home/myhomedirectory" +HOMEDIRECTORY_INDEX = 'cn=homeDirectory,cn=index,cn=userRoot,cn=ldbm database,cn=plugins,cn=config' +HOMEDIRECTORY_CN = "homedirectory" +MATCHINGRULE = 'nsMatchingRule' +UIDNUMBER_INDEX = 'cn=uidnumber,cn=index,cn=userRoot,cn=ldbm database,cn=plugins,cn=config' +UIDNUMBER_CN = "uidnumber" + + +def test_ticket48497_init(topology_st): + log.info("Initialization: add dummy entries for the tests") + for cpt in range(MAX_ACCOUNTS): + name = "%s%d" % (NEW_ACCOUNT, cpt) + topology_st.standalone.add_s(Entry(("uid=%s,%s" % (name, SUFFIX), { + 'objectclass': "top posixAccount".split(), + 'uid': name, + 'cn': name, + 'uidnumber': str(111), + 'gidnumber': str(222), + 'homedirectory': "/home/tb_%d" % cpt}))) + + +def test_ticket48497_homeDirectory_mixed_value(topology_st): + # Set a homedirectory value with mixed 
case + name = "uid=%s1,%s" % (NEW_ACCOUNT, SUFFIX) + mod = [(ldap.MOD_REPLACE, 'homeDirectory', ensure_bytes(MIXED_VALUE))] + topology_st.standalone.modify_s(name, mod) + + +def test_ticket48497_extensible_search(topology_st): + name = "uid=%s1,%s" % (NEW_ACCOUNT, SUFFIX) + + # check with the exact stored value + log.info("Default: can retrieve an entry filter syntax with exact stored value") + ent = topology_st.standalone.getEntry(name, ldap.SCOPE_BASE, "(homeDirectory=%s)" % MIXED_VALUE) + log.info("Default: can retrieve an entry filter caseExactIA5Match with exact stored value") + ent = topology_st.standalone.getEntry(name, ldap.SCOPE_BASE, "(homeDirectory:caseExactIA5Match:=%s)" % MIXED_VALUE) + + # check with a lower case value that is different from the stored value + log.info("Default: can not retrieve an entry filter syntax match with lowered stored value") + try: + ent = topology_st.standalone.getEntry(name, ldap.SCOPE_BASE, "(homeDirectory=%s)" % LOWER_VALUE) + assert ent is None + except ldap.NO_SUCH_OBJECT: + pass + log.info("Default: can not retrieve an entry filter caseExactIA5Match with lowered stored value") + try: + ent = topology_st.standalone.getEntry(name, ldap.SCOPE_BASE, + "(homeDirectory:caseExactIA5Match:=%s)" % LOWER_VALUE) + assert ent is None + except ldap.NO_SUCH_OBJECT: + pass + log.info("Default: can retrieve an entry filter caseIgnoreIA5Match with lowered stored value") + ent = topology_st.standalone.getEntry(name, ldap.SCOPE_BASE, "(homeDirectory:caseIgnoreIA5Match:=%s)" % LOWER_VALUE) + + +def test_ticket48497_homeDirectory_index_cfg(topology_st): + log.info("\n\nindex homeDirectory in caseIgnoreIA5Match and caseExactIA5Match") + try: + ent = topology_st.standalone.getEntry(HOMEDIRECTORY_INDEX, ldap.SCOPE_BASE) + except ldap.NO_SUCH_OBJECT: + topology_st.standalone.add_s(Entry((HOMEDIRECTORY_INDEX, { + 'objectclass': "top nsIndex".split(), + 'cn': HOMEDIRECTORY_CN, + 'nsSystemIndex': 'false', + 'nsIndexType': 'eq'}))) + + 
IGNORE_MR_NAME = b'caseIgnoreIA5Match' + EXACT_MR_NAME = b'caseExactIA5Match' + mod = [(ldap.MOD_REPLACE, MATCHINGRULE, (IGNORE_MR_NAME, EXACT_MR_NAME))] + topology_st.standalone.modify_s(HOMEDIRECTORY_INDEX, mod) + + +def test_ticket48497_homeDirectory_index_run(topology_st): + args = {TASK_WAIT: True} + topology_st.standalone.tasks.reindex(suffix=SUFFIX, attrname='homeDirectory', args=args) + + log.info("Check indexing succeeded with a specified matching rule") + file_obj = open(topology_st.standalone.errlog, "r") + + # Check if the MR configuration failure occurs + regex = re.compile("unknown or invalid matching rule") + while True: + line = file_obj.readline() + found = regex.search(line) + if ((line == '') or (found)): + break + + if (found): + log.info("The configuration of a specific MR fails") + log.info(line) + assert 0 + + +if __name__ == '__main__': + # Run isolated + # -s for DEBUG mode + CURRENT_FILE = os.path.realpath(__file__) + pytest.main("-s %s" % CURRENT_FILE) diff --git a/dirsrvtests/tests/tickets/ticket48637_test.py b/dirsrvtests/tests/tickets/ticket48637_test.py new file mode 100644 index 0000000..fbb731b --- /dev/null +++ b/dirsrvtests/tests/tickets/ticket48637_test.py @@ -0,0 +1,158 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2022 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. 
+# --- END COPYRIGHT BLOCK --- +# +import pytest +from lib389.tasks import * +from lib389.utils import * +from lib389.topologies import topology_st + +pytestmark = pytest.mark.tier2 + +DEBUGGING = os.getenv('DEBUGGING', False) + +if DEBUGGING: + logging.getLogger(__name__).setLevel(logging.DEBUG) +else: + logging.getLogger(__name__).setLevel(logging.INFO) + +log = logging.getLogger(__name__) + +USER_DN = "uid=test,ou=people,dc=example,dc=com" +GROUP_DN = "cn=group,dc=example,dc=com" +GROUP_OU = "ou=groups,dc=example,dc=com" +PEOPLE_OU = "ou=people,dc=example,dc=com" +MEP_OU = "ou=mep,dc=example,dc=com" +MEP_TEMPLATE = "cn=mep template,dc=example,dc=com" +AUTO_DN = "cn=All Users,cn=Auto Membership Plugin,cn=plugins,cn=config" +MEP_DN = "cn=MEP Definition,cn=Managed Entries,cn=plugins,cn=config" + + +def test_ticket48637(topology_st): + """Test for entry cache corruption + + This requires automember and managed entry plugins to be configured. + + Then remove the group that automember would use to trigger a failure when + adding a new entry. Automember fails, and then managed entry also fails. + + Make sure a base search on the entry returns error 32 + """ + + if DEBUGGING: + # Add debugging steps(if any)... 
+ pass + + # + # Add our setup entries + # + try: + topology_st.standalone.add_s(Entry((PEOPLE_OU, { + 'objectclass': 'top organizationalunit'.split(), + 'ou': 'people'}))) + except ldap.ALREADY_EXISTS: + pass + except ldap.LDAPError as e: + log.fatal('Failed to add people ou: ' + str(e)) + assert False + + try: + topology_st.standalone.add_s(Entry((GROUP_OU, { + 'objectclass': 'top organizationalunit'.split(), + 'ou': 'groups'}))) + except ldap.ALREADY_EXISTS: + pass + except ldap.LDAPError as e: + log.fatal('Failed to add groups ou: ' + str(e)) + assert False + + try: + topology_st.standalone.add_s(Entry((MEP_OU, { + 'objectclass': 'top extensibleObject'.split(), + 'ou': 'mep'}))) + except ldap.LDAPError as e: + log.fatal('Failed to add MEP ou: ' + str(e)) + assert False + + try: + topology_st.standalone.add_s(Entry((MEP_TEMPLATE, { + 'objectclass': 'top mepTemplateEntry'.split(), + 'cn': 'mep template', + 'mepRDNAttr': 'cn', + 'mepStaticAttr': 'objectclass: groupofuniquenames', + 'mepMappedAttr': 'cn: $uid'}))) + except ldap.LDAPError as e: + log.fatal('Failed to add MEP ou: ' + str(e)) + assert False + + # + # Configure automember + # + try: + topology_st.standalone.add_s(Entry((AUTO_DN, { + 'cn': 'All Users', + 'objectclass': ['top', 'autoMemberDefinition'], + 'autoMemberScope': 'dc=example,dc=com', + 'autoMemberFilter': 'objectclass=person', + 'autoMemberDefaultGroup': GROUP_DN, + 'autoMemberGroupingAttr': 'uniquemember:dn'}))) + except ldap.LDAPError as e: + log.fatal('Failed to configure automember plugin : ' + str(e)) + assert False + + # + # Configure managed entry plugin + # + try: + topology_st.standalone.add_s(Entry((MEP_DN, { + 'cn': 'MEP Definition', + 'objectclass': ['top', 'extensibleObject'], + 'originScope': 'ou=people,dc=example,dc=com', + 'originFilter': 'objectclass=person', + 'managedBase': 'ou=groups,dc=example,dc=com', + 'managedTemplate': MEP_TEMPLATE}))) + except ldap.LDAPError as e: + log.fatal('Failed to configure managed entry plugin : 
' + str(e)) + assert False + + # + # Restart DS + # + topology_st.standalone.restart(timeout=30) + + # + # Add entry that should fail since the automember group does not exist + # + try: + topology_st.standalone.add_s(Entry((USER_DN, { + 'uid': 'test', + 'objectclass': ['top', 'person', 'extensibleObject'], + 'sn': 'test', + 'cn': 'test'}))) + except ldap.LDAPError as e: + pass + + # + # Search for the entry - it should not be returned + # + try: + entry = topology_st.standalone.search_s(USER_DN, ldap.SCOPE_SUBTREE, + 'objectclass=*') + if entry: + log.fatal('Entry was incorrectly returned') + assert False + except ldap.NO_SUCH_OBJECT: + pass + + log.info('Test PASSED') + + +if __name__ == '__main__': + # Run isolated + # -s for DEBUG mode + CURRENT_FILE = os.path.realpath(__file__) + pytest.main("-s %s" % CURRENT_FILE) diff --git a/dirsrvtests/tests/tickets/ticket48665_test.py b/dirsrvtests/tests/tickets/ticket48665_test.py new file mode 100644 index 0000000..1781cd1 --- /dev/null +++ b/dirsrvtests/tests/tickets/ticket48665_test.py @@ -0,0 +1,80 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2022 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. +# --- END COPYRIGHT BLOCK --- +# +import pytest +from lib389.utils import * +from lib389.topologies import topology_st + +from lib389._constants import DEFAULT_SUFFIX, DEFAULT_BENAME + +pytestmark = pytest.mark.tier2 + +logging.getLogger(__name__).setLevel(logging.DEBUG) +log = logging.getLogger(__name__) + + +def test_ticket48665(topology_st): + """ + This tests deletion of certain cn=config values. + + First, it should be able to delete, and not crash the server. + + Second, we might be able to delete then add to replace values. + + We should also still be able to mod replace the values and keep the server alive. 
+ """ + # topology_st.standalone.config.enable_log('audit') + # topology_st.standalone.config.enable_log('auditfail') + # This will trigger a mod delete then add. + + topology_st.standalone.modify_s('cn=config,cn=ldbm database,cn=plugins,cn=config', + [(ldap.MOD_REPLACE, 'nsslapd-cache-autosize', b'0')]) + + try: + modlist = [(ldap.MOD_DELETE, 'nsslapd-cachememsize', None), (ldap.MOD_ADD, 'nsslapd-cachememsize', b'1')] + topology_st.standalone.modify_s("cn=%s,cn=ldbm database,cn=plugins,cn=config" % DEFAULT_BENAME, + modlist) + except: + pass + + # Check the server has not commited seppuku. + entries = topology_st.standalone.search_s(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE, '(cn=*)') + assert len(entries) > 0 + log.info('{} entries are returned from the server.'.format(len(entries))) + + # This has a magic hack to determine if we are in cn=config. + try: + topology_st.standalone.modify_s(DEFAULT_BENAME, [(ldap.MOD_REPLACE, + 'nsslapd-cachememsize', b'1')]) + except ldap.LDAPError as e: + log.fatal('Failed to change nsslapd-cachememsize ' + e.args[0]['desc']) + + # Check the server has not commited seppuku. + entries = topology_st.standalone.search_s(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE, '(cn=*)') + assert len(entries) > 0 + log.info('{} entries are returned from the server.'.format(len(entries))) + + # Now try with mod_replace. This should be okay. + + modlist = [(ldap.MOD_REPLACE, 'nsslapd-cachememsize', b'1')] + topology_st.standalone.modify_s("cn=%s,cn=ldbm database,cn=plugins,cn=config" % DEFAULT_BENAME, + modlist) + + # Check the server has not commited seppuku. 
+ entries = topology_st.standalone.search_s(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE, '(cn=*)') + assert len(entries) > 0 + log.info('{} entries are returned from the server.'.format(len(entries))) + + log.info('Test complete') + + +if __name__ == '__main__': + # Run isolated + # -s for DEBUG mode + CURRENT_FILE = os.path.realpath(__file__) + pytest.main("-s %s" % CURRENT_FILE) diff --git a/dirsrvtests/tests/tickets/ticket48745_test.py b/dirsrvtests/tests/tickets/ticket48745_test.py new file mode 100644 index 0000000..ce07b34 --- /dev/null +++ b/dirsrvtests/tests/tickets/ticket48745_test.py @@ -0,0 +1,136 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2022 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. +# --- END COPYRIGHT BLOCK --- +# +import pytest +from lib389.tasks import * +from lib389.utils import * +from lib389.topologies import topology_st + +from lib389._constants import SUFFIX, DEFAULT_SUFFIX + +pytestmark = pytest.mark.tier2 + +logging.getLogger(__name__).setLevel(logging.DEBUG) +log = logging.getLogger(__name__) + +NEW_ACCOUNT = "new_account" +MAX_ACCOUNTS = 20 + +MIXED_VALUE = "/home/mYhOmEdIrEcToRy" +LOWER_VALUE = "/home/myhomedirectory" +HOMEDIRECTORY_INDEX = 'cn=homeDirectory,cn=index,cn=userRoot,cn=ldbm database,cn=plugins,cn=config' +HOMEDIRECTORY_CN = "homedirectory" +MATCHINGRULE = 'nsMatchingRule' +UIDNUMBER_INDEX = 'cn=uidnumber,cn=index,cn=userRoot,cn=ldbm database,cn=plugins,cn=config' +UIDNUMBER_CN = "uidnumber" + + +def test_ticket48745_init(topology_st): + log.info("Initialization: add dummy entries for the tests") + for cpt in range(MAX_ACCOUNTS): + name = "%s%d" % (NEW_ACCOUNT, cpt) + topology_st.standalone.add_s(Entry(("uid=%s,%s" % (name, SUFFIX), { + 'objectclass': "top posixAccount".split(), + 'uid': name, + 'cn': name, + 'uidnumber': str(111), + 'gidnumber': str(222), + 'homedirectory': "/home/tbordaz_%d" % cpt}))) + + +def 
test_ticket48745_homeDirectory_indexed_cis(topology_st): + log.info("\n\nindex homeDirectory in caseIgnoreIA5Match and caseExactIA5Match") + try: + ent = topology_st.standalone.getEntry(HOMEDIRECTORY_INDEX, ldap.SCOPE_BASE) + except ldap.NO_SUCH_OBJECT: + topology_st.standalone.add_s(Entry((HOMEDIRECTORY_INDEX, { + 'objectclass': "top nsIndex".split(), + 'cn': HOMEDIRECTORY_CN, + 'nsSystemIndex': 'false', + 'nsIndexType': 'eq'}))) + # log.info("attach debugger") + # time.sleep(60) + + IGNORE_MR_NAME = b'caseIgnoreIA5Match' + EXACT_MR_NAME = b'caseExactIA5Match' + mod = [(ldap.MOD_REPLACE, MATCHINGRULE, (IGNORE_MR_NAME, EXACT_MR_NAME))] + topology_st.standalone.modify_s(HOMEDIRECTORY_INDEX, mod) + + # topology_st.standalone.stop(timeout=10) + log.info("successfully checked that filter with exact mr , a filter with lowercase eq is failing") + # assert topology_st.standalone.db2index(bename=DEFAULT_BENAME, suffixes=None, attrs=['homeDirectory']) + # topology_st.standalone.start(timeout=10) + args = {TASK_WAIT: True} + topology_st.standalone.tasks.reindex(suffix=SUFFIX, attrname='homeDirectory', args=args) + + log.info("Check indexing succeeded with a specified matching rule") + file_obj = open(topology_st.standalone.errlog, "r") + + # Check if the MR configuration failure occurs + regex = re.compile("unknown or invalid matching rule") + while True: + line = file_obj.readline() + found = regex.search(line) + if ((line == '') or (found)): + break + + if (found): + log.info("The configuration of a specific MR fails") + log.info(line) + assert 0 + + +def test_ticket48745_homeDirectory_mixed_value(topology_st): + # Set a homedirectory value with mixed case + name = "uid=%s1,%s" % (NEW_ACCOUNT, SUFFIX) + mod = [(ldap.MOD_REPLACE, 'homeDirectory', ensure_bytes(MIXED_VALUE))] + topology_st.standalone.modify_s(name, mod) + + +def test_ticket48745_extensible_search_after_index(topology_st): + name = "uid=%s1,%s" % (NEW_ACCOUNT, SUFFIX) + + # check with the exact stored value + 
log.info("Default: can retrieve an entry filter syntax with exact stored value") + ent = topology_st.standalone.getEntry(SUFFIX, ldap.SCOPE_SUBTREE, "(homeDirectory=%s)" % MIXED_VALUE) + # log.info("attach debugger") + # time.sleep(60) + + # This search will fail because a + # subtree search with caseExactIA5Match will find a key + # where the value has been lowercase + log.info("Default: can retrieve an entry filter caseExactIA5Match with exact stored value") + ent = topology_st.standalone.getEntry(SUFFIX, ldap.SCOPE_SUBTREE, + "(homeDirectory:caseExactIA5Match:=%s)" % MIXED_VALUE) + assert ent + + # But do additional searches.. just for more tests + # check with a lower case value that is different from the stored value + log.info("Default: can not retrieve an entry filter syntax match with lowered stored value") + try: + ent = topology_st.standalone.getEntry(SUFFIX, ldap.SCOPE_SUBTREE, "(homeDirectory=%s)" % LOWER_VALUE) + assert ent is None + except ldap.NO_SUCH_OBJECT: + pass + log.info("Default: can not retrieve an entry filter caseExactIA5Match with lowered stored value") + try: + ent = topology_st.standalone.getEntry(SUFFIX, ldap.SCOPE_SUBTREE, + "(homeDirectory:caseExactIA5Match:=%s)" % LOWER_VALUE) + assert ent is None + except ldap.NO_SUCH_OBJECT: + pass + log.info("Default: can retrieve an entry filter caseIgnoreIA5Match with lowered stored value") + ent = topology_st.standalone.getEntry(SUFFIX, ldap.SCOPE_SUBTREE, + "(homeDirectory:caseIgnoreIA5Match:=%s)" % LOWER_VALUE) + + +if __name__ == '__main__': + # Run isolated + # -s for DEBUG mode + CURRENT_FILE = os.path.realpath(__file__) + pytest.main("-s %s" % CURRENT_FILE) diff --git a/dirsrvtests/tests/tickets/ticket48746_test.py b/dirsrvtests/tests/tickets/ticket48746_test.py new file mode 100644 index 0000000..8b2b72a --- /dev/null +++ b/dirsrvtests/tests/tickets/ticket48746_test.py @@ -0,0 +1,156 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2022 Red Hat, Inc. +# All rights reserved. 
+# +# License: GPL (version 3 or any later version). +# See LICENSE for details. +# --- END COPYRIGHT BLOCK --- +# +import pytest +from lib389.tasks import * +from lib389.utils import * +from lib389.topologies import topology_st + +from lib389._constants import SUFFIX, DEFAULT_SUFFIX, DEFAULT_BENAME + +pytestmark = pytest.mark.tier2 + +logging.getLogger(__name__).setLevel(logging.DEBUG) +log = logging.getLogger(__name__) + +NEW_ACCOUNT = "new_account" +MAX_ACCOUNTS = 20 + +MIXED_VALUE = "/home/mYhOmEdIrEcToRy" +LOWER_VALUE = "/home/myhomedirectory" +HOMEDIRECTORY_INDEX = 'cn=homeDirectory,cn=index,cn=userRoot,cn=ldbm database,cn=plugins,cn=config' +HOMEDIRECTORY_CN = "homedirectory" +MATCHINGRULE = 'nsMatchingRule' +UIDNUMBER_INDEX = 'cn=uidnumber,cn=index,cn=userRoot,cn=ldbm database,cn=plugins,cn=config' +UIDNUMBER_CN = "uidnumber" + + +def test_ticket48746_init(topology_st): + log.info("Initialization: add dummy entries for the tests") + for cpt in range(MAX_ACCOUNTS): + name = "%s%d" % (NEW_ACCOUNT, cpt) + topology_st.standalone.add_s(Entry(("uid=%s,%s" % (name, SUFFIX), { + 'objectclass': "top posixAccount".split(), + 'uid': name, + 'cn': name, + 'uidnumber': str(111), + 'gidnumber': str(222), + 'homedirectory': "/home/tbordaz_%d" % cpt}))) + + +def test_ticket48746_homeDirectory_indexed_cis(topology_st): + log.info("\n\nindex homeDirectory in caseIgnoreIA5Match and caseExactIA5Match") + try: + ent = topology_st.standalone.getEntry(HOMEDIRECTORY_INDEX, ldap.SCOPE_BASE) + except ldap.NO_SUCH_OBJECT: + topology_st.standalone.add_s(Entry((HOMEDIRECTORY_INDEX, { + 'objectclass': "top nsIndex".split(), + 'cn': HOMEDIRECTORY_CN, + 'nsSystemIndex': 'false', + 'nsIndexType': 'eq'}))) + # log.info("attach debugger") + # time.sleep(60) + + IGNORE_MR_NAME = b'caseIgnoreIA5Match' + EXACT_MR_NAME = b'caseExactIA5Match' + mod = [(ldap.MOD_REPLACE, MATCHINGRULE, (IGNORE_MR_NAME, EXACT_MR_NAME))] + topology_st.standalone.modify_s(HOMEDIRECTORY_INDEX, mod) + + # 
topology_st.standalone.stop(timeout=10) + log.info("successfully checked that filter with exact mr , a filter with lowercase eq is failing") + # assert topology_st.standalone.db2index(bename=DEFAULT_BENAME, suffixes=None, attrs=['homeDirectory']) + # topology_st.standalone.start(timeout=10) + args = {TASK_WAIT: True} + topology_st.standalone.tasks.reindex(suffix=SUFFIX, attrname='homeDirectory', args=args) + + log.info("Check indexing succeeded with a specified matching rule") + file_obj = open(topology_st.standalone.errlog, "r") + + # Check if the MR configuration failure occurs + regex = re.compile("unknown or invalid matching rule") + while True: + line = file_obj.readline() + found = regex.search(line) + if ((line == '') or (found)): + break + + if (found): + log.info("The configuration of a specific MR fails") + log.info(line) + assert not found + + +def test_ticket48746_homeDirectory_mixed_value(topology_st): + # Set a homedirectory value with mixed case + name = "uid=%s1,%s" % (NEW_ACCOUNT, SUFFIX) + mod = [(ldap.MOD_REPLACE, 'homeDirectory', ensure_bytes(MIXED_VALUE))] + topology_st.standalone.modify_s(name, mod) + + +def test_ticket48746_extensible_search_after_index(topology_st): + name = "uid=%s1,%s" % (NEW_ACCOUNT, SUFFIX) + + # check with the exact stored value + # log.info("Default: can retrieve an entry filter syntax with exact stored value") + # ent = topology_st.standalone.getEntry(name, ldap.SCOPE_BASE, "(homeDirectory=%s)" % MIXED_VALUE) + # log.info("attach debugger") + # time.sleep(60) + + # This search is enought to trigger the crash + # because it loads a registered filter MR plugin that has no indexer create function + # following index will trigger the crash + log.info("Default: can retrieve an entry filter caseExactIA5Match with exact stored value") + ent = topology_st.standalone.getEntry(name, ldap.SCOPE_BASE, "(homeDirectory:caseExactIA5Match:=%s)" % MIXED_VALUE) + + +def test_ticket48746_homeDirectory_indexed_ces(topology_st): + 
log.info("\n\nindex homeDirectory in caseExactIA5Match, this would trigger the crash") + try: + ent = topology_st.standalone.getEntry(HOMEDIRECTORY_INDEX, ldap.SCOPE_BASE) + except ldap.NO_SUCH_OBJECT: + topology_st.standalone.add_s(Entry((HOMEDIRECTORY_INDEX, { + 'objectclass': "top nsIndex".split(), + 'cn': HOMEDIRECTORY_CN, + 'nsSystemIndex': 'false', + 'nsIndexType': 'eq'}))) + # log.info("attach debugger") + # time.sleep(60) + + EXACT_MR_NAME = b'caseExactIA5Match' + mod = [(ldap.MOD_REPLACE, MATCHINGRULE, (EXACT_MR_NAME))] + topology_st.standalone.modify_s(HOMEDIRECTORY_INDEX, mod) + + # topology_st.standalone.stop(timeout=10) + log.info("successfully checked that filter with exact mr , a filter with lowercase eq is failing") + # assert topology_st.standalone.db2index(bename=DEFAULT_BENAME, suffixes=None, attrs=['homeDirectory']) + # topology_st.standalone.start(timeout=10) + args = {TASK_WAIT: True} + topology_st.standalone.tasks.reindex(suffix=SUFFIX, attrname='homeDirectory', args=args) + + log.info("Check indexing succeeded with a specified matching rule") + file_obj = open(topology_st.standalone.errlog, "r") + + # Check if the MR configuration failure occurs + regex = re.compile("unknown or invalid matching rule") + while True: + line = file_obj.readline() + found = regex.search(line) + if ((line == '') or (found)): + break + + if (found): + log.info("The configuration of a specific MR fails") + log.info(line) + assert not found + + +if __name__ == '__main__': + # Run isolated + # -s for DEBUG mode + CURRENT_FILE = os.path.realpath(__file__) + pytest.main("-s %s" % CURRENT_FILE) diff --git a/dirsrvtests/tests/tickets/ticket48759_test.py b/dirsrvtests/tests/tickets/ticket48759_test.py new file mode 100644 index 0000000..c7370b6 --- /dev/null +++ b/dirsrvtests/tests/tickets/ticket48759_test.py @@ -0,0 +1,227 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2016 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). 
+# See LICENSE for details. +# --- END COPYRIGHT BLOCK --- +# +import logging + +import pytest +from lib389.tasks import * +from lib389.utils import * +from lib389.topologies import topology_st +from lib389.replica import ReplicationManager,Replicas + +from lib389._constants import (PLUGIN_MEMBER_OF, DEFAULT_SUFFIX, ReplicaRole, REPLICAID_SUPPLIER_1, + PLUGIN_RETRO_CHANGELOG, REPLICA_PRECISE_PURGING, REPLICA_PURGE_DELAY, + REPLICA_PURGE_INTERVAL) + +pytestmark = pytest.mark.tier2 + +log = logging.getLogger(__name__) + +MEMBEROF_PLUGIN_DN = ('cn=' + PLUGIN_MEMBER_OF + ',cn=plugins,cn=config') +GROUP_DN = ("cn=group," + DEFAULT_SUFFIX) +MEMBER_DN_COMP = "uid=member" + + +def _add_group_with_members(topology_st): + # Create group + try: + topology_st.standalone.add_s(Entry((GROUP_DN, + {'objectclass': 'top groupofnames'.split(), + 'cn': 'group'}))) + except ldap.LDAPError as e: + log.fatal('Failed to add group: error ' + e.args[0]['desc']) + assert False + + # Add members to the group - set timeout + log.info('Adding members to the group...') + for idx in range(1, 5): + try: + MEMBER_VAL = ("uid=member%d,%s" % (idx, DEFAULT_SUFFIX)) + topology_st.standalone.modify_s(GROUP_DN, + [(ldap.MOD_ADD, + 'member', + ensure_bytes(MEMBER_VAL))]) + except ldap.LDAPError as e: + log.fatal('Failed to update group: member (%s) - error: %s' % + (MEMBER_VAL, e.args[0]['desc'])) + assert False + + +def _find_retrocl_changes(topology_st, user_dn=None): + ents = topology_st.standalone.search_s('cn=changelog', ldap.SCOPE_SUBTREE, '(targetDn=%s)' % user_dn) + return len(ents) + + +def _find_memberof(topology_st, user_dn=None, group_dn=None, find_result=True): + ent = topology_st.standalone.getEntry(user_dn, ldap.SCOPE_BASE, "(objectclass=*)", ['memberof']) + found = False + if ent.hasAttr('memberof'): + + for val in ent.getValues('memberof'): + topology_st.standalone.log.info("!!!!!!! 
%s: memberof->%s" % (user_dn, val)) + if ensure_str(val) == group_dn: + found = True + break + + if find_result: + assert (found) + else: + assert (not found) + + +def test_ticket48759(topology_st): + """ + The fix for ticket 48759 has to prevent plugin calls for tombstone purging + + The test uses the memberof and retrocl plugins to verify this. + In tombstone purging without the fix the mmeberof plugin is called, + if the tombstone entry is a group, + it modifies the user entries for the group + and if retrocl is enabled this mod is written to the retrocl + + The test sequence is: + - enable replication + - enable memberof and retro cl plugin + - add user entries + - add a group and add the users as members + - verify memberof is set to users + - delete the group + - verify memberof is removed from users + - add group again + - verify memberof is set to users + - get number of changes in retro cl for one user + - configure tombstone purging + - wait for purge interval to pass + - add a dummy entry to increase maxcsn + - wait for purge interval to pass two times + - get number of changes in retro cl for user again + - assert there was no additional change + """ + + log.info('Testing Ticket 48759 - no plugin calls for tombstone purging') + + # + # Setup Replication + # + log.info('Setting up replication...') + repl = ReplicationManager(DEFAULT_SUFFIX) + repl.create_first_supplier(topology_st.standalone) + # + # enable dynamic plugins, memberof and retro cl plugin + # + log.info('Enable plugins...') + try: + topology_st.standalone.config.set('nsslapd-dynamic-plugins', 'on') + except ldap.LDAPError as e: + ldap.error('Failed to enable dynamic plugins! 
' + e.args[0]['desc']) + assert False + + topology_st.standalone.plugins.enable(name=PLUGIN_MEMBER_OF) + topology_st.standalone.plugins.enable(name=PLUGIN_RETRO_CHANGELOG) + # Configure memberOf group attribute + try: + topology_st.standalone.modify_s(MEMBEROF_PLUGIN_DN, + [(ldap.MOD_REPLACE, + 'memberofgroupattr', + b'member')]) + except ldap.LDAPError as e: + log.fatal('Failed to configure memberOf plugin: error ' + e.args[0]['desc']) + assert False + + # + # create some users and a group + # + log.info('create users and group...') + for idx in range(1, 5): + try: + USER_DN = ("uid=member%d,%s" % (idx, DEFAULT_SUFFIX)) + topology_st.standalone.add_s(Entry((USER_DN, + {'objectclass': 'top extensibleObject'.split(), + 'uid': 'member%d' % (idx)}))) + except ldap.LDAPError as e: + log.fatal('Failed to add user (%s): error %s' % (USER_DN, e.args[0]['desc'])) + assert False + + _add_group_with_members(topology_st) + + MEMBER_VAL = ("uid=member2,%s" % DEFAULT_SUFFIX) + time.sleep(1) + _find_memberof(topology_st, MEMBER_VAL, GROUP_DN, True) + + # delete group + log.info('delete group...') + try: + topology_st.standalone.delete_s(GROUP_DN) + except ldap.LDAPError as e: + log.error('Failed to delete entry: ' + e.args[0]['desc']) + assert False + + time.sleep(1) + _find_memberof(topology_st, MEMBER_VAL, GROUP_DN, False) + + # add group again + log.info('add group again') + _add_group_with_members(topology_st) + time.sleep(1) + _find_memberof(topology_st, MEMBER_VAL, GROUP_DN, True) + + # + # get number of changelog records for one user entry + log.info('get number of changes for %s before tombstone purging' % MEMBER_VAL) + changes_pre = _find_retrocl_changes(topology_st, MEMBER_VAL) + + # configure tombstone purging + args = {REPLICA_PRECISE_PURGING: 'on', + REPLICA_PURGE_DELAY: '5', + REPLICA_PURGE_INTERVAL: '5'} + try: + Repl_DN = 'cn=replica,cn=dc\\3Dexample\\2Cdc\\3Dcom,cn=mapping tree,cn=config' + topology_st.standalone.modify_s(Repl_DN, + [(ldap.MOD_ADD, 
'nsDS5ReplicaPreciseTombstonePurging', b'on'), + (ldap.MOD_ADD, 'nsDS5ReplicaPurgeDelay', b'5'), + (ldap.MOD_ADD, 'nsDS5ReplicaTombstonePurgeInterval', b'5')]) + except: + log.fatal('Failed to configure replica') + assert False + + # Wait for the interval to pass + log.info('Wait for tombstone purge interval to pass ...') + time.sleep(6) + + # Add an entry to trigger replication + log.info('add dummy entry') + try: + topology_st.standalone.add_s(Entry(('cn=test_entry,dc=example,dc=com', { + 'objectclass': 'top person'.split(), + 'sn': 'user', + 'cn': 'entry1'}))) + except ldap.LDAPError as e: + log.error('Failed to add entry: ' + e.args[0]['desc']) + assert False + + # check memberof is still correct + time.sleep(1) + _find_memberof(topology_st, MEMBER_VAL, GROUP_DN, True) + + # Wait for the interval to pass again + log.info('Wait for tombstone purge interval to pass again...') + time.sleep(10) + + # + # get number of changelog records for one user entry + log.info('get number of changes for %s before tombstone purging' % MEMBER_VAL) + changes_post = _find_retrocl_changes(topology_st, MEMBER_VAL) + + assert (changes_pre == changes_post) + + +if __name__ == '__main__': + # Run isolated + # -s for DEBUG mode + CURRENT_FILE = os.path.realpath(__file__) + pytest.main("-s %s" % CURRENT_FILE) diff --git a/dirsrvtests/tests/tickets/ticket48784_test.py b/dirsrvtests/tests/tickets/ticket48784_test.py new file mode 100644 index 0000000..d343ffd --- /dev/null +++ b/dirsrvtests/tests/tickets/ticket48784_test.py @@ -0,0 +1,141 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2016 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. 
+# --- END COPYRIGHT BLOCK --- +# +import pytest +from lib389.tasks import * + +from lib389.utils import * +# Skip on older versions +pytestmark = [pytest.mark.tier2, + pytest.mark.skipif(ds_is_older('1.3.5'), reason="Not implemented")] + +from lib389.topologies import topology_m2 + +from lib389._constants import DEFAULT_SUFFIX, DN_DM, PASSWORD + +logging.getLogger(__name__).setLevel(logging.DEBUG) +log = logging.getLogger(__name__) + +CONFIG_DN = 'cn=config' +ENCRYPTION_DN = 'cn=encryption,%s' % CONFIG_DN +RSA = 'RSA' +RSA_DN = 'cn=%s,%s' % (RSA, ENCRYPTION_DN) +ISSUER = 'cn=CAcert' +CACERT = 'CAcertificate' +SERVERCERT = 'Server-Cert' + + +@pytest.fixture(scope="module") +def add_entry(server, name, rdntmpl, start, num): + log.info("\n######################### Adding %d entries to %s ######################" % (num, name)) + + for i in range(num): + ii = start + i + dn = '%s%d,%s' % (rdntmpl, ii, DEFAULT_SUFFIX) + try: + server.add_s(Entry((dn, {'objectclass': 'top person extensibleObject'.split(), + 'uid': '%s%d' % (rdntmpl, ii), + 'cn': '%s user%d' % (name, ii), + 'sn': 'user%d' % (ii)}))) + except ldap.LDAPError as e: + log.error('Failed to add %s ' % dn + e.message['desc']) + assert False + +def config_tls_agreements(topology_m2): + log.info("######################### Configure SSL/TLS agreements ######################") + log.info("######################## supplier1 <-- startTLS -> supplier2 #####################") + + log.info("##### Update the agreement of supplier1") + m1 = topology_m2.ms["supplier1"] + m1_m2_agmt = m1.agreement.list(suffix=DEFAULT_SUFFIX)[0].dn + topology_m2.ms["supplier1"].modify_s(m1_m2_agmt, [(ldap.MOD_REPLACE, 'nsDS5ReplicaTransportInfo', b'TLS')]) + + log.info("##### Update the agreement of supplier2") + m2 = topology_m2.ms["supplier2"] + m2_m1_agmt = m2.agreement.list(suffix=DEFAULT_SUFFIX)[0].dn + topology_m2.ms["supplier2"].modify_s(m2_m1_agmt, [(ldap.MOD_REPLACE, 'nsDS5ReplicaTransportInfo', b'TLS')]) + + time.sleep(1) + + 
topology_m2.ms["supplier1"].restart(10) + topology_m2.ms["supplier2"].restart(10) + + log.info("\n######################### Configure SSL/TLS agreements Done ######################\n") + + +def set_ssl_Version(server, name, version): + log.info("\n######################### Set %s on %s ######################\n" % + (version, name)) + server.simple_bind_s(DN_DM, PASSWORD) + server.modify_s(ENCRYPTION_DN, [(ldap.MOD_REPLACE, 'nsSSL3', b'off'), + (ldap.MOD_REPLACE, 'nsTLS1', b'on'), + (ldap.MOD_REPLACE, 'sslVersionMin', ensure_bytes(version)), + (ldap.MOD_REPLACE, 'sslVersionMax', ensure_bytes(version))]) + + +def test_ticket48784(topology_m2): + """ + Set up 2way MMR: + supplier_1 <----- startTLS -----> supplier_2 + + Make sure the replication is working. + Then, stop the servers and set only TLS1.0 on supplier_1 while TLS1.2 on supplier_2 + Replication is supposed to fail. + """ + log.info("Ticket 48784 - Allow usage of OpenLDAP libraries that don't use NSS for crypto") + + #create_keys_certs(topology_m2) + [i.enable_tls() for i in topology_m2] + + config_tls_agreements(topology_m2) + + add_entry(topology_m2.ms["supplier1"], 'supplier1', 'uid=m1user', 0, 5) + add_entry(topology_m2.ms["supplier2"], 'supplier2', 'uid=m2user', 0, 5) + + time.sleep(10) + + log.info('##### Searching for entries on supplier1...') + entries = topology_m2.ms["supplier1"].search_s(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE, '(uid=*)') + assert 10 == len(entries) + + log.info('##### Searching for entries on supplier2...') + entries = topology_m2.ms["supplier2"].search_s(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE, '(uid=*)') + assert 10 == len(entries) + + log.info("##### openldap client just accepts sslVersionMin not Max.") + set_ssl_Version(topology_m2.ms["supplier1"], 'supplier1', 'TLS1.0') + set_ssl_Version(topology_m2.ms["supplier2"], 'supplier2', 'TLS1.2') + + log.info("##### restart supplier[12]") + topology_m2.ms["supplier1"].restart(timeout=10) + topology_m2.ms["supplier2"].restart(timeout=10) + + 
log.info("##### replication from supplier_1 to supplier_2 should be ok.") + add_entry(topology_m2.ms["supplier1"], 'supplier1', 'uid=m1user', 10, 1) + log.info("##### replication from supplier_2 to supplier_1 should fail.") + add_entry(topology_m2.ms["supplier2"], 'supplier2', 'uid=m2user', 10, 1) + + time.sleep(10) + + log.info('##### Searching for entries on supplier1...') + entries = topology_m2.ms["supplier1"].search_s(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE, '(uid=*)') + assert 11 == len(entries) # This is supposed to be "1" less than supplier 2's entry count + + log.info('##### Searching for entries on supplier2...') + entries = topology_m2.ms["supplier2"].search_s(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE, '(uid=*)') + assert 12 == len(entries) + + log.info("Ticket 48784 - PASSED") + + +if __name__ == '__main__': + # Run isolated + # -s for DEBUG mode + + CURRENT_FILE = os.path.realpath(__file__) + pytest.main("-s %s" % CURRENT_FILE) diff --git a/dirsrvtests/tests/tickets/ticket48798_test.py b/dirsrvtests/tests/tickets/ticket48798_test.py new file mode 100644 index 0000000..2c03678 --- /dev/null +++ b/dirsrvtests/tests/tickets/ticket48798_test.py @@ -0,0 +1,73 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2022 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. +# --- END COPYRIGHT BLOCK --- +# +from subprocess import check_output + +import pytest +from lib389.tasks import * +from lib389.utils import * +from lib389.topologies import topology_st +from lib389.config import Encryption + +from lib389._constants import DEFAULT_SUFFIX, DEFAULT_SECURE_PORT + +pytestmark = pytest.mark.tier2 + +logging.getLogger(__name__).setLevel(logging.DEBUG) +log = logging.getLogger(__name__) + + +def check_socket_dh_param_size(hostname, port): + ### You know why we have to do this? + # Because TLS and SSL suck. Hard. They are impossible. It's all terrible, burn it all down. 
+ cmd = "echo quit | openssl s_client -connect {HOSTNAME}:{PORT} -msg -cipher DH | grep -A 1 ServerKeyExchange".format( + HOSTNAME=hostname, + PORT=port) + output = check_output(cmd, shell=True) + dhheader = output.split(b'\n')[1] + # Get rid of all the other whitespace. + dhheader = dhheader.replace(b' ', b'') + # Example is 0c00040b0100ffffffffffffffffadf8 + # We need the bits 0100 here. Which means 256 bytes aka 256 * 8, for 2048 bit. + dhheader = dhheader[8:12] + # make it an int, and times 8 + i = int(dhheader, 16) * 8 + return i + + +def test_ticket48798(topology_st): + """ + Test DH param sizes offered by DS. + + """ + topology_st.standalone.enable_tls() + + # Confirm that we have a connection, and that it has DH + + # Open a socket to the port. + # Check the security settings. + size = check_socket_dh_param_size(topology_st.standalone.host, topology_st.standalone.sslport) + + assert size == 2048 + + # Now toggle the settings. + enc = Encryption(topology_st.standalone) + enc.set('allowWeakDHParam', 'on') + + topology_st.standalone.restart() + + # Check the DH params are less than 1024. + size = check_socket_dh_param_size(topology_st.standalone.host, topology_st.standalone.sslport) + + assert size == 1024 + +if __name__ == '__main__': + # Run isolated + # -s for DEBUG mode + CURRENT_FILE = os.path.realpath(__file__) + pytest.main("-s %s" % CURRENT_FILE) diff --git a/dirsrvtests/tests/tickets/ticket48799_test.py b/dirsrvtests/tests/tickets/ticket48799_test.py new file mode 100644 index 0000000..a396220 --- /dev/null +++ b/dirsrvtests/tests/tickets/ticket48799_test.py @@ -0,0 +1,95 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2022 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. 
+# --- END COPYRIGHT BLOCK --- +# +import pytest +from lib389.tasks import * +from lib389.utils import * +from lib389.topologies import topology_m1c1 + +pytestmark = pytest.mark.tier2 + +logging.getLogger(__name__).setLevel(logging.DEBUG) +log = logging.getLogger(__name__) + + +def _add_custom_schema(server): + attr_value = b"( 10.0.9.2342.19200300.100.1.1 NAME 'customManager' EQUALITY distinguishedNameMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.12 X-ORIGIN 'user defined' )" + mod = [(ldap.MOD_ADD, 'attributeTypes', attr_value)] + server.modify_s('cn=schema', mod) + + oc_value = b"( 1.3.6.1.4.1.4843.2.1 NAME 'customPerson' SUP inetorgperson STRUCTURAL MAY (customManager) X-ORIGIN 'user defined' )" + mod = [(ldap.MOD_ADD, 'objectclasses', oc_value)] + server.modify_s('cn=schema', mod) + + +def _create_user(server): + server.add_s(Entry(( + "uid=testuser,ou=People,%s" % DEFAULT_SUFFIX, + { + 'objectClass': "top account posixaccount".split(), + 'uid': 'testuser', + 'gecos': 'Test User', + 'cn': 'testuser', + 'homedirectory': '/home/testuser', + 'passwordexpirationtime': '20160710184141Z', + 'userpassword': '!', + 'uidnumber': '1111212', + 'gidnumber': '1111212', + 'loginshell': '/bin/bash' + } + ))) + + +def _modify_user(server): + mod = [ + (ldap.MOD_ADD, 'objectClass', [b'customPerson']), + (ldap.MOD_ADD, 'sn', [b'User']), + (ldap.MOD_ADD, 'customManager', [b'cn=manager']), + ] + server.modify("uid=testuser,ou=People,%s" % DEFAULT_SUFFIX, mod) + + +def test_ticket48799(topology_m1c1): + """Write your replication testcase here. + + To access each DirSrv instance use: topology_m1c1.ms["supplier1"], topology_m1c1.ms["supplier1"]2, + ..., topology_m1c1.hub1, ..., topology_m1c1.cs["consumer1"],... + + Also, if you need any testcase initialization, + please, write additional fixture for that(include finalizer). + """ + + # Add the new schema element. 
+ _add_custom_schema(topology_m1c1.ms["supplier1"]) + _add_custom_schema(topology_m1c1.cs["consumer1"]) + + # Add a new user on the supplier. + _create_user(topology_m1c1.ms["supplier1"]) + # Modify the user on the supplier. + _modify_user(topology_m1c1.ms["supplier1"]) + + # We need to wait for replication here. + time.sleep(15) + + # Now compare the supplier vs consumer, and see if the objectClass was dropped. + + supplier_entry = topology_m1c1.ms["supplier1"].search_s("uid=testuser,ou=People,%s" % DEFAULT_SUFFIX, ldap.SCOPE_BASE, + '(objectclass=*)', ['objectClass']) + consumer_entry = topology_m1c1.cs["consumer1"].search_s("uid=testuser,ou=People,%s" % DEFAULT_SUFFIX, + ldap.SCOPE_BASE, '(objectclass=*)', ['objectClass']) + + assert (supplier_entry == consumer_entry) + + log.info('Test complete') + + +if __name__ == '__main__': + # Run isolated + # -s for DEBUG mode + CURRENT_FILE = os.path.realpath(__file__) + pytest.main("-s %s" % CURRENT_FILE) diff --git a/dirsrvtests/tests/tickets/ticket48808_test.py b/dirsrvtests/tests/tickets/ticket48808_test.py new file mode 100644 index 0000000..646a9a7 --- /dev/null +++ b/dirsrvtests/tests/tickets/ticket48808_test.py @@ -0,0 +1,311 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2022 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. 
+# --- END COPYRIGHT BLOCK --- +# +from random import sample + +import pytest +from ldap.controls import SimplePagedResultsControl +from lib389.tasks import * +from lib389.utils import * +from lib389.topologies import topology_st + +from lib389._constants import DEFAULT_SUFFIX, DN_DM, PASSWORD + +pytestmark = pytest.mark.tier2 + +logging.getLogger(__name__).setLevel(logging.DEBUG) +log = logging.getLogger(__name__) + +TEST_USER_NAME = 'simplepaged_test' +TEST_USER_DN = 'uid=%s,%s' % (TEST_USER_NAME, DEFAULT_SUFFIX) +TEST_USER_PWD = 'simplepaged_test' + + +@pytest.fixture(scope="module") +def create_user(topology_st): + """User for binding operation""" + + try: + topology_st.standalone.add_s(Entry((TEST_USER_DN, { + 'objectclass': 'top person'.split(), + 'objectclass': 'organizationalPerson', + 'objectclass': 'inetorgperson', + 'cn': TEST_USER_NAME, + 'sn': TEST_USER_NAME, + 'userpassword': TEST_USER_PWD, + 'mail': '%s@redhat.com' % TEST_USER_NAME, + 'uid': TEST_USER_NAME + }))) + except ldap.LDAPError as e: + log.error('Failed to add user (%s): error (%s)' % (TEST_USER_DN, + e.args[0]['desc'])) + raise e + + +def add_users(topology_st, users_num): + """Add users to the default suffix + and return a list of added user DNs. 
+ """ + + users_list = [] + log.info('Adding %d users' % users_num) + for num in sample(range(1000), users_num): + num_ran = int(round(num)) + USER_NAME = 'test%05d' % num_ran + USER_DN = 'uid=%s,%s' % (USER_NAME, DEFAULT_SUFFIX) + users_list.append(USER_DN) + try: + topology_st.standalone.add_s(Entry((USER_DN, { + 'objectclass': 'top person'.split(), + 'objectclass': 'organizationalPerson', + 'objectclass': 'inetorgperson', + 'cn': USER_NAME, + 'sn': USER_NAME, + 'userpassword': 'pass%s' % num_ran, + 'mail': '%s@redhat.com' % USER_NAME, + 'uid': USER_NAME + }))) + except ldap.LDAPError as e: + log.error('Failed to add user (%s): error (%s)' % (USER_DN, + e.args[0]['desc'])) + raise e + return users_list + + +def del_users(topology_st, users_list): + """Delete users with DNs from given list""" + + log.info('Deleting %d users' % len(users_list)) + for user_dn in users_list: + try: + topology_st.standalone.delete_s(user_dn) + except ldap.LDAPError as e: + log.error('Failed to delete user (%s): error (%s)' % (user_dn, + e.args[0]['desc'])) + raise e + + +def change_conf_attr(topology_st, suffix, attr_name, attr_value): + """Change configurational attribute in the given suffix. + Funtion returns previous attribute value. + """ + + try: + entries = topology_st.standalone.search_s(suffix, ldap.SCOPE_BASE, + 'objectclass=top', + [attr_name]) + attr_value_bck = entries[0].data.get(attr_name) + log.info('Set %s to %s. Previous value - %s. Modified suffix - %s.' 
% ( + attr_name, attr_value, attr_value_bck, suffix)) + if attr_value is None: + topology_st.standalone.modify_s(suffix, [(ldap.MOD_DELETE, + attr_name, + attr_value)]) + else: + topology_st.standalone.modify_s(suffix, [(ldap.MOD_REPLACE, + attr_name, + attr_value)]) + except ldap.LDAPError as e: + log.error('Failed to change attr value (%s): error (%s)' % (attr_name, + e.args[0]['desc'])) + raise e + + return attr_value_bck + + +def paged_search(topology_st, controls, search_flt, searchreq_attrlist): + """Search at the DEFAULT_SUFFIX with ldap.SCOPE_SUBTREE + using Simple Paged Control(should the first item in the + list controls. + Return the list with results summarized from all pages + """ + + pages = 0 + pctrls = [] + all_results = [] + req_ctrl = controls[0] + msgid = topology_st.standalone.search_ext(DEFAULT_SUFFIX, + ldap.SCOPE_SUBTREE, + search_flt, + searchreq_attrlist, + serverctrls=controls) + while True: + log.info('Getting page %d' % (pages,)) + rtype, rdata, rmsgid, rctrls = topology_st.standalone.result3(msgid) + all_results.extend(rdata) + pages += 1 + pctrls = [ + c + for c in rctrls + if c.controlType == SimplePagedResultsControl.controlType + ] + + if pctrls: + if pctrls[0].cookie: + # Copy cookie from response control to request control + req_ctrl.cookie = pctrls[0].cookie + msgid = topology_st.standalone.search_ext(DEFAULT_SUFFIX, + ldap.SCOPE_SUBTREE, + search_flt, + searchreq_attrlist, + serverctrls=controls) + else: + break # no more pages available + else: + break + + assert not pctrls[0].cookie + return all_results + + +def test_ticket48808(topology_st, create_user): + log.info('Run multiple paging controls on a single connection') + users_num = 100 + page_size = 30 + users_list = add_users(topology_st, users_num) + search_flt = r'(uid=test*)' + searchreq_attrlist = ['dn', 'sn'] + + log.info('Set user bind') + topology_st.standalone.simple_bind_s(TEST_USER_DN, TEST_USER_PWD) + + log.info('Create simple paged results control instance') + 
req_ctrl = SimplePagedResultsControl(True, size=page_size, cookie='') + controls = [req_ctrl] + + for ii in range(3): + log.info('Iteration %d' % ii) + msgid = topology_st.standalone.search_ext(DEFAULT_SUFFIX, + ldap.SCOPE_SUBTREE, + search_flt, + searchreq_attrlist, + serverctrls=controls) + rtype, rdata, rmsgid, rctrls = topology_st.standalone.result3(msgid) + pctrls = [ + c + for c in rctrls + if c.controlType == SimplePagedResultsControl.controlType + ] + + req_ctrl.cookie = pctrls[0].cookie + msgid = topology_st.standalone.search_ext(DEFAULT_SUFFIX, + ldap.SCOPE_SUBTREE, + search_flt, + searchreq_attrlist, + serverctrls=controls) + log.info('Set Directory Manager bind back') + topology_st.standalone.simple_bind_s(DN_DM, PASSWORD) + del_users(topology_st, users_list) + + log.info('Abandon the search') + users_num = 10 + page_size = 0 + users_list = add_users(topology_st, users_num) + search_flt = r'(uid=test*)' + searchreq_attrlist = ['dn', 'sn'] + + log.info('Set user bind') + topology_st.standalone.simple_bind_s(TEST_USER_DN, TEST_USER_PWD) + + log.info('Create simple paged results control instance') + req_ctrl = SimplePagedResultsControl(True, size=page_size, cookie='') + controls = [req_ctrl] + + msgid = topology_st.standalone.search_ext(DEFAULT_SUFFIX, + ldap.SCOPE_SUBTREE, + search_flt, + searchreq_attrlist, + serverctrls=controls) + rtype, rdata, rmsgid, rctrls = topology_st.standalone.result3(msgid) + pctrls = [ + c + for c in rctrls + if c.controlType == SimplePagedResultsControl.controlType + ] + assert not pctrls[0].cookie + + log.info('Set Directory Manager bind back') + topology_st.standalone.simple_bind_s(DN_DM, PASSWORD) + del_users(topology_st, users_list) + + log.info("Search should fail with 'nsPagedSizeLimit = 5'" + "and 'nsslapd-pagedsizelimit = 15' with 10 users") + conf_attr = b'15' + user_attr = b'5' + expected_rs = ldap.SIZELIMIT_EXCEEDED + users_num = 10 + page_size = 10 + users_list = add_users(topology_st, users_num) + search_flt = 
r'(uid=test*)' + searchreq_attrlist = ['dn', 'sn'] + conf_attr_bck = change_conf_attr(topology_st, DN_CONFIG, + 'nsslapd-pagedsizelimit', conf_attr) + user_attr_bck = change_conf_attr(topology_st, TEST_USER_DN, + 'nsPagedSizeLimit', user_attr) + + log.info('Set user bind') + topology_st.standalone.simple_bind_s(TEST_USER_DN, TEST_USER_PWD) + + log.info('Create simple paged results control instance') + req_ctrl = SimplePagedResultsControl(True, size=page_size, cookie='') + controls = [req_ctrl] + + log.info('Expect to fail with SIZELIMIT_EXCEEDED') + with pytest.raises(expected_rs): + all_results = paged_search(topology_st, controls, + search_flt, searchreq_attrlist) + + log.info('Set Directory Manager bind back') + topology_st.standalone.simple_bind_s(DN_DM, PASSWORD) + del_users(topology_st, users_list) + change_conf_attr(topology_st, DN_CONFIG, + 'nsslapd-pagedsizelimit', conf_attr_bck) + change_conf_attr(topology_st, TEST_USER_DN, + 'nsPagedSizeLimit', user_attr_bck) + + log.info("Search should pass with 'nsPagedSizeLimit = 15'" + "and 'nsslapd-pagedsizelimit = 5' with 10 users") + conf_attr = b'5' + user_attr = b'15' + users_num = 10 + page_size = 10 + users_list = add_users(topology_st, users_num) + search_flt = r'(uid=test*)' + searchreq_attrlist = ['dn', 'sn'] + conf_attr_bck = change_conf_attr(topology_st, DN_CONFIG, + 'nsslapd-pagedsizelimit', conf_attr) + user_attr_bck = change_conf_attr(topology_st, TEST_USER_DN, + 'nsPagedSizeLimit', user_attr) + + log.info('Set user bind') + topology_st.standalone.simple_bind_s(TEST_USER_DN, TEST_USER_PWD) + + log.info('Create simple paged results control instance') + req_ctrl = SimplePagedResultsControl(True, size=page_size, cookie='') + controls = [req_ctrl] + + log.info('Search should PASS') + all_results = paged_search(topology_st, controls, + search_flt, searchreq_attrlist) + log.info('%d results' % len(all_results)) + assert len(all_results) == len(users_list) + + log.info('Set Directory Manager bind back') + 
topology_st.standalone.simple_bind_s(DN_DM, PASSWORD) + del_users(topology_st, users_list) + change_conf_attr(topology_st, DN_CONFIG, + 'nsslapd-pagedsizelimit', conf_attr_bck) + change_conf_attr(topology_st, TEST_USER_DN, + 'nsPagedSizeLimit', user_attr_bck) + + +if __name__ == '__main__': + # Run isolated + # -s for DEBUG mode + CURRENT_FILE = os.path.realpath(__file__) + pytest.main("-s %s" % CURRENT_FILE) diff --git a/dirsrvtests/tests/tickets/ticket48844_test.py b/dirsrvtests/tests/tickets/ticket48844_test.py new file mode 100644 index 0000000..04da4ef --- /dev/null +++ b/dirsrvtests/tests/tickets/ticket48844_test.py @@ -0,0 +1,144 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2022 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. +# --- END COPYRIGHT BLOCK --- +# +import pytest +from lib389.tasks import * +from lib389.utils import * +from lib389.topologies import topology_st + +from lib389._constants import DEFAULT_SUFFIX, BACKEND_NAME + +pytestmark = pytest.mark.tier2 + +logging.getLogger(__name__).setLevel(logging.DEBUG) +log = logging.getLogger(__name__) + +PLUGIN_BITWISE = 'Bitwise Plugin' +TESTBASEDN = "dc=bitwise,dc=com" +TESTBACKEND_NAME = "TestBitw" + +F1 = 'objectclass=testperson' +BITWISE_F2 = '(&(%s)(testUserAccountControl:1.2.840.113556.1.4.803:=514))' % F1 +BITWISE_F3 = '(&(%s)(testUserAccountControl:1.2.840.113556.1.4.803:=513))' % F1 +BITWISE_F6 = '(&(%s)(testUserAccountControl:1.2.840.113556.1.4.803:=16777216))' % F1 + + +def _addBitwiseEntries(topology_st): + users = [ + ('testuser2', '65536', 'PasswordNeverExpired'), + ('testuser3', '8388608', 'PasswordExpired'), + ('testuser4', '256', 'TempDuplicateAccount'), + ('testuser5', '16777216', 'TrustedAuthDelegation'), + ('testuser6', '528', 'AccountLocked'), + ('testuser7', '513', 'AccountActive'), + ('testuser8', '98536 99512 99528'.split(), 'AccountActive PasswordExxpired AccountLocked'.split()), + ('testuser9', '87536 
912'.split(), 'AccountActive PasswordNeverExpired'.split()), + ('testuser10', '89536 97546 96579'.split(), 'TestVerify1 TestVerify2 TestVerify3'.split()), + ('testuser11', '655236', 'TestStatus1'), + ('testuser12', '665522', 'TestStatus2'), + ('testuser13', '266552', 'TestStatus3')] + try: + topology_st.standalone.add_s(Entry((TESTBASEDN, + {'objectclass': "top dcobject".split(), + 'dc': 'bitwise', + 'aci': '(target =\"ldap:///dc=bitwise,dc=com\")' + \ + '(targetattr != \"userPassword\")' + \ + '(version 3.0;acl \"Anonymous read-search access\";' + \ + 'allow (read, search, compare)(userdn = \"ldap:///anyone\");)'}))) + + topology_st.standalone.add_s(Entry(('uid=btestuser1,%s' % TESTBASEDN, + {'objectclass': 'top testperson organizationalPerson inetorgperson'.split(), + 'mail': 'btestuser1@redhat.com', + 'uid': 'btestuser1', + 'givenName': 'bit', + 'sn': 'testuser1', + 'userPassword': 'testuser1', + 'testUserAccountControl': '514', + 'testUserStatus': 'Disabled', + 'cn': 'bit tetsuser1'}))) + for (userid, accCtl, accStatus) in users: + topology_st.standalone.add_s(Entry(('uid=b%s,%s' % (userid, TESTBASEDN), + { + 'objectclass': 'top testperson organizationalPerson inetorgperson'.split(), + 'mail': '%s@redhat.com' % userid, + 'uid': 'b%s' % userid, + 'givenName': 'bit', + 'sn': userid, + 'userPassword': userid, + 'testUserAccountControl': accCtl, + 'testUserStatus': accStatus, + 'cn': 'bit %s' % userid}))) + except ValueError: + topology_st.standalone.log.fatal("add_s failed: %s", ValueError) + + +def test_ticket48844_init(topology_st): + # create a suffix where test entries will be stored + BITW_SCHEMA_AT_1 = '( NAME \'testUserAccountControl\' DESC \'Attribute Bitwise filteri-Multi-Valued\' SYNTAX 1.3.6.1.4.1.1466.115.121.1.27 )' + BITW_SCHEMA_AT_2 = '( NAME \'testUserStatus\' DESC \'State of User account active/disabled\' SYNTAX 1.3.6.1.4.1.1466.115.121.1.15 )' + BITW_SCHEMA_OC_1 = '( NAME \'testperson\' SUP top STRUCTURAL MUST ( sn $ cn $ testUserAccountControl $ 
testUserStatus )' + \ + ' MAY ( userPassword $ telephoneNumber $ seeAlso $ description ) X-ORIGIN \'BitWise\' )' + topology_st.standalone.schema.add_schema('attributetypes', [ensure_bytes(BITW_SCHEMA_AT_1), ensure_bytes(BITW_SCHEMA_AT_2)]) + topology_st.standalone.schema.add_schema('objectClasses', ensure_bytes(BITW_SCHEMA_OC_1)) + + topology_st.standalone.backend.create(TESTBASEDN, {BACKEND_NAME: TESTBACKEND_NAME}) + topology_st.standalone.mappingtree.create(TESTBASEDN, bename=TESTBACKEND_NAME, parent=None) + _addBitwiseEntries(topology_st) + + +def test_ticket48844_bitwise_on(topology_st): + """ + Check that bitwise plugin (old style MR plugin) that defines + Its own indexer create function, is selected to evaluate the filter + """ + + topology_st.standalone.plugins.enable(name=PLUGIN_BITWISE) + topology_st.standalone.restart(timeout=10) + ents = topology_st.standalone.search_s('cn=%s,cn=plugins,cn=config' % PLUGIN_BITWISE, ldap.SCOPE_BASE, + 'objectclass=*') + assert (ents[0].hasValue('nsslapd-pluginEnabled', 'on')) + + expect = 2 + ents = topology_st.standalone.search_s(TESTBASEDN, ldap.SCOPE_SUBTREE, BITWISE_F2) + assert (len(ents) == expect) + + expect = 1 + ents = topology_st.standalone.search_s(TESTBASEDN, ldap.SCOPE_SUBTREE, BITWISE_F3) + assert (len(ents) == expect) + assert (ents[0].hasAttr('testUserAccountControl')) + + expect = 1 + ents = topology_st.standalone.search_s(TESTBASEDN, ldap.SCOPE_SUBTREE, BITWISE_F6) + assert (len(ents) == expect) + assert (ents[0].hasAttr('testUserAccountControl')) + + +def test_ticket48844_bitwise_off(topology_st): + """ + Check that when bitwise plugin is not enabled, no plugin + is identified to evaluate the filter -> ldap.UNAVAILABLE_CRITICAL_EXTENSION: + """ + topology_st.standalone.plugins.disable(name=PLUGIN_BITWISE) + topology_st.standalone.restart(timeout=10) + ents = topology_st.standalone.search_s('cn=%s,cn=plugins,cn=config' % PLUGIN_BITWISE, ldap.SCOPE_BASE, + 'objectclass=*') + assert 
(ents[0].hasValue('nsslapd-pluginEnabled', 'off')) + + res = 0 + try: + ents = topology_st.standalone.search_s(TESTBASEDN, ldap.SCOPE_SUBTREE, BITWISE_F2) + except ldap.UNAVAILABLE_CRITICAL_EXTENSION: + res = 12 + assert (res == 12) + + +if __name__ == '__main__': + # Run isolated + # -s for DEBUG mode + CURRENT_FILE = os.path.realpath(__file__) + pytest.main("-s %s" % CURRENT_FILE) diff --git a/dirsrvtests/tests/tickets/ticket48891_test.py b/dirsrvtests/tests/tickets/ticket48891_test.py new file mode 100644 index 0000000..041ce21 --- /dev/null +++ b/dirsrvtests/tests/tickets/ticket48891_test.py @@ -0,0 +1,102 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2016 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. +# --- END COPYRIGHT BLOCK --- +# +import fnmatch +import logging + +import pytest +from lib389.tasks import * +from lib389.topologies import topology_st + +from lib389._constants import DN_DM, PASSWORD, DEFAULT_SUFFIX, BACKEND_NAME, SUFFIX + +pytestmark = pytest.mark.tier2 + +log = logging.getLogger(__name__) + +CONFIG_DN = 'cn=config' +RDN_VAL_SUFFIX = 'ticket48891.org' +MYSUFFIX = 'dc=%s' % RDN_VAL_SUFFIX +MYSUFFIXBE = 'ticket48891' + +SEARCHFILTER = '(objectclass=person)' + +OTHER_NAME = 'other_entry' +MAX_OTHERS = 10 + + +def test_ticket48891_setup(topology_st): + """ + Check there is no core + Create a second backend + stop DS (that should trigger the core) + check there is no core + """ + log.info('Testing Ticket 48891 - ns-slapd crashes during the shutdown after adding attribute with a matching rule') + + # bind as directory manager + topology_st.standalone.log.info("Bind as %s" % DN_DM) + topology_st.standalone.simple_bind_s(DN_DM, PASSWORD) + + # check there is no core + path = topology_st.standalone.config.get_attr_val_utf8('nsslapd-errorlog').replace('errors', '') + log.debug('Looking for a core file in: ' + path) + cores = fnmatch.filter(os.listdir(path), 'core.*') + 
assert len(cores) == 0 + + topology_st.standalone.log.info( + "\n\n######################### SETUP SUFFIX o=ticket48891.org ######################\n") + + topology_st.standalone.backend.create(MYSUFFIX, {BACKEND_NAME: MYSUFFIXBE}) + topology_st.standalone.mappingtree.create(MYSUFFIX, bename=MYSUFFIXBE) + topology_st.standalone.add_s(Entry((MYSUFFIX, { + 'objectclass': "top domain".split(), + 'dc': RDN_VAL_SUFFIX}))) + + topology_st.standalone.log.info("\n\n######################### Generate Test data ######################\n") + + # add dummy entries on both backends + for cpt in range(MAX_OTHERS): + name = "%s%d" % (OTHER_NAME, cpt) + topology_st.standalone.add_s(Entry(("cn=%s,%s" % (name, SUFFIX), { + 'objectclass': "top person".split(), + 'sn': name, + 'cn': name}))) + for cpt in range(MAX_OTHERS): + name = "%s%d" % (OTHER_NAME, cpt) + topology_st.standalone.add_s(Entry(("cn=%s,%s" % (name, MYSUFFIX), { + 'objectclass': "top person".split(), + 'sn': name, + 'cn': name}))) + + topology_st.standalone.log.info("\n\n######################### SEARCH ALL ######################\n") + topology_st.standalone.log.info("Bind as %s and add the READ/SEARCH SELFDN aci" % DN_DM) + topology_st.standalone.simple_bind_s(DN_DM, PASSWORD) + + entries = topology_st.standalone.search_s(MYSUFFIX, ldap.SCOPE_SUBTREE, SEARCHFILTER) + topology_st.standalone.log.info("Returned %d entries.\n", len(entries)) + + assert MAX_OTHERS == len(entries) + + topology_st.standalone.log.info('%d person entries are successfully created under %s.' 
% (len(entries), MYSUFFIX)) + topology_st.standalone.stop(timeout=1) + + cores = fnmatch.filter(os.listdir(path), 'core.*') + for core in cores: + core = os.path.join(path, core) + topology_st.standalone.log.info('cores are %s' % core) + assert not os.path.isfile(core) + + log.info('Testcase PASSED') + + +if __name__ == '__main__': + # Run isolated + # -s for DEBUG mode + CURRENT_FILE = os.path.realpath(__file__) + pytest.main("-s %s" % CURRENT_FILE) diff --git a/dirsrvtests/tests/tickets/ticket48893_test.py b/dirsrvtests/tests/tickets/ticket48893_test.py new file mode 100644 index 0000000..f31e26a --- /dev/null +++ b/dirsrvtests/tests/tickets/ticket48893_test.py @@ -0,0 +1,61 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2022 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. +# --- END COPYRIGHT BLOCK --- +# +import pytest +from lib389.utils import * +from lib389.topologies import topology_st + +from lib389._constants import DEFAULT_SUFFIX, HOST_STANDALONE, PORT_STANDALONE + +pytestmark = pytest.mark.tier2 + +DEBUGGING = os.getenv('DEBUGGING', False) + +if DEBUGGING: + logging.getLogger(__name__).setLevel(logging.DEBUG) +else: + logging.getLogger(__name__).setLevel(logging.INFO) + +log = logging.getLogger(__name__) + + +def _attr_present(conn): + results = conn.search_s('cn=config', ldap.SCOPE_SUBTREE, '(objectClass=*)') + if DEBUGGING: + print(results) + if len(results) > 0: + return True + return False + + +def test_ticket48893(topology_st): + """ + Test that anonymous has NO VIEW to cn=config + """ + + if DEBUGGING: + # Add debugging steps(if any)... 
+ pass + + # Do an anonymous bind + conn = ldap.initialize("ldap://%s:%s" % (HOST_STANDALONE, PORT_STANDALONE)) + conn.simple_bind_s() + + # Make sure that we cannot see what's in cn=config as anonymous + assert (not _attr_present(conn)) + + conn.unbind_s() + + log.info('Test PASSED') + + +if __name__ == '__main__': + # Run isolated + # -s for DEBUG mode + CURRENT_FILE = os.path.realpath(__file__) + pytest.main("-s %s" % CURRENT_FILE) diff --git a/dirsrvtests/tests/tickets/ticket48896_test.py b/dirsrvtests/tests/tickets/ticket48896_test.py new file mode 100644 index 0000000..a189758 --- /dev/null +++ b/dirsrvtests/tests/tickets/ticket48896_test.py @@ -0,0 +1,139 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2016 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. +# --- END COPYRIGHT BLOCK --- +# +import pytest +from lib389.tasks import * +from lib389.utils import * +from lib389.topologies import topology_st + +from lib389._constants import DEFAULT_SUFFIX, DN_DM, PASSWORD + +# Skip on older versions +pytestmark = [pytest.mark.tier2, + pytest.mark.skipif(ds_is_older('1.3.6'), reason="Not implemented")] + +logging.getLogger(__name__).setLevel(logging.DEBUG) +log = logging.getLogger(__name__) + +CONFIG_DN = 'cn=config' +UID = 'buser123' +TESTDN = 'uid=%s,' % UID + DEFAULT_SUFFIX + + +def check_attr_val(topology_st, dn, attr, expected): + try: + centry = topology_st.standalone.search_s(dn, ldap.SCOPE_BASE, 'cn=*') + if centry: + val = centry[0].getValue(attr) + if val == expected: + log.info('Default value of %s is %s' % (attr, expected)) + else: + log.info('Default value of %s is not %s, but %s' % (attr, expected, val)) + assert False + else: + log.fatal('Failed to get %s' % dn) + assert False + except ldap.LDAPError as e: + log.fatal('Failed to search ' + dn + ': ' + e.message['desc']) + assert False + + +def replace_pw(server, curpw, newpw, expstr, rc): + log.info('Binding as {%s, %s}' % 
(TESTDN, curpw)) + server.simple_bind_s(TESTDN, curpw) + + hit = 0 + log.info('Replacing password: %s -> %s, which should %s' % (curpw, newpw, expstr)) + try: + server.modify_s(TESTDN, [(ldap.MOD_REPLACE, 'userPassword', ensure_bytes(newpw))]) + except Exception as e: + log.info("Exception (expected): %s" % type(e).__name__) + hit = 1 + assert isinstance(e, rc) + + if (0 != rc) and (0 == hit): + log.info('Expected to fail with %s, but passed' % rc.__name__) + assert False + + log.info('PASSED') + + +def test_ticket48896(topology_st): + """ + """ + log.info('Testing Ticket 48896 - Default Setting for passwordMinTokenLength does not work') + + log.info("Setting global password policy with password syntax.") + topology_st.standalone.simple_bind_s(DN_DM, PASSWORD) + topology_st.standalone.modify_s(CONFIG_DN, [(ldap.MOD_REPLACE, 'passwordCheckSyntax', b'on'), + (ldap.MOD_REPLACE, 'nsslapd-pwpolicy-local', b'on')]) + + config = topology_st.standalone.search_s(CONFIG_DN, ldap.SCOPE_BASE, 'cn=*') + mintokenlen = config[0].getValue('passwordMinTokenLength') + history = config[0].getValue('passwordInHistory') + + log.info('Default passwordMinTokenLength == %s' % mintokenlen) + log.info('Default passwordInHistory == %s' % history) + + log.info('Adding a user.') + curpw = 'password' + topology_st.standalone.add_s(Entry((TESTDN, + {'objectclass': "top person organizationalPerson inetOrgPerson".split(), + 'cn': 'test user', + 'sn': 'user', + 'userPassword': curpw}))) + + newpw = 'Abcd012+' + exp = 'be ok' + rc = 0 + replace_pw(topology_st.standalone, curpw, newpw, exp, rc) + + curpw = 'Abcd012+' + newpw = 'user' + exp = 'fail' + rc = ldap.CONSTRAINT_VIOLATION + replace_pw(topology_st.standalone, curpw, newpw, exp, rc) + + curpw = 'Abcd012+' + newpw = UID + exp = 'fail' + rc = ldap.CONSTRAINT_VIOLATION + replace_pw(topology_st.standalone, curpw, newpw, exp, rc) + + curpw = 'Abcd012+' + newpw = 'Tuse!1234' + exp = 'fail' + rc = ldap.CONSTRAINT_VIOLATION + 
replace_pw(topology_st.standalone, curpw, newpw, exp, rc) + + curpw = 'Abcd012+' + newpw = 'Tuse!0987' + exp = 'fail' + rc = ldap.CONSTRAINT_VIOLATION + replace_pw(topology_st.standalone, curpw, newpw, exp, rc) + + curpw = 'Abcd012+' + newpw = 'Tabc!1234' + exp = 'fail' + rc = ldap.CONSTRAINT_VIOLATION + replace_pw(topology_st.standalone, curpw, newpw, exp, rc) + + curpw = 'Abcd012+' + newpw = 'Direc+ory389' + exp = 'be ok' + rc = 0 + replace_pw(topology_st.standalone, curpw, newpw, exp, rc) + + log.info('SUCCESS') + + +if __name__ == '__main__': + # Run isolated + # -s for DEBUG mode + CURRENT_FILE = os.path.realpath(__file__) + pytest.main("-s %s" % CURRENT_FILE) diff --git a/dirsrvtests/tests/tickets/ticket48906_test.py b/dirsrvtests/tests/tickets/ticket48906_test.py new file mode 100644 index 0000000..9a20c1d --- /dev/null +++ b/dirsrvtests/tests/tickets/ticket48906_test.py @@ -0,0 +1,302 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2016 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. 
+# --- END COPYRIGHT BLOCK --- +# +import fnmatch +import logging +import shutil + +import pytest +from lib389.tasks import * +from lib389.topologies import topology_st +from lib389.utils import * + +from lib389._constants import DEFAULT_SUFFIX, DN_LDBM, DN_DM, PASSWORD, SUFFIX + +# Skip on older versions +pytestmark = [pytest.mark.tier2, + pytest.mark.skipif(ds_is_older('1.3.6'), reason="Not implemented")] + +log = logging.getLogger(__name__) + +CONFIG_DN = 'cn=config' +RDN_VAL_SUFFIX = 'ticket48906.org' +MYSUFFIX = 'dc=%s' % RDN_VAL_SUFFIX +MYSUFFIXBE = 'ticket48906' + +SEARCHFILTER = '(objectclass=person)' + +OTHER_NAME = 'other_entry' +MAX_OTHERS = 10 +DBLOCK_DEFAULT = "10000" +DBLOCK_LDAP_UPDATE = "20000" +DBLOCK_EDIT_UPDATE = "40000" +DBLOCK_MIN_UPDATE = DBLOCK_DEFAULT +DBLOCK_ATTR_CONFIG = "nsslapd-db-locks" +DBLOCK_ATTR_MONITOR = "nsslapd-db-configured-locks" +DBLOCK_ATTR_GUARDIAN = "locks" + +DBCACHE_LDAP_UPDATE = "20000000" +DBCACHE_EDIT_UPDATE = "40000000" +DBCACHE_ATTR_CONFIG = "nsslapd-dbcachesize" +DBCACHE_ATTR_GUARDIAN = "cachesize" + +ldbm_config = "cn=config,%s" % (DN_LDBM) +bdb_ldbm_config = "cn=bdb,cn=config,%s" % (DN_LDBM) +ldbm_monitor = "cn=database,cn=monitor,%s" % (DN_LDBM) + + +def test_ticket48906_setup(topology_st): + """ + Check there is no core + Create a second backend + stop DS (that should trigger the core) + check there is no core + """ + log.info('Testing Ticket 48906 - ns-slapd crashes during the shutdown after adding attribute with a matching rule') + + # bind as directory manager + topology_st.standalone.log.info("Bind as %s" % DN_DM) + topology_st.standalone.simple_bind_s(DN_DM, PASSWORD) + + # check there is no core + entry = topology_st.standalone.search_s(CONFIG_DN, ldap.SCOPE_BASE, "(cn=config)", ['nsslapd-workingdir']) + assert entry + assert entry[0] + assert entry[0].hasAttr('nsslapd-workingdir') + path = entry[0].getValue('nsslapd-workingdir') + cores = fnmatch.filter(os.listdir(path), b'core.*') + assert len(cores) == 
0 + + # add dummy entries on backend + for cpt in range(MAX_OTHERS): + name = "%s%d" % (OTHER_NAME, cpt) + topology_st.standalone.add_s(Entry(("cn=%s,%s" % (name, SUFFIX), { + 'objectclass': "top person".split(), + 'sn': name, + 'cn': name}))) + + topology_st.standalone.log.info("\n\n######################### SEARCH ALL ######################\n") + topology_st.standalone.log.info("Bind as %s and add the READ/SEARCH SELFDN aci" % DN_DM) + topology_st.standalone.simple_bind_s(DN_DM, PASSWORD) + + entries = topology_st.standalone.search_s(SUFFIX, ldap.SCOPE_SUBTREE, SEARCHFILTER) + topology_st.standalone.log.info("Returned %d entries.\n", len(entries)) + + assert MAX_OTHERS == len(entries) + + topology_st.standalone.log.info('%d person entries are successfully created under %s.' % (len(entries), SUFFIX)) + + +def _check_configured_value(topology_st, attr=DBLOCK_ATTR_CONFIG, expected_value=None, required=False): + entries = topology_st.standalone.search_s(bdb_ldbm_config, ldap.SCOPE_BASE, 'cn=bdb') + if required: + assert (entries[0].hasValue(attr)) + elif entries[0].hasValue(attr): + assert (entries[0].getValue(attr) == ensure_bytes(expected_value)) + + +def _check_monitored_value(topology_st, expected_value): + entries = topology_st.standalone.search_s(ldbm_monitor, ldap.SCOPE_BASE, '(objectclass=*)') + assert (entries[0].hasValue(DBLOCK_ATTR_MONITOR) and entries[0].getValue(DBLOCK_ATTR_MONITOR) == ensure_bytes(expected_value)) + + +def _check_dse_ldif_value(topology_st, attr=DBLOCK_ATTR_CONFIG, expected_value=DBLOCK_LDAP_UPDATE): + dse_ref_ldif = topology_st.standalone.confdir + '/dse.ldif' + dse_ref = open(dse_ref_ldif, "r") + + # Check the DBLOCK in dse.ldif + value = None + while True: + line = dse_ref.readline() + if (line == ''): + break + elif attr in line.lower(): + value = line.split()[1] + assert (value == expected_value) + break + assert (value) + + +def _check_guardian_value(topology_st, attr=DBLOCK_ATTR_CONFIG, expected_value=None): + guardian_file = 
os.path.join(topology_st.standalone.dbdir, 'guardian') + assert (os.path.exists(guardian_file)) + guardian = open(guardian_file, "r") + + value = None + while True: + line = guardian.readline() + if (line == ''): + break + elif attr in line.lower(): + value = line.split(':')[1].replace("\n", "") + print("line") + print(line) + print("expected_value") + print(expected_value) + print("value") + print(value) + assert (str(value) == str(expected_value)) + break + assert (value) + + +def test_ticket48906_dblock_default(topology_st): + topology_st.standalone.log.info('###################################') + topology_st.standalone.log.info('###') + topology_st.standalone.log.info('### Check that before any change config/monitor') + topology_st.standalone.log.info('### contains the default value') + topology_st.standalone.log.info('###') + topology_st.standalone.log.info('###################################') + _check_monitored_value(topology_st, DBLOCK_DEFAULT) + _check_configured_value(topology_st, attr=DBLOCK_ATTR_CONFIG, expected_value=DBLOCK_DEFAULT, required=False) + + +def test_ticket48906_dblock_ldap_update(topology_st): + topology_st.standalone.log.info('###################################') + topology_st.standalone.log.info('###') + topology_st.standalone.log.info('### Check that after ldap update') + topology_st.standalone.log.info('### - monitor contains DEFAULT') + topology_st.standalone.log.info('### - configured contains DBLOCK_LDAP_UPDATE') + topology_st.standalone.log.info('### - After stop dse.ldif contains DBLOCK_LDAP_UPDATE') + topology_st.standalone.log.info('### - After stop guardian contains DEFAULT') + topology_st.standalone.log.info('### In fact guardian should differ from config to recreate the env') + topology_st.standalone.log.info('### Check that after restart (DBenv recreated)') + topology_st.standalone.log.info('### - monitor contains DBLOCK_LDAP_UPDATE ') + topology_st.standalone.log.info('### - configured contains DBLOCK_LDAP_UPDATE') + 
topology_st.standalone.log.info('### - dse.ldif contains DBLOCK_LDAP_UPDATE') + topology_st.standalone.log.info('###') + topology_st.standalone.log.info('###################################') + + topology_st.standalone.modify_s(ldbm_config, [(ldap.MOD_REPLACE, DBLOCK_ATTR_CONFIG, ensure_bytes(DBLOCK_LDAP_UPDATE))]) + _check_monitored_value(topology_st, DBLOCK_DEFAULT) + _check_configured_value(topology_st, attr=DBLOCK_ATTR_CONFIG, expected_value=DBLOCK_LDAP_UPDATE, required=True) + + topology_st.standalone.stop(timeout=10) + _check_dse_ldif_value(topology_st, attr=DBLOCK_ATTR_CONFIG, expected_value=DBLOCK_LDAP_UPDATE) + _check_guardian_value(topology_st, attr=DBLOCK_ATTR_GUARDIAN, expected_value=DBLOCK_DEFAULT) + + # Check that the value is the same after restart and recreate + topology_st.standalone.start(timeout=10) + _check_monitored_value(topology_st, DBLOCK_LDAP_UPDATE) + _check_configured_value(topology_st, attr=DBLOCK_ATTR_CONFIG, expected_value=DBLOCK_LDAP_UPDATE, required=True) + _check_dse_ldif_value(topology_st, attr=DBLOCK_ATTR_CONFIG, expected_value=DBLOCK_LDAP_UPDATE) + + +def test_ticket48906_dblock_edit_update(topology_st): + topology_st.standalone.log.info('###################################') + topology_st.standalone.log.info('###') + topology_st.standalone.log.info('### Check that after stop') + topology_st.standalone.log.info('### - dse.ldif contains DBLOCK_LDAP_UPDATE') + topology_st.standalone.log.info('### - guardian contains DBLOCK_LDAP_UPDATE') + topology_st.standalone.log.info('### Check that edit dse+restart') + topology_st.standalone.log.info('### - monitor contains DBLOCK_EDIT_UPDATE') + topology_st.standalone.log.info('### - configured contains DBLOCK_EDIT_UPDATE') + topology_st.standalone.log.info('### Check that after stop') + topology_st.standalone.log.info('### - dse.ldif contains DBLOCK_EDIT_UPDATE') + topology_st.standalone.log.info('### - guardian contains DBLOCK_EDIT_UPDATE') + topology_st.standalone.log.info('###') + 
topology_st.standalone.log.info('###################################') + + topology_st.standalone.stop(timeout=10) + _check_dse_ldif_value(topology_st, attr=DBLOCK_ATTR_CONFIG, expected_value=DBLOCK_LDAP_UPDATE) + _check_guardian_value(topology_st, attr=DBLOCK_ATTR_GUARDIAN, expected_value=DBLOCK_LDAP_UPDATE) + + dse_ref_ldif = topology_st.standalone.confdir + '/dse.ldif' + dse_new_ldif = topology_st.standalone.confdir + '/dse.ldif.new' + dse_ref = open(dse_ref_ldif, "r") + dse_new = open(dse_new_ldif, "w") + + # Change the DBLOCK in dse.ldif + value = None + while True: + line = dse_ref.readline() + if (line == ''): + break + elif DBLOCK_ATTR_CONFIG in line.lower(): + value = line.split()[1] + assert (value == DBLOCK_LDAP_UPDATE) + new_value = [line.split()[0], DBLOCK_EDIT_UPDATE, ] + new_line = "%s\n" % " ".join(new_value) + else: + new_line = line + dse_new.write(new_line) + + assert (value) + dse_ref.close() + dse_new.close() + shutil.move(dse_new_ldif, dse_ref_ldif) + + # Check that the value is the same after restart + topology_st.standalone.start(timeout=10) + _check_monitored_value(topology_st, DBLOCK_EDIT_UPDATE) + _check_configured_value(topology_st, attr=DBLOCK_ATTR_CONFIG, expected_value=DBLOCK_EDIT_UPDATE, required=True) + + topology_st.standalone.stop(timeout=10) + _check_dse_ldif_value(topology_st, attr=DBLOCK_ATTR_CONFIG, expected_value=DBLOCK_EDIT_UPDATE) + _check_guardian_value(topology_st, attr=DBLOCK_ATTR_GUARDIAN, expected_value=DBLOCK_EDIT_UPDATE) + + +def test_ticket48906_dblock_robust(topology_st): + topology_st.standalone.log.info('###################################') + topology_st.standalone.log.info('###') + topology_st.standalone.log.info('### Check that the following values are rejected') + topology_st.standalone.log.info('### - negative value') + topology_st.standalone.log.info('### - insuffisant value') + topology_st.standalone.log.info('### - invalid value') + topology_st.standalone.log.info('### Check that minimum value is 
accepted') + topology_st.standalone.log.info('###') + topology_st.standalone.log.info('###################################') + + topology_st.standalone.start(timeout=10) + _check_monitored_value(topology_st, DBLOCK_EDIT_UPDATE) + _check_configured_value(topology_st, attr=DBLOCK_ATTR_CONFIG, expected_value=DBLOCK_EDIT_UPDATE, required=True) + + # Check negative value + try: + topology_st.standalone.modify_s(ldbm_config, [(ldap.MOD_REPLACE, DBLOCK_ATTR_CONFIG, b"-1")]) + except ldap.UNWILLING_TO_PERFORM: + pass + _check_monitored_value(topology_st, DBLOCK_EDIT_UPDATE) + _check_configured_value(topology_st, attr=DBLOCK_ATTR_CONFIG, expected_value=DBLOCK_LDAP_UPDATE, required=True) + + # Check insuffisant value + too_small = int(DBLOCK_MIN_UPDATE) - 1 + try: + topology_st.standalone.modify_s(ldbm_config, [(ldap.MOD_REPLACE, DBLOCK_ATTR_CONFIG, ensure_bytes(str(too_small)))]) + except ldap.UNWILLING_TO_PERFORM: + pass + _check_monitored_value(topology_st, DBLOCK_EDIT_UPDATE) + _check_configured_value(topology_st, attr=DBLOCK_ATTR_CONFIG, expected_value=DBLOCK_LDAP_UPDATE, required=True) + + # Check invalid value + try: + topology_st.standalone.modify_s(ldbm_config, [(ldap.MOD_REPLACE, DBLOCK_ATTR_CONFIG, b"dummy")]) + except ldap.UNWILLING_TO_PERFORM: + pass + _check_monitored_value(topology_st, DBLOCK_EDIT_UPDATE) + _check_configured_value(topology_st, attr=DBLOCK_ATTR_CONFIG, expected_value=DBLOCK_LDAP_UPDATE, required=True) + + # now check the minimal value + topology_st.standalone.modify_s(ldbm_config, [(ldap.MOD_REPLACE, DBLOCK_ATTR_CONFIG, ensure_bytes(DBLOCK_MIN_UPDATE))]) + _check_monitored_value(topology_st, DBLOCK_EDIT_UPDATE) + _check_configured_value(topology_st, attr=DBLOCK_ATTR_CONFIG, expected_value=DBLOCK_MIN_UPDATE, required=True) + + topology_st.standalone.stop(timeout=10) + _check_dse_ldif_value(topology_st, attr=DBLOCK_ATTR_CONFIG, expected_value=DBLOCK_MIN_UPDATE) + _check_guardian_value(topology_st, attr=DBLOCK_ATTR_GUARDIAN, 
expected_value=DBLOCK_EDIT_UPDATE) + + topology_st.standalone.start(timeout=10) + _check_monitored_value(topology_st, DBLOCK_MIN_UPDATE) + _check_configured_value(topology_st, attr=DBLOCK_ATTR_CONFIG, expected_value=DBLOCK_MIN_UPDATE, required=True) + + +if __name__ == '__main__': + # Run isolated + # -s for DEBUG mode + CURRENT_FILE = os.path.realpath(__file__) + pytest.main("-s %s" % CURRENT_FILE) diff --git a/dirsrvtests/tests/tickets/ticket48916_test.py b/dirsrvtests/tests/tickets/ticket48916_test.py new file mode 100644 index 0000000..b36305c --- /dev/null +++ b/dirsrvtests/tests/tickets/ticket48916_test.py @@ -0,0 +1,143 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2022 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. +# --- END COPYRIGHT BLOCK --- +# +import pytest +from lib389.tasks import * +from lib389.utils import * +from lib389.topologies import topology_m2 + +DEBUGGING = os.getenv('DEBUGGING', False) + +if DEBUGGING: + logging.getLogger(__name__).setLevel(logging.DEBUG) +else: + logging.getLogger(__name__).setLevel(logging.INFO) + +log = logging.getLogger(__name__) +# Skip on older versions +pytestmark = [pytest.mark.tier2, + pytest.mark.skipif(ds_is_older('1.3.5'), reason="Not implemented")] + + + +def _create_user(inst, idnum): + inst.add_s(Entry( + ('uid=user%s,ou=People,%s' % (idnum, DEFAULT_SUFFIX), { + 'objectClass': 'top account posixAccount'.split(' '), + 'cn': 'user', + 'uid': 'user%s' % idnum, + 'homeDirectory': '/home/user%s' % idnum, + 'loginShell': '/bin/nologin', + 'gidNumber': '-1', + 'uidNumber': '-1', + }) + )) + + +def test_ticket48916(topology_m2): + """ + https://bugzilla.redhat.com/show_bug.cgi?id=1353629 + + This is an issue with ID exhaustion in DNA causing a crash. + + To access each DirSrv instance use: topology_m2.ms["supplier1"], topology_m2.ms["supplier2"], + ..., topology_m2.hub1, ..., topology_m2.consumer1,... 
+ + + """ + + if DEBUGGING: + # Add debugging steps(if any)... + pass + + # Enable the plugin on both servers + + dna_m1 = topology_m2.ms["supplier1"].plugins.get('Distributed Numeric Assignment Plugin') + dna_m2 = topology_m2.ms["supplier2"].plugins.get('Distributed Numeric Assignment Plugin') + + # Configure it + # Create the container for the ranges to go into. + + topology_m2.ms["supplier1"].add_s(Entry( + ('ou=Ranges,%s' % DEFAULT_SUFFIX, { + 'objectClass': 'top organizationalUnit'.split(' '), + 'ou': 'Ranges', + }) + )) + + # Create the dnaAdmin? + + # For now we just pinch the dn from the dna_m* types, and add the relevant child config + # but in the future, this could be a better plugin template type from lib389 + + config_dn = dna_m1.dn + + topology_m2.ms["supplier1"].add_s(Entry( + ('cn=uids,%s' % config_dn, { + 'objectClass': 'top dnaPluginConfig'.split(' '), + 'cn': 'uids', + 'dnatype': 'uidNumber gidNumber'.split(' '), + 'dnafilter': '(objectclass=posixAccount)', + 'dnascope': '%s' % DEFAULT_SUFFIX, + 'dnaNextValue': '1', + 'dnaMaxValue': '50', + 'dnasharedcfgdn': 'ou=Ranges,%s' % DEFAULT_SUFFIX, + 'dnaThreshold': '0', + 'dnaRangeRequestTimeout': '60', + 'dnaMagicRegen': '-1', + 'dnaRemoteBindDN': 'uid=dnaAdmin,ou=People,%s' % DEFAULT_SUFFIX, + 'dnaRemoteBindCred': 'secret123', + 'dnaNextRange': '80-90' + }) + )) + + topology_m2.ms["supplier2"].add_s(Entry( + ('cn=uids,%s' % config_dn, { + 'objectClass': 'top dnaPluginConfig'.split(' '), + 'cn': 'uids', + 'dnatype': 'uidNumber gidNumber'.split(' '), + 'dnafilter': '(objectclass=posixAccount)', + 'dnascope': '%s' % DEFAULT_SUFFIX, + 'dnaNextValue': '61', + 'dnaMaxValue': '70', + 'dnasharedcfgdn': 'ou=Ranges,%s' % DEFAULT_SUFFIX, + 'dnaThreshold': '2', + 'dnaRangeRequestTimeout': '60', + 'dnaMagicRegen': '-1', + 'dnaRemoteBindDN': 'uid=dnaAdmin,ou=People,%s' % DEFAULT_SUFFIX, + 'dnaRemoteBindCred': 'secret123', + }) + )) + + # Enable the plugins + dna_m1.enable() + dna_m2.enable() + + # Restart the 
instances + topology_m2.ms["supplier1"].restart(60) + topology_m2.ms["supplier2"].restart(60) + + # Wait for a replication ..... + time.sleep(40) + + # Allocate the 10 members to exhaust + + for i in range(1, 11): + _create_user(topology_m2.ms["supplier2"], i) + + # Allocate the 11th + _create_user(topology_m2.ms["supplier2"], 11) + + log.info('Test PASSED') + + +if __name__ == '__main__': + # Run isolated + # -s for DEBUG mode + CURRENT_FILE = os.path.realpath(__file__) + pytest.main("-s %s" % CURRENT_FILE) diff --git a/dirsrvtests/tests/tickets/ticket48944_test.py b/dirsrvtests/tests/tickets/ticket48944_test.py new file mode 100644 index 0000000..862f842 --- /dev/null +++ b/dirsrvtests/tests/tickets/ticket48944_test.py @@ -0,0 +1,211 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2017 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. +# --- END COPYRIGHT BLOCK --- +# +import pytest +from lib389.tasks import * +from lib389.utils import * +from lib389.topologies import topology_m2c2 as topo + +from lib389._constants import (PLUGIN_ACCT_POLICY, DN_PLUGIN, DN_CONFIG, DN_DM, PASSWORD, + DEFAULT_SUFFIX, SUFFIX) + +pytestmark = pytest.mark.tier2 + +DEBUGGING = os.getenv("DEBUGGING", default=False) +if DEBUGGING: + logging.getLogger(__name__).setLevel(logging.DEBUG) +else: + logging.getLogger(__name__).setLevel(logging.INFO) +log = logging.getLogger(__name__) + +ACCPOL_DN = "cn={},{}".format(PLUGIN_ACCT_POLICY, DN_PLUGIN) +ACCP_CONF = "{},{}".format(DN_CONFIG, ACCPOL_DN) +USER_PW = 'Secret123' + + +def _last_login_time(topo, userdn, inst_name, last_login): + """Find lastLoginTime attribute value for a given supplier/consumer""" + + if 'supplier' in inst_name: + if (last_login == 'bind_n_check'): + topo.ms[inst_name].simple_bind_s(userdn, USER_PW) + topo.ms[inst_name].simple_bind_s(DN_DM, PASSWORD) + entry = topo.ms[inst_name].search_s(userdn, ldap.SCOPE_BASE, 'objectClass=*', ['lastLoginTime']) + 
else: + if (last_login == 'bind_n_check'): + topo.cs[inst_name].simple_bind_s(userdn, USER_PW) + topo.cs[inst_name].simple_bind_s(DN_DM, PASSWORD) + entry = topo.cs[inst_name].search_s(userdn, ldap.SCOPE_BASE, 'objectClass=*', ['lastLoginTime']) + lastLogin = entry[0].lastLoginTime + time.sleep(1) + return lastLogin + + +def _enable_plugin(topo, inst_name): + """Enable account policy plugin and configure required attributes""" + + log.info('Enable account policy plugin and configure required attributes') + if 'supplier' in inst_name: + log.info('Configure Account policy plugin on {}'.format(inst_name)) + topo.ms[inst_name].simple_bind_s(DN_DM, PASSWORD) + try: + topo.ms[inst_name].plugins.enable(name=PLUGIN_ACCT_POLICY) + topo.ms[inst_name].modify_s(ACCPOL_DN, [(ldap.MOD_REPLACE, 'nsslapd-pluginarg0', ensure_bytes(ACCP_CONF))]) + topo.ms[inst_name].modify_s(ACCP_CONF, [(ldap.MOD_REPLACE, 'alwaysrecordlogin', b'yes')]) + topo.ms[inst_name].modify_s(ACCP_CONF, [(ldap.MOD_REPLACE, 'stateattrname', b'lastLoginTime')]) + topo.ms[inst_name].modify_s(ACCP_CONF, [(ldap.MOD_REPLACE, 'altstateattrname', b'createTimestamp')]) + topo.ms[inst_name].modify_s(ACCP_CONF, [(ldap.MOD_REPLACE, 'specattrname', b'acctPolicySubentry')]) + topo.ms[inst_name].modify_s(ACCP_CONF, [(ldap.MOD_REPLACE, 'limitattrname', b'accountInactivityLimit')]) + topo.ms[inst_name].modify_s(ACCP_CONF, [(ldap.MOD_REPLACE, 'accountInactivityLimit', b'3600')]) + except ldap.LDAPError as e: + log.error('Failed to configure {} plugin for inst-{} error: {}'.format(PLUGIN_ACCT_POLICY, inst_name, str(e))) + topo.ms[inst_name].restart(timeout=10) + else: + log.info('Configure Account policy plugin on {}'.format(inst_name)) + topo.cs[inst_name].simple_bind_s(DN_DM, PASSWORD) + try: + topo.cs[inst_name].plugins.enable(name=PLUGIN_ACCT_POLICY) + topo.cs[inst_name].modify_s(ACCPOL_DN, [(ldap.MOD_REPLACE, 'nsslapd-pluginarg0', ensure_bytes(ACCP_CONF))]) + topo.cs[inst_name].modify_s(ACCP_CONF, [(ldap.MOD_REPLACE, 
'alwaysrecordlogin', b'yes')]) + topo.cs[inst_name].modify_s(ACCP_CONF, [(ldap.MOD_REPLACE, 'stateattrname', b'lastLoginTime')]) + topo.cs[inst_name].modify_s(ACCP_CONF, [(ldap.MOD_REPLACE, 'altstateattrname', b'createTimestamp')]) + topo.cs[inst_name].modify_s(ACCP_CONF, [(ldap.MOD_REPLACE, 'specattrname', b'acctPolicySubentry')]) + topo.cs[inst_name].modify_s(ACCP_CONF, [(ldap.MOD_REPLACE, 'limitattrname', b'accountInactivityLimit')]) + topo.cs[inst_name].modify_s(ACCP_CONF, [(ldap.MOD_REPLACE, 'accountInactivityLimit', b'3600')]) + except ldap.LDAPError as e: + log.error('Failed to configure {} plugin for inst-{} error {}'.format(PLUGIN_ACCT_POLICY, inst_name, str(e))) + topo.cs[inst_name].restart(timeout=10) + + +def test_ticket48944(topo): + """On a read only replica invalid state info can accumulate + + :id: 833be131-f3bf-493e-97c6-3121438a07b1 + :feature: Account Policy Plugin + :setup: Two supplier and two consumer setup + :steps: 1. Configure Account policy plugin with alwaysrecordlogin set to yes + 2. Check if entries are synced across suppliers and consumers + 3. Stop all suppliers and consumers + 4. Start supplier1 and bind as user1 to create lastLoginTime attribute + 5. Start supplier2 and wait for the sync of lastLoginTime attribute + 6. Stop supplier1 and bind as user1 from supplier2 + 7. Check if lastLoginTime attribute is updated and greater than supplier1 + 8. Stop supplier2, start consumer1, consumer2 and then supplier2 + 9. Check if lastLoginTime attribute is updated on both consumers + 10. Bind as user1 to both consumers and check the value is updated + 11. Check if lastLoginTime attribute is not updated from consumers + 12. Start supplier1 and make sure the lastLoginTime attribute is not updated on consumers + 13. Bind as user1 from supplier1 and check if all suppliers and consumers have the same value + 14. 
Check error logs of consumers for "deletedattribute;deleted" message + :expectedresults: No accumulation of replica invalid state info on consumers + """ + + log.info("Ticket 48944 - On a read only replica invalid state info can accumulate") + user_name = 'newbzusr' + tuserdn = 'uid={}1,ou=people,{}'.format(user_name, SUFFIX) + inst_list = ['supplier1', 'supplier2', 'consumer1', 'consumer2'] + for inst_name in inst_list: + _enable_plugin(topo, inst_name) + + log.info('Sleep for 10secs for the server to come up') + time.sleep(10) + log.info('Add few entries to server and check if entries are replicated') + for nos in range(10): + userdn = 'uid={}{},ou=people,{}'.format(user_name, nos, SUFFIX) + try: + topo.ms['supplier1'].add_s(Entry((userdn, { + 'objectclass': 'top person'.split(), + 'objectclass': 'inetorgperson', + 'cn': user_name, + 'sn': user_name, + 'userpassword': USER_PW, + 'mail': '{}@redhat.com'.format(user_name)}))) + except ldap.LDAPError as e: + log.error('Failed to add {} user: error {}'.format(userdn, e.message['desc'])) + raise e + + log.info('Checking if entries are synced across suppliers and consumers') + entries_m1 = topo.ms['supplier1'].search_s(SUFFIX, ldap.SCOPE_SUBTREE, 'uid={}*'.format(user_name), ['uid=*']) + exp_entries = str(entries_m1).count('dn: uid={}*'.format(user_name)) + entries_m2 = topo.ms['supplier2'].search_s(SUFFIX, ldap.SCOPE_SUBTREE, 'uid={}*'.format(user_name), ['uid=*']) + act_entries = str(entries_m2).count('dn: uid={}*'.format(user_name)) + assert act_entries == exp_entries + inst_list = ['consumer1', 'consumer2'] + for inst in inst_list: + entries_other = topo.cs[inst].search_s(SUFFIX, ldap.SCOPE_SUBTREE, 'uid={}*'.format(user_name), ['uid=*']) + act_entries = str(entries_other).count('dn: uid={}*'.format(user_name)) + assert act_entries == exp_entries + + topo.ms['supplier2'].stop(timeout=10) + topo.ms['supplier1'].stop(timeout=10) + topo.cs['consumer1'].stop(timeout=10) + topo.cs['consumer2'].stop(timeout=10) + + 
topo.ms['supplier1'].start(timeout=10) + lastLogin_m1_1 = _last_login_time(topo, tuserdn, 'supplier1', 'bind_n_check') + + log.info('Start supplier2 to sync lastLoginTime attribute from supplier1') + topo.ms['supplier2'].start(timeout=10) + time.sleep(5) + log.info('Stop supplier1') + topo.ms['supplier1'].stop(timeout=10) + log.info('Bind as user1 to supplier2 and check if lastLoginTime attribute is greater than supplier1') + lastLogin_m2_1 = _last_login_time(topo, tuserdn, 'supplier2', 'bind_n_check') + assert lastLogin_m2_1 > lastLogin_m1_1 + + log.info('Start all servers except supplier1') + topo.ms['supplier2'].stop(timeout=10) + topo.cs['consumer1'].start(timeout=10) + topo.cs['consumer2'].start(timeout=10) + topo.ms['supplier2'].start(timeout=10) + time.sleep(10) + log.info('Check if consumers are updated with lastLoginTime attribute value from supplier2') + lastLogin_c1_1 = _last_login_time(topo, tuserdn, 'consumer1', 'check') + assert lastLogin_c1_1 == lastLogin_m2_1 + + lastLogin_c2_1 = _last_login_time(topo, tuserdn, 'consumer2', 'check') + assert lastLogin_c2_1 == lastLogin_m2_1 + + log.info('Check if lastLoginTime update in consumers not synced to supplier2') + lastLogin_c1_2 = _last_login_time(topo, tuserdn, 'consumer1', 'bind_n_check') + assert lastLogin_c1_2 > lastLogin_m2_1 + + lastLogin_c2_2 = _last_login_time(topo, tuserdn, 'consumer2', 'bind_n_check') + assert lastLogin_c2_2 > lastLogin_m2_1 + + time.sleep(10) # Allow replication to kick in + lastLogin_m2_2 = _last_login_time(topo, tuserdn, 'supplier2', 'check') + assert lastLogin_m2_2 == lastLogin_m2_1 + + log.info('Start supplier1 and check if its updating its older lastLoginTime attribute to consumers') + topo.ms['supplier1'].start(timeout=10) + time.sleep(10) + lastLogin_c1_3 = _last_login_time(topo, tuserdn, 'consumer1', 'check') + assert lastLogin_c1_3 == lastLogin_c1_2 + + lastLogin_c2_3 = _last_login_time(topo, tuserdn, 'consumer2', 'check') + assert lastLogin_c2_3 == lastLogin_c2_2 + + 
log.info('Check if lastLoginTime update from supplier2 is synced to all suppliers and consumers') + lastLogin_m2_3 = _last_login_time(topo, tuserdn, 'supplier2', 'bind_n_check') + time.sleep(10) # Allow replication to kick in + lastLogin_m1_2 = _last_login_time(topo, tuserdn, 'supplier1', 'check') + lastLogin_c1_4 = _last_login_time(topo, tuserdn, 'consumer1', 'check') + lastLogin_c2_4 = _last_login_time(topo, tuserdn, 'consumer2', 'check') + assert lastLogin_m2_3 == lastLogin_m1_2 == lastLogin_c2_4 == lastLogin_c1_4 + + log.info('Checking consumer error logs for replica invalid state info') + assert not topo.cs['consumer2'].ds_error_log.match('.*deletedattribute;deleted.*') + assert not topo.cs['consumer1'].ds_error_log.match('.*deletedattribute;deleted.*') + + +if __name__ == '__main__': + # Run isolated + # -s for DEBUG mode + CURRENT_FILE = os.path.realpath(__file__) + pytest.main("-s %s" % CURRENT_FILE) diff --git a/dirsrvtests/tests/tickets/ticket48956_test.py b/dirsrvtests/tests/tickets/ticket48956_test.py new file mode 100644 index 0000000..53e97d6 --- /dev/null +++ b/dirsrvtests/tests/tickets/ticket48956_test.py @@ -0,0 +1,136 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2022 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. 
+# --- END COPYRIGHT BLOCK --- +# +import pytest +import subprocess +from lib389.tasks import * +from lib389.utils import * +from lib389.topologies import topology_st + +from lib389._constants import (PLUGIN_ACCT_POLICY, DEFAULT_SUFFIX, DN_DM, PASSWORD, SUFFIX, + BACKEND_NAME) + +pytestmark = pytest.mark.tier2 + +DEBUGGING = os.getenv('DEBUGGING', False) + +RDN_LONG_SUFFIX = 'this' +LONG_SUFFIX = "dc=%s,dc=is,dc=a,dc=very,dc=long,dc=suffix,dc=so,dc=long,dc=suffix,dc=extremely,dc=long,dc=suffix" % RDN_LONG_SUFFIX +LONG_SUFFIX_BE = 'ticket48956' + +ACCT_POLICY_PLUGIN_DN = 'cn=%s,cn=plugins,cn=config' % PLUGIN_ACCT_POLICY +ACCT_POLICY_CONFIG_DN = 'cn=config,%s' % ACCT_POLICY_PLUGIN_DN + +INACTIVITY_LIMIT = '9' +SEARCHFILTER = '(objectclass=*)' + +TEST_USER = 'ticket48956user' +TEST_USER_PW = '%s' % TEST_USER + +if DEBUGGING: + logging.getLogger(__name__).setLevel(logging.DEBUG) +else: + logging.getLogger(__name__).setLevel(logging.INFO) + +log = logging.getLogger(__name__) + + +def _check_status(topology_st, user, expected): + nsaccountstatus = os.path.join(topology_st.standalone.ds_paths.sbin_dir, "ns-accountstatus.pl") + + try: + output = subprocess.check_output([nsaccountstatus, '-Z', topology_st.standalone.serverid, + '-D', DN_DM, '-w', PASSWORD, + '-p', str(topology_st.standalone.port), '-I', user]) + except subprocess.CalledProcessError as err: + output = err.output + + log.info("output: %s" % output) + + if expected in output: + return True + return False + + +def _check_inactivity(topology_st, mysuffix): + ACCT_POLICY_DN = 'cn=Account Inactivation Policy,%s' % mysuffix + log.info("\n######################### Adding Account Policy entry: %s ######################\n" % ACCT_POLICY_DN) + topology_st.standalone.add_s( + Entry((ACCT_POLICY_DN, {'objectclass': "top ldapsubentry extensibleObject accountpolicy".split(), + 'accountInactivityLimit': INACTIVITY_LIMIT}))) + time.sleep(1) + + TEST_USER_DN = 'uid=%s,%s' % (TEST_USER, mysuffix) + 
log.info("\n######################### Adding Test User entry: %s ######################\n" % TEST_USER_DN) + topology_st.standalone.add_s( + Entry((TEST_USER_DN, {'objectclass': "top person organizationalPerson inetOrgPerson".split(), + 'cn': TEST_USER, + 'sn': TEST_USER, + 'givenname': TEST_USER, + 'userPassword': TEST_USER_PW, + 'acctPolicySubentry': ACCT_POLICY_DN}))) + time.sleep(1) + + # Setting the lastLoginTime + try: + topology_st.standalone.simple_bind_s(TEST_USER_DN, TEST_USER_PW) + except ldap.CONSTRAINT_VIOLATION as e: + log.error('CONSTRAINT VIOLATION ' + e.message['desc']) + topology_st.standalone.simple_bind_s(DN_DM, PASSWORD) + + assert (_check_status(topology_st, TEST_USER_DN, b'- activated')) + + time.sleep(int(INACTIVITY_LIMIT) + 5) + assert (_check_status(topology_st, TEST_USER_DN, b'- inactivated (inactivity limit exceeded')) + + +def test_ticket48956(topology_st): + """Write your testcase here... + + Also, if you need any testcase initialization, + please, write additional fixture for that(include finalizer). 
+ + """ + + topology_st.standalone.modify_s(ACCT_POLICY_PLUGIN_DN, + [(ldap.MOD_REPLACE, 'nsslapd-pluginarg0', ensure_bytes(ACCT_POLICY_CONFIG_DN))]) + + topology_st.standalone.modify_s(ACCT_POLICY_CONFIG_DN, [(ldap.MOD_REPLACE, 'alwaysrecordlogin', b'yes'), + (ldap.MOD_REPLACE, 'stateattrname', b'lastLoginTime'), + (ldap.MOD_REPLACE, 'altstateattrname', b'createTimestamp'), + (ldap.MOD_REPLACE, 'specattrname', b'acctPolicySubentry'), + (ldap.MOD_REPLACE, 'limitattrname', + b'accountInactivityLimit')]) + + # Enable the plugins + topology_st.standalone.plugins.enable(name=PLUGIN_ACCT_POLICY) + topology_st.standalone.restart(timeout=10) + + # Check inactivity on standard suffix (short) + _check_inactivity(topology_st, SUFFIX) + + # Check inactivity on a long suffix + topology_st.standalone.backend.create(LONG_SUFFIX, {BACKEND_NAME: LONG_SUFFIX_BE}) + topology_st.standalone.mappingtree.create(LONG_SUFFIX, bename=LONG_SUFFIX_BE) + topology_st.standalone.add_s(Entry((LONG_SUFFIX, { + 'objectclass': "top domain".split(), + 'dc': RDN_LONG_SUFFIX}))) + _check_inactivity(topology_st, LONG_SUFFIX) + + if DEBUGGING: + # Add debugging steps(if any)... + pass + + log.info('Test PASSED') + + +if __name__ == '__main__': + # Run isolated + # -s for DEBUG mode + CURRENT_FILE = os.path.realpath(__file__) + pytest.main("-s %s" % CURRENT_FILE) diff --git a/dirsrvtests/tests/tickets/ticket48973_test.py b/dirsrvtests/tests/tickets/ticket48973_test.py new file mode 100644 index 0000000..2fd7004 --- /dev/null +++ b/dirsrvtests/tests/tickets/ticket48973_test.py @@ -0,0 +1,314 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2022 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. 
+# --- END COPYRIGHT BLOCK --- +# +import os +import sys +import time +import ldap +import logging +import pytest +from lib389 import DirSrv, Entry +from lib389._constants import * +from lib389.properties import * +from lib389.tasks import * +from lib389.utils import * + +pytestmark = pytest.mark.tier2 + +logging.getLogger(__name__).setLevel(logging.DEBUG) +log = logging.getLogger(__name__) + +installation1_prefix = None + +NEW_ACCOUNT = "new_account" +MAX_ACCOUNTS = 100 +HOMEHEAD = "/home/xyz_" + +MIXED_VALUE="/home/mYhOmEdIrEcToRy" +LOWER_VALUE="/home/myhomedirectory" +HOMEDIRECTORY_INDEX = 'cn=homeDirectory,cn=index,cn=userRoot,cn=ldbm database,cn=plugins,cn=config' +HOMEDIRECTORY_CN="homedirectory" +MATCHINGRULE = 'nsMatchingRule' +UIDNUMBER_INDEX = 'cn=uidnumber,cn=index,cn=userRoot,cn=ldbm database,cn=plugins,cn=config' +UIDNUMBER_CN="uidnumber" + + +class TopologyStandalone(object): + def __init__(self, standalone): + standalone.open() + self.standalone = standalone + + +@pytest.fixture(scope="module") +def topology(request): + # Creating standalone instance ... 
+ standalone = DirSrv(verbose=False) + args_instance[SER_HOST] = HOST_STANDALONE + args_instance[SER_PORT] = PORT_STANDALONE + args_instance[SER_SERVERID_PROP] = SERVERID_STANDALONE + args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX + args_standalone = args_instance.copy() + standalone.allocate(args_standalone) + instance_standalone = standalone.exists() + if instance_standalone: + standalone.delete() + standalone.create() + standalone.open() + + # Delete each instance in the end + def fin(): + #standalone.delete() + pass + request.addfinalizer(fin) + + return TopologyStandalone(standalone) + +def _find_notes_accesslog(file, log_pattern): + try: + _find_notes_accesslog.last_pos += 1 + except AttributeError: + _find_notes_accesslog.last_pos = 0 + + + #position to the where we were last time + found = None + file.seek(_find_notes_accesslog.last_pos) + + while True: + line = file.readline() + found = log_pattern.search(line) + if ((line == '') or (found)): + break + + + if found: + # assuming that the result is the next line of the search + line = file.readline() + _find_notes_accesslog.last_pos = file.tell() + return line + else: + _find_notes_accesslog.last_pos = file.tell() + return None + +def _find_next_notes(topology, Filter): + topology.standalone.stop(timeout=10) + file_path = topology.standalone.accesslog + file_obj = open(file_path, "r") + regex = re.compile("filter=\"\(%s" % Filter) + result = _find_notes_accesslog(file_obj, regex) + file_obj.close() + topology.standalone.start(timeout=10) + + return result + +# +# find the next message showing an indexing failure +# (starting at the specified posistion) +# and return the position in the error log +# If there is not such message -> return None +def _find_next_indexing_failure(topology, pattern, position): + file_path = topology.standalone.errlog + file_obj = open(file_path, "r") + + try: + file_obj.seek(position + 1) + except: + file_obj.close() + return None + + # Check if the MR configuration failure 
occurs + regex = re.compile(pattern) + while True: + line = file_obj.readline() + found = regex.search(line) + if ((line == '') or (found)): + break + + + + if (found): + log.info("The configuration of a specific MR fails") + log.info(line) + result = file_obj.tell() + file_obj.close() + return result + else: + file_obj.close() + result = None + + return result +# +# find the first message showing an indexing failure +# and return the position in the error log +# If there is not such message -> return None +def _find_first_indexing_failure(topology, pattern): + file_path = topology.standalone.errlog + file_obj = open(file_path, "r") + + # Check if the MR configuration failure occurs + regex = re.compile(pattern) + while True: + line = file_obj.readline() + found = regex.search(line) + if ((line == '') or (found)): + break + + + + if (found): + log.info("pattern is found: \"%s\"") + log.info(line) + result = file_obj.tell() + file_obj.close() + else: + result = None + + return result + +def _check_entry(topology, filterHead=None, filterValueUpper=False, entry_ext=None, found=False, indexed=False): + # Search with CES with exact value -> find an entry + indexed + if filterValueUpper: + homehead = HOMEHEAD.upper() + else: + homehead = HOMEHEAD + searchedHome = "%s%d" % (homehead, entry_ext) + Filter = "(%s=%s)" % (filterHead, searchedHome) + log.info("Search %s" % Filter) + ents = topology.standalone.search_s(SUFFIX, ldap.SCOPE_SUBTREE, Filter) + if found: + assert len(ents) == 1 + assert ents[0].hasAttr('homedirectory') + valueHome = ensure_bytes("%s%d" % (HOMEHEAD, entry_ext)) + assert valueHome in ents[0].getValues('homedirectory') + else: + assert len(ents) == 0 + + result = _find_next_notes(topology, Filter) + log.info("result=%s" % result) + if indexed: + assert not "notes=U" in result + else: + assert "notes=U" in result + +def test_ticket48973_init(topology): + log.info("Initialization: add dummy entries for the tests") + for cpt in range(MAX_ACCOUNTS): + name 
= "%s%d" % (NEW_ACCOUNT, cpt) + topology.standalone.add_s(Entry(("uid=%s,%s" % (name, SUFFIX), { + 'objectclass': "top posixAccount".split(), + 'uid': name, + 'cn': name, + 'uidnumber': str(111), + 'gidnumber': str(222), + 'homedirectory': "%s%d" % (HOMEHEAD, cpt)}))) + +def test_ticket48973_ces_not_indexed(topology): + """ + Check that homedirectory is not indexed + - do a search unindexed + """ + + entry_ext = 0 + searchedHome = "%s%d" % (HOMEHEAD, entry_ext) + Filter = "(homeDirectory=%s)" % searchedHome + log.info("Search %s" % Filter) + ents = topology.standalone.search_s(SUFFIX, ldap.SCOPE_SUBTREE, Filter) + assert len(ents) == 1 + assert ents[0].hasAttr('homedirectory') + assert ensure_bytes(searchedHome) in ents[0].getValues('homedirectory') + + result = _find_next_notes(topology, Filter) + log.info("result=%s" % result) + assert "notes=U" in result + + +def test_ticket48973_homeDirectory_indexing(topology): + """ + Check that homedirectory is indexed with syntax (ces) + - triggers index + - no failure on index + - do a search indexed with exact value (ces) and no default_mr_indexer_create warning + - do a search indexed with uppercase value (ces) and no default_mr_indexer_create warning + """ + entry_ext = 1 + + try: + ent = topology.standalone.getEntry(HOMEDIRECTORY_INDEX, ldap.SCOPE_BASE) + except ldap.NO_SUCH_OBJECT: + topology.standalone.add_s(Entry((HOMEDIRECTORY_INDEX, { + 'objectclass': "top nsIndex".split(), + 'cn': HOMEDIRECTORY_CN, + 'nsSystemIndex': 'false', + 'nsIndexType': 'eq'}))) + + args = {TASK_WAIT: True} + topology.standalone.tasks.reindex(suffix=SUFFIX, attrname='homeDirectory', args=args) + + log.info("Check indexing succeeded with no specified matching rule") + assert not _find_first_indexing_failure(topology, "unknown or invalid matching rule") + assert not _find_first_indexing_failure(topology, "default_mr_indexer_create: warning") + assert not _find_first_indexing_failure(topology, "default_mr_indexer_create - Plugin .* does not 
handle") + + _check_entry(topology, filterHead="homeDirectory", filterValueUpper=False, entry_ext=entry_ext,found=True, indexed=True) + + _check_entry(topology, filterHead="homeDirectory:caseExactIA5Match:", filterValueUpper=False, entry_ext=entry_ext, found=True, indexed=False) + + _check_entry(topology, filterHead="homeDirectory:caseIgnoreIA5Match:", filterValueUpper=False, entry_ext=entry_ext, found=True, indexed=False) + + _check_entry(topology, filterHead="homeDirectory", filterValueUpper=True, entry_ext=entry_ext, found=False, indexed=True) + + _check_entry(topology, filterHead="homeDirectory:caseExactIA5Match:", filterValueUpper=True, entry_ext=entry_ext, found=False, indexed=False) + + _check_entry(topology, filterHead="homeDirectory:caseIgnoreIA5Match:", filterValueUpper=True, entry_ext=entry_ext, found=True, indexed=False) + + + +def test_ticket48973_homeDirectory_caseExactIA5Match_caseIgnoreIA5Match_indexing(topology): + """ + Check that homedirectory is indexed with syntax (ces && cis) + - triggers index + - no failure on index + - do a search indexed (ces) and no default_mr_indexer_create warning + - do a search indexed (cis) and no default_mr_indexer_create warning + """ + entry_ext = 4 + + log.info("\n\nindex homeDirectory in caseExactIA5Match and caseIgnoreIA5Match") + EXACTIA5_MR_NAME=b'caseExactIA5Match' + IGNOREIA5_MR_NAME=b'caseIgnoreIA5Match' + EXACT_MR_NAME=b'caseExactMatch' + IGNORE_MR_NAME=b'caseIgnoreMatch' + mod = [(ldap.MOD_REPLACE, MATCHINGRULE, (EXACT_MR_NAME, IGNORE_MR_NAME, EXACTIA5_MR_NAME, IGNOREIA5_MR_NAME))] + topology.standalone.modify_s(HOMEDIRECTORY_INDEX, mod) + + args = {TASK_WAIT: True} + topology.standalone.tasks.reindex(suffix=SUFFIX, attrname='homeDirectory', args=args) + + log.info("Check indexing succeeded with no specified matching rule") + assert not _find_first_indexing_failure(topology, "unknown or invalid matching rule") + assert not _find_first_indexing_failure(topology, "default_mr_indexer_create: warning") + 
assert not _find_first_indexing_failure(topology, "default_mr_indexer_create - Plugin .* does not handle") + + _check_entry(topology, filterHead="homeDirectory", filterValueUpper=False, entry_ext=entry_ext, found=True, indexed=True) + + _check_entry(topology, filterHead="homeDirectory:caseExactIA5Match:", filterValueUpper=False, entry_ext=entry_ext, found=True, indexed=True) + + _check_entry(topology, filterHead="homeDirectory:caseIgnoreIA5Match:", filterValueUpper=False, entry_ext=entry_ext, found=True, indexed=True) + + _check_entry(topology, filterHead="homeDirectory", filterValueUpper=True, entry_ext=entry_ext, found=False, indexed=True) + + _check_entry(topology, filterHead="homeDirectory:caseExactIA5Match:", filterValueUpper=True, entry_ext=entry_ext, found=False, indexed=True) + + _check_entry(topology, filterHead="homeDirectory:caseIgnoreIA5Match:", filterValueUpper=True, entry_ext=entry_ext, found=True, indexed=True) + + +if __name__ == '__main__': + # Run isolated + # -s for DEBUG mode + CURRENT_FILE = os.path.realpath(__file__) + pytest.main("-s %s" % CURRENT_FILE) diff --git a/dirsrvtests/tests/tickets/ticket49008_test.py b/dirsrvtests/tests/tickets/ticket49008_test.py new file mode 100644 index 0000000..2f3e095 --- /dev/null +++ b/dirsrvtests/tests/tickets/ticket49008_test.py @@ -0,0 +1,133 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2022 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. 
+# --- END COPYRIGHT BLOCK --- +# +import pytest +from lib389.tasks import * +from lib389.utils import * +from lib389.topologies import topology_m3 as T + +from lib389._constants import DEFAULT_SUFFIX, PLUGIN_MEMBER_OF + +# Skip on older versions +pytestmark = [pytest.mark.tier2, + pytest.mark.skipif(ds_is_older('1.3.6'), reason="Not implemented")] + +DEBUGGING = os.getenv("DEBUGGING", default=False) +if DEBUGGING: + logging.getLogger(__name__).setLevel(logging.DEBUG) +else: + logging.getLogger(__name__).setLevel(logging.INFO) +log = logging.getLogger(__name__) + + +def test_ticket49008(T): + A = T.ms['supplier1'] + B = T.ms['supplier2'] + C = T.ms['supplier3'] + + A.enableReplLogging() + B.enableReplLogging() + C.enableReplLogging() + + AtoB = A.agreement.list(suffix=DEFAULT_SUFFIX)[0].dn + AtoC = A.agreement.list(suffix=DEFAULT_SUFFIX)[1].dn + CtoA = C.agreement.list(suffix=DEFAULT_SUFFIX)[0].dn + CtoB = C.agreement.list(suffix=DEFAULT_SUFFIX)[1].dn + + # we want replication in a line A <==> B <==> C + A.agreement.pause(AtoC) + C.agreement.pause(CtoA) + + # Enable memberOf on Supplier B + B.plugins.enable(name=PLUGIN_MEMBER_OF) + + # Set the auto OC to an objectclass that does NOT allow memberOf + B.modify_s('cn=MemberOf Plugin,cn=plugins,cn=config', + [(ldap.MOD_REPLACE, 'memberofAutoAddOC', b'referral')]) + B.restart(timeout=10) + + # add a few entries allowing memberof + for i in range(1, 6): + name = "userX{}".format(i) + dn = "cn={},{}".format(name, DEFAULT_SUFFIX) + A.add_s(Entry((dn, {'objectclass': "top person inetuser".split(), + 'sn': name, 'cn': name}))) + + # add a few entries not allowing memberof + for i in range(1, 6): + name = "userY{}".format(i) + dn = "cn={},{}".format(name, DEFAULT_SUFFIX) + A.add_s(Entry((dn, {'objectclass': "top person".split(), + 'sn': name, 'cn': name}))) + + time.sleep(15) + + A_entries = A.search_s(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE, + '(objectClass=person)') + B_entries = B.search_s(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE, + 
'(objectClass=person)') + C_entries = C.search_s(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE, + '(objectClass=person)') + + log.debug("A contains: %s", A_entries) + log.debug("B contains: %s", B_entries) + log.debug("C contains: %s", C_entries) + + assert len(A_entries) == len(B_entries) + assert len(B_entries) == len(C_entries) + + # add a group with members allowing memberof + dn = "cn=g1,{}".format(DEFAULT_SUFFIX) + A.add_s(Entry((dn, {'objectclass': "top groupOfNames".split(), + 'description': "Test Owned Group {}".format(name), + 'member': "cn=userX1,{}".format(DEFAULT_SUFFIX), + 'cn': "g1"}))) + + # check ruv on m2 before applying failing op + time.sleep(10) + B_RUV = B.search_s("cn=config", ldap.SCOPE_SUBTREE, + "(&(objectclass=nsds5replica)(nsDS5ReplicaRoot={}))".format(DEFAULT_SUFFIX), + ['nsds50ruv']) + elements = B_RUV[0].getValues('nsds50ruv') + ruv_before = 'ruv_before' + for ruv in elements: + if b'replica 2' in ruv: + ruv_before = ruv + + # add a group with members allowing memberof and members which don't + # the op will fail on M2 + dn = "cn=g2,{}".format(DEFAULT_SUFFIX) + A.add_s(Entry((dn, {'objectclass': "top groupOfNames".split(), + 'description': "Test Owned Group {}".format(name), + 'member': ["cn=userX1,{}".format(DEFAULT_SUFFIX), + "cn=userX2,{}".format(DEFAULT_SUFFIX), + "cn=userY1,{}".format(DEFAULT_SUFFIX)], + 'cn': "g2"}))) + + # check ruv on m2 after applying failing op + time.sleep(10) + B_RUV = B.search_s("cn=config", ldap.SCOPE_SUBTREE, + "(&(objectclass=nsds5replica)(nsDS5ReplicaRoot={}))".format(DEFAULT_SUFFIX), + ['nsds50ruv']) + elements = B_RUV[0].getValues('nsds50ruv') + ruv_after = 'ruv_after' + for ruv in elements: + if b'replica 2' in ruv: + ruv_after = ruv + + log.info('ruv before fail: {}'.format(ruv_before)) + log.info('ruv after fail: {}'.format(ruv_after)) + # the ruv should not have changed + assert ruv_before == ruv_after + + +if __name__ == '__main__': + # Run isolated + # -s for DEBUG mode + CURRENT_FILE = 
os.path.realpath(__file__) + pytest.main("-s {}".format(CURRENT_FILE)) diff --git a/dirsrvtests/tests/tickets/ticket49020_test.py b/dirsrvtests/tests/tickets/ticket49020_test.py new file mode 100644 index 0000000..f83f0ec --- /dev/null +++ b/dirsrvtests/tests/tickets/ticket49020_test.py @@ -0,0 +1,81 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2022 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. +# --- END COPYRIGHT BLOCK --- +# +import time +import ldap +import logging +import pytest +from lib389 import DirSrv, Entry, tools, tasks +from lib389.tools import DirSrvTools +from lib389._constants import * +from lib389.properties import * +from lib389.tasks import * +from lib389.utils import * +from lib389.topologies import topology_m3 as T +import socket + +# Skip on older versions +pytestmark = [pytest.mark.tier2, + pytest.mark.skipif(ds_is_older('1.3.6'), reason="Not implemented")] + +DEBUGGING = os.getenv("DEBUGGING", default=False) +if DEBUGGING: + logging.getLogger(__name__).setLevel(logging.DEBUG) +else: + logging.getLogger(__name__).setLevel(logging.INFO) +log = logging.getLogger(__name__) + + +def test_ticket49020(T): + A = T.ms['supplier1'] + B = T.ms['supplier2'] + C = T.ms['supplier3'] + + A.enableReplLogging() + B.enableReplLogging() + C.enableReplLogging() + + AtoB = A.agreement.list(suffix=DEFAULT_SUFFIX)[0].dn + AtoC = A.agreement.list(suffix=DEFAULT_SUFFIX)[1].dn + CtoB = C.agreement.list(suffix=DEFAULT_SUFFIX)[1].dn + + A.agreement.pause(AtoB) + C.agreement.pause(CtoB) + time.sleep(5) + name = "userX" + dn = "cn={},{}".format(name, DEFAULT_SUFFIX) + A.add_s(Entry((dn, {'objectclass': "top person".split(), + 'sn': name,'cn': name}))) + + A.agreement.init(DEFAULT_SUFFIX, socket.gethostname(), PORT_SUPPLIER_3) + time.sleep(5) + for i in range(1,11): + name = "userY{}".format(i) + dn = "cn={},{}".format(name, DEFAULT_SUFFIX) + A.add_s(Entry((dn, {'objectclass': "top 
person".split(), + 'sn': name,'cn': name}))) + time.sleep(5) + C.agreement.resume(CtoB) + + time.sleep(5) + A_entries = A.search_s(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE, + '(objectClass=person)') + B_entries = B.search_s(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE, + '(objectClass=person)') + C_entries = C.search_s(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE, + '(objectClass=person)') + + assert len(A_entries) == len(C_entries) + assert len(B_entries) == len(A_entries) - 11 + + +if __name__ == '__main__': + # Run isolated + # -s for DEBUG mode + CURRENT_FILE = os.path.realpath(__file__) + pytest.main("-s %s" % CURRENT_FILE) diff --git a/dirsrvtests/tests/tickets/ticket49039_test.py b/dirsrvtests/tests/tickets/ticket49039_test.py new file mode 100644 index 0000000..0313f69 --- /dev/null +++ b/dirsrvtests/tests/tickets/ticket49039_test.py @@ -0,0 +1,127 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2022 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. +# --- END COPYRIGHT BLOCK --- +# +import time +import ldap +import logging +import pytest +import os +from lib389 import Entry +from lib389._constants import * +from lib389.properties import * +from lib389.tasks import * +from lib389.utils import * +from lib389.topologies import topology_st as topo +from lib389.pwpolicy import PwPolicyManager + + +pytestmark = pytest.mark.tier2 + +DEBUGGING = os.getenv("DEBUGGING", default=False) +if DEBUGGING: + logging.getLogger(__name__).setLevel(logging.DEBUG) +else: + logging.getLogger(__name__).setLevel(logging.INFO) +log = logging.getLogger(__name__) + +USER_DN = 'uid=user,dc=example,dc=com' + + +def test_ticket49039(topo): + """Test "password must change" verses "password min age". Min age should not + block password update if the password was reset. 
+ """ + + # Setup SSL (for ldappasswd test) + topo.standalone.enable_tls() + + # Configure password policy + try: + policy = PwPolicyManager(topo.standalone) + policy.set_global_policy(properties={'nsslapd-pwpolicy-local': 'on', + 'passwordMustChange': 'on', + 'passwordExp': 'on', + 'passwordMaxAge': '86400000', + 'passwordMinAge': '8640000', + 'passwordChange': 'on'}) + except ldap.LDAPError as e: + log.fatal('Failed to set password policy: ' + str(e)) + + # Add user, bind, and set password + try: + topo.standalone.add_s(Entry((USER_DN, { + 'objectclass': 'top extensibleObject'.split(), + 'uid': 'user1', + 'userpassword': PASSWORD + }))) + except ldap.LDAPError as e: + log.fatal('Failed to add user: error ' + e.args[0]['desc']) + assert False + + # Reset password as RootDN + try: + topo.standalone.modify_s(USER_DN, [(ldap.MOD_REPLACE, 'userpassword', ensure_bytes(PASSWORD))]) + except ldap.LDAPError as e: + log.fatal('Failed to bind: error ' + e.args[0]['desc']) + assert False + + time.sleep(1) + + # Reset password as user + try: + topo.standalone.simple_bind_s(USER_DN, PASSWORD) + except ldap.LDAPError as e: + log.fatal('Failed to bind: error ' + e.args[0]['desc']) + assert False + + try: + topo.standalone.modify_s(USER_DN, [(ldap.MOD_REPLACE, 'userpassword', ensure_bytes(PASSWORD))]) + except ldap.LDAPError as e: + log.fatal('Failed to change password: error ' + e.args[0]['desc']) + assert False + + ################################### + # Make sure ldappasswd also works + ################################### + + # Reset password as RootDN + try: + topo.standalone.simple_bind_s(DN_DM, PASSWORD) + except ldap.LDAPError as e: + log.fatal('Failed to bind as rootdn: error ' + e.args[0]['desc']) + assert False + + try: + topo.standalone.modify_s(USER_DN, [(ldap.MOD_REPLACE, 'userpassword', ensure_bytes(PASSWORD))]) + except ldap.LDAPError as e: + log.fatal('Failed to bind: error ' + e.args[0]['desc']) + assert False + + time.sleep(1) + + # Run ldappasswd as the User. 
+ os.environ["LDAPTLS_CACERTDIR"] = topo.standalone.get_cert_dir() + cmd = ('ldappasswd' + ' -h ' + topo.standalone.host + ' -Z -p 38901 -D ' + USER_DN + + ' -w password -a password -s password2 ' + USER_DN) + os.system(cmd) + time.sleep(1) + + try: + topo.standalone.simple_bind_s(USER_DN, "password2") + except ldap.LDAPError as e: + log.fatal('Failed to bind: error ' + e.args[0]['desc']) + assert False + + log.info('Test Passed') + + +if __name__ == '__main__': + # Run isolated + # -s for DEBUG mode + CURRENT_FILE = os.path.realpath(__file__) + pytest.main("-s %s" % CURRENT_FILE) diff --git a/dirsrvtests/tests/tickets/ticket49072_test.py b/dirsrvtests/tests/tickets/ticket49072_test.py new file mode 100644 index 0000000..c91ae24 --- /dev/null +++ b/dirsrvtests/tests/tickets/ticket49072_test.py @@ -0,0 +1,114 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2017 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. +# --- END COPYRIGHT BLOCK --- +# +import pytest +import subprocess +from lib389.tasks import * +from lib389.utils import * +from lib389.topologies import topology_st as topo + +from lib389._constants import (DEFAULT_SUFFIX, PLUGIN_MEMBER_OF, DN_DM, PASSWORD, SERVERID_STANDALONE, + SUFFIX) + +pytestmark = pytest.mark.tier2 + +logging.getLogger(__name__).setLevel(logging.DEBUG) +log = logging.getLogger(__name__) + +TEST_FILTER = '(objectClass=person' +TEST_BASEDN = 'dc=testdb,dc=com' +FILTER = '(objectClass=person)' +FIXUP_MEMOF = 'fixup-memberof.pl' + + +def test_ticket49072_basedn(topo): + """memberOf fixup task does not validate args + + :id: dce9b898-119d-42b8-a236-1130e59bfe18 + :feature: memberOf + :setup: Standalone instance, with memberOf plugin + :steps: 1. Run fixup-memberOf.pl with invalid DN entry + 2. Check if error log reports "Failed to get be backend" + :expectedresults: Fixup-memberOf.pl task should complete, but errors logged. 
+ """ + + log.info("Ticket 49072 memberof fixup task with invalid basedn...") + topo.standalone.plugins.enable(name=PLUGIN_MEMBER_OF) + topo.standalone.restart(timeout=10) + + if ds_is_older('1.3'): + inst_dir = topo.standalone.get_inst_dir() + memof_task = os.path.join(inst_dir, FIXUP_MEMOF) + try: + output = subprocess.check_output([memof_task, '-D', DN_DM, '-w', PASSWORD, '-b', TEST_BASEDN, '-f', FILTER]) + except subprocess.CalledProcessError as err: + output = err.output + else: + sbin_dir = topo.standalone.get_sbin_dir() + memof_task = os.path.join(sbin_dir, FIXUP_MEMOF) + try: + output = subprocess.check_output( + [memof_task, '-D', DN_DM, '-w', PASSWORD, '-b', TEST_BASEDN, '-Z', SERVERID_STANDALONE, '-f', FILTER]) + except subprocess.CalledProcessError as err: + output = err.output + log.info('output: {}'.format(output)) + expected = b"Successfully added task entry" + assert expected in output + log_entry = topo.standalone.ds_error_log.match('.*Failed to get be backend.*') + log.info('Error log out: {}'.format(log_entry)) + assert topo.standalone.ds_error_log.match('.*Failed to get be backend.*') + + +def test_ticket49072_filter(topo): + """memberOf fixup task does not validate args + + :id: dde9e893-119d-42c8-a236-1190e56bfe98 + :feature: memberOf + :setup: Standalone instance, with memberOf plugin + :steps: 1. Run fixup-memberOf.pl with invalid filter + 2. Check if error log reports "Bad search filter" + :expectedresults: Fixup-memberOf.pl task should complete, but errors logged. 
+ """ + log.info("Ticket 49072 memberof fixup task with invalid filter...") + log.info('Wait for 10 secs and check if task is completed') + time.sleep(10) + task_memof = 'cn=memberOf task,cn=tasks,cn=config' + if topo.standalone.search_s(task_memof, ldap.SCOPE_SUBTREE, 'cn=memberOf_fixup*', ['dn:']): + log.info('memberof task is still running, wait for +10 secs') + time.sleep(10) + + if ds_is_older('1.3'): + inst_dir = topo.standalone.get_inst_dir() + memof_task = os.path.join(inst_dir, FIXUP_MEMOF) + try: + output = subprocess.check_output([memof_task, '-D', DN_DM, '-w', PASSWORD, '-b', SUFFIX, '-f', TEST_FILTER]) + except subprocess.CalledProcessError as err: + output = err.output + else: + sbin_dir = topo.standalone.get_sbin_dir() + memof_task = os.path.join(sbin_dir, FIXUP_MEMOF) + try: + output = subprocess.check_output( + [memof_task, '-D', DN_DM, '-w', PASSWORD, '-b', SUFFIX, '-Z', SERVERID_STANDALONE, '-f', TEST_FILTER]) + except subprocess.CalledProcessError as err: + output = err.output + log.info('output: {}'.format(output)) + expected = b"Successfully added task entry" + assert expected in output + log_entry = topo.standalone.ds_error_log.match('.*Bad search filter.*') + log.info('Error log out: {}'.format(log_entry)) + assert topo.standalone.ds_error_log.match('.*Bad search filter.*') + + log.info("Ticket 49072 complete: memberOf fixup task does not validate args") + + +if __name__ == '__main__': + # Run isolated + # -s for DEBUG mode + CURRENT_FILE = os.path.realpath(__file__) + pytest.main("-s %s" % CURRENT_FILE) diff --git a/dirsrvtests/tests/tickets/ticket49073_test.py b/dirsrvtests/tests/tickets/ticket49073_test.py new file mode 100644 index 0000000..5f75797 --- /dev/null +++ b/dirsrvtests/tests/tickets/ticket49073_test.py @@ -0,0 +1,158 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2022 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. 
def _add_group_with_members(topology_m2):
    """Create GROUP_DN on supplier1 and add uid=member1..4 as member values."""
    # Create the group entry
    try:
        topology_m2.ms["supplier1"].add_s(Entry((GROUP_DN,
                                                 {'objectclass': 'top groupofnames'.split(),
                                                  'cn': 'group'})))
    except ldap.LDAPError as e:
        # str(e) instead of e.message['desc']: LDAPError.message does not exist on python3
        log.fatal('Failed to add group: error ' + str(e))
        assert False

    # Add members to the group
    log.info('Adding members to the group...')
    for idx in range(1, 5):
        MEMBER_VAL = ("uid=member%d,%s" % (idx, DEFAULT_SUFFIX))
        try:
            # modify values must be bytes under python3/python-ldap
            topology_m2.ms["supplier1"].modify_s(GROUP_DN,
                                                 [(ldap.MOD_ADD,
                                                   'member',
                                                   ensure_bytes(MEMBER_VAL))])
        except ldap.LDAPError as e:
            log.fatal('Failed to update group: member (%s) - error: %s' %
                      (MEMBER_VAL, str(e)))
            assert False


def _check_memberof(supplier, presence_flag):
    """Assert that uid=member1..4 do (presence_flag=True) or do not
    (presence_flag=False) carry a memberof value equal to GROUP_DN."""
    for idx in range(1, 5):
        USER_DN = ("uid=member%d,%s" % (idx, DEFAULT_SUFFIX))
        try:
            ent = supplier.getEntry(USER_DN, ldap.SCOPE_BASE, "(objectclass=*)")
            if presence_flag:
                # getValue() returns bytes on python3, so compare against bytes
                assert ent.hasAttr('memberof') and ent.getValue('memberof') == ensure_bytes(GROUP_DN)
            else:
                assert not ent.hasAttr('memberof')
        except ldap.LDAPError as e:
            log.fatal('Failed to retrieve user (%s): error %s' % (USER_DN, str(e)))
            assert False


def _check_entry_exist(supplier, dn):
    """Poll (up to ~10s) until 'dn' is replicated to 'supplier'; fail on timeout.

    The original loop could exit with attempt == 11 after a timeout and
    still pass its 'assert attempt != 10' check; the timeout is now an
    explicit failure.
    """
    for _attempt in range(11):
        try:
            supplier.getEntry(dn, ldap.SCOPE_BASE, "(objectclass=*)")
            return  # entry found
        except ldap.NO_SUCH_OBJECT:
            time.sleep(1)
        except ldap.LDAPError as e:
            log.fatal('Failed to retrieve user (%s): error %s' % (dn, str(e)))
            assert False
    assert False, 'Entry %s never appeared on %s' % (dn, supplier)


def test_ticket49073(topology_m2):
    """Fractional replication that excludes memberOf from incremental
    updates (but not from total init) must still leave memberof present
    on both suppliers after both a regular update and a total init.
    """
    topology_m2.ms["supplier1"].plugins.enable(name=PLUGIN_MEMBER_OF)
    topology_m2.ms["supplier1"].restart(timeout=10)
    topology_m2.ms["supplier2"].plugins.enable(name=PLUGIN_MEMBER_OF)
    topology_m2.ms["supplier2"].restart(timeout=10)

    # Configure fractional to prevent incremental updates from sending memberOf
    ents = topology_m2.ms["supplier1"].agreement.list(suffix=SUFFIX)
    assert len(ents) == 1
    log.info('update %s to add nsDS5ReplicatedAttributeListTotal' % ents[0].dn)
    # modify values must be bytes under python3/python-ldap
    topology_m2.ms["supplier1"].modify_s(ents[0].dn,
                                         [(ldap.MOD_REPLACE,
                                           'nsDS5ReplicatedAttributeListTotal',
                                           b'(objectclass=*) $ EXCLUDE '),
                                          (ldap.MOD_REPLACE,
                                           'nsDS5ReplicatedAttributeList',
                                           b'(objectclass=*) $ EXCLUDE memberOf')])
    topology_m2.ms["supplier1"].restart(timeout=10)

    #
    # create some users and a group
    #
    log.info('create users and group...')
    for idx in range(1, 5):
        USER_DN = ("uid=member%d,%s" % (idx, DEFAULT_SUFFIX))
        try:
            topology_m2.ms["supplier1"].add_s(Entry((USER_DN,
                                                     {'objectclass': 'top extensibleObject'.split(),
                                                      'uid': 'member%d' % (idx)})))
        except ldap.LDAPError as e:
            log.fatal('Failed to add user (%s): error %s' % (USER_DN, str(e)))
            assert False

    _check_entry_exist(topology_m2.ms["supplier2"], "uid=member4,%s" % (DEFAULT_SUFFIX))
    _add_group_with_members(topology_m2)
    _check_entry_exist(topology_m2.ms["supplier2"], GROUP_DN)

    # For a regular update memberof must be on both sides
    # (the plugin is enabled on both suppliers)
    time.sleep(5)
    _check_memberof(topology_m2.ms["supplier1"], True)
    _check_memberof(topology_m2.ms["supplier2"], True)

    # reinit with fractional definition
    ents = topology_m2.ms["supplier1"].agreement.list(suffix=SUFFIX)
    assert len(ents) == 1
    topology_m2.ms["supplier1"].agreement.init(SUFFIX, HOST_SUPPLIER_2, PORT_SUPPLIER_2)
    topology_m2.ms["supplier1"].waitForReplInit(ents[0].dn)

    # For a total update memberof must be on both sides because
    # memberOf is NOT excluded from total init
    time.sleep(5)
    _check_memberof(topology_m2.ms["supplier1"], True)
    _check_memberof(topology_m2.ms["supplier2"], True)

    if DEBUGGING:
        # Add debugging steps(if any)...
        pass


if __name__ == '__main__':
    # Run isolated
    # -s for DEBUG mode
    CURRENT_FILE = os.path.realpath(__file__)
    pytest.main("-s %s" % CURRENT_FILE)
# ldbm backend config entry and the transaction-wait switch under test
ldbm_config = "cn=config,%s" % (DN_LDBM)
txn_begin_flag = "nsslapd-db-transaction-wait"
TEST_USER_DN = 'cn=test,%s' % SUFFIX
TEST_USER = "test"


def _check_configured_value(topology_st, attr=txn_begin_flag, expected_value=None, required=False):
    """Read 'attr' from the ldbm config entry; when present (or when
    'required' is True, in which case presence is asserted) check that it
    equals 'expected_value'."""
    entries = topology_st.standalone.search_s(ldbm_config, ldap.SCOPE_BASE, 'cn=config')
    if required:
        assert (entries[0].hasValue(attr))
    if entries[0].hasValue(attr):
        topology_st.standalone.log.info('Current value is %s' % entries[0].getValue(attr))
        assert (entries[0].getValue(attr) == ensure_bytes(expected_value))


def _update_db(topology_st):
    """Prove the DB accepts writes: add a test user, then delete it."""
    topology_st.standalone.add_s(
        Entry((TEST_USER_DN, {'objectclass': "top person organizationalPerson inetOrgPerson".split(),
                              'cn': TEST_USER,
                              'sn': TEST_USER,
                              'givenname': TEST_USER})))
    topology_st.standalone.delete_s(TEST_USER_DN)


def test_ticket49076(topo):
    """Toggle nsslapd-db-transaction-wait on/off/unset and verify the
    configured value each time, that updates still work in every mode,
    and that an explicit setting survives a restart.
    """

    # default is DB_TXN_NOWAIT, i.e. the flag reads back "off"
    _check_configured_value(topo, expected_value="off")
    # test we are able to update the DB
    _update_db(topo)

    # switch to wait mode
    topo.standalone.modify_s(ldbm_config,
                             [(ldap.MOD_REPLACE, txn_begin_flag, b"on")])
    _check_configured_value(topo, expected_value="on")
    _update_db(topo)

    # switch back to "normal" (no-wait) mode
    topo.standalone.modify_s(ldbm_config,
                             [(ldap.MOD_REPLACE, txn_begin_flag, b"off")])
    _check_configured_value(topo, expected_value="off")
    _update_db(topo)

    # check that the setting is not reset by restart
    topo.standalone.modify_s(ldbm_config,
                             [(ldap.MOD_REPLACE, txn_begin_flag, b"on")])
    _check_configured_value(topo, expected_value="on")
    _update_db(topo)
    topo.standalone.restart(timeout=10)
    _check_configured_value(topo, expected_value="on")
    _update_db(topo)

    # delete the attribute: the value falls back to the default ("off"),
    # and stays there across a restart
    topo.standalone.modify_s(ldbm_config,
                             [(ldap.MOD_DELETE, txn_begin_flag, None)])
    _check_configured_value(topo, expected_value="off")
    _update_db(topo)
    topo.standalone.restart(timeout=10)
    _check_configured_value(topo, expected_value="off")
    _update_db(topo)

    if DEBUGGING:
        # Add debugging steps(if any)...
        pass


if __name__ == '__main__':
    # Run isolated
    # -s for DEBUG mode
    CURRENT_FILE = os.path.realpath(__file__)
    pytest.main("-s %s" % CURRENT_FILE)
USER_DN = 'uid=testuser,dc=example,dc=com'
# The same deny-read ACI with the wildcard target attribute spelled in
# different cases/lengths; wildcard matching must be case insensitive.
acis = ['(targetattr != "tele*") (version 3.0;acl "test case";allow (read,compare,search)(userdn = "ldap:///anyone");)',
        '(targetattr != "TELE*") (version 3.0;acl "test case";allow (read,compare,search)(userdn = "ldap:///anyone");)',
        '(targetattr != "telephonenum*") (version 3.0;acl "test case";allow (read,compare,search)(userdn = "ldap:///anyone");)',
        '(targetattr != "TELEPHONENUM*") (version 3.0;acl "test case";allow (read,compare,search)(userdn = "ldap:///anyone");)']


def test_ticket49095(topo):
    """Check that target attrbiutes with wildcards are case insensitive
    """

    # Add an entry carrying a telephonenumber
    try:
        topo.standalone.add_s(Entry((USER_DN, {
            'objectclass': 'top extensibleObject'.split(),
            'uid': 'testuser',
            'telephonenumber': '555-555-5555'
        })))
    except ldap.LDAPError as e:
        log.fatal('Failed to add test user: ' + e.args[0]['desc'])
        assert False

    for aci in acis:
        # Install the ACI variant under test
        try:
            topo.standalone.modify_s(DEFAULT_SUFFIX,
                                     [(ldap.MOD_REPLACE, 'aci', ensure_bytes(aci))])
        except ldap.LDAPError as e:
            log.fatal('Failed to set aci: ' + aci + ': ' + e.args[0]['desc'])
            assert False

        # Bind anonymously so the deny ACI applies to the search below
        try:
            topo.standalone.simple_bind_s("", "")
        except ldap.LDAPError as e:
            log.fatal('Failed to bind anonymously: ' + e.args[0]['desc'])
            assert False

        # Search for the protected attribute - should not get any results
        try:
            entry = topo.standalone.search_s(DEFAULT_SUFFIX, ldap.SCOPE_BASE,
                                             'telephonenumber=*')
            if entry:
                log.fatal('The entry was incorrectly returned')
                assert False
        except ldap.LDAPError as e:
            log.fatal('Failed to search anonymously: ' + e.args[0]['desc'])
            assert False

        # Bind back as the root DN so we can update acis on the next pass
        try:
            topo.standalone.simple_bind_s(DN_DM, PASSWORD)
        except ldap.LDAPError as e:
            # fixed: original message wrongly said "anonymously" here
            log.fatal('Failed to bind as Directory Manager: ' + e.args[0]['desc'])
            assert False

    log.info("Test Passed")


if __name__ == '__main__':
    # Run isolated
    # -s for DEBUG mode
    CURRENT_FILE = os.path.realpath(__file__)
    pytest.main("-s %s" % CURRENT_FILE)
+ """ + # Generate a test ldif (100k entries) + ldif_dir = topology_st.standalone.get_ldif_dir() + import_ldif = ldif_dir + '/49104.ldif' + try: + topology_st.standalone.buildLDIF(100000, import_ldif) + except OSError as e: + log.fatal('ticket 49104: failed to create test ldif,\ + error: %s - %s' % (e.errno, e.strerror)) + assert False + + # Online + try: + topology_st.standalone.tasks.importLDIF(suffix=DEFAULT_SUFFIX, + input_file=import_ldif, + args={TASK_WAIT: True}) + except ValueError: + log.fatal('ticket 49104: Online import failed') + assert False + +def test_ticket49104(topology_st): + """ + Run dbscan with valgrind changing the truncate size. + If there is no Invalid report, we can claim the test has passed. + """ + log.info("Test ticket 49104 -- dbscan crashes by memory corruption") + myvallog = '/tmp/val49104.out' + if os.path.exists(myvallog): + os.remove(myvallog) + prog = os.path.join(topology_st.standalone.get_bin_dir(), 'dbscan-bin') + valcmd = 'valgrind --tool=memcheck --leak-check=yes --num-callers=40 --log-file=%s ' % myvallog + if topology_st.standalone.has_asan(): + valcmd = '' + id2entry = os.path.join(topology_st.standalone.dbdir, DEFAULT_BENAME, 'id2entry.db') + + for i in range(20, 30): + cmd = valcmd + '%s -f %s -t %d -R' % (prog, id2entry , i) + log.info('Running script: %s' % cmd) + proc = subprocess.Popen(cmd.split(), stdout=subprocess.PIPE) + outs = '' + try: + outs = proc.communicate() + except OSError as e: + log.exception('dbscan: error executing (%s): error %d - %s' % + (cmd, e.errno, e.strerror)) + raise e + + # If we have asan, this fails in other spectacular ways instead + if not topology_st.standalone.has_asan(): + grep = 'egrep "Invalid read|Invalid write" %s' % myvallog + p = os.popen(grep, "r") + l = p.readline() + if 'Invalid' in l: + log.fatal('ERROR: valgrind reported invalid read/write: %s' % l) + assert False + + log.info('ticket 49104 - PASSED') + + +if __name__ == '__main__': + # Run isolated + # -s for DEBUG mode + 
CURRENT_FILE = os.path.realpath(__file__) + pytest.main("-s %s" % CURRENT_FILE) diff --git a/dirsrvtests/tests/tickets/ticket49121_test.py b/dirsrvtests/tests/tickets/ticket49121_test.py new file mode 100644 index 0000000..3c6cf79 --- /dev/null +++ b/dirsrvtests/tests/tickets/ticket49121_test.py @@ -0,0 +1,206 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2017 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. +# --- END COPYRIGHT BLOCK --- +# +import pytest +import codecs +from lib389.tasks import * +from lib389.utils import * +from lib389.topologies import topology_m2 + +from lib389._constants import DATA_DIR, DEFAULT_SUFFIX, VALGRIND_INVALID_STR + +# Skip on older versions +pytestmark = [pytest.mark.tier2, + pytest.mark.skipif(ds_is_older('1.3.6'), reason="Not implemented")] + +DEBUGGING = os.getenv('DEBUGGING', False) + +if DEBUGGING: + logging.getLogger(__name__).setLevel(logging.DEBUG) +else: + logging.getLogger(__name__).setLevel(logging.INFO) +log = logging.getLogger(__name__) + +ds_paths = Paths() + + +@pytest.mark.skipif(not ds_paths.asan_enabled, reason="Don't run if ASAN is not enabled") +def test_ticket49121(topology_m2): + """ + Creating some users. + Deleting quite a number of attributes which may or may not be in the entry. + The attribute type names are to be long. + Under the conditions, it did not estimate the size of string format entry + shorter than the real size and caused the Invalid write / server crash. 
+ """ + + utf8file = os.path.join(topology_m2.ms["supplier1"].getDir(__file__, DATA_DIR), "ticket49121/utf8str.txt") + utf8obj = codecs.open(utf8file, 'r', 'utf-8') + utf8strorig = utf8obj.readline() + utf8str = ensure_bytes(utf8strorig).rstrip(b'\n') + utf8obj.close() + assert (utf8str) + + # Get the sbin directory so we know where to replace 'ns-slapd' + sbin_dir = topology_m2.ms["supplier1"].get_sbin_dir() + log.info('sbin_dir: %s' % sbin_dir) + + # stop M1 to do the next updates + topology_m2.ms["supplier1"].stop(30) + topology_m2.ms["supplier2"].stop(30) + + # wait for the servers shutdown + time.sleep(5) + + # start M1 to do the next updates + topology_m2.ms["supplier1"].start() + topology_m2.ms["supplier2"].start() + + for idx in range(1, 10): + try: + USER_DN = 'CN=user%d,ou=People,%s' % (idx, DEFAULT_SUFFIX) + log.info('adding user %s...' % (USER_DN)) + topology_m2.ms["supplier1"].add_s(Entry((USER_DN, + {'objectclass': 'top person extensibleObject'.split(' '), + 'cn': 'user%d' % idx, + 'sn': 'SN%d-%s' % (idx, utf8str)}))) + except ldap.LDAPError as e: + log.fatal('Failed to add user (%s): error %s' % (USER_DN, e.args[0]['desc'])) + assert False + + for i in range(1, 3): + time.sleep(3) + for idx in range(1, 10): + try: + USER_DN = 'CN=user%d,ou=People,%s' % (idx, DEFAULT_SUFFIX) + log.info('[%d] modify user %s - replacing attrs...' 
% (i, USER_DN)) + topology_m2.ms["supplier1"].modify_s( + USER_DN, [(ldap.MOD_REPLACE, 'cn', b'user%d' % idx), + (ldap.MOD_REPLACE, 'ABCDEFGH_ID', [b'239001ad-06dd-e011-80fa-c00000ad5174', + b'240f0878-c552-e411-b0f3-000006040037']), + (ldap.MOD_REPLACE, 'attr1', b'NEW_ATTR'), + (ldap.MOD_REPLACE, 'attr20000000000000000000000000000000000000000', None), + (ldap.MOD_REPLACE, 'attr30000000000000000000000000000000000000000000000', None), + (ldap.MOD_REPLACE, 'attr40000000000000000000000000000', None), + (ldap.MOD_REPLACE, 'attr50000000000000000000000000000000000000', None), + (ldap.MOD_REPLACE, 'attr600000000000000000000000000000000000000000', None), + (ldap.MOD_REPLACE, 'attr7000000000000000000000000000000', None), + (ldap.MOD_REPLACE, 'attr8000000000000000000000000000000000', None), + (ldap.MOD_REPLACE, 'attr900000000000000000', None), + (ldap.MOD_REPLACE, 'attr1000000000000000000000', None), + (ldap.MOD_REPLACE, 'attr110000000000000', None), + (ldap.MOD_REPLACE, 'attr120000000000000', None), + (ldap.MOD_REPLACE, 'attr130000000000000000000000000000000000000000000000', None), + (ldap.MOD_REPLACE, 'attr140000000000000000000000000000', None), + (ldap.MOD_REPLACE, 'attr150000000000000000000000000000000000000000000000000000000000000', + None), + (ldap.MOD_REPLACE, 'attr1600000000000000000000000000000000000000000', None), + (ldap.MOD_REPLACE, 'attr17000000000000000000000000000000', None), + (ldap.MOD_REPLACE, 'attr18000000000000000000000000000000000', None), + (ldap.MOD_REPLACE, 'attr1900000000000000000', None), + (ldap.MOD_REPLACE, 'attr2000000000000000000000', None), + (ldap.MOD_REPLACE, 'attr210000000000000000000000000000000000000000', None), + (ldap.MOD_REPLACE, 'attr220000000000000000000000000000000000000000', None), + (ldap.MOD_REPLACE, 'attr230000000000000000000000000000000000000000', None), + (ldap.MOD_REPLACE, 'attr240000000000000000000000000000000000000000', None), + (ldap.MOD_REPLACE, 'attr25000000000000000000000000000000', None), + (ldap.MOD_REPLACE, 
'attr260000000000000000000000000000000000000000', None), + (ldap.MOD_REPLACE, + 'attr270000000000000000000000000000000000000000000000000000000000000000000000', None), + (ldap.MOD_REPLACE, 'attr280000000000000000000000000000000000000000', None), + (ldap.MOD_REPLACE, 'attr29000000000000000000000000000000000000000000000000000000000', + None), + (ldap.MOD_REPLACE, 'attr3000000000000000000000000', None), + (ldap.MOD_REPLACE, 'attr310000000000000000000000000000000000000000000000', None), + (ldap.MOD_REPLACE, 'attr320000000000000000000000000000000000000000000000', None), + (ldap.MOD_REPLACE, 'attr330000000000000000000000000000000000000000000000', None), + (ldap.MOD_REPLACE, 'attr340000000000000000000000000000000000000000000000', None), + (ldap.MOD_REPLACE, 'attr350000000000000000000000000000000000000000', None), + (ldap.MOD_REPLACE, 'attr360000000000000000000000000000000000000000000000', None), + (ldap.MOD_REPLACE, 'attr370000000000000000000000000000000000000000000000', None), + (ldap.MOD_REPLACE, 'attr380000000000000000000000000000000000000000000000', None), + (ldap.MOD_REPLACE, 'attr390000000000000000000000000000000000000000000000', None), + (ldap.MOD_REPLACE, 'attr4000000000000000000000000', None), + (ldap.MOD_REPLACE, 'attr410000000000000000000000000000', None), + (ldap.MOD_REPLACE, 'attr420000000000000000000000000000', None), + (ldap.MOD_REPLACE, 'attr430000000000000000000000000000', None), + (ldap.MOD_REPLACE, 'attr440000000000000000000000000000', None), + (ldap.MOD_REPLACE, 'attr4500000000000000000000000000000000000000', None), + (ldap.MOD_REPLACE, 'attr460000000000000000000000000000', None), + (ldap.MOD_REPLACE, 'attr470000000000000000000000000000000000000000000000000000000000', + None), + (ldap.MOD_REPLACE, 'attr480000000000000000000000000000', None), + (ldap.MOD_REPLACE, 'attr49000000000000000000000000000000000000000000000', None), + (ldap.MOD_REPLACE, 'attr5000000000000000000000000', None), + (ldap.MOD_REPLACE, 'attr510000000000000', None), + (ldap.MOD_REPLACE, 
'attr520000000000000', None), + (ldap.MOD_REPLACE, 'attr530000000000000', None), + (ldap.MOD_REPLACE, 'attr540000000000000', None), + (ldap.MOD_REPLACE, 'attr550000000000000000000000000000000000000', None), + (ldap.MOD_REPLACE, 'attr5600000000000000000000000000000000000000000', None), + (ldap.MOD_REPLACE, 'attr57000000000000000000000000000000', None), + (ldap.MOD_REPLACE, 'attr58000000000000000000000000000000000', None), + (ldap.MOD_REPLACE, 'attr5900000000000000000', None), + (ldap.MOD_REPLACE, 'attr6000000000000000000000000', None), + (ldap.MOD_REPLACE, 'attr6100000000000000000000000000000000000000000', None), + (ldap.MOD_REPLACE, 'attr6200000000000000000000000000000000000000000', None), + (ldap.MOD_REPLACE, 'attr6300000000000000000000000000000000000000000', None), + (ldap.MOD_REPLACE, 'attr6400000000000000000000000000000000000000000', None), + (ldap.MOD_REPLACE, + 'attr65000000000000000000000000000000000000000000000000000000000000000000000000000000', + None), + (ldap.MOD_REPLACE, 'attr6600000000000000000000000000000000000000000', None), + (ldap.MOD_REPLACE, 'attr6700000000000000000000000000000000000000000', None), + (ldap.MOD_REPLACE, 'attr6800000000000000000000000000000000000000000', None), + (ldap.MOD_REPLACE, 'attr690000000000000000000000000000000000000000000000000000000000', + None), + (ldap.MOD_REPLACE, 'attr7000000000000000000000000', None), + (ldap.MOD_REPLACE, 'attr71000000000000000000000000000000', None), + (ldap.MOD_REPLACE, 'attr72000000000000000000000000000000', None), + (ldap.MOD_REPLACE, 'attr73000000000000000000000000000000', None), + (ldap.MOD_REPLACE, 'attr74000000000000000000000000000000', None), + (ldap.MOD_REPLACE, 'attr750000000000000000000000000000000000000000000', None), + (ldap.MOD_REPLACE, 'attr7600000000000000000000000000000000000000000', None), + (ldap.MOD_REPLACE, 'attr77000000000000000000000000000000', None), + ( + ldap.MOD_REPLACE, 'attr78000000000000000000000000000000000000000000000000000000000000000', + None), + (ldap.MOD_REPLACE, 
'attr79000000000000000000000000000000', None), + (ldap.MOD_REPLACE, 'attr800000000000000000000000000000000000000000000000', None), + (ldap.MOD_REPLACE, 'attr81000000000000000000000000000000000', None), + (ldap.MOD_REPLACE, 'attr82000000000000000000000000000000000', None), + (ldap.MOD_REPLACE, 'attr83000000000000000000000000000000000', None), + (ldap.MOD_REPLACE, 'attr84000000000000000000000000000000000', None), + (ldap.MOD_REPLACE, 'attr85000000000000000000000000000000000000000000000000000', None), + (ldap.MOD_REPLACE, 'attr8600000000000000000000000000000000000000000', None), + (ldap.MOD_REPLACE, 'attr87000000000000000000000000000000', None), + (ldap.MOD_REPLACE, 'attr88000000000000000000000000000000000', None), + (ldap.MOD_REPLACE, 'attr89000000000000000000000000000000000', None), + (ldap.MOD_REPLACE, 'attr9000000000000000000000000000000000000000000000000000', None)]) + except ldap.LDAPError as e: + log.fatal('Failed to modify user - deleting attrs (%s): error %s' % (USER_DN, e.args[0]['desc'])) + + # Stop supplier2 + topology_m2.ms["supplier1"].stop(30) + topology_m2.ms["supplier2"].stop(30) + + # start M1 to do the next updates + topology_m2.ms["supplier1"].start() + topology_m2.ms["supplier2"].start() + + log.info('Testcase PASSED') + if DEBUGGING: + # Add debugging steps(if any)... + pass + + +if __name__ == '__main__': + # Run isolated + # -s for DEBUG mode + CURRENT_FILE = os.path.realpath(__file__) + pytest.main("-s %s" % CURRENT_FILE) diff --git a/dirsrvtests/tests/tickets/ticket49122_test.py b/dirsrvtests/tests/tickets/ticket49122_test.py new file mode 100644 index 0000000..34f63d1 --- /dev/null +++ b/dirsrvtests/tests/tickets/ticket49122_test.py @@ -0,0 +1,102 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2022 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. 
USER_DN = 'uid=user,' + DEFAULT_SUFFIX
ROLE_DN = 'cn=Filtered_Role_That_Includes_Empty_Role,' + DEFAULT_SUFFIX
# Role filters referencing a non-existent role, plus nested/combined
# forms that used to crash the server
filters = ['nsrole=cn=empty,dc=example,dc=com',
           '(nsrole=cn=empty,dc=example,dc=com)',
           '(&(nsrole=cn=empty,dc=example,dc=com))',
           '(!(nsrole=cn=empty,dc=example,dc=com))',
           '(&(|(objectclass=person)(sn=app*))(userpassword=*))',
           '(&(|(objectclass=person)(nsrole=cn=empty,dc=example,dc=com))(userpassword=*))',
           '(&(|(nsrole=cn=empty,dc=example,dc=com)(sn=app*))(userpassword=*))',
           '(&(|(objectclass=person)(sn=app*))(nsrole=cn=empty,dc=example,dc=com))',
           '(&(|(&(cn=*)(objectclass=person)(nsrole=cn=empty,dc=example,dc=com)))(uid=*))']


def test_ticket49122(topo):
    """Search for non-existant role and make sure the server does not crash
    """

    # Enable roles plugin
    topo.standalone.plugins.enable(name=PLUGIN_ROLES)
    topo.standalone.restart()

    # Add test user
    try:
        topo.standalone.add_s(Entry((
            USER_DN, {'objectclass': "top extensibleObject".split(),
                      'uid': 'user'})))
    except ldap.LDAPError as e:
        topo.standalone.log.fatal('Failed to add test user: error ' + str(e))
        assert False

    if DEBUGGING:
        print("Attach gdb")
        time.sleep(20)

    # Loop over filters
    for role_filter in filters:
        log.info('Testing filter: ' + role_filter)

        # Add invalid role
        try:
            topo.standalone.add_s(Entry((
                ROLE_DN, {'objectclass': ['top', 'ldapsubentry', 'nsroledefinition',
                                          'nscomplexroledefinition', 'nsfilteredroledefinition'],
                          'cn': 'Filtered_Role_That_Includes_Empty_Role',
                          'nsRoleFilter': role_filter,
                          'description': 'A filtered role with filter that will crash the server'})))
        except ldap.LDAPError as e:
            # str(e) instead of e.message['desc']: LDAPError.message does not
            # exist on python3 (matches the other handlers in this test)
            topo.standalone.log.fatal('Failed to add filtered role: error ' + str(e))
            assert False

        # Search for the role; the server must survive this
        try:
            topo.standalone.search_s(USER_DN, ldap.SCOPE_SUBTREE, 'objectclass=*', ['nsrole'])
        except ldap.LDAPError as e:
            topo.standalone.log.fatal('Search failed: error ' + str(e))
            assert False

        # Cleanup before the next filter
        try:
            topo.standalone.delete_s(ROLE_DN)
        except ldap.LDAPError as e:
            topo.standalone.log.fatal('delete failed: error ' + str(e))
            assert False
        time.sleep(1)

    topo.standalone.log.info('Test Passed')


if __name__ == '__main__':
    # Run isolated
    # -s for DEBUG mode
    CURRENT_FILE = os.path.realpath(__file__)
    pytest.main("-s %s" % CURRENT_FILE)
def restore_supplier4(topology_m4):
    """In our tests will always be removing supplier 4, so we need a common
    way to restore it for another test
    """

    log.info('Restoring supplier 4...')

    # Re-enable replication on supplier 4 and re-create both agreements
    M4 = topology_m4.ms["supplier4"]
    M1 = topology_m4.ms["supplier1"]
    repl = ReplicationManager(SUFFIX)
    repl.join_supplier(M1, M4)
    repl.ensure_agreement(M4, M1)
    repl.ensure_agreement(M1, M4)

    # Test Replication is working m1 -> m2/m3/m4
    for num in range(2, 5):
        if topology_m4.ms["supplier1"].testReplication(DEFAULT_SUFFIX, topology_m4.ms["supplier{}".format(num)]):
            log.info('Replication is working m1 -> m{}.'.format(num))
        else:
            log.fatal('restore_supplier4: Replication is not working from m1 -> m{}.'.format(num))
            assert False
        time.sleep(1)

    # Check replication is working from supplier 4 to supplier1...
    if topology_m4.ms["supplier4"].testReplication(DEFAULT_SUFFIX, topology_m4.ms["supplier1"]):
        log.info('Replication is working m4 -> m1.')
    else:
        # fixed typo: original message said "m4 -> 1."
        log.fatal('restore_supplier4: Replication is not working from m4 -> m1.')
        assert False
    time.sleep(5)

    log.info('Supplier 4 has been successfully restored.')


def test_ticket49180(topology_m4):
    """Verify replication on all suppliers, disable/remove supplier 4,
    restore it, then check no 'attrlist_replace' errors were logged on
    suppliers 1-3."""

    log.info('Running test_ticket49180...')

    log.info('Check that replication works properly on all suppliers')
    agmt_nums = {"supplier1": ("2", "3", "4"),
                 "supplier2": ("1", "3", "4"),
                 "supplier3": ("1", "2", "4"),
                 "supplier4": ("1", "2", "3")}

    for inst_name, agmts in agmt_nums.items():
        for num in agmts:
            if not topology_m4.ms[inst_name].testReplication(DEFAULT_SUFFIX, topology_m4.ms["supplier{}".format(num)]):
                log.fatal(
                    'test_replication: Replication is not working between {} and supplier {}.'.format(inst_name,
                                                                                                      num))
                assert False

    # Disable supplier 4
    log.info('test_clean: disable supplier 4...')
    topology_m4.ms["supplier4"].replica.disableReplication(DEFAULT_SUFFIX)

    # Remove the agreements from the other suppliers that point to supplier 4
    remove_supplier4_agmts("test_clean", topology_m4)

    # Cleanup - restore supplier 4
    restore_supplier4(topology_m4)

    # No attrlist_replace errors should have been logged on m1-m3;
    # the pipe is closed deterministically (original leaked each handle)
    for num in range(1, 4):
        with os.popen('egrep "attrlist_replace" %s | wc -l' % topology_m4.ms["supplier%d" % num].errlog) as attr_errors:
            ecount = int(attr_errors.readline().rstrip())
        log.info("Errors found on m%d: %d" % (num, ecount))
        assert (ecount == 0)
def _add_group_with_members(topo, group_dn):
    """Create 'group_dn' and add uid=member1..4 as member values."""
    # Create group
    try:
        topo.standalone.add_s(Entry((group_dn,
                                     {'objectclass': 'top groupofnames extensibleObject'.split(),
                                      'cn': 'group'})))
    except ldap.LDAPError as e:
        log.fatal('Failed to add group: error ' + e.args[0]['desc'])
        assert False

    # Add members to the group
    log.info('Adding members to the group...')
    for idx in range(1, 5):
        MEMBER_VAL = ("uid=member%d,%s" % (idx, DEFAULT_SUFFIX))
        try:
            topo.standalone.modify_s(group_dn,
                                     [(ldap.MOD_ADD,
                                       'member',
                                       ensure_bytes(MEMBER_VAL))])
        except ldap.LDAPError as e:
            log.fatal('Failed to update group: member (%s) - error: %s' %
                      (MEMBER_VAL, e.args[0]['desc']))
            assert False


def _check_memberof(topo, member=None, memberof=True, group_dn=None):
    """Assert that entry 'member' does (memberof=True) or does not carry
    'group_dn' among its memberof values.

    NOTE: the original file defined _check_memberof twice; the first
    definition referenced an undefined 'presence_flag' (a NameError if
    ever called) and was shadowed by this one, so only this definition
    is kept.
    """
    ent = topo.standalone.getEntry(member, ldap.SCOPE_BASE, "(objectclass=*)")
    if memberof:
        assert group_dn
        assert ent.hasAttr('memberof') and ensure_bytes(group_dn) in ent.getValues('memberof')
    else:
        if ent.hasAttr('memberof'):
            assert ensure_bytes(group_dn) not in ent.getValues('memberof')


def test_ticket49184(topo):
    """memberOf with nested groups: each user gets a memberof value per
    group it belongs to, and deleting one group removes only that
    group's memberof value."""

    topo.standalone.plugins.enable(name=PLUGIN_MEMBER_OF)
    topo.standalone.restart(timeout=10)

    #
    # create some users
    #
    log.info('create users and group...')
    for idx in range(1, 5):
        USER_DN = ("uid=member%d,%s" % (idx, DEFAULT_SUFFIX))
        try:
            topo.standalone.add_s(Entry((USER_DN,
                                         {'objectclass': 'top extensibleObject'.split(),
                                          'uid': 'member%d' % (idx)})))
        except ldap.LDAPError as e:
            log.fatal('Failed to add user (%s): error %s' % (USER_DN, e.args[0]['desc']))
            assert False

    # add all users in GROUP_DN_1 and checks each users is memberof GROUP_DN_1
    _add_group_with_members(topo, GROUP_DN_1)
    for idx in range(1, 5):
        USER_DN = ("uid=member%d,%s" % (idx, DEFAULT_SUFFIX))
        _check_memberof(topo, member=USER_DN, memberof=True, group_dn=GROUP_DN_1)

    # add all users in GROUP_DN_2 and checks each users is memberof GROUP_DN_2
    _add_group_with_members(topo, GROUP_DN_2)
    for idx in range(1, 5):
        USER_DN = ("uid=member%d,%s" % (idx, DEFAULT_SUFFIX))
        _check_memberof(topo, member=USER_DN, memberof=True, group_dn=GROUP_DN_2)

    # add the level 2, 3 and 4 group
    for super_grp in (SUPER_GRP1, SUPER_GRP2, SUPER_GRP3):
        topo.standalone.add_s(Entry((super_grp,
                                     {'objectclass': 'top groupofnames extensibleObject'.split(),
                                      'cn': 'super_grp'})))
    topo.standalone.modify_s(SUPER_GRP1,
                             [(ldap.MOD_ADD, 'member', ensure_bytes(GROUP_DN_1)),
                              (ldap.MOD_ADD, 'member', ensure_bytes(GROUP_DN_2))])
    topo.standalone.modify_s(SUPER_GRP2,
                             [(ldap.MOD_ADD, 'member', ensure_bytes(GROUP_DN_1)),
                              (ldap.MOD_ADD, 'member', ensure_bytes(GROUP_DN_2))])

    # NOTE(review): the original had a stray 'return' here that made the
    # delete-and-verify tail below unreachable; it has been removed so
    # the test exercises its cleanup checks again.
    topo.standalone.delete_s(GROUP_DN_2)
    for idx in range(1, 5):
        USER_DN = ("uid=member%d,%s" % (idx, DEFAULT_SUFFIX))
        _check_memberof(topo, member=USER_DN, memberof=True, group_dn=GROUP_DN_1)
        _check_memberof(topo, member=USER_DN, memberof=False, group_dn=GROUP_DN_2)

    if DEBUGGING:
        # Add debugging steps(if any)...
        pass


if __name__ == '__main__':
    # Run isolated
    # -s for DEBUG mode
    CURRENT_FILE = os.path.realpath(__file__)
    pytest.main("-s %s" % CURRENT_FILE)
['top', 'LdapSubEntry', + 'nsRoleDefinition', + 'nsSimpleRoleDefinition', + 'nsManagedRoleDefinition'], + 'cn': 'nsManagedDisabledRole'}))) + except ldap.LDAPError as e: + log.fatal('Failed to add managed role: error ' + e.args[0]['desc']) + assert False + + try: + topo.standalone.add_s(Entry(('cn=nsDisabledRole,' + MY_SUFFIX, { + 'objectclass': ['top', 'LdapSubEntry', + 'nsRoleDefinition', + 'nsComplexRoleDefinition', + 'nsNestedRoleDefinition'], + 'cn': 'nsDisabledRole', + 'nsRoledn': 'cn=nsManagedDisabledRole,' + MY_SUFFIX}))) + except ldap.LDAPError as e: + log.fatal('Failed to add nested role: error ' + e.args[0]['desc']) + assert False + + try: + topo.standalone.add_s(Entry(('cn=nsAccountInactivationTmp,' + MY_SUFFIX, { + 'objectclass': ['top', 'nsContainer'], + 'cn': 'nsAccountInactivationTmp'}))) + except ldap.LDAPError as e: + log.fatal('Failed to add container: error ' + e.args[0]['desc']) + assert False + + try: + topo.standalone.add_s(Entry(('cn=\"cn=nsDisabledRole,' + MY_SUFFIX + '\",cn=nsAccountInactivationTmp,' + MY_SUFFIX, { + 'objectclass': ['top', 'extensibleObject', 'costemplate', + 'ldapsubentry'], + 'nsAccountLock': 'true'}))) + except ldap.LDAPError as e: + log.fatal('Failed to add cos1: error ' + e.args[0]['desc']) + assert False + + try: + topo.standalone.add_s(Entry(('cn=nsAccountInactivation_cos,' + MY_SUFFIX, { + 'objectclass': ['top', 'LdapSubEntry', 'cosSuperDefinition', + 'cosClassicDefinition'], + 'cn': 'nsAccountInactivation_cos', + 'cosTemplateDn': 'cn=nsAccountInactivationTmp,' + MY_SUFFIX, + 'cosSpecifier': 'nsRole', + 'cosAttribute': 'nsAccountLock operational'}))) + except ldap.LDAPError as e: + log.fatal('Failed to add cos2 : error ' + e.args[0]['desc']) + assert False + + # + # Add test entry + # + try: + topo.standalone.add_s(Entry((USER_DN, { + 'objectclass': 'top extensibleObject'.split(), + 'uid': 'user', + 'userpassword': 'password', + }))) + except ldap.LDAPError as e: + log.fatal('Failed to add user: error ' + 
e.args[0]['desc']) + assert False + + # + # Inactivate the user account + # + try: + topo.standalone.modify_s(USER_DN, + [(ldap.MOD_ADD, + 'nsRoleDN', + ensure_bytes('cn=nsManagedDisabledRole,' + MY_SUFFIX))]) + except ldap.LDAPError as e: + log.fatal('Failed to disable user: error ' + e.args[0]['desc']) + assert False + + time.sleep(1) + + # Bind as user (should fail) + try: + topo.standalone.simple_bind_s(USER_DN, 'password') + log.error("Bind incorrectly worked") + assert False + except ldap.UNWILLING_TO_PERFORM: + log.info('Got error 53 as expected') + except ldap.LDAPError as e: + log.fatal('Bind has unexpected error ' + e.args[0]['desc']) + assert False + + # Bind as root DN + try: + topo.standalone.simple_bind_s(DN_DM, PASSWORD) + except ldap.LDAPError as e: + log.fatal('RootDN Bind has unexpected error ' + e.args[0]['desc']) + assert False + + # + # Delete suffix + # + log.info('Delete the suffix and children...') + try: + index_entries = topo.standalone.search_s( + SUFFIX_DN, ldap.SCOPE_SUBTREE, 'objectclass=top') + except ldap.LDAPError as e: + log.error('Failed to search: %s - error %s' % (SUFFIX_DN, str(e))) + + for entry in reversed(index_entries): + try: + log.info("Deleting: " + entry.dn) + if entry.dn != SUFFIX_DN and entry.dn != INDEX_DN: + topo.standalone.search_s(entry.dn, + ldap.SCOPE_ONELEVEL, + 'objectclass=top') + topo.standalone.delete_s(entry.dn) + except ldap.LDAPError as e: + log.fatal('Failed to delete entry: %s - error %s' % + (entry.dn, str(e))) + assert False + + log.info("Test Passed") + + +if __name__ == '__main__': + # Run isolated + # -s for DEBUG mode + CURRENT_FILE = os.path.realpath(__file__) + pytest.main("-s %s" % CURRENT_FILE) + diff --git a/dirsrvtests/tests/tickets/ticket49227_test.py b/dirsrvtests/tests/tickets/ticket49227_test.py new file mode 100644 index 0000000..a58c627 --- /dev/null +++ b/dirsrvtests/tests/tickets/ticket49227_test.py @@ -0,0 +1,157 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2022 Red Hat, 
Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. +# --- END COPYRIGHT BLOCK --- +# +import os +import time +import ldap +import logging +import pytest +from lib389._constants import * +from lib389.properties import * +from lib389.tasks import * +from lib389.utils import * +from lib389.topologies import topology_st as topo + +pytestmark = pytest.mark.tier2 + +DEBUGGING = os.getenv("DEBUGGING", default=False) +if DEBUGGING: + logging.getLogger(__name__).setLevel(logging.DEBUG) +else: + logging.getLogger(__name__).setLevel(logging.INFO) +log = logging.getLogger(__name__) + +DEFAULT_LEVEL = b"16384" +COMB_LEVEL = b"73864" # 65536+8192+128+8 = 73864 +COMB_DEFAULT_LEVEL = b"90248" # 65536+8192+128+8+16384 = 90248 + + +def set_level(topo, level): + ''' Set the error log level + ''' + try: + topo.standalone.modify_s("cn=config", [(ldap.MOD_REPLACE, 'nsslapd-errorlog-level', ensure_bytes(level))]) + time.sleep(1) + except ldap.LDAPError as e: + log.fatal('Failed to set loglevel to %s - error: %s' % (level, str(e))) + assert False + + +def get_level(topo): + ''' Set the error log level + ''' + try: + config = topo.standalone.search_s("cn=config", ldap.SCOPE_BASE, "objectclass=top") + time.sleep(1) + return config[0].getValue('nsslapd-errorlog-level') + except ldap.LDAPError as e: + log.fatal('Failed to get loglevel - error: %s' % (str(e))) + assert False + + +def get_log_size(topo): + ''' Get the errors log size + ''' + statinfo = os.stat(topo.standalone.errlog) + return statinfo.st_size + + +def test_ticket49227(topo): + """Set the error log to varying levels, and make sure a search for that value + reflects the expected value (not the bitmasked value. 
+ """ + log_size = get_log_size(topo) + + # Check the default level + level = get_level(topo) + if level != DEFAULT_LEVEL: + log.fatal('Incorrect default logging level: %s' % (level)) + assert False + + # Set connection logging + set_level(topo, '8') + level = get_level(topo) + if level != b'8': + log.fatal('Incorrect connection logging level: %s' % (level)) + assert False + + # Check the actual log + new_size = get_log_size(topo) + if new_size == log_size: + # Size should be different + log.fatal('Connection logging is not working') + assert False + + # Set default logging using zero + set_level(topo, '0') + log_size = get_log_size(topo) + level = get_level(topo) + if level != DEFAULT_LEVEL: + log.fatal('Incorrect default logging level: %s' % (level)) + assert False + + # Check the actual log + new_size = get_log_size(topo) + if new_size != log_size: + # Size should be the size + log.fatal('Connection logging is still on') + assert False + + # Set default logging using the default value + set_level(topo, DEFAULT_LEVEL) + level = get_level(topo) + if level != DEFAULT_LEVEL: + log.fatal('Incorrect default logging level: %s' % (level)) + assert False + + # Check the actual log + new_size = get_log_size(topo) + if new_size != log_size: + # Size should be the size + log.fatal('Connection logging is still on') + assert False + + # Set a combined level that includes the default level + set_level(topo, COMB_DEFAULT_LEVEL) + level = get_level(topo) + if level != COMB_DEFAULT_LEVEL: + log.fatal('Incorrect combined logging level with default level: %s expected %s' % + (level, COMB_DEFAULT_LEVEL)) + assert False + + # Set a combined level that does not includes the default level + set_level(topo, COMB_LEVEL) + level = get_level(topo) + if level != COMB_LEVEL: + log.fatal('Incorrect combined logging level without default level: %s expected %s' % + (level, COMB_LEVEL)) + assert False + + # Check our level is present after a restart - previous level was COMB_LEVEL + 
topo.standalone.restart() + log_size = get_log_size(topo) # Grab the log size for our next check + level = get_level(topo) # This should trigger connection logging + if level != COMB_LEVEL: + log.fatal('Incorrect combined logging level with default level: %s expected %s' % + (level, COMB_LEVEL)) + assert False + + # Now check the actual levels are still working + new_size = get_log_size(topo) + if new_size == log_size: + # Size should be different + log.fatal('Combined logging is not working') + assert False + + +if __name__ == '__main__': + # Run isolated + # -s for DEBUG mode + CURRENT_FILE = os.path.realpath(__file__) + pytest.main("-s %s" % CURRENT_FILE) + diff --git a/dirsrvtests/tests/tickets/ticket49249_test.py b/dirsrvtests/tests/tickets/ticket49249_test.py new file mode 100644 index 0000000..6d2d2ec --- /dev/null +++ b/dirsrvtests/tests/tickets/ticket49249_test.py @@ -0,0 +1,150 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2022 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. 
+# --- END COPYRIGHT BLOCK --- +# +import time +import ldap +import logging +import pytest +from lib389 import DirSrv, Entry, tools, tasks +from lib389.tools import DirSrvTools +from lib389._constants import * +from lib389.properties import * +from lib389.tasks import * +from lib389.utils import * +from lib389.topologies import topology_st as topo + +pytestmark = pytest.mark.tier2 + +DEBUGGING = os.getenv("DEBUGGING", default=False) +if DEBUGGING: + logging.getLogger(__name__).setLevel(logging.DEBUG) +else: + logging.getLogger(__name__).setLevel(logging.INFO) +log = logging.getLogger(__name__) + +COS_BRANCH = 'ou=cos_scope,' + DEFAULT_SUFFIX +COS_DEF = 'cn=cos_definition,' + COS_BRANCH +COS_TEMPLATE = 'cn=cos_template,' + COS_BRANCH +INVALID_USER_WITH_COS = 'cn=cos_user_no_mail,' + COS_BRANCH +VALID_USER_WITH_COS = 'cn=cos_user_with_mail,' + COS_BRANCH + +NO_COS_BRANCH = 'ou=no_cos_scope,' + DEFAULT_SUFFIX +INVALID_USER_WITHOUT_COS = 'cn=no_cos_user_no_mail,' + NO_COS_BRANCH +VALID_USER_WITHOUT_COS = 'cn=no_cos_user_with_mail,' + NO_COS_BRANCH + +def test_ticket49249(topo): + """Write your testcase here... + + Also, if you need any testcase initialization, + please, write additional fixture for that(include finalizer). 
+ """ + # Add the branches + try: + topo.standalone.add_s(Entry((COS_BRANCH, { + 'objectclass': 'top extensibleObject'.split(), + 'ou': 'cos_scope' + }))) + except ldap.LDAPError as e: + log.error('Failed to add cos_scope: error ' + e.message['desc']) + assert False + + try: + topo.standalone.add_s(Entry((NO_COS_BRANCH, { + 'objectclass': 'top extensibleObject'.split(), + 'ou': 'no_cos_scope' + }))) + except ldap.LDAPError as e: + log.error('Failed to add no_cos_scope: error ' + e.message['desc']) + assert False + + try: + topo.standalone.add_s(Entry((COS_TEMPLATE, { + 'objectclass': 'top ldapsubentry costemplate extensibleObject'.split(), + 'cn': 'cos_template', + 'cosPriority': '1', + 'cn': 'cn=nsPwTemplateEntry,ou=level1,dc=example,dc=com', + 'mailAlternateAddress': 'hello@world' + }))) + except ldap.LDAPError as e: + log.error('Failed to add cos_template: error ' + e.message['desc']) + assert False + + try: + topo.standalone.add_s(Entry((COS_DEF, { + 'objectclass': 'top ldapsubentry cosSuperDefinition cosPointerDefinition'.split(), + 'cn': 'cos_definition', + 'costemplatedn': COS_TEMPLATE, + 'cosAttribute': 'mailAlternateAddress default' + }))) + except ldap.LDAPError as e: + log.error('Failed to add cos_definition: error ' + e.message['desc']) + assert False + + try: + # This entry is not allowed to have mailAlternateAddress + topo.standalone.add_s(Entry((INVALID_USER_WITH_COS, { + 'objectclass': 'top person'.split(), + 'cn': 'cos_user_no_mail', + 'sn': 'cos_user_no_mail' + }))) + except ldap.LDAPError as e: + log.error('Failed to add cos_user_no_mail: error ' + e.message['desc']) + assert False + + try: + # This entry is allowed to have mailAlternateAddress + topo.standalone.add_s(Entry((VALID_USER_WITH_COS, { + 'objectclass': 'top mailGroup'.split(), + 'cn': 'cos_user_with_mail' + }))) + except ldap.LDAPError as e: + log.error('Failed to add cos_user_no_mail: error ' + e.message['desc']) + assert False + + try: + # This entry is not allowed to have 
mailAlternateAddress + topo.standalone.add_s(Entry((INVALID_USER_WITHOUT_COS, { + 'objectclass': 'top person'.split(), + 'cn': 'no_cos_user_no_mail', + 'sn': 'no_cos_user_no_mail' + }))) + except ldap.LDAPError as e: + log.error('Failed to add no_cos_user_no_mail: error ' + e.message['desc']) + assert False + + try: + # This entry is allowed to have mailAlternateAddress + topo.standalone.add_s(Entry((VALID_USER_WITHOUT_COS, { + 'objectclass': 'top mailGroup'.split(), + 'cn': 'no_cos_user_with_mail' + }))) + except ldap.LDAPError as e: + log.error('Failed to add no_cos_user_with_mail: error ' + e.message['desc']) + assert False + + try: + entries = topo.standalone.search_s(SUFFIX, ldap.SCOPE_SUBTREE, '(mailAlternateAddress=*)') + assert len(entries) == 1 + assert entries[0].hasValue('mailAlternateAddress', 'hello@world') + except ldap.LDAPError as e: + log.fatal('Unable to retrieve cos_user_with_mail (only entry with mailAlternateAddress) : error %s' % (USER1_DN, e.message['desc'])) + assert False + + assert not topo.standalone.ds_error_log.match(".*cos attribute mailAlternateAddress failed schema.*") + + if DEBUGGING: + # Add debugging steps(if any)... + pass + + +if __name__ == '__main__': + # Run isolated + # -s for DEBUG mode + CURRENT_FILE = os.path.realpath(__file__) + pytest.main("-s %s" % CURRENT_FILE) + diff --git a/dirsrvtests/tests/tickets/ticket49273_test.py b/dirsrvtests/tests/tickets/ticket49273_test.py new file mode 100644 index 0000000..e3213bd --- /dev/null +++ b/dirsrvtests/tests/tickets/ticket49273_test.py @@ -0,0 +1,52 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2017 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. 
+# --- END COPYRIGHT BLOCK --- +# + +import pytest +import ldap + +from lib389.topologies import topology_st +# This pulls in logging I think +from lib389.utils import * +from lib389.sasl import PlainSASL +from lib389.idm.services import ServiceAccounts + +pytestmark = pytest.mark.tier2 + +log = logging.getLogger(__name__) + +def test_49273_corrupt_dbversion(topology_st): + """ + ticket 49273 was caused by a disk space full, which corrupted + the users DBVERSION files. We can't prevent this, but we can handle + the error better than "crash". + """ + + standalone = topology_st.standalone + + # Stop the instance + standalone.stop() + # Corrupt userRoot dbversion + dbvf = os.path.join(standalone.ds_paths.db_dir, 'userRoot/DBVERSION') + with open(dbvf, 'w') as f: + # This will trunc the file + f.write('') + # Start up + try: + # post_open false, means ds state is OFFLINE, which allows + # dspaths below to use defaults rather than ldap check. + standalone.start(timeout=20, post_open=False) + except: + pass + # Trigger an update of the running server state, to move it OFFLINE. + standalone.status() + + # CHeck error log? + error_lines = standalone.ds_error_log.match('.*Could not parse file.*') + assert(len(error_lines) > 0) + diff --git a/dirsrvtests/tests/tickets/ticket49287_test.py b/dirsrvtests/tests/tickets/ticket49287_test.py new file mode 100644 index 0000000..5d15ea3 --- /dev/null +++ b/dirsrvtests/tests/tickets/ticket49287_test.py @@ -0,0 +1,347 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2017 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. 
+# --- END COPYRIGHT BLOCK --- +# +import pytest +from lib389.tasks import * +from lib389.utils import * +from lib389.properties import RA_NAME, RA_BINDDN, RA_BINDPW, RA_METHOD, RA_TRANSPORT_PROT, BACKEND_NAME +from lib389.topologies import topology_m2 +from lib389._constants import * +from lib389.replica import ReplicationManager + +pytestmark = pytest.mark.tier2 + +DEBUGGING = os.getenv('DEBUGGING', False) +GROUP_DN = ("cn=group," + DEFAULT_SUFFIX) + +if DEBUGGING: + logging.getLogger(__name__).setLevel(logging.DEBUG) +else: + logging.getLogger(__name__).setLevel(logging.INFO) +log = logging.getLogger(__name__) + + +def _add_repl_backend(s1, s2, be): + suffix = 'ou=%s,dc=test,dc=com' % be + create_backend(s1, s2, suffix, be) + add_ou(s1, suffix) + replicate_backend(s1, s2, suffix) + + +def _wait_for_sync(s1, s2, testbase, final_db): + + now = time.time() + cn1 = 'sync-%s-%d' % (now, 1) + cn2 = 'sync-%s-%d' % (now, 2) + add_user(s1, cn1, testbase, 'add on m1', sleep=False) + add_user(s2, cn2, testbase, 'add on m2', sleep=False) + dn1 = 'cn=%s,%s' % (cn1, testbase) + dn2 = 'cn=%s,%s' % (cn2, testbase) + if final_db: + final_db.append(dn1) + final_db.append(dn2) + _check_entry_exist(s2, dn1, 10, 5) + _check_entry_exist(s1, dn2, 10, 5) + + +def _check_entry_exist(supplier, dn, loops=10, wait=1): + attempt = 0 + while attempt <= loops: + try: + dn + ent = supplier.getEntry(dn, ldap.SCOPE_BASE, "(objectclass=*)") + break + except ldap.NO_SUCH_OBJECT: + attempt = attempt + 1 + time.sleep(wait) + except ldap.LDAPError as e: + log.fatal('Failed to retrieve user (%s): error %s' % (dn, e.message['desc'])) + assert False + assert attempt <= loops + + +def config_memberof(server): + + server.plugins.enable(name=PLUGIN_MEMBER_OF) + MEMBEROF_PLUGIN_DN = ('cn=' + PLUGIN_MEMBER_OF + ',cn=plugins,cn=config') + server.modify_s(MEMBEROF_PLUGIN_DN, [(ldap.MOD_REPLACE, + 'memberOfAllBackends', + b'on')]) + # Configure fractional to prevent total init to send memberof + ents = 
server.agreement.list(suffix=DEFAULT_SUFFIX) + log.info('update %s to add nsDS5ReplicatedAttributeListTotal' % ents[0].dn) + for ent in ents: + server.modify_s(ent.dn, + [(ldap.MOD_REPLACE, + 'nsDS5ReplicatedAttributeListTotal', + b'(objectclass=*) $ EXCLUDE '), + (ldap.MOD_REPLACE, + 'nsDS5ReplicatedAttributeList', + b'(objectclass=*) $ EXCLUDE memberOf')]) + + +def _disable_auto_oc_memberof(server): + MEMBEROF_PLUGIN_DN = ('cn=' + PLUGIN_MEMBER_OF + ',cn=plugins,cn=config') + server.modify_s(MEMBEROF_PLUGIN_DN, + [(ldap.MOD_REPLACE, 'memberOfAutoAddOC', b'nsContainer')]) + + +def _enable_auto_oc_memberof(server): + MEMBEROF_PLUGIN_DN = ('cn=' + PLUGIN_MEMBER_OF + ',cn=plugins,cn=config') + server.modify_s(MEMBEROF_PLUGIN_DN, + [(ldap.MOD_REPLACE, 'memberOfAutoAddOC', b'nsMemberOf')]) + + +def add_dc(server, dn): + server.add_s(Entry((dn, {'objectclass': ['top', 'domain']}))) + + +def add_ou(server, dn): + server.add_s(Entry((dn, {'objectclass': ['top', 'organizationalunit']}))) + + +def add_container(server, dn): + server.add_s(Entry((dn, {'objectclass': ['top', 'nscontainer']}))) + + +def add_user(server, cn, testbase, desc, sleep=True): + dn = 'cn=%s,%s' % (cn, testbase) + log.fatal('Adding user (%s): ' % dn) + server.add_s(Entry((dn, {'objectclass': ['top', 'person', 'inetuser'], + 'sn': 'user_%s' % cn, + 'description': desc}))) + if sleep: + time.sleep(2) + + +def add_person(server, cn, testbase, desc, sleep=True): + dn = 'cn=%s,%s' % (cn, testbase) + log.fatal('Adding user (%s): ' % dn) + server.add_s(Entry((dn, {'objectclass': ['top', 'person'], + 'sn': 'user_%s' % cn, + 'description': desc}))) + if sleep: + time.sleep(2) + + +def add_multi_member(server, cn, mem_id, mem_usr, testbase, sleep=True): + dn = 'cn=%s,ou=groups,%s' % (cn, testbase) + members = [] + for usr in mem_usr: + members.append('cn=a%d,ou=be_%d,%s' % (mem_id, usr, testbase)) + for mem in members: + mod = [(ldap.MOD_ADD, 'member', ensure_bytes(mem))] + try: + server.modify_s(dn, mod) + 
except ldap.OBJECT_CLASS_VIOLATION: + log.info('objectclass violation') + + if sleep: + time.sleep(2) + + +def add_member(server, cn, mem, testbase, sleep=True): + dn = 'cn=%s,ou=groups,%s' % (cn, testbase) + mem_dn = 'cn=%s,ou=people,%s' % (mem, testbase) + mod = [(ldap.MOD_ADD, 'member', ensure_bytes(mem_dn))] + server.modify_s(dn, mod) + if sleep: + time.sleep(2) + + +def add_group(server, testbase, nr, sleep=True): + + dn = 'cn=g%d,ou=groups,%s' % (nr, testbase) + server.add_s(Entry((dn, {'objectclass': ['top', 'groupofnames'], + 'member': [ + 'cn=m1_%d,%s' % (nr, testbase), + 'cn=m2_%d,%s' % (nr, testbase), + 'cn=m3_%d,%s' % (nr, testbase) + ], + 'description': 'group %d' % nr}))) + if sleep: + time.sleep(2) + + +def del_group(server, testbase, nr, sleep=True): + + dn = 'cn=g%d,%s' % (nr, testbase) + server.delete_s(dn) + if sleep: + time.sleep(2) + + +def mod_entry(server, cn, testbase, desc): + dn = 'cn=%s,%s' % (cn, testbase) + mod = [(ldap.MOD_ADD, 'description', ensure_bytes(desc))] + server.modify_s(dn, mod) + time.sleep(2) + + +def del_entry(server, testbase, cn): + dn = 'cn=%s,%s' % (cn, testbase) + server.delete_s(dn) + time.sleep(2) + + +def _disable_nunc_stans(server): + server.config.set('nsslapd-enable-nunc-stans', 'off') + + +def _enable_spec_logging(server): + server.config.replace_many(('nsslapd-accesslog-level', '260'), + ('nsslapd-errorlog-level', str(8192 + 65536)), + ('nsslapd-plugin-logging', 'on'), + ('nsslapd-auditlog-logging-enabled', 'on')) + + +def create_backend(s1, s2, beSuffix, beName): + s1.mappingtree.create(beSuffix, beName) + s1.backend.create(beSuffix, {BACKEND_NAME: beName}) + s2.mappingtree.create(beSuffix, beName) + s2.backend.create(beSuffix, {BACKEND_NAME: beName}) + + +def replicate_backend(s1, s2, beSuffix): + repl = ReplicationManager(beSuffix) + repl.create_first_supplier(s1) + repl.join_supplier(s1, s2) + repl.ensure_agreement(s1, s2) + repl.ensure_agreement(s2, s2) + # agreement m2_m1_agmt is not needed... 
:p + # + + +def check_group_mods(server1, server2, group, testbase): + # add members to group + add_multi_member(server1, group, 1, [1,2,3,4,5], testbase, sleep=False) + add_multi_member(server1, group, 2, [3,4,5], testbase, sleep=False) + add_multi_member(server1, group, 3, [0], testbase, sleep=False) + add_multi_member(server1, group, 4, [1,3,5], testbase, sleep=False) + add_multi_member(server1, group, 5, [2,0], testbase, sleep=False) + add_multi_member(server1, group, 6, [2,3,4], testbase, sleep=False) + # check that replication is working + # for main backend and some member backends + _wait_for_sync(server1, server2, testbase, None) + for i in range(6): + be = "be_%d" % i + _wait_for_sync(server1, server2, 'ou=%s,dc=test,dc=com' % be, None) + + +def check_multi_group_mods(server1, server2, group1, group2, testbase): + # add members to group + add_multi_member(server2, group1, 1, [1,2,3,4,5], testbase, sleep=False) + add_multi_member(server1, group2, 1, [1,2,3,4,5], testbase, sleep=False) + add_multi_member(server2, group1, 2, [3,4,5], testbase, sleep=False) + add_multi_member(server1, group2, 2, [3,4,5], testbase, sleep=False) + add_multi_member(server2, group1, 3, [0], testbase, sleep=False) + add_multi_member(server1, group2, 3, [0], testbase, sleep=False) + add_multi_member(server2, group1, 4, [1,3,5], testbase, sleep=False) + add_multi_member(server1, group2, 4, [1,3,5], testbase, sleep=False) + add_multi_member(server2, group1, 5, [2,0], testbase, sleep=False) + add_multi_member(server1, group2, 5, [2,0], testbase, sleep=False) + add_multi_member(server2, group1, 6, [2,3,4], testbase, sleep=False) + add_multi_member(server1, group2, 6, [2,3,4], testbase, sleep=False) + # check that replication is working + # for main backend and some member backends + _wait_for_sync(server1, server2, testbase, None) + for i in range(6): + be = "be_%d" % i + _wait_for_sync(server1, server2, 'ou=%s,dc=test,dc=com' % be, None) + + +def test_ticket49287(topology_m2): + """ + 
test case for memberof and conflict entries + + """ + + # return + M1 = topology_m2.ms["supplier1"] + M2 = topology_m2.ms["supplier2"] + + config_memberof(M1) + config_memberof(M2) + + _enable_spec_logging(M1) + _enable_spec_logging(M2) + + _disable_nunc_stans(M1) + _disable_nunc_stans(M2) + + M1.restart(timeout=10) + M2.restart(timeout=10) + + testbase = 'dc=test,dc=com' + bename = 'test' + create_backend(M1, M2, testbase, bename) + add_dc(M1, testbase) + add_ou(M1, 'ou=groups,%s' % testbase) + replicate_backend(M1, M2, testbase) + + peoplebase = 'ou=people,dc=test,dc=com' + peoplebe = 'people' + create_backend(M1, M2, peoplebase, peoplebe) + add_ou(M1, peoplebase) + replicate_backend(M1, M2, peoplebase) + + for i in range(10): + cn = 'a%d' % i + add_user(M1, cn, peoplebase, 'add on m1', sleep=False) + time.sleep(2) + add_group(M1, testbase, 1) + for i in range(10): + cn = 'a%d' % i + add_member(M1, 'g1', cn, testbase, sleep=False) + cn = 'b%d' % i + add_user(M1, cn, peoplebase, 'add on m1', sleep=False) + time.sleep(2) + + _wait_for_sync(M1, M2, testbase, None) + _wait_for_sync(M1, M2, peoplebase, None) + + # test group with members in multiple backends + for i in range(7): + be = "be_%d" % i + _add_repl_backend(M1, M2, be) + + # add entries akllowing meberof + for i in range(1, 7): + be = "be_%d" % i + for i in range(10): + cn = 'a%d' % i + add_user(M1, cn, 'ou=%s,dc=test,dc=com' % be, 'add on m1', sleep=False) + # add entries not allowing memberof + be = 'be_0' + for i in range(10): + cn = 'a%d' % i + add_person(M1, cn, 'ou=%s,dc=test,dc=com' % be, 'add on m1', sleep=False) + + _disable_auto_oc_memberof(M1) + _disable_auto_oc_memberof(M2) + add_group(M1, testbase, 2) + check_group_mods(M1, M2, 'g2', testbase) + + _enable_auto_oc_memberof(M1) + add_group(M1, testbase, 3) + check_group_mods(M1, M2, 'g3', testbase) + + _enable_auto_oc_memberof(M2) + add_group(M1, testbase, 4) + check_group_mods(M1, M2, 'g4', testbase) + + add_group(M1, testbase, 5) + add_group(M1, 
testbase, 6) + check_multi_group_mods(M1, M2, 'g5', 'g6', testbase) + + +if __name__ == '__main__': + # Run isolated + # -s for DEBUG mode + CURRENT_FILE = os.path.realpath(__file__) + pytest.main("-s %s" % CURRENT_FILE) diff --git a/dirsrvtests/tests/tickets/ticket49290_test.py b/dirsrvtests/tests/tickets/ticket49290_test.py new file mode 100644 index 0000000..fe47d18 --- /dev/null +++ b/dirsrvtests/tests/tickets/ticket49290_test.py @@ -0,0 +1,68 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2017 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. +# --- END COPYRIGHT BLOCK --- +# + +import pytest +import ldap + +from lib389.topologies import topology_st +from lib389._constants import DEFAULT_SUFFIX, DEFAULT_BENAME + +from lib389.backend import Backends + +pytestmark = pytest.mark.tier2 + +def test_49290_range_unindexed_notes(topology_st): + """ + Ticket 49290 had a small collection of issues - the primary issue is + that range requests on an attribute that is unindexed was not reporting + notes=U. This asserts that: + + * When unindexed, the attr shows notes=U + * when indexed, the attr does not + """ + + # First, assert that modifyTimestamp does not have an index. If it does, + # delete it. + topology_st.standalone.config.set('nsslapd-accesslog-logbuffering', 'off') + backends = Backends(topology_st.standalone) + backend = backends.get(DEFAULT_BENAME) + indexes = backend.get_indexes() + + for i in indexes.list(): + i_cn = i.get_attr_val_utf8('cn') + if i_cn.lower() == 'modifytimestamp': + i.delete() + topology_st.standalone.restart() + + # Now restart the server, and perform a modifyTimestamp range operation. 
+ # in access, we should see notes=U (or notes=A) + results = topology_st.standalone.search_s(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE, '(modifyTimestamp>=0)', ['nsUniqueId',]) + access_lines_unindexed = topology_st.standalone.ds_access_log.match('.*notes=U.*') + assert len(access_lines_unindexed) == 1 + + # Now add the modifyTimestamp index and run db2index. This will restart + # the server + indexes.create(properties={ + 'cn': 'modifytimestamp', + 'nsSystemIndex': 'false', + 'nsIndexType' : 'eq', + }) + topology_st.standalone.stop() + assert topology_st.standalone.db2index(DEFAULT_BENAME, attrs=['modifytimestamp'] ) + topology_st.standalone.start() + + # Now run the modifyTimestamp range query again. Assert that there is no + # notes=U/A in the log + results = topology_st.standalone.search_s(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE, '(modifyTimestamp>=0)', ['nsUniqueId',]) + access_lines_indexed = topology_st.standalone.ds_access_log.match('.*notes=U.*') + # Remove the old lines too. + access_lines_final = set(access_lines_unindexed) - set(access_lines_indexed) + # Make sure we have no unindexed notes in the log. + assert len(access_lines_final) == 0 + diff --git a/dirsrvtests/tests/tickets/ticket49303_test.py b/dirsrvtests/tests/tickets/ticket49303_test.py new file mode 100644 index 0000000..2ee7eb5 --- /dev/null +++ b/dirsrvtests/tests/tickets/ticket49303_test.py @@ -0,0 +1,113 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2017 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. 
+# --- END COPYRIGHT BLOCK --- +# +import time +import logging +import os +import subprocess +import pytest +from lib389.topologies import topology_st as topo +from lib389.nss_ssl import NssSsl + +from lib389._constants import SECUREPORT_STANDALONE1, HOST_STANDALONE1 + +pytestmark = pytest.mark.tier2 + +DEBUGGING = os.getenv("DEBUGGING", default=False) +if DEBUGGING: + logging.getLogger(__name__).setLevel(logging.DEBUG) +else: + logging.getLogger(__name__).setLevel(logging.INFO) +log = logging.getLogger(__name__) + + +def try_reneg(host, port): + """ + Connect to the specified host and port with openssl, and attempt to + initiate a renegotiation. Returns true if successful, false if not. + """ + + cmd = [ + '/usr/bin/openssl', + 's_client', + '-connect', + '%s:%d' % (host, port), + ] + + try: + proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, + stdin=subprocess.PIPE, + stderr=subprocess.PIPE) + except ValueError as e: + log.info("openssl failed: %s", e) + proc.kill() + + # This 'R' command is intercepted by openssl and triggers a renegotiation + proc.communicate(b'R\n') + + # We rely on openssl returning 0 if no errors occured, and 1 if any did + # (for example, the server rejecting renegotiation and terminating the + # connection) + return proc.returncode == 0 + + +def enable_ssl(server, ldapsport): + server.stop() + nss_ssl = NssSsl(dbpath=server.get_cert_dir()) + nss_ssl.reinit() + nss_ssl.create_rsa_ca() + nss_ssl.create_rsa_key_and_cert() + server.start() + server.config.set('nsslapd-secureport', '%s' % ldapsport) + server.config.set('nsslapd-security', 'on') + server.sslport = SECUREPORT_STANDALONE1 + server.restart() + + +def set_reneg(server, state): + server.encryption.set('nsTLSAllowClientRenegotiation', state) + time.sleep(1) + server.restart() + + +def test_ticket49303(topo): + """ + Test the nsTLSAllowClientRenegotiation setting. 
+ """ + sslport = SECUREPORT_STANDALONE1 + + log.info("Ticket 49303 - Allow disabling of SSL renegotiation") + + # No value set, defaults to reneg allowed + enable_ssl(topo.standalone, sslport) + assert try_reneg(HOST_STANDALONE1, sslport) is True + log.info("Renegotiation allowed by default - OK") + + # Turn reneg off + set_reneg(topo.standalone, 'off') + assert try_reneg(HOST_STANDALONE1, sslport) is False + log.info("Renegotiation disallowed - OK") + + # Explicitly enable + set_reneg(topo.standalone, 'on') + assert try_reneg(HOST_STANDALONE1, sslport) is True + log.info("Renegotiation explicitly allowed - OK") + + # Set to an invalid value, defaults to allowed + set_reneg(topo.standalone, 'invalid') + assert try_reneg(HOST_STANDALONE1, sslport) is True + log.info("Renegotiation allowed when option is invalid - OK") + + log.info("Ticket 49303 - PASSED") + + +if __name__ == '__main__': + # Run isolated + # -s for DEBUG mode + CURRENT_FILE = os.path.realpath(__file__) + pytest.main("-s %s" % CURRENT_FILE) diff --git a/dirsrvtests/tests/tickets/ticket49386_test.py b/dirsrvtests/tests/tickets/ticket49386_test.py new file mode 100644 index 0000000..c6a59ea --- /dev/null +++ b/dirsrvtests/tests/tickets/ticket49386_test.py @@ -0,0 +1,159 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2022 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. 
+# --- END COPYRIGHT BLOCK --- +# +import logging +import pytest +import os +import ldap +import time +from lib389.utils import * +from lib389.topologies import topology_st as topo +from lib389._constants import * +from lib389.config import Config +from lib389 import Entry + +pytestmark = [pytest.mark.tier2, + pytest.mark.skipif(ds_is_older('1.3.7'), reason="Not implemented")] + +USER_CN='user_' +GROUP_CN='group_' + +DEBUGGING = os.getenv("DEBUGGING", default=False) +if DEBUGGING: + logging.getLogger(__name__).setLevel(logging.DEBUG) +else: + logging.getLogger(__name__).setLevel(logging.INFO) +log = logging.getLogger(__name__) + +def add_user(server, no, desc='dummy', sleep=True): + cn = '%s%d' % (USER_CN, no) + dn = 'cn=%s,ou=people,%s' % (cn, SUFFIX) + log.fatal('Adding user (%s): ' % dn) + server.add_s(Entry((dn, {'objectclass': ['top', 'person', 'inetuser'], + 'sn': ['_%s' % cn], + 'description': [desc]}))) + if sleep: + time.sleep(2) + +def add_group(server, nr, sleep=True): + cn = '%s%d' % (GROUP_CN, nr) + dn = 'cn=%s,ou=groups,%s' % (cn, SUFFIX) + server.add_s(Entry((dn, {'objectclass': ['top', 'groupofnames'], + 'description': 'group %d' % nr}))) + if sleep: + time.sleep(2) + +def update_member(server, member_dn, group_dn, op, sleep=True): + mod = [(op, 'member', ensure_bytes(member_dn))] + server.modify_s(group_dn, mod) + if sleep: + time.sleep(2) + +def config_memberof(server): + + server.plugins.enable(name=PLUGIN_MEMBER_OF) + MEMBEROF_PLUGIN_DN = ('cn=' + PLUGIN_MEMBER_OF + ',cn=plugins,cn=config') + server.modify_s(MEMBEROF_PLUGIN_DN, [(ldap.MOD_REPLACE, + 'memberOfAllBackends', + b'on'), + (ldap.MOD_REPLACE, 'memberOfAutoAddOC', b'nsMemberOf')]) + + +def _find_memberof(server, member_dn, group_dn, find_result=True): + ent = server.getEntry(member_dn, ldap.SCOPE_BASE, "(objectclass=*)", ['memberof']) + found = False + if ent.hasAttr('memberof'): + + for val in ent.getValues('memberof'): + server.log.info("!!!!!!! 
%s: memberof->%s" % (member_dn, val)) + server.log.info("!!!!!!! %s" % (val)) + server.log.info("!!!!!!! %s" % (group_dn)) + if val.lower() == ensure_bytes(group_dn.lower()): + found = True + break + + if find_result: + assert (found) + else: + assert (not found) + +def test_ticket49386(topo): + """Specify a test case purpose or name here + + :id: ceb1e2b7-42cb-49f9-8ddd-bc752aa4a589 + :setup: Fill in set up configuration here + :steps: + 1. Configure memberof + 2. Add users (user_1) + 3. Add groups (group_1) + 4. Make user_1 member of group_1 + 5. Check that user_1 has the memberof attribute to group_1 + 6. Enable plugin log to capture memberof modrdn callback notification + 7. Rename group_1 in itself + 8. Check that the operation was skipped by memberof + + :expectedresults: + 1. memberof modrdn callbackk to log notfication that the update is skipped + """ + + S1 = topo.standalone + + # Step 1 + config_memberof(S1) + S1.restart() + + # Step 2 + for i in range(10): + add_user(S1, i, desc='add on S1') + + # Step 3 + for i in range(3): + add_group(S1, i) + + # Step 4 + member_dn = 'cn=%s%d,ou=people,%s' % (USER_CN, 1, SUFFIX) + group_parent_dn = 'ou=groups,%s' % (SUFFIX) + group_rdn = 'cn=%s%d' % (GROUP_CN, 1) + group_dn = '%s,%s' % (group_rdn, group_parent_dn) + update_member(S1, member_dn, group_dn, ldap.MOD_ADD, sleep=False) + + # Step 5 + _find_memberof(S1, member_dn, group_dn, find_result=True) + + # Step 6 + S1.config.loglevel(vals=[LOG_PLUGIN, LOG_DEFAULT], service='error') + + # Step 7 + S1.rename_s(group_dn, group_rdn, newsuperior=group_parent_dn, delold=0) + + # Step 8 + time.sleep(2) # should not be useful.. + found = False + for i in S1.ds_error_log.match('.*Skip modrdn operation because src/dst identical.*'): + log.info('memberof log found: %s' % i) + found = True + assert(found) + + # If you need any test suite initialization, + # please, write additional fixture for that (including finalizer). 
+ # Topology for suites are predefined in lib389/topologies.py. + + # If you need host, port or any other data about instance, + # Please, use the instance object attributes for that (for example, topo.ms["supplier1"].serverid) + + if DEBUGGING: + # Add debugging steps(if any)... + pass + + +if __name__ == '__main__': + # Run isolated + # -s for DEBUG mode + CURRENT_FILE = os.path.realpath(__file__) + pytest.main("-s %s" % CURRENT_FILE) + diff --git a/dirsrvtests/tests/tickets/ticket49412_test.py b/dirsrvtests/tests/tickets/ticket49412_test.py new file mode 100644 index 0000000..895e02c --- /dev/null +++ b/dirsrvtests/tests/tickets/ticket49412_test.py @@ -0,0 +1,75 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2022 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. +# --- END COPYRIGHT BLOCK --- +# +import logging +import pytest +import os +import ldap +import time +from lib389._constants import * +from lib389.topologies import topology_m1c1 as topo +from lib389._constants import * +from lib389 import Entry + +pytestmark = pytest.mark.tier2 + +DEBUGGING = os.getenv("DEBUGGING", default=False) +if DEBUGGING: + logging.getLogger(__name__).setLevel(logging.DEBUG) +else: + logging.getLogger(__name__).setLevel(logging.INFO) +log = logging.getLogger(__name__) +CHANGELOG = 'cn=changelog5,cn=config' +MAXAGE_ATTR = 'nsslapd-changelogmaxage' +TRIMINTERVAL = 'nsslapd-changelogtrim-interval' + + + +def test_ticket49412(topo): + """Specify a test case purpose or name here + + :id: 4c7681ff-0511-4256-9589-bdcad84c13e6 + :setup: Fill in set up configuration here + :steps: + 1. Fill in test case steps here + 2. And indent them like this (RST format requirement) + :expectedresults: + 1. Fill in the result that is expected + 2. 
For each test step + """ + + M1 = topo.ms["supplier1"] + + # wrong call with invalid value (should be str(60) + # that create replace with NULL value + # it should fail with UNWILLING_TO_PERFORM + try: + M1.modify_s(CHANGELOG, [(ldap.MOD_REPLACE, MAXAGE_ATTR, 60), + (ldap.MOD_REPLACE, TRIMINTERVAL, 10)]) + assert(False) + except ldap.UNWILLING_TO_PERFORM: + pass + + # If you need any test suite initialization, + # please, write additional fixture for that (including finalizer). + # Topology for suites are predefined in lib389/topologies.py. + + # If you need host, port or any other data about instance, + # Please, use the instance object attributes for that (for example, topo.ms["supplier1"].serverid) + + if DEBUGGING: + # Add debugging steps(if any)... + pass + + +if __name__ == '__main__': + # Run isolated + # -s for DEBUG mode + CURRENT_FILE = os.path.realpath(__file__) + pytest.main("-s %s" % CURRENT_FILE) + diff --git a/dirsrvtests/tests/tickets/ticket49441_test.py b/dirsrvtests/tests/tickets/ticket49441_test.py new file mode 100644 index 0000000..39cf881 --- /dev/null +++ b/dirsrvtests/tests/tickets/ticket49441_test.py @@ -0,0 +1,84 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2022 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. +# --- END COPYRIGHT BLOCK --- +# +import logging +import pytest +import os +import ldap +from lib389._constants import * +from lib389.topologies import topology_st as topo +from lib389.utils import * + +pytestmark = pytest.mark.tier2 + +DEBUGGING = os.getenv("DEBUGGING", default=False) +if DEBUGGING: + logging.getLogger(__name__).setLevel(logging.DEBUG) +else: + logging.getLogger(__name__).setLevel(logging.INFO) +log = logging.getLogger(__name__) + + +def test_ticket49441(topo): + """Import ldif with large indexed binary attributes, the server should not + crash + + :id: 4e5df145-cbd1-4955-8f77-6a7eaa14beba + :setup: standalone topology + :steps: + 1. 
Add indexes for binary attributes + 2. Perform online import + 3. Verify server is still running + :expectedresults: + 1. Indexes are successfully added + 2. Import succeeds + 3. Server is still running + """ + + log.info('Position ldif files, and add indexes...') + ldif_dir = topo.standalone.get_ldif_dir() + "binary.ldif" + ldif_file = (topo.standalone.getDir(__file__, DATA_DIR) + + "ticket49441/binary.ldif") + shutil.copyfile(ldif_file, ldif_dir) + args = {INDEX_TYPE: ['eq', 'pres']} + for attr in ('usercertificate', 'authorityrevocationlist', + 'certificaterevocationlist', 'crosscertificatepair', + 'cacertificate'): + try: + topo.standalone.index.create(suffix=DEFAULT_SUFFIX, + be_name='userroot', + attr=attr, args=args) + except ldap.LDAPError as e: + log.fatal("Failed to add index '{}' error: {}".format(attr, str(e))) + raise e + + log.info('Import LDIF with large indexed binary attributes...') + try: + topo.standalone.tasks.importLDIF(suffix=DEFAULT_SUFFIX, + input_file=ldif_dir, + args={TASK_WAIT: True}) + except: + log.fatal('Import failed!') + assert False + + log.info('Verify server is still running...') + try: + topo.standalone.search_s("", ldap.SCOPE_BASE, "objectclass=*") + except ldap.LDAPError as e: + log.fatal('Server is not alive: ' + str(e)) + assert False + + log.info('Test PASSED') + + +if __name__ == '__main__': + # Run isolated + # -s for DEBUG mode + CURRENT_FILE = os.path.realpath(__file__) + pytest.main("-s %s" % CURRENT_FILE) + diff --git a/dirsrvtests/tests/tickets/ticket49460_test.py b/dirsrvtests/tests/tickets/ticket49460_test.py new file mode 100644 index 0000000..224969f --- /dev/null +++ b/dirsrvtests/tests/tickets/ticket49460_test.py @@ -0,0 +1,125 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2022 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. 
+# --- END COPYRIGHT BLOCK --- +# +import time +import ldap +import logging +import pytest +import os +import re +from lib389._constants import * +from lib389.config import Config +from lib389 import DirSrv, Entry +from lib389.topologies import topology_m3 as topo + +pytestmark = pytest.mark.tier2 + +DEBUGGING = os.getenv("DEBUGGING", default=False) +if DEBUGGING: + logging.getLogger(__name__).setLevel(logging.DEBUG) +else: + logging.getLogger(__name__).setLevel(logging.INFO) +log = logging.getLogger(__name__) + +USER_CN="user" + +def add_user(server, no, desc='dummy', sleep=True): + cn = '%s%d' % (USER_CN, no) + dn = 'cn=%s,ou=people,%s' % (cn, SUFFIX) + log.fatal('Adding user (%s): ' % dn) + server.add_s(Entry((dn, {'objectclass': ['top', 'person', 'inetuser', 'userSecurityInformation'], + 'sn': ['_%s' % cn], + 'description': [desc]}))) + time.sleep(1) + +def check_user(server, no, timeout=10): + + cn = '%s%d' % (USER_CN, no) + dn = 'cn=%s,ou=people,%s' % (cn, SUFFIX) + found = False + cpt = 0 + while cpt < timeout: + try: + server.getEntry(dn, ldap.SCOPE_BASE, "(objectclass=*)") + found = True + break + except ldap.NO_SUCH_OBJECT: + time.sleep(1) + cpt += 1 + return found + +def pattern_errorlog(server, log_pattern): + file_obj = open(server.errlog, "r") + + found = None + # Use a while true iteration because 'for line in file: hit a + while True: + line = file_obj.readline() + found = log_pattern.search(line) + if ((line == '') or (found)): + break + + return found + +def test_ticket_49460(topo): + """Specify a test case purpose or name here + + :id: d1aa2e8b-e6ab-4fc6-9c63-c6f622544f2d + :setup: Fill in set up configuration here + :steps: + 1. Enable replication logging + 2. Do few updates to generatat RUV update + :expectedresults: + 1. 
No report of failure when the RUV is updated + """ + + M1 = topo.ms["supplier1"] + M2 = topo.ms["supplier2"] + M3 = topo.ms["supplier3"] + + for i in (M1, M2, M3): + i.config.loglevel(vals=[256 + 4], service='access') + i.config.loglevel(vals=[LOG_REPLICA, LOG_DEFAULT], service='error') + + add_user(M1, 11, desc="add to M1") + add_user(M2, 21, desc="add to M2") + add_user(M3, 31, desc="add to M3") + + for i in (M1, M2, M3): + assert check_user(i, 11) + assert check_user(i, 21) + assert check_user(i, 31) + + time.sleep(10) + + #M1.tasks.cleanAllRUV(suffix=SUFFIX, replicaid='3', + # force=False, args={TASK_WAIT: True}) + #time.sleep(10) + regex = re.compile(".*Failed to update RUV tombstone.*LDAP error - 0") + assert not pattern_errorlog(M1, regex) + assert not pattern_errorlog(M2, regex) + assert not pattern_errorlog(M3, regex) + + # If you need any test suite initialization, + # please, write additional fixture for that (including finalizer). + # Topology for suites are predefined in lib389/topologies.py. + + # If you need host, port or any other data about instance, + # Please, use the instance object attributes for that (for example, topo.ms["supplier1"].serverid) + + if DEBUGGING: + # Add debugging steps(if any)... + pass + + +if __name__ == '__main__': + # Run isolated + # -s for DEBUG mode + CURRENT_FILE = os.path.realpath(__file__) + pytest.main("-s %s" % CURRENT_FILE) + diff --git a/dirsrvtests/tests/tickets/ticket49463_test.py b/dirsrvtests/tests/tickets/ticket49463_test.py new file mode 100644 index 0000000..d924ec4 --- /dev/null +++ b/dirsrvtests/tests/tickets/ticket49463_test.py @@ -0,0 +1,236 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2022 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. 
+# --- END COPYRIGHT BLOCK --- +# +import time +import ldap +import logging +import pytest +import os +import re +from lib389._constants import DEFAULT_SUFFIX, SUFFIX, LOG_REPLICA, LOG_DEFAULT +from lib389.config import Config +from lib389 import DirSrv, Entry +from lib389.topologies import topology_m4 as topo +from lib389.replica import Replicas, ReplicationManager +from lib389.idm.user import UserAccounts, UserAccount +from lib389.tasks import * +from lib389.utils import * + +pytestmark = pytest.mark.tier2 + +USER_CN = "test_user" + + +def add_user(server, no, desc='dummy'): + user = UserAccounts(server, DEFAULT_SUFFIX) + users = user.create_test_user(uid=no) + users.add('description', [desc]) + users.add('objectclass', 'userSecurityInformation') + + +def pattern_errorlog(server, log_pattern): + for i in range(10): + time.sleep(5) + found = server.ds_error_log.match(log_pattern) + if found == '' or found: + return found + break + + +def fractional_server_to_replica(server, replica): + repl = ReplicationManager(DEFAULT_SUFFIX) + repl.ensure_agreement(server, replica) + replica_server = Replicas(server).get(DEFAULT_SUFFIX) + agmt_server = replica_server.get_agreements().list()[0] + agmt_server.replace_many( + ('nsDS5ReplicatedAttributeListTotal', '(objectclass=*) $ EXCLUDE telephoneNumber'), + ('nsDS5ReplicatedAttributeList', '(objectclass=*) $ EXCLUDE telephoneNumber'), + ('nsds5ReplicaStripAttrs', 'modifiersname modifytimestamp'), + ) + + +def count_pattern_accesslog(server, log_pattern): + count = 0 + server.config.set('nsslapd-accesslog-logbuffering', 'off') + if server.ds_access_log.match(log_pattern): + count = count + 1 + + return count + + +def test_ticket_49463(topo): + """Specify a test case purpose or name here + + :id: 2a68e8be-387d-4ac7-9452-1439e8483c13 + :setup: Fill in set up configuration here + :steps: + 1. Enable fractional replication + 2. Enable replication logging + 3. Check that replication is working fine + 4. 
Generate skipped updates to create keep alive entries + 5. Remove M3 from the topology + 6. issue cleanAllRuv FORCE that will run on M1 then propagated M2 and M4 + 7. Check that Number DEL keep alive '3' is <= 1 + 8. Check M1 is the originator of cleanAllRuv and M2/M4 the propagated ones + 9. Check replication M1,M2 and M4 can recover + 10. Remove M4 from the topology + 11. Issue cleanAllRuv not force while M2 is stopped (that hangs the cleanAllRuv) + 12. Check that nsds5ReplicaCleanRUV is correctly encoded on M1 (last value: 1) + 13. Check that nsds5ReplicaCleanRUV encoding survives M1 restart + 14. Check that nsds5ReplicaCleanRUV encoding is valid on M2 (last value: 0) + 15. Check that (for M4 cleanAllRUV) M1 is Originator and M2 propagation + :expectedresults: + 1. No report of failure when the RUV is updated + """ + + # Step 1 - Configure fractional (skip telephonenumber) replication + M1 = topo.ms["supplier1"] + M2 = topo.ms["supplier2"] + M3 = topo.ms["supplier3"] + M4 = topo.ms["supplier4"] + repl = ReplicationManager(DEFAULT_SUFFIX) + fractional_server_to_replica(M1, M2) + fractional_server_to_replica(M1, M3) + fractional_server_to_replica(M1, M4) + + fractional_server_to_replica(M2, M1) + fractional_server_to_replica(M2, M3) + fractional_server_to_replica(M2, M4) + + fractional_server_to_replica(M3, M1) + fractional_server_to_replica(M3, M2) + fractional_server_to_replica(M3, M4) + + fractional_server_to_replica(M4, M1) + fractional_server_to_replica(M4, M2) + fractional_server_to_replica(M4, M3) + + # Step 2 - enable internal op logging and replication debug + for i in (M1, M2, M3, M4): + i.config.loglevel(vals=[256 + 4], service='access') + i.config.loglevel(vals=[LOG_REPLICA, LOG_DEFAULT], service='error') + + # Step 3 - Check that replication is working fine + add_user(M1, 11, desc="add to M1") + add_user(M2, 21, desc="add to M2") + add_user(M3, 31, desc="add to M3") + add_user(M4, 41, desc="add to M4") + + for i in (M1, M2, M3, M4): + for j in (M1, 
M2, M3, M4): + if i == j: + continue + repl.wait_for_replication(i, j) + + # Step 4 - Generate skipped updates to create keep alive entries + for i in (M1, M2, M3, M4): + cn = '%s_%d' % (USER_CN, 11) + dn = 'uid=%s,ou=People,%s' % (cn, SUFFIX) + users = UserAccount(i, dn) + for j in range(110): + users.set('telephoneNumber', str(j)) + + # Step 5 - Remove M3 from the topology + M3.stop() + M1.agreement.delete(suffix=SUFFIX, consumer_host=M3.host, consumer_port=M3.port) + M2.agreement.delete(suffix=SUFFIX, consumer_host=M3.host, consumer_port=M3.port) + M4.agreement.delete(suffix=SUFFIX, consumer_host=M3.host, consumer_port=M3.port) + + # Step 6 - Then issue cleanAllRuv FORCE that will run on M1, M2 and M4 + M1.tasks.cleanAllRUV(suffix=SUFFIX, replicaid='3', + force=True, args={TASK_WAIT: True}) + + # Step 7 - Count the number of received DEL of the keep alive 3 + for i in (M1, M2, M4): + i.restart() + regex = re.compile(".*DEL dn=.cn=repl keep alive 3.*") + for i in (M1, M2, M4): + count = count_pattern_accesslog(M1, regex) + log.debug("count on %s = %d" % (i, count)) + + # check that DEL is replicated once (If DEL is kept in the fix) + # check that DEL is is not replicated (If DEL is finally no long done in the fix) + assert ((count == 1) or (count == 0)) + + # Step 8 - Check that M1 is Originator of cleanAllRuv and M2, M4 propagation + regex = re.compile(".*Original task deletes Keep alive entry .3.*") + assert pattern_errorlog(M1, regex) + + regex = re.compile(".*Propagated task does not delete Keep alive entry .3.*") + assert pattern_errorlog(M2, regex) + assert pattern_errorlog(M4, regex) + + # Step 9 - Check replication M1,M2 and M4 can recover + add_user(M1, 12, desc="add to M1") + add_user(M2, 22, desc="add to M2") + for i in (M1, M2, M4): + for j in (M1, M2, M4): + if i == j: + continue + repl.wait_for_replication(i, j) + + # Step 10 - Remove M4 from the topology + M4.stop() + M1.agreement.delete(suffix=SUFFIX, consumer_host=M4.host, consumer_port=M4.port) 
+ M2.agreement.delete(suffix=SUFFIX, consumer_host=M4.host, consumer_port=M4.port) + + # Step 11 - Issue cleanAllRuv not force while M2 is stopped (that hangs the cleanAllRuv) + M2.stop() + M1.tasks.cleanAllRUV(suffix=SUFFIX, replicaid='4', + force=False, args={TASK_WAIT: False}) + + # Step 12 + # CleanAllRuv is hanging waiting for M2 to restart + # Check that nsds5ReplicaCleanRUV is correctly encoded on M1 + replicas = Replicas(M1) + replica = replicas.list()[0] + time.sleep(0.5) + replica.present('nsds5ReplicaCleanRUV') + log.info("M1: nsds5ReplicaCleanRUV=%s" % replica.get_attr_val_utf8('nsds5replicacleanruv')) + regex = re.compile("^4:.*:no:1$") + assert regex.match(replica.get_attr_val_utf8('nsds5replicacleanruv')) + + # Step 13 + # Check that it encoding survives restart + M1.restart() + assert replica.present('nsds5ReplicaCleanRUV') + assert regex.match(replica.get_attr_val_utf8('nsds5replicacleanruv')) + + # Step 14 - Check that nsds5ReplicaCleanRUV encoding is valid on M2 + M1.stop() + M2.start() + replicas = Replicas(M2) + replica = replicas.list()[0] + M1.start() + time.sleep(0.5) + if replica.present('nsds5ReplicaCleanRUV'): + log.info("M2: nsds5ReplicaCleanRUV=%s" % replica.get_attr_val_utf8('nsds5replicacleanruv')) + regex = re.compile("^4:.*:no:0$") + assert regex.match(replica.get_attr_val_utf8('nsds5replicacleanruv')) + + # time to run cleanAllRuv + for i in (M1, M2): + for j in (M1, M2): + if i == j: + continue + repl.wait_for_replication(i, j) + + # Step 15 - Check that M1 is Originator of cleanAllRuv and M2 propagation + regex = re.compile(".*Original task deletes Keep alive entry .4.*") + assert pattern_errorlog(M1, regex) + + regex = re.compile(".*Propagated task does not delete Keep alive entry .4.*") + assert pattern_errorlog(M2, regex) + + +if __name__ == '__main__': + # Run isolated + # -s for DEBUG mode + CURRENT_FILE = os.path.realpath(__file__) + pytest.main("-s %s" % CURRENT_FILE) + diff --git 
a/dirsrvtests/tests/tickets/ticket49471_test.py b/dirsrvtests/tests/tickets/ticket49471_test.py new file mode 100644 index 0000000..058a741 --- /dev/null +++ b/dirsrvtests/tests/tickets/ticket49471_test.py @@ -0,0 +1,89 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2022 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. +# --- END COPYRIGHT BLOCK --- +# +import logging +import pytest +import os +import time +import ldap +from lib389._constants import * +from lib389.topologies import topology_st as topo +from lib389 import Entry + +pytestmark = pytest.mark.tier2 + +DEBUGGING = os.getenv("DEBUGGING", default=False) +if DEBUGGING: + logging.getLogger(__name__).setLevel(logging.DEBUG) +else: + logging.getLogger(__name__).setLevel(logging.INFO) +log = logging.getLogger(__name__) + + +USER_CN='user_' +def _user_get_dn(no): + cn = '%s%d' % (USER_CN, no) + dn = 'cn=%s,ou=people,%s' % (cn, SUFFIX) + return (cn, dn) + +def add_user(server, no, desc='dummy', sleep=True): + (cn, dn) = _user_get_dn(no) + log.fatal('Adding user (%s): ' % dn) + server.add_s(Entry((dn, {'objectclass': ['top', 'person', 'inetuser', 'userSecurityInformation'], + 'cn': [cn], + 'description': [desc], + 'sn': [cn], + 'description': ['add on that host']}))) + if sleep: + time.sleep(2) + +def test_ticket49471(topo): + """Specify a test case purpose or name here + + :id: 457ab172-9455-4eb2-89a0-150e3de5993f + :setup: Fill in set up configuration here + :steps: + 1. Fill in test case steps here + 2. And indent them like this (RST format requirement) + :expectedresults: + 1. Fill in the result that is expected + 2. For each test step + """ + + # If you need any test suite initialization, + # please, write additional fixture for that (including finalizer). + # Topology for suites are predefined in lib389/topologies.py. 
+ + # If you need host, port or any other data about instance, + # Please, use the instance object attributes for that (for example, topo.ms["supplier1"].serverid) + + S1 = topo.standalone + add_user(S1, 1) + + Filter = "(description:2.16.840.1.113730.3.3.2.1.1.6:=\*on\*)" + ents = S1.search_s(SUFFIX, ldap.SCOPE_SUBTREE, Filter) + assert len(ents) == 1 + + # + # The following is for the test 49491 + # skipped here else it crashes in ASAN + #Filter = "(description:2.16.840.1.113730.3.3.2.1.1.6:=\*host)" + #ents = S1.search_s(SUFFIX, ldap.SCOPE_SUBTREE, Filter) + #assert len(ents) == 1 + + if DEBUGGING: + # Add debugging steps(if any)... + pass + + +if __name__ == '__main__': + # Run isolated + # -s for DEBUG mode + CURRENT_FILE = os.path.realpath(__file__) + pytest.main("-s %s" % CURRENT_FILE) + diff --git a/dirsrvtests/tests/tickets/ticket49540_test.py b/dirsrvtests/tests/tickets/ticket49540_test.py new file mode 100644 index 0000000..5711eee --- /dev/null +++ b/dirsrvtests/tests/tickets/ticket49540_test.py @@ -0,0 +1,143 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2022 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. 
+# --- END COPYRIGHT BLOCK --- +# +import logging +import pytest +import os +import ldap +import time +import re +from lib389._constants import * +from lib389.tasks import * +from lib389.topologies import topology_st as topo +from lib389.idm.user import UserAccount, UserAccounts, TEST_USER_PROPERTIES +from lib389 import Entry + +pytestmark = pytest.mark.tier2 + +DEBUGGING = os.getenv("DEBUGGING", default=False) +if DEBUGGING: + logging.getLogger(__name__).setLevel(logging.DEBUG) +else: + logging.getLogger(__name__).setLevel(logging.INFO) +log = logging.getLogger(__name__) + +HOMEDIRECTORY_INDEX = 'cn=homeDirectory,cn=index,cn=userRoot,cn=ldbm database,cn=plugins,cn=config' +HOMEDIRECTORY_CN = "homedirectory" +MATCHINGRULE = 'nsMatchingRule' +USER_CN = 'user_' + +def create_index_entry(topo): + log.info("\n\nindex homeDirectory") + try: + ent = topo.getEntry(HOMEDIRECTORY_INDEX, ldap.SCOPE_BASE) + except ldap.NO_SUCH_OBJECT: + topo.add_s(Entry((HOMEDIRECTORY_INDEX, { + 'objectclass': "top nsIndex".split(), + 'cn': HOMEDIRECTORY_CN, + 'nsSystemIndex': 'false', + MATCHINGRULE: ['caseIgnoreIA5Match', 'caseExactIA5Match' ], + 'nsIndexType': ['eq', 'sub', 'pres']}))) + + +def provision_users(topo): + test_users = [] + homeValue = b'x' * (32 * 1024) # just to slow down indexing + for i in range(100): + CN = '%s%d' % (USER_CN, i) + users = UserAccounts(topo, SUFFIX) + user_props = TEST_USER_PROPERTIES.copy() + user_props.update({'uid': CN, 'cn': CN, 'sn': '_%s' % CN, HOMEDIRECTORY_CN: homeValue}) + testuser = users.create(properties=user_props) + test_users.append(testuser) + return test_users + +def start_start_status(server): + args = {TASK_WAIT: False} + indexTask = Tasks(server) + indexTask.reindex(suffix=SUFFIX, attrname='homeDirectory', args=args) + return indexTask + +def check_task_status(server, indexTask, test_entry): + finish_pattern = re.compile(".*Finished indexing.*") + mod = [(ldap.MOD_REPLACE, 'sn', b'foo')] + for i in range(10): + 
log.info("check_task_status =========> %d th loop" % i) + try: + ent = server.getEntry(indexTask.dn, ldap.SCOPE_BASE) + if ent.hasAttr('nsTaskStatus'): + value = str(ent.getValue('nsTaskStatus')) + finish = finish_pattern.search(value) + log.info("%s ---> %s" % (indexTask.dn, value)) + else: + finish = None + log.info("%s ---> NO STATUS" % (indexTask.dn)) + + if not finish: + # This is not yet finished try an update + try: + server.modify_s(test_entry, mod) + + # weird, may be indexing just complete + ent = server.getEntry(indexTask.dn, ldap.SCOPE_BASE, ['nsTaskStatus']) + assert (ent.hasAttr('nsTaskStatus') and regex.search(ent.getValue('nsTaskStatus'))) + log.info("Okay, it just finished so the MOD was successful") + except ldap.UNWILLING_TO_PERFORM: + log.info("=========> Great it was expected in the middle of index") + else: + # The update should be successful + server.modify_s(test_entry, mod) + + except ldap.NO_SUCH_OBJECT: + log.info("%s: no found" % (indexTask.dn)) + + time.sleep(1) + +def test_ticket49540(topo): + """Specify a test case purpose or name here + + :id: 1df16d5a-1b92-46b7-8435-876b87545748 + :setup: Standalone Instance + :steps: + 1. Create homeDirectory index (especially with substring) + 2. Creates 100 users with large homeDirectory value => long to index + 3. Start an indexing task WITHOUT waiting for its completion + 4. Monitor that until task.status = 'Finish', any update -> UNWILLING to perform + :expectedresults: + 1. Index configuration succeeds + 2. users entry are successfully created + 3. Indexing task is started + 4. 
If the task.status does not contain 'Finished indexing', any update should return UNWILLING_TO_PERFORM + When it contains 'Finished indexing', updates should be successful + """ + + server = topo.standalone + create_index_entry(server) + test_users = provision_users(server) + + indexTask = start_start_status(server) + check_task_status(server, indexTask, test_users[0].dn) + + # If you need any test suite initialization, + # please, write additional fixture for that (including finalizer). + # Topology for suites are predefined in lib389/topologies.py. + + # If you need host, port or any other data about instance, + # Please, use the instance object attributes for that (for example, topo.ms["supplier1"].serverid) + + if DEBUGGING: + # Add debugging steps(if any)... + pass + + +if __name__ == '__main__': + # Run isolated + # -s for DEBUG mode + CURRENT_FILE = os.path.realpath(__file__) + pytest.main(["-s", CURRENT_FILE]) + diff --git a/dirsrvtests/tests/tickets/ticket49623_2_test.py b/dirsrvtests/tests/tickets/ticket49623_2_test.py new file mode 100644 index 0000000..8c5e1c5 --- /dev/null +++ b/dirsrvtests/tests/tickets/ticket49623_2_test.py @@ -0,0 +1,68 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2020 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. 
+# --- END COPYRIGHT BLOCK --- +# +import os +import ldap +import pytest +import subprocess +from lib389.tasks import * +from lib389.utils import * +from lib389.topologies import topology_m1 +from lib389.idm.user import UserAccounts +from lib389._constants import DEFAULT_SUFFIX +from contextlib import contextmanager + +pytestmark = pytest.mark.tier1 + +logging.getLogger(__name__).setLevel(logging.DEBUG) +log = logging.getLogger(__name__) + + +@pytest.mark.ds49623 +@pytest.mark.bz1790986 +def test_modrdn_loop(topology_m1): + """Test that renaming the same entry multiple times reusing the same + RDN multiple times does not result in cenotaph error messages + + :id: 631b2be9-5c03-44c7-9853-a87c923d5b30 + + :customerscenario: True + + :setup: Single supplier instance + + :steps: 1. Add an entry with RDN start rdn + 2. Rename the entry to rdn change + 3. Rename the entry to start again + 4. Rename the entry to rdn change + 5. check for cenotaph error messages + :expectedresults: + 1. No error messages + """ + + topo = topology_m1.ms['supplier1'] + TEST_ENTRY_RDN_START = 'start' + TEST_ENTRY_RDN_CHANGE = 'change' + TEST_ENTRY_NAME = 'tuser' + users = UserAccounts(topo, DEFAULT_SUFFIX) + user_properties = { + 'uid': TEST_ENTRY_RDN_START, + 'cn': TEST_ENTRY_NAME, + 'sn': TEST_ENTRY_NAME, + 'uidNumber': '1001', + 'gidNumber': '2001', + 'homeDirectory': '/home/{}'.format(TEST_ENTRY_NAME) + } + + tuser = users.create(properties=user_properties) + tuser.rename('uid={}'.format(TEST_ENTRY_RDN_CHANGE), newsuperior=None, deloldrdn=True) + tuser.rename('uid={}'.format(TEST_ENTRY_RDN_START), newsuperior=None, deloldrdn=True) + tuser.rename('uid={}'.format(TEST_ENTRY_RDN_CHANGE), newsuperior=None, deloldrdn=True) + + log.info("Check the log messages for cenotaph error") + error_msg = ".*urp_fixup_add_cenotaph - failed to add cenotaph, err= 68" + assert not topo.ds_error_log.match(error_msg) diff --git a/dirsrvtests/tests/tickets/ticket49658_test.py 
b/dirsrvtests/tests/tickets/ticket49658_test.py new file mode 100644 index 0000000..c3a1db4 --- /dev/null +++ b/dirsrvtests/tests/tickets/ticket49658_test.py @@ -0,0 +1,4276 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2022 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. +# --- END COPYRIGHT BLOCK --- +# +import logging +import pytest +import os +import ldap +import time +import sys +print(sys.path) +from lib389 import Entry +from lib389._constants import DEFAULT_SUFFIX +from lib389.idm.user import UserAccounts, TEST_USER_PROPERTIES +from lib389.topologies import topology_m3 as topo + +pytestmark = pytest.mark.tier2 + +DEBUGGING = os.getenv("DEBUGGING", default=False) +if DEBUGGING: + logging.getLogger(__name__).setLevel(logging.DEBUG) +else: + logging.getLogger(__name__).setLevel(logging.INFO) +log = logging.getLogger(__name__) + + +MAX_EMPLOYEENUMBER_USER = 20 +MAX_STANDARD_USER = 100 +MAX_USER = MAX_STANDARD_USER + MAX_EMPLOYEENUMBER_USER +EMPLOYEENUMBER_RDN_START = 0 + +USER_UID='user_' +BASE_DISTINGUISHED = 'ou=distinguished,ou=people,%s' % (DEFAULT_SUFFIX) +BASE_REGULAR = 'ou=regular,ou=people,%s' % (DEFAULT_SUFFIX) + +def _user_get_dn(no): + uid = '%s%d' % (USER_UID, no) + dn = 'uid=%s,%s' % (uid, BASE_REGULAR) + return (uid, dn) + +def add_user(server, no, init_val): + (uid, dn) = _user_get_dn(no) + log.fatal('Adding user (%s): ' % dn) + server.add_s(Entry((dn, {'objectclass': ['top', 'person', 'organizationalPerson', 'inetOrgPerson'], + 'uid': [uid], + 'sn' : [uid], + 'cn' : [uid], + 'employeeNumber': init_val}))) + return dn + +def _employeenumber_user_get_dn(no): + employeeNumber = str(no) + dn = 'employeeNumber=%s,%s' % (employeeNumber, BASE_DISTINGUISHED) + return (employeeNumber, dn) + +def add_employeenumber_user(server, no): + (uid, dn) = _employeenumber_user_get_dn(EMPLOYEENUMBER_RDN_START + no) + log.fatal('Adding user (%s): ' % dn) + server.add_s(Entry((dn, 
{'objectclass': ['top', 'person', 'organizationalPerson', 'inetOrgPerson'], + 'uid': [uid], + 'sn' : [uid], + 'cn' : [uid], + 'employeeNumber': str(EMPLOYEENUMBER_RDN_START + no)}))) + return dn + +def save_stuff(): + M1 = topo.ms["supplier1"] + M2 = topo.ms["supplier2"] + M3 = topo.ms["supplier3"] + value_11 = '11'.encode() + value_1000 = '1000'.encode() + value_13 = '13'.encode() + value_14 = '14'.encode() + + # Step 2 + test_user_dn= add_user(M3, 0, value_11) + log.info('Adding %s on M3' % test_user_dn) + M3.modify_s(test_user_dn, [(ldap.MOD_DELETE, 'employeeNumber', value_11), (ldap.MOD_ADD, 'employeeNumber', value_1000)]) + ents = M3.search_s(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE, '(employeeNumber=*)') + assert len(ents) == 1 + + + # Step 3 + # Check the entry is replicated on M1 + for j in range(30): + try: + ent = M1.getEntry(test_user_dn, ldap.SCOPE_BASE,) + if not ent.hasAttr('employeeNumber'): + # wait for the MOD + log.info('M1 waiting for employeeNumber') + time.sleep(1) + continue; + break; + except ldap.NO_SUCH_OBJECT: + time.sleep(1) + pass + time.sleep(1) + ents = M1.search_s(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE, '(employeeNumber=*)') + assert len(ents) == 1 + + # Check the entry is replicated on M2 + for j in range(30): + try: + ent = M2.getEntry(test_user_dn, ldap.SCOPE_BASE,) + if not ent.hasAttr('employeeNumber'): + # wait for the MOD + log.info('M2 waiting for employeeNumber') + time.sleep(1) + continue; + + break; + except ldap.NO_SUCH_OBJECT: + time.sleep(1) + pass + time.sleep(1) + ents = M2.search_s(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE, '(employeeNumber=*)') + assert len(ents) == 1 + +def test_ticket49658_init(topo): + """Specify a test case purpose or name here + + :id: f8d43cef-c385-46a2-b32b-fdde2114b45e + :setup: 3 Supplier Instances + :steps: + 1. Create 3 suppliers + 2. Create on M3 MAX_USER test entries having a single-value attribute employeeNumber=11 + and update it MOD_DEL 11 + MOD_ADD 1000 + 3. 
Check they are replicated on M1 and M2 + :expectedresults: + 1. Fill in the result that is expected + 2. For each test step + """ + + # If you need any test suite initialization, + # please, write additional fixture for that (including finalizer). + # Topology for suites are predefined in lib389/topologies.py. + + # If you need host, port or any other data about instance, + # Please, use the instance object attributes for that (for example, topo.ms["supplier1"].serverid) + + if DEBUGGING: + # Add debugging steps(if any)... + pass + M1 = topo.ms["supplier1"] + M2 = topo.ms["supplier2"] + M3 = topo.ms["supplier3"] + value_11 = '11'.encode() + value_1000 = '1000'.encode() + + # Step 2 + M3.add_s(Entry((BASE_DISTINGUISHED, {'objectclass': ['top', 'organizationalUnit'], + 'ou': ['distinguished']}))) + for i in range(MAX_EMPLOYEENUMBER_USER): + test_user_dn= add_employeenumber_user(M3, i) + log.info('Adding %s on M3' % test_user_dn) + ents = M3.search_s(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE, '(employeeNumber=*)') + assert len(ents) == (i + 1) + + M3.add_s(Entry((BASE_REGULAR, {'objectclass': ['top', 'organizationalUnit'], + 'ou': ['regular']}))) + for i in range(MAX_STANDARD_USER): + test_user_dn= add_user(M3, i, value_11) + log.info('Adding %s on M3' % test_user_dn) + M3.modify_s(test_user_dn, [(ldap.MOD_DELETE, 'employeeNumber', value_11), (ldap.MOD_ADD, 'employeeNumber', value_1000)]) + ents = M3.search_s(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE, '(employeeNumber=*)') + assert len(ents) == (MAX_EMPLOYEENUMBER_USER + i + 1) + + + # Step 3 + # Check the last entry is replicated on M1 + (uid, test_user_dn) = _user_get_dn(MAX_STANDARD_USER - 1) + for j in range(30): + try: + ent = M1.getEntry(test_user_dn, ldap.SCOPE_BASE,) + if not ent.hasAttr('employeeNumber'): + # wait for the MOD + log.info('M1 waiting for employeeNumber') + time.sleep(1) + continue; + break; + except ldap.NO_SUCH_OBJECT: + time.sleep(1) + pass + time.sleep(1) + ents = M1.search_s(DEFAULT_SUFFIX, 
ldap.SCOPE_SUBTREE, '(employeeNumber=*)') + assert len(ents) == MAX_USER + + # Check the last entry is replicated on M2 + for j in range(30): + try: + ent = M2.getEntry(test_user_dn, ldap.SCOPE_BASE,) + if not ent.hasAttr('employeeNumber'): + # wait for the MOD + log.info('M2 waiting for employeeNumber') + time.sleep(1) + continue; + + break; + except ldap.NO_SUCH_OBJECT: + time.sleep(1) + pass + time.sleep(1) + ents = M2.search_s(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE, '(employeeNumber=*)') + assert len(ents) == MAX_USER + +def test_ticket49658_0(topo): + """Do MOD(DEL+ADD) and replicate MOST RECENT first + M1: MOD(DEL+ADD) -> V1 + M2: MOD(DEL+ADD) -> V1 + expected: V1 + + :id: 5360b304-9b33-4d37-935f-ab73e0baa1aa + :setup: 3 Supplier Instances + 1. using user_0 where employNumber=1000 + :steps: + 1. Create 3 suppliers + 2. Isolate M1 and M2 by pausing the replication agreements + 3. On M1 do MOD_DEL 1000 + MOD_ADD_13 + 4. On M2 do MOD_DEL 1000 + MOD_ADD_13 + 5. Enable replication agreement M2 -> M3, so that update step 6 is replicated first + 6. Enable replication agreement M1 -> M3, so that update step 5 is replicated second + 7. Check that the employeeNumber is 13 on all servers + :expectedresults: + 1. Fill in the result that is expected + 2. For each test step + """ + + # If you need any test suite initialization, + # please, write additional fixture for that (including finalizer). + # Topology for suites are predefined in lib389/topologies.py. + + # If you need host, port or any other data about instance, + # Please, use the instance object attributes for that (for example, topo.ms["supplier1"].serverid) + + if DEBUGGING: + # Add debugging steps(if any)... 
+ pass + M1 = topo.ms["supplier1"] + M2 = topo.ms["supplier2"] + M3 = topo.ms["supplier3"] + value_1000 = '1000'.encode() + last = '0' + value_end = last.encode() + theFilter = '(employeeNumber=%s)' % last + (uid, test_user_dn) = _user_get_dn(int(last)) + + # + # Step 2 + # + # disable all RA from M1 and M2 + # only M3 can replicate the update + # + agreement_m1_m2 = M1.agreement.list(suffix=DEFAULT_SUFFIX, consumer_host=M2.host, consumer_port=M2.port) + agreement_m1_m3 = M1.agreement.list(suffix=DEFAULT_SUFFIX, consumer_host=M3.host, consumer_port=M3.port) + agreement_m2_m1 = M2.agreement.list(suffix=DEFAULT_SUFFIX, consumer_host=M1.host, consumer_port=M1.port) + agreement_m2_m3 = M2.agreement.list(suffix=DEFAULT_SUFFIX, consumer_host=M3.host, consumer_port=M3.port) + + M1.agreement.pause(agreement_m1_m2[0].dn) + M1.agreement.pause(agreement_m1_m3[0].dn) + M2.agreement.pause(agreement_m2_m1[0].dn) + M2.agreement.pause(agreement_m2_m3[0].dn) + + # Step 3 + # Oldest update + # check that the entry on M1 contains employeeNumber= + M1.modify_s(test_user_dn, [(ldap.MOD_DELETE, 'employeeNumber', value_1000), (ldap.MOD_ADD, 'employeeNumber', value_end)]) + ents = M1.search_s(BASE_REGULAR, ldap.SCOPE_SUBTREE, theFilter) + assert len(ents) == 1 + time.sleep(1) + + # Step 4 + # More recent update + # check that the entry on M2 contains employeeNumber= + M2.modify_s(test_user_dn, [(ldap.MOD_DELETE, 'employeeNumber', value_1000), (ldap.MOD_ADD, 'employeeNumber', value_end)]) + ents = M2.search_s(BASE_REGULAR, ldap.SCOPE_SUBTREE, theFilter) + assert len(ents) == 1 + + #time.sleep(60) + # Step 7 + # Renable M2 before M1 so that on M3, the most recent update is replicated before + for ra in agreement_m2_m1, agreement_m2_m3: + M2.agreement.resume(ra[0].dn) + + # Step 8 + # Renable M1 so that on M3 oldest update is now replicated + time.sleep(4) + for ra in agreement_m1_m2, agreement_m1_m3: + M1.agreement.resume(ra[0].dn) + + # Step 9 + # Check that M1 still contains 
employeeNumber= + ents = M1.search_s(BASE_REGULAR, ldap.SCOPE_SUBTREE, theFilter) + log.info('Search M1 employeeNumber=%s (vs. %s)' % (ents[0].getValue('employeeNumber'), value_end)) + assert len(ents) == 1 + assert ents[0].hasAttr('employeeNumber') and ents[0].getValue('employeeNumber') == value_end + + # Check that M2 still contains employeeNumber= + ents = M2.search_s(BASE_REGULAR, ldap.SCOPE_SUBTREE, theFilter) + log.info('Search M2 employeeNumber=%s (vs. %s)' % (ents[0].getValue('employeeNumber'), value_end)) + assert len(ents) == 1 + assert ents[0].hasAttr('employeeNumber') and ents[0].getValue('employeeNumber') == value_end + + # Check that M3 still contain employeeNumber and it contains employeeNumber= + ents = M3.search_s(BASE_REGULAR, ldap.SCOPE_SUBTREE, '(employeeNumber=*)') + assert len(ents) == MAX_STANDARD_USER + ents = M3.search_s(BASE_REGULAR, ldap.SCOPE_SUBTREE, theFilter) + log.info('Search M3 employeeNumber=%s (vs. %s)' % (ents[0].getValue('employeeNumber'), value_end)) + assert len(ents) == 1 + assert ents[0].hasAttr('employeeNumber') and ents[0].getValue('employeeNumber') == value_end + +def test_ticket49658_1(topo): + """Do MOD(DEL+ADD) and replicate OLDEST first + M2: MOD(DEL+ADD) -> V1 + M1: MOD(DEL+ADD) -> V1 + expected: V1 + + :id: bc6620d9-eae1-48af-8a4f-bc14405ea6b6 + :setup: 3 Supplier Instances + 1. using user_1 where employNumber=1000 + :steps: + 1. Isolate M1 and M2 by pausing the replication agreements + 2. On M2 do MOD_DEL 1000 + MOD_ADD_13 + 3. On M1 do MOD_DEL 1000 + MOD_ADD_13 + 4. Enable replication agreement M2 -> M3, so that update step 2 is replicated first + 5. Enable replication agreement M1 -> M3, so that update step 3 is replicated second + 6. Check that the employeeNumber is 13 on all servers + :expectedresults: + 1. Fill in the result that is expected + 2. For each test step + """ + + # If you need any test suite initialization, + # please, write additional fixture for that (including finalizer). 
+ # Topology for suites are predefined in lib389/topologies.py. + + # If you need host, port or any other data about instance, + # Please, use the instance object attributes for that (for example, topo.ms["supplier1"].serverid) + + if DEBUGGING: + # Add debugging steps(if any)... + pass + M1 = topo.ms["supplier1"] + M2 = topo.ms["supplier2"] + M3 = topo.ms["supplier3"] + value_1000 = '1000'.encode() + last = '1' + value_end = last.encode() + theFilter = '(employeeNumber=%s)' % last + + # This test takes the user_1 + (uid, test_user_dn) = _user_get_dn(int(1)) + + # + # Step 4 + # + # disable all RA from M1 and M2 + # only M3 can replicate the update + # + agreement_m1_m2 = M1.agreement.list(suffix=DEFAULT_SUFFIX, consumer_host=M2.host, consumer_port=M2.port) + agreement_m1_m3 = M1.agreement.list(suffix=DEFAULT_SUFFIX, consumer_host=M3.host, consumer_port=M3.port) + agreement_m2_m1 = M2.agreement.list(suffix=DEFAULT_SUFFIX, consumer_host=M1.host, consumer_port=M1.port) + agreement_m2_m3 = M2.agreement.list(suffix=DEFAULT_SUFFIX, consumer_host=M3.host, consumer_port=M3.port) + + M1.agreement.pause(agreement_m1_m2[0].dn) + M1.agreement.pause(agreement_m1_m3[0].dn) + M2.agreement.pause(agreement_m2_m1[0].dn) + M2.agreement.pause(agreement_m2_m3[0].dn) + + # Step 5 + # Oldest update + # check that the entry on M2 contains employeeNumber= + M2.modify_s(test_user_dn, [(ldap.MOD_DELETE, 'employeeNumber', value_1000), (ldap.MOD_ADD, 'employeeNumber', value_end)]) + ents = M2.search_s(BASE_REGULAR, ldap.SCOPE_SUBTREE, theFilter) + assert len(ents) == 1 + time.sleep(1) + + # Step 6 + # More recent update + # check that the entry on M2 contains employeeNumber= + M1.modify_s(test_user_dn, [(ldap.MOD_DELETE, 'employeeNumber', value_1000), (ldap.MOD_ADD, 'employeeNumber', value_end)]) + ents = M1.search_s(BASE_REGULAR, ldap.SCOPE_SUBTREE, theFilter) + assert len(ents) == 1 + + #time.sleep(60) + # Step 7 + # Renable M2 before M1 so that on M3, the most recent update is replicated 
before + for ra in agreement_m2_m1, agreement_m2_m3: + M2.agreement.resume(ra[0].dn) + + # Step 8 + # Renable M1 so that on M3 oldest update is now replicated + time.sleep(4) + for ra in agreement_m1_m2, agreement_m1_m3: + M1.agreement.resume(ra[0].dn) + + # Step 9 + # Check that M1 still contains employeeNumber= + ents = M1.search_s(BASE_REGULAR, ldap.SCOPE_SUBTREE, theFilter) + log.info('Search M1 employeeNumber=%s (vs. %s)' % (ents[0].getValue('employeeNumber'), value_end)) + assert len(ents) == 1 + assert ents[0].hasAttr('employeeNumber') and ents[0].getValue('employeeNumber') == value_end + + # Check that M2 still contains employeeNumber= + ents = M2.search_s(BASE_REGULAR, ldap.SCOPE_SUBTREE, theFilter) + log.info('Search M2 employeeNumber=%s (vs. %s)' % (ents[0].getValue('employeeNumber'), value_end)) + assert len(ents) == 1 + assert ents[0].hasAttr('employeeNumber') and ents[0].getValue('employeeNumber') == value_end + + # Check that M3 still contain employeeNumber and it contains employeeNumber= + ents = M3.search_s(BASE_REGULAR, ldap.SCOPE_SUBTREE, '(employeeNumber=*)') + assert len(ents) == MAX_STANDARD_USER + ents = M3.search_s(BASE_REGULAR, ldap.SCOPE_SUBTREE, theFilter) + log.info('Search M3 employeeNumber=%s (vs. %s)' % (ents[0].getValue('employeeNumber'), value_end)) + assert len(ents) == 1 + assert ents[0].hasAttr('employeeNumber') and ents[0].getValue('employeeNumber') == value_end + +def test_ticket49658_2(topo): + """Do MOD(ADD+DEL) and replicate OLDEST first + M2: MOD(ADD+DEL) -> V1 + M1: MOD(ADD+DEL) -> V1 + expected: V1 + + :id: 672ff689-5b76-4107-92be-fb95d08400b3 + :setup: 3 Supplier Instances + 1. using user_2 where employNumber=1000 + :steps: + 1. Isolate M1 and M2 by pausing the replication agreements + 2. On M2 do MOD_DEL 1000 + MOD_ADD_13 + 3. On M1 do MOD_DEL 1000 + MOD_ADD_13 + 4. Enable replication agreement M2 -> M3, so that update step 2 is replicated first + 5. 
Enable replication agreement M1 -> M3, so that update step 3 is replicated second + 6. Check that the employeeNumber is 13 on all servers + :expectedresults: + 1. Fill in the result that is expected + 2. For each test step + """ + + # If you need any test suite initialization, + # please, write additional fixture for that (including finalizer). + # Topology for suites are predefined in lib389/topologies.py. + + # If you need host, port or any other data about instance, + # Please, use the instance object attributes for that (for example, topo.ms["supplier1"].serverid) + + if DEBUGGING: + # Add debugging steps(if any)... + pass + M1 = topo.ms["supplier1"] + M2 = topo.ms["supplier2"] + M3 = topo.ms["supplier3"] + value_1000 = '1000'.encode() + last = '2' + value_end = last.encode() + theFilter = '(employeeNumber=%s)' % last + + # This test takes the user_1 + (uid, test_user_dn) = _user_get_dn(int(last)) + + # + # Step 4 + # + # disable all RA from M1 and M2 + # only M3 can replicate the update + # + agreement_m1_m2 = M1.agreement.list(suffix=DEFAULT_SUFFIX, consumer_host=M2.host, consumer_port=M2.port) + agreement_m1_m3 = M1.agreement.list(suffix=DEFAULT_SUFFIX, consumer_host=M3.host, consumer_port=M3.port) + agreement_m2_m1 = M2.agreement.list(suffix=DEFAULT_SUFFIX, consumer_host=M1.host, consumer_port=M1.port) + agreement_m2_m3 = M2.agreement.list(suffix=DEFAULT_SUFFIX, consumer_host=M3.host, consumer_port=M3.port) + + M1.agreement.pause(agreement_m1_m2[0].dn) + M1.agreement.pause(agreement_m1_m3[0].dn) + M2.agreement.pause(agreement_m2_m1[0].dn) + M2.agreement.pause(agreement_m2_m3[0].dn) + + # Step 5 + # Oldest update + # check that the entry on M2 contains employeeNumber= + M2.modify_s(test_user_dn, [(ldap.MOD_ADD, 'employeeNumber', value_end),(ldap.MOD_DELETE, 'employeeNumber', value_1000)]) + ents = M2.search_s(BASE_REGULAR, ldap.SCOPE_SUBTREE, theFilter) + assert len(ents) == 1 + time.sleep(1) + + # Step 6 + # More recent update + # check that the entry on M2 
contains employeeNumber= + M1.modify_s(test_user_dn, [(ldap.MOD_ADD, 'employeeNumber', value_end), (ldap.MOD_DELETE, 'employeeNumber', value_1000)]) + ents = M1.search_s(BASE_REGULAR, ldap.SCOPE_SUBTREE, theFilter) + assert len(ents) == 1 + + #time.sleep(60) + # Step 7 + # Renable M2 before M1 so that on M3, the most recent update is replicated before + for ra in agreement_m2_m1, agreement_m2_m3: + M2.agreement.resume(ra[0].dn) + + # Step 8 + # Renable M1 so that on M3 oldest update is now replicated + time.sleep(4) + for ra in agreement_m1_m2, agreement_m1_m3: + M1.agreement.resume(ra[0].dn) + + # Step 9 + # Check that M1 still contains employeeNumber= + ents = M1.search_s(BASE_REGULAR, ldap.SCOPE_SUBTREE, theFilter) + log.info('Search M1 employeeNumber=%s (vs. %s)' % (ents[0].getValue('employeeNumber'), value_end)) + assert len(ents) == 1 + assert ents[0].hasAttr('employeeNumber') and ents[0].getValue('employeeNumber') == value_end + + # Check that M2 still contains employeeNumber= + ents = M2.search_s(BASE_REGULAR, ldap.SCOPE_SUBTREE, theFilter) + log.info('Search M2 employeeNumber=%s (vs. %s)' % (ents[0].getValue('employeeNumber'), value_end)) + assert len(ents) == 1 + assert ents[0].hasAttr('employeeNumber') and ents[0].getValue('employeeNumber') == value_end + + # Check that M3 still contain employeeNumber and it contains employeeNumber= + ents = M3.search_s(BASE_REGULAR, ldap.SCOPE_SUBTREE, '(employeeNumber=*)') + assert len(ents) == MAX_STANDARD_USER + ents = M3.search_s(BASE_REGULAR, ldap.SCOPE_SUBTREE, theFilter) + log.info('Search M3 employeeNumber=%s (vs. 
%s)' % (ents[0].getValue('employeeNumber'), value_end)) + assert len(ents) == 1 + assert ents[0].hasAttr('employeeNumber') and ents[0].getValue('employeeNumber') == value_end + +def test_ticket49658_3(topo): + """Do MOD(ADD+DEL) and replicate MOST RECENT first + M1: MOD(ADD+DEL) -> V1 + M2: MOD(ADD+DEL) -> V1 + expected: V1 + + :id: b25e508a-8bf2-4351-88f6-3b6c098ccc44 + :setup: 3 Supplier Instances + 1. using user_2 where employNumber=1000 + :steps: + 1. Isolate M1 and M2 by pausing the replication agreements + 2. On M1 do MOD_DEL 1000 + MOD_ADD_13 + 3. On M2 do MOD_DEL 1000 + MOD_ADD_13 + 4. Enable replication agreement M2 -> M3, so that update step 2 is replicated first + 5. Enable replication agreement M1 -> M3, so that update step 3 is replicated second + 6. Check that the employeeNumber is 13 on all servers + :expectedresults: + 1. Fill in the result that is expected + 2. For each test step + """ + + # If you need any test suite initialization, + # please, write additional fixture for that (including finalizer). + # Topology for suites are predefined in lib389/topologies.py. + + # If you need host, port or any other data about instance, + # Please, use the instance object attributes for that (for example, topo.ms["supplier1"].serverid) + + if DEBUGGING: + # Add debugging steps(if any)... 
+ pass + M1 = topo.ms["supplier1"] + M2 = topo.ms["supplier2"] + M3 = topo.ms["supplier3"] + value_1000 = '1000'.encode() + last = '3' + value_end = last.encode() + theFilter = '(employeeNumber=%s)' % last + + # This test takes the user_1 + (uid, test_user_dn) = _user_get_dn(int(last)) + + # + # Step 4 + # + # disable all RA from M1 and M2 + # only M3 can replicate the update + # + agreement_m1_m2 = M1.agreement.list(suffix=DEFAULT_SUFFIX, consumer_host=M2.host, consumer_port=M2.port) + agreement_m1_m3 = M1.agreement.list(suffix=DEFAULT_SUFFIX, consumer_host=M3.host, consumer_port=M3.port) + agreement_m2_m1 = M2.agreement.list(suffix=DEFAULT_SUFFIX, consumer_host=M1.host, consumer_port=M1.port) + agreement_m2_m3 = M2.agreement.list(suffix=DEFAULT_SUFFIX, consumer_host=M3.host, consumer_port=M3.port) + + M1.agreement.pause(agreement_m1_m2[0].dn) + M1.agreement.pause(agreement_m1_m3[0].dn) + M2.agreement.pause(agreement_m2_m1[0].dn) + M2.agreement.pause(agreement_m2_m3[0].dn) + + # Step 5 + # Oldest update + # check that the entry on M1 contains employeeNumber= + M1.modify_s(test_user_dn, [(ldap.MOD_ADD, 'employeeNumber', value_end),(ldap.MOD_DELETE, 'employeeNumber', value_1000)]) + ents = M1.search_s(BASE_REGULAR, ldap.SCOPE_SUBTREE, theFilter) + assert len(ents) == 1 + time.sleep(1) + + # Step 6 + # More recent update + # check that the entry on M2 contains employeeNumber= + M2.modify_s(test_user_dn, [(ldap.MOD_ADD, 'employeeNumber', value_end), (ldap.MOD_DELETE, 'employeeNumber', value_1000)]) + ents = M2.search_s(BASE_REGULAR, ldap.SCOPE_SUBTREE, theFilter) + assert len(ents) == 1 + + #time.sleep(60) + # Step 7 + # Renable M2 before M1 so that on M3, the most recent update is replicated before + for ra in agreement_m2_m1, agreement_m2_m3: + M2.agreement.resume(ra[0].dn) + + # Step 8 + # Renable M1 so that on M3 oldest update is now replicated + time.sleep(4) + for ra in agreement_m1_m2, agreement_m1_m3: + M1.agreement.resume(ra[0].dn) + + # Step 9 + # Check that 
M1 still contains employeeNumber= + ents = M1.search_s(BASE_REGULAR, ldap.SCOPE_SUBTREE, theFilter) + log.info('Search M1 employeeNumber=%s (vs. %s)' % (ents[0].getValue('employeeNumber'), value_end)) + assert len(ents) == 1 + assert ents[0].hasAttr('employeeNumber') and ents[0].getValue('employeeNumber') == value_end + + # Check that M2 still contains employeeNumber= + ents = M2.search_s(BASE_REGULAR, ldap.SCOPE_SUBTREE, theFilter) + log.info('Search M2 employeeNumber=%s (vs. %s)' % (ents[0].getValue('employeeNumber'), value_end)) + assert len(ents) == 1 + assert ents[0].hasAttr('employeeNumber') and ents[0].getValue('employeeNumber') == value_end + + # Check that M3 still contain employeeNumber and it contains employeeNumber= + ents = M3.search_s(BASE_REGULAR, ldap.SCOPE_SUBTREE, '(employeeNumber=*)') + assert len(ents) == MAX_STANDARD_USER + ents = M3.search_s(BASE_REGULAR, ldap.SCOPE_SUBTREE, theFilter) + log.info('Search M3 employeeNumber=%s (vs. %s)' % (ents[0].getValue('employeeNumber'), value_end)) + assert len(ents) == 1 + assert ents[0].hasAttr('employeeNumber') and ents[0].getValue('employeeNumber') == value_end + +def test_ticket49658_4(topo): + """Do MOD(ADD+DEL) MOD(REPL) and replicate MOST RECENT first + M1: MOD(ADD+DEL) -> V1 + M2: MOD(REPL) -> V1 + expected: V1 + + :id: 8f7ce9ff-e36f-48cd-b0ed-b7077a3e7341 + :setup: 3 Supplier Instances + 1. using user_2 where employNumber=1000 + :steps: + 1. Isolate M1 and M2 by pausing the replication agreements + 2. On M1 do MOD_DEL 1000 + MOD_ADD_13 + 3. On M2 do MOD_REPL _13 + 4. Enable replication agreement M2 -> M3, so that update step 2 is replicated first + 5. Enable replication agreement M1 -> M3, so that update step 3 is replicated second + 6. Check that the employeeNumber is 13 on all servers + :expectedresults: + 1. Fill in the result that is expected + 2. For each test step + """ + + # If you need any test suite initialization, + # please, write additional fixture for that (including finalizer). 
+ # Topology for suites are predefined in lib389/topologies.py. + + # If you need host, port or any other data about instance, + # Please, use the instance object attributes for that (for example, topo.ms["supplier1"].serverid) + + if DEBUGGING: + # Add debugging steps(if any)... + pass + M1 = topo.ms["supplier1"] + M2 = topo.ms["supplier2"] + M3 = topo.ms["supplier3"] + value_1000 = '1000'.encode() + last = '4' + value_end = last.encode() + theFilter = '(employeeNumber=%s)' % last + + # This test takes the user_1 + (uid, test_user_dn) = _user_get_dn(int(last)) + + # + # Step 4 + # + # disable all RA from M1 and M2 + # only M3 can replicate the update + # + agreement_m1_m2 = M1.agreement.list(suffix=DEFAULT_SUFFIX, consumer_host=M2.host, consumer_port=M2.port) + agreement_m1_m3 = M1.agreement.list(suffix=DEFAULT_SUFFIX, consumer_host=M3.host, consumer_port=M3.port) + agreement_m2_m1 = M2.agreement.list(suffix=DEFAULT_SUFFIX, consumer_host=M1.host, consumer_port=M1.port) + agreement_m2_m3 = M2.agreement.list(suffix=DEFAULT_SUFFIX, consumer_host=M3.host, consumer_port=M3.port) + + M1.agreement.pause(agreement_m1_m2[0].dn) + M1.agreement.pause(agreement_m1_m3[0].dn) + M2.agreement.pause(agreement_m2_m1[0].dn) + M2.agreement.pause(agreement_m2_m3[0].dn) + + # Step 5 + # Oldest update + # check that the entry on M1 contains employeeNumber= + M1.modify_s(test_user_dn, [(ldap.MOD_ADD, 'employeeNumber', value_end),(ldap.MOD_DELETE, 'employeeNumber', value_1000)]) + ents = M1.search_s(BASE_REGULAR, ldap.SCOPE_SUBTREE, theFilter) + assert len(ents) == 1 + time.sleep(1) + + # Step 6 + # More recent update + # check that the entry on M2 contains employeeNumber= + M2.modify_s(test_user_dn, [(ldap.MOD_REPLACE, 'employeeNumber', value_end)]) + ents = M2.search_s(BASE_REGULAR, ldap.SCOPE_SUBTREE, theFilter) + assert len(ents) == 1 + + #time.sleep(60) + # Step 7 + # Renable M2 before M1 so that on M3, the most recent update is replicated before + for ra in agreement_m2_m1, 
agreement_m2_m3: + M2.agreement.resume(ra[0].dn) + + # Step 8 + # Renable M1 so that on M3 oldest update is now replicated + time.sleep(4) + for ra in agreement_m1_m2, agreement_m1_m3: + M1.agreement.resume(ra[0].dn) + + # Step 9 + # Check that M1 still contains employeeNumber= + ents = M1.search_s(BASE_REGULAR, ldap.SCOPE_SUBTREE, theFilter) + log.info('Search M1 employeeNumber=%s (vs. %s)' % (ents[0].getValue('employeeNumber'), value_end)) + assert len(ents) == 1 + assert ents[0].hasAttr('employeeNumber') and ents[0].getValue('employeeNumber') == value_end + + # Check that M2 still contains employeeNumber= + ents = M2.search_s(BASE_REGULAR, ldap.SCOPE_SUBTREE, theFilter) + log.info('Search M2 employeeNumber=%s (vs. %s)' % (ents[0].getValue('employeeNumber'), value_end)) + assert len(ents) == 1 + assert ents[0].hasAttr('employeeNumber') and ents[0].getValue('employeeNumber') == value_end + + # Check that M3 still contain employeeNumber and it contains employeeNumber= + ents = M3.search_s(BASE_REGULAR, ldap.SCOPE_SUBTREE, '(employeeNumber=*)') + assert len(ents) == MAX_STANDARD_USER + ents = M3.search_s(BASE_REGULAR, ldap.SCOPE_SUBTREE, theFilter) + log.info('Search M3 employeeNumber=%s (vs. %s)' % (ents[0].getValue('employeeNumber'), value_end)) + assert len(ents) == 1 + assert ents[0].hasAttr('employeeNumber') and ents[0].getValue('employeeNumber') == value_end + +def test_ticket49658_5(topo): + """Do MOD(REPL) MOD(ADD+DEL) and replicate MOST RECENT first + M1: MOD(REPL) -> V1 + M2: MOD(ADD+DEL) -> V1 + expected: V1 + + :id: d6b88e3c-a509-4d3e-8e5d-849237993f47 + :setup: 3 Supplier Instances + 1. using user_2 where employNumber=1000 + :steps: + 1. Isolate M1 and M2 by pausing the replication agreements + 2. On M1 do MOD_DEL 1000 + MOD_ADD_13 + 3. On M2 do MOD_REPL _13 + 4. Enable replication agreement M2 -> M3, so that update step 2 is replicated first + 5. Enable replication agreement M1 -> M3, so that update step 3 is replicated second + 6. 
Check that the employeeNumber is 13 on all servers + :expectedresults: + 1. Fill in the result that is expected + 2. For each test step + """ + + # If you need any test suite initialization, + # please, write additional fixture for that (including finalizer). + # Topology for suites are predefined in lib389/topologies.py. + + # If you need host, port or any other data about instance, + # Please, use the instance object attributes for that (for example, topo.ms["supplier1"].serverid) + + if DEBUGGING: + # Add debugging steps(if any)... + pass + M1 = topo.ms["supplier1"] + M2 = topo.ms["supplier2"] + M3 = topo.ms["supplier3"] + value_1000 = '1000'.encode() + last = '5' + value_end = last.encode() + theFilter = '(employeeNumber=%s)' % last + + # This test takes the user_1 + (uid, test_user_dn) = _user_get_dn(int(last)) + + # + # Step 4 + # + # disable all RA from M1 and M2 + # only M3 can replicate the update + # + agreement_m1_m2 = M1.agreement.list(suffix=DEFAULT_SUFFIX, consumer_host=M2.host, consumer_port=M2.port) + agreement_m1_m3 = M1.agreement.list(suffix=DEFAULT_SUFFIX, consumer_host=M3.host, consumer_port=M3.port) + agreement_m2_m1 = M2.agreement.list(suffix=DEFAULT_SUFFIX, consumer_host=M1.host, consumer_port=M1.port) + agreement_m2_m3 = M2.agreement.list(suffix=DEFAULT_SUFFIX, consumer_host=M3.host, consumer_port=M3.port) + + M1.agreement.pause(agreement_m1_m2[0].dn) + M1.agreement.pause(agreement_m1_m3[0].dn) + M2.agreement.pause(agreement_m2_m1[0].dn) + M2.agreement.pause(agreement_m2_m3[0].dn) + + # Step 5 + # Oldest update + # check that the entry on M1 contains employeeNumber= + M1.modify_s(test_user_dn, [(ldap.MOD_REPLACE, 'employeeNumber', value_end)]) + ents = M1.search_s(BASE_REGULAR, ldap.SCOPE_SUBTREE, theFilter) + assert len(ents) == 1 + time.sleep(1) + + # Step 6 + # More recent update + # check that the entry on M2 contains employeeNumber= + M2.modify_s(test_user_dn, [(ldap.MOD_ADD, 'employeeNumber', value_end),(ldap.MOD_DELETE, 
'employeeNumber', value_1000)]) + ents = M2.search_s(BASE_REGULAR, ldap.SCOPE_SUBTREE, theFilter) + assert len(ents) == 1 + + #time.sleep(60) + # Step 7 + # Renable M2 before M1 so that on M3, the most recent update is replicated before + for ra in agreement_m2_m1, agreement_m2_m3: + M2.agreement.resume(ra[0].dn) + + # Step 8 + # Renable M1 so that on M3 oldest update is now replicated + time.sleep(4) + for ra in agreement_m1_m2, agreement_m1_m3: + M1.agreement.resume(ra[0].dn) + + # Step 9 + # Check that M1 still contains employeeNumber= + ents = M1.search_s(BASE_REGULAR, ldap.SCOPE_SUBTREE, theFilter) + log.info('Search M1 employeeNumber=%s (vs. %s)' % (ents[0].getValue('employeeNumber'), value_end)) + assert len(ents) == 1 + assert ents[0].hasAttr('employeeNumber') and ents[0].getValue('employeeNumber') == value_end + + # Check that M2 still contains employeeNumber= + ents = M2.search_s(BASE_REGULAR, ldap.SCOPE_SUBTREE, theFilter) + log.info('Search M2 employeeNumber=%s (vs. %s)' % (ents[0].getValue('employeeNumber'), value_end)) + assert len(ents) == 1 + assert ents[0].hasAttr('employeeNumber') and ents[0].getValue('employeeNumber') == value_end + + # Check that M3 still contain employeeNumber and it contains employeeNumber= + ents = M3.search_s(BASE_REGULAR, ldap.SCOPE_SUBTREE, '(employeeNumber=*)') + assert len(ents) == MAX_STANDARD_USER + ents = M3.search_s(BASE_REGULAR, ldap.SCOPE_SUBTREE, theFilter) + log.info('Search M3 employeeNumber=%s (vs. %s)' % (ents[0].getValue('employeeNumber'), value_end)) + assert len(ents) == 1 + assert ents[0].hasAttr('employeeNumber') and ents[0].getValue('employeeNumber') == value_end + +def test_ticket49658_6(topo): + """Do + M1: MOD(REPL) -> V1 + M2: MOD(ADD+DEL) -> V2 + expected: V2 + + :id: 5eb67db1-2ff2-4c17-85af-e124b45aace3 + :setup: 3 Supplier Instances + 1. using user_2 where employNumber=1000 + :steps: + 1. Isolate M1 and M2 by pausing the replication agreements + 2. On M1 do MOD_DEL 1000 + MOD_ADD_13 + 3. 
On M2 do MOD_REPL _13 + 4. Enable replication agreement M2 -> M3, so that update step 2 is replicated first + 5. Enable replication agreement M1 -> M3, so that update step 3 is replicated second + 6. Check that the employeeNumber is 13 on all servers + :expectedresults: + 1. Fill in the result that is expected + 2. For each test step + """ + + # If you need any test suite initialization, + # please, write additional fixture for that (including finalizer). + # Topology for suites are predefined in lib389/topologies.py. + + # If you need host, port or any other data about instance, + # Please, use the instance object attributes for that (for example, topo.ms["supplier1"].serverid) + + + + if DEBUGGING: + # Add debugging steps(if any)... + pass + M1 = topo.ms["supplier1"] + M2 = topo.ms["supplier2"] + M3 = topo.ms["supplier3"] + value_1000 = '1000' + last = '6' + value_S1 = '6.1' + value_S2 = '6.2' + + description = { + "S1": M1, + "S2": M2, + "S1_MOD": [(ldap.MOD_REPLACE, 'employeeNumber', value_S1.encode())], + "S2_MOD": [(ldap.MOD_ADD, 'employeeNumber', value_S2.encode()),(ldap.MOD_DELETE, 'employeeNumber', value_1000.encode())], + "expected": value_S2} + + # This test takes the user_1 + (uid, test_user_dn) = _user_get_dn(int(last)) + + # + # Step 4 + # + # disable all RA from M1 and M2 + # only M3 can replicate the update + # + agreement_m1_m2 = M1.agreement.list(suffix=DEFAULT_SUFFIX, consumer_host=M2.host, consumer_port=M2.port) + agreement_m1_m3 = M1.agreement.list(suffix=DEFAULT_SUFFIX, consumer_host=M3.host, consumer_port=M3.port) + agreement_m2_m1 = M2.agreement.list(suffix=DEFAULT_SUFFIX, consumer_host=M1.host, consumer_port=M1.port) + agreement_m2_m3 = M2.agreement.list(suffix=DEFAULT_SUFFIX, consumer_host=M3.host, consumer_port=M3.port) + + M1.agreement.pause(agreement_m1_m2[0].dn) + M1.agreement.pause(agreement_m1_m3[0].dn) + M2.agreement.pause(agreement_m2_m1[0].dn) + M2.agreement.pause(agreement_m2_m3[0].dn) + + # Step 5 + # Oldest update + # check 
that the entry on M1 contains employeeNumber= + description["S1"].modify_s(test_user_dn, description["S1_MOD"]) + ents = description["S1"].search_s(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE, '(employeeNumber=%s)' % value_S1) + assert len(ents) == 1 + time.sleep(1) + + # Step 6 + # More recent update + # check that the entry on M2 contains employeeNumber= + description["S2"].modify_s(test_user_dn, description["S2_MOD"]) + ents = description["S2"].search_s(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE, '(employeeNumber=%s)' % value_S2) + assert len(ents) == 1 + + #time.sleep(60) + # Step 7 + # Renable M2 before M1 so that on M3, the most recent update is replicated before + for ra in agreement_m2_m1, agreement_m2_m3: + M2.agreement.resume(ra[0].dn) + + # Step 8 + # Renable M1 so that on M3 oldest update is now replicated + time.sleep(4) + for ra in agreement_m1_m2, agreement_m1_m3: + M1.agreement.resume(ra[0].dn) + + # Step 9 + # Check that M1 still contains employeeNumber= + ents = M1.search_s(BASE_REGULAR, ldap.SCOPE_SUBTREE, '(employeeNumber=%s)' % description["expected"]) + log.info('Search M1 employeeNumber=%s (vs. %s)' % (ents[0].getValue('employeeNumber'), description["expected"])) + assert len(ents) == 1 + assert ents[0].hasAttr('employeeNumber') and ents[0].getValue('employeeNumber') == description["expected"].encode() + + # Check that M2 still contains employeeNumber= + ents = M2.search_s(BASE_REGULAR, ldap.SCOPE_SUBTREE, '(employeeNumber=%s)' % description["expected"]) + log.info('Search M2 employeeNumber=%s (vs. 
%s)' % (ents[0].getValue('employeeNumber'), description["expected"])) + assert len(ents) == 1 + assert ents[0].hasAttr('employeeNumber') and ents[0].getValue('employeeNumber') == description["expected"].encode() + + # Check that M3 still contain employeeNumber and it contains employeeNumber= + ents = M3.search_s(BASE_REGULAR, ldap.SCOPE_SUBTREE, '(employeeNumber=*)') + assert len(ents) == MAX_STANDARD_USER + ents = M3.search_s(BASE_REGULAR, ldap.SCOPE_SUBTREE, '(employeeNumber=%s)' % description["expected"]) + log.info('Search M3 employeeNumber=%s (vs. %s)' % (ents[0].getValue('employeeNumber'), description["expected"])) + assert len(ents) == 1 + assert ents[0].hasAttr('employeeNumber') and ents[0].getValue('employeeNumber') == description["expected"].encode() + +def test_ticket49658_7(topo): + """Do + M1: MOD(ADD+DEL) -> V1 + M2: MOD(REPL) -> V2 + expected: V2 + + :id: a79036ca-0e1b-453e-9524-fb44e1d7c929 + :setup: 3 Supplier Instances + :steps: + :expectedresults: + 1. Fill in the result that is expected + 2. For each test step + """ + + # If you need any test suite initialization, + # please, write additional fixture for that (including finalizer). + # Topology for suites are predefined in lib389/topologies.py. + + # If you need host, port or any other data about instance, + # Please, use the instance object attributes for that (for example, topo.ms["supplier1"].serverid) + + + + if DEBUGGING: + # Add debugging steps(if any)... 
+ pass + M1 = topo.ms["supplier1"] + M2 = topo.ms["supplier2"] + M3 = topo.ms["supplier3"] + value_1000 = '1000' + last = '7' + value_S1 = '7.1' + value_S2 = '7.2' + + description = { + "S1": M1, + "S2": M2, + "S1_MOD": [(ldap.MOD_ADD, 'employeeNumber', value_S1.encode()),(ldap.MOD_DELETE, 'employeeNumber', value_1000.encode())], + "S2_MOD": [(ldap.MOD_REPLACE, 'employeeNumber', value_S2.encode())], + "expected": value_S2} + + # This test takes the user_1 + (uid, test_user_dn) = _user_get_dn(int(last)) + + # + # Step 4 + # + # disable all RA from M1 and M2 + # only M3 can replicate the update + # + agreement_m1_m2 = M1.agreement.list(suffix=DEFAULT_SUFFIX, consumer_host=M2.host, consumer_port=M2.port) + agreement_m1_m3 = M1.agreement.list(suffix=DEFAULT_SUFFIX, consumer_host=M3.host, consumer_port=M3.port) + agreement_m2_m1 = M2.agreement.list(suffix=DEFAULT_SUFFIX, consumer_host=M1.host, consumer_port=M1.port) + agreement_m2_m3 = M2.agreement.list(suffix=DEFAULT_SUFFIX, consumer_host=M3.host, consumer_port=M3.port) + + M1.agreement.pause(agreement_m1_m2[0].dn) + M1.agreement.pause(agreement_m1_m3[0].dn) + M2.agreement.pause(agreement_m2_m1[0].dn) + M2.agreement.pause(agreement_m2_m3[0].dn) + + # Step 5 + # Oldest update + # check that the entry on M1 contains employeeNumber= + description["S1"].modify_s(test_user_dn, description["S1_MOD"]) + ents = description["S1"].search_s(BASE_REGULAR, ldap.SCOPE_SUBTREE, '(employeeNumber=%s)' % value_S1) + assert len(ents) == 1 + time.sleep(1) + + # Step 6 + # More recent update + # check that the entry on M2 contains employeeNumber= + description["S2"].modify_s(test_user_dn, description["S2_MOD"]) + ents = description["S2"].search_s(BASE_REGULAR, ldap.SCOPE_SUBTREE, '(employeeNumber=%s)' % value_S2) + assert len(ents) == 1 + + #time.sleep(60) + # Step 7 + # Renable M2 before M1 so that on M3, the most recent update is replicated before + for ra in agreement_m2_m1, agreement_m2_m3: + M2.agreement.resume(ra[0].dn) + + # Step 8 
+ # Renable M1 so that on M3 oldest update is now replicated + time.sleep(4) + for ra in agreement_m1_m2, agreement_m1_m3: + M1.agreement.resume(ra[0].dn) + + # Step 9 + # Check that M1 still contains employeeNumber= + ents = M1.search_s(BASE_REGULAR, ldap.SCOPE_SUBTREE, '(employeeNumber=%s)' % description["expected"]) + log.info('Search M1 employeeNumber=%s (vs. %s)' % (ents[0].getValue('employeeNumber'), description["expected"])) + assert len(ents) == 1 + assert ents[0].hasAttr('employeeNumber') and ents[0].getValue('employeeNumber') == description["expected"].encode() + + # Check that M2 still contains employeeNumber= + ents = M2.search_s(BASE_REGULAR, ldap.SCOPE_SUBTREE, '(employeeNumber=%s)' % description["expected"]) + log.info('Search M2 employeeNumber=%s (vs. %s)' % (ents[0].getValue('employeeNumber'), description["expected"])) + assert len(ents) == 1 + assert ents[0].hasAttr('employeeNumber') and ents[0].getValue('employeeNumber') == description["expected"].encode() + + # Check that M3 still contain employeeNumber and it contains employeeNumber= + ents = M3.search_s(BASE_REGULAR, ldap.SCOPE_SUBTREE, '(employeeNumber=*)') + assert len(ents) == MAX_STANDARD_USER + ents = M3.search_s(BASE_REGULAR, ldap.SCOPE_SUBTREE, '(employeeNumber=%s)' % description["expected"]) + log.info('Search M3 employeeNumber=%s (vs. %s)' % (ents[0].getValue('employeeNumber'), description["expected"])) + assert len(ents) == 1 + assert ents[0].hasAttr('employeeNumber') and ents[0].getValue('employeeNumber') == description["expected"].encode() + +def test_ticket49658_8(topo): + """Do + M1: MOD(DEL+ADD) -> V1 + M2: MOD(REPL) -> V2 + expected: V2 + + :id: 06acb988-b735-424a-9886-b0557ee12a9a + :setup: 3 Supplier Instances + :steps: + :expectedresults: + 1. Fill in the result that is expected + 2. For each test step + """ + + # If you need any test suite initialization, + # please, write additional fixture for that (including finalizer). 
+ # Topology for suites are predefined in lib389/topologies.py. + + # If you need host, port or any other data about instance, + # Please, use the instance object attributes for that (for example, topo.ms["supplier1"].serverid) + + + + if DEBUGGING: + # Add debugging steps(if any)... + pass + M1 = topo.ms["supplier1"] + M2 = topo.ms["supplier2"] + M3 = topo.ms["supplier3"] + value_1000 = '1000' + last = '8' + value_S1 = '8.1' + value_S2 = '8.2' + + description = { + "S1": M1, + "S2": M2, + "S1_MOD": [(ldap.MOD_DELETE, 'employeeNumber', value_1000.encode()),(ldap.MOD_ADD, 'employeeNumber', value_S1.encode())], + "S2_MOD": [(ldap.MOD_REPLACE, 'employeeNumber', value_S2.encode())], + "expected": value_S2} + + # This test takes the user_1 + (uid, test_user_dn) = _user_get_dn(int(last)) + + # + # Step 4 + # + # disable all RA from M1 and M2 + # only M3 can replicate the update + # + agreement_m1_m2 = M1.agreement.list(suffix=DEFAULT_SUFFIX, consumer_host=M2.host, consumer_port=M2.port) + agreement_m1_m3 = M1.agreement.list(suffix=DEFAULT_SUFFIX, consumer_host=M3.host, consumer_port=M3.port) + agreement_m2_m1 = M2.agreement.list(suffix=DEFAULT_SUFFIX, consumer_host=M1.host, consumer_port=M1.port) + agreement_m2_m3 = M2.agreement.list(suffix=DEFAULT_SUFFIX, consumer_host=M3.host, consumer_port=M3.port) + + M1.agreement.pause(agreement_m1_m2[0].dn) + M1.agreement.pause(agreement_m1_m3[0].dn) + M2.agreement.pause(agreement_m2_m1[0].dn) + M2.agreement.pause(agreement_m2_m3[0].dn) + + # Step 5 + # Oldest update + # check that the entry on M1 contains employeeNumber= + description["S1"].modify_s(test_user_dn, description["S1_MOD"]) + ents = description["S1"].search_s(BASE_REGULAR, ldap.SCOPE_SUBTREE, '(employeeNumber=%s)' % value_S1) + assert len(ents) == 1 + time.sleep(1) + + # Step 6 + # More recent update + # check that the entry on M2 contains employeeNumber= + description["S2"].modify_s(test_user_dn, description["S2_MOD"]) + ents = description["S2"].search_s(BASE_REGULAR, 
ldap.SCOPE_SUBTREE, '(employeeNumber=%s)' % value_S2) + assert len(ents) == 1 + + #time.sleep(60) + # Step 7 + # Renable M2 before M1 so that on M3, the most recent update is replicated before + for ra in agreement_m2_m1, agreement_m2_m3: + M2.agreement.resume(ra[0].dn) + + # Step 8 + # Renable M1 so that on M3 oldest update is now replicated + time.sleep(4) + for ra in agreement_m1_m2, agreement_m1_m3: + M1.agreement.resume(ra[0].dn) + + # Step 9 + # Check that M1 still contains employeeNumber= + ents = M1.search_s(BASE_REGULAR, ldap.SCOPE_SUBTREE, '(employeeNumber=%s)' % description["expected"]) + log.info('Search M1 employeeNumber=%s (vs. %s)' % (ents[0].getValue('employeeNumber'), description["expected"])) + assert len(ents) == 1 + assert ents[0].hasAttr('employeeNumber') and ents[0].getValue('employeeNumber') == description["expected"].encode() + + # Check that M2 still contains employeeNumber= + ents = M2.search_s(BASE_REGULAR, ldap.SCOPE_SUBTREE, '(employeeNumber=%s)' % description["expected"]) + log.info('Search M2 employeeNumber=%s (vs. %s)' % (ents[0].getValue('employeeNumber'), description["expected"])) + assert len(ents) == 1 + assert ents[0].hasAttr('employeeNumber') and ents[0].getValue('employeeNumber') == description["expected"].encode() + + # Check that M3 still contain employeeNumber and it contains employeeNumber= + ents = M3.search_s(BASE_REGULAR, ldap.SCOPE_SUBTREE, '(employeeNumber=*)') + assert len(ents) == MAX_STANDARD_USER + ents = M3.search_s(BASE_REGULAR, ldap.SCOPE_SUBTREE, '(employeeNumber=%s)' % description["expected"]) + log.info('Search M3 employeeNumber=%s (vs. 
%s)' % (ents[0].getValue('employeeNumber'), description["expected"])) + assert len(ents) == 1 + assert ents[0].hasAttr('employeeNumber') and ents[0].getValue('employeeNumber') == description["expected"].encode() + + +def test_ticket49658_9(topo): + """Do + M1: MOD(REPL) -> V1 + M2: MOD(DEL+ADD) -> V2 + expected: V2 + + :id: 3a4c1be3-e3b9-44fe-aa5a-72a3b1a8985c + :setup: 3 Supplier Instances + :steps: + :expectedresults: + 1. Fill in the result that is expected + 2. For each test step + """ + + # If you need any test suite initialization, + # please, write additional fixture for that (including finalizer). + # Topology for suites are predefined in lib389/topologies.py. + + # If you need host, port or any other data about instance, + # Please, use the instance object attributes for that (for example, topo.ms["supplier1"].serverid) + + + + if DEBUGGING: + # Add debugging steps(if any)... + pass + M1 = topo.ms["supplier1"] + M2 = topo.ms["supplier2"] + M3 = topo.ms["supplier3"] + value_1000 = '1000' + last = '9' + value_S1 = '9.1' + value_S2 = '9.2' + + description = { + "S1": M1, + "S2": M2, + "S1_MOD": [(ldap.MOD_REPLACE, 'employeeNumber', value_S1.encode())], + "S2_MOD": [(ldap.MOD_DELETE, 'employeeNumber', value_1000.encode()),(ldap.MOD_ADD, 'employeeNumber', value_S2.encode())], + "expected": value_S2} + + # This test takes the user_1 + (uid, test_user_dn) = _user_get_dn(int(last)) + + # + # Step 4 + # + # disable all RA from M1 and M2 + # only M3 can replicate the update + # + agreement_m1_m2 = M1.agreement.list(suffix=DEFAULT_SUFFIX, consumer_host=M2.host, consumer_port=M2.port) + agreement_m1_m3 = M1.agreement.list(suffix=DEFAULT_SUFFIX, consumer_host=M3.host, consumer_port=M3.port) + agreement_m2_m1 = M2.agreement.list(suffix=DEFAULT_SUFFIX, consumer_host=M1.host, consumer_port=M1.port) + agreement_m2_m3 = M2.agreement.list(suffix=DEFAULT_SUFFIX, consumer_host=M3.host, consumer_port=M3.port) + + M1.agreement.pause(agreement_m1_m2[0].dn) + 
M1.agreement.pause(agreement_m1_m3[0].dn) + M2.agreement.pause(agreement_m2_m1[0].dn) + M2.agreement.pause(agreement_m2_m3[0].dn) + + # Step 5 + # Oldest update + # check that the entry on M1 contains employeeNumber= + description["S1"].modify_s(test_user_dn, description["S1_MOD"]) + ents = description["S1"].search_s(BASE_REGULAR, ldap.SCOPE_SUBTREE, '(employeeNumber=%s)' % value_S1) + assert len(ents) == 1 + time.sleep(1) + + # Step 6 + # More recent update + # check that the entry on M2 contains employeeNumber= + description["S2"].modify_s(test_user_dn, description["S2_MOD"]) + ents = description["S2"].search_s(BASE_REGULAR, ldap.SCOPE_SUBTREE, '(employeeNumber=%s)' % value_S2) + assert len(ents) == 1 + + #time.sleep(60) + # Step 7 + # Renable M2 before M1 so that on M3, the most recent update is replicated before + for ra in agreement_m2_m1, agreement_m2_m3: + M2.agreement.resume(ra[0].dn) + + # Step 8 + # Renable M1 so that on M3 oldest update is now replicated + time.sleep(4) + for ra in agreement_m1_m2, agreement_m1_m3: + M1.agreement.resume(ra[0].dn) + + # Step 9 + # Check that M1 still contains employeeNumber= + ents = M1.search_s(BASE_REGULAR, ldap.SCOPE_SUBTREE, '(employeeNumber=%s)' % description["expected"]) + log.info('Search M1 employeeNumber=%s (vs. %s)' % (ents[0].getValue('employeeNumber'), description["expected"])) + assert len(ents) == 1 + assert ents[0].hasAttr('employeeNumber') and ents[0].getValue('employeeNumber') == description["expected"].encode() + + # Check that M2 still contains employeeNumber= + ents = M2.search_s(BASE_REGULAR, ldap.SCOPE_SUBTREE, '(employeeNumber=%s)' % description["expected"]) + log.info('Search M2 employeeNumber=%s (vs. 
%s)' % (ents[0].getValue('employeeNumber'), description["expected"])) + assert len(ents) == 1 + assert ents[0].hasAttr('employeeNumber') and ents[0].getValue('employeeNumber') == description["expected"].encode() + + # Check that M3 still contain employeeNumber and it contains employeeNumber= + ents = M3.search_s(BASE_REGULAR, ldap.SCOPE_SUBTREE, '(employeeNumber=*)') + assert len(ents) == MAX_STANDARD_USER + ents = M3.search_s(BASE_REGULAR, ldap.SCOPE_SUBTREE, '(employeeNumber=%s)' % description["expected"]) + log.info('Search M3 employeeNumber=%s (vs. %s)' % (ents[0].getValue('employeeNumber'), description["expected"])) + assert len(ents) == 1 + assert ents[0].hasAttr('employeeNumber') and ents[0].getValue('employeeNumber') == description["expected"].encode() + + + +def test_ticket49658_10(topo): + """Do + M1: MOD(REPL) -> V1 + M2: MOD(REPL) -> V2 + expected: V2 + + :id: 1413341a-45e6-422a-b6cc-9fde6fc9bb15 + :setup: 3 Supplier Instances + :steps: + :expectedresults: + 1. Fill in the result that is expected + 2. For each test step + """ + + # If you need any test suite initialization, + # please, write additional fixture for that (including finalizer). + # Topology for suites are predefined in lib389/topologies.py. + + # If you need host, port or any other data about instance, + # Please, use the instance object attributes for that (for example, topo.ms["supplier1"].serverid) + + + + if DEBUGGING: + # Add debugging steps(if any)... 
+ pass + M1 = topo.ms["supplier1"] + M2 = topo.ms["supplier2"] + M3 = topo.ms["supplier3"] + value_1000 = '1000' + last = '10' + value_S1 = '10.1' + value_S2 = '10.2' + + description = { + "S1": M1, + "S2": M2, + "S1_MOD": [(ldap.MOD_REPLACE, 'employeeNumber', value_S1.encode())], + "S2_MOD": [(ldap.MOD_REPLACE, 'employeeNumber', value_S2.encode())], + "expected": value_S2} + + # This test takes the user_1 + (uid, test_user_dn) = _user_get_dn(int(last)) + + # + # Step 4 + # + # disable all RA from M1 and M2 + # only M3 can replicate the update + # + agreement_m1_m2 = M1.agreement.list(suffix=DEFAULT_SUFFIX, consumer_host=M2.host, consumer_port=M2.port) + agreement_m1_m3 = M1.agreement.list(suffix=DEFAULT_SUFFIX, consumer_host=M3.host, consumer_port=M3.port) + agreement_m2_m1 = M2.agreement.list(suffix=DEFAULT_SUFFIX, consumer_host=M1.host, consumer_port=M1.port) + agreement_m2_m3 = M2.agreement.list(suffix=DEFAULT_SUFFIX, consumer_host=M3.host, consumer_port=M3.port) + + M1.agreement.pause(agreement_m1_m2[0].dn) + M1.agreement.pause(agreement_m1_m3[0].dn) + M2.agreement.pause(agreement_m2_m1[0].dn) + M2.agreement.pause(agreement_m2_m3[0].dn) + + # Step 5 + # Oldest update + # check that the entry on M1 contains employeeNumber= + description["S1"].modify_s(test_user_dn, description["S1_MOD"]) + ents = description["S1"].search_s(BASE_REGULAR, ldap.SCOPE_SUBTREE, '(employeeNumber=%s)' % value_S1) + assert len(ents) == 1 + time.sleep(1) + + # Step 6 + # More recent update + # check that the entry on M2 contains employeeNumber= + description["S2"].modify_s(test_user_dn, description["S2_MOD"]) + ents = description["S2"].search_s(BASE_REGULAR, ldap.SCOPE_SUBTREE, '(employeeNumber=%s)' % value_S2) + assert len(ents) == 1 + + #time.sleep(60) + # Step 7 + # Renable M2 before M1 so that on M3, the most recent update is replicated before + for ra in agreement_m2_m1, agreement_m2_m3: + M2.agreement.resume(ra[0].dn) + + # Step 8 + # Renable M1 so that on M3 oldest update is now 
replicated + time.sleep(4) + for ra in agreement_m1_m2, agreement_m1_m3: + M1.agreement.resume(ra[0].dn) + + # Step 9 + # Check that M1 still contains employeeNumber= + ents = M1.search_s(BASE_REGULAR, ldap.SCOPE_SUBTREE, '(employeeNumber=%s)' % description["expected"]) + log.info('Search M1 employeeNumber=%s (vs. %s)' % (ents[0].getValue('employeeNumber'), description["expected"])) + assert len(ents) == 1 + assert ents[0].hasAttr('employeeNumber') and ents[0].getValue('employeeNumber') == description["expected"].encode() + + # Check that M2 still contains employeeNumber= + ents = M2.search_s(BASE_REGULAR, ldap.SCOPE_SUBTREE, '(employeeNumber=%s)' % description["expected"]) + log.info('Search M2 employeeNumber=%s (vs. %s)' % (ents[0].getValue('employeeNumber'), description["expected"])) + assert len(ents) == 1 + assert ents[0].hasAttr('employeeNumber') and ents[0].getValue('employeeNumber') == description["expected"].encode() + + # Check that M3 still contain employeeNumber and it contains employeeNumber= + ents = M3.search_s(BASE_REGULAR, ldap.SCOPE_SUBTREE, '(employeeNumber=*)') + assert len(ents) == MAX_STANDARD_USER + ents = M3.search_s(BASE_REGULAR, ldap.SCOPE_SUBTREE, '(employeeNumber=%s)' % description["expected"]) + log.info('Search M3 employeeNumber=%s (vs. %s)' % (ents[0].getValue('employeeNumber'), description["expected"])) + assert len(ents) == 1 + assert ents[0].hasAttr('employeeNumber') and ents[0].getValue('employeeNumber') == description["expected"].encode() + + + +def test_ticket49658_11(topo): + """Do + M2: MOD(REPL) -> V2 + M1: MOD(REPL) -> V1 + expected: V1 + + :id: a2810403-418b-41d7-948c-6f8ca46e2f29 + :setup: 3 Supplier Instances + :steps: + :expectedresults: + 1. Fill in the result that is expected + 2. For each test step + """ + + # If you need any test suite initialization, + # please, write additional fixture for that (including finalizer). + # Topology for suites are predefined in lib389/topologies.py. 
+ + # If you need host, port or any other data about instance, + # Please, use the instance object attributes for that (for example, topo.ms["supplier1"].serverid) + + + + if DEBUGGING: + # Add debugging steps(if any)... + pass + M1 = topo.ms["supplier1"] + M2 = topo.ms["supplier2"] + M3 = topo.ms["supplier3"] + value_1000 = '1000' + last = '11' + value_S1 = '11.1' + value_S2 = '11.2' + + description = { + "S1": M1, + "S2": M2, + "S1_MOD": [(ldap.MOD_REPLACE, 'employeeNumber', value_S1.encode())], + "S2_MOD": [(ldap.MOD_REPLACE, 'employeeNumber', value_S2.encode())], + "expected": value_S1} + + # This test takes the user_1 + (uid, test_user_dn) = _user_get_dn(int(last)) + + # + # Step 4 + # + # disable all RA from M1 and M2 + # only M3 can replicate the update + # + agreement_m1_m2 = M1.agreement.list(suffix=DEFAULT_SUFFIX, consumer_host=M2.host, consumer_port=M2.port) + agreement_m1_m3 = M1.agreement.list(suffix=DEFAULT_SUFFIX, consumer_host=M3.host, consumer_port=M3.port) + agreement_m2_m1 = M2.agreement.list(suffix=DEFAULT_SUFFIX, consumer_host=M1.host, consumer_port=M1.port) + agreement_m2_m3 = M2.agreement.list(suffix=DEFAULT_SUFFIX, consumer_host=M3.host, consumer_port=M3.port) + + M1.agreement.pause(agreement_m1_m2[0].dn) + M1.agreement.pause(agreement_m1_m3[0].dn) + M2.agreement.pause(agreement_m2_m1[0].dn) + M2.agreement.pause(agreement_m2_m3[0].dn) + + # Step 5 + # Oldest update + # check that the entry on M1 contains employeeNumber= + description["S2"].modify_s(test_user_dn, description["S2_MOD"]) + ents = description["S2"].search_s(BASE_REGULAR, ldap.SCOPE_SUBTREE, '(employeeNumber=%s)' % value_S2) + assert len(ents) == 1 + time.sleep(1) + + # Step 6 + # More recent update + # check that the entry on M2 contains employeeNumber= + description["S1"].modify_s(test_user_dn, description["S1_MOD"]) + ents = description["S1"].search_s(BASE_REGULAR, ldap.SCOPE_SUBTREE, '(employeeNumber=%s)' % value_S1) + assert len(ents) == 1 + + #time.sleep(60) + # Step 7 + # 
Renable M2 before M1 so that on M3, the most recent update is replicated before + for ra in agreement_m2_m1, agreement_m2_m3: + M2.agreement.resume(ra[0].dn) + + # Step 8 + # Renable M1 so that on M3 oldest update is now replicated + time.sleep(4) + for ra in agreement_m1_m2, agreement_m1_m3: + M1.agreement.resume(ra[0].dn) + + # Step 9 + # Check that M1 still contains employeeNumber= + ents = M1.search_s(BASE_REGULAR, ldap.SCOPE_SUBTREE, '(employeeNumber=%s)' % description["expected"]) + log.info('Search M1 employeeNumber=%s (vs. %s)' % (ents[0].getValue('employeeNumber'), description["expected"])) + assert len(ents) == 1 + assert ents[0].hasAttr('employeeNumber') and ents[0].getValue('employeeNumber') == description["expected"].encode() + + # Check that M2 still contains employeeNumber= + ents = M2.search_s(BASE_REGULAR, ldap.SCOPE_SUBTREE, '(employeeNumber=%s)' % description["expected"]) + log.info('Search M2 employeeNumber=%s (vs. %s)' % (ents[0].getValue('employeeNumber'), description["expected"])) + assert len(ents) == 1 + assert ents[0].hasAttr('employeeNumber') and ents[0].getValue('employeeNumber') == description["expected"].encode() + + # Check that M3 still contain employeeNumber and it contains employeeNumber= + ents = M3.search_s(BASE_REGULAR, ldap.SCOPE_SUBTREE, '(employeeNumber=*)') + assert len(ents) == MAX_STANDARD_USER + ents = M3.search_s(BASE_REGULAR, ldap.SCOPE_SUBTREE, '(employeeNumber=%s)' % description["expected"]) + log.info('Search M3 employeeNumber=%s (vs. %s)' % (ents[0].getValue('employeeNumber'), description["expected"])) + assert len(ents) == 1 + assert ents[0].hasAttr('employeeNumber') and ents[0].getValue('employeeNumber') == description["expected"].encode() + +def test_ticket49658_12(topo): + """Do + M2: MOD(ADD+DEL) -> V2 + M1: MOD(REPL) -> V1 + expected: V1 + + :id: daba6f3c-e060-4d3f-8f9c-25ea4c1bca48 + :setup: 3 Supplier Instances + 1. using user_2 where employNumber=1000 + :steps: + 1. 
Isolate M1 and M2 by pausing the replication agreements + 2. On M1 do MOD_DEL 1000 + MOD_ADD_13 + 3. On M2 do MOD_REPL _13 + 4. Enable replication agreement M2 -> M3, so that update step 2 is replicated first + 5. Enable replication agreement M1 -> M3, so that update step 3 is replicated second + 6. Check that the employeeNumber is 13 on all servers + :expectedresults: + 1. Fill in the result that is expected + 2. For each test step + """ + + # If you need any test suite initialization, + # please, write additional fixture for that (including finalizer). + # Topology for suites are predefined in lib389/topologies.py. + + # If you need host, port or any other data about instance, + # Please, use the instance object attributes for that (for example, topo.ms["supplier1"].serverid) + + + + if DEBUGGING: + # Add debugging steps(if any)... + pass + M1 = topo.ms["supplier1"] + M2 = topo.ms["supplier2"] + M3 = topo.ms["supplier3"] + value_1000 = '1000' + last = '12' + value_S1 = '12.1' + value_S2 = '12.2' + + description = { + "S1": M1, + "S2": M2, + "S1_MOD": [(ldap.MOD_REPLACE, 'employeeNumber', value_S1.encode())], + "S2_MOD": [(ldap.MOD_ADD, 'employeeNumber', value_S2.encode()),(ldap.MOD_DELETE, 'employeeNumber', value_1000.encode())], + "expected": value_S1} + + # This test takes the user_1 + (uid, test_user_dn) = _user_get_dn(int(last)) + + # + # Step 4 + # + # disable all RA from M1 and M2 + # only M3 can replicate the update + # + agreement_m1_m2 = M1.agreement.list(suffix=DEFAULT_SUFFIX, consumer_host=M2.host, consumer_port=M2.port) + agreement_m1_m3 = M1.agreement.list(suffix=DEFAULT_SUFFIX, consumer_host=M3.host, consumer_port=M3.port) + agreement_m2_m1 = M2.agreement.list(suffix=DEFAULT_SUFFIX, consumer_host=M1.host, consumer_port=M1.port) + agreement_m2_m3 = M2.agreement.list(suffix=DEFAULT_SUFFIX, consumer_host=M3.host, consumer_port=M3.port) + + M1.agreement.pause(agreement_m1_m2[0].dn) + M1.agreement.pause(agreement_m1_m3[0].dn) + 
M2.agreement.pause(agreement_m2_m1[0].dn) + M2.agreement.pause(agreement_m2_m3[0].dn) + + # Step 5 + # Oldest update + # check that the entry on M1 contains employeeNumber= + description["S2"].modify_s(test_user_dn, description["S2_MOD"]) + ents = description["S2"].search_s(BASE_REGULAR, ldap.SCOPE_SUBTREE, '(employeeNumber=%s)' % value_S2) + assert len(ents) == 1 + time.sleep(1) + + # Step 6 + # More recent update + # check that the entry on M2 contains employeeNumber= + description["S1"].modify_s(test_user_dn, description["S1_MOD"]) + ents = description["S1"].search_s(BASE_REGULAR, ldap.SCOPE_SUBTREE, '(employeeNumber=%s)' % value_S1) + assert len(ents) == 1 + + #time.sleep(60) + # Step 7 + # Renable M2 before M1 so that on M3, the most recent update is replicated before + for ra in agreement_m2_m1, agreement_m2_m3: + M2.agreement.resume(ra[0].dn) + + # Step 8 + # Renable M1 so that on M3 oldest update is now replicated + time.sleep(4) + for ra in agreement_m1_m2, agreement_m1_m3: + M1.agreement.resume(ra[0].dn) + + # Step 9 + # Check that M1 still contains employeeNumber= + ents = M1.search_s(BASE_REGULAR, ldap.SCOPE_SUBTREE, '(employeeNumber=%s)' % description["expected"]) + log.info('Search M1 employeeNumber=%s (vs. %s)' % (ents[0].getValue('employeeNumber'), description["expected"])) + assert len(ents) == 1 + assert ents[0].hasAttr('employeeNumber') and ents[0].getValue('employeeNumber') == description["expected"].encode() + + # Check that M2 still contains employeeNumber= + ents = M2.search_s(BASE_REGULAR, ldap.SCOPE_SUBTREE, '(employeeNumber=%s)' % description["expected"]) + log.info('Search M2 employeeNumber=%s (vs. 
%s)' % (ents[0].getValue('employeeNumber'), description["expected"])) + assert len(ents) == 1 + assert ents[0].hasAttr('employeeNumber') and ents[0].getValue('employeeNumber') == description["expected"].encode() + + # Check that M3 still contain employeeNumber and it contains employeeNumber= + ents = M3.search_s(BASE_REGULAR, ldap.SCOPE_SUBTREE, '(employeeNumber=*)') + assert len(ents) == MAX_STANDARD_USER + ents = M3.search_s(BASE_REGULAR, ldap.SCOPE_SUBTREE, '(employeeNumber=%s)' % description["expected"]) + log.info('Search M3 employeeNumber=%s (vs. %s)' % (ents[0].getValue('employeeNumber'), description["expected"])) + assert len(ents) == 1 + assert ents[0].hasAttr('employeeNumber') and ents[0].getValue('employeeNumber') == description["expected"].encode() + +def test_ticket49658_13(topo): + """Do + M2: MOD(DEL+ADD) -> V2 + M1: MOD(REPL) -> V1 + expected: V1 + + :id: 50006b1f-d17c-47a1-86a5-4d78b2a6eab1 + :setup: 3 Supplier Instances + 1. using user_2 where employNumber=1000 + :steps: + 1. Isolate M1 and M2 by pausing the replication agreements + 2. On M1 do MOD_DEL 1000 + MOD_ADD_13 + 3. On M2 do MOD_REPL _13 + 4. Enable replication agreement M2 -> M3, so that update step 2 is replicated first + 5. Enable replication agreement M1 -> M3, so that update step 3 is replicated second + 6. Check that the employeeNumber is 13 on all servers + :expectedresults: + 1. Fill in the result that is expected + 2. For each test step + """ + + # If you need any test suite initialization, + # please, write additional fixture for that (including finalizer). + # Topology for suites are predefined in lib389/topologies.py. + + # If you need host, port or any other data about instance, + # Please, use the instance object attributes for that (for example, topo.ms["supplier1"].serverid) + + + + if DEBUGGING: + # Add debugging steps(if any)... 
+ pass + M1 = topo.ms["supplier1"] + M2 = topo.ms["supplier2"] + M3 = topo.ms["supplier3"] + value_1000 = '1000' + last = '13' + value_S1 = '13.1' + value_S2 = '13.2' + + description = { + "S1": M1, + "S2": M2, + "S1_MOD": [(ldap.MOD_REPLACE, 'employeeNumber', value_S1.encode())], + "S2_MOD": [(ldap.MOD_DELETE, 'employeeNumber', value_1000.encode()),(ldap.MOD_ADD, 'employeeNumber', value_S2.encode())], + "expected": value_S1} + + # This test takes the user_1 + (uid, test_user_dn) = _user_get_dn(int(last)) + + # + # Step 4 + # + # disable all RA from M1 and M2 + # only M3 can replicate the update + # + agreement_m1_m2 = M1.agreement.list(suffix=DEFAULT_SUFFIX, consumer_host=M2.host, consumer_port=M2.port) + agreement_m1_m3 = M1.agreement.list(suffix=DEFAULT_SUFFIX, consumer_host=M3.host, consumer_port=M3.port) + agreement_m2_m1 = M2.agreement.list(suffix=DEFAULT_SUFFIX, consumer_host=M1.host, consumer_port=M1.port) + agreement_m2_m3 = M2.agreement.list(suffix=DEFAULT_SUFFIX, consumer_host=M3.host, consumer_port=M3.port) + + M1.agreement.pause(agreement_m1_m2[0].dn) + M1.agreement.pause(agreement_m1_m3[0].dn) + M2.agreement.pause(agreement_m2_m1[0].dn) + M2.agreement.pause(agreement_m2_m3[0].dn) + + # Step 5 + # Oldest update + # check that the entry on M1 contains employeeNumber= + description["S2"].modify_s(test_user_dn, description["S2_MOD"]) + ents = description["S2"].search_s(BASE_REGULAR, ldap.SCOPE_SUBTREE, '(employeeNumber=%s)' % value_S2) + assert len(ents) == 1 + time.sleep(1) + + # Step 6 + # More recent update + # check that the entry on M2 contains employeeNumber= + description["S1"].modify_s(test_user_dn, description["S1_MOD"]) + ents = description["S1"].search_s(BASE_REGULAR, ldap.SCOPE_SUBTREE, '(employeeNumber=%s)' % value_S1) + assert len(ents) == 1 + + #time.sleep(60) + # Step 7 + # Renable M2 before M1 so that on M3, the most recent update is replicated before + for ra in agreement_m2_m1, agreement_m2_m3: + M2.agreement.resume(ra[0].dn) + + # Step 
def test_ticket49658_14(topo):
    """URP conflict resolution when both suppliers do MOD(DEL+ADD) on the same attribute

    M2: MOD(DEL+ADD) -> V2
    M1: MOD(DEL+ADD) -> V1
    expected: V1

    :id: d45c58f1-c95e-4314-9cdd-53a2dd391218
    :setup: 3 Supplier Instances
        1. using user_14 where employeeNumber=1000
    :steps:
        1. Isolate M1 and M2 by pausing all their replication agreements
        2. On M2 do MOD_DELETE 1000 + MOD_ADD 14.2 (oldest update)
        3. On M1 do MOD_DELETE 1000 + MOD_ADD 14.1 (most recent update)
        4. Resume the agreements of M2 first, then M1, so that M3 receives
           the most recent update before the oldest one
        5. Check the final employeeNumber value on all servers
    :expectedresults:
        1. Replication agreements are paused
        2. M2 contains employeeNumber=14.2
        3. M1 contains employeeNumber=14.1
        4. Agreements are resumed and updates replicate to M3
        5. The most recent value (14.1) is the single value on M1, M2 and M3
    """
    if DEBUGGING:
        # Add debugging steps(if any)...
        pass

    M1 = topo.ms["supplier1"]
    M2 = topo.ms["supplier2"]
    M3 = topo.ms["supplier3"]
    value_1000 = '1000'
    last = '14'
    value_S1 = '14.1'
    value_S2 = '14.2'

    description = {
        "S1": M1,
        "S2": M2,
        "S1_MOD": [(ldap.MOD_DELETE, 'employeeNumber', value_1000.encode()),
                   (ldap.MOD_ADD, 'employeeNumber', value_S1.encode())],
        "S2_MOD": [(ldap.MOD_DELETE, 'employeeNumber', value_1000.encode()),
                   (ldap.MOD_ADD, 'employeeNumber', value_S2.encode())],
        "expected": value_S1}

    # This test takes user_14 (uid is not needed afterwards)
    (_, test_user_dn) = _user_get_dn(int(last))

    # Isolate M1 and M2: with all their agreements paused,
    # only M3 can later receive the updates
    agreement_m1_m2 = M1.agreement.list(suffix=DEFAULT_SUFFIX, consumer_host=M2.host, consumer_port=M2.port)
    agreement_m1_m3 = M1.agreement.list(suffix=DEFAULT_SUFFIX, consumer_host=M3.host, consumer_port=M3.port)
    agreement_m2_m1 = M2.agreement.list(suffix=DEFAULT_SUFFIX, consumer_host=M1.host, consumer_port=M1.port)
    agreement_m2_m3 = M2.agreement.list(suffix=DEFAULT_SUFFIX, consumer_host=M3.host, consumer_port=M3.port)

    M1.agreement.pause(agreement_m1_m2[0].dn)
    M1.agreement.pause(agreement_m1_m3[0].dn)
    M2.agreement.pause(agreement_m2_m1[0].dn)
    M2.agreement.pause(agreement_m2_m3[0].dn)

    # Oldest update: apply S2's MOD and check it landed locally
    description["S2"].modify_s(test_user_dn, description["S2_MOD"])
    ents = description["S2"].search_s(BASE_REGULAR, ldap.SCOPE_SUBTREE, '(employeeNumber=%s)' % value_S2)
    assert len(ents) == 1
    time.sleep(1)

    # More recent update: apply S1's MOD and check it landed locally
    description["S1"].modify_s(test_user_dn, description["S1_MOD"])
    ents = description["S1"].search_s(BASE_REGULAR, ldap.SCOPE_SUBTREE, '(employeeNumber=%s)' % value_S1)
    assert len(ents) == 1

    # Resume M2 before M1 so that on M3 the most recent update is replicated first
    for ra in agreement_m2_m1, agreement_m2_m3:
        M2.agreement.resume(ra[0].dn)

    # Resume M1 so that on M3 the oldest update is replicated last
    time.sleep(4)
    for ra in agreement_m1_m2, agreement_m1_m3:
        M1.agreement.resume(ra[0].dn)

    def _assert_expected(supplier, label):
        # The expected value must be the only matching employeeNumber on *supplier*
        ents = supplier.search_s(BASE_REGULAR, ldap.SCOPE_SUBTREE, '(employeeNumber=%s)' % description["expected"])
        log.info('Search %s employeeNumber=%s (vs. %s)' % (label, ents[0].getValue('employeeNumber'), description["expected"]))
        assert len(ents) == 1
        assert ents[0].hasAttr('employeeNumber') and ents[0].getValue('employeeNumber') == description["expected"].encode()

    # Check that M1 and M2 converged on the expected value
    _assert_expected(M1, 'M1')
    _assert_expected(M2, 'M2')

    # Check that every standard user on M3 still has an employeeNumber,
    # and that the test user converged on the expected value
    ents = M3.search_s(BASE_REGULAR, ldap.SCOPE_SUBTREE, '(employeeNumber=*)')
    assert len(ents) == MAX_STANDARD_USER
    _assert_expected(M3, 'M3')
def test_ticket49658_15(topo):
    """URP conflict resolution when suppliers do MOD(ADD+DEL) vs MOD(DEL+ADD)

    M2: MOD(ADD+DEL) -> V2
    M1: MOD(DEL+ADD) -> V1
    expected: V1

    :id: e077f312-e0af-497a-8a31-3395873512d8
    :setup: 3 Supplier Instances
        1. using user_15 where employeeNumber=1000
    :steps:
        1. Isolate M1 and M2 by pausing all their replication agreements
        2. On M2 do MOD_ADD 15.2 + MOD_DELETE 1000 (oldest update)
        3. On M1 do MOD_DELETE 1000 + MOD_ADD 15.1 (most recent update)
        4. Resume the agreements of M2 first, then M1, so that M3 receives
           the most recent update before the oldest one
        5. Check the final employeeNumber value on all servers
    :expectedresults:
        1. Replication agreements are paused
        2. M2 contains employeeNumber=15.2
        3. M1 contains employeeNumber=15.1
        4. Agreements are resumed and updates replicate to M3
        5. The most recent value (15.1) is the single value on M1, M2 and M3
    """
    if DEBUGGING:
        # Add debugging steps(if any)...
        pass

    M1 = topo.ms["supplier1"]
    M2 = topo.ms["supplier2"]
    M3 = topo.ms["supplier3"]
    value_1000 = '1000'
    last = '15'
    value_S1 = '15.1'
    value_S2 = '15.2'

    description = {
        "S1": M1,
        "S2": M2,
        "S1_MOD": [(ldap.MOD_DELETE, 'employeeNumber', value_1000.encode()),
                   (ldap.MOD_ADD, 'employeeNumber', value_S1.encode())],
        "S2_MOD": [(ldap.MOD_ADD, 'employeeNumber', value_S2.encode()),
                   (ldap.MOD_DELETE, 'employeeNumber', value_1000.encode())],
        "expected": value_S1}

    # This test takes user_15 (uid is not needed afterwards)
    (_, test_user_dn) = _user_get_dn(int(last))

    # Isolate M1 and M2: with all their agreements paused,
    # only M3 can later receive the updates
    agreement_m1_m2 = M1.agreement.list(suffix=DEFAULT_SUFFIX, consumer_host=M2.host, consumer_port=M2.port)
    agreement_m1_m3 = M1.agreement.list(suffix=DEFAULT_SUFFIX, consumer_host=M3.host, consumer_port=M3.port)
    agreement_m2_m1 = M2.agreement.list(suffix=DEFAULT_SUFFIX, consumer_host=M1.host, consumer_port=M1.port)
    agreement_m2_m3 = M2.agreement.list(suffix=DEFAULT_SUFFIX, consumer_host=M3.host, consumer_port=M3.port)

    M1.agreement.pause(agreement_m1_m2[0].dn)
    M1.agreement.pause(agreement_m1_m3[0].dn)
    M2.agreement.pause(agreement_m2_m1[0].dn)
    M2.agreement.pause(agreement_m2_m3[0].dn)

    # Oldest update: apply S2's MOD and check it landed locally
    description["S2"].modify_s(test_user_dn, description["S2_MOD"])
    ents = description["S2"].search_s(BASE_REGULAR, ldap.SCOPE_SUBTREE, '(employeeNumber=%s)' % value_S2)
    assert len(ents) == 1
    time.sleep(1)

    # More recent update: apply S1's MOD and check it landed locally
    description["S1"].modify_s(test_user_dn, description["S1_MOD"])
    ents = description["S1"].search_s(BASE_REGULAR, ldap.SCOPE_SUBTREE, '(employeeNumber=%s)' % value_S1)
    assert len(ents) == 1

    # Resume M2 before M1 so that on M3 the most recent update is replicated first
    for ra in agreement_m2_m1, agreement_m2_m3:
        M2.agreement.resume(ra[0].dn)

    # Resume M1 so that on M3 the oldest update is replicated last
    time.sleep(4)
    for ra in agreement_m1_m2, agreement_m1_m3:
        M1.agreement.resume(ra[0].dn)

    def _assert_expected(supplier, label):
        # The expected value must be the only matching employeeNumber on *supplier*
        ents = supplier.search_s(BASE_REGULAR, ldap.SCOPE_SUBTREE, '(employeeNumber=%s)' % description["expected"])
        log.info('Search %s employeeNumber=%s (vs. %s)' % (label, ents[0].getValue('employeeNumber'), description["expected"]))
        assert len(ents) == 1
        assert ents[0].hasAttr('employeeNumber') and ents[0].getValue('employeeNumber') == description["expected"].encode()

    # Check that M1 and M2 converged on the expected value
    _assert_expected(M1, 'M1')
    _assert_expected(M2, 'M2')

    # Check that every standard user on M3 still has an employeeNumber,
    # and that the test user converged on the expected value
    ents = M3.search_s(BASE_REGULAR, ldap.SCOPE_SUBTREE, '(employeeNumber=*)')
    assert len(ents) == MAX_STANDARD_USER
    _assert_expected(M3, 'M3')
def _resume_all_ra(supplier, consumers):
    """Resume the replication agreement of *supplier* toward each instance in *consumers*."""
    for consumer in consumers:
        ra = supplier.agreement.list(suffix=DEFAULT_SUFFIX,
                                     consumer_host=consumer.host,
                                     consumer_port=consumer.port)
        supplier.agreement.resume(ra[0].dn)


def _resume_ra_M1_then_M2(M1, M2, M3):
    """Resume M1's agreements first, then M2's, pausing 4s between and after.

    Used so that M3 receives M1's pending updates before M2's.
    """
    _resume_all_ra(M1, (M2, M3))
    time.sleep(4)
    _resume_all_ra(M2, (M1, M3))
    time.sleep(4)


def _resume_ra_M2_then_M1(M1, M2, M3):
    """Resume M2's agreements first, then M1's, pausing 4s between and after.

    Used so that M3 receives M2's pending updates before M1's.
    """
    _resume_all_ra(M2, (M1, M3))
    time.sleep(4)
    _resume_all_ra(M1, (M2, M3))
    time.sleep(4)
def test_ticket49658_16(topo):
    """URP conflict resolution when both suppliers MODRDN to the same value

    M1: MODRDN -> V1
    M2: MODRDN -> V1
    expected: V1
    resume order: M2, M1

    :id: 131b4e4c-0a6d-45df-88aa-cb26a1cd6fa6
    :setup: 3 Supplier Instances
        1. using the employeeNumber=1,ou=distinguished,ou=people entry
    :steps:
        1. Isolate M1 and M2 by pausing all their replication agreements
        2. On M1 MODRDN the entry to employeeNumber=1.1 (oldest update)
        3. On M2 MODRDN the same entry to employeeNumber=1.1 (most recent update)
        4. Resume the agreements of M2 first, then M1
        5. Check the final employeeNumber value on all servers
    :expectedresults:
        1. Replication agreements are paused
        2. M1 contains employeeNumber=1.1
        3. M2 contains employeeNumber=1.1
        4. Agreements are resumed and updates replicate to M3
        5. employeeNumber=1.1 is the single value on M1, M2 and M3
    """
    if DEBUGGING:
        # Add debugging steps(if any)...
        pass

    M1 = topo.ms["supplier1"]
    M2 = topo.ms["supplier2"]
    M3 = topo.ms["supplier3"]
    last = '1'
    value_S1 = '1.1'
    value_S2 = value_S1

    description = {
        "S1": M1,
        "S2": M2,
        "S1_MODRDN": value_S1,
        "S2_MODRDN": value_S2,
        "expected": value_S1}

    # This test takes the employeeNumber=1 entry (number is not needed afterwards)
    (_, test_user_dn) = _employeenumber_user_get_dn(int(last))

    # Isolate M1 and M2: with all their agreements paused,
    # only M3 can later receive the updates
    agreement_m1_m2 = M1.agreement.list(suffix=DEFAULT_SUFFIX, consumer_host=M2.host, consumer_port=M2.port)
    agreement_m1_m3 = M1.agreement.list(suffix=DEFAULT_SUFFIX, consumer_host=M3.host, consumer_port=M3.port)
    agreement_m2_m1 = M2.agreement.list(suffix=DEFAULT_SUFFIX, consumer_host=M1.host, consumer_port=M1.port)
    agreement_m2_m3 = M2.agreement.list(suffix=DEFAULT_SUFFIX, consumer_host=M3.host, consumer_port=M3.port)

    M1.agreement.pause(agreement_m1_m2[0].dn)
    M1.agreement.pause(agreement_m1_m3[0].dn)
    M2.agreement.pause(agreement_m2_m1[0].dn)
    M2.agreement.pause(agreement_m2_m3[0].dn)

    # Oldest update: MODRDN on S1 and check it landed locally
    description["S1"].rename_s(test_user_dn, 'employeeNumber=%s' % description["S1_MODRDN"], newsuperior=BASE_DISTINGUISHED, delold=1)
    ents = description["S1"].search_s(BASE_DISTINGUISHED, ldap.SCOPE_SUBTREE, '(employeeNumber=%s)' % description["S1_MODRDN"])
    assert len(ents) == 1
    time.sleep(1)

    # More recent update: MODRDN on S2 and check it landed locally
    description["S2"].rename_s(test_user_dn, 'employeeNumber=%s' % description["S2_MODRDN"], newsuperior=BASE_DISTINGUISHED, delold=1)
    ents = description["S2"].search_s(BASE_DISTINGUISHED, ldap.SCOPE_SUBTREE, '(employeeNumber=%s)' % description["S2_MODRDN"])
    assert len(ents) == 1

    _resume_ra_M2_then_M1(M1, M2, M3)

    def _assert_expected(supplier, label):
        # The expected value must be the only matching employeeNumber on *supplier*
        ents = supplier.search_s(BASE_DISTINGUISHED, ldap.SCOPE_SUBTREE, '(employeeNumber=%s)' % description["expected"])
        log.info('Search %s employeeNumber=%s (vs. %s)' % (label, ents[0].getValue('employeeNumber'), description["expected"]))
        assert len(ents) == 1
        assert ents[0].hasAttr('employeeNumber') and ents[0].getValue('employeeNumber') == description["expected"].encode()

    # Check that M1 and M2 converged on the expected value
    _assert_expected(M1, 'M1')
    _assert_expected(M2, 'M2')

    # Check that every distinguished user on M3 still has an employeeNumber,
    # and that the test entry converged on the expected value
    ents = M3.search_s(BASE_DISTINGUISHED, ldap.SCOPE_SUBTREE, '(employeeNumber=*)')
    assert len(ents) == MAX_EMPLOYEENUMBER_USER
    _assert_expected(M3, 'M3')
def test_ticket49658_17(topo):
    """URP conflict resolution when the suppliers MODRDN to different values

    M1: MODRDN -> V1
    M2: MODRDN -> V2
    expected: V2
    resume order: M2 then M1

    :id: 1d3423ec-a2f3-4c03-9765-ec0924f03cb2
    :setup: 3 Supplier Instances
        1. using the employeeNumber=2,ou=distinguished,ou=people entry
    :steps:
        1. Isolate M1 and M2 by pausing all their replication agreements
        2. On M1 MODRDN the entry to employeeNumber=2.1 (oldest update)
        3. On M2 MODRDN the same entry to employeeNumber=2.2 (most recent update)
        4. Resume the agreements of M2 first, then M1
        5. Check the final employeeNumber value on all servers
    :expectedresults:
        1. Replication agreements are paused
        2. M1 contains employeeNumber=2.1
        3. M2 contains employeeNumber=2.2
        4. Agreements are resumed and updates replicate to M3
        5. The most recent value (2.2) is the single value on M1, M2 and M3
    """
    if DEBUGGING:
        # Add debugging steps(if any)...
        pass

    M1 = topo.ms["supplier1"]
    M2 = topo.ms["supplier2"]
    M3 = topo.ms["supplier3"]
    last = '2'
    value_S1 = '2.1'
    value_S2 = '2.2'

    description = {
        "S1": M1,
        "S2": M2,
        "S1_MODRDN": value_S1,
        "S2_MODRDN": value_S2,
        "expected": value_S2}

    # This test takes the employeeNumber=2 entry (number is not needed afterwards)
    (_, test_user_dn) = _employeenumber_user_get_dn(int(last))

    # Isolate M1 and M2: with all their agreements paused,
    # only M3 can later receive the updates
    agreement_m1_m2 = M1.agreement.list(suffix=DEFAULT_SUFFIX, consumer_host=M2.host, consumer_port=M2.port)
    agreement_m1_m3 = M1.agreement.list(suffix=DEFAULT_SUFFIX, consumer_host=M3.host, consumer_port=M3.port)
    agreement_m2_m1 = M2.agreement.list(suffix=DEFAULT_SUFFIX, consumer_host=M1.host, consumer_port=M1.port)
    agreement_m2_m3 = M2.agreement.list(suffix=DEFAULT_SUFFIX, consumer_host=M3.host, consumer_port=M3.port)

    M1.agreement.pause(agreement_m1_m2[0].dn)
    M1.agreement.pause(agreement_m1_m3[0].dn)
    M2.agreement.pause(agreement_m2_m1[0].dn)
    M2.agreement.pause(agreement_m2_m3[0].dn)

    # Oldest update: MODRDN on S1 and check it landed locally
    description["S1"].rename_s(test_user_dn, 'employeeNumber=%s' % description["S1_MODRDN"], newsuperior=BASE_DISTINGUISHED, delold=1)
    ents = description["S1"].search_s(BASE_DISTINGUISHED, ldap.SCOPE_SUBTREE, '(employeeNumber=%s)' % description["S1_MODRDN"])
    assert len(ents) == 1
    time.sleep(1)

    # More recent update: MODRDN on S2 and check it landed locally
    description["S2"].rename_s(test_user_dn, 'employeeNumber=%s' % description["S2_MODRDN"], newsuperior=BASE_DISTINGUISHED, delold=1)
    ents = description["S2"].search_s(BASE_DISTINGUISHED, ldap.SCOPE_SUBTREE, '(employeeNumber=%s)' % description["S2_MODRDN"])
    assert len(ents) == 1

    _resume_ra_M2_then_M1(M1, M2, M3)

    def _assert_expected(supplier, label):
        # The expected value must be the only matching employeeNumber on *supplier*
        ents = supplier.search_s(BASE_DISTINGUISHED, ldap.SCOPE_SUBTREE, '(employeeNumber=%s)' % description["expected"])
        log.info('Search %s employeeNumber=%s (vs. %s)' % (label, ents[0].getValue('employeeNumber'), description["expected"]))
        assert len(ents) == 1
        assert ents[0].hasAttr('employeeNumber') and ents[0].getValue('employeeNumber') == description["expected"].encode()

    # Check that M1 and M2 converged on the expected value
    _assert_expected(M1, 'M1')
    _assert_expected(M2, 'M2')

    # Check that every distinguished user on M3 still has an employeeNumber,
    # and that the test entry converged on the expected value
    ents = M3.search_s(BASE_DISTINGUISHED, ldap.SCOPE_SUBTREE, '(employeeNumber=*)')
    assert len(ents) == MAX_EMPLOYEENUMBER_USER
    _assert_expected(M3, 'M3')
def test_ticket49658_18(topo):
    """URP conflict resolution when the suppliers MODRDN to different values

    M1: MODRDN -> V1
    M2: MODRDN -> V2
    expected: V2
    resume order: M1 then M2

    :id: c50ea634-ba35-4943-833b-0524a446214f
    :setup: 3 Supplier Instances
        1. using the employeeNumber=3,ou=distinguished,ou=people entry
    :steps:
        1. Isolate M1 and M2 by pausing all their replication agreements
        2. On M1 MODRDN the entry to employeeNumber=3.1 (oldest update)
        3. On M2 MODRDN the same entry to employeeNumber=3.2 (most recent update)
        4. Resume the agreements of M1 first, then M2
        5. Check the final employeeNumber value on all servers
    :expectedresults:
        1. Replication agreements are paused
        2. M1 contains employeeNumber=3.1
        3. M2 contains employeeNumber=3.2
        4. Agreements are resumed and updates replicate to M3
        5. The most recent value (3.2) is the single value on M1, M2 and M3
    """
    if DEBUGGING:
        # Add debugging steps(if any)...
        pass

    M1 = topo.ms["supplier1"]
    M2 = topo.ms["supplier2"]
    M3 = topo.ms["supplier3"]
    last = '3'
    value_S1 = '3.1'
    value_S2 = '3.2'

    description = {
        "S1": M1,
        "S2": M2,
        "S1_MODRDN": value_S1,
        "S2_MODRDN": value_S2,
        "expected": value_S2}

    # This test takes the employeeNumber=3 entry (number is not needed afterwards)
    (_, test_user_dn) = _employeenumber_user_get_dn(int(last))

    # Isolate M1 and M2: with all their agreements paused,
    # only M3 can later receive the updates
    agreement_m1_m2 = M1.agreement.list(suffix=DEFAULT_SUFFIX, consumer_host=M2.host, consumer_port=M2.port)
    agreement_m1_m3 = M1.agreement.list(suffix=DEFAULT_SUFFIX, consumer_host=M3.host, consumer_port=M3.port)
    agreement_m2_m1 = M2.agreement.list(suffix=DEFAULT_SUFFIX, consumer_host=M1.host, consumer_port=M1.port)
    agreement_m2_m3 = M2.agreement.list(suffix=DEFAULT_SUFFIX, consumer_host=M3.host, consumer_port=M3.port)

    M1.agreement.pause(agreement_m1_m2[0].dn)
    M1.agreement.pause(agreement_m1_m3[0].dn)
    M2.agreement.pause(agreement_m2_m1[0].dn)
    M2.agreement.pause(agreement_m2_m3[0].dn)

    # Oldest update: MODRDN on S1 and check it landed locally
    description["S1"].rename_s(test_user_dn, 'employeeNumber=%s' % description["S1_MODRDN"], newsuperior=BASE_DISTINGUISHED, delold=1)
    ents = description["S1"].search_s(BASE_DISTINGUISHED, ldap.SCOPE_SUBTREE, '(employeeNumber=%s)' % description["S1_MODRDN"])
    assert len(ents) == 1
    time.sleep(1)

    # More recent update: MODRDN on S2 and check it landed locally
    description["S2"].rename_s(test_user_dn, 'employeeNumber=%s' % description["S2_MODRDN"], newsuperior=BASE_DISTINGUISHED, delold=1)
    ents = description["S2"].search_s(BASE_DISTINGUISHED, ldap.SCOPE_SUBTREE, '(employeeNumber=%s)' % description["S2_MODRDN"])
    assert len(ents) == 1

    _resume_ra_M1_then_M2(M1, M2, M3)

    def _assert_expected(supplier, label):
        # The expected value must be the only matching employeeNumber on *supplier*
        ents = supplier.search_s(BASE_DISTINGUISHED, ldap.SCOPE_SUBTREE, '(employeeNumber=%s)' % description["expected"])
        log.info('Search %s employeeNumber=%s (vs. %s)' % (label, ents[0].getValue('employeeNumber'), description["expected"]))
        assert len(ents) == 1
        assert ents[0].hasAttr('employeeNumber') and ents[0].getValue('employeeNumber') == description["expected"].encode()

    # Check that M1 and M2 converged on the expected value
    _assert_expected(M1, 'M1')
    _assert_expected(M2, 'M2')

    # Check that every distinguished user on M3 still has an employeeNumber,
    # and that the test entry converged on the expected value
    ents = M3.search_s(BASE_DISTINGUISHED, ldap.SCOPE_SUBTREE, '(employeeNumber=*)')
    assert len(ents) == MAX_EMPLOYEENUMBER_USER
    _assert_expected(M3, 'M3')
def test_ticket49658_19(topo):
    """URP conflict resolution: competing MODRDNs followed by a MOD(REPL)

    M1: MODRDN -> V1
    M2: MODRDN -> V2
    M1: MOD(REPL) -> V1
    Replicate order: M2 then M1
    expected: V1

    :id: 787db943-fc95-4fbb-b066-5e8895cfd296
    :setup: 3 Supplier Instances
        1. using the employeeNumber=4,ou=distinguished,ou=people entry
    :steps:
        1. Isolate M1 and M2 by pausing all their replication agreements
        2. On M1 MODRDN the entry to employeeNumber=4.1 (oldest update)
        3. On M2 MODRDN the same entry to employeeNumber=4.2
        4. On M1 MOD_REPLACE employeeNumber with 4.1 (most recent update)
        5. Resume the agreements of M2 first, then M1
        6. Check the final employeeNumber value on all servers
    :expectedresults:
        1. Replication agreements are paused
        2. M1 contains employeeNumber=4.1
        3. M2 contains employeeNumber=4.2
        4. M1 contains employeeNumber=4.1
        5. Agreements are resumed and updates replicate to M3
        6. The most recent value (4.1) is the single value on M1, M2 and M3
    """
    if DEBUGGING:
        # Add debugging steps(if any)...
        pass

    M1 = topo.ms["supplier1"]
    M2 = topo.ms["supplier2"]
    M3 = topo.ms["supplier3"]
    last = '4'
    value_S1 = '4.1'
    value_S2 = '4.2'

    description = {
        "S1": M1,
        "S2": M2,
        "S1_MODRDN": value_S1,
        "S1_MOD": [(ldap.MOD_REPLACE, 'employeeNumber', value_S1.encode())],
        "S2_MODRDN": value_S2,
        "expected": value_S1}

    # This test takes the employeeNumber=4 entry (number is not needed afterwards)
    (_, test_user_dn) = _employeenumber_user_get_dn(int(last))

    # Isolate M1 and M2: with all their agreements paused,
    # only M3 can later receive the updates
    agreement_m1_m2 = M1.agreement.list(suffix=DEFAULT_SUFFIX, consumer_host=M2.host, consumer_port=M2.port)
    agreement_m1_m3 = M1.agreement.list(suffix=DEFAULT_SUFFIX, consumer_host=M3.host, consumer_port=M3.port)
    agreement_m2_m1 = M2.agreement.list(suffix=DEFAULT_SUFFIX, consumer_host=M1.host, consumer_port=M1.port)
    agreement_m2_m3 = M2.agreement.list(suffix=DEFAULT_SUFFIX, consumer_host=M3.host, consumer_port=M3.port)

    M1.agreement.pause(agreement_m1_m2[0].dn)
    M1.agreement.pause(agreement_m1_m3[0].dn)
    M2.agreement.pause(agreement_m2_m1[0].dn)
    M2.agreement.pause(agreement_m2_m3[0].dn)

    # Oldest update: MODRDN on S1 and check it landed locally
    description["S1"].rename_s(test_user_dn, 'employeeNumber=%s' % description["S1_MODRDN"], newsuperior=BASE_DISTINGUISHED, delold=1)
    ents = description["S1"].search_s(BASE_DISTINGUISHED, ldap.SCOPE_SUBTREE, '(employeeNumber=%s)' % description["S1_MODRDN"])
    assert len(ents) == 1
    time.sleep(1)

    # Competing MODRDN on S2 and check it landed locally
    description["S2"].rename_s(test_user_dn, 'employeeNumber=%s' % description["S2_MODRDN"], newsuperior=BASE_DISTINGUISHED, delold=1)
    ents = description["S2"].search_s(BASE_DISTINGUISHED, ldap.SCOPE_SUBTREE, '(employeeNumber=%s)' % description["S2_MODRDN"])
    assert len(ents) == 1
    time.sleep(1)

    # Most recent update: MOD_REPLACE on the renamed entry on S1
    (_, new_test_user_dn) = _employeenumber_user_get_dn(description["S1_MODRDN"])
    description["S1"].modify_s(new_test_user_dn, description["S1_MOD"])
    ents = description["S1"].search_s(BASE_DISTINGUISHED, ldap.SCOPE_SUBTREE, '(employeeNumber=%s)' % value_S1)
    assert len(ents) == 1

    _resume_ra_M2_then_M1(M1, M2, M3)

    def _assert_expected(supplier, label):
        # The expected value must be the only matching employeeNumber on *supplier*
        ents = supplier.search_s(BASE_DISTINGUISHED, ldap.SCOPE_SUBTREE, '(employeeNumber=%s)' % description["expected"])
        log.info('Search %s employeeNumber=%s (vs. %s)' % (label, ents[0].getValue('employeeNumber'), description["expected"]))
        assert len(ents) == 1
        assert ents[0].hasAttr('employeeNumber') and ents[0].getValue('employeeNumber') == description["expected"].encode()

    # Check that M1 and M2 converged on the expected value
    _assert_expected(M1, 'M1')
    _assert_expected(M2, 'M2')

    # Check that every distinguished user on M3 still has an employeeNumber,
    # and that the test entry converged on the expected value
    ents = M3.search_s(BASE_DISTINGUISHED, ldap.SCOPE_SUBTREE, '(employeeNumber=*)')
    assert len(ents) == MAX_EMPLOYEENUMBER_USER
    _assert_expected(M3, 'M3')
def test_ticket49658_20(topo):
    """URP conflict resolution: competing MODRDNs followed by a MOD(REPL)

    M1: MODRDN -> V1
    M2: MODRDN -> V2
    M1: MOD(REPL) -> V1
    Replicate order: M1 then M2
    expected: V1

    :id: a3df2f72-b8b1-4bb8-b0ca-ebd306539c8b
    :setup: 3 Supplier Instances
        1. using the employeeNumber=5,ou=distinguished,ou=people entry
    :steps:
        1. Isolate M1 and M2 by pausing all their replication agreements
        2. On M1 MODRDN the entry to employeeNumber=5.1 (oldest update)
        3. On M2 MODRDN the same entry to employeeNumber=5.2
        4. On M1 MOD_REPLACE employeeNumber with 5.1 (most recent update)
        5. Resume the agreements of M1 first, then M2
        6. Check the final employeeNumber value on all servers
    :expectedresults:
        1. Replication agreements are paused
        2. M1 contains employeeNumber=5.1
        3. M2 contains employeeNumber=5.2
        4. M1 contains employeeNumber=5.1
        5. Agreements are resumed and updates replicate to M3
        6. The most recent value (5.1) is the single value on M1, M2 and M3
    """
    if DEBUGGING:
        # Add debugging steps(if any)...
        pass

    M1 = topo.ms["supplier1"]
    M2 = topo.ms["supplier2"]
    M3 = topo.ms["supplier3"]
    last = '5'
    value_S1 = '5.1'
    value_S2 = '5.2'

    description = {
        "S1": M1,
        "S2": M2,
        "S1_MODRDN": value_S1,
        "S1_MOD": [(ldap.MOD_REPLACE, 'employeeNumber', value_S1.encode())],
        "S2_MODRDN": value_S2,
        "expected": value_S1}

    # This test takes the employeeNumber=5 entry (number is not needed afterwards)
    (_, test_user_dn) = _employeenumber_user_get_dn(int(last))

    # Isolate M1 and M2: with all their agreements paused,
    # only M3 can later receive the updates
    agreement_m1_m2 = M1.agreement.list(suffix=DEFAULT_SUFFIX, consumer_host=M2.host, consumer_port=M2.port)
    agreement_m1_m3 = M1.agreement.list(suffix=DEFAULT_SUFFIX, consumer_host=M3.host, consumer_port=M3.port)
    agreement_m2_m1 = M2.agreement.list(suffix=DEFAULT_SUFFIX, consumer_host=M1.host, consumer_port=M1.port)
    agreement_m2_m3 = M2.agreement.list(suffix=DEFAULT_SUFFIX, consumer_host=M3.host, consumer_port=M3.port)

    M1.agreement.pause(agreement_m1_m2[0].dn)
    M1.agreement.pause(agreement_m1_m3[0].dn)
    M2.agreement.pause(agreement_m2_m1[0].dn)
    M2.agreement.pause(agreement_m2_m3[0].dn)

    # Oldest update: MODRDN on S1 and check it landed locally
    description["S1"].rename_s(test_user_dn, 'employeeNumber=%s' % description["S1_MODRDN"], newsuperior=BASE_DISTINGUISHED, delold=1)
    ents = description["S1"].search_s(BASE_DISTINGUISHED, ldap.SCOPE_SUBTREE, '(employeeNumber=%s)' % description["S1_MODRDN"])
    assert len(ents) == 1
    time.sleep(1)

    # Competing MODRDN on S2 and check it landed locally
    description["S2"].rename_s(test_user_dn, 'employeeNumber=%s' % description["S2_MODRDN"], newsuperior=BASE_DISTINGUISHED, delold=1)
    ents = description["S2"].search_s(BASE_DISTINGUISHED, ldap.SCOPE_SUBTREE, '(employeeNumber=%s)' % description["S2_MODRDN"])
    assert len(ents) == 1
    time.sleep(1)

    # Most recent update: MOD_REPLACE on the renamed entry on S1
    (_, new_test_user_dn) = _employeenumber_user_get_dn(description["S1_MODRDN"])
    description["S1"].modify_s(new_test_user_dn, description["S1_MOD"])
    ents = description["S1"].search_s(BASE_DISTINGUISHED, ldap.SCOPE_SUBTREE, '(employeeNumber=%s)' % value_S1)
    assert len(ents) == 1

    _resume_ra_M1_then_M2(M1, M2, M3)

    def _assert_expected(supplier, label):
        # The expected value must be the only matching employeeNumber on *supplier*
        ents = supplier.search_s(BASE_DISTINGUISHED, ldap.SCOPE_SUBTREE, '(employeeNumber=%s)' % description["expected"])
        log.info('Search %s employeeNumber=%s (vs. %s)' % (label, ents[0].getValue('employeeNumber'), description["expected"]))
        assert len(ents) == 1
        assert ents[0].hasAttr('employeeNumber') and ents[0].getValue('employeeNumber') == description["expected"].encode()

    # Check that M1 and M2 converged on the expected value
    _assert_expected(M1, 'M1')
    _assert_expected(M2, 'M2')

    # Check that every distinguished user on M3 still has an employeeNumber,
    # and that the test entry converged on the expected value
    ents = M3.search_s(BASE_DISTINGUISHED, ldap.SCOPE_SUBTREE, '(employeeNumber=*)')
    assert len(ents) == MAX_EMPLOYEENUMBER_USER
    _assert_expected(M3, 'M3')
_employeenumber_user_get_dn(description["S1_MODRDN"]) + description["S1"].modify_s(new_test_user_dn, description["S1_MOD"]) + ents = description["S1"].search_s(BASE_DISTINGUISHED, ldap.SCOPE_SUBTREE, '(employeeNumber=%s)' % value_S1) + assert len(ents) == 1 + + _resume_ra_M1_then_M2(M1, M2, M3) + + #time.sleep(3600) + # Step 9 + # Check that M1 still contains employeeNumber= + ents = M1.search_s(BASE_DISTINGUISHED, ldap.SCOPE_SUBTREE, '(employeeNumber=%s)' % description["expected"]) + log.info('Search M1 employeeNumber=%s (vs. %s)' % (ents[0].getValue('employeeNumber'), description["expected"])) + assert len(ents) == 1 + assert ents[0].hasAttr('employeeNumber') and ents[0].getValue('employeeNumber') == description["expected"].encode() + + # Check that M2 still contains employeeNumber= + ents = M2.search_s(BASE_DISTINGUISHED, ldap.SCOPE_SUBTREE, '(employeeNumber=%s)' % description["expected"]) + log.info('Search M2 employeeNumber=%s (vs. %s)' % (ents[0].getValue('employeeNumber'), description["expected"])) + assert len(ents) == 1 + assert ents[0].hasAttr('employeeNumber') and ents[0].getValue('employeeNumber') == description["expected"].encode() + + # Check that M3 still contain employeeNumber and it contains employeeNumber= + ents = M3.search_s(BASE_DISTINGUISHED, ldap.SCOPE_SUBTREE, '(employeeNumber=*)') + assert len(ents) == MAX_EMPLOYEENUMBER_USER + ents = M3.search_s(BASE_DISTINGUISHED, ldap.SCOPE_SUBTREE, '(employeeNumber=%s)' % description["expected"]) + log.info('Search M3 employeeNumber=%s (vs. %s)' % (ents[0].getValue('employeeNumber'), description["expected"])) + assert len(ents) == 1 + assert ents[0].hasAttr('employeeNumber') and ents[0].getValue('employeeNumber') == description["expected"].encode() + +def test_ticket49658_21(topo): + """Do + M1: MODRDN -> V1 + M2: MODRDN -> V2 + M1: MOD(DEL/ADD) -> V1 + Replicate order: M2 then M1 + expected: V1 + + :id: f338188c-6877-4a2e-bbb1-14b81ac7668a + :setup: 3 Supplier Instances + 1. 
Use employeenumber=1000,ou=distinguished,ou=people, + :steps: + 1. Isolate M1 and M2 by pausing the replication agreements + 2. On M1 do DEL+ADD 1000 + MOD_ADD_13 + 3. On M2 do DEL+ADD 1000 + MOD_ADD_13 + 4. Enable replication agreement M2 -> M3, so that update step 2 is replicated first + 5. Enable replication agreement M1 -> M3, so that update step 3 is replicated second + 6. Check that the employeeNumber is 13 on all servers + :expectedresults: + 1. Fill in the result that is expected + 2. For each test step + """ + + # If you need any test suite initialization, + # please, write additional fixture for that (including finalizer). + # Topology for suites are predefined in lib389/topologies.py. + + # If you need host, port or any other data about instance, + # Please, use the instance object attributes for that (for example, topo.ms["supplier1"].serverid) + + + + if DEBUGGING: + # Add debugging steps(if any)... + pass + M1 = topo.ms["supplier1"] + M2 = topo.ms["supplier2"] + M3 = topo.ms["supplier3"] + value_init = '3' + last = '6' + value_S1 = '6.1' + value_S2 = '6.2' + + description = { + "S1": M1, + "S2": M2, + "S1_MODRDN": value_S1, + "S1_MOD": [(ldap.MOD_DELETE, 'employeeNumber', value_S1.encode()),(ldap.MOD_ADD, 'employeeNumber', value_S1.encode())], + "S2_MODRDN": value_S2, + "expected": value_S1} + + # This test takes the user_1 + (uid, test_user_dn) = _employeenumber_user_get_dn(int(last)) + + # + # Step 4 + # + # disable all RA from M1 and M2 + # only M3 can replicate the update + # + agreement_m1_m2 = M1.agreement.list(suffix=DEFAULT_SUFFIX, consumer_host=M2.host, consumer_port=M2.port) + agreement_m1_m3 = M1.agreement.list(suffix=DEFAULT_SUFFIX, consumer_host=M3.host, consumer_port=M3.port) + agreement_m2_m1 = M2.agreement.list(suffix=DEFAULT_SUFFIX, consumer_host=M1.host, consumer_port=M1.port) + agreement_m2_m3 = M2.agreement.list(suffix=DEFAULT_SUFFIX, consumer_host=M3.host, consumer_port=M3.port) + + M1.agreement.pause(agreement_m1_m2[0].dn) + 
M1.agreement.pause(agreement_m1_m3[0].dn) + M2.agreement.pause(agreement_m2_m1[0].dn) + M2.agreement.pause(agreement_m2_m3[0].dn) + + # Step 5 + # Oldest update + # check that the entry on M1 contains employeeNumber= + description["S1"].rename_s(test_user_dn, 'employeeNumber=%s' % description["S1_MODRDN"], newsuperior=BASE_DISTINGUISHED, delold=1) + ents = description["S1"].search_s(BASE_DISTINGUISHED, ldap.SCOPE_SUBTREE, '(employeeNumber=%s)' % description["S1_MODRDN"]) + assert len(ents) == 1 + time.sleep(1) + + # Step 6 + # More recent update + # check that the entry on M2 contains employeeNumber= + description["S2"].rename_s(test_user_dn, 'employeeNumber=%s' % description["S2_MODRDN"], newsuperior=BASE_DISTINGUISHED, delold=1) + ents = description["S2"].search_s(BASE_DISTINGUISHED, ldap.SCOPE_SUBTREE, '(employeeNumber=%s)' % description["S2_MODRDN"]) + assert len(ents) == 1 + time.sleep(1) + + (no, new_test_user_dn) = _employeenumber_user_get_dn(description["S1_MODRDN"]) + description["S1"].modify_s(new_test_user_dn, description["S1_MOD"]) + ents = description["S1"].search_s(BASE_DISTINGUISHED, ldap.SCOPE_SUBTREE, '(employeeNumber=%s)' % value_S1) + assert len(ents) == 1 + + _resume_ra_M2_then_M1(M1, M2, M3) + + #time.sleep(3600) + # Step 9 + # Check that M1 still contains employeeNumber= + ents = M1.search_s(BASE_DISTINGUISHED, ldap.SCOPE_SUBTREE, '(employeeNumber=%s)' % description["expected"]) + log.info('Search M1 employeeNumber=%s (vs. %s)' % (ents[0].getValue('employeeNumber'), description["expected"])) + assert len(ents) == 1 + assert ents[0].hasAttr('employeeNumber') and ents[0].getValue('employeeNumber') == description["expected"].encode() + + # Check that M2 still contains employeeNumber= + ents = M2.search_s(BASE_DISTINGUISHED, ldap.SCOPE_SUBTREE, '(employeeNumber=%s)' % description["expected"]) + log.info('Search M2 employeeNumber=%s (vs. 
def test_ticket49658_22(topo):
    """MODRDN conflict resolved by a later MOD(DEL/ADD) on the first renamer.

    M1: MODRDN -> V1
    M2: MODRDN -> V2
    M1: MOD(DEL/ADD) -> V1
    Replicate order: M1 then M2
    expected: V1

    :id: f3b33f52-d5c7-4b49-89cf-3cbe4b060674
    :setup: 3 Supplier Instances, employeeNumber users under
        ou=distinguished,ou=people (this test uses user '7')
    :steps:
        1. Isolate M1 and M2 by pausing all their replication agreements
        2. On M1 rename the user to employeeNumber=7.1 (oldest update)
        3. On M2 rename the same user to employeeNumber=7.2
        4. On M1 apply MOD_DELETE+MOD_ADD of employeeNumber=7.1 on the
           renamed entry
        5. Resume agreements so M1's updates replicate before M2's
        6. Check the final employeeNumber on M1, M2 and M3
    :expectedresults:
        1. Agreements are paused
        2. Rename succeeds on M1
        3. Rename succeeds on M2
        4. MOD succeeds on M1
        5. Replication resumes and converges
        6. employeeNumber is 7.1 (the most recent write wins) everywhere,
           and M3 still holds the full set of employeeNumber users
    """
    M1 = topo.ms["supplier1"]
    M2 = topo.ms["supplier2"]
    M3 = topo.ms["supplier3"]
    last = '7'
    value_S1 = '7.1'
    value_S2 = '7.2'

    # Scenario description: which server does what, and the value that
    # must win once replication converges.
    description = {
        "S1": M1,
        "S2": M2,
        "S1_MODRDN": value_S1,
        "S1_MOD": [(ldap.MOD_DELETE, 'employeeNumber', value_S1.encode()), (ldap.MOD_ADD, 'employeeNumber', value_S1.encode())],
        "S2_MODRDN": value_S2,
        "expected": value_S1}

    # This test takes the dedicated user 'last'
    (uid, test_user_dn) = _employeenumber_user_get_dn(int(last))

    # Isolate M1 and M2: pause every agreement they own so the conflicting
    # updates stay local until we resume replication in a chosen order.
    agreement_m1_m2 = M1.agreement.list(suffix=DEFAULT_SUFFIX, consumer_host=M2.host, consumer_port=M2.port)
    agreement_m1_m3 = M1.agreement.list(suffix=DEFAULT_SUFFIX, consumer_host=M3.host, consumer_port=M3.port)
    agreement_m2_m1 = M2.agreement.list(suffix=DEFAULT_SUFFIX, consumer_host=M1.host, consumer_port=M1.port)
    agreement_m2_m3 = M2.agreement.list(suffix=DEFAULT_SUFFIX, consumer_host=M3.host, consumer_port=M3.port)

    M1.agreement.pause(agreement_m1_m2[0].dn)
    M1.agreement.pause(agreement_m1_m3[0].dn)
    M2.agreement.pause(agreement_m2_m1[0].dn)
    M2.agreement.pause(agreement_m2_m3[0].dn)

    # Oldest update: rename on M1 and check the entry carries the new RDN
    description["S1"].rename_s(test_user_dn, 'employeeNumber=%s' % description["S1_MODRDN"], newsuperior=BASE_DISTINGUISHED, delold=1)
    ents = description["S1"].search_s(BASE_DISTINGUISHED, ldap.SCOPE_SUBTREE, '(employeeNumber=%s)' % description["S1_MODRDN"])
    assert len(ents) == 1
    # sleep(1) keeps the CSNs of the competing updates strictly ordered
    time.sleep(1)

    # More recent update: rename on M2 and check the entry carries the new RDN
    description["S2"].rename_s(test_user_dn, 'employeeNumber=%s' % description["S2_MODRDN"], newsuperior=BASE_DISTINGUISHED, delold=1)
    ents = description["S2"].search_s(BASE_DISTINGUISHED, ldap.SCOPE_SUBTREE, '(employeeNumber=%s)' % description["S2_MODRDN"])
    assert len(ents) == 1
    time.sleep(1)

    # Most recent update: MOD(DEL/ADD) on M1's renamed entry
    (no, new_test_user_dn) = _employeenumber_user_get_dn(description["S1_MODRDN"])
    description["S1"].modify_s(new_test_user_dn, description["S1_MOD"])
    ents = description["S1"].search_s(BASE_DISTINGUISHED, ldap.SCOPE_SUBTREE, '(employeeNumber=%s)' % value_S1)
    assert len(ents) == 1

    # Resume replication: M1's updates reach M3 first, then M2's
    _resume_ra_M1_then_M2(M1, M2, M3)

    # After convergence every server must hold the winning value
    for label, supplier in (('M1', M1), ('M2', M2), ('M3', M3)):
        if supplier is M3:
            # M3 must also still hold the full set of employeeNumber users
            ents = M3.search_s(BASE_DISTINGUISHED, ldap.SCOPE_SUBTREE, '(employeeNumber=*)')
            assert len(ents) == MAX_EMPLOYEENUMBER_USER
        ents = supplier.search_s(BASE_DISTINGUISHED, ldap.SCOPE_SUBTREE, '(employeeNumber=%s)' % description["expected"])
        log.info('Search %s employeeNumber=%s (vs. %s)' % (label, ents[0].getValue('employeeNumber'), description["expected"]))
        assert len(ents) == 1
        assert ents[0].hasAttr('employeeNumber') and ents[0].getValue('employeeNumber') == description["expected"].encode()
+ pass + M1 = topo.ms["supplier1"] + M2 = topo.ms["supplier2"] + M3 = topo.ms["supplier3"] + value_init = '7' + last = '8' + value_S1 = '8.1' + value_S2 = '8.2' + + description = { + "S1": M1, + "S2": M2, + "S1_MODRDN": value_S1, + "S1_MOD": [(ldap.MOD_REPLACE, 'employeeNumber', value_S1.encode())], + "S2_MOD": [(ldap.MOD_REPLACE, 'employeeNumber', value_S2.encode())], + "S2_MODRDN": value_S2, + "expected": value_S2} + + # This test takes the user_1 + (uid, test_user_dn) = _employeenumber_user_get_dn(int(last)) + + # + # Step 4 + # + # disable all RA from M1 and M2 + # only M3 can replicate the update + # + agreement_m1_m2 = M1.agreement.list(suffix=DEFAULT_SUFFIX, consumer_host=M2.host, consumer_port=M2.port) + agreement_m1_m3 = M1.agreement.list(suffix=DEFAULT_SUFFIX, consumer_host=M3.host, consumer_port=M3.port) + agreement_m2_m1 = M2.agreement.list(suffix=DEFAULT_SUFFIX, consumer_host=M1.host, consumer_port=M1.port) + agreement_m2_m3 = M2.agreement.list(suffix=DEFAULT_SUFFIX, consumer_host=M3.host, consumer_port=M3.port) + + M1.agreement.pause(agreement_m1_m2[0].dn) + M1.agreement.pause(agreement_m1_m3[0].dn) + M2.agreement.pause(agreement_m2_m1[0].dn) + M2.agreement.pause(agreement_m2_m3[0].dn) + + # Step 5 + # Oldest update + # check that the entry on M1 contains employeeNumber= + description["S1"].rename_s(test_user_dn, 'employeeNumber=%s' % description["S1_MODRDN"], newsuperior=BASE_DISTINGUISHED, delold=1) + ents = description["S1"].search_s(BASE_DISTINGUISHED, ldap.SCOPE_SUBTREE, '(employeeNumber=%s)' % description["S1_MODRDN"]) + assert len(ents) == 1 + time.sleep(1) + + # Step 6 + # More recent update + # check that the entry on M2 contains employeeNumber= + description["S2"].rename_s(test_user_dn, 'employeeNumber=%s' % description["S2_MODRDN"], newsuperior=BASE_DISTINGUISHED, delold=1) + ents = description["S2"].search_s(BASE_DISTINGUISHED, ldap.SCOPE_SUBTREE, '(employeeNumber=%s)' % description["S2_MODRDN"]) + assert len(ents) == 1 + time.sleep(1) + + 
(no, new_test_user_dn) = _employeenumber_user_get_dn(description["S1_MODRDN"]) + description["S1"].modify_s(new_test_user_dn, description["S1_MOD"]) + ents = description["S1"].search_s(BASE_DISTINGUISHED, ldap.SCOPE_SUBTREE, '(employeeNumber=%s)' % value_S1) + assert len(ents) == 1 + time.sleep(1) + + (no, new_test_user_dn) = _employeenumber_user_get_dn(description["S2_MODRDN"]) + description["S2"].modify_s(new_test_user_dn, description["S2_MOD"]) + ents = description["S2"].search_s(BASE_DISTINGUISHED, ldap.SCOPE_SUBTREE, '(employeeNumber=%s)' % value_S2) + assert len(ents) == 1 + + _resume_ra_M2_then_M1(M1, M2, M3) + + #time.sleep(3600) + # Step 9 + # Check that M1 still contains employeeNumber= + ents = M1.search_s(BASE_DISTINGUISHED, ldap.SCOPE_SUBTREE, '(employeeNumber=%s)' % description["expected"]) + log.info('Search M1 employeeNumber=%s (vs. %s)' % (ents[0].getValue('employeeNumber'), description["expected"])) + assert len(ents) == 1 + assert ents[0].hasAttr('employeeNumber') and ents[0].getValue('employeeNumber') == description["expected"].encode() + + # Check that M2 still contains employeeNumber= + ents = M2.search_s(BASE_DISTINGUISHED, ldap.SCOPE_SUBTREE, '(employeeNumber=%s)' % description["expected"]) + log.info('Search M2 employeeNumber=%s (vs. %s)' % (ents[0].getValue('employeeNumber'), description["expected"])) + assert len(ents) == 1 + assert ents[0].hasAttr('employeeNumber') and ents[0].getValue('employeeNumber') == description["expected"].encode() + + # Check that M3 still contain employeeNumber and it contains employeeNumber= + ents = M3.search_s(BASE_DISTINGUISHED, ldap.SCOPE_SUBTREE, '(employeeNumber=*)') + assert len(ents) == MAX_EMPLOYEENUMBER_USER + ents = M3.search_s(BASE_DISTINGUISHED, ldap.SCOPE_SUBTREE, '(employeeNumber=%s)' % description["expected"]) + log.info('Search M3 employeeNumber=%s (vs. 
def test_ticket49658_24(topo):
    """MODRDN conflict with a MOD(REPL) on both suppliers; M2's MOD is last.

    M1: MODRDN -> V1
    M2: MODRDN -> V2
    M1: MOD(REPL) -> V1
    M2: MOD(REPL) -> V2
    Replicate order: M1 then M2
    expected: V2

    :id: af6a472c-29e3-4833-a5dc-d96c684d33f9
    :setup: 3 Supplier Instances, employeeNumber users under
        ou=distinguished,ou=people (this test uses user '9')
    :steps:
        1. Isolate M1 and M2 by pausing all their replication agreements
        2. On M1 rename the user to employeeNumber=9.1 (oldest update)
        3. On M2 rename the same user to employeeNumber=9.2
        4. On M1 apply MOD_REPLACE employeeNumber=9.1 on the renamed entry
        5. On M2 apply MOD_REPLACE employeeNumber=9.2 (most recent update)
        6. Resume agreements so M1's updates replicate before M2's
        7. Check the final employeeNumber on M1, M2 and M3
    :expectedresults:
        1. Agreements are paused
        2. Rename succeeds on M1
        3. Rename succeeds on M2
        4. MOD succeeds on M1
        5. MOD succeeds on M2
        6. Replication resumes and converges
        7. employeeNumber is 9.2 (the most recent write wins) everywhere,
           and M3 still holds the full set of employeeNumber users
    """
    M1 = topo.ms["supplier1"]
    M2 = topo.ms["supplier2"]
    M3 = topo.ms["supplier3"]
    last = '9'
    value_S1 = '9.1'
    value_S2 = '9.2'

    # Scenario description: which server does what, and the value that
    # must win once replication converges.
    description = {
        "S1": M1,
        "S2": M2,
        "S1_MODRDN": value_S1,
        "S1_MOD": [(ldap.MOD_REPLACE, 'employeeNumber', value_S1.encode())],
        "S2_MOD": [(ldap.MOD_REPLACE, 'employeeNumber', value_S2.encode())],
        "S2_MODRDN": value_S2,
        "expected": value_S2}

    # This test takes the dedicated user 'last'
    (uid, test_user_dn) = _employeenumber_user_get_dn(int(last))

    # Isolate M1 and M2: pause every agreement they own so the conflicting
    # updates stay local until we resume replication in a chosen order.
    agreement_m1_m2 = M1.agreement.list(suffix=DEFAULT_SUFFIX, consumer_host=M2.host, consumer_port=M2.port)
    agreement_m1_m3 = M1.agreement.list(suffix=DEFAULT_SUFFIX, consumer_host=M3.host, consumer_port=M3.port)
    agreement_m2_m1 = M2.agreement.list(suffix=DEFAULT_SUFFIX, consumer_host=M1.host, consumer_port=M1.port)
    agreement_m2_m3 = M2.agreement.list(suffix=DEFAULT_SUFFIX, consumer_host=M3.host, consumer_port=M3.port)

    M1.agreement.pause(agreement_m1_m2[0].dn)
    M1.agreement.pause(agreement_m1_m3[0].dn)
    M2.agreement.pause(agreement_m2_m1[0].dn)
    M2.agreement.pause(agreement_m2_m3[0].dn)

    # Oldest update: rename on M1 and check the entry carries the new RDN
    description["S1"].rename_s(test_user_dn, 'employeeNumber=%s' % description["S1_MODRDN"], newsuperior=BASE_DISTINGUISHED, delold=1)
    ents = description["S1"].search_s(BASE_DISTINGUISHED, ldap.SCOPE_SUBTREE, '(employeeNumber=%s)' % description["S1_MODRDN"])
    assert len(ents) == 1
    # sleep(1) keeps the CSNs of the competing updates strictly ordered
    time.sleep(1)

    # More recent update: rename on M2 and check the entry carries the new RDN
    description["S2"].rename_s(test_user_dn, 'employeeNumber=%s' % description["S2_MODRDN"], newsuperior=BASE_DISTINGUISHED, delold=1)
    ents = description["S2"].search_s(BASE_DISTINGUISHED, ldap.SCOPE_SUBTREE, '(employeeNumber=%s)' % description["S2_MODRDN"])
    assert len(ents) == 1
    time.sleep(1)

    # Third update: MOD(REPL) on M1's renamed entry
    (no, new_test_user_dn) = _employeenumber_user_get_dn(description["S1_MODRDN"])
    description["S1"].modify_s(new_test_user_dn, description["S1_MOD"])
    ents = description["S1"].search_s(BASE_DISTINGUISHED, ldap.SCOPE_SUBTREE, '(employeeNumber=%s)' % value_S1)
    assert len(ents) == 1
    time.sleep(1)

    # Most recent update: MOD(REPL) on M2's renamed entry
    (no, new_test_user_dn) = _employeenumber_user_get_dn(description["S2_MODRDN"])
    description["S2"].modify_s(new_test_user_dn, description["S2_MOD"])
    ents = description["S2"].search_s(BASE_DISTINGUISHED, ldap.SCOPE_SUBTREE, '(employeeNumber=%s)' % value_S2)
    assert len(ents) == 1

    # Resume replication: M1's updates reach M3 first, then M2's
    _resume_ra_M1_then_M2(M1, M2, M3)

    # After convergence every server must hold the winning value
    for label, supplier in (('M1', M1), ('M2', M2), ('M3', M3)):
        if supplier is M3:
            # M3 must also still hold the full set of employeeNumber users
            ents = M3.search_s(BASE_DISTINGUISHED, ldap.SCOPE_SUBTREE, '(employeeNumber=*)')
            assert len(ents) == MAX_EMPLOYEENUMBER_USER
        ents = supplier.search_s(BASE_DISTINGUISHED, ldap.SCOPE_SUBTREE, '(employeeNumber=%s)' % description["expected"])
        log.info('Search %s employeeNumber=%s (vs. %s)' % (label, ents[0].getValue('employeeNumber'), description["expected"]))
        assert len(ents) == 1
        assert ents[0].hasAttr('employeeNumber') and ents[0].getValue('employeeNumber') == description["expected"].encode()
+ pass + M1 = topo.ms["supplier1"] + M2 = topo.ms["supplier2"] + M3 = topo.ms["supplier3"] + value_init = '7' + last = '10' + value_S1 = '10.1' + value_S2 = '10.2' + + description = { + "S1": M1, + "S2": M2, + "S1_MODRDN": value_S1, + "S1_MOD": [(ldap.MOD_REPLACE, 'employeeNumber', value_S1.encode())], + "S2_MOD": [(ldap.MOD_DELETE, 'employeeNumber', value_S2.encode()),(ldap.MOD_ADD, 'employeeNumber', value_S2.encode())], + "S2_MODRDN": value_S2, + "expected": value_S2} + + # This test takes the user_1 + (uid, test_user_dn) = _employeenumber_user_get_dn(int(last)) + + # + # Step 4 + # + # disable all RA from M1 and M2 + # only M3 can replicate the update + # + agreement_m1_m2 = M1.agreement.list(suffix=DEFAULT_SUFFIX, consumer_host=M2.host, consumer_port=M2.port) + agreement_m1_m3 = M1.agreement.list(suffix=DEFAULT_SUFFIX, consumer_host=M3.host, consumer_port=M3.port) + agreement_m2_m1 = M2.agreement.list(suffix=DEFAULT_SUFFIX, consumer_host=M1.host, consumer_port=M1.port) + agreement_m2_m3 = M2.agreement.list(suffix=DEFAULT_SUFFIX, consumer_host=M3.host, consumer_port=M3.port) + + M1.agreement.pause(agreement_m1_m2[0].dn) + M1.agreement.pause(agreement_m1_m3[0].dn) + M2.agreement.pause(agreement_m2_m1[0].dn) + M2.agreement.pause(agreement_m2_m3[0].dn) + + # Step 5 + # Oldest update + # check that the entry on M1 contains employeeNumber= + description["S1"].rename_s(test_user_dn, 'employeeNumber=%s' % description["S1_MODRDN"], newsuperior=BASE_DISTINGUISHED, delold=1) + ents = description["S1"].search_s(BASE_DISTINGUISHED, ldap.SCOPE_SUBTREE, '(employeeNumber=%s)' % description["S1_MODRDN"]) + assert len(ents) == 1 + time.sleep(1) + + # Step 6 + # More recent update + # check that the entry on M2 contains employeeNumber= + description["S2"].rename_s(test_user_dn, 'employeeNumber=%s' % description["S2_MODRDN"], newsuperior=BASE_DISTINGUISHED, delold=1) + ents = description["S2"].search_s(BASE_DISTINGUISHED, ldap.SCOPE_SUBTREE, '(employeeNumber=%s)' % 
description["S2_MODRDN"]) + assert len(ents) == 1 + time.sleep(1) + + (no, new_test_user_dn) = _employeenumber_user_get_dn(description["S1_MODRDN"]) + description["S1"].modify_s(new_test_user_dn, description["S1_MOD"]) + ents = description["S1"].search_s(BASE_DISTINGUISHED, ldap.SCOPE_SUBTREE, '(employeeNumber=%s)' % value_S1) + assert len(ents) == 1 + time.sleep(1) + + (no, new_test_user_dn) = _employeenumber_user_get_dn(description["S2_MODRDN"]) + description["S2"].modify_s(new_test_user_dn, description["S2_MOD"]) + ents = description["S2"].search_s(BASE_DISTINGUISHED, ldap.SCOPE_SUBTREE, '(employeeNumber=%s)' % value_S2) + assert len(ents) == 1 + + _resume_ra_M1_then_M2(M1, M2, M3) + + #time.sleep(3600) + # Step 9 + # Check that M1 still contains employeeNumber= + ents = M1.search_s(BASE_DISTINGUISHED, ldap.SCOPE_SUBTREE, '(employeeNumber=%s)' % description["expected"]) + log.info('Search M1 employeeNumber=%s (vs. %s)' % (ents[0].getValue('employeeNumber'), description["expected"])) + assert len(ents) == 1 + assert ents[0].hasAttr('employeeNumber') and ents[0].getValue('employeeNumber') == description["expected"].encode() + + # Check that M2 still contains employeeNumber= + ents = M2.search_s(BASE_DISTINGUISHED, ldap.SCOPE_SUBTREE, '(employeeNumber=%s)' % description["expected"]) + log.info('Search M2 employeeNumber=%s (vs. %s)' % (ents[0].getValue('employeeNumber'), description["expected"])) + assert len(ents) == 1 + assert ents[0].hasAttr('employeeNumber') and ents[0].getValue('employeeNumber') == description["expected"].encode() + + # Check that M3 still contain employeeNumber and it contains employeeNumber= + ents = M3.search_s(BASE_DISTINGUISHED, ldap.SCOPE_SUBTREE, '(employeeNumber=*)') + assert len(ents) == MAX_EMPLOYEENUMBER_USER + ents = M3.search_s(BASE_DISTINGUISHED, ldap.SCOPE_SUBTREE, '(employeeNumber=%s)' % description["expected"]) + log.info('Search M3 employeeNumber=%s (vs. 
def test_ticket49658_26(topo):
    """MODRDN conflict with MOD(REPL) on M1 then MOD(DEL/ADD) on M2.

    M1: MODRDN -> V1
    M2: MODRDN -> V2
    M1: MOD(REPL) -> V1
    M2: MOD(DEL/ADD) -> V2
    Replicate order: M2 then M1
    expected: V2

    :id: 8e9f85d3-22cc-4a84-a828-cec29202821f
    :setup: 3 Supplier Instances, employeeNumber users under
        ou=distinguished,ou=people (this test uses user '11')
    :steps:
        1. Isolate M1 and M2 by pausing all their replication agreements
        2. On M1 rename the user to employeeNumber=11.1 (oldest update)
        3. On M2 rename the same user to employeeNumber=11.2
        4. On M1 apply MOD_REPLACE employeeNumber=11.1 on the renamed entry
        5. On M2 apply MOD_DELETE+MOD_ADD of employeeNumber=11.2
           (most recent update)
        6. Resume agreements so M2's updates replicate before M1's
        7. Check the final employeeNumber on M1, M2 and M3
    :expectedresults:
        1. Agreements are paused
        2. Rename succeeds on M1
        3. Rename succeeds on M2
        4. MOD succeeds on M1
        5. MOD succeeds on M2
        6. Replication resumes and converges
        7. employeeNumber is 11.2 (the most recent write wins) everywhere,
           and M3 still holds the full set of employeeNumber users
    """
    M1 = topo.ms["supplier1"]
    M2 = topo.ms["supplier2"]
    M3 = topo.ms["supplier3"]
    last = '11'
    value_S1 = '11.1'
    value_S2 = '11.2'

    # Scenario description: which server does what, and the value that
    # must win once replication converges.
    description = {
        "S1": M1,
        "S2": M2,
        "S1_MODRDN": value_S1,
        "S1_MOD": [(ldap.MOD_REPLACE, 'employeeNumber', value_S1.encode())],
        "S2_MOD": [(ldap.MOD_DELETE, 'employeeNumber', value_S2.encode()), (ldap.MOD_ADD, 'employeeNumber', value_S2.encode())],
        "S2_MODRDN": value_S2,
        "expected": value_S2}

    # This test takes the dedicated user 'last'
    (uid, test_user_dn) = _employeenumber_user_get_dn(int(last))

    # Isolate M1 and M2: pause every agreement they own so the conflicting
    # updates stay local until we resume replication in a chosen order.
    agreement_m1_m2 = M1.agreement.list(suffix=DEFAULT_SUFFIX, consumer_host=M2.host, consumer_port=M2.port)
    agreement_m1_m3 = M1.agreement.list(suffix=DEFAULT_SUFFIX, consumer_host=M3.host, consumer_port=M3.port)
    agreement_m2_m1 = M2.agreement.list(suffix=DEFAULT_SUFFIX, consumer_host=M1.host, consumer_port=M1.port)
    agreement_m2_m3 = M2.agreement.list(suffix=DEFAULT_SUFFIX, consumer_host=M3.host, consumer_port=M3.port)

    M1.agreement.pause(agreement_m1_m2[0].dn)
    M1.agreement.pause(agreement_m1_m3[0].dn)
    M2.agreement.pause(agreement_m2_m1[0].dn)
    M2.agreement.pause(agreement_m2_m3[0].dn)

    # Oldest update: rename on M1 and check the entry carries the new RDN
    description["S1"].rename_s(test_user_dn, 'employeeNumber=%s' % description["S1_MODRDN"], newsuperior=BASE_DISTINGUISHED, delold=1)
    ents = description["S1"].search_s(BASE_DISTINGUISHED, ldap.SCOPE_SUBTREE, '(employeeNumber=%s)' % description["S1_MODRDN"])
    assert len(ents) == 1
    # sleep(1) keeps the CSNs of the competing updates strictly ordered
    time.sleep(1)

    # More recent update: rename on M2 and check the entry carries the new RDN
    description["S2"].rename_s(test_user_dn, 'employeeNumber=%s' % description["S2_MODRDN"], newsuperior=BASE_DISTINGUISHED, delold=1)
    ents = description["S2"].search_s(BASE_DISTINGUISHED, ldap.SCOPE_SUBTREE, '(employeeNumber=%s)' % description["S2_MODRDN"])
    assert len(ents) == 1
    time.sleep(1)

    # Third update: MOD(REPL) on M1's renamed entry
    (no, new_test_user_dn) = _employeenumber_user_get_dn(description["S1_MODRDN"])
    description["S1"].modify_s(new_test_user_dn, description["S1_MOD"])
    ents = description["S1"].search_s(BASE_DISTINGUISHED, ldap.SCOPE_SUBTREE, '(employeeNumber=%s)' % value_S1)
    assert len(ents) == 1
    time.sleep(1)

    # Most recent update: MOD(DEL/ADD) on M2's renamed entry
    (no, new_test_user_dn) = _employeenumber_user_get_dn(description["S2_MODRDN"])
    description["S2"].modify_s(new_test_user_dn, description["S2_MOD"])
    ents = description["S2"].search_s(BASE_DISTINGUISHED, ldap.SCOPE_SUBTREE, '(employeeNumber=%s)' % value_S2)
    assert len(ents) == 1

    # Resume replication: M2's updates reach M3 first, then M1's
    _resume_ra_M2_then_M1(M1, M2, M3)

    # After convergence every server must hold the winning value
    for label, supplier in (('M1', M1), ('M2', M2), ('M3', M3)):
        if supplier is M3:
            # M3 must also still hold the full set of employeeNumber users
            ents = M3.search_s(BASE_DISTINGUISHED, ldap.SCOPE_SUBTREE, '(employeeNumber=*)')
            assert len(ents) == MAX_EMPLOYEENUMBER_USER
        ents = supplier.search_s(BASE_DISTINGUISHED, ldap.SCOPE_SUBTREE, '(employeeNumber=%s)' % description["expected"])
        log.info('Search %s employeeNumber=%s (vs. %s)' % (label, ents[0].getValue('employeeNumber'), description["expected"]))
        assert len(ents) == 1
        assert ents[0].hasAttr('employeeNumber') and ents[0].getValue('employeeNumber') == description["expected"].encode()
+ pass + M1 = topo.ms["supplier1"] + M2 = topo.ms["supplier2"] + M3 = topo.ms["supplier3"] + value_init = '7' + last = '12' + value_S1 = '12.1' + value_S2 = '12.2' + + description = { + "S1": M1, + "S2": M2, + "S1_MODRDN": value_S1, + "S1_MOD": [(ldap.MOD_DELETE, 'employeeNumber', value_S1.encode()),(ldap.MOD_ADD, 'employeeNumber', value_S1.encode())], + "S2_MOD": [(ldap.MOD_REPLACE, 'employeeNumber', value_S2.encode())], + "S2_MODRDN": value_S2, + "expected": value_S2} + + # This test takes the user_1 + (uid, test_user_dn) = _employeenumber_user_get_dn(int(last)) + + # + # Step 4 + # + # disable all RA from M1 and M2 + # only M3 can replicate the update + # + agreement_m1_m2 = M1.agreement.list(suffix=DEFAULT_SUFFIX, consumer_host=M2.host, consumer_port=M2.port) + agreement_m1_m3 = M1.agreement.list(suffix=DEFAULT_SUFFIX, consumer_host=M3.host, consumer_port=M3.port) + agreement_m2_m1 = M2.agreement.list(suffix=DEFAULT_SUFFIX, consumer_host=M1.host, consumer_port=M1.port) + agreement_m2_m3 = M2.agreement.list(suffix=DEFAULT_SUFFIX, consumer_host=M3.host, consumer_port=M3.port) + + M1.agreement.pause(agreement_m1_m2[0].dn) + M1.agreement.pause(agreement_m1_m3[0].dn) + M2.agreement.pause(agreement_m2_m1[0].dn) + M2.agreement.pause(agreement_m2_m3[0].dn) + + # Step 5 + # Oldest update + # check that the entry on M1 contains employeeNumber= + description["S1"].rename_s(test_user_dn, 'employeeNumber=%s' % description["S1_MODRDN"], newsuperior=BASE_DISTINGUISHED, delold=1) + ents = description["S1"].search_s(BASE_DISTINGUISHED, ldap.SCOPE_SUBTREE, '(employeeNumber=%s)' % description["S1_MODRDN"]) + assert len(ents) == 1 + time.sleep(1) + + # Step 6 + # More recent update + # check that the entry on M2 contains employeeNumber= + description["S2"].rename_s(test_user_dn, 'employeeNumber=%s' % description["S2_MODRDN"], newsuperior=BASE_DISTINGUISHED, delold=1) + ents = description["S2"].search_s(BASE_DISTINGUISHED, ldap.SCOPE_SUBTREE, '(employeeNumber=%s)' % 
description["S2_MODRDN"]) + assert len(ents) == 1 + time.sleep(1) + + (no, new_test_user_dn) = _employeenumber_user_get_dn(description["S1_MODRDN"]) + description["S1"].modify_s(new_test_user_dn, description["S1_MOD"]) + ents = description["S1"].search_s(BASE_DISTINGUISHED, ldap.SCOPE_SUBTREE, '(employeeNumber=%s)' % value_S1) + assert len(ents) == 1 + time.sleep(1) + + (no, new_test_user_dn) = _employeenumber_user_get_dn(description["S2_MODRDN"]) + description["S2"].modify_s(new_test_user_dn, description["S2_MOD"]) + ents = description["S2"].search_s(BASE_DISTINGUISHED, ldap.SCOPE_SUBTREE, '(employeeNumber=%s)' % value_S2) + assert len(ents) == 1 + + _resume_ra_M1_then_M2(M1, M2, M3) + + #time.sleep(3600) + # Step 9 + # Check that M1 still contains employeeNumber= + ents = M1.search_s(BASE_DISTINGUISHED, ldap.SCOPE_SUBTREE, '(employeeNumber=%s)' % description["expected"]) + log.info('Search M1 employeeNumber=%s (vs. %s)' % (ents[0].getValue('employeeNumber'), description["expected"])) + assert len(ents) == 1 + assert ents[0].hasAttr('employeeNumber') and ents[0].getValue('employeeNumber') == description["expected"].encode() + + # Check that M2 still contains employeeNumber= + ents = M2.search_s(BASE_DISTINGUISHED, ldap.SCOPE_SUBTREE, '(employeeNumber=%s)' % description["expected"]) + log.info('Search M2 employeeNumber=%s (vs. %s)' % (ents[0].getValue('employeeNumber'), description["expected"])) + assert len(ents) == 1 + assert ents[0].hasAttr('employeeNumber') and ents[0].getValue('employeeNumber') == description["expected"].encode() + + # Check that M3 still contain employeeNumber and it contains employeeNumber= + ents = M3.search_s(BASE_DISTINGUISHED, ldap.SCOPE_SUBTREE, '(employeeNumber=*)') + assert len(ents) == MAX_EMPLOYEENUMBER_USER + ents = M3.search_s(BASE_DISTINGUISHED, ldap.SCOPE_SUBTREE, '(employeeNumber=%s)' % description["expected"]) + log.info('Search M3 employeeNumber=%s (vs. 
%s)' % (ents[0].getValue('employeeNumber'), description["expected"])) + assert len(ents) == 1 + assert ents[0].hasAttr('employeeNumber') and ents[0].getValue('employeeNumber') == description["expected"].encode() + +def test_ticket49658_28(topo): + """Do + M1: MODRDN -> V1 + M2: MODRDN -> V2 + M1: MOD(DEL/ADD) -> V1 + M2: MOD(REPL) -> V2 + Replicate order: M2 then M1 + expected: V2 + + :id: 286cd17e-225e-490f-83c9-20618b9407a9 + :setup: 3 Supplier Instances + 1. Use employeenumber=1000,ou=distinguished,ou=people, + :steps: + 1. Isolate M1 and M2 by pausing the replication agreements + 2. On M1 do DEL+ADD 1000 + MOD_ADD_13 + 3. On M2 do DEL+ADD 1000 + MOD_ADD_13 + 4. Enable replication agreement M2 -> M3, so that update step 2 is replicated first + 5. Enable replication agreement M1 -> M3, so that update step 3 is replicated second + 6. Check that the employeeNumber is 13 on all servers + :expectedresults: + 1. Fill in the result that is expected + 2. For each test step + """ + + # If you need any test suite initialization, + # please, write additional fixture for that (including finalizer). + # Topology for suites are predefined in lib389/topologies.py. + + # If you need host, port or any other data about instance, + # Please, use the instance object attributes for that (for example, topo.ms["supplier1"].serverid) + + + + if DEBUGGING: + # Add debugging steps(if any)... 
+ pass + M1 = topo.ms["supplier1"] + M2 = topo.ms["supplier2"] + M3 = topo.ms["supplier3"] + value_init = '7' + last = '13' + value_S1 = '13.1' + value_S2 = '13.2' + + description = { + "S1": M1, + "S2": M2, + "S1_MODRDN": value_S1, + "S1_MOD": [(ldap.MOD_DELETE, 'employeeNumber', value_S1.encode()),(ldap.MOD_ADD, 'employeeNumber', value_S1.encode())], + "S2_MOD": [(ldap.MOD_REPLACE, 'employeeNumber', value_S2.encode())], + "S2_MODRDN": value_S2, + "expected": value_S2} + + # This test takes the user_1 + (uid, test_user_dn) = _employeenumber_user_get_dn(int(last)) + + # + # Step 4 + # + # disable all RA from M1 and M2 + # only M3 can replicate the update + # + agreement_m1_m2 = M1.agreement.list(suffix=DEFAULT_SUFFIX, consumer_host=M2.host, consumer_port=M2.port) + agreement_m1_m3 = M1.agreement.list(suffix=DEFAULT_SUFFIX, consumer_host=M3.host, consumer_port=M3.port) + agreement_m2_m1 = M2.agreement.list(suffix=DEFAULT_SUFFIX, consumer_host=M1.host, consumer_port=M1.port) + agreement_m2_m3 = M2.agreement.list(suffix=DEFAULT_SUFFIX, consumer_host=M3.host, consumer_port=M3.port) + + M1.agreement.pause(agreement_m1_m2[0].dn) + M1.agreement.pause(agreement_m1_m3[0].dn) + M2.agreement.pause(agreement_m2_m1[0].dn) + M2.agreement.pause(agreement_m2_m3[0].dn) + + # Step 5 + # Oldest update + # check that the entry on M1 contains employeeNumber= + description["S1"].rename_s(test_user_dn, 'employeeNumber=%s' % description["S1_MODRDN"], newsuperior=BASE_DISTINGUISHED, delold=1) + ents = description["S1"].search_s(BASE_DISTINGUISHED, ldap.SCOPE_SUBTREE, '(employeeNumber=%s)' % description["S1_MODRDN"]) + assert len(ents) == 1 + time.sleep(1) + + # Step 6 + # More recent update + # check that the entry on M2 contains employeeNumber= + description["S2"].rename_s(test_user_dn, 'employeeNumber=%s' % description["S2_MODRDN"], newsuperior=BASE_DISTINGUISHED, delold=1) + ents = description["S2"].search_s(BASE_DISTINGUISHED, ldap.SCOPE_SUBTREE, '(employeeNumber=%s)' % 
description["S2_MODRDN"]) + assert len(ents) == 1 + time.sleep(1) + + (no, new_test_user_dn) = _employeenumber_user_get_dn(description["S1_MODRDN"]) + description["S1"].modify_s(new_test_user_dn, description["S1_MOD"]) + ents = description["S1"].search_s(BASE_DISTINGUISHED, ldap.SCOPE_SUBTREE, '(employeeNumber=%s)' % value_S1) + assert len(ents) == 1 + time.sleep(1) + + (no, new_test_user_dn) = _employeenumber_user_get_dn(description["S2_MODRDN"]) + description["S2"].modify_s(new_test_user_dn, description["S2_MOD"]) + ents = description["S2"].search_s(BASE_DISTINGUISHED, ldap.SCOPE_SUBTREE, '(employeeNumber=%s)' % value_S2) + assert len(ents) == 1 + + _resume_ra_M2_then_M1(M1, M2, M3) + + #time.sleep(3600) + # Step 9 + # Check that M1 still contains employeeNumber= + ents = M1.search_s(BASE_DISTINGUISHED, ldap.SCOPE_SUBTREE, '(employeeNumber=%s)' % description["expected"]) + log.info('Search M1 employeeNumber=%s (vs. %s)' % (ents[0].getValue('employeeNumber'), description["expected"])) + assert len(ents) == 1 + assert ents[0].hasAttr('employeeNumber') and ents[0].getValue('employeeNumber') == description["expected"].encode() + + # Check that M2 still contains employeeNumber= + ents = M2.search_s(BASE_DISTINGUISHED, ldap.SCOPE_SUBTREE, '(employeeNumber=%s)' % description["expected"]) + log.info('Search M2 employeeNumber=%s (vs. %s)' % (ents[0].getValue('employeeNumber'), description["expected"])) + assert len(ents) == 1 + assert ents[0].hasAttr('employeeNumber') and ents[0].getValue('employeeNumber') == description["expected"].encode() + + # Check that M3 still contain employeeNumber and it contains employeeNumber= + ents = M3.search_s(BASE_DISTINGUISHED, ldap.SCOPE_SUBTREE, '(employeeNumber=*)') + assert len(ents) == MAX_EMPLOYEENUMBER_USER + ents = M3.search_s(BASE_DISTINGUISHED, ldap.SCOPE_SUBTREE, '(employeeNumber=%s)' % description["expected"]) + log.info('Search M3 employeeNumber=%s (vs. 
%s)' % (ents[0].getValue('employeeNumber'), description["expected"])) + assert len(ents) == 1 + assert ents[0].hasAttr('employeeNumber') and ents[0].getValue('employeeNumber') == description["expected"].encode() + + +def test_ticket49658_29(topo): + """Do + M1: MODRDN -> V1 + M2: MODRDN -> V2 + M1: MOD(DEL/ADD) -> V1 + M2: MOD(DEL/ADD) -> V2 + Replicate order: M1 then M2 + expected: V2 + + :id: b81f3885-7965-48fe-8dbf-692d1150d061 + :setup: 3 Supplier Instances + 1. Use employeenumber=1000,ou=distinguished,ou=people, + :steps: + 1. Isolate M1 and M2 by pausing the replication agreements + 2. On M1 do DEL+ADD 1000 + MOD_ADD_13 + 3. On M2 do DEL+ADD 1000 + MOD_ADD_13 + 4. Enable replication agreement M2 -> M3, so that update step 2 is replicated first + 5. Enable replication agreement M1 -> M3, so that update step 3 is replicated second + 6. Check that the employeeNumber is 13 on all servers + :expectedresults: + 1. Fill in the result that is expected + 2. For each test step + """ + + # If you need any test suite initialization, + # please, write additional fixture for that (including finalizer). + # Topology for suites are predefined in lib389/topologies.py. + + # If you need host, port or any other data about instance, + # Please, use the instance object attributes for that (for example, topo.ms["supplier1"].serverid) + + + + if DEBUGGING: + # Add debugging steps(if any)... 
+ pass + M1 = topo.ms["supplier1"] + M2 = topo.ms["supplier2"] + M3 = topo.ms["supplier3"] + value_init = '7' + last = '14' + value_S1 = '14.1' + value_S2 = '14.2' + + description = { + "S1": M1, + "S2": M2, + "S1_MODRDN": value_S1, + "S1_MOD": [(ldap.MOD_DELETE, 'employeeNumber', value_S1.encode()),(ldap.MOD_ADD, 'employeeNumber', value_S1.encode())], + "S2_MOD": [(ldap.MOD_DELETE, 'employeeNumber', value_S2.encode()),(ldap.MOD_ADD, 'employeeNumber', value_S2.encode())], + "S2_MODRDN": value_S2, + "expected": value_S2} + + # This test takes the user_1 + (uid, test_user_dn) = _employeenumber_user_get_dn(int(last)) + + # + # Step 4 + # + # disable all RA from M1 and M2 + # only M3 can replicate the update + # + agreement_m1_m2 = M1.agreement.list(suffix=DEFAULT_SUFFIX, consumer_host=M2.host, consumer_port=M2.port) + agreement_m1_m3 = M1.agreement.list(suffix=DEFAULT_SUFFIX, consumer_host=M3.host, consumer_port=M3.port) + agreement_m2_m1 = M2.agreement.list(suffix=DEFAULT_SUFFIX, consumer_host=M1.host, consumer_port=M1.port) + agreement_m2_m3 = M2.agreement.list(suffix=DEFAULT_SUFFIX, consumer_host=M3.host, consumer_port=M3.port) + + M1.agreement.pause(agreement_m1_m2[0].dn) + M1.agreement.pause(agreement_m1_m3[0].dn) + M2.agreement.pause(agreement_m2_m1[0].dn) + M2.agreement.pause(agreement_m2_m3[0].dn) + + # Step 5 + # Oldest update + # check that the entry on M1 contains employeeNumber= + description["S1"].rename_s(test_user_dn, 'employeeNumber=%s' % description["S1_MODRDN"], newsuperior=BASE_DISTINGUISHED, delold=1) + ents = description["S1"].search_s(BASE_DISTINGUISHED, ldap.SCOPE_SUBTREE, '(employeeNumber=%s)' % description["S1_MODRDN"]) + assert len(ents) == 1 + time.sleep(1) + + # Step 6 + # More recent update + # check that the entry on M2 contains employeeNumber= + description["S2"].rename_s(test_user_dn, 'employeeNumber=%s' % description["S2_MODRDN"], newsuperior=BASE_DISTINGUISHED, delold=1) + ents = description["S2"].search_s(BASE_DISTINGUISHED, 
ldap.SCOPE_SUBTREE, '(employeeNumber=%s)' % description["S2_MODRDN"]) + assert len(ents) == 1 + time.sleep(1) + + (no, new_test_user_dn) = _employeenumber_user_get_dn(description["S1_MODRDN"]) + description["S1"].modify_s(new_test_user_dn, description["S1_MOD"]) + ents = description["S1"].search_s(BASE_DISTINGUISHED, ldap.SCOPE_SUBTREE, '(employeeNumber=%s)' % value_S1) + assert len(ents) == 1 + time.sleep(1) + + (no, new_test_user_dn) = _employeenumber_user_get_dn(description["S2_MODRDN"]) + description["S2"].modify_s(new_test_user_dn, description["S2_MOD"]) + ents = description["S2"].search_s(BASE_DISTINGUISHED, ldap.SCOPE_SUBTREE, '(employeeNumber=%s)' % value_S2) + assert len(ents) == 1 + + _resume_ra_M1_then_M2(M1, M2, M3) + + #time.sleep(3600) + # Step 9 + # Check that M1 still contains employeeNumber= + ents = M1.search_s(BASE_DISTINGUISHED, ldap.SCOPE_SUBTREE, '(employeeNumber=%s)' % description["expected"]) + log.info('Search M1 employeeNumber=%s (vs. %s)' % (ents[0].getValue('employeeNumber'), description["expected"])) + assert len(ents) == 1 + assert ents[0].hasAttr('employeeNumber') and ents[0].getValue('employeeNumber') == description["expected"].encode() + + # Check that M2 still contains employeeNumber= + ents = M2.search_s(BASE_DISTINGUISHED, ldap.SCOPE_SUBTREE, '(employeeNumber=%s)' % description["expected"]) + log.info('Search M2 employeeNumber=%s (vs. %s)' % (ents[0].getValue('employeeNumber'), description["expected"])) + assert len(ents) == 1 + assert ents[0].hasAttr('employeeNumber') and ents[0].getValue('employeeNumber') == description["expected"].encode() + + # Check that M3 still contain employeeNumber and it contains employeeNumber= + ents = M3.search_s(BASE_DISTINGUISHED, ldap.SCOPE_SUBTREE, '(employeeNumber=*)') + assert len(ents) == MAX_EMPLOYEENUMBER_USER + ents = M3.search_s(BASE_DISTINGUISHED, ldap.SCOPE_SUBTREE, '(employeeNumber=%s)' % description["expected"]) + log.info('Search M3 employeeNumber=%s (vs. 
%s)' % (ents[0].getValue('employeeNumber'), description["expected"])) + assert len(ents) == 1 + assert ents[0].hasAttr('employeeNumber') and ents[0].getValue('employeeNumber') == description["expected"].encode() + +def test_ticket49658_30(topo): + """Do + M1: MODRDN -> V1 + M2: MODRDN -> V2 + M1: MOD(DEL/ADD) -> V1 + M2: MOD(DEL/ADD) -> V2 + Replicate order: M2 then M1 + expected: V2 + + :id: 4dce88f8-31db-488b-aeb4-fce4173e3f12 + :setup: 3 Supplier Instances + 1. Use employeenumber=1000,ou=distinguished,ou=people, + :steps: + 1. Isolate M1 and M2 by pausing the replication agreements + 2. On M1 do DEL+ADD 1000 + MOD_ADD_13 + 3. On M2 do DEL+ADD 1000 + MOD_ADD_13 + 4. Enable replication agreement M2 -> M3, so that update step 2 is replicated first + 5. Enable replication agreement M1 -> M3, so that update step 3 is replicated second + 6. Check that the employeeNumber is 13 on all servers + :expectedresults: + 1. Fill in the result that is expected + 2. For each test step + """ + + # If you need any test suite initialization, + # please, write additional fixture for that (including finalizer). + # Topology for suites are predefined in lib389/topologies.py. + + # If you need host, port or any other data about instance, + # Please, use the instance object attributes for that (for example, topo.ms["supplier1"].serverid) + + + + if DEBUGGING: + # Add debugging steps(if any)... 
+ pass + M1 = topo.ms["supplier1"] + M2 = topo.ms["supplier2"] + M3 = topo.ms["supplier3"] + value_init = '7' + last = '15' + value_S1 = '15.1' + value_S2 = '15.2' + + description = { + "S1": M1, + "S2": M2, + "S1_MODRDN": value_S1, + "S1_MOD": [(ldap.MOD_DELETE, 'employeeNumber', value_S1.encode()),(ldap.MOD_ADD, 'employeeNumber', value_S1.encode())], + "S2_MOD": [(ldap.MOD_DELETE, 'employeeNumber', value_S2.encode()),(ldap.MOD_ADD, 'employeeNumber', value_S2.encode())], + "S2_MODRDN": value_S2, + "expected": value_S2} + + # This test takes the user_1 + (uid, test_user_dn) = _employeenumber_user_get_dn(int(last)) + + # + # Step 4 + # + # disable all RA from M1 and M2 + # only M3 can replicate the update + # + agreement_m1_m2 = M1.agreement.list(suffix=DEFAULT_SUFFIX, consumer_host=M2.host, consumer_port=M2.port) + agreement_m1_m3 = M1.agreement.list(suffix=DEFAULT_SUFFIX, consumer_host=M3.host, consumer_port=M3.port) + agreement_m2_m1 = M2.agreement.list(suffix=DEFAULT_SUFFIX, consumer_host=M1.host, consumer_port=M1.port) + agreement_m2_m3 = M2.agreement.list(suffix=DEFAULT_SUFFIX, consumer_host=M3.host, consumer_port=M3.port) + + M1.agreement.pause(agreement_m1_m2[0].dn) + M1.agreement.pause(agreement_m1_m3[0].dn) + M2.agreement.pause(agreement_m2_m1[0].dn) + M2.agreement.pause(agreement_m2_m3[0].dn) + + # Step 5 + # Oldest update + # check that the entry on M1 contains employeeNumber= + description["S1"].rename_s(test_user_dn, 'employeeNumber=%s' % description["S1_MODRDN"], newsuperior=BASE_DISTINGUISHED, delold=1) + ents = description["S1"].search_s(BASE_DISTINGUISHED, ldap.SCOPE_SUBTREE, '(employeeNumber=%s)' % description["S1_MODRDN"]) + assert len(ents) == 1 + time.sleep(1) + + # Step 6 + # More recent update + # check that the entry on M2 contains employeeNumber= + description["S2"].rename_s(test_user_dn, 'employeeNumber=%s' % description["S2_MODRDN"], newsuperior=BASE_DISTINGUISHED, delold=1) + ents = description["S2"].search_s(BASE_DISTINGUISHED, 
ldap.SCOPE_SUBTREE, '(employeeNumber=%s)' % description["S2_MODRDN"]) + assert len(ents) == 1 + time.sleep(1) + + (no, new_test_user_dn) = _employeenumber_user_get_dn(description["S1_MODRDN"]) + description["S1"].modify_s(new_test_user_dn, description["S1_MOD"]) + ents = description["S1"].search_s(BASE_DISTINGUISHED, ldap.SCOPE_SUBTREE, '(employeeNumber=%s)' % value_S1) + assert len(ents) == 1 + time.sleep(1) + + (no, new_test_user_dn) = _employeenumber_user_get_dn(description["S2_MODRDN"]) + description["S2"].modify_s(new_test_user_dn, description["S2_MOD"]) + ents = description["S2"].search_s(BASE_DISTINGUISHED, ldap.SCOPE_SUBTREE, '(employeeNumber=%s)' % value_S2) + assert len(ents) == 1 + + _resume_ra_M2_then_M1(M1, M2, M3) + + #time.sleep(3600) + # Step 9 + # Check that M1 still contains employeeNumber= + ents = M1.search_s(BASE_DISTINGUISHED, ldap.SCOPE_SUBTREE, '(employeeNumber=%s)' % description["expected"]) + log.info('Search M1 employeeNumber=%s (vs. %s)' % (ents[0].getValue('employeeNumber'), description["expected"])) + assert len(ents) == 1 + assert ents[0].hasAttr('employeeNumber') and ents[0].getValue('employeeNumber') == description["expected"].encode() + + # Check that M2 still contains employeeNumber= + ents = M2.search_s(BASE_DISTINGUISHED, ldap.SCOPE_SUBTREE, '(employeeNumber=%s)' % description["expected"]) + log.info('Search M2 employeeNumber=%s (vs. %s)' % (ents[0].getValue('employeeNumber'), description["expected"])) + assert len(ents) == 1 + assert ents[0].hasAttr('employeeNumber') and ents[0].getValue('employeeNumber') == description["expected"].encode() + + # Check that M3 still contain employeeNumber and it contains employeeNumber= + ents = M3.search_s(BASE_DISTINGUISHED, ldap.SCOPE_SUBTREE, '(employeeNumber=*)') + assert len(ents) == MAX_EMPLOYEENUMBER_USER + ents = M3.search_s(BASE_DISTINGUISHED, ldap.SCOPE_SUBTREE, '(employeeNumber=%s)' % description["expected"]) + log.info('Search M3 employeeNumber=%s (vs. 
%s)' % (ents[0].getValue('employeeNumber'), description["expected"])) + assert len(ents) == 1 + assert ents[0].hasAttr('employeeNumber') and ents[0].getValue('employeeNumber') == description["expected"].encode() + +def test_ticket49658_31(topo): + """Do + M1: MODRDN -> V1 + M2: MODRDN -> V2 + M1: MOD(REPL) -> V1 + M2: MOD(REPL) -> V2 + M2: MODRDN -> V1 + Replicate order: M2 then M1 + expected: V1 + + :id: 2791a3df-25a2-4e6e-a5e9-514d76af43fb + :setup: 3 Supplier Instances + 1. Use employeenumber=1000,ou=distinguished,ou=people, + :steps: + 1. Isolate M1 and M2 by pausing the replication agreements + 2. On M1 do DEL+ADD 1000 + MOD_ADD_13 + 3. On M2 do DEL+ADD 1000 + MOD_ADD_13 + 4. Enable replication agreement M2 -> M3, so that update step 2 is replicated first + 5. Enable replication agreement M1 -> M3, so that update step 3 is replicated second + 6. Check that the employeeNumber is 13 on all servers + :expectedresults: + 1. Fill in the result that is expected + 2. For each test step + """ + + # If you need any test suite initialization, + # please, write additional fixture for that (including finalizer). + # Topology for suites are predefined in lib389/topologies.py. + + # If you need host, port or any other data about instance, + # Please, use the instance object attributes for that (for example, topo.ms["supplier1"].serverid) + + + + if DEBUGGING: + # Add debugging steps(if any)... 
+ pass + M1 = topo.ms["supplier1"] + M2 = topo.ms["supplier2"] + M3 = topo.ms["supplier3"] + value_init = '7' + last = '16' + value_S1 = '16.1' + value_S2 = '16.2' + + description = { + "S1": M1, + "S2": M2, + "S1_MODRDN": value_S1, + "S1_MOD": [(ldap.MOD_REPLACE, 'employeeNumber', value_S1.encode())], + "S2_MOD": [(ldap.MOD_REPLACE, 'employeeNumber', value_S2.encode())], + "S2_MODRDN_1": value_S2, + "S2_MODRDN_2": value_S1, + "expected": value_S1} + + # This test takes the user_1 + (uid, test_user_dn) = _employeenumber_user_get_dn(int(last)) + + # + # Step 4 + # + # disable all RA from M1 and M2 + # only M3 can replicate the update + # + agreement_m1_m2 = M1.agreement.list(suffix=DEFAULT_SUFFIX, consumer_host=M2.host, consumer_port=M2.port) + agreement_m1_m3 = M1.agreement.list(suffix=DEFAULT_SUFFIX, consumer_host=M3.host, consumer_port=M3.port) + agreement_m2_m1 = M2.agreement.list(suffix=DEFAULT_SUFFIX, consumer_host=M1.host, consumer_port=M1.port) + agreement_m2_m3 = M2.agreement.list(suffix=DEFAULT_SUFFIX, consumer_host=M3.host, consumer_port=M3.port) + + M1.agreement.pause(agreement_m1_m2[0].dn) + M1.agreement.pause(agreement_m1_m3[0].dn) + M2.agreement.pause(agreement_m2_m1[0].dn) + M2.agreement.pause(agreement_m2_m3[0].dn) + + # Step 5 + # Oldest update + # check that the entry on M1 contains employeeNumber= + description["S1"].rename_s(test_user_dn, 'employeeNumber=%s' % description["S1_MODRDN"], newsuperior=BASE_DISTINGUISHED, delold=1) + ents = description["S1"].search_s(BASE_DISTINGUISHED, ldap.SCOPE_SUBTREE, '(employeeNumber=%s)' % description["S1_MODRDN"]) + assert len(ents) == 1 + time.sleep(1) + + # Step 6 + # More recent update + # check that the entry on M2 contains employeeNumber= + description["S2"].rename_s(test_user_dn, 'employeeNumber=%s' % description["S2_MODRDN_1"], newsuperior=BASE_DISTINGUISHED, delold=1) + ents = description["S2"].search_s(BASE_DISTINGUISHED, ldap.SCOPE_SUBTREE, '(employeeNumber=%s)' % description["S2_MODRDN_1"]) + 
assert len(ents) == 1 + time.sleep(1) + + (no, new_test_user_dn) = _employeenumber_user_get_dn(description["S1_MODRDN"]) + description["S1"].modify_s(new_test_user_dn, description["S1_MOD"]) + ents = description["S1"].search_s(BASE_DISTINGUISHED, ldap.SCOPE_SUBTREE, '(employeeNumber=%s)' % value_S1) + assert len(ents) == 1 + time.sleep(1) + + + (no, new_test_user_dn) = _employeenumber_user_get_dn(description["S2_MODRDN_1"]) + description["S2"].modify_s(new_test_user_dn, description["S2_MOD"]) + ents = description["S2"].search_s(BASE_DISTINGUISHED, ldap.SCOPE_SUBTREE, '(employeeNumber=%s)' % value_S2) + assert len(ents) == 1 + + description["S2"].rename_s(new_test_user_dn, 'employeeNumber=%s' % description["S2_MODRDN_2"], newsuperior=BASE_DISTINGUISHED, delold=1) + ents = description["S2"].search_s(BASE_DISTINGUISHED, ldap.SCOPE_SUBTREE, '(employeeNumber=%s)' % description["S2_MODRDN_2"]) + assert len(ents) == 1 + time.sleep(1) + + _resume_ra_M2_then_M1(M1, M2, M3) + + #time.sleep(3600) + # Step 9 + # Check that M1 still contains employeeNumber= + ents = M1.search_s(BASE_DISTINGUISHED, ldap.SCOPE_SUBTREE, '(employeeNumber=%s)' % description["expected"]) + log.info('Search M1 employeeNumber=%s (vs. %s)' % (ents[0].getValue('employeeNumber'), description["expected"])) + assert len(ents) == 1 + assert ents[0].hasAttr('employeeNumber') and ents[0].getValue('employeeNumber') == description["expected"].encode() + + # Check that M2 still contains employeeNumber= + ents = M2.search_s(BASE_DISTINGUISHED, ldap.SCOPE_SUBTREE, '(employeeNumber=%s)' % description["expected"]) + log.info('Search M2 employeeNumber=%s (vs. 
%s)' % (ents[0].getValue('employeeNumber'), description["expected"])) + assert len(ents) == 1 + assert ents[0].hasAttr('employeeNumber') and ents[0].getValue('employeeNumber') == description["expected"].encode() + + # Check that M3 still contain employeeNumber and it contains employeeNumber= + ents = M3.search_s(BASE_DISTINGUISHED, ldap.SCOPE_SUBTREE, '(employeeNumber=*)') + assert len(ents) == MAX_EMPLOYEENUMBER_USER + ents = M3.search_s(BASE_DISTINGUISHED, ldap.SCOPE_SUBTREE, '(employeeNumber=%s)' % description["expected"]) + log.info('Search M3 employeeNumber=%s (vs. %s)' % (ents[0].getValue('employeeNumber'), description["expected"])) + assert len(ents) == 1 + assert ents[0].hasAttr('employeeNumber') and ents[0].getValue('employeeNumber') == description["expected"].encode() + + +def test_ticket49658_32(topo): + """Do + M1: MODRDN -> V1 + M2: MODRDN -> V2 + M1: MOD(REPL) -> V1 + M2: MOD(REPL) -> V2 + M2: MODRDN -> V1 + Replicate order: M1 then M2 + expected: V1 + + :id: 6af57e2e-a325-474a-9c9d-f07cd2244657 + :setup: 3 Supplier Instances + 1. Use employeenumber=1000,ou=distinguished,ou=people, + :steps: + 1. Isolate M1 and M2 by pausing the replication agreements + 2. On M1 do DEL+ADD 1000 + MOD_ADD_13 + 3. On M2 do DEL+ADD 1000 + MOD_ADD_13 + 4. Enable replication agreement M2 -> M3, so that update step 2 is replicated first + 5. Enable replication agreement M1 -> M3, so that update step 3 is replicated second + 6. Check that the employeeNumber is 13 on all servers + :expectedresults: + 1. Fill in the result that is expected + 2. For each test step + """ + + # If you need any test suite initialization, + # please, write additional fixture for that (including finalizer). + # Topology for suites are predefined in lib389/topologies.py. + + # If you need host, port or any other data about instance, + # Please, use the instance object attributes for that (for example, topo.ms["supplier1"].serverid) + + + + if DEBUGGING: + # Add debugging steps(if any)... 
+ pass + M1 = topo.ms["supplier1"] + M2 = topo.ms["supplier2"] + M3 = topo.ms["supplier3"] + value_init = '7' + last = '17' + value_S1 = '17.1' + value_S2 = '17.2' + + description = { + "S1": M1, + "S2": M2, + "S1_MODRDN": value_S1, + "S1_MOD": [(ldap.MOD_REPLACE, 'employeeNumber', value_S1.encode())], + "S2_MOD": [(ldap.MOD_REPLACE, 'employeeNumber', value_S2.encode())], + "S2_MODRDN_1": value_S2, + "S2_MODRDN_2": value_S1, + "expected": value_S1} + + # This test takes the user_1 + (uid, test_user_dn) = _employeenumber_user_get_dn(int(last)) + + # + # Step 4 + # + # disable all RA from M1 and M2 + # only M3 can replicate the update + # + agreement_m1_m2 = M1.agreement.list(suffix=DEFAULT_SUFFIX, consumer_host=M2.host, consumer_port=M2.port) + agreement_m1_m3 = M1.agreement.list(suffix=DEFAULT_SUFFIX, consumer_host=M3.host, consumer_port=M3.port) + agreement_m2_m1 = M2.agreement.list(suffix=DEFAULT_SUFFIX, consumer_host=M1.host, consumer_port=M1.port) + agreement_m2_m3 = M2.agreement.list(suffix=DEFAULT_SUFFIX, consumer_host=M3.host, consumer_port=M3.port) + + M1.agreement.pause(agreement_m1_m2[0].dn) + M1.agreement.pause(agreement_m1_m3[0].dn) + M2.agreement.pause(agreement_m2_m1[0].dn) + M2.agreement.pause(agreement_m2_m3[0].dn) + + # Step 5 + # Oldest update + # check that the entry on M1 contains employeeNumber= + description["S1"].rename_s(test_user_dn, 'employeeNumber=%s' % description["S1_MODRDN"], newsuperior=BASE_DISTINGUISHED, delold=1) + ents = description["S1"].search_s(BASE_DISTINGUISHED, ldap.SCOPE_SUBTREE, '(employeeNumber=%s)' % description["S1_MODRDN"]) + assert len(ents) == 1 + time.sleep(1) + + # Step 6 + # More recent update + # check that the entry on M2 contains employeeNumber= + description["S2"].rename_s(test_user_dn, 'employeeNumber=%s' % description["S2_MODRDN_1"], newsuperior=BASE_DISTINGUISHED, delold=1) + ents = description["S2"].search_s(BASE_DISTINGUISHED, ldap.SCOPE_SUBTREE, '(employeeNumber=%s)' % description["S2_MODRDN_1"]) + 
assert len(ents) == 1 + time.sleep(1) + + (no, new_test_user_dn) = _employeenumber_user_get_dn(description["S1_MODRDN"]) + description["S1"].modify_s(new_test_user_dn, description["S1_MOD"]) + ents = description["S1"].search_s(BASE_DISTINGUISHED, ldap.SCOPE_SUBTREE, '(employeeNumber=%s)' % value_S1) + assert len(ents) == 1 + time.sleep(1) + + + (no, new_test_user_dn) = _employeenumber_user_get_dn(description["S2_MODRDN_1"]) + description["S2"].modify_s(new_test_user_dn, description["S2_MOD"]) + ents = description["S2"].search_s(BASE_DISTINGUISHED, ldap.SCOPE_SUBTREE, '(employeeNumber=%s)' % value_S2) + assert len(ents) == 1 + + description["S2"].rename_s(new_test_user_dn, 'employeeNumber=%s' % description["S2_MODRDN_2"], newsuperior=BASE_DISTINGUISHED, delold=1) + ents = description["S2"].search_s(BASE_DISTINGUISHED, ldap.SCOPE_SUBTREE, '(employeeNumber=%s)' % description["S2_MODRDN_2"]) + assert len(ents) == 1 + time.sleep(1) + + _resume_ra_M1_then_M2(M1, M2, M3) + + #time.sleep(3600) + # Step 9 + # Check that M1 still contains employeeNumber= + ents = M1.search_s(BASE_DISTINGUISHED, ldap.SCOPE_SUBTREE, '(employeeNumber=%s)' % description["expected"]) + log.info('Search M1 employeeNumber=%s (vs. %s)' % (ents[0].getValue('employeeNumber'), description["expected"])) + assert len(ents) == 1 + assert ents[0].hasAttr('employeeNumber') and ents[0].getValue('employeeNumber') == description["expected"].encode() + + # Check that M2 still contains employeeNumber= + ents = M2.search_s(BASE_DISTINGUISHED, ldap.SCOPE_SUBTREE, '(employeeNumber=%s)' % description["expected"]) + log.info('Search M2 employeeNumber=%s (vs. 
%s)' % (ents[0].getValue('employeeNumber'), description["expected"])) + assert len(ents) == 1 + assert ents[0].hasAttr('employeeNumber') and ents[0].getValue('employeeNumber') == description["expected"].encode() + + # Check that M3 still contain employeeNumber and it contains employeeNumber= + ents = M3.search_s(BASE_DISTINGUISHED, ldap.SCOPE_SUBTREE, '(employeeNumber=*)') + assert len(ents) == MAX_EMPLOYEENUMBER_USER + ents = M3.search_s(BASE_DISTINGUISHED, ldap.SCOPE_SUBTREE, '(employeeNumber=%s)' % description["expected"]) + log.info('Search M3 employeeNumber=%s (vs. %s)' % (ents[0].getValue('employeeNumber'), description["expected"])) + assert len(ents) == 1 + assert ents[0].hasAttr('employeeNumber') and ents[0].getValue('employeeNumber') == description["expected"].encode() + +def test_ticket49658_33(topo): + """Do + M1: MODRDN -> V1 + M2: MODRDN -> V2 + M1: MOD(REPL) -> V1 + M2: MODRDN -> V1 + Replicate order: M2 then M1 + expected: V1 + + :id: 81100b04-d3b6-47df-90eb-d96ef14a3722 + :setup: 3 Supplier Instances + 1. Use employeenumber=1000,ou=distinguished,ou=people, + :steps: + 1. Isolate M1 and M2 by pausing the replication agreements + 2. On M1 do DEL+ADD 1000 + MOD_ADD_13 + 3. On M2 do DEL+ADD 1000 + MOD_ADD_13 + 4. Enable replication agreement M2 -> M3, so that update step 2 is replicated first + 5. Enable replication agreement M1 -> M3, so that update step 3 is replicated second + 6. Check that the employeeNumber is 13 on all servers + :expectedresults: + 1. Fill in the result that is expected + 2. For each test step + """ + + # If you need any test suite initialization, + # please, write additional fixture for that (including finalizer). + # Topology for suites are predefined in lib389/topologies.py. + + # If you need host, port or any other data about instance, + # Please, use the instance object attributes for that (for example, topo.ms["supplier1"].serverid) + + + + if DEBUGGING: + # Add debugging steps(if any)... 
+ pass + M1 = topo.ms["supplier1"] + M2 = topo.ms["supplier2"] + M3 = topo.ms["supplier3"] + value_init = '7' + last = '18' + value_S1 = '18.1' + value_S2 = '18.2' + + description = { + "S1": M1, + "S2": M2, + "S1_MODRDN": value_S1, + "S1_MOD": [(ldap.MOD_REPLACE, 'employeeNumber', value_S1.encode())], + "S2_MODRDN_1": value_S2, + "S2_MODRDN_2": value_S1, + "expected": value_S1} + + # This test takes the user_1 + (uid, test_user_dn) = _employeenumber_user_get_dn(int(last)) + + # + # Step 4 + # + # disable all RA from M1 and M2 + # only M3 can replicate the update + # + agreement_m1_m2 = M1.agreement.list(suffix=DEFAULT_SUFFIX, consumer_host=M2.host, consumer_port=M2.port) + agreement_m1_m3 = M1.agreement.list(suffix=DEFAULT_SUFFIX, consumer_host=M3.host, consumer_port=M3.port) + agreement_m2_m1 = M2.agreement.list(suffix=DEFAULT_SUFFIX, consumer_host=M1.host, consumer_port=M1.port) + agreement_m2_m3 = M2.agreement.list(suffix=DEFAULT_SUFFIX, consumer_host=M3.host, consumer_port=M3.port) + + M1.agreement.pause(agreement_m1_m2[0].dn) + M1.agreement.pause(agreement_m1_m3[0].dn) + M2.agreement.pause(agreement_m2_m1[0].dn) + M2.agreement.pause(agreement_m2_m3[0].dn) + + # Step 5 + # Oldest update + # check that the entry on M1 contains employeeNumber= + description["S1"].rename_s(test_user_dn, 'employeeNumber=%s' % description["S1_MODRDN"], newsuperior=BASE_DISTINGUISHED, delold=1) + ents = description["S1"].search_s(BASE_DISTINGUISHED, ldap.SCOPE_SUBTREE, '(employeeNumber=%s)' % description["S1_MODRDN"]) + assert len(ents) == 1 + time.sleep(1) + + # Step 6 + # More recent update + # check that the entry on M2 contains employeeNumber= + description["S2"].rename_s(test_user_dn, 'employeeNumber=%s' % description["S2_MODRDN_1"], newsuperior=BASE_DISTINGUISHED, delold=1) + ents = description["S2"].search_s(BASE_DISTINGUISHED, ldap.SCOPE_SUBTREE, '(employeeNumber=%s)' % description["S2_MODRDN_1"]) + assert len(ents) == 1 + time.sleep(1) + + (no, new_test_user_dn) = 
_employeenumber_user_get_dn(description["S1_MODRDN"]) + description["S1"].modify_s(new_test_user_dn, description["S1_MOD"]) + ents = description["S1"].search_s(BASE_DISTINGUISHED, ldap.SCOPE_SUBTREE, '(employeeNumber=%s)' % value_S1) + assert len(ents) == 1 + time.sleep(1) + + (no, new_test_user_dn) = _employeenumber_user_get_dn(description["S2_MODRDN_1"]) + description["S2"].rename_s(new_test_user_dn, 'employeeNumber=%s' % description["S2_MODRDN_2"], newsuperior=BASE_DISTINGUISHED, delold=1) + ents = description["S2"].search_s(BASE_DISTINGUISHED, ldap.SCOPE_SUBTREE, '(employeeNumber=%s)' % description["S2_MODRDN_2"]) + assert len(ents) == 1 + time.sleep(1) + + _resume_ra_M2_then_M1(M1, M2, M3) + + #time.sleep(3600) + # Step 9 + # Check that M1 still contains employeeNumber= + ents = M1.search_s(BASE_DISTINGUISHED, ldap.SCOPE_SUBTREE, '(employeeNumber=%s)' % description["expected"]) + log.info('Search M1 employeeNumber=%s (vs. %s)' % (ents[0].getValue('employeeNumber'), description["expected"])) + assert len(ents) == 1 + assert ents[0].hasAttr('employeeNumber') and ents[0].getValue('employeeNumber') == description["expected"].encode() + + # Check that M2 still contains employeeNumber= + ents = M2.search_s(BASE_DISTINGUISHED, ldap.SCOPE_SUBTREE, '(employeeNumber=%s)' % description["expected"]) + log.info('Search M2 employeeNumber=%s (vs. %s)' % (ents[0].getValue('employeeNumber'), description["expected"])) + assert len(ents) == 1 + assert ents[0].hasAttr('employeeNumber') and ents[0].getValue('employeeNumber') == description["expected"].encode() + + # Check that M3 still contain employeeNumber and it contains employeeNumber= + ents = M3.search_s(BASE_DISTINGUISHED, ldap.SCOPE_SUBTREE, '(employeeNumber=*)') + assert len(ents) == MAX_EMPLOYEENUMBER_USER + ents = M3.search_s(BASE_DISTINGUISHED, ldap.SCOPE_SUBTREE, '(employeeNumber=%s)' % description["expected"]) + log.info('Search M3 employeeNumber=%s (vs. 
%s)' % (ents[0].getValue('employeeNumber'), description["expected"])) + assert len(ents) == 1 + assert ents[0].hasAttr('employeeNumber') and ents[0].getValue('employeeNumber') == description["expected"].encode() + +def test_ticket49658_34(topo): + """Do + M1: MODRDN -> V1 + M2: MODRDN -> V2 + M1: MOD(REPL) -> V1 + M2: MODRDN -> V1 + Replicate order: M1 then M2 + expected: V1 + + :id: 796d3d77-2401-49f5-89fa-80b231d3e758 + :setup: 3 Supplier Instances + 1. Use employeenumber=1000,ou=distinguished,ou=people, + :steps: + 1. Isolate M1 and M2 by pausing the replication agreements + 2. On M1 do DEL+ADD 1000 + MOD_ADD_13 + 3. On M2 do DEL+ADD 1000 + MOD_ADD_13 + 4. Enable replication agreement M2 -> M3, so that update step 2 is replicated first + 5. Enable replication agreement M1 -> M3, so that update step 3 is replicated second + 6. Check that the employeeNumber is 13 on all servers + :expectedresults: + 1. Fill in the result that is expected + 2. For each test step + """ + + # If you need any test suite initialization, + # please, write additional fixture for that (including finalizer). + # Topology for suites are predefined in lib389/topologies.py. + + # If you need host, port or any other data about instance, + # Please, use the instance object attributes for that (for example, topo.ms["supplier1"].serverid) + + + + if DEBUGGING: + # Add debugging steps(if any)... 
+ pass + M1 = topo.ms["supplier1"] + M2 = topo.ms["supplier2"] + M3 = topo.ms["supplier3"] + value_init = '7' + last = '19' + value_S1 = '19.1' + value_S2 = '19.2' + + description = { + "S1": M1, + "S2": M2, + "S1_MODRDN": value_S1, + "S1_MOD": [(ldap.MOD_REPLACE, 'employeeNumber', value_S1.encode())], + "S2_MODRDN_1": value_S2, + "S2_MODRDN_2": value_S1, + "expected": value_S1} + + # This test takes the user_1 + (uid, test_user_dn) = _employeenumber_user_get_dn(int(last)) + + # + # Step 4 + # + # disable all RA from M1 and M2 + # only M3 can replicate the update + # + agreement_m1_m2 = M1.agreement.list(suffix=DEFAULT_SUFFIX, consumer_host=M2.host, consumer_port=M2.port) + agreement_m1_m3 = M1.agreement.list(suffix=DEFAULT_SUFFIX, consumer_host=M3.host, consumer_port=M3.port) + agreement_m2_m1 = M2.agreement.list(suffix=DEFAULT_SUFFIX, consumer_host=M1.host, consumer_port=M1.port) + agreement_m2_m3 = M2.agreement.list(suffix=DEFAULT_SUFFIX, consumer_host=M3.host, consumer_port=M3.port) + + M1.agreement.pause(agreement_m1_m2[0].dn) + M1.agreement.pause(agreement_m1_m3[0].dn) + M2.agreement.pause(agreement_m2_m1[0].dn) + M2.agreement.pause(agreement_m2_m3[0].dn) + + # Step 5 + # Oldest update + # check that the entry on M1 contains employeeNumber= + description["S1"].rename_s(test_user_dn, 'employeeNumber=%s' % description["S1_MODRDN"], newsuperior=BASE_DISTINGUISHED, delold=1) + ents = description["S1"].search_s(BASE_DISTINGUISHED, ldap.SCOPE_SUBTREE, '(employeeNumber=%s)' % description["S1_MODRDN"]) + assert len(ents) == 1 + time.sleep(1) + + # Step 6 + # More recent update + # check that the entry on M2 contains employeeNumber= + description["S2"].rename_s(test_user_dn, 'employeeNumber=%s' % description["S2_MODRDN_1"], newsuperior=BASE_DISTINGUISHED, delold=1) + ents = description["S2"].search_s(BASE_DISTINGUISHED, ldap.SCOPE_SUBTREE, '(employeeNumber=%s)' % description["S2_MODRDN_1"]) + assert len(ents) == 1 + time.sleep(1) + + (no, new_test_user_dn) = 
_employeenumber_user_get_dn(description["S1_MODRDN"]) + description["S1"].modify_s(new_test_user_dn, description["S1_MOD"]) + ents = description["S1"].search_s(BASE_DISTINGUISHED, ldap.SCOPE_SUBTREE, '(employeeNumber=%s)' % value_S1) + assert len(ents) == 1 + time.sleep(1) + + (no, new_test_user_dn) = _employeenumber_user_get_dn(description["S2_MODRDN_1"]) + description["S2"].rename_s(new_test_user_dn, 'employeeNumber=%s' % description["S2_MODRDN_2"], newsuperior=BASE_DISTINGUISHED, delold=1) + ents = description["S2"].search_s(BASE_DISTINGUISHED, ldap.SCOPE_SUBTREE, '(employeeNumber=%s)' % description["S2_MODRDN_2"]) + assert len(ents) == 1 + time.sleep(1) + + _resume_ra_M1_then_M2(M1, M2, M3) + + #time.sleep(3600) + # Step 9 + # Check that M1 still contains employeeNumber= + ents = M1.search_s(BASE_DISTINGUISHED, ldap.SCOPE_SUBTREE, '(employeeNumber=%s)' % description["expected"]) + log.info('Search M1 employeeNumber=%s (vs. %s)' % (ents[0].getValue('employeeNumber'), description["expected"])) + assert len(ents) == 1 + assert ents[0].hasAttr('employeeNumber') and ents[0].getValue('employeeNumber') == description["expected"].encode() + + # Check that M2 still contains employeeNumber= + ents = M2.search_s(BASE_DISTINGUISHED, ldap.SCOPE_SUBTREE, '(employeeNumber=%s)' % description["expected"]) + log.info('Search M2 employeeNumber=%s (vs. %s)' % (ents[0].getValue('employeeNumber'), description["expected"])) + assert len(ents) == 1 + assert ents[0].hasAttr('employeeNumber') and ents[0].getValue('employeeNumber') == description["expected"].encode() + + # Check that M3 still contain employeeNumber and it contains employeeNumber= + ents = M3.search_s(BASE_DISTINGUISHED, ldap.SCOPE_SUBTREE, '(employeeNumber=*)') + assert len(ents) == MAX_EMPLOYEENUMBER_USER + ents = M3.search_s(BASE_DISTINGUISHED, ldap.SCOPE_SUBTREE, '(employeeNumber=%s)' % description["expected"]) + log.info('Search M3 employeeNumber=%s (vs. 
%s)' % (ents[0].getValue('employeeNumber'), description["expected"])) + assert len(ents) == 1 + assert ents[0].hasAttr('employeeNumber') and ents[0].getValue('employeeNumber') == description["expected"].encode() +if __name__ == '__main__': + # Run isolated + # -s for DEBUG mode + CURRENT_FILE = os.path.realpath(__file__) + pytest.main(["-s", CURRENT_FILE]) + diff --git a/dirsrvtests/tests/tickets/ticket49788_test.py b/dirsrvtests/tests/tickets/ticket49788_test.py new file mode 100644 index 0000000..b755d22 --- /dev/null +++ b/dirsrvtests/tests/tickets/ticket49788_test.py @@ -0,0 +1,96 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2018 Dj Padzensky +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. +# --- END COPYRIGHT BLOCK --- +# +import logging +import time + +import ldap +import base64 +import pytest +import os + +from lib389 import Entry +from lib389.tasks import * +from lib389.utils import * +from lib389.properties import * +from lib389.topologies import topology_st +from lib389._constants import DEFAULT_SUFFIX, DN_CONFIG, DN_DM, PASSWORD, DEFAULT_SUFFIX_ESCAPED + +pytestmark = pytest.mark.tier2 + +logging.getLogger(__name__).setLevel(logging.DEBUG) +log = logging.getLogger(__name__) + +VALID_STRINGS = [ + 'dHJpdmlhbCBzdHJpbmc=' # trivial string + '8J+YjQ==', # 😍 + 'aGVsbG8g8J+YjQ==', # hello 😍 + '8J+krCBTbyB0aGVyZSEg8J+YoQ==', # 🤬 So there! 
😡 + 'YnJvY2NvbGkgYmVlZg==', # broccoli beef + 'Y2FybmUgZGUgYnLDs2NvbGk=', # carne de brócoli + '2YTYrdmFINio2YLYsdmKINio2LHZiNmD2YTZig==', # لحم بقري بروكلي + '6KW/5YWw6Iqx54mb6IKJ', # 西兰花牛肉 + '6KW/6Jit6Iqx54mb6IKJ', # 西蘭花牛肉 + '0LPQvtCy0LXQtNGB0LrQviDQvNC10YHQviDQvtC0INCx0YDQvtC60YPQu9Cw', # говедско месо од брокула +] + +INVALID_STRINGS = [ + '0LPQxtCy0LXQtNGB0LrQviDQvNC10YHQviDQvtC0INCx0YDQvtC60YPQu9Cw', + '8R+KjQ==', +] + +USER_DN = 'cn=test_user,' + DEFAULT_SUFFIX + +def test_ticket49781(topology_st): + """ + Test that four-byte UTF-8 characters are accepted by the + directory string syntax. + """ + + # Add a test user + try: + topology_st.standalone.add_s(Entry((USER_DN, + {'objectclass': ['top', 'person'], + 'sn': 'sn', + 'description': 'Four-byte UTF8 test', + 'cn': 'test_user'}))) + except ldap.LDAPError as e: + log.fatal('Failed to add test user') + assert False + + try: + topology_st.standalone.modify_s(USER_DN, [(ldap.MOD_REPLACE, 'description', b'something else')]) + except ldap.LDAPError as e: + log.fatal('trivial test failed!') + assert False + + # Iterate over valid tests + for s in VALID_STRINGS: + decoded = base64.b64decode(s) + try: + topology_st.standalone.modify_s(USER_DN, [(ldap.MOD_REPLACE, 'description', decoded)]) + except ldap.LDAPError as e: + log.fatal('description: ' + decoded.decode('UTF-8') + ' failed') + assert False + + # Iterate over invalid tests + for s in INVALID_STRINGS: + decoded = base64.b64decode(s) + try: + topology_st.standalone.modify_s(USER_DN, [(ldap.MOD_REPLACE, 'description', decoded)]) + log.fatal('base64-decoded string ' + s + " was accepted, when it shouldn't have been!") + assert False + except ldap.LDAPError as e: + pass + + +if __name__ == '__main__': + # Run isolated + # -s for DEBUG mode + CURRENT_FILE = os.path.realpath(__file__) + pytest.main("-s %s" % CURRENT_FILE) diff --git a/dirsrvtests/tests/tickets/ticket50078_test.py b/dirsrvtests/tests/tickets/ticket50078_test.py new file mode 100644 index 
0000000..96362f9 --- /dev/null +++ b/dirsrvtests/tests/tickets/ticket50078_test.py @@ -0,0 +1,78 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2022 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. +# --- END COPYRIGHT BLOCK --- +# +import pytest +from lib389.utils import * +from lib389.topologies import topology_m1h1c1 +from lib389.idm.user import UserAccounts + +from lib389._constants import (DEFAULT_SUFFIX, REPLICA_RUV_FILTER, defaultProperties, + REPLICATION_BIND_DN, REPLICATION_BIND_PW, REPLICATION_BIND_METHOD, + REPLICATION_TRANSPORT, SUFFIX, RA_NAME, RA_BINDDN, RA_BINDPW, + RA_METHOD, RA_TRANSPORT_PROT, SUFFIX) + +pytestmark = pytest.mark.tier2 + +logging.getLogger(__name__).setLevel(logging.DEBUG) +log = logging.getLogger(__name__) + +TEST_USER = "test_user" + +def test_ticket50078(topology_m1h1c1): + """ + Test that for a MODRDN operation the cenotaph entry is created on + a hub or consumer. + """ + + M1 = topology_m1h1c1.ms["supplier1"] + H1 = topology_m1h1c1.hs["hub1"] + C1 = topology_m1h1c1.cs["consumer1"] + # + # Test replication is working + # + if M1.testReplication(DEFAULT_SUFFIX, topology_m1h1c1.cs["consumer1"]): + log.info('Replication is working.') + else: + log.fatal('Replication is not working.') + assert False + + ua = UserAccounts(M1, DEFAULT_SUFFIX) + ua.create(properties={ + 'uid': "%s%d" % (TEST_USER, 1), + 'cn' : "%s%d" % (TEST_USER, 1), + 'sn' : 'user', + 'uidNumber' : '1000', + 'gidNumber' : '2000', + 'homeDirectory' : '/home/testuser' + }) + + user = ua.get('%s1' % TEST_USER) + log.info(" Rename the test entry %s..." 
% user) + user.rename('uid=test_user_new') + + # wait until replication is in sync + if M1.testReplication(DEFAULT_SUFFIX, topology_m1h1c1.cs["consumer1"]): + log.info('Replication is working.') + else: + log.fatal('Replication is not working.') + assert False + + # check if cenotaph was created on hub and consumer + ents = H1.search_s(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE, filterstr="(&(objectclass=nstombstone)(cenotaphid=*))") + assert len(ents) == 1 + + ents = C1.search_s(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE, filterstr="(&(objectclass=nstombstone)(cenotaphid=*))") + assert len(ents) == 1 + + + +if __name__ == '__main__': + # Run isolated + # -s for DEBUG mode + CURRENT_FILE = os.path.realpath(__file__) + pytest.main("-s %s" % CURRENT_FILE) diff --git a/dirsrvtests/tests/tickets/ticket50232_test.py b/dirsrvtests/tests/tickets/ticket50232_test.py new file mode 100644 index 0000000..ee77fb7 --- /dev/null +++ b/dirsrvtests/tests/tickets/ticket50232_test.py @@ -0,0 +1,165 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2016 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. 
+# --- END COPYRIGHT BLOCK --- +# +import logging + +import pytest +# from lib389.tasks import * +# from lib389.utils import * +from lib389.topologies import topology_st +from lib389.replica import ReplicationManager,Replicas + +from lib389._constants import DEFAULT_SUFFIX, BACKEND_NAME + +from lib389.idm.user import UserAccounts +from lib389.idm.organization import Organization +from lib389.idm.organizationalunit import OrganizationalUnit + +pytestmark = pytest.mark.tier2 + +log = logging.getLogger(__name__) + +NORMAL_SUFFIX = 'o=normal' +NORMAL_BACKEND_NAME = 'normal' +REVERSE_SUFFIX = 'o=reverse' +REVERSE_BACKEND_NAME = 'reverse' + +def _enable_replica(instance, suffix): + + repl = ReplicationManager(DEFAULT_SUFFIX) + repl._ensure_changelog(instance) + replicas = Replicas(instance) + replicas.create(properties={ + 'cn': 'replica', + 'nsDS5ReplicaRoot': suffix, + 'nsDS5ReplicaId': '1', + 'nsDS5Flags': '1', + 'nsDS5ReplicaType': '3' + }) + +def _populate_suffix(instance, suffixname): + + o = Organization(instance, 'o={}'.format(suffixname)) + o.create(properties={ + 'o': suffixname, + 'description': 'test' + }) + ou = OrganizationalUnit(instance, 'ou=people,o={}'.format(suffixname)) + ou.create(properties={ + 'ou': 'people' + }) + +def _get_replica_generation(instance, suffix): + + replicas = Replicas(instance) + replica = replicas.get(suffix) + ruv = replica.get_ruv() + return ruv._data_generation + +def _test_export_import(instance, suffix, backend): + + before_generation = _get_replica_generation(instance, suffix) + + instance.stop() + instance.db2ldif( + bename=backend, + suffixes=[suffix], + excludeSuffixes=[], + encrypt=False, + repl_data=True, + outputfile="/tmp/output_file", + ) + instance.ldif2db( + bename=None, + excludeSuffixes=None, + encrypt=False, + suffixes=[suffix], + import_file="/tmp/output_file", + ) + instance.start() + after_generation = _get_replica_generation(instance, suffix) + + assert (before_generation == after_generation) + +def 
test_ticket50232_normal(topology_st): + """ + The fix for ticket 50232 + + + The test sequence is: + - create suffix + - add suffix entry and some child entries + - "normally" done after populating suffix: enable replication + - get RUV and database generation + - export -r + - import + - get RUV and database generation + - assert database generation has not changed + """ + + log.info('Testing Ticket 50232 - export creates not imprtable ldif file, normal creation order') + + topology_st.standalone.backend.create(NORMAL_SUFFIX, {BACKEND_NAME: NORMAL_BACKEND_NAME}) + topology_st.standalone.mappingtree.create(NORMAL_SUFFIX, bename=NORMAL_BACKEND_NAME, parent=None) + + _populate_suffix(topology_st.standalone, NORMAL_BACKEND_NAME) + + repl = ReplicationManager(DEFAULT_SUFFIX) + repl._ensure_changelog(topology_st.standalone) + replicas = Replicas(topology_st.standalone) + replicas.create(properties={ + 'cn': 'replica', + 'nsDS5ReplicaRoot': NORMAL_SUFFIX, + 'nsDS5ReplicaId': '1', + 'nsDS5Flags': '1', + 'nsDS5ReplicaType': '3' + }) + + _test_export_import(topology_st.standalone, NORMAL_SUFFIX, NORMAL_BACKEND_NAME) + +def test_ticket50232_reverse(topology_st): + """ + The fix for ticket 50232 + + + The test sequence is: + - create suffix + - enable replication before suffix enztry is added + - add suffix entry and some child entries + - get RUV and database generation + - export -r + - import + - get RUV and database generation + - assert database generation has not changed + """ + + log.info('Testing Ticket 50232 - export creates not imprtable ldif file, normal creation order') + + # + # Setup Replication + # + log.info('Setting up replication...') + repl = ReplicationManager(DEFAULT_SUFFIX) + # repl.create_first_supplier(topology_st.standalone) + # + # enable dynamic plugins, memberof and retro cl plugin + # + topology_st.standalone.backend.create(REVERSE_SUFFIX, {BACKEND_NAME: REVERSE_BACKEND_NAME}) + topology_st.standalone.mappingtree.create(REVERSE_SUFFIX, 
bename=REVERSE_BACKEND_NAME, parent=None) + + _enable_replica(topology_st.standalone, REVERSE_SUFFIX) + + _populate_suffix(topology_st.standalone, REVERSE_BACKEND_NAME) + + _test_export_import(topology_st.standalone, REVERSE_SUFFIX, REVERSE_BACKEND_NAME) + +if __name__ == '__main__': + # Run isolated + # -s for DEBUG mode + CURRENT_FILE = os.path.realpath(__file__) + pytest.main("-s %s" % CURRENT_FILE) diff --git a/dirsrvtests/tests/tickets/ticket50234_test.py b/dirsrvtests/tests/tickets/ticket50234_test.py new file mode 100644 index 0000000..ac936d4 --- /dev/null +++ b/dirsrvtests/tests/tickets/ticket50234_test.py @@ -0,0 +1,72 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2019 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. +# --- END COPYRIGHT BLOCK --- +# +import logging +import time +import ldap +import pytest + +from lib389.topologies import topology_st + +from lib389._constants import DEFAULT_SUFFIX + +from lib389.idm.user import UserAccount, UserAccounts +from lib389.idm.organizationalunit import OrganizationalUnit + +pytestmark = pytest.mark.tier2 + +log = logging.getLogger(__name__) + +def test_ticket50234(topology_st): + """ + The fix for ticket 50234 + + + The test sequence is: + - create more than 10 entries with objectclass organizational units ou=org{} + - add an Account in one of them, eg below ou=org5 + - do searches with search base ou=org5 and search filter "objectclass=organizationalunit" + - a subtree search should return 1 entry, the base entry + - a onelevel search should return no entry + """ + + log.info('Testing Ticket 50234 - onelvel search returns not matching entry') + + for i in range(1,15): + ou = OrganizationalUnit(topology_st.standalone, "ou=Org{},{}".format(i, DEFAULT_SUFFIX)) + ou.create(properties={'ou': 'Org'.format(i)}) + + properties = { + 'uid': 'Jeff Vedder', + 'cn': 'Jeff Vedder', + 'sn': 'user', + 'uidNumber': '1000', + 'gidNumber': '2000', + 
'homeDirectory': '/home/' + 'JeffVedder', + 'userPassword': 'password' + } + user = UserAccount(topology_st.standalone, "cn=Jeff Vedder,ou=org5,{}".format(DEFAULT_SUFFIX)) + user.create(properties=properties) + + # in a subtree search the entry used as search base matches the filter and shoul be returned + ent = topology_st.standalone.getEntry("ou=org5,{}".format(DEFAULT_SUFFIX), ldap.SCOPE_SUBTREE, "(objectclass=organizationalunit)") + + # in a onelevel search the only child is an useraccount which does not match the filter + # no entry should be returned, which would cause getEntry to raise an exception we need to handle + found = 1 + try: + ent = topology_st.standalone.getEntry("ou=org5,{}".format(DEFAULT_SUFFIX), ldap.SCOPE_ONELEVEL, "(objectclass=organizationalunit)") + except ldap.NO_SUCH_OBJECT: + found = 0 + assert (found == 0) + +if __name__ == '__main__': + # Run isolated + # -s for DEBUG mode + CURRENT_FILE = os.path.realpath(__file__) + pytest.main("-s %s" % CURRENT_FILE) diff --git a/dirsrvtests/tests/tickets/ticket548_test.py b/dirsrvtests/tests/tickets/ticket548_test.py new file mode 100644 index 0000000..cac3cc5 --- /dev/null +++ b/dirsrvtests/tests/tickets/ticket548_test.py @@ -0,0 +1,408 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2016 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. +# --- END COPYRIGHT BLOCK --- +# +import pytest +from lib389.tasks import * +from lib389.utils import * +from lib389.topologies import topology_st + +from lib389._constants import DEFAULT_SUFFIX, DN_CONFIG, DN_DM, PASSWORD, DEFAULT_SUFFIX_ESCAPED + +# Skip on older versions +pytestmark = [pytest.mark.tier2, + pytest.mark.skipif(ds_is_older('1.3.6'), reason="Not implemented")] + +log = logging.getLogger(__name__) + +# Assuming DEFAULT_SUFFIX is "dc=example,dc=com", otherwise it does not work... 
:( +SUBTREE_CONTAINER = 'cn=nsPwPolicyContainer,' + DEFAULT_SUFFIX +SUBTREE_PWPDN = 'cn=nsPwPolicyEntry,' + DEFAULT_SUFFIX +SUBTREE_PWP = 'cn=cn\3DnsPwPolicyEntry\2C' + DEFAULT_SUFFIX_ESCAPED + ',' + SUBTREE_CONTAINER +SUBTREE_COS_TMPLDN = 'cn=nsPwTemplateEntry,' + DEFAULT_SUFFIX +SUBTREE_COS_TMPL = 'cn=cn\3DnsPwTemplateEntry\2C' + DEFAULT_SUFFIX_ESCAPED + ',' + SUBTREE_CONTAINER +SUBTREE_COS_DEF = 'cn=nsPwPolicy_CoS,' + DEFAULT_SUFFIX + +USER1_DN = 'uid=user1,' + DEFAULT_SUFFIX +USER2_DN = 'uid=user2,' + DEFAULT_SUFFIX +USER3_DN = 'uid=user3,' + DEFAULT_SUFFIX +USER_PW = 'password' + + +def days_to_secs(days): + # Value of 60 * 60 * 24 + return days * 86400 + + +# Values are in days +def set_global_pwpolicy(topology_st, min_=1, max_=10, warn=3): + log.info(" +++++ Enable global password policy +++++\n") + # Enable password policy + try: + topology_st.standalone.modify_s(DN_CONFIG, [(ldap.MOD_REPLACE, 'nsslapd-pwpolicy-local', b'on')]) + except ldap.LDAPError as e: + log.error('Failed to set pwpolicy-local: error ' + e.message['desc']) + assert False + + # Convert our values to seconds + min_secs = days_to_secs(min_) + max_secs = days_to_secs(max_) + warn_secs = days_to_secs(warn) + + log.info(" Set global password Min Age -- %s day\n" % min_) + try: + topology_st.standalone.modify_s(DN_CONFIG, [(ldap.MOD_REPLACE, 'passwordMinAge', ('%s' % min_secs).encode())]) + except ldap.LDAPError as e: + log.error('Failed to set passwordMinAge: error ' + e.message['desc']) + assert False + + log.info(" Set global password Expiration -- on\n") + try: + topology_st.standalone.modify_s(DN_CONFIG, [(ldap.MOD_REPLACE, 'passwordExp', b'on')]) + except ldap.LDAPError as e: + log.error('Failed to set passwordExp: error ' + e.message['desc']) + assert False + + log.info(" Set global password Max Age -- %s days\n" % max_) + try: + topology_st.standalone.modify_s(DN_CONFIG, [(ldap.MOD_REPLACE, 'passwordMaxAge', ('%s' % max_secs).encode())]) + except ldap.LDAPError as e: + 
log.error('Failed to set passwordMaxAge: error ' + e.message['desc']) + assert False + + log.info(" Set global password Warning -- %s days\n" % warn) + try: + topology_st.standalone.modify_s(DN_CONFIG, [(ldap.MOD_REPLACE, 'passwordWarning', ('%s' % warn_secs).encode())]) + except ldap.LDAPError as e: + log.error('Failed to set passwordWarning: error ' + e.message['desc']) + assert False + + +def set_subtree_pwpolicy(topology_st, min_=2, max_=20, warn=6): + log.info(" +++++ Enable subtree level password policy +++++\n") + + # Convert our values to seconds + min_secs = days_to_secs(min_) + max_secs = days_to_secs(max_) + warn_secs = days_to_secs(warn) + + log.info(" Add the container") + try: + topology_st.standalone.add_s(Entry((SUBTREE_CONTAINER, {'objectclass': 'top nsContainer'.split(), + 'cn': 'nsPwPolicyContainer'}))) + except ldap.ALREADY_EXISTS: + pass + except ldap.LDAPError as e: + log.error('Failed to add subtree container: error ' + e.message['desc']) + # assert False + + try: + # Purge the old policy + topology_st.standalone.delete_s(SUBTREE_PWP) + except: + pass + + log.info( + " Add the password policy subentry {passwordMustChange: on, passwordMinAge: %s, passwordMaxAge: %s, passwordWarning: %s}" % ( + min_, max_, warn)) + try: + topology_st.standalone.add_s(Entry((SUBTREE_PWP, {'objectclass': 'top ldapsubentry passwordpolicy'.split(), + 'cn': SUBTREE_PWPDN, + 'passwordMustChange': 'on', + 'passwordExp': 'on', + 'passwordMinAge': '%s' % min_secs, + 'passwordMaxAge': '%s' % max_secs, + 'passwordWarning': '%s' % warn_secs, + 'passwordChange': 'on', + 'passwordStorageScheme': 'clear'}))) + except ldap.LDAPError as e: + log.error('Failed to add passwordpolicy: error ' + e.message['desc']) + assert False + + log.info(" Add the COS template") + try: + topology_st.standalone.add_s( + Entry((SUBTREE_COS_TMPL, {'objectclass': 'top ldapsubentry costemplate extensibleObject'.split(), + 'cn': SUBTREE_PWPDN, + 'cosPriority': '1', + 'cn': SUBTREE_COS_TMPLDN, + 
'pwdpolicysubentry': SUBTREE_PWP}))) + except ldap.ALREADY_EXISTS: + pass + except ldap.LDAPError as e: + log.error('Failed to add COS template: error ' + e.message['desc']) + # assert False + + log.info(" Add the COS definition") + try: + topology_st.standalone.add_s( + Entry((SUBTREE_COS_DEF, {'objectclass': 'top ldapsubentry cosSuperDefinition cosPointerDefinition'.split(), + 'cn': SUBTREE_PWPDN, + 'costemplatedn': SUBTREE_COS_TMPL, + 'cosAttribute': 'pwdpolicysubentry default operational-default'}))) + except ldap.ALREADY_EXISTS: + pass + except ldap.LDAPError as e: + log.error('Failed to add COS def: error ' + e.message['desc']) + # assert False + + time.sleep(1) + + +def update_passwd(topology_st, user, passwd, newpasswd): + log.info(" Bind as {%s,%s}" % (user, passwd)) + topology_st.standalone.simple_bind_s(user, passwd) + try: + topology_st.standalone.modify_s(user, [(ldap.MOD_REPLACE, 'userpassword', newpasswd.encode())]) + except ldap.LDAPError as e: + log.fatal('test_ticket548: Failed to update the password ' + cpw + ' of user ' + user + ': error ' + e.message[ + 'desc']) + assert False + + time.sleep(1) + + +def check_shadow_attr_value(entry, attr_type, expected, dn): + if entry.hasAttr(attr_type): + actual = entry.getValue(attr_type) + if int(actual) == expected: + log.info('%s of entry %s has expected value %s' % (attr_type, dn, actual)) + assert True + else: + log.fatal('%s %s of entry %s does not have expected value %s' % (attr_type, actual, dn, expected)) + assert False + else: + log.fatal('entry %s does not have %s attr' % (dn, attr_type)) + assert False + + +def test_ticket548_test_with_no_policy(topology_st): + """ + Check shadowAccount under no password policy + """ + log.info("Case 1. 
No password policy") + + log.info("Bind as %s" % DN_DM) + topology_st.standalone.simple_bind_s(DN_DM, PASSWORD) + + log.info('Add an entry' + USER1_DN) + try: + topology_st.standalone.add_s( + Entry((USER1_DN, {'objectclass': "top person organizationalPerson inetOrgPerson shadowAccount".split(), + 'sn': '1', + 'cn': 'user 1', + 'uid': 'user1', + 'givenname': 'user', + 'mail': 'user1@' + DEFAULT_SUFFIX, + 'userpassword': USER_PW}))) + except ldap.LDAPError as e: + log.fatal('test_ticket548: Failed to add user' + USER1_DN + ': error ' + e.message['desc']) + assert False + + edate = int(time.time() / (60 * 60 * 24)) + log.info('Search entry %s' % USER1_DN) + + log.info("Bind as %s" % USER1_DN) + topology_st.standalone.simple_bind_s(USER1_DN, USER_PW) + entry = topology_st.standalone.getEntry(USER1_DN, ldap.SCOPE_BASE, "(objectclass=*)", ['shadowLastChange']) + check_shadow_attr_value(entry, 'shadowLastChange', edate, USER1_DN) + + log.info("Check shadowAccount with no policy was successfully verified.") + + +def test_ticket548_test_global_policy(topology_st): + """ + Check shadowAccount with global password policy + """ + + log.info("Case 2. 
Check shadowAccount with global password policy") + + log.info("Bind as %s" % DN_DM) + topology_st.standalone.simple_bind_s(DN_DM, PASSWORD) + + set_global_pwpolicy(topology_st) + + log.info('Add an entry' + USER2_DN) + try: + topology_st.standalone.add_s( + Entry((USER2_DN, {'objectclass': "top person organizationalPerson inetOrgPerson shadowAccount".split(), + 'sn': '2', + 'cn': 'user 2', + 'uid': 'user2', + 'givenname': 'user', + 'mail': 'user2@' + DEFAULT_SUFFIX, + 'userpassword': USER_PW}))) + except ldap.LDAPError as e: + log.fatal('test_ticket548: Failed to add user' + USER2_DN + ': error ' + e.message['desc']) + assert False + + edate = int(time.time() / (60 * 60 * 24)) + + log.info("Bind as %s" % USER1_DN) + topology_st.standalone.simple_bind_s(USER1_DN, USER_PW) + + log.info('Search entry %s' % USER1_DN) + entry = topology_st.standalone.getEntry(USER1_DN, ldap.SCOPE_BASE, "(objectclass=*)") + check_shadow_attr_value(entry, 'shadowLastChange', edate, USER1_DN) + + # passwordMinAge -- 1 day + check_shadow_attr_value(entry, 'shadowMin', 1, USER1_DN) + + # passwordMaxAge -- 10 days + check_shadow_attr_value(entry, 'shadowMax', 10, USER1_DN) + + # passwordWarning -- 3 days + check_shadow_attr_value(entry, 'shadowWarning', 3, USER1_DN) + + log.info("Bind as %s" % USER2_DN) + topology_st.standalone.simple_bind_s(USER2_DN, USER_PW) + + log.info('Search entry %s' % USER2_DN) + entry = topology_st.standalone.getEntry(USER2_DN, ldap.SCOPE_BASE, "(objectclass=*)") + check_shadow_attr_value(entry, 'shadowLastChange', edate, USER2_DN) + + # passwordMinAge -- 1 day + check_shadow_attr_value(entry, 'shadowMin', 1, USER2_DN) + + # passwordMaxAge -- 10 days + check_shadow_attr_value(entry, 'shadowMax', 10, USER2_DN) + + # passwordWarning -- 3 days + check_shadow_attr_value(entry, 'shadowWarning', 3, USER2_DN) + + # Bind as DM again, change policy + log.info("Bind as %s" % DN_DM) + topology_st.standalone.simple_bind_s(DN_DM, PASSWORD) + set_global_pwpolicy(topology_st, 3, 
30, 9) + + # change the user password, then check again. + log.info("Bind as %s" % USER2_DN) + topology_st.standalone.simple_bind_s(USER2_DN, USER_PW) + + newpasswd = USER_PW + '2' + update_passwd(topology_st, USER2_DN, USER_PW, newpasswd) + + log.info("Re-bind as %s with new password" % USER2_DN) + topology_st.standalone.simple_bind_s(USER2_DN, newpasswd) + + ## This tests if we update the shadow values on password change. + log.info('Search entry %s' % USER2_DN) + entry = topology_st.standalone.getEntry(USER2_DN, ldap.SCOPE_BASE, "(objectclass=*)") + + # passwordMinAge -- 1 day + check_shadow_attr_value(entry, 'shadowMin', 3, USER2_DN) + + # passwordMaxAge -- 10 days + check_shadow_attr_value(entry, 'shadowMax', 30, USER2_DN) + + # passwordWarning -- 3 days + check_shadow_attr_value(entry, 'shadowWarning', 9, USER2_DN) + + log.info("Check shadowAccount with global policy was successfully verified.") + + +def test_ticket548_test_subtree_policy(topology_st): + """ + Check shadowAccount with subtree level password policy + """ + + log.info("Case 3. 
Check shadowAccount with subtree level password policy") + + log.info("Bind as %s" % DN_DM) + topology_st.standalone.simple_bind_s(DN_DM, PASSWORD) + # Check the global policy values + + set_subtree_pwpolicy(topology_st, 2, 20, 6) + + log.info('Add an entry' + USER3_DN) + try: + topology_st.standalone.add_s( + Entry((USER3_DN, {'objectclass': "top person organizationalPerson inetOrgPerson shadowAccount".split(), + 'sn': '3', + 'cn': 'user 3', + 'uid': 'user3', + 'givenname': 'user', + 'mail': 'user3@' + DEFAULT_SUFFIX, + 'userpassword': USER_PW}))) + except ldap.LDAPError as e: + log.fatal('test_ticket548: Failed to add user' + USER3_DN + ': error ' + e.message['desc']) + assert False + + log.info('Search entry %s' % USER3_DN) + entry0 = topology_st.standalone.getEntry(USER3_DN, ldap.SCOPE_BASE, "(objectclass=*)") + + log.info('Expecting shadowLastChange 0 since passwordMustChange is on') + check_shadow_attr_value(entry0, 'shadowLastChange', 0, USER3_DN) + + # passwordMinAge -- 2 day + check_shadow_attr_value(entry0, 'shadowMin', 2, USER3_DN) + + # passwordMaxAge -- 20 days + check_shadow_attr_value(entry0, 'shadowMax', 20, USER3_DN) + + # passwordWarning -- 6 days + check_shadow_attr_value(entry0, 'shadowWarning', 6, USER3_DN) + + log.info("Bind as %s" % USER3_DN) + topology_st.standalone.simple_bind_s(USER3_DN, USER_PW) + + log.info('Search entry %s' % USER3_DN) + try: + entry1 = topology_st.standalone.getEntry(USER3_DN, ldap.SCOPE_BASE, "(objectclass=*)") + except ldap.UNWILLING_TO_PERFORM: + log.info('test_ticket548: Search by' + USER3_DN + ' failed by UNWILLING_TO_PERFORM as expected') + except ldap.LDAPError as e: + log.fatal('test_ticket548: Failed to serch user' + USER3_DN + ' by self: error ' + e.message['desc']) + assert False + + log.info("Bind as %s and updating the password with a new one" % USER3_DN) + topology_st.standalone.simple_bind_s(USER3_DN, USER_PW) + + # Bind as DM again, change policy + log.info("Bind as %s" % DN_DM) + 
topology_st.standalone.simple_bind_s(DN_DM, PASSWORD) + + set_subtree_pwpolicy(topology_st, 4, 40, 12) + + newpasswd = USER_PW + '0' + update_passwd(topology_st, USER3_DN, USER_PW, newpasswd) + + log.info("Re-bind as %s with new password" % USER3_DN) + topology_st.standalone.simple_bind_s(USER3_DN, newpasswd) + + try: + entry2 = topology_st.standalone.getEntry(USER3_DN, ldap.SCOPE_BASE, "(objectclass=*)") + except ldap.LDAPError as e: + log.fatal('test_ticket548: Failed to serch user' + USER3_DN + ' by self: error ' + e.message['desc']) + assert False + + edate = int(time.time() / (60 * 60 * 24)) + + log.info('Expecting shadowLastChange %d once userPassword is updated', edate) + check_shadow_attr_value(entry2, 'shadowLastChange', edate, USER3_DN) + + log.info('Search entry %s' % USER3_DN) + entry = topology_st.standalone.getEntry(USER3_DN, ldap.SCOPE_BASE, "(objectclass=*)") + check_shadow_attr_value(entry, 'shadowLastChange', edate, USER3_DN) + + # passwordMinAge -- 1 day + check_shadow_attr_value(entry, 'shadowMin', 4, USER3_DN) + + # passwordMaxAge -- 10 days + check_shadow_attr_value(entry, 'shadowMax', 40, USER3_DN) + + # passwordWarning -- 3 days + check_shadow_attr_value(entry, 'shadowWarning', 12, USER3_DN) + + log.info("Check shadowAccount with subtree level policy was successfully verified.") + + +if __name__ == '__main__': + # Run isolated + # -s for DEBUG mode + CURRENT_FILE = os.path.realpath(__file__) + pytest.main("-s %s" % CURRENT_FILE) diff --git a/dirsrvtests/tests/tmp/README b/dirsrvtests/tests/tmp/README new file mode 100644 index 0000000..0e8f416 --- /dev/null +++ b/dirsrvtests/tests/tmp/README @@ -0,0 +1,10 @@ +TMP DIRECTORY README + +This directory is used to store files(LDIFs, etc) that are created during the ticket script runtime. The script is also responsible for removing any files it places in this directory. This directory can be retrieved via getDir() from the DirSrv class. 
+ +Example: + + tmp_dir_path = topology.standalone.getDir(__file__, TMP_DIR) + + new_ldif = tmp_dir_path + "export.ldif" + diff --git a/dirsrvtests/tests/tmp/__init__.py b/dirsrvtests/tests/tmp/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/docker.mk b/docker.mk new file mode 100644 index 0000000..4891440 --- /dev/null +++ b/docker.mk @@ -0,0 +1,6 @@ + +suse: + docker build -t 389-ds-suse:latest -f docker/389-ds-suse/Dockerfile . + +fedora: + docker build -t 389-ds-fedora:latest -f docker/389-ds-fedora/Dockerfile . diff --git a/docker/389-ds-fedora/Dockerfile b/docker/389-ds-fedora/Dockerfile new file mode 100644 index 0000000..bdd56fd --- /dev/null +++ b/docker/389-ds-fedora/Dockerfile @@ -0,0 +1,51 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2017 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. +# --- END COPYRIGHT BLOCK --- + +FROM fedora:latest +MAINTAINER 389-devel@lists.fedoraproject.org +EXPOSE 3389 3636 + +ADD ./ /usr/local/src/389-ds-base +WORKDIR /usr/local/src/389-ds-base + +# install dependencies +RUN dnf upgrade -y \ + && dnf install --setopt=strict=False -y @buildsys-build rpm-build make bzip2 git rsync \ + `grep -E "^(Build)?Requires" rpm/389-ds-base.spec.in \ + | grep -v -E '(name|MODULE)' \ + | awk '{ print $2 }' \ + | sed 's/%{python3_pkgversion}/3/g' \ + | grep -v "^/" \ + | grep -v pkgversion \ + | sort | uniq \ + | tr '\n' ' '` \ + && dnf clean all + +# build +RUN make -f rpm.mk rpms || sh -c 'echo "build failed, sleeping for some time to allow you debug" ; sleep 3600' + +RUN dnf install -y dist/rpms/*389*.rpm && \ + dnf clean all + +# Link some known static locations to point to /data +RUN mkdir -p /data/config && \ + mkdir -p /data/ssca && \ + mkdir -p /data/run && \ + mkdir -p /var/run/dirsrv && \ + ln -s /data/config /etc/dirsrv/slapd-localhost && \ + ln -s /data/ssca /etc/dirsrv/ssca && \ + ln -s /data/run /var/run/dirsrv + +VOLUME /data + 
+#USER dirsrv + +HEALTHCHECK --start-period=5m --timeout=5s --interval=5s --retries=2 \ + CMD /usr/libexec/dirsrv/dscontainer -H + +CMD [ "/usr/libexec/dirsrv/dscontainer", "-r" ] diff --git a/docker/389-ds-suse/Dockerfile b/docker/389-ds-suse/Dockerfile new file mode 100644 index 0000000..4a0bbe0 --- /dev/null +++ b/docker/389-ds-suse/Dockerfile @@ -0,0 +1,82 @@ +#!BuildTag: 389-ds-container +FROM opensuse/leap:15.1 +MAINTAINER wbrown@suse.de + +EXPOSE 3389 3636 + +# RUN zypper ar -G obs://network:ldap network:ldap && \ +RUN zypper ar http://download.opensuse.org/update/leap/15.1/oss/ u && \ + zypper ar http://download.opensuse.org/distribution/leap/15.1/repo/oss/ m && \ + zypper ar http://download.opensuse.org/repositories/network:ldap/openSUSE_Leap_15.1/ "network:ldap" && \ + zypper mr -p 97 "network:ldap" && \ + zypper --gpg-auto-import-keys ref + +RUN zypper --non-interactive si --build-deps-only 389-ds && \ + zypper in -y acl cargo cyrus-sasl cyrus-sasl-plain db48-utils krb5-client libLLVM7 libedit0 libgit2-26 libhttp_parser2_7_1 libssh2-1 mozilla-nss-tools rust + +# Install build dependencies +# RUN zypper in -C -y autoconf automake cracklib-devel cyrus-sasl-devel db-devel doxygen gcc-c++ \ +# gdb krb5-devel libcmocka-devel libtalloc-devel libtevent-devel libtool \ +# net-snmp-devel openldap2-devel pam-devel pkgconfig python-rpm-macros "pkgconfig(icu-i18n)" \ +# "pkgconfig(icu-uc)" "pkgconfig(libcap)" "pkgconfig(libpcre)" "pkgconfig(libsystemd)" \ +# "pkgconfig(nspr)" "pkgconfig(nss)" rsync cargo rust rust-std acl cyrus-sasl-plain db-utils \ +# bind-utils krb5 fillup shadow openldap2-devel pkgconfig "pkgconfig(nspr)" "pkgconfig(nss)" \ +# "pkgconfig(systemd)" python3-argcomplete python3-argparse-manpage python3-ldap \ +# python3-pyasn1 python3-pyasn1-modules python3-python-dateutil python3-six krb5-client \ +# mozilla-nss-tools + +# Push source code to the container +ADD ./ /usr/local/src/389-ds-base +WORKDIR /usr/local/src/389-ds-base + + +# Build and 
install +# Derived from rpm --eval '%configure' on opensuse. +RUN autoreconf -fiv && \ + ./configure --host=x86_64-suse-linux-gnu --build=x86_64-suse-linux-gnu \ + --program-prefix= \ + --disable-dependency-tracking \ + --prefix=/usr \ + --exec-prefix=/usr \ + --bindir=/usr/bin \ + --sbindir=/usr/sbin \ + --sysconfdir=/etc \ + --datadir=/usr/share \ + --includedir=/usr/include \ + --libdir=/usr/lib64 \ + --libexecdir=/usr/lib \ + --localstatedir=/var \ + --sharedstatedir=/var/lib \ + --mandir=/usr/share/man \ + --infodir=/usr/share/info \ + --disable-dependency-tracking \ + --enable-debug \ + --enable-gcc-security --enable-autobind --enable-auto-dn-suffix --with-openldap \ + --enable-cmocka --enable-rust --disable-perl --with-pythonexec="python3" --without-systemd \ + --libexecdir=/usr/lib/dirsrv/ --prefix=/ && \ + make -j 12 && \ + make install && \ + make lib389 && \ + make lib389-install + +# Link some known static locations to point to /data +RUN mkdir -p /data/config && \ + mkdir -p /data/ssca && \ + mkdir -p /data/run && \ + mkdir -p /var/run/dirsrv && \ + ln -s /data/config /etc/dirsrv/slapd-localhost && \ + ln -s /data/ssca /etc/dirsrv/ssca && \ + ln -s /data/run /var/run/dirsrv + +# Temporal volumes for each instance + +VOLUME /data + +# Set the userup correctly. 
+# USER dirsrv + +HEALTHCHECK --start-period=5m --timeout=5s --interval=5s --retries=2 \ + CMD /usr/libexec/dirsrv/dscontainer -H + +CMD [ "/usr/libexec/dirsrv/dscontainer", "-r" ] + diff --git a/docker/389-ds-suse/Dockerfile.release b/docker/389-ds-suse/Dockerfile.release new file mode 100644 index 0000000..6f4adf7 --- /dev/null +++ b/docker/389-ds-suse/Dockerfile.release @@ -0,0 +1,72 @@ +#!BuildTag: 389-ds-container +FROM opensuse/leap:15.1 +MAINTAINER wbrown@suse.de + +EXPOSE 3389 3636 + +# RUN zypper ar -G obs://network:ldap network:ldap && \ +RUN zypper ar http://download.opensuse.org/update/leap/15.1/oss/ u && \ + zypper ar http://download.opensuse.org/distribution/leap/15.1/repo/oss/ m && \ + zypper ar http://download.opensuse.org/repositories/network:ldap/openSUSE_Leap_15.1/ "network:ldap" && \ + zypper mr -p 97 "network:ldap" && \ + zypper --gpg-auto-import-keys ref + +# Push source code to the container - we do this early because we want the zypper and +# build instructions in a single RUN stanza to minimise the container final size. +ADD ./ /usr/local/src/389-ds-base +WORKDIR /usr/local/src/389-ds-base + + +# Build and install +# Derived from rpm --eval '%configure' on opensuse. 
+ +RUN zypper --non-interactive si --build-deps-only 389-ds && \ + zypper in -y 389-ds rust cargo rust-std && \ + zypper rm -y 389-ds lib389 && \ + autoreconf -fiv && \ + ./configure --host=x86_64-suse-linux-gnu --build=x86_64-suse-linux-gnu \ + --program-prefix= \ + --disable-dependency-tracking \ + --prefix=/usr \ + --exec-prefix=/usr \ + --bindir=/usr/bin \ + --sbindir=/usr/sbin \ + --sysconfdir=/etc \ + --datadir=/usr/share \ + --includedir=/usr/include \ + --libdir=/usr/lib64 \ + --libexecdir=/usr/lib \ + --localstatedir=/var \ + --sharedstatedir=/var/lib \ + --mandir=/usr/share/man \ + --infodir=/usr/share/info \ + --disable-dependency-tracking \ + --enable-gcc-security --enable-autobind --enable-auto-dn-suffix --with-openldap \ + --enable-rust --disable-perl --with-pythonexec="python3" --without-systemd \ + --libexecdir=/usr/lib/dirsrv/ --prefix=/ && \ + make -j 12 && \ + make install && \ + make lib389 && \ + make lib389-install && \ + make clean && \ + zypper rm -y -u rust cargo rust-std gcc gcc-c++ automake autoconf + +# Link some known static locations to point to /data +RUN mkdir -p /data/config && \ + mkdir -p /data/ssca && \ + mkdir -p /data/run && \ + mkdir -p /var/run/dirsrv && \ + ln -s /data/config /etc/dirsrv/slapd-localhost && \ + ln -s /data/ssca /etc/dirsrv/ssca && \ + ln -s /data/run /var/run/dirsrv + +# Temporal volumes for each instance + +VOLUME /data + +# Set the userup correctly. This was created as part of the 389ds in above. +# For k8s we'll need 389 to not drop privs? I think we don't specify a user +# here and ds should do the right thing if a non root user runs the server. +# USER dirsrv + +CMD [ "/usr/libexec/dirsrv/dscontainer", "-r" ] diff --git a/docker/README.md b/docker/README.md new file mode 100644 index 0000000..01cf351 --- /dev/null +++ b/docker/README.md @@ -0,0 +1,61 @@ + +#### Issue Description +This folder contains proof of concept dockerfiles for 389 Directory Server. 
This utilises many of our latest +developments for installing instances and configuring them. We have developed native, clean, and powerful container +integration. This container image is usable on CentOS / RHEL / Fedora atomic host, and pure docker implementations. +Please note this image will not currently work in openshift due to a reliance on volume features that openshift does +not support, but we will correct this. + + +#### Using the files +These docker files are designed to be build from docker hub as the will do a remote git fetch during the build process. +They are not currently designed to operate on a local source tree (we may add this later). + +``` +cd docker/389ds_poc; +docker build -t 389ds_poc:latest . +``` + +#### Deploying and using the final product + +``` +docker create -h ldap.example.com 389ds_poc:latest +docker start +docker inspect | grep IPAddress +ldapsearch -H ldap://

-b '' -s base -x + +.... +supportedLDAPVersion: 3 +vendorName: 389 Project +vendorVersion: 389-Directory/1.3.6.3 B2017.093.354 + +``` + +To expose the ports you may consider adding: + +``` +-P +OR +-p 127.0.0.1:$HOSTPORT:$CONTAINERPORT +``` + +You can not currently use a persistent volume with the 389ds_poc image due to an issue with docker volumes. This will be +corrected by https://github.com/389ds/389-ds-base/issues/2272 + +#### Warnings + +The 389ds_poc container is supplied with a static Directory Manager password. This is HIGHLY INSECURE and should not be +used in production. The password is "directory manager password". + +The 389ds_poc container has some issues with volume over-rides due to our use of a pre-built instance. We are working to +resolve this, but until a solution is derived, you can not override the datavolumes. + +#### Other ideas + +* We could develop a dockerfile that builds and runs DS tests in an isolated environment. +* Make a container image that allows mounting an arbitrary 389-ds repo into it for simple development purposes. + +#### NOTE of 389 DS project support + +This is not a "supported" method of deployment to a production system and may result in data loss. This should be +considered an experimental deployment method until otherwise announced. 
+ diff --git a/docs/custom.css b/docs/custom.css new file mode 100644 index 0000000..16d91cd --- /dev/null +++ b/docs/custom.css @@ -0,0 +1,1366 @@ +/* The standard CSS for doxygen 1.8.6 */ + +body, table, div, p, dl { + font: 400 14px/22px Liberation Sans,DejaVu Sans,Roboto,sans-serif; +} + +/* @group Heading Levels */ + +h1.groupheader { + font-size: 150%; +} + +.title { + font: 400 14px/28px Liberation Sans,DejaVu Sans,Roboto,sans-serif; + font-size: 150%; + font-weight: bold; + margin: 10px 2px; +} + +h2.groupheader { + border-bottom: 1px solid #879ECB; + color: #354C7B; + font-size: 150%; + font-weight: normal; + margin-top: 1.75em; + padding-top: 8px; + padding-bottom: 4px; + width: 100%; +} + +h3.groupheader { + font-size: 100%; +} + +h1, h2, h3, h4, h5, h6 { + -webkit-transition: text-shadow 0.5s linear; + -moz-transition: text-shadow 0.5s linear; + -ms-transition: text-shadow 0.5s linear; + -o-transition: text-shadow 0.5s linear; + transition: text-shadow 0.5s linear; + margin-right: 15px; +} + +h1.glow, h2.glow, h3.glow, h4.glow, h5.glow, h6.glow { + text-shadow: 0 0 15px cyan; +} + +dt { + font-weight: bold; +} + +div.multicol { + -moz-column-gap: 1em; + -webkit-column-gap: 1em; + -moz-column-count: 3; + -webkit-column-count: 3; +} + +p.startli, p.startdd { + margin-top: 2px; +} + +p.starttd { + margin-top: 0px; +} + +p.endli { + margin-bottom: 0px; +} + +p.enddd { + margin-bottom: 4px; +} + +p.endtd { + margin-bottom: 2px; +} + +/* @end */ + +caption { + font-weight: bold; +} + +span.legend { + font-size: 70%; + text-align: center; +} + +h3.version { + font-size: 90%; + text-align: center; +} + +div.qindex, div.navtab{ + background-color: #EBEFF6; + border: 1px solid #A3B4D7; + text-align: center; +} + +div.qindex, div.navpath { + width: 100%; + line-height: 140%; +} + +div.navtab { + margin-right: 15px; +} + +/* @group Link Styling */ + +a { + color: #3D578C; + font-weight: normal; + text-decoration: none; +} + +.contents a:visited { + color: #4665A2; 
+} + +a:hover { + text-decoration: underline; +} + +a.qindex { + font-weight: bold; +} + +a.qindexHL { + font-weight: bold; + background-color: #9CAFD4; + color: #ffffff; + border: 1px double #869DCA; +} + +.contents a.qindexHL:visited { + color: #ffffff; +} + +a.el { + font-weight: bold; +} + +a.elRef { +} + +a.code, a.code:visited, a.line, a.line:visited { + color: #4665A2; +} + +a.codeRef, a.codeRef:visited, a.lineRef, a.lineRef:visited { + color: #4665A2; +} + +/* @end */ + +dl.el { + margin-left: -1cm; +} + +pre.fragment { + border: 1px solid #C4CFE5; + background-color: #FBFCFD; + padding: 4px 6px; + margin: 4px 8px 4px 2px; + overflow: auto; + word-wrap: break-word; + font-size: 9pt; + line-height: 125%; + font-family: monospace, fixed; + font-size: 105%; +} + +div.fragment { + padding: 4px 6px; + margin: 4px 8px 4px 2px; + background-color: #FBFCFD; + border: 1px solid #C4CFE5; +} + +div.line { + font-family: monospace, fixed; + font-size: 13px; + min-height: 13px; + line-height: 1.0; + text-wrap: unrestricted; + white-space: -moz-pre-wrap; /* Moz */ + white-space: -pre-wrap; /* Opera 4-6 */ + white-space: -o-pre-wrap; /* Opera 7 */ + white-space: pre-wrap; /* CSS3 */ + word-wrap: break-word; /* IE 5.5+ */ + text-indent: -53px; + padding-left: 53px; + padding-bottom: 0px; + margin: 0px; + -webkit-transition-property: background-color, box-shadow; + -webkit-transition-duration: 0.5s; + -moz-transition-property: background-color, box-shadow; + -moz-transition-duration: 0.5s; + -ms-transition-property: background-color, box-shadow; + -ms-transition-duration: 0.5s; + -o-transition-property: background-color, box-shadow; + -o-transition-duration: 0.5s; + transition-property: background-color, box-shadow; + transition-duration: 0.5s; +} + +div.line.glow { + background-color: cyan; + box-shadow: 0 0 10px cyan; +} + + +span.lineno { + padding-right: 4px; + text-align: right; + border-right: 2px solid #0F0; + background-color: #E8E8E8; + white-space: pre; +} 
+span.lineno a { + background-color: #D8D8D8; +} + +span.lineno a:hover { + background-color: #C8C8C8; +} + +div.ah { + background-color: black; + font-weight: bold; + color: #ffffff; + margin-bottom: 3px; + margin-top: 3px; + padding: 0.2em; + border: solid thin #333; + border-radius: 0.5em; + -webkit-border-radius: .5em; + -moz-border-radius: .5em; + box-shadow: 2px 2px 3px #999; + -webkit-box-shadow: 2px 2px 3px #999; + -moz-box-shadow: rgba(0, 0, 0, 0.15) 2px 2px 2px; + background-image: -webkit-gradient(linear, left top, left bottom, from(#eee), to(#000),color-stop(0.3, #444)); + background-image: -moz-linear-gradient(center top, #eee 0%, #444 40%, #000); +} + +div.groupHeader { + margin-left: 16px; + margin-top: 12px; + font-weight: bold; +} + +div.groupText { + margin-left: 16px; + font-style: italic; +} + +body { + background-color: white; + color: black; + margin: 0; +} + +div.contents { + margin-top: 10px; + margin-left: 12px; + margin-right: 8px; +} + +td.indexkey { + background-color: #EBEFF6; + font-weight: bold; + border: 1px solid #C4CFE5; + margin: 2px 0px 2px 0; + padding: 2px 10px; + white-space: nowrap; + vertical-align: top; +} + +td.indexvalue { + background-color: #EBEFF6; + border: 1px solid #C4CFE5; + padding: 2px 10px; + margin: 2px 0px; +} + +tr.memlist { + background-color: #EEF1F7; +} + +p.formulaDsp { + text-align: center; +} + +img.formulaDsp { + +} + +img.formulaInl { + vertical-align: middle; +} + +div.center { + text-align: center; + margin-top: 0px; + margin-bottom: 0px; + padding: 0px; +} + +div.center img { + border: 0px; +} + +address.footer { + text-align: right; + padding-right: 12px; +} + +img.footer { + border: 0px; + vertical-align: middle; +} + +/* @group Code Colorization */ + +span.keyword { + color: #008000 +} + +span.keywordtype { + color: #604020 +} + +span.keywordflow { + color: #e08000 +} + +span.comment { + color: #800000 +} + +span.preprocessor { + color: #806020 +} + +span.stringliteral { + color: #002080 +} + 
+span.charliteral { + color: #008080 +} + +span.vhdldigit { + color: #ff00ff +} + +span.vhdlchar { + color: #000000 +} + +span.vhdlkeyword { + color: #700070 +} + +span.vhdllogic { + color: #ff0000 +} + +blockquote { + background-color: #F7F8FB; + border-left: 2px solid #9CAFD4; + margin: 0 24px 0 4px; + padding: 0 12px 0 16px; +} + +/* @end */ + +/* +.search { + color: #003399; + font-weight: bold; +} + +form.search { + margin-bottom: 0px; + margin-top: 0px; +} + +input.search { + font-size: 75%; + color: #000080; + font-weight: normal; + background-color: #e8eef2; +} +*/ + +td.tiny { + font-size: 75%; +} + +.dirtab { + padding: 4px; + border-collapse: collapse; + border: 1px solid #A3B4D7; +} + +th.dirtab { + background: #EBEFF6; + font-weight: bold; +} + +hr { + height: 0px; + border: none; + border-top: 1px solid #4A6AAA; +} + +hr.footer { + height: 1px; +} + +/* @group Member Descriptions */ + +table.memberdecls { + border-spacing: 0px; + padding: 0px; +} + +.memberdecls td, .fieldtable tr { + -webkit-transition-property: background-color, box-shadow; + -webkit-transition-duration: 0.5s; + -moz-transition-property: background-color, box-shadow; + -moz-transition-duration: 0.5s; + -ms-transition-property: background-color, box-shadow; + -ms-transition-duration: 0.5s; + -o-transition-property: background-color, box-shadow; + -o-transition-duration: 0.5s; + transition-property: background-color, box-shadow; + transition-duration: 0.5s; +} + +.memberdecls td.glow, .fieldtable tr.glow { + background-color: cyan; + box-shadow: 0 0 15px cyan; +} + +.mdescLeft, .mdescRight, +.memItemLeft, .memItemRight, +.memTemplItemLeft, .memTemplItemRight, .memTemplParams { + background-color: #F9FAFC; + border: none; + margin: 4px; + padding: 1px 0 0 8px; +} + +.mdescLeft, .mdescRight { + padding: 0px 8px 4px 8px; + color: #555; +} + +.memSeparator { + border-bottom: 1px solid #DEE4F0; + line-height: 1px; + margin: 0px; + padding: 0px; +} + +.memItemLeft, .memTemplItemLeft { + 
white-space: nowrap; +} + +.memItemRight { + width: 100%; +} + +.memTemplParams { + color: #4665A2; + white-space: nowrap; + font-size: 80%; +} + +/* @end */ + +/* @group Member Details */ + +/* Styles for detailed member documentation */ + +.memtemplate { + font-size: 80%; + color: #4665A2; + font-weight: normal; + margin-left: 9px; +} + +.memnav { + background-color: #EBEFF6; + border: 1px solid #A3B4D7; + text-align: center; + margin: 2px; + margin-right: 15px; + padding: 2px; +} + +.mempage { + width: 100%; +} + +.memitem { + padding: 0; + margin-bottom: 10px; + margin-right: 5px; + -webkit-transition: box-shadow 0.5s linear; + -moz-transition: box-shadow 0.5s linear; + -ms-transition: box-shadow 0.5s linear; + -o-transition: box-shadow 0.5s linear; + transition: box-shadow 0.5s linear; + display: table !important; + width: 100%; +} + +.memitem.glow { + box-shadow: 0 0 15px cyan; +} + +.memname { + font-weight: bold; + margin-left: 6px; +} + +.memname td { + vertical-align: bottom; +} + +.memproto, dl.reflist dt { + border-top: 1px solid #A8B8D9; + border-left: 1px solid #A8B8D9; + border-right: 1px solid #A8B8D9; + padding: 6px 0px 6px 0px; + color: #253555; + font-weight: bold; + text-shadow: 0px 1px 1px rgba(255, 255, 255, 0.9); + background-image:url('nav_f.png'); + background-repeat:repeat-x; + background-color: #E2E8F2; + /* opera specific markup */ + box-shadow: 5px 5px 5px rgba(0, 0, 0, 0.15); + border-top-right-radius: 4px; + border-top-left-radius: 4px; + /* firefox specific markup */ + -moz-box-shadow: rgba(0, 0, 0, 0.15) 5px 5px 5px; + -moz-border-radius-topright: 4px; + -moz-border-radius-topleft: 4px; + /* webkit specific markup */ + -webkit-box-shadow: 5px 5px 5px rgba(0, 0, 0, 0.15); + -webkit-border-top-right-radius: 4px; + -webkit-border-top-left-radius: 4px; + +} + +.memdoc, dl.reflist dd { + border-bottom: 1px solid #A8B8D9; + border-left: 1px solid #A8B8D9; + border-right: 1px solid #A8B8D9; + padding: 6px 10px 2px 10px; + background-color: 
#FBFCFD; + border-top-width: 0; + background-image:url('nav_g.png'); + background-repeat:repeat-x; + background-color: #FFFFFF; + /* opera specific markup */ + border-bottom-left-radius: 4px; + border-bottom-right-radius: 4px; + box-shadow: 5px 5px 5px rgba(0, 0, 0, 0.15); + /* firefox specific markup */ + -moz-border-radius-bottomleft: 4px; + -moz-border-radius-bottomright: 4px; + -moz-box-shadow: rgba(0, 0, 0, 0.15) 5px 5px 5px; + /* webkit specific markup */ + -webkit-border-bottom-left-radius: 4px; + -webkit-border-bottom-right-radius: 4px; + -webkit-box-shadow: 5px 5px 5px rgba(0, 0, 0, 0.15); +} + +dl.reflist dt { + padding: 5px; +} + +dl.reflist dd { + margin: 0px 0px 10px 0px; + padding: 5px; +} + +.paramkey { + text-align: right; +} + +.paramtype { + white-space: nowrap; +} + +.paramname { + color: #602020; + white-space: nowrap; +} +.paramname em { + font-style: normal; +} +.paramname code { + line-height: 14px; +} + +.params, .retval, .exception, .tparams { + margin-left: 0px; + padding-left: 0px; +} + +.params .paramname, .retval .paramname { + font-weight: bold; + vertical-align: top; +} + +.params .paramtype { + font-style: italic; + vertical-align: top; +} + +.params .paramdir { + font-family: "courier new",courier,monospace; + vertical-align: top; +} + +table.mlabels { + border-spacing: 0px; +} + +td.mlabels-left { + width: 100%; + padding: 0px; +} + +td.mlabels-right { + vertical-align: bottom; + padding: 0px; + white-space: nowrap; +} + +span.mlabels { + margin-left: 8px; +} + +span.mlabel { + background-color: #728DC1; + border-top:1px solid #5373B4; + border-left:1px solid #5373B4; + border-right:1px solid #C4CFE5; + border-bottom:1px solid #C4CFE5; + text-shadow: none; + color: white; + margin-right: 4px; + padding: 2px 3px; + border-radius: 3px; + font-size: 7pt; + white-space: nowrap; + vertical-align: middle; +} + + + +/* @end */ + +/* these are for tree view when not used as main index */ + +div.directory { + margin: 10px 0px; + border-top: 
1px solid #A8B8D9; + border-bottom: 1px solid #A8B8D9; + width: 100%; +} + +.directory table { + border-collapse:collapse; +} + +.directory td { + margin: 0px; + padding: 0px; + vertical-align: top; +} + +.directory td.entry { + white-space: nowrap; + padding-right: 6px; + padding-top: 3px; +} + +.directory td.entry a { + outline:none; +} + +.directory td.entry a img { + border: none; +} + +.directory td.desc { + width: 100%; + padding-left: 6px; + padding-right: 6px; + padding-top: 3px; + border-left: 1px solid rgba(0,0,0,0.05); +} + +.directory tr.even { + padding-left: 6px; + background-color: #F7F8FB; +} + +.directory img { + vertical-align: -30%; +} + +.directory .levels { + white-space: nowrap; + width: 100%; + text-align: right; + font-size: 9pt; +} + +.directory .levels span { + cursor: pointer; + padding-left: 2px; + padding-right: 2px; + color: #3D578C; +} + +div.dynheader { + margin-top: 8px; + -webkit-touch-callout: none; + -webkit-user-select: none; + -khtml-user-select: none; + -moz-user-select: none; + -ms-user-select: none; + user-select: none; +} + +address { + font-style: normal; + color: #2A3D61; +} + +table.doxtable { + border-collapse:collapse; + margin-top: 4px; + margin-bottom: 4px; +} + +table.doxtable td, table.doxtable th { + border: 1px solid #2D4068; + padding: 3px 7px 2px; +} + +table.doxtable th { + background-color: #374F7F; + color: #FFFFFF; + font-size: 110%; + padding-bottom: 4px; + padding-top: 5px; +} + +table.fieldtable { + /*width: 100%;*/ + margin-bottom: 10px; + border: 1px solid #A8B8D9; + border-spacing: 0px; + -moz-border-radius: 4px; + -webkit-border-radius: 4px; + border-radius: 4px; + -moz-box-shadow: rgba(0, 0, 0, 0.15) 2px 2px 2px; + -webkit-box-shadow: 2px 2px 2px rgba(0, 0, 0, 0.15); + box-shadow: 2px 2px 2px rgba(0, 0, 0, 0.15); +} + +.fieldtable td, .fieldtable th { + padding: 3px 7px 2px; +} + +.fieldtable td.fieldtype, .fieldtable td.fieldname { + white-space: nowrap; + border-right: 1px solid #A8B8D9; + 
border-bottom: 1px solid #A8B8D9; + vertical-align: top; +} + +.fieldtable td.fieldname { + padding-top: 3px; +} + +.fieldtable td.fielddoc { + border-bottom: 1px solid #A8B8D9; + /*width: 100%;*/ +} + +.fieldtable td.fielddoc p:first-child { + margin-top: 0px; +} + +.fieldtable td.fielddoc p:last-child { + margin-bottom: 2px; +} + +.fieldtable tr:last-child td { + border-bottom: none; +} + +.fieldtable th { + background-image:url('nav_f.png'); + background-repeat:repeat-x; + background-color: #E2E8F2; + font-size: 90%; + color: #253555; + padding-bottom: 4px; + padding-top: 5px; + text-align:left; + -moz-border-radius-topleft: 4px; + -moz-border-radius-topright: 4px; + -webkit-border-top-left-radius: 4px; + -webkit-border-top-right-radius: 4px; + border-top-left-radius: 4px; + border-top-right-radius: 4px; + border-bottom: 1px solid #A8B8D9; +} + + +.tabsearch { + top: 0px; + left: 10px; + height: 36px; + background-image: url('tab_b.png'); + z-index: 101; + overflow: hidden; + font-size: 13px; +} + +.navpath ul +{ + font-size: 11px; + background-image:url('tab_b.png'); + background-repeat:repeat-x; + background-position: 0 -5px; + height:30px; + line-height:30px; + color:#8AA0CC; + border:solid 1px #C2CDE4; + overflow:hidden; + margin:0px; + padding:0px; +} + +.navpath li +{ + list-style-type:none; + float:left; + padding-left:10px; + padding-right:15px; + background-image:url('bc_s.png'); + background-repeat:no-repeat; + background-position:right; + color:#364D7C; +} + +.navpath li.navelem a +{ + height:32px; + display:block; + text-decoration: none; + outline: none; + color: #283A5D; + font-family: 'Lucida Grande',Geneva,Helvetica,Arial,sans-serif; + text-shadow: 0px 1px 1px rgba(255, 255, 255, 0.9); + text-decoration: none; +} + +.navpath li.navelem a:hover +{ + color:#6884BD; +} + +.navpath li.footer +{ + list-style-type:none; + float:right; + padding-left:10px; + padding-right:15px; + background-image:none; + background-repeat:no-repeat; + 
background-position:right; + color:#364D7C; + font-size: 8pt; +} + + +div.summary +{ + float: right; + font-size: 8pt; + padding-right: 5px; + width: 50%; + text-align: right; +} + +div.summary a +{ + white-space: nowrap; +} + +div.ingroups +{ + font-size: 8pt; + width: 50%; + text-align: left; +} + +div.ingroups a +{ + white-space: nowrap; +} + +div.header +{ + background-image:url('nav_h.png'); + background-repeat:repeat-x; + background-color: #F9FAFC; + margin: 0px; + border-bottom: 1px solid #C4CFE5; +} + +div.headertitle +{ + padding: 5px 5px 5px 10px; +} + +dl +{ + padding: 0 0 0 10px; +} + +/* dl.note, dl.warning, dl.attention, dl.pre, dl.post, dl.invariant, dl.deprecated, dl.todo, dl.test, dl.bug */ +dl.section +{ + margin-left: 0px; + padding-left: 0px; +} + +dl.note +{ + margin-left:-7px; + padding-left: 3px; + border-left:4px solid; + border-color: #D0C000; +} + +dl.warning, dl.attention +{ + margin-left:-7px; + padding-left: 3px; + border-left:4px solid; + border-color: #FF0000; +} + +dl.pre, dl.post, dl.invariant +{ + margin-left:-7px; + padding-left: 3px; + border-left:4px solid; + border-color: #00D000; +} + +dl.deprecated +{ + margin-left:-7px; + padding-left: 3px; + border-left:4px solid; + border-color: #505050; +} + +dl.todo +{ + margin-left:-7px; + padding-left: 3px; + border-left:4px solid; + border-color: #00C0E0; +} + +dl.test +{ + margin-left:-7px; + padding-left: 3px; + border-left:4px solid; + border-color: #3030E0; +} + +dl.bug +{ + margin-left:-7px; + padding-left: 3px; + border-left:4px solid; + border-color: #C08050; +} + +dl.section dd { + margin-bottom: 6px; +} + + +#projectlogo +{ + text-align: center; + vertical-align: bottom; + border-collapse: separate; +} + +#projectlogo img +{ + border: 0px none; +} + +#projectname +{ + font: 300% Tahoma, Arial,sans-serif; + margin: 0px; + padding: 2px 0px; +} + +#projectbrief +{ + font: 120% Tahoma, Arial,sans-serif; + margin: 0px; + padding: 0px; +} + +#projectnumber +{ + font: 50% Tahoma, 
Arial,sans-serif; + margin: 0px; + padding: 0px; +} + +#titlearea +{ + padding: 0px; + margin: 0px; + width: 100%; + border-bottom: 1px solid #5373B4; +} + +.image +{ + text-align: center; +} + +.dotgraph +{ + text-align: center; +} + +.mscgraph +{ + text-align: center; +} + +.diagraph +{ + text-align: center; +} + +.caption +{ + font-weight: bold; +} + +div.zoom +{ + border: 1px solid #90A5CE; +} + +dl.citelist { + margin-bottom:50px; +} + +dl.citelist dt { + color:#334975; + float:left; + font-weight:bold; + margin-right:10px; + padding:5px; +} + +dl.citelist dd { + margin:2px 0; + padding:5px 0; +} + +div.toc { + padding: 14px 25px; + background-color: #F4F6FA; + border: 1px solid #D8DFEE; + border-radius: 7px 7px 7px 7px; + float: right; + height: auto; + margin: 0 20px 10px 10px; + width: 200px; +} + +div.toc li { + background: url("bdwn.png") no-repeat scroll 0 5px transparent; + font: 10px/1.2 Verdana,DejaVu Sans,Geneva,sans-serif; + margin-top: 5px; + padding-left: 10px; + padding-top: 2px; +} + +div.toc h3 { + font: bold 12px/1.2 Arial,FreeSans,sans-serif; + color: #4665A2; + border-bottom: 0 none; + margin: 0; +} + +div.toc ul { + list-style: none outside none; + border: medium none; + padding: 0px; +} + +div.toc li.level1 { + margin-left: 0px; +} + +div.toc li.level2 { + margin-left: 15px; +} + +div.toc li.level3 { + margin-left: 30px; +} + +div.toc li.level4 { + margin-left: 45px; +} + +.inherit_header { + font-weight: bold; + color: gray; + cursor: pointer; + -webkit-touch-callout: none; + -webkit-user-select: none; + -khtml-user-select: none; + -moz-user-select: none; + -ms-user-select: none; + user-select: none; +} + +.inherit_header td { + padding: 6px 0px 2px 5px; +} + +.inherit { + display: none; +} + +tr.heading h2 { + margin-top: 12px; + margin-bottom: 4px; +} + +/* tooltip related style info */ + +.ttc { + position: absolute; + display: none; +} + +#powerTip { + cursor: default; + white-space: nowrap; + background-color: white; + border: 1px 
solid gray; + border-radius: 4px 4px 4px 4px; + box-shadow: 1px 1px 7px gray; + display: none; + font-size: smaller; + max-width: 80%; + opacity: 0.9; + padding: 1ex 1em 1em; + position: absolute; + z-index: 2147483647; +} + +#powerTip div.ttdoc { + color: grey; + font-style: italic; +} + +#powerTip div.ttname a { + font-weight: bold; +} + +#powerTip div.ttname { + font-weight: bold; +} + +#powerTip div.ttdeci { + color: #006318; +} + +#powerTip div { + margin: 0px; + padding: 0px; + font: 12px/16px Roboto,sans-serif; +} + +#powerTip:before, #powerTip:after { + content: ""; + position: absolute; + margin: 0px; +} + +#powerTip.n:after, #powerTip.n:before, +#powerTip.s:after, #powerTip.s:before, +#powerTip.w:after, #powerTip.w:before, +#powerTip.e:after, #powerTip.e:before, +#powerTip.ne:after, #powerTip.ne:before, +#powerTip.se:after, #powerTip.se:before, +#powerTip.nw:after, #powerTip.nw:before, +#powerTip.sw:after, #powerTip.sw:before { + border: solid transparent; + content: " "; + height: 0; + width: 0; + position: absolute; +} + +#powerTip.n:after, #powerTip.s:after, +#powerTip.w:after, #powerTip.e:after, +#powerTip.nw:after, #powerTip.ne:after, +#powerTip.sw:after, #powerTip.se:after { + border-color: rgba(255, 255, 255, 0); +} + +#powerTip.n:before, #powerTip.s:before, +#powerTip.w:before, #powerTip.e:before, +#powerTip.nw:before, #powerTip.ne:before, +#powerTip.sw:before, #powerTip.se:before { + border-color: rgba(128, 128, 128, 0); +} + +#powerTip.n:after, #powerTip.n:before, +#powerTip.ne:after, #powerTip.ne:before, +#powerTip.nw:after, #powerTip.nw:before { + top: 100%; +} + +#powerTip.n:after, #powerTip.ne:after, #powerTip.nw:after { + border-top-color: #ffffff; + border-width: 10px; + margin: 0px -10px; +} +#powerTip.n:before { + border-top-color: #808080; + border-width: 11px; + margin: 0px -11px; +} +#powerTip.n:after, #powerTip.n:before { + left: 50%; +} + +#powerTip.nw:after, #powerTip.nw:before { + right: 14px; +} + +#powerTip.ne:after, 
#powerTip.ne:before { + left: 14px; +} + +#powerTip.s:after, #powerTip.s:before, +#powerTip.se:after, #powerTip.se:before, +#powerTip.sw:after, #powerTip.sw:before { + bottom: 100%; +} + +#powerTip.s:after, #powerTip.se:after, #powerTip.sw:after { + border-bottom-color: #ffffff; + border-width: 10px; + margin: 0px -10px; +} + +#powerTip.s:before, #powerTip.se:before, #powerTip.sw:before { + border-bottom-color: #808080; + border-width: 11px; + margin: 0px -11px; +} + +#powerTip.s:after, #powerTip.s:before { + left: 50%; +} + +#powerTip.sw:after, #powerTip.sw:before { + right: 14px; +} + +#powerTip.se:after, #powerTip.se:before { + left: 14px; +} + +#powerTip.e:after, #powerTip.e:before { + left: 100%; +} +#powerTip.e:after { + border-left-color: #ffffff; + border-width: 10px; + top: 50%; + margin-top: -10px; +} +#powerTip.e:before { + border-left-color: #808080; + border-width: 11px; + top: 50%; + margin-top: -11px; +} + +#powerTip.w:after, #powerTip.w:before { + right: 100%; +} +#powerTip.w:after { + border-right-color: #ffffff; + border-width: 10px; + top: 50%; + margin-top: -10px; +} +#powerTip.w:before { + border-right-color: #808080; + border-width: 11px; + top: 50%; + margin-top: -11px; +} + +@media print +{ + #top { display: none; } + #side-nav { display: none; } + #nav-path { display: none; } + body { overflow:visible; } + h1, h2, h3, h4, h5, h6 { page-break-after: avoid; } + .summary { display: none; } + .memitem { page-break-inside: avoid; } + #doc-content + { + margin-left:0 !important; + height:auto !important; + width:auto !important; + overflow:inherit; + display:inline; + } +} + diff --git a/docs/doc_header.html b/docs/doc_header.html new file mode 100644 index 0000000..aad2db5 --- /dev/null +++ b/docs/doc_header.html @@ -0,0 +1,47 @@ + + + + + + + +$projectname: $title +$title + + + +$treeview +$search +$mathjax + +$extrastylesheet + + +
+ + +
+ + + + + + + + + + + + + + + + + + +
+
$projectbrief
+
$searchbox
+
+ + diff --git a/docs/slapi.doxy.in b/docs/slapi.doxy.in new file mode 100644 index 0000000..5e0efe6 --- /dev/null +++ b/docs/slapi.doxy.in @@ -0,0 +1,2602 @@ +# Doxyfile 1.9.1 + +# This file describes the settings to be used by the documentation system +# doxygen (www.doxygen.org) for a project. +# +# All text after a double hash (##) is considered a comment and is placed in +# front of the TAG it is preceding. +# +# All text after a single hash (#) is considered a comment and will be ignored. +# The format is: +# TAG = value [value, ...] +# For lists, items can also be appended using: +# TAG += value [value, ...] +# Values that contain spaces should be placed between quotes (\" \"). + +#--------------------------------------------------------------------------- +# Project related configuration options +#--------------------------------------------------------------------------- + +# This tag specifies the encoding used for all characters in the configuration +# file that follow. The default is UTF-8 which is also the encoding used for all +# text before the first occurrence of this tag. Doxygen uses libiconv (or the +# iconv built into libc) for the transcoding. See +# https://www.gnu.org/software/libiconv/ for the list of possible encodings. +# The default value is: UTF-8. + +DOXYFILE_ENCODING = UTF-8 + +# The PROJECT_NAME tag is a single word (or a sequence of words surrounded by +# double-quotes, unless you are using Doxywizard) that should identify the +# project for which the documentation is generated. This name is used in the +# title of most generated pages and in a few other places. +# The default value is: My Project. + +PROJECT_NAME = @PACKAGE_NAME@ + +# The PROJECT_NUMBER tag can be used to enter a project or revision number. This +# could be handy for archiving the generated documentation or if some version +# control system is used. 
+ +PROJECT_NUMBER = @PACKAGE_VERSION@ + +# Using the PROJECT_BRIEF tag one can provide an optional one line description +# for a project that appears at the top of each page and should give viewer a +# quick idea about the purpose of the project. Keep the description short. + +PROJECT_BRIEF = + +# With the PROJECT_LOGO tag one can specify a logo or an icon that is included +# in the documentation. The maximum height of the logo should not exceed 55 +# pixels and the maximum width should not exceed 200 pixels. Doxygen will copy +# the logo to the output directory. + +PROJECT_LOGO = + +# The OUTPUT_DIRECTORY tag is used to specify the (relative or absolute) path +# into which the generated documentation will be written. If a relative path is +# entered, it will be relative to the location where doxygen was started. If +# left blank the current directory will be used. + +OUTPUT_DIRECTORY = @abs_top_builddir@ + +# If the CREATE_SUBDIRS tag is set to YES then doxygen will create 4096 sub- +# directories (in 2 levels) under the output directory of each output format and +# will distribute the generated files over these directories. Enabling this +# option can be useful when feeding doxygen a huge amount of source files, where +# putting all generated files in the same directory would otherwise causes +# performance problems for the file system. +# The default value is: NO. + +CREATE_SUBDIRS = NO + +# If the ALLOW_UNICODE_NAMES tag is set to YES, doxygen will allow non-ASCII +# characters to appear in the names of generated files. If set to NO, non-ASCII +# characters will be escaped, for example _xE3_x81_x84 will be used for Unicode +# U+3044. +# The default value is: NO. + +ALLOW_UNICODE_NAMES = NO + +# The OUTPUT_LANGUAGE tag is used to specify the language in which all +# documentation generated by doxygen is written. Doxygen will use this +# information to generate all constant output in the proper language. 
+# Possible values are: Afrikaans, Arabic, Armenian, Brazilian, Catalan, Chinese, +# Chinese-Traditional, Croatian, Czech, Danish, Dutch, English (United States), +# Esperanto, Farsi (Persian), Finnish, French, German, Greek, Hungarian, +# Indonesian, Italian, Japanese, Japanese-en (Japanese with English messages), +# Korean, Korean-en (Korean with English messages), Latvian, Lithuanian, +# Macedonian, Norwegian, Persian (Farsi), Polish, Portuguese, Romanian, Russian, +# Serbian, Serbian-Cyrillic, Slovak, Slovene, Spanish, Swedish, Turkish, +# Ukrainian and Vietnamese. +# The default value is: English. + +OUTPUT_LANGUAGE = English + +# The OUTPUT_TEXT_DIRECTION tag is used to specify the direction in which all +# documentation generated by doxygen is written. Doxygen will use this +# information to generate all generated output in the proper direction. +# Possible values are: None, LTR, RTL and Context. +# The default value is: None. + +OUTPUT_TEXT_DIRECTION = None + +# If the BRIEF_MEMBER_DESC tag is set to YES, doxygen will include brief member +# descriptions after the members that are listed in the file and class +# documentation (similar to Javadoc). Set to NO to disable this. +# The default value is: YES. + +BRIEF_MEMBER_DESC = YES + +# If the REPEAT_BRIEF tag is set to YES, doxygen will prepend the brief +# description of a member or function before the detailed description +# +# Note: If both HIDE_UNDOC_MEMBERS and BRIEF_MEMBER_DESC are set to NO, the +# brief descriptions will be completely suppressed. +# The default value is: YES. + +REPEAT_BRIEF = YES + +# This tag implements a quasi-intelligent brief description abbreviator that is +# used to form the text in various listings. Each string in this list, if found +# as the leading text of the brief description, will be stripped from the text +# and the result, after processing the whole list, is used as the annotated +# text. Otherwise, the brief description is used as-is. 
If left blank, the +# following values are used ($name is automatically replaced with the name of +# the entity):The $name class, The $name widget, The $name file, is, provides, +# specifies, contains, represents, a, an and the. + +ABBREVIATE_BRIEF = + +# If the ALWAYS_DETAILED_SEC and REPEAT_BRIEF tags are both set to YES then +# doxygen will generate a detailed section even if there is only a brief +# description. +# The default value is: NO. + +ALWAYS_DETAILED_SEC = NO + +# If the INLINE_INHERITED_MEMB tag is set to YES, doxygen will show all +# inherited members of a class in the documentation of that class as if those +# members were ordinary class members. Constructors, destructors and assignment +# operators of the base classes will not be shown. +# The default value is: NO. + +INLINE_INHERITED_MEMB = NO + +# If the FULL_PATH_NAMES tag is set to YES, doxygen will prepend the full path +# before files name in the file list and in the header files. If set to NO the +# shortest path that makes the file name unique will be used +# The default value is: YES. + +FULL_PATH_NAMES = NO + +# The STRIP_FROM_PATH tag can be used to strip a user-defined part of the path. +# Stripping is only done if one of the specified strings matches the left-hand +# part of the path. The tag can be used to show relative paths in the file list. +# If left blank the directory from which doxygen is run is used as the path to +# strip. +# +# Note that you can specify absolute paths here, but also relative paths, which +# will be relative from the directory where doxygen is started. +# This tag requires that the tag FULL_PATH_NAMES is set to YES. + +STRIP_FROM_PATH = + +# The STRIP_FROM_INC_PATH tag can be used to strip a user-defined part of the +# path mentioned in the documentation of a class, which tells the reader which +# header file to include in order to use a class. If left blank only the name of +# the header file containing the class definition is used. 
Otherwise one should +# specify the list of include paths that are normally passed to the compiler +# using the -I flag. + +STRIP_FROM_INC_PATH = + +# If the SHORT_NAMES tag is set to YES, doxygen will generate much shorter (but +# less readable) file names. This can be useful if your file system doesn't +# support long names like on DOS, Mac, or CD-ROM. +# The default value is: NO. + +SHORT_NAMES = NO + +# If the JAVADOC_AUTOBRIEF tag is set to YES then doxygen will interpret the +# first line (until the first dot) of a Javadoc-style comment as the brief +# description. If set to NO, the Javadoc-style will behave just like regular Qt- +# style comments (thus requiring an explicit @brief command for a brief +# description.) +# The default value is: NO. + +JAVADOC_AUTOBRIEF = NO + +# If the JAVADOC_BANNER tag is set to YES then doxygen will interpret a line +# such as +# /*************** +# as being the beginning of a Javadoc-style comment "banner". If set to NO, the +# Javadoc-style will behave just like regular comments and it will not be +# interpreted by doxygen. +# The default value is: NO. + +JAVADOC_BANNER = NO + +# If the QT_AUTOBRIEF tag is set to YES then doxygen will interpret the first +# line (until the first dot) of a Qt-style comment as the brief description. If +# set to NO, the Qt-style will behave just like regular Qt-style comments (thus +# requiring an explicit \brief command for a brief description.) +# The default value is: NO. + +QT_AUTOBRIEF = NO + +# The MULTILINE_CPP_IS_BRIEF tag can be set to YES to make doxygen treat a +# multi-line C++ special comment block (i.e. a block of //! or /// comments) as +# a brief description. This used to be the default behavior. The new default is +# to treat a multi-line C++ comment block as a detailed description. Set this +# tag to YES if you prefer the old behavior instead. +# +# Note that setting this tag to YES also means that rational rose comments are +# not recognized any more. 
+# The default value is: NO. + +MULTILINE_CPP_IS_BRIEF = NO + +# By default Python docstrings are displayed as preformatted text and doxygen's +# special commands cannot be used. By setting PYTHON_DOCSTRING to NO the +# doxygen's special commands can be used and the contents of the docstring +# documentation blocks is shown as doxygen documentation. +# The default value is: YES. + +PYTHON_DOCSTRING = YES + +# If the INHERIT_DOCS tag is set to YES then an undocumented member inherits the +# documentation from any documented member that it re-implements. +# The default value is: YES. + +INHERIT_DOCS = YES + +# If the SEPARATE_MEMBER_PAGES tag is set to YES then doxygen will produce a new +# page for each member. If set to NO, the documentation of a member will be part +# of the file/class/namespace that contains it. +# The default value is: NO. + +SEPARATE_MEMBER_PAGES = NO + +# The TAB_SIZE tag can be used to set the number of spaces in a tab. Doxygen +# uses this value to replace tabs by spaces in code fragments. +# Minimum value: 1, maximum value: 16, default value: 4. + +TAB_SIZE = 4 + +# This tag can be used to specify a number of aliases that act as commands in +# the documentation. An alias has the form: +# name=value +# For example adding +# "sideeffect=@par Side Effects:\n" +# will allow you to put the command \sideeffect (or @sideeffect) in the +# documentation, which will result in a user-defined paragraph with heading +# "Side Effects:". You can put \n's in the value part of an alias to insert +# newlines (in the resulting output). You can put ^^ in the value part of an +# alias to insert a newline as if a physical newline was in the original file. 
+# When you need a literal { or } or , in the value part of an alias you have to +# escape them by means of a backslash (\), this can lead to conflicts with the +# commands \{ and \} for these it is advised to use the version @{ and @} or use +# a double escape (\\{ and \\}) + +ALIASES = + +# Set the OPTIMIZE_OUTPUT_FOR_C tag to YES if your project consists of C sources +# only. Doxygen will then generate output that is more tailored for C. For +# instance, some of the names that are used will be different. The list of all +# members will be omitted, etc. +# The default value is: NO. + +OPTIMIZE_OUTPUT_FOR_C = YES + +# Set the OPTIMIZE_OUTPUT_JAVA tag to YES if your project consists of Java or +# Python sources only. Doxygen will then generate output that is more tailored +# for that language. For instance, namespaces will be presented as packages, +# qualified scopes will look different, etc. +# The default value is: NO. + +OPTIMIZE_OUTPUT_JAVA = NO + +# Set the OPTIMIZE_FOR_FORTRAN tag to YES if your project consists of Fortran +# sources. Doxygen will then generate output that is tailored for Fortran. +# The default value is: NO. + +OPTIMIZE_FOR_FORTRAN = NO + +# Set the OPTIMIZE_OUTPUT_VHDL tag to YES if your project consists of VHDL +# sources. Doxygen will then generate output that is tailored for VHDL. +# The default value is: NO. + +OPTIMIZE_OUTPUT_VHDL = NO + +# Set the OPTIMIZE_OUTPUT_SLICE tag to YES if your project consists of Slice +# sources only. Doxygen will then generate output that is more tailored for that +# language. For instance, namespaces will be presented as modules, types will be +# separated into more groups, etc. +# The default value is: NO. + +OPTIMIZE_OUTPUT_SLICE = NO + +# Doxygen selects the parser to use depending on the extension of the files it +# parses. With this tag you can assign which parser to use for a given +# extension. Doxygen has a built-in mapping, but you can override or extend it +# using this tag. 
The format is ext=language, where ext is a file extension, and +# language is one of the parsers supported by doxygen: IDL, Java, JavaScript, +# Csharp (C#), C, C++, D, PHP, md (Markdown), Objective-C, Python, Slice, VHDL, +# Fortran (fixed format Fortran: FortranFixed, free formatted Fortran: +# FortranFree, unknown formatted Fortran: Fortran. In the later case the parser +# tries to guess whether the code is fixed or free formatted code, this is the +# default for Fortran type files). For instance to make doxygen treat .inc files +# as Fortran files (default is PHP), and .f files as C (default is Fortran), +# use: inc=Fortran f=C. +# +# Note: For files without extension you can use no_extension as a placeholder. +# +# Note that for custom extensions you also need to set FILE_PATTERNS otherwise +# the files are not read by doxygen. When specifying no_extension you should add +# * to the FILE_PATTERNS. +# +# Note see also the list of default file extension mappings. + +EXTENSION_MAPPING = + +# If the MARKDOWN_SUPPORT tag is enabled then doxygen pre-processes all comments +# according to the Markdown format, which allows for more readable +# documentation. See https://daringfireball.net/projects/markdown/ for details. +# The output of markdown processing is further processed by doxygen, so you can +# mix doxygen, HTML, and XML commands with Markdown formatting. Disable only in +# case of backward compatibilities issues. +# The default value is: YES. + +MARKDOWN_SUPPORT = YES + +# When the TOC_INCLUDE_HEADINGS tag is set to a non-zero value, all headings up +# to that level are automatically included in the table of contents, even if +# they do not have an id attribute. +# Note: This feature currently applies only to Markdown headings. +# Minimum value: 0, maximum value: 99, default value: 5. +# This tag requires that the tag MARKDOWN_SUPPORT is set to YES. 
+ +TOC_INCLUDE_HEADINGS = 5 + +# When enabled doxygen tries to link words that correspond to documented +# classes, or namespaces to their corresponding documentation. Such a link can +# be prevented in individual cases by putting a % sign in front of the word or +# globally by setting AUTOLINK_SUPPORT to NO. +# The default value is: YES. + +AUTOLINK_SUPPORT = YES + +# If you use STL classes (i.e. std::string, std::vector, etc.) but do not want +# to include (a tag file for) the STL sources as input, then you should set this +# tag to YES in order to let doxygen match functions declarations and +# definitions whose arguments contain STL classes (e.g. func(std::string); +# versus func(std::string) {}). This also make the inheritance and collaboration +# diagrams that involve STL classes more complete and accurate. +# The default value is: NO. + +BUILTIN_STL_SUPPORT = NO + +# If you use Microsoft's C++/CLI language, you should set this option to YES to +# enable parsing support. +# The default value is: NO. + +CPP_CLI_SUPPORT = NO + +# Set the SIP_SUPPORT tag to YES if your project consists of sip (see: +# https://www.riverbankcomputing.com/software/sip/intro) sources only. Doxygen +# will parse them like normal C++ but will assume all classes use public instead +# of private inheritance when no explicit protection keyword is present. +# The default value is: NO. + +SIP_SUPPORT = NO + +# For Microsoft's IDL there are propget and propput attributes to indicate +# getter and setter methods for a property. Setting this option to YES will make +# doxygen to replace the get and set methods by a property in the documentation. +# This will only work if the methods are indeed getting or setting a simple +# type. If this is not the case, or you want to show the methods anyway, you +# should set this option to NO. +# The default value is: YES. 
+ +IDL_PROPERTY_SUPPORT = YES + +# If member grouping is used in the documentation and the DISTRIBUTE_GROUP_DOC +# tag is set to YES then doxygen will reuse the documentation of the first +# member in the group (if any) for the other members of the group. By default +# all members of a group must be documented explicitly. +# The default value is: NO. + +DISTRIBUTE_GROUP_DOC = NO + +# If one adds a struct or class to a group and this option is enabled, then also +# any nested class or struct is added to the same group. By default this option +# is disabled and one has to add nested compounds explicitly via \ingroup. +# The default value is: NO. + +GROUP_NESTED_COMPOUNDS = NO + +# Set the SUBGROUPING tag to YES to allow class member groups of the same type +# (for instance a group of public functions) to be put as a subgroup of that +# type (e.g. under the Public Functions section). Set it to NO to prevent +# subgrouping. Alternatively, this can be done per class using the +# \nosubgrouping command. +# The default value is: YES. + +SUBGROUPING = YES + +# When the INLINE_GROUPED_CLASSES tag is set to YES, classes, structs and unions +# are shown inside the group in which they are included (e.g. using \ingroup) +# instead of on a separate page (for HTML and Man pages) or section (for LaTeX +# and RTF). +# +# Note that this feature does not work in combination with +# SEPARATE_MEMBER_PAGES. +# The default value is: NO. + +INLINE_GROUPED_CLASSES = NO + +# When the INLINE_SIMPLE_STRUCTS tag is set to YES, structs, classes, and unions +# with only public data fields or simple typedef fields will be shown inline in +# the documentation of the scope in which they are defined (i.e. file, +# namespace, or group documentation), provided this scope is documented. If set +# to NO, structs, classes, and unions are shown on a separate page (for HTML and +# Man pages) or section (for LaTeX and RTF). +# The default value is: NO. 
 + +INLINE_SIMPLE_STRUCTS = NO + +# When TYPEDEF_HIDES_STRUCT tag is enabled, a typedef of a struct, union, or +# enum is documented as struct, union, or enum with the name of the typedef. So +# typedef struct TypeS {} TypeT, will appear in the documentation as a struct +# with name TypeT. When disabled the typedef will appear as a member of a file, +# namespace, or class. And the struct will be named TypeS. This can typically be +# useful for C code in case the coding convention dictates that all compound +# types are typedef'ed and only the typedef is referenced, never the tag name. +# The default value is: NO. + +TYPEDEF_HIDES_STRUCT = YES + +# The size of the symbol lookup cache can be set using LOOKUP_CACHE_SIZE. This +# cache is used to resolve symbols given their name and scope. Since this can be +# an expensive process and often the same symbol appears multiple times in the +# code, doxygen keeps a cache of pre-resolved symbols. If the cache is too small +# doxygen will become slower. If the cache is too large, memory is wasted. The +# cache size is given by this formula: 2^(16+LOOKUP_CACHE_SIZE). The valid range +# is 0..9, the default is 0, corresponding to a cache size of 2^16=65536 +# symbols. At the end of a run doxygen will report the cache usage and suggest +# the optimal cache size from a speed point of view. +# Minimum value: 0, maximum value: 9, default value: 0. + +LOOKUP_CACHE_SIZE = 0 + +# The NUM_PROC_THREADS specifies the number of threads doxygen is allowed to use +# during processing. When set to 0 doxygen will base this on the number of +# cores available in the system. You can set it explicitly to a value larger +# than 0 to get more control over the balance between CPU load and processing +# speed. At this moment only the input processing can be done using multiple +# threads. Since this is still an experimental feature the default is set to 1, +# which effectively disables parallel processing. Please report any issues you +# encounter. 
Generating dot graphs in parallel is controlled by the +# DOT_NUM_THREADS setting. +# Minimum value: 0, maximum value: 32, default value: 1. + +NUM_PROC_THREADS = 1 + +#--------------------------------------------------------------------------- +# Build related configuration options +#--------------------------------------------------------------------------- + +# If the EXTRACT_ALL tag is set to YES, doxygen will assume all entities in +# documentation are documented, even if no documentation was available. Private +# class members and static file members will be hidden unless the +# EXTRACT_PRIVATE respectively EXTRACT_STATIC tags are set to YES. +# Note: This will also disable the warnings about undocumented members that are +# normally produced when WARNINGS is set to YES. +# The default value is: NO. + +EXTRACT_ALL = NO + +# If the EXTRACT_PRIVATE tag is set to YES, all private members of a class will +# be included in the documentation. +# The default value is: NO. + +EXTRACT_PRIVATE = NO + +# If the EXTRACT_PRIV_VIRTUAL tag is set to YES, documented private virtual +# methods of a class will be included in the documentation. +# The default value is: NO. + +EXTRACT_PRIV_VIRTUAL = NO + +# If the EXTRACT_PACKAGE tag is set to YES, all members with package or internal +# scope will be included in the documentation. +# The default value is: NO. + +EXTRACT_PACKAGE = NO + +# If the EXTRACT_STATIC tag is set to YES, all static members of a file will be +# included in the documentation. +# The default value is: NO. + +EXTRACT_STATIC = NO + +# If the EXTRACT_LOCAL_CLASSES tag is set to YES, classes (and structs) defined +# locally in source files will be included in the documentation. If set to NO, +# only classes defined in header files are included. Does not have any effect +# for Java sources. +# The default value is: YES. + +EXTRACT_LOCAL_CLASSES = YES + +# This flag is only useful for Objective-C code. 
If set to YES, local methods, +# which are defined in the implementation section but not in the interface are +# included in the documentation. If set to NO, only methods in the interface are +# included. +# The default value is: NO. + +EXTRACT_LOCAL_METHODS = NO + +# If this flag is set to YES, the members of anonymous namespaces will be +# extracted and appear in the documentation as a namespace called +# 'anonymous_namespace{file}', where file will be replaced with the base name of +# the file that contains the anonymous namespace. By default anonymous namespace +# are hidden. +# The default value is: NO. + +EXTRACT_ANON_NSPACES = NO + +# If this flag is set to YES, the name of an unnamed parameter in a declaration +# will be determined by the corresponding definition. By default unnamed +# parameters remain unnamed in the output. +# The default value is: YES. + +RESOLVE_UNNAMED_PARAMS = YES + +# If the HIDE_UNDOC_MEMBERS tag is set to YES, doxygen will hide all +# undocumented members inside documented classes or files. If set to NO these +# members will be included in the various overviews, but no documentation +# section is generated. This option has no effect if EXTRACT_ALL is enabled. +# The default value is: NO. + +HIDE_UNDOC_MEMBERS = NO + +# If the HIDE_UNDOC_CLASSES tag is set to YES, doxygen will hide all +# undocumented classes that are normally visible in the class hierarchy. If set +# to NO, these classes will be included in the various overviews. This option +# has no effect if EXTRACT_ALL is enabled. +# The default value is: NO. + +HIDE_UNDOC_CLASSES = NO + +# If the HIDE_FRIEND_COMPOUNDS tag is set to YES, doxygen will hide all friend +# declarations. If set to NO, these declarations will be included in the +# documentation. +# The default value is: NO. + +HIDE_FRIEND_COMPOUNDS = NO + +# If the HIDE_IN_BODY_DOCS tag is set to YES, doxygen will hide any +# documentation blocks found inside the body of a function. 
If set to NO, these +# blocks will be appended to the function's detailed documentation block. +# The default value is: NO. + +HIDE_IN_BODY_DOCS = NO + +# The INTERNAL_DOCS tag determines if documentation that is typed after a +# \internal command is included. If the tag is set to NO then the documentation +# will be excluded. Set it to YES to include the internal documentation. +# The default value is: NO. + +INTERNAL_DOCS = NO + +# With the correct setting of option CASE_SENSE_NAMES doxygen will better be +# able to match the capabilities of the underlying filesystem. In case the +# filesystem is case sensitive (i.e. it supports files in the same directory +# whose names only differ in casing), the option must be set to YES to properly +# deal with such files in case they appear in the input. For filesystems that +# are not case sensitive the option should be set to NO to properly deal with +# output files written for symbols that only differ in casing, such as for two +# classes, one named CLASS and the other named Class, and to also support +# references to files without having to specify the exact matching casing. On +# Windows (including Cygwin) and MacOS, users should typically set this option +# to NO, whereas on Linux or other Unix flavors it should typically be set to +# YES. +# The default value is: system dependent. + +CASE_SENSE_NAMES = YES + +# If the HIDE_SCOPE_NAMES tag is set to NO then doxygen will show members with +# their full class and namespace scopes in the documentation. If set to YES, the +# scope will be hidden. +# The default value is: NO. + +HIDE_SCOPE_NAMES = NO + +# If the HIDE_COMPOUND_REFERENCE tag is set to NO (default) then doxygen will +# append additional text to a page's title, such as Class Reference. If set to +# YES the compound reference will be hidden. +# The default value is: NO. 
+ +HIDE_COMPOUND_REFERENCE= NO + +# If the SHOW_INCLUDE_FILES tag is set to YES then doxygen will put a list of +# the files that are included by a file in the documentation of that file. +# The default value is: YES. + +SHOW_INCLUDE_FILES = YES + +# If the SHOW_GROUPED_MEMB_INC tag is set to YES then Doxygen will add for each +# grouped member an include statement to the documentation, telling the reader +# which file to include in order to use the member. +# The default value is: NO. + +SHOW_GROUPED_MEMB_INC = NO + +# If the FORCE_LOCAL_INCLUDES tag is set to YES then doxygen will list include +# files with double quotes in the documentation rather than with sharp brackets. +# The default value is: NO. + +FORCE_LOCAL_INCLUDES = NO + +# If the INLINE_INFO tag is set to YES then a tag [inline] is inserted in the +# documentation for inline members. +# The default value is: YES. + +INLINE_INFO = YES + +# If the SORT_MEMBER_DOCS tag is set to YES then doxygen will sort the +# (detailed) documentation of file and class members alphabetically by member +# name. If set to NO, the members will appear in declaration order. +# The default value is: YES. + +SORT_MEMBER_DOCS = YES + +# If the SORT_BRIEF_DOCS tag is set to YES then doxygen will sort the brief +# descriptions of file, namespace and class members alphabetically by member +# name. If set to NO, the members will appear in declaration order. Note that +# this will also influence the order of the classes in the class list. +# The default value is: NO. + +SORT_BRIEF_DOCS = NO + +# If the SORT_MEMBERS_CTORS_1ST tag is set to YES then doxygen will sort the +# (brief and detailed) documentation of class members so that constructors and +# destructors are listed first. If set to NO the constructors will appear in the +# respective orders defined by SORT_BRIEF_DOCS and SORT_MEMBER_DOCS. +# Note: If SORT_BRIEF_DOCS is set to NO this option is ignored for sorting brief +# member documentation. 
+# Note: If SORT_MEMBER_DOCS is set to NO this option is ignored for sorting +# detailed member documentation. +# The default value is: NO. + +SORT_MEMBERS_CTORS_1ST = NO + +# If the SORT_GROUP_NAMES tag is set to YES then doxygen will sort the hierarchy +# of group names into alphabetical order. If set to NO the group names will +# appear in their defined order. +# The default value is: NO. + +SORT_GROUP_NAMES = NO + +# If the SORT_BY_SCOPE_NAME tag is set to YES, the class list will be sorted by +# fully-qualified names, including namespaces. If set to NO, the class list will +# be sorted only by class name, not including the namespace part. +# Note: This option is not very useful if HIDE_SCOPE_NAMES is set to YES. +# Note: This option applies only to the class list, not to the alphabetical +# list. +# The default value is: NO. + +SORT_BY_SCOPE_NAME = NO + +# If the STRICT_PROTO_MATCHING option is enabled and doxygen fails to do proper +# type resolution of all parameters of a function it will reject a match between +# the prototype and the implementation of a member function even if there is +# only one candidate or it is obvious which candidate to choose by doing a +# simple string match. By disabling STRICT_PROTO_MATCHING doxygen will still +# accept a match between prototype and implementation in such cases. +# The default value is: NO. + +STRICT_PROTO_MATCHING = NO + +# The GENERATE_TODOLIST tag can be used to enable (YES) or disable (NO) the todo +# list. This list is created by putting \todo commands in the documentation. +# The default value is: YES. + +GENERATE_TODOLIST = YES + +# The GENERATE_TESTLIST tag can be used to enable (YES) or disable (NO) the test +# list. This list is created by putting \test commands in the documentation. +# The default value is: YES. + +GENERATE_TESTLIST = YES + +# The GENERATE_BUGLIST tag can be used to enable (YES) or disable (NO) the bug +# list. This list is created by putting \bug commands in the documentation. 
+# The default value is: YES. + +GENERATE_BUGLIST = YES + +# The GENERATE_DEPRECATEDLIST tag can be used to enable (YES) or disable (NO) +# the deprecated list. This list is created by putting \deprecated commands in +# the documentation. +# The default value is: YES. + +GENERATE_DEPRECATEDLIST= YES + +# The ENABLED_SECTIONS tag can be used to enable conditional documentation +# sections, marked by \if ... \endif and \cond +# ... \endcond blocks. + +ENABLED_SECTIONS = + +# The MAX_INITIALIZER_LINES tag determines the maximum number of lines that the +# initial value of a variable or macro / define can have for it to appear in the +# documentation. If the initializer consists of more lines than specified here +# it will be hidden. Use a value of 0 to hide initializers completely. The +# appearance of the value of individual variables and macros / defines can be +# controlled using \showinitializer or \hideinitializer command in the +# documentation regardless of this setting. +# Minimum value: 0, maximum value: 10000, default value: 30. + +MAX_INITIALIZER_LINES = 30 + +# Set the SHOW_USED_FILES tag to NO to disable the list of files generated at +# the bottom of the documentation of classes and structs. If set to YES, the +# list will mention the files that were used to generate the documentation. +# The default value is: YES. + +SHOW_USED_FILES = NO + +# Set the SHOW_FILES tag to NO to disable the generation of the Files page. This +# will remove the Files entry from the Quick Index and from the Folder Tree View +# (if specified). +# The default value is: YES. + +SHOW_FILES = YES + +# Set the SHOW_NAMESPACES tag to NO to disable the generation of the Namespaces +# page. This will remove the Namespaces entry from the Quick Index and from the +# Folder Tree View (if specified). +# The default value is: YES. 
+ +SHOW_NAMESPACES = NO + +# The FILE_VERSION_FILTER tag can be used to specify a program or script that +# doxygen should invoke to get the current version for each file (typically from +# the version control system). Doxygen will invoke the program by executing (via +# popen()) the command <command> <input-file>, where <command> is the value of the +# FILE_VERSION_FILTER tag, and <input-file> is the name of an input file provided +# by doxygen. Whatever the program writes to standard output is used as the file +# version. For an example see the documentation. + +FILE_VERSION_FILTER = + +# The LAYOUT_FILE tag can be used to specify a layout file which will be parsed +# by doxygen. The layout file controls the global structure of the generated +# output files in an output format independent way. To create the layout file +# that represents doxygen's defaults, run doxygen with the -l option. You can +# optionally specify a file name after the option, if omitted DoxygenLayout.xml +# will be used as the name of the layout file. +# +# Note that if you run doxygen from a directory containing a file called +# DoxygenLayout.xml, doxygen will parse it automatically even if the LAYOUT_FILE +# tag is left empty. + +LAYOUT_FILE = + +# The CITE_BIB_FILES tag can be used to specify one or more bib files containing +# the reference definitions. This must be a list of .bib files. The .bib +# extension is automatically appended if omitted. This requires the bibtex tool +# to be installed. See also https://en.wikipedia.org/wiki/BibTeX for more info. +# For LaTeX the style of the bibliography can be controlled using +# LATEX_BIB_STYLE. To use this feature you need bibtex and perl available in the +# search path. See also \cite for info how to create references. 
+ +CITE_BIB_FILES = + +#--------------------------------------------------------------------------- +# Configuration options related to warning and progress messages +#--------------------------------------------------------------------------- + +# The QUIET tag can be used to turn on/off the messages that are generated to +# standard output by doxygen. If QUIET is set to YES this implies that the +# messages are off. +# The default value is: NO. + +QUIET = NO + +# The WARNINGS tag can be used to turn on/off the warning messages that are +# generated to standard error (stderr) by doxygen. If WARNINGS is set to YES +# this implies that the warnings are on. +# +# Tip: Turn warnings on while writing the documentation. +# The default value is: YES. + +WARNINGS = YES + +# If the WARN_IF_UNDOCUMENTED tag is set to YES then doxygen will generate +# warnings for undocumented members. If EXTRACT_ALL is set to YES then this flag +# will automatically be disabled. +# The default value is: YES. + +WARN_IF_UNDOCUMENTED = YES + +# If the WARN_IF_DOC_ERROR tag is set to YES, doxygen will generate warnings for +# potential errors in the documentation, such as not documenting some parameters +# in a documented function, or documenting parameters that don't exist or using +# markup commands wrongly. +# The default value is: YES. + +WARN_IF_DOC_ERROR = YES + +# This WARN_NO_PARAMDOC option can be enabled to get warnings for functions that +# are documented, but have no documentation for their parameters or return +# value. If set to NO, doxygen will only warn about wrong or incomplete +# parameter documentation, but not about the absence of documentation. If +# EXTRACT_ALL is set to YES then this flag will automatically be disabled. +# The default value is: NO. + +WARN_NO_PARAMDOC = NO + +# If the WARN_AS_ERROR tag is set to YES then doxygen will immediately stop when +# a warning is encountered. 
If the WARN_AS_ERROR tag is set to FAIL_ON_WARNINGS +# then doxygen will continue running as if WARN_AS_ERROR tag is set to NO, but +# at the end of the doxygen process doxygen will return with a non-zero status. +# Possible values are: NO, YES and FAIL_ON_WARNINGS. +# The default value is: NO. + +WARN_AS_ERROR = NO + +# The WARN_FORMAT tag determines the format of the warning messages that doxygen +# can produce. The string should contain the $file, $line, and $text tags, which +# will be replaced by the file and line number from which the warning originated +# and the warning text. Optionally the format may contain $version, which will +# be replaced by the version of the file (if it could be obtained via +# FILE_VERSION_FILTER) +# The default value is: $file:$line: $text. + +WARN_FORMAT = "$file:$line: $text" + +# The WARN_LOGFILE tag can be used to specify a file to which warning and error +# messages should be written. If left blank the output is written to standard +# error (stderr). + +WARN_LOGFILE = + +#--------------------------------------------------------------------------- +# Configuration options related to the input files +#--------------------------------------------------------------------------- + +# The INPUT tag is used to specify the files and/or directories that contain +# documented source files. You may enter file names like myfile.cpp or +# directories like /usr/src/myproject. Separate the files or directories with +# spaces. See also FILE_PATTERNS and EXTENSION_MAPPING +# Note: If this tag is empty the current directory is searched. + +INPUT = src/libsds/include/sds.h + +# This tag can be used to specify the character encoding of the source files +# that doxygen parses. Internally doxygen uses the UTF-8 encoding. Doxygen uses +# libiconv (or the iconv built into libc) for the transcoding. See the libiconv +# documentation (see: +# https://www.gnu.org/software/libiconv/) for the list of possible encodings. +# The default value is: UTF-8. 
+ +INPUT_ENCODING = UTF-8 + +# If the value of the INPUT tag contains directories, you can use the +# FILE_PATTERNS tag to specify one or more wildcard patterns (like *.cpp and +# *.h) to filter out the source-files in the directories. +# +# Note that for custom extensions or not directly supported extensions you also +# need to set EXTENSION_MAPPING for the extension otherwise the files are not +# read by doxygen. +# +# Note the list of default checked file patterns might differ from the list of +# default file extension mappings. +# +# If left blank the following patterns are tested:*.c, *.cc, *.cxx, *.cpp, +# *.c++, *.java, *.ii, *.ixx, *.ipp, *.i++, *.inl, *.idl, *.ddl, *.odl, *.h, +# *.hh, *.hxx, *.hpp, *.h++, *.cs, *.d, *.php, *.php4, *.php5, *.phtml, *.inc, +# *.m, *.markdown, *.md, *.mm, *.dox (to be provided as doxygen C comment), +# *.py, *.pyw, *.f90, *.f95, *.f03, *.f08, *.f18, *.f, *.for, *.vhd, *.vhdl, +# *.ucf, *.qsf and *.ice. + +FILE_PATTERNS = + +# The RECURSIVE tag can be used to specify whether or not subdirectories should +# be searched for input files as well. +# The default value is: NO. + +RECURSIVE = NO + +# The EXCLUDE tag can be used to specify files and/or directories that should be +# excluded from the INPUT source files. This way you can easily exclude a +# subdirectory from a directory tree whose root is specified with the INPUT tag. +# +# Note that relative paths are relative to the directory from which doxygen is +# run. + +EXCLUDE = + +# The EXCLUDE_SYMLINKS tag can be used to select whether or not files or +# directories that are symbolic links (a Unix file system feature) are excluded +# from the input. +# The default value is: NO. + +EXCLUDE_SYMLINKS = NO + +# If the value of the INPUT tag contains directories, you can use the +# EXCLUDE_PATTERNS tag to specify one or more wildcard patterns to exclude +# certain files from those directories. 
+# +# Note that the wildcards are matched against the file with absolute path, so to +# exclude all test directories for example use the pattern */test/* + +EXCLUDE_PATTERNS = + +# The EXCLUDE_SYMBOLS tag can be used to specify one or more symbol names +# (namespaces, classes, functions, etc.) that should be excluded from the +# output. The symbol name can be a fully qualified name, a word, or if the +# wildcard * is used, a substring. Examples: ANamespace, AClass, +# AClass::ANamespace, ANamespace::*Test +# +# Note that the wildcards are matched against the file with absolute path, so to +# exclude all test directories use the pattern */test/* + +EXCLUDE_SYMBOLS = + +# The EXAMPLE_PATH tag can be used to specify one or more files or directories +# that contain example code fragments that are included (see the \include +# command). + +EXAMPLE_PATH = + +# If the value of the EXAMPLE_PATH tag contains directories, you can use the +# EXAMPLE_PATTERNS tag to specify one or more wildcard pattern (like *.cpp and +# *.h) to filter out the source-files in the directories. If left blank all +# files are included. + +EXAMPLE_PATTERNS = + +# If the EXAMPLE_RECURSIVE tag is set to YES then subdirectories will be +# searched for input files to be used with the \include or \dontinclude commands +# irrespective of the value of the RECURSIVE tag. +# The default value is: NO. + +EXAMPLE_RECURSIVE = NO + +# The IMAGE_PATH tag can be used to specify one or more files or directories +# that contain images that are to be included in the documentation (see the +# \image command). + +IMAGE_PATH = + +# The INPUT_FILTER tag can be used to specify a program that doxygen should +# invoke to filter for each input file. Doxygen will invoke the filter program +# by executing (via popen()) the command: +# +# <filter> <input-file> +# +# where <filter> is the value of the INPUT_FILTER tag, and <input-file> is the +# name of an input file. Doxygen will then use the output that the filter +# program writes to standard output. 
If FILTER_PATTERNS is specified, this tag +# will be ignored. +# +# Note that the filter must not add or remove lines; it is applied before the +# code is scanned, but not when the output code is generated. If lines are added +# or removed, the anchors will not be placed correctly. +# +# Note that for custom extensions or not directly supported extensions you also +# need to set EXTENSION_MAPPING for the extension otherwise the files are not +# properly processed by doxygen. + +INPUT_FILTER = + +# The FILTER_PATTERNS tag can be used to specify filters on a per file pattern +# basis. Doxygen will compare the file name with each pattern and apply the +# filter if there is a match. The filters are a list of the form: pattern=filter +# (like *.cpp=my_cpp_filter). See INPUT_FILTER for further information on how +# filters are used. If the FILTER_PATTERNS tag is empty or if none of the +# patterns match the file name, INPUT_FILTER is applied. +# +# Note that for custom extensions or not directly supported extensions you also +# need to set EXTENSION_MAPPING for the extension otherwise the files are not +# properly processed by doxygen. + +FILTER_PATTERNS = + +# If the FILTER_SOURCE_FILES tag is set to YES, the input filter (if set using +# INPUT_FILTER) will also be used to filter the input files that are used for +# producing the source files to browse (i.e. when SOURCE_BROWSER is set to YES). +# The default value is: NO. + +FILTER_SOURCE_FILES = NO + +# The FILTER_SOURCE_PATTERNS tag can be used to specify source filters per file +# pattern. A pattern will override the setting for FILTER_PATTERN (if any) and +# it is also possible to disable source filtering for a specific pattern using +# *.ext= (so without naming a filter). +# This tag requires that the tag FILTER_SOURCE_FILES is set to YES. 
+ +FILTER_SOURCE_PATTERNS = + +# If the USE_MDFILE_AS_MAINPAGE tag refers to the name of a markdown file that +# is part of the input, its contents will be placed on the main page +# (index.html). This can be useful if you have a project on for instance GitHub +# and want to reuse the introduction page also for the doxygen output. + +USE_MDFILE_AS_MAINPAGE = + +#--------------------------------------------------------------------------- +# Configuration options related to source browsing +#--------------------------------------------------------------------------- + +# If the SOURCE_BROWSER tag is set to YES then a list of source files will be +# generated. Documented entities will be cross-referenced with these sources. +# +# Note: To get rid of all source code in the generated output, make sure that +# also VERBATIM_HEADERS is set to NO. +# The default value is: NO. + +SOURCE_BROWSER = NO + +# Setting the INLINE_SOURCES tag to YES will include the body of functions, +# classes and enums directly into the documentation. +# The default value is: NO. + +INLINE_SOURCES = NO + +# Setting the STRIP_CODE_COMMENTS tag to YES will instruct doxygen to hide any +# special comment blocks from generated source code fragments. Normal C, C++ and +# Fortran comments will always remain visible. +# The default value is: YES. + +STRIP_CODE_COMMENTS = YES + +# If the REFERENCED_BY_RELATION tag is set to YES then for each documented +# entity all documented functions referencing it will be listed. +# The default value is: NO. + +REFERENCED_BY_RELATION = NO + +# If the REFERENCES_RELATION tag is set to YES then for each documented function +# all documented entities called/used by that function will be listed. +# The default value is: NO. + +REFERENCES_RELATION = NO + +# If the REFERENCES_LINK_SOURCE tag is set to YES and SOURCE_BROWSER tag is set +# to YES then the hyperlinks from functions in REFERENCES_RELATION and +# REFERENCED_BY_RELATION lists will link to the source code. 
Otherwise they will +# link to the documentation. +# The default value is: YES. + +REFERENCES_LINK_SOURCE = YES + +# If SOURCE_TOOLTIPS is enabled (the default) then hovering a hyperlink in the +# source code will show a tooltip with additional information such as prototype, +# brief description and links to the definition and documentation. Since this +# will make the HTML file larger and loading of large files a bit slower, you +# can opt to disable this feature. +# The default value is: YES. +# This tag requires that the tag SOURCE_BROWSER is set to YES. + +SOURCE_TOOLTIPS = YES + +# If the USE_HTAGS tag is set to YES then the references to source code will +# point to the HTML generated by the htags(1) tool instead of doxygen built-in +# source browser. The htags tool is part of GNU's global source tagging system +# (see https://www.gnu.org/software/global/global.html). You will need version +# 4.8.6 or higher. +# +# To use it do the following: +# - Install the latest version of global +# - Enable SOURCE_BROWSER and USE_HTAGS in the configuration file +# - Make sure the INPUT points to the root of the source tree +# - Run doxygen as normal +# +# Doxygen will invoke htags (and that will in turn invoke gtags), so these +# tools must be available from the command line (i.e. in the search path). +# +# The result: instead of the source browser generated by doxygen, the links to +# source code will now point to the output of htags. +# The default value is: NO. +# This tag requires that the tag SOURCE_BROWSER is set to YES. + +USE_HTAGS = NO + +# If the VERBATIM_HEADERS tag is set to YES then doxygen will generate a +# verbatim copy of the header file for each class for which an include is +# specified. Set to NO to disable this. +# See also: Section \class. +# The default value is: YES. 
+ +VERBATIM_HEADERS = YES + +# If the CLANG_ASSISTED_PARSING tag is set to YES then doxygen will use the +# clang parser (see: +# http://clang.llvm.org/) for more accurate parsing at the cost of reduced +# performance. This can be particularly helpful with template rich C++ code for +# which doxygen's built-in parser lacks the necessary type information. +# Note: The availability of this option depends on whether or not doxygen was +# generated with the -Duse_libclang=ON option for CMake. +# The default value is: NO. + +CLANG_ASSISTED_PARSING = NO + +# If clang assisted parsing is enabled and the CLANG_ADD_INC_PATHS tag is set to +# YES then doxygen will add the directory of each input to the include path. +# The default value is: YES. + +CLANG_ADD_INC_PATHS = YES + +# If clang assisted parsing is enabled you can provide the compiler with command +# line options that you would normally use when invoking the compiler. Note that +# the include paths will already be set by doxygen for the files and directories +# specified with INPUT and INCLUDE_PATH. +# This tag requires that the tag CLANG_ASSISTED_PARSING is set to YES. + +CLANG_OPTIONS = + +# If clang assisted parsing is enabled you can provide the clang parser with the +# path to the directory containing a file called compile_commands.json. This +# file is the compilation database (see: +# http://clang.llvm.org/docs/HowToSetupToolingForLLVM.html) containing the +# options used when the source files were built. This is equivalent to +# specifying the -p option to a clang tool, such as clang-check. These options +# will then be passed to the parser. Any options specified with CLANG_OPTIONS +# will be added as well. +# Note: The availability of this option depends on whether or not doxygen was +# generated with the -Duse_libclang=ON option for CMake. 
+ +CLANG_DATABASE_PATH = + +#--------------------------------------------------------------------------- +# Configuration options related to the alphabetical class index +#--------------------------------------------------------------------------- + +# If the ALPHABETICAL_INDEX tag is set to YES, an alphabetical index of all +# compounds will be generated. Enable this if the project contains a lot of +# classes, structs, unions or interfaces. +# The default value is: YES. + +ALPHABETICAL_INDEX = YES + +# In case all classes in a project start with a common prefix, all classes will +# be put under the same header in the alphabetical index. The IGNORE_PREFIX tag +# can be used to specify a prefix (or a list of prefixes) that should be ignored +# while generating the index headers. +# This tag requires that the tag ALPHABETICAL_INDEX is set to YES. + +IGNORE_PREFIX = + +#--------------------------------------------------------------------------- +# Configuration options related to the HTML output +#--------------------------------------------------------------------------- + +# If the GENERATE_HTML tag is set to YES, doxygen will generate HTML output +# The default value is: YES. + +GENERATE_HTML = YES + +# The HTML_OUTPUT tag is used to specify where the HTML docs will be put. If a +# relative path is entered the value of OUTPUT_DIRECTORY will be put in front of +# it. +# The default directory is: html. +# This tag requires that the tag GENERATE_HTML is set to YES. + +HTML_OUTPUT = html + +# The HTML_FILE_EXTENSION tag can be used to specify the file extension for each +# generated HTML page (for example: .htm, .php, .asp). +# The default value is: .html. +# This tag requires that the tag GENERATE_HTML is set to YES. + +HTML_FILE_EXTENSION = .html + +# The HTML_HEADER tag can be used to specify a user-defined HTML header file for +# each generated HTML page. If the tag is left blank doxygen will generate a +# standard header. 
+# +# To get valid HTML the header file that includes any scripts and style sheets +# that doxygen needs, which is dependent on the configuration options used (e.g. +# the setting GENERATE_TREEVIEW). It is highly recommended to start with a +# default header using +# doxygen -w html new_header.html new_footer.html new_stylesheet.css +# YourConfigFile +# and then modify the file new_header.html. See also section "Doxygen usage" +# for information on how to generate the default header that doxygen normally +# uses. +# Note: The header is subject to change so you typically have to regenerate the +# default header when upgrading to a newer version of doxygen. For a description +# of the possible markers and block names see the documentation. +# This tag requires that the tag GENERATE_HTML is set to YES. + +HTML_HEADER = + +# The HTML_FOOTER tag can be used to specify a user-defined HTML footer for each +# generated HTML page. If the tag is left blank doxygen will generate a standard +# footer. See HTML_HEADER for more information on how to generate a default +# footer and what special commands can be used inside the footer. See also +# section "Doxygen usage" for information on how to generate the default footer +# that doxygen normally uses. +# This tag requires that the tag GENERATE_HTML is set to YES. + +HTML_FOOTER = + +# The HTML_STYLESHEET tag can be used to specify a user-defined cascading style +# sheet that is used by each HTML page. It can be used to fine-tune the look of +# the HTML output. If left blank doxygen will generate a default style sheet. +# See also section "Doxygen usage" for information on how to generate the style +# sheet that doxygen normally uses. +# Note: It is recommended to use HTML_EXTRA_STYLESHEET instead of this tag, as +# it is more robust and this tag (HTML_STYLESHEET) will in the future become +# obsolete. +# This tag requires that the tag GENERATE_HTML is set to YES. 
+ +HTML_STYLESHEET = + +# The HTML_EXTRA_STYLESHEET tag can be used to specify additional user-defined +# cascading style sheets that are included after the standard style sheets +# created by doxygen. Using this option one can overrule certain style aspects. +# This is preferred over using HTML_STYLESHEET since it does not replace the +# standard style sheet and is therefore more robust against future updates. +# Doxygen will copy the style sheet files to the output directory. +# Note: The order of the extra style sheet files is of importance (e.g. the last +# style sheet in the list overrules the setting of the previous ones in the +# list). For an example see the documentation. +# This tag requires that the tag GENERATE_HTML is set to YES. + +HTML_EXTRA_STYLESHEET = docs/custom.css + +# The HTML_EXTRA_FILES tag can be used to specify one or more extra images or +# other source files which should be copied to the HTML output directory. Note +# that these files will be copied to the base HTML output directory. Use the +# $relpath^ marker in the HTML_HEADER and/or HTML_FOOTER files to load these +# files. In the HTML_STYLESHEET file, use the file name only. Also note that the +# files will be copied as-is; there are no commands or markers available. +# This tag requires that the tag GENERATE_HTML is set to YES. + +HTML_EXTRA_FILES = + +# The HTML_COLORSTYLE_HUE tag controls the color of the HTML output. Doxygen +# will adjust the colors in the style sheet and background images according to +# this color. Hue is specified as an angle on a colorwheel, see +# https://en.wikipedia.org/wiki/Hue for more information. For instance the value +# 0 represents red, 60 is yellow, 120 is green, 180 is cyan, 240 is blue, 300 +# purple, and 360 is red again. +# Minimum value: 0, maximum value: 359, default value: 220. +# This tag requires that the tag GENERATE_HTML is set to YES. 
+ +HTML_COLORSTYLE_HUE = 195 + +# The HTML_COLORSTYLE_SAT tag controls the purity (or saturation) of the colors +# in the HTML output. For a value of 0 the output will use grayscales only. A +# value of 255 will produce the most vivid colors. +# Minimum value: 0, maximum value: 255, default value: 100. +# This tag requires that the tag GENERATE_HTML is set to YES. + +HTML_COLORSTYLE_SAT = 96 + +# The HTML_COLORSTYLE_GAMMA tag controls the gamma correction applied to the +# luminance component of the colors in the HTML output. Values below 100 +# gradually make the output lighter, whereas values above 100 make the output +# darker. The value divided by 100 is the actual gamma applied, so 80 represents +# a gamma of 0.8, The value 220 represents a gamma of 2.2, and 100 does not +# change the gamma. +# Minimum value: 40, maximum value: 240, default value: 80. +# This tag requires that the tag GENERATE_HTML is set to YES. + +HTML_COLORSTYLE_GAMMA = 80 + +# If the HTML_TIMESTAMP tag is set to YES then the footer of each generated HTML +# page will contain the date and time when the page was generated. Setting this +# to YES can help to show when doxygen was last run and thus if the +# documentation is up to date. +# The default value is: NO. +# This tag requires that the tag GENERATE_HTML is set to YES. + +HTML_TIMESTAMP = NO + +# If the HTML_DYNAMIC_MENUS tag is set to YES then the generated HTML +# documentation will contain a main index with vertical navigation menus that +# are dynamically created via JavaScript. If disabled, the navigation index will +# consists of multiple levels of tabs that are statically embedded in every HTML +# page. Disable this option to support browsers that do not have JavaScript, +# like the Qt help browser. +# The default value is: YES. +# This tag requires that the tag GENERATE_HTML is set to YES. 
+ +HTML_DYNAMIC_MENUS = YES + +# If the HTML_DYNAMIC_SECTIONS tag is set to YES then the generated HTML +# documentation will contain sections that can be hidden and shown after the +# page has loaded. +# The default value is: NO. +# This tag requires that the tag GENERATE_HTML is set to YES. + +HTML_DYNAMIC_SECTIONS = NO + +# With HTML_INDEX_NUM_ENTRIES one can control the preferred number of entries +# shown in the various tree structured indices initially; the user can expand +# and collapse entries dynamically later on. Doxygen will expand the tree to +# such a level that at most the specified number of entries are visible (unless +# a fully collapsed tree already exceeds this amount). So setting the number of +# entries 1 will produce a full collapsed tree by default. 0 is a special value +# representing an infinite number of entries and will result in a full expanded +# tree by default. +# Minimum value: 0, maximum value: 9999, default value: 100. +# This tag requires that the tag GENERATE_HTML is set to YES. + +HTML_INDEX_NUM_ENTRIES = 100 + +# If the GENERATE_DOCSET tag is set to YES, additional index files will be +# generated that can be used as input for Apple's Xcode 3 integrated development +# environment (see: +# https://developer.apple.com/xcode/), introduced with OSX 10.5 (Leopard). To +# create a documentation set, doxygen will generate a Makefile in the HTML +# output directory. Running make will produce the docset in that directory and +# running make install will install the docset in +# ~/Library/Developer/Shared/Documentation/DocSets so that Xcode will find it at +# startup. See https://developer.apple.com/library/archive/featuredarticles/Doxy +# genXcode/_index.html for more information. +# The default value is: NO. +# This tag requires that the tag GENERATE_HTML is set to YES. + +GENERATE_DOCSET = NO + +# This tag determines the name of the docset feed. 
A documentation feed provides +# an umbrella under which multiple documentation sets from a single provider +# (such as a company or product suite) can be grouped. +# The default value is: Doxygen generated docs. +# This tag requires that the tag GENERATE_DOCSET is set to YES. + +DOCSET_FEEDNAME = "Doxygen generated docs" + +# This tag specifies a string that should uniquely identify the documentation +# set bundle. This should be a reverse domain-name style string, e.g. +# com.mycompany.MyDocSet. Doxygen will append .docset to the name. +# The default value is: org.doxygen.Project. +# This tag requires that the tag GENERATE_DOCSET is set to YES. + +DOCSET_BUNDLE_ID = org.doxygen.Project + +# The DOCSET_PUBLISHER_ID tag specifies a string that should uniquely identify +# the documentation publisher. This should be a reverse domain-name style +# string, e.g. com.mycompany.MyDocSet.documentation. +# The default value is: org.doxygen.Publisher. +# This tag requires that the tag GENERATE_DOCSET is set to YES. + +DOCSET_PUBLISHER_ID = org.doxygen.Publisher + +# The DOCSET_PUBLISHER_NAME tag identifies the documentation publisher. +# The default value is: Publisher. +# This tag requires that the tag GENERATE_DOCSET is set to YES. + +DOCSET_PUBLISHER_NAME = Publisher + +# If the GENERATE_HTMLHELP tag is set to YES then doxygen generates three +# additional HTML index files: index.hhp, index.hhc, and index.hhk. The +# index.hhp is a project file that can be read by Microsoft's HTML Help Workshop +# (see: +# https://www.microsoft.com/en-us/download/details.aspx?id=21138) on Windows. +# +# The HTML Help Workshop contains a compiler that can convert all HTML output +# generated by doxygen into a single compiled HTML file (.chm). Compiled HTML +# files are now used as the Windows 98 help format, and will replace the old +# Windows help format (.hlp) on all Windows platforms in the future. 
Compressed +# HTML files also contain an index, a table of contents, and you can search for +# words in the documentation. The HTML workshop also contains a viewer for +# compressed HTML files. +# The default value is: NO. +# This tag requires that the tag GENERATE_HTML is set to YES. + +GENERATE_HTMLHELP = NO + +# The CHM_FILE tag can be used to specify the file name of the resulting .chm +# file. You can add a path in front of the file if the result should not be +# written to the html output directory. +# This tag requires that the tag GENERATE_HTMLHELP is set to YES. + +CHM_FILE = + +# The HHC_LOCATION tag can be used to specify the location (absolute path +# including file name) of the HTML help compiler (hhc.exe). If non-empty, +# doxygen will try to run the HTML help compiler on the generated index.hhp. +# The file has to be specified with full path. +# This tag requires that the tag GENERATE_HTMLHELP is set to YES. + +HHC_LOCATION = + +# The GENERATE_CHI flag controls if a separate .chi index file is generated +# (YES) or that it should be included in the main .chm file (NO). +# The default value is: NO. +# This tag requires that the tag GENERATE_HTMLHELP is set to YES. + +GENERATE_CHI = NO + +# The CHM_INDEX_ENCODING is used to encode HtmlHelp index (hhk), content (hhc) +# and project file content. +# This tag requires that the tag GENERATE_HTMLHELP is set to YES. + +CHM_INDEX_ENCODING = + +# The BINARY_TOC flag controls whether a binary table of contents is generated +# (YES) or a normal table of contents (NO) in the .chm file. Furthermore it +# enables the Previous and Next buttons. +# The default value is: NO. +# This tag requires that the tag GENERATE_HTMLHELP is set to YES. + +BINARY_TOC = NO + +# The TOC_EXPAND flag can be set to YES to add extra items for group members to +# the table of contents of the HTML help documentation and to the tree view. +# The default value is: NO. +# This tag requires that the tag GENERATE_HTMLHELP is set to YES. 
+ +TOC_EXPAND = NO + +# If the GENERATE_QHP tag is set to YES and both QHP_NAMESPACE and +# QHP_VIRTUAL_FOLDER are set, an additional index file will be generated that +# can be used as input for Qt's qhelpgenerator to generate a Qt Compressed Help +# (.qch) of the generated HTML documentation. +# The default value is: NO. +# This tag requires that the tag GENERATE_HTML is set to YES. + +GENERATE_QHP = NO + +# If the QHG_LOCATION tag is specified, the QCH_FILE tag can be used to specify +# the file name of the resulting .qch file. The path specified is relative to +# the HTML output folder. +# This tag requires that the tag GENERATE_QHP is set to YES. + +QCH_FILE = + +# The QHP_NAMESPACE tag specifies the namespace to use when generating Qt Help +# Project output. For more information please see Qt Help Project / Namespace +# (see: +# https://doc.qt.io/archives/qt-4.8/qthelpproject.html#namespace). +# The default value is: org.doxygen.Project. +# This tag requires that the tag GENERATE_QHP is set to YES. + +QHP_NAMESPACE = org.doxygen.Project + +# The QHP_VIRTUAL_FOLDER tag specifies the namespace to use when generating Qt +# Help Project output. For more information please see Qt Help Project / Virtual +# Folders (see: +# https://doc.qt.io/archives/qt-4.8/qthelpproject.html#virtual-folders). +# The default value is: doc. +# This tag requires that the tag GENERATE_QHP is set to YES. + +QHP_VIRTUAL_FOLDER = doc + +# If the QHP_CUST_FILTER_NAME tag is set, it specifies the name of a custom +# filter to add. For more information please see Qt Help Project / Custom +# Filters (see: +# https://doc.qt.io/archives/qt-4.8/qthelpproject.html#custom-filters). +# This tag requires that the tag GENERATE_QHP is set to YES. + +QHP_CUST_FILTER_NAME = + +# The QHP_CUST_FILTER_ATTRS tag specifies the list of the attributes of the +# custom filter to add. 
For more information please see Qt Help Project / Custom +# Filters (see: +# https://doc.qt.io/archives/qt-4.8/qthelpproject.html#custom-filters). +# This tag requires that the tag GENERATE_QHP is set to YES. + +QHP_CUST_FILTER_ATTRS = + +# The QHP_SECT_FILTER_ATTRS tag specifies the list of the attributes this +# project's filter section matches. Qt Help Project / Filter Attributes (see: +# https://doc.qt.io/archives/qt-4.8/qthelpproject.html#filter-attributes). +# This tag requires that the tag GENERATE_QHP is set to YES. + +QHP_SECT_FILTER_ATTRS = + +# The QHG_LOCATION tag can be used to specify the location (absolute path +# including file name) of Qt's qhelpgenerator. If non-empty doxygen will try to +# run qhelpgenerator on the generated .qhp file. +# This tag requires that the tag GENERATE_QHP is set to YES. + +QHG_LOCATION = + +# If the GENERATE_ECLIPSEHELP tag is set to YES, additional index files will be +# generated, together with the HTML files, they form an Eclipse help plugin. To +# install this plugin and make it available under the help contents menu in +# Eclipse, the contents of the directory containing the HTML and XML files needs +# to be copied into the plugins directory of eclipse. The name of the directory +# within the plugins directory should be the same as the ECLIPSE_DOC_ID value. +# After copying Eclipse needs to be restarted before the help appears. +# The default value is: NO. +# This tag requires that the tag GENERATE_HTML is set to YES. + +GENERATE_ECLIPSEHELP = NO + +# A unique identifier for the Eclipse help plugin. When installing the plugin +# the directory name containing the HTML and XML files should also have this +# name. Each documentation set should have its own identifier. +# The default value is: org.doxygen.Project. +# This tag requires that the tag GENERATE_ECLIPSEHELP is set to YES. 
+ +ECLIPSE_DOC_ID = org.doxygen.Project + +# If you want full control over the layout of the generated HTML pages it might +# be necessary to disable the index and replace it with your own. The +# DISABLE_INDEX tag can be used to turn on/off the condensed index (tabs) at top +# of each HTML page. A value of NO enables the index and the value YES disables +# it. Since the tabs in the index contain the same information as the navigation +# tree, you can set this option to YES if you also set GENERATE_TREEVIEW to YES. +# The default value is: NO. +# This tag requires that the tag GENERATE_HTML is set to YES. + +DISABLE_INDEX = NO + +# The GENERATE_TREEVIEW tag is used to specify whether a tree-like index +# structure should be generated to display hierarchical information. If the tag +# value is set to YES, a side panel will be generated containing a tree-like +# index structure (just like the one that is generated for HTML Help). For this +# to work a browser that supports JavaScript, DHTML, CSS and frames is required +# (i.e. any modern browser). Windows users are probably better off using the +# HTML help feature. Via custom style sheets (see HTML_EXTRA_STYLESHEET) one can +# further fine-tune the look of the index. As an example, the default style +# sheet generated by doxygen has an example that shows how to put an image at +# the root of the tree instead of the PROJECT_NAME. Since the tree basically has +# the same information as the tab index, you could consider setting +# DISABLE_INDEX to YES when enabling this option. +# The default value is: NO. +# This tag requires that the tag GENERATE_HTML is set to YES. + +GENERATE_TREEVIEW = YES + +# The ENUM_VALUES_PER_LINE tag can be used to set the number of enum values that +# doxygen will group on one line in the generated HTML documentation. +# +# Note that a value of 0 will completely suppress the enum values from appearing +# in the overview section. +# Minimum value: 0, maximum value: 20, default value: 4. 
+# This tag requires that the tag GENERATE_HTML is set to YES. + +ENUM_VALUES_PER_LINE = 4 + +# If the treeview is enabled (see GENERATE_TREEVIEW) then this tag can be used +# to set the initial width (in pixels) of the frame in which the tree is shown. +# Minimum value: 0, maximum value: 1500, default value: 250. +# This tag requires that the tag GENERATE_HTML is set to YES. + +TREEVIEW_WIDTH = 250 + +# If the EXT_LINKS_IN_WINDOW option is set to YES, doxygen will open links to +# external symbols imported via tag files in a separate window. +# The default value is: NO. +# This tag requires that the tag GENERATE_HTML is set to YES. + +EXT_LINKS_IN_WINDOW = NO + +# If the HTML_FORMULA_FORMAT option is set to svg, doxygen will use the pdf2svg +# tool (see https://github.com/dawbarton/pdf2svg) or inkscape (see +# https://inkscape.org) to generate formulas as SVG images instead of PNGs for +# the HTML output. These images will generally look nicer at scaled resolutions. +# Possible values are: png (the default) and svg (looks nicer but requires the +# pdf2svg or inkscape tool). +# The default value is: png. +# This tag requires that the tag GENERATE_HTML is set to YES. + +HTML_FORMULA_FORMAT = png + +# Use this tag to change the font size of LaTeX formulas included as images in +# the HTML documentation. When you change the font size after a successful +# doxygen run you need to manually remove any form_*.png images from the HTML +# output directory to force them to be regenerated. +# Minimum value: 8, maximum value: 50, default value: 10. +# This tag requires that the tag GENERATE_HTML is set to YES. + +FORMULA_FONTSIZE = 10 + +# Use the FORMULA_TRANSPARENT tag to determine whether or not the images +# generated for formulas are transparent PNGs. Transparent PNGs are not +# supported properly for IE 6.0, but are supported on all modern browsers. 
+
+# Note that when changing this option you need to delete any form_*.png files in
+# the HTML output directory before the changes have effect.
+# The default value is: YES.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+FORMULA_TRANSPARENT = YES
+
+# The FORMULA_MACROFILE can contain LaTeX \newcommand and \renewcommand commands
+# to create new LaTeX commands to be used in formulas as building blocks. See
+# the section "Including formulas" for details.
+
+FORMULA_MACROFILE =
+
+# Enable the USE_MATHJAX option to render LaTeX formulas using MathJax (see
+# https://www.mathjax.org) which uses client side JavaScript for the rendering
+# instead of using pre-rendered bitmaps. Use this if you do not have LaTeX
+# installed or if you want the formulas to look prettier in the HTML output. When
+# enabled you may also need to install MathJax separately and configure the path
+# to it using the MATHJAX_RELPATH option.
+# The default value is: NO.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+USE_MATHJAX = NO
+
+# When MathJax is enabled you can set the default output format to be used for
+# the MathJax output. See the MathJax site (see:
+# http://docs.mathjax.org/en/v2.7-latest/output.html) for more details.
+# Possible values are: HTML-CSS (which is slower, but has the best
+# compatibility), NativeMML (i.e. MathML) and SVG.
+# The default value is: HTML-CSS.
+# This tag requires that the tag USE_MATHJAX is set to YES.
+
+MATHJAX_FORMAT = HTML-CSS
+
+# When MathJax is enabled you need to specify the location relative to the HTML
+# output directory using the MATHJAX_RELPATH option. The destination directory
+# should contain the MathJax.js script. For instance, if the mathjax directory
+# is located at the same level as the HTML output directory, then
+# MATHJAX_RELPATH should be ../mathjax. The default value points to the MathJax
+# Content Delivery Network so you can quickly see the result without installing
+# MathJax. 
However, it is strongly recommended to install a local copy of
+# MathJax from https://www.mathjax.org before deployment.
+# The default value is: https://cdn.jsdelivr.net/npm/mathjax@2.
+# This tag requires that the tag USE_MATHJAX is set to YES.
+
+MATHJAX_RELPATH = https://cdn.jsdelivr.net/npm/mathjax@2
+
+# The MATHJAX_EXTENSIONS tag can be used to specify one or more MathJax
+# extension names that should be enabled during MathJax rendering. For example
+# MATHJAX_EXTENSIONS = TeX/AMSmath TeX/AMSsymbols
+# This tag requires that the tag USE_MATHJAX is set to YES.
+
+MATHJAX_EXTENSIONS =
+
+# The MATHJAX_CODEFILE tag can be used to specify a file with javascript pieces
+# of code that will be used on startup of the MathJax code. See the MathJax site
+# (see:
+# http://docs.mathjax.org/en/v2.7-latest/output.html) for more details. For an
+# example see the documentation.
+# This tag requires that the tag USE_MATHJAX is set to YES.
+
+MATHJAX_CODEFILE =
+
+# When the SEARCHENGINE tag is enabled doxygen will generate a search box for
+# the HTML output. The underlying search engine uses javascript and DHTML and
+# should work on any modern browser. Note that when using HTML help
+# (GENERATE_HTMLHELP), Qt help (GENERATE_QHP), or docsets (GENERATE_DOCSET)
+# there is already a search function so this one should typically be disabled.
+# For large projects the javascript based search engine can be slow, then
+# enabling SERVER_BASED_SEARCH may provide a better solution. It is possible to
+# search using the keyboard; to jump to the search box use <access key> + S
+# (what the <access key> is depends on the OS and browser, but it is typically
+# , /