From 27e72686537c38c7bf28264dafd9a348542113cc Mon Sep 17 00:00:00 2001
From: Timo Aaltonen
Date: Wed, 18 Mar 2020 06:47:32 +0000
Subject: [PATCH] Import 389-ds-base_1.4.3.4.orig.tar.bz2

[dgit import orig 389-ds-base_1.4.3.4.orig.tar.bz2]
---
 .cargo/config.in | 6 +
 .clang-format | 49 +
 .cvsignore | 5 +
 .dockerignore | 3 +
 .gitignore | 236 +
 LICENSE | 28 +
 LICENSE.GPLv3+ | 674 +
 LICENSE.mit | 32 +
 LICENSE.openssl | 11 +
 Makefile.am | 2479 ++
 README.md | 116 +
 VERSION.sh | 54 +
 autogen.sh | 96 +
 buildnum.py | 28 +
 configure.ac | 927 +
 dirsrvtests/README | 28 +
 dirsrvtests/__init__.py | 0
 dirsrvtests/conftest.py | 109 +
 dirsrvtests/create_test.py | 322 +
 dirsrvtests/pytest.ini | 6 +
 dirsrvtests/tests/__init__.py | 0
 dirsrvtests/tests/data/README | 11 +
 dirsrvtests/tests/data/__init__.py | 0
 dirsrvtests/tests/data/basic/__init__.py | 0
 dirsrvtests/tests/data/basic/dse.ldif.broken | 95 +
 .../tests/data/ticket47953/__init__.py | 0
 .../tests/data/ticket47953/ticket47953.ldif | 27 +
 .../tests/data/ticket47988/__init__.py | 0
 .../data/ticket47988/schema_ipa3.3.tar.gz | Bin 0 -> 98049 bytes
 .../data/ticket47988/schema_ipa4.1.tar.gz | Bin 0 -> 87335 bytes
 .../tests/data/ticket48212/__init__.py | 0
 .../data/ticket48212/example1k_posix.ldif | 17017 ++++++++++++++
 .../tests/data/ticket49121/utf8str.txt | 1 +
 .../tests/data/ticket49441/binary.ldif | 858 +
 dirsrvtests/tests/perf/create_data.py | 289 +
 dirsrvtests/tests/perf/memberof_test.py | 405 +
 .../tests/perf/search_performance_test.py | 161 +
 dirsrvtests/tests/stress/README | 13 +
 dirsrvtests/tests/stress/__init__.py | 1 +
 .../stress/cos/cos_scale_template_test.py | 150 +
 .../tests/stress/reliabilty/__init__.py | 0
 .../stress/reliabilty/reliab_7_5_test.py | 576 +
 .../stress/reliabilty/reliab_conn_test.py | 227 +
 .../replication/mmr_01_4m-2h-4c_test.py | 971 +
 .../stress/replication/mmr_01_4m_test.py | 574 +
 dirsrvtests/tests/stress/search/__init__.py | 0
 dirsrvtests/tests/stress/search/simple.py | 59 +
 dirsrvtests/tests/suites/__init__.py | 0
 dirsrvtests/tests/suites/acl/__init__.py | 3 +
 dirsrvtests/tests/suites/acl/acivattr_test.py | 252 +
 dirsrvtests/tests/suites/acl/acl_deny_test.py | 200 +
 dirsrvtests/tests/suites/acl/acl_test.py | 1150 +
 dirsrvtests/tests/suites/acl/conftest.py | 125 +
 .../acl/default_aci_allows_self_write.py | 133 +
 dirsrvtests/tests/suites/acl/deladd_test.py | 456 +
 .../suites/acl/enhanced_aci_modrnd_test.py | 123 +
 .../suites/acl/globalgroup_part2_test.py | 470 +
 .../tests/suites/acl/globalgroup_test.py | 431 +
 .../tests/suites/acl/keywords_part2_test.py | 388 +
 dirsrvtests/tests/suites/acl/keywords_test.py | 467 +
 dirsrvtests/tests/suites/acl/misc_test.py | 414 +
 dirsrvtests/tests/suites/acl/modify_test.py | 575 +
 dirsrvtests/tests/suites/acl/modrdn_test.py | 299 +
 .../suites/acl/repeated_ldap_add_test.py | 489 +
 dirsrvtests/tests/suites/acl/roledn_test.py | 274 +
 .../suites/acl/search_real_part2_test.py | 456 +
 .../suites/acl/search_real_part3_test.py | 469 +
 .../tests/suites/acl/search_real_test.py | 410 +
 .../suites/acl/selfdn_permissions_test.py | 352 +
 dirsrvtests/tests/suites/acl/syntax_test.py | 262 +
 dirsrvtests/tests/suites/acl/userattr_test.py | 298 +
 .../tests/suites/acl/valueacl_part2_test.py | 432 +
 dirsrvtests/tests/suites/acl/valueacl_test.py | 747 +
 .../tests/suites/attr_encryption/__init__.py | 0
 .../attr_encryption/attr_encryption_test.py | 453 +
 .../tests/suites/auth_token/__init__.py | 0
 .../suites/auth_token/basic_auth_test.py | 240 +
 .../suites/automember_plugin/__init__.py | 3 +
.../automember_plugin/automember_mod_test.py | 143 + .../automember_plugin/automember_test.py | 300 + .../suites/automember_plugin/basic_test.py | 854 + .../automember_plugin/configuration_test.py | 58 + .../tests/suites/backups/backup_test.py | 73 + dirsrvtests/tests/suites/basic/__init__.py | 3 + dirsrvtests/tests/suites/basic/basic_test.py | 1409 ++ dirsrvtests/tests/suites/betxns/__init__.py | 3 + dirsrvtests/tests/suites/betxns/betxn_test.py | 344 + dirsrvtests/tests/suites/clu/__init__.py | 3 + dirsrvtests/tests/suites/clu/clu_test.py | 95 + dirsrvtests/tests/suites/config/__init__.py | 3 + .../tests/suites/config/autotuning_test.py | 335 + .../tests/suites/config/config_test.py | 467 + .../tests/suites/config/regression_test.py | 114 + .../config/removed_config_49298_test.py | 90 + dirsrvtests/tests/suites/cos/__init__.py | 3 + dirsrvtests/tests/suites/cos/cos_test.py | 84 + .../tests/suites/cos/indirect_cos_test.py | 173 + .../tests/suites/disk_monitoring/__init__.py | 3 + .../disk_monitoring/disk_monitoring_test.py | 584 + .../suites/disk_monitoring/disk_space_test.py | 45 + dirsrvtests/tests/suites/ds_logs/__init__.py | 3 + .../tests/suites/ds_logs/ds_logs_test.py | 857 + .../tests/suites/ds_logs/regression_test.py | 79 + dirsrvtests/tests/suites/ds_tools/__init__.py | 4 + .../tests/suites/ds_tools/logpipe_test.py | 78 + .../tests/suites/ds_tools/replcheck_test.py | 502 + .../tests/suites/dynamic_plugins/__init__.py | 3 + .../dynamic_plugins/dynamic_plugins_test.py | 441 + .../suites/dynamic_plugins/stress_tests.py | 131 + dirsrvtests/tests/suites/filter/__init__.py | 3 + .../tests/suites/filter/basic_filter_test.py | 49 + .../tests/suites/filter/bitw_filter_test.py | 398 + .../suites/filter/complex_filters_test.py | 139 + .../tests/suites/filter/filter_cert_test.py | 69 + .../suites/filter/filter_index_match_test.py | 862 + .../suites/filter/filter_indexing_test.py | 169 + .../tests/suites/filter/filter_logic_test.py | 447 + .../tests/suites/filter/filter_match_test.py | 776 + .../tests/suites/filter/filter_test.py | 315 + .../filter/filter_with_non_root_user_test.py | 384 + .../suites/filter/filterscanlimit_test.py | 418 + .../tests/suites/filter/large_filter_test.py | 165 + .../filter/rfc3673_all_oper_attrs_test.py | 171 + .../suites/filter/schema_validation_test.py | 252 + .../suites/filter/vfilter_attribute_test.py | 219 + .../suites/filter/vfilter_simple_test.py | 556 + .../tests/suites/fourwaymmr/__init__.py | 0 .../suites/fourwaymmr/fourwaymmr_test.py | 477 + .../suites/fractional/fractional_test.py | 404 + .../suites/get_effective_rights/__init__.py | 3 + .../get_effective_rights/acceptance_test.py | 132 + dirsrvtests/tests/suites/gssapi/__init__.py | 3 + .../tests/suites/gssapi/simple_gssapi_test.py | 147 + .../tests/suites/gssapi_repl/__init__.py | 3 + .../suites/gssapi_repl/gssapi_repl_test.py | 174 + dirsrvtests/tests/suites/import/__init__.py | 3 + .../tests/suites/import/regression_test.py | 305 + dirsrvtests/tests/suites/ldapi/__init__.py | 3 + dirsrvtests/tests/suites/lib389/__init__.py | 0 .../suites/lib389/config_compare_test.py | 38 + .../suites/lib389/dsldapobject/__init__.py | 0 .../lib389/dsldapobject/dn_construct_test.py | 236 + .../tests/suites/lib389/idm/__init__.py | 0 .../suites/lib389/idm/user_compare_i2_test.py | 49 + .../lib389/idm/user_compare_m2Repl_test.py | 57 + .../suites/lib389/idm/user_compare_st_test.py | 78 + dirsrvtests/tests/suites/logging/__init__.py | 3 + .../suites/logging/logging_config_test.py | 87 + 
.../tests/suites/mapping_tree/__init__.py | 0 .../be_del_and_default_naming_attr_test.py | 90 + .../referral_during_tot_init_test.py | 69 + .../tests/suites/memberof_plugin/__init__.py | 3 + .../suites/memberof_plugin/regression_test.py | 863 + .../memory_leaks/MMR_double_free_test.py | 166 + .../tests/suites/memory_leaks/__init__.py | 3 + .../suites/memory_leaks/range_search_test.py | 71 + .../tests/suites/migration/__init__.py | 3 + .../suites/migration/export_data_test.py | 82 + .../suites/migration/import_data_test.py | 70 + dirsrvtests/tests/suites/monitor/__init__.py | 3 + .../tests/suites/monitor/monitor_test.py | 70 + .../tests/suites/paged_results/__init__.py | 3 + .../paged_results/paged_results_test.py | 1179 + dirsrvtests/tests/suites/password/__init__.py | 3 + .../tests/suites/password/password_test.py | 72 + .../password/pbkdf2_upgrade_plugin_test.py | 52 + .../tests/suites/password/pwdAdmin_test.py | 363 + .../tests/suites/password/pwdModify_test.py | 282 + .../password/pwdPolicy_attribute_test.py | 260 + .../password/pwdPolicy_controls_test.py | 292 + .../password/pwdPolicy_inherit_global_test.py | 212 + .../suites/password/pwdPolicy_syntax_test.py | 292 + .../suites/password/pwdPolicy_token_test.py | 83 + .../suites/password/pwdPolicy_warning_test.py | 599 + .../tests/suites/password/pwd_algo_test.py | 177 + .../password/pwd_lockout_bypass_test.py | 82 + .../tests/suites/password/pwd_log_test.py | 87 + .../suites/password/pwd_upgrade_on_bind.py | 140 + .../tests/suites/password/pwp_history_test.py | 263 + dirsrvtests/tests/suites/password/pwp_test.py | 511 + .../tests/suites/password/regression_test.py | 325 + .../suites/password/series_of_bugs_test.py | 134 + dirsrvtests/tests/suites/plugins/__init__.py | 3 + .../tests/suites/plugins/acceptance_test.py | 1805 ++ .../tests/suites/plugins/accpol_test.py | 1094 + .../plugins/attr_nsslapd-pluginarg_test.py | 211 + dirsrvtests/tests/suites/plugins/cos_test.py | 220 + .../tests/suites/plugins/deref_aci_test.py | 141 + dirsrvtests/tests/suites/plugins/dna_test.py | 86 + .../tests/suites/plugins/memberof_test.py | 2827 +++ .../plugins/pluginpath_validation_test.py | 111 + .../tests/suites/plugins/referint_test.py | 105 + .../suites/plugins/rootdn_plugin_test.py | 595 + dirsrvtests/tests/suites/psearch/__init__.py | 3 + .../tests/suites/psearch/psearch_test.py | 74 + .../tests/suites/pwp_storage/storage_test.py | 164 + .../suites/referint_plugin/rename_test.py | 179 + .../tests/suites/replication/__init__.py | 21 + .../suites/replication/acceptance_test.py | 507 + .../suites/replication/cascading_test.py | 152 + .../suites/replication/changelog_test.py | 724 + .../replication/changelog_trimming_test.py | 134 + .../replication/cleanallruv_max_tasks_test.py | 72 + .../suites/replication/cleanallruv_test.py | 827 + .../replication/conflict_resolve_test.py | 880 + .../tests/suites/replication/conftest.py | 53 + .../suites/replication/encryption_cl5_test.py | 135 + .../suites/replication/regression_test.py | 904 + .../suites/replication/replica_config_test.py | 285 + .../tests/suites/replication/ruvstore_test.py | 163 + .../suites/replication/single_master_test.py | 159 + .../replication/tls_client_auth_repl_test.py | 176 + .../replication/tombstone_fixup_test.py | 129 + .../suites/replication/tombstone_test.py | 63 + .../wait_for_async_feature_test.py | 212 + .../tests/suites/resource_limits/__init__.py | 0 .../suites/resource_limits/fdlimits_test.py | 76 + dirsrvtests/tests/suites/roles/basic_test.py | 295 + 
dirsrvtests/tests/suites/sasl/__init__.py | 3 + .../tests/suites/sasl/allowed_mechs_test.py | 183 + dirsrvtests/tests/suites/sasl/plain_test.py | 95 + .../tests/suites/sasl/regression_test.py | 182 + dirsrvtests/tests/suites/schema/__init__.py | 3 + .../tests/suites/schema/eduperson_test.py | 90 + .../tests/suites/schema/schema_reload_test.py | 155 + .../suites/schema/schema_replication_test.py | 702 + .../tests/suites/schema/schema_test.py | 173 + dirsrvtests/tests/suites/setup_ds/__init__.py | 12 + .../tests/suites/setup_ds/dscreate_test.py | 125 + .../tests/suites/setup_ds/remove_test.py | 68 + .../tests/suites/setup_ds/setup_ds_test.py | 84 + dirsrvtests/tests/suites/snmp/__init__.py | 3 + dirsrvtests/tests/suites/stat/__init__.py | 0 .../tests/suites/stat/mmt_state_test.py | 361 + dirsrvtests/tests/suites/syntax/__init__.py | 3 + .../tests/suites/syntax/acceptance_test.py | 112 + dirsrvtests/tests/suites/syntax/mr_test.py | 61 + dirsrvtests/tests/suites/tls/__init__.py | 3 + dirsrvtests/tests/suites/tls/cipher_test.py | 51 + .../tests/suites/tls/ssl_version_test.py | 55 + .../tests/suites/tls/tls_check_crl_test.py | 54 + .../tests/suites/tls/tls_ldaps_only_test.py | 46 + dirsrvtests/tests/suites/vlv/__init__.py | 3 + .../tests/suites/vlv/regression_test.py | 110 + dirsrvtests/tests/tickets/__init__.py | 0 dirsrvtests/tests/tickets/ticket47462_test.py | 296 + dirsrvtests/tests/tickets/ticket47560_test.py | 191 + dirsrvtests/tests/tickets/ticket47573_test.py | 235 + dirsrvtests/tests/tickets/ticket47619_test.py | 97 + dirsrvtests/tests/tickets/ticket47640_test.py | 82 + .../tests/tickets/ticket47653MMR_test.py | 348 + dirsrvtests/tests/tickets/ticket47676_test.py | 252 + dirsrvtests/tests/tickets/ticket47714_test.py | 213 + dirsrvtests/tests/tickets/ticket47721_test.py | 293 + dirsrvtests/tests/tickets/ticket47781_test.py | 104 + dirsrvtests/tests/tickets/ticket47787_test.py | 428 + dirsrvtests/tests/tickets/ticket47808_test.py | 101 + dirsrvtests/tests/tickets/ticket47815_test.py | 116 + dirsrvtests/tests/tickets/ticket47823_test.py | 965 + dirsrvtests/tests/tickets/ticket47828_test.py | 652 + dirsrvtests/tests/tickets/ticket47829_test.py | 629 + dirsrvtests/tests/tickets/ticket47833_test.py | 220 + .../tests/tickets/ticket47869MMR_test.py | 200 + dirsrvtests/tests/tickets/ticket47871_test.py | 108 + dirsrvtests/tests/tickets/ticket47900_test.py | 212 + dirsrvtests/tests/tickets/ticket47910_test.py | 166 + dirsrvtests/tests/tickets/ticket47920_test.py | 130 + dirsrvtests/tests/tickets/ticket47921_test.py | 88 + dirsrvtests/tests/tickets/ticket47927_test.py | 267 + dirsrvtests/tests/tickets/ticket47931_test.py | 171 + dirsrvtests/tests/tickets/ticket47953_test.py | 73 + dirsrvtests/tests/tickets/ticket47963_test.py | 152 + dirsrvtests/tests/tickets/ticket47970_test.py | 89 + dirsrvtests/tests/tickets/ticket47973_test.py | 227 + dirsrvtests/tests/tickets/ticket47976_test.py | 160 + dirsrvtests/tests/tickets/ticket47980_test.py | 595 + dirsrvtests/tests/tickets/ticket47981_test.py | 228 + dirsrvtests/tests/tickets/ticket47988_test.py | 371 + dirsrvtests/tests/tickets/ticket48005_test.py | 365 + dirsrvtests/tests/tickets/ticket48013_test.py | 95 + dirsrvtests/tests/tickets/ticket48026_test.py | 121 + dirsrvtests/tests/tickets/ticket48109_test.py | 338 + dirsrvtests/tests/tickets/ticket48170_test.py | 43 + dirsrvtests/tests/tickets/ticket48194_test.py | 352 + dirsrvtests/tests/tickets/ticket48212_test.py | 134 + dirsrvtests/tests/tickets/ticket48214_test.py | 105 + 
dirsrvtests/tests/tickets/ticket48228_test.py | 274 + dirsrvtests/tests/tickets/ticket48233_test.py | 61 + dirsrvtests/tests/tickets/ticket48234_test.py | 99 + dirsrvtests/tests/tickets/ticket48252_test.py | 120 + dirsrvtests/tests/tickets/ticket48265_test.py | 76 + dirsrvtests/tests/tickets/ticket48266_test.py | 280 + dirsrvtests/tests/tickets/ticket48270_test.py | 118 + dirsrvtests/tests/tickets/ticket48272_test.py | 136 + dirsrvtests/tests/tickets/ticket48294_test.py | 220 + dirsrvtests/tests/tickets/ticket48295_test.py | 144 + dirsrvtests/tests/tickets/ticket48312_test.py | 124 + dirsrvtests/tests/tickets/ticket48325_test.py | 132 + dirsrvtests/tests/tickets/ticket48342_test.py | 142 + dirsrvtests/tests/tickets/ticket48354_test.py | 57 + dirsrvtests/tests/tickets/ticket48362_test.py | 169 + dirsrvtests/tests/tickets/ticket48366_test.py | 148 + dirsrvtests/tests/tickets/ticket48370_test.py | 194 + dirsrvtests/tests/tickets/ticket48383_test.py | 95 + dirsrvtests/tests/tickets/ticket48497_test.py | 114 + dirsrvtests/tests/tickets/ticket48637_test.py | 150 + dirsrvtests/tests/tickets/ticket48665_test.py | 72 + dirsrvtests/tests/tickets/ticket48745_test.py | 128 + dirsrvtests/tests/tickets/ticket48746_test.py | 148 + dirsrvtests/tests/tickets/ticket48759_test.py | 227 + dirsrvtests/tests/tickets/ticket48784_test.py | 141 + dirsrvtests/tests/tickets/ticket48798_test.py | 65 + dirsrvtests/tests/tickets/ticket48799_test.py | 87 + dirsrvtests/tests/tickets/ticket48808_test.py | 303 + dirsrvtests/tests/tickets/ticket48844_test.py | 136 + dirsrvtests/tests/tickets/ticket48891_test.py | 102 + dirsrvtests/tests/tickets/ticket48893_test.py | 53 + dirsrvtests/tests/tickets/ticket48896_test.py | 139 + dirsrvtests/tests/tickets/ticket48906_test.py | 302 + dirsrvtests/tests/tickets/ticket48916_test.py | 135 + dirsrvtests/tests/tickets/ticket48944_test.py | 211 + dirsrvtests/tests/tickets/ticket48956_test.py | 128 + dirsrvtests/tests/tickets/ticket48961_test.py | 145 + dirsrvtests/tests/tickets/ticket48973_test.py | 306 + dirsrvtests/tests/tickets/ticket49008_test.py | 125 + dirsrvtests/tests/tickets/ticket49020_test.py | 73 + dirsrvtests/tests/tickets/ticket49039_test.py | 119 + dirsrvtests/tests/tickets/ticket49072_test.py | 114 + dirsrvtests/tests/tickets/ticket49073_test.py | 150 + dirsrvtests/tests/tickets/ticket49076_test.py | 105 + dirsrvtests/tests/tickets/ticket49095_test.py | 87 + dirsrvtests/tests/tickets/ticket49104_test.py | 88 + dirsrvtests/tests/tickets/ticket49121_test.py | 206 + dirsrvtests/tests/tickets/ticket49122_test.py | 94 + dirsrvtests/tests/tickets/ticket49180_test.py | 124 + dirsrvtests/tests/tickets/ticket49184_test.py | 148 + dirsrvtests/tests/tickets/ticket49192_test.py | 179 + dirsrvtests/tests/tickets/ticket49227_test.py | 149 + dirsrvtests/tests/tickets/ticket49249_test.py | 142 + dirsrvtests/tests/tickets/ticket49273_test.py | 52 + dirsrvtests/tests/tickets/ticket49287_test.py | 347 + dirsrvtests/tests/tickets/ticket49290_test.py | 68 + dirsrvtests/tests/tickets/ticket49303_test.py | 113 + dirsrvtests/tests/tickets/ticket49386_test.py | 151 + dirsrvtests/tests/tickets/ticket49412_test.py | 67 + dirsrvtests/tests/tickets/ticket49441_test.py | 76 + dirsrvtests/tests/tickets/ticket49460_test.py | 117 + dirsrvtests/tests/tickets/ticket49463_test.py | 228 + dirsrvtests/tests/tickets/ticket49471_test.py | 81 + dirsrvtests/tests/tickets/ticket49540_test.py | 135 + .../tests/tickets/ticket49623_2_test.py | 66 + dirsrvtests/tests/tickets/ticket49658_test.py | 4266 ++++ 
dirsrvtests/tests/tickets/ticket49788_test.py | 88 + dirsrvtests/tests/tickets/ticket50078_test.py | 70 + dirsrvtests/tests/tickets/ticket50232_test.py | 165 + dirsrvtests/tests/tickets/ticket50234_test.py | 72 + dirsrvtests/tests/tickets/ticket548_test.py | 408 + dirsrvtests/tests/tmp/README | 10 + dirsrvtests/tests/tmp/__init__.py | 0 docker.mk | 6 + docker/389-ds-fedora/Dockerfile | 51 + docker/389-ds-suse/Dockerfile | 82 + docker/389-ds-suse/Dockerfile.release | 72 + docker/README.md | 61 + docs/CREDITS.artwork | 1 + docs/custom.css | 1366 ++ docs/doc_header.html | 47 + docs/intro.md | 143 + docs/job-safety.md | 90 + docs/logo-banner.png | Bin 0 -> 6193 bytes docs/logo-banner.xcf | Bin 0 -> 9136 bytes docs/logo-square.xcf | Bin 0 -> 9136 bytes docs/nunc-stans-intro.dia | Bin 0 -> 8292 bytes docs/nunc-stans-intro.png | Bin 0 -> 74103 bytes docs/nunc-stans-job-states.dia | Bin 0 -> 1766 bytes docs/nunc-stans-job-states.png | Bin 0 -> 11487 bytes docs/slapi.doxy.in | 2366 ++ docs/tops_tops.xcf | Bin 0 -> 4841 bytes include/base/crit.h | 211 + include/base/dbtbase.h | 227 + include/base/ereport.h | 56 + include/base/file.h | 95 + include/base/fsmutex.h | 98 + include/base/plist.h | 73 + include/base/pool.h | 92 + include/base/shexp.h | 118 + include/base/systems.h | 246 + include/base/systhr.h | 88 + include/base/util.h | 92 + include/i18n.h | 191 + include/ldaputil/cert.h | 31 + include/ldaputil/certmap.h | 280 + include/ldaputil/dbconf.h | 96 + include/ldaputil/encode.h | 33 + include/ldaputil/errors.h | 100 + include/ldaputil/init.h | 31 + include/ldaputil/ldapauth.h | 36 + include/ldaputil/ldaputil.h | 140 + include/libaccess/acl.h | 48 + include/libaccess/aclerror.h | 307 + include/libaccess/acleval.h | 44 + include/libaccess/aclglobal.h | 61 + include/libaccess/aclproto.h | 152 + include/libaccess/aclstruct.h | 284 + include/libaccess/attrec.h | 165 + include/libaccess/authdb.h | 33 + include/libaccess/dbtlibaccess.h | 172 + include/libaccess/dnfstruct.h | 61 + include/libaccess/ipfstruct.h | 92 + include/libaccess/las.h | 170 + include/libaccess/nsauth.h | 297 + include/libaccess/nsautherr.h | 104 + include/libaccess/nserror.h | 54 + include/libaccess/symbols.h | 99 + include/libaccess/userauth.h | 21 + include/libaccess/usi.h | 90 + include/libaccess/usrcache.h | 88 + include/libadmin/dbtlibadmin.h | 28 + include/libadmin/libadmin.h | 93 + include/netsite.h | 196 + include/public/base/systems.h | 101 + include/public/netsite.h | 29 + include/public/nsacl/aclapi.h | 373 + include/public/nsacl/acldef.h | 465 + include/public/nsacl/nserrdef.h | 113 + include/public/nsacl/plistdef.h | 70 + include/public/nsapi.h | 260 + ldap/admin/src/70-dirsrv.conf | 52 + ldap/admin/src/base-initconfig.in | 48 + ldap/admin/src/defaults.inf.in | 63 + ldap/admin/src/initconfig.in | 12 + ldap/admin/src/logconv.pl | 2860 +++ ldap/admin/src/makemccvlvindexes | 214 + ldap/admin/src/makevlvindex | 112 + ldap/admin/src/makevlvsearch | 141 + ldap/admin/src/scripts/10cleanupldapi.pl | 23 + ldap/admin/src/scripts/10delautodnsuffix.pl | 23 + ldap/admin/src/scripts/10fixrundir.pl | 39 + ldap/admin/src/scripts/20betxn.pl | 74 + ldap/admin/src/scripts/50AES-pbe-plugin.ldif | 16 + .../src/scripts/50acctusabilityplugin.ldif | 21 + .../src/scripts/50addchainingsaslpwroles.ldif | 6 + .../admin/src/scripts/50automemberplugin.ldif | 15 + .../src/scripts/50bitstringsyntaxplugin.ldif | 14 + ldap/admin/src/scripts/50contentsync.ldif | 23 + .../scripts/50deliverymethodsyntaxplugin.ldif | 14 + 
ldap/admin/src/scripts/50derefplugin.ldif | 16 + .../src/scripts/50disableurisyntaxplugin.ldif | 9 + .../scripts/50enhancedguidesyntaxplugin.ldif | 14 + ldap/admin/src/scripts/50entryusnindex.ldif | 7 + .../src/scripts/50faxnumbersyntaxplugin.ldif | 14 + ldap/admin/src/scripts/50faxsyntaxplugin.ldif | 14 + ldap/admin/src/scripts/50fixNsState.pl | 241 + .../src/scripts/50guidesyntaxplugin.ldif | 14 + .../src/scripts/50linkedattrsplugin.ldif | 16 + .../src/scripts/50managedentriesplugin.ldif | 16 + ldap/admin/src/scripts/50memberofindex.ldif | 6 + ldap/admin/src/scripts/50memberofplugin.ldif | 17 + .../src/scripts/50nameuidsyntaxplugin.ldif | 14 + ldap/admin/src/scripts/50nstombstonecsn.ldif | 7 + .../scripts/50numericstringsyntaxplugin.ldif | 14 + .../50printablestringsyntaxplugin.ldif | 14 + .../admin/src/scripts/50refintprecedence.ldif | 4 + .../src/scripts/50retroclprecedence.ldif | 4 + .../scripts/50rootdnaccesscontrolplugin.ldif | 15 + .../src/scripts/50schemareloadplugin.ldif | 14 + .../src/scripts/50smd5pwdstorageplugin.ldif | 13 + .../src/scripts/50syntaxvalidplugin.ldif | 14 + ldap/admin/src/scripts/50targetuniqueid.ldif | 7 + .../50teletexterminalidsyntaxplugin.ldif | 14 + .../scripts/50telexnumbersyntaxplugin.ldif | 14 + ldap/admin/src/scripts/50updateconfig.ldif | 10 + ldap/admin/src/scripts/50usnplugin.ldif | 15 + ldap/admin/src/scripts/52updateAESplugin.pl | 87 + .../scripts/60removeLegacyReplication.ldif | 2 + .../admin/src/scripts/60upgradeconfigfiles.pl | 69 + .../admin/src/scripts/60upgradeschemafiles.pl | 189 + ldap/admin/src/scripts/70upgradefromldif.pl | 108 + .../admin/src/scripts/80upgradednformat.pl.in | 307 + ldap/admin/src/scripts/81changelog.pl | 34 + .../src/scripts/82targetuniqueidindex.pl | 52 + ldap/admin/src/scripts/90subtreerename.pl | 57 + ldap/admin/src/scripts/91reindex.pl.in | 103 + ldap/admin/src/scripts/91subtreereindex.pl | 152 + ldap/admin/src/scripts/DSCreate.pm.in | 1551 ++ ldap/admin/src/scripts/DSDialogs.pm | 233 + ldap/admin/src/scripts/DSMigration.pm.in | 1175 + ldap/admin/src/scripts/DSSharedLib.in | 179 + ldap/admin/src/scripts/DSUpdate.pm.in | 534 + ldap/admin/src/scripts/DSUpdateDialogs.pm | 152 + ldap/admin/src/scripts/DSUtil.pm.in | 1665 ++ ldap/admin/src/scripts/Dialog.pm | 249 + ldap/admin/src/scripts/DialogManager.pm.in | 212 + ldap/admin/src/scripts/FileConn.pm | 461 + ldap/admin/src/scripts/Inf.pm | 268 + ldap/admin/src/scripts/Migration.pm.in | 327 + ldap/admin/src/scripts/Resource.pm | 137 + ldap/admin/src/scripts/Setup.pm.in | 240 + ldap/admin/src/scripts/SetupDialogs.pm.in | 221 + ldap/admin/src/scripts/SetupLog.pm | 82 + ldap/admin/src/scripts/bak2db.in | 87 + ldap/admin/src/scripts/bak2db.pl.in | 108 + ldap/admin/src/scripts/cl-dump.pl | 323 + ldap/admin/src/scripts/cleanallruv.pl.in | 118 + ldap/admin/src/scripts/db2bak.in | 84 + ldap/admin/src/scripts/db2bak.pl.in | 148 + ldap/admin/src/scripts/db2index.in | 103 + ldap/admin/src/scripts/db2index.pl.in | 178 + ldap/admin/src/scripts/db2ldif.in | 171 + ldap/admin/src/scripts/db2ldif.pl.in | 259 + ldap/admin/src/scripts/dbmon.sh.in | 271 + ldap/admin/src/scripts/dbverify.in | 79 + ldap/admin/src/scripts/dn2rdn.in | 65 + ldap/admin/src/scripts/dnaplugindepends.ldif | 7 + ldap/admin/src/scripts/ds-logpipe.py | 417 + ldap/admin/src/scripts/ds-replcheck | 1616 ++ ldap/admin/src/scripts/ds_selinux_enabled.in | 25 + .../src/scripts/ds_selinux_port_query.in | 61 + ldap/admin/src/scripts/dscreate.map.in | 40 + ldap/admin/src/scripts/dsorgentries.map.in | 22 + 
ldap/admin/src/scripts/dsupdate.map.in | 36 + ldap/admin/src/scripts/exampleupdate.ldif | 11 + ldap/admin/src/scripts/exampleupdate.pl | 27 + ldap/admin/src/scripts/exampleupdate.sh | 34 + ldap/admin/src/scripts/failedbinds.py | 175 + .../admin/src/scripts/fixup-linkedattrs.pl.in | 104 + ldap/admin/src/scripts/fixup-memberof.pl.in | 117 + ldap/admin/src/scripts/ldif2db.in | 120 + ldap/admin/src/scripts/ldif2db.pl.in | 209 + ldap/admin/src/scripts/ldif2ldap.in | 180 + ldap/admin/src/scripts/logregex.py | 28 + ldap/admin/src/scripts/migrate-ds.pl.in | 52 + ldap/admin/src/scripts/migrate-ds.res | 35 + ldap/admin/src/scripts/monitor.in | 177 + ldap/admin/src/scripts/ns-accountstatus.pl.in | 1045 + ldap/admin/src/scripts/ns-activate.pl.in | 749 + ldap/admin/src/scripts/ns-inactivate.pl.in | 625 + ldap/admin/src/scripts/ns-newpwpolicy.pl.in | 188 + ldap/admin/src/scripts/ns-slapd-gdb.py | 171 + ldap/admin/src/scripts/readnsstate.in | 100 + ldap/admin/src/scripts/remove-ds.pl.in | 79 + ldap/admin/src/scripts/repl-monitor.pl.in | 1343 ++ ldap/admin/src/scripts/restart-dirsrv.in | 59 + ldap/admin/src/scripts/restoreconfig.in | 59 + ldap/admin/src/scripts/saveconfig.in | 61 + ldap/admin/src/scripts/schema-reload.pl.in | 103 + ldap/admin/src/scripts/setup-ds.pl.in | 94 + ldap/admin/src/scripts/setup-ds.res.in | 205 + ldap/admin/src/scripts/start-dirsrv.in | 124 + ldap/admin/src/scripts/status-dirsrv.in | 95 + ldap/admin/src/scripts/stop-dirsrv.in | 107 + ldap/admin/src/scripts/suffix2instance.in | 66 + ldap/admin/src/scripts/syntax-validate.pl.in | 115 + ldap/admin/src/scripts/template-bak2db.in | 4 + ldap/admin/src/scripts/template-bak2db.pl.in | 28 + .../src/scripts/template-cleanallruv.pl.in | 28 + ldap/admin/src/scripts/template-db2bak.in | 4 + ldap/admin/src/scripts/template-db2bak.pl.in | 29 + ldap/admin/src/scripts/template-db2index.in | 4 + .../admin/src/scripts/template-db2index.pl.in | 28 + ldap/admin/src/scripts/template-db2ldif.in | 5 + ldap/admin/src/scripts/template-db2ldif.pl.in | 31 + ldap/admin/src/scripts/template-dbverify.in | 4 + ldap/admin/src/scripts/template-dn2rdn.in | 4 + .../scripts/template-fixup-linkedattrs.pl.in | 28 + .../src/scripts/template-fixup-memberof.pl.in | 28 + .../scripts/template-fixup-memberuid.pl.in | 154 + ldap/admin/src/scripts/template-ldif2db.in | 4 + ldap/admin/src/scripts/template-ldif2db.pl.in | 28 + ldap/admin/src/scripts/template-ldif2ldap.in | 4 + ldap/admin/src/scripts/template-monitor.in | 4 + .../scripts/template-ns-accountstatus.pl.in | 28 + .../src/scripts/template-ns-activate.pl.in | 28 + .../src/scripts/template-ns-inactivate.pl.in | 28 + .../src/scripts/template-ns-newpwpolicy.pl.in | 28 + .../src/scripts/template-restart-slapd.in | 3 + .../src/scripts/template-restoreconfig.in | 4 + ldap/admin/src/scripts/template-saveconfig.in | 4 + .../src/scripts/template-schema-reload.pl.in | 28 + .../admin/src/scripts/template-start-slapd.in | 4 + ldap/admin/src/scripts/template-stop-slapd.in | 3 + .../src/scripts/template-suffix2instance.in | 4 + .../scripts/template-syntax-validate.pl.in | 28 + ldap/admin/src/scripts/template-upgradedb.in | 4 + .../src/scripts/template-upgradednformat.in | 4 + .../template-usn-tombstone-cleanup.pl.in | 27 + .../src/scripts/template-verify-db.pl.in | 27 + ldap/admin/src/scripts/template-vlvindex.in | 4 + ldap/admin/src/scripts/upgradedb.in | 66 + ldap/admin/src/scripts/upgradednformat.in | 79 + .../src/scripts/usn-tombstone-cleanup.pl.in | 119 + ldap/admin/src/scripts/verify-db.pl.in | 240 + 
ldap/admin/src/scripts/vlvindex.in | 73 + ldap/admin/src/slapd.inf.in | 37 + ldap/admin/src/template-initconfig.in | 22 + ldap/admin/src/upgradeServer | 542 + ldap/include/avl.h | 85 + ldap/include/dblayer.h | 15 + ldap/include/disptmpl.h | 328 + ldap/include/ldaprot.h | 317 + ldap/include/portable.h | 336 + ldap/include/regex.h | 71 + ldap/include/srchpref.h | 121 + ldap/include/sysexits-compat.h | 115 + ldap/ldif/Ace.ldif | 2607 +++ ldap/ldif/European.ldif | 7592 +++++++ ldap/ldif/Eurosuffix.ldif | 15 + ldap/ldif/Example-roles.ldif | 2998 +++ ldap/ldif/Example-views.ldif | 3170 +++ ldap/ldif/Example.ldif | 2984 +++ ldap/ldif/template-baseacis.ldif.in | 5 + ldap/ldif/template-country.ldif.in | 4 + ldap/ldif/template-domain.ldif.in | 4 + ldap/ldif/template-dse-minimal.ldif.in | 558 + ldap/ldif/template-dse.ldif.in | 1186 + ldap/ldif/template-ldapi-autobind.ldif.in | 22 + ldap/ldif/template-ldapi-default.ldif.in | 8 + ldap/ldif/template-ldapi.ldif.in | 7 + ldap/ldif/template-locality.ldif.in | 4 + ldap/ldif/template-org.ldif.in | 4 + ldap/ldif/template-orgunit.ldif.in | 4 + ldap/ldif/template-sasl.ldif.in | 16 + ldap/ldif/template-state.ldif.in | 4 + ldap/ldif/template-suffix-db.ldif.in | 29 + ldap/ldif/template.ldif | 96 + ldap/libraries/libavl/avl.c | 779 + ldap/libraries/libavl/testavl.c | 127 + ldap/schema/00core.ldif | 886 + ldap/schema/01core389.ldif | 336 + ldap/schema/02common.ldif | 181 + ldap/schema/05rfc2927.ldif | 22 + ldap/schema/05rfc4523.ldif | 165 + ldap/schema/05rfc4524.ldif | 296 + ldap/schema/06inetorgperson.ldif | 135 + ldap/schema/10automember-plugin.ldif | 94 + ldap/schema/10dna-plugin.ldif | 219 + ldap/schema/10mep-plugin.ldif | 112 + ldap/schema/10presence.ldif | 28 + ldap/schema/10rfc2307.ldif | 55 + ldap/schema/10rfc2307bis.ldif | 348 + ldap/schema/20subscriber.ldif | 35 + ldap/schema/25java-object.ldif | 27 + ldap/schema/28pilot.ldif | 29 + ldap/schema/30ns-common.ldif | 77 + ldap/schema/50ns-admin.ldif | 45 + ldap/schema/50ns-certificate.ldif | 17 + ldap/schema/50ns-directory.ldif | 92 + ldap/schema/50ns-mail.ldif | 51 + ldap/schema/50ns-value.ldif | 27 + ldap/schema/50ns-web.ldif | 17 + ldap/schema/60acctpolicy.ldif | 47 + ldap/schema/60autofs.ldif | 44 + ldap/schema/60changelog.ldif | 105 + ldap/schema/60eduperson.ldif | 169 + ldap/schema/60inetmail.ldif | 104 + ldap/schema/60kerberos.ldif | 283 + ldap/schema/60krb5kdc.ldif | 159 + ldap/schema/60mozilla.ldif | 233 + ldap/schema/60nis.ldif | 146 + ldap/schema/60nss-ldap.ldif | 29 + ldap/schema/60pam-plugin.ldif | 23 + ldap/schema/60posix-winsync-plugin.ldif | 15 + ldap/schema/60pureftpd.ldif | 129 + ldap/schema/60qmail.ldif | 442 + ldap/schema/60radius.ldif | 559 + ldap/schema/60rfc2739.ldif | 123 + ldap/schema/60rfc3712.ldif | 486 + ldap/schema/60rfc4876.ldif | 198 + ldap/schema/60sabayon.ldif | 66 + ldap/schema/60samba.ldif | 205 + ldap/schema/60samba3.ldif | 566 + ldap/schema/60sendmail.ldif | 54 + ldap/schema/60sudo.ldif | 121 + ldap/schema/60trust.ldif | 47 + ldap/schema/99user.ldif | 14 + ldap/schema/slapd-collations.conf | 263 + .../plugins/acct_usability/acct_usability.c | 428 + .../plugins/acct_usability/acct_usability.h | 35 + ldap/servers/plugins/acctpolicy/acct_config.c | 177 + ldap/servers/plugins/acctpolicy/acct_init.c | 298 + ldap/servers/plugins/acctpolicy/acct_plugin.c | 498 + ldap/servers/plugins/acctpolicy/acct_util.c | 287 + ldap/servers/plugins/acctpolicy/acctpolicy.h | 98 + .../plugins/acctpolicy/sampleconfig.ldif | 40 + .../plugins/acctpolicy/samplepolicy.ldif | 27 + 
ldap/servers/plugins/acl/ACL-Notes | 218 + ldap/servers/plugins/acl/acl.c | 4353 ++++ ldap/servers/plugins/acl/acl.h | 865 + ldap/servers/plugins/acl/acl_ext.c | 1111 + ldap/servers/plugins/acl/aclanom.c | 575 + ldap/servers/plugins/acl/acleffectiverights.c | 1046 + ldap/servers/plugins/acl/aclgroup.c | 472 + ldap/servers/plugins/acl/aclinit.c | 530 + ldap/servers/plugins/acl/acllas.c | 4525 ++++ ldap/servers/plugins/acl/acllist.c | 1004 + ldap/servers/plugins/acl/aclparse.c | 2327 ++ ldap/servers/plugins/acl/aclplugin.c | 386 + ldap/servers/plugins/acl/aclutil.c | 1509 ++ ldap/servers/plugins/addn/addn.c | 504 + ldap/servers/plugins/addn/addn.h | 26 + ldap/servers/plugins/automember/automember.c | 2971 +++ ldap/servers/plugins/automember/automember.h | 113 + ldap/servers/plugins/bitwise/bitwise.c | 195 + ldap/servers/plugins/chainingdb/cb.h | 478 + ldap/servers/plugins/chainingdb/cb_abandon.c | 60 + ldap/servers/plugins/chainingdb/cb_acl.c | 65 + ldap/servers/plugins/chainingdb/cb_add.c | 271 + ldap/servers/plugins/chainingdb/cb_bind.c | 313 + ldap/servers/plugins/chainingdb/cb_cleanup.c | 30 + ldap/servers/plugins/chainingdb/cb_close.c | 105 + ldap/servers/plugins/chainingdb/cb_compare.c | 250 + ldap/servers/plugins/chainingdb/cb_config.c | 642 + .../plugins/chainingdb/cb_conn_stateless.c | 972 + ldap/servers/plugins/chainingdb/cb_controls.c | 303 + ldap/servers/plugins/chainingdb/cb_debug.c | 21 + ldap/servers/plugins/chainingdb/cb_delete.c | 241 + ldap/servers/plugins/chainingdb/cb_init.c | 130 + ldap/servers/plugins/chainingdb/cb_instance.c | 2101 ++ ldap/servers/plugins/chainingdb/cb_modify.c | 280 + ldap/servers/plugins/chainingdb/cb_modrdn.c | 258 + ldap/servers/plugins/chainingdb/cb_monitor.c | 245 + ldap/servers/plugins/chainingdb/cb_schema.c | 55 + ldap/servers/plugins/chainingdb/cb_search.c | 765 + ldap/servers/plugins/chainingdb/cb_size.c | 29 + ldap/servers/plugins/chainingdb/cb_start.c | 50 + ldap/servers/plugins/chainingdb/cb_temp.c | 25 + ldap/servers/plugins/chainingdb/cb_test.c | 82 + ldap/servers/plugins/chainingdb/cb_unbind.c | 32 + ldap/servers/plugins/chainingdb/cb_utils.c | 383 + ldap/servers/plugins/collation/collate.c | 509 + ldap/servers/plugins/collation/collate.h | 44 + ldap/servers/plugins/collation/config.c | 185 + ldap/servers/plugins/collation/config.h | 20 + ldap/servers/plugins/collation/debug.c | 12 + ldap/servers/plugins/collation/orfilter.c | 1058 + ldap/servers/plugins/collation/orfilter.h | 18 + ldap/servers/plugins/cos/cos.c | 275 + ldap/servers/plugins/cos/cos_cache.c | 3483 +++ ldap/servers/plugins/cos/cos_cache.h | 26 + ldap/servers/plugins/deref/deref.c | 783 + ldap/servers/plugins/deref/deref.h | 30 + ldap/servers/plugins/distrib/Makefile | 60 + ldap/servers/plugins/distrib/Makefile.HPUX | 30 + ldap/servers/plugins/distrib/Makefile.HPUX64 | 30 + ldap/servers/plugins/distrib/Makefile.Linux | 33 + ldap/servers/plugins/distrib/Makefile.SOLARIS | 33 + .../plugins/distrib/Makefile.SOLARIS64 | 33 + .../plugins/distrib/Makefile.SOLARISx86 | 33 + ldap/servers/plugins/distrib/README | 23 + ldap/servers/plugins/distrib/distrib.c | 223 + ldap/servers/plugins/dna/addentries.sh | 2 + ldap/servers/plugins/dna/config.sh | 5 + ldap/servers/plugins/dna/del_test_entries.dns | 6 + ldap/servers/plugins/dna/delentries.sh | 2 + ldap/servers/plugins/dna/dna.c | 4768 ++++ ldap/servers/plugins/dna/editentries.sh | 2 + ldap/servers/plugins/dna/oneentry.sh | 2 + ldap/servers/plugins/dna/posix.ldif | 38 + ldap/servers/plugins/dna/posix_one.ldif | 10 + 
ldap/servers/plugins/dna/posix_test.ldif | 58 + ldap/servers/plugins/dna/seeconfig.sh | 2 + ldap/servers/plugins/dna/seeentries.sh | 2 + ldap/servers/plugins/dna/subtest.ldif | 31 + ldap/servers/plugins/http/http_client.c | 243 + ldap/servers/plugins/http/http_client.h | 72 + ldap/servers/plugins/http/http_impl.c | 1378 ++ ldap/servers/plugins/http/http_impl.h | 33 + ldap/servers/plugins/linkedattrs/fixup_task.c | 460 + .../plugins/linkedattrs/linked_attrs.c | 2185 ++ .../plugins/linkedattrs/linked_attrs.h | 114 + ldap/servers/plugins/memberof/memberof.c | 3325 +++ ldap/servers/plugins/memberof/memberof.h | 108 + .../plugins/memberof/memberof_config.c | 1128 + ldap/servers/plugins/mep/mep.c | 2912 +++ ldap/servers/plugins/mep/mep.h | 102 + ldap/servers/plugins/pam_passthru/README | 208 + .../plugins/pam_passthru/pam_passthru.h | 139 + .../plugins/pam_passthru/pam_ptconfig.c | 850 + .../plugins/pam_passthru/pam_ptdebug.c | 21 + .../servers/plugins/pam_passthru/pam_ptimpl.c | 463 + .../plugins/pam_passthru/pam_ptpreop.c | 713 + ldap/servers/plugins/passthru/PT-Notes | 33 + ldap/servers/plugins/passthru/passthru.h | 141 + ldap/servers/plugins/passthru/ptbind.c | 142 + ldap/servers/plugins/passthru/ptconfig.c | 377 + ldap/servers/plugins/passthru/ptconn.c | 456 + ldap/servers/plugins/passthru/ptdebug.c | 22 + ldap/servers/plugins/passthru/ptpreop.c | 264 + ldap/servers/plugins/passthru/ptutil.c | 89 + ldap/servers/plugins/posix-winsync/README | 50 + .../plugins/posix-winsync/posix-group-func.c | 1030 + .../plugins/posix-winsync/posix-group-func.h | 27 + .../plugins/posix-winsync/posix-group-task.c | 510 + .../posix-winsync/posix-winsync-config.c | 304 + .../plugins/posix-winsync/posix-winsync.c | 2222 ++ .../plugins/posix-winsync/posix-wsp-ident.h | 58 + .../plugins/presence/images/aim-offline.gif | Bin 0 -> 113 bytes .../plugins/presence/images/aim-online.gif | Bin 0 -> 895 bytes .../plugins/presence/images/icq-disabled.gif | Bin 0 -> 138 bytes .../plugins/presence/images/icq-offline.gif | Bin 0 -> 198 bytes .../plugins/presence/images/icq-online.gif | Bin 0 -> 198 bytes .../plugins/presence/images/yahoo-offline.gif | Bin 0 -> 84 bytes .../plugins/presence/images/yahoo-online.gif | Bin 0 -> 140 bytes ldap/servers/plugins/presence/presence.c | 1144 + ldap/servers/plugins/presence/presence.ldif | 52 + ldap/servers/plugins/pwdstorage/clear_pwd.c | 76 + ldap/servers/plugins/pwdstorage/crypt_pwd.c | 125 + ldap/servers/plugins/pwdstorage/md5.h | 71 + ldap/servers/plugins/pwdstorage/md5_pwd.c | 105 + ldap/servers/plugins/pwdstorage/md5c.c | 342 + .../plugins/pwdstorage/ns-mta-md5_pwd.bu | 408 + .../plugins/pwdstorage/ns-mta-md5_pwd.c | 87 + ldap/servers/plugins/pwdstorage/pbkdf2_pwd.c | 328 + ldap/servers/plugins/pwdstorage/pwd_init.c | 430 + ldap/servers/plugins/pwdstorage/pwd_util.c | 43 + ldap/servers/plugins/pwdstorage/pwdstorage.h | 99 + ldap/servers/plugins/pwdstorage/sha_pwd.c | 244 + ldap/servers/plugins/pwdstorage/smd5_pwd.c | 140 + ldap/servers/plugins/pwdstorage/ssha_pwd.c | 172 + ldap/servers/plugins/referint/referint.c | 1760 ++ ldap/servers/plugins/replication/cl5.h | 51 + ldap/servers/plugins/replication/cl5_api.c | 6301 ++++++ ldap/servers/plugins/replication/cl5_api.h | 409 + .../servers/plugins/replication/cl5_clcache.c | 1163 + .../servers/plugins/replication/cl5_clcache.h | 31 + ldap/servers/plugins/replication/cl5_config.c | 788 + ldap/servers/plugins/replication/cl5_init.c | 85 + ldap/servers/plugins/replication/cl5_test.c | 824 + ldap/servers/plugins/replication/cl5_test.h | 
27 + ldap/servers/plugins/replication/cl_crypt.c | 176 + ldap/servers/plugins/replication/cl_crypt.h | 24 + ldap/servers/plugins/replication/csnpl.c | 447 + ldap/servers/plugins/replication/csnpl.h | 36 + ldap/servers/plugins/replication/llist.c | 337 + ldap/servers/plugins/replication/llist.h | 33 + ldap/servers/plugins/replication/profile.c | 50 + .../plugins/replication/repl-session-plugin.h | 82 + ldap/servers/plugins/replication/repl5.h | 899 + ldap/servers/plugins/replication/repl5_agmt.c | 3315 +++ .../plugins/replication/repl5_agmtlist.c | 819 + .../plugins/replication/repl5_backoff.c | 231 + .../plugins/replication/repl5_connection.c | 2008 ++ .../plugins/replication/repl5_inc_protocol.c | 2384 ++ ldap/servers/plugins/replication/repl5_init.c | 983 + .../plugins/replication/repl5_mtnode_ext.c | 208 + .../plugins/replication/repl5_plugins.c | 1468 ++ .../plugins/replication/repl5_prot_private.h | 88 + .../plugins/replication/repl5_protocol.c | 522 + .../plugins/replication/repl5_protocol_util.c | 728 + .../plugins/replication/repl5_replica.c | 4142 ++++ .../replication/repl5_replica_config.c | 3654 +++ .../replication/repl5_replica_dnhash.c | 194 + .../plugins/replication/repl5_replica_hash.c | 226 + .../plugins/replication/repl5_replsupplier.c | 160 + ldap/servers/plugins/replication/repl5_ruv.c | 2447 ++ ldap/servers/plugins/replication/repl5_ruv.h | 130 + .../plugins/replication/repl5_schedule.c | 698 + .../plugins/replication/repl5_tot_protocol.c | 927 + .../servers/plugins/replication/repl5_total.c | 815 + .../plugins/replication/repl5_updatedn_list.c | 374 + .../plugins/replication/repl_connext.c | 237 + .../plugins/replication/repl_controls.c | 302 + ldap/servers/plugins/replication/repl_ext.c | 119 + ldap/servers/plugins/replication/repl_extop.c | 1755 ++ .../plugins/replication/repl_globals.c | 149 + .../servers/plugins/replication/repl_helper.c | 92 + .../servers/plugins/replication/repl_helper.h | 77 + ldap/servers/plugins/replication/repl_opext.c | 88 + .../plugins/replication/repl_session_plugin.c | 164 + .../servers/plugins/replication/repl_shared.h | 123 + ldap/servers/plugins/replication/replutil.c | 1086 + .../replication/test_repl_session_plugin.c | 301 + .../plugins/replication/tests/dnp_sim.c | 1002 + .../plugins/replication/tests/dnp_sim2.c | 940 + .../plugins/replication/tests/dnp_sim3.c | 1399 ++ .../servers/plugins/replication/tests/makesim | 53 + ldap/servers/plugins/replication/urp.c | 2233 ++ ldap/servers/plugins/replication/urp.h | 61 + ldap/servers/plugins/replication/urp_glue.c | 241 + .../plugins/replication/urp_tombstone.c | 458 + .../plugins/replication/windows_connection.c | 1870 ++ .../replication/windows_inc_protocol.c | 1623 ++ .../plugins/replication/windows_private.c | 2784 +++ .../replication/windows_prot_private.h | 53 + .../replication/windows_protocol_util.c | 5802 +++++ .../replication/windows_tot_protocol.c | 441 + .../servers/plugins/replication/windowsrepl.h | 233 + .../plugins/replication/winsync-plugin.h | 252 + ldap/servers/plugins/retrocl/linktest.c | 25 + ldap/servers/plugins/retrocl/retrocl.c | 690 + ldap/servers/plugins/retrocl/retrocl.h | 148 + ldap/servers/plugins/retrocl/retrocl.txt | 110 + ldap/servers/plugins/retrocl/retrocl_cn.c | 423 + ldap/servers/plugins/retrocl/retrocl_create.c | 341 + ldap/servers/plugins/retrocl/retrocl_po.c | 694 + .../servers/plugins/retrocl/retrocl_rootdse.c | 72 + ldap/servers/plugins/retrocl/retrocl_trim.c | 496 + ldap/servers/plugins/rever/pbe.c | 568 + ldap/servers/plugins/rever/rever.c | 148 
+ ldap/servers/plugins/rever/rever.h | 43 + ldap/servers/plugins/roles/roles_cache.c | 2124 ++ ldap/servers/plugins/roles/roles_cache.h | 63 + ldap/servers/plugins/roles/roles_plugin.c | 357 + .../plugins/rootdn_access/rootdn_access.c | 739 + .../plugins/rootdn_access/rootdn_access.h | 27 + .../plugins/schema_reload/schema_reload.c | 283 + .../servers/plugins/statechange/statechange.c | 490 + ldap/servers/plugins/sync/sync.h | 182 + ldap/servers/plugins/sync/sync_init.c | 148 + ldap/servers/plugins/sync/sync_persist.c | 697 + ldap/servers/plugins/sync/sync_refresh.c | 743 + ldap/servers/plugins/sync/sync_util.c | 703 + ldap/servers/plugins/syntaxes/bin.c | 370 + ldap/servers/plugins/syntaxes/bitstring.c | 235 + ldap/servers/plugins/syntaxes/ces.c | 445 + ldap/servers/plugins/syntaxes/cis.c | 1295 ++ ldap/servers/plugins/syntaxes/debug.c | 19 + .../servers/plugins/syntaxes/deliverymethod.c | 304 + ldap/servers/plugins/syntaxes/dn.c | 203 + ldap/servers/plugins/syntaxes/facsimile.c | 310 + ldap/servers/plugins/syntaxes/guide.c | 717 + ldap/servers/plugins/syntaxes/int.c | 285 + ldap/servers/plugins/syntaxes/nameoptuid.c | 279 + ldap/servers/plugins/syntaxes/numericstring.c | 285 + ldap/servers/plugins/syntaxes/phonetic.c | 649 + ldap/servers/plugins/syntaxes/sicis.c | 167 + ldap/servers/plugins/syntaxes/string.c | 955 + ldap/servers/plugins/syntaxes/syntax.h | 187 + ldap/servers/plugins/syntaxes/syntax_common.c | 88 + ldap/servers/plugins/syntaxes/tel.c | 294 + ldap/servers/plugins/syntaxes/teletex.c | 328 + ldap/servers/plugins/syntaxes/telex.c | 243 + ldap/servers/plugins/syntaxes/validate.c | 568 + ldap/servers/plugins/syntaxes/validate_task.c | 273 + ldap/servers/plugins/syntaxes/value.c | 390 + ldap/servers/plugins/uiduniq/7bit.c | 852 + ldap/servers/plugins/uiduniq/UID-Notes | 96 + ldap/servers/plugins/uiduniq/plugin-utils.h | 65 + ldap/servers/plugins/uiduniq/uid.c | 1650 ++ ldap/servers/plugins/uiduniq/utils.c | 224 + ldap/servers/plugins/usn/usn.c | 732 + ldap/servers/plugins/usn/usn.h | 28 + ldap/servers/plugins/usn/usn_cleanup.c | 371 + .../plugins/vattrsp_template/vattrsp.c | 384 + ldap/servers/plugins/views/views.c | 1801 ++ ldap/servers/plugins/whoami/whoami.c | 113 + ldap/servers/slapd/abandon.c | 160 + ldap/servers/slapd/add.c | 1021 + ldap/servers/slapd/agtmmap.c | 314 + ldap/servers/slapd/agtmmap.h | 193 + ldap/servers/slapd/apibroker.c | 298 + ldap/servers/slapd/attr.c | 1016 + ldap/servers/slapd/attrlist.c | 327 + ldap/servers/slapd/attrsyntax.c | 1740 ++ ldap/servers/slapd/auditlog.c | 386 + ldap/servers/slapd/auth.c | 532 + ldap/servers/slapd/auth.h | 23 + ldap/servers/slapd/ava.c | 69 + ldap/servers/slapd/back-ldbm/ancestorid.c | 399 + ldap/servers/slapd/back-ldbm/archive.c | 499 + ldap/servers/slapd/back-ldbm/attrcrypt.h | 41 + ldap/servers/slapd/back-ldbm/back-ldbm.h | 874 + ldap/servers/slapd/back-ldbm/backentry.c | 134 + ldap/servers/slapd/back-ldbm/cache.c | 2224 ++ ldap/servers/slapd/back-ldbm/cleanup.c | 75 + ldap/servers/slapd/back-ldbm/close.c | 52 + .../slapd/back-ldbm/db-bdb/bdb_config.c | 2217 ++ .../slapd/back-ldbm/db-bdb/bdb_import.c | 3423 +++ .../back-ldbm/db-bdb/bdb_import_threads.c | 3988 ++++ .../back-ldbm/db-bdb/bdb_instance_config.c | 293 + .../slapd/back-ldbm/db-bdb/bdb_layer.c | 6086 +++++ .../slapd/back-ldbm/db-bdb/bdb_layer.h | 164 + .../slapd/back-ldbm/db-bdb/bdb_ldif2db.c | 3314 +++ .../servers/slapd/back-ldbm/db-bdb/bdb_misc.c | 394 + .../slapd/back-ldbm/db-bdb/bdb_monitor.c | 310 + .../slapd/back-ldbm/db-bdb/bdb_upgrade.c | 411 + 
.../slapd/back-ldbm/db-bdb/bdb_verify.c | 233 + .../slapd/back-ldbm/db-bdb/bdb_version.c | 199 + ldap/servers/slapd/back-ldbm/dblayer.c | 1385 ++ ldap/servers/slapd/back-ldbm/dblayer.h | 173 + ldap/servers/slapd/back-ldbm/dbsize.c | 33 + ldap/servers/slapd/back-ldbm/dbverify.c | 27 + ldap/servers/slapd/back-ldbm/dn2entry.c | 269 + ldap/servers/slapd/back-ldbm/entrystore.c | 20 + ldap/servers/slapd/back-ldbm/filterindex.c | 1084 + ldap/servers/slapd/back-ldbm/findentry.c | 390 + ldap/servers/slapd/back-ldbm/haschildren.c | 14 + ldap/servers/slapd/back-ldbm/id2entry.c | 463 + ldap/servers/slapd/back-ldbm/idl.c | 1597 ++ ldap/servers/slapd/back-ldbm/idl_common.c | 550 + ldap/servers/slapd/back-ldbm/idl_new.c | 1026 + ldap/servers/slapd/back-ldbm/idl_set.c | 549 + ldap/servers/slapd/back-ldbm/idl_shim.c | 148 + ldap/servers/slapd/back-ldbm/import.c | 123 + ldap/servers/slapd/back-ldbm/import.h | 227 + ldap/servers/slapd/back-ldbm/index.c | 2600 +++ ldap/servers/slapd/back-ldbm/init.c | 194 + ldap/servers/slapd/back-ldbm/instance.c | 433 + ldap/servers/slapd/back-ldbm/ldbm_abandon.c | 23 + ldap/servers/slapd/back-ldbm/ldbm_add.c | 1504 ++ ldap/servers/slapd/back-ldbm/ldbm_attr.c | 1247 ++ ldap/servers/slapd/back-ldbm/ldbm_attrcrypt.c | 1650 ++ .../slapd/back-ldbm/ldbm_attrcrypt_config.c | 312 + ldap/servers/slapd/back-ldbm/ldbm_bind.c | 113 + ldap/servers/slapd/back-ldbm/ldbm_compare.c | 104 + ldap/servers/slapd/back-ldbm/ldbm_config.c | 1757 ++ ldap/servers/slapd/back-ldbm/ldbm_config.h | 167 + ldap/servers/slapd/back-ldbm/ldbm_delete.c | 1546 ++ ldap/servers/slapd/back-ldbm/ldbm_entryrdn.c | 3518 +++ .../slapd/back-ldbm/ldbm_index_config.c | 457 + .../slapd/back-ldbm/ldbm_instance_config.c | 1241 + ldap/servers/slapd/back-ldbm/ldbm_modify.c | 1102 + ldap/servers/slapd/back-ldbm/ldbm_modrdn.c | 2204 ++ ldap/servers/slapd/back-ldbm/ldbm_search.c | 1919 ++ ldap/servers/slapd/back-ldbm/ldbm_unbind.c | 22 + ldap/servers/slapd/back-ldbm/ldbm_usn.c | 208 + ldap/servers/slapd/back-ldbm/ldif2ldbm.c | 367 + ldap/servers/slapd/back-ldbm/matchrule.c | 153 + ldap/servers/slapd/back-ldbm/misc.c | 628 + ldap/servers/slapd/back-ldbm/nextid.c | 221 + ldap/servers/slapd/back-ldbm/parents.c | 192 + ldap/servers/slapd/back-ldbm/perfctrs.c | 290 + ldap/servers/slapd/back-ldbm/perfctrs.h | 60 + .../servers/slapd/back-ldbm/proto-back-ldbm.h | 673 + ldap/servers/slapd/back-ldbm/rmdb.c | 63 + ldap/servers/slapd/back-ldbm/seq.c | 291 + ldap/servers/slapd/back-ldbm/sort.c | 872 + ldap/servers/slapd/back-ldbm/start.c | 189 + .../back-ldbm/tools/index_dump/index_dump.c | 212 + ldap/servers/slapd/back-ldbm/uniqueid2entry.c | 80 + ldap/servers/slapd/back-ldbm/vlv.c | 2097 ++ ldap/servers/slapd/back-ldbm/vlv_key.c | 72 + ldap/servers/slapd/back-ldbm/vlv_key.h | 30 + ldap/servers/slapd/back-ldbm/vlv_srch.c | 861 + ldap/servers/slapd/back-ldbm/vlv_srch.h | 149 + ldap/servers/slapd/backend.c | 679 + ldap/servers/slapd/backend_manager.c | 467 + ldap/servers/slapd/bind.c | 932 + ldap/servers/slapd/bitset.c | 55 + ldap/servers/slapd/bulk_import.c | 145 + ldap/servers/slapd/ch_malloc.c | 351 + ldap/servers/slapd/charray.c | 520 + ldap/servers/slapd/compare.c | 195 + ldap/servers/slapd/computed.c | 325 + ldap/servers/slapd/config.c | 565 + ldap/servers/slapd/configdse.c | 651 + ldap/servers/slapd/connection.c | 2358 ++ ldap/servers/slapd/conntable.c | 712 + ldap/servers/slapd/control.c | 683 + ldap/servers/slapd/counters.c | 150 + ldap/servers/slapd/csn.c | 387 + ldap/servers/slapd/csngen.c | 818 + ldap/servers/slapd/csngen.h | 34 
+ ldap/servers/slapd/csnset.c | 319 + ldap/servers/slapd/daemon.c | 2786 +++ ldap/servers/slapd/defbackend.c | 206 + ldap/servers/slapd/delete.c | 387 + ldap/servers/slapd/detach.c | 306 + ldap/servers/slapd/disconnect_error_strings.h | 37 + ldap/servers/slapd/disconnect_errors.h | 38 + ldap/servers/slapd/dl.c | 232 + ldap/servers/slapd/dn.c | 3362 +++ ldap/servers/slapd/dse.c | 2844 +++ ldap/servers/slapd/dynalib.c | 143 + ldap/servers/slapd/entry.c | 4214 ++++ ldap/servers/slapd/entrywsi.c | 1536 ++ ldap/servers/slapd/errormap.c | 186 + ldap/servers/slapd/eventq.c | 479 + ldap/servers/slapd/extendop.c | 544 + ldap/servers/slapd/factory.c | 451 + ldap/servers/slapd/fe.h | 167 + ldap/servers/slapd/features.c | 53 + ldap/servers/slapd/fedse.c | 3000 +++ ldap/servers/slapd/fileio.c | 84 + ldap/servers/slapd/filter.c | 1618 ++ ldap/servers/slapd/filter.h | 45 + ldap/servers/slapd/filtercmp.c | 437 + ldap/servers/slapd/filterentry.c | 1054 + ldap/servers/slapd/generation.c | 134 + ldap/servers/slapd/getfilelist.c | 289 + ldap/servers/slapd/getopt_ext.c | 238 + ldap/servers/slapd/getopt_ext.h | 106 + ldap/servers/slapd/getsocketpeer.c | 136 + ldap/servers/slapd/getsocketpeer.h | 16 + ldap/servers/slapd/globals.c | 92 + ldap/servers/slapd/house.c | 92 + ldap/servers/slapd/http.h | 50 + ldap/servers/slapd/init.c | 52 + ldap/servers/slapd/intrinsics.h | 86 + ldap/servers/slapd/ldaputil.c | 2381 ++ ldap/servers/slapd/ldbmlinktest.c | 45 + ldap/servers/slapd/lenstr.c | 89 + ldap/servers/slapd/libglobs.c | 8633 +++++++ ldap/servers/slapd/libmakefile | 130 + ldap/servers/slapd/listConfigAttrs.pl | 109 + ldap/servers/slapd/localhost.c | 238 + ldap/servers/slapd/lock.c | 75 + ldap/servers/slapd/log.c | 5328 +++++ ldap/servers/slapd/log.h | 240 + ldap/servers/slapd/main.c | 2894 +++ ldap/servers/slapd/mapping_tree.c | 3779 ++++ ldap/servers/slapd/match.c | 339 + ldap/servers/slapd/mkDBErrStrs.pl | 86 + ldap/servers/slapd/modify.c | 1481 ++ ldap/servers/slapd/modrdn.c | 724 + ldap/servers/slapd/modutil.c | 817 + ldap/servers/slapd/monitor.c | 182 + ldap/servers/slapd/object.c | 101 + ldap/servers/slapd/objset.c | 355 + ldap/servers/slapd/openldapber.h | 27 + ldap/servers/slapd/operation.c | 653 + ldap/servers/slapd/opshared.c | 1729 ++ ldap/servers/slapd/pagedresults.c | 1054 + ldap/servers/slapd/passwd_extop.c | 946 + ldap/servers/slapd/pblock.c | 4494 ++++ ldap/servers/slapd/pblock_v3.h | 222 + ldap/servers/slapd/plugin.c | 4561 ++++ ldap/servers/slapd/plugin_acl.c | 225 + ldap/servers/slapd/plugin_internal_op.c | 884 + ldap/servers/slapd/plugin_mmr.c | 71 + ldap/servers/slapd/plugin_mr.c | 773 + ldap/servers/slapd/plugin_role.c | 39 + ldap/servers/slapd/plugin_syntax.c | 1015 + ldap/servers/slapd/poll_using_select.c | 139 + ldap/servers/slapd/poll_using_select.h | 52 + ldap/servers/slapd/prerrstrs.h | 142 + ldap/servers/slapd/protect_db.c | 522 + ldap/servers/slapd/protect_db.h | 88 + ldap/servers/slapd/proto-slap.h | 1568 ++ ldap/servers/slapd/proxyauth.c | 227 + ldap/servers/slapd/psearch.c | 735 + ldap/servers/slapd/pw.c | 3389 +++ ldap/servers/slapd/pw.h | 51 + ldap/servers/slapd/pw_mgmt.c | 315 + ldap/servers/slapd/pw_retry.c | 268 + ldap/servers/slapd/pw_verify.c | 175 + ldap/servers/slapd/pw_verify.h | 17 + ldap/servers/slapd/rdn.c | 1093 + ldap/servers/slapd/referral.c | 513 + ldap/servers/slapd/regex.c | 258 + ldap/servers/slapd/resourcelimit.c | 613 + ldap/servers/slapd/result.c | 2354 ++ ldap/servers/slapd/rootdse.c | 351 + ldap/servers/slapd/sasl_io.c | 778 + ldap/servers/slapd/sasl_map.c | 
724 + ldap/servers/slapd/saslbind.c | 1181 + ldap/servers/slapd/schema.c | 6589 ++++++ ldap/servers/slapd/schemaparse.c | 284 + ldap/servers/slapd/search.c | 434 + ldap/servers/slapd/secerrstrs.h | 394 + ldap/servers/slapd/security_wrappers.c | 458 + ldap/servers/slapd/slap.h | 2725 +++ ldap/servers/slapd/slapd.lite.key | 11 + ldap/servers/slapd/slapd.normal.key | 12 + ldap/servers/slapd/slapd_plhash.c | 67 + ldap/servers/slapd/slapi-plugin-compat4.h | 176 + ldap/servers/slapd/slapi-plugin.h | 8276 +++++++ ldap/servers/slapd/slapi-private.h | 1468 ++ ldap/servers/slapd/slapi2nspr.c | 274 + ldap/servers/slapd/slapi_counter.c | 398 + ldap/servers/slapd/slapi_pal.c | 417 + ldap/servers/slapd/slapi_pal.h | 55 + ldap/servers/slapd/snmp_collator.c | 825 + ldap/servers/slapd/snmp_collator.h | 35 + ldap/servers/slapd/snoop.c | 48 + ldap/servers/slapd/sort.c | 106 + ldap/servers/slapd/ssl.c | 3055 +++ ldap/servers/slapd/sslerrstrs.h | 303 + ldap/servers/slapd/start_tls_extop.c | 421 + ldap/servers/slapd/statechange.h | 57 + ldap/servers/slapd/str2filter.c | 501 + ldap/servers/slapd/strdup.c | 34 + ldap/servers/slapd/stubrepl.c | 50 + ldap/servers/slapd/stubs.c | 38 + ldap/servers/slapd/subentry.c | 65 + ldap/servers/slapd/task.c | 3029 +++ ldap/servers/slapd/tempnam.c | 53 + ldap/servers/slapd/test-plugins/Makefile | 52 + ldap/servers/slapd/test-plugins/Makefile.AIX | 39 + ldap/servers/slapd/test-plugins/Makefile.BSDI | 34 + ldap/servers/slapd/test-plugins/Makefile.HPUX | 28 + .../slapd/test-plugins/Makefile.HPUX64 | 27 + ldap/servers/slapd/test-plugins/Makefile.IRIX | 34 + .../servers/slapd/test-plugins/Makefile.Linux | 34 + ldap/servers/slapd/test-plugins/Makefile.OSF1 | 33 + .../slapd/test-plugins/Makefile.ReliantUNIX | 34 + .../slapd/test-plugins/Makefile.SOLARIS | 31 + .../slapd/test-plugins/Makefile.SOLARIS64 | 31 + .../slapd/test-plugins/Makefile.SOLARISx86 | 34 + .../slapd/test-plugins/Makefile.UnixWare | 34 + .../slapd/test-plugins/Makefile.UnixWareUDK | 34 + .../slapd/test-plugins/Makefile.server | 59 + ldap/servers/slapd/test-plugins/README | 190 + .../servers/slapd/test-plugins/clients/README | 48 + .../slapd/test-plugins/clients/ReqExtOp.java | 80 + .../slapd/test-plugins/clients/reqextop.c | 92 + ldap/servers/slapd/test-plugins/installDse.pl | 138 + ldap/servers/slapd/test-plugins/nicknames | 13 + ldap/servers/slapd/test-plugins/sampletask.c | 191 + ldap/servers/slapd/test-plugins/testbind.c | 256 + .../slapd/test-plugins/testdatainterop.c | 295 + .../slapd/test-plugins/testdbinterop.c | 177 + .../slapd/test-plugins/testdbinterop.h | 28 + ldap/servers/slapd/test-plugins/testentry.c | 135 + .../slapd/test-plugins/testextendedop.c | 192 + ldap/servers/slapd/test-plugins/testgetip.c | 142 + ldap/servers/slapd/test-plugins/testpostop.c | 372 + ldap/servers/slapd/test-plugins/testpreop.c | 243 + .../servers/slapd/test-plugins/testsaslbind.c | 144 + ldap/servers/slapd/thread_data.c | 209 + ldap/servers/slapd/time.c | 694 + ldap/servers/slapd/tools/dbscan.c | 1369 ++ ldap/servers/slapd/tools/eggencode.c | 65 + ldap/servers/slapd/tools/ldaptool-sasl.c | 341 + ldap/servers/slapd/tools/ldaptool-sasl.h | 45 + ldap/servers/slapd/tools/ldaptool.h | 166 + ldap/servers/slapd/tools/ldclt/README | 1 + ldap/servers/slapd/tools/ldclt/data.c | 395 + .../slapd/tools/ldclt/examples/001/add.ksh | 26 + .../tools/ldclt/examples/001/add_incr.ksh | 29 + .../slapd/tools/ldclt/examples/001/config.ksh | 27 + .../slapd/tools/ldclt/examples/001/delete.ksh | 27 + .../slapd/tools/ldclt/examples/001/env.ksh | 22 + 
.../slapd/tools/ldclt/examples/001/search.ksh | 27 + .../slapd/tools/ldclt/examples/002/add.ksh | 30 + .../slapd/tools/ldclt/examples/002/config.ksh | 28 + .../slapd/tools/ldclt/examples/002/env.ksh | 22 + .../slapd/tools/ldclt/examples/002/ldif01.ksh | 31 + .../slapd/tools/ldclt/examples/002/ldif02.ksh | 31 + .../slapd/tools/ldclt/examples/002/ldif03.ksh | 31 + .../slapd/tools/ldclt/examples/002/ofile | 12 + .../servers/slapd/tools/ldclt/examples/README | 52 + ldap/servers/slapd/tools/ldclt/ldap-private.h | 306 + ldap/servers/slapd/tools/ldclt/ldapfct.c | 3651 +++ ldap/servers/slapd/tools/ldclt/ldclt.c | 2854 +++ ldap/servers/slapd/tools/ldclt/ldclt.h | 773 + ldap/servers/slapd/tools/ldclt/ldclt.man | 754 + ldap/servers/slapd/tools/ldclt/ldclt.use | 87 + ldap/servers/slapd/tools/ldclt/ldcltU.c | 230 + ldap/servers/slapd/tools/ldclt/opCheck.c | 838 + ldap/servers/slapd/tools/ldclt/parser.c | 520 + ldap/servers/slapd/tools/ldclt/port.c | 85 + ldap/servers/slapd/tools/ldclt/port.h | 60 + ldap/servers/slapd/tools/ldclt/remote.h | 46 + ldap/servers/slapd/tools/ldclt/repcheck.c | 167 + ldap/servers/slapd/tools/ldclt/repslave.c | 331 + ldap/servers/slapd/tools/ldclt/scalab01.c | 957 + ldap/servers/slapd/tools/ldclt/scalab01.h | 98 + ldap/servers/slapd/tools/ldclt/srv.c | 106 + ldap/servers/slapd/tools/ldclt/threadMain.c | 1095 + ldap/servers/slapd/tools/ldclt/utils.c | 204 + ldap/servers/slapd/tools/ldclt/utils.h | 51 + ldap/servers/slapd/tools/ldclt/version.c | 1 + ldap/servers/slapd/tools/ldclt/workarounds.c | 89 + ldap/servers/slapd/tools/ldif.c | 157 + ldap/servers/slapd/tools/migratecred.c | 151 + ldap/servers/slapd/tools/mkdep.c | 282 + ldap/servers/slapd/tools/mmldif.c | 1735 ++ ldap/servers/slapd/tools/pwenc.c | 440 + ldap/servers/slapd/tools/rsearch/addthread.c | 434 + ldap/servers/slapd/tools/rsearch/addthread.h | 28 + ldap/servers/slapd/tools/rsearch/infadd.c | 336 + ldap/servers/slapd/tools/rsearch/infadd.h | 31 + ldap/servers/slapd/tools/rsearch/main.c | 279 + ldap/servers/slapd/tools/rsearch/nametable.c | 202 + ldap/servers/slapd/tools/rsearch/nametable.h | 41 + ldap/servers/slapd/tools/rsearch/rsearch.c | 515 + ldap/servers/slapd/tools/rsearch/rsearch.h | 60 + .../tools/rsearch/scripts/dbgen-FamilyNames | 13419 +++++++++++ .../tools/rsearch/scripts/dbgen-GivenNames | 8606 +++++++ .../tools/rsearch/scripts/dbgen-OrgUnits | 5 + .../slapd/tools/rsearch/scripts/dbgen.pl.in | 1178 + ldap/servers/slapd/tools/rsearch/sdattable.c | 245 + ldap/servers/slapd/tools/rsearch/sdattable.h | 40 + .../slapd/tools/rsearch/searchthread.c | 693 + .../slapd/tools/rsearch/searchthread.h | 27 + ldap/servers/slapd/unbind.c | 94 + ldap/servers/slapd/uniqueid.c | 279 + ldap/servers/slapd/uniqueidgen.c | 225 + ldap/servers/slapd/utf8.c | 410 + ldap/servers/slapd/utf8compare.c | 2286 ++ ldap/servers/slapd/util.c | 1638 ++ ldap/servers/slapd/uuid.c | 899 + ldap/servers/slapd/uuid.h | 119 + ldap/servers/slapd/value.c | 580 + ldap/servers/slapd/valueset.c | 1485 ++ ldap/servers/slapd/vattr.c | 2617 +++ ldap/servers/slapd/vattr_spi.h | 64 + ldap/servers/slapd/views.h | 37 + ldap/servers/snmp/ldap-agent.c | 764 + ldap/servers/snmp/ldap-agent.conf.in | 30 + ldap/servers/snmp/ldap-agent.h | 194 + ldap/servers/snmp/main.c | 555 + ldap/servers/snmp/redhat-directory.mib | 818 + ldap/systools/README | 31 + ldap/systools/getHPPatches.pl | 91 + ldap/systools/getSolPatches.pl | 72 + ldap/systools/hp_patches.c | 24 + ldap/systools/mergeSolPatches.pl | 65 + ldap/systools/pio.h | 37 + ldap/systools/sol_patches.c | 183 + 
ldap/systools/viewcore.c | 480 + lib/base/.cvsignore | 1 + lib/base/crit.cpp | 393 + lib/base/dnsdmain.cpp | 156 + lib/base/ereport.cpp | 70 + lib/base/file.cpp | 356 + lib/base/fsmutex.cpp | 155 + lib/base/lexer_pvt.h | 40 + lib/base/nscperror.c | 169 + lib/base/plist.cpp | 1163 + lib/base/plist_pvt.h | 133 + lib/base/pool.cpp | 628 + lib/base/shexp.cpp | 290 + lib/base/system.cpp | 244 + lib/base/systhr.cpp | 180 + lib/base/util.cpp | 475 + lib/ldaputil/.cvsignore | 1 + lib/ldaputil/cert.c | 504 + lib/ldaputil/certmap.c | 1612 ++ lib/ldaputil/certmap.conf | 51 + lib/ldaputil/dbconf.c | 686 + lib/ldaputil/encode.c | 151 + lib/ldaputil/errors.c | 210 + lib/ldaputil/examples/Makefile | 89 + lib/ldaputil/examples/README | 100 + lib/ldaputil/examples/init.c | 44 + lib/ldaputil/examples/plugin.c | 240 + lib/ldaputil/examples/plugin.h | 35 + lib/ldaputil/init.c | 138 + lib/ldaputil/ldapauth.c | 214 + lib/ldaputil/ldapu-changes.html | 406 + lib/ldaputil/ldaputili.h | 68 + lib/ldaputil/vtable.c | 210 + lib/libaccess/.cvsignore | 1 + lib/libaccess/access_plhash.cpp | 73 + lib/libaccess/access_plhash.h | 24 + lib/libaccess/acl.tab.cpp | 1703 ++ lib/libaccess/acl.tab.h | 52 + lib/libaccess/acl.yy.cpp | 2005 ++ lib/libaccess/aclcache.cpp | 492 + lib/libaccess/aclcache.h | 34 + lib/libaccess/aclerror.cpp | 254 + lib/libaccess/acleval.cpp | 564 + lib/libaccess/aclflush.cpp | 185 + lib/libaccess/aclpriv.h | 191 + lib/libaccess/aclscan.h | 33 + lib/libaccess/aclscan.l | 379 + lib/libaccess/aclspace.cpp | 45 + lib/libaccess/acltext.y | 928 + lib/libaccess/acltools.cpp | 1749 ++ lib/libaccess/aclutil.cpp | 234 + lib/libaccess/aclutil.h | 34 + lib/libaccess/authdb.cpp | 231 + lib/libaccess/las.h | 40 + lib/libaccess/lasdns.cpp | 382 + lib/libaccess/lasdns.h | 19 + lib/libaccess/lasgroup.cpp | 174 + lib/libaccess/lasip.cpp | 736 + lib/libaccess/lasip.h | 24 + lib/libaccess/lastod.cpp | 187 + lib/libaccess/lasuser.cpp | 162 + lib/libaccess/ldapauth.h | 42 + lib/libaccess/method.cpp | 171 + lib/libaccess/nsautherr.cpp | 134 + lib/libaccess/nseframe.cpp | 216 + lib/libaccess/oneeval.cpp | 1057 + lib/libaccess/oneeval.h | 24 + lib/libaccess/parse.h | 29 + lib/libaccess/permhash.h | 78 + lib/libaccess/register.cpp | 871 + lib/libaccess/symbols.cpp | 353 + lib/libaccess/usi.cpp | 379 + lib/libaccess/usrcache.cpp | 654 + lib/libaccess/yy-sed | 24 + lib/libadmin/.cvsignore | 1 + lib/libadmin/error.c | 107 + lib/libadmin/template.c | 70 + lib/libadmin/util.c | 190 + lib/libsi18n/getstrmem.h | 1156 + lib/libsi18n/getstrprop.c | 86 + lib/libsi18n/gsslapd.h | 33 + lib/libsi18n/makstrdb.c | 216 + lib/libsi18n/reshash.c | 292 + lib/libsi18n/reshash.h | 64 + lib/libsi18n/txtfile.c | 131 + lib/libsi18n/txtfile.h | 56 + m4/db.m4 | 143 + m4/doxygen.m4 | 18 + m4/fhs.m4 | 40 + m4/netsnmp.m4 | 102 + m4/openldap.m4 | 166 + m4/selinux.m4 | 26 + m4/systemd.m4 | 138 + man/man1/cl-dump.1 | 99 + man/man1/cl-dump.pl.1 | 99 + man/man1/dbgen.pl.1 | 93 + man/man1/dbscan.1 | 106 + man/man1/ds-logpipe.py.1 | 100 + man/man1/ds-replcheck.1 | 163 + man/man1/dsktune.1 | 64 + man/man1/infadd.1 | 82 + man/man1/ldap-agent.1 | 59 + man/man1/ldclt.1 | 235 + man/man1/ldif.1 | 55 + man/man1/logconv.pl.1 | 139 + man/man1/migratecred.1 | 65 + man/man1/mmldif.1 | 61 + man/man1/pwdhash.1 | 62 + man/man1/readnsstate.1 | 50 + man/man1/repl-monitor.1 | 138 + man/man1/repl-monitor.pl.1 | 138 + man/man1/rsearch.1 | 138 + man/man5/99user.ldif.5 | 54 + man/man5/certmap.conf.5 | 133 + man/man5/dirsrv.5 | 46 + man/man5/dirsrv.systemd.5 | 39 + 
man/man5/slapd-collations.conf.5 | 51 + man/man5/template-initconfig.5 | 62 + man/man8/bak2db.8 | 59 + man/man8/bak2db.pl.8 | 78 + man/man8/cleanallruv.pl.8 | 81 + man/man8/db2bak.8 | 63 + man/man8/db2bak.pl.8 | 77 + man/man8/db2index.8 | 67 + man/man8/db2index.pl.8 | 81 + man/man8/db2ldif.8 | 106 + man/man8/db2ldif.pl.8 | 120 + man/man8/dbmon.sh.8 | 54 + man/man8/dbverify.8 | 63 + man/man8/dn2rdn.8 | 57 + man/man8/fixup-linkedattrs.pl.8 | 72 + man/man8/fixup-memberof.pl.8 | 76 + man/man8/ldif2db.8 | 89 + man/man8/ldif2db.pl.8 | 98 + man/man8/ldif2ldap.8 | 62 + man/man8/migrate-ds.pl.8 | 164 + man/man8/monitor.8 | 67 + man/man8/ns-accountstatus.pl.8 | 95 + man/man8/ns-activate.pl.8 | 73 + man/man8/ns-inactivate.pl.8 | 72 + man/man8/ns-newpwpolicy.pl.8 | 79 + man/man8/ns-slapd.8 | 60 + man/man8/remove-ds.pl.8 | 58 + man/man8/restart-dirsrv.8 | 50 + man/man8/restoreconfig.8 | 46 + man/man8/saveconfig.8 | 46 + man/man8/schema-reload.pl.8 | 70 + man/man8/setup-ds.pl.8 | 116 + man/man8/start-dirsrv.8 | 50 + man/man8/status-dirsrv.8 | 55 + man/man8/stop-dirsrv.8 | 50 + man/man8/suffix2instance.8 | 51 + man/man8/syntax-validate.pl.8 | 73 + man/man8/upgradedb.8 | 56 + man/man8/upgradednformat.8 | 55 + man/man8/usn-tombstone-cleanup.pl.8 | 76 + man/man8/verify-db.pl.8 | 49 + man/man8/vlvindex.8 | 62 + profiling/stap/probe_do_search_detail.stp | 64 + profiling/stap/probe_log_access_detail.stp | 51 + profiling/stap/probe_op_shared_search.stp | 64 + rfcs/Makefile | 13 + rfcs/examples/template-bare-06.txt | 426 + rfcs/src/draft-wibrown-ldapssotoken-00.xml | 453 + rpm.mk | 136 + rpm/389-ds-base-devel.README | 4 + rpm/389-ds-base-git.sh | 16 + rpm/389-ds-base.spec.in | 2097 ++ rpm/add_patches.sh | 55 + rpm/rpmverrel.sh | 17 + src/Cargo.lock | 447 + src/Cargo.toml | 14 + src/cockpit/389-console/.babelrc | 7 + src/cockpit/389-console/.eslintignore | 1 + src/cockpit/389-console/.eslintrc.json | 56 + src/cockpit/389-console/README.md | 213 + src/cockpit/389-console/audit-ci.json | 7 + src/cockpit/389-console/buildAndRun.sh | 45 + .../389-console/cockpit_dist/banner.html | 16 + .../389-console/cockpit_dist/css/ds.css | 706 + .../fonts/OpenSans-Bold-webfont.woff | Bin 0 -> 98780 bytes .../fonts/OpenSans-Bold-webfont.woff2 | Bin 0 -> 63732 bytes .../fonts/OpenSans-BoldItalic-webfont.woff2 | Bin 0 -> 59512 bytes .../fonts/OpenSans-Italic-webfont.woff | Bin 0 -> 93880 bytes .../fonts/OpenSans-Italic-webfont.woff2 | Bin 0 -> 59912 bytes .../fonts/OpenSans-Light-webfont.ttf | Bin 0 -> 222412 bytes .../fonts/OpenSans-Light-webfont.woff | Bin 0 -> 97924 bytes .../fonts/OpenSans-Light-webfont.woff2 | Bin 0 -> 63180 bytes .../fonts/OpenSans-Regular-webfont.ttf | Bin 0 -> 217360 bytes .../fonts/OpenSans-Regular-webfont.woff | Bin 0 -> 96116 bytes .../fonts/OpenSans-Regular-webfont.woff2 | Bin 0 -> 61980 bytes .../fonts/OpenSans-Semibold-webfont.ttf | Bin 0 -> 221328 bytes .../fonts/OpenSans-Semibold-webfont.woff | Bin 0 -> 98464 bytes .../fonts/OpenSans-Semibold-webfont.woff2 | Bin 0 -> 63736 bytes .../OpenSans-SemiboldItalic-webfont.woff | Bin 0 -> 94548 bytes .../fonts/PatternFlyIcons-webfont.ttf | Bin 0 -> 37996 bytes .../fonts/PatternFlyIcons-webfont.woff | Bin 0 -> 38072 bytes .../fonts/fontawesome-webfont.woff | Bin 0 -> 98024 bytes .../fonts/fontawesome-webfont.woff2 | Bin 0 -> 77160 bytes .../fonts/glyphicons-halflings-regular.woff | Bin 0 -> 23424 bytes .../fonts/glyphicons-halflings-regular.woff2 | Bin 0 -> 18028 bytes .../cockpit_dist/images/sort_asc.png | Bin 0 -> 160 bytes 
.../cockpit_dist/images/sort_asc_disabled.png | Bin 0 -> 148 bytes .../cockpit_dist/images/sort_both.png | Bin 0 -> 201 bytes .../cockpit_dist/images/sort_desc.png | Bin 0 -> 158 bytes .../images/sort_desc_disabled.png | Bin 0 -> 146 bytes .../389-console/cockpit_dist/index.html | 21 + .../389-console/cockpit_dist/index.min.js.gz | Bin 0 -> 431829 bytes .../389-console/cockpit_dist/manifest.json | 13 + .../389-console/cockpit_dist/static/32px.png | Bin 0 -> 3121 bytes .../389-console/cockpit_dist/static/40px.png | Bin 0 -> 1880 bytes .../cockpit_dist/static/Typeahead.css | 256 + .../cockpit_dist/static/bootpopup.min.js | 1 + .../static/bootstrap-theme.min.css | 6 + .../static/bootstrap-theme.min.css.map | 1 + .../cockpit_dist/static/bootstrap.min.css | 6 + .../cockpit_dist/static/bootstrap.min.css.map | 1 + .../cockpit_dist/static/bootstrap.min.js | 7 + .../389-console/cockpit_dist/static/c3.min.js | 1 + .../389-console/cockpit_dist/static/d3.min.js | 5 + .../static/dataTables.datetime-moment.js | 75 + .../static/images/ui-icons_444444_256x240.png | Bin 0 -> 6992 bytes .../static/images/ui-icons_555555_256x240.png | Bin 0 -> 6988 bytes .../static/images/ui-icons_777620_256x240.png | Bin 0 -> 4549 bytes .../static/images/ui-icons_777777_256x240.png | Bin 0 -> 6999 bytes .../static/images/ui-icons_cc0000_256x240.png | Bin 0 -> 4549 bytes .../static/images/ui-icons_ffffff_256x240.png | Bin 0 -> 6299 bytes .../cockpit_dist/static/jquery-3.3.1.min.js | 4 + .../static/jquery.dataTables.min.css | 1 + .../static/jquery.dataTables.min.js | 167 + .../static/jquery.dataTables.select.min.js | 28 + .../static/jquery.timepicker.min.css | 1 + .../static/jquery.timepicker.min.js | 7 + .../cockpit_dist/static/jstree.min.js | 6 + .../cockpit_dist/static/moment.min.js | 2 + .../cockpit_dist/static/page.min.css | 1 + .../static/patternfly-additions.css | 8988 ++++++++ .../cockpit_dist/static/patternfly.css | 10535 +++++++++ .../cockpit_dist/static/patternfly.min.js | 2 + .../cockpit_dist/static/style.min.css | 2 + .../cockpit_dist/static/throbber.gif | Bin 0 -> 1720 bytes src/cockpit/389-console/node_modules.mk | 15 + .../org.port389.cockpit_console.metainfo.xml | 18 + src/cockpit/389-console/package-lock.json | 18659 ++++++++++++++++ src/cockpit/389-console/package.json | 69 + src/cockpit/389-console/src/banner.html | 16 + src/cockpit/389-console/src/css/ds.css | 706 + src/cockpit/389-console/src/database.jsx | 1387 ++ src/cockpit/389-console/src/ds.jsx | 1905 ++ .../src/fonts/OpenSans-Bold-webfont.woff | Bin 0 -> 98780 bytes .../src/fonts/OpenSans-Bold-webfont.woff2 | Bin 0 -> 63732 bytes .../fonts/OpenSans-BoldItalic-webfont.woff2 | Bin 0 -> 59512 bytes .../src/fonts/OpenSans-Italic-webfont.woff | Bin 0 -> 93880 bytes .../src/fonts/OpenSans-Italic-webfont.woff2 | Bin 0 -> 59912 bytes .../src/fonts/OpenSans-Light-webfont.ttf | Bin 0 -> 222412 bytes .../src/fonts/OpenSans-Light-webfont.woff | Bin 0 -> 97924 bytes .../src/fonts/OpenSans-Light-webfont.woff2 | Bin 0 -> 63180 bytes .../src/fonts/OpenSans-Regular-webfont.ttf | Bin 0 -> 217360 bytes .../src/fonts/OpenSans-Regular-webfont.woff | Bin 0 -> 96116 bytes .../src/fonts/OpenSans-Regular-webfont.woff2 | Bin 0 -> 61980 bytes .../src/fonts/OpenSans-Semibold-webfont.ttf | Bin 0 -> 221328 bytes .../src/fonts/OpenSans-Semibold-webfont.woff | Bin 0 -> 98464 bytes .../src/fonts/OpenSans-Semibold-webfont.woff2 | Bin 0 -> 63736 bytes .../OpenSans-SemiboldItalic-webfont.woff | Bin 0 -> 94548 bytes .../src/fonts/PatternFlyIcons-webfont.ttf | Bin 0 -> 37996 bytes 
.../src/fonts/PatternFlyIcons-webfont.woff | Bin 0 -> 38072 bytes .../src/fonts/fontawesome-webfont.woff | Bin 0 -> 98024 bytes .../src/fonts/fontawesome-webfont.woff2 | Bin 0 -> 77160 bytes .../fonts/glyphicons-halflings-regular.woff | Bin 0 -> 23424 bytes .../fonts/glyphicons-halflings-regular.woff2 | Bin 0 -> 18028 bytes .../389-console/src/images/sort_asc.png | Bin 0 -> 160 bytes .../src/images/sort_asc_disabled.png | Bin 0 -> 148 bytes .../389-console/src/images/sort_both.png | Bin 0 -> 201 bytes .../389-console/src/images/sort_desc.png | Bin 0 -> 158 bytes .../src/images/sort_desc_disabled.png | Bin 0 -> 146 bytes src/cockpit/389-console/src/index.es6 | 7 + src/cockpit/389-console/src/index.html | 21 + .../389-console/src/lib/customCollapse.jsx | 55 + .../src/lib/customTableToolbar.jsx | 77 + .../src/lib/database/attrEncryption.jsx | 185 + .../389-console/src/lib/database/backups.jsx | 1113 + .../389-console/src/lib/database/chaining.jsx | 1472 ++ .../src/lib/database/databaseConfig.jsx | 477 + .../src/lib/database/databaseModal.jsx | 532 + .../src/lib/database/databaseTables.jsx | 1219 + .../src/lib/database/globalPwp.jsx | 1345 ++ .../389-console/src/lib/database/indexes.jsx | 946 + .../389-console/src/lib/database/localPwp.jsx | 2375 ++ .../src/lib/database/referrals.jsx | 417 + .../389-console/src/lib/database/suffix.jsx | 1008 + .../src/lib/database/suffixConfig.jsx | 125 + .../src/lib/database/vlvIndexes.jsx | 953 + src/cockpit/389-console/src/lib/dsTable.jsx | 411 + .../389-console/src/lib/monitor/accesslog.jsx | 122 + .../src/lib/monitor/auditfaillog.jsx | 122 + .../389-console/src/lib/monitor/auditlog.jsx | 122 + .../src/lib/monitor/chainingMonitor.jsx | 182 + .../389-console/src/lib/monitor/dbMonitor.jsx | 348 + .../389-console/src/lib/monitor/errorlog.jsx | 144 + .../src/lib/monitor/monitorModals.jsx | 1150 + .../src/lib/monitor/monitorTables.jsx | 2701 +++ .../src/lib/monitor/replMonitor.jsx | 1605 ++ .../src/lib/monitor/serverMonitor.jsx | 232 + .../src/lib/monitor/snmpMonitor.jsx | 230 + .../src/lib/monitor/suffixMonitor.jsx | 406 + .../389-console/src/lib/notifications.jsx | 229 + .../src/lib/plugins/accountPolicy.jsx | 677 + .../src/lib/plugins/attributeUniqueness.jsx | 710 + .../src/lib/plugins/autoMembership.jsx | 1037 + .../389-console/src/lib/plugins/dna.jsx | 1170 + .../src/lib/plugins/linkedAttributes.jsx | 490 + .../src/lib/plugins/managedEntries.jsx | 904 + .../389-console/src/lib/plugins/memberOf.jsx | 1098 + .../lib/plugins/passthroughAuthentication.jsx | 1186 + .../src/lib/plugins/pluginBasicConfig.jsx | 350 + .../src/lib/plugins/pluginModal.jsx | 210 + .../src/lib/plugins/pluginTables.jsx | 1688 ++ .../src/lib/plugins/referentialIntegrity.jsx | 786 + .../src/lib/plugins/retroChangelog.jsx | 277 + .../src/lib/plugins/rootDNAccessControl.jsx | 348 + .../389-console/src/lib/plugins/usn.jsx | 357 + .../389-console/src/lib/plugins/winsync.jsx | 395 + .../src/lib/replication/replAgmts.jsx | 1150 + .../src/lib/replication/replChangelog.jsx | 395 + .../src/lib/replication/replConfig.jsx | 640 + .../src/lib/replication/replModals.jsx | 1697 ++ .../src/lib/replication/replSuffix.jsx | 473 + .../src/lib/replication/replTables.jsx | 608 + .../src/lib/replication/replTasks.jsx | 314 + .../src/lib/replication/winsyncAgmts.jsx | 1210 + .../src/lib/schema/schemaModals.jsx | 660 + .../src/lib/schema/schemaTables.jsx | 558 + .../lib/security/certificateManagement.jsx | 640 + .../389-console/src/lib/security/ciphers.jsx | 314 + .../src/lib/security/securityModals.jsx | 689 + 
.../src/lib/security/securityTables.jsx | 456 + .../389-console/src/lib/server/accessLog.jsx | 735 + .../389-console/src/lib/server/auditLog.jsx | 595 + .../src/lib/server/auditfailLog.jsx | 593 + .../389-console/src/lib/server/errorLog.jsx | 928 + .../389-console/src/lib/server/ldapi.jsx | 303 + .../389-console/src/lib/server/sasl.jsx | 760 + .../src/lib/server/serverModals.jsx | 224 + .../src/lib/server/serverTables.jsx | 247 + .../389-console/src/lib/server/settings.jsx | 1320 ++ .../389-console/src/lib/server/tuning.jsx | 528 + src/cockpit/389-console/src/lib/tools.jsx | 154 + src/cockpit/389-console/src/manifest.json | 13 + src/cockpit/389-console/src/monitor.jsx | 1293 ++ src/cockpit/389-console/src/plugins.jsx | 549 + src/cockpit/389-console/src/replication.jsx | 1123 + src/cockpit/389-console/src/schema.jsx | 1020 + src/cockpit/389-console/src/security.jsx | 887 + src/cockpit/389-console/src/server.jsx | 334 + src/cockpit/389-console/src/static/32px.png | Bin 0 -> 3121 bytes src/cockpit/389-console/src/static/40px.png | Bin 0 -> 1880 bytes .../389-console/src/static/Typeahead.css | 256 + .../389-console/src/static/bootpopup.min.js | 1 + .../src/static/bootstrap-theme.min.css | 6 + .../src/static/bootstrap-theme.min.css.map | 1 + .../389-console/src/static/bootstrap.min.css | 6 + .../src/static/bootstrap.min.css.map | 1 + .../389-console/src/static/bootstrap.min.js | 7 + src/cockpit/389-console/src/static/c3.min.js | 1 + src/cockpit/389-console/src/static/d3.min.js | 5 + .../src/static/dataTables.datetime-moment.js | 75 + .../static/images/ui-icons_444444_256x240.png | Bin 0 -> 6992 bytes .../static/images/ui-icons_555555_256x240.png | Bin 0 -> 6988 bytes .../static/images/ui-icons_777620_256x240.png | Bin 0 -> 4549 bytes .../static/images/ui-icons_777777_256x240.png | Bin 0 -> 6999 bytes .../static/images/ui-icons_cc0000_256x240.png | Bin 0 -> 4549 bytes .../static/images/ui-icons_ffffff_256x240.png | Bin 0 -> 6299 bytes .../src/static/jquery-3.3.1.min.js | 4 + .../src/static/jquery.dataTables.min.css | 1 + .../src/static/jquery.dataTables.min.js | 167 + .../static/jquery.dataTables.select.min.js | 28 + .../src/static/jquery.timepicker.min.css | 1 + .../src/static/jquery.timepicker.min.js | 7 + .../389-console/src/static/jstree.min.js | 6 + .../389-console/src/static/moment.min.js | 2 + .../389-console/src/static/page.min.css | 1 + .../src/static/patternfly-additions.css | 8988 ++++++++ .../389-console/src/static/patternfly.css | 10535 +++++++++ .../389-console/src/static/patternfly.min.js | 2 + .../389-console/src/static/style.min.css | 2 + .../389-console/src/static/throbber.gif | Bin 0 -> 1720 bytes src/cockpit/389-console/webpack.config.js | 175 + src/contrib/README.md | 6 + src/contrib/back-ldif/add.c | 198 + src/contrib/back-ldif/back-ldif.h | 91 + src/contrib/back-ldif/bind.c | 116 + src/contrib/back-ldif/close.c | 69 + src/contrib/back-ldif/compare.c | 89 + src/contrib/back-ldif/config.c | 206 + src/contrib/back-ldif/delete.c | 137 + src/contrib/back-ldif/init.c | 110 + src/contrib/back-ldif/modify.c | 564 + src/contrib/back-ldif/modrdn.c | 282 + src/contrib/back-ldif/monitor.c | 128 + src/contrib/back-ldif/search.c | 196 + src/contrib/back-ldif/start.c | 39 + src/contrib/back-ldif/unbind.c | 35 + src/lib389/.coveragerc | 2 + src/lib389/.gitignore | 10 + src/lib389/LICENSE | 675 + src/lib389/MANIFEST.in | 2 + src/lib389/Makefile | 33 + src/lib389/README.md | 26 + src/lib389/VERSION | 1 + src/lib389/cli/dsconf | 151 + src/lib389/cli/dscontainer | 390 + 
src/lib389/cli/dscreate | 84 + src/lib389/cli/dsctl | 148 + src/lib389/cli/dsidm | 142 + src/lib389/doc/Makefile | 230 + src/lib389/doc/source/accesscontrol.rst | 6 + src/lib389/doc/source/aci.rst | 66 + src/lib389/doc/source/agreement.rst | 27 + src/lib389/doc/source/backend.rst | 29 + src/lib389/doc/source/changelog.rst | 22 + src/lib389/doc/source/conf.py | 339 + src/lib389/doc/source/config.rst | 47 + src/lib389/doc/source/databases.rst | 7 + src/lib389/doc/source/dirsrv_log.rst | 30 + src/lib389/doc/source/domain.rst | 21 + src/lib389/doc/source/dseldif.rst | 30 + src/lib389/doc/source/group.rst | 44 + src/lib389/doc/source/guidelines.rst | 635 + src/lib389/doc/source/identitymanagement.rst | 10 + src/lib389/doc/source/index.rst | 53 + src/lib389/doc/source/indexes.rst | 53 + src/lib389/doc/source/ldclt.rst | 42 + src/lib389/doc/source/mappingtree.rst | 31 + src/lib389/doc/source/monitor.rst | 19 + src/lib389/doc/source/need_to_be_triaged.rst | 18 + src/lib389/doc/source/organizationalunit.rst | 41 + src/lib389/doc/source/passwd.rst | 33 + src/lib389/doc/source/paths.rst | 43 + src/lib389/doc/source/plugin.rst | 35 + src/lib389/doc/source/replica.rst | 87 + src/lib389/doc/source/replication.rst | 9 + src/lib389/doc/source/repltools.rst | 43 + src/lib389/doc/source/rootdse.rst | 25 + src/lib389/doc/source/schema.rst | 62 + src/lib389/doc/source/services.rst | 40 + src/lib389/doc/source/task.rst | 59 + src/lib389/doc/source/user.rst | 53 + src/lib389/doc/source/utils.rst | 23 + src/lib389/dsadmin.pylintrc | 236 + src/lib389/lib389/__init__.py | 3484 +++ src/lib389/lib389/_constants.py | 356 + src/lib389/lib389/_controls.py | 240 + src/lib389/lib389/_entry.py | 645 + src/lib389/lib389/_ldifconn.py | 53 + src/lib389/lib389/_mapped_object.py | 1243 + src/lib389/lib389/_replication.py | 210 + src/lib389/lib389/aci.py | 240 + src/lib389/lib389/agreement.py | 1251 ++ src/lib389/lib389/backend.py | 1041 + src/lib389/lib389/chaining.py | 186 + src/lib389/lib389/cli_base/__init__.py | 436 + src/lib389/lib389/cli_base/dsrc.py | 205 + src/lib389/lib389/cli_conf/__init__.py | 129 + src/lib389/lib389/cli_conf/backend.py | 1120 + src/lib389/lib389/cli_conf/backup.py | 55 + src/lib389/lib389/cli_conf/chaining.py | 288 + src/lib389/lib389/cli_conf/config.py | 59 + src/lib389/lib389/cli_conf/conflicts.py | 127 + .../lib389/cli_conf/directory_manager.py | 33 + src/lib389/lib389/cli_conf/monitor.py | 124 + src/lib389/lib389/cli_conf/plugin.py | 142 + .../lib389/cli_conf/plugins/__init__.py | 0 .../lib389/cli_conf/plugins/accountpolicy.py | 128 + .../lib389/cli_conf/plugins/attruniq.py | 170 + .../lib389/cli_conf/plugins/automember.py | 239 + src/lib389/lib389/cli_conf/plugins/dna.py | 270 + .../lib389/cli_conf/plugins/linkedattr.py | 125 + .../lib389/cli_conf/plugins/managedentries.py | 236 + .../lib389/cli_conf/plugins/memberof.py | 144 + .../cli_conf/plugins/passthroughauth.py | 258 + .../lib389/cli_conf/plugins/posix_winsync.py | 75 + .../lib389/cli_conf/plugins/referint.py | 119 + .../lib389/cli_conf/plugins/retrochangelog.py | 54 + .../lib389/cli_conf/plugins/rootdn_ac.py | 68 + src/lib389/lib389/cli_conf/plugins/usn.py | 71 + src/lib389/lib389/cli_conf/pwpolicy.py | 294 + src/lib389/lib389/cli_conf/replication.py | 1489 ++ src/lib389/lib389/cli_conf/saslmappings.py | 110 + src/lib389/lib389/cli_conf/schema.py | 385 + src/lib389/lib389/cli_conf/security.py | 493 + src/lib389/lib389/cli_ctl/__init__.py | 8 + src/lib389/lib389/cli_ctl/dbtasks.py | 131 + src/lib389/lib389/cli_ctl/health.py | 124 + 
src/lib389/lib389/cli_ctl/instance.py | 196 + src/lib389/lib389/cli_ctl/nsstate.py | 64 + src/lib389/lib389/cli_ctl/tls.py | 144 + src/lib389/lib389/cli_idm/__init__.py | 117 + src/lib389/lib389/cli_idm/account.py | 193 + src/lib389/lib389/cli_idm/client_config.py | 290 + src/lib389/lib389/cli_idm/group.py | 127 + src/lib389/lib389/cli_idm/initialise.py | 22 + .../lib389/cli_idm/organizationalunit.py | 84 + src/lib389/lib389/cli_idm/posixgroup.py | 83 + src/lib389/lib389/cli_idm/role.py | 126 + src/lib389/lib389/cli_idm/user.py | 119 + src/lib389/lib389/clitools/__init__.py | 77 + src/lib389/lib389/clitools/ds_aci_lint | 41 + src/lib389/lib389/clitools/ds_backend_getattr | 42 + src/lib389/lib389/clitools/ds_backend_list | 39 + src/lib389/lib389/clitools/ds_backend_setattr | 43 + .../lib389/clitools/ds_krb_create_keytab | 40 + .../lib389/clitools/ds_krb_create_principal | 38 + .../lib389/clitools/ds_krb_create_realm | 34 + .../lib389/clitools/ds_krb_destroy_realm | 34 + src/lib389/lib389/clitools/ds_monitor_backend | 39 + src/lib389/lib389/clitools/ds_monitor_server | 35 + .../clitools/ds_schema_attributetype_list | 33 + .../clitools/ds_schema_attributetype_query | 46 + src/lib389/lib389/clitools/ds_setup | 77 + src/lib389/lib389/config.py | 510 + src/lib389/lib389/configurations/__init__.py | 44 + src/lib389/lib389/configurations/config.py | 44 + .../lib389/configurations/config_001003006.py | 129 + .../lib389/configurations/config_001004000.py | 132 + .../lib389/configurations/config_001004002.py | 131 + src/lib389/lib389/configurations/sample.py | 145 + src/lib389/lib389/conflicts.py | 182 + src/lib389/lib389/cos.py | 199 + src/lib389/lib389/dbgen.py | 188 + src/lib389/lib389/dirsrv_log.py | 271 + src/lib389/lib389/ds_instance.py | 97 + src/lib389/lib389/dseldif.py | 375 + src/lib389/lib389/encrypted_attributes.py | 49 + src/lib389/lib389/exceptions.py | 43 + src/lib389/lib389/extended_operations.py | 50 + src/lib389/lib389/extensibleobject.py | 53 + src/lib389/lib389/idm/__init__.py | 0 src/lib389/lib389/idm/account.py | 381 + src/lib389/lib389/idm/country.py | 53 + src/lib389/lib389/idm/directorymanager.py | 53 + src/lib389/lib389/idm/domain.py | 31 + src/lib389/lib389/idm/group.py | 202 + src/lib389/lib389/idm/ipadomain.py | 36 + src/lib389/lib389/idm/nscontainer.py | 90 + src/lib389/lib389/idm/organization.py | 53 + src/lib389/lib389/idm/organizationalrole.py | 54 + src/lib389/lib389/idm/organizationalunit.py | 54 + src/lib389/lib389/idm/posixgroup.py | 82 + src/lib389/lib389/idm/role.py | 340 + src/lib389/lib389/idm/services.py | 65 + src/lib389/lib389/idm/user.py | 230 + src/lib389/lib389/index.py | 266 + src/lib389/lib389/instance/__init__.py | 7 + src/lib389/lib389/instance/options.py | 314 + src/lib389/lib389/instance/remove.py | 159 + src/lib389/lib389/instance/setup.py | 976 + src/lib389/lib389/ldclt.py | 166 + src/lib389/lib389/lint.py | 397 + src/lib389/lib389/mappingTree.py | 452 + src/lib389/lib389/mit_krb5.py | 232 + src/lib389/lib389/monitor.py | 276 + src/lib389/lib389/ns-slapd.valgrind | 29 + src/lib389/lib389/nss_ssl.py | 970 + src/lib389/lib389/passwd.py | 80 + src/lib389/lib389/password_plugins.py | 55 + src/lib389/lib389/paths.py | 217 + src/lib389/lib389/plugins.py | 2245 ++ src/lib389/lib389/properties.py | 498 + src/lib389/lib389/pwpolicy.py | 351 + src/lib389/lib389/referral.py | 35 + src/lib389/lib389/replica.py | 2594 +++ src/lib389/lib389/repltools.py | 298 + src/lib389/lib389/rootdse.py | 50 + src/lib389/lib389/sasl.py | 37 + src/lib389/lib389/saslmap.py | 
59 + src/lib389/lib389/schema.py | 786 + src/lib389/lib389/suffix.py | 96 + src/lib389/lib389/tasks.py | 1478 ++ src/lib389/lib389/tests/__init__.py | 7 + src/lib389/lib389/tests/aci_parse_test.py | 57 + src/lib389/lib389/tests/aci_test.py | 122 + src/lib389/lib389/tests/agreement_test.py | 362 + src/lib389/lib389/tests/backendLegacy_test.py | 282 + src/lib389/lib389/tests/backend_test.py | 365 + src/lib389/lib389/tests/cli/__init__.py | 111 + .../lib389/tests/cli/adm_instance_test.py | 30 + .../lib389/tests/cli/conf_backend_test.py | 509 + .../lib389/tests/cli/conf_backup_test.py | 50 + .../lib389/tests/cli/conf_chaining_test.py | 198 + .../lib389/tests/cli/conf_conflicts_test.py | 161 + .../tests/cli/conf_directory_manager_test.py | 22 + .../lib389/tests/cli/conf_plugin_test.py | 53 + .../lib389/tests/cli/conf_plugins/__init__.py | 0 .../tests/cli/conf_plugins/automember_test.py | 122 + .../tests/cli/conf_plugins/memberof_test.py | 490 + .../tests/cli/conf_plugins/referint_test.py | 119 + .../tests/cli/conf_plugins/rootdn_ac_test.py | 281 + .../lib389/tests/cli/conf_plugins/usn_test.py | 53 + .../lib389/tests/cli/conf_pwpolicy_test.py | 151 + .../lib389/tests/cli/conf_schema_test.py | 1 + .../lib389/tests/cli/ctl_dbtasks_test.py | 74 + src/lib389/lib389/tests/cli/dsrc_test.py | 180 + src/lib389/lib389/tests/cli/idm_group_test.py | 89 + .../lib389/tests/cli/idm_user_modify_test.py | 95 + src/lib389/lib389/tests/cli/idm_user_test.py | 93 + src/lib389/lib389/tests/config.py | 40 + src/lib389/lib389/tests/config_test.py | 87 + .../lib389/tests/configurations/__init__.py | 0 .../configurations/config_001003006_test.py | 47 + .../configurations/config_001004000_test.py | 231 + src/lib389/lib389/tests/conftest.py | 121 + src/lib389/lib389/tests/dereference_test.py | 99 + src/lib389/lib389/tests/dirsrv_log_test.py | 123 + src/lib389/lib389/tests/dirsrv_test.py | 215 + src/lib389/lib389/tests/dsadmin_basic_test.py | 126 + .../tests/dsadmin_create_remove_test.py | 97 + src/lib389/lib389/tests/dsadmin_test.py | 256 + src/lib389/lib389/tests/dseldif_test.py | 130 + .../lib389/tests/effective_rights_test.py | 93 + src/lib389/lib389/tests/entry_test.py | 98 + src/lib389/lib389/tests/healthcheck_test.py | 61 + src/lib389/lib389/tests/idm/__init__.py | 0 src/lib389/lib389/tests/idm/account_test.py | 82 + src/lib389/lib389/tests/idm/services_test.py | 55 + .../lib389/tests/idm/user_and_group_test.py | 93 + src/lib389/lib389/tests/index_test.py | 69 + src/lib389/lib389/tests/ldclt_test.py | 64 + src/lib389/lib389/tests/mapped_object_test.py | 23 + .../lib389/tests/mappingTreeLegacy_test.py | 271 + src/lib389/lib389/tests/mappingtree_test.py | 64 + src/lib389/lib389/tests/nss_ssl_test.py | 95 + src/lib389/lib389/tests/paths_test.py | 36 + src/lib389/lib389/tests/plugin_test.py | 137 + src/lib389/lib389/tests/plugins/__init__.py | 0 .../lib389/tests/plugins/memberof_test.py | 354 + .../lib389/tests/plugins/referint_test.py | 83 + src/lib389/lib389/tests/plugins/usn_test.py | 240 + src/lib389/lib389/tests/plugins/utils.py | 124 + src/lib389/lib389/tests/referral_test.py | 36 + src/lib389/lib389/tests/replicaLegacy_test.py | 457 + src/lib389/lib389/tests/replica_test.py | 375 + src/lib389/lib389/tests/schema_test.py | 121 + src/lib389/lib389/tests/suffix_test.py | 105 + src/lib389/lib389/tests/test_module_proxy.py | 79 + src/lib389/lib389/tests/tls_external_test.py | 82 + src/lib389/lib389/tests/utils_test.py | 192 + src/lib389/lib389/tombstone.py | 216 + src/lib389/lib389/tools.py | 1008 + 
src/lib389/lib389/topologies.py | 492 + src/lib389/lib389/utils.py | 1382 ++ src/lib389/requirements.txt | 10 + src/lib389/setup.cfg | 7 + src/lib389/setup.py | 97 + src/lib389/tox.ini | 6 + src/librnsslapd/Cargo.toml | 25 + src/librnsslapd/README.md | 4 + src/librnsslapd/build.rs | 15 + src/librnsslapd/src/lib.rs | 70 + src/librslapd/Cargo.toml | 25 + src/librslapd/README.md | 3 + src/librslapd/build.rs | 15 + src/librslapd/src/lib.rs | 56 + src/libsds/Cargo.toml | 18 + src/libsds/README.md | 30 + src/libsds/external/csiphash/csiphash.c | 138 + .../build/gcc_gnumake_kbuild/Kbuild | 86 + .../build/msvc_gnumake/liblfds711.def | 75 + .../liblfds711/build/msvc_gnumake/makefile | 113 + .../external/liblfds711/build/wdk_7.1/dirs | 3 + ..._entry_renamed_to_avoid_compiler_warning.c | 17 + .../liblfds711/build/wdk_7.1/liblfds711.def | 75 + .../readme_before_win_kernel_build.txt | 32 + ...me_before_win_kernel_dynamic_lib_build.bat | 24 + ...nme_before_win_kernel_static_lib_build.bat | 23 + .../liblfds711/build/wdk_7.1/sources.dynamic | 66 + .../liblfds711/build/wdk_7.1/sources.static | 64 + .../external/liblfds711/inc/liblfds711.h | 36 + .../lfds711_btree_addonly_unbalanced.h | 119 + .../inc/liblfds711/lfds711_freelist.h | 67 + .../inc/liblfds711/lfds711_hash_addonly.h | 135 + ...fds711_list_addonly_singlylinked_ordered.h | 85 + ...s711_list_addonly_singlylinked_unordered.h | 88 + .../liblfds711/inc/liblfds711/lfds711_misc.h | 241 + ...ds711_porting_abstraction_layer_compiler.h | 462 + ...rting_abstraction_layer_operating_system.h | 79 + ...s711_porting_abstraction_layer_processor.h | 407 + .../liblfds711/inc/liblfds711/lfds711_prng.h | 73 + ..._queue_bounded_manyproducer_manyconsumer.h | 64 + ...ue_bounded_singleproducer_singleconsumer.h | 57 + ...ueue_unbounded_manyproducer_manyconsumer.h | 58 + .../inc/liblfds711/lfds711_ringbuffer.h | 64 + .../liblfds711/inc/liblfds711/lfds711_stack.h | 54 + ...lfds711_btree_addonly_unbalanced_cleanup.c | 109 + .../lfds711_btree_addonly_unbalanced_get.c | 444 + .../lfds711_btree_addonly_unbalanced_init.c | 30 + .../lfds711_btree_addonly_unbalanced_insert.c | 147 + ...fds711_btree_addonly_unbalanced_internal.h | 22 + .../lfds711_btree_addonly_unbalanced_query.c | 114 + .../lfds711_freelist_cleanup.c | 31 + .../lfds711_freelist/lfds711_freelist_init.c | 45 + .../lfds711_freelist_internal.h | 6 + .../lfds711_freelist/lfds711_freelist_pop.c | 76 + .../lfds711_freelist/lfds711_freelist_push.c | 107 + .../lfds711_freelist/lfds711_freelist_query.c | 138 + .../lfds711_hash_addonly_cleanup.c | 58 + .../lfds711_hash_addonly_get.c | 44 + .../lfds711_hash_addonly_init.c | 51 + .../lfds711_hash_addonly_insert.c | 56 + .../lfds711_hash_addonly_internal.h | 4 + .../lfds711_hash_addonly_iterate.c | 52 + .../lfds711_hash_addonly_query.c | 108 + ...ist_addonly_singlylinked_ordered_cleanup.c | 33 + ...11_list_addonly_singlylinked_ordered_get.c | 26 + ...1_list_addonly_singlylinked_ordered_init.c | 35 + ...list_addonly_singlylinked_ordered_insert.c | 126 + ...st_addonly_singlylinked_ordered_internal.h | 4 + ..._list_addonly_singlylinked_ordered_query.c | 116 + ...t_addonly_singlylinked_unordered_cleanup.c | 33 + ..._list_addonly_singlylinked_unordered_get.c | 30 + ...list_addonly_singlylinked_unordered_init.c | 32 + ...st_addonly_singlylinked_unordered_insert.c | 175 + ..._addonly_singlylinked_unordered_internal.h | 4 + ...ist_addonly_singlylinked_unordered_query.c | 116 + .../src/lfds711_misc/lfds711_misc_globals.c | 9 + .../src/lfds711_misc/lfds711_misc_internal.h | 5 + 
.../lfds711_misc_internal_backoff_init.c | 19 + .../src/lfds711_misc/lfds711_misc_query.c | 32 + .../src/lfds711_prng/lfds711_prng_init.c | 37 + .../src/lfds711_prng/lfds711_prng_internal.h | 4 + ...ounded_manyproducer_manyconsumer_cleanup.c | 24 + ...ounded_manyproducer_manyconsumer_dequeue.c | 78 + ...ounded_manyproducer_manyconsumer_enqueue.c | 76 + ...e_bounded_manyproducer_manyconsumer_init.c | 39 + ...unded_manyproducer_manyconsumer_internal.h | 4 + ..._bounded_manyproducer_manyconsumer_query.c | 118 + ...ed_singleproducer_singleconsumer_cleanup.c | 26 + ...ed_singleproducer_singleconsumer_dequeue.c | 37 + ...ed_singleproducer_singleconsumer_enqueue.c | 34 + ...unded_singleproducer_singleconsumer_init.c | 60 + ...d_singleproducer_singleconsumer_internal.h | 4 + ...nded_singleproducer_singleconsumer_query.c | 78 + ...ounded_manyproducer_manyconsumer_cleanup.c | 43 + ...ounded_manyproducer_manyconsumer_dequeue.c | 122 + ...ounded_manyproducer_manyconsumer_enqueue.c | 85 + ...unbounded_manyproducer_manyconsumer_init.c | 47 + ...unded_manyproducer_manyconsumer_internal.h | 13 + ...nbounded_manyproducer_manyconsumer_query.c | 123 + .../lfds711_ringbuffer_cleanup.c | 84 + .../lfds711_ringbuffer_init.c | 38 + .../lfds711_ringbuffer_internal.h | 4 + .../lfds711_ringbuffer_query.c | 75 + .../lfds711_ringbuffer_read.c | 39 + .../lfds711_ringbuffer_write.c | 70 + .../src/lfds711_stack/lfds711_stack_cleanup.c | 31 + .../src/lfds711_stack/lfds711_stack_init.c | 28 + .../lfds711_stack/lfds711_stack_internal.h | 4 + .../src/lfds711_stack/lfds711_stack_pop.c | 49 + .../src/lfds711_stack/lfds711_stack_push.c | 41 + .../src/lfds711_stack/lfds711_stack_query.c | 120 + .../liblfds711/src/liblfds711_internal.h | 96 + src/libsds/include/sds.h | 1429 ++ src/libsds/sds/bpt/bpt.c | 391 + src/libsds/sds/bpt/bpt.h | 90 + src/libsds/sds/bpt/common.c | 817 + src/libsds/sds/bpt/list.c | 55 + src/libsds/sds/bpt/map.c | 367 + src/libsds/sds/bpt/search.c | 130 + src/libsds/sds/bpt/set.c | 285 + src/libsds/sds/bpt/verify.c | 74 + src/libsds/sds/bpt_cow/atomic.c | 97 + src/libsds/sds/bpt_cow/bpt_cow.c | 597 + src/libsds/sds/bpt_cow/bpt_cow.h | 48 + src/libsds/sds/bpt_cow/delete.c | 98 + src/libsds/sds/bpt_cow/insert.c | 155 + src/libsds/sds/bpt_cow/node.c | 250 + src/libsds/sds/bpt_cow/search.c | 80 + src/libsds/sds/bpt_cow/txn.c | 484 + src/libsds/sds/bpt_cow/verify.c | 230 + src/libsds/sds/core/crc32c.c | 147 + src/libsds/sds/core/utils.c | 142 + src/libsds/sds/ht/ht.c | 69 + src/libsds/sds/ht/ht.h | 35 + src/libsds/sds/ht/map.c | 64 + src/libsds/sds/ht/node.c | 63 + src/libsds/sds/ht/op.c | 358 + src/libsds/sds/ht/verify.c | 154 + src/libsds/sds/lib.rs | 32 + src/libsds/sds/queue/lqueue.c | 251 + src/libsds/sds/queue/queue.c | 125 + src/libsds/sds/queue/queue.h | 13 + src/libsds/sds/queue/tqueue.c | 65 + src/libsds/sds/sds_internal.h | 32 + src/libsds/sds/tqueue.rs | 131 + src/libsds/test/benchmark.c | 690 + src/libsds/test/benchmark.h | 22 + src/libsds/test/benchmark_par.c | 680 + src/libsds/test/benchmark_par.h | 78 + src/libsds/test/benchmark_parwrap.c | 422 + src/libsds/test/test_fixtures.c | 234 + src/libsds/test/test_sds.c | 25 + src/libsds/test/test_sds.h | 266 + src/libsds/test/test_sds_bpt.c | 1008 + src/libsds/test/test_sds_cow.c | 632 + src/libsds/test/test_sds_csiphash.c | 79 + src/libsds/test/test_sds_ht.c | 138 + src/libsds/test/test_sds_lqueue.c | 219 + src/libsds/test/test_sds_queue.c | 178 + src/libsds/test/test_sds_set.c | 386 + src/libsds/test/test_sds_tqueue.c | 209 + src/pkgconfig/dirsrv.pc.in 
| 11 + src/pkgconfig/libsds.pc.in | 12 + src/pkgconfig/svrcore.pc.in | 11 + src/slapd/Cargo.toml | 10 + src/slapd/src/error.rs | 8 + src/slapd/src/fernet.rs | 39 + src/slapd/src/lib.rs | 5 + src/svrcore/AUTHORS | 3 + src/svrcore/COPYING | 2 + src/svrcore/ChangeLog | 0 src/svrcore/INSTALL | 370 + src/svrcore/INSTALL.win | 77 + src/svrcore/LICENSE | 27 + src/svrcore/Makefile.am | 54 + src/svrcore/NEWS | 31 + src/svrcore/README | 58 + src/svrcore/TODO | 3 + src/svrcore/autogen.sh | 92 + src/svrcore/configure.ac | 69 + src/svrcore/examples/svrcore_driver.c | 140 + src/svrcore/m4/nspr.m4 | 110 + src/svrcore/m4/nss.m4 | 114 + src/svrcore/m4/systemd.m4 | 41 + src/svrcore/src/Makefile.am | 30 + src/svrcore/src/Makefile.win | 77 + src/svrcore/src/alt.c | 107 + src/svrcore/src/cache.c | 227 + src/svrcore/src/errors.c | 28 + src/svrcore/src/file.c | 236 + src/svrcore/src/key.ico | Bin 0 -> 766 bytes src/svrcore/src/logo.ico | Bin 0 -> 766 bytes src/svrcore/src/manifest.mn | 29 + src/svrcore/src/ntgetpin.c | 146 + src/svrcore/src/ntgetpin.rc | 92 + src/svrcore/src/ntresource.h | 26 + src/svrcore/src/pin.c | 79 + src/svrcore/src/pk11.c | 296 + src/svrcore/src/std-systemd.c | 234 + src/svrcore/src/std.c | 167 + src/svrcore/src/svrcore.h | 308 + src/svrcore/src/systemd-ask-pass.c | 468 + src/svrcore/src/user.c | 167 + test/libslapd/counters/atomic.c | 69 + test/libslapd/operation/v3_compat.c | 60 + test/libslapd/pblock/analytics.c | 35 + test/libslapd/pblock/pblock_accessors.txt | 317 + .../libslapd/pblock/pblock_accessors_freq.txt | 634 + test/libslapd/pblock/v3_compat.c | 220 + test/libslapd/schema/filter_validate.c | 124 + test/libslapd/spal/meminfo.c | 68 + test/libslapd/test.c | 37 + test/main.c | 20 + test/pblock_analyse.py | 148 + test/plugins/pwdstorage/pbkdf2.c | 80 + test/plugins/test.c | 31 + test/test_slapd.h | 65 + wrappers/cl-dump.in | 28 + wrappers/ds_systemd_ask_password_acl.in | 34 + wrappers/initscript.in | 360 + wrappers/ldap-agent-initscript.in | 227 + wrappers/repl-monitor.in | 28 + wrappers/systemd-snmp.service.in | 16 + wrappers/systemd.group.in | 7 + .../systemd.template.service.custom.conf.in | 53 + wrappers/systemd.template.service.in | 21 + .../systemd.template.service.xsan.conf.in | 11 + 2176 files changed, 820623 insertions(+) create mode 100644 .cargo/config.in create mode 100644 .clang-format create mode 100644 .cvsignore create mode 100644 .dockerignore create mode 100644 .gitignore create mode 100644 LICENSE create mode 100644 LICENSE.GPLv3+ create mode 100644 LICENSE.mit create mode 100644 LICENSE.openssl create mode 100644 Makefile.am create mode 100644 README.md create mode 100644 VERSION.sh create mode 100755 autogen.sh create mode 100755 buildnum.py create mode 100644 configure.ac create mode 100644 dirsrvtests/README create mode 100644 dirsrvtests/__init__.py create mode 100644 dirsrvtests/conftest.py create mode 100755 dirsrvtests/create_test.py create mode 100644 dirsrvtests/pytest.ini create mode 100644 dirsrvtests/tests/__init__.py create mode 100644 dirsrvtests/tests/data/README create mode 100644 dirsrvtests/tests/data/__init__.py create mode 100644 dirsrvtests/tests/data/basic/__init__.py create mode 100644 dirsrvtests/tests/data/basic/dse.ldif.broken create mode 100644 dirsrvtests/tests/data/ticket47953/__init__.py create mode 100644 dirsrvtests/tests/data/ticket47953/ticket47953.ldif create mode 100644 dirsrvtests/tests/data/ticket47988/__init__.py create mode 100644 dirsrvtests/tests/data/ticket47988/schema_ipa3.3.tar.gz create mode 100644 
dirsrvtests/tests/data/ticket47988/schema_ipa4.1.tar.gz create mode 100644 dirsrvtests/tests/data/ticket48212/__init__.py create mode 100644 dirsrvtests/tests/data/ticket48212/example1k_posix.ldif create mode 100644 dirsrvtests/tests/data/ticket49121/utf8str.txt create mode 100644 dirsrvtests/tests/data/ticket49441/binary.ldif create mode 100755 dirsrvtests/tests/perf/create_data.py create mode 100755 dirsrvtests/tests/perf/memberof_test.py create mode 100644 dirsrvtests/tests/perf/search_performance_test.py create mode 100644 dirsrvtests/tests/stress/README create mode 100644 dirsrvtests/tests/stress/__init__.py create mode 100644 dirsrvtests/tests/stress/cos/cos_scale_template_test.py create mode 100644 dirsrvtests/tests/stress/reliabilty/__init__.py create mode 100644 dirsrvtests/tests/stress/reliabilty/reliab_7_5_test.py create mode 100644 dirsrvtests/tests/stress/reliabilty/reliab_conn_test.py create mode 100644 dirsrvtests/tests/stress/replication/mmr_01_4m-2h-4c_test.py create mode 100644 dirsrvtests/tests/stress/replication/mmr_01_4m_test.py create mode 100644 dirsrvtests/tests/stress/search/__init__.py create mode 100644 dirsrvtests/tests/stress/search/simple.py create mode 100644 dirsrvtests/tests/suites/__init__.py create mode 100644 dirsrvtests/tests/suites/acl/__init__.py create mode 100644 dirsrvtests/tests/suites/acl/acivattr_test.py create mode 100644 dirsrvtests/tests/suites/acl/acl_deny_test.py create mode 100644 dirsrvtests/tests/suites/acl/acl_test.py create mode 100644 dirsrvtests/tests/suites/acl/conftest.py create mode 100644 dirsrvtests/tests/suites/acl/default_aci_allows_self_write.py create mode 100644 dirsrvtests/tests/suites/acl/deladd_test.py create mode 100644 dirsrvtests/tests/suites/acl/enhanced_aci_modrnd_test.py create mode 100644 dirsrvtests/tests/suites/acl/globalgroup_part2_test.py create mode 100644 dirsrvtests/tests/suites/acl/globalgroup_test.py create mode 100644 dirsrvtests/tests/suites/acl/keywords_part2_test.py create mode 100644 dirsrvtests/tests/suites/acl/keywords_test.py create mode 100644 dirsrvtests/tests/suites/acl/misc_test.py create mode 100644 dirsrvtests/tests/suites/acl/modify_test.py create mode 100644 dirsrvtests/tests/suites/acl/modrdn_test.py create mode 100644 dirsrvtests/tests/suites/acl/repeated_ldap_add_test.py create mode 100644 dirsrvtests/tests/suites/acl/roledn_test.py create mode 100644 dirsrvtests/tests/suites/acl/search_real_part2_test.py create mode 100644 dirsrvtests/tests/suites/acl/search_real_part3_test.py create mode 100644 dirsrvtests/tests/suites/acl/search_real_test.py create mode 100644 dirsrvtests/tests/suites/acl/selfdn_permissions_test.py create mode 100644 dirsrvtests/tests/suites/acl/syntax_test.py create mode 100644 dirsrvtests/tests/suites/acl/userattr_test.py create mode 100644 dirsrvtests/tests/suites/acl/valueacl_part2_test.py create mode 100644 dirsrvtests/tests/suites/acl/valueacl_test.py create mode 100644 dirsrvtests/tests/suites/attr_encryption/__init__.py create mode 100644 dirsrvtests/tests/suites/attr_encryption/attr_encryption_test.py create mode 100644 dirsrvtests/tests/suites/auth_token/__init__.py create mode 100644 dirsrvtests/tests/suites/auth_token/basic_auth_test.py create mode 100644 dirsrvtests/tests/suites/automember_plugin/__init__.py create mode 100644 dirsrvtests/tests/suites/automember_plugin/automember_mod_test.py create mode 100644 dirsrvtests/tests/suites/automember_plugin/automember_test.py create mode 100644 dirsrvtests/tests/suites/automember_plugin/basic_test.py create 
mode 100644 dirsrvtests/tests/suites/automember_plugin/configuration_test.py create mode 100644 dirsrvtests/tests/suites/backups/backup_test.py create mode 100644 dirsrvtests/tests/suites/basic/__init__.py create mode 100644 dirsrvtests/tests/suites/basic/basic_test.py create mode 100644 dirsrvtests/tests/suites/betxns/__init__.py create mode 100644 dirsrvtests/tests/suites/betxns/betxn_test.py create mode 100644 dirsrvtests/tests/suites/clu/__init__.py create mode 100644 dirsrvtests/tests/suites/clu/clu_test.py create mode 100644 dirsrvtests/tests/suites/config/__init__.py create mode 100644 dirsrvtests/tests/suites/config/autotuning_test.py create mode 100644 dirsrvtests/tests/suites/config/config_test.py create mode 100644 dirsrvtests/tests/suites/config/regression_test.py create mode 100644 dirsrvtests/tests/suites/config/removed_config_49298_test.py create mode 100644 dirsrvtests/tests/suites/cos/__init__.py create mode 100644 dirsrvtests/tests/suites/cos/cos_test.py create mode 100644 dirsrvtests/tests/suites/cos/indirect_cos_test.py create mode 100644 dirsrvtests/tests/suites/disk_monitoring/__init__.py create mode 100644 dirsrvtests/tests/suites/disk_monitoring/disk_monitoring_test.py create mode 100644 dirsrvtests/tests/suites/disk_monitoring/disk_space_test.py create mode 100644 dirsrvtests/tests/suites/ds_logs/__init__.py create mode 100644 dirsrvtests/tests/suites/ds_logs/ds_logs_test.py create mode 100644 dirsrvtests/tests/suites/ds_logs/regression_test.py create mode 100644 dirsrvtests/tests/suites/ds_tools/__init__.py create mode 100644 dirsrvtests/tests/suites/ds_tools/logpipe_test.py create mode 100644 dirsrvtests/tests/suites/ds_tools/replcheck_test.py create mode 100644 dirsrvtests/tests/suites/dynamic_plugins/__init__.py create mode 100644 dirsrvtests/tests/suites/dynamic_plugins/dynamic_plugins_test.py create mode 100644 dirsrvtests/tests/suites/dynamic_plugins/stress_tests.py create mode 100644 dirsrvtests/tests/suites/filter/__init__.py create mode 100644 dirsrvtests/tests/suites/filter/basic_filter_test.py create mode 100644 dirsrvtests/tests/suites/filter/bitw_filter_test.py create mode 100644 dirsrvtests/tests/suites/filter/complex_filters_test.py create mode 100644 dirsrvtests/tests/suites/filter/filter_cert_test.py create mode 100644 dirsrvtests/tests/suites/filter/filter_index_match_test.py create mode 100644 dirsrvtests/tests/suites/filter/filter_indexing_test.py create mode 100644 dirsrvtests/tests/suites/filter/filter_logic_test.py create mode 100644 dirsrvtests/tests/suites/filter/filter_match_test.py create mode 100644 dirsrvtests/tests/suites/filter/filter_test.py create mode 100644 dirsrvtests/tests/suites/filter/filter_with_non_root_user_test.py create mode 100644 dirsrvtests/tests/suites/filter/filterscanlimit_test.py create mode 100644 dirsrvtests/tests/suites/filter/large_filter_test.py create mode 100644 dirsrvtests/tests/suites/filter/rfc3673_all_oper_attrs_test.py create mode 100644 dirsrvtests/tests/suites/filter/schema_validation_test.py create mode 100644 dirsrvtests/tests/suites/filter/vfilter_attribute_test.py create mode 100644 dirsrvtests/tests/suites/filter/vfilter_simple_test.py create mode 100644 dirsrvtests/tests/suites/fourwaymmr/__init__.py create mode 100644 dirsrvtests/tests/suites/fourwaymmr/fourwaymmr_test.py create mode 100644 dirsrvtests/tests/suites/fractional/fractional_test.py create mode 100644 dirsrvtests/tests/suites/get_effective_rights/__init__.py create mode 100644 
dirsrvtests/tests/suites/get_effective_rights/acceptance_test.py create mode 100644 dirsrvtests/tests/suites/gssapi/__init__.py create mode 100644 dirsrvtests/tests/suites/gssapi/simple_gssapi_test.py create mode 100644 dirsrvtests/tests/suites/gssapi_repl/__init__.py create mode 100644 dirsrvtests/tests/suites/gssapi_repl/gssapi_repl_test.py create mode 100644 dirsrvtests/tests/suites/import/__init__.py create mode 100644 dirsrvtests/tests/suites/import/regression_test.py create mode 100644 dirsrvtests/tests/suites/ldapi/__init__.py create mode 100644 dirsrvtests/tests/suites/lib389/__init__.py create mode 100644 dirsrvtests/tests/suites/lib389/config_compare_test.py create mode 100644 dirsrvtests/tests/suites/lib389/dsldapobject/__init__.py create mode 100644 dirsrvtests/tests/suites/lib389/dsldapobject/dn_construct_test.py create mode 100644 dirsrvtests/tests/suites/lib389/idm/__init__.py create mode 100644 dirsrvtests/tests/suites/lib389/idm/user_compare_i2_test.py create mode 100644 dirsrvtests/tests/suites/lib389/idm/user_compare_m2Repl_test.py create mode 100644 dirsrvtests/tests/suites/lib389/idm/user_compare_st_test.py create mode 100644 dirsrvtests/tests/suites/logging/__init__.py create mode 100644 dirsrvtests/tests/suites/logging/logging_config_test.py create mode 100644 dirsrvtests/tests/suites/mapping_tree/__init__.py create mode 100644 dirsrvtests/tests/suites/mapping_tree/be_del_and_default_naming_attr_test.py create mode 100644 dirsrvtests/tests/suites/mapping_tree/referral_during_tot_init_test.py create mode 100644 dirsrvtests/tests/suites/memberof_plugin/__init__.py create mode 100644 dirsrvtests/tests/suites/memberof_plugin/regression_test.py create mode 100644 dirsrvtests/tests/suites/memory_leaks/MMR_double_free_test.py create mode 100644 dirsrvtests/tests/suites/memory_leaks/__init__.py create mode 100644 dirsrvtests/tests/suites/memory_leaks/range_search_test.py create mode 100644 dirsrvtests/tests/suites/migration/__init__.py create mode 100644 dirsrvtests/tests/suites/migration/export_data_test.py create mode 100644 dirsrvtests/tests/suites/migration/import_data_test.py create mode 100644 dirsrvtests/tests/suites/monitor/__init__.py create mode 100644 dirsrvtests/tests/suites/monitor/monitor_test.py create mode 100644 dirsrvtests/tests/suites/paged_results/__init__.py create mode 100644 dirsrvtests/tests/suites/paged_results/paged_results_test.py create mode 100644 dirsrvtests/tests/suites/password/__init__.py create mode 100644 dirsrvtests/tests/suites/password/password_test.py create mode 100644 dirsrvtests/tests/suites/password/pbkdf2_upgrade_plugin_test.py create mode 100644 dirsrvtests/tests/suites/password/pwdAdmin_test.py create mode 100644 dirsrvtests/tests/suites/password/pwdModify_test.py create mode 100644 dirsrvtests/tests/suites/password/pwdPolicy_attribute_test.py create mode 100644 dirsrvtests/tests/suites/password/pwdPolicy_controls_test.py create mode 100644 dirsrvtests/tests/suites/password/pwdPolicy_inherit_global_test.py create mode 100644 dirsrvtests/tests/suites/password/pwdPolicy_syntax_test.py create mode 100644 dirsrvtests/tests/suites/password/pwdPolicy_token_test.py create mode 100644 dirsrvtests/tests/suites/password/pwdPolicy_warning_test.py create mode 100644 dirsrvtests/tests/suites/password/pwd_algo_test.py create mode 100644 dirsrvtests/tests/suites/password/pwd_lockout_bypass_test.py create mode 100644 dirsrvtests/tests/suites/password/pwd_log_test.py create mode 100644 dirsrvtests/tests/suites/password/pwd_upgrade_on_bind.py 
create mode 100644 dirsrvtests/tests/suites/password/pwp_history_test.py create mode 100644 dirsrvtests/tests/suites/password/pwp_test.py create mode 100644 dirsrvtests/tests/suites/password/regression_test.py create mode 100644 dirsrvtests/tests/suites/password/series_of_bugs_test.py create mode 100644 dirsrvtests/tests/suites/plugins/__init__.py create mode 100644 dirsrvtests/tests/suites/plugins/acceptance_test.py create mode 100644 dirsrvtests/tests/suites/plugins/accpol_test.py create mode 100644 dirsrvtests/tests/suites/plugins/attr_nsslapd-pluginarg_test.py create mode 100644 dirsrvtests/tests/suites/plugins/cos_test.py create mode 100644 dirsrvtests/tests/suites/plugins/deref_aci_test.py create mode 100644 dirsrvtests/tests/suites/plugins/dna_test.py create mode 100644 dirsrvtests/tests/suites/plugins/memberof_test.py create mode 100644 dirsrvtests/tests/suites/plugins/pluginpath_validation_test.py create mode 100644 dirsrvtests/tests/suites/plugins/referint_test.py create mode 100644 dirsrvtests/tests/suites/plugins/rootdn_plugin_test.py create mode 100644 dirsrvtests/tests/suites/psearch/__init__.py create mode 100644 dirsrvtests/tests/suites/psearch/psearch_test.py create mode 100644 dirsrvtests/tests/suites/pwp_storage/storage_test.py create mode 100644 dirsrvtests/tests/suites/referint_plugin/rename_test.py create mode 100644 dirsrvtests/tests/suites/replication/__init__.py create mode 100644 dirsrvtests/tests/suites/replication/acceptance_test.py create mode 100644 dirsrvtests/tests/suites/replication/cascading_test.py create mode 100644 dirsrvtests/tests/suites/replication/changelog_test.py create mode 100644 dirsrvtests/tests/suites/replication/changelog_trimming_test.py create mode 100644 dirsrvtests/tests/suites/replication/cleanallruv_max_tasks_test.py create mode 100644 dirsrvtests/tests/suites/replication/cleanallruv_test.py create mode 100644 dirsrvtests/tests/suites/replication/conflict_resolve_test.py create mode 100644 dirsrvtests/tests/suites/replication/conftest.py create mode 100644 dirsrvtests/tests/suites/replication/encryption_cl5_test.py create mode 100644 dirsrvtests/tests/suites/replication/regression_test.py create mode 100644 dirsrvtests/tests/suites/replication/replica_config_test.py create mode 100644 dirsrvtests/tests/suites/replication/ruvstore_test.py create mode 100644 dirsrvtests/tests/suites/replication/single_master_test.py create mode 100644 dirsrvtests/tests/suites/replication/tls_client_auth_repl_test.py create mode 100644 dirsrvtests/tests/suites/replication/tombstone_fixup_test.py create mode 100644 dirsrvtests/tests/suites/replication/tombstone_test.py create mode 100644 dirsrvtests/tests/suites/replication/wait_for_async_feature_test.py create mode 100644 dirsrvtests/tests/suites/resource_limits/__init__.py create mode 100644 dirsrvtests/tests/suites/resource_limits/fdlimits_test.py create mode 100644 dirsrvtests/tests/suites/roles/basic_test.py create mode 100644 dirsrvtests/tests/suites/sasl/__init__.py create mode 100644 dirsrvtests/tests/suites/sasl/allowed_mechs_test.py create mode 100644 dirsrvtests/tests/suites/sasl/plain_test.py create mode 100644 dirsrvtests/tests/suites/sasl/regression_test.py create mode 100644 dirsrvtests/tests/suites/schema/__init__.py create mode 100644 dirsrvtests/tests/suites/schema/eduperson_test.py create mode 100644 dirsrvtests/tests/suites/schema/schema_reload_test.py create mode 100644 dirsrvtests/tests/suites/schema/schema_replication_test.py create mode 100644 
dirsrvtests/tests/suites/schema/schema_test.py create mode 100644 dirsrvtests/tests/suites/setup_ds/__init__.py create mode 100644 dirsrvtests/tests/suites/setup_ds/dscreate_test.py create mode 100644 dirsrvtests/tests/suites/setup_ds/remove_test.py create mode 100644 dirsrvtests/tests/suites/setup_ds/setup_ds_test.py create mode 100644 dirsrvtests/tests/suites/snmp/__init__.py create mode 100644 dirsrvtests/tests/suites/stat/__init__.py create mode 100644 dirsrvtests/tests/suites/stat/mmt_state_test.py create mode 100644 dirsrvtests/tests/suites/syntax/__init__.py create mode 100644 dirsrvtests/tests/suites/syntax/acceptance_test.py create mode 100644 dirsrvtests/tests/suites/syntax/mr_test.py create mode 100644 dirsrvtests/tests/suites/tls/__init__.py create mode 100644 dirsrvtests/tests/suites/tls/cipher_test.py create mode 100644 dirsrvtests/tests/suites/tls/ssl_version_test.py create mode 100644 dirsrvtests/tests/suites/tls/tls_check_crl_test.py create mode 100644 dirsrvtests/tests/suites/tls/tls_ldaps_only_test.py create mode 100644 dirsrvtests/tests/suites/vlv/__init__.py create mode 100644 dirsrvtests/tests/suites/vlv/regression_test.py create mode 100644 dirsrvtests/tests/tickets/__init__.py create mode 100644 dirsrvtests/tests/tickets/ticket47462_test.py create mode 100644 dirsrvtests/tests/tickets/ticket47560_test.py create mode 100644 dirsrvtests/tests/tickets/ticket47573_test.py create mode 100644 dirsrvtests/tests/tickets/ticket47619_test.py create mode 100644 dirsrvtests/tests/tickets/ticket47640_test.py create mode 100644 dirsrvtests/tests/tickets/ticket47653MMR_test.py create mode 100644 dirsrvtests/tests/tickets/ticket47676_test.py create mode 100644 dirsrvtests/tests/tickets/ticket47714_test.py create mode 100644 dirsrvtests/tests/tickets/ticket47721_test.py create mode 100644 dirsrvtests/tests/tickets/ticket47781_test.py create mode 100644 dirsrvtests/tests/tickets/ticket47787_test.py create mode 100644 dirsrvtests/tests/tickets/ticket47808_test.py create mode 100644 dirsrvtests/tests/tickets/ticket47815_test.py create mode 100644 dirsrvtests/tests/tickets/ticket47823_test.py create mode 100644 dirsrvtests/tests/tickets/ticket47828_test.py create mode 100644 dirsrvtests/tests/tickets/ticket47829_test.py create mode 100644 dirsrvtests/tests/tickets/ticket47833_test.py create mode 100644 dirsrvtests/tests/tickets/ticket47869MMR_test.py create mode 100644 dirsrvtests/tests/tickets/ticket47871_test.py create mode 100644 dirsrvtests/tests/tickets/ticket47900_test.py create mode 100644 dirsrvtests/tests/tickets/ticket47910_test.py create mode 100644 dirsrvtests/tests/tickets/ticket47920_test.py create mode 100644 dirsrvtests/tests/tickets/ticket47921_test.py create mode 100644 dirsrvtests/tests/tickets/ticket47927_test.py create mode 100644 dirsrvtests/tests/tickets/ticket47931_test.py create mode 100644 dirsrvtests/tests/tickets/ticket47953_test.py create mode 100644 dirsrvtests/tests/tickets/ticket47963_test.py create mode 100644 dirsrvtests/tests/tickets/ticket47970_test.py create mode 100644 dirsrvtests/tests/tickets/ticket47973_test.py create mode 100644 dirsrvtests/tests/tickets/ticket47976_test.py create mode 100644 dirsrvtests/tests/tickets/ticket47980_test.py create mode 100644 dirsrvtests/tests/tickets/ticket47981_test.py create mode 100644 dirsrvtests/tests/tickets/ticket47988_test.py create mode 100644 dirsrvtests/tests/tickets/ticket48005_test.py create mode 100644 dirsrvtests/tests/tickets/ticket48013_test.py create mode 100644 
dirsrvtests/tests/tickets/ticket48026_test.py create mode 100644 dirsrvtests/tests/tickets/ticket48109_test.py create mode 100644 dirsrvtests/tests/tickets/ticket48170_test.py create mode 100644 dirsrvtests/tests/tickets/ticket48194_test.py create mode 100644 dirsrvtests/tests/tickets/ticket48212_test.py create mode 100644 dirsrvtests/tests/tickets/ticket48214_test.py create mode 100644 dirsrvtests/tests/tickets/ticket48228_test.py create mode 100644 dirsrvtests/tests/tickets/ticket48233_test.py create mode 100644 dirsrvtests/tests/tickets/ticket48234_test.py create mode 100644 dirsrvtests/tests/tickets/ticket48252_test.py create mode 100644 dirsrvtests/tests/tickets/ticket48265_test.py create mode 100644 dirsrvtests/tests/tickets/ticket48266_test.py create mode 100644 dirsrvtests/tests/tickets/ticket48270_test.py create mode 100644 dirsrvtests/tests/tickets/ticket48272_test.py create mode 100644 dirsrvtests/tests/tickets/ticket48294_test.py create mode 100644 dirsrvtests/tests/tickets/ticket48295_test.py create mode 100644 dirsrvtests/tests/tickets/ticket48312_test.py create mode 100644 dirsrvtests/tests/tickets/ticket48325_test.py create mode 100644 dirsrvtests/tests/tickets/ticket48342_test.py create mode 100644 dirsrvtests/tests/tickets/ticket48354_test.py create mode 100644 dirsrvtests/tests/tickets/ticket48362_test.py create mode 100644 dirsrvtests/tests/tickets/ticket48366_test.py create mode 100644 dirsrvtests/tests/tickets/ticket48370_test.py create mode 100644 dirsrvtests/tests/tickets/ticket48383_test.py create mode 100644 dirsrvtests/tests/tickets/ticket48497_test.py create mode 100644 dirsrvtests/tests/tickets/ticket48637_test.py create mode 100644 dirsrvtests/tests/tickets/ticket48665_test.py create mode 100644 dirsrvtests/tests/tickets/ticket48745_test.py create mode 100644 dirsrvtests/tests/tickets/ticket48746_test.py create mode 100644 dirsrvtests/tests/tickets/ticket48759_test.py create mode 100644 dirsrvtests/tests/tickets/ticket48784_test.py create mode 100644 dirsrvtests/tests/tickets/ticket48798_test.py create mode 100644 dirsrvtests/tests/tickets/ticket48799_test.py create mode 100644 dirsrvtests/tests/tickets/ticket48808_test.py create mode 100644 dirsrvtests/tests/tickets/ticket48844_test.py create mode 100644 dirsrvtests/tests/tickets/ticket48891_test.py create mode 100644 dirsrvtests/tests/tickets/ticket48893_test.py create mode 100644 dirsrvtests/tests/tickets/ticket48896_test.py create mode 100644 dirsrvtests/tests/tickets/ticket48906_test.py create mode 100644 dirsrvtests/tests/tickets/ticket48916_test.py create mode 100644 dirsrvtests/tests/tickets/ticket48944_test.py create mode 100644 dirsrvtests/tests/tickets/ticket48956_test.py create mode 100644 dirsrvtests/tests/tickets/ticket48961_test.py create mode 100644 dirsrvtests/tests/tickets/ticket48973_test.py create mode 100644 dirsrvtests/tests/tickets/ticket49008_test.py create mode 100644 dirsrvtests/tests/tickets/ticket49020_test.py create mode 100644 dirsrvtests/tests/tickets/ticket49039_test.py create mode 100644 dirsrvtests/tests/tickets/ticket49072_test.py create mode 100644 dirsrvtests/tests/tickets/ticket49073_test.py create mode 100644 dirsrvtests/tests/tickets/ticket49076_test.py create mode 100644 dirsrvtests/tests/tickets/ticket49095_test.py create mode 100644 dirsrvtests/tests/tickets/ticket49104_test.py create mode 100644 dirsrvtests/tests/tickets/ticket49121_test.py create mode 100644 dirsrvtests/tests/tickets/ticket49122_test.py create mode 100644 
dirsrvtests/tests/tickets/ticket49180_test.py create mode 100644 dirsrvtests/tests/tickets/ticket49184_test.py create mode 100644 dirsrvtests/tests/tickets/ticket49192_test.py create mode 100644 dirsrvtests/tests/tickets/ticket49227_test.py create mode 100644 dirsrvtests/tests/tickets/ticket49249_test.py create mode 100644 dirsrvtests/tests/tickets/ticket49273_test.py create mode 100644 dirsrvtests/tests/tickets/ticket49287_test.py create mode 100644 dirsrvtests/tests/tickets/ticket49290_test.py create mode 100644 dirsrvtests/tests/tickets/ticket49303_test.py create mode 100644 dirsrvtests/tests/tickets/ticket49386_test.py create mode 100644 dirsrvtests/tests/tickets/ticket49412_test.py create mode 100644 dirsrvtests/tests/tickets/ticket49441_test.py create mode 100644 dirsrvtests/tests/tickets/ticket49460_test.py create mode 100644 dirsrvtests/tests/tickets/ticket49463_test.py create mode 100644 dirsrvtests/tests/tickets/ticket49471_test.py create mode 100644 dirsrvtests/tests/tickets/ticket49540_test.py create mode 100644 dirsrvtests/tests/tickets/ticket49623_2_test.py create mode 100644 dirsrvtests/tests/tickets/ticket49658_test.py create mode 100644 dirsrvtests/tests/tickets/ticket49788_test.py create mode 100644 dirsrvtests/tests/tickets/ticket50078_test.py create mode 100644 dirsrvtests/tests/tickets/ticket50232_test.py create mode 100644 dirsrvtests/tests/tickets/ticket50234_test.py create mode 100644 dirsrvtests/tests/tickets/ticket548_test.py create mode 100644 dirsrvtests/tests/tmp/README create mode 100644 dirsrvtests/tests/tmp/__init__.py create mode 100644 docker.mk create mode 100644 docker/389-ds-fedora/Dockerfile create mode 100644 docker/389-ds-suse/Dockerfile create mode 100644 docker/389-ds-suse/Dockerfile.release create mode 100644 docker/README.md create mode 100644 docs/CREDITS.artwork create mode 100644 docs/custom.css create mode 100644 docs/doc_header.html create mode 100644 docs/intro.md create mode 100644 docs/job-safety.md create mode 100644 docs/logo-banner.png create mode 100644 docs/logo-banner.xcf create mode 100644 docs/logo-square.xcf create mode 100644 docs/nunc-stans-intro.dia create mode 100644 docs/nunc-stans-intro.png create mode 100644 docs/nunc-stans-job-states.dia create mode 100644 docs/nunc-stans-job-states.png create mode 100644 docs/slapi.doxy.in create mode 100644 docs/tops_tops.xcf create mode 100644 include/base/crit.h create mode 100644 include/base/dbtbase.h create mode 100644 include/base/ereport.h create mode 100644 include/base/file.h create mode 100644 include/base/fsmutex.h create mode 100644 include/base/plist.h create mode 100644 include/base/pool.h create mode 100644 include/base/shexp.h create mode 100644 include/base/systems.h create mode 100644 include/base/systhr.h create mode 100644 include/base/util.h create mode 100644 include/i18n.h create mode 100644 include/ldaputil/cert.h create mode 100644 include/ldaputil/certmap.h create mode 100644 include/ldaputil/dbconf.h create mode 100644 include/ldaputil/encode.h create mode 100644 include/ldaputil/errors.h create mode 100644 include/ldaputil/init.h create mode 100644 include/ldaputil/ldapauth.h create mode 100644 include/ldaputil/ldaputil.h create mode 100644 include/libaccess/acl.h create mode 100644 include/libaccess/aclerror.h create mode 100644 include/libaccess/acleval.h create mode 100644 include/libaccess/aclglobal.h create mode 100644 include/libaccess/aclproto.h create mode 100644 include/libaccess/aclstruct.h create mode 100644 include/libaccess/attrec.h create mode 
100644 include/libaccess/authdb.h create mode 100644 include/libaccess/dbtlibaccess.h create mode 100644 include/libaccess/dnfstruct.h create mode 100644 include/libaccess/ipfstruct.h create mode 100644 include/libaccess/las.h create mode 100644 include/libaccess/nsauth.h create mode 100644 include/libaccess/nsautherr.h create mode 100644 include/libaccess/nserror.h create mode 100644 include/libaccess/symbols.h create mode 100644 include/libaccess/userauth.h create mode 100644 include/libaccess/usi.h create mode 100644 include/libaccess/usrcache.h create mode 100644 include/libadmin/dbtlibadmin.h create mode 100644 include/libadmin/libadmin.h create mode 100644 include/netsite.h create mode 100644 include/public/base/systems.h create mode 100644 include/public/netsite.h create mode 100644 include/public/nsacl/aclapi.h create mode 100644 include/public/nsacl/acldef.h create mode 100644 include/public/nsacl/nserrdef.h create mode 100644 include/public/nsacl/plistdef.h create mode 100644 include/public/nsapi.h create mode 100644 ldap/admin/src/70-dirsrv.conf create mode 100644 ldap/admin/src/base-initconfig.in create mode 100644 ldap/admin/src/defaults.inf.in create mode 100644 ldap/admin/src/initconfig.in create mode 100755 ldap/admin/src/logconv.pl create mode 100644 ldap/admin/src/makemccvlvindexes create mode 100644 ldap/admin/src/makevlvindex create mode 100644 ldap/admin/src/makevlvsearch create mode 100644 ldap/admin/src/scripts/10cleanupldapi.pl create mode 100644 ldap/admin/src/scripts/10delautodnsuffix.pl create mode 100644 ldap/admin/src/scripts/10fixrundir.pl create mode 100644 ldap/admin/src/scripts/20betxn.pl create mode 100644 ldap/admin/src/scripts/50AES-pbe-plugin.ldif create mode 100644 ldap/admin/src/scripts/50acctusabilityplugin.ldif create mode 100644 ldap/admin/src/scripts/50addchainingsaslpwroles.ldif create mode 100644 ldap/admin/src/scripts/50automemberplugin.ldif create mode 100644 ldap/admin/src/scripts/50bitstringsyntaxplugin.ldif create mode 100644 ldap/admin/src/scripts/50contentsync.ldif create mode 100644 ldap/admin/src/scripts/50deliverymethodsyntaxplugin.ldif create mode 100644 ldap/admin/src/scripts/50derefplugin.ldif create mode 100644 ldap/admin/src/scripts/50disableurisyntaxplugin.ldif create mode 100644 ldap/admin/src/scripts/50enhancedguidesyntaxplugin.ldif create mode 100644 ldap/admin/src/scripts/50entryusnindex.ldif create mode 100644 ldap/admin/src/scripts/50faxnumbersyntaxplugin.ldif create mode 100644 ldap/admin/src/scripts/50faxsyntaxplugin.ldif create mode 100644 ldap/admin/src/scripts/50fixNsState.pl create mode 100644 ldap/admin/src/scripts/50guidesyntaxplugin.ldif create mode 100644 ldap/admin/src/scripts/50linkedattrsplugin.ldif create mode 100644 ldap/admin/src/scripts/50managedentriesplugin.ldif create mode 100644 ldap/admin/src/scripts/50memberofindex.ldif create mode 100644 ldap/admin/src/scripts/50memberofplugin.ldif create mode 100644 ldap/admin/src/scripts/50nameuidsyntaxplugin.ldif create mode 100644 ldap/admin/src/scripts/50nstombstonecsn.ldif create mode 100644 ldap/admin/src/scripts/50numericstringsyntaxplugin.ldif create mode 100644 ldap/admin/src/scripts/50printablestringsyntaxplugin.ldif create mode 100644 ldap/admin/src/scripts/50refintprecedence.ldif create mode 100644 ldap/admin/src/scripts/50retroclprecedence.ldif create mode 100644 ldap/admin/src/scripts/50rootdnaccesscontrolplugin.ldif create mode 100644 ldap/admin/src/scripts/50schemareloadplugin.ldif create mode 100644 ldap/admin/src/scripts/50smd5pwdstorageplugin.ldif 
create mode 100644 ldap/admin/src/scripts/50syntaxvalidplugin.ldif create mode 100644 ldap/admin/src/scripts/50targetuniqueid.ldif create mode 100644 ldap/admin/src/scripts/50teletexterminalidsyntaxplugin.ldif create mode 100644 ldap/admin/src/scripts/50telexnumbersyntaxplugin.ldif create mode 100644 ldap/admin/src/scripts/50updateconfig.ldif create mode 100644 ldap/admin/src/scripts/50usnplugin.ldif create mode 100644 ldap/admin/src/scripts/52updateAESplugin.pl create mode 100644 ldap/admin/src/scripts/60removeLegacyReplication.ldif create mode 100644 ldap/admin/src/scripts/60upgradeconfigfiles.pl create mode 100644 ldap/admin/src/scripts/60upgradeschemafiles.pl create mode 100644 ldap/admin/src/scripts/70upgradefromldif.pl create mode 100644 ldap/admin/src/scripts/80upgradednformat.pl.in create mode 100644 ldap/admin/src/scripts/81changelog.pl create mode 100644 ldap/admin/src/scripts/82targetuniqueidindex.pl create mode 100644 ldap/admin/src/scripts/90subtreerename.pl create mode 100644 ldap/admin/src/scripts/91reindex.pl.in create mode 100644 ldap/admin/src/scripts/91subtreereindex.pl create mode 100644 ldap/admin/src/scripts/DSCreate.pm.in create mode 100644 ldap/admin/src/scripts/DSDialogs.pm create mode 100644 ldap/admin/src/scripts/DSMigration.pm.in create mode 100644 ldap/admin/src/scripts/DSSharedLib.in create mode 100644 ldap/admin/src/scripts/DSUpdate.pm.in create mode 100644 ldap/admin/src/scripts/DSUpdateDialogs.pm create mode 100644 ldap/admin/src/scripts/DSUtil.pm.in create mode 100644 ldap/admin/src/scripts/Dialog.pm create mode 100644 ldap/admin/src/scripts/DialogManager.pm.in create mode 100644 ldap/admin/src/scripts/FileConn.pm create mode 100644 ldap/admin/src/scripts/Inf.pm create mode 100644 ldap/admin/src/scripts/Migration.pm.in create mode 100644 ldap/admin/src/scripts/Resource.pm create mode 100644 ldap/admin/src/scripts/Setup.pm.in create mode 100644 ldap/admin/src/scripts/SetupDialogs.pm.in create mode 100644 ldap/admin/src/scripts/SetupLog.pm create mode 100755 ldap/admin/src/scripts/bak2db.in create mode 100644 ldap/admin/src/scripts/bak2db.pl.in create mode 100755 ldap/admin/src/scripts/cl-dump.pl create mode 100644 ldap/admin/src/scripts/cleanallruv.pl.in create mode 100755 ldap/admin/src/scripts/db2bak.in create mode 100644 ldap/admin/src/scripts/db2bak.pl.in create mode 100755 ldap/admin/src/scripts/db2index.in create mode 100644 ldap/admin/src/scripts/db2index.pl.in create mode 100755 ldap/admin/src/scripts/db2ldif.in create mode 100644 ldap/admin/src/scripts/db2ldif.pl.in create mode 100644 ldap/admin/src/scripts/dbmon.sh.in create mode 100755 ldap/admin/src/scripts/dbverify.in create mode 100755 ldap/admin/src/scripts/dn2rdn.in create mode 100644 ldap/admin/src/scripts/dnaplugindepends.ldif create mode 100644 ldap/admin/src/scripts/ds-logpipe.py create mode 100755 ldap/admin/src/scripts/ds-replcheck create mode 100755 ldap/admin/src/scripts/ds_selinux_enabled.in create mode 100644 ldap/admin/src/scripts/ds_selinux_port_query.in create mode 100644 ldap/admin/src/scripts/dscreate.map.in create mode 100644 ldap/admin/src/scripts/dsorgentries.map.in create mode 100644 ldap/admin/src/scripts/dsupdate.map.in create mode 100644 ldap/admin/src/scripts/exampleupdate.ldif create mode 100644 ldap/admin/src/scripts/exampleupdate.pl create mode 100644 ldap/admin/src/scripts/exampleupdate.sh create mode 100644 ldap/admin/src/scripts/failedbinds.py create mode 100644 ldap/admin/src/scripts/fixup-linkedattrs.pl.in create mode 100644 
ldap/admin/src/scripts/fixup-memberof.pl.in create mode 100755 ldap/admin/src/scripts/ldif2db.in create mode 100644 ldap/admin/src/scripts/ldif2db.pl.in create mode 100755 ldap/admin/src/scripts/ldif2ldap.in create mode 100644 ldap/admin/src/scripts/logregex.py create mode 100644 ldap/admin/src/scripts/migrate-ds.pl.in create mode 100644 ldap/admin/src/scripts/migrate-ds.res create mode 100755 ldap/admin/src/scripts/monitor.in create mode 100644 ldap/admin/src/scripts/ns-accountstatus.pl.in create mode 100644 ldap/admin/src/scripts/ns-activate.pl.in create mode 100644 ldap/admin/src/scripts/ns-inactivate.pl.in create mode 100755 ldap/admin/src/scripts/ns-newpwpolicy.pl.in create mode 100644 ldap/admin/src/scripts/ns-slapd-gdb.py create mode 100644 ldap/admin/src/scripts/readnsstate.in create mode 100755 ldap/admin/src/scripts/remove-ds.pl.in create mode 100755 ldap/admin/src/scripts/repl-monitor.pl.in create mode 100644 ldap/admin/src/scripts/restart-dirsrv.in create mode 100755 ldap/admin/src/scripts/restoreconfig.in create mode 100755 ldap/admin/src/scripts/saveconfig.in create mode 100644 ldap/admin/src/scripts/schema-reload.pl.in create mode 100644 ldap/admin/src/scripts/setup-ds.pl.in create mode 100644 ldap/admin/src/scripts/setup-ds.res.in create mode 100755 ldap/admin/src/scripts/start-dirsrv.in create mode 100755 ldap/admin/src/scripts/status-dirsrv.in create mode 100755 ldap/admin/src/scripts/stop-dirsrv.in create mode 100755 ldap/admin/src/scripts/suffix2instance.in create mode 100644 ldap/admin/src/scripts/syntax-validate.pl.in create mode 100755 ldap/admin/src/scripts/template-bak2db.in create mode 100644 ldap/admin/src/scripts/template-bak2db.pl.in create mode 100644 ldap/admin/src/scripts/template-cleanallruv.pl.in create mode 100755 ldap/admin/src/scripts/template-db2bak.in create mode 100644 ldap/admin/src/scripts/template-db2bak.pl.in create mode 100755 ldap/admin/src/scripts/template-db2index.in create mode 100644 ldap/admin/src/scripts/template-db2index.pl.in create mode 100755 ldap/admin/src/scripts/template-db2ldif.in create mode 100644 ldap/admin/src/scripts/template-db2ldif.pl.in create mode 100755 ldap/admin/src/scripts/template-dbverify.in create mode 100755 ldap/admin/src/scripts/template-dn2rdn.in create mode 100644 ldap/admin/src/scripts/template-fixup-linkedattrs.pl.in create mode 100644 ldap/admin/src/scripts/template-fixup-memberof.pl.in create mode 100644 ldap/admin/src/scripts/template-fixup-memberuid.pl.in create mode 100755 ldap/admin/src/scripts/template-ldif2db.in create mode 100644 ldap/admin/src/scripts/template-ldif2db.pl.in create mode 100755 ldap/admin/src/scripts/template-ldif2ldap.in create mode 100755 ldap/admin/src/scripts/template-monitor.in create mode 100644 ldap/admin/src/scripts/template-ns-accountstatus.pl.in create mode 100644 ldap/admin/src/scripts/template-ns-activate.pl.in create mode 100644 ldap/admin/src/scripts/template-ns-inactivate.pl.in create mode 100755 ldap/admin/src/scripts/template-ns-newpwpolicy.pl.in create mode 100644 ldap/admin/src/scripts/template-restart-slapd.in create mode 100755 ldap/admin/src/scripts/template-restoreconfig.in create mode 100755 ldap/admin/src/scripts/template-saveconfig.in create mode 100644 ldap/admin/src/scripts/template-schema-reload.pl.in create mode 100755 ldap/admin/src/scripts/template-start-slapd.in create mode 100755 ldap/admin/src/scripts/template-stop-slapd.in create mode 100755 ldap/admin/src/scripts/template-suffix2instance.in create mode 100644 
ldap/admin/src/scripts/template-syntax-validate.pl.in create mode 100755 ldap/admin/src/scripts/template-upgradedb.in create mode 100755 ldap/admin/src/scripts/template-upgradednformat.in create mode 100644 ldap/admin/src/scripts/template-usn-tombstone-cleanup.pl.in create mode 100644 ldap/admin/src/scripts/template-verify-db.pl.in create mode 100755 ldap/admin/src/scripts/template-vlvindex.in create mode 100755 ldap/admin/src/scripts/upgradedb.in create mode 100755 ldap/admin/src/scripts/upgradednformat.in create mode 100644 ldap/admin/src/scripts/usn-tombstone-cleanup.pl.in create mode 100644 ldap/admin/src/scripts/verify-db.pl.in create mode 100755 ldap/admin/src/scripts/vlvindex.in create mode 100644 ldap/admin/src/slapd.inf.in create mode 100644 ldap/admin/src/template-initconfig.in create mode 100755 ldap/admin/src/upgradeServer create mode 100644 ldap/include/avl.h create mode 100644 ldap/include/dblayer.h create mode 100644 ldap/include/disptmpl.h create mode 100644 ldap/include/ldaprot.h create mode 100644 ldap/include/portable.h create mode 100644 ldap/include/regex.h create mode 100644 ldap/include/srchpref.h create mode 100644 ldap/include/sysexits-compat.h create mode 100644 ldap/ldif/Ace.ldif create mode 100644 ldap/ldif/European.ldif create mode 100644 ldap/ldif/Eurosuffix.ldif create mode 100644 ldap/ldif/Example-roles.ldif create mode 100644 ldap/ldif/Example-views.ldif create mode 100644 ldap/ldif/Example.ldif create mode 100644 ldap/ldif/template-baseacis.ldif.in create mode 100644 ldap/ldif/template-country.ldif.in create mode 100644 ldap/ldif/template-domain.ldif.in create mode 100644 ldap/ldif/template-dse-minimal.ldif.in create mode 100644 ldap/ldif/template-dse.ldif.in create mode 100644 ldap/ldif/template-ldapi-autobind.ldif.in create mode 100644 ldap/ldif/template-ldapi-default.ldif.in create mode 100644 ldap/ldif/template-ldapi.ldif.in create mode 100644 ldap/ldif/template-locality.ldif.in create mode 100644 ldap/ldif/template-org.ldif.in create mode 100644 ldap/ldif/template-orgunit.ldif.in create mode 100644 ldap/ldif/template-sasl.ldif.in create mode 100644 ldap/ldif/template-state.ldif.in create mode 100644 ldap/ldif/template-suffix-db.ldif.in create mode 100644 ldap/ldif/template.ldif create mode 100644 ldap/libraries/libavl/avl.c create mode 100644 ldap/libraries/libavl/testavl.c create mode 100644 ldap/schema/00core.ldif create mode 100644 ldap/schema/01core389.ldif create mode 100644 ldap/schema/02common.ldif create mode 100644 ldap/schema/05rfc2927.ldif create mode 100644 ldap/schema/05rfc4523.ldif create mode 100644 ldap/schema/05rfc4524.ldif create mode 100644 ldap/schema/06inetorgperson.ldif create mode 100644 ldap/schema/10automember-plugin.ldif create mode 100644 ldap/schema/10dna-plugin.ldif create mode 100644 ldap/schema/10mep-plugin.ldif create mode 100644 ldap/schema/10presence.ldif create mode 100644 ldap/schema/10rfc2307.ldif create mode 100644 ldap/schema/10rfc2307bis.ldif create mode 100644 ldap/schema/20subscriber.ldif create mode 100644 ldap/schema/25java-object.ldif create mode 100644 ldap/schema/28pilot.ldif create mode 100644 ldap/schema/30ns-common.ldif create mode 100644 ldap/schema/50ns-admin.ldif create mode 100644 ldap/schema/50ns-certificate.ldif create mode 100644 ldap/schema/50ns-directory.ldif create mode 100644 ldap/schema/50ns-mail.ldif create mode 100644 ldap/schema/50ns-value.ldif create mode 100644 ldap/schema/50ns-web.ldif create mode 100644 ldap/schema/60acctpolicy.ldif create mode 100644 ldap/schema/60autofs.ldif create 
mode 100644 ldap/schema/60changelog.ldif create mode 100644 ldap/schema/60eduperson.ldif create mode 100644 ldap/schema/60inetmail.ldif create mode 100644 ldap/schema/60kerberos.ldif create mode 100644 ldap/schema/60krb5kdc.ldif create mode 100644 ldap/schema/60mozilla.ldif create mode 100644 ldap/schema/60nis.ldif create mode 100644 ldap/schema/60nss-ldap.ldif create mode 100644 ldap/schema/60pam-plugin.ldif create mode 100644 ldap/schema/60posix-winsync-plugin.ldif create mode 100644 ldap/schema/60pureftpd.ldif create mode 100644 ldap/schema/60qmail.ldif create mode 100644 ldap/schema/60radius.ldif create mode 100644 ldap/schema/60rfc2739.ldif create mode 100644 ldap/schema/60rfc3712.ldif create mode 100644 ldap/schema/60rfc4876.ldif create mode 100644 ldap/schema/60sabayon.ldif create mode 100644 ldap/schema/60samba.ldif create mode 100644 ldap/schema/60samba3.ldif create mode 100644 ldap/schema/60sendmail.ldif create mode 100644 ldap/schema/60sudo.ldif create mode 100644 ldap/schema/60trust.ldif create mode 100644 ldap/schema/99user.ldif create mode 100644 ldap/schema/slapd-collations.conf create mode 100644 ldap/servers/plugins/acct_usability/acct_usability.c create mode 100644 ldap/servers/plugins/acct_usability/acct_usability.h create mode 100644 ldap/servers/plugins/acctpolicy/acct_config.c create mode 100644 ldap/servers/plugins/acctpolicy/acct_init.c create mode 100644 ldap/servers/plugins/acctpolicy/acct_plugin.c create mode 100644 ldap/servers/plugins/acctpolicy/acct_util.c create mode 100644 ldap/servers/plugins/acctpolicy/acctpolicy.h create mode 100644 ldap/servers/plugins/acctpolicy/sampleconfig.ldif create mode 100644 ldap/servers/plugins/acctpolicy/samplepolicy.ldif create mode 100644 ldap/servers/plugins/acl/ACL-Notes create mode 100644 ldap/servers/plugins/acl/acl.c create mode 100644 ldap/servers/plugins/acl/acl.h create mode 100644 ldap/servers/plugins/acl/acl_ext.c create mode 100644 ldap/servers/plugins/acl/aclanom.c create mode 100644 ldap/servers/plugins/acl/acleffectiverights.c create mode 100644 ldap/servers/plugins/acl/aclgroup.c create mode 100644 ldap/servers/plugins/acl/aclinit.c create mode 100644 ldap/servers/plugins/acl/acllas.c create mode 100644 ldap/servers/plugins/acl/acllist.c create mode 100644 ldap/servers/plugins/acl/aclparse.c create mode 100644 ldap/servers/plugins/acl/aclplugin.c create mode 100644 ldap/servers/plugins/acl/aclutil.c create mode 100644 ldap/servers/plugins/addn/addn.c create mode 100644 ldap/servers/plugins/addn/addn.h create mode 100644 ldap/servers/plugins/automember/automember.c create mode 100644 ldap/servers/plugins/automember/automember.h create mode 100644 ldap/servers/plugins/bitwise/bitwise.c create mode 100644 ldap/servers/plugins/chainingdb/cb.h create mode 100644 ldap/servers/plugins/chainingdb/cb_abandon.c create mode 100644 ldap/servers/plugins/chainingdb/cb_acl.c create mode 100644 ldap/servers/plugins/chainingdb/cb_add.c create mode 100644 ldap/servers/plugins/chainingdb/cb_bind.c create mode 100644 ldap/servers/plugins/chainingdb/cb_cleanup.c create mode 100644 ldap/servers/plugins/chainingdb/cb_close.c create mode 100644 ldap/servers/plugins/chainingdb/cb_compare.c create mode 100644 ldap/servers/plugins/chainingdb/cb_config.c create mode 100644 ldap/servers/plugins/chainingdb/cb_conn_stateless.c create mode 100644 ldap/servers/plugins/chainingdb/cb_controls.c create mode 100644 ldap/servers/plugins/chainingdb/cb_debug.c create mode 100644 ldap/servers/plugins/chainingdb/cb_delete.c create mode 100644 
ldap/servers/plugins/chainingdb/cb_init.c create mode 100644 ldap/servers/plugins/chainingdb/cb_instance.c create mode 100644 ldap/servers/plugins/chainingdb/cb_modify.c create mode 100644 ldap/servers/plugins/chainingdb/cb_modrdn.c create mode 100644 ldap/servers/plugins/chainingdb/cb_monitor.c create mode 100644 ldap/servers/plugins/chainingdb/cb_schema.c create mode 100644 ldap/servers/plugins/chainingdb/cb_search.c create mode 100644 ldap/servers/plugins/chainingdb/cb_size.c create mode 100644 ldap/servers/plugins/chainingdb/cb_start.c create mode 100644 ldap/servers/plugins/chainingdb/cb_temp.c create mode 100644 ldap/servers/plugins/chainingdb/cb_test.c create mode 100644 ldap/servers/plugins/chainingdb/cb_unbind.c create mode 100644 ldap/servers/plugins/chainingdb/cb_utils.c create mode 100644 ldap/servers/plugins/collation/collate.c create mode 100644 ldap/servers/plugins/collation/collate.h create mode 100644 ldap/servers/plugins/collation/config.c create mode 100644 ldap/servers/plugins/collation/config.h create mode 100644 ldap/servers/plugins/collation/debug.c create mode 100644 ldap/servers/plugins/collation/orfilter.c create mode 100644 ldap/servers/plugins/collation/orfilter.h create mode 100644 ldap/servers/plugins/cos/cos.c create mode 100644 ldap/servers/plugins/cos/cos_cache.c create mode 100644 ldap/servers/plugins/cos/cos_cache.h create mode 100644 ldap/servers/plugins/deref/deref.c create mode 100644 ldap/servers/plugins/deref/deref.h create mode 100644 ldap/servers/plugins/distrib/Makefile create mode 100644 ldap/servers/plugins/distrib/Makefile.HPUX create mode 100644 ldap/servers/plugins/distrib/Makefile.HPUX64 create mode 100644 ldap/servers/plugins/distrib/Makefile.Linux create mode 100644 ldap/servers/plugins/distrib/Makefile.SOLARIS create mode 100644 ldap/servers/plugins/distrib/Makefile.SOLARIS64 create mode 100644 ldap/servers/plugins/distrib/Makefile.SOLARISx86 create mode 100644 ldap/servers/plugins/distrib/README create mode 100644 ldap/servers/plugins/distrib/distrib.c create mode 100755 ldap/servers/plugins/dna/addentries.sh create mode 100755 ldap/servers/plugins/dna/config.sh create mode 100644 ldap/servers/plugins/dna/del_test_entries.dns create mode 100755 ldap/servers/plugins/dna/delentries.sh create mode 100644 ldap/servers/plugins/dna/dna.c create mode 100755 ldap/servers/plugins/dna/editentries.sh create mode 100755 ldap/servers/plugins/dna/oneentry.sh create mode 100644 ldap/servers/plugins/dna/posix.ldif create mode 100644 ldap/servers/plugins/dna/posix_one.ldif create mode 100644 ldap/servers/plugins/dna/posix_test.ldif create mode 100755 ldap/servers/plugins/dna/seeconfig.sh create mode 100755 ldap/servers/plugins/dna/seeentries.sh create mode 100644 ldap/servers/plugins/dna/subtest.ldif create mode 100644 ldap/servers/plugins/http/http_client.c create mode 100644 ldap/servers/plugins/http/http_client.h create mode 100644 ldap/servers/plugins/http/http_impl.c create mode 100644 ldap/servers/plugins/http/http_impl.h create mode 100644 ldap/servers/plugins/linkedattrs/fixup_task.c create mode 100644 ldap/servers/plugins/linkedattrs/linked_attrs.c create mode 100644 ldap/servers/plugins/linkedattrs/linked_attrs.h create mode 100644 ldap/servers/plugins/memberof/memberof.c create mode 100644 ldap/servers/plugins/memberof/memberof.h create mode 100644 ldap/servers/plugins/memberof/memberof_config.c create mode 100644 ldap/servers/plugins/mep/mep.c create mode 100644 ldap/servers/plugins/mep/mep.h create mode 100644 
ldap/servers/plugins/pam_passthru/README create mode 100644 ldap/servers/plugins/pam_passthru/pam_passthru.h create mode 100644 ldap/servers/plugins/pam_passthru/pam_ptconfig.c create mode 100644 ldap/servers/plugins/pam_passthru/pam_ptdebug.c create mode 100644 ldap/servers/plugins/pam_passthru/pam_ptimpl.c create mode 100644 ldap/servers/plugins/pam_passthru/pam_ptpreop.c create mode 100644 ldap/servers/plugins/passthru/PT-Notes create mode 100644 ldap/servers/plugins/passthru/passthru.h create mode 100644 ldap/servers/plugins/passthru/ptbind.c create mode 100644 ldap/servers/plugins/passthru/ptconfig.c create mode 100644 ldap/servers/plugins/passthru/ptconn.c create mode 100644 ldap/servers/plugins/passthru/ptdebug.c create mode 100644 ldap/servers/plugins/passthru/ptpreop.c create mode 100644 ldap/servers/plugins/passthru/ptutil.c create mode 100644 ldap/servers/plugins/posix-winsync/README create mode 100644 ldap/servers/plugins/posix-winsync/posix-group-func.c create mode 100644 ldap/servers/plugins/posix-winsync/posix-group-func.h create mode 100644 ldap/servers/plugins/posix-winsync/posix-group-task.c create mode 100644 ldap/servers/plugins/posix-winsync/posix-winsync-config.c create mode 100644 ldap/servers/plugins/posix-winsync/posix-winsync.c create mode 100644 ldap/servers/plugins/posix-winsync/posix-wsp-ident.h create mode 100644 ldap/servers/plugins/presence/images/aim-offline.gif create mode 100644 ldap/servers/plugins/presence/images/aim-online.gif create mode 100644 ldap/servers/plugins/presence/images/icq-disabled.gif create mode 100644 ldap/servers/plugins/presence/images/icq-offline.gif create mode 100644 ldap/servers/plugins/presence/images/icq-online.gif create mode 100644 ldap/servers/plugins/presence/images/yahoo-offline.gif create mode 100644 ldap/servers/plugins/presence/images/yahoo-online.gif create mode 100644 ldap/servers/plugins/presence/presence.c create mode 100644 ldap/servers/plugins/presence/presence.ldif create mode 100644 ldap/servers/plugins/pwdstorage/clear_pwd.c create mode 100644 ldap/servers/plugins/pwdstorage/crypt_pwd.c create mode 100644 ldap/servers/plugins/pwdstorage/md5.h create mode 100644 ldap/servers/plugins/pwdstorage/md5_pwd.c create mode 100644 ldap/servers/plugins/pwdstorage/md5c.c create mode 100644 ldap/servers/plugins/pwdstorage/ns-mta-md5_pwd.bu create mode 100644 ldap/servers/plugins/pwdstorage/ns-mta-md5_pwd.c create mode 100644 ldap/servers/plugins/pwdstorage/pbkdf2_pwd.c create mode 100644 ldap/servers/plugins/pwdstorage/pwd_init.c create mode 100644 ldap/servers/plugins/pwdstorage/pwd_util.c create mode 100644 ldap/servers/plugins/pwdstorage/pwdstorage.h create mode 100644 ldap/servers/plugins/pwdstorage/sha_pwd.c create mode 100644 ldap/servers/plugins/pwdstorage/smd5_pwd.c create mode 100644 ldap/servers/plugins/pwdstorage/ssha_pwd.c create mode 100644 ldap/servers/plugins/referint/referint.c create mode 100644 ldap/servers/plugins/replication/cl5.h create mode 100644 ldap/servers/plugins/replication/cl5_api.c create mode 100644 ldap/servers/plugins/replication/cl5_api.h create mode 100644 ldap/servers/plugins/replication/cl5_clcache.c create mode 100644 ldap/servers/plugins/replication/cl5_clcache.h create mode 100644 ldap/servers/plugins/replication/cl5_config.c create mode 100644 ldap/servers/plugins/replication/cl5_init.c create mode 100644 ldap/servers/plugins/replication/cl5_test.c create mode 100644 ldap/servers/plugins/replication/cl5_test.h create mode 100644 ldap/servers/plugins/replication/cl_crypt.c create mode 
100644 ldap/servers/plugins/replication/cl_crypt.h create mode 100644 ldap/servers/plugins/replication/csnpl.c create mode 100644 ldap/servers/plugins/replication/csnpl.h create mode 100644 ldap/servers/plugins/replication/llist.c create mode 100644 ldap/servers/plugins/replication/llist.h create mode 100644 ldap/servers/plugins/replication/profile.c create mode 100644 ldap/servers/plugins/replication/repl-session-plugin.h create mode 100644 ldap/servers/plugins/replication/repl5.h create mode 100644 ldap/servers/plugins/replication/repl5_agmt.c create mode 100644 ldap/servers/plugins/replication/repl5_agmtlist.c create mode 100644 ldap/servers/plugins/replication/repl5_backoff.c create mode 100644 ldap/servers/plugins/replication/repl5_connection.c create mode 100644 ldap/servers/plugins/replication/repl5_inc_protocol.c create mode 100644 ldap/servers/plugins/replication/repl5_init.c create mode 100644 ldap/servers/plugins/replication/repl5_mtnode_ext.c create mode 100644 ldap/servers/plugins/replication/repl5_plugins.c create mode 100644 ldap/servers/plugins/replication/repl5_prot_private.h create mode 100644 ldap/servers/plugins/replication/repl5_protocol.c create mode 100644 ldap/servers/plugins/replication/repl5_protocol_util.c create mode 100644 ldap/servers/plugins/replication/repl5_replica.c create mode 100644 ldap/servers/plugins/replication/repl5_replica_config.c create mode 100644 ldap/servers/plugins/replication/repl5_replica_dnhash.c create mode 100644 ldap/servers/plugins/replication/repl5_replica_hash.c create mode 100644 ldap/servers/plugins/replication/repl5_replsupplier.c create mode 100644 ldap/servers/plugins/replication/repl5_ruv.c create mode 100644 ldap/servers/plugins/replication/repl5_ruv.h create mode 100644 ldap/servers/plugins/replication/repl5_schedule.c create mode 100644 ldap/servers/plugins/replication/repl5_tot_protocol.c create mode 100644 ldap/servers/plugins/replication/repl5_total.c create mode 100644 ldap/servers/plugins/replication/repl5_updatedn_list.c create mode 100644 ldap/servers/plugins/replication/repl_connext.c create mode 100644 ldap/servers/plugins/replication/repl_controls.c create mode 100644 ldap/servers/plugins/replication/repl_ext.c create mode 100644 ldap/servers/plugins/replication/repl_extop.c create mode 100644 ldap/servers/plugins/replication/repl_globals.c create mode 100644 ldap/servers/plugins/replication/repl_helper.c create mode 100644 ldap/servers/plugins/replication/repl_helper.h create mode 100644 ldap/servers/plugins/replication/repl_opext.c create mode 100644 ldap/servers/plugins/replication/repl_session_plugin.c create mode 100644 ldap/servers/plugins/replication/repl_shared.h create mode 100644 ldap/servers/plugins/replication/replutil.c create mode 100644 ldap/servers/plugins/replication/test_repl_session_plugin.c create mode 100644 ldap/servers/plugins/replication/tests/dnp_sim.c create mode 100644 ldap/servers/plugins/replication/tests/dnp_sim2.c create mode 100644 ldap/servers/plugins/replication/tests/dnp_sim3.c create mode 100755 ldap/servers/plugins/replication/tests/makesim create mode 100644 ldap/servers/plugins/replication/urp.c create mode 100644 ldap/servers/plugins/replication/urp.h create mode 100644 ldap/servers/plugins/replication/urp_glue.c create mode 100644 ldap/servers/plugins/replication/urp_tombstone.c create mode 100644 ldap/servers/plugins/replication/windows_connection.c create mode 100644 ldap/servers/plugins/replication/windows_inc_protocol.c create mode 100644 
ldap/servers/plugins/replication/windows_private.c create mode 100644 ldap/servers/plugins/replication/windows_prot_private.h create mode 100644 ldap/servers/plugins/replication/windows_protocol_util.c create mode 100644 ldap/servers/plugins/replication/windows_tot_protocol.c create mode 100644 ldap/servers/plugins/replication/windowsrepl.h create mode 100644 ldap/servers/plugins/replication/winsync-plugin.h create mode 100644 ldap/servers/plugins/retrocl/linktest.c create mode 100644 ldap/servers/plugins/retrocl/retrocl.c create mode 100644 ldap/servers/plugins/retrocl/retrocl.h create mode 100644 ldap/servers/plugins/retrocl/retrocl.txt create mode 100644 ldap/servers/plugins/retrocl/retrocl_cn.c create mode 100644 ldap/servers/plugins/retrocl/retrocl_create.c create mode 100644 ldap/servers/plugins/retrocl/retrocl_po.c create mode 100644 ldap/servers/plugins/retrocl/retrocl_rootdse.c create mode 100644 ldap/servers/plugins/retrocl/retrocl_trim.c create mode 100644 ldap/servers/plugins/rever/pbe.c create mode 100644 ldap/servers/plugins/rever/rever.c create mode 100644 ldap/servers/plugins/rever/rever.h create mode 100644 ldap/servers/plugins/roles/roles_cache.c create mode 100644 ldap/servers/plugins/roles/roles_cache.h create mode 100644 ldap/servers/plugins/roles/roles_plugin.c create mode 100644 ldap/servers/plugins/rootdn_access/rootdn_access.c create mode 100644 ldap/servers/plugins/rootdn_access/rootdn_access.h create mode 100644 ldap/servers/plugins/schema_reload/schema_reload.c create mode 100644 ldap/servers/plugins/statechange/statechange.c create mode 100644 ldap/servers/plugins/sync/sync.h create mode 100644 ldap/servers/plugins/sync/sync_init.c create mode 100644 ldap/servers/plugins/sync/sync_persist.c create mode 100644 ldap/servers/plugins/sync/sync_refresh.c create mode 100644 ldap/servers/plugins/sync/sync_util.c create mode 100644 ldap/servers/plugins/syntaxes/bin.c create mode 100644 ldap/servers/plugins/syntaxes/bitstring.c create mode 100644 ldap/servers/plugins/syntaxes/ces.c create mode 100644 ldap/servers/plugins/syntaxes/cis.c create mode 100644 ldap/servers/plugins/syntaxes/debug.c create mode 100644 ldap/servers/plugins/syntaxes/deliverymethod.c create mode 100644 ldap/servers/plugins/syntaxes/dn.c create mode 100644 ldap/servers/plugins/syntaxes/facsimile.c create mode 100644 ldap/servers/plugins/syntaxes/guide.c create mode 100644 ldap/servers/plugins/syntaxes/int.c create mode 100644 ldap/servers/plugins/syntaxes/nameoptuid.c create mode 100644 ldap/servers/plugins/syntaxes/numericstring.c create mode 100644 ldap/servers/plugins/syntaxes/phonetic.c create mode 100644 ldap/servers/plugins/syntaxes/sicis.c create mode 100644 ldap/servers/plugins/syntaxes/string.c create mode 100644 ldap/servers/plugins/syntaxes/syntax.h create mode 100644 ldap/servers/plugins/syntaxes/syntax_common.c create mode 100644 ldap/servers/plugins/syntaxes/tel.c create mode 100644 ldap/servers/plugins/syntaxes/teletex.c create mode 100644 ldap/servers/plugins/syntaxes/telex.c create mode 100644 ldap/servers/plugins/syntaxes/validate.c create mode 100644 ldap/servers/plugins/syntaxes/validate_task.c create mode 100644 ldap/servers/plugins/syntaxes/value.c create mode 100644 ldap/servers/plugins/uiduniq/7bit.c create mode 100644 ldap/servers/plugins/uiduniq/UID-Notes create mode 100644 ldap/servers/plugins/uiduniq/plugin-utils.h create mode 100644 ldap/servers/plugins/uiduniq/uid.c create mode 100644 ldap/servers/plugins/uiduniq/utils.c create mode 100644 
ldap/servers/plugins/usn/usn.c create mode 100644 ldap/servers/plugins/usn/usn.h create mode 100644 ldap/servers/plugins/usn/usn_cleanup.c create mode 100644 ldap/servers/plugins/vattrsp_template/vattrsp.c create mode 100644 ldap/servers/plugins/views/views.c create mode 100644 ldap/servers/plugins/whoami/whoami.c create mode 100644 ldap/servers/slapd/abandon.c create mode 100644 ldap/servers/slapd/add.c create mode 100644 ldap/servers/slapd/agtmmap.c create mode 100644 ldap/servers/slapd/agtmmap.h create mode 100644 ldap/servers/slapd/apibroker.c create mode 100644 ldap/servers/slapd/attr.c create mode 100644 ldap/servers/slapd/attrlist.c create mode 100644 ldap/servers/slapd/attrsyntax.c create mode 100644 ldap/servers/slapd/auditlog.c create mode 100644 ldap/servers/slapd/auth.c create mode 100644 ldap/servers/slapd/auth.h create mode 100644 ldap/servers/slapd/ava.c create mode 100644 ldap/servers/slapd/back-ldbm/ancestorid.c create mode 100644 ldap/servers/slapd/back-ldbm/archive.c create mode 100644 ldap/servers/slapd/back-ldbm/attrcrypt.h create mode 100644 ldap/servers/slapd/back-ldbm/back-ldbm.h create mode 100644 ldap/servers/slapd/back-ldbm/backentry.c create mode 100644 ldap/servers/slapd/back-ldbm/cache.c create mode 100644 ldap/servers/slapd/back-ldbm/cleanup.c create mode 100644 ldap/servers/slapd/back-ldbm/close.c create mode 100644 ldap/servers/slapd/back-ldbm/db-bdb/bdb_config.c create mode 100644 ldap/servers/slapd/back-ldbm/db-bdb/bdb_import.c create mode 100644 ldap/servers/slapd/back-ldbm/db-bdb/bdb_import_threads.c create mode 100644 ldap/servers/slapd/back-ldbm/db-bdb/bdb_instance_config.c create mode 100644 ldap/servers/slapd/back-ldbm/db-bdb/bdb_layer.c create mode 100644 ldap/servers/slapd/back-ldbm/db-bdb/bdb_layer.h create mode 100644 ldap/servers/slapd/back-ldbm/db-bdb/bdb_ldif2db.c create mode 100644 ldap/servers/slapd/back-ldbm/db-bdb/bdb_misc.c create mode 100644 ldap/servers/slapd/back-ldbm/db-bdb/bdb_monitor.c create mode 100644 ldap/servers/slapd/back-ldbm/db-bdb/bdb_upgrade.c create mode 100644 ldap/servers/slapd/back-ldbm/db-bdb/bdb_verify.c create mode 100644 ldap/servers/slapd/back-ldbm/db-bdb/bdb_version.c create mode 100644 ldap/servers/slapd/back-ldbm/dblayer.c create mode 100644 ldap/servers/slapd/back-ldbm/dblayer.h create mode 100644 ldap/servers/slapd/back-ldbm/dbsize.c create mode 100644 ldap/servers/slapd/back-ldbm/dbverify.c create mode 100644 ldap/servers/slapd/back-ldbm/dn2entry.c create mode 100644 ldap/servers/slapd/back-ldbm/entrystore.c create mode 100644 ldap/servers/slapd/back-ldbm/filterindex.c create mode 100644 ldap/servers/slapd/back-ldbm/findentry.c create mode 100644 ldap/servers/slapd/back-ldbm/haschildren.c create mode 100644 ldap/servers/slapd/back-ldbm/id2entry.c create mode 100644 ldap/servers/slapd/back-ldbm/idl.c create mode 100644 ldap/servers/slapd/back-ldbm/idl_common.c create mode 100644 ldap/servers/slapd/back-ldbm/idl_new.c create mode 100644 ldap/servers/slapd/back-ldbm/idl_set.c create mode 100644 ldap/servers/slapd/back-ldbm/idl_shim.c create mode 100644 ldap/servers/slapd/back-ldbm/import.c create mode 100644 ldap/servers/slapd/back-ldbm/import.h create mode 100644 ldap/servers/slapd/back-ldbm/index.c create mode 100644 ldap/servers/slapd/back-ldbm/init.c create mode 100644 ldap/servers/slapd/back-ldbm/instance.c create mode 100644 ldap/servers/slapd/back-ldbm/ldbm_abandon.c create mode 100644 ldap/servers/slapd/back-ldbm/ldbm_add.c create mode 100644 ldap/servers/slapd/back-ldbm/ldbm_attr.c create mode 100644 
ldap/servers/slapd/back-ldbm/ldbm_attrcrypt.c create mode 100644 ldap/servers/slapd/back-ldbm/ldbm_attrcrypt_config.c create mode 100644 ldap/servers/slapd/back-ldbm/ldbm_bind.c create mode 100644 ldap/servers/slapd/back-ldbm/ldbm_compare.c create mode 100644 ldap/servers/slapd/back-ldbm/ldbm_config.c create mode 100644 ldap/servers/slapd/back-ldbm/ldbm_config.h create mode 100644 ldap/servers/slapd/back-ldbm/ldbm_delete.c create mode 100644 ldap/servers/slapd/back-ldbm/ldbm_entryrdn.c create mode 100644 ldap/servers/slapd/back-ldbm/ldbm_index_config.c create mode 100644 ldap/servers/slapd/back-ldbm/ldbm_instance_config.c create mode 100644 ldap/servers/slapd/back-ldbm/ldbm_modify.c create mode 100644 ldap/servers/slapd/back-ldbm/ldbm_modrdn.c create mode 100644 ldap/servers/slapd/back-ldbm/ldbm_search.c create mode 100644 ldap/servers/slapd/back-ldbm/ldbm_unbind.c create mode 100644 ldap/servers/slapd/back-ldbm/ldbm_usn.c create mode 100644 ldap/servers/slapd/back-ldbm/ldif2ldbm.c create mode 100644 ldap/servers/slapd/back-ldbm/matchrule.c create mode 100644 ldap/servers/slapd/back-ldbm/misc.c create mode 100644 ldap/servers/slapd/back-ldbm/nextid.c create mode 100644 ldap/servers/slapd/back-ldbm/parents.c create mode 100644 ldap/servers/slapd/back-ldbm/perfctrs.c create mode 100644 ldap/servers/slapd/back-ldbm/perfctrs.h create mode 100644 ldap/servers/slapd/back-ldbm/proto-back-ldbm.h create mode 100644 ldap/servers/slapd/back-ldbm/rmdb.c create mode 100644 ldap/servers/slapd/back-ldbm/seq.c create mode 100644 ldap/servers/slapd/back-ldbm/sort.c create mode 100644 ldap/servers/slapd/back-ldbm/start.c create mode 100644 ldap/servers/slapd/back-ldbm/tools/index_dump/index_dump.c create mode 100644 ldap/servers/slapd/back-ldbm/uniqueid2entry.c create mode 100644 ldap/servers/slapd/back-ldbm/vlv.c create mode 100644 ldap/servers/slapd/back-ldbm/vlv_key.c create mode 100644 ldap/servers/slapd/back-ldbm/vlv_key.h create mode 100644 ldap/servers/slapd/back-ldbm/vlv_srch.c create mode 100644 ldap/servers/slapd/back-ldbm/vlv_srch.h create mode 100644 ldap/servers/slapd/backend.c create mode 100644 ldap/servers/slapd/backend_manager.c create mode 100644 ldap/servers/slapd/bind.c create mode 100644 ldap/servers/slapd/bitset.c create mode 100644 ldap/servers/slapd/bulk_import.c create mode 100644 ldap/servers/slapd/ch_malloc.c create mode 100644 ldap/servers/slapd/charray.c create mode 100644 ldap/servers/slapd/compare.c create mode 100644 ldap/servers/slapd/computed.c create mode 100644 ldap/servers/slapd/config.c create mode 100644 ldap/servers/slapd/configdse.c create mode 100644 ldap/servers/slapd/connection.c create mode 100644 ldap/servers/slapd/conntable.c create mode 100644 ldap/servers/slapd/control.c create mode 100644 ldap/servers/slapd/counters.c create mode 100644 ldap/servers/slapd/csn.c create mode 100644 ldap/servers/slapd/csngen.c create mode 100644 ldap/servers/slapd/csngen.h create mode 100644 ldap/servers/slapd/csnset.c create mode 100644 ldap/servers/slapd/daemon.c create mode 100644 ldap/servers/slapd/defbackend.c create mode 100644 ldap/servers/slapd/delete.c create mode 100644 ldap/servers/slapd/detach.c create mode 100644 ldap/servers/slapd/disconnect_error_strings.h create mode 100644 ldap/servers/slapd/disconnect_errors.h create mode 100644 ldap/servers/slapd/dl.c create mode 100644 ldap/servers/slapd/dn.c create mode 100644 ldap/servers/slapd/dse.c create mode 100644 ldap/servers/slapd/dynalib.c create mode 100644 ldap/servers/slapd/entry.c create mode 100644 
ldap/servers/slapd/entrywsi.c create mode 100644 ldap/servers/slapd/errormap.c create mode 100644 ldap/servers/slapd/eventq.c create mode 100644 ldap/servers/slapd/extendop.c create mode 100644 ldap/servers/slapd/factory.c create mode 100644 ldap/servers/slapd/fe.h create mode 100644 ldap/servers/slapd/features.c create mode 100644 ldap/servers/slapd/fedse.c create mode 100644 ldap/servers/slapd/fileio.c create mode 100644 ldap/servers/slapd/filter.c create mode 100644 ldap/servers/slapd/filter.h create mode 100644 ldap/servers/slapd/filtercmp.c create mode 100644 ldap/servers/slapd/filterentry.c create mode 100644 ldap/servers/slapd/generation.c create mode 100644 ldap/servers/slapd/getfilelist.c create mode 100644 ldap/servers/slapd/getopt_ext.c create mode 100644 ldap/servers/slapd/getopt_ext.h create mode 100644 ldap/servers/slapd/getsocketpeer.c create mode 100644 ldap/servers/slapd/getsocketpeer.h create mode 100644 ldap/servers/slapd/globals.c create mode 100644 ldap/servers/slapd/house.c create mode 100644 ldap/servers/slapd/http.h create mode 100644 ldap/servers/slapd/init.c create mode 100644 ldap/servers/slapd/intrinsics.h create mode 100644 ldap/servers/slapd/ldaputil.c create mode 100644 ldap/servers/slapd/ldbmlinktest.c create mode 100644 ldap/servers/slapd/lenstr.c create mode 100644 ldap/servers/slapd/libglobs.c create mode 100644 ldap/servers/slapd/libmakefile create mode 100644 ldap/servers/slapd/listConfigAttrs.pl create mode 100644 ldap/servers/slapd/localhost.c create mode 100644 ldap/servers/slapd/lock.c create mode 100644 ldap/servers/slapd/log.c create mode 100644 ldap/servers/slapd/log.h create mode 100644 ldap/servers/slapd/main.c create mode 100644 ldap/servers/slapd/mapping_tree.c create mode 100644 ldap/servers/slapd/match.c create mode 100755 ldap/servers/slapd/mkDBErrStrs.pl create mode 100644 ldap/servers/slapd/modify.c create mode 100644 ldap/servers/slapd/modrdn.c create mode 100644 ldap/servers/slapd/modutil.c create mode 100644 ldap/servers/slapd/monitor.c create mode 100644 ldap/servers/slapd/object.c create mode 100644 ldap/servers/slapd/objset.c create mode 100644 ldap/servers/slapd/openldapber.h create mode 100644 ldap/servers/slapd/operation.c create mode 100644 ldap/servers/slapd/opshared.c create mode 100644 ldap/servers/slapd/pagedresults.c create mode 100644 ldap/servers/slapd/passwd_extop.c create mode 100644 ldap/servers/slapd/pblock.c create mode 100644 ldap/servers/slapd/pblock_v3.h create mode 100644 ldap/servers/slapd/plugin.c create mode 100644 ldap/servers/slapd/plugin_acl.c create mode 100644 ldap/servers/slapd/plugin_internal_op.c create mode 100644 ldap/servers/slapd/plugin_mmr.c create mode 100644 ldap/servers/slapd/plugin_mr.c create mode 100644 ldap/servers/slapd/plugin_role.c create mode 100644 ldap/servers/slapd/plugin_syntax.c create mode 100644 ldap/servers/slapd/poll_using_select.c create mode 100644 ldap/servers/slapd/poll_using_select.h create mode 100644 ldap/servers/slapd/prerrstrs.h create mode 100644 ldap/servers/slapd/protect_db.c create mode 100644 ldap/servers/slapd/protect_db.h create mode 100644 ldap/servers/slapd/proto-slap.h create mode 100644 ldap/servers/slapd/proxyauth.c create mode 100644 ldap/servers/slapd/psearch.c create mode 100644 ldap/servers/slapd/pw.c create mode 100644 ldap/servers/slapd/pw.h create mode 100644 ldap/servers/slapd/pw_mgmt.c create mode 100644 ldap/servers/slapd/pw_retry.c create mode 100644 ldap/servers/slapd/pw_verify.c create mode 100644 ldap/servers/slapd/pw_verify.h create mode 
100644 ldap/servers/slapd/rdn.c create mode 100644 ldap/servers/slapd/referral.c create mode 100644 ldap/servers/slapd/regex.c create mode 100644 ldap/servers/slapd/resourcelimit.c create mode 100644 ldap/servers/slapd/result.c create mode 100644 ldap/servers/slapd/rootdse.c create mode 100644 ldap/servers/slapd/sasl_io.c create mode 100644 ldap/servers/slapd/sasl_map.c create mode 100644 ldap/servers/slapd/saslbind.c create mode 100644 ldap/servers/slapd/schema.c create mode 100644 ldap/servers/slapd/schemaparse.c create mode 100644 ldap/servers/slapd/search.c create mode 100644 ldap/servers/slapd/secerrstrs.h create mode 100644 ldap/servers/slapd/security_wrappers.c create mode 100644 ldap/servers/slapd/slap.h create mode 100644 ldap/servers/slapd/slapd.lite.key create mode 100644 ldap/servers/slapd/slapd.normal.key create mode 100644 ldap/servers/slapd/slapd_plhash.c create mode 100644 ldap/servers/slapd/slapi-plugin-compat4.h create mode 100644 ldap/servers/slapd/slapi-plugin.h create mode 100644 ldap/servers/slapd/slapi-private.h create mode 100644 ldap/servers/slapd/slapi2nspr.c create mode 100644 ldap/servers/slapd/slapi_counter.c create mode 100644 ldap/servers/slapd/slapi_pal.c create mode 100644 ldap/servers/slapd/slapi_pal.h create mode 100644 ldap/servers/slapd/snmp_collator.c create mode 100644 ldap/servers/slapd/snmp_collator.h create mode 100644 ldap/servers/slapd/snoop.c create mode 100644 ldap/servers/slapd/sort.c create mode 100644 ldap/servers/slapd/ssl.c create mode 100644 ldap/servers/slapd/sslerrstrs.h create mode 100644 ldap/servers/slapd/start_tls_extop.c create mode 100644 ldap/servers/slapd/statechange.h create mode 100644 ldap/servers/slapd/str2filter.c create mode 100644 ldap/servers/slapd/strdup.c create mode 100644 ldap/servers/slapd/stubrepl.c create mode 100644 ldap/servers/slapd/stubs.c create mode 100644 ldap/servers/slapd/subentry.c create mode 100644 ldap/servers/slapd/task.c create mode 100644 ldap/servers/slapd/tempnam.c create mode 100644 ldap/servers/slapd/test-plugins/Makefile create mode 100644 ldap/servers/slapd/test-plugins/Makefile.AIX create mode 100644 ldap/servers/slapd/test-plugins/Makefile.BSDI create mode 100644 ldap/servers/slapd/test-plugins/Makefile.HPUX create mode 100644 ldap/servers/slapd/test-plugins/Makefile.HPUX64 create mode 100644 ldap/servers/slapd/test-plugins/Makefile.IRIX create mode 100644 ldap/servers/slapd/test-plugins/Makefile.Linux create mode 100644 ldap/servers/slapd/test-plugins/Makefile.OSF1 create mode 100644 ldap/servers/slapd/test-plugins/Makefile.ReliantUNIX create mode 100644 ldap/servers/slapd/test-plugins/Makefile.SOLARIS create mode 100644 ldap/servers/slapd/test-plugins/Makefile.SOLARIS64 create mode 100644 ldap/servers/slapd/test-plugins/Makefile.SOLARISx86 create mode 100644 ldap/servers/slapd/test-plugins/Makefile.UnixWare create mode 100644 ldap/servers/slapd/test-plugins/Makefile.UnixWareUDK create mode 100644 ldap/servers/slapd/test-plugins/Makefile.server create mode 100644 ldap/servers/slapd/test-plugins/README create mode 100644 ldap/servers/slapd/test-plugins/clients/README create mode 100644 ldap/servers/slapd/test-plugins/clients/ReqExtOp.java create mode 100644 ldap/servers/slapd/test-plugins/clients/reqextop.c create mode 100755 ldap/servers/slapd/test-plugins/installDse.pl create mode 100644 ldap/servers/slapd/test-plugins/nicknames create mode 100644 ldap/servers/slapd/test-plugins/sampletask.c create mode 100644 ldap/servers/slapd/test-plugins/testbind.c create mode 100644 
ldap/servers/slapd/test-plugins/testdatainterop.c create mode 100644 ldap/servers/slapd/test-plugins/testdbinterop.c create mode 100644 ldap/servers/slapd/test-plugins/testdbinterop.h create mode 100644 ldap/servers/slapd/test-plugins/testentry.c create mode 100644 ldap/servers/slapd/test-plugins/testextendedop.c create mode 100644 ldap/servers/slapd/test-plugins/testgetip.c create mode 100644 ldap/servers/slapd/test-plugins/testpostop.c create mode 100644 ldap/servers/slapd/test-plugins/testpreop.c create mode 100644 ldap/servers/slapd/test-plugins/testsaslbind.c create mode 100644 ldap/servers/slapd/thread_data.c create mode 100644 ldap/servers/slapd/time.c create mode 100644 ldap/servers/slapd/tools/dbscan.c create mode 100644 ldap/servers/slapd/tools/eggencode.c create mode 100644 ldap/servers/slapd/tools/ldaptool-sasl.c create mode 100644 ldap/servers/slapd/tools/ldaptool-sasl.h create mode 100644 ldap/servers/slapd/tools/ldaptool.h create mode 100644 ldap/servers/slapd/tools/ldclt/README create mode 100644 ldap/servers/slapd/tools/ldclt/data.c create mode 100644 ldap/servers/slapd/tools/ldclt/examples/001/add.ksh create mode 100644 ldap/servers/slapd/tools/ldclt/examples/001/add_incr.ksh create mode 100644 ldap/servers/slapd/tools/ldclt/examples/001/config.ksh create mode 100644 ldap/servers/slapd/tools/ldclt/examples/001/delete.ksh create mode 100644 ldap/servers/slapd/tools/ldclt/examples/001/env.ksh create mode 100644 ldap/servers/slapd/tools/ldclt/examples/001/search.ksh create mode 100644 ldap/servers/slapd/tools/ldclt/examples/002/add.ksh create mode 100644 ldap/servers/slapd/tools/ldclt/examples/002/config.ksh create mode 100644 ldap/servers/slapd/tools/ldclt/examples/002/env.ksh create mode 100644 ldap/servers/slapd/tools/ldclt/examples/002/ldif01.ksh create mode 100644 ldap/servers/slapd/tools/ldclt/examples/002/ldif02.ksh create mode 100644 ldap/servers/slapd/tools/ldclt/examples/002/ldif03.ksh create mode 100644 ldap/servers/slapd/tools/ldclt/examples/002/ofile create mode 100644 ldap/servers/slapd/tools/ldclt/examples/README create mode 100644 ldap/servers/slapd/tools/ldclt/ldap-private.h create mode 100644 ldap/servers/slapd/tools/ldclt/ldapfct.c create mode 100644 ldap/servers/slapd/tools/ldclt/ldclt.c create mode 100644 ldap/servers/slapd/tools/ldclt/ldclt.h create mode 100644 ldap/servers/slapd/tools/ldclt/ldclt.man create mode 100644 ldap/servers/slapd/tools/ldclt/ldclt.use create mode 100644 ldap/servers/slapd/tools/ldclt/ldcltU.c create mode 100644 ldap/servers/slapd/tools/ldclt/opCheck.c create mode 100644 ldap/servers/slapd/tools/ldclt/parser.c create mode 100644 ldap/servers/slapd/tools/ldclt/port.c create mode 100644 ldap/servers/slapd/tools/ldclt/port.h create mode 100644 ldap/servers/slapd/tools/ldclt/remote.h create mode 100644 ldap/servers/slapd/tools/ldclt/repcheck.c create mode 100644 ldap/servers/slapd/tools/ldclt/repslave.c create mode 100644 ldap/servers/slapd/tools/ldclt/scalab01.c create mode 100644 ldap/servers/slapd/tools/ldclt/scalab01.h create mode 100644 ldap/servers/slapd/tools/ldclt/srv.c create mode 100644 ldap/servers/slapd/tools/ldclt/threadMain.c create mode 100644 ldap/servers/slapd/tools/ldclt/utils.c create mode 100644 ldap/servers/slapd/tools/ldclt/utils.h create mode 100644 ldap/servers/slapd/tools/ldclt/version.c create mode 100644 ldap/servers/slapd/tools/ldclt/workarounds.c create mode 100644 ldap/servers/slapd/tools/ldif.c create mode 100644 ldap/servers/slapd/tools/migratecred.c create mode 100644 ldap/servers/slapd/tools/mkdep.c 
create mode 100644 ldap/servers/slapd/tools/mmldif.c create mode 100644 ldap/servers/slapd/tools/pwenc.c create mode 100644 ldap/servers/slapd/tools/rsearch/addthread.c create mode 100644 ldap/servers/slapd/tools/rsearch/addthread.h create mode 100644 ldap/servers/slapd/tools/rsearch/infadd.c create mode 100644 ldap/servers/slapd/tools/rsearch/infadd.h create mode 100644 ldap/servers/slapd/tools/rsearch/main.c create mode 100644 ldap/servers/slapd/tools/rsearch/nametable.c create mode 100644 ldap/servers/slapd/tools/rsearch/nametable.h create mode 100644 ldap/servers/slapd/tools/rsearch/rsearch.c create mode 100644 ldap/servers/slapd/tools/rsearch/rsearch.h create mode 100644 ldap/servers/slapd/tools/rsearch/scripts/dbgen-FamilyNames create mode 100644 ldap/servers/slapd/tools/rsearch/scripts/dbgen-GivenNames create mode 100644 ldap/servers/slapd/tools/rsearch/scripts/dbgen-OrgUnits create mode 100755 ldap/servers/slapd/tools/rsearch/scripts/dbgen.pl.in create mode 100644 ldap/servers/slapd/tools/rsearch/sdattable.c create mode 100644 ldap/servers/slapd/tools/rsearch/sdattable.h create mode 100644 ldap/servers/slapd/tools/rsearch/searchthread.c create mode 100644 ldap/servers/slapd/tools/rsearch/searchthread.h create mode 100644 ldap/servers/slapd/unbind.c create mode 100644 ldap/servers/slapd/uniqueid.c create mode 100644 ldap/servers/slapd/uniqueidgen.c create mode 100644 ldap/servers/slapd/utf8.c create mode 100644 ldap/servers/slapd/utf8compare.c create mode 100644 ldap/servers/slapd/util.c create mode 100644 ldap/servers/slapd/uuid.c create mode 100644 ldap/servers/slapd/uuid.h create mode 100644 ldap/servers/slapd/value.c create mode 100644 ldap/servers/slapd/valueset.c create mode 100644 ldap/servers/slapd/vattr.c create mode 100644 ldap/servers/slapd/vattr_spi.h create mode 100644 ldap/servers/slapd/views.h create mode 100644 ldap/servers/snmp/ldap-agent.c create mode 100644 ldap/servers/snmp/ldap-agent.conf.in create mode 100644 ldap/servers/snmp/ldap-agent.h create mode 100644 ldap/servers/snmp/main.c create mode 100644 ldap/servers/snmp/redhat-directory.mib create mode 100644 ldap/systools/README create mode 100755 ldap/systools/getHPPatches.pl create mode 100755 ldap/systools/getSolPatches.pl create mode 100644 ldap/systools/hp_patches.c create mode 100755 ldap/systools/mergeSolPatches.pl create mode 100644 ldap/systools/pio.h create mode 100644 ldap/systools/sol_patches.c create mode 100644 ldap/systools/viewcore.c create mode 100644 lib/base/.cvsignore create mode 100644 lib/base/crit.cpp create mode 100644 lib/base/dnsdmain.cpp create mode 100644 lib/base/ereport.cpp create mode 100644 lib/base/file.cpp create mode 100644 lib/base/fsmutex.cpp create mode 100644 lib/base/lexer_pvt.h create mode 100644 lib/base/nscperror.c create mode 100644 lib/base/plist.cpp create mode 100644 lib/base/plist_pvt.h create mode 100644 lib/base/pool.cpp create mode 100644 lib/base/shexp.cpp create mode 100644 lib/base/system.cpp create mode 100644 lib/base/systhr.cpp create mode 100644 lib/base/util.cpp create mode 100644 lib/ldaputil/.cvsignore create mode 100644 lib/ldaputil/cert.c create mode 100644 lib/ldaputil/certmap.c create mode 100644 lib/ldaputil/certmap.conf create mode 100644 lib/ldaputil/dbconf.c create mode 100644 lib/ldaputil/encode.c create mode 100644 lib/ldaputil/errors.c create mode 100644 lib/ldaputil/examples/Makefile create mode 100644 lib/ldaputil/examples/README create mode 100644 lib/ldaputil/examples/init.c create mode 100644 lib/ldaputil/examples/plugin.c create mode 
100644 lib/ldaputil/examples/plugin.h create mode 100644 lib/ldaputil/init.c create mode 100644 lib/ldaputil/ldapauth.c create mode 100644 lib/ldaputil/ldapu-changes.html create mode 100644 lib/ldaputil/ldaputili.h create mode 100644 lib/ldaputil/vtable.c create mode 100644 lib/libaccess/.cvsignore create mode 100644 lib/libaccess/access_plhash.cpp create mode 100644 lib/libaccess/access_plhash.h create mode 100644 lib/libaccess/acl.tab.cpp create mode 100644 lib/libaccess/acl.tab.h create mode 100644 lib/libaccess/acl.yy.cpp create mode 100644 lib/libaccess/aclcache.cpp create mode 100644 lib/libaccess/aclcache.h create mode 100644 lib/libaccess/aclerror.cpp create mode 100644 lib/libaccess/acleval.cpp create mode 100644 lib/libaccess/aclflush.cpp create mode 100644 lib/libaccess/aclpriv.h create mode 100644 lib/libaccess/aclscan.h create mode 100644 lib/libaccess/aclscan.l create mode 100644 lib/libaccess/aclspace.cpp create mode 100644 lib/libaccess/acltext.y create mode 100644 lib/libaccess/acltools.cpp create mode 100644 lib/libaccess/aclutil.cpp create mode 100644 lib/libaccess/aclutil.h create mode 100644 lib/libaccess/authdb.cpp create mode 100644 lib/libaccess/las.h create mode 100644 lib/libaccess/lasdns.cpp create mode 100644 lib/libaccess/lasdns.h create mode 100644 lib/libaccess/lasgroup.cpp create mode 100644 lib/libaccess/lasip.cpp create mode 100644 lib/libaccess/lasip.h create mode 100644 lib/libaccess/lastod.cpp create mode 100644 lib/libaccess/lasuser.cpp create mode 100644 lib/libaccess/ldapauth.h create mode 100644 lib/libaccess/method.cpp create mode 100644 lib/libaccess/nsautherr.cpp create mode 100644 lib/libaccess/nseframe.cpp create mode 100644 lib/libaccess/oneeval.cpp create mode 100644 lib/libaccess/oneeval.h create mode 100644 lib/libaccess/parse.h create mode 100644 lib/libaccess/permhash.h create mode 100644 lib/libaccess/register.cpp create mode 100644 lib/libaccess/symbols.cpp create mode 100644 lib/libaccess/usi.cpp create mode 100644 lib/libaccess/usrcache.cpp create mode 100644 lib/libaccess/yy-sed create mode 100644 lib/libadmin/.cvsignore create mode 100644 lib/libadmin/error.c create mode 100644 lib/libadmin/template.c create mode 100644 lib/libadmin/util.c create mode 100644 lib/libsi18n/getstrmem.h create mode 100644 lib/libsi18n/getstrprop.c create mode 100644 lib/libsi18n/gsslapd.h create mode 100644 lib/libsi18n/makstrdb.c create mode 100644 lib/libsi18n/reshash.c create mode 100644 lib/libsi18n/reshash.h create mode 100644 lib/libsi18n/txtfile.c create mode 100644 lib/libsi18n/txtfile.h create mode 100644 m4/db.m4 create mode 100644 m4/doxygen.m4 create mode 100644 m4/fhs.m4 create mode 100644 m4/netsnmp.m4 create mode 100644 m4/openldap.m4 create mode 100644 m4/selinux.m4 create mode 100644 m4/systemd.m4 create mode 100644 man/man1/cl-dump.1 create mode 100644 man/man1/cl-dump.pl.1 create mode 100644 man/man1/dbgen.pl.1 create mode 100644 man/man1/dbscan.1 create mode 100644 man/man1/ds-logpipe.py.1 create mode 100644 man/man1/ds-replcheck.1 create mode 100644 man/man1/dsktune.1 create mode 100644 man/man1/infadd.1 create mode 100644 man/man1/ldap-agent.1 create mode 100644 man/man1/ldclt.1 create mode 100644 man/man1/ldif.1 create mode 100644 man/man1/logconv.pl.1 create mode 100644 man/man1/migratecred.1 create mode 100644 man/man1/mmldif.1 create mode 100644 man/man1/pwdhash.1 create mode 100644 man/man1/readnsstate.1 create mode 100644 man/man1/repl-monitor.1 create mode 100644 man/man1/repl-monitor.pl.1 create mode 100644 
man/man1/rsearch.1 create mode 100644 man/man5/99user.ldif.5 create mode 100644 man/man5/certmap.conf.5 create mode 100644 man/man5/dirsrv.5 create mode 100644 man/man5/dirsrv.systemd.5 create mode 100644 man/man5/slapd-collations.conf.5 create mode 100644 man/man5/template-initconfig.5 create mode 100644 man/man8/bak2db.8 create mode 100644 man/man8/bak2db.pl.8 create mode 100644 man/man8/cleanallruv.pl.8 create mode 100644 man/man8/db2bak.8 create mode 100644 man/man8/db2bak.pl.8 create mode 100644 man/man8/db2index.8 create mode 100644 man/man8/db2index.pl.8 create mode 100644 man/man8/db2ldif.8 create mode 100644 man/man8/db2ldif.pl.8 create mode 100644 man/man8/dbmon.sh.8 create mode 100644 man/man8/dbverify.8 create mode 100644 man/man8/dn2rdn.8 create mode 100644 man/man8/fixup-linkedattrs.pl.8 create mode 100644 man/man8/fixup-memberof.pl.8 create mode 100644 man/man8/ldif2db.8 create mode 100644 man/man8/ldif2db.pl.8 create mode 100644 man/man8/ldif2ldap.8 create mode 100644 man/man8/migrate-ds.pl.8 create mode 100644 man/man8/monitor.8 create mode 100644 man/man8/ns-accountstatus.pl.8 create mode 100644 man/man8/ns-activate.pl.8 create mode 100644 man/man8/ns-inactivate.pl.8 create mode 100644 man/man8/ns-newpwpolicy.pl.8 create mode 100644 man/man8/ns-slapd.8 create mode 100644 man/man8/remove-ds.pl.8 create mode 100644 man/man8/restart-dirsrv.8 create mode 100644 man/man8/restoreconfig.8 create mode 100644 man/man8/saveconfig.8 create mode 100644 man/man8/schema-reload.pl.8 create mode 100644 man/man8/setup-ds.pl.8 create mode 100644 man/man8/start-dirsrv.8 create mode 100644 man/man8/status-dirsrv.8 create mode 100644 man/man8/stop-dirsrv.8 create mode 100644 man/man8/suffix2instance.8 create mode 100644 man/man8/syntax-validate.pl.8 create mode 100644 man/man8/upgradedb.8 create mode 100644 man/man8/upgradednformat.8 create mode 100644 man/man8/usn-tombstone-cleanup.pl.8 create mode 100644 man/man8/verify-db.pl.8 create mode 100644 man/man8/vlvindex.8 create mode 100644 profiling/stap/probe_do_search_detail.stp create mode 100644 profiling/stap/probe_log_access_detail.stp create mode 100644 profiling/stap/probe_op_shared_search.stp create mode 100644 rfcs/Makefile create mode 100644 rfcs/examples/template-bare-06.txt create mode 100644 rfcs/src/draft-wibrown-ldapssotoken-00.xml create mode 100644 rpm.mk create mode 100644 rpm/389-ds-base-devel.README create mode 100644 rpm/389-ds-base-git.sh create mode 100644 rpm/389-ds-base.spec.in create mode 100755 rpm/add_patches.sh create mode 100755 rpm/rpmverrel.sh create mode 100644 src/Cargo.lock create mode 100644 src/Cargo.toml create mode 100644 src/cockpit/389-console/.babelrc create mode 100644 src/cockpit/389-console/.eslintignore create mode 100644 src/cockpit/389-console/.eslintrc.json create mode 100644 src/cockpit/389-console/README.md create mode 100644 src/cockpit/389-console/audit-ci.json create mode 100755 src/cockpit/389-console/buildAndRun.sh create mode 100644 src/cockpit/389-console/cockpit_dist/banner.html create mode 100644 src/cockpit/389-console/cockpit_dist/css/ds.css create mode 100644 src/cockpit/389-console/cockpit_dist/fonts/OpenSans-Bold-webfont.woff create mode 100644 src/cockpit/389-console/cockpit_dist/fonts/OpenSans-Bold-webfont.woff2 create mode 100644 src/cockpit/389-console/cockpit_dist/fonts/OpenSans-BoldItalic-webfont.woff2 create mode 100644 src/cockpit/389-console/cockpit_dist/fonts/OpenSans-Italic-webfont.woff create mode 100644 
src/cockpit/389-console/cockpit_dist/fonts/OpenSans-Italic-webfont.woff2 create mode 100644 src/cockpit/389-console/cockpit_dist/fonts/OpenSans-Light-webfont.ttf create mode 100644 src/cockpit/389-console/cockpit_dist/fonts/OpenSans-Light-webfont.woff create mode 100644 src/cockpit/389-console/cockpit_dist/fonts/OpenSans-Light-webfont.woff2 create mode 100644 src/cockpit/389-console/cockpit_dist/fonts/OpenSans-Regular-webfont.ttf create mode 100644 src/cockpit/389-console/cockpit_dist/fonts/OpenSans-Regular-webfont.woff create mode 100644 src/cockpit/389-console/cockpit_dist/fonts/OpenSans-Regular-webfont.woff2 create mode 100644 src/cockpit/389-console/cockpit_dist/fonts/OpenSans-Semibold-webfont.ttf create mode 100644 src/cockpit/389-console/cockpit_dist/fonts/OpenSans-Semibold-webfont.woff create mode 100644 src/cockpit/389-console/cockpit_dist/fonts/OpenSans-Semibold-webfont.woff2 create mode 100644 src/cockpit/389-console/cockpit_dist/fonts/OpenSans-SemiboldItalic-webfont.woff create mode 100644 src/cockpit/389-console/cockpit_dist/fonts/PatternFlyIcons-webfont.ttf create mode 100644 src/cockpit/389-console/cockpit_dist/fonts/PatternFlyIcons-webfont.woff create mode 100644 src/cockpit/389-console/cockpit_dist/fonts/fontawesome-webfont.woff create mode 100644 src/cockpit/389-console/cockpit_dist/fonts/fontawesome-webfont.woff2 create mode 100644 src/cockpit/389-console/cockpit_dist/fonts/glyphicons-halflings-regular.woff create mode 100644 src/cockpit/389-console/cockpit_dist/fonts/glyphicons-halflings-regular.woff2 create mode 100644 src/cockpit/389-console/cockpit_dist/images/sort_asc.png create mode 100644 src/cockpit/389-console/cockpit_dist/images/sort_asc_disabled.png create mode 100644 src/cockpit/389-console/cockpit_dist/images/sort_both.png create mode 100644 src/cockpit/389-console/cockpit_dist/images/sort_desc.png create mode 100644 src/cockpit/389-console/cockpit_dist/images/sort_desc_disabled.png create mode 100644 src/cockpit/389-console/cockpit_dist/index.html create mode 100644 src/cockpit/389-console/cockpit_dist/index.min.js.gz create mode 100644 src/cockpit/389-console/cockpit_dist/manifest.json create mode 100644 src/cockpit/389-console/cockpit_dist/static/32px.png create mode 100644 src/cockpit/389-console/cockpit_dist/static/40px.png create mode 100644 src/cockpit/389-console/cockpit_dist/static/Typeahead.css create mode 100644 src/cockpit/389-console/cockpit_dist/static/bootpopup.min.js create mode 100644 src/cockpit/389-console/cockpit_dist/static/bootstrap-theme.min.css create mode 100644 src/cockpit/389-console/cockpit_dist/static/bootstrap-theme.min.css.map create mode 100644 src/cockpit/389-console/cockpit_dist/static/bootstrap.min.css create mode 100644 src/cockpit/389-console/cockpit_dist/static/bootstrap.min.css.map create mode 100644 src/cockpit/389-console/cockpit_dist/static/bootstrap.min.js create mode 100644 src/cockpit/389-console/cockpit_dist/static/c3.min.js create mode 100644 src/cockpit/389-console/cockpit_dist/static/d3.min.js create mode 100644 src/cockpit/389-console/cockpit_dist/static/dataTables.datetime-moment.js create mode 100644 src/cockpit/389-console/cockpit_dist/static/images/ui-icons_444444_256x240.png create mode 100644 src/cockpit/389-console/cockpit_dist/static/images/ui-icons_555555_256x240.png create mode 100644 src/cockpit/389-console/cockpit_dist/static/images/ui-icons_777620_256x240.png create mode 100644 src/cockpit/389-console/cockpit_dist/static/images/ui-icons_777777_256x240.png create mode 100644 
src/cockpit/389-console/cockpit_dist/static/images/ui-icons_cc0000_256x240.png create mode 100644 src/cockpit/389-console/cockpit_dist/static/images/ui-icons_ffffff_256x240.png create mode 100644 src/cockpit/389-console/cockpit_dist/static/jquery-3.3.1.min.js create mode 100644 src/cockpit/389-console/cockpit_dist/static/jquery.dataTables.min.css create mode 100644 src/cockpit/389-console/cockpit_dist/static/jquery.dataTables.min.js create mode 100644 src/cockpit/389-console/cockpit_dist/static/jquery.dataTables.select.min.js create mode 100644 src/cockpit/389-console/cockpit_dist/static/jquery.timepicker.min.css create mode 100644 src/cockpit/389-console/cockpit_dist/static/jquery.timepicker.min.js create mode 100644 src/cockpit/389-console/cockpit_dist/static/jstree.min.js create mode 100644 src/cockpit/389-console/cockpit_dist/static/moment.min.js create mode 100644 src/cockpit/389-console/cockpit_dist/static/page.min.css create mode 100644 src/cockpit/389-console/cockpit_dist/static/patternfly-additions.css create mode 100644 src/cockpit/389-console/cockpit_dist/static/patternfly.css create mode 100644 src/cockpit/389-console/cockpit_dist/static/patternfly.min.js create mode 100644 src/cockpit/389-console/cockpit_dist/static/style.min.css create mode 100644 src/cockpit/389-console/cockpit_dist/static/throbber.gif create mode 100644 src/cockpit/389-console/node_modules.mk create mode 100644 src/cockpit/389-console/org.port389.cockpit_console.metainfo.xml create mode 100644 src/cockpit/389-console/package-lock.json create mode 100644 src/cockpit/389-console/package.json create mode 100644 src/cockpit/389-console/src/banner.html create mode 100644 src/cockpit/389-console/src/css/ds.css create mode 100644 src/cockpit/389-console/src/database.jsx create mode 100644 src/cockpit/389-console/src/ds.jsx create mode 100644 src/cockpit/389-console/src/fonts/OpenSans-Bold-webfont.woff create mode 100644 src/cockpit/389-console/src/fonts/OpenSans-Bold-webfont.woff2 create mode 100644 src/cockpit/389-console/src/fonts/OpenSans-BoldItalic-webfont.woff2 create mode 100644 src/cockpit/389-console/src/fonts/OpenSans-Italic-webfont.woff create mode 100644 src/cockpit/389-console/src/fonts/OpenSans-Italic-webfont.woff2 create mode 100644 src/cockpit/389-console/src/fonts/OpenSans-Light-webfont.ttf create mode 100644 src/cockpit/389-console/src/fonts/OpenSans-Light-webfont.woff create mode 100644 src/cockpit/389-console/src/fonts/OpenSans-Light-webfont.woff2 create mode 100644 src/cockpit/389-console/src/fonts/OpenSans-Regular-webfont.ttf create mode 100644 src/cockpit/389-console/src/fonts/OpenSans-Regular-webfont.woff create mode 100644 src/cockpit/389-console/src/fonts/OpenSans-Regular-webfont.woff2 create mode 100644 src/cockpit/389-console/src/fonts/OpenSans-Semibold-webfont.ttf create mode 100644 src/cockpit/389-console/src/fonts/OpenSans-Semibold-webfont.woff create mode 100644 src/cockpit/389-console/src/fonts/OpenSans-Semibold-webfont.woff2 create mode 100644 src/cockpit/389-console/src/fonts/OpenSans-SemiboldItalic-webfont.woff create mode 100644 src/cockpit/389-console/src/fonts/PatternFlyIcons-webfont.ttf create mode 100644 src/cockpit/389-console/src/fonts/PatternFlyIcons-webfont.woff create mode 100644 src/cockpit/389-console/src/fonts/fontawesome-webfont.woff create mode 100644 src/cockpit/389-console/src/fonts/fontawesome-webfont.woff2 create mode 100644 src/cockpit/389-console/src/fonts/glyphicons-halflings-regular.woff create mode 100644 
src/cockpit/389-console/src/fonts/glyphicons-halflings-regular.woff2 create mode 100644 src/cockpit/389-console/src/images/sort_asc.png create mode 100644 src/cockpit/389-console/src/images/sort_asc_disabled.png create mode 100644 src/cockpit/389-console/src/images/sort_both.png create mode 100644 src/cockpit/389-console/src/images/sort_desc.png create mode 100644 src/cockpit/389-console/src/images/sort_desc_disabled.png create mode 100644 src/cockpit/389-console/src/index.es6 create mode 100644 src/cockpit/389-console/src/index.html create mode 100644 src/cockpit/389-console/src/lib/customCollapse.jsx create mode 100644 src/cockpit/389-console/src/lib/customTableToolbar.jsx create mode 100644 src/cockpit/389-console/src/lib/database/attrEncryption.jsx create mode 100644 src/cockpit/389-console/src/lib/database/backups.jsx create mode 100644 src/cockpit/389-console/src/lib/database/chaining.jsx create mode 100644 src/cockpit/389-console/src/lib/database/databaseConfig.jsx create mode 100644 src/cockpit/389-console/src/lib/database/databaseModal.jsx create mode 100644 src/cockpit/389-console/src/lib/database/databaseTables.jsx create mode 100644 src/cockpit/389-console/src/lib/database/globalPwp.jsx create mode 100644 src/cockpit/389-console/src/lib/database/indexes.jsx create mode 100644 src/cockpit/389-console/src/lib/database/localPwp.jsx create mode 100644 src/cockpit/389-console/src/lib/database/referrals.jsx create mode 100644 src/cockpit/389-console/src/lib/database/suffix.jsx create mode 100644 src/cockpit/389-console/src/lib/database/suffixConfig.jsx create mode 100644 src/cockpit/389-console/src/lib/database/vlvIndexes.jsx create mode 100644 src/cockpit/389-console/src/lib/dsTable.jsx create mode 100644 src/cockpit/389-console/src/lib/monitor/accesslog.jsx create mode 100644 src/cockpit/389-console/src/lib/monitor/auditfaillog.jsx create mode 100644 src/cockpit/389-console/src/lib/monitor/auditlog.jsx create mode 100644 src/cockpit/389-console/src/lib/monitor/chainingMonitor.jsx create mode 100644 src/cockpit/389-console/src/lib/monitor/dbMonitor.jsx create mode 100644 src/cockpit/389-console/src/lib/monitor/errorlog.jsx create mode 100644 src/cockpit/389-console/src/lib/monitor/monitorModals.jsx create mode 100644 src/cockpit/389-console/src/lib/monitor/monitorTables.jsx create mode 100644 src/cockpit/389-console/src/lib/monitor/replMonitor.jsx create mode 100644 src/cockpit/389-console/src/lib/monitor/serverMonitor.jsx create mode 100644 src/cockpit/389-console/src/lib/monitor/snmpMonitor.jsx create mode 100644 src/cockpit/389-console/src/lib/monitor/suffixMonitor.jsx create mode 100644 src/cockpit/389-console/src/lib/notifications.jsx create mode 100644 src/cockpit/389-console/src/lib/plugins/accountPolicy.jsx create mode 100644 src/cockpit/389-console/src/lib/plugins/attributeUniqueness.jsx create mode 100644 src/cockpit/389-console/src/lib/plugins/autoMembership.jsx create mode 100644 src/cockpit/389-console/src/lib/plugins/dna.jsx create mode 100644 src/cockpit/389-console/src/lib/plugins/linkedAttributes.jsx create mode 100644 src/cockpit/389-console/src/lib/plugins/managedEntries.jsx create mode 100644 src/cockpit/389-console/src/lib/plugins/memberOf.jsx create mode 100644 src/cockpit/389-console/src/lib/plugins/passthroughAuthentication.jsx create mode 100644 src/cockpit/389-console/src/lib/plugins/pluginBasicConfig.jsx create mode 100644 src/cockpit/389-console/src/lib/plugins/pluginModal.jsx create mode 100644 src/cockpit/389-console/src/lib/plugins/pluginTables.jsx 
create mode 100644 src/cockpit/389-console/src/lib/plugins/referentialIntegrity.jsx create mode 100644 src/cockpit/389-console/src/lib/plugins/retroChangelog.jsx create mode 100644 src/cockpit/389-console/src/lib/plugins/rootDNAccessControl.jsx create mode 100644 src/cockpit/389-console/src/lib/plugins/usn.jsx create mode 100644 src/cockpit/389-console/src/lib/plugins/winsync.jsx create mode 100644 src/cockpit/389-console/src/lib/replication/replAgmts.jsx create mode 100644 src/cockpit/389-console/src/lib/replication/replChangelog.jsx create mode 100644 src/cockpit/389-console/src/lib/replication/replConfig.jsx create mode 100644 src/cockpit/389-console/src/lib/replication/replModals.jsx create mode 100644 src/cockpit/389-console/src/lib/replication/replSuffix.jsx create mode 100644 src/cockpit/389-console/src/lib/replication/replTables.jsx create mode 100644 src/cockpit/389-console/src/lib/replication/replTasks.jsx create mode 100644 src/cockpit/389-console/src/lib/replication/winsyncAgmts.jsx create mode 100644 src/cockpit/389-console/src/lib/schema/schemaModals.jsx create mode 100644 src/cockpit/389-console/src/lib/schema/schemaTables.jsx create mode 100644 src/cockpit/389-console/src/lib/security/certificateManagement.jsx create mode 100644 src/cockpit/389-console/src/lib/security/ciphers.jsx create mode 100644 src/cockpit/389-console/src/lib/security/securityModals.jsx create mode 100644 src/cockpit/389-console/src/lib/security/securityTables.jsx create mode 100644 src/cockpit/389-console/src/lib/server/accessLog.jsx create mode 100644 src/cockpit/389-console/src/lib/server/auditLog.jsx create mode 100644 src/cockpit/389-console/src/lib/server/auditfailLog.jsx create mode 100644 src/cockpit/389-console/src/lib/server/errorLog.jsx create mode 100644 src/cockpit/389-console/src/lib/server/ldapi.jsx create mode 100644 src/cockpit/389-console/src/lib/server/sasl.jsx create mode 100644 src/cockpit/389-console/src/lib/server/serverModals.jsx create mode 100644 src/cockpit/389-console/src/lib/server/serverTables.jsx create mode 100644 src/cockpit/389-console/src/lib/server/settings.jsx create mode 100644 src/cockpit/389-console/src/lib/server/tuning.jsx create mode 100644 src/cockpit/389-console/src/lib/tools.jsx create mode 100644 src/cockpit/389-console/src/manifest.json create mode 100644 src/cockpit/389-console/src/monitor.jsx create mode 100644 src/cockpit/389-console/src/plugins.jsx create mode 100644 src/cockpit/389-console/src/replication.jsx create mode 100644 src/cockpit/389-console/src/schema.jsx create mode 100644 src/cockpit/389-console/src/security.jsx create mode 100644 src/cockpit/389-console/src/server.jsx create mode 100644 src/cockpit/389-console/src/static/32px.png create mode 100644 src/cockpit/389-console/src/static/40px.png create mode 100644 src/cockpit/389-console/src/static/Typeahead.css create mode 100644 src/cockpit/389-console/src/static/bootpopup.min.js create mode 100644 src/cockpit/389-console/src/static/bootstrap-theme.min.css create mode 100644 src/cockpit/389-console/src/static/bootstrap-theme.min.css.map create mode 100644 src/cockpit/389-console/src/static/bootstrap.min.css create mode 100644 src/cockpit/389-console/src/static/bootstrap.min.css.map create mode 100644 src/cockpit/389-console/src/static/bootstrap.min.js create mode 100644 src/cockpit/389-console/src/static/c3.min.js create mode 100644 src/cockpit/389-console/src/static/d3.min.js create mode 100644 src/cockpit/389-console/src/static/dataTables.datetime-moment.js create mode 100644 
src/cockpit/389-console/src/static/images/ui-icons_444444_256x240.png create mode 100644 src/cockpit/389-console/src/static/images/ui-icons_555555_256x240.png create mode 100644 src/cockpit/389-console/src/static/images/ui-icons_777620_256x240.png create mode 100644 src/cockpit/389-console/src/static/images/ui-icons_777777_256x240.png create mode 100644 src/cockpit/389-console/src/static/images/ui-icons_cc0000_256x240.png create mode 100644 src/cockpit/389-console/src/static/images/ui-icons_ffffff_256x240.png create mode 100644 src/cockpit/389-console/src/static/jquery-3.3.1.min.js create mode 100644 src/cockpit/389-console/src/static/jquery.dataTables.min.css create mode 100644 src/cockpit/389-console/src/static/jquery.dataTables.min.js create mode 100644 src/cockpit/389-console/src/static/jquery.dataTables.select.min.js create mode 100644 src/cockpit/389-console/src/static/jquery.timepicker.min.css create mode 100644 src/cockpit/389-console/src/static/jquery.timepicker.min.js create mode 100644 src/cockpit/389-console/src/static/jstree.min.js create mode 100644 src/cockpit/389-console/src/static/moment.min.js create mode 100644 src/cockpit/389-console/src/static/page.min.css create mode 100644 src/cockpit/389-console/src/static/patternfly-additions.css create mode 100644 src/cockpit/389-console/src/static/patternfly.css create mode 100644 src/cockpit/389-console/src/static/patternfly.min.js create mode 100644 src/cockpit/389-console/src/static/style.min.css create mode 100644 src/cockpit/389-console/src/static/throbber.gif create mode 100644 src/cockpit/389-console/webpack.config.js create mode 100644 src/contrib/README.md create mode 100644 src/contrib/back-ldif/add.c create mode 100644 src/contrib/back-ldif/back-ldif.h create mode 100644 src/contrib/back-ldif/bind.c create mode 100644 src/contrib/back-ldif/close.c create mode 100644 src/contrib/back-ldif/compare.c create mode 100644 src/contrib/back-ldif/config.c create mode 100644 src/contrib/back-ldif/delete.c create mode 100644 src/contrib/back-ldif/init.c create mode 100644 src/contrib/back-ldif/modify.c create mode 100644 src/contrib/back-ldif/modrdn.c create mode 100644 src/contrib/back-ldif/monitor.c create mode 100644 src/contrib/back-ldif/search.c create mode 100644 src/contrib/back-ldif/start.c create mode 100644 src/contrib/back-ldif/unbind.c create mode 100644 src/lib389/.coveragerc create mode 100644 src/lib389/.gitignore create mode 100644 src/lib389/LICENSE create mode 100644 src/lib389/MANIFEST.in create mode 100644 src/lib389/Makefile create mode 100644 src/lib389/README.md create mode 100644 src/lib389/VERSION create mode 100755 src/lib389/cli/dsconf create mode 100755 src/lib389/cli/dscontainer create mode 100755 src/lib389/cli/dscreate create mode 100755 src/lib389/cli/dsctl create mode 100755 src/lib389/cli/dsidm create mode 100644 src/lib389/doc/Makefile create mode 100644 src/lib389/doc/source/accesscontrol.rst create mode 100644 src/lib389/doc/source/aci.rst create mode 100644 src/lib389/doc/source/agreement.rst create mode 100644 src/lib389/doc/source/backend.rst create mode 100644 src/lib389/doc/source/changelog.rst create mode 100644 src/lib389/doc/source/conf.py create mode 100644 src/lib389/doc/source/config.rst create mode 100644 src/lib389/doc/source/databases.rst create mode 100644 src/lib389/doc/source/dirsrv_log.rst create mode 100644 src/lib389/doc/source/domain.rst create mode 100644 src/lib389/doc/source/dseldif.rst create mode 100644 src/lib389/doc/source/group.rst create mode 100644 
src/lib389/doc/source/guidelines.rst create mode 100644 src/lib389/doc/source/identitymanagement.rst create mode 100644 src/lib389/doc/source/index.rst create mode 100644 src/lib389/doc/source/indexes.rst create mode 100644 src/lib389/doc/source/ldclt.rst create mode 100644 src/lib389/doc/source/mappingtree.rst create mode 100644 src/lib389/doc/source/monitor.rst create mode 100644 src/lib389/doc/source/need_to_be_triaged.rst create mode 100644 src/lib389/doc/source/organizationalunit.rst create mode 100644 src/lib389/doc/source/passwd.rst create mode 100644 src/lib389/doc/source/paths.rst create mode 100644 src/lib389/doc/source/plugin.rst create mode 100644 src/lib389/doc/source/replica.rst create mode 100644 src/lib389/doc/source/replication.rst create mode 100644 src/lib389/doc/source/repltools.rst create mode 100644 src/lib389/doc/source/rootdse.rst create mode 100644 src/lib389/doc/source/schema.rst create mode 100644 src/lib389/doc/source/services.rst create mode 100644 src/lib389/doc/source/task.rst create mode 100644 src/lib389/doc/source/user.rst create mode 100644 src/lib389/doc/source/utils.rst create mode 100644 src/lib389/dsadmin.pylintrc create mode 100644 src/lib389/lib389/__init__.py create mode 100644 src/lib389/lib389/_constants.py create mode 100644 src/lib389/lib389/_controls.py create mode 100644 src/lib389/lib389/_entry.py create mode 100644 src/lib389/lib389/_ldifconn.py create mode 100644 src/lib389/lib389/_mapped_object.py create mode 100644 src/lib389/lib389/_replication.py create mode 100644 src/lib389/lib389/aci.py create mode 100644 src/lib389/lib389/agreement.py create mode 100644 src/lib389/lib389/backend.py create mode 100644 src/lib389/lib389/chaining.py create mode 100644 src/lib389/lib389/cli_base/__init__.py create mode 100644 src/lib389/lib389/cli_base/dsrc.py create mode 100644 src/lib389/lib389/cli_conf/__init__.py create mode 100644 src/lib389/lib389/cli_conf/backend.py create mode 100644 src/lib389/lib389/cli_conf/backup.py create mode 100644 src/lib389/lib389/cli_conf/chaining.py create mode 100644 src/lib389/lib389/cli_conf/config.py create mode 100644 src/lib389/lib389/cli_conf/conflicts.py create mode 100644 src/lib389/lib389/cli_conf/directory_manager.py create mode 100644 src/lib389/lib389/cli_conf/monitor.py create mode 100644 src/lib389/lib389/cli_conf/plugin.py create mode 100644 src/lib389/lib389/cli_conf/plugins/__init__.py create mode 100644 src/lib389/lib389/cli_conf/plugins/accountpolicy.py create mode 100644 src/lib389/lib389/cli_conf/plugins/attruniq.py create mode 100644 src/lib389/lib389/cli_conf/plugins/automember.py create mode 100644 src/lib389/lib389/cli_conf/plugins/dna.py create mode 100644 src/lib389/lib389/cli_conf/plugins/linkedattr.py create mode 100644 src/lib389/lib389/cli_conf/plugins/managedentries.py create mode 100644 src/lib389/lib389/cli_conf/plugins/memberof.py create mode 100644 src/lib389/lib389/cli_conf/plugins/passthroughauth.py create mode 100644 src/lib389/lib389/cli_conf/plugins/posix_winsync.py create mode 100644 src/lib389/lib389/cli_conf/plugins/referint.py create mode 100644 src/lib389/lib389/cli_conf/plugins/retrochangelog.py create mode 100644 src/lib389/lib389/cli_conf/plugins/rootdn_ac.py create mode 100644 src/lib389/lib389/cli_conf/plugins/usn.py create mode 100644 src/lib389/lib389/cli_conf/pwpolicy.py create mode 100644 src/lib389/lib389/cli_conf/replication.py create mode 100644 src/lib389/lib389/cli_conf/saslmappings.py create mode 100644 src/lib389/lib389/cli_conf/schema.py create mode 
100644 src/lib389/lib389/cli_conf/security.py create mode 100644 src/lib389/lib389/cli_ctl/__init__.py create mode 100644 src/lib389/lib389/cli_ctl/dbtasks.py create mode 100644 src/lib389/lib389/cli_ctl/health.py create mode 100644 src/lib389/lib389/cli_ctl/instance.py create mode 100644 src/lib389/lib389/cli_ctl/nsstate.py create mode 100644 src/lib389/lib389/cli_ctl/tls.py create mode 100644 src/lib389/lib389/cli_idm/__init__.py create mode 100644 src/lib389/lib389/cli_idm/account.py create mode 100644 src/lib389/lib389/cli_idm/client_config.py create mode 100644 src/lib389/lib389/cli_idm/group.py create mode 100644 src/lib389/lib389/cli_idm/initialise.py create mode 100644 src/lib389/lib389/cli_idm/organizationalunit.py create mode 100644 src/lib389/lib389/cli_idm/posixgroup.py create mode 100644 src/lib389/lib389/cli_idm/role.py create mode 100644 src/lib389/lib389/cli_idm/user.py create mode 100755 src/lib389/lib389/clitools/__init__.py create mode 100755 src/lib389/lib389/clitools/ds_aci_lint create mode 100755 src/lib389/lib389/clitools/ds_backend_getattr create mode 100755 src/lib389/lib389/clitools/ds_backend_list create mode 100755 src/lib389/lib389/clitools/ds_backend_setattr create mode 100755 src/lib389/lib389/clitools/ds_krb_create_keytab create mode 100755 src/lib389/lib389/clitools/ds_krb_create_principal create mode 100755 src/lib389/lib389/clitools/ds_krb_create_realm create mode 100755 src/lib389/lib389/clitools/ds_krb_destroy_realm create mode 100755 src/lib389/lib389/clitools/ds_monitor_backend create mode 100755 src/lib389/lib389/clitools/ds_monitor_server create mode 100755 src/lib389/lib389/clitools/ds_schema_attributetype_list create mode 100755 src/lib389/lib389/clitools/ds_schema_attributetype_query create mode 100755 src/lib389/lib389/clitools/ds_setup create mode 100644 src/lib389/lib389/config.py create mode 100644 src/lib389/lib389/configurations/__init__.py create mode 100644 src/lib389/lib389/configurations/config.py create mode 100644 src/lib389/lib389/configurations/config_001003006.py create mode 100644 src/lib389/lib389/configurations/config_001004000.py create mode 100644 src/lib389/lib389/configurations/config_001004002.py create mode 100644 src/lib389/lib389/configurations/sample.py create mode 100644 src/lib389/lib389/conflicts.py create mode 100644 src/lib389/lib389/cos.py create mode 100644 src/lib389/lib389/dbgen.py create mode 100644 src/lib389/lib389/dirsrv_log.py create mode 100644 src/lib389/lib389/ds_instance.py create mode 100644 src/lib389/lib389/dseldif.py create mode 100644 src/lib389/lib389/encrypted_attributes.py create mode 100644 src/lib389/lib389/exceptions.py create mode 100644 src/lib389/lib389/extended_operations.py create mode 100644 src/lib389/lib389/extensibleobject.py create mode 100644 src/lib389/lib389/idm/__init__.py create mode 100644 src/lib389/lib389/idm/account.py create mode 100644 src/lib389/lib389/idm/country.py create mode 100644 src/lib389/lib389/idm/directorymanager.py create mode 100644 src/lib389/lib389/idm/domain.py create mode 100644 src/lib389/lib389/idm/group.py create mode 100644 src/lib389/lib389/idm/ipadomain.py create mode 100644 src/lib389/lib389/idm/nscontainer.py create mode 100644 src/lib389/lib389/idm/organization.py create mode 100644 src/lib389/lib389/idm/organizationalrole.py create mode 100644 src/lib389/lib389/idm/organizationalunit.py create mode 100644 src/lib389/lib389/idm/posixgroup.py create mode 100644 src/lib389/lib389/idm/role.py create mode 100644 src/lib389/lib389/idm/services.py 
create mode 100644 src/lib389/lib389/idm/user.py create mode 100644 src/lib389/lib389/index.py create mode 100644 src/lib389/lib389/instance/__init__.py create mode 100644 src/lib389/lib389/instance/options.py create mode 100644 src/lib389/lib389/instance/remove.py create mode 100644 src/lib389/lib389/instance/setup.py create mode 100644 src/lib389/lib389/ldclt.py create mode 100644 src/lib389/lib389/lint.py create mode 100644 src/lib389/lib389/mappingTree.py create mode 100644 src/lib389/lib389/mit_krb5.py create mode 100644 src/lib389/lib389/monitor.py create mode 100755 src/lib389/lib389/ns-slapd.valgrind create mode 100644 src/lib389/lib389/nss_ssl.py create mode 100644 src/lib389/lib389/passwd.py create mode 100644 src/lib389/lib389/password_plugins.py create mode 100644 src/lib389/lib389/paths.py create mode 100644 src/lib389/lib389/plugins.py create mode 100644 src/lib389/lib389/properties.py create mode 100644 src/lib389/lib389/pwpolicy.py create mode 100644 src/lib389/lib389/referral.py create mode 100644 src/lib389/lib389/replica.py create mode 100644 src/lib389/lib389/repltools.py create mode 100644 src/lib389/lib389/rootdse.py create mode 100644 src/lib389/lib389/sasl.py create mode 100644 src/lib389/lib389/saslmap.py create mode 100755 src/lib389/lib389/schema.py create mode 100644 src/lib389/lib389/suffix.py create mode 100644 src/lib389/lib389/tasks.py create mode 100644 src/lib389/lib389/tests/__init__.py create mode 100644 src/lib389/lib389/tests/aci_parse_test.py create mode 100644 src/lib389/lib389/tests/aci_test.py create mode 100644 src/lib389/lib389/tests/agreement_test.py create mode 100644 src/lib389/lib389/tests/backendLegacy_test.py create mode 100644 src/lib389/lib389/tests/backend_test.py create mode 100644 src/lib389/lib389/tests/cli/__init__.py create mode 100644 src/lib389/lib389/tests/cli/adm_instance_test.py create mode 100644 src/lib389/lib389/tests/cli/conf_backend_test.py create mode 100644 src/lib389/lib389/tests/cli/conf_backup_test.py create mode 100644 src/lib389/lib389/tests/cli/conf_chaining_test.py create mode 100644 src/lib389/lib389/tests/cli/conf_conflicts_test.py create mode 100644 src/lib389/lib389/tests/cli/conf_directory_manager_test.py create mode 100644 src/lib389/lib389/tests/cli/conf_plugin_test.py create mode 100644 src/lib389/lib389/tests/cli/conf_plugins/__init__.py create mode 100644 src/lib389/lib389/tests/cli/conf_plugins/automember_test.py create mode 100644 src/lib389/lib389/tests/cli/conf_plugins/memberof_test.py create mode 100644 src/lib389/lib389/tests/cli/conf_plugins/referint_test.py create mode 100644 src/lib389/lib389/tests/cli/conf_plugins/rootdn_ac_test.py create mode 100644 src/lib389/lib389/tests/cli/conf_plugins/usn_test.py create mode 100644 src/lib389/lib389/tests/cli/conf_pwpolicy_test.py create mode 100644 src/lib389/lib389/tests/cli/conf_schema_test.py create mode 100644 src/lib389/lib389/tests/cli/ctl_dbtasks_test.py create mode 100644 src/lib389/lib389/tests/cli/dsrc_test.py create mode 100644 src/lib389/lib389/tests/cli/idm_group_test.py create mode 100644 src/lib389/lib389/tests/cli/idm_user_modify_test.py create mode 100644 src/lib389/lib389/tests/cli/idm_user_test.py create mode 100644 src/lib389/lib389/tests/config.py create mode 100644 src/lib389/lib389/tests/config_test.py create mode 100644 src/lib389/lib389/tests/configurations/__init__.py create mode 100644 src/lib389/lib389/tests/configurations/config_001003006_test.py create mode 100644 
src/lib389/lib389/tests/configurations/config_001004000_test.py create mode 100644 src/lib389/lib389/tests/conftest.py create mode 100644 src/lib389/lib389/tests/dereference_test.py create mode 100644 src/lib389/lib389/tests/dirsrv_log_test.py create mode 100644 src/lib389/lib389/tests/dirsrv_test.py create mode 100644 src/lib389/lib389/tests/dsadmin_basic_test.py create mode 100644 src/lib389/lib389/tests/dsadmin_create_remove_test.py create mode 100644 src/lib389/lib389/tests/dsadmin_test.py create mode 100644 src/lib389/lib389/tests/dseldif_test.py create mode 100644 src/lib389/lib389/tests/effective_rights_test.py create mode 100644 src/lib389/lib389/tests/entry_test.py create mode 100644 src/lib389/lib389/tests/healthcheck_test.py create mode 100644 src/lib389/lib389/tests/idm/__init__.py create mode 100644 src/lib389/lib389/tests/idm/account_test.py create mode 100644 src/lib389/lib389/tests/idm/services_test.py create mode 100644 src/lib389/lib389/tests/idm/user_and_group_test.py create mode 100644 src/lib389/lib389/tests/index_test.py create mode 100644 src/lib389/lib389/tests/ldclt_test.py create mode 100644 src/lib389/lib389/tests/mapped_object_test.py create mode 100644 src/lib389/lib389/tests/mappingTreeLegacy_test.py create mode 100644 src/lib389/lib389/tests/mappingtree_test.py create mode 100644 src/lib389/lib389/tests/nss_ssl_test.py create mode 100644 src/lib389/lib389/tests/paths_test.py create mode 100644 src/lib389/lib389/tests/plugin_test.py create mode 100644 src/lib389/lib389/tests/plugins/__init__.py create mode 100644 src/lib389/lib389/tests/plugins/memberof_test.py create mode 100644 src/lib389/lib389/tests/plugins/referint_test.py create mode 100644 src/lib389/lib389/tests/plugins/usn_test.py create mode 100644 src/lib389/lib389/tests/plugins/utils.py create mode 100644 src/lib389/lib389/tests/referral_test.py create mode 100644 src/lib389/lib389/tests/replicaLegacy_test.py create mode 100644 src/lib389/lib389/tests/replica_test.py create mode 100644 src/lib389/lib389/tests/schema_test.py create mode 100644 src/lib389/lib389/tests/suffix_test.py create mode 100644 src/lib389/lib389/tests/test_module_proxy.py create mode 100644 src/lib389/lib389/tests/tls_external_test.py create mode 100644 src/lib389/lib389/tests/utils_test.py create mode 100644 src/lib389/lib389/tombstone.py create mode 100644 src/lib389/lib389/tools.py create mode 100644 src/lib389/lib389/topologies.py create mode 100644 src/lib389/lib389/utils.py create mode 100644 src/lib389/requirements.txt create mode 100644 src/lib389/setup.cfg create mode 100644 src/lib389/setup.py create mode 100644 src/lib389/tox.ini create mode 100644 src/librnsslapd/Cargo.toml create mode 100644 src/librnsslapd/README.md create mode 100644 src/librnsslapd/build.rs create mode 100644 src/librnsslapd/src/lib.rs create mode 100644 src/librslapd/Cargo.toml create mode 100644 src/librslapd/README.md create mode 100644 src/librslapd/build.rs create mode 100644 src/librslapd/src/lib.rs create mode 100644 src/libsds/Cargo.toml create mode 100644 src/libsds/README.md create mode 100644 src/libsds/external/csiphash/csiphash.c create mode 100644 src/libsds/external/liblfds711/build/gcc_gnumake_kbuild/Kbuild create mode 100644 src/libsds/external/liblfds711/build/msvc_gnumake/liblfds711.def create mode 100644 src/libsds/external/liblfds711/build/msvc_gnumake/makefile create mode 100644 src/libsds/external/liblfds711/build/wdk_7.1/dirs create mode 100644 
src/libsds/external/liblfds711/build/wdk_7.1/driver_entry_renamed_to_avoid_compiler_warning.c create mode 100644 src/libsds/external/liblfds711/build/wdk_7.1/liblfds711.def create mode 100644 src/libsds/external/liblfds711/build/wdk_7.1/readme_before_win_kernel_build.txt create mode 100644 src/libsds/external/liblfds711/build/wdk_7.1/runme_before_win_kernel_dynamic_lib_build.bat create mode 100644 src/libsds/external/liblfds711/build/wdk_7.1/runme_before_win_kernel_static_lib_build.bat create mode 100644 src/libsds/external/liblfds711/build/wdk_7.1/sources.dynamic create mode 100644 src/libsds/external/liblfds711/build/wdk_7.1/sources.static create mode 100644 src/libsds/external/liblfds711/inc/liblfds711.h create mode 100644 src/libsds/external/liblfds711/inc/liblfds711/lfds711_btree_addonly_unbalanced.h create mode 100644 src/libsds/external/liblfds711/inc/liblfds711/lfds711_freelist.h create mode 100644 src/libsds/external/liblfds711/inc/liblfds711/lfds711_hash_addonly.h create mode 100644 src/libsds/external/liblfds711/inc/liblfds711/lfds711_list_addonly_singlylinked_ordered.h create mode 100644 src/libsds/external/liblfds711/inc/liblfds711/lfds711_list_addonly_singlylinked_unordered.h create mode 100644 src/libsds/external/liblfds711/inc/liblfds711/lfds711_misc.h create mode 100644 src/libsds/external/liblfds711/inc/liblfds711/lfds711_porting_abstraction_layer_compiler.h create mode 100644 src/libsds/external/liblfds711/inc/liblfds711/lfds711_porting_abstraction_layer_operating_system.h create mode 100644 src/libsds/external/liblfds711/inc/liblfds711/lfds711_porting_abstraction_layer_processor.h create mode 100644 src/libsds/external/liblfds711/inc/liblfds711/lfds711_prng.h create mode 100644 src/libsds/external/liblfds711/inc/liblfds711/lfds711_queue_bounded_manyproducer_manyconsumer.h create mode 100644 src/libsds/external/liblfds711/inc/liblfds711/lfds711_queue_bounded_singleproducer_singleconsumer.h create mode 100644 src/libsds/external/liblfds711/inc/liblfds711/lfds711_queue_unbounded_manyproducer_manyconsumer.h create mode 100644 src/libsds/external/liblfds711/inc/liblfds711/lfds711_ringbuffer.h create mode 100644 src/libsds/external/liblfds711/inc/liblfds711/lfds711_stack.h create mode 100644 src/libsds/external/liblfds711/src/lfds711_btree_addonly_unbalanced/lfds711_btree_addonly_unbalanced_cleanup.c create mode 100644 src/libsds/external/liblfds711/src/lfds711_btree_addonly_unbalanced/lfds711_btree_addonly_unbalanced_get.c create mode 100644 src/libsds/external/liblfds711/src/lfds711_btree_addonly_unbalanced/lfds711_btree_addonly_unbalanced_init.c create mode 100644 src/libsds/external/liblfds711/src/lfds711_btree_addonly_unbalanced/lfds711_btree_addonly_unbalanced_insert.c create mode 100644 src/libsds/external/liblfds711/src/lfds711_btree_addonly_unbalanced/lfds711_btree_addonly_unbalanced_internal.h create mode 100644 src/libsds/external/liblfds711/src/lfds711_btree_addonly_unbalanced/lfds711_btree_addonly_unbalanced_query.c create mode 100644 src/libsds/external/liblfds711/src/lfds711_freelist/lfds711_freelist_cleanup.c create mode 100644 src/libsds/external/liblfds711/src/lfds711_freelist/lfds711_freelist_init.c create mode 100644 src/libsds/external/liblfds711/src/lfds711_freelist/lfds711_freelist_internal.h create mode 100644 src/libsds/external/liblfds711/src/lfds711_freelist/lfds711_freelist_pop.c create mode 100644 src/libsds/external/liblfds711/src/lfds711_freelist/lfds711_freelist_push.c create mode 100644 
src/libsds/external/liblfds711/src/lfds711_freelist/lfds711_freelist_query.c create mode 100644 src/libsds/external/liblfds711/src/lfds711_hash_addonly/lfds711_hash_addonly_cleanup.c create mode 100644 src/libsds/external/liblfds711/src/lfds711_hash_addonly/lfds711_hash_addonly_get.c create mode 100644 src/libsds/external/liblfds711/src/lfds711_hash_addonly/lfds711_hash_addonly_init.c create mode 100644 src/libsds/external/liblfds711/src/lfds711_hash_addonly/lfds711_hash_addonly_insert.c create mode 100644 src/libsds/external/liblfds711/src/lfds711_hash_addonly/lfds711_hash_addonly_internal.h create mode 100644 src/libsds/external/liblfds711/src/lfds711_hash_addonly/lfds711_hash_addonly_iterate.c create mode 100644 src/libsds/external/liblfds711/src/lfds711_hash_addonly/lfds711_hash_addonly_query.c create mode 100644 src/libsds/external/liblfds711/src/lfds711_list_addonly_singlylinked_ordered/lfds711_list_addonly_singlylinked_ordered_cleanup.c create mode 100644 src/libsds/external/liblfds711/src/lfds711_list_addonly_singlylinked_ordered/lfds711_list_addonly_singlylinked_ordered_get.c create mode 100644 src/libsds/external/liblfds711/src/lfds711_list_addonly_singlylinked_ordered/lfds711_list_addonly_singlylinked_ordered_init.c create mode 100644 src/libsds/external/liblfds711/src/lfds711_list_addonly_singlylinked_ordered/lfds711_list_addonly_singlylinked_ordered_insert.c create mode 100644 src/libsds/external/liblfds711/src/lfds711_list_addonly_singlylinked_ordered/lfds711_list_addonly_singlylinked_ordered_internal.h create mode 100644 src/libsds/external/liblfds711/src/lfds711_list_addonly_singlylinked_ordered/lfds711_list_addonly_singlylinked_ordered_query.c create mode 100644 src/libsds/external/liblfds711/src/lfds711_list_addonly_singlylinked_unordered/lfds711_list_addonly_singlylinked_unordered_cleanup.c create mode 100644 src/libsds/external/liblfds711/src/lfds711_list_addonly_singlylinked_unordered/lfds711_list_addonly_singlylinked_unordered_get.c create mode 100644 src/libsds/external/liblfds711/src/lfds711_list_addonly_singlylinked_unordered/lfds711_list_addonly_singlylinked_unordered_init.c create mode 100644 src/libsds/external/liblfds711/src/lfds711_list_addonly_singlylinked_unordered/lfds711_list_addonly_singlylinked_unordered_insert.c create mode 100644 src/libsds/external/liblfds711/src/lfds711_list_addonly_singlylinked_unordered/lfds711_list_addonly_singlylinked_unordered_internal.h create mode 100644 src/libsds/external/liblfds711/src/lfds711_list_addonly_singlylinked_unordered/lfds711_list_addonly_singlylinked_unordered_query.c create mode 100644 src/libsds/external/liblfds711/src/lfds711_misc/lfds711_misc_globals.c create mode 100644 src/libsds/external/liblfds711/src/lfds711_misc/lfds711_misc_internal.h create mode 100644 src/libsds/external/liblfds711/src/lfds711_misc/lfds711_misc_internal_backoff_init.c create mode 100644 src/libsds/external/liblfds711/src/lfds711_misc/lfds711_misc_query.c create mode 100644 src/libsds/external/liblfds711/src/lfds711_prng/lfds711_prng_init.c create mode 100644 src/libsds/external/liblfds711/src/lfds711_prng/lfds711_prng_internal.h create mode 100644 src/libsds/external/liblfds711/src/lfds711_queue_bounded_manyproducer_manyconsumer/lfds711_queue_bounded_manyproducer_manyconsumer_cleanup.c create mode 100644 src/libsds/external/liblfds711/src/lfds711_queue_bounded_manyproducer_manyconsumer/lfds711_queue_bounded_manyproducer_manyconsumer_dequeue.c create mode 100644 
src/libsds/external/liblfds711/src/lfds711_queue_bounded_manyproducer_manyconsumer/lfds711_queue_bounded_manyproducer_manyconsumer_enqueue.c create mode 100644 src/libsds/external/liblfds711/src/lfds711_queue_bounded_manyproducer_manyconsumer/lfds711_queue_bounded_manyproducer_manyconsumer_init.c create mode 100644 src/libsds/external/liblfds711/src/lfds711_queue_bounded_manyproducer_manyconsumer/lfds711_queue_bounded_manyproducer_manyconsumer_internal.h create mode 100644 src/libsds/external/liblfds711/src/lfds711_queue_bounded_manyproducer_manyconsumer/lfds711_queue_bounded_manyproducer_manyconsumer_query.c create mode 100644 src/libsds/external/liblfds711/src/lfds711_queue_bounded_singleproducer_singleconsumer/lfds711_queue_bounded_singleproducer_singleconsumer_cleanup.c create mode 100644 src/libsds/external/liblfds711/src/lfds711_queue_bounded_singleproducer_singleconsumer/lfds711_queue_bounded_singleproducer_singleconsumer_dequeue.c create mode 100644 src/libsds/external/liblfds711/src/lfds711_queue_bounded_singleproducer_singleconsumer/lfds711_queue_bounded_singleproducer_singleconsumer_enqueue.c create mode 100644 src/libsds/external/liblfds711/src/lfds711_queue_bounded_singleproducer_singleconsumer/lfds711_queue_bounded_singleproducer_singleconsumer_init.c create mode 100644 src/libsds/external/liblfds711/src/lfds711_queue_bounded_singleproducer_singleconsumer/lfds711_queue_bounded_singleproducer_singleconsumer_internal.h create mode 100644 src/libsds/external/liblfds711/src/lfds711_queue_bounded_singleproducer_singleconsumer/lfds711_queue_bounded_singleproducer_singleconsumer_query.c create mode 100644 src/libsds/external/liblfds711/src/lfds711_queue_unbounded_manyproducer_manyconsumer/lfds711_queue_unbounded_manyproducer_manyconsumer_cleanup.c create mode 100644 src/libsds/external/liblfds711/src/lfds711_queue_unbounded_manyproducer_manyconsumer/lfds711_queue_unbounded_manyproducer_manyconsumer_dequeue.c create mode 100644 src/libsds/external/liblfds711/src/lfds711_queue_unbounded_manyproducer_manyconsumer/lfds711_queue_unbounded_manyproducer_manyconsumer_enqueue.c create mode 100644 src/libsds/external/liblfds711/src/lfds711_queue_unbounded_manyproducer_manyconsumer/lfds711_queue_unbounded_manyproducer_manyconsumer_init.c create mode 100644 src/libsds/external/liblfds711/src/lfds711_queue_unbounded_manyproducer_manyconsumer/lfds711_queue_unbounded_manyproducer_manyconsumer_internal.h create mode 100644 src/libsds/external/liblfds711/src/lfds711_queue_unbounded_manyproducer_manyconsumer/lfds711_queue_unbounded_manyproducer_manyconsumer_query.c create mode 100644 src/libsds/external/liblfds711/src/lfds711_ringbuffer/lfds711_ringbuffer_cleanup.c create mode 100644 src/libsds/external/liblfds711/src/lfds711_ringbuffer/lfds711_ringbuffer_init.c create mode 100644 src/libsds/external/liblfds711/src/lfds711_ringbuffer/lfds711_ringbuffer_internal.h create mode 100644 src/libsds/external/liblfds711/src/lfds711_ringbuffer/lfds711_ringbuffer_query.c create mode 100644 src/libsds/external/liblfds711/src/lfds711_ringbuffer/lfds711_ringbuffer_read.c create mode 100644 src/libsds/external/liblfds711/src/lfds711_ringbuffer/lfds711_ringbuffer_write.c create mode 100644 src/libsds/external/liblfds711/src/lfds711_stack/lfds711_stack_cleanup.c create mode 100644 src/libsds/external/liblfds711/src/lfds711_stack/lfds711_stack_init.c create mode 100644 src/libsds/external/liblfds711/src/lfds711_stack/lfds711_stack_internal.h create mode 100644 
src/libsds/external/liblfds711/src/lfds711_stack/lfds711_stack_pop.c create mode 100644 src/libsds/external/liblfds711/src/lfds711_stack/lfds711_stack_push.c create mode 100644 src/libsds/external/liblfds711/src/lfds711_stack/lfds711_stack_query.c create mode 100644 src/libsds/external/liblfds711/src/liblfds711_internal.h create mode 100644 src/libsds/include/sds.h create mode 100644 src/libsds/sds/bpt/bpt.c create mode 100644 src/libsds/sds/bpt/bpt.h create mode 100644 src/libsds/sds/bpt/common.c create mode 100644 src/libsds/sds/bpt/list.c create mode 100644 src/libsds/sds/bpt/map.c create mode 100644 src/libsds/sds/bpt/search.c create mode 100644 src/libsds/sds/bpt/set.c create mode 100644 src/libsds/sds/bpt/verify.c create mode 100644 src/libsds/sds/bpt_cow/atomic.c create mode 100644 src/libsds/sds/bpt_cow/bpt_cow.c create mode 100644 src/libsds/sds/bpt_cow/bpt_cow.h create mode 100644 src/libsds/sds/bpt_cow/delete.c create mode 100644 src/libsds/sds/bpt_cow/insert.c create mode 100644 src/libsds/sds/bpt_cow/node.c create mode 100644 src/libsds/sds/bpt_cow/search.c create mode 100644 src/libsds/sds/bpt_cow/txn.c create mode 100644 src/libsds/sds/bpt_cow/verify.c create mode 100644 src/libsds/sds/core/crc32c.c create mode 100644 src/libsds/sds/core/utils.c create mode 100644 src/libsds/sds/ht/ht.c create mode 100644 src/libsds/sds/ht/ht.h create mode 100644 src/libsds/sds/ht/map.c create mode 100644 src/libsds/sds/ht/node.c create mode 100644 src/libsds/sds/ht/op.c create mode 100644 src/libsds/sds/ht/verify.c create mode 100644 src/libsds/sds/lib.rs create mode 100644 src/libsds/sds/queue/lqueue.c create mode 100644 src/libsds/sds/queue/queue.c create mode 100644 src/libsds/sds/queue/queue.h create mode 100644 src/libsds/sds/queue/tqueue.c create mode 100644 src/libsds/sds/sds_internal.h create mode 100644 src/libsds/sds/tqueue.rs create mode 100644 src/libsds/test/benchmark.c create mode 100644 src/libsds/test/benchmark.h create mode 100644 src/libsds/test/benchmark_par.c create mode 100644 src/libsds/test/benchmark_par.h create mode 100644 src/libsds/test/benchmark_parwrap.c create mode 100644 src/libsds/test/test_fixtures.c create mode 100644 src/libsds/test/test_sds.c create mode 100644 src/libsds/test/test_sds.h create mode 100644 src/libsds/test/test_sds_bpt.c create mode 100644 src/libsds/test/test_sds_cow.c create mode 100644 src/libsds/test/test_sds_csiphash.c create mode 100644 src/libsds/test/test_sds_ht.c create mode 100644 src/libsds/test/test_sds_lqueue.c create mode 100644 src/libsds/test/test_sds_queue.c create mode 100644 src/libsds/test/test_sds_set.c create mode 100644 src/libsds/test/test_sds_tqueue.c create mode 100644 src/pkgconfig/dirsrv.pc.in create mode 100644 src/pkgconfig/libsds.pc.in create mode 100644 src/pkgconfig/svrcore.pc.in create mode 100644 src/slapd/Cargo.toml create mode 100644 src/slapd/src/error.rs create mode 100644 src/slapd/src/fernet.rs create mode 100644 src/slapd/src/lib.rs create mode 100644 src/svrcore/AUTHORS create mode 100644 src/svrcore/COPYING create mode 100644 src/svrcore/ChangeLog create mode 100644 src/svrcore/INSTALL create mode 100644 src/svrcore/INSTALL.win create mode 100644 src/svrcore/LICENSE create mode 100644 src/svrcore/Makefile.am create mode 100644 src/svrcore/NEWS create mode 100644 src/svrcore/README create mode 100644 src/svrcore/TODO create mode 100755 src/svrcore/autogen.sh create mode 100644 src/svrcore/configure.ac create mode 100644 src/svrcore/examples/svrcore_driver.c create mode 100644 
src/svrcore/m4/nspr.m4 create mode 100644 src/svrcore/m4/nss.m4 create mode 100644 src/svrcore/m4/systemd.m4 create mode 100644 src/svrcore/src/Makefile.am create mode 100644 src/svrcore/src/Makefile.win create mode 100644 src/svrcore/src/alt.c create mode 100644 src/svrcore/src/cache.c create mode 100644 src/svrcore/src/errors.c create mode 100644 src/svrcore/src/file.c create mode 100644 src/svrcore/src/key.ico create mode 100644 src/svrcore/src/logo.ico create mode 100644 src/svrcore/src/manifest.mn create mode 100644 src/svrcore/src/ntgetpin.c create mode 100644 src/svrcore/src/ntgetpin.rc create mode 100644 src/svrcore/src/ntresource.h create mode 100644 src/svrcore/src/pin.c create mode 100644 src/svrcore/src/pk11.c create mode 100644 src/svrcore/src/std-systemd.c create mode 100644 src/svrcore/src/std.c create mode 100644 src/svrcore/src/svrcore.h create mode 100644 src/svrcore/src/systemd-ask-pass.c create mode 100644 src/svrcore/src/user.c create mode 100644 test/libslapd/counters/atomic.c create mode 100644 test/libslapd/operation/v3_compat.c create mode 100644 test/libslapd/pblock/analytics.c create mode 100644 test/libslapd/pblock/pblock_accessors.txt create mode 100644 test/libslapd/pblock/pblock_accessors_freq.txt create mode 100644 test/libslapd/pblock/v3_compat.c create mode 100644 test/libslapd/schema/filter_validate.c create mode 100644 test/libslapd/spal/meminfo.c create mode 100644 test/libslapd/test.c create mode 100644 test/main.c create mode 100644 test/pblock_analyse.py create mode 100644 test/plugins/pwdstorage/pbkdf2.c create mode 100644 test/plugins/test.c create mode 100644 test/test_slapd.h create mode 100755 wrappers/cl-dump.in create mode 100644 wrappers/ds_systemd_ask_password_acl.in create mode 100644 wrappers/initscript.in create mode 100644 wrappers/ldap-agent-initscript.in create mode 100755 wrappers/repl-monitor.in create mode 100644 wrappers/systemd-snmp.service.in create mode 100644 wrappers/systemd.group.in create mode 100644 wrappers/systemd.template.service.custom.conf.in create mode 100644 wrappers/systemd.template.service.in create mode 100644 wrappers/systemd.template.service.xsan.conf.in
diff --git a/.cargo/config.in b/.cargo/config.in
new file mode 100644
index 0000000..d7d8ff4
--- /dev/null
+++ b/.cargo/config.in
@@ -0,0 +1,6 @@
+[source.crates-io]
+registry = "https://github.com/rust-lang/crates.io-index"
+@rust_vendor_sources@
+
+[source.vendored-sources]
+directory = "./vendor"
diff --git a/.clang-format b/.clang-format
new file mode 100644
index 0000000..ca89d76
--- /dev/null
+++ b/.clang-format
@@ -0,0 +1,49 @@
+---
+# BasedOnStyle: Mozilla
+AccessModifierOffset: 0
+# ConstructorInitializerIndentWidth: 4
+# AlignEscapedNewlinesLeft: true
+# AlignTrailingComments: true
+AllowAllParametersOfDeclarationOnNextLine: false
+AllowShortIfStatementsOnASingleLine: false
+AllowShortLoopsOnASingleLine: false
+# AlwaysBreakTemplateDeclarations: false
+# AlwaysBreakBeforeMultilineStrings: false
+BreakBeforeBinaryOperators: false
+BreakBeforeTernaryOperators: true
+BreakConstructorInitializersBeforeComma: false
+BinPackParameters: false
+ColumnLimit: 0
+ConstructorInitializerAllOnOneLineOrOnePerLine: false
+# DerivePointerBinding: true
+# ExperimentalAutoDetectBinPacking: false
+# IndentCaseLabels: true
+MaxEmptyLinesToKeep: 2
+# NamespaceIndentation: None
+# ObjCSpaceBeforeProtocolList: false
+# PenaltyBreakBeforeFirstCallParameter: 19
+# PenaltyBreakComment: 60
+# PenaltyBreakString: 1000
+# PenaltyBreakFirstLessLess: 120
+# PenaltyExcessCharacter: 1000000
+# PenaltyReturnTypeOnItsOwnLine: 200
+# PointerBindsToType: true
+SpacesBeforeTrailingComments: 2
+# Cpp11BracedListStyle: false
+Standard: Cpp03
+IndentWidth: 4
+TabWidth: 4
+UseTab: Never
+SpaceBeforeAssignmentOperators: true
+BreakBeforeBraces: Mozilla
+IndentFunctionDeclarationAfterType: false
+SpacesInParentheses: false
+SpacesInAngles: false
+SpaceInEmptyParentheses: false
+SpacesInCStyleCastParentheses: false
+SpaceAfterControlStatementKeyword: true
+ContinuationIndentWidth: 4
+SortIncludes: false
+AlwaysBreakAfterReturnType: TopLevelDefinitions
+...
+
diff --git a/.cvsignore b/.cvsignore
new file mode 100644
index 0000000..e50a4a3
--- /dev/null
+++ b/.cvsignore
@@ -0,0 +1,5 @@
+Linux
+built
+modules.mk
+pumpkin.dat
+.cvsignore
diff --git a/.dockerignore b/.dockerignore
new file mode 100644
index 0000000..1905eb5
--- /dev/null
+++ b/.dockerignore
@@ -0,0 +1,3 @@
+.git
+.gitignore
+./src/cockpit/389-console/node_modules
diff --git a/.gitignore b/.gitignore
new file mode 100644
index 0000000..7a92af0
--- /dev/null
+++ b/.gitignore
@@ -0,0 +1,236 @@
+autom4te.cache
+m4/libtool.m4
+m4/ltoptions.m4
+m4/ltsugar.m4
+m4/ltversion.m4
+m4/lt~obsolete.m4
+Makefile.in
+aclocal.m4
+ar-lib
+compile
+config.guess
+config.h.in
+config.h.in~
+config.sub
+configure
+depcomp
+install-sh
+ltmain.sh
+missing
+Makefile
+config.h
+config.log
+config.status
+libtool
+stamp-h1
+*~
+*.patch
+.DS_Store
+.autotools
+.cproject
+.project
+.settings
+.cache
+*.a
+*.rsa
+*.dirstamp
+*.la
+*.lo
+*.o
+*.rso
+*.pyc
+*.rej
+__pycache__
+.libs
+.deps
+rpmbuild
+rpm/389-ds-base.spec
+Makefile
+config.h
+config.log
+config.status
+dberrstrs.h
+dbscan
+dirsrv.pc
+dsktune
+infadd
+ldap-agent
+ldclt
+ldif
+libtool
+makstrdb
+migratecred
+mmldif
+ns-slapd
+ns-slapd.properties
+pwdhash
+rsearch
+stamp-h1
+benchmark_par_sds
+benchmark_sds
+doxyfile.stamp
+tags
+test-driver
+test-suite.log
+test_libsds
+test_libsds.log
+test_libsds.trs
+test_nuncstans
+test_nuncstans.log
+test_nuncstans.trs
+test_nuncstans_stress_large
+test_nuncstans_stress_small
+test_nuncstans_stress_small.log
+test_nuncstans_stress_small.trs
+test_slapd
+test_slapd.log
+test_slapd.trs
+ldap/admin/src/dirsrv
+ldap/admin/src/defaults.inf
+ldap/admin/src/scripts/80upgradednformat.pl
+ldap/admin/src/scripts/DSCreate.pm
+ldap/admin/src/scripts/DSMigration.pm
+ldap/admin/src/scripts/DSSharedLib
+ldap/admin/src/scripts/DSUpdate.pm
+ldap/admin/src/scripts/DSUtil.pm
+ldap/admin/src/scripts/DialogManager.pm
+ldap/admin/src/scripts/Migration.pm
+ldap/admin/src/scripts/Setup.pm
+ldap/admin/src/scripts/SetupDialogs.pm
+ldap/admin/src/scripts/bak2db
+ldap/admin/src/scripts/bak2db.pl
+ldap/admin/src/scripts/cleanallruv.pl
+ldap/admin/src/scripts/db2bak
+ldap/admin/src/scripts/db2bak.pl
+ldap/admin/src/scripts/db2index
+ldap/admin/src/scripts/db2index.pl
+ldap/admin/src/scripts/db2ldif
+ldap/admin/src/scripts/db2ldif.pl
+ldap/admin/src/scripts/dbverify
+ldap/admin/src/scripts/dn2rdn
+ldap/admin/src/scripts/dscreate.map
+ldap/admin/src/scripts/dsorgentries.map
+ldap/admin/src/scripts/dsupdate.map
+ldap/admin/src/scripts/fixup-linkedattrs.pl
+ldap/admin/src/scripts/fixup-memberof.pl
+ldap/admin/src/scripts/ldif2db
+ldap/admin/src/scripts/ldif2db.pl
+ldap/admin/src/scripts/ldif2ldap
+ldap/admin/src/scripts/migrate-ds.pl
+ldap/admin/src/scripts/monitor
+ldap/admin/src/scripts/ns-accountstatus.pl
+ldap/admin/src/scripts/ns-activate.pl
+ldap/admin/src/scripts/ns-inactivate.pl
+ldap/admin/src/scripts/ns-newpwpolicy.pl
+ldap/admin/src/scripts/remove-ds.pl
+ldap/admin/src/scripts/repl-monitor.pl
+ldap/admin/src/scripts/restart-dirsrv
+ldap/admin/src/scripts/restoreconfig
+ldap/admin/src/scripts/saveconfig
+ldap/admin/src/scripts/schema-reload.pl
+ldap/admin/src/scripts/setup-ds.pl
+ldap/admin/src/scripts/setup-ds.res
+ldap/admin/src/scripts/start-dirsrv
+ldap/admin/src/scripts/stop-dirsrv
+ldap/admin/src/scripts/suffix2instance
+ldap/admin/src/scripts/syntax-validate.pl
+ldap/admin/src/scripts/template-bak2db
+ldap/admin/src/scripts/template-bak2db.pl
+ldap/admin/src/scripts/template-cleanallruv.pl
+ldap/admin/src/scripts/template-db2bak
+ldap/admin/src/scripts/template-db2bak.pl
+ldap/admin/src/scripts/template-db2index
+ldap/admin/src/scripts/template-db2index.pl
+ldap/admin/src/scripts/template-db2ldif
+ldap/admin/src/scripts/template-db2ldif.pl
+ldap/admin/src/scripts/template-dbverify
+ldap/admin/src/scripts/template-dn2rdn
+ldap/admin/src/scripts/template-fixup-linkedattrs.pl
+ldap/admin/src/scripts/template-fixup-memberof.pl
+ldap/admin/src/scripts/template-fixup-memberuid.pl
+ldap/admin/src/scripts/template-ldif2db
+ldap/admin/src/scripts/template-ldif2db.pl
+ldap/admin/src/scripts/template-ldif2ldap
+ldap/admin/src/scripts/template-monitor
+ldap/admin/src/scripts/template-ns-accountstatus.pl
+ldap/admin/src/scripts/template-ns-activate.pl
+ldap/admin/src/scripts/template-ns-inactivate.pl
+ldap/admin/src/scripts/template-ns-newpwpolicy.pl
+ldap/admin/src/scripts/template-restart-slapd
+ldap/admin/src/scripts/template-restoreconfig
+ldap/admin/src/scripts/template-saveconfig
+ldap/admin/src/scripts/template-schema-reload.pl
+ldap/admin/src/scripts/template-start-slapd
+ldap/admin/src/scripts/template-stop-slapd
+ldap/admin/src/scripts/template-suffix2instance
+ldap/admin/src/scripts/template-syntax-validate.pl
+ldap/admin/src/scripts/template-upgradednformat
+ldap/admin/src/scripts/template-usn-tombstone-cleanup.pl
+ldap/admin/src/scripts/template-verify-db.pl
+ldap/admin/src/scripts/template-vlvindex
+ldap/admin/src/scripts/upgradedb
+ldap/admin/src/scripts/upgradednformat
+ldap/admin/src/scripts/usn-tombstone-cleanup.pl
+ldap/admin/src/scripts/verify-db.pl
+ldap/admin/src/scripts/vlvindex
+ldap/admin/src/scripts/91reindex.pl
+ldap/admin/src/scripts/dbmon.sh
+ldap/admin/src/scripts/ds_selinux_enabled
+ldap/admin/src/scripts/ds_selinux_port_query
+ldap/admin/src/scripts/readnsstate
+ldap/admin/src/scripts/status-dirsrv
+ldap/admin/src/slapd.inf
+ldap/admin/src/template-initconfig
+ldap/ldif/template-baseacis.ldif
+ldap/ldif/template-bitwise.ldif
+ldap/ldif/template-country.ldif
+ldap/ldif/template-dnaplugin.ldif
+ldap/ldif/template-domain.ldif
+ldap/ldif/template-dse.ldif
+ldap/ldif/template-ldapi-autobind.ldif
+ldap/ldif/template-ldapi-default.ldif
+ldap/ldif/template-ldapi.ldif
+ldap/ldif/template-locality.ldif
+ldap/ldif/template-org.ldif
+ldap/ldif/template-orgunit.ldif
+ldap/ldif/template-pampta.ldif
+ldap/ldif/template-sasl.ldif
+ldap/ldif/template-state.ldif
+ldap/ldif/template-suffix-db.ldif
+ldap/ldif/template-dse-minimal.ldif
+ldap/servers/slapd/tools/rsearch/scripts/dbgen.pl
+ldap/servers/snmp/ldap-agent.conf
+src/pkgconfig/libsds.pc
+src/pkgconfig/nunc-stans.pc
+src/pkgconfig/svrcore.pc
+wrappers/cl-dump
+wrappers/dbscan
+wrappers/dirsrv
+wrappers/dirsrv-snmp
+wrappers/dsktune
+wrappers/infadd
+wrappers/ldap-agent
+wrappers/ldclt
+wrappers/ldif
+wrappers/migratecred
+wrappers/mmldif
+wrappers/pwdhash
+wrappers/repl-monitor
+wrappers/rsearch
+wrappers/ds_systemd_ask_password_acl
+docs/slapi.doxy
+man/man3/
+html/
+.pytest_cache/
+src/lib389/dist/
+src/lib389/man/
+src/libsds/target/
+src/librslapd/target/
+dist
+venv
+.idea
+src/cockpit/389-console/cockpit_dist/
+src/cockpit/389-console/node_modules/
+ldap/servers/slapd/rust-slapi-private.h
+vendor
+vendor.tar.gz
diff --git a/LICENSE b/LICENSE
new file mode 100644
index 0000000..395a348
--- /dev/null
+++ b/LICENSE
@@ -0,0 +1,28 @@
+Copyright (C) 2015 Red Hat
+See files 'LICENSE.GPLv3+', 'LICENSE.openssl', and 'LICENSE.mit' for
+more information.
+
+This program is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation, either version 3 of the License, or
+(at your option) any later version.
+
+This program is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with this program. If not, see .
+
+Additional permission under GPLv3 section 7:
+
+If you modify this Program, or any covered work, by linking or
+combining it with OpenSSL, or a modified version of OpenSSL licensed
+under the OpenSSL license
+(https://www.openssl.org/source/license.html), the licensors of this
+Program grant you additional permission to convey the resulting
+work. Corresponding Source for a non-source form of such a
+combination shall include the source code for the parts that are
+licensed under the OpenSSL license as well as that of the covered
+work.
diff --git a/LICENSE.GPLv3+ b/LICENSE.GPLv3+
new file mode 100644
index 0000000..94a9ed0
--- /dev/null
+++ b/LICENSE.GPLv3+
@@ -0,0 +1,674 @@
+ GNU GENERAL PUBLIC LICENSE
+ Version 3, 29 June 2007
+
+ Copyright (C) 2007 Free Software Foundation, Inc.
+ Everyone is permitted to copy and distribute verbatim copies
+ of this license document, but changing it is not allowed.
+
+ Preamble
+
+ The GNU General Public License is a free, copyleft license for
+software and other kinds of works.
+
+ The licenses for most software and other practical works are designed
+to take away your freedom to share and change the works. By contrast,
+the GNU General Public License is intended to guarantee your freedom to
+share and change all versions of a program--to make sure it remains free
+software for all its users. We, the Free Software Foundation, use the
+GNU General Public License for most of our software; it applies also to
+any other work released this way by its authors. You can apply it to
+your programs, too.
+
+ When we speak of free software, we are referring to freedom, not
+price. Our General Public Licenses are designed to make sure that you
+have the freedom to distribute copies of free software (and charge for
+them if you wish), that you receive source code or can get it if you
+want it, that you can change the software or use pieces of it in new
+free programs, and that you know you can do these things.
+
+ To protect your rights, we need to prevent others from denying you
+these rights or asking you to surrender the rights. Therefore, you have
+certain responsibilities if you distribute copies of the software, or if
+you modify it: responsibilities to respect the freedom of others.
+
+ For example, if you distribute copies of such a program, whether
+gratis or for a fee, you must pass on to the recipients the same
+freedoms that you received. You must make sure that they, too, receive
+or can get the source code. And you must show them these terms so they
+know their rights.
+
+ Developers that use the GNU GPL protect your rights with two steps:
+(1) assert copyright on the software, and (2) offer you this License
+giving you legal permission to copy, distribute and/or modify it.
+
+ For the developers' and authors' protection, the GPL clearly explains
+that there is no warranty for this free software. For both users' and
+authors' sake, the GPL requires that modified versions be marked as
+changed, so that their problems will not be attributed erroneously to
+authors of previous versions.
+
+ Some devices are designed to deny users access to install or run
+modified versions of the software inside them, although the manufacturer
+can do so. This is fundamentally incompatible with the aim of
+protecting users' freedom to change the software. The systematic
+pattern of such abuse occurs in the area of products for individuals to
+use, which is precisely where it is most unacceptable. Therefore, we
+have designed this version of the GPL to prohibit the practice for those
+products. If such problems arise substantially in other domains, we
+stand ready to extend this provision to those domains in future versions
+of the GPL, as needed to protect the freedom of users.
+
+ Finally, every program is threatened constantly by software patents.
+States should not allow patents to restrict development and use of
+software on general-purpose computers, but in those that do, we wish to
+avoid the special danger that patents applied to a free program could
+make it effectively proprietary. To prevent this, the GPL assures that
+patents cannot be used to render the program non-free.
+
+ The precise terms and conditions for copying, distribution and
+modification follow.
+
+ TERMS AND CONDITIONS
+
+ 0. Definitions.
+
+ "This License" refers to version 3 of the GNU General Public License.
+
+ "Copyright" also means copyright-like laws that apply to other kinds of
+works, such as semiconductor masks.
+
+ "The Program" refers to any copyrightable work licensed under this
+License. Each licensee is addressed as "you". "Licensees" and
+"recipients" may be individuals or organizations.
+
+ To "modify" a work means to copy from or adapt all or part of the work
+in a fashion requiring copyright permission, other than the making of an
+exact copy. The resulting work is called a "modified version" of the
+earlier work or a work "based on" the earlier work.
+
+ A "covered work" means either the unmodified Program or a work based
+on the Program.
+
+ To "propagate" a work means to do anything with it that, without
+permission, would make you directly or secondarily liable for
+infringement under applicable copyright law, except executing it on a
+computer or modifying a private copy. Propagation includes copying,
+distribution (with or without modification), making available to the
+public, and in some countries other activities as well.
+
+ To "convey" a work means any kind of propagation that enables other
+parties to make or receive copies. Mere interaction with a user through
+a computer network, with no transfer of a copy, is not conveying.
+ + An interactive user interface displays "Appropriate Legal Notices" +to the extent that it includes a convenient and prominently visible +feature that (1) displays an appropriate copyright notice, and (2) +tells the user that there is no warranty for the work (except to the +extent that warranties are provided), that licensees may convey the +work under this License, and how to view a copy of this License. If +the interface presents a list of user commands or options, such as a +menu, a prominent item in the list meets this criterion. + + 1. Source Code. + + The "source code" for a work means the preferred form of the work +for making modifications to it. "Object code" means any non-source +form of a work. + + A "Standard Interface" means an interface that either is an official +standard defined by a recognized standards body, or, in the case of +interfaces specified for a particular programming language, one that +is widely used among developers working in that language. + + The "System Libraries" of an executable work include anything, other +than the work as a whole, that (a) is included in the normal form of +packaging a Major Component, but which is not part of that Major +Component, and (b) serves only to enable use of the work with that +Major Component, or to implement a Standard Interface for which an +implementation is available to the public in source code form. A +"Major Component", in this context, means a major essential component +(kernel, window system, and so on) of the specific operating system +(if any) on which the executable work runs, or a compiler used to +produce the work, or an object code interpreter used to run it. + + The "Corresponding Source" for a work in object code form means all +the source code needed to generate, install, and (for an executable +work) run the object code and to modify the work, including scripts to +control those activities. However, it does not include the work's +System Libraries, or general-purpose tools or generally available free +programs which are used unmodified in performing those activities but +which are not part of the work. For example, Corresponding Source +includes interface definition files associated with source files for +the work, and the source code for shared libraries and dynamically +linked subprograms that the work is specifically designed to require, +such as by intimate data communication or control flow between those +subprograms and other parts of the work. + + The Corresponding Source need not include anything that users +can regenerate automatically from other parts of the Corresponding +Source. + + The Corresponding Source for a work in source code form is that +same work. + + 2. Basic Permissions. + + All rights granted under this License are granted for the term of +copyright on the Program, and are irrevocable provided the stated +conditions are met. This License explicitly affirms your unlimited +permission to run the unmodified Program. The output from running a +covered work is covered by this License only if the output, given its +content, constitutes a covered work. This License acknowledges your +rights of fair use or other equivalent, as provided by copyright law. + + You may make, run and propagate covered works that you do not +convey, without conditions so long as your license otherwise remains +in force. 
You may convey covered works to others for the sole purpose +of having them make modifications exclusively for you, or provide you +with facilities for running those works, provided that you comply with +the terms of this License in conveying all material for which you do +not control copyright. Those thus making or running the covered works +for you must do so exclusively on your behalf, under your direction +and control, on terms that prohibit them from making any copies of +your copyrighted material outside their relationship with you. + + Conveying under any other circumstances is permitted solely under +the conditions stated below. Sublicensing is not allowed; section 10 +makes it unnecessary. + + 3. Protecting Users' Legal Rights From Anti-Circumvention Law. + + No covered work shall be deemed part of an effective technological +measure under any applicable law fulfilling obligations under article +11 of the WIPO copyright treaty adopted on 20 December 1996, or +similar laws prohibiting or restricting circumvention of such +measures. + + When you convey a covered work, you waive any legal power to forbid +circumvention of technological measures to the extent such circumvention +is effected by exercising rights under this License with respect to +the covered work, and you disclaim any intention to limit operation or +modification of the work as a means of enforcing, against the work's +users, your or third parties' legal rights to forbid circumvention of +technological measures. + + 4. Conveying Verbatim Copies. + + You may convey verbatim copies of the Program's source code as you +receive it, in any medium, provided that you conspicuously and +appropriately publish on each copy an appropriate copyright notice; +keep intact all notices stating that this License and any +non-permissive terms added in accord with section 7 apply to the code; +keep intact all notices of the absence of any warranty; and give all +recipients a copy of this License along with the Program. + + You may charge any price or no price for each copy that you convey, +and you may offer support or warranty protection for a fee. + + 5. Conveying Modified Source Versions. + + You may convey a work based on the Program, or the modifications to +produce it from the Program, in the form of source code under the +terms of section 4, provided that you also meet all of these conditions: + + a) The work must carry prominent notices stating that you modified + it, and giving a relevant date. + + b) The work must carry prominent notices stating that it is + released under this License and any conditions added under section + 7. This requirement modifies the requirement in section 4 to + "keep intact all notices". + + c) You must license the entire work, as a whole, under this + License to anyone who comes into possession of a copy. This + License will therefore apply, along with any applicable section 7 + additional terms, to the whole of the work, and all its parts, + regardless of how they are packaged. This License gives no + permission to license the work in any other way, but it does not + invalidate such permission if you have separately received it. + + d) If the work has interactive user interfaces, each must display + Appropriate Legal Notices; however, if the Program has interactive + interfaces that do not display Appropriate Legal Notices, your + work need not make them do so. 
+ + A compilation of a covered work with other separate and independent +works, which are not by their nature extensions of the covered work, +and which are not combined with it such as to form a larger program, +in or on a volume of a storage or distribution medium, is called an +"aggregate" if the compilation and its resulting copyright are not +used to limit the access or legal rights of the compilation's users +beyond what the individual works permit. Inclusion of a covered work +in an aggregate does not cause this License to apply to the other +parts of the aggregate. + + 6. Conveying Non-Source Forms. + + You may convey a covered work in object code form under the terms +of sections 4 and 5, provided that you also convey the +machine-readable Corresponding Source under the terms of this License, +in one of these ways: + + a) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by the + Corresponding Source fixed on a durable physical medium + customarily used for software interchange. + + b) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by a + written offer, valid for at least three years and valid for as + long as you offer spare parts or customer support for that product + model, to give anyone who possesses the object code either (1) a + copy of the Corresponding Source for all the software in the + product that is covered by this License, on a durable physical + medium customarily used for software interchange, for a price no + more than your reasonable cost of physically performing this + conveying of source, or (2) access to copy the + Corresponding Source from a network server at no charge. + + c) Convey individual copies of the object code with a copy of the + written offer to provide the Corresponding Source. This + alternative is allowed only occasionally and noncommercially, and + only if you received the object code with such an offer, in accord + with subsection 6b. + + d) Convey the object code by offering access from a designated + place (gratis or for a charge), and offer equivalent access to the + Corresponding Source in the same way through the same place at no + further charge. You need not require recipients to copy the + Corresponding Source along with the object code. If the place to + copy the object code is a network server, the Corresponding Source + may be on a different server (operated by you or a third party) + that supports equivalent copying facilities, provided you maintain + clear directions next to the object code saying where to find the + Corresponding Source. Regardless of what server hosts the + Corresponding Source, you remain obligated to ensure that it is + available for as long as needed to satisfy these requirements. + + e) Convey the object code using peer-to-peer transmission, provided + you inform other peers where the object code and Corresponding + Source of the work are being offered to the general public at no + charge under subsection 6d. + + A separable portion of the object code, whose source code is excluded +from the Corresponding Source as a System Library, need not be +included in conveying the object code work. + + A "User Product" is either (1) a "consumer product", which means any +tangible personal property which is normally used for personal, family, +or household purposes, or (2) anything designed or sold for incorporation +into a dwelling. 
In determining whether a product is a consumer product, +doubtful cases shall be resolved in favor of coverage. For a particular +product received by a particular user, "normally used" refers to a +typical or common use of that class of product, regardless of the status +of the particular user or of the way in which the particular user +actually uses, or expects or is expected to use, the product. A product +is a consumer product regardless of whether the product has substantial +commercial, industrial or non-consumer uses, unless such uses represent +the only significant mode of use of the product. + + "Installation Information" for a User Product means any methods, +procedures, authorization keys, or other information required to install +and execute modified versions of a covered work in that User Product from +a modified version of its Corresponding Source. The information must +suffice to ensure that the continued functioning of the modified object +code is in no case prevented or interfered with solely because +modification has been made. + + If you convey an object code work under this section in, or with, or +specifically for use in, a User Product, and the conveying occurs as +part of a transaction in which the right of possession and use of the +User Product is transferred to the recipient in perpetuity or for a +fixed term (regardless of how the transaction is characterized), the +Corresponding Source conveyed under this section must be accompanied +by the Installation Information. But this requirement does not apply +if neither you nor any third party retains the ability to install +modified object code on the User Product (for example, the work has +been installed in ROM). + + The requirement to provide Installation Information does not include a +requirement to continue to provide support service, warranty, or updates +for a work that has been modified or installed by the recipient, or for +the User Product in which it has been modified or installed. Access to a +network may be denied when the modification itself materially and +adversely affects the operation of the network or violates the rules and +protocols for communication across the network. + + Corresponding Source conveyed, and Installation Information provided, +in accord with this section must be in a format that is publicly +documented (and with an implementation available to the public in +source code form), and must require no special password or key for +unpacking, reading or copying. + + 7. Additional Terms. + + "Additional permissions" are terms that supplement the terms of this +License by making exceptions from one or more of its conditions. +Additional permissions that are applicable to the entire Program shall +be treated as though they were included in this License, to the extent +that they are valid under applicable law. If additional permissions +apply only to part of the Program, that part may be used separately +under those permissions, but the entire Program remains governed by +this License without regard to the additional permissions. + + When you convey a copy of a covered work, you may at your option +remove any additional permissions from that copy, or from any part of +it. (Additional permissions may be written to require their own +removal in certain cases when you modify the work.) You may place +additional permissions on material, added by you to a covered work, +for which you have or can give appropriate copyright permission. 
+ + Notwithstanding any other provision of this License, for material you +add to a covered work, you may (if authorized by the copyright holders of +that material) supplement the terms of this License with terms: + + a) Disclaiming warranty or limiting liability differently from the + terms of sections 15 and 16 of this License; or + + b) Requiring preservation of specified reasonable legal notices or + author attributions in that material or in the Appropriate Legal + Notices displayed by works containing it; or + + c) Prohibiting misrepresentation of the origin of that material, or + requiring that modified versions of such material be marked in + reasonable ways as different from the original version; or + + d) Limiting the use for publicity purposes of names of licensors or + authors of the material; or + + e) Declining to grant rights under trademark law for use of some + trade names, trademarks, or service marks; or + + f) Requiring indemnification of licensors and authors of that + material by anyone who conveys the material (or modified versions of + it) with contractual assumptions of liability to the recipient, for + any liability that these contractual assumptions directly impose on + those licensors and authors. + + All other non-permissive additional terms are considered "further +restrictions" within the meaning of section 10. If the Program as you +received it, or any part of it, contains a notice stating that it is +governed by this License along with a term that is a further +restriction, you may remove that term. If a license document contains +a further restriction but permits relicensing or conveying under this +License, you may add to a covered work material governed by the terms +of that license document, provided that the further restriction does +not survive such relicensing or conveying. + + If you add terms to a covered work in accord with this section, you +must place, in the relevant source files, a statement of the +additional terms that apply to those files, or a notice indicating +where to find the applicable terms. + + Additional terms, permissive or non-permissive, may be stated in the +form of a separately written license, or stated as exceptions; +the above requirements apply either way. + + 8. Termination. + + You may not propagate or modify a covered work except as expressly +provided under this License. Any attempt otherwise to propagate or +modify it is void, and will automatically terminate your rights under +this License (including any patent licenses granted under the third +paragraph of section 11). + + However, if you cease all violation of this License, then your +license from a particular copyright holder is reinstated (a) +provisionally, unless and until the copyright holder explicitly and +finally terminates your license, and (b) permanently, if the copyright +holder fails to notify you of the violation by some reasonable means +prior to 60 days after the cessation. + + Moreover, your license from a particular copyright holder is +reinstated permanently if the copyright holder notifies you of the +violation by some reasonable means, this is the first time you have +received notice of violation of this License (for any work) from that +copyright holder, and you cure the violation prior to 30 days after +your receipt of the notice. + + Termination of your rights under this section does not terminate the +licenses of parties who have received copies or rights from you under +this License. 
If your rights have been terminated and not permanently +reinstated, you do not qualify to receive new licenses for the same +material under section 10. + + 9. Acceptance Not Required for Having Copies. + + You are not required to accept this License in order to receive or +run a copy of the Program. Ancillary propagation of a covered work +occurring solely as a consequence of using peer-to-peer transmission +to receive a copy likewise does not require acceptance. However, +nothing other than this License grants you permission to propagate or +modify any covered work. These actions infringe copyright if you do +not accept this License. Therefore, by modifying or propagating a +covered work, you indicate your acceptance of this License to do so. + + 10. Automatic Licensing of Downstream Recipients. + + Each time you convey a covered work, the recipient automatically +receives a license from the original licensors, to run, modify and +propagate that work, subject to this License. You are not responsible +for enforcing compliance by third parties with this License. + + An "entity transaction" is a transaction transferring control of an +organization, or substantially all assets of one, or subdividing an +organization, or merging organizations. If propagation of a covered +work results from an entity transaction, each party to that +transaction who receives a copy of the work also receives whatever +licenses to the work the party's predecessor in interest had or could +give under the previous paragraph, plus a right to possession of the +Corresponding Source of the work from the predecessor in interest, if +the predecessor has it or can get it with reasonable efforts. + + You may not impose any further restrictions on the exercise of the +rights granted or affirmed under this License. For example, you may +not impose a license fee, royalty, or other charge for exercise of +rights granted under this License, and you may not initiate litigation +(including a cross-claim or counterclaim in a lawsuit) alleging that +any patent claim is infringed by making, using, selling, offering for +sale, or importing the Program or any portion of it. + + 11. Patents. + + A "contributor" is a copyright holder who authorizes use under this +License of the Program or a work on which the Program is based. The +work thus licensed is called the contributor's "contributor version". + + A contributor's "essential patent claims" are all patent claims +owned or controlled by the contributor, whether already acquired or +hereafter acquired, that would be infringed by some manner, permitted +by this License, of making, using, or selling its contributor version, +but do not include claims that would be infringed only as a +consequence of further modification of the contributor version. For +purposes of this definition, "control" includes the right to grant +patent sublicenses in a manner consistent with the requirements of +this License. + + Each contributor grants you a non-exclusive, worldwide, royalty-free +patent license under the contributor's essential patent claims, to +make, use, sell, offer for sale, import and otherwise run, modify and +propagate the contents of its contributor version. + + In the following three paragraphs, a "patent license" is any express +agreement or commitment, however denominated, not to enforce a patent +(such as an express permission to practice a patent or covenant not to +sue for patent infringement). 
To "grant" such a patent license to a +party means to make such an agreement or commitment not to enforce a +patent against the party. + + If you convey a covered work, knowingly relying on a patent license, +and the Corresponding Source of the work is not available for anyone +to copy, free of charge and under the terms of this License, through a +publicly available network server or other readily accessible means, +then you must either (1) cause the Corresponding Source to be so +available, or (2) arrange to deprive yourself of the benefit of the +patent license for this particular work, or (3) arrange, in a manner +consistent with the requirements of this License, to extend the patent +license to downstream recipients. "Knowingly relying" means you have +actual knowledge that, but for the patent license, your conveying the +covered work in a country, or your recipient's use of the covered work +in a country, would infringe one or more identifiable patents in that +country that you have reason to believe are valid. + + If, pursuant to or in connection with a single transaction or +arrangement, you convey, or propagate by procuring conveyance of, a +covered work, and grant a patent license to some of the parties +receiving the covered work authorizing them to use, propagate, modify +or convey a specific copy of the covered work, then the patent license +you grant is automatically extended to all recipients of the covered +work and works based on it. + + A patent license is "discriminatory" if it does not include within +the scope of its coverage, prohibits the exercise of, or is +conditioned on the non-exercise of one or more of the rights that are +specifically granted under this License. You may not convey a covered +work if you are a party to an arrangement with a third party that is +in the business of distributing software, under which you make payment +to the third party based on the extent of your activity of conveying +the work, and under which the third party grants, to any of the +parties who would receive the covered work from you, a discriminatory +patent license (a) in connection with copies of the covered work +conveyed by you (or copies made from those copies), or (b) primarily +for and in connection with specific products or compilations that +contain the covered work, unless you entered into that arrangement, +or that patent license was granted, prior to 28 March 2007. + + Nothing in this License shall be construed as excluding or limiting +any implied license or other defenses to infringement that may +otherwise be available to you under applicable patent law. + + 12. No Surrender of Others' Freedom. + + If conditions are imposed on you (whether by court order, agreement or +otherwise) that contradict the conditions of this License, they do not +excuse you from the conditions of this License. If you cannot convey a +covered work so as to satisfy simultaneously your obligations under this +License and any other pertinent obligations, then as a consequence you may +not convey it at all. For example, if you agree to terms that obligate you +to collect a royalty for further conveying from those to whom you convey +the Program, the only way you could satisfy both those terms and this +License would be to refrain entirely from conveying the Program. + + 13. Use with the GNU Affero General Public License. 
+ + Notwithstanding any other provision of this License, you have +permission to link or combine any covered work with a work licensed +under version 3 of the GNU Affero General Public License into a single +combined work, and to convey the resulting work. The terms of this +License will continue to apply to the part which is the covered work, +but the special requirements of the GNU Affero General Public License, +section 13, concerning interaction through a network will apply to the +combination as such. + + 14. Revised Versions of this License. + + The Free Software Foundation may publish revised and/or new versions of +the GNU General Public License from time to time. Such new versions will +be similar in spirit to the present version, but may differ in detail to +address new problems or concerns. + + Each version is given a distinguishing version number. If the +Program specifies that a certain numbered version of the GNU General +Public License "or any later version" applies to it, you have the +option of following the terms and conditions either of that numbered +version or of any later version published by the Free Software +Foundation. If the Program does not specify a version number of the +GNU General Public License, you may choose any version ever published +by the Free Software Foundation. + + If the Program specifies that a proxy can decide which future +versions of the GNU General Public License can be used, that proxy's +public statement of acceptance of a version permanently authorizes you +to choose that version for the Program. + + Later license versions may give you additional or different +permissions. However, no additional obligations are imposed on any +author or copyright holder as a result of your choosing to follow a +later version. + + 15. Disclaimer of Warranty. + + THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY +APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT +HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY +OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, +THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR +PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM +IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF +ALL NECESSARY SERVICING, REPAIR OR CORRECTION. + + 16. Limitation of Liability. + + IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING +WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS +THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY +GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE +USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF +DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD +PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), +EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF +SUCH DAMAGES. + + 17. Interpretation of Sections 15 and 16. + + If the disclaimer of warranty and limitation of liability provided +above cannot be given local legal effect according to their terms, +reviewing courts shall apply local law that most closely approximates +an absolute waiver of all civil liability in connection with the +Program, unless a warranty or assumption of liability accompanies a +copy of the Program in return for a fee. 
+ + END OF TERMS AND CONDITIONS + + How to Apply These Terms to Your New Programs + + If you develop a new program, and you want it to be of the greatest +possible use to the public, the best way to achieve this is to make it +free software which everyone can redistribute and change under these terms. + + To do so, attach the following notices to the program. It is safest +to attach them to the start of each source file to most effectively +state the exclusion of warranty; and each file should have at least +the "copyright" line and a pointer to where the full notice is found. + + + Copyright (C) + + This program is free software: you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation, either version 3 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program. If not, see . + +Also add information on how to contact you by electronic and paper mail. + + If the program does terminal interaction, make it output a short +notice like this when it starts in an interactive mode: + + Copyright (C) + This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'. + This is free software, and you are welcome to redistribute it + under certain conditions; type `show c' for details. + +The hypothetical commands `show w' and `show c' should show the appropriate +parts of the General Public License. Of course, your program's commands +might be different; for a GUI interface, you would use an "about box". + + You should also get your employer (if you work as a programmer) or school, +if any, to sign a "copyright disclaimer" for the program, if necessary. +For more information on this, and how to apply and follow the GNU GPL, see +. + + The GNU General Public License does not permit incorporating your program +into proprietary programs. If your program is a subroutine library, you +may consider it more useful to permit linking proprietary applications with +the library. If this is what you want to do, use the GNU Lesser General +Public License instead of this License. But first, please read +. diff --git a/LICENSE.mit b/LICENSE.mit new file mode 100644 index 0000000..9c78d66 --- /dev/null +++ b/LICENSE.mit @@ -0,0 +1,32 @@ +/* + Copyright (c) 2013 Marek Majkowski + + Permission is hereby granted, free of charge, to any person obtaining a copy + of this software and associated documentation files (the "Software"), to deal + in the Software without restriction, including without limitation the rights + to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + copies of the Software, and to permit persons to whom the Software is + furnished to do so, subject to the following conditions: + + The above copyright notice and this permission notice shall be included in + all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE + AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + THE SOFTWARE. + + + Original location: + https://github.com/majek/csiphash/ + + Solution inspired by code from: + Samuel Neves (supercop/crypto_auth/siphash24/little) + djb (supercop/crypto_auth/siphash24/little2) + Jean-Philippe Aumasson (https://131002.net/siphash/siphash24.c) +*/ + + diff --git a/LICENSE.openssl b/LICENSE.openssl new file mode 100644 index 0000000..1625bce --- /dev/null +++ b/LICENSE.openssl @@ -0,0 +1,11 @@ +Additional permission under GPLv3 section 7: + +If you modify this Program, or any covered work, by linking or +combining it with OpenSSL, or a modified version of OpenSSL licensed +under the OpenSSL license +(https://www.openssl.org/source/license.html), the licensors of this +Program grant you additional permission to convey the resulting +work. Corresponding Source for a non-source form of such a +combination shall include the source code for the parts that are +licensed under the OpenSSL license as well as that of the covered +work. diff --git a/Makefile.am b/Makefile.am new file mode 100644 index 0000000..1e88a38 --- /dev/null +++ b/Makefile.am @@ -0,0 +1,2479 @@ +# look for included m4 files in the ./m4/ directory +ACLOCAL_AMFLAGS = -I m4 +NULLSTRING := +SPACE := $(NULLSTRING) # the space is between the ) and the # +COLON := $(NULLSTRING):# a colon +QUOTE := $(NULLSTRING)"# a double quote" + +#------------------------ +# Compiler Flags +#------------------------ +# +# First, we setup the definitions from configure.ac +# + +PYTHON := python3 +BUILDNUM := $(shell $(srcdir)/buildnum.py) +NQBUILDNUM := $(subst \,,$(subst $(QUOTE),,$(BUILDNUM))) +DEBUG_DEFINES = @debug_defs@ +DEBUG_CFLAGS = @debug_cflags@ +DEBUG_CXXFLAGS = @debug_cxxflags@ +GCCSEC_CFLAGS = @gccsec_cflags@ +ASAN_CFLAGS = @asan_cflags@ +MSAN_CFLAGS = @msan_cflags@ +TSAN_CFLAGS = @tsan_cflags@ +UBSAN_CFLAGS = @ubsan_cflags@ + +SYSTEMD_DEFINES = @systemd_defs@ + +CMOCKA_INCLUDES = $(CMOCKA_CFLAGS) + +PROFILING_DEFINES = @profiling_defs@ +SYSTEMTAP_DEFINES = @systemtap_defs@ +NSPR_INCLUDES = $(NSPR_CFLAGS) + +# Rust inclusions. +if RUST_ENABLE +# Rust enabled +RUST_ON = 1 +CARGO_FLAGS = @cargo_defs@ +RUSTC_FLAGS = @asan_rust_defs@ @msan_rust_defs@ @tsan_rust_defs@ @debug_rust_defs@ +RUST_LDFLAGS = -ldl -lpthread -lgcc_s -lc -lm -lrt -lutil +RUST_DEFINES = -DRUST_ENABLE +if RUST_ENABLE_OFFLINE +RUST_OFFLINE = --locked --offline +else +RUST_OFFLINE = +endif +else +# Rust disabled +RUST_ON = 0 +CARGO_FLAGS = +RUSTC_FLAGS = +RUST_LDFLAGS = +RUST_DEFINES = +endif + +if ENABLE_PERL +PERL_ON = 1 +else +PERL_ON = 0 +endif + +if ENABLE_LEGACY +LEGACY_ON = 1 +else +LEGACY_ON = 0 +endif + +if CLANG_ENABLE +CLANG_ON = 1 +CLANG_LDFLAGS = -latomic +else +CLANG_ON = 0 +CLANG_LDFLAGS = +endif + +# We can't add the lfds includes all the time as they have a "bomb" in them that +# prevents compilation on unsupported hardware arches. 
+if ATOMIC_QUEUE_OPERATIONS +SDS_INCLUDES = -I$(srcdir)/src/libsds/include/ -I$(srcdir)/src/libsds/external/ -I$(srcdir)/src/libsds/external/liblfds711/inc/ +else +SDS_INCLUDES = -I$(srcdir)/src/libsds/include/ -I$(srcdir)/src/libsds/external/ +endif + +SVRCORE_INCLUDES = -I$(srcdir)/src/svrcore/src/ + +# the -U undefines these symbols - should use the corresponding DS_ ones instead - see configure.ac +DS_DEFINES = -DBUILD_NUM=$(BUILDNUM) -DVENDOR="\"$(vendor)\"" -DBRAND="\"$(brand)\"" -DCAPBRAND="\"$(capbrand)\"" \ + -UPACKAGE_VERSION -UPACKAGE_TARNAME -UPACKAGE_STRING -UPACKAGE_BUGREPORT +DS_INCLUDES = -I$(srcdir)/ldap/include -I$(srcdir)/ldap/servers/slapd -I$(srcdir)/include -I. + + +if enable_asan +ASAN_ON = 1 +SANITIZER = ASAN +else +ASAN_ON = 0 +endif + +if enable_msan +MSAN_ON = 1 +SANITIZER = MSAN +else +MSAN_ON = 0 +endif + +if enable_tsan +TSAN_ON = 1 +SANITIZER = TSAN +else +TSAN_ON = 0 +endif + +if enable_ubsan +UBSAN_ON = 1 +SANITIZER = UBSAN +else +UBSAN_ON = 0 +endif + +if with_systemd +WITH_SYSTEMD = 1 +else +WITH_SYSTEMD = 0 +endif + +# these paths are dependent on the settings of prefix and exec_prefix which may be specified +# at make time. So we cannot use AC_DEFINE in the configure.ac because that would set the +# values prior to their being defined. Defining them here ensures that they are properly +# expanded before use. See create_instance.h for more details. The quoting ensures that +# the values are quoted for the shell command, and the value expands to a quoted string +# value in the header file e.g. +# #define LOCALSTATEDIR "/var" +# without the quotes, it would be +# #define LOCALSTATEDIR /var +# which would be an error +PATH_DEFINES = -DLOCALSTATEDIR="\"$(localstatedir)\"" -DSYSCONFDIR="\"$(sysconfdir)\"" \ + -DLIBDIR="\"$(libdir)\"" -DBINDIR="\"$(bindir)\"" \ + -DDATADIR="\"$(datadir)\"" -DDOCDIR="\"$(docdir)\"" \ + -DSBINDIR="\"$(sbindir)\"" -DPLUGINDIR="\"$(serverplugindir)\"" \ + -DTEMPLATEDIR="\"$(sampledatadir)\"" -DSYSTEMSCHEMADIR="\"$(systemschemadir)\"" + +# Now that we have all our defines in place, setup the CPPFLAGS + +# These flags are the "must have" for all components +AM_CPPFLAGS = $(DEBUG_DEFINES) $(PROFILING_DEFINES) $(SYSTEMTAP_DEFINES) $(RUST_DEFINES) +AM_CFLAGS = $(DEBUG_CFLAGS) $(GCCSEC_CFLAGS) $(ASAN_CFLAGS) $(MSAN_CFLAGS) $(TSAN_CFLAGS) $(UBSAN_CFLAGS) +AM_CXXFLAGS = $(DEBUG_CXXFLAGS) $(GCCSEC_CFLAGS) $(ASAN_CFLAGS) $(MSAN_CFLAGS) $(TSAN_CFLAGS) $(UBSAN_CFLAGS) +# Flags for Directory Server +# WARNING: This needs a clean up, because slap.h is a horrible mess and is publically exposed! +DSPLUGIN_CPPFLAGS = $(DS_DEFINES) $(DS_INCLUDES) $(PATH_DEFINES) $(SYSTEMD_DEFINES) @openldap_inc@ $(NSS_CFLAGS) $(NSPR_INCLUDES) $(SYSTEMD_CFLAGS) +# This should give access to internal headers only for tests!!! 
+DSINTERNAL_CPPFLAGS = -I$(srcdir)/include/ldaputil +# Flags for Datastructure Library +SDS_CPPFLAGS = $(SDS_INCLUDES) $(NSPR_INCLUDES) + +#------------------------ +# Linker Flags +#------------------------ +CMOCKA_LINKS = $(CMOCKA_LIBS) +PROFILING_LINKS = @profiling_links@ + +NSPR_LINK = $(NSPR_LIBS) +NSS_LINK = $(NSS_LIBS) + +# with recent versions of openldap - if you link with both ldap_r and ldap, the +# shared lib _fini for one will stomp on the other, and the program will crash +LDAPSDK_LINK_NOTHR = @openldap_lib@ -lldap@ol_libver@ @ldap_lib_ldif@ -llber@ol_libver@ +LDAPSDK_LINK = @openldap_lib@ -lldap_r@ol_libver@ @ldap_lib_ldif@ -llber@ol_libver@ +ldaplib = @ldaplib@ +ldaplib_defs = @ldaplib_defs@ + +DB_LINK = @db_lib@ -ldb-@db_libver@ +SASL_LINK = $(SASL_LIBS) +NETSNMP_LINK = @netsnmp_lib@ @netsnmp_link@ +PAM_LINK = -lpam +EVENT_LINK = $(EVENT_LIBS) +PW_CRACK_LINK = -lcrack + +LIBSOCKET=@LIBSOCKET@ +LIBNSL=@LIBNSL@ +LIBDL=@LIBDL@ +LIBCSTD=@LIBCSTD@ +LIBCRUN=@LIBCRUN@ +THREADLIB=@THREADLIB@ +LIBCRYPT=@LIBCRYPT@ + +# We need to make sure that libpthread is linked before libc on HP-UX. +if HPUX +AM_LDFLAGS = -lpthread +else +#AM_LDFLAGS = -Wl,-z,defs +AM_LDFLAGS = $(PW_CRACK_LINK) $(RUST_LDFLAGS) $(ASAN_CFLAGS) $(MSAN_CFLAGS) $(TSAN_CFLAGS) $(UBSAN_CFLAGS) $(PROFILING_LINKS) $(CLANG_LDFLAGS) +endif #end hpux + +# https://www.gnu.org/software/libtool/manual/html_node/Updating-version-info.html#Updating-version-info +# So, libtool library versions are described by three integers: +# +# current +# +# The most recent interface number that this library implements. +# revision +# +# The implementation number of the current interface. +# age +# +# The difference between the newest and oldest interfaces that this library implements. In other words, the library implements all the interface numbers in the range from number current - age to current. +# +# Here are a set of rules to help you update your library version information: +# +# Start with version information of ‘0:0:0’ for each libtool library. +# Update the version information only immediately before a public release of your software. More frequent updates are unnecessary, and only guarantee that the current interface number gets larger faster. +# If the library source code has changed at all since the last update, then increment revision (‘c:r:a’ becomes ‘c:r+1:a’). +# If any interfaces have been added, removed, or changed since the last update, increment current, and set revision to 0. +# If any interfaces have been added since the last public release, then increment age. +# If any interfaces have been removed or changed since the last public release, then set age to 0. 
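The version-info rules quoted in the comment above are easy to misapply by hand. As a rough illustration only, and not part of the Makefile or the build, the same bookkeeping can be written out in Python; the function name and the boolean flags below are invented for this sketch, while the update rules themselves come straight from the comment.

# Illustrative sketch of the libtool current:revision:age update rules.
# Hypothetical helper; not part of 389-ds-base.
def next_version_info(current, revision, age,
                      code_changed=False, iface_changed=False,
                      iface_added=False, iface_removed=False):
    if code_changed:
        revision += 1              # c:r:a becomes c:r+1:a
    if iface_added or iface_removed or iface_changed:
        current += 1               # interface set changed: bump current...
        revision = 0               # ...and reset revision
    if iface_added:
        age += 1                   # new interfaces extend the supported range
    if iface_removed or iface_changed:
        age = 0                    # compatibility range collapses
    return current, revision, age

# Example: starting from 0:0:0, a release that changed code and added interfaces:
print(next_version_info(0, 0, 0, code_changed=True, iface_added=True))  # (1, 0, 1)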
+ +SDS_LDFLAGS = $(NSPR_LINK) $(NSS_LINK) -lpthread -version-info 0:0:0 +SLAPD_LDFLAGS = -version-info 1:0:1 + + +#------------------------ +# Generated Sources +#------------------------ +BUILT_SOURCES = dberrstrs.h \ + $(POLICY_FC) + +if RUST_ENABLE +BUILT_SOURCES += rust-slapi-private.h rust-nsslapd-private.h +endif + +if enable_posix_winsync +LIBPOSIX_WINSYNC_PLUGIN = libposix-winsync-plugin.la +endif + +CLEANFILES = dberrstrs.h ns-slapd.properties \ + ldap/admin/src/scripts/template-dbverify ldap/admin/src/template-initconfig \ + ldap/admin/src/scripts/dscreate.map ldap/admin/src/scripts/remove-ds.pl \ + ldap/admin/src/scripts/DSCreate.pm ldap/admin/src/scripts/DSMigration.pm \ + ldap/admin/src/scripts/DSUpdate.pm ldap/admin/src/scripts/dsupdate.map \ + ldap/admin/src/scripts/dsorgentries.map ldap/admin/src/scripts/migrate-ds.pl \ + ldap/admin/src/scripts/Migration.pm ldap/admin/src/scripts/SetupDialogs.pm \ + ldap/admin/src/scripts/setup-ds.pl ldap/admin/src/scripts/setup-ds.res \ + ldap/admin/src/scripts/start-dirsrv ldap/admin/src/scripts/stop-dirsrv \ + ldap/admin/src/scripts/restart-dirsrv ldap/admin/src/scripts/Setup.pm \ + ldap/admin/src/scripts/status-dirsrv \ + ldap/admin/src/scripts/template-bak2db ldap/admin/src/scripts/template-bak2db.pl \ + ldap/admin/src/scripts/template-db2bak ldap/admin/src/scripts/template-db2bak.pl \ + ldap/admin/src/scripts/template-db2index ldap/admin/src/scripts/template-db2index.pl \ + ldap/admin/src/scripts/template-db2ldif ldap/admin/src/scripts/template-db2ldif.pl \ + ldap/admin/src/scripts/template-ldif2db ldap/admin/src/scripts/template-ldif2db.pl \ + ldap/admin/src/scripts/template-ldif2ldap ldap/admin/src/scripts/template-monitor \ + ldap/admin/src/scripts/template-ns-accountstatus.pl ldap/admin/src/scripts/template-ns-activate.pl \ + ldap/admin/src/scripts/template-ns-inactivate.pl ldap/admin/src/scripts/template-ns-newpwpolicy.pl \ + ldap/admin/src/scripts/template-restart-slapd ldap/admin/src/scripts/template-restoreconfig \ + ldap/admin/src/scripts/template-saveconfig ldap/admin/src/scripts/template-start-slapd \ + ldap/admin/src/scripts/template-stop-slapd ldap/admin/src/scripts/template-suffix2instance \ + ldap/admin/src/scripts/template-upgradedb \ + ldap/admin/src/scripts/template-upgradednformat \ + ldap/admin/src/scripts/template-usn-tombstone-cleanup.pl \ + ldap/admin/src/scripts/template-verify-db.pl \ + ldap/admin/src/scripts/template-vlvindex ldap/admin/src/scripts/DSUtil.pm \ + ldap/ldif/template-baseacis.ldif ldap/ldif/template-bitwise.ldif ldap/ldif/template-country.ldif \ + ldap/ldif/template-dnaplugin.ldif ldap/ldif/template-domain.ldif ldap/ldif/template-dse.ldif \ + ldap/ldif/template-dse-minimal.ldif \ + ldap/ldif/template-ldapi-autobind.ldif ldap/ldif/template-ldapi-default.ldif \ + ldap/ldif/template-ldapi.ldif ldap/ldif/template-locality.ldif ldap/ldif/template-org.ldif \ + ldap/ldif/template-orgunit.ldif ldap/ldif/template-pampta.ldif ldap/ldif/template-sasl.ldif \ + ldap/ldif/template-state.ldif ldap/ldif/template-suffix-db.ldif \ + ldap/admin/src/scripts/bak2db ldap/admin/src/scripts/db2bak ldap/admin/src/scripts/upgradedb \ + ldap/admin/src/scripts/db2index ldap/admin/src/scripts/db2ldif \ + ldap/admin/src/scripts/dn2rdn ldap/admin/src/scripts/ldif2db \ + ldap/admin/src/scripts/ldif2ldap ldap/admin/src/scripts/monitor \ + ldap/admin/src/scripts/restoreconfig ldap/admin/src/scripts/saveconfig \ + ldap/admin/src/scripts/suffix2instance \ + ldap/admin/src/scripts/upgradednformat ldap/admin/src/scripts/vlvindex \ + 
ldap/admin/src/scripts/bak2db.pl ldap/admin/src/scripts/db2bak.pl \ + ldap/admin/src/scripts/db2index.pl ldap/admin/src/scripts/db2ldif.pl \ + ldap/admin/src/scripts/fixup-linkedattrs.pl ldap/admin/src/scripts/fixup-memberof.pl \ + ldap/admin/src/scripts/cleanallruv.pl ldap/admin/src/scripts/ldif2db.pl \ + ldap/admin/src/scripts/ns-accountstatus.pl ldap/admin/src/scripts/ns-activate.pl \ + ldap/admin/src/scripts/ns-inactivate.pl ldap/admin/src/scripts/ns-newpwpolicy.pl \ + ldap/admin/src/scripts/schema-reload.pl ldap/admin/src/scripts/syntax-validate.pl \ + ldap/admin/src/scripts/usn-tombstone-cleanup.pl ldap/admin/src/scripts/verify-db.pl \ + ldap/admin/src/scripts/ds_selinux_port_query ldap/admin/src/scripts/ds_selinux_enabled \ + ldap/admin/src/scripts/dbverify ldap/admin/src/scripts/readnsstate \ + doxyfile.stamp ldap/admin/src/scripts/dbmon.sh \ + $(NULL) + +if RUST_ENABLE +CLEANFILES += rust-slapi-private.h +endif + +clean-local: + -rm -rf dist + -rm -rf $(abs_top_builddir)/html + -rm -rf $(abs_top_builddir)/man/man3 +if RUST_ENABLE + CARGO_TARGET_DIR=$(abs_top_builddir)/rs cargo clean --manifest-path=$(srcdir)/src/libsds/Cargo.toml +endif + +dberrstrs.h: Makefile + perl $(srcdir)/ldap/servers/slapd/mkDBErrStrs.pl -i @db_incdir@ -o . + + +#------------------------ +# Install Paths +#------------------------ +prefixdir = @prefixdir@ +configdir = $(sysconfdir)@configdir@ +sampledatadir = $(datadir)@sampledatadir@ +systemschemadir = $(datadir)@systemschemadir@ +propertydir = $(datadir)@propertydir@ +schemadir = $(sysconfdir)@schemadir@ +serverdir = $(libdir)/@serverdir@ +serverplugindir = $(libdir)@serverplugindir@ +taskdir = $(datadir)@scripttemplatedir@ +systemdsystemunitdir = @with_systemdsystemunitdir@ +systemdsystemunitdropindir = @with_systemdsystemunitdir@/$(PACKAGE_NAME)@.service.d +systemdsystemconfdir = @with_systemdsystemconfdir@ +systemdgroupname = @with_systemdgroupname@ +initdir = @initdir@ +initconfigdir = $(sysconfdir)@initconfigdir@ +instconfigdir = @instconfigdir@ +perldir = $(libdir)@perldir@ +pythondir = $(libdir)@pythondir@ +infdir = $(datadir)@infdir@ +mibdir = $(datadir)@mibdir@ +updatedir = $(datadir)@updatedir@ +pkgconfigdir = $(libdir)/pkgconfig +serverincdir = $(includedir)/@serverincdir@ +gdbautoloaddir = $(prefixdir)/share/gdb/auto-load$(sbindir) +cockpitdir = $(prefixdir)/share/cockpit@cockpitdir@ +metainfodir = $(prefixdir)/share/metainfo/389-console +tmpfiles_d = @tmpfiles_d@ + +# This has to be hardcoded to /lib - $libdir changes between lib/lib64, but +# sysctl.d is always in /lib. +sysctldir = @prefixdir@/lib/sysctl.d + +defaultuser=@defaultuser@ +defaultgroup=@defaultgroup@ + +#------------------------ +# Build Products +#------------------------ +sbin_PROGRAMS = ns-slapd ldap-agent + +bin_PROGRAMS = dbscan \ + ldclt \ + pwdhash +if ENABLE_LEGACY +bin_PROGRAMS += \ + infadd \ + ldif \ + migratecred \ + mmldif \ + rsearch +endif + +# ---------------------------------------------------------------------------------------- +# This odd looking definition is to keep the libraries in ORDER that they are needed. rsds +# is needed by sds, which is needed by ns. 
So we have a blank LTLIB, then append in order +# based on defines +# ---------------------------------------------------------------------------------------- + +server_LTLIBRARIES = libsds.la libslapd.la libldaputil.la libns-dshttpd.la + +lib_LTLIBRARIES = libsvrcore.la + +# this is how to add optional plugins +if enable_pam_passthru +LIBPAM_PASSTHRU_PLUGIN = libpam-passthru-plugin.la +enable_pam_passthru = 1 +endif +if enable_dna +LIBDNA_PLUGIN = libdna-plugin.la +enable_dna = 1 +endif + +if enable_bitwise +LIBBITWISE_PLUGIN = libbitwise-plugin.la +enable_bitwise = 1 +endif + +if enable_presence +LIBPRESENCE_PLUGIN = libpresence-plugin.la +LIBPRESENCE_SCHEMA = $(srcdir)/ldap/schema/10presence.ldif +enable_presence = on +else +enable_presence = off +endif + +if enable_acctpolicy +LIBACCTPOLICY_PLUGIN = libacctpolicy-plugin.la +LIBACCTPOLICY_SCHEMA = $(srcdir)/ldap/schema/60acctpolicy.ldif +enable_acctpolicy = 1 +endif + +serverplugin_LTLIBRARIES = libacl-plugin.la \ + libaddn-plugin.la \ + libattr-unique-plugin.la \ + libautomember-plugin.la libback-ldbm.la libchainingdb-plugin.la \ + libcollation-plugin.la libcos-plugin.la libderef-plugin.la \ + libpbe-plugin.la libdistrib-plugin.la libhttp-client-plugin.la \ + liblinkedattrs-plugin.la libmanagedentries-plugin.la \ + libmemberof-plugin.la libpassthru-plugin.la libpwdstorage-plugin.la \ + libcontentsync-plugin.la \ + libreferint-plugin.la libreplication-plugin.la libretrocl-plugin.la \ + libroles-plugin.la libstatechange-plugin.la libsyntax-plugin.la \ + libviews-plugin.la libschemareload-plugin.la libusn-plugin.la \ + libacctusability-plugin.la librootdn-access-plugin.la \ + libwhoami-plugin.la $(LIBACCTPOLICY_PLUGIN) \ + $(LIBPAM_PASSTHRU_PLUGIN) $(LIBDNA_PLUGIN) \ + $(LIBBITWISE_PLUGIN) $(LIBPRESENCE_PLUGIN) $(LIBPOSIX_WINSYNC_PLUGIN) + +noinst_LIBRARIES = libavl.a + +dist_noinst_HEADERS = \ + include/i18n.h \ + include/netsite.h \ + include/base/crit.h \ + include/base/dbtbase.h \ + include/base/ereport.h \ + include/base/file.h \ + include/base/fsmutex.h \ + include/base/plist.h \ + include/base/pool.h \ + include/base/shexp.h \ + include/base/systems.h \ + include/base/systhr.h \ + include/base/util.h \ + include/ldaputil/cert.h \ + include/ldaputil/certmap.h \ + include/ldaputil/dbconf.h \ + include/ldaputil/encode.h \ + include/ldaputil/errors.h \ + include/ldaputil/init.h \ + include/ldaputil/ldapauth.h \ + include/ldaputil/ldaputil.h \ + include/libaccess/aclerror.h \ + include/libaccess/acleval.h \ + include/libaccess/aclglobal.h \ + include/libaccess/acl.h \ + include/libaccess/aclproto.h \ + include/libaccess/aclstruct.h \ + include/libaccess/attrec.h \ + include/libaccess/authdb.h \ + include/libaccess/dbtlibaccess.h \ + include/libaccess/dnfstruct.h \ + include/libaccess/ipfstruct.h \ + include/libaccess/las.h \ + include/libaccess/nsautherr.h \ + include/libaccess/nsauth.h \ + include/libaccess/nserror.h \ + include/libaccess/symbols.h \ + include/libaccess/userauth.h \ + include/libaccess/usi.h \ + include/libaccess/usrcache.h \ + include/libadmin/dbtlibadmin.h \ + include/libadmin/libadmin.h \ + include/public/netsite.h \ + include/public/nsapi.h \ + include/public/base/systems.h \ + include/public/nsacl/aclapi.h \ + include/public/nsacl/acldef.h \ + include/public/nsacl/nserrdef.h \ + include/public/nsacl/plistdef.h \ + ldap/include/avl.h \ + ldap/include/dblayer.h \ + ldap/include/disptmpl.h \ + ldap/include/ldaprot.h \ + ldap/include/portable.h \ + ldap/include/regex.h \ + ldap/include/srchpref.h \ + 
ldap/include/sysexits-compat.h \ + ldap/servers/plugins/addn/addn.h \ + ldap/servers/plugins/collation/config.h \ + ldap/servers/plugins/collation/collate.h \ + ldap/servers/plugins/collation/orfilter.h \ + ldap/servers/plugins/chainingdb/cb.h \ + ldap/servers/plugins/deref/deref.h \ + ldap/servers/plugins/acctpolicy/acctpolicy.h \ + ldap/servers/plugins/posix-winsync/posix-wsp-ident.h \ + ldap/servers/plugins/posix-winsync/posix-group-func.h \ + ldap/servers/plugins/roles/roles_cache.h \ + ldap/servers/plugins/usn/usn.h \ + ldap/servers/plugins/pwdstorage/pwdstorage.h \ + ldap/servers/plugins/pwdstorage/md5.h \ + ldap/servers/plugins/acl/acl.h \ + ldap/servers/plugins/linkedattrs/linked_attrs.h \ + ldap/servers/plugins/rootdn_access/rootdn_access.h \ + ldap/servers/plugins/acct_usability/acct_usability.h \ + ldap/servers/plugins/retrocl/retrocl.h \ + ldap/servers/plugins/uiduniq/plugin-utils.h \ + ldap/servers/plugins/memberof/memberof.h \ + ldap/servers/plugins/replication/cl5_api.h \ + ldap/servers/plugins/replication/llist.h \ + ldap/servers/plugins/replication/repl_shared.h \ + ldap/servers/plugins/replication/csnpl.h \ + ldap/servers/plugins/replication/cl5.h \ + ldap/servers/plugins/replication/repl-session-plugin.h \ + ldap/servers/plugins/replication/windows_prot_private.h \ + ldap/servers/plugins/replication/repl_helper.h \ + ldap/servers/plugins/replication/repl5.h \ + ldap/servers/plugins/replication/cl5_test.h \ + ldap/servers/plugins/replication/repl5_ruv.h \ + ldap/servers/plugins/replication/cl5_clcache.h \ + ldap/servers/plugins/replication/cl_crypt.h \ + ldap/servers/plugins/replication/urp.h \ + ldap/servers/plugins/replication/winsync-plugin.h \ + ldap/servers/plugins/replication/windowsrepl.h \ + ldap/servers/plugins/replication/repl5_prot_private.h \ + ldap/servers/plugins/pam_passthru/pam_passthru.h \ + ldap/servers/plugins/syntaxes/syntax.h \ + ldap/servers/plugins/cos/cos_cache.h \ + ldap/servers/plugins/sync/sync.h \ + ldap/servers/plugins/passthru/passthru.h \ + ldap/servers/plugins/rever/rever.h \ + ldap/servers/plugins/http/http_client.h \ + ldap/servers/plugins/http/http_impl.h \ + ldap/servers/plugins/automember/automember.h \ + ldap/servers/plugins/mep/mep.h \ + ldap/servers/slapd/agtmmap.h \ + ldap/servers/slapd/auth.h \ + ldap/servers/slapd/csngen.h \ + ldap/servers/slapd/disconnect_errors.h \ + ldap/servers/slapd/disconnect_error_strings.h \ + ldap/servers/slapd/fe.h \ + ldap/servers/slapd/filter.h \ + ldap/servers/slapd/getopt_ext.h \ + ldap/servers/slapd/getsocketpeer.h \ + ldap/servers/slapd/http.h \ + ldap/servers/slapd/intrinsics.h \ + ldap/servers/slapd/log.h \ + ldap/servers/slapd/openldapber.h \ + ldap/servers/slapd/pblock_v3.h \ + ldap/servers/slapd/poll_using_select.h \ + ldap/servers/slapd/prerrstrs.h \ + ldap/servers/slapd/protect_db.h \ + ldap/servers/slapd/proto-slap.h \ + ldap/servers/slapd/pw.h \ + ldap/servers/slapd/pw_verify.h \ + ldap/servers/slapd/secerrstrs.h \ + ldap/servers/slapd/slap.h \ + ldap/servers/slapd/slapi_pal.h \ + ldap/servers/slapd/slapi-plugin-compat4.h \ + ldap/servers/slapd/slapi-plugin.h \ + ldap/servers/slapd/slapi-private.h \ + ldap/servers/slapd/snmp_collator.h \ + ldap/servers/slapd/sslerrstrs.h \ + ldap/servers/slapd/statechange.h \ + ldap/servers/slapd/uuid.h \ + ldap/servers/slapd/vattr_spi.h \ + ldap/servers/slapd/views.h \ + ldap/servers/slapd/back-ldbm/attrcrypt.h \ + ldap/servers/slapd/back-ldbm/back-ldbm.h \ + ldap/servers/slapd/back-ldbm/dblayer.h \ + ldap/servers/slapd/back-ldbm/import.h \ + 
ldap/servers/slapd/back-ldbm/ldbm_config.h \ + ldap/servers/slapd/back-ldbm/perfctrs.h \ + ldap/servers/slapd/back-ldbm/proto-back-ldbm.h \ + ldap/servers/slapd/back-ldbm/vlv_key.h \ + ldap/servers/slapd/back-ldbm/vlv_srch.h \ + ldap/servers/slapd/tools/ldaptool.h \ + ldap/servers/slapd/tools/ldaptool-sasl.h \ + ldap/servers/slapd/tools/ldclt/ldap-private.h \ + ldap/servers/slapd/tools/ldclt/ldclt.h \ + ldap/servers/slapd/tools/ldclt/port.h \ + ldap/servers/slapd/tools/ldclt/remote.h \ + ldap/servers/slapd/tools/ldclt/scalab01.h \ + ldap/servers/slapd/tools/ldclt/utils.h \ + ldap/servers/slapd/tools/rsearch/addthread.h \ + ldap/servers/slapd/tools/rsearch/infadd.h \ + ldap/servers/slapd/tools/rsearch/nametable.h \ + ldap/servers/slapd/tools/rsearch/rsearch.h \ + ldap/servers/slapd/tools/rsearch/sdattable.h \ + ldap/servers/slapd/tools/rsearch/searchthread.h \ + ldap/servers/snmp/ldap-agent.h \ + ldap/systools/pio.h \ + lib/base/lexer_pvt.h \ + lib/base/plist_pvt.h \ + lib/ldaputil/ldaputili.h \ + lib/libaccess/access_plhash.h \ + lib/libaccess/aclcache.h \ + lib/libaccess/aclpriv.h \ + lib/libaccess/aclscan.h \ + lib/libaccess/acl.tab.h \ + lib/libaccess/aclutil.h \ + lib/libaccess/lasdns.h \ + lib/libaccess/las.h \ + lib/libaccess/lasip.h \ + lib/libaccess/ldapauth.h \ + lib/libaccess/oneeval.h \ + lib/libaccess/parse.h \ + lib/libaccess/permhash.h \ + lib/libsi18n/getstrmem.h \ + lib/libsi18n/gsslapd.h \ + lib/libsi18n/reshash.h \ + lib/libsi18n/txtfile.h \ + src/libsds/sds/sds_internal.h \ + src/libsds/sds/bpt/bpt.h \ + src/libsds/sds/bpt_cow/bpt_cow.h \ + src/libsds/sds/queue/queue.h \ + src/libsds/sds/ht/ht.h + +if ATOMIC_QUEUE_OPERATIONS +dist_noinst_HEADERS += \ + src/libsds/external/liblfds711/inc/liblfds711.h \ + src/libsds/external/liblfds711/inc/liblfds711/lfds711_porting_abstraction_layer_compiler.h \ + src/libsds/external/liblfds711/inc/liblfds711/lfds711_porting_abstraction_layer_processor.h \ + src/libsds/external/liblfds711/inc/liblfds711/lfds711_porting_abstraction_layer_operating_system.h \ + src/libsds/external/liblfds711/inc/liblfds711/lfds711_prng.h \ + src/libsds/external/liblfds711/inc/liblfds711/lfds711_misc.h \ + src/libsds/external/liblfds711/inc/liblfds711/lfds711_queue_unbounded_manyproducer_manyconsumer.h \ + src/libsds/external/liblfds711/src/liblfds711_internal.h \ + src/libsds/external/liblfds711/src/lfds711_misc/lfds711_misc_internal.h \ + src/libsds/external/liblfds711/src/lfds711_queue_unbounded_manyproducer_manyconsumer/lfds711_queue_unbounded_manyproducer_manyconsumer_internal.h +endif + +if ENABLE_CMOCKA +dist_noinst_HEADERS += \ + test/test_slapd.h \ + src/libsds/test/test_sds.h \ + src/libsds/test/benchmark.h \ + src/libsds/test/benchmark_par.h +endif + +dist_noinst_DATA = \ + $(srcdir)/buildnum.py \ + $(srcdir)/ldap/admin/src/*.in \ + $(srcdir)/ldap/admin/src/scripts/*.in \ + $(srcdir)/ldap/admin/src/scripts/*.ldif \ + $(srcdir)/ldap/admin/src/scripts/*.py \ + $(srcdir)/ldap/admin/src/scripts/*.sh \ + $(srcdir)/ldap/admin/src/scripts/ds-replcheck \ + $(srcdir)/ldap/admin/src/scripts/migrate-ds.res \ + $(srcdir)/ldap/ldif/*.in \ + $(srcdir)/ldap/ldif/*.ldif \ + $(srcdir)/ldap/schema/*.ldif \ + $(srcdir)/ldap/schema/slapd-collations.conf \ + $(srcdir)/ldap/servers/snmp/ldap-agent.conf \ + $(srcdir)/ldap/servers/snmp/redhat-directory.mib \ + $(srcdir)/lib/ldaputil/certmap.conf \ + $(srcdir)/m4 \ + $(srcdir)/rpm/389-ds-base.spec.in \ + $(srcdir)/rpm/389-ds-base-devel.README \ + $(srcdir)/rpm/389-ds-base-git.sh \ + $(srcdir)/README.md \ + 
$(srcdir)/LICENSE \ + $(srcdir)/LICENSE.* \ + $(srcdir)/VERSION.sh \ + $(srcdir)/wrappers/*.in \ + $(srcdir)/dirsrvtests \ + $(srcdir)/src/lib389/setup.py \ + $(srcdir)/src/lib389 + +if ENABLE_PERL +dist_noinst_DATA += \ + $(srcdir)/ldap/admin/src/*.pl \ + $(srcdir)/ldap/admin/src/scripts/*.pl \ + $(srcdir)/ldap/admin/src/scripts/*.pm \ + $(srcdir)/ldap/servers/slapd/mkDBErrStrs.pl \ + $(srcdir)/ldap/servers/slapd/tools/rsearch/scripts/dbgen* +endif + +#------------------------ +# Installed Files +#------------------------ +config_DATA = $(srcdir)/lib/ldaputil/certmap.conf \ + $(srcdir)/ldap/schema/slapd-collations.conf \ + ldap/servers/snmp/ldap-agent.conf + +if ENABLE_LEGACY +config_DATA += ldap/admin/src/template-initconfig +endif + +# the schema files in this list are either not +# standard schema, not tested, or not compatible +# with the default schema e.g. there is +# considerable overlap of 60changelog.ldif and 01common.ldif +# and 60inetmail.ldif and 50ns-mail.ldif among others +sampledata_DATA = ldap/admin/src/scripts/DSSharedLib \ + $(srcdir)/ldap/ldif/Ace.ldif \ + $(srcdir)/ldap/ldif/European.ldif \ + $(srcdir)/ldap/ldif/Eurosuffix.ldif \ + $(srcdir)/ldap/ldif/Example.ldif \ + $(srcdir)/ldap/ldif/Example-roles.ldif \ + $(srcdir)/ldap/ldif/Example-views.ldif \ + $(srcdir)/ldap/ldif/template.ldif \ + ldap/ldif/template-dse.ldif \ + ldap/ldif/template-dse-minimal.ldif \ + ldap/ldif/template-suffix-db.ldif \ + ldap/ldif/template-ldapi.ldif \ + ldap/ldif/template-ldapi-default.ldif \ + ldap/ldif/template-ldapi-autobind.ldif \ + ldap/ldif/template-org.ldif \ + ldap/ldif/template-domain.ldif \ + ldap/ldif/template-state.ldif \ + ldap/ldif/template-locality.ldif \ + ldap/ldif/template-country.ldif \ + ldap/ldif/template-orgunit.ldif \ + ldap/ldif/template-baseacis.ldif \ + ldap/ldif/template-sasl.ldif \ + $(srcdir)/ldap/servers/slapd/tools/rsearch/scripts/dbgen-FamilyNames \ + $(srcdir)/ldap/servers/slapd/tools/rsearch/scripts/dbgen-GivenNames \ + $(srcdir)/ldap/servers/slapd/tools/rsearch/scripts/dbgen-OrgUnits \ + $(srcdir)/ldap/schema/10rfc2307bis.ldif \ + $(srcdir)/ldap/schema/60changelog.ldif \ + $(srcdir)/ldap/schema/60inetmail.ldif \ + $(srcdir)/ldap/schema/60krb5kdc.ldif \ + $(srcdir)/ldap/schema/60kerberos.ldif \ + $(srcdir)/ldap/schema/60nis.ldif \ + $(srcdir)/ldap/schema/60qmail.ldif \ + $(srcdir)/ldap/schema/60radius.ldif \ + $(srcdir)/ldap/schema/60rfc4876.ldif \ + $(srcdir)/ldap/schema/60samba.ldif \ + $(srcdir)/ldap/schema/60samba3.ldif \ + $(srcdir)/ldap/schema/60sendmail.ldif \ + $(LIBPRESENCE_SCHEMA) + +systemschema_DATA = $(srcdir)/ldap/schema/00core.ldif \ + $(srcdir)/ldap/schema/01core389.ldif \ + $(srcdir)/ldap/schema/02common.ldif \ + $(srcdir)/ldap/schema/05rfc2927.ldif \ + $(srcdir)/ldap/schema/05rfc4523.ldif \ + $(srcdir)/ldap/schema/05rfc4524.ldif \ + $(srcdir)/ldap/schema/06inetorgperson.ldif \ + $(srcdir)/ldap/schema/10automember-plugin.ldif \ + $(srcdir)/ldap/schema/10dna-plugin.ldif \ + $(srcdir)/ldap/schema/10mep-plugin.ldif \ + $(srcdir)/ldap/schema/10rfc2307.ldif \ + $(srcdir)/ldap/schema/20subscriber.ldif \ + $(srcdir)/ldap/schema/25java-object.ldif \ + $(srcdir)/ldap/schema/28pilot.ldif \ + $(srcdir)/ldap/schema/30ns-common.ldif \ + $(srcdir)/ldap/schema/50ns-admin.ldif \ + $(srcdir)/ldap/schema/50ns-certificate.ldif \ + $(srcdir)/ldap/schema/50ns-directory.ldif \ + $(srcdir)/ldap/schema/50ns-mail.ldif \ + $(srcdir)/ldap/schema/50ns-value.ldif \ + $(srcdir)/ldap/schema/50ns-web.ldif \ + $(srcdir)/ldap/schema/60pam-plugin.ldif \ + 
$(srcdir)/ldap/schema/60posix-winsync-plugin.ldif \ + $(srcdir)/ldap/schema/60autofs.ldif \ + $(srcdir)/ldap/schema/60eduperson.ldif \ + $(srcdir)/ldap/schema/60mozilla.ldif \ + $(srcdir)/ldap/schema/60pureftpd.ldif \ + $(srcdir)/ldap/schema/60rfc2739.ldif \ + $(srcdir)/ldap/schema/60rfc3712.ldif \ + $(srcdir)/ldap/schema/60sabayon.ldif \ + $(srcdir)/ldap/schema/60sudo.ldif \ + $(srcdir)/ldap/schema/60trust.ldif \ + $(srcdir)/ldap/schema/60nss-ldap.ldif \ + $(LIBACCTPOLICY_SCHEMA) + +schema_DATA = $(srcdir)/ldap/schema/99user.ldif + +libexec_SCRIPTS = +if ENABLE_PERL +libexec_SCRIPTS += ldap/admin/src/scripts/ds_selinux_enabled \ + ldap/admin/src/scripts/ds_selinux_port_query +endif +if SYSTEMD +libexec_SCRIPTS += wrappers/ds_systemd_ask_password_acl +endif + +install-data-hook: + if [ "$(srcdir)" != "." ]; then cp -r $(srcdir)/src/cockpit src ; fi + mkdir -p src/cockpit/389-console/cockpit_dist/ + mkdir -p $(DESTDIR)$(cockpitdir) + rsync -rupE src/cockpit/389-console/cockpit_dist/ $(DESTDIR)$(cockpitdir) + mkdir -p $(DESTDIR)$(metainfodir) + rsync -up src/cockpit/389-console/org.port389.cockpit_console.metainfo.xml $(DESTDIR)$(metainfodir)/org.port389.cockpit_console.metainfo.xml + +sbin_SCRIPTS = +if ENABLE_PERL +sbin_SCRIPTS += ldap/admin/src/scripts/setup-ds.pl \ + ldap/admin/src/scripts/migrate-ds.pl \ + ldap/admin/src/scripts/remove-ds.pl \ + ldap/admin/src/scripts/bak2db.pl \ + ldap/admin/src/scripts/db2bak.pl \ + ldap/admin/src/scripts/db2index.pl \ + ldap/admin/src/scripts/db2ldif.pl \ + ldap/admin/src/scripts/fixup-linkedattrs.pl \ + ldap/admin/src/scripts/fixup-memberof.pl \ + ldap/admin/src/scripts/cleanallruv.pl \ + ldap/admin/src/scripts/ldif2db.pl \ + ldap/admin/src/scripts/ns-accountstatus.pl \ + ldap/admin/src/scripts/ns-activate.pl \ + ldap/admin/src/scripts/ns-inactivate.pl \ + ldap/admin/src/scripts/ns-newpwpolicy.pl \ + ldap/admin/src/scripts/schema-reload.pl \ + ldap/admin/src/scripts/syntax-validate.pl \ + ldap/admin/src/scripts/usn-tombstone-cleanup.pl \ + ldap/admin/src/scripts/verify-db.pl +endif +if ENABLE_LEGACY +sbin_SCRIPTS += \ + ldap/admin/src/scripts/start-dirsrv \ + ldap/admin/src/scripts/stop-dirsrv \ + ldap/admin/src/scripts/restart-dirsrv \ + ldap/admin/src/scripts/status-dirsrv \ + ldap/admin/src/scripts/bak2db \ + ldap/admin/src/scripts/db2bak \ + ldap/admin/src/scripts/db2index \ + ldap/admin/src/scripts/db2ldif \ + ldap/admin/src/scripts/dn2rdn \ + ldap/admin/src/scripts/ldif2db \ + ldap/admin/src/scripts/ldif2ldap \ + ldap/admin/src/scripts/monitor \ + ldap/admin/src/scripts/restoreconfig \ + ldap/admin/src/scripts/saveconfig \ + ldap/admin/src/scripts/suffix2instance \ + ldap/admin/src/scripts/upgradednformat \ + ldap/admin/src/scripts/vlvindex \ + ldap/admin/src/scripts/dbverify \ + ldap/admin/src/scripts/upgradedb \ + ldap/admin/src/scripts/dbmon.sh +endif + +bin_SCRIPTS = \ + ldap/admin/src/scripts/readnsstate + +if ENABLE_PERL +bin_SCRIPTS += ldap/servers/slapd/tools/rsearch/scripts/dbgen.pl \ + wrappers/cl-dump \ + ldap/admin/src/scripts/cl-dump.pl \ + wrappers/repl-monitor \ + ldap/admin/src/scripts/repl-monitor.pl +endif + +# For scripts that are "as is". 
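+# ("as is" here means the dist_ automake prefix: these scripts are shipped in
+#  the tarball and installed unmodified, rather than being generated from
+#  *.in templates at build time)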
+dist_bin_SCRIPTS = ldap/admin/src/scripts/ds-replcheck \ + ldap/admin/src/scripts/ds-logpipe.py + +dist_bin_SCRIPTS += ldap/admin/src/logconv.pl + +# SCRIPTS makes them executables - these are perl modules +# and should not be marked as executable - so use DATA +if ENABLE_PERL +perl_DATA = ldap/admin/src/scripts/SetupLog.pm \ + ldap/admin/src/scripts/Resource.pm \ + ldap/admin/src/scripts/DSUtil.pm \ + ldap/admin/src/scripts/Setup.pm \ + ldap/admin/src/scripts/SetupDialogs.pm \ + ldap/admin/src/scripts/Inf.pm \ + ldap/admin/src/scripts/DialogManager.pm \ + ldap/admin/src/scripts/Dialog.pm \ + ldap/admin/src/scripts/DSDialogs.pm \ + ldap/admin/src/scripts/Migration.pm \ + ldap/admin/src/scripts/DSMigration.pm \ + ldap/admin/src/scripts/FileConn.pm \ + ldap/admin/src/scripts/DSCreate.pm \ + ldap/admin/src/scripts/DSUpdate.pm \ + ldap/admin/src/scripts/DSUpdateDialogs.pm +endif + +python_DATA = ldap/admin/src/scripts/failedbinds.py \ + ldap/admin/src/scripts/logregex.py + +gdbautoload_DATA = ldap/admin/src/scripts/ns-slapd-gdb.py + +dist_sysctl_DATA = ldap/admin/src/70-dirsrv.conf + +if ENABLE_PERL +property_DATA = ldap/admin/src/scripts/setup-ds.res \ + ldap/admin/src/scripts/migrate-ds.res + +task_SCRIPTS = ldap/admin/src/scripts/template-bak2db \ + ldap/admin/src/scripts/template-db2bak \ + ldap/admin/src/scripts/template-db2index \ + ldap/admin/src/scripts/template-db2ldif \ + ldap/admin/src/scripts/template-dn2rdn \ + ldap/admin/src/scripts/template-ldif2db \ + ldap/admin/src/scripts/template-ldif2ldap \ + ldap/admin/src/scripts/template-monitor \ + ldap/admin/src/scripts/template-restart-slapd \ + ldap/admin/src/scripts/template-restoreconfig \ + ldap/admin/src/scripts/template-saveconfig \ + ldap/admin/src/scripts/template-start-slapd \ + ldap/admin/src/scripts/template-stop-slapd \ + ldap/admin/src/scripts/template-suffix2instance \ + ldap/admin/src/scripts/template-upgradednformat \ + ldap/admin/src/scripts/template-vlvindex \ + ldap/admin/src/scripts/template-bak2db.pl \ + ldap/admin/src/scripts/template-db2bak.pl \ + ldap/admin/src/scripts/template-db2index.pl \ + ldap/admin/src/scripts/template-db2ldif.pl \ + ldap/admin/src/scripts/template-fixup-linkedattrs.pl \ + ldap/admin/src/scripts/template-fixup-memberof.pl \ + ldap/admin/src/scripts/template-fixup-memberuid.pl \ + ldap/admin/src/scripts/template-cleanallruv.pl \ + ldap/admin/src/scripts/template-ldif2db.pl \ + ldap/admin/src/scripts/template-ns-accountstatus.pl \ + ldap/admin/src/scripts/template-ns-activate.pl \ + ldap/admin/src/scripts/template-ns-inactivate.pl \ + ldap/admin/src/scripts/template-ns-newpwpolicy.pl \ + ldap/admin/src/scripts/template-schema-reload.pl \ + ldap/admin/src/scripts/template-syntax-validate.pl \ + ldap/admin/src/scripts/template-usn-tombstone-cleanup.pl \ + ldap/admin/src/scripts/template-verify-db.pl \ + ldap/admin/src/scripts/template-dbverify +endif + +if SYSTEMD +# yes, that is an @ in the filename . . . 
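+# (the @ marks a systemd template unit, so the single unit file is
+#  instantiated per server instance as $(PACKAGE_NAME)@<instance>.service)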
+systemdsystemunit_DATA = wrappers/$(PACKAGE_NAME)@.service \ + wrappers/$(systemdgroupname) \ + wrappers/$(PACKAGE_NAME)-snmp.service + +if with_sanitizer +systemdsystemunitdropin_DATA = wrappers/$(PACKAGE_NAME)@.service.d/xsan.conf +else +systemdsystemunitdropin_DATA = wrappers/$(PACKAGE_NAME)@.service.d/custom.conf +endif + +else +if INITDDIR +init_SCRIPTS = wrappers/$(PACKAGE_NAME) \ + wrappers/$(PACKAGE_NAME)-snmp +endif +endif + +if INITDDIR +initconfig_DATA = ldap/admin/src/$(PACKAGE_NAME) +endif + +inf_DATA = ldap/admin/src/slapd.inf \ + ldap/admin/src/scripts/dscreate.map \ + ldap/admin/src/scripts/dsupdate.map \ + ldap/admin/src/scripts/dsorgentries.map \ + ldap/admin/src/defaults.inf + +mib_DATA = ldap/servers/snmp/redhat-directory.mib + +pkgconfig_DATA = src/pkgconfig/dirsrv.pc \ + src/pkgconfig/libsds.pc \ + src/pkgconfig/svrcore.pc + +#------------------------ +# header files +#------------------------ +serverinc_HEADERS = ldap/servers/plugins/replication/repl-session-plugin.h \ + ldap/servers/slapd/slapi_pal.h \ + ldap/servers/slapd/slapi-plugin.h \ + ldap/servers/plugins/replication/winsync-plugin.h \ + src/libsds/include/sds.h + +include_HEADERS = src/svrcore/src/svrcore.h + +#------------------------ +# man pages +#------------------------ +dist_man_MANS = man/man1/dbscan.1 \ + man/man1/ds-logpipe.py.1 \ + man/man1/ds-replcheck.1 \ + man/man1/ldap-agent.1 \ + man/man1/ldclt.1 \ + man/man1/logconv.pl.1 \ + man/man1/pwdhash.1 \ + man/man1/readnsstate.1 \ + man/man5/99user.ldif.5 \ + man/man8/ns-slapd.8 \ + man/man5/certmap.conf.5 \ + man/man5/dirsrv.5 \ + man/man5/dirsrv.systemd.5 \ + man/man5/slapd-collations.conf.5 +if ENABLE_LEGACY +dist_man_MANS += \ + man/man1/infadd.1 \ + man/man1/ldif.1 \ + man/man1/migratecred.1 \ + man/man1/mmldif.1 \ + man/man1/rsearch.1 +endif +if ENABLE_PERL +dist_man_MANS += man/man1/cl-dump.1 \ + man/man1/cl-dump.pl.1 \ + man/man1/dbgen.pl.1 \ + man/man1/repl-monitor.1 \ + man/man1/repl-monitor.pl.1 \ + man/man8/migrate-ds.pl.8 \ + man/man8/restart-dirsrv.8 \ + man/man8/setup-ds.pl.8 \ + man/man8/start-dirsrv.8 \ + man/man8/stop-dirsrv.8 \ + man/man8/status-dirsrv.8 \ + man/man8/bak2db.8 \ + man/man8/bak2db.pl.8 \ + man/man8/cleanallruv.pl.8 \ + man/man8/dbverify.8 \ + man/man8/db2bak.8 \ + man/man8/db2bak.pl.8 \ + man/man8/db2ldif.8 \ + man/man8/db2ldif.pl.8 \ + man/man8/db2index.8 \ + man/man8/db2index.pl.8 \ + man/man8/fixup-linkedattrs.pl.8 \ + man/man8/fixup-memberof.pl.8 \ + man/man8/ldif2db.8 \ + man/man8/ldif2db.pl.8 \ + man/man8/dbmon.sh.8 \ + man/man8/dn2rdn.8 \ + man/man8/ldif2ldap.8 \ + man/man8/monitor.8 \ + man/man8/ns-accountstatus.pl.8 \ + man/man8/ns-newpwpolicy.pl.8 \ + man/man8/ns-activate.pl.8 \ + man/man8/ns-inactivate.pl.8 \ + man/man8/remove-ds.pl.8 \ + man/man8/restoreconfig.8 \ + man/man8/saveconfig.8 \ + man/man8/schema-reload.pl.8 \ + man/man8/suffix2instance.8 \ + man/man8/syntax-validate.pl.8 \ + man/man8/upgradednformat.8 \ + man/man8/upgradedb.8 \ + man/man8/usn-tombstone-cleanup.pl.8 \ + man/man8/vlvindex.8 \ + man/man8/verify-db.pl.8 \ + man/man5/template-initconfig.5 +endif + +#------------------------ +# updates +# the first 3 are just the examples provided - since they +# do not begin with two digits, they will be ignored +# the remaining items should begin with two digits that +# correspond to the order in which they should be applied +# perl files and LDIF files are DATA - not executable +# processed by the update script +# shell scripts and other files are SCRIPTS - executable +#------------------------ 
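+# Illustrative sketch only (not a shipped file): a new update added below
+# would carry a two-digit prefix that fixes its apply order, e.g.
+#   update_DATA += ldap/admin/src/scripts/55exampleupdate.ldif
+# while names without a leading two-digit prefix, like the exampleupdate.*
+# entries, are ignored by the update script as noted above.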
+if ENABLE_PERL +update_DATA = ldap/admin/src/scripts/exampleupdate.pl \ + ldap/admin/src/scripts/exampleupdate.ldif \ + ldap/admin/src/scripts/10cleanupldapi.pl \ + ldap/admin/src/scripts/10delautodnsuffix.pl \ + ldap/admin/src/scripts/10fixrundir.pl \ + ldap/admin/src/scripts/20betxn.pl \ + ldap/admin/src/scripts/50addchainingsaslpwroles.ldif \ + ldap/admin/src/scripts/50acctusabilityplugin.ldif \ + ldap/admin/src/scripts/50automemberplugin.ldif \ + ldap/admin/src/scripts/50memberofindex.ldif \ + ldap/admin/src/scripts/50nstombstonecsn.ldif \ + ldap/admin/src/scripts/50bitstringsyntaxplugin.ldif \ + ldap/admin/src/scripts/50managedentriesplugin.ldif \ + ldap/admin/src/scripts/50memberofplugin.ldif \ + ldap/admin/src/scripts/50deliverymethodsyntaxplugin.ldif \ + ldap/admin/src/scripts/50nameuidsyntaxplugin.ldif \ + ldap/admin/src/scripts/50derefplugin.ldif \ + ldap/admin/src/scripts/50numericstringsyntaxplugin.ldif \ + ldap/admin/src/scripts/50disableurisyntaxplugin.ldif \ + ldap/admin/src/scripts/50printablestringsyntaxplugin.ldif \ + ldap/admin/src/scripts/50enhancedguidesyntaxplugin.ldif \ + ldap/admin/src/scripts/50schemareloadplugin.ldif \ + ldap/admin/src/scripts/50entryusnindex.ldif \ + ldap/admin/src/scripts/50syntaxvalidplugin.ldif \ + ldap/admin/src/scripts/50faxnumbersyntaxplugin.ldif \ + ldap/admin/src/scripts/50teletexterminalidsyntaxplugin.ldif \ + ldap/admin/src/scripts/50faxsyntaxplugin.ldif \ + ldap/admin/src/scripts/50fixNsState.pl \ + ldap/admin/src/scripts/50telexnumbersyntaxplugin.ldif \ + ldap/admin/src/scripts/50guidesyntaxplugin.ldif \ + ldap/admin/src/scripts/50targetuniqueid.ldif \ + ldap/admin/src/scripts/60removeLegacyReplication.ldif \ + ldap/admin/src/scripts/50linkedattrsplugin.ldif \ + ldap/admin/src/scripts/50usnplugin.ldif \ + ldap/admin/src/scripts/50smd5pwdstorageplugin.ldif \ + ldap/admin/src/scripts/50refintprecedence.ldif \ + ldap/admin/src/scripts/50retroclprecedence.ldif \ + ldap/admin/src/scripts/50rootdnaccesscontrolplugin.ldif \ + ldap/admin/src/scripts/50contentsync.ldif \ + ldap/admin/src/scripts/60upgradeschemafiles.pl \ + ldap/admin/src/scripts/60upgradeconfigfiles.pl \ + ldap/admin/src/scripts/70upgradefromldif.pl \ + ldap/admin/src/scripts/80upgradednformat.pl \ + ldap/admin/src/scripts/81changelog.pl \ + ldap/admin/src/scripts/82targetuniqueidindex.pl \ + ldap/admin/src/scripts/90subtreerename.pl \ + ldap/admin/src/scripts/91subtreereindex.pl \ + ldap/admin/src/scripts/50AES-pbe-plugin.ldif\ + ldap/admin/src/scripts/50updateconfig.ldif \ + ldap/admin/src/scripts/52updateAESplugin.pl \ + ldap/admin/src/scripts/dnaplugindepends.ldif \ + ldap/admin/src/scripts/91reindex.pl + +update_SCRIPTS = ldap/admin/src/scripts/exampleupdate.sh +endif + +#//////////////////////////////////////////////////////////////// +# +# Server Strings +# +#//////////////////////////////////////////////////////////////// +#------------------------ +# makstrdb +#------------------------ +if ENABLE_PERL +noinst_PROGRAMS = makstrdb + +nodist_property_DATA = ns-slapd.properties + +makstrdb_SOURCES = lib/libsi18n/makstrdb.c + +makstrdb_CPPFLAGS = $(AM_CPPFLAGS) $(DSPLUGIN_CPPFLAGS) + +#------------------------ +# ns-slapd.properties +#------------------------ +ns-slapd.properties: makstrdb + ./makstrdb + +endif + + +#//////////////////////////////////////////////////////////////// +# +# Static Server Libraries +# +#//////////////////////////////////////////////////////////////// +#------------------------ +# libavl +#------------------------ +libavl_a_SOURCES = 
ldap/libraries/libavl/avl.c +libavl_a_CPPFLAGS = $(AM_CPPFLAGS) $(DSPLUGIN_CPPFLAGS) + +#------------------------ +# libldaputil +#------------------------ +libldaputil_la_SOURCES = lib/ldaputil/cert.c \ + lib/ldaputil/certmap.c \ + lib/ldaputil/dbconf.c \ + lib/ldaputil/encode.c \ + lib/ldaputil/errors.c \ + lib/ldaputil/init.c \ + lib/ldaputil/ldapauth.c \ + lib/ldaputil/vtable.c + +libldaputil_la_CPPFLAGS = $(AM_CPPFLAGS) $(DSPLUGIN_CPPFLAGS) $(DSINTERNAL_CPPFLAGS) -I$(srcdir)/lib/ldaputil +libldaputil_la_LIBADD = libslapd.la $(NSS_LINK) $(NSPR_LINK) +libldaputil_la_LDFLAGS = $(AM_LDFLAGS) + +#//////////////////////////////////////////////////////////////// +# +# Dynamic Server Libraries +# +#//////////////////////////////////////////////////////////////// + +#------------------------ +# libsvrcore +#------------------------ +libsvrcore_la_SOURCES = \ + src/svrcore/src/alt.c \ + src/svrcore/src/cache.c \ + src/svrcore/src/errors.c \ + src/svrcore/src/file.c \ + src/svrcore/src/ntgetpin.c \ + src/svrcore/src/ntresource.h \ + src/svrcore/src/pin.c \ + src/svrcore/src/pk11.c \ + src/svrcore/src/std.c \ + src/svrcore/src/systemd-ask-pass.c \ + src/svrcore/src/std-systemd.c \ + src/svrcore/src/user.c + +libsvrcore_la_LDFLAGS = $(AM_LDFLAGS) +libsvrcore_la_CPPFLAGS = $(AM_CPPFLAGS) $(SVRCORE_INCLUDES) $(DSPLUGIN_CPPFLAGS) +libsvrcore_la_LIBADD = $(NSS_LINK) $(NSPR_LINK) + +#------------------------ +# libsds +#------------------------ +libsds_la_SOURCES = src/libsds/sds/core/utils.c \ + src/libsds/sds/core/crc32c.c \ + src/libsds/sds/bpt/bpt.c \ + src/libsds/sds/bpt/list.c \ + src/libsds/sds/bpt/search.c \ + src/libsds/sds/bpt/common.c \ + src/libsds/sds/bpt/map.c \ + src/libsds/sds/bpt/set.c \ + src/libsds/sds/bpt/verify.c \ + src/libsds/sds/bpt_cow/atomic.c \ + src/libsds/sds/bpt_cow/bpt_cow.c \ + src/libsds/sds/bpt_cow/delete.c \ + src/libsds/sds/bpt_cow/insert.c \ + src/libsds/sds/bpt_cow/node.c \ + src/libsds/sds/bpt_cow/search.c \ + src/libsds/sds/bpt_cow/txn.c \ + src/libsds/sds/bpt_cow/verify.c \ + src/libsds/sds/queue/queue.c \ + src/libsds/sds/queue/lqueue.c \ + src/libsds/external/csiphash/csiphash.c \ + src/libsds/sds/ht/ht.c \ + src/libsds/sds/ht/node.c \ + src/libsds/sds/ht/map.c \ + src/libsds/sds/ht/op.c \ + src/libsds/sds/ht/verify.c +if ATOMIC_QUEUE_OPERATIONS +libsds_la_SOURCES += \ + src/libsds/external/liblfds711/src/lfds711_queue_unbounded_manyproducer_manyconsumer/lfds711_queue_unbounded_manyproducer_manyconsumer_cleanup.c \ + src/libsds/external/liblfds711/src/lfds711_queue_unbounded_manyproducer_manyconsumer/lfds711_queue_unbounded_manyproducer_manyconsumer_dequeue.c \ + src/libsds/external/liblfds711/src/lfds711_queue_unbounded_manyproducer_manyconsumer/lfds711_queue_unbounded_manyproducer_manyconsumer_enqueue.c \ + src/libsds/external/liblfds711/src/lfds711_queue_unbounded_manyproducer_manyconsumer/lfds711_queue_unbounded_manyproducer_manyconsumer_init.c \ + src/libsds/external/liblfds711/src/lfds711_queue_unbounded_manyproducer_manyconsumer/lfds711_queue_unbounded_manyproducer_manyconsumer_query.c \ + src/libsds/external/liblfds711/src/lfds711_misc/lfds711_misc_internal_backoff_init.c \ + src/libsds/external/liblfds711/src/lfds711_misc/lfds711_misc_globals.c +endif + +# EDIT THESE AT THE TOP OF THE MAKE FILE!!! +libsds_la_CPPFLAGS = $(AM_CPPFLAGS) $(SDS_CPPFLAGS) +libsds_la_LDFLAGS = $(AM_LDFLAGS) $(SDS_LDFLAGS) + +if RUST_ENABLE + +noinst_LTLIBRARIES = librsds.la librslapd.la librnsslapd.la + +### Why does this exist? 
+# +# Both cargo and autotools are really opinionated. It's really hard to make this work. :( +# +# https://people.gnome.org/~federico/blog/librsvg-build-infrastructure.html +# https://gitlab.gnome.org/GNOME/librsvg/blob/master/Makefile.am + +### Rust datastructures + +RSDS_LIB = @abs_top_builddir@/rs/@rust_target_dir@/librsds.a + +libsds_la_LIBADD = $(RSDS_LIB) + +librsds_la_SOURCES = \ + src/libsds/Cargo.toml \ + src/libsds/sds/lib.rs \ + src/libsds/sds/tqueue.rs + +librsds_la_EXTRA = src/libsds/Cargo.lock + +@abs_top_builddir@/rs/@rust_target_dir@/librsds.a: $(librsds_la_SOURCES) + RUST_BACKTRACE=1 RUSTC_BOOTSTRAP=1 \ + CARGO_TARGET_DIR=$(abs_top_builddir)/rs \ + cargo rustc $(RUST_OFFLINE) --manifest-path=$(srcdir)/src/libsds/Cargo.toml \ + $(CARGO_FLAGS) --verbose -- $(RUSTC_FLAGS) + +### Rust lib slapd components +RSLAPD_LIB = @abs_top_builddir@/rs/@rust_target_dir@/librslapd.a + +librslapd_la_SOURCES = \ + src/librslapd/Cargo.toml \ + src/librslapd/build.rs \ + src/librslapd/src/lib.rs + +librslapd_la_EXTRA = src/librslapd/Cargo.lock + +@abs_top_builddir@/rs/@rust_target_dir@/librslapd.a: $(librslapd_la_SOURCES) + RUST_BACKTRACE=1 RUSTC_BOOTSTRAP=1 \ + CARGO_TARGET_DIR=$(abs_top_builddir)/rs \ + SLAPD_HEADER_DIR=$(abs_top_builddir)/ \ + cargo rustc $(RUST_OFFLINE) --manifest-path=$(srcdir)/src/librslapd/Cargo.toml \ + $(CARGO_FLAGS) --verbose -- $(RUSTC_FLAGS) + +# The header needs the lib build first. +rust-slapi-private.h: @abs_top_builddir@/rs/@rust_target_dir@/librslapd.a + +# Build rust ns-slapd components as a library. +RNSSLAPD_LIB = @abs_top_builddir@/rs/@rust_target_dir@/librnsslapd.a + +librnsslapd_la_SOURCES = \ + src/librnsslapd/Cargo.toml \ + src/librnsslapd/build.rs \ + src/librnsslapd/src/lib.rs + +librnsslapd_la_EXTRA = src/librnsslapd/Cargo.lock + +@abs_top_builddir@/rs/@rust_target_dir@/librnsslapd.a: $(librnsslapd_la_SOURCES) + RUST_BACKTRACE=1 RUSTC_BOOTSTRAP=1 \ + CARGO_TARGET_DIR=$(abs_top_builddir)/rs \ + SLAPD_HEADER_DIR=$(abs_top_builddir)/ \ + cargo rustc $(RUST_OFFLINE) --manifest-path=$(srcdir)/src/librnsslapd/Cargo.toml \ + $(CARGO_FLAGS) --verbose -- $(RUSTC_FLAGS) + +# The header needs the lib build first. +rust-nsslapd-private.h: @abs_top_builddir@/rs/@rust_target_dir@/librnsslapd.a + +EXTRA_DIST = $(librsds_la_SOURCES) $(librsds_la_EXTRA) \ + $(librslapd_la_SOURCES) $(librslapd_la_EXTRA) \ + $(librnsslapd_la_SOURCES) $(librnsslapd_la_EXTRA) + +## Run rust tests +# cargo does not support offline tests :( +if RUST_ENABLE_OFFLINE +else +check-local: + RUST_BACKTRACE=1 RUSTC_BOOTSTRAP=1 \ + CARGO_TARGET_DIR=$(abs_top_builddir)/rs \ + cargo test $(RUST_OFFLINE) --manifest-path=$(srcdir)/src/libsds/Cargo.toml + RUST_BACKTRACE=1 RUSTC_BOOTSTRAP=1 \ + CARGO_TARGET_DIR=$(abs_top_builddir)/rs \ + SLAPD_HEADER_DIR=$(abs_top_builddir)/ \ + cargo test $(RUST_OFFLINE) --manifest-path=$(srcdir)/src/librslapd/Cargo.toml + RUST_BACKTRACE=1 RUSTC_BOOTSTRAP=1 \ + CARGO_TARGET_DIR=$(abs_top_builddir)/rs \ + SLAPD_HEADER_DIR=$(abs_top_builddir)/ \ + cargo test $(RUST_OFFLINE) --manifest-path=$(srcdir)/src/librnsslapd/Cargo.toml +endif + +else +# Just build the tqueue in C. 
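+# (with RUST_ENABLE off, the C implementation in queue/tqueue.c below stands
+#  in for the task queue otherwise provided by sds/tqueue.rs via librsds)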
+libsds_la_SOURCES += \ + src/libsds/sds/queue/tqueue.c +endif + +#------------------------ +# libns-dshttpd +#------------------------ +libns_dshttpd_la_SOURCES = lib/libaccess/access_plhash.cpp \ + lib/libaccess/acl.tab.cpp \ + lib/libaccess/acl.yy.cpp \ + lib/libaccess/aclcache.cpp \ + lib/libaccess/aclerror.cpp \ + lib/libaccess/acleval.cpp \ + lib/libaccess/aclflush.cpp \ + lib/libaccess/aclspace.cpp \ + lib/libaccess/acltools.cpp \ + lib/libaccess/aclutil.cpp \ + lib/libaccess/authdb.cpp \ + lib/libaccess/lasdns.cpp \ + lib/libaccess/lasgroup.cpp \ + lib/libaccess/lasip.cpp \ + lib/libaccess/lastod.cpp \ + lib/libaccess/lasuser.cpp \ + lib/libaccess/method.cpp \ + lib/libaccess/nseframe.cpp \ + lib/libaccess/nsautherr.cpp \ + lib/libaccess/oneeval.cpp \ + lib/libaccess/register.cpp \ + lib/libaccess/symbols.cpp \ + lib/libaccess/usi.cpp \ + lib/libaccess/usrcache.cpp \ + lib/libadmin/error.c \ + lib/libadmin/template.c \ + lib/libadmin/util.c \ + lib/base/crit.cpp \ + lib/base/dnsdmain.cpp \ + lib/base/ereport.cpp \ + lib/base/file.cpp \ + lib/base/fsmutex.cpp \ + lib/base/nscperror.c \ + lib/base/plist.cpp \ + lib/base/pool.cpp \ + lib/base/shexp.cpp \ + lib/base/system.cpp \ + lib/base/systhr.cpp \ + lib/base/util.cpp \ + lib/libsi18n/getstrprop.c \ + lib/libsi18n/reshash.c \ + lib/libsi18n/txtfile.c + +libns_dshttpd_la_CPPFLAGS = -I$(srcdir)/include/base $(AM_CPPFLAGS) $(DSPLUGIN_CPPFLAGS) -I$(srcdir)/lib/ldaputil +libns_dshttpd_la_LIBADD = libslapd.la libldaputil.la $(LDAPSDK_LINK) $(SASL_LINK) $(NSS_LINK) $(NSPR_LINK) +# Mark that this is a per version library. +libns_dshttpd_la_LDFLAGS = -release @PACKAGE_VERSION@ + +#------------------------ +# libslapd +#------------------------ +libslapd_la_SOURCES = ldap/servers/slapd/add.c \ + ldap/servers/slapd/agtmmap.c \ + ldap/servers/slapd/apibroker.c \ + ldap/servers/slapd/attr.c \ + ldap/servers/slapd/attrlist.c \ + ldap/servers/slapd/attrsyntax.c \ + ldap/servers/slapd/auditlog.c \ + ldap/servers/slapd/ava.c \ + ldap/servers/slapd/backend.c \ + ldap/servers/slapd/backend_manager.c \ + ldap/servers/slapd/bitset.c \ + ldap/servers/slapd/bulk_import.c \ + ldap/servers/slapd/charray.c \ + ldap/servers/slapd/ch_malloc.c \ + ldap/servers/slapd/computed.c \ + ldap/servers/slapd/control.c \ + ldap/servers/slapd/configdse.c \ + ldap/servers/slapd/counters.c \ + ldap/servers/slapd/csn.c \ + ldap/servers/slapd/csngen.c \ + ldap/servers/slapd/csnset.c \ + ldap/servers/slapd/defbackend.c \ + ldap/servers/slapd/delete.c \ + ldap/servers/slapd/dl.c \ + ldap/servers/slapd/dn.c \ + ldap/servers/slapd/dse.c \ + ldap/servers/slapd/dynalib.c \ + ldap/servers/slapd/entry.c \ + ldap/servers/slapd/entrywsi.c \ + ldap/servers/slapd/errormap.c \ + ldap/servers/slapd/eventq.c \ + ldap/servers/slapd/factory.c \ + ldap/servers/slapd/features.c \ + ldap/servers/slapd/fileio.c \ + ldap/servers/slapd/filter.c \ + ldap/servers/slapd/filtercmp.c \ + ldap/servers/slapd/filterentry.c \ + ldap/servers/slapd/generation.c \ + ldap/servers/slapd/getfilelist.c \ + ldap/servers/slapd/ldaputil.c \ + ldap/servers/slapd/lenstr.c \ + ldap/servers/slapd/libglobs.c \ + ldap/servers/slapd/localhost.c \ + ldap/servers/slapd/log.c \ + ldap/servers/slapd/mapping_tree.c \ + ldap/servers/slapd/match.c \ + ldap/servers/slapd/modify.c \ + ldap/servers/slapd/modrdn.c \ + ldap/servers/slapd/modutil.c \ + ldap/servers/slapd/object.c \ + ldap/servers/slapd/objset.c \ + ldap/servers/slapd/operation.c \ + ldap/servers/slapd/opshared.c \ + ldap/servers/slapd/pagedresults.c \ + 
ldap/servers/slapd/pblock.c \ + ldap/servers/slapd/plugin.c \ + ldap/servers/slapd/plugin_acl.c \ + ldap/servers/slapd/plugin_mmr.c \ + ldap/servers/slapd/plugin_internal_op.c \ + ldap/servers/slapd/plugin_mr.c \ + ldap/servers/slapd/plugin_role.c \ + ldap/servers/slapd/plugin_syntax.c \ + ldap/servers/slapd/protect_db.c \ + ldap/servers/slapd/proxyauth.c \ + ldap/servers/slapd/pw.c \ + ldap/servers/slapd/pw_retry.c \ + ldap/servers/slapd/rdn.c \ + ldap/servers/slapd/referral.c \ + ldap/servers/slapd/regex.c \ + ldap/servers/slapd/resourcelimit.c \ + ldap/servers/slapd/result.c \ + ldap/servers/slapd/sasl_map.c \ + ldap/servers/slapd/schema.c \ + ldap/servers/slapd/schemaparse.c \ + ldap/servers/slapd/security_wrappers.c \ + ldap/servers/slapd/slapd_plhash.c \ + ldap/servers/slapd/slapi_counter.c \ + ldap/servers/slapd/slapi2nspr.c \ + ldap/servers/slapd/snmp_collator.c \ + ldap/servers/slapd/sort.c \ + ldap/servers/slapd/ssl.c \ + ldap/servers/slapd/str2filter.c \ + ldap/servers/slapd/subentry.c \ + ldap/servers/slapd/task.c \ + ldap/servers/slapd/time.c \ + ldap/servers/slapd/thread_data.c \ + ldap/servers/slapd/uniqueid.c \ + ldap/servers/slapd/uniqueidgen.c \ + ldap/servers/slapd/utf8.c \ + ldap/servers/slapd/utf8compare.c \ + ldap/servers/slapd/util.c \ + ldap/servers/slapd/uuid.c \ + ldap/servers/slapd/value.c \ + ldap/servers/slapd/valueset.c \ + ldap/servers/slapd/vattr.c \ + ldap/servers/slapd/slapi_pal.c \ + $(libavl_a_SOURCES) + +libslapd_la_CPPFLAGS = $(AM_CPPFLAGS) $(DSPLUGIN_CPPFLAGS) $(SASL_CFLAGS) @db_inc@ $(KERBEROS_CFLAGS) $(PCRE_CFLAGS) $(SDS_CPPFLAGS) $(SVRCORE_INCLUDES) +libslapd_la_LIBADD = $(LDAPSDK_LINK) $(SASL_LINK) $(NSS_LINK) $(NSPR_LINK) $(KERBEROS_LIBS) $(PCRE_LIBS) $(THREADLIB) $(SYSTEMD_LIBS) libsds.la libsvrcore.la +libslapd_la_LDFLAGS = $(AM_LDFLAGS) $(SLAPD_LDFLAGS) + +if RUST_ENABLE +libslapd_la_LIBADD += $(RSLAPD_LIB) +libslapd_la_LDFLAGS += -lssl -lcrypto +endif + + + +#//////////////////////////////////////////////////////////////// +# +# Plugins +# +#//////////////////////////////////////////////////////////////// +#------------------------ +# libback-ldbm +#------------------------ +libback_ldbm_la_SOURCES = ldap/servers/slapd/back-ldbm/ancestorid.c \ + ldap/servers/slapd/back-ldbm/archive.c \ + ldap/servers/slapd/back-ldbm/backentry.c \ + ldap/servers/slapd/back-ldbm/cache.c \ + ldap/servers/slapd/back-ldbm/cleanup.c \ + ldap/servers/slapd/back-ldbm/close.c \ + ldap/servers/slapd/back-ldbm/dblayer.c \ + ldap/servers/slapd/back-ldbm/dbsize.c \ + ldap/servers/slapd/back-ldbm/dn2entry.c \ + ldap/servers/slapd/back-ldbm/entrystore.c \ + ldap/servers/slapd/back-ldbm/filterindex.c \ + ldap/servers/slapd/back-ldbm/findentry.c \ + ldap/servers/slapd/back-ldbm/haschildren.c \ + ldap/servers/slapd/back-ldbm/id2entry.c \ + ldap/servers/slapd/back-ldbm/idl.c \ + ldap/servers/slapd/back-ldbm/idl_shim.c \ + ldap/servers/slapd/back-ldbm/idl_new.c \ + ldap/servers/slapd/back-ldbm/idl_set.c \ + ldap/servers/slapd/back-ldbm/idl_common.c \ + ldap/servers/slapd/back-ldbm/import.c \ + ldap/servers/slapd/back-ldbm/index.c \ + ldap/servers/slapd/back-ldbm/init.c \ + ldap/servers/slapd/back-ldbm/instance.c \ + ldap/servers/slapd/back-ldbm/ldbm_abandon.c \ + ldap/servers/slapd/back-ldbm/ldbm_add.c \ + ldap/servers/slapd/back-ldbm/ldbm_attr.c \ + ldap/servers/slapd/back-ldbm/ldbm_attrcrypt.c \ + ldap/servers/slapd/back-ldbm/ldbm_attrcrypt_config.c \ + ldap/servers/slapd/back-ldbm/ldbm_bind.c \ + ldap/servers/slapd/back-ldbm/ldbm_compare.c \ + 
ldap/servers/slapd/back-ldbm/ldbm_config.c \ + ldap/servers/slapd/back-ldbm/ldbm_delete.c \ + ldap/servers/slapd/back-ldbm/ldbm_entryrdn.c \ + ldap/servers/slapd/back-ldbm/ldbm_index_config.c \ + ldap/servers/slapd/back-ldbm/ldbm_instance_config.c \ + ldap/servers/slapd/back-ldbm/ldbm_modify.c \ + ldap/servers/slapd/back-ldbm/ldbm_modrdn.c \ + ldap/servers/slapd/back-ldbm/ldbm_search.c \ + ldap/servers/slapd/back-ldbm/ldbm_unbind.c \ + ldap/servers/slapd/back-ldbm/ldbm_usn.c \ + ldap/servers/slapd/back-ldbm/ldif2ldbm.c \ + ldap/servers/slapd/back-ldbm/dbverify.c \ + ldap/servers/slapd/back-ldbm/matchrule.c \ + ldap/servers/slapd/back-ldbm/misc.c \ + ldap/servers/slapd/back-ldbm/nextid.c \ + ldap/servers/slapd/back-ldbm/parents.c \ + ldap/servers/slapd/back-ldbm/perfctrs.c \ + ldap/servers/slapd/back-ldbm/rmdb.c \ + ldap/servers/slapd/back-ldbm/seq.c \ + ldap/servers/slapd/back-ldbm/sort.c \ + ldap/servers/slapd/back-ldbm/start.c \ + ldap/servers/slapd/back-ldbm/uniqueid2entry.c \ + ldap/servers/slapd/back-ldbm/vlv.c \ + ldap/servers/slapd/back-ldbm/vlv_key.c \ + ldap/servers/slapd/back-ldbm/vlv_srch.c \ + ldap/servers/slapd/back-ldbm/db-bdb/bdb_config.c \ + ldap/servers/slapd/back-ldbm/db-bdb/bdb_instance_config.c \ + ldap/servers/slapd/back-ldbm/db-bdb/bdb_verify.c \ + ldap/servers/slapd/back-ldbm/db-bdb/bdb_layer.c \ + ldap/servers/slapd/back-ldbm/db-bdb/bdb_misc.c \ + ldap/servers/slapd/back-ldbm/db-bdb/bdb_upgrade.c \ + ldap/servers/slapd/back-ldbm/db-bdb/bdb_version.c \ + ldap/servers/slapd/back-ldbm/db-bdb/bdb_monitor.c \ + ldap/servers/slapd/back-ldbm/db-bdb/bdb_ldif2db.c \ + ldap/servers/slapd/back-ldbm/db-bdb/bdb_import.c \ + ldap/servers/slapd/back-ldbm/db-bdb/bdb_import_threads.c + + +libback_ldbm_la_CPPFLAGS = $(AM_CPPFLAGS) $(DSPLUGIN_CPPFLAGS) @db_inc@ +libback_ldbm_la_DEPENDENCIES = libslapd.la +libback_ldbm_la_LIBADD = libslapd.la $(DB_LINK) $(LDAPSDK_LINK) $(NSPR_LINK) +libback_ldbm_la_LDFLAGS = -avoid-version + +#------------------------ +# libacctpolicy-plugin +#------------------------ +libacctpolicy_plugin_la_SOURCES = ldap/servers/plugins/acctpolicy/acct_config.c \ + ldap/servers/plugins/acctpolicy/acct_init.c \ + ldap/servers/plugins/acctpolicy/acct_plugin.c \ + ldap/servers/plugins/acctpolicy/acct_util.c + +libacctpolicy_plugin_la_CPPFLAGS = $(AM_CPPFLAGS) $(DSPLUGIN_CPPFLAGS) +libacctpolicy_plugin_la_LIBADD = libslapd.la $(NSPR_LINK) +libacctpolicy_plugin_la_DEPENDENCIES = libslapd.la +libacctpolicy_plugin_la_LDFLAGS = -avoid-version + +#------------------------ +# libacctusability-plugin +#------------------------ +libacctusability_plugin_la_SOURCES = ldap/servers/plugins/acct_usability/acct_usability.c + +libacctusability_plugin_la_CPPFLAGS = $(AM_CPPFLAGS) $(DSPLUGIN_CPPFLAGS) +libacctusability_plugin_la_LIBADD = libslapd.la $(NSPR_LINK) +libacctusability_plugin_la_DEPENDENCIES = libslapd.la +libacctusability_plugin_la_LDFLAGS = -avoid-version + +#------------------------ +# libacl-plugin +#------------------------ +libacl_plugin_la_SOURCES = ldap/servers/plugins/acl/acl.c \ + ldap/servers/plugins/acl/acl_ext.c \ + ldap/servers/plugins/acl/aclanom.c \ + ldap/servers/plugins/acl/acleffectiverights.c \ + ldap/servers/plugins/acl/aclgroup.c \ + ldap/servers/plugins/acl/aclinit.c \ + ldap/servers/plugins/acl/acllas.c \ + ldap/servers/plugins/acl/acllist.c \ + ldap/servers/plugins/acl/aclparse.c \ + ldap/servers/plugins/acl/aclplugin.c \ + ldap/servers/plugins/acl/aclutil.c + +libacl_plugin_la_CPPFLAGS = -I$(srcdir)/include/libaccess $(AM_CPPFLAGS) 
$(DSPLUGIN_CPPFLAGS) +libacl_plugin_la_DEPENDENCIES = libslapd.la libns-dshttpd.la +libacl_plugin_la_LIBADD = libslapd.la libns-dshttpd.la $(LDAPSDK_LINK) $(NSPR_LINK) $(LIBCSTD) $(LIBCRUN) +libacl_plugin_la_LDFLAGS = -avoid-version +# libacl_plugin_la_LINK = $(CXXLINK) -avoid-version + +#------------------------ +# libaddn-plugin +#------------------------ +libaddn_plugin_la_SOURCES = ldap/servers/plugins/addn/addn.c + +libaddn_plugin_la_CPPFLAGS = $(AM_CPPFLAGS) $(DSPLUGIN_CPPFLAGS) +libaddn_plugin_la_LIBADD = libslapd.la $(NSPR_LINK) +libaddn_plugin_la_DEPENDENCIES = libslapd.la +libaddn_plugin_la_LDFLAGS = -avoid-version + +#------------------------ +# librootdn-access-plugin +#------------------------ +# +librootdn_access_plugin_la_SOURCES = ldap/servers/plugins/rootdn_access/rootdn_access.c + +librootdn_access_plugin_la_CPPFLAGS = $(AM_CPPFLAGS) $(DSPLUGIN_CPPFLAGS) +librootdn_access_plugin_la_LIBADD = libslapd.la $(NSPR_LINK) +librootdn_access_plugin_la_DEPENDENCIES = libslapd.la +librootdn_access_plugin_la_LDFLAGS = -avoid-version + + +#------------------------ +# libautomember-plugin +#------------------------ +libautomember_plugin_la_SOURCES = ldap/servers/plugins/automember/automember.c + +libautomember_plugin_la_CPPFLAGS = $(AM_CPPFLAGS) $(DSPLUGIN_CPPFLAGS) +libautomember_plugin_la_LIBADD = libslapd.la $(NSPR_LINK) +libautomember_plugin_la_DEPENDENCIES = libslapd.la +libautomember_plugin_la_LDFLAGS = -avoid-version + +#------------------------ +# libattr-unique-plugin +#------------------------ +libattr_unique_plugin_la_SOURCES = ldap/servers/plugins/uiduniq/7bit.c \ + ldap/servers/plugins/uiduniq/uid.c \ + ldap/servers/plugins/uiduniq/utils.c + +libattr_unique_plugin_la_CPPFLAGS = $(AM_CPPFLAGS) $(DSPLUGIN_CPPFLAGS) +libattr_unique_plugin_la_LIBADD = libslapd.la $(NSPR_LINK) +libattr_unique_plugin_la_DEPENDENCIES = libslapd.la +libattr_unique_plugin_la_LDFLAGS = -avoid-version + +#------------------------ +# libbitwise-plugin +#------------------------ +libbitwise_plugin_la_SOURCES = ldap/servers/plugins/bitwise/bitwise.c + +libbitwise_plugin_la_CPPFLAGS = $(AM_CPPFLAGS) $(DSPLUGIN_CPPFLAGS) +libbitwise_plugin_la_LIBADD = libslapd.la +libbitwise_plugin_la_DEPENDENCIES = libslapd.la +libbitwise_plugin_la_LDFLAGS = -avoid-version + +#------------------------ +# libchainingdb-plugin +#------------------------ +libchainingdb_plugin_la_SOURCES = ldap/servers/plugins/chainingdb/cb_abandon.c \ + ldap/servers/plugins/chainingdb/cb_acl.c \ + ldap/servers/plugins/chainingdb/cb_add.c \ + ldap/servers/plugins/chainingdb/cb_bind.c \ + ldap/servers/plugins/chainingdb/cb_cleanup.c \ + ldap/servers/plugins/chainingdb/cb_close.c \ + ldap/servers/plugins/chainingdb/cb_compare.c \ + ldap/servers/plugins/chainingdb/cb_config.c \ + ldap/servers/plugins/chainingdb/cb_conn_stateless.c \ + ldap/servers/plugins/chainingdb/cb_controls.c \ + ldap/servers/plugins/chainingdb/cb_debug.c \ + ldap/servers/plugins/chainingdb/cb_delete.c \ + ldap/servers/plugins/chainingdb/cb_init.c \ + ldap/servers/plugins/chainingdb/cb_instance.c \ + ldap/servers/plugins/chainingdb/cb_modify.c \ + ldap/servers/plugins/chainingdb/cb_modrdn.c \ + ldap/servers/plugins/chainingdb/cb_monitor.c \ + ldap/servers/plugins/chainingdb/cb_schema.c \ + ldap/servers/plugins/chainingdb/cb_search.c \ + ldap/servers/plugins/chainingdb/cb_size.c \ + ldap/servers/plugins/chainingdb/cb_start.c \ + ldap/servers/plugins/chainingdb/cb_temp.c \ + ldap/servers/plugins/chainingdb/cb_test.c \ + ldap/servers/plugins/chainingdb/cb_unbind.c \ + 
ldap/servers/plugins/chainingdb/cb_utils.c + +libchainingdb_plugin_la_CPPFLAGS = $(AM_CPPFLAGS) $(DSPLUGIN_CPPFLAGS) +libchainingdb_plugin_la_LIBADD = libslapd.la $(LDAPSDK_LINK) $(NSPR_LINK) +libchainingdb_plugin_la_DEPENDENCIES = libslapd.la +libchainingdb_plugin_la_LDFLAGS = -avoid-version + +#------------------------ +# libcollation-plugin +#------------------------ +libcollation_plugin_la_SOURCES = ldap/servers/plugins/collation/collate.c \ + ldap/servers/plugins/collation/config.c \ + ldap/servers/plugins/collation/orfilter.c + +libcollation_plugin_la_CPPFLAGS = $(AM_CPPFLAGS) $(DSPLUGIN_CPPFLAGS) $(ICU_CFLAGS) +libcollation_plugin_la_LIBADD = libslapd.la $(LDAPSDK_LINK) $(NSPR_LINK) $(ICU_LIBS) $(LIBCSTD) $(LIBCRUN) +libcollation_plugin_la_DEPENDENCIES = libslapd.la +libcollation_plugin_la_LDFLAGS = -avoid-version +# libcollation_plugin_la_LINK = $(CXXLINK) -avoid-version + +#------------------------ +# libcos-plugin +#------------------------ +libcos_plugin_la_SOURCES = ldap/servers/plugins/cos/cos.c \ + ldap/servers/plugins/cos/cos_cache.c + +libcos_plugin_la_CPPFLAGS = $(AM_CPPFLAGS) $(DSPLUGIN_CPPFLAGS) +libcos_plugin_la_LIBADD = libslapd.la $(LDAPSDK_LINK) $(NSPR_LINK) +libcos_plugin_la_DEPENDENCIES = libslapd.la +libcos_plugin_la_LDFLAGS = -avoid-version + +#------------------------ +# libderef-plugin +#----------------------- +libderef_plugin_la_SOURCES = ldap/servers/plugins/deref/deref.c + +libderef_plugin_la_CPPFLAGS = $(AM_CPPFLAGS) $(DSPLUGIN_CPPFLAGS) +libderef_plugin_la_LIBADD = libslapd.la $(LDAPSDK_LINK) $(NSPR_LINK) +libderef_plugin_la_DEPENDENCIES = libslapd.la +libderef_plugin_la_LDFLAGS = -avoid-version + +#------------------------ +# libpbe-plugin +#----------------------- +libpbe_plugin_la_SOURCES = ldap/servers/plugins/rever/pbe.c \ + ldap/servers/plugins/rever/rever.c + +libpbe_plugin_la_CPPFLAGS = $(AM_CPPFLAGS) $(DSPLUGIN_CPPFLAGS) $(SVRCORE_INCLUDES) +libpbe_plugin_la_LIBADD = libslapd.la libsvrcore.la $(NSS_LINK) +libpbe_plugin_la_DEPENDENCIES = libslapd.la +libpbe_plugin_la_LDFLAGS = -avoid-version + +#------------------------ +# libdistrib-plugin +#------------------------ +libdistrib_plugin_la_SOURCES = ldap/servers/plugins/distrib/distrib.c + +libdistrib_plugin_la_CPPFLAGS = $(AM_CPPFLAGS) $(DSPLUGIN_CPPFLAGS) +libdistrib_plugin_la_LIBADD = libslapd.la +libdistrib_plugin_la_DEPENDENCIES = libslapd.la +libdistrib_plugin_la_LDFLAGS = -avoid-version + +#------------------------ +# libdna-plugin +#------------------------ +libdna_plugin_la_SOURCES = ldap/servers/plugins/dna/dna.c + +libdna_plugin_la_CPPFLAGS = $(AM_CPPFLAGS) $(DSPLUGIN_CPPFLAGS) +libdna_plugin_la_LIBADD = libslapd.la $(LDAPSDK_LINK) $(NSPR_LINK) +libdna_plugin_la_DEPENDENCIES = libslapd.la +libdna_plugin_la_LDFLAGS = -avoid-version + +#------------------------ +# libhttp-client-plugin +#------------------------ +libhttp_client_plugin_la_SOURCES = ldap/servers/plugins/http/http_client.c \ + ldap/servers/plugins/http/http_impl.c + +libhttp_client_plugin_la_CPPFLAGS = $(AM_CPPFLAGS) $(DSPLUGIN_CPPFLAGS) +libhttp_client_plugin_la_LIBADD = libslapd.la $(NSS_LINK) $(NSPR_LINK) +libhttp_client_plugin_la_DEPENDENCIES = libslapd.la +libhttp_client_plugin_la_LDFLAGS = -avoid-version + +#------------------------ +# liblinkedattrs-plugin +#------------------------ +liblinkedattrs_plugin_la_SOURCES = ldap/servers/plugins/linkedattrs/fixup_task.c \ + ldap/servers/plugins/linkedattrs/linked_attrs.c + +liblinkedattrs_plugin_la_CPPFLAGS = $(AM_CPPFLAGS) $(DSPLUGIN_CPPFLAGS) 
+liblinkedattrs_plugin_la_LIBADD = libslapd.la $(NSPR_LINK) +liblinkedattrs_plugin_la_DEPENDENCIES = libslapd.la +liblinkedattrs_plugin_la_LDFLAGS = -avoid-version + +#------------------------ +# libmanagedentries-plugin +#------------------------ +libmanagedentries_plugin_la_SOURCES = ldap/servers/plugins/mep/mep.c + +libmanagedentries_plugin_la_CPPFLAGS = $(AM_CPPFLAGS) $(DSPLUGIN_CPPFLAGS) +libmanagedentries_plugin_la_LIBADD = libslapd.la $(NSPR_LINK) +libmanagedentries_plugin_la_DEPENDENCIES = libslapd.la +libmanagedentries_plugin_la_LDFLAGS = -avoid-version + +#------------------------ +# libmemberof-plugin +#------------------------ +libmemberof_plugin_la_SOURCES= ldap/servers/plugins/memberof/memberof.c \ + ldap/servers/plugins/memberof/memberof_config.c + +libmemberof_plugin_la_CPPFLAGS = $(AM_CPPFLAGS) $(DSPLUGIN_CPPFLAGS) +libmemberof_plugin_la_LIBADD = libslapd.la $(LDAPSDK_LINK) $(NSPR_LINK) +libmemberof_plugin_la_DEPENDENCIES = libslapd.la +libmemberof_plugin_la_LDFLAGS = -avoid-version + +#------------------------ +# libpam-passthru-plugin +#------------------------ +libpam_passthru_plugin_la_SOURCES = ldap/servers/plugins/pam_passthru/pam_ptconfig.c \ + ldap/servers/plugins/pam_passthru/pam_ptdebug.c \ + ldap/servers/plugins/pam_passthru/pam_ptimpl.c \ + ldap/servers/plugins/pam_passthru/pam_ptpreop.c + +libpam_passthru_plugin_la_CPPFLAGS = $(AM_CPPFLAGS) $(DSPLUGIN_CPPFLAGS) +libpam_passthru_plugin_la_LIBADD = libslapd.la $(LDAPSDK_LINK) $(NSPR_LINK) $(PAM_LINK) +libpam_passthru_plugin_la_DEPENDENCIES = libslapd.la +libpam_passthru_plugin_la_LDFLAGS = -avoid-version + +#------------------------ +# libpassthru-plugin +#------------------------ +libpassthru_plugin_la_SOURCES = ldap/servers/plugins/passthru/ptbind.c \ + ldap/servers/plugins/passthru/ptconfig.c \ + ldap/servers/plugins/passthru/ptconn.c \ + ldap/servers/plugins/passthru/ptdebug.c \ + ldap/servers/plugins/passthru/ptpreop.c \ + ldap/servers/plugins/passthru/ptutil.c + +libpassthru_plugin_la_CPPFLAGS = $(AM_CPPFLAGS) $(DSPLUGIN_CPPFLAGS) +libpassthru_plugin_la_LIBADD = libslapd.la $(LDAPSDK_LINK) $(NSPR_LINK) +libpassthru_plugin_la_DEPENDENCIES = libslapd.la +libpassthru_plugin_la_LDFLAGS = -avoid-version + +#------------------------ +# libposix-winsync-plugin +#------------------------ +libposix_winsync_plugin_la_SOURCES = ldap/servers/plugins/posix-winsync/posix-winsync.c \ + ldap/servers/plugins/posix-winsync/posix-group-func.c \ + ldap/servers/plugins/posix-winsync/posix-group-task.c \ + ldap/servers/plugins/posix-winsync/posix-winsync-config.c + +libposix_winsync_plugin_la_CPPFLAGS = $(AM_CPPFLAGS) $(DSPLUGIN_CPPFLAGS) -DWINSYNC_TEST_POSIX \ + -I$(srcdir)/ldap/servers/plugins/replication +libposix_winsync_plugin_la_LIBADD = libslapd.la $(LDAPSDK_LINK) $(NSPR_LINK) +libposix_winsync_plugin_la_DEPENDENCIES = libslapd.la +libposix_winsync_plugin_la_LDFLAGS = -avoid-version + +#------------------------ +# libpresence-plugin +#------------------------ +libpresence_plugin_la_SOURCES = ldap/servers/plugins/presence/presence.c + +libpresence_plugin_la_CPPFLAGS = -I$(srcdir)/ldap/servers/plugins/http $(AM_CPPFLAGS) $(DSPLUGIN_CPPFLAGS) +libpresence_plugin_la_LIBADD = libslapd.la +libpresence_plugin_la_DEPENDENCIES = libslapd.la +libpresence_plugin_la_LDFLAGS = -avoid-version + +#------------------------ +# libpwdstorage-plugin +#------------------------ +libpwdstorage_plugin_la_SOURCES = ldap/servers/plugins/pwdstorage/clear_pwd.c \ + ldap/servers/plugins/pwdstorage/crypt_pwd.c \ + 
ldap/servers/plugins/pwdstorage/md5_pwd.c \ + ldap/servers/plugins/pwdstorage/md5c.c \ + ldap/servers/plugins/pwdstorage/ns-mta-md5_pwd.c \ + ldap/servers/plugins/pwdstorage/pwd_init.c \ + ldap/servers/plugins/pwdstorage/pwd_util.c \ + ldap/servers/plugins/pwdstorage/sha_pwd.c \ + ldap/servers/plugins/pwdstorage/smd5_pwd.c \ + ldap/servers/plugins/pwdstorage/ssha_pwd.c \ + ldap/servers/plugins/pwdstorage/pbkdf2_pwd.c + +libpwdstorage_plugin_la_CPPFLAGS = $(AM_CPPFLAGS) $(DSPLUGIN_CPPFLAGS) +libpwdstorage_plugin_la_LIBADD = libslapd.la $(NSS_LINK) $(NSPR_LINK) $(LIBCRYPT) +libpwdstorage_plugin_la_DEPENDENCIES = libslapd.la +libpwdstorage_plugin_la_LDFLAGS = -avoid-version + +#------------------------ +# libcontentsync-plugin +#------------------------ +libcontentsync_plugin_la_SOURCES = ldap/servers/plugins/sync/sync_init.c \ + ldap/servers/plugins/sync/sync_util.c \ + ldap/servers/plugins/sync/sync_refresh.c \ + ldap/servers/plugins/sync/sync_persist.c + +libcontentsync_plugin_la_CPPFLAGS = $(AM_CPPFLAGS) $(DSPLUGIN_CPPFLAGS) +libcontentsync_plugin_la_LIBADD = libslapd.la $(NSS_LINK) $(NSPR_LINK) $(LIBCRYPT) +libcontentsync_plugin_la_DEPENDENCIES = libslapd.la +libcontentsync_plugin_la_LDFLAGS = -avoid-version + +#------------------------ +# libreferint-plugin +#------------------------ +libreferint_plugin_la_SOURCES = ldap/servers/plugins/referint/referint.c + +libreferint_plugin_la_CPPFLAGS = $(AM_CPPFLAGS) $(DSPLUGIN_CPPFLAGS) +libreferint_plugin_la_LIBADD = libslapd.la $(LDAPSDK_LINK) $(NSPR_LINK) +libreferint_plugin_la_DEPENDENCIES = libslapd.la +libreferint_plugin_la_LDFLAGS = -avoid-version + +#------------------------ +# libreplication-plugin +#------------------------ +libreplication_plugin_la_SOURCES = ldap/servers/plugins/replication/cl5_api.c \ + ldap/servers/plugins/replication/cl5_clcache.c \ + ldap/servers/plugins/replication/cl5_config.c \ + ldap/servers/plugins/replication/cl5_init.c \ + ldap/servers/plugins/replication/cl_crypt.c \ + ldap/servers/plugins/replication/csnpl.c \ + ldap/servers/plugins/replication/llist.c \ + ldap/servers/plugins/replication/repl_connext.c \ + ldap/servers/plugins/replication/repl_controls.c \ + ldap/servers/plugins/replication/repl_ext.c \ + ldap/servers/plugins/replication/repl_extop.c \ + ldap/servers/plugins/replication/repl_globals.c \ + ldap/servers/plugins/replication/repl_opext.c \ + ldap/servers/plugins/replication/repl_session_plugin.c \ + ldap/servers/plugins/replication/repl5_agmt.c \ + ldap/servers/plugins/replication/repl5_agmtlist.c \ + ldap/servers/plugins/replication/repl5_backoff.c \ + ldap/servers/plugins/replication/repl5_connection.c \ + ldap/servers/plugins/replication/repl5_inc_protocol.c \ + ldap/servers/plugins/replication/repl5_init.c \ + ldap/servers/plugins/replication/repl5_mtnode_ext.c \ + ldap/servers/plugins/replication/repl5_plugins.c \ + ldap/servers/plugins/replication/repl5_protocol.c \ + ldap/servers/plugins/replication/repl5_protocol_util.c \ + ldap/servers/plugins/replication/repl5_replica.c \ + ldap/servers/plugins/replication/repl5_replica_config.c \ + ldap/servers/plugins/replication/repl5_replica_dnhash.c \ + ldap/servers/plugins/replication/repl5_replica_hash.c \ + ldap/servers/plugins/replication/repl5_ruv.c \ + ldap/servers/plugins/replication/repl5_schedule.c \ + ldap/servers/plugins/replication/repl5_tot_protocol.c \ + ldap/servers/plugins/replication/repl5_total.c \ + ldap/servers/plugins/replication/repl5_updatedn_list.c \ + ldap/servers/plugins/replication/replutil.c \ + 
ldap/servers/plugins/replication/urp.c \ + ldap/servers/plugins/replication/urp_glue.c \ + ldap/servers/plugins/replication/urp_tombstone.c \ + ldap/servers/plugins/replication/windows_connection.c \ + ldap/servers/plugins/replication/windows_inc_protocol.c \ + ldap/servers/plugins/replication/windows_private.c \ + ldap/servers/plugins/replication/windows_protocol_util.c \ + ldap/servers/plugins/replication/windows_tot_protocol.c + +libreplication_plugin_la_CPPFLAGS = $(AM_CPPFLAGS) $(DSPLUGIN_CPPFLAGS) $(ICU_CFLAGS) @db_inc@ +libreplication_plugin_la_LIBADD = libslapd.la $(LDAPSDK_LINK) $(NSS_LINK) $(NSPR_LINK) $(ICU_LIBS) $(DB_LINK) +libreplication_plugin_la_DEPENDENCIES = libslapd.la +libreplication_plugin_la_LDFLAGS = -avoid-version + +#------------------------ +# libretrocl-plugin +#------------------------ +libretrocl_plugin_la_SOURCES = ldap/servers/plugins/retrocl/retrocl.c \ + ldap/servers/plugins/retrocl/retrocl_cn.c \ + ldap/servers/plugins/retrocl/retrocl_create.c \ + ldap/servers/plugins/retrocl/retrocl_po.c \ + ldap/servers/plugins/retrocl/retrocl_rootdse.c \ + ldap/servers/plugins/retrocl/retrocl_trim.c + +libretrocl_plugin_la_CPPFLAGS = $(AM_CPPFLAGS) $(DSPLUGIN_CPPFLAGS) +libretrocl_plugin_la_LIBADD = libslapd.la $(LDAPSDK_LINK) $(NSPR_LINK) +libretrocl_plugin_la_DEPENDENCIES = libslapd.la +libretrocl_plugin_la_LDFLAGS = -avoid-version + +#------------------------ +# libroles-plugin +#------------------------ +libroles_plugin_la_SOURCES = ldap/servers/plugins/roles/roles_cache.c \ + ldap/servers/plugins/roles/roles_plugin.c + +libroles_plugin_la_CPPFLAGS = $(AM_CPPFLAGS) $(DSPLUGIN_CPPFLAGS) +libroles_plugin_la_LIBADD = libslapd.la $(NSPR_LINK) +libroles_plugin_la_DEPENDENCIES = libslapd.la +libroles_plugin_la_LDFLAGS = -avoid-version + +#------------------------ +# libschemareload-plugin +#------------------------ +libschemareload_plugin_la_SOURCES = ldap/servers/plugins/schema_reload/schema_reload.c + +libschemareload_plugin_la_CPPFLAGS = $(AM_CPPFLAGS) $(DSPLUGIN_CPPFLAGS) +libschemareload_plugin_la_LIBADD = libslapd.la $(NSPR_LINK) +libschemareload_plugin_la_DEPENDENCIES = libslapd.la +libschemareload_plugin_la_LDFLAGS = -avoid-version + +#------------------------ +# libstatechange-plugin +#------------------------ +libstatechange_plugin_la_SOURCES = ldap/servers/plugins/statechange/statechange.c + +libstatechange_plugin_la_CPPFLAGS = $(AM_CPPFLAGS) $(DSPLUGIN_CPPFLAGS) +libstatechange_plugin_la_LIBADD = libslapd.la +libstatechange_plugin_la_DEPENDENCIES = libslapd.la +libstatechange_plugin_la_LDFLAGS = -avoid-version + +#------------------------ +# libsyntax-plugin +#------------------------ +libsyntax_plugin_la_SOURCES = ldap/servers/plugins/syntaxes/bin.c \ + ldap/servers/plugins/syntaxes/bitstring.c \ + ldap/servers/plugins/syntaxes/ces.c \ + ldap/servers/plugins/syntaxes/cis.c \ + ldap/servers/plugins/syntaxes/debug.c \ + ldap/servers/plugins/syntaxes/dn.c \ + ldap/servers/plugins/syntaxes/deliverymethod.c \ + ldap/servers/plugins/syntaxes/facsimile.c \ + ldap/servers/plugins/syntaxes/guide.c \ + ldap/servers/plugins/syntaxes/int.c \ + ldap/servers/plugins/syntaxes/nameoptuid.c \ + ldap/servers/plugins/syntaxes/numericstring.c \ + ldap/servers/plugins/syntaxes/phonetic.c \ + ldap/servers/plugins/syntaxes/sicis.c \ + ldap/servers/plugins/syntaxes/string.c \ + ldap/servers/plugins/syntaxes/syntax_common.c \ + ldap/servers/plugins/syntaxes/tel.c \ + ldap/servers/plugins/syntaxes/telex.c \ + ldap/servers/plugins/syntaxes/teletex.c \ + 
ldap/servers/plugins/syntaxes/validate.c \ + ldap/servers/plugins/syntaxes/validate_task.c \ + ldap/servers/plugins/syntaxes/value.c + +libsyntax_plugin_la_CPPFLAGS = $(AM_CPPFLAGS) $(DSPLUGIN_CPPFLAGS) +libsyntax_plugin_la_LIBADD = libslapd.la $(LDAPSDK_LINK) $(NSPR_LINK) +libsyntax_plugin_la_DEPENDENCIES = libslapd.la +libsyntax_plugin_la_LDFLAGS = -avoid-version + +#------------------------ +# libusn-plugin +#------------------------ +libusn_plugin_la_SOURCES = ldap/servers/plugins/usn/usn.c \ + ldap/servers/plugins/usn/usn_cleanup.c + +libusn_plugin_la_CPPFLAGS = $(AM_CPPFLAGS) $(DSPLUGIN_CPPFLAGS) +libusn_plugin_la_LIBADD = libslapd.la $(LDAPSDK_LINK) $(NSPR_LINK) +libusn_plugin_la_DEPENDENCIES = libslapd.la +libusn_plugin_la_LDFLAGS = -avoid-version + +#------------------------ +# libviews-plugin +#------------------------ +libviews_plugin_la_SOURCES = ldap/servers/plugins/views/views.c + +libviews_plugin_la_CPPFLAGS = $(AM_CPPFLAGS) $(DSPLUGIN_CPPFLAGS) +libviews_plugin_la_LIBADD = libslapd.la $(LDAPSDK_LINK) $(NSPR_LINK) +libviews_plugin_la_DEPENDENCIES = libslapd.la +libviews_plugin_la_LDFLAGS = -avoid-version + +#------------------------ +# libwhoami-plugin +#------------------------ +libwhoami_plugin_la_SOURCES = ldap/servers/plugins/whoami/whoami.c + +libwhoami_plugin_la_CPPFLAGS = $(AM_CPPFLAGS) $(DSPLUGIN_CPPFLAGS) +libwhoami_plugin_la_LIBADD = libslapd.la $(LDAPSDK_LINK) $(NSPR_LINK) +libwhoami_plugin_la_DEPENDENCIES = libslapd.la +libwhoami_plugin_la_LDFLAGS = -avoid-version + +#------------------------ +#//////////////////////////////////////////////////////////////// +# +# Programs +# +#//////////////////////////////////////////////////////////////// +#------------------------ +# dbscan +#------------------------ +dbscan_SOURCES = ldap/servers/slapd/tools/dbscan.c + +dbscan_CPPFLAGS = @db_inc@ $(NSPR_INCLUDES) $(AM_CPPFLAGS) +dbscan_LDADD = $(NSPR_LINK) $(DB_LINK) + +#------------------------ +# infadd +#------------------------ +infadd_SOURCES = ldap/servers/slapd/tools/rsearch/addthread.c \ + ldap/servers/slapd/tools/rsearch/infadd.c \ + ldap/servers/slapd/tools/rsearch/nametable.c + +infadd_CPPFLAGS = $(AM_CPPFLAGS) $(DSPLUGIN_CPPFLAGS) +infadd_LDADD = $(NSPR_LINK) $(NSS_LINK) $(LDAPSDK_LINK) $(SASL_LINK) $(LIBSOCKET) + +#------------------------ +# ldap-agent +#------------------------ +ldap_agent_SOURCES = ldap/servers/snmp/main.c \ + ldap/servers/snmp/ldap-agent.c \ + ldap/servers/slapd/agtmmap.c + +ldap_agent_CPPFLAGS = $(AM_CPPFLAGS) $(DSPLUGIN_CPPFLAGS) @netsnmp_inc@ +ldap_agent_LDADD = $(LDAPSDK_LINK_NOTHR) $(SASL_LINK) $(NSS_LINK) $(NSPR_LINK) $(NETSNMP_LINK) $(THREADLIB) + + +#------------------------ +# ldclt +#------------------------ +ldclt_SOURCES = ldap/servers/slapd/tools/ldaptool-sasl.c \ + ldap/servers/slapd/tools/ldclt/data.c \ + ldap/servers/slapd/tools/ldclt/ldapfct.c \ + ldap/servers/slapd/tools/ldclt/ldclt.c \ + ldap/servers/slapd/tools/ldclt/ldcltU.c \ + ldap/servers/slapd/tools/ldclt/parser.c \ + ldap/servers/slapd/tools/ldclt/port.c \ + ldap/servers/slapd/tools/ldclt/scalab01.c \ + ldap/servers/slapd/tools/ldclt/threadMain.c \ + ldap/servers/slapd/tools/ldclt/utils.c \ + ldap/servers/slapd/tools/ldclt/version.c \ + ldap/servers/slapd/tools/ldclt/workarounds.c + +ldclt_CPPFLAGS = $(AM_CPPFLAGS) -I$(srcdir)/ldap/servers/slapd/tools $(DSPLUGIN_CPPFLAGS) $(SASL_CFLAGS) +ldclt_LDADD = $(NSPR_LINK) $(NSS_LINK) $(LDAPSDK_LINK) $(SASL_LINK) $(LIBNSL) $(LIBSOCKET) $(LIBDL) $(THREADLIB) + +#------------------------ +# ldif +#------------------------ 
+ldif_SOURCES = ldap/servers/slapd/tools/ldif.c + +ldif_CPPFLAGS = $(AM_CPPFLAGS) $(DSPLUGIN_CPPFLAGS) +ldif_LDADD = $(NSPR_LINK) $(NSS_LINK) $(LDAPSDK_LINK_NOTHR) $(SASL_LINK) + +#------------------------ +# migratecred +#------------------------ +migratecred_SOURCES = ldap/servers/slapd/tools/migratecred.c + +migratecred_CPPFLAGS = $(AM_CPPFLAGS) $(DSPLUGIN_CPPFLAGS) +migratecred_LDADD = libslapd.la libsvrcore.la $(NSPR_LINK) $(NSS_LINK) $(LDAPSDK_LINK) $(SASL_LINK) +migratecred_DEPENDENCIES = libslapd.la + +#------------------------ +# mmldif +#------------------------ +mmldif_SOURCES = ldap/servers/slapd/tools/mmldif.c + +mmldif_CPPFLAGS = $(AM_CPPFLAGS) $(DSPLUGIN_CPPFLAGS) +mmldif_LDADD = libslapd.la libsvrcore.la $(NSPR_LINK) $(NSS_LINK) $(LDAPSDK_LINK_NOTHR) $(SASL_LINK) +mmldif_DEPENDENCIES = libslapd.la + +#------------------------ +# ns-slapd +#------------------------ +if enable_ldapi + GETSOCKETPEER=ldap/servers/slapd/getsocketpeer.c + enable_ldapi = 1 +endif +if enable_autobind + enable_autobind = 1 +endif +if enable_auto_dn_suffix + enable_auto_dn_suffix = 1 +endif + +ns_slapd_SOURCES = ldap/servers/slapd/abandon.c \ + ldap/servers/slapd/auth.c \ + ldap/servers/slapd/bind.c \ + ldap/servers/slapd/compare.c \ + ldap/servers/slapd/config.c \ + ldap/servers/slapd/connection.c \ + ldap/servers/slapd/conntable.c \ + ldap/servers/slapd/daemon.c \ + ldap/servers/slapd/detach.c \ + ldap/servers/slapd/extendop.c \ + ldap/servers/slapd/fedse.c \ + ldap/servers/slapd/fileio.c \ + ldap/servers/slapd/getopt_ext.c \ + ldap/servers/slapd/globals.c \ + ldap/servers/slapd/house.c \ + ldap/servers/slapd/init.c \ + ldap/servers/slapd/main.c \ + ldap/servers/slapd/monitor.c \ + ldap/servers/slapd/passwd_extop.c \ + ldap/servers/slapd/psearch.c \ + ldap/servers/slapd/pw_mgmt.c \ + ldap/servers/slapd/pw_verify.c \ + ldap/servers/slapd/rootdse.c \ + ldap/servers/slapd/sasl_io.c \ + ldap/servers/slapd/saslbind.c \ + ldap/servers/slapd/search.c \ + ldap/servers/slapd/start_tls_extop.c \ + ldap/servers/slapd/strdup.c \ + ldap/servers/slapd/stubs.c \ + ldap/servers/slapd/tempnam.c \ + ldap/servers/slapd/unbind.c \ + $(GETSOCKETPEER) + +ns_slapd_CPPFLAGS = $(AM_CPPFLAGS) $(DSPLUGIN_CPPFLAGS) $(SASL_CFLAGS) $(SVRCORE_INCLUDES) +ns_slapd_LDADD = libslapd.la libldaputil.la libsvrcore.la $(LDAPSDK_LINK) $(NSS_LINK) $(LIBADD_DL) \ + $(NSPR_LINK) $(SASL_LINK) $(LIBNSL) $(LIBSOCKET) $(THREADLIB) $(SYSTEMD_LIBS) $(EVENT_LINK) +if RUST_ENABLE +ns_slapd_LDADD += $(RNSSLAPD_LIB) +ns_slapd_CPPFLAGS += -lssl -lcrypto +endif +ns_slapd_DEPENDENCIES = libslapd.la libldaputil.la +# We need to link ns-slapd with the C++ compiler on HP-UX since we load +# some C++ shared libraries (such as icu). 
+if HPUX +ns_slapd_LINK = $(CXXLINK) +else +ns_slapd_LINK = $(LINK) +endif + + +#------------------------ +# pwdhash +#------------------------ +pwdhash_SOURCES = ldap/servers/slapd/tools/pwenc.c + +pwdhash_CPPFLAGS = $(AM_CPPFLAGS) $(DSPLUGIN_CPPFLAGS) +pwdhash_LDADD = libslapd.la libsvrcore.la $(NSPR_LINK) $(NSS_LINK) $(LDAPSDK_LINK) $(SASL_LINK) +pwdhash_DEPENDENCIES = libslapd.la + +#------------------------ +# rsearch +#------------------------ +rsearch_SOURCES = ldap/servers/slapd/tools/rsearch/nametable.c \ + ldap/servers/slapd/tools/rsearch/rsearch.c \ + ldap/servers/slapd/tools/rsearch/sdattable.c \ + ldap/servers/slapd/tools/rsearch/searchthread.c + +rsearch_CPPFLAGS = $(AM_CPPFLAGS) $(DSPLUGIN_CPPFLAGS) +rsearch_LDADD = $(NSPR_LINK) $(NSS_LINK) $(LDAPSDK_LINK) $(SASL_LINK) $(LIBSOCKET) + +#------------------------- +# CMOCKA TEST PROGRAMS +#------------------------- +if ENABLE_CMOCKA + +check_PROGRAMS = test_slapd \ + test_libsds \ + benchmark_sds \ + benchmark_par_sds +# Mark all check programs for testing +TESTS = test_slapd \ + test_libsds + +test_slapd_SOURCES = test/main.c \ + test/libslapd/test.c \ + test/libslapd/counters/atomic.c \ + test/libslapd/pblock/analytics.c \ + test/libslapd/pblock/v3_compat.c \ + test/libslapd/schema/filter_validate.c \ + test/libslapd/operation/v3_compat.c \ + test/libslapd/spal/meminfo.c \ + test/plugins/test.c \ + test/plugins/pwdstorage/pbkdf2.c + +# We need to link a lot of plugins for this test. +test_slapd_LDADD = libslapd.la \ + libpwdstorage-plugin.la \ + $(NSS_LINK) $(NSPR_LINK) +test_slapd_LDFLAGS = $(AM_CPPFLAGS) $(CMOCKA_LINKS) +### WARNING: Slap.h needs cert.h, which requires the -I/lib/ldaputil!!! +### WARNING: Slap.h pulls ssl.h, which requires nss!!!! +# We need to pull in plugin header paths too: +test_slapd_CPPFLAGS = $(AM_CPPFLAGS) $(DSPLUGIN_CPPFLAGS) $(DSINTERNAL_CPPFLAGS) \ + -I$(srcdir)/ldap/servers/plugins/pwdstorage + +test_libsds_SOURCES = src/libsds/test/test_sds.c \ + src/libsds/test/test_sds_bpt.c \ + src/libsds/test/test_sds_cow.c \ + src/libsds/test/test_sds_set.c \ + src/libsds/test/test_sds_queue.c \ + src/libsds/test/test_sds_tqueue.c \ + src/libsds/test/test_sds_lqueue.c \ + src/libsds/test/test_sds_csiphash.c \ + src/libsds/test/test_sds_ht.c \ + src/libsds/test/test_fixtures.c + +test_libsds_LDFLAGS = $(ASAN_CFLAGS) $(MSAN_CFLAGS) $(TSAN_CFLAGS) $(UBSAN_CFLAGS) $(PROFILING_LINKS) $(CMOCKA_LINKS) +test_libsds_LDADD = libsds.la $(NSPR_LINK) +test_libsds_CPPFLAGS = $(AM_CPPFLAGS) $(CMOCKA_INCLUDES) $(SDS_CPPFLAGS) + +benchmark_sds_SOURCES = src/libsds/test/benchmark.c \ + $(libavl_a_SOURCES) +benchmark_sds_LDFLAGS = $(ASAN_CFLAGS) $(MSAN_CFLAGS) $(TSAN_CFLAGS) $(UBSAN_CFLAGS) $(PROFILING_LINKS) $(CMOCKA_LINKS) +benchmark_sds_LDADD = libsds.la $(NSPR_LINK) +benchmark_sds_CPPFLAGS = $(AM_CPPFLAGS) $(CMOCKA_INCLUDES) $(SDS_CPPFLAGS) $(DS_INCLUDES) + +benchmark_par_sds_SOURCES = src/libsds/test/benchmark_parwrap.c \ + src/libsds/test/benchmark_par.c \ + $(libavl_a_SOURCES) +benchmark_par_sds_LDFLAGS = $(ASAN_CFLAGS) $(MSAN_CFLAGS) $(TSAN_CFLAGS) $(UBSAN_CFLAGS) $(PROFILING_LINKS) $(CMOCKA_LINKS) +benchmark_par_sds_LDADD = libsds.la $(NSPR_LINK) +benchmark_par_sds_CPPFLAGS = $(AM_CPPFLAGS) $(CMOCKA_INCLUDES) $(SDS_CPPFLAGS) $(DS_INCLUDES) + +endif +#------------------------ +# end cmocka tests +#------------------------ + +# these are for the config files and scripts that we need to generate and replace +# the paths and other tokens with the real values set during configure/make +# note that we cannot just use 
AC_OUTPUT to do this for us, since it will do things like this: +# LD_LIBRARY_PATH = ${prefix}/lib/dirsrv +# i.e. it literally copies in '${prefix}' rather than expanding it out - we want this instead: +# LD_LIBRARY_PATH = /usr/lib/dirsrv +fixupcmd = sed \ + -e 's,@bindir\@,$(bindir),g' \ + -e 's,@sbindir\@,$(sbindir),g' \ + -e 's,@libdir\@,$(libdir),g' \ + -e 's,@libexecdir\@,$(libexecdir),g' \ + -e 's,@nss_libdir\@,$(nss_libdir),g' \ + -e 's,@ldaptool_bindir\@,$(ldaptool_bindir),g' \ + -e 's,@ldaptool_opts\@,$(ldaptool_opts),g' \ + -e 's,@plainldif_opts\@,$(plainldif_opts),g' \ + -e 's,@db_libdir\@,$(db_libdir),g' \ + -e 's,@db_bindir\@,$(db_bindir),g' \ + -e 's,@netsnmp_libdir\@,$(netsnmp_libdir),g' \ + -e 's,@pcre_libdir\@,$(pcre_libdir),g' \ + -e 's,@propertydir\@,$(propertydir),g' \ + -e 's,@datadir\@,$(datadir),g' \ + -e 's,@schemadir\@,$(schemadir),g' \ + -e 's,@serverdir\@,$(serverdir),g' \ + -e 's,@serverincdir\@,$(serverincdir),g' \ + -e 's,@serverplugindir\@,$(serverplugindir),g' \ + -e 's,@taskdir\@,$(taskdir),g' \ + -e 's,@configdir\@,$(configdir),g' \ + -e 's,@sysconfdir\@,$(sysconfdir),g' \ + -e 's,@localstatedir\@,$(localstatedir),g' \ + -e 's,@localrundir\@,$(localrundir),g' \ + -e 's,@infdir\@,$(infdir),g' \ + -e 's,@mibdir\@,$(mibdir),g' \ + -e 's,@cockpitdir\@,$(cockpitdir),g' \ + -e 's,@templatedir\@,$(sampledatadir),g' \ + -e 's,@systemschemadir\@,$(systemschemadir),g' \ + -e 's,@package_name\@,$(PACKAGE_NAME),g' \ + -e 's,@instconfigdir\@,$(instconfigdir),g' \ + -e 's,@enable_ldapi\@,$(enable_ldapi),g' \ + -e 's,@enable_pam_passthru\@,$(enable_pam_passthru),g' \ + -e 's,@enable_bitwise\@,$(enable_bitwise),g' \ + -e 's,@enable_dna\@,$(enable_dna),g' \ + -e 's,@enable_autobind\@,$(enable_autobind),g' \ + -e 's,@enable_auto_dn_suffix\@,$(enable_auto_dn_suffix),g' \ + -e 's,@enable_presence\@,$(enable_presence),g' \ + -e 's,@enable_asan\@,$(ASAN_ON),g' \ + -e 's,@enable_msan\@,$(MSAN_ON),g' \ + -e 's,@enable_tsan\@,$(TSAN_ON),g' \ + -e 's,@enable_ubsan\@,$(UBSAN_ON),g' \ + -e 's,@SANITIZER\@,$(SANITIZER),g' \ + -e 's,@enable_perl\@,@enable_perl@,g' \ + -e 's,@enable_rust\@,@enable_rust@,g' \ + -e 's,@ECHO_N\@,$(ECHO_N),g' \ + -e 's,@ECHO_C\@,$(ECHO_C),g' \ + -e 's,@brand\@,$(brand),g' \ + -e 's,@capbrand\@,$(capbrand),g' \ + -e 's,@vendor\@,$(vendor),g' \ + -e 's,@PACKAGE_NAME\@,$(PACKAGE_NAME),g' \ + -e 's,@PACKAGE_VERSION\@,$(PACKAGE_VERSION),g' \ + -e 's,@RPM_VERSION\@,$(RPM_VERSION),g' \ + -e 's,@PACKAGE_BASE_VERSION\@,$(PACKAGE_BASE_VERSION),g' \ + -e 's,@CONSOLE_VERSION\@,$(CONSOLE_VERSION),g' \ + -e 's,@BUILDNUM\@,$(BUILDNUM),g' \ + -e 's,@NQBUILD_NUM\@,$(NQBUILDNUM),g' \ + -e 's,@perlpath\@,$(perldir),g' \ + -e 's,@defaultuser\@,$(defaultuser),g' \ + -e 's,@defaultgroup\@,$(defaultgroup),g' \ + -e 's,@with_fhs_opt\@,@with_fhs_opt@,g' \ + -e 's,@with_selinux\@,@with_selinux@,g' \ + -e 's,@with_systemd\@,$(WITH_SYSTEMD),g' \ + -e 's,@tmpfiles_d\@,$(tmpfiles_d),g' \ + -e 's,@perlexec\@,@perlexec@,g' \ + -e 's,@pythonexec\@,@pythonexec@,g' \ + -e 's,@sttyexec\@,@sttyexec@,g' \ + -e 's,@initconfigdir\@,$(initconfigdir),g' \ + -e 's,@updatedir\@,$(updatedir),g' \ + -e 's,@ldaplib\@,$(ldaplib),g' \ + -e 's,@ldaplib_defs\@,$(ldaplib_defs),g' \ + -e 's,@systemdsystemunitdir\@,$(systemdsystemunitdir),g' \ + -e 's,@systemdsystemconfdir\@,$(systemdsystemconfdir),g' \ + -e 's,@systemdgroupname\@,$(systemdgroupname),g' \ + -e 's,@prefixdir\@,$(prefixdir),g' + +%: %.in + mkdir -p $(dir $@) + $(fixupcmd) $^ > $@ + +%/$(PACKAGE_NAME): %/initscript.in + if [ ! 
-d $(dir $@) ] ; then mkdir -p $(dir $@) ; fi + $(fixupcmd) $^ > $@ + +%/$(PACKAGE_NAME): %/base-initconfig.in + if [ ! -d $(dir $@) ] ; then mkdir -p $(dir $@) ; fi +if SYSTEMD + $(fixupcmd) $^ | sed -e 's/@preamble@/# This file is in systemd EnvironmentFile format - see man systemd.exec/' > $@ +else + $(fixupcmd) $^ | sed -n -e 's/@preamble@//' -e '/^#/{p;d;}' -e '/^$$/{p;d;}' -e 's/^\([^=]*\)\(=.*\)$$/\1\2 ; export \1/ ; p' > $@ + $(fixupcmd) $(srcdir)/ldap/admin/src/initconfig.in >> $@ +endif + +%/template-initconfig: %/template-initconfig.in + if [ ! -d $(dir $@) ] ; then mkdir -p $(dir $@) ; fi +if SYSTEMD + $(fixupcmd) $^ | sed -e 's/@preamble@/# This file is in systemd EnvironmentFile format - see man systemd.exec/' > $@ +else + $(fixupcmd) $^ | sed -n -e 's/@preamble@//' -e '/^#/{p;d;}' -e '/^$$/{p;d;}' -e 's/^\([^=]*\)\(=.*\)$$/\1\2 ; export \1/ ; p' > $@ +endif + +%/$(PACKAGE_NAME)-snmp: %/ldap-agent-initscript.in + if [ ! -d $(dir $@) ] ; then mkdir -p $(dir $@) ; fi + $(fixupcmd) $^ > $@ + +# yes, that is an @ in the filename . . . +%/$(PACKAGE_NAME)@.service: %/systemd.template.service.in + if [ ! -d $(dir $@) ] ; then mkdir -p $(dir $@) ; fi + $(fixupcmd) $^ > $@ + +%/$(PACKAGE_NAME)@.service.d/custom.conf: %/systemd.template.service.custom.conf.in + if [ ! -d $(dir $@) ] ; then mkdir -p $(dir $@) ; fi + $(fixupcmd) $^ > $@ + +if with_sanitizer +%/$(PACKAGE_NAME)@.service.d/xsan.conf: %/systemd.template.service.xsan.conf.in + if [ ! -d $(dir $@) ] ; then mkdir -p $(dir $@) ; fi + $(fixupcmd) $^ > $@ +endif + +%/$(systemdgroupname): %/systemd.group.in + if [ ! -d $(dir $@) ] ; then mkdir -p $(dir $@) ; fi + $(fixupcmd) $^ > $@ + +%/$(PACKAGE_NAME)-snmp.service: %/systemd-snmp.service.in + if [ ! -d $(dir $@) ] ; then mkdir -p $(dir $@) ; fi + $(fixupcmd) $^ > $@ + +# if distdir is a git tag, use that for the git archive tag, else +# just assume a developer build and use HEAD +git-archive: + if [ -n "$(SRCDISTDIR)" -a -d "$(SRCDISTDIR)" ] ; then \ + srcdistdir=$(SRCDISTDIR) ; \ + else \ + srcdistdir=`pwd` ; \ + fi ; \ + cd $(srcdir) ; \ + if git show-ref --tags -q $(distdir) ; then \ + gittag=$(distdir) ; \ + else \ + gittag=HEAD ; \ + fi ; \ + git archive --prefix=$(distdir)/ $$gittag | bzip2 > $$srcdistdir/$(distdir).tar.bz2 + +# Python test tests +# How will we update this to python 3? 
+ +lib389: src/lib389/setup.py + cd $(srcdir)/src/lib389; $(PYTHON) setup.py build ; $(PYTHON) setup.py build_manpages + +lib389-install: lib389 + cd $(srcdir)/src/lib389; $(PYTHON) setup.py install --skip-build --force + +NODE_MODULES_TEST = src/cockpit/389-console/node_modules/webpack +WEBPACK_TEST = src/cockpit/389-console/cockpit_dist/index.html + +# Cockpit UI plugin - we install the dependencies and build the JS sources +# and then we use install-data-hook for copying the results on 'make install' +$(NODE_MODULES_TEST): + cd src/cockpit/389-console; make -f node_modules.mk install + +$(WEBPACK_TEST): $(NODE_MODULES_TEST) + cd src/cockpit/389-console; make -f node_modules.mk build-cockpit-plugin + +389-console: $(WEBPACK_TEST) + +# This requires a built source tree and avoids having to install anything system-wide +389-console-devel-install: + cd $(srcdir)/src/cockpit/389-console; \ + rm ~/.local/share/cockpit/389-console; \ + mkdir -p ~/.local/share/cockpit/; \ + ln -s $$(pwd)/dist ~/.local/share/cockpit/389-console + +389-console-clean: + cd $(srcdir)/src/cockpit/389-console; make -f node_modules.mk clean + + +if HAVE_DOXYGEN + +# The rm in man3 is to remove files like: _home_william_development_389ds_libsds_src_.3 +# If there is a way to ignore this in doxygen I'm all ears ... + +doxyfile.stamp: + cd $(srcdir); $(DOXYGEN) $(abs_top_builddir)/docs/slapi.doxy + rm -f $(abs_top_builddir)/man/man3/_* + touch doxyfile.stamp + +# Add the docs to make all. +all-local: doxyfile.stamp + +endif diff --git a/README.md b/README.md new file mode 100644 index 0000000..2cfe5f4 --- /dev/null +++ b/README.md @@ -0,0 +1,116 @@ +389 Directory Server +==================== + +389 Directory Server is a highly usable, fully featured, reliable +and secure LDAP server implementation. It handles many of the +largest LDAP deployments in the world. + +All our code has been extensively tested with sanitisation tools. +In addition, a rich feature set of fail-over and backup technologies +gives administrators confidence that their accounts are safe. + +License +------- + +The 389 Directory Server is subject to the terms detailed in the +license agreement file called LICENSE. + +Late-breaking news and information on the 389 Directory Server is +available on our wiki page: + + https://www.port389.org/ + +Build Requirements (as of 2020-02-12) +------------------------------------- + +nspr-devel +nss-devel +perl-generators +openldap-devel +libdb-devel +cyrus-sasl-devel +icu +libicu-devel +pcre-devel +cracklib-devel +libatomic +clang +gcc +gcc-c++ +net-snmp-devel +lm_sensors-devel +bzip2-devel +zlib-devel +openssl-devel +pam-devel +systemd-units +systemd-devel +libasan +cargo +rust +pkgconfig +pkgconfig(systemd) +pkgconfig(krb5) +autoconf +automake +libtool +doxygen +libcmocka-devel +libevent-devel +python3-devel +python3-setuptools +python3-ldap +python3-six +python3-pyasn1 +python3-pyasn1-modules +python3-dateutil +python3-argcomplete +python3-argparse-manpage +python3-libselinux +python3-policycoreutils +rsync +npm +nodejs +nspr-devel +nss-devel +openldap-devel +libdb-devel +cyrus-sasl-devel +libicu-devel +pcre-devel +libtalloc-devel +libevent-devel +libtevent-devel +systemd-devel + +Building +-------- + + autoreconf -fiv + ./configure --enable-debug --with-openldap --enable-cmocka --enable-asan + make + make lib389 + sudo make install + sudo make lib389-install + +Note: **--enable-asan** is optional, and it should only be used for debugging/development purposes.
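For orientation, here is a possible release-style invocation of the same build; this is only a sketch (the exact flag set varies by platform), reusing configure switches and Makefile targets that appear elsewhere in this patch and dropping the debug/sanitizer options:

    # sketch only: a non-debug build using targets defined in Makefile.am above
    autoreconf -fiv
    ./configure --with-openldap --enable-cmocka
    make                      # ns-slapd, libraries, plugins and tools
    make 389-console          # Cockpit UI plugin, built via node_modules.mk
    make check                # cmocka suites: test_slapd and test_libsds
    sudo make install
    sudo make lib389-install  # installs the lib389 python tooling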
+ +See also: + +Testing +------- + + make check + sudo py.test -s 389-ds-base/dirsrvtests/tests/suites/basic/ + +To debug the make check item's, you'll need libtool to help: + + libtool --mode=execute gdb /home/william/build/ds/test_slapd + +More information +---------------- + +Please see our contributing guide online: + + https://www.port389.org/docs/389ds/contributing.html + diff --git a/VERSION.sh b/VERSION.sh new file mode 100644 index 0000000..2a725e9 --- /dev/null +++ b/VERSION.sh @@ -0,0 +1,54 @@ +# brand is lower case - used for names that don't appear to end users +# brand is used for file naming - should contain no spaces +brand=389 +# capbrand is the properly capitalized brand name that appears to end users +# may contain spaces +capbrand=389 +# vendor is the properly formatted vendor/manufacturer name that appears to end users +vendor="389 Project" + +# PACKAGE_VERSION is constructed from these +VERSION_MAJOR=1 +VERSION_MINOR=4 +VERSION_MAINT=3.4 +# NOTE: VERSION_PREREL is automatically set for builds made out of a git tree +VERSION_PREREL= +VERSION_DATE=$(date -u +%Y%m%d) + +# Set the version and release numbers for local developer RPM builds. We +# set these here because we do not want the git commit hash in the RPM +# version since it can make RPM upgrades difficult. If we have a git +# commit hash, we add it into the release number below. +RPM_RELEASE=${VERSION_DATE} +RPM_VERSION=${VERSION_MAJOR}.${VERSION_MINOR}.${VERSION_MAINT} + +if $(git -C "$srcdir" rev-parse --is-inside-work-tree > /dev/null 2>&1); then +# Check if the source is from a git repo +# if this is not a git repo, git log will say +# fatal: Not a git repository +# to stderr and stdout will be empty +# this tells git to print the short commit hash from the last commit + COMMIT=$(git -C "$srcdir" log -1 --pretty=format:%h 2> /dev/null) + if test -n "$COMMIT" ; then + VERSION_PREREL=.${VERSION_DATE}git$COMMIT + RPM_RELEASE=${RPM_RELEASE}git$COMMIT + fi +fi + +# the real version used throughout configure and make +# NOTE: because of autoconf/automake harshness, we cannot override the settings +# below in C code - there is no way to override the default #defines +# for these set with AC_INIT - so configure.ac should AC_DEFINE +# DS_PACKAGE_VERSION DS_PACKAGE_TARNAME DS_PACKAGE_BUGREPORT +# for use in C code - other code (perl scripts, shell scripts, Makefiles) +# can use PACKAGE_VERSION et. al. +PACKAGE_VERSION=$VERSION_MAJOR.$VERSION_MINOR.${VERSION_MAINT}${VERSION_PREREL} +# the name of the source tarball - see make dist +PACKAGE_TARNAME=${brand}-ds-base +# url for bug reports +PACKAGE_BUGREPORT="${PACKAGE_BUGREPORT}enter_bug.cgi?product=$brand" +PACKAGE_STRING="$PACKAGE_TARNAME $PACKAGE_VERSION" +# the version of the ds console package that this directory server +# is compatible with +# console .2 is still compatible with 389 .3 for now +CONSOLE_VERSION=$VERSION_MAJOR.2 diff --git a/autogen.sh b/autogen.sh new file mode 100755 index 0000000..06a5fac --- /dev/null +++ b/autogen.sh @@ -0,0 +1,96 @@ +#!/bin/sh + +# set required versions of tools here +# the version is dotted integers like X.Y.Z where +# X, Y, and Z are integers +# comparisons are done using shell -lt, -gt, etc. 
+# this works if the numbers are zero filled as well +# so 06 == 6 + +# autoconf version required +# need 2.69 or later +ac_need_maj=2 +ac_need_min=69 +# automake version required +# need 1.13.4 or later +am_need_maj=1 +am_need_min=13 +am_need_rev=4 +# libtool version required +# need 2.4.2 or later +lt_need_maj=2 +lt_need_min=4 +lt_need_rev=2 +# should never have to touch anything below this line unless there is a bug +########################################################################### + +# input +# arg1 - version string in the form "X.Y[.Z]" - the .Z is optional +# args remaining - the needed X, Y, and Z to match +# output +# return 0 - success - the version string is >= the required X.Y.Z +# return 1 - failure - the version string is < the required X.Y.Z +# NOTE: All input must be integers, otherwise you will see shell errors +checkvers() { + vers="$1"; shift + needmaj="$1"; shift + needmin="$1"; shift + if [ "$#" != "0" ]; then + needrev="$1"; shift + fi + verslist=`echo $vers | tr '.' ' '` + set $verslist + maj=$1; shift + min=$1; shift + if [ "$#" != "0" ]; then + rev=$1; shift + fi + if [ "$maj" -gt "$needmaj" ] ; then return 0; fi + if [ "$maj" -lt "$needmaj" ] ; then return 1; fi + # if we got here, maj == needmaj + if [ -z "$needmin" ] ; then return 0; fi + if [ "$min" -gt "$needmin" ] ; then return 0; fi + if [ "$min" -lt "$needmin" ] ; then return 1; fi + # if we got here, min == needmin + if [ -z "$needrev" ] ; then return 0; fi + if [ "$rev" -gt "$needrev" ] ; then return 0; fi + if [ "$rev" -lt "$needrev" ] ; then return 1; fi + # if we got here, rev == needrev + return 0 +} + +# Check autoconf version +AC_VERSION=`autoconf --version | sed '/^autoconf/ {s/^.* \([1-9][0-9.]*\)$/\1/; q}'` +if checkvers "$AC_VERSION" $ac_need_maj $ac_need_min ; then + echo Found valid autoconf version $AC_VERSION +else + echo "You must have autoconf version $ac_need_maj.$ac_need_min or later installed (found version $AC_VERSION)." + exit 1 +fi + +# Check automake version +AM_VERSION=`automake --version | sed '/^automake/ {s/^.* \([1-9][0-9.]*\)$/\1/; q}'` +if checkvers "$AM_VERSION" $am_need_maj $am_need_min $am_need_rev ; then + echo Found valid automake version $AM_VERSION +else + echo "You must have automake version $am_need_maj.$am_need_min.$am_need_rev or later installed (found version $AM_VERSION)." + exit 1 +fi + +# Check libtool version +# NOTE: some libtool versions report a letter at the end e.g. on RHEL6 +# the version is 2.2.6b - for comparison purposes, just strip off the +# letter - note that the shell -lt and -gt comparisons will fail with +# test: 6b: integer expression expected if the number to compare +# contains a non-digit +LT_VERSION=`libtool --version | sed '/GNU libtool/ {s/^.* \([1-9][0-9a-zA-Z.]*\)$/\1/; s/[a-zA-Z]//g; q}'` +if checkvers "$LT_VERSION" $lt_need_maj $lt_need_min $lt_need_rev ; then + echo Found valid libtool version $LT_VERSION +else + echo "You must have libtool version $lt_need_maj.$lt_need_min.$lt_need_rev or later installed (found version $LT_VERSION)." + exit 1 +fi + +# Run autoreconf +echo "Running autoreconf -fvi" +autoreconf -fvi diff --git a/buildnum.py b/buildnum.py new file mode 100755 index 0000000..e205077 --- /dev/null +++ b/buildnum.py @@ -0,0 +1,28 @@ +#!/usr/bin/python3 +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2020 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. 
+# --- END COPYRIGHT BLOCK --- + +# Generate a build number in the format YYYY.DDD.HHMM + +import os +import time + +SDE = os.getenv('SOURCE_DATE_EPOCH') +if SDE is not None: + obj = time.gmtime(int(SDE)) +else: + obj = time.gmtime() + +year = obj[0] +doy = obj[7] +if doy < 100: + doy = "0" + str(doy) +tod = str(obj[3]) + str(obj[4]) +buildnum = f"{year}.{doy}.{tod}" + +print(f'\\"{buildnum}\\"', end = '') diff --git a/configure.ac b/configure.ac new file mode 100644 index 0000000..2fa1b2c --- /dev/null +++ b/configure.ac @@ -0,0 +1,927 @@ +# -*- Autoconf -*- +# Process this file with autoconf to produce a configure script. +AC_PREREQ(2.59) +AC_INIT([dirsrv],[1.0],[http://bugzilla.redhat.com/]) +# AC_CONFIG_HEADER must be called right after AC_INIT. +AC_CONFIG_HEADERS([config.h]) +# include the version information +. $srcdir/VERSION.sh +AC_MSG_NOTICE(This is configure for $PACKAGE_TARNAME $PACKAGE_VERSION) +AC_DEFINE_UNQUOTED([DS_PACKAGE_VERSION], "$PACKAGE_VERSION", [package version]) +AC_DEFINE_UNQUOTED([DS_PACKAGE_TARNAME], "$PACKAGE_TARNAME", [package tarball name]) +AC_DEFINE_UNQUOTED([DS_PACKAGE_BUGREPORT], "$PACKAGE_BUGREPORT", [package bug report url]) +AC_DEFINE_UNQUOTED([DS_PACKAGE_STRING], "$PACKAGE_STRING", [package string]) +AM_INIT_AUTOMAKE([1.9 foreign subdir-objects dist-bzip2 no-dist-gzip no-define tar-pax]) +# define these for automake distdir +VERSION=$PACKAGE_VERSION +PACKAGE=$PACKAGE_TARNAME +AC_DEFINE_UNQUOTED([VERSION], "$VERSION", [package version]) +AC_DEFINE_UNQUOTED([PACKAGE], "$PACKAGE", [package tar name]) +AC_SUBST([RPM_VERSION]) +AC_SUBST([RPM_RELEASE]) +AC_SUBST([VERSION_PREREL]) +AC_SUBST([CONSOLE_VERSION]) +AM_MAINTAINER_MODE +AC_CANONICAL_HOST + +AC_CONFIG_MACRO_DIRS([m4]) + +# Checks for programs. +: ${CXXFLAGS=""} +AC_PROG_CXX +: ${CFLAGS=""} +AC_PROG_CC +AM_PROG_CC_C_O +AM_PROG_AS +AC_PROG_CC_STDC +PKG_PROG_PKG_CONFIG + +# disable static libs by default - we only use a couple +AC_DISABLE_STATIC +AC_PROG_LIBTOOL + +# Checks for header files. +AC_HEADER_DIRENT +AC_HEADER_STDC +AC_HEADER_SYS_WAIT +AC_CHECK_HEADERS([arpa/inet.h errno.h fcntl.h malloc.h netdb.h netinet/in.h stdlib.h string.h strings.h sys/file.h sys/socket.h sys/time.h syslog.h unistd.h mntent.h sys/sysinfo.h sys/endian.h endian.h]) + +# These are *required* headers without option. +AC_CHECK_HEADERS([inttypes.h], [], AC_MSG_ERROR([unable to locate required header inttypes.h])) +AC_CHECK_HEADERS([crack.h], [], AC_MSG_ERROR([unable to locate required header crack.h])) + + +# Checks for typedefs, structures, and compiler characteristics. +AC_HEADER_STAT +AC_C_CONST +AC_HEADER_STDBOOL +AC_TYPE_UID_T +AC_TYPE_PID_T +AC_TYPE_SIZE_T +AC_HEADER_TIME +AC_STRUCT_TM + +# Checks for library functions. +AC_FUNC_CHOWN +AC_FUNC_CLOSEDIR_VOID +AC_FUNC_ERROR_AT_LINE +AC_FUNC_FORK +AC_FUNC_LSTAT +AC_FUNC_LSTAT_FOLLOWS_SLASHED_SYMLINK +AC_FUNC_MALLOC +AC_FUNC_MEMCMP +AC_FUNC_MMAP +AC_TYPE_SIGNAL +AC_FUNC_STAT +AC_FUNC_STRERROR_R +AC_FUNC_STRFTIME +AC_FUNC_VPRINTF +AC_CHECK_FUNCS([endpwent ftruncate getcwd getaddrinfo inet_pton inet_ntop localtime_r memmove memset mkdir munmap putenv rmdir setrlimit socket strcasecmp strchr strcspn strdup strerror strncasecmp strpbrk strrchr strstr strtol tzset]) + +# These functions are *required* without option. +AC_CHECK_FUNCS([clock_gettime], [], AC_MSG_ERROR([unable to locate required symbol clock_gettime])) + +# This will detect if we need to add the LIBADD_DL value for us. +LT_LIB_DLLOAD + +# Optional rust component support. 
+AC_MSG_CHECKING(for --enable-rust-offline) +AC_ARG_ENABLE(rust_offline, AS_HELP_STRING([--enable-rust-offline], [Enable rust building offline. you MUST have run vendor! (default: no)]), + [], [ enable_rust_offline=no ]) +AC_MSG_RESULT($enable_rust_offline) +AM_CONDITIONAL([RUST_ENABLE_OFFLINE],[test "$enable_rust_offline" = yes]) + +AS_IF([test "$enable_rust_offline" = yes], + [rust_vendor_sources="replace-with = \"vendored-sources\""], + [rust_vendor_sources=""]) +AC_SUBST([rust_vendor_sources]) + +AC_MSG_CHECKING(for --enable-rust) +AC_ARG_ENABLE(rust, AS_HELP_STRING([--enable-rust], [Enable rust language features (default: no)]), + [], [ enable_rust=no ]) +AC_MSG_RESULT($enable_rust) +if test "$enable_rust" = yes -o "$enable_rust_offline" = yes; then + AC_CHECK_PROG(CARGO, [cargo], [yes], [no]) + AC_CHECK_PROG(RUSTC, [rustc], [yes], [no]) + # Since fernet uses the openssl lib. + PKG_CHECK_MODULES([OPENSSL], [openssl]) + + AS_IF([test "$CARGO" != "yes" -o "$RUSTC" != "yes"], [ + AC_MSG_FAILURE("Rust based plugins cannot be built cargo=$CARGO rustc=$RUSTC") + ]) +fi +AC_SUBST([enable_rust]) +AM_CONDITIONAL([RUST_ENABLE],[test "$enable_rust" = yes -o "$enable_rust_offline" = yes]) + + +AC_MSG_CHECKING(for --enable-debug) +AC_ARG_ENABLE(debug, AS_HELP_STRING([--enable-debug], [Enable debug features (default: no)]), + [], [ enable_debug=no ]) +AC_MSG_RESULT($enable_debug) +if test "$enable_debug" = yes ; then + debug_defs="-DDEBUG -DMCC_DEBUG" + debug_cflags="-g3 -O0 -rdynamic" + debug_cxxflags="-g3 -O0 -rdynamic" + debug_rust_defs="-C debuginfo=2" + cargo_defs="" + rust_target_dir="debug" +else + debug_defs="" + # set the default safe CFLAGS that would be set by AC_PROG_CC otherwise + debug_cflags="-g -O2" + debug_cxxflags="-g -O2" + debug_rust_defs="-C debuginfo=2" + cargo_defs="--release" + rust_target_dir="release" +fi +AC_SUBST([debug_defs]) +AC_SUBST([debug_cflags]) +AC_SUBST([debug_cxxflags]) +AC_SUBST([debug_rust_defs]) +AC_SUBST([cargo_defs]) +AC_SUBST([rust_target_dir]) +AM_CONDITIONAL([DEBUG],[test "$enable_debug" = yes]) + +AC_MSG_CHECKING(for --enable-asan) +AC_ARG_ENABLE(asan, AS_HELP_STRING([--enable-asan], [Enable gcc/clang address sanitizer options (default: no)]), + [], [ enable_asan=no ]) +AC_MSG_RESULT($enable_asan) +if test "$enable_asan" = yes ; then + asan_cflags="-fsanitize=address -fno-omit-frame-pointer -lasan" + asan_rust_defs="-Z sanitizer=address" +else + asan_cflags="" + asan_rust_defs="" +fi +AC_SUBST([asan_cflags]) +AC_SUBST([asan_rust_defs]) +AM_CONDITIONAL(enable_asan,test "$enable_asan" = "yes") + +AC_MSG_CHECKING(for --enable-msan) +AC_ARG_ENABLE(msan, AS_HELP_STRING([--enable-msan], [Enable gcc/clang memory sanitizer options (default: no)]), + [], [ enable_msan=no ]) +AC_MSG_RESULT($enable_msan) +if test "$enable_msan" = yes ; then + msan_cflags="-fsanitize=memory -fsanitize-memory-track-origins -fno-omit-frame-pointer" + msan_rust_defs="-Z sanitizer=memory" +else + msan_cflags="" + msan_rust_defs="" +fi +AC_SUBST([msan_cflags]) +AC_SUBST([msan_rust_defs]) +AM_CONDITIONAL(enable_msan,test "$enable_msan" = "yes") + +AC_MSG_CHECKING(for --enable-tsan) +AC_ARG_ENABLE(tsan, AS_HELP_STRING([--enable-tsan], [Enable gcc/clang thread sanitizer options (default: no)]), + [], [ enable_tsan=no ]) +AC_MSG_RESULT($enable_tsan) +if test "$enable_tsan" = yes ; then + tsan_cflags="-fsanitize=thread -fno-omit-frame-pointer" + tsan_rust_defs="-Z sanitizer=thread" +else + tsan_cflags="" + tsan_rust_defs="" +fi +AC_SUBST([tsan_cflags]) +AC_SUBST([tsan_rust_defs]) 
+AM_CONDITIONAL(enable_tsan,test "$enable_tsan" = "yes") + +AC_MSG_CHECKING(for --enable-ubsan) +AC_ARG_ENABLE(ubsan, AS_HELP_STRING([--enable-tsan], [Enable gcc/clang undefined behaviour sanitizer options (default: no)]), + [], [ enable_ubsan=no ]) +AC_MSG_RESULT($enable_ubsan) +if test "$enable_ubsan" = yes ; then + ubsan_cflags="-fsanitize=undefined -fno-omit-frame-pointer" + ubsan_rust_defs="" +else + ubsan_cflags="" + ubsan_rust_defs="" +fi +AC_SUBST([ubsan_cflags]) +AC_SUBST([ubsan_rust_defs]) +AM_CONDITIONAL(enable_ubsan,test "$enable_ubsan" = "yes") + +AM_CONDITIONAL(with_sanitizer,test "$enable_asan" = "yes" -o "$enable_msan" = "yes" -o "$enable_tsan" = "yes" -o "$enable_ubsan" = "yes") + +AC_MSG_CHECKING(for --enable-clang) +AC_ARG_ENABLE(clang, AS_HELP_STRING([--enable-clang], [Enable clang (default: no)]), + [], [ enable_clang=no ]) +AC_MSG_RESULT($enable_clang) +AM_CONDITIONAL(CLANG_ENABLE,test "$enable_clang" = "yes") + + +AC_MSG_CHECKING(for --enable-legacy) +AC_ARG_ENABLE(legacy, AS_HELP_STRING([--enable-legacy], [Enable deprecated legacy functionality (default: no)]), + [], [ enable_legacy=no ]) +AC_MSG_RESULT($enable_legacy) +AC_SUBST([enable_legacy]) +AM_CONDITIONAL(ENABLE_LEGACY,test "$enable_legacy" = "yes") + +if test "$enable_legacy" = yes ; then + enable_perl=yes +fi +AC_MSG_CHECKING(for --enable-perl) +AC_ARG_ENABLE(perl, AS_HELP_STRING([--enable-perl], [Enable deprecated legacy perl scripts (default: no)]), + [], [ enable_perl=no ]) +AC_MSG_RESULT($enable_perl) +AC_SUBST([enable_perl]) +AM_CONDITIONAL(ENABLE_PERL,test "$enable_perl" = "yes") + + +AM_CONDITIONAL([RPM_HARDEND_CC], [test -f /usr/lib/rpm/redhat/redhat-hardened-cc1]) +AC_MSG_CHECKING(for --enable-gcc-security) +AC_ARG_ENABLE(gcc-security, AS_HELP_STRING([--enable-gcc-security], [Enable gcc secure compilation options (default: no)]), + [], [ enable_gcc_security=no ]) +AC_MSG_RESULT($enable_gcc_security) +if test "$enable_gcc_security" = yes ; then + gccsec_cflags="-Wall -Wp,-D_FORTIFY_SOURCE=2 -fexceptions -fstack-protector-strong --param=ssp-buffer-size=4 -grecord-gcc-switches -Werror=format-security" +else + # Without this, -fPIC doesn't work on generic fedora builds, --disable-gcc-sec. + gccsec_cflags="" +fi +AM_COND_IF([RPM_HARDEND_CC], + [ gccsec_cflags="$gccsec_flags -specs=/usr/lib/rpm/redhat/redhat-hardened-cc1" ], + []) +AC_SUBST([gccsec_cflags]) + +# Pull in profiling. 
+AC_MSG_CHECKING(for --enable-profiling) +AC_ARG_ENABLE(profiling, AS_HELP_STRING([--enable-profiling], [Enable gcov profiling features (default: no)]), + [], [ enable_profiling=no ]) +AC_MSG_RESULT($enable_profiling) +if test "$enable_profiling" = yes ; then + profiling_defs="-fprofile-arcs -ftest-coverage -g3 -O0" + profiling_links="-lgcov --coverage" +else + profiling_defs="" + profiling_links="" +fi +AC_SUBST([profiling_defs]) +AC_SUBST([profiling_links]) + +AC_MSG_CHECKING(for --enable-systemtap) +AC_ARG_ENABLE(systemtap, AS_HELP_STRING([--enable-systemtap], [Enable systemtap probe features (default: no)]), + [], [ enable_systemtap=no ]) +AC_MSG_RESULT($enable_systemtap) +if test "$enable_systemtap" = yes ; then + systemtap_defs="-DSYSTEMTAP" +else + systemtap_defs="" +fi +AC_SUBST([systemtap_defs]) + + +# these enables are for optional or experimental features +AC_MSG_CHECKING(for --enable-pam-passthru) +AC_ARG_ENABLE(pam-passthru, + AS_HELP_STRING([--enable-pam-passthru], + [enable the PAM passthrough auth plugin (default: yes)]), + [], [ enable_pam_passthru=yes ]) +AC_MSG_RESULT($enable_pam_passthru) +if test "$enable_pam_passthru" = yes ; then + # check for pam header file used by plugins/pass_passthru/pam_ptimpl.c + AC_CHECK_HEADER([security/pam_appl.h], [], [AC_MSG_ERROR([Missing header file security/pam_appl.h])]) + AC_DEFINE([ENABLE_PAM_PASSTHRU], [1], [enable the pam passthru auth plugin]) +fi +AM_CONDITIONAL(enable_pam_passthru,test "$enable_pam_passthru" = "yes") + +if test -z "$enable_dna" ; then + enable_dna=yes # if not set on cmdline, set default +fi +AC_MSG_CHECKING(for --enable-dna) +AC_ARG_ENABLE(dna, + AS_HELP_STRING([--enable-dna], + [enable the Distributed Numeric Assignment (DNA) plugin (default: yes)])) +if test "$enable_dna" = yes ; then + AC_MSG_RESULT(yes) + AC_DEFINE([ENABLE_DNA], [1], [enable the dna plugin]) +else + AC_MSG_RESULT(no) +fi +AM_CONDITIONAL(enable_dna,test "$enable_dna" = "yes") + +if test -z "$enable_ldapi" ; then + enable_ldapi=yes # if not set on cmdline, set default +fi +AC_MSG_CHECKING(for --enable-ldapi) +AC_ARG_ENABLE(ldapi, + AS_HELP_STRING([--enable-ldapi], + [enable LDAP over unix domain socket (LDAPI) support (default: yes)])) +if test "$enable_ldapi" = yes ; then + AC_MSG_RESULT(yes) + AC_DEFINE([ENABLE_LDAPI], [1], [enable ldapi support in the server]) +else + AC_MSG_RESULT(no) +fi +AM_CONDITIONAL(enable_ldapi,test "$enable_ldapi" = "yes") + +if test -z "$enable_autobind" ; then + enable_autobind=yes # if not set on cmdline, set default +fi +AC_MSG_CHECKING(for --enable-autobind) +AC_ARG_ENABLE(autobind, + AS_HELP_STRING([--enable-autobind], + [enable auto bind over unix domain socket (LDAPI) support (default: no)])) +if test "$enable_ldapi" = yes -a "$enable_autobind" = yes ; then + AC_MSG_RESULT(yes) + AC_DEFINE([ENABLE_AUTOBIND], [1], [enable ldapi auto bind support in the server]) +else + AC_MSG_RESULT(no) +fi +AM_CONDITIONAL(enable_autobind,test "$enable_autobind" = "yes") + +if test -z "$enable_auto_dn_suffix" ; then + enable_auto_dn_suffix=no # if not set on cmdline, set default +fi +AC_MSG_CHECKING(for --enable-auto-dn-suffix) +AC_ARG_ENABLE(auto-dn-suffix, + AS_HELP_STRING([--enable-auto-dn-suffix], + [enable auto bind with auto dn suffix over unix domain socket (LDAPI) support (default: no)])) +if test "$enable_ldapi" = yes -a "$enable_autobind" = yes -a "$enable_auto_dn_suffix" = "yes"; then + AC_MSG_RESULT(yes) + AC_DEFINE([ENABLE_AUTO_DN_SUFFIX], [1], [enable ldapi auto bind with auto dn suffix support in the server]) 
+else + AC_MSG_RESULT(no) +fi +AM_CONDITIONAL(enable_auto_dn_suffix,test "$enable_auto_dn_suffix" = "yes") + +if test -z "$enable_bitwise" ; then + enable_bitwise=yes # if not set on cmdline, set default +fi +AC_MSG_CHECKING(for --enable-bitwise) +AC_ARG_ENABLE(bitwise, + AS_HELP_STRING([--enable-bitwise], + [enable the bitwise matching rule plugin (default: yes)])) +if test "$enable_bitwise" = yes ; then + AC_MSG_RESULT(yes) + AC_DEFINE([ENABLE_BITWISE], [1], [enable the bitwise plugin]) +else + AC_MSG_RESULT(no) +fi +AM_CONDITIONAL(enable_bitwise,test "$enable_bitwise" = "yes") + +if test -z "$enable_presence" ; then + enable_presence=no # if not set on cmdline, set default +fi +AC_MSG_CHECKING(for --enable-presence) +AC_ARG_ENABLE(presence, + AS_HELP_STRING([--enable-presence], + [enable the presence plugin (default: no)])) +if test "$enable_presence" = yes ; then + AC_MSG_RESULT(yes) + AC_DEFINE([ENABLE_PRESENCE], [1], [enable the presence plugin]) +else + AC_MSG_RESULT(no) +fi +AM_CONDITIONAL(enable_presence,test "$enable_presence" = "yes") + +if test -z "$enable_acctpolicy" ; then + enable_acctpolicy=yes # if not set on cmdline, set default +fi +AC_MSG_CHECKING(for --enable-acctpolicy) +AC_ARG_ENABLE(acctpolicy, + AS_HELP_STRING([--enable-acctpolicy], + [enable the account policy plugin (default: yes)])) +if test "$enable_acctpolicy" = yes ; then + AC_MSG_RESULT(yes) + AC_DEFINE([ENABLE_ACCTPOLICY], [1], [enable the account policy plugin]) +else + AC_MSG_RESULT(no) +fi +AM_CONDITIONAL(enable_acctpolicy,test "$enable_acctpolicy" = "yes") + +if test -z "$enable_posix_winsync" ; then + enable_posix_winsync=yes # if not set on cmdline, set default +fi +AC_MSG_CHECKING(for --enable-posix-winsync) +AC_ARG_ENABLE(posix_winsync, + AS_HELP_STRING([--enable-posix-winsync], + [enable support for POSIX user/group attributes in winsync (default: yes)])) +if test "$enable_posix_winsync" = yes ; then + AC_MSG_RESULT(yes) + AC_DEFINE([ENABLE_POSIX_WINSYNC], [1], [enable support for POSIX user/group attributes in winsync]) +else + AC_MSG_RESULT(no) +fi +AM_CONDITIONAL(enable_posix_winsync,test "$enable_posix_winsync" = "yes") + +# the default prefix - override with --prefix or --with-fhs +AC_PREFIX_DEFAULT([/opt/$PACKAGE_NAME]) + +# If we have no prefix specified, we need to fix the prefix variable. +# If we don't what happens is $prefixdir ends up as NONE, and then +# later configure changes $prefix to $ac_default_prefix underneath us. +if test "$prefix" = "NONE"; then + prefix=$ac_default_prefix +fi + +m4_include(m4/fhs.m4) + +localrundir='/run' +cockpitdir=/389-console + +# installation paths - by default, we store everything +# under the prefix. The with-fhs option will use /usr, +# /etc, and /var. The with-fhs-opt option will use the +# prefix, but it's sysconfdir and localstatedir will be +# /etc/opt, and /var/opt. +if test "$with_fhs_opt" = "yes"; then + # Override sysconfdir and localstatedir if FHS optional + # package was requested. 
+ prefixdir=$prefix + sysconfdir='/etc/opt' + localstatedir='/var/opt' + localrundir='/var/opt/run' + # relative to datadir + sampledatadir=/data + # relative to datadir + systemschemadir=/schema + # relative to datadir + scripttemplatedir=/script-templates + # relative to datadir + updatedir=/updates + # relative to libdir + serverdir= + # relative to includedir + serverincdir= + # relative to libdir + serverplugindir=/plugins + # relative to datadir + infdir=/inf + # relative to datadir + mibdir=/mibs + # location of property/resource files, relative to datadir + propertydir=/properties + # relative to libdir + perldir=/perl + # relative to libdir + pythondir=/python +else + if test "$with_fhs" = "yes"; then + ac_default_prefix=/usr + prefix=$ac_default_prefix + exec_prefix=$prefix + dnl as opposed to the default /usr/etc + sysconfdir='/etc' + dnl as opposed to the default /usr/var + localstatedir='/var' + localrundir='/run' + fi + prefixdir=$prefix + # relative to datadir + sampledatadir=/$PACKAGE_NAME/data + # relative to datadir + systemschemadir=/$PACKAGE_NAME/schema + # relative to datadir + scripttemplatedir=/$PACKAGE_NAME/script-templates + # relative to datadir + updatedir=/$PACKAGE_NAME/updates + # relative to libdir + serverdir=$PACKAGE_NAME + # relative to includedir + serverincdir=$PACKAGE_NAME + # relative to libdir + serverplugindir=/$PACKAGE_NAME/plugins + # relative to datadir + infdir=/$PACKAGE_NAME/inf + # relative to datadir + mibdir=/$PACKAGE_NAME/mibs + # location of property/resource files, relative to datadir + propertydir=/$PACKAGE_NAME/properties + # relative to libdir + perldir=/$PACKAGE_NAME/perl + # relative to libdir + pythondir=/$PACKAGE_NAME/python +fi + +# if mandir is the default value, override it +# otherwise, the user must have set it - just use it +if test X"$mandir" = X'${prefix}/man' ; then + mandir='$(datadir)/man' +fi + +# Shared paths for all layouts +# relative to sysconfdir +configdir=/$PACKAGE_NAME/config +# relative to sysconfdir +schemadir=/$PACKAGE_NAME/schema + +# default user, group +defaultuser=dirsrv +defaultgroup=dirsrv + +AC_MSG_CHECKING(for --with-perldir) +AC_ARG_WITH([perldir], + AS_HELP_STRING([--with-perldir=PATH], + [Directory for perl]) +) +if test -n "$with_perldir"; then + if test "$with_perldir" = yes ; then + AC_MSG_ERROR([You must specify --with-perldir=/full/path/to/perl]) + elif test "$with_perldir" = no ; then + with_perldir= + else + AC_MSG_RESULT([$with_perldir]) + fi +else + with_perldir= +fi + +AC_MSG_CHECKING(for --with-pythonexec) +AC_ARG_WITH([pythonexec], + AS_HELP_STRING([--with-pythonexec=PATH], + [Path to executable for python]) +) +if test -n "$with_pythonexec"; then + if test "$with_pythonexec" = yes ; then + AC_MSG_ERROR([You must specify --with-pythonexec=/full/path/to/python]) + elif test "$with_pythonexec" = no ; then + with_pythonexec=/usr/bin/python3 + else + AC_MSG_RESULT([$with_pythonexec]) + fi +else + with_pythonexec=/usr/bin/python3 +fi + +AC_SUBST(prefixdir) +AC_SUBST(configdir) +AC_SUBST(sampledatadir) +AC_SUBST(systemschemadir) +AC_SUBST(propertydir) +AC_SUBST(schemadir) +AC_SUBST(serverdir) +AC_SUBST(serverincdir) +AC_SUBST(serverplugindir) +AC_SUBST(scripttemplatedir) +AC_SUBST(perldir) +AC_SUBST(pythondir) +AC_SUBST(infdir) +AC_SUBST(mibdir) +AC_SUBST(mandir) +AC_SUBST(updatedir) +AC_SUBST(defaultuser) +AC_SUBST(defaultgroup) +AC_SUBST(cockpitdir) + +# check for --with-instconfigdir +AC_MSG_CHECKING(for --with-instconfigdir) +AC_ARG_WITH(instconfigdir, + 
AS_HELP_STRING([--with-instconfigdir=/path], + [Base directory for instance specific writable configuration directories (default $sysconfdir/$PACKAGE_NAME)]), +[ + if test $withval = yes ; then + AC_ERROR([Please specify a full path with --with-instconfigdir]) + fi + instconfigdir="$withval" + AC_MSG_RESULT($withval) +], +[ + dnl this value is expanded out in Makefile.am + instconfigdir='$(sysconfdir)/$(PACKAGE_NAME)' + AC_MSG_RESULT(no) +]) +AC_SUBST(instconfigdir) + +# WINNT should be true if building on Windows system not using +# cygnus, mingw, or the like and using cmd.exe as the shell +AM_CONDITIONAL([WINNT], false) + +# Deal with platform dependent defines +# initdir is the location for the SysV init scripts - very heavily platform +# dependent and not specified in fhs or lsb +# and not used if systemd is used +initdir='$(sysconfdir)/rc.d' +AC_MSG_CHECKING(for --with-initddir) +AC_ARG_WITH(initddir, + AS_HELP_STRING([--with-initddir=/path], + [Absolute path (not relative like some of the other options) that should contain the SysV init scripts (default '$(sysconfdir)/rc.d')]), +[ + AC_MSG_RESULT($withval) +], +[ + AC_MSG_RESULT(no) +]) + +AM_CONDITIONAL([INITDDIR], [test -n "$with_initddir" -a "$with_initddir" != "no"]) +# on most platforms, we will just use perl from PATH +# On some platforms, we cannot. Why not just use any old +# perl? Because of perldap. We use a perldap that is +# compiled to either 32bit or 64bit, so we must use a native +# perl binary compiled with the same bitsize. On Solaris +# and HP-UX, /usr/bin/perl is 32 bit, so we cannot use +# those with our 64 bit compiled product. +if test -n "$with_perldir"; then + perlexec="$with_perldir/perl" +else + perlexec='/usr/bin/env perl' +fi + +# This will let us change over the python version easier in the future. +if test -n "$with_pythonexec"; then + pythonexec="$with_pythonexec" +else + pythonexec='/usr/bin/python3' +fi + +# Default to no atomic queue operations. +with_atomic_queue="no" + +# we use stty in perl scripts to disable password echo +# this doesn't work unless the full absolute path of the +# stty command is used e.g. 
system("stty -echo") does not +# work but system("/bin/stty -echo") does work +# since the path of stty may not be the same on all +# platforms, we set the default here to /bin/stty and +# allow that value to be overridden in the platform +# specific section below +sttyexec=/bin/stty +case $host in + *-*-linux*) + AC_DEFINE([LINUX], [1], [Linux]) + AC_DEFINE([_GNU_SOURCE], [1], [GNU Source]) + platform="linux" + initdir='$(sysconfdir)/rc.d/init.d' + # do arch specific linux stuff here + case $host in + i*86-*-linux*) + AC_DEFINE([CPU_x86], [], [cpu type x86]) + ;; + x86_64-*-linux*) + AC_DEFINE([CPU_x86_64], [1], [cpu type x86_64]) + + # This turns on and off LFDS inside of libsds + # wibrown -- 2017-02-21 disabled temporarily + # with_atomic_queue="yes" + # AC_DEFINE([ATOMIC_QUEUE_OPERATIONS], [1], [enabling atomic queue operations]) + ;; + aarch64-*-linux*) + AC_DEFINE([CPU_arm], [], [cpu type arm]) + ;; + arm-*-linux*) + AC_DEFINE([CPU_arm], [], [cpu type arm]) + ;; + ppc64le-*-linux*) + ;; + ppc64-*-linux*) + ;; + ppc-*-linux*) + ;; + s390-*-linux*) + ;; + s390x-*-linux*) + ;; + esac + # some programs use the native thread library directly + THREADLIB=-lpthread + AC_SUBST([THREADLIB], [$THREADLIB]) + LIBCRYPT=-lcrypt + AC_SUBST([LIBCRYPT], [$LIBCRYPT]) + AC_DEFINE([USE_POSIX_RWLOCKS], [1], [POSIX rwlocks]) + ;; + *-*-freebsd*) + AC_DEFINE([FREEBSD], [1], [FreeBSD]) + platform="freebsd" + initdir='$(sysconfdir)/rc.d' + THREADLIB=-lthr + AC_SUBST([THREADLIB], [$THREADLIB]) + AC_DEFINE([USE_POSIX_RWLOCKS], [1], [POSIX rwlocks]) + LIBDL= + ;; + ia64-hp-hpux*) + AC_DEFINE([hpux], [1], [HP-UX]) + AC_DEFINE([HPUX], [1], [HP-UX]) + AC_DEFINE([HPUX11], [1], [HP-UX 11]) + AC_DEFINE([HPUX11_23], [1], [HP-UX 11.23]) + AC_DEFINE([CPU_ia64], [], [cpu type ia64]) + AC_DEFINE([OS_hpux], [1], [OS HP-UX]) + AC_DEFINE([_POSIX_C_SOURCE], [199506L], [POSIX revision]) + AC_DEFINE([_HPUX_SOURCE], [1], [Source namespace]) + AC_DEFINE([_INCLUDE_STDC__SOURCE_199901], [1], [to pick up all of the printf format macros in inttypes.h]) + # assume 64 bit + perlexec='/opt/perl_64/bin/perl' + platform="hpux" + initconfigdir="/$PACKAGE_NAME/config" + # HPUX doesn't use /etc for this + initdir=/init.d + ;; + hppa*-hp-hpux*) + AC_DEFINE([hpux], [1], [HP-UX]) + AC_DEFINE([HPUX], [1], [HP-UX]) + AC_DEFINE([HPUX11], [1], [HP-UX 11]) + AC_DEFINE([HPUX11_11], [1], [HP-UX 11.11]) + AC_DEFINE([CPU_hppa], [], [cpu type pa-risc]) + AC_DEFINE([OS_hpux], [1], [OS HP-UX]) + AC_DEFINE([_POSIX_C_SOURCE], [199506L], [POSIX revision]) + AC_DEFINE([_HPUX_SOURCE], [1], [Source namespace]) + AC_DEFINE([_INCLUDE_STDC__SOURCE_199901], [1], [to pick up all of the printf format macros in inttypes.h]) + # assume 64 bit + initconfigdir="/$PACKAGE_NAME/config" + perlexec='/opt/perl_64/bin/perl' + platform="hpux" + # HPUX doesn't use /etc for this + initdir=/init.d + ;; + *-*-solaris*) + AC_DEFINE([SVR4], [1], [SVR4]) + AC_DEFINE([__svr4], [1], [SVR4]) + AC_DEFINE([__svr4__], [1], [SVR4]) + AC_DEFINE([_SVID_GETTOD], [1], [SVID_GETTOD]) + AC_DEFINE([SOLARIS], [1], [SOLARIS]) + AC_DEFINE([OS_solaris], [1], [OS SOLARIS]) + AC_DEFINE([sunos5], [1], [SunOS5]) + AC_DEFINE([OSVERSION], [509], [OS version]) + AC_DEFINE([_REENTRANT], [1], [_REENTRANT]) + AC_DEFINE([NO_DOMAINNAME], [1], [no getdomainname]) +dnl socket nsl and dl are required to link several programs and libdb + LIBSOCKET=-lsocket + AC_SUBST([LIBSOCKET], [$LIBSOCKET]) + LIBNSL=-lnsl + AC_SUBST([LIBNSL], [$LIBNSL]) + LIBDL=-ldl + AC_SUBST([LIBDL], [$LIBDL]) +dnl Cstd and Crun are required to 
link any C++ related code + LIBCSTD=-lCstd + AC_SUBST([LIBCSTD], [$LIBCSTD]) + LIBCRUN=-lCrun + AC_SUBST([LIBCRUN], [$LIBCRUN]) + platform="solaris" + initdir='$(sysconfdir)/init.d' + case $host in + i?86-*-solaris2.1[[0-9]]*) + dnl I dont know why i386 need this explicit + AC_DEFINE([HAVE_GETPEERUCRED], [1], [have getpeerucred]) + ;; + sparc-*-solaris*) + dnl includes some assembler stuff in counter.o + AC_DEFINE([CPU_sparc], [], [cpu type sparc]) + TARGET='SPARC' + ;; + esac + ;; + *) + platform="" + ;; +esac + +### TO CHECK FOR SSE4.2!!! +# gcc -march=native -dM -E - < /dev/null | grep SSE +# We can just use the define in GCC instead! + +AC_MSG_CHECKING([for GCC provided 64-bit atomic operations]) +AC_LINK_IFELSE([AC_LANG_PROGRAM([[ + #include + ]], + [[ + uint64_t t_counter = 0; + uint64_t t_oldval = 0; + uint64_t t_newval = 1; + + __atomic_compare_exchange_8(&t_counter, &t_oldval, t_newval, 0, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST); + __atomic_add_fetch_8(&t_counter, t_newval, __ATOMIC_SEQ_CST); + __atomic_sub_fetch_8(&t_counter, t_newval, __ATOMIC_SEQ_CST); + __atomic_load(&t_counter, &t_oldval, __ATOMIC_SEQ_CST); + return 0; + ]])], + [ + AC_DEFINE([ATOMIC_64BIT_OPERATIONS], [1], [have 64-bit atomic operation functions provided by gcc]) + AC_MSG_RESULT([yes]) + ], + [ + AC_MSG_RESULT([no]) + ] +) + +# cmd line overrides default setting above +if test -n "$with_initddir" ; then + initdir="$with_initddir" +fi + +# sysv init scripts not used when systemd is used +AC_SUBST(initdir) +AC_SUBST(perlexec) +AC_SUBST(pythonexec) +AC_SUBST(sttyexec) + +# set default initconfigdir if not already set +# value will be set so as to be relative to $(sysconfdir) +if test -z "$initconfigdir" ; then + if test -d /etc/sysconfig ; then + initconfigdir=/sysconfig + elif test -d /etc/default ; then + initconfigdir=/default + else + initconfigdir="/$PACKAGE_NAME/config" + fi +fi +AC_SUBST(initconfigdir) + +# Conditionals for makefile.am +AM_CONDITIONAL([ATOMIC_QUEUE_OPERATIONS], [test "$with_atomic_queue" = "yes"]) +AM_CONDITIONAL([HPUX],[test "$platform" = "hpux"]) +AM_CONDITIONAL([SOLARIS],[test "$platform" = "solaris"]) +AM_CONDITIONAL([FREEBSD],[test "$platform" = "freebsd"]) +AM_CONDITIONAL([SPARC],[test "x$TARGET" = xSPARC]) + +# Check for library dependencies +PKG_CHECK_MODULES([EVENT], [libevent]) + +if $PKG_CONFIG --exists nspr; then + PKG_CHECK_MODULES([NSPR], [nspr]) +else + PKG_CHECK_MODULES([NSPR], [dirsec-nspr]) +fi + +if $PKG_CONFIG --exists nss; then + PKG_CHECK_MODULES([NSS], [nss]) + nss_libdir=`$PKG_CONFIG --libs-only-L nss | sed -e s/-L// | sed -e s/\ .*$//` +else + PKG_CHECK_MODULES([NSS], [dirsec-nss]) + nss_libdir=`$PKG_CONFIG --libs-only-L dirsec-nss | sed -e s/-L// | sed -e s/\ .*$//` +fi +AC_SUBST(nss_libdir) + +m4_include(m4/openldap.m4) +m4_include(m4/db.m4) + +PKG_CHECK_MODULES([SASL], [libsasl2]) + +PKG_CHECK_MODULES([ICU], [icu-i18n >= 60.2]) + +m4_include(m4/netsnmp.m4) + +PKG_CHECK_MODULES([KERBEROS], [krb5]) +krb5_vendor=`$PKG_CONFIG --variable=vendor krb5` +if test "$krb5_vendor" = "MIT"; then + AC_DEFINE(HAVE_KRB5, 1, [Define if you have Kerberos V]) + save_LIBS="$LIBS" + LIBS="$KERBEROS_LIBS" + AC_CHECK_FUNCS([krb5_cc_new_unique]) + LIBS="$save_LIBS" +elif test "$krb5_vendor" = "Heimdal"; then + AC_DEFINE(HAVE_HEIMDAL_KERBEROS, 1, [Define if you have Heimdal Kerberos]) +fi + +if $PKG_CONFIG --exists pcre; then + PKG_CHECK_MODULES([PCRE], [pcre]) + pcre_libdir=`$PKG_CONFIG --libs-only-L pcre | sed -e s/-L// | sed -e s/\ .*$//` +else + PKG_CHECK_MODULES([PCRE], [libpcre]) + 
pcre_libdir=`$PKG_CONFIG --libs-only-L libpcre | sed -e s/-L// | sed -e s/\ .*$//` +fi +AC_SUBST(pcre_libdir) + +m4_include(m4/selinux.m4) +m4_include(m4/systemd.m4) + +AC_MSG_CHECKING(whether to enable cmocka unit tests) +AC_ARG_ENABLE(cmocka, AS_HELP_STRING([--enable-cmocka], [Enable cmocka unit tests (default: no)])) +if test "x$enable_cmocka" = "xyes"; then + AC_MSG_RESULT(yes) + PKG_CHECK_MODULES([CMOCKA], [cmocka]) + AC_DEFINE([ENABLE_CMOCKA], [1], [Enable cmocka unit tests]) +else + AC_MSG_RESULT(no) +fi +AM_CONDITIONAL([ENABLE_CMOCKA], [test "x$enable_cmocka" = "xyes"]) + +m4_include(m4/doxygen.m4) + +PACKAGE_BASE_VERSION=`echo $PACKAGE_VERSION | awk -F\. '{print $1"."$2}'` +AC_SUBST(PACKAGE_BASE_VERSION) + +AM_CONDITIONAL(OPENLDAP,test "$with_openldap" = "yes") + +# write out paths for binary components +AC_SUBST(ldaplib) +AC_SUBST(ldaplib_defs) +AC_SUBST(ldaptool_bindir) +AC_SUBST(ldaptool_opts) +AC_SUBST(plainldif_opts) +AC_SUBST(localrundir) + +AC_SUBST(brand) +AC_SUBST(capbrand) +AC_SUBST(vendor) + +# AC_DEFINE([USE_OLD_UNHASHED], [], [Use old unhashed code]) + +# Internally we use a macro function slapi_log_err() to call slapi_log_error() +# which gives us the option to do performance testing without the presence of +# logging. To remove the presence of error logging undefine LDAP_ERROR_LOGGING. +AC_DEFINE([LDAP_ERROR_LOGGING], [1], [LDAP error logging flag]) + +# Build our pkgconfig files +# This currently conflicts with %.in: rule in Makefile.am, which should be removed eventually. + +# AC_CONFIG_FILES([ldap/admin/src/defaults.inf]) + +AC_CONFIG_FILES([src/pkgconfig/dirsrv.pc src/pkgconfig/libsds.pc src/pkgconfig/svrcore.pc]) + +AC_CONFIG_FILES([Makefile rpm/389-ds-base.spec ]) + +AC_CONFIG_FILES([.cargo/config]) + +AC_OUTPUT + diff --git a/dirsrvtests/README b/dirsrvtests/README new file mode 100644 index 0000000..48b003f --- /dev/null +++ b/dirsrvtests/README @@ -0,0 +1,28 @@ +389-ds-base-tests README +================================================= + +Prerequisites: +------------------------------------------------- +Install the python-lib389 packages, or +download the source (git clone ssh://git.fedorahosted.org/git/389/lib389.git) and set your PYTHONPATH accordingly + + +Description: +------------------------------------------------- +This package includes python-lib389 based python scripts for testing the Directory Server. The following describes the various types of tests available: + +tickets - These scripts test individual bug fixes +suites - These test functional areas of the server +stress - These tests perform "stress" tests on the server + +There is also a "create_test.py" script available to construct a template test script for creating new tests.
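As a purely illustrative example of that workflow, the commands below generate a suite template with create_test.py (its options are documented by the script itself, included later in this patch) and then run an existing suite with py.test in the way the top-level README suggests; the suite name and output path are placeholders:

    # run from a built source tree; names below are illustrative only
    cd dirsrvtests
    python3 create_test.py -s example_feature -o tests/suites/example_feature/example_feature_test.py
    sudo py.test -s tests/suites/basic/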
+ + +Documentation: +------------------------------------------------- +See http://www.port389.org for the latest information + +http://www.port389.org/docs/389ds/FAQ/upstream-test-framework.html +http://www.port389.org/docs/389ds/howto/howto-write-lib389.html +http://www.port389.org/docs/389ds/howto/howto-run-lib389-jenkins.html + diff --git a/dirsrvtests/__init__.py b/dirsrvtests/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/dirsrvtests/conftest.py b/dirsrvtests/conftest.py new file mode 100644 index 0000000..9bf6f32 --- /dev/null +++ b/dirsrvtests/conftest.py @@ -0,0 +1,109 @@ +import subprocess +import logging +import pytest +import shutil +import glob +import os + +from lib389.paths import Paths +from enum import Enum + +pkgs = ['389-ds-base', 'nss', 'nspr', 'openldap', 'cyrus-sasl'] +p = Paths() + +class FIPSState(Enum): + ENABLED = 'enabled' + DISABLED = 'disabled' + NOT_AVAILABLE = 'not_available' + + def __unicode__(self): + return self.value + + def __str__(self): + return self.value + +def get_rpm_version(pkg): + try: + result = subprocess.check_output(['rpm', '-q', '--queryformat', + '%{VERSION}-%{RELEASE}', pkg]) + except: + result = b"not installed" + + return result.decode('utf-8') + + +def is_fips(): + # Are we running in FIPS mode? + if not os.path.exists('/proc/sys/crypto/fips_enabled'): + return FIPSState.NOT_AVAILABLE + state = None + with open('/proc/sys/crypto/fips_enabled', 'r') as f: + state = f.readline().strip() + if state == '1': + return FIPSState.ENABLED + else: + return FIPSState.DISABLED + + +@pytest.fixture(autouse=True) +def _environment(request): + if "_metadata" in dir(request.config): + for pkg in pkgs: + request.config._metadata[pkg] = get_rpm_version(pkg) + request.config._metadata['FIPS'] = is_fips() + + +def pytest_cmdline_main(config): + logging.basicConfig(level=logging.DEBUG) + + +def pytest_report_header(config): + header = "" + for pkg in pkgs: + header += "%s: %s\n" % (pkg, get_rpm_version(pkg)) + header += "FIPS: %s" % is_fips() + return header + + +@pytest.fixture(scope="function", autouse=True) +def log_test_name_to_journald(request): + if p.with_systemd: + def log_current_test(): + subprocess.Popen("echo $PYTEST_CURRENT_TEST | systemd-cat -t pytest", stdin=subprocess.PIPE, shell=True) + + log_current_test() + request.addfinalizer(log_current_test) + return log_test_name_to_journald + + +@pytest.fixture(scope="function", autouse=True) +def rotate_xsan_logs(request): + # Do we have a pytest-html installed? + pytest_html = request.config.pluginmanager.getplugin('html') + if pytest_html is not None: + # We have it installed, but let's check if we actually use it (--html=report.html) + pytest_htmlpath = request.config.getoption('htmlpath') + if p.asan_enabled and pytest_htmlpath is not None: + # ASAN is enabled and an HTML report was requested, + # rotate the ASAN logs so that only relevant logs are attached to the case in the report. 
+ xsan_logs_dir = f'{p.run_dir}/bak' + if not os.path.exists(xsan_logs_dir): + os.mkdir(xsan_logs_dir) + else: + for f in glob.glob(f'{p.run_dir}/ns-slapd-*san*'): + shutil.move(f, xsan_logs_dir) + return rotate_xsan_logs + + +@pytest.hookimpl(hookwrapper=True) +def pytest_runtest_makereport(item, call): + pytest_html = item.config.pluginmanager.getplugin('html') + outcome = yield + report = outcome.get_result() + extra = getattr(report, 'extra', []) + if report.when == 'call' and pytest_html is not None: + for f in glob.glob(f'{p.run_dir}/ns-slapd-*san*'): + with open(f) as asan_report: + text = asan_report.read() + extra.append(pytest_html.extras.text(text, name=os.path.basename(f))) + report.extra = extra diff --git a/dirsrvtests/create_test.py b/dirsrvtests/create_test.py new file mode 100755 index 0000000..fd6df7b --- /dev/null +++ b/dirsrvtests/create_test.py @@ -0,0 +1,322 @@ +#!/usr/bin/python3 +# +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2016 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. +# --- END COPYRIGHT BLOCK --- + +import optparse +import os +import re +import sys +import uuid +from lib389 import topologies + +"""This script generates a template test script that handles the +non-interesting parts of a test script: +- topology fixture that doesn't exist in in lib389/topologies.py +- test function (to be completed by the user), +- run-isolated function +""" + + +def displayUsage(): + """Display the usage""" + + print ('\nUsage:\ncreate_ticket.py -t|--ticket ' + + '-s|--suite ' + + '[ i|--instances ' + + '[ -m|--masters -h|--hubs ' + + '-c|--consumers ] -o|--outputfile ]\n') + print ('If only "-t" is provided then a single standalone instance is ' + + 'created. Or you can create a test suite script using ' + + '"-s|--suite" instead of using "-t|--ticket". The "-i" option ' + + 'can add mulitple standalone instances (maximum 99). However, you' + + ' can not mix "-i" with the replication options (-m, -h , -c). ' + + 'There is a maximum of 99 masters, 99 hubs, and 99 consumers.') + print('If "-s|--suite" option was chosen, then no topology would be added ' + + 'to the test script. 
You can find predefined fixtures in the lib389/topologies.py ' + + 'and use them or write a new one if you have a special case.') + exit(1) + + +def writeFinalizer(): + """Write the finalizer function - delete/stop each instance""" + + def writeInstanceOp(action): + TEST.write(' map(lambda inst: inst.{}(), topology.all_insts.values())\n'.format(action)) + + TEST.write('\n def fin():\n') + TEST.write(' """If we are debugging just stop the instances, otherwise remove them"""\n\n') + TEST.write(' if DEBUGGING:\n') + writeInstanceOp('stop') + TEST.write(' else:\n') + writeInstanceOp('delete') + TEST.write('\n request.addfinalizer(fin)') + TEST.write('\n\n') + + +def get_existing_topologies(inst, masters, hubs, consumers): + """Check if the requested topology exists""" + setup_text = "" + + if inst: + if inst == 1: + i = 'st' + setup_text = "Standalone Instance" + else: + i = 'i{}'.format(inst) + setup_text = "{} Standalone Instances".format(inst) + else: + i = '' + if masters: + ms = 'm{}'.format(masters) + if len(setup_text) > 0: + setup_text += ", " + if masters == 1: + setup_text += "Master Instance" + else: + setup_text += "{} Master Instances".format(masters) + else: + ms = '' + if hubs: + hs = 'h{}'.format(hubs) + if len(setup_text) > 0: + setup_text += ", " + if hubs == 1: + setup_text += "Hub Instance" + else: + setup_text += "{} Hub Instances".format(hubs) + else: + hs = '' + if consumers: + cs = 'c{}'.format(consumers) + if len(setup_text) > 0: + setup_text += ", " + if consumers == 1: + setup_text += "Consumer Instance" + else: + setup_text += "{} Consumer Instances".format(consumers) + else: + cs = '' + + my_topology = 'topology_{}{}{}{}'.format(i, ms, hs, cs) + + # Returns True in the first element of a list, if topology was found + if my_topology in dir(topologies): + return [True, my_topology, setup_text] + else: + return [False, my_topology, setup_text] + + +def check_id_uniqueness(id_value): + """Checks if ID is already present in other tests. + create_test.py script should exist in the directory + with a 'tests' dir. + """ + + tests_dir = os.path.join(os.getcwd(), 'tests') + + for root, dirs, files in os.walk(tests_dir): + for name in files: + if name.endswith('.py'): + with open(os.path.join(root, name), "r") as cifile: + for line in cifile: + if re.search(str(id_value), line): + return False + + return True + + +desc = 'Script to generate an initial lib389 test script. ' + \ + 'This generates the topology, test, final, and run-isolated functions.' 
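+# Example (editor's illustration, not part of the upstream script): a run such as
+#     python3 create_test.py -s basic -m 2 -c 1
+# calls get_existing_topologies(0, 2, 0, 1), which builds the fixture name
+# 'topology_m2c1' and picks 'basic_test.py' as the output file. If
+# lib389.topologies happens to define a fixture with that name, the generated
+# test imports it as 'topo'; otherwise the script writes its own @pytest.fixture
+# that calls create_topology() with the requested ReplicaRole counts.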
+ +if len(sys.argv) > 0: + parser = optparse.OptionParser(description=desc, add_help_option=False) + + # Script options + parser.add_option('-t', '--ticket', dest='ticket', default=None) + parser.add_option('-s', '--suite', dest='suite', default=None) + parser.add_option('-i', '--instances', dest='inst', default='0') + parser.add_option('-m', '--masters', dest='masters', default='0') + parser.add_option('-h', '--hubs', dest='hubs', default='0') + parser.add_option('-c', '--consumers', dest='consumers', default='0') + parser.add_option('-o', '--outputfile', dest='filename', default=None) + + # Validate the options + try: + (args, opts) = parser.parse_args() + except: + displayUsage() + + if args.ticket is None and args.suite is None: + print('Missing required ticket number/suite name') + displayUsage() + + if args.ticket and args.suite: + print('You must choose either "-t|--ticket" or "-s|--suite", ' + + 'but not both.') + displayUsage() + + if int(args.masters) == 0: + if int(args.hubs) > 0 or int(args.consumers) > 0: + print('You must use "-m|--masters" if you want to have hubs ' + + 'and/or consumers') + displayUsage() + + if not args.masters.isdigit() or \ + int(args.masters) > 99 or \ + int(args.masters) < 0: + print('Invalid value for "--masters", it must be a number and it can' + + ' not be greater than 99') + displayUsage() + + if not args.hubs.isdigit() or int(args.hubs) > 99 or int(args.hubs) < 0: + print('Invalid value for "--hubs", it must be a number and it can ' + + 'not be greater than 99') + displayUsage() + + if not args.consumers.isdigit() or \ + int(args.consumers) > 99 or \ + int(args.consumers) < 0: + print('Invalid value for "--consumers", it must be a number and it ' + + 'can not be greater than 99') + displayUsage() + + if args.inst: + if not args.inst.isdigit() or \ + int(args.inst) > 99 or \ + int(args.inst) < 0: + print('Invalid value for "--instances", it must be a number ' + + 'greater than 0 and not greater than 99') + displayUsage() + if int(args.inst) > 0: + if int(args.masters) > 0 or \ + int(args.hubs) > 0 or \ + int(args.consumers) > 0: + print('You can not mix "--instances" with replication.') + displayUsage() + + # Extract usable values + ticket = args.ticket + suite = args.suite + + if args.inst == '0' and args.masters == '0' and args.hubs == '0' \ + and args.consumers == '0': + instances = 1 + my_topology = [True, 'topology_st', "Standalone Instance"] + else: + instances = int(args.inst) + masters = int(args.masters) + hubs = int(args.hubs) + consumers = int(args.consumers) + my_topology = get_existing_topologies(instances, masters, hubs, consumers) + filename = args.filename + setup_text = my_topology[2] + + # Create/open the new test script file + if not filename: + if ticket: + filename = 'ticket' + ticket + '_test.py' + else: + filename = suite + '_test.py' + + try: + TEST = open(filename, "w") + except IOError: + print("Can\'t open file:", filename) + exit(1) + + # Write the imports + if my_topology[0]: + topology_import = 'from lib389.topologies import {} as topo\n'.format(my_topology[1]) + else: + topology_import = 'from lib389.topologies import create_topology\n' + + TEST.write('import logging\nimport pytest\nimport os\n') + TEST.write('from lib389._constants import *\n') + TEST.write('{}\n'.format(topology_import)) + + TEST.write('DEBUGGING = os.getenv("DEBUGGING", default=False)\n') + TEST.write('if DEBUGGING:\n') + TEST.write(' logging.getLogger(__name__).setLevel(logging.DEBUG)\n') + TEST.write('else:\n') + TEST.write(' 
logging.getLogger(__name__).setLevel(logging.INFO)\n') + TEST.write('log = logging.getLogger(__name__)\n\n') + + # Add topology function for non existing (in lib389/topologies.py) topologies only + if not my_topology[0]: + # Write the replication or standalone classes + topologies_str = "" + if masters > 0: + topologies_str += " {} masters".format(masters) + if hubs > 0: + topologies_str += " {} hubs".format(hubs) + if consumers > 0: + topologies_str += " {} consumers".format(consumers) + if instances > 0: + topologies_str += " {} standalone instances".format(instances) + + # Write the 'topology function' + TEST.write('\n@pytest.fixture(scope="module")\n') + TEST.write('def topo(request):\n') + TEST.write(' """Create a topology with{}"""\n\n'.format(topologies_str)) + TEST.write(' topology = create_topology({\n') + if masters > 0: + TEST.write(' ReplicaRole.MASTER: {},\n'.format(masters)) + if hubs > 0: + TEST.write(' ReplicaRole.HUB: {},\n'.format(hubs)) + if consumers > 0: + TEST.write(' ReplicaRole.CONSUMER: {},\n'.format(consumers)) + if instances > 0: + TEST.write(' ReplicaRole.STANDALONE: {},\n'.format(instances)) + TEST.write(' })\n') + + TEST.write(' # You can write replica test here. Just uncomment the block and choose instances\n') + TEST.write(' # replicas = Replicas(topology.ms["master1"])\n') + TEST.write(' # replicas.test(DEFAULT_SUFFIX, topology.cs["consumer1"])\n') + + writeFinalizer() + TEST.write(' return topology\n\n') + + tc_id = '0' + while not check_id_uniqueness(tc_id): tc_id = uuid.uuid4() + + # Write the test function + if ticket: + TEST.write('\ndef test_ticket{}(topo):\n'.format(ticket)) + else: + TEST.write('\ndef test_something(topo):\n') + TEST.write(' """Specify a test case purpose or name here\n\n') + TEST.write(' :id: {}\n'.format(tc_id)) + TEST.write(' :setup: ' + setup_text + '\n') + TEST.write(' :steps:\n') + TEST.write(' 1. Fill in test case steps here\n') + TEST.write(' 2. And indent them like this (RST format requirement)\n') + TEST.write(' :expectedresults:\n') + TEST.write(' 1. Fill in the result that is expected\n') + TEST.write(' 2. 
For each test step\n') + TEST.write(' """\n\n') + TEST.write(' # If you need any test suite initialization,\n') + TEST.write(' # please, write additional fixture for that (including finalizer).\n' + ' # Topology for suites are predefined in lib389/topologies.py.\n\n') + TEST.write(' # If you need host, port or any other data about instance,\n') + TEST.write(' # Please, use the instance object attributes for that (for example, topo.ms["master1"].serverid)\n\n') + + TEST.write(' if DEBUGGING:\n') + TEST.write(' # Add debugging steps(if any)...\n') + TEST.write(' pass\n\n\n') + + # Write the main function + TEST.write("if __name__ == '__main__':\n") + TEST.write(' # Run isolated\n') + TEST.write(' # -s for DEBUG mode\n') + TEST.write(' CURRENT_FILE = os.path.realpath(__file__)\n') + TEST.write(' pytest.main(["-s", CURRENT_FILE])\n\n') + + # Done, close things up + TEST.close() + print('Created: ' + filename) diff --git a/dirsrvtests/pytest.ini b/dirsrvtests/pytest.ini new file mode 100644 index 0000000..48984e5 --- /dev/null +++ b/dirsrvtests/pytest.ini @@ -0,0 +1,6 @@ +[pytest] +markers = + tier0: mark a test as part of tier0 + tier1: mark a test as part of tier1 + tier2: mark a test as part of tier2 + tier3: mark a test as part of tier3 diff --git a/dirsrvtests/tests/__init__.py b/dirsrvtests/tests/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/dirsrvtests/tests/data/README b/dirsrvtests/tests/data/README new file mode 100644 index 0000000..4261f92 --- /dev/null +++ b/dirsrvtests/tests/data/README @@ -0,0 +1,11 @@ +DATA DIRECTORY README + +This directory is used for storing LDIF files used by the dirsrvtests scripts. +This directory can be retrieved via getDir() from the DirSrv class. + +Example: + + data_dir_path = topology.standalone.getDir(__file__, DATA_DIR) + + ldif_file = data_dir_path + "ticket44444/1000entries.ldif" + diff --git a/dirsrvtests/tests/data/__init__.py b/dirsrvtests/tests/data/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/dirsrvtests/tests/data/basic/__init__.py b/dirsrvtests/tests/data/basic/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/dirsrvtests/tests/data/basic/dse.ldif.broken b/dirsrvtests/tests/data/basic/dse.ldif.broken new file mode 100644 index 0000000..489b443 --- /dev/null +++ b/dirsrvtests/tests/data/basic/dse.ldif.broken @@ -0,0 +1,95 @@ +dn: +objectClass: top +aci: (targetattr != "aci")(version 3.0; aci "rootdse anon read access"; allow( + read,search,compare) userdn="ldap:///anyone";) +creatorsName: cn=server,cn=plugins,cn=config +modifiersName: cn=server,cn=plugins,cn=config +createTimestamp: 20150204165610Z +modifyTimestamp: 20150204165610Z + +dn: cn=config +cn: config +objectClass: top +objectClass: extensibleObject +objectClass: nsslapdConfig +nsslapd-schemadir: /etc/dirsrv/slapd-localhost/schema +nsslapd-lockdir: /var/lock/dirsrv/slapd-localhost +nsslapd-tmpdir: /tmp +nsslapd-certdir: /etc/dirsrv/slapd-localhost +nsslapd-ldifdir: /var/lib/dirsrv/slapd-localhost/ldif +nsslapd-bakdir: /var/lib/dirsrv/slapd-localhost/bak +nsslapd-rundir: /var/run/dirsrv +nsslapd-instancedir: /usr/lib64/dirsrv/slapd-localhost +nsslapd-accesslog-logging-enabled: on +nsslapd-accesslog-maxlogsperdir: 10 +nsslapd-accesslog-mode: 600 +nsslapd-accesslog-maxlogsize: 100 +nsslapd-accesslog-logrotationtime: 1 +nsslapd-accesslog-logrotationtimeunit: day +nsslapd-accesslog-logrotationsync-enabled: off +nsslapd-accesslog-logrotationsynchour: 0 +nsslapd-accesslog-logrotationsyncmin: 0 +nsslapd-accesslog: 
/var/log/dirsrv/slapd-localhost/access +nsslapd-enquote-sup-oc: off +nsslapd-localhost: localhost.localdomain +nsslapd-schemacheck: on +nsslapd-syntaxcheck: on +nsslapd-dn-validate-strict: off +nsslapd-rewrite-rfc1274: off +nsslapd-return-exact-case: on +nsslapd-ssl-check-hostname: on +nsslapd-validate-cert: warn +nsslapd-allow-unauthenticated-binds: off +nsslapd-require-secure-binds: off +nsslapd-allow-anonymous####-access: on +nsslapd-localssf: 71 +nsslapd-minssf: 0 +nsslapd-port: 389 +nsslapd-localuser: nobody +nsslapd-errorlog-logging-enabled: on +nsslapd-errorlog-mode: 600 +nsslapd-errorlog-maxlogsperdir: 2 +nsslapd-errorlog-maxlogsize: 100 +nsslapd-errorlog-logrotationtime: 1 +nsslapd-errorlog-logrotationtimeunit: week +nsslapd-errorlog-logrotationsync-enabled: off +nsslapd-errorlog-logrotationsynchour: 0 +nsslapd-errorlog-logrotationsyncmin: 0 +nsslapd-errorlog: /var/log/dirsrv/slapd-localhost/errors +nsslapd-auditlog: /var/log/dirsrv/slapd-localhost/audit +nsslapd-auditlog-mode: 600 +nsslapd-auditlog-maxlogsize: 100 +nsslapd-auditlog-logrotationtime: 1 +nsslapd-auditlog-logrotationtimeunit: day +nsslapd-rootdn: cn=dm +nsslapd-maxdescriptors: 1024 +nsslapd-max-filter-nest-level: 40 +nsslapd-ndn-cache-enabled: on +nsslapd-sasl-mapping-fallback: off +nsslapd-dynamic-plugins: off +nsslapd-allow-hashed-passwords: off +nsslapd-ldapifilepath: /var/run/slapd-localhost.socket +nsslapd-ldapilisten: off +nsslapd-ldapiautobind: off +nsslapd-ldapimaprootdn: cn=dm +nsslapd-ldapimaptoentries: off +nsslapd-ldapiuidnumbertype: uidNumber +nsslapd-ldapigidnumbertype: gidNumber +nsslapd-ldapientrysearchbase: dc=example,dc=com +nsslapd-defaultnamingcontext: dc=example,dc=com +aci: (targetattr="*")(version 3.0; acl "Configuration Administrators Group"; a + llow (all) groupdn="ldap:///cn=Configuration Administrators,ou=Groups,ou=Topo + logyManagement,o=NetscapeRoot";) +aci: (targetattr="*")(version 3.0; acl "Configuration Administrator"; allow (a + ll) userdn="ldap:///uid=admin,ou=Administrators,ou=TopologyManagement,o=Netsc + apeRoot";) +aci: (targetattr = "*")(version 3.0; acl "SIE Group"; allow (all) groupdn = "l + dap:///cn=slapd-localhost,cn=389 Directory Server,cn=Server Group,cn=localhos + t.localdomain,ou=example.com,o=NetscapeRoot";) +modifiersName: cn=dm +modifyTimestamp: 20150205195242Z +nsslapd-auditlog-logging-enabled: on +nsslapd-auditlog-logging-hide-unhashed-pw: off +nsslapd-rootpw: {SSHA}AQH9bTYZW4kfkfyHg1k+lG88H2dFOuwakzFEpw== +numSubordinates: 10 + diff --git a/dirsrvtests/tests/data/ticket47953/__init__.py b/dirsrvtests/tests/data/ticket47953/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/dirsrvtests/tests/data/ticket47953/ticket47953.ldif b/dirsrvtests/tests/data/ticket47953/ticket47953.ldif new file mode 100644 index 0000000..e59977e --- /dev/null +++ b/dirsrvtests/tests/data/ticket47953/ticket47953.ldif @@ -0,0 +1,27 @@ +dn: dc=example,dc=com +objectClass: top +objectClass: domain +dc: example +aci: (targetattr!="userPassword")(version 3.0; acl "Enable anonymous access"; + allow (read, search, compare) userdn="ldap:///anyone";) +aci: (targetattr="carLicense || description || displayName || facsimileTelepho + neNumber || homePhone || homePostalAddress || initials || jpegPhoto || labele + dURI || mail || mobile || pager || photo || postOfficeBox || postalAddress || + postalCode || preferredDeliveryMethod || preferredLanguage || registeredAddr + ess || roomNumber || secretary || seeAlso || st || street || telephoneNumber + || telexNumber || title || 
userCertificate || userPassword || userSMIMECertif + icate || x500UniqueIdentifier")(version 3.0; acl "Enable self write for commo + n attributes"; allow (write) userdn="ldap:///self";) +aci: (targetattr ="fffff")(version 3.0;acl "Directory Administrators Group";al + low (all) (groupdn = "ldap:///cn=Directory Administrators, dc=example,dc=com" + );) +aci: (targetattr="*")(version 3.0; acl "Configuration Administrators Group"; a + llow (all) groupdn="ldap:///cn=Configuration Administrators,ou=Groups,ou=Topo + logyManagement,o=NetscapeRoot";) +aci: (targetattr="*")(version 3.0; acl "Configuration Administrator"; allow (a + ll) userdn="ldap:///uid=admin,ou=Administrators,ou=TopologyManagement,o=Netsc + apeRoot";) +aci: (targetattr = "*")(version 3.0; acl "TEST ACI"; allow (writ + e) groupdn = "ldap:///cn=slapd-localhost,cn=389 Directory Server,cn=Server Gr + oup,cn=localhost.localdomain,ou=example.com,o=NetscapeRoot";) + diff --git a/dirsrvtests/tests/data/ticket47988/__init__.py b/dirsrvtests/tests/data/ticket47988/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/dirsrvtests/tests/data/ticket47988/schema_ipa3.3.tar.gz b/dirsrvtests/tests/data/ticket47988/schema_ipa3.3.tar.gz new file mode 100644 index 0000000000000000000000000000000000000000..2b309a04ee00be73556d5941c0bda10ba46d33f7 GIT binary patch literal 98049 zcmV)CK*GNtiwFq#8NyTm19M|&Wo=egPE;BB4VR8WMU1@XMM$-1z^{<$cTxGjT zqIpVoQu~1>DVZBe;>#nk_ZtWdNkkyP#UY8=+W-D`&jHK;phSw2pd>^pWpM@=Jl)gN z*BmH)dh_g88LrdQGW^>pmrv@-zwz~^QmvnqEA?`@R(VsdRN($Mr1AXF-n@x|(C`U) z6HYweGX7Y5-$V6((yR38q`dS3`|kMGc7v5`9xt6}X1gWFvZqh<4(#B1eQp0YDpl71 zjnmV5<+M_R{y(WUs&7d7iQa$j^*8VT!?%azYv;UsNm^I^AA|1sw-Nc;yK4Oc<)gVB zkiPHDd}Bf2x-3A|}|YxwkMvhpHg8ZPmvWe1^ePoj_#J0yl{ee!&=@IZoASP;1u z^`X6RPDx1pML@hM`*D6bCg;?pzTuF5G;wT`^lX#50VPI2mRJ(ZsYNC$cKaE=e<{RD%0*NJ$V|#CF*Ob8iXo=0=E+Z*9jR6H20hP9x`t zz@6l`?&#aqcm%CAFMp8Vnu9^}a`fY8xHp`8P)~0tZ^>RP9UB@3n7-kLD};2>8MMB^ z{mrl4UU&3^0E5oDqsz{4h=g1XNR#xNgHg9N?llLbKOXe2hMf`_Qu+|5ri|A?v`*@QB>{HuQPu zWxGe|oFme8&C(HRd?6zWtii79JBCS*$S}g^Yqjzb`RWBB-h0s`or(`^A z3VOOOF)YjG4T5wSBv?|@2H5n@q)T2t0Q$ZeA%7#80lfiZ1j3U~8xFMQ+p~Fyz#ZGY zX4R%=QO*=aQjmCF1LVRVAZ73dCeM<4(AXOOeK@%E+~f0pkF=nLgO4~c@RiDF!M8($ zrni{bE@M^z?b)vBM3xQAXCvm;J0d|e`JI}fWVK^2?2zBj#TPIy4c`FJ%Xx{Y?}hf3 zFy`zqRFK2%G1tffR3PzOXC;{7t>p)k1K*2gbNJT-5^pIC6N^r5myIJB+*9aRz+_Q3 zw5N75x~8@ROf(iy()r%%^he#ROK5J2QX=mOyvR4joALgDe8A#^=5pzKD7xMF4F?}f zFs9{g-V~Zri6#pJG>;3a9bgzJq9}OqM}Pwl1~uGZ`X2jbyFfOy6uBm6=+yTXLR;m$ zIUeq&5e2f3d>TYfh`(+rbvibH+6sra2dtPGI81PmdLWMRo8cGUaSP@MDse~wt3k-qZ~n;p?TG39{Gp&&4dCIUu^x9hy)+7th8f!gPK z??dY&seKpCJuh!YlfF56l60RJOHb50iqlM|mV@fnv51Evc=)e>t3 zgC02HIIH6s6FX$^43x-{Ea+kaR6;|R`x7zUe+I0zw?-)R<7l;{!B6A^sg^1yrO)+p zsZy#`YNxewsRn;mEA>Jbio=N+tz#1?hISxeaB&JLjNCg0#0E9Xss-M80l}4#jtstrs^8NSVjU$D}#<0S5R7U`a8QeB6+zqW#(ZwcSnGLm0F{L@gxLz<*Epg_{7}RRG-*m{s_VW&iE_PDrl2&=x-F8 zR`UX!m>!)P3~jQI^CwpiyKR0qxFQa18}1zG=&EGs8iGxW0!i0_VWK`bI3fq&=*qu=r~_^j ztpjERBNiGO+YtF+2R$8;RfR~fk^W93V)wo_0&6J(zlGs)Jm|%`oq-O>Z*+ogD46(w z2Aj~JEs1%0*mVW=FmAMInh;!sT}y%Q+W(2DlI6vKNBZyz9Ps}P0!SRJ3qxu1bXGLlQpBJ;w@o*e9DK%ejxr2>nmE z7v1}{&b^Lk0MP@)CmY#)kc=0T?vq^pC1{_5eZFYz@!V7T^Cj4~=|n{JOGd-T<0`s? 
z<_vI8xi5{j>c;lN2p6HTJ0WvOO1p|*ihO~L3hACbC@xoM^*UbF%3Jp_Qj zu#{)5mPr3ImzP@!RlZree-z}HFChRq8X{98b=eMbU_MA!MRkK%TqZ}$h2dXAF^56*jyI0MWj?9*1dc^dxV*Bf^r`xJX**u{JX3${ zdSLzMY5BB~n*Xcq=l@>l+Ry*(=l}NefBX5r{rum4{%=43x1axe$@xEKl+hUD&Otu; zhSW;i>(d33=jO24FA=aT3lO>`m{wD0z957NOUf46-aYu|>kWbdF z_(Fs#-=sh+i8)F&_%R#+3HS5FlD;w(Y)A;qXZ!DtL{ z&>?+?V)kvq*w1`L*7h_CBOntY@PZ&RO$rT53{08C;s{a#A{VPbuEm3NUj$5OMciE4 zV1aVE^rcj-)ghGr0;8x_h5#G?FEwPsKw4qqGmDkCP8$v_fy+uDhr=`hi`sh>IQuM ziUvtv;Q17Q@|Jb41!TKc8W1Zg5|CWFl7S^Fit4^(uW*i3VR5m*ntah^4mMF!6ukH( zu&z7x26U<+Oji~qc`=UhpqCF&S?l@);Br^JJ_zm%e0a2lJuvI}DAhZPg3FfG@b!6^ z;M0Jn>WY#{JOlujccB^7@TnDrfirvy!z741zAQ6R5^U%3;ap^Hn4g2m*E zZI8+qy|}6N1I4LfabHi>YnwqspJxPVmmI+OCEpbhAajjYK*r&kr|eAO$eLmUpbNIB0yxDc zTHUl3Tz6Bkpj&vr^aGeP_Y?)WXBkVra3fEbXKD-MEKVE@mmb>}qw`a(DQvX+zUTKm z{rovuSeT8rtu!QFRG2$znluQ~nBvmHk{NVrL{5k}mZ97h0%pQ2NVYFOk(vC`bQPBQn_&67mv8YOU zVHtp2Q~#pIJ?Df@B{8=yVYm6IoSw3!({RSF9g4+inFQeGJ1Kc?E|aW#)KQ_PyZ%s6 zi{eH|K4tfgIP6?dH7m#qQdu}w7$?8BAu7pAtw~nxN?DtHgS#m7O2qZXS{&jMJDzo z_`n4t?$fF0A!^FM6_d3s=Ir2&OP><`wmO9`AI>TjeOdA2k%1*W6PA=K5qxA@m7aGJ z_x-1EZ-_6gA5sTBYZIfqL=6qOc!-dUs}L3parG!;5cVtA-B;cb5BDv&qD_jfpDx|K z5BY3A*G*oV%xse5jUBzZ~)HQ6H9uAA-m9@#EXWC%#fEz7C)M1&14- z6(x_kF%4g=jC}u=2u*xL=eFt4cugMr^5IOJa{^pr%S`e1b;lAy)k6mp;w>9QesG(< zF9*93b#S4~s{$;skBy?Z;JQXJMHDkJULTC>xRcx13PygKk{VwAt+`pRB>c1kkggS* z=Z#sJoR7)eG23%Ts)~3n`>H=18#guHi3725E%|&!1oz zCX$55u?;V}7airpM+uVydxp{G0-|ALXCw~C45H-{d(1?N-tj#-sH(Vd8IG_C;fig<_7}10e|4-mOwK!C$57yA zl8|rtz>0dKT4!gR@}o(w{F!!QK0lp8LCiMfhf8~JfEI&|> zn_wGVViBhg#!7 z_kH{$@*RYivL>B7!whqb2`hgpM+8Fg_pG$t=?^-s=BU$7!+I93ZFuNu68mYIg9$`t z&XCt()o3tojmCp!?=4B4a52F>mSB8ZV3lV0rTKRx3Ch_Vkse}_chCf0N~{F8=`Oue zfIFrG4pcf89x0arYbXZ+>y!pshS zd|As>bT;hoca=V!lqaC2ZfehW0PF+jf7DJZ^(6mWE#vvGwaUpp|N9cx;UVevn}mIL zCA)iulG~$TFla;thI)|oiv$5vuIZ(6x2Xxkdq2N(u)>Jx2 zJ`7%JiO&Qmo4!XxR1seH=O$XC`OGo2-ADNQ(Lwa2!9iq#L zVP5xrdjZ~%w)#TWiQ$VW3L1)oC)nNM{P{!>5(i0~B3l@ng^*OS*% zl?8p@!*!^ABb;k25aond{Dr9htjRY7P4S`;jee{lan)as4PVx>q3uJQnZpY#COsO4 zJNg-~jT4o!$_j3a&&Qgf76zJ^zEaDao9y5ORx!avoOq`X)NRhp*skQV{?3)AvBJA%BxzPp-E!Ks&Y^sk29@4#;?dj+Fj zGx|e5JxZ;PYx*m;LYXxYAM`Bi35{1V8E6Nqo83yxyH2H43pQ;Nx*3O1I7X zJTYmCJ2j-Hhu<(*Jw#(5Z+sop9`i-{ox9LC0BDX$BLys>OG@N_Zt`Xjj&=q^eRIG}d6^M=L#q>hXR=RbQhB=3+)si}_V;)cMPC)9}6y;-)2xiGCI{7=vd+L3YH=ngDHQ8j1oFC1V zdnNmqOqR;s!?}Y($#P2iQaMGfI7!T@r6M7=j$xT3Q>H>1Ppv|k3D(L}i4Yx0l30-C z>w@>VKF5gX)k*Llx9LCWN@XJHd&E)cpj6Dht28M|CGdY5;;Y)gkY=mL0c9h8RjW#& z$wt+jVOlu5E7(eY6nbF@m5=WW0?)L$xlL<-{i*o7%XcuSeV2Y>gNIHLBc^IaTegq^xrkb- zZl7yRNm;2=S~w(C%N-kQg8)!MCkU_sP)pC9cmLcyb}SS(NmQoPQxtFQ|MuE`}k{6iWm+_!Q{QuDZ^P8Ul)yL^md4Nv$ z|J>Q#+iU0jKlgT9Z~mXp@t2FbXXo)P)^z$!kc2GadZef^)eniPFOB_ro%KOczmYen zx))X#O5)r2BfA{1{hi(VE`=Wb?)TD@`26pU?K%~J=|1#=UvCC~-*7jL5;IL8kdLA} zQI)2wx(xMradMRY4FAI4KSAKiwt_U?a@`=yHkzBfoiR6_L|O8%(b!c@=>EO|OWJ5) zR*1qz!@9-9cs7s$G!An2m{ps&@GAbvb+*x}m)+J#SHd*IrM|JVNk{XljL}ae@4_F| zLmLLJkwwn4rfr7r^&g_lx!-7)4bS{UO3%&Ozo^MfJa1!nlNZaK85|?vOn!>L0rPC& znajRgSF!hC%sXS4nhunkKGs+&&i5$D7*1%ry_16(&qmXSvY8A$XS|Vr9RF-XODPR# ztZeMAE{VnpJ7RzP8y}}lgeC_ln=SZTk6$Jh>9KC$WcrORkhEQ{tRRpYrpG!s*fhyfVvZwRrY*22 z^`p9#;`S4IRF6D*pmK%OlfI7;Y_PO=q-!cnCcE2I&6EOP8_oJ$G~Ftt>49y$UG7B8 z!pAUlu95s?gg7t@$~r;?0zG9<EkD(}JpPE)N9}$B zvm5LH5nI{Gm(FRwtQRWJS@t5)Fo?!OSfEk1J=Z+6&3U-&9!Jhq=&Sl>pW`_6ooJRs zuT{7Q3PSUQlL>z@n_2ppq0&$18uD%_>F8XS1_533LXx1BQZ@0J32CPi82IFXycNCx zL7D95CJw#Sa#(V%@RZ1!u_3)Al0L|h8@&sjv{ApWi{xW$i{ZsSU2W1#OW~6GNkvWe zl*mJLWpvZv{ZKdiqSH<3975!Xy^NC>hQot6sa7S*J?XYFGCp$rhUA+mW)@J3pKhI< z45+v;)ru5INc@kYo1>F{=k)aAkAHM8`hz134gQ;kGylw&i{2ld-cjfC>9KyQnSW~V z>);=sJN@G$xyP$Vck+++j?a((!0rl*NvT|t71SG)u{X68{mB4&RCAbSh)u`vd>d9|* 
zR_Mnf0s9gscfi*e#fJNltIBxyd?y?U$4|+jHohF4OZf)IMvolEz=Hxj#^KLy%7;PP zLLT)q(5f5#y>obWynT3a_7N|n?n4*SygDC}c1$GEKzb4ala>oEZZjXDHzkHnMPcRB zqfS@k5Es6GYQK@K$nRamhejDLgZ%F0IhiA6a8x20z%OL(k9p+b{q*NgpB#7OZ@cm6 z=iSzB&DM_8eij#m^~mR97;Zvi4~6fKlmpHXJT0*BzzW__Do8>}f8nJOm6Qfb=e3Ky z`J|tQCH_z*Z2rL|y&U8eb(rw}9pI(6K2F5>r6M25my$@fg7H7Z*M5?H|AhTYp+SC6$e(o_Wn&xu4Y*W%dDPuz z4S6*lJLaZ}$(+#5V|w=^AT0pS(VVRWGG4-((x z!GQY0-j(O2ggHMTtrZa1U>g_b$D4@8Cfn#=e*VXxcYM4#C)O+`*r)OL5uVlW>9|J6 zXagtw#M%OD;AyU*m+OL0wd3ZdU)xwjD3m#sIQtYVE!Yo}%P(gJ$XUQ(CmqL;%quDJ zV{O{PJy&pV9T@Z%2SE99Zc4(RwGy;@E?C=DDNNzDs@M++} zsAvL8OaKb8K3ap_QbH>qe#;vEDeviMQm|{UOdPYMF-OZ|O5N(lVcZ= zVVGM+PKpatzI}#oZ8KBuRNsYeI{kCN0dy97zC5@ps)u6#;F=4MNw)yN@!XF?hf?TX z`LI_?Gt+MgI$icbEd2TBzq3Bz^Z5MmnEkgrgOgosjhugi-gz6v!utH5_5Wg*u+|&9 zAB)chk^O8V|Liw?GxztOP5x}FDW4rxKK!38`7FEW4o)u4JE!Uc=t)}s($0TrI4`~k z49NGkx253vKNhn2Q{_lXoiNi#fhYUeP%oMsDORUey+UmTQbslP2B zbUOnl=VuQ#AD>Sj+^H9jW}c6im!1Pi=Mv7=&_faw>~wOql_kC}O)Yn1EjGi>Bag*o zI#K2=shkU=cS2_iq*7A}I1nDZ5{(fwLo|Dnia7q)A{ye3rKAX3(2DiLuWEbnc!4-0 zo5LqsSVaGBJh}=Z?iR>pXoWCf*~Sh*mZ@#lN7-3nj4F?!<;s8fnfeqyECsB2e!Ase zxm#}H{v6@<5YC4%OHvgT?_iZ9R}r}@R${T&NGnT%@oZ?rO*E~}!a-hGrb`YU!?zB0 z8>O$SL8s<%{P{)itaGY2AkqB$j=eAGr;j_QXZhxf^V7e~8=|@2Jw7}!I+uo?Fa1_Y z-_vNsWa>HbM-~Q1rMbt9jN&0txZ=$lidyorK{_W(Y}w0Ke|OdNIFrZ+ja;(WuvF6- zq&{x=`gFrzz)s4~2j4`#pXL?$*wz+v+-ySin2LCQlL&uciEyXFn6xT+a1I4H1iepR ziLx;_&Ug%(>zt?QJN9WhbPSLk-sEy;Jd;YSL_Dq7Zkz9zE^r`G3Jz%;$Kb*=6kz)^ z$}tp8a|z#rl^1EAIp!H)87EyC4?U|UZ^67IzqF`Zt*A3X7j#m(iqzf5R#3(2)$6Ey z4KKEuEKAeb3iT@PO3fv=jLzODivn~v=6Q| zMeD<>avBddz${@dq4zW(OW1aK-qR;1kdTn zFZ7q26C6wLr_4f_v1z6J%B(g<<1Zeki}Q9J_mDeC&+|RwSiSHL_Nmv7=s=SNe~Pv# z&~M@4IT2>*{X9_bncO?dkms%8=RX_vRNXcg(lORUs`Vl?I2Q3r6 z^4NEIq|%%?EC$;sC;*qMMq-|&I@)&wD;Y$1K<4vM7RRN-zM0R*+BWATuaHaX@Tpkm z3jS8ukqUlfR?O=22S2LVM@I|Vu38!7c^-z^q?&RM2(->_KU%9zQ7%rD8Coa%*I5h9 z3^TbXotzIiO8g?q55rc3p%<2+T;+J3W9)Mb)Z$Y1r^`pnGgJt1mAz+X^cBC9MqzQK z-ST*Y;jAp5FR?=LTP5aFexuZw&u?j8Uw*A!S^2g0Y7+NtKQZ>c^w8IM)?`7(^mQFkxY1d@tW8@QSDUE07wm2g3pC^gj)L?ZZ?5n)oSp-%;C1_`o$maTw3L6RQrk zYb@0u;IVrTgKy}>lihGtMLYYFc@V}YO)LCm7&}3wgPT?5*n(>?JRZi^w7k)>2%|+6 zPYMA}kSIYhrTAVwTZL%GlJ3-twnlBa!hg&;`U#V-;s4D^hf@yWk0@%cu+6&OAA-myE^V}A5;mtC0={fr#FlMadnK4lC zPK;Gr12y^9LE6ld?FqGHnkk3{`f6up5ktg!}S02-m#$ z>Bpj{?Hu41{fNnCF}eRx$a7VQ&ZlK!1=E*qlICIhqgz@@tgyIIaN$vV-t>+{nnBBk zE4F9T`k8x6SZ(x!wzWgIYs-LW%c;M-RcVp>MME&QNl9`)lN2~dE>&Bm=qvM)Lb(UT zhceDtAO(Yu=Jmhobz*GC6{`gWR0B|1gI0D&+l3>y<++5`MgdqDF_C?Fnz!=Mg?Slb zr&O|8oGqSsbLK-vrL0*<$3s*?cU=@;$R&Ehv{t$^fQf%V-G?{hy zGB9WXBM}k42kr_YbZ)~L4&*)2r(tZ79;+a{_`@L#=cDRK;ZI3M(k0nS-N1>CYW;mM zp4qtUZqA3({P7aU(qo#4a`oqoM7z!_7H2x2n~mHFI?oAnt=hL(HI+H&s+&3Y*W&b> ztR9w^P~{Y7%gbIey$xoj)T`X#x7NM-%gpIX_scQB z7h6suFJ^5nk;uzXS&^P}W%G~uKgX~|470KKGWkF2yL)g?t^Y=g=l^`G|Nbn0Ke62W zvn|$z>#Z+?E@&nqE$E8>D1l?&_gU|_b98pRO;WQs&(|OKVt1kw9icovLY>;sq_?S~ zKawEJKn`ry>&>ltW2?Ett{&KDX!+GovVRW)KlIrD4Un+EcLslFo_`g<{=9W`G1$U% zpZ~j-FDt(~TfPBsfwk6{#i(`<_8Y`lw;OPWa~43ue@x=c`2reg`T?3bTctL(0SGfr zm+_HJnHdmXrNYD1eiYwD9D5Ub7f%i(k^UG6+%XY|=$IFU zmt&4VPrUo%p|D+~!Ou?Q-34BDqmihZXzl*?`4cQ#&>zD9Q}=S}!}Ay)9ZBLJ2YI_? 
z^fxd1B<=l(2;4Ifh}oeh#%9ssjuU8_h4!DBFO1^2UW1h9@H8=X#S?kXj;U9KsC^Sr zlfcth>OZ8JKf<&qY$Lw)lO*u`)TpTuP{sf>t#1A}bah~&>-uqYU4M`0$}7Hm#peYc*k z|FHxAyy^eX@b?o#qtXTFKdJ->7R^!@1UmJZd}2jGKFdSV$U!lHK3w#&;w4%Yi$-9m9iZSh)&_)T2?!Xk(a-#` zHGXW|N~v{i2H(n4(OslBMyi%#q=f-}C*=;m^ejL3EJV1hc!WE-5%wG}n4~x~UJyuB zSBw)-q{ngOp;pMBAq_GrRt0cgQJjIdJ z8aUfE+6V0x22B~xc8CT0>=Cav(oJ@EgCg4lW6309_?UG%TgV86XRo>1vv4bCWt{CC zch@>E#0mNEQU}}`WlalZ7<#KwhF2!a-Y`b-85YJ48=>~bO3Z3{n!AJ!@c8e5J z&ZNIbDyhK9UJ)n1Ch=sfkwTY07*^0t%|MGjoyK-y4#Py|lxPbRC%O;8hG2Z0KT5!R~AxCF0-C11WPqZ(9x4q}X+an%>5^>F(#E!`L zK=LD5UGSH(yJ*Ktcn}bHtZ`<~#q29KEboz3T)k(X9Y-~OKAZ~IfT{ZHrC*%C4LPhkdn#Q(EVZ|37a zcUtwg_|Iqg!vM<9$H+_$FS>sLTlqR*pHD9if2)ZE4&5ZaPMi_d490r8X*|sCoW%cy zJ;W1A?2!0)+$?ivXXu3eiGnqOlpe5wYVy{2@=oA#BbDp)*YivEE8pB%cXAa5E;|ie zKT3V{fE;5B9$=&3ijRN6@At*`YgoDldL{$c$WN%qId;OoWZ0D9}3|Hc03^m?81!C$|SQ#zX1{acZY1cYu%dIG~t zoG5$1A)Ouf4!^?lozExy2svS2P6p@4{XPP6(PJIf?eqpGhnJ_F9_wEAx)=T9ZSu2y z1XC(7*7Gwbz`Q{O@n0}IDU2%=xn~mJ^(n(2CZQQW%rXU-QC1YR;C>zgfxlp+LKI{E zl_cONnuX~xCg&qNiQMgvtbM=+m^ucht{Xa-TCP9A_gk&{NA?-ETYUDc!|KgOqp{U! z)%VzCza!u|iBtxqk4PO3Pr?Vo_U~PP%-eea-&ZH&U!+RCDIvm68wzJ(*k?YSq1(!-krzM*00@Db_hMZ+vF+vVuSYR>(HTD z>jwG@A~&3Pvcz?E7k|XsOO#tgpqiq$L~R@*-vBEPBn}LEe6!8W??Qi|0*)|5T92GE z+zYI+5wJiON8y7&MqG&xq5}|Wu2H0q0mM5WmI?WL(>j91J#;8Tqjbbns|!OLDV4j& zj{iJ7rb-gf-4LlnK9k0i#1&trP)N3c&F?znaS|ixo~SRp+uY_E{pqoI2wj;#lkJhG zxy&%fipp7(;=+SP4G*|fp9*tR0@%>gByu@I@nl|btCBCPoyTTpWiG0AwLLQ zhc?M#p3V@Hd*Ry3Qk7^{FblaXXohsPFS^8RXGFW0mZjf$MX&w{f_M$r2jCej|1k;6k9tv{Vq@ zWy&Xl_(q!YL<`0D+cv%8zKFC#DaxpGwf zg*n&(hT~mWF5FfQNhQpsk1}{#TB0qE(fKbNEbgz30PH zE_{oPTKR0_u)jDiFHlrYY6ekBs)O;LADyUlh4TINp)%#ojqs_wiyhB{^{jmS#HC-B z5RKp~tAV))XiG!WG@v<(UFFLStAl2zn68GzD4esGUm?n~+hT0#4CH3=FjnF|y_$I_ zq`4~xM5lr0tV^YUmj|ge1nIG;UZ^A zbZhl672@*@OyFDa?d3;p?qO$6lHP#aU5e$Ay7?4XerbEu+u!s4ZGWoQ-8k`I&i`+B zr_pZb{r~E_Z~lMJ^7j)n=AUhWxFs@9_mz8|EXkDeMCDj4JT>$-UTztkQBYanHk|Zl zdJ+5MNdlVI)d9aEwXHw7;zz?|wutYycZJtN){9@*S8@VZq0Tcz*JTVKKS~J_uCZFG24P zHK1-h8o>bI30}M%urz^p<2zw8q4$ss#z{Q(lkfq)hZacceLTr#0VEQ3Ej?t+7LD|U zh7CPDnP+;nhn}|KfN!uQ2>(pS%T_~(C4vO`+IP~)mFU5R{b+#-1LO@I6fI+0H03T23B!+7{!xRq>0}|dWJ87xBfTEgL{?|$L4!8Wky&PP~dSUKq< z6It#yDWJ)V*x#A@suMlHYI^s}v$66YX$S_-%{o~%&*~QVx zmy^Q|9kb2IeqIho4HHK``uM?yI2ylzli+uNOlgVYXHfnd#g!qcX(H+2jp7zZW)m~$ z2!G5hnYh^maB}f9{QNt>l{)mf#slvN=%p{P=JzI{Kh2+afxMru5lOfo)Rl*?(4(Z9 z^@H-UQo*;WOSdaL8I_KdU$b{IuxJcT*LaZgB}P}g_W)dm79PN_xJypR)+ga9d$OQm zgvdtYy77|PcA5G)Z{?>(6B}F{U9c^z=$bFztdB!dN>o|R*;g7q-uJ@ZYxl9)}7_Ehs;U-86O4E_h5cieR5n*03{^g$<2HT#Vf&wl0b zf0+5_he$m8^lUR<1hI|+VO^)SBf~zu|73iiy|XGxm=~NMhBA&$222RH3v3F~LIBdU zDIGr%IZ#sdRWgRk;r~$6uk@Y;2%w=cXsb^)2?!;9@tI$>3NU5dvkm+8PC0qYp;|lN zOiPC`r4&Ea?%!VVRF@3=yI$VYz3J@DPkA3xYzUp73uijxfY3CSyr|);5{_MUGMI_8 z;Mo&&h&k(b*2cb931(YUGDxwsW8G)zj0fz^*9EKc#ZD(<^epndL+)xY8@OZ%e?{Q# z<)J<13zFwG`YzzzY1Yxdq@~DM!TQB_aQ|KDDad-`!Y1_sLQq}%ojV!L$jSCz8OS?% zko%qf>6wo~oIyI8m8Z4W5^C7fHsbeubu`p%IcB$5sG!TkXLVVTV2frm+M3ehU!j#V?j)R@67>EHvk9vydk$A_K4 z@loNc&kBD_!ygd>Mx!_?g?~0k%-5d@kUAz(YV9<)8wbsLy;TRUh5v0g+X^-hC)bNd zP7t|LW#9l#Dcw*;>dPeZT!1o4hWxxNLJD_A2-c6p9h!R@{!HYwIEg%rg^!cxK*Y|@ zdLUxwK(D2|p8_teD7TuHb!M1paYWCL2C2Y$(SL~W(#Oa^Iauji1>dUgic;i~@gtBC zT@V<_qyE=P;LYe_-fXM~Bu%|H4jnfQMnUKg{LmlY05d)pMeE?!z1S)r2W(c#7JWYN zyh%x)I|3r7vq*fpB=GE`l_vt26)BQFq3boqwp1)`;t8y5UCr?a+CXec%#%qpnb)ZSY`BYY;L7 z0h6y@Bwftt8VSj5A#lq3c~;O44A2ta=iuQFM`m5DIKg?Aowx*Lxv@R(UZ&DqzSPaf(oPav+wO{ zMuIF{D{F1l`LQ-pfE)t62QvZTkF{btdQi_RI7n_|=Do65jd^1Z6X==<1}!NWA%43~ zn%>2UUFIuxGe67OfoDj%ydK~+rI>mT<0><6{1A5T9XZMO?RuT3sH-@oxeDf<>Su zaQtehSsLoiT$_)fD_0qipL!Wk=KVicgU9v5oib(@vy#q=*(+IiPS~}O#8FnxoX8;Ywt!Z(fN&p^t7tt4 
zWWSU^9;vmQ7c?KGjtZ*Kn607?n<}iCub_@83$>Curcg!6DmM=eXQJ#X!kS7@8aGo2 z%Oa?C7696%WQGn%kBR{kMP?-%y9w!|R5oSpSCd9K>r@mNf-$)k3$ZBRQQ|3EO%)Ol zCxn7LS!9#5GFQ!xb0V0z;Maq|R3b?j6R$}+<2Hvh7EU9vA7SfzQ69D?`=K-#;lI){ z2rFzRQxQD(!lTa@LTDUd3ALRj2Z@>ap(aQeH<@YG zUY4GMU)U#J25ZV%yHdwr$(CZDV4q zlfL=C?{4kAcXzA$!>R6%-Bo?gbDrmS7(w|5C?=}#@d=FKhthxcnIC#8D|1ZoswcIo zL9m2BK`@D&puP7WF~L|m%JZ#Qbvo4*wDk1vJe6#{t>Rcm4U9P>^U7eD5l|(e)e8+=qy}!PTi>{)0)ZDsnsq3g<+?X; z(&*A<1O(+G%|!tyK4wW1f~&s)r;`$E*jlB9cNp|KpAbDeSUaJg-TJp~hBiH=QDxuX zlarR*Z_jzY$UylUK!yhsBMr3jfgnVEfPj2+kNmKyAIO2@)zvzqx^X|aJ!GHCP68nenyw;|K zorANJ=kh`QxqhYVl*qQB`7`$ta+L-p*zQj0$S4HzxbH3K{0 zKXAYx65UWNI+X7YO<%lTE6-!)_3oTtF+~*JBklU2sy{DTZ(0(@+(5{yK-3Sm3|b9M!|MzyN?e{mddW}L6*ke=N4s> z!w8p!N7L>~o)9~s4h#lK@Mpj%0#!C`SJu`Lk-}oG*D>f4iTFilNG0_d!TtA*-ecFSTaNHJC%HweZEG7PXMs<3 zp20hB$XTnyZztikm71dYnjt}A`G}R6>WequlDkfpqCl*^3WPTNJoOE^|D*z?7U!cz8PL9A#wKzT5n=XYDbZ>VS;ZTV|Lh^6Z@lp@e=1g zPU;;zd)jGmJ8&UKlP&9Ndl|WMgWp&93Pr)M$KMR}0`0tj(9oY<-$1?_M(K0RGGszn zNFrKO!t1#+%Ij+o&Yp$w=Tm|o3xOuX>hn(?ZdzbjYOpGMItR8!PiNLaFV!31avD9o zf@2QMaq8|D-b5h^b*+303xNSw?T=zTR0{HmUgA1TS#m zv&0AJgywE^cpo)?sC@oy^6MPq7oBOm#H|$Sv7O7Xx~Gg6`}X zovHxvyJ;Bc@G`@iDFd-_P(dA_5?B- zSRY(besdSp3>n}MUDz_7j3UedOnw}`p0Tf5wg{h`l|19U98FoD8r_rOWYps)^=*`@ z?(&FirvF+Xf^#I2McGsv&L+wJ_yXF1DQ5bq2=}BL@@Q<|4J?0M<`cDcLnGkqTUK|5U5UB;G752M6q zrg|VT9Dyz@KIsdJkQ^^Cri?Hc^D?^0Im;Af(?k}+QyjwtWm?_);5J`q7 zl^_0Xa#zZ}Qdu>nR@4gVA=9g9R|PsyNHwaQHxUDg7=m+j!TaQ}Oq&S0XyhG5vGC?@ zHBb>*2kSLYcBNw2A3b_T}>>Xw{^O-2X+78kvN+>Pbjx|f;* zQ%k;M9m`3%VO}-9D_+Y@GeprdpQr_lf_DqG6CW7(Ee>}PhDIbS#DN5|8s@zVY@V#d z12lQX>lKg5pF%#xbJe;4z1J{Si-lV|WSwWqJrAVzwN>j?H$Kk%e2Qwctb85SXu{CR zYP3*1G2X_7z7?|it@@vh)wSRBfI+oh?auvUaDt0$VLFtLUca^#F#s;J9_5w*0aUs#OsKlf5??DeyVCe1Ay*)5NE^o^6#VOf={bVjyHe(ow_9 z@Ph1{X{D6n-hDTB6*O1+0zv<}SG%t*M81^nT)if~Jvom_Ty_PbVyn_pvHtkDQUe?C z=>iWC+!y}dj+rey@PZ*T>QexoI#YX6Pej>0N}Sl8&1=-7!U*Bww_r(J zoP{dxVc)5U_}Cbd|K!sF9y0mNRS%X(wxaoBO_V4jL>MHP$+_n-P}idcI&!oMrp{2h zq~-dvu&XACm)?3MVeC>frbUCjQ3<=U_oXbs)XYh}`*+~k_^l2lNf*tUXOA?WhmKg# zWgye5`Qvh7NpPMt?O%oRF6WbUD3bsMv-lx7WRhix zPZjIY#WK?5<=+Nh0^#&KG=6UpAF_@Rk8ic&B98IefyqQ%NX$Zt>GU^8NwYdai_13z z@o4)brh;A{-?(o)}`tPIEO2&$bRdpd!*OZSY01U_{M zGN9h-(ae}!RVQ6LU?yHn>JMpgX1kQ^pK+w?EWT&gHL3+rOykaX6Pb9^dP}mf;_$!E zyA#4+@Hc1Ik14^T&VU^X!o=eDCWL9XmFb_4`+50?Z`Pz-?w5Qv9q{1~D@0Jd>&8gW zTQ!dNx;s~kzlZ2y%h9>4>Xp^CT0>#-Z?qlwbXgIJb`#0zb_q1;(QjbJe$S_W%m^c? zQ*Lr5PTmHGk1nXs8p$nO>1kv83uRJqbWiU6G62JrS++8b^lgwy@%kNIdeND(49M#T zlVOuq9O-O3;zoEEaUlT(mWftg zUrX-bvm9fmHi;RzjdrE$SN|+KbSi4hmJO{~Ef+xwn0%u;qX}hSaG%A8+xznn^z=^QilE5#)TD_;KWi+__fqJr8n*f;vQa1G9b0mj>X|00oM zk^c*h+@!rku?4MthRH+UN9Zpz$Rb^x3_(aY+9RE>o~0QH_WM<0UCzi_`G<(VV7^%f#CU~qI zMBw)#hMDb(dWWVv#q@*_EiyBn5Y9E+_3olv_E>E9np5hkZKG!p-iBWOkFuW z(+pb9DNc2P3Mo`s0drs{5A#*nt0&_H;;Kj@WeYB-W?yE11-lL3>zrf9#px^a#n7#t z8zxVYW|xQjI12_$M1v&U#6n=a+#vN}(R)<|45saxWT0=2LlFJ!rey5`=dLuj+a$tL`lfNpmXB23Rd7Du;%V{Zx_>t&83Tj%FEx9V>CbR0%SgxDKMxuM(I1k(?DY$SV3TttJ_vZ@^=M>u>h{2FgN8@` zi5oQoUBYn~TvROF!~;AXcc7tV>-18^&Q&rs`yD$-mn?XzO|oK@_V+A1igUL;BQZrY3CRF$Z{b_w;`O|p zBMqxNxLIBN1u|1dIgPoIr03Hu3)cpT^p32ULt1yOAo8Q!I8?>gdlj;|JnlPF2`j-X zUS@nsc4z{~$0OQ>n&S~{nbM{Sgb zQ-6$TBxA_f-E-+31DXI(_v|S{ncP2B- zB{}y77bG=3%BoB~bM7nh$tl5%}Wp}TM|4jhdXZu zJjJj{p||E2!bn7GXD+T87e~s$_MQ4^=5G9I)E?PPbD|bA#|IPzQR#z6WH}8qEDv@? 
z=fCLXF>tv`tKoQyXc}qZqMAZaXyCgd+Go`d6;(noi$cw6;H;A_Rg?1YZer2A;XuxE zC0_WPZYi*TJ?r&z$ifcgovY)w-zq4F7OxkgoVFbj{& zc-iaIu9I`Sr-H<@biqBahln6$w?8eS9kkS4z$k+bqllBOu{u!Wrh<|p5m$!-!9J|wv&uB#hH5Mlog{nM@l5YD`qYAOI=xXjslx)Trf1|!3ty38 zDCAw;>TGGICiPHt95@+=w%HPqDIady>{sVGXmQj~VqW*A!X)0QJwbxPJMxcfNNC7* z#!*zpzbqh)#3iY@Z(+4_#N>}OG>R;gMGweCyr7cN{2k)g^KfN0o$2|gj&IwX5#KFuDJdUqwXe( z+aP2G^#1|6&gE`CR4a6mZ*i&jFJWMi_Bj1NX6`|^N#sKZ*rl+WX=|ArSRRkOX$6XZCvF39FzTe?0?uV64Gjd=!SWGl`qpMT+;h-yQ&m= zsuNpZr&^f>gK~5Ha4Axm$gnkDB6Mm<`7M8&ny2WHfo!j&@MI@=PJ?oAI2IG}kxDXf z-ywFABAqkBa%QfRfU!j2)j}D@3Zp?d5O%}VTBx>tkbSg=^|?5|en;oA?)@8a(G~c@ z8cPx=^o);1r&q!(jFLtKW~2MtG$fE%vl)t!L=1)lN3}p?KE00*_UmErkin72L*fK_ z`}9P!qX~#x`i}^}XsU0)(=aXI4@D@Ul_(gq8XRSSF!F-dF1lAu*a2~38PZKz4@Mv)ry8*e+7lOXJ zWfMWU{&sammHh1w` zLA~DQN_yF#mYPk`hhV!cWiL5BdsN2k$+n+q_j!1YJq=@t<5|h&v4DA(`s+ue8DK)* z#-=3!Ptxmi-92z?%Z1DqXx+SS`?PcN@*ci+j|5lqd=$I)DCTk+l&cEnkw}Kj4zo$P zpGWh%xI`?==CT9>h%uv?vyT@1Q9P_KXYMe(W!|*m<+!@G5sF7zmW>7~`9){m#Gzw@ zTi?yu*3Ea{$2p!Rk+E33^rd zxf$*>>}M6=;=GiwBl5v@yG8`bT6tjxfpPniU9Kr?(H(rx^qmR53}SXnNCAG~c`b?Y zo9*rG9p*V7EAHFx#y`3aIZpf`dJ>UTWguk4%Lu9{e)S&mPgV7s@9{*k-#nV|O=D?K zho7I_J`2C)%mTGMqmJRufK^Yr4ZFZ4tE+#s@LQ?}Ushl)ut3)KP1qIfD)Y}Rum7L# z_NsDQ^u^aL&3Nw{&MY&acMHkG27i? z*R+GE0O+nc0n*l^D~<7^bN9K|ov7n&U?r~sJEEj~ zrVJ|8CT!NJ$G-Nv7M)19Y&=iexeMjC!5x89dV2@VCf+d>jEA-eVH>!NBHP`PdHEN^_=J$O4@Vb2hf#_Rnp6&<4{nohjAbUkdlPK1ye+=d$2hrfeY=g`XOI?_ z@~8JUjjX|o$PnolnONX>Xz5(d%0;_f=0B)#Fcu>uIPzn$KS;2LU4?pMO(9KZ#!Q7? zg(S-v+n)Q8+J|-F-Wp*fO~b|=^1Pg#em?Xu)ny;QIAa=jJ6J$Zygv17v!hC_R|Yrf zg+A}HTdusojg<942xGZwvv*Wd%8ZvsHu-44?DYpaC1pZ}eB}wOeUHs3UHL@>{vR$` zn9mtUuBnis0{X~V495G%LHD`!LUoRm<8o=r0}h$M3V9Lf@7t4lQVg+9J1Ix zEc+ie0p}U9hE}I@*FG`Um`8T(gi&bOO;zyEp>J%&*SKg9+mZ8F>`Qs_cIYH!V%+JC*;jh?!rzP;SD*@DP8!jB&-7iJuNn+rutvp93(6Pq3VX7dG|?+3*6^(GNE~7sn83>^>vx(n9g=Xf zWb#3u14zV=oE`n_{c{}&K4->wtkkQ2KZfnjH;`+I?hPt;QA0Yrag;*)D1^*t5K@bI zX8eFZp}%EXU>+)Hr4rwH%;vwXeTLBNU(OGE?OHdg*UOA5ADaE@g=!{M6Q@SYm%M6H zi*AE49;eUilt04U@_+^#c1Bf+07+fa`^G4zLh`~-&w3pwOF_C?cF%63Y%i^r=<#B_ z+a|02o3}Pv#6lK%!B#z6^iJd*o^UBNq4Jj#KgP)WTUlqCsD-Fvp3IKCOIZ<0~N?aN)v~ssEnm7Lmduz&iE313R{lRcHM7qsyoc z9|pxZhHIqAhHU!t_jEp#eVO4MT4|`B4>p2@;sSH`8vsM=V?yZ9)nJe%*(9FdI3$Ns zlWyOap5#K=-k`=YirLU-7fR_s$K_L7&c#qtB1~E3&9-a>^tBGh-(TvI+%o45Y}M(8 z9OeQhFFUn_bmgDy;fibFCQf|C2c)~G4FVv+k&8tz&J<2gS*|}tV9-QI)CY$iu31e*`0?W9R%_m7W{Aa%s>Z{)0%0W2`KL>9842 z#Kj|5dsnwPJ(Rf@>5K@)isN89&F*Qdt;+QWPcFD!J@ri8Qk%wCzG3Q4kHwZ}pU9L) z^TC#;P2*{wwwrYLAjt;t_~c^2^;~ugrp}(*hC>*6H|FYoqot@pWlM!2{ClGQ%5lcswOG_fk8U8@&s3j6&T@WX0x>lY3Zi5XSR&V$E^D7 zhFk})5ah*^=@9x%znS%I1_V4T$^F_bgDv3t>-N73gUft4R*yV@z4XQuzz(Qp*Szdn z)&e{`0+KMAxXxVso?_^PL2a5rXb@)(V_a=A>M3KUVV{|md`-sapIe@ruWzH^Jd!4yIK8-L1o)qD&TFX{^D3iz071}4xQJF1Hpx}vx}9X>Xe zXR97a<^AQKfZ${GKTJ4`7a!iIyM~qVL3%gU41Pw~meIjEV|fQJgy7a`jpo~p<1zk2 z6XoLBxzP%4LvUB0;}eTF&Xke_IwV+Org~N#Y`7i?r3tEbMxR~RvQ9}vEJ~7%)-%y@U8s@xJ7zyL zD9-J|f; zJ&)}?SUbN=H7UO*uVGZ`Hs=jVk(t-lEZv}~L>YvkWkV+EdZPZIIXm*Ns)|9Vl?2W- z6z90%;5V7NWb33i?cDR(lSLSWES9J(KeU==GB~v4??MgFfA4-Mo_JEiAo#48*ZoG0 zAUKv@q=DDAP*BpF{XL-rYeBa1`zoMKt5Ne$RwAnw9NyL5;umr?^9^GuspcMtg!D@} zlsCnwNHr)=bl^F+V!d2r636PnlzShAvV?7OMs08Vr|APEkbyu>L(`*!w!8}K!0%<7 zCElbGh)-W0F1KNiR)LQ8P##E@Vi?WlH1kbZWY~Nu)@QZd;8+=y)#HBN7M`1qK!eX2 z1;8~id{#<5uvshW*XPXBw$?|?SG@zsIR>ow81C0QE)^AQ$Z~Y&C1zeQ+y;|@j|xVQ zuM9Ldf8O)Q@d7uo6lC6kId6}8z#-hJuQE5Law^&@WhH9!^a(0sl>~#$MCgPt<$2bA zK_5Lxy?B&pXE4rpx~p@K(436q7IgR9#=QCz+vj`F7xV|}RkXGAtN?O=X$HjHmof0< ze2nqloTwTk>Ax-8?Grf#6BS7pH(Z5P<10d+CB@S;rp6oDZmBnc(yU^q`R*6O$+9KuH_wPLMQsW*AXp?U|i1-gD6 zdwm$^W726cAOc`!bMju1~FsoWyhQ^cMeSZB=&C?aSVKZ0s{!#GSh)-0habs*K* 
zA&=;JB6)#nr9LH}xZyr6@UEe^;LI=HP%J%aklQAx)s)o7D)nt2Jd&Sb3AVO)gYc=` z)B~F_($X?Ni>;{(Goh~7X=MBOHvbfhvs$lT7+>yLV(y*4+u*y8kWoP+AS?ZTbU!!c z6y%XWs0G?IyXui0HAbX%Nq^L=QS&q~a!Y*RnJ=a4FmWEI?;DNuiY2>9eL9TD zGDzy^&&28eifCop$V?}z+1wI=uGH}rJWL|~+spQFYeb%sBL<^YQ&YL=70V$?Lup3D z1yM#5e4bafT+%oM;xd3rM1bK%=;#koH+QUMfxUfu`J(zUv>GHBMd(vqP)T6(wF6>FDfz?irqEhg;2+8Bu}WG zL|bM3ai`%+x{Vn$VyBKh(0Pfv{nMIyoAUx2b$5Lf1hZk%otI{7euU@ZBqZz8WShF4 z=`B_P=fZs^_DY$UG`^nhLKX(uE@<o&08$VH5*pf^}L)%+4jVCzs*(Zjr_yoQ=UeB{_`{ z_Rk$&QB9FZZSobd6I=p2nD(olwrheOMyb;|7VD!X5{nXai4=uc6_Rn7xiOLqB)u9e z!AsJBb_u^L+#)NnO`|_`IoBbTE>d-vZGEdS*2#tGYnhQ}9eqo>C~6}p;kVe6HH=A? zfUDPMpQG^~d-4ujNp|H#0Wt*WaZ9cTb=OG_-+e3vDVt2iJqE|~zecNwy+^f_TW7q( zwZprg-Rs7!$ofcEdL2?L1q3eZ)vV@g#QXxs9<{Qx|95psMvG1ATr#oFr8FY_U&ohiWz?U6`1wL#UR znueBH92gvlcN~CMyQaS&$(G;L**B7e*sF#A)sE(#Q%YACDrh#_OLph8-y{SuF5DOO z;q5W*QT*E)>rAC)uv@>-`&h6zA~4C=tRD}VhQABv(?j2l^x++zXJ*LSL#_8ve4*vk zh%l4nmoIi9QSpILOHZf{EtJg4Nma(58ML|m`9Ft^Ml7n@PqyCMN({K6smY)rX&fjs zAD%9o+yK}aSH7ItJb2>MiLqTk>yDTJok!(Bd)D^umu=X%V6-)%`<(CFDU+Nk#!&QI zz|-d#+<=Do6V6l&d}xQG1zYO+t?wtL@=)R#QiHT0w%7_d!Bm#$E!;Xq&hC4a>Id07ehChr=d!`0X~Ep;@5aBL z7xg8AKv4 z#T94NLVw09j-F&)$={7*mr4(~A zRzZ?i-gpi3&u{{e$rC^p2pm%*8WVa_+3U|9WLTT}V7&!eV-U_n#tj_6T3WI6J(MWC z5XYjZ?FXI`hf}R!fAgnEyZWNL0OQF6XUb598m8-SMf?~Qe@K7aSPdBci|#^EVV(Rt zZY0acI-Pj&8fT+s=wK$KVvN5Ef9vmbK z&u8^N1jfXy&~m+bifld9OGCgUKLnF=ZRGz_H^z^NXT@}4%my)*D`$?AS_vTY_=zWU z1o{Q`MDh@D3)lota6>9Z+T(XBT{Wh?Gfcf$K#_NuR!4rDt1M-asFga?Yp$w)AWp7m`Rm#n*GTgWyUfxrljcf7WCTFCZDoFc4~ zndm9P5UwR`P^eNmBXkCrE_OCP{&iXYiBV~1e7mgIRjbK zqm@`KuMGXr8Ts+8JE4{RJnAoj<;4e!_Vr$~^*9H(H|UZ)m9!d%&5!WC-@{!MBi?@t zads30vV_RyJ&e3&h+|h8kgl#zLZ>Kr&89eW>X4p@NK+mFT4r5XNB(sv4s(sJt;=9G z_!n!6^Xs-dQy#M9-nhfh;0M!V1&4mXVCBsw)77cBXkNRc{pM-o`hrSmg@}s-$b`;J zeDAz?C`;ZuQ?eFvq#nEGuI&z^hB)Rxr|QhnKL&0**K;&jCrYssy3yLy!@@RGnZ#Tgw}z}b zx8vV9h!{vW$djuz93?;yz#y}EZd9UtODtqSW;R`}cAT}b4zhJIsDEappTZxp_SY5c z=F%3{*lS!gT4ST@Xh>A(L6CQ}tr6FIIP0_EMHM5A`h`4s5StUJPjqB}xBO7i%yxO` zaJGENl5n}FaH>|TaOGu$ezNgSqfDjU@YR`gUfI5Gmu1qdbNd)PXkq7ksFvyQxeLAa z^^_@WG!nGu!^s+6v41F671{vxAEu|+RTGx&aCgvb0)Ib5zBp|A4CL(@W+|^?+4y{Y z(=^AOnf*bHvZBW^fAgh3{pTxg@triX8}-lUGtnzu+}d?@*Ju8t*Q<$E5lmCv#VT{Z zk9uzaSC(36-0ttFqP5+>IGdN~5{zQvcb}5xl&9~?WzqF8QpVlC<$7uZovP$8Hrj+9 znx6BlPh z(KEavmkQw`dgWPPANsXg+l7pdc#&9?HyM%EUyh(89bbl2xEQ?Z zwA55PSZc{+S^C48Twdm{pLo;hJ?4i5PnWz}aeOsy(y*2}4&viTBVF-&qhW!VK0e#& zRpM1{dLwytdSdX7j~oMO6a~cxuC;a|iawr_^#>lF-3Qm4# z=Ycz#-6}(#B5)l#o=Nd}sAj$7ut6gurvoxlv{;u^%UDQ6byX2Kr~lhg9dRzbJf-e)6NpH6bI+ubLKe3mq@2 z<>%L;keAoA)|S*Dtw36FsYuUrRy5KmZ+NR{?47UW`x7#q*vtkjXIva(2 zgi*NQrXv4fSKUfHGUdAS+>1&DN`mjiD@hQ0r8^fxLs_^8DDc+F!Oz@G}_#O9{6FU1-IZkjJ^ zA!Gr|G)4e&k6UkApCwk}&d|w5`+RCy zsMy{XVt)TRdJWt_kSFuCJzd|A=Tza@R*spOb%i6cncjRx^yNu$@Yr5-;OrFvTP75f z@pD^E=|$>7?=E)U2!l;BvUZ$4%CxNH$*k+Z<2N+G_Z`S&$_KTL|DDB`QM_<{5KLE{ zP1Fn3aW#3{bzoY5F)6JYCIf;a;X4sH4A(vbqNOyTirqi%caVh`fMR#85DQ`s8V#TB z21+UIGGO^O$?VZ^(e;5~bdf4`rn&e#Z7Ot!rC^oeK|d@!MUp{u`kx_L^}!1EK_O{` z5TqNfjWzP{V?R?Wq^Q@XB(T9B?$m~kXQ&)lI%H*AgRV>&i$^d;%&0h~H&-0DEs2My zulLfLT7eePdqO5MHEGhFsI-d)O6W1O&9%YvNz5?Jl|v|-N;>Gpcg*$RMbxdrex
$g12Z|qgBD~d_asHX-x!XWV?%yb2y({_lTeQ9}CVehV?no z>;r=Vi2b-B$MIQU^c(%A3DV#eu?l}hVjkUb+{Omoa16Pk2h}OX=mn5!7 z1^yP?hMVqY>B-bUp(7VRy;5F8)+Zzj_DMU!7`@mQi zC?C!p*ih4zokm0h9vQD}YBM4*mg zgxEEsRP&PaaTy8gG`J`Zuv)GzfpRvvcIovfltrD+{o&V1nNX^N%M67ej>-;f)Skw> z$B?n6f&nwis0Sj~r>#({bc$zW7)B?&KgD<-n>pW6sHciS9Dy$^VU*8+Mq^scOmJOo zrdwo`g9#8FdlyM;bMI6*5Kk58R&gHho8>l$ZlOPO!r$V59q*XY4x0t0kOz)1)Tip& zGP+f*1G!*cK>Z=!f1KiZiO5sxs)yGC(>kWCR9C$l2XHt30Q#YHSOl zO<*tIVG(Y<1?N2UW)-$mCT_1QgtwoM;`iQ}=KgxO)!ud+TC(?8Ttw_Ayy`@Osf|}d z{2IsfUHhqtnK5a)<7F<6fEj}huZ?jlJbXn35m3q?r}5tNO3+4Ka~TsJvm5=*SWl8; z*|b4poLg_5;q~duK`{r;psHWW8NZM8i2dbfbNw5o{Sm5K(S2_U^o{1P1eTr2rHoP_i`6qVdYWGUif#6+z zd>?HH%mZ(51pYJitGb3WCXbham!_6bjbAGmUEAe>K3|1%OW_!po8g6F8n&Fevfgi@ zQ*Tb=R#bkT4Ij;@%nwip<0|D~Tv}uB8`)BEtnDm<8||YBR#5Gw>eh5{lJyKY{S$aK zlfzo*pDgRZEvxI#ne}}UY!bFrDE7}@KKO**n9iHNqvD`x7hamb$u_L03p)2sMPeyWL*=}HP5#r0&R8GCP`dxG9tdq6T35!dT{zu?JaaYEG zjGA(Y7iGVOT4$-Fd3Xv)@`X1Uwlzaa*lQM+lBZCVlX0NVCRlIxp6J!)Fk0n-`ygJn z8B$Kb*HySK_8uHrorapa@O9^v48Fg)U=NhfY0T|g=H?H_)5(ht&dlR0x`0&|Z_!NE z$j724#Ir>a^C&@9B<}2#7YofgC&v2xc&6K9Qt?fT%JaWVrxFe9ro~w?b;SG3c?I1K zJw2hs)7eGpY+SS+=tEB_C?SZRJGv;wH*Z@1IU;tg9)a6~+ce z^4_t#+fG8!*cMXa`)=f>=Lp|v#BBa{gy(RM4ItGXNu1e}&Z~B6kYU&y;y`;y#?JJ* z{z(Y1y>{f#Q{MF!3&!DEfB$8w@(rL%h)Cy|rF5pm2*YzEZa}p=>&{M%5h^|6uW2;B zus?Ro&3x*PckQ(L#|#=4gU(PN)2kORQlnVOYpxj^_a`8e@;SY69&hIR!NYp~8*%90 z?wCB51(YjMj7C$1)L$$l65$4E|!mrQR1X!#(g!QQ6LO=gTd=7{6KcKn2jUokV z@w>}g*dd=iYu7RsliXiJ`7i}+|LQUxTnO2v+3t1gBS%e_oaN_Aab5cdODiikBI`@L zrHNwR=dxg?mDNw`bMHmkg)Slr!6X*J)+*Q_P#TOM3MW4L+g%JNblu9 zbwcUcK66@N>2Iv``pgpAit(nGm-?|PW=oXhnt91;#s2J4sHvm*(OnP>uOmj;!k6SB z{1s>3Yn=4M$&3cVjhU3x;)s;RfLs~ni7NzHcme)JujEL?XPGY}dup}vYFJHn@(Zlp zs=?8D4dp0_+QLT|I`OCmNOm9$r~@uT6K=>xZ*blZjBg(I9lmurPXi5oojkgxUw3Qt zs3AZoKX1Me78;X~G%xgj+vYMB0s;5t#K|Ojeuo|viE&nMS-{K0*UfhEsPtSr*M0a( zKX#sp1{P!Ovh)>);Z^Pb1+l1t?SlzB>}|ZI?+ypDuNbAT*nCn=VEZ}RDNoI>SU`RI z7LgS)y^UQ@ll(#0{=bDTzALoZe^hYydgEfb9m2dJsRfBtync5d0p17q1|Mi z=t}-s=#m=Y6Ce8eN&a6OUB)ZlY$d~b_r_YXkUgZn3<`Lte{$UR%Y1cVaYpLkdZ}50 zZ&*H?`QU?R8iq(*6}>yd9rqmTZ%v-#@`oNR;c#I5|D`LE;24_gAh+!WDeBBdwEf|) zPaOF!rL6SE7>AlZiZvXmaKdnUJH^J_1NRZ7uszOXba>O+kBes@jT8*9|9_fXBUQ@C zx8(0l;))Own0AEXW3p9r<8)zkt^claxjEW6La#!^Delq|q^EO#J1Ic7-cbR0ZA z3V)XerPf6nF@iiI=tMu-C_5dJ&@LghI)HlcC3Nqft*%fB9@Ne9!-?N*9f*G)y`lwV zhm`SJwf7G*#1~1%j{XWhsCF$kuR-kZQ0fI49C`&t3)8J)rD*rk=%_7WATirRfRB8`+fd9RVXwnlT@GYV*hKD>(u!_40f5HjQyKw z9eSb`3`*RH1IN0+y9_*Ba};~YF6VNWVo|2@QVAhABY!?HimEyO4~t!60@1eL-lJAE z{NVOL5<3)Vpsy9tor$~S_NkjEwrw{aBHl^o5&7atVGJ6JLZ?Pf#6->ltggM|JkUp_ ziw1eYvS!+bo5<%!8?r9nt!F71kg~b@#VywHXH#z_V{Y-l&lLJA)m(oBXww7{q_H(Res633G?b6 zNKbIBMK;Sp(vmPwK)DRK4yp0%S#qPAMI;ni^xqu<~Sx%{DgUTRT8 zCTUHo)Y$#Y&c(`(QX|q?n{c`` zY{Hzkj~|qHd+>Nss;HV$O_O}^U|%ktx)yLgeRXSIuwU637k`^ph*do1q1!`$Bzga| zG)9Wz{j=d6p71q-pvw+r%X)7dTBIN7%iqTEd?5fGt2k^`C_P5az0 z;rD+4)j%r0j+~aydTB|@1*X@STw=ZtzM<)-cqk-V3tFpaSEpg(S_IRiX~Qrt!8XZ!@jB177!%F@N~tZ?##&g$t?Q_y>} zT6HlXC+CS>bv$NR;`HpeH$;S&wA8Rh1?T;slF&*!(G_S&Cjmsgo5B5C8bJB~WU8!5;K}@5 zqUH_o56=N5pMO|(!>~B)Ocvt3Dr2eyw%jmm4^QT1x?a!W$t2X*z8j~j8d|j(dY++4 zQ!{3hJqkbq%0Sy+8TKlVKIa*t)bK;25dAuhPnvJi8^??9PMtJ6q}^z}XmHmWovT6Yj}S+Nn})eUXLLjSVKIC91t&V*Ng~N)>uBsVT?(f#csT^J=m&CKiAN7Id(awb(H;7%BMJTdt-Mkhj^eciRXf=9pb|C zEBy~)R6>Oc{P z^(9z?eL^~|;ud!1jH%e(i%;I3HKt=qV)v|CSs??LIl!heSeVtc%Lh2y!LMuh2T3CX z{Ju7RTPn0VNzY}#)JiPAjrM@t(fa0M@Y+)A(f}i*O_KzEt(Hbh@u4%+Srnk>yJCKoP`(-%T6ytnc@QjV)4CQ?lmcx9 zR$L{$J1W_tQ`8 zq}ySioz!O+Cr3a3t;`d3zlN!Ee_|b#ee37Hl|KkF#uod%$&SwZEa3^xm6DWTd5v7X zjf(Gq2vF$8@|nc}J#xrvDq+o?=KglGzFk+UlXu=93{K^;>qO+ef*YgfM=^PFWWmVi z9zuA~po*V}=cug7Y(}gSg|mEY4LvqqbVZB~9bE`+^j~oBXq`LCfHp>HiKdkFn@uB~ 
zafF6>emv;)#Mk*4MJ9Cg@L}VaT!B8)k$$7zYCkf>$7>F*8Vq$XjQ#^QAXFqJ&~625 z426orC_*7>DWZQ9#Y>Qr(CdIoO8?oP0~@9cgB|#n*Le6+9(KO;k3QP~pd(A?OP?J% znZr457`x_lH1=ET29L5B_JeC18fb`|Y?AnV@Xhx6O@>TO_u)7H!5%y`tnsiF4Rtyl zd)z18+g)@4MB!Wz&Hn!G+5sTXTux)x9{;IL@cVmfH};m&2J`$UilZ(6J|L&k^(7ve zTJl5M``sRY$}M3zrqkTpZ>}8-a@|IqZeGv;H3y|LUX3fr9$43uHkt>G^_vm7qka3c zz=YWGJ@@WqM}FvEoAUx{+S)-PO;@g;WHv0=!d+hVez3oG<5!Gz*B*Umm+=OM9S8vS z_jc9}09CpTwf)?}0QMv+>yY9^27#XM(W#B5tKGflTJP3D*UT{PsA_C|8$i867|I@U z$+vdfYe#6RwrJY_`lXwboBCfFuKX|XaX%Tu7+^Q?5@(og1%5W%N_{uI+47=vtKQzu zFo(sO!KNzc_WCEk+OYMoNleRLd+j*!jyl-wA0Osc545bqz$+{CN~}A*C)D~YzkZ+) z_Y$9%nTtQT_|M8l@ECup#D?5PMt{D@WJoF=9X;jeE9`s9f))UaGTk+B%lYyjN9QlG zxo8778Z&R3rCuIY05uGNDC5J?Lv%Uj$znNZJb;dQ4-p8TE}PKJl*JsLliFui>5^wuYyO0X5 zTNOSkmMWZchlo;PQT_-j)db*pU=F(>ch)LLE~!vU-_3$L%HE@0cQ82RKV)V*aYTr} z)OjIZ?5dyVef9HDTTtLiovy&|zC162-vfE}1;5Y!)IX2Z&#x!vUoP|w>#C!VzOi3v zB!u6;9si|%pI#j50UF;s@0g?dO@ICHxTl{wvfwz<0QR-T6|Q>OR*(P>C(PZr-b{nkU`4Daj4~KCy46bd*T$K5)!}to&;nHmuldmJrgMoMj z<|AXPgvns4q9xC%=7Fox)b1g{*a09oF;)1jTKs_fZF`>|@*JzRTmnYS|2Oi-TjOwY z9Yjy#13KOR6TNuy`TusB^|$)(&+_+{|L-mT-&_8_xBP!^`TySX|Gnk^`;qehHR>y^ z-+smgRE9Bc0T64K1j<4#K8zz-?H_bGbE%m6zBHMD%zGw9>JK$0^ol$XSC0ElL^I$+ zv(z}QnMGk*`v*)%&CAT~n=g)qy#f|^l7mV_e2+-j*GTB&X6j;iQ#A@wdd8@xGZQX$ z>aPM92PQ#`$G$fm7bAM+`^3dTUE`v6bUqys@W8xSXtZAr3o{uin%k|$!MqH@yr##5 zQ3HM{vj+SY0mcUN1yI)>9ZiEIE?|QUqDQd7ADR~#JY4)W;G?1OF%XjVBN&m7&4v?R z=*Q&C{95xSel6DLY$$taQ_EWZZGWn_vHr(F^QG#4z{TE9q5enft^UWe{Jq)#-t2#G z_P;m#-<$pK&Hndh|ND{be`{I)1H{nUPk(!Nb^_Xkmf?YT2x=rpN{qzcdq;g7^%;8N zcrmOHoTLXo19mwcV*)@D1oR8+2w4>^A9U2q98n=L5c5EP8`G5c_O1+MFYOgeGvj>5 zlBE$(3`TtB0z}IgcRoU|1JegP!NkQR3ZBSpu{}ri5%9KDNDJMv-GA=eII}5|KUusnCI4e8aI$f{6CtY`{ne%HvGNmf6wvv zrvJU^e{cHVoBsEv|GnvdZ~EVlr2jPjQ2S-=RP0z@a<(I$_PYw^^)-ZstM*T-!d^;VH?f)sP%dsApeJh6~J z(yPNzn-x$raFT2Pv6Yq4Lvt^z^}x@}{yD`}X4t4Jal<3+Oe1-TvyP6hLvJ0UL59`~P$Nt-Ah4ix=)dl3J+n$Byy- zycItxMx?{{$IfLG{A=Q$9LYd(ey#15<(bKNZi$QD(Qyyt0OQ65#*f1$Hf!<;t0d*z z$&gw)E`IDBh5?NEcWxz8l|poh1i`nj@TW`9=B;4(eR-P0jin(9n9Cp_pBH8Rq$tZ` zNba|Ol*941oD9xxzH~|E78(j^_a)Z~VmHX2<*_k`8Ku49`0q zcH1(l)$&HlmIi~tQw8Xk8A2_(utdJ^rQ$4u`kfW!4I@*0ZkVc;3v@uau{wurocKdO zNu*OuDr}Qsv=iXB2#XP-m;@8UuWh}%#{|4ftHQPTBbIle>?00xB_>& z=)kgLZ|kvlmD`&+blqBsP*HNz=m6Krk|Q&wHdiH=_HR+sW9pC$iqmOi-sd^Ua>G>R z4E;3oJ*yxwS3}9OLipvS3?Bo|!`VFU?;z;mjzH1?1;`;kjt9uC8D1uhIb8CB9Lf-0#J=^qHaG zvnIj|!)PkPD3@$HvO1GjqOSR5yCz=ov$@d!iYW0wI24z8S+eUi*xxa9Y8x z=fi+_>9r%l2%;q;vAvmDq>HpVc1C)46K+U~exHU&o25bkzGX6J?G?|4p>zG(r{%yD z+Q_c{1)wrZk0O*fE9p&qCyNIFk;ZPoX%y89p2!f1mAf2*$`BAh-m_-vtlDsOMR}*A zM-Gw_qQ7%ECgQt&O3RIT#E%4^5ohSAh@qy8^A$}3k4f( z!AfyuA!VQq#$Y>m;HfaXG#ggMDueWhs$iQ{UCvMX%&3jMrj$ABtQy&C0kCsFyUwew z#u)>G9;rtjnR}Y&+#Fd5Ra0u3&}N)Ei4gw3mg$u>iYHNa=8VlbQ|%0Hl$GNlW8_Lo z5l}c?tG;&?q_d2?nIHLYzTPI(sIj_WS+S278+I!{>=Rvyyg2;8y!ku;{W^0^K?hlZwiKsj&mw7O)ttRu$#xWdnR^UyY~u6ZxNZ+RfGr z<$r3lTCLqe{-@@f|If4h{X}^eaQfL+F`ofvr+;p@>j%s@Y)hZQrSj?HgxKKX=mNk5 zh1!Hhq=KmZROWu%#+AGktWA z4bB)_>}Gz}&(yZJNIp+VNKfT-yYTd>s`Z^|lXuv8{;8`N+4O;ZAJf)#g5=Su+mTWS z2J5D|w+k~!51F`PWBFLwOkyAN&Ss;I&y^!@K_F&>;rp2MQ;b!9biGO$*T6#0zm2)k z6T2wRyn(MoJxZa`mD7(M%{%=E^UwY)(98kMk3Vd zf#$z0%|@OdX3isXPtUB|yibQY^J?V6xd z#SWAsbnEx#OgR0hlPgN}mUp;*Z04n{>+AoA`Cq-rdBOU>%|>glnXmubfZK29KhN;@ zmjCrF|La@+*SGwyZ~0%}^1r_2fBm8Izczoo`o9fjU;vqyLJX7wbadV^gHF)-oU&2? 
zSA+k)r>vAp&juWH6JXx=HWow#&X{h|+`@yxq4@jFD5yM70oZfkC%;R0HDV&nuH)Q- zkcAOl93z}XrfF~;QL=;Cl2Y!yMg;9^1kn})@Z0zaL@R85*_{uYxxw@h%u2?8dnxb zmLv#^ghAgkQW_d5xs>FH6H177biM>q)+deW97t)J>+8ORCy$)_U^E%YRTc|g{2lWg zeF6l8as$GvcR0L0I?wIlbOS~4VQ@VWOvb1B()`iym0)%>a(X^WZ?I5)SKE+AP$F-N zO^Sv7F#9p+#}t8YK+eC3L(fJ|CYy=fT>BZRwj}-}sEXot_1*97O+!q^fJ8K2*l4;D zt%wdR><0=TQV`~sj{LjDQ1v?ZE<<+i#z}TQ8QGYkVvNy*qLNmKtk+3bh8#`DynZRl z=w#P_!<>n^16xJwA>=u>ZX-kDiOebjci-_kY#D+zA*aWqA~DQ8o@6;D&l`!w!&Auv z@_0#K*!Y9-`}UqCI>ed(8RYE4#H_i%+u0S|U@qK2E4LGV&#b+@UxpP^+%$3WUCWm3 zgFGq@<0$GTaTdF=VG6-kpPRqO;^xm(h4>>Ioc2HBnM1#S`W@5EZoQ0V&iw2qu2@SW zy7x0cde%KX{)qU2|JUiCemXk&^|(LSIy-7}#9`a7{eGu=^6C&cbIk}thv8OqUYH{% zQqdh-!?Wu|6n(#h(ObJaF=Ya_+mr#+Zk8&z-71ZN`Ddj$%x;&ZK+HB#FvM!onyr09 zsVv12)0Ss9m+XSYFVOrq?r$!iPuO+A0sNqJenn|1la`X8w?x^kYb{Kq5yUpoye{-4+XfKBAh z|LZyae)@@};t&_ll~L~h;(Kx8eCqk$S0^hh9Z?G8{9U-%iX+%cmUxA+5FUyk~`k57K)eu8JywQs$~wrK7NIJQ9G`S$!$_PZ0DImM$VXAYfx^Z;w2|XDR~OS zcaio;<9!T#bd90!z-Ee>Xo3{|n)onMw#6oaCm9yoaN{(H{B3@B6W=QR+d@a7ap*iy zil7a61qOaS_LDS@W_bBJ@Xoo}06rN7sqqTFWr|FDjomh;58A$m_iQmQt$4Oc&stj$ zFVR9cpo2i^XCxv;NF%s&W=R}YJ-uT; zeM}B3Ku8xVOHtL3+HHCj2y%?iBOQADr~_p`(AI%&<$-JGh)5V$h36z*x|Jo4`>j|y z=ckoPBqbE6s|1l#5Uv%zPFmY_$!v0eD;*EfpsdGm`Nz?pH2?oaugnVat_d=HB>TeiFslri^wlqw$(iQ^ zud!PJo}w|dq`{IK!H)6&W0Y(w;H+s@fc8v)x_92GlV*KiWp5kE{^cYBXUfzJ@-i-F zu0LeI*CNz)CTrcIw6~ud_n{LVlCNUv$nS21e>Cm6fmQv7!ypoW7bx@D06NG4bOzGl zED))=AP*1!ehebb3yfdce>wAtuj9zf9czu9Xeh)-USgn){6L4p$45FGzQjSnRwhKgyqHb5hA$34pKh z$UjPgTZMPJNLUCR^ApM&ctWdmB(_qapXFg&R77g+6sDk0St;!~;F&6cX0n?p`9f#0 zIR7`u>kCx zy`*Jx z$t3UZpB#N82jl30;u_}BOe}%2uj}T@`~(b}rjAypPnOxc=TD#v0PPmpzp^%s99V(e zY)7!V@d(ypfIj*Ze-vf?z?%o^md%6d1C(Caz<~U&D8x21J{AOaRx)kmCX7-|6(^1H z_LYoO&V(|Yj;#VN^9UOaOVlIOnuS1M<#2TK#?=XPVxeR)RYD%dd`0?#YtBPD7XI;2 z61T)&pzJZ&gd89VXrH^LvPqMqv^d?fslCnopb5*5j+*-p(X_wBuv-WDVb3^XV1&SK ztsUgqbQz1GsuV1N3q!|E&)fhRMvSPV^#J}Syv4vKUXtj zxq@r6kI<5hPB_8k1{q00EROQoDAZD|cwW6cDute07~?9ckIX*M-QqxTzpfZ84*)M4 z@!V?*cfwgEje1Taobx??&8S32nT6v2SRtyWddW_Jl9t>|nQS>ZWdv zq(`E%*xJ1#+<9f0s|FI2WQcPS`T5!5+$n+c;tI2F5J=p1r^_Sb zm++vOIYYF8N&Hp-zG_JEG(~<=9Hz_;X}bhc9eJ3WQc{8iko>F2yTmu2gHwA!>T(Z$ z!uRZ8T2dQv;8SsEDUM2X$DA_*xG5%);UM1N8Bl>y5l5~+4M$G=p-6>vgBg%=9AS3A zEn{wvIz8ikqECQzeBL20B<||{SY|kS>e-zcBuh*>1SKjN`1e`fxrT1A3{4FRc^hQW z#l1E|84!L8(|hSIlTYzIQ-bfgAhAUxV1q$CNh=|e4={KALEEbrWN@o$Fh)9{!$3Io)>aki$Qt)<*TmBaAM48K_zK6eH-He%I42MjNX>!L!|b&@PXUwTvA~TEA;QnGbOto8R_o9FDC(1~u8oV(3-M0^lWsdpd$W0!|GD)v1GE+tsWMv7-H@9sgy))`a=Z!-B0ZH|WBs!D(AVlYZe&>v-_e6~Xj1_v%Ok83?H-MWoUJ(5TrjQX?i+apSl)sso62oldZv|qB=BIN z{{#0J+=(ZlKl^-JlKN#7+@E;eI1Z~B0o8kBuxUW9OjIpw4~>L;N`|xTr^tB zp{|80&@Uh3tfOdHBC*K1@Cd)Q(l4M>p;)^--Gae+=NLxOFXvJI!JT#V=S>_)#~w9X z3l6JU#E$fDcw}@olH^O~Ahmms&D6B>j9k%{vx;JdhHHr3hTB{$+w*_}14#TVz%&}( zM5NFbt_pC2N@c(^GomN&g39|Tp&#uS!Lg&C#XAM^rACbr%gI7Op>}WpMC>FzbKjn0 zvo0ElK-!`%=i9XPNM_x=jXk+w>@M4^nT3=JE4hIn#p+MGoqq^rVh+R5q}`!m_bc3xwlg(11^bgNVcCWodo0=v zqI#<^Hf+A-c$wjE`%}9frCWFY)zc+_nV$cp-rm`5VLh}|N4>Yf7Khnej|1k;6iu+m12|%!n+h_Sj0C{9L09o`|Xw14?Fj> z)OBDB{=>ggspfKNt7c6>yRWPZk?6JO6F#r-G{N?GvaHl!2ZJs<3ShBipD=#ct}DK@ zbCXU=Spw~cGTTXcPEVDmMuG>u$WD}t2=8AqXWp}mJ!=h9>x}PNl=2=7JDhFmmk(eD zPk3ZiuryR;b@g0H9hbuvi>(kvHQ+T^o((hiv1xOy&20!;^^=c-_0j_SivKYGC+4IX zrZ1-dH5=`M{*I)HEm*~=>?3BUVHIj3`Y z1a7`pM5D%CN7kT1IWsbGg$Ltyx@m3w#C+4nMpZUFl=8XcIAZP5wOd5j8OMae7*DG} zwO>M#PSulQQ%@g990;Hv9f4KigX}X0+7*8buJhK18Zr{%D|GVwx+y4I? 
ze?O5|Amu$EAr~}F)=`!+p`d}Lc9Uh}pFe%NySv*CoXFXZlj~2q>}~q#I!&E%u$A&I zC?R)RrM+dvT^RSicB_dS2(m?!E8fIO@NeG}gEKbb&UuIZ+V5|5x+hFO$HWkrtk+iM z6F=hyOxYSN-g(~dbEI*&IL|cnt7)(`rokrE9L@aEO)x+|L80=v+B?A``(U999Y`|0 zcthdE6++ll5MqJ6`GLZ>W`zTveSGLIy~x)1Z~N210ft;YVfhE*^SkwO{@-XcnoVH# z!2frF@9*ukcaZ-#n)NsS{~Uidi#7z-pJ~Ne4Jae@Cs!oG+jr8*mFU5R{v^M517H(? z-D^Ml`#XFYHjq#1WA8S_JzVPS|N8qocploM-Lth_|HW}b_Rfjo=wTGsK->DrqL{&B z>APP#i9h|IlKlS6n)ODz4!c96-P~`r|KBn@R~xvZ^!@@YeqbGpkm(!_xIAvW zp*hzE`_TJx2wLb)yV3l>K2X94VZHlM?pVb?B>lS_;Hqh<_XvHIZu`QN29Vd7U-%*jG>mWq zhrNPcfbQ&;%hWdwlyd`Dln3hY1Ho0!badcAmP2Wqlz6Wh7+4O!yV`fp8B;2(gVo%J7J0*G@t;!XJNr`)g#p&44N)4}DTblK%aRXEjR}`(jr}^o z^OJ=JG~1-LR9Y&}%}Rlhg)zQo18^Ay z|C;!Zf}xhjgq}ZJ5||oWl5#mP(%}<*xG{XG90E=^S>kj5Hy`A7F+jeqeF;|NI69E0fbthwo2OGj9!szDb64jGf%T43z zWG+6X?b0cQ%{ZH~F-+8ymau%=@t|TxbJ)dgcoq&FK1T5zvo&VmHjj8~Rx0-xlR9fm zc2>3XrE>OZ?u&$GktHtR0%2+r&aq!}(UeYijm;-_N!=ED3`7m|M;I*5IY_$IQ$GK~ z1{}<{%TH&yC<9E{)8}QmK#q&8)^cuJTT~swf29@)H1~)igt6!F)f!uvAy_UxjRT6< zZSs*w0he2mL{Oec{)QIY8k5E)ZWI2feBc_D!Z2(NCOuO>qL&(b;I5Dn%+L{@cP<2j za8pu=yH(8(?h^FHchIsS0jaUsJg*8eeM~{Lbih4ASJG2>_Xu`^Dn_k7Q!H(~1dwu$ zqLeDE^6KLSpR(HQuvrdfylN{qv5BC5Y=niZnonTYKu_Gl^CE=FZanb{O~0djn@W#0 z`%dH$zp$H+a!zjMz2nHa0If%TYbH2XO13hG5_a$u;e9q4SWtHHb%77!KsqGqj7}7T)LC7i5+ed8dZn z^3JJ$?YIv>Z0Tg=C;1O2aTdFAh=)4pt#98&{typ`X91xdcHrVI=YG-M7>}dpS@kVN zeF`PZEF7_ibsbE~#LcB*4|RDa2OtN_QS7NUsz$w4vaWCw?py<3L!T0a6GXz9o!Kkx z)p~M~zsm)39*Y1an_j=hX>Kt(YO+ourw^5Uq#t$OmYM4-!FXo=)Wd8in z>FvsQk;6^W+--jAB%CfA8v z0R>DUg-p_iKb#;FnWt*jKrjkrjB}m`uJ2RqPS=4|D&8#Rf-n{QqtBpWv<_?yL#9Pp zd_OHJflnj~wA67EY?n?Ns;pYeiC|j9=6&}3YyX}{7K!~w+`w!~N9O|($Y{RuB?w`I zRCl=Cpzd~)Af~Esh@%>NS_M*b_5S%;w<;Q`#^%KhbS4*^R>iROBDsdeFU^kX;nUc) z8%XYSGj*L;`>;(2JT-B7=Aa2mZ3wz49|1KTo3mNRqg1D_;EQ4lO#%{|`I|OoYix{%FyU!D+vNCaN_2o!e%(m}Fc#_wo(Fe>8uQJ;@t( z^I;gkK7jc`)VB|V@r^I|8oo%^sNbtk$lM?D$?{p5jigO1pHbGXiYeeXS2zU#$sh)$ zRD#tdAkh#B>lUtMd&kX!TW5+jXes{Il>99LspIIpYnV6;SkWZ4xGaWUd(l=r_M>wNk^@4PTk^Nk4knx*@Y`{=s0Jdkh+8!{2=3daJ3G zMeaD=jbW#K$gx56yqeEA$Wwn(k6dE8(64nssHKqV$|rh0`Ptg{l?>U*y$ytH-4|~! zDjQTf50!7RwnGQ9CLTwva|bU?g4}t<=o-&z&CTG^11aTDC7nX#Db8ljc>XjZR4eE8 z(mPE{Hrg6n968Ws9uKJN%!`R9m&oMFBbxoIl3a9i3$Wnafwe^7A=`^Fzm5|PX+ z+j>5{EUn!MUep*}Qf)ZVfy0x@Uq)E0E!U~eQ7&7k8rPwuLzGMPyw<TR z8+K%iEPMCf9Mu;m2qy`>SYvAXVU~^D*79*a)x{o59z8_m+5OgkfNkgA{Gyr6UpDpu z8t0m9#1<6+PCDwr8dF0PW0nMJkr6-XwfZ;;u7fPi^;=`%_{Pb#UXI7y24Kz#=!5yT zK2HZ{wAL85?_UKu^!EI*@8mx&r2W9pwUc5_R8V6k9Si`lZ(Ntktuxw=@E~RSnnC}R z70AF+BDxMIB}YSYTduK3R>pkGd!Ze*l~2kf5~&|KnX`m;_sGa=t3a|8sBgpDKKbvJ zVwyPKT$WAfPbA65p51sXev(BO9ku!1Z+HZ0`E*`}%+;Xz%(+bs_Y{8;3>ugh>^`@l za)u1o`Z(nlxWMft27W!mdBnc-OmO4$2;UFP>UImBM*x)Q?}w5e`Uj#k7pF1pf>N91^n7+uK!AM$P<9R1rU+yX zZy8X^=CuqU9(MLLRHn6Nel%AO5sY9ah)Uf^=l)!2%Bylls@3RkW@oBm;= z5&8z*RLf4?t}*L@bN>!HKpS8GA#!bpHgS`eP%Pf157*dTDyq=vPp0tjVH|%8@H2No zgfANoym93J;XGiYzI_(o`W+Y8q(No{FFWz90!qj^>V`mtZbp30A25#1;Fca^|W$1X}4)v*Eb^d*I*A8%~nX1;cw$Q&bU#T6?;q^M+P66qD3` zJ<9}fS?+4OODu?3V^k7x)x<~HSEM*I{uY=gb65%ZfOUU6VZ1drAvGqZ1V3bWLX^aY zHd}Lzr>Vzt*eQ<2{8opjpuQNcwF|g4)0{}1F^ZYP#a;-B! z{b&}DrWC^H0|1x}DYY_cwkEd5)YO)ZY+35l<>s;^j;=da^8VcNfR)3aVz6gh57MHj zj*xwH#njlGqn7&xzM)E^nWbCJK~uLWJVjjP!K`3k#dlNbB&n*i7SoPlo{c$&Em^6i zP2_iaQJVyzpTR>$M?X}7hSP!0o&uc7+(0j%pC*f|fH^?IvbNBh9`j!N&~3F&G3^{D zkdseefiTqAs-aN+0$z3HOhK8ROh1?NvYI;&l}_ZbaPk>5Lq%=*T6Xl=;i5xmK~E3cQC3HhVjhQ zh$0v%YOZ3?Wjx4qyIm1JRk3k8carj(O?Nd`Dn}NJ4$Y0yiuzDuOz>iMkzAX(dCVkD zm_?{tmJE&LFE6NIgszjwdU1&TF;EZgAx4ri&~nD1^B_0_+Cbt5U-KW?AVSiyOrxyb zd=roSE>0R9lG9Z#bq?91xsmjrWB(c+&S*P!u3)~s%iamYWUxU<|KMpNesL6E!FEa; zb4;E(ym>5Mw&)3j>%`9-0(C-JS@JSJ>;)j@*XuHX+ThWO)1Nxg^~51ZDU$2YPR@?) 
zP5b3LWH~ z+<6$^e_nwJ;ZLc+DWZrPlhI4A7n*@KO+V*d0MDl@0jzX*^o7dR-7V@~B=<9EvYsX` zZqlmMcX5-4*3&dg<9glZ$nk9+S+|+HxJTEwsOq4@=j-5ozU+alW9EzARUA0FQ`b{R zF$2NFoFYY3lscK?&Y$R>Vl6;D6Cp#16zE(l%a8M(^=x#tr#<6(J#JRl!wTaf_@2Hq zAXLqrAC~WYiaXBDyMsa1-2PGd_P=p4qo3jC6~#Om57~8^zB4>U&20~?+Xh=VSk8*~ zmxivn`=NFBqOj98cHZ^ziRdlA&h-wHTzdx zyS?cHYwTlk{->F?C3E^MmiW~g_ZBv*(jXKnPg=r!?Y;LOQk>4}!xm@pdd(51y=^2}_?9b{>kb~R?nrOABDwC(@#^b} zlJhWmC?3Td(;t7{mp0hlExonBh2cWYyq~vQgO%5JmRPHc@o~Ga&w1BY?I0#Cxkc>S$5Vm#d59pB z+w1eKV+H!%@3D2iCt+4_cz1ZJi3;9z7ltb6@f1IyDv(R4z;Fu-o4#98O>_Bh58dDH zvA*5!QCv91O>JQ?C~Pi1-Za}j#;)Jp?&-U=HMQUAa2V{Df^wAMIN1!-`FS(r!+|jc zZr$=^Zs83LD-8c-UYJ=a^YhQU)eeVc)YTNO)W%74bM}>NrttKmfkMcBooD~Tov#6F`8kidQ$ii z)&^rabx3u=p-}strSnA=iIU|gXB_dIi|XPSe@;n&xl#9ZQS>xaa3A$pF8r5hzMRwY z{*G6Kp%Q~G-}yB9_j|wfm9Z#p{Ul9h%PfO2^&3(@Y@f=qSvSnf4oBFV18w0I1#E_Y zMk2hM^gMbpi~=IZBNB)ws(sHxm_Pw9d!HeLo}?|xTX%p8^N@uP=Ub1u-bQ+MzmsnX zS2YLrNbTFvzzPNZ=X!V_)Ti+~c|%*>MpX%3lAovPj~mYSA+pcB7hfZ!9oV_^ktZMz{S4kzsvc?# z-%>OoL{!2)%={#X0^`+x?9$b@a%M-Wo-fI9t!~lz98Q;={yDljR9(f;M2oJ{;wic+ z7S4Cx)jieKvmmBO3?cX`It#r?Le5|k!KqBhi~k%Q9V#pz_Vn}|*4HUaz!tCe?3A<0 zC-z=(uHXb;0(;e+YvOQx@5|Lhykf*3aK})4Lz9&lj3H7-*AJ5PqK<7-#kV{n)s7bm7FwV9filmp=*TsZoF4YK$EWbku4Qg~9hM zox6QAzmE-apmJbHHVHoP|CEj$?P@FXt3Ue@Cc$)he;+y4IC{GpPu-m$A6lUKezkj;k!*>+dIQZ+CyRsShKo4}zfYXX!=_ z+F^I7ttOPLaUNaSMq2zp-}AR*mc0`2dmLjaN%82|zuj)-$g%E89{Y=6N5MdH=k9D3 zKD_1K?w9U%;UflN=k4wlDZ~v4Be-2?S`kXBy8q$lJ!v{Tpi^*lZ`2R;mJ3@~k-+|w z-JwQ7nCGJ4=@ps+>jc!zOPBcno)Z!f%)@XT=V`TQRMnVk`VjW)hj4p+?x^!{rT5iS zxm+U#q@Orul^*7QvRL*+_TWF6Mu)(M3Zzs;E2hbM^t}wZfr+Y!(-Atub-iTAQz2i1 z@;Vks?dIm3eK2u2A6I9sd@S@RiXyh|dIQj=;XBhIX7LcV@!mA4vHD<81*Fmsjw}`qp%|z_`HD07G_iS3`%A-BKh6~j?GB=XLe9=$4<~KMGAFm$D44Fd)vFae z-#3$@g9f77Mc0lHj;{^$TooPR9}@ZEwN8Q>P*Ms8IvqY|bOp|@pT+_I2<^FGvA3SX zIxuF#{>yZjAR7hq8#@2`m+%OzviY_NLk=1S993f2RNmx z(WZG%p|OG<6lZFgMbr4YV1F}ON_9oX5sN*=`3^TIGrCUb`8avl7Sp8tK#fm|%Ft*t zcmR!7#hPcd@keGf2$C|RMbHbGKdIQStXFDKM;Yz>1+HGN;#7kU&ipl1eq=OSwLA9H zupib+ok)3T5S+rk0w-Q6U6nXW>^4s4QF<4|d||gqiU_+E`4sGjDwqfC({zEp-ENez z8wX0Y(dG={)!^=JJZUdH#LN0va^*O8d?Uk>;c`KyPH*Y~M&Y$(khhcEB-Mm3u2!cPz_*!@PU-iYhl)jkwo=la}l z<(rXs(_OJ_&3E?rrD z40=Df5$pfy%KGoVH%7VbBqizDE%wI!?Hkp$5}N7y{#PKM&bu)(kT0iG8)`+Fkq0mh zP}j6?91Sl&5*NnU745#dX@X{y-2^T{k}sfLM8wQv2p@-)!frf)1r|k%xyMpI{UR(L z)1Y)fMDolgSm0E>Izrb=hq;e}{#&q&dFU$0M_)P3>z!`EXQCOKc@jr&{%>8VU|E)ZS-Lc1u2ae{gc&b z3w^+XF6d?h2jG6i93JTvQ~DX5+!nDnEktSN7D5C2R${x|O^$E>d3Js_zWy(@^6BJw zX;E-~;{$CWt3^zwxw|{MmC5qvx8H&_O!^{GuGeGNnzL)~^*{dj^T(e*{`~Rhk3WC> j`Qy(YfByLM$Dcp`{PE|HKY#prkAMDuewN4x0C)-jQ#yn~ literal 0 HcmV?d00001 diff --git a/dirsrvtests/tests/data/ticket47988/schema_ipa4.1.tar.gz b/dirsrvtests/tests/data/ticket47988/schema_ipa4.1.tar.gz new file mode 100644 index 0000000000000000000000000000000000000000..84de0e93835cb23d76bbaa22a753ea407dca493e GIT binary patch literal 87335 zcmV)2K+L}%iwFq*8NyTm19M|&Wo=egQE-@~2VR8WMU1@XMM$-1z^{<$cTxGjT zqIpVoQu~1>DVZBe;>#nk_ZtWdNkkyP#UY8=+W-D`&jHK;phSw2pd>^pWpM@=Jl)gN z*BmH)dh_g88LrdQGW^>pmrv@-zwz~^QmvnqEA>jba{8uRsZ`4KH>C0W(B8a>g3#~@ zc@s`N-!lGKd*4I#f6}Y;>7=~$0{iax)^>xHYaTD1XlA=5$FiqS^bYLcdVOvGH!4-u z|BchrM!D8#K>wdq8_@sdCwl+A*WbMV58ob=ubuPmC23vte+;_k-$vwX@2d3+l#k|i zK>EHn^Nj_8Kc_yWB=Dx;t>M$3$;yj}X}H9vmK}t?J&8g}?2s6)^~v+e!UG9fVL{|t z)Q9%MIVB?$6|g`T_hws*88g@!#Na;hInlfJdlvp%0 zY$p&D|A5{J0GDHtxp70GmrQEk04jr+-g31`7dW0fW7M&}OBnbwv8TlK!Xt9)+tBBs zm+c;jw`kDDH#u& zf}XBR49l{4gCHFS36|8f0XDre>5`WZfWB`=$lpk2KySbpf$-$hh6Ann_G}&^aL0D9 zS+(g|lru$<6eOP40J-o7NEy6=$+P4hG`5C+9}X@(_xQZuBQ0p*;3EzUe5Ep4@a@o` z=`AL<%a|2Fd$wyjk!1t(*@(IIjz|zqey3(AS?$;hJLLCs@deCF!#4o*a$X|pd!fB0 
zj5#|D736Sx%r&wA6-YeSSqWx%Yx%+C!1tor9RBry#9Io(#G+H%W#b42_Z0dSFj>?M z?WvuNuBq(+6O9FwbiTJb{ZaSo5}KQ$l*oGmFY-MV8q_!#4 z-%56ZMI9QZAA;6EPC-Cn$S~Pspkv6$W=C{SOgUh4C2tkY zs+1~~+G(v^s==RCh!@CZ^P)rETLBKB3)bM+w%(I=XV@a|I}T$jAJbqm_!}b|te|`c zF?}jK-KSFrwX6WG!<^g8_hk43O!#*~DgfXFAk-02{RIB3G)k3f1yP-l@5fibD)6b^ z3kH+64fZ$m{1sSq;Hv+ge0=LMmELlUAXrajwN!%!1(6)qIiivjvL86)!1lWva9$7t zl#Kg?IsUwb=J-1}Ce6VQFu*?mONyc7>K0@ptN+sA+V_-`No?c=|F{I`$){<863Uieo5_3M%g z8wbKF004myym7!gg6WL_QIwuPe7vM#U>ab8TZt=5E)3V0an1}G#0_FaH^3HJo;$T? z#~y?(Ja|%zW$+ZgQ>w5eDh#2yINZA@+N(5_z-TiXwut>IpCx+HveK}Oxz>=v-(cb! zc(50GOL8$Dj^JfVofg>bfScBrBu8jGSEodS$Cu!@Q->rjDpxaLt9@f)J9dbJ zt7im~=YVJkq_cS|1X*TpLRH}g+?}`jz*&?9u2Fd~0ep2l(@@-jI3Q>PFeVmAptZ1F zRNqDjAO&#j5N4#-M5dvPOe+x4u@J9P12-3^3QLu8RfI@w zw8=R1H;PTGc>zvLk4_DSHd)B|6R#&_Iqzy<69W)N30rB?Mz4caJvNNq1nMZ^Zo2g{ zvY=dwCyBvA0voUVnc>=hu(A1`bpCJLWDXfxWl-P|^Efehyf8v@z8RN?-OKY{=lIuV zZ`{eDV_UpZB$hs%QXjNp4@5Izaa_zq*5MwYful9hC^C(Jc4scQhJuvw*WqZO6c1rg z0V)uvjcIK>HLbNYts{Fu|LwUs)P_(){_xyC1Ho&?&6lzu7d(!z?JW%3{R-&Xmy%li zMcS8WsKB85sP{s2t2xE(%ORK|d}_nt{{Z&;)arJ0RWfu9!KOulr0c*iQ6C%}kppma z<=;Tm0k?_P0W*RT3k{8JhLL}HoeLdtV!YwG@Hh!f-hr^kUu4KnLVE zIzcxSOng9tO=!@T#5_Ihx&nI`H`+8!2rj~|rNDRX|3p;D@?yXveRu^9_ygQo29KGc}LQQBtKdIM8ws~DaE&r zwA;B2ZUc+uAix*v9a+@u3J)EL2YnC9-)A{C1tA7R5Xjxb2V9uQNl(m+Qt)jw>VrCl z64b+e0Cem5_=Jodo`y+lowa1SrEI&$@>jY?trL~?x!3qm+rX%%VDy=fNL|L1lcVLr z@UNk9=Br}1R*MArT3Vic%V$E~+@xTQVcM~n!=QS{8^_=>pVWH-$D$`(UfEUpRQSM3eA-Cu|EpC`_xt}|ljr8J*)I{WEDI32C74!IXucqXJE5H@;6{@9u~%GCDq#fg zY3RQ>4p8a@(D#8ugRF(;I|F$f{II3Sphz?7cNS&Tz3Mg>i=zFycf z+;K;YTaZuIt@uKOD&M3)EQvWvHTW?c015Z=#FD-;6>LZd%xC-Wjzm@hS_B$7J1oM| z5MoiQySjy8N*U?#o7l*WTMgJKNHn>qM+Isbz9pwR9s8QGnhkUc5!H-O!if4KLcjR>aMv4HhVuOJ7RWS{*{^FEEN~WeBkG|58IH43y55q4iL9M6ccKKW3S-Iu$Ib z%}<}MgIAUCfYR_nOtybrNrWCanm2}TgT+M&7qjtxfK#b2g|L=`5c4_rVO-%2Am@wa z)+v4!WhN_>53*j0Cp;5nmEXzv>!5i$1iV9!ydze3Bn5@#f~Hvb`;yN3b1%fd^{z@4 zAmNx*VXGp1%z=rg_%umZ`f$F!`Bx#j=-_pK?R=F!)!6=DwiM@?azOXzf2s`_TB-a` zyk|9RQ{zhVV=GDXc|=1qd=Wbdc8%~TdX z_YU9uz|;-+`V|e5zQFS-0Oc*~UJJ-}tu!E3R3sp|bR`2zRut8J$zI_cslwu7fi?M} z%N%T?rYLyvNnl-f>J8{rLzu2CO7dbH<3TSUpt9EW3Bcv9dVLVw8TjyM3wvPJ^HHjI z6a|+ptKsYOFu|t*P1O}8lXwUKF7HA!sNqv93Ik{O7KTX>2Zi8dEp&jX%A!DC1;279 zL_-&s76psR7uz0{FM4rRQ2_bz5(CfE2XIncDCdUVPRb!jii=Xszk>Jf=?8}JqM|qj z;ffWRVMf4|@}i(!+U9izmnRML`?T8yi`ZgHEq1 z(D}3tCkLKdQ53i?W_cWkFXhhI`b=$6Fgtfk+h+*c5bJ9g^{PS#$dHeHoy{agHBO|!(858Pg*y7T{LU8IV)2S*^R-BVS%`_xYR+v5TX2WQ?%%mNa@}i*q z%kXh7Bx6yP^1?Czxu*U_jeE`sol0VEUBYhjQ#n0lOQ+$CT{{$u(=rLb&397r+*~GE z_o$;nO?UmFpcciAkbKJS9dX#XplVi-7o@UqtT0Y~Z9`O&m0FXm+Lgd`;wwXnbxM-*-AUnzpBxgD+zb0Ha~^7??45Ud$j^T3!L&RZHV$A}K_*^g>nd3p z`-y8Nhqzdj-K|G%$xEfhWhEJ(Svi$98>%RknV4Xkt{PCjnj&pZl=9t&6>Z`g^(|1L z(a(4RyNgWhOYngUM%<@U(L>afe=8 zS0ebxwkkdEB<}l9;ocBmT0f)?de$aJd5Ibta`6x$8CM}J7~<+t#vtrhuDh?iBOdNs za7CLGT|ZsAdmr-Iey*F88Fo&XGV!SwsB?^ZjhMt;OKZ^!TKCshSLsvvMBB>q3~Rt2 zIR6_Q*^|`z??$512;l^i0$zyIz!xt+f-@he76W`FeZ8|hwlgGY%I1}fb0N2KK^Fkgnuwu9czsY}H3Y!|ExucyI_aaKL0>~%7_ z7PA?bihC4Sg9r)z~AJsQYHPf@1)-zGm)Zqd`}LlDlS}xBWyyrVjHpjMXdT? 
z9cvDg^A6ZC6u6lr57gr(*hZHa#@xH5H`G6Z=J;V2a!-zOB839&H<<@XAIK08Jm{iSw4!dv zqk0(+G`-`QAq|I2t!%DU>gE4d8yGA9FC!>&D}KQgpGti#ZPEtTr^bmYoF)|`Aj19= za^3ao`q{0 z9(tO@ewyZB0+E?B}Y&&*s6+MNM} zd`iWYZ_(Jr+jbQX_Qy>vcWKdD`l<2-~Ch%P6FdENKz1$aZ+>I+#XhA*ZlXebVzV0Vl2=MzCl93*j)pwEx?AmjeI zm{QMt5^P6m8Y(s4j9{(^EdGqY`m~I6!!iArA2n|bb#tYUhSWC~cd6Q6hQSx(q#SoD7`S^fRRvX6!eEz#o ze%CW_`-UI7i4%Sn;SBPBBE#V)q{(@E^j5_8vX~W`@&5(2kk^$-n|5R6!@?s%WX`0eQUIgGTOv~r&2=W5^?rJ&(r(V+0 zzZ!PG1HZBC6^we#=nwhyD78AS>95!dW!6M|(6g*3G`>NcC0;lsRcW*N*R~*ha!nvA z-li89FufIsk8*L`EXKN;#&bt6A}5u;SZMtQheoPGF9_?n2D!Cj0RfqSzs^ zc=GLpo%Jg3v^I(ww})3v!K2p5@Mppx;@;jC|5*h7|i?8xBb;I%xgTdH|=e9?n2)Hplx=-Yse16#iL}|LYdglyciz$o2?g$ly_5&U)E8c z4jt~LJPsQEJe1cJbBJ~jVMtD0ySL_*6u_|s>AU38K(+fFU z>DEmm=FRwt83VanE?IVxvvDpnQK)`qIlVX8yM^%n<8FAhGz}9Q=(YTBMRTxgrutM0 z_$&jiR!ODgWs#DimPJ~^GPjVYD|aMUOX{$Wc|5&40jXzGl#fLsm?fv^=el%C^mF! zSSwE@LUbfaVnLR#3*O`U93!4rC&7Q*rvIcXm5HS95l5wiQZf6k(xfDn!2fB8uWAEB znynrOl#TdRtty2k8&z|LY2oayU@Q4i=!GFvKE5vqJk#dpHm&{jr{eF9<34clp{%^~ z4q_X-IZQodJQoaPkt2S+9&Kl{HDUnNsB%ZbLlp>xfiMdT7l6Ik{9UR7xAFy`2V5* z=QlS3st@B;d4Nv$|7`9b9JI3jp9lNzmJ+GhA(?=eWMhpkr8@bpi_^xUldi<->D^S1W4d9mD? z!O;TFFwYvEx$L`p6FQIDyfcQW=|H*ZVU4Bye2=_@;e@tY%?!+VHX1(`&7|*G z3xh~gQdkIU1MP~+25mTrWE*Et5@fu z=|&+<_f6yNVkcq-J_f!uMDmjn;=n8@>j)JHbd)`jJHW~uWVcqVB`FczL7Uttl_T_? z{8ab)bO)=Cn*DfsH`oCpwzQMa?XzA{FI1kh=tZJo5RC`0K!ap&u6bzb^KgB28dx{J ztLmG53Pazuf>{#1mf;>K2+b2#BK*ZnX6a&vN;jTs$oqw)ljgeA2x!O)NrF~N)x>8e zq-H5F@X0=TD|`lm(%H{#=sU6Duw>ofDUmT_1A0j$eUKy9dKWxttNKtC$;a3(!;3?@ z+NPP7!X@<+i<;~)k%#C?>!!y0p>7UEr`yswgvb$l8Al-uhX-*|txA-8;yrC-eB}5I z$v0!nET9%Y-8%2|skkuJiWEqQ+#S))NvGF7JG=b-A6J*X{t1Q#|1E_x`^@Ld?(gmH zN&C~;Y5G(>`&9o+{~w>)z0(u9$E!!1*+;vl7pK2tcezIobK~n?{?RFVv{ubNdUAdB zkKV7Rf3obO_1vTI|NqhN{K0aMHszyTpxaLOv8yFisavTB= z^6(gkKf5g-25Ad<)XzYxcJTN1@%icA@#XmrUP#@CE~I&NJ|s1DB+)>+5(1r;b1rT( zAEDPJhR#G`<+GFamB=A3eE-ybBU_Q*+lY@8Ww;EoyO(1nmXyI!iDUr3khyoV$iw^b z&mTWp_Q>6{!_m+Cjs1$L9jW~+%n9p(%f&Fnq-e)k7Q&2uOpIdb2~+bfH78N$D|-TQaVf`D~KsQ7KnXG zXoo%tZ`hJzQ0E|s#9QcC7YB(cpkjkJ{=rL0M7!SjAL462$-aNWex=YLzbE9+Dvq+Y z2mg9pD!x9s+G90&)jz(PZ#mZu=8L%C*q^S4&tV2B#d~arW4`0Yt`*t0c;;?sI?@8d zE26{bK;7?0uFiu2^|`$(%S#D!u18ucAh5=^E-y~E5sh`W)w}-mkAC;`bbC&$8BDN8 z3$AL%%}jr4V-cZH`c&fVQ?RsPKTIyaj2R$f z0fU`%90oG4q{NT0X$$u}&3ESSkR6rM?4lS;W zy8jiC#SHA9$_AuKjAz^g;H?=q0So(I+9psp6aX=brEdaK^j88u&#kM;`_e!(pJgs9 z=EEko8r216bQR+_%;oTrRgXWI$Ot3*){0KmiFidWh#rRnRZ{!DlM%^xb)E8<3#*usHg_Bqw-4Qd8!xzdCGR!yut~Dn- z%?vzWJ?!Oi-r>?Ax^g2ggl%tt>w|BYHLLmIPEc}%HWSE29O zk7E0rZ^7(mXp48HWV7XLhR->5&~}z*9Y8b;rC6fsMGhcJ)DIoBZ|)=8(xu^F-N$Xt z`iLaSX&AFogBGEgTSiul3sb&*j&JQTUG7xhg>Krt3%~(%7CNpxxXP=CV*lWp3y(=R z0KoA)jD3qz=-#-nS4uO}uMs+3_CPHB`RBj09^muz;`o&PTb{wmE_O%OKSA%j3qoOi z{;%qPvujxEwf&v^vtD36TgyKCExnof`>#5Gwo#YQj!GZ?uZDbPr3H-xc+Sng3Tx< zELB!_EVTg=9KW<}In53!`^LXA7iKQwcZ_9-IP=5i~U_2c-9%kFvmEZu-a^KVUaUrL`oZJ(WIn=dcU{?u=X=HAum zaYyT18hSqW8U=k%qY<5{XT%>_7$BDB9z8OOheY9u*Ka6l$;t-toGh_nF5mRK%cjSf zL_TQblEs#xn$95gal_ZgTmAxeQhYx6Ci49>ugJ%CcbR1;6RO9Q$Mc&+_ybFX%@SkM zvgE-z6ks3pzVu3zjk&ePW6)gZJWX$!r|Hl!Kz4ka$(`{+DzOsrQpI*pe@AzL1Bp^_ zNbNWV7pA5Fo1amRp=g?m_#UjhNVCi_%K(cw>C$-Uc{zCt<|X;1LEUOaofEpClfqS` z?mo4GN>*>Wj*8dt^H!5#X*yq`Ud3Ihx#X76*&C%?NV3OvvC6n-_>O75?6t*00GbDd zDrDRD@SI_Z=i5*54PqDZgyN0|6F(ar_)Nxem}MM2^#u$lT&YLz4Dh0jJ)aQenXn#Z zVf{?KuNY4A;5wz~^zf<}NO;K|jT0#_OPCAjJ@!aBN7u2bG%}}ux?y}c3x{67@kK`c z)x-)EZ6{jrTpIa>{&I7IW9a>qSqL*WEtOyC)y8Q2`QvnP-k!!iWDe4^d`~-8&%J|v zrfWxZkdg&|inb}xZ{guN5oYN9JW%h2+&hYpXR)kXsW3BXI+f)(9G6?X1%;ljhmQ9C!GoO#O zOwLJOA=lL5s#xa|{+8I03Vvip%<9VrKdRVAD;2bDwKB-_-1k$HYRo+#&^o*KWUV$u zxj0dJXr1go&03&mn8`%xWPHF;;uleV7`7q|y|4`BD#z;#W1nN72A48FT|8Qzp+bnO 
z>^;+?ulS`j3X7}MEsr;7&dT!n0xJ~1RbVdVHwumU{8sAg%db;cR(_p&HHrJCpBQ^z zc<8G=tFs_&`r4Su8abpb9D-zcHD>j4c+x$>2lwkT=T&JLf+RD-43eekNG80!cunjK zQHaj+yJw_DAb^V4ry8HyV|tk3B%%Wcc*Rludhpc0CT@(~x7Bvy zKXOe_9LDqR#Hxes8cQ_@c+B2??;AStB)42uNu7PkJP6~HrWO7&jGdrLgPT?5*qm!H zJnn}>THa_`gwdjkCxrmZim2P`bVW_ej zgk57yB-|IjLAd6{Pj~X3wsU}+_ai2o#pK~bF3(jiI-iz_5lmmWN$Q8`Pi|=ivBKg; z&V@(qdDA-*X$B1&uGpS+>u2UIVYNvgG_4(JdukaFZ5j2Kw<;|%eNhvPO;VEFPbUS= zkqgz9Df-HMq)_Yu@u7%w=14)~qgnm0e4S|9ami{y0aXK3)}WQ$(RN|UZFw%CwUz@G zMoeU1oaU`KbYWhG*e;Z8p3fFL&Kx_%^Lb*;*jv+$eCi(2S3zEeUkg`A&|u}0jaF_E zWBw3{J5p@o`ZSq!_%hIF0V5F+zDMo~B6M!e84lz<(Who?kshlcytsn_3};7mr0}OC zBk7WCq;B9uN45SwXwPh2Uv1Ba)9mpQ$I?@ph;sGkj3jlPS1e9ht$0QeBFo4G6ILBT;83d$7v z|DgPTHK`)(!WUOj2YPz`Klr(Skjeko#FT(<`Tt(v@3{q)8(KoyU$e58OmBmkDbrPM z@muR&{blC#q=&^A;EOFIk>|5E7f9qqs4PiOy0rPn{GVgkA_mFWd7b>9)qPm@>H2T& zQ~u9``rG<{iNBv%X8zeOyMpW8&;2XVOhj7HCI3+Z$DZr5?rHnv{B)0`W^taMe%uM| zNt);g4LsL(*#8TVu)nwa ze`k(+Ov z8fp9inmJpgHns%_(@vN1k!_h75MJc9w&0z@6I^JY#-kNP8ZiyiZ8lkTNp5JMN&$IhpGJ}ybn0`CiE_z z97rPlF%a0)5s2uR7lqejjzCAe`{SXom8Zc^R^Z%wPI8+fQ8m%p{q@sFShk=)`W~k4 z<C}KQmVt#c{oQG0)+t zW9o({@|+!08-%Ds9Z?g{NwL&>j1zZ+X;Ii#c;`lu=eV&}Q^TW-0ccv?{&DE)NJrQ3 zX><+0M|5SC=n`zzH8})_g`>I_a+oN)D_VK3AKE~}u{cKg7fN)PWjP9y#K+J~w6sTZ z59XFWwYWjSHsopTnv06OL6e1egR!^k&!n8ycrMe7vrRu>o&no0JrPgoe|sMfs@~WN zlkuvw|EKi-TD`ho&DQ^D)*Em7|4aP+#L%d83HpyJ0fGf{E;}bbmz6}_m&!Y)CT`UZ zNQ&i!Bd`{SQK{j1=0)z9mYUa+@S7W42j0IXTHWG4HZtDI(h^O}OrVu9y#bcysHTaZ zsAqvVk-~!j;&QB9k49GX4J9yy%8FU~WTB8TcgFCAVQFMxxd@X_loQ9Ckvq%x9CT)K zsCxR6-9{G%qgen(8*}EEP`Ih9!g|8d%EIxR8#rN_W8lw&w_a# zI(T6^4^s?e5S`~{%V5l9S z;5XI=gn9u87_ZSw+_5o!Y}}2hb!`UU$WqaLkZz1rEk#QU1Nu(P9e(Lqe(srza9Q#Q zo0$=IEytV0I5b`mNK{u0BT%HrVc?)v$e$q%GRRjAdgX=NDg@}5(&L?q|k5(Lq>PP@oF(ofAcQueeap&+FVT$f0%xzL^U!w!QN0Z@GWy{})f3aCO%IC;k5ps*Q%Q|F^1*RtxR_wd$My z?~DAQ|DT$Fwkzv>lS%*j1-||5T3neYR|&0jFoY=bKHI7ybvek!)qOIrzoo%D8ta(6lHj&qwEc16rW*W z>^K~atU&V%5Di=0Gv~%-?gJJiz~p0(Gh%N;5z{mf)13tEIKTY7(U^i|tsem+EdwL` zd?Q5(zmB)ab5-cX-w8%a@Ufq#kYXnNJyJ;tP7d-o`4WYbaf%eW{K2q-ZfY7@($i^d z7v?ZbWKM~;FoBXM10qq_Ni%G5Q;Zk$gKTe(+ophA~24;b-F_~0z(#Q5Br;r6JK zK`lQqSM+v@ncwF{>r+^r6{$1gbj?s&tD2l1fl3i-1=6yKB0-2Zd9oH<@S)Gq=}mVQ zSNId{&%oQk%i-+_k3osJdPZVLWPBj`k*qHGOW9pg$4ht+5O|v6OpAk6o`O;VVDFg} zY{gF@pouGDxcYDYQbnc$Gnuk@@KTy*4BnJp3#+8j2~8M0qT6@2Uv5TTTQz&wYT`s7x}{g%1@`rOpY(F{sgx2wa-4CT^|2h z5eXcwqHq{lBdQsU^>pKKklb65`wM#vCzRMBa`CuX=FU#g3Hu`jYXT`fVguFWt?=ZX zz~x3N*XfswYxae2?(AxE<9jwc^K3VWUG#t)V+$T&qu`24+T}czTPe|9EzWv^w4-b_WxGhryHV_fG%o<#iu=YhV1yes6cX z?Th}OzmQWpn%Lbtk&Fa{Zc2Ir!;GvTdBh=|pLUPG!t?D<9e#uyvCp0U#c8jHfLwN2 zn_acL{m$|AS-Z=wuDe&4z0*DNvwZ?nDlpb@6U)QAK?L!iFgr1f%NMz4BG+~)!yhK0 z89&Z41(;D*6tv)e9s+^CV5CA2V*ZuL<0qQA>Cq-Wv8zX4+WjjS}hbIH=$o-Nt0`94Q*5P`m-%0dV05P#JuI*O(UAgU(9uxApEK z40gXvM}%;1v0`?d=MRgP%}i4J_Hc&U05dM z?@j9n7WcrS42{weQ>`uxZKPE04m?M1Kz9SA68TIVP9j@;nL;7i7B;_YkH=An zq}x$nc(=XBGy2nG;Q+eQfhOA{Pji`Ijun-&D8_{ciy9uVsXi6vrUbB|r%7OQgyPA( z;8q1+R?FSCCb68O$c-mH6$iTyqi@2BC&}gJQS?^6*7n#X;@>1+gkfZTb4Aez94vGs z$v94FAts%+V61yY{Cs|I+{f%7a2=W?i+MVIOzwqiD~VO2S-~vivY;7Ct9{WWW;-L= z#T-}v6v#|7S;<#9*-?`*VJWhR;-Du*pnI${pv^|?C`6Gs@jJW~(BwWOF6Fy42pbRt z4-Q-Z0A09jOFWIA}FwM6)lZM?uc8G7}Jt#)$xt;_D{vIQNAAI|i=L z+1}RiHYQ7`0s9TuxrYmZBGY0)c-JwX2;v)Y%o8o--*1`piu)qc4y7ogva-<~mET~e z7bkh?g6Dt`r%ebohLh9}YK_WIe|x8$LaFR*xVru~NUB@OXj^1wjzwZw${%qev#!0% zoiR<<$(wa_&N$$KX+pI`k(hK*-K+5P1miURHL-lo+NhvBxzTFgb~Hxe<3ij;EXJZ5 z5{tX;NTi_>I6IUFI+U>bZeT#l%d>lihC`m=_E9o8Ka?qc2P$E(jPhByI>y5A9BS}z zZymH1@x=ei5l*zK(r^}^DX#Z?SjdI%d81Z7+c@mcmdy(k6_c7qRFdjo{1+!3m99{} zzdlr^ytxrRm3Oi2IIy0TkDs`7ixQ#{d_^@dp99*$(9|_(PC{Gxa>MGN*~zD?Au$T) 
z?DbcO^6WMkTRa1~={$@TxKA%<9&%~!iUCQ}z;o86QozfDR3k%vW`XDZvh-|t3y!O) zjq6f;kq=561ni1ws?o5>VN#69m?f1%(wsZ?p6UEsTmofgEryGN)+*$#1wh6rlhu)) zq9mUMEzhwRAFsZ<@r3yr4-vyf&Jv}q<-=5n&oVH9Z^5@0AGN-Rom)|S3vzcMmPhL5 zDzN<8_NceNm;KxOSgqP&QID_-A?%`{PLh>c-U(zaq7*H@V?Q!(_Gy+=QJ()*zwx zQMNkirPVgxS+@7iEYxWEai}a%R4EFs?_D%Zqhbx+S*koYyv=@Mp)fXa5a`=HkKn~+ z|MX}4W<)(>Hg-_LAJLo}MrcWbj_y1m-bu&_28oxSxuJ)@Z?T)nP?@$Q=2Eo@W%wbU z5MB;0WZe*WUo`O`2!3Oix)`|_4fAnAH_qw8M1}Yi@_gtA%|bh5wV+1W$9M}rksrAh z%%`y6<3l_@dBCt3-6J0as_xgIcl#-zb~qZr0N@E;ydAJKfp^1uVKSljkPOCAICdld z5x$2ONa}q&NoD~g5O%F}$e1k}=?e`T>F{Kp>E#}JnuY_u!IB{SGaWA*4L+6#669;% zi6=Lr2OIiJJza?esTyjGX}V>7nB2nBvYw-L>duS38{g%oC$z`$$?!e+bj-G}io@e5 zoWvqc{5H}vddzlPepH~n+ZOlmQ|$lwJ1wp4EopUX>{WlUY#&yf z>>FtbhabQDh0g}t!lFAnu*R@*(hd_@?zSnQ$qCrsnfj^~Ji=;v_sfg9ToeUy)b?6Z zk_9kQSBncbiEV4_DlQ^&?PKAeC^Rc~q-2VP)IQAQL{USRzUyB5%V}B|;1U)GuO6bG z=Btq?Qx=mUl`rIU0f=h>9MUiW6pt`U?L_)bXm1(gm%RR^hbwhLo?tq41~FYhXz28a z6swWDzAOMml-km&_~eFX`1cf751fq%>(erVy&Bw-(6os)Am;69a-Z1&ZXB)ygtfyx zX?US37hx2ybXGxz*}d+ac6ZM&PdcAF$89=ho00vz9*$}{j$HKdgAH*seg!AN?*N(7 z62;G;+&79VLQ-8v(&HP&Js+8M%%CIuF_UOwClkQQ?8SmTyjv zLt;u)S^24Sh3@BNQuG=BX$l+r>_su)a5M%jH{ge1`1}l9?Es%ErLmvNus84E=N$WT z)!LVw_B723m5!Wi;+1v4cwkjCzvPK88~hJ5^XG!~FyuY+O}vtrb&j^Gd9N;c;!6hq zgU&l{I&;naVFCJ}lU2=rZOOA=I{Y7I{`nyi&ptid%;!O@qCi+p)7p_?pWa^?A80j~ zMG5nQ^TSZaNvF?*VEc?sL0Skvde)`m6_EoaRbM7!C>{O}HT_EOc?JR0QVg2vlSu+X zNq_##FIxqeGVa-iy}46P-g2nc&NtK2VN5B-tJ?irOP=b2fq&P_d%8EBx%nyYV~P!- z^K;=$ryUUL#*$Yxe3innuTBONaTYv#f(|ifz4qGJ_e#NRrIZX(EbUkiNj&2LdwsKD zRleBqWQ?9gu5-*?4Q2zEEa4jj?m-sXQ@$X1UZd{<-e$du{v{1X#tPQYu7&&WQcpqF z8y6<2=MaMG+Hc&+Xhu$UuwfuKvmp1{y|Z%{gE+l-G%HVQttHglYol4yZVS;W z;Eq)@z*L*TE$Qe3wVs?_bx)7m{nL}&S6>wVMhbsG2pEmRpb-AqAkklcAwa5_NU71R z@70d#)oP;(Tnqo(tG5(v4oh}0KJZj~vIr^MX(3oY z5_hN{r0^#qr$r}lFcvkNEj|Qp0I>CR6 z@Y1KqKsi{Yw{pH!-xa0ECF2GlBibM^l1KfQiRaAdVqUMU2PE}$ZyZ>5?2SC%?Yq7^ zz6EA{A&S<)tp~AHJ`LEclr8#v*?E(aK6eB}PG^w#bV=aFM=MVRFe_3ZeL~l3j4i2H z+=dfa*=aS$A7}$@sv2whw-7#2VM9E0~Ocb=z9CtaQeV zd(WA)#{PO>`wBZa_i`*FDD2v-N1luoHY-}&Cn zno0_d@$@5}M9&sGwRc|>gu@hs1VO;$tC!IgW^@gNljwJ z8Q@uy-Mn7Y;CV=Ab_NJajLt8+;@bR)stXrP%9;LO##f2UqhYZI(9BT0Dc+i6cAd+UW++W zia+M0w5OH8%adlRx#|Vy@)V0^_%aF(H-?&}pzywmj-3l|8qHbTtD0? zV|G3(>6|DuvVHz|h?J&9bu>H$dwi9WYCIJk#oRVdHfCi?7N(y+K1EhxN>Q4eiFGt} zY!qNCPiagO<=`r3(Kd0Hb7q8BN-X@^i<j^q5LKq>T4?U3$2gp*WxYIjQFa+&O(m!lH&Y19AgFZ~09u7)h7L$iiUAWvW(6C&4(XFrHf8OX zlZHR*ROA?fHn|21F(}|k;wf59B@z%Pgq%E?XOpus*OVP+L@<59uLpstM3OKjUXygj zZ4PP7pGIOo!q)eyJZw$&LuoL=f2Cy*R@h9YB6#kVN1rW(P&>jBYRx(aiRt;FCO4eRf=>w3fGWU`tc>bh8x$175psvg3OV#6F~=aw?1$BlZARVE$gx zks3lA0&|$rUkrOzC;VT(@XQHlWj3oovP9omn8nOd-v^IKpsiUHdlzlG-5QG9`ZjmA z$3D+jajxP%C+1+)RKRV#KI^nv@~Xa2>*8!GwzapT0qC=h9U-}#pvAfeqE zZ#`>nc6dJP(pNOJt9)-M-+Q*MS{dH_zlx&#y4>jj6S%Ry)kQEvF@m^0|F_iu7Qz3| zYGB!UtDKRINJm1WkE(%J00YVR=oxGDh^PAv;}Wtq{AiEPEc{?hv#cMnp@2_N!a1Gf z2J;xczHt?yE1tJH6z#ZNA{$SSz~?{9fgSM0|11a6uC4<(0{|R2OZZRFA)-D{-G3_| zwQ5?mtyK{nI8bj~K+L$}#(0|RyKz!OgAJ>UPI%0OoWc_0n215bDR;^`pSu&d;5MIw z?7I~iCM!eGVwL|OF>MXtST>6HsJVXb<8adq>@4&241RXm{yK_t{r7Jv=w~T#<={f9 z8iMt953-TZ+4$h*M0j2BN9c7LywW#e)e~M*=?Q+6x0M7Ig0FYW_|3gRgYO(pswj4o zN=u>iY^dKKZ5&GGR>%b@G{tq6VH^K#Nh>{tIK7Y%*Et<$` zM9~&;(mql`yTrRfG_OVAhf<+X`h~ZFWKoUfCzIO?D={*hozt&`BW&%GWNZ1r>BohyqRSX=L|M@N+4yK8xc;vjO7`%-LwVXItwmBwL+k@kpo|8uoGta>@#E`g(wX zJqrZ{pf!jy;*~Pycst4>*mA?^don2+Xd=v=MF^X@FW{HpJ01eyRYC-a7!d1`?Hn1fek;zfG2c_t4ksy<>mC?@;8S zWQp@T1FbCw{es(=jy3g97D-!)I335A_fh;e9wdqibXTr}ViO zAo*>px2S5y)Y4e`L1u=r{&x9-7~x+4B;D$x{OO? 
z$6Y`kSVo4rZVoWc8P_9Kk*U4xDANqKO9YLKSq^*>ock?^)-p#xUR7Jnl)UkQS1U?J ze_HJ^H-rhbVs_gM=LM8G2j1E|8V4Da&{%rXZE*r&KEn+x~F zEkX~i$}V{feFg@|`r(;^+L_m5(ar)>O%zaAIrLL1Hz3d-o{kZ_1Paiy;V5cJc@BR| zP(U}kP8Qv&8hp((?Y`C=I>wwd78q<(On79=kqGoXt7%L;TpNNFL?j*S_wEz+Ugwpb zSX%T1LZHYrk*0)4_qu}OG&yfwVU?@%eX;Vk@S%e{2AL`4Sg?%!LE^L`h#}W{34S$`s-*T*4yNjLNjy-F?99{99 z8heMw$Vr7w?%Jx-IOKuY$rYX~c&u$>L-r0iL1 z{o$#irAvuFiszXs>pPi8Nt5AHbyMblNBOM>gz_YLM{c;BDVYs3EsGZ?=r<55evPCi z`XsXA#7XcH_{%WoI9rY8t#6#c$X|qGugs%&oA2RadrHdP6m^kSmy<9fEBDf;e(m$t zDcZ5pzB2O2dN5@wbIBvAo);V$r72%9xRgwh`8Vh-DL;xgLbGh~#v-x^!2)C?I7_;* z7q8`tOL#0ufwW^%^4iqcjzM(}=?qLiYA=OV-hGn}!jON#hkDJLU}Nl1@k-*i)q8C4 zr?}deH7mmQVPE=-H_x~4|LgTxIOA{K?+5C3y(jDe_S+l{cCah%RTOC_!V%4zhFjpe zQ{x+~EH1;KgqA6pbL+7w%wRRoc!ltCLU${g>wec>x>$U8R>y+gevlD3E!v8mU$W=Y z0exQB8{P57Mv{<0s!Ykps&L8gzw$j>1T@Hz`-a1jdMzvjSi=Nz06SDdF^V7vWehu1oOlZoLXF@J1+WSY*l`Mb zt(+D`i`6Z{phC+={VP3c##@wmX|2JTZjQd~w#}dEF}wg7KEg4rDdN~Nrkkd@?GT~1 z7$M_Z{VDlnpXv9}4E0gIbpC5lCvgazSX_Ti03%Xk^tqZ7EV22hrT*akvJoR4x#0U)eCngEo!DgRWYKmzOD=!8o zs09pJ8O_PRc!t z06#89JT+K@QwcSsVrxT6tPosQ;_wy_1O9=$woA6YhrktB2tT|Il3>MdF7HvpViTBY zpmI5)vY%bu7NgFuaIKocKd!640*D=RTQ7l*##=D_v?atA62J=yDbo739q_I(`yKh* zfH2^!T$goWE%D1H^HAF==MU{_#2|W3wXmXwi|9+&wcTqyRmregD5cu7_UfE)o~j_ZY4d%p+hsfKj*2-} zm3E@~YS@l0)#D$o&Xai_f>Q2CIkt)V5`E?PfHX>qxFW*}j^39KrW4SPVSRkbUGz~~ zR9@mHpl`97i0t?SVnKl3_f8>n6Lhnr8ml({4$LiMETbxd!v-RaH*B^|@5b(bDk`v1z44_*`N0bt8Of@|zo6JK`-{TupME~=7z z8_kfLKG46XazUM0t(@S2O%x9ka0uiE;&fCNR3ig=hfZ-yLhwD@EV$M{Yz=MzQURnVIR+P8UA*$z&y~>G7{#`Ztwaw{Z5IQHtLMlVduf1E$CmM>S!V_ZfvacE=y(~O(CN%D zH)_({_cm7@zF(;MQ_0E~Rv7C&fspb}+BAcv>A8;@LKQBWi)j*J1@sC>d=@@>&`YdPV;envUBE5fecpVzue^;)6v8t*pP)0XA@+3$ zVfZe-LMUfRXIK9Iww3% z6EPi?nad^3K`%RXD}&13kJPPI%e2H<@yPF+BG{LS^m0|d+*5n9olz6PRJ;vpGDW$T zvm|io5>5gD=Gzfv0XLnaz?0|K3C3B!Uxu+}+5G#|m7??sJ%GK#}NBp*9eW-R}!lISL)B z$2`&>=H;A3fjJRmIiW(fHcH2a>HT-ZGoc-V$6~Rbm5>K});^*1%~xxzfICY}o?hFJ&qRQtgcUxhWQBPi((S!Ni1Smx7f`DkK%t zynq_L6dTvrLfaqey;%mLc)J>&SD4i&CxMme!Fe6&R$@GfVQ^u!nr&qXyN|8)Ur|zk z=3Oi5N3%d36@f&xdwa3(9XR1b{{GAYwJO%9-0hXVe1!`Xv{%74oho`;Q;6YRz1}DE zEP~FD^(Lk%Xcz{)HwtjS`2XDFf~!1T5<@P8U6Dg-Y6szp88#2xn4m6bp^nM0W98W- zImZ%*3`s6p#SnseuWU72AP3OXWKo~L69h~hi>Q~p@ByW>m8AVUvgKT(#mG;kn6z+O z0OFaRz#xBUcMM2{#lapI&n^uiG%Jl|R^DW7Zxn`9+Ajl5P3DdI<1jG>TowL{#vd(l zcN91kohOQO@8^ojF?ni_Li?LqucO*ABb9B^!r=gDDhkKcrQI?BUyjK*L}YFlkKqd= zp3b{iO=E^fxqK)V{AcYlXW?KRI0=mf@#zV?Jh&x0-l!ZV6r2sGF*8@Vutx!!hRW`u zIM8tW@{&Rh^_aIp>_pVwJ6au9NY~ zyM4r+%yxOY&3bfv3Tq|>(ulv=vHw_tKmxm7a9v-MwYThRudLzk*rT~jC=@1!hiSyT}k$djSf8;9@I4|qDJAM zdn^so+0m#ITyP0Bl8j+HG&@ydfR@|41X6Wu(v;QOU;f@`lnb+B!ExKG!)Si!YAL5^ zv4B6v6W6hroVY26u)C9X=ZWui&=2jnT~AYDf_A#SSR2xdz38lXmxh z7TSJz49Tc<>spF+rfsi@m^N5YkC!tuLaVNXHvS+^d{X+i;?OjYm;C<6w1Lr3L(3(YHBA^<2$DibK_{X`Jxrtu@Q)3lHyUyYC+_L?J3mXVdtQUQwY)OF;j`fTCAMQ8Tnl} z4vd^jeuZ7dnab4`lJ}=ktyYzPkOW!13!B6ID$+ z{?jGU#H~vv!t$Odvh=FZV$E~Gb|u9WE;u_hq&2!cT--W#S(#}{ITA*8l?mn7q@V!M zGQ`U;otPo`tEq2SfL*$omFGkTRY>6b(h>Abe#7e1ggIn<1L~ z=ymQ9q+O1U7DSX6P1gzJvcp=clp37S45z;Ug72)~p*cJ*c+}>(>EyQj#|WZ5?RON= zI+fxROTkTNF)q~jVuny2fpycl?UMG|)pi`{-du;0!6Dw&rU?po*LAQdW!F;@oo-c_ znLyWCFdX}B^48&Y3p=%vt*=1a(v-TPJ$A>b?rt1)fNycZq}_W#+onN7@!zC}=cWz5 zfWVHgZifS+3A^`<8VA9aCpRUOjdl}*$It2_|FLMxk%=-}#`lgKv4K7cV#WXu=It$7 z`=!sEB!KwDI1XT6YSIi}=u%-q;;_d1`p}C$*bs-U`73z#gjO|>6_@elGK`?(IT5mo z3um?EO~^UXeJCtEd&qqAU>LCMexRgVko;VjQHw$CqKJ>BR zlnLcIxGEP+a$<_PzQYb5cG;R6Gnx${EH48&VZ2~4&a>8C1B9Z+`pKcHn2sLskQ^n4-1OE8XDKM zY_jwUmU2Q43O1A%*uBTW9TXAK+43F2(4?zdJoLT4ira3H%+%cttBUQ`aVg+B(7Xs} ziDClKLFStgYUPdwLEzG1c&;y)3Jrw;d<{)-QE35iwT-ThKruU#|1 zv^0p;fLSFH-xF)GTPcPly_lgt%M;_WDLX@sB0ZfU6vFm7+YKmjc65veC~94**4G+b 
zk9VEwmN-3P78k>4zmdRS1to==l%ybGit$&-X?9MOex6kzU?>;>ozGd;GWAGYww1QbVgmdqQ_M) zzsir{!Djk$ToKql{^J$Xg5_sbwEQC1?JM(@AThOGc*}gy{@OYXl(;p|cZC~04V5-ta5+gX_9Y9bS|U>c)E=eUo~m2kwl0y*G=NohOhSbRl*fC31Q zJ18{GcXP7!TWpsadNZUv)=`%0gDD`_Ne*vra^8NMuWabW?NF-&vnlmYx&YIe+InV| zj!78x8DCHYYmpsVi97_2i~8UG4)94d+S%)2_l1qzWEP}WFQTtJL$v3AhP9SCpw>*F zK|F(*2K+bVA7hwPgvDvf_nSEJz*3*%y2ZDa%vf{wMgvrn&(GLD#6z*vQjz16JeGXZ~L2p-M8)QEOCyOt+c7z5PaNWvjm3kuLj zRku-ldO>S)%c(Z{WQAEqWWIeuYM+A=zuSx@*#fzz*(H(f4m{z;9(>0cl`4ZCI&QKi zX$-y4WX=4PG>5)*_zkBg_yiKxIZ%J z%}#~tLsva9AM5DHl}e;DUC)aSIN;pjxbP0iebZdscqsUPHscr$-* zKnR9`LZ0k+OnsS#_{8cU4Vdp*xTZ+vbk5#62dgg$?A^b#^}!wCx~c-fq1Heb&}T<+ zRKN{C*p>}TB)cbp^ZlR6;$;f12=bn1RC6Y-3$mbJR96#q73>aRioM#X?#LcJaqCh( zOZ_pf*DF3Zm$dv;y^1M3BYZ~*D7!$|3MT*;J^h(w4!MXv5^87 zhUw<+0h+#IQKf6s(ITpoze_b;7m;JT>s*KJBq#|yj5r8qPN85!fxG_nNv8ARy) zSx+UttJQ-E>$5`a#!F$Za6({i!!+KxyGHz?vg1>O)8FoxX4y;hrgfevj5jZIC~YZ= z7Xh`8u^i}f1Ibw7a-gjRC^7fh(Z+aTJBUITN%cAdFGKxzhLOO(OszNMyLqY>ahMRR zlqOi2Sav5yQp;Q!VBlxj+Hr%@oYVWe zoAs)^B7WKf4*ZrHuR+*bTMEHmdb% z2pa0HdjJ#@*XUF+YKQfxa2W7oe?IyTiW|^*uj`w7s`whOp6q)vVwL<_#NmF>Br%Ne z;6nnmHvLs&PQzC8dZ;P}>Exz#Mj8kHFyw9lVaHG8Y>Gh8PCKUy=2bZfCRj+9+oSy~ zUq?{P7C_YmJkqcK#!Wr6LjoJQn+(uAFykp!|C08+VUPVoH8#wjUrENYz8zlx=7xLX zxZK5f!TqR5!Q1IMdA}SrC0azF_(FI9nr-~=Ov!h-_a*sr``T>-^Hab-&|D}ws4IYb zI~h_K;}_zk^)JT&eTzNO_(FB^U30tLtESUjC%$r^GpL|IJiGnAFWH*50_ylNM-rVqi_*1K!1azN!;4`A$PI{Ny zHC(~x{7=Rhx7BhaIuM?Ii~lgFzum24t!yD@2RId}hUSMul|EQ>W+nP_?ooRky896( zh=w#t4vsB6=Aiu`2)^BCf@uT>vIUrSwLNm3v%8e6o=GGO6`-+lVge*UE0cA-3z~F z4M25b=W=?$awG*Q(0li1elsS7xXH8?Z1SU=#r(85Rja1)Hjt zSjfTzU0nzSM7MSM!$j7bq_vrYcHcc*_q_b1plWOp4M^j|y0H(>DIe-pOWI2@A1kXn z{>eWEO~mIug>uP=v2=BR5GlW_CI$2Ku|DbB)AP(AUA5}quVbXkJd9&Fp?yp_I5gkr zID>F=@PcXTvoj}x2ll=a4zU>#L1}V6;?uv>#|@_Ao>?jX2^1YLxauS)d5lafhby}7 z=r*dGS}j@x~7DL0mO_nnDb#QlxmQVQ+j}P`ux4|+F7Lj&^{)nNVbxtoXP;V=KBJVE~ih}Ddt@0>LmKVO$UB{>fpq za69_{;)(-6;wjh1_Z&mn*g-JH2mg0~NIdyT9N+;MWKL{FMyY^ctYf6$)+=JcYUmtq zjR=hY?+~$ar=vUI8bXr%E|W7ywDpCVA90`gKVY%YfZ)HYKMdU)+reyfgrsuPkXf;` zyS#^M2_978l!L?g2ZO<4@QABQlInUu1|kZ{{l`%FqI;76XM~unF@roS9;U9r4W;C9 z@XM*g6Rjt;u7%Gn@`Ymp(-2E@r-0N?bVO1+%>=K!IbeZ<3g`ck9u_4)v%u`eD7BJp z2f+tvj++Xxge>9-TMsh;S0K#O>p=XF#VOoIZ@(ycte7ci#W0lc9!jA8U(q4s3^HcA z)M@~c2hqP6Fc5?<<~x^5pbm6;0Folpu(6C*32{R48?(+H>T^B*fSitUfECf*B__G; zoa9JH0~Wb_&_R$|mM^pyscvG5N(moMGY)gn8s_tI<|Ht^Q}@PxM`? zfwb*9O&Rn8fqnHHwf%p>#QZ2MnWWg+3Jmgh|G+J`Cl#d-C78S=f~YS0uTSxlv)sh* z^VSrUc}(8myZEjj)beB{K^mu&C6Ar17dQXC6ey%~oxXipL2NNYZs}Xub*Q^8g=Y#&|u#yg%uH|>t0x(7cV;%Im_M? 
zg}`HZ{KezsI=AhwztHrd*$ITl@-g$iOq_QB9nD=M?(6(|>GXcG>N>2ZXUODh6sZ$9 zfYW;?h&Pe^ge}Ps$pcgNkz_E?mS^A|JW;hHJqx|TRP|x#*(WRF%e+mUKOwc1?6Gq# zQ@x~C2|HYAP4=zx_xK`&zkCxqu=stvQ$F+k{H)zl{n@r%-LvMezYm}^n-QSxzCJMy zTj9A}y%VbWF}CZY+M%@bjEekTl%17dS5M7P`$!kC)*YdAdY20w$WuIt=~@O7F8N#N z=>WySdD2QpsOYqHRU#oAKs&rN3#T>T9pJ{$IO0TL4hy#rTmJ32MX|v^u>(*8GMHC8XvfEF~cw-&yUBL1YA!`#QVue^usmR_1*o*)3`F{{PXI_?lU7uD6^vvbW`Qm>I=`f-Yr8tzp~5&;n`H+EvEywb4Bxx zzon}A(ts`gX$D4yKTE%OQLFePNYw3(s-BC{l--SRDE?>Bh=)T+Vz0hOCq~tIBMXFz z@aiY2kR|{$p}koOszs{)0#03CM76CKE~UXz&7s9rty1I(y2hkNfueWJ-jtJwS-^oK z3Ff}ZC)456L(BcGTSfRWyN>F1^#^EVf~@gFcLyd6)%rB*O*olK=Gc-46~Oq4z%946 z}t z*A3DiIolyaLmdnE#M9o{)ZOh)!CY5B_KhQGA5X0Kj3t#FvLc;D9yR{`r$rLzQb)It zfsQ;lk-kowXj=&{EY5=ZW`rT;)>XujPodqy6X*T97Ihufg=G8F+(RYivcIqxFM+~N zmsuY8U9j(1Sk!qG1QmG0V3&D~#|xb0C5Q`=!!RPV+HCA*U|q$Tt*>vbO%1JUf|fbY z;{jc?OEbc|uK{?oL@nwYWc|9Mggc7TNKQc2aBC7irn=c>dFa4Ilieq+PuS3i*&kxW ziyry;BJ8qzvUZ%Q?f`A~o1}Qb2Alh>3AvNZtT&HyRy!LF_T-;V=EM)3*x-Ip6+u0c z{c7D{z=ehFd~nxZ?RdB^O7y))=uI~?k!4r?2x3S0X2`n1Z)H;4U9Z?_lZxp4OqtAenT@B`BB7p+wd`jZ|qs%-KrXjwF}7GgWiQ@ z^>uRH>^!Pj<5zIcE{{<;8&EfGC1UnZQJ6yyBCCBE4w$$E-|Yp=-;+Jup2%&3WF2Fr z{H>52D=Wm6a?x{Bkna*qZTv*3=?egGA+T`-)Zeq0)9!Qmd%}DMnRd$!I#UqaBNG<% z;NI6_{*oNst|TxmoW8jmHJ@b6ZPRo_+0Kp07iINNI&}2%#Jt(pA{Ba&ig-n1T=iiV z&zogD#StT%MSib>dT4zmkJcp=mCYzjyzoe#m!S=I&5rHHhiLdMeMjAP`|oe+wt@Da zx8fT=!l(~$UQ%G!)6-cA!^Rt&HG|uRetOVD^VC8-=w%^FFJ9+pfuaB`|KUlk04JDg z!h2QppzpEdfqriXp>h=D%f(k*j$d45Q!;!$%r`n?49x^sV@0f#5oa8<3Z$>yz7X-} zpHj*so%xXmpAQw)nc|k>%7w8zigpFO_Ds7x$9)tjA|5!)XF{Wd!(h$|M*MJoXvex! z#gF-W@I`4!`3fAsWTOx;0H#-hd!~!S>G!>Os=}41hwlb;`pTH<|M;JqcHA#l zZD$$m_?}b22XEWCxO_&RPJ%7JDdKU&ppC6zzO730&Ya$I&|(^K*$W-JZEpcH_`NlQtE`3|-3yc$mSv?g z3W0(^9dt)MlsLQOxPBLfM3N8}$H<>B889cemFYo;Ujm+Umd0gK-;I$ONe(2aLVxXi zeT=1G>C>*t&X0WQ^`w$Uvy)((ET+&Nwv(>3dE^-!8F!>dat&cy6F$>0AIj$2Kk0SR zx?-&4-7UcU?li7(tN0LlWFPWZ>bni`&v>%f?yJ}}n~!dKiL!`K@8mkoXHnRR;aCVNZkGm6odJ5+$gtdECy zKl**`yJ_!fOq0jCM>FCd45QDZc)KH~^j++AgYjd1_7@YY;a0aT_)L~b49K<(j2dEj zJ;?3qte!|V3w_V5=qox*5BG-jMtm79?T{|-ag?;j$`^vCk!Sq1cPQNMHy)M5sxz%7 z7Jh=#5$#Nk`vEzP zkmZ)(StKqmeU#D|eD|a1A3Om96ojEuCi^O?p`__a>-Zew+s0uf?Nmm?6dbiBEF#ii z(b;h23t=yfq@LT9dBbg+K83>YsQp4AAaOYE%q(HtArK(mJP0?pe@WDVOxA*6FCB3W zs}&KuBAQ|vrXD`wX~-{WJci-VOtEl)yN1FJb9+?9NbR;8%q(NNNt5CF=jVdpsRn5j zzlp9vpg5rnC)y8L2>fzHB8#aKvHP;$elWCpYGQX_t}%l&dJfO(dSzjCEK2965FLVy z??xLO^K(D7;`R_yWKAkJG6dSK6lz0i+AmMDksx?S`S4**$iuX>h(eE2K}2tju_U6t zXA}Z~WB&FhvYNYKGwI0Io;4;X#el?xa1(0S$qXh?{fnNtO-N==YBTxTB)pb9+!G5S z$k-MBZ0z@N@KuS$*)~+gFnRev)T^To3&wvH`IC&dnfnjl@&ozP4K4WSg3sVxG&)WU z-E-1=LssS{4=12Ogzb2qWZB;tn(0lX#cFhVmJq_nx>i0BI~!W45&9ERRZEi*PQ7dr z#yU3hO7riD=>FNp56Weu?z4NpxmIV;A4RY4dF}s^b_&SZfpDzhHHgas@Lm%BvGmqj zVj4A-EP@m^Xv!no!Y~%KSVI*qo9+i+Oyo>QbV?owe^TgAwaWU=%(noX&O^f7@WNZi zA~{+}VNuiQMVg+^?`V`yyeXjJ{jw?>tdOGck7Z?lW3@|*{e1{h(>_*~#LZOm#hl(O zTFL^H{8`gjbwDitB9Z0q04?Ei-69}k1?naXXD-$lfm10>InXq(vYSa0?|f--SeM|( zQW>vFp8M;%`)R%JCJ%TAPl(dkuS?>YPPnf#0K zyPW?!H9w=>W=Cg!-gbji=IFw(tLytwe>~6sI`U81(07&J=d*v4 z*5`?rZFtLSr1gDgqPBX^7rUEesN7n=SN{=T{pBjICx05X(^#9~uAT#feLLiOghDqE zXMY~6T2qbx_q!SMM_=sQz@E^j|Cf1KAgeYH92q}Tk_Z=yCUKUP^}1n#h8~g%$zoSn za}eyvubN|(cN}A8)%2@HtS6=;PgOS;(a6LX-5xu6NG$GyQk%Q$3`X_IZjeM)H9yJJ2zV8 zEseYIus_nSAzJIRKuUL5PjOKSa=SAoEC=Bc^8!*L{D!0(TzfE~3kwT?tE_%t@D1h1 zSHI;l!{30IGV=;5Q=JW){_2m_(#Tz?@Xr=vrddl{=!*CfqzXmtsWn~{#WjB;1J8mxRRda%TJ&@!A zt=8GsCf0}4XNOjtuNWQVCDKl;iAERC@0PEOf;6Py>-|@>-?JsTVtLK#jGwKU?`Gwu z<&B;(v{a_)Gw__{Zs!+okcY2kI!=80^`BPn#2er*xg^sQNNR^!%gx4VoS(VCmMeQn z?qHByO>o{`n3Dd^7KhZV{8qE$m&7AQPi8^P?bb*gbe&VZP+m7e>@rM@j)S3(@8}O& 
zwYaBag9A85Q{<_I0&|sAN`?0xy3Z}cute}hAy!1F`0DV$Z5Mw~oJKkYv!&elSl96) zkOUWWpWA4>=fnBcgVG%i%;b>tN?>j@*eBXRsh7HaKHHpbbp(1`Vt_k!!}c-xA!D+9 zp>7u_ef8up8g@;yFE+u)+K8~&j*r(J!|P_V07}k-*5u!mTA@u#i&{I^po)aniP}b* z#Wp1WiN`{g#b$+=vEMO3xdHbvTCnCA(8jZsNMT(V?2Q~5?5%ZgKs(jykthiF_Lz%Ks_Aku(SG~uk#e7?;X`_8FRJT~!pY$=@ zhM)8ue+(V3IqSM*lO5y>_~CiR9dtdb9bS9x$eaTci6xyKkYscnt1sD4p zU}pU-_wAo3z#8t)Jp%6Ke#xtGYSFGTewmpsH zUp~9FZSqTg)xos@4K)ZO^JtArz%0tw~wd`2(m zAp&*~7K(R-JBn?!nos8m8UA5f&7jDhS67`llne$=>zH$1dn zY#9pwmd*U)oI-msWF$MaxK;-H3PNrW%DAk5AD2-LsAu9^KfJ7$HvnAKFC$-c1>od# z((E*D;V)BYhWo-CfaUYqH(B+lRb4IPEP=XhS;Km8TbBLBPn^P6u^Fu2f|Ny( zztRl=RHfc9{5untjVO4_|K)> zD4w3V6VUyydj!Qdm4;5LMvz%{Jh!*8+q2(&&Yr+q@Cnj~EF@lmy)m<6HqjtW8~>{= zGVh7wA7g~&QgCJylO8I~%bO#WaPvnmIKm7|(+%xkD&~tvJx2x@y9AXPHpXV z#xhx&Gg=1|gw=r)bg@0juxirO-{R96pPH)%nc+UEUMNtr$Kj&ZfrPNQJ@TNKgA@#p zr}dzQYvkMe*B(d0rr>^8i==7^>{Kd5E=63#$03OSHb@j*@~VC&(cgX~SzsQRe^Do7 zLQIYSe>fx}V;0x;Y62)?luGwiAq+u-c7a8+Hu1smK1>hKR)L*pbhe2Lj3j~O+a*Pa zBpyxM2Pv-&d4jUrOMsR%0%-5Wq;Of4k@4xpBa?UAd$UdSgSP8QSTS3uhu9Fb;IZc~ zqtBEQhE{!kusJSdB4wYgVf>RkEE{m0{HC6o1KSwC%KuSF>@kG_4+q`U8uKF$b5PBm z>&?H@CF?ori>?ny7?Ban?j0E1pN&ZAbruAB(3R+kYRV0_F>I2x(yb=`wl!NW&JnDR zj2vKw1mh}A1$Ufnp-i08?}RBf`N1{|V~*@6>s1~#Cl;GsVCNpgJ&QYK1_TLXKmJ4u zn+io&PqMNxK4*M0VTD-p?UuF_0&=?qTe8e|_08OW}u>bLql1PtgGG7WoSi;^Mm4j8#oqPpNeE6z3J zy}I_Q9I*}0M*cnF8_d)PSP#&uB6I?J`|Yu0VN=-8e%#kAH1aQTAm?~EiOaYLkT`)h zGyEPN8Tjn!57lfZKI{Ce-f%Ym?g7=0dMOg?;^Sfq*SA`T?QSjx(o)uazAIJOi7sq0 z_TUFI!oPx!Qs_jSNK@Jz--jbv4=idqPKdNLcL$lAN2-RBK@my7My zSVDr84ObqwcxWU!uV|A62xxsjl=I=J5)Y&}8@mgd4o51x_R)bI=FLT0m8f)^j*~#F zaNu}7cg17-G&#xw%6bC$waKSSuG&b6m`kFS@6@%x^6b!o=$s&ODm&sT^_bvNuQE55QsOX7PTD-6U@tUwS zV31gS>3&>M`FBkxtyZuuCnyBw4dDns23$7CaO^a+v1h}>9R3)gmi3Z~KTDNm5e-Tq ziiTM{dbf6~AXKoU_{m+Rl$5+AM{YtL*G-`6k_*Ovz>V2Xxucj<3VcgpBqi zx-}g#-f`E*EXucOi4b4{vEbNYVVtwAH{1r>+Tn|-RN(pA2)v}QG-X05&J#tZD!>XL8+27R*zAuZ z81v!dgI8|SYE_K(-kd4bqYA0)O+U{DQGk(C(7125DI_#OkK(Ye1;jGfyFj8s7WLZ(YB2kd70+eJa zDX*<0e|Q1bwUp+pQpvKrBW-}_Q=>AvqE)p)N}>Rty=04eONgyBsohE$!|$GNAVQgz z`b^#Wps0i&rcvo4Vug&YCX;qxjhdKCa;U)2My@aGHdv7Zt`21T*Qko*+{oZi44JRV zqyU5L0yjU)o%*_2-bMMmQW?LZRu;f)9t)ACNwfBL_8>*V(b& z#qW1jX*M++ZC=c#MUVKOL;LC@%KeHwv0&C#>@)dft_2z;B@I{WMv*8zV_FzE_hAE$d_{%$5r0n+U$ z6d{TbVSnjtFFG^BTg!aURz22l#AE-b(KI={^WS#@E9Xn%bbr6Q^~XLGmOe+dzsl2J z-p?q1?_iYvl6?U_?C0c@C5blw)30L#7j06UGW#DR;df1aWt#^xIBzI6Tes(dJm+cZMCU?w zf1>gF(!w&74NCFu?dFCOX+GL91#4iK5ZVuz?Dv=kc_sv9;toq2F4 zV7k%RW6?q$4&jZzX+#C&<@&%9^S2Q=3<+KPA43cssu^>kItIUfAPkuP2QszN(^tXxfGrAoh zFzOy)*{T}*ASLhv^YY$t9Pi6avM~sZn(hluGKKMi|2((BUv9kLY;n~quRkeKBSzt` zn-K+sbPC^?d0sEEi+y##aQ=H(%Qqv0J$ z4vJv`Q{z&I+w1QA?iNhvP=d;mo8cj$FV)XkCO2r}dF3m(aQwfo`eb=~1q}xLfPI7O zpeBc5X^@`;eA_br!gxoxY1n!v>c1plz$1k%kt__p8?}7+CJMjzii-H;t2A$)xTHLf zIDNb`CqDZ&n7s5#HhbyQ+H^Cltv}jZbrrpsxTzoXqc>UyCKQe6yF{mru(Paj5aK8Y z50HX`s*CX^HgqY)5lK(0jDFSmOVCdizB1h+Zz2a=!a|R&+O)!Yo>=2j?0o&ODOcZ8 zU{$_it^Ob0MPJ4}nl+~90_-?H*2d3o@~`?YWA5+%j<^cp2A3;g3gI@Pz`G8_$r_Hh ziWlMGAkMFFfQGT=ouN(aDUKb_)6SK{!}myX_P4?Y=(juu;+|PU&!d0aAAr(n`_~R6 z>Q0-@M(n{A-IqPeS1!-5zdg3I^>uw}krQc1`Y+#=m4Zly`*=)+OUY!cH=sDbRGyT< z56V-ylas%cW}aX%iXgfPuj>6 zjyHYPHU8BSuH0Phe~V<^(7)PN5-1RNBMW0qjqnVWd~4tf-(NHKq(5$q-YvPD5S+&o zTa?kQ>JyOrc?wB%J>1>;SyCBRpd9`Wy52cBviM)ujWe-rYhv4&*tRFOZQHhOdt%$R z?WAvhd!K#l;+(2g)vMNjUA@%r_x(JNO6R`m{+qe&w0}0EHd_3kqj~=FyX#M8#%~#k zVGy)xNN{-z`oR#!!*2H}p2_VBne^@SA$qGEMQp*#*?pda-}+HXQB2}x!b1(vr)Z)$ zeU2ppHhBD^|LxpI0emw`Y%sTRFSO7cp4~XB8t{Y=lez2z!LC-obO?hq-LWs4EiBdb zSZ{oT(_aMP7@oG{#@NihC?U?a`VSB2636CnS%;&aD~F}ga8A(%Ee(I~` zK*R<&^|ww`6=kt>*BdS=o8?7^htlKF7!I>kc5FT2K1_b$I*+!NR}nQfeh{d51jMsJ 
zcMJymo-mcAUbuk~K143gvE>B9e5Hdi`10%*aMBOulHd)b!N5#cA4;?T^%;u)@iiuR*UxMi%SJhZAYJL!Tm>u#lkIsC%% zWpzlPjk}=R`jZUV`;jI8GHn|P#|PMt95lCx@qt?8QG?+H7DvzyJd(!09AG5#B^)^POb;2KEQD{Rkt z&|c;@(b-E>4CHacaNi-d+Q;n)YY)1BPLOQCtH{=`4l*rX;eF^j~hX?*lTJR||pk zCtlnh%Cl!F;OV{tp}U#>+qk#2bL&|YFr!Ba4ov=SccZs8#yNLj(a#RgKyesM)&4i(H;O4mi2WOUXY;kT^!?#L&NYMdHJeYC3G^oe zCbAQYD;0=u|03dlaxBls@jjr;%}4XZ#z8Ls?$$v~@v57BKH#nYVx81&gYDR|RB&IS zj1s?Ix@W3fG;3P7E|=lgUFd4{E&Orhydrc~|Bc^Ci1sn*UAM((djsO7R~_UV4+p%R zhUh<-XsgBdi0(eP0S0;P+0Wi#U8pYdpYmdZ)GQtlhRJodl6Z}0)X-oD$c@{%SsL(gZf8$Vp;w(29KuB6LRAG;aByEDSU;=oSr(%OrWn=?P z0Zh%HEP#Wr``nd|KCcs4G2Dy1#JKQt;??U1F!-k9$rB?)x)t+2(0RqHzHnb}2c&Z* z&=?P%MFF%NGbR`>5`X)9(tc8Dp%3)G11(Gd!+!#;Q$lF*04+2x4O-U?q3)a+q|x#; z3(!<4OmCEMF4Fc*o`~)7m3911k!q5sNB05*f(?=Af(>LgTfM-$K$BE(I+g?UFP3U;RbV9tuXu zEU?hW+)1ffoDx@8DsY~8e7OH-riEeTq$CR#hN{PIKhjR|xx;AZJ^kDg(~32E$Ex28 z%>80V#1r7a9s$0m6IlR1#BPZD@Bfo(MYTB9y*=)e((A-sbyFzDf~1!xwVmWWXb5Xd z5l8b_Uh`i1)A_sKE+UCZFPQrGfNhOZqhg%!F0Xrzf@4oN<+CPe+;{~2?&p2M?n88a0fG zj+4)6UR<~Mq6Q+>ZPG`KF2k>Ft8Q0jIFjTN)&v^~MCRzb1jMUSZ(&eSiDh}h0#rZX zf~VWOw}}=*L!&MAv~=4xn89fo^RU#Mg$CMK@lToEU=ItLPc;8%6&+ij9jQ!g38+G< z<G9P3vkCTIr4&n-MeZ_UoFljUe|%BacrgY z*osw6#3=0z9M97$ehiUd6N+Y*6%VvaBZX=$$BCl-rqA4}W?_~{kYx8YnVu3V*M>i8 zaShfIkp)vc%E-~2@FeTboKh@k=74?pa-dh7*n$}#1&R>Qy&++7*+H}JI?^NN617b2 zGgPdh3W9AJgSTsynS0F{rG8T?E!IS0Ry2vS*&Ob zDZuWz%uLvAiyjmCQIV@Tt5upR7Z~hmgB|S4;lB@?*OM0@bhsv4Bt2-?$wfMCR1;;e zKzTwGHz9xIyC8<~MAphCXAX8b-0nhVPi{-7kGi$0*$AU!ZicCNv1j?RxJKnG&(wBm zYRyBUp<%ollx|B>Ht=@38{tn^H)}tF1Is0I{S}<0z?FMI4IVB_!xO>z({d)@Me4Kr z&A0b|yTt(5g`(qgDMM6!-Zyr(n?Yq~QoN-^++?KSES2rH5vZZ6n`AZpL!6KW7CW&v zdy9dS0CFYc8E!F?J7gByKmfbn^7n6{ChV}0gVV-De3Z;3lKPQwgU7eVHYIx=@N(q& z_#G~o^p@sFrl;;9T``%TWhKR@iU7B2?_i%CQ>yO}0w zFFDCx6G(b1^#<1z)f5{RV7DRX(JAce!b_fWL3vT7iox8)o0c(C5@z2dyr~GiIp~`k z|Ev8c$WSD6pedmM-9#4Eo1QqA0_;b1L+jW@E3B^NRn;-uI%$86gSoBMD^$Ms^cR2lbXd-oEv~EoqP|iQ-3Va_|o_w8b|3oYh`C^ zKdKL*WYLgO4cud=d*G1mfFcpDMDH<2f_m)7si_T|PrdKyc6X?$R3tjiUA)X3HdsO) zb_d`T9zYPeJCJUV9^nhQ&E`eqhMkmEjai9Rfdn_YSv5OZb)80F;&F8#Zl3n)@6BxDI}?K76+ATt(DAFh)q9p6(3}6Sad}&BpswubcBS4wYKM)8W86 zGemuZk`pSE{*&5*UdDt*3ylvIT2vGPkVm8vQshthNlF672)t2Bzyk4JQ&-$x74cj6 zUS#>wf$~VCJ{)7S)x-aB{wP%5ofhG7E+AqXFrT~fq2O4+RGGrH`CdINzsQA4NWWSU>C{>aF8uxHEiTwe9K=7K#bcoA7a*^8M2iBw; zSXEe9@uKuO1v4b^9wyjcwl2raDqGLBy*xdrh}oQbo3!2Y4~}e^!y5T*4TV<;He9ri z>9f0By}GPG?QdCF()Z({TS8=3B>Ax=^G=@srRr!mW; zuh7v6VZ)gVcsSf=WlLS`iyofr^vF*Y#}Z(LBOHIVyT{ayb#G9I$41CAnl=I&D4imA zVm8eW>-wO9b%NQG!F;bln}n7sPu*Y^D7Vb}B*0YJuuDAl_rUB5M-t|+fDLXQBz(q& z?zsoyxzj_C$Hu1Yo_HR;h3&9n_USe#T4wQ&Xb=!JNV6h0rbS`$5h)2A)>0?L{lyO3 z^))%v9yl6p8(n@Nb2)uak;(22sqyNR${qkO*T9a_B8oyT0u75Ec-_yncu4I?%%iV8F0jC4<`Jt*qXl`@fbcf4+rYECqUM%o3DqVC-DM=S~rvJamY zDzFJog>2hdsUyL;QjJW2CX6=SCJa_zM|tBX$qLMYpZ?EgEW3F!UW$5g-j%sDrs2hW z#Zn;jVgc0XswLJ1y2%;Fk>-K3-E! 
zRZxy)kZ<2KfbC(7>&Un9DR|`_aP~DIT$g9<(|5|F`Ry3+hV1(z_S(J&bRPjcCjc)g zfE!xCj@EZC0Tn>6x?}PE>O@<>1oPn-xL!A2AbsLrs6FY#S0J4k9hwqv5;;no`%JOK zSN=edZanz@a6@mnIfR#a3ib$EH>LE!ZCJ6w1C!&&bEVN9VJLXcVIC^d4hb^666vEI zb2DRCIW9OOZ=uRUIuyp#x!CT$1Vz~0s&mPKp~#F^ZW~8MN!w|TwzN1H4+A~AMfJ+y zfOJU2yKbQi3w_{h?HQxNqMzP)6>zVpU;VHb@M5#LxDH9U)GA^3E|k@6=&>jS0>%!Q z*P^1=W3cfTnAo9F<6aQP``jB-udF*tFZzWeTT*vRxd|=cnNL+q9dJ8zFuG z_Eu5+@KvbfW5_q^Wf zzvG^kdJqj5&k@Dx_I$!)^I;SoBz%te1twA4KicAev%qY9LnCBxapRWN%tDVpTs_p- zTNUMU$v=TIUz?k1A(&0Go@_cvybtH_cu^RYaC<`IV~M;s!H;RkKqevXWLYa3XB6d| zz7C#cu%7w-^7D(o@BDn00J_iq@7*tncfINsWk4+3ulUQXqS4o7oZ520Nc>`) z{P9uYRlmz=38b0^hhf*0)#fSjOvWIv5CZdBnB&bLt_7cGkKH@v@RZ;6Ok~zFIq?Z) zXx0McYOxU_W5@NZH>G$2O>Q77I6{N@zO7$r-(*?2N+su_Q^1=10r(E5g$3tgRvRdc z2)J&2Au1!hZv@hAV?m#Ao0vR@!zOC4{L3!jJNMYn@sik=t4Ax5uKMA z^i&u{&QWZWJ*GuHZp6DPi&L(q1{5#L%YY`r=j3H>qMk`cbO$Os&%7*q0tUxL(Vg>Y zbzySXsD!c@3GnZEs6uW=yN~U%lNk2BVAlpwl7= z2{SQFzKbX$4JEEyZ4iThO;gayp(e}OGr)nsjt9s@ZtaAaJ3QCswCkN~I#@f-(^6IO=i!@;m6yIudcwMjL zgZu}QRX5-y8@!1nmmz`P1I~rafPnw$NWTmME|-R#@%o}^#o_!nt%Zj)UV@nY zU+nZ34fuvt1{F?Wc3JHaN{PuO`THtFA9u#V5x)mJ7rEiI31Fsoj?7;1Xz%#`zd`PJ zFbjb(qD5qU(Ebv<<%tmSd~Kq`3rtwjHayCKR!|4&?vYmIZgkYQxTPA{ouipsedP2A zkp}G`I3ZULZ>S9@m-N7mui-Z-(yk#ATiaUUtE7Q#;gEg%1R;N8CR-dHb|sWf>YOCQ zZBZ#+JfCR2O-~V`#^$9+&B!P62g(T#g-FKg^Y723t86isqIEgWqKMY2&4-Hw!gAAW zhi{{0av9NNX;z=SFcImkxsaO(HJOkzGt&h-Br1(8ldf$B;Qq|rScEm{QiAPjaXHF% z@2~Z>hGg>5ig(P4xjfAPCF^>i<_~Fv+YWka@BoFeH1Wz7mg(M#gh*6v9w912!~!|! zvM8CYg=DHGwXUCy3MvXcw#I(A8@!S#(dk-8DL$e~sY5-EhVBWO*=Y~Y*Krl=zB#r< zXv=Tf%I^G{4k96t41B!%>x@n?csuBBJA|IUxT84n74$mxJZUn-n{GIeKKtTan$lt9Gy~$D#E#ft zBagJ7DA~q`lprCRBBI1UwkK5;>aP+ETB#43(o+?#S|h;gYmu+yK{khEMZdqDr=1-9 z*L7wM+-}}ma6oj4esNnv9MddBVLcwtt#X=98eFAevaXkqO5IZh9Y!|(f`w5nmB}<$ zrRK9H(B*(kh{lo%w~@ds=+h{QqufJQItggLg`_jedXy3c(g0v(w!1-Wk5uA^`^#rm zk=-9%;J{-Xr7cMX0$uupxVGtXONn1;BmpeRv_i33RMDv`Umh%lY>mQqCaMz8AnYvI z_P_#ZyswPt$vs?NfF%;JBS$hmH;CM5bda2{fDfL;JP>kEQ^aYp_0g7~q}d^i$!5hx zn_<6PPCg2-qc)9*DL}{f;P3@y{5lVhhiM>!3XF$Q^!zP^UWaL2@w+B4s*OcPOlc)% z!wY5Nghwq(ab!OL;=ZCipXP~Ijyq$$2xTPE%JVpX5jr%Xv0$IqM2NDnBxNlp65@5e z2fOkW~E;`9kn*K8Ue-7dn!dL5IN-{7Yi|($WbZr%K>_hHP#8-StJ323D^lcuBcOtu^GNZ zYtEyXM@F<9Pj?`0O44#VUq>lKX*6vU3i}zE#fGOWpWGjbN{0^b=g0Djr>nn-uA4q@ zh6E4OrqvNMXe2l30+SGf zFN|kqn3bQ09?48KA;C4C7!HLLF7dhUaC-3`NkWBEr3Bm=uJ5tOxnCN2>7Z!@;qXm< ziyI902_qC`0=%s*EVrbj%%Jz6Q@&Ce(YPyeRTRfLIU23*x$yncKvuV2FKvyf7foM8 ziVs((XH;sX2JNl~n@N>sX(oI{3?=K_*nfw2 zcb$1~mnumPYQ@ z`RUa{L0=4~fN)T6iw{LJCD8cU(&j0dl`~u!Fbo|9MrFn_jfnY;+@6?||6qCTlLBI$ z8e+0U!KZe2J!*Ib#}^UCneq~ODbQ!IPPmlZW%`Gk$cw6D*5-?0W-)&^Ny{|k95$Y- zSGxU^N?{1LG;V8xt)w$-wG5C&4-gBamsiBMJx9LY6M**=z!NRt0wEdD{C%(lsHOr1 z9qmFj%tOsfjBLSo3Yb1gN(O1e{(RvzDOUDi+ufxGIsVc&6cpKwIzykL<)Orl=-r75 z7=J+Z>jv_<&kSN0mG)b6uHr^)U?_+*3ThnXcn$$3<4&WcI_f7UK4lO^v}OeA4JVLm z8aPhf>?DLm2{Ek_XiVqel(cuSf#bzVJO(FM0iP)p2Lw;Yc2V4pE5}x`h0xDk-;Uqm?+slmblIgWh#-$gU*w{Z~W5b9D zMhKJ$NloGgw!|71&WsrMEuo~jv6_z%PCQ5Pl89x|QD& z*;H{+!!%EhKTOg&KlirK(gN|ryM&&)JII5VYEWUPUnnmqqiZAax{m&>jW})nR&9Z* zHWPpx4bO|ZUo;(UX2|ej=a1boIXcvO(G0b0P*PKu!%DMGAEuI4O;eLbf zd!q|sbd3Jt1A|y80*xt`#@-kreouM@8lJ$_L8v>!}SfcGgr=|D>l}~ z&^O5J;Pl$)Qp?Ef1(2M5MGW(@Im+_ap3)^;}o5L(`OoyZr+9YmGR18*fK$$LxP3A(J;6Q zv&rk@inH^BS`rn{ueaq@jL4&P1Bc+l?Nq>pVD931%!09D^OJ+nt?CZ2JtJNawC)W) z3w*}kvVcs2%q#Npat2H3|Vu9 zLTsRo7K6rlXM?Qq-0F3fm778yk1DUPNfwqBU{`(XJil5gX`1+^b?z1hVZs}tMbqPa zkQ=d>IzW~eWNRp7cG5tStbz8hQcx;2G_M7|R(w3J_Gg!pvm8D#wOftK%!_m@O|^KU?=ZaUhbH=gmU z#C`gSfhrc$S!J$NBJ&KSu5~XMkEoIPpFlG90K5ah2<2MJh{i${5Z)<}&K9?sYSyE6 za(6ZTM{44)EGh-&(TTRx_V2hac+s*8>E(`-`E3K!%@I{Q>7IySlHOjl@42QE53wf# 
z_&|xz?7~S5&{Im#Sx$-2MuDQ^03M#rP{uHpS_U5zz|kZPZis&l_cA8;TwH8y*RX)) znm;^*T&xp=TXg>NQ~vk)+D=LIQjtBDC{UpIiiBIdnPL-z;@k^?9&lDky(zy&%%S0k zM~{TL++S=D7jYve-%<))pHXo#;pypsetcVK0k`1(R?T*@H?&8>W-SF66d9Nsm8phm z^~)|s{_$KhN5wHMD8JghQQq(_?qJuBurvg`2b6IwH)54D39Diu{>Z^+ZKU$KNg2V| z2E`1=K4uk=DbiM@``~Fq`)~Hq%NhJCL3%ZXu=OXj=s#&w!j?l@j%Ul2u`7tf3?d&d z4DaZXyGc#>z_ALu+6ta|()fANP(EGc#*i8VE2Vpte~wo$Mx@fiHNS3sk1-4SCm+T} zd|yByC&;xQP2e2_uD+_xc1qlO?HMO6HNpMSV7vbAK6`gzT|niF_8Q>Kc~et zx5&ab_Mi=}Nht3@m_HIoC#y>w#)1iKNQGjZ8yhShJFhpWF(zXHo27@pv%#5#0f}mc zMs%phL(xmEfv0&!7G!LqvqBJdgvH-Un!h}*ZV=IWigrbOeB1YjK&c9_Ktc0g?#5(B z?C+={d<2yZuHzjR3~%WfSdnPjnchJmR*QRJsaVE@P(LRYMxnfL^S=92Lm8@vgH6;# z86VS!z{>;ZzpoDCIv$`d^C8Dt+D$04WR?mnqO+JBb zI`KCTrM*=Bk*y=rTj|6jvu>ssCZ>25^^_lTq-+R8Wk0AZ^?402G#_8cb6<WF3#miaat0y4> zW}Lo~dvLM=L(is*4A;qweerd^X3= zsw2lc*v&FxSD$Dl8GqTI=DXs+D9f`o!j%Up70C9y8|1jZ7VgO+|8XkCmL{`P8m#4g z+K55SF*=LVwMNYf0J;z$rsc&!N-CcxU$5%cqN zXH9`{(AzRxK==pKfy!HKn5h8-0TZ|**VQ!6A2*}Ms)w8NpKBvDKe{yL;DwYY2+)m=*vqJ)`FjH;Ce&NrlOsm9tLe^@WUaUsB*`Abv z&Uu`PSN0{8$K{U~a|}9;QdD zpMUkmV~x{`R?yPR$0=$&{9;NDRQl_=d|#w7x1;cHq+H!bjLXsnyT@^9>KTBGFB)mb zIXo`{@?t46G2QJ}R!!@lFdu@9>3;1K)5clvaZ=2HNj6`&f(o<34(%Q4zLq&h$^;5L(0TFkerHTlOYwJfFZ z3QY>{2S@IyG96Hp zbczGaW{vvTL77lWbq?8LXVH>I#_=?&dp{Lu`*8jSTiso&nV1O8MuF*6E-OJ^PduFC z{3s!DDC@%&Wsk{1=q$6jQd;id=p9?(oh>-XPN9-C+iN(i50NkL!rO zqjwNe;Z=$K*t%sSZ%YLjeiM+5+g>b4Gl?}kFl4z|{0T@3bp*LS<9tv2qg9wuIa!K7 z*+5dEoClO}XYD7P>5Ru1GSo-%D?(_eXtDOo3X8Du>WA8H0eU!R7_;T_RXRB~f(KoW zup~FB52S$IX|A&qXIzO9d{(U)Tvgs!ytAF#p;KwnvD8Pwl51XaC8=z)!w9x46;$R$ zZQ)6_&10dp9B^DrfkSfqk~uqa;@}oL6+awhYS5f?Bwh^+b0x>p5A>E*RF!lLC_A$=bqk zhELg*&-_75jF5)+V&vgz?nn+)zfnQ(T=WMDCig=S3xmxsrpj5>Y|&hlo9Uq~i`Mc@ zUnaKVT9{ecIX@Dq4AMhz9DkQ%i9D$qTCMs_+%XOQHvrQ0W-D`v%r89$P`qw&y)yqv zU+p~!m^WYi`BUCCw4?P^+xo8eBgNZ3_w8N+cvJ$OQ~?*_6#$j$|Cz=_QjSpg{p&_E z%K+)Wq&uQFttp!PKiQh$MgZ-r>x)=%#zawd1}pegeghp?!AgzPYghjQ*uueBoucp6 zOYLenWF+ld*j=Zo&RuYJ=6q2S_^c0QLpJKkqq4gNsoQMirAT|&?Khd@G4IBzV}(Hp z*MTScP#5GByjjNk5olUJ3cd#VZ&ox79X>V1k)yg`LmCkUI5)Tyv%yJkt99+tV>Ynr z&#}^e4{Lw#v7ZGv6vuhkeYFL^d9$i4z_RDem;T`eh50|aYD@&JI24UQ;}^D}!l$jD z!jK?lZ@;zsm~X8zLBjxQ=`p~Pg#m}hUGT=5mok)Q!gxk`a%J+B1Wr9?xe*jzE(UwO6Bkt?Q7w!Xo>Z2(C7u1Imr$^@XC zRe(XOt;q|(`}M>c;OpO;J4zFGj~^EYlR<7tBb6>cE1=XVlW_NAecs;Lxp7z3zY3na zHPU=$`{sx;vb?++?t+mV#Y_Eb`^N`kxVv_#>;NLbO5r4;)!^{!h|AD!U2L<*ypg_p z+vcNUaQ-9>6D09zecWr`Y}1cQE3luN-h(%W1s>xhJ;R3;u4SENH7FyI$=3Mf~nh0`1&82kh&V8p?}w#|3Is0Fler~$7yZ%=<@3iv3CwBM7yT$ z6ZG!w%K{3pW6SpZ_O|2Gyj-PX2{{U|yq&@M`F{L~qLv^RGfn$Z5J^ZLi8#o6Q`~ol zVvqX5z6m=U_;pZy*RwL;-|%`NaEFEOtDV;N2&r5jzCTBDLVw`nR{y2B&h2gtU~t0? 
zMw@MMQAcEQ`|a}*8D_Q29Gl4suoW4mkc z- zZ>76gSwd1JemmOxs6eD&x0yH+bv3^vl5V;Vk!nml|ATPOm!k(V_pS}5G4Pec-m@kI zcE+7QP7uFu`_R%E2=k{AkHW8H@R&_Tmb`CE%(-$jkL;XCbHn?IPU(O=FzzA8`eh(1 zenv@HwVsBp1XNyPp3kGn!#KpBESeRnua|avP0!0HNRA>>+)P@i0$Ha^VRN9Zd0L%< z3YXyUSwS=igf|e?iA1r}j@{cNhp-$d>=3z$I#l3(b3cpKeZSY-#Mt_SQ?F=p)dsWc z0~66Yc~D@S;|~kxoh-dRGY)GXQPjk`=Md0C`njn^Sw!vQv3fxh3^gCA$@_-s*Hr|= zzwQz{K-!?T$6QLIe;CGd2d(u#^ggOg&-Y4GyQN95*lfg(;Y4ZoCXA91)R97}36)wQ zc=L1%wK>r<1EF8@m6_Qev$pm@?%q25+Gu3ZKDDulh~EG~-2EB-;M?HqSS_#Pn?X=0 znTn74MKD_)QWgveVX{)ixC@>6cME+bw$c*4!Vv?>HQ>uljOmA5i_GMM(=8I5N+(5N za%8ir@vzNVrFAK>2yT3qYVssd9X^08U@MR-=Imq=wFE+Z@BSKacN~tP#olva%%G;t zAZvz<#zqg1!-;G7cPO-G_;iCo5+(TY*Pz+)0A9}yWQY_*vi;s9d|_8~dXg zx8QNgE;B>N=kzU0&GAsuI1snPh&E>8^#@ZP(1t#U@-^L^GP>xoOWgDG86yJFZ5mlX zUIO``I6aB&DL6PHhj}_8;%%rYxF7yRzr4mZOBqkg?N-li_L#8$rko%VK_+}?p7Lo+ zD7-XV_4*4g52C|x9e7#F^;od_;sg=r!ZV%O-zd6&)V~^~gK&Xd;l3CKEPKe8u);7Rt>j*uwq3ewyZlmySqldcBGvC#amm& zg^2K^GxQiK!^8p^pCGeGoD$K#*id?{0=g!axkKdr@cbed924S$slT+dm@dhC^Y5c^ zA_$i<;O5_P8PKFE#hy@&$G4e#=zXAFB_fSvAN27i`*vENrYLFeXis(UmJ zc<1dxbYp!9VrWZL!j^0^xRa?S&EoW#O^_TvG>)dSqrZJd~?efhnWP z@6k)zqh#PH=Sotn?L_Pzur~lXKBihYTaF{bwF@o{%X%~uAsQnGE3E0EO__q7jr!m6 ziNDBDLL;;}!qX%wwQ2FYhXGc4gd-7R$|B?@Ejm6;NbVdO#tenfFg$~kRVjJMWh%{L zxCG>ilzwZF#lQ5AInPEzKZiwyHKV5YHzOlFm5Eew4b+6qh^$xl1+qwfahM@Rv@H`u z5;G*QdHp;BonL<2GF1NB_Tp4W^$p^^%5z_zldOj3KlhVUaT4;mjsmfuE6hi8^NnXj z{RfSr$wLH<{FM$KwWTzyIumsjahYo)pd=_vjOp zk10XDL3_qE@@BMVV_m;2r<$x{H2OSF6pFr-Xpp?|J3ld!b!T@i9q6#(foGgIa2`?y zsaxl^ldv{Ra>#R}c|X1f;jP1Dgoa!27=bz_0OD0{JGndOJ>nT{;3XT?HWWg-gy^^k z@u6y>sJXfKOLBRivXeD}L^ee+n!bSal#$s0lsbq0#hCTvJK|}`9{}sS^c^&!hS^5l zqiv??xvfZP)M;S~qQ;G=wHp=FB0#Df^{ zi$2D4FmYuUg(Cl}sjYmFyKpffxV5-{gMayonkUFF5dRc_Mt|{PTT$iJImhuR4vQX9Dk& zdQnU=b2*CZfPlX4s0PuQS>T;I>1q7B0q=1X#dj2442t-KEzCuB%G3=&RW=ZU4`7=t zn8OJzs&H(GEMNffIIx;>Jd~Rq-z9T*9*DHoC(*2B=~nU)XUzLorh>O(IGWkyD-zqFlVy#%9#py22D|xuhb8T~?u6a9FY4%UGjStE)07P{EmFh9M+)>r96oh??RB%2ImgTb+9J) z3!jRs=<@4#kk{QHjzh^o%Uhd3$uy7uFF#tJ<0vS(@D%}gw2JG;W?!FWDBPERROVa| zeuPh}j|=6|NI%^KOGe6&pNbK6HlapPKC4*GLwbQDl#e8emxDcS%&@^OL|6O?%5=;3 z_4{|Npc}!h4DJ^Q2Q=W457d+SO4bXfYw;#6Q@I}$H?*$_odmbhaW%*tWL?FN%AKJk zrvHu7c=rA{K?2N~`fvLm|BBxzRR@2vf(mzdUbd6Ql^=Mpov2b#9E(kaX5^3z{tBAE z?{Pd1?ioQTzim)C3Gm3>-#&Kp*sWF+FHdEQ&|40W!m526r`4;Zx89>rT#Y(f$t@59 zMe)LfB#?j|ArtXAR_&u|%hU+Vb-%fb5n;Xp$Hj{Ffw>|S|5oIcJYji&9*!-LfoSuL1TK2k(FodeGGwt|6KJYVKvFEo zjXEx&Gt9Qf#m(~M%s|${wyW%^mQNJ!8Yb$A7J1O4D1sa zDkivvJY?~VOFv*17Xw7T@v6Nxokp~lMr!G>A0m}7&)Bf{q*V_sga3NufX?`!<4=kI z28Y6S_s1@w9!c^qc704LRX@Typw|=(mCzgSUEUWDN-tdM@68fOCu91SyoE;qlbw;@ zB!s@JI$kI|%k#H&u!hwIUE8)$>OfgJ%ZdZyRr1>X>|0VgmG0#5Av$o%0LInbd*@*2 z++0P+Q#+Tk&4vh1eI>Ox59Ef6v-k(+44Xqx`0O$A3-#@(O|4RD5~sl$xCSG*G#zFHK&{|rG`O77(YKO6DNxQ82E(|hB8Cr=dBT=asv2eZtS!gx(uY= zu3yj+HI^L7Xz>;Nyn`68@0Gl|vWTk6Btsj&xlL(?dpczd!SSHTi_4N>ngt(Bg9^ND zA%S_bX*5*9%-FOJGBz+m<&q5{ELL$j8dLuU%Crko|Dkipm2&-M?6uAW@zbA;B6od+ zXBwlv8dQ!vWwS;S66F=-4mlqy;Z3(MMb~_GWVAXC=8%CH6XMzl0rmDK(&shx{mlqE z1}MoKU9jIEA|O(h9*0zDr;l->Yx5RXvRt^Bj44s}-b?_?4uqZ$ILac}kK^;brsp%9 zPcom!#xcrg6b{YEK^qMi`e7O0@y-eSLvyK!r&;IJsG^Qae40i?wOW;fBhg3&(+ldR zLfXb8tI|r3Aw*`2mhJY(f6FQNvrhR8ON6JU9u6$vdB!82pg~WD&j8X^Tlf3f>=FDo zxKJW7-TSD>-6lB-yMjSUoy#13*>X`|$Eoj+y>MEW{@oNx&7y*#F^tAn972#Hg53<; z-DH&qSQTAdNLU*zyWN`pST)9aTOS5SI`nb+G32`njfneB5?I_;HG+OQkwudTHJv$I5x{>CO& zTDKH&gW$1JCbF6N6=t%T`?~veCqwU~B%TE^qa>b#1s%ovs4o0hl2T*X!ihiP)=|*ucei6%$D4OpOzK!63#>^pA02vbSc>Y4MX_iUHYa1 zgT)-CtM0WxJ6Ph()8MrEr6O^)$Sh8Cr)=*1dUZ(n1&-Iv<0K3@!sz%|8c8v^6Dv(p zCK&g^w$dJzB)YT_8VqZH@JOw7arf(UWM1OfRPD9V?<9X{5@$b}m0QH#POpI3XxiI! 
zOSJyN=<0#RQFeCF93%?c)uzis-_6@4MB`R zO?U?s83Lh@`QX^Qa`1rP5W{;PWozM((NAR|>}sr2Y-Q~eK6W*75cOD!{K7m2=Fkgv zMQ!kv<)hn!1>O-phoTWle$n0~N+U@$AKE zWL7?^H@FdBz~MTRE82JRHa@z=vpcp21B@aT(v5mYi+E6Nm(h@p7WD=U`QeQiGQpdL za9Gl_+B37=YX)C(5OXvA8DEq4DEFJAf6An6k&t|ZIdL4{#TcDEp3%@v>U+@5NBEC# z&mR{dAVc^2GV!DrYXFJ}e8CezHFr<6Bb#9RufWDb$ZZMQe+SR>t`Y>)gcNbxT zHP_xt=nOwKR|n8=k28jIiYNO*oaY~v;VRrDhuPdXh?=hnFL?NgM`7k<9eGe)aO)Dt z-;o3lJ1Vr1?0fPUT#p?K2!sO6 zE_(g`8UG=V(k%W({MBx^(~EueGw7+GHVz%LxQgp>zpL)W-QU#j1_6w;Yxw%--pMC*)$@kxHwatm_gQGCgG|4OZXE3^ZDill5n!%bbwSx&)2kHB zxcBw*mDXL)@`3ZIf;u*VO^WTB8GKx(PP63BOh_av&g1UV9A~lWZ>umT_&=S z2D-)^;%1Mze}jbYe1%L}CEuDy>eJ3207Oo_8L{>T|gGC9icx=wZ`2s z#JIjiH(LG$joZRstOO_xTE&Vqb%|&bj*v{JOppr)m5WcI$rR z(zcIRG?P6L6L|VwXrka%;X5YYNKIHh%TKudif&1%A$z03Zu%g+y26P_r3Bz_{p#4p z2S9=`uG`MYybodm$g4a_fyg<^Un}gMgjBs%`W8{k4G}&< zgeM1C$!HP!5T72&v+AD!XQ6}Kzv0p7t_OioefUSM3q z1vH&_g8`5@-xmN9(X6y#V_{@k;R{IJlPK*`ToYY)Dhw?WcO=5{+n_^y3n$6RB+3~z z7Q)oNjwzjkD2Am@b|rfFQ_=&Rn*U{{xt~`YlkJbQJ5q?%~jwq zT5|0*Dp;Uv5dGEMxMPZj+1z2^|y z(4PEcHuDUO@MGv@H9-V*7h#<}FKC^wx>}>vLbSH_4y6Sjq!A;LfA{lo{h)F5Gds3? z7f%vIVS;HH{4F0IzV3F=z+w5HKk<+f_<5P!qE8;)i$x2X!rh3T5lzSKEA=jaff}+Y zQLrj44D3UJ==u}|9n|()6n!91T4HN-WA&_2Hn}}EW2G^yEQo6Ymqf39&eU_I390#X z@qV?~5?+f1rX1SO&h^yy*mBX&w&0F{q7$9%QwurK$b(qkfKH5qf!_8j@XJgS3Tjr6 z=G7N%=Mz2IJ~o2ZYuk$LJ6cGqcmU1H!d*vQbB7##h|15WBEDt#`ItUU^Z+j2-^1uz z%+vJ6YLJ>ux`!uU^2Uko%SSEe6M(}x;U*z!`DCfUHz`S)?Pv@(&?KHt1>F>+ui+#r zSDK#OVieUib*dw{&aLs7+?jQ{`vPM_JG{DCN^JV6sv#@?RMnZ&?-(;#Htk{ zv6N~hzOvG;#+kz7+aWxmmL$p8z*ABN!~1LxHGW~ryk_&|4f=V~o!`B{3GdUv(AR8Jniy^@! zAx0K;&k^2drxc_XtDzQd47JnCVsZ!I^oD zI1k`$6*f@`2JC5$1BFp@&mCONz#M3{t~oYp!c&{rBKLthWBvVv#^JXEw`WZ9!{Usy@z%2M|uzQy-IlxOB@$@51ThMk6k`1 zMef}dT>#4Y4{PHw{D*me1^&YoMh7i(QE;oUpjtv;WHc=|i5cCHVcao#%oGVonGJ_t zIu9M!mqFm@sA^^e4dVD86)I2tacqtLfj<`X8Pg6Af6bYRg|!vdy*Lrb@9G8T0hzAI ze7*vQw?o}usW)-L8Dr0kkSY;jmx-`KW6!x2e`7Lyg-&>nN8D*)B+6|DkJQ5Kk)y?; ziNRdT&=5YdpdDu}Pf2{m!!)z~+6?y7Z)x64dN*2gg_#fQadU9vB1}HHK#6X8-5&7T zE_ivFx)bqkDohPDk_9%Ge{z3lME$~t0j>F1a8}VwcfISAORRxDvI6Hru?MCWR~hp@ z&EQJFd@d+y*@0k(dJv9)>`OGx0*8eL1#lpiqCz4!sBcE zrkfyD+g8Tb8%&jj2|QrUVHNIlz7o8SGiSZ(q!&7(2h^Z5(R#FmbFtj?%H_R^U!iMN z={9ZBiJsajjJHbCmk+9J+OmN&_cG%C8IAcjAN-CCPJdL zo7ho7s>>w;*enE4*ARd~EA5=16QCMe#0+&FMgIUGzOFni%nyTb7Aa9U8%%3HiiXG# zox^i?x-KuGo3;yY)-<(d9-p)dW^)d;M=?I7r85&Ps`ATuFGB+BcmjlV1uCZSLDrXl(f#!7dm7o4X09(IlnNtK zflsCtOe0^!TP3%iaL5%)LtTKBkyz>tx}tZ`_CN4|`CP-K+_}=tS_BSs-8)fqL*#I$ z7jvyJ0Ls+O{jK5dH`hT{gczR$nFj7wtS;@go@{bXKR{!s*-6RX%TgN!bRo9=pmU6O5$6Li3_}9qV(UFfzVt7F)re<_OHNC9)))qb&!)%o?cY= zzzHD1^YsMLTI;j81HFIo)FPs38e3ZB#c{2!x%9OCO!V{7O(V%;Ueh7H$I?%p8?@W=|2N3CWG+qr?L>0q={F!(g&AW56iBv)#w zS5H3oyhqkvtZANGyc~ST(S{?g%T=&UE%}Zk%-Ql%Qi8lD^Y#KK?jqf7O>+)8xo&e~ zgKje&?pk@n1DRA|&T&Num4HuN!5y{JKjVZ01CPYCf=l?gUl9UfW3<8y=~h|xwt z*W3du7o2?K1&)3V^hw82mVf5nkwMUmn?I+pd6KUOAs!TvV|s)sRq(5-E<_qf5OaT6 zJ`|b1I*igguxK4h>aKlM+-etoTTh6InQh;v9hN-sbncolb;~ow0c=_-8g4tv zj1^cG+Kz;^i2G@*W+ZwxS0zej9lC!PXcofe40ula$|0Io$x9(xGk}W}Lzh_jS_EmD z87$C+XFSG0ZaONZl;Kh6^Bd8P2mA*ezqR*^tiBl9l%_gzoql-bMxp>JAtp><@Y*=0 z!%j{?&xKwy_+Ckwy=)0_p?><*>irZCdcoYWm}ah7uf^=`ica8=7Xh7bVeviW z{lFGO2Y|ute}pFno*to;hlYQk!h_WdQsFLz2DS68G~e-Wb^X+?!qPiB>)laSg1S1L z(Vi?^d2qYJu#SaFh5BB$tE#Ho>G1@%i@;G4%qbc>f$dKKne7D2y`=4@53*fv?Rvx8 zB)#N`ygqxrIuc7;F|2D9s6H^x62H}ICaH_7DcTObBn}Oo8AnLK%s0lmKE@uW-Ymxz z&A9^d=|QKMKj=d6T0%2ilAXZAQ%l)WVY0?5T^@(yNBuSk48Z4j(<~D+e+pv)yQPB- z3r+${KZ&+0EQgUofZ+{A*KpZ!)v{db6>i^`Wg8bYA6N#?j^U< z{ml3fdc%#zzBYfncTHwhchts2X0E~+s<36u(V?+?j$vES=8EAds>?cYL<5^fV zoSj_sdZ))EV^zqoj8@)hwxzTSUePScg8m>h?7(HhYZ$hK7kv(WCzP0>7 
zTK*snr4^RXwqxd}S(Q$vr9{+@r=}$mNc~Fig$^A;^;Cu<-m0%mO-mt&d5*d1>B5!* zn(qAyf=0f*% z4i`G5pe@0N4!=vj;KfN{4ZthUCT1=BC$HS3)fVU3e4z7;mtaX1tK}`RhBhCUu&YL% z<8rl9Px4Way15@yrw3Dn~&yup0+}u4j_CN`}(}MC-|L z(yVSDm;?{a@d(N3184-Dc5Bx0*Y^vTWrfYWFspk3yTbA@1 z%1;GMoxAv8)liKP(^5K9oi`5lk5YGW3KMI0&OOYc62$h{@tt{b4&44&~a4T1zz ztnn>q9GuA+6qUKh$tp)wm1&T+ss^Oz;a{HbTdeIk5gyrm3d3(Dc~55M9e9l1GHkS5 zjo0jzn5aY++`Ge@GUNIEtsA+Rw}pK2tOWC0c~o;24_N}odq5_O_qon2(FQ2I2}9qt zf?22hU>b+1Firq-Fd6xodb57GS0_4^lGd+T|2AmYVN{j0)u^ymAuCC#H_J`nPRY!} zIADD}lg6LDPa4IY@&Tg=G{|jOP`S_KZ3^^%Hq@Yc&#|(n?j#uUSeU4I`Yek8VYn~? z^79EVQbB>d9;^gN+=G!5iBk|YRwdvYCEse4`6j*%C%z-Z2z^^c{g2|tXGX$QVyEI( z^H73vtJQ1N%eErtDJ_{-Wwx!a+>dAayvPh*4RsQrGHhi_Ka)QtP}0xL6?rj1+H^#? znuhIp_zJAPshq{gr}fM=2lG@tlACrA$G$aocH_xl;5|&KT(0_N30Je{l`JbvmgruH zUo?zEb)z7!96fpAsj?0HIFWf8KPeTwF1shEUBJC|gF=t*LY|Bq zV$tL;6QGcrR&nX?Clv-&e?}?{BiHe)UCg%Q@5VBlLCWQOUgGY?W7kHRhTNhlW|-b$ z>UnnaID~-1^tjzWJ%RC7SZ2h9WZt*pZ{z}C(6?$e2-_rUbp2R}uuZO; zE95<^Fxr%KoYr(uqlOkPj)L@f&|0E63Ul>rmFsEVgDyHJ7sib6rYY0Gd%<|YXKsT< zo1o^;ksqfXGMEpRq~=nQslr4C)MblI6ldn zZ$%mzr_NjM!`Sz15ELHaCx2d-G^a}mSXnS9C!|us{M(OVd3&y8SF6?)N0kv*Jf9qN zJHB_w49SBEWb<|zv8#wJQSrHSi>9EGFiZ1? zB^G9gb$EVX=nBp2bFtNFI#BmBCbg zmrY0z3h{l+ze^c^<7sWomt>+> zDonH*Xn(eCSJr1m3V_vf&~K6CmqF~Cs-xyUllg7d{hM;9(D*+ zWt^?B{|zsTTsJ^|Bw!CLJG&(i(B~+jEo_e`Q{F+XQIaLQNO&buG3Kot z3#lwmX~I4M{Ka*8mHAQ6+ojTdB2T!pDh!zdU%C3wZ>@wMpvB~gt}?)r?o@uvg504@ z-+zX-KYbH?wzgj?@$Bn?73u(6qKu3lD5Z)+(9R;(4}lG0rwfre7jb4j5((n3u0QU& z1B1Fn+Y>T6tnp>3OcBK_&VG1<9rCKD(yJ^Jk(Q>heegywOCyF!m*|WFgL9GR#Ih&@ zr)wTn$GgGv3p*T*(UGpekb^0W^tlx6^NOcZ+NrP-epMP^HZu+HY3?n#TAuPu4mOBI ztyKEZDQ;y+2N$?$?i=9_%9S@gr$Jv-4;WK==UH}PX%()pX)9~y^CpqZC7UNEHKCRW zBRI+mTbVGT%$Y4Cm28^fUJj31*jKE0KV1Zz>x8GmreJmnvnkHAyPacxyXDzO4P9!HU0(CAFlv4oRgTt)Ll&D1RlQ-Yf~}lTlF%rL zZiR6l8E!$WRU3=h55@ksxXT8I8B2c8W`1ZH)(1>8EiEiX7Kq9-gzqU9h>Fl57PUYe z94ZsUFdD|LkpQ93bvJlhQ6)nDX$F;5thKeavCDJ zjPM*0RIiDj!doIUwEF-GWC{0~nIenyg!w44+!axiwWNo;;;r2I5Nj^Fk8HwkmHVbW zlNHYA45J;ODWjM?K*N-Y{)F<}WzWw(R2b!pfcZ+FinU`i+$*RbBq}GgCwN30m!76o z*zDeUGwc?cgJNc`=Q~ZC;&WLmN;oTXx~85qlzJAMcVlXQ_IYrHJ>eQMyQgI_RISu3 zl?#xOvre9t*eryhpII3AkElmcvc38DlL~v5jc^Kgo=Y4s6NG6}x_P5eSO&=Y1=TU*HFyOTWe)@GSedJ3Gj>?OF-!Jm7ms8y&;7OX$*5FSY$j5+JgaBJ=C*! zwku3;oEwL?NizQV8PSt_YnRV&Qs^m1N)kv|32O_=4Ah8|_WU(bKETDEiL9wVfzRUPw zj7J&lW~2kEnNBq{f8*IR43fy*)j-1JU`zDtL2YI+w=+!_Yc?}AeMPM?)7L$;6F1>8 zj28A30oW|kNb1elWc2#v+VCKj&^NhgM^CO%Q&vF!FCMRcy{3$Rc?he^m zb%&|~1FYd7{#@%)k>#|Y2Ju`DoQG1NAGuVb(QtM6z>_{=yfUUO#O&-No?~b(#VkXX z6}F_ARQlMyQ5;Dtji&-?yx0x{Waf54QE)K}CrOMUU_O}uZ>jF(mdFJ6Guy4PzyB`~ z{|7s)O22~boNx=W;2rq`w&H&wumNztw_4G`w(0kl88z%eWfCKKQ?@Cgmk1JZV`DogdLS(nRw^;fy(5THin@Ve zF;7o1bfU{VMATd=_dLh`QzG{u6zHJ5kjJX#Mk7P$!@%y2ZSxR$Biv||Gc>xe3V1mXPms`#bqUQ^k$tOx{-OO2VDX8l~u?;K&h z#@tk%RGFUPwIQbjjrnCKHdtD==)?x!ne(^?&#;l-EHM-#C#=q8rxBd~l)`^bdS8Ce z?*i$)7t-678oZ!l@>TExy5fv|cURg@cmEmQ#M};xdBRgmEM|wqtTHi%WRlMwOnMNS zLC{kE2Kh9kuuY754_va$VrCLAwXs(I@1@X{#(} zeN%FTb|>QuHv`{kR-cXnpV3#aJPf{}LvECc?g`IMt`YaMrJ|o36fcroEbFXG`Ug#^ z1U$QQU@e;>NlzpsB8}#8MO%U^mu3oX*p9t2+3F3yOy8`V$KBRbP%{yTG~vQ@D5Xf-T}hj z?XY(qB?i62-=Gk@b0{IG^G;T$qPikDl&8SKBsKYMx6u9)1Anst5?91>P(q9w&j*e< zv#?lbzgEQ!$bNm{ipt$xFAa@_Hil9WCUZm2?I9khU@j=sfq6;jdG!E*d@cj^TwZ*7 z7X_waS zY3N#96GZnpWqKS&G63X4I)w`H#8=TYrh=#e@gDsylM$YSB2J2r2oqlk! 
zNS33{bGbi(<&zCfPbQaRUAatlPa3@P=E$tc>*8|vx(6G^`Q=IHbLY6-?_6G#%H=@6 z%Lzo*#1<+@rl;ZH70N1Em=`nAPR-U;g2akjg{7pTk{&_sEYll=749}*>E^m12Q>6=f_<3fb3VSMp*L=(%h)3}d z3)Q11B{#(zmcZQI0*8ks9Yjz9p%olNlv(Aqla?-QYgh!8+hSG>V%b^PvLlzJHmXG~ zx1A^#yj)?8qn~rZ%dp?`aAlFK0Y-q_O%^ZOfXL-30(;ZQ(8tSqlw{~bzhc~~@5Bkm zN>XDz!^=1yvY?V12konVHg^!K#@Z z4<(CNj)zoL&oSK(g})HL#oCK{Ay$~-hq$m8V(Hdg+t1lou#8Cu^huo9)H&KL$j75p zM(N2F3b&qki6McjdWdu``$6~FSfVpneP*8_t>k<@VGOoT(fiRTxDv-<4Ct2CugrK} z?M9)y1TQ1JGJw&BGHc{rxwZGrVoM#X~sOab+|*{-l9 zXAj7T@YQTCx`)Wi3-Ep5Zxb>OW2sYz`68m`50GfKs5(6+O2YV>No4DZ731wGYCSHy z115R%0x0kkhfB@r%NJ8V2d=K$60S&C$d z&QW2{b^&i3=qB{9;mEzR5_RN@*JP~2vg0`W*B8eiQs4s%1*q!QzUNxXQ@2bxgFNyZ z-3^2~u^}F_==5Uvyj->;H%VRj;F@6r<}xAU#&T?zPo3UAnR$H~anV}1Y`!pbdgh+` zZzLg#^<(+$+=OU~@=HFU)*aHcB5&l6))TduMT|`U3X?(eB3ZnUTOd5WpSiN0K9E~R zI4}=#KFYw>wH$8}cU_8gNlg?N>Ig~}*inznwfCawY33uh#UO-FH?$)pbn*tm%Lh^sdNP5-sRAzJuL`JRy6z~vbMwiRcovJLZRI3`E&pV+v=WOZWA;wZykJ6E9M7#WnK@JOxqWAO zKArfB$GF01hp@aKFy5LQMQ1Rj8e!OrhXKvxJF|$_gjxrw*TfijLb25qQ=wr>(2?dm z#h>Q}BpGzWbKZ`uN3M!s#}st{Tn3;9EThL2`LvYP2zicQ)f5Z;bNs3*Q-h=r)Byxm z1)*vw++9!pEQ~ynZevR#k*T`c%%Re9!~k(-@Us`+kUMSM`}+CHG!pfCUm42GpK&XL z*`wLsVNPTXl3mYD2D`Cq$G5wn*zZ}i}Vyf70uS-MBqt3}dizU(c-m!13BccoZE&g^Lgo<-tO z7d+r7&LClA^g9dfZ$JbmlgKztJ_})+#T(2KcYxEe5$r?gqn0L*XjFGZ0tu``2U}!5 zcLe8{m01d%%Z2dduN358xPzE{r~<}m4XN0MpiJfe8yiU z4M1e1<>&at)!{48@T#A(ZF3Q5F87nMjLZ5=4s;r`$lNIqhv0K_*{~NljMXeVsxr9Y z0(kBww_)nwj+rdE%L|WmL4xBNn&~(=qSs<+=_yD*HjctO54+>;VY-oM>`p(6Db;A^ zsBQd2Nt(G5m6=bPijS=QM=%ikh$`UK@!8bhp_JaHf#j~lEb{64In&4bFlC!s+ z5#Rk|GaukmL}tG70I;PMu4o$eId2%CR(a=Y-W}w`EstSTHT77OSVv6;=7*ddBmOGd zZ=-NzR$n)g5iPC6dS&i&8{#G!Xud%R3>GhJ>4;HS>H7K`~m*wY;JdlM(@PJ(tC zW+N;Tlqfh9LBdQ1(Q=C@G9Fn8MDeCcz$`p3K_O{G=}pl_y+RKCJSQR|`nT*+bLa!h;&O|4o&=|%CWbJ=6u zw?ZD1ASo{9B3NNdP|fw|*<22fcCtvea!|)?17$0NrFEm%e9pSiuiS;OA zmS>7^<87%Hn$_Yl5AaO&;%D0{X0m>+tiDAy%^Fs+uzj=!4hFHC;PMfeq*QBo2lD~P zhRt(!hiefkyga8bBQM}-;NUxDX#>UWmpRl}Lgv79H>>Zb2B>putgqOD{fw6>f3EbJ zD}40a#BVj-2^Jln02v_s^0<#QcLVtnqH#vt7j+p8zm-`P9d-Wv)M3GMSg01*DS4<0 z?7WPI*MXO$SyZ6`Sg6cG3akZhGrT{y93)ChRM<-db>@?1@f|g_GgL5-xM(Vl)0`{* z0#%Bor(MY!ahtjk*QjNz1mb+Ds*P8}7c>vQS z0OM2JVH=NljIsodcw%mSd`dafWmPEH6N^Eyz7rvd2>#vaDcNL32lx!u=d)aih~G^1 zFRk_El4 zEh`FC?TerkE;*X7;LL`yu%&U@Ld^w*g)&iT@!D7_7_&)pZGO!2cFZW-6eM=7OiS&G zu)-(lgM2{jitxRN8r-PrYA_W*)j z9(QWFjhdpzeceoSYz3IG+VZ)sriqK77UWLhQWF8)cuwV+C&T*|kA6Q;2@mcpekM!K z=8jpOa6{5ljoie_h;p*&bz_-ROWl#qqIh{-40#up88OJK3cKue{t!l)#T7E1@obq2 z8LzV0b?@@&YO=Rp{-`ixS8QZcdhpnY#C3M*{1v#6Zw14NHFS4XMy^!9y5P?+`OKD) z!-Ho#0&c4|J4XAeupWxM7t8X;H=-<8Wk0Mp3+#vSV_DwrjrV@5%MV=L4CX|51E^JIroH~x;`O?yAv*Q8e;?*6}^?(#ZjMtg-@jHv}@@B@7K8r9$ms>zbV+ad|21Bs}zUh_ym2d##oHo-0) z=0d1p!Vy!jo|Ca?BJiCoCKgkFvyzO9B%ZHll!LRbyV3rl=1L{&`avQ1NQ{OiIE{+z zRpc?n!)%rW8S*8x@EvnS_O8`5hZdCzOr{fX0;k&W5-wtoEmty6NN%D^b+1>sDop3x z2o7xe{Z9%dANk~XaBBC-ofn2a@hZux!)p}d$zt>s7eTT#c;Jr-z?nUMVC)L*8$B)W z;&9GUoiq7`Y;}iHGO5on^%`&;S={X5d;htPcwT`Dn>jP2*{e%GU4hq;o9R_y_D!4L z1~z*v@5uAnx5f@wm`0pIxFq>=;-j-_>wdSlJ<;-E2@Uwf^S$C+$ z)XukawolBsui`Wfusl_s&P!79FzbdC;!Xm>r2JvKn%vnU(7j+X&k)F0ZY)SI0^>n`#0-Dv zJv6|J!Ub*Wf|Sqoer*Qos+n$(CX6|0Q!a`6YH~59eDZz022aKpu;7+1!c-C}Y?tM7 z9dSIivN8D53}4|RlBULL++%Kt_o=bOn0dC41<}>5s=Gd_V1?UWbBVtUf=>|$qW{v5G565U3HIb33W8H1FP>yZ97n) z8qOag3r=S?S4PP>{Ol>pI{Y6QF~XWtL$N&%brrGc;+V=i{);ZZZd#-HMp@Ggppf8up6cJDmUwWty|~8Vs-Q*yG}iQ zcX1EvF{B<;%-I-4T=R1@E`wMXjun-5eO8j)k6l$TOoH${Ta9P4TAc_g@HEJLSbNeH zQC!WadR0P4zjMG@%3}>2iJr;64I%Tta8~AIaEaj|Ggl++N)&JkD)G7X^YQcc|FNPGT^E zrgwfA+5+`$8S@H*P?Cg=2ayteX=ElwWz)-m7|D{$UDM9Nl1oc2XT9pNidsvnpb{fc zUW8f_u$%-xcHGojiaUb-V9h&%=Nz6H_k%ao!1&dWS$4zPZjh#&T}_V< 
zKZoAz9+eKx*V%21Tx6<{0G9N`gR66xB0XV7m=jWP#9G%_oX$?CI~U_gwJk2zBUhqM zC1JBB3}uDj@yH_5q1-(x#i*5^TG$0*L3prO9Np-8)L@d?EM?SyV&D+we;L8RAw3t* zOqW?z;HW48DXT#K?PRH99K1TK$1s;nC61z&QCB$4w3M@nvED)GUB$I$d> zVEj*pZmWFUhK6Hv>ZSLZ6^kw5Ene}S6tE%wjrC3BXPa!!61T-NMtfartV^b7eINLH zeDBYZGa$8mVBPG%2X+Cu9`+AP*#M^K33#EX)a@~=scWt7MxoT^q3#~5AM6GLB1#Lq z6GrRvS7MYS_yz?lU@Sjrm;$(37@=nF{RJB-_KghL$}?6KaB4;`gIyL_`w6rRl_j5TZGTH>2cY4)<3Iav0# z)5YpeVqMVoQd?=c!yU*jyaeWHR?QyKf;F&Eg=SRS*5Ek2@o$4D9|hHo8=XQ-_CA-C z7YS2XtVrJyZ&l%!*FB0f=y5xAr z>Wek(o7)X9OT!RjPOt$U1OD_0DZ(<%vv3$9@`0sU` zd0)8L1Ro5wuy=^yH`v!nkAkaIco7O$Fiyx&A0n6E_8JRRG%JXsOS-GWph)2`Nqll~ zyzs>T?hDffX=bJ?DN!M53@O5(PTxdbXeoVq%t!`^Uv)mGn`u)>B=FRqtx>DH1SBjE z6)-bBr}J6B6}oCI81|Zy72-4@YC-QY-avU4ST5=J5J-c!n9u_&m|z?7T;M=8($aBr*OMqS02qV8;^yuqFt zXo^*#={h+p(=*786837BbZ)(pan{f?(4DQ#(r!^W2_EnB8(b)88jIQ+6>-1xu`&!x zDwwyx)M3a!QbpTd&eIN$oy!rk2IE;AmKo?hcF}#s(FS_W9!pYu zv&=2Wp#OaNGb%cnkVY#)j`QUx`pAna?GUU@M5kS-jIa;A_{|3G*?rsUJC3sAyag!*zPOfJeDk1_f3nGCt9bT z$lmkmaYSJS;~~u*DWR_{(}t+rIok7&OZ4+1#+(o(1QQB=i@Ow|cwCPm0eqs^!soG1 zj8ExZybcjULj+}s{_y4)1q`hXDxk_7uR_Y?3zz%qspM!MDQ>mHiNLK=4S|}pOEtIy ztu(Rt$5J8GAjGlgtKqqq^&0%gUL7G~mA$NW+Nb3ZBj|6BHHH{%vh8a`{5A!hcXLr^ zEVD-1{L=Dk+~*7B*Ey2?jn&uxAZB@I)z@6Zs;h)C*u<)YP1PJ(M{h z<L6nu-~T7cz>oTa!RE(&GpweuJT5#z+gQiuC7fgP zEe7SYfnEp5Kc}Z6Au+Dv<0aqiB|ivN5h@^pfLsV(=|G=5uTtR1VJ&*h3CHw~K>F0F zvMAIbz~$)6NCHQpoG;`}`ORk1?s2jQDRl#Lt_2sQ&Yi;7h5jvGE|Y0Au3s;NezROZ z-l4laOM_()`d9G1t2?mRByTnx`oUf48av!@*w_p1G2!q3sPbV{qWJpKR#Y|*l$L6d z9BmtudcX9tx2={_5DEh)rzqJiF(I%~a3cBwwA77_ot_j_AaKLRPNrdpI&qN|GD2?% z+*}h@TP@W`mpk~R%wh_;Avl_!kf+_OPrYZmEqtjidzS8)2s z&v@n6XRu>5c=R&?rx{mGqMw25oh82X*I?_kNH%L!==WIjn(GY}*ITl-Hh9cpb(*<|h8;HBUu-bmN( zJpBW?ovnbF(Ym3+ws~@WBSB~lQ#(rk<|UYqy0tkamciTiPcp|$Co3Dx?74x z<&}wM{ev^_0`qZ>G*K-Ywu{979>nW(0v;U z?Sh!VEO?jBA8~g}B5B$)%vy^LU!vNIE#yhjh{R!Mg|Z8jKCnf%qZnEhq)U@N!1*HG zUwK8t&rzNyY%M4Iz*jqAK8)0s8>J7+BTR~c%-D;U{bhoS00hI3yp&Nj5glkCyun=$;44Mo}8dl zis8eCXa%ASXlOsxeL}rb7?)Rp-+FeiuvVp4+>==NYm3rji;$7)v0o`hH5~1JY(yMW zUL4h9T_c#*4d^w?C8<*B5Lshpcm5Jpr0QxYY#|OY^0%Lc(q7$;@wIs6wB(2EsgnFS zLdq*S+eK*`rV?~xW9v^SeJGs|aD<;u>BHEagwn@_mt;m^KbZQciD4*WL_!ma3M9GN zLNFM#3uE($~T`!%885FCH zyKT1pj|VEIy5bsm^COdi9@}~hTG^3+V>P3oFX0VDde-K&O9GBCJwCDubj@bBN~7iB zVC*fIx@=jYZlK{P9LYtRz65?aPn}e#mlr@chvo-|5k0 z_hNiGKD9S<$}nM2a^ztDt`m}P9*IKQ?S{|nU{7Y@k~q%h(atQ4Q-3E5%z(*@*Tkr< za5tWiulaIX4FM(^F?o!6{9qfU*~$v{c1p8bY9s2_x>qI6_jDd<7GuT;*5qg&K9Xoh z@<@Ixsq(nPnpSzc?Xac^(xv2kTzUzJcP1CkRBbOM?yn&0q`?ys*I(x6S)8^0p%b@N z#8#eBh9}SB$Ypr6M`w`Rxmj>du6yk1M0m6~&BzWp#u$1w)rBr&Z%GMo)JVaDY_(a_ zx+X*FQ&)+OK3GtV^KoichyDT$jC`~$6`@bz;?@o}2w3d4LWvwC&TM`T?fKdkq50LO zT%x35K($0(!;6%3CjG~h;E24P;9^{Sgk%$Ak<_i_LdZ*xG4C=x?fTM)lz`#GY)PsU2x zArC>JJrA$UGr29jdw<7^<~z%H?uXhhz4x&oF`6@K&y%*bf*|~l(UC%Zp z!KIg?*NWJ$=;|r;n`v-s`*>41fM3~VkMVG=BX_T%%&9wm2TK|_FC35W|4J6+O%SK- zY;)$SCR-)UqVA5*MsEC0Bwu?L$=7F?ow-VDK4Vb_x>#l1dNP;nk}Y&cl(~w6XP5q> z99Aa|S}QZJEa_lR+aatTYe=+P9@pL1aoxlCAqrz}{u!3%hhVO)=FhY;QxjGuv#+23 zn!ZPrT^KBbjLpU8u=LiSbAaaNNbkm|O8{0tslW3`?*G5y5xH&da@*BFfd*Cj7zPXsgu{~vg;$YBuRNNWHb#9R)jIu60=7geO?ms!v zaR-DZgHhj(`h$!5xD%}0S`4EKi=l1ME#jE_%fqutyj;Z*mdTm+PvK~Zyse|*prfRQ zV-#}Wt|Cg@)22X|GMLNrN2bfcloli2srTdJVLsNZRi~mTt)~H#-Yxi0x){wMD%csY z|M&mF@{O@%AJr86Jcv@7ZW!8eRkH>UPS24;wRd0}8kfQCU8Zdev@l>jmo`R%M>iQ6 zHSS=KZIGF1y9f;WI{PDU06`9>k5Pt2JxD$&5C^iUEBWL`)4PSv64up3sv8o z1u6W(ty$62ospuFGOjj~Td>3V15iu{vCA>h z7@>&SUz@3z_3EXw>t2hVOWhoctWJm}o%*vo50>O*s$G}q%A5H+slW0_7+SEXE!a|> zYf=#3_*~#ns11SHd+$H~vT<*3Wx4fOGo_CDv9Ev_J99i7y7Bo9WTlQ;R#;*mdhzri zAC1xo*aohn;9qNB*Ddk)ESOXE=(gDZ_OO~9V~;gL-qqde{Aix1rj$)DBTPY;TZSn( 
z<-zZ$(vzp}fN8=BJj&@ikN~@hzfaJ|_If;Z@$Zf9)hr1o-t5l2SF*{$y>eAJv}aWg zO=S$>D4I>ZFvBkZgS>>Mp>_PA=Q3ozZ-kP*364|TQdFtXSFnqw|1lUS22{O^Fq@xX z^7VgDk0!s5&yJ_3|9kb1i=*=Chbd$yDL%eIG-T<@(XU9~P?+r8M?*$ZQveA(AlN*` zDUz|MJ$*<$CjU*Nx0qOJ6bq)J*V0QFe;c*tJ7 z3F>;1l|&md3u+Q=ER4wvQ@O_mWF`ujxG;9|gKOlmxH4p@&uf@4RR9h9;LDC-Y*I$r zH!WGx+nHrZWT}ZQF*ggQsIQL^pO2swpv9;@9eEjnS=vj6WDeSQ)&Aj3{IG=J#PnnTWU)(6#3OfryPF?=G}tzbY@ zXFu!)K0+^qJ7FP8rT0MaWC)h#P=4 zu*GgunQ=;R+sFuy($7IOzbF>*+XtL-+f*P1JK=%`A3iF#DiU9KiIG{#1?pkQ7>b&~ z=uQ28b#(z#?bc7TzvAO>Q<80}P^uHQ&XvQfOZd&d$?v#LO3L(Bqz1may0o_5-%{1NK|DLBGXM)IaskhR-1Wb zL*8w_l#PPx#YLEKfixwR3bq<4)rEcvo23jQ)q#)J zU{P`yTa>~LqYhR@C=xI zRBrmH!xShO&wH(vT3{F`wZz`7qx&d+0MlBKm;6c(2mkwhlUY?#@b7R|<@rnU+^F>5 z9ja^29xWh?ASsl>%yc3zfX)I7!=i+h(GHQMsi`ta6|`j|y?%3OOOLrCC&@F1TAJ?H zJn+E@3(4|=yPoo)(b+XA8ucN~wO@}!Yzz1t)8@^SbqcSG+D<4};&AKQBWO=`jZz1V>R66fe!@XIH$Gg;U3eCDl|q zQC5m4g-6aEeO#h*hYdkeDhwj{|9|(xRe`V+Yg0PJ7xRRry0$ZQqyeDA`h&j87nLX> z!Mwvpxb|EM>9M^GQl!IIJtsOyA7re&sv5ePC{h(Du5ac5P6-%t$nzVT>y_+`F&t<7 zg!C?0sVs;9khE|y1e=B9i1NUqY$d*hzzir)sK(*O$Eqhz6@#~LwF-+qe66|%Q~1$c zBzXfp;lT6=!Kb}=n4bg-UndrO%po};M&R9$6MnP_PmT0w@<c(q|QK!U2zIM zqG=O{WdnMkjnwRhx`#@#3F)zzuv5&+KNC{ZWwrOnCYXGX0rC?+y4A%iCx`%n_jsRV zJdKGn6$_3ANA?jL87~%v?6$!===(eA=8yrOdmy)>f)&y~T7bK(&OBE)va>2hj3nS1t7 zY%07St02Q0^Gq4L;1}_N_%a&F{gf*GREYfOZr30{YYpfcDVPbm$fmo9+~r^X8yUDt z*N;|2OCL6{&p4y39N7?KX-v*$@L+1%(J{S8Y!K()ZKb$umT0`oCEJ z_t%xTq(2&L6O~{vt6&Bnoave_!eSkA5j!*72Cf4<14K`O&s*(r7>`zwV9HhD%-$#4 zCp5zYPYYJ<;t*Efn!%oVYc72+TpIZltCp#IKBYCmcNHYmkmV2>u&B&z(poP1-Uq84@6Pw;C02QZg?7t)XhP0XiS#q~RF3q!Sl@*3$m-1%Jc#Mh1QCAh z-754V_?h=?g=Yz*G&g28ug40Saf|+qdAtFudznd;D_wO!Ov(|xW?&a=xj356l3&86=6Kfac0eZhduiu3*{2_nIRSjTBibg9kjA zHM4u4^apDsnJ_KsNZkl^!0M)zgaF%=lGJ6=Z1PA+xEy|?zMq!c$lLQxnISw*@p?`a zT@x&B@CE61k9B5c-db~zlS~pVTPqH>^WM0*P9#ueVJi+*hFhU4aI?)=6lb`>OC68) z5l?U?o(MY&Yxf$q@aZbf5NT&;P|PDQihHX{%SxrJBcD3`dzP7blriIY{gj|xf_wm{ zc5Ase1}1Y8sj3D-RTjhJ*;O9scY3&oC|F$k1qyZ@1+#drOPt%}L3*r_yV^85ZL){# z<}6E*z1fla&)UY6wQbp>9O`VHB$)QOTQ9vx$@20~GOm=uDMl7bwHJd`(h5Zq{%#7i zbp;5XP(udhyDY2zWggAqrK%_9f&gB~{WzmOrSPP-K(O^#ai2Up-?^S1UGAKoA0B@> zo{X=K&(CUTFBGe1N;gu}X`P=())p$-elBN$K}w=-#MH)j9&!mz;%W%cp^@m$-ThHl zaSm{|oS{3(>ev85CPhFSd~AsXR+2Trlk0-Iz7`6#nnt0oEgRexFcM^jQ@|4KF|sjp zTE|O2yNl;7C>3x?*k}_qp_Ma%YI&APeC}}7rR(nIYzWuQWV zAXf{a+AC3eNhC;$;!9vwyA)r_xsNV#BT;(v!=L53_C(v3M-A){cQ}@<9LAei5-jG= zum0n2exyhea1bz$2!JfBSgBHnoICXHsgOf+AZ&l^$Yg0>zgk%H%f&=Dgq$s`_~}>d zw_T9XV-EEBnkG`KaA%*)@I)yb0PS#v;_40u*j5tP5Dw5{Vv=4=X?{^u^#{bDwCb^k zd}EhvV;x_)LADWr-rd1EyBaSF4yxBdcAe7l+zO*_>jTb>y zol@hiP&}Mc*kHi=n%tn}_HUD79tMCWCPH4u6BOWD_(>vSEfs*kE#>9;SW8#yt%&Ba z7|y20hbG^FmZ9#kL&lGEhBLZl_A~ar(ZJmMsE(Z99Mxw8F}o3{r5@KVK`g!UyZh#q zM^E5eoZP1w8T)F>JLxe|+(AF|V4T9^EOzdk=z%GiZ_xqkd*zN)|2ucU+i8L;3>j7B z9Y;JBSykWATv>Z#X&z@duP+K-u^tRR>>5=m;}luo{rU0XAA$d%TyAtM;O2dYHcY?L zDR`5(pW`Lq-K|59Q}jG>0%1cMlHbpc#nJ=Av7N5x@#Q)igTG*ah{+3-b<=gLrU_ik zbGH<2i-jfB=s!I(G%M5nI#Lvj368fp+gkas(9Qi)s zAZ3?x?1Bw<94a+5WoBFvlJI{NrI+5@N;CG-+fw0YgOEi@rCD?PbGL7VSl}B! 
z4u_{+5c0|dIg(|Ml^$(tz`%{ED4lb|Ry4@56&*(*d<`mdz|7IhC#{a~Y~ew6ct)6p zRG!pOuE8jh-tPF;WnC=~O7F@YpLQ)fE`1h9J6Fg~AHjSJaZrRoV3+<}mMjYq>HULY zlckP$?Ar2n7OzJ^Hnz6Wx=K(q$cME9EmC96vk#Ome6YSW2nD(#vfH%)az&Is1NW|T z6=1a~(%{-gsjR!S0+oeg%$HDcSLz7tS+qmP{9@*$St7>sgm9#&&C=@nZxq;cfEXS0 zbs3>4cQ}coMQ}S#d`}B8xKY=bVidnqOrX|%(;_a?`u}oualJw*mP>TZ6elshVee+J z2ra?Tmz=tAG95wLm-mzsUejl>NIwYublZJ>J`tCya!Q2--~o-Xm8CkRB$ZLY#nG6e zvNEm6X>BX91WQ_CR!pa9Q;p@O)Hwo%u^gP(0JXVS!vL*|KdY3kQFa{JW>a}P#REiuY-C&WxyOuXlBEJzNFZ-4 zbNL_YusmStlN$gYxjVH*1!0HB$J^SXMB8i0r5*Im$$A|j?c$ZcT!nJZQjPE-#XB<5 zjyDcrW{>y>I7i6tBj_68VOKC1JHICGAPn45n4_hN9xnjgSGk(mFQ^wyg3Dz!f7O~m z-80n~{N7A9RjU9{_8#o)W>tT~!KkR3K>8mDyTGD2d;^2Vx2~$<$~Ch$(5-CV=If*? zOAWMri%W@@Tbn8%K~!G_gbH1S54T)`gM$x=`W$AVZ+#B8N8vnY)-~FD&a68QV=+jW zPz{75r@sWK-mSe=E(@YWK{7fC4V|nxt=Hxv*oI!}_W~FHcbkI$=R$ko&;EYlfjjkn zeDQ*ighr_l6`Vue;^46Z-;uow0FOZy9Dr8!yN7GC7(SyDeEJ9k9G;W@<+)FQeVz5RH@J2ZO@oE>6(y%~_Mj z<1>^(`fWPh8DAVTeV#HXqs?_Mwcw|@`!t>MIBGh(^Os&ntc1K-i+|14DkbZMD?tKv zQ4!_F3Cz^4NstFwEid79q-_d2%Xy7@yTwbJh3 zpD?~&`G)M68bcEHkBW|<$-jwZC_+bFczbP<6FN0fhgG!pEu&jLxedN~L3RQ%hsw_ybYuZI<$toD4jNC}4Fpz$MuJm;j z#1i+Y_p?PWgZY4vpZ$p z?XoD{lx)}!tWsvym)ruH`IqO!BsRkFD zR;yZ#>JVeyWC-V=Iy#z!fgfd)59SEyR*H(FB1VC6If{zVyei`eV z9v>QI7;rDRo74f-GjpS+PU6Ib` zIdv|*d9Y69DlLsJ;ac%~%Q(Jao#bg1YM{-^gvm+@X6xVd*)m15fB?8AiG|Vp^W-Y{ zKP8L?bBY5jjiNyY^9B7$AL?eR3f9WGr$#`Z{Vk&pfjo;5NQ30-85M*gdkaEN(WO=r#5?j$0@lap#d-6A4ZC_u|K$Q@6~HC!aY(n}u4 z3$W?~v0O`*G;xes2b*J`?XX|GA(Y1L?ZTkq`N5KNij0smF`SpL)d?tSrQre5Qhbr?Rff2|Q(%(>qG1}AZqY{dxZ zvA>-i9-cgg5ACwP@I+=_i|*o z@jMyFy{-#Zd#V?Cw`Nhu;TaWslVk&th#Qq}{N%=BKT3G9H4LoQSoaF2ZRyt;F-7;|$s7X=p{zArN5nG7u@&=3r$Jb|qorQDskjAL8;SvObW z3|iLiRN)M`VsCrG-M&Rk;r+PuBiIAs<`a_2B;0iacoK{*jl-@%nyzz&X|ic6G-!Kx z##LR~u32k&bP`1CA7%h}9H9a(Z>QL9cise8GF(GJ}<}gvv5A_P^)z zYO5CaJuM%xjBg0-EB?6hLw|J_M?TmRB4YdANJ?euFdei!rQEUH-Ke#Zjrs+PWN$Pq z+Yx#>3~9divhUR%h9>qXx)&v+A5WTX?d1Qd(g)POK+MIZzX~6(U?(#>FL=vc<@d?c ze?IApQK^S}90l289nG}w;L5$Nu-X2ABBHz3^Jvol%{*U4m%%9=fh5!3bkChC| zos%mtQ$#AE#TO6`B#0G)>``d6%UHcs)(F%IEaYc&UCO&?c9ipE?3xJ(%6=yXR|LID zScnk`%*}ysaEA$Sg*X7OAP+!kAB%OD-i^}F>mZ!Vgo;gPXdZd-T(=k<+%bWO^a$b-K0dA@txmG-0g+%P%}n5#347fwg_djJNk$E#Hm ze^;9K#yl)J`os!Po99M23<+tPL4 z3U^h~D6~wp)W;`})^{6nTbG=VZxJbuM2$vd0gMU7=~9=i^-* z&N)S)kU&72BJnVc%n(}|q?~IRtR(o_f373KOK^@1{FL3N$sJ4#Kf29dIt-39bOE$l zMo}vRuCEKV42#(j9^?$=*%YOt^pJZ@G`mm;->8(-WNpeBi>ek=8jEuw`MQIo#1W^W zl#nraj^~yUDIsI>L(KRPrSVqqn82)|iSo#BwdTJwXO#3}1f@Z0lq6o(H$I!w;5WmJ zSrk_ze605SD$JW@u)6(%hQ?KVLV*eiR>d~B>%4HyH|xXEd6*&4VlG^US~4&=h8{CU zLHR)hY@l~^EU*RaF!2^yXLi-Sp_#xrvA3XMpCxOHE;tbu>{V_%gfZDT`u7oa7B+-Rqe0yL=wV&99bn>@RqoMsliFn z)&{AdHn+7}vn5xmpp14R|23scONteE#?D!>La#gfq-HJhg76}VZxe4Rh0wLRZskb2 z2gwXc>I(6%4sk#%m7`$}c1I)r!+Rg)T>_JQ;)UaRa(#KMsF}V@h6lT!{*zrJl{al) z{&&>C;yKt9eh?uxbuiGmKoP80Z3!E5`#E;7c2NeHwP|)8rQUbn41t=>KMyILhr7v; z4Wiv9@!Y@hQnkomU!J^6-(v|b5wAURiY4C);&JR7{d7=rhsp}Y5iryQS)GHowZnkP z=PV_4%HLy6On(wY_k?GIej#%<62)3{~l6%D?@^DtB%hR{pk>z64+ za6-iA}q;^_&M}u_hEpkqFKDeRI{SLBrqiO@h}y& zqv+wSGw#m@;&gU8-MJV~sy9lJDr%M=tzN4;V6S*>;`kQ!`qHXJb>x>K2&pus-l1Hv zsz7?E!g9FyANtYbWT}=@PAVC}Oyq!QRqJf-Afhc5rP2vb(kPI%Z9aJw7TaWxH2@md z%l3BIYHV{`25`a{wwCFgABG#O?S^cVo8oL_1@u@~nT>-FN}M%a&tv0OPWW9ybBF4* zZ+zzefLSPt5b^A4ug}^W&Gpm-J5u{k)`&7Wx$_v>i-$Q@=IE4Nk2&KDA9YDXVVcsqt@o?0Pi>+!~CfbDE}F6~ZVCo%jh(5uWFlGIclq1V3h$wV)Jh1d(%- zJj9yL9Oy?xR9-shfwu^e@fJ2YS8nc zT7l3i0J|y9$loOBh0J|)TH;I0R|gKvXs9MEabemuz4;<;H~IITosVqKiHYZ=xq1lI zrm!*E8ih&d6X8DmAJ{td6PUf#t5%%+iY&%a#yR+J(#I6K^PxFhy}mct=JVeuHOB+$$=$}qMF69I_LBjzKxviQk5iPo)guI;ganxMQ5rBVl` znfG-*o61OsY38-{ElNBK7wjk`YpaSj{S}ts1hXP00yo+tfr%g?!&x&}L{5wm{EPMs 
zN|mw%yS8~3ED+e|3oGw4MO*7ti`Ii~ZosQ@3VKbwK8uKhez}aJpFlxhv=wTy&KvzL z8RT`ug9o{Yg4tXWPT)Nx6!_DzAMIQ0N0^?YO6QXO^^&(^9br;9kh0W?- zV1bVpi!yj9+>Rb|0S|2`^&6$r>B-Qto3Wu*ypC58I8dkHJw%@1M#6q{QoQfsOZ)(uZ0#WXH4{VG2a}X65lMd|iRi#W_ z4CyM9;J}J0n=@4elxG6j<-soyn><*d31`z(ENLwB?vWBH^x*irb-E_7E2b;omuMG( zrQ{}>%q>2%2C;E8*{mLs#*qXB2CLPM(9S4u$6@;Y_~HV&*t2*Rhf;QfVzwAtHz9qJ zLoThv_Vt(}KHB_3;EXbE*v|#hu(I^jTPgM(e5iz%tS3A&m#ielQD}PmDAYs9ip%lg z@%2>9>RhQwMNOT%s{H!lx5^3=wZ{MbHB^|W(5|X5nfi()@9=E;^$2C;r$-lT5qh`m z9ctCVGJ~En+{c?B12g&kf0A73UVnE~1HxWe$k-0TUQV4<8OzRTuwB4|G=s^jl0+9& zv_(ziWuyXB!D*8yWp=($pGtYl#-x0Yxq5(g5CyKsPVFRSHO|mX?0%t?_3YkG#mibS zc~lhBz^zaHjGkUe9mcbXvWnNJv*DtT7@c7cJ%)Jq>xQN2FyF?X_CcCfW;_oZW*~W>C7+gvptR+o(Kon>e=r0GGJL}j zYhf1Oa&_o|z1(gG8)7w$3$S5}=~-SkVlf?<>(2D}P_M^ue`$gFEw{iNwyPvTX~8F9 zIH;;RQl*NKpKL0cQtQtiy2HVD|Ifd&KMR5i{2NxyKT{#6KmRku4%aE)gd|U4 zopfU@PpG6a$f^sft0ZXF(F}5C^FQO8o%(PgZK`HVsrdW2a*2ivF8vAp{hi$md=7@O&-+Ms}w_;w197pBdv;HJ_39H4;O zAkp$Tv_(-d0Y4%^+NnG*M~~4VmccO{IC@8EF|;~!^c3=p^nJO3P#+~*Zm{L{3h*Q9 z6V&HPDyUL$2NvQTm;~^_omm{liM?J%xb~r+vX&kSog}IZ;j@f?R^;H9-XrGV<8`)L zX9jP5j}=M`>#dCC7pp`Tsou)imA*dDYr*O3Idvo==yfb&5+}S$caF;hKhI{e2b(Lp z-|VB1+1w>aWnU7jvazYyK}kJ}v@mC0c#gU6$@t>>@>o6lvZMuVOcc-A;ju$F1)aN? zvgI@M^XFa#=)Ba-?6FQz-F!_;k6~3yM<#cZM1xXR)5mCbadjy##R_A;%OGPZwBIFh z6#QG&j=*VHgN}oF%V!_J8Ma*s%{ z^xXg5c8AcI4rhQr-{Yo4t}^gLUFa1A*<45oIu*X5bo&y(BhD9BLp zRBCU?U<(}b6F;fG3VDc1Y_LY-geXL zcQ2XaMZ5^T3_ESvU-^U2!*Wvdwtg>PJ}x{-7pucf%rnu}=IQ=-0e^ylY^w$4uhYqo5Y& z_rQzbF}Xp_x|OKV=ty!8dzE8i@k+c@Nsc&(KHlKk#TCz@k3B|jU5Rw|n5*n??LbIX zB-Q{~abzFj#sy93%c*CmEO(RJa}vkdJldIikLiw(BGBHLLz3L1ogRDgA~!R-Z=n#S zqj~gW6(mY4r8WG&4E$fzn08mNv1_R3%Pq|a`mf2{4XcYg-f^UycgQ=AkOdP(Sm=Bu?RXU=Cts4l zsjdIukc5)poP&oJoLKh{ZE1vPch8Ivfxi<+Qv$H78ft7}SaX4`uyJJNi0DSxfsN{f z4xZi72P+?cK2H1vm@aoFw{U_WaBSpopY9~Xbqu%$B*WXhAi`NvbyXgGdBsWaHUKbt z%u#T%rcGa65@&4Z7wa3aMZRhsKi&ZQ#XaRHq4jddk7n^4Mj=qu>NDv1uy?pFjD{ck z6@kh6AlV8xrZ_1PbJ{GvmRE;7h4CoZ#Q7=%ZOGBqOOggixpnX zE1~kRA#VTiI@6$Nk#tb)Z7Y9ztR5e3C#;gT$dN?;0(Y8U_}^;hfhEtSTt~fWTzlJu zX>5bRQupw{T zW7`C78S86&)OOcxn=T!4%#yNNkYz}8z%9$G>Gpn#lwQFWJ?54Yu zV08y}U4BEA?JqXrr-M5q9o)HIg|RpP>_zj3V4f-4=QB}cQWJX@MC|M5zap$Q=1LxH zO`l@v%J-4UBCfOYRx!f04D~eFQ;ASFPa(sD++lp1t{02ohYtI?FwrUW+hbdcQJNNy z8D@c@%ysIm1htRe8ASt|wifDA_No(1^oBS?BgkNsUPr;d*1keGTaRJx*z?FfIZ)d% z!91UG`$gns>%@KbkYB6rG*nKUD=ZfD{%OnwFdK0Gh&Cke*&qq#&mEorCo zQ^f7`Scu_H9&o`8>D|pE6NP2(w~MdmSL-L0b?cPMHY;LFfu?*nGLfbw>~;FVXhP;< zQ3Y?$bG0)zHRC5m@9DdOxouYVI&QKRvF)~%oT`hJrK%s*ps+;d7b-wMPcD-9JKQzD zs%Dj-1MWtOrK;6+Yqdsdb{z5Ves%e3B|Yz;G~To`_g8*22iqjt$-tOg<(I$?3*ZKs zpw=pCe+2%61qHEI^TJWr{2qER7Ne?DMrC8E8IH`g#-bc}Iq2|mh*-Z60KAGPcefW0 zDk1SDPG-2lUQ_V_T8b1CTo6RcDpoG3RsNL?>95F28lu2~tMrT`Dgs{0VVGe^8GVw| zI66U`yh63)sH4Y|<01viHb_$$z1;>17FWE{dxae0H!q1u?+$Z_l(2z&t{Pu)ooTM% z+2;8(C{vyA+HGV-+U90Q>39aK5n@bjWf$4HVaZ1pG>1Xm8ntjp<#oP5lo1zpP#v}p zl)@gMx6LcCJ5e(1elpj&Gyg|+sc${LWoK6mp-d!Ma*H5jV%#K?`Z2q+r6B7rk~t7j1AOu8WRs8Oe05 zCsM-f5U&Z3owyeuS!6 z@Xr~YEz7UDlAll8UK%elgkiyx05)$JVyDbIg@o+Q)xs;S_Ily&vLIkLlVDfFUOifb zbNUhFoiD(-_HJ?6NPK~iLa705$Hu*txd7Y{048a%dtfq4?Duclu2Dbsq zcyJ&rSf{eV1r)ZDa-$s-wyw%dsXrr!Z;)f7#_;(%yN$_LkzmrXEi#<110bp)@gauE zYGLSk1y*gGuszmPL}KHTu$=%bU*DIf5lRuHo-c$JWqq(K%YwdONfn!Xf{!a;FgZp~wt&pqqv3fym>KJZe zkcFAqQJ#zn{7lT`|G@KcX@e&JO(-TNpp?AW^*b{Dv?paoN+c%*eHF!UiW%eb58F~o zdElS)tzc`@WpD={haIgY&Nk-ETG}p;mn(ylV^(Ew=gBRvedD@NRPiQ**m$55B{(mA z>oJ$bZ#;fT4v1d}EAr;&S5>R7$TODMvn$R_^4qnp6ga!Wa!X!g$J<{7(73*YdUe+8 zqnY%&3TF4dlq`NZl{jTF$UQR#iE?;{@yxUw{sv8Kj;WsYQtA#XV`;_(U|ZPnIm#Tz z+IN^c?#n@W>nuK{1P9Umz}!LP42`kJ5S|Ff89yPt3s#(Uj9AZ`#N~04QLw7?W;v(W 
z=5>7*kJX8Hqruahd%wq4RqBLo;3?JhMfG7#aMtnR7c#qTOp|e-G`K?1b$8f8n+&Bz z0z69T^XHL2o}9qFwX;;#p~852UBX4h@gT&db3#8#Kd*yuUZ4oF+Fumvv$C*Irnf|4 zA%&fHf|c2MamifWhF*7=NU3K#vPf&dr$@aiKf2WrR%i>BV8Vpjc@OCX1Aj1TSfpH& z5#Al6nX2SRZaP>4z;Z$wm7G+MQ4Hz=DPMu)asiG{{O?$VV>xKwwXktz-8*)-$DV;^ zYdG93M2x_B1e*nJz&i0c!v_XsRu6Nt?O1s_&4Wnrb($)wesDe{2-=@NM&2@*u{9k|8Ip!$bn4$!i1c*1jP4lSJqtK@ z0u#&-$e^Bs0p_Swyqq7Zu{U9|n(yVpkxTH{b^>G3<8?sjJ1mK<+KRt=HwZDIJC%~| zHr&G32*IvMol28Lec;qn_Q=>+Acfj*N;*uxQoIk=xcRSKL&}J_dXXV6#Vx)3Wzxi1 zE%}J-jOz9P^$dj^32$#wOsd9>n;*t@oChi@ixhRfWIn?+kwOx)c)5Z_BI0Brvi7np z1ElQ4`r!-7&)$l|thn;@nCUGK_v!{bt>4)_*oW(5Xd^A-e+OacfiL!ZwhT{U--oaL zAzEoozk~KAwZE?;-?U-TJcTPl-^+xV$3DmLeaE=%(A+3E_xu>iZ%qw1Fe9haqX|JS z-mO9}g5P=1JboH!OaxYqUg>J9Ks>N<+Q+w08E#vZsrt0FgH3Frr=r%ycnGcb=g$ui zkRtGk{Im9f=kfbnJSn5y;SlDQepaZ8rbG+rs0a__>^4&^lmyow|2 zVl&A_9WYyuxvQ)z6DjRN#;;lJ4fX|5Rd|F8-JxK{#!CRMS$n3B;#18*44vrCZN`$* zJelnY8n_YoEGQ(Z54PbrHzsP2JqO3R0?I-?BB;LmF!YGOcno zSc{3A#D#8P&%t9{j8p!Bw$YMaQUV< zG4l#NLFbV@N*JU#o87IKVBE^jFwUx*1~(xG6bdaUsr4&<3!WusW%boKQAdo!W#qaC zGSWBm!ig7xr}enfm0w$Ru!jin`~N-IiS?lgt~22?DqT0d-QM;=yC*} zKlMUJ-~L@++&Z>+{Tx*O=ANDJTu+ZKcTUd_kG~vG##hJZXY6`9{_O}JI1zL@SE^)& zN!}%Wx@R%~`N{G)nuY7RKUevxi8KX_N-O^day157_=wU4biNr<>;$iFP7>;*Bx`6g zri?ji(2xRnViADc1a}b+j?#-nirm?SDyq2>P~3B8`(lO&nc1J9=yP{Bw^yA8kxW#W z_2tftVy=tl$|yFOm;{yZMVtmd&KHZ+&$O*^aXvl%JC<^yR8=W5@aI-Tp`M407#1zh zpLj^~fuMAI5J=AL%^KnjJ0h@gV~my&0-%VS)5mh%!>6UCMrv92>I=&exot5=15Xvf z6Xm1DC3*9$?eV=e=qH=td%=-(Bq%;GijjG-Jg2df_v<$wv(QN}Q~V%&fdkt(v8oko zQ^LTta3+(9*9YdbyXvBWz8c>W9x^t`0>4h6_x~{%aFdan*AL=6jC(Z)glw{vI+k@9 z9z;LS3NyR&5?E?YG8pAUVD+X!1|v?b$pKSjfZ?WVa}Jn}7t9H~*(6f@J%({!#&Jf# z6Qko`NVF;%eTEH~YL#?l=KWxB=+BZzT}XlzNg9 zTJIAuvMf`t^bE~p<)Dy^3|CX6*i9kXKnrQ`V#XMq%}1LAx~FNN6$x}IPi;;I4Wnop zM&EQdd#oAx)^vI@>2P2J=8hKD;pqJ2Zz z476(cNQO~)8|zTzJGFN!uFjU$-Etswi(@mVTQi*y&mW_zY`fl1CBj+%td9?*K1d&@n1^K~a+gaOgZJxOE zj4l0Jk2KBS3yL-dpUG2U*UL*7E51u@x+*>)1z+K5C{8HEOV?HsFFH9)c)G+Thm;{45m**f%aosjj;c&Tm)?O8dQw^JYYShE?^jHIZtwowvZ=-S3B zFS+&290u-b`O?OWPU0S2!(wN|K0QlOoLxtr@;5% z20>TYLy+C^Xs#Xjmf=D)_-gH(^}>bqKtDW)BqwQw)I$EaYke)74X|ODX*C#XB|D?% zR{PImqOdl-eTyGPfHtr0kh&obHCPV(8ahIEFlF8XH@q8fE;xSS$Nc3gOG}7-OY7_E ziVf8ah&5%%5&tyY^CF3}cov5nydU_D7GdX&Ir@g)Pm~@@5=F^3Y&{l*qc1A zIP*mdm|i1I?d_o>v;k{R$FqN2S`!f7*W~_dUeS_+>exUXSTs98byOlTd-fsEE~|PaAyq=bugPc-{E?m6IESv%3W;p-Bwu3Vc^W1jjH-T`u<~aH@Nh z=+i+l$>seRV;YNjmOOE8?_h|WWlHRJD;42UOSxh3Iy}2w0;#3bZ{0C;EF+}0= zuo*w6`SK|@f+yz?!IZaw;Z+&$Ya?N;36D1?yvhBmk?)St&~=yZKHDmJ?mFL1ih-XB zp`@c9vv8fl%5w=f`G?+3^U)+baX5+sTFcs?B+Yj+{3gMD*^ijepiDF&Jn5(nZ#FOZOUgN6PnXP|?9?1iw3 z2HD+GhxIkx?pPmuK_CK?Z8=YG67gF0^{Rw%xyKMV7pJ^xk)KH(7=3sL77pKA!ZwRw zWSHK;63+G6dWBI&mcq@3PZ2T=?={K>ld=f9<^ot5!deNo$kLw&>*a^Ks7kRVHNc#} zqq5b3>#v?{(6<4lh=6D6uJY~%tYMAt8U)J;HfpAr^8 zSGwys$DW{iuo87rV8#0HwIMS0`S|Sc6Grrj{_tOTh>a&F3>J@NG3{CJ&gE5IpM(-i z-oo8-^-miPJAa3h0J2|UfniS1pZY^{myU0$XqSY5dQ;qP&prA5kD^q zB}cZdUAi9(=tgTpeL}|djn8kEKntyqPr++jTqYe6cg1Cu<-z+ZmA68cpIb9hEE>?( zjNl95{@r-9`&CTOQ%&ekF+|g24$(fl8a6AE7-pbOhUvH>MR}kuRf}Ei@?NS@=_n#- zTr5?^{{$j}CJ-M3Bpe<0CGf*}YBP;>QUkT-Bw4TyYB@id4;@x0e}g5%c=)vL+zh#X zqVN%)LMl<#>x+CSD=L?bN3xzIX6;9^hT!i87T19#f4;uCN_;;xD{*@+Ih)vbUS-#< zVs0dZiB~7naiLyr2T{RuHbw<#b2Ef}4m*#v% z_z^e!_V+)1#KcwiuqOM`rN}hQc5)wS~@yd^OGJ`VOmHEDb`>v~NQeE?Fa1r6{ z*Y!WD&KIVG2uKlE3forXTT9b#+e2E{B*2M63~xC;6s!H0at_lx^>d}s+M0h|E3bP& z$ejjBG+6K&Ttz470=wtBucC0;FLmCp9#_WW0h_za$D^oE@+6`RyF)X&|23H1mr}*U z-OH6y(=ST$i7>te0}aum2wslSrc~ z({+fvx`;g#lG?^CMpT)=t)y^o@F*`f#;e9bxcnSUw6ZA9*pi#IXs6?Nh&C`@E|KH| z4GFlc$vOg8r77=OM@ukA_EB=#yAYybtj_|zoR1Te(+P``aSMWL#GBo0s+&oe8VCQX 
z#oJ(k5F+fSfr<9D+Mi5$#=!V)>74hYymMME_GvrU`;*((hrf>cQMyCi&>zBiuyFkS z`0B%JZ?6yk9rgSBd&Yn9HS7=f_y05)?u`x(_WGmI{-64T-Tl4MpIHA1`cim-d*LOF z{VBTvd)NDS<$2qVztY!#^!|f=KEi1>IluS^Ot{~#*yoe;$yaE>upg`0{dmcJ+_{ zB2N`7M*Qz$iw~BoFhHNxOTZT9Ci>|%U* zbv(H~8DFxC>&uJt>Cp!;RQzpMinKIhqJqu9kW9sh{{eR=g?@!|cIOd1XXYc25G?gs zyn3v23&I#SV;mjbJADEFivt9f3iSC0dveJk@Rvlm{W`2pS$UWNgB6Se(!D z>cN*)T)%gJK8vHB-!4wr1iskF>%0i`(?vY;5f2pneR6bxT)Oae3!EkLoHSl1Gx5um0%Py6 z`Oo9kDv5FG9;;vY^ZO6%0rU_H@9IvT7BAqd%t3>V_$cra8HTGFLKs6X1w`N>IANB= z^e%{C*zis3XvQz-B8it`wJP~#^|;Txbt>;sLYm9?4yh#wWbCWya`Py@t@>H}z|Qgd z+tU}vu=MVIMEFOy)OGkGIgL|27gC}jE$QnAn*99!-uRFCPT+HBPg%GTV2cyoHq17G zB!>pESjfkM?hwxA7+Mql7RzpF?jj%GKgGqAg_ruHeA-cyb;I<6k#2Qd!*VO-Pk3vji<$2*2gaZ9-kd`M%M zj_Q};9iH(3+YubZM+z=AVsmo8DCY+CR7A)`FYzYKeT3iVJ_O%}+=P^mr@2R+R$a!+ z9E#hcMh<-oeJr`rD-m2s@$2@rH z=>Mhk!RiF<@|gSO31O4!25%zpT!y z3}@8<)Cn(g5UuoQUqZ(@jOTFr+qYuR^|2S+^`mHx@r|n9i^DH+022?^(34={W45AX zl{62($N@+h7+Ykuc<`ll{*bHyN@Yi0?$YOuy|k{2sXfQd==ZDBlVh+yew0mq`;3=1 z49=N0mPc$2a{Jhe>w4N&JN+kj`ttB>BDEC@9XAiYw9Zp9qq6sfqc5%d?NO&MqWkTs z7oKrFCnlJ4iqK4O>CP?b`NJ=50PS^Ye%2Ka(pBg^o)vaa-Tuo~hQp&y(iG-CicsD|*boZpJ4**3)M3ik)6hui%?!5&H`l2K|n^7kkf6$NzwK z$?S$*d+(_G{Q8y{y3}63`%%h2A-Y-!D$t0u%dX!Vu5i9lBW(Xt@ac4u_K5Or!q3pn zmi6xJ5?=Z|M0+$3vN$=ypj(=s%9~DXa*_E(;8AZqTi)AO-LUd$>^bw<*}#7b;~Ou$ zkPMh5VS@Y_Z6Al+0P~hKjq)s*-Mbpz#*w)nRL;7l;h2`IYA{VIYAhI9)N0xa(rDHvM>&IAA~j@Ty%c2PP2H4yQI}^-Q0R* zk=I;X4leq86|dqjzJ09dckyte?MHp8^vV}(36}%9AOK(KVECc7H=Q63|IX{f zU;9Hu&&5xM{@H^68T1FE!GXa4jQYEy5#oRLhJ!c!&vU%q@IP<(pEvx^8~*1F|MQ0b zdBgv_;eVbN|1-gTi(`3yWq0sN7!zz%V2{G>#7@Aq#0xLg9q`Vey~Za{zMcm*?$i$# z9||xb{3#VUMHErp3MNSqz@I>yG@UCag6GqYE?ql3`iqVQ(E&S5yhYZemP~Qok+{QQ zLC z*GY-IP3uluyOh&T?^|;plk>0UvCWy&&#}Z885A1WtV-HMFw1L+u#J6h!L`3d?(}*( z{_RN1oSZ9-4A&zb7IX$+%-_6(YX4DcKwkpc>QZuG3+>*sU54GdAY(!x|KB-*IHE zF2>uZy?xGyRM|X$+=rHRDX6qCDlDXUaR3Xk9DswRF?8T(i)djvvf{%D1n2pruiSDTmtX zq3VX$JlK!@vcZ15k*lwI^Qs3~3@m^lf9bD$Pc6H~`&T{GVxZv^Yhg)IB=Gh0HJe`b zfQ#V<7#*o;<^q3K+bbUOU{oA3B~uU2L^MghdCEZ!pLmda#f9k#EW!Kh)f{H-vs=Wj zeLNL-Uy_jY_2tQvV+FeP_mkD%iJxQ~-t8Z1qJkGagrN$0d=I~)l!r^G!0-tcHr?Cl zEMB?nu7$mJ)ZI_IyI+DZbBddKg2|w;xp;ZwBQq%qV6g&B^? 
zc9_oV&5RER#v^d;mM8NG?$EHr@L#8ynUyl%zuv8OILxE2rtnSPtxwFmPdO8xd|;)v zzD6gAzhCpMD_XjgsSr6B0ieRcnk@v`|L%padFZ2&+@NN0I+h=L(Yw@WG|*k9%H&1h z8b1BBmwc%E&#TC?#CLqnpLPJd#sEaQ@7yVo+S7V{yLPeRNvt@@u-4I1i=;woS%&=5F%Dez;gRz|2r@CNYXur(rLe^Al$Y4Got-u1#<6kVUh(QKV0FsHsD^~2LsS=JkdC1^Ur z(d=srFUw#v`~`{duHuv6yI~X%IUbQfe5cy?)Q1OX&dc6s$e<@_i_*pepu#+4;lt;x zLtU>DiM*wn{TCHzfN%MzN>T5;bh<{493-;P6C+r#tIO*nVz7`0)sn8 zO7NoeB#!T|INyiJKJ!t04v=iMD+*Xjm?PvLYq=+`mSp&BZNCK?Qt7Edu$v2ecdq3)`o9?wIH#1Mk7WUw%r z$ma|u0h~%>MP83rG0>sH^5IAiPhfqW!UNdg)*c^nR+$v(y`K9K-u?Kg;b?Wi_5}*V zZh2&poy!Lw_;&O2`(j4-(H>pBMS0^Oi!1bt8SE!}u(f9I#<3Qs^2-?ONY(sC=_Ob^ zZvE7@W{{(Wc4Q3=ld&!b_k82nx5n`lnVqLKDDWy}q?#A%MVa0(i+m0ju zoA-$0cujL@U@XJv3=k?!dp7hIvuz$r$eZhwPXu4GD`YA+WlrPo{&?0Xd+_rw`f#v6 zw1=U-t)VaWBF3_7k3}r2uW=-M_DF;ca^W>l8GfdOAbhdJrGcd$C!E|%C7X)B-v0zW zfVVBK^QNH7bX;cfUVfRd-TvNa+ZaaH7z9DzYZ*ob+Mz$xRuf9rIFGJ8MOu7c_xUKA zC65IB9*0;;lHED>Z=beu1xBJA21;rjB#(cs~mUe-wEa*Y^}ZsM3_dYFGq*O^OrpY>tJrB8o2UQWLBXoxAddY#uLcRp$bu5tD)zt}mXX0?)7oS>rS?E$^ zS!nfo3(%(KJ<}m(aS@*4zG+fp^~RtIR&5pmFSM4@+CqC3Su7kvF;Io_6=(9t#O68e zE)7?HISUlpEkp%{oSnfQPTG!nPHwYNFkL^@s0w(#Ba@RSbl` ziR6vfItgk*Nhuf@wEvLMH*kLaIGpph(4Gnwd+R!E0%O)3zf6Zoe)tL!`VF@ePAWXS zTL|at-W%yD%4u(}KU*>I8#-MeWr3?)&S7gG;FPXLo8~=*#u9o^d{Xl)n#RWk`ZaN?ERw-QH* z-NxxWa`%FmFYGo+5n;C?pMrf+1@nM?nl8|{+l?}I<3O%9+I&KIHMn~lPudH&;kr7O zTsqGkcVw6|Tu#Xp@(;bt`>FjhD|iW%(_SE6lq~bVc%+s7Ae3RbzUBYrokfG7!=;HQd@OGXswuKH~hV-gW_+s zVUHVKfQ@+KI+!~jrV-vC2vUoYhI4N@d0A>7)IBYy>4_fzNC zP?|mTf4tO=YBblW7a>}(eMiggi0kfZABwMYUHV&kXXIjJqTAe0V+~(D^I?H_+Ce9jjvx8Zip%gEJ-Vd(C`hQqh|J{$qD7U?cNqY7P zN8`TxMzyVkX1eZw0rKg57$XDuayYf2R+JgJ1=9d^O}oa?@asq7i!pXZyRUA#Kr_m2 z0+%34SI{mZV&*Y~kHbn~H%?%IMbTpJvE)xb^|QNpo;x5Sd1ezVaH?J%q3gNB+($wG zZN3h9=xUyhKKr*pB)`*NBz}5Vh+fl){bc=}{)YdG^H01qJ0-PQe=bjxmp~M)h}J}m zlSh!IFxF`h&3GVfy~>@1U&}jt6wND-(PuXu&DLq|Fh6u0ksh$0<`3hik6|L0|0*7- z`b}}n_{F(@v%XE_Y%IGLpV#T*H!sj}M15%Llkn|@w@!WIa5=))QclAAC3C(F?m`hW#p(9H%8!2OCj{HRw<>1VWZ zTg2Y95~Z0>5E|IC65Gu>Illh;@yYS{@*isD)5-DNqTsya18pIzMNFr;+dI0Y$@2Q$ zZ@?NReUTv5>#=Li*^Pbu)7$Iq_4ayuy}jOEZ?Cu4+w1N1_Ii80z207LueaCR>+SXS wdV9US-d=C7x7XY2?e+G0d%eBhUT?3r*W2su_4ayuJ=g312Xc7;Bml4p0C5%+oB#j- literal 0 HcmV?d00001 diff --git a/dirsrvtests/tests/data/ticket48212/__init__.py b/dirsrvtests/tests/data/ticket48212/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/dirsrvtests/tests/data/ticket48212/example1k_posix.ldif b/dirsrvtests/tests/data/ticket48212/example1k_posix.ldif new file mode 100644 index 0000000..50000f2 --- /dev/null +++ b/dirsrvtests/tests/data/ticket48212/example1k_posix.ldif @@ -0,0 +1,17017 @@ +dn: dc=example,dc=com +objectClass: top +objectClass: domain +dc: example +aci: (target=ldap:///dc=example,dc=com)(targetattr=*)(version 3.0; acl "acl1"; allow(write) userdn = "ldap:///self";) +aci: (target=ldap:///dc=example,dc=com)(targetattr=*)(version 3.0; acl "acl2"; allow(read, search, compare) userdn = "ldap:///anyone";) + +dn: ou=People,dc=example,dc=com +objectClass: top +objectClass: organizationalunit +ou: People + +dn: ou=Groups,dc=example,dc=com +objectClass: top +objectClass: organizationalunit +ou: Groups + +dn: cn=user0,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user0 +sn: user0 +uid: uid0 +givenname: givenname0 +description: description0 +userPassword: password0 +mail: uid0 +uidnumber: 0 +gidnumber: 0 +homeDirectory: /home/uid0 + +dn: cn=user1,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson 
+objectClass: inetOrgPerson +objectClass: posixAccount +cn: user1 +sn: user1 +uid: uid1 +givenname: givenname1 +description: description1 +userPassword: password1 +mail: uid1 +uidnumber: 1 +gidnumber: 1 +homeDirectory: /home/uid1 + +dn: cn=user2,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user2 +sn: user2 +uid: uid2 +givenname: givenname2 +description: description2 +userPassword: password2 +mail: uid2 +uidnumber: 2 +gidnumber: 2 +homeDirectory: /home/uid2 + +dn: cn=user3,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user3 +sn: user3 +uid: uid3 +givenname: givenname3 +description: description3 +userPassword: password3 +mail: uid3 +uidnumber: 3 +gidnumber: 3 +homeDirectory: /home/uid3 + +dn: cn=user4,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user4 +sn: user4 +uid: uid4 +givenname: givenname4 +description: description4 +userPassword: password4 +mail: uid4 +uidnumber: 4 +gidnumber: 4 +homeDirectory: /home/uid4 + +dn: cn=user5,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user5 +sn: user5 +uid: uid5 +givenname: givenname5 +description: description5 +userPassword: password5 +mail: uid5 +uidnumber: 5 +gidnumber: 5 +homeDirectory: /home/uid5 + +dn: cn=user6,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user6 +sn: user6 +uid: uid6 +givenname: givenname6 +description: description6 +userPassword: password6 +mail: uid6 +uidnumber: 6 +gidnumber: 6 +homeDirectory: /home/uid6 + +dn: cn=user7,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user7 +sn: user7 +uid: uid7 +givenname: givenname7 +description: description7 +userPassword: password7 +mail: uid7 +uidnumber: 7 +gidnumber: 7 +homeDirectory: /home/uid7 + +dn: cn=user8,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user8 +sn: user8 +uid: uid8 +givenname: givenname8 +description: description8 +userPassword: password8 +mail: uid8 +uidnumber: 8 +gidnumber: 8 +homeDirectory: /home/uid8 + +dn: cn=user9,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user9 +sn: user9 +uid: uid9 +givenname: givenname9 +description: description9 +userPassword: password9 +mail: uid9 +uidnumber: 9 +gidnumber: 9 +homeDirectory: /home/uid9 + +dn: cn=user10,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user10 +sn: user10 +uid: uid10 +givenname: givenname10 +description: description10 +userPassword: password10 +mail: uid10 +uidnumber: 10 +gidnumber: 10 +homeDirectory: /home/uid10 + +dn: cn=user11,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: 
user11 +sn: user11 +uid: uid11 +givenname: givenname11 +description: description11 +userPassword: password11 +mail: uid11 +uidnumber: 11 +gidnumber: 11 +homeDirectory: /home/uid11 + +dn: cn=user12,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user12 +sn: user12 +uid: uid12 +givenname: givenname12 +description: description12 +userPassword: password12 +mail: uid12 +uidnumber: 12 +gidnumber: 12 +homeDirectory: /home/uid12 + +dn: cn=user13,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user13 +sn: user13 +uid: uid13 +givenname: givenname13 +description: description13 +userPassword: password13 +mail: uid13 +uidnumber: 13 +gidnumber: 13 +homeDirectory: /home/uid13 + +dn: cn=user14,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user14 +sn: user14 +uid: uid14 +givenname: givenname14 +description: description14 +userPassword: password14 +mail: uid14 +uidnumber: 14 +gidnumber: 14 +homeDirectory: /home/uid14 + +dn: cn=user15,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user15 +sn: user15 +uid: uid15 +givenname: givenname15 +description: description15 +userPassword: password15 +mail: uid15 +uidnumber: 15 +gidnumber: 15 +homeDirectory: /home/uid15 + +dn: cn=user16,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user16 +sn: user16 +uid: uid16 +givenname: givenname16 +description: description16 +userPassword: password16 +mail: uid16 +uidnumber: 16 +gidnumber: 16 +homeDirectory: /home/uid16 + +dn: cn=user17,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user17 +sn: user17 +uid: uid17 +givenname: givenname17 +description: description17 +userPassword: password17 +mail: uid17 +uidnumber: 17 +gidnumber: 17 +homeDirectory: /home/uid17 + +dn: cn=user18,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user18 +sn: user18 +uid: uid18 +givenname: givenname18 +description: description18 +userPassword: password18 +mail: uid18 +uidnumber: 18 +gidnumber: 18 +homeDirectory: /home/uid18 + +dn: cn=user19,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user19 +sn: user19 +uid: uid19 +givenname: givenname19 +description: description19 +userPassword: password19 +mail: uid19 +uidnumber: 19 +gidnumber: 19 +homeDirectory: /home/uid19 + +dn: cn=user20,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user20 +sn: user20 +uid: uid20 +givenname: givenname20 +description: description20 +userPassword: password20 +mail: uid20 +uidnumber: 20 +gidnumber: 20 +homeDirectory: /home/uid20 + +dn: cn=user21,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: 
inetOrgPerson +objectClass: posixAccount +cn: user21 +sn: user21 +uid: uid21 +givenname: givenname21 +description: description21 +userPassword: password21 +mail: uid21 +uidnumber: 21 +gidnumber: 21 +homeDirectory: /home/uid21 + +dn: cn=user22,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user22 +sn: user22 +uid: uid22 +givenname: givenname22 +description: description22 +userPassword: password22 +mail: uid22 +uidnumber: 22 +gidnumber: 22 +homeDirectory: /home/uid22 + +dn: cn=user23,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user23 +sn: user23 +uid: uid23 +givenname: givenname23 +description: description23 +userPassword: password23 +mail: uid23 +uidnumber: 23 +gidnumber: 23 +homeDirectory: /home/uid23 + +dn: cn=user24,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user24 +sn: user24 +uid: uid24 +givenname: givenname24 +description: description24 +userPassword: password24 +mail: uid24 +uidnumber: 24 +gidnumber: 24 +homeDirectory: /home/uid24 + +dn: cn=user25,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user25 +sn: user25 +uid: uid25 +givenname: givenname25 +description: description25 +userPassword: password25 +mail: uid25 +uidnumber: 25 +gidnumber: 25 +homeDirectory: /home/uid25 + +dn: cn=user26,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user26 +sn: user26 +uid: uid26 +givenname: givenname26 +description: description26 +userPassword: password26 +mail: uid26 +uidnumber: 26 +gidnumber: 26 +homeDirectory: /home/uid26 + +dn: cn=user27,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user27 +sn: user27 +uid: uid27 +givenname: givenname27 +description: description27 +userPassword: password27 +mail: uid27 +uidnumber: 27 +gidnumber: 27 +homeDirectory: /home/uid27 + +dn: cn=user28,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user28 +sn: user28 +uid: uid28 +givenname: givenname28 +description: description28 +userPassword: password28 +mail: uid28 +uidnumber: 28 +gidnumber: 28 +homeDirectory: /home/uid28 + +dn: cn=user29,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user29 +sn: user29 +uid: uid29 +givenname: givenname29 +description: description29 +userPassword: password29 +mail: uid29 +uidnumber: 29 +gidnumber: 29 +homeDirectory: /home/uid29 + +dn: cn=user30,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user30 +sn: user30 +uid: uid30 +givenname: givenname30 +description: description30 +userPassword: password30 +mail: uid30 +uidnumber: 30 +gidnumber: 30 +homeDirectory: /home/uid30 + +dn: cn=user31,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: 
organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user31 +sn: user31 +uid: uid31 +givenname: givenname31 +description: description31 +userPassword: password31 +mail: uid31 +uidnumber: 31 +gidnumber: 31 +homeDirectory: /home/uid31 + +dn: cn=user32,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user32 +sn: user32 +uid: uid32 +givenname: givenname32 +description: description32 +userPassword: password32 +mail: uid32 +uidnumber: 32 +gidnumber: 32 +homeDirectory: /home/uid32 + +dn: cn=user33,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user33 +sn: user33 +uid: uid33 +givenname: givenname33 +description: description33 +userPassword: password33 +mail: uid33 +uidnumber: 33 +gidnumber: 33 +homeDirectory: /home/uid33 + +dn: cn=user34,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user34 +sn: user34 +uid: uid34 +givenname: givenname34 +description: description34 +userPassword: password34 +mail: uid34 +uidnumber: 34 +gidnumber: 34 +homeDirectory: /home/uid34 + +dn: cn=user35,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user35 +sn: user35 +uid: uid35 +givenname: givenname35 +description: description35 +userPassword: password35 +mail: uid35 +uidnumber: 35 +gidnumber: 35 +homeDirectory: /home/uid35 + +dn: cn=user36,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user36 +sn: user36 +uid: uid36 +givenname: givenname36 +description: description36 +userPassword: password36 +mail: uid36 +uidnumber: 36 +gidnumber: 36 +homeDirectory: /home/uid36 + +dn: cn=user37,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user37 +sn: user37 +uid: uid37 +givenname: givenname37 +description: description37 +userPassword: password37 +mail: uid37 +uidnumber: 37 +gidnumber: 37 +homeDirectory: /home/uid37 + +dn: cn=user38,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user38 +sn: user38 +uid: uid38 +givenname: givenname38 +description: description38 +userPassword: password38 +mail: uid38 +uidnumber: 38 +gidnumber: 38 +homeDirectory: /home/uid38 + +dn: cn=user39,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user39 +sn: user39 +uid: uid39 +givenname: givenname39 +description: description39 +userPassword: password39 +mail: uid39 +uidnumber: 39 +gidnumber: 39 +homeDirectory: /home/uid39 + +dn: cn=user40,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user40 +sn: user40 +uid: uid40 +givenname: givenname40 +description: description40 +userPassword: password40 +mail: uid40 +uidnumber: 40 +gidnumber: 40 +homeDirectory: /home/uid40 + +dn: cn=user41,ou=People,dc=example,dc=com +objectClass: top 
+objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user41 +sn: user41 +uid: uid41 +givenname: givenname41 +description: description41 +userPassword: password41 +mail: uid41 +uidnumber: 41 +gidnumber: 41 +homeDirectory: /home/uid41 + +dn: cn=user42,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user42 +sn: user42 +uid: uid42 +givenname: givenname42 +description: description42 +userPassword: password42 +mail: uid42 +uidnumber: 42 +gidnumber: 42 +homeDirectory: /home/uid42 + +dn: cn=user43,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user43 +sn: user43 +uid: uid43 +givenname: givenname43 +description: description43 +userPassword: password43 +mail: uid43 +uidnumber: 43 +gidnumber: 43 +homeDirectory: /home/uid43 + +dn: cn=user44,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user44 +sn: user44 +uid: uid44 +givenname: givenname44 +description: description44 +userPassword: password44 +mail: uid44 +uidnumber: 44 +gidnumber: 44 +homeDirectory: /home/uid44 + +dn: cn=user45,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user45 +sn: user45 +uid: uid45 +givenname: givenname45 +description: description45 +userPassword: password45 +mail: uid45 +uidnumber: 45 +gidnumber: 45 +homeDirectory: /home/uid45 + +dn: cn=user46,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user46 +sn: user46 +uid: uid46 +givenname: givenname46 +description: description46 +userPassword: password46 +mail: uid46 +uidnumber: 46 +gidnumber: 46 +homeDirectory: /home/uid46 + +dn: cn=user47,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user47 +sn: user47 +uid: uid47 +givenname: givenname47 +description: description47 +userPassword: password47 +mail: uid47 +uidnumber: 47 +gidnumber: 47 +homeDirectory: /home/uid47 + +dn: cn=user48,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user48 +sn: user48 +uid: uid48 +givenname: givenname48 +description: description48 +userPassword: password48 +mail: uid48 +uidnumber: 48 +gidnumber: 48 +homeDirectory: /home/uid48 + +dn: cn=user49,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user49 +sn: user49 +uid: uid49 +givenname: givenname49 +description: description49 +userPassword: password49 +mail: uid49 +uidnumber: 49 +gidnumber: 49 +homeDirectory: /home/uid49 + +dn: cn=user50,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user50 +sn: user50 +uid: uid50 +givenname: givenname50 +description: description50 +userPassword: password50 +mail: uid50 +uidnumber: 50 +gidnumber: 50 +homeDirectory: /home/uid50 + +dn: 
cn=user51,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user51 +sn: user51 +uid: uid51 +givenname: givenname51 +description: description51 +userPassword: password51 +mail: uid51 +uidnumber: 51 +gidnumber: 51 +homeDirectory: /home/uid51 + +dn: cn=user52,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user52 +sn: user52 +uid: uid52 +givenname: givenname52 +description: description52 +userPassword: password52 +mail: uid52 +uidnumber: 52 +gidnumber: 52 +homeDirectory: /home/uid52 + +dn: cn=user53,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user53 +sn: user53 +uid: uid53 +givenname: givenname53 +description: description53 +userPassword: password53 +mail: uid53 +uidnumber: 53 +gidnumber: 53 +homeDirectory: /home/uid53 + +dn: cn=user54,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user54 +sn: user54 +uid: uid54 +givenname: givenname54 +description: description54 +userPassword: password54 +mail: uid54 +uidnumber: 54 +gidnumber: 54 +homeDirectory: /home/uid54 + +dn: cn=user55,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user55 +sn: user55 +uid: uid55 +givenname: givenname55 +description: description55 +userPassword: password55 +mail: uid55 +uidnumber: 55 +gidnumber: 55 +homeDirectory: /home/uid55 + +dn: cn=user56,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user56 +sn: user56 +uid: uid56 +givenname: givenname56 +description: description56 +userPassword: password56 +mail: uid56 +uidnumber: 56 +gidnumber: 56 +homeDirectory: /home/uid56 + +dn: cn=user57,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user57 +sn: user57 +uid: uid57 +givenname: givenname57 +description: description57 +userPassword: password57 +mail: uid57 +uidnumber: 57 +gidnumber: 57 +homeDirectory: /home/uid57 + +dn: cn=user58,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user58 +sn: user58 +uid: uid58 +givenname: givenname58 +description: description58 +userPassword: password58 +mail: uid58 +uidnumber: 58 +gidnumber: 58 +homeDirectory: /home/uid58 + +dn: cn=user59,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user59 +sn: user59 +uid: uid59 +givenname: givenname59 +description: description59 +userPassword: password59 +mail: uid59 +uidnumber: 59 +gidnumber: 59 +homeDirectory: /home/uid59 + +dn: cn=user60,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user60 +sn: user60 +uid: uid60 +givenname: givenname60 +description: description60 +userPassword: password60 +mail: uid60 +uidnumber: 60 +gidnumber: 60 
+homeDirectory: /home/uid60 + +dn: cn=user61,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user61 +sn: user61 +uid: uid61 +givenname: givenname61 +description: description61 +userPassword: password61 +mail: uid61 +uidnumber: 61 +gidnumber: 61 +homeDirectory: /home/uid61 + +dn: cn=user62,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user62 +sn: user62 +uid: uid62 +givenname: givenname62 +description: description62 +userPassword: password62 +mail: uid62 +uidnumber: 62 +gidnumber: 62 +homeDirectory: /home/uid62 + +dn: cn=user63,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user63 +sn: user63 +uid: uid63 +givenname: givenname63 +description: description63 +userPassword: password63 +mail: uid63 +uidnumber: 63 +gidnumber: 63 +homeDirectory: /home/uid63 + +dn: cn=user64,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user64 +sn: user64 +uid: uid64 +givenname: givenname64 +description: description64 +userPassword: password64 +mail: uid64 +uidnumber: 64 +gidnumber: 64 +homeDirectory: /home/uid64 + +dn: cn=user65,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user65 +sn: user65 +uid: uid65 +givenname: givenname65 +description: description65 +userPassword: password65 +mail: uid65 +uidnumber: 65 +gidnumber: 65 +homeDirectory: /home/uid65 + +dn: cn=user66,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user66 +sn: user66 +uid: uid66 +givenname: givenname66 +description: description66 +userPassword: password66 +mail: uid66 +uidnumber: 66 +gidnumber: 66 +homeDirectory: /home/uid66 + +dn: cn=user67,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user67 +sn: user67 +uid: uid67 +givenname: givenname67 +description: description67 +userPassword: password67 +mail: uid67 +uidnumber: 67 +gidnumber: 67 +homeDirectory: /home/uid67 + +dn: cn=user68,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user68 +sn: user68 +uid: uid68 +givenname: givenname68 +description: description68 +userPassword: password68 +mail: uid68 +uidnumber: 68 +gidnumber: 68 +homeDirectory: /home/uid68 + +dn: cn=user69,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user69 +sn: user69 +uid: uid69 +givenname: givenname69 +description: description69 +userPassword: password69 +mail: uid69 +uidnumber: 69 +gidnumber: 69 +homeDirectory: /home/uid69 + +dn: cn=user70,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user70 +sn: user70 +uid: uid70 +givenname: givenname70 +description: description70 +userPassword: password70 +mail: 
uid70 +uidnumber: 70 +gidnumber: 70 +homeDirectory: /home/uid70 + +dn: cn=user71,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user71 +sn: user71 +uid: uid71 +givenname: givenname71 +description: description71 +userPassword: password71 +mail: uid71 +uidnumber: 71 +gidnumber: 71 +homeDirectory: /home/uid71 + +dn: cn=user72,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user72 +sn: user72 +uid: uid72 +givenname: givenname72 +description: description72 +userPassword: password72 +mail: uid72 +uidnumber: 72 +gidnumber: 72 +homeDirectory: /home/uid72 + +dn: cn=user73,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user73 +sn: user73 +uid: uid73 +givenname: givenname73 +description: description73 +userPassword: password73 +mail: uid73 +uidnumber: 73 +gidnumber: 73 +homeDirectory: /home/uid73 + +dn: cn=user74,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user74 +sn: user74 +uid: uid74 +givenname: givenname74 +description: description74 +userPassword: password74 +mail: uid74 +uidnumber: 74 +gidnumber: 74 +homeDirectory: /home/uid74 + +dn: cn=user75,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user75 +sn: user75 +uid: uid75 +givenname: givenname75 +description: description75 +userPassword: password75 +mail: uid75 +uidnumber: 75 +gidnumber: 75 +homeDirectory: /home/uid75 + +dn: cn=user76,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user76 +sn: user76 +uid: uid76 +givenname: givenname76 +description: description76 +userPassword: password76 +mail: uid76 +uidnumber: 76 +gidnumber: 76 +homeDirectory: /home/uid76 + +dn: cn=user77,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user77 +sn: user77 +uid: uid77 +givenname: givenname77 +description: description77 +userPassword: password77 +mail: uid77 +uidnumber: 77 +gidnumber: 77 +homeDirectory: /home/uid77 + +dn: cn=user78,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user78 +sn: user78 +uid: uid78 +givenname: givenname78 +description: description78 +userPassword: password78 +mail: uid78 +uidnumber: 78 +gidnumber: 78 +homeDirectory: /home/uid78 + +dn: cn=user79,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user79 +sn: user79 +uid: uid79 +givenname: givenname79 +description: description79 +userPassword: password79 +mail: uid79 +uidnumber: 79 +gidnumber: 79 +homeDirectory: /home/uid79 + +dn: cn=user80,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user80 +sn: user80 +uid: uid80 +givenname: givenname80 +description: 
description80 +userPassword: password80 +mail: uid80 +uidnumber: 80 +gidnumber: 80 +homeDirectory: /home/uid80 + +dn: cn=user81,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user81 +sn: user81 +uid: uid81 +givenname: givenname81 +description: description81 +userPassword: password81 +mail: uid81 +uidnumber: 81 +gidnumber: 81 +homeDirectory: /home/uid81 + +dn: cn=user82,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user82 +sn: user82 +uid: uid82 +givenname: givenname82 +description: description82 +userPassword: password82 +mail: uid82 +uidnumber: 82 +gidnumber: 82 +homeDirectory: /home/uid82 + +dn: cn=user83,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user83 +sn: user83 +uid: uid83 +givenname: givenname83 +description: description83 +userPassword: password83 +mail: uid83 +uidnumber: 83 +gidnumber: 83 +homeDirectory: /home/uid83 + +dn: cn=user84,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user84 +sn: user84 +uid: uid84 +givenname: givenname84 +description: description84 +userPassword: password84 +mail: uid84 +uidnumber: 84 +gidnumber: 84 +homeDirectory: /home/uid84 + +dn: cn=user85,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user85 +sn: user85 +uid: uid85 +givenname: givenname85 +description: description85 +userPassword: password85 +mail: uid85 +uidnumber: 85 +gidnumber: 85 +homeDirectory: /home/uid85 + +dn: cn=user86,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user86 +sn: user86 +uid: uid86 +givenname: givenname86 +description: description86 +userPassword: password86 +mail: uid86 +uidnumber: 86 +gidnumber: 86 +homeDirectory: /home/uid86 + +dn: cn=user87,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user87 +sn: user87 +uid: uid87 +givenname: givenname87 +description: description87 +userPassword: password87 +mail: uid87 +uidnumber: 87 +gidnumber: 87 +homeDirectory: /home/uid87 + +dn: cn=user88,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user88 +sn: user88 +uid: uid88 +givenname: givenname88 +description: description88 +userPassword: password88 +mail: uid88 +uidnumber: 88 +gidnumber: 88 +homeDirectory: /home/uid88 + +dn: cn=user89,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user89 +sn: user89 +uid: uid89 +givenname: givenname89 +description: description89 +userPassword: password89 +mail: uid89 +uidnumber: 89 +gidnumber: 89 +homeDirectory: /home/uid89 + +dn: cn=user90,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user90 +sn: user90 +uid: uid90 
+givenname: givenname90 +description: description90 +userPassword: password90 +mail: uid90 +uidnumber: 90 +gidnumber: 90 +homeDirectory: /home/uid90 + +dn: cn=user91,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user91 +sn: user91 +uid: uid91 +givenname: givenname91 +description: description91 +userPassword: password91 +mail: uid91 +uidnumber: 91 +gidnumber: 91 +homeDirectory: /home/uid91 + +dn: cn=user92,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user92 +sn: user92 +uid: uid92 +givenname: givenname92 +description: description92 +userPassword: password92 +mail: uid92 +uidnumber: 92 +gidnumber: 92 +homeDirectory: /home/uid92 + +dn: cn=user93,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user93 +sn: user93 +uid: uid93 +givenname: givenname93 +description: description93 +userPassword: password93 +mail: uid93 +uidnumber: 93 +gidnumber: 93 +homeDirectory: /home/uid93 + +dn: cn=user94,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user94 +sn: user94 +uid: uid94 +givenname: givenname94 +description: description94 +userPassword: password94 +mail: uid94 +uidnumber: 94 +gidnumber: 94 +homeDirectory: /home/uid94 + +dn: cn=user95,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user95 +sn: user95 +uid: uid95 +givenname: givenname95 +description: description95 +userPassword: password95 +mail: uid95 +uidnumber: 95 +gidnumber: 95 +homeDirectory: /home/uid95 + +dn: cn=user96,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user96 +sn: user96 +uid: uid96 +givenname: givenname96 +description: description96 +userPassword: password96 +mail: uid96 +uidnumber: 96 +gidnumber: 96 +homeDirectory: /home/uid96 + +dn: cn=user97,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user97 +sn: user97 +uid: uid97 +givenname: givenname97 +description: description97 +userPassword: password97 +mail: uid97 +uidnumber: 97 +gidnumber: 97 +homeDirectory: /home/uid97 + +dn: cn=user98,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user98 +sn: user98 +uid: uid98 +givenname: givenname98 +description: description98 +userPassword: password98 +mail: uid98 +uidnumber: 98 +gidnumber: 98 +homeDirectory: /home/uid98 + +dn: cn=user99,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user99 +sn: user99 +uid: uid99 +givenname: givenname99 +description: description99 +userPassword: password99 +mail: uid99 +uidnumber: 99 +gidnumber: 99 +homeDirectory: /home/uid99 + +dn: cn=user100,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount 
+cn: user100 +sn: user100 +uid: uid100 +givenname: givenname100 +description: description100 +userPassword: password100 +mail: uid100 +uidnumber: 100 +gidnumber: 100 +homeDirectory: /home/uid100 + +dn: cn=user101,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user101 +sn: user101 +uid: uid101 +givenname: givenname101 +description: description101 +userPassword: password101 +mail: uid101 +uidnumber: 101 +gidnumber: 101 +homeDirectory: /home/uid101 + +dn: cn=user102,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user102 +sn: user102 +uid: uid102 +givenname: givenname102 +description: description102 +userPassword: password102 +mail: uid102 +uidnumber: 102 +gidnumber: 102 +homeDirectory: /home/uid102 + +dn: cn=user103,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user103 +sn: user103 +uid: uid103 +givenname: givenname103 +description: description103 +userPassword: password103 +mail: uid103 +uidnumber: 103 +gidnumber: 103 +homeDirectory: /home/uid103 + +dn: cn=user104,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user104 +sn: user104 +uid: uid104 +givenname: givenname104 +description: description104 +userPassword: password104 +mail: uid104 +uidnumber: 104 +gidnumber: 104 +homeDirectory: /home/uid104 + +dn: cn=user105,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user105 +sn: user105 +uid: uid105 +givenname: givenname105 +description: description105 +userPassword: password105 +mail: uid105 +uidnumber: 105 +gidnumber: 105 +homeDirectory: /home/uid105 + +dn: cn=user106,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user106 +sn: user106 +uid: uid106 +givenname: givenname106 +description: description106 +userPassword: password106 +mail: uid106 +uidnumber: 106 +gidnumber: 106 +homeDirectory: /home/uid106 + +dn: cn=user107,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user107 +sn: user107 +uid: uid107 +givenname: givenname107 +description: description107 +userPassword: password107 +mail: uid107 +uidnumber: 107 +gidnumber: 107 +homeDirectory: /home/uid107 + +dn: cn=user108,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user108 +sn: user108 +uid: uid108 +givenname: givenname108 +description: description108 +userPassword: password108 +mail: uid108 +uidnumber: 108 +gidnumber: 108 +homeDirectory: /home/uid108 + +dn: cn=user109,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user109 +sn: user109 +uid: uid109 +givenname: givenname109 +description: description109 +userPassword: password109 +mail: uid109 +uidnumber: 109 +gidnumber: 109 +homeDirectory: /home/uid109 + +dn: 
cn=user110,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user110 +sn: user110 +uid: uid110 +givenname: givenname110 +description: description110 +userPassword: password110 +mail: uid110 +uidnumber: 110 +gidnumber: 110 +homeDirectory: /home/uid110 + +dn: cn=user111,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user111 +sn: user111 +uid: uid111 +givenname: givenname111 +description: description111 +userPassword: password111 +mail: uid111 +uidnumber: 111 +gidnumber: 111 +homeDirectory: /home/uid111 + +dn: cn=user112,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user112 +sn: user112 +uid: uid112 +givenname: givenname112 +description: description112 +userPassword: password112 +mail: uid112 +uidnumber: 112 +gidnumber: 112 +homeDirectory: /home/uid112 + +dn: cn=user113,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user113 +sn: user113 +uid: uid113 +givenname: givenname113 +description: description113 +userPassword: password113 +mail: uid113 +uidnumber: 113 +gidnumber: 113 +homeDirectory: /home/uid113 + +dn: cn=user114,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user114 +sn: user114 +uid: uid114 +givenname: givenname114 +description: description114 +userPassword: password114 +mail: uid114 +uidnumber: 114 +gidnumber: 114 +homeDirectory: /home/uid114 + +dn: cn=user115,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user115 +sn: user115 +uid: uid115 +givenname: givenname115 +description: description115 +userPassword: password115 +mail: uid115 +uidnumber: 115 +gidnumber: 115 +homeDirectory: /home/uid115 + +dn: cn=user116,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user116 +sn: user116 +uid: uid116 +givenname: givenname116 +description: description116 +userPassword: password116 +mail: uid116 +uidnumber: 116 +gidnumber: 116 +homeDirectory: /home/uid116 + +dn: cn=user117,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user117 +sn: user117 +uid: uid117 +givenname: givenname117 +description: description117 +userPassword: password117 +mail: uid117 +uidnumber: 117 +gidnumber: 117 +homeDirectory: /home/uid117 + +dn: cn=user118,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user118 +sn: user118 +uid: uid118 +givenname: givenname118 +description: description118 +userPassword: password118 +mail: uid118 +uidnumber: 118 +gidnumber: 118 +homeDirectory: /home/uid118 + +dn: cn=user119,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user119 +sn: user119 +uid: uid119 +givenname: 
givenname119 +description: description119 +userPassword: password119 +mail: uid119 +uidnumber: 119 +gidnumber: 119 +homeDirectory: /home/uid119 + +dn: cn=user120,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user120 +sn: user120 +uid: uid120 +givenname: givenname120 +description: description120 +userPassword: password120 +mail: uid120 +uidnumber: 120 +gidnumber: 120 +homeDirectory: /home/uid120 + +dn: cn=user121,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user121 +sn: user121 +uid: uid121 +givenname: givenname121 +description: description121 +userPassword: password121 +mail: uid121 +uidnumber: 121 +gidnumber: 121 +homeDirectory: /home/uid121 + +dn: cn=user122,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user122 +sn: user122 +uid: uid122 +givenname: givenname122 +description: description122 +userPassword: password122 +mail: uid122 +uidnumber: 122 +gidnumber: 122 +homeDirectory: /home/uid122 + +dn: cn=user123,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user123 +sn: user123 +uid: uid123 +givenname: givenname123 +description: description123 +userPassword: password123 +mail: uid123 +uidnumber: 123 +gidnumber: 123 +homeDirectory: /home/uid123 + +dn: cn=user124,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user124 +sn: user124 +uid: uid124 +givenname: givenname124 +description: description124 +userPassword: password124 +mail: uid124 +uidnumber: 124 +gidnumber: 124 +homeDirectory: /home/uid124 + +dn: cn=user125,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user125 +sn: user125 +uid: uid125 +givenname: givenname125 +description: description125 +userPassword: password125 +mail: uid125 +uidnumber: 125 +gidnumber: 125 +homeDirectory: /home/uid125 + +dn: cn=user126,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user126 +sn: user126 +uid: uid126 +givenname: givenname126 +description: description126 +userPassword: password126 +mail: uid126 +uidnumber: 126 +gidnumber: 126 +homeDirectory: /home/uid126 + +dn: cn=user127,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user127 +sn: user127 +uid: uid127 +givenname: givenname127 +description: description127 +userPassword: password127 +mail: uid127 +uidnumber: 127 +gidnumber: 127 +homeDirectory: /home/uid127 + +dn: cn=user128,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user128 +sn: user128 +uid: uid128 +givenname: givenname128 +description: description128 +userPassword: password128 +mail: uid128 +uidnumber: 128 +gidnumber: 128 +homeDirectory: /home/uid128 + +dn: cn=user129,ou=People,dc=example,dc=com +objectClass: top +objectClass: 
person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user129 +sn: user129 +uid: uid129 +givenname: givenname129 +description: description129 +userPassword: password129 +mail: uid129 +uidnumber: 129 +gidnumber: 129 +homeDirectory: /home/uid129 + +dn: cn=user130,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user130 +sn: user130 +uid: uid130 +givenname: givenname130 +description: description130 +userPassword: password130 +mail: uid130 +uidnumber: 130 +gidnumber: 130 +homeDirectory: /home/uid130 + +dn: cn=user131,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user131 +sn: user131 +uid: uid131 +givenname: givenname131 +description: description131 +userPassword: password131 +mail: uid131 +uidnumber: 131 +gidnumber: 131 +homeDirectory: /home/uid131 + +dn: cn=user132,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user132 +sn: user132 +uid: uid132 +givenname: givenname132 +description: description132 +userPassword: password132 +mail: uid132 +uidnumber: 132 +gidnumber: 132 +homeDirectory: /home/uid132 + +dn: cn=user133,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user133 +sn: user133 +uid: uid133 +givenname: givenname133 +description: description133 +userPassword: password133 +mail: uid133 +uidnumber: 133 +gidnumber: 133 +homeDirectory: /home/uid133 + +dn: cn=user134,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user134 +sn: user134 +uid: uid134 +givenname: givenname134 +description: description134 +userPassword: password134 +mail: uid134 +uidnumber: 134 +gidnumber: 134 +homeDirectory: /home/uid134 + +dn: cn=user135,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user135 +sn: user135 +uid: uid135 +givenname: givenname135 +description: description135 +userPassword: password135 +mail: uid135 +uidnumber: 135 +gidnumber: 135 +homeDirectory: /home/uid135 + +dn: cn=user136,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user136 +sn: user136 +uid: uid136 +givenname: givenname136 +description: description136 +userPassword: password136 +mail: uid136 +uidnumber: 136 +gidnumber: 136 +homeDirectory: /home/uid136 + +dn: cn=user137,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user137 +sn: user137 +uid: uid137 +givenname: givenname137 +description: description137 +userPassword: password137 +mail: uid137 +uidnumber: 137 +gidnumber: 137 +homeDirectory: /home/uid137 + +dn: cn=user138,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user138 +sn: user138 +uid: uid138 +givenname: givenname138 +description: description138 +userPassword: password138 +mail: 
uid138 +uidnumber: 138 +gidnumber: 138 +homeDirectory: /home/uid138 + +dn: cn=user139,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user139 +sn: user139 +uid: uid139 +givenname: givenname139 +description: description139 +userPassword: password139 +mail: uid139 +uidnumber: 139 +gidnumber: 139 +homeDirectory: /home/uid139 + +dn: cn=user140,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user140 +sn: user140 +uid: uid140 +givenname: givenname140 +description: description140 +userPassword: password140 +mail: uid140 +uidnumber: 140 +gidnumber: 140 +homeDirectory: /home/uid140 + +dn: cn=user141,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user141 +sn: user141 +uid: uid141 +givenname: givenname141 +description: description141 +userPassword: password141 +mail: uid141 +uidnumber: 141 +gidnumber: 141 +homeDirectory: /home/uid141 + +dn: cn=user142,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user142 +sn: user142 +uid: uid142 +givenname: givenname142 +description: description142 +userPassword: password142 +mail: uid142 +uidnumber: 142 +gidnumber: 142 +homeDirectory: /home/uid142 + +dn: cn=user143,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user143 +sn: user143 +uid: uid143 +givenname: givenname143 +description: description143 +userPassword: password143 +mail: uid143 +uidnumber: 143 +gidnumber: 143 +homeDirectory: /home/uid143 + +dn: cn=user144,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user144 +sn: user144 +uid: uid144 +givenname: givenname144 +description: description144 +userPassword: password144 +mail: uid144 +uidnumber: 144 +gidnumber: 144 +homeDirectory: /home/uid144 + +dn: cn=user145,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user145 +sn: user145 +uid: uid145 +givenname: givenname145 +description: description145 +userPassword: password145 +mail: uid145 +uidnumber: 145 +gidnumber: 145 +homeDirectory: /home/uid145 + +dn: cn=user146,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user146 +sn: user146 +uid: uid146 +givenname: givenname146 +description: description146 +userPassword: password146 +mail: uid146 +uidnumber: 146 +gidnumber: 146 +homeDirectory: /home/uid146 + +dn: cn=user147,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user147 +sn: user147 +uid: uid147 +givenname: givenname147 +description: description147 +userPassword: password147 +mail: uid147 +uidnumber: 147 +gidnumber: 147 +homeDirectory: /home/uid147 + +dn: cn=user148,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson 
+objectClass: posixAccount +cn: user148 +sn: user148 +uid: uid148 +givenname: givenname148 +description: description148 +userPassword: password148 +mail: uid148 +uidnumber: 148 +gidnumber: 148 +homeDirectory: /home/uid148 + +dn: cn=user149,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user149 +sn: user149 +uid: uid149 +givenname: givenname149 +description: description149 +userPassword: password149 +mail: uid149 +uidnumber: 149 +gidnumber: 149 +homeDirectory: /home/uid149 + +dn: cn=user150,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user150 +sn: user150 +uid: uid150 +givenname: givenname150 +description: description150 +userPassword: password150 +mail: uid150 +uidnumber: 150 +gidnumber: 150 +homeDirectory: /home/uid150 + +dn: cn=user151,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user151 +sn: user151 +uid: uid151 +givenname: givenname151 +description: description151 +userPassword: password151 +mail: uid151 +uidnumber: 151 +gidnumber: 151 +homeDirectory: /home/uid151 + +dn: cn=user152,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user152 +sn: user152 +uid: uid152 +givenname: givenname152 +description: description152 +userPassword: password152 +mail: uid152 +uidnumber: 152 +gidnumber: 152 +homeDirectory: /home/uid152 + +dn: cn=user153,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user153 +sn: user153 +uid: uid153 +givenname: givenname153 +description: description153 +userPassword: password153 +mail: uid153 +uidnumber: 153 +gidnumber: 153 +homeDirectory: /home/uid153 + +dn: cn=user154,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user154 +sn: user154 +uid: uid154 +givenname: givenname154 +description: description154 +userPassword: password154 +mail: uid154 +uidnumber: 154 +gidnumber: 154 +homeDirectory: /home/uid154 + +dn: cn=user155,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user155 +sn: user155 +uid: uid155 +givenname: givenname155 +description: description155 +userPassword: password155 +mail: uid155 +uidnumber: 155 +gidnumber: 155 +homeDirectory: /home/uid155 + +dn: cn=user156,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user156 +sn: user156 +uid: uid156 +givenname: givenname156 +description: description156 +userPassword: password156 +mail: uid156 +uidnumber: 156 +gidnumber: 156 +homeDirectory: /home/uid156 + +dn: cn=user157,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user157 +sn: user157 +uid: uid157 +givenname: givenname157 +description: description157 +userPassword: password157 +mail: uid157 +uidnumber: 157 +gidnumber: 157 +homeDirectory: /home/uid157 + 
+dn: cn=user158,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user158 +sn: user158 +uid: uid158 +givenname: givenname158 +description: description158 +userPassword: password158 +mail: uid158 +uidnumber: 158 +gidnumber: 158 +homeDirectory: /home/uid158 + +dn: cn=user159,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user159 +sn: user159 +uid: uid159 +givenname: givenname159 +description: description159 +userPassword: password159 +mail: uid159 +uidnumber: 159 +gidnumber: 159 +homeDirectory: /home/uid159 + +dn: cn=user160,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user160 +sn: user160 +uid: uid160 +givenname: givenname160 +description: description160 +userPassword: password160 +mail: uid160 +uidnumber: 160 +gidnumber: 160 +homeDirectory: /home/uid160 + +dn: cn=user161,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user161 +sn: user161 +uid: uid161 +givenname: givenname161 +description: description161 +userPassword: password161 +mail: uid161 +uidnumber: 161 +gidnumber: 161 +homeDirectory: /home/uid161 + +dn: cn=user162,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user162 +sn: user162 +uid: uid162 +givenname: givenname162 +description: description162 +userPassword: password162 +mail: uid162 +uidnumber: 162 +gidnumber: 162 +homeDirectory: /home/uid162 + +dn: cn=user163,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user163 +sn: user163 +uid: uid163 +givenname: givenname163 +description: description163 +userPassword: password163 +mail: uid163 +uidnumber: 163 +gidnumber: 163 +homeDirectory: /home/uid163 + +dn: cn=user164,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user164 +sn: user164 +uid: uid164 +givenname: givenname164 +description: description164 +userPassword: password164 +mail: uid164 +uidnumber: 164 +gidnumber: 164 +homeDirectory: /home/uid164 + +dn: cn=user165,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user165 +sn: user165 +uid: uid165 +givenname: givenname165 +description: description165 +userPassword: password165 +mail: uid165 +uidnumber: 165 +gidnumber: 165 +homeDirectory: /home/uid165 + +dn: cn=user166,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user166 +sn: user166 +uid: uid166 +givenname: givenname166 +description: description166 +userPassword: password166 +mail: uid166 +uidnumber: 166 +gidnumber: 166 +homeDirectory: /home/uid166 + +dn: cn=user167,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user167 +sn: user167 +uid: uid167 +givenname: 
givenname167 +description: description167 +userPassword: password167 +mail: uid167 +uidnumber: 167 +gidnumber: 167 +homeDirectory: /home/uid167 + +dn: cn=user168,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user168 +sn: user168 +uid: uid168 +givenname: givenname168 +description: description168 +userPassword: password168 +mail: uid168 +uidnumber: 168 +gidnumber: 168 +homeDirectory: /home/uid168 + +dn: cn=user169,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user169 +sn: user169 +uid: uid169 +givenname: givenname169 +description: description169 +userPassword: password169 +mail: uid169 +uidnumber: 169 +gidnumber: 169 +homeDirectory: /home/uid169 + +dn: cn=user170,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user170 +sn: user170 +uid: uid170 +givenname: givenname170 +description: description170 +userPassword: password170 +mail: uid170 +uidnumber: 170 +gidnumber: 170 +homeDirectory: /home/uid170 + +dn: cn=user171,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user171 +sn: user171 +uid: uid171 +givenname: givenname171 +description: description171 +userPassword: password171 +mail: uid171 +uidnumber: 171 +gidnumber: 171 +homeDirectory: /home/uid171 + +dn: cn=user172,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user172 +sn: user172 +uid: uid172 +givenname: givenname172 +description: description172 +userPassword: password172 +mail: uid172 +uidnumber: 172 +gidnumber: 172 +homeDirectory: /home/uid172 + +dn: cn=user173,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user173 +sn: user173 +uid: uid173 +givenname: givenname173 +description: description173 +userPassword: password173 +mail: uid173 +uidnumber: 173 +gidnumber: 173 +homeDirectory: /home/uid173 + +dn: cn=user174,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user174 +sn: user174 +uid: uid174 +givenname: givenname174 +description: description174 +userPassword: password174 +mail: uid174 +uidnumber: 174 +gidnumber: 174 +homeDirectory: /home/uid174 + +dn: cn=user175,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user175 +sn: user175 +uid: uid175 +givenname: givenname175 +description: description175 +userPassword: password175 +mail: uid175 +uidnumber: 175 +gidnumber: 175 +homeDirectory: /home/uid175 + +dn: cn=user176,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user176 +sn: user176 +uid: uid176 +givenname: givenname176 +description: description176 +userPassword: password176 +mail: uid176 +uidnumber: 176 +gidnumber: 176 +homeDirectory: /home/uid176 + +dn: cn=user177,ou=People,dc=example,dc=com +objectClass: top +objectClass: 
person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user177 +sn: user177 +uid: uid177 +givenname: givenname177 +description: description177 +userPassword: password177 +mail: uid177 +uidnumber: 177 +gidnumber: 177 +homeDirectory: /home/uid177 + +dn: cn=user178,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user178 +sn: user178 +uid: uid178 +givenname: givenname178 +description: description178 +userPassword: password178 +mail: uid178 +uidnumber: 178 +gidnumber: 178 +homeDirectory: /home/uid178 + +dn: cn=user179,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user179 +sn: user179 +uid: uid179 +givenname: givenname179 +description: description179 +userPassword: password179 +mail: uid179 +uidnumber: 179 +gidnumber: 179 +homeDirectory: /home/uid179 + +dn: cn=user180,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user180 +sn: user180 +uid: uid180 +givenname: givenname180 +description: description180 +userPassword: password180 +mail: uid180 +uidnumber: 180 +gidnumber: 180 +homeDirectory: /home/uid180 + +dn: cn=user181,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user181 +sn: user181 +uid: uid181 +givenname: givenname181 +description: description181 +userPassword: password181 +mail: uid181 +uidnumber: 181 +gidnumber: 181 +homeDirectory: /home/uid181 + +dn: cn=user182,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user182 +sn: user182 +uid: uid182 +givenname: givenname182 +description: description182 +userPassword: password182 +mail: uid182 +uidnumber: 182 +gidnumber: 182 +homeDirectory: /home/uid182 + +dn: cn=user183,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user183 +sn: user183 +uid: uid183 +givenname: givenname183 +description: description183 +userPassword: password183 +mail: uid183 +uidnumber: 183 +gidnumber: 183 +homeDirectory: /home/uid183 + +dn: cn=user184,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user184 +sn: user184 +uid: uid184 +givenname: givenname184 +description: description184 +userPassword: password184 +mail: uid184 +uidnumber: 184 +gidnumber: 184 +homeDirectory: /home/uid184 + +dn: cn=user185,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user185 +sn: user185 +uid: uid185 +givenname: givenname185 +description: description185 +userPassword: password185 +mail: uid185 +uidnumber: 185 +gidnumber: 185 +homeDirectory: /home/uid185 + +dn: cn=user186,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user186 +sn: user186 +uid: uid186 +givenname: givenname186 +description: description186 +userPassword: password186 +mail: 
uid186 +uidnumber: 186 +gidnumber: 186 +homeDirectory: /home/uid186 + +dn: cn=user187,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user187 +sn: user187 +uid: uid187 +givenname: givenname187 +description: description187 +userPassword: password187 +mail: uid187 +uidnumber: 187 +gidnumber: 187 +homeDirectory: /home/uid187 + +dn: cn=user188,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user188 +sn: user188 +uid: uid188 +givenname: givenname188 +description: description188 +userPassword: password188 +mail: uid188 +uidnumber: 188 +gidnumber: 188 +homeDirectory: /home/uid188 + +dn: cn=user189,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user189 +sn: user189 +uid: uid189 +givenname: givenname189 +description: description189 +userPassword: password189 +mail: uid189 +uidnumber: 189 +gidnumber: 189 +homeDirectory: /home/uid189 + +dn: cn=user190,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user190 +sn: user190 +uid: uid190 +givenname: givenname190 +description: description190 +userPassword: password190 +mail: uid190 +uidnumber: 190 +gidnumber: 190 +homeDirectory: /home/uid190 + +dn: cn=user191,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user191 +sn: user191 +uid: uid191 +givenname: givenname191 +description: description191 +userPassword: password191 +mail: uid191 +uidnumber: 191 +gidnumber: 191 +homeDirectory: /home/uid191 + +dn: cn=user192,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user192 +sn: user192 +uid: uid192 +givenname: givenname192 +description: description192 +userPassword: password192 +mail: uid192 +uidnumber: 192 +gidnumber: 192 +homeDirectory: /home/uid192 + +dn: cn=user193,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user193 +sn: user193 +uid: uid193 +givenname: givenname193 +description: description193 +userPassword: password193 +mail: uid193 +uidnumber: 193 +gidnumber: 193 +homeDirectory: /home/uid193 + +dn: cn=user194,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user194 +sn: user194 +uid: uid194 +givenname: givenname194 +description: description194 +userPassword: password194 +mail: uid194 +uidnumber: 194 +gidnumber: 194 +homeDirectory: /home/uid194 + +dn: cn=user195,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user195 +sn: user195 +uid: uid195 +givenname: givenname195 +description: description195 +userPassword: password195 +mail: uid195 +uidnumber: 195 +gidnumber: 195 +homeDirectory: /home/uid195 + +dn: cn=user196,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson 
+objectClass: posixAccount +cn: user196 +sn: user196 +uid: uid196 +givenname: givenname196 +description: description196 +userPassword: password196 +mail: uid196 +uidnumber: 196 +gidnumber: 196 +homeDirectory: /home/uid196 + +dn: cn=user197,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user197 +sn: user197 +uid: uid197 +givenname: givenname197 +description: description197 +userPassword: password197 +mail: uid197 +uidnumber: 197 +gidnumber: 197 +homeDirectory: /home/uid197 + +dn: cn=user198,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user198 +sn: user198 +uid: uid198 +givenname: givenname198 +description: description198 +userPassword: password198 +mail: uid198 +uidnumber: 198 +gidnumber: 198 +homeDirectory: /home/uid198 + +dn: cn=user199,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user199 +sn: user199 +uid: uid199 +givenname: givenname199 +description: description199 +userPassword: password199 +mail: uid199 +uidnumber: 199 +gidnumber: 199 +homeDirectory: /home/uid199 + +dn: cn=user200,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user200 +sn: user200 +uid: uid200 +givenname: givenname200 +description: description200 +userPassword: password200 +mail: uid200 +uidnumber: 200 +gidnumber: 200 +homeDirectory: /home/uid200 + +dn: cn=user201,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user201 +sn: user201 +uid: uid201 +givenname: givenname201 +description: description201 +userPassword: password201 +mail: uid201 +uidnumber: 201 +gidnumber: 201 +homeDirectory: /home/uid201 + +dn: cn=user202,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user202 +sn: user202 +uid: uid202 +givenname: givenname202 +description: description202 +userPassword: password202 +mail: uid202 +uidnumber: 202 +gidnumber: 202 +homeDirectory: /home/uid202 + +dn: cn=user203,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user203 +sn: user203 +uid: uid203 +givenname: givenname203 +description: description203 +userPassword: password203 +mail: uid203 +uidnumber: 203 +gidnumber: 203 +homeDirectory: /home/uid203 + +dn: cn=user204,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user204 +sn: user204 +uid: uid204 +givenname: givenname204 +description: description204 +userPassword: password204 +mail: uid204 +uidnumber: 204 +gidnumber: 204 +homeDirectory: /home/uid204 + +dn: cn=user205,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user205 +sn: user205 +uid: uid205 +givenname: givenname205 +description: description205 +userPassword: password205 +mail: uid205 +uidnumber: 205 +gidnumber: 205 +homeDirectory: /home/uid205 + 
+dn: cn=user206,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user206 +sn: user206 +uid: uid206 +givenname: givenname206 +description: description206 +userPassword: password206 +mail: uid206 +uidnumber: 206 +gidnumber: 206 +homeDirectory: /home/uid206 + +dn: cn=user207,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user207 +sn: user207 +uid: uid207 +givenname: givenname207 +description: description207 +userPassword: password207 +mail: uid207 +uidnumber: 207 +gidnumber: 207 +homeDirectory: /home/uid207 + +dn: cn=user208,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user208 +sn: user208 +uid: uid208 +givenname: givenname208 +description: description208 +userPassword: password208 +mail: uid208 +uidnumber: 208 +gidnumber: 208 +homeDirectory: /home/uid208 + +dn: cn=user209,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user209 +sn: user209 +uid: uid209 +givenname: givenname209 +description: description209 +userPassword: password209 +mail: uid209 +uidnumber: 209 +gidnumber: 209 +homeDirectory: /home/uid209 + +dn: cn=user210,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user210 +sn: user210 +uid: uid210 +givenname: givenname210 +description: description210 +userPassword: password210 +mail: uid210 +uidnumber: 210 +gidnumber: 210 +homeDirectory: /home/uid210 + +dn: cn=user211,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user211 +sn: user211 +uid: uid211 +givenname: givenname211 +description: description211 +userPassword: password211 +mail: uid211 +uidnumber: 211 +gidnumber: 211 +homeDirectory: /home/uid211 + +dn: cn=user212,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user212 +sn: user212 +uid: uid212 +givenname: givenname212 +description: description212 +userPassword: password212 +mail: uid212 +uidnumber: 212 +gidnumber: 212 +homeDirectory: /home/uid212 + +dn: cn=user213,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user213 +sn: user213 +uid: uid213 +givenname: givenname213 +description: description213 +userPassword: password213 +mail: uid213 +uidnumber: 213 +gidnumber: 213 +homeDirectory: /home/uid213 + +dn: cn=user214,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user214 +sn: user214 +uid: uid214 +givenname: givenname214 +description: description214 +userPassword: password214 +mail: uid214 +uidnumber: 214 +gidnumber: 214 +homeDirectory: /home/uid214 + +dn: cn=user215,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user215 +sn: user215 +uid: uid215 +givenname: 
givenname215 +description: description215 +userPassword: password215 +mail: uid215 +uidnumber: 215 +gidnumber: 215 +homeDirectory: /home/uid215 + +dn: cn=user216,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user216 +sn: user216 +uid: uid216 +givenname: givenname216 +description: description216 +userPassword: password216 +mail: uid216 +uidnumber: 216 +gidnumber: 216 +homeDirectory: /home/uid216 + +dn: cn=user217,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user217 +sn: user217 +uid: uid217 +givenname: givenname217 +description: description217 +userPassword: password217 +mail: uid217 +uidnumber: 217 +gidnumber: 217 +homeDirectory: /home/uid217 + +dn: cn=user218,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user218 +sn: user218 +uid: uid218 +givenname: givenname218 +description: description218 +userPassword: password218 +mail: uid218 +uidnumber: 218 +gidnumber: 218 +homeDirectory: /home/uid218 + +dn: cn=user219,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user219 +sn: user219 +uid: uid219 +givenname: givenname219 +description: description219 +userPassword: password219 +mail: uid219 +uidnumber: 219 +gidnumber: 219 +homeDirectory: /home/uid219 + +dn: cn=user220,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user220 +sn: user220 +uid: uid220 +givenname: givenname220 +description: description220 +userPassword: password220 +mail: uid220 +uidnumber: 220 +gidnumber: 220 +homeDirectory: /home/uid220 + +dn: cn=user221,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user221 +sn: user221 +uid: uid221 +givenname: givenname221 +description: description221 +userPassword: password221 +mail: uid221 +uidnumber: 221 +gidnumber: 221 +homeDirectory: /home/uid221 + +dn: cn=user222,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user222 +sn: user222 +uid: uid222 +givenname: givenname222 +description: description222 +userPassword: password222 +mail: uid222 +uidnumber: 222 +gidnumber: 222 +homeDirectory: /home/uid222 + +dn: cn=user223,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user223 +sn: user223 +uid: uid223 +givenname: givenname223 +description: description223 +userPassword: password223 +mail: uid223 +uidnumber: 223 +gidnumber: 223 +homeDirectory: /home/uid223 + +dn: cn=user224,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user224 +sn: user224 +uid: uid224 +givenname: givenname224 +description: description224 +userPassword: password224 +mail: uid224 +uidnumber: 224 +gidnumber: 224 +homeDirectory: /home/uid224 + +dn: cn=user225,ou=People,dc=example,dc=com +objectClass: top +objectClass: 
person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user225 +sn: user225 +uid: uid225 +givenname: givenname225 +description: description225 +userPassword: password225 +mail: uid225 +uidnumber: 225 +gidnumber: 225 +homeDirectory: /home/uid225 + +dn: cn=user226,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user226 +sn: user226 +uid: uid226 +givenname: givenname226 +description: description226 +userPassword: password226 +mail: uid226 +uidnumber: 226 +gidnumber: 226 +homeDirectory: /home/uid226 + +dn: cn=user227,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user227 +sn: user227 +uid: uid227 +givenname: givenname227 +description: description227 +userPassword: password227 +mail: uid227 +uidnumber: 227 +gidnumber: 227 +homeDirectory: /home/uid227 + +dn: cn=user228,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user228 +sn: user228 +uid: uid228 +givenname: givenname228 +description: description228 +userPassword: password228 +mail: uid228 +uidnumber: 228 +gidnumber: 228 +homeDirectory: /home/uid228 + +dn: cn=user229,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user229 +sn: user229 +uid: uid229 +givenname: givenname229 +description: description229 +userPassword: password229 +mail: uid229 +uidnumber: 229 +gidnumber: 229 +homeDirectory: /home/uid229 + +dn: cn=user230,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user230 +sn: user230 +uid: uid230 +givenname: givenname230 +description: description230 +userPassword: password230 +mail: uid230 +uidnumber: 230 +gidnumber: 230 +homeDirectory: /home/uid230 + +dn: cn=user231,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user231 +sn: user231 +uid: uid231 +givenname: givenname231 +description: description231 +userPassword: password231 +mail: uid231 +uidnumber: 231 +gidnumber: 231 +homeDirectory: /home/uid231 + +dn: cn=user232,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user232 +sn: user232 +uid: uid232 +givenname: givenname232 +description: description232 +userPassword: password232 +mail: uid232 +uidnumber: 232 +gidnumber: 232 +homeDirectory: /home/uid232 + +dn: cn=user233,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user233 +sn: user233 +uid: uid233 +givenname: givenname233 +description: description233 +userPassword: password233 +mail: uid233 +uidnumber: 233 +gidnumber: 233 +homeDirectory: /home/uid233 + +dn: cn=user234,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user234 +sn: user234 +uid: uid234 +givenname: givenname234 +description: description234 +userPassword: password234 +mail: 
uid234 +uidnumber: 234 +gidnumber: 234 +homeDirectory: /home/uid234 + +dn: cn=user235,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user235 +sn: user235 +uid: uid235 +givenname: givenname235 +description: description235 +userPassword: password235 +mail: uid235 +uidnumber: 235 +gidnumber: 235 +homeDirectory: /home/uid235 + +dn: cn=user236,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user236 +sn: user236 +uid: uid236 +givenname: givenname236 +description: description236 +userPassword: password236 +mail: uid236 +uidnumber: 236 +gidnumber: 236 +homeDirectory: /home/uid236 + +dn: cn=user237,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user237 +sn: user237 +uid: uid237 +givenname: givenname237 +description: description237 +userPassword: password237 +mail: uid237 +uidnumber: 237 +gidnumber: 237 +homeDirectory: /home/uid237 + +dn: cn=user238,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user238 +sn: user238 +uid: uid238 +givenname: givenname238 +description: description238 +userPassword: password238 +mail: uid238 +uidnumber: 238 +gidnumber: 238 +homeDirectory: /home/uid238 + +dn: cn=user239,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user239 +sn: user239 +uid: uid239 +givenname: givenname239 +description: description239 +userPassword: password239 +mail: uid239 +uidnumber: 239 +gidnumber: 239 +homeDirectory: /home/uid239 + +dn: cn=user240,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user240 +sn: user240 +uid: uid240 +givenname: givenname240 +description: description240 +userPassword: password240 +mail: uid240 +uidnumber: 240 +gidnumber: 240 +homeDirectory: /home/uid240 + +dn: cn=user241,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user241 +sn: user241 +uid: uid241 +givenname: givenname241 +description: description241 +userPassword: password241 +mail: uid241 +uidnumber: 241 +gidnumber: 241 +homeDirectory: /home/uid241 + +dn: cn=user242,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user242 +sn: user242 +uid: uid242 +givenname: givenname242 +description: description242 +userPassword: password242 +mail: uid242 +uidnumber: 242 +gidnumber: 242 +homeDirectory: /home/uid242 + +dn: cn=user243,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user243 +sn: user243 +uid: uid243 +givenname: givenname243 +description: description243 +userPassword: password243 +mail: uid243 +uidnumber: 243 +gidnumber: 243 +homeDirectory: /home/uid243 + +dn: cn=user244,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson 
+objectClass: posixAccount +cn: user244 +sn: user244 +uid: uid244 +givenname: givenname244 +description: description244 +userPassword: password244 +mail: uid244 +uidnumber: 244 +gidnumber: 244 +homeDirectory: /home/uid244 + +dn: cn=user245,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user245 +sn: user245 +uid: uid245 +givenname: givenname245 +description: description245 +userPassword: password245 +mail: uid245 +uidnumber: 245 +gidnumber: 245 +homeDirectory: /home/uid245 + +dn: cn=user246,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user246 +sn: user246 +uid: uid246 +givenname: givenname246 +description: description246 +userPassword: password246 +mail: uid246 +uidnumber: 246 +gidnumber: 246 +homeDirectory: /home/uid246 + +dn: cn=user247,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user247 +sn: user247 +uid: uid247 +givenname: givenname247 +description: description247 +userPassword: password247 +mail: uid247 +uidnumber: 247 +gidnumber: 247 +homeDirectory: /home/uid247 + +dn: cn=user248,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user248 +sn: user248 +uid: uid248 +givenname: givenname248 +description: description248 +userPassword: password248 +mail: uid248 +uidnumber: 248 +gidnumber: 248 +homeDirectory: /home/uid248 + +dn: cn=user249,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user249 +sn: user249 +uid: uid249 +givenname: givenname249 +description: description249 +userPassword: password249 +mail: uid249 +uidnumber: 249 +gidnumber: 249 +homeDirectory: /home/uid249 + +dn: cn=user250,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user250 +sn: user250 +uid: uid250 +givenname: givenname250 +description: description250 +userPassword: password250 +mail: uid250 +uidnumber: 250 +gidnumber: 250 +homeDirectory: /home/uid250 + +dn: cn=user251,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user251 +sn: user251 +uid: uid251 +givenname: givenname251 +description: description251 +userPassword: password251 +mail: uid251 +uidnumber: 251 +gidnumber: 251 +homeDirectory: /home/uid251 + +dn: cn=user252,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user252 +sn: user252 +uid: uid252 +givenname: givenname252 +description: description252 +userPassword: password252 +mail: uid252 +uidnumber: 252 +gidnumber: 252 +homeDirectory: /home/uid252 + +dn: cn=user253,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user253 +sn: user253 +uid: uid253 +givenname: givenname253 +description: description253 +userPassword: password253 +mail: uid253 +uidnumber: 253 +gidnumber: 253 +homeDirectory: /home/uid253 + 
+dn: cn=user254,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user254 +sn: user254 +uid: uid254 +givenname: givenname254 +description: description254 +userPassword: password254 +mail: uid254 +uidnumber: 254 +gidnumber: 254 +homeDirectory: /home/uid254 + +dn: cn=user255,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user255 +sn: user255 +uid: uid255 +givenname: givenname255 +description: description255 +userPassword: password255 +mail: uid255 +uidnumber: 255 +gidnumber: 255 +homeDirectory: /home/uid255 + +dn: cn=user256,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user256 +sn: user256 +uid: uid256 +givenname: givenname256 +description: description256 +userPassword: password256 +mail: uid256 +uidnumber: 256 +gidnumber: 256 +homeDirectory: /home/uid256 + +dn: cn=user257,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user257 +sn: user257 +uid: uid257 +givenname: givenname257 +description: description257 +userPassword: password257 +mail: uid257 +uidnumber: 257 +gidnumber: 257 +homeDirectory: /home/uid257 + +dn: cn=user258,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user258 +sn: user258 +uid: uid258 +givenname: givenname258 +description: description258 +userPassword: password258 +mail: uid258 +uidnumber: 258 +gidnumber: 258 +homeDirectory: /home/uid258 + +dn: cn=user259,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user259 +sn: user259 +uid: uid259 +givenname: givenname259 +description: description259 +userPassword: password259 +mail: uid259 +uidnumber: 259 +gidnumber: 259 +homeDirectory: /home/uid259 + +dn: cn=user260,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user260 +sn: user260 +uid: uid260 +givenname: givenname260 +description: description260 +userPassword: password260 +mail: uid260 +uidnumber: 260 +gidnumber: 260 +homeDirectory: /home/uid260 + +dn: cn=user261,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user261 +sn: user261 +uid: uid261 +givenname: givenname261 +description: description261 +userPassword: password261 +mail: uid261 +uidnumber: 261 +gidnumber: 261 +homeDirectory: /home/uid261 + +dn: cn=user262,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user262 +sn: user262 +uid: uid262 +givenname: givenname262 +description: description262 +userPassword: password262 +mail: uid262 +uidnumber: 262 +gidnumber: 262 +homeDirectory: /home/uid262 + +dn: cn=user263,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user263 +sn: user263 +uid: uid263 +givenname: 
givenname263 +description: description263 +userPassword: password263 +mail: uid263 +uidnumber: 263 +gidnumber: 263 +homeDirectory: /home/uid263 + +dn: cn=user264,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user264 +sn: user264 +uid: uid264 +givenname: givenname264 +description: description264 +userPassword: password264 +mail: uid264 +uidnumber: 264 +gidnumber: 264 +homeDirectory: /home/uid264 + +dn: cn=user265,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user265 +sn: user265 +uid: uid265 +givenname: givenname265 +description: description265 +userPassword: password265 +mail: uid265 +uidnumber: 265 +gidnumber: 265 +homeDirectory: /home/uid265 + +dn: cn=user266,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user266 +sn: user266 +uid: uid266 +givenname: givenname266 +description: description266 +userPassword: password266 +mail: uid266 +uidnumber: 266 +gidnumber: 266 +homeDirectory: /home/uid266 + +dn: cn=user267,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user267 +sn: user267 +uid: uid267 +givenname: givenname267 +description: description267 +userPassword: password267 +mail: uid267 +uidnumber: 267 +gidnumber: 267 +homeDirectory: /home/uid267 + +dn: cn=user268,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user268 +sn: user268 +uid: uid268 +givenname: givenname268 +description: description268 +userPassword: password268 +mail: uid268 +uidnumber: 268 +gidnumber: 268 +homeDirectory: /home/uid268 + +dn: cn=user269,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user269 +sn: user269 +uid: uid269 +givenname: givenname269 +description: description269 +userPassword: password269 +mail: uid269 +uidnumber: 269 +gidnumber: 269 +homeDirectory: /home/uid269 + +dn: cn=user270,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user270 +sn: user270 +uid: uid270 +givenname: givenname270 +description: description270 +userPassword: password270 +mail: uid270 +uidnumber: 270 +gidnumber: 270 +homeDirectory: /home/uid270 + +dn: cn=user271,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user271 +sn: user271 +uid: uid271 +givenname: givenname271 +description: description271 +userPassword: password271 +mail: uid271 +uidnumber: 271 +gidnumber: 271 +homeDirectory: /home/uid271 + +dn: cn=user272,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user272 +sn: user272 +uid: uid272 +givenname: givenname272 +description: description272 +userPassword: password272 +mail: uid272 +uidnumber: 272 +gidnumber: 272 +homeDirectory: /home/uid272 + +dn: cn=user273,ou=People,dc=example,dc=com +objectClass: top +objectClass: 
person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user273 +sn: user273 +uid: uid273 +givenname: givenname273 +description: description273 +userPassword: password273 +mail: uid273 +uidnumber: 273 +gidnumber: 273 +homeDirectory: /home/uid273 + +dn: cn=user274,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user274 +sn: user274 +uid: uid274 +givenname: givenname274 +description: description274 +userPassword: password274 +mail: uid274 +uidnumber: 274 +gidnumber: 274 +homeDirectory: /home/uid274 + +dn: cn=user275,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user275 +sn: user275 +uid: uid275 +givenname: givenname275 +description: description275 +userPassword: password275 +mail: uid275 +uidnumber: 275 +gidnumber: 275 +homeDirectory: /home/uid275 + +dn: cn=user276,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user276 +sn: user276 +uid: uid276 +givenname: givenname276 +description: description276 +userPassword: password276 +mail: uid276 +uidnumber: 276 +gidnumber: 276 +homeDirectory: /home/uid276 + +dn: cn=user277,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user277 +sn: user277 +uid: uid277 +givenname: givenname277 +description: description277 +userPassword: password277 +mail: uid277 +uidnumber: 277 +gidnumber: 277 +homeDirectory: /home/uid277 + +dn: cn=user278,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user278 +sn: user278 +uid: uid278 +givenname: givenname278 +description: description278 +userPassword: password278 +mail: uid278 +uidnumber: 278 +gidnumber: 278 +homeDirectory: /home/uid278 + +dn: cn=user279,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user279 +sn: user279 +uid: uid279 +givenname: givenname279 +description: description279 +userPassword: password279 +mail: uid279 +uidnumber: 279 +gidnumber: 279 +homeDirectory: /home/uid279 + +dn: cn=user280,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user280 +sn: user280 +uid: uid280 +givenname: givenname280 +description: description280 +userPassword: password280 +mail: uid280 +uidnumber: 280 +gidnumber: 280 +homeDirectory: /home/uid280 + +dn: cn=user281,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user281 +sn: user281 +uid: uid281 +givenname: givenname281 +description: description281 +userPassword: password281 +mail: uid281 +uidnumber: 281 +gidnumber: 281 +homeDirectory: /home/uid281 + +dn: cn=user282,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user282 +sn: user282 +uid: uid282 +givenname: givenname282 +description: description282 +userPassword: password282 +mail: 
uid282 +uidnumber: 282 +gidnumber: 282 +homeDirectory: /home/uid282 + +dn: cn=user283,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user283 +sn: user283 +uid: uid283 +givenname: givenname283 +description: description283 +userPassword: password283 +mail: uid283 +uidnumber: 283 +gidnumber: 283 +homeDirectory: /home/uid283 + +dn: cn=user284,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user284 +sn: user284 +uid: uid284 +givenname: givenname284 +description: description284 +userPassword: password284 +mail: uid284 +uidnumber: 284 +gidnumber: 284 +homeDirectory: /home/uid284 + +dn: cn=user285,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user285 +sn: user285 +uid: uid285 +givenname: givenname285 +description: description285 +userPassword: password285 +mail: uid285 +uidnumber: 285 +gidnumber: 285 +homeDirectory: /home/uid285 + +dn: cn=user286,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user286 +sn: user286 +uid: uid286 +givenname: givenname286 +description: description286 +userPassword: password286 +mail: uid286 +uidnumber: 286 +gidnumber: 286 +homeDirectory: /home/uid286 + +dn: cn=user287,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user287 +sn: user287 +uid: uid287 +givenname: givenname287 +description: description287 +userPassword: password287 +mail: uid287 +uidnumber: 287 +gidnumber: 287 +homeDirectory: /home/uid287 + +dn: cn=user288,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user288 +sn: user288 +uid: uid288 +givenname: givenname288 +description: description288 +userPassword: password288 +mail: uid288 +uidnumber: 288 +gidnumber: 288 +homeDirectory: /home/uid288 + +dn: cn=user289,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user289 +sn: user289 +uid: uid289 +givenname: givenname289 +description: description289 +userPassword: password289 +mail: uid289 +uidnumber: 289 +gidnumber: 289 +homeDirectory: /home/uid289 + +dn: cn=user290,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user290 +sn: user290 +uid: uid290 +givenname: givenname290 +description: description290 +userPassword: password290 +mail: uid290 +uidnumber: 290 +gidnumber: 290 +homeDirectory: /home/uid290 + +dn: cn=user291,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user291 +sn: user291 +uid: uid291 +givenname: givenname291 +description: description291 +userPassword: password291 +mail: uid291 +uidnumber: 291 +gidnumber: 291 +homeDirectory: /home/uid291 + +dn: cn=user292,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson 
+objectClass: posixAccount +cn: user292 +sn: user292 +uid: uid292 +givenname: givenname292 +description: description292 +userPassword: password292 +mail: uid292 +uidnumber: 292 +gidnumber: 292 +homeDirectory: /home/uid292 + +dn: cn=user293,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user293 +sn: user293 +uid: uid293 +givenname: givenname293 +description: description293 +userPassword: password293 +mail: uid293 +uidnumber: 293 +gidnumber: 293 +homeDirectory: /home/uid293 + +dn: cn=user294,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user294 +sn: user294 +uid: uid294 +givenname: givenname294 +description: description294 +userPassword: password294 +mail: uid294 +uidnumber: 294 +gidnumber: 294 +homeDirectory: /home/uid294 + +dn: cn=user295,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user295 +sn: user295 +uid: uid295 +givenname: givenname295 +description: description295 +userPassword: password295 +mail: uid295 +uidnumber: 295 +gidnumber: 295 +homeDirectory: /home/uid295 + +dn: cn=user296,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user296 +sn: user296 +uid: uid296 +givenname: givenname296 +description: description296 +userPassword: password296 +mail: uid296 +uidnumber: 296 +gidnumber: 296 +homeDirectory: /home/uid296 + +dn: cn=user297,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user297 +sn: user297 +uid: uid297 +givenname: givenname297 +description: description297 +userPassword: password297 +mail: uid297 +uidnumber: 297 +gidnumber: 297 +homeDirectory: /home/uid297 + +dn: cn=user298,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user298 +sn: user298 +uid: uid298 +givenname: givenname298 +description: description298 +userPassword: password298 +mail: uid298 +uidnumber: 298 +gidnumber: 298 +homeDirectory: /home/uid298 + +dn: cn=user299,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user299 +sn: user299 +uid: uid299 +givenname: givenname299 +description: description299 +userPassword: password299 +mail: uid299 +uidnumber: 299 +gidnumber: 299 +homeDirectory: /home/uid299 + +dn: cn=user300,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user300 +sn: user300 +uid: uid300 +givenname: givenname300 +description: description300 +userPassword: password300 +mail: uid300 +uidnumber: 300 +gidnumber: 300 +homeDirectory: /home/uid300 + +dn: cn=user301,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user301 +sn: user301 +uid: uid301 +givenname: givenname301 +description: description301 +userPassword: password301 +mail: uid301 +uidnumber: 301 +gidnumber: 301 +homeDirectory: /home/uid301 + 
+dn: cn=user302,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user302 +sn: user302 +uid: uid302 +givenname: givenname302 +description: description302 +userPassword: password302 +mail: uid302 +uidnumber: 302 +gidnumber: 302 +homeDirectory: /home/uid302 + +dn: cn=user303,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user303 +sn: user303 +uid: uid303 +givenname: givenname303 +description: description303 +userPassword: password303 +mail: uid303 +uidnumber: 303 +gidnumber: 303 +homeDirectory: /home/uid303 + +dn: cn=user304,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user304 +sn: user304 +uid: uid304 +givenname: givenname304 +description: description304 +userPassword: password304 +mail: uid304 +uidnumber: 304 +gidnumber: 304 +homeDirectory: /home/uid304 + +dn: cn=user305,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user305 +sn: user305 +uid: uid305 +givenname: givenname305 +description: description305 +userPassword: password305 +mail: uid305 +uidnumber: 305 +gidnumber: 305 +homeDirectory: /home/uid305 + +dn: cn=user306,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user306 +sn: user306 +uid: uid306 +givenname: givenname306 +description: description306 +userPassword: password306 +mail: uid306 +uidnumber: 306 +gidnumber: 306 +homeDirectory: /home/uid306 + +dn: cn=user307,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user307 +sn: user307 +uid: uid307 +givenname: givenname307 +description: description307 +userPassword: password307 +mail: uid307 +uidnumber: 307 +gidnumber: 307 +homeDirectory: /home/uid307 + +dn: cn=user308,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user308 +sn: user308 +uid: uid308 +givenname: givenname308 +description: description308 +userPassword: password308 +mail: uid308 +uidnumber: 308 +gidnumber: 308 +homeDirectory: /home/uid308 + +dn: cn=user309,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user309 +sn: user309 +uid: uid309 +givenname: givenname309 +description: description309 +userPassword: password309 +mail: uid309 +uidnumber: 309 +gidnumber: 309 +homeDirectory: /home/uid309 + +dn: cn=user310,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user310 +sn: user310 +uid: uid310 +givenname: givenname310 +description: description310 +userPassword: password310 +mail: uid310 +uidnumber: 310 +gidnumber: 310 +homeDirectory: /home/uid310 + +dn: cn=user311,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user311 +sn: user311 +uid: uid311 +givenname: 
givenname311 +description: description311 +userPassword: password311 +mail: uid311 +uidnumber: 311 +gidnumber: 311 +homeDirectory: /home/uid311 + +dn: cn=user312,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user312 +sn: user312 +uid: uid312 +givenname: givenname312 +description: description312 +userPassword: password312 +mail: uid312 +uidnumber: 312 +gidnumber: 312 +homeDirectory: /home/uid312 + +dn: cn=user313,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user313 +sn: user313 +uid: uid313 +givenname: givenname313 +description: description313 +userPassword: password313 +mail: uid313 +uidnumber: 313 +gidnumber: 313 +homeDirectory: /home/uid313 + +dn: cn=user314,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user314 +sn: user314 +uid: uid314 +givenname: givenname314 +description: description314 +userPassword: password314 +mail: uid314 +uidnumber: 314 +gidnumber: 314 +homeDirectory: /home/uid314 + +dn: cn=user315,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user315 +sn: user315 +uid: uid315 +givenname: givenname315 +description: description315 +userPassword: password315 +mail: uid315 +uidnumber: 315 +gidnumber: 315 +homeDirectory: /home/uid315 + +dn: cn=user316,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user316 +sn: user316 +uid: uid316 +givenname: givenname316 +description: description316 +userPassword: password316 +mail: uid316 +uidnumber: 316 +gidnumber: 316 +homeDirectory: /home/uid316 + +dn: cn=user317,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user317 +sn: user317 +uid: uid317 +givenname: givenname317 +description: description317 +userPassword: password317 +mail: uid317 +uidnumber: 317 +gidnumber: 317 +homeDirectory: /home/uid317 + +dn: cn=user318,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user318 +sn: user318 +uid: uid318 +givenname: givenname318 +description: description318 +userPassword: password318 +mail: uid318 +uidnumber: 318 +gidnumber: 318 +homeDirectory: /home/uid318 + +dn: cn=user319,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user319 +sn: user319 +uid: uid319 +givenname: givenname319 +description: description319 +userPassword: password319 +mail: uid319 +uidnumber: 319 +gidnumber: 319 +homeDirectory: /home/uid319 + +dn: cn=user320,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user320 +sn: user320 +uid: uid320 +givenname: givenname320 +description: description320 +userPassword: password320 +mail: uid320 +uidnumber: 320 +gidnumber: 320 +homeDirectory: /home/uid320 + +dn: cn=user321,ou=People,dc=example,dc=com +objectClass: top +objectClass: 
person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user321 +sn: user321 +uid: uid321 +givenname: givenname321 +description: description321 +userPassword: password321 +mail: uid321 +uidnumber: 321 +gidnumber: 321 +homeDirectory: /home/uid321 + +dn: cn=user322,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user322 +sn: user322 +uid: uid322 +givenname: givenname322 +description: description322 +userPassword: password322 +mail: uid322 +uidnumber: 322 +gidnumber: 322 +homeDirectory: /home/uid322 + +dn: cn=user323,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user323 +sn: user323 +uid: uid323 +givenname: givenname323 +description: description323 +userPassword: password323 +mail: uid323 +uidnumber: 323 +gidnumber: 323 +homeDirectory: /home/uid323 + +dn: cn=user324,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user324 +sn: user324 +uid: uid324 +givenname: givenname324 +description: description324 +userPassword: password324 +mail: uid324 +uidnumber: 324 +gidnumber: 324 +homeDirectory: /home/uid324 + +dn: cn=user325,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user325 +sn: user325 +uid: uid325 +givenname: givenname325 +description: description325 +userPassword: password325 +mail: uid325 +uidnumber: 325 +gidnumber: 325 +homeDirectory: /home/uid325 + +dn: cn=user326,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user326 +sn: user326 +uid: uid326 +givenname: givenname326 +description: description326 +userPassword: password326 +mail: uid326 +uidnumber: 326 +gidnumber: 326 +homeDirectory: /home/uid326 + +dn: cn=user327,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user327 +sn: user327 +uid: uid327 +givenname: givenname327 +description: description327 +userPassword: password327 +mail: uid327 +uidnumber: 327 +gidnumber: 327 +homeDirectory: /home/uid327 + +dn: cn=user328,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user328 +sn: user328 +uid: uid328 +givenname: givenname328 +description: description328 +userPassword: password328 +mail: uid328 +uidnumber: 328 +gidnumber: 328 +homeDirectory: /home/uid328 + +dn: cn=user329,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user329 +sn: user329 +uid: uid329 +givenname: givenname329 +description: description329 +userPassword: password329 +mail: uid329 +uidnumber: 329 +gidnumber: 329 +homeDirectory: /home/uid329 + +dn: cn=user330,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user330 +sn: user330 +uid: uid330 +givenname: givenname330 +description: description330 +userPassword: password330 +mail: 
uid330 +uidnumber: 330 +gidnumber: 330 +homeDirectory: /home/uid330 + +dn: cn=user331,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user331 +sn: user331 +uid: uid331 +givenname: givenname331 +description: description331 +userPassword: password331 +mail: uid331 +uidnumber: 331 +gidnumber: 331 +homeDirectory: /home/uid331 + +dn: cn=user332,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user332 +sn: user332 +uid: uid332 +givenname: givenname332 +description: description332 +userPassword: password332 +mail: uid332 +uidnumber: 332 +gidnumber: 332 +homeDirectory: /home/uid332 + +dn: cn=user333,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user333 +sn: user333 +uid: uid333 +givenname: givenname333 +description: description333 +userPassword: password333 +mail: uid333 +uidnumber: 333 +gidnumber: 333 +homeDirectory: /home/uid333 + +dn: cn=user334,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user334 +sn: user334 +uid: uid334 +givenname: givenname334 +description: description334 +userPassword: password334 +mail: uid334 +uidnumber: 334 +gidnumber: 334 +homeDirectory: /home/uid334 + +dn: cn=user335,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user335 +sn: user335 +uid: uid335 +givenname: givenname335 +description: description335 +userPassword: password335 +mail: uid335 +uidnumber: 335 +gidnumber: 335 +homeDirectory: /home/uid335 + +dn: cn=user336,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user336 +sn: user336 +uid: uid336 +givenname: givenname336 +description: description336 +userPassword: password336 +mail: uid336 +uidnumber: 336 +gidnumber: 336 +homeDirectory: /home/uid336 + +dn: cn=user337,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user337 +sn: user337 +uid: uid337 +givenname: givenname337 +description: description337 +userPassword: password337 +mail: uid337 +uidnumber: 337 +gidnumber: 337 +homeDirectory: /home/uid337 + +dn: cn=user338,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user338 +sn: user338 +uid: uid338 +givenname: givenname338 +description: description338 +userPassword: password338 +mail: uid338 +uidnumber: 338 +gidnumber: 338 +homeDirectory: /home/uid338 + +dn: cn=user339,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user339 +sn: user339 +uid: uid339 +givenname: givenname339 +description: description339 +userPassword: password339 +mail: uid339 +uidnumber: 339 +gidnumber: 339 +homeDirectory: /home/uid339 + +dn: cn=user340,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson 
+objectClass: posixAccount +cn: user340 +sn: user340 +uid: uid340 +givenname: givenname340 +description: description340 +userPassword: password340 +mail: uid340 +uidnumber: 340 +gidnumber: 340 +homeDirectory: /home/uid340 + +dn: cn=user341,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user341 +sn: user341 +uid: uid341 +givenname: givenname341 +description: description341 +userPassword: password341 +mail: uid341 +uidnumber: 341 +gidnumber: 341 +homeDirectory: /home/uid341 + +dn: cn=user342,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user342 +sn: user342 +uid: uid342 +givenname: givenname342 +description: description342 +userPassword: password342 +mail: uid342 +uidnumber: 342 +gidnumber: 342 +homeDirectory: /home/uid342 + +dn: cn=user343,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user343 +sn: user343 +uid: uid343 +givenname: givenname343 +description: description343 +userPassword: password343 +mail: uid343 +uidnumber: 343 +gidnumber: 343 +homeDirectory: /home/uid343 + +dn: cn=user344,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user344 +sn: user344 +uid: uid344 +givenname: givenname344 +description: description344 +userPassword: password344 +mail: uid344 +uidnumber: 344 +gidnumber: 344 +homeDirectory: /home/uid344 + +dn: cn=user345,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user345 +sn: user345 +uid: uid345 +givenname: givenname345 +description: description345 +userPassword: password345 +mail: uid345 +uidnumber: 345 +gidnumber: 345 +homeDirectory: /home/uid345 + +dn: cn=user346,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user346 +sn: user346 +uid: uid346 +givenname: givenname346 +description: description346 +userPassword: password346 +mail: uid346 +uidnumber: 346 +gidnumber: 346 +homeDirectory: /home/uid346 + +dn: cn=user347,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user347 +sn: user347 +uid: uid347 +givenname: givenname347 +description: description347 +userPassword: password347 +mail: uid347 +uidnumber: 347 +gidnumber: 347 +homeDirectory: /home/uid347 + +dn: cn=user348,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user348 +sn: user348 +uid: uid348 +givenname: givenname348 +description: description348 +userPassword: password348 +mail: uid348 +uidnumber: 348 +gidnumber: 348 +homeDirectory: /home/uid348 + +dn: cn=user349,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user349 +sn: user349 +uid: uid349 +givenname: givenname349 +description: description349 +userPassword: password349 +mail: uid349 +uidnumber: 349 +gidnumber: 349 +homeDirectory: /home/uid349 + 
+dn: cn=user350,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user350 +sn: user350 +uid: uid350 +givenname: givenname350 +description: description350 +userPassword: password350 +mail: uid350 +uidnumber: 350 +gidnumber: 350 +homeDirectory: /home/uid350 + +dn: cn=user351,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user351 +sn: user351 +uid: uid351 +givenname: givenname351 +description: description351 +userPassword: password351 +mail: uid351 +uidnumber: 351 +gidnumber: 351 +homeDirectory: /home/uid351 + +dn: cn=user352,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user352 +sn: user352 +uid: uid352 +givenname: givenname352 +description: description352 +userPassword: password352 +mail: uid352 +uidnumber: 352 +gidnumber: 352 +homeDirectory: /home/uid352 + +dn: cn=user353,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user353 +sn: user353 +uid: uid353 +givenname: givenname353 +description: description353 +userPassword: password353 +mail: uid353 +uidnumber: 353 +gidnumber: 353 +homeDirectory: /home/uid353 + +dn: cn=user354,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user354 +sn: user354 +uid: uid354 +givenname: givenname354 +description: description354 +userPassword: password354 +mail: uid354 +uidnumber: 354 +gidnumber: 354 +homeDirectory: /home/uid354 + +dn: cn=user355,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user355 +sn: user355 +uid: uid355 +givenname: givenname355 +description: description355 +userPassword: password355 +mail: uid355 +uidnumber: 355 +gidnumber: 355 +homeDirectory: /home/uid355 + +dn: cn=user356,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user356 +sn: user356 +uid: uid356 +givenname: givenname356 +description: description356 +userPassword: password356 +mail: uid356 +uidnumber: 356 +gidnumber: 356 +homeDirectory: /home/uid356 + +dn: cn=user357,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user357 +sn: user357 +uid: uid357 +givenname: givenname357 +description: description357 +userPassword: password357 +mail: uid357 +uidnumber: 357 +gidnumber: 357 +homeDirectory: /home/uid357 + +dn: cn=user358,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user358 +sn: user358 +uid: uid358 +givenname: givenname358 +description: description358 +userPassword: password358 +mail: uid358 +uidnumber: 358 +gidnumber: 358 +homeDirectory: /home/uid358 + +dn: cn=user359,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user359 +sn: user359 +uid: uid359 +givenname: 
givenname359 +description: description359 +userPassword: password359 +mail: uid359 +uidnumber: 359 +gidnumber: 359 +homeDirectory: /home/uid359 + +dn: cn=user360,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user360 +sn: user360 +uid: uid360 +givenname: givenname360 +description: description360 +userPassword: password360 +mail: uid360 +uidnumber: 360 +gidnumber: 360 +homeDirectory: /home/uid360 + +dn: cn=user361,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user361 +sn: user361 +uid: uid361 +givenname: givenname361 +description: description361 +userPassword: password361 +mail: uid361 +uidnumber: 361 +gidnumber: 361 +homeDirectory: /home/uid361 + +dn: cn=user362,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user362 +sn: user362 +uid: uid362 +givenname: givenname362 +description: description362 +userPassword: password362 +mail: uid362 +uidnumber: 362 +gidnumber: 362 +homeDirectory: /home/uid362 + +dn: cn=user363,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user363 +sn: user363 +uid: uid363 +givenname: givenname363 +description: description363 +userPassword: password363 +mail: uid363 +uidnumber: 363 +gidnumber: 363 +homeDirectory: /home/uid363 + +dn: cn=user364,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user364 +sn: user364 +uid: uid364 +givenname: givenname364 +description: description364 +userPassword: password364 +mail: uid364 +uidnumber: 364 +gidnumber: 364 +homeDirectory: /home/uid364 + +dn: cn=user365,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user365 +sn: user365 +uid: uid365 +givenname: givenname365 +description: description365 +userPassword: password365 +mail: uid365 +uidnumber: 365 +gidnumber: 365 +homeDirectory: /home/uid365 + +dn: cn=user366,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user366 +sn: user366 +uid: uid366 +givenname: givenname366 +description: description366 +userPassword: password366 +mail: uid366 +uidnumber: 366 +gidnumber: 366 +homeDirectory: /home/uid366 + +dn: cn=user367,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user367 +sn: user367 +uid: uid367 +givenname: givenname367 +description: description367 +userPassword: password367 +mail: uid367 +uidnumber: 367 +gidnumber: 367 +homeDirectory: /home/uid367 + +dn: cn=user368,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user368 +sn: user368 +uid: uid368 +givenname: givenname368 +description: description368 +userPassword: password368 +mail: uid368 +uidnumber: 368 +gidnumber: 368 +homeDirectory: /home/uid368 + +dn: cn=user369,ou=People,dc=example,dc=com +objectClass: top +objectClass: 
person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user369 +sn: user369 +uid: uid369 +givenname: givenname369 +description: description369 +userPassword: password369 +mail: uid369 +uidnumber: 369 +gidnumber: 369 +homeDirectory: /home/uid369 + +dn: cn=user370,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user370 +sn: user370 +uid: uid370 +givenname: givenname370 +description: description370 +userPassword: password370 +mail: uid370 +uidnumber: 370 +gidnumber: 370 +homeDirectory: /home/uid370 + +dn: cn=user371,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user371 +sn: user371 +uid: uid371 +givenname: givenname371 +description: description371 +userPassword: password371 +mail: uid371 +uidnumber: 371 +gidnumber: 371 +homeDirectory: /home/uid371 + +dn: cn=user372,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user372 +sn: user372 +uid: uid372 +givenname: givenname372 +description: description372 +userPassword: password372 +mail: uid372 +uidnumber: 372 +gidnumber: 372 +homeDirectory: /home/uid372 + +dn: cn=user373,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user373 +sn: user373 +uid: uid373 +givenname: givenname373 +description: description373 +userPassword: password373 +mail: uid373 +uidnumber: 373 +gidnumber: 373 +homeDirectory: /home/uid373 + +dn: cn=user374,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user374 +sn: user374 +uid: uid374 +givenname: givenname374 +description: description374 +userPassword: password374 +mail: uid374 +uidnumber: 374 +gidnumber: 374 +homeDirectory: /home/uid374 + +dn: cn=user375,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user375 +sn: user375 +uid: uid375 +givenname: givenname375 +description: description375 +userPassword: password375 +mail: uid375 +uidnumber: 375 +gidnumber: 375 +homeDirectory: /home/uid375 + +dn: cn=user376,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user376 +sn: user376 +uid: uid376 +givenname: givenname376 +description: description376 +userPassword: password376 +mail: uid376 +uidnumber: 376 +gidnumber: 376 +homeDirectory: /home/uid376 + +dn: cn=user377,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user377 +sn: user377 +uid: uid377 +givenname: givenname377 +description: description377 +userPassword: password377 +mail: uid377 +uidnumber: 377 +gidnumber: 377 +homeDirectory: /home/uid377 + +dn: cn=user378,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user378 +sn: user378 +uid: uid378 +givenname: givenname378 +description: description378 +userPassword: password378 +mail: 
uid378 +uidnumber: 378 +gidnumber: 378 +homeDirectory: /home/uid378 + +dn: cn=user379,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user379 +sn: user379 +uid: uid379 +givenname: givenname379 +description: description379 +userPassword: password379 +mail: uid379 +uidnumber: 379 +gidnumber: 379 +homeDirectory: /home/uid379 + +dn: cn=user380,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user380 +sn: user380 +uid: uid380 +givenname: givenname380 +description: description380 +userPassword: password380 +mail: uid380 +uidnumber: 380 +gidnumber: 380 +homeDirectory: /home/uid380 + +dn: cn=user381,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user381 +sn: user381 +uid: uid381 +givenname: givenname381 +description: description381 +userPassword: password381 +mail: uid381 +uidnumber: 381 +gidnumber: 381 +homeDirectory: /home/uid381 + +dn: cn=user382,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user382 +sn: user382 +uid: uid382 +givenname: givenname382 +description: description382 +userPassword: password382 +mail: uid382 +uidnumber: 382 +gidnumber: 382 +homeDirectory: /home/uid382 + +dn: cn=user383,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user383 +sn: user383 +uid: uid383 +givenname: givenname383 +description: description383 +userPassword: password383 +mail: uid383 +uidnumber: 383 +gidnumber: 383 +homeDirectory: /home/uid383 + +dn: cn=user384,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user384 +sn: user384 +uid: uid384 +givenname: givenname384 +description: description384 +userPassword: password384 +mail: uid384 +uidnumber: 384 +gidnumber: 384 +homeDirectory: /home/uid384 + +dn: cn=user385,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user385 +sn: user385 +uid: uid385 +givenname: givenname385 +description: description385 +userPassword: password385 +mail: uid385 +uidnumber: 385 +gidnumber: 385 +homeDirectory: /home/uid385 + +dn: cn=user386,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user386 +sn: user386 +uid: uid386 +givenname: givenname386 +description: description386 +userPassword: password386 +mail: uid386 +uidnumber: 386 +gidnumber: 386 +homeDirectory: /home/uid386 + +dn: cn=user387,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user387 +sn: user387 +uid: uid387 +givenname: givenname387 +description: description387 +userPassword: password387 +mail: uid387 +uidnumber: 387 +gidnumber: 387 +homeDirectory: /home/uid387 + +dn: cn=user388,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson 
+objectClass: posixAccount +cn: user388 +sn: user388 +uid: uid388 +givenname: givenname388 +description: description388 +userPassword: password388 +mail: uid388 +uidnumber: 388 +gidnumber: 388 +homeDirectory: /home/uid388 + +dn: cn=user389,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user389 +sn: user389 +uid: uid389 +givenname: givenname389 +description: description389 +userPassword: password389 +mail: uid389 +uidnumber: 389 +gidnumber: 389 +homeDirectory: /home/uid389 + +dn: cn=user390,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user390 +sn: user390 +uid: uid390 +givenname: givenname390 +description: description390 +userPassword: password390 +mail: uid390 +uidnumber: 390 +gidnumber: 390 +homeDirectory: /home/uid390 + +dn: cn=user391,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user391 +sn: user391 +uid: uid391 +givenname: givenname391 +description: description391 +userPassword: password391 +mail: uid391 +uidnumber: 391 +gidnumber: 391 +homeDirectory: /home/uid391 + +dn: cn=user392,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user392 +sn: user392 +uid: uid392 +givenname: givenname392 +description: description392 +userPassword: password392 +mail: uid392 +uidnumber: 392 +gidnumber: 392 +homeDirectory: /home/uid392 + +dn: cn=user393,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user393 +sn: user393 +uid: uid393 +givenname: givenname393 +description: description393 +userPassword: password393 +mail: uid393 +uidnumber: 393 +gidnumber: 393 +homeDirectory: /home/uid393 + +dn: cn=user394,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user394 +sn: user394 +uid: uid394 +givenname: givenname394 +description: description394 +userPassword: password394 +mail: uid394 +uidnumber: 394 +gidnumber: 394 +homeDirectory: /home/uid394 + +dn: cn=user395,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user395 +sn: user395 +uid: uid395 +givenname: givenname395 +description: description395 +userPassword: password395 +mail: uid395 +uidnumber: 395 +gidnumber: 395 +homeDirectory: /home/uid395 + +dn: cn=user396,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user396 +sn: user396 +uid: uid396 +givenname: givenname396 +description: description396 +userPassword: password396 +mail: uid396 +uidnumber: 396 +gidnumber: 396 +homeDirectory: /home/uid396 + +dn: cn=user397,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user397 +sn: user397 +uid: uid397 +givenname: givenname397 +description: description397 +userPassword: password397 +mail: uid397 +uidnumber: 397 +gidnumber: 397 +homeDirectory: /home/uid397 + 
+dn: cn=user398,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user398 +sn: user398 +uid: uid398 +givenname: givenname398 +description: description398 +userPassword: password398 +mail: uid398 +uidnumber: 398 +gidnumber: 398 +homeDirectory: /home/uid398 + +dn: cn=user399,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user399 +sn: user399 +uid: uid399 +givenname: givenname399 +description: description399 +userPassword: password399 +mail: uid399 +uidnumber: 399 +gidnumber: 399 +homeDirectory: /home/uid399 + +dn: cn=user400,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user400 +sn: user400 +uid: uid400 +givenname: givenname400 +description: description400 +userPassword: password400 +mail: uid400 +uidnumber: 400 +gidnumber: 400 +homeDirectory: /home/uid400 + +dn: cn=user401,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user401 +sn: user401 +uid: uid401 +givenname: givenname401 +description: description401 +userPassword: password401 +mail: uid401 +uidnumber: 401 +gidnumber: 401 +homeDirectory: /home/uid401 + +dn: cn=user402,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user402 +sn: user402 +uid: uid402 +givenname: givenname402 +description: description402 +userPassword: password402 +mail: uid402 +uidnumber: 402 +gidnumber: 402 +homeDirectory: /home/uid402 + +dn: cn=user403,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user403 +sn: user403 +uid: uid403 +givenname: givenname403 +description: description403 +userPassword: password403 +mail: uid403 +uidnumber: 403 +gidnumber: 403 +homeDirectory: /home/uid403 + +dn: cn=user404,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user404 +sn: user404 +uid: uid404 +givenname: givenname404 +description: description404 +userPassword: password404 +mail: uid404 +uidnumber: 404 +gidnumber: 404 +homeDirectory: /home/uid404 + +dn: cn=user405,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user405 +sn: user405 +uid: uid405 +givenname: givenname405 +description: description405 +userPassword: password405 +mail: uid405 +uidnumber: 405 +gidnumber: 405 +homeDirectory: /home/uid405 + +dn: cn=user406,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user406 +sn: user406 +uid: uid406 +givenname: givenname406 +description: description406 +userPassword: password406 +mail: uid406 +uidnumber: 406 +gidnumber: 406 +homeDirectory: /home/uid406 + +dn: cn=user407,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user407 +sn: user407 +uid: uid407 +givenname: 
givenname407 +description: description407 +userPassword: password407 +mail: uid407 +uidnumber: 407 +gidnumber: 407 +homeDirectory: /home/uid407 + +dn: cn=user408,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user408 +sn: user408 +uid: uid408 +givenname: givenname408 +description: description408 +userPassword: password408 +mail: uid408 +uidnumber: 408 +gidnumber: 408 +homeDirectory: /home/uid408 + +dn: cn=user409,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user409 +sn: user409 +uid: uid409 +givenname: givenname409 +description: description409 +userPassword: password409 +mail: uid409 +uidnumber: 409 +gidnumber: 409 +homeDirectory: /home/uid409 + +dn: cn=user410,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user410 +sn: user410 +uid: uid410 +givenname: givenname410 +description: description410 +userPassword: password410 +mail: uid410 +uidnumber: 410 +gidnumber: 410 +homeDirectory: /home/uid410 + +dn: cn=user411,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user411 +sn: user411 +uid: uid411 +givenname: givenname411 +description: description411 +userPassword: password411 +mail: uid411 +uidnumber: 411 +gidnumber: 411 +homeDirectory: /home/uid411 + +dn: cn=user412,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user412 +sn: user412 +uid: uid412 +givenname: givenname412 +description: description412 +userPassword: password412 +mail: uid412 +uidnumber: 412 +gidnumber: 412 +homeDirectory: /home/uid412 + +dn: cn=user413,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user413 +sn: user413 +uid: uid413 +givenname: givenname413 +description: description413 +userPassword: password413 +mail: uid413 +uidnumber: 413 +gidnumber: 413 +homeDirectory: /home/uid413 + +dn: cn=user414,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user414 +sn: user414 +uid: uid414 +givenname: givenname414 +description: description414 +userPassword: password414 +mail: uid414 +uidnumber: 414 +gidnumber: 414 +homeDirectory: /home/uid414 + +dn: cn=user415,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user415 +sn: user415 +uid: uid415 +givenname: givenname415 +description: description415 +userPassword: password415 +mail: uid415 +uidnumber: 415 +gidnumber: 415 +homeDirectory: /home/uid415 + +dn: cn=user416,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user416 +sn: user416 +uid: uid416 +givenname: givenname416 +description: description416 +userPassword: password416 +mail: uid416 +uidnumber: 416 +gidnumber: 416 +homeDirectory: /home/uid416 + +dn: cn=user417,ou=People,dc=example,dc=com +objectClass: top +objectClass: 
person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user417 +sn: user417 +uid: uid417 +givenname: givenname417 +description: description417 +userPassword: password417 +mail: uid417 +uidnumber: 417 +gidnumber: 417 +homeDirectory: /home/uid417 + +dn: cn=user418,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user418 +sn: user418 +uid: uid418 +givenname: givenname418 +description: description418 +userPassword: password418 +mail: uid418 +uidnumber: 418 +gidnumber: 418 +homeDirectory: /home/uid418 + +dn: cn=user419,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user419 +sn: user419 +uid: uid419 +givenname: givenname419 +description: description419 +userPassword: password419 +mail: uid419 +uidnumber: 419 +gidnumber: 419 +homeDirectory: /home/uid419 + +dn: cn=user420,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user420 +sn: user420 +uid: uid420 +givenname: givenname420 +description: description420 +userPassword: password420 +mail: uid420 +uidnumber: 420 +gidnumber: 420 +homeDirectory: /home/uid420 + +dn: cn=user421,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user421 +sn: user421 +uid: uid421 +givenname: givenname421 +description: description421 +userPassword: password421 +mail: uid421 +uidnumber: 421 +gidnumber: 421 +homeDirectory: /home/uid421 + +dn: cn=user422,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user422 +sn: user422 +uid: uid422 +givenname: givenname422 +description: description422 +userPassword: password422 +mail: uid422 +uidnumber: 422 +gidnumber: 422 +homeDirectory: /home/uid422 + +dn: cn=user423,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user423 +sn: user423 +uid: uid423 +givenname: givenname423 +description: description423 +userPassword: password423 +mail: uid423 +uidnumber: 423 +gidnumber: 423 +homeDirectory: /home/uid423 + +dn: cn=user424,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user424 +sn: user424 +uid: uid424 +givenname: givenname424 +description: description424 +userPassword: password424 +mail: uid424 +uidnumber: 424 +gidnumber: 424 +homeDirectory: /home/uid424 + +dn: cn=user425,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user425 +sn: user425 +uid: uid425 +givenname: givenname425 +description: description425 +userPassword: password425 +mail: uid425 +uidnumber: 425 +gidnumber: 425 +homeDirectory: /home/uid425 + +dn: cn=user426,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user426 +sn: user426 +uid: uid426 +givenname: givenname426 +description: description426 +userPassword: password426 +mail: 
uid426 +uidnumber: 426 +gidnumber: 426 +homeDirectory: /home/uid426 + +dn: cn=user427,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user427 +sn: user427 +uid: uid427 +givenname: givenname427 +description: description427 +userPassword: password427 +mail: uid427 +uidnumber: 427 +gidnumber: 427 +homeDirectory: /home/uid427 + +dn: cn=user428,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user428 +sn: user428 +uid: uid428 +givenname: givenname428 +description: description428 +userPassword: password428 +mail: uid428 +uidnumber: 428 +gidnumber: 428 +homeDirectory: /home/uid428 + +dn: cn=user429,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user429 +sn: user429 +uid: uid429 +givenname: givenname429 +description: description429 +userPassword: password429 +mail: uid429 +uidnumber: 429 +gidnumber: 429 +homeDirectory: /home/uid429 + +dn: cn=user430,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user430 +sn: user430 +uid: uid430 +givenname: givenname430 +description: description430 +userPassword: password430 +mail: uid430 +uidnumber: 430 +gidnumber: 430 +homeDirectory: /home/uid430 + +dn: cn=user431,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user431 +sn: user431 +uid: uid431 +givenname: givenname431 +description: description431 +userPassword: password431 +mail: uid431 +uidnumber: 431 +gidnumber: 431 +homeDirectory: /home/uid431 + +dn: cn=user432,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user432 +sn: user432 +uid: uid432 +givenname: givenname432 +description: description432 +userPassword: password432 +mail: uid432 +uidnumber: 432 +gidnumber: 432 +homeDirectory: /home/uid432 + +dn: cn=user433,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user433 +sn: user433 +uid: uid433 +givenname: givenname433 +description: description433 +userPassword: password433 +mail: uid433 +uidnumber: 433 +gidnumber: 433 +homeDirectory: /home/uid433 + +dn: cn=user434,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user434 +sn: user434 +uid: uid434 +givenname: givenname434 +description: description434 +userPassword: password434 +mail: uid434 +uidnumber: 434 +gidnumber: 434 +homeDirectory: /home/uid434 + +dn: cn=user435,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user435 +sn: user435 +uid: uid435 +givenname: givenname435 +description: description435 +userPassword: password435 +mail: uid435 +uidnumber: 435 +gidnumber: 435 +homeDirectory: /home/uid435 + +dn: cn=user436,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson 
+objectClass: posixAccount +cn: user436 +sn: user436 +uid: uid436 +givenname: givenname436 +description: description436 +userPassword: password436 +mail: uid436 +uidnumber: 436 +gidnumber: 436 +homeDirectory: /home/uid436 + +dn: cn=user437,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user437 +sn: user437 +uid: uid437 +givenname: givenname437 +description: description437 +userPassword: password437 +mail: uid437 +uidnumber: 437 +gidnumber: 437 +homeDirectory: /home/uid437 + +dn: cn=user438,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user438 +sn: user438 +uid: uid438 +givenname: givenname438 +description: description438 +userPassword: password438 +mail: uid438 +uidnumber: 438 +gidnumber: 438 +homeDirectory: /home/uid438 + +dn: cn=user439,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user439 +sn: user439 +uid: uid439 +givenname: givenname439 +description: description439 +userPassword: password439 +mail: uid439 +uidnumber: 439 +gidnumber: 439 +homeDirectory: /home/uid439 + +dn: cn=user440,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user440 +sn: user440 +uid: uid440 +givenname: givenname440 +description: description440 +userPassword: password440 +mail: uid440 +uidnumber: 440 +gidnumber: 440 +homeDirectory: /home/uid440 + +dn: cn=user441,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user441 +sn: user441 +uid: uid441 +givenname: givenname441 +description: description441 +userPassword: password441 +mail: uid441 +uidnumber: 441 +gidnumber: 441 +homeDirectory: /home/uid441 + +dn: cn=user442,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user442 +sn: user442 +uid: uid442 +givenname: givenname442 +description: description442 +userPassword: password442 +mail: uid442 +uidnumber: 442 +gidnumber: 442 +homeDirectory: /home/uid442 + +dn: cn=user443,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user443 +sn: user443 +uid: uid443 +givenname: givenname443 +description: description443 +userPassword: password443 +mail: uid443 +uidnumber: 443 +gidnumber: 443 +homeDirectory: /home/uid443 + +dn: cn=user444,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user444 +sn: user444 +uid: uid444 +givenname: givenname444 +description: description444 +userPassword: password444 +mail: uid444 +uidnumber: 444 +gidnumber: 444 +homeDirectory: /home/uid444 + +dn: cn=user445,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user445 +sn: user445 +uid: uid445 +givenname: givenname445 +description: description445 +userPassword: password445 +mail: uid445 +uidnumber: 445 +gidnumber: 445 +homeDirectory: /home/uid445 + 
+dn: cn=user446,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user446 +sn: user446 +uid: uid446 +givenname: givenname446 +description: description446 +userPassword: password446 +mail: uid446 +uidnumber: 446 +gidnumber: 446 +homeDirectory: /home/uid446 + +dn: cn=user447,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user447 +sn: user447 +uid: uid447 +givenname: givenname447 +description: description447 +userPassword: password447 +mail: uid447 +uidnumber: 447 +gidnumber: 447 +homeDirectory: /home/uid447 + +dn: cn=user448,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user448 +sn: user448 +uid: uid448 +givenname: givenname448 +description: description448 +userPassword: password448 +mail: uid448 +uidnumber: 448 +gidnumber: 448 +homeDirectory: /home/uid448 + +dn: cn=user449,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user449 +sn: user449 +uid: uid449 +givenname: givenname449 +description: description449 +userPassword: password449 +mail: uid449 +uidnumber: 449 +gidnumber: 449 +homeDirectory: /home/uid449 + +dn: cn=user450,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user450 +sn: user450 +uid: uid450 +givenname: givenname450 +description: description450 +userPassword: password450 +mail: uid450 +uidnumber: 450 +gidnumber: 450 +homeDirectory: /home/uid450 + +dn: cn=user451,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user451 +sn: user451 +uid: uid451 +givenname: givenname451 +description: description451 +userPassword: password451 +mail: uid451 +uidnumber: 451 +gidnumber: 451 +homeDirectory: /home/uid451 + +dn: cn=user452,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user452 +sn: user452 +uid: uid452 +givenname: givenname452 +description: description452 +userPassword: password452 +mail: uid452 +uidnumber: 452 +gidnumber: 452 +homeDirectory: /home/uid452 + +dn: cn=user453,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user453 +sn: user453 +uid: uid453 +givenname: givenname453 +description: description453 +userPassword: password453 +mail: uid453 +uidnumber: 453 +gidnumber: 453 +homeDirectory: /home/uid453 + +dn: cn=user454,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user454 +sn: user454 +uid: uid454 +givenname: givenname454 +description: description454 +userPassword: password454 +mail: uid454 +uidnumber: 454 +gidnumber: 454 +homeDirectory: /home/uid454 + +dn: cn=user455,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user455 +sn: user455 +uid: uid455 +givenname: 
givenname455 +description: description455 +userPassword: password455 +mail: uid455 +uidnumber: 455 +gidnumber: 455 +homeDirectory: /home/uid455 + +dn: cn=user456,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user456 +sn: user456 +uid: uid456 +givenname: givenname456 +description: description456 +userPassword: password456 +mail: uid456 +uidnumber: 456 +gidnumber: 456 +homeDirectory: /home/uid456 + +dn: cn=user457,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user457 +sn: user457 +uid: uid457 +givenname: givenname457 +description: description457 +userPassword: password457 +mail: uid457 +uidnumber: 457 +gidnumber: 457 +homeDirectory: /home/uid457 + +dn: cn=user458,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user458 +sn: user458 +uid: uid458 +givenname: givenname458 +description: description458 +userPassword: password458 +mail: uid458 +uidnumber: 458 +gidnumber: 458 +homeDirectory: /home/uid458 + +dn: cn=user459,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user459 +sn: user459 +uid: uid459 +givenname: givenname459 +description: description459 +userPassword: password459 +mail: uid459 +uidnumber: 459 +gidnumber: 459 +homeDirectory: /home/uid459 + +dn: cn=user460,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user460 +sn: user460 +uid: uid460 +givenname: givenname460 +description: description460 +userPassword: password460 +mail: uid460 +uidnumber: 460 +gidnumber: 460 +homeDirectory: /home/uid460 + +dn: cn=user461,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user461 +sn: user461 +uid: uid461 +givenname: givenname461 +description: description461 +userPassword: password461 +mail: uid461 +uidnumber: 461 +gidnumber: 461 +homeDirectory: /home/uid461 + +dn: cn=user462,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user462 +sn: user462 +uid: uid462 +givenname: givenname462 +description: description462 +userPassword: password462 +mail: uid462 +uidnumber: 462 +gidnumber: 462 +homeDirectory: /home/uid462 + +dn: cn=user463,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user463 +sn: user463 +uid: uid463 +givenname: givenname463 +description: description463 +userPassword: password463 +mail: uid463 +uidnumber: 463 +gidnumber: 463 +homeDirectory: /home/uid463 + +dn: cn=user464,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user464 +sn: user464 +uid: uid464 +givenname: givenname464 +description: description464 +userPassword: password464 +mail: uid464 +uidnumber: 464 +gidnumber: 464 +homeDirectory: /home/uid464 + +dn: cn=user465,ou=People,dc=example,dc=com +objectClass: top +objectClass: 
person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user465 +sn: user465 +uid: uid465 +givenname: givenname465 +description: description465 +userPassword: password465 +mail: uid465 +uidnumber: 465 +gidnumber: 465 +homeDirectory: /home/uid465 + +dn: cn=user466,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user466 +sn: user466 +uid: uid466 +givenname: givenname466 +description: description466 +userPassword: password466 +mail: uid466 +uidnumber: 466 +gidnumber: 466 +homeDirectory: /home/uid466 + +dn: cn=user467,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user467 +sn: user467 +uid: uid467 +givenname: givenname467 +description: description467 +userPassword: password467 +mail: uid467 +uidnumber: 467 +gidnumber: 467 +homeDirectory: /home/uid467 + +dn: cn=user468,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user468 +sn: user468 +uid: uid468 +givenname: givenname468 +description: description468 +userPassword: password468 +mail: uid468 +uidnumber: 468 +gidnumber: 468 +homeDirectory: /home/uid468 + +dn: cn=user469,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user469 +sn: user469 +uid: uid469 +givenname: givenname469 +description: description469 +userPassword: password469 +mail: uid469 +uidnumber: 469 +gidnumber: 469 +homeDirectory: /home/uid469 + +dn: cn=user470,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user470 +sn: user470 +uid: uid470 +givenname: givenname470 +description: description470 +userPassword: password470 +mail: uid470 +uidnumber: 470 +gidnumber: 470 +homeDirectory: /home/uid470 + +dn: cn=user471,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user471 +sn: user471 +uid: uid471 +givenname: givenname471 +description: description471 +userPassword: password471 +mail: uid471 +uidnumber: 471 +gidnumber: 471 +homeDirectory: /home/uid471 + +dn: cn=user472,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user472 +sn: user472 +uid: uid472 +givenname: givenname472 +description: description472 +userPassword: password472 +mail: uid472 +uidnumber: 472 +gidnumber: 472 +homeDirectory: /home/uid472 + +dn: cn=user473,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user473 +sn: user473 +uid: uid473 +givenname: givenname473 +description: description473 +userPassword: password473 +mail: uid473 +uidnumber: 473 +gidnumber: 473 +homeDirectory: /home/uid473 + +dn: cn=user474,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user474 +sn: user474 +uid: uid474 +givenname: givenname474 +description: description474 +userPassword: password474 +mail: 
uid474 +uidnumber: 474 +gidnumber: 474 +homeDirectory: /home/uid474 + +dn: cn=user475,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user475 +sn: user475 +uid: uid475 +givenname: givenname475 +description: description475 +userPassword: password475 +mail: uid475 +uidnumber: 475 +gidnumber: 475 +homeDirectory: /home/uid475 + +dn: cn=user476,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user476 +sn: user476 +uid: uid476 +givenname: givenname476 +description: description476 +userPassword: password476 +mail: uid476 +uidnumber: 476 +gidnumber: 476 +homeDirectory: /home/uid476 + +dn: cn=user477,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user477 +sn: user477 +uid: uid477 +givenname: givenname477 +description: description477 +userPassword: password477 +mail: uid477 +uidnumber: 477 +gidnumber: 477 +homeDirectory: /home/uid477 + +dn: cn=user478,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user478 +sn: user478 +uid: uid478 +givenname: givenname478 +description: description478 +userPassword: password478 +mail: uid478 +uidnumber: 478 +gidnumber: 478 +homeDirectory: /home/uid478 + +dn: cn=user479,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user479 +sn: user479 +uid: uid479 +givenname: givenname479 +description: description479 +userPassword: password479 +mail: uid479 +uidnumber: 479 +gidnumber: 479 +homeDirectory: /home/uid479 + +dn: cn=user480,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user480 +sn: user480 +uid: uid480 +givenname: givenname480 +description: description480 +userPassword: password480 +mail: uid480 +uidnumber: 480 +gidnumber: 480 +homeDirectory: /home/uid480 + +dn: cn=user481,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user481 +sn: user481 +uid: uid481 +givenname: givenname481 +description: description481 +userPassword: password481 +mail: uid481 +uidnumber: 481 +gidnumber: 481 +homeDirectory: /home/uid481 + +dn: cn=user482,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user482 +sn: user482 +uid: uid482 +givenname: givenname482 +description: description482 +userPassword: password482 +mail: uid482 +uidnumber: 482 +gidnumber: 482 +homeDirectory: /home/uid482 + +dn: cn=user483,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user483 +sn: user483 +uid: uid483 +givenname: givenname483 +description: description483 +userPassword: password483 +mail: uid483 +uidnumber: 483 +gidnumber: 483 +homeDirectory: /home/uid483 + +dn: cn=user484,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson 
+objectClass: posixAccount +cn: user484 +sn: user484 +uid: uid484 +givenname: givenname484 +description: description484 +userPassword: password484 +mail: uid484 +uidnumber: 484 +gidnumber: 484 +homeDirectory: /home/uid484 + +dn: cn=user485,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user485 +sn: user485 +uid: uid485 +givenname: givenname485 +description: description485 +userPassword: password485 +mail: uid485 +uidnumber: 485 +gidnumber: 485 +homeDirectory: /home/uid485 + +dn: cn=user486,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user486 +sn: user486 +uid: uid486 +givenname: givenname486 +description: description486 +userPassword: password486 +mail: uid486 +uidnumber: 486 +gidnumber: 486 +homeDirectory: /home/uid486 + +dn: cn=user487,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user487 +sn: user487 +uid: uid487 +givenname: givenname487 +description: description487 +userPassword: password487 +mail: uid487 +uidnumber: 487 +gidnumber: 487 +homeDirectory: /home/uid487 + +dn: cn=user488,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user488 +sn: user488 +uid: uid488 +givenname: givenname488 +description: description488 +userPassword: password488 +mail: uid488 +uidnumber: 488 +gidnumber: 488 +homeDirectory: /home/uid488 + +dn: cn=user489,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user489 +sn: user489 +uid: uid489 +givenname: givenname489 +description: description489 +userPassword: password489 +mail: uid489 +uidnumber: 489 +gidnumber: 489 +homeDirectory: /home/uid489 + +dn: cn=user490,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user490 +sn: user490 +uid: uid490 +givenname: givenname490 +description: description490 +userPassword: password490 +mail: uid490 +uidnumber: 490 +gidnumber: 490 +homeDirectory: /home/uid490 + +dn: cn=user491,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user491 +sn: user491 +uid: uid491 +givenname: givenname491 +description: description491 +userPassword: password491 +mail: uid491 +uidnumber: 491 +gidnumber: 491 +homeDirectory: /home/uid491 + +dn: cn=user492,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user492 +sn: user492 +uid: uid492 +givenname: givenname492 +description: description492 +userPassword: password492 +mail: uid492 +uidnumber: 492 +gidnumber: 492 +homeDirectory: /home/uid492 + +dn: cn=user493,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user493 +sn: user493 +uid: uid493 +givenname: givenname493 +description: description493 +userPassword: password493 +mail: uid493 +uidnumber: 493 +gidnumber: 493 +homeDirectory: /home/uid493 + 
+dn: cn=user494,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user494 +sn: user494 +uid: uid494 +givenname: givenname494 +description: description494 +userPassword: password494 +mail: uid494 +uidnumber: 494 +gidnumber: 494 +homeDirectory: /home/uid494 + +dn: cn=user495,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user495 +sn: user495 +uid: uid495 +givenname: givenname495 +description: description495 +userPassword: password495 +mail: uid495 +uidnumber: 495 +gidnumber: 495 +homeDirectory: /home/uid495 + +dn: cn=user496,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user496 +sn: user496 +uid: uid496 +givenname: givenname496 +description: description496 +userPassword: password496 +mail: uid496 +uidnumber: 496 +gidnumber: 496 +homeDirectory: /home/uid496 + +dn: cn=user497,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user497 +sn: user497 +uid: uid497 +givenname: givenname497 +description: description497 +userPassword: password497 +mail: uid497 +uidnumber: 497 +gidnumber: 497 +homeDirectory: /home/uid497 + +dn: cn=user498,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user498 +sn: user498 +uid: uid498 +givenname: givenname498 +description: description498 +userPassword: password498 +mail: uid498 +uidnumber: 498 +gidnumber: 498 +homeDirectory: /home/uid498 + +dn: cn=user499,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user499 +sn: user499 +uid: uid499 +givenname: givenname499 +description: description499 +userPassword: password499 +mail: uid499 +uidnumber: 499 +gidnumber: 499 +homeDirectory: /home/uid499 + +dn: cn=user500,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user500 +sn: user500 +uid: uid500 +givenname: givenname500 +description: description500 +userPassword: password500 +mail: uid500 +uidnumber: 500 +gidnumber: 500 +homeDirectory: /home/uid500 + +dn: cn=user501,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user501 +sn: user501 +uid: uid501 +givenname: givenname501 +description: description501 +userPassword: password501 +mail: uid501 +uidnumber: 501 +gidnumber: 501 +homeDirectory: /home/uid501 + +dn: cn=user502,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user502 +sn: user502 +uid: uid502 +givenname: givenname502 +description: description502 +userPassword: password502 +mail: uid502 +uidnumber: 502 +gidnumber: 502 +homeDirectory: /home/uid502 + +dn: cn=user503,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user503 +sn: user503 +uid: uid503 +givenname: 
givenname503 +description: description503 +userPassword: password503 +mail: uid503 +uidnumber: 503 +gidnumber: 503 +homeDirectory: /home/uid503 + +dn: cn=user504,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user504 +sn: user504 +uid: uid504 +givenname: givenname504 +description: description504 +userPassword: password504 +mail: uid504 +uidnumber: 504 +gidnumber: 504 +homeDirectory: /home/uid504 + +dn: cn=user505,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user505 +sn: user505 +uid: uid505 +givenname: givenname505 +description: description505 +userPassword: password505 +mail: uid505 +uidnumber: 505 +gidnumber: 505 +homeDirectory: /home/uid505 + +dn: cn=user506,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user506 +sn: user506 +uid: uid506 +givenname: givenname506 +description: description506 +userPassword: password506 +mail: uid506 +uidnumber: 506 +gidnumber: 506 +homeDirectory: /home/uid506 + +dn: cn=user507,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user507 +sn: user507 +uid: uid507 +givenname: givenname507 +description: description507 +userPassword: password507 +mail: uid507 +uidnumber: 507 +gidnumber: 507 +homeDirectory: /home/uid507 + +dn: cn=user508,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user508 +sn: user508 +uid: uid508 +givenname: givenname508 +description: description508 +userPassword: password508 +mail: uid508 +uidnumber: 508 +gidnumber: 508 +homeDirectory: /home/uid508 + +dn: cn=user509,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user509 +sn: user509 +uid: uid509 +givenname: givenname509 +description: description509 +userPassword: password509 +mail: uid509 +uidnumber: 509 +gidnumber: 509 +homeDirectory: /home/uid509 + +dn: cn=user510,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user510 +sn: user510 +uid: uid510 +givenname: givenname510 +description: description510 +userPassword: password510 +mail: uid510 +uidnumber: 510 +gidnumber: 510 +homeDirectory: /home/uid510 + +dn: cn=user511,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user511 +sn: user511 +uid: uid511 +givenname: givenname511 +description: description511 +userPassword: password511 +mail: uid511 +uidnumber: 511 +gidnumber: 511 +homeDirectory: /home/uid511 + +dn: cn=user512,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user512 +sn: user512 +uid: uid512 +givenname: givenname512 +description: description512 +userPassword: password512 +mail: uid512 +uidnumber: 512 +gidnumber: 512 +homeDirectory: /home/uid512 + +dn: cn=user513,ou=People,dc=example,dc=com +objectClass: top +objectClass: 
person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user513 +sn: user513 +uid: uid513 +givenname: givenname513 +description: description513 +userPassword: password513 +mail: uid513 +uidnumber: 513 +gidnumber: 513 +homeDirectory: /home/uid513 + +dn: cn=user514,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user514 +sn: user514 +uid: uid514 +givenname: givenname514 +description: description514 +userPassword: password514 +mail: uid514 +uidnumber: 514 +gidnumber: 514 +homeDirectory: /home/uid514 + +dn: cn=user515,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user515 +sn: user515 +uid: uid515 +givenname: givenname515 +description: description515 +userPassword: password515 +mail: uid515 +uidnumber: 515 +gidnumber: 515 +homeDirectory: /home/uid515 + +dn: cn=user516,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user516 +sn: user516 +uid: uid516 +givenname: givenname516 +description: description516 +userPassword: password516 +mail: uid516 +uidnumber: 516 +gidnumber: 516 +homeDirectory: /home/uid516 + +dn: cn=user517,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user517 +sn: user517 +uid: uid517 +givenname: givenname517 +description: description517 +userPassword: password517 +mail: uid517 +uidnumber: 517 +gidnumber: 517 +homeDirectory: /home/uid517 + +dn: cn=user518,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user518 +sn: user518 +uid: uid518 +givenname: givenname518 +description: description518 +userPassword: password518 +mail: uid518 +uidnumber: 518 +gidnumber: 518 +homeDirectory: /home/uid518 + +dn: cn=user519,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user519 +sn: user519 +uid: uid519 +givenname: givenname519 +description: description519 +userPassword: password519 +mail: uid519 +uidnumber: 519 +gidnumber: 519 +homeDirectory: /home/uid519 + +dn: cn=user520,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user520 +sn: user520 +uid: uid520 +givenname: givenname520 +description: description520 +userPassword: password520 +mail: uid520 +uidnumber: 520 +gidnumber: 520 +homeDirectory: /home/uid520 + +dn: cn=user521,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user521 +sn: user521 +uid: uid521 +givenname: givenname521 +description: description521 +userPassword: password521 +mail: uid521 +uidnumber: 521 +gidnumber: 521 +homeDirectory: /home/uid521 + +dn: cn=user522,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user522 +sn: user522 +uid: uid522 +givenname: givenname522 +description: description522 +userPassword: password522 +mail: 
uid522 +uidnumber: 522 +gidnumber: 522 +homeDirectory: /home/uid522 + +dn: cn=user523,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user523 +sn: user523 +uid: uid523 +givenname: givenname523 +description: description523 +userPassword: password523 +mail: uid523 +uidnumber: 523 +gidnumber: 523 +homeDirectory: /home/uid523 + +dn: cn=user524,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user524 +sn: user524 +uid: uid524 +givenname: givenname524 +description: description524 +userPassword: password524 +mail: uid524 +uidnumber: 524 +gidnumber: 524 +homeDirectory: /home/uid524 + +dn: cn=user525,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user525 +sn: user525 +uid: uid525 +givenname: givenname525 +description: description525 +userPassword: password525 +mail: uid525 +uidnumber: 525 +gidnumber: 525 +homeDirectory: /home/uid525 + +dn: cn=user526,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user526 +sn: user526 +uid: uid526 +givenname: givenname526 +description: description526 +userPassword: password526 +mail: uid526 +uidnumber: 526 +gidnumber: 526 +homeDirectory: /home/uid526 + +dn: cn=user527,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user527 +sn: user527 +uid: uid527 +givenname: givenname527 +description: description527 +userPassword: password527 +mail: uid527 +uidnumber: 527 +gidnumber: 527 +homeDirectory: /home/uid527 + +dn: cn=user528,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user528 +sn: user528 +uid: uid528 +givenname: givenname528 +description: description528 +userPassword: password528 +mail: uid528 +uidnumber: 528 +gidnumber: 528 +homeDirectory: /home/uid528 + +dn: cn=user529,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user529 +sn: user529 +uid: uid529 +givenname: givenname529 +description: description529 +userPassword: password529 +mail: uid529 +uidnumber: 529 +gidnumber: 529 +homeDirectory: /home/uid529 + +dn: cn=user530,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user530 +sn: user530 +uid: uid530 +givenname: givenname530 +description: description530 +userPassword: password530 +mail: uid530 +uidnumber: 530 +gidnumber: 530 +homeDirectory: /home/uid530 + +dn: cn=user531,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user531 +sn: user531 +uid: uid531 +givenname: givenname531 +description: description531 +userPassword: password531 +mail: uid531 +uidnumber: 531 +gidnumber: 531 +homeDirectory: /home/uid531 + +dn: cn=user532,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson 
+objectClass: posixAccount +cn: user532 +sn: user532 +uid: uid532 +givenname: givenname532 +description: description532 +userPassword: password532 +mail: uid532 +uidnumber: 532 +gidnumber: 532 +homeDirectory: /home/uid532 + +dn: cn=user533,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user533 +sn: user533 +uid: uid533 +givenname: givenname533 +description: description533 +userPassword: password533 +mail: uid533 +uidnumber: 533 +gidnumber: 533 +homeDirectory: /home/uid533 + +dn: cn=user534,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user534 +sn: user534 +uid: uid534 +givenname: givenname534 +description: description534 +userPassword: password534 +mail: uid534 +uidnumber: 534 +gidnumber: 534 +homeDirectory: /home/uid534 + +dn: cn=user535,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user535 +sn: user535 +uid: uid535 +givenname: givenname535 +description: description535 +userPassword: password535 +mail: uid535 +uidnumber: 535 +gidnumber: 535 +homeDirectory: /home/uid535 + +dn: cn=user536,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user536 +sn: user536 +uid: uid536 +givenname: givenname536 +description: description536 +userPassword: password536 +mail: uid536 +uidnumber: 536 +gidnumber: 536 +homeDirectory: /home/uid536 + +dn: cn=user537,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user537 +sn: user537 +uid: uid537 +givenname: givenname537 +description: description537 +userPassword: password537 +mail: uid537 +uidnumber: 537 +gidnumber: 537 +homeDirectory: /home/uid537 + +dn: cn=user538,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user538 +sn: user538 +uid: uid538 +givenname: givenname538 +description: description538 +userPassword: password538 +mail: uid538 +uidnumber: 538 +gidnumber: 538 +homeDirectory: /home/uid538 + +dn: cn=user539,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user539 +sn: user539 +uid: uid539 +givenname: givenname539 +description: description539 +userPassword: password539 +mail: uid539 +uidnumber: 539 +gidnumber: 539 +homeDirectory: /home/uid539 + +dn: cn=user540,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user540 +sn: user540 +uid: uid540 +givenname: givenname540 +description: description540 +userPassword: password540 +mail: uid540 +uidnumber: 540 +gidnumber: 540 +homeDirectory: /home/uid540 + +dn: cn=user541,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user541 +sn: user541 +uid: uid541 +givenname: givenname541 +description: description541 +userPassword: password541 +mail: uid541 +uidnumber: 541 +gidnumber: 541 +homeDirectory: /home/uid541 + 
+dn: cn=user542,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user542 +sn: user542 +uid: uid542 +givenname: givenname542 +description: description542 +userPassword: password542 +mail: uid542 +uidnumber: 542 +gidnumber: 542 +homeDirectory: /home/uid542 + +dn: cn=user543,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user543 +sn: user543 +uid: uid543 +givenname: givenname543 +description: description543 +userPassword: password543 +mail: uid543 +uidnumber: 543 +gidnumber: 543 +homeDirectory: /home/uid543 + +dn: cn=user544,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user544 +sn: user544 +uid: uid544 +givenname: givenname544 +description: description544 +userPassword: password544 +mail: uid544 +uidnumber: 544 +gidnumber: 544 +homeDirectory: /home/uid544 + +dn: cn=user545,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user545 +sn: user545 +uid: uid545 +givenname: givenname545 +description: description545 +userPassword: password545 +mail: uid545 +uidnumber: 545 +gidnumber: 545 +homeDirectory: /home/uid545 + +dn: cn=user546,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user546 +sn: user546 +uid: uid546 +givenname: givenname546 +description: description546 +userPassword: password546 +mail: uid546 +uidnumber: 546 +gidnumber: 546 +homeDirectory: /home/uid546 + +dn: cn=user547,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user547 +sn: user547 +uid: uid547 +givenname: givenname547 +description: description547 +userPassword: password547 +mail: uid547 +uidnumber: 547 +gidnumber: 547 +homeDirectory: /home/uid547 + +dn: cn=user548,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user548 +sn: user548 +uid: uid548 +givenname: givenname548 +description: description548 +userPassword: password548 +mail: uid548 +uidnumber: 548 +gidnumber: 548 +homeDirectory: /home/uid548 + +dn: cn=user549,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user549 +sn: user549 +uid: uid549 +givenname: givenname549 +description: description549 +userPassword: password549 +mail: uid549 +uidnumber: 549 +gidnumber: 549 +homeDirectory: /home/uid549 + +dn: cn=user550,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user550 +sn: user550 +uid: uid550 +givenname: givenname550 +description: description550 +userPassword: password550 +mail: uid550 +uidnumber: 550 +gidnumber: 550 +homeDirectory: /home/uid550 + +dn: cn=user551,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user551 +sn: user551 +uid: uid551 +givenname: 
givenname551 +description: description551 +userPassword: password551 +mail: uid551 +uidnumber: 551 +gidnumber: 551 +homeDirectory: /home/uid551 + +dn: cn=user552,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user552 +sn: user552 +uid: uid552 +givenname: givenname552 +description: description552 +userPassword: password552 +mail: uid552 +uidnumber: 552 +gidnumber: 552 +homeDirectory: /home/uid552 + +dn: cn=user553,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user553 +sn: user553 +uid: uid553 +givenname: givenname553 +description: description553 +userPassword: password553 +mail: uid553 +uidnumber: 553 +gidnumber: 553 +homeDirectory: /home/uid553 + +dn: cn=user554,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user554 +sn: user554 +uid: uid554 +givenname: givenname554 +description: description554 +userPassword: password554 +mail: uid554 +uidnumber: 554 +gidnumber: 554 +homeDirectory: /home/uid554 + +dn: cn=user555,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user555 +sn: user555 +uid: uid555 +givenname: givenname555 +description: description555 +userPassword: password555 +mail: uid555 +uidnumber: 555 +gidnumber: 555 +homeDirectory: /home/uid555 + +dn: cn=user556,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user556 +sn: user556 +uid: uid556 +givenname: givenname556 +description: description556 +userPassword: password556 +mail: uid556 +uidnumber: 556 +gidnumber: 556 +homeDirectory: /home/uid556 + +dn: cn=user557,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user557 +sn: user557 +uid: uid557 +givenname: givenname557 +description: description557 +userPassword: password557 +mail: uid557 +uidnumber: 557 +gidnumber: 557 +homeDirectory: /home/uid557 + +dn: cn=user558,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user558 +sn: user558 +uid: uid558 +givenname: givenname558 +description: description558 +userPassword: password558 +mail: uid558 +uidnumber: 558 +gidnumber: 558 +homeDirectory: /home/uid558 + +dn: cn=user559,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user559 +sn: user559 +uid: uid559 +givenname: givenname559 +description: description559 +userPassword: password559 +mail: uid559 +uidnumber: 559 +gidnumber: 559 +homeDirectory: /home/uid559 + +dn: cn=user560,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user560 +sn: user560 +uid: uid560 +givenname: givenname560 +description: description560 +userPassword: password560 +mail: uid560 +uidnumber: 560 +gidnumber: 560 +homeDirectory: /home/uid560 + +dn: cn=user561,ou=People,dc=example,dc=com +objectClass: top +objectClass: 
person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user561 +sn: user561 +uid: uid561 +givenname: givenname561 +description: description561 +userPassword: password561 +mail: uid561 +uidnumber: 561 +gidnumber: 561 +homeDirectory: /home/uid561 + +dn: cn=user562,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user562 +sn: user562 +uid: uid562 +givenname: givenname562 +description: description562 +userPassword: password562 +mail: uid562 +uidnumber: 562 +gidnumber: 562 +homeDirectory: /home/uid562 + +dn: cn=user563,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user563 +sn: user563 +uid: uid563 +givenname: givenname563 +description: description563 +userPassword: password563 +mail: uid563 +uidnumber: 563 +gidnumber: 563 +homeDirectory: /home/uid563 + +dn: cn=user564,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user564 +sn: user564 +uid: uid564 +givenname: givenname564 +description: description564 +userPassword: password564 +mail: uid564 +uidnumber: 564 +gidnumber: 564 +homeDirectory: /home/uid564 + +dn: cn=user565,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user565 +sn: user565 +uid: uid565 +givenname: givenname565 +description: description565 +userPassword: password565 +mail: uid565 +uidnumber: 565 +gidnumber: 565 +homeDirectory: /home/uid565 + +dn: cn=user566,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user566 +sn: user566 +uid: uid566 +givenname: givenname566 +description: description566 +userPassword: password566 +mail: uid566 +uidnumber: 566 +gidnumber: 566 +homeDirectory: /home/uid566 + +dn: cn=user567,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user567 +sn: user567 +uid: uid567 +givenname: givenname567 +description: description567 +userPassword: password567 +mail: uid567 +uidnumber: 567 +gidnumber: 567 +homeDirectory: /home/uid567 + +dn: cn=user568,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user568 +sn: user568 +uid: uid568 +givenname: givenname568 +description: description568 +userPassword: password568 +mail: uid568 +uidnumber: 568 +gidnumber: 568 +homeDirectory: /home/uid568 + +dn: cn=user569,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user569 +sn: user569 +uid: uid569 +givenname: givenname569 +description: description569 +userPassword: password569 +mail: uid569 +uidnumber: 569 +gidnumber: 569 +homeDirectory: /home/uid569 + +dn: cn=user570,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user570 +sn: user570 +uid: uid570 +givenname: givenname570 +description: description570 +userPassword: password570 +mail: 
uid570 +uidnumber: 570 +gidnumber: 570 +homeDirectory: /home/uid570 + +dn: cn=user571,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user571 +sn: user571 +uid: uid571 +givenname: givenname571 +description: description571 +userPassword: password571 +mail: uid571 +uidnumber: 571 +gidnumber: 571 +homeDirectory: /home/uid571 + +dn: cn=user572,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user572 +sn: user572 +uid: uid572 +givenname: givenname572 +description: description572 +userPassword: password572 +mail: uid572 +uidnumber: 572 +gidnumber: 572 +homeDirectory: /home/uid572 + +dn: cn=user573,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user573 +sn: user573 +uid: uid573 +givenname: givenname573 +description: description573 +userPassword: password573 +mail: uid573 +uidnumber: 573 +gidnumber: 573 +homeDirectory: /home/uid573 + +dn: cn=user574,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user574 +sn: user574 +uid: uid574 +givenname: givenname574 +description: description574 +userPassword: password574 +mail: uid574 +uidnumber: 574 +gidnumber: 574 +homeDirectory: /home/uid574 + +dn: cn=user575,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user575 +sn: user575 +uid: uid575 +givenname: givenname575 +description: description575 +userPassword: password575 +mail: uid575 +uidnumber: 575 +gidnumber: 575 +homeDirectory: /home/uid575 + +dn: cn=user576,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user576 +sn: user576 +uid: uid576 +givenname: givenname576 +description: description576 +userPassword: password576 +mail: uid576 +uidnumber: 576 +gidnumber: 576 +homeDirectory: /home/uid576 + +dn: cn=user577,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user577 +sn: user577 +uid: uid577 +givenname: givenname577 +description: description577 +userPassword: password577 +mail: uid577 +uidnumber: 577 +gidnumber: 577 +homeDirectory: /home/uid577 + +dn: cn=user578,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user578 +sn: user578 +uid: uid578 +givenname: givenname578 +description: description578 +userPassword: password578 +mail: uid578 +uidnumber: 578 +gidnumber: 578 +homeDirectory: /home/uid578 + +dn: cn=user579,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user579 +sn: user579 +uid: uid579 +givenname: givenname579 +description: description579 +userPassword: password579 +mail: uid579 +uidnumber: 579 +gidnumber: 579 +homeDirectory: /home/uid579 + +dn: cn=user580,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson 
+objectClass: posixAccount +cn: user580 +sn: user580 +uid: uid580 +givenname: givenname580 +description: description580 +userPassword: password580 +mail: uid580 +uidnumber: 580 +gidnumber: 580 +homeDirectory: /home/uid580 + +dn: cn=user581,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user581 +sn: user581 +uid: uid581 +givenname: givenname581 +description: description581 +userPassword: password581 +mail: uid581 +uidnumber: 581 +gidnumber: 581 +homeDirectory: /home/uid581 + +dn: cn=user582,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user582 +sn: user582 +uid: uid582 +givenname: givenname582 +description: description582 +userPassword: password582 +mail: uid582 +uidnumber: 582 +gidnumber: 582 +homeDirectory: /home/uid582 + +dn: cn=user583,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user583 +sn: user583 +uid: uid583 +givenname: givenname583 +description: description583 +userPassword: password583 +mail: uid583 +uidnumber: 583 +gidnumber: 583 +homeDirectory: /home/uid583 + +dn: cn=user584,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user584 +sn: user584 +uid: uid584 +givenname: givenname584 +description: description584 +userPassword: password584 +mail: uid584 +uidnumber: 584 +gidnumber: 584 +homeDirectory: /home/uid584 + +dn: cn=user585,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user585 +sn: user585 +uid: uid585 +givenname: givenname585 +description: description585 +userPassword: password585 +mail: uid585 +uidnumber: 585 +gidnumber: 585 +homeDirectory: /home/uid585 + +dn: cn=user586,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user586 +sn: user586 +uid: uid586 +givenname: givenname586 +description: description586 +userPassword: password586 +mail: uid586 +uidnumber: 586 +gidnumber: 586 +homeDirectory: /home/uid586 + +dn: cn=user587,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user587 +sn: user587 +uid: uid587 +givenname: givenname587 +description: description587 +userPassword: password587 +mail: uid587 +uidnumber: 587 +gidnumber: 587 +homeDirectory: /home/uid587 + +dn: cn=user588,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user588 +sn: user588 +uid: uid588 +givenname: givenname588 +description: description588 +userPassword: password588 +mail: uid588 +uidnumber: 588 +gidnumber: 588 +homeDirectory: /home/uid588 + +dn: cn=user589,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user589 +sn: user589 +uid: uid589 +givenname: givenname589 +description: description589 +userPassword: password589 +mail: uid589 +uidnumber: 589 +gidnumber: 589 +homeDirectory: /home/uid589 + 
+dn: cn=user590,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user590 +sn: user590 +uid: uid590 +givenname: givenname590 +description: description590 +userPassword: password590 +mail: uid590 +uidnumber: 590 +gidnumber: 590 +homeDirectory: /home/uid590 + +dn: cn=user591,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user591 +sn: user591 +uid: uid591 +givenname: givenname591 +description: description591 +userPassword: password591 +mail: uid591 +uidnumber: 591 +gidnumber: 591 +homeDirectory: /home/uid591 + +dn: cn=user592,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user592 +sn: user592 +uid: uid592 +givenname: givenname592 +description: description592 +userPassword: password592 +mail: uid592 +uidnumber: 592 +gidnumber: 592 +homeDirectory: /home/uid592 + +dn: cn=user593,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user593 +sn: user593 +uid: uid593 +givenname: givenname593 +description: description593 +userPassword: password593 +mail: uid593 +uidnumber: 593 +gidnumber: 593 +homeDirectory: /home/uid593 + +dn: cn=user594,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user594 +sn: user594 +uid: uid594 +givenname: givenname594 +description: description594 +userPassword: password594 +mail: uid594 +uidnumber: 594 +gidnumber: 594 +homeDirectory: /home/uid594 + +dn: cn=user595,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user595 +sn: user595 +uid: uid595 +givenname: givenname595 +description: description595 +userPassword: password595 +mail: uid595 +uidnumber: 595 +gidnumber: 595 +homeDirectory: /home/uid595 + +dn: cn=user596,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user596 +sn: user596 +uid: uid596 +givenname: givenname596 +description: description596 +userPassword: password596 +mail: uid596 +uidnumber: 596 +gidnumber: 596 +homeDirectory: /home/uid596 + +dn: cn=user597,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user597 +sn: user597 +uid: uid597 +givenname: givenname597 +description: description597 +userPassword: password597 +mail: uid597 +uidnumber: 597 +gidnumber: 597 +homeDirectory: /home/uid597 + +dn: cn=user598,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user598 +sn: user598 +uid: uid598 +givenname: givenname598 +description: description598 +userPassword: password598 +mail: uid598 +uidnumber: 598 +gidnumber: 598 +homeDirectory: /home/uid598 + +dn: cn=user599,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user599 +sn: user599 +uid: uid599 +givenname: 
givenname599 +description: description599 +userPassword: password599 +mail: uid599 +uidnumber: 599 +gidnumber: 599 +homeDirectory: /home/uid599 + +dn: cn=user600,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user600 +sn: user600 +uid: uid600 +givenname: givenname600 +description: description600 +userPassword: password600 +mail: uid600 +uidnumber: 600 +gidnumber: 600 +homeDirectory: /home/uid600 + +dn: cn=user601,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user601 +sn: user601 +uid: uid601 +givenname: givenname601 +description: description601 +userPassword: password601 +mail: uid601 +uidnumber: 601 +gidnumber: 601 +homeDirectory: /home/uid601 + +dn: cn=user602,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user602 +sn: user602 +uid: uid602 +givenname: givenname602 +description: description602 +userPassword: password602 +mail: uid602 +uidnumber: 602 +gidnumber: 602 +homeDirectory: /home/uid602 + +dn: cn=user603,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user603 +sn: user603 +uid: uid603 +givenname: givenname603 +description: description603 +userPassword: password603 +mail: uid603 +uidnumber: 603 +gidnumber: 603 +homeDirectory: /home/uid603 + +dn: cn=user604,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user604 +sn: user604 +uid: uid604 +givenname: givenname604 +description: description604 +userPassword: password604 +mail: uid604 +uidnumber: 604 +gidnumber: 604 +homeDirectory: /home/uid604 + +dn: cn=user605,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user605 +sn: user605 +uid: uid605 +givenname: givenname605 +description: description605 +userPassword: password605 +mail: uid605 +uidnumber: 605 +gidnumber: 605 +homeDirectory: /home/uid605 + +dn: cn=user606,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user606 +sn: user606 +uid: uid606 +givenname: givenname606 +description: description606 +userPassword: password606 +mail: uid606 +uidnumber: 606 +gidnumber: 606 +homeDirectory: /home/uid606 + +dn: cn=user607,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user607 +sn: user607 +uid: uid607 +givenname: givenname607 +description: description607 +userPassword: password607 +mail: uid607 +uidnumber: 607 +gidnumber: 607 +homeDirectory: /home/uid607 + +dn: cn=user608,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user608 +sn: user608 +uid: uid608 +givenname: givenname608 +description: description608 +userPassword: password608 +mail: uid608 +uidnumber: 608 +gidnumber: 608 +homeDirectory: /home/uid608 + +dn: cn=user609,ou=People,dc=example,dc=com +objectClass: top +objectClass: 
person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user609 +sn: user609 +uid: uid609 +givenname: givenname609 +description: description609 +userPassword: password609 +mail: uid609 +uidnumber: 609 +gidnumber: 609 +homeDirectory: /home/uid609 + +dn: cn=user610,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user610 +sn: user610 +uid: uid610 +givenname: givenname610 +description: description610 +userPassword: password610 +mail: uid610 +uidnumber: 610 +gidnumber: 610 +homeDirectory: /home/uid610 + +dn: cn=user611,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user611 +sn: user611 +uid: uid611 +givenname: givenname611 +description: description611 +userPassword: password611 +mail: uid611 +uidnumber: 611 +gidnumber: 611 +homeDirectory: /home/uid611 + +dn: cn=user612,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user612 +sn: user612 +uid: uid612 +givenname: givenname612 +description: description612 +userPassword: password612 +mail: uid612 +uidnumber: 612 +gidnumber: 612 +homeDirectory: /home/uid612 + +dn: cn=user613,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user613 +sn: user613 +uid: uid613 +givenname: givenname613 +description: description613 +userPassword: password613 +mail: uid613 +uidnumber: 613 +gidnumber: 613 +homeDirectory: /home/uid613 + +dn: cn=user614,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user614 +sn: user614 +uid: uid614 +givenname: givenname614 +description: description614 +userPassword: password614 +mail: uid614 +uidnumber: 614 +gidnumber: 614 +homeDirectory: /home/uid614 + +dn: cn=user615,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user615 +sn: user615 +uid: uid615 +givenname: givenname615 +description: description615 +userPassword: password615 +mail: uid615 +uidnumber: 615 +gidnumber: 615 +homeDirectory: /home/uid615 + +dn: cn=user616,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user616 +sn: user616 +uid: uid616 +givenname: givenname616 +description: description616 +userPassword: password616 +mail: uid616 +uidnumber: 616 +gidnumber: 616 +homeDirectory: /home/uid616 + +dn: cn=user617,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user617 +sn: user617 +uid: uid617 +givenname: givenname617 +description: description617 +userPassword: password617 +mail: uid617 +uidnumber: 617 +gidnumber: 617 +homeDirectory: /home/uid617 + +dn: cn=user618,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user618 +sn: user618 +uid: uid618 +givenname: givenname618 +description: description618 +userPassword: password618 +mail: 
uid618 +uidnumber: 618 +gidnumber: 618 +homeDirectory: /home/uid618 + +dn: cn=user619,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user619 +sn: user619 +uid: uid619 +givenname: givenname619 +description: description619 +userPassword: password619 +mail: uid619 +uidnumber: 619 +gidnumber: 619 +homeDirectory: /home/uid619 + +dn: cn=user620,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user620 +sn: user620 +uid: uid620 +givenname: givenname620 +description: description620 +userPassword: password620 +mail: uid620 +uidnumber: 620 +gidnumber: 620 +homeDirectory: /home/uid620 + +dn: cn=user621,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user621 +sn: user621 +uid: uid621 +givenname: givenname621 +description: description621 +userPassword: password621 +mail: uid621 +uidnumber: 621 +gidnumber: 621 +homeDirectory: /home/uid621 + +dn: cn=user622,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user622 +sn: user622 +uid: uid622 +givenname: givenname622 +description: description622 +userPassword: password622 +mail: uid622 +uidnumber: 622 +gidnumber: 622 +homeDirectory: /home/uid622 + +dn: cn=user623,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user623 +sn: user623 +uid: uid623 +givenname: givenname623 +description: description623 +userPassword: password623 +mail: uid623 +uidnumber: 623 +gidnumber: 623 +homeDirectory: /home/uid623 + +dn: cn=user624,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user624 +sn: user624 +uid: uid624 +givenname: givenname624 +description: description624 +userPassword: password624 +mail: uid624 +uidnumber: 624 +gidnumber: 624 +homeDirectory: /home/uid624 + +dn: cn=user625,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user625 +sn: user625 +uid: uid625 +givenname: givenname625 +description: description625 +userPassword: password625 +mail: uid625 +uidnumber: 625 +gidnumber: 625 +homeDirectory: /home/uid625 + +dn: cn=user626,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user626 +sn: user626 +uid: uid626 +givenname: givenname626 +description: description626 +userPassword: password626 +mail: uid626 +uidnumber: 626 +gidnumber: 626 +homeDirectory: /home/uid626 + +dn: cn=user627,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user627 +sn: user627 +uid: uid627 +givenname: givenname627 +description: description627 +userPassword: password627 +mail: uid627 +uidnumber: 627 +gidnumber: 627 +homeDirectory: /home/uid627 + +dn: cn=user628,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson 
+objectClass: posixAccount +cn: user628 +sn: user628 +uid: uid628 +givenname: givenname628 +description: description628 +userPassword: password628 +mail: uid628 +uidnumber: 628 +gidnumber: 628 +homeDirectory: /home/uid628 + +dn: cn=user629,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user629 +sn: user629 +uid: uid629 +givenname: givenname629 +description: description629 +userPassword: password629 +mail: uid629 +uidnumber: 629 +gidnumber: 629 +homeDirectory: /home/uid629 + +dn: cn=user630,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user630 +sn: user630 +uid: uid630 +givenname: givenname630 +description: description630 +userPassword: password630 +mail: uid630 +uidnumber: 630 +gidnumber: 630 +homeDirectory: /home/uid630 + +dn: cn=user631,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user631 +sn: user631 +uid: uid631 +givenname: givenname631 +description: description631 +userPassword: password631 +mail: uid631 +uidnumber: 631 +gidnumber: 631 +homeDirectory: /home/uid631 + +dn: cn=user632,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user632 +sn: user632 +uid: uid632 +givenname: givenname632 +description: description632 +userPassword: password632 +mail: uid632 +uidnumber: 632 +gidnumber: 632 +homeDirectory: /home/uid632 + +dn: cn=user633,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user633 +sn: user633 +uid: uid633 +givenname: givenname633 +description: description633 +userPassword: password633 +mail: uid633 +uidnumber: 633 +gidnumber: 633 +homeDirectory: /home/uid633 + +dn: cn=user634,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user634 +sn: user634 +uid: uid634 +givenname: givenname634 +description: description634 +userPassword: password634 +mail: uid634 +uidnumber: 634 +gidnumber: 634 +homeDirectory: /home/uid634 + +dn: cn=user635,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user635 +sn: user635 +uid: uid635 +givenname: givenname635 +description: description635 +userPassword: password635 +mail: uid635 +uidnumber: 635 +gidnumber: 635 +homeDirectory: /home/uid635 + +dn: cn=user636,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user636 +sn: user636 +uid: uid636 +givenname: givenname636 +description: description636 +userPassword: password636 +mail: uid636 +uidnumber: 636 +gidnumber: 636 +homeDirectory: /home/uid636 + +dn: cn=user637,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user637 +sn: user637 +uid: uid637 +givenname: givenname637 +description: description637 +userPassword: password637 +mail: uid637 +uidnumber: 637 +gidnumber: 637 +homeDirectory: /home/uid637 + 
+dn: cn=user638,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user638 +sn: user638 +uid: uid638 +givenname: givenname638 +description: description638 +userPassword: password638 +mail: uid638 +uidnumber: 638 +gidnumber: 638 +homeDirectory: /home/uid638 + +dn: cn=user639,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user639 +sn: user639 +uid: uid639 +givenname: givenname639 +description: description639 +userPassword: password639 +mail: uid639 +uidnumber: 639 +gidnumber: 639 +homeDirectory: /home/uid639 + +dn: cn=user640,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user640 +sn: user640 +uid: uid640 +givenname: givenname640 +description: description640 +userPassword: password640 +mail: uid640 +uidnumber: 640 +gidnumber: 640 +homeDirectory: /home/uid640 + +dn: cn=user641,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user641 +sn: user641 +uid: uid641 +givenname: givenname641 +description: description641 +userPassword: password641 +mail: uid641 +uidnumber: 641 +gidnumber: 641 +homeDirectory: /home/uid641 + +dn: cn=user642,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user642 +sn: user642 +uid: uid642 +givenname: givenname642 +description: description642 +userPassword: password642 +mail: uid642 +uidnumber: 642 +gidnumber: 642 +homeDirectory: /home/uid642 + +dn: cn=user643,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user643 +sn: user643 +uid: uid643 +givenname: givenname643 +description: description643 +userPassword: password643 +mail: uid643 +uidnumber: 643 +gidnumber: 643 +homeDirectory: /home/uid643 + +dn: cn=user644,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user644 +sn: user644 +uid: uid644 +givenname: givenname644 +description: description644 +userPassword: password644 +mail: uid644 +uidnumber: 644 +gidnumber: 644 +homeDirectory: /home/uid644 + +dn: cn=user645,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user645 +sn: user645 +uid: uid645 +givenname: givenname645 +description: description645 +userPassword: password645 +mail: uid645 +uidnumber: 645 +gidnumber: 645 +homeDirectory: /home/uid645 + +dn: cn=user646,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user646 +sn: user646 +uid: uid646 +givenname: givenname646 +description: description646 +userPassword: password646 +mail: uid646 +uidnumber: 646 +gidnumber: 646 +homeDirectory: /home/uid646 + +dn: cn=user647,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user647 +sn: user647 +uid: uid647 +givenname: 
givenname647 +description: description647 +userPassword: password647 +mail: uid647 +uidnumber: 647 +gidnumber: 647 +homeDirectory: /home/uid647 + +dn: cn=user648,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user648 +sn: user648 +uid: uid648 +givenname: givenname648 +description: description648 +userPassword: password648 +mail: uid648 +uidnumber: 648 +gidnumber: 648 +homeDirectory: /home/uid648 + +dn: cn=user649,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user649 +sn: user649 +uid: uid649 +givenname: givenname649 +description: description649 +userPassword: password649 +mail: uid649 +uidnumber: 649 +gidnumber: 649 +homeDirectory: /home/uid649 + +dn: cn=user650,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user650 +sn: user650 +uid: uid650 +givenname: givenname650 +description: description650 +userPassword: password650 +mail: uid650 +uidnumber: 650 +gidnumber: 650 +homeDirectory: /home/uid650 + +dn: cn=user651,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user651 +sn: user651 +uid: uid651 +givenname: givenname651 +description: description651 +userPassword: password651 +mail: uid651 +uidnumber: 651 +gidnumber: 651 +homeDirectory: /home/uid651 + +dn: cn=user652,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user652 +sn: user652 +uid: uid652 +givenname: givenname652 +description: description652 +userPassword: password652 +mail: uid652 +uidnumber: 652 +gidnumber: 652 +homeDirectory: /home/uid652 + +dn: cn=user653,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user653 +sn: user653 +uid: uid653 +givenname: givenname653 +description: description653 +userPassword: password653 +mail: uid653 +uidnumber: 653 +gidnumber: 653 +homeDirectory: /home/uid653 + +dn: cn=user654,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user654 +sn: user654 +uid: uid654 +givenname: givenname654 +description: description654 +userPassword: password654 +mail: uid654 +uidnumber: 654 +gidnumber: 654 +homeDirectory: /home/uid654 + +dn: cn=user655,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user655 +sn: user655 +uid: uid655 +givenname: givenname655 +description: description655 +userPassword: password655 +mail: uid655 +uidnumber: 655 +gidnumber: 655 +homeDirectory: /home/uid655 + +dn: cn=user656,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user656 +sn: user656 +uid: uid656 +givenname: givenname656 +description: description656 +userPassword: password656 +mail: uid656 +uidnumber: 656 +gidnumber: 656 +homeDirectory: /home/uid656 + +dn: cn=user657,ou=People,dc=example,dc=com +objectClass: top +objectClass: 
person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user657 +sn: user657 +uid: uid657 +givenname: givenname657 +description: description657 +userPassword: password657 +mail: uid657 +uidnumber: 657 +gidnumber: 657 +homeDirectory: /home/uid657 + +dn: cn=user658,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user658 +sn: user658 +uid: uid658 +givenname: givenname658 +description: description658 +userPassword: password658 +mail: uid658 +uidnumber: 658 +gidnumber: 658 +homeDirectory: /home/uid658 + +dn: cn=user659,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user659 +sn: user659 +uid: uid659 +givenname: givenname659 +description: description659 +userPassword: password659 +mail: uid659 +uidnumber: 659 +gidnumber: 659 +homeDirectory: /home/uid659 + +dn: cn=user660,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user660 +sn: user660 +uid: uid660 +givenname: givenname660 +description: description660 +userPassword: password660 +mail: uid660 +uidnumber: 660 +gidnumber: 660 +homeDirectory: /home/uid660 + +dn: cn=user661,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user661 +sn: user661 +uid: uid661 +givenname: givenname661 +description: description661 +userPassword: password661 +mail: uid661 +uidnumber: 661 +gidnumber: 661 +homeDirectory: /home/uid661 + +dn: cn=user662,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user662 +sn: user662 +uid: uid662 +givenname: givenname662 +description: description662 +userPassword: password662 +mail: uid662 +uidnumber: 662 +gidnumber: 662 +homeDirectory: /home/uid662 + +dn: cn=user663,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user663 +sn: user663 +uid: uid663 +givenname: givenname663 +description: description663 +userPassword: password663 +mail: uid663 +uidnumber: 663 +gidnumber: 663 +homeDirectory: /home/uid663 + +dn: cn=user664,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user664 +sn: user664 +uid: uid664 +givenname: givenname664 +description: description664 +userPassword: password664 +mail: uid664 +uidnumber: 664 +gidnumber: 664 +homeDirectory: /home/uid664 + +dn: cn=user665,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user665 +sn: user665 +uid: uid665 +givenname: givenname665 +description: description665 +userPassword: password665 +mail: uid665 +uidnumber: 665 +gidnumber: 665 +homeDirectory: /home/uid665 + +dn: cn=user666,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user666 +sn: user666 +uid: uid666 +givenname: givenname666 +description: description666 +userPassword: password666 +mail: 
uid666 +uidnumber: 666 +gidnumber: 666 +homeDirectory: /home/uid666 + +dn: cn=user667,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user667 +sn: user667 +uid: uid667 +givenname: givenname667 +description: description667 +userPassword: password667 +mail: uid667 +uidnumber: 667 +gidnumber: 667 +homeDirectory: /home/uid667 + +dn: cn=user668,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user668 +sn: user668 +uid: uid668 +givenname: givenname668 +description: description668 +userPassword: password668 +mail: uid668 +uidnumber: 668 +gidnumber: 668 +homeDirectory: /home/uid668 + +dn: cn=user669,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user669 +sn: user669 +uid: uid669 +givenname: givenname669 +description: description669 +userPassword: password669 +mail: uid669 +uidnumber: 669 +gidnumber: 669 +homeDirectory: /home/uid669 + +dn: cn=user670,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user670 +sn: user670 +uid: uid670 +givenname: givenname670 +description: description670 +userPassword: password670 +mail: uid670 +uidnumber: 670 +gidnumber: 670 +homeDirectory: /home/uid670 + +dn: cn=user671,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user671 +sn: user671 +uid: uid671 +givenname: givenname671 +description: description671 +userPassword: password671 +mail: uid671 +uidnumber: 671 +gidnumber: 671 +homeDirectory: /home/uid671 + +dn: cn=user672,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user672 +sn: user672 +uid: uid672 +givenname: givenname672 +description: description672 +userPassword: password672 +mail: uid672 +uidnumber: 672 +gidnumber: 672 +homeDirectory: /home/uid672 + +dn: cn=user673,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user673 +sn: user673 +uid: uid673 +givenname: givenname673 +description: description673 +userPassword: password673 +mail: uid673 +uidnumber: 673 +gidnumber: 673 +homeDirectory: /home/uid673 + +dn: cn=user674,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user674 +sn: user674 +uid: uid674 +givenname: givenname674 +description: description674 +userPassword: password674 +mail: uid674 +uidnumber: 674 +gidnumber: 674 +homeDirectory: /home/uid674 + +dn: cn=user675,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user675 +sn: user675 +uid: uid675 +givenname: givenname675 +description: description675 +userPassword: password675 +mail: uid675 +uidnumber: 675 +gidnumber: 675 +homeDirectory: /home/uid675 + +dn: cn=user676,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson 
+objectClass: posixAccount +cn: user676 +sn: user676 +uid: uid676 +givenname: givenname676 +description: description676 +userPassword: password676 +mail: uid676 +uidnumber: 676 +gidnumber: 676 +homeDirectory: /home/uid676 + +dn: cn=user677,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user677 +sn: user677 +uid: uid677 +givenname: givenname677 +description: description677 +userPassword: password677 +mail: uid677 +uidnumber: 677 +gidnumber: 677 +homeDirectory: /home/uid677 + +dn: cn=user678,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user678 +sn: user678 +uid: uid678 +givenname: givenname678 +description: description678 +userPassword: password678 +mail: uid678 +uidnumber: 678 +gidnumber: 678 +homeDirectory: /home/uid678 + +dn: cn=user679,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user679 +sn: user679 +uid: uid679 +givenname: givenname679 +description: description679 +userPassword: password679 +mail: uid679 +uidnumber: 679 +gidnumber: 679 +homeDirectory: /home/uid679 + +dn: cn=user680,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user680 +sn: user680 +uid: uid680 +givenname: givenname680 +description: description680 +userPassword: password680 +mail: uid680 +uidnumber: 680 +gidnumber: 680 +homeDirectory: /home/uid680 + +dn: cn=user681,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user681 +sn: user681 +uid: uid681 +givenname: givenname681 +description: description681 +userPassword: password681 +mail: uid681 +uidnumber: 681 +gidnumber: 681 +homeDirectory: /home/uid681 + +dn: cn=user682,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user682 +sn: user682 +uid: uid682 +givenname: givenname682 +description: description682 +userPassword: password682 +mail: uid682 +uidnumber: 682 +gidnumber: 682 +homeDirectory: /home/uid682 + +dn: cn=user683,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user683 +sn: user683 +uid: uid683 +givenname: givenname683 +description: description683 +userPassword: password683 +mail: uid683 +uidnumber: 683 +gidnumber: 683 +homeDirectory: /home/uid683 + +dn: cn=user684,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user684 +sn: user684 +uid: uid684 +givenname: givenname684 +description: description684 +userPassword: password684 +mail: uid684 +uidnumber: 684 +gidnumber: 684 +homeDirectory: /home/uid684 + +dn: cn=user685,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user685 +sn: user685 +uid: uid685 +givenname: givenname685 +description: description685 +userPassword: password685 +mail: uid685 +uidnumber: 685 +gidnumber: 685 +homeDirectory: /home/uid685 + 
+dn: cn=user686,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user686 +sn: user686 +uid: uid686 +givenname: givenname686 +description: description686 +userPassword: password686 +mail: uid686 +uidnumber: 686 +gidnumber: 686 +homeDirectory: /home/uid686 + +dn: cn=user687,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user687 +sn: user687 +uid: uid687 +givenname: givenname687 +description: description687 +userPassword: password687 +mail: uid687 +uidnumber: 687 +gidnumber: 687 +homeDirectory: /home/uid687 + +dn: cn=user688,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user688 +sn: user688 +uid: uid688 +givenname: givenname688 +description: description688 +userPassword: password688 +mail: uid688 +uidnumber: 688 +gidnumber: 688 +homeDirectory: /home/uid688 + +dn: cn=user689,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user689 +sn: user689 +uid: uid689 +givenname: givenname689 +description: description689 +userPassword: password689 +mail: uid689 +uidnumber: 689 +gidnumber: 689 +homeDirectory: /home/uid689 + +dn: cn=user690,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user690 +sn: user690 +uid: uid690 +givenname: givenname690 +description: description690 +userPassword: password690 +mail: uid690 +uidnumber: 690 +gidnumber: 690 +homeDirectory: /home/uid690 + +dn: cn=user691,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user691 +sn: user691 +uid: uid691 +givenname: givenname691 +description: description691 +userPassword: password691 +mail: uid691 +uidnumber: 691 +gidnumber: 691 +homeDirectory: /home/uid691 + +dn: cn=user692,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user692 +sn: user692 +uid: uid692 +givenname: givenname692 +description: description692 +userPassword: password692 +mail: uid692 +uidnumber: 692 +gidnumber: 692 +homeDirectory: /home/uid692 + +dn: cn=user693,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user693 +sn: user693 +uid: uid693 +givenname: givenname693 +description: description693 +userPassword: password693 +mail: uid693 +uidnumber: 693 +gidnumber: 693 +homeDirectory: /home/uid693 + +dn: cn=user694,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user694 +sn: user694 +uid: uid694 +givenname: givenname694 +description: description694 +userPassword: password694 +mail: uid694 +uidnumber: 694 +gidnumber: 694 +homeDirectory: /home/uid694 + +dn: cn=user695,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user695 +sn: user695 +uid: uid695 +givenname: 
givenname695 +description: description695 +userPassword: password695 +mail: uid695 +uidnumber: 695 +gidnumber: 695 +homeDirectory: /home/uid695 + +dn: cn=user696,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user696 +sn: user696 +uid: uid696 +givenname: givenname696 +description: description696 +userPassword: password696 +mail: uid696 +uidnumber: 696 +gidnumber: 696 +homeDirectory: /home/uid696 + +dn: cn=user697,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user697 +sn: user697 +uid: uid697 +givenname: givenname697 +description: description697 +userPassword: password697 +mail: uid697 +uidnumber: 697 +gidnumber: 697 +homeDirectory: /home/uid697 + +dn: cn=user698,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user698 +sn: user698 +uid: uid698 +givenname: givenname698 +description: description698 +userPassword: password698 +mail: uid698 +uidnumber: 698 +gidnumber: 698 +homeDirectory: /home/uid698 + +dn: cn=user699,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user699 +sn: user699 +uid: uid699 +givenname: givenname699 +description: description699 +userPassword: password699 +mail: uid699 +uidnumber: 699 +gidnumber: 699 +homeDirectory: /home/uid699 + +dn: cn=user700,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user700 +sn: user700 +uid: uid700 +givenname: givenname700 +description: description700 +userPassword: password700 +mail: uid700 +uidnumber: 700 +gidnumber: 700 +homeDirectory: /home/uid700 + +dn: cn=user701,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user701 +sn: user701 +uid: uid701 +givenname: givenname701 +description: description701 +userPassword: password701 +mail: uid701 +uidnumber: 701 +gidnumber: 701 +homeDirectory: /home/uid701 + +dn: cn=user702,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user702 +sn: user702 +uid: uid702 +givenname: givenname702 +description: description702 +userPassword: password702 +mail: uid702 +uidnumber: 702 +gidnumber: 702 +homeDirectory: /home/uid702 + +dn: cn=user703,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user703 +sn: user703 +uid: uid703 +givenname: givenname703 +description: description703 +userPassword: password703 +mail: uid703 +uidnumber: 703 +gidnumber: 703 +homeDirectory: /home/uid703 + +dn: cn=user704,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user704 +sn: user704 +uid: uid704 +givenname: givenname704 +description: description704 +userPassword: password704 +mail: uid704 +uidnumber: 704 +gidnumber: 704 +homeDirectory: /home/uid704 + +dn: cn=user705,ou=People,dc=example,dc=com +objectClass: top +objectClass: 
person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user705 +sn: user705 +uid: uid705 +givenname: givenname705 +description: description705 +userPassword: password705 +mail: uid705 +uidnumber: 705 +gidnumber: 705 +homeDirectory: /home/uid705 + +dn: cn=user706,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user706 +sn: user706 +uid: uid706 +givenname: givenname706 +description: description706 +userPassword: password706 +mail: uid706 +uidnumber: 706 +gidnumber: 706 +homeDirectory: /home/uid706 + +dn: cn=user707,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user707 +sn: user707 +uid: uid707 +givenname: givenname707 +description: description707 +userPassword: password707 +mail: uid707 +uidnumber: 707 +gidnumber: 707 +homeDirectory: /home/uid707 + +dn: cn=user708,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user708 +sn: user708 +uid: uid708 +givenname: givenname708 +description: description708 +userPassword: password708 +mail: uid708 +uidnumber: 708 +gidnumber: 708 +homeDirectory: /home/uid708 + +dn: cn=user709,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user709 +sn: user709 +uid: uid709 +givenname: givenname709 +description: description709 +userPassword: password709 +mail: uid709 +uidnumber: 709 +gidnumber: 709 +homeDirectory: /home/uid709 + +dn: cn=user710,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user710 +sn: user710 +uid: uid710 +givenname: givenname710 +description: description710 +userPassword: password710 +mail: uid710 +uidnumber: 710 +gidnumber: 710 +homeDirectory: /home/uid710 + +dn: cn=user711,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user711 +sn: user711 +uid: uid711 +givenname: givenname711 +description: description711 +userPassword: password711 +mail: uid711 +uidnumber: 711 +gidnumber: 711 +homeDirectory: /home/uid711 + +dn: cn=user712,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user712 +sn: user712 +uid: uid712 +givenname: givenname712 +description: description712 +userPassword: password712 +mail: uid712 +uidnumber: 712 +gidnumber: 712 +homeDirectory: /home/uid712 + +dn: cn=user713,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user713 +sn: user713 +uid: uid713 +givenname: givenname713 +description: description713 +userPassword: password713 +mail: uid713 +uidnumber: 713 +gidnumber: 713 +homeDirectory: /home/uid713 + +dn: cn=user714,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user714 +sn: user714 +uid: uid714 +givenname: givenname714 +description: description714 +userPassword: password714 +mail: 
uid714 +uidnumber: 714 +gidnumber: 714 +homeDirectory: /home/uid714 + +dn: cn=user715,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user715 +sn: user715 +uid: uid715 +givenname: givenname715 +description: description715 +userPassword: password715 +mail: uid715 +uidnumber: 715 +gidnumber: 715 +homeDirectory: /home/uid715 + +dn: cn=user716,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user716 +sn: user716 +uid: uid716 +givenname: givenname716 +description: description716 +userPassword: password716 +mail: uid716 +uidnumber: 716 +gidnumber: 716 +homeDirectory: /home/uid716 + +dn: cn=user717,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user717 +sn: user717 +uid: uid717 +givenname: givenname717 +description: description717 +userPassword: password717 +mail: uid717 +uidnumber: 717 +gidnumber: 717 +homeDirectory: /home/uid717 + +dn: cn=user718,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user718 +sn: user718 +uid: uid718 +givenname: givenname718 +description: description718 +userPassword: password718 +mail: uid718 +uidnumber: 718 +gidnumber: 718 +homeDirectory: /home/uid718 + +dn: cn=user719,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user719 +sn: user719 +uid: uid719 +givenname: givenname719 +description: description719 +userPassword: password719 +mail: uid719 +uidnumber: 719 +gidnumber: 719 +homeDirectory: /home/uid719 + +dn: cn=user720,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user720 +sn: user720 +uid: uid720 +givenname: givenname720 +description: description720 +userPassword: password720 +mail: uid720 +uidnumber: 720 +gidnumber: 720 +homeDirectory: /home/uid720 + +dn: cn=user721,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user721 +sn: user721 +uid: uid721 +givenname: givenname721 +description: description721 +userPassword: password721 +mail: uid721 +uidnumber: 721 +gidnumber: 721 +homeDirectory: /home/uid721 + +dn: cn=user722,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user722 +sn: user722 +uid: uid722 +givenname: givenname722 +description: description722 +userPassword: password722 +mail: uid722 +uidnumber: 722 +gidnumber: 722 +homeDirectory: /home/uid722 + +dn: cn=user723,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user723 +sn: user723 +uid: uid723 +givenname: givenname723 +description: description723 +userPassword: password723 +mail: uid723 +uidnumber: 723 +gidnumber: 723 +homeDirectory: /home/uid723 + +dn: cn=user724,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson 
+objectClass: posixAccount +cn: user724 +sn: user724 +uid: uid724 +givenname: givenname724 +description: description724 +userPassword: password724 +mail: uid724 +uidnumber: 724 +gidnumber: 724 +homeDirectory: /home/uid724 + +dn: cn=user725,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user725 +sn: user725 +uid: uid725 +givenname: givenname725 +description: description725 +userPassword: password725 +mail: uid725 +uidnumber: 725 +gidnumber: 725 +homeDirectory: /home/uid725 + +dn: cn=user726,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user726 +sn: user726 +uid: uid726 +givenname: givenname726 +description: description726 +userPassword: password726 +mail: uid726 +uidnumber: 726 +gidnumber: 726 +homeDirectory: /home/uid726 + +dn: cn=user727,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user727 +sn: user727 +uid: uid727 +givenname: givenname727 +description: description727 +userPassword: password727 +mail: uid727 +uidnumber: 727 +gidnumber: 727 +homeDirectory: /home/uid727 + +dn: cn=user728,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user728 +sn: user728 +uid: uid728 +givenname: givenname728 +description: description728 +userPassword: password728 +mail: uid728 +uidnumber: 728 +gidnumber: 728 +homeDirectory: /home/uid728 + +dn: cn=user729,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user729 +sn: user729 +uid: uid729 +givenname: givenname729 +description: description729 +userPassword: password729 +mail: uid729 +uidnumber: 729 +gidnumber: 729 +homeDirectory: /home/uid729 + +dn: cn=user730,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user730 +sn: user730 +uid: uid730 +givenname: givenname730 +description: description730 +userPassword: password730 +mail: uid730 +uidnumber: 730 +gidnumber: 730 +homeDirectory: /home/uid730 + +dn: cn=user731,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user731 +sn: user731 +uid: uid731 +givenname: givenname731 +description: description731 +userPassword: password731 +mail: uid731 +uidnumber: 731 +gidnumber: 731 +homeDirectory: /home/uid731 + +dn: cn=user732,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user732 +sn: user732 +uid: uid732 +givenname: givenname732 +description: description732 +userPassword: password732 +mail: uid732 +uidnumber: 732 +gidnumber: 732 +homeDirectory: /home/uid732 + +dn: cn=user733,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user733 +sn: user733 +uid: uid733 +givenname: givenname733 +description: description733 +userPassword: password733 +mail: uid733 +uidnumber: 733 +gidnumber: 733 +homeDirectory: /home/uid733 + 
+dn: cn=user734,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user734 +sn: user734 +uid: uid734 +givenname: givenname734 +description: description734 +userPassword: password734 +mail: uid734 +uidnumber: 734 +gidnumber: 734 +homeDirectory: /home/uid734 + +dn: cn=user735,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user735 +sn: user735 +uid: uid735 +givenname: givenname735 +description: description735 +userPassword: password735 +mail: uid735 +uidnumber: 735 +gidnumber: 735 +homeDirectory: /home/uid735 + +dn: cn=user736,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user736 +sn: user736 +uid: uid736 +givenname: givenname736 +description: description736 +userPassword: password736 +mail: uid736 +uidnumber: 736 +gidnumber: 736 +homeDirectory: /home/uid736 + +dn: cn=user737,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user737 +sn: user737 +uid: uid737 +givenname: givenname737 +description: description737 +userPassword: password737 +mail: uid737 +uidnumber: 737 +gidnumber: 737 +homeDirectory: /home/uid737 + +dn: cn=user738,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user738 +sn: user738 +uid: uid738 +givenname: givenname738 +description: description738 +userPassword: password738 +mail: uid738 +uidnumber: 738 +gidnumber: 738 +homeDirectory: /home/uid738 + +dn: cn=user739,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user739 +sn: user739 +uid: uid739 +givenname: givenname739 +description: description739 +userPassword: password739 +mail: uid739 +uidnumber: 739 +gidnumber: 739 +homeDirectory: /home/uid739 + +dn: cn=user740,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user740 +sn: user740 +uid: uid740 +givenname: givenname740 +description: description740 +userPassword: password740 +mail: uid740 +uidnumber: 740 +gidnumber: 740 +homeDirectory: /home/uid740 + +dn: cn=user741,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user741 +sn: user741 +uid: uid741 +givenname: givenname741 +description: description741 +userPassword: password741 +mail: uid741 +uidnumber: 741 +gidnumber: 741 +homeDirectory: /home/uid741 + +dn: cn=user742,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user742 +sn: user742 +uid: uid742 +givenname: givenname742 +description: description742 +userPassword: password742 +mail: uid742 +uidnumber: 742 +gidnumber: 742 +homeDirectory: /home/uid742 + +dn: cn=user743,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user743 +sn: user743 +uid: uid743 +givenname: 
givenname743 +description: description743 +userPassword: password743 +mail: uid743 +uidnumber: 743 +gidnumber: 743 +homeDirectory: /home/uid743 + +dn: cn=user744,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user744 +sn: user744 +uid: uid744 +givenname: givenname744 +description: description744 +userPassword: password744 +mail: uid744 +uidnumber: 744 +gidnumber: 744 +homeDirectory: /home/uid744 + +dn: cn=user745,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user745 +sn: user745 +uid: uid745 +givenname: givenname745 +description: description745 +userPassword: password745 +mail: uid745 +uidnumber: 745 +gidnumber: 745 +homeDirectory: /home/uid745 + +dn: cn=user746,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user746 +sn: user746 +uid: uid746 +givenname: givenname746 +description: description746 +userPassword: password746 +mail: uid746 +uidnumber: 746 +gidnumber: 746 +homeDirectory: /home/uid746 + +dn: cn=user747,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user747 +sn: user747 +uid: uid747 +givenname: givenname747 +description: description747 +userPassword: password747 +mail: uid747 +uidnumber: 747 +gidnumber: 747 +homeDirectory: /home/uid747 + +dn: cn=user748,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user748 +sn: user748 +uid: uid748 +givenname: givenname748 +description: description748 +userPassword: password748 +mail: uid748 +uidnumber: 748 +gidnumber: 748 +homeDirectory: /home/uid748 + +dn: cn=user749,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user749 +sn: user749 +uid: uid749 +givenname: givenname749 +description: description749 +userPassword: password749 +mail: uid749 +uidnumber: 749 +gidnumber: 749 +homeDirectory: /home/uid749 + +dn: cn=user750,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user750 +sn: user750 +uid: uid750 +givenname: givenname750 +description: description750 +userPassword: password750 +mail: uid750 +uidnumber: 750 +gidnumber: 750 +homeDirectory: /home/uid750 + +dn: cn=user751,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user751 +sn: user751 +uid: uid751 +givenname: givenname751 +description: description751 +userPassword: password751 +mail: uid751 +uidnumber: 751 +gidnumber: 751 +homeDirectory: /home/uid751 + +dn: cn=user752,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user752 +sn: user752 +uid: uid752 +givenname: givenname752 +description: description752 +userPassword: password752 +mail: uid752 +uidnumber: 752 +gidnumber: 752 +homeDirectory: /home/uid752 + +dn: cn=user753,ou=People,dc=example,dc=com +objectClass: top +objectClass: 
person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user753 +sn: user753 +uid: uid753 +givenname: givenname753 +description: description753 +userPassword: password753 +mail: uid753 +uidnumber: 753 +gidnumber: 753 +homeDirectory: /home/uid753 + +dn: cn=user754,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user754 +sn: user754 +uid: uid754 +givenname: givenname754 +description: description754 +userPassword: password754 +mail: uid754 +uidnumber: 754 +gidnumber: 754 +homeDirectory: /home/uid754 + +dn: cn=user755,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user755 +sn: user755 +uid: uid755 +givenname: givenname755 +description: description755 +userPassword: password755 +mail: uid755 +uidnumber: 755 +gidnumber: 755 +homeDirectory: /home/uid755 + +dn: cn=user756,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user756 +sn: user756 +uid: uid756 +givenname: givenname756 +description: description756 +userPassword: password756 +mail: uid756 +uidnumber: 756 +gidnumber: 756 +homeDirectory: /home/uid756 + +dn: cn=user757,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user757 +sn: user757 +uid: uid757 +givenname: givenname757 +description: description757 +userPassword: password757 +mail: uid757 +uidnumber: 757 +gidnumber: 757 +homeDirectory: /home/uid757 + +dn: cn=user758,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user758 +sn: user758 +uid: uid758 +givenname: givenname758 +description: description758 +userPassword: password758 +mail: uid758 +uidnumber: 758 +gidnumber: 758 +homeDirectory: /home/uid758 + +dn: cn=user759,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user759 +sn: user759 +uid: uid759 +givenname: givenname759 +description: description759 +userPassword: password759 +mail: uid759 +uidnumber: 759 +gidnumber: 759 +homeDirectory: /home/uid759 + +dn: cn=user760,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user760 +sn: user760 +uid: uid760 +givenname: givenname760 +description: description760 +userPassword: password760 +mail: uid760 +uidnumber: 760 +gidnumber: 760 +homeDirectory: /home/uid760 + +dn: cn=user761,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user761 +sn: user761 +uid: uid761 +givenname: givenname761 +description: description761 +userPassword: password761 +mail: uid761 +uidnumber: 761 +gidnumber: 761 +homeDirectory: /home/uid761 + +dn: cn=user762,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user762 +sn: user762 +uid: uid762 +givenname: givenname762 +description: description762 +userPassword: password762 +mail: 
uid762 +uidnumber: 762 +gidnumber: 762 +homeDirectory: /home/uid762 + +dn: cn=user763,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user763 +sn: user763 +uid: uid763 +givenname: givenname763 +description: description763 +userPassword: password763 +mail: uid763 +uidnumber: 763 +gidnumber: 763 +homeDirectory: /home/uid763 + +dn: cn=user764,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user764 +sn: user764 +uid: uid764 +givenname: givenname764 +description: description764 +userPassword: password764 +mail: uid764 +uidnumber: 764 +gidnumber: 764 +homeDirectory: /home/uid764 + +dn: cn=user765,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user765 +sn: user765 +uid: uid765 +givenname: givenname765 +description: description765 +userPassword: password765 +mail: uid765 +uidnumber: 765 +gidnumber: 765 +homeDirectory: /home/uid765 + +dn: cn=user766,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user766 +sn: user766 +uid: uid766 +givenname: givenname766 +description: description766 +userPassword: password766 +mail: uid766 +uidnumber: 766 +gidnumber: 766 +homeDirectory: /home/uid766 + +dn: cn=user767,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user767 +sn: user767 +uid: uid767 +givenname: givenname767 +description: description767 +userPassword: password767 +mail: uid767 +uidnumber: 767 +gidnumber: 767 +homeDirectory: /home/uid767 + +dn: cn=user768,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user768 +sn: user768 +uid: uid768 +givenname: givenname768 +description: description768 +userPassword: password768 +mail: uid768 +uidnumber: 768 +gidnumber: 768 +homeDirectory: /home/uid768 + +dn: cn=user769,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user769 +sn: user769 +uid: uid769 +givenname: givenname769 +description: description769 +userPassword: password769 +mail: uid769 +uidnumber: 769 +gidnumber: 769 +homeDirectory: /home/uid769 + +dn: cn=user770,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user770 +sn: user770 +uid: uid770 +givenname: givenname770 +description: description770 +userPassword: password770 +mail: uid770 +uidnumber: 770 +gidnumber: 770 +homeDirectory: /home/uid770 + +dn: cn=user771,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user771 +sn: user771 +uid: uid771 +givenname: givenname771 +description: description771 +userPassword: password771 +mail: uid771 +uidnumber: 771 +gidnumber: 771 +homeDirectory: /home/uid771 + +dn: cn=user772,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson 
+objectClass: posixAccount +cn: user772 +sn: user772 +uid: uid772 +givenname: givenname772 +description: description772 +userPassword: password772 +mail: uid772 +uidnumber: 772 +gidnumber: 772 +homeDirectory: /home/uid772 + +dn: cn=user773,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user773 +sn: user773 +uid: uid773 +givenname: givenname773 +description: description773 +userPassword: password773 +mail: uid773 +uidnumber: 773 +gidnumber: 773 +homeDirectory: /home/uid773 + +dn: cn=user774,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user774 +sn: user774 +uid: uid774 +givenname: givenname774 +description: description774 +userPassword: password774 +mail: uid774 +uidnumber: 774 +gidnumber: 774 +homeDirectory: /home/uid774 + +dn: cn=user775,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user775 +sn: user775 +uid: uid775 +givenname: givenname775 +description: description775 +userPassword: password775 +mail: uid775 +uidnumber: 775 +gidnumber: 775 +homeDirectory: /home/uid775 + +dn: cn=user776,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user776 +sn: user776 +uid: uid776 +givenname: givenname776 +description: description776 +userPassword: password776 +mail: uid776 +uidnumber: 776 +gidnumber: 776 +homeDirectory: /home/uid776 + +dn: cn=user777,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user777 +sn: user777 +uid: uid777 +givenname: givenname777 +description: description777 +userPassword: password777 +mail: uid777 +uidnumber: 777 +gidnumber: 777 +homeDirectory: /home/uid777 + +dn: cn=user778,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user778 +sn: user778 +uid: uid778 +givenname: givenname778 +description: description778 +userPassword: password778 +mail: uid778 +uidnumber: 778 +gidnumber: 778 +homeDirectory: /home/uid778 + +dn: cn=user779,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user779 +sn: user779 +uid: uid779 +givenname: givenname779 +description: description779 +userPassword: password779 +mail: uid779 +uidnumber: 779 +gidnumber: 779 +homeDirectory: /home/uid779 + +dn: cn=user780,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user780 +sn: user780 +uid: uid780 +givenname: givenname780 +description: description780 +userPassword: password780 +mail: uid780 +uidnumber: 780 +gidnumber: 780 +homeDirectory: /home/uid780 + +dn: cn=user781,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user781 +sn: user781 +uid: uid781 +givenname: givenname781 +description: description781 +userPassword: password781 +mail: uid781 +uidnumber: 781 +gidnumber: 781 +homeDirectory: /home/uid781 + 
+dn: cn=user782,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user782 +sn: user782 +uid: uid782 +givenname: givenname782 +description: description782 +userPassword: password782 +mail: uid782 +uidnumber: 782 +gidnumber: 782 +homeDirectory: /home/uid782 + +dn: cn=user783,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user783 +sn: user783 +uid: uid783 +givenname: givenname783 +description: description783 +userPassword: password783 +mail: uid783 +uidnumber: 783 +gidnumber: 783 +homeDirectory: /home/uid783 + +dn: cn=user784,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user784 +sn: user784 +uid: uid784 +givenname: givenname784 +description: description784 +userPassword: password784 +mail: uid784 +uidnumber: 784 +gidnumber: 784 +homeDirectory: /home/uid784 + +dn: cn=user785,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user785 +sn: user785 +uid: uid785 +givenname: givenname785 +description: description785 +userPassword: password785 +mail: uid785 +uidnumber: 785 +gidnumber: 785 +homeDirectory: /home/uid785 + +dn: cn=user786,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user786 +sn: user786 +uid: uid786 +givenname: givenname786 +description: description786 +userPassword: password786 +mail: uid786 +uidnumber: 786 +gidnumber: 786 +homeDirectory: /home/uid786 + +dn: cn=user787,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user787 +sn: user787 +uid: uid787 +givenname: givenname787 +description: description787 +userPassword: password787 +mail: uid787 +uidnumber: 787 +gidnumber: 787 +homeDirectory: /home/uid787 + +dn: cn=user788,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user788 +sn: user788 +uid: uid788 +givenname: givenname788 +description: description788 +userPassword: password788 +mail: uid788 +uidnumber: 788 +gidnumber: 788 +homeDirectory: /home/uid788 + +dn: cn=user789,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user789 +sn: user789 +uid: uid789 +givenname: givenname789 +description: description789 +userPassword: password789 +mail: uid789 +uidnumber: 789 +gidnumber: 789 +homeDirectory: /home/uid789 + +dn: cn=user790,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user790 +sn: user790 +uid: uid790 +givenname: givenname790 +description: description790 +userPassword: password790 +mail: uid790 +uidnumber: 790 +gidnumber: 790 +homeDirectory: /home/uid790 + +dn: cn=user791,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user791 +sn: user791 +uid: uid791 +givenname: 
givenname791 +description: description791 +userPassword: password791 +mail: uid791 +uidnumber: 791 +gidnumber: 791 +homeDirectory: /home/uid791 + +dn: cn=user792,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user792 +sn: user792 +uid: uid792 +givenname: givenname792 +description: description792 +userPassword: password792 +mail: uid792 +uidnumber: 792 +gidnumber: 792 +homeDirectory: /home/uid792 + +dn: cn=user793,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user793 +sn: user793 +uid: uid793 +givenname: givenname793 +description: description793 +userPassword: password793 +mail: uid793 +uidnumber: 793 +gidnumber: 793 +homeDirectory: /home/uid793 + +dn: cn=user794,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user794 +sn: user794 +uid: uid794 +givenname: givenname794 +description: description794 +userPassword: password794 +mail: uid794 +uidnumber: 794 +gidnumber: 794 +homeDirectory: /home/uid794 + +dn: cn=user795,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user795 +sn: user795 +uid: uid795 +givenname: givenname795 +description: description795 +userPassword: password795 +mail: uid795 +uidnumber: 795 +gidnumber: 795 +homeDirectory: /home/uid795 + +dn: cn=user796,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user796 +sn: user796 +uid: uid796 +givenname: givenname796 +description: description796 +userPassword: password796 +mail: uid796 +uidnumber: 796 +gidnumber: 796 +homeDirectory: /home/uid796 + +dn: cn=user797,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user797 +sn: user797 +uid: uid797 +givenname: givenname797 +description: description797 +userPassword: password797 +mail: uid797 +uidnumber: 797 +gidnumber: 797 +homeDirectory: /home/uid797 + +dn: cn=user798,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user798 +sn: user798 +uid: uid798 +givenname: givenname798 +description: description798 +userPassword: password798 +mail: uid798 +uidnumber: 798 +gidnumber: 798 +homeDirectory: /home/uid798 + +dn: cn=user799,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user799 +sn: user799 +uid: uid799 +givenname: givenname799 +description: description799 +userPassword: password799 +mail: uid799 +uidnumber: 799 +gidnumber: 799 +homeDirectory: /home/uid799 + +dn: cn=user800,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user800 +sn: user800 +uid: uid800 +givenname: givenname800 +description: description800 +userPassword: password800 +mail: uid800 +uidnumber: 800 +gidnumber: 800 +homeDirectory: /home/uid800 + +dn: cn=user801,ou=People,dc=example,dc=com +objectClass: top +objectClass: 
person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user801 +sn: user801 +uid: uid801 +givenname: givenname801 +description: description801 +userPassword: password801 +mail: uid801 +uidnumber: 801 +gidnumber: 801 +homeDirectory: /home/uid801 + +dn: cn=user802,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user802 +sn: user802 +uid: uid802 +givenname: givenname802 +description: description802 +userPassword: password802 +mail: uid802 +uidnumber: 802 +gidnumber: 802 +homeDirectory: /home/uid802 + +dn: cn=user803,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user803 +sn: user803 +uid: uid803 +givenname: givenname803 +description: description803 +userPassword: password803 +mail: uid803 +uidnumber: 803 +gidnumber: 803 +homeDirectory: /home/uid803 + +dn: cn=user804,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user804 +sn: user804 +uid: uid804 +givenname: givenname804 +description: description804 +userPassword: password804 +mail: uid804 +uidnumber: 804 +gidnumber: 804 +homeDirectory: /home/uid804 + +dn: cn=user805,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user805 +sn: user805 +uid: uid805 +givenname: givenname805 +description: description805 +userPassword: password805 +mail: uid805 +uidnumber: 805 +gidnumber: 805 +homeDirectory: /home/uid805 + +dn: cn=user806,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user806 +sn: user806 +uid: uid806 +givenname: givenname806 +description: description806 +userPassword: password806 +mail: uid806 +uidnumber: 806 +gidnumber: 806 +homeDirectory: /home/uid806 + +dn: cn=user807,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user807 +sn: user807 +uid: uid807 +givenname: givenname807 +description: description807 +userPassword: password807 +mail: uid807 +uidnumber: 807 +gidnumber: 807 +homeDirectory: /home/uid807 + +dn: cn=user808,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user808 +sn: user808 +uid: uid808 +givenname: givenname808 +description: description808 +userPassword: password808 +mail: uid808 +uidnumber: 808 +gidnumber: 808 +homeDirectory: /home/uid808 + +dn: cn=user809,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user809 +sn: user809 +uid: uid809 +givenname: givenname809 +description: description809 +userPassword: password809 +mail: uid809 +uidnumber: 809 +gidnumber: 809 +homeDirectory: /home/uid809 + +dn: cn=user810,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user810 +sn: user810 +uid: uid810 +givenname: givenname810 +description: description810 +userPassword: password810 +mail: 
uid810 +uidnumber: 810 +gidnumber: 810 +homeDirectory: /home/uid810 + +dn: cn=user811,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user811 +sn: user811 +uid: uid811 +givenname: givenname811 +description: description811 +userPassword: password811 +mail: uid811 +uidnumber: 811 +gidnumber: 811 +homeDirectory: /home/uid811 + +dn: cn=user812,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user812 +sn: user812 +uid: uid812 +givenname: givenname812 +description: description812 +userPassword: password812 +mail: uid812 +uidnumber: 812 +gidnumber: 812 +homeDirectory: /home/uid812 + +dn: cn=user813,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user813 +sn: user813 +uid: uid813 +givenname: givenname813 +description: description813 +userPassword: password813 +mail: uid813 +uidnumber: 813 +gidnumber: 813 +homeDirectory: /home/uid813 + +dn: cn=user814,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user814 +sn: user814 +uid: uid814 +givenname: givenname814 +description: description814 +userPassword: password814 +mail: uid814 +uidnumber: 814 +gidnumber: 814 +homeDirectory: /home/uid814 + +dn: cn=user815,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user815 +sn: user815 +uid: uid815 +givenname: givenname815 +description: description815 +userPassword: password815 +mail: uid815 +uidnumber: 815 +gidnumber: 815 +homeDirectory: /home/uid815 + +dn: cn=user816,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user816 +sn: user816 +uid: uid816 +givenname: givenname816 +description: description816 +userPassword: password816 +mail: uid816 +uidnumber: 816 +gidnumber: 816 +homeDirectory: /home/uid816 + +dn: cn=user817,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user817 +sn: user817 +uid: uid817 +givenname: givenname817 +description: description817 +userPassword: password817 +mail: uid817 +uidnumber: 817 +gidnumber: 817 +homeDirectory: /home/uid817 + +dn: cn=user818,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user818 +sn: user818 +uid: uid818 +givenname: givenname818 +description: description818 +userPassword: password818 +mail: uid818 +uidnumber: 818 +gidnumber: 818 +homeDirectory: /home/uid818 + +dn: cn=user819,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user819 +sn: user819 +uid: uid819 +givenname: givenname819 +description: description819 +userPassword: password819 +mail: uid819 +uidnumber: 819 +gidnumber: 819 +homeDirectory: /home/uid819 + +dn: cn=user820,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson 
+objectClass: posixAccount +cn: user820 +sn: user820 +uid: uid820 +givenname: givenname820 +description: description820 +userPassword: password820 +mail: uid820 +uidnumber: 820 +gidnumber: 820 +homeDirectory: /home/uid820 + +dn: cn=user821,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user821 +sn: user821 +uid: uid821 +givenname: givenname821 +description: description821 +userPassword: password821 +mail: uid821 +uidnumber: 821 +gidnumber: 821 +homeDirectory: /home/uid821 + +dn: cn=user822,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user822 +sn: user822 +uid: uid822 +givenname: givenname822 +description: description822 +userPassword: password822 +mail: uid822 +uidnumber: 822 +gidnumber: 822 +homeDirectory: /home/uid822 + +dn: cn=user823,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user823 +sn: user823 +uid: uid823 +givenname: givenname823 +description: description823 +userPassword: password823 +mail: uid823 +uidnumber: 823 +gidnumber: 823 +homeDirectory: /home/uid823 + +dn: cn=user824,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user824 +sn: user824 +uid: uid824 +givenname: givenname824 +description: description824 +userPassword: password824 +mail: uid824 +uidnumber: 824 +gidnumber: 824 +homeDirectory: /home/uid824 + +dn: cn=user825,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user825 +sn: user825 +uid: uid825 +givenname: givenname825 +description: description825 +userPassword: password825 +mail: uid825 +uidnumber: 825 +gidnumber: 825 +homeDirectory: /home/uid825 + +dn: cn=user826,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user826 +sn: user826 +uid: uid826 +givenname: givenname826 +description: description826 +userPassword: password826 +mail: uid826 +uidnumber: 826 +gidnumber: 826 +homeDirectory: /home/uid826 + +dn: cn=user827,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user827 +sn: user827 +uid: uid827 +givenname: givenname827 +description: description827 +userPassword: password827 +mail: uid827 +uidnumber: 827 +gidnumber: 827 +homeDirectory: /home/uid827 + +dn: cn=user828,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user828 +sn: user828 +uid: uid828 +givenname: givenname828 +description: description828 +userPassword: password828 +mail: uid828 +uidnumber: 828 +gidnumber: 828 +homeDirectory: /home/uid828 + +dn: cn=user829,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user829 +sn: user829 +uid: uid829 +givenname: givenname829 +description: description829 +userPassword: password829 +mail: uid829 +uidnumber: 829 +gidnumber: 829 +homeDirectory: /home/uid829 + 
+dn: cn=user830,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user830 +sn: user830 +uid: uid830 +givenname: givenname830 +description: description830 +userPassword: password830 +mail: uid830 +uidnumber: 830 +gidnumber: 830 +homeDirectory: /home/uid830 + +dn: cn=user831,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user831 +sn: user831 +uid: uid831 +givenname: givenname831 +description: description831 +userPassword: password831 +mail: uid831 +uidnumber: 831 +gidnumber: 831 +homeDirectory: /home/uid831 + +dn: cn=user832,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user832 +sn: user832 +uid: uid832 +givenname: givenname832 +description: description832 +userPassword: password832 +mail: uid832 +uidnumber: 832 +gidnumber: 832 +homeDirectory: /home/uid832 + +dn: cn=user833,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user833 +sn: user833 +uid: uid833 +givenname: givenname833 +description: description833 +userPassword: password833 +mail: uid833 +uidnumber: 833 +gidnumber: 833 +homeDirectory: /home/uid833 + +dn: cn=user834,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user834 +sn: user834 +uid: uid834 +givenname: givenname834 +description: description834 +userPassword: password834 +mail: uid834 +uidnumber: 834 +gidnumber: 834 +homeDirectory: /home/uid834 + +dn: cn=user835,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user835 +sn: user835 +uid: uid835 +givenname: givenname835 +description: description835 +userPassword: password835 +mail: uid835 +uidnumber: 835 +gidnumber: 835 +homeDirectory: /home/uid835 + +dn: cn=user836,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user836 +sn: user836 +uid: uid836 +givenname: givenname836 +description: description836 +userPassword: password836 +mail: uid836 +uidnumber: 836 +gidnumber: 836 +homeDirectory: /home/uid836 + +dn: cn=user837,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user837 +sn: user837 +uid: uid837 +givenname: givenname837 +description: description837 +userPassword: password837 +mail: uid837 +uidnumber: 837 +gidnumber: 837 +homeDirectory: /home/uid837 + +dn: cn=user838,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user838 +sn: user838 +uid: uid838 +givenname: givenname838 +description: description838 +userPassword: password838 +mail: uid838 +uidnumber: 838 +gidnumber: 838 +homeDirectory: /home/uid838 + +dn: cn=user839,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user839 +sn: user839 +uid: uid839 +givenname: 
givenname839 +description: description839 +userPassword: password839 +mail: uid839 +uidnumber: 839 +gidnumber: 839 +homeDirectory: /home/uid839 + +dn: cn=user840,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user840 +sn: user840 +uid: uid840 +givenname: givenname840 +description: description840 +userPassword: password840 +mail: uid840 +uidnumber: 840 +gidnumber: 840 +homeDirectory: /home/uid840 + +dn: cn=user841,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user841 +sn: user841 +uid: uid841 +givenname: givenname841 +description: description841 +userPassword: password841 +mail: uid841 +uidnumber: 841 +gidnumber: 841 +homeDirectory: /home/uid841 + +dn: cn=user842,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user842 +sn: user842 +uid: uid842 +givenname: givenname842 +description: description842 +userPassword: password842 +mail: uid842 +uidnumber: 842 +gidnumber: 842 +homeDirectory: /home/uid842 + +dn: cn=user843,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user843 +sn: user843 +uid: uid843 +givenname: givenname843 +description: description843 +userPassword: password843 +mail: uid843 +uidnumber: 843 +gidnumber: 843 +homeDirectory: /home/uid843 + +dn: cn=user844,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user844 +sn: user844 +uid: uid844 +givenname: givenname844 +description: description844 +userPassword: password844 +mail: uid844 +uidnumber: 844 +gidnumber: 844 +homeDirectory: /home/uid844 + +dn: cn=user845,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user845 +sn: user845 +uid: uid845 +givenname: givenname845 +description: description845 +userPassword: password845 +mail: uid845 +uidnumber: 845 +gidnumber: 845 +homeDirectory: /home/uid845 + +dn: cn=user846,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user846 +sn: user846 +uid: uid846 +givenname: givenname846 +description: description846 +userPassword: password846 +mail: uid846 +uidnumber: 846 +gidnumber: 846 +homeDirectory: /home/uid846 + +dn: cn=user847,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user847 +sn: user847 +uid: uid847 +givenname: givenname847 +description: description847 +userPassword: password847 +mail: uid847 +uidnumber: 847 +gidnumber: 847 +homeDirectory: /home/uid847 + +dn: cn=user848,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user848 +sn: user848 +uid: uid848 +givenname: givenname848 +description: description848 +userPassword: password848 +mail: uid848 +uidnumber: 848 +gidnumber: 848 +homeDirectory: /home/uid848 + +dn: cn=user849,ou=People,dc=example,dc=com +objectClass: top +objectClass: 
person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user849 +sn: user849 +uid: uid849 +givenname: givenname849 +description: description849 +userPassword: password849 +mail: uid849 +uidnumber: 849 +gidnumber: 849 +homeDirectory: /home/uid849 + +dn: cn=user850,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user850 +sn: user850 +uid: uid850 +givenname: givenname850 +description: description850 +userPassword: password850 +mail: uid850 +uidnumber: 850 +gidnumber: 850 +homeDirectory: /home/uid850 + +dn: cn=user851,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user851 +sn: user851 +uid: uid851 +givenname: givenname851 +description: description851 +userPassword: password851 +mail: uid851 +uidnumber: 851 +gidnumber: 851 +homeDirectory: /home/uid851 + +dn: cn=user852,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user852 +sn: user852 +uid: uid852 +givenname: givenname852 +description: description852 +userPassword: password852 +mail: uid852 +uidnumber: 852 +gidnumber: 852 +homeDirectory: /home/uid852 + +dn: cn=user853,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user853 +sn: user853 +uid: uid853 +givenname: givenname853 +description: description853 +userPassword: password853 +mail: uid853 +uidnumber: 853 +gidnumber: 853 +homeDirectory: /home/uid853 + +dn: cn=user854,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user854 +sn: user854 +uid: uid854 +givenname: givenname854 +description: description854 +userPassword: password854 +mail: uid854 +uidnumber: 854 +gidnumber: 854 +homeDirectory: /home/uid854 + +dn: cn=user855,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user855 +sn: user855 +uid: uid855 +givenname: givenname855 +description: description855 +userPassword: password855 +mail: uid855 +uidnumber: 855 +gidnumber: 855 +homeDirectory: /home/uid855 + +dn: cn=user856,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user856 +sn: user856 +uid: uid856 +givenname: givenname856 +description: description856 +userPassword: password856 +mail: uid856 +uidnumber: 856 +gidnumber: 856 +homeDirectory: /home/uid856 + +dn: cn=user857,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user857 +sn: user857 +uid: uid857 +givenname: givenname857 +description: description857 +userPassword: password857 +mail: uid857 +uidnumber: 857 +gidnumber: 857 +homeDirectory: /home/uid857 + +dn: cn=user858,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user858 +sn: user858 +uid: uid858 +givenname: givenname858 +description: description858 +userPassword: password858 +mail: 
uid858 +uidnumber: 858 +gidnumber: 858 +homeDirectory: /home/uid858 + +dn: cn=user859,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user859 +sn: user859 +uid: uid859 +givenname: givenname859 +description: description859 +userPassword: password859 +mail: uid859 +uidnumber: 859 +gidnumber: 859 +homeDirectory: /home/uid859 + +dn: cn=user860,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user860 +sn: user860 +uid: uid860 +givenname: givenname860 +description: description860 +userPassword: password860 +mail: uid860 +uidnumber: 860 +gidnumber: 860 +homeDirectory: /home/uid860 + +dn: cn=user861,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user861 +sn: user861 +uid: uid861 +givenname: givenname861 +description: description861 +userPassword: password861 +mail: uid861 +uidnumber: 861 +gidnumber: 861 +homeDirectory: /home/uid861 + +dn: cn=user862,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user862 +sn: user862 +uid: uid862 +givenname: givenname862 +description: description862 +userPassword: password862 +mail: uid862 +uidnumber: 862 +gidnumber: 862 +homeDirectory: /home/uid862 + +dn: cn=user863,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user863 +sn: user863 +uid: uid863 +givenname: givenname863 +description: description863 +userPassword: password863 +mail: uid863 +uidnumber: 863 +gidnumber: 863 +homeDirectory: /home/uid863 + +dn: cn=user864,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user864 +sn: user864 +uid: uid864 +givenname: givenname864 +description: description864 +userPassword: password864 +mail: uid864 +uidnumber: 864 +gidnumber: 864 +homeDirectory: /home/uid864 + +dn: cn=user865,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user865 +sn: user865 +uid: uid865 +givenname: givenname865 +description: description865 +userPassword: password865 +mail: uid865 +uidnumber: 865 +gidnumber: 865 +homeDirectory: /home/uid865 + +dn: cn=user866,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user866 +sn: user866 +uid: uid866 +givenname: givenname866 +description: description866 +userPassword: password866 +mail: uid866 +uidnumber: 866 +gidnumber: 866 +homeDirectory: /home/uid866 + +dn: cn=user867,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user867 +sn: user867 +uid: uid867 +givenname: givenname867 +description: description867 +userPassword: password867 +mail: uid867 +uidnumber: 867 +gidnumber: 867 +homeDirectory: /home/uid867 + +dn: cn=user868,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson 
+objectClass: posixAccount +cn: user868 +sn: user868 +uid: uid868 +givenname: givenname868 +description: description868 +userPassword: password868 +mail: uid868 +uidnumber: 868 +gidnumber: 868 +homeDirectory: /home/uid868 + +dn: cn=user869,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user869 +sn: user869 +uid: uid869 +givenname: givenname869 +description: description869 +userPassword: password869 +mail: uid869 +uidnumber: 869 +gidnumber: 869 +homeDirectory: /home/uid869 + +dn: cn=user870,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user870 +sn: user870 +uid: uid870 +givenname: givenname870 +description: description870 +userPassword: password870 +mail: uid870 +uidnumber: 870 +gidnumber: 870 +homeDirectory: /home/uid870 + +dn: cn=user871,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user871 +sn: user871 +uid: uid871 +givenname: givenname871 +description: description871 +userPassword: password871 +mail: uid871 +uidnumber: 871 +gidnumber: 871 +homeDirectory: /home/uid871 + +dn: cn=user872,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user872 +sn: user872 +uid: uid872 +givenname: givenname872 +description: description872 +userPassword: password872 +mail: uid872 +uidnumber: 872 +gidnumber: 872 +homeDirectory: /home/uid872 + +dn: cn=user873,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user873 +sn: user873 +uid: uid873 +givenname: givenname873 +description: description873 +userPassword: password873 +mail: uid873 +uidnumber: 873 +gidnumber: 873 +homeDirectory: /home/uid873 + +dn: cn=user874,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user874 +sn: user874 +uid: uid874 +givenname: givenname874 +description: description874 +userPassword: password874 +mail: uid874 +uidnumber: 874 +gidnumber: 874 +homeDirectory: /home/uid874 + +dn: cn=user875,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user875 +sn: user875 +uid: uid875 +givenname: givenname875 +description: description875 +userPassword: password875 +mail: uid875 +uidnumber: 875 +gidnumber: 875 +homeDirectory: /home/uid875 + +dn: cn=user876,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user876 +sn: user876 +uid: uid876 +givenname: givenname876 +description: description876 +userPassword: password876 +mail: uid876 +uidnumber: 876 +gidnumber: 876 +homeDirectory: /home/uid876 + +dn: cn=user877,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user877 +sn: user877 +uid: uid877 +givenname: givenname877 +description: description877 +userPassword: password877 +mail: uid877 +uidnumber: 877 +gidnumber: 877 +homeDirectory: /home/uid877 + 
+dn: cn=user878,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user878 +sn: user878 +uid: uid878 +givenname: givenname878 +description: description878 +userPassword: password878 +mail: uid878 +uidnumber: 878 +gidnumber: 878 +homeDirectory: /home/uid878 + +dn: cn=user879,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user879 +sn: user879 +uid: uid879 +givenname: givenname879 +description: description879 +userPassword: password879 +mail: uid879 +uidnumber: 879 +gidnumber: 879 +homeDirectory: /home/uid879 + +dn: cn=user880,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user880 +sn: user880 +uid: uid880 +givenname: givenname880 +description: description880 +userPassword: password880 +mail: uid880 +uidnumber: 880 +gidnumber: 880 +homeDirectory: /home/uid880 + +dn: cn=user881,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user881 +sn: user881 +uid: uid881 +givenname: givenname881 +description: description881 +userPassword: password881 +mail: uid881 +uidnumber: 881 +gidnumber: 881 +homeDirectory: /home/uid881 + +dn: cn=user882,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user882 +sn: user882 +uid: uid882 +givenname: givenname882 +description: description882 +userPassword: password882 +mail: uid882 +uidnumber: 882 +gidnumber: 882 +homeDirectory: /home/uid882 + +dn: cn=user883,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user883 +sn: user883 +uid: uid883 +givenname: givenname883 +description: description883 +userPassword: password883 +mail: uid883 +uidnumber: 883 +gidnumber: 883 +homeDirectory: /home/uid883 + +dn: cn=user884,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user884 +sn: user884 +uid: uid884 +givenname: givenname884 +description: description884 +userPassword: password884 +mail: uid884 +uidnumber: 884 +gidnumber: 884 +homeDirectory: /home/uid884 + +dn: cn=user885,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user885 +sn: user885 +uid: uid885 +givenname: givenname885 +description: description885 +userPassword: password885 +mail: uid885 +uidnumber: 885 +gidnumber: 885 +homeDirectory: /home/uid885 + +dn: cn=user886,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user886 +sn: user886 +uid: uid886 +givenname: givenname886 +description: description886 +userPassword: password886 +mail: uid886 +uidnumber: 886 +gidnumber: 886 +homeDirectory: /home/uid886 + +dn: cn=user887,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user887 +sn: user887 +uid: uid887 +givenname: 
givenname887 +description: description887 +userPassword: password887 +mail: uid887 +uidnumber: 887 +gidnumber: 887 +homeDirectory: /home/uid887 + +dn: cn=user888,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user888 +sn: user888 +uid: uid888 +givenname: givenname888 +description: description888 +userPassword: password888 +mail: uid888 +uidnumber: 888 +gidnumber: 888 +homeDirectory: /home/uid888 + +dn: cn=user889,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user889 +sn: user889 +uid: uid889 +givenname: givenname889 +description: description889 +userPassword: password889 +mail: uid889 +uidnumber: 889 +gidnumber: 889 +homeDirectory: /home/uid889 + +dn: cn=user890,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user890 +sn: user890 +uid: uid890 +givenname: givenname890 +description: description890 +userPassword: password890 +mail: uid890 +uidnumber: 890 +gidnumber: 890 +homeDirectory: /home/uid890 + +dn: cn=user891,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user891 +sn: user891 +uid: uid891 +givenname: givenname891 +description: description891 +userPassword: password891 +mail: uid891 +uidnumber: 891 +gidnumber: 891 +homeDirectory: /home/uid891 + +dn: cn=user892,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user892 +sn: user892 +uid: uid892 +givenname: givenname892 +description: description892 +userPassword: password892 +mail: uid892 +uidnumber: 892 +gidnumber: 892 +homeDirectory: /home/uid892 + +dn: cn=user893,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user893 +sn: user893 +uid: uid893 +givenname: givenname893 +description: description893 +userPassword: password893 +mail: uid893 +uidnumber: 893 +gidnumber: 893 +homeDirectory: /home/uid893 + +dn: cn=user894,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user894 +sn: user894 +uid: uid894 +givenname: givenname894 +description: description894 +userPassword: password894 +mail: uid894 +uidnumber: 894 +gidnumber: 894 +homeDirectory: /home/uid894 + +dn: cn=user895,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user895 +sn: user895 +uid: uid895 +givenname: givenname895 +description: description895 +userPassword: password895 +mail: uid895 +uidnumber: 895 +gidnumber: 895 +homeDirectory: /home/uid895 + +dn: cn=user896,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user896 +sn: user896 +uid: uid896 +givenname: givenname896 +description: description896 +userPassword: password896 +mail: uid896 +uidnumber: 896 +gidnumber: 896 +homeDirectory: /home/uid896 + +dn: cn=user897,ou=People,dc=example,dc=com +objectClass: top +objectClass: 
person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user897 +sn: user897 +uid: uid897 +givenname: givenname897 +description: description897 +userPassword: password897 +mail: uid897 +uidnumber: 897 +gidnumber: 897 +homeDirectory: /home/uid897 + +dn: cn=user898,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user898 +sn: user898 +uid: uid898 +givenname: givenname898 +description: description898 +userPassword: password898 +mail: uid898 +uidnumber: 898 +gidnumber: 898 +homeDirectory: /home/uid898 + +dn: cn=user899,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user899 +sn: user899 +uid: uid899 +givenname: givenname899 +description: description899 +userPassword: password899 +mail: uid899 +uidnumber: 899 +gidnumber: 899 +homeDirectory: /home/uid899 + +dn: cn=user900,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user900 +sn: user900 +uid: uid900 +givenname: givenname900 +description: description900 +userPassword: password900 +mail: uid900 +uidnumber: 900 +gidnumber: 900 +homeDirectory: /home/uid900 + +dn: cn=user901,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user901 +sn: user901 +uid: uid901 +givenname: givenname901 +description: description901 +userPassword: password901 +mail: uid901 +uidnumber: 901 +gidnumber: 901 +homeDirectory: /home/uid901 + +dn: cn=user902,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user902 +sn: user902 +uid: uid902 +givenname: givenname902 +description: description902 +userPassword: password902 +mail: uid902 +uidnumber: 902 +gidnumber: 902 +homeDirectory: /home/uid902 + +dn: cn=user903,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user903 +sn: user903 +uid: uid903 +givenname: givenname903 +description: description903 +userPassword: password903 +mail: uid903 +uidnumber: 903 +gidnumber: 903 +homeDirectory: /home/uid903 + +dn: cn=user904,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user904 +sn: user904 +uid: uid904 +givenname: givenname904 +description: description904 +userPassword: password904 +mail: uid904 +uidnumber: 904 +gidnumber: 904 +homeDirectory: /home/uid904 + +dn: cn=user905,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user905 +sn: user905 +uid: uid905 +givenname: givenname905 +description: description905 +userPassword: password905 +mail: uid905 +uidnumber: 905 +gidnumber: 905 +homeDirectory: /home/uid905 + +dn: cn=user906,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user906 +sn: user906 +uid: uid906 +givenname: givenname906 +description: description906 +userPassword: password906 +mail: 
uid906 +uidnumber: 906 +gidnumber: 906 +homeDirectory: /home/uid906 + +dn: cn=user907,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user907 +sn: user907 +uid: uid907 +givenname: givenname907 +description: description907 +userPassword: password907 +mail: uid907 +uidnumber: 907 +gidnumber: 907 +homeDirectory: /home/uid907 + +dn: cn=user908,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user908 +sn: user908 +uid: uid908 +givenname: givenname908 +description: description908 +userPassword: password908 +mail: uid908 +uidnumber: 908 +gidnumber: 908 +homeDirectory: /home/uid908 + +dn: cn=user909,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user909 +sn: user909 +uid: uid909 +givenname: givenname909 +description: description909 +userPassword: password909 +mail: uid909 +uidnumber: 909 +gidnumber: 909 +homeDirectory: /home/uid909 + +dn: cn=user910,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user910 +sn: user910 +uid: uid910 +givenname: givenname910 +description: description910 +userPassword: password910 +mail: uid910 +uidnumber: 910 +gidnumber: 910 +homeDirectory: /home/uid910 + +dn: cn=user911,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user911 +sn: user911 +uid: uid911 +givenname: givenname911 +description: description911 +userPassword: password911 +mail: uid911 +uidnumber: 911 +gidnumber: 911 +homeDirectory: /home/uid911 + +dn: cn=user912,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user912 +sn: user912 +uid: uid912 +givenname: givenname912 +description: description912 +userPassword: password912 +mail: uid912 +uidnumber: 912 +gidnumber: 912 +homeDirectory: /home/uid912 + +dn: cn=user913,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user913 +sn: user913 +uid: uid913 +givenname: givenname913 +description: description913 +userPassword: password913 +mail: uid913 +uidnumber: 913 +gidnumber: 913 +homeDirectory: /home/uid913 + +dn: cn=user914,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user914 +sn: user914 +uid: uid914 +givenname: givenname914 +description: description914 +userPassword: password914 +mail: uid914 +uidnumber: 914 +gidnumber: 914 +homeDirectory: /home/uid914 + +dn: cn=user915,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user915 +sn: user915 +uid: uid915 +givenname: givenname915 +description: description915 +userPassword: password915 +mail: uid915 +uidnumber: 915 +gidnumber: 915 +homeDirectory: /home/uid915 + +dn: cn=user916,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson 
+objectClass: posixAccount +cn: user916 +sn: user916 +uid: uid916 +givenname: givenname916 +description: description916 +userPassword: password916 +mail: uid916 +uidnumber: 916 +gidnumber: 916 +homeDirectory: /home/uid916 + +dn: cn=user917,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user917 +sn: user917 +uid: uid917 +givenname: givenname917 +description: description917 +userPassword: password917 +mail: uid917 +uidnumber: 917 +gidnumber: 917 +homeDirectory: /home/uid917 + +dn: cn=user918,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user918 +sn: user918 +uid: uid918 +givenname: givenname918 +description: description918 +userPassword: password918 +mail: uid918 +uidnumber: 918 +gidnumber: 918 +homeDirectory: /home/uid918 + +dn: cn=user919,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user919 +sn: user919 +uid: uid919 +givenname: givenname919 +description: description919 +userPassword: password919 +mail: uid919 +uidnumber: 919 +gidnumber: 919 +homeDirectory: /home/uid919 + +dn: cn=user920,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user920 +sn: user920 +uid: uid920 +givenname: givenname920 +description: description920 +userPassword: password920 +mail: uid920 +uidnumber: 920 +gidnumber: 920 +homeDirectory: /home/uid920 + +dn: cn=user921,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user921 +sn: user921 +uid: uid921 +givenname: givenname921 +description: description921 +userPassword: password921 +mail: uid921 +uidnumber: 921 +gidnumber: 921 +homeDirectory: /home/uid921 + +dn: cn=user922,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user922 +sn: user922 +uid: uid922 +givenname: givenname922 +description: description922 +userPassword: password922 +mail: uid922 +uidnumber: 922 +gidnumber: 922 +homeDirectory: /home/uid922 + +dn: cn=user923,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user923 +sn: user923 +uid: uid923 +givenname: givenname923 +description: description923 +userPassword: password923 +mail: uid923 +uidnumber: 923 +gidnumber: 923 +homeDirectory: /home/uid923 + +dn: cn=user924,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user924 +sn: user924 +uid: uid924 +givenname: givenname924 +description: description924 +userPassword: password924 +mail: uid924 +uidnumber: 924 +gidnumber: 924 +homeDirectory: /home/uid924 + +dn: cn=user925,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user925 +sn: user925 +uid: uid925 +givenname: givenname925 +description: description925 +userPassword: password925 +mail: uid925 +uidnumber: 925 +gidnumber: 925 +homeDirectory: /home/uid925 + 
+dn: cn=user926,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user926 +sn: user926 +uid: uid926 +givenname: givenname926 +description: description926 +userPassword: password926 +mail: uid926 +uidnumber: 926 +gidnumber: 926 +homeDirectory: /home/uid926 + +dn: cn=user927,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user927 +sn: user927 +uid: uid927 +givenname: givenname927 +description: description927 +userPassword: password927 +mail: uid927 +uidnumber: 927 +gidnumber: 927 +homeDirectory: /home/uid927 + +dn: cn=user928,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user928 +sn: user928 +uid: uid928 +givenname: givenname928 +description: description928 +userPassword: password928 +mail: uid928 +uidnumber: 928 +gidnumber: 928 +homeDirectory: /home/uid928 + +dn: cn=user929,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user929 +sn: user929 +uid: uid929 +givenname: givenname929 +description: description929 +userPassword: password929 +mail: uid929 +uidnumber: 929 +gidnumber: 929 +homeDirectory: /home/uid929 + +dn: cn=user930,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user930 +sn: user930 +uid: uid930 +givenname: givenname930 +description: description930 +userPassword: password930 +mail: uid930 +uidnumber: 930 +gidnumber: 930 +homeDirectory: /home/uid930 + +dn: cn=user931,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user931 +sn: user931 +uid: uid931 +givenname: givenname931 +description: description931 +userPassword: password931 +mail: uid931 +uidnumber: 931 +gidnumber: 931 +homeDirectory: /home/uid931 + +dn: cn=user932,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user932 +sn: user932 +uid: uid932 +givenname: givenname932 +description: description932 +userPassword: password932 +mail: uid932 +uidnumber: 932 +gidnumber: 932 +homeDirectory: /home/uid932 + +dn: cn=user933,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user933 +sn: user933 +uid: uid933 +givenname: givenname933 +description: description933 +userPassword: password933 +mail: uid933 +uidnumber: 933 +gidnumber: 933 +homeDirectory: /home/uid933 + +dn: cn=user934,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user934 +sn: user934 +uid: uid934 +givenname: givenname934 +description: description934 +userPassword: password934 +mail: uid934 +uidnumber: 934 +gidnumber: 934 +homeDirectory: /home/uid934 + +dn: cn=user935,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user935 +sn: user935 +uid: uid935 +givenname: 
givenname935 +description: description935 +userPassword: password935 +mail: uid935 +uidnumber: 935 +gidnumber: 935 +homeDirectory: /home/uid935 + +dn: cn=user936,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user936 +sn: user936 +uid: uid936 +givenname: givenname936 +description: description936 +userPassword: password936 +mail: uid936 +uidnumber: 936 +gidnumber: 936 +homeDirectory: /home/uid936 + +dn: cn=user937,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user937 +sn: user937 +uid: uid937 +givenname: givenname937 +description: description937 +userPassword: password937 +mail: uid937 +uidnumber: 937 +gidnumber: 937 +homeDirectory: /home/uid937 + +dn: cn=user938,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user938 +sn: user938 +uid: uid938 +givenname: givenname938 +description: description938 +userPassword: password938 +mail: uid938 +uidnumber: 938 +gidnumber: 938 +homeDirectory: /home/uid938 + +dn: cn=user939,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user939 +sn: user939 +uid: uid939 +givenname: givenname939 +description: description939 +userPassword: password939 +mail: uid939 +uidnumber: 939 +gidnumber: 939 +homeDirectory: /home/uid939 + +dn: cn=user940,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user940 +sn: user940 +uid: uid940 +givenname: givenname940 +description: description940 +userPassword: password940 +mail: uid940 +uidnumber: 940 +gidnumber: 940 +homeDirectory: /home/uid940 + +dn: cn=user941,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user941 +sn: user941 +uid: uid941 +givenname: givenname941 +description: description941 +userPassword: password941 +mail: uid941 +uidnumber: 941 +gidnumber: 941 +homeDirectory: /home/uid941 + +dn: cn=user942,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user942 +sn: user942 +uid: uid942 +givenname: givenname942 +description: description942 +userPassword: password942 +mail: uid942 +uidnumber: 942 +gidnumber: 942 +homeDirectory: /home/uid942 + +dn: cn=user943,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user943 +sn: user943 +uid: uid943 +givenname: givenname943 +description: description943 +userPassword: password943 +mail: uid943 +uidnumber: 943 +gidnumber: 943 +homeDirectory: /home/uid943 + +dn: cn=user944,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user944 +sn: user944 +uid: uid944 +givenname: givenname944 +description: description944 +userPassword: password944 +mail: uid944 +uidnumber: 944 +gidnumber: 944 +homeDirectory: /home/uid944 + +dn: cn=user945,ou=People,dc=example,dc=com +objectClass: top +objectClass: 
person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user945 +sn: user945 +uid: uid945 +givenname: givenname945 +description: description945 +userPassword: password945 +mail: uid945 +uidnumber: 945 +gidnumber: 945 +homeDirectory: /home/uid945 + +dn: cn=user946,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user946 +sn: user946 +uid: uid946 +givenname: givenname946 +description: description946 +userPassword: password946 +mail: uid946 +uidnumber: 946 +gidnumber: 946 +homeDirectory: /home/uid946 + +dn: cn=user947,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user947 +sn: user947 +uid: uid947 +givenname: givenname947 +description: description947 +userPassword: password947 +mail: uid947 +uidnumber: 947 +gidnumber: 947 +homeDirectory: /home/uid947 + +dn: cn=user948,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user948 +sn: user948 +uid: uid948 +givenname: givenname948 +description: description948 +userPassword: password948 +mail: uid948 +uidnumber: 948 +gidnumber: 948 +homeDirectory: /home/uid948 + +dn: cn=user949,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user949 +sn: user949 +uid: uid949 +givenname: givenname949 +description: description949 +userPassword: password949 +mail: uid949 +uidnumber: 949 +gidnumber: 949 +homeDirectory: /home/uid949 + +dn: cn=user950,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user950 +sn: user950 +uid: uid950 +givenname: givenname950 +description: description950 +userPassword: password950 +mail: uid950 +uidnumber: 950 +gidnumber: 950 +homeDirectory: /home/uid950 + +dn: cn=user951,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user951 +sn: user951 +uid: uid951 +givenname: givenname951 +description: description951 +userPassword: password951 +mail: uid951 +uidnumber: 951 +gidnumber: 951 +homeDirectory: /home/uid951 + +dn: cn=user952,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user952 +sn: user952 +uid: uid952 +givenname: givenname952 +description: description952 +userPassword: password952 +mail: uid952 +uidnumber: 952 +gidnumber: 952 +homeDirectory: /home/uid952 + +dn: cn=user953,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user953 +sn: user953 +uid: uid953 +givenname: givenname953 +description: description953 +userPassword: password953 +mail: uid953 +uidnumber: 953 +gidnumber: 953 +homeDirectory: /home/uid953 + +dn: cn=user954,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user954 +sn: user954 +uid: uid954 +givenname: givenname954 +description: description954 +userPassword: password954 +mail: 
uid954 +uidnumber: 954 +gidnumber: 954 +homeDirectory: /home/uid954 + +dn: cn=user955,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user955 +sn: user955 +uid: uid955 +givenname: givenname955 +description: description955 +userPassword: password955 +mail: uid955 +uidnumber: 955 +gidnumber: 955 +homeDirectory: /home/uid955 + +dn: cn=user956,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user956 +sn: user956 +uid: uid956 +givenname: givenname956 +description: description956 +userPassword: password956 +mail: uid956 +uidnumber: 956 +gidnumber: 956 +homeDirectory: /home/uid956 + +dn: cn=user957,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user957 +sn: user957 +uid: uid957 +givenname: givenname957 +description: description957 +userPassword: password957 +mail: uid957 +uidnumber: 957 +gidnumber: 957 +homeDirectory: /home/uid957 + +dn: cn=user958,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user958 +sn: user958 +uid: uid958 +givenname: givenname958 +description: description958 +userPassword: password958 +mail: uid958 +uidnumber: 958 +gidnumber: 958 +homeDirectory: /home/uid958 + +dn: cn=user959,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user959 +sn: user959 +uid: uid959 +givenname: givenname959 +description: description959 +userPassword: password959 +mail: uid959 +uidnumber: 959 +gidnumber: 959 +homeDirectory: /home/uid959 + +dn: cn=user960,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user960 +sn: user960 +uid: uid960 +givenname: givenname960 +description: description960 +userPassword: password960 +mail: uid960 +uidnumber: 960 +gidnumber: 960 +homeDirectory: /home/uid960 + +dn: cn=user961,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user961 +sn: user961 +uid: uid961 +givenname: givenname961 +description: description961 +userPassword: password961 +mail: uid961 +uidnumber: 961 +gidnumber: 961 +homeDirectory: /home/uid961 + +dn: cn=user962,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user962 +sn: user962 +uid: uid962 +givenname: givenname962 +description: description962 +userPassword: password962 +mail: uid962 +uidnumber: 962 +gidnumber: 962 +homeDirectory: /home/uid962 + +dn: cn=user963,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user963 +sn: user963 +uid: uid963 +givenname: givenname963 +description: description963 +userPassword: password963 +mail: uid963 +uidnumber: 963 +gidnumber: 963 +homeDirectory: /home/uid963 + +dn: cn=user964,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson 
+objectClass: posixAccount +cn: user964 +sn: user964 +uid: uid964 +givenname: givenname964 +description: description964 +userPassword: password964 +mail: uid964 +uidnumber: 964 +gidnumber: 964 +homeDirectory: /home/uid964 + +dn: cn=user965,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user965 +sn: user965 +uid: uid965 +givenname: givenname965 +description: description965 +userPassword: password965 +mail: uid965 +uidnumber: 965 +gidnumber: 965 +homeDirectory: /home/uid965 + +dn: cn=user966,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user966 +sn: user966 +uid: uid966 +givenname: givenname966 +description: description966 +userPassword: password966 +mail: uid966 +uidnumber: 966 +gidnumber: 966 +homeDirectory: /home/uid966 + +dn: cn=user967,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user967 +sn: user967 +uid: uid967 +givenname: givenname967 +description: description967 +userPassword: password967 +mail: uid967 +uidnumber: 967 +gidnumber: 967 +homeDirectory: /home/uid967 + +dn: cn=user968,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user968 +sn: user968 +uid: uid968 +givenname: givenname968 +description: description968 +userPassword: password968 +mail: uid968 +uidnumber: 968 +gidnumber: 968 +homeDirectory: /home/uid968 + +dn: cn=user969,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user969 +sn: user969 +uid: uid969 +givenname: givenname969 +description: description969 +userPassword: password969 +mail: uid969 +uidnumber: 969 +gidnumber: 969 +homeDirectory: /home/uid969 + +dn: cn=user970,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user970 +sn: user970 +uid: uid970 +givenname: givenname970 +description: description970 +userPassword: password970 +mail: uid970 +uidnumber: 970 +gidnumber: 970 +homeDirectory: /home/uid970 + +dn: cn=user971,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user971 +sn: user971 +uid: uid971 +givenname: givenname971 +description: description971 +userPassword: password971 +mail: uid971 +uidnumber: 971 +gidnumber: 971 +homeDirectory: /home/uid971 + +dn: cn=user972,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user972 +sn: user972 +uid: uid972 +givenname: givenname972 +description: description972 +userPassword: password972 +mail: uid972 +uidnumber: 972 +gidnumber: 972 +homeDirectory: /home/uid972 + +dn: cn=user973,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user973 +sn: user973 +uid: uid973 +givenname: givenname973 +description: description973 +userPassword: password973 +mail: uid973 +uidnumber: 973 +gidnumber: 973 +homeDirectory: /home/uid973 + 
+dn: cn=user974,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user974 +sn: user974 +uid: uid974 +givenname: givenname974 +description: description974 +userPassword: password974 +mail: uid974 +uidnumber: 974 +gidnumber: 974 +homeDirectory: /home/uid974 + +dn: cn=user975,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user975 +sn: user975 +uid: uid975 +givenname: givenname975 +description: description975 +userPassword: password975 +mail: uid975 +uidnumber: 975 +gidnumber: 975 +homeDirectory: /home/uid975 + +dn: cn=user976,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user976 +sn: user976 +uid: uid976 +givenname: givenname976 +description: description976 +userPassword: password976 +mail: uid976 +uidnumber: 976 +gidnumber: 976 +homeDirectory: /home/uid976 + +dn: cn=user977,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user977 +sn: user977 +uid: uid977 +givenname: givenname977 +description: description977 +userPassword: password977 +mail: uid977 +uidnumber: 977 +gidnumber: 977 +homeDirectory: /home/uid977 + +dn: cn=user978,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user978 +sn: user978 +uid: uid978 +givenname: givenname978 +description: description978 +userPassword: password978 +mail: uid978 +uidnumber: 978 +gidnumber: 978 +homeDirectory: /home/uid978 + +dn: cn=user979,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user979 +sn: user979 +uid: uid979 +givenname: givenname979 +description: description979 +userPassword: password979 +mail: uid979 +uidnumber: 979 +gidnumber: 979 +homeDirectory: /home/uid979 + +dn: cn=user980,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user980 +sn: user980 +uid: uid980 +givenname: givenname980 +description: description980 +userPassword: password980 +mail: uid980 +uidnumber: 980 +gidnumber: 980 +homeDirectory: /home/uid980 + +dn: cn=user981,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user981 +sn: user981 +uid: uid981 +givenname: givenname981 +description: description981 +userPassword: password981 +mail: uid981 +uidnumber: 981 +gidnumber: 981 +homeDirectory: /home/uid981 + +dn: cn=user982,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user982 +sn: user982 +uid: uid982 +givenname: givenname982 +description: description982 +userPassword: password982 +mail: uid982 +uidnumber: 982 +gidnumber: 982 +homeDirectory: /home/uid982 + +dn: cn=user983,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user983 +sn: user983 +uid: uid983 +givenname: 
givenname983 +description: description983 +userPassword: password983 +mail: uid983 +uidnumber: 983 +gidnumber: 983 +homeDirectory: /home/uid983 + +dn: cn=user984,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user984 +sn: user984 +uid: uid984 +givenname: givenname984 +description: description984 +userPassword: password984 +mail: uid984 +uidnumber: 984 +gidnumber: 984 +homeDirectory: /home/uid984 + +dn: cn=user985,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user985 +sn: user985 +uid: uid985 +givenname: givenname985 +description: description985 +userPassword: password985 +mail: uid985 +uidnumber: 985 +gidnumber: 985 +homeDirectory: /home/uid985 + +dn: cn=user986,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user986 +sn: user986 +uid: uid986 +givenname: givenname986 +description: description986 +userPassword: password986 +mail: uid986 +uidnumber: 986 +gidnumber: 986 +homeDirectory: /home/uid986 + +dn: cn=user987,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user987 +sn: user987 +uid: uid987 +givenname: givenname987 +description: description987 +userPassword: password987 +mail: uid987 +uidnumber: 987 +gidnumber: 987 +homeDirectory: /home/uid987 + +dn: cn=user988,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user988 +sn: user988 +uid: uid988 +givenname: givenname988 +description: description988 +userPassword: password988 +mail: uid988 +uidnumber: 988 +gidnumber: 988 +homeDirectory: /home/uid988 + +dn: cn=user989,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user989 +sn: user989 +uid: uid989 +givenname: givenname989 +description: description989 +userPassword: password989 +mail: uid989 +uidnumber: 989 +gidnumber: 989 +homeDirectory: /home/uid989 + +dn: cn=user990,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user990 +sn: user990 +uid: uid990 +givenname: givenname990 +description: description990 +userPassword: password990 +mail: uid990 +uidnumber: 990 +gidnumber: 990 +homeDirectory: /home/uid990 + +dn: cn=user991,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user991 +sn: user991 +uid: uid991 +givenname: givenname991 +description: description991 +userPassword: password991 +mail: uid991 +uidnumber: 991 +gidnumber: 991 +homeDirectory: /home/uid991 + +dn: cn=user992,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user992 +sn: user992 +uid: uid992 +givenname: givenname992 +description: description992 +userPassword: password992 +mail: uid992 +uidnumber: 992 +gidnumber: 992 +homeDirectory: /home/uid992 + +dn: cn=user993,ou=People,dc=example,dc=com +objectClass: top +objectClass: 
person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user993 +sn: user993 +uid: uid993 +givenname: givenname993 +description: description993 +userPassword: password993 +mail: uid993 +uidnumber: 993 +gidnumber: 993 +homeDirectory: /home/uid993 + +dn: cn=user994,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user994 +sn: user994 +uid: uid994 +givenname: givenname994 +description: description994 +userPassword: password994 +mail: uid994 +uidnumber: 994 +gidnumber: 994 +homeDirectory: /home/uid994 + +dn: cn=user995,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user995 +sn: user995 +uid: uid995 +givenname: givenname995 +description: description995 +userPassword: password995 +mail: uid995 +uidnumber: 995 +gidnumber: 995 +homeDirectory: /home/uid995 + +dn: cn=user996,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user996 +sn: user996 +uid: uid996 +givenname: givenname996 +description: description996 +userPassword: password996 +mail: uid996 +uidnumber: 996 +gidnumber: 996 +homeDirectory: /home/uid996 + +dn: cn=user997,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user997 +sn: user997 +uid: uid997 +givenname: givenname997 +description: description997 +userPassword: password997 +mail: uid997 +uidnumber: 997 +gidnumber: 997 +homeDirectory: /home/uid997 + +dn: cn=user998,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user998 +sn: user998 +uid: uid998 +givenname: givenname998 +description: description998 +userPassword: password998 +mail: uid998 +uidnumber: 998 +gidnumber: 998 +homeDirectory: /home/uid998 + +dn: cn=user999,ou=People,dc=example,dc=com +objectClass: top +objectClass: person +objectClass: organizationalPerson +objectClass: inetOrgPerson +objectClass: posixAccount +cn: user999 +sn: user999 +uid: uid999 +givenname: givenname999 +description: description999 +userPassword: password999 +mail: uid999 +uidnumber: 999 +gidnumber: 999 +homeDirectory: /home/uid999 + diff --git a/dirsrvtests/tests/data/ticket49121/utf8str.txt b/dirsrvtests/tests/data/ticket49121/utf8str.txt new file mode 100644 index 0000000..0005c4e --- /dev/null +++ b/dirsrvtests/tests/data/ticket49121/utf8str.txt @@ -0,0 +1 @@ +あいうえお diff --git a/dirsrvtests/tests/data/ticket49441/binary.ldif b/dirsrvtests/tests/data/ticket49441/binary.ldif new file mode 100644 index 0000000..bdebaf8 --- /dev/null +++ b/dirsrvtests/tests/data/ticket49441/binary.ldif @@ -0,0 +1,858 @@ +version: 1 + +# entry-id: 1 +dn: dc=example,dc=com +objectClass: domain +objectClass: top +dc: example +nsUniqueId: f49ca102-c2ee11e7-9170b029-e68fda34 +creatorsName: +modifiersName: +createTimestamp: 20171106123544Z +modifyTimestamp: 20171106123544Z + +# entry-id: 2 +dn: ou=binary,dc=example,dc=com +certificateRevocationList;binary:: MIITbjCCElYCAQEwDQYJKoZIhvcNAQEFBQAwVzELMAk + GA1UEBhMCVVMxEDAOBgNVBAoTB0VudHJ1c3QxIjAgBgNVBAsTGUNlcnRpZmljYXRpb24gQXV0aG9y + aXRpZXMxEjAQBgNVBAsTCURDb21TdWJDQRcNMTcxMDE2MTUxNjAyWhcNMTcxMDE5MTUxNjAyWjCCE + 
ZcwIwIEV4cj0hcNMTYxMTMwMDAyNDA0WjAMMAoGA1UdFQQDCgEAMCMCBFeHI9EXDTE2MTEzMDAwMj + gwNVowDDAKBgNVHRUEAwoBADAjAgRXhyPPFw0xNjExMzAwMDIxNDJaMAwwCgYDVR0VBAMKAQAwIwI + EV4cjzhcNMTYxMTMwMDAzMTE0WjAMMAoGA1UdFQQDCgEAMCMCBFeHI2gXDTE2MTEyOTE1MTM0M1ow + DDAKBgNVHRUEAwoBADA9AgRXhwCzFw0xNjExMDIyMjQ0NThaMCYwCgYDVR0VBAMKAQEwGAYDVR0YB + BEYDzIwMTYwOTA3MDEzODU1WjAjAgRXhvE4Fw0xNjA4MDExNDA5MTFaMAwwCgYDVR0VBAMKAQAwIw + IEV4bxNxcNMTYwODAxMTQwODU4WjAMMAoGA1UdFQQDCgEAMCMCBEkD2YYXDTE2MDcwNTE1NTg0NVo + wDDAKBgNVHRUEAwoBADAjAgRJA9mFFw0xNjA3MDUxNTU1MTlaMAwwCgYDVR0VBAMKAQAwIwIESQPT + cRcNMTYxMTMwMDAyODA1WjAMMAoGA1UdFQQDCgEAMCMCBEkD03AXDTE2MTEzMDAwMjgwNVowDDAKB + gNVHRUEAwoBADAjAgRJA9NuFw0xNjA2MjAxNjQ4NTlaMAwwCgYDVR0VBAMKAQAwIwIESQPSOBcNMT + YwNjE3MTU1OTM4WjAMMAoGA1UdFQQDCgEAMCMCBEkD0jcXDTE2MTEzMDAwMzExNFowDDAKBgNVHRU + EAwoBADAjAgRJA9I0Fw0xNjA2MjAxNzAyMDJaMAwwCgYDVR0VBAMKAQAwIwIESQPSMxcNMTYwNjIw + MTcwMjAyWjAMMAoGA1UdFQQDCgEAMCMCBEkD0jEXDTE2MDYxNzE1NDgwMlowDDAKBgNVHRUEAwoBA + DAjAgRJA9IwFw0xNjExMzAwMDMxMTRaMAwwCgYDVR0VBAMKAQAwIwIESQPSLhcNMTYwNjE3MTU0MD + A2WjAMMAoGA1UdFQQDCgEAMCMCBEkD0VIXDTE2MTEzMDAwMzExNFowDDAKBgNVHRUEAwoBADAjAgR + JA9FRFw0xNjExMzAwMDMxMTRaMAwwCgYDVR0VBAMKAQAwIwIESQPRTxcNMTYwNjE1MTkyMDU4WjAM + MAoGA1UdFQQDCgEAMCMCBEkD0U4XDTE2MDYxNTE5MjYyMlowDDAKBgNVHRUEAwoBADAjAgRJA9FLF + w0xNjA2MTUxODQ5MzZaMAwwCgYDVR0VBAMKAQAwIwIESQPRShcNMTYwNjE1MTQzNDU1WjAMMAoGA1 + UdFQQDCgEAMCMCBEkD0UkXDTE2MDYxNTE0MzEyMlowDDAKBgNVHRUEAwoBADAjAgRJA9FIFw0xNjA + 2MTUxNDMwMTdaMAwwCgYDVR0VBAMKAQAwIwIESQPQexcNMTYwNjE1MTkyNjIyWjAMMAoGA1UdFQQD + CgEAMCMCBEkD0HoXDTE2MDYxNTE5MjYyMlowDDAKBgNVHRUEAwoBADAjAgRJA9B4Fw0xNjA2MTQxM + TQ3MzlaMAwwCgYDVR0VBAMKAQAwIwIESQPQdxcNMTYwNjE1MTkyNTU5WjAMMAoGA1UdFQQDCgEAMC + MCBEkD0HYXDTE2MDYxNTE5MjU1OVowDDAKBgNVHRUEAwoBADAjAgRJA9B0Fw0xNjA2MTQxMTQzMzh + aMAwwCgYDVR0VBAMKAQAwIwIESQPQcxcNMTYwNjE0MTE0MDU4WjAMMAoGA1UdFQQDCgEAMCMCBEkD + 0HIXDTE2MDYxNTE5MjU0NlowDDAKBgNVHRUEAwoBADAjAgRJA9BwFw0xNjA2MTQxMTE3NDlaMAwwC + gYDVR0VBAMKAQAwIwIESQPLhhcNMTYwNjAxMjI1NTA1WjAMMAoGA1UdFQQDCgEAMCMCBEkDyRgXDT + E2MDUyNjIxNDQwOFowDDAKBgNVHRUEAwoBADAjAgRJA8kXFw0xNjA1MjYyMTQzMjdaMAwwCgYDVR0 + VBAMKAQAwIwIESQPIsRcNMTYwNTI2MTUxOTMwWjAMMAoGA1UdFQQDCgEAMCMCBEkDmmEXDTE2MDYx + NTE5MjU0NlowDDAKBgNVHRUEAwoBADAjAgRJA5pgFw0xNjA2MTUxOTI1NDZaMAwwCgYDVR0VBAMKA + QAwIwIESQOZ9RcNMTYwNjE1MTkyNDQzWjAMMAoGA1UdFQQDCgEFMCMCBEkDmfQXDTE2MDYxNTE5Mj + Q0M1owDDAKBgNVHRUEAwoBBTAjAgRJA5nyFw0xNjAyMDExOTM0MTlaMAwwCgYDVR0VBAMKAQAwIwI + ESQOXgBcNMTYwMTI2MTUwNTE5WjAMMAoGA1UdFQQDCgEAMCMCBEkDh0oXDTE1MTIxNzE3MzE0NVow + DDAKBgNVHRUEAwoBAzAjAgRJA3ZBFw0xNjAyMDIxNDM3MTZaMAwwCgYDVR0VBAMKAQMwIwIESQN2Q + BcNMTYwMjAyMTQzNzAzWjAMMAoGA1UdFQQDCgEDMCMCBEkDXsUXDTE1MTIwODIwMTM0OVowDDAKBg + NVHRUEAwoBAzAjAgRJA17EFw0xNTEyMDgyMDEzNDlaMAwwCgYDVR0VBAMKAQMwIwIESQNewxcNMTU + xMjA4MjAxMzUwWjAMMAoGA1UdFQQDCgEDMCMCBEkDWrkXDTE1MTIwODIwMTM1MFowDDAKBgNVHRUE + AwoBAzAjAgRJA1q4Fw0xNTEyMDgyMDEzNTBaMAwwCgYDVR0VBAMKAQMwIwIESQNatxcNMTUxMjA4M + jAxMzUwWjAMMAoGA1UdFQQDCgEDMCMCBEkDNjMXDTE2MDcwNTIwMDcxMlowDDAKBgNVHRUEAwoBBT + AjAgRJAwpwFw0xNjA2MTUxOTQwMDNaMAwwCgYDVR0VBAMKAQAwIwIESQMKbxcNMTYwNjE1MTk0MDA + zWjAMMAoGA1UdFQQDCgEAMCMCBEkC2Z0XDTE0MTAyMDE2NDgzN1owDDAKBgNVHRUEAwoBBTAjAgRJ + AthhFw0xNDEwMjAxNjQ4MzdaMAwwCgYDVR0VBAMKAQUwIwIESQLX7RcNMTQxMTEyMjAyNjA1WjAMM + AoGA1UdFQQDCgEFMCMCBEkC1+sXDTE0MTAyNzE1NTI1OVowDDAKBgNVHRUEAwoBAzAjAgRJAn2hFw + 0xNDAzMTMxNjUwMjZaMAwwCgYDVR0VBAMKAQAwIwIESQJ9MxcNMTQwMzEyMTUxODI5WjAMMAoGA1U + dFQQDCgEAMCMCBEkCfTEXDTE0MDMxMjExMzMzNVowDDAKBgNVHRUEAwoBADAjAgRJAn0wFw0xNDAz + MTIxMjE4MjFaMAwwCgYDVR0VBAMKAQAwIwIESQJ8YxcNMTQwMzEyMTEyNzEwWjAMMAoGA1UdFQQDC + 
gEAMCMCBEkCfGEXDTE0MDMxMDE0NTYxNlowDDAKBgNVHRUEAwoBADAjAgRJAnxgFw0xNDAzMTAxNT + A4MTVaMAwwCgYDVR0VBAMKAQAwIwIESQJ8XhcNMTQwMzEwMTIzMDM3WjAMMAoGA1UdFQQDCgEAMCM + CBEkCfF0XDTE0MDMxMDE0NTMyMlowDDAKBgNVHRUEAwoBADAjAgRJAnxbFw0xNDAzMTAxMDQ5NDBa + MAwwCgYDVR0VBAMKAQAwIwIESQJ8WhcNMTQwMzEwMTIwOTM2WjAMMAoGA1UdFQQDCgEAMCMCBEkCe + ywXDTE0MDMwNzEwMzcxM1owDDAKBgNVHRUEAwoBADAjAgRJAnsrFw0xNDAzMTAxMDQ3MTdaMAwwCg + YDVR0VBAMKAQAwIwIESQJ6xRcNMTQwMzA2MTEwMDM3WjAMMAoGA1UdFQQDCgEAMCMCBEkCesQXDTE + 0MDMwNzEwMzMyNVowDDAKBgNVHRUEAwoBADAjAgRJAm7jFw0xNDAyMDQyMTMwMjFaMAwwCgYDVR0V + BAMKAQAwIwIESQJrWhcNMTQwMTI3MTIyMTI0WjAMMAoGA1UdFQQDCgEAMCMCBEkCa1kXDTE0MDMwN + jEwNTY0OFowDDAKBgNVHRUEAwoBADAjAgRJAmjyFw0xNDAxMjExMDEyMTlaMAwwCgYDVR0VBAMKAQ + AwIwIESQJiPRcNMTQwMTAyMTYwMjIxWjAMMAoGA1UdFQQDCgEAMCMCBEkCXFgXDTEzMTIxODE3NTI + wNVowDDAKBgNVHRUEAwoBADAjAgRJAlW1Fw0xMzEyMDIxNTAzNTVaMAwwCgYDVR0VBAMKAQAwIwIE + SQJVshcNMTMxMjAyMTQ1NTM2WjAMMAoGA1UdFQQDCgEAMCMCBEkCVbEXDTEzMTIwMjE0NTk1OVowD + DAKBgNVHRUEAwoBADAjAgRJAlWvFw0xMzEyMDIxNDE3MzBaMAwwCgYDVR0VBAMKAQAwIwIESQJVrh + cNMTMxMjAyMTQ0OTMxWjAMMAoGA1UdFQQDCgEAMCMCBEkCVawXDTEzMTIwMjEzMTA1OFowDDAKBgN + VHRUEAwoBADAjAgRJAlWrFw0xMzEyMDIxNDEyMTVaMAwwCgYDVR0VBAMKAQAwIwIESQJONRcNMTMx + MTEyMjExMzI0WjAMMAoGA1UdFQQDCgEAMCMCBEkCJrkXDTEzMDkxMDA2NDUyNFowDDAKBgNVHRUEA + woBADAjAgRJAhmPFw0xMzA4MjExMDM0MTFaMAwwCgYDVR0VBAMKAQAwIwIESQIVrBcNMTMwODEyMT + g1NTU1WjAMMAoGA1UdFQQDCgEAMCMCBEkCFasXDTEzMTIxODE3MDQ0MlowDDAKBgNVHRUEAwoBADA + jAgRJAhAoFw0xMzA3MjkxNjAwMzVaMAwwCgYDVR0VBAMKAQAwIwIESQIQJxcNMTQwMTAyMTU1MDUy + WjAMMAoGA1UdFQQDCgEAMCMCBEkCCh8XDTEzMDcxNTA3MzY1NlowDDAKBgNVHRUEAwoBADAjAgRJA + gexFw0xMzA3MDgxNTU5MTRaMAwwCgYDVR0VBAMKAQAwIwIESQH73BcNMTMwNzI5MTU1NTAzWjAMMA + oGA1UdFQQDCgEAMCMCBEkB5EcXDTEzMDUyOTE0MDUyNVowDDAKBgNVHRUEAwoBADAjAgRJAcDtFw0 + xMzA1MTAyMDExNTBaMAwwCgYDVR0VBAMKAQAwIwIESQGmXBcNMTMwNDEwMDkyMTI2WjAMMAoGA1Ud + FQQDCgEAMCMCBEkBnj0XDTEzMDMyNTE4MTc0MFowDDAKBgNVHRUEAwoBADAjAgRJAYMOFw0xMzAyM + TExMTEwNDdaMAwwCgYDVR0VBAMKAQAwIwIESQF4PRcNMTMwODEyMTg0ODE2WjAMMAoGA1UdFQQDCg + EAMCMCBEkBcwcXDTEzMDEwMzE2NTgyMFowDDAKBgNVHRUEAwoBADAjAgRJAXMEFw0xMzAxMDMxMDA + yMjRaMAwwCgYDVR0VBAMKAQAwIwIESQFuRxcNMTMxMDA3MTMwMjM1WjAMMAoGA1UdFQQDCgEFMCMC + BEkBaLsXDTEzMDQxMDA5MTY1NVowDDAKBgNVHRUEAwoBADAjAgRJAWaQFw0xMjExMjkxNjAxMzJaM + AwwCgYDVR0VBAMKAQAwIwIESQFmhBcNMTIxMTI5MTE1NTIyWjAMMAoGA1UdFQQDCgEAMCMCBEkBZo + MXDTEyMTEyOTE1MjYwNVowDDAKBgNVHRUEAwoBADAjAgRJAWaBFw0xMjExMjkxMTAzNTJaMAwwCgY + DVR0VBAMKAQAwIwIESQFmgBcNMTIxMTI5MTE1MTU4WjAMMAoGA1UdFQQDCgEAMCMCBEkBYT8XDTEy + MTExNTA5NTI1OVowDDAKBgNVHRUEAwoBADAjAgRJAWCrFw0xMjExMTQxNDM2NDVaMAwwCgYDVR0VB + AMKAQAwIwIESQFgqhcNMTIxMTE1MDk0ODI1WjAMMAoGA1UdFQQDCgEAMCMCBEkBXT4XDTEzMTIwMj + EzMDcwMVowDDAKBgNVHRUEAwoBADAjAgRJAVvbFw0xMjExMjkxMTAwMzFaMAwwCgYDVR0VBAMKAQC + gMDAuMAsGA1UdFAQEAgIo8DAfBgNVHSMEGDAWgBT0Fi4Bu6uQGaQoQg2dwB+crxCGKzANBgkqhkiG + 9w0BAQUFAAOCAQEATe14zpsSjrGcW4yNZrdGtsupuJge+DQV+h1ZwBEQtsmOmMvbSdMsu+vMvTzHQ + KWJq56picjixY6v4vPqhRRZWP8evOc0NuoxpiUhgez3CKFQoJ2bdeaS/WCfqss3Sa4FZTUzkVWZde + moDH8CcHt5in3H7SwF5i9/rKB/bLuTjQg+LRKh2E9+FAkJn1S/ZRh1Vjd/KuRFOXD6odjV54oTWE0 + 6PcHBdwip62ridLdQopt3+e1UgwKBNJAmBD6uMN1tPmenUYWxh4xI7Ft4HQR58TdIiTZmfQHmEkjl + dBNEAoUK1hvRy6E2mSdRq9Yex8f+rGdxI1+++6lHaN1+M8jQ4g== +userCertificate;binary:: MIKE/jCCg+YCAQEwX6FdMFukWTBXMQswCQYDVQQGEwJVUzEQMA4GA + 1UEChMHRW50cnVzdDEiMCAGA1UECxMZQ2VydGlmaWNhdGlvbiBBdXRob3JpdGllczESMBAGA1UECx + MJRENvbVN1YkNBMGegZTBjMFukWTBXMQswCQYDVQQGEwJVUzEQMA4GA1UEChMHRW50cnVzdDEiMCA + GA1UECxMZQ2VydGlmaWNhdGlvbiBBdXRob3JpdGllczESMBAGA1UECxMJRENvbVN1YkNBAgRIwMPg + 
MA0GCSqGSIb3DQEBBQUAAgRXh6kjMCIYDzIwMTcxMDE1MjI0NjEzWhgPMjAxNzExMTQyMjQ2MTNaM + IKCuTCCEQoGCSqGSIb2fQdEADGCEPswghD3gAEEMIIQ8DBvMFcxCzAJBgNVBAYTAlVTMRAwDgYDVQ + QKEwdFbnRydXN0MSIwIAYDVQQLExlDZXJ0aWZpY2F0aW9uIEF1dGhvcml0aWVzMRIwEAYDVQQLEwl + EQ29tU3ViQ0EWFENBIERvbWFpbiBTZWFyY2hiYXNlME4wPzEVMBMGCgmSJomT8ixkARkWBWxvY2Fs + MRQwEgYKCZImiZPyLGQBGRYEVGVzdDEQMA4GA1UECxMHRGV2aWNlcxYLQ0xTIERldmljZXMwgYswa + DEVMBMGCgmSJomT8ixkARkWBWxvY2FsMRQwEgYKCZImiZPyLGQBGRYEVGVzdDETMBEGA1UECxMKVG + VzdCBVc2VyczEkMCIGA1UECxMbU1NPIEFkbWluaXN0cmF0aW9uIEFjY291bnRzFh9DTFMgU1NPIEF + kbWluaXN0cmF0aW9uIEFjY291bnRzMFQwQjEVMBMGCgmSJomT8ixkARkWBWxvY2FsMRQwEgYKCZIm + iZPyLGQBGRYEVGVzdDETMBEGA1UECxMKVGVzdCBVc2VycxYOQ0xTIFRlc3QgVXNlcnMwfDBfMRUwE + wYKCZImiZPyLGQBGRYFbG9jYWwxFDASBgoJkiaJk/IsZAEZFgRUZXN0MRswGQYDVQQLExJEb21haW + 4gQ29udHJvbGxlcnMxEzARBgNVBAsTCkdCIFNlcnZlcnMWGUNMUyBHQiBEb21haW4gQ29udHJvbGx + lcnMwfDBfMRUwEwYKCZImiZPyLGQBGRYFbG9jYWwxFDASBgoJkiaJk/IsZAEZFgR0ZXN0MRswGQYD + VQQLExJEb21haW4gQ29udHJvbGxlcnMxEzARBgNVBAsTClVTIFNlcnZlcnMWGUNMUyBVUyBEb21ha + W4gQ29udHJvbGxlcnMwgaIwgY4xFTATBgoJkiaJk/IsZAEZFgVsb2NhbDEUMBIGCgmSJomT8ixkAR + kWBFRlc3QxFDASBgNVBAsTC1Rlc3QtT2ZmaWNlMRAwDgYDVQQLEwdTZXJ2ZXJzMRMwEQYDVQQLEwp + HQiBTZXJ2ZXJzMRQwEgYDVQQLEwtBcHBsaWNhdGlvbjEMMAoGA1UECxMDV0VCFg9DTFMgR0IgV2Vi + IEFwcHMwgbUwgaExFTATBgoJkiaJk/IsZAEZFgVsb2NhbDEUMBIGCgmSJomT8ixkARkWBFRlc3QxF + DASBgNVBAsTC1Rlc3QtT2ZmaWNlMRAwDgYDVQQLEwdTZXJ2ZXJzMRMwEQYDVQQLEwpHQiBTZXJ2ZX + JzMRQwEgYDVQQLEwtBcHBsaWNhdGlvbjEMMAoGA1UECxMDV0VCMREwDwYDVQQLEwhJbnRyYW5ldBY + PQ0xTIEdCIEludHJhbmV0MIG1MIGhMRUwEwYKCZImiZPyLGQBGRYFbG9jYWwxFDASBgoJkiaJk/Is + ZAEZFgRUZXN0MRQwEgYDVQQLEwtUZXN0LU9mZmljZTEQMA4GA1UECxMHU2VydmVyczETMBEGA1UEC + xMKVVMgU2VydmVyczEUMBIGA1UECxMLQXBwbGljYXRpb24xDDAKBgNVBAsTA1dFQjERMA8GA1UECx + MISW50cmFuZXQWD0NMUyBVUyBJbnRyYW5ldDA8MDExCzAJBgNVBAYTAlVTMRAwDgYDVQQKEwdFbnR + ydXN0MRAwDgYDVQQLEwdEeW5Db3JwFgdEeW5Db3JwMEowODELMAkGA1UEBhMCVVMxEDAOBgNVBAoT + B0VudHJ1c3QxFzAVBgNVBAsTDkFkbWluaXN0cmF0b3JzFg5BZG1pbmlzdHJhdG9yczBKMDgxCzAJB + gNVBAYTAlVTMRAwDgYDVQQKEwdFbnRydXN0MRcwFQYDVQQLEw5HZW5lcmFsIE1vdG9ycxYOR2VuZX + JhbCBNb3RvcnMwczBZMQswCQYDVQQGEwJVUzEQMA4GA1UEChMHRW50cnVzdDEXMBUGA1UECxMOR2V + uZXJhbCBNb3RvcnMxHzAdBgNVBAsTFkdNIFVzZXIgQWRtaW5pc3RyYXRvcnMWFkdNIFVzZXIgQWRt + aW5pc3RyYXRvcnMwXzBPMQswCQYDVQQGEwJVUzEQMA4GA1UEChMHRW50cnVzdDEXMBUGA1UECxMOR + 2VuZXJhbCBNb3RvcnMxFTATBgNVBAsTDEdNIEVuZCBVc2VycxYMR00gRW5kIFVzZXJzMFYwQzEVMB + MGCgmSJomT8ixkARkWBWxvY2FsMRQwEgYKCZImiZPyLGQBGRYEVGVzdDEUMBIGA1UECxMLV2ViIFN + lcnZlcnMWD0NMUyBXZWIgU2VydmVyczBeMEcxFTATBgoJkiaJk/IsZAEZFgVsb2NhbDEUMBIGCgmS + JomT8ixkARkWBFRlc3QxGDAWBgNVBAsTD0NNUyBBZG1pbiBVc2VycxYTQ0xTIENNUyBBZG1pbiBVc + 2VyczBeMEcxFTATBgoJkiaJk/IsZAEZFgVsb2NhbDEUMBIGCgmSJomT8ixkARkWBFRlc3QxGDAWBg + NVBAsTD1BLSSBBZG1pbiBVc2VycxYTQ0xTIFBLSSBBZG1pbiBVc2VyczBLMD8xCzAJBgNVBAYTAnV + zMRAwDgYDVQQKEwdlbnRydXN0MQ8wDQYDVQQLEwZtb2JpbGUxDTALBgNVBAsTBGRlbW8WCERlbW8g + TURNMEgwMzELMAkGA1UEBhMCVVMxEDAOBgNVBAoTB0VudHJ1c3QxEjAQBgNVBAsTCUVtcGxveWVlc + xYRRW50cnVzdCBFbXBsb3llZXMwWzBQMRUwEwYKCZImiZPyLGQBGRYFTG9jYWwxFDASBgoJkiaJk/ + IsZAEZFgRUZXN0MRMwEQYDVQQLEwpUZXN0IFVzZXJzMQwwCgYDVQQHEwNERVYWB0NMUyBERVYwJDA + cMQswCQYDVQQGEwJ1czENMAsGA1UEChMETklTVBYETklTVDB2MGcxCzAJBgNVBAYTAlVTMRAwDgYD + VQQKEwdFbnRydXN0MRkwFwYDVQQLExBNYW5hZ2VkIFNlcnZpY2VzMRkwFwYDVQQLExBEZW1vIENvb + VByaXYgU3ViMRAwDgYDVQQLEwdEZXZpY2VzFgtNU08gRGV2aWNlczCBhDBuMQswCQYDVQQGEwJVUz + EQMA4GA1UEChMHRW50cnVzdDEZMBcGA1UECxMQTWFuYWdlZCBTZXJ2aWNlczEZMBcGA1UECxMQRGV + tbyBDb21Qcml2IFN1YjEXMBUGA1UECxMOQWRtaW5pc3RyYXRvcnMWEk1TTyBBZG1pbmlzdHJhdG9y + 
czB6MGkxCzAJBgNVBAYTAlVTMRAwDgYDVQQKEwdFbnRydXN0MRkwFwYDVQQLExBNYW5hZ2VkIFNlc + nZpY2VzMRkwFwYDVQQLExBEZW1vIENvbVByaXYgU3ViMRIwEAYDVQQLEwlFbXBsb3llZXMWDU1TTy + BFbXBsb3llZXMwRDAxMQswCQYDVQQGEwJVUzEQMA4GA1UEChMHR290U3ZlbjEQMA4GA1UECxMHRGV + 2aWNlcxYPR290U3ZlbiBEZXZpY2VzMIGEMFoxCzAJBgNVBAYTAlVTMRAwDgYDVQQKEwdFbnRydXN0 + MSAwHgYDVQQLExdFbnRydXN0IFNhbGVzIEVuZ2luZWVyczEXMBUGA1UECxMOQWRtaW5pc3RyYXRvc + nMWJkVudHJ1c3QgU2FsZXMgRW5naW5lZXJzIEFkbWluaXN0cmF0b3JzMHYwUzELMAkGA1UEBhMCVV + MxEDAOBgNVBAoTB0VudHJ1c3QxIDAeBgNVBAsTF0VudHJ1c3QgU2FsZXMgRW5naW5lZXJzMRAwDgY + DVQQLEwdEZXZpY2VzFh9FbnRydXN0IFNhbGVzIEVuZ2luZWVycyBEZXZpY2VzMHIwUTELMAkGA1UE + BhMCVVMxEDAOBgNVBAoTB0VudHJ1c3QxIDAeBgNVBAsTF0VudHJ1c3QgU2FsZXMgRW5naW5lZXJzM + Q4wDAYDVQQLEwVDYXJkcxYdRW50cnVzdCBTYWxlcyBFbmdpbmVlcnMgQ2FyZHMwdDBSMQswCQYDVQ + QGEwJVUzEQMA4GA1UEChMHRW50cnVzdDEgMB4GA1UECxMXRW50cnVzdCBTYWxlcyBFbmdpbmVlcnM + xDzANBgNVBAsTBlBlb3BsZRYeRW50cnVzdCBTYWxlcyBFbmdpbmVlcnMgUGVvcGxlMIGKMF0xCzAJ + BgNVBAYTAlVTMRAwDgYDVQQKEwdFbnRydXN0MSMwIQYDVQQLExpFbnRydXN0IFByb2R1Y3QgTWFuY + WdlbWVudDEXMBUGA1UECxMOQWRtaW5pc3RyYXRvcnMWKUVudHJ1c3QgUHJvZHVjdCBNYW5hZ2VtZW + 50IEFkbWluaXN0cmF0b3JzMHwwVjELMAkGA1UEBhMCVVMxEDAOBgNVBAoTB0VudHJ1c3QxIzAhBgN + VBAsTGkVudHJ1c3QgUHJvZHVjdCBNYW5hZ2VtZW50MRAwDgYDVQQLEwdEZXZpY2VzFiJFbnRydXN0 + IFByb2R1Y3QgTWFuYWdlbWVudCBEZXZpY2VzMHgwVDELMAkGA1UEBhMCVVMxEDAOBgNVBAoTB0Vud + HJ1c3QxIzAhBgNVBAsTGkVudHJ1c3QgUHJvZHVjdCBNYW5hZ2VtZW50MQ4wDAYDVQQLEwVDYXJkcx + YgRW50cnVzdCBQcm9kdWN0IE1hbmFnZW1lbnQgQ2FyZHMwejBVMQswCQYDVQQGEwJVUzEQMA4GA1U + EChMHRW50cnVzdDEjMCEGA1UECxMaRW50cnVzdCBQcm9kdWN0IE1hbmFnZW1lbnQxDzANBgNVBAsT + BlBlb3BsZRYhRW50cnVzdCBQcm9kdWN0IE1hbmFnZW1lbnQgUGVvcGxlMCQwHDELMAkGA1UEBhMCT + loxDTALBgNVBAoTBExJTloWBExJTlowTDA1MQswCQYDVQQGEwJOWjENMAsGA1UEChMETElOWjEXMB + UGA1UECxMOQWRtaW5pc3RyYXRvcnMWE0xJTlogQWRtaW5pc3RyYXRvcnMwPjAuMQswCQYDVQQGEwJ + OWjENMAsGA1UEChMETElOWjEQMA4GA1UECxMHRGV2aWNlcxYMTElOWiBEZXZpY2VzMDwwLTELMAkG + A1UEBhMCTloxDTALBgNVBAoTBExJTloxDzANBgNVBAsTBlBlb3BsZRYLTElOWiBQZW9wbGUwVDA0M + QswCQYDVQQGEwJVUzElMCMGA1UEChMcTWFnZWxsYW4gSGVhbHRoIFNlcnZpY2VzIEluYxYcTWFnZW + xsYW4gSGVhbHRoIFNlcnZpY2VzIEluYzBnMFExFTATBgoJkiaJk/IsZAEZFgVsb2NhbDEUMBIGCgm + SJomT8ixkARkWBHRlc3QxEzARBgNVBAsTClRlc3QgVXNlcnMxDTALBgNVBAcTBFRlc3QWEkNMUyBU + ZXN0IFVzZXIgVGVzdDBEMDoxCzAJBgNVBAYTAnVzMSswKQYDVQQKEyJGZWRlcmFsIEhvbWUgTG9hb + iBCYW5rIG9mIE5ldyBZb3JrFgZGSExCTlkwWjBKMQswCQYDVQQGEwJ1czErMCkGA1UEChMiRmVkZX + JhbCBIb21lIExvYW4gQmFuayBvZiBOZXcgWW9yazEOMAwGA1UECxMFMUxpbmsWDEZITEJOWSAxTGl + uazBcMEsxCzAJBgNVBAYTAnVzMSswKQYDVQQKEyJGZWRlcmFsIEhvbWUgTG9hbiBCYW5rIG9mIE5l + dyBZb3JrMQ8wDQYDVQQLEwZBZG1pbnMWDUZITEJOWSBBZG1pbnMwSAYJKoZIhvZ9B0QQMTswOTAQA + gEAAgEAAgEIAgEPAwIDeDAQAgEAAgEAAgEIAgEKAwIAeTAQAgEAAgEAAgEIAgEKAwIAeQMBADBxBg + kqhkiG9n0HTUAxZAxiQUVTLUNCQy0xMjgsIEFFUy1DQkMtMjU2LCBBRVMtR0NNLTEyOCwgQUVTLUd + DTS0yNTYsIFRSSVBMRURFUy1DQkMtMTkyLCBDQVNUNS1DQkMtODAsIENBU1Q1LUNCQy0xMjgwdgYJ + KoZIhvZ9B01BMWkMZ0VDRFNBLVJFQ09NTUVOREVELCBSU0FQU1MtUkVDT01NRU5ERUQsIFJTQS1SR + UNPTU1FTkRFRCwgRFNBLVJFQ09NTUVOREVELCBFQ0RTQS1TSEExLCBSU0EtU0hBMSwgRFNBLVNIQT + EwFwYJKoZIhvZ9B00QMQoECFJTQS0yMDQ4MIIWSQYJKoZIhvZ9B00AMYIWOjCCFjYwgYACAQAwADB + 5MQswCQYDVQQGEwJVUzEQMA4GA1UEChMHRW50cnVzdDEiMCAGA1UECxMZQ2VydGlmaWNhdGlvbiBB + dXRob3JpdGllczESMBAGA1UECxMJRENvbVN1YkNBMSAwHgYDVQQDExdTZWN1cml0eSBPZmZpY2VyI + FBvbGljeTB9AgEBMAAwdjELMAkGA1UEBhMCVVMxEDAOBgNVBAoTB0VudHJ1c3QxIjAgBgNVBAsTGU + NlcnRpZmljYXRpb24gQXV0aG9yaXRpZXMxEjAQBgNVBAsTCURDb21TdWJDQTEdMBsGA1UEAxMUQWR + taW5pc3RyYXRvciBQb2xpY3kweAIBAjAAMHExCzAJBgNVBAYTAlVTMRAwDgYDVQQKEwdFbnRydXN0 + 
MSIwIAYDVQQLExlDZXJ0aWZpY2F0aW9uIEF1dGhvcml0aWVzMRIwEAYDVQQLEwlEQ29tU3ViQ0ExG + DAWBgNVBAMTD0VuZCBVc2VyIFBvbGljeTB9AgEDMAAwdjELMAkGA1UEBhMCVVMxEDAOBgNVBAoTB0 + VudHJ1c3QxIjAgBgNVBAsTGUNlcnRpZmljYXRpb24gQXV0aG9yaXRpZXMxEjAQBgNVBAsTCURDb21 + TdWJDQTEdMBsGA1UEAxMUQWRtaW5pc3RyYXRvciBQb2xpY3kwfQIBBDAAMHYxCzAJBgNVBAYTAlVT + MRAwDgYDVQQKEwdFbnRydXN0MSIwIAYDVQQLExlDZXJ0aWZpY2F0aW9uIEF1dGhvcml0aWVzMRIwE + AYDVQQLEwlEQ29tU3ViQ0ExHTAbBgNVBAMTFEFkbWluaXN0cmF0b3IgUG9saWN5MHMCAQUwADBsMQ + swCQYDVQQGEwJVUzEQMA4GA1UEChMHRW50cnVzdDEiMCAGA1UECxMZQ2VydGlmaWNhdGlvbiBBdXR + ob3JpdGllczESMBAGA1UECxMJRENvbVN1YkNBMRMwEQYDVQQDEwpBU0ggUG9saWN5MH0CAQYwADB2 + MQswCQYDVQQGEwJVUzEQMA4GA1UEChMHRW50cnVzdDEiMCAGA1UECxMZQ2VydGlmaWNhdGlvbiBBd + XRob3JpdGllczESMBAGA1UECxMJRENvbVN1YkNBMR0wGwYDVQQDExRBZG1pbmlzdHJhdG9yIFBvbG + ljeTB9AgEHMAAwdjELMAkGA1UEBhMCVVMxEDAOBgNVBAoTB0VudHJ1c3QxIjAgBgNVBAsTGUNlcnR + pZmljYXRpb24gQXV0aG9yaXRpZXMxEjAQBgNVBAsTCURDb21TdWJDQTEdMBsGA1UEAxMUQWRtaW5p + c3RyYXRvciBQb2xpY3kwfAIBCDAAMHUxCzAJBgNVBAYTAlVTMRAwDgYDVQQKEwdFbnRydXN0MSIwI + AYDVQQLExlDZXJ0aWZpY2F0aW9uIEF1dGhvcml0aWVzMRIwEAYDVQQLEwlEQ29tU3ViQ0ExHDAaBg + NVBAMTE1NlcnZlciBMb2dpbiBQb2xpY3kwfAIBCTAAMHUxCzAJBgNVBAYTAlVTMRAwDgYDVQQKEwd + FbnRydXN0MSIwIAYDVQQLExlDZXJ0aWZpY2F0aW9uIEF1dGhvcml0aWVzMRIwEAYDVQQLEwlEQ29t + U3ViQ0ExHDAaBgNVBAMTE1NlcnZlciBMb2dpbiBQb2xpY3kwfQIBCjAAMHYxCzAJBgNVBAYTAlVTM + RAwDgYDVQQKEwdFbnRydXN0MSIwIAYDVQQLExlDZXJ0aWZpY2F0aW9uIEF1dGhvcml0aWVzMRIwEA + YDVQQLEwlEQ29tU3ViQ0ExHTAbBgNVBAMTFEFkbWluaXN0cmF0b3IgUG9saWN5MIGAAgEMMAAweTE + LMAkGA1UEBhMCVVMxEDAOBgNVBAoTB0VudHJ1c3QxIjAgBgNVBAsTGUNlcnRpZmljYXRpb24gQXV0 + aG9yaXRpZXMxEjAQBgNVBAsTCURDb21TdWJDQTEgMB4GA1UEAxMXQ0xTIFNlcnZlciBMb2dpbiBQb + 2xpY3kwgYACAQ0wADB5MQswCQYDVQQGEwJVUzEQMA4GA1UEChMHRW50cnVzdDEiMCAGA1UECxMZQ2 + VydGlmaWNhdGlvbiBBdXRob3JpdGllczESMBAGA1UECxMJRENvbVN1YkNBMSAwHgYDVQQDExdTZWN + 1cml0eSBPZmZpY2VyIFBvbGljeTCBgAIBDjAAMHkxCzAJBgNVBAYTAlVTMRAwDgYDVQQKEwdFbnRy + dXN0MSIwIAYDVQQLExlDZXJ0aWZpY2F0aW9uIEF1dGhvcml0aWVzMRIwEAYDVQQLEwlEQ29tU3ViQ + 0ExIDAeBgNVBAMTF1NlY3VyaXR5IE9mZmljZXIgUG9saWN5MH0CAQ8wADB2MQswCQYDVQQGEwJVUz + EQMA4GA1UEChMHRW50cnVzdDEiMCAGA1UECxMZQ2VydGlmaWNhdGlvbiBBdXRob3JpdGllczESMBA + GA1UECxMJRENvbVN1YkNBMR0wGwYDVQQDExRBZG1pbmlzdHJhdG9yIFBvbGljeTB9AgERMAAwdjEL + MAkGA1UEBhMCVVMxEDAOBgNVBAoTB0VudHJ1c3QxIjAgBgNVBAsTGUNlcnRpZmljYXRpb24gQXV0a + G9yaXRpZXMxEjAQBgNVBAsTCURDb21TdWJDQTEdMBsGA1UEAxMUQWRtaW5pc3RyYXRvciBQb2xpY3 + kwfAIBCzAAMHUxCzAJBgNVBAYTAlVTMRAwDgYDVQQKEwdFbnRydXN0MSIwIAYDVQQLExlDZXJ0aWZ + pY2F0aW9uIEF1dGhvcml0aWVzMRIwEAYDVQQLEwlEQ29tU3ViQ0ExHDAaBgNVBAMTE0NMUyBFbmQg + VXNlciBQb2xpY3kwfQIBEjAAMHYxCzAJBgNVBAYTAlVTMRAwDgYDVQQKEwdFbnRydXN0MSIwIAYDV + QQLExlDZXJ0aWZpY2F0aW9uIEF1dGhvcml0aWVzMRIwEAYDVQQLEwlEQ29tU3ViQ0ExHTAbBgNVBA + MTFEFkbWluaXN0cmF0b3IgUG9saWN5MH0CARMwADB2MQswCQYDVQQGEwJVUzEQMA4GA1UEChMHRW5 + 0cnVzdDEiMCAGA1UECxMZQ2VydGlmaWNhdGlvbiBBdXRob3JpdGllczESMBAGA1UECxMJRENvbVN1 + YkNBMR0wGwYDVQQDExRBZG1pbmlzdHJhdG9yIFBvbGljeTCBgAIBFDAAMHkxCzAJBgNVBAYTAlVTM + RAwDgYDVQQKEwdFbnRydXN0MSIwIAYDVQQLExlDZXJ0aWZpY2F0aW9uIEF1dGhvcml0aWVzMRIwEA + YDVQQLEwlEQ29tU3ViQ0ExIDAeBgNVBAMTF0R5bkNvcnAgRW5kIFVzZXIgUG9saWN5MH8CASAwADB + 4MQswCQYDVQQGEwJVUzEQMA4GA1UEChMHRW50cnVzdDEiMCAGA1UECxMZQ2VydGlmaWNhdGlvbiBB + dXRob3JpdGllczESMBAGA1UECxMJRENvbVN1YkNBMR8wHQYDVQQDExZDU1JFUyBSZXF1ZXN0b3IgU + G9saWN5MHkCASEwADByMQswCQYDVQQGEwJVUzEQMA4GA1UEChMHRW50cnVzdDEiMCAGA1UECxMZQ2 + VydGlmaWNhdGlvbiBBdXRob3JpdGllczESMBAGA1UECxMJRENvbVN1YkNBMRkwFwYDVQQDExBNRE1 + XUyBYQVAgUG9saWN5MEkCASIwADBCMQswCQYDVQQGEwJVUzEQMA4GA1UEChMHRW50cnVzdDEhMB8G + 
A1UEAxMYU09BUCBBZG1pbiBFeHBvcnQgUG9saWN5MIGDAgEjMAAwfDELMAkGA1UEBhMCVVMxEDAOB + gNVBAoTB0VudHJ1c3QxIjAgBgNVBAsTGUNlcnRpZmljYXRpb24gQXV0aG9yaXRpZXMxEjAQBgNVBA + sTCURDb21TdWJDQTEjMCEGA1UEAxMaRXhwb3J0YWJsZSBFbmQgVXNlciBQb2xpY3kweAIBJDAAMHE + xCzAJBgNVBAYTAlVTMRAwDgYDVQQKEwdFbnRydXN0MSIwIAYDVQQLExlDZXJ0aWZpY2F0aW9uIEF1 + dGhvcml0aWVzMRIwEAYDVQQLEwlEQ29tU3ViQ0ExGDAWBgNVBAMTD0VuZCBVc2VyIFBvbGljeTB9A + gElMAAwdjELMAkGA1UEBhMCVVMxEDAOBgNVBAoTB0VudHJ1c3QxIjAgBgNVBAsTGUNlcnRpZmljYX + Rpb24gQXV0aG9yaXRpZXMxEjAQBgNVBAsTCURDb21TdWJDQTEdMBsGA1UEAxMUQWRtaW5pc3RyYXR + vciBQb2xpY3kwfQIBJjAAMHYxCzAJBgNVBAYTAlVTMRAwDgYDVQQKEwdFbnRydXN0MSIwIAYDVQQL + ExlDZXJ0aWZpY2F0aW9uIEF1dGhvcml0aWVzMRIwEAYDVQQLEwlEQ29tU3ViQ0ExHTAbBgNVBAMTF + E1vYmlsZSBEZXZpY2UgUG9saWN5MHwCAScwADB1MQswCQYDVQQGEwJVUzEQMA4GA1UEChMHRW50cn + VzdDEiMCAGA1UECxMZQ2VydGlmaWNhdGlvbiBBdXRob3JpdGllczESMBAGA1UECxMJRENvbVN1YkN + BMRwwGgYDVQQDExNTZXJ2ZXIgTG9naW4gUG9saWN5MH0CASgwADB2MQswCQYDVQQGEwJVUzEQMA4G + A1UEChMHRW50cnVzdDEiMCAGA1UECxMZQ2VydGlmaWNhdGlvbiBBdXRob3JpdGllczESMBAGA1UEC + xMJRENvbVN1YkNBMR0wGwYDVQQDExRBZG1pbmlzdHJhdG9yIFBvbGljeTCBgQIBKTAAMHoxCzAJBg + NVBAYTAlVTMRAwDgYDVQQKEwdFbnRydXN0MSIwIAYDVQQLExlDZXJ0aWZpY2F0aW9uIEF1dGhvcml + 0aWVzMRIwEAYDVQQLEwlEQ29tU3ViQ0ExITAfBgNVBAMTGFNQT0MgU2VydmVyIExvZ2luIFBvbGlj + eTCBggIBKjAAMHsxCzAJBgNVBAYTAlVTMRAwDgYDVQQKEwdFbnRydXN0MSIwIAYDVQQLExlDZXJ0a + WZpY2F0aW9uIEF1dGhvcml0aWVzMRIwEAYDVQQLEwlEQ29tU3ViQ0ExIjAgBgNVBAMTGVNQT0MgQW + RtaW5pc3RyYXRvciBQb2xpY3kwfAIBKzAAMHUxCzAJBgNVBAYTAlVTMRAwDgYDVQQKEwdFbnRydXN + 0MSIwIAYDVQQLExlDZXJ0aWZpY2F0aW9uIEF1dGhvcml0aWVzMRIwEAYDVQQLEwlEQ29tU3ViQ0Ex + HDAaBgNVBAMTE1NlcnZlciBMb2dpbiBQb2xpY3kwgZECASwwADCBiTELMAkGA1UEBhMCVVMxEDAOB + gNVBAoTB0VudHJ1c3QxIjAgBgNVBAsTGUNlcnRpZmljYXRpb24gQXV0aG9yaXRpZXMxEjAQBgNVBA + sTCURDb21TdWJDQTEwMC4GA1UEAxMnTWFzdGVyIExpc3QgU2lnbmVyIEFkbWluaXN0cmF0b3IgUG9 + saWN5MH0CAS0wADB2MQswCQYDVQQGEwJVUzEQMA4GA1UEChMHRW50cnVzdDEiMCAGA1UECxMZQ2Vy + dGlmaWNhdGlvbiBBdXRob3JpdGllczESMBAGA1UECxMJRENvbVN1YkNBMR0wGwYDVQQDExRBZG1pb + mlzdHJhdG9yIFBvbGljeTB4AgEuMAAwcTELMAkGA1UEBhMCVVMxEDAOBgNVBAoTB0VudHJ1c3QxIj + AgBgNVBAsTGUNlcnRpZmljYXRpb24gQXV0aG9yaXRpZXMxEjAQBgNVBAsTCURDb21TdWJDQTEYMBY + GA1UEAxMPRW5kIFVzZXIgUG9saWN5MH0CAS8wADB2MQswCQYDVQQGEwJVUzEQMA4GA1UEChMHRW50 + cnVzdDEiMCAGA1UECxMZQ2VydGlmaWNhdGlvbiBBdXRob3JpdGllczESMBAGA1UECxMJRENvbVN1Y + kNBMR0wGwYDVQQDExRBZG1pbmlzdHJhdG9yIFBvbGljeTB4AgExMAAwcTELMAkGA1UEBhMCVVMxED + AOBgNVBAoTB0VudHJ1c3QxIjAgBgNVBAsTGUNlcnRpZmljYXRpb24gQXV0aG9yaXRpZXMxEjAQBgN + VBAsTCURDb21TdWJDQTEYMBYGA1UEAxMPRW5kIFVzZXIgUG9saWN5MH0CATIwADB2MQswCQYDVQQG + EwJVUzEQMA4GA1UEChMHRW50cnVzdDEiMCAGA1UECxMZQ2VydGlmaWNhdGlvbiBBdXRob3JpdGllc + zESMBAGA1UECxMJRENvbVN1YkNBMR0wGwYDVQQDExRBZG1pbmlzdHJhdG9yIFBvbGljeTB8AgEwMA + AwdTELMAkGA1UEBhMCVVMxEDAOBgNVBAoTB0VudHJ1c3QxIjAgBgNVBAsTGUNlcnRpZmljYXRpb24 + gQXV0aG9yaXRpZXMxEjAQBgNVBAsTCURDb21TdWJDQTEcMBoGA1UEAxMTU2VydmVyIExvZ2luIFBv + bGljeTB9AgEzMAAwdjELMAkGA1UEBhMCVVMxEDAOBgNVBAoTB0VudHJ1c3QxIjAgBgNVBAsTGUNlc + nRpZmljYXRpb24gQXV0aG9yaXRpZXMxEjAQBgNVBAsTCURDb21TdWJDQTEdMBsGA1UEAxMUQWRtaW + 5pc3RyYXRvciBQb2xpY3kwfQIBNTAAMHYxCzAJBgNVBAYTAlVTMRAwDgYDVQQKEwdFbnRydXN0MSI + wIAYDVQQLExlDZXJ0aWZpY2F0aW9uIEF1dGhvcml0aWVzMRIwEAYDVQQLEwlEQ29tU3ViQ0ExHTAb + BgNVBAMTFENhcmQgRW5kIFVzZXIgUG9saWN5MHgCATQwADBxMQswCQYDVQQGEwJVUzEQMA4GA1UEC + hMHRW50cnVzdDEiMCAGA1UECxMZQ2VydGlmaWNhdGlvbiBBdXRob3JpdGllczESMBAGA1UECxMJRE + NvbVN1YkNBMRgwFgYDVQQDEw9FbmQgVXNlciBQb2xpY3kwfAIBNjAAMHUxCzAJBgNVBAYTAlVTMRA + wDgYDVQQKEwdFbnRydXN0MSIwIAYDVQQLExlDZXJ0aWZpY2F0aW9uIEF1dGhvcml0aWVzMRIwEAYD + 
VQQLEwlEQ29tU3ViQ0ExHDAaBgNVBAMTE01ETSBFbmQgVXNlciBQb2xpY3kwfAIBNzAAMHUxCzAJB + gNVBAYTAlVTMRAwDgYDVQQKEwdFbnRydXN0MSIwIAYDVQQLExlDZXJ0aWZpY2F0aW9uIEF1dGhvcm + l0aWVzMRIwEAYDVQQLEwlEQ29tU3ViQ0ExHDAaBgNVBAMTE1NlcnZlciBMb2dpbiBQb2xpY3kwgYU + CATgwADB+MQswCQYDVQQGEwJVUzEQMA4GA1UEChMHRW50cnVzdDEiMCAGA1UECxMZQ2VydGlmaWNh + dGlvbiBBdXRob3JpdGllczESMBAGA1UECxMJRENvbVN1YkNBMSUwIwYDVQQDExxNU08gVU1TIEFkb + WluaXN0cmF0b3IgUG9saWN5MIJZ7wYKKoZIhvZ9B00uADGCWd8wglnbMDEwFwwSY3NjX3BpdjFrX2 + NhcmRhdXRoAgEnMBYwFDASDA1QaXYxS0NhcmRBdXRoAgFDMEwwEwwOY3NjX3Bpdm1peGVkXzMCASg + wNTAQMA4MCVBpdjFLQXV0aAIBRDAPMA0MCFBpdjJLRW5jAgFFMBAwDgwJUGl2MktTaWduAgFGMIG4 + MBAMC2VudF9hZF9jbHMxAgE3MIGjMIGgMA4MCUR1YWxVc2FnZQIBXDCBjTELMAkGA1UEBhMCVVMxE + DAOBgNVBAoTB0VudHJ1c3QxIjAgBgNVBAsTGUNlcnRpZmljYXRpb24gQXV0aG9yaXRpZXMxEjAQBg + NVBAsTCURDb21TdWJDQTE0MDIGA1UEAxMrQ0xTIDF5ciBEb21haW4gQ29udHJvbGxlciBEdWFsIFV + zYWdlIFBvbGljeTCBuDAQDAtlbnRfYWRfY2xzMgIBODCBozCBoDAODAlEdWFsVXNhZ2UCAV0wgY0x + CzAJBgNVBAYTAlVTMRAwDgYDVQQKEwdFbnRydXN0MSIwIAYDVQQLExlDZXJ0aWZpY2F0aW9uIEF1d + Ghvcml0aWVzMRIwEAYDVQQLEwlEQ29tU3ViQ0ExNDAyBgNVBAMTK0NMUyAyeXIgRG9tYWluIENvbn + Ryb2xsZXIgRHVhbCBVc2FnZSBQb2xpY3kwdTARDAxlbnRfYWRfY2xzMm0CAVIwTjAjMBAMCkVuY3J + 5cHRpb24CAgCQog8MCkVuY3J5cHRpb24CAQEwJzASDAxWZXJpZmljYXRpb24CAgCRohEMDFZlcmlm + aWNhdGlvbgIBAqIQDAtlbnRfZGVmYXVsdAIBAzCBvjASDA1lbnRfYWRfY2xzMm1hAgFUMIGnMIGkM + A8MCUR1YWxVc2FnZQICAJQwgZAxCzAJBgNVBAYTAlVTMRAwDgYDVQQKEwdFbnRydXN0MSIwIAYDVQ + QLExlDZXJ0aWZpY2F0aW9uIEF1dGhvcml0aWVzMRIwEAYDVQQLEwlEQ29tU3ViQ0ExNzA1BgNVBAM + TLkNMUyAybW9udGggRG9tYWluIENvbnRyb2xsZXIgRHVhbCBVc2FnZSBQb2xpY3kwgbAwDgwJZW50 + X2FkX2RjAgF4MIGdMIGaMBAMCkR1YWwgVXNhZ2UCAgDSMIGFMQswCQYDVQQGEwJVUzEQMA4GA1UEC + hMHRW50cnVzdDEiMCAGA1UECxMZQ2VydGlmaWNhdGlvbiBBdXRob3JpdGllczESMBAGA1UECxMJRE + NvbVN1YkNBMSwwKgYDVQQDEyNFbnRlcnByaXNlIERvbWFpbiBDb250cm9sbGVyIFBvbGljeTBHMBk + ME2VudF9hZG1zcnZjc191bXNfZWECAgCLMCowEjAQDApFbmNyeXB0aW9uAgIA9DAUMBIMDFZlcmlm + aWNhdGlvbgICAPUwRTAZDBRlbnRfYWRtc3J2Y3NfdXNlcnJlZwIBEjAoMBEwDwwKRW5jcnlwdGlvb + gIBHjATMBEMDFZlcmlmaWNhdGlvbgIBHzCBzzAZDBRlbnRfYWRtc3J2Y3NfdXNybWdtdAIBETCBsT + ARMA8MCkVuY3J5cHRpb24CARwwgZswEQwMVmVyaWZpY2F0aW9uAgEdMIGFMQswCQYDVQQGEwJVUzE + QMA4GA1UEChMHRW50cnVzdDEiMCAGA1UECxMZQ2VydGlmaWNhdGlvbiBBdXRob3JpdGllczESMBAG + A1UECxMJRENvbVN1YkNBMSwwKgYDVQQDEyNUcnVlUGFzcyBTZXJ2ZXIgVmVyaWZpY2F0aW9uIFBvb + GljeTA6MA4MCWVudF9iYXNpYwIBJjAoMBEwDwwKRW5jcnlwdGlvbgIBQTATMBEMDFZlcmlmaWNhdG + lvbgIBQjCCATkwDQwIZW50X2NsczECAS8wggEmMIGOMA8MCkVuY3J5cHRpb24CAVIwezELMAkGA1U + EBhMCVVMxEDAOBgNVBAoTB0VudHJ1c3QxIjAgBgNVBAsTGUNlcnRpZmljYXRpb24gQXV0aG9yaXRp + ZXMxEjAQBgNVBAsTCURDb21TdWJDQTEiMCAGA1UEAxMZQ0xTIDF5ciBFbmNyeXB0aW9uIFBvbGlje + TCBkjARDAxWZXJpZmljYXRpb24CAVMwfTELMAkGA1UEBhMCVVMxEDAOBgNVBAoTB0VudHJ1c3QxIj + AgBgNVBAsTGUNlcnRpZmljYXRpb24gQXV0aG9yaXRpZXMxEjAQBgNVBAsTCURDb21TdWJDQTEkMCI + GA1UEAxMbQ0xTIDF5ciBWZXJpZmljYXRpb24gUG9saWN5MIIBOTANDAhlbnRfY2xzMgIBMDCCASYw + gY4wDwwKRW5jcnlwdGlvbgIBVDB7MQswCQYDVQQGEwJVUzEQMA4GA1UEChMHRW50cnVzdDEiMCAGA + 1UECxMZQ2VydGlmaWNhdGlvbiBBdXRob3JpdGllczESMBAGA1UECxMJRENvbVN1YkNBMSIwIAYDVQ + QDExlDTFMgMnlyIEVuY3J5cHRpb24gUG9saWN5MIGSMBEMDFZlcmlmaWNhdGlvbgIBVTB9MQswCQY + DVQQGEwJVUzEQMA4GA1UEChMHRW50cnVzdDEiMCAGA1UECxMZQ2VydGlmaWNhdGlvbiBBdXRob3Jp + dGllczESMBAGA1UECxMJRENvbVN1YkNBMSQwIgYDVQQDExtDTFMgMnlyIFZlcmlmaWNhdGlvbiBQb + 2xpY3kwQDASDA1lbnRfY2xzX2FkbWluAgFXMCowEjAQDApFbmNyeXB0aW9uAgIAmDAUMBIMDFZlcm + lmaWNhdGlvbgICAJkwggFPMBMMDmVudF9jbHNfYWRtaW4yAgFWMIIBNjCBljAQDApFbmNyeXB0aW9 + uAgIAljCBgTELMAkGA1UEBhMCVVMxEDAOBgNVBAoTB0VudHJ1c3QxIjAgBgNVBAsTGUNlcnRpZmlj + 
YXRpb24gQXV0aG9yaXRpZXMxEjAQBgNVBAsTCURDb21TdWJDQTEoMCYGA1UEAxMfQ0xTIEFkbWluI + DJ5ciBFbmNyeXB0aW9uIFBvbGljeTCBmjASDAxWZXJpZmljYXRpb24CAgCXMIGDMQswCQYDVQQGEw + JVUzEQMA4GA1UEChMHRW50cnVzdDEiMCAGA1UECxMZQ2VydGlmaWNhdGlvbiBBdXRob3JpdGllczE + SMBAGA1UECxMJRENvbVN1YkNBMSowKAYDVQQDEyFDTFMgQWRtaW4gMnlyIFZlcmlmaWNhdGlvbiBQ + b2xpY3kwgbgwFwwSZW50X2Ntc2NsaWVudF9jbHMxAgExMIGcMIGZMA8MCkR1YWwgVXNhZ2UCAVYwg + YUxCzAJBgNVBAYTAlVTMRAwDgYDVQQKEwdFbnRydXN0MSIwIAYDVQQLExlDZXJ0aWZpY2F0aW9uIE + F1dGhvcml0aWVzMRIwEAYDVQQLEwlEQ29tU3ViQ0ExLDAqBgNVBAMTI0NMUyAxeXIgQUkgQ2xpZW5 + 0IER1YWwgVXNhZ2UgUG9saWN5MIG/MBkMFGVudF9jbXNjbGllbnRfY2xzMV9mAgEzMIGhMIGeMA8M + CkR1YWwgVXNhZ2UCAVgwgYoxCzAJBgNVBAYTAlVTMRAwDgYDVQQKEwdFbnRydXN0MSIwIAYDVQQLE + xlDZXJ0aWZpY2F0aW9uIEF1dGhvcml0aWVzMRIwEAYDVQQLEwlEQ29tU3ViQ0ExMTAvBgNVBAMTKE + NMUyAxeXIgQUkgQ2xpZW50IEZpbGUgRHVhbCBVc2FnZSBQb2xpY3kwgbgwFwwSZW50X2Ntc2NsaWV + udF9jbHMyAgEyMIGcMIGZMA8MCkR1YWwgVXNhZ2UCAVcwgYUxCzAJBgNVBAYTAlVTMRAwDgYDVQQK + EwdFbnRydXN0MSIwIAYDVQQLExlDZXJ0aWZpY2F0aW9uIEF1dGhvcml0aWVzMRIwEAYDVQQLEwlEQ + 29tU3ViQ0ExLDAqBgNVBAMTI0NMUyAyeXIgQUkgQ2xpZW50IER1YWwgVXNhZ2UgUG9saWN5MIG/MB + kMFGVudF9jbXNjbGllbnRfY2xzMl9mAgE0MIGhMIGeMA8MCkR1YWwgVXNhZ2UCAVkwgYoxCzAJBgN + VBAYTAlVTMRAwDgYDVQQKEwdFbnRydXN0MSIwIAYDVQQLExlDZXJ0aWZpY2F0aW9uIEF1dGhvcml0 + aWVzMRIwEAYDVQQLEwlEQ29tU3ViQ0ExMTAvBgNVBAMTKENMUyAyeXIgQUkgQ2xpZW50IEZpbGUgR + HVhbCBVc2FnZSBQb2xpY3kwLjAXDBJlbnRfY21zY2xpZW50X3NrZHUCASowEzARMA8MCkR1YWwgVX + NhZ2UCAUkwMDAZDBRlbnRfY21zY2xpZW50X3NrZHVfZgIBKzATMBEwDwwKRHVhbCBVc2FnZQIBSjC + BuDAXDBJlbnRfY21zc2VydmVyX2NsczECATUwgZwwgZkwDwwKRHVhbCBVc2FnZQIBWjCBhTELMAkG + A1UEBhMCVVMxEDAOBgNVBAoTB0VudHJ1c3QxIjAgBgNVBAsTGUNlcnRpZmljYXRpb24gQXV0aG9ya + XRpZXMxEjAQBgNVBAsTCURDb21TdWJDQTEsMCoGA1UEAxMjQ0xTIDF5ciBBSSBTZXJ2ZXIgRHVhbC + BVc2FnZSBQb2xpY3kwgbgwFwwSZW50X2Ntc3NlcnZlcl9jbHMyAgE2MIGcMIGZMA8MCkR1YWwgVXN + hZ2UCAVswgYUxCzAJBgNVBAYTAlVTMRAwDgYDVQQKEwdFbnRydXN0MSIwIAYDVQQLExlDZXJ0aWZp + Y2F0aW9uIEF1dGhvcml0aWVzMRIwEAYDVQQLEwlEQ29tU3ViQ0ExLDAqBgNVBAMTI0NMUyAyeXIgQ + UkgU2VydmVyIER1YWwgVXNhZ2UgUG9saWN5MC4wFwwSZW50X2Ntc3NlcnZlcl9za2R1AgEsMBMwET + APDApEdWFsIFVzYWdlAgFLMEYwGAwSZW50X2NzcmVzX2FwcHJvdmVyAgIAjDAqMBIwEAwKRW5jcnl + wdGlvbgICAPYwFDASDAxWZXJpZmljYXRpb24CAgD3MEYwGAwTZW50X2NzcmVzX3JlcXVlc3RvcgIB + bzAqMBIwEAwKRW5jcnlwdGlvbgICAMUwFDASDAxWZXJpZmljYXRpb24CAgDGMDwwEAwLZW50X2RlZ + mF1bHQCAQMwKDARMA8MCkVuY3J5cHRpb24CAQEwEzARDAxWZXJpZmljYXRpb24CAQIwggE8MBAMC2 + VudF9kZXNrdG9wAgEHMIIBJjCBjjAPDApFbmNyeXB0aW9uAgEJMHsxCzAJBgNVBAYTAlVTMRAwDgY + DVQQKEwdFbnRydXN0MSIwIAYDVQQLExlDZXJ0aWZpY2F0aW9uIEF1dGhvcml0aWVzMRIwEAYDVQQL + EwlEQ29tU3ViQ0ExIjAgBgNVBAMTGVNhZmVOZXQgRW5jcnlwdGlvbiBQb2xpY3kwgZIwEQwMVmVya + WZpY2F0aW9uAgEKMH0xCzAJBgNVBAYTAlVTMRAwDgYDVQQKEwdFbnRydXN0MSIwIAYDVQQLExlDZX + J0aWZpY2F0aW9uIEF1dGhvcml0aWVzMRIwEAYDVQQLEwlEQ29tU3ViQ0ExJDAiBgNVBAMTG1NhZmV + OZXQgVmVyaWZpY2F0aW9uIFBvbGljeTCBpDAVDBBlbnRfZHVfYmFzaWNfZWt1AgFtMIGKMIGHMBAM + CkR1YWwgVXNhZ2UCAgDCMHMxCzAJBgNVBAYTAlVTMRAwDgYDVQQKEwdFbnRydXN0MSIwIAYDVQQLE + xlDZXJ0aWZpY2F0aW9uIEF1dGhvcml0aWVzMRIwEAYDVQQLEwlEQ29tU3ViQ0ExGjAYBgNVBAMTEU + R1YWwgVXNhZ2UgUG9saWN5MEMwFQwQZW50X2VhY2NhdHRhY2hlZAIBaDAqMBIwEAwKRW5jcnlwdGl + vbgICALgwFDASDAxWZXJpZmljYXRpb24CAgC5MD0wDwwKZW50X2VhY2NvbgIBajAqMBIwEAwKRW5j + cnlwdGlvbgICALwwFDASDAxWZXJpZmljYXRpb24CAgC9MEUwFwwSZW50X2VhY2NzdGFuZGFsb25lA + gFpMCowEjAQDApFbmNyeXB0aW9uAgIAujAUMBIMDFZlcmlmaWNhdGlvbgICALswggGiMAwMB2VudF + 9lZnMCARUwggGQMHgwCAwDRUZTAgEnMGwxCzAJBgNVBAYTAlVTMRAwDgYDVQQKEwdFbnRydXN0MSI + wIAYDVQQLExlDZXJ0aWZpY2F0aW9uIEF1dGhvcml0aWVzMRIwEAYDVQQLEwlEQ29tU3ViQ0ExEzAR + 
BgNVBAMTCkVGUyBQb2xpY3kwgYYwDwwKRW5jcnlwdGlvbgIBJTBzMQswCQYDVQQGEwJVUzEQMA4GA + 1UEChMHRW50cnVzdDEiMCAGA1UECxMZQ2VydGlmaWNhdGlvbiBBdXRob3JpdGllczESMBAGA1UECx + MJRENvbVN1YkNBMRowGAYDVQQDExFFbmNyeXB0aW9uIFBvbGljeTCBijARDAxWZXJpZmljYXRpb24 + CASYwdTELMAkGA1UEBhMCVVMxEDAOBgNVBAoTB0VudHJ1c3QxIjAgBgNVBAsTGUNlcnRpZmljYXRp + b24gQXV0aG9yaXRpZXMxEjAQBgNVBAsTCURDb21TdWJDQTEcMBoGA1UEAxMTVmVyaWZpY2F0aW9uI + FBvbGljeTBEMBgME2VudF9lc3Zwbl9jb21tZWRvaWQCASUwKDARMA8MCkVuY3J5cHRpb24CAT8wEz + ARDAxWZXJpZmljYXRpb24CAUAwggE7MA8MCmVudF9ldG9rZW4CAWwwggEmMIGOMBAMCkVuY3J5cHR + pb24CAgDAMHoxCzAJBgNVBAYTAlVTMRAwDgYDVQQKEwdFbnRydXN0MSIwIAYDVQQLExlDZXJ0aWZp + Y2F0aW9uIEF1dGhvcml0aWVzMRIwEAYDVQQLEwlEQ29tU3ViQ0ExITAfBgNVBAMTGGVUb2tlbiBFb + mNyeXB0aW9uIFBvbGljeTCBkjASDAxWZXJpZmljYXRpb24CAgDBMHwxCzAJBgNVBAYTAlVTMRAwDg + YDVQQKEwdFbnRydXN0MSIwIAYDVQQLExlDZXJ0aWZpY2F0aW9uIEF1dGhvcml0aWVzMRIwEAYDVQQ + LEwlEQ29tU3ViQ0ExIzAhBgNVBAMTGmVUb2tlbiBWZXJpZmljYXRpb24gUG9saWN5MIIBOTAPDApl + bnRfZXhwb3J0AgEGMIIBJDCBjTAPDApFbmNyeXB0aW9uAgEHMHoxCzAJBgNVBAYTAlVTMRAwDgYDV + QQKEwdFbnRydXN0MSIwIAYDVQQLExlDZXJ0aWZpY2F0aW9uIEF1dGhvcml0aWVzMRIwEAYDVQQLEw + lEQ29tU3ViQ0ExITAfBgNVBAMUGEVuY3J5cHRpb24gUG9saWN5X0V4cG9ydDCBkTARDAxWZXJpZml + jYXRpb24CAQgwfDELMAkGA1UEBhMCVVMxEDAOBgNVBAoTB0VudHJ1c3QxIjAgBgNVBAsTGUNlcnRp + ZmljYXRpb24gQXV0aG9yaXRpZXMxEjAQBgNVBAsTCURDb21TdWJDQTEjMCEGA1UEAxQaVmVyaWZpY + 2F0aW9uIFBvbGljeV9FeHBvcnQwggE9MBQMD2VudF9nZW1hbHRvX2NzcAIBXjCCASMwgYowEAwKRW + 5jcnlwdGlvbgICAKswdjELMAkGA1UEBhMCVVMxEDAOBgNVBAoTB0VudHJ1c3QxIjAgBgNVBAsTGUN + lcnRpZmljYXRpb24gQXV0aG9yaXRpZXMxEjAQBgNVBAsTCURDb21TdWJDQTEdMBsGA1UEAxMUR00g + RW5jcnlwdGlvbiBQb2xpY3kwgZMwEgwMVmVyaWZpY2F0aW9uAgIArDB9MQswCQYDVQQGEwJVUzEQM + A4GA1UEChMHRW50cnVzdDEiMCAGA1UECxMZQ2VydGlmaWNhdGlvbiBBdXRob3JpdGllczESMBAGA1 + UECxMJRENvbVN1YkNBMSQwIgYDVQQDExtHZW1hbHRvIFZlcmlmaWNhdGlvbiBQb2xpY3kwgb8wFgw + RZW50X2lpc19za2R1X2NsczECATkwgaQwgaEwDwwKRHVhbCBVc2FnZQIBXjCBjTELMAkGA1UEBhMC + VVMxEDAOBgNVBAoTB0VudHJ1c3QxIjAgBgNVBAsTGUNlcnRpZmljYXRpb24gQXV0aG9yaXRpZXMxE + jAQBgNVBAsTCURDb21TdWJDQTE0MDIGA1UEAxMrQ0xTIDF5ciBJSVMgRHVhbCBVc2FnZSBObyBLZX + kgQmFja3VwIFBvbGljeTCBvzAWDBFlbnRfaWlzX3NrZHVfY2xzMgIBOjCBpDCBoTAPDApEdWFsIFV + zYWdlAgFfMIGNMQswCQYDVQQGEwJVUzEQMA4GA1UEChMHRW50cnVzdDEiMCAGA1UECxMZQ2VydGlm + aWNhdGlvbiBBdXRob3JpdGllczESMBAGA1UECxMJRENvbVN1YkNBMTQwMgYDVQQDEytDTFMgMnlyI + ElJUyBEdWFsIFVzYWdlIE5vIEtleSBCYWNrdXAgUG9saWN5MHswFwwSZW50X2lpc19za2R1X2Nscz + JtAgFTME4wIzAQDApFbmNyeXB0aW9uAgIAkqIPDApFbmNyeXB0aW9uAgEBMCcwEgwMVmVyaWZpY2F + 0aW9uAgIAk6IRDAxWZXJpZmljYXRpb24CAQKiEAwLZW50X2RlZmF1bHQCAQMwgcUwGAwTZW50X2lp + c19za2R1X2NsczJtYQIBVTCBqDCBpTAQDApEdWFsIFVzYWdlAgIAlTCBkDELMAkGA1UEBhMCVVMxE + DAOBgNVBAoTB0VudHJ1c3QxIjAgBgNVBAsTGUNlcnRpZmljYXRpb24gQXV0aG9yaXRpZXMxEjAQBg + NVBAsTCURDb21TdWJDQTE3MDUGA1UEAxMuQ0xTIDJtb250aCBJSVMgRHVhbCBVc2FnZSBObyBLZXk + gQmFja3VwIFBvbGljeTCBpzAQDAtlbnRfbWFjaGluZQIBeTCBkjCBjzAQDApEdWFsIFVzYWdlAgIA + 0zB7MQswCQYDVQQGEwJVUzEQMA4GA1UEChMHRW50cnVzdDEiMCAGA1UECxMZQ2VydGlmaWNhdGlvb + iBBdXRob3JpdGllczESMBAGA1UECxMJRENvbVN1YkNBMSIwIAYDVQQDExlFbnRlcnByaXNlIE1hY2 + hpbmUgUG9saWN5MEAwEgwNZW50X21kbXdzX2NsaQIBcDAqMBIwEAwKRW5jcnlwdGlvbgICAMcwFDA + SDAxWZXJpZmljYXRpb24CAgDIMEIwFAwPZW50X21saXN0X2FkbWluAgF/MCowEjAQDApFbmNyeXB0 + aW9uAgIA3jAUMBIMDFZlcmlmaWNhdGlvbgICAN8wggE5MBUMEGVudF9tbGlzdF9zaWduZXICAX4wg + gEeMIGHMBAMCkVuY3J5cHRpb24CAgDcMHMxCzAJBgNVBAYTAlVTMRAwDgYDVQQKEwdFbnRydXN0MS + IwIAYDVQQLExlDZXJ0aWZpY2F0aW9uIEF1dGhvcml0aWVzMRIwEAYDVQQLEwlEQ29tU3ViQ0ExGjA + YBgNVBAMTEUVuY3J5cHRpb24gUG9saWN5MIGRMBIMDFZlcmlmaWNhdGlvbgICAN0wezELMAkGA1UE + 
BhMCVVMxEDAOBgNVBAoTB0VudHJ1c3QxIjAgBgNVBAsTGUNlcnRpZmljYXRpb24gQXV0aG9yaXRpZ + XMxEjAQBgNVBAsTCURDb21TdWJDQTEiMCAGA1UEAxMZTWFzdGVyIExpc3QgU2lnbmVyIFBvbGljeT + CCAeQwGAwTZW50X21zX3NjX2NhcGlfY2xzMQIBLTCCAcYwgZ0wDwwKRHVhbCBVc2FnZQIBTDCBiTE + LMAkGA1UEBhMCVVMxEDAOBgNVBAoTB0VudHJ1c3QxIjAgBgNVBAsTGUNlcnRpZmljYXRpb24gQXV0 + aG9yaXRpZXMxEjAQBgNVBAsTCURDb21TdWJDQTEwMC4GA1UEAxMnQ0xTIDF5ciBEdWFsIFVzYWdlI + E5vIEtleSBCYWNrdXAgUG9saWN5MIGOMA8MCkVuY3J5cHRpb24CAU4wezELMAkGA1UEBhMCVVMxED + AOBgNVBAoTB0VudHJ1c3QxIjAgBgNVBAsTGUNlcnRpZmljYXRpb24gQXV0aG9yaXRpZXMxEjAQBgN + VBAsTCURDb21TdWJDQTEiMCAGA1UEAxMZQ0xTIDF5ciBFbmNyeXB0aW9uIFBvbGljeTCBkjARDAxW + ZXJpZmljYXRpb24CAU0wfTELMAkGA1UEBhMCVVMxEDAOBgNVBAoTB0VudHJ1c3QxIjAgBgNVBAsTG + UNlcnRpZmljYXRpb24gQXV0aG9yaXRpZXMxEjAQBgNVBAsTCURDb21TdWJDQTEkMCIGA1UEAxMbQ0 + xTIDF5ciBWZXJpZmljYXRpb24gUG9saWN5MIIB5DAYDBNlbnRfbXNfc2NfY2FwaV9jbHMyAgEuMII + BxjCBnTAPDApEdWFsIFVzYWdlAgFPMIGJMQswCQYDVQQGEwJVUzEQMA4GA1UEChMHRW50cnVzdDEi + MCAGA1UECxMZQ2VydGlmaWNhdGlvbiBBdXRob3JpdGllczESMBAGA1UECxMJRENvbVN1YkNBMTAwL + gYDVQQDEydDTFMgMnlyIER1YWwgVXNhZ2UgTm8gS2V5IEJhY2t1cCBQb2xpY3kwgY4wDwwKRW5jcn + lwdGlvbgIBUTB7MQswCQYDVQQGEwJVUzEQMA4GA1UEChMHRW50cnVzdDEiMCAGA1UECxMZQ2VydGl + maWNhdGlvbiBBdXRob3JpdGllczESMBAGA1UECxMJRENvbVN1YkNBMSIwIAYDVQQDExlDTFMgMnly + IEVuY3J5cHRpb24gUG9saWN5MIGSMBEMDFZlcmlmaWNhdGlvbgIBUDB9MQswCQYDVQQGEwJVUzEQM + A4GA1UEChMHRW50cnVzdDEiMCAGA1UECxMZQ2VydGlmaWNhdGlvbiBBdXRob3JpdGllczESMBAGA1 + UECxMJRENvbVN1YkNBMSQwIgYDVQQDExtDTFMgMnlyIFZlcmlmaWNhdGlvbiBQb2xpY3kwggHyMBk + MFGVudF9tc19zY19jYXBpX2NsczJtAgFRMIIB0zCBoTAQDApEdWFsIFVzYWdlAgIAjTCBjDELMAkG + A1UEBhMCVVMxEDAOBgNVBAoTB0VudHJ1c3QxIjAgBgNVBAsTGUNlcnRpZmljYXRpb24gQXV0aG9ya + XRpZXMxEjAQBgNVBAsTCURDb21TdWJDQTEzMDEGA1UEAxMqQ0xTIDJtb250aCBEdWFsIFVzYWdlIE + 5vIEtleSBCYWNrdXAgUG9saWN5MIGSMBAMCkVuY3J5cHRpb24CAgCPMH4xCzAJBgNVBAYTAlVTMRA + wDgYDVQQKEwdFbnRydXN0MSIwIAYDVQQLExlDZXJ0aWZpY2F0aW9uIEF1dGhvcml0aWVzMRIwEAYD + VQQLEwlEQ29tU3ViQ0ExJTAjBgNVBAMTHENMUyAybW9udGggRW5jcnlwdGlvbiBQb2xpY3kwgZcwE + gwMVmVyaWZpY2F0aW9uAgIAjjCBgDELMAkGA1UEBhMCVVMxEDAOBgNVBAoTB0VudHJ1c3QxIjAgBg + NVBAsTGUNlcnRpZmljYXRpb24gQXV0aG9yaXRpZXMxEjAQBgNVBAsTCURDb21TdWJDQTEnMCUGA1U + EAxMeQ0xTIDJtb250aCBWZXJpZmljYXRpb24gUG9saWN5MIIB5zAYDBNlbnRfbXNfc2NfY2FwaV9j + bHM0AgFPMIIByTCBnjAQDApEdWFsIFVzYWdlAgIAhzCBiTELMAkGA1UEBhMCVVMxEDAOBgNVBAoTB + 0VudHJ1c3QxIjAgBgNVBAsTGUNlcnRpZmljYXRpb24gQXV0aG9yaXRpZXMxEjAQBgNVBAsTCURDb2 + 1TdWJDQTEwMC4GA1UEAxMnQ0xTIDR5ciBEdWFsIFVzYWdlIE5vIEtleSBCYWNrdXAgUG9saWN5MIG + PMBAMCkVuY3J5cHRpb24CAgCJMHsxCzAJBgNVBAYTAlVTMRAwDgYDVQQKEwdFbnRydXN0MSIwIAYD + VQQLExlDZXJ0aWZpY2F0aW9uIEF1dGhvcml0aWVzMRIwEAYDVQQLEwlEQ29tU3ViQ0ExIjAgBgNVB + AMTGUNMUyA0eXIgRW5jcnlwdGlvbiBQb2xpY3kwgZMwEgwMVmVyaWZpY2F0aW9uAgIAiDB9MQswCQ + YDVQQGEwJVUzEQMA4GA1UEChMHRW50cnVzdDEiMCAGA1UECxMZQ2VydGlmaWNhdGlvbiBBdXRob3J + pdGllczESMBAGA1UECxMJRENvbVN1YkNBMSQwIgYDVQQDExtDTFMgNHlyIFZlcmlmaWNhdGlvbiBQ + b2xpY3kwggHnMBgME2VudF9tc19zY19jYXBpX2NsczUCAVAwggHJMIGeMBAMCkR1YWwgVXNhZ2UCA + gCKMIGJMQswCQYDVQQGEwJVUzEQMA4GA1UEChMHRW50cnVzdDEiMCAGA1UECxMZQ2VydGlmaWNhdG + lvbiBBdXRob3JpdGllczESMBAGA1UECxMJRENvbVN1YkNBMTAwLgYDVQQDEydDTFMgNXlyIER1YWw + gVXNhZ2UgTm8gS2V5IEJhY2t1cCBQb2xpY3kwgY8wEAwKRW5jcnlwdGlvbgICAIwwezELMAkGA1UE + BhMCVVMxEDAOBgNVBAoTB0VudHJ1c3QxIjAgBgNVBAsTGUNlcnRpZmljYXRpb24gQXV0aG9yaXRpZ + XMxEjAQBgNVBAsTCURDb21TdWJDQTEiMCAGA1UEAxMZQ0xTIDV5ciBFbmNyeXB0aW9uIFBvbGljeT + CBkzASDAxWZXJpZmljYXRpb24CAgCLMH0xCzAJBgNVBAYTAlVTMRAwDgYDVQQKEwdFbnRydXN0MSI + wIAYDVQQLExlDZXJ0aWZpY2F0aW9uIEF1dGhvcml0aWVzMRIwEAYDVQQLEwlEQ29tU3ViQ0ExJDAi + 
BgNVBAMTG0NMUyA1eXIgVmVyaWZpY2F0aW9uIFBvbGljeTCCAfgwGAwTZW50X21zX3NjX2NsczRfM + TAyNAIBXDCCAdowgaMwEAwKRHVhbCBVc2FnZQICAKUwgY4xCzAJBgNVBAYTAlVTMRAwDgYDVQQKEw + dFbnRydXN0MSIwIAYDVQQLExlDZXJ0aWZpY2F0aW9uIEF1dGhvcml0aWVzMRIwEAYDVQQLEwlEQ29 + tU3ViQ0ExNTAzBgNVBAMTLENMUyAxMDI0IDR5ciBEdWFsIFVzYWdlIE5vIEtleSBCYWNrdXAgUG9s + aWN5MIGVMBAMCkVuY3J5cHRpb24CAgCnMIGAMQswCQYDVQQGEwJVUzEQMA4GA1UEChMHRW50cnVzd + DEiMCAGA1UECxMZQ2VydGlmaWNhdGlvbiBBdXRob3JpdGllczESMBAGA1UECxMJRENvbVN1YkNBMS + cwJQYDVQQDEx5DTFMgMTAyNCA0eXIgRW5jcnlwdGlvbiBQb2xpY3kwgZkwEgwMVmVyaWZpY2F0aW9 + uAgIApjCBgjELMAkGA1UEBhMCVVMxEDAOBgNVBAoTB0VudHJ1c3QxIjAgBgNVBAsTGUNlcnRpZmlj + YXRpb24gQXV0aG9yaXRpZXMxEjAQBgNVBAsTCURDb21TdWJDQTEpMCcGA1UEAxMgQ0xTIDEwMjQgN + HlyIFZlcmlmaWNhdGlvbiBQb2xpY3kwggH4MBgME2VudF9tc19zY19jbHM0XzIwNDgCAVowggHaMI + GjMBAMCkR1YWwgVXNhZ2UCAgCfMIGOMQswCQYDVQQGEwJVUzEQMA4GA1UEChMHRW50cnVzdDEiMCA + GA1UECxMZQ2VydGlmaWNhdGlvbiBBdXRob3JpdGllczESMBAGA1UECxMJRENvbVN1YkNBMTUwMwYD + VQQDEyxDTFMgMjA0OCA0eXIgRHVhbCBVc2FnZSBObyBLZXkgQmFja3VwIFBvbGljeTCBlTAQDApFb + mNyeXB0aW9uAgIAoTCBgDELMAkGA1UEBhMCVVMxEDAOBgNVBAoTB0VudHJ1c3QxIjAgBgNVBAsTGU + NlcnRpZmljYXRpb24gQXV0aG9yaXRpZXMxEjAQBgNVBAsTCURDb21TdWJDQTEnMCUGA1UEAxMeQ0x + TIDIwNDggNHlyIEVuY3J5cHRpb24gUG9saWN5MIGZMBIMDFZlcmlmaWNhdGlvbgICAKAwgYIxCzAJ + BgNVBAYTAlVTMRAwDgYDVQQKEwdFbnRydXN0MSIwIAYDVQQLExlDZXJ0aWZpY2F0aW9uIEF1dGhvc + ml0aWVzMRIwEAYDVQQLEwlEQ29tU3ViQ0ExKTAnBgNVBAMTIENMUyAyMDQ4IDR5ciBWZXJpZmljYX + Rpb24gUG9saWN5MIIB+DAYDBNlbnRfbXNfc2NfY2xzNV8xMDI0AgFdMIIB2jCBozAQDApEdWFsIFV + zYWdlAgIAqDCBjjELMAkGA1UEBhMCVVMxEDAOBgNVBAoTB0VudHJ1c3QxIjAgBgNVBAsTGUNlcnRp + ZmljYXRpb24gQXV0aG9yaXRpZXMxEjAQBgNVBAsTCURDb21TdWJDQTE1MDMGA1UEAxMsQ0xTIDEwM + jQgNXlyIER1YWwgVXNhZ2UgTm8gS2V5IEJhY2t1cCBQb2xpY3kwgZUwEAwKRW5jcnlwdGlvbgICAK + owgYAxCzAJBgNVBAYTAlVTMRAwDgYDVQQKEwdFbnRydXN0MSIwIAYDVQQLExlDZXJ0aWZpY2F0aW9 + uIEF1dGhvcml0aWVzMRIwEAYDVQQLEwlEQ29tU3ViQ0ExJzAlBgNVBAMTHkNMUyAxMDI0IDV5ciBF + bmNyeXB0aW9uIFBvbGljeTCBmTASDAxWZXJpZmljYXRpb24CAgCpMIGCMQswCQYDVQQGEwJVUzEQM + A4GA1UEChMHRW50cnVzdDEiMCAGA1UECxMZQ2VydGlmaWNhdGlvbiBBdXRob3JpdGllczESMBAGA1 + UECxMJRENvbVN1YkNBMSkwJwYDVQQDEyBDTFMgMTAyNCA1eXIgVmVyaWZpY2F0aW9uIFBvbGljeTC + CAfgwGAwTZW50X21zX3NjX2NsczVfMjA0OAIBWzCCAdowgaMwEAwKRHVhbCBVc2FnZQICAKIwgY4x + CzAJBgNVBAYTAlVTMRAwDgYDVQQKEwdFbnRydXN0MSIwIAYDVQQLExlDZXJ0aWZpY2F0aW9uIEF1d + Ghvcml0aWVzMRIwEAYDVQQLEwlEQ29tU3ViQ0ExNTAzBgNVBAMTLENMUyAyMDQ4IDV5ciBEdWFsIF + VzYWdlIE5vIEtleSBCYWNrdXAgUG9saWN5MIGVMBAMCkVuY3J5cHRpb24CAgCkMIGAMQswCQYDVQQ + GEwJVUzEQMA4GA1UEChMHRW50cnVzdDEiMCAGA1UECxMZQ2VydGlmaWNhdGlvbiBBdXRob3JpdGll + czESMBAGA1UECxMJRENvbVN1YkNBMScwJQYDVQQDEx5DTFMgMjA0OCA1eXIgRW5jcnlwdGlvbiBQb + 2xpY3kwgZkwEgwMVmVyaWZpY2F0aW9uAgIAozCBgjELMAkGA1UEBhMCVVMxEDAOBgNVBAoTB0VudH + J1c3QxIjAgBgNVBAsTGUNlcnRpZmljYXRpb24gQXV0aG9yaXRpZXMxEjAQBgNVBAsTCURDb21TdWJ + DQTEpMCcGA1UEAxMgQ0xTIDIwNDggNXlyIFZlcmlmaWNhdGlvbiBQb2xpY3kwggHpMBcMEmVudF9t + c19zY19jbHNfMjA0OAIBWDCCAcwwgZ8wEAwKRHVhbCBVc2FnZQICAJowgYoxCzAJBgNVBAYTAlVTM + RAwDgYDVQQKEwdFbnRydXN0MSIwIAYDVQQLExlDZXJ0aWZpY2F0aW9uIEF1dGhvcml0aWVzMRIwEA + YDVQQLEwlEQ29tU3ViQ0ExMTAvBgNVBAMTKENMUyAyMDQ4IER1YWwgVXNhZ2UgTm8gS2V5IEJhY2t + 1cCBQb2xpY3kwgZAwEAwKRW5jcnlwdGlvbgICAJwwfDELMAkGA1UEBhMCVVMxEDAOBgNVBAoTB0Vu + dHJ1c3QxIjAgBgNVBAsTGUNlcnRpZmljYXRpb24gQXV0aG9yaXRpZXMxEjAQBgNVBAsTCURDb21Td + WJDQTEjMCEGA1UEAxMaQ0xTIDIwNDggRW5jcnlwdGlvbiBQb2xpY3kwgZQwEgwMVmVyaWZpY2F0aW + 9uAgIAmzB+MQswCQYDVQQGEwJVUzEQMA4GA1UEChMHRW50cnVzdDEiMCAGA1UECxMZQ2VydGlmaWN + hdGlvbiBBdXRob3JpdGllczESMBAGA1UECxMJRENvbVN1YkNBMSUwIwYDVQQDExxDTFMgMjA0OCBW + 
ZXJpZmljYXRpb24gUG9saWN5MIG1MBgME2VudF9tc19zbXJ0Y3JkX2NhcGkCAQ8wgZgwgZUwDwwKR + HVhbCBVc2FnZQIBGTCBgTELMAkGA1UEBhMCVVMxEDAOBgNVBAoTB0VudHJ1c3QxIjAgBgNVBAsTGU + NlcnRpZmljYXRpb24gQXV0aG9yaXRpZXMxEjAQBgNVBAsTCURDb21TdWJDQTEoMCYGA1UEAxMfRHV + hbCBVc2FnZSBObyBLZXkgQmFja3VwIFBvbGljeTCCAakwEAwKZW50X21zY2FwaQICAIEwggGTMHkw + CQwDRUZTAgIA4zBsMQswCQYDVQQGEwJVUzEQMA4GA1UEChMHRW50cnVzdDEiMCAGA1UECxMZQ2Vyd + GlmaWNhdGlvbiBBdXRob3JpdGllczESMBAGA1UECxMJRENvbVN1YkNBMRMwEQYDVQQDEwpFRlMgUG + 9saWN5MIGHMBAMCkVuY3J5cHRpb24CAgDhMHMxCzAJBgNVBAYTAlVTMRAwDgYDVQQKEwdFbnRydXN + 0MSIwIAYDVQQLExlDZXJ0aWZpY2F0aW9uIEF1dGhvcml0aWVzMRIwEAYDVQQLEwlEQ29tU3ViQ0Ex + GjAYBgNVBAMTEUVuY3J5cHRpb24gUG9saWN5MIGLMBIMDFZlcmlmaWNhdGlvbgICAOIwdTELMAkGA + 1UEBhMCVVMxEDAOBgNVBAoTB0VudHJ1c3QxIjAgBgNVBAsTGUNlcnRpZmljYXRpb24gQXV0aG9yaX + RpZXMxEjAQBgNVBAsTCURDb21TdWJDQTEcMBoGA1UEAxMTVmVyaWZpY2F0aW9uIFBvbGljeTBDMBc + MEmVudF9tc2Z0X3NtYXJ0Y2FyZAIBDjAoMBEwDwwKRW5jcnlwdGlvbgIBFzATMBEMDFZlcmlmaWNh + dGlvbgIBGDA/MBMMDmVudF9tc2dzY2FubmVyAgENMCgwETAPDApFbmNyeXB0aW9uAgEVMBMwEQwMV + mVyaWZpY2F0aW9uAgEWMD4wEgwNZW50X21zZ3NlcnZlcgIBDDAoMBEwDwwKRW5jcnlwdGlvbgIBEz + ATMBEMDFZlcmlmaWNhdGlvbgIBFDBAMBIMDGVudF9tc29hZG1pbgICAIkwKjASMBAMCkVuY3J5cHR + pb24CAgDxMBQwEgwMVmVyaWZpY2F0aW9uAgIA8DCCATkwFQwQZW50X21zdHdva2V5cGFpcgIBWTCC + AR4wgYowEAwKRW5jcnlwdGlvbgICAJ0wdjELMAkGA1UEBhMCVVMxEDAOBgNVBAoTB0VudHJ1c3QxI + jAgBgNVBAsTGUNlcnRpZmljYXRpb24gQXV0aG9yaXRpZXMxEjAQBgNVBAsTCURDb21TdWJDQTEdMB + sGA1UEAxMUR00gRW5jcnlwdGlvbiBQb2xpY3kwgY4wEgwMVmVyaWZpY2F0aW9uAgIAnjB4MQswCQY + DVQQGEwJVUzEQMA4GA1UEChMHRW50cnVzdDEiMCAGA1UECxMZQ2VydGlmaWNhdGlvbiBBdXRob3Jp + dGllczESMBAGA1UECxMJRENvbVN1YkNBMR8wHQYDVQQDExZHTSBWZXJpZmljYXRpb24gUG9saWN5M + IIBvjARDAxlbnRfbm9ucmVwdWQCARQwggGnMIGGMA8MCkVuY3J5cHRpb24CASIwczELMAkGA1UEBh + MCVVMxEDAOBgNVBAoTB0VudHJ1c3QxIjAgBgNVBAsTGUNlcnRpZmljYXRpb24gQXV0aG9yaXRpZXM + xEjAQBgNVBAsTCURDb21TdWJDQTEaMBgGA1UEAxMRRW5jcnlwdGlvbiBQb2xpY3kwgY4wEwwOTm9u + cmVwdWRpYXRpb24CASQwdzELMAkGA1UEBhMCVVMxEDAOBgNVBAoTB0VudHJ1c3QxIjAgBgNVBAsTG + UNlcnRpZmljYXRpb24gQXV0aG9yaXRpZXMxEjAQBgNVBAsTCURDb21TdWJDQTEeMBwGA1UEAxMVTm + 9ucmVwdWRpYXRpb24gUG9saWN5MIGKMBEMDFZlcmlmaWNhdGlvbgIBIzB1MQswCQYDVQQGEwJVUzE + QMA4GA1UEChMHRW50cnVzdDEiMCAGA1UECxMZQ2VydGlmaWNhdGlvbiBBdXRob3JpdGllczESMBAG + A1UECxMJRENvbVN1YkNBMRwwGgYDVQQDExNWZXJpZmljYXRpb24gUG9saWN5MIICQDAZDBRlbnRfb + m9ucmVwdWRfYW5kX2VmcwIBFzCCAiEweDAIDANFRlMCAS0wbDELMAkGA1UEBhMCVVMxEDAOBgNVBA + oTB0VudHJ1c3QxIjAgBgNVBAsTGUNlcnRpZmljYXRpb24gQXV0aG9yaXRpZXMxEjAQBgNVBAsTCUR + Db21TdWJDQTETMBEGA1UEAxMKRUZTIFBvbGljeTCBhjAPDApFbmNyeXB0aW9uAgEqMHMxCzAJBgNV + BAYTAlVTMRAwDgYDVQQKEwdFbnRydXN0MSIwIAYDVQQLExlDZXJ0aWZpY2F0aW9uIEF1dGhvcml0a + WVzMRIwEAYDVQQLEwlEQ29tU3ViQ0ExGjAYBgNVBAMTEUVuY3J5cHRpb24gUG9saWN5MIGOMBMMDk + 5vbnJlcHVkaWF0aW9uAgEsMHcxCzAJBgNVBAYTAlVTMRAwDgYDVQQKEwdFbnRydXN0MSIwIAYDVQQ + LExlDZXJ0aWZpY2F0aW9uIEF1dGhvcml0aWVzMRIwEAYDVQQLEwlEQ29tU3ViQ0ExHjAcBgNVBAMT + FU5vbnJlcHVkaWF0aW9uIFBvbGljeTCBijARDAxWZXJpZmljYXRpb24CASswdTELMAkGA1UEBhMCV + VMxEDAOBgNVBAoTB0VudHJ1c3QxIjAgBgNVBAsTGUNlcnRpZmljYXRpb24gQXV0aG9yaXRpZXMxEj + AQBgNVBAsTCURDb21TdWJDQTEcMBoGA1UEAxMTVmVyaWZpY2F0aW9uIFBvbGljeTA5MA0MCGVudF9 + vY3NwAgEpMCgwETAPDApFbmNyeXB0aW9uAgFHMBMwEQwMVmVyaWZpY2F0aW9uAgFIMD0wEQwMZW50 + X3Byb2ZzcnZyAgEFMCgwETAPDApFbmNyeXB0aW9uAgEFMBMwEQwMVmVyaWZpY2F0aW9uAgEGMDgwD + AwHZW50X3JkcAIBQDAoMBEwDwwKRW5jcnlwdGlvbgIBaTATMBEMDFZlcmlmaWNhdGlvbgIBajCBqj + ASDA1lbnRfc2lnbl9uaXN0AgFyMIGTMIGQMBIMDFZlcmlmaWNhdGlvbgICAMowejELMAkGA1UEBhM + CVVMxEDAOBgNVBAoTB0VudHJ1c3QxIjAgBgNVBAsTGUNlcnRpZmljYXRpb24gQXV0aG9yaXRpZXMx + 
EjAQBgNVBAsTCURDb21TdWJDQTEhMB8GA1UEAxMYTklTVCBWZXJpZmljYXRpb24gUG9saWN5MIGkM + BYMEWVudF9za3BfZHVhbHVzYWdlAgEYMIGJMIGGMA8MCkR1YWwgVXNhZ2UCAS4wczELMAkGA1UEBh + MCVVMxEDAOBgNVBAoTB0VudHJ1c3QxIjAgBgNVBAsTGUNlcnRpZmljYXRpb24gQXV0aG9yaXRpZXM + xEjAQBgNVBAsTCURDb21TdWJDQTEaMBgGA1UEAxMRRHVhbCBVc2FnZSBQb2xpY3kwLTATDA1lbnRf + c2twbm9ucmVwAgIAgDAWMBQwEgwMVmVyaWZpY2F0aW9uAgIA4DAwMBgMEmVudF9za3Bub25yZXBfY + XV0aAICAIYwFDASMBAMCkR1YWwgVXNhZ2UCAgDrMEEwEwwOZW50X3Nwb2NfYWRtaW4CAXwwKjASMB + AMCkVuY3J5cHRpb24CAgDYMBQwEgwMVmVyaWZpY2F0aW9uAgIA2TBCMBQMD2VudF9zcG9jX2NsaWV + udAIBejAqMBIwEAwKRW5jcnlwdGlvbgICANQwFDASDAxWZXJpZmljYXRpb24CAgDVMD4wEAwLZW50 + X3Nwb2NfZHYCAX0wKjASMBAMCkVuY3J5cHRpb24CAgDaMBQwEgwMVmVyaWZpY2F0aW9uAgIA2zBCM + BQMD2VudF9zcG9jX3NlcnZlcgIBezAqMBIwEAwKRW5jcnlwdGlvbgICANYwFDASDAxWZXJpZmljYX + Rpb24CAgDXMIG+MBMMDWVudF9zc2xfYmFzaWMCAgCIMIGmMBIwEAwKRW5jcnlwdGlvbgICAO8wgY8 + wEgwMVmVyaWZpY2F0aW9uAgIA7jB5MQswCQYDVQQGEwJVUzEQMA4GA1UEChMHRW50cnVzdDEiMCAG + A1UECxMZQ2VydGlmaWNhdGlvbiBBdXRob3JpdGllczESMBAGA1UECxMJRENvbVN1YkNBMSAwHgYDV + QQDFBdWZXJpZmljYXRpb25fcDEwIFBvbGljeTB7MBIMDGVudF9zc2xfY2VydAICAIcwUDAkMBAMCk + VuY3J5cHRpb24CAgDsohAMCkVuY3J5cHRpb24CAgDvMCgwEgwMVmVyaWZpY2F0aW9uAgIA7aISDAx + WZXJpZmljYXRpb24CAgDuohMMDWVudF9zc2xfYmFzaWMCAgCIMIIBKDAXDBJlbnRfc3RhbmRhbG9u + ZV9lZnMCARYwggELMIGLMBAMC0NNUCBTaWduaW5nAgEpMHcxCzAJBgNVBAYTAlVTMRAwDgYDVQQKE + wdFbnRydXN0MSIwIAYDVQQLExlDZXJ0aWZpY2F0aW9uIEF1dGhvcml0aWVzMRIwEAYDVQQLEwlEQ2 + 9tU3ViQ0ExHjAcBgNVBAMTFU1TIENNUCBTaWduaW5nIFBvbGljeTB7MAgMA0VGUwIBKDBvMQswCQY + DVQQGEwJVUzEQMA4GA1UEChMHRW50cnVzdDEiMCAGA1UECxMZQ2VydGlmaWNhdGlvbiBBdXRob3Jp + dGllczESMBAGA1UECxMJRENvbVN1YkNBMRYwFAYDVQQDEw1NUyBFRlMgUG9saWN5MD4wEgwNZW50X + 3RpbWVzdGFtcAIBBDAoMBEwDwwKRW5jcnlwdGlvbgIBAzATMBEMDFZlcmlmaWNhdGlvbgIBBDBDMB + UMEGVudF90aW1lc3RhbXBpbmcCAXcwKjASMBAMCkVuY3J5cHRpb24CAgDQMBQwEgwMVmVyaWZpY2F + 0aW9uAgIA0TCBxzARDAxlbnRfdHJ1ZXBhc3MCAQgwgbEwETAPDApFbmNyeXB0aW9uAgELMIGbMBEM + DFZlcmlmaWNhdGlvbgIBDDCBhTELMAkGA1UEBhMCVVMxEDAOBgNVBAoTB0VudHJ1c3QxIjAgBgNVB + AsTGUNlcnRpZmljYXRpb24gQXV0aG9yaXRpZXMxEjAQBgNVBAsTCURDb21TdWJDQTEsMCoGA1UEAx + MjVHJ1ZVBhc3MgU2VydmVyIFZlcmlmaWNhdGlvbiBQb2xpY3kwgc0wFwwSZW50X3RydWVwYXNzX21 + 1bHRpAgEJMIGxMBEwDwwKRW5jcnlwdGlvbgIBDTCBmzARDAxWZXJpZmljYXRpb24CAQ4wgYUxCzAJ + BgNVBAYTAlVTMRAwDgYDVQQKEwdFbnRydXN0MSIwIAYDVQQLExlDZXJ0aWZpY2F0aW9uIEF1dGhvc + ml0aWVzMRIwEAYDVQQLEwlEQ29tU3ViQ0ExLDAqBgNVBAMTI1RydWVQYXNzIFNlcnZlciBWZXJpZm + ljYXRpb24gUG9saWN5MIIBLzATDA5lbnRfdHdva2V5cGFpcgIBEzCCARYwgYYwDwwKRW5jcnlwdGl + vbgIBIDBzMQswCQYDVQQGEwJVUzEQMA4GA1UEChMHRW50cnVzdDEiMCAGA1UECxMZQ2VydGlmaWNh + dGlvbiBBdXRob3JpdGllczESMBAGA1UECxMJRENvbVN1YkNBMRowGAYDVQQDExFFbmNyeXB0aW9uI + FBvbGljeTCBijARDAxWZXJpZmljYXRpb24CASEwdTELMAkGA1UEBhMCVVMxEDAOBgNVBAoTB0VudH + J1c3QxIjAgBgNVBAsTGUNlcnRpZmljYXRpb24gQXV0aG9yaXRpZXMxEjAQBgNVBAsTCURDb21TdWJ + DQTEcMBoGA1UEAxMTVmVyaWZpY2F0aW9uIFBvbGljeTCCAUMwFwwSZW50X3R3b2tleXBhaXJfcDEw + AgEkMIIBJjCBjjATDA5FbmNyeXB0aW9uX3AxMAIBPTB3MQswCQYDVQQGEwJVUzEQMA4GA1UEChMHR + W50cnVzdDEiMCAGA1UECxMZQ2VydGlmaWNhdGlvbiBBdXRob3JpdGllczESMBAGA1UECxMJRENvbV + N1YkNBMR4wHAYDVQQDFBVFbmNyeXB0aW9uX3AxMCBQb2xpY3kwgZIwFQwQVmVyaWZpY2F0aW9uX3A + xMAIBPjB5MQswCQYDVQQGEwJVUzEQMA4GA1UEChMHRW50cnVzdDEiMCAGA1UECxMZQ2VydGlmaWNh + dGlvbiBBdXRob3JpdGllczESMBAGA1UECxMJRENvbVN1YkNBMSAwHgYDVQQDFBdWZXJpZmljYXRpb + 25fcDEwIFBvbGljeTBBMBMMDWVudF91bXNfYWRtaW4CAgCKMCowEjAQDApFbmNyeXB0aW9uAgIA8j + AUMBIMDFZlcmlmaWNhdGlvbgICAPMwOzAPDAplbnRfeGFwc3J2AgEQMCgwETAPDApFbmNyeXB0aW9 + uAgEaMBMwEQwMVmVyaWZpY2F0aW9uAgEbMIGuMBUMEGVwYXNzX2RvY19zaWduZXICAWQwgZQwgZEw + 
FQwPRG9jdW1lbnQgU2lnbmVyAgIAtzB4MQswCQYDVQQGEwJVUzEQMA4GA1UEChMHRW50cnVzdDEiM + CAGA1UECxMZQ2VydGlmaWNhdGlvbiBBdXRob3JpdGllczESMBAGA1UECxMJRENvbVN1YkNBMR8wHQ + YDVQQDExZEb2N1bWVudCBTaWduZXIgUG9saWN5MIGzMBoMFGVwYXNzX2RvY19zaWduZXJfZHRsAgI + AhDCBlDCBkTAVDA9Eb2N1bWVudCBTaWduZXICAgDoMHgxCzAJBgNVBAYTAlVTMRAwDgYDVQQKEwdF + bnRydXN0MSIwIAYDVQQLExlDZXJ0aWZpY2F0aW9uIEF1dGhvcml0aWVzMRIwEAYDVQQLEwlEQ29tU + 3ViQ0ExHzAdBgNVBAMTFkRvY3VtZW50IFNpZ25lciBQb2xpY3kwgbYwFwwSZXBhc3NfbWxpc3Rfc2 + lnbmVyAgFjMIGaMIGXMBgMEk1hc3RlciBMaXN0IFNpZ25lcgICALYwezELMAkGA1UEBhMCVVMxEDA + OBgNVBAoTB0VudHJ1c3QxIjAgBgNVBAsTGUNlcnRpZmljYXRpb24gQXV0aG9yaXRpZXMxEjAQBgNV + BAsTCURDb21TdWJDQTEiMCAGA1UEAxMZTWFzdGVyIExpc3QgU2lnbmVyIFBvbGljeTAqMBIMDW1vY + mlsZV9kZXZpY2UCAXEwFDASMBAMCkR1YWwgVXNhZ2UCAgDJMIG4MBYMEW1vYmlsZV9kZXZpY2VfMW + twAgF2MIGdMIGaMBIMDFZlcmlmaWNhdGlvbgICAM8wgYMxCzAJBgNVBAYTAlVTMRAwDgYDVQQKEwd + FbnRydXN0MSIwIAYDVQQLExlDZXJ0aWZpY2F0aW9uIEF1dGhvcml0aWVzMRIwEAYDVQQLEwlEQ29t + U3ViQ0ExKjAoBgNVBAMTIU1vYmlsZSBEZXZpY2UgVmVyaWZpY2F0aW9uIFBvbGljeTCCAVowEAwKb + XNfdGhyZWV5cgICAIUwggFEMIGdMBAMCkVuY3J5cHRpb24CAgDpMIGIMQswCQYDVQQGEwJVUzEQMA + 4GA1UEChMHRW50cnVzdDEiMCAGA1UECxMZQ2VydGlmaWNhdGlvbiBBdXRob3JpdGllczESMBAGA1U + ECxMJRENvbVN1YkNBMS8wLQYDVQQDEyZNaWNyb1NvZnQgVGhyZWUgWWVhciBFbmNyeXB0aW9uIFBv + bGljeTCBoTASDAxWZXJpZmljYXRpb24CAgDqMIGKMQswCQYDVQQGEwJVUzEQMA4GA1UEChMHRW50c + nVzdDEiMCAGA1UECxMZQ2VydGlmaWNhdGlvbiBBdXRob3JpdGllczESMBAGA1UECxMJRENvbVN1Yk + NBMTEwLwYDVQQDEyhNaWNyb1NvZnQgVGhyZWUgWWVhciBWZXJpZmljYXRpb24gUG9saWN5MD4wEgw + NbXNfdnBuX3NlcnZlcgIBIDAoMBEwDwwKRW5jcnlwdGlvbgIBODATMBEMDFZlcmlmaWNhdGlvbgIB + OTCBmDAPDApzc2xfZGV2aWNlAgFzMIGEMIGBMAkMA3NzbAICAMswdDELMAkGA1UEBhMCVVMxEDAOB + gNVBAoTB0VudHJ1c3QxIjAgBgNVBAsTGUNlcnRpZmljYXRpb24gQXV0aG9yaXRpZXMxEjAQBgNVBA + sTCURDb21TdWJDQTEbMBkGA1UEAxMSU1NMIEludGVyb3AgUG9saWN5MIIBQDAXDBJzc2xfZGV2aWN + lX2ludGVyb3ACAXQwggEjMIGQMAoMBHNzbDECAgDMMIGBMQswCQYDVQQGEwJVUzEQMA4GA1UEChMH + RW50cnVzdDEiMCAGA1UECxMZQ2VydGlmaWNhdGlvbiBBdXRob3JpdGllczESMBAGA1UECxMJRENvb + VN1YkNBMSgwJgYDVQQDEx9TU0wgSW50ZXJvcCBWZXJpZmljYXRpb24gUG9saWN5MIGNMAoMBHNzbD + ICAgDNMH8xCzAJBgNVBAYTAlVTMRAwDgYDVQQKEwdFbnRydXN0MSIwIAYDVQQLExlDZXJ0aWZpY2F + 0aW9uIEF1dGhvcml0aWVzMRIwEAYDVQQLEwlEQ29tU3ViQ0ExJjAkBgNVBAMTHVNTTCBJbnRlcm9w + IEVuY3J5cHRpb24gUG9saWN5MEMwFwwSdnBuX2NsaWVudF9tYWNoaW5lAgEhMCgwETAPDApFbmNye + XB0aW9uAgE6MBMwEQwMVmVyaWZpY2F0aW9uAgE7MIGiMBQMD3Zwbl9jbGllbnRfdXNlcgIBGTCBiT + CBhjAPDApEdWFsIFVzYWdlAgEvMHMxCzAJBgNVBAYTAlVTMRAwDgYDVQQKEwdFbnRydXN0MSIwIAY + DVQQLExlDZXJ0aWZpY2F0aW9uIEF1dGhvcml0aWVzMRIwEAYDVQQLEwlEQ29tU3ViQ0ExGjAYBgNV + BAMTEUR1YWwgVXNhZ2UgUG9saWN5MDgwDAwHdnBuX2RpcgIBCjAoMBEwDwwKRW5jcnlwdGlvbgIBD + zATMBEMDFZlcmlmaWNhdGlvbgIBEDA6MA4MCXZwbl9ub2RpcgIBCzAoMBEwDwwKRW5jcnlwdGlvbg + IBETATMBEMDFZlcmlmaWNhdGlvbgIBEjA6MA4MCXdlYl9hZF9kYwIBHzAoMBEwDwwKRW5jcnlwdGl + vbgIBNjATMBEMDFZlcmlmaWNhdGlvbgIBNzA/MBMMDndlYl9hZF9kY19jbHMxAgFDMCgwETAPDApF + bmNyeXB0aW9uAgFvMBMwEQwMVmVyaWZpY2F0aW9uAgFwMD8wEwwOd2ViX2FkX2RjX2NsczICAUQwK + DARMA8MCkVuY3J5cHRpb24CAXEwEzARDAxWZXJpZmljYXRpb24CAXIwggE+MBAMC3dlYl9hZF9zdn + IyAgFhMIIBKDCBjzAQDApFbmNyeXB0aW9uAgIAsTB7MQswCQYDVQQGEwJVUzEQMA4GA1UEChMHRW5 + 0cnVzdDEiMCAGA1UECxMZQ2VydGlmaWNhdGlvbiBBdXRob3JpdGllczESMBAGA1UECxMJRENvbVN1 + YkNBMSIwIAYDVQQDExlDTFMgMnlyIEVuY3J5cHRpb24gUG9saWN5MIGTMBIMDFZlcmlmaWNhdGlvb + gICALIwfTELMAkGA1UEBhMCVVMxEDAOBgNVBAoTB0VudHJ1c3QxIjAgBgNVBAsTGUNlcnRpZmljYX + Rpb24gQXV0aG9yaXRpZXMxEjAQBgNVBAsTCURDb21TdWJDQTEkMCIGA1UEAxMbQ0xTIDJ5ciBWZXJ + pZmljYXRpb24gUG9saWN5MIIBLjAQDAt3ZWJfYWRfc3ZyMwIBYjCCARgwgYcwEAwKRW5jcnlwdGlv + 
bgICALMwczELMAkGA1UEBhMCVVMxEDAOBgNVBAoTB0VudHJ1c3QxIjAgBgNVBAsTGUNlcnRpZmljY + XRpb24gQXV0aG9yaXRpZXMxEjAQBgNVBAsTCURDb21TdWJDQTEaMBgGA1UEAxMRRW5jcnlwdGlvbi + BQb2xpY3kwgYswEgwMVmVyaWZpY2F0aW9uAgIAtDB1MQswCQYDVQQGEwJVUzEQMA4GA1UEChMHRW5 + 0cnVzdDEiMCAGA1UECxMZQ2VydGlmaWNhdGlvbiBBdXRob3JpdGllczESMBAGA1UECxMJRENvbVN1 + YkNBMRwwGgYDVQQDExNWZXJpZmljYXRpb24gUG9saWN5MD8wEwwOd2ViX2FpX2Ntc19jbGkCAT4wK + DARMA8MCkVuY3J5cHRpb24CAWUwEzARDAxWZXJpZmljYXRpb24CAWYwPjASDA13ZWJfYWlfY21zX2 + RzAgE/MCgwETAPDApFbmNyeXB0aW9uAgFnMBMwEQwMVmVyaWZpY2F0aW9uAgFoMD8wEwwOd2ViX2F + pX2Ntc19zdnICAT0wKDARMA8MCkVuY3J5cHRpb24CAWMwEzARDAxWZXJpZmljYXRpb24CAWQwPDAO + DAl3ZWJfYmFzaWMCAWswKjASMBAMCkVuY3J5cHRpb24CAgC+MBQwEgwMVmVyaWZpY2F0aW9uAgIAv + zBCMBQMDndlYl9jbGlzdnJfZXhwAgIAgjAqMBIwEAwKRW5jcnlwdGlvbgICAOQwFDASDAxWZXJpZm + ljYXRpb24CAgDlMDkwDQwId2ViX2NsczECAUUwKDARMA8MCkVuY3J5cHRpb24CAXMwEzARDAxWZXJ + pZmljYXRpb24CAXQwOTANDAh3ZWJfY2xzMgIBRjAoMBEwDwwKRW5jcnlwdGlvbgIBdTATMBEMDFZl + cmlmaWNhdGlvbgIBdjA+MBIMDXdlYl9jbXNjbGllbnQCAUEwKDARMA8MCkVuY3J5cHRpb24CAWswE + zARDAxWZXJpZmljYXRpb24CAWwwRDAXDBJ3ZWJfY21zY2xpZW50X2NsczECAUswKTARMA8MCkVuY3 + J5cHRpb24CAX8wFDASDAxWZXJpZmljYXRpb24CAgCAMEUwFwwSd2ViX2Ntc2NsaWVudF9jbHMyAgF + MMCowEjAQDApFbmNyeXB0aW9uAgIAgTAUMBIMDFZlcmlmaWNhdGlvbgICAIIwPjASDA13ZWJfY21z + c2VydmVyAgFCMCgwETAPDApFbmNyeXB0aW9uAgFtMBMwEQwMVmVyaWZpY2F0aW9uAgFuMEUwFwwSd + 2ViX2Ntc3NlcnZlcl9jbHMxAgFNMCowEjAQDApFbmNyeXB0aW9uAgIAgzAUMBIMDFZlcmlmaWNhdG + lvbgICAIQwRTAXDBJ3ZWJfY21zc2VydmVyX2NsczICAU4wKjASMBAMCkVuY3J5cHRpb24CAgCFMBQ + wEgwMVmVyaWZpY2F0aW9uAgIAhjA9MBEMDHdlYl9jb2Rlc2lnbgIBHjAoMBEwDwwKRW5jcnlwdGlv + bgIBNDATMBEMDFZlcmlmaWNhdGlvbgIBNTBCMBYMEXdlYl9jb2Rlc2lnbl9jbHMxAgFJMCgwETAPD + ApFbmNyeXB0aW9uAgF7MBMwEQwMVmVyaWZpY2F0aW9uAgF8MEIwFgwRd2ViX2NvZGVzaWduX2Nscz + ICAUowKDARMA8MCkVuY3J5cHRpb24CAX0wEzARDAxWZXJpZmljYXRpb24CAX4wggEsMBAMC3dlYl9 + kZWZhdWx0AgEcMIIBFjCBhjAPDApFbmNyeXB0aW9uAgEwMHMxCzAJBgNVBAYTAlVTMRAwDgYDVQQK + EwdFbnRydXN0MSIwIAYDVQQLExlDZXJ0aWZpY2F0aW9uIEF1dGhvcml0aWVzMRIwEAYDVQQLEwlEQ + 29tU3ViQ0ExGjAYBgNVBAMTEUVuY3J5cHRpb24gUG9saWN5MIGKMBEMDFZlcmlmaWNhdGlvbgIBMT + B1MQswCQYDVQQGEwJVUzEQMA4GA1UEChMHRW50cnVzdDEiMCAGA1UECxMZQ2VydGlmaWNhdGlvbiB + BdXRob3JpdGllczESMBAGA1UECxMJRENvbVN1YkNBMRwwGgYDVQQDExNWZXJpZmljYXRpb24gUG9s + aWN5MCwwEwwOd2ViX29uZWtleXBhaXICATwwFTATMBEMDFZlcmlmaWNhdGlvbgIBYjA7MA8MCndlY + l9zZXJ2ZXICAR0wKDARMA8MCkVuY3J5cHRpb24CATIwEzARDAxWZXJpZmljYXRpb24CATMwKjAQDA + t3ZWJfc2VydmVyMgIBdTAWMBQwEgwMVmVyaWZpY2F0aW9uAgIAzjBEMBYMEHdlYl9zZXJ2ZXJfYmF + zaWMCAgCDMCowEjAQDApFbmNyeXB0aW9uAgIA5jAUMBIMDFZlcmlmaWNhdGlvbgICAOcwQDAUDA93 + ZWJfc2VydmVyX2NsczECAUcwKDARMA8MCkVuY3J5cHRpb24CAXcwEzARDAxWZXJpZmljYXRpb24CA + XgwggFAMBQMD3dlYl9zZXJ2ZXJfY2xzMgIBSDCCASYwgY4wDwwKRW5jcnlwdGlvbgIBeTB7MQswCQ + YDVQQGEwJVUzEQMA4GA1UEChMHRW50cnVzdDEiMCAGA1UECxMZQ2VydGlmaWNhdGlvbiBBdXRob3J + pdGllczESMBAGA1UECxMJRENvbVN1YkNBMSIwIAYDVQQDExlDTFMgMnlyIEVuY3J5cHRpb24gUG9s + aWN5MIGSMBEMDFZlcmlmaWNhdGlvbgIBejB9MQswCQYDVQQGEwJVUzEQMA4GA1UEChMHRW50cnVzd + DEiMCAGA1UECxMZQ2VydGlmaWNhdGlvbiBBdXRob3JpdGllczESMBAGA1UECxMJRENvbVN1YkNBMS + QwIgYDVQQDExtDTFMgMnlyIFZlcmlmaWNhdGlvbiBQb2xpY3kwggEyMBQMD3dlYl9zZXJ2ZXJfY2x + zMwIBYDCCARgwgYcwEAwKRW5jcnlwdGlvbgICAK8wczELMAkGA1UEBhMCVVMxEDAOBgNVBAoTB0Vu + dHJ1c3QxIjAgBgNVBAsTGUNlcnRpZmljYXRpb24gQXV0aG9yaXRpZXMxEjAQBgNVBAsTCURDb21Td + WJDQTEaMBgGA1UEAxMRRW5jcnlwdGlvbiBQb2xpY3kwgYswEgwMVmVyaWZpY2F0aW9uAgIAsDB1MQ + swCQYDVQQGEwJVUzEQMA4GA1UEChMHRW50cnVzdDEiMCAGA1UECxMZQ2VydGlmaWNhdGlvbiBBdXR + ob3JpdGllczESMBAGA1UECxMJRENvbVN1YkNBMRwwGgYDVQQDExNWZXJpZmljYXRpb24gUG9saWN5 + 
MIIBQjAUDA93ZWJfc2VydmVyX2NsczQCAV8wggEoMIGPMBAMCkVuY3J5cHRpb24CAgCtMHsxCzAJB + gNVBAYTAlVTMRAwDgYDVQQKEwdFbnRydXN0MSIwIAYDVQQLExlDZXJ0aWZpY2F0aW9uIEF1dGhvcm + l0aWVzMRIwEAYDVQQLEwlEQ29tU3ViQ0ExIjAgBgNVBAMTGUNMUyA0eXIgRW5jcnlwdGlvbiBQb2x + pY3kwgZMwEgwMVmVyaWZpY2F0aW9uAgIArjB9MQswCQYDVQQGEwJVUzEQMA4GA1UEChMHRW50cnVz + dDEiMCAGA1UECxMZQ2VydGlmaWNhdGlvbiBBdXRob3JpdGllczESMBAGA1UECxMJRENvbVN1YkNBM + SQwIgYDVQQDExtDTFMgNHlyIFZlcmlmaWNhdGlvbiBQb2xpY3kwRjAYDBN3ZWJfc2VydmVyX2V4cG + VyaWFuAgFuMCowEjAQDApFbmNyeXB0aW9uAgIAwzAUMBIMDFZlcmlmaWNhdGlvbgICAMQwQDAUDA9 + 3ZWJfc2VydmVyX2hpZ2gCATswKDARMA8MCkVuY3J5cHRpb24CAWAwEzARDAxWZXJpZmljYXRpb24C + AWEwGwYJKoZIhvZ9B000MQ4wDAYKKoZIhvZ9B001ATAhMB8GA1UdIwQYMBaAFDy++9gIa1JL8T+Oh + 9HW5F160lV9MA0GCSqGSIb3DQEBBQUAA4IBAQBelvaP82tFhjcHOTSDP97QLcqo2yE9RjjLtC/In8 + u/Zi/8y6jR9GRE11U6GbF+5+EJ5pckTMJ8Oorn3ZVOl4dKyzTN9m2rLjdUXNWd/th8Ja1RD/9hpMD + o5HUUYJEoOQxufTZnWfEZ2AISB7rXLCFZpdHGvc3H2ORtkhV+SuTmLpNkN1Zsbv8TXNi4szuX5sbA + y/mX7G8q0Twbb+GGpZjlKV226xc2Ejy3uYGrUK0kEr6u/ONTK1844vsuZPkcJOMcj7/c4o8oKKVMT + Fyafl1swsxHWn6MTh6WqI5k2LBcyEZSptDcG1brE7BU1JAOE9F7nkaoOOWefJs3n7B8piLg +crossCertificatePair;binary:: MIIGUqCCBk4wggZKMIIFMqADAgECAgRIwMPgMA0GCSqGSIb3 + DQEBBQUAMFgxCzAJBgNVBAYTAlVTMRAwDgYDVQQKEwdFbnRydXN0MSIwIAYDVQQLExlDZXJ0aWZpY + 2F0aW9uIEF1dGhvcml0aWVzMRMwEQYDVQQLEwpEQ29tUm9vdENBMB4XDTEwMDQyMDE0NDQwNloXDT + MwMDMyMDE1MTQwNlowVzELMAkGA1UEBhMCVVMxEDAOBgNVBAoTB0VudHJ1c3QxIjAgBgNVBAsTGUN + lcnRpZmljYXRpb24gQXV0aG9yaXRpZXMxEjAQBgNVBAsTCURDb21TdWJDQTCCASIwDQYJKoZIhvcN + AQEBBQADggEPADCCAQoCggEBAOMj486WAJ+GC3aOTn7g1p3+tzHJ8YUAoLW0y4WC6eleA+Yq9M+FP + Xlo+E6AMak4+HENfQMBa5bUgqJMGL20ZOktm0jpMtGtbS/J6Y9TrujpysVnO4SZwuWJOlwV+DLfgH + JYFcE/oeVej/TcoQw+zV0RkeDVA4npgOw5FWKzPlnKANF8KN598KK92jx+p60egFYyIY04MknO/cH + APZXT7tVIp1ljyHyNwMPWiwYdyVdR7IkrFQrb55lHEj4/KdHoLISe4/sQB1Yw6S9fz+A7HhF3BBkb + tNJk+jfjDL2/hNq0VP9b9zURJKSGEUTBaoAbvcWw7p7v2t7VOTB5Wb496SECAwEAAaOCAxswggMXM + A4GA1UdDwEB/wQEAwIBBjA8BgNVHSAENTAzMA8GDWCGSAGG+muBSAMKAgEwDwYNYIZIAYb6a4FIAw + oCAjAPBg1ghkgBhvprgUgDCgIDMA8GA1UdEwEB/wQFMAMBAf8wggEBBggrBgEFBQcBAQSB9DCB8TC + BnQYIKwYBBQUHMAKGgZBsZGFwOi8vZGNvbWRpcjEubWFuYWdlZC5lbnRydXN0LmNvbS9vdT1EQ29t + Um9vdENBLG91PUNlcnRpZmljYXRpb24lMjBBdXRob3JpdGllcyxvPUVudHJ1c3QsYz1VUz9jQUNlc + nRpZmljYXRlO2JpbmFyeSxjcm9zc0NlcnRpZmljYXRlUGFpcjtiaW5hcnkwTwYIKwYBBQUHMAKGQ2 + h0dHA6Ly9kY29td2ViMS5tYW5hZ2VkLmVudHJ1c3QuY29tL0FJQS9DZXJ0c0lzc3VlZFRvRENvbVJ + vb3RDQS5wN2MwggFUBgNVHR8EggFLMIIBRzCB06CB0KCBzYY4aHR0cDovL2Rjb213ZWIxLm1hbmFn + ZWQuZW50cnVzdC5jb20vQ1JMcy9EQ29tUm9vdENBMS5jcmyGgZBsZGFwOi8vZGNvbWRpcjEubWFuY + WdlZC5lbnRydXN0LmNvbS9jbj1XaW5Db21iaW5lZDEsb3U9RENvbVJvb3RDQSxvdT1DZXJ0aWZpY2 + F0aW9uJTIwQXV0aG9yaXRpZXMsbz1FbnRydXN0LGM9VVM/Y2VydGlmaWNhdGVSZXZvY2F0aW9uTGl + zdDtiaW5hcnkwb6BtoGukaTBnMQswCQYDVQQGEwJVUzEQMA4GA1UEChMHRW50cnVzdDEiMCAGA1UE + CxMZQ2VydGlmaWNhdGlvbiBBdXRob3JpdGllczETMBEGA1UECxMKRENvbVJvb3RDQTENMAsGA1UEA + xMEQ1JMMTAfBgNVHSMEGDAWgBRFx/xyHQhRD4vvL4V0iTRGDDP/JTAdBgNVHQ4EFgQUPL772AhrUk + vxP46H0dbkXXrSVX0wGQYJKoZIhvZ9B0EABAwwChsEVjcuMQMCAIEwDQYJKoZIhvcNAQEFBQADggE + BAJQrdloQCgTe0ahJyTU/fsKLzYXVGJOwnrwyof/+7emUfZS/OhKYuCfQ9w/wWLhT5SUzm9GDlUfk + YUfpL+/5joymDJO8/thcEq/k2PJepSFf7IMY8635kNz27kI9fA8JQGn7nEI8WBjX26qs7Ho7QKVkv + 6YEDuGeJwBLTGyNerDEf5n+DdMvrDmVAOs62T8uTZDb9gn/uIEGv3vaR+rs3KxvDhEr/2OFJtDWHw + PdHFOrr1pNkNWqdStwoE2/fxUfccQhLn+H5GgKLD7YT74uUCi+VFP1juV3F7jUlytgtMnnbqRIbDn + 4bMPn2HOmxdQ20amsdKX4bfosqFMepfSxWRQ= +crossCertificatePair;binary:: MIIGQaCCBj0wggY5MIIFIaADAgECAgRIwJY0MA0GCSqGSIb3 + DQEBBQUAMFgxCzAJBgNVBAYTAlVTMRAwDgYDVQQKEwdFbnRydXN0MSIwIAYDVQQLExlDZXJ0aWZpY + 
2F0aW9uIEF1dGhvcml0aWVzMRMwEQYDVQQLEwpEQ29tUm9vdENBMB4XDTA4MDkwNTE4MDQxMVoXDT + E4MDkwNTAyMTMzN1owVzELMAkGA1UEBhMCVVMxEDAOBgNVBAoTB0VudHJ1c3QxIjAgBgNVBAsTGUN + lcnRpZmljYXRpb24gQXV0aG9yaXRpZXMxEjAQBgNVBAsTCURDb21TdWJDQTCCASIwDQYJKoZIhvcN + AQEBBQADggEPADCCAQoCggEBAL+MSY0GXRSMIIm5l+bMpXvk8rlG/Rjqaw0TNZ2w+KsG6ktNWXDll + A1i1l0Fvx2qj4O/z5bNfgmUmJZFamyWOS6TkwX2C+2DspI7P3a+gVTVu+7VJkevo3Hye2Pd6bAf/+ + bfV2IhSyAOe0wW0sANyQrIjzsU1r6YBjpcT1E5QZdnzSrEYRoBhJGXf8/v+Zu21AqOZ9EpagpvmsZ + 4UI8ORFg2PV0UOmnwNkMVO21JH1sUGYfKP9JAoO8vTzgwYbDN1w5DMC7SqWBl00OF6pGGaglJ5D16 + OcopR8aZVePxj+dW+MADgEufai5CqhUKZ6CA1pa+P6c1lPcFEGgz9AQS420CAwEAAaOCAwowggMGM + A4GA1UdDwEB/wQEAwIBBjA8BgNVHSAENTAzMA8GDWCGSAGG+muBSAMKAgEwDwYNYIZIAYb6a4FIAw + oCAjAPBg1ghkgBhvprgUgDCgIDMA8GA1UdEwEB/wQFMAMBAf8wggEBBggrBgEFBQcBAQSB9DCB8TC + BnQYIKwYBBQUHMAKGgZBsZGFwOi8vZGNvbWRpcjEubWFuYWdlZC5lbnRydXN0LmNvbS9vdT1EQ29t + Um9vdENBLG91PUNlcnRpZmljYXRpb24lMjBBdXRob3JpdGllcyxvPUVudHJ1c3QsYz1VUz9jQUNlc + nRpZmljYXRlO2JpbmFyeSxjcm9zc0NlcnRpZmljYXRlUGFpcjtiaW5hcnkwTwYIKwYBBQUHMAKGQ2 + h0dHA6Ly9kY29td2ViMS5tYW5hZ2VkLmVudHJ1c3QuY29tL0FJQS9DZXJ0c0lzc3VlZFRvRENvbVJ + vb3RDQS5wN2MwggFDBgNVHR8EggE6MIIBNjCBwqCBv6CBvIaBgGxkYXA6Ly9kY29tZGlyMS5tYW5h + Z2VkLmVudHJ1c3QuY29tL291PURDb21Sb290Q0Esb3U9Q2VydGlmaWNhdGlvbiUyMEF1dGhvcml0a + WVzLG89RW50cnVzdCxjPVVTP2NlcnRpZmljYXRlUmV2b2NhdGlvbkxpc3Q7YmluYXJ5hjdodHRwOi + 8vZGNvbXdlYjEubWFuYWdlZC5lbnRydXN0LmNvbS9DUkxzL0RDb21Sb290Q0EuY3JsMG+gbaBrpGk + wZzELMAkGA1UEBhMCVVMxEDAOBgNVBAoTB0VudHJ1c3QxIjAgBgNVBAsTGUNlcnRpZmljYXRpb24g + QXV0aG9yaXRpZXMxEzARBgNVBAsTCkRDb21Sb290Q0ExDTALBgNVBAMTBENSTDEwHwYDVR0jBBgwF + oAUh1mBY1JFXsCw39HI6bl1OBAu3tkwHQYDVR0OBBYEFPQWLgG7q5AZpChCDZ3AH5yvEIYrMBkGCS + qGSIb2fQdBAAQMMAobBFY3LjEDAgCBMA0GCSqGSIb3DQEBBQUAA4IBAQCrafi2DFqdhpXtzeJpUgZ + glNOwZUBOp5thJUH7+yMcgl5Ka4JIqqNpw3ZbFPFT9Ni4IzDmJYyPgqHmgRubxFWpAHdP8SjEK7pl + 6DwDmbCAWBiq7SmSfqt502FUUyiTcZsCLi6GqE4fetej41t3NaGidqyVQXPJ26Ti2jNT4NzRnADi6 + vOzMzxMSkWH1OaHoGLtTVpIjkbJZygnSmof4+gs4M1fmH4FVTcWV6t8zbTwkH4RTYSHVX04aM4ZBp + nhMq6sk9uNL+qndpWkO7u7zr6K527kl6/t1Xr9/vnzD0ACVk/gluI7MvCUIzP55o01Rp90ZCMIMak + u0qrESgh0GXln +cACertificate;binary:: MIIGSjCCBTKgAwIBAgIESMDD4DANBgkqhkiG9w0BAQUFADBYMQswCQY + DVQQGEwJVUzEQMA4GA1UEChMHRW50cnVzdDEiMCAGA1UECxMZQ2VydGlmaWNhdGlvbiBBdXRob3Jp + dGllczETMBEGA1UECxMKRENvbVJvb3RDQTAeFw0xMDA0MjAxNDQ0MDZaFw0zMDAzMjAxNTE0MDZaM + FcxCzAJBgNVBAYTAlVTMRAwDgYDVQQKEwdFbnRydXN0MSIwIAYDVQQLExlDZXJ0aWZpY2F0aW9uIE + F1dGhvcml0aWVzMRIwEAYDVQQLEwlEQ29tU3ViQ0EwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggE + KAoIBAQDjI+POlgCfhgt2jk5+4Nad/rcxyfGFAKC1tMuFgunpXgPmKvTPhT15aPhOgDGpOPhxDX0D + AWuW1IKiTBi9tGTpLZtI6TLRrW0vyemPU67o6crFZzuEmcLliTpcFfgy34ByWBXBP6HlXo/03KEMP + s1dEZHg1QOJ6YDsORVisz5ZygDRfCjeffCivdo8fqetHoBWMiGNODJJzv3BwD2V0+7VSKdZY8h8jc + DD1osGHclXUeyJKxUK2+eZRxI+PynR6CyEnuP7EAdWMOkvX8/gOx4RdwQZG7TSZPo34wy9v4TatFT + /W/c1ESSkhhFEwWqAG73FsO6e79re1TkweVm+PekhAgMBAAGjggMbMIIDFzAOBgNVHQ8BAf8EBAMC + AQYwPAYDVR0gBDUwMzAPBg1ghkgBhvprgUgDCgIBMA8GDWCGSAGG+muBSAMKAgIwDwYNYIZIAYb6a + 4FIAwoCAzAPBgNVHRMBAf8EBTADAQH/MIIBAQYIKwYBBQUHAQEEgfQwgfEwgZ0GCCsGAQUFBzACho + GQbGRhcDovL2Rjb21kaXIxLm1hbmFnZWQuZW50cnVzdC5jb20vb3U9RENvbVJvb3RDQSxvdT1DZXJ + 0aWZpY2F0aW9uJTIwQXV0aG9yaXRpZXMsbz1FbnRydXN0LGM9VVM/Y0FDZXJ0aWZpY2F0ZTtiaW5h + cnksY3Jvc3NDZXJ0aWZpY2F0ZVBhaXI7YmluYXJ5ME8GCCsGAQUFBzAChkNodHRwOi8vZGNvbXdlY + jEubWFuYWdlZC5lbnRydXN0LmNvbS9BSUEvQ2VydHNJc3N1ZWRUb0RDb21Sb290Q0EucDdjMIIBVA + YDVR0fBIIBSzCCAUcwgdOggdCggc2GOGh0dHA6Ly9kY29td2ViMS5tYW5hZ2VkLmVudHJ1c3QuY29 + tL0NSTHMvRENvbVJvb3RDQTEuY3JshoGQbGRhcDovL2Rjb21kaXIxLm1hbmFnZWQuZW50cnVzdC5j + 
b20vY249V2luQ29tYmluZWQxLG91PURDb21Sb290Q0Esb3U9Q2VydGlmaWNhdGlvbiUyMEF1dGhvc + ml0aWVzLG89RW50cnVzdCxjPVVTP2NlcnRpZmljYXRlUmV2b2NhdGlvbkxpc3Q7YmluYXJ5MG+gba + BrpGkwZzELMAkGA1UEBhMCVVMxEDAOBgNVBAoTB0VudHJ1c3QxIjAgBgNVBAsTGUNlcnRpZmljYXR + pb24gQXV0aG9yaXRpZXMxEzARBgNVBAsTCkRDb21Sb290Q0ExDTALBgNVBAMTBENSTDEwHwYDVR0j + BBgwFoAURcf8ch0IUQ+L7y+FdIk0Rgwz/yUwHQYDVR0OBBYEFDy++9gIa1JL8T+Oh9HW5F160lV9M + BkGCSqGSIb2fQdBAAQMMAobBFY3LjEDAgCBMA0GCSqGSIb3DQEBBQUAA4IBAQCUK3ZaEAoE3tGoSc + k1P37Ci82F1RiTsJ68MqH//u3plH2UvzoSmLgn0PcP8Fi4U+UlM5vRg5VH5GFH6S/v+Y6MpgyTvP7 + YXBKv5NjyXqUhX+yDGPOt+ZDc9u5CPXwPCUBp+5xCPFgY19uqrOx6O0ClZL+mBA7hnicAS0xsjXqw + xH+Z/g3TL6w5lQDrOtk/Lk2Q2/YJ/7iBBr972kfq7Nysbw4RK/9jhSbQ1h8D3RxTq69aTZDVqnUrc + KBNv38VH3HEIS5/h+RoCiw+2E++LlAovlRT9Y7ldxe41JcrYLTJ526kSGw5+GzD59hzpsXUNtGprH + Sl+G36LKhTHqX0sVkU +cACertificate;binary:: MIIGOTCCBSGgAwIBAgIESMCWNDANBgkqhkiG9w0BAQUFADBYMQswCQY + DVQQGEwJVUzEQMA4GA1UEChMHRW50cnVzdDEiMCAGA1UECxMZQ2VydGlmaWNhdGlvbiBBdXRob3Jp + dGllczETMBEGA1UECxMKRENvbVJvb3RDQTAeFw0wODA5MDUxODA0MTFaFw0xODA5MDUwMjEzMzdaM + FcxCzAJBgNVBAYTAlVTMRAwDgYDVQQKEwdFbnRydXN0MSIwIAYDVQQLExlDZXJ0aWZpY2F0aW9uIE + F1dGhvcml0aWVzMRIwEAYDVQQLEwlEQ29tU3ViQ0EwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggE + KAoIBAQC/jEmNBl0UjCCJuZfmzKV75PK5Rv0Y6msNEzWdsPirBupLTVlw5ZQNYtZdBb8dqo+Dv8+W + zX4JlJiWRWpsljkuk5MF9gvtg7KSOz92voFU1bvu1SZHr6Nx8ntj3emwH//m31diIUsgDntMFtLAD + ckKyI87FNa+mAY6XE9ROUGXZ80qxGEaAYSRl3/P7/mbttQKjmfRKWoKb5rGeFCPDkRYNj1dFDpp8D + ZDFTttSR9bFBmHyj/SQKDvL084MGGwzdcOQzAu0qlgZdNDheqRhmoJSeQ9ejnKKUfGmVXj8Y/nVvj + AA4BLn2ouQqoVCmeggNaWvj+nNZT3BRBoM/QEEuNtAgMBAAGjggMKMIIDBjAOBgNVHQ8BAf8EBAMC + AQYwPAYDVR0gBDUwMzAPBg1ghkgBhvprgUgDCgIBMA8GDWCGSAGG+muBSAMKAgIwDwYNYIZIAYb6a + 4FIAwoCAzAPBgNVHRMBAf8EBTADAQH/MIIBAQYIKwYBBQUHAQEEgfQwgfEwgZ0GCCsGAQUFBzACho + GQbGRhcDovL2Rjb21kaXIxLm1hbmFnZWQuZW50cnVzdC5jb20vb3U9RENvbVJvb3RDQSxvdT1DZXJ + 0aWZpY2F0aW9uJTIwQXV0aG9yaXRpZXMsbz1FbnRydXN0LGM9VVM/Y0FDZXJ0aWZpY2F0ZTtiaW5h + cnksY3Jvc3NDZXJ0aWZpY2F0ZVBhaXI7YmluYXJ5ME8GCCsGAQUFBzAChkNodHRwOi8vZGNvbXdlY + jEubWFuYWdlZC5lbnRydXN0LmNvbS9BSUEvQ2VydHNJc3N1ZWRUb0RDb21Sb290Q0EucDdjMIIBQw + YDVR0fBIIBOjCCATYwgcKggb+ggbyGgYBsZGFwOi8vZGNvbWRpcjEubWFuYWdlZC5lbnRydXN0LmN + vbS9vdT1EQ29tUm9vdENBLG91PUNlcnRpZmljYXRpb24lMjBBdXRob3JpdGllcyxvPUVudHJ1c3Qs + Yz1VUz9jZXJ0aWZpY2F0ZVJldm9jYXRpb25MaXN0O2JpbmFyeYY3aHR0cDovL2Rjb213ZWIxLm1hb + mFnZWQuZW50cnVzdC5jb20vQ1JMcy9EQ29tUm9vdENBLmNybDBvoG2ga6RpMGcxCzAJBgNVBAYTAl + VTMRAwDgYDVQQKEwdFbnRydXN0MSIwIAYDVQQLExlDZXJ0aWZpY2F0aW9uIEF1dGhvcml0aWVzMRM + wEQYDVQQLEwpEQ29tUm9vdENBMQ0wCwYDVQQDEwRDUkwxMB8GA1UdIwQYMBaAFIdZgWNSRV7AsN/R + yOm5dTgQLt7ZMB0GA1UdDgQWBBT0Fi4Bu6uQGaQoQg2dwB+crxCGKzAZBgkqhkiG9n0HQQAEDDAKG + wRWNy4xAwIAgTANBgkqhkiG9w0BAQUFAAOCAQEAq2n4tgxanYaV7c3iaVIGYJTTsGVATqebYSVB+/ + sjHIJeSmuCSKqjacN2WxTxU/TYuCMw5iWMj4Kh5oEbm8RVqQB3T/EoxCu6Zeg8A5mwgFgYqu0pkn6 + redNhVFMok3GbAi4uhqhOH3rXo+NbdzWhonaslUFzyduk4tozU+Dc0ZwA4urzszM8TEpFh9Tmh6Bi + 7U1aSI5GyWcoJ0pqH+PoLODNX5h+BVU3FlerfM208JB+EU2Eh1V9OGjOGQaZ4TKurJPbjS/qp3aVp + Du7u86+iudu5Jev7dV6/f758w9AAlZP4JbiOzLwlCMz+eaNNUafdGQjCDGpLtKqxEoIdBl5Zw== +objectClass: organizationalUnit +objectClass: top +objectClass: extensibleobject +ou: binary +nsUniqueId: f49ca103-c2ee11e7-9170b029-e68fda34 +creatorsName: +modifiersName: +createTimestamp: 20171106123544Z +modifyTimestamp: 20171106123544Z + +# entry-id: 3 +dn: cn=test,ou=binary,dc=example,dc=com +userCertificate:: MIIGfzCCBWcCAQEwgYOhgYAwfqR8MHoxCzAJBgNVBAYTAlVTMRAwDgYDVQQK + EwdFbnRydXN0MSIwIAYDVQQLExlDZXJ0aWZpY2F0aW9uIEF1dGhvcml0aWVzMRIwEAYDVQQLEwlEQ + 
29tU3ViQ0ExITAfBgNVBAMTGFNQT0MgU2VydmVyIExvZ2luIFBvbGljeTBnoGUwYzBbpFkwVzELMA + kGA1UEBhMCVVMxEDAOBgNVBAoTB0VudHJ1c3QxIjAgBgNVBAsTGUNlcnRpZmljYXRpb24gQXV0aG9 + yaXRpZXMxEjAQBgNVBAsTCURDb21TdWJDQQIESMDD4DANBgkqhkiG9w0BAQUFAAIEV4eo1TAiGA8y + MDE3MTAxNTIyNDYwOVoYDzIwMTcxMTE0MjI0NjA5WjCCBBUwHwYJKoZIhvZ9B00BMRIwEAIBAAIBC + AIBCAIBCgMCAGkwFAYJKoZIhvZ9B00DMQcwBQwDQUxMMBEGCSqGSIb2fQdNBTEEAwID2DAPBgkqhk + iG9n0HTQYxAgwAMBcGCSqGSIb2fQdNCTEKDAhSU0EtMjA0ODApBgkqhkiG9n0HTQ4xHDAaDAlwcml + udGFibGUMB3RlbGV0ZXgMBHV0ZjgwEQYJKoZIhvZ9B00PMQQDAgeAMBEGCSqGSIb2fQdNFTEEAwIH + gDAQBgkqhkiG9n0HTRYxAwMBADAQBgkqhkiG9n0HTQgxAwMBADAQBgkqhkiG9n0HTSwxAwMBADAPB + gkqhkiG9n0HTQsxAjAAMBAGCSqGSIb2fQdNDDEDAwEAMBAGCSqGSIb2fQdNDTEDAgEeMA8GCSqGSI + b2fQdNEzECDAAwEAYJKoZIhvZ9B00XMQMBAQAwEQYJKoZIhvZ9B00YMQQCAgfQMBAGCSqGSIb2fQd + NHzEDAQEAMBAGCSqGSIb2fQdNJjEDAwEAMBAGCSqGSIb2fQdNGTEDAgECMBAGCSqGSIb2fQdNGzED + AQEAMBAGCSqGSIb2fQdNKTEDAQEAMBAGCSqGSIb2fQdNHDEDAgEAMBAGCSqGSIb2fQdNHTEDAgEBM + BAGCSqGSIb2fQdNIDEDAwEAMBEGCSqGSIb2fQdNITEEAwIE8DAPBgkqhkiG9n0HTSMxAgwAMA8GCS + qGSIb2fQdNJDECDAAwJAYJKoZIhvZ9B00lMRcwFQwJRGlyZWN0b3J5DANFQUIMA0dBTDAQBgkqhki + G9n0HTSsxAwMBADAPBgkqhkiG9n0HTTYxAgwAMBEGCSqGSIb2fQdNMzEEAwIHgDAPBgkqhkiG9n0H + TScxAgwAMBAGCSqGSIb2fQdNETEDAgECMBAGCSqGSIb2fQdNKDEDAgFkMBEGCiqGSIb2fQdNLQExA + wIBAzBEBgoqhkiG9n0HTS0CMTYwNAwMZW50ZWxsaWdlbmNlDAZkaXJlY3QMCHpmLWxvY2FsDAp6Zi + 1yb2FtaW5nDAZ6Zi1tc2YwFwYKKoZIhvZ9B00tAzEJDAdleGVjdXRlMBAGCSqGSIb2fQdNMTEDAQE + AMBAGCSqGSIb2fQdNMjEDAQEAMBAGCSqGSIb2fQdNOTEDAQH/MA8GCSqGSIb2fQdNLzECDAAwEAYJ + KoZIhvZ9B004MQMBAQAwEwYJKoZIhvZ9B003MQYMBENBU1QwEAYJKoZIhvZ9B007MQMBAQAwFgYJK + oZIhvZ9B009MQkMB0VudHJ1c3QwEAYJKoZIhvZ9B00+MQMBAQAwEAYJKoZIhvZ9B00/MQMBAQAwFw + YJKoZIhvZ9B00KMQoMCFJTQS0yMDQ4MBAGCSqGSIb2fQdNQzEDAQEAMCEwHwYDVR0jBBgwFoAUPL7 + 72AhrUkvxP46H0dbkXXrSVX0wDQYJKoZIhvcNAQEFBQADggEBADrezRWX0fuPC415BUa3tafMLaVO + 24v3CP+qYud4Z6IKI7jNtt2pcneaYjQ7iaxypE3N7Wwlim6Ak4yuwwJ9SrKOSe7YPiFOuugvNy2fk + +f2h3bFkLm40bkjPPH8bih4sLyU8RcN2cAJLxHINwXO3ALKBo3IdxrfcoKquO7g+R4+ZPvmS/95J9 + aQ08FZKpkv+ORPRZySkr0zMUARdBBguklHqFeczn5tQnmJcsfVlP4DC7IPqw2xM8l3b+iAH5pyqgb + o/Lk11VWkD11s3K8/Bf40eH23upDOwmYBAszHdXU4+5HNZ/An6xfVEjr/+KxUAEVD5TGQMVJY6SCS + zN3ONRc= +objectClass: top +objectClass: extensibleobject +cn: test +nsUniqueId: f49ca104-c2ee11e7-9170b029-e68fda34 +creatorsName: +modifiersName: +createTimestamp: 20171106123544Z +modifyTimestamp: 20171106123544Z + diff --git a/dirsrvtests/tests/perf/create_data.py b/dirsrvtests/tests/perf/create_data.py new file mode 100755 index 0000000..0d7e385 --- /dev/null +++ b/dirsrvtests/tests/perf/create_data.py @@ -0,0 +1,289 @@ +#!/usr/bin/python2 +from __future__ import ( + print_function, + division +) + +import sys +import math + + +class RHDSData(object): + def __init__( + self, + stream=sys.stdout, + users=10000, + groups=100, + grps_puser=20, + nest_level=10, + ngrps_puser=10, + domain="redhat.com", + basedn="dc=example,dc=com", + ): + self.users = users + self.groups = groups + self.basedn = basedn + self.domain = domain + self.stream = stream + + self.grps_puser = grps_puser + self.nest_level = nest_level + self.ngrps_puser = ngrps_puser + + self.user_defaults = { + 'objectClass': [ + 'person', + 'top', + 'inetorgperson', + 'organizationalperson', + 'inetuser', + 'posixaccount'], + 'uidNumber': ['-1'], + 'gidNumber': ['-1'], + } + + self.group_defaults = { + 'objectClass': [ + 'top', + 'inetuser', + 'posixgroup', + 'groupofnames'], + 'gidNumber': [-1], + } + + def put_entry(self, entry): + """ + Abstract method, implementation depends on if we want just print LDIF, + 
or update LDAP directly + """ + raise NotImplementedError() + + def gen_user(self, uid): + user = dict(self.user_defaults) + user['dn'] = 'uid={uid},ou=people,{suffix}'.format( + uid=uid, + suffix=self.basedn, + ) + user['uid'] = [uid] + user['displayName'] = ['{} {}'.format(uid, uid)] + user['sn'] = [uid] + user['homeDirectory'] = ['/other-home/{}'.format(uid)] + user['mail'] = ['{uid}@{domain}'.format( + uid=uid, domain=self.domain)] + user['givenName'] = [uid] + user['cn'] = ['{} {}'.format(uid, uid)] + + return user + + def username_generator(self, start, stop, step=1): + for i in range(start, stop, step): + yield 'user%s' % i + + def gen_group(self, name, members=(), group_members=()): + group = dict(self.group_defaults) + group['dn'] = 'cn={name},ou=groups,{suffix}'.format( + name=name, + suffix=self.basedn, + ) + group['cn'] = [name] + group['member'] = ['uid={uid},ou=people,{suffix}'.format( + uid=uid, + suffix=self.basedn, + ) for uid in members] + group['member'].extend( + ['cn={name},ou=groups,{suffix}'.format( + name=name, + suffix=self.basedn, + ) for name in group_members]) + return group + + def groupname_generator(self, start, stop, step=1): + for i in range(start, stop, step): + yield 'group%s' % i + + def gen_users_and_groups(self): + self.__gen_entries_with_groups( + self.users, + self.groups, + self.grps_puser, + self.ngrps_puser, + self.nest_level, + self.username_generator, self.gen_user, + self.groupname_generator, self.gen_group + ) + + def __gen_entries_with_groups( + self, + num_of_entries, + num_of_groups, + groups_per_entry, + nested_groups_per_entry, + max_nesting_level, + gen_entry_name_f, gen_entry_f, + gen_group_name_f, gen_group_f + ): + assert num_of_groups % groups_per_entry == 0 + assert num_of_groups >= groups_per_entry + assert groups_per_entry > nested_groups_per_entry + assert max_nesting_level > 0 + assert nested_groups_per_entry > 0 + assert ( + groups_per_entry - nested_groups_per_entry > + int(math.ceil(nested_groups_per_entry / float(max_nesting_level))) + ), ( + "At least {} groups is required to generate proper amount of " + "nested groups".format( + nested_groups_per_entry + + int(math.ceil( + nested_groups_per_entry / float(max_nesting_level)) + ) + ) + ) + + for uid in gen_entry_name_f(0, num_of_entries): + self.put_entry(gen_entry_f(uid)) + + # create N groups per entry, of them are nested + # User/Host (max nesting level = 2) + # | + # +--- G1 --- G2 (nested) --- G3 (nested, max level) + # | + # +--- G5 --- G6 (nested) + # | + # ...... 
+ # | + # +--- GN + + # how many members should be added to groups (set of groups_per_entry + # have the same members) + entries_per_group = num_of_entries // (num_of_groups // groups_per_entry) + + # generate groups and put users there + for i in range(num_of_groups // groups_per_entry): + + uids = list(gen_entry_name_f( + i * entries_per_group, + (i + 1) * entries_per_group + )) + + # per user + last_grp_name = None + nest_lvl = 0 + nested_groups_added = 0 + + for group_name in gen_group_name_f( + i * groups_per_entry, + (i + 1) * groups_per_entry, + ): + # create nested groups first + if nested_groups_added < nested_groups_per_entry: + if nest_lvl == 0: + # the top group + self.put_entry( + gen_group_f( + group_name, + members=uids + ) + ) + nest_lvl += 1 + nested_groups_added += 1 + elif nest_lvl == max_nesting_level: + # the last level group this group is not nested + self.put_entry( + gen_group_f( + group_name, + group_members=[last_grp_name], + ) + ) + nest_lvl = 0 + else: + # mid level group + self.put_entry( + gen_group_f( + group_name, + group_members=[last_grp_name] + ) + ) + nested_groups_added += 1 + nest_lvl += 1 + + last_grp_name = group_name + else: + # rest of groups have direct membership + if nest_lvl != 0: + # assign the last nested group if exists + self.put_entry( + gen_group_f( + group_name, + members=uids, + group_members=[last_grp_name], + ) + ) + nest_lvl = 0 + else: + self.put_entry( + gen_group_f( + group_name, + members=uids + ) + ) + + def __generate_entries_with_users_groups( + self, + num_of_entries_direct_members, + num_of_entries_indirect_members, + entries_per_user, + entries_per_group, + gen_entry_name_f, gen_entry_f, + ): + assert num_of_entries_direct_members % entries_per_user == 0 + assert num_of_entries_indirect_members % entries_per_group == 0 + + num_of_entries = num_of_entries_direct_members + num_of_entries_indirect_members + + # direct members + users_per_entry = self.users // (num_of_entries_direct_members // entries_per_user) + + start_user = 0 + stop_user = users_per_entry + for name in gen_entry_name_f(0, num_of_entries_direct_members): + self.put_entry( + gen_entry_f( + name, + user_members=self.username_generator(start_user, stop_user), + ) + ) + start_user = stop_user % self.users + stop_user = start_user + users_per_entry + stop_user = stop_user if stop_user < self.users else self.users + + groups_per_entry = self.groups // (num_of_entries_indirect_members // entries_per_group) + + # indirect members + start_group = 0 + stop_group = groups_per_entry + for name in gen_entry_name_f(num_of_entries_direct_members, num_of_entries): + self.put_entry( + gen_entry_f( + name, + usergroup_members=self.groupname_generator(start_group, stop_group), + ) + ) + start_group = stop_group % self.groups + stop_group = start_group + groups_per_entry + stop_group = stop_group if stop_group < self.groups else self.groups + + def do_magic(self): + self.gen_users_and_groups() + + +class RHDSDataLDIF(RHDSData): + def put_entry(self, entry): + print(file=self.stream) + print("dn:", entry['dn'], file=self.stream) + for k, values in entry.items(): + if k == 'dn': + continue + for v in values: + print("{}: {}".format(k, v), file=self.stream) + print(file=self.stream) diff --git a/dirsrvtests/tests/perf/memberof_test.py b/dirsrvtests/tests/perf/memberof_test.py new file mode 100755 index 0000000..6d89d93 --- /dev/null +++ b/dirsrvtests/tests/perf/memberof_test.py @@ -0,0 +1,405 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2017 Red Hat, Inc. 
+# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. +# --- END COPYRIGHT BLOCK --- +# +import pytest +import subprocess +from lib389 import Entry +from lib389.tasks import Tasks +from lib389.dseldif import DSEldif +from create_data import RHDSDataLDIF +from lib389.properties import TASK_WAIT +from lib389.utils import ldap, os, time, logging, ds_is_older +from lib389._constants import SUFFIX, DN_SCHEMA, DN_DM, DEFAULT_SUFFIX, PASSWORD, PLUGIN_MEMBER_OF, \ + PLUGIN_MANAGED_ENTRY, PLUGIN_AUTOMEMBER, DN_CONFIG_LDBM, HOST_STANDALONE, PORT_STANDALONE +from lib389.topologies import topology_st as topo + +pytestmark = pytest.mark.tier3 + +MEMOF_PLUGIN = ('cn=' + PLUGIN_MEMBER_OF + ',cn=plugins,cn=config') +MAN_ENTRY_PLUGIN = ('cn=' + PLUGIN_MANAGED_ENTRY + ',cn=plugins,cn=config') +AUTO_MEM_PLUGIN = ('cn=' + PLUGIN_AUTOMEMBER + ',cn=plugins,cn=config') +DOMAIN = 'redhat.com' +LDAP_MOD = '/usr/bin/ldapmodify' +FILTER = 'objectClass=*' +USER_FILTER = '(|(uid=user*)(cn=group*))' +MEMBEROF_ATTR = 'memberOf' +DN_ATTR = 'dn:' + +logging.basicConfig(level=logging.DEBUG) +log = logging.getLogger(__name__) + + +@pytest.fixture(scope="module") +def memberof_setup(topo, request): + """Configure required plugins and restart the server""" + + log.info('Configuring memberOf, managedEntry and autoMembers plugins and restarting the server') + topo.standalone.simple_bind_s(DN_DM, PASSWORD) + try: + topo.standalone.plugins.enable(name=PLUGIN_MEMBER_OF) + except ldap.LDAPError as e: + log.error('Failed to enable {} plugin'.format(PLUGIN_MEMBER_OF)) + raise e + try: + topo.standalone.plugins.enable(name=PLUGIN_MANAGED_ENTRY) + topo.standalone.plugins.enable(name=PLUGIN_AUTOMEMBER) + except ldap.LDAPError as e: + log.error('Failed to enable {}, {} plugins'.format(PLUGIN_MANAGED_ENTRY, PLUGIN_AUTOMEMBER)) + raise e + + log.info('Change config values for db-locks and dbcachesize to import large ldif files') + if ds_is_older('1.3.6'): + topo.standalone.stop(timeout=10) + dse_ldif = DSEldif(topo.standalone) + try: + dse_ldif.replace(DN_CONFIG_LDBM, 'nsslapd-db-locks', '100000') + dse_ldif.replace(DN_CONFIG_LDBM, 'nsslapd-dbcachesize', '10000000') + except: + log.error('Failed to replace cn=config values of db-locks and dbcachesize') + raise + topo.standalone.start(timeout=10) + else: + try: + topo.standalone.modify_s(DN_CONFIG_LDBM, [(ldap.MOD_REPLACE, 'nsslapd-db-locks', '100000')]) + topo.standalone.modify_s(DN_CONFIG_LDBM, [(ldap.MOD_REPLACE, 'nsslapd-cache-autosize', '0')]) + topo.standalone.modify_s(DN_CONFIG_LDBM, [(ldap.MOD_REPLACE, 'nsslapd-dbcachesize', '10000000')]) + except ldap.LDAPError as e: + log.error( + 'Failed to replace values of nsslapd-db-locks and nsslapd-dbcachesize {}'.format(e.message['desc'])) + raise e + topo.standalone.restart(timeout=10) + + def fin(): + log.info('Disabling plugins {}, {}, {}'.format(PLUGIN_MEMBER_OF, PLUGIN_MANAGED_ENTRY, PLUGIN_AUTOMEMBER)) + topo.standalone.simple_bind_s(DN_DM, PASSWORD) + try: + topo.standalone.plugins.disable(name=PLUGIN_MEMBER_OF) + topo.standalone.plugins.disable(name=PLUGIN_MANAGED_ENTRY) + topo.standalone.plugins.disable(name=PLUGIN_AUTOMEMBER) + except ldap.LDAPError as e: + log.error('Failed to disable plugins, {}'.format(e.message['desc'])) + assert False + topo.standalone.restart(timeout=10) + + request.addfinalizer(fin) + + +def _create_base_ldif(topo, import_base=False): + """Create base ldif file to clean entries from suffix""" + + log.info('Add base entry for online import') + ldif_dir = 
topo.standalone.get_ldif_dir() + ldif_file = os.path.join(ldif_dir, '/perf.ldif') + log.info('LDIF FILE is this: {}'.format(ldif_file)) + base_ldif = """dn: dc=example,dc=com +objectclass: top +objectclass: domain +dc: example + +dn: ou=people,dc=example,dc=com +objectclass: top +objectclass: organizationalUnit +ou: people + +dn: ou=groups,dc=example,dc=com +objectclass: top +objectclass: organizationalUnit +ou: groups +""" + with open(ldif_file, "w") as fd: + fd.write(base_ldif) + if import_base: + log.info('Adding base entry to suffix to remove users/groups and leave only the OUs') + try: + topo.standalone.tasks.importLDIF(suffix=SUFFIX, input_file=ldif_file, args={TASK_WAIT: True}) + except ValueError as e: + log.error('Online import failed' + e.message('desc')) + assert False + else: + log.info('Return LDIF file') + return ldif_file + + +def _run_fixup_memberof(topo): + """Run fixup memberOf task and measure the time taken""" + + log.info('Running fixup memberOf task and measuring the time taken') + start = time.time() + try: + topo.standalone.tasks.fixupMemberOf(suffix=SUFFIX, args={TASK_WAIT: True}) + except ValueError as e: + log.error('Running fixup MemberOf task failed' + e.message('desc')) + assert False + end = time.time() + cmd_time = int(end - start) + return cmd_time + + +def _nested_import_add_ldif(topo, nof_users, nof_groups, grps_user, ngrps_user, nof_depth, is_import=False): + """Create LDIF files for given nof users, groups and nested group levels""" + + log.info('Checking if the operation is Import or Ldapadd') + if is_import: + log.info('Import: Create base entry before adding users and groups') + exp_entries = nof_users + nof_groups + data_ldif = _create_base_ldif(topo, False) + log.info('Create data LDIF file by appending users, groups and nested groups') + with open(data_ldif, 'a') as file1: + data = RHDSDataLDIF(stream=file1, users=nof_users, groups=nof_groups, grps_puser=grps_user, + nest_level=nof_depth, ngrps_puser=ngrps_user, basedn=SUFFIX) + data.do_magic() + start = time.time() + log.info('Run importLDIF task to add entries to Server') + try: + topo.standalone.tasks.importLDIF(suffix=SUFFIX, input_file=data_ldif, args={TASK_WAIT: True}) + except ValueError as e: + log.error('Online import failed' + e.message('desc')) + assert False + end = time.time() + time_import = int(end - start) + + log.info('Check if number of entries created matches the expected entries') + users_groups = topo.standalone.search_s(SUFFIX, ldap.SCOPE_SUBTREE, USER_FILTER, [DN_ATTR]) + act_entries = str(users_groups).count(DN_ATTR) + log.info('Expected entries: {}, Actual entries: {}'.format(exp_entries, act_entries)) + assert act_entries == exp_entries + return time_import + else: + log.info('Ldapadd: Create data LDIF file with users, groups and nested groups') + ldif_dir = topo.standalone.get_ldif_dir() + data_ldif = os.path.join(ldif_dir, '/perf_add.ldif') + with open(data_ldif, 'w') as file1: + data = RHDSDataLDIF(stream=file1, users=nof_users, groups=nof_groups, grps_puser=grps_user, + nest_level=nof_depth, ngrps_puser=ngrps_user, basedn=SUFFIX) + data.do_magic() + start = time.time() + log.info('Run LDAPMODIFY to add entries to Server') + try: + subprocess.check_output( + [LDAP_MOD, '-cx', '-D', DN_DM, '-w', PASSWORD, '-h', HOST_STANDALONE, '-p', str(PORT_STANDALONE), '-af', + data_ldif]) + except subprocess.CalledProcessError as e: + log.error('LDAPMODIFY failed to add entries, error:{:s}'.format(str(e))) + raise e + end = time.time() + cmd_time = int(end - start) + log.info('Time 
taken to complete LDAPADD: {} secs'.format(cmd_time)) + return cmd_time + + +def _sync_memberof_attrs(topo, exp_memberof): + """Check if expected entries are created or attributes are synced""" + + log.info('_sync_memberof_attrs: Check if expected memberOf attributes are synced/created') + loop = 0 + start = time.time() + entries = topo.standalone.search_s(SUFFIX, ldap.SCOPE_SUBTREE, FILTER, [MEMBEROF_ATTR]) + act_memberof = str(entries).count(MEMBEROF_ATTR) + end = time.time() + cmd_time = int(end - start) + log.info('Loop-{}, expected memberOf attrs: {}, synced: {}, time for search-{} secs'.format(loop, exp_memberof, + act_memberof, cmd_time)) + while act_memberof != exp_memberof: + loop = loop + 1 + time.sleep(30) + start = time.time() + entries = topo.standalone.search_s(SUFFIX, ldap.SCOPE_SUBTREE, FILTER, [MEMBEROF_ATTR]) + act_memberof = str(entries).count(MEMBEROF_ATTR) + end = time.time() + cmd_time = cmd_time + int(end - start) + log.info('Loop-{}, expected memberOf attrs: {}, synced: {}, time for search-{} secs'.format(loop, exp_memberof, + act_memberof, + cmd_time)) + # Worst case scenario, exit the test after 10hrs of wait + if loop > 1200: + log.error('Either syncing memberOf attrs takes too long or some issue with the test itself') + assert False + sync_time = 1 + loop * 30 + log.info('Expected memberOf attrs: {}, Actual memberOf attrs: {}'.format(exp_memberof, act_memberof)) + assert act_memberof == exp_memberof + return sync_time + + +@pytest.mark.parametrize("nof_users, nof_groups, grps_user, ngrps_user, nof_depth", + [(20000, 200, 20, 10, 5), (50000, 500, 50, 10, 10), (100000, 1000, 100, 20, 20)]) +def test_nestgrps_import(topo, memberof_setup, nof_users, nof_groups, grps_user, ngrps_user, nof_depth): + """Import large users and nested groups with N depth and measure the time taken + + :id: 169a09f2-2c2d-4e42-8b90-a0bd1034f278 + :feature: MemberOf Plugin + :setup: Standalone instance, memberOf plugin enabled + :steps: 1. Create LDIF file for given nof_users and nof_groups + 2. Import entries to server + 3. Check if entries are created + 4. Run fixupMemberOf task to create memberOf attributes + 5. Check if memberOf attributes are synced for all users and groups + 6. Compare the actual no of memberOf attributes to the expected + 7. 
Measure the time taken to sync memberOf attributes + :expectedresults: MemberOf attributes should be synced + """ + + exp_memberof = (nof_users * grps_user) + ( + (nof_groups // grps_user) * (ngrps_user // nof_depth) * (nof_depth * (nof_depth + 1)) // 2) + log.info('Create nested ldif file with users-{}, groups-{}, nested-{}'.format(nof_users, nof_groups, nof_depth)) + log.info('Import LDIF file and measure the time taken') + import_time = _nested_import_add_ldif(topo, nof_users, nof_groups, grps_user, ngrps_user, nof_depth, True) + + log.info('Run fixup memberOf task and measure the time taken to complete the task') + fixup_time = _run_fixup_memberof(topo) + + log.info('Check the total number of memberOf entries created for users and groups') + sync_memberof = _sync_memberof_attrs(topo, exp_memberof) + + total_time = import_time + fixup_time + sync_memberof + log.info('Time for import-{}secs, fixup task-{}secs, total time for memberOf sync: {}secs'.format(import_time, + fixup_time, + total_time)) + + +@pytest.mark.parametrize("nof_users, nof_groups, grps_user, ngrps_user, nof_depth", + [(20000, 100, 20, 10, 5), (50000, 200, 50, 10, 10), (100000, 100, 20, 10, 10)]) +def test_nestgrps_add(topo, memberof_setup, nof_users, nof_groups, grps_user, ngrps_user, nof_depth): + """Import large users and nested groups with n depth and measure the time taken + + :id: 6eda75c6-5ae0-4b17-b610-d217d7ec7542 + :feature: MemberOf Plugin + :setup: Standalone instance, memberOf plugin enabled + :steps: 1. Create LDIF file for given nof_users and nof_groups + 2. Add entries using LDAPADD + 3. Check if entries are created + 4. Check if memberOf attributes are synced for all users and groups + 5. Compare the actual no of memberOf attributes to the expected + 6. Measure the time taken to sync memberOf attributes + :expectedresults: MemberOf attributes should be created and synced + """ + + exp_memberof = (nof_users * grps_user) + ( + (nof_groups // grps_user) * (ngrps_user // nof_depth) * (nof_depth * (nof_depth + 1)) // 2) + log.info('Creating base_ldif file and importing it to wipe out all users and groups') + _create_base_ldif(topo, True) + log.info('Create nested ldif file with users-{}, groups-{}, nested-{}'.format(nof_users, nof_groups, nof_depth)) + log.info('Run LDAPADD to add entries to Server') + add_time = _nested_import_add_ldif(topo, nof_users, nof_groups, grps_user, ngrps_user, nof_depth, False) + + log.info('Check the total number of memberOf entries created for users and groups') + sync_memberof = _sync_memberof_attrs(topo, exp_memberof) + total_time = add_time + sync_memberof + log.info('Time for ldapadd-{}secs, total time for memberOf sync: {}secs'.format(add_time, total_time)) + + +@pytest.mark.parametrize("nof_users, nof_groups, grps_user, ngrps_user, nof_depth", + [(20000, 200, 20, 10, 5), (50000, 500, 50, 10, 10), (100000, 1000, 100, 20, 20)]) +def test_mod_nestgrp(topo, memberof_setup, nof_users, nof_groups, grps_user, ngrps_user, nof_depth): + """Import bulk entries, modify nested groups at N depth and measure the time taken + + :id: 4bf8e753-6ded-4177-8225-aaf6aef4d131 + :feature: MemberOf Plugin + :setup: Standalone instance, memberOf plugin enabled + :steps: 1. Import bulk entries with nested group and create memberOf attributes + 2. Modify nested groups by adding new members at each nested level + 3. Check new memberOf attributes created for users and groups + 4. Compare the actual memberOf attributes with the expected + 5. 
Measure the time taken to sync memberOf attributes + :expectedresults: MemberOf attributes should be modified and synced + """ + + exp_memberof = (nof_users * grps_user) + ( + (nof_groups // grps_user) * (ngrps_user // nof_depth) * (nof_depth * (nof_depth + 1)) // 2) + log.info('Create nested ldif file, import it and measure the time taken') + import_time = _nested_import_add_ldif(topo, nof_users, nof_groups, grps_user, ngrps_user, nof_depth, True) + log.info('Run fixup memberOf task and measure the time to complete the task') + fixup_time = _run_fixup_memberof(topo) + sync_memberof = _sync_memberof_attrs(topo, exp_memberof) + total_time = import_time + fixup_time + sync_memberof + log.info('Time for import-{}secs, fixup task-{}secs, total time for memberOf sync: {}secs'.format(import_time, + fixup_time, + total_time)) + + log.info('Add {} users to existing nested groups at all depth level'.format(nof_groups)) + log.info('Add one user to each groups at different nest levels') + start = time.time() + for usr in range(nof_groups): + usrrdn = 'newcliusr{}'.format(usr) + userdn = 'uid={},ou=people,{}'.format(usrrdn, SUFFIX) + groupdn = 'cn=group{},ou=groups,{}'.format(usr, SUFFIX) + try: + topo.standalone.add_s(Entry((userdn, { + 'objectclass': 'top person inetUser inetOrgperson'.split(), + 'cn': usrrdn, + 'sn': usrrdn, + 'userpassword': 'Secret123'}))) + except ldap.LDAPError as e: + log.error('Failed to add {} user: error {}'.format(userdn, e.message['desc'])) + raise + try: + topo.standalone.modify_s(groupdn, [(ldap.MOD_ADD, 'member', userdn)]) + except ldap.LDAPError as e: + log.error('Error-{}: Failed to add user to group'.format(e.message['desc'])) + assert False + end = time.time() + cmd_time = int(end - start) + + exp_memberof = (nof_users * grps_user) + nof_groups + ( + (nof_groups // grps_user) * (ngrps_user // nof_depth) * (nof_depth * (nof_depth + 1))) + log.info('Check the total number of memberOf entries created for users and groups') + sync_memberof = _sync_memberof_attrs(topo, exp_memberof) + total_time = cmd_time + sync_memberof + log.info('Time taken add new members to existing nested groups + memberOf sync: {} secs'.format(total_time)) + + +@pytest.mark.parametrize("nof_users, nof_groups, grps_user, ngrps_user, nof_depth", + [(20000, 200, 20, 10, 5), (50000, 500, 50, 10, 10), (100000, 1000, 100, 20, 20)]) +def test_del_nestgrp(topo, memberof_setup, nof_users, nof_groups, grps_user, ngrps_user, nof_depth): + """Import bulk entries, delete nested groups at N depth and measure the time taken + + :id: d3d82ac5-d968-4cd6-a268-d380fc9fd51b + :feature: MemberOf Plugin + :setup: Standalone instance, memberOf plugin enabled + :steps: 1. Import bulk users and groups with nested level N. + 2. Run fixup memberOf task to create memberOf attributes + 3. Delete nested groups at nested level N + 4. Check memberOf attributes deleted for users and groups + 5. Compare the actual memberOf attributes with the expected + 6. 
Measure the time taken to sync memberOf attributes + :expectedresults: MemberOf attributes should be deleted and synced + """ + + exp_memberof = (nof_users * grps_user) + ( + (nof_groups // grps_user) * (ngrps_user // nof_depth) * (nof_depth * (nof_depth + 1)) // 2) + log.info('Create nested ldif file, import it and measure the time taken') + import_time = _nested_import_add_ldif(topo, nof_users, nof_groups, grps_user, ngrps_user, nof_depth, True) + log.info('Run fixup memberOf task and measure the time to complete the task') + fixup_time = _run_fixup_memberof(topo) + sync_memberof = _sync_memberof_attrs(topo, exp_memberof) + total_time = import_time + fixup_time + sync_memberof + log.info('Time taken to complete add users + memberOf sync: {} secs'.format(total_time)) + + log.info('Delete {} groups from nested groups at depth level-{}'.format(nof_depth, nof_depth)) + start = time.time() + for nos in range(nof_depth, nof_groups, grps_user): + groupdn = 'cn=group{},ou=groups,{}'.format(nos, SUFFIX) + try: + topo.standalone.delete_s(groupdn) + except ldap.LDAPError as e: + log.error('Error-{}: Failed to delete group'.format(e.message['desc'])) + assert False + end = time.time() + cmd_time = int(end - start) + + exp_memberof = exp_memberof - (nof_users + (nof_depth * (nof_groups // grps_user))) + log.info('Check memberOf attributes after deleting groups at depth-{}'.format(nof_depth)) + sync_memberof = _sync_memberof_attrs(topo, exp_memberof) + total_time = cmd_time + sync_memberof + log.info('Time taken to delete and sync memberOf attributes: {}secs'.format(total_time)) + + +if __name__ == '__main__': + # Run isolated + # -s for DEBUG mode + CURRENT_FILE = os.path.realpath(__file__) + pytest.main("-s {}".format(CURRENT_FILE)) diff --git a/dirsrvtests/tests/perf/search_performance_test.py b/dirsrvtests/tests/perf/search_performance_test.py new file mode 100644 index 0000000..bad54f4 --- /dev/null +++ b/dirsrvtests/tests/perf/search_performance_test.py @@ -0,0 +1,161 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2018 Red Hat, Inc. +# Copyright (C) 2019 William Brown +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. +# --- END COPYRIGHT BLOCK --- +# + +# Performance tests look different to others, they require some extra +# environmental settings. + +import ldap +import os +from lib389 import DirSrv +from lib389._constants import DEFAULT_SUFFIX + +from lib389.topologies import topology_st as topology + +from lib389.idm.domain import Domain +from lib389.idm.group import Groups +from lib389.idm.user import nsUserAccounts +from lib389.backend import Backends + +from lib389.ldclt import Ldclt +import time + +# We want to write a CSV such as: +# category,1 thread,4 thread,8 thread,16 thread +# testcategory,500,800,1000,2000 +# testcategory2,500,800,1000,2000 +TEST_MARKER = 'configured: search_performance_test.py' +# GROUP_MAX = 4000 +# USER_MAX = 6000 + +GROUP_MAX = 4000 +USER_MAX = 6000 + +TARGET_HOST = os.environ.get('PERF_TARGET_HOST', 'localhost') +TARGET_PORT = os.environ.get('PERF_TARGET_PORT', '389') + +def assert_data_present(inst): + # Do we have the backend marker? + d = Domain(inst, DEFAULT_SUFFIX) + try: + desc = d.get_attr_val_utf8('description') + if desc == TEST_MARKER: + return + except: + # Just reset everything. 
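+ # If the marker is missing, unreadable, or does not match, control falls
+ # through to a full rebuild below: drop and recreate the userRoot backend,
+ # regenerate the group and user entries, then write TEST_MARKER into the
+ # suffix description so later runs can skip this slow load.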
+ pass + # Reset the backends + bes = Backends(inst) + try: + be = bes.get(DEFAULT_SUFFIX) + be.delete() + except: + pass + + be = bes.create(properties={ + 'nsslapd-suffix': DEFAULT_SUFFIX, + 'cn': 'userRoot', + }) + be.create_sample_entries('001004002') + + # Load our data + # We can't use dbgen as that relies on local access :( + + # Add 40,000 groups + groups = Groups(inst, DEFAULT_SUFFIX) + for i in range(1,GROUP_MAX): + rdn = 'group_{0:07d}'.format(i) + groups.create(properties={ + 'cn': rdn, + }) + + # Add 60,000 users + users = nsUserAccounts(inst, DEFAULT_SUFFIX) + for i in range(1,USER_MAX): + rdn = 'user_{0:07d}'.format(i) + users.create(properties={ + 'uid': rdn, + 'cn': rdn, + 'displayName': rdn, + 'uidNumber' : '%s' % i, + 'gidNumber' : '%s' % i, + 'homeDirectory' : '/home/%s' % rdn, + 'userPassword': rdn, + }) + + # Add the marker + d.replace('description', TEST_MARKER) + # Done! + +# Single uid +# 1000 uid +# 4000 uid +# 5000 uid +# 10,000 uid + +# & of single uid +# & of two 1000 uid sets +# & of two 4000 uid sets +# & of two 5000 uid sets +# & of two 10,000 uid sets + +# | of single uid +# | of two 1000 uid sets +# | of two 4000 uid sets +# | of two 5000 uid sets +# | of two 10,000 uid sets + +# & of user and group + +# | of user and group + +def _do_search_performance(inst, thread_count): + # Configure thread count + # Restart + print("Configuring with %s threads ..." % thread_count) + time.sleep(1) + inst.config.set('nsslapd-threadnumber', str(thread_count)) + inst.restart() + ld = Ldclt(inst) + out = ld.search_loadtest(DEFAULT_SUFFIX, "(uid=user_XXXXXXX)", min=1, max=USER_MAX) + return out + +# Need a check here +def test_user_search_performance(): + inst = DirSrv(verbose=True) + inst.remote_simple_allocate( + f"ldaps://{TARGET_HOST}", + password="password" + ) + # Need a better way to set this. + inst.host = TARGET_HOST + inst.port = TARGET_PORT + inst.open(reqcert=ldap.OPT_X_TLS_NEVER) + assert_data_present(inst) + r1 = _do_search_performance(inst, 1) + # r2 = _do_search_performance(inst, 4) + # r3 = _do_search_performance(inst, 6) + # r4 = _do_search_performance(inst, 8) + # r5 = _do_search_performance(inst, 12) + r6 = _do_search_performance(inst, 16) + # print("category,t1,t4,t6,t8,t12,t16") + # print("search,%s,%s,%s,%s,%s,%s" % (r1, r2, r3, r4, r5, r6)) + +def test_group_search_performance(): + pass + +## TODO +# Tweak cache levels +# turbo mode +# ldclt threads = 2x server? +# add perf logs to each test + + + + diff --git a/dirsrvtests/tests/stress/README b/dirsrvtests/tests/stress/README new file mode 100644 index 0000000..758cad4 --- /dev/null +++ b/dirsrvtests/tests/stress/README @@ -0,0 +1,13 @@ +README for "Stress" Tests + +Reliablity Tests +============================== + +A generic high load, long running tests + +reliab7_5_test.py +------------------------------ + +This script is a light-weight version of the legacy TET stress test called "Reliabilty 15". This test consists of two MMR Masters, and a 5000 entry database. The test starts off with two threads doing unindexed searchesi(1 for each master). These do not exit untl the entire test completes. Then while the unindexed searches are going on, the test performs a set of adds, mods, deletes, and modrdns on each master at the same time. It performs this set of operations 1000 times. The main goal of this script is to test stablilty, replication convergence, and memory growth/fragmentation. 
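+
+The test ships as dirsrvtests/tests/stress/reliabilty/reliab_7_5_test.py and is
+marked tier3. To run it in isolation (assuming a host already prepared with the
+lib389 test environment and the master instance definitions from
+lib389._constants), an invocation along these lines should work:
+
+    pytest -s dirsrvtests/tests/stress/reliabilty/reliab_7_5_test.py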
+ +Known issue: the server can deadlock in the libdb4 code while performing modrdns(under investigation via https://fedorahosted.org/389/ticket/48166) diff --git a/dirsrvtests/tests/stress/__init__.py b/dirsrvtests/tests/stress/__init__.py new file mode 100644 index 0000000..40a96af --- /dev/null +++ b/dirsrvtests/tests/stress/__init__.py @@ -0,0 +1 @@ +# -*- coding: utf-8 -*- diff --git a/dirsrvtests/tests/stress/cos/cos_scale_template_test.py b/dirsrvtests/tests/stress/cos/cos_scale_template_test.py new file mode 100644 index 0000000..0d0601b --- /dev/null +++ b/dirsrvtests/tests/stress/cos/cos_scale_template_test.py @@ -0,0 +1,150 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2017 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. +# --- END COPYRIGHT BLOCK --- +# + +import pytest + +from lib389.topologies import topology_st + +from lib389.plugins import ClassOfServicePlugin +from lib389.cos import CosIndirectDefinitions, CosTemplates, CosTemplate +from lib389.idm.user import UserAccounts, TEST_USER_PROPERTIES +from lib389.idm.organizationalunit import OrganizationalUnits + +from lib389._constants import DEFAULT_SUFFIX + +import time + +pytestmark = pytest.mark.tier3 + +# Given this should complete is about 0.005, this is generous. +# For the final test with 20 templates, about 0.02 is an acceptable time. +THRESHOLD = 0.05 + +class OUCosTemplate(CosTemplate): + def __init__(self, instance, dn=None): + """Create a OU specific cos template to replicate a specific user setup. + This template provides ou attrs onto the target entry. + + :param instance: A dirsrv instance + :type instance: DirSrv + :param dn: The dn of the template + :type dn: str + """ + super(OUCosTemplate, self).__init__(instance, dn) + self._rdn_attribute = 'ou' + self._must_attributes = ['ou'] + self._create_objectclasses = [ + 'top', + 'cosTemplate', + 'organizationalUnit', + ] + +class OUCosTemplates(CosTemplates): + def __init__(self, instance, basedn, rdn=None): + """Create an OU specific cos templates to replicate a specific use setup. + This costemplates object allows access to the OUCosTemplate types. + + :param instance: A dirsrv instance + :type instance: DirSrv + :param basedn: The basedn of the templates + :type basedn: str + :param rdn: The rdn of the templates + :type rdn: str + """ + super(OUCosTemplates, self).__init__(instance, basedn, rdn) + self._objectclasses = [ + 'cosTemplate', + 'organizationalUnit', + ] + self._filterattrs = ['ou'] + self._childobject = OUCosTemplate + +def test_indirect_template_scale(topology_st): + """Test that cos templates can be added at a reasonable scale + + :id: 7cbcdf22-1f9c-4222-9e76-685fe374fc20 + :steps: + 1. Enable COS plugin + 2. Create the test user + 3. Add an indirect cos template + 4. Add a cos template + 5. Add the user to the cos template and assert it works. + 6. Add 25,000 templates to the database + 7. Search the user. It should not exceed THRESHOLD. + :expected results: + 1. It is enabled. + 2. It is created. + 3. Is is created. + 4. It is created. + 5. It is valid. + 6. They are created. + 7. It is fast. + """ + + cos_plugin = ClassOfServicePlugin(topology_st.standalone) + cos_plugin.enable() + + topology_st.standalone.restart() + + # Now create, the indirect specifier, and a user to template onto. 
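+ # Indirect CoS works by following an attribute on the target entry: the
+ # definition below names 'seeAlso' as the cosIndirectSpecifier, so whichever
+ # template DN a user's seeAlso points at supplies that user's virtual ou,
+ # description and postalCode values. The scale test therefore only has to
+ # set (and later add a second) seeAlso value to pull attributes from a
+ # template.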
+ users = UserAccounts(topology_st.standalone, DEFAULT_SUFFIX) + user = users.create(properties=TEST_USER_PROPERTIES) + + cos_inds = CosIndirectDefinitions(topology_st.standalone, DEFAULT_SUFFIX) + cos_ind = cos_inds.create(properties={ + 'cn' : 'cosIndirectDef', + 'cosIndirectSpecifier': 'seeAlso', + 'cosAttribute': [ + 'ou merge-schemes', + 'description merge-schemes', + 'postalCode merge-schemes', + ], + }) + + ous = OrganizationalUnits(topology_st.standalone, DEFAULT_SUFFIX) + ou_temp = ous.create(properties={'ou': 'templates'}) + cos_temps = OUCosTemplates(topology_st.standalone, ou_temp.dn) + + cos_temp_u = cos_temps.create(properties={ + 'ou' : 'ou_temp_u', + 'description' : 'desc_temp_u', + 'postalCode': '0' + }) + # Edit the user to add the seeAlso ... + user.set('seeAlso', cos_temp_u.dn) + + # Now create 25,0000 templates, they *don't* need to apply to the user though! + for i in range(1, 25001): + cos_temp_u = cos_temps.create(properties={ + 'ou' : 'ou_temp_%s' % i, + 'description' : 'desc_temp_%s' % i, + 'postalCode': '%s' % i + }) + + if i % 500 == 0: + start_time = time.monotonic() + u_search = users.get('testuser') + attrs = u_search.get_attr_vals_utf8('postalCode') + end_time = time.monotonic() + diff_time = end_time - start_time + assert diff_time < THRESHOLD + + if i == 10000: + # Now add our user to this template also. + user.add('seeAlso', cos_temp_u.dn) + + start_time = time.monotonic() + attrs_after = u_search.get_attr_vals_utf8('postalCode') + end_time = time.monotonic() + diff_time = end_time - start_time + assert(set(attrs) < set(attrs_after)) + assert diff_time < THRESHOLD + + + diff --git a/dirsrvtests/tests/stress/reliabilty/__init__.py b/dirsrvtests/tests/stress/reliabilty/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/dirsrvtests/tests/stress/reliabilty/reliab_7_5_test.py b/dirsrvtests/tests/stress/reliabilty/reliab_7_5_test.py new file mode 100644 index 0000000..8791e64 --- /dev/null +++ b/dirsrvtests/tests/stress/reliabilty/reliab_7_5_test.py @@ -0,0 +1,576 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2016 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. +# --- END COPYRIGHT BLOCK --- + +import sys +import time +import ldap +import logging +import pytest +import threading +import random +from lib389 import DirSrv, Entry +from lib389._constants import * +from lib389.properties import * +from lib389.tasks import * +from lib389.utils import * + +from lib389.idm.directorymanager import DirectoryManager + +pytestmark = pytest.mark.tier3 + +logging.getLogger(__name__).setLevel(logging.DEBUG) +formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s' + + ' - %(message)s') +handler = logging.StreamHandler() +handler.setFormatter(formatter) +log = logging.getLogger(__name__) +log.addHandler(handler) + +installation1_prefix = None +NUM_USERS = 5000 +MAX_PASSES = 1000 +CHECK_CONVERGENCE = True +ENABLE_VALGRIND = False +RUNNING = True + +DEBUGGING = os.getenv('DEBUGGING', default=False) + +class TopologyReplication(object): + def __init__(self, master1, master2): + master1.open() + self.master1 = master1 + master2.open() + self.master2 = master2 + + +@pytest.fixture(scope="module") +def topology(request): + global installation1_prefix + if installation1_prefix: + args_instance[SER_DEPLOYED_DIR] = installation1_prefix + + # Creating master 1... 
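+ # Everything below builds the two-master MMR topology from scratch: each
+ # instance is removed if it already exists, recreated, opened and enabled
+ # as a master with its own replica ID; agreements are then created in both
+ # directions, master1 is seeded with 2 * NUM_USERS generated entries via an
+ # online import, and master2 is initialized from master1 before the stress
+ # threads start.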
+ master1 = DirSrv(verbose=DEBUGGING) + args_instance[SER_HOST] = HOST_MASTER_1 + args_instance[SER_PORT] = PORT_MASTER_1 + args_instance[SER_SECURE_PORT] = SECUREPORT_MASTER_1 + args_instance[SER_SERVERID_PROP] = SERVERID_MASTER_1 + args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX + args_master = args_instance.copy() + master1.allocate(args_master) + instance_master1 = master1.exists() + if instance_master1: + master1.delete() + master1.create() + master1.open() + master1.replica.enableReplication(suffix=SUFFIX, role=ReplicaRole.MASTER, + replicaId=REPLICAID_MASTER_1) + + # Creating master 2... + master2 = DirSrv(verbose=DEBUGGING) + args_instance[SER_HOST] = HOST_MASTER_2 + args_instance[SER_PORT] = PORT_MASTER_2 + args_instance[SER_SECURE_PORT] = SECUREPORT_MASTER_2 + args_instance[SER_SERVERID_PROP] = SERVERID_MASTER_2 + args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX + args_master = args_instance.copy() + master2.allocate(args_master) + instance_master2 = master2.exists() + if instance_master2: + master2.delete() + master2.create() + master2.open() + master2.replica.enableReplication(suffix=SUFFIX, role=ReplicaRole.MASTER, + replicaId=REPLICAID_MASTER_2) + + # + # Create all the agreements + # + # Creating agreement from master 1 to master 2 + properties = {RA_NAME: r'meTo_$host:$port', + RA_BINDDN: defaultProperties[REPLICATION_BIND_DN], + RA_BINDPW: defaultProperties[REPLICATION_BIND_PW], + RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD], + RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]} + m1_m2_agmt = master1.agreement.create(suffix=SUFFIX, host=master2.host, + port=master2.port, + properties=properties) + if not m1_m2_agmt: + log.fatal("Fail to create a master -> master replica agreement") + sys.exit(1) + log.debug("%s created" % m1_m2_agmt) + + # Creating agreement from master 2 to master 1 + properties = {RA_NAME: r'meTo_$host:$port', + RA_BINDDN: defaultProperties[REPLICATION_BIND_DN], + RA_BINDPW: defaultProperties[REPLICATION_BIND_PW], + RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD], + RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]} + m2_m1_agmt = master2.agreement.create(suffix=SUFFIX, host=master1.host, + port=master1.port, + properties=properties) + if not m2_m1_agmt: + log.fatal("Fail to create a master -> master replica agreement") + sys.exit(1) + log.debug("%s created" % m2_m1_agmt) + + # Allow the replicas to get situated with the new agreements... 
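+ # After this short settle time an LDIF is generated and online-imported to
+ # seed master1 with the suffix entry plus NUM_USERS "master1_entry" and
+ # NUM_USERS "master2_entry" users, each padded with a random-length
+ # description so that entry sizes vary.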
+ time.sleep(5) + + # + # Import tests entries into master1 before we initialize master2 + # + ldif_dir = master1.get_ldif_dir() + + import_ldif = ldif_dir + '/rel7.5-entries.ldif' + + # First generate an ldif + try: + ldif = open(import_ldif, 'w') + except IOError as e: + log.fatal('Failed to create test ldif, error: %s - %s' % + (e.errno, e.strerror)) + assert False + + # Create the root node + ldif.write('dn: ' + DEFAULT_SUFFIX + '\n') + ldif.write('objectclass: top\n') + ldif.write('objectclass: domain\n') + ldif.write('dc: example\n') + ldif.write('\n') + + # Create the entries + idx = 0 + while idx < NUM_USERS: + count = str(idx) + ldif.write('dn: uid=master1_entry' + count + ',' + + DEFAULT_SUFFIX + '\n') + ldif.write('objectclass: top\n') + ldif.write('objectclass: person\n') + ldif.write('objectclass: inetorgperson\n') + ldif.write('objectclass: organizationalperson\n') + ldif.write('uid: master1_entry' + count + '\n') + ldif.write('cn: master1 entry' + count + '\n') + ldif.write('givenname: master1 ' + count + '\n') + ldif.write('sn: entry ' + count + '\n') + ldif.write('userpassword: master1_entry' + count + '\n') + ldif.write('description: ' + 'a' * random.randint(1, 1000) + '\n') + ldif.write('\n') + + ldif.write('dn: uid=master2_entry' + count + ',' + + DEFAULT_SUFFIX + '\n') + ldif.write('objectclass: top\n') + ldif.write('objectclass: person\n') + ldif.write('objectclass: inetorgperson\n') + ldif.write('objectclass: organizationalperson\n') + ldif.write('uid: master2_entry' + count + '\n') + ldif.write('cn: master2 entry' + count + '\n') + ldif.write('givenname: master2 ' + count + '\n') + ldif.write('sn: entry ' + count + '\n') + ldif.write('userpassword: master2_entry' + count + '\n') + ldif.write('description: ' + 'a' * random.randint(1, 1000) + '\n') + ldif.write('\n') + idx += 1 + + ldif.close() + + # Now import it + try: + master1.tasks.importLDIF(suffix=DEFAULT_SUFFIX, input_file=import_ldif, + args={TASK_WAIT: True}) + except ValueError: + log.fatal('test_reliab_7.5: Online import failed') + assert False + + # + # Initialize all the agreements + # + master1.agreement.init(SUFFIX, HOST_MASTER_2, PORT_MASTER_2) + master1.waitForReplInit(m1_m2_agmt) + + # Check replication is working... 
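+ # testReplication() serves as a quick smoke test of the new agreements,
+ # presumably by pushing a small change from master1 and waiting for it to
+ # show up on master2; the fixture aborts if convergence cannot be
+ # demonstrated before the long stress run starts.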
+ if master1.testReplication(DEFAULT_SUFFIX, master2): + log.info('Replication is working.') + else: + log.fatal('Replication is not working.') + assert False + + # Clear out the tmp dir + master1.clearTmpDir(__file__) + + # Delete each instance in the end + def fin(): + master1.delete() + master2.delete() + if ENABLE_VALGRIND: + sbin_dir = get_sbin_dir(prefix=master1.prefix) + valgrind_disable(sbin_dir) + request.addfinalizer(fin) + + return TopologyReplication(master1, master2) + + +class AddDelUsers(threading.Thread): + def __init__(self, inst, masterid): + threading.Thread.__init__(self) + self.daemon = True + self.inst = inst + self.id = masterid + + def run(self): + # Add 5000 entries + idx = 0 + RDN = 'uid=add_del_master_' + self.id + '-' + + conn = DirectoryManager(self.inst).bind() + + while idx < NUM_USERS: + USER_DN = RDN + str(idx) + ',' + DEFAULT_SUFFIX + try: + conn.add_s(Entry((USER_DN, {'objectclass': + 'top extensibleObject'.split(), + 'uid': 'user' + str(idx), + 'cn': 'g' * random.randint(1, 500) + }))) + except ldap.LDAPError as e: + log.fatal('Add users to master ' + self.id + ' failed (' + + USER_DN + ') error: ' + e.message['desc']) + idx += 1 + conn.close() + + # Delete 5000 entries + conn = DirectoryManager(self.inst).bind() + idx = 0 + while idx < NUM_USERS: + USER_DN = RDN + str(idx) + ',' + DEFAULT_SUFFIX + try: + conn.delete_s(USER_DN) + except ldap.LDAPError as e: + log.fatal('Failed to delete (' + USER_DN + ') on master ' + + self.id + ': error ' + e.message['desc']) + idx += 1 + conn.close() + + +class ModUsers(threading.Thread): + # Do mods and modrdns + def __init__(self, inst, masterid): + threading.Thread.__init__(self) + self.daemon = True + self.inst = inst + self.id = masterid + + def run(self): + # Mod existing entries + conn = DirectoryManager(self.inst).bind() + idx = 0 + while idx < NUM_USERS: + USER_DN = ('uid=master' + self.id + '_entry' + str(idx) + ',' + + DEFAULT_SUFFIX) + try: + conn.modify(USER_DN, [(ldap.MOD_REPLACE, + 'givenname', + 'new givenname master1-' + str(idx))]) + except ldap.LDAPError as e: + log.fatal('Failed to modify (' + USER_DN + ') on master ' + + self.id + ': error ' + e.message['desc']) + idx += 1 + conn.close() + + # Modrdn existing entries + conn = DirectoryManager(self.inst).bind() + idx = 0 + while idx < NUM_USERS: + USER_DN = ('uid=master' + self.id + '_entry' + str(idx) + ',' + + DEFAULT_SUFFIX) + NEW_RDN = 'cn=master' + self.id + '_entry' + str(idx) + try: + conn.rename_s(USER_DN, NEW_RDN, delold=1) + except ldap.LDAPError as e: + log.error('Failed to modrdn (' + USER_DN + ') on master ' + + self.id + ': error ' + e.message['desc']) + idx += 1 + conn.close() + + # Undo modrdn to we can rerun this test + conn = DirectoryManager(self.inst).bind() + idx = 0 + while idx < NUM_USERS: + USER_DN = ('cn=master' + self.id + '_entry' + str(idx) + ',' + + DEFAULT_SUFFIX) + NEW_RDN = 'uid=master' + self.id + '_entry' + str(idx) + try: + conn.rename_s(USER_DN, NEW_RDN, delold=1) + except ldap.LDAPError as e: + log.error('Failed to modrdn (' + USER_DN + ') on master ' + + self.id + ': error ' + e.message['desc']) + idx += 1 + conn.close() + + +class DoSearches(threading.Thread): + # Search a master + def __init__(self, inst, masterid): + threading.Thread.__init__(self) + self.daemon = True + self.inst = inst + self.id = masterid + + def run(self): + # Equality + conn = DirectoryManager(self.inst).bind() + idx = 0 + while idx < NUM_USERS: + search_filter = ('(|(uid=master' + self.id + '_entry' + str(idx) + + ')(cn=master' + 
self.id + '_entry' + str(idx) + + '))') + try: + conn.search(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE, search_filter) + except ldap.LDAPError as e: + log.fatal('Search Users: Search failed (%s): %s' % + (search_filter, e.message['desc'])) + conn.close() + return + + idx += 1 + conn.close() + + # Substring + conn = DirectoryManager(self.inst).bind() + idx = 0 + while idx < NUM_USERS: + search_filter = ('(|(uid=master' + self.id + '_entry' + str(idx) + + '*)(cn=master' + self.id + '_entry' + str(idx) + + '*))') + try: + conn.search(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE, search_filter) + except ldap.LDAPError as e: + log.fatal('Search Users: Search failed (%s): %s' % + (search_filter, e.message['desc'])) + conn.close() + return + + idx += 1 + conn.close() + + +class DoFullSearches(threading.Thread): + # Search a master + def __init__(self, inst): + threading.Thread.__init__(self) + self.daemon = True + self.inst = inst + + def run(self): + global RUNNING + conn = DirectoryManager(self.inst).bind() + while RUNNING: + time.sleep(2) + try: + conn.search_s(DEFAULT_SUFFIX, + ldap.SCOPE_SUBTREE, + 'objectclass=top') + except ldap.LDAPError as e: + log.fatal('Full Search Users: Search failed (%s): %s' % + ('objectclass=*', e.message['desc'])) + conn.close() + assert False + + conn.close() + + +def test_reliab7_5_init(topology): + ''' + Reduce entry cache - to increase the cache churn + + Then process "reliability 15" type tests + ''' + + BACKEND_DN = 'cn=userroot,cn=ldbm database,cn=plugins,cn=config' + + # Update master 1 + try: + topology.master1.modify_s(BACKEND_DN, [(ldap.MOD_REPLACE, + 'nsslapd-cachememsize', + '512000'), + (ldap.MOD_REPLACE, + 'nsslapd-cachesize', + '500')]) + except ldap.LDAPError as e: + log.fatal('Failed to set cache settings: error ' + e.message['desc']) + assert False + + # Update master 2 + try: + topology.master2.modify_s(BACKEND_DN, [(ldap.MOD_REPLACE, + 'nsslapd-cachememsize', + '512000'), + (ldap.MOD_REPLACE, + 'nsslapd-cachesize', + '500')]) + except ldap.LDAPError as e: + log.fatal('Failed to set cache settings: error ' + e.message['desc']) + assert False + + # Restart the masters to pick up the new cache settings + topology.master1.stop(timeout=10) + topology.master2.stop(timeout=10) + + # This is the time to enable valgrind (if enabled) + if ENABLE_VALGRIND: + sbin_dir = get_sbin_dir(prefix=topology.master1.prefix) + valgrind_enable(sbin_dir) + + topology.master1.start(timeout=30) + topology.master2.start(timeout=30) + + +def test_reliab7_5_run(topology): + ''' + Starting issuing adds, deletes, mods, modrdns, and searches + ''' + global RUNNING + count = 1 + RUNNING = True + + # Start some searches to run through the entire stress test + fullSearch1 = DoFullSearches(topology.master1) + fullSearch1.start() + fullSearch2 = DoFullSearches(topology.master2) + fullSearch2.start() + + while count <= MAX_PASSES: + log.info('################## Reliabilty 7.5 Pass: %d' % count) + + # Master 1 + add_del_users1 = AddDelUsers(topology.master1, '1') + add_del_users1.start() + mod_users1 = ModUsers(topology.master1, '1') + mod_users1.start() + search1 = DoSearches(topology.master1, '1') + search1.start() + + # Master 2 + add_del_users2 = AddDelUsers(topology.master2, '2') + add_del_users2.start() + mod_users2 = ModUsers(topology.master2, '2') + mod_users2.start() + search2 = DoSearches(topology.master2, '2') + search2.start() + + # Search the masters + search3 = DoSearches(topology.master1, '1') + search3.start() + search4 = DoSearches(topology.master2, '2') + search4.start() + + # 
Wait for threads to finish + log.info('################## Waiting for threads to finish...') + add_del_users1.join() + mod_users1.join() + add_del_users2.join() + mod_users2.join() + log.info('################## Update threads finished.') + search1.join() + search2.join() + search3.join() + search4.join() + log.info('################## All threads finished.') + + # Allow some time for replication to catch up before firing + # off the next round of updates + time.sleep(5) + count += 1 + + # + # Wait for replication to converge + # + if CHECK_CONVERGENCE: + # Add an entry to each master, and wait for it to replicate + MASTER1_DN = 'uid=rel7.5-master1,' + DEFAULT_SUFFIX + MASTER2_DN = 'uid=rel7.5-master2,' + DEFAULT_SUFFIX + + # Master 1 + try: + topology.master1.add_s(Entry((MASTER1_DN, {'objectclass': + ['top', + 'extensibleObject'], + 'sn': '1', + 'cn': 'user 1', + 'uid': 'rel7.5-master1', + 'userpassword': + PASSWORD}))) + except ldap.LDAPError as e: + log.fatal('Failed to add replication test entry ' + MASTER1_DN + + ': error ' + e.message['desc']) + assert False + + log.info('################## Waiting for master 2 to converge...') + + while True: + entry = None + try: + entry = topology.master2.search_s(MASTER1_DN, + ldap.SCOPE_BASE, + 'objectclass=*') + except ldap.NO_SUCH_OBJECT: + pass + except ldap.LDAPError as e: + log.fatal('Search Users: Search failed (%s): %s' % + (MASTER1_DN, e.message['desc'])) + assert False + if entry: + break + time.sleep(5) + + log.info('################## Master 2 converged.') + + # Master 2 + try: + topology.master2.add_s( + Entry((MASTER2_DN, {'objectclass': ['top', + 'extensibleObject'], + 'sn': '1', + 'cn': 'user 1', + 'uid': 'rel7.5-master2', + 'userpassword': PASSWORD}))) + except ldap.LDAPError as e: + log.fatal('Failed to add replication test entry ' + MASTER1_DN + + ': error ' + e.message['desc']) + assert False + + log.info('################## Waiting for master 1 to converge...') + while True: + entry = None + try: + entry = topology.master1.search_s(MASTER2_DN, + ldap.SCOPE_BASE, + 'objectclass=*') + except ldap.NO_SUCH_OBJECT: + pass + except ldap.LDAPError as e: + log.fatal('Search Users: Search failed (%s): %s' % + (MASTER2_DN, e.message['desc'])) + assert False + if entry: + break + time.sleep(5) + + log.info('################## Master 1 converged.') + + # Stop the full searches + RUNNING = False + fullSearch1.join() + fullSearch2.join() + + +if __name__ == '__main__': + # Run isolated + # -s for DEBUG mode + CURRENT_FILE = os.path.realpath(__file__) + pytest.main("-s %s" % CURRENT_FILE) diff --git a/dirsrvtests/tests/stress/reliabilty/reliab_conn_test.py b/dirsrvtests/tests/stress/reliabilty/reliab_conn_test.py new file mode 100644 index 0000000..70b73a2 --- /dev/null +++ b/dirsrvtests/tests/stress/reliabilty/reliab_conn_test.py @@ -0,0 +1,227 @@ +import os +import sys +import time +import ldap +import logging +import pytest +import signal +import threading +from lib389.tools import DirSrvTools +from lib389._constants import * +from lib389.properties import * +from lib389.tasks import * +from lib389.utils import * +from lib389.idm.directorymanager import DirectoryManager +from lib389.idm.user import UserAccounts +from lib389.topologies import topology_st + +pytestmark = pytest.mark.tier3 + +logging.getLogger(__name__).setLevel(logging.DEBUG) +log = logging.getLogger(__name__) + +MAX_CONNS = 10000000 +MAX_THREADS = 20 +STOP = False +HOSTNAME = DirSrvTools.getLocalhost() +PORT = 389 +NUNC_STANS = False + + +def signalHandler(signal, 
frame): + """ + handle control-C cleanly + """ + global STOP + STOP = True + sys.exit(0) + + +def init(inst): + """Set the idle timeout, and add sample entries + """ + + inst.config.set('nsslapd-idletimeout', '5') + if NUNC_STANS: + inst.config.set('nsslapd-enable-nunc-stans', 'on') + inst.restart() + + users = UserAccounts(inst, DEFAULT_SUFFIX) + for idx in range(0, 9): + user = users.create_test_user(uid=str(idx), gid=str(idx)) + user.reset_password('password') + + +class BindOnlyConn(threading.Thread): + """This class opens and closes connections + """ + def __init__(self, inst): + """Initialize the thread class with the server instance info""" + threading.Thread.__init__(self) + self.daemon = True + self.inst = inst + + def run(self): + """Keep opening and closing connections""" + idx = 0 + err_count = 0 + global STOP + while idx < MAX_CONNS and not STOP: + try: + conn = DirectoryManager(self.inst).bind(connOnly=True) + conn.unbind_s() + time.sleep(.2) + err_count = 0 + except ldap.LDAPError as e: + err_count += 1 + if err_count > 3: + log.error('BindOnlyConn exiting thread: %s' % + (str(e))) + return + time.sleep(.4) + idx += 1 + + +class IdleConn(threading.Thread): + """This class opens and closes connections + """ + def __init__(self, inst): + """Initialize the thread class with the server instance info""" + threading.Thread.__init__(self) + self.daemon = True + self.inst = inst + + def run(self): + """Assume idleTimeout is set to less than 10 seconds + """ + idx = 0 + err_count = 0 + global STOP + while idx < (MAX_CONNS / 10) and not STOP: + try: + conn = self.inst.clone() + conn.simple_bind_s('uid=test_user_0,dc=example,dc=com', 'password') + conn.search_s('dc=example,dc=com', ldap.SCOPE_SUBTREE, + 'uid=*') + time.sleep(10) + conn.search_s('dc=example,dc=com', ldap.SCOPE_SUBTREE, + 'cn=*') + conn.unbind_s() + time.sleep(.2) + err_count = 0 + except ldap.LDAPError as e: + err_count += 1 + if err_count > 3: + log.error('IdleConn exiting thread: %s' % + (str(e))) + return + time.sleep(.4) + idx += 1 + + +class LongConn(threading.Thread): + """This class opens and closes connections to a specified server + """ + def __init__(self, inst): + """Initialize the thread class with the server instance info""" + threading.Thread.__init__(self) + self.daemon = True + self.inst = inst + + def run(self): + """Assume idleTimeout is set to less than 10 seconds + """ + idx = 0 + err_count = 0 + global STOP + while idx < MAX_CONNS and not STOP: + try: + conn = self.inst.clone() + conn.search_s('dc=example,dc=com', ldap.SCOPE_SUBTREE, + 'objectclass=*') + conn.search_s('dc=example,dc=com', ldap.SCOPE_SUBTREE, + 'uid=mark') + conn.search_s('dc=example,dc=com', ldap.SCOPE_SUBTREE, + 'cn=*') + conn.search_s('', ldap.SCOPE_BASE, 'objectclass=*') + conn.unbind_s() + time.sleep(.2) + err_count = 0 + except ldap.LDAPError as e: + err_count += 1 + if err_count > 3: + log.error('LongConn search exiting thread: %s' % + (str(e))) + return + time.sleep(.4) + idx += 1 + + +def test_connection_load(topology_st): + """Send the server a variety of connections using many threads: + - Open, Bind, Close + - Open, Bind, Search, wait to trigger idletimeout, Search, Close + - Open, Bind, Search, Search, Search, Close + """ + + # setup the control-C signal handler + signal.signal(signal.SIGINT, signalHandler) + + # Set the config and add sample entries + log.info('Initializing setup...') + init(topology_st.standalone) + + # + # Bind/Unbind Conn Threads + # + log.info('Launching Bind-Only Connection threads...') + threads = 
[] + idx = 0 + while idx < MAX_THREADS: + threads.append(BindOnlyConn(topology_st.standalone)) + idx += 1 + for thread in threads: + thread.start() + time.sleep(0.1) + + # + # Idle Conn Threads + # + log.info('Launching Idle Connection threads...') + idx = 0 + idle_threads = [] + while idx < MAX_THREADS: + idle_threads.append(IdleConn(topology_st.standalone)) + idx += 1 + for thread in idle_threads: + thread.start() + time.sleep(0.1) + + # + # Long Conn Threads + # + log.info('Launching Long Connection threads...') + idx = 0 + long_threads = [] + while idx < MAX_THREADS: + long_threads.append(LongConn(topology_st.standalone)) + idx += 1 + for thread in long_threads: + thread.start() + time.sleep(0.1) + + # + # Now wait for all the threads to complete + # + log.info('Waiting for threads to finish...') + while threading.active_count() > 0: + time.sleep(1) + + log.info('Done') + + +if __name__ == '__main__': + # Run isolated + # -s for DEBUG mode + CURRENT_FILE = os.path.realpath(__file__) + pytest.main("-s %s" % CURRENT_FILE) diff --git a/dirsrvtests/tests/stress/replication/mmr_01_4m-2h-4c_test.py b/dirsrvtests/tests/stress/replication/mmr_01_4m-2h-4c_test.py new file mode 100644 index 0000000..71b5883 --- /dev/null +++ b/dirsrvtests/tests/stress/replication/mmr_01_4m-2h-4c_test.py @@ -0,0 +1,971 @@ +import os +import sys +import time +import datetime +import ldap +import logging +import pytest +import threading +from lib389._constants import * +from lib389.properties import * +from lib389.tasks import * +from lib389.utils import * +from lib389.repltools import ReplTools + +pytestmark = pytest.mark.tier3 + +logging.getLogger(__name__).setLevel(logging.INFO) +log = logging.getLogger(__name__) + +DEBUGGING = False +ADD_DEL_COUNT = 5000 +MAX_LOOPS = 5 +TEST_CONVERGE_LATENCY = True +CONVERGENCE_TIMEOUT = '60' +master_list = [] +hub_list = [] +con_list = [] +TEST_START = time.time() + +LAST_DN_IDX = ADD_DEL_COUNT - 1 +LAST_DN_M1 = 'DEL dn="uid=master_1-%d,%s' % (LAST_DN_IDX, DEFAULT_SUFFIX) +LAST_DN_M2 = 'DEL dn="uid=master_2-%d,%s' % (LAST_DN_IDX, DEFAULT_SUFFIX) +LAST_DN_M3 = 'DEL dn="uid=master_3-%d,%s' % (LAST_DN_IDX, DEFAULT_SUFFIX) +LAST_DN_M4 = 'DEL dn="uid=master_4-%d,%s' % (LAST_DN_IDX, DEFAULT_SUFFIX) + + +class TopologyReplication(object): + """The Replication Topology Class""" + def __init__(self, master1, master2, master3, master4, hub1, hub2, + consumer1, consumer2, consumer3, consumer4): + """Init""" + master1.open() + self.master1 = master1 + master2.open() + self.master2 = master2 + master3.open() + self.master3 = master3 + master4.open() + self.master4 = master4 + hub1.open() + self.hub1 = hub1 + hub2.open() + self.hub2 = hub2 + consumer1.open() + self.consumer1 = consumer1 + consumer2.open() + self.consumer2 = consumer2 + consumer3.open() + self.consumer3 = consumer3 + consumer4.open() + self.consumer4 = consumer4 + master_list.append(master1.serverid) + master_list.append(master2.serverid) + master_list.append(master3.serverid) + master_list.append(master4.serverid) + hub_list.append(hub1.serverid) + hub_list.append(hub2.serverid) + con_list.append(consumer1.serverid) + con_list.append(consumer2.serverid) + con_list.append(consumer3.serverid) + con_list.append(consumer4.serverid) + + +@pytest.fixture(scope="module") +def topology(request): + """Create Replication Deployment""" + + # Creating master 1... 
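+ # The same pattern repeats for all ten instances below: allocate from a copy
+ # of args_instance, delete any pre-existing instance, create, open, and
+ # enable replication in the appropriate role (masters and hubs get their own
+ # replica IDs, while the consumers share CONSUMER_REPLICAID).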
+ if DEBUGGING: + master1 = DirSrv(verbose=True) + else: + master1 = DirSrv(verbose=False) + args_instance[SER_HOST] = HOST_MASTER_1 + args_instance[SER_PORT] = PORT_MASTER_1 + args_instance[SER_SERVERID_PROP] = SERVERID_MASTER_1 + args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX + args_master = args_instance.copy() + master1.allocate(args_master) + instance_master1 = master1.exists() + if instance_master1: + master1.delete() + master1.create() + master1.open() + master1.replica.enableReplication(suffix=SUFFIX, role=ReplicaRole.MASTER, + replicaId=REPLICAID_MASTER_1) + + # Creating master 2... + if DEBUGGING: + master2 = DirSrv(verbose=True) + else: + master2 = DirSrv(verbose=False) + args_instance[SER_HOST] = HOST_MASTER_2 + args_instance[SER_PORT] = PORT_MASTER_2 + args_instance[SER_SERVERID_PROP] = SERVERID_MASTER_2 + args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX + args_master = args_instance.copy() + master2.allocate(args_master) + instance_master2 = master2.exists() + if instance_master2: + master2.delete() + master2.create() + master2.open() + master2.replica.enableReplication(suffix=SUFFIX, role=ReplicaRole.MASTER, + replicaId=REPLICAID_MASTER_2) + + # Creating master 3... + if DEBUGGING: + master3 = DirSrv(verbose=True) + else: + master3 = DirSrv(verbose=False) + args_instance[SER_HOST] = HOST_MASTER_3 + args_instance[SER_PORT] = PORT_MASTER_3 + args_instance[SER_SERVERID_PROP] = SERVERID_MASTER_3 + args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX + args_master = args_instance.copy() + master3.allocate(args_master) + instance_master3 = master3.exists() + if instance_master3: + master3.delete() + master3.create() + master3.open() + master3.replica.enableReplication(suffix=SUFFIX, role=ReplicaRole.MASTER, + replicaId=REPLICAID_MASTER_3) + + # Creating master 4... + if DEBUGGING: + master4 = DirSrv(verbose=True) + else: + master4 = DirSrv(verbose=False) + args_instance[SER_HOST] = HOST_MASTER_4 + args_instance[SER_PORT] = PORT_MASTER_4 + args_instance[SER_SERVERID_PROP] = SERVERID_MASTER_4 + args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX + args_master = args_instance.copy() + master4.allocate(args_master) + instance_master4 = master4.exists() + if instance_master4: + master4.delete() + master4.create() + master4.open() + master4.replica.enableReplication(suffix=SUFFIX, role=ReplicaRole.MASTER, + replicaId=REPLICAID_MASTER_4) + + # Creating hub 1... + if DEBUGGING: + hub1 = DirSrv(verbose=True) + else: + hub1 = DirSrv(verbose=False) + args_instance[SER_HOST] = HOST_HUB_1 + args_instance[SER_PORT] = PORT_HUB_1 + args_instance[SER_SERVERID_PROP] = SERVERID_HUB_1 + args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX + args_hub = args_instance.copy() + hub1.allocate(args_hub) + instance_hub1 = hub1.exists() + if instance_hub1: + hub1.delete() + hub1.create() + hub1.open() + hub1.replica.enableReplication(suffix=SUFFIX, role=ReplicaRole.HUB, + replicaId=REPLICAID_HUB_1) + + # Creating hub 2... + if DEBUGGING: + hub2 = DirSrv(verbose=True) + else: + hub2 = DirSrv(verbose=False) + args_instance[SER_HOST] = HOST_HUB_2 + args_instance[SER_PORT] = PORT_HUB_2 + args_instance[SER_SERVERID_PROP] = SERVERID_HUB_2 + args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX + args_hub = args_instance.copy() + hub2.allocate(args_hub) + instance_hub2 = hub2.exists() + if instance_hub2: + hub2.delete() + hub2.create() + hub2.open() + hub2.replica.enableReplication(suffix=SUFFIX, role=ReplicaRole.HUB, + replicaId=REPLICAID_HUB_2) + + # Creating consumer 1... 
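+ # (The four consumers below follow the same pattern with ReplicaRole.CONSUMER; note that
+ # all of them are created with the shared CONSUMER_REPLICAID value rather than a
+ # per-instance replica id.)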
+ if DEBUGGING: + consumer1 = DirSrv(verbose=True) + else: + consumer1 = DirSrv(verbose=False) + args_instance[SER_HOST] = HOST_CONSUMER_1 + args_instance[SER_PORT] = PORT_CONSUMER_1 + args_instance[SER_SERVERID_PROP] = SERVERID_CONSUMER_1 + args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX + args_consumer = args_instance.copy() + consumer1.allocate(args_consumer) + instance_consumer1 = consumer1.exists() + if instance_consumer1: + consumer1.delete() + consumer1.create() + consumer1.open() + consumer1.replica.enableReplication(suffix=SUFFIX, + role=ReplicaRole.CONSUMER, + replicaId=CONSUMER_REPLICAID) + + # Creating consumer 2... + if DEBUGGING: + consumer2 = DirSrv(verbose=True) + else: + consumer2 = DirSrv(verbose=False) + args_instance[SER_HOST] = HOST_CONSUMER_2 + args_instance[SER_PORT] = PORT_CONSUMER_2 + args_instance[SER_SERVERID_PROP] = SERVERID_CONSUMER_2 + args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX + args_consumer = args_instance.copy() + consumer2.allocate(args_consumer) + instance_consumer2 = consumer2.exists() + if instance_consumer2: + consumer2.delete() + consumer2.create() + consumer2.open() + consumer2.replica.enableReplication(suffix=SUFFIX, + role=ReplicaRole.CONSUMER, + replicaId=CONSUMER_REPLICAID) + + # Creating consumer 3... + if DEBUGGING: + consumer3 = DirSrv(verbose=True) + else: + consumer3 = DirSrv(verbose=False) + args_instance[SER_HOST] = HOST_CONSUMER_3 + args_instance[SER_PORT] = PORT_CONSUMER_3 + args_instance[SER_SERVERID_PROP] = SERVERID_CONSUMER_3 + args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX + args_consumer = args_instance.copy() + consumer3.allocate(args_consumer) + instance_consumer3 = consumer3.exists() + if instance_consumer3: + consumer3.delete() + consumer3.create() + consumer3.open() + consumer3.replica.enableReplication(suffix=SUFFIX, + role=ReplicaRole.CONSUMER, + replicaId=CONSUMER_REPLICAID) + + # Creating consumer 4... 
+ if DEBUGGING: + consumer4 = DirSrv(verbose=True) + else: + consumer4 = DirSrv(verbose=False) + args_instance[SER_HOST] = HOST_CONSUMER_4 + args_instance[SER_PORT] = PORT_CONSUMER_4 + args_instance[SER_SERVERID_PROP] = SERVERID_CONSUMER_4 + args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX + args_consumer = args_instance.copy() + consumer4.allocate(args_consumer) + instance_consumer4 = consumer4.exists() + if instance_consumer4: + consumer4.delete() + consumer4.create() + consumer4.open() + consumer4.replica.enableReplication(suffix=SUFFIX, + role=ReplicaRole.CONSUMER, + replicaId=CONSUMER_REPLICAID) + + # + # Create all the agreements + # + + # Creating agreement from master 1 to master 2 + properties = {RA_NAME: 'meTo_' + master2.host + ':' + str(master2.port), + RA_BINDDN: defaultProperties[REPLICATION_BIND_DN], + RA_BINDPW: defaultProperties[REPLICATION_BIND_PW], + RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD], + RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]} + m1_m2_agmt = master1.agreement.create(suffix=SUFFIX, host=master2.host, + port=master2.port, + properties=properties) + if not m1_m2_agmt: + log.fatal("Fail to create a master -> master replica agreement") + sys.exit(1) + log.debug("%s created" % m1_m2_agmt) + + # Creating agreement from master 1 to master 3 + properties = {RA_NAME: 'meTo_' + master3.host + ':' + str(master3.port), + RA_BINDDN: defaultProperties[REPLICATION_BIND_DN], + RA_BINDPW: defaultProperties[REPLICATION_BIND_PW], + RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD], + RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]} + m1_m3_agmt = master1.agreement.create(suffix=SUFFIX, host=master3.host, + port=master3.port, + properties=properties) + if not m1_m3_agmt: + log.fatal("Fail to create a master -> master replica agreement") + sys.exit(1) + log.debug("%s created" % m1_m3_agmt) + + # Creating agreement from master 1 to master 4 + properties = {RA_NAME: 'meTo_' + master4.host + ':' + str(master4.port), + RA_BINDDN: defaultProperties[REPLICATION_BIND_DN], + RA_BINDPW: defaultProperties[REPLICATION_BIND_PW], + RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD], + RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]} + m1_m4_agmt = master1.agreement.create(suffix=SUFFIX, host=master4.host, + port=master4.port, + properties=properties) + if not m1_m4_agmt: + log.fatal("Fail to create a master -> master replica agreement") + sys.exit(1) + log.debug("%s created" % m1_m4_agmt) + + # Creating agreement from master 1 to hub 1 + properties = {RA_NAME: 'meTo_' + hub1.host + ':' + str(hub1.port), + RA_BINDDN: defaultProperties[REPLICATION_BIND_DN], + RA_BINDPW: defaultProperties[REPLICATION_BIND_PW], + RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD], + RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]} + m1_h1_agmt = master1.agreement.create(suffix=SUFFIX, host=hub1.host, + port=hub1.port, + properties=properties) + if not m1_h1_agmt: + log.fatal("Fail to create a master -> hub replica agreement") + sys.exit(1) + log.debug("%s created" % m1_h1_agmt) + + # Creating agreement from master 1 to hub 2 + properties = {RA_NAME: 'meTo_' + hub2.host + ':' + str(hub2.port), + RA_BINDDN: defaultProperties[REPLICATION_BIND_DN], + RA_BINDPW: defaultProperties[REPLICATION_BIND_PW], + RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD], + RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]} + m1_h2_agmt = master1.agreement.create(suffix=SUFFIX, host=hub2.host, + port=hub2.port, + properties=properties) + if not 
m1_h2_agmt: + log.fatal("Fail to create a master -> hub replica agreement") + sys.exit(1) + log.debug("%s created" % m1_h2_agmt) + + # Creating agreement from master 2 to master 1 + properties = {RA_NAME: 'meTo_' + master1.host + ':' + str(master1.port), + RA_BINDDN: defaultProperties[REPLICATION_BIND_DN], + RA_BINDPW: defaultProperties[REPLICATION_BIND_PW], + RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD], + RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]} + m2_m1_agmt = master2.agreement.create(suffix=SUFFIX, host=master1.host, + port=master1.port, + properties=properties) + if not m2_m1_agmt: + log.fatal("Fail to create a master -> master replica agreement") + sys.exit(1) + log.debug("%s created" % m2_m1_agmt) + + # Creating agreement from master 2 to master 3 + properties = {RA_NAME: 'meTo_' + master3.host + ':' + str(master3.port), + RA_BINDDN: defaultProperties[REPLICATION_BIND_DN], + RA_BINDPW: defaultProperties[REPLICATION_BIND_PW], + RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD], + RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]} + m2_m3_agmt = master2.agreement.create(suffix=SUFFIX, host=master3.host, + port=master3.port, + properties=properties) + if not m2_m3_agmt: + log.fatal("Fail to create a master -> master replica agreement") + sys.exit(1) + log.debug("%s created" % m2_m3_agmt) + + # Creating agreement from master 2 to master 4 + properties = {RA_NAME: 'meTo_' + master4.host + ':' + str(master4.port), + RA_BINDDN: defaultProperties[REPLICATION_BIND_DN], + RA_BINDPW: defaultProperties[REPLICATION_BIND_PW], + RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD], + RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]} + m2_m4_agmt = master2.agreement.create(suffix=SUFFIX, host=master4.host, + port=master4.port, + properties=properties) + if not m2_m4_agmt: + log.fatal("Fail to create a master -> master replica agreement") + sys.exit(1) + log.debug("%s created" % m2_m4_agmt) + + # Creating agreement from master 2 to hub 1 + properties = {RA_NAME: 'meTo_' + hub1.host + ':' + str(hub1.port), + RA_BINDDN: defaultProperties[REPLICATION_BIND_DN], + RA_BINDPW: defaultProperties[REPLICATION_BIND_PW], + RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD], + RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]} + m2_h1_agmt = master2.agreement.create(suffix=SUFFIX, host=hub1.host, + port=hub1.port, + properties=properties) + if not m2_h1_agmt: + log.fatal("Fail to create a master -> hub replica agreement") + sys.exit(1) + log.debug("%s created" % m2_h1_agmt) + + # Creating agreement from master 2 to hub 2 + properties = {RA_NAME: 'meTo_' + hub2.host + ':' + str(hub2.port), + RA_BINDDN: defaultProperties[REPLICATION_BIND_DN], + RA_BINDPW: defaultProperties[REPLICATION_BIND_PW], + RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD], + RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]} + m2_h2_agmt = master2.agreement.create(suffix=SUFFIX, host=hub2.host, + port=hub2.port, + properties=properties) + if not m2_h2_agmt: + log.fatal("Fail to create a master -> hub replica agreement") + sys.exit(1) + log.debug("%s created" % m2_h2_agmt) + + # Creating agreement from master 3 to master 1 + properties = {RA_NAME: 'meTo_' + master1.host + ':' + str(master1.port), + RA_BINDDN: defaultProperties[REPLICATION_BIND_DN], + RA_BINDPW: defaultProperties[REPLICATION_BIND_PW], + RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD], + RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]} + m3_m1_agmt = 
master3.agreement.create(suffix=SUFFIX, host=master1.host, + port=master1.port, + properties=properties) + if not m3_m1_agmt: + log.fatal("Fail to create a master -> master replica agreement") + sys.exit(1) + log.debug("%s created" % m3_m1_agmt) + + # Creating agreement from master 3 to master 2 + properties = {RA_NAME: 'meTo_' + master2.host + ':' + str(master2.port), + RA_BINDDN: defaultProperties[REPLICATION_BIND_DN], + RA_BINDPW: defaultProperties[REPLICATION_BIND_PW], + RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD], + RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]} + m3_m2_agmt = master3.agreement.create(suffix=SUFFIX, host=master2.host, + port=master2.port, + properties=properties) + if not m3_m2_agmt: + log.fatal("Fail to create a master -> master replica agreement") + sys.exit(1) + log.debug("%s created" % m3_m2_agmt) + + # Creating agreement from master 3 to master 4 + properties = {RA_NAME: 'meTo_' + master4.host + ':' + str(master4.port), + RA_BINDDN: defaultProperties[REPLICATION_BIND_DN], + RA_BINDPW: defaultProperties[REPLICATION_BIND_PW], + RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD], + RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]} + m3_m4_agmt = master3.agreement.create(suffix=SUFFIX, host=master4.host, + port=master4.port, + properties=properties) + if not m3_m4_agmt: + log.fatal("Fail to create a master -> master replica agreement") + sys.exit(1) + log.debug("%s created" % m3_m4_agmt) + + # Creating agreement from master 3 to hub 1 + properties = {RA_NAME: 'meTo_' + hub1.host + ':' + str(hub1.port), + RA_BINDDN: defaultProperties[REPLICATION_BIND_DN], + RA_BINDPW: defaultProperties[REPLICATION_BIND_PW], + RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD], + RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]} + m3_h1_agmt = master3.agreement.create(suffix=SUFFIX, host=hub1.host, + port=hub1.port, + properties=properties) + if not m3_h1_agmt: + log.fatal("Fail to create a master -> hub replica agreement") + sys.exit(1) + log.debug("%s created" % m3_h1_agmt) + + # Creating agreement from master 3 to hub 2 + properties = {RA_NAME: 'meTo_' + hub2.host + ':' + str(hub2.port), + RA_BINDDN: defaultProperties[REPLICATION_BIND_DN], + RA_BINDPW: defaultProperties[REPLICATION_BIND_PW], + RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD], + RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]} + m3_h2_agmt = master3.agreement.create(suffix=SUFFIX, host=hub2.host, + port=hub2.port, + properties=properties) + if not m3_h2_agmt: + log.fatal("Fail to create a master -> hub replica agreement") + sys.exit(1) + log.debug("%s created" % m3_h2_agmt) + + # Creating agreement from master 4 to master 1 + properties = {RA_NAME: 'meTo_' + master1.host + ':' + str(master1.port), + RA_BINDDN: defaultProperties[REPLICATION_BIND_DN], + RA_BINDPW: defaultProperties[REPLICATION_BIND_PW], + RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD], + RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]} + m4_m1_agmt = master4.agreement.create(suffix=SUFFIX, host=master1.host, + port=master1.port, + properties=properties) + if not m4_m1_agmt: + log.fatal("Fail to create a master -> master replica agreement") + sys.exit(1) + log.debug("%s created" % m4_m1_agmt) + + # Creating agreement from master 4 to master 2 + properties = {RA_NAME: 'meTo_' + master2.host + ':' + str(master2.port), + RA_BINDDN: defaultProperties[REPLICATION_BIND_DN], + RA_BINDPW: defaultProperties[REPLICATION_BIND_PW], + RA_METHOD: 
defaultProperties[REPLICATION_BIND_METHOD], + RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]} + m4_m2_agmt = master4.agreement.create(suffix=SUFFIX, host=master2.host, + port=master2.port, + properties=properties) + if not m4_m2_agmt: + log.fatal("Fail to create a master -> master replica agreement") + sys.exit(1) + log.debug("%s created" % m4_m2_agmt) + + # Creating agreement from master 4 to master 3 + properties = {RA_NAME: 'meTo_' + master3.host + ':' + str(master3.port), + RA_BINDDN: defaultProperties[REPLICATION_BIND_DN], + RA_BINDPW: defaultProperties[REPLICATION_BIND_PW], + RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD], + RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]} + m4_m3_agmt = master4.agreement.create(suffix=SUFFIX, host=master3.host, + port=master3.port, + properties=properties) + if not m4_m3_agmt: + log.fatal("Fail to create a master -> master replica agreement") + sys.exit(1) + log.debug("%s created" % m4_m3_agmt) + + # Creating agreement from master 4 to hub 1 + properties = {RA_NAME: 'meTo_' + hub1.host + ':' + str(hub1.port), + RA_BINDDN: defaultProperties[REPLICATION_BIND_DN], + RA_BINDPW: defaultProperties[REPLICATION_BIND_PW], + RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD], + RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]} + m4_h1_agmt = master4.agreement.create(suffix=SUFFIX, host=hub1.host, + port=hub1.port, + properties=properties) + if not m4_h1_agmt: + log.fatal("Fail to create a master -> hub replica agreement") + sys.exit(1) + log.debug("%s created" % m4_h1_agmt) + + # Creating agreement from master 4 to hub 2 + properties = {RA_NAME: 'meTo_' + hub2.host + ':' + str(hub2.port), + RA_BINDDN: defaultProperties[REPLICATION_BIND_DN], + RA_BINDPW: defaultProperties[REPLICATION_BIND_PW], + RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD], + RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]} + m4_h2_agmt = master4.agreement.create(suffix=SUFFIX, host=hub2.host, + port=hub2.port, + properties=properties) + if not m4_h2_agmt: + log.fatal("Fail to create a master -> hub replica agreement") + sys.exit(1) + log.debug("%s created" % m4_h2_agmt) + + # Creating agreement from hub 1 to consumer 1 + properties = {RA_NAME: 'me2_' + consumer1.host + ':' + str(consumer1.port), + RA_BINDDN: defaultProperties[REPLICATION_BIND_DN], + RA_BINDPW: defaultProperties[REPLICATION_BIND_PW], + RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD], + RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]} + h1_c1_agmt = hub1.agreement.create(suffix=SUFFIX, host=consumer1.host, + port=consumer1.port, + properties=properties) + if not h1_c1_agmt: + log.fatal("Fail to create a hub -> consumer replica agreement") + sys.exit(1) + log.debug("%s created" % h1_c1_agmt) + + # Creating agreement from hub 1 to consumer 2 + properties = {RA_NAME: 'me2_' + consumer2.host + ':' + str(consumer2.port), + RA_BINDDN: defaultProperties[REPLICATION_BIND_DN], + RA_BINDPW: defaultProperties[REPLICATION_BIND_PW], + RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD], + RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]} + h1_c2_agmt = hub1.agreement.create(suffix=SUFFIX, host=consumer2.host, + port=consumer2.port, + properties=properties) + if not h1_c2_agmt: + log.fatal("Fail to create a hub -> consumer replica agreement") + sys.exit(1) + log.debug("%s created" % h1_c2_agmt) + + # Creating agreement from hub 1 to consumer 3 + properties = {RA_NAME: 'me2_' + consumer3.host + ':' + str(consumer3.port), + RA_BINDDN: 
defaultProperties[REPLICATION_BIND_DN], + RA_BINDPW: defaultProperties[REPLICATION_BIND_PW], + RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD], + RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]} + h1_c3_agmt = hub1.agreement.create(suffix=SUFFIX, host=consumer3.host, + port=consumer3.port, + properties=properties) + if not h1_c3_agmt: + log.fatal("Fail to create a hub -> consumer replica agreement") + sys.exit(1) + log.debug("%s created" % h1_c3_agmt) + + # Creating agreement from hub 1 to consumer 4 + properties = {RA_NAME: 'me2_' + consumer4.host + ':' + str(consumer4.port), + RA_BINDDN: defaultProperties[REPLICATION_BIND_DN], + RA_BINDPW: defaultProperties[REPLICATION_BIND_PW], + RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD], + RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]} + h1_c4_agmt = hub1.agreement.create(suffix=SUFFIX, host=consumer4.host, + port=consumer4.port, + properties=properties) + if not h1_c4_agmt: + log.fatal("Fail to create a hub -> consumer replica agreement") + sys.exit(1) + log.debug("%s created" % h1_c4_agmt) + + # Creating agreement from hub 2 to consumer 1 + properties = {RA_NAME: 'me2_' + consumer1.host + ':' + str(consumer1.port), + RA_BINDDN: defaultProperties[REPLICATION_BIND_DN], + RA_BINDPW: defaultProperties[REPLICATION_BIND_PW], + RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD], + RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]} + h2_c1_agmt = hub2.agreement.create(suffix=SUFFIX, host=consumer1.host, + port=consumer1.port, + properties=properties) + if not h2_c1_agmt: + log.fatal("Fail to create a hub -> consumer replica agreement") + sys.exit(1) + log.debug("%s created" % h2_c1_agmt) + + # Creating agreement from hub 2 to consumer 2 + properties = {RA_NAME: 'me2_' + consumer2.host + ':' + str(consumer2.port), + RA_BINDDN: defaultProperties[REPLICATION_BIND_DN], + RA_BINDPW: defaultProperties[REPLICATION_BIND_PW], + RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD], + RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]} + h2_c2_agmt = hub2.agreement.create(suffix=SUFFIX, host=consumer2.host, + port=consumer2.port, + properties=properties) + if not h2_c2_agmt: + log.fatal("Fail to create a hub -> consumer replica agreement") + sys.exit(1) + log.debug("%s created" % h2_c2_agmt) + + # Creating agreement from hub 2 to consumer 3 + properties = {RA_NAME: 'me2_' + consumer3.host + ':' + str(consumer3.port), + RA_BINDDN: defaultProperties[REPLICATION_BIND_DN], + RA_BINDPW: defaultProperties[REPLICATION_BIND_PW], + RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD], + RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]} + h2_c3_agmt = hub2.agreement.create(suffix=SUFFIX, host=consumer3.host, + port=consumer3.port, + properties=properties) + if not h2_c3_agmt: + log.fatal("Fail to create a hub -> consumer replica agreement") + sys.exit(1) + log.debug("%s created" % h2_c3_agmt) + + # Creating agreement from hub 2 to consumer 4 + properties = {RA_NAME: 'me2_' + consumer4.host + ':' + str(consumer4.port), + RA_BINDDN: defaultProperties[REPLICATION_BIND_DN], + RA_BINDPW: defaultProperties[REPLICATION_BIND_PW], + RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD], + RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]} + h2_c4_agmt = hub2.agreement.create(suffix=SUFFIX, host=consumer4.host, + port=consumer4.port, + properties=properties) + if not h2_c4_agmt: + log.fatal("Fail to create a hub -> consumer replica agreement") + sys.exit(1) + log.debug("%s created" % h2_c4_agmt) + + # 
Allow the replicas to get situated with the new agreements... + time.sleep(5) + + # + # Initialize all the agreements + # + master1.agreement.init(SUFFIX, HOST_MASTER_2, PORT_MASTER_2) + master1.waitForReplInit(m1_m2_agmt) + master1.agreement.init(SUFFIX, HOST_MASTER_3, PORT_MASTER_3) + master1.waitForReplInit(m1_m3_agmt) + master1.agreement.init(SUFFIX, HOST_MASTER_4, PORT_MASTER_4) + master1.waitForReplInit(m1_m4_agmt) + master1.agreement.init(SUFFIX, HOST_HUB_1, PORT_HUB_1) + master1.waitForReplInit(m1_h1_agmt) + hub1.agreement.init(SUFFIX, HOST_CONSUMER_1, PORT_CONSUMER_1) + hub1.waitForReplInit(h1_c1_agmt) + hub1.agreement.init(SUFFIX, HOST_CONSUMER_2, PORT_CONSUMER_2) + hub1.waitForReplInit(h1_c2_agmt) + hub1.agreement.init(SUFFIX, HOST_CONSUMER_3, PORT_CONSUMER_3) + hub1.waitForReplInit(h1_c3_agmt) + hub1.agreement.init(SUFFIX, HOST_CONSUMER_4, PORT_CONSUMER_4) + hub1.waitForReplInit(h1_c4_agmt) + master1.agreement.init(SUFFIX, HOST_HUB_2, PORT_HUB_2) + master1.waitForReplInit(m1_h2_agmt) + + # Check replication is working... + if master1.testReplication(DEFAULT_SUFFIX, consumer1): + log.info('Replication is working.') + else: + log.fatal('Replication is not working.') + assert False + + def fin(): + """If we are debugging just stop the instances, otherwise remove + them + """ + if DEBUGGING: + master1.stop() + master2.stop() + master3.stop() + master4.stop() + hub1.stop() + hub2.stop() + consumer1.stop() + consumer2.stop() + consumer3.stop() + consumer4.stop() + else: + master1.delete() + master2.delete() + master3.delete() + master4.delete() + hub1.delete() + hub2.delete() + consumer1.delete() + consumer2.delete() + consumer3.delete() + consumer4.delete() + request.addfinalizer(fin) + + return TopologyReplication(master1, master2, master3, master4, hub1, hub2, + consumer1, consumer2, consumer3, consumer4) + + +class AddDelUsers(threading.Thread): + """Add's and delets 50000 entries""" + def __init__(self, inst): + """ + Initialize the thread + """ + threading.Thread.__init__(self) + self.daemon = True + self.inst = inst + self.name = inst.serverid + + def run(self): + """ + Start adding users + """ + idx = 0 + + log.info('AddDelUsers (%s) Adding and deleting %d entries...' 
% + (self.name, ADD_DEL_COUNT)) + + while idx < ADD_DEL_COUNT: + RDN_VAL = ('uid=%s-%d' % (self.name, idx)) + USER_DN = ('%s,%s' % (RDN_VAL, DEFAULT_SUFFIX)) + + try: + self.inst.add_s(Entry((USER_DN, {'objectclass': + 'top extensibleObject'.split(), + 'uid': RDN_VAL}))) + except ldap.LDAPError as e: + log.fatal('AddDelUsers (%s): failed to add (%s) error: %s' % + (self.name, USER_DN, str(e))) + assert False + + try: + self.inst.delete_s(USER_DN) + except ldap.LDAPError as e: + log.fatal('AddDelUsers (%s): failed to delete (%s) error: %s' % + (self.name, USER_DN, str(e))) + assert False + + idx += 1 + + log.info('AddDelUsers (%s) - Finished at: %s' % + (self.name, getDateTime())) + + +def measureConvergence(topology): + """Find and measure the convergence of entries from each master + """ + + replicas = [topology.master1, topology.master2, topology.master3, + topology.master4, topology.hub1, topology.hub2, + topology.consumer1, topology.consumer2, topology.consumer3, + topology.consumer4] + + if ADD_DEL_COUNT > 10: + interval = int(ADD_DEL_COUNT / 10) + else: + interval = 1 + + for master in [('1', topology.master1), + ('2', topology.master2), + ('3', topology.master3), + ('4', topology.master4)]: + # Start with the first entry + entries = ['ADD dn="uid=master_%s-0,%s' % + (master[0], DEFAULT_SUFFIX)] + + # Add incremental entries to the list + idx = interval + while idx < ADD_DEL_COUNT: + entries.append('ADD dn="uid=master_%s-%d,%s' % + (master[0], idx, DEFAULT_SUFFIX)) + idx += interval + + # Add the last entry to the list (if it was not already added) + if idx != (ADD_DEL_COUNT - 1): + entries.append('ADD dn="uid=master_%s-%d,%s' % + (master[0], (ADD_DEL_COUNT - 1), + DEFAULT_SUFFIX)) + + ReplTools.replConvReport(DEFAULT_SUFFIX, entries, master[1], replicas) + + +def test_MMR_Integrity(topology): + """Apply load to 4 masters at the same time. Perform adds and deletes. + If any updates are missed we will see an error 32 in the access logs or + we will have entries left over once the test completes. + """ + loop = 0 + + ALL_REPLICAS = [topology.master1, topology.master2, topology.master3, + topology.master4, + topology.hub1, topology.hub2, + topology.consumer1, topology.consumer2, + topology.consumer3, topology.consumer4] + + if TEST_CONVERGE_LATENCY: + try: + for inst in ALL_REPLICAS: + replica = inst.replicas.get(DEFAULT_SUFFIX) + replica.set('nsds5ReplicaReleaseTimeout', CONVERGENCE_TIMEOUT) + except ldap.LDAPError as e: + log.fatal('Failed to set replicas release timeout - error: %s' % + (str(e))) + assert False + + if DEBUGGING: + # Enable Repl logging, and increase the max logs + try: + for inst in ALL_REPLICAS: + inst.enableReplLogging() + inst.modify_s("cn=config", [(ldap.MOD_REPLACE, + 'nsslapd-errorlog-maxlogsperdir', + '5')]) + except ldap.LDAPError as e: + log.fatal('Failed to set max logs - error: %s' % (str(e))) + assert False + + while loop < MAX_LOOPS: + # Remove the current logs so we have a clean set of logs to check. + log.info('Pass %d...' 
% (loop + 1)) + log.info("Removing logs...") + for inst in ALL_REPLICAS: + inst.deleteAllLogs() + + # Fire off 4 threads to apply the load + log.info("Start adding/deleting: " + getDateTime()) + startTime = time.time() + add_del_m1 = AddDelUsers(topology.master1) + add_del_m1.start() + add_del_m2 = AddDelUsers(topology.master2) + add_del_m2.start() + add_del_m3 = AddDelUsers(topology.master3) + add_del_m3.start() + add_del_m4 = AddDelUsers(topology.master4) + add_del_m4.start() + + # Wait for threads to finish sending their updates + add_del_m1.join() + add_del_m2.join() + add_del_m3.join() + add_del_m4.join() + log.info("Finished adding/deleting entries: " + getDateTime()) + + # + # Loop checking for error 32's, and for convergence to complete + # + log.info("Waiting for replication to converge...") + while True: + # First check for error 32's + for inst in ALL_REPLICAS: + if inst.searchAccessLog(" err=32 "): + log.fatal('An add was missed on: ' + inst.serverid) + assert False + + # Next check to see if the last update is in the access log + converged = True + for inst in ALL_REPLICAS: + if not inst.searchAccessLog(LAST_DN_M1) or \ + not inst.searchAccessLog(LAST_DN_M2) or \ + not inst.searchAccessLog(LAST_DN_M3) or \ + not inst.searchAccessLog(LAST_DN_M4): + converged = False + break + + if converged: + elapsed_tm = int(time.time() - startTime) + convtime = str(datetime.timedelta(seconds=elapsed_tm)) + log.info('Replication converged at: ' + getDateTime() + + ' - Elapsed Time: ' + convtime) + break + else: + # Check if replication is idle + replicas = [topology.master1, topology.master2, + topology.master3, topology.master4, + topology.hub1, topology.hub2] + if ReplTools.replIdle(replicas, DEFAULT_SUFFIX): + # Replication is idle - wait 30 secs for access log buffer + time.sleep(30) + + # Now check the access log again... + converged = True + for inst in ALL_REPLICAS: + if not inst.searchAccessLog(LAST_DN_M1) or \ + not inst.searchAccessLog(LAST_DN_M2) or \ + not inst.searchAccessLog(LAST_DN_M3) or \ + not inst.searchAccessLog(LAST_DN_M4): + converged = False + break + + if converged: + elapsed_tm = int(time.time() - startTime) + convtime = str(datetime.timedelta(seconds=elapsed_tm)) + log.info('Replication converged at: ' + getDateTime() + + ' - Elapsed Time: ' + convtime) + break + else: + log.fatal('Stopping replication check: ' + + getDateTime()) + log.fatal('Failure: Replication is complete, but we ' + + 'never converged.') + assert False + + # Sleep a bit before the next pass + time.sleep(3) + + # + # Finally check the CSN's + # + log.info("Check the CSN's...") + if not ReplTools.checkCSNs(ALL_REPLICAS): + assert False + log.info("All CSN's present and accounted for.") + + # + # Print the convergence report + # + log.info('Measuring convergence...') + measureConvergence(topology) + + # + # Test complete + # + log.info('No lingering entries.') + log.info('Pass %d complete.' 
% (loop + 1)) + elapsed_tm = int(time.time() - TEST_START) + convtime = str(datetime.timedelta(seconds=elapsed_tm)) + log.info('Entire test ran for: ' + convtime) + + loop += 1 + + log.info('Test PASSED') + + +if __name__ == '__main__': + # Run isolated + # -s for DEBUG mode + CURRENT_FILE = os.path.realpath(__file__) + pytest.main("-s %s" % CURRENT_FILE) diff --git a/dirsrvtests/tests/stress/replication/mmr_01_4m_test.py b/dirsrvtests/tests/stress/replication/mmr_01_4m_test.py new file mode 100644 index 0000000..782403d --- /dev/null +++ b/dirsrvtests/tests/stress/replication/mmr_01_4m_test.py @@ -0,0 +1,574 @@ +import os +import sys +import time +import datetime +import ldap +import logging +import pytest +import threading +from lib389._constants import * +from lib389.properties import * +from lib389.tasks import * +from lib389.utils import * +from lib389.repltools import ReplTools + +pytestmark = pytest.mark.tier3 + +logging.getLogger(__name__).setLevel(logging.DEBUG) +log = logging.getLogger(__name__) + +DEBUGGING = False +ADD_DEL_COUNT = 50000 +MAX_LOOPS = 2 +TEST_CONVERGE_LATENCY = True +CONVERGENCE_TIMEOUT = '60' +master_list = [] +hub_list = [] +con_list = [] +TEST_START = time.time() + +LAST_DN_IDX = ADD_DEL_COUNT - 1 +LAST_DN_M1 = 'DEL dn="uid=master_1-%d,%s' % (LAST_DN_IDX, DEFAULT_SUFFIX) +LAST_DN_M2 = 'DEL dn="uid=master_2-%d,%s' % (LAST_DN_IDX, DEFAULT_SUFFIX) +LAST_DN_M3 = 'DEL dn="uid=master_3-%d,%s' % (LAST_DN_IDX, DEFAULT_SUFFIX) +LAST_DN_M4 = 'DEL dn="uid=master_4-%d,%s' % (LAST_DN_IDX, DEFAULT_SUFFIX) + + +class TopologyReplication(object): + """The Replication Topology Class""" + def __init__(self, master1, master2, master3, master4): + """Init""" + master1.open() + self.master1 = master1 + master2.open() + self.master2 = master2 + master3.open() + self.master3 = master3 + master4.open() + self.master4 = master4 + + +@pytest.fixture(scope="module") +def topology(request): + """Create Replication Deployment""" + + # Creating master 1... + if DEBUGGING: + master1 = DirSrv(verbose=True) + else: + master1 = DirSrv(verbose=False) + args_instance[SER_HOST] = HOST_MASTER_1 + args_instance[SER_PORT] = PORT_MASTER_1 + args_instance[SER_SERVERID_PROP] = SERVERID_MASTER_1 + args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX + args_master = args_instance.copy() + master1.allocate(args_master) + instance_master1 = master1.exists() + if instance_master1: + master1.delete() + master1.create() + master1.open() + master1.replica.enableReplication(suffix=SUFFIX, role=ReplicaRole.MASTER, + replicaId=REPLICAID_MASTER_1) + + # Creating master 2... + if DEBUGGING: + master2 = DirSrv(verbose=True) + else: + master2 = DirSrv(verbose=False) + args_instance[SER_HOST] = HOST_MASTER_2 + args_instance[SER_PORT] = PORT_MASTER_2 + args_instance[SER_SERVERID_PROP] = SERVERID_MASTER_2 + args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX + args_master = args_instance.copy() + master2.allocate(args_master) + instance_master2 = master2.exists() + if instance_master2: + master2.delete() + master2.create() + master2.open() + master2.replica.enableReplication(suffix=SUFFIX, role=ReplicaRole.MASTER, + replicaId=REPLICAID_MASTER_2) + + # Creating master 3... 
+ if DEBUGGING: + master3 = DirSrv(verbose=True) + else: + master3 = DirSrv(verbose=False) + args_instance[SER_HOST] = HOST_MASTER_3 + args_instance[SER_PORT] = PORT_MASTER_3 + args_instance[SER_SERVERID_PROP] = SERVERID_MASTER_3 + args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX + args_master = args_instance.copy() + master3.allocate(args_master) + instance_master3 = master3.exists() + if instance_master3: + master3.delete() + master3.create() + master3.open() + master3.replica.enableReplication(suffix=SUFFIX, role=ReplicaRole.MASTER, + replicaId=REPLICAID_MASTER_3) + + # Creating master 4... + if DEBUGGING: + master4 = DirSrv(verbose=True) + else: + master4 = DirSrv(verbose=False) + args_instance[SER_HOST] = HOST_MASTER_4 + args_instance[SER_PORT] = PORT_MASTER_4 + args_instance[SER_SERVERID_PROP] = SERVERID_MASTER_4 + args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX + args_master = args_instance.copy() + master4.allocate(args_master) + instance_master4 = master4.exists() + if instance_master4: + master4.delete() + master4.create() + master4.open() + master4.replica.enableReplication(suffix=SUFFIX, role=ReplicaRole.MASTER, + replicaId=REPLICAID_MASTER_4) + + # + # Create all the agreements + # + # Creating agreement from master 1 to master 2 + properties = {RA_NAME: 'meTo_' + master2.host + ':' + str(master2.port), + RA_BINDDN: defaultProperties[REPLICATION_BIND_DN], + RA_BINDPW: defaultProperties[REPLICATION_BIND_PW], + RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD], + RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]} + m1_m2_agmt = master1.agreement.create(suffix=SUFFIX, host=master2.host, + port=master2.port, + properties=properties) + if not m1_m2_agmt: + log.fatal("Fail to create a master -> master replica agreement") + sys.exit(1) + log.debug("%s created" % m1_m2_agmt) + + # Creating agreement from master 1 to master 3 + properties = {RA_NAME: 'meTo_' + master3.host + ':' + str(master3.port), + RA_BINDDN: defaultProperties[REPLICATION_BIND_DN], + RA_BINDPW: defaultProperties[REPLICATION_BIND_PW], + RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD], + RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]} + m1_m3_agmt = master1.agreement.create(suffix=SUFFIX, host=master3.host, + port=master3.port, + properties=properties) + if not m1_m3_agmt: + log.fatal("Fail to create a master -> master replica agreement") + sys.exit(1) + log.debug("%s created" % m1_m3_agmt) + + # Creating agreement from master 1 to master 4 + properties = {RA_NAME: 'meTo_' + master4.host + ':' + str(master4.port), + RA_BINDDN: defaultProperties[REPLICATION_BIND_DN], + RA_BINDPW: defaultProperties[REPLICATION_BIND_PW], + RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD], + RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]} + m1_m4_agmt = master1.agreement.create(suffix=SUFFIX, host=master4.host, + port=master4.port, + properties=properties) + if not m1_m4_agmt: + log.fatal("Fail to create a master -> master replica agreement") + sys.exit(1) + log.debug("%s created" % m1_m4_agmt) + + # Creating agreement from master 2 to master 1 + properties = {RA_NAME: 'meTo_' + master1.host + ':' + str(master1.port), + RA_BINDDN: defaultProperties[REPLICATION_BIND_DN], + RA_BINDPW: defaultProperties[REPLICATION_BIND_PW], + RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD], + RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]} + m2_m1_agmt = master2.agreement.create(suffix=SUFFIX, host=master1.host, + port=master1.port, + properties=properties) + if not m2_m1_agmt: + 
log.fatal("Fail to create a master -> master replica agreement") + sys.exit(1) + log.debug("%s created" % m2_m1_agmt) + + # Creating agreement from master 2 to master 3 + properties = {RA_NAME: 'meTo_' + master3.host + ':' + str(master3.port), + RA_BINDDN: defaultProperties[REPLICATION_BIND_DN], + RA_BINDPW: defaultProperties[REPLICATION_BIND_PW], + RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD], + RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]} + m2_m3_agmt = master2.agreement.create(suffix=SUFFIX, host=master3.host, + port=master3.port, + properties=properties) + if not m2_m3_agmt: + log.fatal("Fail to create a master -> master replica agreement") + sys.exit(1) + log.debug("%s created" % m2_m3_agmt) + + # Creating agreement from master 2 to master 4 + properties = {RA_NAME: 'meTo_' + master4.host + ':' + str(master4.port), + RA_BINDDN: defaultProperties[REPLICATION_BIND_DN], + RA_BINDPW: defaultProperties[REPLICATION_BIND_PW], + RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD], + RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]} + m2_m4_agmt = master2.agreement.create(suffix=SUFFIX, host=master4.host, + port=master4.port, + properties=properties) + if not m2_m4_agmt: + log.fatal("Fail to create a master -> master replica agreement") + sys.exit(1) + log.debug("%s created" % m2_m4_agmt) + + # Creating agreement from master 3 to master 1 + properties = {RA_NAME: 'meTo_' + master1.host + ':' + str(master1.port), + RA_BINDDN: defaultProperties[REPLICATION_BIND_DN], + RA_BINDPW: defaultProperties[REPLICATION_BIND_PW], + RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD], + RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]} + m3_m1_agmt = master3.agreement.create(suffix=SUFFIX, host=master1.host, + port=master1.port, + properties=properties) + if not m3_m1_agmt: + log.fatal("Fail to create a master -> master replica agreement") + sys.exit(1) + log.debug("%s created" % m3_m1_agmt) + + # Creating agreement from master 3 to master 2 + properties = {RA_NAME: 'meTo_' + master2.host + ':' + str(master2.port), + RA_BINDDN: defaultProperties[REPLICATION_BIND_DN], + RA_BINDPW: defaultProperties[REPLICATION_BIND_PW], + RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD], + RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]} + m3_m2_agmt = master3.agreement.create(suffix=SUFFIX, host=master2.host, + port=master2.port, + properties=properties) + if not m3_m2_agmt: + log.fatal("Fail to create a master -> master replica agreement") + sys.exit(1) + log.debug("%s created" % m3_m2_agmt) + + # Creating agreement from master 3 to master 4 + properties = {RA_NAME: 'meTo_' + master4.host + ':' + str(master4.port), + RA_BINDDN: defaultProperties[REPLICATION_BIND_DN], + RA_BINDPW: defaultProperties[REPLICATION_BIND_PW], + RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD], + RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]} + m3_m4_agmt = master3.agreement.create(suffix=SUFFIX, host=master4.host, + port=master4.port, + properties=properties) + if not m3_m4_agmt: + log.fatal("Fail to create a master -> master replica agreement") + sys.exit(1) + log.debug("%s created" % m3_m4_agmt) + + # Creating agreement from master 4 to master 1 + properties = {RA_NAME: 'meTo_' + master1.host + ':' + str(master1.port), + RA_BINDDN: defaultProperties[REPLICATION_BIND_DN], + RA_BINDPW: defaultProperties[REPLICATION_BIND_PW], + RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD], + RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]} + m4_m1_agmt = 
master4.agreement.create(suffix=SUFFIX, host=master1.host, + port=master1.port, + properties=properties) + if not m4_m1_agmt: + log.fatal("Fail to create a master -> master replica agreement") + sys.exit(1) + log.debug("%s created" % m4_m1_agmt) + + # Creating agreement from master 4 to master 2 + properties = {RA_NAME: 'meTo_' + master2.host + ':' + str(master2.port), + RA_BINDDN: defaultProperties[REPLICATION_BIND_DN], + RA_BINDPW: defaultProperties[REPLICATION_BIND_PW], + RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD], + RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]} + m4_m2_agmt = master4.agreement.create(suffix=SUFFIX, host=master2.host, + port=master2.port, + properties=properties) + if not m4_m2_agmt: + log.fatal("Fail to create a master -> master replica agreement") + sys.exit(1) + log.debug("%s created" % m4_m2_agmt) + + # Creating agreement from master 4 to master 3 + properties = {RA_NAME: 'meTo_' + master3.host + ':' + str(master3.port), + RA_BINDDN: defaultProperties[REPLICATION_BIND_DN], + RA_BINDPW: defaultProperties[REPLICATION_BIND_PW], + RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD], + RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]} + m4_m3_agmt = master4.agreement.create(suffix=SUFFIX, host=master3.host, + port=master3.port, + properties=properties) + if not m4_m3_agmt: + log.fatal("Fail to create a master -> master replica agreement") + sys.exit(1) + log.debug("%s created" % m4_m3_agmt) + + # Allow the replicas to get situated with the new agreements... + time.sleep(5) + + # + # Initialize all the agreements + # + master1.agreement.init(SUFFIX, HOST_MASTER_2, PORT_MASTER_2) + master1.waitForReplInit(m1_m2_agmt) + master1.agreement.init(SUFFIX, HOST_MASTER_3, PORT_MASTER_3) + master1.waitForReplInit(m1_m3_agmt) + master1.agreement.init(SUFFIX, HOST_MASTER_4, PORT_MASTER_4) + master1.waitForReplInit(m1_m4_agmt) + + # Check replication is working... + if master1.testReplication(DEFAULT_SUFFIX, master4): + log.info('Replication is working.') + else: + log.fatal('Replication is not working.') + assert False + + def fin(): + """If we are debugging just stop the instances, otherwise remove + them + """ + if 1 or DEBUGGING: + master1.stop() + master2.stop() + master3.stop() + master4.stop() + else: + master1.delete() + master2.delete() + master3.delete() + master4.delete() + request.addfinalizer(fin) + + return TopologyReplication(master1, master2, master3, master4) + + +class AddDelUsers(threading.Thread): + """Add's and delets 50000 entries""" + def __init__(self, inst): + """ + Initialize the thread + """ + threading.Thread.__init__(self) + self.daemon = True + self.inst = inst + self.name = inst.serverid + + def run(self): + """ + Start adding users + """ + idx = 0 + + log.info('AddDelUsers (%s) Adding and deleting %d entries...' 
% + (self.name, ADD_DEL_COUNT)) + + while idx < ADD_DEL_COUNT: + RDN_VAL = ('uid=%s-%d' % (self.name, idx)) + USER_DN = ('%s,%s' % (RDN_VAL, DEFAULT_SUFFIX)) + + try: + self.inst.add_s(Entry((USER_DN, {'objectclass': + 'top extensibleObject'.split(), + 'uid': RDN_VAL}))) + except ldap.LDAPError as e: + log.fatal('AddDelUsers (%s): failed to add (%s) error: %s' % + (self.name, USER_DN, str(e))) + assert False + + try: + self.inst.delete_s(USER_DN) + except ldap.LDAPError as e: + log.fatal('AddDelUsers (%s): failed to delete (%s) error: %s' % + (self.name, USER_DN, str(e))) + assert False + + idx += 1 + + log.info('AddDelUsers (%s) - Finished at: %s' % + (self.name, getDateTime())) + + +def measureConvergence(topology): + """Find and measure the convergence of entries from each master + """ + + replicas = [topology.master1, topology.master2, topology.master3, + topology.master4] + + if ADD_DEL_COUNT > 10: + interval = int(ADD_DEL_COUNT / 10) + else: + interval = 1 + + for master in [('1', topology.master1), + ('2', topology.master2), + ('3', topology.master3), + ('4', topology.master4)]: + # Start with the first entry + entries = ['ADD dn="uid=master_%s-0,%s' % + (master[0], DEFAULT_SUFFIX)] + + # Add incremental entries to the list + idx = interval + while idx < ADD_DEL_COUNT: + entries.append('ADD dn="uid=master_%s-%d,%s' % + (master[0], idx, DEFAULT_SUFFIX)) + idx += interval + + # Add the last entry to the list (if it was not already added) + if idx != (ADD_DEL_COUNT - 1): + entries.append('ADD dn="uid=master_%s-%d,%s' % + (master[0], (ADD_DEL_COUNT - 1), + DEFAULT_SUFFIX)) + + ReplTools.replConvReport(DEFAULT_SUFFIX, entries, master[1], replicas) + + +def test_MMR_Integrity(topology): + """Apply load to 4 masters at the same time. Perform adds and deletes. + If any updates are missed we will see an error 32 in the access logs or + we will have entries left over once the test completes. + """ + loop = 0 + + ALL_REPLICAS = [topology.master1, topology.master2, topology.master3, + topology.master4] + + if TEST_CONVERGE_LATENCY: + try: + for inst in ALL_REPLICAS: + replica = inst.replicas.get(DEFAULT_SUFFIX) + replica.set('nsds5ReplicaReleaseTimeout', CONVERGENCE_TIMEOUT) + except ldap.LDAPError as e: + log.fatal('Failed to set replicas release timeout - error: %s' % + (str(e))) + assert False + + if DEBUGGING: + # Enable Repl logging, and increase the max logs + try: + for inst in ALL_REPLICAS: + inst.enableReplLogging() + inst.modify_s("cn=config", [(ldap.MOD_REPLACE, + 'nsslapd-errorlog-maxlogsperdir', + '5')]) + except ldap.LDAPError as e: + log.fatal('Failed to set max logs - error: %s' % (str(e))) + assert False + + while loop < MAX_LOOPS: + # Remove the current logs so we have a clean set of logs to check. + log.info('Pass %d...' 
% (loop + 1)) + log.info("Removing logs...") + for inst in ALL_REPLICAS: + inst.deleteAllLogs() + + # Fire off 4 threads to apply the load + log.info("Start adding/deleting: " + getDateTime()) + startTime = time.time() + add_del_m1 = AddDelUsers(topology.master1) + add_del_m1.start() + add_del_m2 = AddDelUsers(topology.master2) + add_del_m2.start() + add_del_m3 = AddDelUsers(topology.master3) + add_del_m3.start() + add_del_m4 = AddDelUsers(topology.master4) + add_del_m4.start() + + # Wait for threads to finish sending their updates + add_del_m1.join() + add_del_m2.join() + add_del_m3.join() + add_del_m4.join() + log.info("Finished adding/deleting entries: " + getDateTime()) + + # + # Loop checking for error 32's, and for convergence to complete + # + log.info("Waiting for replication to converge...") + while True: + # First check for error 32's + for inst in ALL_REPLICAS: + if inst.searchAccessLog(" err=32 "): + log.fatal('An add was missed on: ' + inst.serverid) + assert False + + # Next check to see if the last update is in the access log + converged = True + for inst in ALL_REPLICAS: + if not inst.searchAccessLog(LAST_DN_M1) or \ + not inst.searchAccessLog(LAST_DN_M2) or \ + not inst.searchAccessLog(LAST_DN_M3) or \ + not inst.searchAccessLog(LAST_DN_M4): + converged = False + break + + if converged: + elapsed_tm = int(time.time() - startTime) + convtime = str(datetime.timedelta(seconds=elapsed_tm)) + log.info('Replication converged at: ' + getDateTime() + + ' - Elapsed Time: ' + convtime) + break + else: + # Check if replication is idle + replicas = [topology.master1, topology.master2, + topology.master3, topology.master4] + if ReplTools.replIdle(replicas, DEFAULT_SUFFIX): + # Replication is idle - wait 30 secs for access log buffer + time.sleep(30) + + # Now check the access log again... + converged = True + for inst in ALL_REPLICAS: + if not inst.searchAccessLog(LAST_DN_M1) or \ + not inst.searchAccessLog(LAST_DN_M2) or \ + not inst.searchAccessLog(LAST_DN_M3) or \ + not inst.searchAccessLog(LAST_DN_M4): + converged = False + break + + if converged: + elapsed_tm = int(time.time() - startTime) + convtime = str(datetime.timedelta(seconds=elapsed_tm)) + log.info('Replication converged at: ' + getDateTime() + + ' - Elapsed Time: ' + convtime) + break + else: + log.fatal('Stopping replication check: ' + + getDateTime()) + log.fatal('Failure: Replication is complete, but we ' + + 'never converged.') + assert False + + # Sleep a bit before the next pass + time.sleep(3) + + # + # Finally check the CSN's + # + log.info("Check the CSN's...") + if not ReplTools.checkCSNs(ALL_REPLICAS): + assert False + log.info("All CSN's present and accounted for.") + + # + # Print the convergence report + # + log.info('Measuring convergence...') + measureConvergence(topology) + + # + # Test complete + # + log.info('No lingering entries.') + log.info('Pass %d complete.' 
% (loop + 1)) + elapsed_tm = int(time.time() - TEST_START) + convtime = str(datetime.timedelta(seconds=elapsed_tm)) + log.info('Entire test ran for: ' + convtime) + + loop += 1 + + log.info('Test PASSED') + + +if __name__ == '__main__': + # Run isolated + # -s for DEBUG mode + CURRENT_FILE = os.path.realpath(__file__) + pytest.main("-s %s" % CURRENT_FILE) diff --git a/dirsrvtests/tests/stress/search/__init__.py b/dirsrvtests/tests/stress/search/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/dirsrvtests/tests/stress/search/simple.py b/dirsrvtests/tests/stress/search/simple.py new file mode 100644 index 0000000..8222cb7 --- /dev/null +++ b/dirsrvtests/tests/stress/search/simple.py @@ -0,0 +1,59 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2019 William Brown +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. +# --- END COPYRIGHT BLOCK --- +# + +from lib389.topologies import topology_st +from lib389.dbgen import dbgen +from lib389.ldclt import Ldclt +from lib389.tasks import ImportTask + +from lib389._constants import DEFAULT_SUFFIX + + +def test_stress_search_simple(topology_st): + """Test a simple stress test of searches on the directory server. + + :id: 3786d01c-ea03-4655-a4f9-450693c75863 + :setup: Standalone Instance + :steps: + 1. Create test users + 2. Import them + 3. Stress test! + :expectedresults: + 1. Success + 2. Success + 3. Results are written to /tmp + """ + + inst = topology_st.standalone + + inst.config.set("nsslapd-verify-filter-schema", "off") + # Bump idllimit to test OR worst cases. + from lib389.config import LDBMConfig + lconfig = LDBMConfig(inst) + # lconfig.set("nsslapd-idlistscanlimit", '20000') + # lconfig.set("nsslapd-lookthroughlimit", '20000') + + + ldif_dir = inst.get_ldif_dir() + import_ldif = ldif_dir + '/basic_import.ldif' + dbgen(inst, 10000, import_ldif, DEFAULT_SUFFIX) + + r = ImportTask(inst) + r.import_suffix_from_ldif(ldiffile=import_ldif, suffix=DEFAULT_SUFFIX) + r.wait() + + # Run a small to warm up the server's caches ... + l = Ldclt(inst) + + l.search_loadtest(DEFAULT_SUFFIX, "(mail=XXXX@example.com)", rounds=1) + + # Now do it for realsies! + # l.search_loadtest(DEFAULT_SUFFIX, "(|(mail=XXXX@example.com)(nonexist=foo))", rounds=10) + l.search_loadtest(DEFAULT_SUFFIX, "(mail=XXXX@example.com)", rounds=10) + diff --git a/dirsrvtests/tests/suites/__init__.py b/dirsrvtests/tests/suites/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/dirsrvtests/tests/suites/acl/__init__.py b/dirsrvtests/tests/suites/acl/__init__.py new file mode 100644 index 0000000..147ecba --- /dev/null +++ b/dirsrvtests/tests/suites/acl/__init__.py @@ -0,0 +1,3 @@ +""" + :Requirement: 389-ds-base: Access Control Instructions (ACI) +""" diff --git a/dirsrvtests/tests/suites/acl/acivattr_test.py b/dirsrvtests/tests/suites/acl/acivattr_test.py new file mode 100644 index 0000000..35759f3 --- /dev/null +++ b/dirsrvtests/tests/suites/acl/acivattr_test.py @@ -0,0 +1,252 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2019 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. 
+# --- END COPYRIGHT BLOCK --- + +import pytest, os, ldap +from lib389._constants import DEFAULT_SUFFIX, PW_DM +from lib389.idm.user import UserAccount +from lib389.idm.organization import Organization +from lib389.idm.organizationalunit import OrganizationalUnit +from lib389.cos import CosTemplate, CosClassicDefinition +from lib389.topologies import topology_st as topo +from lib389.idm.nscontainer import nsContainer +from lib389.idm.domain import Domain +from lib389.idm.role import FilteredRoles + +pytestmark = pytest.mark.tier1 + +DNBASE = "o=acivattr,{}".format(DEFAULT_SUFFIX) +ENG_USER = "cn=enguser1,ou=eng,{}".format(DNBASE) +SALES_UESER = "cn=salesuser1,ou=sales,{}".format(DNBASE) +ENG_MANAGER = "cn=engmanager1,ou=eng,{}".format(DNBASE) +SALES_MANAGER = "cn=salesmanager1,ou=sales,{}".format(DNBASE) +SALES_OU = "ou=sales,{}".format(DNBASE) +ENG_OU = "ou=eng,{}".format(DNBASE) +FILTERROLESALESROLE = "cn=FILTERROLESALESROLE,{}".format(DNBASE) +FILTERROLEENGROLE = "cn=FILTERROLEENGROLE,{}".format(DNBASE) + + +@pytest.fixture(scope="function") +def aci_of_user(request, topo): + aci_list = Domain(topo.standalone, DEFAULT_SUFFIX).get_attr_vals('aci') + + def finofaci(): + domain = Domain(topo.standalone, DEFAULT_SUFFIX) + domain.set('aci', None) + for i in aci_list: + domain.add("aci", i) + + request.addfinalizer(finofaci) + + +@pytest.fixture(scope="function") +def _add_user(request, topo): + org = Organization(topo.standalone).create(properties={"o": "acivattr"}, basedn=DEFAULT_SUFFIX) + org.add('aci', '(targetattr="*")(targetfilter="(nsrole=*)")(version 3.0; aci "tester"; ' + 'allow(all) userdn="ldap:///cn=enguser1,ou=eng,o=acivattr,{}";)'.format(DEFAULT_SUFFIX)) + + ou = OrganizationalUnit(topo.standalone, "ou=eng,o=acivattr,{}".format(DEFAULT_SUFFIX)) + ou.create(properties={'ou': 'eng'}) + + ou = OrganizationalUnit(topo.standalone, "ou=sales,o=acivattr,{}".format(DEFAULT_SUFFIX)) + ou.create(properties={'ou': 'sales'}) + + roles = FilteredRoles(topo.standalone, DNBASE) + roles.create(properties={'cn':'FILTERROLEENGROLE', 'nsRoleFilter':'cn=eng*'}) + roles.create(properties={'cn': 'FILTERROLESALESROLE', 'nsRoleFilter': 'cn=sales*'}) + + nsContainer(topo.standalone, + 'cn=cosClassicGenerateEmployeeTypeUsingnsroleTemplates,o=acivattr,{}'.format(DEFAULT_SUFFIX)).create( + properties={'cn': 'cosTemplates'}) + + properties = {'employeeType': 'EngType', 'cn':'"cn=filterRoleEngRole,o=acivattr,dc=example,dc=com",cn=cosClassicGenerateEmployeeTypeUsingnsroleTemplates,o=acivattr,dc=example,dc=com'} + CosTemplate(topo.standalone,'cn="cn=filterRoleEngRole,o=acivattr,dc=example,dc=com",' + 'cn=cosClassicGenerateEmployeeTypeUsingnsroleTemplates,o=acivattr,{}'.format(DEFAULT_SUFFIX)).\ + create(properties=properties) + + properties = {'employeeType': 'SalesType', 'cn': '"cn=filterRoleSalesRole,o=acivattr,dc=example,dc=com",cn=cosClassicGenerateEmployeeTypeUsingnsroleTemplates,o=acivattr,dc=example,dc=com'} + CosTemplate(topo.standalone, + 'cn="cn=filterRoleSalesRole,o=acivattr,dc=example,dc=com",cn=cosClassicGenerateEmployeeTypeUsingnsroleTemplates,' + 'o=acivattr,{}'.format(DEFAULT_SUFFIX)).create(properties=properties) + + properties = { + 'cosTemplateDn': 'cn=cosClassicGenerateEmployeeTypeUsingnsroleTemplates,o=acivattr,{}'.format(DEFAULT_SUFFIX), + 'cosAttribute': 'employeeType', 'cosSpecifier': 'nsrole', 'cn': 'cosClassicGenerateEmployeeTypeUsingnsrole'} + CosClassicDefinition(topo.standalone, + 'cn=cosClassicGenerateEmployeeTypeUsingnsrole,o=acivattr,{}'.format(DEFAULT_SUFFIX)).create( + 
properties=properties) + + properties = { + 'uid': 'salesuser1', + 'cn': 'salesuser1', + 'sn': 'user', + 'uidNumber': '1000', + 'gidNumber': '2000', + 'homeDirectory': '/home/' + 'salesuser1', + 'userPassword': PW_DM + } + user = UserAccount(topo.standalone, 'cn=salesuser1,ou=sales,o=acivattr,{}'.format(DEFAULT_SUFFIX)) + user.create(properties=properties) + + properties = { + 'uid': 'salesmanager1', + 'cn': 'salesmanager1', + 'sn': 'user', + 'uidNumber': '1000', + 'gidNumber': '2000', + 'homeDirectory': '/home/' + 'salesmanager1', + 'userPassword': PW_DM, + } + user = UserAccount(topo.standalone, 'cn=salesmanager1,ou=sales,o=acivattr,{}'.format(DEFAULT_SUFFIX)) + user.create(properties=properties) + + properties = { + 'uid': 'enguser1', + 'cn': 'enguser1', + 'sn': 'user', + 'uidNumber': '1000', + 'gidNumber': '2000', + 'homeDirectory': '/home/' + 'enguser1', + 'userPassword': PW_DM + } + user = UserAccount(topo.standalone, 'cn=enguser1,ou=eng,o=acivattr,{}'.format(DEFAULT_SUFFIX)) + user.create(properties=properties) + + properties = { + 'uid': 'engmanager1', + 'cn': 'engmanager1', + 'sn': 'user', + 'uidNumber': '1000', + 'gidNumber': '2000', + 'homeDirectory': '/home/' + 'engmanager1', + 'userPassword': PW_DM + } + user = UserAccount(topo.standalone, 'cn=engmanager1,ou=eng,o=acivattr,{}'.format(DEFAULT_SUFFIX)) + user.create(properties=properties) + + def fin(): + for DN in [ENG_USER,SALES_UESER,ENG_MANAGER,SALES_MANAGER,FILTERROLESALESROLE,FILTERROLEENGROLE,ENG_OU,SALES_OU, + 'cn="cn=filterRoleEngRole,o=acivattr,dc=example,dc=com",' + 'cn=cosClassicGenerateEmployeeTypeUsingnsroleTemplates,o=acivattr,dc=example,dc=com', + 'cn="cn=filterRoleSalesRole,o=acivattr,dc=example,dc=com",' + 'cn=cosClassicGenerateEmployeeTypeUsingnsroleTemplates,o=acivattr,{}'.format(DEFAULT_SUFFIX), 'cn=cosClassicGenerateEmployeeTypeUsingnsroleTemplates,o=acivattr,{}'.format(DEFAULT_SUFFIX), + 'cn=cosClassicGenerateEmployeeTypeUsingnsrole,o=acivattr,{}'.format(DEFAULT_SUFFIX), DNBASE]: + UserAccount(topo.standalone, DN).delete() + + request.addfinalizer(fin) + + +REAL_EQ_ACI = '(targetattr="*")(targetfilter="(cn=engmanager1)") (version 3.0; acl "real-eq"; allow (all) userdn="ldap:///{}";)'.format(ENG_USER) +REAL_PRES_ACI = '(targetattr="*")(targetfilter="(cn=*)") (version 3.0; acl "real-pres"; allow (all) userdn="ldap:///{}";)'.format(ENG_USER) +REAL_SUB_ACI = '(targetattr="*")(targetfilter="(cn=eng*)") (version 3.0; acl "real-sub"; allow (all) userdn="ldap:///{}";)'.format(ENG_USER) +ROLE_EQ_ACI = '(targetattr="*")(targetfilter="(nsrole=cn=filterroleengrole,o=sun.com)") (version 3.0; acl "role-eq"; allow (all) userdn="ldap:///{}";)'.format(ENG_USER) +ROLE_PRES_ACI = '(targetattr="*")(targetfilter="(nsrole=*)") (version 3.0; acl "role-pres"; allow (all) userdn="ldap:///{}";)'.format(ENG_USER) +ROLE_SUB_ACI = '(targetattr="*")(targetfilter="(nsrole=cn=filterroleeng*)") (version 3.0; acl "role-sub"; allow (all) userdn="ldap:///{}";)'.format(ENG_USER) +COS_EQ_ACI = '(targetattr="*")(targetfilter="(employeetype=engtype)") (version 3.0; acl "cos-eq"; allow (all) userdn="ldap:///{}";)'.format(ENG_USER) +COS_PRES_ACI = '(targetattr="*")(targetfilter="(employeetype=*)") (version 3.0; acl "cos-pres"; allow (all) userdn="ldap:///{}";)'.format(ENG_USER) +COS_SUB_ACI = '(targetattr="*")(targetfilter="(employeetype=eng*)") (version 3.0; acl "cos-sub"; allow (all) userdn="ldap:///{}";)'.format(ENG_USER) +LDAPURL_ACI = '(targetattr="*")(version 3.0; acl "url"; allow (all) 
userdn="ldap:///o=acivattr,dc=example,dc=com??sub?(nsrole=*eng*)";)' + + +@pytest.mark.parametrize("user,entry,aci", [ + (ENG_USER, ENG_MANAGER, REAL_EQ_ACI), + (ENG_USER, ENG_MANAGER, REAL_PRES_ACI), + (ENG_USER, ENG_MANAGER, REAL_SUB_ACI), + (ENG_USER, ENG_MANAGER, ROLE_PRES_ACI), + (ENG_USER, ENG_MANAGER, ROLE_SUB_ACI), + (ENG_USER, ENG_MANAGER, COS_EQ_ACI), + (ENG_USER, ENG_MANAGER, COS_PRES_ACI), + (ENG_USER, ENG_MANAGER, COS_SUB_ACI), + (ENG_USER, ENG_MANAGER, LDAPURL_ACI), +], ids=[ + "(ENG_USER, ENG_MANAGER, REAL_EQ_ACI)", + "(ENG_USER, ENG_MANAGER, REAL_PRES_ACI)", + "(ENG_USER, ENG_MANAGER, REAL_SUB_ACI)", + "(ENG_USER, ENG_MANAGER, ROLE_PRES_ACI)", + '(ENG_USER, ENG_MANAGER, ROLE_SUB_ACI)', + '(ENG_USER, ENG_MANAGER, COS_EQ_ACI)', + '(ENG_USER, ENG_MANAGER, COS_PRES_ACI)', + '(ENG_USER, ENG_MANAGER, COS_SUB_ACI)', + '(ENG_USER, ENG_MANAGER, LDAPURL_ACI)', +]) +def test_positive(topo, _add_user, aci_of_user, user, entry, aci): + """ + :id: ba6d5e9c-786b-11e8-860d-8c16451d917b + :parametrized: yes + :setup: server + :steps: + 1. Add test entry + 2. Add ACI + 3. ACI role should be followed + :expectedresults: + 1. Entry should be added + 2. Operation should succeed + 3. Operation should succeed + """ + # set aci + Domain(topo.standalone, DNBASE).set("aci", aci) + # create connection + conn = UserAccount(topo.standalone, user).bind(PW_DM) + # according to the aci , user will be able to change description + UserAccount(conn, entry).replace("description", "Fred") + assert UserAccount(conn, entry).present('description') + + +@pytest.mark.parametrize("user,entry,aci", [ + (ENG_USER, SALES_MANAGER, REAL_EQ_ACI), + (ENG_USER, SALES_OU, REAL_PRES_ACI), + (ENG_USER, SALES_MANAGER, REAL_SUB_ACI), + (ENG_USER, SALES_MANAGER, ROLE_EQ_ACI), + (ENG_USER, SALES_OU, ROLE_PRES_ACI), + (ENG_USER, SALES_MANAGER, ROLE_SUB_ACI), + (ENG_USER, SALES_MANAGER, COS_EQ_ACI), + (ENG_USER, SALES_OU, COS_PRES_ACI), + (ENG_USER, SALES_MANAGER, COS_SUB_ACI), + (SALES_UESER, SALES_MANAGER, LDAPURL_ACI), + (ENG_USER, ENG_MANAGER, ROLE_EQ_ACI), +], ids=[ + + "(ENG_USER, SALES_MANAGER, REAL_EQ_ACI)", + "(ENG_USER, SALES_OU, REAL_PRES_ACI)", + "(ENG_USER, SALES_MANAGER, REAL_SUB_ACI)", + "(ENG_USER, SALES_MANAGER, ROLE_EQ_ACI)", + "(ENG_USER, SALES_MANAGER, ROLE_PRES_ACI)", + '(ENG_USER, SALES_MANAGER, ROLE_SUB_ACI)', + '(ENG_USER, SALES_MANAGER, COS_EQ_ACI)', + '(ENG_USER, SALES_MANAGER, COS_PRES_ACI)', + '(ENG_USER, SALES_MANAGER, COS_SUB_ACI)', + '(SALES_UESER, SALES_MANAGER, LDAPURL_ACI)', + '(ENG_USER, ENG_MANAGER, ROLE_EQ_ACI)' + + +]) +def test_negative(topo, _add_user, aci_of_user, user, entry, aci): + """ + :id: c4c887c2-786b-11e8-a328-8c16451d917b + :parametrized: yes + :setup: server + :steps: + 1. Add test entry + 2. Add ACI + 3. ACI role should be followed + :expectedresults: + 1. Entry should be added + 2. Operation should succeed + 3. 
Operation should succeed + """ + # set aci + Domain(topo.standalone, DNBASE).set("aci", aci) + # create connection + conn = UserAccount(topo.standalone, user).bind(PW_DM) + # according to the aci , user will not be able to change description + with pytest.raises(ldap.INSUFFICIENT_ACCESS): + UserAccount(conn, entry).replace("description", "Fred") + + +if __name__ == "__main__": + CURRENT_FILE = os.path.realpath(__file__) + pytest.main("-s -v %s" % CURRENT_FILE) diff --git a/dirsrvtests/tests/suites/acl/acl_deny_test.py b/dirsrvtests/tests/suites/acl/acl_deny_test.py new file mode 100644 index 0000000..8ea6cd2 --- /dev/null +++ b/dirsrvtests/tests/suites/acl/acl_deny_test.py @@ -0,0 +1,200 @@ +import logging +import pytest +import os +import ldap +import time +from lib389._constants import * +from lib389.topologies import topology_st as topo +from lib389.idm.user import UserAccount, UserAccounts, TEST_USER_PROPERTIES +from lib389.idm.domain import Domain + +pytestmark = pytest.mark.tier1 + +DEBUGGING = os.getenv("DEBUGGING", default=False) +if DEBUGGING: + logging.getLogger(__name__).setLevel(logging.DEBUG) +else: + logging.getLogger(__name__).setLevel(logging.INFO) +log = logging.getLogger(__name__) + +BIND_DN2 = 'uid=tuser,ou=People,dc=example,dc=com' +BIND_RDN2 = 'tuser' +BIND_DN = 'uid=tuser1,ou=People,dc=example,dc=com' +BIND_RDN = 'tuser1' +SRCH_FILTER = "uid=tuser1" +SRCH_FILTER2 = "uid=tuser" + +aci_list_A = ['(targetattr != "userPassword") (version 3.0; acl "Anonymous access"; allow (read, search, compare)userdn = "ldap:///anyone";)', + '(targetattr = "*") (version 3.0;acl "allow tuser";allow (all)(userdn = "ldap:///uid=tuser5,ou=People,dc=example,dc=com");)', + '(targetattr != "uid || mail") (version 3.0; acl "deny-attrs"; deny (all) (userdn = "ldap:///anyone");)', + '(targetfilter = "(inetUserStatus=1)") ( version 3.0; acl "deny-specific-entry"; deny(all) (userdn = "ldap:///anyone");)'] + +aci_list_B = ['(targetattr != "userPassword") (version 3.0; acl "Anonymous access"; allow (read, search, compare)userdn = "ldap:///anyone";)', + '(targetattr != "uid || mail") (version 3.0; acl "deny-attrs"; deny (all) (userdn = "ldap:///anyone");)', + '(targetfilter = "(inetUserStatus=1)") ( version 3.0; acl "deny-specific-entry"; deny(all) (userdn = "ldap:///anyone");)'] + + +@pytest.fixture(scope="module") +def aci_setup(topo): + topo.standalone.log.info("Add {}".format(BIND_DN)) + user = UserAccount(topo.standalone, BIND_DN) + user_props = TEST_USER_PROPERTIES.copy() + user_props.update({'sn': BIND_RDN, + 'cn': BIND_RDN, + 'uid': BIND_RDN, + 'inetUserStatus': '1', + 'objectclass': 'extensibleObject', + 'userpassword': PASSWORD}) + user.create(properties=user_props, basedn=SUFFIX) + + topo.standalone.log.info("Add {}".format(BIND_DN2)) + user2 = UserAccount(topo.standalone, BIND_DN2) + user_props = TEST_USER_PROPERTIES.copy() + user_props.update({'sn': BIND_RDN2, + 'cn': BIND_RDN2, + 'uid': BIND_RDN2, + 'userpassword': PASSWORD}) + user2.create(properties=user_props, basedn=SUFFIX) + + +def test_multi_deny_aci(topo, aci_setup): + """Test that mutliple deny rules work, and that they the cache properly + stores the result + + :id: 294c366d-850e-459e-b5a0-3cc828ec3aca + :setup: Standalone Instance + :steps: + 1. Add aci_list_A aci's and verify two searches on the same connection + behave the same + 2. Add aci_list_B aci's and verify search fails as expected + :expectedresults: + 1. Both searches do not return any entries + 2. 
Seaches do not return any entries + """ + + if DEBUGGING: + # Maybe add aci logging? + pass + + suffix = Domain(topo.standalone, DEFAULT_SUFFIX) + + for run in range(2): + topo.standalone.log.info("Pass " + str(run + 1)) + + # Test ACI List A + topo.standalone.log.info("Testing two searches behave the same...") + topo.standalone.simple_bind_s(DN_DM, PASSWORD) + suffix.set('aci', aci_list_A, ldap.MOD_REPLACE) + time.sleep(1) + + topo.standalone.simple_bind_s(BIND_DN, PASSWORD) + entries = topo.standalone.search_s(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE, SRCH_FILTER) + if entries and entries[0]: + topo.standalone.log.fatal("Incorrectly got an entry returned from search 1") + assert False + + entries = topo.standalone.search_s(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE, SRCH_FILTER) + if entries and entries[0]: + topo.standalone.log.fatal("Incorrectly got an entry returned from search 2") + assert False + + entries = topo.standalone.search_s(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE, SRCH_FILTER2) + if entries is None or len(entries) == 0: + topo.standalone.log.fatal("Failed to get entry as good user") + assert False + + entries = topo.standalone.search_s(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE, SRCH_FILTER2) + if entries is None or len(entries) == 0: + topo.standalone.log.fatal("Failed to get entry as good user") + assert False + + # Bind a different user who has rights + topo.standalone.simple_bind_s(BIND_DN2, PASSWORD) + entries = topo.standalone.search_s(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE, SRCH_FILTER2) + if entries is None or len(entries) == 0: + topo.standalone.log.fatal("Failed to get entry as good user") + assert False + + entries = topo.standalone.search_s(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE, SRCH_FILTER2) + if entries is None or len(entries) == 0: + topo.standalone.log.fatal("Failed to get entry as good user (2)") + assert False + + if run > 0: + # Second pass + topo.standalone.restart() + + # Reset ACI's and do the second test + topo.standalone.log.info("Testing search does not return any entries...") + topo.standalone.simple_bind_s(DN_DM, PASSWORD) + suffix.set('aci', aci_list_B, ldap.MOD_REPLACE) + time.sleep(1) + + topo.standalone.simple_bind_s(BIND_DN, PASSWORD) + entries = topo.standalone.search_s(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE, SRCH_FILTER) + if entries and entries[0]: + topo.standalone.log.fatal("Incorrectly got an entry returned from search 1") + assert False + + entries = topo.standalone.search_s(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE, SRCH_FILTER) + if entries and entries[0]: + topo.standalone.log.fatal("Incorrectly got an entry returned from search 2") + assert False + + if run > 0: + # Second pass + topo.standalone.restart() + + # Bind as different user who has rights + topo.standalone.simple_bind_s(BIND_DN2, PASSWORD) + entries = topo.standalone.search_s(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE, SRCH_FILTER2) + if entries is None or len(entries) == 0: + topo.standalone.log.fatal("Failed to get entry as good user") + assert False + + entries = topo.standalone.search_s(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE, SRCH_FILTER2) + if entries is None or len(entries) == 0: + topo.standalone.log.fatal("Failed to get entry as good user (2)") + assert False + + entries = topo.standalone.search_s(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE, SRCH_FILTER) + if entries and entries[0]: + topo.standalone.log.fatal("Incorrectly got an entry returned from search 1") + assert False + + entries = topo.standalone.search_s(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE, SRCH_FILTER) + if entries and entries[0]: + 
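+            # tuser1 was created with inetUserStatus: 1, so the "deny-specific-entry"
+            # rule in aci_list_B keeps it out of the result set; repeating the same
+            # search on the same connection checks that the cached ACL evaluation
+            # returns the denied result as well.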
topo.standalone.log.fatal("Incorrectly got an entry returned from search 2") + assert False + + # back to user 1 + topo.standalone.simple_bind_s(BIND_DN, PASSWORD) + entries = topo.standalone.search_s(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE, SRCH_FILTER2) + if entries is None or len(entries) == 0: + topo.standalone.log.fatal("Failed to get entry as user1") + assert False + + entries = topo.standalone.search_s(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE, SRCH_FILTER2) + if entries is None or len(entries) == 0: + topo.standalone.log.fatal("Failed to get entry as user1 (2)") + assert False + + entries = topo.standalone.search_s(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE, SRCH_FILTER) + if entries and entries[0]: + topo.standalone.log.fatal("Incorrectly got an entry returned from search 1") + assert False + + entries = topo.standalone.search_s(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE, SRCH_FILTER) + if entries and entries[0]: + topo.standalone.log.fatal("Incorrectly got an entry returned from search 2") + assert False + + topo.standalone.log.info("Test PASSED") + + +if __name__ == '__main__': + # Run isolated + # -s for DEBUG mode + CURRENT_FILE = os.path.realpath(__file__) + pytest.main(["-s", CURRENT_FILE]) + diff --git a/dirsrvtests/tests/suites/acl/acl_test.py b/dirsrvtests/tests/suites/acl/acl_test.py new file mode 100644 index 0000000..5ca8652 --- /dev/null +++ b/dirsrvtests/tests/suites/acl/acl_test.py @@ -0,0 +1,1150 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2016 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. +# --- END COPYRIGHT BLOCK --- +# +import pytest +from ldap.controls.simple import GetEffectiveRightsControl +from lib389.tasks import * +from lib389.utils import * +from lib389.schema import Schema +from lib389.idm.domain import Domain +from lib389.idm.user import UserAccount, UserAccounts, TEST_USER_PROPERTIES +from lib389.idm.organizationalrole import OrganizationalRole, OrganizationalRoles + +from lib389.topologies import topology_m2 +from lib389._constants import SUFFIX, DN_SCHEMA, DN_DM, DEFAULT_SUFFIX, PASSWORD + +pytestmark = pytest.mark.tier1 + +logging.getLogger(__name__).setLevel(logging.DEBUG) +log = logging.getLogger(__name__) + +TEST_REPL_DN = "cn=test_repl, %s" % SUFFIX + +STAGING_CN = "staged user" +PRODUCTION_CN = "accounts" +EXCEPT_CN = "excepts" + +STAGING_DN = "cn=%s,%s" % (STAGING_CN, SUFFIX) +PRODUCTION_DN = "cn=%s,%s" % (PRODUCTION_CN, SUFFIX) +PROD_EXCEPT_DN = "cn=%s,%s" % (EXCEPT_CN, PRODUCTION_DN) + +STAGING_PATTERN = "cn=%s*,%s" % (STAGING_CN[:2], SUFFIX) +PRODUCTION_PATTERN = "cn=%s*,%s" % (PRODUCTION_CN[:2], SUFFIX) +BAD_STAGING_PATTERN = "cn=bad*,%s" % (SUFFIX) +BAD_PRODUCTION_PATTERN = "cn=bad*,%s" % (SUFFIX) + +BIND_RDN = "bind_entry" +BIND_DN = "uid=%s,%s" % (BIND_RDN, SUFFIX) +BIND_PW = "password" + +NEW_ACCOUNT = "new_account" +MAX_ACCOUNTS = 20 + +CONFIG_MODDN_ACI_ATTR = "nsslapd-moddn-aci" + +SRC_ENTRY_CN = "tuser" +EXT_RDN = "01" +DST_ENTRY_CN = SRC_ENTRY_CN + EXT_RDN + +SRC_ENTRY_DN = "cn=%s,%s" % (SRC_ENTRY_CN, SUFFIX) +DST_ENTRY_DN = "cn=%s,%s" % (DST_ENTRY_CN, SUFFIX) + + +def add_attr(topology_m2, attr_name): + """Adds attribute to the schema""" + + ATTR_VALUE = """(NAME '%s' \ + DESC 'Attribute filteri-Multi-Valued' \ + SYNTAX 1.3.6.1.4.1.1466.115.121.1.27)""" % attr_name + schema = Schema(topology_m2.ms["master1"]) + schema.add('attributeTypes', ATTR_VALUE) + + +@pytest.fixture(params=["lang-ja", "binary", "phonetic"]) +def aci_with_attr_subtype(request, topology_m2): + """Adds and 
deletes an ACI in the DEFAULT_SUFFIX""" + + TARGET_ATTR = 'protectedOperation' + USER_ATTR = 'allowedToPerform' + SUBTYPE = request.param + suffix = Domain(topology_m2.ms["master1"], DEFAULT_SUFFIX) + + log.info("========Executing test with '%s' subtype========" % SUBTYPE) + log.info(" Add a target attribute") + add_attr(topology_m2, TARGET_ATTR) + + log.info(" Add a user attribute") + add_attr(topology_m2, USER_ATTR) + + ACI_TARGET = '(targetattr=%s;%s)' % (TARGET_ATTR, SUBTYPE) + ACI_ALLOW = '(version 3.0; acl "test aci for subtypes"; allow (read) ' + ACI_SUBJECT = 'userattr = "%s;%s#GROUPDN";)' % (USER_ATTR, SUBTYPE) + ACI_BODY = ACI_TARGET + ACI_ALLOW + ACI_SUBJECT + + log.info("Add an ACI with attribute subtype") + suffix.add('aci', ACI_BODY) + + def fin(): + log.info("Finally, delete an ACI with the '%s' subtype" % + SUBTYPE) + suffix.remove('aci', ACI_BODY) + + request.addfinalizer(fin) + + return ACI_BODY + + +def test_aci_attr_subtype_targetattr(topology_m2, aci_with_attr_subtype): + """Checks, that ACIs allow attribute subtypes in the targetattr keyword + + :id: a99ccda0-5d0b-4d41-99cc-c5e207b3b687 + :parametrized: yes + :setup: MMR with two masters, + Define two attributes in the schema - targetattr and userattr, + Add an ACI with attribute subtypes - "lang-ja", "binary", "phonetic" + one by one + :steps: + 1. Search for the added attribute during setup + one by one for each subtypes "lang-ja", "binary", "phonetic" + :expectedresults: + 1. Attributes should be found successfully + one by one for each subtypes "lang-ja", "binary", "phonetic" + """ + + log.info("Search for the added attribute") + try: + entries = topology_m2.ms["master1"].search_s(DEFAULT_SUFFIX, + ldap.SCOPE_BASE, + '(objectclass=*)', ['aci']) + entry = str(entries[0]) + assert aci_with_attr_subtype in entry + log.info("The added attribute was found") + + except ldap.LDAPError as e: + log.fatal('Search failed, error: ' + e.message['desc']) + assert False + + +def _bind_manager(topology_m2): + topology_m2.ms["master1"].log.info("Bind as %s " % DN_DM) + topology_m2.ms["master1"].simple_bind_s(DN_DM, PASSWORD) + + +def _bind_normal(topology_m2): + # bind as bind_entry + topology_m2.ms["master1"].log.info("Bind as %s" % BIND_DN) + topology_m2.ms["master1"].simple_bind_s(BIND_DN, BIND_PW) + + +def _moddn_aci_deny_tree(topology_m2, mod_type=None, + target_from=STAGING_DN, target_to=PROD_EXCEPT_DN): + """It denies the access moddn_to in cn=except,cn=accounts,SUFFIX""" + + assert mod_type is not None + + ACI_TARGET_FROM = "" + ACI_TARGET_TO = "" + if target_from: + ACI_TARGET_FROM = "(target_from = \"ldap:///%s\")" % (target_from) + if target_to: + ACI_TARGET_TO = "(target_to = \"ldap:///%s\")" % (target_to) + + ACI_ALLOW = "(version 3.0; acl \"Deny MODDN to prod_except\"; deny (moddn)" + ACI_SUBJECT = " userdn = \"ldap:///%s\";)" % BIND_DN + ACI_BODY = ACI_TARGET_TO + ACI_TARGET_FROM + ACI_ALLOW + ACI_SUBJECT + # topology_m2.ms["master1"].modify_s(SUFFIX, mod) + topology_m2.ms["master1"].log.info("Add a DENY aci under %s " % PROD_EXCEPT_DN) + prod_except = OrganizationalRole(topology_m2.ms["master1"], PROD_EXCEPT_DN) + prod_except.set('aci', ACI_BODY, mod_type) + + +def _write_aci_staging(topology_m2, mod_type=None): + assert mod_type is not None + + ACI_TARGET = "(targetattr= \"uid\")(target=\"ldap:///uid=*,%s\")" % STAGING_DN + ACI_ALLOW = "(version 3.0; acl \"write staging entries\"; allow (write)" + ACI_SUBJECT = " userdn = \"ldap:///%s\";)" % BIND_DN + ACI_BODY = ACI_TARGET + ACI_ALLOW + ACI_SUBJECT + suffix = 
Domain(topology_m2.ms["master1"], SUFFIX) + suffix.set('aci', ACI_BODY, mod_type) + + +def _write_aci_production(topology_m2, mod_type=None): + assert mod_type is not None + + ACI_TARGET = "(targetattr= \"uid\")(target=\"ldap:///uid=*,%s\")" % PRODUCTION_DN + ACI_ALLOW = "(version 3.0; acl \"write production entries\"; allow (write)" + ACI_SUBJECT = " userdn = \"ldap:///%s\";)" % BIND_DN + ACI_BODY = ACI_TARGET + ACI_ALLOW + ACI_SUBJECT + suffix = Domain(topology_m2.ms["master1"], SUFFIX) + suffix.set('aci', ACI_BODY, mod_type) + + +def _moddn_aci_staging_to_production(topology_m2, mod_type=None, + target_from=STAGING_DN, target_to=PRODUCTION_DN): + assert mod_type is not None + + ACI_TARGET_FROM = "" + ACI_TARGET_TO = "" + if target_from: + ACI_TARGET_FROM = "(target_from = \"ldap:///%s\")" % (target_from) + if target_to: + ACI_TARGET_TO = "(target_to = \"ldap:///%s\")" % (target_to) + + ACI_ALLOW = "(version 3.0; acl \"MODDN from staging to production\"; allow (moddn)" + ACI_SUBJECT = " userdn = \"ldap:///%s\";)" % BIND_DN + ACI_BODY = ACI_TARGET_FROM + ACI_TARGET_TO + ACI_ALLOW + ACI_SUBJECT + suffix = Domain(topology_m2.ms["master1"], SUFFIX) + suffix.set('aci', ACI_BODY, mod_type) + + _write_aci_staging(topology_m2, mod_type=mod_type) + + +def _moddn_aci_from_production_to_staging(topology_m2, mod_type=None): + assert mod_type is not None + + ACI_TARGET = "(target_from = \"ldap:///%s\") (target_to = \"ldap:///%s\")" % ( + PRODUCTION_DN, STAGING_DN) + ACI_ALLOW = "(version 3.0; acl \"MODDN from production to staging\"; allow (moddn)" + ACI_SUBJECT = " userdn = \"ldap:///%s\";)" % BIND_DN + ACI_BODY = ACI_TARGET + ACI_ALLOW + ACI_SUBJECT + suffix = Domain(topology_m2.ms["master1"], SUFFIX) + suffix.set('aci', ACI_BODY, mod_type) + + _write_aci_production(topology_m2, mod_type=mod_type) + + +@pytest.fixture(scope="module") +def moddn_setup(topology_m2): + """Creates + - a staging DIT + - a production DIT + - add accounts in staging DIT + - enable ACL logging (commented for performance reason) + """ + + m1 = topology_m2.ms["master1"] + o_roles = OrganizationalRoles(m1, SUFFIX) + + m1.log.info("\n\n######## INITIALIZATION ########\n") + + # entry used to bind with + m1.log.info("Add {}".format(BIND_DN)) + user = UserAccount(m1, BIND_DN) + user_props = TEST_USER_PROPERTIES.copy() + user_props.update({'sn': BIND_RDN, + 'cn': BIND_RDN, + 'uid': BIND_RDN, + 'userpassword': BIND_PW}) + user.create(properties=user_props, basedn=SUFFIX) + + # DIT for staging + m1.log.info("Add {}".format(STAGING_DN)) + o_roles.create(properties={'cn': STAGING_CN, 'description': "staging DIT"}) + + # DIT for production + m1.log.info("Add {}".format(PRODUCTION_DN)) + o_roles.create(properties={'cn': PRODUCTION_CN, 'description': "production DIT"}) + + # DIT for production/except + m1.log.info("Add {}".format(PROD_EXCEPT_DN)) + o_roles_prod = OrganizationalRoles(m1, PRODUCTION_DN) + o_roles_prod.create(properties={'cn': EXCEPT_CN, 'description': "production except DIT"}) + + # enable acl error logging + # mod = [(ldap.MOD_REPLACE, 'nsslapd-errorlog-level', '128')] + # m1.modify_s(DN_CONFIG, mod) + # topology_m2.ms["master2"].modify_s(DN_CONFIG, mod) + + # add dummy entries in the staging DIT + staging_users = UserAccounts(m1, SUFFIX, rdn="cn={}".format(STAGING_CN)) + user_props = TEST_USER_PROPERTIES.copy() + for cpt in range(MAX_ACCOUNTS): + name = "{}{}".format(NEW_ACCOUNT, cpt) + user_props.update({'sn': name, 'cn': name, 'uid': name}) + staging_users.create(properties=user_props) + + +def 
test_mode_default_add_deny(topology_m2, moddn_setup): + """Tests that the ADD operation fails (no ADD aci on production) + + :id: 301d41d3-b8d8-44c5-8eb9-c2d2816b5a4f + :setup: MMR with two masters, + M1 - staging DIT + M2 - production DIT + add test accounts in staging DIT + :steps: + 1. Add an entry in production + :expectedresults: + 1. It should fail due to INSUFFICIENT_ACCESS + """ + + topology_m2.ms["master1"].log.info("\n\n######## mode moddn_aci : ADD (should fail) ########\n") + + _bind_normal(topology_m2) + + # + # First try to add an entry in production => INSUFFICIENT_ACCESS + # + try: + topology_m2.ms["master1"].log.info("Try to add %s" % PRODUCTION_DN) + name = "%s%d" % (NEW_ACCOUNT, 0) + topology_m2.ms["master1"].add_s(Entry(("uid=%s,%s" % (name, PRODUCTION_DN), { + 'objectclass': "top person".split(), + 'sn': name, + 'cn': name, + 'uid': name}))) + assert 0 # this is an error, we should not be allowed to add an entry in production + except Exception as e: + topology_m2.ms["master1"].log.info("Exception (expected): %s" % type(e).__name__) + assert isinstance(e, ldap.INSUFFICIENT_ACCESS) + + +def test_mode_default_delete_deny(topology_m2, moddn_setup): + """Tests that the DEL operation fails (no 'delete' aci on production) + + :id: 5dcb2213-3875-489a-8cb5-ace057120ad6 + :setup: MMR with two masters, + M1 - staging DIT + M2 - production DIT + add test accounts in staging DIT + :steps: + 1. Delete an entry in staging + :expectedresults: + 1. It should fail due to INSUFFICIENT_ACCESS + """ + + topology_m2.ms["master1"].log.info("\n\n######## DELETE (should fail) ########\n") + + _bind_normal(topology_m2) + # + # Second try to delete an entry in staging => INSUFFICIENT_ACCESS + # + try: + topology_m2.ms["master1"].log.info("Try to delete %s" % STAGING_DN) + name = "%s%d" % (NEW_ACCOUNT, 0) + topology_m2.ms["master1"].delete_s("uid=%s,%s" % (name, STAGING_DN)) + assert 0 # this is an error, we should not be allowed to add an entry in production + except Exception as e: + topology_m2.ms["master1"].log.info("Exception (expected): %s" % type(e).__name__) + assert isinstance(e, ldap.INSUFFICIENT_ACCESS) + + +@pytest.mark.parametrize("index,tfrom,tto,failure", + [(0, STAGING_DN, PRODUCTION_DN, False), + (1, STAGING_DN, PRODUCTION_DN, False), + (2, STAGING_DN, BAD_PRODUCTION_PATTERN, True), + (3, STAGING_PATTERN, PRODUCTION_DN, False), + (4, BAD_STAGING_PATTERN, PRODUCTION_DN, True), + (5, STAGING_PATTERN, PRODUCTION_PATTERN, False), + (6, None, PRODUCTION_PATTERN, False), + (7, STAGING_PATTERN, None, False), + (8, None, None, False)]) +def test_moddn_staging_prod(topology_m2, moddn_setup, + index, tfrom, tto, failure): + """This test case MOVE entry NEW_ACCOUNT0 from staging to prod + target_to/target_from: equality filter + + :id: cbafdd68-64d6-431f-9f22-6fbf9ed23ca0 + :parametrized: yes + :setup: MMR with two masters, + M1 - staging DIT + M2 - production DIT + add test accounts in staging DIT + :steps: + 1. Try to modify DN with moddn for each value of + STAGING_DN -> PRODUCTION_DN + 2. Try to modify DN with moddn for each value of + STAGING_DN -> PRODUCTION_DN with appropriate ACI + :expectedresults: + 1. It should fail due to INSUFFICIENT_ACCESS + 2. 
It should pass due to appropriate ACI + """ + + topology_m2.ms["master1"].log.info("\n\n######## MOVE staging -> Prod (%s) ########\n" % index) + _bind_normal(topology_m2) + + old_rdn = "uid=%s%s" % (NEW_ACCOUNT, index) + old_dn = "%s,%s" % (old_rdn, STAGING_DN) + new_rdn = old_rdn + new_superior = PRODUCTION_DN + + # + # Try to rename without the appropriate ACI => INSUFFICIENT_ACCESS + # + try: + topology_m2.ms["master1"].log.info("Try to MODDN %s -> %s,%s" % (old_dn, new_rdn, new_superior)) + topology_m2.ms["master1"].rename_s(old_dn, new_rdn, newsuperior=new_superior) + assert 0 + except AssertionError: + topology_m2.ms["master1"].log.info( + "Exception (not really expected exception but that is fine as it fails to rename)") + except Exception as e: + topology_m2.ms["master1"].log.info("Exception (expected): %s" % type(e).__name__) + assert isinstance(e, ldap.INSUFFICIENT_ACCESS) + + # successful MOD with the ACI + topology_m2.ms["master1"].log.info("\n\n######## MOVE to and from equality filter ########\n") + _bind_manager(topology_m2) + _moddn_aci_staging_to_production(topology_m2, mod_type=ldap.MOD_ADD, + target_from=tfrom, target_to=tto) + _bind_normal(topology_m2) + + try: + topology_m2.ms["master1"].log.info("Try to MODDN %s -> %s,%s" % (old_dn, new_rdn, new_superior)) + topology_m2.ms["master1"].rename_s(old_dn, new_rdn, newsuperior=new_superior) + except Exception as e: + topology_m2.ms["master1"].log.info("Exception (expected): %s" % type(e).__name__) + if failure: + assert isinstance(e, ldap.INSUFFICIENT_ACCESS) + + # successful MOD with the both ACI + _bind_manager(topology_m2) + _moddn_aci_staging_to_production(topology_m2, mod_type=ldap.MOD_DELETE, + target_from=tfrom, target_to=tto) + _bind_normal(topology_m2) + + +def test_moddn_staging_prod_9(topology_m2, moddn_setup): + """ + :id: 222dd7e8-7ff1-40b8-ad26-6f8e42fbfcd9 + :setup: MMR with two masters, + M1 - staging DIT + M2 - production DIT + add test accounts in staging DIT + :steps: + 1. Try to modify DN with moddn STAGING_DN -> PRODUCTION_DN + 2. Add the moddn aci that will not be evaluated because of the config flag + 3. Try to do modDN + 4. Remove the moddn aci + 5. Add the 'add' right to the production DN + 6. Try to modify DN with moddn with 'add' right + 7. Enable the moddn right + 8. Try to rename without the appropriate ACI + 9. Add the 'add' right to the production DN + 10. Try to rename without the appropriate ACI + 11. Remove the moddn aci + :expectedresults: + 1. It should fail due to INSUFFICIENT_ACCESS + 2. It should pass + 3. It should fail due to INSUFFICIENT_ACCESS + 4. It should pass + 5. It should pass + 6. It should pass + 7. It should pass + 8. It should fail due to INSUFFICIENT_ACCESS + 9. It should pass + 10. It should fail due to INSUFFICIENT_ACCESS + 11. 
It should pass + """ + topology_m2.ms["master1"].log.info("\n\n######## MOVE staging -> Prod (9) ########\n") + + _bind_normal(topology_m2) + old_rdn = "uid=%s9" % NEW_ACCOUNT + old_dn = "%s,%s" % (old_rdn, STAGING_DN) + new_rdn = old_rdn + new_superior = PRODUCTION_DN + prod = OrganizationalRole(topology_m2.ms["master1"], PRODUCTION_DN) + + # + # Try to rename without the appropriate ACI => INSUFFICIENT_ACCESS + # + try: + topology_m2.ms["master1"].log.info("Try to MODDN %s -> %s,%s" % (old_dn, new_rdn, new_superior)) + topology_m2.ms["master1"].rename_s(old_dn, new_rdn, newsuperior=new_superior) + assert 0 + except AssertionError: + topology_m2.ms["master1"].log.info( + "Exception (not really expected exception but that is fine as it fails to rename)") + except Exception as e: + topology_m2.ms["master1"].log.info("Exception (expected): %s" % type(e).__name__) + assert isinstance(e, ldap.INSUFFICIENT_ACCESS) + + ############# + # Now do tests with no support of moddn aci + ############# + topology_m2.ms["master1"].log.info("Disable the moddn right") + _bind_manager(topology_m2) + topology_m2.ms["master1"].config.set(CONFIG_MODDN_ACI_ATTR, 'off') + + # Add the moddn aci that will not be evaluated because of the config flag + topology_m2.ms["master1"].log.info("\n\n######## MOVE to and from equality filter ########\n") + _bind_manager(topology_m2) + _moddn_aci_staging_to_production(topology_m2, mod_type=ldap.MOD_ADD, + target_from=STAGING_DN, target_to=PRODUCTION_DN) + _bind_normal(topology_m2) + + # It will fail because it will test the ADD right + try: + topology_m2.ms["master1"].log.info("Try to MODDN %s -> %s,%s" % (old_dn, new_rdn, new_superior)) + topology_m2.ms["master1"].rename_s(old_dn, new_rdn, newsuperior=new_superior) + assert 0 + except AssertionError: + topology_m2.ms["master1"].log.info( + "Exception (not really expected exception but that is fine as it fails to rename)") + except Exception as e: + topology_m2.ms["master1"].log.info("Exception (expected): %s" % type(e).__name__) + assert isinstance(e, ldap.INSUFFICIENT_ACCESS) + + # remove the moddn aci + _bind_manager(topology_m2) + _moddn_aci_staging_to_production(topology_m2, mod_type=ldap.MOD_DELETE, + target_from=STAGING_DN, target_to=PRODUCTION_DN) + _bind_normal(topology_m2) + + # + # add the 'add' right to the production DN + # Then do a successful moddn + # + ACI_ALLOW = "(version 3.0; acl \"ADD rights to allow moddn\"; allow (add)" + ACI_SUBJECT = " userdn = \"ldap:///%s\";)" % BIND_DN + ACI_BODY = ACI_ALLOW + ACI_SUBJECT + + _bind_manager(topology_m2) + prod.add('aci', ACI_BODY) + _write_aci_staging(topology_m2, mod_type=ldap.MOD_ADD) + _bind_normal(topology_m2) + + topology_m2.ms["master1"].log.info("Try to MODDN %s -> %s,%s" % (old_dn, new_rdn, new_superior)) + topology_m2.ms["master1"].rename_s(old_dn, new_rdn, newsuperior=new_superior) + + _bind_manager(topology_m2) + prod.remove('aci', ACI_BODY) + _write_aci_staging(topology_m2, mod_type=ldap.MOD_DELETE) + _bind_normal(topology_m2) + + ############# + # Now do tests with support of moddn aci + ############# + topology_m2.ms["master1"].log.info("Enable the moddn right") + _bind_manager(topology_m2) + topology_m2.ms["master1"].config.set(CONFIG_MODDN_ACI_ATTR, 'on') + + topology_m2.ms["master1"].log.info("\n\n######## MOVE staging -> Prod (10) ########\n") + + _bind_normal(topology_m2) + old_rdn = "uid=%s10" % NEW_ACCOUNT + old_dn = "%s,%s" % (old_rdn, STAGING_DN) + new_rdn = old_rdn + new_superior = PRODUCTION_DN + + # + # Try to rename without the appropriate 
ACI => INSUFFICIENT_ACCESS + # + try: + topology_m2.ms["master1"].log.info("Try to MODDN %s -> %s,%s" % (old_dn, new_rdn, new_superior)) + topology_m2.ms["master1"].rename_s(old_dn, new_rdn, newsuperior=new_superior) + assert 0 + except AssertionError: + topology_m2.ms["master1"].log.info( + "Exception (not really expected exception but that is fine as it fails to rename)") + except Exception as e: + topology_m2.ms["master1"].log.info("Exception (expected): %s" % type(e).__name__) + assert isinstance(e, ldap.INSUFFICIENT_ACCESS) + + # + # add the 'add' right to the production DN + # Then do a failing moddn + # + ACI_ALLOW = "(version 3.0; acl \"ADD rights to allow moddn\"; allow (add)" + ACI_SUBJECT = " userdn = \"ldap:///%s\";)" % BIND_DN + ACI_BODY = ACI_ALLOW + ACI_SUBJECT + + _bind_manager(topology_m2) + prod.add('aci', ACI_BODY) + _write_aci_staging(topology_m2, mod_type=ldap.MOD_ADD) + _bind_normal(topology_m2) + + try: + topology_m2.ms["master1"].log.info("Try to MODDN %s -> %s,%s" % (old_dn, new_rdn, new_superior)) + topology_m2.ms["master1"].rename_s(old_dn, new_rdn, newsuperior=new_superior) + assert 0 + except AssertionError: + topology_m2.ms["master1"].log.info( + "Exception (not really expected exception but that is fine as it fails to rename)") + except Exception as e: + topology_m2.ms["master1"].log.info("Exception (expected): %s" % type(e).__name__) + assert isinstance(e, ldap.INSUFFICIENT_ACCESS) + + _bind_manager(topology_m2) + prod.remove('aci', ACI_BODY) + _write_aci_staging(topology_m2, mod_type=ldap.MOD_DELETE) + _bind_normal(topology_m2) + + # Add the moddn aci that will be evaluated because of the config flag + topology_m2.ms["master1"].log.info("\n\n######## MOVE to and from equality filter ########\n") + _bind_manager(topology_m2) + _moddn_aci_staging_to_production(topology_m2, mod_type=ldap.MOD_ADD, + target_from=STAGING_DN, target_to=PRODUCTION_DN) + _bind_normal(topology_m2) + + topology_m2.ms["master1"].log.info("Try to MODDN %s -> %s,%s" % (old_dn, new_rdn, new_superior)) + topology_m2.ms["master1"].rename_s(old_dn, new_rdn, newsuperior=new_superior) + + # remove the moddn aci + _bind_manager(topology_m2) + _moddn_aci_staging_to_production(topology_m2, mod_type=ldap.MOD_DELETE, + target_from=STAGING_DN, target_to=PRODUCTION_DN) + _bind_normal(topology_m2) + + +def test_moddn_prod_staging(topology_m2, moddn_setup): + """This test checks that we can move ACCOUNT11 from staging to prod + but not move back ACCOUNT11 from prod to staging + + :id: 2b061e92-483f-4399-9f56-8d1c1898b043 + :setup: MMR with two masters, + M1 - staging DIT + M2 - production DIT + add test accounts in staging DIT + :steps: + 1. Try to rename without the appropriate ACI + 2. Try to MOD with the ACI from stage to production + 3. Try to move back the entry to staging from production + :expectedresults: + 1. It should fail due to INSUFFICIENT_ACCESS + 2. It should pass + 3. 
It should fail due to INSUFFICIENT_ACCESS + """ + + topology_m2.ms["master1"].log.info("\n\n######## MOVE staging -> Prod (11) ########\n") + + _bind_normal(topology_m2) + + old_rdn = "uid=%s11" % NEW_ACCOUNT + old_dn = "%s,%s" % (old_rdn, STAGING_DN) + new_rdn = old_rdn + new_superior = PRODUCTION_DN + + # + # Try to rename without the appropriate ACI => INSUFFICIENT_ACCESS + # + try: + topology_m2.ms["master1"].log.info("Try to MODDN %s -> %s,%s" % (old_dn, new_rdn, new_superior)) + topology_m2.ms["master1"].rename_s(old_dn, new_rdn, newsuperior=new_superior) + assert 0 + except AssertionError: + topology_m2.ms["master1"].log.info( + "Exception (not really expected exception but that is fine as it fails to rename)") + except Exception as e: + topology_m2.ms["master1"].log.info("Exception (expected): %s" % type(e).__name__) + assert isinstance(e, ldap.INSUFFICIENT_ACCESS) + + # successful MOD with the ACI + topology_m2.ms["master1"].log.info("\n\n######## MOVE to and from equality filter ########\n") + _bind_manager(topology_m2) + _moddn_aci_staging_to_production(topology_m2, mod_type=ldap.MOD_ADD, + target_from=STAGING_DN, target_to=PRODUCTION_DN) + _bind_normal(topology_m2) + + topology_m2.ms["master1"].log.info("Try to MODDN %s -> %s,%s" % (old_dn, new_rdn, new_superior)) + topology_m2.ms["master1"].rename_s(old_dn, new_rdn, newsuperior=new_superior) + + # Now check we can not move back the entry to staging + old_rdn = "uid=%s11" % NEW_ACCOUNT + old_dn = "%s,%s" % (old_rdn, PRODUCTION_DN) + new_rdn = old_rdn + new_superior = STAGING_DN + + # add the write right because we want to check the moddn + _bind_manager(topology_m2) + _write_aci_production(topology_m2, mod_type=ldap.MOD_ADD) + _bind_normal(topology_m2) + + try: + topology_m2.ms["master1"].log.info("Try to move back MODDN %s -> %s,%s" % (old_dn, new_rdn, new_superior)) + topology_m2.ms["master1"].rename_s(old_dn, new_rdn, newsuperior=new_superior) + assert 0 + except AssertionError: + topology_m2.ms["master1"].log.info( + "Exception (not really expected exception but that is fine as it fails to rename)") + except Exception as e: + topology_m2.ms["master1"].log.info("Exception (expected): %s" % type(e).__name__) + assert isinstance(e, ldap.INSUFFICIENT_ACCESS) + + _bind_manager(topology_m2) + _write_aci_production(topology_m2, mod_type=ldap.MOD_DELETE) + _bind_normal(topology_m2) + + # successful MOD with the both ACI + _bind_manager(topology_m2) + _moddn_aci_staging_to_production(topology_m2, mod_type=ldap.MOD_DELETE, + target_from=STAGING_DN, target_to=PRODUCTION_DN) + _bind_normal(topology_m2) + + +def test_check_repl_M2_to_M1(topology_m2, moddn_setup): + """Checks that replication is still working M2->M1, using ACCOUNT12 + + :id: 08ac131d-34b7-443f-aacd-23025bbd7de1 + :setup: MMR with two masters, + M1 - staging DIT + M2 - production DIT + add test accounts in staging DIT + :steps: + 1. Add an entry in M2 + 2. Search entry on M1 + :expectedresults: + 1. It should pass + 2. 
It should pass + """ + + topology_m2.ms["master1"].log.info("Bind as %s (M2)" % DN_DM) + topology_m2.ms["master2"].simple_bind_s(DN_DM, PASSWORD) + + rdn = "uid=%s12" % NEW_ACCOUNT + dn = "%s,%s" % (rdn, STAGING_DN) + new_account = UserAccount(topology_m2.ms["master2"], dn) + + # First wait for the ACCOUNT19 entry being replicated on M2 + loop = 0 + while loop <= 10: + try: + ent = topology_m2.ms["master2"].getEntry(dn, ldap.SCOPE_BASE, "(objectclass=*)") + break + except ldap.NO_SUCH_OBJECT: + time.sleep(1) + loop += 1 + assert loop <= 10 + + attribute = 'description' + tested_value = b'Hello world' + topology_m2.ms["master1"].log.info("Update (M2) %s (%s)" % (dn, attribute)) + new_account.add(attribute, tested_value) + + loop = 0 + while loop <= 10: + ent = topology_m2.ms["master1"].getEntry(dn, ldap.SCOPE_BASE, "(objectclass=*)") + assert ent is not None + if ent.hasAttr(attribute) and (ent.getValue(attribute) == tested_value): + break + + time.sleep(1) + loop += 1 + assert loop < 10 + topology_m2.ms["master1"].log.info("Update %s (%s) replicated on M1" % (dn, attribute)) + + +def test_moddn_staging_prod_except(topology_m2, moddn_setup): + """This test case MOVE entry NEW_ACCOUNT13 from staging to prod + but fails to move entry NEW_ACCOUNT14 from staging to prod_except + + :id: 02d34f4c-8574-428d-b43f-31227426392c + :setup: MMR with two masters, + M1 - staging DIT + M2 - production DIT + add test accounts in staging DIT + :steps: + 1. Try to move entry staging -> Prod + without the appropriate ACI + 2. Do MOD with the appropriate ACI + 3. Try to move an entry under Prod/Except from stage + 4. Try to do MOD with appropriate ACI + :expectedresults: + 1. It should fail due to INSUFFICIENT_ACCESS + 2. It should pass + 3. It should fail due to INSUFFICIENT_ACCESS + 4. 
It should pass + """ + + topology_m2.ms["master1"].log.info("\n\n######## MOVE staging -> Prod (13) ########\n") + _bind_normal(topology_m2) + + old_rdn = "uid=%s13" % NEW_ACCOUNT + old_dn = "%s,%s" % (old_rdn, STAGING_DN) + new_rdn = old_rdn + new_superior = PRODUCTION_DN + + # + # Try to rename without the appropriate ACI => INSUFFICIENT_ACCESS + # + try: + topology_m2.ms["master1"].log.info("Try to MODDN %s -> %s,%s" % (old_dn, new_rdn, new_superior)) + topology_m2.ms["master1"].rename_s(old_dn, new_rdn, newsuperior=new_superior) + assert 0 + except AssertionError: + topology_m2.ms["master1"].log.info( + "Exception (not really expected exception but that is fine as it fails to rename)") + except Exception as e: + topology_m2.ms["master1"].log.info("Exception (expected): %s" % type(e).__name__) + assert isinstance(e, ldap.INSUFFICIENT_ACCESS) + + # successful MOD with the ACI + topology_m2.ms["master1"].log.info("\n\n######## MOVE to and from equality filter ########\n") + _bind_manager(topology_m2) + _moddn_aci_staging_to_production(topology_m2, mod_type=ldap.MOD_ADD, + target_from=STAGING_DN, target_to=PRODUCTION_DN) + _moddn_aci_deny_tree(topology_m2, mod_type=ldap.MOD_ADD) + _bind_normal(topology_m2) + + topology_m2.ms["master1"].log.info("Try to MODDN %s -> %s,%s" % (old_dn, new_rdn, new_superior)) + topology_m2.ms["master1"].rename_s(old_dn, new_rdn, newsuperior=new_superior) + + # + # Now try to move an entry under except + # + topology_m2.ms["master1"].log.info("\n\n######## MOVE staging -> Prod/Except (14) ########\n") + old_rdn = "uid=%s14" % NEW_ACCOUNT + old_dn = "%s,%s" % (old_rdn, STAGING_DN) + new_rdn = old_rdn + new_superior = PROD_EXCEPT_DN + try: + topology_m2.ms["master1"].log.info("Try to MODDN %s -> %s,%s" % (old_dn, new_rdn, new_superior)) + topology_m2.ms["master1"].rename_s(old_dn, new_rdn, newsuperior=new_superior) + assert 0 + except AssertionError: + topology_m2.ms["master1"].log.info( + "Exception (not really expected exception but that is fine as it fails to rename)") + except Exception as e: + topology_m2.ms["master1"].log.info("Exception (expected): %s" % type(e).__name__) + assert isinstance(e, ldap.INSUFFICIENT_ACCESS) + + # successful MOD with the both ACI + _bind_manager(topology_m2) + _moddn_aci_staging_to_production(topology_m2, mod_type=ldap.MOD_DELETE, + target_from=STAGING_DN, target_to=PRODUCTION_DN) + _moddn_aci_deny_tree(topology_m2, mod_type=ldap.MOD_DELETE) + _bind_normal(topology_m2) + + +def test_mode_default_ger_no_moddn(topology_m2, moddn_setup): + """mode moddn_aci : Check Get Effective Rights Controls for entries + + :id: f4785d73-3b14-49c0-b981-d6ff96fa3496 + :setup: MMR with two masters, + M1 - staging DIT + M2 - production DIT + add test accounts in staging DIT + :steps: + 1. Search for GER controls on M1 + 2. Check 'n' is not in the entryLevelRights + :expectedresults: + 1. It should pass + 2. 
It should pass + """ + + topology_m2.ms["master1"].log.info("\n\n######## mode moddn_aci : GER no moddn ########\n") + request_ctrl = GetEffectiveRightsControl(criticality=True, + authzId=ensure_bytes("dn: " + BIND_DN)) + msg_id = topology_m2.ms["master1"].search_ext(PRODUCTION_DN, + ldap.SCOPE_SUBTREE, + "objectclass=*", + serverctrls=[request_ctrl]) + rtype, rdata, rmsgid, response_ctrl = topology_m2.ms["master1"].result3(msg_id) + # ger={} + value = '' + for dn, attrs in rdata: + topology_m2.ms["master1"].log.info("dn: %s" % dn) + value = attrs['entryLevelRights'][0] + + topology_m2.ms["master1"].log.info("######## entryLevelRights: %r" % value) + assert b'n' not in value + + +def test_mode_default_ger_with_moddn(topology_m2, moddn_setup): + """This test case adds the moddn aci and check ger contains 'n' + + :id: a752a461-432d-483a-89c0-dfb34045a969 + :setup: MMR with two masters, + M1 - staging DIT + M2 - production DIT + add test accounts in staging DIT + :steps: + 1. Add moddn ACI on M2 + 2. Search for GER controls on M1 + 3. Check entryLevelRights value for entries + 4. Check 'n' is in the entryLevelRights + :expectedresults: + 1. It should pass + 2. It should pass + 3. It should pass + 4. It should pass + """ + + topology_m2.ms["master1"].log.info("\n\n######## mode moddn_aci: GER with moddn ########\n") + + # successful MOD with the ACI + _bind_manager(topology_m2) + _moddn_aci_staging_to_production(topology_m2, mod_type=ldap.MOD_ADD, + target_from=STAGING_DN, target_to=PRODUCTION_DN) + _bind_normal(topology_m2) + + request_ctrl = GetEffectiveRightsControl(criticality=True, + authzId=ensure_bytes("dn: " + BIND_DN)) + msg_id = topology_m2.ms["master1"].search_ext(PRODUCTION_DN, + ldap.SCOPE_SUBTREE, + "objectclass=*", + serverctrls=[request_ctrl]) + rtype, rdata, rmsgid, response_ctrl = topology_m2.ms["master1"].result3(msg_id) + # ger={} + value = '' + for dn, attrs in rdata: + topology_m2.ms["master1"].log.info("dn: %s" % dn) + value = attrs['entryLevelRights'][0] + + topology_m2.ms["master1"].log.info("######## entryLevelRights: %r" % value) + assert b'n' in value + + # successful MOD with the both ACI + _bind_manager(topology_m2) + _moddn_aci_staging_to_production(topology_m2, mod_type=ldap.MOD_DELETE, + target_from=STAGING_DN, target_to=PRODUCTION_DN) + _bind_normal(topology_m2) + + +def test_mode_legacy_ger_no_moddn1(topology_m2, moddn_setup): + """This test checks mode legacy : GER no moddn + + :id: e783e05b-d0d0-4fd4-9572-258a81b7bd24 + :setup: MMR with two masters, + M1 - staging DIT + M2 - production DIT + add test accounts in staging DIT + :steps: + 1. Disable ACI checks - set nsslapd-moddn-aci: off + 2. Search for GER controls on M1 + 3. Check entryLevelRights value for entries + 4. Check 'n' is not in the entryLevelRights + :expectedresults: + 1. It should pass + 2. It should pass + 3. It should pass + 4. 
It should pass + """ + + topology_m2.ms["master1"].log.info("\n\n######## Disable the moddn aci mod ########\n") + _bind_manager(topology_m2) + topology_m2.ms["master1"].config.set(CONFIG_MODDN_ACI_ATTR, 'off') + + topology_m2.ms["master1"].log.info("\n\n######## mode legacy 1: GER no moddn ########\n") + request_ctrl = GetEffectiveRightsControl(criticality=True, authzId=ensure_bytes("dn: " + BIND_DN)) + msg_id = topology_m2.ms["master1"].search_ext(PRODUCTION_DN, + ldap.SCOPE_SUBTREE, + "objectclass=*", + serverctrls=[request_ctrl]) + rtype, rdata, rmsgid, response_ctrl = topology_m2.ms["master1"].result3(msg_id) + # ger={} + value = '' + for dn, attrs in rdata: + topology_m2.ms["master1"].log.info("dn: %s" % dn) + value = attrs['entryLevelRights'][0] + + topology_m2.ms["master1"].log.info("######## entryLevelRights: %r" % value) + assert b'n' not in value + + +def test_mode_legacy_ger_no_moddn2(topology_m2, moddn_setup): + """This test checks mode legacy : GER no moddn + + :id: af87e024-1744-4f1d-a2d3-ea2687e2351d + :setup: MMR with two masters, + M1 - staging DIT + M2 - production DIT + add test accounts in staging DIT + :steps: + 1. Disable ACI checks - set nsslapd-moddn-aci: off + 2. Add moddn ACI on M1 + 3. Search for GER controls on M1 + 4. Check entryLevelRights value for entries + 5. Check 'n' is not in the entryLevelRights + :expectedresults: + 1. It should pass + 2. It should pass + 3. It should pass + 4. It should be pass + 5. It should pass + """ + + topology_m2.ms["master1"].log.info("\n\n######## Disable the moddn aci mod ########\n") + _bind_manager(topology_m2) + topology_m2.ms["master1"].config.set(CONFIG_MODDN_ACI_ATTR, 'off') + + topology_m2.ms["master1"].log.info("\n\n######## mode legacy 2: GER no moddn ########\n") + # successful MOD with the ACI + _bind_manager(topology_m2) + _moddn_aci_staging_to_production(topology_m2, mod_type=ldap.MOD_ADD, + target_from=STAGING_DN, target_to=PRODUCTION_DN) + _bind_normal(topology_m2) + + request_ctrl = GetEffectiveRightsControl(criticality=True, + authzId=ensure_bytes("dn: " + BIND_DN)) + msg_id = topology_m2.ms["master1"].search_ext(PRODUCTION_DN, + ldap.SCOPE_SUBTREE, + "objectclass=*", + serverctrls=[request_ctrl]) + rtype, rdata, rmsgid, response_ctrl = topology_m2.ms["master1"].result3(msg_id) + # ger={} + value = '' + for dn, attrs in rdata: + topology_m2.ms["master1"].log.info("dn: %s" % dn) + value = attrs['entryLevelRights'][0] + + topology_m2.ms["master1"].log.info("######## entryLevelRights: %r" % value) + assert b'n' not in value + + # successful MOD with the both ACI + _bind_manager(topology_m2) + _moddn_aci_staging_to_production(topology_m2, mod_type=ldap.MOD_DELETE, + target_from=STAGING_DN, target_to=PRODUCTION_DN) + _bind_normal(topology_m2) + + +def test_mode_legacy_ger_with_moddn(topology_m2, moddn_setup): + """This test checks mode legacy : GER with moddn + + :id: 37c1e537-1b5d-4fab-b62a-50cd8c5b3493 + :setup: MMR with two masters, + M1 - staging DIT + M2 - production DIT + add test accounts in staging DIT + :steps: + 1. Disable ACI checks - set nsslapd-moddn-aci: off + 2. Add moddn ACI on M1 + 3. Search for GER controls on M1 + 4. Check entryLevelRights value for entries + 5. Check 'n' is in the entryLevelRights + 6. Try MOD with the both ACI + :expectedresults: + 1. It should pass + 2. It should pass + 3. It should pass + 4. It should pass + 5. It should pass + 6. 
It should pass + """ + + suffix = Domain(topology_m2.ms["master1"], SUFFIX) + + topology_m2.ms["master1"].log.info("\n\n######## Disable the moddn aci mod ########\n") + _bind_manager(topology_m2) + topology_m2.ms["master1"].config.set(CONFIG_MODDN_ACI_ATTR, 'off') + + topology_m2.ms["master1"].log.info("\n\n######## mode legacy : GER with moddn ########\n") + + # being allowed to read/write the RDN attribute use to allow the RDN + ACI_TARGET = "(target = \"ldap:///%s\")(targetattr=\"uid\")" % (PRODUCTION_DN) + ACI_ALLOW = "(version 3.0; acl \"MODDN production changing the RDN attribute\"; allow (read,search,write)" + ACI_SUBJECT = " userdn = \"ldap:///%s\";)" % BIND_DN + ACI_BODY = ACI_TARGET + ACI_ALLOW + ACI_SUBJECT + + # successful MOD with the ACI + _bind_manager(topology_m2) + suffix.add('aci', ACI_BODY) + _bind_normal(topology_m2) + + request_ctrl = GetEffectiveRightsControl(criticality=True, authzId=ensure_bytes("dn: " + BIND_DN)) + msg_id = topology_m2.ms["master1"].search_ext(PRODUCTION_DN, + ldap.SCOPE_SUBTREE, + "objectclass=*", + serverctrls=[request_ctrl]) + rtype, rdata, rmsgid, response_ctrl = topology_m2.ms["master1"].result3(msg_id) + # ger={} + value = '' + for dn, attrs in rdata: + topology_m2.ms["master1"].log.info("dn: %s" % dn) + value = attrs['entryLevelRights'][0] + + topology_m2.ms["master1"].log.info("######## entryLevelRights: %r" % value) + assert b'n' in value + + # successful MOD with the both ACI + _bind_manager(topology_m2) + suffix.remove('aci', ACI_BODY) + # _bind_normal(topology_m2) + + +@pytest.fixture(scope="module") +def rdn_write_setup(topology_m2): + topology_m2.ms["master1"].log.info("\n\n######## Add entry tuser ########\n") + topology_m2.ms["master1"].add_s(Entry((SRC_ENTRY_DN, { + 'objectclass': "top person".split(), + 'sn': SRC_ENTRY_CN, + 'cn': SRC_ENTRY_CN}))) + + +def test_rdn_write_get_ger(topology_m2, rdn_write_setup): + """This test checks GER rights for anonymous + + :id: d5d85f87-b53d-4f50-8fa6-a9e55c75419b + :setup: MMR with two masters, + Add entry tuser + :steps: + 1. Search for GER controls on M1 + 2. Check entryLevelRights value for entries + 3. Check 'n' is not in the entryLevelRights + :expectedresults: + 1. It should pass + 2. It should be pass + 3. It should pass + """ + + ANONYMOUS_DN = "" + topology_m2.ms["master1"].log.info("\n\n######## GER rights for anonymous ########\n") + request_ctrl = GetEffectiveRightsControl(criticality=True, + authzId=ensure_bytes("dn:" + ANONYMOUS_DN)) + msg_id = topology_m2.ms["master1"].search_ext(SUFFIX, + ldap.SCOPE_SUBTREE, + "objectclass=*", + serverctrls=[request_ctrl]) + rtype, rdata, rmsgid, response_ctrl = topology_m2.ms["master1"].result3(msg_id) + value = '' + for dn, attrs in rdata: + topology_m2.ms["master1"].log.info("dn: %s" % dn) + for value in attrs['entryLevelRights']: + topology_m2.ms["master1"].log.info("######## entryLevelRights: %r" % value) + assert b'n' not in value + + +def test_rdn_write_modrdn_anonymous(topology_m2, rdn_write_setup): + """Tests anonymous user for modrdn + + :id: fc07be23-3341-44ab-a53c-c68c5f9569c7 + :setup: MMR with two masters, + Add entry tuser + :steps: + 1. Bind as anonymous user + 2. Try to perform MODRDN operation (SRC_ENTRY_DN -> DST_ENTRY_CN) + 3. Try to search DST_ENTRY_CN + :expectedresults: + 1. It should pass + 2. It should fails with INSUFFICIENT_ACCESS + 3. 
It should fails with NO_SUCH_OBJECT + """ + + ANONYMOUS_DN = "" + topology_m2.ms["master1"].close() + topology_m2.ms["master1"].binddn = ANONYMOUS_DN + topology_m2.ms["master1"].open() + msg_id = topology_m2.ms["master1"].search_ext("", ldap.SCOPE_BASE, "objectclass=*") + rtype, rdata, rmsgid, response_ctrl = topology_m2.ms["master1"].result3(msg_id) + for dn, attrs in rdata: + topology_m2.ms["master1"].log.info("dn: %s" % dn) + for attr in attrs: + topology_m2.ms["master1"].log.info("######## %r: %r" % (attr, attrs[attr])) + + try: + topology_m2.ms["master1"].rename_s(SRC_ENTRY_DN, "cn=%s" % DST_ENTRY_CN, delold=True) + except Exception as e: + topology_m2.ms["master1"].log.info("Exception (expected): %s" % type(e).__name__) + isinstance(e, ldap.INSUFFICIENT_ACCESS) + + try: + topology_m2.ms["master1"].getEntry(DST_ENTRY_DN, ldap.SCOPE_BASE, "objectclass=*") + assert False + except Exception as e: + topology_m2.ms["master1"].log.info("The entry was not renamed (expected)") + isinstance(e, ldap.NO_SUCH_OBJECT) + + _bind_manager(topology_m2) + + +if __name__ == '__main__': + # Run isolated + # -s for DEBUG mode + CURRENT_FILE = os.path.realpath(__file__) + pytest.main("-s %s" % CURRENT_FILE) diff --git a/dirsrvtests/tests/suites/acl/conftest.py b/dirsrvtests/tests/suites/acl/conftest.py new file mode 100644 index 0000000..b0a7241 --- /dev/null +++ b/dirsrvtests/tests/suites/acl/conftest.py @@ -0,0 +1,125 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2019 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. +# --- END COPYRIGHT BLOCK ---- + +""" +This is the config file for keywords test scripts. + +""" + +import pytest + +from lib389._constants import DEFAULT_SUFFIX, PW_DM +from lib389.idm.user import UserAccounts +from lib389.idm.organizationalunit import OrganizationalUnit, OrganizationalUnits +from lib389.topologies import topology_st as topo +from lib389.idm.domain import Domain + + +@pytest.fixture(scope="function") +def aci_of_user(request, topo): + """ + Removes and Restores ACIs after the test. + """ + aci_list = Domain(topo.standalone, DEFAULT_SUFFIX).get_attr_vals_utf8('aci') + + def finofaci(): + """ + Removes and Restores ACIs after the test. + """ + domain = Domain(topo.standalone, DEFAULT_SUFFIX) + domain.remove_all('aci') + for aci in aci_list: + domain.add("aci", aci) + + request.addfinalizer(finofaci) + + +@pytest.fixture(scope="module") +def add_user(request, topo): + """ + This function will create user for the test and in the end entries will be deleted . 
+ """ + ous_origin = OrganizationalUnits(topo.standalone, DEFAULT_SUFFIX) + ou_origin = ous_origin.create(properties={'ou': 'Keywords'}) + + ous_next = OrganizationalUnits(topo.standalone, ou_origin.dn) + for ou in ['Authmethod', 'Dayofweek', 'DNS', 'IP', 'Timeofday']: + ous_next.create(properties={'ou': ou}) + + users_day_of_week = UserAccounts(topo.standalone, f"ou=Dayofweek,ou=Keywords,{DEFAULT_SUFFIX}", rdn=None) + for user in ['EVERYDAY_KEY', 'TODAY_KEY', 'NODAY_KEY']: + users_day_of_week.create(properties={ + 'uid': user, + 'cn': user, + 'sn': 'user', + 'uidNumber': '1000', + 'gidNumber': '2000', + 'homeDirectory': '/home/' + user, + 'userPassword': PW_DM + }) + + users_ip = UserAccounts(topo.standalone, f"ou=IP,ou=Keywords,{DEFAULT_SUFFIX}", rdn=None) + for user in ['FULLIP_KEY', 'NETSCAPEIP_KEY', 'NOIP_KEY']: + users_ip.create(properties={ + 'uid': user, + 'cn': user, + 'sn': 'user', + 'uidNumber': '1000', + 'gidNumber': '2000', + 'homeDirectory': '/home/' + user, + 'userPassword': PW_DM + }) + + users_timeof_day = UserAccounts(topo.standalone, f"ou=Timeofday,ou=Keywords,{DEFAULT_SUFFIX}", rdn=None) + for user in ['FULLWORKER_KEY', 'DAYWORKER_KEY', 'NOWORKER_KEY', 'NIGHTWORKER_KEY']: + users_timeof_day.create(properties={ + 'uid': user, + 'cn': user, + 'sn': 'user', + 'uidNumber': '1000', + 'gidNumber': '2000', + 'homeDirectory': '/home/' + user, + 'userPassword': PW_DM + }) + + users_authmethod = UserAccounts(topo.standalone, f"ou=Authmethod,ou=Keywords,{DEFAULT_SUFFIX}", rdn=None) + for user in ['NONE_1_KEY', 'NONE_2_KEY', 'SIMPLE_1_KEY']: + users_authmethod.create(properties={ + 'uid': user, + 'cn': user, + 'sn': 'user', + 'uidNumber': '1000', + 'gidNumber': '2000', + 'homeDirectory': '/home/' + user, + 'userPassword': PW_DM + }) + + users_dns = UserAccounts(topo.standalone, f"ou=DNS,ou=Keywords,{DEFAULT_SUFFIX}", rdn=None) + for user in ['FULLDNS_KEY', 'SUNDNS_KEY', 'NODNS_KEY', 'NETSCAPEDNS_KEY']: + users_dns.create(properties={ + 'uid': user, + 'cn': user, + 'sn': 'user', + 'uidNumber': '1000', + 'gidNumber': '2000', + 'homeDirectory': '/home/' + user, + 'userPassword': PW_DM + }) + + def fin(): + """ + Deletes entries after the test. + """ + for user in users_day_of_week.list() + users_ip.list() + users_timeof_day.list() + \ + users_authmethod.list() + users_dns.list(): + user.delete() + + for ou in sorted(ous_next.list(), key=lambda x: len(x.dn), reverse=True): + ou.delete() + + request.addfinalizer(fin) diff --git a/dirsrvtests/tests/suites/acl/default_aci_allows_self_write.py b/dirsrvtests/tests/suites/acl/default_aci_allows_self_write.py new file mode 100644 index 0000000..5700abf --- /dev/null +++ b/dirsrvtests/tests/suites/acl/default_aci_allows_self_write.py @@ -0,0 +1,133 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2019 William Brown +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. 
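+#
+# The self-write checks below lean on ACIs that ship with the server. A
+# minimal, illustrative way to read back the ACIs stored on the suffix entry,
+# using the same lib389 objects this suite already uses (the helper name is
+# not part of the test), would be:
+#
+from lib389._constants import DEFAULT_SUFFIX
+from lib389.idm.domain import Domain
+
+def _dump_suffix_acis(inst):
+    # Read the aci values from the suffix entry as UTF-8 strings and log them.
+    suffix = Domain(inst, DEFAULT_SUFFIX)
+    for aci in suffix.get_attr_vals_utf8('aci'):
+        inst.log.info("aci: %s", aci)
+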
+# --- END COPYRIGHT BLOCK --- + + +import pytest +from lib389.idm.user import nsUserAccounts, UserAccounts +from lib389.topologies import topology_st as topology +from lib389.paths import Paths +from lib389.utils import ds_is_older +from lib389._constants import * + +default_paths = Paths() + +pytestmark = pytest.mark.tier1 + +USER_PASSWORD = "some test password" +NEW_USER_PASSWORD = "some new password" + +@pytest.mark.skipif(default_paths.perl_enabled or ds_is_older('1.4.2.0'), reason="Default aci's in older versions do not support this functionality") +def test_acl_default_allow_self_write_nsuser(topology): + """ + Testing nsusers can self write and self read. This it a sanity test + so that our default entries have their aci's checked. + + :id: 4f0fb01a-36a6-430c-a2ee-ebeb036bd951 + + :setup: Standalone instance + + :steps: + 1. Testing comparison of two different users. + + :expectedresults: + 1. Should fail to compare + """ + topology.standalone.enable_tls() + nsusers = nsUserAccounts(topology.standalone, DEFAULT_SUFFIX) + # Create a user as dm. + user = nsusers.create(properties={ + 'uid': 'test_nsuser', + 'cn': 'test_nsuser', + 'displayName': 'testNsuser', + 'legalName': 'testNsuser', + 'uidNumber': '1001', + 'gidNumber': '1001', + 'homeDirectory': '/home/testnsuser', + 'userPassword': USER_PASSWORD, + }) + # Create a new con and bind as the user. + user_conn = user.bind(USER_PASSWORD) + + user_nsusers = nsUserAccounts(user_conn, DEFAULT_SUFFIX) + self_ent = user_nsusers.get(dn=user.dn) + + # Can we self read x,y,z + check = self_ent.get_attrs_vals_utf8([ + 'uid', + 'cn', + 'displayName', + 'legalName', + 'uidNumber', + 'gidNumber', + 'homeDirectory', + ]) + for k in check.values(): + # Could we read the values? + assert(isinstance(k, list)) + assert(len(k) > 0) + # Can we self change a,b,c + self_ent.ensure_attr_state({ + 'legalName': ['testNsuser_update'], + 'displayName': ['testNsuser_update'], + 'nsSshPublicKey': ['testkey'], + }) + # self change pw + self_ent.change_password(USER_PASSWORD, NEW_USER_PASSWORD) + + +@pytest.mark.skipif(default_paths.perl_enabled or ds_is_older('1.4.2.0'), reason="Default aci's in older versions do not support this functionality") +def test_acl_default_allow_self_write_user(topology): + """ + Testing users can self write and self read. This it a sanity test + so that our default entries have their aci's checked. + + :id: 4c52321b-f473-4c32-a1d5-489b138cd199 + + :setup: Standalone instance + + :steps: + 1. Testing comparison of two different users. + + :expectedresults: + 1. Should fail to compare + """ + topology.standalone.enable_tls() + users = UserAccounts(topology.standalone, DEFAULT_SUFFIX) + # Create a user as dm. + user = users.create(properties={ + 'uid': 'test_user', + 'cn': 'test_user', + 'sn': 'User', + 'uidNumber': '1002', + 'gidNumber': '1002', + 'homeDirectory': '/home/testuser', + 'userPassword': USER_PASSWORD, + }) + # Create a new con and bind as the user. + user_conn = user.bind(USER_PASSWORD) + + user_users = UserAccounts(user_conn, DEFAULT_SUFFIX) + self_ent = user_users.get(dn=user.dn) + # Can we self read x,y,z + check = self_ent.get_attrs_vals_utf8([ + 'uid', + 'cn', + 'sn', + 'uidNumber', + 'gidNumber', + 'homeDirectory', + ]) + for (a, k) in check.items(): + print(a) + # Could we read the values? 
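+        # Each attribute granted self-read by the default aci should come back
+        # as a non-empty list of values (same sanity check as the nsuser test above).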
+ assert(isinstance(k, list)) + assert(len(k) > 0) + # Self change pw + self_ent.change_password(USER_PASSWORD, NEW_USER_PASSWORD) + + diff --git a/dirsrvtests/tests/suites/acl/deladd_test.py b/dirsrvtests/tests/suites/acl/deladd_test.py new file mode 100644 index 0000000..45a66be --- /dev/null +++ b/dirsrvtests/tests/suites/acl/deladd_test.py @@ -0,0 +1,456 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2019 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. +# --- END COPYRIGHT BLOCK --- + +""" +Importing necessary Modules. +""" + +import os +import pytest + +from lib389._constants import DEFAULT_SUFFIX, PW_DM +from lib389.idm.user import UserAccount, UserAccounts +from lib389.idm.group import Groups +from lib389.idm.organizationalunit import OrganizationalUnit, OrganizationalUnits +from lib389.topologies import topology_st as topo +from lib389.idm.domain import Domain + +import ldap + +pytestmark = pytest.mark.tier1 + + +USER_WITH_ACI_DELADD = 'uid=test_user_1000,ou=People,dc=example,dc=com' +USER_DELADD = 'uid=test_user_1,ou=Accounting,dc=example,dc=com' + + +@pytest.fixture(scope="function") +def _aci_of_user(request, topo): + """ + Removes and Restores ACIs after the test. + """ + aci_list = Domain(topo.standalone, DEFAULT_SUFFIX).get_attr_vals('aci') + + def finofaci(): + """ + Removes and Restores ACIs after the test. + """ + domain = Domain(topo.standalone, DEFAULT_SUFFIX) + domain.remove_all('aci') + for i in aci_list: + domain.add("aci", i) + + request.addfinalizer(finofaci) + + +@pytest.fixture(scope="function") +def _add_user(request, topo): + """ + This function will create user for the test and in the end entries will be deleted . + """ + + users = UserAccounts(topo.standalone, DEFAULT_SUFFIX) + user = users.create_test_user() + user.set("userPassword", PW_DM) + + ous = OrganizationalUnits(topo.standalone, DEFAULT_SUFFIX) + ous.create(properties={'ou':'Accounting'}) + + users = UserAccounts(topo.standalone, DEFAULT_SUFFIX, rdn='ou=Accounting') + for i in range(1, 3): + user = users.create_test_user(uid=i, gid=i) + user.set("userPassword", PW_DM) + + def fin(): + """ + Deletes entries after the test. + """ + users1 = UserAccounts(topo.standalone, DEFAULT_SUFFIX, rdn=None) + for dn_dn in users1.list(): + dn_dn.delete() + + groups = Groups(topo.standalone, DEFAULT_SUFFIX) + for dn_dn in groups.list(): + dn_dn.delete() + + ou_ou = OrganizationalUnit(topo.standalone, f'ou=Accounting,{DEFAULT_SUFFIX}') + ou_ou.delete() + + request.addfinalizer(fin) + + +def test_allow_delete_access_to_groupdn(topo, _add_user, _aci_of_user): + + """ + Test allow delete access to groupdn + :id: 7cf15992-68ad-11e8-85af-54e1ad30572c + :setup: topo.standalone + :steps: + 1. Add test entry + 2. Add ACI that allows groupdn to delete + 3. Delete something using test USER_DELADD + 4. Remove ACI + :expectedresults: + 1. Entry should be added + 2. ACI should be added + 3. Delete operation should succeed + 4. 
Delete operation for ACI should succeed + """ + # Create Group and add member + groups = Groups(topo.standalone, DEFAULT_SUFFIX) + group = groups.create(properties={"cn": "group1", + "description": "testgroup"}) + group.add_member(USER_WITH_ACI_DELADD) + + # set aci + aci_target = f'(targetattr="*")' + aci_allow = f'(version 3.0; acl "All rights for {group.dn}"; allow (delete) ' + aci_subject = f'groupdn="ldap:///{group.dn}";)' + + Domain(topo.standalone, DEFAULT_SUFFIX).add("aci", (aci_target + aci_allow + aci_subject)) + + # create connection with USER_WITH_ACI_DELADD + conn = UserAccount(topo.standalone, USER_WITH_ACI_DELADD).bind(PW_DM) + + # Perform delete operation + for i in [USER_DELADD, USER_WITH_ACI_DELADD]: + UserAccount(conn, i).delete() + + +def test_allow_add_access_to_anyone(topo, _add_user, _aci_of_user): + + """ + Test to allow add access to anyone + :id: 5ca31cc4-68e0-11e8-8666-8c16451d917b + :setup: topo.standalone + :steps: + 1. Add test entry + 2. Add ACI that allows groupdn to add + 3. Add something using test USER_DELADD + 4. Remove ACI + :expectedresults: + 1. Entry should be added + 2. ACI should be added + 3. Add operation should succeed + 4. Delete operation for ACI should succeed + """ + # set aci + aci_target = f'(targetattr="*")' + aci_allow = f'(version 3.0; acl "All rights for anyone"; allow (add) ' + aci_subject = f'userdn="ldap:///anyone";)' + Domain(topo.standalone, DEFAULT_SUFFIX).add("aci", (aci_target + aci_allow + aci_subject)) + + # create connection with USER_WITH_ACI_DELADD + conn = UserAccount(topo.standalone, USER_WITH_ACI_DELADD).bind(PW_DM) + + # Perform add operation + users = UserAccounts(conn, DEFAULT_SUFFIX, rdn='ou=Accounting') + user = users.create_test_user(gid=3, uid=3) + assert user.exists() + + users = UserAccounts(conn, DEFAULT_SUFFIX) + user = users.create_test_user(gid=3, uid=3) + assert user.exists() + + +def test_allow_delete_access_to_anyone(topo, _add_user, _aci_of_user): + + """ + Test to allow delete access to anyone + :id: f5447c7e-68e1-11e8-84c4-8c16451d917b + :setup: server + :steps: + 1. Add test entry + 2. Add ACI that allows groupdn to delete some userdn + 3. Delete something using test USER_DELADD + 4. Remove ACI + :expectedresults: + 1. Entry should be added + 2. ACI should be added + 3. Operation should succeed + 4. Delete operation for ACI should succeed + """ + # set aci + aci_target = f'(targetattr="*")' + aci_allow = f'(version 3.0; acl "All rights for anyone"; allow (delete) ' + aci_subject = f'userdn="ldap:///anyone";)' + + Domain(topo.standalone, DEFAULT_SUFFIX).add("aci", (aci_target + aci_allow + aci_subject)) + + # create connection with USER_WITH_ACI_DELADD + conn = UserAccount(topo.standalone, USER_WITH_ACI_DELADD).bind(PW_DM) + + # Perform delete operation + UserAccount(conn, USER_DELADD).delete() + + +def test_allow_delete_access_not_to_userdn(topo, _add_user, _aci_of_user): + + """ + Test to Allow delete access to != userdn + :id: 00637f6e-68e3-11e8-92a3-8c16451d917b + :setup: server + :steps: + 1. Add test entry + 2. Add ACI that allows userdn not to delete some userdn + 3. Delete something using test USER_DELADD + 4. Remove ACI + :expectedresults: + 1. Entry should be added + 2. ACI should be added + 3. Operation should not succeed + 4. 
Delete operation for ACI should succeed + """ + # set aci + aci_target = f'(targetattr="*")' + aci_allow = f'(version 3.0; acl "All rights for %s"; allow (delete) ' % USER_DELADD + aci_subject = f'userdn!="ldap:///{USER_WITH_ACI_DELADD}";)' + + Domain(topo.standalone, DEFAULT_SUFFIX).add("aci", (aci_target + aci_allow + aci_subject)) + + # create connection with USER_WITH_ACI_DELADD + conn = UserAccount(topo.standalone, USER_WITH_ACI_DELADD).bind(PW_DM) + + # Perform delete operation + user = UserAccount(conn, USER_DELADD) + with pytest.raises(ldap.INSUFFICIENT_ACCESS): + user.delete() + + +def test_allow_delete_access_not_to_group(topo, _add_user, _aci_of_user): + + """ + Test to Allow delete access to != groupdn + :id: f58fc8b0-68e5-11e8-9313-8c16451d917b + :setup: server + :steps: + 1. Add test entry + 2. Add ACI that allows groupdn not to delete some userdn + 3. Delete something using test USER_DELADD belong to test group + 4. Remove ACI + :expectedresults: + 1. Entry should be added + 2. ACI should be added + 3. Operation should not succeed + 4. Delete operation for ACI should succeed + """ + # Create group + groups = Groups(topo.standalone, DEFAULT_SUFFIX) + group = groups.create(properties={"cn": "group1", + "description": "testgroup"}) + group.add_member(USER_WITH_ACI_DELADD) + + # set aci + aci_target = f'(targetattr="*")' + aci_allow = f'(version 3.0; acl "All rights for {group.dn}"; allow (delete)' + aci_subject = f'groupdn!="ldap:///{group.dn}";)' + + Domain(topo.standalone, DEFAULT_SUFFIX).add("aci", (aci_target + aci_allow + aci_subject)) + + # create connection with USER_WITH_ACI_DELADD + conn = UserAccount(topo.standalone, USER_WITH_ACI_DELADD).bind(PW_DM) + user = UserAccount(conn, USER_DELADD) + + # Perform delete operation + with pytest.raises(ldap.INSUFFICIENT_ACCESS): + user.delete() + + +def test_allow_add_access_to_parent(topo, _add_user, _aci_of_user): + + """ + Test to Allow add privilege to parent + :id: 9f099845-9dbc-412f-bdb9-19a5ea729694 + :setup: server + :steps: + 1. Add test entry + 2. Add ACI that Allow add privilege to parent + 3. Add something using test USER_DELADD + 4. Remove ACI + :expectedresults: + 1. Entry should be added + 2. ACI should be added + 3. Operation should succeed + 4. Delete operation for ACI should succeed + """ + # set aci + aci_target = f'(targetattr="*")' + aci_allow = f'(version 3.0; acl "All rights for parent"; allow (add) ' + aci_subject = f'userdn="ldap:///parent";)' + + Domain(topo.standalone, DEFAULT_SUFFIX).add("aci", (aci_target + aci_allow + aci_subject)) + + # create connection with USER_WITH_ACI_DELADD + conn = UserAccount(topo.standalone, USER_WITH_ACI_DELADD).bind(PW_DM) + + # Perform Allow add privilege to parent + users = UserAccounts(conn, DEFAULT_SUFFIX, rdn='uid=test_user_1000, ou=people') + user = users.create_test_user(gid=1, uid=1) + assert user.exists() + + # Delete created user + UserAccounts(topo.standalone, DEFAULT_SUFFIX).get('test_user_1').delete() + + +def test_allow_delete_access_to_parent(topo, _add_user, _aci_of_user): + + """ + Test to Allow delete access to parent + :id: 2dd7f624-68e7-11e8-8591-8c16451d917b + :setup: server + :steps: + 1. Add test entry + 2. Add ACI that Allow delete privilege to parent + 3. Delete something using test USER_DELADD + 4. Remove ACI + :expectedresults: + 1. Entry should be added + 2. ACI should be added + 3. Operation should succeed + 4. 
Delete operation for ACI should succeed + """ + # set aci + aci_target = f'(targetattr="*")' + aci_allow = f'(version 3.0; acl "All rights for parent"; allow (add,delete) ' + aci_subject = f'userdn="ldap:///parent";)' + + Domain(topo.standalone, DEFAULT_SUFFIX).add("aci", (aci_target + aci_allow + aci_subject)) + + # create connection with USER_WITH_ACI_DELADD + conn = UserAccount(topo.standalone, USER_WITH_ACI_DELADD).bind(PW_DM) + + # Create a user with parent 'uid=test_user_1000, ou=people, {}'.format(DEFAULT_SUFFIX) + users = UserAccounts(conn, DEFAULT_SUFFIX, rdn='uid=test_user_1000, ou=people') + new_user = users.create_test_user(gid=1, uid=1) + assert new_user.exists() + + # Perform Allow delete access to parent + new_user.delete() + + +def test_allow_delete_access_to_dynamic_group(topo, _add_user, _aci_of_user): + + """ + Test to Allow delete access to dynamic group + :id: 14ffa452-68ed-11e8-a60d-8c16451d917b + :setup: server + :steps: + 1. Add test entry + 2. Add ACI that Allow delete privilege to dynamic group + 3. Delete something using test USER_DELADD + 4. Remove ACI + :expectedresults: + 1. Entry should be added + 2. ACI should be added + 3. Operation should succeed + 4. Delete operation for ACI should succeed + """ + # Create dynamic group + groups = Groups(topo.standalone, DEFAULT_SUFFIX) + group = groups.create(properties={"cn": "group1", + "description": "testgroup"}) + + group.add("objectclass", "groupOfURLs") + group.add("memberURL", + f"ldap:///dc=example,dc=com??sub?(&(objectclass=person)(uid=test_user_1000))") + + # Set ACI + Domain(topo.standalone, DEFAULT_SUFFIX).\ + add("aci", f'(target = ldap:///{DEFAULT_SUFFIX})(targetattr=*)' + f'(version 3.0; acl "$tet_thistest"; ' + f'allow (delete) (groupdn = "ldap:///{group.dn}"); )') + + # create connection with USER_WITH_ACI_DELADD + conn = UserAccount(topo.standalone, USER_WITH_ACI_DELADD).bind(PW_DM) + + # Perform Allow delete access to dynamic group + UserAccount(conn, USER_DELADD).delete() + + +def test_allow_delete_access_to_dynamic_group_uid(topo, _add_user, _aci_of_user): + + """ + Test to Allow delete access to dynamic group + :id: 010a4f20-752a-4173-b763-f520c7a85b82 + :setup: server + :steps: + 1. Add test entry + 2. Add ACI that Allow delete privilege to dynamic group + 3. Delete something using test USER_DELADD + 4. Remove ACI + :expectedresults: + 1. Entry should be added + 2. ACI should be added + 3. Operation should succeed + 4. Delete operation for ACI should succeed + """ + # Create dynamic group + groups = Groups(topo.standalone, DEFAULT_SUFFIX) + group = groups.create(properties={"cn": "group1", + "description": "testgroup"}) + + group.add("objectclass", "groupOfURLs") + group.add("memberURL", + f'ldap:///{DEFAULT_SUFFIX}??sub?(&(objectclass=person)(cn=test_user_1000))') + + # Set ACI + Domain(topo.standalone, DEFAULT_SUFFIX).\ + add("aci", f'(target = ldap:///{DEFAULT_SUFFIX})' + f'(targetattr=uid)(version 3.0; acl "$tet_thistest"; ' + f'allow (delete) (groupdn = "ldap:///{group.dn}"); )') + + # create connection with USER_WITH_ACI_DELADD + conn = UserAccount(topo.standalone, USER_WITH_ACI_DELADD).bind(PW_DM) + + # Perform Allow delete access to dynamic group + UserAccount(conn, USER_DELADD).delete() + + +def test_allow_delete_access_not_to_dynamic_group(topo, _add_user, _aci_of_user): + + """ + Test to Allow delete access to != dynamic group + :id: 9ecb139d-bca8-428e-9044-fd89db5a3d14 + :setup: server + :steps: + 1. Add test entry + 2. Add ACI that delete access to != dynamic group + 3. 
Delete something using test USER_DELADD + 4. Remove ACI + :expectedresults: + 1. Entry should be added + 2. ACI should be added + 3. Operation should not succeed + 4. Delete operation for ACI should succeed + """ + # Create dynamic group + groups = Groups(topo.standalone, DEFAULT_SUFFIX) + group = groups.create(properties={"cn": "group1", + "description": "testgroup"}) + group.add("objectclass", "groupOfURLs") + group.add("memberURL", + f'ldap:///{DEFAULT_SUFFIX}??sub?(&(objectclass=person)(cn=test_user_1000))') + + # Set ACI + Domain(topo.standalone, DEFAULT_SUFFIX).\ + add("aci", f'(target = ldap:///{DEFAULT_SUFFIX})' + f'(targetattr=*)(version 3.0; acl "$tet_thistest"; ' + f'allow (delete) (groupdn != "ldap:///{group.dn}"); )') + + # create connection with USER_WITH_ACI_DELADD + conn = UserAccount(topo.standalone, USER_WITH_ACI_DELADD).bind(PW_DM) + user = UserAccount(conn, USER_DELADD) + + # Perform Allow delete access to != dynamic group + with pytest.raises(ldap.INSUFFICIENT_ACCESS): + user.delete() + + +if __name__ == "__main__": + CURRENT_FILE = os.path.realpath(__file__) + pytest.main("-s -v %s" % CURRENT_FILE) diff --git a/dirsrvtests/tests/suites/acl/enhanced_aci_modrnd_test.py b/dirsrvtests/tests/suites/acl/enhanced_aci_modrnd_test.py new file mode 100644 index 0000000..ca94569 --- /dev/null +++ b/dirsrvtests/tests/suites/acl/enhanced_aci_modrnd_test.py @@ -0,0 +1,123 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2016 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. +# --- END COPYRIGHT BLOCK --- +# +import pytest +from lib389.tasks import * +from lib389.utils import * +from lib389.topologies import topology_st + +pytestmark = pytest.mark.tier1 + +logging.getLogger(__name__).setLevel(logging.DEBUG) +log = logging.getLogger(__name__) + +CONTAINER_1_OU = 'test_ou_1' +CONTAINER_2_OU = 'test_ou_2' +CONTAINER_1 = f'ou={CONTAINER_1_OU},dc=example,dc=com' +CONTAINER_2 = f'ou={CONTAINER_2_OU},dc=example,dc=com' +USER_CN = 'test_user' +USER_PWD = 'Secret123' +USER = f'cn={USER_CN},{CONTAINER_1}' + + +@pytest.fixture(scope="module") +def env_setup(topology_st): + """Adds two containers, one user and two ACI rules""" + + log.info("Add a container: %s" % CONTAINER_1) + topology_st.standalone.add_s(Entry((CONTAINER_1, + {'objectclass': 'top', + 'objectclass': 'organizationalunit', + 'ou': CONTAINER_1_OU, + }))) + + log.info("Add a container: %s" % CONTAINER_2) + topology_st.standalone.add_s(Entry((CONTAINER_2, + {'objectclass': 'top', + 'objectclass': 'organizationalunit', + 'ou': CONTAINER_2_OU, + }))) + + log.info("Add a user: %s" % USER) + topology_st.standalone.add_s(Entry((USER, + {'objectclass': 'top person'.split(), + 'cn': USER_CN, + 'sn': USER_CN, + 'userpassword': USER_PWD + }))) + + ACI_TARGET = '(targetattr="*")' + ACI_ALLOW = '(version 3.0; acl "All rights for %s"; allow (all) ' % USER + ACI_SUBJECT = 'userdn="ldap:///%s";)' % USER + ACI_BODY = ACI_TARGET + ACI_ALLOW + ACI_SUBJECT + mod = [(ldap.MOD_ADD, 'aci', ensure_bytes(ACI_BODY))] + + log.info("Add an ACI 'allow (all)' by %s to the %s" % (USER, + CONTAINER_1)) + topology_st.standalone.modify_s(CONTAINER_1, mod) + + log.info("Add an ACI 'allow (all)' by %s to the %s" % (USER, + CONTAINER_2)) + topology_st.standalone.modify_s(CONTAINER_2, mod) + + +@pytest.mark.ds47553 +def test_enhanced_aci_modrnd(topology_st, env_setup): + """Tests, that MODRDN operation is allowed, + if user has ACI right '(all)' under superior entries, + but doesn't have 
'(modrdn)' + + :id: 492cf2a9-2efe-4e3b-955e-85eca61d66b9 + :setup: Standalone instance + :steps: + 1. Create two containers + 2. Create a user within "ou=test_ou_1,dc=example,dc=com" + 3. Add an aci with a rule "cn=test_user is allowed all" within these containers + 4. Run MODRDN operation on the "cn=test_user" and set "newsuperior" to + the "ou=test_ou_2,dc=example,dc=com" + 5. Check there is no user under container one (ou=test_ou_1,dc=example,dc=com) + 6. Check there is a user under container two (ou=test_ou_2,dc=example,dc=com) + + :expectedresults: + 1. Two containers should be created + 2. User should be added successfully + 3. This should pass + 4. This should pass + 5. User should not be found under container ou=test_ou_1,dc=example,dc=com + 6. User should be found under container ou=test_ou_2,dc=example,dc=com + """ + + log.info("Bind as %s" % USER) + + topology_st.standalone.simple_bind_s(USER, USER_PWD) + + log.info("User MODRDN operation from %s to %s" % (CONTAINER_1, + CONTAINER_2)) + + topology_st.standalone.rename_s(USER, "cn=%s" % USER_CN, + newsuperior=CONTAINER_2, delold=1) + + log.info("Check there is no user in %s" % CONTAINER_1) + entries = topology_st.standalone.search_s(CONTAINER_1, + ldap.SCOPE_ONELEVEL, + 'cn=%s' % USER_CN) + assert not entries + + log.info("Check there is our user in %s" % CONTAINER_2) + entries = topology_st.standalone.search_s(CONTAINER_2, + ldap.SCOPE_ONELEVEL, + 'cn=%s' % USER_CN) + assert entries + + +if __name__ == '__main__': + # Run isolated + # -s for DEBUG mode + # -v for additional verbose + CURRENT_FILE = os.path.realpath(__file__) + pytest.main("-s -v %s" % CURRENT_FILE) diff --git a/dirsrvtests/tests/suites/acl/globalgroup_part2_test.py b/dirsrvtests/tests/suites/acl/globalgroup_part2_test.py new file mode 100644 index 0000000..b10fb1b --- /dev/null +++ b/dirsrvtests/tests/suites/acl/globalgroup_part2_test.py @@ -0,0 +1,470 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2019 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. 
+# --- END COPYRIGHT BLOCK ---- + + +import pytest, os, ldap +from lib389._constants import DEFAULT_SUFFIX, PW_DM +from lib389.idm.user import UserAccount, UserAccounts +from lib389.idm.group import UniqueGroup, UniqueGroups +from lib389.idm.organizationalunit import OrganizationalUnit +from lib389.topologies import topology_st as topo +from lib389.idm.domain import Domain + +pytestmark = pytest.mark.tier1 + +NESTEDGROUP_OU_GLOBAL = "ou=nestedgroup, {}".format(DEFAULT_SUFFIX) +DEEPUSER_GLOBAL = "uid=DEEPUSER_GLOBAL, {}".format(NESTEDGROUP_OU_GLOBAL) +DEEPUSER2_GLOBAL = "uid=DEEPUSER2_GLOBAL, {}".format(NESTEDGROUP_OU_GLOBAL) +DEEPUSER3_GLOBAL = "uid=DEEPUSER3_GLOBAL, {}".format(NESTEDGROUP_OU_GLOBAL) +DEEPGROUPSCRATCHENTRY_GLOBAL = "uid=scratchEntry,{}".format(NESTEDGROUP_OU_GLOBAL) +GROUPDNATTRSCRATCHENTRY_GLOBAL = "uid=GROUPDNATTRSCRATCHENTRY_GLOBAL,{}".format(NESTEDGROUP_OU_GLOBAL) +GROUPDNATTRCHILDSCRATCHENTRY_GLOBAL = "uid=c1,{}".format(GROUPDNATTRSCRATCHENTRY_GLOBAL) +NEWCHILDSCRATCHENTRY_GLOBAL = "uid=newChild,{}".format(NESTEDGROUP_OU_GLOBAL) +ALLGROUPS_GLOBAL = "cn=ALLGROUPS_GLOBAL,{}".format(NESTEDGROUP_OU_GLOBAL) +GROUPA_GLOBAL = "cn=GROUPA_GLOBAL,{}".format(NESTEDGROUP_OU_GLOBAL) +GROUPB_GLOBAL = "cn=GROUPB_GLOBAL,{}".format(NESTEDGROUP_OU_GLOBAL) +GROUPC_GLOBAL = "cn=GROUPC_GLOBAL,{}".format(NESTEDGROUP_OU_GLOBAL) +GROUPD_GLOBAL = "cn=GROUPD_GLOBAL,{}".format(NESTEDGROUP_OU_GLOBAL) +GROUPE_GLOBAL = "cn=GROUPE_GLOBAL,{}".format(NESTEDGROUP_OU_GLOBAL) +GROUPF_GLOBAL = "cn=GROUPF_GLOBAL,{}".format(NESTEDGROUP_OU_GLOBAL) +GROUPG_GLOBAL = "cn=GROUPG_GLOBAL,{}".format(NESTEDGROUP_OU_GLOBAL) +GROUPH_GLOBAL = "cn=GROUPH_GLOBAL,{}".format(NESTEDGROUP_OU_GLOBAL) +CHILD1_GLOBAL = "uid=CHILD1_GLOBAL,{}".format(GROUPDNATTRSCRATCHENTRY_GLOBAL) +CONTAINER_1_DELADD = "ou=Product Development,{}".format(DEFAULT_SUFFIX) +CONTAINER_2_DELADD = "ou=Accounting,{}".format(DEFAULT_SUFFIX) + + +@pytest.fixture(scope="function") +def aci_of_user(request, topo): + aci_list = Domain(topo.standalone, DEFAULT_SUFFIX).get_attr_vals('aci') + + def finofaci(): + domain = Domain(topo.standalone, DEFAULT_SUFFIX) + domain.set('aci', None) + for i in aci_list: + domain.add("aci", i) + + request.addfinalizer(finofaci) + + +@pytest.fixture(scope="module") +def test_user(request, topo): + for demo in ['Product Development', 'Accounting', 'nestedgroup']: + OrganizationalUnit(topo.standalone, "ou={},{}".format(demo, DEFAULT_SUFFIX)).create(properties={'ou': demo}) + + uas = UserAccounts(topo.standalone, DEFAULT_SUFFIX, 'ou=nestedgroup') + for demo1 in ['DEEPUSER_GLOBAL', 'scratchEntry', 'DEEPUSER2_GLOBAL', + 'DEEPUSER3_GLOBAL', 'GROUPDNATTRSCRATCHENTRY_GLOBAL', 'newChild']: + uas.create(properties={ + 'uid': demo1, + 'cn': demo1, + 'sn': 'user', + 'uidNumber': '1000', + 'gidNumber': '2000', + 'homeDirectory': '/home/' + demo1, + 'userPassword': PW_DM + }) + + uas = UserAccounts(topo.standalone, DEFAULT_SUFFIX, 'uid=GROUPDNATTRSCRATCHENTRY_GLOBAL,ou=nestedgroup') + for demo1 in ['c1', 'CHILD1_GLOBAL']: + uas.create(properties={ + 'uid': demo1, + 'cn': demo1, + 'sn': 'user', + 'uidNumber': '1000', + 'gidNumber': '2000', + 'homeDirectory': '/home/' + demo1, + 'userPassword': PW_DM + }) + + grp = UniqueGroups(topo.standalone, DEFAULT_SUFFIX, rdn='ou=nestedgroup') + for i in [('ALLGROUPS_GLOBAL', GROUPA_GLOBAL), ('GROUPA_GLOBAL', GROUPB_GLOBAL), ('GROUPB_GLOBAL', GROUPC_GLOBAL), + ('GROUPC_GLOBAL', GROUPD_GLOBAL), ('GROUPD_GLOBAL', GROUPE_GLOBAL), ('GROUPE_GLOBAL', GROUPF_GLOBAL), + ('GROUPF_GLOBAL', GROUPG_GLOBAL), 
('GROUPG_GLOBAL', GROUPH_GLOBAL), ('GROUPH_GLOBAL', DEEPUSER_GLOBAL)]: + grp.create(properties={'cn': i[0], + 'ou': 'groups', + 'uniquemember': i[1] + }) + + +def test_undefined_in_group_eval_five(topo, test_user, aci_of_user): + """ + Aci will not allow access as Group dn is not allowed so members will not allowed access. + + :id: 11451a96-7841-11e8-9f79-8c16451d917b + :setup: server + :steps: + 1. Add test entry + 2. Take a count of users using DN_DM + 3. Add test user + 4. add aci + 5. test should fulfil the aci rules + :expectedresults: + 1. Entry should be added + 2. Operation should succeed + 3. Operation should succeed + 4. Operation should succeed + 5. Operation should succeed + """ + + Domain(topo.standalone, DEFAULT_SUFFIX).add("aci",'(targetattr=*)(version 3.0; aci "tester"; allow(all) groupdn != "ldap:///{}\ || ldap:///{}";)'.format(ALLGROUPS_GLOBAL, GROUPF_GLOBAL)) + conn = UserAccount(topo.standalone, DEEPUSER2_GLOBAL).bind(PW_DM) + # This aci should NOT allow access + user = UserAccount(conn, DEEPGROUPSCRATCHENTRY_GLOBAL) + with pytest.raises(ldap.INSUFFICIENT_ACCESS): + user.replace("description", "Fred") + assert user.get_attr_val_utf8('uid') == 'scratchEntry' + + +def test_undefined_in_group_eval_six(topo, test_user, aci_of_user): + """ + Aci will not allow access as tested user is not a member of allowed Group dn + + :id: 1904572e-7841-11e8-a9d8-8c16451d917b + :setup: server + :steps: + 1. Add test entry + 2. Take a count of users using DN_DM + 3. Add test user + 4. add aci + 5. test should fullfil the aci rules + :expectedresults: + 1. Entry should be added + 2. Operation should succeed + 3. Operation should succeed + 4. Operation should succeed + 5. Operation should succeed + """ + Domain(topo.standalone, DEFAULT_SUFFIX).add("aci",'(targetattr=*)(version 3.0; aci "tester"; allow(all) groupdn = "ldap:///{} || ldap:///{}" ;)'.format(GROUPH_GLOBAL, ALLGROUPS_GLOBAL)) + conn = UserAccount(topo.standalone, DEEPUSER3_GLOBAL).bind(PW_DM) + # test UNDEFINED in group + user = UserAccount(conn, DEEPGROUPSCRATCHENTRY_GLOBAL) + with pytest.raises(ldap.INSUFFICIENT_ACCESS): + user.replace("description", "Fred") + assert user.get_attr_val_utf8('uid') == 'scratchEntry' + + +def test_undefined_in_group_eval_seven(topo, test_user, aci_of_user): + """ + Aci will not allow access as tested user is not a member of allowed Group dn + + :id: 206b43c4-7841-11e8-b3ed-8c16451d917b + :setup: server + :steps: + 1. Add test entry + 2. Take a count of users using DN_DM + 3. Add test user + 4. add aci + 5. test should fullfil the aci rules + :expectedresults: + 1. Entry should be added + 2. Operation should succeed + 3. Operation should succeed + 4. Operation should succeed + 5. Operation should succeed + """ + Domain(topo.standalone, DEFAULT_SUFFIX).add("aci",'(targetattr=*)(version 3.0; aci "tester"; allow(all) groupdn = "ldap:///{}\ || ldap:///{}";)'.format(ALLGROUPS_GLOBAL, GROUPH_GLOBAL)) + conn = UserAccount(topo.standalone, DEEPUSER3_GLOBAL).bind(PW_DM) + # test UNDEFINED in group + user = UserAccount(conn, DEEPGROUPSCRATCHENTRY_GLOBAL) + with pytest.raises(ldap.INSUFFICIENT_ACCESS): + user.replace("description", "Fred") + assert user.get_attr_val_utf8('uid') == 'scratchEntry' + + +def test_undefined_in_group_eval_eight(topo, test_user, aci_of_user): + """ + Aci will not allow access as Group dn is not allowed so members will not allowed access. + + :id: 26ca7456-7841-11e8-801e-8c16451d917b + :setup: server + :steps: + 1. Add test entry + 2. Take a count of users using DN_DM + 3. 
Add test user + 4. add aci + 5. test should fullfil the aci rules + :expectedresults: + 1. Entry should be added + 2. Operation should succeed + 3. Operation should succeed + 4. Operation should succeed + 5. Operation should succeed + """ + Domain(topo.standalone, DEFAULT_SUFFIX).add("aci",'(targetattr=*)(version 3.0; aci "tester"; allow(all) groupdn != "ldap:///{} || ldap:///{} || ldap:///{}" ;)'.format(GROUPH_GLOBAL, GROUPA_GLOBAL, ALLGROUPS_GLOBAL)) + conn = UserAccount(topo.standalone, DEEPUSER3_GLOBAL).bind(PW_DM) + # test UNDEFINED in group + user = UserAccount(conn, DEEPGROUPSCRATCHENTRY_GLOBAL) + with pytest.raises(ldap.INSUFFICIENT_ACCESS): + user.replace("description", "Fred") + assert user.get_attr_val_utf8('uid') == 'scratchEntry' + + +def test_undefined_in_group_eval_nine(topo, test_user, aci_of_user): + """ + Aci will not allow access as Group dn is not allowed so members will not allowed access. + + :id: 38c7fbb0-7841-11e8-90aa-8c16451d917b + :setup: server + :steps: + 1. Add test entry + 2. Take a count of users using DN_DM + 3. Add test user + 4. add aci + 5. test should fullfil the aci rules + :expectedresults: + 1. Entry should be added + 2. Operation should succeed + 3. Operation should succeed + 4. Operation should succeed + 5. Operation should succeed + """ + Domain(topo.standalone, DEFAULT_SUFFIX).add("aci",'(targetattr=*)(version 3.0; aci "tester"; allow(all) groupdn != "ldap:///{}\ || ldap:///{} || ldap:///{}";)'.format(ALLGROUPS_GLOBAL, GROUPA_GLOBAL, GROUPH_GLOBAL)) + conn = UserAccount(topo.standalone, DEEPUSER3_GLOBAL).bind(PW_DM) + # test UNDEFINED in group + user = UserAccount(conn, DEEPGROUPSCRATCHENTRY_GLOBAL) + with pytest.raises(ldap.INSUFFICIENT_ACCESS): + user.replace("sn", "Fred") + assert user.get_attr_val_utf8('uid') == 'scratchEntry' + + +def test_undefined_in_group_eval_ten(topo, test_user, aci_of_user): + """ + Test the userattr keyword to ensure that it evaluates correctly. + + :id: 46c0fb72-7841-11e8-af1d-8c16451d917b + :setup: server + :steps: + 1. Add test entry + 2. Take a count of users using DN_DM + 3. Add test user + 4. add aci + 5. test should fullfil the aci rules + :expectedresults: + 1. Entry should be added + 2. Operation should succeed + 3. Operation should succeed + 4. Operation should succeed + 5. Operation should succeed + """ + Domain(topo.standalone, DEFAULT_SUFFIX).add("aci",'(targetattr=*)(version 3.0; aci "tester"; allow(all) userattr = "description#GROUPDN";)') + user = UserAccount(topo.standalone, DEEPGROUPSCRATCHENTRY_GLOBAL) + user.add("description", [ALLGROUPS_GLOBAL, GROUPG_GLOBAL]) + conn = UserAccount(topo.standalone, DEEPUSER_GLOBAL).bind(PW_DM) + # Test the userattr keyword + user.add("sn", "Fred") + assert UserAccount(conn, DEEPGROUPSCRATCHENTRY_GLOBAL).get_attr_val_utf8('uid') == 'scratchEntry' + user.remove("description", [ALLGROUPS_GLOBAL, GROUPG_GLOBAL]) + + +def test_undefined_in_group_eval_eleven(topo, test_user, aci_of_user): + """ + Aci will not allow access as description is there with the user entry which is not allowed in ACI + + :id: 4cfa28e2-7841-11e8-8117-8c16451d917b + :setup: server + :steps: + 1. Add test entry + 2. Take a count of users using DN_DM + 3. Add test user + 4. add aci + 5. test should fullfil the aci rules + :expectedresults: + 1. Entry should be added + 2. Operation should succeed + 3. Operation should succeed + 4. Operation should succeed + 5. 
Operation should succeed + """ + Domain(topo.standalone, DEFAULT_SUFFIX).add("aci",'(targetattr=*)(version 3.0; aci "tester"; allow(all) not( userattr = "description#GROUPDN");)') + user = UserAccount(topo.standalone, DEEPGROUPSCRATCHENTRY_GLOBAL) + user.add("description", [ALLGROUPS_GLOBAL, GROUPH_GLOBAL]) + conn = UserAccount(topo.standalone, DEEPUSER_GLOBAL).bind(PW_DM) + # Test that not(UNDEFINED(attrval1)) + user1 = UserAccount(conn, DEEPGROUPSCRATCHENTRY_GLOBAL) + with pytest.raises(ldap.INSUFFICIENT_ACCESS): + user1.add("sn", "Fred1") + assert user.get_attr_val_utf8('cn') + user.remove("description", [ALLGROUPS_GLOBAL, GROUPH_GLOBAL]) + + +def test_undefined_in_group_eval_twelve(topo, test_user, aci_of_user): + """ + Test with the parent keyord that Yields TRUE as description is present in tested entry + + :id: 54f471ec-7841-11e8-8910-8c16451d917b + :setup: server + :steps: + 1. Add test entry + 2. Take a count of users using DN_DM + 3. Add test user + 4. add aci + 5. test should fullfil the aci rules + :expectedresults: + 1. Entry should be added + 2. Operation should succeed + 3. Operation should succeed + 4. Operation should succeed + 5. Operation should succeed + """ + Domain(topo.standalone, DEFAULT_SUFFIX).add("aci",'(targetattr=*)(version 3.0; aci "tester"; allow(all) userattr = "parent[0,1].description#GROUPDN";)') + user = UserAccount(topo.standalone, GROUPDNATTRSCRATCHENTRY_GLOBAL) + user.add("description", [ALLGROUPS_GLOBAL, GROUPD_GLOBAL]) + conn = UserAccount(topo.standalone, DEEPUSER_GLOBAL).bind(PW_DM) + # Test with the parent keyord + UserAccount(conn, GROUPDNATTRCHILDSCRATCHENTRY_GLOBAL).add("sn", "Fred") + assert UserAccount(conn, DEEPGROUPSCRATCHENTRY_GLOBAL).get_attr_val_utf8('cn') + user.remove("description", [ALLGROUPS_GLOBAL, GROUPD_GLOBAL]) + + +def test_undefined_in_group_eval_fourteen(topo, test_user, aci_of_user): + """ + Test with parent keyword that Yields FALSE as description is not present in tested entry + + :id: 5c527218-7841-11e8-8909-8c16451d917b + :setup: server + :steps: + 1. Add test entry + 2. Take a count of users using DN_DM + 3. Add test user + 4. add aci + 5. test should fullfil the aci rules + :expectedresults: + 1. Entry should be added + 2. Operation should succeed + 3. Operation should succeed + 4. Operation should succeed + 5. Operation should succeed + """ + Domain(topo.standalone, DEFAULT_SUFFIX).add("aci",'(targetattr=*)(version 3.0; aci "tester"; allow(all) userattr = "parent[0,1].description#GROUPDN";)') + user = UserAccount(topo.standalone, GROUPDNATTRSCRATCHENTRY_GLOBAL) + user.add("description", [ALLGROUPS_GLOBAL, GROUPG_GLOBAL]) + conn = UserAccount(topo.standalone, DEEPUSER2_GLOBAL).bind(PW_DM) + # Test with parent keyword + user1 = UserAccount(conn, GROUPDNATTRCHILDSCRATCHENTRY_GLOBAL) + with pytest.raises(ldap.INSUFFICIENT_ACCESS): + user1.add("sn", "Fred") + assert UserAccount(conn, DEEPGROUPSCRATCHENTRY_GLOBAL).get_attr_val_utf8('cn') + user.remove("description", [ALLGROUPS_GLOBAL, GROUPG_GLOBAL]) + + +def test_undefined_in_group_eval_fifteen(topo, test_user, aci_of_user): + """ + Here do the same tests for userattr with the parent keyword. + + :id: 6381c070-7841-11e8-a6b6-8c16451d917b + :setup: server + :steps: + 1. Add test entry + 2. Take a count of users using DN_DM + 3. Add test user + 4. add aci + 5. test should fullfil the aci rules + :expectedresults: + 1. Entry should be added + 2. Operation should succeed + 3. Operation should succeed + 4. Operation should succeed + 5. 
Operation should succeed + """ + Domain(topo.standalone, DEFAULT_SUFFIX).add("aci",'(targetattr=*)(version 3.0; aci "tester"; allow(all) userattr = "parent[0,1].description#USERDN";)') + UserAccount(topo.standalone, NESTEDGROUP_OU_GLOBAL).add("description", DEEPUSER_GLOBAL) + # Here do the same tests for userattr with the parent keyword. + conn = UserAccount(topo.standalone, DEEPUSER_GLOBAL).bind(PW_DM) + UserAccount(conn, NEWCHILDSCRATCHENTRY_GLOBAL).add("description", DEEPUSER_GLOBAL) + + +def test_undefined_in_group_eval_sixteen(topo, test_user, aci_of_user): + """ + Test with parent keyword with not key + + :id: 69852688-7841-11e8-8db1-8c16451d917b + :setup: server + :steps: + 1. Add test entry + 2. Take a count of users using DN_DM + 3. Add test user + 4. add aci + 5. test should fullfil the aci rules + :expectedresults: + 1. Entry should be added + 2. Operation should succeed + 3. Operation should succeed + 4. Operation should succeed + 5. Operation should succeed + """ + domain = Domain(topo.standalone, DEFAULT_SUFFIX) + domain.add("aci",'(targetattr=*)(version 3.0; aci "tester"; allow(all) not ( userattr = "parent[0,1].description#USERDN");)') + domain.add("description", DEEPUSER_GLOBAL) + conn = UserAccount(topo.standalone, DEEPUSER_GLOBAL).bind(PW_DM) + # Test with parent keyword with not key + user = UserAccount(conn, NEWCHILDSCRATCHENTRY_GLOBAL) + with pytest.raises(ldap.INSUFFICIENT_ACCESS): + user.add("description",DEEPUSER_GLOBAL) + + +def test_undefined_in_group_eval_seventeen(topo, test_user, aci_of_user): + """ + Test with the parent keyord that Yields TRUE as description is present in tested entry + + :id: 7054d1c0-7841-11e8-8177-8c16451d917b + :setup: server + :steps: + 1. Add test entry + 2. Take a count of users using DN_DM + 3. Add test user + 4. add aci + 5. test should fullfil the aci rules + :expectedresults: + 1. Entry should be added + 2. Operation should succeed + 3. Operation should succeed + 4. Operation should succeed + 5. Operation should succeed + """ + Domain(topo.standalone, DEFAULT_SUFFIX).add("aci",'(targetattr=*)(version 3.0; aci "tester"; allow(all) userattr = "parent[0,1].description#GROUPDN";)') + user = UserAccount(topo.standalone, GROUPDNATTRSCRATCHENTRY_GLOBAL) + # Test with the parent keyord + user.add("description", [ALLGROUPS_GLOBAL, GROUPD_GLOBAL]) + conn = UserAccount(topo.standalone, DEEPUSER_GLOBAL).bind(PW_DM) + UserAccount(conn, CHILD1_GLOBAL).add("description", DEEPUSER_GLOBAL) + user.remove("description", [ALLGROUPS_GLOBAL, GROUPD_GLOBAL]) + + +def test_undefined_in_group_eval_eighteen(topo, test_user, aci_of_user): + """ + Test with parent keyword with not key + + :id: 768b9ab0-7841-11e8-87c3-8c16451d917b + :setup: server + :steps: + 1. Add test entry + 2. Take a count of users using DN_DM + 3. Add test user + 4. add aci + 5. test should fullfil the aci rules + :expectedresults: + 1. Entry should be added + 2. Operation should succeed + 3. Operation should succeed + 4. Operation should succeed + 5. 
Operation should succeed + """ + Domain(topo.standalone, DEFAULT_SUFFIX).add("aci",'(targetattr=*)(version 3.0; aci "tester"; allow(all) not (userattr = "parent[0,1].description#GROUPDN" );)') + user = UserAccount(topo.standalone, GROUPDNATTRSCRATCHENTRY_GLOBAL) + # Test with parent keyword with not key + user.add("description", [ALLGROUPS_GLOBAL, GROUPH_GLOBAL]) + conn = UserAccount(topo.standalone, DEEPUSER_GLOBAL).bind(PW_DM) + user = UserAccount(conn, CHILD1_GLOBAL) + with pytest.raises(ldap.INSUFFICIENT_ACCESS): + user.add("description", DEEPUSER_GLOBAL) + + +if __name__ == "__main__": + CURRENT_FILE = os.path.realpath(__file__) + pytest.main("-s -v %s" % CURRENT_FILE) diff --git a/dirsrvtests/tests/suites/acl/globalgroup_test.py b/dirsrvtests/tests/suites/acl/globalgroup_test.py new file mode 100644 index 0000000..58c4392 --- /dev/null +++ b/dirsrvtests/tests/suites/acl/globalgroup_test.py @@ -0,0 +1,431 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2019 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. +# --- END COPYRIGHT BLOCK ---- + + +import pytest, os, ldap +from lib389._constants import DEFAULT_SUFFIX, PW_DM +from lib389.idm.user import UserAccount, UserAccounts +from lib389.idm.group import UniqueGroup, UniqueGroups +from lib389.idm.organizationalunit import OrganizationalUnit +from lib389.topologies import topology_st as topo +from lib389.idm.domain import Domain + +pytestmark = pytest.mark.tier1 + +ACLGROUP_OU_GLOBAL = "ou=ACLGroup,{}".format(DEFAULT_SUFFIX) +NESTEDGROUP_OU_GLOBAL = "ou=nestedgroup, {}".format(DEFAULT_SUFFIX) +TESTING_OU_GLOBAL = "ou=Product Testing,{}".format(DEFAULT_SUFFIX) +DEEPUSER_GLOBAL = "uid=DEEPUSER_GLOBAL, {}".format(NESTEDGROUP_OU_GLOBAL) +DEEPUSER1_GLOBAL = "uid=DEEPUSER1_GLOBAL, {}".format(NESTEDGROUP_OU_GLOBAL) +DEEPUSER2_GLOBAL = "uid=DEEPUSER2_GLOBAL, {}".format(NESTEDGROUP_OU_GLOBAL) +DEEPUSER3_GLOBAL = "uid=DEEPUSER3_GLOBAL, {}".format(NESTEDGROUP_OU_GLOBAL) +DEEPGROUPSCRATCHENTRY_GLOBAL = "uid=scratchEntry,{}".format(NESTEDGROUP_OU_GLOBAL) +GROUPDNATTRSCRATCHENTRY_GLOBAL = "uid=GROUPDNATTRSCRATCHENTRY_GLOBAL,{}".format(NESTEDGROUP_OU_GLOBAL) +GROUPDNATTRCHILDSCRATCHENTRY_GLOBAL = "uid=c1,{}".format(GROUPDNATTRSCRATCHENTRY_GLOBAL) +NEWCHILDSCRATCHENTRY_GLOBAL = "uid=newChild,{}".format(NESTEDGROUP_OU_GLOBAL) +BIG_GLOBAL = "cn=BIG_GLOBAL Group,{}".format(DEFAULT_SUFFIX) +ALLGROUPS_GLOBAL = "cn=ALLGROUPS_GLOBAL,{}".format(NESTEDGROUP_OU_GLOBAL) +GROUPA_GLOBAL = "cn=GROUPA_GLOBAL,{}".format(NESTEDGROUP_OU_GLOBAL) +GROUPB_GLOBAL = "cn=GROUPB_GLOBAL,{}".format(NESTEDGROUP_OU_GLOBAL) +GROUPC_GLOBAL = "cn=GROUPC_GLOBAL,{}".format(NESTEDGROUP_OU_GLOBAL) +GROUPD_GLOBAL = "cn=GROUPD_GLOBAL,{}".format(NESTEDGROUP_OU_GLOBAL) +GROUPE_GLOBAL = "cn=GROUPE_GLOBAL,{}".format(NESTEDGROUP_OU_GLOBAL) +GROUPF_GLOBAL = "cn=GROUPF_GLOBAL,{}".format(NESTEDGROUP_OU_GLOBAL) +GROUPG_GLOBAL = "cn=GROUPG_GLOBAL,{}".format(NESTEDGROUP_OU_GLOBAL) +GROUPH_GLOBAL = "cn=GROUPH_GLOBAL,{}".format(NESTEDGROUP_OU_GLOBAL) +CONTAINER_1_DELADD = "ou=Product Development,{}".format(DEFAULT_SUFFIX) +CONTAINER_2_DELADD = "ou=Accounting,{}".format(DEFAULT_SUFFIX) + + +@pytest.fixture(scope="function") +def aci_of_user(request, topo): + aci_list = Domain(topo.standalone, DEFAULT_SUFFIX).get_attr_vals('aci') + + def finofaci(): + domain = Domain(topo.standalone, DEFAULT_SUFFIX) + domain.set('aci', None) + for i in aci_list: + domain.add("aci", i) + + request.addfinalizer(finofaci) + + 
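+# Note for readers: the module-scope fixture below links the groups into a single
+# chain of uniqueMember references (ALLGROUPS -> GROUPA -> ... -> GROUPH ->
+# DEEPUSER_GLOBAL), so DEEPUSER_GLOBAL is only ever a member through that long
+# chain of nested groups.  The helper below is an illustrative sketch only; it is
+# not used by the tests and is not part of the original suite.
+def _nested_group_chain():
+    """Return the uniqueMember chain built by the test_user fixture, top to bottom."""
+    return [ALLGROUPS_GLOBAL, GROUPA_GLOBAL, GROUPB_GLOBAL, GROUPC_GLOBAL,
+            GROUPD_GLOBAL, GROUPE_GLOBAL, GROUPF_GLOBAL, GROUPG_GLOBAL,
+            GROUPH_GLOBAL, DEEPUSER_GLOBAL]
+
+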
+@pytest.fixture(scope="module") +def test_user(request, topo): + for demo in ['Product Development', 'Accounting', 'Product Testing', 'nestedgroup', 'ACLGroup']: + OrganizationalUnit(topo.standalone, "ou={},{}".format(demo, DEFAULT_SUFFIX)).create(properties={'ou': demo}) + + user = UserAccounts(topo.standalone, DEFAULT_SUFFIX, rdn='ou=Accounting') + for demo1 in ['Ted Morris', 'David Miller']: + user.create(properties= { + 'uid': demo1, + 'cn': demo1, + 'sn': 'user', + 'uidNumber': '1000', + 'gidNumber': '2000', + 'homeDirectory': '/home/' + demo1, + 'userPassword': PW_DM + }) + + uas = UserAccounts(topo.standalone, DEFAULT_SUFFIX, 'ou=nestedgroup') + for demo1 in ['DEEPUSER_GLOBAL', 'scratchEntry', 'DEEPUSER2_GLOBAL', 'DEEPUSER1_GLOBAL', + 'DEEPUSER3_GLOBAL', 'GROUPDNATTRSCRATCHENTRY_GLOBAL', 'newChild']: + uas.create(properties={ + 'uid': demo1, + 'cn': demo1, + 'sn': 'user', + 'uidNumber': '1000', + 'gidNumber': '2000', + 'homeDirectory': '/home/' + demo1, + 'userPassword': PW_DM + }) + + uas = UserAccounts(topo.standalone, DEFAULT_SUFFIX, 'uid=GROUPDNATTRSCRATCHENTRY_GLOBAL,ou=nestedgroup') + for demo1 in ['c1', 'CHILD1_GLOBAL']: + uas.create(properties={ + 'uid': demo1, + 'cn': demo1, + 'sn': 'user', + 'uidNumber': '1000', + 'gidNumber': '2000', + 'homeDirectory': '/home/' + demo1, + 'userPassword': PW_DM + }) + + grp = UniqueGroups(topo.standalone, DEFAULT_SUFFIX, rdn='ou=nestedgroup') + for i in [('ALLGROUPS_GLOBAL', GROUPA_GLOBAL), ('GROUPA_GLOBAL', GROUPB_GLOBAL), ('GROUPB_GLOBAL', GROUPC_GLOBAL), + ('GROUPC_GLOBAL', GROUPD_GLOBAL), ('GROUPD_GLOBAL', GROUPE_GLOBAL), ('GROUPE_GLOBAL', GROUPF_GLOBAL), + ('GROUPF_GLOBAL', GROUPG_GLOBAL), ('GROUPG_GLOBAL', GROUPH_GLOBAL), ('GROUPH_GLOBAL', DEEPUSER_GLOBAL)]: + grp.create(properties={'cn': i[0], + 'ou': 'groups', + 'uniquemember': i[1] + }) + + grp = UniqueGroup(topo.standalone, 'cn=BIG_GLOBAL Group,{}'.format(DEFAULT_SUFFIX)) + grp.create(properties={'cn': 'BIG_GLOBAL Group', + 'ou': 'groups', + 'uniquemember': ["uid=Ted Morris,ou=Accounting,{}".format(DEFAULT_SUFFIX), + "uid=David Miller,ou=Accounting,{}".format(DEFAULT_SUFFIX),] + }) + + +def test_caching_changes(topo, aci_of_user, test_user): + """ + Add user and then test deny + + :id: 26ed2dc2-783f-11e8-b1a5-8c16451d917b + :setup: server + :steps: + 1. Add test entry + 2. Take a count of users using DN_DM + 3. Add test user + 4. add aci + 5. test should fullfil the aci rules + :expectedresults: + 1. Entry should be added + 2. Operation should succeed + 3. Operation should succeed + 4. Operation should succeed + 5. Operation should succeed + """ + Domain(topo.standalone, DEFAULT_SUFFIX).add("aci",'(targetattr="roomnumber")(version 3.0; acl "ACLGroup"; deny ( read, search ) userdn = "ldap:///all" ;)') + user = UserAccounts(topo.standalone, DEFAULT_SUFFIX, "ou=AclGroup").create_test_user() + user.set('roomnumber', '3445') + conn = UserAccount(topo.standalone, DEEPUSER_GLOBAL).bind(PW_DM) + # targetattr="roomnumber" will be denied access + user = UserAccount(conn, 'uid=test_user_1000,ou=ACLGroup,dc=example,dc=com') + with pytest.raises(AssertionError): + assert user.get_attr_val_utf8('roomNumber') + UserAccount(topo.standalone, 'uid=test_user_1000,ou=ACLGroup,dc=example,dc=com').delete() + + +def test_deny_group_member_all_rights_to_user(topo, aci_of_user, test_user): + """ + Try deleting user while no access + + :id: 0da68a4c-7840-11e8-98c2-8c16451d917b + :setup: server + :steps: + 1. Add test entry + 2. Take a count of users using DN_DM + 3. delete test user + 4. add aci + 5. 
test should fullfil the aci rules + :expectedresults: + 1. Entry should be added + 2. Operation should succeed + 3. Operation should succeed + 4. Operation should succeed + 5. Operation should succeed + """ + Domain(topo.standalone, DEFAULT_SUFFIX).add("aci",'(targetattr="*")(version 3.0; acl "ACLGroup"; deny (all) groupdn = "ldap:///{}" ;)'.format(BIG_GLOBAL)) + conn = UserAccount(topo.standalone, "uid=Ted Morris, ou=Accounting, {}".format(DEFAULT_SUFFIX)).bind(PW_DM) + # group BIG_GLOBAL will have no access + user = UserAccount(conn, DEEPUSER3_GLOBAL) + with pytest.raises(ldap.INSUFFICIENT_ACCESS): + user.delete() + + +def test_deny_group_member_all_rights_to_group_members(topo, aci_of_user, test_user): + """ + Deny group member all rights + + :id: 2d4ff70c-7840-11e8-8472-8c16451d917b + :setup: server + :steps: + 1. Add test entry + 2. Take a count of users using DN_DM + 3. Add test user + 4. add aci + 5. test should fullfil the aci rules + :expectedresults: + 1. Entry should be added + 2. Operation should succeed + 3. Operation should succeed + 4. Operation should succeed + 5. Operation should succeed + """ + Domain(topo.standalone, DEFAULT_SUFFIX).add("aci",'(targetattr="*")(version 3.0; acl "ACLGroup"; deny (all) groupdn = "ldap:///{}" ;)'.format(BIG_GLOBAL)) + UserAccounts(topo.standalone, DEFAULT_SUFFIX, "ou=AclGroup").create_test_user() + conn = UserAccount(topo.standalone, "uid=Ted Morris, ou=Accounting, {}".format(DEFAULT_SUFFIX)).bind(PW_DM) + # group BIG_GLOBAL no access + user = UserAccount(conn, 'uid=test_user_1000,ou=ACLGroup,dc=example,dc=com') + with pytest.raises(IndexError): + user.get_attr_val_utf8('uid') + UserAccount(topo.standalone, 'uid=test_user_1000,ou=ACLGroup,dc=example,dc=com').delete() + + +def test_deeply_nested_groups_aci_denial(topo, test_user, aci_of_user): + """ + Test deeply nested groups (1) + This aci will not allow search or modify to a user too deep to be detected. + + :id: 3d98229c-7840-11e8-9f55-8c16451d917b + :setup: server + :steps: + 1. Add test entry + 2. Take a count of users using DN_DM + 3. Add test user + 4. add aci + 5. test should fullfil the aci rules + :expectedresults: + 1. Entry should be added + 2. Operation should succeed + 3. Operation should succeed + 4. Operation should succeed + 5. Operation should succeed + """ + Domain(topo.standalone, DEFAULT_SUFFIX).add("aci",'(targetattr="*")(version 3.0; acl "ACLGroup"; allow (all) groupdn = "ldap:///{}" ;)'.format(ALLGROUPS_GLOBAL)) + conn = UserAccount(topo.standalone, DEEPUSER_GLOBAL).bind(PW_DM) + # ALLGROUPS_GLOBAL have all access + assert UserAccount(conn, DEEPGROUPSCRATCHENTRY_GLOBAL).get_attr_val_utf8('uid') == 'scratchEntry' + user = UserAccount(conn, DEEPGROUPSCRATCHENTRY_GLOBAL) + with pytest.raises(ldap.INSUFFICIENT_ACCESS): + user.delete() + + +def test_deeply_nested_groups_aci_denial_two(topo, test_user, aci_of_user): + """ + Test deeply nested groups (2) + This aci will allow search and modify + + :id: 4ef6348e-7840-11e8-a70c-8c16451d917b + :setup: server + :steps: + 1. Add test entry + 2. Take a count of users using DN_DM + 3. Add test user + 4. add aci + 5. test should fullfil the aci rules + :expectedresults: + 1. Entry should be added + 2. Operation should succeed + 3. Operation should succeed + 4. Operation should succeed + 5. 
Operation should succeed + """ + Domain(topo.standalone, DEFAULT_SUFFIX).add("aci",'(targetattr="*")(version 3.0; acl "ACLGroup"; allow (all) groupdn = "ldap:///{}" ;)'.format(GROUPE_GLOBAL)) + conn = UserAccount(topo.standalone, DEEPUSER_GLOBAL).bind(PW_DM) + # GROUPE_GLOBAL have all access + user = UserAccount(conn, DEEPGROUPSCRATCHENTRY_GLOBAL) + user.add("sn", "Fred") + user.remove("sn", "Fred") + + +def test_deeply_nested_groups_aci_allow(topo, test_user, aci_of_user): + """ + Test deeply nested groups (3) + This aci will allow search and modify + + :id: 8d338210-7840-11e8-8584-8c16451d917b + :setup: server + :steps: + 1. Add test entry + 2. Take a count of users using DN_DM + 3. Add test user + 4. add aci + 5. test should fullfil the aci rules + :expectedresults: + 1. Entry should be added + 2. Operation should succeed + 3. Operation should succeed + 4. Operation should succeed + 5. Operation should succeed + """ + Domain(topo.standalone, DEFAULT_SUFFIX).add("aci", ['(targetattr="*")(version 3.0; acl "ACLGroup"; allow (all) groupdn = "ldap:///{}" ;)'.format(ALLGROUPS_GLOBAL), '(targetattr="*")(version 3.0; acl "ACLGroup"; allow (all) groupdn = "ldap:///{}" ;)'.format(GROUPE_GLOBAL)]) + conn = UserAccount(topo.standalone, DEEPUSER_GLOBAL).bind(PW_DM) + # test deeply nested groups + user = UserAccount(conn, DEEPGROUPSCRATCHENTRY_GLOBAL) + user.add("sn", "Fred") + user.remove("sn", "Fred") + + +def test_deeply_nested_groups_aci_allow_two(topo, test_user, aci_of_user): + """ + This aci will not allow search or modify to a user too deep to be detected. + + :id: 8d3459c4-7840-11e8-8ed8-8c16451d917b + :setup: server + :steps: + 1. Add test entry + 2. Take a count of users using DN_DM + 3. Add test user + 4. add aci + 5. test should fullfil the aci rules + :expectedresults: + 1. Entry should be added + 2. Operation should succeed + 3. Operation should succeed + 4. Operation should succeed + 5. Operation should succeed + """ + Domain(topo.standalone, DEFAULT_SUFFIX).add("aci",'(targetattr="*")(version 3.0; acl "ACLGroup"; allow (all) groupdn = "ldap:///{}" ;)'.format(ALLGROUPS_GLOBAL)) + conn = UserAccount(topo.standalone, DEEPUSER_GLOBAL).bind(PW_DM) + # This aci should not allow search or modify to a user too deep to be detected. + user = UserAccount(conn, DEEPGROUPSCRATCHENTRY_GLOBAL) + with pytest.raises(ldap.INSUFFICIENT_ACCESS): + user.add("sn", "Fred") + assert user.get_attr_val_utf8('uid') == 'scratchEntry' + + +def test_undefined_in_group_eval(topo, test_user, aci_of_user): + """ + + This aci will not allow access . + + :id: f1605e16-7840-11e8-b954-8c16451d917b + :setup: server + :steps: + 1. Add test entry + 2. Take a count of users using DN_DM + 3. Add test user + 4. add aci + 5. test should fullfil the aci rules + :expectedresults: + 1. Entry should be added + 2. Operation should succeed + 3. Operation should succeed + 4. Operation should succeed + 5. 
Operation should succeed + """ + Domain(topo.standalone, DEFAULT_SUFFIX).add("aci",'(targetattr="*")(version 3.0; acl "ACLGroup"; allow (all) groupdn != "ldap:///{}" ;)'.format(ALLGROUPS_GLOBAL)) + conn = UserAccount(topo.standalone, DEEPUSER_GLOBAL).bind(PW_DM) + # This aci should NOT allow access + user = UserAccount(conn, DEEPGROUPSCRATCHENTRY_GLOBAL) + with pytest.raises(ldap.INSUFFICIENT_ACCESS): + user.add("sn", "Fred") + assert user.get_attr_val_utf8('uid') == 'scratchEntry' + + +def test_undefined_in_group_eval_two(topo, test_user, aci_of_user): + """ + This aci will allow access + + :id: fcfbcce2-7840-11e8-ba77-8c16451d917b + :setup: server + :steps: + 1. Add test entry + 2. Take a count of users using DN_DM + 3. Add test user + 4. add aci + 5. test should fullfil the aci rules + :expectedresults: + 1. Entry should be added + 2. Operation should succeed + 3. Operation should succeed + 4. Operation should succeed + 5. Operation should succeed + """ + Domain(topo.standalone, DEFAULT_SUFFIX).add("aci",'(targetattr=*)(version 3.0; aci "tester"; allow(all) groupdn = "ldap:///{}\ || ldap:///{}";)'.format(ALLGROUPS_GLOBAL, GROUPG_GLOBAL)) + conn = UserAccount(topo.standalone, DEEPUSER_GLOBAL).bind(PW_DM) + user = UserAccount(conn, DEEPGROUPSCRATCHENTRY_GLOBAL) + # This aci should allow access + user.add("sn", "Fred") + assert UserAccount(conn, DEEPGROUPSCRATCHENTRY_GLOBAL).get_attr_val_utf8('uid') == 'scratchEntry' + user.remove("sn", "Fred") + + +def test_undefined_in_group_eval_three(topo, test_user, aci_of_user): + """ + This aci will allow access + + :id: 04943dcc-7841-11e8-8c46-8c16451d917b + :setup: server + :steps: + 1. Add test entry + 2. Take a count of users using DN_DM + 3. Add test user + 4. add aci + 5. test should fullfil the aci rules + :expectedresults: + 1. Entry should be added + 2. Operation should succeed + 3. Operation should succeed + 4. Operation should succeed + 5. Operation should succeed + """ + Domain(topo.standalone, DEFAULT_SUFFIX).add("aci",'(targetattr=*)(version 3.0; aci "tester"; allow(all) groupdn = "ldap:///{}\ || ldap:///{}";)'.format(GROUPG_GLOBAL, ALLGROUPS_GLOBAL)) + conn = UserAccount(topo.standalone, DEEPUSER_GLOBAL).bind(PW_DM) + user = Domain(conn, DEEPGROUPSCRATCHENTRY_GLOBAL) + # test UNDEFINED in group + user.add("sn", "Fred") + assert UserAccount(conn, DEEPGROUPSCRATCHENTRY_GLOBAL).get_attr_val_utf8('uid') == 'scratchEntry' + user.remove("sn", "Fred") + + +def test_undefined_in_group_eval_four(topo, test_user, aci_of_user): + """ + This aci will not allow access + + :id: 0b03d10e-7841-11e8-9341-8c16451d917b + :setup: server + :steps: + 1. Add test entry + 2. Take a count of users using DN_DM + 3. Add test user + 4. add aci + 5. test should fullfil the aci rules + :expectedresults: + 1. Entry should be added + 2. Operation should succeed + 3. Operation should succeed + 4. Operation should succeed + 5. 
Operation should succeed + """ + Domain(topo.standalone, DEFAULT_SUFFIX).add("aci",'(targetattr=*)(version 3.0; aci "tester"; allow(all) groupdn != "ldap:///{}\ || ldap:///{}";)'.format(ALLGROUPS_GLOBAL, GROUPG_GLOBAL)) + conn = UserAccount(topo.standalone, DEEPUSER1_GLOBAL).bind(PW_DM) + # test UNDEFINED in group + user = UserAccount(conn, DEEPGROUPSCRATCHENTRY_GLOBAL) + with pytest.raises(ldap.INSUFFICIENT_ACCESS): + user.add("sn", "Fred") + assert user.get_attr_val_utf8('uid') == 'scratchEntry' + + +if __name__ == "__main__": + CURRENT_FILE = os.path.realpath(__file__) + pytest.main("-s -v %s" % CURRENT_FILE) diff --git a/dirsrvtests/tests/suites/acl/keywords_part2_test.py b/dirsrvtests/tests/suites/acl/keywords_part2_test.py new file mode 100644 index 0000000..c2aa9ac --- /dev/null +++ b/dirsrvtests/tests/suites/acl/keywords_part2_test.py @@ -0,0 +1,388 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2019 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. +# --- END COPYRIGHT BLOCK ---- + + +""" +This test script will test wrong/correct key value with ACIs. +""" + +import os +import time +from datetime import datetime +import pytest + +from lib389._constants import DEFAULT_SUFFIX, PW_DM +from lib389.idm.domain import Domain +from lib389.idm.organizationalunit import OrganizationalUnit +from lib389.idm.user import UserAccount + +import ldap + +pytestmark = pytest.mark.tier1 + + +KEYWORDS_OU_KEY = "ou=Keywords,{}".format(DEFAULT_SUFFIX) +DAYOFWEEK_OU_KEY = "ou=Dayofweek,{}".format(KEYWORDS_OU_KEY) +IP_OU_KEY = "ou=IP,{}".format(KEYWORDS_OU_KEY) +TIMEOFDAY_OU_KEY = "ou=Timeofday,{}".format(KEYWORDS_OU_KEY) +EVERYDAY_KEY = "uid=EVERYDAY_KEY,{}".format(DAYOFWEEK_OU_KEY) +TODAY_KEY = "uid=TODAY_KEY,{}".format(DAYOFWEEK_OU_KEY) +NODAY_KEY = "uid=NODAY_KEY,{}".format(DAYOFWEEK_OU_KEY) +FULLIP_KEY = "uid=FULLIP_KEY,{}".format(IP_OU_KEY) +NETSCAPEIP_KEY = "uid=NETSCAPEIP_KEY,{}".format(IP_OU_KEY) +NOIP_KEY = "uid=NOIP_KEY,{}".format(IP_OU_KEY) +FULLWORKER_KEY = "uid=FULLWORKER_KEY,{}".format(TIMEOFDAY_OU_KEY) +DAYWORKER_KEY = "uid=DAYWORKER_KEY,{}".format(TIMEOFDAY_OU_KEY) +NIGHTWORKER_KEY = "uid=NIGHTWORKER_KEY,{}".format(TIMEOFDAY_OU_KEY) +NOWORKER_KEY = "uid=NOWORKER_KEY,{}".format(TIMEOFDAY_OU_KEY) + + +def test_access_from_certain_network_only_ip(topo, add_user, aci_of_user): + """ + User can access the data when connecting from certain network only as per the ACI. + + :id: 4ec38296-7ac5-11e8-9816-8c16451d917b + :setup: Standalone Server + :steps: + 1. Add test entry + 2. Add ACI + 3. User should follow ACI role + :expectedresults: + 1. Entry should be added + 2. Operation should succeed + 3. Operation should succeed + """ + # Turn access log buffering off to make less time consuming + topo.standalone.config.set('nsslapd-accesslog-logbuffering', 'off') + + # Find the ip from ds logs , as we need to know the exact ip used by ds to run the instances. 
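+    # (the restart below guarantees a fresh "connection from" line is present in
+    # the access log; the address is then parsed out of that line)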
+ # Wait till Access Log is generated + topo.standalone.restart() + + ip_ip = topo.standalone.ds_access_log.match('.* connection from ')[0].split()[-1] + + # Add ACI + domain = Domain(topo.standalone, DEFAULT_SUFFIX) + domain.add("aci", f'(target = "ldap:///{IP_OU_KEY}")(targetattr=*)(version 3.0; aci "IP aci"; ' + f'allow(all)userdn = "ldap:///{NETSCAPEIP_KEY}" and ip = "{ip_ip}" ;)') + + # create a new connection for the test + conn = UserAccount(topo.standalone, NETSCAPEIP_KEY).bind(PW_DM) + # Perform Operation + org = OrganizationalUnit(conn, IP_OU_KEY) + org.replace("seeAlso", "cn=1") + # Remove the ACI + domain.ensure_removed("aci", f'(target = "ldap:///{IP_OU_KEY}")(targetattr=*)(version 3.0; aci ' + f'"IP aci"; allow(all)userdn = "ldap:///{NETSCAPEIP_KEY}" and ' + f'ip = "{ip_ip}" ;)') + # Now add an ACI with a new IP + domain.add("aci", f'(target = "ldap:///{IP_OU_KEY}")(targetattr=*)(version 3.0; aci "IP aci"; ' + f'allow(all)userdn = "ldap:///{NETSCAPEIP_KEY}" and ip = "100.1.1.1" ;)') + + # After changing the IP the user cannot access the data + with pytest.raises(ldap.INSUFFICIENT_ACCESS): + org.replace("seeAlso", "cn=1") + + +def test_connectin_from_an_unauthorized_network(topo, add_user, aci_of_user): + """ + User cannot access the data when connecting from an unauthorized network, as per the ACI. + + :id: 52d1ecce-7ac5-11e8-9ad9-8c16451d917b + :setup: Standalone Server + :steps: + 1. Add test entry + 2. Add ACI + 3. User should follow ACI role + :expectedresults: + 1. Entry should be added + 2. Operation should succeed + 3. Operation should succeed + """ + # Find the IP from the DS logs, as we need to know the exact IP used by DS to run the instances. + ip_ip = topo.standalone.ds_access_log.match('.* connection from ')[0].split()[-1] + # Add ACI + domain = Domain(topo.standalone, DEFAULT_SUFFIX) + domain.add("aci", f'(target = "ldap:///{IP_OU_KEY}")' + f'(targetattr=*)(version 3.0; aci "IP aci"; ' + f'allow(all) userdn = "ldap:///{NETSCAPEIP_KEY}" ' + f'and ip != "{ip_ip}" ;)') + + # create a new connection for the test + conn = UserAccount(topo.standalone, NETSCAPEIP_KEY).bind(PW_DM) + # Perform Operation + org = OrganizationalUnit(conn, IP_OU_KEY) + with pytest.raises(ldap.INSUFFICIENT_ACCESS): + org.replace("seeAlso", "cn=1") + # Remove the ACI + domain.ensure_removed('aci', domain.get_attr_vals('aci')[-1]) + # Add new ACI + domain.add('aci', f'(target = "ldap:///{IP_OU_KEY}")(targetattr=*)' + f'(version 3.0; aci "IP aci"; allow(all) ' + f'userdn = "ldap:///{NETSCAPEIP_KEY}" and ip = "{ip_ip}" ;)') + + # Now the user can access the data + org.replace("seeAlso", "cn=1") + + +def test_ip_keyword_test_noip_cannot(topo, add_user, aci_of_user): + """ + User NoIP cannot access the data, as per the ACI. + + :id: 570bc7f6-7ac5-11e8-88c1-8c16451d917b + :setup: Standalone Server + :steps: + 1. Add test entry + 2. Add ACI + 3. User should follow ACI role + :expectedresults: + 1. Entry should be added + 2. Operation should succeed + 3. Operation should succeed + """ + # Add ACI + Domain(topo.standalone, + DEFAULT_SUFFIX).add("aci", f'(target ="ldap:///{IP_OU_KEY}")' + f'(targetattr=*)(version 3.0; aci "IP aci"; allow(all) ' + f'userdn = "ldap:///{FULLIP_KEY}" and ip = "*" ;)') + + # Create a new connection for this test.
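+ # Note (editorial): the ACI above grants access only to FULLIP_KEY (with ip = "*"); NOIP_KEY matches no allow rule, so the write below is expected to be denied.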
+ conn = UserAccount(topo.standalone, NOIP_KEY).bind(PW_DM) + # Perform Operation + org = OrganizationalUnit(conn, IP_OU_KEY) + with pytest.raises(ldap.INSUFFICIENT_ACCESS): + org.replace("seeAlso", "cn=1") + + +def test_user_can_access_the_data_at_any_time(topo, add_user, aci_of_user): + """ + User can access the data at any time as per the ACI. + + :id: 5b4da91a-7ac5-11e8-bbda-8c16451d917b + :setup: Standalone Server + :steps: + 1. Add test entry + 2. Add ACI + 3. User should follow ACI role + :expectedresults: + 1. Entry should be added + 2. Operation should succeed + 3. Operation should succeed + """ + # Add ACI + Domain(topo.standalone, + DEFAULT_SUFFIX).add("aci", f'(target = "ldap:///{TIMEOFDAY_OU_KEY}")' + f'(targetattr=*)(version 3.0; aci "Timeofday aci"; ' + f'allow(all) userdn ="ldap:///{FULLWORKER_KEY}" and ' + f'(timeofday >= "0000" and timeofday <= "2359") ;)') + + # Create a new connection for this test. + conn = UserAccount(topo.standalone, FULLWORKER_KEY).bind(PW_DM) + # Perform Operation + org = OrganizationalUnit(conn, TIMEOFDAY_OU_KEY) + org.replace("seeAlso", "cn=1") + + +def test_user_can_access_the_data_only_in_the_morning(topo, add_user, aci_of_user): + """ + User can access the data only in the morning as per the ACI. + + :id: 5f7d380c-7ac5-11e8-8124-8c16451d917b + :setup: Standalone Server + :steps: + 1. Add test entry + 2. Add ACI + 3. User should follow ACI role + :expectedresults: + 1. Entry should be added + 2. Operation should succeed + 3. Operation should succeed + """ + # Add ACI + Domain(topo.standalone, + DEFAULT_SUFFIX).add("aci", f'(target = "ldap:///{TIMEOFDAY_OU_KEY}")' + f'(targetattr=*)(version 3.0; aci "Timeofday aci"; ' + f'allow(all) userdn = "ldap:///{DAYWORKER_KEY}" ' + f'and timeofday < "1200" ;)') + + # Create a new connection for this test. + conn = UserAccount(topo.standalone, DAYWORKER_KEY).bind(PW_DM) + # Perform Operation + org = OrganizationalUnit(conn, TIMEOFDAY_OU_KEY) + if datetime.now().hour >= 12: + with pytest.raises(ldap.INSUFFICIENT_ACCESS): + org.replace("seeAlso", "cn=1") + else: + org.replace("seeAlso", "cn=1") + + +def test_user_can_access_the_data_only_in_the_afternoon(topo, add_user, aci_of_user): + """ + User can access the data only in the afternoon as per the ACI. + + :id: 63eb5b1c-7ac5-11e8-bd46-8c16451d917b + :setup: Standalone Server + :steps: + 1. Add test entry + 2. Add ACI + 3. User should follow ACI role + :expectedresults: + 1. Entry should be added + 2. Operation should succeed + 3. Operation should succeed + """ + # Add ACI + Domain(topo.standalone, + DEFAULT_SUFFIX).add("aci", f'(target = "ldap:///{TIMEOFDAY_OU_KEY}")' + f'(targetattr=*)(version 3.0; aci "Timeofday aci"; ' + f'allow(all) userdn = "ldap:///{NIGHTWORKER_KEY}" ' + f'and timeofday > \'1200\' ;)') + + # create a new connection for the test + conn = UserAccount(topo.standalone, NIGHTWORKER_KEY).bind(PW_DM) + # Perform Operation + org = OrganizationalUnit(conn, TIMEOFDAY_OU_KEY) + if datetime.now().hour < 12: + with pytest.raises(ldap.INSUFFICIENT_ACCESS): + org.replace("seeAlso", "cn=1") + else: + org.replace("seeAlso", "cn=1") + + +def test_timeofday_keyword(topo, add_user, aci_of_user): + """ + User NOWORKER_KEY can access the data as per the ACI after removing + ACI it cant. + + :id: 681dd58e-7ac5-11e8-bed1-8c16451d917b + :setup: Standalone Server + :steps: + 1. Add test entry + 2. Add ACI + 3. User should follow ACI role + :expectedresults: + 1. Entry should be added + 2. Operation should succeed + 3. 
Operation should succeed + """ + now = time.strftime("%c") + now_1 = "".join(now.split()[3].split(":"))[:4] + # Add ACI + domain = Domain(topo.standalone, DEFAULT_SUFFIX) + domain.add("aci", f'(target = "ldap:///{TIMEOFDAY_OU_KEY}")' + f'(targetattr=*)(version 3.0; aci "Timeofday aci"; ' + f'allow(all) userdn = "ldap:///{NOWORKER_KEY}" ' + f'and timeofday = \'{now_1}\' ;)') + + # Create a new connection for this test. + conn = UserAccount(topo.standalone, NOWORKER_KEY).bind(PW_DM) + # Perform Operation + org = OrganizationalUnit(conn, TIMEOFDAY_OU_KEY) + org.replace("seeAlso", "cn=1") + # Remove ACI + aci = domain.get_attr_vals_utf8('aci')[-1] + domain.ensure_removed('aci', aci) + assert aci not in domain.get_attr_vals_utf8('aci') + # after removing the ACI user cannot access the data + time.sleep(1) + with pytest.raises(ldap.INSUFFICIENT_ACCESS): + org.replace("seeAlso", "cn=1") + + +def test_dayofweek_keyword_test_everyday_can_access(topo, add_user, aci_of_user): + """ + User can access the data EVERYDAY_KEY as per the ACI. + + :id: 6c5922ca-7ac5-11e8-8f01-8c16451d917b + :setup: Standalone Server + :steps: + 1. Add test entry + 2. Add ACI + 3. User should follow ACI role + :expectedresults: + 1. Entry should be added + 2. Operation should succeed + 3. Operation should succeed + """ + # Add ACI + Domain(topo.standalone, + DEFAULT_SUFFIX).add("aci", f'(target = "ldap:///{DAYOFWEEK_OU_KEY}")' + f'(targetattr=*)(version 3.0; aci "Dayofweek aci"; ' + f'allow(all) userdn = "ldap:///{EVERYDAY_KEY}" and ' + f'dayofweek = "Sun, Mon, Tue, Wed, Thu, Fri, Sat" ;)') + + # Create a new connection for this test. + conn = UserAccount(topo.standalone, EVERYDAY_KEY).bind(PW_DM) + # Perform Operation + org = OrganizationalUnit(conn, DAYOFWEEK_OU_KEY) + org.replace("seeAlso", "cn=1") + + +def test_dayofweek_keyword_today_can_access(topo, add_user, aci_of_user): + """ + User can access the data one day per week as per the ACI. + + :id: 7131dc88-7ac5-11e8-acc2-8c16451d917b + :setup: Standalone Server + :steps: + 1. Add test entry + 2. Add ACI + 3. User should follow ACI role + :expectedresults: + 1. Entry should be added + 2. Operation should succeed + 3. Operation should succeed + """ + today_1 = time.strftime("%c").split()[0] + # Add ACI + Domain(topo.standalone, + DEFAULT_SUFFIX).add("aci", f'(target = "ldap:///{DAYOFWEEK_OU_KEY}")' + f'(targetattr=*)(version 3.0; aci "Dayofweek aci"; ' + f'allow(all) userdn = "ldap:///{TODAY_KEY}" ' + f'and dayofweek = \'{today_1}\' ;)') + + # Create a new connection for this test. + conn = UserAccount(topo.standalone, TODAY_KEY).bind(PW_DM) + # Perform Operation + org = OrganizationalUnit(conn, DAYOFWEEK_OU_KEY) + org.replace("seeAlso", "cn=1") + + +def test_user_cannot_access_the_data_at_all(topo, add_user, aci_of_user): + """ + User cannot access the data at all as per the ACI. + + :id: 75cdac5e-7ac5-11e8-968a-8c16451d917b + :setup: Standalone Server + :steps: + 1. Add test entry + 2. Add ACI + 3. User should follow ACI role + :expectedresults: + 1. Entry should be added + 2. Operation should succeed + 3. Operation should succeed + """ + # Add ACI + Domain(topo.standalone, + DEFAULT_SUFFIX).add("aci", f'(target = "ldap:///{DAYOFWEEK_OU_KEY}")' + f'(targetattr=*)(version 3.0; aci "Dayofweek aci"; ' + f'allow(all) userdn = "ldap:///{TODAY_KEY}" ' + f'and dayofweek = "$NEW_DATE" ;)') + + # Create a new connection for this test. 
+ conn = UserAccount(topo.standalone, NODAY_KEY).bind(PW_DM) + # Perform Operation + org = OrganizationalUnit(conn, DAYOFWEEK_OU_KEY) + with pytest.raises(ldap.INSUFFICIENT_ACCESS): + org.replace("seeAlso", "cn=1") + + +if __name__ == "__main__": + CURRENT_FILE = os.path.realpath(__file__) + pytest.main("-s -v %s" % CURRENT_FILE) diff --git a/dirsrvtests/tests/suites/acl/keywords_test.py b/dirsrvtests/tests/suites/acl/keywords_test.py new file mode 100644 index 0000000..138e3ed --- /dev/null +++ b/dirsrvtests/tests/suites/acl/keywords_test.py @@ -0,0 +1,467 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2019 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. +# --- END COPYRIGHT BLOCK ---- + +""" +This test script will test wrong/correct key value with ACIs. +""" + +import os +import socket +import pytest + +from lib389.idm.account import Anonymous +from lib389._constants import DEFAULT_SUFFIX, PW_DM +from lib389.idm.domain import Domain +from lib389.idm.organizationalunit import OrganizationalUnit +from lib389.idm.user import UserAccount + +import ldap + +pytestmark = pytest.mark.tier1 + +KEYWORDS_OU_KEY = "ou=Keywords,{}".format(DEFAULT_SUFFIX) +DNS_OU_KEY = "ou=DNS,{}".format(KEYWORDS_OU_KEY) +IP_OU_KEY = "ou=IP,{}".format(KEYWORDS_OU_KEY) +FULLIP_KEY = "uid=FULLIP_KEY,{}".format(IP_OU_KEY) +AUTHMETHOD_OU_KEY = "ou=Authmethod,{}".format(KEYWORDS_OU_KEY) +SIMPLE_1_KEY = "uid=SIMPLE_1_KEY,{}".format(AUTHMETHOD_OU_KEY) +FULLDNS_KEY = "uid=FULLDNS_KEY,{}".format(DNS_OU_KEY) +SUNDNS_KEY = "uid=SUNDNS_KEY,{}".format(DNS_OU_KEY) +NODNS_KEY = "uid=NODNS_KEY,{}".format(DNS_OU_KEY) +NETSCAPEDNS_KEY = "uid=NETSCAPEDNS_KEY,{}".format(DNS_OU_KEY) +NONE_1_KEY = "uid=NONE_1_KEY,{}".format(AUTHMETHOD_OU_KEY) +NONE_2_KEY = "uid=NONE_2_KEY,{}".format(AUTHMETHOD_OU_KEY) + + +NONE_ACI_KEY = f'(target = "ldap:///{AUTHMETHOD_OU_KEY}")' \ + f'(targetattr=*)(version 3.0; aci "Authmethod aci"; ' \ + f'allow(all) userdn = "ldap:///{NONE_1_KEY}" and authmethod = "none" ;)' + +SIMPLE_ACI_KEY = f'(target = "ldap:///{AUTHMETHOD_OU_KEY}")' \ + f'(targetattr=*)(version 3.0; aci "Authmethod aci"; ' \ + f'allow(all) userdn = "ldap:///{SIMPLE_1_KEY}" and authmethod = "simple" ;)' + + +def _add_aci(topo, name): + """ + This function will add ACI to DEFAULT_SUFFIX + """ + Domain(topo.standalone, DEFAULT_SUFFIX).add("aci", name) + + +def test_user_binds_with_a_password_and_can_access_the_data(topo, add_user, aci_of_user): + """ + User binds with a password and can access the data as per the ACI. + + :id: f6c4b6f0-7ac4-11e8-a517-8c16451d917b + :setup: Standalone Server + :steps: + 1. Add test entry + 2. Add ACI + 3. User should follow ACI role + :expectedresults: + 1. Entry should be added + 2. Operation should succeed + 3. Operation should succeed + """ + # Add ACI + _add_aci(topo, NONE_ACI_KEY) + # Create a new connection for this test. + conn = UserAccount(topo.standalone, NONE_1_KEY).bind(PW_DM) + # Perform Operation + OrganizationalUnit(conn, AUTHMETHOD_OU_KEY).replace("seeAlso", "cn=1") + + +def test_user_binds_with_a_bad_password_and_cannot_access_the_data(topo, add_user, aci_of_user): + """ + User binds with a BAD password and cannot access the data . + + :id: 0397744e-7ac5-11e8-bfb1-8c16451d917b + :setup: Standalone Server + :steps: + 1. Add test entry + 2. Add ACI + 3. User should follow ACI role + :expectedresults: + 1. Entry should be added + 2. Operation should succeed + 3. 
Operation should succeed + """ + # User binds with a bad password and cannot access the data + with pytest.raises(ldap.UNWILLING_TO_PERFORM): + UserAccount(topo.standalone, NONE_1_KEY).bind("") + + +def test_anonymous_user_cannot_access_the_data(topo, add_user, aci_of_user): + """ + Anonymous user cannot access the data + + :id: 0821a55c-7ac5-11e8-b214-8c16451d917b + :setup: Standalone Server + :steps: + 1. Add test entry + 2. Add ACI + 3. User should follow ACI role + :expectedresults: + 1. Entry should be added + 2. Operation should succeed + 3. Operation should succeed + """ + # Add ACI + _add_aci(topo, NONE_ACI_KEY) + + # Create a new connection for this test. + conn = Anonymous(topo.standalone).bind() + # Perform Operation + org = OrganizationalUnit(conn, AUTHMETHOD_OU_KEY) + with pytest.raises(ldap.INSUFFICIENT_ACCESS): + org.replace("seeAlso", "cn=1") + + +def test_authenticated_but_has_no_rigth_on_the_data(topo, add_user, aci_of_user): + """ + User has a password. He is authenticated but has no rigth on the data. + + :id: 11be7ebe-7ac5-11e8-b754-8c16451d917b + :setup: Standalone Server + :steps: + 1. Add test entry + 2. Add ACI + 3. User should follow ACI role + :expectedresults: + 1. Entry should be added + 2. Operation should succeed + 3. Operation should succeed + """ + # Add ACI + _add_aci(topo, NONE_ACI_KEY) + + # Create a new connection for this test. + conn = UserAccount(topo.standalone, SIMPLE_1_KEY).bind(PW_DM) + # Perform Operation + org = OrganizationalUnit(conn, AUTHMETHOD_OU_KEY) + with pytest.raises(ldap.INSUFFICIENT_ACCESS): + org.replace("seeAlso", "cn=1") + + +def test_the_bind_client_is_accessing_the_directory(topo, add_user, aci_of_user): + """ + The bind rule is evaluated to be true if the client is accessing the directory as per the ACI. + + :id: 1715bfb2-7ac5-11e8-8f2c-8c16451d917b + :setup: Standalone Server + :steps: + 1. Add test entry + 2. Add ACI + 3. User should follow ACI role + :expectedresults: + 1. Entry should be added + 2. Operation should succeed + 3. Operation should succeed + """ + # Add ACI + _add_aci(topo, SIMPLE_ACI_KEY) + + # Create a new connection for this test. + conn = UserAccount(topo.standalone, SIMPLE_1_KEY).bind(PW_DM) + # Perform Operation + OrganizationalUnit(conn, AUTHMETHOD_OU_KEY).replace("seeAlso", "cn=1") + + +def test_users_binds_with_a_password_and_can_access_the_data( + topo, add_user, aci_of_user): + """ + User binds with a password and can access the data as per the ACI. + + :id: 1bd01cb4-7ac5-11e8-a2f1-8c16451d917b + :setup: Standalone Server + :steps: + 1. Add test entry + 2. Add ACI + 3. User should follow ACI role + :expectedresults: + 1. Entry should be added + 2. Operation should succeed + 3. Operation should succeed + """ + # Add ACI + _add_aci(topo, SIMPLE_ACI_KEY) + + # Create a new connection for this test. + conn = UserAccount(topo.standalone, SIMPLE_1_KEY).bind(PW_DM) + # Perform Operation + OrganizationalUnit(conn, AUTHMETHOD_OU_KEY).replace("seeAlso", "cn=1") + + +def test_user_binds_without_any_password_and_cannot_access_the_data(topo, add_user, aci_of_user): + """ + User binds without any password and cannot access the data + + :id: 205777fa-7ac5-11e8-ba2f-8c16451d917b + :setup: Standalone Server + :steps: + 1. Add test entry + 2. Add ACI + 3. User should follow ACI role + :expectedresults: + 1. Entry should be added + 2. Operation should succeed + 3. Operation should succeed + """ + # Add ACI + _add_aci(topo, SIMPLE_ACI_KEY) + + # Create a new connection for this test. 
+ conn = Anonymous(topo.standalone).bind() + # Perform Operation + org = OrganizationalUnit(conn, AUTHMETHOD_OU_KEY) + with pytest.raises(ldap.INSUFFICIENT_ACCESS): + org.replace("seeAlso", "cn=1") + + +def test_user_can_access_the_data_when_connecting_from_any_machine( + topo, add_user, aci_of_user +): + """ + User can access the data when connecting from any machine as per the ACI. + + :id: 28cbc008-7ac5-11e8-934e-8c16451d917b + :setup: Standalone Server + :steps: + 1. Add test entry + 2. Add ACI + 3. User should follow ACI role + :expectedresults: + 1. Entry should be added + 2. Operation should succeed + 3. Operation should succeed + """ + # Add ACI + Domain(topo.standalone, DEFAULT_SUFFIX)\ + .add("aci", f'(target ="ldap:///{DNS_OU_KEY}")' + f'(targetattr=*)(version 3.0; aci "DNS aci"; allow(all) ' + f'userdn = "ldap:///{FULLDNS_KEY}" and dns = "*" ;)') + + # Create a new connection for this test. + conn = UserAccount(topo.standalone, FULLDNS_KEY).bind(PW_DM) + # Perform Operation + OrganizationalUnit(conn, DNS_OU_KEY).replace("seeAlso", "cn=1") + + +def test_user_can_access_the_data_when_connecting_from_internal_ds_network_only( + topo, add_user, aci_of_user +): + """ + User can access the data when connecting from internal ICNC network only as per the ACI. + :id: 2cac2136-7ac5-11e8-8328-8c16451d917b + :setup: Standalone Server + :steps: + 1. Add test entry + 2. Add ACI + 3. User should follow ACI role + :expectedresults: + 1. Entry should be added + 2. Operation should succeed + 3. Operation should succeed + """ + dns_name = socket.getfqdn() + # Add ACI + Domain(topo.standalone, DEFAULT_SUFFIX).\ + add("aci", [f'(target = "ldap:///{DNS_OU_KEY}")' + f'(targetattr=*)(version 3.0; aci "DNS aci"; ' + f'allow(all) userdn = "ldap:///{SUNDNS_KEY}" and dns = "*redhat.com" ;)', + f'(target = "ldap:///{DNS_OU_KEY}")(targetattr=*)' + f'(version 3.0; aci "DNS aci"; allow(all) ' + f'userdn = "ldap:///{SUNDNS_KEY}" and dns = "{dns_name}" ;)']) + + # Create a new connection for this test. + conn = UserAccount(topo.standalone, SUNDNS_KEY).bind(PW_DM) + # Perform Operation + OrganizationalUnit(conn, DNS_OU_KEY).replace("seeAlso", "cn=1") + + +def test_user_can_access_the_data_when_connecting_from_some_network_only( + topo, add_user, aci_of_user +): + """ + User can access the data when connecting from some network only as per the ACI. + + :id: 3098512a-7ac5-11e8-af85-8c16451d917b + :setup: Standalone Server + :steps: + 1. Add test entry + 2. Add ACI + 3. User should follow ACI role + :expectedresults: + 1. Entry should be added + 2. Operation should succeed + 3. Operation should succeed + """ + dns_name = socket.getfqdn() + # Add ACI + Domain(topo.standalone, DEFAULT_SUFFIX)\ + .add("aci", f'(target = "ldap:///{DNS_OU_KEY}")' + f'(targetattr=*)(version 3.0; aci "DNS aci"; allow(all) ' + f'userdn = "ldap:///{NETSCAPEDNS_KEY}" ' + f'and dns = "{dns_name}" ;)') + + # Create a new connection for this test. + conn = UserAccount(topo.standalone, NETSCAPEDNS_KEY).bind(PW_DM) + # Perform Operation + OrganizationalUnit(conn, DNS_OU_KEY).replace("seeAlso", "cn=1") + + +def test_from_an_unauthorized_network(topo, add_user, aci_of_user): + """ + User cannot access the data when connecting from an unauthorized network as per the ACI. + + :id: 34cf9726-7ac5-11e8-bc12-8c16451d917b + :setup: Standalone Server + :steps: + 1. Add test entry + 2. Add ACI + 3. User should follow ACI role + :expectedresults: + 1. Entry should be added + 2. Operation should succeed + 3. 
Operation should succeed + """ + # Add ACI + Domain(topo.standalone, DEFAULT_SUFFIX).\ + add("aci", f'(target = "ldap:///{DNS_OU_KEY}")' + f'(targetattr=*)(version 3.0; aci "DNS aci"; allow(all) ' + f'userdn = "ldap:///{NETSCAPEDNS_KEY}" and dns != "red.iplanet.com" ;)') + + # Create a new connection for this test. + conn = UserAccount(topo.standalone, NETSCAPEDNS_KEY).bind(PW_DM) + # Perform Operation + OrganizationalUnit(conn, DNS_OU_KEY).replace("seeAlso", "cn=1") + + +def test_user_cannot_access_the_data_when_connecting_from_an_unauthorized_network_2( + topo, add_user, aci_of_user): + """ + User cannot access the data when connecting from an unauthorized network as per the ACI. + + :id: 396bdd44-7ac5-11e8-8014-8c16451d917b + :setup: Standalone Server + :steps: + 1. Add test entry + 2. Add ACI + 3. User should follow ACI role + :expectedresults: + 1. Entry should be added + 2. Operation should succeed + 3. Operation should succeed + """ + # Add ACI + Domain(topo.standalone, DEFAULT_SUFFIX).\ + add("aci", f'(target = "ldap:///{DNS_OU_KEY}")' + f'(targetattr=*)(version 3.0; aci "DNS aci"; allow(all) ' + f'userdn = "ldap:///{NETSCAPEDNS_KEY}" ' + f'and dnsalias != "www.redhat.com" ;)') + + # Create a new connection for this test. + conn = UserAccount(topo.standalone, NETSCAPEDNS_KEY).bind(PW_DM) + # Perform Operation + OrganizationalUnit(conn, DNS_OU_KEY).replace("seeAlso", "cn=1") + + +def test_user_cannot_access_the_data_if_not_from_a_certain_domain(topo, add_user, aci_of_user): + """ + User cannot access the data if not from a certain domain as per the ACI. + :id: 3d658972-7ac5-11e8-930f-8c16451d917b + :setup: Standalone Server + :steps: + 1. Add test entry + 2. Add ACI + 3. User should follow ACI role + :expectedresults: + 1. Entry should be added + 2. Operation should succeed + 3. Operation should succeed + """ + # Add ACI + Domain(topo.standalone, DEFAULT_SUFFIX).\ + add("aci", f'(target = "ldap:///{DNS_OU_KEY}")(targetattr=*)' + f'(version 3.0; aci "DNS aci"; allow(all) ' + f'userdn = "ldap:///{NODNS_KEY}" ' + f'and dns = "RAP.rock.SALSA.house.COM" ;)') + + # Create a new connection for this test. + conn = UserAccount(topo.standalone, NODNS_KEY).bind(PW_DM) + # Perform Operation + org = OrganizationalUnit(conn, AUTHMETHOD_OU_KEY) + with pytest.raises(ldap.INSUFFICIENT_ACCESS): + org.replace("seeAlso", "cn=1") + + +def test_dnsalias_keyword_test_nodns_cannot(topo, add_user, aci_of_user): + """ + Dnsalias Keyword NODNS_KEY cannot assess data as per the ACI. + + :id: 41b467be-7ac5-11e8-89a3-8c16451d917b + :setup: Standalone Server + :steps: + 1. Add test entry + 2. Add ACI + 3. User should follow ACI role + :expectedresults: + 1. Entry should be added + 2. Operation should succeed + 3. Operation should succeed + """ + # Add ACI + Domain(topo.standalone, DEFAULT_SUFFIX).\ + add("aci", f'(target = "ldap:///{DNS_OU_KEY}")(targetattr=*)' + f'(version 3.0; aci "DNS aci"; allow(all) ' + f'userdn = "ldap:///{NODNS_KEY}" and ' + f'dnsalias = "RAP.rock.SALSA.house.COM" ;)') + + # Create a new connection for this test. 
+ conn = UserAccount(topo.standalone, NODNS_KEY).bind(PW_DM) + # Perform Operation + org = OrganizationalUnit(conn, DNS_OU_KEY) + with pytest.raises(ldap.INSUFFICIENT_ACCESS): + org.replace("seeAlso", "cn=1") + +@pytest.mark.ds50378 +@pytest.mark.bz1710848 +@pytest.mark.parametrize("ip_addr", ['127.0.0.1', "[::1]"]) +def test_user_can_access_from_ipv4_or_ipv6_address(topo, add_user, aci_of_user, ip_addr): + """ + User can modify the data when accessing the server from the allowed IPv4 and IPv6 addresses + + :id: 461e761e-7ac5-11e8-9ae4-8c16451d917b + :parametrized: yes + :setup: Standalone Server + :steps: + 1. Add ACI that has both IPv4 and IPv6 + 2. Connect from one of the IPs allowed in ACI + 3. Modify an attribute + :expectedresults: + 1. ACI should be added + 2. Conection should be successful + 3. Operation should be successful + """ + # Add ACI that contains both IPv4 and IPv6 + Domain(topo.standalone, DEFAULT_SUFFIX).\ + add("aci", f'(target ="ldap:///{IP_OU_KEY}")(targetattr=*) ' + f'(version 3.0; aci "IP aci"; allow(all) ' + f'userdn = "ldap:///{FULLIP_KEY}" and (ip = "127.0.0.1" or ip = "::1");)') + + # Create a new connection for this test. + conn = UserAccount(topo.standalone, FULLIP_KEY).bind(PW_DM, uri=f'ldap://{ip_addr}:{topo.standalone.port}') + + # Perform Operation + OrganizationalUnit(conn, IP_OU_KEY).replace("seeAlso", "cn=1") + + +if __name__ == "__main__": + CURRENT_FILE = os.path.realpath(__file__) + pytest.main("-s -v %s" % CURRENT_FILE) diff --git a/dirsrvtests/tests/suites/acl/misc_test.py b/dirsrvtests/tests/suites/acl/misc_test.py new file mode 100644 index 0000000..8f122b7 --- /dev/null +++ b/dirsrvtests/tests/suites/acl/misc_test.py @@ -0,0 +1,414 @@ +""" +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2019 RED Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. +# --- END COPYRIGHT BLOCK ---- +""" + +import os +import pytest + +from lib389._constants import DEFAULT_SUFFIX, PW_DM +from lib389.idm.user import UserAccount, UserAccounts +from lib389._mapped_object import DSLdapObject +from lib389.idm.account import Accounts, Anonymous +from lib389.idm.organizationalunit import OrganizationalUnit, OrganizationalUnits +from lib389.idm.group import Group, Groups +from lib389.topologies import topology_st as topo +from lib389.idm.domain import Domain +from lib389.plugins import ACLPlugin + +import ldap + +pytestmark = pytest.mark.tier1 + +PEOPLE = "ou=PEOPLE,{}".format(DEFAULT_SUFFIX) +DYNGROUP = "cn=DYNGROUP,{}".format(PEOPLE) +CONTAINER_1_DELADD = "ou=Product Development,{}".format(DEFAULT_SUFFIX) +CONTAINER_2_DELADD = "ou=Accounting,{}".format(DEFAULT_SUFFIX) + + +@pytest.fixture(scope="function") +def aci_of_user(request, topo): + """ + :param request: + :param topo: + """ + aci_list = Domain(topo.standalone, DEFAULT_SUFFIX).get_attr_vals('aci') + + def finofaci(): + """ + Removes and Restores ACIs after the test. 
+ """ + domain = Domain(topo.standalone, DEFAULT_SUFFIX) + domain.remove_all('aci') + for i in aci_list: + domain.add("aci", i) + + request.addfinalizer(finofaci) + + +@pytest.fixture(scope="function") +def clean(request, topo): + """ + :param request: + :param topo: + """ + ous = OrganizationalUnits(topo.standalone, DEFAULT_SUFFIX) + try: + for i in ['Product Development', 'Accounting']: + ous.create(properties={'ou': i}) + except ldap.ALREADY_EXISTS as eoor_eoor: + topo.standalone.log.info("Exception (expected): %s" % type(eoor_eoor).__name__) + + def fin(): + """ + Deletes entries after the test. + """ + for scope_scope in [CONTAINER_1_DELADD, CONTAINER_2_DELADD, PEOPLE]: + try: + DSLdapObject(topo.standalone, scope_scope).delete() + except ldap.ALREADY_EXISTS as eoor_eoor: + topo.standalone.log.info("Exception (expected): %s" % type(eoor_eoor).__name__) + + request.addfinalizer(fin) + + +def test_accept_aci_in_addition_to_acl(topo, clean, aci_of_user): + """ + Misc Test 2 accept aci in addition to acl + :id: 8e9408fa-7db8-11e8-adaa-8c16451d917b + :setup: Standalone Instance + :steps: + 1. Add test entry + 2. Add ACI + 3. User should follow ACI role + :expectedresults: + 1. Entry should be added + 2. Operation should succeed + 3. Operation should succeed + """ + uas = UserAccounts(topo.standalone, DEFAULT_SUFFIX, rdn='ou=product development') + user = uas.create_test_user() + for i in [('mail', 'anujborah@okok.com'), ('givenname', 'Anuj'), ('userPassword', PW_DM)]: + user.set(i[0], i[1]) + + aci_target = "(targetattr=givenname)" + aci_allow = ('(version 3.0; acl "Name of the ACI"; deny (read, search, compare, write)') + aci_subject = 'userdn="ldap:///anyone";)' + Domain(topo.standalone, CONTAINER_1_DELADD).add("aci", aci_target + aci_allow + aci_subject) + + conn = Anonymous(topo.standalone).bind() + # aci will block targetattr=givenname to anyone + user = UserAccount(conn, user.dn) + with pytest.raises(AssertionError): + assert user.get_attr_val_utf8('givenname') == 'Anuj' + # aci will allow targetattr=uid to anyone + assert user.get_attr_val_utf8('uid') == 'test_user_1000' + + for i in uas.list(): + i.delete() + + +@pytest.mark.bz334451 +def test_more_then_40_acl_will_crash_slapd(topo, clean, aci_of_user): + """ + bug 334451 : more then 40 acl will crash slapd + superseded by Bug 772778 - acl cache overflown problem with > 200 acis + :id: 93a44c60-7db8-11e8-9439-8c16451d917b + :setup: Standalone Instance + :steps: + 1. Add test entry + 2. Add ACI + 3. User should follow ACI role + :expectedresults: + 1. Entry should be added + 2. Operation should succeed + 3. 
Operation should succeed + """ + uas = UserAccounts(topo.standalone, DEFAULT_SUFFIX, rdn='ou=Accounting') + user = uas.create_test_user() + + aci_target = '(target ="ldap:///{}")(targetattr !="userPassword")'.format(CONTAINER_1_DELADD) + # more_then_40_acl_will not crash_slapd + for i in range(40): + aci_allow = '(version 3.0;acl "ACI_{}";allow (read, search, compare)'.format(i) + aci_subject = 'userdn="ldap:///anyone";)' + aci_body = aci_target + aci_allow + aci_subject + Domain(topo.standalone, CONTAINER_1_DELADD).add("aci", aci_body) + conn = Anonymous(topo.standalone).bind() + assert UserAccount(conn, user.dn).get_attr_val_utf8('uid') == 'test_user_1000' + + for i in uas.list(): + i.delete() + +@pytest.mark.bz345643 +def test_search_access_should_not_include_read_access(topo, clean, aci_of_user): + """ + bug 345643 + Misc Test 4 search access should not include read access + :id: 98ab173e-7db8-11e8-a309-8c16451d917b + :setup: Standalone Instance + :steps: + 1. Add test entry + 2. Add ACI + 3. User should follow ACI role + :expectedresults: + 1. Entry should be added + 2. Operation should succeed + 3. Operation should succeed + """ + assert Domain(topo.standalone, DEFAULT_SUFFIX).present('aci') + Domain(topo.standalone, DEFAULT_SUFFIX)\ + .add("aci", [f'(target ="ldap:///{DEFAULT_SUFFIX}")(targetattr !="userPassword")' + '(version 3.0;acl "anonymous access";allow (search)' + '(userdn = "ldap:///anyone");)', + f'(target="ldap:///{DEFAULT_SUFFIX}") (targetattr = "*")(version 3.0; ' + 'acl "allow self write";allow(write) ' + 'userdn = "ldap:///self";)', + f'(target="ldap:///{DEFAULT_SUFFIX}") (targetattr = "*")(version 3.0; ' + 'acl "Allow all admin group"; allow(all) groupdn = "ldap:///cn=Directory ' + 'Administrators, {}";)']) + + conn = Anonymous(topo.standalone).bind() + # search_access_should_not_include_read_access + suffix = Domain(conn, DEFAULT_SUFFIX) + with pytest.raises(AssertionError): + assert suffix.present('aci') + + +def test_only_allow_some_targetattr(topo, clean, aci_of_user): + """ + Misc Test 5 only allow some targetattr (1/2) + :id: 9d27f048-7db8-11e8-a71c-8c16451d917b + :setup: Standalone Instance + :steps: + 1. Add test entry + 2. Add ACI + 3. User should follow ACI role + :expectedresults: + 1. Entry should be added + 2. Operation should succeed + 3. Operation should succeed + """ + + uas = UserAccounts(topo.standalone, DEFAULT_SUFFIX, rdn=None) + for i in range(1, 3): + user = uas.create_test_user(uid=i, gid=i) + user.replace_many(('cn', 'Anuj1'), ('mail', 'annandaBorah@anuj.com')) + + Domain(topo.standalone, DEFAULT_SUFFIX).\ + replace("aci", '(target="ldap:///{}")(targetattr="mail||objectClass")' + '(version 3.0; acl "Test";allow (read,search,compare) ' + '(userdn = "ldap:///anyone"); )'.format(DEFAULT_SUFFIX)) + + conn = Anonymous(topo.standalone).bind() + accounts = Accounts(conn, DEFAULT_SUFFIX) + + # aci will allow only mail targetattr + assert len(accounts.filter('(mail=*)')) == 2 + # aci will allow only mail targetattr + assert not accounts.filter('(cn=*)') + # with root no , blockage + assert len(Accounts(topo.standalone, DEFAULT_SUFFIX).filter('(uid=*)')) == 2 + + for i in uas.list(): + i.delete() + + +def test_only_allow_some_targetattr_two(topo, clean, aci_of_user): + """ + Misc Test 6 only allow some targetattr (2/2)" + :id: a188239c-7db8-11e8-903e-8c16451d917b + :setup: Standalone Instance + :steps: + 1. Add test entry + 2. Add ACI + 3. User should follow ACI role + :expectedresults: + 1. Entry should be added + 2. Operation should succeed + 3. 
Operation should succeed + """ + uas = UserAccounts(topo.standalone, DEFAULT_SUFFIX, rdn=None) + for i in range(5): + user = uas.create_test_user(uid=i, gid=i) + user.replace_many(('mail', 'anujborah@anujborah.com'), + ('cn', 'Anuj'), ('userPassword', PW_DM)) + + user1 = uas.create_test_user() + user1.replace_many(('mail', 'anujborah@anujborah.com'), ('userPassword', PW_DM)) + + Domain(topo.standalone, DEFAULT_SUFFIX).\ + replace("aci", '(target="ldap:///{}") (targetattr="mail||objectClass")' + '(targetfilter="cn=Anuj") (version 3.0; acl "$tet_thistest"; ' + 'allow (compare,read,search) ' + '(userdn = "ldap:///anyone"); )'.format(DEFAULT_SUFFIX)) + + conn = UserAccount(topo.standalone, user.dn).bind(PW_DM) + # aci will allow only mail targetattr but only for cn=Anuj + account = Accounts(conn, DEFAULT_SUFFIX) + assert len(account.filter('(mail=*)')) == 5 + assert not account.filter('(cn=*)') + + for i in account.filter('(mail=*)'): + assert i.get_attr_val_utf8('mail') == 'anujborah@anujborah.com' + + + conn = Anonymous(topo.standalone).bind() + # aci will allow only mail targetattr but only for cn=Anuj + account = Accounts(conn, DEFAULT_SUFFIX) + assert len(account.filter('(mail=*)')) == 5 + assert not account.filter('(cn=*)') + + for i in account.filter('(mail=*)'): + assert i.get_attr_val_utf8('mail') == 'anujborah@anujborah.com' + + # with root no blockage + assert len(Accounts(topo.standalone, DEFAULT_SUFFIX).filter('(mail=*)')) == 6 + + for i in uas.list(): + i.delete() + + + +@pytest.mark.bz326000 +def test_memberurl_needs_to_be_normalized(topo, clean, aci_of_user): + """ + Non-regression test for BUG 326000: MemberURL needs to be normalized + :id: a5d172e6-7db8-11e8-aca7-8c16451d917b + :setup: Standalone Instance + :steps: + 1. Add test entry + 2. Add ACI + 3. User should follow ACI role + :expectedresults: + 1. Entry should be added + 2. Operation should succeed + 3. Operation should succeed + """ + ou_ou = OrganizationalUnit(topo.standalone, "ou=PEOPLE,{}".format(DEFAULT_SUFFIX)) + ou_ou.set('aci', '(targetattr= *)' + '(version 3.0; acl "tester"; allow(all) ' + 'groupdn = "ldap:///cn =DYNGROUP,ou=PEOPLE, {}";)'.format(DEFAULT_SUFFIX)) + + groups = Groups(topo.standalone, DEFAULT_SUFFIX, rdn='ou=PEOPLE') + groups.create(properties={"cn": "DYNGROUP", + "description": "DYNGROUP", + 'objectClass': 'groupOfURLS', + 'memberURL': "ldap:///ou=PEOPLE,{}??sub?" + "(uid=test_user_2)".format(DEFAULT_SUFFIX)}) + + uas = UserAccounts(topo.standalone, DEFAULT_SUFFIX) + for demo1 in [(1, "Entry to test rights on."), (2, "Member of DYNGROUP")]: + user = uas.create_test_user(uid=demo1[0], gid=demo1[0]) + user.replace_many(('description', demo1[1]), ('userPassword', PW_DM)) + + ##with normal aci + conn = UserAccount(topo.standalone, uas.list()[1].dn).bind(PW_DM) + harry = UserAccount(conn, uas.list()[1].dn) + harry.add('sn', 'FRED') + + ##with abnomal aci + dygrp = Group(topo.standalone, DYNGROUP) + dygrp.remove('memberurl', "ldap:///ou=PEOPLE,{}??sub?(uid=test_user_2)".format(DEFAULT_SUFFIX)) + dygrp.add('memberurl', "ldap:///ou=PEOPLE,{}??sub?(uid=tesT_UsEr_2)".format(DEFAULT_SUFFIX)) + harry.add('sn', 'Not FRED') + + for i in uas.list(): + i.delete() + +@pytest.mark.bz624370 +def test_greater_than_200_acls_can_be_created(topo, clean, aci_of_user): + """ + Misc 10, check that greater than 200 ACLs can be created. Bug 624370 + :id: ac020252-7db8-11e8-8652-8c16451d917b + :setup: Standalone Instance + :steps: + 1. Add test entry + 2. Add ACI + 3. User should follow ACI role + :expectedresults: + 1. 
Entry should be added + 2. Operation should succeed + 3. Operation should succeed + """ + # greater_than_200_acls_can_be_created + uas = UserAccounts(topo.standalone, DEFAULT_SUFFIX) + for i in range(200): + user = uas.create_test_user(uid=i, gid=i) + user.set('aci', '(targetattr = "description")' + '(version 3.0;acl "foo{}"; allow (read, search, compare)' + '(userdn="ldap:///anyone");)'.format(i)) + + assert user.\ + get_attr_val_utf8('aci') == '(targetattr = "description")' \ + '(version 3.0;acl "foo{}"; allow ' \ + '(read, search, compare)' \ + '(userdn="ldap:///anyone");)'.format(i) + for i in uas.list(): + i.delete() + + +@pytest.mark.bz624453 +def test_server_bahaves_properly_with_very_long_attribute_names(topo, clean, aci_of_user): + """ + Make sure the server bahaves properly with very long attribute names. Bug 624453. + :id: b0d31942-7db8-11e8-a833-8c16451d917b + :setup: Standalone Instance + :steps: + 1. Add test entry + 2. Add ACI + 3. User should follow ACI role + :expectedresults: + 1. Entry should be added + 2. Operation should succeed + 3. Operation should succeed + """ + users = UserAccounts(topo.standalone, DEFAULT_SUFFIX) + users.create_test_user() + users.list()[0].set('userpassword', PW_DM) + + user = UserAccount(topo.standalone, 'uid=test_user_1000,ou=People,{}'.format(DEFAULT_SUFFIX)) + with pytest.raises(ldap.INVALID_SYNTAX): + user.add("aci", "a" * 9000) + + +def test_do_bind_as_201_distinct_users(topo, clean, aci_of_user): + """ + Do bind as 201 distinct users + Increase the nsslapd-aclpb-max-selected-acls in cn=ACL Plugin,cn=plugins,cn=config + Restart the server + Do bind as 201 distinct users + :id: c0060532-7db8-11e8-a124-8c16451d917b + :setup: Standalone Instance + :steps: + 1. Add test entry + 2. Add ACI + 3. User should follow ACI role + :expectedresults: + 1. Entry should be added + 2. Operation should succeed + 3. Operation should succeed + """ + uas = UserAccounts(topo.standalone, DEFAULT_SUFFIX) + for i in range(50): + user = uas.create_test_user(uid=i, gid=i) + user.set('userPassword', PW_DM) + + for i in range(len(uas.list())): + uas.list()[i].bind(PW_DM) + + ACLPlugin(topo.standalone).replace("nsslapd-aclpb-max-selected-acls", '220') + topo.standalone.restart() + + for i in range(len(uas.list())): + uas.list()[i].bind(PW_DM) + + +if __name__ == "__main__": + CURRENT_FILE = os.path.realpath(__file__) + pytest.main("-s -v %s" % CURRENT_FILE) diff --git a/dirsrvtests/tests/suites/acl/modify_test.py b/dirsrvtests/tests/suites/acl/modify_test.py new file mode 100644 index 0000000..cf51e61 --- /dev/null +++ b/dirsrvtests/tests/suites/acl/modify_test.py @@ -0,0 +1,575 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2019 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. 
+# --- END COPYRIGHT BLOCK ---- + + +import pytest, os, ldap +from lib389._constants import DEFAULT_SUFFIX, PW_DM +from lib389.idm.user import UserAccount +from lib389.idm.account import Anonymous +from lib389.idm.group import Group, UniqueGroup +from lib389.idm.organizationalunit import OrganizationalUnit +from lib389.idm.group import Groups +from lib389.topologies import topology_st as topo +from lib389.idm.domain import Domain + +pytestmark = pytest.mark.tier1 + +CONTAINER_1_DELADD = "ou=Product Development,{}".format(DEFAULT_SUFFIX) +CONTAINER_2_DELADD = "ou=Accounting,{}".format(DEFAULT_SUFFIX) +USER_DELADD = "cn=Jeff Vedder,{}".format(CONTAINER_1_DELADD) +USER_WITH_ACI_DELADD = "cn=Sam Carter,{}".format(CONTAINER_2_DELADD) +KIRSTENVAUGHAN = "cn=Kirsten Vaughan, ou=Human Resources, {}".format(DEFAULT_SUFFIX) +HUMAN_OU_GLOBAL = "ou=Human Resources,{}".format(DEFAULT_SUFFIX) + + +@pytest.fixture(scope="function") +def cleanup_tree(request, topo): + + def fin(): + for i in [USER_DELADD, USER_WITH_ACI_DELADD, KIRSTENVAUGHAN, CONTAINER_1_DELADD, CONTAINER_2_DELADD, HUMAN_OU_GLOBAL]: + try: + UserAccount(topo.standalone, i).delete() + except: + pass + + request.addfinalizer(fin) + + +@pytest.fixture(scope="function") +def aci_of_user(request, topo): + aci_list = Domain(topo.standalone, DEFAULT_SUFFIX).get_attr_vals('aci') + + def finofaci(): + domain = Domain(topo.standalone, DEFAULT_SUFFIX) + domain.set('aci', None) + for i in aci_list: + domain.add("aci", i) + + request.addfinalizer(finofaci) + + +def test_allow_write_access_to_targetattr_with_a_single_attribute( + topo, aci_of_user, cleanup_tree): + """ + Modify Test 1 Allow write access to targetattr with a single attribute + :id: 620d7b82-7abf-11e8-a4db-8c16451d917b + :setup: server + :steps: + 1. Add test entry + 2. Add ACI + 3. User should follow ACI role + :expectedresults: + 1. Entry should be added + 2. Operation should succeed + 3. Operation should succeed + """ + ACI_BODY = '(targetattr = "title")(version 3.0; acl "ACI NAME"; allow (write) (userdn = "ldap:///anyone") ;)' + Domain(topo.standalone, DEFAULT_SUFFIX).add("aci", ACI_BODY) + + ou = OrganizationalUnit(topo.standalone, "ou=Product Development,{}".format(DEFAULT_SUFFIX)) + ou.create(properties={'ou': 'Product Development'}) + + properties = { + 'uid': 'Jeff Vedder', + 'cn': 'Jeff Vedder', + 'sn': 'user', + 'uidNumber': '1000', + 'gidNumber': '2000', + 'homeDirectory': '/home/' + 'JeffVedder', + 'userPassword': PW_DM + } + user = UserAccount(topo.standalone, "cn=Jeff Vedder,ou=Product Development,{}".format(DEFAULT_SUFFIX)) + user.create(properties=properties) + + # Allow write access to targetattr with a single attribute + conn = Anonymous(topo.standalone).bind() + ua = UserAccount(conn, USER_DELADD) + ua.add("title", "Architect") + assert ua.get_attr_val('title') + ua.remove("title", "Architect") + + +def test_allow_write_access_to_targetattr_with_multiple_attibutes( + topo, aci_of_user, cleanup_tree): + """ + Modify Test 2 Allow write access to targetattr with multiple attibutes + :id: 6b9f05c6-7abf-11e8-9ba1-8c16451d917b + :setup: server + :steps: + 1. Add test entry + 2. Add ACI + 3. User should follow ACI role + :expectedresults: + 1. Entry should be added + 2. Operation should succeed + 3. 
Operation should succeed + """ + ACI_BODY = '(targetattr = "telephonenumber || roomnumber")(version 3.0; acl "ACI NAME"; allow (write) (userdn = "ldap:///anyone") ;)' + Domain(topo.standalone, DEFAULT_SUFFIX).add("aci", ACI_BODY) + + ou = OrganizationalUnit(topo.standalone, "ou=Product Development,{}".format(DEFAULT_SUFFIX)) + ou.create(properties={'ou': 'Product Development'}) + + properties = { + 'uid': 'Jeff Vedder', + 'cn': 'Jeff Vedder', + 'sn': 'user', + 'uidNumber': '1000', + 'gidNumber': '2000', + 'homeDirectory': '/home/' + 'JeffVedder', + 'userPassword': PW_DM + } + user = UserAccount(topo.standalone, "cn=Jeff Vedder,ou=Product Development,{}".format(DEFAULT_SUFFIX)) + user.create(properties=properties) + + # Allow write access to targetattr with multiple attibutes + conn = Anonymous(topo.standalone).bind() + ua = UserAccount(conn, USER_DELADD) + ua.add("telephonenumber", "+1 408 555 1212") + assert ua.get_attr_val('telephonenumber') + ua.add("roomnumber", "101") + assert ua.get_attr_val('roomnumber') + + +def test_allow_write_access_to_userdn_all(topo, aci_of_user, cleanup_tree): + """ + Modify Test 3 Allow write access to userdn 'all' + :id: 70c58818-7abf-11e8-afa1-8c16451d917b + :setup: server + :steps: + 1. Add test entry + 2. Add ACI + 3. User should follow ACI role + :expectedresults: + 1. Entry should be added + 2. Operation should succeed + 3. Operation should succeed + """ + ACI_BODY = '(targetattr = "*")(version 3.0; acl "ACI NAME"; allow (write) (userdn = "ldap:///all") ;)' + Domain(topo.standalone, DEFAULT_SUFFIX).add("aci", ACI_BODY) + + for i in ['Product Development', 'Accounting']: + ou = OrganizationalUnit(topo.standalone, "ou={},{}".format(i, DEFAULT_SUFFIX)) + ou.create(properties={'ou': i}) + + for i in ['Jeff Vedder,ou=Product Development', 'Sam Carter,ou=Accounting']: + properties = { + 'uid': i, + 'cn': i, + 'sn': 'user', + 'uidNumber': '1000', + 'gidNumber': '2000', + 'homeDirectory': '/home/' + i, + 'userPassword': PW_DM + } + user = UserAccount(topo.standalone, "cn={},{}".format(i, DEFAULT_SUFFIX)) + user.create(properties=properties) + + # Allow write access to userdn 'all' + conn = Anonymous(topo.standalone).bind() + with pytest.raises(ldap.INSUFFICIENT_ACCESS): + UserAccount(conn, USER_DELADD).add("title", "Architect") + conn = UserAccount(topo.standalone, USER_WITH_ACI_DELADD).bind(PW_DM) + UserAccount(conn, USER_DELADD).add("title", "Architect") + assert UserAccount(conn, USER_DELADD).get_attr_val('title') + + +def test_allow_write_access_to_userdn_with_wildcards_in_dn( + topo, aci_of_user, cleanup_tree): + """ + Modify Test 4 Allow write access to userdn with wildcards in DN + :id: 766c2312-7abf-11e8-b57d-8c16451d917b + :setup: server + :steps: + 1. Add test entry + 2. Add ACI + 3. User should follow ACI role + :expectedresults: + 1. Entry should be added + 2. Operation should succeed + 3. 
Operation should succeed + """ + ACI_BODY = '(targetattr = "*")(version 3.0; acl "ACI NAME"; allow (write)(userdn = "ldap:///cn=*, ou=Product Development,{}") ;)'.format(DEFAULT_SUFFIX) + Domain(topo.standalone, DEFAULT_SUFFIX).add("aci", ACI_BODY) + + ou = OrganizationalUnit(topo.standalone, "ou=Product Development,{}".format(DEFAULT_SUFFIX)) + ou.create(properties={'ou': 'Product Development'}) + + properties = { + 'uid': 'Jeff Vedder', + 'cn': 'Jeff Vedder', + 'sn': 'user', + 'uidNumber': '1000', + 'gidNumber': '2000', + 'homeDirectory': '/home/' + 'JeffVedder', + 'userPassword': PW_DM + } + user = UserAccount(topo.standalone, "cn=Jeff Vedder,ou=Product Development,{}".format(DEFAULT_SUFFIX)) + user.create(properties=properties) + + conn = UserAccount(topo.standalone, USER_DELADD).bind(PW_DM) + # Allow write access to userdn with wildcards in DN + ua = UserAccount(conn, USER_DELADD) + ua.add("title", "Architect") + assert ua.get_attr_val('title') + + +def test_allow_write_access_to_userdn_with_multiple_dns(topo, aci_of_user, cleanup_tree): + """ + Modify Test 5 Allow write access to userdn with multiple DNs + :id: 7aae760a-7abf-11e8-bc3a-8c16451d917b + :setup: server + :steps: + 1. Add test entry + 2. Add ACI + 3. User should follow ACI role + :expectedresults: + 1. Entry should be added + 2. Operation should succeed + 3. Operation should succeed + """ + ACI_BODY = '(targetattr = "*")(version 3.0; acl "ACI NAME"; allow (write)(userdn = "ldap:///{} || ldap:///{}") ;)'.format(USER_DELADD, USER_WITH_ACI_DELADD) + Domain(topo.standalone, DEFAULT_SUFFIX).add("aci", ACI_BODY) + + for i in ['Product Development', 'Accounting', 'Human Resources']: + ou = OrganizationalUnit(topo.standalone, "ou={},{}".format(i, DEFAULT_SUFFIX)) + ou.create(properties={'ou': i}) + + for i in ['Jeff Vedder,ou=Product Development', 'Sam Carter,ou=Accounting', 'Kirsten Vaughan, ou=Human Resources']: + properties = { + 'uid': i, + 'cn': i, + 'sn': 'user', + 'uidNumber': '1000', + 'gidNumber': '2000', + 'homeDirectory': '/home/' + i, + 'userPassword': PW_DM + } + user = UserAccount(topo.standalone, "cn={},{}".format(i, DEFAULT_SUFFIX)) + user.create(properties=properties) + + conn = UserAccount(topo.standalone, USER_DELADD).bind(PW_DM) + # Allow write access to userdn with multiple DNs + ua = UserAccount(conn, KIRSTENVAUGHAN) + ua.add("title", "Architect") + conn = UserAccount(topo.standalone, USER_WITH_ACI_DELADD).bind(PW_DM) + # Allow write access to userdn with multiple DNs + ua = UserAccount(conn, USER_DELADD) + ua.add("title", "Architect") + assert ua.get_attr_val('title') + + +def test_allow_write_access_to_target_with_wildcards(topo, aci_of_user, cleanup_tree): + """ + Modify Test 6 Allow write access to target with wildcards + :id: 825fe884-7abf-11e8-8541-8c16451d917b + :setup: server + :steps: + 1. Add test entry + 2. Add ACI + 3. User should follow ACI role + :expectedresults: + 1. Entry should be added + 2. Operation should succeed + 3. 
Operation should succeed + """ + ACI_BODY = '(target = ldap:///{})(targetattr = "*")(version 3.0; acl "ACI NAME"; allow (write) (userdn = "ldap:///anyone") ;)'.format(DEFAULT_SUFFIX) + Domain(topo.standalone, DEFAULT_SUFFIX).add("aci", ACI_BODY) + + for i in ['Product Development', 'Accounting', 'Human Resources']: + ou = OrganizationalUnit(topo.standalone, "ou={},{}".format(i, DEFAULT_SUFFIX)) + ou.create(properties={'ou': i}) + + for i in ['Jeff Vedder,ou=Product Development', 'Sam Carter,ou=Accounting', 'Kirsten Vaughan, ou=Human Resources']: + properties = { + 'uid': i, + 'cn': i, + 'sn': 'user', + 'uidNumber': '1000', + 'gidNumber': '2000', + 'homeDirectory': '/home/' + i, + 'userPassword': PW_DM + } + user = UserAccount(topo.standalone, "cn={},{}".format(i, DEFAULT_SUFFIX)) + user.create(properties=properties) + + conn = UserAccount(topo.standalone, USER_DELADD).bind(PW_DM) + # Allow write access to target with wildcards + ua = UserAccount(conn, KIRSTENVAUGHAN) + ua.add("title", "Architect") + assert ua.get_attr_val('title') + conn = UserAccount(topo.standalone, USER_WITH_ACI_DELADD).bind(PW_DM) + # Allow write access to target with wildcards + ua = UserAccount(conn, USER_DELADD) + ua.add("title", "Architect") + assert ua.get_attr_val('title') + + +def test_allow_write_access_to_userdnattr(topo, aci_of_user, cleanup_tree): + """ + Modify Test 7 Allow write access to userdnattr + :id: 86b418f6-7abf-11e8-ae28-8c16451d917b + :setup: server + :steps: + 1. Add test entry + 2. Add ACI + 3. User should follow ACI role + :expectedresults: + 1. Entry should be added + 2. Operation should succeed + 3. Operation should succeed + """ + ACI_BODY = '(target = ldap:///{})(targetattr=*)(version 3.0; acl "$tet_thistest";allow (write) (userdn = "ldap:///anyone"); )'.format(DEFAULT_SUFFIX) + Domain(topo.standalone, DEFAULT_SUFFIX).add("aci", ACI_BODY) + + for i in ['Product Development', 'Accounting']: + ou = OrganizationalUnit(topo.standalone, "ou={},{}".format(i, DEFAULT_SUFFIX)) + ou.create(properties={'ou': i}) + + for i in ['Jeff Vedder,ou=Product Development', 'Sam Carter,ou=Accounting']: + properties = { + 'uid': i, + 'cn': i, + 'sn': 'user', + 'uidNumber': '1000', + 'gidNumber': '2000', + 'homeDirectory': '/home/' + i, + 'userPassword': PW_DM + } + user = UserAccount(topo.standalone, "cn={},{}".format(i, DEFAULT_SUFFIX)) + user.create(properties=properties) + + UserAccount(topo.standalone, USER_WITH_ACI_DELADD).add('manager', USER_WITH_ACI_DELADD) + conn = UserAccount(topo.standalone, USER_WITH_ACI_DELADD).bind(PW_DM) + # Allow write access to userdnattr + ua = UserAccount(conn, USER_DELADD) + ua.add('uid', 'scoobie') + assert ua.get_attr_val('uid') + ua.add('uid', 'jvedder') + assert ua.get_attr_val('uid') + + +def test_allow_selfwrite_access_to_anyone(topo, aci_of_user, cleanup_tree): + """ + Modify Test 8 Allow selfwrite access to anyone + :id: 8b3becf0-7abf-11e8-ac34-8c16451d917b + :setup: server + :steps: + 1. Add test entry + 2. Add ACI + 3. User should follow ACI role + :expectedresults: + 1. Entry should be added + 2. Operation should succeed + 3. 
Operation should succeed + """ + groups = Groups(topo.standalone, DEFAULT_SUFFIX) + group = groups.create(properties={"cn": "group1", + "description": "testgroup"}) + + ACI_BODY = '(target = ldap:///cn=group1,ou=Groups,{})(targetattr = "member")(version 3.0; acl "ACI NAME"; allow (selfwrite) (userdn = "ldap:///anyone") ;)'.format(DEFAULT_SUFFIX) + Domain(topo.standalone, DEFAULT_SUFFIX).add("aci", ACI_BODY) + + ou = OrganizationalUnit(topo.standalone, "ou=Product Development,{}".format(DEFAULT_SUFFIX)) + ou.create(properties={'ou': 'Product Development'}) + + properties = { + 'uid': 'Jeff Vedder', + 'cn': 'Jeff Vedder', + 'sn': 'user', + 'uidNumber': '1000', + 'gidNumber': '2000', + 'homeDirectory': '/home/' + 'JeffVedder', + 'userPassword': PW_DM + } + user = UserAccount(topo.standalone, "cn=Jeff Vedder,ou=Product Development,{}".format(DEFAULT_SUFFIX)) + user.create(properties=properties) + + conn = UserAccount(topo.standalone, USER_DELADD).bind(PW_DM) + # Allow selfwrite access to anyone + groups = Groups(conn, DEFAULT_SUFFIX) + groups.list()[0].add_member(USER_DELADD) + group.delete() + + +def test_uniquemember_should_also_be_the_owner(topo, aci_of_user): + """ + Modify Test 10 groupdnattr = \"ldap:///$BASEDN?owner\" if owner is a group, group's + uniquemember should also be the owner + :id: 9456b2d4-7abf-11e8-829d-8c16451d917b + :setup: server + :steps: + 1. Add test entry + 2. Add ACI + 3. User should follow ACI role + :expectedresults: + 1. Entry should be added + 2. Operation should succeed + 3. Operation should succeed + """ + for i in ['ACLGroupTest']: + ou = OrganizationalUnit(topo.standalone, "ou={},{}".format(i, DEFAULT_SUFFIX)) + ou.create(properties={'ou': i}) + + ou = OrganizationalUnit(topo.standalone, "ou=ACLDevelopment,{}".format(DEFAULT_SUFFIX)) + ou.create(properties={'ou': 'ACLDevelopment'}) + ou.set('aci','(targetattr="*")(version 3.0; acl "groupdnattr acl"; ' + 'allow (all)groupdnattr = "ldap:///{}?owner";)'.format(DEFAULT_SUFFIX)) + + grp = UniqueGroup(topo.standalone, "uid=anuj,ou=ACLDevelopment, {}".format(DEFAULT_SUFFIX)) + user_props = ( + {'sn': 'Borah', + 'cn': 'Anuj', + 'objectclass': ['top', 'person', 'organizationalPerson', 'inetOrgPerson', 'groupofUniquenames'], + 'userpassword': PW_DM, + 'givenname': 'Anuj', + 'ou': ['ACLDevelopment', 'People'], + 'roomnumber': '123', + 'uniquemember': 'cn=mandatory member' + } + ) + grp.create(properties=user_props) + + grp = UniqueGroup(topo.standalone, "uid=2ishani,ou=ACLDevelopment, {}".format(DEFAULT_SUFFIX)) + user_props = ( + {'sn': 'Borah', + 'cn': '2ishani', + 'objectclass': ['top', 'person','organizationalPerson', 'inetOrgPerson', 'groupofUniquenames'], + 'userpassword': PW_DM, + 'givenname': '2ishani', + 'ou': ['ACLDevelopment', 'People'], + 'roomnumber': '1234', + 'uniquemember': 'cn=mandatory member', "owner": "cn=group4, ou=ACLGroupTest, {}".format(DEFAULT_SUFFIX) + } + ) + grp.create(properties=user_props) + + grp = UniqueGroup(topo.standalone, 'cn=group1,ou=ACLGroupTest,'+DEFAULT_SUFFIX) + grp.create(properties={'cn': 'group1', + 'ou': 'groups'}) + grp.set('uniquemember', ["cn=group2, ou=ACLGroupTest, {}".format(DEFAULT_SUFFIX), + "cn=group3, ou=ACLGroupTest, {}".format(DEFAULT_SUFFIX)]) + + grp = UniqueGroup(topo.standalone, 'cn=group3,ou=ACLGroupTest,' + DEFAULT_SUFFIX) + grp.create(properties={'cn': 'group3', + 'ou': 'groups'}) + grp.set('uniquemember', ["cn=group4, ou=ACLGroupTest, {}".format(DEFAULT_SUFFIX)]) + + grp = UniqueGroup(topo.standalone, 'cn=group4,ou=ACLGroupTest,' + DEFAULT_SUFFIX) + 
grp.create(properties={ + 'cn': 'group4', + 'ou': 'groups'}) + grp.set('uniquemember', ["uid=anuj, ou=ACLDevelopment, {}".format(DEFAULT_SUFFIX)]) + + #uniquemember should also be the owner + conn = UserAccount(topo.standalone, "uid=anuj,ou=ACLDevelopment, {}".format(DEFAULT_SUFFIX)).bind(PW_DM) + ua = UserAccount(conn, "uid=2ishani, ou=ACLDevelopment, {}".format(DEFAULT_SUFFIX)) + ua.add('roomnumber', '9999') + assert ua.get_attr_val('roomnumber') + + for DN in ["cn=group4,ou=ACLGroupTest,{}".format(DEFAULT_SUFFIX), + "cn=group3,ou=ACLGroupTest,{}".format(DEFAULT_SUFFIX), + "cn=group1,ou=ACLGroupTest,{}".format(DEFAULT_SUFFIX), + "uid=2ishani,ou=ACLDevelopment,{}".format(DEFAULT_SUFFIX), + "uid=anuj,ou=ACLDevelopment,{}".format(DEFAULT_SUFFIX), "ou=ACLDevelopment,{}".format(DEFAULT_SUFFIX), + "ou=ACLGroupTest, {}".format(DEFAULT_SUFFIX)]: + UserAccount(topo.standalone, DN).delete() + + +def test_aci_with_both_allow_and_deny(topo, aci_of_user, cleanup_tree): + """ + Modify Test 12 aci with both allow and deny + :id: 9dcfe902-7abf-11e8-86dc-8c16451d917b + :setup: server + :steps: + 1. Add test entry + 2. Add ACI + 3. User should follow ACI role + :expectedresults: + 1. Entry should be added + 2. Operation should succeed + 3. Operation should succeed + """ + ACI_BODY = '(targetattr = "*")(version 3.0; acl "ACI NAME"; deny (read, search)userdn = "ldap:///{}"; allow (all) userdn = "ldap:///{}" ;)'.format(USER_WITH_ACI_DELADD, USER_DELADD) + Domain(topo.standalone, DEFAULT_SUFFIX).add("aci", ACI_BODY) + + for i in ['Product Development', 'Accounting']: + ou = OrganizationalUnit(topo.standalone, "ou={},{}".format(i, DEFAULT_SUFFIX)) + ou.create(properties={'ou': i}) + + for i in ['Jeff Vedder,ou=Product Development', 'Sam Carter,ou=Accounting']: + properties = { + 'uid': i, + 'cn': i, + 'sn': 'user', + 'uidNumber': '1000', + 'gidNumber': '2000', + 'homeDirectory': '/home/' + i, + 'userPassword': PW_DM + } + user = UserAccount(topo.standalone, "cn={},{}".format(i, DEFAULT_SUFFIX)) + user.create(properties=properties) + + conn = UserAccount(topo.standalone, USER_DELADD).bind(PW_DM) + # aci with both allow and deny, testing allow + assert UserAccount(conn, USER_WITH_ACI_DELADD).get_attr_val('uid') + conn = UserAccount(topo.standalone, USER_WITH_ACI_DELADD).bind(PW_DM) + # aci with both allow and deny, testing deny + with pytest.raises(IndexError): + UserAccount(conn, USER_WITH_ACI_DELADD).get_attr_val('uid') + + +def test_allow_owner_to_modify_entry(topo, aci_of_user, cleanup_tree): + """ + Modify Test 14 allow userdnattr = owner to modify entry + :id: aa302090-7abf-11e8-811a-8c16451d917b + :setup: server + :steps: + 1. Add test entry + 2. Add ACI + 3. User should follow ACI role + :expectedresults: + 1. Entry should be added + 2. Operation should succeed + 3. 
Operation should succeed + """ + grp = UniqueGroup(topo.standalone, 'cn=intranet,' + DEFAULT_SUFFIX) + grp.create(properties={ + 'cn': 'intranet', + 'ou': 'groups'}) + grp.set('owner', USER_WITH_ACI_DELADD) + + ACI_BODY = '(target ="ldap:///cn=intranet, {}") (targetattr ="*")(targetfilter ="(objectclass=groupOfUniqueNames)") (version 3.0;acl "$tet_thistest";allow(read, write, delete, search, compare, add) (userdnattr = "owner");)'.format(DEFAULT_SUFFIX) + Domain(topo.standalone, DEFAULT_SUFFIX).add("aci", ACI_BODY) + + for i in ['Product Development', 'Accounting']: + ou = OrganizationalUnit(topo.standalone, "ou={},{}".format(i, DEFAULT_SUFFIX)) + ou.create(properties={'ou': i}) + for i in ['Jeff Vedder,ou=Product Development', 'Sam Carter,ou=Accounting']: + properties = { + 'uid': i, + 'cn': i, + 'sn': 'user', + 'uidNumber': '1000', + 'gidNumber': '2000', + 'homeDirectory': '/home/' + i, + 'userPassword': PW_DM + } + user = UserAccount(topo.standalone, "cn={},{}".format(i, DEFAULT_SUFFIX)) + user.create(properties=properties) + + conn = UserAccount(topo.standalone, USER_WITH_ACI_DELADD).bind(PW_DM) + # allow userdnattr = owner to modify entry + ua = UserAccount(conn, 'cn=intranet,dc=example,dc=com') + ua.set('uniquemember', "cn=Andy Walker, ou=Accounting,dc=example,dc=com") + assert ua.get_attr_val('uniquemember') + + +if __name__ == "__main__": + CURRENT_FILE = os.path.realpath(__file__) + pytest.main("-s -v %s" % CURRENT_FILE) diff --git a/dirsrvtests/tests/suites/acl/modrdn_test.py b/dirsrvtests/tests/suites/acl/modrdn_test.py new file mode 100644 index 0000000..f67f3e5 --- /dev/null +++ b/dirsrvtests/tests/suites/acl/modrdn_test.py @@ -0,0 +1,299 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2019 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. 
+# --- END COPYRIGHT BLOCK ---- + +import pytest, os, ldap +from lib389._constants import DEFAULT_SUFFIX, PW_DM +from lib389.idm.user import UserAccount +from lib389.idm.account import Anonymous +from lib389.idm.group import Group, UniqueGroup +from lib389.idm.organizationalunit import OrganizationalUnit, OrganizationalUnits +from lib389.topologies import topology_st as topo +from lib389.idm.domain import Domain + +pytestmark = pytest.mark.tier1 + +CONTAINER_1_DELADD = "ou=Product Development,{}".format(DEFAULT_SUFFIX) +CONTAINER_2_DELADD = "ou=Accounting,{}".format(DEFAULT_SUFFIX) +USER_DELADD = "cn=Jeff Vedder,{}".format(CONTAINER_1_DELADD) +USER_WITH_ACI_DELADD = "cn=Sam Carter,{}".format(CONTAINER_2_DELADD) +DYNAMIC_MODRDN = "cn=Test DYNAMIC_MODRDN Group 70, {}".format(DEFAULT_SUFFIX) +SAM_DAMMY_MODRDN = "cn=Sam Carter1,ou=Accounting,{}".format(DEFAULT_SUFFIX) +TRAC340_MODRDN = "cn=TRAC340_MODRDN,{}".format(DEFAULT_SUFFIX) +NEWENTRY9_MODRDN = "cn=NEWENTRY9_MODRDN,{}".format("ou=People,{}".format(DEFAULT_SUFFIX)) +OU0_OU_MODRDN = "ou=OU0,{}".format(DEFAULT_SUFFIX) +OU2_OU_MODRDN = "ou=OU2,{}".format(DEFAULT_SUFFIX) + + +@pytest.fixture(scope="function") +def aci_of_user(request, topo): + aci_list = Domain(topo.standalone, DEFAULT_SUFFIX).get_attr_vals('aci') + + def finofaci(): + domain = Domain(topo.standalone, DEFAULT_SUFFIX) + domain.set('aci', None) + for i in aci_list: + domain.add("aci", i) + + request.addfinalizer(finofaci) + + +@pytest.fixture(scope="function") +def _add_user(request, topo): + ou = OrganizationalUnit(topo.standalone, 'ou=Product Development,{}'.format(DEFAULT_SUFFIX)) + ou.create(properties={'ou': 'Product Development'}) + + ou = OrganizationalUnit(topo.standalone, 'ou=Accounting,{}'.format(DEFAULT_SUFFIX)) + ou.create(properties={'ou': 'Accounting'}) + + groups = Group(topo.standalone, DYNAMIC_MODRDN) + group_properties = {"cn": "Test DYNAMIC_MODRDN Group 70", + "objectclass": ["top", 'groupofURLs'], + 'memberURL': 'ldap:///{}??base?(cn=*)'.format(USER_WITH_ACI_DELADD)} + groups.create(properties=group_properties) + + properties = { + 'uid': 'Jeff Vedder', + 'cn': 'Jeff Vedder', + 'sn': 'user', + 'uidNumber': '1000', + 'gidNumber': '2000', + 'homeDirectory': '/home/' + 'JeffVedder', + 'userPassword': PW_DM + } + user = UserAccount(topo.standalone, 'cn=Jeff Vedder,ou=Product Development,{}'.format(DEFAULT_SUFFIX)) + user.create(properties=properties) + + properties = { + 'uid': 'Sam Carter', + 'cn': 'Sam Carter', + 'sn': 'user', + 'uidNumber': '1000', + 'gidNumber': '2000', + 'homeDirectory': '/home/' + 'SamCarter', + 'userPassword': PW_DM + } + user = UserAccount(topo.standalone, 'cn=Sam Carter,ou=Accounting,{}'.format(DEFAULT_SUFFIX)) + user.create(properties=properties) + + def fin(): + for DN in [USER_DELADD,USER_WITH_ACI_DELADD,DYNAMIC_MODRDN,CONTAINER_2_DELADD,CONTAINER_1_DELADD]: + UserAccount(topo.standalone, DN).delete() + + request.addfinalizer(fin) + + +def test_allow_write_privilege_to_anyone(topo, _add_user, aci_of_user): + """ + Modrdn Test 1 Allow write privilege to anyone + :id: 4406f12e-7932-11e8-9dea-8c16451d917b + :setup: server + :steps: + 1. Add test entry + 2. Add ACI + 3. User should follow ACI role + :expectedresults: + 1. Entry should be added + 2. Operation should succeed + 3. 
Operation should succeed + """ + Domain(topo.standalone, DEFAULT_SUFFIX).add("aci", + '(target ="ldap:///{}")(targetattr=*)(version 3.0;acl "$tet_thistest";allow ' + '(write) (userdn = "ldap:///anyone");)'.format(DEFAULT_SUFFIX)) + conn = Anonymous(topo.standalone).bind() + # Allow write privilege to anyone + useraccount = UserAccount(conn, USER_WITH_ACI_DELADD) + useraccount.rename("cn=Jeff Vedder") + assert 'cn=Jeff Vedder,ou=Accounting,dc=example,dc=com' == useraccount.dn + useraccount = UserAccount(conn, "cn=Jeff Vedder,ou=Accounting,dc=example,dc=com") + useraccount.rename("cn=Sam Carter") + assert 'cn=Sam Carter,ou=Accounting,dc=example,dc=com' == useraccount.dn + + +def test_allow_write_privilege_to_dynamic_group_with_scope_set_to_base_in_ldap_url( + topo, _add_user, aci_of_user +): + """ + Modrdn Test 2 Allow write privilege to DYNAMIC_MODRDN group with scope set to base in LDAP URL + :id: 4c0f8c00-7932-11e8-8398-8c16451d917b + :setup: server + :steps: + 1. Add test entry + 2. Add ACI + 3. User should follow ACI role + :expectedresults: + 1. Entry should be added + 2. Operation should succeed + 3. Operation should succeed + """ + Domain(topo.standalone, DEFAULT_SUFFIX).add("aci",'(target = ldap:///{})(targetattr=*)(version 3.0; acl "$tet_thistest"; allow(all)(groupdn = "ldap:///{}"); )'.format(DEFAULT_SUFFIX, DYNAMIC_MODRDN)) + conn = UserAccount(topo.standalone, USER_WITH_ACI_DELADD).bind(PW_DM) + # Allow write privilege to DYNAMIC_MODRDN group with scope set to base in LDAP URL + useraccount = UserAccount(conn, USER_DELADD) + useraccount.rename("cn=Jeffbo Vedder") + assert 'cn=Jeffbo Vedder,ou=Product Development,dc=example,dc=com' == useraccount.dn + useraccount = UserAccount(conn, "cn=Jeffbo Vedder,{}".format(CONTAINER_1_DELADD)) + useraccount.rename("cn=Jeff Vedder") + assert 'cn=Jeff Vedder,ou=Product Development,dc=example,dc=com' == useraccount.dn + + +def test_write_access_to_naming_atributes(topo, _add_user, aci_of_user): + """ + Test for write access to naming atributes (1) + Test that check for add writes to the new naming attr + :id: 532fc630-7932-11e8-8924-8c16451d917b + :setup: server + :steps: + 1. Add test entry + 2. Add ACI + 3. User should follow ACI role + :expectedresults: + 1. Entry should be added + 2. Operation should succeed + 3. Operation should succeed + """ + Domain(topo.standalone, DEFAULT_SUFFIX).add("aci", '(target ="ldap:///{}")(targetattr != "uid")(version 3.0;acl "$tet_thistest";allow (write) (userdn = "ldap:///anyone");)'.format(DEFAULT_SUFFIX)) + conn = UserAccount(topo.standalone, USER_WITH_ACI_DELADD).bind(PW_DM) + #Test for write access to naming atributes + useraccount = UserAccount(conn, USER_WITH_ACI_DELADD) + with pytest.raises(ldap.INSUFFICIENT_ACCESS): + useraccount.rename("uid=Jeffbo Vedder") + + +def test_write_access_to_naming_atributes_two(topo, _add_user, aci_of_user): + """ + Test for write access to naming atributes (2) + :id: 5a2077d2-7932-11e8-9e7b-8c16451d917b + :setup: server + :steps: + 1. Add test entry + 2. Add ACI + 3. User should follow ACI role + 4. Now try to modrdn it to cn, won't work if request deleteoldrdn. + :expectedresults: + 1. Entry should be added + 2. Operation should succeed + 3. Operation should succeed + 4. 
Operation should not succeed + """ + Domain(topo.standalone, DEFAULT_SUFFIX).add("aci", '(target ="ldap:///{}")(targetattr != "uid")(version 3.0;acl "$tet_thistest";allow (write) (userdn = "ldap:///anyone");)'.format(DEFAULT_SUFFIX)) + properties = { + 'uid': 'Sam Carter1', + 'cn': 'Sam Carter1', + 'sn': 'user', + 'uidNumber': '1000', + 'gidNumber': '2000', + 'homeDirectory': '/home/' + 'SamCarter1' + } + user = UserAccount(topo.standalone, 'cn=Sam Carter1,ou=Accounting,{}'.format(DEFAULT_SUFFIX)) + user.create(properties=properties) + user.set("userPassword", "password") + conn = UserAccount(topo.standalone, USER_WITH_ACI_DELADD).bind(PW_DM) + # Test for write access to naming atributes + useraccount = UserAccount(conn, SAM_DAMMY_MODRDN) + with pytest.raises(ldap.INSUFFICIENT_ACCESS): + useraccount.rename("uid=Jeffbo Vedder") + UserAccount(topo.standalone, SAM_DAMMY_MODRDN).delete() + + +@pytest.mark.bz950351 +def test_access_aci_list_contains_any_deny_rule(topo, _add_user, aci_of_user): + """ + Testing bug #950351: RHDS denies MODRDN access if ACI list contains any DENY rule + Bug description: If you create a deny ACI for some or more attributes there is incorrect behaviour + as you cannot rename the entry anymore + :id: 62cbbb8a-7932-11e8-96a7-8c16451d917b + :setup: server + :steps: + 1. Add test entry + 2. Adding a new ou ou=People to $BASEDN + 3. Adding a user NEWENTRY9_MODRDN to ou=People,$BASEDN + 4. Adding an allow rule for NEWENTRY9_MODRDN and for others an aci deny rule + :expectedresults: + 1. Entry should be added + 2. Operation should succeed + 3. Operation should succeed + 4. Operation should succeed + """ + properties = { + 'uid': 'NEWENTRY9_MODRDN', + 'cn': 'NEWENTRY9_MODRDN_People', + 'sn': 'user', + 'uidNumber': '1000', + 'gidNumber': '2000', + 'homeDirectory': '/home/' + 'NEWENTRY9_MODRDN' + } + user = UserAccount(topo.standalone, 'cn=NEWENTRY9_MODRDN,ou=People,{}'.format(DEFAULT_SUFFIX)) + user.create(properties=properties) + user.set("userPassword", "password") + user.set("telephoneNumber", "989898191") + user.set("mail", "anuj@anuj.com") + user.set("givenName", "givenName") + user.set("uid", "NEWENTRY9_MODRDN") + OrganizationalUnits(topo.standalone, DEFAULT_SUFFIX).get('People').add("aci", ['(targetattr = "*") ' + '(version 3.0;acl "admin";allow (all)(userdn = "ldap:///{}");)'.format(NEWENTRY9_MODRDN), + '(targetattr = "mail") (version 3.0;acl "deny_mail";deny (write)(userdn = "ldap:///anyone");)', + '(targetattr = "uid") (version 3.0;acl "allow uid";allow (write)(userdn = "ldap:///{}");)'.format(NEWENTRY9_MODRDN)]) + UserAccount(topo.standalone, NEWENTRY9_MODRDN).replace("userpassword", "Anuj") + useraccount = UserAccount(topo.standalone, NEWENTRY9_MODRDN) + useraccount.rename("uid=newrdnchnged") + assert 'uid=newrdnchnged,ou=People,dc=example,dc=com' == useraccount.dn + + +def test_renaming_target_entry(topo, _add_user, aci_of_user): + """ + Test for renaming target entry + :id: 6be1d33a-7932-11e8-9115-8c16451d917b + :setup: server + :steps: + 1. Add test entry + 2. Create a test user entry + 3.Create a new ou entry with an aci + 4. Make sure uid=$MYUID has the access + 5. Rename ou=OU0 to ou=OU1 + 6. Create another ou=OU2 + 7. Move ou=OU1 under ou=OU2 + 8. Make sure uid=$MYUID still has the access + :expectedresults: + 1. Entry should be added + 2. Operation should succeed + 3. Operation should succeed + 4. Operation should succeed + 5. Operation should succeed + 6. Operation should succeed + 7. Operation should succeed + 8. 
Operation should succeed + """ + properties = { + 'uid': 'TRAC340_MODRDN', + 'cn': 'TRAC340_MODRDN', + 'sn': 'user', + 'uidNumber': '1000', + 'gidNumber': '2000', + 'homeDirectory': '/home/' + 'TRAC340_MODRDN' + } + user = UserAccount(topo.standalone, 'cn=TRAC340_MODRDN,{}'.format(DEFAULT_SUFFIX)) + user.create(properties=properties) + user.set("userPassword", "password") + ou = OrganizationalUnit(topo.standalone, 'ou=OU0,{}'.format(DEFAULT_SUFFIX)) + ou.create(properties={'ou': 'OU0'}) + ou.set('aci', '(targetattr=*)(version 3.0; acl "$MYUID";allow(read, search, compare) userdn = "ldap:///{}";)'.format(TRAC340_MODRDN)) + conn = UserAccount(topo.standalone, TRAC340_MODRDN).bind(PW_DM) + assert OrganizationalUnits(conn, DEFAULT_SUFFIX).get('OU0') + # Test for renaming target entry + OrganizationalUnits(topo.standalone, DEFAULT_SUFFIX).get('OU0').rename("ou=OU1") + assert OrganizationalUnits(conn, DEFAULT_SUFFIX).get('OU1') + ou = OrganizationalUnit(topo.standalone, 'ou=OU2,{}'.format(DEFAULT_SUFFIX)) + ou.create(properties={'ou': 'OU2'}) + # Test for renaming target entry + OrganizationalUnits(topo.standalone, DEFAULT_SUFFIX).get('OU1').rename("ou=OU1", newsuperior=OU2_OU_MODRDN) + assert OrganizationalUnits(conn, DEFAULT_SUFFIX).get('OU1') + + +if __name__ == "__main__": + CURRENT_FILE = os.path.realpath(__file__) + pytest.main("-s -v %s" % CURRENT_FILE) diff --git a/dirsrvtests/tests/suites/acl/repeated_ldap_add_test.py b/dirsrvtests/tests/suites/acl/repeated_ldap_add_test.py new file mode 100644 index 0000000..dc09f9f --- /dev/null +++ b/dirsrvtests/tests/suites/acl/repeated_ldap_add_test.py @@ -0,0 +1,489 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2016 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. 
+# --- END COPYRIGHT BLOCK --- +# +from subprocess import Popen + +import pytest +from lib389.paths import Paths +from lib389.tasks import * +from lib389.utils import * +from lib389.topologies import topology_st + +from lib389._constants import DN_DM, DEFAULT_SUFFIX, PASSWORD, SERVERID_STANDALONE + +pytestmark = pytest.mark.tier1 + +logging.getLogger(__name__).setLevel(logging.DEBUG) +log = logging.getLogger(__name__) + +CONFIG_DN = 'cn=config' +BOU = 'BOU' +BINDOU = 'ou=%s,%s' % (BOU, DEFAULT_SUFFIX) +BUID = 'buser123' +TUID = 'tuser0' +BINDDN = 'uid=%s,%s' % (BUID, BINDOU) +BINDPW = BUID +TESTDN = 'uid=%s,ou=people,%s' % (TUID, DEFAULT_SUFFIX) +TESTPW = TUID +BOGUSDN = 'uid=bogus,%s' % DEFAULT_SUFFIX +BOGUSDN2 = 'uid=bogus,ou=people,%s' % DEFAULT_SUFFIX +BOGUSSUFFIX = 'uid=bogus,ou=people,dc=bogus' +GROUPOU = 'ou=groups,%s' % DEFAULT_SUFFIX +BOGUSOU = 'ou=OU,%s' % DEFAULT_SUFFIX + +def get_ldap_error_msg(e, type): + return e.args[0][type] + +def pattern_accesslog(file, log_pattern): + for i in range(5): + try: + pattern_accesslog.last_pos += 1 + except AttributeError: + pattern_accesslog.last_pos = 0 + + found = None + file.seek(pattern_accesslog.last_pos) + + # Use a while true iteration because 'for line in file: hit a + # python bug that break file.tell() + while True: + line = file.readline() + found = log_pattern.search(line) + if ((line == '') or (found)): + break + + pattern_accesslog.last_pos = file.tell() + if found: + return line + else: + time.sleep(1) + return None + + +def check_op_result(server, op, dn, superior, exists, rc): + targetdn = dn + if op == 'search': + if exists: + opstr = 'Searching existing entry' + else: + opstr = 'Searching non-existing entry' + elif op == 'add': + if exists: + opstr = 'Adding existing entry' + else: + opstr = 'Adding non-existing entry' + elif op == 'modify': + if exists: + opstr = 'Modifying existing entry' + else: + opstr = 'Modifying non-existing entry' + elif op == 'modrdn': + if superior is not None: + targetdn = superior + if exists: + opstr = 'Moving to existing superior' + else: + opstr = 'Moving to non-existing superior' + else: + if exists: + opstr = 'Renaming existing entry' + else: + opstr = 'Renaming non-existing entry' + elif op == 'delete': + if exists: + opstr = 'Deleting existing entry' + else: + opstr = 'Deleting non-existing entry' + + if ldap.SUCCESS == rc: + expstr = 'be ok' + else: + expstr = 'fail with %s' % rc.__name__ + + log.info('%s %s, which should %s.' 
% (opstr, targetdn, expstr)) + time.sleep(1) + hit = 0 + try: + if op == 'search': + centry = server.search_s(dn, ldap.SCOPE_BASE, 'objectclass=*') + elif op == 'add': + server.add_s(Entry((dn, {'objectclass': 'top extensibleObject'.split(), + 'cn': 'test entry'}))) + elif op == 'modify': + server.modify_s(dn, [(ldap.MOD_REPLACE, 'description', b'test')]) + elif op == 'modrdn': + if superior is not None: + server.rename_s(dn, 'uid=new', newsuperior=superior, delold=1) + else: + server.rename_s(dn, 'uid=new', delold=1) + elif op == 'delete': + server.delete_s(dn) + else: + log.fatal('Unknown operation %s' % op) + assert False + except ldap.LDAPError as e: + hit = 1 + log.info("Exception (expected): %s" % type(e).__name__) + log.info('Desc {}'.format(get_ldap_error_msg(e,'desc'))) + assert isinstance(e, rc) + if 'matched' in e.args: + log.info('Matched is returned: {}'.format(get_ldap_error_msg(e, 'matched'))) + if rc != ldap.NO_SUCH_OBJECT: + assert False + + if ldap.SUCCESS == rc: + if op == 'search': + log.info('Search should return none') + assert len(centry) == 0 + else: + if 0 == hit: + log.info('Expected to fail with %s, but passed' % rc.__name__) + assert False + + log.info('PASSED\n') + + +@pytest.mark.bz1347760 +def test_repeated_ldap_add(topology_st): + """Prevent revealing the entry info to whom has no access rights. + + :id: 76d278bd-3e51-4579-951a-753e6703b4df + :setup: Standalone instance + :steps: + 1. Disable accesslog logbuffering + 2. Bind as "cn=Directory Manager" + 3. Add a organisational unit as BOU + 4. Add a bind user as uid=buser123,ou=BOU,dc=example,dc=com + 5. Add a test user as uid=tuser0,ou=People,dc=example,dc=com + 6. Delete aci in dc=example,dc=com + 7. Bind as Directory Manager, acquire an access log path and instance dir + 8. Bind as uid=buser123,ou=BOU,dc=example,dc=com who has no right to read the entry + 9. Bind as uid=bogus,ou=people,dc=bogus,bogus who does not exist + 10. Bind as uid=buser123,ou=BOU,dc=example,dc=com,bogus with wrong password + 11. Adding aci for uid=buser123,ou=BOU,dc=example,dc=com to ou=BOU,dc=example,dc=com. + 12. Bind as uid=buser123,ou=BOU,dc=example,dc=com now who has right to read the entry + :expectedresults: + 1. Operation should be successful + 2. Operation should be successful + 3. Operation should be successful + 4. Operation should be successful + 5. Operation should be successful + 6. Operation should be successful + 7. Operation should be successful + 8. Bind operation should be successful with no search result + 9. Bind operation should Fail + 10. Bind operation should Fail + 11. Operation should be successful + 12. Bind operation should be successful with search result + """ + log.info('Testing Bug 1347760 - Information disclosure via repeated use of LDAP ADD operation, etc.') + + log.info('Disabling accesslog logbuffering') + topology_st.standalone.modify_s(CONFIG_DN, [(ldap.MOD_REPLACE, 'nsslapd-accesslog-logbuffering', b'off')]) + + log.info('Bind as {%s,%s}' % (DN_DM, PASSWORD)) + topology_st.standalone.simple_bind_s(DN_DM, PASSWORD) + + log.info('Adding ou=%s a bind user belongs to.' 
% BOU) + topology_st.standalone.add_s(Entry((BINDOU, { + 'objectclass': 'top organizationalunit'.split(), + 'ou': BOU}))) + + log.info('Adding a bind user.') + topology_st.standalone.add_s(Entry((BINDDN, + {'objectclass': "top person organizationalPerson inetOrgPerson".split(), + 'cn': 'bind user', + 'sn': 'user', + 'userPassword': BINDPW}))) + + log.info('Adding a test user.') + topology_st.standalone.add_s(Entry((TESTDN, + {'objectclass': "top person organizationalPerson inetOrgPerson".split(), + 'cn': 'test user', + 'sn': 'user', + 'userPassword': TESTPW}))) + + log.info('Deleting aci in %s.' % DEFAULT_SUFFIX) + topology_st.standalone.modify_s(DEFAULT_SUFFIX, [(ldap.MOD_DELETE, 'aci', None)]) + + log.info('While binding as DM, acquire an access log path and instance dir') + ds_paths = Paths(serverid=topology_st.standalone.serverid, + instance=topology_st.standalone) + file_path = ds_paths.access_log + inst_dir = ds_paths.inst_dir + + log.info('Bind case 1. the bind user has no rights to read the entry itself, bind should be successful.') + log.info('Bind as {%s,%s} who has no access rights.' % (BINDDN, BINDPW)) + try: + topology_st.standalone.simple_bind_s(BINDDN, BINDPW) + except ldap.LDAPError as e: + log.info('Desc {}'.format(get_ldap_error_msg(e,'desc'))) + assert False + + file_obj = open(file_path, "r") + log.info('Access log path: %s' % file_path) + + log.info( + 'Bind case 2-1. the bind user does not exist, bind should fail with error %s' % ldap.INVALID_CREDENTIALS.__name__) + log.info('Bind as {%s,%s} who does not exist.' % (BOGUSDN, 'bogus')) + try: + topology_st.standalone.simple_bind_s(BOGUSDN, 'bogus') + except ldap.LDAPError as e: + log.info("Exception (expected): %s" % type(e).__name__) + log.info('Desc {}'.format(get_ldap_error_msg(e,'desc'))) + assert isinstance(e, ldap.INVALID_CREDENTIALS) + regex = re.compile('No such entry') + cause = pattern_accesslog(file_obj, regex) + if cause is None: + log.fatal('Cause not found - %s' % cause) + assert False + else: + log.info('Cause found - %s' % cause) + time.sleep(1) + + log.info( + 'Bind case 2-2. the bind user\'s suffix does not exist, bind should fail with error %s' % ldap.INVALID_CREDENTIALS.__name__) + log.info('Bind as {%s,%s} who does not exist.' % (BOGUSSUFFIX, 'bogus')) + with pytest.raises(ldap.INVALID_CREDENTIALS): + topology_st.standalone.simple_bind_s(BOGUSSUFFIX, 'bogus') + regex = re.compile('No suffix for bind') + cause = pattern_accesslog(file_obj, regex) + if cause is None: + log.fatal('Cause not found - %s' % cause) + assert False + else: + log.info('Cause found - %s' % cause) + time.sleep(1) + + log.info( + 'Bind case 2-3. the bind user\'s password is wrong, bind should fail with error %s' % ldap.INVALID_CREDENTIALS.__name__) + log.info('Bind as {%s,%s} who does not exist.' % (BINDDN, 'bogus')) + try: + topology_st.standalone.simple_bind_s(BINDDN, 'bogus') + except ldap.LDAPError as e: + log.info("Exception (expected): %s" % type(e).__name__) + log.info('Desc {}'.format(get_ldap_error_msg(e,'desc'))) + assert isinstance(e, ldap.INVALID_CREDENTIALS) + regex = re.compile('Invalid credentials') + cause = pattern_accesslog(file_obj, regex) + if cause is None: + log.fatal('Cause not found - %s' % cause) + assert False + else: + log.info('Cause found - %s' % cause) + time.sleep(1) + + log.info('Adding aci for %s to %s.' 
% (BINDDN, BINDOU)) + acival = '(targetattr="*")(version 3.0; acl "%s"; allow(all) userdn = "ldap:///%s";)' % (BUID, BINDDN) + log.info('aci: %s' % acival) + log.info('Bind as {%s,%s}' % (DN_DM, PASSWORD)) + topology_st.standalone.simple_bind_s(DN_DM, PASSWORD) + topology_st.standalone.modify_s(BINDOU, [(ldap.MOD_ADD, 'aci', ensure_bytes(acival))]) + time.sleep(1) + + log.info('Bind case 3. the bind user has the right to read the entry itself, bind should be successful.') + log.info('Bind as {%s,%s} which should be ok.\n' % (BINDDN, BINDPW)) + topology_st.standalone.simple_bind_s(BINDDN, BINDPW) + + log.info('The following operations are against the subtree the bind user %s has no rights.' % BINDDN) + # Search + exists = True + rc = ldap.SUCCESS + log.info( + 'Search case 1. the bind user has no rights to read the search entry, it should return no search results with %s' % rc) + check_op_result(topology_st.standalone, 'search', TESTDN, None, exists, rc) + + exists = False + rc = ldap.SUCCESS + log.info( + 'Search case 2-1. the search entry does not exist, the search should return no search results with %s' % rc.__name__) + check_op_result(topology_st.standalone, 'search', BOGUSDN, None, exists, rc) + + exists = False + rc = ldap.SUCCESS + log.info( + 'Search case 2-2. the search entry does not exist, the search should return no search results with %s' % rc.__name__) + check_op_result(topology_st.standalone, 'search', BOGUSDN2, None, exists, rc) + + # Add + exists = True + rc = ldap.INSUFFICIENT_ACCESS + log.info( + 'Add case 1. the bind user has no rights AND the adding entry exists, it should fail with %s' % rc.__name__) + check_op_result(topology_st.standalone, 'add', TESTDN, None, exists, rc) + + exists = False + rc = ldap.INSUFFICIENT_ACCESS + log.info( + 'Add case 2-1. the bind user has no rights AND the adding entry does not exist, it should fail with %s' % rc.__name__) + check_op_result(topology_st.standalone, 'add', BOGUSDN, None, exists, rc) + + exists = False + rc = ldap.INSUFFICIENT_ACCESS + log.info( + 'Add case 2-2. the bind user has no rights AND the adding entry does not exist, it should fail with %s' % rc.__name__) + check_op_result(topology_st.standalone, 'add', BOGUSDN2, None, exists, rc) + + # Modify + exists = True + rc = ldap.INSUFFICIENT_ACCESS + log.info( + 'Modify case 1. the bind user has no rights AND the modifying entry exists, it should fail with %s' % rc.__name__) + check_op_result(topology_st.standalone, 'modify', TESTDN, None, exists, rc) + + exists = False + rc = ldap.INSUFFICIENT_ACCESS + log.info( + 'Modify case 2-1. the bind user has no rights AND the modifying entry does not exist, it should fail with %s' % rc.__name__) + check_op_result(topology_st.standalone, 'modify', BOGUSDN, None, exists, rc) + + exists = False + rc = ldap.INSUFFICIENT_ACCESS + log.info( + 'Modify case 2-2. the bind user has no rights AND the modifying entry does not exist, it should fail with %s' % rc.__name__) + check_op_result(topology_st.standalone, 'modify', BOGUSDN2, None, exists, rc) + + # Modrdn + exists = True + rc = ldap.INSUFFICIENT_ACCESS + log.info( + 'Modrdn case 1. the bind user has no rights AND the renaming entry exists, it should fail with %s' % rc.__name__) + check_op_result(topology_st.standalone, 'modrdn', TESTDN, None, exists, rc) + + exists = False + rc = ldap.INSUFFICIENT_ACCESS + log.info( + 'Modrdn case 2-1. 
the bind user has no rights AND the renaming entry does not exist, it should fail with %s' % rc.__name__) + check_op_result(topology_st.standalone, 'modrdn', BOGUSDN, None, exists, rc) + + exists = False + rc = ldap.INSUFFICIENT_ACCESS + log.info( + 'Modrdn case 2-2. the bind user has no rights AND the renaming entry does not exist, it should fail with %s' % rc.__name__) + check_op_result(topology_st.standalone, 'modrdn', BOGUSDN2, None, exists, rc) + + exists = True + rc = ldap.INSUFFICIENT_ACCESS + log.info( + 'Modrdn case 3. the bind user has no rights AND the node moving an entry to exists, it should fail with %s' % rc.__name__) + check_op_result(topology_st.standalone, 'modrdn', TESTDN, GROUPOU, exists, rc) + + exists = False + rc = ldap.INSUFFICIENT_ACCESS + log.info( + 'Modrdn case 4-1. the bind user has no rights AND the node moving an entry to does not, it should fail with %s' % rc.__name__) + check_op_result(topology_st.standalone, 'modrdn', TESTDN, BOGUSOU, exists, rc) + + exists = False + rc = ldap.INSUFFICIENT_ACCESS + log.info( + 'Modrdn case 4-2. the bind user has no rights AND the node moving an entry to does not, it should fail with %s' % rc.__name__) + check_op_result(topology_st.standalone, 'modrdn', TESTDN, BOGUSOU, exists, rc) + + # Delete + exists = True + rc = ldap.INSUFFICIENT_ACCESS + log.info( + 'Delete case 1. the bind user has no rights AND the deleting entry exists, it should fail with %s' % rc.__name__) + check_op_result(topology_st.standalone, 'delete', TESTDN, None, exists, rc) + + exists = False + rc = ldap.INSUFFICIENT_ACCESS + log.info( + 'Delete case 2-1. the bind user has no rights AND the deleting entry does not exist, it should fail with %s' % rc.__name__) + check_op_result(topology_st.standalone, 'delete', BOGUSDN, None, exists, rc) + + exists = False + rc = ldap.INSUFFICIENT_ACCESS + log.info( + 'Delete case 2-2. the bind user has no rights AND the deleting entry does not exist, it should fail with %s' % rc.__name__) + check_op_result(topology_st.standalone, 'delete', BOGUSDN2, None, exists, rc) + + log.info('EXTRA: Check no regressions') + log.info('Adding aci for %s to %s.' % (BINDDN, DEFAULT_SUFFIX)) + acival = '(targetattr="*")(version 3.0; acl "%s-all"; allow(all) userdn = "ldap:///%s";)' % (BUID, BINDDN) + log.info('Bind as {%s,%s}' % (DN_DM, PASSWORD)) + topology_st.standalone.simple_bind_s(DN_DM, PASSWORD) + topology_st.standalone.modify_s(DEFAULT_SUFFIX, [(ldap.MOD_ADD, 'aci', ensure_bytes(acival))]) + time.sleep(1) + + log.info('Bind as {%s,%s}.' % (BINDDN, BINDPW)) + try: + topology_st.standalone.simple_bind_s(BINDDN, BINDPW) + except ldap.LDAPError as e: + log.info('Desc {}'.format(get_ldap_error_msg(e,'desc'))) + assert False + time.sleep(1) + + exists = False + rc = ldap.NO_SUCH_OBJECT + log.info('Search case. the search entry does not exist, the search should fail with %s' % rc.__name__) + check_op_result(topology_st.standalone, 'search', BOGUSDN2, None, exists, rc) + file_obj.close() + + exists = True + rc = ldap.ALREADY_EXISTS + log.info('Add case. the adding entry already exists, it should fail with %s' % rc.__name__) + check_op_result(topology_st.standalone, 'add', TESTDN, None, exists, rc) + + exists = False + rc = ldap.NO_SUCH_OBJECT + log.info('Modify case. the modifying entry does not exist, it should fail with %s' % rc.__name__) + check_op_result(topology_st.standalone, 'modify', BOGUSDN, None, exists, rc) + + exists = False + rc = ldap.NO_SUCH_OBJECT + log.info('Modrdn case 1. 
the renaming entry does not exist, it should fail with %s' % rc.__name__) + check_op_result(topology_st.standalone, 'modrdn', BOGUSDN, None, exists, rc) + + exists = False + rc = ldap.NO_SUCH_OBJECT + log.info('Modrdn case 2. the node moving an entry to does not, it should fail with %s' % rc.__name__) + check_op_result(topology_st.standalone, 'modrdn', TESTDN, BOGUSOU, exists, rc) + + exists = False + rc = ldap.NO_SUCH_OBJECT + log.info('Delete case. the deleting entry does not exist, it should fail with %s' % rc.__name__) + check_op_result(topology_st.standalone, 'delete', BOGUSDN, None, exists, rc) + + log.info('Inactivate %s' % BINDDN) + if ds_paths.version < '1.3': + nsinactivate = '%s/ns-inactivate.pl' % inst_dir + nsinactivate_cmd = [nsinactivate, '-D', DN_DM, '-w', PASSWORD, '-I', BINDDN] + else: + nsinactivate = '%s/ns-inactivate.pl' % ds_paths.sbin_dir + nsinactivate_cmd = [nsinactivate, '-Z', SERVERID_STANDALONE, '-D', DN_DM, '-w', PASSWORD, '-I', BINDDN] + log.info(nsinactivate_cmd) + p = Popen(nsinactivate_cmd) + assert (p.wait() == 0) + + log.info('Bind as {%s,%s} which should fail with %s.' % (BINDDN, BUID, ldap.UNWILLING_TO_PERFORM.__name__)) + try: + topology_st.standalone.simple_bind_s(BINDDN, BUID) + except ldap.LDAPError as e: + log.info("Exception (expected): %s" % type(e).__name__) + log.info('Desc {}'.format(get_ldap_error_msg(e,'desc'))) + assert isinstance(e, ldap.UNWILLING_TO_PERFORM) + + log.info('Bind as {%s,%s} which should fail with %s.' % (BINDDN, 'bogus', ldap.UNWILLING_TO_PERFORM.__name__)) + try: + topology_st.standalone.simple_bind_s(BINDDN, 'bogus') + except ldap.LDAPError as e: + log.info("Exception (expected): %s" % type(e).__name__) + log.info('Desc {}'.format(get_ldap_error_msg(e,'desc'))) + assert isinstance(e, ldap.UNWILLING_TO_PERFORM) + + log.info('SUCCESS') + + +if __name__ == '__main__': + # Run isolated + # -s for DEBUG mode + CURRENT_FILE = os.path.realpath(__file__) + pytest.main("-s %s" % CURRENT_FILE) + diff --git a/dirsrvtests/tests/suites/acl/roledn_test.py b/dirsrvtests/tests/suites/acl/roledn_test.py new file mode 100644 index 0000000..227ebd9 --- /dev/null +++ b/dirsrvtests/tests/suites/acl/roledn_test.py @@ -0,0 +1,274 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2019 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. +# --- END COPYRIGHT BLOCK ---- + +""" +This script will test different type of roles. 
+""" + +import os +import pytest + +from lib389._constants import DEFAULT_SUFFIX, PW_DM +from lib389.idm.user import UserAccounts, UserAccount +from lib389.idm.organizationalunit import OrganizationalUnits +from lib389.topologies import topology_st as topo +from lib389.idm.domain import Domain +from lib389.idm.role import NestedRoles, ManagedRoles, FilteredRoles +from lib389.idm.account import Anonymous + +import ldap + + +pytestmark = pytest.mark.tier1 + + +OU_ROLE = f"ou=roledntest,{DEFAULT_SUFFIX}" +STEVE_ROLE = f"uid=STEVE_ROLE,{OU_ROLE}" +HARRY_ROLE = f"uid=HARRY_ROLE,{OU_ROLE}" +MARY_ROLE = f"uid=MARY_ROLE,{OU_ROLE}" +ROLE1 = f"cn=ROLE1,{OU_ROLE}" +ROLE2 = f"cn=ROLE2,{OU_ROLE}" +ROLE3 = f"cn=ROLE3,{OU_ROLE}" +ROLE21 = f"cn=ROLE21,{OU_ROLE}" +ROLE31 = f"cn=ROLE31,{OU_ROLE}" +FILTERROLE = f"cn=FILTERROLE,{OU_ROLE}" +JOE_ROLE = f"uid=JOE_ROLE,{OU_ROLE}" +NOROLEUSER = f"uid=NOROLEUSER,{OU_ROLE}" +SCRACHENTRY = f"uid=SCRACHENTRY,{OU_ROLE}" +ALL_ACCESS = f"uid=all access,{OU_ROLE}" +NOT_RULE_ACCESS = f"uid=not rule access,{OU_ROLE}" +OR_RULE_ACCESS = f"uid=or rule access,{OU_ROLE}" +NESTED_ROLE_TESTER = f"uid=nested role tester,{OU_ROLE}" + + +@pytest.fixture(scope="function") +def _aci_of_user(request, topo): + """ + Removes and Restores ACIs after the test. + """ + aci_list = Domain(topo.standalone, DEFAULT_SUFFIX).get_attr_vals_utf8('aci') + + def finofaci(): + """ + Removes and Restores ACIs after the test. + """ + domain = Domain(topo.standalone, DEFAULT_SUFFIX) + domain.remove_all('aci') + for i in aci_list: + domain.add("aci", i) + + request.addfinalizer(finofaci) + + +@pytest.fixture(scope="module") +def _add_user(request, topo): + """ + A Function that will create necessary users delete the created user + """ + ous = OrganizationalUnits(topo.standalone, DEFAULT_SUFFIX) + ou_ou = ous.create(properties={'ou': 'roledntest'}) + ou_ou.set('aci', [f'(target="ldap:///{NESTED_ROLE_TESTER}")(targetattr="*") ' + f'(version 3.0; aci "nested role aci"; allow(all)' + f'roledn = "ldap:///{ROLE2}";)', + f'(target="ldap:///{OR_RULE_ACCESS}")(targetattr="*")' + f'(version 3.0; aci "or role aci"; allow(all) ' + f'roledn = "ldap:///{ROLE1} || ldap:///{ROLE21}";)', + f'(target="ldap:///{ALL_ACCESS}")(targetattr=*)' + f'(version 3.0; aci "anyone role aci"; allow(all) ' + f'roledn = "ldap:///anyone";)', + f'(target="ldap:///{NOT_RULE_ACCESS}")(targetattr=*)' + f'(version 3.0; aci "not role aci"; allow(all)' + f'roledn != "ldap:///{ROLE1} || ldap:///{ROLE21}";)']) + + nestedroles = NestedRoles(topo.standalone, OU_ROLE) + for i in [('role2', [ROLE1, ROLE21]), ('role3', [ROLE2, ROLE31])]: + nestedroles.create(properties={'cn': i[0], + 'nsRoleDN': i[1]}) + + managedroles = ManagedRoles(topo.standalone, OU_ROLE) + for i in ['ROLE1', 'ROLE21', 'ROLE31']: + managedroles.create(properties={'cn': i}) + + filterroles = FilteredRoles(topo.standalone, OU_ROLE) + filterroles.create(properties={'cn': 'filterRole', + 'nsRoleFilter': 'sn=Dr Drake', + 'description': 'filter role tester'}) + + users = UserAccounts(topo.standalone, OU_ROLE, rdn=None) + for i in [('STEVE_ROLE', ROLE1, 'Has roles 1, 2 and 3.'), + ('HARRY_ROLE', ROLE21, 'Has roles 21, 2 and 3.'), + ('MARY_ROLE', ROLE31, 'Has roles 31 and 3.')]: + users.create(properties={ + 'uid': i[0], + 'cn': i[0], + 'sn': 'user', + 'uidNumber': '1000', + 'gidNumber': '2000', + 'homeDirectory': '/home/' + i[0], + 'userPassword': PW_DM, + 'nsRoleDN': i[1], + 'Description': i[2] + }) + + for i in [('JOE_ROLE', 'Has filterRole.'), + ('NOROLEUSER', 'Has no roles.'), + 
('SCRACHENTRY', 'Entry to test rights on.'), + ('all access', 'Everyone has acccess (incl anon).'), + ('not rule access', 'Only accessible to mary.'), + ('or rule access', 'Only to steve and harry but nbot mary or anon'), + ('nested role tester', 'Only accessible to harry and steve.')]: + users.create(properties={ + 'uid': i[0], + 'cn': i[0], + 'sn': 'user', + 'uidNumber': '1000', + 'gidNumber': '2000', + 'homeDirectory': '/home/' + i[0], + 'userPassword': PW_DM, + 'Description': i[1] + }) + + # Setting SN for user JOE + UserAccount(topo.standalone, f'uid=JOE_ROLE,ou=roledntest,{DEFAULT_SUFFIX}').set('sn', 'Dr Drake') + + def fin(): + """ + It will delete the created users + """ + for i in users.list() + managedroles.list() + nestedroles.list(): + i.delete() + + request.addfinalizer(fin) + + +@pytest.mark.parametrize("user,entry", [ + (STEVE_ROLE, NESTED_ROLE_TESTER), + (HARRY_ROLE, NESTED_ROLE_TESTER), + (MARY_ROLE, NOT_RULE_ACCESS), + (STEVE_ROLE, OR_RULE_ACCESS), + (HARRY_ROLE, OR_RULE_ACCESS), + (STEVE_ROLE, ALL_ACCESS), + (HARRY_ROLE, ALL_ACCESS), + (MARY_ROLE, ALL_ACCESS), +], ids=[ + "(STEVE_ROLE, NESTED_ROLE_TESTER)", + "(HARRY_ROLE, NESTED_ROLE_TESTER)", + "(MARY_ROLE, NOT_RULE_ACCESS)", + "(STEVE_ROLE, OR_RULE_ACCESS)", + "(HARRY_ROLE, OR_RULE_ACCESS)", + "(STEVE_ROLE, ALL_ACCESS)", + "(HARRY_ROLE, ALL_ACCESS)", + "(MARY_ROLE, ALL_ACCESS)", +]) +def test_mod_seealso_positive(topo, _add_user, _aci_of_user, user, entry): + """ + Testing the roledn keyword that allows access control + based on the role of the bound user. + + :id: a33c5d6a-79f4-11e8-8551-8c16451d917b + :parametrized: yes + :setup: Standalone server + :steps: + 1. Add test entry + 2. Add ACI + 3. User should follow ACI role + :expectedresults: + 1. Entry should be added + 2. Operation should succeed + 3. Operation should succeed + """ + conn = UserAccount(topo.standalone, user).bind(PW_DM) + UserAccount(conn, entry).replace('seeAlso', 'cn=1') + + +@pytest.mark.parametrize( + "user,entry", [ + (MARY_ROLE, NESTED_ROLE_TESTER), + (STEVE_ROLE, NOT_RULE_ACCESS), + (HARRY_ROLE, NOT_RULE_ACCESS), + (MARY_ROLE, OR_RULE_ACCESS), + ], ids=[ + "(MARY_ROLE, NESTED_ROLE_TESTER)", + "(STEVE_ROLE, NOT_RULE_ACCESS)", + "(HARRY_ROLE, NOT_RULE_ACCESS)", + "(MARY_ROLE , OR_RULE_ACCESS)"] +) +def test_mod_seealso_negative(topo, _add_user, _aci_of_user, user, entry): + """ + Testing the roledn keyword that do not allows access control + based on the role of the bound user. + + :id: b2444aa2-79f4-11e8-a2c3-8c16451d917b + :parametrized: yes + :setup: Standalone server + :steps: + 1. Add test entry + 2. Add ACI + 3. User should follow ACI role + :expectedresults: + 1. Entry should be added + 2. Operation should succeed + 3. Operation should succeed + """ + conn = UserAccount(topo.standalone, user).bind(PW_DM) + user = UserAccount(conn, entry) + with pytest.raises(ldap.INSUFFICIENT_ACCESS): + user.replace('seeAlso', 'cn=1') + + +@pytest.mark.parametrize("entry", [NOT_RULE_ACCESS, ALL_ACCESS], + ids=["NOT_RULE_ACCESS", "ALL_ACCESS"]) +def test_mod_anonseealso_positive(topo, _add_user, _aci_of_user, entry): + """ + Testing the roledn keyword that allows access control + based on the role of the bound user. + + :id: c3eb41ac-79f4-11e8-aa8b-8c16451d917b + :parametrized: yes + :setup: Standalone server + :steps: + 1. Add test entry + 2. Add ACI + 3. User should follow ACI role + :expectedresults: + 1. Entry should be added + 2. Operation should succeed + 3. 
Operation should succeed + """ + conn = Anonymous(topo.standalone).bind() + UserAccount(conn, entry).replace('seeAlso', 'cn=1') + + +@pytest.mark.parametrize("entry", [NESTED_ROLE_TESTER, OR_RULE_ACCESS], + ids=["NESTED_ROLE_TESTER", "OR_RULE_ACCESS"]) +def test_mod_anonseealso_negaive(topo, _add_user, _aci_of_user, entry): + """ + Testing the roledn keyword that do not allows access control + based on the role of the bound user. + + :id: d385611a-79f4-11e8-adc8-8c16451d917b + :parametrized: yes + :setup: Standalone Instance + :steps: + 1. Add test entry + 2. Add ACI + 3. User should follow ACI role + :expectedresults: + 1. Entry should be added + 2. Operation should succeed + 3. Operation should succeed + """ + conn = Anonymous(topo.standalone).bind() + user = UserAccount(conn, entry) + with pytest.raises(ldap.INSUFFICIENT_ACCESS): + user.replace('seeAlso', 'cn=1') + + +if __name__ == "__main__": + CURRENT_FILE = os.path.realpath(__file__) + pytest.main("-s -v %s" % CURRENT_FILE) diff --git a/dirsrvtests/tests/suites/acl/search_real_part2_test.py b/dirsrvtests/tests/suites/acl/search_real_part2_test.py new file mode 100644 index 0000000..a01e0ce --- /dev/null +++ b/dirsrvtests/tests/suites/acl/search_real_part2_test.py @@ -0,0 +1,456 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2019 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. +# --- END COPYRIGHT BLOCK ---- + +import pytest, os, ldap +from lib389._constants import DEFAULT_SUFFIX, PW_DM, ErrorLog +from lib389.idm.user import UserAccount, UserAccounts +from lib389.idm.account import Accounts +from lib389.idm.organizationalunit import OrganizationalUnits +from lib389.topologies import topology_st as topo +from lib389.idm.domain import Domain + +pytestmark = pytest.mark.tier1 + +CONTAINER_1_DELADD = "ou=Product Development,{}".format(DEFAULT_SUFFIX) +CONTAINER_2_DELADD = "ou=Accounting,{}".format(DEFAULT_SUFFIX) +USER_ANUJ = "uid=Anuj Borah,{}".format(CONTAINER_1_DELADD) +USER_ANANDA = "uid=Ananda Borah,{}".format(CONTAINER_2_DELADD) + + +@pytest.fixture(scope="function") +def aci_of_user(request, topo): + aci_list = Domain(topo.standalone, DEFAULT_SUFFIX).get_attr_vals('aci') + + def finofaci(): + domain = Domain(topo.standalone, DEFAULT_SUFFIX) + domain.set('aci', None) + for i in aci_list: + domain.add("aci", i) + + request.addfinalizer(finofaci) + + +@pytest.fixture(scope="module") +def test_uer(request, topo): + topo.standalone.config.loglevel((ErrorLog.ACL_SUMMARY,)) + + ous = OrganizationalUnits(topo.standalone, DEFAULT_SUFFIX) + for i in ['Product Development', 'Accounting']: + ous.create(properties={'ou': i}) + + users = UserAccounts(topo.standalone, DEFAULT_SUFFIX, rdn='ou=Product Development') + users.create(properties={ + 'uid': 'Anuj Borah', + 'cn': 'Anuj Borah', + 'sn': 'user', + 'uidNumber': '1000', + 'gidNumber': '2000', + 'homeDirectory': '/home/' + 'AnujBorah', + 'userPassword': PW_DM + }) + + users = UserAccounts(topo.standalone, DEFAULT_SUFFIX, rdn='ou=Accounting') + users.create(properties={ + 'uid': 'Ananda Borah', + 'cn': 'Ananda Borah', + 'sn': 'user', + 'uidNumber': '1000', + 'gidNumber': '2000', + 'homeDirectory': '/home/' + 'AnandaBorah', + 'userPassword': PW_DM + }) + + +def test_deny_all_access_with__target_set_on_non_leaf(topo, test_uer, aci_of_user): + """Search Test 11 Deny all access with != target set on non-leaf + :id: f1c5d72a-6e11-11e8-aa9d-8c16451d917b + :setup: Standalone Instance + :steps: + 1. Add Entry + 2. Add ACI + 3. 
Bind with test USER_ANUJ + 4. Try search + 5. Delete Entry, test USER_ANUJ, ACI + :expectedresults: + 1. Operation should succeed + 2. Operation should succeed + 3. Operation should succeed + 4. Operation should fail + 5. Operation should succeed + """ + ACI_TARGET = "(target != ldap:///{})(targetattr=*)".format(CONTAINER_2_DELADD) + ACI_ALLOW = '(version 3.0; acl "Name of the ACI"; deny absolute (all)' + ACI_SUBJECT = 'userdn="ldap:///anyone";)' + ACI_BODY = ACI_TARGET + ACI_ALLOW + ACI_SUBJECT + Domain(topo.standalone, DEFAULT_SUFFIX).add("aci", ACI_BODY) + conn = UserAccount(topo.standalone, USER_ANANDA).bind(PW_DM) + # After binding as USER_ANANDA, the aci only exposes entries under ou=Accounting, i.e. its own entry + assert 1 == len(Accounts(conn, DEFAULT_SUFFIX).filter('(cn=*)')) + conn = UserAccount(topo.standalone, USER_ANUJ).bind(PW_DM) + # After binding as USER_ANUJ, the aci again only exposes the single entry under ou=Accounting + assert 1 == len(Accounts(conn, DEFAULT_SUFFIX).filter('(cn=*)')) + # After binding as root, the actual number of users is returned + assert 2 == len(Accounts(topo.standalone, DEFAULT_SUFFIX).filter('(cn=*)')) + + +def test_deny_all_access_with__target_set_on_wildcard_non_leaf( + topo, test_uer, aci_of_user +): + """Search Test 12 Deny all access with != target set on wildcard non-leaf + :id: 02f34640-6e12-11e8-a382-8c16451d917b + :setup: Standalone Instance + :steps: + 1. Add Entry + 2. Add ACI + 3. Bind with test USER_ANUJ + 4. Try search + 5. Delete Entry, test USER_ANUJ, ACI + :expectedresults: + 1. Operation should succeed + 2. Operation should succeed + 3. Operation should succeed + 4. Operation should fail + 5. Operation should succeed + """ + ACI_TARGET = "(target != ldap:///ou=Product*,{})(targetattr=*)".format( + DEFAULT_SUFFIX) + ACI_ALLOW = '(version 3.0; acl "Name of the ACI"; deny absolute (all)' + ACI_SUBJECT = 'userdn="ldap:///anyone";)' + ACI_BODY = ACI_TARGET + ACI_ALLOW + ACI_SUBJECT + Domain(topo.standalone, DEFAULT_SUFFIX).add("aci", ACI_BODY) + conn = UserAccount(topo.standalone, USER_ANANDA).bind(PW_DM) + # the aci limits the search to entries under ou=Product* and blocks the rest + assert 1 == len(Accounts(conn, DEFAULT_SUFFIX).filter('(cn=*)')) + conn = UserAccount(topo.standalone, USER_ANUJ).bind(PW_DM) + # the aci limits the search to entries under ou=Product* and blocks the rest + assert 1 == len(Accounts(conn, DEFAULT_SUFFIX).filter('(cn=*)')) + # with root, the aci imposes no limit and the actual number of users is returned + assert 2 == len(Accounts(topo.standalone, DEFAULT_SUFFIX).filter('(cn=*)')) + + +def test_deny_all_access_with__target_set_on_wildcard_leaf( + topo, test_uer, aci_of_user +): + """Search Test 13 Deny all access with != target set on wildcard leaf + :id: 16c54d76-6e12-11e8-b5ba-8c16451d917b + :setup: Standalone Instance + :steps: + 1. Add Entry + 2. Add ACI + 3. Bind with test USER_ANUJ + 4. Try search + 5. Delete Entry, test USER_ANUJ, ACI + :expectedresults: + 1. Operation should succeed + 2. Operation should succeed + 3. Operation should succeed + 4. Operation should fail + 5. 
Operation should succeed + """ + ACI_TARGET = "(target != ldap:///uid=Anuj*, ou=*,{})(targetattr=*)".format( + DEFAULT_SUFFIX) + ACI_ALLOW = '(version 3.0; acl "Name of the ACI"; deny absolute (all)' + ACI_SUBJECT = 'userdn="ldap:///anyone";)' + ACI_BODY = ACI_TARGET + ACI_ALLOW + ACI_SUBJECT + Domain(topo.standalone, DEFAULT_SUFFIX).add("aci", ACI_BODY) + conn = UserAccount(topo.standalone, USER_ANANDA).bind(PW_DM) + # the aci only allows entries matching uid=Anuj* and blocks the rest + assert 1 == len(Accounts(conn, DEFAULT_SUFFIX).filter('(cn=*)')) + conn = UserAccount(topo.standalone, USER_ANUJ).bind(PW_DM) + # the aci only allows entries matching uid=Anuj* and blocks the rest + assert 1 == len(Accounts(conn, DEFAULT_SUFFIX).filter('(cn=*)')) + # with root there is no aci blockage + assert 2 == len(Accounts(topo.standalone, DEFAULT_SUFFIX).filter('(cn=*)')) + + +def test_deny_all_access_with_targetfilter_using_equality_search( + topo, test_uer, aci_of_user +): + """Search Test 14 Deny all access with targetfilter using equality search + :id: 27255e04-6e12-11e8-8e35-8c16451d917b + :setup: Standalone Instance + :steps: + 1. Add Entry + 2. Add ACI + 3. Bind with test USER_ANUJ + 4. Try search + 5. Delete Entry, test USER_ANUJ, ACI + :expectedresults: + 1. Operation should succeed + 2. Operation should succeed + 3. Operation should succeed + 4. Operation should fail + 5. Operation should succeed + """ + ACI_TARGET = '(targetfilter ="(uid=Anuj Borah)")(target = ldap:///{})(targetattr=*)'.format( + DEFAULT_SUFFIX) + ACI_ALLOW = '(version 3.0; acl "Name of the ACI"; deny absolute (all)' + ACI_SUBJECT = 'userdn="ldap:///anyone";)' + ACI_BODY = ACI_TARGET + ACI_ALLOW + ACI_SUBJECT + Domain(topo.standalone, DEFAULT_SUFFIX).add("aci", ACI_BODY) + conn = UserAccount(topo.standalone, USER_ANANDA).bind(PW_DM) + # the aci blocks read access to the uid=Anuj Borah entry + assert 0 == len(Accounts(conn, DEFAULT_SUFFIX).filter('(uid=Anuj Borah)')) + conn = UserAccount(topo.standalone, USER_ANUJ).bind(PW_DM) + # the aci blocks read access to the uid=Anuj Borah entry + assert 0 == len(Accounts(conn, DEFAULT_SUFFIX).filter('(uid=Anuj Borah)')) + # with root there is no blockage + assert 1 == len(Accounts(topo.standalone, DEFAULT_SUFFIX).filter('(uid=Anuj Borah)')) + + +def test_deny_all_access_with_targetfilter_using_equality_search_two( + topo, test_uer, aci_of_user +): + """Search Test 15 Deny all access with targetfilter using != equality search + :id: 3966bcd4-6e12-11e8-83ce-8c16451d917b + :setup: Standalone Instance + :steps: + 1. Add Entry + 2. Add ACI + 3. Bind with test USER_ANUJ + 4. Try search + 5. Delete Entry, test USER_ANUJ, ACI + :expectedresults: + 1. Operation should succeed + 2. Operation should succeed + 3. Operation should succeed + 4. Operation should fail + 5. 
Operation should succeed + """ + ACI_TARGET = '(targetfilter !="(uid=Anuj Borah)")(target = ldap:///{})(targetattr=*)'.format( + DEFAULT_SUFFIX) + ACI_ALLOW = '(version 3.0; acl "Name of the ACI"; deny absolute (all)' + ACI_SUBJECT = 'userdn="ldap:///anyone";)' + ACI_BODY = ACI_TARGET + ACI_ALLOW + ACI_SUBJECT + Domain(topo.standalone, DEFAULT_SUFFIX).add("aci", ACI_BODY) + conn = UserAccount(topo.standalone, USER_ANANDA).bind(PW_DM) + # the aci only leaves the uid=Anuj Borah entry visible and blocks the rest + assert 1 == len(Accounts(conn, DEFAULT_SUFFIX).filter('(cn=*)')) + conn = UserAccount(topo.standalone, USER_ANUJ).bind(PW_DM) + # the aci only leaves the uid=Anuj Borah entry visible and blocks the rest + assert 1 == len(Accounts(conn, DEFAULT_SUFFIX).filter('(cn=*)')) + # with root there is no blockage + assert 2 == len(Accounts(topo.standalone, DEFAULT_SUFFIX).filter('(cn=*)')) + + +def test_deny_all_access_with_targetfilter_using_substring_search( + topo, test_uer, aci_of_user +): + """Search Test 16 Deny all access with targetfilter using substring search + :id: 44d7b4ba-6e12-11e8-b420-8c16451d917b + :setup: Standalone Instance + :steps: + 1. Add Entry + 2. Add ACI + 3. Bind with test USER_ANUJ + 4. Try search + 5. Delete Entry, test USER_ANUJ, ACI + :expectedresults: + 1. Operation should succeed + 2. Operation should succeed + 3. Operation should succeed + 4. Operation should fail + 5. Operation should succeed + """ + ACI_TARGET = '(targetfilter ="(uid=Anu*)")(target = ldap:///{})(targetattr=*)'.format( + DEFAULT_SUFFIX) + ACI_ALLOW = '(version 3.0; acl "Name of the ACI"; deny absolute (all)' + ACI_SUBJECT = 'userdn="ldap:///anyone";)' + ACI_BODY = ACI_TARGET + ACI_ALLOW + ACI_SUBJECT + Domain(topo.standalone, DEFAULT_SUFFIX).add("aci", ACI_BODY) + conn = UserAccount(topo.standalone, USER_ANANDA).bind(PW_DM) + # the aci blocks any entry matching uid=Anu* + assert 0 == len(Accounts(conn, DEFAULT_SUFFIX).filter('(cn=Anu*)')) + conn = UserAccount(topo.standalone, USER_ANUJ).bind(PW_DM) + # the aci blocks any entry matching uid=Anu* + assert 0 == len(Accounts(conn, DEFAULT_SUFFIX).filter('(cn=Anu*)')) + # with root there is no blockage + assert 1 == len(Accounts(topo.standalone, DEFAULT_SUFFIX).filter('(cn=Anu*)')) + + +def test_deny_all_access_with_targetfilter_using_substring_search_two( + topo, test_uer, aci_of_user +): + """Search Test 17 Deny all access with targetfilter using != substring search + :id: 55b12d98-6e12-11e8-8cf4-8c16451d917b + :setup: Standalone Instance + :steps: + 1. Add Entry + 2. Add ACI + 3. Bind with test USER_ANUJ + 4. Try search + 5. Delete Entry, test USER_ANUJ, ACI + :expectedresults: + 1. Operation should succeed + 2. Operation should succeed + 3. Operation should succeed + 4. Operation should fail + 5. 
Operation should succeed + """ + ACI_TARGET = '(targetfilter !="(uid=Anu*)")(target = ldap:///{})(targetattr=*)'.format( + DEFAULT_SUFFIX + ) + ACI_ALLOW = '(version 3.0; acl "Name of the ACI"; deny absolute (all)' + ACI_SUBJECT = 'userdn="ldap:///anyone";)' + ACI_BODY = ACI_TARGET + ACI_ALLOW + ACI_SUBJECT + Domain(topo.standalone, DEFAULT_SUFFIX).add("aci", ACI_BODY) + conn = UserAccount(topo.standalone, USER_ANANDA).bind(PW_DM) + # the aci only allows entries matching uid=Anu* and blocks the rest + assert 1 == len(Accounts(conn, DEFAULT_SUFFIX).filter('(uid=*)')) + conn = UserAccount(topo.standalone, USER_ANUJ).bind(PW_DM) + # the aci only allows entries matching uid=Anu* and blocks the rest + assert 1 == len(Accounts(conn, DEFAULT_SUFFIX).filter('(uid=*)')) + # with root there is no blockage + assert 2 == len(Accounts(topo.standalone, DEFAULT_SUFFIX).filter('(uid=*)')) + + +def test_deny_all_access_with_targetfilter_using_boolean_or_of_two_equality_search( + topo, test_uer, aci_of_user +): + """Search Test 18 Deny all access with targetfilter using boolean OR of two equality search + :id: 29cc35fa-793f-11e8-988f-8c16451d917b + :setup: Standalone Instance + :steps: + 1. Add Entry + 2. Add ACI + 3. Bind with test USER_ANUJ + 4. Try search + 5. Delete Entry, test USER_ANUJ, ACI + :expectedresults: + 1. Operation should succeed + 2. Operation should succeed + 3. Operation should succeed + 4. Operation should fail + 5. Operation should succeed + """ + Domain(topo.standalone, DEFAULT_SUFFIX).add("aci",'(target = ldap:///{})(targetattr = "*")' + '(targetfilter = (|(cn=scarter)(cn=jvaughan)))(version 3.0; acl "$tet_thistest"; ' + 'deny absolute (all) (userdn = "ldap:///anyone") ;)'.format(DEFAULT_SUFFIX)) + UserAccount(topo.standalone, USER_ANANDA).set("cn", "scarter") + UserAccount(topo.standalone, USER_ANUJ).set("cn", "jvaughan") + conn = UserAccount(topo.standalone, USER_ANANDA).bind(PW_DM) + # the aci denies all access to entries matching cn=scarter or cn=jvaughan + user = UserAccount(conn, USER_ANANDA) + with pytest.raises(IndexError): + user.get_attr_val_utf8('uid') + # the aci denies all access to entries matching cn=scarter or cn=jvaughan + user = UserAccount(conn, USER_ANUJ) + with pytest.raises(IndexError): + user.get_attr_val_utf8('uid') + # with root no blockage + assert UserAccount(topo.standalone, USER_ANANDA).get_attr_val_utf8('uid') == 'Ananda Borah' + # with root no blockage + assert UserAccount(topo.standalone, USER_ANUJ).get_attr_val_utf8('uid') == 'Anuj Borah' + + +def test_deny_all_access_to__userdn_two(topo, test_uer, aci_of_user): + """Search Test 19 Deny all access to != userdn + :id: 693496c0-6e12-11e8-80dc-8c16451d917b + :setup: Standalone Instance + :steps: + 1. Add Entry + 2. Add ACI + 3. Bind with test USER_ANUJ + 4. Try search + 5. Delete Entry, test USER_ANUJ, ACI + :expectedresults: + 1. Operation should succeed + 2. Operation should succeed + 3. Operation should succeed + 4. Operation should fail + 5. 
Operation should succeed + """ + ACI_TARGET = "(target = ldap:///{})(targetattr=*)".format(DEFAULT_SUFFIX) + ACI_ALLOW = '(version 3.0; acl "Name of the ACI"; deny absolute (all)' + ACI_SUBJECT = 'userdn!="ldap:///{}";)'.format(USER_ANANDA) + ACI_BODY = ACI_TARGET + ACI_ALLOW + ACI_SUBJECT + Domain(topo.standalone, DEFAULT_SUFFIX).add("aci", ACI_BODY) + conn = UserAccount(topo.standalone, USER_ANANDA).bind(PW_DM) + # the aci does not block anything for USER_ANANDA, but it blocks every other user + assert 2 == len(Accounts(conn, DEFAULT_SUFFIX).filter('(cn=*)')) + conn = UserAccount(topo.standalone, USER_ANUJ).bind(PW_DM) + # the aci does not block anything for USER_ANANDA, but it blocks every other user + assert 0 == len(Accounts(conn, DEFAULT_SUFFIX).filter('(cn=*)')) + # with root there is no aci blockage + assert 2 == len(Accounts(topo.standalone, DEFAULT_SUFFIX).filter('(cn=*)')) + + +def test_deny_all_access_with_userdn(topo, test_uer, aci_of_user): + """ + Search Test 20 Deny all access with userdn + :id: 75aada86-6e12-11e8-bd34-8c16451d917b + :setup: Standalone Instance + :steps: + 1. Add Entry + 2. Add ACI + 3. Bind with test USER_ANUJ + 4. Try search + 5. Delete Entry, test USER_ANUJ, ACI + :expectedresults: + 1. Operation should succeed + 2. Operation should succeed + 3. Operation should succeed + 4. Operation should fail + 5. Operation should succeed + """ + ACI_TARGET = "(target = ldap:///{})(targetattr=*)".format(DEFAULT_SUFFIX) + ACI_ALLOW = '(version 3.0; acl "Name of the ACI"; deny absolute (all)' + ACI_SUBJECT = 'userdn="ldap:///{}";)'.format(USER_ANANDA) + ACI_BODY = ACI_TARGET + ACI_ALLOW + ACI_SUBJECT + Domain(topo.standalone, DEFAULT_SUFFIX).add("aci", ACI_BODY) + conn = UserAccount(topo.standalone, USER_ANANDA).bind(PW_DM) + # the aci blocks everything for USER_ANANDA, but it does not block other users + assert 0 == len(Accounts(conn, DEFAULT_SUFFIX).filter('(cn=*)')) + conn = UserAccount(topo.standalone, USER_ANUJ).bind(PW_DM) + # the aci blocks everything for USER_ANANDA, but it does not block other users + assert 2 == len(Accounts(conn, DEFAULT_SUFFIX).filter('(cn=*)')) + # with root there is no aci blockage + assert 2 == len(Accounts(topo.standalone, DEFAULT_SUFFIX).filter('(cn=*)')) + + +def test_deny_all_access_with_targetfilter_using_presence_search( + topo, test_uer, aci_of_user +): + """ + Search Test 21 Deny all access with targetfilter using presence search + :id: 85244a42-6e12-11e8-9480-8c16451d917b + :setup: Standalone Instance + :steps: + 1. Add Entry + 2. Add ACI + 3. Bind with test USER_ANUJ + 4. Try search + 5. Delete Entry,test USER_ANUJ, ACI + :expectedresults: + 1. Operation should success + 2. Operation should success + 3. Operation should success + 4. Operation should Fail + 5. 
Operation should success + """ + user = UserAccounts(topo.standalone, DEFAULT_SUFFIX).create_test_user() + user.set('userPassword', PW_DM) + + ACI_TARGET = '(targetfilter ="(cn=*)")(target = ldap:///{})(targetattr=*)'.format( + DEFAULT_SUFFIX) + ACI_ALLOW = '(version 3.0; acl "Name of the ACI"; deny absolute (all)' + ACI_SUBJECT = 'userdn="ldap:///anyone";)' + ACI_BODY = ACI_TARGET + ACI_ALLOW + ACI_SUBJECT + Domain(topo.standalone, DEFAULT_SUFFIX).add("aci", ACI_BODY) + conn = UserAccount(topo.standalone, USER_ANANDA).bind(PW_DM) + # aci will eny_all_access_with_targetfilter_using_presence_search + user = UserAccount(conn, 'uid=test_user_1000,ou=People,{}'.format(DEFAULT_SUFFIX)) + with pytest.raises(IndexError): + user.get_attr_val_utf8('cn') + # with root no blockage + assert UserAccount(topo.standalone, 'uid=test_user_1000,ou=People,{}'.format(DEFAULT_SUFFIX)).get_attr_val_utf8('cn') == 'test_user_1000' + + +if __name__ == "__main__": + CURRENT_FILE = os.path.realpath(__file__) + pytest.main("-s -v %s" % CURRENT_FILE) diff --git a/dirsrvtests/tests/suites/acl/search_real_part3_test.py b/dirsrvtests/tests/suites/acl/search_real_part3_test.py new file mode 100644 index 0000000..9903a9f --- /dev/null +++ b/dirsrvtests/tests/suites/acl/search_real_part3_test.py @@ -0,0 +1,469 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2019 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. +# --- END COPYRIGHT BLOCK ---- + +import pytest, os, ldap +from lib389._constants import DEFAULT_SUFFIX, PW_DM, ErrorLog +from lib389.idm.user import UserAccount, UserAccounts +from lib389.idm.organization import Organization +from lib389.idm.account import Accounts, Anonymous +from lib389.idm.group import Group, UniqueGroup +from lib389.idm.organizationalunit import OrganizationalUnit +from lib389.idm.group import Groups +from lib389.topologies import topology_st as topo +from lib389.idm.domain import Domain + +pytestmark = pytest.mark.tier1 + +CONTAINER_1_DELADD = "ou=Product Development,{}".format(DEFAULT_SUFFIX) +CONTAINER_2_DELADD = "ou=Accounting,{}".format(DEFAULT_SUFFIX) +USER_ANUJ = "uid=Anuj Borah,{}".format(CONTAINER_1_DELADD) +USER_ANANDA = "uid=Ananda Borah,{}".format(CONTAINER_2_DELADD) + + +@pytest.fixture(scope="function") +def aci_of_user(request, topo): + aci_list = Domain(topo.standalone, DEFAULT_SUFFIX).get_attr_vals('aci') + + def finofaci(): + domain = Domain(topo.standalone, DEFAULT_SUFFIX) + domain.set('aci', None) + for i in aci_list: + domain.add("aci", i) + + request.addfinalizer(finofaci) + + +@pytest.fixture(scope="module") +def test_uer(request, topo): + topo.standalone.config.loglevel((ErrorLog.ACL_SUMMARY,)) + + for i in ['Product Development', 'Accounting']: + OrganizationalUnit(topo.standalone, "ou={},{}".format(i, DEFAULT_SUFFIX)).create(properties={'ou': i}) + + users = UserAccounts(topo.standalone, DEFAULT_SUFFIX, rdn='ou=Product Development') + users.create(properties={ + 'uid': 'Anuj Borah', + 'cn': 'Anuj Borah', + 'sn': 'user', + 'uidNumber': '1000', + 'gidNumber': '2000', + 'homeDirectory': '/home/' + 'AnujBorah', + 'userPassword': PW_DM + }) + + users = UserAccounts(topo.standalone, DEFAULT_SUFFIX, rdn='ou=Accounting') + users.create(properties={ + 'uid': 'Ananda Borah', + 'cn': 'Ananda Borah', + 'sn': 'user', + 'uidNumber': '1000', + 'gidNumber': '2000', + 'homeDirectory': '/home/' + 'AnandaBorah', + 'userPassword': PW_DM + }) + + +def test_deny_search_access_to_userdn_with_ldap_url(topo, test_uer, 
aci_of_user): + """ + Search Test 23 Deny search access to userdn with LDAP URL + :id: 94f082d8-6e12-11e8-be72-8c16451d917b + :setup: Standalone Instance + :steps: + 1. Add Entry + 2. Add ACI + 3. Bind with test USER_ANUJ + 4. Try search + 5. Delete Entry,test USER_ANUJ, ACI + :expectedresults: + 1. Operation should success + 2. Operation should success + 3. Operation should success + 4. Operation should Fail + 5. Operation should success + """ + ACI_TARGET = "(target = ldap:///{})(targetattr=*)".format(DEFAULT_SUFFIX) + ACI_ALLOW = '(version 3.0; acl "Name of the ACI"; deny (search)' + ACI_SUBJECT = ( + 'userdn="ldap:///%s";)' % "{}??sub?(&(roomnumber=3445))".format(DEFAULT_SUFFIX) + ) + ACI_BODY = ACI_TARGET + ACI_ALLOW + ACI_SUBJECT + Domain(topo.standalone, DEFAULT_SUFFIX).add("aci", ACI_BODY) + UserAccount(topo.standalone, USER_ANANDA).set('roomnumber', '3445') + conn = UserAccount(topo.standalone, USER_ANANDA).bind(PW_DM) + # aci will block all users having roomnumber=3445 + assert 0 == len(Accounts(conn, DEFAULT_SUFFIX).filter('(cn=*)')) + conn = UserAccount(topo.standalone, USER_ANUJ).bind(PW_DM) + # aci will block roomnumber=3445 for all users USER_ANUJ does not have roomnumber + assert 2 == len(Accounts(conn, DEFAULT_SUFFIX).filter('(cn=*)')) + # with root there is no aci blockage + UserAccount(topo.standalone, USER_ANANDA).remove('roomnumber', '3445') + + +def test_deny_search_access_to_userdn_with_ldap_url_two(topo, test_uer, aci_of_user): + """ + Search Test 24 Deny search access to != userdn with LDAP URL + :id: a1ee05d2-6e12-11e8-8260-8c16451d917b + :setup: Standalone Instance + :steps: + 1. Add Entry + 2. Add ACI + 3. Bind with test USER_ANUJ + 4. Try search + 5. Delete Entry,test USER_ANUJ, ACI + :expectedresults: + 1. Operation should success + 2. Operation should success + 3. Operation should success + 4. Operation should Fail + 5. Operation should success + """ + ACI_TARGET = "(target = ldap:///{})(targetattr=*)".format(DEFAULT_SUFFIX) + ACI_ALLOW = '(version 3.0; acl "Name of the ACI"; deny (search)' + ACI_SUBJECT = ( + 'userdn != "ldap:///%s";)' % "{}??sub?(&(roomnumber=3445))".format(DEFAULT_SUFFIX) + ) + ACI_BODY = ACI_TARGET + ACI_ALLOW + ACI_SUBJECT + Domain(topo.standalone, DEFAULT_SUFFIX).add("aci", ACI_BODY) + UserAccount(topo.standalone, USER_ANANDA).set('roomnumber', '3445') + conn = UserAccount(topo.standalone, USER_ANANDA).bind(PW_DM) + # aci will not block all users having roomnumber=3445 , it will block others + assert 2 == len(Accounts(conn, DEFAULT_SUFFIX).filter('(cn=*)')) + conn = UserAccount(topo.standalone, USER_ANUJ).bind(PW_DM) + # aci will not block all users having roomnumber=3445 , it will block others + assert 0 == len(Accounts(conn, DEFAULT_SUFFIX).filter('(cn=*)')) + # with root there is no aci blockage + UserAccount(topo.standalone, USER_ANANDA).remove('roomnumber', '3445') + + +def test_deny_search_access_to_userdn_with_ldap_url_matching_all_users( + topo, test_uer, aci_of_user +): + """ + Search Test 25 Deny search access to userdn with LDAP URL matching all users + :id: b37f72ae-6e12-11e8-9c98-8c16451d917b + :setup: Standalone Instance + :steps: + 1. Add Entry + 2. Add ACI + 3. Bind with test USER_ANUJ + 4. Try search + 5. Delete Entry,test USER_ANUJ, ACI + :expectedresults: + 1. Operation should success + 2. Operation should success + 3. Operation should success + 4. Operation should Fail + 5. 
Operation should success + """ + ACI_TARGET = "(target = ldap:///{})(targetattr=*)".format(DEFAULT_SUFFIX) + ACI_ALLOW = '(version 3.0; acl "Name of the ACI"; deny (search)' + ACI_SUBJECT = 'userdn = "ldap:///%s";)' % "{}??sub?(&(cn=*))".format(DEFAULT_SUFFIX) + ACI_BODY = ACI_TARGET + ACI_ALLOW + ACI_SUBJECT + Domain(topo.standalone, DEFAULT_SUFFIX).add("aci", ACI_BODY) + conn = UserAccount(topo.standalone, USER_ANANDA).bind(PW_DM) + # aci will block all users LDAP URL matching all users + assert 0 == len(Accounts(conn, DEFAULT_SUFFIX).filter('(cn=*)')) + conn = UserAccount(topo.standalone, USER_ANUJ).bind(PW_DM) + # aci will block all users LDAP URL matching all users + assert 0 == len(Accounts(conn, DEFAULT_SUFFIX).filter('(cn=*)')) + # with root there is no aci blockage + assert 2 == len(Accounts(topo.standalone, DEFAULT_SUFFIX).filter('(cn=*)')) + + +def test_deny_read_access_to_a_dynamic_group(topo, test_uer, aci_of_user): + """ + Search Test 26 Deny read access to a dynamic group + :id: c0c5290e-6e12-11e8-a900-8c16451d917b + :setup: Standalone Instance + :steps: + 1. Add Entry + 2. Add ACI + 3. Bind with test USER_ANUJ + 4. Try search + 5. Delete Entry,test USER_ANUJ, ACI + :expectedresults: + 1. Operation should success + 2. Operation should success + 3. Operation should success + 4. Operation should Fail + 5. Operation should success + """ + groups = Groups(topo.standalone, DEFAULT_SUFFIX) + group_properties = {"cn": "group1", "description": "testgroup"} + group = groups.create(properties=group_properties) + group.add('objectClass', 'groupOfURLS') + group.set('memberURL', "ldap:///{}??sub?(&(ou=Accounting)(cn=Sam*))".format(DEFAULT_SUFFIX)) + group.add_member(USER_ANANDA) + + ACI_TARGET = '(target = ldap:///{})(targetattr = "*")'.format(DEFAULT_SUFFIX) + ACI_ALLOW = '(version 3.0; acl "All rights for %s"; deny(read)' % "Unknown" + ACI_SUBJECT = 'groupdn = "ldap:///cn=group1,ou=Groups,{}";)'.format(DEFAULT_SUFFIX) + ACI_BODY = ACI_TARGET + ACI_ALLOW + ACI_SUBJECT + Domain(topo.standalone, DEFAULT_SUFFIX).add("aci", ACI_BODY) + conn = UserAccount(topo.standalone, USER_ANANDA).bind(PW_DM) + # aci will block all 'memberURL', "ldap:///{}??sub?(&(ou=Accounting)(cn=Sam*))".format(DEFAULT_SUFFIX) + assert 0 == len(Accounts(conn, DEFAULT_SUFFIX).filter('(cn=*)')) + conn = UserAccount(topo.standalone, USER_ANUJ).bind(PW_DM) + # USER_ANUJ is not a member + assert 2 == len(Accounts(conn, DEFAULT_SUFFIX).filter('(cn=*)')) + group.delete() + + +def test_deny_read_access_to_dynamic_group_with_host_port_set_on_ldap_url( + topo, test_uer, aci_of_user +): + """ + Search Test 27 Deny read access to dynamic group with host:port set on LDAP URL + :id: ceb62158-6e12-11e8-8c36-8c16451d917b + :setup: Standalone Instance + :steps: + 1. Add Entry + 2. Add ACI + 3. Bind with test USER_ANUJ + 4. Try search + 5. Delete Entry,test USER_ANUJ, ACI + :expectedresults: + 1. Operation should success + 2. Operation should success + 3. Operation should success + 4. Operation should Fail + 5. 
Operation should success + """ + groups = Groups(topo.standalone, DEFAULT_SUFFIX) + group = groups.create(properties={"cn": "group1", + "description": "testgroup" + }) + group.add('objectClass', 'groupOfURLS') + group.set('memberURL', "ldap:///localhost:38901/{}??sub?(&(ou=Accounting)(cn=Sam*))".format(DEFAULT_SUFFIX)) + group.add_member(USER_ANANDA) + + ACI_TARGET = '(target = ldap:///{})(targetattr = "*")'.format(DEFAULT_SUFFIX) + ACI_ALLOW = '(version 3.0; acl "All rights for %s"; deny(read)' % "Unknown" + ACI_SUBJECT = 'groupdn = "ldap:///cn=group1,ou=Groups,{}";)'.format(DEFAULT_SUFFIX) + ACI_BODY = ACI_TARGET + ACI_ALLOW + ACI_SUBJECT + Domain(topo.standalone, DEFAULT_SUFFIX).add("aci", ACI_BODY) + conn = UserAccount(topo.standalone, USER_ANANDA).bind(PW_DM) + # aci will block 'memberURL', "ldap:///localhost:38901/dc=example,dc=com??sub?(&(ou=Accounting)(cn=Sam*))" + assert 0 == len(Accounts(conn, DEFAULT_SUFFIX).filter('(cn=*)')) + # with root there is no aci blockage + assert 2 == len(Accounts(topo.standalone, DEFAULT_SUFFIX).filter('(cn=*)')) + group.delete() + + +def test_deny_read_access_to_dynamic_group_with_scope_set_to_one_in_ldap_url( + topo, test_uer, aci_of_user +): + """ + Search Test 28 Deny read access to dynamic group with scope set to "one" in LDAP URL + :id: ddb30432-6e12-11e8-94db-8c16451d917b + :setup: Standalone Instance + :steps: + 1. Add Entry + 2. Add ACI + 3. Bind with test USER_ANUJ + 4. Try search + 5. Delete Entry,test USER_ANUJ, ACI + :expectedresults: + 1. Operation should success + 2. Operation should success + 3. Operation should success + 4. Operation should Fail + 5. Operation should success + """ + groups = Groups(topo.standalone, DEFAULT_SUFFIX) + group = groups.create(properties={"cn": "group1", + "description": "testgroup" + }) + group.add('objectClass', 'groupOfURLS') + group.set('memberURL', "ldap:///{}??sub?(&(ou=Accounting)(cn=Sam*))".format(DEFAULT_SUFFIX)) + group.add_member(USER_ANANDA) + + ACI_TARGET = '(targetattr = "*")' + ACI_ALLOW = '(version 3.0; acl "All rights for %s"; deny(read) ' % "Unknown" + ACI_SUBJECT = 'groupdn != "ldap:///cn=group1,ou=Groups,{}";)'.format(DEFAULT_SUFFIX) + ACI_BODY = ACI_TARGET + ACI_ALLOW + ACI_SUBJECT + Domain(topo.standalone, DEFAULT_SUFFIX).add("aci", ACI_BODY) + conn = UserAccount(topo.standalone, USER_ANANDA).bind(PW_DM) + # aci will allow only 'memberURL', "ldap:///{dc=example,dc=com??sub?(&(ou=Accounting)(cn=Sam*))" + assert 2 == len(Accounts(conn, DEFAULT_SUFFIX).filter('(cn=*)')) + conn = UserAccount(topo.standalone, USER_ANUJ).bind(PW_DM) + # aci will allow only 'memberURL', "ldap:///{dc=example,dc=com??sub?(&(ou=Accounting)(cn=Sam*))" + assert 0 == len(Accounts(conn, DEFAULT_SUFFIX).filter('(cn=*)')) + group.delete() + + +def test_deny_read_access_to_dynamic_group_two(topo, test_uer, aci_of_user): + """ + Search Test 29 Deny read access to != dynamic group + :id: eae2a6c6-6e12-11e8-80f3-8c16451d917b + :setup: Standalone Instance + :steps: + 1. Add Entry + 2. Add ACI + 3. Bind with test USER_ANUJ + 4. Try search + 5. Delete Entry,test USER_ANUJ, ACI + :expectedresults: + 1. Operation should success + 2. Operation should success + 3. Operation should success + 4. Operation should Fail + 5. 
Operation should success + """ + groups = Groups(topo.standalone, DEFAULT_SUFFIX) + group_properties = {"cn": "group1", + "description": "testgroup" + } + group = groups.create(properties=group_properties) + group.add('objectClass', 'groupofuniquenames') + group.set('uniquemember', [USER_ANANDA,USER_ANUJ]) + + ACI_TARGET = '(targetattr = "*")' + ACI_ALLOW = '(version 3.0; acl "All rights for %s"; deny(read) ' % "Unknown" + ACI_SUBJECT = 'groupdn = "ldap:///cn=group1,ou=Groups,{}";)'.format(DEFAULT_SUFFIX) + ACI_BODY = ACI_TARGET + ACI_ALLOW + ACI_SUBJECT + Domain(topo.standalone, DEFAULT_SUFFIX).add("aci", ACI_BODY) + conn = UserAccount(topo.standalone, USER_ANANDA).bind(PW_DM) + # aci will block groupdn = "ldap:///cn=group1,ou=Groups,dc=example,dc=com";) + assert 0 == len(Accounts(conn, DEFAULT_SUFFIX).filter('(cn=*)')) + conn = UserAccount(topo.standalone, USER_ANUJ).bind(PW_DM) + # aci will block groupdn = "ldap:///cn=group1,ou=Groups,dc=example,dc=com";) + assert 0 == len(Accounts(conn, DEFAULT_SUFFIX).filter('(cn=*)')) + # with root there is no aci blockage + assert 2 == len(Accounts(topo.standalone, DEFAULT_SUFFIX).filter('(cn=*)')) + group.delete() + + +def test_deny_access_to_group_should_deny_access_to_all_uniquemember( + topo, test_uer, aci_of_user +): + """ + Search Test 38 Deny access to group should deny access to all uniquemember (including chain group) + :id: 56b470e4-7941-11e8-912b-8c16451d917b + :setup: Standalone Instance + :steps: + 1. Add Entry + 2. Add ACI + 3. Bind with test USER_ANUJ + 4. Try search + 5. Delete Entry,test USER_ANUJ, ACI + :expectedresults: + 1. Operation should success + 2. Operation should success + 3. Operation should success + 4. Operation should Fail + 5. Operation should success + """ + + grp = UniqueGroup(topo.standalone, 'cn=Nested Group 1,' + DEFAULT_SUFFIX) + grp.create(properties={ + 'cn': 'Nested Group 1', + 'ou': 'groups', + 'uniquemember': "cn=Nested Group 2, {}".format(DEFAULT_SUFFIX) + }) + + grp = UniqueGroup(topo.standalone, 'cn=Nested Group 2,' + DEFAULT_SUFFIX) + grp.create(properties={ + 'cn': 'Nested Group 2', + 'ou': 'groups', + 'uniquemember': "cn=Nested Group 3, {}".format(DEFAULT_SUFFIX) + }) + + grp = UniqueGroup(topo.standalone, 'cn=Nested Group 3,' + DEFAULT_SUFFIX) + grp.create(properties={ + 'cn': 'Nested Group 3', + 'ou': 'groups', + 'uniquemember': [USER_ANANDA, USER_ANUJ] + }) + + Domain(topo.standalone, DEFAULT_SUFFIX).add("aci", '(target = ldap:///{})(targetattr=*)' + '(version 3.0; acl "$tet_thistest"; deny(read)(groupdn = "ldap:///cn=Nested Group 1, {}"); )'.format(DEFAULT_SUFFIX, DEFAULT_SUFFIX)) + conn = UserAccount(topo.standalone, USER_ANANDA).bind(PW_DM) + # deny_access_to_group_should_deny_access_to_all_uniquemember + assert 0 == len(Accounts(conn, DEFAULT_SUFFIX).filter('(cn=*)')) + conn = UserAccount(topo.standalone, USER_ANUJ).bind(PW_DM) + # deny_access_to_group_should_deny_access_to_all_uniquemember + assert 0 == len(Accounts(conn, DEFAULT_SUFFIX).filter('(cn=*)')) + # with root there is no aci blockage + assert 2 == len(Accounts(topo.standalone, DEFAULT_SUFFIX).filter('(cn=*)')) + + +def test_entry_with_lots_100_attributes(topo, test_uer, aci_of_user): + """ + Search Test 39 entry with lots (>100) attributes + :id: fc155f74-6e12-11e8-96ac-8c16451d917b + :setup: Standalone Instance + :steps: + 1. Add Entry + 2. Bind with test USER_ANUJ + 3. Try search + 4. Delete Entry,test USER_ANUJ, ACI + :expectedresults: + 1. Operation should success + 3. Operation should success + 4. Operation should success + 5. 
Operation should success + """ + for i in range(100): + user = UserAccounts(topo.standalone, DEFAULT_SUFFIX, rdn='ou=People').create_test_user(uid=i) + user.set("userPassword", "password") + + conn = UserAccount(topo.standalone, "uid=test_user_1,ou=People,{}".format(DEFAULT_SUFFIX)).bind(PW_DM) + # no aci no blockage + assert 1 == len(Accounts(conn, DEFAULT_SUFFIX).filter('(cn=Anuj*)')) + # no aci no blockage + assert 102 == len(Accounts(conn, DEFAULT_SUFFIX).filter('(uid=*)')) + conn = Anonymous(topo.standalone).bind() + # anonymous_search_on_monitor_entry + assert 102 == len(Accounts(conn, DEFAULT_SUFFIX).filter('(uid=*)')) + + +@pytest.mark.bz301798 +def test_groupdnattr_value_is_another_group(topo): + """ + Search Test 42 groupdnattr value is another group test #1 + :id: 52299e16-7944-11e8-b471-8c16451d917b + :setup: server + :steps: + 1. Add test entry + 2. Add ACI + 3. USER_ANUJ should follow ACI role + :expectedresults: + 1. Entry should be added + 2. Operation should succeed + 3. Operation should succeed + """ + Organization(topo.standalone).create(properties={"o": "nscpRoot"}, basedn=DEFAULT_SUFFIX) + + user = UserAccount(topo.standalone, "cn=dchan,o=nscpRoot,{}".format(DEFAULT_SUFFIX)) + user.create(properties={ + 'uid': 'dchan', + 'cn': 'dchan', + 'sn': 'user', + 'uidNumber': '1000', + 'gidNumber': '2000', + 'homeDirectory': '/home/' + 'dchan', + 'userPassword': PW_DM + }) + + grp = UniqueGroup(topo.standalone, 'cn=groupx,o=nscpRoot,' + DEFAULT_SUFFIX) + grp.create(properties={ + 'cn': 'groupx', + 'ou': 'groups', + }) + grp.set('uniquemember', 'cn=dchan,o=nscpRoot,{}'.format(DEFAULT_SUFFIX)) + grp.set('aci', '(targetattr="*")(version 3.0; acl "Enable Group Expansion"; allow (read, search, compare) groupdnattr="ldap:///o=nscpRoot?uniquemember?sub";)') + + conn = UserAccount(topo.standalone, 'cn=dchan,o=nscpRoot,{}'.format(DEFAULT_SUFFIX),).bind(PW_DM) + # acil will allow ldap:///o=nscpRoot?uniquemember?sub" + assert UserAccount(conn, 'cn=groupx,o=nscpRoot,{}'.format(DEFAULT_SUFFIX)).get_attr_val_utf8('cn') == 'groupx' + + +if __name__ == "__main__": + CURRENT_FILE = os.path.realpath(__file__) + pytest.main("-s -v %s" % CURRENT_FILE) diff --git a/dirsrvtests/tests/suites/acl/search_real_test.py b/dirsrvtests/tests/suites/acl/search_real_test.py new file mode 100644 index 0000000..e2b8ff4 --- /dev/null +++ b/dirsrvtests/tests/suites/acl/search_real_test.py @@ -0,0 +1,410 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2019 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. 
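Each of the search tests above exercises the same compose/add/bind/verify pattern: an ACI is assembled from a target clause, a permission clause and a bind rule, added to the suffix entry, and then checked by re-binding as an affected user. The following is a minimal sketch of that pattern, assuming a running standalone instance (as supplied by the topology_st fixture) and an existing user entry that binds with PW_DM; the helper name and ACL label are illustrative only, not values taken from the suite.

from lib389._constants import DEFAULT_SUFFIX, PW_DM
from lib389.idm.account import Accounts
from lib389.idm.domain import Domain
from lib389.idm.user import UserAccount


def deny_search_for(inst, bind_dn):
    # Assemble the ACI from the same three fragments used throughout the suite:
    # a target clause, a permission clause ("deny (search)") and a userdn bind rule.
    aci = ('(target = ldap:///{})(targetattr="*")'.format(DEFAULT_SUFFIX)
           + '(version 3.0; acl "illustrative deny"; deny (search)'
           + 'userdn="ldap:///{}";)'.format(bind_dn))
    Domain(inst, DEFAULT_SUFFIX).add('aci', aci)
    # Re-bind as the denied user: the search should now return nothing,
    # while the Directory Manager connection (inst itself) still sees the entries.
    conn = UserAccount(inst, bind_dn).bind(PW_DM)
    assert len(Accounts(conn, DEFAULT_SUFFIX).filter('(cn=*)')) == 0
    assert len(Accounts(inst, DEFAULT_SUFFIX).filter('(cn=*)')) > 0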
+# --- END COPYRIGHT BLOCK ---- + +import pytest, os, ldap +from lib389._constants import DEFAULT_SUFFIX, PW_DM, ErrorLog +from lib389.idm.user import UserAccount, UserAccounts +from lib389.idm.account import Accounts +from lib389.idm.organizationalunit import OrganizationalUnit +from lib389.idm.group import Groups +from lib389.topologies import topology_st as topo +from lib389.idm.domain import Domain +from lib389.idm.posixgroup import PosixGroups + +pytestmark = pytest.mark.tier1 + +CONTAINER_1_DELADD = "ou=Product Development,{}".format(DEFAULT_SUFFIX) +CONTAINER_2_DELADD = "ou=Accounting,{}".format(DEFAULT_SUFFIX) +USER_ANUJ = "uid=Anuj Borah,{}".format(CONTAINER_1_DELADD) +USER_ANANDA = "uid=Ananda Borah,{}".format(CONTAINER_2_DELADD) + + +@pytest.fixture(scope="function") +def aci_of_user(request, topo): + aci_list = Domain(topo.standalone, DEFAULT_SUFFIX).get_attr_vals('aci') + + def finofaci(): + domain = Domain(topo.standalone, DEFAULT_SUFFIX) + domain.set('aci', None) + for i in aci_list: + domain.add("aci", i) + + request.addfinalizer(finofaci) + + +@pytest.fixture(scope="module") +def test_uer(request, topo): + topo.standalone.config.loglevel((ErrorLog.ACL_SUMMARY,)) + + for i in ['Product Development', 'Accounting']: + OrganizationalUnit(topo.standalone, "ou={},{}".format(i, DEFAULT_SUFFIX)).create(properties={'ou': i}) + + users = UserAccounts(topo.standalone, DEFAULT_SUFFIX, rdn='ou=Product Development') + users.create(properties={ + 'uid': 'Anuj Borah', + 'cn': 'Anuj Borah', + 'sn': 'user', + 'uidNumber': '1000', + 'gidNumber': '2000', + 'homeDirectory': '/home/' + 'AnujBorah', + 'userPassword': PW_DM + }) + + users = UserAccounts(topo.standalone, DEFAULT_SUFFIX, rdn='ou=Accounting') + users.create(properties={ + 'uid': 'Ananda Borah', + 'cn': 'Ananda Borah', + 'sn': 'user', + 'uidNumber': '1000', + 'gidNumber': '2000', + 'homeDirectory': '/home/' + 'AnandaBorah', + 'userPassword': PW_DM + }) + + +def test_deny_all_access_with_target_set(topo, test_uer, aci_of_user): + """Test that Deny all access with target set + :id: 0550e680-6e0e-11e8-82f4-8c16451d917b + :setup: Standalone Instance + :steps: + 1. Add Entry + 2. Add ACI + 3. Bind with test USER_ANUJ + 4. Try search + 5. Delete Entry,test USER_ANUJ, ACI + :expectedresults: + 1. Operation should success + 2. Operation should success + 3. Operation should success + 4. Operation should Fail + 5. Operation should success + """ + ACI_TARGET = "(target = ldap:///{})(targetattr=*)".format(USER_ANANDA) + ACI_ALLOW = '(version 3.0; acl "Name of the ACI"; deny absolute (all)' + ACI_SUBJECT = 'userdn="ldap:///anyone";)' + ACI_BODY = ACI_TARGET + ACI_ALLOW + ACI_SUBJECT + Domain(topo.standalone, DEFAULT_SUFFIX).add("aci", ACI_BODY) + conn = UserAccount(topo.standalone, USER_ANANDA).bind(PW_DM) + # aci will block all for all usrs + assert 0 == len(Accounts(conn, DEFAULT_SUFFIX).filter('(cn=Ananda*)')) + conn = UserAccount(topo.standalone, USER_ANUJ).bind(PW_DM) + # aci will block all for all usrs + assert 0 == len(Accounts(conn, DEFAULT_SUFFIX).filter('(cn=Ananda*)')) + # with root there is no aci blockage + assert 1 == len(Accounts(topo.standalone, DEFAULT_SUFFIX).filter('(cn=Ananda*)')) + + +def test_deny_all_access_to_a_target_with_wild_card(topo, test_uer, aci_of_user): + """Search Test 2 Deny all access to a target with wild card + :id: 1c370f98-6e11-11e8-9f10-8c16451d917b + :setup: Standalone Instance + :steps: + 1. Add Entry + 2. Add ACI + 3. Bind with test USER_ANUJ + 4. Try search + 5. 
Delete Entry,test USER_ANUJ, ACI + :expectedresults: + 1. Operation should success + 2. Operation should success + 3. Operation should success + 4. Operation should Fail + 5. Operation should success + """ + ACI_TARGET = "(target = ldap:///uid=Ananda*, ou=*,{})(targetattr=*)".format( + DEFAULT_SUFFIX + ) + ACI_ALLOW = '(version 3.0; acl "Name of the ACI"; deny absolute (all)' + ACI_SUBJECT = 'userdn="ldap:///anyone";)' + ACI_BODY = ACI_TARGET + ACI_ALLOW + ACI_SUBJECT + Domain(topo.standalone, DEFAULT_SUFFIX).add("aci", ACI_BODY) + conn = UserAccount(topo.standalone, USER_ANANDA).bind(PW_DM) + # aci will block (cn=Sam*) for all usrs + assert 0 == len(Accounts(conn, DEFAULT_SUFFIX).filter('(cn=Ananda*)')) + conn = UserAccount(topo.standalone, USER_ANUJ).bind(PW_DM) + # aci will block (cn=Sam*) for all usrs + assert 0 == len(Accounts(conn, DEFAULT_SUFFIX).filter('(cn=Ananda*)')) + # with root there is no aci blockage + assert 1 == len(Accounts(topo.standalone, DEFAULT_SUFFIX).filter('(cn=Ananda*)')) + + +def test_deny_all_access_without_a_target_set(topo, test_uer, aci_of_user): + """Search Test 3 Deny all access without a target set + :id: 2dbeb36a-6e11-11e8-ab9f-8c16451d917b + :setup: Standalone Instance + :steps: + 1. Add Entry + 2. Add ACI + 3. Bind with test USER_ANUJ + 4. Try search + 5. Delete Entry,test USER_ANUJ, ACI + :expectedresults: + 1. Operation should success + 2. Operation should success + 3. Operation should success + 4. Operation should Fail + 5. Operation should success + """ + ACI_TARGET = "(targetattr=*)" + ACI_ALLOW = '(version 3.0; acl "Name of the ACI"; deny absolute (all)' + ACI_SUBJECT = 'userdn="ldap:///anyone";)' + ACI_BODY = ACI_TARGET + ACI_ALLOW + ACI_SUBJECT + Domain(topo.standalone, DEFAULT_SUFFIX).add("aci", ACI_BODY) + conn = UserAccount(topo.standalone, USER_ANANDA).bind(PW_DM) + # aci will block all for all usrs + assert 0 == len(Accounts(conn, DEFAULT_SUFFIX).filter('(ou=Accounting)')) + conn = UserAccount(topo.standalone, USER_ANUJ).bind(PW_DM) + # aci will block all for all usrs + assert 0 == len(Accounts(conn, DEFAULT_SUFFIX).filter('(ou=Accounting)')) + # with root there is no aci blockage + assert 1 == len(Accounts(topo.standalone, DEFAULT_SUFFIX).filter('(ou=Accounting)')) + + +def test_deny_read_search_and_compare_access_with_target_and_targetattr_set( + topo, test_uer, aci_of_user +): + """Search Test 4 Deny read, search and compare access with target and targetattr set + :id: 3f4a87e4-6e11-11e8-a09f-8c16451d917b + :setup: Standalone Instance + :steps: + 1. Add Entry + 2. Add ACI + 3. Bind with test USER_ANUJ + 4. Try search + 5. Delete Entry,test USER_ANUJ, ACI + :expectedresults: + 1. Operation should success + 2. Operation should success + 3. Operation should success + 4. Operation should Fail + 5. 
Operation should success + """ + ACI_TARGET = "(target = ldap:///{})(targetattr=*)".format(CONTAINER_2_DELADD) + ACI_ALLOW = '(version 3.0; acl "Name of the ACI"; deny absolute (all)' + ACI_SUBJECT = 'userdn="ldap:///anyone";)' + ACI_BODY = ACI_TARGET + ACI_ALLOW + ACI_SUBJECT + Domain(topo.standalone, DEFAULT_SUFFIX).add("aci", ACI_BODY) + conn = UserAccount(topo.standalone, USER_ANANDA).bind(PW_DM) + # aci will block all for all usrs + assert 0 == len(Accounts(conn, DEFAULT_SUFFIX).filter('(ou=Accounting)')) + conn = UserAccount(topo.standalone, USER_ANUJ).bind(PW_DM) + # aci will block all for all usrs + assert 0 == len(Accounts(conn, DEFAULT_SUFFIX).filter('(ou=Accounting)')) + # with root there is no aci blockage + assert 1 == len(Accounts(topo.standalone, DEFAULT_SUFFIX).filter('(ou=Accounting)')) + + +def test_deny_read_access_to_multiple_groupdns(topo, test_uer, aci_of_user): + """Search Test 6 Deny read access to multiple groupdn's + :id: 8f3ba440-6e11-11e8-8b20-8c16451d917b + :setup: Standalone Instance + :steps: + 1. Add Entry + 2. Add ACI + 3. Bind with test USER_ANUJ + 4. Try search + 5. Delete Entry,test USER_ANUJ, ACI + :expectedresults: + 1. Operation should success + 2. Operation should success + 3. Operation should success + 4. Operation should Fail + 5. Operation should success + """ + groups = Groups(topo.standalone, DEFAULT_SUFFIX) + group = groups.create(properties={"cn": "group1", + "description": "testgroup" + }) + group.add_member(USER_ANANDA) + + posix_groups = PosixGroups(topo.standalone, DEFAULT_SUFFIX) + posix_group = posix_groups.create(properties={ + "cn": "group2", + "description": "testgroup2", + "gidNumber": "2000", + }) + posix_group.add_member(USER_ANUJ) + + ACI_TARGET = '(targetattr="*")' + ACI_ALLOW = '(version 3.0; acl "All rights for cn=group1,ou=Groups,{}"; deny(read)'.format(DEFAULT_SUFFIX) + ACI_SUBJECT = 'groupdn="ldap:///cn=group1,ou=Groups,{}||ldap:///cn=group2,ou=Groups,{}";)'.format(DEFAULT_SUFFIX, DEFAULT_SUFFIX) + ACI_BODY = ACI_TARGET + ACI_ALLOW + ACI_SUBJECT + Domain(topo.standalone, DEFAULT_SUFFIX).add("aci", ACI_BODY) + conn = UserAccount(topo.standalone, USER_ANANDA).bind(PW_DM) + # aci will block 'groupdn="ldap:///cn=group1,ou=Groups,dc=example,dc=com||ldap:///cn=group2,ou=Groups,dc=example,dc=com";) + assert 0 == len(Accounts(conn, DEFAULT_SUFFIX).filter('(cn=*)')) + conn = UserAccount(topo.standalone, USER_ANUJ).bind(PW_DM) + # aci will block 'groupdn="ldap:///cn=group1,ou=Groups,dc=example,dc=com||ldap:///cn=group2,ou=Groups,dc=example,dc=com";) + assert 0 == len(Accounts(conn, DEFAULT_SUFFIX).filter('(cn=*)')) + # with root there is no aci blockage + assert 3 == len(Accounts(topo.standalone, DEFAULT_SUFFIX).filter('(cn=*)')) + group = groups.get("group1") + group.delete() + posix_groups.get("group2") + posix_group.delete() + + +def test_deny_all_access_to_userdnattr(topo, test_uer, aci_of_user): + """Search Test 7 Deny all access to userdnattr" + :id: ae482494-6e11-11e8-ae33-8c16451d917b + :setup: Standalone Instance + :steps: + 1. Add Entry + 2. Add ACI + 3. Bind with test USER_ANUJ + 4. Try search + 5. Delete Entry,test USER_ANUJ, ACI + :expectedresults: + 1. Operation should success + 2. Operation should success + 3. Operation should success + 4. Operation should Fail + 5. 
Operation should success + """ + UserAccount(topo.standalone, USER_ANUJ).add('manager', USER_ANANDA) + ACI_TARGET = "(target = ldap:///{})(targetattr=*)".format(DEFAULT_SUFFIX) + ACI_ALLOW = '(version 3.0; acl "Name of the ACI"; deny absolute (all)' + ACI_SUBJECT = 'userdnattr="manager";)' + ACI_BODY = ACI_TARGET + ACI_ALLOW + ACI_SUBJECT + Domain(topo.standalone, DEFAULT_SUFFIX).add("aci", ACI_BODY) + conn = UserAccount(topo.standalone, USER_ANANDA).bind(PW_DM) + # aci will block only 'userdnattr="manager" + assert 0 == len(Accounts(conn, DEFAULT_SUFFIX).filter('(cn=Anuj Borah)')) + conn = UserAccount(topo.standalone, USER_ANUJ).bind(PW_DM) + # aci will block only 'userdnattr="manager" + assert 1 == len(Accounts(conn, DEFAULT_SUFFIX).filter('(cn=Anuj Borah)')) + # with root there is no aci blockage + assert 1 == len(Accounts(topo.standalone, DEFAULT_SUFFIX).filter('(cn=Anuj Borah)')) + UserAccount(topo.standalone, USER_ANUJ).remove('manager', USER_ANANDA) + + +def test_deny_all_access_with__target_set(topo, test_uer, aci_of_user): + """Search Test 8 Deny all access with != target set + :id: bc00aed0-6e11-11e8-be66-8c16451d917b + :setup: Standalone Instance + :steps: + 1. Add Entry + 2. Add ACI + 3. Bind with test USER_ANUJ + 4. Try search + 5. Delete Entry,test USER_ANUJ, ACI + :expectedresults: + 1. Operation should success + 2. Operation should success + 3. Operation should success + 4. Operation should Fail + 5. Operation should success + """ + Domain(topo.standalone, DEFAULT_SUFFIX).add("aci",'(target != "ldap:///{}")(targetattr = "*")' + '(version 3.0; acl "$tet_thistest"; deny absolute (all) (userdn = "ldap:///anyone") ;)'.format(USER_ANANDA)) + conn = UserAccount(topo.standalone, USER_ANANDA).bind(PW_DM) + # aci will not block USER_ANANDA will block others + assert 1 == len(Accounts(conn, DEFAULT_SUFFIX).filter('(cn=*)')) + conn = UserAccount(topo.standalone, USER_ANUJ).bind(PW_DM) + # aci will not block USER_ANANDA will block others + assert 1 == len(Accounts(conn, DEFAULT_SUFFIX).filter('(cn=*)')) + # with root there is no aci blockage + assert 2 == len(Accounts(topo.standalone, DEFAULT_SUFFIX).filter('(cn=*)')) + + +def test_deny_all_access_with__targetattr_set(topo, test_uer, aci_of_user): + """Search Test 9 Deny all access with != targetattr set + :id: d2d73b2e-6e11-11e8-ad3d-8c16451d917b + :setup: Standalone Instance + :steps: + 1. Add Entry + 2. Add ACI + 3. Bind with test USER_ANUJ + 4. Try search + 5. Delete Entry,test USER_ANUJ, ACI + :expectedresults: + 1. Operation should success + 2. Operation should success + 3. Operation should success + 4. Operation should Fail + 5. 
Operation should success + """ + testusers = UserAccounts(topo.standalone, DEFAULT_SUFFIX) + user = testusers.create(properties={ + 'uid': 'Anuj', + 'cn': 'Anuj', + 'sn': 'user', + 'uidNumber': '1000', + 'gidNumber': '2000', + 'homeDirectory': '/home/' + 'Anuj', + 'userPassword': PW_DM + }) + + ACI_TARGET = "(targetattr != uid||Objectclass)" + ACI_ALLOW = '(version 3.0; acl "Name of the ACI"; deny absolute (all)' + ACI_SUBJECT = 'userdn="ldap:///anyone";)' + ACI_BODY = ACI_TARGET + ACI_ALLOW + ACI_SUBJECT + Domain(topo.standalone, DEFAULT_SUFFIX).add("aci", ACI_BODY) + conn = UserAccount(topo.standalone, USER_ANANDA).bind(PW_DM) + # aci will allow only uid=* + assert 3 == len(Accounts(conn, DEFAULT_SUFFIX).filter('(uid=*)')) + # aci will allow only uid=* + assert 0 == len(Accounts(conn, DEFAULT_SUFFIX).filter('(cn=*)')) + conn = UserAccount(topo.standalone, USER_ANUJ).bind(PW_DM) + # aci will allow only uid=* + assert 3 == len(Accounts(conn, DEFAULT_SUFFIX).filter('(uid=*)')) + # aci will allow only uid=* + assert 0 == len(Accounts(conn, DEFAULT_SUFFIX).filter('(cn=*)')) + # with root there is no aci blockage + assert 3 == len(Accounts(topo.standalone, DEFAULT_SUFFIX).filter('(uid=*)')) + # with root there is no aci blockage + assert 3 == len(Accounts(topo.standalone, DEFAULT_SUFFIX).filter('(cn=*)')) + user.delete() + + +def test_deny_all_access_with_targetattr_set(topo, test_uer, aci_of_user): + """Search Test 10 Deny all access with targetattr set + :id: e1602ff2-6e11-11e8-8e55-8c16451d917b + :setup: Standalone Instance + :steps: + 1. Add Entry + 2. Add ACI + 3. Bind with test USER_ANUJ + 4. Try search + 5. Delete Entry,test USER_ANUJ, ACI + :expectedresults: + 1. Operation should success + 2. Operation should success + 3. Operation should success + 4. Operation should Fail + 5. Operation should success + """ + testuser = UserAccount(topo.standalone, "cn=Anuj12,ou=People,{}".format(DEFAULT_SUFFIX)) + testuser.create(properties={ + 'uid': 'Anuj12', + 'cn': 'Anuj12', + 'sn': 'user', + 'uidNumber': '1000', + 'gidNumber': '2000', + 'homeDirectory': '/home/' + 'Anuj12' + }) + + ACI_TARGET = "(targetattr = uid)" + ACI_ALLOW = '(version 3.0; acl "Name of the ACI"; deny absolute (all)' + ACI_SUBJECT = 'userdn="ldap:///anyone";)' + ACI_BODY = ACI_TARGET + ACI_ALLOW + ACI_SUBJECT + Domain(topo.standalone, DEFAULT_SUFFIX).add("aci", ACI_BODY) + conn = UserAccount(topo.standalone, USER_ANANDA).bind(PW_DM) + # aci will block only uid=* + assert 0 == len(Accounts(conn, DEFAULT_SUFFIX).filter('(uid=*)')) + conn = UserAccount(topo.standalone, USER_ANUJ).bind(PW_DM) + # aci will block only uid=* + assert 0 == len(Accounts(conn, DEFAULT_SUFFIX).filter('(uid=*)')) + # with root there is no aci blockage + assert 3 == len(Accounts(topo.standalone, DEFAULT_SUFFIX).filter('(uid=*)')) + testuser.delete() + + +if __name__ == "__main__": + CURRENT_FILE = os.path.realpath(__file__) + pytest.main("-s -v %s" % CURRENT_FILE) diff --git a/dirsrvtests/tests/suites/acl/selfdn_permissions_test.py b/dirsrvtests/tests/suites/acl/selfdn_permissions_test.py new file mode 100644 index 0000000..af75013 --- /dev/null +++ b/dirsrvtests/tests/suites/acl/selfdn_permissions_test.py @@ -0,0 +1,352 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2016 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. 
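The last few tests show how the targetattr keyword narrows a deny rule to specific attributes, which in turn decides which search filters an ordinary bind can still evaluate. Below is a short sketch of that behaviour under the same assumptions as above (running standalone instance, existing test users bound with PW_DM); the helper name and ACL label are placeholders.

from lib389._constants import DEFAULT_SUFFIX, PW_DM
from lib389.idm.account import Accounts
from lib389.idm.domain import Domain
from lib389.idm.user import UserAccount


def deny_only_uid(inst, bind_dn):
    # Deny every right, but only for the uid attribute. Filters on uid then
    # match nothing for ordinary binds, while filters on attributes the rule
    # does not cover (for example cn) keep returning entries, mirroring the
    # results asserted in Search Tests 9 and 10 above.
    aci = ('(targetattr = uid)'
           '(version 3.0; acl "deny uid only"; deny absolute (all)'
           'userdn="ldap:///anyone";)')
    Domain(inst, DEFAULT_SUFFIX).add('aci', aci)
    conn = UserAccount(inst, bind_dn).bind(PW_DM)
    assert len(Accounts(conn, DEFAULT_SUFFIX).filter('(uid=*)')) == 0
    assert len(Accounts(conn, DEFAULT_SUFFIX).filter('(cn=*)')) > 0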
+# --- END COPYRIGHT BLOCK --- +# +import logging + +import ldap +import pytest +from lib389 import Entry +from lib389._constants import * +from lib389.topologies import topology_st + +log = logging.getLogger(__name__) + +from lib389.utils import * + +# Skip on older versions +pytestmark = [pytest.mark.tier1, + pytest.mark.skipif(ds_is_older('1.3.2'), reason="Not implemented")] +OC_NAME = 'OCticket47653' +MUST = "(postalAddress $ postalCode)" +MAY = "(member $ street)" + +OTHER_NAME = 'other_entry' +MAX_OTHERS = 10 + +BIND_NAME = 'bind_entry' +BIND_DN = 'cn=%s, %s' % (BIND_NAME, SUFFIX) +BIND_PW = 'password' + +ENTRY_NAME = 'test_entry' +ENTRY_DN = 'cn=%s, %s' % (ENTRY_NAME, SUFFIX) +ENTRY_OC = "top person %s" % OC_NAME + + +def _oc_definition(oid_ext, name, must=None, may=None): + oid = "1.2.3.4.5.6.7.8.9.10.%d" % oid_ext + desc = 'To test ticket 47490' + sup = 'person' + if not must: + must = MUST + if not may: + may = MAY + + new_oc = "( %s NAME '%s' DESC '%s' SUP %s AUXILIARY MUST %s MAY %s )" % (oid, name, desc, sup, must, may) + return ensure_bytes(new_oc) + + +@pytest.fixture(scope="module") +def allow_user_init(topology_st): + """Initialize the test environment + + """ + topology_st.standalone.log.info("Add %s that allows 'member' attribute" % OC_NAME) + new_oc = _oc_definition(2, OC_NAME, must=MUST, may=MAY) + topology_st.standalone.schema.add_schema('objectClasses', new_oc) + + # entry used to bind with + topology_st.standalone.log.info("Add %s" % BIND_DN) + topology_st.standalone.add_s(Entry((BIND_DN, { + 'objectclass': "top person".split(), + 'sn': BIND_NAME, + 'cn': BIND_NAME, + 'userpassword': BIND_PW}))) + + # enable acl error logging + mod = [(ldap.MOD_REPLACE, 'nsslapd-errorlog-level', b'128')] + topology_st.standalone.modify_s(DN_CONFIG, mod) + + # Remove aci's to start with a clean slate + mod = [(ldap.MOD_DELETE, 'aci', None)] + topology_st.standalone.modify_s(SUFFIX, mod) + + # add dummy entries + for cpt in range(MAX_OTHERS): + name = "%s%d" % (OTHER_NAME, cpt) + topology_st.standalone.add_s(Entry(("cn=%s,%s" % (name, SUFFIX), { + 'objectclass': "top person".split(), + 'sn': name, + 'cn': name}))) + + +@pytest.mark.ds47653 +def test_selfdn_permission_add(topology_st, allow_user_init): + """Check add entry operation with and without SelfDN aci + + :id: e837a9ef-be92-48da-ad8b-ebf42b0fede1 + :setup: Standalone instance, add a entry which is used to bind, + enable acl error logging by setting 'nsslapd-errorlog-level' to '128', + remove aci's to start with a clean slate, and add dummy entries + :steps: + 1. Check we can not ADD an entry without the proper SELFDN aci + 2. Check with the proper ACI we can not ADD with 'member' attribute + 3. Check entry to add with memberS and with the ACI + 4. Check with the proper ACI and 'member' it succeeds to ADD + :expectedresults: + 1. Operation should be successful + 2. Operation should be successful + 3. Operation should fail with Insufficient Access + 4. 
Operation should be successful + """ + topology_st.standalone.log.info("\n\n######################### ADD ######################\n") + + # bind as bind_entry + topology_st.standalone.log.info("Bind as %s" % BIND_DN) + topology_st.standalone.simple_bind_s(BIND_DN, BIND_PW) + + # Prepare the entry with multivalued members + entry_with_members = Entry(ENTRY_DN) + entry_with_members.setValues('objectclass', 'top', 'person', 'OCticket47653') + entry_with_members.setValues('sn', ENTRY_NAME) + entry_with_members.setValues('cn', ENTRY_NAME) + entry_with_members.setValues('postalAddress', 'here') + entry_with_members.setValues('postalCode', '1234') + members = [] + for cpt in range(MAX_OTHERS): + name = "%s%d" % (OTHER_NAME, cpt) + members.append("cn=%s,%s" % (name, SUFFIX)) + members.append(BIND_DN) + entry_with_members.setValues('member', members) + + # Prepare the entry with one member + entry_with_member = Entry(ENTRY_DN) + entry_with_member.setValues('objectclass', 'top', 'person', 'OCticket47653') + entry_with_member.setValues('sn', ENTRY_NAME) + entry_with_member.setValues('cn', ENTRY_NAME) + entry_with_member.setValues('postalAddress', 'here') + entry_with_member.setValues('postalCode', '1234') + member = [] + member.append(BIND_DN) + entry_with_member.setValues('member', member) + + # entry to add WITH member being BIND_DN but WITHOUT the ACI -> ldap.INSUFFICIENT_ACCESS + try: + topology_st.standalone.log.info("Try to add Add %s (aci is missing): %r" % (ENTRY_DN, entry_with_member)) + + topology_st.standalone.add_s(entry_with_member) + except Exception as e: + topology_st.standalone.log.info("Exception (expected): %s" % type(e).__name__) + assert isinstance(e, ldap.INSUFFICIENT_ACCESS) + + # Ok Now add the proper ACI + topology_st.standalone.log.info("Bind as %s and add the ADD SELFDN aci" % DN_DM) + topology_st.standalone.simple_bind_s(DN_DM, PASSWORD) + + ACI_TARGET = "(target = \"ldap:///cn=*,%s\")" % SUFFIX + ACI_TARGETFILTER = "(targetfilter =\"(objectClass=%s)\")" % OC_NAME + ACI_ALLOW = "(version 3.0; acl \"SelfDN add\"; allow (add)" + ACI_SUBJECT = " userattr = \"member#selfDN\";)" + ACI_BODY = ACI_TARGET + ACI_TARGETFILTER + ACI_ALLOW + ACI_SUBJECT + mod = [(ldap.MOD_ADD, 'aci', ensure_bytes(ACI_BODY))] + topology_st.standalone.modify_s(SUFFIX, mod) + + # bind as bind_entry + topology_st.standalone.log.info("Bind as %s" % BIND_DN) + topology_st.standalone.simple_bind_s(BIND_DN, BIND_PW) + + # entry to add WITHOUT member and WITH the ACI -> ldap.INSUFFICIENT_ACCESS + try: + topology_st.standalone.log.info("Try to add Add %s (member is missing)" % ENTRY_DN) + topology_st.standalone.add_s(Entry((ENTRY_DN, { + 'objectclass': ENTRY_OC.split(), + 'sn': ENTRY_NAME, + 'cn': ENTRY_NAME, + 'postalAddress': 'here', + 'postalCode': '1234'}))) + except Exception as e: + topology_st.standalone.log.info("Exception (expected): %s" % type(e).__name__) + assert isinstance(e, ldap.INSUFFICIENT_ACCESS) + + # entry to add WITH memberS and WITH the ACI -> ldap.INSUFFICIENT_ACCESS + # member should contain only one value + try: + topology_st.standalone.log.info("Try to add Add %s (with several member values)" % ENTRY_DN) + topology_st.standalone.add_s(entry_with_members) + except Exception as e: + topology_st.standalone.log.info("Exception (expected): %s" % type(e).__name__) + assert isinstance(e, ldap.INSUFFICIENT_ACCESS) + + topology_st.standalone.log.info("Try to add Add %s should be successful" % ENTRY_DN) + topology_st.standalone.add_s(entry_with_member) + + +@pytest.mark.ds47653 +def 
test_selfdn_permission_search(topology_st, allow_user_init): + """Check search operation with and without SelfDN aci + + :id: 06d51ef9-c675-4583-99b2-4852dbda190e + :setup: Standalone instance, add a entry which is used to bind, + enable acl error logging by setting 'nsslapd-errorlog-level' to '128', + remove aci's to start with a clean slate, and add dummy entries + :steps: + 1. Check we can not search an entry without the proper SELFDN aci + 2. Add proper ACI + 3. Check we can search with the proper ACI + :expectedresults: + 1. Operation should be successful + 2. Operation should be successful + 3. Operation should be successful + """ + topology_st.standalone.log.info("\n\n######################### SEARCH ######################\n") + # bind as bind_entry + topology_st.standalone.log.info("Bind as %s" % BIND_DN) + topology_st.standalone.simple_bind_s(BIND_DN, BIND_PW) + + # entry to search WITH member being BIND_DN but WITHOUT the ACI -> no entry returned + topology_st.standalone.log.info("Try to search %s (aci is missing)" % ENTRY_DN) + ents = topology_st.standalone.search_s(ENTRY_DN, ldap.SCOPE_BASE, 'objectclass=*') + assert len(ents) == 0 + + # Ok Now add the proper ACI + topology_st.standalone.log.info("Bind as %s and add the READ/SEARCH SELFDN aci" % DN_DM) + topology_st.standalone.simple_bind_s(DN_DM, PASSWORD) + + ACI_TARGET = "(target = \"ldap:///cn=*,%s\")" % SUFFIX + ACI_TARGETATTR = "(targetattr = *)" + ACI_TARGETFILTER = "(targetfilter =\"(objectClass=%s)\")" % OC_NAME + ACI_ALLOW = "(version 3.0; acl \"SelfDN search-read\"; allow (read, search, compare)" + ACI_SUBJECT = " userattr = \"member#selfDN\";)" + ACI_BODY = ACI_TARGET + ACI_TARGETATTR + ACI_TARGETFILTER + ACI_ALLOW + ACI_SUBJECT + mod = [(ldap.MOD_ADD, 'aci', ensure_bytes(ACI_BODY))] + topology_st.standalone.modify_s(SUFFIX, mod) + + # bind as bind_entry + topology_st.standalone.log.info("Bind as %s" % BIND_DN) + topology_st.standalone.simple_bind_s(BIND_DN, BIND_PW) + + # entry to search with the proper aci + topology_st.standalone.log.info("Try to search %s should be successful" % ENTRY_DN) + ents = topology_st.standalone.search_s(ENTRY_DN, ldap.SCOPE_BASE, 'objectclass=*') + assert len(ents) == 1 + + +@pytest.mark.ds47653 +def test_selfdn_permission_modify(topology_st, allow_user_init): + """Check modify operation with and without SelfDN aci + + :id: 97a58844-095f-44b0-9029-dd29a7d83d68 + :setup: Standalone instance, add a entry which is used to bind, + enable acl error logging by setting 'nsslapd-errorlog-level' to '128', + remove aci's to start with a clean slate, and add dummy entries + :steps: + 1. Check we can not modify an entry without the proper SELFDN aci + 2. Add proper ACI + 3. Modify the entry and check the modified value + :expectedresults: + 1. Operation should be successful + 2. Operation should be successful + 3. 
Operation should be successful + """ + # bind as bind_entry + topology_st.standalone.log.info("Bind as %s" % BIND_DN) + topology_st.standalone.simple_bind_s(BIND_DN, BIND_PW) + + topology_st.standalone.log.info("\n\n######################### MODIFY ######################\n") + + # entry to modify WITH member being BIND_DN but WITHOUT the ACI -> ldap.INSUFFICIENT_ACCESS + try: + topology_st.standalone.log.info("Try to modify %s (aci is missing)" % ENTRY_DN) + mod = [(ldap.MOD_REPLACE, 'postalCode', b'9876')] + topology_st.standalone.modify_s(ENTRY_DN, mod) + except Exception as e: + topology_st.standalone.log.info("Exception (expected): %s" % type(e).__name__) + assert isinstance(e, ldap.INSUFFICIENT_ACCESS) + + # Ok Now add the proper ACI + topology_st.standalone.log.info("Bind as %s and add the WRITE SELFDN aci" % DN_DM) + topology_st.standalone.simple_bind_s(DN_DM, PASSWORD) + + ACI_TARGET = "(target = \"ldap:///cn=*,%s\")" % SUFFIX + ACI_TARGETATTR = "(targetattr = *)" + ACI_TARGETFILTER = "(targetfilter =\"(objectClass=%s)\")" % OC_NAME + ACI_ALLOW = "(version 3.0; acl \"SelfDN write\"; allow (write)" + ACI_SUBJECT = " userattr = \"member#selfDN\";)" + ACI_BODY = ACI_TARGET + ACI_TARGETATTR + ACI_TARGETFILTER + ACI_ALLOW + ACI_SUBJECT + mod = [(ldap.MOD_ADD, 'aci', ensure_bytes(ACI_BODY))] + topology_st.standalone.modify_s(SUFFIX, mod) + + # bind as bind_entry + topology_st.standalone.log.info("Bind as %s" % BIND_DN) + topology_st.standalone.simple_bind_s(BIND_DN, BIND_PW) + + # modify the entry and checks the value + topology_st.standalone.log.info("Try to modify %s. It should succeeds" % ENTRY_DN) + mod = [(ldap.MOD_REPLACE, 'postalCode', b'1928')] + topology_st.standalone.modify_s(ENTRY_DN, mod) + + ents = topology_st.standalone.search_s(ENTRY_DN, ldap.SCOPE_BASE, 'objectclass=*') + assert len(ents) == 1 + assert ensure_str(ents[0].postalCode) == '1928' + + +@pytest.mark.ds47653 +def test_selfdn_permission_delete(topology_st, allow_user_init): + """Check delete operation with and without SelfDN aci + + :id: 0ec4c0ec-e7b0-4ef1-8373-ab25aae34516 + :setup: Standalone instance, add a entry which is used to bind, + enable acl error logging by setting 'nsslapd-errorlog-level' to '128', + remove aci's to start with a clean slate, and add dummy entries + :steps: + 1. Check we can not delete an entry without the proper SELFDN aci + 2. Add proper ACI + 3. Check we can perform delete operation with proper ACI + :expectedresults: + 1. Operation should be successful + 2. 
Operation should be successful + """ + topology_st.standalone.log.info("\n\n######################### DELETE ######################\n") + + # bind as bind_entry + topology_st.standalone.log.info("Bind as %s" % BIND_DN) + topology_st.standalone.simple_bind_s(BIND_DN, BIND_PW) + + # entry to delete WITH member being BIND_DN but WITHOUT the ACI -> ldap.INSUFFICIENT_ACCESS + try: + topology_st.standalone.log.info("Try to delete %s (aci is missing)" % ENTRY_DN) + topology_st.standalone.delete_s(ENTRY_DN) + except Exception as e: + topology_st.standalone.log.info("Exception (expected): %s" % type(e).__name__) + assert isinstance(e, ldap.INSUFFICIENT_ACCESS) + + # Ok Now add the proper ACI + topology_st.standalone.log.info("Bind as %s and add the READ/SEARCH SELFDN aci" % DN_DM) + topology_st.standalone.simple_bind_s(DN_DM, PASSWORD) + + ACI_TARGET = "(target = \"ldap:///cn=*,%s\")" % SUFFIX + ACI_TARGETFILTER = "(targetfilter =\"(objectClass=%s)\")" % OC_NAME + ACI_ALLOW = "(version 3.0; acl \"SelfDN delete\"; allow (delete)" + ACI_SUBJECT = " userattr = \"member#selfDN\";)" + ACI_BODY = ACI_TARGET + ACI_TARGETFILTER + ACI_ALLOW + ACI_SUBJECT + mod = [(ldap.MOD_ADD, 'aci', ensure_bytes(ACI_BODY))] + topology_st.standalone.modify_s(SUFFIX, mod) + + # bind as bind_entry + topology_st.standalone.log.info("Bind as %s" % BIND_DN) + topology_st.standalone.simple_bind_s(BIND_DN, BIND_PW) + + # entry to delete with the proper aci + topology_st.standalone.log.info("Try to delete %s should be successful" % ENTRY_DN) + topology_st.standalone.delete_s(ENTRY_DN) + + +if __name__ == '__main__': + # Run isolated + # -s for DEBUG mode + CURRENT_FILE = os.path.realpath(__file__) + pytest.main("-s %s" % CURRENT_FILE) diff --git a/dirsrvtests/tests/suites/acl/syntax_test.py b/dirsrvtests/tests/suites/acl/syntax_test.py new file mode 100644 index 0000000..556b19b --- /dev/null +++ b/dirsrvtests/tests/suites/acl/syntax_test.py @@ -0,0 +1,262 @@ +""" +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2019 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. 
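The selfDN tests above grant each right only when the bound DN is listed in the target entry's own member attribute, and they apply the ACIs with a raw modify_s call rather than through the lib389 Domain wrapper. A condensed sketch of that grant follows, assuming an instance connection bound as Directory Manager and the SUFFIX and OC_NAME values defined in that module; the ACL label is illustrative.

import ldap
from lib389.utils import ensure_bytes


def add_selfdn_write_aci(inst, suffix, oc_name):
    # Allow write only when the bound DN appears in the entry's own 'member'
    # attribute -- the "#selfDN" part of the userattr bind rule.
    aci = ('(target = "ldap:///cn=*,%s")' % suffix
           + '(targetattr = *)'
           + '(targetfilter ="(objectClass=%s)")' % oc_name
           + '(version 3.0; acl "SelfDN write sketch"; allow (write)'
           + ' userattr = "member#selfDN";)')
    inst.modify_s(suffix, [(ldap.MOD_ADD, 'aci', ensure_bytes(aci))])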
+# --- END COPYRIGHT BLOCK ---- +""" + +import os +import pytest + +from lib389._constants import DEFAULT_SUFFIX +from lib389.idm.domain import Domain +from lib389.topologies import topology_st as topo + +import ldap + +pytestmark = pytest.mark.tier1 + +INVALID = [('test_targattrfilters_1', + f'(targattrfilters ="add=title:title=fred),del=cn:(cn!=harry)")' + f'(version 3.0; acl "Name of the ACI"; deny absolute (all)userdn="ldap:///anyone";)'), + ('test_targattrfilters_2', + f'(targattrfilters ="add=:(title=fred),del=cn:(cn!=harry)")' + f'(version 3.0; acl "Name of the ACI"; deny absolute (all)userdn="ldap:///anyone";)'), + ('test_targattrfilters_3', + f'(targattrfilters ="add=:(title=fred),del=cn:(cn!=harry))' + f'(version 3.0; acl "Name of the ACI"; deny absolute (all)userdn="ldap:///anyone";)'), + ('test_targattrfilters_4', + f'(targattrfilters ="add=title:(title=fred),=cn:(cn!=harry")' + f'(version 3.0; acl "Name of the ACI"; deny absolute (all)userdn="ldap:///anyone";)'), + ('test_targattrfilters_5', + f'(targattrfilters ="add=title:(|(title=fred)(cn=harry)),del=cn:(cn=harry)")' + f'(version 3.0; acl "Name of the ACI"; deny absolute (all)userdn="ldap:///anyone";)'), + ('test_targattrfilters_6', + f'(targattrfilters ="add=title:(|(title=fred)(title=harry)),del=cn:(title=harry)")' + f'(version 3.0; acl "Name of the ACI"; deny absolute (all)userdn="ldap:///anyone";)'), + ('test_targattrfilters_7', + f'(targattrfilters ="add=title:(cn=architect), ' + f'del=title:(title=architect) && l:(l=cn=Meylan,dc=example,dc=com")")' + f'(version 3.0; acl "Name of the ACI"; deny absolute (all)userdn="ldap:///anyone";)'), + ('test_targattrfilters_8', + f'(targattrfilters ="add=title:(cn=architect)")' + f'(version 3.0; acl "Name of the ACI"; deny absolute (all)userdn="ldap:///anyone";)'), + ('test_targattrfilters_9', + f'(targattrfilters ="add=title:(cn=arch*)")' + f'(version 3.0; acl "Name of the ACI"; deny absolute (all)userdn="ldap:///anyone";)'), + ('test_targattrfilters_10', + f'(targattrfilters ="add=title:(cn >= 1)")' + f'(version 3.0; acl "Name of the ACI"; deny absolute (all)userdn="ldap:///anyone";)'), + ('test_targattrfilters_11', + f'(targattrfilters ="add=title:(cn <= 1)")' + f'(version 3.0; acl "Name of the ACI"; deny absolute (all)userdn="ldap:///anyone";)'), + ('test_targattrfilters_12', + f'(targattrfilters ="add=title:(cn ~= 1)")' + f'(version 3.0; acl "Name of the ACI"; deny absolute (all)userdn="ldap:///anyone";)'), + ('test_targattrfilters_13', + f'(targattrfilters ="add=title:(!(cn ~= 1))")' + f'(version 3.0; acl "Name of the ACI"; deny absolute (all)userdn="ldap:///anyone";)'), + ('test_targattrfilters_14', + f'(targattrfilters ="add=title:(&(cn=fred)(cn ~= 1))")' + f'(version 3.0; acl "Name of the ACI"; deny absolute (all)userdn="ldap:///anyone";)'), + ('test_targattrfilters_15', + f'(targattrfilters ="add=title:(|(cn=fred)(cn ~= 1))")' + f'(version 3.0; acl "Name of the ACI"; deny absolute (all)userdn="ldap:///anyone";)'), + ('test_targattrfilters_16', + f'(targattrfilters ="add=title:(&(|(title=fred)(title=harry))(cn ~= 1))")' + f'(version 3.0; acl "Name of the ACI"; deny absolute (all)userdn="ldap:///anyone";)'), + ('test_targattrfilters_17', + f'\(targattrfilters ="add=title:(&(|(&(title=harry)(title=fred))' + f'(title=harry))(title ~= 1))")' + f'(version 3.0; acl "Name of the ACI"; deny absolute (all)userdn="ldap:///anyone";)'), + ('test_targattrfilters_19', + f'(target = ldap:///cn=Jeff Vedder,ou=Product Development,{DEFAULT_SUFFIX})' + f'(targetattr=*)' + f'(version 3.0; acl 
"Name of the ACI"; deny(write)gropdn="ldap:///anyone";)'), + ('test_targattrfilters_21', + f'(target = ldap:///cn=Jeff Vedder,ou=Product Development,{DEFAULT_SUFFIX})' + f'(targetattr=*)' + f'(version 3.0; acl "Name of the ACI"; deny(rite)userdn="ldap:///anyone";)'), + ('test_targattrfilters_22', + f'(targt = ldap:///cn=Jeff Vedder,ou=Product Development,{DEFAULT_SUFFIX})' + f'(targetattr=*)' + f'(version 3.0; acl "Name of the ACI"; deny absolute (all)userdn="ldap:///anyone";)'), + ('test_targattrfilters_23', + f'(target = ldap:///cn=Jeff Vedder,ou=Product Development,{DEFAULT_SUFFIX})' + f'(targetattr=*)' + f'(version 3.0; acl "Name of the ACI"; absolute (all)userdn="ldap:///anyone";)'), + ('test_Missing_acl_mispel', + f'(target = ldap:///cn=Jeff Vedder,ou=Product Development,{DEFAULT_SUFFIX})' + f'(targetattr=*)' + f'(version 3.0; alc "Name of the ACI"; deny absolute (all)userdn="ldap:///anyone";)'), + ('test_Missing_acl_string', + f'(target = ldap:///cn=Jeff Vedder,ou=Product Development,{DEFAULT_SUFFIX})' + f'(targetattr=*)' + f'(version 3.0; "Name of the ACI"; deny absolute (all)userdn="ldap:///anyone";)'), + ('test_Wrong_version_string', + f'(target = ldap:///cn=Jeff Vedder,ou=Product Development,{DEFAULT_SUFFIX})' + f'(targetattr=*)' + f'(version 2.0; acl "Name of the ACI"; deny absolute (all)userdn="ldap:///anyone";)'), + ('test_Missing_version_string', + f'(target = ldap:///cn=Jeff Vedder,ou=Product Development,{DEFAULT_SUFFIX})' + f'(targetattr=*)' + f'(; acl "Name of the ACI"; deny absolute (all)userdn="ldap:///anyone";)'), + ('test_Authenticate_statement', + f'(target = ldap:///cn=Jeff Vedder,ou=Product Development,{DEFAULT_SUFFIX})' + f'(targetattr != "uid")' + f'(targetattr=*)(version 3.0; acl "Name of the ACI"; deny absolute (all)' + f'userdn="ldap:///anyone";)'), + ('test_Multiple_targets', + f'(target = ldap:///ou=Product Development,{DEFAULT_SUFFIX})' + f'(target = ldap:///ou=Product Testing,{DEFAULT_SUFFIX})(targetattr=*)' + f'(version 3.0; acl "Name of the ACI"; deny absolute (all)userdn="ldap:///anyone";)'), + ('test_Target_set_to_self', + f'(target = ldap:///self)(targetattr=*)' + f'(version 3.0; acl "Name of the ACI"; deny absolute (all)userdn="ldap:///anyone";)'), + ('test_target_set_with_ldap_instead_of_ldap', + f'(target = ldap:\\\{DEFAULT_SUFFIX})(targetattr=*)' + f'(version 3.0; acl "Name of the ACI"; deny absolute (all)userdn="ldap:///anyone";)'), + ('test_target_set_with_more_than_three', + f'(target = ldap:////{DEFAULT_SUFFIX})(targetattr=*)' + f'(version 3.0; acl "Name of the ACI"; deny absolute (all)userdn="ldap:///anyone";)'), + ('test_target_set_with_less_than_three', + f'(target = ldap://{DEFAULT_SUFFIX})(targetattr=*)' + f'(version 3.0; acl "Name of the ACI"; deny absolute (all)userdn="ldap:///anyone";)'), + ('test_bind_rule_set_with_less_than_three', + f'(target = ldap:///{DEFAULT_SUFFIX})(targetattr=*)' + f'(version 3.0; acl "Name of the ACI"; deny absolute (all)userdn="ldap:/anyone";)'), + ('test_Use_semicolon_instead_of_comma_in_permission', + f'(target = ldap:///{DEFAULT_SUFFIX})(targetattr=*)' + f'(version 3.0; acl "Name of the ACI"; deny ' + f'(read; search; compare; write)userdn="ldap:///anyone";)'), + ('test_Use_double_equal_instead_of_equal_in_the_target', + f'(target == ldap:///{DEFAULT_SUFFIX})(targetattr=*)' + f'(version 3.0; acl "Name of the ACI"; deny absolute (all)userdn="ldap:///anyone";)'), + ('test_use_double_equal_instead_of_equal_in_user_and_group_access', + f'(target = ldap:///{DEFAULT_SUFFIX})' + f'(version 3.0; acl "Name of the 
ACI"; deny absolute (all)' + f'userdn == "ldap:///anyone";)'), + ('test_donot_cote_the_name_of_the_aci', + f'(target = ldap:///{DEFAULT_SUFFIX})' + f'(version 3.0; acl Name of the ACI ; deny absolute (all)userdn = "ldap:///anyone";)'), + ('test_extra_parentheses_case_1', + f'( )(target = ldap:///{DEFAULT_SUFFIX}) (targetattr=*)' + f'(version 3.0; acl "Name of the ACI"; deny absolute (all)userdn = "ldap:///anyone";)'), + ('test_extra_parentheses_case_2', + f'(((((target = ldap:///{DEFAULT_SUFFIX})(targetattr=*)' + f'(version 3.0; acl "Name of the ACI"; deny absolute (all)' + f'userdn == "ldap:///anyone";)'), + ('test_extra_parentheses_case_3', + f'(((target = ldap:///{DEFAULT_SUFFIX}) (targetattr=*)' + f'(version 3.0; acl "Name of the ACI"; deny absolute ' + f'(all)userdn = "ldap:///anyone";)))'), + ('test_no_semicolon_at_the_end_of_the_aci', + f'(target = ldap:///{DEFAULT_SUFFIX}) (targetattr=*)' + f'(version 3.0; acl "Name of the ACI"; deny absolute (all)userdn = "ldap:///anyone")'), + ('test_a_character_different_of_a_semicolon_at_the_end_of_the_aci', + f'(target = ldap:///{DEFAULT_SUFFIX}) (targetattr=*)' + f'(version 3.0; acl "Name of the ACI"; deny absolute (all)userdn = "ldap:///anyone"%)'), + ('test_bad_filter', + f'(target = ldap:///{DEFAULT_SUFFIX}) ' + f'(targetattr="cn |&| sn |(|) uid")' + f'(version 3.0; acl "Name of the ACI"; deny absolute (all)userdn = "ldap:///anyone";)'), + ('test_Use_double_equal_instead_of_equal_in_the_targattrfilters', + f'(target = ldap:///{DEFAULT_SUFFIX})(targattrfilters== "add=title:(title=architect)")' + f'(version 3.0; acl "Name of the ACI"; deny absolute (all)userdn="ldap:///anyone";)'), + ('test_Use_double_equal_instead_of_equal_inside_the_targattrfilters', + f'(target = ldap:///{DEFAULT_SUFFIX})(targattrfilters="add==title:(title==architect)")' + f'(version 3.0; acl "Name of the ACI"; deny absolute (all)userdn="ldap:///anyone";)'),] + + +FAILED = [('test_targattrfilters_18', + f'(target = ldap:///cn=Jeff Vedder,ou=Product Development,{DEFAULT_SUFFIX})' + f'(targetattr=*)' + f'(version 3.0; acl "Name of the ACI"; deny(write)userdn="ldap:///{"123" * 300}";)'), + ('test_targattrfilters_20', + f'(target = ldap:///cn=Jeff Vedder,ou=Product Development,{DEFAULT_SUFFIX})' + f'(targetattr=*)' + f'(version 3.0; acl "Name of the ACI"; deny(write)userdns="ldap:///anyone";)'), + ('test_bind_rule_set_with_more_than_three', + f'(target = ldap:///{DEFAULT_SUFFIX})(targetattr=*)' + f'(version 3.0; acl "Name of the ACI"; deny absolute (all)' + f'userdn="ldap:////////anyone";)'), + ('test_Use_double_equal_instead_of_equal_in_the_targetattr', + f'(target = ldap:///{DEFAULT_SUFFIX})(targetattr==*)' + f'(version 3.0; acl "Name of the ACI"; deny absolute (all)userdn="ldap:///anyone";)'), + ('test_Use_double_equal_instead_of_equal_in_the_targetfilter', + f'(target = ldap:///{DEFAULT_SUFFIX})(targetfilter==*)' + f'(version 3.0; acl "Name of the ACI"; deny absolute ' + f'(all)userdn="ldap:///anyone";)'), ] + + +@pytest.mark.xfail(reason='https://bugzilla.redhat.com/show_bug.cgi?id=1691473') +@pytest.mark.parametrize("real_value", [a[1] for a in FAILED], + ids=[a[0] for a in FAILED]) +def test_aci_invalid_syntax_fail(topo, real_value): + """ + + Try to set wrong ACI syntax. + + :id: 83c40784-fff5-49c8-9535-7064c9c19e7e + :parametrized: yes + :setup: Standalone Instance + :steps: + 1. Create ACI + 2. Try to setup the ACI with Instance + :expectedresults: + 1. It should pass + 2. 
It should not pass + """ + domain = Domain(topo.standalone, DEFAULT_SUFFIX) + with pytest.raises(ldap.INVALID_SYNTAX): + domain.add("aci", real_value) + + +@pytest.mark.parametrize("real_value", [a[1] for a in INVALID], + ids=[a[0] for a in INVALID]) +def test_aci_invalid_syntax(topo, real_value): + """ + + Try to set wrong ACI syntax. + + :id: e8bf20b6-48be-4574-8300-056e42a0f0a8 + :parametrized: yes + :setup: Standalone Instance + :steps: + 1. Create ACI + 2. Try to setup the ACI with Instance + :expectedresults: + 1. It should pass + 2. It should not pass + """ + domain = Domain(topo.standalone, DEFAULT_SUFFIX) + with pytest.raises(ldap.INVALID_SYNTAX): + domain.add("aci", real_value) + + +def test_target_set_above_the_entry_test(topo): + """ + Try to set wrong ACI syntax. + + :id: d544d09a-6ed1-11e8-8872-8c16451d917b + :setup: Standalone Instance + :steps: + 1. Create ACI + 2. Try to setup the ACI with Instance + :expectedresults: + 1. It should pass + 2. It should not pass + """ + domain = Domain(topo.standalone, "ou=People,{}".format(DEFAULT_SUFFIX)) + with pytest.raises(ldap.INVALID_SYNTAX): + domain.add("aci", f'(target = ldap:///{DEFAULT_SUFFIX})' + f'(targetattr=*)(version 3.0; acl "Name of the ACI"; deny absolute ' + f'(all)userdn="ldap:///anyone";)') + + +if __name__ == "__main__": + CURRENT_FILE = os.path.realpath(__file__) + pytest.main("-s -v %s" % CURRENT_FILE) diff --git a/dirsrvtests/tests/suites/acl/userattr_test.py b/dirsrvtests/tests/suites/acl/userattr_test.py new file mode 100644 index 0000000..542d7af --- /dev/null +++ b/dirsrvtests/tests/suites/acl/userattr_test.py @@ -0,0 +1,298 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2019 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. +# --- END COPYRIGHT BLOCK ---- + +""" +This script will test different type of user attributes. +""" + +import os +import pytest + +from lib389._constants import DEFAULT_SUFFIX, PW_DM +from lib389.idm.user import UserAccount, UserAccounts +from lib389.idm.organizationalunit import OrganizationalUnits +from lib389.idm.group import Groups +from lib389.idm.role import ManagedRoles +from lib389.topologies import topology_st as topo + +import ldap + +pytestmark = pytest.mark.tier1 + + +OU = f"ou=Accounting,{DEFAULT_SUFFIX}" +OU_2 = f"ou=Inheritance,{DEFAULT_SUFFIX}" +CAN = f"uid=Anuj Borah,{OU}" +CANNOT = f"uid=Ananda Borah,{OU}" +LEVEL_0 = f"uid=Grandson,{OU_2}" +LEVEL_1 = f"uid=Child,{OU_2}" +LEVEL_2 = f"uid=Parent,{OU_2}" +LEVEL_3 = f"uid=Grandparent,{OU_2}" +LEVEL_4 = f"uid=Ancestor,{OU_2}" +ROLE1 = f'cn=ROLE1,{OU}' +ROLE2 = f'cn=ROLE2,{OU}' +NSSIMPLEGROUP = f'cn=NSSIMPLEGROUP,{OU}' +NSSIMPLEGROUP1 = f'cn=NSSIMPLEGROUP1,{OU}' +ROLEDNACCESS = f'uid=ROLEDNACCESS,{OU}' +USERDNACCESS = f'uid=USERDNACCESS,{OU}' +GROUPDNACCESS = f'uid=GROUPDNACCESS,{OU}' +LDAPURLACCESS = f'uid=LDAPURLACCESS,{OU}' +ATTRNAMEACCESS = f'uid=ATTRNAMEACCESS,{OU}' +ANCESTORS = f'ou=ANCESTORS,{OU_2}' +GRANDPARENTS = f'ou=GRANDPARENTS,{ANCESTORS}' +PARENTS = f'ou=PARENTS,{GRANDPARENTS}' +CHILDREN = f'ou=CHILDREN,{PARENTS}' +GRANDSONS = f'ou=GRANDSONS,{CHILDREN}' + + +@pytest.fixture(scope="module") +def _add_user(topo): + """ + This function will create user for the test and in the end entries will be deleted . 
+ """ + role_aci_body = '(targetattr=*)(version 3.0; aci "role aci"; allow(all)' + # Creating OUs + ous = OrganizationalUnits(topo.standalone, DEFAULT_SUFFIX) + ou_accounting = ous.create(properties={'ou': 'Accounting'}) + ou_accounting.set('aci', [f'(target="ldap:///{ROLEDNACCESS}"){role_aci_body} ' + f'userattr = "Description#ROLEDN";)', + f'(target="ldap:///{USERDNACCESS}"){role_aci_body} ' + f'userattr = "Description#USERDN";)', + f'(target="ldap:///{GROUPDNACCESS}"){role_aci_body} ' + f'userattr = "Description#GROUPDN";)', + f'(target="ldap:///{LDAPURLACCESS}"){role_aci_body} ' + f'userattr = "Description#LDAPURL";)', + f'(target="ldap:///{ATTRNAMEACCESS}"){role_aci_body} ' + f'userattr = "Description#4612";)']) + + ou_inheritance = ous.create(properties={'ou': 'Inheritance', + 'street': LEVEL_4, + 'seeAlso': LEVEL_3, + 'st': LEVEL_2, + 'description': LEVEL_1, + 'businessCategory': LEVEL_0}) + + inheritance_aci_body = '(targetattr=*)(version 3.0; aci "Inheritance aci"; allow(all) ' + ou_inheritance.set('aci', [f'{inheritance_aci_body} ' + f'userattr = "parent[0].businessCategory#USERDN";)', + f'{inheritance_aci_body} ' + f'userattr = "parent[0,1].description#USERDN";)', + f'{inheritance_aci_body} ' + f'userattr = "parent[0,1,2].st#USERDN";)', + f'{inheritance_aci_body} ' + f'userattr = "parent[0,1,2,3].seeAlso#USERDN";)', + f'{inheritance_aci_body} ' + f'userattr = "parent[0,1,2,3,4].street#USERDN";)']) + + # Creating Users + users = UserAccounts(topo.standalone, OU, rdn=None) + + for i in [['Anuj Borah', 'Sunnyvale', ROLE1, '4612'], + ['Ananda Borah', 'Santa Clara', ROLE2, 'Its Unknown']]: + users.create(properties={ + 'uid': i[0], + 'cn': i[0].split()[0], + 'sn': 'user', + 'uidNumber': '1000', + 'gidNumber': '2000', + 'homeDirectory': '/home/' + i[0].split()[0], + 'userPassword': PW_DM, + 'givenname': i[0].split()[0], + 'l': i[1], + 'mail': "anuj@borah.com", + 'telephonenumber': "+1 408 555 4798", + 'facsimiletelephonenumber': "+1 408 555 9751", + 'roomnumber': i[3], + 'Description': i[3], + 'nsRoleDN': i[2] + }) + + for demo1 in [('ROLEDNACCESS', ROLE1), + ('USERDNACCESS', CAN), + ('GROUPDNACCESS', NSSIMPLEGROUP), + ('ATTRNAMEACCESS', '4612'), + ('LDAPURLACCESS', f"ldap:///{DEFAULT_SUFFIX}??sub?(l=Sunnyvale)")]: + users.create(properties={ + 'uid': demo1[0], + 'cn': demo1[0], + 'sn': 'user', + 'uidNumber': '1000', + 'gidNumber': '2000', + 'homeDirectory': '/home/' + demo1[0], + 'userPassword': PW_DM, + 'Description': demo1[1] + }) + + # Creating roles + roles = ManagedRoles(topo.standalone, OU) + for i in ['ROLE1', 'ROLE2']: + roles.create(properties={"cn": i}) + + # Creating Groups + grps = Groups(topo.standalone, OU, rdn=None) + for i in [('NSSIMPLEGROUP', CAN), ('NSSIMPLEGROUP1', CANNOT)]: + grps.create(properties={ + 'cn': i[0], + 'ou': 'groups', + 'member': i[1] + }) + + users = UserAccounts(topo.standalone, OU_2, rdn=None) + for i in ['Grandson', 'Child', 'Parent', 'Grandparent', 'Ancestor']: + users.create( + properties={ + 'uid': i, + 'cn': i, + 'sn': 'user', + 'uidNumber': '1000', + 'gidNumber': '2000', + 'homeDirectory': '/home/' + i, + 'userPassword': PW_DM + }) + + # Creating Other OUs + for dn_dn in [(OU_2, 'ANCESTORS'), + (ANCESTORS, 'GRANDPARENTS'), + (GRANDPARENTS, 'PARENTS'), + (PARENTS, 'CHILDREN'), + (CHILDREN, 'GRANDSONS')]: + OrganizationalUnits(topo.standalone, dn_dn[0]).create(properties={'ou': dn_dn[1]}) + + +@pytest.mark.parametrize("user,entry", [ + (CAN, ROLEDNACCESS), + (CAN, USERDNACCESS), + (CAN, GROUPDNACCESS), + (CAN, LDAPURLACCESS), + (CAN, 
ATTRNAMEACCESS), + (LEVEL_0, OU_2), + (LEVEL_1, ANCESTORS), + (LEVEL_2, GRANDPARENTS), + (LEVEL_4, OU_2), + (LEVEL_4, ANCESTORS), + (LEVEL_4, GRANDPARENTS), + (LEVEL_4, PARENTS), + (LEVEL_4, CHILDREN), + pytest.param(LEVEL_3, CHILDREN, marks=pytest.mark.xfail(reason="May be some bug")), +], ids=[ + "(CAN,ROLEDNACCESS)", + "(CAN,USERDNACCESS)", + "(CAN,GROUPDNACCESS)", + "(CAN,LDAPURLACCESS)", + "(CAN,ATTRNAMEACCESS)", + "(LEVEL_0, OU_2)", + "(LEVEL_1,ANCESTORS)", + "(LEVEL_2,GRANDPARENTS)", + "(LEVEL_4,OU_2)", + "(LEVEL_4, ANCESTORS)", + "(LEVEL_4,GRANDPARENTS)", + "(LEVEL_4,PARENTS)", + "(LEVEL_4,CHILDREN)", + "(LEVEL_3, CHILDREN)" +]) +def test_mod_see_also_positive(topo, _add_user, user, entry): + """ + Try to set seeAlso on entry with binding specific user, it will success + as per the ACI. + + :id: 65745426-7a01-11e8-8ac2-8c16451d917b + :parametrized: yes + :setup: Standalone Instance + :steps: + 1. Add test entry + 2. Add ACI + 3. User should follow ACI role + :expectedresults: + 1. Entry should be added + 2. Operation should succeed + 3. Operation should succeed + """ + conn = UserAccount(topo.standalone, user).bind(PW_DM) + UserAccount(conn, entry).replace('seeAlso', 'cn=1') + + +@pytest.mark.parametrize("user,entry", [ + (CANNOT, ROLEDNACCESS), + (CANNOT, USERDNACCESS), + (CANNOT, GROUPDNACCESS), + (CANNOT, LDAPURLACCESS), + (CANNOT, ATTRNAMEACCESS), + (LEVEL_0, ANCESTORS), + (LEVEL_0, GRANDPARENTS), + (LEVEL_0, PARENTS), + (LEVEL_0, CHILDREN), + (LEVEL_2, PARENTS), + (LEVEL_4, GRANDSONS), +], ids=[ + "(CANNOT,ROLEDNACCESS)", + "(CANNOT,USERDNACCESS)", + "(CANNOT,GROUPDNACCESS)", + "(CANNOT,LDAPURLACCESS)", + "(CANNOT,ATTRNAMEACCESS)", + "(LEVEL_0, ANCESTORS)", + "(LEVEL_0,GRANDPARENTS)", + "(LEVEL_0,PARENTS)", + "(LEVEL_0,CHILDREN)", + "(LEVEL_2,PARENTS)", + "(LEVEL_4,GRANDSONS)", +]) +def test_mod_see_also_negative(topo, _add_user, user, entry): + """ + Try to set seeAlso on entry with binding specific user, it will Fail + as per the ACI. + + :id: 9ea93252-7a01-11e8-a85b-8c16451d917b + :parametrized: yes + :setup: Standalone Instance + :steps: + 1. Add test entry + 2. Add ACI + 3. User should follow ACI role + :expectedresults: + 1. Entry should be added + 2. Operation should succeed + 3. Operation should succeed + """ + conn = UserAccount(topo.standalone, user).bind(PW_DM) + user = UserAccount(conn, entry) + with pytest.raises(ldap.INSUFFICIENT_ACCESS): + user.replace('seeAlso', 'cn=1') + + +@pytest.mark.parametrize("user,entry", [ + (CANNOT, USERDNACCESS), + (CANNOT, ROLEDNACCESS), + (CANNOT, GROUPDNACCESS) +]) +def test_last_three(topo, _add_user, user, entry): + """ + When we use the userattr keyword to associate the entry used to bind + with the target entry the ACI applies only to the target specified and + not to subentries. + + :id: add58a0a-7a01-11e8-85f1-8c16451d917b + :parametrized: yes + :setup: Standalone Instance + :steps: + 1. Add test entry + 2. Add ACI + 3. User should follow ACI role + :expectedresults: + 1. Entry should be added + 2. Operation should succeed + 3. 
Operation should succeed + """ + conn = UserAccount(topo.standalone, user).bind(PW_DM) + users = UserAccounts(conn, entry) + with pytest.raises(ldap.INSUFFICIENT_ACCESS): + users.create_test_user() + + +if __name__ == "__main__": + CURRENT_FILE = os.path.realpath(__file__) + pytest.main("-s -v %s" % CURRENT_FILE) diff --git a/dirsrvtests/tests/suites/acl/valueacl_part2_test.py b/dirsrvtests/tests/suites/acl/valueacl_part2_test.py new file mode 100644 index 0000000..5f5b1c6 --- /dev/null +++ b/dirsrvtests/tests/suites/acl/valueacl_part2_test.py @@ -0,0 +1,432 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2019 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. +# --- END COPYRIGHT BLOCK ---- + +import pytest, os, ldap +from lib389._constants import DEFAULT_SUFFIX, PW_DM +from lib389.idm.user import UserAccount +from lib389.idm.account import Anonymous +from lib389.idm.organizationalunit import OrganizationalUnit +from lib389.topologies import topology_st as topo +from lib389.idm.domain import Domain + +pytestmark = pytest.mark.tier1 + +CONTAINER_1_DELADD = "ou=Product Development,{}".format(DEFAULT_SUFFIX) +CONTAINER_2_DELADD = "ou=Accounting,{}".format(DEFAULT_SUFFIX) +USER_DELADD = "cn=Jeff Vedder,{}".format(CONTAINER_1_DELADD) +USER_WITH_ACI_DELADD = "cn=Sam Carter,{}".format(CONTAINER_2_DELADD) +FRED = "cn=FRED,ou=Accounting,{}".format(DEFAULT_SUFFIX) +HARRY = "cn=HARRY,ou=Accounting,{}".format(DEFAULT_SUFFIX) +KIRSTENVAUGHAN = "cn=Kirsten Vaughan,ou=Human Resources,{}".format(DEFAULT_SUFFIX) +HUMAN_OU_GLOBAL = "ou=Human Resources,{}".format(DEFAULT_SUFFIX) + + +@pytest.fixture(scope="function") +def aci_of_user(request, topo): + aci_list = Domain(topo.standalone, DEFAULT_SUFFIX).get_attr_vals('aci') + + def finofaci(): + domain = Domain(topo.standalone, DEFAULT_SUFFIX) + domain.set('aci', None) + for i in aci_list: + domain.add("aci", i) + + request.addfinalizer(finofaci) + + +@pytest.fixture(scope="function") +def _add_user(request, topo): + for i in ["Product Development", 'Accounting', "Human Resources"]: + ou = OrganizationalUnit(topo.standalone, "ou={},{}".format(i, DEFAULT_SUFFIX)) + ou.create(properties={'ou': i}) + + properties = { + 'uid': 'Jeff Vedder', + 'cn': 'Jeff Vedder', + 'sn': 'user', + 'uidNumber': '1000', + 'gidNumber': '2000', + 'homeDirectory': '/home/' + 'JeffVedder', + 'userPassword': 'password' + } + user = UserAccount(topo.standalone, 'cn=Jeff Vedder,{}'.format(CONTAINER_1_DELADD)) + user.create(properties=properties) + user.set('secretary', 'cn=Arpitoo Borah, o=Red Hat, c=As') + user.set('mail', 'anuj@anuj.Borah') + + properties = { + 'uid': 'Sam Carter', + 'cn': 'Sam Carter', + 'sn': 'user', + 'uidNumber': '1000', + 'gidNumber': '2000', + 'homeDirectory': '/home/' + 'SamCarter', + 'userPassword': 'password' + } + user = UserAccount(topo.standalone, 'cn=Sam Carter,{}'.format(CONTAINER_2_DELADD)) + user.create(properties=properties) + + properties = { + 'uid': 'Kirsten Vaughan', + 'cn': 'Kirsten Vaughan', + 'sn': 'Kirsten Vaughan', + 'uidNumber': '1000', + 'gidNumber': '2000', + 'homeDirectory': '/home/' + 'KirstenVaughan', + 'userPassword': 'password' + } + user = UserAccount(topo.standalone, 'cn=Kirsten Vaughan, ou=Human Resources,{}'.format(DEFAULT_SUFFIX)) + user.create(properties=properties) + + properties = { + 'uid': 'HARRY', + 'cn': 'HARRY', + 'sn': 'HARRY', + 'uidNumber': '1000', + 'gidNumber': '2000', + 'homeDirectory': '/home/' + 'HARRY', + 'userPassword': 'password' + } + user 
= UserAccount(topo.standalone, 'cn=HARRY, ou=Accounting,{}'.format(DEFAULT_SUFFIX)) + user.create(properties=properties) + + def fin(): + for DN in [USER_DELADD, USER_WITH_ACI_DELADD, FRED, HARRY, KIRSTENVAUGHAN, + HUMAN_OU_GLOBAL, CONTAINER_2_DELADD,CONTAINER_1_DELADD]: + ua = UserAccount(topo.standalone, DN) + try: + ua.delete() + except: + pass + + request.addfinalizer(fin) + + +def test_we_can_search_as_expected(topo, _add_user, aci_of_user): + """ + Testing the targattrfilters keyword that allows access control based on the value of the attributes being added (or deleted)) + Test that we can search as expected + :id: e845dbba-7aa9-11e8-8988-8c16451d917b + :setup: server + :steps: + 1. Add test entry + 2. Add ACI + 3. User should follow ACI role + :expectedresults: + 1. Entry should be added + 2. Operation should succeed + 3. Operation should succeed + """ + ACI_BODY = '(target="ldap:///cn=*,ou=Product Development, {}")' \ + '(targetfilter="cn=Jeff*")(targetattr="secretary || objectclass || mail")' \ + '(targattrfilters = "add=title:(title=arch*)")(version 3.0; acl "$tet_thistest"; ' \ + 'allow (write,read,search,compare) (userdn = "ldap:///anyone") ;)'.format(DEFAULT_SUFFIX) + Domain(topo.standalone, DEFAULT_SUFFIX).add("aci", ACI_BODY) + conn = Anonymous(topo.standalone).bind() + # aci will allow secretary , mail , objectclass + user = UserAccount(conn, USER_DELADD) + assert user.get_attr_vals('secretary') + assert user.get_attr_vals('mail') + assert user.get_attr_vals('objectclass') + + +def test_we_can_mod_title_as_expected(topo, _add_user, aci_of_user): + """ + Testing the targattrfilters keyword that allows access control based on the + value of the attributes being added (or deleted)) + "Valueacl Test $tet_thistest Test search will work with targattrfilters present." + :id: f8c1ea88-7aa9-11e8-a55c-8c16451d917b + :setup: server + :steps: + 1. Add test entry + 2. Add ACI + 3. User should follow ACI role + :expectedresults: + 1. Entry should be added + 2. Operation should succeed + 3. Operation should succeed + """ + ACI_BODY = '(target="ldap:///cn=*,ou=Product Development, {}")' \ + '(targetfilter="cn=Jeff*")(targetattr="secretary || objectclass || mail")' \ + '(targattrfilters = "add=title:(title=arch*)")(version 3.0; acl "$tet_thistest"; ' \ + 'allow (write,read,search,compare) (userdn = "ldap:///anyone") ;)'.format(DEFAULT_SUFFIX) + Domain(topo.standalone, DEFAULT_SUFFIX).add("aci", ACI_BODY) + # aci will not allow 'title', 'topdog' + conn = UserAccount(topo.standalone, USER_WITH_ACI_DELADD).bind(PW_DM) + user = UserAccount(conn, USER_DELADD) + with pytest.raises(ldap.INSUFFICIENT_ACCESS): + user.add('title', 'topdog') + + +def test_modify_with_multiple_filters(topo, _add_user, aci_of_user): + """ + Testing the targattrfilters keyword that allows access control based on the + value of the attributes being added (or deleted)) + "Valueacl Test $tet_thistest Allowed by multiple." + :id: fd9d223e-7aa9-11e8-a83b-8c16451d917b + :setup: server + :steps: + 1. Add test entry + 2. Add ACI + 3. User should follow ACI role + :expectedresults: + 1. Entry should be added + 2. Operation should succeed + 3. 
Operation should succeed + """ + ACI_BODY = '(targattrfilters = "add=title:(title=architect) && secretary:' \ + '(secretary=cn=Meylan,{}), del=title:(title=architect) && secretary:' \ + '(secretary=cn=Meylan,{})")(version 3.0; acl "$tet_thistest"; allow (write) ' \ + '(userdn = "ldap:///anyone") ;)'.format( + DEFAULT_SUFFIX, DEFAULT_SUFFIX + ) + Domain(topo.standalone, DEFAULT_SUFFIX).add("aci", ACI_BODY) + conn = UserAccount(topo.standalone, USER_WITH_ACI_DELADD).bind(PW_DM) + # aci will allow title some attribute only + user = UserAccount(conn, USER_DELADD) + user.add("title", "architect") + assert user.get_attr_val('title') + user.add("secretary", "cn=Meylan,dc=example,dc=com") + assert user.get_attr_val('secretary') + + +def test_denied_by_multiple_filters(topo, _add_user, aci_of_user): + """ + Testing the targattrfilters keyword that allows access control based on the value of the + attributes being added (or deleted)) + "Valueacl Test $tet_thistest Denied by multiple filters." + :id: 034c6c62-7aaa-11e8-8634-8c16451d917b + :setup: server + :steps: + 1. Add test entry + 2. Add ACI + 3. User should follow ACI role + :expectedresults: + 1. Entry should be added + 2. Operation should succeed + 3. Operation should succeed + """ + ACI_BODY = '(targattrfilters = "add=title:(title=architect) && secretary:' \ + '(secretary=cn=Meylan,{}), del=title:(title=architect) && secretary:' \ + '(secretary=cn=Meylan,{})")(version 3.0; acl "$tet_thistest"; allow (write) ' \ + '(userdn = "ldap:///anyone") ;)'.format(DEFAULT_SUFFIX, DEFAULT_SUFFIX) + Domain(topo.standalone, DEFAULT_SUFFIX).add("aci", ACI_BODY) + conn = UserAccount(topo.standalone, USER_WITH_ACI_DELADD).bind(PW_DM) + # aci will allow title some attribute only + user = UserAccount(conn, USER_DELADD) + user.add("title", "architect") + assert user.get_attr_val('title') + user.add("secretary", "cn=Meylan,dc=example,dc=com") + assert user.get_attr_val('secretary') + # aci will allow title some attribute only + with pytest.raises(ldap.INSUFFICIENT_ACCESS): + user.add("secretary", "cn=Grenoble,dc=example,dc=com") + + +def test_allowed_add_one_attribute(topo, _add_user, aci_of_user): + """ + Testing the targattrfilters keyword that allows access control based on the value of the + attributes being added (or deleted)) + "Valueacl Test $tet_thistest Allowed add one attribute (in presence of multiple filters)" + :id: 086c7f0c-7aaa-11e8-b69f-8c16451d917b + :setup: server + :steps: + 1. Add test entry + 2. Add ACI + 3. User should follow ACI role + :expectedresults: + 1. Entry should be added + 2. Operation should succeed + 3. 
Operation should succeed + """ + ACI_BODY = '(targattrfilters = "add=title:(title=architect) && secretary:(secretary=cn=Meylan, {}), ' \ + 'del=title:(title=architect) && secretary:(secretary=cn=Meylan, {})")(version 3.0; acl "$tet_thistest"; ' \ + 'allow (write) (userdn = "ldap:///{}") ;)'.format( + DEFAULT_SUFFIX, DEFAULT_SUFFIX, USER_WITH_ACI_DELADD) + Domain(topo.standalone, DEFAULT_SUFFIX).add("aci", ACI_BODY) + conn = UserAccount(topo.standalone, USER_WITH_ACI_DELADD).bind(PW_DM) + user = UserAccount(conn, USER_DELADD) + # aci will allow add ad delete + user.add('title', 'architect') + assert user.get_attr_val('title') + user.remove('title', 'architect') + + +def test_cannot_add_an_entry_with_attribute_values_we_are_not_allowed_add( + topo, _add_user, aci_of_user +): + """ + Testing the targattrfilters keyword that allows access control based on the value of the + attributes being added (or deleted)) + "Valueacl Test $tet_thistest Test not allowed add an entry" + :id: 0d0effee-7aaa-11e8-b673-8c16451d917b + :setup: server + :steps: + 1. Add test entry + 2. Add ACI + 3. User should follow ACI role + :expectedresults: + 1. Entry should be added + 2. Operation should succeed + 3. Operation should succeed + """ + ACI_BODY = '(targattrfilters = "add=title:(|(title=engineer)(title=cool dude)(title=scum)) ' \ + '&& secretary:(secretary=cn=Meylan, {}), del=title:(|(title=engineer)(title=cool dude)' \ + '(title=scum))")(version 3.0; aci "$tet_thistest"; allow (add) userdn = "ldap:///{}";)'.format( + DEFAULT_SUFFIX, DEFAULT_SUFFIX) + Domain(topo.standalone, DEFAULT_SUFFIX).add("aci", ACI_BODY) + properties = { + 'uid': 'FRED', + 'cn': 'FRED', + 'sn': 'user', + 'uidNumber': '1000', + 'gidNumber': '2000', + 'homeDirectory': '/home/' + 'FRED' + } + user = UserAccount(topo.standalone, 'cn=FRED,ou=Accounting,{}'.format(DEFAULT_SUFFIX)) + user.create(properties=properties) + user.set('title', ['anuj', 'kumar', 'borah']) + conn = UserAccount(topo.standalone, USER_WITH_ACI_DELADD).bind(PW_DM) + # aci will not allow adding objectclass + user = UserAccount(conn, USER_WITH_ACI_DELADD) + with pytest.raises(ldap.INSUFFICIENT_ACCESS): + user.add("objectclass", "person") + + +def test_on_modrdn(topo, _add_user, aci_of_user): + """ + Testing the targattrfilters keyword that allows access control based on the value of the + attributes being added (or deleted)) + Test that valuacls kick in for modrdn operation. + :id: 12985dde-7aaa-11e8-abde-8c16451d917b + :setup: server + :steps: + 1. Add test entry + 2. Add ACI + 3. User should follow ACI role + :expectedresults: + 1. Entry should be added + 2. Operation should succeed + 3. 
Operation should succeed + """ + ACI_BODY = '(target="ldap:///cn=*,ou=Accounting,{}")(targattrfilters = "add=cn:(|(cn=engineer)), ' \ + 'del=title:(|(title=engineer)(title=cool dude)(title=scum))")(version 3.0; aci "$tet_thistest"; ' \ + 'allow (write) userdn = "ldap:///{}";)'.format(DEFAULT_SUFFIX, USER_WITH_ACI_DELADD) + Domain(topo.standalone, DEFAULT_SUFFIX).add("aci", ACI_BODY) + conn = UserAccount(topo.standalone, USER_WITH_ACI_DELADD).bind(PW_DM) + # modrdn_s is not allowed with ou=OU1 + useraccount = UserAccount(conn, FRED) + with pytest.raises(ldap.INSUFFICIENT_ACCESS): + useraccount.rename("ou=OU1") + + +def test_on_modrdn_allow(topo, _add_user, aci_of_user): + """ + Testing the targattrfilters keyword that allows access control based on the value of the attributes being + added (or deleted)) + "Valueacl Test $tet_thistest Test modrdn still works (2)" + :id: 17720562-7aaa-11e8-82ee-8c16451d917b + :setup: server + :steps: + 1. Add test entry + 2. Add ACI + 3. User should follow ACI role + :expectedresults: + 1. Entry should be added + 2. Operation should succeed + 3. Operation should succeed + """ + ACI_BODY = '(target="ldap:///{}")(targattrfilters = "add=cn:((cn=engineer)), del=cn:((cn=jonny))")' \ + '(version 3.0; aci "$tet_thistest"; allow (write) ' \ + 'userdn = "ldap:///{}";)'.format(DEFAULT_SUFFIX, USER_WITH_ACI_DELADD) + Domain(topo.standalone, DEFAULT_SUFFIX).add("aci", ACI_BODY) + properties = { + 'uid': 'jonny', + 'cn': 'jonny', + 'sn': 'user', + 'uidNumber': '1000', + 'gidNumber': '2000', + 'homeDirectory': '/home/' + 'jonny' + } + user = UserAccount(topo.standalone, 'cn=jonny,{}'.format(DEFAULT_SUFFIX)) + user.create(properties=properties) + conn = UserAccount(topo.standalone, USER_WITH_ACI_DELADD).bind(PW_DM) + # aci will allow modrdn_s on cn=engineer + useraccount = UserAccount(conn, "cn=jonny,{}".format(DEFAULT_SUFFIX)) + useraccount.rename("cn=engineer") + assert useraccount.dn == 'cn=engineer,dc=example,dc=com' + + +@pytest.mark.bz979515 +def test_targattrfilters_keyword(topo): + """ + Testing the targattrfilters keyword that allows access control based on the value + of the attributes being added (or deleted)) + "Bug #979515 - ACLs inoperative in some search scenarios [rhel-6.5]" + "Bug #979516 is a clone for DS8.2 on RHEL5.9" + "Bug #979514 is a clone for RHEL6.4 zStream errata" + :id: 23f9e9d0-7aaa-11e8-b16b-8c16451d917b + :setup: server + :steps: + 1. Add test entry + 2. Add ACI + 3. User should follow ACI role + :expectedresults: + 1. Entry should be added + 2. Operation should succeed + 3. 
Operation should succeed + """ + domain = Domain(topo.standalone, DEFAULT_SUFFIX) + domain.set('aci', None) + ou = OrganizationalUnit(topo.standalone, 'ou=bug979515,{}'.format(DEFAULT_SUFFIX)) + ou.create(properties={'ou': 'bug979515'}) + Domain(topo.standalone, DEFAULT_SUFFIX).add("aci", '(target="ldap:///ou=bug979515,{}") ' + '(targetattr= "uid") ( version 3.0; acl "read other subscriber"; allow (compare, read, search) ' + 'userdn="ldap:///uid=*,ou=bug979515,{}" ; )'.format(DEFAULT_SUFFIX, DEFAULT_SUFFIX)) + properties = { + 'uid': 'acientryusr1', + 'cn': 'acientryusr1', + 'sn': 'user', + 'uidNumber': '1000', + 'gidNumber': '2000', + 'homeDirectory': '/home/' + 'acientryusr1' + } + user = UserAccount(topo.standalone, 'cn=acientryusr1,ou=bug979515,{}'.format(DEFAULT_SUFFIX)) + user.create(properties=properties) + user.set('telephoneNumber', '99972566596') + user.set('mail', 'anuj@anuj.com') + user.set("userPassword", "password") + + properties = { + 'uid': 'newaciphoneusr1', + 'cn': 'newaciphoneusr1', + 'sn': 'user', + 'uidNumber': '1000', + 'gidNumber': '2000', + 'homeDirectory': '/home/' + 'newaciphoneusr1' + } + user = UserAccount(topo.standalone, 'cn=newaciphoneusr1,ou=bug979515,{}'.format(DEFAULT_SUFFIX)) + user.create(properties=properties) + user.set('telephoneNumber', '99972566596') + user.set('mail', 'anuj@anuj.com') + conn = UserAccount(topo.standalone, "cn=acientryusr1,ou=bug979515,{}".format(DEFAULT_SUFFIX)).bind(PW_DM) + # Testing the targattrfilters keyword that allows access control based on the value of the attributes being added (or deleted)) + user = UserAccount(conn, "cn=acientryusr1,ou=bug979515,{}".format(DEFAULT_SUFFIX)) + with pytest.raises(IndexError): + user.get_attr_vals('mail') + user.get_attr_vals('telephoneNumber') + user.get_attr_vals('cn') + user = UserAccount(topo.standalone, "cn=acientryusr1,ou=bug979515,{}".format(DEFAULT_SUFFIX)) + user.get_attr_vals('mail') + user.get_attr_vals('telephoneNumber') + user.get_attr_vals('cn') + + +if __name__ == '__main__': + CURRENT_FILE = os.path.realpath(__file__) + pytest.main("-s -v %s" % CURRENT_FILE) diff --git a/dirsrvtests/tests/suites/acl/valueacl_test.py b/dirsrvtests/tests/suites/acl/valueacl_test.py new file mode 100644 index 0000000..54bc134 --- /dev/null +++ b/dirsrvtests/tests/suites/acl/valueacl_test.py @@ -0,0 +1,747 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2019 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. 
+# --- END COPYRIGHT BLOCK ---- + +import pytest, os, ldap +from lib389._constants import DEFAULT_SUFFIX, PW_DM +from lib389.idm.user import UserAccount +from lib389.idm.account import Anonymous +from lib389.idm.organizationalunit import OrganizationalUnit +from lib389.topologies import topology_st as topo +from lib389.idm.domain import Domain + +pytestmark = pytest.mark.tier1 + +CONTAINER_1_DELADD = "ou=Product Development,{}".format(DEFAULT_SUFFIX) +CONTAINER_2_DELADD = "ou=Accounting,{}".format(DEFAULT_SUFFIX) +USER_DELADD = "cn=Jeff Vedder,{}".format(CONTAINER_1_DELADD) +USER_WITH_ACI_DELADD = "cn=Sam Carter,{}".format(CONTAINER_2_DELADD) +FRED = "cn=FRED,ou=Accounting,{}".format(DEFAULT_SUFFIX) +HARRY = "cn=HARRY,ou=Accounting,{}".format(DEFAULT_SUFFIX) +KIRSTENVAUGHAN = "cn=Kirsten Vaughan,ou=Human Resources,{}".format(DEFAULT_SUFFIX) +HUMAN_OU_GLOBAL = "ou=Human Resources,{}".format(DEFAULT_SUFFIX) + + +@pytest.fixture(scope="function") +def aci_of_user(request, topo): + aci_list = Domain(topo.standalone, DEFAULT_SUFFIX).get_attr_vals('aci') + + def finofaci(): + domain = Domain(topo.standalone, DEFAULT_SUFFIX) + domain.set('aci', None) + for i in aci_list: + domain.add("aci", i) + + request.addfinalizer(finofaci) + + +@pytest.fixture(scope="function") +def _add_user(request, topo): + for i in ["Product Development", 'Accounting', "Human Resources"]: + ou = OrganizationalUnit(topo.standalone, "ou={},{}".format(i, DEFAULT_SUFFIX)) + ou.create(properties={'ou': i}) + + properties = { + 'uid': 'Jeff Vedder', + 'cn': 'Jeff Vedder', + 'sn': 'user', + 'uidNumber': '1000', + 'gidNumber': '2000', + 'homeDirectory': '/home/' + 'JeffVedder', + 'userPassword': 'password' + } + user = UserAccount(topo.standalone, 'cn=Jeff Vedder,{}'.format(CONTAINER_1_DELADD)) + user.create(properties=properties) + user.set('secretary', 'cn=Arpitoo Borah, o=Red Hat, c=As') + user.set('mail', 'anuj@anuj.Borah') + + properties = { + 'uid': 'Sam Carter', + 'cn': 'Sam Carter', + 'sn': 'user', + 'uidNumber': '1000', + 'gidNumber': '2000', + 'homeDirectory': '/home/' + 'SamCarter', + 'userPassword': 'password' + } + user = UserAccount(topo.standalone, 'cn=Sam Carter,{}'.format(CONTAINER_2_DELADD)) + user.create(properties=properties) + + properties = { + 'uid': 'Kirsten Vaughan', + 'cn': 'Kirsten Vaughan', + 'sn': 'Kirsten Vaughan', + 'uidNumber': '1000', + 'gidNumber': '2000', + 'homeDirectory': '/home/' + 'KirstenVaughan', + 'userPassword': 'password' + } + user = UserAccount(topo.standalone, 'cn=Kirsten Vaughan, ou=Human Resources,{}'.format(DEFAULT_SUFFIX)) + user.create(properties=properties) + + properties = { + 'uid': 'HARRY', + 'cn': 'HARRY', + 'sn': 'HARRY', + 'uidNumber': '1000', + 'gidNumber': '2000', + 'homeDirectory': '/home/' + 'HARRY', + 'userPassword': 'password' + } + user = UserAccount(topo.standalone, 'cn=HARRY, ou=Accounting,{}'.format(DEFAULT_SUFFIX)) + user.create(properties=properties) + + def fin(): + for DN in [USER_DELADD, USER_WITH_ACI_DELADD, FRED, HARRY, KIRSTENVAUGHAN, + HUMAN_OU_GLOBAL, CONTAINER_2_DELADD,CONTAINER_1_DELADD]: + ua = UserAccount(topo.standalone, DN) + try: + ua.delete() + except: + pass + + request.addfinalizer(fin) + + +class _ModTitleArchitectJeffVedder: + def __init__(self, topo, value, conn): + self.topo = topo + self.value = value + self.conn = conn + self.user = UserAccount(self.conn, USER_DELADD) + + def add(self): + self.user.add("title", self.value) + + def delete(self): + self.user.remove("title", self.value) + + +class _DelTitleArchitectJeffVedder: + def 
__init__(self, topo, conn): + self.topo = topo + self.conn = conn + + def delete(self): + UserAccount(self.conn, USER_DELADD).remove("title", None) + + +class _AddTitleWithRoot: + def __init__(self, topo, value): + self.topo = topo + self.value = value + self.user = UserAccount(self.topo.standalone, USER_DELADD) + + def add(self): + self.user.add("title", self.value) + + def delete(self): + self.user.remove("title", self.value) + + +class _AddFREDWithRoot: + def __init__(self, topo, title1, title2, title3): + self.topo = topo + self.title1 = title1 + self.title2 = title2 + self.title3 = title3 + + def create(self): + properties = { + 'uid': 'FRED', + 'cn': 'FRED', + 'sn': 'user', + 'uidNumber': '1000', + 'gidNumber': '2000', + 'homeDirectory': '/home/' + 'FRED' + } + user = UserAccount(self.topo.standalone, "cn=FRED, ou=Accounting,{}".format(DEFAULT_SUFFIX)) + user.create(properties=properties) + user.set("title", [self.title1, self.title2, self.title3]) + + +def test_delete_an_attribute_value_we_are_not_allowed_to_delete( + topo, _add_user, aci_of_user +): + """ + Testing the targattrfilters keyword that allows access control based on the value + of the attributes being added (or deleted)) + Test that we can MODIFY:add an attribute value we are allowed to add + :id: 7c41baa6-7aa9-11e8-9bdc-8c16451d917b + :setup: server + :steps: + 1. Add test entry + 2. Add ACI + 3. User should follow ACI role + :expectedresults: + 1. Entry should be added + 2. Operation should succeed + 3. Operation should succeed + """ + ACI_BODY = '(targattrfilters = "add=title:(title=architect), del=title:(title=architect)")' \ + '(version 3.0; acl "ACI NAME"; allow (write) (userdn = "ldap:///{}") ;)'.format(USER_WITH_ACI_DELADD) + Domain(topo.standalone, DEFAULT_SUFFIX).add("aci", ACI_BODY) + conn = UserAccount(topo.standalone, USER_WITH_ACI_DELADD).bind(PW_DM) + with pytest.raises(ldap.INSUFFICIENT_ACCESS): + _ModTitleArchitectJeffVedder(topo, "engineer", conn).add() + _ModTitleArchitectJeffVedder(topo, "architect", conn).add() + + +def test_donot_allow_write_access_to_title_if_value_is_not_architect( + topo, _add_user, aci_of_user +): + """ + Testing the targattrfilters keyword that allows access control based on the value of the + attributes being added (or deleted)) + Test that we cannot MODIFY:add an attribute value we are not allowed to add + :id: 822c607e-7aa9-11e8-b2e7-8c16451d917b + :setup: server + :steps: + 1. Add test entry + 2. Add ACI + 3. User should follow ACI role + :expectedresults: + 1. Entry should be added + 2. Operation should succeed + 3. 
Operation should succeed + """ + ACI_BODY = '(targattrfilters = "add=title:(title=architect), del=title:(title=architect)")' \ + '(version 3.0; acl "$tet_thistest"; allow (write) (userdn = "ldap:///{}") ;)'.format(USER_WITH_ACI_DELADD) + Domain(topo.standalone, DEFAULT_SUFFIX).add("aci", ACI_BODY) + # aci will allow to add title architect + conn = UserAccount(topo.standalone, USER_WITH_ACI_DELADD).bind(PW_DM) + _ModTitleArchitectJeffVedder(topo, "architect", conn).add() + # aci will noo allow to add title architect1 + with pytest.raises(ldap.INSUFFICIENT_ACCESS): + _ModTitleArchitectJeffVedder(topo, "architect1", conn).add() + + +def test_delete_an_attribute_value_we_are_allowed_to_delete( + topo, _add_user, aci_of_user +): + """ + Testing the targattrfilters keyword that allows access control based on the value of + the attributes being added (or deleted)) + Test that we can MODIFY:delete an attribute value we are allowed to delete, + :id: 86f36b34-7aa9-11e8-ab16-8c16451d917b + :setup: server + :steps: + 1. Add test entry + 2. Add ACI + 3. User should follow ACI role + :expectedresults: + 1. Entry should be added + 2. Operation should succeed + 3. Operation should succeed + """ + ACI_BODY = '(targattrfilters = "add=title:(title=architect), del=title:(title=architect)")' \ + '(version 3.0; acl "$tet_thistest"; allow (write) (userdn = "ldap:///{}") ;)'.format(USER_WITH_ACI_DELADD) + Domain(topo.standalone, DEFAULT_SUFFIX).add("aci", ACI_BODY) + _AddTitleWithRoot(topo, "architect").add() + # aci will allow to delete title architect + conn = UserAccount(topo.standalone, USER_WITH_ACI_DELADD).bind(PW_DM) + r1 = _ModTitleArchitectJeffVedder(topo, "architect", conn) + r1.delete() + + +def test_delete_an_attribute_value_we_are_not_allowed_to_deleted( + topo, _add_user, aci_of_user +): + """ + Testing the targattrfilters keyword that allows access control based on the value of the + attributes being added (or deleted)) + Test that we cannot MODIFY:delete an attribute value we are allowed to delete, + :id: 8c9f3a90-7aa9-11e8-bf2e-8c16451d917b + :setup: server + :steps: + 1. Add test entry + 2. Add ACI + 3. User should follow ACI role + :expectedresults: + 1. Entry should be added + 2. Operation should succeed + 3. Operation should succeed + """ + ACI_BODY = '(targattrfilters = "add=title:(title=architect), del=title:(title=architect)")' \ + '(version 3.0; acl "$tet_thistest"; allow (write) (userdn = "ldap:///{}") ;)'.format(USER_WITH_ACI_DELADD) + Domain(topo.standalone, DEFAULT_SUFFIX).add("aci", ACI_BODY) + _AddTitleWithRoot(topo, "engineer").add() + # acl will not allow to delete title engineer + conn = UserAccount(topo.standalone, USER_WITH_ACI_DELADD).bind(PW_DM) + with pytest.raises(ldap.INSUFFICIENT_ACCESS): + _ModTitleArchitectJeffVedder(topo, "engineer", conn).delete() + + +def test_allow_modify_replace(topo, _add_user, aci_of_user): + """ + Testing the targattrfilters keyword that allows access control based on the value of the + attributes being added (or deleted)) + Test that we can MODIFY:replace an attribute if we have correct add/delete rights. + :id: 9148a234-7aa9-11e8-a1f1-8c16451d917b + :setup: server + :steps: + 1. Add test entry + 2. Add ACI + 3. User should follow ACI role + :expectedresults: + 1. Entry should be added + 2. Operation should succeed + 3. 
Operation should succeed + """ + ACI_BODY = '(targattrfilters = "add=title:(title=engineer), del=title:(|(title=architect)' \ + '(title=idiot))")(version 3.0; acl "$tet_thistest"; ' \ + 'allow (write) (userdn = "ldap:///{}") ;)'.format(USER_WITH_ACI_DELADD) + Domain(topo.standalone, DEFAULT_SUFFIX).add("aci", ACI_BODY) + _AddTitleWithRoot(topo, "architect").add() + _AddTitleWithRoot(topo, "idiot").add() + _AddTitleWithRoot(topo, "engineer").add() + # acl will not allow to delete title engineer + conn = UserAccount(topo.standalone, USER_WITH_ACI_DELADD).bind(PW_DM) + with pytest.raises(ldap.INSUFFICIENT_ACCESS): + _ModTitleArchitectJeffVedder(topo, "engineer", conn).delete() + + +def test_allow_modify_delete(topo, _add_user, aci_of_user): + """ + Testing the targattrfilters keyword that allows access control based on the value of the + attributes being added (or deleted)) + "Valueacl Test $tet_thistest Don't Allow modify:replace because of lack of delete rights" + :id: 962842d2-7aa9-11e8-b39e-8c16451d917b + :setup: server + :steps: + 1. Add test entry + 2. Add ACI + 3. User should follow ACI role + :expectedresults: + 1. Entry should be added + 2. Operation should succeed + 3. Operation should succeed + """ + ACI_BODY = '(targattrfilters = "add=title:(title=engineer), del=title:(|(title=architect))")' \ + '(version 3.0; acl "$tet_thistest"; allow (write) ' \ + '(userdn = "ldap:///{}") ;)'.format(USER_WITH_ACI_DELADD) + Domain(topo.standalone, DEFAULT_SUFFIX).add("aci", ACI_BODY) + _AddTitleWithRoot(topo, "architect").add() + _AddTitleWithRoot(topo, "idiot").add() + conn = UserAccount(topo.standalone, USER_WITH_ACI_DELADD).bind(PW_DM) + _ModTitleArchitectJeffVedder(topo, "architect", conn).delete() + # acl will not allow to delete title idiot + conn = UserAccount(topo.standalone, USER_WITH_ACI_DELADD).bind(PW_DM) + with pytest.raises(ldap.INSUFFICIENT_ACCESS): + _ModTitleArchitectJeffVedder(topo, "idiot", conn).delete() + + +def test_replace_an_attribute_if_we_lack(topo, _add_user, aci_of_user): + """ + Testing the targattrfilters keyword that allows access control based on the value of the + attributes being added (or deleted)) + Test that we cannot MODIFY:replace an attribute if we lack + :id: 9b1e6afa-7aa9-11e8-ac5b-8c16451d917b + :setup: server + :steps: + 1. Add test entry + 2. Add ACI + 3. User should follow ACI role + :expectedresults: + 1. Entry should be added + 2. Operation should succeed + 3. Operation should succeed + """ + ACI_BODY = '(targattrfilters = "add=title:(title=engineer), del=title:(|(title=architect))")' \ + '(version 3.0; acl "$tet_thistest"; allow (write) ' \ + '(userdn = "ldap:///{}") ;)'.format(USER_WITH_ACI_DELADD) + Domain(topo.standalone, DEFAULT_SUFFIX).add("aci", ACI_BODY) + _AddTitleWithRoot(topo, "architect").add() + _AddTitleWithRoot(topo, "idiot").add() + conn = UserAccount(topo.standalone, USER_WITH_ACI_DELADD).bind(PW_DM) + _ModTitleArchitectJeffVedder(topo, "architect", conn).delete() + # acl will not allow to delete title idiot + with pytest.raises(ldap.INSUFFICIENT_ACCESS): + _ModTitleArchitectJeffVedder(topo, "idiot", conn).delete() + + +def test_remove_an_attribute_if_we_have_del_rights_to_all_attr_value( + topo, _add_user, aci_of_user +): + """ + Testing the targattrfilters keyword that allows access control based on the value of the + attributes being added (or deleted)) + Test that we can use MODIFY:delete to entirely remove an attribute if we have del rights + to all attr values negative case tested next. 
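+ A rough LDIF equivalent of the remove() call this test performs (sketch
+ only, assuming the default suffix dc=example,dc=com):
+ dn: cn=Jeff Vedder,ou=Product Development,dc=example,dc=com
+ changetype: modify
+ delete: title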
+ :id: a0c9e0c4-7aa9-11e8-8880-8c16451d917b + :setup: server + :steps: + 1. Add test entry + 2. Add ACI + 3. User should follow ACI role + :expectedresults: + 1. Entry should be added + 2. Operation should succeed + 3. Operation should succeed + """ + ACI_BODY = '(targattrfilters = "add=title:(title=engineer), del=title:(|(title=architect)' \ + '(title=idiot))")(version 3.0; acl "$tet_thistest"; allow (write)' \ + ' (userdn = "ldap:///{}") ;)'.format(USER_WITH_ACI_DELADD) + Domain(topo.standalone, DEFAULT_SUFFIX).add("aci", ACI_BODY) + _AddTitleWithRoot(topo, "architect").add() + _AddTitleWithRoot(topo, "idiot").add() + # acl will allow to delete title idiot + conn = UserAccount(topo.standalone, USER_WITH_ACI_DELADD).bind(PW_DM) + _DelTitleArchitectJeffVedder(topo,conn).delete() + + +def test_remove_an_attribute_if_we_donot_have_del_rights_to_all_attr_value( + topo, _add_user, aci_of_user +): + """ + Testing the targattrfilters keyword that allows access control based on the value of the + attributes being added (or deleted)) + Test that we can use MODIFY:delete to entirely remove an attribute if we have not del + rights to all attr values + :id: a6862eaa-7aa9-11e8-8bf9-8c16451d917b + :setup: server + :steps: + 1. Add test entry + 2. Add ACI + 3. User should follow ACI role + :expectedresults: + 1. Entry should be added + 2. Operation should succeed + 3. Operation should succeed + """ + ACI_BODY = '(targattrfilters = "add=title:(title=engineer), del=title:(|(title=architect)' \ + '(title=idiot))")(version 3.0; acl "$tet_thistest"; allow (write) ' \ + '(userdn = "ldap:///{}") ;)'.format(USER_WITH_ACI_DELADD) + Domain(topo.standalone, DEFAULT_SUFFIX).add("aci", ACI_BODY) + _AddTitleWithRoot(topo, "architect").add() + _AddTitleWithRoot(topo, "sailor").add() + # aci will not allow to delete all titles + conn = UserAccount(topo.standalone, USER_WITH_ACI_DELADD).bind(PW_DM) + with pytest.raises(ldap.INSUFFICIENT_ACCESS): + _DelTitleArchitectJeffVedder(topo, conn).delete() + + +def test_remove_an_attribute_if_we_have_del_rights_to_all_attr_values( + topo, _add_user, aci_of_user +): + """ + Testing the targattrfilters keyword that allows access control based on the value of the + attributes being added (or deleted)) + Test that we can use MODIFY:replace to entirely remove an attribute if we have del rights to all attr values + :id: ab04c7e8-7aa9-11e8-84db-8c16451d917b + :setup: server + :steps: + 1. Add test entry + 2. Add ACI + 3. User should follow ACI role + :expectedresults: + 1. Entry should be added + 2. Operation should succeed + 3. 
Operation should succeed + """ + ACI_BODY = '(targattrfilters = "add=title:(title=engineer), del=title:(|(title=architect)' \ + '(title=idiot))")(version 3.0; acl "$tet_thistest"; allow (write) ' \ + '(userdn = "ldap:///{}") ;)'.format(USER_WITH_ACI_DELADD) + Domain(topo.standalone, DEFAULT_SUFFIX).add("aci", ACI_BODY) + _AddTitleWithRoot(topo, "architect").add() + _AddTitleWithRoot(topo, "idiot").add() + # aci allowing to delete an_attribute_if_we_have_del_rights_to_all_attr_values + conn = UserAccount(topo.standalone, USER_WITH_ACI_DELADD).bind(PW_DM) + _DelTitleArchitectJeffVedder(topo, conn).delete() + + +def test_cantnot_delete_an_entry_with_attribute_values_we_are_not_allowed_delete( + topo, _add_user, aci_of_user +): + """ + Testing the targattrfilters keyword that allows access control based on the value of + the attributes being added (or deleted)) + Test we cannot DELETE an entry with attribute values we are not allowed delete, + :id: b525d94c-7aa9-11e8-8539-8c16451d917b + :setup: server + :steps: + 1. Add test entry + 2. Add ACI + 3. User should follow ACI role + :expectedresults: + 1. Entry should be added + 2. Operation should succeed + 3. Operation should succeed + """ + ACI_BODY = '(targattrfilters = "add=title:(|(title=engineer)(title=cool dude)(title=scum)), ' \ + 'del=title:(|(title=engineer)(title=cool dude)(title=scum))")(version 3.0; ' \ + 'aci "$tet_thistest"; allow (delete) userdn = "ldap:///{}";)'.format(USER_WITH_ACI_DELADD) + Domain(topo.standalone, DEFAULT_SUFFIX).add("aci", ACI_BODY) + _AddFREDWithRoot(topo, "engineer", "cool dude", "ANuj").create() + conn = UserAccount(topo.standalone, USER_WITH_ACI_DELADD).bind(PW_DM) + # aci will not allow to delete + with pytest.raises(ldap.INSUFFICIENT_ACCESS): + UserAccount(conn, FRED).delete() + + +def test_we_can_add_and_delete_an_entry_with_attribute_values_we_are_allowed_add_and_delete( + topo, _add_user, aci_of_user +): + """ + Testing the targattrfilters keyword that allows access control based on the value of the + attributes being added (or deleted)) + Test we can DELETE an entry with attribute values we are allowed delete + :id: ba138e54-7aa9-11e8-8037-8c16451d917b + :setup: server + :steps: + 1. Add test entry + 2. Add ACI + 3. User should follow ACI role + :expectedresults: + 1. Entry should be added + 2. Operation should succeed + 3. Operation should succeed + """ + ACI_BODY = '(targattrfilters = "add=title:(|(title=engineer)(title=cool dude)(title=scum)), ' \ + 'del=title:(|(title=engineer)(title=cool dude)(title=scum))")(version 3.0; ' \ + 'aci "$tet_thistest"; allow (delete) userdn = "ldap:///{}";)'.format(USER_WITH_ACI_DELADD) + Domain(topo.standalone, DEFAULT_SUFFIX).add("aci", ACI_BODY) + _AddFREDWithRoot(topo, "engineer", "cool dude", "scum").create() + conn = UserAccount(topo.standalone, USER_WITH_ACI_DELADD).bind(PW_DM) + # aci will allow to delete + UserAccount(conn, FRED).delete() + + +def test_allow_title(topo, _add_user, aci_of_user): + """ + Testing the targattrfilters keyword that allows access control based on the value of the + attributes being added (or deleted)) + Test that if attr appears in targetattr and in targattrfilters then targattrfilters + applies--ie. targattrfilters is a refinement of targattrfilters. + :id: beadf328-7aa9-11e8-bb08-8c16451d917b + :setup: server + :steps: + 1. Add test entry + 2. Add ACI + 3. User should follow ACI role + :expectedresults: + 1. Entry should be added + 2. Operation should succeed + 3. 
Operation should succeed + """ + ACI_BODY = '(targetattr="title")(targattrfilters = "add=title:(|(title=engineer)' \ + '(title=cool dude)(title=scum)), del=title:(|(title=engineer)(title=cool dude)' \ + '(title=scum))")(version 3.0; aci "$tet_thistest"; allow (write) ' \ + 'userdn = "ldap:///{}";)'.format(USER_WITH_ACI_DELADD) + Domain(topo.standalone, DEFAULT_SUFFIX).add("aci", ACI_BODY) + _AddTitleWithRoot(topo, "engineer").add() + _AddTitleWithRoot(topo, "cool dude").add() + # # aci will not allow to add title topdog + conn = UserAccount(topo.standalone, USER_WITH_ACI_DELADD).bind(PW_DM) + with pytest.raises(ldap.INSUFFICIENT_ACCESS): + _ModTitleArchitectJeffVedder(topo, "topdog", conn).add() + + +def test_allow_to_modify(topo, _add_user, aci_of_user): + """ + Testing the targattrfilters keyword that allows access control based on the value of the + attributes being added (or deleted)) + Test that I can have secretary in targetattr and title in targattrfilters. + :id: c32e4704-7aa9-11e8-951d-8c16451d917b + :setup: server + :steps: + 1. Add test entry + 2. Add ACI + 3. User should follow ACI role + :expectedresults: + 1. Entry should be added + 2. Operation should succeed + 3. Operation should succeed + """ + ACI_BODY = '(targetattr="secretary")(targattrfilters = "add=title:(|(title=engineer)' \ + '(title=cool dude)(title=scum)), del=title:(|(title=engineer)(title=cool dude)' \ + '(title=scum))")(version 3.0; aci "$tet_thistest"; allow (write)' \ + ' userdn = "ldap:///{}";)'.format(USER_WITH_ACI_DELADD) + Domain(topo.standalone, DEFAULT_SUFFIX).add("aci", ACI_BODY) + _AddTitleWithRoot(topo, "engineer").add() + _AddTitleWithRoot(topo, "cool dude").add() + conn = UserAccount(topo.standalone, USER_WITH_ACI_DELADD).bind(PW_DM) + user = UserAccount(conn, USER_DELADD) + # aci will allow to add 'secretary', "cn=emporte quoi + user.add('secretary', "cn=emporte quoi, {}".format(DEFAULT_SUFFIX)) + assert user.get_attr_val('secretary') + + +def test_selfwrite_does_not_confer_write_on_a_targattrfilters_atribute(topo, _add_user, aci_of_user): + """ + Testing the targattrfilters keyword that allows access control based on the value of + the attributes being added (or deleted)) + Selfwrite does not confer "write" on a targattrfilters atribute. + :id: c7b9ec2e-7aa9-11e8-ba4a-8c16451d917b + :setup: server + :steps: + 1. Add test entry + 2. Add ACI + 3. User should follow ACI role + :expectedresults: + 1. Entry should be added + 2. Operation should succeed + 3. Operation should succeed + """ + ACI_BODY = '(targattrfilters = "add=title:(|(title=engineer)(title=cool dude)(title=scum)), ' \ + 'del=title:(|(title=engineer)(title=cool dude)(title=scum))")(version 3.0; ' \ + 'aci "$tet_thistest"; allow (selfwrite) userdn = "ldap:///{}";)'.format(USER_WITH_ACI_DELADD) + Domain(topo.standalone, DEFAULT_SUFFIX).add("aci", ACI_BODY) + # aci will not allow to add selfwrite_does_not_confer_write_on_a_targattrfilters_atribute + conn = UserAccount(topo.standalone, USER_WITH_ACI_DELADD).bind(PW_DM) + with pytest.raises(ldap.INSUFFICIENT_ACCESS): + _ModTitleArchitectJeffVedder(topo, "engineer", conn).add() + + +def test_selfwrite_continues_to_give_rights_to_attr_in_targetattr_list( + topo, _add_user, aci_of_user +): + """ + Testing the targattrfilters keyword that allows access control based on the value of + the attributes being added (or deleted)) + Selfwrite continues to give rights to attr in targetattr list. + :id: cd287680-7aa9-11e8-a8e2-8c16451d917b + :setup: server + :steps: + 1. Add test entry + 2. Add ACI + 3. 
User should follow ACI role + :expectedresults: + 1. Entry should be added + 2. Operation should succeed + 3. Operation should succeed + """ + ACI_BODY = '(targetattr="secretary")(targattrfilters = "add=title:(|(title=engineer)' \ + '(title=cool dude)(title=scum)), del=title:(|(title=engineer)(title=cool dude)' \ + '(title=scum))")(version 3.0; aci "$tet_thistest"; allow (selfwrite) ' \ + 'userdn = "ldap:///{}";)'.format(USER_WITH_ACI_DELADD) + Domain(topo.standalone, DEFAULT_SUFFIX).add("aci", ACI_BODY) + # selfwrite_continues_to_give_rights_to_attr_in_targetattr_list + conn = UserAccount(topo.standalone, USER_WITH_ACI_DELADD).bind(PW_DM) + with pytest.raises(ldap.INSUFFICIENT_ACCESS): + _ModTitleArchitectJeffVedder(topo, "engineer", conn).add() + + +def test_add_an_attribute_value_we_are_allowed_to_add_with_ldapanyone( + topo, _add_user, aci_of_user +): + """ + Testing the targattrfilters keyword that allows access control based on the value of the + attributes being added (or deleted)) + Test that we can MODIFY:add an attribute value we are allowed to add with ldap:///anyone + :id: d1e1d7ac-7aa9-11e8-b968-8c16451d917b + :setup: server + :steps: + 1. Add test entry + 2. Add ACI + 3. User should follow ACI role + :expectedresults: + 1. Entry should be added + 2. Operation should succeed + 3. Operation should succeed + """ + ACI_BODY = '(targattrfilters = "add=title:(title=architect), del=title:(title=architect)")' \ + '(version 3.0; acl "$tet_thistest"; allow (write) userdn = "ldap:///anyone";)' + Domain(topo.standalone, DEFAULT_SUFFIX).add("aci", ACI_BODY) + _AddTitleWithRoot(topo, "engineer").add() + # aci will allow to add title architect + conn = UserAccount(topo.standalone, USER_WITH_ACI_DELADD).bind(PW_DM) + _ModTitleArchitectJeffVedder(topo, "architect", conn).add() + + +def test_hierarchy(topo, _add_user, aci_of_user): + """ + Testing the targattrfilters keyword that allows access control based on the value of + the attributes being added (or deleted)) + Test that with two targattrfilters in the hierarchy that the general one applies. + This is the correct behaviour, even if it's a bit + :id: d7ae354a-7aa9-11e8-8b0d-8c16451d917b + :setup: server + :steps: + 1. Add test entry + 2. Add ACI + 3. User should follow ACI role + :expectedresults: + 1. Entry should be added + 2. Operation should succeed + 3. Operation should succeed + """ + ACI_BODY = '(targattrfilters = "add=title:(title=arch*)")(version 3.0; acl "$tet_thistest"; ' \ + 'allow (write) (userdn = "ldap:///anyone") ;)' + ACI_BODY1 = '(targattrfilters = "add=title:(title=architect)")(version 3.0; ' \ + 'acl "$tet_thistest"; allow (write) (userdn = "ldap:///anyone") ;)' + Domain(topo.standalone, DEFAULT_SUFFIX).add("aci", ACI_BODY) + Domain(topo.standalone, DEFAULT_SUFFIX).add("aci", ACI_BODY1) + _AddTitleWithRoot(topo, "engineer").add() + # aci will allow to add title architect + conn = UserAccount(topo.standalone, USER_WITH_ACI_DELADD).bind(PW_DM) + _ModTitleArchitectJeffVedder(topo, "architect", conn).add() + # aci will not allow to add title architect + with pytest.raises(ldap.INSUFFICIENT_ACCESS): + _ModTitleArchitectJeffVedder(topo, "engineer", conn).add() + + +def test_targattrfilters_and_search_permissions_and_that_ldapmodify_works_as_expected( + topo, _add_user, aci_of_user +): + """ + Testing the targattrfilters keyword that allows access control based on the value of the + attributes being added (or deleted)) + Test that we can have targattrfilters and search permissions and that ldapmodify works as expected. 
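+ A rough LDIF equivalent of the modify the bound user performs here (sketch
+ only, assuming the default suffix dc=example,dc=com):
+ dn: cn=Jeff Vedder,ou=Product Development,dc=example,dc=com
+ changetype: modify
+ add: title
+ title: architect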
+ :id: ddae7a22-7aa9-11e8-ad6b-8c16451d917b + :setup: server + :steps: + 1. Add test entry + 2. Add ACI + 3. User should follow ACI role + :expectedresults: + 1. Entry should be added + 2. Operation should succeed + 3. Operation should succeed + """ + ACI_BODY = '(targetattr="secretary || objectclass || mail")(targattrfilters = "add=title:' \ + '(title=arch*)")(version 3.0; acl "$tet_thistest"; ' \ + 'allow (write,read,search,compare) (userdn = "ldap:///anyone") ;)' + Domain(topo.standalone, DEFAULT_SUFFIX).add("aci", ACI_BODY) + # aci will allow to add title architect + conn = UserAccount(topo.standalone, USER_WITH_ACI_DELADD).bind(PW_DM) + _ModTitleArchitectJeffVedder(topo, "architect", conn).add() + + +def test_targattrfilters_and_search_permissions_and_that_ldapmodify_works_as_expected_two( + topo, _add_user, aci_of_user +): + """ + Testing the targattrfilters keyword that allows access control based on the value of + the attributes being added (or deleted)) + Test that we can have targattrfilters and search permissions and that ldapsearch works as expected. + :id: e25d116e-7aa9-11e8-81d8-8c16451d917b + :setup: server + :steps: + 1. Add test entry + 2. Add ACI + 3. User should follow ACI role + :expectedresults: + 1. Entry should be added + 2. Operation should succeed + 3. Operation should succeed + """ + ACI_BODY = '(targetattr="secretary || objectclass || mail")(targattrfilters = ' \ + '"add=title:(title=arch*)")(version 3.0; acl "$tet_thistest"; allow ' \ + '(write,read,search,compare) (userdn = "ldap:///anyone") ;)' + Domain(topo.standalone, DEFAULT_SUFFIX).add("aci", ACI_BODY) + conn = Anonymous(topo.standalone).bind() + user = UserAccount(conn, USER_DELADD) + #targattrfilters_and_search_permissions_and_that_ldapmodify_works_as_expected + assert user.get_attr_vals('secretary') + assert user.get_attr_vals('mail') + assert user.get_attr_vals('objectclass') + + +if __name__ == "__main__": + CURRENT_FILE = os.path.realpath(__file__) + pytest.main("-s -v %s" % CURRENT_FILE) diff --git a/dirsrvtests/tests/suites/attr_encryption/__init__.py b/dirsrvtests/tests/suites/attr_encryption/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/dirsrvtests/tests/suites/attr_encryption/attr_encryption_test.py b/dirsrvtests/tests/suites/attr_encryption/attr_encryption_test.py new file mode 100644 index 0000000..694bab8 --- /dev/null +++ b/dirsrvtests/tests/suites/attr_encryption/attr_encryption_test.py @@ -0,0 +1,453 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2016 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. 
+# --- END COPYRIGHT BLOCK --- +# + +import logging +import pytest +from lib389.tasks import * +from lib389.topologies import topology_st as topo +from lib389.utils import * +from lib389._constants import DEFAULT_SUFFIX +from lib389.idm.user import UserAccounts, TEST_USER_PROPERTIES +from lib389.backend import Backends +from lib389.idm.domain import Domain +from lib389.encrypted_attributes import EncryptedAttrs + +pytestmark = pytest.mark.tier1 + +USER_DN = 'uid=test_user,%s' % DEFAULT_SUFFIX + +logging.getLogger(__name__).setLevel(logging.INFO) +log = logging.getLogger(__name__) + + +@pytest.fixture(scope="module") +def enable_user_attr_encryption(topo, request): + """ Enables attribute encryption for various attributes + Adds a test user with encrypted attributes + """ + + log.info("Enable TLS for attribute encryption") + topo.standalone.enable_tls() + + log.info("Enables attribute encryption") + backends = Backends(topo.standalone) + backend = backends.list()[0] + encrypt_attrs = EncryptedAttrs(topo.standalone, basedn='cn=encrypted attributes,{}'.format(backend.dn)) + log.info("Enables attribute encryption for employeeNumber and telephoneNumber") + emp_num_encrypt = encrypt_attrs.create(properties={'cn': 'employeeNumber', 'nsEncryptionAlgorithm': 'AES'}) + telephone_encrypt = encrypt_attrs.create(properties={'cn': 'telephoneNumber', 'nsEncryptionAlgorithm': '3DES'}) + + log.info("Add a test user with encrypted attributes") + users = UserAccounts(topo.standalone, DEFAULT_SUFFIX) + test_user = users.create(properties=TEST_USER_PROPERTIES) + test_user.replace('employeeNumber', '1000') + test_user.replace('telephonenumber', '1234567890') + + def fin(): + log.info("Remove attribute encryption for various attributes") + emp_num_encrypt.delete() + telephone_encrypt.delete() + + request.addfinalizer(fin) + return test_user + + +def test_basic(topo, enable_user_attr_encryption): + """Tests encrypted attributes with a test user entry + :id: d767d5c8-b934-4b14-9774-bd13480d81b3 + :setup: Standalone instance + Enable AES encryption config on employeenumber + Enable 3DES encryption config on telephoneNumber + Add a test user with with encrypted attributes + :steps: + 1. Restart the server + 2. Check employeenumber encryption enabled + 3. Check telephoneNumber encryption enabled + 4. Check that encrypted attribute is present for user i.e. telephonenumber + :expectedresults: + 1. This should be successful + 2. This should be successful + 3. This should be successful + 4. This should be successful + """ + + log.info("Restart the server") + topo.standalone.restart() + backends = Backends(topo.standalone) + backend = backends.list()[0] + encrypt_attrs = backend.get_encrypted_attrs() + + log.info("Extracting values of cn from the list of objects in encrypt_attrs") + log.info("And appending the cn values in a list") + enc_attrs_cns = [] + for enc_attr in encrypt_attrs: + enc_attrs_cns.append(enc_attr.rdn) + + log.info("Check employeenumber encryption is enabled") + assert "employeeNumber" in enc_attrs_cns + + log.info("Check telephoneNumber encryption is enabled") + assert "telephoneNumber" in enc_attrs_cns + + log.info("Check that encrypted attribute is present for user i.e. 
telephonenumber") + assert enable_user_attr_encryption.present('telephoneNumber') + + +def test_export_import_ciphertext(topo, enable_user_attr_encryption): + """Configure attribute encryption, store some data, check that we can export the ciphertext + :id: b433e215-2926-48a5-818f-c21abc40fc2d + :setup: Standalone instance + Enable AES encryption config on employeenumber + Enable 3DES encryption config on telephoneNumber + Add a test user with encrypted attributes + :steps: + 1. Export data as ciphertext + 2. Check that the attribute is present in the exported file + 3. Check that the encrypted value of attribute is not present in the exported file + 4. Delete the test user entry with encrypted data + 5. Import the previously exported data as ciphertext + 6. Check attribute telephoneNumber should be imported + :expectedresults: + 1. This should be successful + 2. This should be successful + 3. This should be successful + 4. This should be successful + 5. This should be successful + 6. This should be successful + """ + + log.info("Export data as ciphertext") + export_ldif = os.path.join(topo.standalone.ds_paths.ldif_dir, "export_ciphertext.ldif") + + # Offline export + topo.standalone.stop() + if not topo.standalone.db2ldif(bename=DEFAULT_BENAME, suffixes=(DEFAULT_SUFFIX,), + excludeSuffixes=None, encrypt=False, repl_data=None, outputfile=export_ldif): + log.fatal('Failed to run offline db2ldif') + assert False + topo.standalone.start() + + log.info("Check that the attribute is present in the exported file") + log.info("Check that the encrypted value of attribute is not present in the exported file") + with open(export_ldif, 'r') as ldif_file: + ldif = ldif_file.read() + assert 'telephonenumber' in ldif + assert 'telephonenumber: 1234567890' not in ldif + + log.info("Delete the test user entry with encrypted data") + enable_user_attr_encryption.delete() + + log.info("Import data as ciphertext, which was exported previously") + import_ldif = os.path.join(topo.standalone.ds_paths.ldif_dir, "export_ciphertext.ldif") + + # Offline export + topo.standalone.stop() + if not topo.standalone.ldif2db(bename=DEFAULT_BENAME, suffixes=(DEFAULT_SUFFIX,), + excludeSuffixes=None, encrypt=False, import_file=import_ldif): + log.fatal('Failed to run offline ldif2db') + assert False + topo.standalone.start() + + log.info("Check that the data with encrypted attribute is imported properly") + users = UserAccounts(topo.standalone, DEFAULT_SUFFIX) + user = users.get('testuser') + assert user.present("telephoneNumber") + + +def test_export_import_plaintext(topo, enable_user_attr_encryption): + """Configure attribute encryption, store some data, check that we can export the plain text + :id: b171e215-0456-48a5-245f-c21abc40fc2d + :setup: Standalone instance + Enable AES encryption config on employeenumber + Enable 3DES encryption config on telephoneNumber + Add a test user with encrypted attributes + :steps: + 1. Export data as plain text + 2. Check that the attribute is present in the exported file + 3. Check that the encrypted value of attribute is also present in the exported file + 4. Delete the test user entry with encrypted data + 5. Import data as plaintext + 6. Check attribute value of telephoneNumber + :expectedresults: + 1. This should be successful + 2. This should be successful + 3. This should be successful + 4. This should be successful + 5. This should be successful + 6. 
This should be successful + """ + + log.info("Export data as plain text") + export_ldif = os.path.join(topo.standalone.ds_paths.ldif_dir, "export_plaintext.ldif") + + # Offline export + topo.standalone.stop() + if not topo.standalone.db2ldif(bename=DEFAULT_BENAME, suffixes=(DEFAULT_SUFFIX,), + excludeSuffixes=None, encrypt=True, repl_data=None, outputfile=export_ldif): + log.fatal('Failed to run offline db2ldif') + assert False + topo.standalone.start() + + log.info("Check that the attribute is present in the exported file") + log.info("Check that the plain text value of the encrypted attribute is present in the exported file") + with open(export_ldif, 'r') as ldif_file: + assert 'telephoneNumber: 1234567890' in ldif_file.read() + + log.info("Delete the test user entry with encrypted data") + enable_user_attr_encryption.delete() + + log.info("Import data as plain text, which was exported previously") + import_ldif = os.path.join(topo.standalone.ds_paths.ldif_dir, "export_plaintext.ldif") + + # Offline export + topo.standalone.stop() + if not topo.standalone.ldif2db(bename=DEFAULT_BENAME, suffixes=(DEFAULT_SUFFIX,), + excludeSuffixes=None, encrypt=True, import_file=import_ldif): + log.fatal('Failed to run offline ldif2db') + assert False + topo.standalone.start() + + log.info("Check that the attribute is imported properly") + users = UserAccounts(topo.standalone, DEFAULT_SUFFIX) + user = users.get('testuser') + assert user.present("telephoneNumber") + + +def test_attr_encryption_unindexed(topo, enable_user_attr_encryption): + """Configure attribute encryption for an un-indexed attribute, check that we can export encrypted data + :id: d3ef38e1-bb5a-44d8-a3a4-4a25a57e3454 + :setup: Standalone instance + Enable AES encryption config on employeenumber + Enable 3DES encryption config on telephoneNumber + Add a test user with encrypted attributes + :steps: + 1. Export data as cipher text + 2. Check that the unindexed attribute employeenumber is present in exported ldif file + 3. Check that the unindexed attribute employeenumber value is not present in exported ldif file + :expectedresults: + 1. This should be successful + 2. This should be successful + 3. This should be successful + """ + log.info("Export data as cipher text") + export_ldif = os.path.join(topo.standalone.ds_paths.ldif_dir, "emp_num_ciphertext.ldif") + + # Offline export + topo.standalone.stop() + if not topo.standalone.db2ldif(bename=DEFAULT_BENAME, suffixes=(DEFAULT_SUFFIX,), + excludeSuffixes=None, encrypt=False, repl_data=None, outputfile=export_ldif): + log.fatal('Failed to run offline db2ldif') + assert False + topo.standalone.start() + + log.info("Check that the attribute is present in the exported file") + log.info("Check that the encrypted value of attribute is not present in the exported file") + with open(export_ldif, 'r') as ldif_file: + ldif = ldif_file.read() + assert 'employeeNumber' in ldif + assert 'employeeNumber: 1000' not in ldif + + +def test_attr_encryption_multiple_backends(topo, enable_user_attr_encryption): + """Tests Configuration of attribute encryption for multiple backends + Where both the backends have attribute encryption + :id: 9ece3e6c-96b7-4dd5-b092-d76dda23472d + :setup: Standalone instance + SSL Enabled + :steps: + 1. Add two test backends + 2. Configure attribute encryption for telephonenumber in one test backend + 3. Configure attribute encryption for employeenumber in another test backend + 4. Add a test user in both backends with encrypted attributes + 5. 
Export data as ciphertext from both backends + 6. Check that telephoneNumber is encrypted in the ldif file of db1 + 7. Check that employeeNumber is encrypted in the ldif file of db2 + 8. Delete both test backends + :expectedresults: + 1. This should be successful + 2. This should be successful + 3. This should be successful + 4. This should be successful + 5. This should be successful + 6. This should be successful + 7. This should be successful + 8. This should be successful + """ + log.info("Add two test backends") + test_suffix1 = 'dc=test1,dc=com' + test_db1 = 'test_db1' + test_suffix2 = 'dc=test2,dc=com' + test_db2 = 'test_db2' + + # Create backends + backends = Backends(topo.standalone) + backend = backends.list()[0] + test_backend1 = backends.create(properties={'cn': test_db1, + 'nsslapd-suffix': test_suffix1}) + test_backend2 = backends.create(properties={'cn': test_db2, + 'nsslapd-suffix': test_suffix2}) + + # Create the top of the tree + suffix1 = Domain(topo.standalone, test_suffix1) + test1 = suffix1.create(properties={'dc': 'test1'}) + suffix2 = Domain(topo.standalone, test_suffix2) + test2 = suffix2.create(properties={'dc': 'test2'}) + + log.info("Enables attribute encryption for telephoneNumber in test_backend1") + backend1_encrypt_attrs = EncryptedAttrs(topo.standalone, basedn='cn=encrypted attributes,{}'.format(test_backend1.dn)) + b1_encrypt = backend1_encrypt_attrs.create(properties={'cn': 'telephoneNumber', + 'nsEncryptionAlgorithm': 'AES'}) + + log.info("Enables attribute encryption for employeeNumber in test_backend2") + backend2_encrypt_attrs = EncryptedAttrs(topo.standalone, basedn='cn=encrypted attributes,{}'.format(test_backend2.dn)) + b2_encrypt = backend2_encrypt_attrs.create(properties={'cn': 'employeeNumber', + 'nsEncryptionAlgorithm': 'AES'}) + + log.info("Add a test user with encrypted attributes in both backends") + users = UserAccounts(topo.standalone, test1.dn, None) + test_user = users.create(properties=TEST_USER_PROPERTIES) + test_user.replace('telephoneNumber', '1234567890') + + users = UserAccounts(topo.standalone, test2.dn, None) + test_user = users.create(properties=TEST_USER_PROPERTIES) + test_user.replace('employeeNumber', '1000') + + log.info("Export data as ciphertext from both backends") + export_db1 = os.path.join(topo.standalone.ds_paths.ldif_dir, "export_db1.ldif") + export_db2 = os.path.join(topo.standalone.ds_paths.ldif_dir, "export_db2.ldif") + + # Offline export + topo.standalone.stop() + if not topo.standalone.db2ldif(bename=test_db1, suffixes=(test_suffix1,), + excludeSuffixes=None, encrypt=False, repl_data=None, outputfile=export_db1): + log.fatal('Failed to run offline db2ldif') + assert False + + if not topo.standalone.db2ldif(bename=test_db2, suffixes=(test_suffix2,), + excludeSuffixes=None, encrypt=False, repl_data=None, outputfile=export_db2): + log.fatal('Failed to run offline db2ldif') + assert False + topo.standalone.start() + + log.info("Check that the attribute is present in the exported file in db1") + log.info("Check that the encrypted value of attribute is not present in the exported file in db1") + with open(export_db1, 'r') as ldif_file: + ldif = ldif_file.read() + assert 'telephoneNumber' in ldif + assert 'telephoneNumber: 1234567890' not in ldif + + log.info("Check that the attribute is present in the exported file in db2") + log.info("Check that the encrypted value of attribute is not present in the exported file in db2") + with open(export_db2, 'r') as ldif_file: + ldif = ldif_file.read() + assert 'employeeNumber' 
in ldif + assert 'employeeNumber: 1000' not in ldif + + log.info("Delete test backends") + test_backend1.delete() + test_backend2.delete() + + +def test_attr_encryption_backends(topo, enable_user_attr_encryption): + """Tests Configuration of attribute encryption for single backend + where more backends are present + :id: f3ef40e1-17d6-44d8-a3a4-4a25a57e9064 + :setup: Standalone instance + SSL Enabled + :steps: + 1. Add two test backends + 2. Configure attribute encryption for telephoneNumber in one test backend + 3. Add a test user in both backends with telephoneNumber + 4. Export ldif from both test backends + 5. Check that telephonenumber is encrypted in the ldif file of db1 + 6. Check that telephonenumber is not encrypted in the ldif file of db2 + 7. Delete both test backends + :expectedresults: + 1. This should be successful + 2. This should be successful + 3. This should be successful + 4. This should be successful + 5. This should be successful + 6. This should be successful + 7. This should be successful + """ + log.info("Add two test backends") + test_suffix1 = 'dc=test1,dc=com' + test_db1 = 'test_db1' + test_suffix2 = 'dc=test2,dc=com' + test_db2 = 'test_db2' + + # Create backends + backends = Backends(topo.standalone) + test_backend1 = backends.create(properties={'cn': test_db1, + 'nsslapd-suffix': test_suffix1}) + test_backend2 = backends.create(properties={'cn': test_db2, + 'nsslapd-suffix': test_suffix2}) + + # Create the top of the tree + suffix1 = Domain(topo.standalone, test_suffix1) + test1 = suffix1.create(properties={'dc': 'test1'}) + suffix2 = Domain(topo.standalone, test_suffix2) + test2 = suffix2.create(properties={'dc': 'test2'}) + + log.info("Enables attribute encryption for telephoneNumber in test_backend1") + backend1_encrypt_attrs = EncryptedAttrs(topo.standalone, basedn='cn=encrypted attributes,{}'.format(test_backend1.dn)) + b1_encrypt = backend1_encrypt_attrs.create(properties={'cn': 'telephoneNumber', + 'nsEncryptionAlgorithm': 'AES'}) + + log.info("Add a test user with telephoneNumber in both backends") + users = UserAccounts(topo.standalone, test1.dn, None) + test_user = users.create(properties=TEST_USER_PROPERTIES) + test_user.replace('telephoneNumber', '1234567890') + + users = UserAccounts(topo.standalone, test2.dn, None) + test_user = users.create(properties=TEST_USER_PROPERTIES) + test_user.replace('telephoneNumber', '1234567890') + + log.info("Export data as ciphertext from both backends") + export_db1 = os.path.join(topo.standalone.ds_paths.ldif_dir, "export_db1.ldif") + export_db2 = os.path.join(topo.standalone.ds_paths.ldif_dir, "export_db2.ldif") + + # Offline export + topo.standalone.stop() + if not topo.standalone.db2ldif(bename=test_db1, suffixes=(test_suffix1,), + excludeSuffixes=None, encrypt=False, repl_data=None, outputfile=export_db1): + log.fatal('Failed to run offline db2ldif') + assert False + + if not topo.standalone.db2ldif(bename=test_db2, suffixes=(test_suffix2,), + excludeSuffixes=None, encrypt=False, repl_data=None, outputfile=export_db2): + log.fatal('Failed to run offline db2ldif') + assert False + topo.standalone.start() + + log.info("Check that the attribute is present in the exported file in db1") + log.info("Check that the encrypted value of attribute is not present in the exported file in db1") + with open(export_db1, 'r') as ldif_file: + ldif = ldif_file.read() + assert 'telephoneNumber' in ldif + assert 'telephoneNumber: 1234567890' not in ldif + + log.info("Check that the attribute is present in the exported file in 
db2") + log.info("Check that the value of attribute is also present in the exported file in db2") + with open(export_db2, 'r') as ldif_file: + ldif = ldif_file.read() + assert 'telephoneNumber' in ldif + assert 'telephoneNumber: 1234567890' in ldif + + log.info("Delete test backends") + test_backend1.delete() + test_backend2.delete() + + +if __name__ == '__main__': + # Run isolated + # -s for DEBUG mode + CURRENT_FILE = os.path.realpath(__file__) + pytest.main("-s %s" % CURRENT_FILE) diff --git a/dirsrvtests/tests/suites/auth_token/__init__.py b/dirsrvtests/tests/suites/auth_token/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/dirsrvtests/tests/suites/auth_token/basic_auth_test.py b/dirsrvtests/tests/suites/auth_token/basic_auth_test.py new file mode 100644 index 0000000..2e592a4 --- /dev/null +++ b/dirsrvtests/tests/suites/auth_token/basic_auth_test.py @@ -0,0 +1,240 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2019 William Brown +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. +# --- END COPYRIGHT BLOCK --- + +import ldap +import pytest +import time +from lib389.idm.user import nsUserAccounts, UserAccounts +from lib389.topologies import topology_st as topology +from lib389.paths import Paths +from lib389.utils import ds_is_older +from lib389._constants import * +from lib389.idm.directorymanager import DirectoryManager +from lib389.idm.account import Anonymous +from lib389.extended_operations import LdapSSOTokenRequest + +default_paths = Paths() + +pytestmark = pytest.mark.tier1 + +USER_PASSWORD = "password aouoaeu" +TEST_KEY = "4PXhmtKG7iCdT9C49GoBdD92x5X1tvF3eW9bHq4ND2Q=" + +@pytest.mark.skipif(not default_paths.rust_enabled or ds_is_older('1.4.2.0'), reason="Auth tokens are not available in older versions") +def test_ldap_auth_token_config(topology): + """ Test that we are able to configure the ldapssotoken backend with various types and states. + + :id: e9b9360b-76df-40ef-9f45-b448df4c9eda + + :setup: Standalone instance + + :steps: + 1. Enable the feature + 2. Set a key manually. + 3. Regerate a key server side. + 4. Attempt to set invalid keys. + 5. Disable the feature + 6. Assert that key changes are rejected + + :expectedresults: + 1. Feature enables + 2. Key is set and accepted + 3. The key is regenerated and unique + 4. The key is rejected + 5. The disable functions online + 6. The key changes are rejected + """ + # Enable token + topology.standalone.config.set('nsslapd-enable-ldapssotoken', 'on') # enable it. + # Set a key + topology.standalone.config.set('nsslapd-ldapssotoken-secret', TEST_KEY) + # regen a key + topology.standalone.config.remove_all('nsslapd-ldapssotoken-secret') + k1 = topology.standalone.config.get_attr_val_utf8('nsslapd-ldapssotoken-secret') + assert(k1 != TEST_KEY) + # set an invalid key + with pytest.raises(ldap.UNWILLING_TO_PERFORM): + topology.standalone.config.set('nsslapd-ldapssotoken-secret', 'invalid key') + with pytest.raises(ldap.UNWILLING_TO_PERFORM): + topology.standalone.config.set('nsslapd-ldapssotoken-secret', '') + # Disable token + topology.standalone.config.set('nsslapd-enable-ldapssotoken', 'off') # disable it. 
+ # Set a key + with pytest.raises(ldap.OPERATIONS_ERROR): + topology.standalone.config.set('nsslapd-ldapssotoken-secret', TEST_KEY) + # regen a key + with pytest.raises(ldap.OPERATIONS_ERROR): + topology.standalone.config.remove_all('nsslapd-ldapssotoken-secret') + + +@pytest.mark.skipif(not default_paths.rust_enabled or ds_is_older('1.4.2.0'), reason="Auth tokens are not available in older versions") +def test_ldap_auth_token_nsuser(topology): + """ + Test that we can generate and authenticate with authentication tokens + for users in the directory, as well as security properties around these + tokens. + + :id: 65335341-c85b-457d-ac7d-c4079ac90a60 + + :setup: Standalone instance + + :steps: + 1. Create an account + 2. Generate a token for the account + 3. Authenticate with the token + 4. Assert that a token can not be issued from a token-authed account + 5. Regenerate the server key + 6. Assert the token no longer authenticates + + :expectedresults: + 1. Account is created + 2. Token is generated + 3. Token authenticates + 4. Token is NOT issued + 5. The key is regenerated + 6. The token fails to bind. + """ + topology.standalone.enable_tls() + topology.standalone.config.set('nsslapd-enable-ldapssotoken', 'on') # enable it. + nsusers = nsUserAccounts(topology.standalone, DEFAULT_SUFFIX) + # Create a user as dm. + user = nsusers.create(properties={ + 'uid': 'test_nsuser', + 'cn': 'test_nsuser', + 'displayName': 'testNsuser', + 'legalName': 'testNsuser', + 'uidNumber': '1001', + 'gidNumber': '1001', + 'homeDirectory': '/home/testnsuser', + 'userPassword': USER_PASSWORD, + }) + # Create a new con and bind as the user. + user_conn = user.bind(USER_PASSWORD) + user_account = nsUserAccounts(user_conn, DEFAULT_SUFFIX).get('test_nsuser') + # From the user_conn do an extop_s for the token + token = user_account.request_sso_token() + # Great! Now do a bind where the token is the pw: + # user_conn_tok = user.bind(token) + user_conn_tok = user.authenticate_sso_token(token) + # Assert whoami. + # Assert that user_conn_tok with the token can NOT get a new token. + user_tok_account = nsUserAccounts(user_conn_tok, DEFAULT_SUFFIX).get('test_nsuser') + with pytest.raises(ldap.UNWILLING_TO_PERFORM): + user_tok_account.request_sso_token() + + # Check with a lowered ttl (should deny) + topology.standalone.config.set('nsslapd-ldapssotoken-ttl-secs', '1') # Set a low ttl + # Ensure it's past - the one time I'll allow a sleep .... + time.sleep(2) + with pytest.raises(ldap.INVALID_CREDENTIALS): + user.authenticate_sso_token(token) + topology.standalone.config.set('nsslapd-ldapssotoken-ttl-secs', '3600') # Set a reasonable + + # Regenerate the server token key + topology.standalone.config.remove_all('nsslapd-ldapssotoken-secret') + # check we fail to authenticate. + with pytest.raises(ldap.INVALID_CREDENTIALS): + user.authenticate_sso_token(token) + +@pytest.mark.skipif(not default_paths.rust_enabled or ds_is_older('1.4.2.0'), reason="Auth tokens are not available in older versions") +def test_ldap_auth_token_disabled(topology): + """ Assert when the feature is disabled that token operations are not able to progress + + :id: ccde5d0b-7f2d-49d5-b9d5-f7082f8f36a3 + + :setup: Standalone instance + + :steps: + 1. Create a user + 2. Attempt to get a token. + 3. Enable the feature, get a token, then disable it. + 4. Attempt to auth + + :expectedresults: + 1. Success + 2. Fails to get a token + 3. Token is received + 4. Auth fails as token is disabled. 
+ """ + topology.standalone.enable_tls() + topology.standalone.config.set('nsslapd-enable-ldapssotoken', 'off') # disable it. + nsusers = nsUserAccounts(topology.standalone, DEFAULT_SUFFIX) + # Create a user as dm. + user = nsusers.create(properties={ + 'uid': 'test_nsuser1', + 'cn': 'test_nsuser1', + 'displayName': 'testNsuser1', + 'legalName': 'testNsuser1', + 'uidNumber': '1002', + 'gidNumber': '1002', + 'homeDirectory': '/home/testnsuser1', + 'userPassword': USER_PASSWORD, + }) + # Create a new con and bind as the user. + user_conn = user.bind(USER_PASSWORD) + user_account = nsUserAccounts(user_conn, DEFAULT_SUFFIX).get('test_nsuser1') + # From the user_conn do an extop_s for the token + with pytest.raises(ldap.PROTOCOL_ERROR): + user_account.request_sso_token() + # Now enable it + topology.standalone.config.set('nsslapd-enable-ldapssotoken', 'on') + token = user_account.request_sso_token() + # Now disable + topology.standalone.config.set('nsslapd-enable-ldapssotoken', 'off') + # Now attempt to bind (should fail) + with pytest.raises(ldap.INVALID_CREDENTIALS): + user_account.authenticate_sso_token(token) + + +@pytest.mark.skipif(not default_paths.rust_enabled or ds_is_older('1.4.2.0'), reason="Auth tokens are not available in older versions") +def test_ldap_auth_token_directory_manager(topology): + """ Test token auth with directory manager is denied + + :id: ec9aec64-3edf-4f3f-853a-7527b0c42124 + + :setup: Standalone instance + + :steps: + 1. Attempt to generate a token as DM + + :expectedresults: + 1. Fails + """ + topology.standalone.enable_tls() + topology.standalone.config.set('nsslapd-enable-ldapssotoken', 'on') # enable it. + + dm = DirectoryManager(topology.standalone) + # Try getting a token at DM, should fail. + with pytest.raises(ldap.UNWILLING_TO_PERFORM): + dm.request_sso_token() + +## test as anon (will fail) +@pytest.mark.skipif(not default_paths.rust_enabled or ds_is_older('1.4.2.0'), reason="Auth tokens are not available in older versions") +def test_ldap_auth_token_anonymous(topology): + """ Test token auth with Anonymous is denied. + + :id: 966068c3-fbc6-468d-a554-18d68d1d895b + + :setup: Standalone instance + + :steps: + 1. Attempt to generate a token as Anonymous + + :expectedresults: + 1. Fails + """ + topology.standalone.enable_tls() + topology.standalone.config.set('nsslapd-enable-ldapssotoken', 'on') # enable it. 
+ + anon_conn = Anonymous(topology.standalone).bind() + # Build the request + req = LdapSSOTokenRequest() + # Get the response + with pytest.raises(ldap.UNWILLING_TO_PERFORM): + (_, res) = anon_conn.extop_s(req, escapehatch='i am sure') + diff --git a/dirsrvtests/tests/suites/automember_plugin/__init__.py b/dirsrvtests/tests/suites/automember_plugin/__init__.py new file mode 100644 index 0000000..fd6c4a5 --- /dev/null +++ b/dirsrvtests/tests/suites/automember_plugin/__init__.py @@ -0,0 +1,3 @@ +""" + :Requirement: 389-ds-base: Auto Member +""" diff --git a/dirsrvtests/tests/suites/automember_plugin/automember_mod_test.py b/dirsrvtests/tests/suites/automember_plugin/automember_mod_test.py new file mode 100644 index 0000000..13d96f3 --- /dev/null +++ b/dirsrvtests/tests/suites/automember_plugin/automember_mod_test.py @@ -0,0 +1,143 @@ +import logging +import pytest +import os +from lib389.utils import ds_is_older +from lib389._constants import * +from lib389.plugins import AutoMembershipPlugin, AutoMembershipDefinitions +from lib389.idm.user import UserAccounts +from lib389.idm.group import Groups +from lib389.topologies import topology_st as topo + +# Skip on older versions +pytestmark = [pytest.mark.tier1, + pytest.mark.skipif(ds_is_older('1.4.0'), reason="Not implemented")] + +DEBUGGING = os.getenv("DEBUGGING", default=False) +if DEBUGGING: + logging.getLogger(__name__).setLevel(logging.DEBUG) +else: + logging.getLogger(__name__).setLevel(logging.INFO) +log = logging.getLogger(__name__) + + +@pytest.fixture(scope="module") +def automember_fixture(topo, request): + # Create group + groups = [] + group_obj = Groups(topo.standalone, DEFAULT_SUFFIX) + groups.append(group_obj.create(properties={'cn': 'testgroup'})) + groups.append(group_obj.create(properties={'cn': 'testgroup2'})) + groups.append(group_obj.create(properties={'cn': 'testgroup3'})) + + # Create test user + user_accts = UserAccounts(topo.standalone, DEFAULT_SUFFIX) + user = user_accts.create_test_user() + + # Create automember definitions and regex rules + automember_prop = { + 'cn': 'testgroup_definition', + 'autoMemberScope': DEFAULT_SUFFIX, + 'autoMemberFilter': 'objectclass=posixaccount', + 'autoMemberDefaultGroup': groups[0].dn, + 'autoMemberGroupingAttr': 'member:dn', + } + automembers = AutoMembershipDefinitions(topo.standalone) + auto_def = automembers.create(properties=automember_prop) + auto_def.add_regex_rule("regex1", groups[1].dn, include_regex=['cn=mark.*']) + auto_def.add_regex_rule("regex2", groups[2].dn, include_regex=['cn=simon.*']) + + # Enable plugin + automemberplugin = AutoMembershipPlugin(topo.standalone) + automemberplugin.enable() + topo.standalone.restart() + + return (user, groups) + + +def test_mods(automember_fixture, topo): + """Modify the user so that it is added to the various automember groups + + :id: 28a2b070-7f16-4905-8831-c80fa6441693 + :setup: Standalone Instance + :steps: + 1. Update user that should add it to group[0] + 2. Update user that should add it to group[1] + 3. Update user that should add it to group[2] + 4. Update user that should add it to group[0] + 5. Test rebuild task correctly moves user to group[1] + :expectedresults: + 1. Success + 2. Success + 3. Success + 4. Success + 5. 
Success + """ + (user, groups) = automember_fixture + + # Update user which should go into group[0] + user.replace('cn', 'whatever') + groups[0].is_member(user.dn) + if groups[1].is_member(user.dn): + assert False + if groups[2].is_member(user.dn): + assert False + + # Update user0 which should go into group[1] + user.replace('cn', 'mark') + groups[1].is_member(user.dn) + if groups[0].is_member(user.dn): + assert False + if groups[2].is_member(user.dn): + assert False + + # Update user which should go into group[2] + user.replace('cn', 'simon') + groups[2].is_member(user.dn) + if groups[0].is_member(user.dn): + assert False + if groups[1].is_member(user.dn): + assert False + + # Update user which should go back into group[0] (full circle) + user.replace('cn', 'whatever') + groups[0].is_member(user.dn) + if groups[1].is_member(user.dn): + assert False + if groups[2].is_member(user.dn): + assert False + + # + # Test rebuild task. First disable plugin + # + automemberplugin = AutoMembershipPlugin(topo.standalone) + automemberplugin.disable() + topo.standalone.restart() + + # Make change that would move the entry from group[0] to group[1] + user.replace('cn', 'mark') + + # Enable plugin + automemberplugin.enable() + topo.standalone.restart() + + # Run rebuild task + task = automemberplugin.fixup(DEFAULT_SUFFIX, "objectclass=posixaccount") + task.wait() + + # Test membership + groups[1].is_member(user.dn) + if groups[0].is_member(user.dn): + assert False + if groups[2].is_member(user.dn): + assert False + + # Success + log.info("Test PASSED") + + +if __name__ == '__main__': + # Run isolated + # -s for DEBUG mode + CURRENT_FILE = os.path.realpath(__file__) + pytest.main(["-s", CURRENT_FILE]) + diff --git a/dirsrvtests/tests/suites/automember_plugin/automember_test.py b/dirsrvtests/tests/suites/automember_plugin/automember_test.py new file mode 100644 index 0000000..b34747e --- /dev/null +++ b/dirsrvtests/tests/suites/automember_plugin/automember_test.py @@ -0,0 +1,300 @@ +import logging +import pytest +import os +import ldap +from lib389.utils import ds_is_older +from lib389._constants import * +from lib389.plugins import AutoMembershipPlugin, AutoMembershipDefinition, AutoMembershipDefinitions, AutoMembershipRegexRule +from lib389._mapped_object import DSLdapObjects, DSLdapObject +from lib389 import agreement +from lib389.idm.user import UserAccount, UserAccounts, TEST_USER_PROPERTIES +from lib389.idm.group import Groups, Group +from lib389.topologies import topology_st as topo +from lib389._constants import DEFAULT_SUFFIX + + +# Skip on older versions +pytestmark = [pytest.mark.tier1, + pytest.mark.skipif(ds_is_older('1.3.7'), reason="Not implemented")] + +DEBUGGING = os.getenv("DEBUGGING", default=False) +if DEBUGGING: + logging.getLogger(__name__).setLevel(logging.DEBUG) +else: + logging.getLogger(__name__).setLevel(logging.INFO) +log = logging.getLogger(__name__) + + +@pytest.fixture(scope="module") +def automember_fixture(topo, request): + + groups = Groups(topo.standalone, DEFAULT_SUFFIX) + group = groups.create(properties={'cn': 'testgroup'}) + + automemberplugin = AutoMembershipPlugin(topo.standalone) + automemberplugin.enable() + + topo.standalone.restart() + + automember_prop = { + 'cn': 'testgroup_definition', + 'autoMemberScope': 'ou=People,' + DEFAULT_SUFFIX, + 'autoMemberFilter': 'objectclass=*', + 'autoMemberDefaultGroup': group.dn, + 'autoMemberGroupingAttr': 'member:dn', + } + + automembers = AutoMembershipDefinitions(topo.standalone, "cn=Auto Membership 
Plugin,cn=plugins,cn=config") + + automember = automembers.create(properties=automember_prop) + + return (group, automembers, automember) + + +def test_automemberscope(automember_fixture, topo): + """Test if the automember scope is valid + + :id: c3d3f250-e7fd-4441-8387-3d24c156e982 + :setup: Standalone instance, enabled Auto Membership Plugin + :steps: + 1. Create automember with invalid cn that raises + UNWILLING_TO_PERFORM exception + 2. If exception raised, set scope to any cn + 3. If exception is not raised, set scope to with ou=People + :expectedresults: + 1. Should be success + 2. Should be success + 3. Should be success + """ + + (group, automembers, automember) = automember_fixture + + automember_prop = { + 'cn': 'anyrandomcn', + 'autoMemberScope': 'ou=People,' + DEFAULT_SUFFIX, + 'autoMemberFilter': 'objectclass=*', + 'autoMemberDefaultGroup': group.dn, + 'autoMemberGroupingAttr': 'member:dn', + } + + # depends on issue #49465 + + # with pytest.raises(ldap.UNWILLING_TO_PERFORM): + # automember = automembers.create(properties=automember_prop) + # automember.set_scope("cn=No Entry,%s" % DEFAULT_SUFFIX) + + automember.set_scope("ou=People,%s" % DEFAULT_SUFFIX) + + +def test_automemberfilter(automember_fixture, topo): + """Test if the automember filter is valid + + :id: 935c55de-52dc-4f80-b7dd-3aacd30f6df2 + :setup: Standalone instance, enabled Auto Membership Plugin + :steps: + 1. Create automember with invalid filter that raises + UNWILLING_TO_PERFORM exception + 2. If exception raised, set filter to the invalid filter + 3. If exception is not raised, set filter as all objectClasses + :expectedresults: + 1. Should be success + 2. Should be success + 3. Should be success + """ + + (group, automembers, automember) = automember_fixture + + automember_prop = { + 'cn': 'anyrandomcn', + 'autoMemberScope': 'ou=People,' + DEFAULT_SUFFIX, + 'autoMemberFilter': '(ou=People', + 'autoMemberDefaultGroup': group.dn, + 'autoMemberGroupingAttr': 'member:dn', + } + + with pytest.raises(ldap.UNWILLING_TO_PERFORM): + automember = automembers.create(properties=automember_prop) + automember.set_filter("(ou=People") + + automember.set_filter("objectClass=*") + + +def test_adduser(automember_fixture, topo): + """Test if member is automatically added to the group + + :id: 14f1e2f5-2162-41ab-962c-5293516baf2e + :setup: Standalone instance, enabled Auto Membership Plugin + :steps: + 1. Create a user + 2. Assert that the user is member of the group + :expectedresults: + 1. Should be success + 2. Should be success + """ + + (group, automembers, automember) = automember_fixture + + users = UserAccounts(topo.standalone, DEFAULT_SUFFIX) + user = users.create(properties=TEST_USER_PROPERTIES) + + assert group.is_member(user.dn) + user.delete() + + +@pytest.mark.skipif(ds_is_older("1.4.1.2"), reason="Not implemented") +def test_delete_default_group(automember_fixture, topo): + """If memberof is enable and a user became member of default group + because of automember rule then delete the default group should succeeds + + :id: 8b55d077-8851-45a2-a547-b28a7983a3c2 + :setup: Standalone instance, enabled Auto Membership Plugin + :steps: + 1. Enable memberof plugin + 2. Create a user + 3. Assert that the user is member of the default group + 4. Delete the default group + :expectedresults: + 1. Should be success + 2. Should be success + 3. Should be success + 4. 
Should be success + """ + + (group, automembers, automember) = automember_fixture + + from lib389.plugins import MemberOfPlugin + memberof = MemberOfPlugin(topo.standalone) + memberof.enable() + topo.standalone.restart() + topo.standalone.setLogLevel(65536) + + users = UserAccounts(topo.standalone, DEFAULT_SUFFIX) + user_1 = users.create_test_user(uid=1) + + try: + assert group.is_member(user_1.dn) + group.delete() + error_lines = topo.standalone.ds_error_log.match('.*auto-membership-plugin - automember_update_member_value - group .default or target. does not exist .%s.$' % group.dn) + assert (len(error_lines) == 1) + finally: + user_1.delete() + topo.standalone.setLogLevel(0) + +@pytest.mark.skipif(ds_is_older("1.4.3.3"), reason="Not implemented") +def test_no_default_group(automember_fixture, topo): + """If memberof is enable and a user became member of default group + and default group does not exist then an INFO should be logged + + :id: 8882972f-fb3e-4d77-9729-0235897676bc + :setup: Standalone instance, enabled Auto Membership Plugin + :steps: + 1. Enable memberof plugin + 2. Set errorlog level to 0 (default) + 3. delete the default group + 4. Create a user + 5. Retrieve message in log + :expectedresults: + 1. Should be success + 2. Should be success + 3. Should be success + 4. Should be success + 5. Should be success + """ + + (group, automembers, automember) = automember_fixture + + from lib389.plugins import MemberOfPlugin + memberof = MemberOfPlugin(topo.standalone) + memberof.enable() + topo.standalone.restart() + topo.standalone.setLogLevel(0) + + # delete it if it exists + try: + group.get_attr_val_utf8('creatorsname') + group.delete() + except ldap.NO_SUCH_OBJECT: + pass + users = UserAccounts(topo.standalone, DEFAULT_SUFFIX) + user_1 = users.create_test_user(uid=1) + + try: + error_lines = topo.standalone.ds_error_log.match('.*auto-membership-plugin - automember_update_member_value - group .default or target. does not exist .%s.$' % group.dn) + assert (len(error_lines) > 0) + finally: + user_1.delete() + topo.standalone.setLogLevel(0) + +@pytest.mark.skipif(ds_is_older("1.4.1.2"), reason="Not implemented") +def test_delete_target_group(automember_fixture, topo): + """If memberof is enabld and a user became member of target group + because of automember rule then delete the target group should succeeds + + :id: bf5745e3-3de8-485d-8a68-e2fd460ce1cb + :setup: Standalone instance, enabled Auto Membership Plugin + :steps: + 1. Recreate the default group if it was deleted before + 2. Create a target group (using regex) + 3. Create a target group automember rule (regex) + 4. Enable memberof plugin + 5. Create a user that goes into the target group + 6. Assert that the user is member of the target group + 7. Delete the target group + 8. Check automember skipped the regex automember rule because target group did not exist + :expectedresults: + 1. Should be success + 2. Should be success + 3. Should be success + 4. Should be success + 5. Should be success + 6. Should be success + 7. Should be success + 8. 
Should be success + """ + + (group, automembers, automember) = automember_fixture + + # default group that may have been deleted in previous tests + try: + groups = Groups(topo.standalone, DEFAULT_SUFFIX) + group = groups.create(properties={'cn': 'testgroup'}) + except: + pass + + # target group that will receive regex automember + groups = Groups(topo.standalone, DEFAULT_SUFFIX) + group_regex = groups.create(properties={'cn': 'testgroup_regex'}) + + # regex automember definition + automember_regex_prop = { + 'cn': 'automember regex', + 'autoMemberTargetGroup': group_regex.dn, + 'autoMemberInclusiveRegex': 'uid=.*1', + } + automember_regex_dn = 'cn=automember regex, %s' % automember.dn + automember_regexes = AutoMembershipRegexRule(topo.standalone, automember_regex_dn) + automember_regex = automember_regexes.create(properties=automember_regex_prop) + + from lib389.plugins import MemberOfPlugin + memberof = MemberOfPlugin(topo.standalone) + memberof.enable() + + topo.standalone.restart() + topo.standalone.setLogLevel(65536) + + # create a user that goes into the target group but not in the default group + users = UserAccounts(topo.standalone, DEFAULT_SUFFIX) + user_1 = users.create_test_user(uid=1) + + try: + assert group_regex.is_member(user_1.dn) + assert not group.is_member(user_1.dn) + + # delete that target filter group + group_regex.delete() + error_lines = topo.standalone.ds_error_log.match('.*auto-membership-plugin - automember_update_member_value - group .default or target. does not exist .%s.$' % group_regex.dn) + # one line for default group and one for target group + assert (len(error_lines) == 1) + finally: + user_1.delete() + topo.standalone.setLogLevel(0) diff --git a/dirsrvtests/tests/suites/automember_plugin/basic_test.py b/dirsrvtests/tests/suites/automember_plugin/basic_test.py new file mode 100644 index 0000000..85d3224 --- /dev/null +++ b/dirsrvtests/tests/suites/automember_plugin/basic_test.py @@ -0,0 +1,854 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2019 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. 
+# --- END COPYRIGHT BLOCK --- + +""" +Will test AutoMememer Plugin with AotoMember Task and Retro Changelog +""" + +import os +import pytest +from lib389.topologies import topology_m1 as topo +from lib389.idm.organizationalunit import OrganizationalUnits +from lib389.idm.domain import Domain +from lib389.idm.posixgroup import PosixGroups +from lib389.plugins import AutoMembershipPlugin, AutoMembershipDefinitions, \ + MemberOfPlugin, AutoMembershipRegexRules, AutoMembershipDefinition +from lib389.backend import Backends +from lib389.config import Config +from lib389._constants import DEFAULT_SUFFIX +from lib389.idm.user import UserAccounts +from lib389.idm.group import Groups, Group, UniqueGroup, nsAdminGroups, nsAdminGroup +from lib389.utils import ds_is_older +import ldap + +pytestmark = pytest.mark.tier1 + +BASE_SUFF = "dc=autoMembers,dc=com" +TEST_BASE = "dc=testAutoMembers,dc=com" +BASE_REPL = "dc=replAutoMembers,dc=com" +SUBSUFFIX = f'dc=SubSuffix,{BASE_SUFF}' +REPMANDN = "cn=ReplManager" +CACHE_SIZE = '-1' +CACHEMEM_SIZE = '10485760' +AUTO_MEM_SCOPE_TEST = f'ou=Employees,{TEST_BASE}' +AUTO_MEM_SCOPE_BASE = f'ou=Employees,{BASE_SUFF}' + + +def add_base_entries(topo): + """ + Will create suffix + """ + for suffix, backend_name in [(BASE_SUFF, 'AutoMembers'), (SUBSUFFIX, 'SubAutoMembers'), + (TEST_BASE, 'testAutoMembers'), (BASE_REPL, 'ReplAutoMembers'), + ("dc=SubSuffix,{}".format(BASE_REPL), 'ReplSubAutoMembers')]: + Backends(topo.ms["master1"]).create(properties={ + 'cn': backend_name, + 'nsslapd-suffix': suffix, + 'nsslapd-CACHE_SIZE': CACHE_SIZE, + 'nsslapd-CACHEMEM_SIZE': CACHEMEM_SIZE}) + Domain(topo.ms["master1"], suffix).create(properties={ + 'dc': suffix.split('=')[1].split(',')[0], + 'aci': [ + f'(targetattr="userPassword")(version 3.0;aci "Replication Manager ' + f'Access";allow (write,compare) userdn="ldap:///{REPMANDN},cn=config";)', + f'(target ="ldap:///{suffix}")(targetattr !="cn||sn||uid") (version 3.0;' + f'acl "Group Permission";allow (write) ' + f'(groupdn = "ldap:///cn=GroupMgr,{suffix}");)', + f'(target ="ldap:///{suffix}")(targetattr !="userPassword")(version 3.0;acl ' + f'"Anonym-read access"; allow (read,search,compare)(userdn="ldap:///anyone");)' + ] + }) + for suffix, ou_cn in [(BASE_SUFF, 'userGroups'), + (BASE_SUFF, 'Employees'), + (BASE_SUFF, 'TaskEmployees'), + (TEST_BASE, 'Employees')]: + OrganizationalUnits(topo.ms["master1"], suffix).create(properties={'ou': ou_cn}) + + +def add_user(topo, user_id, suffix, uid_no, gid_no, role_usr): + """ + Will create entries with nsAdminGroup objectclass + """ + objectclasses = ['top', 'person', 'posixaccount', 'inetuser', + 'nsMemberOf', 'nsAccount', 'nsAdminGroup'] + if ds_is_older('1.4.0'): + objectclasses.remove('nsAccount') + + user = nsAdminGroups(topo.ms["master1"], suffix, rdn=None).create(properties={ + 'cn': user_id, + 'sn': user_id, + 'uid': user_id, + 'homeDirectory': '/home/{}'.format(user_id), + 'loginShell': '/bin/bash', + 'uidNumber': uid_no, + 'gidNumber': gid_no, + 'objectclass': objectclasses, + 'nsAdminGroupName': role_usr, + 'seeAlso': 'uid={},{}'.format(user_id, suffix), + 'entrydn': 'uid={},{}'.format(user_id, suffix) + }) + return user + + +def check_groups(topo, group_dn, user_dn, member): + """ + Will check MEMBATTR + """ + return bool(Group(topo.ms["master1"], group_dn).present(member, user_dn)) + + +def add_group(topo, suffix, group_id): + """ + Will create groups + """ + Groups(topo.ms["master1"], suffix, rdn=None).create(properties={ + 'cn': group_id + }) + + +def number_memberof(topo, 
user, number): + """ + Function to check if the memberOf attribute is present. + """ + return len(nsAdminGroup(topo.ms["master1"], user).get_attr_vals_utf8('memberOf')) == number + + +def add_group_entries(topo): + """ + Will create multiple entries needed for this test script + """ + for suffix, group in [(SUBSUFFIX, 'subsuffGroups'), + (SUBSUFFIX, 'Employees'), + (TEST_BASE, 'testuserGroups'), + ("dc=SubSuffix,{}".format(BASE_REPL), 'replsubGroups'), + (BASE_REPL, 'replsubGroups')]: + add_group(topo, suffix, group) + for group_cn in ['SubDef1', 'SubDef2', 'SubDef3', 'SubDef4', 'SubDef5']: + add_group(topo, BASE_REPL, group_cn) + for user in ['Managers', 'Contractors', 'Interns', 'Visitors']: + add_group(topo, "cn=replsubGroups,{}".format(BASE_REPL), user) + for ou_ou, group_cn in [("ou=userGroups,{}".format(BASE_SUFF), 'SuffDef1'), + ("ou=userGroups,{}".format(BASE_SUFF), 'SuffDef2'), + ("ou=userGroups,{}".format(BASE_SUFF), 'SuffDef3'), + ("ou=userGroups,{}".format(BASE_SUFF), 'SuffDef4'), + ("ou=userGroups,{}".format(BASE_SUFF), 'SuffDef5'), + ("ou=userGroups,{}".format(BASE_SUFF), 'Contractors'), + ("ou=userGroups,{}".format(BASE_SUFF), 'Managers'), + ("CN=testuserGroups,{}".format(TEST_BASE), 'TestDef1'), + ("CN=testuserGroups,{}".format(TEST_BASE), 'TestDef2'), + ("CN=testuserGroups,{}".format(TEST_BASE), 'TestDef3'), + ("CN=testuserGroups,{}".format(TEST_BASE), 'TestDef4'), + ("CN=testuserGroups,{}".format(TEST_BASE), 'TestDef5')]: + add_group(topo, ou_ou, group_cn) + for ou_ou, group_cn, grp_no in [(SUBSUFFIX, 'SubDef1', '111'), + (SUBSUFFIX, 'SubDef2', '222'), + (SUBSUFFIX, 'SubDef3', '333'), + (SUBSUFFIX, 'SubDef4', '444'), + (SUBSUFFIX, 'SubDef5', '555'), + ('cn=subsuffGroups,{}'.format(SUBSUFFIX), + 'Managers', '666'), + ('cn=subsuffGroups,{}'.format(SUBSUFFIX), + 'Contractors', '999')]: + PosixGroups(topo.ms["master1"], ou_ou, rdn=None).create(properties={ + 'cn': group_cn, + 'gidNumber': grp_no + }) + + +def add_member_attr(topo, group_dn, user_dn, member): + """ + Will add members to groups + """ + Group(topo.ms["master1"], group_dn).add(member, user_dn) + + +def change_grp_objclass(new_object, member, type_of): + """ + Will change objectClass + """ + try: + type_of.remove(member, None) + except ldap.NO_SUCH_ATTRIBUTE: + pass + type_of.ensure_state(properties={ + 'cn': type_of.get_attr_val_utf8('cn'), + 'objectClass': ['top', 'nsMemberOf', new_object] + }) + + +@pytest.fixture(scope="module") +def _create_all_entries(topo): + """ + Fixture module that will create required entries for test cases. 
+ """ + add_base_entries(topo) + add_group_entries(topo) + auto = AutoMembershipPlugin(topo.ms["master1"]) + auto.add("nsslapd-pluginConfigArea", "cn=autoMembersPlugin,{}".format(BASE_REPL)) + MemberOfPlugin(topo.ms["master1"]).enable() + automembers_definitions = AutoMembershipDefinitions(topo.ms["master1"]) + automembers_definitions.create(properties={ + 'cn': 'userGroups', + 'autoMemberScope': f'ou=Employees,{BASE_SUFF}', + 'autoMemberFilter': "objectclass=posixAccount", + 'autoMemberDefaultGroup': [ + f'cn=SuffDef1,ou=userGroups,{BASE_SUFF}', + f'cn=SuffDef2,ou=userGroups,{BASE_SUFF}', + f'cn=SuffDef3,ou=userGroups,{BASE_SUFF}', + f'cn=SuffDef4,ou=userGroups,{BASE_SUFF}', + f'cn=SuffDef5,ou=userGroups,{BASE_SUFF}' + ], + 'autoMemberGroupingAttr': 'member:dn', + }) + + automembers_definitions.create(properties={ + 'cn': 'subsuffGroups', + 'autoMemberScope': f'ou=Employees,{BASE_SUFF}', + 'autoMemberFilter': "objectclass=posixAccount", + 'autoMemberDefaultGroup': [ + f'cn=SubDef1,dc=subSuffix,{BASE_SUFF}', + f'cn=SubDef2,dc=subSuffix,{BASE_SUFF}', + f'cn=SubDef3,dc=subSuffix,{BASE_SUFF}', + f'cn=SubDef4,dc=subSuffix,{BASE_SUFF}', + f'cn=SubDef5,dc=subSuffix,{BASE_SUFF}', + ], + 'autoMemberGroupingAttr': 'memberuid:dn', + }) + + automembers_regex_usergroup = AutoMembershipRegexRules(topo.ms["master1"], + f'cn=userGroups,{auto.dn}') + automembers_regex_usergroup.create(properties={ + 'cn': 'Managers', + 'description': f'Group placement for Managers', + 'autoMemberTargetGroup': [f'cn=Managers,ou=userGroups,{BASE_SUFF}'], + 'autoMemberInclusiveRegex': [ + "gidNumber=^9", + "nsAdminGroupName=^Manager", + ], + "autoMemberExclusiveRegex": [ + "gidNumber=^[6-8]", + "nsAdminGroupName=^Junior$", + ], + }) + + automembers_regex_usergroup.create(properties={ + 'cn': 'Contractors', + 'description': f'Group placement for Contractors', + 'autoMemberTargetGroup': [f'cn=Contractors,ou=userGroups,{BASE_SUFF}'], + 'autoMemberInclusiveRegex': [ + "gidNumber=^1", + "nsAdminGroupName=Contractor", + ], + "autoMemberExclusiveRegex": [ + "gidNumber=^[2-4]", + "nsAdminGroupName=^Employee$", + ], + }) + + automembers_regex_sub = AutoMembershipRegexRules(topo.ms["master1"], + f'cn=subsuffGroups,{auto.dn}') + automembers_regex_sub.create(properties={ + 'cn': 'Managers', + 'description': f'Group placement for Managers', + 'autoMemberTargetGroup': [f'cn=Managers,cn=subsuffGroups,dc=subSuffix,{BASE_SUFF}'], + 'autoMemberInclusiveRegex': [ + "gidNumber=^[1-4]..3$", + "uidNumber=^5.5$", + "nsAdminGroupName=^Manager$|^Supervisor$", + ], + "autoMemberExclusiveRegex": [ + "gidNumber=^[6-8].0$", + "uidNumber=^999$", + "nsAdminGroupName=^Junior$", + ], + }) + + automembers_regex_sub.create(properties={ + 'cn': 'Contractors', + 'description': f'Group placement for Contractors', + 'autoMemberTargetGroup': [f'cn=Contractors,cn=subsuffGroups,dc=SubSuffix,{BASE_SUFF}'], + 'autoMemberInclusiveRegex': [ + "gidNumber=^[5-9].3$", + "uidNumber=^8..5$", + "nsAdminGroupName=^Contract|^Temporary$", + ], + "autoMemberExclusiveRegex": [ + "gidNumber=^[2-4]00$", + "uidNumber=^[1,3,8]99$", + "nsAdminGroupName=^Employee$", + ], + }) + for cn_name, ou_name in [('testuserGroups', 'Employees'), ('hostGroups', 'HostEntries')]: + automembers_definitions.create(properties={ + 'cn': cn_name, + 'autoMemberScope': f'ou={ou_name},dc=testautoMembers,dc=com', + 'autoMemberFilter': "objectclass=posixAccount", + 'autoMemberDefaultGroup': [ + f'cn=TestDef1,cn={cn_name},dc=testautoMembers,dc=com', + f'cn=TestDef2,cn={cn_name},dc=testautoMembers,dc=com', + 
f'cn=TestDef3,cn={cn_name},dc=testautoMembers,dc=com', + f'cn=TestDef4,cn={cn_name},dc=testautoMembers,dc=com', + f'cn=TestDef5,cn={cn_name},dc=testautoMembers,dc=com', + ], + 'autoMemberGroupingAttr': 'member:dn', + }) + + topo.ms["master1"].restart() + + +def test_disable_the_plug_in(topo, _create_all_entries): + """Plug-in and check the status + + :id: 4feee76c-e7ff-11e8-836e-8c16451d917b + :setup: Instance with replication + :steps: + 1. Disable the plug-in and check the status + 2. Enable the plug-in and check the status + :expected results: + 1. Should success + 2. Should success + """ + instance_auto = AutoMembershipPlugin(topo.ms["master1"]) + instance_auto.disable() + assert not instance_auto.status() + instance_auto.enable() + assert instance_auto.status() + + +def test_custom_config_area(topo, _create_all_entries): + """Custom config area + + :id: 4fefb8cc-e7ff-11e8-92fd-8c16451d917b + :setup: Instance with replication + :steps: + 1. Check whether the plugin can be configured for custom config area + 2. After adding custom config area can be removed + :expected results: + 1. Should success + 2. Should success + """ + instance_auto = AutoMembershipPlugin(topo.ms["master1"]) + instance_auto.replace("nsslapd-pluginConfigArea", DEFAULT_SUFFIX) + assert instance_auto.get_attr_val_utf8("nsslapd-pluginConfigArea") + instance_auto.remove("nsslapd-pluginConfigArea", DEFAULT_SUFFIX) + assert not instance_auto.get_attr_val_utf8("nsslapd-pluginConfigArea") + + +@pytest.mark.bz834053 +def test_ability_to_control_behavior_of_modifiers_name(topo, _create_all_entries): + """ + Control behaviour of modifier's name + + :id: 4ff16370-e7ff-11e8-838d-8c16451d917b + :setup: Instance with replication + :steps: + 1. Turn on 'nsslapd-plugin-binddn-tracking' + 2. Add an user + 3. Check the creatorsname in the user entry + 4. Check the internalCreatorsname in the user entry + 5. Check the modifiersname in the user entry + 6. Check the internalModifiersname in the user entry + 7. Unset nsslapd-plugin-binddn-tracking attribute under + cn=config and delete the test enteries + :expected results: + 1. Should success + 2. Should success + 3. Should success + 4. Should success + 5. Should success + 6. Should success + 7. 
Should success + """ + instance1 = topo.ms["master1"] + configure = Config(instance1) + configure.replace('nsslapd-plugin-binddn-tracking', 'on') + instance1.restart() + assert configure.get_attr_val_utf8('nsslapd-plugin-binddn-tracking') == 'on' + user = add_user(topo, "User_autoMembers_05", "ou=Employees,{}".format(TEST_BASE), + "19", "18", "Supervisor") + # search the User DN name for the creatorsname in user entry + assert user.get_attr_val_utf8('creatorsname') == 'cn=directory manager' + # search the User DN name for the internalCreatorsname in user entry + assert user.get_attr_val_utf8('internalCreatorsname') == \ + 'cn=ldbm database,cn=plugins,cn=config' + # search the modifiersname in the user entry + assert user.get_attr_val_utf8('modifiersname') == 'cn=directory manager' + # search the internalModifiersname in the user entry + assert user.get_attr_val_utf8('internalModifiersname') == \ + 'cn=MemberOf Plugin,cn=plugins,cn=config' + # unset nsslapd-plugin-binddn-tracking attribute + configure.replace('nsslapd-plugin-binddn-tracking', 'off') + instance1.restart() + # deleting test enteries of automember05 test case + user.delete() + + +def test_posixaccount_objectclass_automemberdefaultgroup(topo, _create_all_entries): + """Verify the PosixAccount user + + :id: 4ff0f642-e7ff-11e8-ac88-8c16451d917b + :setup: Instance with replication + :steps: + 1. Add users with PosixAccount ObjectClass + 2. Verify the same user added as a member to autoMemberDefaultGroup + :expected results: + 1. Should success + 2. Should success + """ + test_id = "autoMembers_05" + default_group = "cn=TestDef1,CN=testuserGroups,{}".format(TEST_BASE) + user = add_user(topo, "User_{}".format(test_id), AUTO_MEM_SCOPE_TEST, "19", "18", "Supervisor") + assert check_groups(topo, default_group, user.dn, "member") + user.delete() + with pytest.raises(AssertionError): + assert check_groups(topo, default_group, user.dn, "member") + + +def test_duplicated_member_attributes_added_when_the_entry_is_re_created(topo, _create_all_entries): + """Checking whether duplicated member attributes added when the entry is re-created + + :id: 4ff2afaa-e7ff-11e8-8a92-8c16451d917b + :setup: Instance with replication + :steps: + 1. Create a user + 2. It should present as member in all automember groups + 3. Delete use + 4. It should not present as member in all automember groups + 5. Recreate same user + 6. It should present as member in all automember groups + :expected results: + 1. Should success + 2. Should success + 3. Should success + 4. Should success + 5. Should success + 6. Should success + """ + test_id = "autoMembers_06" + default_group = "cn=TestDef1,CN=testuserGroups,{}".format(TEST_BASE) + user = add_user(topo, "User_{}".format(test_id), AUTO_MEM_SCOPE_TEST, "19", "16", "Supervisor") + assert check_groups(topo, default_group, user.dn, "member") + user.delete() + with pytest.raises(AssertionError): + assert check_groups(topo, default_group, user.dn, "member") + user = add_user(topo, "User_{}".format(test_id), AUTO_MEM_SCOPE_TEST, "19", "15", "Supervisor") + assert check_groups(topo, default_group, user.dn, "member") + user.delete() + + +def test_multi_valued_automemberdefaultgroup_for_hostgroups(topo, _create_all_entries): + """Multi-valued autoMemberDefaultGroup + + :id: 4ff32a02-e7ff-11e8-99a1-8c16451d917b + :setup: Instance with replication + :steps: + 1. Create a user + 2. Check user is present in all Automember Groups as member + 3. Delete the user + 4. 
Check user is not present in all Automember Groups + :expected results: + 1. Should success + 2. Should success + 3. Should success + 4. Should success + """ + test_id = "autoMembers_07" + default_group1 = "cn=TestDef1,CN=testuserGroups,{}".format(TEST_BASE) + default_group2 = "cn=TestDef2,CN=testuserGroups,{}".format(TEST_BASE) + default_group3 = "cn=TestDef3,CN=testuserGroups,{}".format(TEST_BASE) + user = add_user(topo, "User_{}".format(test_id), AUTO_MEM_SCOPE_TEST, "19", "14", "TestEngr") + for grp in [default_group1, default_group2, default_group3]: + assert check_groups(topo, grp, user.dn, "member") + user.delete() + with pytest.raises(AssertionError): + assert check_groups(topo, default_group1, user.dn, "member") + + +def test_plugin_creates_member_attributes_of_the_automemberdefaultgroup(topo, _create_all_entries): + """Checking whether plugin creates member attributes if it already + exists for some of the autoMemberDefaultGroup + + :id: 4ff3ba76-e7ff-11e8-9846-8c16451d917b + :setup: Instance with replication + :steps: + 1. Add a non existing user to some groups as member + 2. Then Create the user + 3. Check the same user is present to other groups also as member + :expected results: + 1. Should success + 2. Should success + 3. Should success + """ + test_id = "autoMembers_08" + default_group1 = "cn=TestDef1,CN=testuserGroups,{}".format(TEST_BASE) + default_group2 = "cn=TestDef5,CN=testuserGroups,{}".format(TEST_BASE) + default_group3 = "cn=TestDef3,CN=testuserGroups,{}".format(TEST_BASE) + add_member_attr(topo, + "cn=TestDef2,CN=testuserGroups,{}".format(TEST_BASE), + "uid=User_{},{}".format(test_id, AUTO_MEM_SCOPE_TEST), "member") + add_member_attr(topo, + "cn=TestDef4,CN=testuserGroups,{}".format(TEST_BASE), + "uid=User_{},{}".format(test_id, AUTO_MEM_SCOPE_TEST), "member") + user = add_user(topo, "User_{}".format(test_id), AUTO_MEM_SCOPE_TEST, "19", "14", "TestEngr") + for grp in [default_group1, default_group2, default_group3]: + assert check_groups(topo, grp, user.dn, "member") + user.delete() + + +def test_multi_valued_automemberdefaultgroup_with_uniquemember(topo, _create_all_entries): + """Multi-valued autoMemberDefaultGroup with uniquemember attributes + + :id: 4ff4461c-e7ff-11e8-8124-8c16451d917b + :setup: Instance with replication + :steps: + 1. Modify automember config entry to use uniquemember + 2. Change object class for all groups which is used for automember grouping + 3. Add user uniquemember attributes + 4. Check uniqueMember attribute in groups + 5. Revert the changes done above + :expected results: + 1. Should success + 2. Should success + 3. Should success + 4. Should success + 5. 
Should success + """ + test_id = "autoMembers_09" + instance = topo.ms["master1"] + auto = AutoMembershipPlugin(topo.ms["master1"]) + # Modify automember config entry to use uniquemember: cn=testuserGroups,PLUGIN_AUTO + AutoMembershipDefinition( + instance, "cn=testuserGroups,{}".format(auto.dn)).replace('autoMemberGroupingAttr', + "uniquemember: dn") + instance.restart() + default_group1 = "cn=TestDef1,CN=testuserGroups,{}".format(TEST_BASE) + default_group2 = "cn=TestDef2,CN=testuserGroups,{}".format(TEST_BASE) + default_group3 = "cn=TestDef3,CN=testuserGroups,{}".format(TEST_BASE) + default_group4 = "cn=TestDef4,CN=testuserGroups,{}".format(TEST_BASE) + default_group5 = "cn=TestDef5,CN=testuserGroups,{}".format(TEST_BASE) + for grp in (default_group1, default_group2, default_group3, default_group4, default_group5): + instance_of_group = Group(topo.ms["master1"], grp) + change_grp_objclass("groupOfUniqueNames", "member", instance_of_group) + # Add user: uid=User_{test_id}, AutoMemScope + user = add_user(topo, "User_{}".format(test_id), AUTO_MEM_SCOPE_TEST, "19", "14", "New") + # Checking groups... + assert user.dn.lower() in UniqueGroup(topo.ms["master1"], + default_group1).get_attr_val_utf8("uniqueMember") + # Delete user uid=User_{test_id},AutoMemScope + user.delete() + # Change the automember config back to using \"member\" + AutoMembershipDefinition( + instance, "cn=testuserGroups,{}".format(auto.dn)).replace('autoMemberGroupingAttr', + "member: dn") + for grp in [default_group1, default_group2, default_group3, default_group4, default_group5]: + instance_of_group = UniqueGroup(topo.ms["master1"], grp) + change_grp_objclass("groupOfNames", "uniquemember", instance_of_group) + topo.ms["master1"].restart() + + +def test_invalid_automembergroupingattr_member(topo, _create_all_entries): + """Invalid autoMemberGroupingAttr-member + + :id: 4ff4b598-e7ff-11e8-a3a3-8c16451d917b + :setup: Instance with replication + :steps: + 1. Change object class for one group which is used for automember grouping + 2. Try to add user with invalid parameter + 3. Check member attribute on other groups + 4. Check member attribute on group where object class was changed + 5. Revert the object class where it was changed + :expected results: + 1. Should success + 2. Should fail (ldap.UNWILLING_TO_PERFORM) + 3. Should success + 4. Should fail (AssertionError) + 5. Should success + """ + test_id = "autoMembers_10" + default_group = "cn=TestDef1,CN=testuserGroups,{}".format(TEST_BASE) + instance_of_group = Group(topo.ms["master1"], default_group) + change_grp_objclass("groupOfUniqueNames", "member", instance_of_group) + with pytest.raises(ldap.UNWILLING_TO_PERFORM): + add_user(topo, "User_{}".format(test_id), AUTO_MEM_SCOPE_TEST, "19", "20", "Invalid") + with pytest.raises(AssertionError): + assert check_groups(topo, default_group, + "uid=User_{},{}".format(test_id, AUTO_MEM_SCOPE_TEST), "member") + change_grp_objclass("groupOfNames", "uniquemember", instance_of_group) + + +def test_valid_and_invalid_automembergroupingattr(topo, _create_all_entries): + """Valid and invalid autoMemberGroupingAttr + + :id: 4ff4fad0-e7ff-11e8-9cbd-8c16451d917b + :setup: Instance with replication + :steps: + 1. Change object class for some groups which is used for automember grouping + 2. Try to add user with invalid parameter + 3. Check member attribute on other groups + 4. Check member attribute on groups where object class was changed + 5. Revert the object class where it was changed + :expected results: + 1. Should success + 2. 
Should fail (ldap.UNWILLING_TO_PERFORM) + 3. Should success + 4. Should fail (AssertionError) + 5. Should success + """ + test_id = "autoMembers_11" + default_group_1 = "cn=TestDef1,CN=testuserGroups,{}".format(TEST_BASE) + default_group_2 = "cn=TestDef2,CN=testuserGroups,{}".format(TEST_BASE) + default_group_3 = "cn=TestDef3,CN=testuserGroups,{}".format(TEST_BASE) + default_group_4 = "cn=TestDef4,CN=testuserGroups,{}".format(TEST_BASE) + default_group_5 = "cn=TestDef5,CN=testuserGroups,{}".format(TEST_BASE) + grp_4_5 = [default_group_4, default_group_5] + for grp in grp_4_5: + instance_of_group = Group(topo.ms["master1"], grp) + change_grp_objclass("groupOfUniqueNames", "member", instance_of_group) + with pytest.raises(ldap.UNWILLING_TO_PERFORM): + add_user(topo, "User_{}".format(test_id), AUTO_MEM_SCOPE_TEST, "19", "24", "MixUsers") + for grp in [default_group_1, default_group_2, default_group_3]: + assert not check_groups(topo, grp, "cn=User_{},{}".format(test_id, + AUTO_MEM_SCOPE_TEST), "member") + for grp in grp_4_5: + with pytest.raises(AssertionError): + assert check_groups(topo, grp, "cn=User_{},{}".format(test_id, + AUTO_MEM_SCOPE_TEST), "member") + for grp in grp_4_5: + instance_of_group = Group(topo.ms["master1"], grp) + change_grp_objclass("groupOfNames", "uniquemember", instance_of_group) + + +def test_add_regular_expressions_for_user_groups_and_check_for_member_attribute_after_adding_users( + topo, _create_all_entries): + """Regular expressions for user groups + + :id: 4ff53fc2-e7ff-11e8-9a18-8c16451d917b + :setup: Instance with replication + :steps: + 1. Add user with a match with regular expressions for user groups + 2. check for member attribute after adding users + :expected results: + 1. Should success + 2. Should success + """ + test_id = "autoMembers_12" + default_group = f'cn=SuffDef1,ou=userGroups,{BASE_SUFF}' + user = add_user(topo, "User_{}".format(test_id), AUTO_MEM_SCOPE_BASE, "19", "0", "HR") + assert check_groups(topo, default_group, user.dn, "member") + assert number_memberof(topo, user.dn, 5) + user.delete() + + +LIST_FOR_PARAMETERIZATION = [ + ("autoMembers_22", "5288", "5289", "Contractor", "5291", "5292", "Contractors"), + ("autoMembers_21", "1161", "1162", "Contractor", "1162", "1163", "Contractors"), + ("autoMembers_20", "1188", "1189", "CEO", "1191", "1192", "Contractors"), + ("autoMembers_15", "9288", "9289", "Manager", "9291", "9292", "Managers"), + ("autoMembers_14", "561", "562", "Manager", "562", "563", "Managers"), + ("autoMembers_13", "9788", "9789", "VPEngg", "9392", "9393", "Managers")] + + +@pytest.mark.parametrize("testid, uid, gid, role, uid2, gid2, m_grp", LIST_FOR_PARAMETERIZATION) +def test_matching_gid_role_inclusive_regular_expression(topo, _create_all_entries, + testid, uid, gid, role, uid2, gid2, m_grp): + """ + Matching gid nos and Role for the Inclusive regular expression + + :id: 4ff71ce8-e7ff-11e8-b69b-8c16451d917b + :parametrized: yes + :setup: Instance with replication + :steps: + 1. Create users with matching gid nos and Role for the Inclusive regular expression + 2. It will be filtered with gidNumber, uidNumber and nsAdminGroupName + 3. It will a match for contract_grp + :expected results: + 1. Should success + 2. Should success + 3. 
Should success + """ + contract_grp = f'cn={m_grp},ou=userGroups,{BASE_SUFF}' + user1 = add_user(topo, "User_{}".format(testid), AUTO_MEM_SCOPE_BASE, uid, gid, role) + user2 = add_user(topo, "SecondUser_{}".format(testid), AUTO_MEM_SCOPE_BASE, + uid2, gid2, role) + for user_dn in [user1.dn, user2.dn]: + assert check_groups(topo, contract_grp, user_dn, "member") + assert number_memberof(topo, user1.dn, 1) + for user in [user1, user2]: + user.delete() + + +LIST_FOR_PARAMETERIZATION = [ + ("autoMembers_26", "5788", "5789", "Intern", "Contractors", "SuffDef1", 5), + ("autoMembers_25", "9788", "9789", "Employee", "Contractors", "Managers", 1), + ("autoMembers_24", "1110", "1111", "Employee", "Contractors", "SuffDef1", 5), + ("autoMembers_23", "2788", "2789", "Contractor", "Contractors", "SuffDef1", 5), + ("autoMembers_19", "5788", "5789", "HRManager", "Managers", "SuffDef1", 5), + ("autoMembers_18", "6788", "6789", "Junior", "Managers", "SuffDef1", 5), + ("autoMembers_17", "562", "563", "Junior", "Managers", "SuffDef1", 5), + ("autoMembers_16", "6788", "6789", "Manager", "Managers", "SuffDef1", 5)] + + +@pytest.mark.parametrize("testid, uid, gid, role, c_grp, m_grp, number", LIST_FOR_PARAMETERIZATION) +def test_gid_and_role_inclusive_exclusive_regular_expression(topo, _create_all_entries, + testid, uid, gid, role, + c_grp, m_grp, number): + """ + Matching gid nos and Role for the Inclusive and Exclusive regular expression + + :id: 4ff7d160-e7ff-11e8-8fbc-8c16451d917b + :parametrized: yes + :setup: Instance with replication + :steps: + 1. Create user with not matching gid nos and Role for + the Inclusive and Exclusive regular expression + 2. It will be filtered with gidNumber, uidNumber and nsAdminGroupName + 3. It will not match for contract_grp(Exclusive regular expression) + 4. It will match for default_group(Inclusive regular expression) + :expected results: + 1. Should success + 2. Should success + 3. Should success + 4. Should success + """ + contract_grp = f'cn={c_grp},ou=userGroups,{BASE_SUFF}' + default_group = f'cn={m_grp},ou=userGroups,{BASE_SUFF}' + user = add_user(topo, "User_{}".format(testid), AUTO_MEM_SCOPE_BASE, uid, gid, role) + with pytest.raises(AssertionError): + assert check_groups(topo, contract_grp, user.dn, "member") + check_groups(topo, default_group, user.dn, "member") + assert number_memberof(topo, user.dn, number) + user.delete() + + +LIST_FOR_PARAMETERIZATION = [ + ("autoMembers_32", "555", "720", "Employee", "SubDef1", "SubDef3"), + ("autoMembers_31", "515", "200", "Junior", "SubDef1", "SubDef5"), + ("autoMembers_30", "999", "400", "Supervisor", "SubDef1", "SubDef2"), + ("autoMembers_28", "555", "3663", "ContractHR", "Contractors,cn=subsuffGroups", + "Managers,cn=subsuffGroups")] + + +@pytest.mark.parametrize("testid, uid, gid, role, c_grp, m_grp", LIST_FOR_PARAMETERIZATION) +def test_managers_contractors_exclusive_regex_rules_member_uid(topo, _create_all_entries, + testid, uid, gid, role, + c_grp, m_grp): + """ + Match both managers and contractors exclusive regex rules + + :id: 4ff8be18-e7ff-11e8-94aa-8c16451d917b + :parametrized: yes + :setup: Instance with replication + :steps: + 1. Add Users to match both managers and contractors exclusive regex rules, + memberUid created in Default grp + 2. It will be filtered with gidNumber, uidNumber and nsAdminGroupName + 3. It will match for default_group1 and default_group2(Inclusive regular expression) + :expected results: + 1. Should success + 2. Should success + 3. 
Should success + """ + default_group1 = f'cn={c_grp},{SUBSUFFIX}' + default_group2 = f'cn={m_grp},{SUBSUFFIX}' + user = add_user(topo, "User_{}".format(testid), AUTO_MEM_SCOPE_BASE, uid, gid, role) + for group in [default_group1, default_group2]: + assert check_groups(topo, group, user.dn, "memberuid") + user.delete() + + +LIST_FOR_PARAMETERIZATION = [ + ("autoMembers_27", "595", "690", "ContractHR", "Managers", "Contractors"), + ("autoMembers_29", "8195", "2753", "Employee", "Contractors", "Managers"), + ("autoMembers_33", "545", "3333", "Supervisor", "Contractors", "Managers"), + ("autoMembers_34", "8195", "693", "Temporary", "Managers", "Contractors")] + + +@pytest.mark.parametrize("testid, uid, gid, role, c_grp, m_grp", LIST_FOR_PARAMETERIZATION) +def test_managers_inclusive_regex_rule(topo, _create_all_entries, + testid, uid, gid, role, c_grp, m_grp): + """ + Match managers inclusive regex rule, and no + inclusive/exclusive Contractors regex rules + + :id: 4ff8d862-e7ff-11e8-b688-8c16451d917b + :parametrized: yes + :setup: Instance with replication + :steps: + 1. Add User to match managers inclusive regex rule, and no + inclusive/exclusive Contractors regex rules + 2. It will be filtered with gidNumber, uidNumber and nsAdminGroupName(Supervisor) + 3. It will match for managers_grp(Inclusive regular expression) + 4. It will not match for contract_grp(Exclusive regular expression) + :expected results: + 1. Should success + 2. Should success + 3. Should success + 4. Should success + """ + contract_grp = f'cn={c_grp},cn=subsuffGroups,{SUBSUFFIX}' + managers_grp = f'cn={m_grp},cn=subsuffGroups,{SUBSUFFIX}' + user = add_user(topo, "User_{}".format(testid), AUTO_MEM_SCOPE_BASE, uid, gid, role) + check_groups(topo, managers_grp, user.dn, "memberuid") + with pytest.raises(AssertionError): + assert check_groups(topo, contract_grp, user.dn, "memberuid") + user.delete() + + +def test_reject_invalid_config_and_we_donot_deadlock_the_server(topo, _create_all_entries): + """ + Verify DS reject invalid config, and we don't deadlock the server + + :id: 4ff90c38-e7ff-11e8-b72a-8c16451d917b + :setup: Instance with replication + :steps: + 1. Verify DS reject invalid config, + 2. This operation don't deadlock the server + :expected results: + 1. Should success + 2. Should success + """ + # Changing config area to dc=automembers,dc=com + instance = AutoMembershipPlugin(topo.ms["master1"]) + instance.replace("nsslapd-pluginConfigArea", BASE_SUFF) + topo.ms["master1"] .restart() + # Attempting to add invalid config... + automembers = AutoMembershipDefinitions(topo.ms["master1"], BASE_SUFF) + with pytest.raises(ldap.UNWILLING_TO_PERFORM): + automembers.create(properties={ + 'cn': 'userGroups', + "autoMemberScope": BASE_SUFF, + "autoMemberFilter": "objectclass=posixAccount", + "autoMemberDefaultGroup": f'cn=SuffDef1,ou=userGroups,{BASE_SUFF}', + "autoMemberGroupingAttr": "member: dn" + }) + # Verify server is still working + automembers = AutoMembershipRegexRules(topo.ms["master1"], + f'cn=userGroups,cn=Auto Membership Plugin,' + f'cn=plugins,cn=config') + with pytest.raises(ldap.ALREADY_EXISTS): + automembers.create(properties={ + 'cn': 'Managers', + 'description': f'Group placement for Managers', + 'autoMemberTargetGroup': [f'cn=Managers,ou=userGroups,{BASE_SUFF}'], + 'autoMemberInclusiveRegex': [ + "gidNumber=^9", + "nsAdminGroupName=^Manager", + ], + }) + + # Adding first user... 
+ for uid in range(300, 302): + UserAccounts(topo.ms["master1"], BASE_SUFF, rdn=None).create_test_user(uid=uid, gid=uid) + # Adding this line code to remove the automembers plugin configuration. + instance.remove("nsslapd-pluginConfigArea", BASE_SUFF) + topo.ms["master1"] .restart() + + +if __name__ == "__main__": + CURRENT_FILE = os.path.realpath(__file__) + pytest.main("-s -v %s" % CURRENT_FILE) diff --git a/dirsrvtests/tests/suites/automember_plugin/configuration_test.py b/dirsrvtests/tests/suites/automember_plugin/configuration_test.py new file mode 100644 index 0000000..0f9cc49 --- /dev/null +++ b/dirsrvtests/tests/suites/automember_plugin/configuration_test.py @@ -0,0 +1,58 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2019 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. +# --- END COPYRIGHT BLOCK --- + +import os +import pytest + +from lib389.topologies import topology_st as topo +from lib389.plugins import AutoMembershipPlugin, AutoMembershipDefinitions, MemberOfPlugin +import ldap + +pytestmark = pytest.mark.tier1 + + +@pytest.mark.bz834056 +def test_configuration(topo): + """ + Automembership plugin and mixed in the plugin configuration + :id: 45a5a8f8-e800-11e8-ab16-8c16451d917b + :setup: Single Instance + :steps: + 1. Automembership plugin fails in a MMR setup, if data and config + area mixed in the plugin configuration + 2. Plugin configuration should throw proper error messages if not configured properly + :expected results: + 1. Should success + 2. Should success + """ + # Configure pluginConfigArea for PLUGIN_AUTO + AutoMembershipPlugin(topo.standalone).set("nsslapd-pluginConfigArea", 'cn=config') + # Enable MemberOf plugin + MemberOfPlugin(topo.standalone).enable() + topo.standalone.restart() + # Add invalid configuration, which mixes data and config area: All will fail + automembers = AutoMembershipDefinitions(topo.standalone) + with pytest.raises(ldap.UNWILLING_TO_PERFORM): + automembers.create(properties={ + 'cn': 'autouserGroups', + 'autoMemberScope': f'ou=Employees,cn=config', + 'autoMemberFilter': "objectclass=posixAccount", + 'autoMemberDefaultGroup': [f'cn=SuffDef1,ou=autouserGroups,cn=config', + f'cn=SuffDef2,ou=autouserGroups,cn=config'], + 'autoMemberGroupingAttr': 'member:dn' + }) + # Search in error logs + assert topo.standalone.ds_error_log.match('.*ERR - auto-membership-plugin - ' + 'automember_parse_config_entry - The default group ' + '"cn=SuffDef1,ou=autouserGroups,cn=config" ' + 'can not be a child of the plugin config area "cn=config"') + + +if __name__ == "__main__": + CURRENT_FILE = os.path.realpath(__file__) + pytest.main("-s -v %s" % CURRENT_FILE) diff --git a/dirsrvtests/tests/suites/backups/backup_test.py b/dirsrvtests/tests/suites/backups/backup_test.py new file mode 100644 index 0000000..e938914 --- /dev/null +++ b/dirsrvtests/tests/suites/backups/backup_test.py @@ -0,0 +1,73 @@ +import logging +import pytest +import os +from datetime import datetime +from lib389._constants import DEFAULT_SUFFIX, INSTALL_LATEST_CONFIG +from lib389.properties import BACKEND_SAMPLE_ENTRIES, TASK_WAIT +from lib389.topologies import topology_st as topo +from lib389.backend import Backend +from lib389.tasks import BackupTask, RestoreTask + +DEBUGGING = os.getenv("DEBUGGING", default=False) +if DEBUGGING: + logging.getLogger(__name__).setLevel(logging.DEBUG) +else: + logging.getLogger(__name__).setLevel(logging.INFO) +log = logging.getLogger(__name__) + + +def test_missing_backend(topo): + 
"""Test that an error is returned when a restore is performed for a + backend that is no longer present. + + :id: 889b8028-35cf-41d7-91f6-bc5193683646 + :setup: Standalone Instance + :steps: + 1. Create a second backend + 2. Perform a back up + 3. Remove one of the backends from the config + 4. Perform a restore + :expectedresults: + 1. Success + 2. Success + 3. Success + 4. Failure + """ + + # Create a new backend + BE_NAME = 'backupRoot' + BE_SUFFIX = 'dc=back,dc=up' + props = { + 'cn': BE_NAME, + 'nsslapd-suffix': BE_SUFFIX, + BACKEND_SAMPLE_ENTRIES: INSTALL_LATEST_CONFIG + } + be = Backend(topo.standalone) + backend_entry = be.create(properties=props) + + # perform backup + backup_dir_name = "backup-%s" % datetime.now().strftime("%Y_%m_%d_%H_%M_%S") + archive = os.path.join(topo.standalone.ds_paths.backup_dir, backup_dir_name) + backup_task = BackupTask(topo.standalone) + task_properties = {'nsArchiveDir': archive} + backup_task.create(properties=task_properties) + backup_task.wait() + assert backup_task.get_exit_code() == 0 + + # Remove new backend + backend_entry.delete() + + # Restore the backup - it should fail + restore_task = RestoreTask(topo.standalone) + task_properties = {'nsArchiveDir': archive} + restore_task.create(properties=task_properties) + restore_task.wait() + assert restore_task.get_exit_code() != 0 + + +if __name__ == '__main__': + # Run isolated + # -s for DEBUG mode + CURRENT_FILE = os.path.realpath(__file__) + pytest.main(["-s", CURRENT_FILE]) + diff --git a/dirsrvtests/tests/suites/basic/__init__.py b/dirsrvtests/tests/suites/basic/__init__.py new file mode 100644 index 0000000..8371b76 --- /dev/null +++ b/dirsrvtests/tests/suites/basic/__init__.py @@ -0,0 +1,3 @@ +""" + :Requirement: 389-ds-base: Basic Directory Server Operations +""" diff --git a/dirsrvtests/tests/suites/basic/basic_test.py b/dirsrvtests/tests/suites/basic/basic_test.py new file mode 100644 index 0000000..40f95a4 --- /dev/null +++ b/dirsrvtests/tests/suites/basic/basic_test.py @@ -0,0 +1,1409 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2016 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. 
+# --- END COPYRIGHT BLOCK --- +# + +""" + :Requirement: Basic Directory Server Operations +""" + +from subprocess import check_output, PIPE, run +from lib389 import DirSrv +from lib389.idm.user import UserAccounts +import pytest +from lib389.tasks import * +from lib389.utils import * +from lib389.topologies import topology_st +from lib389.dbgen import dbgen +from lib389.idm.organizationalunit import OrganizationalUnits +from lib389._constants import DN_DM, PASSWORD, PW_DM +from lib389.topologies import topology_st +from lib389.paths import Paths +from lib389.idm.directorymanager import DirectoryManager +from lib389.config import LDBMConfig +from lib389.dseldif import DSEldif +from lib389.rootdse import RootDSE + + +pytestmark = pytest.mark.tier0 + +default_paths = Paths() + +log = logging.getLogger(__name__) + +# Globals +USER1_DN = 'uid=user1,' + DEFAULT_SUFFIX +USER2_DN = 'uid=user2,' + DEFAULT_SUFFIX +USER3_DN = 'uid=user3,' + DEFAULT_SUFFIX +USER4_DN = 'uid=user4,' + DEFAULT_SUFFIX + +ROOTDSE_DEF_ATTR_LIST = ('namingContexts', + 'supportedLDAPVersion', + 'supportedControl', + 'supportedExtension', + 'supportedSASLMechanisms', + 'vendorName', + 'vendorVersion') + + +@pytest.fixture(scope="module") +def import_example_ldif(topology_st): + """Import the Example LDIF for the tests in this suite""" + + log.info('Initializing the "basic" test suite') + + ldif = '%s/dirsrv/data/Example.ldif' % topology_st.standalone.get_data_dir() + import_ldif = topology_st.standalone.get_ldif_dir() + "/Example.ldif" + shutil.copy(ldif, import_ldif) + + import_task = ImportTask(topology_st.standalone) + import_task.import_suffix_from_ldif(ldiffile=import_ldif, suffix=DEFAULT_SUFFIX) + import_task.wait() + + +@pytest.fixture(params=ROOTDSE_DEF_ATTR_LIST) +def rootdse_attr(topology_st, request): + """Adds an attr from the list + as the default attr to the rootDSE + """ + # Ensure the server is started and connected + topology_st.standalone.start() + + RETURN_DEFAULT_OPATTR = "nsslapd-return-default-opattr" + rootdse_attr_name = ensure_bytes(request.param) + + log.info(" Add the %s: %s to rootdse" % (RETURN_DEFAULT_OPATTR, + rootdse_attr_name)) + mod = [(ldap.MOD_ADD, RETURN_DEFAULT_OPATTR, rootdse_attr_name)] + try: + topology_st.standalone.modify_s("", mod) + except ldap.LDAPError as e: + log.fatal('Failed to add attr: error (%s)' % (e.args[0]['desc'])) + assert False + + def fin(): + log.info(" Delete the %s: %s from rootdse" % (RETURN_DEFAULT_OPATTR, + rootdse_attr_name)) + mod = [(ldap.MOD_DELETE, RETURN_DEFAULT_OPATTR, rootdse_attr_name)] + try: + topology_st.standalone.modify_s("", mod) + except ldap.LDAPError as e: + log.fatal('Failed to delete attr: error (%s)' % (e.args[0]['desc'])) + assert False + + request.addfinalizer(fin) + + return rootdse_attr_name + + +def test_basic_ops(topology_st, import_example_ldif): + """Tests adds, mods, modrdns, and deletes operations + + :id: 33f97f55-60bf-46c7-b880-6c488517ae19 + + :setup: Standalone instance + + :steps: + 1. Add 3 test users USER1, USER2 and USER3 to database + 2. Modify (ADD, REPLACE and DELETE) description for USER1 in database + 3. Rename USER1, USER2 and USER3 using Modrds + 4. Delete test entries USER1, USER2 and USER3 + + :expectedresults: + 1. Add operation should PASS. + 2. Modify operations should PASS. + 3. Rename operations should PASS. + 4. Delete operations should PASS. 
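+
+ As an illustration of step 3, the "new superior" rename is a plain
+ python-ldap modrdn; a minimal sketch (``conn`` is assumed to be an
+ already-bound connection, the DNs are examples only)::
+
+ conn.rename_s('uid=user3,dc=example,dc=com', 'cn=user3',
+ newsuperior='ou=people,dc=example,dc=com', delold=1)
+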
+ """ + log.info('Running test_basic_ops...') + USER1_NEWDN = 'cn=user1' + USER2_NEWDN = 'cn=user2' + USER3_NEWDN = 'cn=user3' + NEW_SUPERIOR = 'ou=people,' + DEFAULT_SUFFIX + USER1_RDN_DN = 'cn=user1,' + DEFAULT_SUFFIX + USER2_RDN_DN = 'cn=user2,' + DEFAULT_SUFFIX + USER3_RDN_DN = 'cn=user3,' + NEW_SUPERIOR # New superior test + + # + # Adds# + try: + topology_st.standalone.add_s(Entry((USER1_DN, + {'objectclass': "top extensibleObject".split(), + 'sn': '1', + 'cn': 'user1', + 'uid': 'user1', + 'userpassword': 'password'}))) + except ldap.LDAPError as e: + log.error('Failed to add test user' + USER1_DN + ': error ' + e.args[0]['desc']) + assert False + + try: + topology_st.standalone.add_s(Entry((USER2_DN, + {'objectclass': "top extensibleObject".split(), + 'sn': '2', + 'cn': 'user2', + 'uid': 'user2', + 'userpassword': 'password'}))) + except ldap.LDAPError as e: + log.error('Failed to add test user' + USER2_DN + ': error ' + e.args[0]['desc']) + assert False + + try: + topology_st.standalone.add_s(Entry((USER3_DN, + {'objectclass': "top extensibleObject".split(), + 'sn': '3', + 'cn': 'user3', + 'uid': 'user3', + 'userpassword': 'password'}))) + except ldap.LDAPError as e: + log.error('Failed to add test user' + USER3_DN + ': error ' + e.args[0]['desc']) + assert False + + # + # Mods + # + try: + topology_st.standalone.modify_s(USER1_DN, [(ldap.MOD_ADD, 'description', + b'New description')]) + except ldap.LDAPError as e: + log.error('Failed to add description: error ' + e.args[0]['desc']) + assert False + + try: + topology_st.standalone.modify_s(USER1_DN, [(ldap.MOD_REPLACE, 'description', + b'Modified description')]) + except ldap.LDAPError as e: + log.error('Failed to modify description: error ' + e.args[0]['desc']) + assert False + + try: + topology_st.standalone.modify_s(USER1_DN, [(ldap.MOD_DELETE, 'description', + None)]) + except ldap.LDAPError as e: + log.error('Failed to delete description: error ' + e.args[0]['desc']) + assert False + + # + # Modrdns + # + try: + topology_st.standalone.rename_s(USER1_DN, USER1_NEWDN, delold=1) + except ldap.LDAPError as e: + log.error('Failed to modrdn user1: error ' + e.args[0]['desc']) + assert False + + try: + topology_st.standalone.rename_s(USER2_DN, USER2_NEWDN, delold=0) + except ldap.LDAPError as e: + log.error('Failed to modrdn user2: error ' + e.args[0]['desc']) + assert False # Modrdn - New superior + + try: + topology_st.standalone.rename_s(USER3_DN, USER3_NEWDN, + newsuperior=NEW_SUPERIOR, delold=1) + except ldap.LDAPError as e: + log.error('Failed to modrdn(new superior) user3: error ' + e.args[0]['desc']) + assert False + # + # Deletes + # + try: + topology_st.standalone.delete_s(USER1_RDN_DN) + except ldap.LDAPError as e: + log.error('Failed to delete test entry1: ' + e.args[0]['desc']) + assert False + + try: + topology_st.standalone.delete_s(USER2_RDN_DN) + except ldap.LDAPError as e: + log.error('Failed to delete test entry2: ' + e.args[0]['desc']) + assert False + + try: + topology_st.standalone.delete_s(USER3_RDN_DN) + except ldap.LDAPError as e: + log.error('Failed to delete test entry3: ' + e.args[0]['desc']) + assert False + log.info('test_basic_ops: PASSED') + + +def test_basic_import_export(topology_st, import_example_ldif): + """Test online and offline LDIF import & export + + :id: 3ceeea11-9235-4e20-b80e-7203b2c6e149 + + :setup: Standalone instance + + :steps: + 1. Generate a test ldif (50k entries) + 2. Import test ldif file using Online import. + 3. Import test ldif file using Offline import (ldif2db). + 4. 
Export test ldif file using Online export. + 5. Export test ldif file using Offline export (db2ldif). + 6. Cleanup - Import the Example LDIF for the other tests in this suite + + :expectedresults: + 1. Test ldif file creation should PASS. + 2. Online import should PASS. + 3. Offline import should PASS. + 4. Online export should PASS. + 5. Offline export should PASS. + 6. Cleanup should PASS. + """ + + log.info('Running test_basic_import_export...') + + # + # Test online/offline LDIF imports + # + topology_st.standalone.start() + + # Generate a test ldif (50k entries) + log.info("Generating LDIF...") + ldif_dir = topology_st.standalone.get_ldif_dir() + import_ldif = ldif_dir + '/basic_import.ldif' + dbgen(topology_st.standalone, 50000, import_ldif, DEFAULT_SUFFIX) + + # Online + log.info("Importing LDIF online...") + import_task = ImportTask(topology_st.standalone) + import_task.import_suffix_from_ldif(ldiffile=import_ldif, suffix=DEFAULT_SUFFIX) + + # Wait a bit till the task is created and available for searching + time.sleep(0.5) + + # Good as place as any to quick test the task has some expected attributes + if ds_is_newer('1.4.1.2'): + assert import_task.present('nstaskcreated') + assert import_task.present('nstasklog') + assert import_task.present('nstaskcurrentitem') + assert import_task.present('nstasktotalitems') + assert import_task.present('ttl') + + import_task.wait() + + # Offline + log.info("Importing LDIF offline...") + topology_st.standalone.stop() + if not topology_st.standalone.ldif2db(DEFAULT_BENAME, None, None, None, import_ldif): + log.fatal('test_basic_import_export: Offline import failed') + assert False + topology_st.standalone.start() + + # + # Test online and offline LDIF export + # + + # Online export + log.info("Exporting LDIF online...") + export_ldif = ldif_dir + '/export.ldif' + + export_task = ExportTask(topology_st.standalone) + export_task.export_suffix_to_ldif(ldiffile=export_ldif, suffix=DEFAULT_SUFFIX) + export_task.wait() + + # Offline export + log.info("Exporting LDIF offline...") + topology_st.standalone.stop() + if not topology_st.standalone.db2ldif(DEFAULT_BENAME, (DEFAULT_SUFFIX,), + None, None, None, export_ldif): + log.fatal('test_basic_import_export: Failed to run offline db2ldif') + assert False + + topology_st.standalone.start() + + # + # Cleanup - Import the Example LDIF for the other tests in this suite + # + log.info("Restore datrabase, import initial LDIF...") + ldif = '%s/dirsrv/data/Example.ldif' % topology_st.standalone.get_data_dir() + import_ldif = topology_st.standalone.get_ldif_dir() + "/Example.ldif" + shutil.copyfile(ldif, import_ldif) + + import_task = ImportTask(topology_st.standalone) + import_task.import_suffix_from_ldif(ldiffile=import_ldif, suffix=DEFAULT_SUFFIX) + import_task.wait() + + log.info('test_basic_import_export: PASSED') + + +def test_basic_backup(topology_st, import_example_ldif): + """Tests online and offline backup and restore + + :id: 0e9d91f8-8748-40b6-ab03-fbd1998eb985 + + :setup: Standalone instance and import example.ldif + + :steps: + 1. Test online backup using db2bak. + 2. Test online restore using bak2db. + 3. Test offline backup using db2bak. + 4. Test offline restore using bak2db. + + :expectedresults: + 1. Online backup should PASS. + 2. Online restore should PASS. + 3. Offline backup should PASS. + 4. Offline restore should PASS. 
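+
+ The offline half (steps 3 and 4) reduces to the lib389 wrappers used in
+ the test body; a minimal sketch, with ``inst`` standing in for the
+ stopped DirSrv instance and ``backup_dir`` for the archive path::
+
+ inst.stop()
+ assert inst.db2bak(backup_dir)
+ assert inst.bak2db(backup_dir)
+ inst.start()
+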
+ """ + + log.info('Running test_basic_backup...') + + backup_dir = topology_st.standalone.get_bak_dir() + '/backup_test' + + # Test online backup + try: + topology_st.standalone.tasks.db2bak(backup_dir=backup_dir, + args={TASK_WAIT: True}) + except ValueError: + log.fatal('test_basic_backup: Online backup failed') + assert False + + # Test online restore + try: + topology_st.standalone.tasks.bak2db(backup_dir=backup_dir, + args={TASK_WAIT: True}) + except ValueError: + log.fatal('test_basic_backup: Online restore failed') + assert False + + # Test offline backup + topology_st.standalone.stop() + if not topology_st.standalone.db2bak(backup_dir): + log.fatal('test_basic_backup: Offline backup failed') + assert False + + # Test offline restore + if not topology_st.standalone.bak2db(backup_dir): + log.fatal('test_basic_backup: Offline backup failed') + assert False + topology_st.standalone.start() + + log.info('test_basic_backup: PASSED') + + +def test_basic_db2index(topology_st, import_example_ldif): + """Assert db2index can operate correctly. + + :id: 191fc0fd-9722-46b5-a7c3-e8760effe119 + + :setup: Standalone instance + + :steps: + 1: call db2index + + :expectedresults: + 1: Index succeeds. + + """ + topology_st.standalone.stop() + topology_st.standalone.db2index() + topology_st.standalone.db2index(suffixes=[DEFAULT_SUFFIX], attrs=['uid']) + topology_st.standalone.start() + + +def test_basic_acl(topology_st, import_example_ldif): + """Run some basic access control (ACL) tests + + :id: 4f4e705f-32f4-4065-b3a8-2b0c2525798b + + :setup: Standalone instance + + :steps: + 1. Add two test users USER1_DN and USER2_DN. + 2. Add an aci that denies USER1 from doing anything. + 3. Set the default anonymous access for USER2. + 4. Try searching entries using USER1. + 5. Try searching entries using USER2. + 6. Try searching entries using root dn. + 7. Cleanup - delete test users and test ACI. + + :expectedresults: + 1. Test Users should be added. + 2. ACI should be added. + 3. This operation should PASS. + 4. USER1 should not be able to search anything. + 5. USER2 should be able to search everything except password. + 6. RootDN should be allowed to search everything. + 7. Cleanup should PASS. + """ + + """Run some basic access control(ACL) tests""" + log.info('Running test_basic_acl...') + + DENY_ACI = ensure_bytes('(targetattr = "*")(version 3.0;acl "deny user";deny (all)(userdn = "ldap:///%s");)' % USER1_DN) + + # + # Add two users + # + try: + topology_st.standalone.add_s(Entry((USER1_DN, + {'objectclass': "top extensibleObject".split(), + 'sn': '1', + 'cn': 'user 1', + 'uid': 'user1', + 'userpassword': PASSWORD}))) + except ldap.LDAPError as e: + log.fatal('test_basic_acl: Failed to add test user ' + USER1_DN + + ': error ' + e.args[0]['desc']) + assert False + + try: + topology_st.standalone.add_s(Entry((USER2_DN, + {'objectclass': "top extensibleObject".split(), + 'sn': '2', + 'cn': 'user 2', + 'uid': 'user2', + 'userpassword': PASSWORD}))) + except ldap.LDAPError as e: + log.fatal('test_basic_acl: Failed to add test user ' + USER1_DN + + ': error ' + e.args[0]['desc']) + assert False + + # + # Add an aci that denies USER1 from doing anything, + # and also set the default anonymous access + # + try: + topology_st.standalone.modify_s(DEFAULT_SUFFIX, [(ldap.MOD_ADD, 'aci', DENY_ACI)]) + except ldap.LDAPError as e: + log.fatal('test_basic_acl: Failed to add DENY ACI: error ' + e.args[0]['desc']) + assert False + + # + # Make sure USER1_DN can not search anything, but USER2_dn can... 
+ # + try: + topology_st.standalone.simple_bind_s(USER1_DN, PASSWORD) + except ldap.LDAPError as e: + log.fatal('test_basic_acl: Failed to bind as user1, error: ' + e.args[0]['desc']) + assert False + + try: + entries = topology_st.standalone.search_s(DEFAULT_SUFFIX, + ldap.SCOPE_SUBTREE, + '(uid=*)') + if entries: + log.fatal('test_basic_acl: User1 was incorrectly able to search the suffix!') + assert False + except ldap.LDAPError as e: + log.fatal('test_basic_acl: Search suffix failed(as user1): ' + e.args[0]['desc']) + assert False + + # Now try user2... Also check that userpassword is stripped out + try: + topology_st.standalone.simple_bind_s(USER2_DN, PASSWORD) + except ldap.LDAPError as e: + log.fatal('test_basic_acl: Failed to bind as user2, error: ' + e.args[0]['desc']) + assert False + + try: + entries = topology_st.standalone.search_s(DEFAULT_SUFFIX, + ldap.SCOPE_SUBTREE, + '(uid=user1)') + if not entries: + log.fatal('test_basic_acl: User1 incorrectly not able to search the suffix') + assert False + if entries[0].hasAttr('userpassword'): + # The default anonymous access aci should have stripped out userpassword + log.fatal('test_basic_acl: User2 was incorrectly able to see userpassword') + assert False + except ldap.LDAPError as e: + log.fatal('test_basic_acl: Search for user1 failed(as user2): ' + e.args[0]['desc']) + assert False + + # Make sure RootDN can also search (this also resets the bind dn to the + # Root DN for future operations) + try: + topology_st.standalone.simple_bind_s(DN_DM, PW_DM) + except ldap.LDAPError as e: + log.fatal('test_basic_acl: Failed to bind as ROotDN, error: ' + e.args[0]['desc']) + assert False + + try: + entries = topology_st.standalone.search_s(DEFAULT_SUFFIX, + ldap.SCOPE_SUBTREE, + '(uid=*)') + if not entries: + log.fatal('test_basic_acl: Root DN incorrectly not able to search the suffix') + assert False + except ldap.LDAPError as e: + log.fatal('test_basic_acl: Search for user1 failed(as user2): ' + e.args[0]['desc']) + assert False + + # + # Cleanup + # + try: + topology_st.standalone.modify_s(DEFAULT_SUFFIX, [(ldap.MOD_DELETE, 'aci', DENY_ACI)]) + except ldap.LDAPError as e: + log.fatal('test_basic_acl: Failed to delete DENY ACI: error ' + e.args[0]['desc']) + assert False + + try: + topology_st.standalone.delete_s(USER1_DN) + except ldap.LDAPError as e: + log.fatal('test_basic_acl: Failed to delete test entry1: ' + e.args[0]['desc']) + assert False + + try: + topology_st.standalone.delete_s(USER2_DN) + except ldap.LDAPError as e: + log.fatal('test_basic_acl: Failed to delete test entry2: ' + e.args[0]['desc']) + assert False + + log.info('test_basic_acl: PASSED') + + +def test_basic_searches(topology_st, import_example_ldif): + """Tests basic search operations with filters. + + :id: 426a59ff-49b8-4a70-b377-0c0634a29b6f + + :setup: Standalone instance, add example.ldif to the database + + :steps: + 1. Execute search command while using different filters. + 2. Check number of entries returned by search filters. + + :expectedresults: + 1. Search command should PASS. + 2. Number of result entries returned should match number of the database entries according to the search filter. 
+ """ + + log.info('Running test_basic_searches...') + + filters = (('(uid=scarter)', 1), + ('(uid=tmorris*)', 1), + ('(uid=*hunt*)', 4), + ('(uid=*cope)', 2), + ('(mail=*)', 150), + ('(roomnumber>=4000)', 35), + ('(roomnumber<=4000)', 115), + ('(&(roomnumber>=4000)(roomnumber<=4500))', 18), + ('(!(l=sunnyvale))', 120), + ('(&(uid=t*)(l=santa clara))', 7), + ('(|(uid=k*)(uid=r*))', 18), + ('(|(uid=t*)(l=sunnyvale))', 50), + ('(&(!(uid=r*))(ou=people))', 139), + ('(&(uid=m*)(l=sunnyvale)(ou=people)(mail=*example*)(roomNumber=*))', 3), + ('(&(|(uid=m*)(l=santa clara))(roomNumber=22*))', 5), + ('(&(|(uid=m*)(l=santa clara))(roomNumber=22*)(!(roomnumber=2254)))', 4),) + + for (search_filter, search_result) in filters: + try: + entries = topology_st.standalone.search_s(DEFAULT_SUFFIX, + ldap.SCOPE_SUBTREE, + search_filter) + if len(entries) != search_result: + log.fatal('test_basic_searches: An incorrect number of entries\ + was returned from filter (%s): (%d) expected (%d)' % + (search_filter, len(entries), search_result)) + assert False + except ldap.LDAPError as e: + log.fatal('Search failed: ' + e.args[0]['desc']) + assert False + + log.info('test_basic_searches: PASSED') + + +@pytest.fixture(scope="module") +def add_test_entry(topology_st, request): + # Add test entry + topology_st.standalone.add_s(Entry((USER4_DN, + {'objectclass': "top extensibleObject".split(), + 'cn': 'user1', 'uid': 'user1'}))) + + +search_params = [(['1.1'], 'cn', False), + (['1.1', 'cn'], 'cn', True), + (['+'], 'nsUniqueId', True), + (['*'], 'cn', True), + (['cn'], 'cn', True)] +@pytest.mark.skipif(ds_is_older("1.4.2.0"), reason="Not implemented") +@pytest.mark.parametrize("attrs, attr, present", search_params) +def test_search_req_attrs(topology_st, add_test_entry, attrs, attr, present): + """Test requested attributes in search operations. + + :id: 426a59ff-49b8-4a70-b377-0c0634a29b6e + :parametrized: yes + :setup: Standalone instance + :steps: + 1. Test "1.1" does not return any attributes. + 2. Test "1.1" is ignored if there are other requested attributes + 3. Test "+" returns all operational attributes + 4. Test "*" returns all attributes + 5. Test requested attributes + + :expectedresults: + 1. Success + 2. Success + 3. Success + 4. Success + 5. Success + """ + + log.info("Testing attrs: {} attr: {} present: {}".format(attrs, attr, present)) + entry = topology_st.standalone.search_s(USER4_DN, + ldap.SCOPE_BASE, + 'objectclass=top', + attrs) + if present: + assert entry[0].hasAttr(attr) + else: + assert not entry[0].hasAttr(attr) + + +def test_basic_referrals(topology_st, import_example_ldif): + """Test LDAP server in referral mode. + + :id: c586aede-7ac3-4e8d-a1cf-bfa8b8d78cc2 + + :setup: Standalone instance + + :steps: + 1. Set the referral and the backend state + 2. Set backend state to referral mode. + 3. Set server to not follow referral. + 4. Search using referral. + 5. Make sure server can restart in referral mode. + 6. Cleanup - Delete referral. + + :expectedresults: + 1. Set the referral, and the backend state should PASS. + 2. Set backend state to referral mode should PASS. + 3. Set server to not follow referral should PASS. + 4. referral error(10) should occur. + 5. Restart should PASS. + 6. Cleanup should PASS. 
+ """ + + log.info('Running test_basic_referrals...') + SUFFIX_CONFIG = 'cn="dc=example,dc=com",cn=mapping tree,cn=config' + # + # Set the referral, and the backend state + # + try: + topology_st.standalone.modify_s(SUFFIX_CONFIG, + [(ldap.MOD_REPLACE, + 'nsslapd-referral', + b'ldap://localhost.localdomain:389/o%3dnetscaperoot')]) + except ldap.LDAPError as e: + log.fatal('test_basic_referrals: Failed to set referral: error ' + e.args[0]['desc']) + assert False + + try: + topology_st.standalone.modify_s(SUFFIX_CONFIG, [(ldap.MOD_REPLACE, + 'nsslapd-state', b'Referral')]) + except ldap.LDAPError as e: + log.fatal('test_basic_referrals: Failed to set backend state: error ' + + e.args[0]['desc']) + assert False + + # + # Test that a referral error is returned + # + topology_st.standalone.set_option(ldap.OPT_REFERRALS, 0) # Do not follow referral + try: + topology_st.standalone.search_s(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE, 'objectclass=top') + except ldap.REFERRAL: + pass + except ldap.LDAPError as e: + log.fatal('test_basic_referrals: Search failed: ' + e.args[0]['desc']) + assert False + + # + # Make sure server can restart in referral mode + # + topology_st.standalone.restart(timeout=10) + + # + # Cleanup + # + try: + topology_st.standalone.modify_s(SUFFIX_CONFIG, [(ldap.MOD_REPLACE, + 'nsslapd-state', b'Backend')]) + except ldap.LDAPError as e: + log.fatal('test_basic_referrals: Failed to set backend state: error ' + + e.args[0]['desc']) + assert False + + try: + topology_st.standalone.modify_s(SUFFIX_CONFIG, [(ldap.MOD_DELETE, + 'nsslapd-referral', None)]) + except ldap.LDAPError as e: + log.fatal('test_basic_referrals: Failed to delete referral: error ' + + e.args[0]['desc']) + assert False + topology_st.standalone.set_option(ldap.OPT_REFERRALS, 1) + + log.info('test_basic_referrals: PASSED') + + +def test_basic_systemctl(topology_st, import_example_ldif): + """Tests systemctl/lib389 can stop and start the server. + + :id: a92a7438-ecfa-4583-a89c-5fbfc0220b69 + + :setup: Standalone instance + + :steps: + 1. Stop the server. + 2. Start the server. + 3. Stop the server, break the dse.ldif and dse.ldif.bak, so a start fails. + 4. Verify that systemctl detects the failed start. + 5. Fix the dse.ldif, and make sure the server starts up. + 6. Verify systemctl correctly identifies the successful start. + + :expectedresults: + 1. Server should be stopped. + 2. Server should start + 3. Stop should work but start after breaking dse.ldif should fail. + 4. Systemctl should be able to detect the failed start. + 5. Server should start. + 6. Systemctl should be able to detect the successful start. + """ + + log.info('Running test_basic_systemctl...') + + config_dir = topology_st.standalone.get_config_dir() + + # + # Stop the server + # + log.info('Stopping the server...') + topology_st.standalone.stop() + log.info('Stopped the server.') + + # + # Start the server + # + log.info('Starting the server...') + topology_st.standalone.start() + log.info('Started the server.') + + # + # Stop the server, break the dse.ldif so a start fails, + # and verify that systemctl detects the failed start + # + log.info('Stopping the server...') + topology_st.standalone.stop() + log.info('Stopped the server before breaking the dse.ldif.') + + shutil.copy(config_dir + '/dse.ldif', config_dir + '/dse.ldif.correct') + open(config_dir + '/dse.ldif', 'w').close() + # We need to kill the .bak file too, DS is just too smart! 
+ open(config_dir + '/dse.ldif.bak', 'w').close() + + log.info('Attempting to start the server with broken dse.ldif...') + try: + topology_st.standalone.start() + except Exception as e: + log.info('Server failed to start as expected: ' + str(e)) + log.info('Check the status...') + assert (not topology_st.standalone.status()) + log.info('Server failed to start as expected') + time.sleep(5) + + # + # Fix the dse.ldif, and make sure the server starts up, + # and systemctl correctly identifies the successful start + # + shutil.copy(config_dir + '/dse.ldif.correct', config_dir + '/dse.ldif') + log.info('Starting the server with good dse.ldif...') + topology_st.standalone.start() + log.info('Check the status...') + assert (topology_st.standalone.status()) + log.info('Server started after fixing dse.ldif.') + + log.info('test_basic_systemctl: PASSED') + + +def test_basic_ldapagent(topology_st, import_example_ldif): + """Tests that the ldap agent starts + + :id: da1d1846-8fc4-4b8c-8e53-4c9c16eff1ba + + :setup: Standalone instance + + :steps: + 1. Start SNMP ldap agent using command. + 2. Cleanup - Kill SNMP agent process. + + :expectedresults: + 1. SNMP agent should start. + 2. SNMP agent process should be successfully killed. + """ + + log.info('Running test_basic_ldapagent...') + + var_dir = topology_st.standalone.get_local_state_dir() + + config_file = os.path.join(topology_st.standalone.get_sysconf_dir(), 'dirsrv/config/agent.conf') + + agent_config_file = open(config_file, 'w') + agent_config_file.write('agentx-master ' + var_dir + '/agentx/master\n') + agent_config_file.write('agent-logdir ' + var_dir + '/log/dirsrv\n') + agent_config_file.write('server slapd-' + topology_st.standalone.serverid + '\n') + agent_config_file.close() + + # Remember, this is *forking* + check_output([os.path.join(topology_st.standalone.get_sbin_dir(), 'ldap-agent'), config_file]) + # First kill any previous agents .... + pidpath = os.path.join(var_dir, 'run/ldap-agent.pid') + pid = None + with open(pidpath, 'r') as pf: + pid = pf.readlines()[0].strip() + if pid: + log.debug('test_basic_ldapagent: Terminating agent %s', pid) + check_output(['kill', pid]) + + log.info('test_basic_ldapagent: PASSED') + + +@pytest.mark.skipif(not get_user_is_ds_owner(), + reason="process ownership permission is required") +def test_basic_dse_survives_kill9(topology_st, import_example_ldif): + """Tests that the dse.ldif is not wiped out after the process is killed (bug 910581) + + :id: 10f141da-9b22-443a-885c-87271dcd7a59 + + :setup: Standalone instance + + :steps: + 1. Check out pid of ns-slapd process and Kill ns-slapd process. + 2. Check the contents of dse.ldif file. + 3. Start server. + + :expectedresults: + 1. ns-slapd process should be killed. + 2. dse.ldif should not be corrupted. + 3. Server should start successfully. + """ + log.info('Running test_basic_dse...') + + dse_file = topology_st.standalone.confdir + '/dse.ldif' + pid = check_output(['pidof', '-s', 'ns-slapd']).strip() + # We can't guarantee we have access to sudo in any environment ... Either + # run py.test with sudo, or as the same user as the dirsrv. 
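+ # SIGKILL skips the normal shutdown path; the size check below then
+ # confirms dse.ldif was not left truncated by the abrupt exit
+ # (regression check for bug 910581).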
+ check_output(['kill', '-9', ensure_str(pid)]) + if os.path.getsize(dse_file) == 0: + log.fatal('test_basic_dse: dse.ldif\'s content was incorrectly removed!') + assert False + + topology_st.standalone.start(timeout=60) + log.info('dse.ldif was not corrupted, and the server was restarted') + + log.info('test_basic_dse: PASSED') + # Give the server time to startup, in some conditions this can be racey without systemd notification. Only affects this one test though... + time.sleep(10) + + +@pytest.mark.parametrize("rootdse_attr_name", ROOTDSE_DEF_ATTR_LIST) +def test_def_rootdse_attr(topology_st, import_example_ldif, rootdse_attr_name): + """Tests that operational attributes are not returned by default in rootDSE searches + + :id: 4fee33cc-4019-4c27-89e8-998e6c770dc0 + :parametrized: yes + :setup: Standalone instance + + :steps: + 1. Make an ldapsearch for rootdse attribute + 2. Check the returned entries. + + :expectedresults: + 1. Search should not fail + 2. Operational attributes should not be returned. + """ + + topology_st.standalone.start() + + log.info(" Assert rootdse search hasn't %s attr" % rootdse_attr_name) + try: + entry = topology_st.standalone.search_s("", ldap.SCOPE_BASE)[0] + assert not entry.hasAttr(rootdse_attr_name) + + except ldap.LDAPError as e: + log.fatal('Search failed, error: ' + e.args[0]['desc']) + assert False + + +def test_mod_def_rootdse_attr(topology_st, import_example_ldif, rootdse_attr): + """Tests that operational attributes are returned by default in rootDSE searches after config modification + + :id: c7831e04-f458-4e23-83c7-b6f66109f639 + :parametrized: yes + :setup: Standalone instance and we are using rootdse_attr fixture which +adds nsslapd-return-default-opattr attr with value of one operation attribute. + + :steps: + 1. Make an ldapsearch for rootdse attribute + 2. Check the returned entries. + + :expectedresults: + 1. Search should not fail + 2. Operational attributes should be returned after the config modification + """ + + log.info(" Assert rootdse search has %s attr" % rootdse_attr) + try: + entry = topology_st.standalone.search_s("", ldap.SCOPE_BASE)[0] + assert entry.hasAttr(rootdse_attr) + + except ldap.LDAPError as e: + log.fatal('Search failed, error: ' + e.args[0]['desc']) + assert False + + +@pytest.fixture(scope="module") +def create_users(topology_st): + """Add users to the default suffix + """ + + users = UserAccounts(topology_st.standalone, DEFAULT_SUFFIX) + user_names = ["Directory", "Server", "389", "lib389", "pytest"] + + log.info('Adding 5 test users') + for name in user_names: + users.create(properties={ + 'uid': name, + 'sn': name, + 'cn': name, + 'uidNumber': '1000', + 'gidNumber': '1000', + 'homeDirectory': '/home/%s' % name, + 'mail': '%s@example.com' % name, + 'userpassword': 'pass%s' % name, + }) + + +def test_basic_anonymous_search(topology_st, create_users): + """Tests basic anonymous search operations + + :id: c7831e04-f458-4e50-83c7-b6f77109f639 + :setup: Standalone instance + Add 5 test users with different user names + :steps: + 1. Execute anonymous search with different filters + :expectedresults: + 1. 
Search should be successful + """ + + filters = ["uid=Directory", "(|(uid=S*)(uid=3*))", "(&(uid=l*)(mail=l*))", "(&(!(uid=D*))(ou=People))"] + log.info("Execute anonymous search with different filters") + for filtr in filters: + entries = topology_st.standalone.search_s(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE, filtr) + assert len(entries) != 0 + + +@pytest.mark.ds604 +@pytest.mark.bz915801 +def test_search_original_type(topology_st, create_users): + """Test ldapsearch returning original attributes + using nsslapd-search-return-original-type-switch + + :id: d7831d04-f558-4e50-93c7-b6f77109f640 + :setup: Standalone instance + Add some test entries + :steps: + 1. Set nsslapd-search-return-original-type-switch to ON + 2. Check that ldapsearch *does* return unknown attributes + 3. Turn off nsslapd-search-return-original-type-switch + 4. Check that ldapsearch doesn't return any unknown attributes + :expectedresults: + 1. nsslapd-search-return-original-type-switch should be set to ON + 2. ldapsearch should return unknown attributes + 3. nsslapd-search-return-original-type-switch should be OFF + 4. ldapsearch should not return any unknown attributes + """ + + log.info("Set nsslapd-search-return-original-type-switch to ON") + topology_st.standalone.config.set('nsslapd-search-return-original-type-switch', 'on') + + log.info("Check that ldapsearch *does* return unknown attributes") + entries = topology_st.standalone.search_s(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE, 'uid=Directory', + ['objectclass overflow', 'unknown']) + assert "objectclass overflow" in entries[0].getAttrs() + + log.info("Set nsslapd-search-return-original-type-switch to Off") + topology_st.standalone.config.set('nsslapd-search-return-original-type-switch', 'off') + log.info("Check that ldapsearch *does not* return unknown attributes") + entries = topology_st.standalone.search_s(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE, 'uid=Directory', + ['objectclass overflow', 'unknown']) + assert "objectclass overflow" not in entries[0].getAttrs() + + +@pytest.mark.bz192901 +def test_search_ou(topology_st): + """Test that DS should not return an entry that does not match the filter + + :id: d7831d05-f117-4e89-93c7-b6f77109f640 + :setup: Standalone instance + :steps: + 1. Create an OU entry without sub entries + 2. Search from the OU with the filter that does not match the OU + :expectedresults: + 1. Creation of OU should be successful + 2. Search should not return any results + """ + + log.info("Create a test OU without sub entries") + ou = OrganizationalUnits(topology_st.standalone, DEFAULT_SUFFIX) + ou.create(properties={ + 'ou': 'test_ou', + }) + + search_base = ("ou=test_ou,%s" % DEFAULT_SUFFIX) + log.info("Search from the OU with the filter that does not match the OU, it should not return anything") + entries = topology_st.standalone.search_s(search_base, ldap.SCOPE_SUBTREE, 'uid=*', ['dn']) + assert len(entries) == 0 + + +@pytest.mark.bz1044135 +@pytest.mark.ds47319 +def test_connection_buffer_size(topology_st): + """Test connection buffer size adjustable with different values(valid values and invalid) + + :id: e7831d05-f117-4ec9-1203-b6f77109f117 + :setup: Standalone instance + :steps: + 1. Set nsslapd-connection-buffer to some valid values (2, 0 , 1) + 2. Set nsslapd-connection-buffer to some invalid values (-1, a) + :expectedresults: + 1. This should pass + 2. 
This should fail + """ + + valid_values = ['2', '0', '1'] + for value in valid_values: + topology_st.standalone.config.replace('nsslapd-connection-buffer', value) + + invalid_values = ['-1', 'a'] + for value in invalid_values: + with pytest.raises(ldap.OPERATIONS_ERROR): + topology_st.standalone.config.replace('nsslapd-connection-buffer', value) + + +@pytest.mark.bz1637439 +def test_critical_msg_on_empty_range_idl(topology_st): + """Doing a range index lookup should not report a critical message even if IDL is empty + + :id: a07a2222-0551-44a6-b113-401d23799364 + :setup: Standalone instance + :steps: + 1. Create an index for internationalISDNNumber. (attribute chosen because it is + unlikely that previous tests used it) + 2. telephoneNumber being indexed by default create 20 users without telephoneNumber + 3. add a telephoneNumber value and delete it to trigger an empty index database + 4. Do a search that triggers a range lookup on empty telephoneNumber + 5. Check that the critical message is not logged in error logs + :expectedresults: + 1. This should pass + 2. This should pass + 3. This should pass + 4. This should pass on normal build but could abort a debug build + 4. This should pass + """ + indexedAttr = 'internationalISDNNumber' + + # Step 1 + from lib389.index import Indexes + + indexes = Indexes(topology_st.standalone) + indexes.create(properties={ + 'cn': indexedAttr, + 'nsSystemIndex': 'false', + 'nsIndexType': 'eq' + }) + topology_st.standalone.restart() + + # Step 2 + users = UserAccounts(topology_st.standalone, DEFAULT_SUFFIX) + log.info('Adding 20 users without "%s"' % indexedAttr) + for i in range(20): + name = 'user_%d' % i + last_user = users.create(properties={ + 'uid': name, + 'sn': name, + 'cn': name, + 'uidNumber': '1000', + 'gidNumber': '1000', + 'homeDirectory': '/home/%s' % name, + 'mail': '%s@example.com' % name, + 'userpassword': 'pass%s' % name, + }) + + # Step 3 + # required update to create the indexAttr (i.e. 'loginShell') database, and then make it empty + topology_st.standalone.modify_s(last_user.dn, [(ldap.MOD_ADD, indexedAttr, b'1234')]) + ent = topology_st.standalone.getEntry(last_user.dn, ldap.SCOPE_BASE,) + assert ent + assert ent.hasAttr(indexedAttr) + topology_st.standalone.modify_s(last_user.dn, [(ldap.MOD_DELETE, indexedAttr, None)]) + ent = topology_st.standalone.getEntry(last_user.dn, ldap.SCOPE_BASE,) + assert ent + assert not ent.hasAttr(indexedAttr) + + # Step 4 + # The first component being not indexed the range on second is evaluated + try: + ents = topology_st.standalone.search_s(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE, '(&(sudoNotAfter=*)(%s>=111))' % indexedAttr) + assert len(ents) == 0 + except ldap.SERVER_DOWN: + log.error('Likely testing against a debug version that asserted') + pass + + # Step 5 + assert not topology_st.standalone.searchErrorsLog('CRIT - list_candidates - NULL idl was recieved from filter_candidates_ext.') + + +@pytest.mark.bz1647099 +@pytest.mark.ds50026 +def test_ldbm_modification_audit_log(topology_st): + """When updating LDBM config attributes, those attributes/values are not listed + in the audit log + + :id: 5bf75c47-a283-430e-a65c-3c5fd8dbadb8 + :setup: Standalone Instance + :steps: + 1. Bind as DM + 2. Enable audit log + 3. Update a set of config attrs in LDBM config + 4. Restart the server + 5. Check that config attrs are listed in the audit log + :expectedresults: + 1. Operation successful + 2. Operation successful + 3. Operation successful + 4. Operation successful + 5. 
Audit log should contain modification of attrs" + """ + + VALUE = '10001' + + d_manager = DirectoryManager(topology_st.standalone) + conn = d_manager.bind() + config_ldbm = LDBMConfig(conn) + + log.info("Enable audit logging") + conn.config.enable_log('audit') + + attrs = ['nsslapd-lookthroughlimit', 'nsslapd-pagedidlistscanlimit', 'nsslapd-idlistscanlimit', 'nsslapd-db-locks'] + + for attr in attrs: + log.info("Set attribute %s to value %s" % (attr, VALUE)) + config_ldbm.set(attr, VALUE) + + log.info('Restart the server to flush the logs') + conn.restart() + + for attr in attrs: + log.info("Check if attribute %s is replaced in the audit log" % attr) + assert conn.searchAuditLog('replace: %s' % attr) + assert conn.searchAuditLog('%s: %s' % (attr, VALUE)) + + +@pytest.mark.skipif(not get_user_is_root() or not default_paths.perl_enabled or ds_is_older('1.4.0.0'), + reason="This test is only required if perl is enabled, and requires root.") +def test_dscreate(request): + """Test that dscreate works, we need this for now until setup-ds.pl is + fully discontinued. + + :id: 5bf75c47-a283-430e-a65c-3c5fd8dbadb9 + :setup: None + :steps: + 1. Create template file for dscreate + 2. Create instance using template file + :expectedresults: + 1. Should succeeds + 2. Should succeeds + """ + + template_file = "/tmp/dssetup.inf" + template_text = """[general] +config_version = 2 +# This invalid hostname ... +full_machine_name = localhost.localdomain +# Means we absolutely require this. +strict_host_checking = False +# In tests, we can be run in containers, NEVER trust +# that systemd is there, or functional in any capacity +systemd = False + +[slapd] +instance_name = test_dscreate +root_dn = cn=directory manager +root_password = someLongPassword_123 +# We do not have access to high ports in containers, +# so default to something higher. +port = 38999 +secure_port = 63699 + + +[backend-userroot] +suffix = dc=example,dc=com +sample_entries = yes +""" + + with open(template_file, "w") as template_fd: + template_fd.write(template_text) + + # Unset PYTHONPATH to avoid mixing old CLI tools and new lib389 + tmp_env = os.environ + if "PYTHONPATH" in tmp_env: + del tmp_env["PYTHONPATH"] + try: + subprocess.check_call([ + 'dscreate', + 'from-file', + template_file + ], env=tmp_env) + except subprocess.CalledProcessError as e: + log.fatal("dscreate failed! Error ({}) {}".format(e.returncode, e.output)) + assert False + + def fin(): + os.remove(template_file) + try: + subprocess.check_call(['dsctl', 'test_dscreate', 'remove', '--do-it']) + except subprocess.CalledProcessError as e: + log.fatal("Failed to remove test instance Error ({}) {}".format(e.returncode, e.output)) + + request.addfinalizer(fin) + + +@pytest.fixture(scope="function") +def dscreate_long_instance(request): + template_file = "/tmp/dssetup.inf" + longname_serverid = "test-longname-deadbeef-deadbeef-deadbeef-deadbeef-deadbeef" + template_text = """[general] +config_version = 2 +# This invalid hostname ... +full_machine_name = localhost.localdomain +# Means we absolutely require this. +strict_host_checking = False +# In tests, we can be run in containers, NEVER trust +# that systemd is there, or functional in any capacity +systemd = False + +[slapd] +instance_name = %s +root_dn = cn=directory manager +root_password = someLongPassword_123 +# We do not have access to high ports in containers, +# so default to something higher. 
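+# (The port numbers below are arbitrary test values; any free
+# unprivileged ports would do.)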
+port = 38999 +secure_port = 63699 + + +[backend-userroot] +suffix = dc=example,dc=com +sample_entries = yes +""" % longname_serverid + + with open(template_file, "w") as template_fd: + template_fd.write(template_text) + + # Unset PYTHONPATH to avoid mixing old CLI tools and new lib389 + tmp_env = os.environ + if "PYTHONPATH" in tmp_env: + del tmp_env["PYTHONPATH"] + try: + subprocess.check_call([ + 'dscreate', + 'from-file', + template_file + ], env=tmp_env) + except subprocess.CalledProcessError as e: + log.fatal("dscreate failed! Error ({}) {}".format(e.returncode, e.output)) + assert False + + inst = DirSrv(verbose=True, external_log=log) + dse_ldif = DSEldif(inst, + serverid=longname_serverid) + + socket_path = dse_ldif.get("cn=config", "nsslapd-ldapifilepath") + inst.local_simple_allocate( + serverid=longname_serverid, + ldapuri=f"ldapi://{socket_path[0].replace('/', '%2f')}", + password="someLongPassword_123" + ) + inst.ldapi_enabled = 'on' + inst.ldapi_socket = socket_path + inst.ldapi_autobind = 'off' + try: + inst.open() + except: + log.fatal("Failed to connect via ldapi to %s instance" % longname_serverid) + os.remove(template_file) + try: + subprocess.check_call(['dsctl', longname_serverid, 'remove', '--do-it']) + except subprocess.CalledProcessError as e: + log.fatal("Failed to remove test instance Error ({}) {}".format(e.returncode, e.output)) + + def fin(): + os.remove(template_file) + try: + subprocess.check_call(['dsctl', longname_serverid, 'remove', '--do-it']) + except subprocess.CalledProcessError as e: + log.fatal("Failed to remove test instance Error ({}) {}".format(e.returncode, e.output)) + + request.addfinalizer(fin) + + return inst + + +@pytest.mark.skipif(not get_user_is_root() or not default_paths.perl_enabled or ds_is_older('1.4.2.0'), + reason="This test is only required with new admin cli, and requires root.") +@pytest.mark.bz1748016 +@pytest.mark.ds50581 +def test_dscreate_ldapi(dscreate_long_instance): + """Test that an instance with a long name can + handle ldapi connection using a long socket name + + :id: 5d72d955-aff8-4741-8c9a-32c1c707cf1f + :setup: None + :steps: + 1. create an instance with a long serverId name, that open a ldapi connection + 2. Connect with ldapi, that hit 50581 and crash the instance + :expectedresults: + 1. Should succeeds + 2. Should succeeds + """ + + root_dse = RootDSE(dscreate_long_instance) + log.info(root_dse.get_supported_ctrls()) + + +@pytest.mark.skipif(not get_user_is_root() or not default_paths.perl_enabled or ds_is_older('1.4.2.0'), + reason="This test is only required with new admin cli, and requires root.") +@pytest.mark.bz1715406 +@pytest.mark.ds50923 +def test_dscreate_multiple_dashes_name(dscreate_long_instance): + """Test that an instance with a multiple dashes in the name + can be removed with dsctl --remove-all + + :id: 265c3ac7-5ba6-4278-b8f4-4e7692afd1a5 + :setup: An instance with a few dashes in its name + :steps: + 1. Run 'dsctl --remove-all' command + 2. Check if the instance exists + :expectedresults: + 1. Should succeeds + 2. 
Instance doesn't exists + """ + + p = run(['dsctl', '--remove-all'], stdout=PIPE, input='Yes\n', encoding='ascii') + assert not dscreate_long_instance.exists() + + + +if __name__ == '__main__': + # Run isolated + # -s for DEBUG mode + CURRENT_FILE = os.path.realpath(__file__) + pytest.main("-s %s" % CURRENT_FILE) diff --git a/dirsrvtests/tests/suites/betxns/__init__.py b/dirsrvtests/tests/suites/betxns/__init__.py new file mode 100644 index 0000000..1c260e0 --- /dev/null +++ b/dirsrvtests/tests/suites/betxns/__init__.py @@ -0,0 +1,3 @@ +""" + :Requirement: 389-ds-base: betxn Plugin +""" diff --git a/dirsrvtests/tests/suites/betxns/betxn_test.py b/dirsrvtests/tests/suites/betxns/betxn_test.py new file mode 100644 index 0000000..ee160df --- /dev/null +++ b/dirsrvtests/tests/suites/betxns/betxn_test.py @@ -0,0 +1,344 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2016 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. +# --- END COPYRIGHT BLOCK --- +# +import pytest +import ldap +from lib389.tasks import * +from lib389.utils import * +from lib389.topologies import topology_st +from lib389.plugins import (SevenBitCheckPlugin, AttributeUniquenessPlugin, + MemberOfPlugin, ManagedEntriesPlugin, + ReferentialIntegrityPlugin, MEPTemplates, + MEPConfigs) +from lib389.idm.user import UserAccounts, TEST_USER_PROPERTIES +from lib389.idm.organizationalunit import OrganizationalUnits +from lib389.idm.group import Groups, Group +from lib389._constants import DEFAULT_SUFFIX + +pytestmark = pytest.mark.tier1 + +logging.getLogger(__name__).setLevel(logging.DEBUG) +log = logging.getLogger(__name__) + + +def test_betxt_7bit(topology_st): + """Test that the 7-bit plugin correctly rejects an invalid update + + :id: 9e2ab27b-eda9-4cd9-9968-a1a8513210fd + + :setup: Standalone instance and enabled dynamic plugins + + :steps: 1. Enable PLUGIN_7_BIT_CHECK to "ON" + 2. Add test user + 3. Try to Modify test user's RDN to have 8 bit RDN + 4. Execute search operation for new 8 bit RDN + 5. Remove the test user for cleanup + + :expectedresults: + 1. PLUGIN_7_BIT_CHECK should be ON + 2. Test users should be added + 3. Modify RDN for test user should FAIL + 4. Search operation should FAIL + 5. Test user should be removed + """ + + log.info('Running test_betxt_7bit...') + + BAD_RDN = u'uid=Fu\u00c4\u00e8' + + sevenbc = SevenBitCheckPlugin(topology_st.standalone) + sevenbc.enable() + topology_st.standalone.restart() + + users = UserAccounts(topology_st.standalone, basedn=DEFAULT_SUFFIX) + user = users.create(properties=TEST_USER_PROPERTIES) + + # Attempt a modrdn, this should fail + with pytest.raises(ldap.LDAPError): + user.rename(BAD_RDN) + + # Make sure the operation did not succeed, attempt to search for the new RDN + with pytest.raises(ldap.LDAPError): + users.get(u'Fu\u00c4\u00e8') + + # Make sure original entry is present + user_check = users.get("testuser") + assert user_check.dn.lower() == user.dn.lower() + + # Cleanup - remove the user + user.delete() + + log.info('test_betxt_7bit: PASSED') + + +def test_betxn_attr_uniqueness(topology_st): + """Test that we can not add two entries that have the same attr value that is + defined by the plugin + + :id: 42aeb41c-fbb5-4bc6-a97b-56274034d29f + + :setup: Standalone instance and enabled dynamic plugins + + :steps: 1. Enable PLUGIN_ATTR_UNIQUENESS plugin as "ON" + 2. Add a test user + 3. Add another test user having duplicate uid as previous one + 4. 
Cleanup - disable PLUGIN_ATTR_UNIQUENESS plugin as "OFF" + 5. Cleanup - remove test user entry + + :expectedresults: + 1. PLUGIN_ATTR_UNIQUENESS plugin should be ON + 2. Test user should be added + 3. Add operation should FAIL + 4. PLUGIN_ATTR_UNIQUENESS plugin should be "OFF" + 5. Test user entry should be removed + """ + + attruniq = AttributeUniquenessPlugin(topology_st.standalone) + attruniq.enable() + topology_st.standalone.restart() + + users = UserAccounts(topology_st.standalone, basedn=DEFAULT_SUFFIX) + user1 = users.create(properties={ + 'uid': 'testuser1', + 'cn': 'testuser1', + 'sn': 'user1', + 'uidNumber': '1001', + 'gidNumber': '2001', + 'homeDirectory': '/home/testuser1' + }) + + with pytest.raises(ldap.LDAPError): + users.create(properties={ + 'uid': ['testuser2', 'testuser1'], + 'cn': 'testuser2', + 'sn': 'user2', + 'uidNumber': '1002', + 'gidNumber': '2002', + 'homeDirectory': '/home/testuser2' + }) + + user1.delete() + + log.info('test_betxn_attr_uniqueness: PASSED') + + +def test_betxn_memberof(topology_st): + """Test PLUGIN_MEMBER_OF plugin + + :id: 70d0b96e-b693-4bf7-bbf5-102a66ac5993 + + :setup: Standalone instance and enabled dynamic plugins + + :steps: 1. Enable and configure memberOf plugin + 2. Set memberofgroupattr="member" and memberofAutoAddOC="referral" + 3. Add two test groups - group1 and group2 + 4. Add group2 to group1 + 5. Add group1 to group2 + + :expectedresults: + 1. memberOf plugin plugin should be ON + 2. Set memberofgroupattr="member" and memberofAutoAddOC="referral" should PASS + 3. Add operation should PASS + 4. Add operation should FAIL + 5. Add operation should FAIL + """ + + memberof = MemberOfPlugin(topology_st.standalone) + memberof.enable() + memberof.set_autoaddoc('referral') + topology_st.standalone.restart() + + groups = Groups(topology_st.standalone, DEFAULT_SUFFIX) + group1 = groups.create(properties={'cn': 'group1'}) + group2 = groups.create(properties={'cn': 'group2'}) + + # We may need to mod groups to not have nsMemberOf ... ? + if not ds_is_older('1.3.7'): + group1.remove('objectClass', 'nsMemberOf') + group2.remove('objectClass', 'nsMemberOf') + + # Add group2 to group1 - it should fail with objectclass violation + with pytest.raises(ldap.OBJECT_CLASS_VIOLATION): + group1.add_member(group2.dn) + + # verify entry cache reflects the current/correct state of group1 + assert not group1.is_member(group2.dn) + + # Done + log.info('test_betxn_memberof: PASSED') + + +def test_betxn_modrdn_memberof_cache_corruption(topology_st): + """Test modrdn operations and memberOf be txn post op failures + + :id: 70d0b96e-b693-4bf7-bbf5-102a66ac5994 + + :setup: Standalone instance + + :steps: 1. Enable and configure memberOf plugin + 2. Set memberofgroupattr="member" and memberofAutoAddOC="nsContainer" + 3. Create group and user outside of memberOf plugin scope + 4. Do modrdn to move group into scope + 5. Do modrdn to move group into scope (again) + + :expectedresults: + 1. memberOf plugin plugin should be ON + 2. Set memberofgroupattr="member" and memberofAutoAddOC="nsContainer" should PASS + 3. Creating group and user should PASS + 4. Modrdn should fail with objectclass violation + 5. 
Second modrdn should also fail with objectclass violation + """ + + peoplebase = 'ou=people,%s' % DEFAULT_SUFFIX + memberof = MemberOfPlugin(topology_st.standalone) + memberof.enable() + memberof.set_autoaddoc('nsContainer') # Bad OC + memberof.set('memberOfEntryScope', peoplebase) + memberof.set('memberOfAllBackends', 'on') + topology_st.standalone.restart() + + groups = Groups(topology_st.standalone, DEFAULT_SUFFIX) + group = groups.create(properties={ + 'cn': 'group', + }) + + # Create user and add it to group + users = UserAccounts(topology_st.standalone, basedn=DEFAULT_SUFFIX) + user = users.ensure_state(properties=TEST_USER_PROPERTIES) + if not ds_is_older('1.3.7'): + user.remove('objectClass', 'nsMemberOf') + + group.add_member(user.dn) + + # Attempt modrdn that should fail, but the original entry should stay in the cache + with pytest.raises(ldap.OBJECT_CLASS_VIOLATION): + group.rename('cn=group_to_people', newsuperior=peoplebase) + + # Should fail, but not with NO_SUCH_OBJECT as the original entry should still be in the cache + with pytest.raises(ldap.OBJECT_CLASS_VIOLATION): + group.rename('cn=group_to_people', newsuperior=peoplebase) + + # Done + log.info('test_betxn_modrdn_memberof: PASSED') + + +def test_ri_and_mep_cache_corruption(topology_st): + """Test RI plugin aborts change after MEP plugin fails. + This is really testing the entry cache for corruption + + :id: 70d0b96e-b693-4bf7-bbf5-102a66ac5995 + + :setup: Standalone instance + + :steps: 1. Enable and configure mep and ri plugins + 2. Add user and add it to a group + 3. Disable MEP plugin and remove MEP group + 4. Delete user + 5. Check that user is still a member of the group + + :expectedresults: + 1. Success + 2. Success + 3. Success + 4. It fails with NO_SUCH_OBJECT + 5. Success + + """ + # Start plugins + topology_st.standalone.config.set('nsslapd-dynamic-plugins', 'on') + mep_plugin = ManagedEntriesPlugin(topology_st.standalone) + mep_plugin.enable() + ri_plugin = ReferentialIntegrityPlugin(topology_st.standalone) + ri_plugin.enable() + + # Add our org units + ous = OrganizationalUnits(topology_st.standalone, DEFAULT_SUFFIX) + ou_people = ous.create(properties={'ou': 'managed_people'}) + ou_groups = ous.create(properties={'ou': 'managed_groups'}) + + # Configure MEP + mep_templates = MEPTemplates(topology_st.standalone, DEFAULT_SUFFIX) + mep_template1 = mep_templates.create(properties={ + 'cn': 'MEP template', + 'mepRDNAttr': 'cn', + 'mepStaticAttr': 'objectclass: posixGroup|objectclass: extensibleObject'.split('|'), + 'mepMappedAttr': 'cn: $cn|uid: $cn|gidNumber: $uidNumber'.split('|') + }) + mep_configs = MEPConfigs(topology_st.standalone) + mep_configs.create(properties={'cn': 'config', + 'originScope': ou_people.dn, + 'originFilter': 'objectclass=posixAccount', + 'managedBase': ou_groups.dn, + 'managedTemplate': mep_template1.dn}) + + # Add an entry that meets the MEP scope + users = UserAccounts(topology_st.standalone, DEFAULT_SUFFIX, + rdn='ou={}'.format(ou_people.rdn)) + user = users.create(properties={ + 'uid': 'test-user1', + 'cn': 'test-user', + 'sn': 'test-user', + 'uidNumber': '10011', + 'gidNumber': '20011', + 'homeDirectory': '/home/test-user1' + }) + + # Add group + groups = Groups(topology_st.standalone, DEFAULT_SUFFIX) + user_group = groups.ensure_state(properties={'cn': 'group', 'member': user.dn}) + + # Check if a managed group entry was created + mep_group = Group(topology_st.standalone, dn='cn={},{}'.format(user.rdn, ou_groups.dn)) + if not mep_group.exists(): + log.fatal("MEP group was not 
created for the user") + assert False + + # Test MEP be txn pre op failure does not corrupt entry cache + # Should get the same exception for both rename attempts + with pytest.raises(ldap.UNWILLING_TO_PERFORM): + mep_group.rename("cn=modrdn group") + + with pytest.raises(ldap.UNWILLING_TO_PERFORM): + mep_group.rename("cn=modrdn group") + + # Mess with MEP so it fails + mep_plugin.disable() + mep_group.delete() + mep_plugin.enable() + + # Add another group to verify entry cache is not corrupted + test_group = groups.create(properties={'cn': 'test_group'}) + + # Delete user, should fail in MEP be txn post op, and user should still be a member + with pytest.raises(ldap.NO_SUCH_OBJECT): + user.delete() + + # Verify membership is intact + if not user_group.is_member(user.dn): + log.fatal("Member was incorrectly removed from the group!! Or so it seems") + + # Restart server and test again in case this was a cache issue + topology_st.standalone.restart() + if user_group.is_member(user.dn): + log.info("The entry cache was corrupted") + assert False + + assert False + + # Verify test group is still found in entry cache by deleting it + test_group.delete() + + # Success + log.info("Test PASSED") + + +if __name__ == '__main__': + # Run isolated + # -s for DEBUG mode + CURRENT_FILE = os.path.realpath(__file__) + pytest.main("-s %s" % CURRENT_FILE) diff --git a/dirsrvtests/tests/suites/clu/__init__.py b/dirsrvtests/tests/suites/clu/__init__.py new file mode 100644 index 0000000..bff658e --- /dev/null +++ b/dirsrvtests/tests/suites/clu/__init__.py @@ -0,0 +1,3 @@ +""" + :Requirement: 389-ds-base: Command Line Utility +""" diff --git a/dirsrvtests/tests/suites/clu/clu_test.py b/dirsrvtests/tests/suites/clu/clu_test.py new file mode 100644 index 0000000..fec6915 --- /dev/null +++ b/dirsrvtests/tests/suites/clu/clu_test.py @@ -0,0 +1,95 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2016 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. +# --- END COPYRIGHT BLOCK --- +# +import time +import subprocess +import pytest +from lib389.tasks import * +from lib389.utils import * +from lib389.topologies import topology_st + +pytestmark = pytest.mark.tier0 + +logging.getLogger(__name__).setLevel(logging.DEBUG) +log = logging.getLogger(__name__) + + +def test_clu_pwdhash(topology_st): + """Test the pwdhash script output and encrypted password length + + :id: faaafd01-6748-4451-9d2b-f3bd47902447 + + :setup: Standalone instance + + :steps: + 1. Execute /usr/bin/pwdhash -s ssha testpassword command from command line + 2. Check if there is any output + 3. Check the length of the generated output + + :expectedresults: + 1. Execution should PASS + 2. There should be an output from the command + 3. Output length should not be less than 20 + """ + + log.info('Running test_clu_pwdhash...') + + cmd = '%s -s ssha testpassword' % os.path.join(topology_st.standalone.get_bin_dir(), 'pwdhash') + p = os.popen(cmd) + result = p.readline() + p.close() + + if not result: + log.fatal('test_clu_pwdhash: Failed to run pwdhash') + assert False + + if len(result) < 20: + log.fatal('test_clu_pwdhash: Encrypted password is too short') + assert False + log.info('pwdhash generated: ' + result) + log.info('test_clu_pwdhash: PASSED') + + +def test_clu_pwdhash_mod(topology_st): + """Test the pwdhash script output with -D configdir + + :id: 874ab5e2-207b-4a95-b4c0-22d97b8ab643 + + :setup: Standalone instance + + :steps: + 1. 
Set nsslapd-rootpwstoragescheme & passwordStorageScheme to SSHA256 & SSHA384 respectively + 2. Execute /usr/bin/pwdhash -D /etc/dirsrv/slapd-instance_name/ + 3. Check if there is any output + 4. Check if the command returns the hashed string using the algorithm set in nsslapd-rootpwstoragescheme + + :expectedresults: + 1. nsslapd-rootpwstoragescheme & passwordStorageScheme should set to SSHA256 & SSHA384 respectively + 2. Execution should PASS + 3. There should be an output from the command + 4. Command should return the hashed string using the algorithm set in nsslapd-rootpwstoragescheme + """ + + log.info('Running test_clu_pwdhash_mod...') + topology_st.standalone.config.set('nsslapd-rootpwstoragescheme', 'SSHA256') + topology_st.standalone.config.set('passwordStorageScheme', 'SSHA384') + cmd = [os.path.join(topology_st.standalone.get_bin_dir(), 'pwdhash'), '-D', '/etc/dirsrv/slapd-standalone1', + 'password'] + result = subprocess.check_output(cmd) + stdout = ensure_str(result) + assert result, "Failed to run pwdhash" + assert 'SSHA256' in stdout + log.info('pwdhash generated: ' + stdout) + log.info('returned the hashed string using the algorithm set in nsslapd-rootpwstoragescheme') + + +if __name__ == '__main__': + # Run isolated + # -s for DEBUG mode + CURRENT_FILE = os.path.realpath(__file__) + pytest.main("-s %s" % CURRENT_FILE) diff --git a/dirsrvtests/tests/suites/config/__init__.py b/dirsrvtests/tests/suites/config/__init__.py new file mode 100644 index 0000000..bda0655 --- /dev/null +++ b/dirsrvtests/tests/suites/config/__init__.py @@ -0,0 +1,3 @@ +""" + :Requirement: 389-ds-base: Directory Server Configurations +""" diff --git a/dirsrvtests/tests/suites/config/autotuning_test.py b/dirsrvtests/tests/suites/config/autotuning_test.py new file mode 100644 index 0000000..d1c7514 --- /dev/null +++ b/dirsrvtests/tests/suites/config/autotuning_test.py @@ -0,0 +1,335 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2017 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. +# --- END COPYRIGHT BLOCK --- +# +import pytest +from lib389._mapped_object import DSLdapObject +from lib389.utils import * +from lib389.topologies import topology_st as topo + +from lib389._constants import DN_CONFIG_LDBM, DN_CONFIG_LDBM_BDB, DN_USERROOT_LDBM, DEFAULT_SUFFIX + +pytestmark = pytest.mark.tier0 + +DEBUGGING = os.getenv("DEBUGGING", default=False) +if DEBUGGING: + logging.getLogger(__name__).setLevel(logging.DEBUG) +else: + logging.getLogger(__name__).setLevel(logging.INFO) +log = logging.getLogger(__name__) + + +def test_threads_basic(topo): + """Check that a number of threads are able to be autotuned + + :id: 371fb9c4-9607-4a4b-a4a2-6f00809d6257 + :setup: Standalone instance + :steps: + 1. Set nsslapd-threadnumber to -1 + 2. Check that number of threads is positive + :expectedresults: + 1. nsslapd-threadnumber should be successfully set + 2. 
nsslapd-threadnumber is positive + """ + + log.info("Set nsslapd-threadnumber: -1 to enable autotuning") + topo.standalone.config.set("nsslapd-threadnumber", "-1") + + log.info("Assert nsslapd-threadnumber is equal to the documented expected value") + assert topo.standalone.config.get_attr_val_int("nsslapd-threadnumber") > 0 + + +@pytest.mark.parametrize("invalid_value", ('-2', '0', 'invalid')) +def test_threads_invalid_value(topo, invalid_value): + """Check nsslapd-threadnumber for an invalid values + + :id: 1979eddf-8222-4c9d-809d-269c26de636e + :parametrized: yes + :setup: Standalone instance + :steps: + 1. Set nsslapd-threadnumber to -2, 0, invalid_str + :expectedresults: + 1. The operation should fail + """ + + log.info("Set nsslapd-threadnumber: {}. Operation should fail".format(invalid_value)) + with pytest.raises(ldap.OPERATIONS_ERROR): + topo.standalone.config.set("nsslapd-threadnumber", invalid_value) + + +def test_threads_back_from_manual_value(topo): + """Check that thread autotuning works after manual tuning + + :id: 4b674016-e5ca-426b-a9c0-a94745a7dd25 + :setup: Standalone instance + :steps: + 1. Set nsslapd-threadnumber to -1 and save the autotuned value + 2. Decrease nsslapd-threadnumber by 2 + 3. Set nsslapd-threadnumber to -1 + 4. Check that nsslapd-threadnumber is back to autotuned value + :expectedresults: + 1. nsslapd-threadnumber should be successfully set + 2. nsslapd-threadnumber should be successfully decreased + 3. nsslapd-threadnumber should be successfully set + 4. nsslapd-threadnumber is set back to the autotuned value + """ + + log.info("Set nsslapd-threadnumber: -1 to enable autotuning and save the new value") + topo.standalone.config.set("nsslapd-threadnumber", "-1") + autotuned_value = topo.standalone.config.get_attr_val_utf8("nsslapd-threadnumber") + + log.info("Set nsslapd-threadnumber to the autotuned value decreased by 2") + new_value = str(int(autotuned_value) - 2) + topo.standalone.config.set("nsslapd-threadnumber", new_value) + assert topo.standalone.config.get_attr_val_utf8("nsslapd-threadnumber") == new_value + + log.info("Set nsslapd-threadnumber: -1 to enable autotuning") + topo.standalone.config.set("nsslapd-threadnumber", "-1") + + log.info("Assert nsslapd-threadnumber is back to the autotuned value") + assert topo.standalone.config.get_attr_val_utf8("nsslapd-threadnumber") == autotuned_value + + +@pytest.mark.parametrize("autosize,autosize_split", (('', ''), ('', '0'), ('10', '40'), ('', '40'), + ('10', ''), ('10', '40'), ('10', '0'))) +def test_cache_autosize_non_zero(topo, autosize, autosize_split): + """Check that autosizing works works properly in different combinations + + :id: 83fa099c-a6c9-457a-82db-0982b67e8598 + :parametrized: yes + :setup: Standalone instance + :steps: + 1. Set in the cn=config,cn=ldbm database,cn=plugins,cn=config: + nsslapd-cache-autosize, nsslapd-cache-autosize-split to the next value pairs: + ('', ''), ('', '0'), ('10', '40'), ('', '40'), + ('10', ''), ('10', '40'), ('10', '0') + '' - for deleting the value (set to default) + 2. Try to modify nsslapd-dbcachesize and nsslapd-cachememsize to + some real value, it should be rejected + 3. Restart the instance + 4. Check nsslapd-dbcachesize and nsslapd-cachememsize + :expectedresults: + 1. nsslapd-cache-autosize, nsslapd-cache-autosize-split are successfully set + 2. Modify operation should be rejected + 3. The instance should be successfully restarted + 4. 
nsslapd-dbcachesize and nsslapd-cachememsize should set + to value greater than 512KB + """ + + config_ldbm = DSLdapObject(topo.standalone, DN_CONFIG_LDBM) + bdb_config_ldbm = DSLdapObject(topo.standalone, DN_CONFIG_LDBM_BDB) + userroot_ldbm = DSLdapObject(topo.standalone, DN_USERROOT_LDBM) + + cachesize = '33333333' + + if ds_is_older('1.4.2'): + dbcachesize_val = config_ldbm.get_attr_val('nsslapd-dbcachesize') + autosize_val = config_ldbm.get_attr_val('nsslapd-cache-autosize') + autosize_split_val = config_ldbm.get_attr_val('nsslapd-cache-autosize-split') + else: + dbcachesize_val = bdb_config_ldbm.get_attr_val('nsslapd-dbcachesize') + autosize_val = bdb_config_ldbm.get_attr_val('nsslapd-cache-autosize') + autosize_split_val = bdb_config_ldbm.get_attr_val('nsslapd-cache-autosize-split') + + cachenensize_val = userroot_ldbm.get_attr_val('nsslapd-cachememsize') + dncachenensize_val = userroot_ldbm.get_attr_val('nsslapd-dncachememsize') + + log.info("Check nsslapd-dbcachesize and nsslapd-cachememsize before the test") + log.info("nsslapd-dbcachesize == {}".format(dbcachesize_val)) + log.info("nsslapd-cachememsize == {}".format(cachenensize_val)) + log.info("nsslapd-dncachememsize == {}".format(dncachenensize_val)) + log.info("nsslapd-cache-autosize == {}".format(autosize_val)) + log.info("nsslapd-cache-autosize-split == {}".format(autosize_split_val)) + + if autosize: + log.info("Set nsslapd-cache-autosize to {}".format(autosize)) + config_ldbm.set('nsslapd-cache-autosize', autosize) + else: + log.info("Delete nsslapd-cache-autosize") + try: + config_ldbm.remove('nsslapd-cache-autosize', autosize_val) + except ValueError: + log.info("nsslapd-cache-autosize wasn't found") + + if autosize_split: + log.info("Set nsslapd-cache-autosize-split to {}".format(autosize_split)) + config_ldbm.set('nsslapd-cache-autosize-split', autosize_split) + else: + log.info("Delete nsslapd-cache-autosize-split") + try: + config_ldbm.remove('nsslapd-cache-autosize-split', autosize_split_val) + except ValueError: + log.info("nsslapd-cache-autosize-split wasn't found") + + log.info("Trying to set nsslapd-cachememsize to {}".format(cachesize)) + with pytest.raises(ldap.UNWILLING_TO_PERFORM): + userroot_ldbm.set('nsslapd-cachememsize', cachesize) + log.info("Trying to set nsslapd-dbcachesize to {}".format(cachesize)) + with pytest.raises(ldap.UNWILLING_TO_PERFORM): + config_ldbm.set('nsslapd-dbcachesize ', cachesize) + topo.standalone.restart() + + if ds_is_older('1.4.2'): + dbcachesize_val = config_ldbm.get_attr_val('nsslapd-dbcachesize') + autosize_val = config_ldbm.get_attr_val('nsslapd-cache-autosize') + autosize_split_val = config_ldbm.get_attr_val('nsslapd-cache-autosize-split') + else: + dbcachesize_val = bdb_config_ldbm.get_attr_val('nsslapd-dbcachesize') + autosize_val = bdb_config_ldbm.get_attr_val('nsslapd-cache-autosize') + autosize_split_val = bdb_config_ldbm.get_attr_val('nsslapd-cache-autosize-split') + + cachenensize_val = userroot_ldbm.get_attr_val('nsslapd-cachememsize') + dncachenensize_val = userroot_ldbm.get_attr_val('nsslapd-dncachememsize') + + log.info("Check nsslapd-dbcachesize and nsslapd-cachememsize in the appropriate range.") + log.info("nsslapd-dbcachesize == {}".format(dbcachesize_val)) + log.info("nsslapd-cachememsize == {}".format(cachenensize_val)) + log.info("nsslapd-dncachememsize == {}".format(dncachenensize_val)) + log.info("nsslapd-cache-autosize == {}".format(autosize_val)) + log.info("nsslapd-cache-autosize-split == {}".format(autosize_split_val)) + assert int(dbcachesize_val) 
>= 512000 + assert int(cachenensize_val) >= 512000 + assert int(dncachenensize_val) >= 512000 + + +@pytest.mark.parametrize("autosize_split", ('0', '', '40')) +def test_cache_autosize_basic_sane(topo, autosize_split): + """Check that autotuning cachesizes works properly with different values + + :id: 9dc363ef-f551-446d-8b83-8ac45dabb8df + :parametrized: yes + :setup: Standalone instance + :steps: + 1. Set in the cn=config,cn=ldbm database,cn=plugins,cn=config: + nsslapd-cache-autosize, nsslapd-cache-autosize-split to the next value pairs: + ('0', '0'), ('0', ''), ('0', '40') + '' - for deleting the value (set to default) + 2. Set in the cn=config,cn=ldbm database,cn=plugins,cn=config: + nsslapd-dbcachesize: 0 and some same value + 3. Set in the cn=UserRoot,cn=ldbm database,cn=plugins,cn=config: + nsslapd-cachememsize: 0 and some same value + 4. Restart the instance + 5. Check nsslapd-dbcachesize and nsslapd-cachememsize + :expectedresults: + 1. nsslapd-cache-autosize, nsslapd-cache-autosize-split are successfully set + 2. nsslapd-dbcachesize are successfully set + 3. nsslapd-cachememsize are successfully set + 4. The instance should be successfully restarted + 5. nsslapd-dbcachesize and nsslapd-cachememsize should set + to value greater than 512KB + """ + + config_ldbm = DSLdapObject(topo.standalone, DN_CONFIG_LDBM) + bdb_config_ldbm = DSLdapObject(topo.standalone, DN_CONFIG_LDBM_BDB) + userroot_ldbm = DSLdapObject(topo.standalone, DN_USERROOT_LDBM) + config_ldbm.set('nsslapd-cache-autosize', '0') + + # Test with caches with both real values and 0 + for cachesize in ('0', '33333333'): + if ds_is_older('1.4.2'): + dbcachesize_val = config_ldbm.get_attr_val('nsslapd-dbcachesize') + autosize_val = config_ldbm.get_attr_val('nsslapd-cache-autosize') + autosize_split_val = config_ldbm.get_attr_val('nsslapd-cache-autosize-split') + else: + dbcachesize_val = bdb_config_ldbm.get_attr_val('nsslapd-dbcachesize') + autosize_val = bdb_config_ldbm.get_attr_val('nsslapd-cache-autosize') + autosize_split_val = bdb_config_ldbm.get_attr_val('nsslapd-cache-autosize-split') + + cachenensize_val = userroot_ldbm.get_attr_val('nsslapd-cachememsize') + dncachenensize_val = userroot_ldbm.get_attr_val('nsslapd-dncachememsize') + + log.info("Check nsslapd-dbcachesize and nsslapd-cachememsize before the test") + log.info("nsslapd-dbcachesize == {}".format(dbcachesize_val)) + log.info("nsslapd-cachememsize == {}".format(cachenensize_val)) + log.info("nsslapd-cache-autosize == {}".format(autosize_val)) + log.info("nsslapd-cache-autosize-split == {}".format(autosize_split_val)) + + if autosize_split: + log.info("Set nsslapd-cache-autosize-split to {}".format(autosize_split)) + config_ldbm.set('nsslapd-cache-autosize-split', autosize_split) + else: + log.info("Delete nsslapd-cache-autosize-split") + try: + config_ldbm.remove('nsslapd-cache-autosize-split', autosize_split_val) + except ValueError: + log.info("nsslapd-cache-autosize-split wasn't found") + + log.info("Set nsslapd-dbcachesize to {}".format(cachesize)) + config_ldbm.set('nsslapd-dbcachesize', cachesize) + log.info("Set nsslapd-cachememsize to {}".format(cachesize)) + userroot_ldbm.set('nsslapd-cachememsize', cachesize) + topo.standalone.restart() + + if ds_is_older('1.4.2'): + dbcachesize_val = config_ldbm.get_attr_val('nsslapd-dbcachesize') + autosize_val = config_ldbm.get_attr_val('nsslapd-cache-autosize') + autosize_split_val = config_ldbm.get_attr_val('nsslapd-cache-autosize-split') + else: + dbcachesize_val = 
bdb_config_ldbm.get_attr_val('nsslapd-dbcachesize') + autosize_val = bdb_config_ldbm.get_attr_val('nsslapd-cache-autosize') + autosize_split_val = bdb_config_ldbm.get_attr_val('nsslapd-cache-autosize-split') + + cachenensize_val = userroot_ldbm.get_attr_val('nsslapd-cachememsize') + dncachenensize_val = userroot_ldbm.get_attr_val('nsslapd-dncachememsize') + + log.info("Check nsslapd-dbcachesize and nsslapd-cachememsize in the appropriate range.") + log.info("nsslapd-dbcachesize == {}".format(dbcachesize_val)) + log.info("nsslapd-cachememsize == {}".format(cachenensize_val)) + log.info("nsslapd-dncachememsize == {}".format(dncachenensize_val)) + log.info("nsslapd-cache-autosize == {}".format(autosize_val)) + log.info("nsslapd-cache-autosize-split == {}".format(autosize_split_val)) + assert int(dbcachesize_val) >= 512000 + assert int(cachenensize_val) >= 512000 + assert int(dncachenensize_val) >= 512000 + + +@pytest.mark.parametrize("invalid_value", ('-2', '102', 'invalid')) +def test_cache_autosize_invalid_values(topo, invalid_value): + """Check that we can't set invalid values to autosize attributes + + :id: 2f0d01b5-ca91-4dc2-97bc-ad0ac8d08633 + :parametrized: yes + :setup: Standalone instance + :steps: + 1. Stop the instance + 2. Set in the cn=config,cn=ldbm database,cn=plugins,cn=config: + nsslapd-cache-autosize and nsslapd-cache-autosize-split + to invalid values like (-2, 102, invalid_str) + 3. Try to start the instance + :expectedresults: + 1. The instance should stop successfully + 2. nsslapd-cache-autosize, nsslapd-cache-autosize-split are successfully set + 3. Starting the instance should fail + """ + + config_ldbm = DSLdapObject(topo.standalone, DN_CONFIG_LDBM) + bdb_config_ldbm = DSLdapObject(topo.standalone, DN_CONFIG_LDBM_BDB) + if ds_is_older('1.4.2'): + autosize_val = config_ldbm.get_attr_val('nsslapd-cache-autosize') + autosize_split_val = config_ldbm.get_attr_val('nsslapd-cache-autosize-split') + else: + autosize_val = bdb_config_ldbm.get_attr_val('nsslapd-cache-autosize') + autosize_split_val = bdb_config_ldbm.get_attr_val('nsslapd-cache-autosize-split') + + log.info("Set nsslapd-cache-autosize-split to {}".format(invalid_value)) + with pytest.raises(ldap.UNWILLING_TO_PERFORM): + config_ldbm.set('nsslapd-cache-autosize-split', invalid_value) + topo.standalone.restart() + config_ldbm.remove('nsslapd-cache-autosize-split', autosize_split_val) + + log.info("Set nsslapd-cache-autosize to {}".format(invalid_value)) + with pytest.raises(ldap.UNWILLING_TO_PERFORM): + config_ldbm.set('nsslapd-cache-autosize', invalid_value) + topo.standalone.restart() + config_ldbm.remove('nsslapd-cache-autosize', autosize_val) + + +if __name__ == '__main__': + # Run isolated + # -s for DEBUG mode + CURRENT_FILE = os.path.realpath(__file__) + pytest.main("-s %s" % CURRENT_FILE) diff --git a/dirsrvtests/tests/suites/config/config_test.py b/dirsrvtests/tests/suites/config/config_test.py new file mode 100644 index 0000000..567059b --- /dev/null +++ b/dirsrvtests/tests/suites/config/config_test.py @@ -0,0 +1,467 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2016 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. 
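+# Editor's aside (illustrative, not part of the original sources): a rough model of how the
+# autotuning tests above expect the percentages to translate into cache sizes. It assumes
+# nsslapd-cache-autosize is a percentage of available memory and nsslapd-cache-autosize-split
+# is the share of that budget given to the BDB dbcache, with the remainder going to the entry
+# caches; the server's real calculation may differ, so treat this only as a sketch for
+# reasoning about the ">= 512000" assertions. The helper name is the editor's own.
+def expected_autosize_bytes(autosize_pct=10, split_pct=40):
+    available_kb = 0
+    with open('/proc/meminfo') as f:
+        for line in f:
+            if line.startswith('MemAvailable'):
+                # MemAvailable is reported in kB
+                available_kb = int(line.split()[1])
+                break
+    budget = available_kb * 1024 * autosize_pct // 100
+    dbcache = budget * split_pct // 100
+    entry_caches = budget - dbcache
+    return dbcache, entry_caches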
+# --- END COPYRIGHT BLOCK --- +# + +import logging +import pytest +from lib389.tasks import * +from lib389.topologies import topology_m2, topology_st as topo +from lib389.utils import * +from lib389._constants import DN_CONFIG, DEFAULT_SUFFIX +from lib389.idm.user import UserAccounts, TEST_USER_PROPERTIES +from lib389.backend import * +from lib389.config import LDBMConfig, BDB_LDBMConfig +from lib389.cos import CosPointerDefinitions, CosTemplates +from lib389.backend import Backends +from lib389.monitor import MonitorLDBM + +pytestmark = pytest.mark.tier0 + +USER_DN = 'uid=test_user,%s' % DEFAULT_SUFFIX + +logging.getLogger(__name__).setLevel(logging.INFO) +log = logging.getLogger(__name__) + +@pytest.fixture(scope="module") +def big_file(): + TEMP_BIG_FILE = '' + # 1024*1024=1048576 + # B for 1 MiB + # Big for 3 MiB + for x in range(1048576): + TEMP_BIG_FILE += '+' + + return TEMP_BIG_FILE + + +def test_maxbersize_repl(topology_m2, big_file): + """maxbersize is ignored in the replicated operations. + + :id: ad57de60-7d56-4323-bbca-5556e5cdb126 + :setup: MMR with two masters, test user, + 1 MiB big value for any attribute + :steps: + 1. Set maxbersize attribute to a small value (20KiB) on master2 + 2. Add the big value to master2 + 3. Add the big value to master1 + 4. Check if the big value was successfully replicated to master2 + :expectedresults: + 1. maxbersize should be successfully set + 2. Adding the big value to master2 failed + 3. Adding the big value to master1 succeed + 4. The big value is successfully replicated to master2 + """ + + users_m1 = UserAccounts(topology_m2.ms["master1"], DEFAULT_SUFFIX) + users_m2 = UserAccounts(topology_m2.ms["master2"], DEFAULT_SUFFIX) + + user_m1 = users_m1.create(properties=TEST_USER_PROPERTIES) + time.sleep(2) + user_m2 = users_m2.get(dn=user_m1.dn) + + log.info("Set nsslapd-maxbersize: 20K to master2") + topology_m2.ms["master2"].config.set('nsslapd-maxbersize', '20480') + + topology_m2.ms["master2"].restart() + + log.info('Try to add attribute with a big value to master2 - expect to FAIL') + with pytest.raises(ldap.SERVER_DOWN): + user_m2.add('jpegphoto', big_file) + + topology_m2.ms["master2"].restart() + topology_m2.ms["master1"].restart() + + log.info('Try to add attribute with a big value to master1 - expect to PASS') + user_m1.add('jpegphoto', big_file) + + time.sleep(2) + + log.info('Check if a big value was successfully added to master1') + + photo_m1 = user_m1.get_attr_vals('jpegphoto') + + log.info('Check if a big value was successfully replicated to master2') + photo_m2 = user_m2.get_attr_vals('jpegphoto') + + assert photo_m2 == photo_m1 + +def test_config_listen_backport_size(topology_m2): + """Check that nsslapd-listen-backlog-size acted as expected + + :id: a4385d58-a6ab-491e-a604-6df0e8ed91cd + :setup: MMR with two masters + :steps: + 1. Search for nsslapd-listen-backlog-size + 2. Set nsslapd-listen-backlog-size to a positive value + 3. Set nsslapd-listen-backlog-size to a negative value + 4. Set nsslapd-listen-backlog-size to an invalid value + 5. Set nsslapd-listen-backlog-size back to a default value + :expectedresults: + 1. Search should be successful + 2. nsslapd-listen-backlog-size should be successfully set + 3. nsslapd-listen-backlog-size should be successfully set + 4. Modification with an invalid value should throw an error + 5. 
nsslapd-listen-backlog-size should be successfully set + """ + + default_val = topology_m2.ms["master1"].config.get_attr_val_bytes('nsslapd-listen-backlog-size') + + topology_m2.ms["master1"].config.replace('nsslapd-listen-backlog-size', '256') + + topology_m2.ms["master1"].config.replace('nsslapd-listen-backlog-size', '-1') + + with pytest.raises(ldap.LDAPError): + topology_m2.ms["master1"].config.replace('nsslapd-listen-backlog-size', 'ZZ') + + topology_m2.ms["master1"].config.replace('nsslapd-listen-backlog-size', default_val) + + +def test_config_deadlock_policy(topology_m2): + """Check that nsslapd-db-deadlock-policy acted as expected + + :id: a24e25fd-bc15-47fa-b018-372f6a2ec59c + :setup: MMR with two masters + :steps: + 1. Search for nsslapd-db-deadlock-policy and check if + it contains a default value + 2. Set nsslapd-db-deadlock-policy to a positive value + 3. Set nsslapd-db-deadlock-policy to a negative value + 4. Set nsslapd-db-deadlock-policy to an invalid value + 5. Set nsslapd-db-deadlock-policy back to a default value + :expectedresults: + 1. Search should be a successful and should contain a default value + 2. nsslapd-db-deadlock-policy should be successfully set + 3. nsslapd-db-deadlock-policy should be successfully set + 4. Modification with an invalid value should throw an error + 5. nsslapd-db-deadlock-policy should be successfully set + """ + + default_val = b'9' + + ldbmconfig = LDBMConfig(topology_m2.ms["master1"]) + bdbconfig = BDB_LDBMConfig(topology_m2.ms["master1"]) + + if ds_is_older('1.4.2'): + deadlock_policy = ldbmconfig.get_attr_val_bytes('nsslapd-db-deadlock-policy') + else: + deadlock_policy = bdbconfig.get_attr_val_bytes('nsslapd-db-deadlock-policy') + + assert deadlock_policy == default_val + + # Try a range of valid values + for val in (b'0', b'5', b'9'): + ldbmconfig.replace('nsslapd-db-deadlock-policy', val) + if ds_is_older('1.4.2'): + deadlock_policy = ldbmconfig.get_attr_val_bytes('nsslapd-db-deadlock-policy') + else: + deadlock_policy = bdbconfig.get_attr_val_bytes('nsslapd-db-deadlock-policy') + + assert deadlock_policy == val + + # Try a range of invalid values + for val in ('-1', '10'): + with pytest.raises(ldap.LDAPError): + ldbmconfig.replace('nsslapd-db-deadlock-policy', val) + + # Cleanup - undo what we've done + ldbmconfig.replace('nsslapd-db-deadlock-policy', deadlock_policy) + + +@pytest.mark.bz766322 +@pytest.mark.ds26 +def test_defaultnamingcontext(topo): + """Tests configuration attribute defaultNamingContext in the rootdse + + :id: de9a21d3-00f9-4c6d-bb40-56aa1ba36578 + :setup: Standalone instance + :steps: + 1. Check the attribute nsslapd-defaultnamingcontext is present in cn=config + 2. Delete nsslapd-defaultnamingcontext attribute + 3. Add new valid Suffix and modify nsslapd-defaultnamingcontext with new suffix + 4. Add new invalid value at runtime to nsslapd-defaultnamingcontext + 5. Modify nsslapd-defaultnamingcontext with blank value + 6. Add new suffix when nsslapd-defaultnamingcontext is empty + 7. Check the value of the nsslapd-defaultnamingcontext automatically have the new suffix + 8. Adding new suffix when nsslapd-defaultnamingcontext is not empty + 9. Check the value of the nsslapd-defaultnamingcontext has not changed + 10. Remove the newly added suffix and check the values of the attribute is not changed + 11. Remove the original suffix which is currently nsslapd-defaultnamingcontext + 12. Check nsslapd-defaultnamingcontext become empty. + :expectedresults: + 1. This should be successful + 2. 
It should give 'server unwilling to perform' error + 3. It should be successful + 4. It should give 'no such object' error + 5. It should be successful + 6. Add should be successful + 7. nsslapd-defaultnamingcontext should have new suffix + 8. Add should be successful + 9. defaultnamingcontext should not change + 10. Remove should be successful and defaultnamingcontext should not change + 11. Removal should be successful + 12. nsslapd-defaultnamingcontext should be empty + """ + + backends = Backends(topo.standalone) + test_suffix1 = 'dc=test1,dc=com' + test_db1 = 'test1_db' + test_suffix2 = 'dc=test2,dc=com' + test_db2 = 'test2_db' + test_suffix3 = 'dc=test3,dc=com' + test_db3 = 'test3_db' + + log.info("Check the attribute nsslapd-defaultnamingcontext is present in cn=config") + assert topo.standalone.config.present('nsslapd-defaultnamingcontext') + + log.info("Delete nsslapd-defaultnamingcontext attribute") + with pytest.raises(ldap.UNWILLING_TO_PERFORM): + topo.standalone.config.remove_all('nsslapd-defaultnamingcontext') + + b1 = backends.create(properties={'cn': test_db1, + 'nsslapd-suffix': test_suffix1}) + + log.info("modify nsslapd-defaultnamingcontext with new suffix") + topo.standalone.config.replace('nsslapd-defaultnamingcontext', test_suffix1) + + log.info("Add new invalid value at runtime to nsslapd-defaultnamingcontext") + with pytest.raises(ldap.NO_SUCH_OBJECT): + topo.standalone.config.replace('nsslapd-defaultnamingcontext', 'some_invalid_value') + + log.info("Modify nsslapd-defaultnamingcontext with blank value") + topo.standalone.config.replace('nsslapd-defaultnamingcontext', ' ') + + log.info("Add new suffix when nsslapd-defaultnamingcontext is empty") + b2 = backends.create(properties={'cn': test_db2, + 'nsslapd-suffix': test_suffix2}) + + log.info("Check the value of the nsslapd-defaultnamingcontext automatically have the new suffix") + assert topo.standalone.config.get_attr_val_utf8('nsslapd-defaultnamingcontext') == test_suffix2 + + log.info("Adding new suffix when nsslapd-defaultnamingcontext is not empty") + b3 = backends.create(properties={'cn': test_db3, + 'nsslapd-suffix': test_suffix3}) + + log.info("Check the value of the nsslapd-defaultnamingcontext has not changed") + assert topo.standalone.config.get_attr_val_utf8('nsslapd-defaultnamingcontext') == test_suffix2 + + log.info("Remove the newly added suffix and check the values of the attribute is not changed") + b3.delete() + assert topo.standalone.config.get_attr_val_utf8('nsslapd-defaultnamingcontext') == test_suffix2 + + log.info("Remove all the suffix at the end") + b1.delete() + b2.delete() + + +@pytest.mark.xfail(reason="This may fail due to bug 1610234") +def test_defaultnamingcontext_1(topo): + """This test case should be part of function test_defaultnamingcontext + Please move it back after we have a fix for bug 1610234 + """ + log.info("Remove the original suffix which is currently nsslapd-defaultnamingcontext" + "and check nsslapd-defaultnamingcontext become empty.") + + """ Please remove these declarations after moving the test + to function test_defaultnamingcontext + """ + backends = Backends(topo.standalone) + test_db2 = 'test2_db' + test_suffix2 = 'dc=test2,dc=com' + b2 = backends.create(properties={'cn': test_db2, + 'nsslapd-suffix': test_suffix2}) + b2.delete() + assert topo.standalone.config.get_attr_val_utf8('nsslapd-defaultnamingcontext') == ' ' + + +@pytest.mark.bz602456 +def test_allow_add_delete_config_attributes(topo): + """Tests configuration attributes are allowed to add and delete + 
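+# Editor's aside (illustrative, not part of the original sources): the lib389 config.add()
+# and config.remove() calls used in this test wrap plain LDAP modify operations on cn=config.
+# A roughly equivalent raw python-ldap sequence is sketched below; the URI and bind
+# credentials are placeholders, not values taken from this suite.
+import ldap
+
+conn = ldap.initialize('ldap://localhost:389')
+conn.simple_bind_s('cn=Directory Manager', 'password')
+# Add the attribute, then delete it again to restore the default
+conn.modify_s('cn=config', [(ldap.MOD_ADD, 'nsslapd-listenhost', [b'localhost'])])
+conn.modify_s('cn=config', [(ldap.MOD_DELETE, 'nsslapd-listenhost', [b'localhost'])])
+conn.unbind_s()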
+ :id: d9a3f264-4111-406b-9900-a70e5403458a + :setup: Standalone instance + :steps: + 1. Add a new valid attribute at runtime to cn=config + 2. Check if the new valid attribute is present + 3. Delete nsslapd-listenhost to restore the default value + 4. Restart the server + 5. Check nsslapd-listenhost is present with default value + 6. Add new invalid attribute at runtime to cn=config + 7. Make sure the invalid attribute is not added + :expectedresults: + 1. This should be successful + 2. This should be successful + 3. This should be successful + 4. This should be successful + 5. This should be successful + 6. It should give 'server unwilling to perform' error + 7. Invalid attribute should not be added + """ + default_listenhost = topo.standalone.config.get_attr_val_utf8('nsslapd-listenhost') + + log.info("Add a new valid attribute at runtime to cn=config") + topo.standalone.config.add('nsslapd-listenhost', 'localhost') + assert topo.standalone.config.present('nsslapd-listenhost', 'localhost') + + log.info("Delete nsslapd-listenhost to restore the default value") + topo.standalone.config.remove('nsslapd-listenhost', 'localhost') + topo.standalone.restart() + assert topo.standalone.config.present('nsslapd-listenhost', default_listenhost) + + log.info("Add new invalid attribute at runtime to cn=config") + with pytest.raises(ldap.UNWILLING_TO_PERFORM): + topo.standalone.config.add('invalid-attribute', 'invalid-value') + + log.info("Make sure the invalid attribute is not added") + assert not topo.standalone.config.present('invalid-attribute', 'invalid-value') + + +@pytest.mark.bz918705 +@pytest.mark.ds511 +def test_ignore_virtual_attrs(topo): + """Test nsslapd-ignore-virtual-attrs configuration attribute + + :id: 9915d71b-2c71-4ac0-91d7-92655d53541b + :setup: Standalone instance + :steps: + 1. Check the attribute nsslapd-ignore-virtual-attrs is present in cn=config + 2. Check the default value of attribute nsslapd-ignore-virtual-attrs should be OFF + 3. Set the valid values i.e. on/ON and off/OFF for nsslapd-ignore-virtual-attrs + 4. Set invalid value for attribute nsslapd-ignore-virtual-attrs + 5. Set nsslapd-ignore-virtual-attrs=off + 6. Add cosPointer, cosTemplate and test entry to default suffix, where virtual attribute is postal code + 7. Test if virtual attribute i.e. postal code shown in test entry while nsslapd-ignore-virtual-attrs: off + 8. Set nsslapd-ignore-virtual-attrs=on + 9. Test if virtual attribute i.e. postal code not shown while nsslapd-ignore-virtual-attrs: on + :expectedresults: + 1. This should be successful + 2. This should be successful + 3. This should be successful + 4. This should fail + 5. This should be successful + 6. This should be successful + 7. Postal code should be present + 8. This should be successful + 9. Postal code should not be present + """ + + log.info("Check the attribute nsslapd-ignore-virtual-attrs is present in cn=config") + assert topo.standalone.config.present('nsslapd-ignore-virtual-attrs') + + log.info("Check the default value of attribute nsslapd-ignore-virtual-attrs should be OFF") + assert topo.standalone.config.get_attr_val_utf8('nsslapd-ignore-virtual-attrs') == "off" + + log.info("Set the valid values i.e. 
on/ON and off/OFF for nsslapd-ignore-virtual-attrs") + for attribute_value in ['on', 'off', 'ON', 'OFF']: + topo.standalone.config.set('nsslapd-ignore-virtual-attrs', attribute_value) + assert topo.standalone.config.present('nsslapd-ignore-virtual-attrs', attribute_value) + + log.info("Set invalid value for attribute nsslapd-ignore-virtual-attrs") + with pytest.raises(ldap.OPERATIONS_ERROR): + topo.standalone.config.set('nsslapd-ignore-virtual-attrs', 'invalid_value') + + cos_template_properties = { + 'cn': 'cosTemplateExample', + 'postalcode': '117' + } + cos_templates = CosTemplates(topo.standalone, DEFAULT_SUFFIX, 'ou=People') + test_cos_template = cos_templates.create(properties=cos_template_properties) + + log.info("Add cosPointer, cosTemplate and test entry to default suffix, where virtual attribute is postal code") + cos_pointer_properties = { + 'cn': 'cosPointer', + 'description': 'cosPointer example', + 'cosTemplateDn': 'cn=cosTemplateExample,ou=People,dc=example,dc=com', + 'cosAttribute': 'postalcode', + } + cos_pointer_definitions = CosPointerDefinitions(topo.standalone, DEFAULT_SUFFIX, 'ou=People') + test_cos_pointer_definition = cos_pointer_definitions.create(properties=cos_pointer_properties) + + test_users = UserAccounts(topo.standalone, DEFAULT_SUFFIX) + test_user = test_users.create(properties=TEST_USER_PROPERTIES) + + log.info("Test if virtual attribute i.e. postal code shown in test entry while nsslapd-ignore-virtual-attrs: off") + assert test_user.present('postalcode', '117') + + log.info("Set nsslapd-ignore-virtual-attrs=on") + topo.standalone.config.set('nsslapd-ignore-virtual-attrs', 'on') + + log.info("Test if virtual attribute i.e. postal code not shown while nsslapd-ignore-virtual-attrs: on") + assert not test_user.present('postalcode', '117') + + +@pytest.mark.bz918694 +@pytest.mark.ds408 +def test_ndn_cache_enabled(topo): + """Test nsslapd-ignore-virtual-attrs configuration attribute + + :id: 2caa3ec0-cd05-458e-9e21-3b73cf4697ff + :setup: Standalone instance + :steps: + 1. Check the attribute nsslapd-ndn-cache-enabled is present in cn=config + 2. Check the attribute nsslapd-ndn-cache-enabled has the default value set as ON + 3. Check the attribute nsslapd-ndn-cache-max-size is present in cn=config + 4. Check the backend monitor output for Normalized DN cache statistics while nsslapd-ndn-cache-enabled is OFF + 5. Set nsslapd-ndn-cache-enabled ON and check the backend monitor output for Normalized DN cache statistics + 6. Set invalid value for nsslapd-ndn-cache-enabled + 7. Set invalid value for nsslapd-ndn-cache-max-size + :expectedresults: + 1. This should be successful + 2. This should be successful + 3. This should be successful + 4. Backend monitor output should not have NDN cache statistics + 5. Backend monitor output should have NDN cache statistics + 6. This should fail + 7. 
This should fail + """ + log.info("Check the attribute nsslapd-ndn-cache-enabled is present in cn=config") + assert topo.standalone.config.present('nsslapd-ndn-cache-enabled') + + log.info("Check the attribute nsslapd-ndn-cache-enabled has the default value set as ON") + assert topo.standalone.config.get_attr_val_utf8('nsslapd-ndn-cache-enabled') == 'on' + + log.info("Check the attribute nsslapd-ndn-cache-max-size is present in cn=config") + assert topo.standalone.config.present('nsslapd-ndn-cache-max-size') + + backends = Backends(topo.standalone) + backend = backends.get(DEFAULT_BENAME) + + log.info("Ticket#49593 : NDN cache stats should be under the global stats - Implemented in 1.4") + log.info("Fetch the monitor value according to the ds version") + if ds_is_older('1.4'): + monitor = backend.get_monitor() + else: + monitor = MonitorLDBM(topo.standalone) + + log.info("Check the backend monitor output for Normalized DN cache statistics, " + "while nsslapd-ndn-cache-enabled is off") + topo.standalone.config.set('nsslapd-ndn-cache-enabled', 'off') + topo.standalone.restart() + assert not monitor.present('normalizedDnCacheHits') + + log.info("Check the backend monitor output for Normalized DN cache statistics, " + "while nsslapd-ndn-cache-enabled is on") + topo.standalone.config.set('nsslapd-ndn-cache-enabled', 'on') + topo.standalone.restart() + assert monitor.present('normalizedDnCacheHits') + + log.info("Set invalid value for nsslapd-ndn-cache-enabled") + with pytest.raises(ldap.OPERATIONS_ERROR): + topo.standalone.config.set('nsslapd-ndn-cache-enabled', 'invalid_value') + + log.info("Set invalid value for nsslapd-ndn-cache-max-size") + with pytest.raises(ldap.OPERATIONS_ERROR): + topo.standalone.config.set('nsslapd-ndn-cache-max-size', 'invalid_value') + + +if __name__ == '__main__': + # Run isolated + # -s for DEBUG mode + CURRENT_FILE = os.path.realpath(__file__) + pytest.main("-s %s" % CURRENT_FILE) + + diff --git a/dirsrvtests/tests/suites/config/regression_test.py b/dirsrvtests/tests/suites/config/regression_test.py new file mode 100644 index 0000000..bbe4c51 --- /dev/null +++ b/dirsrvtests/tests/suites/config/regression_test.py @@ -0,0 +1,114 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2017 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. +# --- END COPYRIGHT BLOCK --- +# +import logging +import pytest +from lib389.utils import * +from lib389.dseldif import DSEldif +from lib389.config import LDBMConfig +from lib389.backend import Backends +from lib389.topologies import topology_st as topo + +pytestmark = pytest.mark.tier0 + +logging.getLogger(__name__).setLevel(logging.INFO) +log = logging.getLogger(__name__) + +CUSTOM_MEM = '9100100100' + + +# Function to return value of available memory in kb +def get_available_memory(): + with open('/proc/meminfo') as file: + for line in file: + if 'MemAvailable' in line: + free_mem_in_kb = line.split()[1] + return int(free_mem_in_kb) + + +@pytest.mark.skipif(get_available_memory() < (int(CUSTOM_MEM)/1024), reason="available memory is too low") +@pytest.mark.bz1627512 +@pytest.mark.ds49618 +def test_set_cachememsize_to_custom_value(topo): + """Test if value nsslapd-cachememsize remains set + at the custom setting of value above 3805132804 bytes + after changing the value to 9100100100 bytes + + :id: 8a3efc00-65a9-4ee7-b8ee-e35840991ea9 + :setup: Standalone Instance + :steps: + 1. 
Disable in the cn=config,cn=ldbm database,cn=plugins,cn=config: + nsslapd-cache-autosize by setting it to 0 + 2. Disable in the cn=config,cn=ldbm database,cn=plugins,cn=config: + nsslapd-cache-autosize-split by setting it to 0 + 3. Restart the instance + 4. Set in the cn=UserRoot,cn=ldbm database,cn=plugins,cn=config: + nsslapd-cachememsize: CUSTOM_MEM + :expectedresults: + 1. nsslapd-cache-autosize is successfully disabled + 2. nsslapd-cache-autosize-split is successfully disabled + 3. The instance should be successfully restarted + 4. nsslapd-cachememsize is successfully set + """ + + config_ldbm = LDBMConfig(topo.standalone) + backends = Backends(topo.standalone) + userroot_ldbm = backends.get("userroot") + + log.info("Disabling nsslapd-cache-autosize by setting it to 0") + assert config_ldbm.set('nsslapd-cache-autosize', '0') + + log.info("Disabling nsslapd-cache-autosize-split by setting it to 0") + assert config_ldbm.set('nsslapd-cache-autosize-split', '0') + + log.info("Restarting instance") + topo.standalone.restart() + log.info("Instance restarted successfully") + + log.info("Set nsslapd-cachememsize to value {}".format(CUSTOM_MEM)) + assert userroot_ldbm.set('nsslapd-cachememsize', CUSTOM_MEM) + + +def test_maxbersize_repl(topo): + """Check that instance starts when nsslapd-errorlog-maxlogsize + nsslapd-errorlog-logmaxdiskspace are set in certain order + + :id: 743e912c-2be4-4f5f-9c2a-93dcb18f51a0 + :setup: MMR with two masters + :steps: + 1. Stop the instance + 2. Set nsslapd-errorlog-maxlogsize before/after + nsslapd-errorlog-logmaxdiskspace + 3. Start the instance + 4. Check the error log for errors + :expectedresults: + 1. Success + 2. Success + 3. Success + 4. The error log should contain no errors + """ + + inst = topo.standalone + dse_ldif = DSEldif(inst) + + inst.stop() + log.info("Set nsslapd-errorlog-maxlogsize before nsslapd-errorlog-logmaxdiskspace") + dse_ldif.replace('cn=config', 'nsslapd-errorlog-maxlogsize', '300') + dse_ldif.replace('cn=config', 'nsslapd-errorlog-logmaxdiskspace', '500') + inst.start() + log.info("Assert no init_dse_file errors in the error log") + assert not inst.ds_error_log.match('.*ERR - init_dse_file.*') + + inst.stop() + log.info("Set nsslapd-errorlog-maxlogsize after nsslapd-errorlog-logmaxdiskspace") + dse_ldif.replace('cn=config', 'nsslapd-errorlog-logmaxdiskspace', '500') + dse_ldif.replace('cn=config', 'nsslapd-errorlog-maxlogsize', '300') + inst.start() + log.info("Assert no init_dse_file errors in the error log") + assert not inst.ds_error_log.match('.*ERR - init_dse_file.*') + diff --git a/dirsrvtests/tests/suites/config/removed_config_49298_test.py b/dirsrvtests/tests/suites/config/removed_config_49298_test.py new file mode 100644 index 0000000..7b585b4 --- /dev/null +++ b/dirsrvtests/tests/suites/config/removed_config_49298_test.py @@ -0,0 +1,90 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2017 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. +# --- END COPYRIGHT BLOCK --- +# +import pytest +import os +import logging +import subprocess + +from lib389.topologies import topology_st as topo + +pytestmark = pytest.mark.tier0 + +DEBUGGING = os.getenv("DEBUGGING", default=False) +if DEBUGGING: + logging.getLogger(__name__).setLevel(logging.DEBUG) +else: + logging.getLogger(__name__).setLevel(logging.INFO) +log = logging.getLogger(__name__) + +def test_restore_config(topo): + """ + Check that if a dse.ldif and backup are removed, that the server still starts. 
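+# Editor's aside (illustrative, not part of the original sources): the offline-edit pattern
+# used in test_maxbersize_repl above (stop, edit dse.ldif through DSEldif, start, then grep
+# the error log) can be wrapped in a small helper. The helper name and its argument shape
+# are the editor's own, not a lib389 API.
+from lib389.dseldif import DSEldif
+
+def offline_config_replace(inst, attr_values):
+    """Stop the instance, apply cn=config changes directly to dse.ldif, then restart."""
+    inst.stop()
+    dse_ldif = DSEldif(inst)
+    for attr, value in attr_values:
+        dse_ldif.replace('cn=config', attr, value)
+    inst.start()
+    # The caller can then assert on inst.ds_error_log.match(...), as the test above does.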
+ + :id: e1c38fa7-30bc-46f2-a934-f8336f387581 + :setup: Standalone instance + :steps: + 1. Stop the instance + 2. Delete 'dse.ldif' + 3. Start the instance + :expectedresults: + 1. Steps 1 and 2 succeed. + 2. The server starts successfully with the restored config. + """ + topo.standalone.stop() + + dse_path = topo.standalone.get_config_dir() + + log.info(dse_path) + + for i in ('dse.ldif', 'dse.ldif.startOK'): + p = os.path.join(dse_path, i) + d = os.path.join(dse_path, i + '.49298') + os.rename(p, d) + + # This will pass. + topo.standalone.start() + +def test_removed_config(topo): + """ + Check that if dse.ldif and its backups are removed, the server + exits cleanly instead of segfaulting. + + :id: b45272d1-c197-473e-872f-07257fcb2ec0 + :setup: Standalone instance + :steps: + 1. Stop the instance + 2. Delete 'dse.ldif', 'dse.ldif.bak', 'dse.ldif.startOK' + 3. Start the instance + :expectedresults: + 1. Steps 1 and 2 succeed. + 2. The server fails to start, but does not crash. + """ + topo.standalone.stop() + + dse_path = topo.standalone.get_config_dir() + + log.info(dse_path) + + for i in ('dse.ldif', 'dse.ldif.bak', 'dse.ldif.startOK'): + p = os.path.join(dse_path, i) + d = os.path.join(dse_path, i + '.49298') + os.rename(p, d) + + # We actually can't check the log output, because the server can't read dse.ldif and + # doesn't know where to write the log yet! All we want is for the server to fail to + # start here, rather than run forever or segfault. + with pytest.raises(subprocess.CalledProcessError): + topo.standalone.start() + + # Restore the files so that setup-ds.l can work + for i in ('dse.ldif', 'dse.ldif.bak', 'dse.ldif.startOK'): + p = os.path.join(dse_path, i) + d = os.path.join(dse_path, i + '.49298') + os.rename(d, p) + diff --git a/dirsrvtests/tests/suites/cos/__init__.py b/dirsrvtests/tests/suites/cos/__init__.py new file mode 100644 index 0000000..b16a278 --- /dev/null +++ b/dirsrvtests/tests/suites/cos/__init__.py @@ -0,0 +1,3 @@ +""" + :Requirement: 389-ds-base: Class of Service +""" diff --git a/dirsrvtests/tests/suites/cos/cos_test.py b/dirsrvtests/tests/suites/cos/cos_test.py new file mode 100644 index 0000000..d6a498c --- /dev/null +++ b/dirsrvtests/tests/suites/cos/cos_test.py @@ -0,0 +1,84 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2019 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. +# --- END COPYRIGHT BLOCK --- + +import pytest, os, ldap +from lib389.cos import CosClassicDefinition, CosClassicDefinitions, CosTemplate +from lib389._constants import DEFAULT_SUFFIX +from lib389.topologies import topology_st as topo +from lib389.idm.role import FilteredRoles +from lib389.idm.nscontainer import nsContainer +from lib389.idm.user import UserAccount + +pytestmark = pytest.mark.tier1 + +def test_positive(topo): + """ + :id: a5a74235-597f-4fe8-8c38-826860927472 + :setup: server + :steps: + 1. Add filter role entry + 2. Add ns container + 3. Add cos template + 4. Add CosClassic Definition + 5. Cos entries should be added and searchable + 6. employeeType attribute should be there in user entry as per the cos plugin property + :expectedresults: + 1. Operation should succeed + 2. Operation should succeed + 3. Operation should succeed + 4. Operation should succeed + 5. Operation should succeed + 6. Operation should succeed + """ + # Adding ns filter role + roles = FilteredRoles(topo.standalone, DEFAULT_SUFFIX) + roles.create(properties={'cn': 'FILTERROLEENGROLE', + 'nsRoleFilter': 'cn=eng*'}) + # adding ns container + nsContainer(topo.standalone,'cn=cosClassicGenerateEmployeeTypeUsingnsroleTemplates,{}'.format(DEFAULT_SUFFIX))\ + .create(properties={'cn': 'cosTemplates'}) + + # creating cos template + properties = {'employeeType': 'EngType', + 'cn': '"cn=filterRoleEngRole,dc=example,dc=com",cn=cosClassicGenerateEmployeeTypeUsingnsroleTemplates,dc=example,dc=com' + } + CosTemplate(topo.standalone, 'cn="cn=filterRoleEngRole,dc=example,dc=com",cn=cosClassicGenerateEmployeeTypeUsingnsroleTemplates,{}'.format(DEFAULT_SUFFIX))\ + .create(properties=properties) + + # creating CosClassicDefinition + properties = {'cosTemplateDn': 'cn=cosClassicGenerateEmployeeTypeUsingnsroleTemplates,{}'.format(DEFAULT_SUFFIX), + 'cosAttribute': 'employeeType', + 'cosSpecifier': 'nsrole', + 'cn': 'cosClassicGenerateEmployeeTypeUsingnsrole'} + CosClassicDefinition(topo.standalone,'cn=cosClassicGenerateEmployeeTypeUsingnsrole,{}'.format(DEFAULT_SUFFIX))\ + .create(properties=properties) + + # Adding User entry + properties = { + 'uid': 'enguser1', + 'cn': 'enguser1', + 'sn': 'user', + 'uidNumber': '1000', + 'gidNumber': '2000', + 'homeDirectory': '/home/' + 'enguser1' + } + user = UserAccount(topo.standalone, 'cn=enguser1,{}'.format(DEFAULT_SUFFIX)) + user.create(properties=properties) + + # Asserting Cos should be added and searchable + cosdef = CosClassicDefinitions(topo.standalone, DEFAULT_SUFFIX).get('cosClassicGenerateEmployeeTypeUsingnsrole') + assert cosdef.dn == 'cn=cosClassicGenerateEmployeeTypeUsingnsrole,dc=example,dc=com' + assert cosdef.get_attr_val_utf8('cn') == 'cosClassicGenerateEmployeeTypeUsingnsrole' + + # CoS definition entry's cosSpecifier attribute specifies the employeeType attribute + assert user.present('employeeType') + + +if __name__ == "__main__": + CURRENT_FILE = os.path.realpath(__file__) + pytest.main("-s -v %s" % CURRENT_FILE) diff --git a/dirsrvtests/tests/suites/cos/indirect_cos_test.py b/dirsrvtests/tests/suites/cos/indirect_cos_test.py new file mode 100644 index 0000000..4689297 --- /dev/null +++ b/dirsrvtests/tests/suites/cos/indirect_cos_test.py @@ -0,0 +1,173 @@ +import logging +import pytest +import os +import ldap +import time +import subprocess + +from lib389 import Entry +from lib389.idm.user import UserAccounts +from lib389.idm.domain import Domain +from lib389.topologies import topology_st as topo +from lib389._constants import (DEFAULT_SUFFIX, DN_DM, PASSWORD, HOST_STANDALONE, + SERVERID_STANDALONE, PORT_STANDALONE) + +pytestmark = pytest.mark.tier1 + +DEBUGGING = os.getenv("DEBUGGING", default=False) +if DEBUGGING: + logging.getLogger(__name__).setLevel(logging.DEBUG) +else: + logging.getLogger(__name__).setLevel(logging.INFO) +log = logging.getLogger(__name__) + +TEST_USER_DN = "uid=test_user,ou=people,dc=example,dc=com" +OU_PEOPLE = 'ou=people,{}'.format(DEFAULT_SUFFIX) + +PW_POLICY_CONT_PEOPLE = 'cn="cn=nsPwPolicyEntry,' \ 'ou=people,dc=example,dc=com",' \ 'cn=nsPwPolicyContainer,ou=people,dc=example,dc=com' + +PW_POLICY_CONT_PEOPLE2 = 'cn="cn=nsPwPolicyEntry,' \ 'dc=example,dc=com",' \ 'cn=nsPwPolicyContainer,dc=example,dc=com' + + +def check_user(inst): + """Search the test user and make sure it has the expected attrs + """ + try: + entries = inst.search_s('dc=example,dc=com', ldap.SCOPE_SUBTREE, "uid=test_user") + log.debug('user: \n' +
str(entries[0])) + assert entries[0].hasAttr('ou'), "Entry is missing ou cos attribute" + assert entries[0].hasAttr('x-department'), "Entry is missing description cos attribute" + assert entries[0].hasAttr('x-en-ou'), "Entry is missing givenname cos attribute" + except ldap.LDAPError as e: + log.fatal('Failed to search for user: ' + str(e)) + raise e + + +def setup_subtree_policy(topo): + """Set up subtree password policy + """ + + topo.standalone.config.set('nsslapd-pwpolicy-local', 'on') + + log.info('Create password policy for subtree {}'.format(OU_PEOPLE)) + try: + subprocess.call(['%s/ns-newpwpolicy.pl' % topo.standalone.get_sbin_dir(), + '-D', DN_DM, '-w', PASSWORD, + '-p', str(PORT_STANDALONE), '-h', HOST_STANDALONE, + '-S', DEFAULT_SUFFIX, '-Z', SERVERID_STANDALONE]) + except subprocess.CalledProcessError as e: + log.error('Failed to create pw policy policy for {}: error {}'.format( + OU_PEOPLE, e.message['desc'])) + raise e + + domain = Domain(topo.standalone, DEFAULT_SUFFIX) + domain.replace('pwdpolicysubentry', PW_POLICY_CONT_PEOPLE2) + + time.sleep(1) + + +def setup_indirect_cos(topo): + """Setup indirect COS definition and template + """ + cosDef = Entry(('cn=cosDefinition,dc=example,dc=com', + {'objectclass': ['top', 'ldapsubentry', + 'cossuperdefinition', + 'cosIndirectDefinition'], + 'cosAttribute': ['ou merge-schemes', + 'x-department merge-schemes', + 'x-en-ou merge-schemes'], + 'cosIndirectSpecifier': 'seeAlso', + 'cn': 'cosDefinition'})) + + cosTemplate = Entry(('cn=cosTemplate,dc=example,dc=com', + {'objectclass': ['top', + 'extensibleObject', + 'cosTemplate'], + 'ou': 'My COS Org', + 'x-department': 'My COS x-department', + 'x-en-ou': 'my COS x-en-ou', + 'cn': 'cosTemplate'})) + try: + topo.standalone.add_s(cosDef) + topo.standalone.add_s(cosTemplate) + except ldap.LDAPError as e: + log.fatal('Failed to add cos: error ' + str(e)) + raise e + time.sleep(1) + + +@pytest.fixture(scope="module") +def setup(topo, request): + """Add schema, and test user + """ + log.info('Add custom schema...') + try: + ATTR_1 = (b"( 1.3.6.1.4.1.409.389.2.189 NAME 'x-department' SYNTAX 1.3.6.1.4.1.1466.115.121.1.15 X-ORIGIN 'user defined' )") + ATTR_2 = (b"( 1.3.6.1.4.1.409.389.2.187 NAME 'x-en-ou' SYNTAX 1.3.6.1.4.1.1466.115.121.1.15 X-ORIGIN 'user defined' )") + OC = (b"( xPerson-oid NAME 'xPerson' DESC '' SUP person STRUCTURAL MAY ( x-department $ x-en-ou ) X-ORIGIN 'user defined' )") + topo.standalone.modify_s("cn=schema", [(ldap.MOD_ADD, 'attributeTypes', ATTR_1), + (ldap.MOD_ADD, 'attributeTypes', ATTR_2), + (ldap.MOD_ADD, 'objectClasses', OC)]) + except ldap.LDAPError as e: + log.fatal('Failed to add custom schema') + raise e + time.sleep(1) + + log.info('Add test user...') + users = UserAccounts(topo.standalone, DEFAULT_SUFFIX) + + user_properties = { + 'uid': 'test_user', + 'cn': 'test user', + 'sn': 'user', + 'uidNumber': '1000', + 'gidNumber': '2000', + 'homeDirectory': '/home/test_user', + 'seeAlso': 'cn=cosTemplate,dc=example,dc=com' + } + user = users.create(properties=user_properties) + + user.add('objectClass', 'xPerson') + + # Setup COS + log.info("Setup indirect COS...") + setup_indirect_cos(topo) + + +def test_indirect_cos(topo, setup): + """Test indirect cos + + :id: 890d5929-7d52-4a56-956e-129611b4649a + :setup: standalone + :steps: + 1. Test cos is working for test user + 2. Add subtree password policy + 3. Test cos is working for test user + :expectedresults: + 1. User has expected cos attrs + 2. 
Subtree password policy setup is successful + 3. User still has expected cos attrs + """ + + # Step 1 - Search user and see if the COS attrs are included + log.info('Checking user...') + check_user(topo.standalone) + + # Step 2 - Add subtree password policy (Second COS - operational attribute) + setup_subtree_policy(topo) + + # Step 3 - Check the user again now that we have a mix of vattrs + log.info('Checking user...') + check_user(topo.standalone) + + +if __name__ == '__main__': + # Run isolated + # -s for DEBUG mode + CURRENT_FILE = os.path.realpath(__file__) + pytest.main("-s %s" % CURRENT_FILE) + diff --git a/dirsrvtests/tests/suites/disk_monitoring/__init__.py b/dirsrvtests/tests/suites/disk_monitoring/__init__.py new file mode 100644 index 0000000..2257178 --- /dev/null +++ b/dirsrvtests/tests/suites/disk_monitoring/__init__.py @@ -0,0 +1,3 @@ +""" + :Requirement: 389-ds-base: Disk Monitoring +""" diff --git a/dirsrvtests/tests/suites/disk_monitoring/disk_monitoring_test.py b/dirsrvtests/tests/suites/disk_monitoring/disk_monitoring_test.py new file mode 100644 index 0000000..c1d2974 --- /dev/null +++ b/dirsrvtests/tests/suites/disk_monitoring/disk_monitoring_test.py @@ -0,0 +1,584 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2018 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. +# --- END COPYRIGHT BLOCK --- + + +import os +import subprocess +import re +import time +import pytest +from lib389.tasks import * +from lib389._constants import * +from lib389.utils import ensure_bytes +from lib389.topologies import topology_st as topo +from lib389.paths import * +from lib389.idm.user import UserAccounts + +pytestmark = pytest.mark.tier2 + +THRESHOLD = '30' +THRESHOLD_BYTES = '30000000' + + +def _withouterrorlog(topo, condition, maxtimesleep): + timecount = 0 + while eval(condition): + time.sleep(1) + timecount += 1 + if timecount >= maxtimesleep: break + assert not eval(condition) + + +def _witherrorlog(topo, condition, maxtimesleep): + timecount = 0 + with open(topo.standalone.errlog, 'r') as study: study = study.read() + while condition not in study: + time.sleep(1) + timecount += 1 + with open(topo.standalone.errlog, 'r') as study: study = study.read() + if timecount >= maxtimesleep: break + assert condition in study + + +def presetup(topo): + """ + This function is part of the setup fixture; it prepares the environment for this test suite. + """ + topo.standalone.stop() + if os.path.exists(topo.standalone.ds_paths.log_dir): + subprocess.call(['mount', '-t', 'tmpfs', '-o', 'size=35M', 'tmpfs', topo.standalone.ds_paths.log_dir]) + else: + os.mkdir(topo.standalone.ds_paths.log_dir) + subprocess.call(['mount', '-t', 'tmpfs', '-o', 'size=35M', 'tmpfs', topo.standalone.ds_paths.log_dir]) + subprocess.call('chown {}: -R {}'.format(DEFAULT_USER, topo.standalone.ds_paths.log_dir), shell=True) + subprocess.call('chown {}: -R {}/*'.format(DEFAULT_USER, topo.standalone.ds_paths.log_dir), shell=True) + subprocess.call('restorecon -FvvR {}'.format(topo.standalone.ds_paths.log_dir), shell=True) + topo.standalone.start() + + +def setupthesystem(topo): + """ + This function is part of the setup fixture; it prepares the environment for this test suite. 
+ """ + global TOTAL_SIZE, USED_SIZE, AVAIL_SIZE, HALF_THR_FILL_SIZE, FULL_THR_FILL_SIZE + topo.standalone.start() + topo.standalone.config.set('nsslapd-disk-monitoring-grace-period', '1') + topo.standalone.config.set('nsslapd-accesslog-logbuffering', 'off') + topo.standalone.config.set('nsslapd-disk-monitoring-threshold', ensure_bytes(THRESHOLD_BYTES)) + TOTAL_SIZE = int(re.findall(r'\d+', str(os.statvfs(topo.standalone.ds_paths.log_dir)))[2])*4096/1024/1024 + AVAIL_SIZE = round(int(re.findall(r'\d+', str(os.statvfs(topo.standalone.ds_paths.log_dir)))[3]) * 4096 / 1024 / 1024) + USED_SIZE = TOTAL_SIZE - AVAIL_SIZE + HALF_THR_FILL_SIZE = TOTAL_SIZE - float(THRESHOLD) + 5 - USED_SIZE + FULL_THR_FILL_SIZE = TOTAL_SIZE - 0.5 * float(THRESHOLD) + 5 - USED_SIZE + HALF_THR_FILL_SIZE = round(HALF_THR_FILL_SIZE) + FULL_THR_FILL_SIZE = round(FULL_THR_FILL_SIZE) + topo.standalone.restart() + + +@pytest.fixture(scope="module") +def setup(request, topo): + """ + This is the fixture function , will run before running every test case. + """ + presetup(topo) + setupthesystem(topo) + + def fin(): + topo.standalone.stop() + subprocess.call(['umount', '-fl', topo.standalone.ds_paths.log_dir]) + topo.standalone.start() + + request.addfinalizer(fin) + +@pytest.fixture(scope="function") +def reset_logs(topo): + """ + Reset the errors log file before the test + """ + open('{}/errors'.format(topo.standalone.ds_paths.log_dir), 'w').close() + + +def test_verify_operation_when_disk_monitoring_is_off(topo, setup, reset_logs): + """ + Verify operation when Disk monitoring is off + :id: 73a97536-fe9e-11e8-ba9f-8c16451d917b + :setup: Standalone + :steps: + 1. Turn off disk monitoring + 2. Go below the threshold + 3. Check DS is up and not entering shutdown mode + :expectedresults: + 1. Should Success + 2. Should Success + 3. Should Success + """ + try: + # Turn off disk monitoring + topo.standalone.config.set('nsslapd-disk-monitoring', 'off') + topo.standalone.restart() + # go below the threshold + subprocess.call(['dd', 'if=/dev/zero', 'of={}/foo'.format(topo.standalone.ds_paths.log_dir), 'bs=1M', 'count={}'.format(FULL_THR_FILL_SIZE)]) + subprocess.call(['dd', 'if=/dev/zero', 'of={}/foo1'.format(topo.standalone.ds_paths.log_dir), 'bs=1M', 'count={}'.format(FULL_THR_FILL_SIZE)]) + # Wait for disk monitoring plugin thread to wake up + _withouterrorlog(topo, 'topo.standalone.status() != True', 10) + # Check DS is up and not entering shutdown mode + assert topo.standalone.status() == True + finally: + os.remove('{}/foo'.format(topo.standalone.ds_paths.log_dir)) + os.remove('{}/foo1'.format(topo.standalone.ds_paths.log_dir)) + + +def test_free_up_the_disk_space_and_change_ds_config(topo, setup, reset_logs): + """ + Free up the disk space and change DS config + :id: 7be4d560-fe9e-11e8-a307-8c16451d917b + :setup: Standalone + :steps: + 1. Enabling Disk Monitoring plugin and setting disk monitoring logging to critical + 2. Verify no message about loglevel is present in the error log + 3. Verify no message about disabling logging is present in the error log + 4. Verify no message about removing rotated logs is present in the error log + :expectedresults: + 1. Should Success + 2. Should Success + 3. Should Success + 4. 
Should Success + """ + # Enabling Disk Monitoring plugin and setting disk monitoring logging to critical + assert topo.standalone.config.set('nsslapd-disk-monitoring', 'on') + assert topo.standalone.config.set('nsslapd-disk-monitoring-logging-critical', 'on') + assert topo.standalone.config.set('nsslapd-errorlog-level', '8') + topo.standalone.restart() + # Verify no message about loglevel is present in the error log + # Verify no message about disabling logging is present in the error log + # Verify no message about removing rotated logs is present in the error log + with open(topo.standalone.errlog, 'r') as study: study = study.read() + assert 'temporarily setting error loglevel to zero' not in study + assert 'disabling access and audit logging' not in study + assert 'deleting rotated logs' not in study + + +def test_verify_operation_with_nsslapd_disk_monitoring_logging_critical_off(topo, setup, reset_logs): + """ + Verify operation with "nsslapd-disk-monitoring-logging-critical: off + :id: 82363bca-fe9e-11e8-9ae7-8c16451d917b + :setup: Standalone + :steps: + 1. Verify that verbose logging was set to default level + 2. Verify that logging is disabled + 3. Verify that rotated logs were not removed + :expectedresults: + 1. Should Success + 2. Should Success + 3. Should Success + """ + try: + # Verify that verbose logging was set to default level + assert topo.standalone.config.set('nsslapd-disk-monitoring', 'on') + assert topo.standalone.config.set('nsslapd-disk-monitoring-logging-critical', 'off') + assert topo.standalone.config.set('nsslapd-errorlog-level', '8') + topo.standalone.restart() + subprocess.call(['dd', 'if=/dev/zero', 'of={}/foo'.format(topo.standalone.ds_paths.log_dir), 'bs=1M', 'count={}'.format(HALF_THR_FILL_SIZE)]) + _witherrorlog(topo, 'temporarily setting error loglevel to the default level', 11) + assert LOG_DEFAULT == int(re.findall(r'nsslapd-errorlog-level: \d+', str( + topo.standalone.search_s('cn=config', ldap.SCOPE_SUBTREE, '(objectclass=*)', ['nsslapd-errorlog-level'])))[ + 0].split(' ')[1]) + # Verify that logging is disabled + _withouterrorlog(topo, "topo.standalone.config.get_attr_val_utf8('nsslapd-accesslog-logging-enabled') != 'off'", 10) + assert topo.standalone.config.get_attr_val_utf8('nsslapd-accesslog-logging-enabled') == 'off' + # Verify that rotated logs were not removed + with open(topo.standalone.errlog, 'r') as study: study = study.read() + assert 'disabling access and audit logging' in study + _witherrorlog(topo, 'deleting rotated logs', 11) + study = open(topo.standalone.errlog).read() + assert "Unable to remove file: {}".format(topo.standalone.ds_paths.log_dir) not in study + assert 'is too far below the threshold' not in study + finally: + os.remove('{}/foo'.format(topo.standalone.ds_paths.log_dir)) + + +def test_operation_with_nsslapd_disk_monitoring_logging_critical_on_below_half_of_the_threshold(topo, setup, reset_logs): + """ + Verify operation with \"nsslapd-disk-monitoring-logging-critical: on\" below 1/2 of the threshold + Verify recovery + :id: 8940c502-fe9e-11e8-bcc0-8c16451d917b + :setup: Standalone + :steps: + 1. Verify that DS goes into shutdown mode + 2. Verify that DS exited shutdown mode + :expectedresults: + 1. Should Success + 2. 
Should Success + """ + assert topo.standalone.config.set('nsslapd-disk-monitoring', 'on') + assert topo.standalone.config.set('nsslapd-disk-monitoring-logging-critical', 'on') + topo.standalone.restart() + # Verify that DS goes into shutdown mode + if float(THRESHOLD) > FULL_THR_FILL_SIZE: + FULL_THR_FILL_SIZE_new = FULL_THR_FILL_SIZE + round(float(THRESHOLD) - FULL_THR_FILL_SIZE) + 1 + subprocess.call(['dd', 'if=/dev/zero', 'of={}/foo'.format(topo.standalone.ds_paths.log_dir), 'bs=1M', 'count={}'.format(FULL_THR_FILL_SIZE_new)]) + else: + subprocess.call(['dd', 'if=/dev/zero', 'of={}/foo'.format(topo.standalone.ds_paths.log_dir), 'bs=1M', 'count={}'.format(FULL_THR_FILL_SIZE)]) + _witherrorlog(topo, 'is too far below the threshold', 20) + os.remove('{}/foo'.format(topo.standalone.ds_paths.log_dir)) + # Verify that DS exited shutdown mode + _witherrorlog(topo, 'Available disk space is now acceptable', 25) + + +def test_setting_nsslapd_disk_monitoring_logging_critical_to_off(topo, setup, reset_logs): + """ + Setting nsslapd-disk-monitoring-logging-critical to \"off\ + :id: 93265ec4-fe9e-11e8-af93-8c16451d917b + :setup: Standalone + :steps: + 1. Setting nsslapd-disk-monitoring-logging-critical to \"off\ + :expectedresults: + 1. Should Success + """ + assert topo.standalone.config.set('nsslapd-disk-monitoring', 'on') + assert topo.standalone.config.set('nsslapd-disk-monitoring-logging-critical', 'off') + assert topo.standalone.config.set('nsslapd-errorlog-level', '8') + topo.standalone.restart() + assert topo.standalone.status() == True + + +def test_operation_with_nsslapd_disk_monitoring_logging_critical_off(topo, setup, reset_logs): + """ + Verify operation with \"nsslapd-disk-monitoring-logging-critical: off + :id: 97985a52-fe9e-11e8-9914-8c16451d917b + :setup: Standalone + :steps: + 1. Verify that logging is disabled + 2. Verify that rotated logs were removed + 3. Verify that verbose logging was set to default level + 4. Verify that logging is disabled + 5. Verify that rotated logs were removed + :expectedresults: + 1. Should Success + 2. Should Success + 3. Should Success + 4. Should Success + 5. 
Should Success + """ + # Verify that logging is disabled + try: + assert topo.standalone.config.set('nsslapd-disk-monitoring', 'on') + assert topo.standalone.config.set('nsslapd-disk-monitoring-logging-critical', 'off') + assert topo.standalone.config.set('nsslapd-errorlog-level', '8') + assert topo.standalone.config.set('nsslapd-accesslog-maxlogsize', '1') + assert topo.standalone.config.set('nsslapd-accesslog-logrotationtimeunit', 'minute') + assert topo.standalone.config.set('nsslapd-accesslog-level', '772') + topo.standalone.restart() + # Verify that rotated logs were removed + users = UserAccounts(topo.standalone, DEFAULT_SUFFIX) + for i in range(10): + user_properties = { + 'uid': 'cn=anuj{}'.format(i), + 'cn': 'cn=anuj{}'.format(i), + 'sn': 'cn=anuj{}'.format(i), + 'userPassword': "Itsme123", + 'uidNumber': '1{}'.format(i), + 'gidNumber': '2{}'.format(i), + 'homeDirectory': '/home/{}'.format(i) + } + users.create(properties=user_properties) + for j in range(100): + for i in [i for i in users.list()]: i.bind('Itsme123') + assert re.findall(r'access.\d+-\d+',str(os.listdir(topo.standalone.ds_paths.log_dir))) + topo.standalone.bind_s(DN_DM, PW_DM) + assert topo.standalone.config.set('nsslapd-accesslog-maxlogsize', '100') + assert topo.standalone.config.set('nsslapd-accesslog-logrotationtimeunit', 'day') + assert topo.standalone.config.set('nsslapd-accesslog-level', '256') + topo.standalone.restart() + subprocess.call(['dd', 'if=/dev/zero', 'of={}/foo2'.format(topo.standalone.ds_paths.log_dir), 'bs=1M', 'count={}'.format(HALF_THR_FILL_SIZE)]) + # Verify that verbose logging was set to default level + _witherrorlog(topo, 'temporarily setting error loglevel to the default level', 10) + assert LOG_DEFAULT == int(re.findall(r'nsslapd-errorlog-level: \d+', str( + topo.standalone.search_s('cn=config', ldap.SCOPE_SUBTREE, '(objectclass=*)', ['nsslapd-errorlog-level'])))[0].split(' ')[1]) + # Verify that logging is disabled + _withouterrorlog(topo, "topo.standalone.config.get_attr_val_utf8('nsslapd-accesslog-logging-enabled') != 'off'", 20) + with open(topo.standalone.errlog, 'r') as study: study = study.read() + assert 'disabling access and audit logging' in study + # Verify that rotated logs were removed + _witherrorlog(topo, 'deleting rotated logs', 10) + with open(topo.standalone.errlog, 'r') as study:study = study.read() + assert 'Unable to remove file:' not in study + assert 'is too far below the threshold' not in study + for i in [i for i in users.list()]: i.delete() + finally: + os.remove('{}/foo2'.format(topo.standalone.ds_paths.log_dir)) + + +def test_operation_with_nsslapd_disk_monitoring_logging_critical_off_below_half_of_the_threshold(topo, setup, reset_logs): + """ + Verify operation with \"nsslapd-disk-monitoring-logging-critical: off\" below 1/2 of the threshold + Verify shutdown + Recovery and setup + :id: 9d4c7d48-fe9e-11e8-b5d6-8c16451d917b + :setup: Standalone + :steps: + 1. Verify that DS goes into shutdown mode + 2. Verifying that DS has been shut down after the grace period + 3. Verify logging enabled + 4. Create rotated logfile + 5. Enable verbose logging + :expectedresults: + 1. Should Success + 2. Should Success + 3. Should Success + 4. Should Success + 5. 
Should Success + """ + assert topo.standalone.config.set('nsslapd-disk-monitoring', 'on') + assert topo.standalone.config.set('nsslapd-disk-monitoring-logging-critical', 'off') + topo.standalone.restart() + # Verify that DS goes into shutdown mode + if float(THRESHOLD) > FULL_THR_FILL_SIZE: + FULL_THR_FILL_SIZE_new = FULL_THR_FILL_SIZE + round(float(THRESHOLD) - FULL_THR_FILL_SIZE) + subprocess.call(['dd', 'if=/dev/zero', 'of={}/foo'.format(topo.standalone.ds_paths.log_dir), 'bs=1M', 'count={}'.format(FULL_THR_FILL_SIZE_new)]) + else: + subprocess.call(['dd', 'if=/dev/zero', 'of={}/foo'.format(topo.standalone.ds_paths.log_dir), 'bs=1M', 'count={}'.format(FULL_THR_FILL_SIZE)]) + # Increased sleep to avoid failure + _witherrorlog(topo, 'is too far below the threshold', 100) + _witherrorlog(topo, 'Signaling slapd for shutdown', 2) + # Verifying that DS has been shut down after the grace period + assert topo.standalone.status() == False + # free_space + os.remove('{}/foo'.format(topo.standalone.ds_paths.log_dir)) + open('{}/errors'.format(topo.standalone.ds_paths.log_dir), 'w').close() + # StartSlapd + topo.standalone.start() + # verify logging enabled + assert topo.standalone.config.get_attr_val_utf8('nsslapd-accesslog-logging-enabled') == 'on' + assert topo.standalone.config.get_attr_val_utf8('nsslapd-errorlog-logging-enabled') == 'on' + with open(topo.standalone.errlog, 'r') as study: study = study.read() + assert 'disabling access and audit logging' not in study + assert topo.standalone.config.set('nsslapd-accesslog-maxlogsize', '1') + assert topo.standalone.config.set('nsslapd-accesslog-logrotationtimeunit', 'minute') + assert topo.standalone.config.set('nsslapd-accesslog-level', '772') + topo.standalone.restart() + # create rotated logfile + users = UserAccounts(topo.standalone, DEFAULT_SUFFIX) + for i in range(10): + user_properties = { + 'uid': 'cn=anuj{}'.format(i), + 'cn': 'cn=anuj{}'.format(i), + 'sn': 'cn=anuj{}'.format(i), + 'userPassword': "Itsme123", + 'uidNumber': '1{}'.format(i), + 'gidNumber': '2{}'.format(i), + 'homeDirectory': '/home/{}'.format(i) + } + users.create(properties=user_properties) + for j in range(100): + for i in [i for i in users.list()]: i.bind('Itsme123') + assert re.findall(r'access.\d+-\d+',str(os.listdir(topo.standalone.ds_paths.log_dir))) + topo.standalone.bind_s(DN_DM, PW_DM) + # enable verbose logging + assert topo.standalone.config.set('nsslapd-accesslog-maxlogsize', '100') + assert topo.standalone.config.set('nsslapd-accesslog-logrotationtimeunit', 'day') + assert topo.standalone.config.set('nsslapd-accesslog-level', '256') + assert topo.standalone.config.set('nsslapd-errorlog-level', '8') + topo.standalone.restart() + for i in [i for i in users.list()]: i.delete() + + +def test_go_straight_below_half_of_the_threshold(topo, setup, reset_logs): + """ + Go straight below 1/2 of the threshold + Recovery and setup + :id: a2a0664c-fe9e-11e8-b220-8c16451d917b + :setup: Standalone + :steps: + 1. Go straight below 1/2 of the threshold + 2. Verify that verbose logging was set to default level + 3. Verify that logging is disabled + 4. Verify DS is in shutdown mode + 5. Verify DS has recovered from shutdown + :expectedresults: + 1. Should Success + 2. Should Success + 3. Should Success + 4. Should Success + 5. 
Should Success + """ + assert topo.standalone.config.set('nsslapd-disk-monitoring', 'on') + assert topo.standalone.config.set('nsslapd-disk-monitoring-logging-critical', 'off') + assert topo.standalone.config.set('nsslapd-errorlog-level', '8') + topo.standalone.restart() + if float(THRESHOLD) > FULL_THR_FILL_SIZE: + FULL_THR_FILL_SIZE_new = FULL_THR_FILL_SIZE + round(float(THRESHOLD) - FULL_THR_FILL_SIZE) + 1 + subprocess.call(['dd', 'if=/dev/zero', 'of={}/foo'.format(topo.standalone.ds_paths.log_dir), 'bs=1M', 'count={}'.format(FULL_THR_FILL_SIZE_new)]) + else: + subprocess.call(['dd', 'if=/dev/zero', 'of={}/foo'.format(topo.standalone.ds_paths.log_dir), 'bs=1M', 'count={}'.format(FULL_THR_FILL_SIZE)]) + _witherrorlog(topo, 'temporarily setting error loglevel to the default level', 11) + # Verify that verbose logging was set to default level + assert LOG_DEFAULT == int(re.findall(r'nsslapd-errorlog-level: \d+', + str(topo.standalone.search_s('cn=config', ldap.SCOPE_SUBTREE, + '(objectclass=*)', + ['nsslapd-errorlog-level'])) + )[0].split(' ')[1]) + # Verify that logging is disabled + _withouterrorlog(topo, "topo.standalone.config.get_attr_val_utf8('nsslapd-accesslog-logging-enabled') != 'off'", 11) + # Verify that rotated logs were removed + _witherrorlog(topo, 'disabling access and audit logging', 2) + _witherrorlog(topo, 'deleting rotated logs', 11) + with open(topo.standalone.errlog, 'r') as study:study = study.read() + assert 'Unable to remove file:' not in study + # Verify DS is in shutdown mode + _withouterrorlog(topo, 'topo.standalone.status() != False', 90) + _witherrorlog(topo, 'is too far below the threshold', 2) + # Verify DS has recovered from shutdown + os.remove('{}/foo'.format(topo.standalone.ds_paths.log_dir)) + open('{}/errors'.format(topo.standalone.ds_paths.log_dir), 'w').close() + topo.standalone.start() + _withouterrorlog(topo, "topo.standalone.config.get_attr_val_utf8('nsslapd-accesslog-logging-enabled') != 'on'", 20) + with open(topo.standalone.errlog, 'r') as study: study = study.read() + assert 'disabling access and audit logging' not in study + + +def test_go_straight_below_4kb(topo, setup, reset_logs): + """ + Go straight below 4KB + :id: a855115a-fe9e-11e8-8e91-8c16451d917b + :setup: Standalone + :steps: + 1. Go straight below 4KB + 2. Clean space + :expectedresults: + 1. Should Success + 2. Should Success + """ + assert topo.standalone.config.set('nsslapd-disk-monitoring', 'on') + topo.standalone.restart() + subprocess.call(['dd', 'if=/dev/zero', 'of={}/foo'.format(topo.standalone.ds_paths.log_dir), 'bs=1M', 'count={}'.format(FULL_THR_FILL_SIZE)]) + subprocess.call(['dd', 'if=/dev/zero', 'of={}/foo1'.format(topo.standalone.ds_paths.log_dir), 'bs=1M', 'count={}'.format(FULL_THR_FILL_SIZE)]) + _withouterrorlog(topo, 'topo.standalone.status() != False', 11) + os.remove('{}/foo'.format(topo.standalone.ds_paths.log_dir)) + os.remove('{}/foo1'.format(topo.standalone.ds_paths.log_dir)) + topo.standalone.start() + assert topo.standalone.status() == True + + +@pytest.mark.bz982325 +def test_threshold_to_overflow_value(topo, setup, reset_logs): + """ + Overflow in nsslapd-disk-monitoring-threshold + :id: ad60ab3c-fe9e-11e8-88dc-8c16451d917b + :setup: Standalone + :steps: + 1. Setting nsslapd-disk-monitoring-threshold to overflow_value + :expectedresults: + 1. 
Should Success + """ + overflow_value = '3000000000' + # Setting nsslapd-disk-monitoring-threshold to overflow_value + assert topo.standalone.config.set('nsslapd-disk-monitoring-threshold', ensure_bytes(overflow_value)) + assert overflow_value == re.findall(r'nsslapd-disk-monitoring-threshold: \d+', str( + topo.standalone.search_s('cn=config', ldap.SCOPE_SUBTREE, '(objectclass=*)', + ['nsslapd-disk-monitoring-threshold'])))[0].split(' ')[1] + + +@pytest.mark.bz970995 +def test_threshold_is_reached_to_half(topo, setup, reset_logs): + """ + RHDS not shutting down when disk monitoring threshold is reached to half. + :id: b2d3665e-fe9e-11e8-b9c0-8c16451d917b + :setup: Standalone + :steps: + 1. Verify that there is no endless loop of error messages + :expectedresults: + 1. Should Success + """ + + assert topo.standalone.config.set('nsslapd-disk-monitoring', 'on') + assert topo.standalone.config.set('nsslapd-disk-monitoring-logging-critical', 'on') + assert topo.standalone.config.set('nsslapd-errorlog-level', '8') + assert topo.standalone.config.set('nsslapd-disk-monitoring-threshold', ensure_bytes(THRESHOLD_BYTES)) + topo.standalone.restart() + subprocess.call(['dd', 'if=/dev/zero', 'of={}/foo'.format(topo.standalone.ds_paths.log_dir), 'bs=1M', 'count={}'.format(HALF_THR_FILL_SIZE)]) + # Verify that there is no endless loop of error messages + _witherrorlog(topo, "temporarily setting error loglevel to the default level", 10) + with open(topo.standalone.errlog, 'r') as study: study = study.read() + assert len(re.findall("temporarily setting error loglevel to the default level", study)) == 1 + os.remove('{}/foo'.format(topo.standalone.ds_paths.log_dir)) + + +@pytest.mark.parametrize("test_input,expected", [ + ("nsslapd-disk-monitoring-threshold", '-2'), + ("nsslapd-disk-monitoring-threshold", '9223372036854775808'), + ("nsslapd-disk-monitoring-threshold", '2047'), + ("nsslapd-disk-monitoring-threshold", '0'), + ("nsslapd-disk-monitoring-threshold", '-1294967296'), + ("nsslapd-disk-monitoring-threshold", 'invalid'), + ("nsslapd-disk-monitoring", 'invalid'), + ("nsslapd-disk-monitoring", '1'), + ("nsslapd-disk-monitoring-grace-period", '0'), + ("nsslapd-disk-monitoring-grace-period", '525 948'), + ("nsslapd-disk-monitoring-grace-period", '-1'), + ("nsslapd-disk-monitoring-logging-critical", 'oninvalid'), + ("nsslapd-disk-monitoring-grace-period", '-1'), + ("nsslapd-disk-monitoring-grace-period", '0'), +]) +def test_negative_parameterize(topo, setup, reset_logs, test_input, expected): + """ + Verify that invalid operations are not permitted + + :id: b88efbf8-fe9e-11e8-8499-8c16451d917b + :parametrized: yes + :setup: Standalone + :steps: + 1. Verify that invalid operations are not permitted. + :expectedresults: + 1. Should not succeed. + """ + with pytest.raises(Exception): + topo.standalone.config.set(test_input, ensure_bytes(expected)) + + +def test_valid_operations_are_permitted(topo, setup, reset_logs): + """ + Verify that valid operations are permitted + :id: bd4f83f6-fe9e-11e8-88f4-8c16451d917b + :setup: Standalone + :steps: + 1. Verify that valid operations are permitted + :expectedresults: + 1. Should Success. 
+ """ + assert topo.standalone.config.set('nsslapd-disk-monitoring', 'on') + assert topo.standalone.config.set('nsslapd-disk-monitoring-logging-critical', 'on') + assert topo.standalone.config.set('nsslapd-errorlog-level', '8') + topo.standalone.restart() + # Trying to delete nsslapd-disk-monitoring-threshold + assert topo.standalone.modify_s('cn=config', [(ldap.MOD_DELETE, 'nsslapd-disk-monitoring-threshold', '')]) + # Trying to add another value to nsslapd-disk-monitoring-threshold (check that it is not multivalued) + topo.standalone.config.add('nsslapd-disk-monitoring-threshold', '2000001') + # Trying to delete nsslapd-disk-monitoring + assert topo.standalone.modify_s('cn=config', [(ldap.MOD_DELETE, 'nsslapd-disk-monitoring', ensure_bytes(str( + topo.standalone.search_s('cn=config', ldap.SCOPE_SUBTREE, '(objectclass=*)', ['nsslapd-disk-monitoring'])[ + 0]).split(' ')[2].split('\n\n')[0]))]) + # Trying to add another value to nsslapd-disk-monitoring + topo.standalone.config.add('nsslapd-disk-monitoring', 'off') + # Trying to delete nsslapd-disk-monitoring-grace-period + assert topo.standalone.modify_s('cn=config', [(ldap.MOD_DELETE, 'nsslapd-disk-monitoring-grace-period', '')]) + # Trying to add another value to nsslapd-disk-monitoring-grace-period + topo.standalone.config.add('nsslapd-disk-monitoring-grace-period', '61') + # Trying to delete nsslapd-disk-monitoring-logging-critical + assert topo.standalone.modify_s('cn=config', [(ldap.MOD_DELETE, 'nsslapd-disk-monitoring-logging-critical', + ensure_bytes(str( + topo.standalone.search_s('cn=config', ldap.SCOPE_SUBTREE, + '(objectclass=*)', [ + 'nsslapd-disk-monitoring-logging-critical'])[ + 0]).split(' ')[2].split('\n\n')[0]))]) + # Trying to add another value to nsslapd-disk-monitoring-logging-critical + assert topo.standalone.config.set('nsslapd-disk-monitoring-logging-critical', 'on') + + +if __name__ == '__main__': + CURRENT_FILE = os.path.realpath(__file__) + pytest.main("-s -v %s" % CURRENT_FILE) diff --git a/dirsrvtests/tests/suites/disk_monitoring/disk_space_test.py b/dirsrvtests/tests/suites/disk_monitoring/disk_space_test.py new file mode 100644 index 0000000..892c78e --- /dev/null +++ b/dirsrvtests/tests/suites/disk_monitoring/disk_space_test.py @@ -0,0 +1,45 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2019 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. +# --- END COPYRIGHT BLOCK --- +from lib389.monitor import MonitorDiskSpace +from lib389.topologies import topology_st as topo + + +def test_basic(topo): + """Test that the cn=disk space,cn=monitor gives at least one value + + :id: f1962762-2c6c-4e50-97af-a00012a7486d + :setup: Standalone + :steps: + 1. Get cn=disk space,cn=monitor entry + 2. Check it has at least one dsDisk attribute + 3. Check dsDisk attribute has the partition and sizes + 4. Check the numbers are valid integers + :expectedresults: + 1. It should succeed + 2. It should succeed + 3. It should succeed + 4. 
It should succeed + """ + + inst = topo.standalone + + # Turn off disk monitoring + disk_space_mon = MonitorDiskSpace(inst) + disk_str = disk_space_mon.get_disks()[0] + + inst.log.info('Check that "partition", "size", "used", "available", "use%" words are present in the string') + words = ["partition", "size", "used", "available", "use%"] + assert all(map(lambda word: word in disk_str, words)) + + inst.log.info("Check that the sizes are numbers") + for word in words[1:]: + number = disk_str.split(f'{word}="')[1].split('"')[0] + try: + int(number) + except ValueError: + raise ValueError(f'A "{word}" value is not a number') diff --git a/dirsrvtests/tests/suites/ds_logs/__init__.py b/dirsrvtests/tests/suites/ds_logs/__init__.py new file mode 100644 index 0000000..feefbd2 --- /dev/null +++ b/dirsrvtests/tests/suites/ds_logs/__init__.py @@ -0,0 +1,3 @@ +""" + :Requirement: 389-ds-base: Directory Server Logs +""" diff --git a/dirsrvtests/tests/suites/ds_logs/ds_logs_test.py b/dirsrvtests/tests/suites/ds_logs/ds_logs_test.py new file mode 100644 index 0000000..94686f5 --- /dev/null +++ b/dirsrvtests/tests/suites/ds_logs/ds_logs_test.py @@ -0,0 +1,857 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2015 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. +# --- END COPYRIGHT BLOCK --- +# +import os +import logging +import pytest +import subprocess +from lib389._mapped_object import DSLdapObject +from lib389.topologies import topology_st +from lib389.plugins import AutoMembershipPlugin, ReferentialIntegrityPlugin, AutoMembershipDefinitions +from lib389.idm.user import UserAccounts +from lib389.idm.group import Groups +from lib389.idm.organizationalunit import OrganizationalUnits +from lib389._constants import DEFAULT_SUFFIX, LOG_ACCESS_LEVEL, DN_CONFIG, HOST_STANDALONE, PORT_STANDALONE, DN_DM, PASSWORD +from lib389.utils import ds_is_older +import ldap +import glob + +pytestmark = pytest.mark.tier1 + +logging.getLogger(__name__).setLevel(logging.DEBUG) +log = logging.getLogger(__name__) + +PLUGIN_TIMESTAMP = 'nsslapd-logging-hr-timestamps-enabled' +PLUGIN_LOGGING = 'nsslapd-plugin-logging' +USER1_DN = 'uid=user1,' + DEFAULT_SUFFIX + + +def add_users(topology_st, users_num): + users = UserAccounts(topology_st, DEFAULT_SUFFIX) + log.info('Adding %d users' % users_num) + for i in range(0, users_num): + uid = 1000 + i + users.create(properties={ + 'uid': 'testuser%d' % uid, + 'cn': 'testuser%d' % uid, + 'sn': 'user', + 'uidNumber': '%d' % uid, + 'gidNumber': '%d' % uid, + 'homeDirectory': '/home/testuser%d' % uid + }) + + +def search_users(topology_st): + users = UserAccounts(topology_st, DEFAULT_SUFFIX) + entries = users.list() + # We just assert we got some data ... 
+ assert len(entries) > 0 + + +def delete_obj(obj): + if obj.exists(): + obj.delete() + + +def add_group_and_perform_user_operations(topology_st): + topo = topology_st.standalone + + # Add the automember group + groups = Groups(topo, DEFAULT_SUFFIX) + group = groups.create(properties={'cn': 'group'}) + + ous = OrganizationalUnits(topo, DEFAULT_SUFFIX) + branch1 = ous.create(properties={'ou': 'branch1'}) + + # Add the automember config entry + am_configs = AutoMembershipDefinitions(topo) + am_config = am_configs.create(properties={'cn': 'config', + 'autoMemberScope': branch1.dn, + 'autoMemberFilter': 'objectclass=top', + 'autoMemberDefaultGroup': group.dn, + 'autoMemberGroupingAttr': 'member:dn'}) + + # Add a user that should get added to the group + users = UserAccounts(topo, DEFAULT_SUFFIX, rdn='ou={}'.format(branch1.rdn)) + test_user = users.create_test_user(uid=777) + + # Check if created user is group member + assert test_user.dn in group.list_members() + + log.info('Renaming user') + test_user.rename('uid=new_test_user_777', newsuperior=DEFAULT_SUFFIX) + + log.info('Delete the user') + delete_obj(test_user) + + log.info('Delete automember entry, org. unit and group for the next test') + delete_obj(am_config) + delete_obj(branch1) + delete_obj(group) + + +@pytest.fixture(scope="module") +def enable_plugins(topology_st): + topo = topology_st.standalone + + log.info("Enable automember plugin") + plugin = AutoMembershipPlugin(topo) + plugin.enable() + + log.info('Enable Referential Integrity plugin') + plugin = ReferentialIntegrityPlugin(topo) + plugin.enable() + + log.info('Set nsslapd-plugin-logging to on') + topo.config.set(PLUGIN_LOGGING, 'ON') + + log.info('Restart the server') + topo.restart() + + +def add_user_log_level(topology_st, loglevel, request): + topo = topology_st.standalone + default_log_level = topo.config.get_attr_val_utf8(LOG_ACCESS_LEVEL) + log.info(f'Configure access log level to {loglevel}') + topo.config.set(LOG_ACCESS_LEVEL, str(loglevel)) + add_group_and_perform_user_operations(topology_st) + + def fin(): + topo.config.set(LOG_ACCESS_LEVEL, default_log_level) + log.info('Delete the previous access logs for the next test') + topo.deleteAccessLogs() + request.addfinalizer(fin) + + +@pytest.fixture(scope="function") +def add_user_log_level_260(topology_st, enable_plugins, request): + access_log_level = 4 + 256 + add_user_log_level(topology_st, access_log_level, request) + + +@pytest.fixture(scope="function") +def add_user_log_level_516(topology_st, enable_plugins, request): + access_log_level = 4 + 512 + add_user_log_level(topology_st, access_log_level, request) + + +@pytest.fixture(scope="function") +def add_user_log_level_131076(topology_st, enable_plugins, request): + access_log_level = 4 + 131072 + add_user_log_level(topology_st, access_log_level, request) + + +@pytest.fixture(scope="function") +def clean_access_logs(topology_st, request): + def _clean_access_logs(): + topo = topology_st.standalone + log.info("Stopping the instance") + topo.stop() + log.info("Deleting the access logs") + topo.deleteAccessLogs() + log.info("Starting the instance") + topo.start() + + request.addfinalizer(_clean_access_logs) + + return clean_access_logs + + +def set_audit_log_config_values(topology_st, request, enabled, logsize): + topo = topology_st.standalone + + topo.config.set('nsslapd-auditlog-logging-enabled', enabled) + topo.config.set('nsslapd-auditlog-maxlogsize', logsize) + + def fin(): + topo.start() + log.info('Setting audit log config back to default values') + 
topo.config.set('nsslapd-auditlog-logging-enabled', 'off') + topo.config.set('nsslapd-auditlog-maxlogsize', '100') + + request.addfinalizer(fin) + + +@pytest.fixture(scope="function") +def set_audit_log_config_values_to_rotate(topology_st, request): + set_audit_log_config_values(topology_st, request, 'on', '1') + + +@pytest.mark.bz1273549 +def test_check_default(topology_st): + """Check the default value of nsslapd-logging-hr-timestamps-enabled, + it should be ON + + :id: 2d15002e-9ed3-4796-b0bb-bf04e4e59bd3 + + :setup: Standalone instance + + :steps: + 1. Fetch the value of nsslapd-logging-hr-timestamps-enabled attribute + 2. Test that the attribute value should be "ON" by default + + :expectedresults: + 1. Value should be fetched successfully + 2. Value should be "ON" by default + """ + + # Get the default value of nsslapd-logging-hr-timestamps-enabled attribute + default = topology_st.standalone.config.get_attr_val_utf8(PLUGIN_TIMESTAMP) + + # Now check it should be ON by default + assert default == "on" + log.debug(default) + + +@pytest.mark.bz1273549 +def test_plugin_set_invalid(topology_st): + """Try to set some invalid values for nsslapd-logging-hr-timestamps-enabled + attribute + + :id: c60a68d2-703a-42bf-a5c2-4040736d511a + + :setup: Standalone instance + + :steps: + 1. Set some "JUNK" value of nsslapd-logging-hr-timestamps-enabled attribute + + :expectedresults: + 1. There should be an operation error + """ + + log.info('test_plugin_set_invalid - Expect to fail with junk value') + with pytest.raises(ldap.OPERATIONS_ERROR): + result = topology_st.standalone.config.set(PLUGIN_TIMESTAMP, 'JUNK') + + +@pytest.mark.bz1273549 +def test_log_plugin_on(topology_st): + """Check access logs for millisecond, when + nsslapd-logging-hr-timestamps-enabled=ON + + :id: 65ae4e2a-295f-4222-8d69-12124bc7a872 + + :setup: Standalone instance + + :steps: + 1. To generate big logs, add 100 test users + 2. Search users to generate more access logs + 3. Restart server + 4. Parse the logs to check the milliseconds got recorded in logs + + :expectedresults: + 1. Add operation should be successful + 2. Search operation should be successful + 3. Server should be restarted successfully + 4. There should be milliseconds added in the access logs + """ + + log.info('Bug 1273549 - Check access logs for millisecond, when attribute is ON') + log.info('perform any ldap operation, which will trigger the logs') + add_users(topology_st.standalone, 10) + search_users(topology_st.standalone) + + log.info('Restart the server to flush the logs') + topology_st.standalone.restart(timeout=10) + + log.info('parse the access logs') + access_log_lines = topology_st.standalone.ds_access_log.readlines() + assert len(access_log_lines) > 0 + assert topology_st.standalone.ds_access_log.match(r'^\[.+\d{9}.+\].+') + + +@pytest.mark.bz1273549 +def test_log_plugin_off(topology_st): + """Milliseconds should be absent from access logs when + nsslapd-logging-hr-timestamps-enabled=OFF + + :id: b3400e46-d940-4574-b399-e3f4b49bc4b5 + + :setup: Standalone instance + + :steps: + 1. Set nsslapd-logging-hr-timestamps-enabled=OFF + 2. Restart the server + 3. Delete old access logs + 4. Do search operations to generate fresh access logs + 5. Restart the server + 6. Check access logs + + :expectedresults: + 1. Attribute nsslapd-logging-hr-timestamps-enabled should be set to "OFF" + 2. Server should restart + 3. Access logs should be deleted + 4. Search operation should PASS + 5. Server should restart + 6. 
There should not be any milliseconds added in the access logs + """ + + log.info('Bug 1273549 - Check access logs for missing millisecond, when attribute is OFF') + + log.info('test_log_plugin_off - set the configuration attribute to OFF') + topology_st.standalone.config.set(PLUGIN_TIMESTAMP, 'OFF') + + log.info('Restart the server to flush the logs') + topology_st.standalone.restart(timeout=10) + + log.info('test_log_plugin_off - delete the previous access logs') + topology_st.standalone.deleteAccessLogs() + + # Now generate some fresh logs + search_users(topology_st.standalone) + + log.info('Restart the server to flush the logs') + topology_st.standalone.restart(timeout=10) + + log.info('check access log that microseconds are not present') + access_log_lines = topology_st.standalone.ds_access_log.readlines() + assert len(access_log_lines) > 0 + assert not topology_st.standalone.ds_access_log.match(r'^\[.+\d{9}.+\].+') + + +@pytest.mark.xfail(ds_is_older('1.4.0'), reason="May fail on 1.3.x because of bug 1358706") +@pytest.mark.bz1358706 +@pytest.mark.ds49029 +def test_internal_log_server_level_0(topology_st, clean_access_logs): + """Tests server-initiated internal operations + :id: 798d06fe-92e8-4648-af66-21349c20638e + :setup: Standalone instance + :steps: + 1. Set nsslapd-plugin-logging to on + 2. Configure access log level to only 0 + 3. Check the access logs. + :expectedresults: + 1. Operation should be successful + 2. Operation should be successful + 3. Access log should not contain internal operations log formats + """ + + topo = topology_st.standalone + default_log_level = topo.config.get_attr_val_utf8(LOG_ACCESS_LEVEL) + + + log.info('Set nsslapd-plugin-logging to on') + topo.config.set(PLUGIN_LOGGING, 'ON') + + log.info('Configure access log level to 0') + access_log_level = '0' + topo.config.set(LOG_ACCESS_LEVEL, access_log_level) + + log.info('Restart the server to flush the logs') + topo.restart() + + # These comments contain lines we are trying to find without regex (the op numbers are just examples) + log.info("Check if access log does not contain internal log of MOD operation") + # (Internal) op=2(2)(1) SRCH base="cn=config + assert not topo.ds_access_log.match(r'.*\(Internal\) op=[0-9]+\([0-9]+\)\([0-9]+\) SRCH base="cn=config.*') + # (Internal) op=2(2)(1) RESULT err=0 tag=48 nentries=1 + assert not topo.ds_access_log.match(r'.*\(Internal\) op=[0-9]+\([0-9]+\)\([0-9]+\) RESULT err=0 tag=48 nentries=1.*') + + log.info("Check if the other internal operations are not present") + # conn=Internal(0) op=0 + assert not topo.ds_access_log.match(r'.*conn=Internal\([0-9]+\) op=[0-9]+\([0-9]+\)\([0-9]+\).*') + + topo.config.set(LOG_ACCESS_LEVEL, default_log_level) + + +@pytest.mark.xfail(ds_is_older('1.4.0'), reason="May fail on 1.3.x because of bug 1358706") +@pytest.mark.bz1358706 +@pytest.mark.ds49029 +def test_internal_log_server_level_4(topology_st, clean_access_logs): + """Tests server-initiated internal operations + :id: a3500e47-d941-4575-b399-e3f4b49bc4b6 + :setup: Standalone instance + :steps: + 1. Set nsslapd-plugin-logging to on + 2. Configure access log level to only 4 + 3. Check the access logs, it should contain info about MOD operation of cn=config and other + internal operations should have the conn field set to Internal + and all values inside parenthesis set to 0. + :expectedresults: + 1. Operation should be successful + 2. Operation should be successful + 3. 
Access log should contain correct internal log formats with cn=config modification: + "(Internal) op=2(1)(1)" + "conn=Internal(0)" + """ + + topo = topology_st.standalone + default_log_level = topo.config.get_attr_val_utf8(LOG_ACCESS_LEVEL) + + log.info('Set nsslapd-plugin-logging to on') + topo.config.set(PLUGIN_LOGGING, 'ON') + + log.info('Configure access log level to 4') + access_log_level = '4' + topo.config.set(LOG_ACCESS_LEVEL, access_log_level) + + log.info('Restart the server to flush the logs') + topo.restart() + + try: + # These comments contain lines we are trying to find without regex (the op numbers are just examples) + log.info("Check if access log contains internal MOD operation in correct format") + # (Internal) op=2(2)(1) SRCH base="cn=config + assert topo.ds_access_log.match(r'.*\(Internal\) op=[0-9]+\([0-9]+\)\([0-9]+\) SRCH base="cn=config.*') + # (Internal) op=2(2)(1) RESULT err=0 tag=48 nentries=1 + assert topo.ds_access_log.match(r'.*\(Internal\) op=[0-9]+\([0-9]+\)\([0-9]+\) RESULT err=0 tag=48 nentries=1.*') + + log.info("Check if the other internal operations have the correct format") + # conn=Internal(0) op=0 + assert topo.ds_access_log.match(r'.*conn=Internal\([0-9]+\) op=[0-9]+\([0-9]+\)\([0-9]+\).*') + finally: + topo.config.set(LOG_ACCESS_LEVEL, default_log_level) + + +@pytest.mark.xfail(ds_is_older('1.4.0'), reason="May fail on 1.3.x because of bug 1358706") +@pytest.mark.bz1358706 +@pytest.mark.ds49029 +def test_internal_log_level_260(topology_st, add_user_log_level_260): + """Tests client initiated operations when automember plugin is enabled + :id: e68a303e-c037-42b2-a5a0-fbea27c338a9 + :setup: Standalone instance with internal operation + logging on and nsslapd-plugin-logging to on + :steps: + 1. Configure access log level to 260 (4 + 256) + 2. Set nsslapd-plugin-logging to on + 3. Enable Referential Integrity and automember plugins + 4. Restart the server + 5. Add a test group + 6. Add a test user and add it as member of the test group + 7. Rename the test user + 8. Delete the test user + 9. Check the access logs for nested internal operation logs + :expectedresults: + 1. Operation should be successful + 2. Operation should be successful + 3. Operation should be successful + 4. Operation should be successful + 5. Operation should be successful + 6. Operation should be successful + 7. Operation should be successful + 8. Operation should be successful + 9. 
Access log should contain internal info about operations of the user + """ + + topo = topology_st.standalone + + log.info('Restart the server to flush the logs') + topo.restart() + + # These comments contain lines we are trying to find without regex (the op numbers are just examples) + log.info("Check the access logs for ADD operation of the user") + # op=10 ADD dn="uid=test_user_777,ou=topology_st, branch1,dc=example,dc=com" + assert topo.ds_access_log.match(r'.*op=[0-9]+ ADD dn="uid=test_user_777,ou=branch1,dc=example,dc=com".*') + # (Internal) op=10(1)(1) MOD dn="cn=group,ou=Groups,dc=example,dc=com" + assert topo.ds_access_log.match(r'.*\(Internal\) op=[0-9]+\([0-9]+\)\([0-9]+\) ' + r'MOD dn="cn=group,ou=Groups,dc=example,dc=com".*') + # (Internal) op=10(1)(2) SRCH base="cn=group,ou=Groups,dc=example,dc=com" + assert topo.ds_access_log.match(r'.*\(Internal\) op=[0-9]+\([0-9]+\)\([0-9]+\) SRCH base="cn=group,' + r'ou=Groups,dc=example,dc=com".*') + # (Internal) op=10(1)(2) RESULT err=0 tag=48 nentries=1 + assert topo.ds_access_log.match(r'.*\(Internal\) op=[0-9]+\([0-9]+\)\([0-9]+\) RESULT err=0 tag=48 nentries=1*') + # (Internal) op=10(1)(1) RESULT err=0 tag=48 + assert topo.ds_access_log.match(r'.*\(Internal\) op=[0-9]+\([0-9]+\)\([0-9]+\) RESULT err=0 tag=48.*') + # op=10 RESULT err=0 tag=105 + assert topo.ds_access_log.match(r'.*op=[0-9]+ RESULT err=0 tag=105.*') + + log.info("Check the access logs for MOD operation of the user") + # op=12 MODRDN dn="uid=test_user_777,ou=branch1,dc=example,dc=com" ' + # 'newrdn="uid=new_test_user_777" newsuperior="dc=example,dc=com" + assert topo.ds_access_log.match(r'.*op=[0-9]+ MODRDN dn="uid=test_user_777,ou=branch1,dc=example,dc=com" ' + 'newrdn="uid=new_test_user_777" newsuperior="dc=example,dc=com".*') + # (Internal) op=12(1)(1) SRCH base="uid=test_user_777, ou=branch1,dc=example,dc=com" + assert topo.ds_access_log.match(r'.*\(Internal\) op=[0-9]+\([0-9]+\)\([0-9]+\) SRCH base="uid=test_user_777,' + 'ou=branch1,dc=example,dc=com".*') + # (Internal) op=12(1)(1) RESULT err=0 tag=48 nentries=1 + assert topo.ds_access_log.match(r'.*\(Internal\) op=[0-9]+\([0-9]+\)\([0-9]+\) RESULT err=0 tag=48 nentries=1.*') + # op=12 RESULT err=0 tag=109 + assert topo.ds_access_log.match(r'.*op=[0-9]+ RESULT err=0 tag=109.*') + + log.info("Check the access logs for DEL operation of the user") + # op=15 DEL dn="uid=new_test_user_777,dc=example,dc=com" + assert topo.ds_access_log.match(r'.*op=[0-9]+ DEL dn="uid=new_test_user_777,dc=example,dc=com".*') + # (Internal) op=15(1)(1) SRCH base="uid=new_test_user_777, dc=example,dc=com" + assert topo.ds_access_log.match(r'.*\(Internal\) op=[0-9]+\([0-9]+\)\([0-9]+\) SRCH base="uid=new_test_user_777,' + 'dc=example,dc=com".*') + # (Internal) op=15(1)(1) RESULT err=0 tag=48 nentries=1 + assert topo.ds_access_log.match(r'.*\(Internal\) op=[0-9]+\([0-9]+\)\([0-9]+\) RESULT err=0 tag=48 nentries=1.*') + # op=15 RESULT err=0 tag=107 + assert topo.ds_access_log.match(r'.*op=[0-9]+ RESULT err=0 tag=107.*') + + log.info("Check if the other internal operations have the correct format") + # conn=Internal(0) op=0 + assert topo.ds_access_log.match(r'.*conn=Internal\([0-9]+\) op=[0-9]+\([0-9]+\)\([0-9]+\).*') + + +@pytest.mark.xfail(ds_is_older('1.4.0'), reason="May fail on 1.3.x because of bug 1358706") +@pytest.mark.bz1358706 +@pytest.mark.ds49029 +def test_internal_log_level_131076(topology_st, add_user_log_level_131076): + """Tests client-initiated operations while referential integrity plugin is enabled + :id: 
44836ac9-dabd-4a8c-abd5-ecd7c2509739 + :setup: Standalone instance + Configure access log level to - 131072 + 4 + Set nsslapd-plugin-logging to on + :steps: + 1. Configure access log level to 131076 + 2. Set nsslapd-plugin-logging to on + 3. Enable Referential Integrity and automember plugins + 4. Restart the server + 5. Add a test group + 6. Add a test user and add it as member of the test group + 7. Rename the test user + 8. Delete the test user + 9. Check the access logs for nested internal operation logs + :expectedresults: + 1. Operation should be successful + 2. Operation should be successful + 3. Operation should be successful + 4. Operation should be successful + 5. Operation should be successful + 6. Operation should be successful + 7. Operation should be successful + 8. Operation should be successful + 9. Access log should contain internal info about operations of the user + """ + + topo = topology_st.standalone + + log.info('Restart the server to flush the logs') + topo.restart() + + # These comments contain lines we are trying to find without regex (the op numbers are just examples) + log.info("Check the access logs for ADD operation of the user") + # op=10 ADD dn="uid=test_user_777,ou=branch1,dc=example,dc=com" + assert not topo.ds_access_log.match(r'.*op=[0-9]+ ADD dn="uid=test_user_777,ou=branch1,dc=example,dc=com".*') + # (Internal) op=10(1)(1) MOD dn="cn=group,ou=Groups,dc=example,dc=com" + assert topo.ds_access_log.match(r'.*\(Internal\) op=[0-9]+\([0-9]+\)\([0-9]+\) ' + r'MOD dn="cn=group,ou=Groups,dc=example,dc=com".*') + # (Internal) op=10(1)(2) SRCH base="cn=group,ou=Groups,dc=example,dc=com" + assert topo.ds_access_log.match(r'.*\(Internal\) op=[0-9]+\([0-9]+\)\([0-9]+\) ' + r'SRCH base="cn=group,ou=Groups,dc=example,dc=com".*') + # (Internal) op=10(1)(2) RESULT err=0 tag=48 nentries=1*') + assert topo.ds_access_log.match(r'.*\(Internal\) op=[0-9]+\([0-9]+\)\([0-9]+\) RESULT err=0 tag=48 nentries=1*') + # (Internal) op=10(1)(1) RESULT err=0 tag=48 + assert topo.ds_access_log.match(r'.*\(Internal\) op=[0-9]+\([0-9]+\)\([0-9]+\) RESULT err=0 tag=48.*') + # op=10 RESULT err=0 tag=105 + assert not topo.ds_access_log.match(r'.*op=[0-9]+ RESULT err=0 tag=105.*') + + log.info("Check the access logs for MOD operation of the user") + # op=12 MODRDN dn="uid=test_user_777,ou=branch1,dc=example,dc=com" ' + # 'newrdn="uid=new_test_user_777" newsuperior="dc=example,dc=com" + assert not topo.ds_access_log.match(r'.*op=[0-9]+ MODRDN dn="uid=test_user_777,ou=branch1,dc=example,dc=com" ' + 'newrdn="uid=new_test_user_777" newsuperior="dc=example,dc=com".*') + # (Internal) op=12(1)(1) SRCH base="uid=test_user_777, ou=branch1,dc=example,dc=com" + assert topo.ds_access_log.match(r'.*\(Internal\) op=[0-9]+\([0-9]+\)\([0-9]+\) SRCH base="uid=test_user_777,' + 'ou=branch1,dc=example,dc=com".*') + # (Internal) op=12(1)(1) RESULT err=0 tag=48 nentries=1 + assert topo.ds_access_log.match(r'.*\(Internal\) op=[0-9]+\([0-9]+\)\([0-9]+\) RESULT err=0 tag=48 nentries=1.*') + # op=12 RESULT err=0 tag=109 + assert not topo.ds_access_log.match(r'.*op=[0-9]+ RESULT err=0 tag=109.*') + + log.info("Check the access logs for DEL operation of the user") + # op=15 DEL dn="uid=new_test_user_777,dc=example,dc=com" + assert not topo.ds_access_log.match(r'.*op=[0-9]+ DEL dn="uid=new_test_user_777,dc=example,dc=com".*') + # (Internal) op=15(1)(1) SRCH base="uid=new_test_user_777, dc=example,dc=com" + assert topo.ds_access_log.match(r'.*\(Internal\) op=[0-9]+\([0-9]+\)\([0-9]+\) SRCH base="uid=new_test_user_777,' 
+ 'dc=example,dc=com".*') + # (Internal) op=15(1)(1) RESULT err=0 tag=48 nentries=1 + assert topo.ds_access_log.match(r'.*\(Internal\) op=[0-9]+\([0-9]+\)\([0-9]+\) RESULT err=0 tag=48 nentries=1.*') + # op=15 RESULT err=0 tag=107 + assert not topo.ds_access_log.match(r'.*op=[0-9]+ RESULT err=0 tag=107.*') + + log.info("Check if the other internal operations have the correct format") + # conn=Internal(0) op=0 + assert topo.ds_access_log.match(r'.*conn=Internal\([0-9]+\) op=[0-9]+\([0-9]+\)\([0-9]+\).*') + + +@pytest.mark.xfail(ds_is_older('1.4.0'), reason="May fail on 1.3.x because of bug 1358706") +@pytest.mark.bz1358706 +@pytest.mark.ds49029 +def test_internal_log_level_516(topology_st, add_user_log_level_516): + """Tests client initiated operations when referential integrity plugin is enabled + :id: bee1d681-763d-4fa5-aca2-569cf93f8b71 + :setup: Standalone instance + Configure access log level to - 512+4 + Set nsslapd-plugin-logging to on + :steps: + 1. Configure access log level to 516 + 2. Set nsslapd-plugin-logging to on + 3. Enable Referential Integrity and automember plugins + 4. Restart the server + 5. Add a test group + 6. Add a test user and add it as member of the test group + 7. Rename the test user + 8. Delete the test user + 9. Check the access logs for nested internal operation logs + :expectedresults: + 1. Operation should be successful + 2. Operation should be successful + 3. Operation should be successful + 4. Operation should be successful + 5. Operation should be successful + 6. Operation should be successful + 7. Operation should be successful + 8. Operation should be successful + 9. Access log should contain internal info about operations of the user + """ + + topo = topology_st.standalone + + log.info('Restart the server to flush the logs') + topo.restart() + + # These comments contain lines we are trying to find without regex (the op numbers are just examples) + log.info("Check the access logs for ADD operation of the user") + # op=10 ADD dn="uid=test_user_777,ou=branch1,dc=example,dc=com" + assert not topo.ds_access_log.match(r'.*op=[0-9]+ ADD dn="uid=test_user_777,ou=branch1,dc=example,dc=com".*') + # (Internal) op=10(1)(1) MOD dn="cn=group,ou=Groups,dc=example,dc=com" + assert topo.ds_access_log.match(r'.*\(Internal\) op=[0-9]+\([0-9]+\)\([0-9]+\) ' + r'MOD dn="cn=group,ou=Groups,dc=example,dc=com".*') + # (Internal) op=10(1)(2) SRCH base="cn=group,ou=Groups,dc=example,dc=com" + assert topo.ds_access_log.match(r'.*\(Internal\) op=[0-9]+\([0-9]+\)\([0-9]+\) ' + r'SRCH base="cn=group,ou=Groups,dc=example,dc=com".*') + # (Internal) op=10(1)(2) ENTRY dn="cn=group,ou=Groups,dc=example,dc=com" + assert topo.ds_access_log.match(r'.*\(Internal\) op=[0-9]+\([0-9]+\)\([0-9]+\) ' + r'ENTRY dn="cn=group,ou=Groups,dc=example,dc=com".*') + # (Internal) op=10(1)(2) RESULT err=0 tag=48 nentries=1*') + assert topo.ds_access_log.match(r'.*\(Internal\) op=[0-9]+\([0-9]+\)\([0-9]+\) RESULT err=0 tag=48 nentries=1*') + # (Internal) op=10(1)(1) RESULT err=0 tag=48 + assert topo.ds_access_log.match(r'.*\(Internal\) op=[0-9]+\([0-9]+\)\([0-9]+\) RESULT err=0 tag=48.*') + # op=10 RESULT err=0 tag=105 + assert not topo.ds_access_log.match(r'.*op=[0-9]+ RESULT err=0 tag=105.*') + + log.info("Check the access logs for MOD operation of the user") + # op=12 MODRDN dn="uid=test_user_777,ou=branch1,dc=example,dc=com" ' + # 'newrdn="uid=new_test_user_777" newsuperior="dc=example,dc=com" + assert not topo.ds_access_log.match(r'.*op=[0-9]+ MODRDN 
dn="uid=test_user_777,ou=branch1,dc=example,dc=com" ' + 'newrdn="uid=new_test_user_777" newsuperior="dc=example,dc=com".*') + # Internal) op=12(1)(1) SRCH base="uid=test_user_777, ou=branch1,dc=example,dc=com" + assert topo.ds_access_log.match(r'.*\(Internal\) op=[0-9]+\([0-9]+\)\([0-9]+\) SRCH base="uid=test_user_777,' + 'ou=branch1,dc=example,dc=com".*') + # (Internal) op=12(1)(1) ENTRY dn="uid=test_user_777, ou=branch1,dc=example,dc=com" + assert topo.ds_access_log.match(r'.*\(Internal\) op=[0-9]+\([0-9]+\)\([0-9]+\) ENTRY dn="uid=test_user_777,' + 'ou=branch1,dc=example,dc=com".*') + # (Internal) op=12(1)(1) RESULT err=0 tag=48 nentries=1 + assert topo.ds_access_log.match(r'.*\(Internal\) op=[0-9]+\([0-9]+\)\([0-9]+\) RESULT err=0 tag=48 nentries=1.*') + # op=12 RESULT err=0 tag=109 + assert not topo.ds_access_log.match(r'.*op=[0-9]+ RESULT err=0 tag=109.*') + + log.info("Check the access logs for DEL operation of the user") + # op=15 DEL dn="uid=new_test_user_777,dc=example,dc=com" + assert not topo.ds_access_log.match(r'.*op=[0-9]+ DEL dn="uid=new_test_user_777,dc=example,dc=com".*') + # (Internal) op=15(1)(1) SRCH base="uid=new_test_user_777, dc=example,dc=com" + assert topo.ds_access_log.match(r'.*\(Internal\) op=[0-9]+\([0-9]+\)\([0-9]+\) SRCH base="uid=new_test_user_777,' + 'dc=example,dc=com".*') + # (Internal) op=15(1)(1) ENTRY dn="uid=new_test_user_777, dc=example,dc=com" + assert topo.ds_access_log.match(r'.*\(Internal\) op=[0-9]+\([0-9]+\)\([0-9]+\) ENTRY dn="uid=new_test_user_777,' + 'dc=example,dc=com".*') + # (Internal) op=15(1)(1) RESULT err=0 tag=48 nentries=1 + assert topo.ds_access_log.match(r'.*\(Internal\) op=[0-9]+\([0-9]+\)\([0-9]+\) RESULT err=0 tag=48 nentries=1.*') + # op=15 RESULT err=0 tag=107 + assert not topo.ds_access_log.match(r'.*op=[0-9]+ RESULT err=0 tag=107.*') + + log.info("Check if the other internal operations have the correct format") + # conn=Internal(0) op=0 + assert topo.ds_access_log.match(r'.*conn=Internal\([0-9]+\) op=[0-9]+\([0-9]+\)\([0-9]+\).*') + + +@pytest.mark.skipif(ds_is_older('1.4.2.0'), reason="Not implemented") +@pytest.mark.bz1358706 +@pytest.mark.ds49232 +def test_access_log_truncated_search_message(topology_st, clean_access_logs): + """Tests that the access log message is properly truncated when the message is too long + + :id: 0a9af37d-3311-4a2f-ac0a-9a1c631aaf27 + :setup: Standalone instance + :steps: + 1. Make a search with a 2048+ characters basedn, filter and attribute list + 2. Check the access log has the message and it's truncated + :expectedresults: + 1. Operation should be successful + 2. 
Access log should contain truncated basedn, filter and attribute list + """ + + topo = topology_st.standalone + + large_str_base = "".join("cn=test," for _ in range(512)) + large_str_filter = "".join("(cn=test)" for _ in range(512)) + users = UserAccounts(topo, f'{large_str_base}dc=ending') + users._list_attrlist = [f'cn{i}' for i in range(512)] + log.info("Make a search") + users.filter(f'(|(objectclass=tester){large_str_filter}(cn=ending))') + + log.info('Restart the server to flush the logs') + topo.restart() + + assert topo.ds_access_log.match(r'.*cn=test,cn=test,.*') + assert topo.ds_access_log.match(r'.*objectClass=tester.*') + assert topo.ds_access_log.match(r'.*cn10.*') + assert not topo.ds_access_log.match(r'.*dc=ending.*') + assert not topo.ds_access_log.match(r'.*cn=ending.*') + assert not topo.ds_access_log.match(r'.*cn500.*') + + + +@pytest.mark.xfail(ds_is_older('1.4.2.0'), reason="May fail because of bug 1732053") +@pytest.mark.bz1732053 +@pytest.mark.ds50510 +def test_etime_at_border_of_second(topology_st, clean_access_logs): + topo = topology_st.standalone + + + prog = os.path.join(topo.ds_paths.bin_dir, 'rsearch') + + cmd = [prog] + + # base search + cmd.extend(['-s', DN_CONFIG]) + + # scope of the search + cmd.extend(['-S', '0']) + + # host / port + cmd.extend(['-h', HOST_STANDALONE]) + cmd.extend(['-p', str(PORT_STANDALONE)]) + + # bound as DM to make it faster + cmd.extend(['-D', DN_DM]) + cmd.extend(['-w', PASSWORD]) + + # filter + cmd.extend(['-f', "(cn=config)"]) + + # 2 samples SRCH + cmd.extend(['-C', "2"]) + + output = subprocess.check_output(cmd) + topo.stop() + + # No etime with 0.199xxx (everything should be few ms) + invalid_etime = topo.ds_access_log.match(r'.*etime=0\.19.*') + if invalid_etime: + for i in range(len(invalid_etime)): + log.error('It remains invalid or weird etime: %s' % invalid_etime[i]) + assert not invalid_etime + + +@pytest.mark.xfail(ds_is_older('1.3.10.1'), reason="May fail because of bug 1662461") +@pytest.mark.bz1662461 +@pytest.mark.ds50428 +@pytest.mark.ds49969 +def test_log_base_dn_when_invalid_attr_request(topology_st): + """Test that DS correctly logs the base dn when a search with invalid attribute request is performed + + :id: 859de962-c261-4ffb-8705-97bceab1ba2c + :setup: Standalone instance + :steps: + 1. Disable the accesslog-logbuffering config parameter + 2. Delete the previous access log + 3. Perform a base search on the DEFAULT_SUFFIX, using invalid "" "" attribute request + 4. Check the access log file for 'invalid attribute request' + 5. Check the access log file for 'SRCH base="\(null\)"' + 6. Check the access log file for 'SRCH base="DEFAULT_SUFFIX"' + :expectedresults: + 1. Operations are visible in the access log in real time + 2. Fresh new access log is created + 3. The search operation raises a Protocol error + 4. The access log should have an 'invalid attribute request' message + 5. The access log should not have "\(null\)" as value for the Search base dn + 6. 
The access log should have the value of DEFAULT_SUFFIX as Search base dn + """ + + entry = DSLdapObject(topology_st.standalone, DEFAULT_SUFFIX) + + log.info('Set accesslog logbuffering to off to get the log in real time') + topology_st.standalone.config.set('nsslapd-accesslog-logbuffering', 'off') + + log.info('delete the previous access logs to get a fresh new one') + topology_st.standalone.deleteAccessLogs() + + log.info("Search the default suffix, with invalid '\"\" \"\"' attribute request") + log.info("A Protocol error exception should be raised, see https://pagure.io/389-ds-base/issue/49969") + # A ldap.PROTOCOL_ERROR exception is expected + with pytest.raises(ldap.PROTOCOL_ERROR): + assert entry.get_attrs_vals_utf8(['', '']) + + # Search for appropriate messages in the access log + log.info('Check the access logs for correct messages') + # We should find the 'invalid attribute request' information + assert topology_st.standalone.ds_access_log.match(r'.*invalid attribute request.*') + # We should not find a "(null)" base dn mention + assert not topology_st.standalone.ds_access_log.match(r'.*SRCH base="\(null\)".*') + # We should find the base dn for the search + assert topology_st.standalone.ds_access_log.match(r'.*SRCH base="{}".*'.format(DEFAULT_SUFFIX)) + + +@pytest.mark.xfail(ds_is_older('1.3.8', '1.4.2'), reason="May fail because of bug 1676948") +@pytest.mark.bz1676948 +@pytest.mark.ds50536 +def test_audit_log_rotate_and_check_string(topology_st, clean_access_logs, set_audit_log_config_values_to_rotate): + """Version string should be logged only once at the top of audit log + after it is rotated. + + :id: 14dffb22-2f9c-11e9-8a03-54e1ad30572c + + :setup: Standalone instance + + :steps: + 1. Set nsslapd-auditlog-logging-enabled: on + 2. Set nsslapd-auditlog-maxlogsize: 1 + 3. Do modifications to the entry, until audit log file is rotated + 4. Check audit logs + + :expectedresults: + 1. Attribute nsslapd-auditlog-logging-enabled should be set to on + 2. Attribute nsslapd-auditlog-maxlogsize should be set to 1 + 3. Audit file should grow till 1MB and then should be rotated + 4. 
Audit file log should contain version string only once at the top + """ + + standalone = topology_st.standalone + search_ds = '389-Directory' + + users = UserAccounts(standalone, DEFAULT_SUFFIX) + user = users.create(properties={ + 'uid': 'test_audit_log', + 'cn': 'test', + 'sn': 'user', + 'uidNumber': '1000', + 'gidNumber': '1000', + 'homeDirectory': '/home/test', + }) + + log.info('Doing modifications to rotate audit log') + audit_log = standalone.ds_paths.audit_log + while len(glob.glob(audit_log + '*')) == 2: + user.replace('description', 'test'*100) + + log.info('Doing one more modification just in case') + user.replace('description', 'test2'*100) + + standalone.stop() + + count = 0 + with open(audit_log) as f: + log.info('Check that DS string is present on first line') + assert search_ds in f.readline() + f.seek(0) + + log.info('Check that DS string is present only once') + for line in f.readlines(): + if search_ds in line: + count += 1 + assert count == 1 + + +if __name__ == '__main__': + # Run isolated + # -s for DEBUG mode + CURRENT_FILE = os.path.realpath(__file__) + pytest.main("-s %s" % CURRENT_FILE) diff --git a/dirsrvtests/tests/suites/ds_logs/regression_test.py b/dirsrvtests/tests/suites/ds_logs/regression_test.py new file mode 100644 index 0000000..e2de21a --- /dev/null +++ b/dirsrvtests/tests/suites/ds_logs/regression_test.py @@ -0,0 +1,79 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2017 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. +# --- END COPYRIGHT BLOCK --- +# + +import pytest +from lib389.dseldif import DSEldif +from lib389._constants import DN_CONFIG, LOG_REPLICA, LOG_DEFAULT, LOG_TRACE, LOG_ACL +from lib389.utils import os, logging +from lib389.topologies import topology_st as topo + +pytestmark = pytest.mark.tier2 + +DEBUGGING = os.getenv("DEBUGGING", default=False) +if DEBUGGING: + logging.getLogger(__name__).setLevel(logging.DEBUG) +else: + logging.getLogger(__name__).setLevel(logging.INFO) +log = logging.getLogger(__name__) + + +@pytest.mark.bz1460718 +@pytest.mark.parametrize("log_level", [(LOG_REPLICA + LOG_DEFAULT), (LOG_ACL + LOG_DEFAULT), (LOG_TRACE + LOG_DEFAULT)]) +def test_default_loglevel_stripped(topo, log_level): + """The default log level 16384 is stripped from the log level returned to a client + + :id: c300f8f1-aa11-4621-b124-e2be51930a6b + :parametrized: yes + :setup: Standalone instance + + :steps: 1. Change the error log level to the default and custom value. + 2. Check if the server returns the new value. + + :expectedresults: + 1. Changing the error log level should be successful. + 2. Server should return the new log level. + """ + + assert topo.standalone.config.set('nsslapd-errorlog-level', str(log_level)) + assert topo.standalone.config.get_attr_val_int('nsslapd-errorlog-level') == log_level + + +@pytest.mark.bz1460718 +def test_dse_config_loglevel_error(topo): + """Manually setting nsslapd-errorlog-level to 64 in dse.ldif throws error + + :id: 0eeefa17-ec1c-4208-8e7b-44d8fbc38f10 + + :setup: Standalone instance + + :steps: 1. Stop the server, edit dse.ldif file and change nsslapd-errorlog-level value to 64 + 2. Start the server and observe the error logs. + + :expectedresults: + 1. Server should be successfully stopped and nsslapd-errorlog-level value should be changed. + 2. Server should be successfully started without any errors being reported in the logs. 
+ """ + + topo.standalone.stop(timeout=10) + dse_ldif = DSEldif(topo.standalone) + try: + dse_ldif.replace(DN_CONFIG, 'nsslapd-errorlog-level', 64) + except: + log.error('Failed to replace cn=config values of nsslapd-errorlog-level') + raise + topo.standalone.start(timeout=10) + assert not topo.standalone.ds_error_log.match( + '.*nsslapd-errorlog-level: ignoring 64 \\(since -d 266354688 was given on the command line\\).*') + + +if __name__ == '__main__': + # Run isolated + # -s for DEBUG mode + CURRENT_FILE = os.path.realpath(__file__) + pytest.main("-s %s" % CURRENT_FILE) diff --git a/dirsrvtests/tests/suites/ds_tools/__init__.py b/dirsrvtests/tests/suites/ds_tools/__init__.py new file mode 100644 index 0000000..10d3805 --- /dev/null +++ b/dirsrvtests/tests/suites/ds_tools/__init__.py @@ -0,0 +1,4 @@ + +""" + :Requirement: 389-ds-base: Directory Server Tools +""" diff --git a/dirsrvtests/tests/suites/ds_tools/logpipe_test.py b/dirsrvtests/tests/suites/ds_tools/logpipe_test.py new file mode 100644 index 0000000..4f0e65d --- /dev/null +++ b/dirsrvtests/tests/suites/ds_tools/logpipe_test.py @@ -0,0 +1,78 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2017 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. +# --- END COPYRIGHT BLOCK --- +# +import pytest +import subprocess +from lib389.utils import * +from lib389.topologies import topology_st as topo + +pytestmark = pytest.mark.tier1 + +DEBUGGING = os.getenv("DEBUGGING", default=False) +if DEBUGGING: + logging.getLogger(__name__).setLevel(logging.DEBUG) +else: + logging.getLogger(__name__).setLevel(logging.INFO) +log = logging.getLogger(__name__) + +SYS_TEST_USER = 'dirsrv_testuser' + + +@pytest.fixture(scope="module") +def sys_test_user(request): + """Creates and deletes a system test user""" + + cmd = ['/usr/sbin/useradd', SYS_TEST_USER] + + log.info('Add system test user - {}'.format(SYS_TEST_USER)) + try: + subprocess.call(cmd) + except subprocess.CalledProcessError as e: + log.exception('Failed to add user {} error {}'.format(SYS_TEST_USER, e.output)) + + def fin(): + cmd = ['/usr/sbin/userdel', SYS_TEST_USER] + + log.info('Delete system test user - {}'.format(SYS_TEST_USER)) + try: + subprocess.call(cmd) + except subprocess.CalledProcessError as e: + log.exception('Failed to delete user {} error {}'.format(SYS_TEST_USER, e.output)) + + request.addfinalizer(fin) + + +def test_user_permissions(topo, sys_test_user): + """Check permissions for usual user operations in log dir + + :id: 4e423cd5-300c-4df0-ab40-aec7e51c3be8 + :feature: ds-logpipe + :setup: Standalone instance + :steps: 1. Add a new user to the system + 2. Try to create a logpipe in the log directory with '-u' option specifying the user + 3. 
Delete the user + :expectedresults: Permission denied error happens + """ + + ds_logpipe_path = os.path.join(topo.standalone.ds_paths.bin_dir, 'ds-logpipe.py') + fakelogpipe_path = os.path.join(topo.standalone.ds_paths.log_dir, 'fakelog.pipe') + + # I think we need to add a function for this to lib389, when we will port the full test suite + cmd = [ds_logpipe_path, fakelogpipe_path, '-u', SYS_TEST_USER] + + log.info('Try to create a logpipe in the log directory with "-u" option specifying the user') + with pytest.raises(subprocess.CalledProcessError) as cp: + result = subprocess.check_output(cmd) + assert 'Permission denied' in result + + +if __name__ == '__main__': + # Run isolated + # -s for DEBUG mode + CURRENT_FILE = os.path.realpath(__file__) + pytest.main("-s %s" % CURRENT_FILE) diff --git a/dirsrvtests/tests/suites/ds_tools/replcheck_test.py b/dirsrvtests/tests/suites/ds_tools/replcheck_test.py new file mode 100644 index 0000000..010340a --- /dev/null +++ b/dirsrvtests/tests/suites/ds_tools/replcheck_test.py @@ -0,0 +1,502 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2018 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. +# --- END COPYRIGHT BLOCK --- +# +import pytest +import subprocess +from lib389.utils import * +from lib389.replica import Replicas, Replica, ReplicationManager +from lib389._constants import * +from lib389.config import CertmapLegacy +from lib389.idm.nscontainer import nsContainers +from lib389.idm.user import UserAccounts, TEST_USER_PROPERTIES +from lib389.idm.services import ServiceAccounts +from lib389.topologies import topology_m2 as topo + +pytestmark = pytest.mark.tier1 + +DEBUGGING = os.getenv("DEBUGGING", default=False) +if DEBUGGING: + logging.getLogger(__name__).setLevel(logging.DEBUG) +else: + logging.getLogger(__name__).setLevel(logging.INFO) +log = logging.getLogger(__name__) + + +def _create_container(inst, dn, name): + """Creates container entry""" + + conts = nsContainers(inst, dn) + cont = conts.create(properties={'cn': name}) + time.sleep(1) + return cont + + +def _delete_container(cont): + """Deletes container entry""" + + cont.delete() + time.sleep(1) + + +@pytest.fixture(scope="module") +def topo_tls_ldapi(topo): + """Enable TLS on both masters and reconfigure both agreements + to use TLS Client auth. Also, setup ldapi and export DB + """ + + m1 = topo.ms["master1"] + m2 = topo.ms["master2"] + # Create the certmap before we restart for enable_tls + cm_m1 = CertmapLegacy(m1) + cm_m2 = CertmapLegacy(m2) + + # We need to configure the same maps for both .... + certmaps = cm_m1.list() + certmaps['default']['DNComps'] = None + certmaps['default']['CmapLdapAttr'] = 'nsCertSubjectDN' + + cm_m1.set(certmaps) + cm_m2.set(certmaps) + + [i.enable_tls() for i in topo] + + # Create the replication dns + services = ServiceAccounts(m1, DEFAULT_SUFFIX) + repl_m1 = services.get('%s:%s' % (m1.host, m1.sslport)) + repl_m1.set('nsCertSubjectDN', m1.get_server_tls_subject()) + + repl_m2 = services.get('%s:%s' % (m2.host, m2.sslport)) + repl_m2.set('nsCertSubjectDN', m2.get_server_tls_subject()) + + # Check the replication is "done". 
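+ # Wait until changes from master1 have reached master2, so the agreements below are only switched to TLS client authentication once both masters are in sync.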
+ repl = ReplicationManager(DEFAULT_SUFFIX) + repl.wait_for_replication(m1, m2) + # Now change the auth type + + replica_m1 = Replicas(m1).get(DEFAULT_SUFFIX) + agmt_m1 = replica_m1.get_agreements().list()[0] + + agmt_m1.replace_many( + ('nsDS5ReplicaBindMethod', 'SSLCLIENTAUTH'), + ('nsDS5ReplicaTransportInfo', 'SSL'), + ('nsDS5ReplicaPort', '%s' % m2.sslport), + ) + agmt_m1.remove_all('nsDS5ReplicaBindDN') + + replica_m2 = Replicas(m2).get(DEFAULT_SUFFIX) + agmt_m2 = replica_m2.get_agreements().list()[0] + + agmt_m2.replace_many( + ('nsDS5ReplicaBindMethod', 'SSLCLIENTAUTH'), + ('nsDS5ReplicaTransportInfo', 'SSL'), + ('nsDS5ReplicaPort', '%s' % m1.sslport), + ) + agmt_m2.remove_all('nsDS5ReplicaBindDN') + + log.info("Export LDAPTLS_CACERTDIR env variable for ds-replcheck") + os.environ["LDAPTLS_CACERTDIR"] = m1.get_ssca_dir() + + for inst in topo: + inst.config.set('nsslapd-ldapilisten', 'on') + inst.config.set('nsslapd-ldapifilepath', '/var/run/slapd-{}.socket'.format(inst.serverid)) + inst.restart() + + repl.test_replication(m1, m2) + repl.test_replication(m2, m1) + + return topo + + +def replcheck_cmd_list(topo_tls_ldapi): + """Check ds-replcheck tool through ldap, ldaps, ldap with StartTLS, ldapi + and compare exported ldif files + """ + + m1 = topo_tls_ldapi.ms["master1"] + m2 = topo_tls_ldapi.ms["master2"] + + for inst in topo_tls_ldapi: + inst.stop() + inst.db2ldif(bename=DEFAULT_BENAME, suffixes=[DEFAULT_SUFFIX], excludeSuffixes=[], + encrypt=False, repl_data=True, outputfile='/tmp/export_{}.ldif'.format(inst.serverid)) + inst.start() + + ds_replcheck_path = os.path.join(m1.ds_paths.bin_dir, 'ds-replcheck') + + if ds_is_newer("1.4.1.2"): + replcheck_cmd = [[ds_replcheck_path, 'online', '-b', DEFAULT_SUFFIX, '-D', DN_DM, '-w', PW_DM, '-l', '1', + '-m', 'ldap://{}:{}'.format(m1.host, m1.port), '--conflicts', + '-r', 'ldap://{}:{}'.format(m2.host, m2.port)], + [ds_replcheck_path, 'online', '-b', DEFAULT_SUFFIX, '-D', DN_DM, '-w', PW_DM, '-l', '1', + '-m', 'ldaps://{}:{}'.format(m1.host, m1.sslport), '--conflicts', + '-r', 'ldaps://{}:{}'.format(m2.host, m2.sslport)], + [ds_replcheck_path, 'online', '-b', DEFAULT_SUFFIX, '-D', DN_DM, '-w', PW_DM, '-l', '1', + '-m', 'ldap://{}:{}'.format(m1.host, m1.port), '-Z', m1.get_ssca_dir(), + '-r', 'ldap://{}:{}'.format(m2.host, m2.port), '--conflicts'], + [ds_replcheck_path, 'online', '-b', DEFAULT_SUFFIX, '-D', DN_DM, '-w', PW_DM, '-l', '1', + '-m', 'ldapi://%2fvar%2frun%2fslapd-{}.socket'.format(m1.serverid), '--conflict', + '-r', 'ldapi://%2fvar%2frun%2fslapd-{}.socket'.format(m2.serverid)], + [ds_replcheck_path, 'offline', '-b', DEFAULT_SUFFIX, '--conflicts', '--rid', '1', + '-m', '/tmp/export_{}.ldif'.format(m1.serverid), + '-r', '/tmp/export_{}.ldif'.format(m2.serverid)]] + else: + replcheck_cmd = [[ds_replcheck_path, '-b', DEFAULT_SUFFIX, '-D', DN_DM, '-w', PW_DM, '-l', '1', + '-m', 'ldap://{}:{}'.format(m1.host, m1.port), '--conflicts', + '-r', 'ldap://{}:{}'.format(m2.host, m2.port)], + [ds_replcheck_path, '-b', DEFAULT_SUFFIX, '-D', DN_DM, '-w', PW_DM, '-l', '1', + '-m', 'ldaps://{}:{}'.format(m1.host, m1.sslport), '--conflicts', + '-r', 'ldaps://{}:{}'.format(m2.host, m2.sslport)], + [ds_replcheck_path, '-b', DEFAULT_SUFFIX, '-D', DN_DM, '-w', PW_DM, '-l', '1', + '-m', 'ldap://{}:{}'.format(m1.host, m1.port), '-Z', m1.get_ssca_dir(), + '-r', 'ldap://{}:{}'.format(m2.host, m2.port), '--conflicts'], + [ds_replcheck_path, '-b', DEFAULT_SUFFIX, '-D', DN_DM, '-w', PW_DM, '-l', '1', + '-m', 
'ldapi://%2fvar%2frun%2fslapd-{}.socket'.format(m1.serverid), '--conflict', + '-r', 'ldapi://%2fvar%2frun%2fslapd-{}.socket'.format(m2.serverid)], + [ds_replcheck_path, '-b', DEFAULT_SUFFIX, '-D', DN_DM, '-w', PW_DM, '--conflicts', + '-M', '/tmp/export_{}.ldif'.format(m1.serverid), + '-R', '/tmp/export_{}.ldif'.format(m2.serverid)]] + + return replcheck_cmd + +@pytest.mark.skipif(ds_is_older("1.4.1.2"), reason="Not implemented") +def test_state(topo_tls_ldapi): + """Check "state" report + + :id: 1cc6b28b-8a42-45fb-ab50-9552db0ac178 + :setup: Two master replication + :steps: + 1. Get the replication state value + 2. The state value is as expected + :expectedresults: + 1. It should be successful + 2. It should be successful + """ + m1 = topo_tls_ldapi.ms["master1"] + m2 = topo_tls_ldapi.ms["master2"] + ds_replcheck_path = os.path.join(m1.ds_paths.bin_dir, 'ds-replcheck') + + tool_cmd = [ds_replcheck_path, 'state', '-b', DEFAULT_SUFFIX, '-D', DN_DM, '-w', PW_DM, + '-m', 'ldaps://{}:{}'.format(m1.host, m1.sslport), + '-r', 'ldaps://{}:{}'.format(m2.host, m2.sslport)] + result = subprocess.check_output(tool_cmd, encoding='utf-8') + assert (result.rstrip() == "Replication State: Master and Replica are in perfect synchronization") + + +def test_check_ruv(topo_tls_ldapi): + """Check that the report has RUV + + :id: 1cc6b28b-8a42-45fb-ab50-9552db0ac179 + :setup: Two master replication + :steps: + 1. Get RUV from master and replica + 2. Generate the report + 3. Check that the RUV is mentioned in the report + :expectedresults: + 1. It should be successful + 2. It should be successful + 3. The RUV should be mentioned in the report + """ + + m1 = topo_tls_ldapi.ms["master1"] + + replicas_m1 = Replica(m1, DEFAULT_SUFFIX) + ruv_entries = replicas_m1.get_attr_vals_utf8('nsds50ruv') + + for tool_cmd in replcheck_cmd_list(topo_tls_ldapi): + result = subprocess.check_output(tool_cmd, encoding='utf-8') + assert all([ruv_entry in result for ruv_entry in ruv_entries]) + + +def test_missing_entries(topo_tls_ldapi): + """Check that the report has missing entries + + :id: f91b6798-6e6e-420a-ad2f-3222bb908b7d + :setup: Two master replication + :steps: + 1. Pause replication between master and replica + 2. Add two entries to master and two entries to replica + 3. Generate the report + 4. Check that the entries DN are mentioned in the report + :expectedresults: + 1. It should be successful + 2. It should be successful + 3. It should be successful + 4. The entries DN should be mentioned in the report + """ + + m1 = topo_tls_ldapi.ms["master1"] + m2 = topo_tls_ldapi.ms["master2"] + + try: + topo_tls_ldapi.pause_all_replicas() + users_m1 = UserAccounts(m1, DEFAULT_SUFFIX) + user0 = users_m1.create_test_user(1000) + user1 = users_m1.create_test_user(1001) + users_m2 = UserAccounts(m2, DEFAULT_SUFFIX) + user2 = users_m2.create_test_user(1002) + user3 = users_m2.create_test_user(1003) + + for tool_cmd in replcheck_cmd_list(topo_tls_ldapi): + result = subprocess.check_output(tool_cmd, encoding='utf-8').lower() + assert user0.dn.lower() in result + assert user1.dn.lower() in result + finally: + user0.delete() + user1.delete() + user2.delete() + user3.delete() + topo_tls_ldapi.resume_all_replicas() + + +def test_tombstones(topo_tls_ldapi): + """Check that the report mentions right number of tombstones + + :id: bd27de78-0046-431c-8240-a93052df1cdc + :setup: Two master replication + :steps: + 1. Add an entry to master and wait for replication + 2. Pause replication between master and replica + 3. 
Delete the entry from master + 4. Generate the report + 5. Check that we have different number of tombstones in the report + :expectedresults: + 1. It should be successful + 2. It should be successful + 3. It should be successful + 4. It should be successful + 5. It should be successful + """ + + m1 = topo_tls_ldapi.ms["master1"] + + try: + users_m1 = UserAccounts(m1, DEFAULT_SUFFIX) + user_m1 = users_m1.create(properties=TEST_USER_PROPERTIES) + time.sleep(1) + topo_tls_ldapi.pause_all_replicas() + user_m1.delete() + time.sleep(2) + + for tool_cmd in replcheck_cmd_list(topo_tls_ldapi): + result = subprocess.check_output(tool_cmd, encoding='utf-8').lower() + log.debug(result) + finally: + topo_tls_ldapi.resume_all_replicas() + + +def test_conflict_entries(topo_tls_ldapi): + """Check that the report has conflict entries + + :id: 4eda0c5d-0824-4cfd-896e-845faf49ddaf + :setup: Two master replication + :steps: + 1. Pause replication between master and replica + 2. Add two entries to master and two entries to replica + 3. Delete first entry from master + 4. Add a child to the first entry + 5. Resume replication between master and replica + 6. Generate the report + 7. Check that the entries DN are mentioned in the report + :expectedresults: + 1. It should be successful + 2. It should be successful + 3. It should be successful + 4. It should be successful + 5. It should be successful + 6. It should be successful + 7. The entries DN should be mentioned in the report + """ + + m1 = topo_tls_ldapi.ms["master1"] + m2 = topo_tls_ldapi.ms["master2"] + + topo_tls_ldapi.pause_all_replicas() + + _create_container(m1, DEFAULT_SUFFIX, 'conflict_parent0') + _create_container(m2, DEFAULT_SUFFIX, 'conflict_parent0') + cont_p_m1 = _create_container(m1, DEFAULT_SUFFIX, 'conflict_parent1') + cont_p_m2 = _create_container(m2, DEFAULT_SUFFIX, 'conflict_parent1') + _delete_container(cont_p_m1) + _create_container(m2, cont_p_m2.dn, 'conflict_child0') + + topo_tls_ldapi.resume_all_replicas() + time.sleep(5) + + for tool_cmd in replcheck_cmd_list(topo_tls_ldapi): + result = subprocess.check_output(tool_cmd, encoding='utf-8') + assert 'conflict_parent1' in result + + +def test_inconsistencies(topo_tls_ldapi): + """Check that the report mentions inconsistencies with attributes + + :id: c8fe3e84-b346-4969-8f5d-3462b643a1d2 + :setup: Two master replication + :steps: + 1. Add an entry to master and wait for replication + 2. Pause replication between master and replica + 3. Set different description attr values to master and replica + 4. Add telephoneNumber attribute to master and not to replica + 5. Generate the report + 6. Check that attribute values are mentioned in the report + 7. Generate the report with -i option to ignore some attributes + 8. Check that attribute values are mentioned in the report + :expectedresults: + 1. It should be successful + 2. It should be successful + 3. It should be successful + 4. It should be successful + 5. It should be successful + 6. The attribute values should be mentioned in the report + 7. It should be successful + 8. 
The attribute values should not be mentioned in the report + """ + + m1 = topo_tls_ldapi.ms["master1"] + m2 = topo_tls_ldapi.ms["master2"] + attr_m1 = "m1_inconsistency" + attr_m2 = "m2_inconsistency" + attr_first = "first ordered valued" + attr_second = "second ordered valued" + attr_m1_only = "123123123" + + try: + users_m1 = UserAccounts(m1, DEFAULT_SUFFIX) + users_m2 = UserAccounts(m2, DEFAULT_SUFFIX) + user_m1 = users_m1.create(properties=TEST_USER_PROPERTIES) + time.sleep(1) + user_m2 = users_m2.get(user_m1.rdn) + topo_tls_ldapi.pause_all_replicas() + user_m1.set("description", attr_m1) + user_m2.set("description", attr_m2) + user_m1.set("telephonenumber", attr_m1_only) + # Add the same multi-valued attrs, but out of order + user_m1.set("cn", [attr_first, attr_second]) + user_m2.set("cn", [attr_second, attr_first]) + time.sleep(2) + + for tool_cmd in replcheck_cmd_list(topo_tls_ldapi): + result = subprocess.check_output(tool_cmd, encoding='utf-8').lower() + assert attr_m1 in result + assert attr_m2 in result + assert attr_m1_only in result + if ds_is_newer("1.3.9.1", "1.4.1.2"): + assert attr_first not in result + assert attr_second not in result + # Ignore some attributes and check the output + tool_cmd.extend(['-i', '{},{}'.format('description', 'telephonenumber')]) + result = subprocess.check_output(tool_cmd, encoding='utf-8').lower() + assert attr_m1 not in result + assert attr_m2 not in result + assert attr_m1_only not in result + if ds_is_newer("1.3.9.1", "1.4.1.2"): + assert attr_first not in result + assert attr_second not in result + + finally: + topo_tls_ldapi.resume_all_replicas() + user_m1.delete() + + +def test_suffix_exists(topo_tls_ldapi): + """Check if wrong suffix is provided, server is giving Error: Failed + to validate suffix. + + :id: ce75debc-c07f-4e72-8787-8f99cbfaf1e2 + :setup: Two master replication + :steps: + 1. Run ds-replcheck with wrong suffix (Non Existing) + :expectedresults: + 1. It should be unsuccessful + """ + m1 = topo_tls_ldapi.ms["master1"] + m2 = topo_tls_ldapi.ms["master2"] + ds_replcheck_path = os.path.join(m1.ds_paths.bin_dir, 'ds-replcheck') + + if ds_is_newer("1.4.1.2"): + tool_cmd = [ds_replcheck_path, 'online', '-b', 'dc=test,dc=com', '-D', DN_DM, '-w', PW_DM, + '-m', 'ldaps://{}:{}'.format(m1.host, m1.sslport), + '-r', 'ldaps://{}:{}'.format(m2.host, m2.sslport)] + else: + tool_cmd = [ds_replcheck_path, '-b', 'dc=test,dc=com', '-D', DN_DM, '-w', PW_DM, + '-m', 'ldaps://{}:{}'.format(m1.host, m1.sslport), + '-r', 'ldaps://{}:{}'.format(m2.host, m2.sslport)] + + result1 = subprocess.Popen(tool_cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, encoding='utf-8') + result = result1.communicate() + assert "Failed to validate suffix" in result[0] + + +def test_check_missing_tombstones(topo_tls_ldapi): + """Check missing tombstone entries is not reported. + + :id: 93067a5a-416e-4243-9418-c4dfcf42e093 + :setup: Two master replication + :steps: + 1. Pause replication between master and replica + 2. Add and delete an entry on the master + 3. Run ds-replcheck + 4. Verify there are NO complaints about missing entries/tombstones + :expectedresults: + 1. It should be successful + 2. It should be successful + 3. It should be successful + 4. 
It should be successful + """ + m1 = topo_tls_ldapi.ms["master1"] + m2 = topo_tls_ldapi.ms["master2"] + + try: + topo_tls_ldapi.pause_all_replicas() + users_m1 = UserAccounts(m1, DEFAULT_SUFFIX) + user0 = users_m1.create_test_user(1000) + user0.delete() + for tool_cmd in replcheck_cmd_list(topo_tls_ldapi): + result = subprocess.check_output(tool_cmd, encoding='utf-8').lower() + assert "entries missing on replica" not in result + + finally: + topo_tls_ldapi.resume_all_replicas() + + +def test_dsreplcheck_with_password_file(topo_tls_ldapi, tmpdir): + """Check ds-replcheck works if password file is provided + with -y option. + + :id: 0d847ec7-6eaf-4cb5-a9c6-e4a5a1778f93 + :setup: Two master replication + :steps: + 1. Create a password file with the default password of the server. + 2. Run ds-replcheck with -y option (used to pass password file) + :expectedresults: + 1. It should be successful + 2. It should be successful + """ + m1 = topo_tls_ldapi.ms["master1"] + m2 = topo_tls_ldapi.ms["master2"] + + ds_replcheck_path = os.path.join(m1.ds_paths.bin_dir, 'ds-replcheck') + f = tmpdir.mkdir("my_dir").join("password_file.txt") + f.write(PW_DM) + + if ds_is_newer("1.4.1.2"): + tool_cmd = [ds_replcheck_path, 'online', '-b', DEFAULT_SUFFIX, '-D', DN_DM, '-y', f.strpath, + '-m', 'ldaps://{}:{}'.format(m1.host, m1.sslport), + '-r', 'ldaps://{}:{}'.format(m2.host, m2.sslport)] + else: + tool_cmd = [ds_replcheck_path, '-b', DEFAULT_SUFFIX, '-D', DN_DM, '-y', f.strpath, + '-m', 'ldaps://{}:{}'.format(m1.host, m1.sslport), + '-r', 'ldaps://{}:{}'.format(m2.host, m2.sslport)] + + subprocess.Popen(tool_cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, encoding='utf-8') + + +if __name__ == '__main__': + # Run isolated + # -s for DEBUG mode + CURRENT_FILE = os.path.realpath(__file__) diff --git a/dirsrvtests/tests/suites/dynamic_plugins/__init__.py b/dirsrvtests/tests/suites/dynamic_plugins/__init__.py new file mode 100644 index 0000000..8041ca2 --- /dev/null +++ b/dirsrvtests/tests/suites/dynamic_plugins/__init__.py @@ -0,0 +1,3 @@ +""" + :Requirement: 389-ds-base: Dynamic Plugins +""" diff --git a/dirsrvtests/tests/suites/dynamic_plugins/dynamic_plugins_test.py b/dirsrvtests/tests/suites/dynamic_plugins/dynamic_plugins_test.py new file mode 100644 index 0000000..8b03e30 --- /dev/null +++ b/dirsrvtests/tests/suites/dynamic_plugins/dynamic_plugins_test.py @@ -0,0 +1,441 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2016 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. +# --- END COPYRIGHT BLOCK --- +# +''' +Created on Dec 09, 2014 + +@author: mreynolds +''' +import logging + +import ldap.sasl +import pytest +from lib389.tasks import * +from lib389.replica import ReplicationManager +from lib389.config import LDBMConfig +from lib389._constants import * +from lib389.topologies import topology_m2 +from ..plugins import acceptance_test +from . import stress_tests + +pytestmark = pytest.mark.tier1 + +log = logging.getLogger(__name__) + + +def check_replicas(topology_m2): + """Check that replication is in sync and working""" + + m1 = topology_m2.ms["master1"] + m2 = topology_m2.ms["master2"] + + log.info('Checking if replication is in sync...') + repl = ReplicationManager(DEFAULT_SUFFIX) + repl.test_replication_topology(topology_m2) + # + # Verify the databases are identical. 
There should not be any "user, entry, employee" entries + # + log.info('Checking if the data is the same between the replicas...') + + # Check the master + try: + entries = m1.search_s(DEFAULT_SUFFIX, + ldap.SCOPE_SUBTREE, + "(|(uid=person*)(uid=entry*)(uid=employee*))") + if len(entries) > 0: + log.error('Master database has incorrect data set!\n') + assert False + except ldap.LDAPError as e: + log.fatal('Unable to search db on master: ' + e.message['desc']) + assert False + + # Check the consumer + try: + entries = m2.search_s(DEFAULT_SUFFIX, + ldap.SCOPE_SUBTREE, + "(|(uid=person*)(uid=entry*)(uid=employee*))") + if len(entries) > 0: + log.error('Consumer database is not consistent with master database') + assert False + except ldap.LDAPError as e: + log.fatal('Unable to search db on consumer: ' + e.message['desc']) + assert False + + log.info('Data is consistent across the replicas.\n') + + +def test_acceptance(topology_m2): + """Exercise each plugin and its main features, while + changing the configuration without restarting the server. + + :id: 96136538-0151-4b09-9933-0e0cbf2c786c + :setup: 2 Master Instances + :steps: + 1. Pause all replication + 2. Set nsslapd-dynamic-plugins to on + 3. Try to update LDBM config entry + 4. Go through all plugin basic functionality + 5. Resume replication + 6. Go through all plugin basic functionality again + 7. Check that data is in sync and replication is working + :expectedresults: + 1. Success + 2. Success + 3. Success + 4. Success + 5. Success + 6. Success + 7. Success + """ + + m1 = topology_m2.ms["master1"] + msg = ' (no replication)' + replication_run = False + + # First part of the test should be without replication + topology_m2.pause_all_replicas() + + # First enable dynamic plugins + m1.config.replace('nsslapd-dynamic-plugins', 'on') + + # Test that critical plugins can be updated even though the change might not be applied + ldbm_config = LDBMConfig(m1) + ldbm_config.replace('description', 'test') + + while True: + # First run the tests with replication disabled, then rerun them with replication set up + + ############################################################################ + # Test plugin functionality + ############################################################################ + + log.info('####################################################################') + log.info('Testing Dynamic Plugins Functionality' + msg + '...') + log.info('####################################################################\n') + + acceptance_test.check_all_plugins(topology_m2) + + log.info('####################################################################') + log.info('Successfully Tested Dynamic Plugins Functionality' + msg + '.') + log.info('####################################################################\n') + + if replication_run: + # We're done. + break + else: + log.info('Resume replication and run everything one more time') + topology_m2.resume_all_replicas() + + replication_run = True + msg = ' (replication enabled)' + time.sleep(1) + + ############################################################################ + # Check replication, and data are in sync + ############################################################################ + check_replicas(topology_m2) + + +def test_memory_corruption(topology_m2): + """Check the plugins for memory corruption issues while + dynamic plugins option is enabled + + :id: 96136538-0151-4b09-9933-0e0cbf2c7862 + :setup: 2 Master Instances + :steps: + 1. Pause all replication + 2. 
Set nsslapd-dynamic-plugins to on + 3. Try to update LDBM config entry + 4. Restart the plugin many times in a linked list fashion + restarting the previous and pre-previous plugins in the list of all plugins + 5. Run the functional test + 6. Repeat steps 4 and 5 for all plugins + 7. Resume replication + 8. Go through 4-6 steps once more + 9. Check that data is in sync and replication is working + :expectedresults: + 1. Success + 2. Success + 3. Success + 4. Success + 5. Success + 6. Success + 7. Success + 8. Success + 9. Success + """ + + + m1 = topology_m2.ms["master1"] + msg = ' (no replication)' + replication_run = False + + # First part of the test should be without replication + topology_m2.pause_all_replicas() + + # First enable dynamic plugins + m1.config.replace('nsslapd-dynamic-plugins', 'on') + + # Test that critical plugins can be updated even though the change might not be applied + ldbm_config = LDBMConfig(m1) + ldbm_config.replace('description', 'test') + + while True: + # First run the tests with replication disabled, then rerun them with replication set up + + ############################################################################ + # Test the stability by exercising the internal lists, callbacks, and task handlers + ############################################################################ + + log.info('####################################################################') + log.info('Testing Dynamic Plugins for Memory Corruption' + msg + '...') + log.info('####################################################################\n') + prev_plugin_test = None + prev_prev_plugin_test = None + + for plugin_test in acceptance_test.func_tests: + # + # Restart the plugin several times (and prev plugins) - work that linked list + # + plugin_test(topology_m2, "restart") + + if prev_prev_plugin_test: + prev_prev_plugin_test(topology_m2, "restart") + + plugin_test(topology_m2, "restart") + + if prev_plugin_test: + prev_plugin_test(topology_m2, "restart") + + plugin_test(topology_m2, "restart") + + # Now run the functional test + plugin_test(topology_m2, "dynamic") + + # Set the previous tests + if prev_plugin_test: + prev_prev_plugin_test = prev_plugin_test + prev_plugin_test = plugin_test + + log.info('####################################################################') + log.info('Successfully Tested Dynamic Plugins for Memory Corruption' + msg + '.') + log.info('####################################################################\n') + + if replication_run: + # We're done. + break + else: + log.info('Resume replication and run everything one more time') + topology_m2.resume_all_replicas() + + replication_run = True + msg = ' (replication enabled)' + time.sleep(1) + + ############################################################################ + # Check replication, and data are in sync + ############################################################################ + check_replicas(topology_m2) + +@pytest.mark.tier2 +def test_stress(topology_m2): + """Test plugins while under a big load. Perform the test 5 times + + :id: 96136538-0151-4b09-9933-0e0cbf2c7863 + :setup: 2 Master Instances + :steps: + 1. Pause all replication + 2. Set nsslapd-dynamic-plugins to on + 3. Try to update LDBM config entry + 4. Do one run through all tests + 5. Enable Referential integrity and MemberOf plugins + 6. Launch three new threads to add a bunch of users + 7. While we are adding users restart the MemberOf and + Linked Attributes plugins many times + 8. Wait for the 'adding' threads to complete + 9. 
Now launch three threads to delete the users + 10. Restart both the MemberOf, Referential integrity and + Linked Attributes plugins during these deletes + 11. Wait for the 'deleting' threads to complete + 12. Now make sure both the MemberOf and Referential integrity plugins still work correctly + 13. Cleanup the stress tests (delete the group entry) + 14. Perform 4-13 steps five times + 15. Resume replication + 16. Go through 4-14 steps once more + 17. Check that data in sync and replication is working + :expectedresults: + 1. Success + 2. Success + 3. Success + 4. Success + 5. Success + 6. Success + 7. Success + 8. Success + 9. Success + 10. Success + 11. Success + 12. Success + 13. Success + 14. Success + 15. Success + 16. Success + 17. Success + """ + + m1 = topology_m2.ms["master1"] + msg = ' (no replication)' + replication_run = False + stress_max_runs = 5 + + # First part of the test should be without replication + topology_m2.pause_all_replicas() + + # First enable dynamic plugins + m1.config.replace('nsslapd-dynamic-plugins', 'on') + + # Test that critical plugins can be updated even though the change might not be applied + ldbm_config = LDBMConfig(m1) + ldbm_config.replace('description', 'test') + + while True: + # First run the tests with replication disabled, then rerun them with replication set up + + log.info('Do one run through all tests ' + msg + '...') + acceptance_test.check_all_plugins(topology_m2) + + log.info('####################################################################') + log.info('Stressing Dynamic Plugins' + msg + '...') + log.info('####################################################################\n') + + stress_tests.configureMO(m1) + stress_tests.configureRI(m1) + + stress_count = 0 + while stress_count < stress_max_runs: + log.info('####################################################################') + log.info('Running stress test' + msg + '. Run (%d/%d)...' 
% (stress_count + 1, stress_max_runs)) + log.info('####################################################################\n') + + # Launch three new threads to add a bunch of users + add_users = stress_tests.AddUsers(m1, 'employee', True) + add_users.start() + add_users2 = stress_tests.AddUsers(m1, 'entry', True) + add_users2.start() + add_users3 = stress_tests.AddUsers(m1, 'person', True) + add_users3.start() + time.sleep(1) + + # While we are adding users restart the MO plugin and an idle plugin + m1.plugins.disable(name=PLUGIN_MEMBER_OF) + m1.plugins.enable(name=PLUGIN_MEMBER_OF) + time.sleep(1) + m1.plugins.disable(name=PLUGIN_MEMBER_OF) + time.sleep(1) + m1.plugins.enable(name=PLUGIN_MEMBER_OF) + m1.plugins.disable(name=PLUGIN_LINKED_ATTRS) + m1.plugins.enable(name=PLUGIN_LINKED_ATTRS) + time.sleep(1) + m1.plugins.disable(name=PLUGIN_MEMBER_OF) + m1.plugins.enable(name=PLUGIN_MEMBER_OF) + time.sleep(2) + m1.plugins.disable(name=PLUGIN_MEMBER_OF) + time.sleep(1) + m1.plugins.enable(name=PLUGIN_MEMBER_OF) + m1.plugins.disable(name=PLUGIN_LINKED_ATTRS) + m1.plugins.enable(name=PLUGIN_LINKED_ATTRS) + m1.plugins.disable(name=PLUGIN_MEMBER_OF) + time.sleep(1) + m1.plugins.enable(name=PLUGIN_MEMBER_OF) + m1.plugins.disable(name=PLUGIN_MEMBER_OF) + m1.plugins.enable(name=PLUGIN_MEMBER_OF) + + # Wait for the 'adding' threads to complete + add_users.join() + add_users2.join() + add_users3.join() + + # Now launch three threads to delete the users + del_users = stress_tests.DelUsers(m1, 'employee') + del_users.start() + del_users2 = stress_tests.DelUsers(m1, 'entry') + del_users2.start() + del_users3 = stress_tests.DelUsers(m1, 'person') + del_users3.start() + time.sleep(1) + + # Restart both the MO, RI plugins during these deletes, and an idle plugin + m1.plugins.disable(name=PLUGIN_REFER_INTEGRITY) + m1.plugins.disable(name=PLUGIN_MEMBER_OF) + m1.plugins.enable(name=PLUGIN_MEMBER_OF) + m1.plugins.enable(name=PLUGIN_REFER_INTEGRITY) + time.sleep(1) + m1.plugins.disable(name=PLUGIN_REFER_INTEGRITY) + time.sleep(1) + m1.plugins.disable(name=PLUGIN_MEMBER_OF) + time.sleep(1) + m1.plugins.enable(name=PLUGIN_MEMBER_OF) + time.sleep(1) + m1.plugins.enable(name=PLUGIN_REFER_INTEGRITY) + m1.plugins.disable(name=PLUGIN_LINKED_ATTRS) + m1.plugins.enable(name=PLUGIN_LINKED_ATTRS) + m1.plugins.disable(name=PLUGIN_REFER_INTEGRITY) + m1.plugins.disable(name=PLUGIN_MEMBER_OF) + m1.plugins.enable(name=PLUGIN_MEMBER_OF) + m1.plugins.enable(name=PLUGIN_REFER_INTEGRITY) + time.sleep(2) + m1.plugins.disable(name=PLUGIN_REFER_INTEGRITY) + time.sleep(1) + m1.plugins.disable(name=PLUGIN_MEMBER_OF) + time.sleep(1) + m1.plugins.enable(name=PLUGIN_MEMBER_OF) + time.sleep(1) + m1.plugins.enable(name=PLUGIN_REFER_INTEGRITY) + m1.plugins.disable(name=PLUGIN_LINKED_ATTRS) + m1.plugins.enable(name=PLUGIN_LINKED_ATTRS) + + # Wait for the 'deleting' threads to complete + del_users.join() + del_users2.join() + del_users3.join() + + # Now make sure both the MO and RI plugins still work correctly + acceptance_test.func_tests[8](topology_m2, "dynamic") # RI plugin + acceptance_test.func_tests[5](topology_m2, "dynamic") # MO plugin + + # Cleanup the stress tests + stress_tests.cleanup(m1) + + stress_count += 1 + log.info('####################################################################') + log.info('Successfully Stressed Dynamic Plugins' + msg + + '. Completed (%d/%d)' % (stress_count, stress_max_runs)) + log.info('####################################################################\n') + + if replication_run: + # We're done. 
+ break + else: + log.info('Resume replication and run everything one more time') + topology_m2.resume_all_replicas() + + replication_run = True + msg = ' (replication enabled)' + time.sleep(1) + + ############################################################################ + # Check replication, and data are in sync + ############################################################################ + check_replicas(topology_m2) + + +if __name__ == '__main__': + # Run isolated + # -s for DEBUG mode + CURRENT_FILE = os.path.realpath(__file__) + pytest.main("-s %s" % CURRENT_FILE) diff --git a/dirsrvtests/tests/suites/dynamic_plugins/stress_tests.py b/dirsrvtests/tests/suites/dynamic_plugins/stress_tests.py new file mode 100644 index 0000000..0f62b4f --- /dev/null +++ b/dirsrvtests/tests/suites/dynamic_plugins/stress_tests.py @@ -0,0 +1,131 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2016 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. +# --- END COPYRIGHT BLOCK --- +# +''' +Created on Dec 16, 2014 + +@author: mreynolds +''' +import logging +import threading + +import ldap +import pytest +from lib389 import Entry +from lib389._constants import * +from lib389.properties import * +from lib389.plugins import ReferentialIntegrityPlugin, MemberOfPlugin +from lib389.utils import * +from lib389.idm.directorymanager import * + +pytestmark = pytest.mark.tier2 + +log = logging.getLogger(__name__) + +NUM_USERS = 250 +GROUP_DN = 'cn=stress-group,' + DEFAULT_SUFFIX + + +# Configure Referential Integrity Plugin for stress test +def configureRI(inst): + plugin = ReferentialIntegrityPlugin(inst) + plugin.enable() + plugin.replace('referint-membership-attr', 'uniquemember') + + +# Configure MemberOf Plugin for stress test +def configureMO(inst): + plugin = MemberOfPlugin(inst) + plugin.enable() + plugin.replace('memberofgroupattr', 'uniquemember') + + +def cleanup(conn): + try: + conn.delete_s(GROUP_DN) + except ldap.LDAPError as e: + log.fatal('cleanup: failed to delete group (' + GROUP_DN + ') error: ' + e.message['desc']) + assert False + + +class DelUsers(threading.Thread): + def __init__(self, inst, rdnval): + threading.Thread.__init__(self) + self.daemon = True + self.inst = inst + self.rdnval = rdnval + + def run(self): + dm = DirectoryManager(self.inst) + conn = dm.bind() + idx = 0 + log.info('DelUsers - Deleting ' + str(NUM_USERS) + ' entries (' + self.rdnval + ')...') + while idx < NUM_USERS: + USER_DN = 'uid=' + self.rdnval + str(idx) + ',' + DEFAULT_SUFFIX + try: + conn.delete_s(USER_DN) + except ldap.LDAPError as e: + if e == ldap.UNAVAILABLE or e == ldap.SERVER_DOWN: + log.fatal('DeleteUsers: failed to delete (' + USER_DN + ') error: ' + e.message['desc']) + assert False + + idx += 1 + + conn.close() + log.info('DelUsers - Finished deleting ' + str(NUM_USERS) + ' entries (' + self.rdnval + ').') + + +class AddUsers(threading.Thread): + def __init__(self, inst, rdnval, addToGroup): + threading.Thread.__init__(self) + self.daemon = True + self.inst = inst + self.addToGroup = addToGroup + self.rdnval = rdnval + + def run(self): + # Start adding users + dm = DirectoryManager(self.inst) + conn = dm.bind() + idx = 0 + + if self.addToGroup: + try: + conn.add_s(Entry((GROUP_DN, + {'objectclass': b'top groupOfNames groupOfUniqueNames'.split(), + 'cn': 'stress-group'}))) + except ldap.LDAPError as e: + if e == ldap.UNAVAILABLE or e == ldap.SERVER_DOWN: + log.fatal('AddUsers: failed to add group (' + GROUP_DN + ') error: ' + 
e.message['desc']) + assert False + + log.info('AddUsers - Adding ' + str(NUM_USERS) + ' entries (' + self.rdnval + ')...') + + while idx < NUM_USERS: + USER_DN = 'uid=' + self.rdnval + str(idx) + ',' + DEFAULT_SUFFIX + try: + conn.add_s(Entry((USER_DN, {'objectclass': b'top nsOrgPerson'.split(), + 'uid': ensure_bytes('user' + str(idx))}))) + except ldap.LDAPError as e: + if e == ldap.UNAVAILABLE or e == ldap.SERVER_DOWN: + log.fatal('AddUsers: failed to add (' + USER_DN + ') error: ' + e.message['desc']) + assert False + + if self.addToGroup: + # Add the user to the group + try: + conn.modify_s(GROUP_DN, [(ldap.MOD_ADD, 'uniquemember', ensure_bytes(USER_DN))]) + except ldap.LDAPError as e: + if e == ldap.UNAVAILABLE or e == ldap.SERVER_DOWN: + log.fatal('AddUsers: Failed to add user' + USER_DN + ' to group: error ' + e.message['desc']) + assert False + + idx += 1 + + conn.close() + log.info('AddUsers - Finished adding ' + str(NUM_USERS) + ' entries (' + self.rdnval + ').') diff --git a/dirsrvtests/tests/suites/filter/__init__.py b/dirsrvtests/tests/suites/filter/__init__.py new file mode 100644 index 0000000..beccf4b --- /dev/null +++ b/dirsrvtests/tests/suites/filter/__init__.py @@ -0,0 +1,3 @@ +""" + :Requirement: 389-ds-base: LDAP Filters +""" diff --git a/dirsrvtests/tests/suites/filter/basic_filter_test.py b/dirsrvtests/tests/suites/filter/basic_filter_test.py new file mode 100644 index 0000000..f12581a --- /dev/null +++ b/dirsrvtests/tests/suites/filter/basic_filter_test.py @@ -0,0 +1,49 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2019 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. +# --- END COPYRIGHT BLOCK --- + +import pytest, os + +from lib389._constants import DEFAULT_SUFFIX, PW_DM +from lib389.topologies import topology_st as topo + +from lib389.idm.user import UserAccount, UserAccounts +from lib389.idm.account import Accounts + +pytestmark = pytest.mark.tier0 + +def test_search_attr(topo): + """ + Test filter can search attributes + :id: 9a1b0a4b-111c-4105-866d-4288f143ee07 + :setup: server + :steps: + 1. Add test entry + 2. Make a search + :expectedresults: + 1. Entry should be added + 2. Operation should succeed + """ + user = UserAccounts(topo.standalone, DEFAULT_SUFFIX) + for i in range(1, 5): + user1 = user.create_test_user(uid=i) + user1.set("mail", "AnujBorah{}@ok.com".format(i)) + + # Testing filter is working for any kind of attr + + user = Accounts(topo.standalone, DEFAULT_SUFFIX) + + assert len(user.filter('(mail=*)')) == 4 + assert len(user.filter('(uid=*)')) == 4 + + # Testing filter is working for other filters + assert len(user.filter("(objectclass=inetOrgPerson)")) == 4 + + +if __name__ == "__main__": + CURRENT_FILE = os.path.realpath(__file__) + pytest.main("-s -v %s" % CURRENT_FILE) diff --git a/dirsrvtests/tests/suites/filter/bitw_filter_test.py b/dirsrvtests/tests/suites/filter/bitw_filter_test.py new file mode 100644 index 0000000..c844feb --- /dev/null +++ b/dirsrvtests/tests/suites/filter/bitw_filter_test.py @@ -0,0 +1,398 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2019 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. +# --- END COPYRIGHT BLOCK --- + +""" +This script will test different types of filters. 
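+It exercises the bitwise matching rules 1.2.840.113556.1.4.803 (bitwise AND) and 1.2.840.113556.1.4.804 (bitwise OR) against the multi-valued integer attribute testUserAccountControl.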
+""" + +import os +import pytest + +from lib389.topologies import topology_st as topo +from lib389._constants import PW_DM +from lib389.idm.user import UserAccounts +from lib389.idm.account import Accounts +from lib389.plugins import BitwisePlugin +from lib389.schema import Schema +from lib389.backend import Backends +from lib389.idm.domain import Domain + +import ldap + +FILTER_TESTPERSON = "objectclass=testperson" +FILTER_TESTERPERSON = "objectclass=testerperson" +FILTER_CONTROL = f"(& ({FILTER_TESTPERSON}) (testUserAccountControl:1.2.840.113556.1.4.803:=514))" +SUFFIX = 'dc=anuj,dc=com' + + +class CreateUsers(): + """ + Will create users with different testUserAccountControl, testUserStatus + """ + def __init__(self, *args): + self.args = args + + def user_create(self): + """ + Will create users with different testUserAccountControl, testUserStatus + """ + self.args[0].create(properties={ + 'sn': self.args[1], + 'uid': self.args[1], + 'cn': self.args[1], + 'userpassword': PW_DM, + 'givenName': 'bit', + 'mail': '{}@redhat.com'.format(self.args[1]), + 'objectclass': 'top account posixaccount organizationalPerson ' + 'inetOrgPerson testperson'.split(), + 'testUserAccountControl': [i for i in self.args[2]], + 'testUserStatus': [i for i in self.args[3]], + 'uidNumber': str(self.args[4]), + 'gidNumber': str(self.args[4]), + 'homeDirectory': self.args[1] + }) + + def create_users_other(self): + """ + Will create users with different testUserAccountControl(8388608) + """ + self.args[0].create(properties={ + 'telephoneNumber': '98989819{}'.format(self.args[1]), + 'uid': 'anuj_{}'.format(self.args[1]), + 'sn': 'testwise_{}'.format(self.args[1]), + 'cn': 'bit testwise{}'.format(self.args[1]), + 'userpassword': PW_DM, + 'givenName': 'anuj_{}'.format(self.args[1]), + 'mail': 'anuj_{}@example.com'.format(self.args[1]), + 'objectclass': 'top account posixaccount organizationalPerson ' + 'inetOrgPerson testperson'.split(), + 'testUserAccountControl': '8388608', + 'testUserStatus': 'PasswordExpired', + 'uidNumber': str(self.args[1]), + 'gidNumber': str(self.args[1]), + 'homeDirectory': '/home/' + 'testwise_{}'.format(self.args[1]) + }) + + def user_create_52(self): + """ + Will create users with different testUserAccountControl(16777216) + """ + self.args[0].create(properties={ + 'telephoneNumber': '98989819{}'.format(self.args[1]), + 'uid': 'bditwfilter52_test{}'.format(self.args[1]), + 'sn': 'bditwfilter52_test{}'.format(self.args[1]), + 'cn': 'bit bditwfilter52_test{}'.format(self.args[1]), + 'userpassword': PW_DM, + 'givenName': 'bditwfilter52_test{}'.format(self.args[1]), + 'mail': 'bditwfilter52_test{}@example.com'.format(self.args[1]), + 'objectclass': 'top account posixaccount organizationalPerson ' + 'inetOrgPerson testperson'.split(), + 'testUserAccountControl': '16777216', + 'testUserStatus': 'PasswordExpired', + 'uidNumber': str(self.args[1]), + 'gidNumber': str(self.args[1]), + 'homeDirectory': '/home/' + 'bditwfilter52_test{}'.format(self.args[1]) + }) + + +@pytest.fixture(scope="module") +def _create_schema(request, topo): + Schema(topo.standalone).\ + add('attributetypes', + ["( NAME 'testUserAccountControl' DESC 'Attribute Bitwise filteri-Multi-Valued'" + "SYNTAX 1.3.6.1.4.1.1466.115.121.1.27 )", + "( NAME 'testUserStatus' DESC 'State of User account active/disabled'" + "SYNTAX 1.3.6.1.4.1.1466.115.121.1.15 )"]) + + Schema(topo.standalone).\ + add('objectClasses', "( NAME 'testperson' SUP top STRUCTURAL MUST " + "( sn $ cn $ testUserAccountControl $ " + "testUserStatus )MAY( 
userPassword $ telephoneNumber $ " + "seeAlso $ description ) X-ORIGIN 'BitWise' )") + + # Creating Backend + backends = Backends(topo.standalone) + backend = backends.create(properties={'nsslapd-suffix': SUFFIX, 'cn': 'AnujRoot'}) + + # Creating suffix + suffix = Domain(topo.standalone, SUFFIX).create(properties={'dc': 'anuj'}) + + # Creating users + users = UserAccounts(topo.standalone, suffix.dn, rdn=None) + for user in [('btestuser1', ['514'], ['Disabled'], 100), + ('btestuser2', ['65536'], ['PasswordNeverExpired'], 101), + ('btestuser3', ['8388608'], ['PasswordExpired'], 102), + ('btestuser4', ['256'], ['TempDuplicateAccount'], 103), + ('btestuser5', ['16777216'], ['TrustedAuthDelegation'], 104), + ('btestuser6', ['528'], ['AccountLocked'], 105), + ('btestuser7', ['513'], ['AccountActive'], 106), + ('btestuser11', ['655236'], ['TestStatus1'], 107), + ('btestuser12', ['665522'], ['TestStatus2'], 108), + ('btestuser13', ['266552'], ['TestStatus3'], 109), + ('btestuser8', ['98536', '99512', '99528'], + ['AccountActive', 'PasswordExxpired', 'AccountLocked'], 110), + ('btestuser9', ['87536', '912', ], ['AccountActive', + 'PasswordNeverExpired', ], 111), + ('btestuser10', ['89536', '97546', '96579'], + ['TestVerify1', 'TestVerify2', 'TestVerify3'], 112)]: + CreateUsers(users, user[0], user[1], user[2], user[3]).user_create() + + def fin(): + """ + Deletes entries after the test. + """ + for user in users.list(): + user.delete() + + suffix.delete() + backend.delete() + + request.addfinalizer(fin) + + +def increasesizelimit(topo, size): + """ + Will change nsslapd-sizelimit to desire value + """ + topo.standalone.config.set('nsslapd-sizelimit', str(size)) + + +def test_bitwise_plugin_status(topo, _create_schema): + """ + Checking bitwise plugin enabled or not, by default it should be enabled. + If disabled, this test case would enable the plugin + :id: 3ade097e-9ebd-11e8-b2e7-8c16451d917b + :setup: Standalone + :steps: + 1. Create Filter rules. + 2. Try to pass filter rules as per the condition . + :expectedresults: + 1. It should pass + 2. It should pass + """ + # Assert plugin BitwisePlugin is on + assert BitwisePlugin(topo.standalone).status() + + +def test_search_disabled_accounts(topo, _create_schema): + """ + Searching for integer Disabled Accounts. + Bitwise AND operator should match each integer, so it should return one entry. + :id: 467ef0ea-9ebd-11e8-a37f-8c16451d917b + :setup: Standalone + :steps: + 1. Create Filter rules. + 2. Try to pass filter rules as per the condition . + :expectedresults: + 1. It should pass + 2. It should pass + + """ + assert len(Accounts(topo.standalone, SUFFIX).filter(FILTER_CONTROL)) == 2 + + +def test_plugin_can_be_disabled(topo, _create_schema): + """ + Verify whether plugin can be disabled + :id: 4ed21588-9ebd-11e8-b862-8c16451d917b + :setup: Standalone + :steps: + 1. Create Filter rules. + 2. Try to pass filter rules as per the condition . + :expectedresults: + 1. It should pass + 2. It should pass + """ + bitwise = BitwisePlugin(topo.standalone) + assert bitwise.status() + # make BitwisePlugin off + bitwise.disable() + topo.standalone.restart() + assert not bitwise.status() + + +def test_plugin_is_disabled(topo, _create_schema): + """ + Testing Bitwise search when plugin is disabled + Bitwise search filter should give proper error message + :id: 54bebbfe-9ebd-11e8-8ca4-8c16451d917b + :setup: Standalone + :steps: + 1. Create Filter rules. + 2. Try to pass filter rules as per the condition . + :expectedresults: + 1. It should pass + 2. 
It should pass + """ + with pytest.raises(ldap.UNAVAILABLE_CRITICAL_EXTENSION): + Accounts(topo.standalone, SUFFIX).filter(FILTER_CONTROL) + + +def test_enabling_works_fine(topo, _create_schema): + """ + Enabling the plugin to make sure re-enabling works fine + :id: 5a2fc2b8-9ebd-11e8-8e18-8c16451d917b + :setup: Standalone + :steps: + 1. Create Filter rules. + 2. Try to pass filter rules as per the condition . + :expectedresults: + 1. It should pass + 2. It should pass + """ + # make BitwisePlugin off + bitwise = BitwisePlugin(topo.standalone) + bitwise.disable() + # make BitwisePlugin on again + bitwise.enable() + topo.standalone.restart() + assert bitwise.status() + assert len(Accounts(topo.standalone, SUFFIX).filter(FILTER_CONTROL)) == 2 + + +@pytest.mark.parametrize("filter_name, value", [ + (f"(& ({FILTER_TESTPERSON}) (testUserAccountControl:1.2.840.113556.1.4.803:=513))", 1), + (f"(& ({FILTER_TESTPERSON}) (testUserAccountControl:1.2.840.113556.1.4.803:=16777216))", 1), + (f"(& ({FILTER_TESTPERSON}) (testUserAccountControl:1.2.840.113556.1.4.803:=8388608))", 1), + (f"(& ({FILTER_TESTPERSON}) (testUserAccountControl:1.2.840.113556.1.4.804:=5))", 3), + (f"(& ({FILTER_TESTPERSON}) (testUserAccountControl:1.2.840.113556.1.4.804:=8))", 3), + (f"(& ({FILTER_TESTPERSON}) (testUserAccountControl:1.2.840.113556.1.4.804:=7))", 5), + (f"(& ({FILTER_TESTERPERSON}) (testUserAccountControl:1.2.840.113556.1.4.804:=7))", 0), + (f"(& ({FILTER_TESTPERSON}) (&(testUserAccountControl:1.2.840.113556.1.4.803:=98536)" + "(testUserAccountControl:1.2.840.113556.1.4.803:=912)))", 0), + (f"(& ({FILTER_TESTPERSON}) (&(testUserAccountControl:1.2.840.113556.1.4.804:=87)" + "(testUserAccountControl:1.2.840.113556.1.4.804:=91)))", 8), + (f"(& ({FILTER_TESTPERSON}) (&(testUserAccountControl:1.2.840.113556.1.4.803:=89536)" + "(testUserAccountControl:1.2.840.113556.1.4.804:=79)))", 1), + (f"(& ({FILTER_TESTPERSON}) (|(testUserAccountControl:1.2.840.113556.1.4.803:=89536)" + "(testUserAccountControl:1.2.840.113556.1.4.804:=79)))", 8), + (f"(& ({FILTER_TESTPERSON}) (|(testUserAccountControl:1.2.840.113556.1.4.803:=89)" + "(testUserAccountControl:1.2.840.113556.1.4.803:=536)))", 0), + (f"(& ({FILTER_TESTPERSON}) (testUserAccountControl:1.2.840.113556.1.4.803:=x))", 13), + (f"(& ({FILTER_TESTPERSON}) (testUserAccountControl:1.2.840.113556.1.4.803:=&\\*#$%))", 13), + (f"(& ({FILTER_TESTPERSON}) (testUserAccountControl:1.2.840.113556.1.4.803:=-65536))", 0), + (f"(& ({FILTER_TESTPERSON}) (testUserAccountControl:1.2.840.113556.1.4.803:=-1))", 0), + (f"(& ({FILTER_TESTPERSON}) (testUserAccountControl:1.2.840.113556.1.4.803:=-))", 13), + (f"(& ({FILTER_TESTPERSON}) (testUserAccountControl:1.2.840.113556.1.4.803:=))", 13), + (f"(& ({FILTER_TESTPERSON}) (testUserAccountControl:1.2.840.113556.1.4.803:=\\*))", 13), + (f"(& ({FILTER_TESTPERSON}) (testUserAccountControl:1.2.840.113556.1.4.804:=\\*))", 0), + (f"(& ({FILTER_TESTPERSON}) (testUserAccountControl:1.2.840.113556.1.4.803:=6552))", 0), + (f"(& ({FILTER_TESTPERSON}\\))(testUserAccountControl:1.2.840.113556.1.4.804:=6552))", 0), + (f"(& ({FILTER_TESTPERSON}) (testUserAccountControl:1.2.840.113556.1.4.803:=65536))", 5) +]) +def test_all_together(topo, _create_schema, filter_name, value): + """Target_set_with_ldap_instead_of_ldap + + :id: ba7f5106-9ebd-11e8-9ad6-8c16451d917b + :parametrized: yes + :setup: Standalone + :steps: + 1. Create Filter rules. + 2. Try to pass filter rules as per the condition . + :expectedresults: + 1. It should pass + 2. 
It should pass + """ + assert len(Accounts(topo.standalone, SUFFIX).filter(filter_name)) == value + + +def test_5_entries(topo, _create_schema): + """ + Bitwise filter test for 5 entries + By default the size limit is 2000 + In order to perform stress tests, we need to increase the nsslapd-sizelimit. + IncrSizeLimit 52000 + :id: e939aa64-9ebd-11e8-815e-8c16451d917b + :setup: Standalone + :steps: + 1. Create Filter rules. + 2. Try to pass filter rules as per the condition. + :expectedresults: + 1. It should pass + 2. It should pass + """ + filter51 = f"(& ({FILTER_TESTPERSON}) (testUserAccountControl:1.2.840.113556.1.4.803:=8388608))" + increasesizelimit(topo, 52000) + users = UserAccounts(topo.standalone, SUFFIX, rdn=None) + for i in range(5): + CreateUsers(users, i).create_users_other() + assert len(Accounts(topo.standalone, SUFFIX).filter(filter51)) == 6 + increasesizelimit(topo, 2000) + + +def test_5_entries1(topo, _create_schema): + """ + Bitwise filter for 5 entries + By default the size limit is 2000 + In order to perform stress tests, we need to increase the nsslapd-sizelimit. + IncrSizeLimit 52000 + :id: ef8b050c-9ebd-11e8-979d-8c16451d917b + :setup: Standalone + :steps: + 1. Create Filter rules. + 2. Try to pass filter rules as per the condition. + :expectedresults: + 1. It should pass + 2. It should pass + """ + filter52 = f"(& ({FILTER_TESTPERSON})(testUserAccountControl:1.2.840.113556.1.4.804:=16777216))" + increasesizelimit(topo, 52000) + users = UserAccounts(topo.standalone, SUFFIX, rdn=None) + for i in range(5): + CreateUsers(users, i).user_create_52() + assert len(Accounts(topo.standalone, SUFFIX).filter(filter52)) == 6 + increasesizelimit(topo, 2000) + + +def test_5_entries3(topo, _create_schema): + """ + Bitwise filter test for entries + By default the size limit is 2000 + In order to perform stress tests, we need to increase the nsslapd-sizelimit. + IncrSizeLimit 52000 + :id: f5b06648-9ebd-11e8-b08f-8c16451d917b + :setup: Standalone + :steps: + 1. Create Filter rules. + 2. Try to pass filter rules as per the condition. + :expectedresults: + 1. It should pass + 2. It should pass + """ + increasesizelimit(topo, 52000) + assert len(Accounts(topo.standalone, SUFFIX).filter( + "(testUserAccountControl:1.2.840.113556.1.4.803:=8388608, " + "['attrlist=cn:sn:uid:testUserAccountControl'])")) == 6 + increasesizelimit(topo, 2000) + + +def test_5_entries4(topo, _create_schema): + """ + Bitwise filter for entries + By default the size limit is 2000 + In order to perform stress tests, we need to increase the nsslapd-sizelimit. + IncrSizeLimit 52000 + :id: fa5f7a4e-9ebd-11e8-ad54-8c16451d917b + :setup: Standalone + :steps: + 1. Create Filter rules. + 2. Try to pass filter rules as per the condition. + :expectedresults: + 1. It should pass + 2. It should pass + """ + increasesizelimit(topo, 52000) + assert len(Accounts(topo.standalone, SUFFIX).
+ filter("(testUserAccountControl:1.2.840.113556.1.4.804:=16777216," + "['attrlist=cn:sn:uid:testUserAccountControl'])")) == 6 + increasesizelimit(topo, 2000) + + +if __name__ == '__main__': + CURRENT_FILE = os.path.realpath(__file__) + pytest.main("-s -v %s" % CURRENT_FILE) diff --git a/dirsrvtests/tests/suites/filter/complex_filters_test.py b/dirsrvtests/tests/suites/filter/complex_filters_test.py new file mode 100644 index 0000000..b96cc2a --- /dev/null +++ b/dirsrvtests/tests/suites/filter/complex_filters_test.py @@ -0,0 +1,139 @@ +import logging +import pytest +import os +import ldap +from lib389._constants import * +from lib389.topologies import topology_st as topo +from lib389.idm.user import UserAccounts + +pytestmark = pytest.mark.tier1 + +DEBUGGING = os.getenv("DEBUGGING", default=False) +if DEBUGGING: + logging.getLogger(__name__).setLevel(logging.DEBUG) +else: + logging.getLogger(__name__).setLevel(logging.INFO) +log = logging.getLogger(__name__) +ALL_FILTERS = [] + + +# Parameterized filters to test +AND_FILTERS = [("(&(uid=uid1)(sn=last1)(givenname=first1))", 1), + ("(&(uid=uid1)(&(sn=last1)(givenname=first1)))", 1), + ("(&(uid=uid1)(&(&(sn=last1))(&(givenname=first1))))", 1), + ("(&(uid=*)(sn=last3)(givenname=*))", 1), + ("(&(uid=*)(&(sn=last3)(givenname=*)))", 1), + ("(&(uid=uid5)(&(&(sn=*))(&(givenname=*))))", 1), + ("(&(objectclass=*)(uid=*)(sn=last*))", 5), + ("(&(objectclass=*)(uid=*)(sn=last1))", 1)] + +OR_FILTERS = [("(|(uid=uid1)(sn=last1)(givenname=first1))", 1), + ("(|(uid=uid1)(|(sn=last1)(givenname=first1)))", 1), + ("(|(uid=uid1)(|(|(sn=last1))(|(givenname=first1))))", 1), + ("(|(objectclass=*)(sn=last1)(|(givenname=first1)))", 14), + ("(|(&(objectclass=*)(sn=last1))(|(givenname=first1)))", 1), + ("(|(&(objectclass=*)(sn=last))(|(givenname=first1)))", 1)] + +NOT_FILTERS = [("(&(uid=uid1)(!(cn=NULL)))", 1), + ("(&(!(cn=NULL))(uid=uid1))", 1), + ("(&(uid=*)(&(!(uid=1))(!(givenname=first1))))", 4)] + +MIX_FILTERS = [("(&(|(uid=uid1)(uid=NULL))(sn=last1))", 1), + ("(&(|(uid=uid1)(uid=NULL))(!(sn=NULL)))", 1), + ("(&(|(uid=uid1)(sn=last2))(givenname=first1))", 1), + ("(|(&(uid=uid1)(!(uid=NULL)))(sn=last2))", 2), + ("(|(&(uid=uid1)(uid=NULL))(sn=last2))", 1), + ("(&(uid=uid5)(sn=*)(cn=*)(givenname=*)(uid=u*)(sn=la*)" + + "(cn=full*)(givenname=f*)(uid>=u)(!(givenname=NULL)))", 1), + ("(|(&(objectclass=*)(sn=last))(&(givenname=first1)))", 1)] + +ZERO_AND_FILTERS = [("(&(uid=uid1)(sn=last1)(givenname=NULL))", 0), + ("(&(uid=uid1)(&(sn=last1)(givenname=NULL)))", 0), + ("(&(uid=uid1)(&(&(sn=last1))(&(givenname=NULL))))", 0), + ("(&(uid=uid1)(&(&(sn=last1))(&(givenname=NULL)(sn=*)))(|(sn=NULL)))", 0), + ("(&(uid=uid1)(&(&(sn=last*))(&(givenname=first*)))(&(sn=NULL)))", 0)] + +ZERO_OR_FILTERS = [("(|(uid=NULL)(sn=NULL)(givenname=NULL))", 0), + ("(|(uid=NULL)(|(sn=NULL)(givenname=NULL)))", 0), + ("(|(uid=NULL)(|(|(sn=NULL))(|(givenname=NULL))))", 0)] + +RANGE_FILTERS = [("(uid>=uid3)", 3), + ("(&(uid=*)(uid>=uid3))", 3), + ("(|(uid>=uid3)(uid<=uid5))", 5), + ("(&(uid>=uid3)(uid<=uid5))", 3), + ("(|(&(uid>=uid3)(uid<=uid5))(uid=*))", 5)] + +LONG_FILTERS = [("(|(uid=*)(uid=*)(uid=*)(uid=*)(uid=*)(uid=*)(uid=*)(uid=*)" + + "(uid=*)(uid=*)(uid=*)(uid=*)(uid=*)(uid=*)(uid=*)(uid=*)" + + "(uid=*)(uid=*)(uid=*)(uid=*)(uid=*)(uid=*)(uid=*)(uid=*)" + + "(uid=*)(uid=*)(uid=*)(uid=*)(uid=*)(uid=*)(uid=*)(uid=*)" + + "(uid=*)(uid=*)(uid=*)(uid=*)(uid=*)(uid=*)(uid=*)(uid=*)" + + "(uid=*)(uid=*)(uid=*)(uid=*)(uid=*)(uid=*)(uid=*)(uid=*)" + + 
"(uid=*)(uid=*)(uid=*)(uid=*)(uid=*)(uid=*)(uid=*)(uid=*)" + + "(uid=*)(uid=*)(uid=*)(uid=*)(uid=*)(uid=*)(uid=*)(uid=*)" + + "(uid=*))", 5)] + + +# Combine all the filters +ALL_FILTERS += AND_FILTERS +ALL_FILTERS += OR_FILTERS +ALL_FILTERS += NOT_FILTERS +ALL_FILTERS += MIX_FILTERS +ALL_FILTERS += ZERO_AND_FILTERS +ALL_FILTERS += ZERO_OR_FILTERS +ALL_FILTERS += LONG_FILTERS +ALL_FILTERS += RANGE_FILTERS + + +@pytest.fixture(scope="module") +def setup(topo, request): + """Add teset users + """ + + users = UserAccounts(topo.standalone, DEFAULT_SUFFIX) + for i in range(1, 6): + users.create(properties={ + 'uid': 'uid%s' % i, + 'cn': 'full%s' % i, + 'sn': 'last%s' % i, + 'givenname': 'first%s' % i, + 'uidNumber': '%s' % i, + 'gidNumber': '%s' % i, + 'homeDirectory': '/home/user%s' % i + }) + + +@pytest.mark.parametrize("myfilter, expected_results", ALL_FILTERS) +def test_filters(topo, setup, myfilter, expected_results): + """Test various complex search filters and verify they are returning the + expected number of entries + + :id: ee9ead27-5f63-4aed-844d-c39b99138c8d + :parametrized: yes + :setup: standalone + :steps: + 1. Issue search + 2. Check the number of returned entries against the expected number + :expectedresults: + 1. Search succeeds + 2. The number of returned entries matches the expected number + """ + + log.info("Testing filter \"{}\"...".format(myfilter)) + try: + entries = topo.standalone.search_s(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE, myfilter) + if len(entries) != expected_results: + log.fatal("Search filter \"{}\") returned {} entries, but we expected {}".format( + myfilter, len(entries), expected_results)) + assert False + except ldap.LDAPError as e: + log.fatal("Search filter \"{}\") generated ldap error: {}".format(myfilter, str(e))) + assert False + + +if __name__ == '__main__': + # Run isolated + # -s for DEBUG mode + CURRENT_FILE = os.path.realpath(__file__) + pytest.main("-s %s" % CURRENT_FILE) + diff --git a/dirsrvtests/tests/suites/filter/filter_cert_test.py b/dirsrvtests/tests/suites/filter/filter_cert_test.py new file mode 100644 index 0000000..af53765 --- /dev/null +++ b/dirsrvtests/tests/suites/filter/filter_cert_test.py @@ -0,0 +1,69 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2019 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. +# --- END COPYRIGHT BLOCK ---- + + +""" +verify and testing Filter from a search +""" + +import os +import pytest + +from lib389._constants import DEFAULT_SUFFIX +from lib389.topologies import topology_st as topo +from lib389.idm.user import UserAccounts +from lib389.idm.account import Accounts +from lib389.nss_ssl import NssSsl +from lib389.utils import search_filter_escape_bytes + +pytestmark = pytest.mark.tier1 + + +def test_positive(topo): + """Test User certificate field + :id: e984ac40-63d1-4176-ad1e-0cbe71391b5f + :setup: Standalone + :steps: + 1. Create entries with userCertificate field. + 2. Try to search/filter them with userCertificate field. + :expected results: + 1. Pass + 2. 
Pass + """ + # SETUP TLS + topo.standalone.stop() + NssSsl(topo.standalone).reinit() + NssSsl(topo.standalone).create_rsa_ca() + NssSsl(topo.standalone).create_rsa_key_and_cert() + # Create TLS users + NssSsl(topo.standalone).create_rsa_user('testuser1') + NssSsl(topo.standalone).create_rsa_user('testuser2') + # Creating cert users + topo.standalone.start() + users_people = UserAccounts(topo.standalone, DEFAULT_SUFFIX) + for count in range(1, 3): + user = users_people.create_test_user(uid=count, gid=count) + tls_locs = NssSsl(topo.standalone).get_rsa_user(f'testuser{count}') + # {'ca': ca_path, 'key': key_path, 'crt': crt_path} + user.enroll_certificate(tls_locs['crt_der_path']) + + assert Accounts(topo.standalone, DEFAULT_SUFFIX).filter("(usercertificate=*)") + assert Accounts(topo.standalone, DEFAULT_SUFFIX).filter("(userCertificate;binary=*)") + user1_cert = users_people.list()[0].get_attr_val("userCertificate;binary") + assert Accounts(topo.standalone, DEFAULT_SUFFIX).filter( + f'(userCertificate;binary={search_filter_escape_bytes(user1_cert)})')[0].dn == \ + 'uid=test_user_1,ou=People,dc=example,dc=com' + user2_cert = users_people.list()[1].get_attr_val("userCertificate;binary") + assert Accounts(topo.standalone, DEFAULT_SUFFIX).filter( + f'(userCertificate;binary={search_filter_escape_bytes(user2_cert)})')[0].dn == \ + 'uid=test_user_2,ou=People,dc=example,dc=com' + + +if __name__ == '__main__': + CURRENT_FILE = os.path.realpath(__file__) + pytest.main("-s -v %s" % CURRENT_FILE) diff --git a/dirsrvtests/tests/suites/filter/filter_index_match_test.py b/dirsrvtests/tests/suites/filter/filter_index_match_test.py new file mode 100644 index 0000000..aea0662 --- /dev/null +++ b/dirsrvtests/tests/suites/filter/filter_index_match_test.py @@ -0,0 +1,862 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2019 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. +# --- END COPYRIGHT BLOCK --- + + +""" +Test the matching rules feature.
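+ +Each check below uses an LDAP extensible match filter of the form (attribute:matchingrule:=value), where the matching rule can be referenced by name or by OID. +A minimal sketch of how such a filter string is assembled (the helper is hypothetical and only illustrates the syntax; the attribute and rule names are the ones defined later in this file): + +    def extensible_filter(attr, rule, value): +        # builds e.g. (attrintegerMatch:integerMatch:=-2) +        return "({}:{}:={})".format(attr, rule, value) + +    assert extensible_filter("attrintegerMatch", "integerMatch", "-2") == "(attrintegerMatch:integerMatch:=-2)"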
+""" + +import os +import pytest + +from lib389._constants import DEFAULT_SUFFIX +from lib389.topologies import topology_st +from lib389.cos import CosTemplates +from lib389.index import Indexes +from lib389.schema import Schema + +import ldap + +pytestmark = pytest.mark.tier1 + + +TESTED_MATCHING_RULES = ["bitStringMatch", + "caseExactIA5Match", + "caseExactMatch", + "caseExactOrderingMatch", + "caseExactSubstringsMatch", + "caseExactIA5SubstringsMatch", + "generalizedTimeMatch", + "generalizedTimeOrderingMatch", + "booleanMatch", + "caseIgnoreIA5Match", + "caseIgnoreIA5SubstringsMatch", + "caseIgnoreMatch", + "caseIgnoreOrderingMatch", + "caseIgnoreSubstringsMatch", + "caseIgnoreListMatch", + "caseIgnoreListSubstringsMatch", + "objectIdentifierMatch", + "directoryStringFirstComponentMatch", + "objectIdentifierFirstComponentMatch", + "distinguishedNameMatch", + "integerMatch", + "integerOrderingMatch", + "integerFirstComponentMatch", + "uniqueMemberMatch", + "numericStringMatch", + "numericStringOrderingMatch", + "numericStringSubstringsMatch", + "telephoneNumberMatch", + "telephoneNumberSubstringsMatch", + "octetStringMatch", + "octetStringOrderingMatch"] + + +LIST_CN_INDEX = [('attroctetStringMatch', ['pres', 'eq']), + ('attrbitStringMatch', ['pres', 'eq']), + ('attrcaseExactIA5Match', ['pres', 'eq', 'sub']), + ('attrcaseExactMatch', ['pres', 'eq', 'sub']), + ('attrgeneralizedTimeMatch', ['pres', 'eq']), + ('attrbooleanMatch', ['pres', 'eq']), + ('attrcaseIgnoreIA5Match', ['pres', 'eq', 'sub']), + ('attrcaseIgnoreMatch', ['pres', 'eq', 'sub']), + ('attrcaseIgnoreListMatch', ['pres', 'eq', 'sub']), + ('attrobjectIdentifierMatch', ['pres', 'eq']), + ('attrdistinguishedNameMatch', ['pres', 'eq']), + ('attrintegerMatch', ['pres', 'eq']), + ('attruniqueMemberMatch', ['pres', 'eq']), + ('attrnumericStringMatch', ['pres', 'eq', 'sub']), + ('attrtelephoneNumberMatch', ['pres', 'eq', 'sub']), + ('attrdirectoryStringFirstComponentMatch', ['pres', 'eq']), + ('attrobjectIdentifierFirstComponentMatch', ['pres', 'eq']), + ('attrintegerFirstComponentMatch', ['pres', 'eq'])] + + +LIST_ATTR_INDEX = [ + {'attr': 'attrbitStringMatch', + 'positive': ["'0010'B", "'0011'B", "'0100'B", "'0101'B", "'0110'B"], + 'negative': ["'0001'B", "'0001'B", "'0010'B", "'0010'B", "'0011'B", + "'0011'B", "'0100'B", "'0100'B", "'0101'B", "'0101'B", + "'0110'B", "'0110'B"]}, + {'attr': 'attrcaseExactIA5Match', + 'positive': ['sPrain', 'spRain', 'sprAin', 'spraIn', 'sprain'], + 'negative': ['Sprain', 'Sprain', 'sPrain', 'sPrain', 'spRain', + 'spRain', 'sprAin', 'sprAin', 'spraIn', 'spraIn', + 'sprain', 'sprain']}, + {'attr': 'attrcaseExactMatch', + 'positive': ['ÇéliNé Ändrè', 'Çéliné ÄndrÈ', 'Çéliné Ändrè', 'çÉliné Ändrè'], + 'negative': ['ÇélIné Ändrè', 'ÇélIné Ändrè', 'ÇéliNé Ändrè', 'ÇéliNé Ändrè', + 'Çéliné ÄndrÈ', 'Çéliné ÄndrÈ', 'Çéliné Ändrè', 'Çéliné Ändrè', + 'çÉliné Ändrè', 'çÉliné Ändrè']}, + {'attr': 'attrgeneralizedTimeMatch', + 'positive': ['20100218171301Z', '20100218171302Z', '20100218171303Z', + '20100218171304Z', '20100218171305Z'], + 'negative': ['20100218171300Z', '20100218171300Z', '20100218171301Z', + '20100218171301Z', '20100218171302Z', '20100218171302Z', + '20100218171303Z', '20100218171303Z', '20100218171304Z', + '20100218171304Z', '20100218171305Z', '20100218171305Z']}, + {'attr': 'attrbooleanMatch', + 'positive': ['FALSE'], + 'negative': ['TRUE', 'TRUE', 'FALSE', 'FALSE']}, + {'attr': 'attrcaseIgnoreIA5Match', + 'positive': ['sprain2', 'sprain3', 'sprain4', 'sprain5', 'sprain6'], + 'negative': 
['sprain1', 'sprain1', 'sprain2', 'sprain2', 'sprain3', + 'sprain3', 'sprain4', 'sprain4', 'sprain5', 'sprain5', + 'sprain6', 'sprain6']}, + {'attr': 'attrcaseIgnoreMatch', + 'positive': ['ÇélIné Ändrè2', 'ÇélIné Ändrè3', 'ÇélIné Ändrè4', + 'ÇélIné Ändrè5', 'ÇélIné Ändrè6'], + 'negative': ['ÇélIné Ändrè1', 'ÇélIné Ändrè1', 'ÇélIné Ändrè2', + 'ÇélIné Ändrè2', 'ÇélIné Ändrè3', 'ÇélIné Ändrè3', + 'ÇélIné Ändrè4', 'ÇélIné Ändrè4', 'ÇélIné Ändrè5', + 'ÇélIné Ändrè5', 'ÇélIné Ändrè6', 'ÇélIné Ändrè6']}, + {'attr': 'attrcaseIgnoreListMatch', + 'positive': ['foo2$bar', 'foo3$bar', 'foo4$bar', 'foo5$bar', 'foo6$bar'], + 'negative': ['foo1$bar', 'foo1$bar', 'foo2$bar', 'foo2$bar', 'foo3$bar', + 'foo3$bar', 'foo4$bar', 'foo4$bar', 'foo5$bar', 'foo5$bar', + 'foo6$bar', 'foo6$bar']}, + {'attr': 'attrobjectIdentifierMatch', + 'positive': ['1.3.6.1.4.1.1466.115.121.1.24', '1.3.6.1.4.1.1466.115.121.1.26', + '1.3.6.1.4.1.1466.115.121.1.40', '1.3.6.1.4.1.1466.115.121.1.41', + '1.3.6.1.4.1.1466.115.121.1.6'], + 'negative': ['1.3.6.1.4.1.1466.115.121.1.15', '1.3.6.1.4.1.1466.115.121.1.15', + '1.3.6.1.4.1.1466.115.121.1.24', '1.3.6.1.4.1.1466.115.121.1.24', + '1.3.6.1.4.1.1466.115.121.1.26', '1.3.6.1.4.1.1466.115.121.1.26', + '1.3.6.1.4.1.1466.115.121.1.40', '1.3.6.1.4.1.1466.115.121.1.40', + '1.3.6.1.4.1.1466.115.121.1.41', '1.3.6.1.4.1.1466.115.121.1.41', + '1.3.6.1.4.1.1466.115.121.1.6', '1.3.6.1.4.1.1466.115.121.1.6']}, + {'attr': 'attrdirectoryStringFirstComponentMatch', + 'positive': ['ÇélIné Ändrè2', 'ÇélIné Ändrè3', 'ÇélIné Ändrè4', 'ÇélIné Ändrè5', + 'ÇélIné Ändrè6'], + 'negative': ['ÇélIné Ändrè1', 'ÇélIné Ändrè1', 'ÇélIné Ändrè2', 'ÇélIné Ändrè2', + 'ÇélIné Ändrè3', 'ÇélIné Ändrè3', 'ÇélIné Ändrè4', 'ÇélIné Ändrè4', + 'ÇélIné Ändrè5', 'ÇélIné Ändrè5', 'ÇélIné Ändrè6', 'ÇélIné Ändrè6']}, + {'attr': 'attrobjectIdentifierFirstComponentMatch', + 'positive': ['1.3.6.1.4.1.1466.115.121.1.24', '1.3.6.1.4.1.1466.115.121.1.26', + '1.3.6.1.4.1.1466.115.121.1.40', '1.3.6.1.4.1.1466.115.121.1.41', + '1.3.6.1.4.1.1466.115.121.1.6'], + 'negative': ['1.3.6.1.4.1.1466.115.121.1.15', '1.3.6.1.4.1.1466.115.121.1.15', + '1.3.6.1.4.1.1466.115.121.1.24', '1.3.6.1.4.1.1466.115.121.1.24', + '1.3.6.1.4.1.1466.115.121.1.26', '1.3.6.1.4.1.1466.115.121.1.26', + '1.3.6.1.4.1.1466.115.121.1.40', '1.3.6.1.4.1.1466.115.121.1.40', + '1.3.6.1.4.1.1466.115.121.1.41', '1.3.6.1.4.1.1466.115.121.1.41', + '1.3.6.1.4.1.1466.115.121.1.6', '1.3.6.1.4.1.1466.115.121.1.6']}, + {'attr': 'attrdistinguishedNameMatch', + 'positive': ['cn=foo2,cn=bar', 'cn=foo3,cn=bar', 'cn=foo4,cn=bar', 'cn=foo5,cn=bar', + 'cn=foo6,cn=bar'], + 'negative': ['cn=foo1,cn=bar', 'cn=foo1,cn=bar', 'cn=foo2,cn=bar', 'cn=foo2,cn=bar', + 'cn=foo3,cn=bar', 'cn=foo3,cn=bar', 'cn=foo4,cn=bar', 'cn=foo4,cn=bar', + 'cn=foo5,cn=bar', 'cn=foo5,cn=bar', 'cn=foo6,cn=bar', 'cn=foo6,cn=bar']}, + {'attr': 'attrintegerMatch', + 'positive': ['-1', '0', '1', '2', '3'], + 'negative': ['-2', '-2', '-1', '-1', '0', '0', '1', '1', '2', '2', '3', '3']}, + {'attr': 'attrintegerFirstComponentMatch', + 'positive': ['-1', '0', '1', '2', '3'], + 'negative': ['-2', '-2', '-1', '-1', '0', '0', '1', '1', '2', '2', '3', '3']}, + {'attr': 'attruniqueMemberMatch', + 'positive': ["cn=foo2,cn=bar#'0010'B", "cn=foo3,cn=bar#'0011'B", + "cn=foo4,cn=bar#'0100'B", "cn=foo5,cn=bar#'0101'B", + "cn=foo6,cn=bar#'0110'B"], + 'negative': ["cn=foo1,cn=bar#'0001'B", "cn=foo1,cn=bar#'0001'B", + "cn=foo2,cn=bar#'0010'B", "cn=foo2,cn=bar#'0010'B", + "cn=foo3,cn=bar#'0011'B", "cn=foo3,cn=bar#'0011'B", + 
"cn=foo4,cn=bar#'0100'B", "cn=foo4,cn=bar#'0100'B", + "cn=foo5,cn=bar#'0101'B", "cn=foo5,cn=bar#'0101'B", + "cn=foo6,cn=bar#'0110'B", "cn=foo6,cn=bar#'0110'B"]}, + {'attr': 'attrnumericStringMatch', + 'positive': ['00002', '00003', '00004', '00005', '00006'], + 'negative': ['00001', '00001', '00002', '00002', '00003', '00003', + '00004', '00004', '00005', '00005', '00006', '00006']}, + {'attr': 'attrtelephoneNumberMatch', + 'positive': ['+1 408 555 5625', '+1 408 555 6201', '+1 408 555 8585', + '+1 408 555 9187', '+1 408 555 9423'], + 'negative': ['+1 408 555 4798', '+1 408 555 4798', '+1 408 555 5625', + '+1 408 555 5625', '+1 408 555 6201', '+1 408 555 6201', + '+1 408 555 8585', '+1 408 555 8585', '+1 408 555 9187', + '+1 408 555 9187', '+1 408 555 9423', '+1 408 555 9423']}, + {'attr': 'attroctetStringMatch', + 'positive': ['AAAAAAAAAAAAAAI=', 'AAAAAAAAAAAAAAM=', 'AAAAAAAAAAAAAAQ=', + 'AAAAAAAAAAAAAAU=', 'AAAAAAAAAAAAAAY='], + 'negative': ['AAAAAAAAAAAAAAE=', 'AAAAAAAAAAAAAAE=', 'AAAAAAAAAAAAAAI=', + 'AAAAAAAAAAAAAAI=', 'AAAAAAAAAAAAAAM=', 'AAAAAAAAAAAAAAM=', + 'AAAAAAAAAAAAAAQ=', 'AAAAAAAAAAAAAAQ=', 'AAAAAAAAAAAAAAU=', + 'AAAAAAAAAAAAAAU=', 'AAAAAAAAAAAAAAY=', 'AAAAAAAAAAAAAAY=']}] + + +LIST_MOD_ATTR_ALL = [ + {'attr': 'attrcaseExactMatch', + 'positive': ['ÇélIné Ändrè'], + 'negative': ['ÇélIné Ändrè', 'ÇéliNé Ändrè', 'Çéliné ÄndrÈ', 'Çéliné Ändrè', + 'çÉliné Ändrè']}, + {'attr': 'attrgeneralizedTimeMatch', + 'positive': ['20100218171300Z'], + 'negative': ['20100218171300Z', '20100218171301Z', '20100218171302Z', + '20100218171303Z', '20100218171304Z', '20100218171305Z']}, + {'attr': 'attrbooleanMatch', + 'positive': ['TRUE'], + 'negative': ['TRUE', 'FALSE']}, + {'attr': 'attrcaseIgnoreIA5Match', + 'positive': ['sprain1'], + 'negative': ['sprain1', 'sprain2', 'sprain3', 'sprain4', 'sprain5', 'sprain6']}, + {'attr': 'attrcaseIgnoreMatch', + 'positive': ['ÇélIné Ändrè1'], + 'negative': ['ÇélIné Ändrè1', 'ÇélIné Ändrè2', 'ÇélIné Ändrè3', 'ÇélIné Ändrè4', + 'ÇélIné Ändrè5', 'ÇélIné Ändrè6']}, + {'attr': 'attrcaseIgnoreListMatch', + 'positive': ['foo1$bar'], + 'negative': ['foo1$bar', 'foo2$bar', 'foo3$bar', 'foo4$bar', 'foo5$bar', 'foo6$bar']}, + {'attr': 'attrbitStringMatch', + 'positive': ["'0001'B"], + 'negative': ["'0001'B", "'0010'B", "'0011'B", "'0100'B", "'0101'B", "'0110'B"]}, + {'attr': 'attrcaseExactIA5Match', + 'positive': ['Sprain'], + 'negative': ['Sprain', 'sPrain', 'spRain', 'sprAin', 'spraIn', 'sprain']}, + {'attr': 'attrobjectIdentifierMatch', + 'positive': ['1.3.6.1.4.1.1466.115.121.1.15'], + 'negative': ['1.3.6.1.4.1.1466.115.121.1.15', '1.3.6.1.4.1.1466.115.121.1.24', + '1.3.6.1.4.1.1466.115.121.1.26', '1.3.6.1.4.1.1466.115.121.1.40', + '1.3.6.1.4.1.1466.115.121.1.41', '1.3.6.1.4.1.1466.115.121.1.6']}, + {'attr': 'attrdirectoryStringFirstComponentMatch', + 'positive': ['ÇélIné Ändrè1'], + 'negative': ['ÇélIné Ändrè1', 'ÇélIné Ändrè2', 'ÇélIné Ändrè3', 'ÇélIné Ändrè4', + 'ÇélIné Ändrè5', 'ÇélIné Ändrè6']}, + {'attr': 'attrobjectIdentifierFirstComponentMatch', + 'positive': ['1.3.6.1.4.1.1466.115.121.1.15'], + 'negative': ['1.3.6.1.4.1.1466.115.121.1.15', '1.3.6.1.4.1.1466.115.121.1.24', + '1.3.6.1.4.1.1466.115.121.1.26', '1.3.6.1.4.1.1466.115.121.1.40', + '1.3.6.1.4.1.1466.115.121.1.41', '1.3.6.1.4.1.1466.115.121.1.6']}, + {'attr': 'attrdistinguishedNameMatch', + 'positive': ['cn=foo1,cn=bar'], + 'negative': ['cn=foo1,cn=bar', 'cn=foo2,cn=bar', 'cn=foo3,cn=bar', + 'cn=foo4,cn=bar', 'cn=foo5,cn=bar', 'cn=foo6,cn=bar']}, + {'attr': 'attrintegerMatch', + 'positive': ['-2'], 
+ 'negative': ['-2', '-1', '0', '1', '2', '3']}, + {'attr': 'attrintegerFirstComponentMatch', + 'positive': ['-2'], + 'negative': ['-2', '-1', '0', '1', '2', '3']}, + {'attr': 'attruniqueMemberMatch', + 'positive': ["cn=foo1,cn=bar#'0001'B"], + 'negative': ["cn=foo1,cn=bar#'0001'B", "cn=foo2,cn=bar#'0010'B", + "cn=foo3,cn=bar#'0011'B", "cn=foo4,cn=bar#'0100'B", + "cn=foo5,cn=bar#'0101'B", "cn=foo6,cn=bar#'0110'B"]}, + {'attr': 'attrnumericStringMatch', + 'positive': ['00001'], + 'negative': ['00001', '00002', '00003', '00004', '00005', '00006']}, + {'attr': 'attrgeneralizedTimeMatch', + 'positive': ['+1 408 555 4798'], + 'negative': ['+1 408 555 4798', '+1 408 555 5625', '+1 408 555 6201', + '+1 408 555 8585', '+1 408 555 9187', '+1 408 555 9423']}, + {'attr': 'attroctetStringMatch', + 'positive': ['AAAAAAAAAAAAAAE='], + 'negative': ['AAAAAAAAAAAAAAE=', 'AAAAAAAAAAAAAAI=', 'AAAAAAAAAAAAAAM=', + 'AAAAAAAAAAAAAAQ=', 'AAAAAAAAAAAAAAU=', 'AAAAAAAAAAAAAAY=']}] + + +LIST_MOD_REPLACE_ALL = [ + {'attr': 'attrcaseExactIA5Match', + 'positive': ['Sprain', 'sPrain', 'spRain', 'sprAin', 'spraIn', 'sprain'], + 'negative': ['Sprain', 'Sprain', 'sPrain', 'sPrain', 'spRain', 'spRain', + 'sprAin', 'sprAin', 'spraIn', 'spraIn', 'sprain', 'sprain']}, + {'attr': 'attrcaseExactMatch', + 'positive': ['ÇélIné Ändrè', 'ÇéliNé Ändrè', 'Çéliné ÄndrÈ', + 'Çéliné Ändrè', 'çÉliné Ändrè'], + 'negative': ['ÇélIné Ändrè', 'ÇélIné Ändrè', 'ÇéliNé Ändrè', 'ÇéliNé Ändrè', + 'Çéliné ÄndrÈ', 'Çéliné ÄndrÈ', 'Çéliné Ändrè', 'Çéliné Ändrè', + 'çÉliné Ändrè', 'çÉliné Ändrè']}, + {'attr': 'attrbitStringMatch', + 'positive': ["'0001'B", "'0010'B", "'0011'B", "'0100'B", "'0101'B", "'0110'B"], + 'negative': ["'0001'B", "'0001'B", "'0010'B", "'0010'B", "'0011'B", "'0011'B", + "'0100'B", "'0100'B", "'0101'B", "'0101'B", "'0110'B", "'0110'B"]}, + {'attr': 'attrgeneralizedTimeMatch', + 'positive': ['20100218171300Z', '20100218171301Z', '20100218171302Z', + '20100218171303Z', '20100218171304Z', '20100218171305Z'], + 'negative': ['20100218171300Z', '20100218171300Z', '20100218171301Z', + '20100218171301Z', '20100218171302Z', '20100218171302Z', + '20100218171303Z', '20100218171303Z', '20100218171304Z', + '20100218171304Z', '20100218171305Z', '20100218171305Z']}, + {'attr': 'attrbooleanMatch', + 'positive': ['TRUE', 'FALSE'], + 'negative': ['TRUE', 'TRUE', 'FALSE', 'FALSE']}, + {'attr': 'attrcaseIgnoreIA5Match', + 'positive': ['sprain1', 'sprain2', 'sprain3', 'sprain4', 'sprain5', 'sprain6'], + 'negative': ['sprain1', 'sprain1', 'sprain2', 'sprain2', 'sprain3', 'sprain3', + 'sprain4', 'sprain4', 'sprain5', 'sprain5', 'sprain6', 'sprain6']}, + {'attr': 'attrcaseIgnoreMatch', + 'positive': ['ÇélIné Ändrè1', 'ÇélIné Ändrè2', 'ÇélIné Ändrè3', 'ÇélIné Ändrè4', + 'ÇélIné Ändrè5', 'ÇélIné Ändrè6'], + 'negative': ['ÇélIné Ändrè1', 'ÇélIné Ändrè1', 'ÇélIné Ändrè2', 'ÇélIné Ändrè2', + 'ÇélIné Ändrè3', 'ÇélIné Ändrè3', 'ÇélIné Ändrè4', 'ÇélIné Ändrè4', + 'ÇélIné Ändrè5', 'ÇélIné Ändrè5', 'ÇélIné Ändrè6', 'ÇélIné Ändrè6']}, + {'attr': 'attrcaseIgnoreListMatch', + 'positive': ['foo1$bar', 'foo2$bar', 'foo3$bar', 'foo4$bar', 'foo5$bar', 'foo6$bar'], + 'negative': ['foo1$bar', 'foo1$bar', 'foo2$bar', 'foo2$bar', 'foo3$bar', 'foo3$bar', + 'foo4$bar', 'foo4$bar', 'foo5$bar', 'foo5$bar', 'foo6$bar', 'foo6$bar']}, + {'attr': 'attrobjectIdentifierMatch', + 'positive': ['1.3.6.1.4.1.1466.115.121.1.15', '1.3.6.1.4.1.1466.115.121.1.24', + '1.3.6.1.4.1.1466.115.121.1.26', '1.3.6.1.4.1.1466.115.121.1.40', + '1.3.6.1.4.1.1466.115.121.1.41', 
'1.3.6.1.4.1.1466.115.121.1.6'], + 'negative': ['1.3.6.1.4.1.1466.115.121.1.15', '1.3.6.1.4.1.1466.115.121.1.15', + '1.3.6.1.4.1.1466.115.121.1.24', '1.3.6.1.4.1.1466.115.121.1.24', + '1.3.6.1.4.1.1466.115.121.1.26', '1.3.6.1.4.1.1466.115.121.1.26', + '1.3.6.1.4.1.1466.115.121.1.40', '1.3.6.1.4.1.1466.115.121.1.40', + '1.3.6.1.4.1.1466.115.121.1.41', '1.3.6.1.4.1.1466.115.121.1.41', + '1.3.6.1.4.1.1466.115.121.1.6', '1.3.6.1.4.1.1466.115.121.1.6']}, + {'attr': 'attrdirectoryStringFirstComponentMatch', + 'positive': ['ÇélIné Ändrè1', 'ÇélIné Ändrè2', 'ÇélIné Ändrè3', 'ÇélIné Ändrè4', + 'ÇélIné Ändrè5', 'ÇélIné Ändrè6'], + 'negative': ['ÇélIné Ändrè1', 'ÇélIné Ändrè1', 'ÇélIné Ändrè2', 'ÇélIné Ändrè2', + 'ÇélIné Ändrè3', 'ÇélIné Ändrè3', 'ÇélIné Ändrè4', 'ÇélIné Ändrè4', + 'ÇélIné Ändrè5', 'ÇélIné Ändrè5', 'ÇélIné Ändrè6', 'ÇélIné Ändrè6']}, + {'attr': 'attrobjectIdentifierFirstComponentMatch', + 'positive': ['1.3.6.1.4.1.1466.115.121.1.15', '1.3.6.1.4.1.1466.115.121.1.24', + '1.3.6.1.4.1.1466.115.121.1.26', '1.3.6.1.4.1.1466.115.121.1.40', + '1.3.6.1.4.1.1466.115.121.1.41', '1.3.6.1.4.1.1466.115.121.1.6'], + 'negative': ['1.3.6.1.4.1.1466.115.121.1.15', '1.3.6.1.4.1.1466.115.121.1.15', + '1.3.6.1.4.1.1466.115.121.1.24', '1.3.6.1.4.1.1466.115.121.1.24', + '1.3.6.1.4.1.1466.115.121.1.26', '1.3.6.1.4.1.1466.115.121.1.26', + '1.3.6.1.4.1.1466.115.121.1.40', '1.3.6.1.4.1.1466.115.121.1.40', + '1.3.6.1.4.1.1466.115.121.1.41', '1.3.6.1.4.1.1466.115.121.1.41', + '1.3.6.1.4.1.1466.115.121.1.6', '1.3.6.1.4.1.1466.115.121.1.6']}, + {'attr': 'attrdistinguishedNameMatch', + 'positive': ['cn=foo1,cn=bar', 'cn=foo2,cn=bar', 'cn=foo3,cn=bar', 'cn=foo4,cn=bar', + 'cn=foo5,cn=bar', 'cn=foo6,cn=bar'], + 'negative': ['cn=foo1,cn=bar', 'cn=foo1,cn=bar', 'cn=foo2,cn=bar', 'cn=foo2,cn=bar', + 'cn=foo3,cn=bar', 'cn=foo3,cn=bar', 'cn=foo4,cn=bar', 'cn=foo4,cn=bar', + 'cn=foo5,cn=bar', 'cn=foo5,cn=bar', 'cn=foo6,cn=bar', 'cn=foo6,cn=bar']}, + {'attr': 'attrintegerMatch', + 'positive': ['-2', '-1', '0', '1', '2', '3'], + 'negative': ['-2', '-2', '-1', '-1', '0', '0', '1', '1', '2', '2', '3', '3']}, + {'attr': 'attrintegerFirstComponentMatch', + 'positive': ['-2', '-1', '0', '1', '2', '3'], + 'negative': ['-2', '-2', '-1', '-1', '0', '0', '1', '1', '2', '2', '3', '3']}, + {'attr': 'attruniqueMemberMatch', + 'positive': ["cn=foo1,cn=bar#'0001'B", "cn=foo2,cn=bar#'0010'B", + "cn=foo3,cn=bar#'0011'B", "cn=foo4,cn=bar#'0100'B", + "cn=foo5,cn=bar#'0101'B", "cn=foo6,cn=bar#'0110'B"], + 'negative': ["cn=foo1,cn=bar#'0001'B", "cn=foo1,cn=bar#'0001'B", + "cn=foo2,cn=bar#'0010'B", "cn=foo2,cn=bar#'0010'B", + "cn=foo3,cn=bar#'0011'B", "cn=foo3,cn=bar#'0011'B", + "cn=foo4,cn=bar#'0100'B", "cn=foo4,cn=bar#'0100'B", + "cn=foo5,cn=bar#'0101'B", "cn=foo5,cn=bar#'0101'B", + "cn=foo6,cn=bar#'0110'B", "cn=foo6,cn=bar#'0110'B"]}, + {'attr': 'attrnumericStringMatch', + 'positive': ['00001', '00002', '00003', '00004', '00005', '00006'], + 'negative': ['00001', '00001', '00002', '00002', '00003', '00003', + '00004', '00004', '00005', '00005', '00006', '00006']}, + {'attr': 'attrtelephoneNumberMatch', + 'positive': ['+1 408 555 4798', '+1 408 555 5625', '+1 408 555 6201', + '+1 408 555 8585', '+1 408 555 9187', '+1 408 555 9423'], + 'negative': ['+1 408 555 4798', '+1 408 555 4798', '+1 408 555 5625', + '+1 408 555 5625', '+1 408 555 6201', '+1 408 555 6201', + '+1 408 555 8585', '+1 408 555 8585', '+1 408 555 9187', + '+1 408 555 9187', '+1 408 555 9423', '+1 408 555 9423']}, + {'attr': 'attroctetStringMatch', + 'positive': 
['AAAAAAAAAAAAAAE=', 'AAAAAAAAAAAAAAI=', 'AAAAAAAAAAAAAAM=', + 'AAAAAAAAAAAAAAQ=', 'AAAAAAAAAAAAAAU=', 'AAAAAAAAAAAAAAY='], + 'negative': ['AAAAAAAAAAAAAAE=', 'AAAAAAAAAAAAAAE=', 'AAAAAAAAAAAAAAI=', + 'AAAAAAAAAAAAAAI=', 'AAAAAAAAAAAAAAM=', 'AAAAAAAAAAAAAAM=', + 'AAAAAAAAAAAAAAQ=', 'AAAAAAAAAAAAAAQ=', 'AAAAAAAAAAAAAAU=', + 'AAAAAAAAAAAAAAU=', 'AAAAAAAAAAAAAAY=', 'AAAAAAAAAAAAAAY=']}] + + +LIST_MOD_DEL_ALL = [ + {'attr': 'attrbitStringMatch', + 'positive_negative': ["'0001'B"]}, + {'attr': 'attrcaseExactIA5Match', + 'positive_negative': ['Sprain']}, + {'attr': 'attrbitStringMatch', + 'positive_negative': ["'0001'B"]}, + {'attr': 'attrcaseExactMatch', + 'positive_negative': ['ÇélIné Ändrè']}, + {'attr': 'attrgeneralizedTimeMatch', + 'positive_negative': ['20100218171300Z']}, + {'attr': 'attrbooleanMatch', + 'positive_negative': ['TRUE']}, + {'attr': 'attrcaseIgnoreIA5Match', + 'positive_negative': ['sprain1']}, + {'attr': 'attrcaseIgnoreMatch', + 'positive_negative': ['ÇélIné Ändrè1']}, + {'attr': 'attrcaseIgnoreListMatch', + 'positive_negative': ['foo1$bar']}, + {'attr': 'attrobjectIdentifierMatch', + 'positive_negative': ['1.3.6.1.4.1.1466.115.121.1.15']}, + {'attr': 'attrdirectoryStringFirstComponentMatch', + 'positive_negative': ['ÇélIné Ändrè1']}, + {'attr': 'attrintegerMatch', + 'positive_negative': ['-2']}, + {'attr': 'attrintegerFirstComponentMatch', + 'positive_negative': ['cn=foo1,cn=bar']}, + {'attr': 'attrintegerFirstComponentMatch', + 'positive_negative': ['-2']}, + {'attr': 'attruniqueMemberMatch', + 'positive_negative': ["cn=foo1,cn=bar#'0001'B"]}, + {'attr': 'attrnumericStringMatch', + 'positive_negative': ['00001']}, + {'attr': 'attrtelephoneNumberMatch', + 'positive_negative': ['+1 408 555 4798']}, + {'attr': 'attroctetStringMatch', + 'positive_negative': ['AAAAAAAAAAAAAAE=']}] + + +@pytest.fixture(scope="module") +def _create_index_entry(topology_st): + """Create index entries. + :id: 9c93aec8-b87d-11e9-93b0-8c16451d917b + :setup: Standalone + :steps: + 1. Test index entries can be created. + :expected results: + 1. Pass + """ + indexes = Indexes(topology_st.standalone) + for cn_cn, index_type in LIST_CN_INDEX: + indexes.create(properties={ + 'cn': cn_cn, + 'nsSystemIndex': 'true', + 'nsIndexType': index_type + }) + + +@pytest.mark.parametrize("index", LIST_ATTR_INDEX) +def test_valid_invalid_attributes(topology_st, _create_index_entry, index): + """Test valid and invalid values of attributes + :id: 93dc9e02-b87d-11e9-b39b-8c16451d917b + :parametrized: yes + :setup: Standalone + :steps: + 1. Create entry with an attribute that uses that matching rule + 2. Delete existing entry + 3. Create entry with an attribute that uses that matching rule providing duplicate + values that are duplicates according to the equality matching rule. + :expected results: + 1. Pass + 2. Pass + 3. Fail(ldap.TYPE_OR_VALUE_EXISTS) + """ + cos = CosTemplates(topology_st.standalone, DEFAULT_SUFFIX) + # Entry with extensibleObject + entry = cos.create(properties={'cn': 'addentry' + index['attr'], + index['attr']: index['positive']}) + entry.delete() + with pytest.raises(ldap.TYPE_OR_VALUE_EXISTS): + cos.create(properties={'cn': 'addentry' + index['attr'].split('attr')[1], + index['attr']: index['negative']}) + + +@pytest.mark.parametrize("mod", LIST_MOD_ATTR_ALL) +def test_mods(topology_st, _create_index_entry, mod): + """Test valid and invalid values of attributes mods + :id: 8c15874c-b87d-11e9-9c5d-8c16451d917b + :parametrized: yes + :setup: Standalone + :steps: + 1. 
Create entry with an attribute that uses matching mod + 2. Add an attribute that uses that matching mod providing duplicate + values that are duplicates according to the equality matching. + 3. Delete existing entry + :expected results: + 1. Pass + 2. Fail(ldap.TYPE_OR_VALUE_EXISTS) + 3. Pass + """ + cos = CosTemplates(topology_st.standalone, DEFAULT_SUFFIX) + # Entry with extensibleObject + entry = cos.create(properties={'cn': 'addentry'+mod['attr'], + mod['attr']: mod['positive']}) + with pytest.raises(ldap.TYPE_OR_VALUE_EXISTS): + entry.add(mod['attr'], mod['negative']) + entry.delete() + + +@pytest.mark.parametrize("mode", LIST_MOD_REPLACE_ALL) +def test_mods_replace(topology_st, _create_index_entry, mode): + """Test modes replace + :id: 2dd46b7a-b928-11e9-91dd-8c16451d917b + :parametrized: yes + :setup: Standalone + :steps: + 1. Create entry with an attribute that uses matching mode + 2. Add an attribute that uses that matching mode providing duplicate + values that are duplicates according to the equality matching. + 3. Delete existing entry + :expected results: + 1. Pass + 2. Fail(ldap.TYPE_OR_VALUE_EXISTS) + 3. Pass + """ + cos = CosTemplates(topology_st.standalone, DEFAULT_SUFFIX) + # Entry with extensibleObject + entry = cos.create(properties={'cn': 'addentry'+mode['attr'], + mode['attr']: mode['positive']}) + with pytest.raises(ldap.TYPE_OR_VALUE_EXISTS): + entry.replace(mode['attr'], mode['negative']) + entry.delete() + + +@pytest.mark.parametrize("mode", LIST_MOD_DEL_ALL) +def test_mods_delete(topology_st, _create_index_entry, mode): + """Test modes delete + :id: 1dda055e-b928-11e9-b5c1-8c16451d917b + :parametrized: yes + :setup: Standalone + :steps: + 1. Create entry with an attribute that uses matching mode + 2. Add an attribute that uses that matching mode providing duplicate + values that are duplicates according to the equality matching. + 3. Delete existing entry + :expected results: + 1. Pass + 2. Fail(ldap.NO_SUCH_ATTRIBUTE) + 3. 
Pass + """ + cos = CosTemplates(topology_st.standalone, DEFAULT_SUFFIX) + # Entry with extensibleObject + entry = cos.create(properties={'cn': 'addentry'+mode['attr'], + mode['attr']: mode['positive_negative']}) + entry.remove(mode['attr'], mode['positive_negative'][0]) + with pytest.raises(ldap.NO_SUCH_ATTRIBUTE): + entry.remove(mode['attr'], mode['positive_negative'][0]) + entry.delete() + + +ATTR = ["( 2.16.840.1.113730.3.1.999999.0 NAME 'attroctetStringMatch' " + "DESC 'for testing matching rules' EQUALITY octetStringMatch " + "ORDERING octetStringOrderingMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.40 " + "X-ORIGIN 'matching rule tests' )", + "( 2.16.840.1.113730.3.1.999999.1 NAME 'attrbitStringMatch' DESC " + "'for testing matching rules' EQUALITY bitStringMatch SYNTAX " + "1.3.6.1.4.1.1466.115.121.1.6 X-ORIGIN 'matching rule tests' )", + "( 2.16.840.1.113730.3.1.999999.2 NAME 'attrcaseExactIA5Match' " + "DESC 'for testing matching rules' EQUALITY caseExactIA5Match " + "SUBSTR caseExactIA5SubstringsMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.26 " + "X-ORIGIN 'matching rule tests' )", + "( 2.16.840.1.113730.3.1.999999.3 NAME 'attrcaseExactMatch' DESC " + "'for testing matching rules' EQUALITY caseExactMatch ORDERING " + "caseExactOrderingMatch SUBSTR caseExactSubstringsMatch SYNTAX " + "1.3.6.1.4.1.1466.115.121.1.15 " + "X-ORIGIN 'matching rule tests' )", + "( 2.16.840.1.113730.3.1.999999.4 NAME 'attrgeneralizedTimeMatch' DESC " + "'for testing matching rules' EQUALITY generalizedTimeMatch ORDERING " + "generalizedTimeOrderingMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.24 " + "X-ORIGIN 'matching rule tests' )", + "( 2.16.840.1.113730.3.1.999999.5 NAME 'attrbooleanMatch' DESC " + "'for testing matching rules' EQUALITY booleanMatch SYNTAX " + "1.3.6.1.4.1.1466.115.121.1.7 X-ORIGIN 'matching rule tests' )", + "( 2.16.840.1.113730.3.1.999999.6 NAME 'attrcaseIgnoreIA5Match' DESC " + "'for testing matching rules' EQUALITY caseIgnoreIA5Match SUBSTR " + "caseIgnoreIA5SubstringsMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.26 " + "X-ORIGIN 'matching rule tests' )", + "( 2.16.840.1.113730.3.1.999999.7 NAME 'attrcaseIgnoreMatch' DESC " + "'for testing matching rules' EQUALITY caseIgnoreMatch ORDERING " + "caseIgnoreOrderingMatch SUBSTR caseIgnoreSubstringsMatch " + "SYNTAX 1.3.6.1.4.1.1466.115.121.1.15 X-ORIGIN 'matching rule tests' )", + "( 2.16.840.1.113730.3.1.999999.8 NAME 'attrcaseIgnoreListMatch' DESC " + "'for testing matching rules' EQUALITY caseIgnoreListMatch SUBSTR " + "caseIgnoreListSubstringsMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.41 " + "X-ORIGIN 'matching rule tests' )", + "( 2.16.840.1.113730.3.1.999999.9 NAME 'attrobjectIdentifierMatch' DESC " + "'for testing matching rules' EQUALITY objectIdentifierMatch SYNTAX " + "1.3.6.1.4.1.1466.115.121.1.38 X-ORIGIN 'matching rule tests' )", + "( 2.16.840.1.113730.3.1.999999.10 NAME 'attrdistinguishedNameMatch' DESC " + "'for testing matching rules' EQUALITY distinguishedNameMatch SYNTAX " + "1.3.6.1.4.1.1466.115.121.1.12 X-ORIGIN 'matching rule tests' )", + "( 2.16.840.1.113730.3.1.999999.11 NAME 'attrintegerMatch' DESC " + "'for testing matching rules' EQUALITY integerMatch ORDERING " + "integerOrderingMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.27 " + "X-ORIGIN 'matching rule tests' )", + "( 2.16.840.1.113730.3.1.999999.12 NAME 'attruniqueMemberMatch' DESC " + "'for testing matching rules' EQUALITY uniqueMemberMatch SYNTAX " + "1.3.6.1.4.1.1466.115.121.1.34 X-ORIGIN 'matching rule tests' )", + "( 2.16.840.1.113730.3.1.999999.13 NAME 'attrnumericStringMatch' 
DESC " + "'for testing matching rules' EQUALITY numericStringMatch ORDERING " + "numericStringOrderingMatch SUBSTR numericStringSubstringsMatch " + "SYNTAX 1.3.6.1.4.1.1466.115.121.1.36 X-ORIGIN 'matching rule tests' )", + "( 2.16.840.1.113730.3.1.999999.14 NAME 'attrtelephoneNumberMatch' DESC " + "'for testing matching rules' EQUALITY telephoneNumberMatch SUBSTR " + "telephoneNumberSubstringsMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.50 " + "X-ORIGIN 'matching rule tests' )", + "( 2.16.840.1.113730.3.1.999999.15 NAME 'attrdirectoryStringFirstComponentMatch' " + "DESC 'for testing matching rules' EQUALITY directoryStringFirstComponentMatch " + "SYNTAX 1.3.6.1.4.1.1466.115.121.1.15 X-ORIGIN 'matching rule tests' )", + "( 2.16.840.1.113730.3.1.999999.16 NAME 'attrobjectIdentifierFirstComponentMatch' " + "DESC 'for testing matching rules' EQUALITY objectIdentifierFirstComponentMatch " + "SYNTAX 1.3.6.1.4.1.1466.115.121.1.38 X-ORIGIN 'matching rule tests' )", + "( 2.16.840.1.113730.3.1.999999.17 NAME 'attrintegerFirstComponentMatch' " + "DESC 'for testing matching rules' EQUALITY integerFirstComponentMatch SYNTAX " + "1.3.6.1.4.1.1466.115.121.1.27 X-ORIGIN 'matching rule tests' )"] + + +LIST_ATTR_TO_CREATE = [ + ('entryoctetStringMatch0', 'AAAAAAAAAAAAAAE='), + ('entryoctetStringMatch1', 'AAAAAAAAAAAAAAI='), + ('entryoctetStringMatch2', 'AAAAAAAAAAAAAAM='), + ('entryoctetStringMatch3', 'AAAAAAAAAAAAAAQ='), + ('entryoctetStringMatch4', 'AAAAAAAAAAAAAAU='), + ('entryoctetStringMatch5', 'AAAAAAAAAAAAAAY='), + ('entrybitStringMatch0', "'0001'B"), + ('entrybitStringMatch1', "'0010'B"), + ('entrybitStringMatch2', "'0011'B"), + ('entrybitStringMatch3', "'0100'B"), + ('entrybitStringMatch4', "'0101'B"), + ('entrybitStringMatch5', "'0110'B"), + ('entrycaseExactIA5Match0', "Sprain"), + ('entrycaseExactIA5Match1', "sPrain"), + ('entrycaseExactIA5Match2', "spRain"), + ('entrycaseExactIA5Match3', "sprAin"), + ('entrycaseExactIA5Match4', "spraIn"), + ('entrycaseExactIA5Match5', "sprain"), + ('entrycaseExactMatch0', "ÇélIné Ändrè"), + ('entrycaseExactMatch1', "ÇéliNé Ändrè"), + ('entrycaseExactMatch2', "Çéliné ÄndrÈ"), + ('entrycaseExactMatch3', "Çéliné Ändrè"), + ('entrycaseExactMatch4', "çÉliné Ändrè"), + ('entrygeneralizedTimeMatch0', "20100218171300Z"), + ('entrygeneralizedTimeMatch1', "20100218171301Z"), + ('entrygeneralizedTimeMatch2', "20100218171302Z"), + ('entrygeneralizedTimeMatch3', "20100218171303Z"), + ('entrygeneralizedTimeMatch4', "20100218171304Z"), + ('entrygeneralizedTimeMatch5', "20100218171305Z"), + ('entrybooleanMatch0', "TRUE"), + ('entrybooleanMatch1', "FALSE"), + ('entrycaseIgnoreIA5Match0', "sprain1"), + ('entrycaseIgnoreIA5Match1', "sprain2"), + ('entrycaseIgnoreIA5Match2', "sprain3"), + ('entrycaseIgnoreIA5Match3', "sprain4"), + ('entrycaseIgnoreIA5Match4', "sprain5"), + ('entrycaseIgnoreIA5Match5', "sprain6"), + ('entrycaseIgnoreMatch0', "ÇélIné Ändrè1"), + ('entrycaseIgnoreMatch1', "ÇélIné Ändrè2"), + ('entrycaseIgnoreMatch2', "ÇélIné Ändrè3"), + ('entrycaseIgnoreMatch3', "ÇélIné Ändrè4"), + ('entrycaseIgnoreMatch4', "ÇélIné Ändrè5"), + ('entrycaseIgnoreMatch5', "ÇélIné Ändrè6"), + ('entrycaseIgnoreListMatch0', "foo1$bar"), + ('entrycaseIgnoreListMatch1', "foo2$bar"), + ('entrycaseIgnoreListMatch2', "foo3$bar"), + ('entrycaseIgnoreListMatch3', "foo4$bar"), + ('entrycaseIgnoreListMatch4', "foo5$bar"), + ('entrycaseIgnoreListMatch5', "foo6$bar"), + ('entryobjectIdentifierMatch0', "1.3.6.1.4.1.1466.115.121.1.15"), + ('entryobjectIdentifierMatch1', "1.3.6.1.4.1.1466.115.121.1.24"), + 
('entryobjectIdentifierMatch2', "1.3.6.1.4.1.1466.115.121.1.26"), + ('entryobjectIdentifierMatch3', "1.3.6.1.4.1.1466.115.121.1.40"), + ('entryobjectIdentifierMatch4', "1.3.6.1.4.1.1466.115.121.1.41"), + ('entryobjectIdentifierMatch5', "1.3.6.1.4.1.1466.115.121.1.6"), + ('entrydistinguishedNameMatch0', "cn=foo1,cn=bar"), + ('entrydistinguishedNameMatch1', "cn=foo2,cn=bar"), + ('entrydistinguishedNameMatch2', "cn=foo3,cn=bar"), + ('entrydistinguishedNameMatch3', "cn=foo4,cn=bar"), + ('entrydistinguishedNameMatch4', "cn=foo5,cn=bar"), + ('entrydistinguishedNameMatch5', "cn=foo6,cn=bar"), + ('entryintegerMatch0', "-2"), + ('entryintegerMatch1', "-1"), + ('entryintegerMatch2', "0"), + ('entryintegerMatch3', "1"), + ('entryintegerMatch4', "2"), + ('entryintegerMatch5', "3"), + ('entryuniqueMemberMatch0', "cn=foo1,cn=bar#'0001'B"), + ('entryuniqueMemberMatch1', "cn=foo2,cn=bar#'0010'B"), + ('entryuniqueMemberMatch2', "cn=foo3,cn=bar#'0011'B"), + ('entryuniqueMemberMatch3', "cn=foo4,cn=bar#'0100'B"), + ('entryuniqueMemberMatch4', "cn=foo5,cn=bar#'0101'B"), + ('entryuniqueMemberMatch5', "cn=foo6,cn=bar#'0110'B"), + ('entrynumericStringMatch0', "00001"), + ('entrynumericStringMatch1', "00002"), + ('entrynumericStringMatch2', "00003"), + ('entrynumericStringMatch3', "00004"), + ('entrynumericStringMatch4', "00005"), + ('entrynumericStringMatch5', "00006"), + ('entrytelephoneNumberMatch0', "+1 408 555 4798"), + ('entrytelephoneNumberMatch1', "+1 408 555 5625"), + ('entrytelephoneNumberMatch2', "+1 408 555 6201"), + ('entrytelephoneNumberMatch3', "+1 408 555 8585"), + ('entrytelephoneNumberMatch4', "+1 408 555 9187"), + ('entrytelephoneNumberMatch5', "+1 408 555 9423"), + ('entrydirectoryStringFirstComponentMatch0', "ÇélIné Ändrè1"), + ('entrydirectoryStringFirstComponentMatch1', "ÇélIné Ändrè2"), + ('entrydirectoryStringFirstComponentMatch2', "ÇélIné Ändrè3"), + ('entrydirectoryStringFirstComponentMatch3', "ÇélIné Ändrè4"), + ('entrydirectoryStringFirstComponentMatch4', "ÇélIné Ändrè5"), + ('entrydirectoryStringFirstComponentMatch5', "ÇélIné Ändrè6"), + ('entryobjectIdentifierFirstComponentMatch0', "1.3.6.1.4.1.1466.115.121.1.15"), + ('entryobjectIdentifierFirstComponentMatch1', "1.3.6.1.4.1.1466.115.121.1.24"), + ('entryobjectIdentifierFirstComponentMatch2', "1.3.6.1.4.1.1466.115.121.1.26"), + ('entryobjectIdentifierFirstComponentMatch3', "1.3.6.1.4.1.1466.115.121.1.40"), + ('entryobjectIdentifierFirstComponentMatch4', "1.3.6.1.4.1.1466.115.121.1.41"), + ('entryobjectIdentifierFirstComponentMatch5', "1.3.6.1.4.1.1466.115.121.1.6"), + ('entryintegerFirstComponentMatch0', "-2"), + ('entryintegerFirstComponentMatch1', "-1"), + ('entryintegerFirstComponentMatch2', "0"), + ('entryintegerFirstComponentMatch3', "1"), + ('entryintegerFirstComponentMatch4', "2"), + ('entryintegerFirstComponentMatch5', "3")] + + +@pytest.fixture(scope="module") +def _create_entries(topology_st): + """ + Add attribute types to schema and Create filter + entries(Entry with extensibleObject) + """ + for attribute in ATTR: + Schema(topology_st.standalone).add('attributetypes', attribute) + cos = CosTemplates(topology_st.standalone, DEFAULT_SUFFIX) + # Entry with extensibleObject + for attr, value in LIST_ATTR_TO_CREATE: + cos.create(properties={ + 'cn': attr, + 'attr' + attr.split('entry')[1][:-1]: value + }) + + +FILTER_VALUES = [ + ["(attrbitStringMatch='0001'B)", 1, + "(attrbitStringMatch:bitStringMatch:='000100000'B)"], + ["(attrgeneralizedTimeMatch=20100218171300Z)", 1, + "(attrcaseExactIA5Match=SPRAIN)"], + 
["(attrcaseExactMatch>=ÇélIné Ändrè)", 5, + "(attrcaseExactMatch=ÇéLINé ÄNDRè)"], + ["(attrcaseExactMatch:caseExactMatch:=ÇélIné Ändrè)", 1, + "(attrcaseExactMatch>=çéliné ändrè)"], + ["(attrcaseExactIA5Match=Sprain)", 1, + "(attrgeneralizedTimeMatch=20300218171300Z)"], + ["(attrbooleanMatch=TRUE)", 1, + "(attrgeneralizedTimeMatch>=20300218171300Z)"], + ["(attrcaseIgnoreIA5Match=sprain1)", 1, + "(attrcaseIgnoreIA5Match=sprain9999)"], + ["(attrcaseIgnoreMatch=ÇélIné Ändrè1)", 1, + "(attrcaseIgnoreMatch=ÇélIné Ändrè9999)"], + ["(attrcaseIgnoreMatch>=ÇélIné Ändrè1)", 6, + "(attrcaseIgnoreMatch>=ÇélIné Ändrè9999)"], + ["(attrcaseIgnoreListMatch=foo1$bar)", 1, + "(attrcaseIgnoreListMatch=foo1$bar$baz$biff)"], + ["(attrobjectIdentifierMatch=1.3.6.1.4.1.1466.115.121.1.15)", 1, + "(attrobjectIdentifierMatch=1.3.6.1.4.1.1466.115.121.1.15.99999)"], + ["(attrgeneralizedTimeMatch>=20100218171300Z)", 6, + "(attrdirectoryStringFirstComponentMatch=ÇélIné Ändrè9999)"], + ["(attrdirectoryStringFirstComponentMatch=ÇélIné Ändrè1)", 1, + "(attrobjectIdentifierFirstComponentMatch=1.3.6.1.4.1.1466.115.121.1.15.99999)"], + ["(attrobjectIdentifierFirstComponentMatch=1.3.6.1.4.1.1466.115.121.1.15)", 1, + "(attrdistinguishedNameMatch=cn=foo1,cn=bar,cn=baz)"], + ["(attrdistinguishedNameMatch=cn=foo1,cn=bar)", 1, + "(attrintegerMatch=-20)"], + ["(attrintegerMatch=-2)", 1, + "(attrintegerMatch>=20)"], + ["(attrintegerMatch>=-2)", 6, + "(attrintegerFirstComponentMatch=-20)"], + ["(attrintegerFirstComponentMatch=-2)", 1, + "(attruniqueMemberMatch=cn=foo1,cn=bar#'00010000'B)"], + ["(attruniqueMemberMatch=cn=foo1,cn=bar#'0001'B)", 1, + "(attrnumericStringMatch=000000001)"], + ["(attrnumericStringMatch=00001)", 1, + "(attrnumericStringMatch>=01)"], + ["(attrnumericStringMatch>=00001)", 6, + "(attrtelephoneNumberMatch=+2 408 555 4798)"], + ["(attrtelephoneNumberMatch=+1 408 555 4798)", 1, + "(attroctetStringMatch=AAAAAAAAAAAAAAEB)"], + ["(attroctetStringMatch=AAAAAAAAAAAAAAE=)", 1, + "(attroctetStringMatch>=AAAAAAAAAAABAQE=)"], + ["(attroctetStringMatch>=AAAAAAAAAAAAAAE=)", 6, + "(attrdirectoryStringFirstComponentMatch=ÇélIné Ändrè9998)"]] + + +def test_search_positive_negative(topology_st, _create_entries): + """Filters with positive and with no output. + :id: abe3e6dd-9ecc-12e8-adf0-8c16451d917b + :parametrized: yes + :setup: Standalone + :steps: + 1.For valid filer output should match the exact value given. + 2. For invalid filter there should not be any output. + :expected results: + 1. Pass + 2. 
Pass + """ + cos = CosTemplates(topology_st.standalone, DEFAULT_SUFFIX) + for attr, value, negative_filter in FILTER_VALUES: + assert len(cos.filter(attr)) == value + assert not cos.filter(negative_filter) + + +LIST_EXT_ATTR_COUNT = [ + ("(attrbitStringMatch:bitStringMatch:='0001'B)", 1), + ("(attrcaseExactIA5Match:caseExactIA5Match:=Sprain)", 1), + ("(attrcaseExactMatch:caseExactMatch:=ÇélIné Ändrè)", 1), + ("(attrcaseExactMatch:caseExactOrderingMatch:=ÇélIné Ändrè)", 5), + ("(attrgeneralizedTimeMatch:generalizedTimeMatch:=20100218171300Z)", 1), + ("(attrgeneralizedTimeMatch:generalizedTimeOrderingMatch:=20100218171300Z)", 6), + ("(attrbooleanMatch:booleanMatch:=TRUE)", 1), + ("(attrcaseIgnoreIA5Match:caseIgnoreIA5Match:=sprain1)", 1), + ("(attrcaseIgnoreMatch:caseIgnoreMatch:=ÇélIné Ändrè1)", 1), + ("(attrcaseIgnoreMatch:caseIgnoreOrderingMatch:=ÇélIné Ändrè1)", 6), + ("(attrcaseIgnoreListMatch:caseIgnoreListMatch:=foo1$bar)", 1), + ("(attrobjectIdentifierMatch:objectIdentifierMatch:=1.3.6.1.4.1.1466.115.121.1.15)", 1), + ("(attrdirectoryStringFirstComponentMatch:directoryString" + "FirstComponentMatch:=ÇélIné Ändrè1)", 1), + ("(attrobjectIdentifierFirstComponentMatch:objectIdentifier" + "FirstComponentMatch:=1.3.6.1.4.1.1466.115.121.1.15)", 1), + ("(attrdistinguishedNameMatch:distinguishedNameMatch:=cn=foo1,cn=bar)", 1), + ("(attrintegerMatch:integerMatch:=-2)", 1), + ("(attrintegerMatch:integerOrderingMatch:=-2)", 6), + ("(attrintegerFirstComponentMatch:integerFirstComponentMatch:=-2)", 1), + ("(attruniqueMemberMatch:uniqueMemberMatch:=cn=foo1,cn=bar#'0001'B)", 1), + ("(attrnumericStringMatch:numericStringMatch:=00001)", 1), + ("(attrnumericStringMatch:numericStringMatch:=00001)", 1), + ("(attrtelephoneNumberMatch:telephoneNumberMatch:=+1 408 555 4798)", 1), + ("(attroctetStringMatch:octetStringMatch:=AAAAAAAAAAAAAAE=)", 1), + ("(attroctetStringMatch:octetStringOrderingMatch:=AAAAAAAAAAAAAAE=)", 6), + ("(attrcaseExactMatch=*ÇélIné Ändrè*)", 1), + ("(attrcaseExactMatch=ÇélIné Ändrè*)", 1), + ("(attrcaseExactMatch=*ÇélIné Ändrè)", 1), + ("(attrcaseExactMatch=*é Ä*)", 5), + ("(attrcaseExactIA5Match=*Sprain*)", 1), + ("(attrcaseExactIA5Match=Sprain*)", 1), + ("(attrcaseExactIA5Match=*Sprain)", 1), + ("(attrcaseExactIA5Match=*rai*)", 3), + ("(attrcaseIgnoreIA5Match=*sprain1*)", 1), + ("(attrcaseIgnoreIA5Match=sprain1*)", 1), + ("(attrcaseIgnoreIA5Match=*sprain1)", 1), + ("(attrcaseIgnoreIA5Match=*rai*)", 6), + ("(attrcaseIgnoreMatch=*ÇélIné Ändrè1*)", 1), + ("(attrcaseIgnoreMatch=ÇélIné Ändrè1*)", 1), + ("(attrcaseIgnoreMatch=*ÇélIné Ändrè1)", 1), + ("(attrcaseIgnoreMatch=*é Ä*)", 6), + ("(attrcaseIgnoreListMatch=*foo1$bar*)", 1), + ("(attrcaseIgnoreListMatch=foo1$bar*)", 1), + ("(attrcaseIgnoreListMatch=*foo1$bar)", 1), + ("(attrcaseIgnoreListMatch=*1$b*)", 1), + ("(attrnumericStringMatch=*00001*)", 1), + ("(attrnumericStringMatch=00001*)", 1), + ("(attrnumericStringMatch=*00001)", 1), + ("(attrnumericStringMatch=*000*)", 6), + ("(attrtelephoneNumberMatch=*+1 408 555 4798*)", 1), + ("(attrtelephoneNumberMatch=+1 408 555 4798*)", 1), + ("(attrtelephoneNumberMatch=*+1 408 555 4798)", 1), + ("(attrtelephoneNumberMatch=* 55*)", 6)] + + +@pytest.mark.parametrize("attr, value", LIST_EXT_ATTR_COUNT) +def test_do_extensible_search(topology_st, _create_entries, attr, value): + """Match filter and output. + :id: abe3e6dd-9ecc-11e8-adf0-8c16451d917c + :parametrized: yes + :setup: Standalone + :steps: + 1. Filer output should match the exact value given. + :expected results: + 1. 
Pass + """ + cos = CosTemplates(topology_st.standalone, DEFAULT_SUFFIX) + assert len(cos.filter(attr)) == value + + +if __name__ == '__main__': + CURRENT_FILE = os.path.realpath(__file__) + pytest.main("-s -v %s" % CURRENT_FILE) diff --git a/dirsrvtests/tests/suites/filter/filter_indexing_test.py b/dirsrvtests/tests/suites/filter/filter_indexing_test.py new file mode 100644 index 0000000..bebb6c6 --- /dev/null +++ b/dirsrvtests/tests/suites/filter/filter_indexing_test.py @@ -0,0 +1,169 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2019 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. +# --- END COPYRIGHT BLOCK ---- + + +""" +verify and testing indexing Filter from a search +""" + +import os +import pytest + +from lib389._constants import DEFAULT_SUFFIX, PW_DM +from lib389.topologies import topology_st as topo +from lib389.idm.user import UserAccounts +from lib389.idm.account import Accounts +from lib389.cos import CosTemplates +from lib389.schema import Schema + +pytestmark = pytest.mark.tier1 + + +FILTERS = ["(|(|(ou=nothing1)(ou=people))(|(ou=nothing2)(ou=nothing3)))", + "(|(|(ou=people)(ou=nothing1))(|(ou=nothing2)(ou=nothing3)))", + "(|(|(ou=nothing1)(ou=nothing2))(|(ou=people)(ou=nothing3)))", + "(|(|(ou=nothing1)(ou=nothing2))(|(ou=nothing3)(ou=people)))", + "(&(sn<=0000000000000000)(givenname>=FFFFFFFFFFFFFFFF))", + "(&(sn>=0000000000000000)(sn<=1111111111111111))", + "(&(sn>=0000000000000000)(givenname<=FFFFFFFFFFFFFFFF))"] + +INDEXES = ["(uidNumber=18446744073709551617)", + "(gidNumber=18446744073709551617)", + "(MYINTATTR=18446744073709551617)", + "(&(uidNumber=*)(!(uidNumber=18446744073709551617)))", + "(&(gidNumber=*)(!(gidNumber=18446744073709551617)))", + "(&(uidNumber=*)(!(gidNumber=18446744073709551617)))", + "(&(myintattr=*)(!(myintattr=18446744073709551617)))", + "(uidNumber>=-18446744073709551617)", + "(gidNumber>=-18446744073709551617)", + "(uidNumber<=18446744073709551617)", + "(gidNumber<=18446744073709551617)", + "(myintattr<=18446744073709551617)"] + + +INDEXES_FALSE = ["(gidNumber=54321)", + "(uidNumber=54321)", + "(myintattr=54321)", + "(gidNumber<=-999999999999999999999999999999)", + "(uidNumber<=-999999999999999999999999999999)", + "(myintattr<=-999999999999999999999999999999)", + "(gidNumber>=999999999999999999999999999999)", + "(uidNumber>=999999999999999999999999999999)", + "(myintattr>=999999999999999999999999999999)"] + + +@pytest.fixture(scope="module") +def _create_entries(topo): + """ + Will create necessary users for this script. 
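+    It adds three UserAccounts whose sn and givenname carry the 16-character
+    values targeted by the FILTERS range searches, plus CosTemplates under
+    ou=People whose uidNumber, gidNumber and myintattr are set to values around
+    +/-2^64 (and 0, +/-2) so the INDEXES and INDEXES_FALSE filters have data to hit.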
+ """ + # Creating Users + users_people = UserAccounts(topo.standalone, DEFAULT_SUFFIX) + + for count in range(3): + users_people.create(properties={ + 'ou': ['Accounting', 'People'], + 'cn': f'User {count}F', + 'sn': f'{count}' * 16, + 'givenname': 'FFFFFFFFFFFFFFFF', + 'uid': f'user{count}F', + 'mail': f'user{count}F@test.com', + 'manager': f'uid=user{count}F,ou=People,{DEFAULT_SUFFIX}', + 'userpassword': PW_DM, + 'homeDirectory': '/home/' + f'user{count}F', + 'uidNumber': '1000', + 'gidNumber': '2000', + }) + + cos = CosTemplates(topo.standalone, DEFAULT_SUFFIX, rdn='ou=People') + for user, number, des in [('a', '18446744073709551617', '2^64+1'), + ('b', '18446744073709551618', '2^64+1'), + ('c', '-18446744073709551617', '-2^64+1'), + ('d', '-18446744073709551618', '-2^64+1'), + ('e', '0', '0'), + ('f', '2', '2'), + ('g', '-2', '-2')]: + cos.create(properties={ + 'cn': user, + 'uidnumber': number, + 'gidnumber': number, + 'myintattr': number, + 'description': f'uidnumber value {des} - gidnumber is same but not indexed' + }) + + +@pytest.mark.parametrize("real_value", FILTERS) +def test_positive(topo, _create_entries, real_value): + """Test positive filters + + :id: 57243326-91ae-11e9-aca3-8c16451d917b + :parametrized: yes + :setup: Standalone + :steps: + 1. Try to pass filter rules as per the condition . + :expected results: + 1. Pass + """ + assert Accounts(topo.standalone, DEFAULT_SUFFIX).filter(real_value) + + +def test_indexing_schema(topo, _create_entries): + """Test with schema + :id: 67a2179a-91ae-11e9-9a33-8c16451d917b + :setup: Standalone + :steps: + 1. Add attribute types to Schema. + 2. Try to pass filter rules as per the condition . + :expected results: + 1. Pass + 2. Pass + """ + cos = CosTemplates(topo.standalone, DEFAULT_SUFFIX, rdn='ou=People') + Schema(topo.standalone).add('attributetypes', + "( 8.9.10.11.12.13.14.15 NAME 'myintattr' DESC 'for integer " + "syntax index ordering testing' EQUALITY integerMatch ORDERING " + "integerOrderingMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.27 )") + topo.standalone.restart() + assert cos.filter("(myintattr>=-18446744073709551617)") + + +@pytest.mark.parametrize("real_value", INDEXES) +def test_indexing(topo, _create_entries, real_value): + """Test positive index filters + + :id: 7337589a-91ae-11e9-ad44-8c16451d917b + :parametrized: yes + :setup: Standalone + :steps: + 1. Try to pass filter rules as per the condition . + :expected results: + 1. Pass + """ + cos = CosTemplates(topo.standalone, DEFAULT_SUFFIX, rdn='ou=People') + assert cos.filter(real_value) + + +@pytest.mark.parametrize("real_value", INDEXES_FALSE) +def test_indexing_negative(topo, _create_entries, real_value): + """Test negative index filters + + :id: 7e19deae-91ae-11e9-900c-8c16451d917b + :parametrized: yes + :setup: Standalone + :steps: + 1. Try to pass negative filter rules as per the condition . + :expected results: + 1. Fail + """ + cos = CosTemplates(topo.standalone, DEFAULT_SUFFIX, rdn='ou=People') + assert not cos.filter(real_value) + + +if __name__ == '__main__': + CURRENT_FILE = os.path.realpath(__file__) + pytest.main("-s -v %s" % CURRENT_FILE) diff --git a/dirsrvtests/tests/suites/filter/filter_logic_test.py b/dirsrvtests/tests/suites/filter/filter_logic_test.py new file mode 100644 index 0000000..d600916 --- /dev/null +++ b/dirsrvtests/tests/suites/filter/filter_logic_test.py @@ -0,0 +1,447 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2017 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). 
+# See LICENSE for details. +# --- END COPYRIGHT BLOCK --- +# + +import pytest +import ldap + +from lib389.topologies import topology_st +from lib389._constants import DEFAULT_SUFFIX + +from lib389.idm.user import UserAccounts + +pytestmark = pytest.mark.tier1 + +""" +This test case asserts that various logical filters apply correctly and as expected. +This is to assert that we have correct and working search operations, especially related +to indexed content from filterindex.c and idl_sets. + +important to note, some tests check greater than 10 elements to assert that k-way intersect +works, where as most of these actually hit the filtertest threshold so they early return. +""" + +USER0_DN = 'uid=user0,ou=People,%s' % DEFAULT_SUFFIX +USER1_DN = 'uid=user1,ou=People,%s' % DEFAULT_SUFFIX +USER2_DN = 'uid=user2,ou=People,%s' % DEFAULT_SUFFIX +USER3_DN = 'uid=user3,ou=People,%s' % DEFAULT_SUFFIX +USER4_DN = 'uid=user4,ou=People,%s' % DEFAULT_SUFFIX +USER5_DN = 'uid=user5,ou=People,%s' % DEFAULT_SUFFIX +USER6_DN = 'uid=user6,ou=People,%s' % DEFAULT_SUFFIX +USER7_DN = 'uid=user7,ou=People,%s' % DEFAULT_SUFFIX +USER8_DN = 'uid=user8,ou=People,%s' % DEFAULT_SUFFIX +USER9_DN = 'uid=user9,ou=People,%s' % DEFAULT_SUFFIX +USER10_DN = 'uid=user10,ou=People,%s' % DEFAULT_SUFFIX +USER11_DN = 'uid=user11,ou=People,%s' % DEFAULT_SUFFIX +USER12_DN = 'uid=user12,ou=People,%s' % DEFAULT_SUFFIX +USER13_DN = 'uid=user13,ou=People,%s' % DEFAULT_SUFFIX +USER14_DN = 'uid=user14,ou=People,%s' % DEFAULT_SUFFIX +USER15_DN = 'uid=user15,ou=People,%s' % DEFAULT_SUFFIX +USER16_DN = 'uid=user16,ou=People,%s' % DEFAULT_SUFFIX +USER17_DN = 'uid=user17,ou=People,%s' % DEFAULT_SUFFIX +USER18_DN = 'uid=user18,ou=People,%s' % DEFAULT_SUFFIX +USER19_DN = 'uid=user19,ou=People,%s' % DEFAULT_SUFFIX + +@pytest.fixture(scope="module") +def topology_st_f(topology_st): + # Add our users to the topology_st + users = UserAccounts(topology_st.standalone, DEFAULT_SUFFIX) + for i in range(0, 20): + users.create(properties={ + 'uid': 'user%s' % i, + 'cn': 'user%s' % i, + 'sn': '%s' % i, + 'uidNumber': '%s' % i, + 'gidNumber': '%s' % i, + 'homeDirectory': '/home/user%s' % i + }) + # return it + # print("ATTACH NOW") + # import time + # time.sleep(30) + return topology_st.standalone + +def _check_filter(topology_st_f, filt, expect_len, expect_dns): + # print("checking %s" % filt) + results = topology_st_f.search_s("ou=People,%s" % DEFAULT_SUFFIX, ldap.SCOPE_ONELEVEL, filt, ['uid',]) + assert len(results) == expect_len + result_dns = [result.dn for result in results] + assert set(expect_dns) == set(result_dns) + + +def test_eq(topology_st_f): + """Test filter logic with "equal to" operator + + :id: 1b0b7e59-a5ac-4825-8d36-525f4f0149a9 + :setup: Standalone instance with 20 test users added + from uid=user0 to uid=user20 + :steps: + 1. Search for test users with filter '(uid=user0)' + :expectedresults: + 1. There should be 1 user listed user0 + """ + _check_filter(topology_st_f, '(uid=user0)', 1, [USER0_DN]) + + +def test_sub(topology_st_f): + """Test filter logic with "sub" + + :id: 8cfa946d-7ddf-4f8e-9f9f-39da8f35304e + :setup: Standalone instance with 20 test users added + from uid=user0 to uid=user20 + :steps: + 1. Search for test users with filter (uid=user*) + :expectedresults: + 1. 
There should be 20 users listed from user0 to user19 + """ + _check_filter(topology_st_f, '(uid=user*)', 20, [ + USER0_DN, USER1_DN, USER2_DN, USER3_DN, USER4_DN, + USER5_DN, USER6_DN, USER7_DN, USER8_DN, USER9_DN, + USER10_DN, USER11_DN, USER12_DN, USER13_DN, USER14_DN, + USER15_DN, USER16_DN, USER17_DN, USER18_DN, USER19_DN + ]) + + +def test_not_eq(topology_st_f): + """Test filter logic with "not equal to" operator + + :id: 1422ec65-421d-473b-89ba-649f8decc1ab + :setup: Standalone instance with 20 test users added + from uid=user0 to uid=user20 + :steps: + 1. Search for test users with filter (!(uid=user0) + :expectedresults: + 1. There should be 19 users listed from user1 to user19 + """ + _check_filter(topology_st_f, '(!(uid=user0))', 19, [ + USER1_DN, USER2_DN, USER3_DN, USER4_DN, USER5_DN, + USER6_DN, USER7_DN, USER8_DN, USER9_DN, + USER10_DN, USER11_DN, USER12_DN, USER13_DN, USER14_DN, + USER15_DN, USER16_DN, USER17_DN, USER18_DN, USER19_DN + ]) + +# More not cases? + +def test_ranges(topology_st_f): + """Test filter logic with range + + :id: cc7c25f0-6a6e-465b-8d32-7fcc1aec84ee + :setup: Standalone instance with 20 test users added + from uid=user0 to uid=user20 + :steps: + 1. Search for test users with filter (uid>=user5) + 2. Search for test users with filter (uid<=user4) + 3. Search for test users with filter (uid>=ZZZZ) + 4. Search for test users with filter (uid<=aaaa) + :expectedresults: + 1. There should be 5 users listed from user5 to user9 + 2. There should be 15 users listed from user0 to user4 + and from user10 to user19 + 3. There should not be any user listed + 4. There should not be any user listed + """ + + ### REMEMBER: user10 is less than user5 because it's strcmp!!! + _check_filter(topology_st_f, '(uid>=user5)', 5, [ + USER5_DN, USER6_DN, USER7_DN, USER8_DN, USER9_DN, + ]) + _check_filter(topology_st_f, '(uid<=user4)', 15, [ + USER0_DN, USER1_DN, USER2_DN, USER3_DN, USER4_DN, + USER10_DN, USER11_DN, USER12_DN, USER13_DN, USER14_DN, + USER15_DN, USER16_DN, USER17_DN, USER18_DN, USER19_DN + ]) + _check_filter(topology_st_f, '(uid>=ZZZZ)', 0, []) + _check_filter(topology_st_f, '(uid<=aaaa)', 0, []) + + +def test_and_eq(topology_st_f): + """Test filter logic with "AND" operator + + :id: 4721fd7c-8d0b-43e6-b2e8-a5bac7674f99 + :setup: Standalone instance with 20 test users added + from uid=user0 to uid=user20 + :steps: + 1. Search for test users with filter (&(uid=user0)(cn=user0)) + 2. Search for test users with filter (&(uid=user0)(cn=user1)) + 3. Search for test users with filter (&(uid=user0)(cn=user0)(sn=0)) + 4. Search for test users with filter (&(uid=user0)(cn=user1)(sn=0)) + 5. Search for test users with filter (&(uid=user0)(cn=user0)(sn=1)) + :expectedresults: + 1. There should be 1 user listed i.e. user0 + 2. There should not be any user listed + 3. There should be 1 user listed i.e. user0 + 4. There should not be any user listed + 5. There should not be any user listed + """ + _check_filter(topology_st_f, '(&(uid=user0)(cn=user0))', 1, [USER0_DN]) + _check_filter(topology_st_f, '(&(uid=user0)(cn=user1))', 0, []) + _check_filter(topology_st_f, '(&(uid=user0)(cn=user0)(sn=0))', 1, [USER0_DN]) + _check_filter(topology_st_f, '(&(uid=user0)(cn=user1)(sn=0))', 0, []) + _check_filter(topology_st_f, '(&(uid=user0)(cn=user0)(sn=1))', 0, []) + + +def test_range(topology_st_f): + """Test filter logic with range + + :id: 617e6290-866e-4b5d-a300-d8f1715ad052 + :setup: Standalone instance with 20 test users added + from uid=user0 to uid=user20 + :steps: + 1. 
Search for test users with filter (&(uid>=user5)(cn<=user7)) + :expectedresults: + 1. There should be 3 users listed i.e. user5 to user7 + """ + _check_filter(topology_st_f, '(&(uid>=user5)(cn<=user7))', 3, [ + USER5_DN, USER6_DN, USER7_DN + ]) + + +def test_and_allid_shortcut(topology_st_f): + """Test filter logic with "AND" operator + and shortcuts + + :id: f4784752-d269-4ceb-aada-fafe0a5fc14c + :setup: Standalone instance with 20 test users added + from uid=user0 to uid=user20 + :steps: + 1. Search for test users with filter (&(objectClass=*)(uid=user0)(cn=user0)) + 2. Search for test users with filter (&(uid=user0)(cn=user0)(objectClass=*)) + :expectedresults: + 1. There should be 1 user listed i.e. user0 + 2. There should be 1 user listed i.e. user0 + """ + _check_filter(topology_st_f, '(&(objectClass=*)(uid=user0)(cn=user0))', 1, [USER0_DN]) + _check_filter(topology_st_f, '(&(uid=user0)(cn=user0)(objectClass=*))', 1, [USER0_DN]) + + +def test_or_eq(topology_st_f): + """Test filter logic with "or" and "equal to" operators + + :id: a23a4fc9-0f5c-49ce-b1f7-6ac10bcd7763 + :setup: Standalone instance with 20 test users added + from uid=user0 to uid=user20 + :steps: + 1. Search for test users with filter (|(uid=user0)(cn=user0)) + 2. Search for test users with filter (|(uid=user0)(uid=user1)) + 3. Search for test users with filter (|(uid=user0)(cn=user0)(sn=0)) + 4. Search for test users with filter (|(uid=user0)(uid=user1)(sn=0)) + 5. Search for test users with filter (|(uid=user0)(uid=user1)(uid=user2)) + :expectedresults: + 1. There should be 1 user listed i.e. user0 + 2. There should be 2 users listed i.e. user0 and user1 + 3. There should be 1 user listed i.e. user0 + 4. There should be 2 users listed i.e. user0 and user1 + 5. There should be 3 users listed i.e. user0 to user2 + """ + _check_filter(topology_st_f, '(|(uid=user0)(cn=user0))', 1, [USER0_DN]) + _check_filter(topology_st_f, '(|(uid=user0)(uid=user1))', 2, [USER0_DN, USER1_DN]) + _check_filter(topology_st_f, '(|(uid=user0)(cn=user0)(sn=0))', 1, [USER0_DN]) + _check_filter(topology_st_f, '(|(uid=user0)(uid=user1)(sn=0))', 2, [USER0_DN, USER1_DN]) + _check_filter(topology_st_f, '(|(uid=user0)(uid=user1)(uid=user2))', 3, [USER0_DN, USER1_DN, USER2_DN]) + + +def test_and_not_eq(topology_st_f): + """Test filter logic with "not equal" to operator + + :id: bd00cb2b-35bb-49c0-8387-f60a6ada7c87 + :setup: Standalone instance with 20 test users added + from uid=user0 to uid=user20 + :steps: + 1. Search for test users with filter (&(uid=user0)(!(cn=user0))) + 2. Search for test users with filter (&(uid=*)(!(uid=user0))) + :expectedresults: + 1. There should be no users listed + 2. There should be 19 users listed i.e. user1 to user19 + """ + _check_filter(topology_st_f, '(&(uid=user0)(!(cn=user0)))', 0, []) + _check_filter(topology_st_f, '(&(uid=*)(!(uid=user0)))', 19, [ + USER1_DN, USER2_DN, USER3_DN, USER4_DN, USER5_DN, + USER6_DN, USER7_DN, USER8_DN, USER9_DN, + USER10_DN, USER11_DN, USER12_DN, USER13_DN, USER14_DN, + USER15_DN, USER16_DN, USER17_DN, USER18_DN, USER19_DN + ]) + + +def test_or_not_eq(topology_st_f): + """Test filter logic with "OR and NOT" operators + + :id: 8f62f339-72c9-49e4-8126-b2a14e61b9c0 + :setup: Standalone instance with 20 test users added + from uid=user0 to uid=user20 + :steps: + 1. Search for test users with filter (|(!(uid=user0))(!(uid=user1))) + :expectedresults: + 1. There should be 20 users listed i.e. 
user0 to user19 + """ + _check_filter(topology_st_f, '(|(!(uid=user0))(!(uid=user1)))', 20, [ + USER0_DN, USER1_DN, USER2_DN, USER3_DN, USER4_DN, + USER5_DN, USER6_DN, USER7_DN, USER8_DN, USER9_DN, + USER10_DN, USER11_DN, USER12_DN, USER13_DN, USER14_DN, + USER15_DN, USER16_DN, USER17_DN, USER18_DN, USER19_DN + ]) + + +def test_and_range(topology_st_f): + """Test filter logic with range + + :id: 8e5a0e2a-4ee1-4cd7-b5ec-90ad4d3ace64 + :setup: Standalone instance with 20 test users added + from uid=user0 to uid=user20 + :steps: + 1. Search for test users with filter (&(uid>=user5)(uid=user6)) + 2. Search for test users with filter (&(uid>=user5)(uid=user0)) + 3. Search for test users with filter (&(uid>=user5)(uid=user6)(sn=6)) + 4. Search for test users with filter (&(uid>=user5)(uid=user0)(sn=0)) + 5. Search for test users with filter (&(uid>=user5)(uid=user0)(sn=1)) + 6. Search for test users with filter (&(uid>=user5)(uid>=user6)) + 7. Search for test users with filter (&(uid>=user5)(uid>=user6)(uid>=user7)) + :expectedresults: + 1. There should be 1 user listed i.e. user6 + 2. There should be no users listed + 3. There should be 1 user listed i.e. user6 + 4. There should be no users listed + 5. There should be no users listed + 6. There should be 4 users listed i.e. user6 to user9 + 7. There should be 3 users listed i.e. user7 to user9 + """ + # These all hit shortcut cases. + _check_filter(topology_st_f, '(&(uid>=user5)(uid=user6))', 1, [USER6_DN]) + _check_filter(topology_st_f, '(&(uid>=user5)(uid=user0))', 0, []) + _check_filter(topology_st_f, '(&(uid>=user5)(uid=user6)(sn=6))', 1, [USER6_DN]) + _check_filter(topology_st_f, '(&(uid>=user5)(uid=user0)(sn=0))', 0, []) + _check_filter(topology_st_f, '(&(uid>=user5)(uid=user0)(sn=1))', 0, []) + # These all take 2-way or k-way cases. + _check_filter(topology_st_f, '(&(uid>=user5)(uid>=user6))', 4, [ + USER6_DN, USER7_DN, USER8_DN, USER9_DN, + ]) + _check_filter(topology_st_f, '(&(uid>=user5)(uid>=user6)(uid>=user7))', 3, [ + USER7_DN, USER8_DN, USER9_DN, + ]) + + + +def test_or_range(topology_st_f): + """Test filter logic with range + + :id: bc413e74-667a-48b0-8fbd-e9b7d18a01e4 + :setup: Standalone instance with 20 test users added + from uid=user0 to uid=user20 + :steps: + 1. Search for test users with filter (|(uid>=user5)(uid=user6)) + 2. Search for test users with filter (|(uid>=user5)(uid=user0)) + :expectedresults: + 1. There should be 5 users listed i.e. user5 to user9 + 2. There should be 6 users listed i.e. user5 to user9 and user0 + """ + _check_filter(topology_st_f, '(|(uid>=user5)(uid=user6))', 5, [ + USER5_DN, USER6_DN, USER7_DN, USER8_DN, USER9_DN, + ]) + _check_filter(topology_st_f, '(|(uid>=user5)(uid=user0))', 6, [ + USER0_DN, + USER5_DN, USER6_DN, USER7_DN, USER8_DN, USER9_DN, + ]) + + +def test_and_and_eq(topology_st_f): + """Test filter logic with "AND" and "equal to" operators + + :id: 5c66eb38-d01f-459e-81e4-d335f97211c7 + :setup: Standalone instance with 20 test users added + from uid=user0 to uid=user20 + :steps: + 1. Search for test users with filter (&(&(uid=user0)(sn=0))(cn=user0)) + 2. Search for test users with filter (&(&(uid=user1)(sn=0))(cn=user0)) + 3. Search for test users with filter (&(&(uid=user0)(sn=1))(cn=user0)) + 4. Search for test users with filter (&(&(uid=user0)(sn=0))(cn=user1)) + :expectedresults: + 1. There should be 1 user listed i.e. user0 + 2. There should be no users listed + 3. There should be no users listed + 4. 
There should be no users listed + """ + _check_filter(topology_st_f, '(&(&(uid=user0)(sn=0))(cn=user0))', 1, [USER0_DN]) + _check_filter(topology_st_f, '(&(&(uid=user1)(sn=0))(cn=user0))', 0, []) + _check_filter(topology_st_f, '(&(&(uid=user0)(sn=1))(cn=user0))', 0, []) + _check_filter(topology_st_f, '(&(&(uid=user0)(sn=0))(cn=user1))', 0, []) + + +def test_or_or_eq(topology_st_f): + """Test filter logic with "AND" and "equal to" operators + + :id: 0cab4bbd-637c-419d-8069-ad5463ecaa75 + :setup: Standalone instance with 20 test users added + from uid=user0 to uid=user20 + :steps: + 1. Search for test users with filter (|(|(uid=user0)(sn=0))(cn=user0)) + 2. Search for test users with filter (|(|(uid=user1)(sn=0))(cn=user0)) + 3. Search for test users with filter (|(|(uid=user0)(sn=1))(cn=user0)) + 4. Search for test users with filter (|(|(uid=user0)(sn=0))(cn=user1)) + 5. Search for test users with filter (|(|(uid=user0)(sn=1))(cn=user2)) + :expectedresults: + 1. There should be 1 user listed i.e. user0 + 2. There should be 2 users listed i.e. user0, user1 + 3. There should be 2 users listed i.e. user0, user1 + 4. There should be 2 users listed i.e. user0, user1 + 5. There should be 3 users listed i.e. user0, user1 and user2 + """ + _check_filter(topology_st_f, '(|(|(uid=user0)(sn=0))(cn=user0))', 1, [USER0_DN]) + _check_filter(topology_st_f, '(|(|(uid=user1)(sn=0))(cn=user0))', 2, [USER0_DN, USER1_DN]) + _check_filter(topology_st_f, '(|(|(uid=user0)(sn=1))(cn=user0))', 2, [USER0_DN, USER1_DN]) + _check_filter(topology_st_f, '(|(|(uid=user0)(sn=0))(cn=user1))', 2, [USER0_DN, USER1_DN]) + _check_filter(topology_st_f, '(|(|(uid=user0)(sn=1))(cn=user2))', 3, [USER0_DN, USER1_DN, USER2_DN]) + + +def test_and_or_eq(topology_st_f): + """Test filter logic with "AND" and "equal to" operators + + :id: 2ce7cc2e-6058-422d-ac3e-e678decf1cc4 + :setup: Standalone instance with 20 test users added + from uid=user0 to uid=user20 + :steps: + 1. Search for test users with filter (&(|(uid=user0)(sn=0))(cn=user0)) + 2. Search for test users with filter (&(|(uid=user1)(sn=0))(cn=user0)) + 3. Search for test users with filter (&(|(uid=user0)(sn=1))(cn=user0)) + 4. Search for test users with filter (&(|(uid=user0)(sn=0))(cn=user1)) + 5. Search for test users with filter (&(|(uid=user0)(sn=1))(cn=*)) + :expectedresults: + 1. There should be 1 user listed i.e. user0 + 2. There should be 1 user listed i.e. user0 + 3. There should be 1 user listed i.e. user0 + 4. There should be no users listed + 5. There should be 2 users listed i.e. user0 and user1 + """ + _check_filter(topology_st_f, '(&(|(uid=user0)(sn=0))(cn=user0))', 1, [USER0_DN]) + _check_filter(topology_st_f, '(&(|(uid=user1)(sn=0))(cn=user0))', 1, [USER0_DN]) + _check_filter(topology_st_f, '(&(|(uid=user0)(sn=1))(cn=user0))', 1, [USER0_DN]) + _check_filter(topology_st_f, '(&(|(uid=user0)(sn=0))(cn=user1))', 0, []) + _check_filter(topology_st_f, '(&(|(uid=user0)(sn=1))(cn=*))', 2, [USER0_DN, USER1_DN]) + + +def test_or_and_eq(topology_st_f): + """Test filter logic with "AND" and "equal to" operators + + :id: ee9fb400-451a-479e-852c-f59b4c937a8d + :setup: Standalone instance with 20 test users added + from uid=user0 to uid=user20 + :steps: + 1. Search for test users with filter (|(&(uid=user0)(sn=0))(uid=user0)) + 2. Search for test users with filter (|(&(uid=user1)(sn=2))(uid=user0)) + 3. Search for test users with filter (|(&(uid=user0)(sn=1))(uid=user0)) + 4. Search for test users with filter (|(&(uid=user1)(sn=1))(uid=user0)) + :expectedresults: + 1. 
There should be 1 user listed i.e. user0 + 2. There should be 1 user listed i.e. user0 + 3. There should be 1 user listed i.e. user0 + 4. There should be 2 user listed i.e. user0 and user1 + """ + _check_filter(topology_st_f, '(|(&(uid=user0)(sn=0))(uid=user0))', 1, [USER0_DN]) + _check_filter(topology_st_f, '(|(&(uid=user1)(sn=2))(uid=user0))', 1, [USER0_DN]) + _check_filter(topology_st_f, '(|(&(uid=user0)(sn=1))(uid=user0))', 1, [USER0_DN]) + _check_filter(topology_st_f, '(|(&(uid=user1)(sn=1))(uid=user0))', 2, [USER0_DN, USER1_DN]) + + diff --git a/dirsrvtests/tests/suites/filter/filter_match_test.py b/dirsrvtests/tests/suites/filter/filter_match_test.py new file mode 100644 index 0000000..6339a99 --- /dev/null +++ b/dirsrvtests/tests/suites/filter/filter_match_test.py @@ -0,0 +1,776 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2019 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. +# --- END COPYRIGHT BLOCK ---- + + +""" +Test the matching rules feature . +""" + +import os +import pytest + +from lib389._constants import DEFAULT_SUFFIX +from lib389.topologies import topology_st +from lib389.cos import CosTemplates +from lib389.schema import Schema + +import ldap + +pytestmark = pytest.mark.tier1 + + +ATTR = ["( 2.16.840.1.113730.3.1.999999.0 NAME 'attroctetStringMatch' " + "DESC 'for testing matching rules' EQUALITY octetStringMatch " + "ORDERING octetStringOrderingMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.40 " + "X-ORIGIN 'matching rule tests' )", + "( 2.16.840.1.113730.3.1.999999.1 NAME 'attrbitStringMatch' DESC " + "'for testing matching rules' EQUALITY bitStringMatch SYNTAX " + "1.3.6.1.4.1.1466.115.121.1.6 X-ORIGIN 'matching rule tests' )", + "( 2.16.840.1.113730.3.1.999999.2 NAME 'attrcaseExactIA5Match' " + "DESC 'for testing matching rules' EQUALITY caseExactIA5Match " + "SUBSTR caseExactIA5SubstringsMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.26 " + "X-ORIGIN 'matching rule tests' )", + "( 2.16.840.1.113730.3.1.999999.3 NAME 'attrcaseExactMatch' DESC " + "'for testing matching rules' EQUALITY caseExactMatch ORDERING " + "caseExactOrderingMatch SUBSTR caseExactSubstringsMatch SYNTAX " + "1.3.6.1.4.1.1466.115.121.1.15 " + "X-ORIGIN 'matching rule tests' )", + "( 2.16.840.1.113730.3.1.999999.4 NAME 'attrgeneralizedTimeMatch' DESC " + "'for testing matching rules' EQUALITY generalizedTimeMatch ORDERING " + "generalizedTimeOrderingMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.24 " + "X-ORIGIN 'matching rule tests' )", + "( 2.16.840.1.113730.3.1.999999.5 NAME 'attrbooleanMatch' DESC " + "'for testing matching rules' EQUALITY booleanMatch SYNTAX " + "1.3.6.1.4.1.1466.115.121.1.7 X-ORIGIN 'matching rule tests' )", + "( 2.16.840.1.113730.3.1.999999.6 NAME 'attrcaseIgnoreIA5Match' DESC " + "'for testing matching rules' EQUALITY caseIgnoreIA5Match SUBSTR " + "caseIgnoreIA5SubstringsMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.26 " + "X-ORIGIN 'matching rule tests' )", + "( 2.16.840.1.113730.3.1.999999.7 NAME 'attrcaseIgnoreMatch' DESC " + "'for testing matching rules' EQUALITY caseIgnoreMatch ORDERING " + "caseIgnoreOrderingMatch SUBSTR caseIgnoreSubstringsMatch " + "SYNTAX 1.3.6.1.4.1.1466.115.121.1.15 X-ORIGIN 'matching rule tests' )", + "( 2.16.840.1.113730.3.1.999999.8 NAME 'attrcaseIgnoreListMatch' DESC " + "'for testing matching rules' EQUALITY caseIgnoreListMatch SUBSTR " + "caseIgnoreListSubstringsMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.41 " + "X-ORIGIN 'matching rule tests' )", + "( 2.16.840.1.113730.3.1.999999.9 NAME 
'attrobjectIdentifierMatch' DESC " + "'for testing matching rules' EQUALITY objectIdentifierMatch SYNTAX " + "1.3.6.1.4.1.1466.115.121.1.38 X-ORIGIN 'matching rule tests' )", + "( 2.16.840.1.113730.3.1.999999.10 NAME 'attrdistinguishedNameMatch' DESC " + "'for testing matching rules' EQUALITY distinguishedNameMatch SYNTAX " + "1.3.6.1.4.1.1466.115.121.1.12 X-ORIGIN 'matching rule tests' )", + "( 2.16.840.1.113730.3.1.999999.11 NAME 'attrintegerMatch' DESC " + "'for testing matching rules' EQUALITY integerMatch ORDERING " + "integerOrderingMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.27 " + "X-ORIGIN 'matching rule tests' )", + "( 2.16.840.1.113730.3.1.999999.12 NAME 'attruniqueMemberMatch' DESC " + "'for testing matching rules' EQUALITY uniqueMemberMatch SYNTAX " + "1.3.6.1.4.1.1466.115.121.1.34 X-ORIGIN 'matching rule tests' )", + "( 2.16.840.1.113730.3.1.999999.13 NAME 'attrnumericStringMatch' DESC " + "'for testing matching rules' EQUALITY numericStringMatch ORDERING " + "numericStringOrderingMatch SUBSTR numericStringSubstringsMatch " + "SYNTAX 1.3.6.1.4.1.1466.115.121.1.36 X-ORIGIN 'matching rule tests' )", + "( 2.16.840.1.113730.3.1.999999.14 NAME 'attrtelephoneNumberMatch' DESC " + "'for testing matching rules' EQUALITY telephoneNumberMatch SUBSTR " + "telephoneNumberSubstringsMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.50 " + "X-ORIGIN 'matching rule tests' )", + "( 2.16.840.1.113730.3.1.999999.15 NAME 'attrdirectoryStringFirstComponentMatch' " + "DESC 'for testing matching rules' EQUALITY directoryStringFirstComponentMatch " + "SYNTAX 1.3.6.1.4.1.1466.115.121.1.15 X-ORIGIN 'matching rule tests' )", + "( 2.16.840.1.113730.3.1.999999.16 NAME 'attrobjectIdentifierFirstComponentMatch' " + "DESC 'for testing matching rules' EQUALITY objectIdentifierFirstComponentMatch " + "SYNTAX 1.3.6.1.4.1.1466.115.121.1.38 X-ORIGIN 'matching rule tests' )", + "( 2.16.840.1.113730.3.1.999999.17 NAME 'attrintegerFirstComponentMatch' " + "DESC 'for testing matching rules' EQUALITY integerFirstComponentMatch SYNTAX " + "1.3.6.1.4.1.1466.115.121.1.27 X-ORIGIN 'matching rule tests' )"] + +TESTED_MATCHING_RULES = ["bitStringMatch", "caseExactIA5Match", "caseExactMatch", + "caseExactOrderingMatch", "caseExactSubstringsMatch", + "caseExactIA5SubstringsMatch", "generalizedTimeMatch", + "generalizedTimeOrderingMatch", "booleanMatch", "caseIgnoreIA5Match", + "caseIgnoreIA5SubstringsMatch", "caseIgnoreMatch", + "caseIgnoreOrderingMatch", "caseIgnoreSubstringsMatch", + "caseIgnoreListMatch", "caseIgnoreListSubstringsMatch", + "objectIdentifierMatch", "directoryStringFirstComponentMatch", + "objectIdentifierFirstComponentMatch", "distinguishedNameMatch", + "integerMatch", "integerOrderingMatch", "integerFirstComponentMatch", + "uniqueMemberMatch", "numericStringMatch", "numericStringOrderingMatch", + "numericStringSubstringsMatch", "telephoneNumberMatch", + "telephoneNumberSubstringsMatch", "octetStringMatch", + "octetStringOrderingMatch"] + + +MATCHING_RULES = [ + {'attr': 'attrbitStringMatch', + 'positive': ["'0010'B", "'0011'B", "'0100'B", "'0101'B", "'0110'B"], + 'negative': ["'0001'B", "'0001'B", "'0010'B", "'0010'B", "'0011'B", + "'0011'B", "'0100'B", "'0100'B", "'0101'B", + "'0101'B", "'0110'B", "'0110'B"]}, + {'attr': 'attrcaseExactIA5Match', + 'positive': ['sPrain', 'spRain', 'sprAin', 'spraIn', 'sprain'], + 'negative': ['Sprain', 'Sprain', 'Sprain', 'Sprain', 'SpRain', + 'SpRain', 'SprAin', 'SprAin', 'SpraIn', 'SpraIn', + 'Sprain', 'Sprain']}, + {'attr': 'attrcaseExactMatch', + 'positive': ['ÇéliNé Ändrè', 
'Çéliné ÄndrÈ', 'Çéliné Ändrè', 'çÉliné Ändrè'], + 'negative': ['ÇélIné Ändrè', 'ÇélIné Ändrè', 'ÇéliNé Ändrè', 'ÇéliNé Ändrè', + 'Çéliné ÄndrÈ', 'Çéliné ÄndrÈ', 'Çéliné Ändrè', 'Çéliné Ändrè', + 'çÉliné Ändrè', 'çÉliné Ändrè']}, + {'attr': 'attrgeneralizedTimeMatch', + 'positive': ['20100218171301Z', '20100218171302Z', '20100218171303Z', + '20100218171304Z', '20100218171305Z'], + 'negative': ['20100218171300Z', '20100218171300Z', '20100218171301Z', + '20100218171301Z', '20100218171302Z', '20100218171302Z', + '20100218171303Z', '20100218171303Z', '20100218171304Z', + '20100218171304Z', '20100218171305Z', '20100218171305Z']}, + {'attr': 'attrbooleanMatch', + 'positive': ['FALSE'], + 'negative': ['TRUE', 'TRUE', 'FALSE', 'FALSE']}, + {'attr': 'attrcaseIgnoreIA5Match', + 'positive': ['sprain2', 'sprain3', 'sprain4', 'sprain5', 'sprain6'], + 'negative': ['sprain1', 'sprain1', 'sprain2', 'sprain2', 'sprain3', + 'sprain3', 'sprain4', 'sprain4', 'sprain5', 'sprain5', + 'sprain6', 'sprain6']}, + {'attr': 'attrcaseIgnoreMatch', + 'positive': ['ÇélIné Ändrè2', 'ÇélIné Ändrè3', 'ÇélIné Ändrè4', + 'ÇélIné Ändrè5', 'ÇélIné Ändrè6'], + 'negative': ['ÇélIné Ändrè1', 'ÇélIné Ändrè1', 'ÇélIné Ändrè2', + 'ÇélIné Ändrè2', 'ÇélIné Ändrè3', 'ÇélIné Ändrè3', + 'ÇélIné Ändrè4', 'ÇélIné Ändrè4', 'ÇélIné Ändrè5', + 'ÇélIné Ändrè5', 'ÇélIné Ändrè6', 'ÇélIné Ändrè6']}, + {'attr': 'attrcaseIgnoreListMatch', + 'positive': ['foo2$bar', 'foo3$bar', 'foo4$bar', 'foo5$bar', 'foo6$bar'], + 'negative': ['foo1$bar', 'foo1$bar', 'foo2$bar', 'foo2$bar', 'foo3$bar', + 'foo3$bar', 'foo4$bar', 'foo4$bar', 'foo5$bar', 'foo5$bar', + 'foo6$bar', 'foo6$bar']}, + {'attr': 'attrobjectIdentifierMatch', + 'positive': ['1.3.6.1.4.1.1466.115.121.1.24', '1.3.6.1.4.1.1466.115.121.1.26', + '1.3.6.1.4.1.1466.115.121.1.40', '1.3.6.1.4.1.1466.115.121.1.41', + '1.3.6.1.4.1.1466.115.121.1.6'], + 'negative': ['1.3.6.1.4.1.1466.115.121.1.15', '1.3.6.1.4.1.1466.115.121.1.15', + '1.3.6.1.4.1.1466.115.121.1.24', '1.3.6.1.4.1.1466.115.121.1.24', + '1.3.6.1.4.1.1466.115.121.1.26', '1.3.6.1.4.1.1466.115.121.1.26', + '1.3.6.1.4.1.1466.115.121.1.40', '1.3.6.1.4.1.1466.115.121.1.40', + '1.3.6.1.4.1.1466.115.121.1.41', '1.3.6.1.4.1.1466.115.121.1.41', + '1.3.6.1.4.1.1466.115.121.1.6', '1.3.6.1.4.1.1466.115.121.1.6']}, + {'attr': 'attrdirectoryStringFirstComponentMatch', + 'positive': ['ÇélIné Ändrè2', 'ÇélIné Ändrè3', 'ÇélIné Ändrè4', 'ÇélIné Ändrè5', + 'ÇélIné Ändrè6'], + 'negative': ['ÇélIné Ändrè1', 'ÇélIné Ändrè1', 'ÇélIné Ändrè2', 'ÇélIné Ändrè2', + 'ÇélIné Ändrè3', 'ÇélIné Ändrè3', 'ÇélIné Ändrè4', 'ÇélIné Ändrè4', + 'ÇélIné Ändrè5', 'ÇélIné Ändrè5', 'ÇélIné Ändrè6', 'ÇélIné Ändrè6']}, + {'attr': 'attrobjectIdentifierFirstComponentMatch', + 'positive': ['1.3.6.1.4.1.1466.115.121.1.24', '1.3.6.1.4.1.1466.115.121.1.26', + '1.3.6.1.4.1.1466.115.121.1.40', '1.3.6.1.4.1.1466.115.121.1.41', + '1.3.6.1.4.1.1466.115.121.1.6'], + 'negative': ['1.3.6.1.4.1.1466.115.121.1.15', '1.3.6.1.4.1.1466.115.121.1.15', + '1.3.6.1.4.1.1466.115.121.1.24', '1.3.6.1.4.1.1466.115.121.1.24', + '1.3.6.1.4.1.1466.115.121.1.26', '1.3.6.1.4.1.1466.115.121.1.26', + '1.3.6.1.4.1.1466.115.121.1.40', '1.3.6.1.4.1.1466.115.121.1.40', + '1.3.6.1.4.1.1466.115.121.1.41', '1.3.6.1.4.1.1466.115.121.1.41', + '1.3.6.1.4.1.1466.115.121.1.6', '1.3.6.1.4.1.1466.115.121.1.6']}, + {'attr': 'attrdistinguishedNameMatch', + 'positive': ['cn=foo2,cn=bar', 'cn=foo3,cn=bar', 'cn=foo4,cn=bar', + 'cn=foo5,cn=bar', 'cn=foo6,cn=bar'], + 'negative': ['cn=foo1,cn=bar', 'cn=foo1,cn=bar', 'cn=foo2,cn=bar', + 
'cn=foo2,cn=bar', 'cn=foo3,cn=bar', 'cn=foo3,cn=bar', + 'cn=foo4,cn=bar', 'cn=foo4,cn=bar', 'cn=foo5,cn=bar', + 'cn=foo5,cn=bar', 'cn=foo6,cn=bar', 'cn=foo6,cn=bar']}, + {'attr': 'attrintegerMatch', + 'positive': ['-1', '0', '1', '2', '3'], + 'negative': ['-2', '-2', '-1', '-1', '0', '0', '1', '1', '2', '2', '3', '3']}, + {'attr': 'attrintegerFirstComponentMatch', + 'positive': ['-1', '0', '1', '2', '3'], + 'negative': ['-2', '-2', '-1', '-1', '0', '0', '1', '1', '2', '2', '3', '3']}, + {'attr': 'attruniqueMemberMatch', + 'positive': ["cn=foo2,cn=bar#'0010'B", "cn=foo3,cn=bar#'0011'B", + "cn=foo4,cn=bar#'0100'B", "cn=foo5,cn=bar#'0101'B", + "cn=foo6,cn=bar#'0110'B"], + 'negative': ["cn=foo1,cn=bar#'0001'B", "cn=foo1,cn=bar#'0001'B", + "cn=foo2,cn=bar#'0010'B", "cn=foo2,cn=bar#'0010'B", + "cn=foo3,cn=bar#'0011'B", "cn=foo3,cn=bar#'0011'B", + "cn=foo4,cn=bar#'0100'B", "cn=foo4,cn=bar#'0100'B", + "cn=foo5,cn=bar#'0101'B", "cn=foo5,cn=bar#'0101'B", + "cn=foo6,cn=bar#'0110'B", "cn=foo6,cn=bar#'0110'B"]}, + {'attr': 'attrnumericStringMatch', + 'positive': ['00002', '00003', '00004', '00005', '00006'], + 'negative': ['00001', '00001', '00002', '00002', '00003', '00003', '00004', + '00004', '00005', '00005', '00006', '00006']}, + {'attr': 'attrtelephoneNumberMatch', + 'positive': ['+1 408 555 5625', '+1 408 555 6201', '+1 408 555 8585', + '+1 408 555 9187', '+1 408 555 9423'], + 'negative': ['+1 408 555 4798', '+1 408 555 4798', '+1 408 555 5625', + '+1 408 555 5625', '+1 408 555 6201', '+1 408 555 6201', + '+1 408 555 8585', '+1 408 555 8585', '+1 408 555 9187', + '+1 408 555 9187', '+1 408 555 9423', '+1 408 555 9423']}, + {'attr': 'attroctetStringMatch', + 'positive': ['AAAAAAAAAAAAAAI=', 'AAAAAAAAAAAAAAM=', 'AAAAAAAAAAAAAAQ=', + 'AAAAAAAAAAAAAAU=', 'AAAAAAAAAAAAAAY='], + 'negative': ['AAAAAAAAAAAAAAE=', 'AAAAAAAAAAAAAAE=', 'AAAAAAAAAAAAAAI=', + 'AAAAAAAAAAAAAAI=', 'AAAAAAAAAAAAAAM=', 'AAAAAAAAAAAAAAM=', + 'AAAAAAAAAAAAAAQ=', 'AAAAAAAAAAAAAAQ=', 'AAAAAAAAAAAAAAU=', + 'AAAAAAAAAAAAAAU=', 'AAAAAAAAAAAAAAY=', 'AAAAAAAAAAAAAAY=']}] + + +MATCHING_MODES = [ + {'attr': 'attrbitStringMatch', + 'positive': ["'0001'B"], + 'negative': ["'0001'B", "'0010'B", "'0011'B", "'0100'B", "'0101'B", "'0110'B"]}, + {'attr': 'attrcaseExactIA5Match', + 'positive': 'Sprain', + 'negative': ['Sprain', 'sPrain', 'spRain', 'sprAin', 'spraIn', 'sprain']}, + {'attr': 'attrcaseExactMatch', + 'positive': 'ÇélIné Ändrè', + 'negative': ['ÇélIné Ändrè', 'ÇéliNé Ändrè', 'Çéliné ÄndrÈ', 'Çéliné Ändrè', 'çÉliné Ändrè']}, + {'attr': 'attrgeneralizedTimeMatch', + 'positive': '20100218171300Z', + 'negative': ['20100218171300Z', '20100218171301Z', '20100218171302Z', + '20100218171303Z', '20100218171304Z', '20100218171305Z']}, + {'attr': 'attrbooleanMatch', + 'positive': 'TRUE', + 'negative': ['TRUE', 'FALSE']}, + {'attr': 'attrcaseIgnoreIA5Match', + 'positive': 'sprain1', + 'negative': ['sprain1', 'sprain2', 'sprain3', 'sprain4', 'sprain5', 'sprain6']}, + {'attr': 'attrcaseIgnoreMatch', + 'positive': 'ÇélIné Ändrè1', + 'negative': ['ÇélIné Ändrè1', 'ÇélIné Ändrè2', 'ÇélIné Ändrè3', 'ÇélIné Ändrè4', + 'ÇélIné Ändrè5', 'ÇélIné Ändrè6']}, + {'attr': 'attrcaseIgnoreListMatch', + 'positive': 'foo1$bar', + 'negative': ['foo1$bar', 'foo2$bar', 'foo3$bar', 'foo4$bar', 'foo5$bar', 'foo6$bar']}, + {'attr': 'attrobjectIdentifierMatch', + 'positive': '1.3.6.1.4.1.1466.115.121.1.15', + 'negative': ['1.3.6.1.4.1.1466.115.121.1.15', '1.3.6.1.4.1.1466.115.121.1.24', + '1.3.6.1.4.1.1466.115.121.1.26', '1.3.6.1.4.1.1466.115.121.1.40', + 
'1.3.6.1.4.1.1466.115.121.1.41', '1.3.6.1.4.1.1466.115.121.1.6']}, + {'attr': 'attrdirectoryStringFirstComponentMatch', + 'positive': 'ÇélIné Ändrè1', + 'negative': ['ÇélIné Ändrè1', 'ÇélIné Ändrè2', 'ÇélIné Ändrè3', 'ÇélIné Ändrè4', + 'ÇélIné Ändrè5', 'ÇélIné Ändrè6']}, + {'attr': 'attrobjectIdentifierFirstComponentMatch', + 'positive': '1.3.6.1.4.1.1466.115.121.1.15', + 'negative': ['1.3.6.1.4.1.1466.115.121.1.15', '1.3.6.1.4.1.1466.115.121.1.24', + '1.3.6.1.4.1.1466.115.121.1.26', '1.3.6.1.4.1.1466.115.121.1.40', + '1.3.6.1.4.1.1466.115.121.1.41', '1.3.6.1.4.1.1466.115.121.1.6']}, + {'attr': 'attrdistinguishedNameMatch', + 'positive': 'cn=foo1,cn=bar', + 'negative': ['cn=foo1,cn=bar', 'cn=foo2,cn=bar', 'cn=foo3,cn=bar', 'cn=foo4,cn=bar', + 'cn=foo5,cn=bar', 'cn=foo6,cn=bar']}, + {'attr': 'attrintegerMatch', + 'positive': '-2', + 'negative': ['-2', '-1', '0', '1', '2', '3']}, + {'attr': 'attrintegerFirstComponentMatch', + 'positive': '-2', + 'negative': ['-2', '-1', '0', '1', '2', '3']}, + {'attr': 'attruniqueMemberMatch', + 'positive': "cn=foo1,cn=bar#'0001'B", + 'negative': ["cn=foo1,cn=bar#'0001'B", "cn=foo2,cn=bar#'0010'B", "cn=foo3,cn=bar#'0011'B", + "cn=foo4,cn=bar#'0100'B", "cn=foo5,cn=bar#'0101'B", "cn=foo6,cn=bar#'0110'B"]}, + {'attr': 'attrnumericStringMatch', + 'positive': '00001', + 'negative': ['00001', '00002', '00003', '00004', '00005', '00006']}, + {'attr': 'attrtelephoneNumberMatch', + 'positive': '+1 408 555 4798', + 'negative': ['+1 408 555 4798', '+1 408 555 5625', '+1 408 555 6201', '+1 408 555 8585', + '+1 408 555 9187', '+1 408 555 9423']}, + {'attr': 'attroctetStringMatch', + 'positive': 'AAAAAAAAAAAAAAE=', + 'negative': ['AAAAAAAAAAAAAAE=', 'AAAAAAAAAAAAAAI=', 'AAAAAAAAAAAAAAM=', 'AAAAAAAAAAAAAAQ=', + 'AAAAAAAAAAAAAAU=', 'AAAAAAAAAAAAAAY=']}] + +MODE_REPLACE = [ + {'attr': 'attrbitStringMatch', + 'positive': ["'0001'B", "'0010'B", "'0011'B", "'0100'B", "'0101'B", "'0110'B"], + 'negative': ["'0001'B", "'0001'B", "'0010'B", "'0010'B", "'0011'B", "'0011'B", + "'0100'B", "'0100'B", "'0101'B", "'0101'B", "'0110'B", "'0110'B"]}, + {'attr': 'attrcaseExactIA5Match', + 'positive': ['Sprain', 'sPrain', 'spRain', 'sprAin', 'spraIn', 'sprain'], + 'negative': ['Sprain', 'Sprain', 'sPrain', 'sPrain', 'spRain', 'spRain', + 'sprAin', 'sprAin', 'spraIn', 'spraIn', 'sprain', 'sprain']}, + {'attr': 'attrcaseExactMatch', + 'positive': ['ÇélIné Ändrè', 'ÇéliNé Ändrè', 'Çéliné ÄndrÈ', 'Çéliné Ändrè', + 'çÉliné Ändrè'], + 'negative': ['ÇélIné Ändrè', 'ÇélIné Ändrè', 'ÇéliNé Ändrè', 'ÇéliNé Ändrè', + 'Çéliné ÄndrÈ', 'Çéliné ÄndrÈ', 'Çéliné Ändrè', 'Çéliné Ändrè', + 'çÉliné Ändrè', 'çÉliné Ändrè']}, + {'attr': 'attrgeneralizedTimeMatch', + 'positive': ['20100218171300Z', '20100218171301Z', '20100218171302Z', '20100218171303Z', + '20100218171304Z', '20100218171305Z'], + 'negative': ['20100218171300Z', '20100218171300Z', '20100218171301Z', '20100218171301Z', + '20100218171302Z', '20100218171302Z', '20100218171303Z', '20100218171303Z', + '20100218171304Z', '20100218171304Z', '20100218171305Z', '20100218171305Z']}, + {'attr': 'attrbooleanMatch', + 'positive': ['TRUE', 'FALSE'], + 'negative': ['TRUE', 'TRUE', 'FALSE', 'FALSE']}, + {'attr': 'attrcaseIgnoreIA5Match', + 'positive': ['sprain1', 'sprain2', 'sprain3', 'sprain4', 'sprain5', 'sprain6'], + 'negative': ['sprain1', 'sprain1', 'sprain2', 'sprain2', 'sprain3', 'sprain3', + 'sprain4', 'sprain4', 'sprain5', 'sprain5', 'sprain6', 'sprain6']}, + {'attr': 'attrcaseIgnoreMatch', + 'positive': ['ÇélIné Ändrè1', 'ÇélIné Ändrè2', 'ÇélIné Ändrè3', 
'ÇélIné Ändrè4', + 'ÇélIné Ändrè5', 'ÇélIné Ändrè6'], + 'negative': ['ÇélIné Ändrè1', 'ÇélIné Ändrè1', 'ÇélIné Ändrè2', 'ÇélIné Ändrè2', + 'ÇélIné Ändrè3', 'ÇélIné Ändrè3', 'ÇélIné Ändrè4', 'ÇélIné Ändrè4', + 'ÇélIné Ändrè5', 'ÇélIné Ändrè5', 'ÇélIné Ändrè6', 'ÇélIné Ändrè6']}, + {'attr': 'attrcaseIgnoreListMatch', + 'positive': ['foo1$bar', 'foo2$bar', 'foo3$bar', 'foo4$bar', 'foo5$bar', 'foo6$bar'], + 'negative': ['foo1$bar', 'foo1$bar', 'foo2$bar', 'foo2$bar', 'foo3$bar', 'foo3$bar', + 'foo4$bar', 'foo4$bar', 'foo5$bar', 'foo5$bar', 'foo6$bar', 'foo6$bar']}, + {'attr': 'attrobjectIdentifierFirstComponentMatch', + 'positive': ['1.3.6.1.4.1.1466.115.121.1.15', '1.3.6.1.4.1.1466.115.121.1.24', + '1.3.6.1.4.1.1466.115.121.1.26', '1.3.6.1.4.1.1466.115.121.1.40', + '1.3.6.1.4.1.1466.115.121.1.41', '1.3.6.1.4.1.1466.115.121.1.6'], + 'negative': ['1.3.6.1.4.1.1466.115.121.1.15', '1.3.6.1.4.1.1466.115.121.1.15', + '1.3.6.1.4.1.1466.115.121.1.24', '1.3.6.1.4.1.1466.115.121.1.24', + '1.3.6.1.4.1.1466.115.121.1.26', '1.3.6.1.4.1.1466.115.121.1.26', + '1.3.6.1.4.1.1466.115.121.1.40', '1.3.6.1.4.1.1466.115.121.1.40', + '1.3.6.1.4.1.1466.115.121.1.41', '1.3.6.1.4.1.1466.115.121.1.41', + '1.3.6.1.4.1.1466.115.121.1.6', '1.3.6.1.4.1.1466.115.121.1.6']}, + {'attr': 'attrdistinguishedNameMatch', + 'positive': ['cn=foo1,cn=bar', 'cn=foo2,cn=bar', 'cn=foo3,cn=bar', 'cn=foo4,cn=bar', + 'cn=foo5,cn=bar', 'cn=foo6,cn=bar'], + 'negative': ['cn=foo1,cn=bar', 'cn=foo1,cn=bar', 'cn=foo2,cn=bar', 'cn=foo2,cn=bar', + 'cn=foo3,cn=bar', 'cn=foo3,cn=bar', 'cn=foo4,cn=bar', 'cn=foo4,cn=bar', + 'cn=foo5,cn=bar', 'cn=foo5,cn=bar', 'cn=foo6,cn=bar', 'cn=foo6,cn=bar']}, + {'attr': 'attrintegerMatch', + 'positive': ['-2', '-1', '0', '1', '2', '3'], + 'negative': ['-2', '-2', '-1', '-1', '0', '0', '1', '1', '2', '2', '3', '3']}, + {'attr': 'attrintegerFirstComponentMatch', + 'positive': ['-2', '-1', '0', '1', '2', '3'], + 'negative': ['-2', '-2', '-1', '-1', '0', '0', '1', '1', '2', '2', '3', '3']}, + {'attr': 'attruniqueMemberMatch', + 'positive': ["cn=foo1,cn=bar#'0001'B", "cn=foo2,cn=bar#'0010'B", "cn=foo3,cn=bar#'0011'B", + "cn=foo4,cn=bar#'0100'B", "cn=foo5,cn=bar#'0101'B", "cn=foo6,cn=bar#'0110'B"], + 'negative': ["cn=foo1,cn=bar#'0001'B", "cn=foo1,cn=bar#'0001'B", "cn=foo2,cn=bar#'0010'B", + "cn=foo2,cn=bar#'0010'B", "cn=foo3,cn=bar#'0011'B", "cn=foo3,cn=bar#'0011'B", + "cn=foo4,cn=bar#'0100'B", "cn=foo4,cn=bar#'0100'B", "cn=foo5,cn=bar#'0101'B", + "cn=foo5,cn=bar#'0101'B", "cn=foo6,cn=bar#'0110'B", "cn=foo6,cn=bar#'0110'B"]}, + {'attr': 'attrnumericStringMatch', + 'positive': ['00001', '00002', '00003', '00004', '00005', '00006'], + 'negative': ['00001', '00001', '00002', '00002', '00003', '00003', '00004', '00004', '00005', + '00005', '00006', '00006']}, + {'attr': 'attrtelephoneNumberMatch', + 'positive': ['+1 408 555 4798', '+1 408 555 5625', '+1 408 555 6201', '+1 408 555 8585', + '+1 408 555 9187', '+1 408 555 9423'], + 'negative': ['+1 408 555 4798', '+1 408 555 4798', '+1 408 555 5625', '+1 408 555 5625', + '+1 408 555 6201', '+1 408 555 6201', '+1 408 555 8585', '+1 408 555 8585', + '+1 408 555 9187', '+1 408 555 9187', '+1 408 555 9423', '+1 408 555 9423']}, + {'attr': 'attroctetStringMatch', + 'positive': ['AAAAAAAAAAAAAAE=', 'AAAAAAAAAAAAAAI=', 'AAAAAAAAAAAAAAM=', 'AAAAAAAAAAAAAAQ=', + 'AAAAAAAAAAAAAAU=', 'AAAAAAAAAAAAAAY='], + 'negative': ['AAAAAAAAAAAAAAE=', 'AAAAAAAAAAAAAAE=', 'AAAAAAAAAAAAAAI=', 'AAAAAAAAAAAAAAI=', + 'AAAAAAAAAAAAAAM=', 'AAAAAAAAAAAAAAM=', 'AAAAAAAAAAAAAAQ=', 'AAAAAAAAAAAAAAQ=', + 
'AAAAAAAAAAAAAAU=', 'AAAAAAAAAAAAAAU=', 'AAAAAAAAAAAAAAY=', 'AAAAAAAAAAAAAAY=']}, + {'attr': 'attrobjectIdentifierMatch', + 'positive': ['1.3.6.1.4.1.1466.115.121.1.15', '1.3.6.1.4.1.1466.115.121.1.24', + '1.3.6.1.4.1.1466.115.121.1.26', '1.3.6.1.4.1.1466.115.121.1.40', + '1.3.6.1.4.1.1466.115.121.1.41', '1.3.6.1.4.1.1466.115.121.1.6'], + 'negative': ['1.3.6.1.4.1.1466.115.121.1.15', '1.3.6.1.4.1.1466.115.121.1.15', + '1.3.6.1.4.1.1466.115.121.1.24', '1.3.6.1.4.1.1466.115.121.1.24', + '1.3.6.1.4.1.1466.115.121.1.26', '1.3.6.1.4.1.1466.115.121.1.26', + '1.3.6.1.4.1.1466.115.121.1.40', '1.3.6.1.4.1.1466.115.121.1.40', + '1.3.6.1.4.1.1466.115.121.1.41', '1.3.6.1.4.1.1466.115.121.1.41', + '1.3.6.1.4.1.1466.115.121.1.6', '1.3.6.1.4.1.1466.115.121.1.6']}, + {'attr': 'attrdirectoryStringFirstComponentMatch', + 'positive': ['ÇélIné Ändrè1', 'ÇélIné Ändrè2', 'ÇélIné Ändrè3', 'ÇélIné Ändrè4', + 'ÇélIné Ändrè5', 'ÇélIné Ändrè6'], + 'negative': ['ÇélIné Ändrè1', 'ÇélIné Ändrè1', 'ÇélIné Ändrè2', 'ÇélIné Ändrè2', + 'ÇélIné Ändrè3', 'ÇélIné Ändrè3', 'ÇélIné Ändrè4', 'ÇélIné Ändrè4', + 'ÇélIné Ändrè5', 'ÇélIné Ändrè5', 'ÇélIné Ändrè6', 'ÇélIné Ändrè6']}] + + +LIST_ATTR = [ + ('entryoctetStringMatch0', 'AAAAAAAAAAAAAAE='), + ('entryoctetStringMatch1', 'AAAAAAAAAAAAAAI='), + ('entryoctetStringMatch2', 'AAAAAAAAAAAAAAM='), + ('entryoctetStringMatch3', 'AAAAAAAAAAAAAAQ='), + ('entryoctetStringMatch4', 'AAAAAAAAAAAAAAU='), + ('entryoctetStringMatch5', 'AAAAAAAAAAAAAAY='), + ('entrybitStringMatch0', "'0001'B"), + ('entrybitStringMatch1', "'0010'B"), + ('entrybitStringMatch2', "'0011'B"), + ('entrybitStringMatch3', "'0100'B"), + ('entrybitStringMatch4', "'0101'B"), + ('entrybitStringMatch5', "'0110'B"), + ('entrycaseExactIA5Match0', "Sprain"), + ('entrycaseExactIA5Match1', "sPrain"), + ('entrycaseExactIA5Match2', "spRain"), + ('entrycaseExactIA5Match3', "sprAin"), + ('entrycaseExactIA5Match4', "spraIn"), + ('entrycaseExactIA5Match5', "sprain"), + ('entrycaseExactMatch0', "ÇélIné Ändrè"), + ('entrycaseExactMatch1', "ÇéliNé Ändrè"), + ('entrycaseExactMatch2', "Çéliné ÄndrÈ"), + ('entrycaseExactMatch3', "Çéliné Ändrè"), + ('entrycaseExactMatch4', "çÉliné Ändrè"), + ('entrygeneralizedTimeMatch0', "20100218171300Z"), + ('entrygeneralizedTimeMatch1', "20100218171301Z"), + ('entrygeneralizedTimeMatch2', "20100218171302Z"), + ('entrygeneralizedTimeMatch3', "20100218171303Z"), + ('entrygeneralizedTimeMatch4', "20100218171304Z"), + ('entrygeneralizedTimeMatch5', "20100218171305Z"), + ('entrybooleanMatch0', "TRUE"), + ('entrybooleanMatch1', "FALSE"), + ('entrycaseIgnoreIA5Match0', "sprain1"), + ('entrycaseIgnoreIA5Match1', "sprain2"), + ('entrycaseIgnoreIA5Match2', "sprain3"), + ('entrycaseIgnoreIA5Match3', "sprain4"), + ('entrycaseIgnoreIA5Match4', "sprain5"), + ('entrycaseIgnoreIA5Match5', "sprain6"), + ('entrycaseIgnoreMatch0', "ÇélIné Ändrè1"), + ('entrycaseIgnoreMatch1', "ÇélIné Ändrè2"), + ('entrycaseIgnoreMatch2', "ÇélIné Ändrè3"), + ('entrycaseIgnoreMatch3', "ÇélIné Ändrè4"), + ('entrycaseIgnoreMatch4', "ÇélIné Ändrè5"), + ('entrycaseIgnoreMatch5', "ÇélIné Ändrè6"), + ('entrycaseIgnoreListMatch0', "foo1$bar"), + ('entrycaseIgnoreListMatch1', "foo2$bar"), + ('entrycaseIgnoreListMatch2', "foo3$bar"), + ('entrycaseIgnoreListMatch3', "foo4$bar"), + ('entrycaseIgnoreListMatch4', "foo5$bar"), + ('entrycaseIgnoreListMatch5', "foo6$bar"), + ('entryobjectIdentifierMatch0', "1.3.6.1.4.1.1466.115.121.1.15"), + ('entryobjectIdentifierMatch1', "1.3.6.1.4.1.1466.115.121.1.24"), + ('entryobjectIdentifierMatch2', 
"1.3.6.1.4.1.1466.115.121.1.26"), + ('entryobjectIdentifierMatch3', "1.3.6.1.4.1.1466.115.121.1.40"), + ('entryobjectIdentifierMatch4', "1.3.6.1.4.1.1466.115.121.1.41"), + ('entryobjectIdentifierMatch5', "1.3.6.1.4.1.1466.115.121.1.6"), + ('entrydistinguishedNameMatch0', "cn=foo1,cn=bar"), + ('entrydistinguishedNameMatch1', "cn=foo2,cn=bar"), + ('entrydistinguishedNameMatch2', "cn=foo3,cn=bar"), + ('entrydistinguishedNameMatch3', "cn=foo4,cn=bar"), + ('entrydistinguishedNameMatch4', "cn=foo5,cn=bar"), + ('entrydistinguishedNameMatch5', "cn=foo6,cn=bar"), + ('entryintegerMatch0', "-2"), + ('entryintegerMatch1', "-1"), + ('entryintegerMatch2', "0"), + ('entryintegerMatch3', "1"), + ('entryintegerMatch4', "2"), + ('entryintegerMatch5', "3"), + ('entryuniqueMemberMatch0', "cn=foo1,cn=bar#'0001'B"), + ('entryuniqueMemberMatch1', "cn=foo2,cn=bar#'0010'B"), + ('entryuniqueMemberMatch2', "cn=foo3,cn=bar#'0011'B"), + ('entryuniqueMemberMatch3', "cn=foo4,cn=bar#'0100'B"), + ('entryuniqueMemberMatch4', "cn=foo5,cn=bar#'0101'B"), + ('entryuniqueMemberMatch5', "cn=foo6,cn=bar#'0110'B"), + ('entrynumericStringMatch0', "00001"), + ('entrynumericStringMatch1', "00002"), + ('entrynumericStringMatch2', "00003"), + ('entrynumericStringMatch3', "00004"), + ('entrynumericStringMatch4', "00005"), + ('entrynumericStringMatch5', "00006"), + ('entrytelephoneNumberMatch0', "+1 408 555 4798"), + ('entrytelephoneNumberMatch1', "+1 408 555 5625"), + ('entrytelephoneNumberMatch2', "+1 408 555 6201"), + ('entrytelephoneNumberMatch3', "+1 408 555 8585"), + ('entrytelephoneNumberMatch4', "+1 408 555 9187"), + ('entrytelephoneNumberMatch5', "+1 408 555 9423"), + ('entrydirectoryStringFirstComponentMatch0', "ÇélIné Ändrè1"), + ('entrydirectoryStringFirstComponentMatch1', "ÇélIné Ändrè2"), + ('entrydirectoryStringFirstComponentMatch2', "ÇélIné Ändrè3"), + ('entrydirectoryStringFirstComponentMatch3', "ÇélIné Ändrè4"), + ('entrydirectoryStringFirstComponentMatch4', "ÇélIné Ändrè5"), + ('entrydirectoryStringFirstComponentMatch5', "ÇélIné Ändrè6"), + ('entryobjectIdentifierFirstComponentMatch0', "1.3.6.1.4.1.1466.115.121.1.15"), + ('entryobjectIdentifierFirstComponentMatch1', "1.3.6.1.4.1.1466.115.121.1.24"), + ('entryobjectIdentifierFirstComponentMatch2', "1.3.6.1.4.1.1466.115.121.1.26"), + ('entryobjectIdentifierFirstComponentMatch3', "1.3.6.1.4.1.1466.115.121.1.40"), + ('entryobjectIdentifierFirstComponentMatch4', "1.3.6.1.4.1.1466.115.121.1.41"), + ('entryobjectIdentifierFirstComponentMatch5', "1.3.6.1.4.1.1466.115.121.1.6"), + ('entryintegerFirstComponentMatch0', "-2"), + ('entryintegerFirstComponentMatch1', "-1"), + ('entryintegerFirstComponentMatch2', "0"), + ('entryintegerFirstComponentMatch3', "1"), + ('entryintegerFirstComponentMatch4', "2"), + ('entryintegerFirstComponentMatch5', "3")] + + +POSITIVE_NEGATIVE_VALUES = [ + ["(attrbitStringMatch='0001'B)", 1, + "(attrbitStringMatch:bitStringMatch:='000100000'B)"], + ["(attrgeneralizedTimeMatch=20100218171300Z)", 1, + "(attrcaseExactIA5Match=SPRAIN)"], + ["(attrcaseExactMatch>=ÇélIné Ändrè)", 5, + "(attrcaseExactMatch=ÇéLINé ÄNDRè)"], + ["(attrcaseExactMatch:caseExactMatch:=ÇélIné Ändrè)", 1, + "(attrcaseExactMatch>=çéliné ändrè)"], + ["(attrcaseExactIA5Match=Sprain)", 1, + "(attrgeneralizedTimeMatch=20300218171300Z)"], + ["(attrbooleanMatch=TRUE)", 1, + "(attrgeneralizedTimeMatch>=20300218171300Z)"], + ["(attrcaseIgnoreIA5Match=sprain1)", 1, + "(attrcaseIgnoreIA5Match=sprain9999)"], + ["(attrcaseIgnoreMatch=ÇélIné Ändrè1)", 1, + "(attrcaseIgnoreMatch=ÇélIné Ändrè9999)"], 
+ ["(attrcaseIgnoreMatch>=ÇélIné Ändrè1)", 6, + "(attrcaseIgnoreMatch>=ÇélIné Ändrè9999)"], + ["(attrcaseIgnoreListMatch=foo1$bar)", 1, + "(attrcaseIgnoreListMatch=foo1$bar$baz$biff)"], + ["(attrobjectIdentifierMatch=1.3.6.1.4.1.1466.115.121.1.15)", 1, + "(attrobjectIdentifierMatch=1.3.6.1.4.1.1466.115.121.1.15.99999)"], + ["(attrgeneralizedTimeMatch>=20100218171300Z)", 6, + "(attroctetStringMatch>=AAAAAAAAAAABAQQ=)"], + ["(attrdirectoryStringFirstComponentMatch=ÇélIné Ändrè1)", 1, + "(attrdirectoryStringFirstComponentMatch=ÇélIné Ändrè9999)"], + ["(attrobjectIdentifierFirstComponentMatch=1.3.6.1.4.1.1466.115.121.1.15)", 1, + "(attrobjectIdentifierFirstComponentMatch=1.3.6.1.4.1.1466.115.121.1.15.99999)"], + ["(attrdistinguishedNameMatch=cn=foo1,cn=bar)", 1, + "(attrdistinguishedNameMatch=cn=foo1,cn=bar,cn=baz)"], + ["(attrintegerMatch=-2)", 1, + "(attrintegerMatch=-20)"], + ["(attrintegerMatch>=-2)", 6, + "(attrintegerMatch>=20)"], + ["(attrintegerFirstComponentMatch=-2)", 1, + "(attrintegerFirstComponentMatch=-20)"], + ["(attruniqueMemberMatch=cn=foo1,cn=bar#'0001'B)", 1, + "(attruniqueMemberMatch=cn=foo1,cn=bar#'00010000'B)"], + ["(attrnumericStringMatch=00001)", 1, + "(attrnumericStringMatch=000000001)"], + ["(attrnumericStringMatch>=00001)", 6, + "(attrnumericStringMatch>=01)"], + ["(attrtelephoneNumberMatch=+1 408 555 4798)", 1, + "(attrtelephoneNumberMatch=+2 408 555 4798)"], + ["(attroctetStringMatch=AAAAAAAAAAAAAAE=)", 1, + "(attroctetStringMatch=AAAAAAAAAAAAAAEB)"], + ["(attroctetStringMatch>=AAAAAAAAAAAAAAE=)", 6, + "(attroctetStringMatch>=AAAAAAAAAAABAQE=)"]] + + +LIST_EXT = [("(attrbitStringMatch:bitStringMatch:='0001'B)", 1), + ("(attrcaseExactIA5Match:caseExactIA5Match:=Sprain)", 1), + ("(attrcaseExactMatch:caseExactMatch:=ÇélIné Ändrè)", 1), + ("(attrcaseExactMatch:caseExactOrderingMatch:=ÇélIné Ändrè)", 5), + ("(attrgeneralizedTimeMatch:generalizedTimeMatch:=20100218171300Z)", 1), + ("(attrgeneralizedTimeMatch:generalizedTimeOrderingMatch:=20100218171300Z)", 6), + ("(attrbooleanMatch:booleanMatch:=TRUE)", 1), + ("(attrcaseIgnoreIA5Match:caseIgnoreIA5Match:=sprain1)", 1), + ("(attrcaseIgnoreMatch:caseIgnoreMatch:=ÇélIné Ändrè1)", 1), + ("(attrcaseIgnoreMatch:caseIgnoreOrderingMatch:=ÇélIné Ändrè1)", 6), + ("(attrcaseIgnoreListMatch:caseIgnoreListMatch:=foo1$bar)", 1), + ("(attrobjectIdentifierMatch:objectIdentifierMatch:=1.3.6.1.4.1.1466.115.121.1.15)", 1), + ("(attrdirectoryStringFirstComponentMatch:directory" + "StringFirstComponentMatch:=ÇélIné Ändrè1)", 1), + ("(attrobjectIdentifierFirstComponentMatch:objectIdentifier" + "FirstComponentMatch:=1.3.6.1.4.1.1466.115.121.1.15)", 1), + ("(attrdistinguishedNameMatch:distinguishedNameMatch:=cn=foo1,cn=bar)", 1), + ("(attrintegerMatch:integerMatch:=-2)", 1), + ("(attrintegerMatch:integerOrderingMatch:=-2)", 6), + ("(attrintegerFirstComponentMatch:integerFirstComponentMatch:=-2)", 1), + ("(attruniqueMemberMatch:uniqueMemberMatch:=cn=foo1,cn=bar#'0001'B)", 1), + ("(attrnumericStringMatch:numericStringMatch:=00001)", 1), + ("(attrnumericStringMatch:numericStringMatch:=00001)", 1), + ("(attrtelephoneNumberMatch:telephoneNumberMatch:=+1 408 555 4798)", 1), + ("(attroctetStringMatch:octetStringMatch:=AAAAAAAAAAAAAAE=)", 1), + ("(attroctetStringMatch:octetStringOrderingMatch:=AAAAAAAAAAAAAAE=)", 6), + ("(attrcaseExactMatch=*ÇélIné Ändrè*)", 1), + ("(attrcaseExactMatch=ÇélIné Ändrè*)", 1), + ("(attrcaseExactMatch=*ÇélIné Ändrè)", 1), + ("(attrcaseExactMatch=*é Ä*)", 5), + ("(attrcaseExactIA5Match=*Sprain*)", 1), + 
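+            # Entries written as (attr:rule:=value) use the RFC 4515 extensible-match
+            # syntax to force a specific matching rule, while the wildcard entries in
+            # this part of the list exercise the SUBSTR rules declared in ATTR.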
("(attrcaseExactIA5Match=Sprain*)", 1), + ("(attrcaseExactIA5Match=*Sprain)", 1), + ("(attrcaseExactIA5Match=*rai*)", 3), + ("(attrcaseIgnoreIA5Match=*sprain1*)", 1), + ("(attrcaseIgnoreIA5Match=sprain1*)", 1), + ("(attrcaseIgnoreIA5Match=*sprain1)", 1), + ("(attrcaseIgnoreIA5Match=*rai*)", 6), + ("(attrcaseIgnoreMatch=*ÇélIné Ändrè1*)", 1), + ("(attrcaseIgnoreMatch=ÇélIné Ändrè1*)", 1), + ("(attrcaseIgnoreMatch=*ÇélIné Ändrè1)", 1), + ("(attrcaseIgnoreMatch=*é Ä*)", 6), + ("(attrcaseIgnoreListMatch=*foo1$bar*)", 1), + ("(attrcaseIgnoreListMatch=foo1$bar*)", 1), + ("(attrcaseIgnoreListMatch=*foo1$bar)", 1), + ("(attrcaseIgnoreListMatch=*1$b*)", 1), + ("(attrnumericStringMatch=*00001*)", 1), + ("(attrnumericStringMatch=00001*)", 1), + ("(attrnumericStringMatch=*00001)", 1), + ("(attrnumericStringMatch=*000*)", 6), + ("(attrtelephoneNumberMatch=*+1 408 555 4798*)", 1), + ("(attrtelephoneNumberMatch=+1 408 555 4798*)", 1), + ("(attrtelephoneNumberMatch=*+1 408 555 4798)", 1), + ("(attrtelephoneNumberMatch=* 55*)", 6)] + + +def test_matching_rules(topology_st): + """Test matching rules. + :id: 8cb6e62a-8cfc-11e9-be9a-8c16451d917b + :setup: Standalone + :steps: + 1. Search for matching rule. + 2. Matching rule should be there in schema. + :expected results: + 1. Pass + 2. Pass + """ + matchingrules = Schema(topology_st.standalone).get_matchingrules() + assert matchingrules + rules = set(matchingrule.names for matchingrule in matchingrules) + rules1 = [role[0] for role in rules if len(role) != 0] + for rule in TESTED_MATCHING_RULES: + assert rule in rules1 + + +def test_add_attribute_types(topology_st): + """Test add attribute types to schema + :id: 84d6dece-8cfc-11e9-89a3-8c16451d917b + :setup: Standalone + :steps: + 1. Add new attribute types to schema. + :expected results: + 1. Pass + """ + for attribute in ATTR: + Schema(topology_st.standalone).add('attributetypes', attribute) + + +@pytest.mark.parametrize("rule", MATCHING_RULES) +def test_valid_invalid_attributes(topology_st, rule): + """Delete duplicate attributes + + :id: d0bf3942-ba71-4947-90c8-1bfa9f0b838f + :parametrized: yes + :setup: Standalone + :steps: + 1. Create entry with an attribute that uses that matching rule + 2. Delete existing entry + 3. Create entry with an attribute that uses that matching rule providing duplicate + values that are duplicates according to the equality matching rule. + :expected results: + 1. Pass + 2. Pass + 3. Fail(ldap.TYPE_OR_VALUE_EXISTS) + """ + # Entry with extensibleObject + cos = CosTemplates(topology_st.standalone, DEFAULT_SUFFIX) + entry = cos.create(properties={'cn': 'addentry'+rule['attr'], + rule['attr']: rule['positive']}) + entry.delete() + with pytest.raises(ldap.TYPE_OR_VALUE_EXISTS): + cos.create(properties={'cn': 'addentry'+rule['attr'].split('attr')[1], + rule['attr']: rule['negative']}) + + +@pytest.mark.parametrize("mode", MATCHING_MODES) +def test_valid_invalid_modes(topology_st, mode): + """Add duplicate attributes + + :id: dec03362-ba26-41da-b479-e2b788403fce + :parametrized: yes + :setup: Standalone + :steps: + 1. Create entry with an attribute that uses matching mode + 2. Add an attribute that uses that matching mode providing duplicate + values that are duplicates according to the equality matching. + 3. Delete existing entry + :expected results: + 1. Pass + 2. Fail(ldap.TYPE_OR_VALUE_EXISTS) + 3. 
Pass + """ + # Entry with extensibleObject + cos = CosTemplates(topology_st.standalone, DEFAULT_SUFFIX) + entry = cos.create(properties={'cn': 'addentry'+mode['attr'], + mode['attr']: mode['positive']}) + with pytest.raises(ldap.TYPE_OR_VALUE_EXISTS): + entry.add(mode['attr'], mode['negative']) + entry.delete() + + +@pytest.mark.parametrize("mode", MODE_REPLACE) +def test_valid_invalid_mode_replace(topology_st, mode): + """Replace and Delete duplicate attribute + + :id: 7ec19eca-8cfc-11e9-a0df-8c16451d917b + :parametrized: yes + :setup: Standalone + :steps: + 1. Create entry with an attribute that uses that matching rule + 2. Replace an attribute that uses that matching rule + 3. Replace an attribute that uses that matching rule providing duplicate + values that are duplicates according to the equality matching mode. + 4. Delete existing attribute + 5. Try to delete the deleted attribute again. + 6. Delete entry + :expected results: + 1. Pass + 2. Pass + 3. Fail(ldap.TYPE_OR_VALUE_EXISTS) + 4. Pass + 5. Fail(ldap.NO_SUCH_ATTRIBUTE) + 6. Pass + """ + # Entry with extensibleObject + cos = CosTemplates(topology_st.standalone, DEFAULT_SUFFIX) + user = cos.create(properties={'cn': 'addentry'+mode['attr']}) + + # Replace Operation + user.replace(mode['attr'], mode['positive']) + with pytest.raises(ldap.TYPE_OR_VALUE_EXISTS): + user.replace(mode['attr'], mode['negative']) + # Delete Operation + user.remove(mode['attr'], mode['positive'][0]) + with pytest.raises(ldap.NO_SUCH_ATTRIBUTE): + user.remove(mode['attr'], mode['positive'][0]) + user.delete() + + +@pytest.fixture(scope="module") +def _searches(topology_st): + """ + Add attribute types to schema + """ + cos = CosTemplates(topology_st.standalone, DEFAULT_SUFFIX) + for attr, value in LIST_ATTR: + cos.create(properties={ + 'cn': attr, + 'attr' + attr.split('entry')[1][:-1]: value + }) + + +@pytest.mark.parametrize("attr, po_value, ne_attr", POSITIVE_NEGATIVE_VALUES) +def test_match_count(topology_st, _searches, attr, po_value, ne_attr): + """Search for an attribute with that matching rule with an assertion + value that should match + + :id: 00276180-b902-11e9-bff2-8c16451d917b + :parametrized: yes + :setup: Standalone + :steps: + 1. Filter rules as per the condition and assert the no of output. + 2. Negative filter with no outputs. + :expected results: + 1. Pass + 2. Pass + """ + cos = CosTemplates(topology_st.standalone, DEFAULT_SUFFIX) + assert len(cos.filter(attr)) == po_value + assert not cos.filter(ne_attr) + + +@pytest.mark.parametrize("attr, value", LIST_EXT) +def test_extensible_search(topology_st, _searches, attr, value): + """Match filter and output. + + :id: abe3e6dd-9ecc-11e8-adf0-8c16451d917c + :parametrized: yes + :setup: Standalone + :steps: + 1. Filer output should match the exact value given. + :expected results: + 1. Pass + """ + cos = CosTemplates(topology_st.standalone, DEFAULT_SUFFIX) + assert len(cos.filter(attr)) == value + + +if __name__ == '__main__': + CURRENT_FILE = os.path.realpath(__file__) + pytest.main("-s -v %s" % CURRENT_FILE) diff --git a/dirsrvtests/tests/suites/filter/filter_test.py b/dirsrvtests/tests/suites/filter/filter_test.py new file mode 100644 index 0000000..bb3febe --- /dev/null +++ b/dirsrvtests/tests/suites/filter/filter_test.py @@ -0,0 +1,315 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2016 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. 
+# --- END COPYRIGHT BLOCK --- +# +import logging + +import pytest +from lib389.tasks import * +from lib389.topologies import topology_st +from lib389._constants import PASSWORD, DEFAULT_SUFFIX, DN_DM, SUFFIX +from lib389.utils import * + +pytestmark = pytest.mark.tier1 + +logging.getLogger(__name__).setLevel(logging.DEBUG) +log = logging.getLogger(__name__) + +ENTRY_NAME = 'test_entry' + + +@pytest.mark.bz918686 +@pytest.mark.ds497 +def test_filter_escaped(topology_st): + """Test we can search for an '*' in a attribute value. + + :id: 5c9aa40c-c641-4603-bce3-b19f4c1f2031 + :setup: Standalone instance + :steps: + 1. Add a test user with an '*' in its attribute value + i.e. 'cn=test * me' + 2. Add another similar test user without '*' in its attribute value + 3. Search test user using search filter "cn=*\\**" + :expectedresults: + 1. This should pass + 2. This should pass + 3. Test user with 'cn=test * me' only, should be listed + """ + log.info('Running test_filter_escaped...') + + USER1_DN = 'uid=test_entry,' + DEFAULT_SUFFIX + USER2_DN = 'uid=test_entry2,' + DEFAULT_SUFFIX + + try: + topology_st.standalone.add_s(Entry((USER1_DN, {'objectclass': "top extensibleObject".split(), + 'sn': '1', + 'cn': 'test * me', + 'uid': 'test_entry', + 'userpassword': PASSWORD}))) + except ldap.LDAPError as e: + log.fatal('test_filter_escaped: Failed to add test user ' + USER1_DN + ': error ' + + e.message['desc']) + assert False + + try: + topology_st.standalone.add_s(Entry((USER2_DN, {'objectclass': "top extensibleObject".split(), + 'sn': '2', + 'cn': 'test me', + 'uid': 'test_entry2', + 'userpassword': PASSWORD}))) + except ldap.LDAPError as e: + log.fatal('test_filter_escaped: Failed to add test user ' + USER2_DN + ': error ' + e.message['desc']) + assert False + + try: + entry = topology_st.standalone.search_s(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE, 'cn=*\\**') + if not entry or len(entry) > 1: + log.fatal('test_filter_escaped: Entry was not found using "cn=*\\**"') + assert False + except ldap.LDAPError as e: + log.fatal('test_filter_escaped: Failed to search for user(%s), error: %s' % + (USER1_DN, e.message('desc'))) + assert False + + log.info('test_filter_escaped: PASSED') + + +def test_filter_search_original_attrs(topology_st): + """Search and request attributes with extra characters. The returned entry + should not have these extra characters: objectclass EXTRA" + + :id: d30d8a1c-84ac-47ba-95f9-41e3453fbf3a + :setup: Standalone instance + :steps: + 1. Execute a search operation for attributes with extra characters + 2. Check the search result have these extra characters or not + :expectedresults: + 1. Search should pass + 2. Search result should not have these extra characters attribute + """ + + log.info('Running test_filter_search_original_attrs...') + + try: + entry = topology_st.standalone.search_s(DEFAULT_SUFFIX, ldap.SCOPE_BASE, + 'objectclass=top', ['objectclass-EXTRA']) + if entry[0].hasAttr('objectclass-EXTRA'): + log.fatal('test_filter_search_original_attrs: Entry does not have the original attribute') + assert False + except ldap.LDAPError as e: + log.fatal('test_filter_search_original_attrs: Failed to search suffix(%s), error: %s' % + (DEFAULT_SUFFIX, e.message('desc'))) + assert False + + log.info('test_filter_search_original_attrs: PASSED') + +@pytest.mark.bz1511462 +def test_filter_scope_one(topology_st): + """Test ldapsearch with scope one gives only single entry + + :id: cf5a6078-bbe6-4d43-ac71-553c45923f91 + :setup: Standalone instance + :steps: + 1. 
Search cn=Directory Administrators,dc=example,dc=com using ldapsearch with + scope one using base as dc=example,dc=com + 2. Check that search should return only one entry + :expectedresults: + 1. This should pass + 2. This should pass + """ + + parent_dn="dn: dc=example,dc=com" + child_dn="dn: cn=Directory Administrators,dc=example,dc=com" + + log.info('Search user using ldapsearch with scope one') + results = topology_st.standalone.search_s(DEFAULT_SUFFIX, ldap.SCOPE_ONELEVEL,'cn=Directory Administrators',['cn'] ) + log.info(results) + + log.info('Search should only have one entry') + assert len(results) == 1 + +@pytest.mark.ds47313 +def test_filter_with_attribute_subtype(topology_st): + """Adds 2 test entries and Search with + filters including subtype and ! + + :id: 0e69f5f2-6a0a-480e-8282-fbcc50231908 + :setup: Standalone instance + :steps: + 1. Add 2 entries and create 3 filters + 2. Search for entry with filter: (&(cn=test_entry en only)(!(cn=test_entry fr))) + 3. Search for entry with filter: (&(cn=test_entry en only)(!(cn;fr=test_entry fr))) + 4. Search for entry with filter: (&(cn=test_entry en only)(!(cn;en=test_entry en))) + 5. Delete the added entries + :expectedresults: + 1. Operation should be successful + 2. Search should be successful + 3. Search should be successful + 4. Search should not be successful + 5. Delete the added entries + """ + + # bind as directory manager + topology_st.standalone.log.info("Bind as %s" % DN_DM) + topology_st.standalone.simple_bind_s(DN_DM, PASSWORD) + + # enable filter error logging + # mod = [(ldap.MOD_REPLACE, 'nsslapd-errorlog-level', '32')] + # topology_st.standalone.modify_s(DN_CONFIG, mod) + + topology_st.standalone.log.info("\n\n######################### ADD ######################\n") + + # Prepare the entry with cn;fr & cn;en + entry_name_fr = '%s fr' % (ENTRY_NAME) + entry_name_en = '%s en' % (ENTRY_NAME) + entry_name_both = '%s both' % (ENTRY_NAME) + entry_dn_both = 'cn=%s, %s' % (entry_name_both, SUFFIX) + entry_both = Entry(entry_dn_both) + entry_both.setValues('objectclass', 'top', 'person') + entry_both.setValues('sn', entry_name_both) + entry_both.setValues('cn', entry_name_both) + entry_both.setValues('cn;fr', entry_name_fr) + entry_both.setValues('cn;en', entry_name_en) + + # Prepare the entry with one member + entry_name_en_only = '%s en only' % (ENTRY_NAME) + entry_dn_en_only = 'cn=%s, %s' % (entry_name_en_only, SUFFIX) + entry_en_only = Entry(entry_dn_en_only) + entry_en_only.setValues('objectclass', 'top', 'person') + entry_en_only.setValues('sn', entry_name_en_only) + entry_en_only.setValues('cn', entry_name_en_only) + entry_en_only.setValues('cn;en', entry_name_en) + + topology_st.standalone.log.info("Try to add Add %s: %r" % (entry_dn_both, entry_both)) + topology_st.standalone.add_s(entry_both) + + topology_st.standalone.log.info("Try to add Add %s: %r" % (entry_dn_en_only, entry_en_only)) + topology_st.standalone.add_s(entry_en_only) + + topology_st.standalone.log.info("\n\n######################### SEARCH ######################\n") + + # filter: (&(cn=test_entry en only)(!(cn=test_entry fr))) + myfilter = '(&(sn=%s)(!(cn=%s)))' % (entry_name_en_only, entry_name_fr) + topology_st.standalone.log.info("Try to search with filter %s" % myfilter) + ents = topology_st.standalone.search_s(SUFFIX, ldap.SCOPE_SUBTREE, myfilter) + assert len(ents) == 1 + assert ensure_str(ents[0].sn) == entry_name_en_only + topology_st.standalone.log.info("Found %s" % ents[0].dn) + + # filter: (&(cn=test_entry en only)(!(cn;fr=test_entry fr))) 
+ myfilter = '(&(sn=%s)(!(cn;fr=%s)))' % (entry_name_en_only, entry_name_fr) + topology_st.standalone.log.info("Try to search with filter %s" % myfilter) + ents = topology_st.standalone.search_s(SUFFIX, ldap.SCOPE_SUBTREE, myfilter) + assert len(ents) == 1 + assert ensure_str(ents[0].sn) == entry_name_en_only + topology_st.standalone.log.info("Found %s" % ents[0].dn) + + # filter: (&(cn=test_entry en only)(!(cn;en=test_entry en))) + myfilter = '(&(sn=%s)(!(cn;en=%s)))' % (entry_name_en_only, entry_name_en) + topology_st.standalone.log.info("Try to search with filter %s" % myfilter) + ents = topology_st.standalone.search_s(SUFFIX, ldap.SCOPE_SUBTREE, myfilter) + assert len(ents) == 0 + topology_st.standalone.log.info("Found none") + + topology_st.standalone.log.info("\n\n######################### DELETE ######################\n") + + topology_st.standalone.log.info("Try to delete %s " % entry_dn_both) + topology_st.standalone.delete_s(entry_dn_both) + + topology_st.standalone.log.info("Try to delete %s " % entry_dn_en_only) + topology_st.standalone.delete_s(entry_dn_en_only) + + log.info('Testcase PASSED') + +@pytest.mark.bz1615155 +def test_extended_search(topology_st): + """Test we can search with equality extended matching rule + + :id: 396942ac-467b-435b-8d9f-e80c7ec4ba6c + :setup: Standalone instance + :steps: + 1. Add a test user with 'sn: ext-test-entry' + 2. Search '(cn:de:=ext-test-entry)' + 3. Search '(sn:caseIgnoreIA5Match:=EXT-TEST-ENTRY)' + 4. Search '(sn:caseIgnoreMatch:=EXT-TEST-ENTRY)' + 5. Search '(sn:caseExactMatch:=EXT-TEST-ENTRY)' + 6. Search '(sn:caseExactMatch:=ext-test-entry)' + 7. Search '(sn:caseExactIA5Match:=EXT-TEST-ENTRY)' + 8. Search '(sn:caseExactIA5Match:=ext-test-entry)' + :expectedresults: + 1. This should pass + 2. This should return one entry + 3. This should return one entry + 4. This should return one entry + 5. This should return NO entry + 6. This should return one entry + 7. This should return NO entry + 8. This should return one entry + 3. 
return one entry + """ + log.info('Running test_filter_escaped...') + + ATTR_VAL = 'ext-test-entry' + USER1_DN = "uid=%s,%s" % (ATTR_VAL, DEFAULT_SUFFIX) + + try: + topology_st.standalone.add_s(Entry((USER1_DN, {'objectclass': "top extensibleObject".split(), + 'sn': ATTR_VAL.encode(), + 'cn': ATTR_VAL.encode(), + 'uid': ATTR_VAL.encode()}))) + except ldap.LDAPError as e: + log.fatal('test_extended_search: Failed to add test user ' + USER1_DN + ': error ' + + e.message['desc']) + assert False + + # filter: '(cn:de:=ext-test-entry)' + myfilter = '(cn:de:=%s)' % ATTR_VAL + topology_st.standalone.log.info("Try to search with filter %s" % myfilter) + ents = topology_st.standalone.search_s(SUFFIX, ldap.SCOPE_SUBTREE, myfilter) + assert len(ents) == 1 + + # filter: '(sn:caseIgnoreIA5Match:=EXT-TEST-ENTRY)' + myfilter = '(cn:caseIgnoreIA5Match:=%s)' % ATTR_VAL.upper() + topology_st.standalone.log.info("Try to search with filter %s" % myfilter) + ents = topology_st.standalone.search_s(SUFFIX, ldap.SCOPE_SUBTREE, myfilter) + assert len(ents) == 1 + + # filter: '(sn:caseIgnoreMatch:=EXT-TEST-ENTRY)' + myfilter = '(cn:caseIgnoreMatch:=%s)' % ATTR_VAL.upper() + topology_st.standalone.log.info("Try to search with filter %s" % myfilter) + ents = topology_st.standalone.search_s(SUFFIX, ldap.SCOPE_SUBTREE, myfilter) + assert len(ents) == 1 + + # filter: '(sn:caseExactMatch:=EXT-TEST-ENTRY)' + myfilter = '(cn:caseExactMatch:=%s)' % ATTR_VAL.upper() + topology_st.standalone.log.info("Try to search with filter %s" % myfilter) + ents = topology_st.standalone.search_s(SUFFIX, ldap.SCOPE_SUBTREE, myfilter) + assert len(ents) == 0 + + # filter: '(sn:caseExactMatch:=ext-test-entry)' + myfilter = '(cn:caseExactMatch:=%s)' % ATTR_VAL + topology_st.standalone.log.info("Try to search with filter %s" % myfilter) + ents = topology_st.standalone.search_s(SUFFIX, ldap.SCOPE_SUBTREE, myfilter) + assert len(ents) == 1 + + # filter: '(sn:caseExactIA5Match:=EXT-TEST-ENTRY)' + myfilter = '(cn:caseExactIA5Match:=%s)' % ATTR_VAL.upper() + topology_st.standalone.log.info("Try to search with filter %s" % myfilter) + ents = topology_st.standalone.search_s(SUFFIX, ldap.SCOPE_SUBTREE, myfilter) + assert len(ents) == 0 + + # filter: '(sn:caseExactIA5Match:=ext-test-entry)' + myfilter = '(cn:caseExactIA5Match:=%s)' % ATTR_VAL + topology_st.standalone.log.info("Try to search with filter %s" % myfilter) + ents = topology_st.standalone.search_s(SUFFIX, ldap.SCOPE_SUBTREE, myfilter) + assert len(ents) == 1 + +if __name__ == '__main__': + # Run isolated + # -s for DEBUG mode + CURRENT_FILE = os.path.realpath(__file__) + pytest.main("-s %s" % CURRENT_FILE) diff --git a/dirsrvtests/tests/suites/filter/filter_with_non_root_user_test.py b/dirsrvtests/tests/suites/filter/filter_with_non_root_user_test.py new file mode 100644 index 0000000..a9697c4 --- /dev/null +++ b/dirsrvtests/tests/suites/filter/filter_with_non_root_user_test.py @@ -0,0 +1,384 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2019 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. 
+# --- END COPYRIGHT BLOCK ---- + + +""" +verify and testing Filter from a search +""" + +import os +import pytest + +from lib389._constants import DEFAULT_SUFFIX, PW_DM +from lib389.topologies import topology_st as topo +from lib389.idm.user import UserAccounts, UserAccount +from lib389.idm.account import Accounts + +pytestmark = pytest.mark.tier1 + +FILTER_MWARD = "(uid=mward)" +FILTER_L = "(l=sunnyvale)" +FILTER_MAIL = "(mail=jreu*)" +FILTER_EXAM = "(mail=*exam*)" +FILTER_7393 = "(telephonenumber=*7393)" +FILTER_408 = "(telephonenumber=*408*3)" +FILTER_UID = "(uid=*)" +FILTER_PASSWD = "(userpassword=*)" +FILTER_FRED = "(fred=*)" +FILTER_AAA = "(uid:2.16.840.1.113730.3.3.2.15.1:=>AAA)" +FILTER_AAA_ES = "(uid:es:=>AAA)" +FILTER_AAA_UID = "(uid:2.16.840.1.113730.3.3.2.15.1.5:=AAA)" +FILTER_100 = "(uid:2.16.840.1.113730.3.3.2.15.1:=>user100)" +FILTER_ES_100 = "(uid:es:=>user100)" +FILTER_UID_100 = "(uid:2.16.840.1.113730.3.3.2.15.1.5:=user100)" +FILTER_UID_1 = "(uid:2.16.840.1.113730.3.3.2.15.1:=<1)" +FILTER_UID_ES = "(uid:es:=<1)" +FILTER_UID_2 = "(uid:2.16.840.1.113730.3.3.2.15.1.1:=1)" +FILTER_UID_USER1 = "(uid:2.16.840.1.113730.3.3.2.15.1:= +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. +# --- END COPYRIGHT BLOCK --- +# + +import pytest +import ldap +from lib389.topologies import topology_st as topology_st_pre +from lib389.dirsrv_log import DirsrvAccessLog +from lib389._mapped_object import DSLdapObjects +from lib389._constants import DEFAULT_SUFFIX +from lib389.extensibleobject import UnsafeExtensibleObjects + +def _check_value(inst_cfg, value, exvalue=None): + if exvalue is None: + exvalue = value + inst_cfg.set('nsslapd-verify-filter-schema', value) + assert(inst_cfg.get_attr_val_utf8('nsslapd-verify-filter-schema') == exvalue) + +@pytest.fixture(scope="module") +def topology_st(topology_st_pre): + raw_objects = UnsafeExtensibleObjects(topology_st_pre.standalone, basedn=DEFAULT_SUFFIX) + # Add an object that won't be able to be queried due to invalid attrs. + raw_objects.create(properties = { + "cn": "test_obj", + "a": "a", + "b": "b", + "uid": "foo" + }) + return topology_st_pre + + +@pytest.mark.ds50349 +def test_filter_validation_config(topology_st): + """Test that the new on/warn/off setting can be set and read + correctly + + :id: ac14dad5-5bdf-474f-9936-7ce2d20fb8b6 + :setup: Standalone instance + :steps: + 1. Check the default value of nsslapd-verify-filter-schema + 2. Set the value to "on". + 3. Read the value is "on". + 4. Set the value to "warn". + 5. Read the value is "warn". + 6. Set the value to "off". + 7. Read the value is "off". + 8. Delete the value (reset) + 9. Check the reset value matches 1. + :expectedresults: + 1. Value is "on", "off", or "warn". + 2. Success + 3. Value is "on" + 4. Success + 5. Value is "warn" + 6. Success + 7. Value is "off" + 8. Success + 9. Value is same as from 1. 
+ """ + inst_cfg = topology_st.standalone.config + + initial_value = inst_cfg.get_attr_val_utf8('nsslapd-verify-filter-schema') + + # Check legacy values that may have been set + _check_value(inst_cfg, "on", "reject-invalid") + _check_value(inst_cfg, "warn", "process-safe") + _check_value(inst_cfg, "off") + # Check the more descriptive values + _check_value(inst_cfg, "reject-invalid") + _check_value(inst_cfg, "process-safe") + _check_value(inst_cfg, "warn-invalid") + _check_value(inst_cfg, "off") + + # This should fail + + with pytest.raises(ldap.OPERATIONS_ERROR): + _check_value(inst_cfg, "thnaounaou") + + inst_cfg.remove_all('nsslapd-verify-filter-schema') + final_value = inst_cfg.get_attr_val_utf8('nsslapd-verify-filter-schema') + assert(initial_value == final_value) + + +@pytest.mark.ds50349 +def test_filter_validation_enabled(topology_st): + """Test that queries which are invalid, are correctly rejected by the server. + + :id: 05afdbbd-0d7f-4774-958c-2139827fed70 + :setup: Standalone instance + :steps: + 1. Search a well formed query + 2. Search a poorly formed query + 3. Search a poorly formed complex (and/or) query + 4. Test the server can be restarted + :expectedresults: + 1. No warnings + 2. Query is rejected (err) + 3. Query is rejected (err) + 4. Server restarts + """ + inst = topology_st.standalone + + # In case the default has changed, we set the value to warn. + inst.config.set("nsslapd-verify-filter-schema", "reject-invalid") + raw_objects = DSLdapObjects(inst, basedn=DEFAULT_SUFFIX) + + # Check a good query has no errors. + r = raw_objects.filter("(objectClass=*)") + + with pytest.raises(ldap.UNWILLING_TO_PERFORM): + # Check a bad one DOES emit an error. + r = raw_objects.filter("(a=a)") + + with pytest.raises(ldap.UNWILLING_TO_PERFORM): + # Check a bad complex one does emit an error. + r = raw_objects.filter("(&(a=a)(b=b)(objectClass=*))") + + # Does restart work? + inst.restart() + + +@pytest.mark.ds50349 +def test_filter_validation_warn_safe(topology_st): + """Test that queries which are invalid, are correctly marked as "notes=F" in + the access log, and return no entries or partial sets. + + :id: 8b2b23fe-d878-435c-bc84-8c298be4ca1f + :setup: Standalone instance + :steps: + 1. Search a well formed query + 2. Search a poorly formed query + 3. Search a poorly formed complex (and/or) query + :expectedresults: + 1. No warnings + 2. notes=F is present + 3. notes=F is present + """ + inst = topology_st.standalone + + # In case the default has changed, we set the value to warn. + inst.config.set("nsslapd-verify-filter-schema", "process-safe") + # Set the access log to un-buffered so we get it immediately. + inst.config.set("nsslapd-accesslog-logbuffering", "off") + + # Setup the query object. + # Now we don't care if there are any results, we only care about good/bad queries. + # To do this we have to bypass some of the lib389 magic, and just emit raw queries + # to check them. Turns out lib389 is well designed and this just works as expected + # if you use a single DSLdapObjects and filter. :) + raw_objects = DSLdapObjects(inst, basedn=DEFAULT_SUFFIX) + + # Find any initial notes=F + access_log = DirsrvAccessLog(inst) + r_init = access_log.match(".*notes=F.*") + + # Check a good query has no warnings. + r = raw_objects.filter("(objectClass=*)") + assert(len(r) > 0) + r_s1 = access_log.match(".*notes=F.*") + # Should be the same number of log lines IE 0. + assert(len(r_init) == len(r_s1)) + + # Check a bad one DOES emit a warning. 
+ r = raw_objects.filter("(a=a)") + assert(len(r) == 0) + r_s2 = access_log.match(".*notes=F.*") + # Should be the greate number of log lines IE +1 + assert(len(r_init) + 1 == len(r_s2)) + + # Check a bad complex one does emit a warning. + r = raw_objects.filter("(&(a=a)(b=b)(objectClass=*))") + assert(len(r) == 0) + r_s3 = access_log.match(".*notes=F.*") + # Should be the greate number of log lines IE +2 + assert(len(r_init) + 2 == len(r_s3)) + + # Check that we can still get things when partial + r = raw_objects.filter("(|(a=a)(b=b)(uid=foo))") + assert(len(r) == 1) + r_s4 = access_log.match(".*notes=F.*") + # Should be the greate number of log lines IE +2 + assert(len(r_init) + 3 == len(r_s4)) + + +@pytest.mark.ds50349 +def test_filter_validation_warn_unsafe(topology_st): + """Test that queries which are invalid, are correctly marked as "notes=F" in + the access log, and uses the legacy query behaviour to return unsafe sets. + + :id: 8b2b23fe-d878-435c-bc84-8c298be4ca1f + :setup: Standalone instance + :steps: + 1. Search a well formed query + 2. Search a poorly formed query + 3. Search a poorly formed complex (and/or) query + :expectedresults: + 1. No warnings + 2. notes=F is present + 3. notes=F is present + """ + inst = topology_st.standalone + + # In case the default has changed, we set the value to warn. + inst.config.set("nsslapd-verify-filter-schema", "warn-invalid") + # Set the access log to un-buffered so we get it immediately. + inst.config.set("nsslapd-accesslog-logbuffering", "off") + + # Setup the query object. + # Now we don't care if there are any results, we only care about good/bad queries. + # To do this we have to bypass some of the lib389 magic, and just emit raw queries + # to check them. Turns out lib389 is well designed and this just works as expected + # if you use a single DSLdapObjects and filter. :) + raw_objects = DSLdapObjects(inst, basedn=DEFAULT_SUFFIX) + + # Find any initial notes=F + access_log = DirsrvAccessLog(inst) + r_init = access_log.match(".*notes=(U,)?F.*") + + # Check a good query has no warnings. + r = raw_objects.filter("(objectClass=*)") + assert(len(r) > 0) + r_s1 = access_log.match(".*notes=(U,)?F.*") + # Should be the same number of log lines IE 0. + assert(len(r_init) == len(r_s1)) + + # Check a bad one DOES emit a warning. + r = raw_objects.filter("(a=a)") + assert(len(r) == 1) + # NOTE: Unlike warn-process-safely, these become UNINDEXED and show in the logs. + r_s2 = access_log.match(".*notes=(U,)?F.*") + # Should be the greate number of log lines IE +1 + assert(len(r_init) + 1 == len(r_s2)) + + # Check a bad complex one does emit a warning. + r = raw_objects.filter("(&(a=a)(b=b)(objectClass=*))") + assert(len(r) == 1) + r_s3 = access_log.match(".*notes=(U,)?F.*") + # Should be the greate number of log lines IE +2 + assert(len(r_init) + 2 == len(r_s3)) + + # Check that we can still get things when partial + r = raw_objects.filter("(|(a=a)(b=b)(uid=foo))") + assert(len(r) == 1) + r_s4 = access_log.match(".*notes=(U,)?F.*") + # Should be the greate number of log lines IE +2 + assert(len(r_init) + 3 == len(r_s4)) + diff --git a/dirsrvtests/tests/suites/filter/vfilter_attribute_test.py b/dirsrvtests/tests/suites/filter/vfilter_attribute_test.py new file mode 100644 index 0000000..defcf25 --- /dev/null +++ b/dirsrvtests/tests/suites/filter/vfilter_attribute_test.py @@ -0,0 +1,219 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2019 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). 
+# See LICENSE for details. +# --- END COPYRIGHT BLOCK --- + +""" +This script will test different type of Filters. +""" + +import os +import pytest + +from lib389._constants import DEFAULT_SUFFIX, PW_DM +from lib389.topologies import topology_st as topo +from lib389.idm.user import UserAccounts +from lib389.idm.organizationalunit import OrganizationalUnits +from lib389.schema import Schema +from lib389.idm.account import Accounts + +pytestmark = pytest.mark.tier1 + +FILTER_COMBINE = f"(& (| (nsRoleDN=cn=new managed role) (sn=Hall)) (l=sunnyvale))" +FILTER_RJ = "(uid=rjense2)" +FILTER_CN = "(nsRoleDN=cn=new managed *)" +FILTER_CN_MT = f"(& {FILTER_CN} (uid=mtyler))" + +VALUES_POSITIVE = [ + (FILTER_COMBINE, ['*', 'cn'], 'cn'), + (FILTER_COMBINE, ['cn', 'cn', 'cn'], 'cn'), + (FILTER_COMBINE, ['cn', 'Cn', 'CN'], 'cn'), + (FILTER_COMBINE, ['cn', '*'], 'cn'), + (FILTER_COMBINE, ['modifiersName', 'modifyTimestamp'], 'modifiersName'), + (FILTER_COMBINE, ['modifiersName', 'modifyTimestamp'], 'modifyTimestamp'), + (FILTER_COMBINE, ['*', 'modifiersName', 'modifyTimestamp'], 'modifiersName'), + (FILTER_COMBINE, ['*', 'modifiersName', 'modifyTimestamp'], 'modifyTimestamp'), + (FILTER_COMBINE, ['cn', 'modifiersName', 'modifyTimestamp'], 'modifiersName'), + (FILTER_COMBINE, ['cn', 'modifiersName', 'modifyTimestamp'], 'modifyTimestamp'), + (FILTER_COMBINE, ['cn', 'modifiersName', 'modifyTimestamp'], 'cn'), + (FILTER_COMBINE, ['cn', 'modifiersName', 'nsRoleDN'], 'cn'), + (FILTER_COMBINE, ['cn', 'modifiersName', 'nsRoleDN'], 'modifiersName'), + (FILTER_COMBINE, ['cn', 'modifiersName', 'nsRoleDN'], 'nsRoleDN'), + (FILTER_COMBINE, ['cn', '*', 'modifiersName', 'nsRoleDN'], 'cn'), + (FILTER_COMBINE, ['cn', '*', 'modifiersName', 'nsRoleDN'], 'modifiersName'), + (FILTER_COMBINE, ['cn', '*', 'modifiersName', 'nsRoleDN'], 'nsRoleDN'), + (FILTER_RJ, ['*', 'mailquota'], 'mailquota'), + (FILTER_RJ, ['mailquota', '*'], 'mailquota'), + (FILTER_RJ, ['mailquota'], 'mailquota'), + (FILTER_RJ, ['mailquota', 'nsRoleDN'], 'mailquota'), + (FILTER_RJ, ['mailquota', 'nsRoleDN'], 'nsRoleDN'), + (FILTER_CN, ['cn', 'nsRoleDN'], 'cn'), + (FILTER_CN, ['cn', 'nsRoleDN'], 'nsRoleDN'), + (FILTER_CN_MT, ['mailquota', 'nsRoleDN'], 'mailquota'), + (FILTER_CN_MT, ['mailquota', 'nsRoleDN'], 'nsRoleDN'), + (FILTER_CN_MT, ['mailquota', 'modifiersName', 'nsRoleDN'], 'mailquota'), + (FILTER_CN_MT, ['mailquota', 'modifiersName', 'nsRoleDN'], 'modifiersName'), + (FILTER_CN_MT, ['mailquota', 'modifiersName', 'nsRoleDN'], 'nsRoleDN'), + (FILTER_CN_MT, ['*', 'modifiersName', 'nsRoleDN'], 'nsRoleDN'), + (FILTER_CN_MT, ['*', 'modifiersName', 'nsRoleDN'], 'modifiersName')] + + +LIST_OF_USER = ['scarter', 'tmorris', 'kvaughan', 'abergin', 'dmiller', + 'gfarmer', 'kwinters', 'trigden', 'cschmith', 'jwallace', + 'jwalker', 'tclow', 'rdaugherty', 'jreuter', 'tmason', + 'btalbot', 'mward', 'bjablons', 'jmcFarla', 'llabonte', + 'jcampaig', 'bhal2', 'alutz', 'achassin', 'hmiller', + 'jcampai2', 'lulrich', 'mlangdon', 'striplet', + 'gtriplet', 'jfalena', 'speterso', 'ejohnson', + 'prigden', 'bwalker', 'kjensen', 'mlott', + 'cwallace', 'tpierce', 'rbannist', 'bplante', + 'rmills', 'bschneid', 'skellehe', 'brentz', + 'dsmith', 'scarte2', 'dthorud', 'ekohler', + 'lcampbel', 'tlabonte', 'slee', 'bfree', + 'tschneid', 'prose', 'jhunter', 'ashelton', + 'mmcinnis', 'falbers', 'mschneid', 'pcruse', + 'tkelly', 'gtyler'] + + +@pytest.fixture(scope="module") +def _create_test_entries(topo): + """ + :param topo: + :return: Will create users used for this test script . 
+ """ + users_people = UserAccounts(topo.standalone, DEFAULT_SUFFIX) + for demo1 in LIST_OF_USER: + users_people.create(properties={ + 'uid': demo1, + 'cn': demo1, + 'sn': demo1, + 'uidNumber': str(1000), + 'gidNumber': '2000', + 'homeDirectory': '/home/' + demo1, + 'givenname': demo1, + 'userpassword': PW_DM + }) + + users_people.create(properties={ + 'uid': 'bhall', + 'cn': 'Benjamin Hall', + 'sn': 'Hall', + 'uidNumber': str(1000), + 'gidNumber': '2000', + 'homeDirectory': '/home/' + 'bhall', + 'mail': 'bhall@anuj.com', + 'givenname': 'Benjamin', + 'ou': ['Product Development', 'People'], + 'l': 'sunnyvale', + 'telephonenumber': '+1 408 555 6067', + 'roomnumber': '2511', + 'manager': 'uid=trigden, ou=People, dc=example, dc=com', + 'nsRoleDN': 'cn=new managed role, ou=People, dc=example, dc=com', + 'userpassword': PW_DM, + }) + + ous = OrganizationalUnits(topo.standalone, DEFAULT_SUFFIX) + ou_ou = ous.create(properties={'ou': 'COS'}) + + ous = OrganizationalUnits(topo.standalone, ou_ou.dn) + ous.create(properties={'ou': 'MailSchemeClasses'}) + + Schema(topo.standalone).\ + add('attributetypes', "( 9.9.8.4 NAME 'emailclass' SYNTAX 1.3.6.1.4.1.1466.115.121.1.15 " + "X-ORIGIN 'RFC 2256' )") + Schema(topo.standalone).\ + add('objectclasses', "( 9.9.8.2 NAME 'mailSchemeUser' DESC " + "'User Defined ObjectClass' SUP 'top' MUST " + "( objectclass ) MAY (aci $ emailclass) X-ORIGIN 'RFC 2256' )") + + users_people.create(properties={ + 'cn': 'Randy Jensen', + 'sn': 'Jensen', + 'givenname': 'Randy', + 'objectclass': 'top account person organizationalPerson inetOrgPerson mailSchemeUser ' + 'mailRecipient posixaccount'.split(), + 'l': 'sunnyvale', + 'uid': 'rjense2', + 'uidNumber': str(1000), + 'gidNumber': str(1000), + 'homeDirectory': '/home/' + 'rjense2', + 'mail': 'rjense2@example.com', + 'telephonenumber': '+1 408 555 9045', + 'roomnumber': '1984', + 'manager': 'uid=jwalker, ou=People, dc=example,dc=com', + 'nsRoleDN': 'cn=new managed role, ou=People, dc=example, dc=com', + 'emailclass': 'vpemail', + 'mailquota': '600', + 'userpassword': PW_DM, + }) + + users_people.create(properties={ + 'cn': 'Bjorn Talbot', + 'sn': 'Talbot', + 'givenname': 'Bjorn', + 'objectclass': 'top account person organizationalPerson inetOrgPerson posixaccount'.split(), + 'ou': ['Product Development', 'People'], + 'l': 'Santa Clara', + 'uid': 'btalbo2', + 'mail': 'btalbo2@example.com', + 'telephonenumber': '+1 408 555 4234', + 'roomnumber': '1205', + 'uidNumber': str(1000), + 'gidNumber': str(1000), + 'homeDirectory': '/home/' + 'btalbo2', + 'manager': 'uid=trigden, ou=People, dc=example,dc=com', + 'nsRoleDN': 'cn=new managed role, ou=People, dc=example, dc=com', + 'userpassword': PW_DM + }) + + users_people.create(properties={ + 'objectclass': 'top ' + 'account ' + 'person ' + 'organizationalPerson ' + 'inetOrgPerson ' + 'mailRecipient ' + 'mailSchemeUser ' + 'posixaccount'.split(), + 'cn': 'Matthew Tyler', + 'sn': 'Tyler', + 'givenname': 'Matthew', + 'ou': ['Human Resources', 'People'], + 'l': 'Cupertino', + 'uid': 'mtyler', + 'mail': 'mtyler@example.com', + 'telephonenumber': '+1 408 555 7907', + 'roomnumber': '2701', + 'uidNumber': str(1000), + 'gidNumber': str(1000), + 'homeDirectory': '/home/' + 'mtyler', + 'manager': 'uid=jwalker, ou=People, dc=example,dc=com', + 'nsRoleDN': 'cn=new managed role, ou=People, dc=example, dc=com', + 'mailquota': '600', + 'userpassword': PW_DM}) + + +@pytest.mark.parametrize("filter_test, condition, filter_out", VALUES_POSITIVE) +def test_all_together_positive(topo, 
_create_test_entries, filter_test, condition, filter_out): + """Test filter with positive results. + + :id: 51924a38-9baa-11e8-b22a-8c16451d917b + :parametrized: yes + :setup: Standalone Server + :steps: + 1. Create Filter rules. + 2. Try to pass filter rules as per the condition . + :expected results: + 1. It should pass + 2. It should pass + """ + account = Accounts(topo.standalone, DEFAULT_SUFFIX) + assert account.filter(filter_test)[0].get_attrs_vals_utf8(condition)[filter_out] + + +if __name__ == '__main__': + CURRENT_FILE = os.path.realpath(__file__) + pytest.main("-s -v %s" % CURRENT_FILE) diff --git a/dirsrvtests/tests/suites/filter/vfilter_simple_test.py b/dirsrvtests/tests/suites/filter/vfilter_simple_test.py new file mode 100644 index 0000000..b009973 --- /dev/null +++ b/dirsrvtests/tests/suites/filter/vfilter_simple_test.py @@ -0,0 +1,556 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2019 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. +# --- END COPYRIGHT BLOCK ---- + +""" +verify and testing Filter from a search +""" + +import os +import pytest + +from lib389._constants import DEFAULT_SUFFIX, PW_DM +from lib389.topologies import topology_st as topo +from lib389.idm.organizationalunit import OrganizationalUnits +from lib389.idm.account import Accounts +from lib389.idm.user import UserAccount, UserAccounts +from lib389.schema import Schema +from lib389.idm.role import ManagedRoles, FilteredRoles + +pytestmark = pytest.mark.tier1 + +FILTER_POSTAL = "(postalCode=99999)" +FILTER_ADDRESS = "(postalAddress=345 California Av., Mountain View, CA)" +FILTER_8888 = "(postalCode:2.16.840.1.113730.3.3.2.7.1:=88888)" +FILTER_6666 = "(postalCode:2.16.840.1.113730.3.3.2.7.1.3:=66666)" +FILTER_VPE = "(emailclass=vpe*)" +FILTER_EMAIL = "(emailclass=*emai*)" +FILTER_EMAILQUATA = "(mailquota=*00)" +FILTER_QUATA = '(mailquota=*6*0)' +FILTER_ROLE = '(nsRole=*)' +FILTER_POST = '(postalAddress=*)' +FILTER_CLASS = "(emailclass:2.16.840.1.113730.3.3.2.15.1:=>AAA)" +FILTER_CLASSES = "(emailclass:es:=>AAA)" +FILTER_AAA = "(emailclass:2.16.840.1.113730.3.3.2.15.1.5:=AAA)" +FILTER_VE = "(emailclass:2.16.840.1.113730.3.3.2.15.1:=>vpemail)" +FILTER_VPEM = "(emailclass:es:=>vpemail)" +FILTER_900 = "(mailquota:2.16.840.1.113730.3.3.2.15.1.1:=900)" +FILTER_7777 = "(postalCode:de:==77777)" +FILTER_FRED = '(fred=*)' +FILTER_ECLASS = "(emailclass:2.16.840.1.113730.3.3.2.15.1.5:=vpemail)" +FILTER_ECLASS_1 = "(emailclass:2.16.840.1.113730.3.3.2.15.1:=<1)" +FILTER_ECLASS_2 = "(emailclass:es:=<1)" +FILTER_ECLASS_3 = "(emailclass:2.16.840.1.113730.3.3.2.15.1.1:=1)" +FILTER_ECLASS_4 = "(emailclass:2.16.840.1.113730.3.3.2.15.1:= 0: + return True + return False + + +def _allow_machine_account(inst, name): + # First we need to get the mapping tree dn + mt = inst.mappingtree.list(suffix=DEFAULT_SUFFIX)[0] + inst.modify_s('cn=replica,%s' % mt.dn, [ + (ldap.MOD_REPLACE, 'nsDS5ReplicaBindDN', "uid=%s,ou=Machines,%s" % (name, DEFAULT_SUFFIX)) + ]) + + +def test_gssapi_repl(topology_m2): + """Test gssapi authenticated replication agreement of two masters using KDC + + :id: 552850aa-afc3-473e-9c39-aae802b46f11 + + :setup: MMR with two masters + + :steps: + 1. Create the locations on each master for the other master to bind to + 2. Set on the cn=replica config to accept the other masters mapping under mapping tree + 3. Create the replication agreements from M1->M2 and vice versa (M2->M1) + 4. 
Set the replica bind method to sasl gssapi for both agreements + 5. Initialize all the agreements + 6. Create a user on M1 and check if user is created on M2 + 7. Create a user on M2 and check if user is created on M1 + + :expectedresults: + 1. Locations should be added successfully + 2. Configuration should be added successfully + 3. Replication agreements should be added successfully + 4. Bind method should be set to sasl gssapi for both agreements + 5. Agreements should be initialized successfully + 6. Test User should be created on M1 and M2 both + 7. Test User should be created on M1 and M2 both + """ + + return + master1 = topology_m2.ms["master1"] + master2 = topology_m2.ms["master2"] + + # Create the locations on each master for the other to bind to. + _create_machine_ou(master1) + _create_machine_ou(master2) + + _create_machine_account(master1, 'ldap/%s' % HOST_MASTER_1) + _create_machine_account(master1, 'ldap/%s' % HOST_MASTER_2) + _create_machine_account(master2, 'ldap/%s' % HOST_MASTER_1) + _create_machine_account(master2, 'ldap/%s' % HOST_MASTER_2) + + # Set on the cn=replica config to accept the other masters princ mapping under mapping tree + _allow_machine_account(master1, 'ldap/%s' % HOST_MASTER_2) + _allow_machine_account(master2, 'ldap/%s' % HOST_MASTER_1) + + # + # Create all the agreements + # + # Creating agreement from master 1 to master 2 + + # Set the replica bind method to sasl gssapi + properties = {RA_NAME: r'meTo_$host:$port', + RA_METHOD: 'SASL/GSSAPI', + RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]} + m1_m2_agmt = master1.agreement.create(suffix=SUFFIX, host=master2.host, port=master2.port, properties=properties) + if not m1_m2_agmt: + log.fatal("Fail to create a master -> master replica agreement") + sys.exit(1) + log.debug("%s created" % m1_m2_agmt) + + # Creating agreement from master 2 to master 1 + + # Set the replica bind method to sasl gssapi + properties = {RA_NAME: r'meTo_$host:$port', + RA_METHOD: 'SASL/GSSAPI', + RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]} + m2_m1_agmt = master2.agreement.create(suffix=SUFFIX, host=master1.host, port=master1.port, properties=properties) + if not m2_m1_agmt: + log.fatal("Fail to create a master -> master replica agreement") + sys.exit(1) + log.debug("%s created" % m2_m1_agmt) + + # Allow the replicas to get situated with the new agreements... + time.sleep(5) + + # + # Initialize all the agreements + # + master1.agreement.init(SUFFIX, HOST_MASTER_2, PORT_MASTER_2) + master1.waitForReplInit(m1_m2_agmt) + + # Check replication is working... 
+ if master1.testReplication(DEFAULT_SUFFIX, master2): + log.info('Replication is working.') + else: + log.fatal('Replication is not working.') + assert False + + # Add a user to master 1 + _create_machine_account(master1, 'http/one.example.com') + # Check it's on 2 + time.sleep(5) + assert (_check_machine_account(master2, 'http/one.example.com')) + # Add a user to master 2 + _create_machine_account(master2, 'http/two.example.com') + # Check it's on 1 + time.sleep(5) + assert (_check_machine_account(master2, 'http/two.example.com')) + + +if __name__ == '__main__': + # Run isolated + # -s for DEBUG mode + CURRENT_FILE = os.path.realpath(__file__) + pytest.main("-s %s" % CURRENT_FILE) diff --git a/dirsrvtests/tests/suites/import/__init__.py b/dirsrvtests/tests/suites/import/__init__.py new file mode 100644 index 0000000..8584e71 --- /dev/null +++ b/dirsrvtests/tests/suites/import/__init__.py @@ -0,0 +1,3 @@ +""" + :Requirement: 389-ds-base: DataBase Import +""" diff --git a/dirsrvtests/tests/suites/import/regression_test.py b/dirsrvtests/tests/suites/import/regression_test.py new file mode 100644 index 0000000..7be9e39 --- /dev/null +++ b/dirsrvtests/tests/suites/import/regression_test.py @@ -0,0 +1,305 @@ +# Copyright (C) 2017 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. +# --- END COPYRIGHT BLOCK --- +# +import pytest +from lib389.backend import Backends +from lib389.properties import TASK_WAIT +from lib389.utils import time, ldap, os, logging +from lib389.topologies import topology_st as topo +from lib389.dbgen import dbgen +from lib389._constants import DEFAULT_SUFFIX +from lib389.tasks import * +from lib389.idm.user import UserAccounts +import threading +import time + +from lib389.idm.directorymanager import DirectoryManager + +pytestmark = pytest.mark.tier1 + +DEBUGGING = os.getenv("DEBUGGING", default=False) +if DEBUGGING: + logging.getLogger(__name__).setLevel(logging.DEBUG) +else: + logging.getLogger(__name__).setLevel(logging.INFO) +log = logging.getLogger(__name__) + +TEST_SUFFIX1 = "dc=importest1,dc=com" +TEST_BACKEND1 = "importest1" +TEST_SUFFIX2 = "dc=importest2,dc=com" +TEST_BACKEND2 = "importest2" +TEST_DEFAULT_SUFFIX = "dc=default,dc=com" +TEST_DEFAULT_NAME = "default" + + +class AddDelUsers(threading.Thread): + def __init__(self, inst): + threading.Thread.__init__(self) + self.daemon = True + self.inst = inst + self._should_stop = False + self._ran = False + + def run(self): + # Add 1000 entries + log.info('Run.') + conn = DirectoryManager(self.inst.standalone).bind() + + time.sleep(30) + log.info('Adding users.') + for i in range(1000): + user = UserAccounts(conn, DEFAULT_SUFFIX) + users = user.create_test_user(uid=i) + users.delete() + self._ran = True + if self._should_stop: + break + if not self._should_stop: + raise RuntimeError('We finished too soon.') + conn.close() + + def stop(self): + self._should_stop = True + + def has_started(self): + return self._ran + + +def test_replay_import_operation(topo): + """ Check after certain failed import operation, is it + possible to replay an import operation + + :id: 5f5ca532-8e18-4f7b-86bc-ac585215a473 + :feature: Import + :setup: Standalone instance + :steps: + 1. Export the backend into an ldif file + 2. Perform high load of operation on the server (Add/Del users) + 3. Perform an import operation + 4. Again perform an import operation (same as 3) + :expectedresults: + 1. It should be successful + 2. It should be successful + 3. 
It should be unsuccessful, should give OPERATIONS_ERROR + 4. It should be successful now + """ + log.info("Exporting LDIF online...") + ldif_dir = topo.standalone.get_ldif_dir() + export_ldif = ldif_dir + '/export.ldif' + + r = ExportTask(topo.standalone) + r.export_suffix_to_ldif(ldiffile=export_ldif, suffix=DEFAULT_SUFFIX) + r.wait() + add_del_users1 = AddDelUsers(topo) + add_del_users1.start() + + log.info("Importing LDIF online, should raise operation error.") + + trials = 0 + while not add_del_users1.has_started() and trials < 10: + trials += 1 + time.sleep(1) + r = ImportTask(topo.standalone) + try: + r.import_suffix_from_ldif(ldiffile=export_ldif, suffix=DEFAULT_SUFFIX) + except ldap.OPERATIONS_ERROR: + break + log.info(f'Looping. Tried {trials} times so far.') + add_del_users1.stop() + add_del_users1.join() + + log.info("Importing LDIF online") + + r = ImportTask(topo.standalone) + r.import_suffix_from_ldif(ldiffile=export_ldif, suffix=DEFAULT_SUFFIX) + + +def test_import_be_default(topo): + """ Create a backend using the name "default". previously this name was + used int + + :id: 8e507beb-e917-4330-8cac-1ff0eee10508 + :feature: Import + :setup: Standalone instance + :steps: + 1. Create a test suffix using the be name of "default" + 2. Create an ldif for the "default" backend + 3. Import ldif + 4. Verify all entries were imported + :expectedresults: + 1. Success + 2. Success + 3. Success + 4. Success + """ + log.info('Adding suffix:{} and backend: {}...'.format(TEST_DEFAULT_SUFFIX, + TEST_DEFAULT_NAME)) + backends = Backends(topo.standalone) + backends.create(properties={'nsslapd-suffix': TEST_DEFAULT_SUFFIX, + 'name': TEST_DEFAULT_NAME}) + + log.info('Create LDIF file and import it...') + ldif_dir = topo.standalone.get_ldif_dir() + ldif_file = os.path.join(ldif_dir, 'default.ldif') + dbgen(topo.standalone, 5, ldif_file, TEST_DEFAULT_SUFFIX) + + log.info('Stopping the server and running offline import...') + topo.standalone.stop() + assert topo.standalone.ldif2db(TEST_DEFAULT_NAME, None, None, + None, ldif_file) + topo.standalone.start() + + log.info('Verifying entry count after import...') + entries = topo.standalone.search_s(TEST_DEFAULT_SUFFIX, + ldap.SCOPE_SUBTREE, + "(objectclass=*)") + assert len(entries) > 1 + + log.info('Test PASSED') + + +def test_del_suffix_import(topo): + """Adding a database entry fails if the same database was deleted after an import + + :id: 652421ef-738b-47ed-80ec-2ceece6b5d77 + :feature: Import + :setup: Standalone instance + :steps: 1. Create a test suffix and add few entries + 2. Stop the server and do offline import using ldif2db + 3. Delete the suffix backend + 4. Add a new suffix with the same database name + 5. 
Check if adding the same database name is a success + :expectedresults: Adding database with the same name should be successful + """ + + log.info('Adding suffix:{} and backend: {}'.format(TEST_SUFFIX1, TEST_BACKEND1)) + backends = Backends(topo.standalone) + backend = backends.create(properties={'nsslapd-suffix': TEST_SUFFIX1, + 'name': TEST_BACKEND1}) + + log.info('Create LDIF file and import it') + ldif_dir = topo.standalone.get_ldif_dir() + ldif_file = os.path.join(ldif_dir, 'suffix_del1.ldif') + + dbgen(topo.standalone, 10, ldif_file, TEST_SUFFIX1) + + log.info('Stopping the server and running offline import') + topo.standalone.stop() + assert topo.standalone.ldif2db(TEST_BACKEND1, TEST_SUFFIX1, None, None, ldif_file) + topo.standalone.start() + + log.info('Deleting suffix-{}'.format(TEST_SUFFIX2)) + backend.delete() + + log.info('Adding the same database-{} after deleting it'.format(TEST_BACKEND1)) + backends.create(properties={'nsslapd-suffix': TEST_SUFFIX1, + 'name': TEST_BACKEND1}) + + +def test_del_suffix_backend(topo): + """Adding a database entry fails if the same database was deleted after an import + + :id: ac702c35-74b6-434e-8e30-316433f3e91a + :feature: Import + :setup: Standalone instance + :steps: 1. Create a test suffix and add entries + 2. Stop the server and do online import using ldif2db + 3. Delete the suffix backend + 4. Add a new suffix with the same database name + 5. Restart the server and check the status + :expectedresults: Adding database with the same name should be successful and the server should not hang + """ + + log.info('Adding suffix:{} and backend: {}'.format(TEST_SUFFIX2, TEST_BACKEND2)) + backends = Backends(topo.standalone) + backend = backends.create(properties={'nsslapd-suffix': TEST_SUFFIX2, + 'name': TEST_BACKEND2}) + + log.info('Create LDIF file and import it') + ldif_dir = topo.standalone.get_ldif_dir() + ldif_file = os.path.join(ldif_dir, 'suffix_del2.ldif') + + dbgen(topo.standalone, 10, ldif_file, TEST_SUFFIX2) + + topo.standalone.tasks.importLDIF(suffix=TEST_SUFFIX2, input_file=ldif_file, args={TASK_WAIT: True}) + + log.info('Deleting suffix-{}'.format(TEST_SUFFIX2)) + backend.delete() + + log.info('Adding the same database-{} after deleting it'.format(TEST_BACKEND2)) + backends.create(properties={'nsslapd-suffix': TEST_SUFFIX2, + 'name': TEST_BACKEND2}) + log.info('Checking if server can be restarted after re-adding the same database') + topo.standalone.restart() + assert not topo.standalone.detectDisorderlyShutdown() + + +@pytest.mark.bz1406101 +@pytest.mark.ds49071 +def test_import_duplicate_dn(topo): + """Import ldif with duplicate DNs, should not log error "unable to flush" + + :id: dce2b898-119d-42b8-a236-1130f58bff17 + :setup: Standalone instance, ldif file with duplicate entries + :steps: + 1. Create a ldif file with duplicate entries + 2. Import ldif file to DS + 3. Check error log file, it should not log "unable to flush" + 4. Check error log file, it should log "Duplicated DN detected" + :expectedresults: + 1. Success + 2. Success + 3. Success + 4. 
Success + """ + + standalone = topo.standalone + + log.info('Delete the previous error logs') + standalone.deleteErrorLogs() + + log.info('Create import file') + l = """dn: dc=example,dc=com +objectclass: top +objectclass: domain +dc: example + +dn: ou=myDups00001,dc=example,dc=com +objectclass: top +objectclass: organizationalUnit +ou: myDups00001 + +dn: ou=myDups00001,dc=example,dc=com +objectclass: top +objectclass: organizationalUnit +ou: myDups00001 +""" + + ldif_dir = standalone.get_ldif_dir() + ldif_file = os.path.join(ldif_dir, 'data.ldif') + with open(ldif_file, "w") as fd: + fd.write(l) + fd.close() + + log.info('Import ldif with duplicate entry') + assert standalone.tasks.importLDIF(suffix=DEFAULT_SUFFIX, input_file=ldif_file, args={TASK_WAIT: True}) + + log.info('Restart the server to flush the logs') + standalone.restart() + + log.info('Error log should not have "unable to flush" message') + assert not standalone.ds_error_log.match('.*unable to flush.*') + + log.info('Error log should have "Duplicated DN detected" message') + assert standalone.ds_error_log.match('.*Duplicated DN detected.*') + + +if __name__ == '__main__': + # Run isolated + # -s for DEBUG mode + CURRENT_FILE = os.path.realpath(__file__) + pytest.main("-s {}".format(CURRENT_FILE)) diff --git a/dirsrvtests/tests/suites/ldapi/__init__.py b/dirsrvtests/tests/suites/ldapi/__init__.py new file mode 100644 index 0000000..330903c --- /dev/null +++ b/dirsrvtests/tests/suites/ldapi/__init__.py @@ -0,0 +1,3 @@ +""" + :Requirement: 389-ds-base: ldapi +""" diff --git a/dirsrvtests/tests/suites/lib389/__init__.py b/dirsrvtests/tests/suites/lib389/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/dirsrvtests/tests/suites/lib389/config_compare_test.py b/dirsrvtests/tests/suites/lib389/config_compare_test.py new file mode 100644 index 0000000..709bae8 --- /dev/null +++ b/dirsrvtests/tests/suites/lib389/config_compare_test.py @@ -0,0 +1,38 @@ +import os +import pytest + +from lib389.topologies import topology_i2 +from lib389.config import Config + +pytestmark = pytest.mark.tier1 + +def test_config_compare(topology_i2): + """ + Compare test between cn=config of two different Directory Server intance. + + :id: 7b3e17d6-41ca-4926-bc3b-8173dd912a61 + + :setup: two isolated directory servers + + :steps: 1. Compare if cn=config is the same + + :expectedresults: 1. It should be the same (excluding unique id attrs) + """ + st1_config = topology_i2.ins.get('standalone1').config + st2_config = topology_i2.ins.get('standalone2').config + # 'nsslapd-port' attribute is expected to be same in cn=config comparison, + # but they are different in our testing environment + # as we are using 2 DS instances running, both running simultaneuosly. + # Hence explicitly adding 'nsslapd-port' to compare_exclude. 
+ st1_config._compare_exclude.append('nsslapd-port') + st2_config._compare_exclude.append('nsslapd-port') + st1_config._compare_exclude.append('nsslapd-secureport') + st2_config._compare_exclude.append('nsslapd-secureport') + + assert Config.compare(st1_config, st2_config) + +if __name__ == '__main__': + # Run isolated + # -s for DEBUG mode + CURRENT_FILE = os.path.realpath(__file__) + pytest.main("-s %s" % CURRENT_FILE) diff --git a/dirsrvtests/tests/suites/lib389/dsldapobject/__init__.py b/dirsrvtests/tests/suites/lib389/dsldapobject/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/dirsrvtests/tests/suites/lib389/dsldapobject/dn_construct_test.py b/dirsrvtests/tests/suites/lib389/dsldapobject/dn_construct_test.py new file mode 100644 index 0000000..86d0a0d --- /dev/null +++ b/dirsrvtests/tests/suites/lib389/dsldapobject/dn_construct_test.py @@ -0,0 +1,236 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2019 William Brown +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. +# --- END COPYRIGHT BLOCK --- +# + +import ldap +import pytest + +from lib389._constants import DEFAULT_SUFFIX +from lib389.topologies import topology_st + +from lib389.idm.group import Groups, Group + +pytestmark = pytest.mark.tier1 + +################################################################################# +# This is a series of test cases to assert that various DN construction scenarios +# work as expected in lib389. +# +# DSLdapObjects are designed to allow explicit control, or to "safely assume" +# so that ldap concepts aren't as confusing. +# You can thus construct an object with a DN that is: +# * defined by you expliticly +# * derived from properties of the object automatically +# +# There are also two paths to construction: from the pluralised factory style +# builder, or from the singular. The factory style has very few extra parts +# but it's worth testing anyway. +# +# In no case do we derive a multi value rdn due to their complexity. +# + +def test_mul_explicit_rdn(topology_st): + """Test that with multiple cn and an explicit rdn, we use the rdn + + :id: b39ef204-45c0-4a74-9b59-b4ac1199d78c + + :setup: standalone instance + + :steps: 1. Create with mulitple cn and rdn + + :expectedresults: 1. Create success + """ + # Create with an explicit rdn value, given to the properties/rdn + gps = Groups(topology_st.standalone, DEFAULT_SUFFIX) + gp = gps.create('cn=test_mul_explicit_rdn', + properties={ + 'cn': ['test_mul_explicit_rdn', 'other_cn_test_mul_explicit_rdn'], + }) + assert gp.dn.lower() == f'cn=test_mul_explicit_rdn,ou=groups,{DEFAULT_SUFFIX}'.lower() + gp.delete() + +def test_mul_derive_single_dn(topology_st): + """Test that with single cn we derive rdn correctly. + + :id: f34f271a-ca57-4aa0-905a-b5392ce06c79 + + :setup: standalone instance + + :steps: 1. Create with single cn + + :expectedresults: 1. Create success + """ + gps = Groups(topology_st.standalone, DEFAULT_SUFFIX) + gp = gps.create(properties={ + 'cn': ['test_mul_derive_single_dn'], + }) + assert gp.dn.lower() == f'cn=test_mul_derive_single_dn,ou=groups,{DEFAULT_SUFFIX}'.lower() + gp.delete() + +def test_mul_derive_mult_dn(topology_st): + """Test that with multiple cn we derive rdn correctly. + + :id: 1e1f5483-bfad-4f73-9dfb-aec54d08b268 + + :setup: standalone instance + + :steps: 1. Create with multiple cn + + :expectedresults: 1. 
Create success + """ + gps = Groups(topology_st.standalone, DEFAULT_SUFFIX) + gp = gps.create(properties={ + 'cn': ['test_mul_derive_mult_dn', 'test_mul_derive_single_dn'], + }) + assert gp.dn.lower() == f'cn=test_mul_derive_mult_dn,ou=groups,{DEFAULT_SUFFIX}'.lower() + gp.delete() + +def test_sin_explicit_dn(topology_st): + """Test explicit dn with create + + :id: 2d812225-243b-4f87-85ad-d403a4ae0267 + + :setup: standalone instance + + :steps: 1. Create with explicit dn + + :expectedresults: 1. Create success + """ + expect_dn = f'cn=test_sin_explicit_dn,ou=groups,{DEFAULT_SUFFIX}' + gp = Group(topology_st.standalone, dn=expect_dn) + gp.create(properties={ + 'cn': ['test_sin_explicit_dn'], + }) + assert gp.dn.lower() == expect_dn.lower() + gp.delete() + +def test_sin_explicit_rdn(topology_st): + """Test explicit rdn with create. + + :id: a2c14e50-8086-4edb-9088-3f4a8e875c3a + + :setup: standalone instance + + :steps: 1. Create with explicit rdn + + :expectedresults: 1. Create success + """ + gp = Group(topology_st.standalone) + gp.create(rdn='cn=test_sin_explicit_rdn', + basedn=f'ou=groups,{DEFAULT_SUFFIX}', + properties={ + 'cn': ['test_sin_explicit_rdn'], + }) + assert gp.dn.lower() == f'cn=test_sin_explicit_rdn,ou=groups,{DEFAULT_SUFFIX}'.lower() + gp.delete() + +def test_sin_derive_single_dn(topology_st): + """Derive the dn from a single cn + + :id: d7597016-214c-4fbd-8b48-71eb16ea9ede + + :setup: standalone instance + + :steps: 1. Create with a single cn (no dn, no rdn) + + :expectedresults: 1. Create success + """ + gp = Group(topology_st.standalone) + gp.create(basedn=f'ou=groups,{DEFAULT_SUFFIX}', + properties={ + 'cn': ['test_sin_explicit_dn'], + }) + assert gp.dn.lower() == f'cn=test_sin_explicit_dn,ou=groups,{DEFAULT_SUFFIX}'.lower() + gp.delete() + +def test_sin_derive_mult_dn(topology_st): + """Derive the dn from multiple cn + + :id: 0a1a7132-a08f-4b56-ae52-30c8ca59cfaf + + :setup: standalone instance + + :steps: 1. Create with multiple cn + + :expectedresults: 1. Create success + """ + gp = Group(topology_st.standalone) + gp.create(basedn=f'ou=groups,{DEFAULT_SUFFIX}', + properties={ + 'cn': ['test_sin_derive_mult_dn', 'other_test_sin_derive_mult_dn'], + }) + assert gp.dn.lower() == f'cn=test_sin_derive_mult_dn,ou=groups,{DEFAULT_SUFFIX}'.lower() + gp.delete() + +def test_sin_invalid_no_basedn(topology_st): + """Test that with insufficent data, create fails. + + :id: a710b81c-cb74-4632-97b3-bdbcccd40954 + + :setup: standalone instance + + :steps: 1. Create with no basedn (no rdn derivation will work) + + :expectedresults: 1. Create fails + """ + gp = Group(topology_st.standalone) + # No basedn, so we can't derive the full dn from this. + with pytest.raises(ldap.UNWILLING_TO_PERFORM): + gp.create(properties={ + 'cn': ['test_sin_invalid_no_basedn'], + }) + +def test_sin_invalid_no_rdn(topology_st): + """Test that with no cn, rdn derivation fails. + + :id: c3bb28f8-db59-4d8a-8920-169879ef702b + + :setup: standalone instance + + :steps: 1. Create with no cn + + :expectedresults: 1. Create fails + """ + gp = Group(topology_st.standalone) + with pytest.raises(ldap.UNWILLING_TO_PERFORM): + # Note lack of rdn derivable type (cn) AND no rdn + gp.create(basedn=f'ou=groups,{DEFAULT_SUFFIX}', + properties={ + 'member': ['test_sin_explicit_dn'], + }) + +def test_sin_non_present_rdn(topology_st): + """Test that with an rdn not present in attributes, create succeeds in some cases. + + :id: a5d9cb24-8907-4622-ac85-90407a66e00a + + :setup: standalone instance + + :steps: 1. 
Create with an rdn not in properties
+
+    :expectedresults: 1. Create success
+    """
+    # Test that creating something with an rdn not present in the properties works
+    # NOTE: I think that this is 389-ds making this work, NOT lib389.
+    gp1 = Group(topology_st.standalone)
+    gp1.create(rdn='cn=test_sin_non_present_rdn',
+        basedn=f'ou=groups,{DEFAULT_SUFFIX}',
+        properties={
+            'cn': ['other_test_sin_non_present_rdn'],
+        })
+    assert gp1.dn.lower() == f'cn=test_sin_non_present_rdn,ou=groups,{DEFAULT_SUFFIX}'.lower()
+    gp1.delete()
+
+    # Now, test where there is no cn. lib389 is blocking this today, but
+    # 50259 will change this.
+    gp2 = Group(topology_st.standalone)
+    gp2.create(rdn='cn=test_sin_non_present_rdn',
+        basedn=f'ou=groups,{DEFAULT_SUFFIX}',
+        properties={})
+    assert gp2.dn.lower() == f'cn=test_sin_non_present_rdn,ou=groups,{DEFAULT_SUFFIX}'.lower()
+    gp2.delete()
diff --git a/dirsrvtests/tests/suites/lib389/idm/__init__.py b/dirsrvtests/tests/suites/lib389/idm/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/dirsrvtests/tests/suites/lib389/idm/user_compare_i2_test.py b/dirsrvtests/tests/suites/lib389/idm/user_compare_i2_test.py
new file mode 100644
index 0000000..c7540e4
--- /dev/null
+++ b/dirsrvtests/tests/suites/lib389/idm/user_compare_i2_test.py
@@ -0,0 +1,49 @@
+import os
+import pytest
+from lib389._constants import DEFAULT_SUFFIX
+from lib389.idm.user import UserAccounts, UserAccount
+from lib389.topologies import topology_i2
+
+pytestmark = pytest.mark.tier1
+
+def test_user_compare_i2(topology_i2):
+    """
+    Compare test between users of two different Directory Server instances.
+
+    :id: f0ffaf59-e2c2-41ec-9f26-e9b1ef287463
+
+    :setup: two isolated directory servers
+
+    :steps: 1. Add an identical user to each server
+            2. Compare if the users are "the same"
+
+    :expectedresults: 1. Users are added
+                      2. The users are reported as the same
+    """
+    st1_users = UserAccounts(topology_i2.ins.get('standalone1'), DEFAULT_SUFFIX)
+    st2_users = UserAccounts(topology_i2.ins.get('standalone2'), DEFAULT_SUFFIX)
+
+    # Create user
+    user_properties = {
+        'uid': 'testuser',
+        'cn': 'testuser',
+        'sn': 'user',
+        'uidNumber': '1000',
+        'gidNumber': '2000',
+        'homeDirectory': '/home/testuser'
+    }
+
+    st1_users.create(properties=user_properties)
+    st1_testuser = st1_users.get('testuser')
+
+    st2_users.create(properties=user_properties)
+    st2_testuser = st2_users.get('testuser')
+
+    assert UserAccount.compare(st1_testuser, st2_testuser)
+
+
+if __name__ == '__main__':
+    # Run isolated
+    # -s for DEBUG mode
+    CURRENT_FILE = os.path.realpath(__file__)
+    pytest.main("-s %s" % CURRENT_FILE)
diff --git a/dirsrvtests/tests/suites/lib389/idm/user_compare_m2Repl_test.py b/dirsrvtests/tests/suites/lib389/idm/user_compare_m2Repl_test.py
new file mode 100644
index 0000000..ab4fe3d
--- /dev/null
+++ b/dirsrvtests/tests/suites/lib389/idm/user_compare_m2Repl_test.py
@@ -0,0 +1,57 @@
+import os
+import pytest
+from lib389._constants import DEFAULT_SUFFIX
+from lib389.replica import ReplicationManager
+from lib389.idm.user import UserAccounts, UserAccount
+from lib389.topologies import topology_m2
+
+pytestmark = pytest.mark.tier1
+
+def test_user_compare_m2Repl(topology_m2):
+    """
+    User compare test between users of master to master replication topology.
+
+    :id: 7c243bea-4075-4304-864d-5b789d364871
+
+    :setup: 2 master MMR
+
+    :steps: 1. Add a user to m1
+            2. Wait for replication
+            3. Compare if the user is the same
+
+    :expectedresults: 1. User is added
+                      2. Replication success
+                      3. 
The user is the same + """ + rm = ReplicationManager(DEFAULT_SUFFIX) + m1 = topology_m2.ms.get('master1') + m2 = topology_m2.ms.get('master2') + + m1_users = UserAccounts(m1, DEFAULT_SUFFIX) + m2_users = UserAccounts(m2, DEFAULT_SUFFIX) + + # Create 1st user + user1_properties = { + 'uid': 'testuser', + 'cn': 'testuser', + 'sn': 'user', + 'uidNumber': '1000', + 'gidNumber': '2000', + 'homeDirectory': '/home/testuser' + } + + m1_users.create(properties=user1_properties) + m1_testuser = m1_users.get('testuser') + + rm.wait_for_replication(m1, m2) + + m2_testuser = m2_users.get('testuser') + + assert UserAccount.compare(m1_testuser, m2_testuser) + + +if __name__ == '__main__': + # Run isolated + # -s for DEBUG mode + CURRENT_FILE = os.path.realpath(__file__) + pytest.main("-s %s" % CURRENT_FILE) diff --git a/dirsrvtests/tests/suites/lib389/idm/user_compare_st_test.py b/dirsrvtests/tests/suites/lib389/idm/user_compare_st_test.py new file mode 100644 index 0000000..4703bb4 --- /dev/null +++ b/dirsrvtests/tests/suites/lib389/idm/user_compare_st_test.py @@ -0,0 +1,78 @@ +import os +import pytest +from lib389._constants import DEFAULT_SUFFIX +from lib389.idm.group import Groups +from lib389.idm.user import UserAccounts, UserAccount +from lib389.topologies import topology_st as topology + +pytestmark = pytest.mark.tier1 + +def test_user_compare(topology): + """ + Testing compare function + + :id: 26f2dea9-be1e-48ca-bcea-79592823390c + + :setup: Standalone instance + + :steps: + 1. Testing comparison of two different users. + 2. Testing comparison of 'str' object with itself. + 3. Testing comparison of user with similar user (different object id). + 4. Testing comparison of user with group. + + :expectedresults: + 1. Should fail to compare + 2. Should raise value error + 3. Should be the same despite uuid difference + 4. 
Should fail to compare + """ + users = UserAccounts(topology.standalone, DEFAULT_SUFFIX) + groups = Groups(topology.standalone, DEFAULT_SUFFIX) + # Create 1st user + user1_properties = { + 'uid': 'testuser1', + 'cn': 'testuser1', + 'sn': 'user', + 'uidNumber': '1000', + 'gidNumber': '2000', + 'homeDirectory': '/home/testuser1' + } + + users.create(properties=user1_properties) + testuser1 = users.get('testuser1') + # Create 2nd user + user2_properties = { + 'uid': 'testuser2', + 'cn': 'testuser2', + 'sn': 'user', + 'uidNumber': '1001', + 'gidNumber': '2002', + 'homeDirectory': '/home/testuser2' + } + + users.create(properties=user2_properties) + testuser2 = users.get('testuser2') + # create group + group_properties = { + 'cn' : 'group1', + 'description' : 'testgroup' + } + + testuser1_copy = users.get("testuser1") + group = groups.create(properties=group_properties) + + assert UserAccount.compare(testuser1, testuser2) is False + + with pytest.raises(ValueError): + UserAccount.compare("test_str_object","test_str_object") + + assert UserAccount.compare(testuser1, testuser1_copy) + assert UserAccount.compare(testuser1, group) is False + + +if __name__ == '__main__': + # Run isolated + # -s for DEBUG mode + CURRENT_FILE = os.path.realpath(__file__) + pytest.main("-s %s" % CURRENT_FILE) diff --git a/dirsrvtests/tests/suites/logging/__init__.py b/dirsrvtests/tests/suites/logging/__init__.py new file mode 100644 index 0000000..7f812e3 --- /dev/null +++ b/dirsrvtests/tests/suites/logging/__init__.py @@ -0,0 +1,3 @@ +""" + :Requirement: 389-ds-base: Directory Server Logging Configurations +""" diff --git a/dirsrvtests/tests/suites/logging/logging_config_test.py b/dirsrvtests/tests/suites/logging/logging_config_test.py new file mode 100644 index 0000000..ac154b7 --- /dev/null +++ b/dirsrvtests/tests/suites/logging/logging_config_test.py @@ -0,0 +1,87 @@ +import logging +import pytest +import os +import ldap +from lib389._constants import * +from lib389.topologies import topology_st as topo + +DEBUGGING = os.getenv("DEBUGGING", default=False) +if DEBUGGING: + logging.getLogger(__name__).setLevel(logging.DEBUG) +else: + logging.getLogger(__name__).setLevel(logging.INFO) +log = logging.getLogger(__name__) + +big_value = "1111111111111111111111111111111111111111111" + + +@pytest.mark.parametrize("attr, invalid_vals, valid_vals", + [ + ("logexpirationtime", ["-2", "0"], ["1", "-1"]), + ("maxlogsize", ["-2", "0"], ["100", "-1"]), + ("logmaxdiskspace", ["-2", "0"], ["100", "-1"]), + ("logminfreediskspace", ["-2", "0"], ["100", "-1"]), + ("mode", ["888", "778", "77", "7777"], ["777", "000", "600"]), + ("maxlogsperdir", ["-1", "0"], ["1", "20"]), + ("logrotationsynchour", ["-1", "24"], ["0", "23"]), + ("logrotationsyncmin", ["-1", "60"], ["0", "59"]), + ("logrotationtime", ["-2", "0"], ["100", "-1"]) + ]) +def test_logging_digit_config(topo, attr, invalid_vals, valid_vals): + """Validate logging config settings + + :id: a0ef30e5-538b-46fa-9762-01a4435a15e9 + :parametrized: yes + :setup: Standalone Instance + :steps: + 1. Test log expiration time + 2. Test log max size + 3. Test log max disk space + 4. Test log min disk space + 5. Test log mode + 6. Test log max number of logs + 7. Test log rotation hour + 8. Test log rotation minute + 9. Test log rotation time + :expectedresults: + 1. Success + 2. Success + 3. Success + 4. Success + 5. Success + 6. Success + 7. Success + 8. Success + 9. 
Success
+    """
+
+    accesslog_attr = "nsslapd-accesslog-{}".format(attr)
+    auditlog_attr = "nsslapd-auditlog-{}".format(attr)
+    auditfaillog_attr = "nsslapd-auditfaillog-{}".format(attr)
+    errorlog_attr = "nsslapd-errorlog-{}".format(attr)
+
+    # Test each log
+    for attr in [accesslog_attr, auditlog_attr, auditfaillog_attr, errorlog_attr]:
+        # Invalid values
+        for invalid_val in invalid_vals:
+            with pytest.raises(ldap.LDAPError):
+                topo.standalone.config.set(attr, invalid_val)
+
+        # Invalid high value
+        with pytest.raises(ldap.LDAPError):
+            topo.standalone.config.set(attr, big_value)
+
+        # Non digits
+        with pytest.raises(ldap.LDAPError):
+            topo.standalone.config.set(attr, "abc")
+
+        # Valid values
+        for valid_val in valid_vals:
+            topo.standalone.config.set(attr, valid_val)
+
+
+if __name__ == '__main__':
+    # Run isolated
+    # -s for DEBUG mode
+    CURRENT_FILE = os.path.realpath(__file__)
+    pytest.main(["-s", CURRENT_FILE])
diff --git a/dirsrvtests/tests/suites/mapping_tree/__init__.py b/dirsrvtests/tests/suites/mapping_tree/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/dirsrvtests/tests/suites/mapping_tree/be_del_and_default_naming_attr_test.py b/dirsrvtests/tests/suites/mapping_tree/be_del_and_default_naming_attr_test.py
new file mode 100644
index 0000000..34a2de2
--- /dev/null
+++ b/dirsrvtests/tests/suites/mapping_tree/be_del_and_default_naming_attr_test.py
@@ -0,0 +1,90 @@
+import logging
+import pytest
+import os
+from lib389._constants import DEFAULT_SUFFIX
+from lib389.topologies import topology_m1 as topo
+from lib389.backend import Backends
+from lib389.encrypted_attributes import EncryptedAttrs
+
+DEBUGGING = os.getenv("DEBUGGING", default=False)
+if DEBUGGING:
+    logging.getLogger(__name__).setLevel(logging.DEBUG)
+else:
+    logging.getLogger(__name__).setLevel(logging.INFO)
+log = logging.getLogger(__name__)
+
+SECOND_SUFFIX = 'o=namingcontext'
+THIRD_SUFFIX = 'o=namingcontext2'
+
+def test_be_delete(topo):
+    """Test that we can delete a backend that contains replication
+    configuration and encrypted attributes. The default naming
+    context should also be updated to reflect the next available suffix
+
+    :id: 5208f897-7c95-4925-bad0-9ceb95fee678
+    :setup: Master Instance
+    :steps:
+        1. Create second backend/suffix
+        2. Add an encrypted attribute to the default suffix
+        3. Delete default suffix
+        4. Check the nsslapd-defaultnamingcontext is updated
+        5. Delete the last backend
+        6. Check the namingcontext has not changed
+        7. Add new backend
+        8. Set default naming context
+        9. Verify the naming context is correct
+    :expectedresults:
+        1. Success
+        2. Success
+        3. Success
+        4. Success
+        5. Success
+        6. Success
+        7. Success
+        8. Success
+        9. 
Success + """ + + inst = topo.ms["master1"] + + # Create second suffix + backends = Backends(inst) + default_backend = backends.get(DEFAULT_SUFFIX) + new_backend = backends.create(properties={'nsslapd-suffix': SECOND_SUFFIX, + 'name': 'namingRoot'}) + + # Add encrypted attribute entry under default suffix + encrypt_attrs = EncryptedAttrs(inst, basedn='cn=encrypted attributes,{}'.format(default_backend.dn)) + encrypt_attrs.create(properties={'cn': 'employeeNumber', 'nsEncryptionAlgorithm': 'AES'}) + + # Delete default suffix + default_backend.delete() + + # Check that the default naming context is set to the new/second suffix + default_naming_ctx = inst.config.get_attr_val_utf8('nsslapd-defaultnamingcontext') + assert default_naming_ctx == SECOND_SUFFIX + + # delete new backend, but the naming context should not change + new_backend.delete() + + # Check that the default naming context is still set to the new/second suffix + default_naming_ctx = inst.config.get_attr_val_utf8('nsslapd-defaultnamingcontext') + assert default_naming_ctx == SECOND_SUFFIX + + # Add new backend + new_backend = backends.create(properties={'nsslapd-suffix': THIRD_SUFFIX, + 'name': 'namingRoot2'}) + + # manaully set naming context + inst.config.set('nsslapd-defaultnamingcontext', THIRD_SUFFIX) + + # Verify naming context is correct + default_naming_ctx = inst.config.get_attr_val_utf8('nsslapd-defaultnamingcontext') + assert default_naming_ctx == THIRD_SUFFIX + + +if __name__ == '__main__': + # Run isolated + # -s for DEBUG mode + CURRENT_FILE = os.path.realpath(__file__) + pytest.main(["-s", CURRENT_FILE]) + diff --git a/dirsrvtests/tests/suites/mapping_tree/referral_during_tot_init_test.py b/dirsrvtests/tests/suites/mapping_tree/referral_during_tot_init_test.py new file mode 100644 index 0000000..730969a --- /dev/null +++ b/dirsrvtests/tests/suites/mapping_tree/referral_during_tot_init_test.py @@ -0,0 +1,69 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2017 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. 
+# --- END COPYRIGHT BLOCK ---
+#
+import ldap
+import pytest
+from lib389.topologies import topology_m2
+from lib389._constants import (DEFAULT_SUFFIX, HOST_MASTER_2, PORT_MASTER_2, TASK_WAIT)
+from lib389.agreement import Agreements
+
+from lib389.idm.user import (TEST_USER_PROPERTIES, UserAccounts)
+
+from lib389.dbgen import dbgen
+from lib389.utils import ds_is_older
+
+pytestmark = pytest.mark.tier1
+
+@pytest.mark.skipif(ds_is_older("1.4.0.0"), reason="Not implemented")
+def test_referral_during_tot(topology_m2):
+
+    master1 = topology_m2.ms["master1"]
+    master2 = topology_m2.ms["master2"]
+
+    users = UserAccounts(master2, DEFAULT_SUFFIX)
+
+    u = users.create(properties=TEST_USER_PROPERTIES)
+    u.set('userPassword', 'password')
+
+    binddn = u.dn
+    bindpw = 'password'
+
+    # Create a bunch of entries on master1
+    ldif_dir = master1.get_ldif_dir()
+    import_ldif = ldif_dir + '/ref_during_tot_import.ldif'
+    dbgen(master1, 10000, import_ldif, DEFAULT_SUFFIX)
+
+    master1.stop()
+    master1.ldif2db(bename=None, excludeSuffixes=None, encrypt=False, suffixes=[DEFAULT_SUFFIX], import_file=import_ldif)
+    master1.start()
+    # Recreate the user on m1 also, so that if the init finishes first we don't lose the user on m2
+    users = UserAccounts(master1, DEFAULT_SUFFIX)
+    u = users.create(properties=TEST_USER_PROPERTIES)
+    u.set('userPassword', 'password')
+    # Now export them to master2
+    agmts = Agreements(master1)
+    agmts.list()[0].begin_reinit()
+
+    # While that's happening try to bind as a user to master 2
+    # This should trigger the referral code.
+    referred = False
+    for i in range(0, 100):
+        conn = ldap.initialize(master2.toLDAPURL())
+        conn.set_option(ldap.OPT_REFERRALS, False)
+        try:
+            conn.simple_bind_s(binddn, bindpw)
+            conn.unbind_s()
+        except ldap.REFERRAL:
+            referred = True
+            break
+    # Means we never got a referral, which should not happen!
+    assert referred
+
+    # Done.
+
+
diff --git a/dirsrvtests/tests/suites/memberof_plugin/__init__.py b/dirsrvtests/tests/suites/memberof_plugin/__init__.py
new file mode 100644
index 0000000..d5b1467
--- /dev/null
+++ b/dirsrvtests/tests/suites/memberof_plugin/__init__.py
@@ -0,0 +1,3 @@
+"""
+    :Requirement: 389-ds-base: Memberof Plugin
+"""
diff --git a/dirsrvtests/tests/suites/memberof_plugin/regression_test.py b/dirsrvtests/tests/suites/memberof_plugin/regression_test.py
new file mode 100644
index 0000000..ca74791
--- /dev/null
+++ b/dirsrvtests/tests/suites/memberof_plugin/regression_test.py
@@ -0,0 +1,863 @@
+# --- BEGIN COPYRIGHT BLOCK ---
+# Copyright (C) 2017 Red Hat, Inc.
+# All rights reserved.
+#
+# License: GPL (version 3 or any later version).
+# See LICENSE for details.
+# --- END COPYRIGHT BLOCK --- +# +import logging +import pytest +import os +import time +import ldap +from random import sample +from lib389.utils import ds_is_older, ensure_list_bytes, ensure_bytes, ensure_str +from lib389.topologies import topology_m1h1c1 as topo, topology_st, topology_m2 as topo_m2 +from lib389._constants import * +from lib389.plugins import MemberOfPlugin +from lib389 import Entry +from lib389.idm.user import UserAccount, UserAccounts, TEST_USER_PROPERTIES +from lib389.idm.group import Groups, Group +from lib389.replica import ReplicationManager +from lib389.tasks import * +from lib389.idm.nscontainer import nsContainers + + +# Skip on older versions +pytestmark = [pytest.mark.tier1, + pytest.mark.skipif(ds_is_older('1.3.7'), reason="Not implemented")] + +USER_CN = 'user_' +GROUP_CN = 'group1' +DEBUGGING = os.getenv('DEBUGGING', False) +SUBTREE_1 = 'cn=sub1,%s' % SUFFIX +SUBTREE_2 = 'cn=sub2,%s' % SUFFIX + + +if DEBUGGING: + logging.getLogger(__name__).setLevel(logging.DEBUG) +else: + logging.getLogger(__name__).setLevel(logging.INFO) +log = logging.getLogger(__name__) + + +def add_users(topo_m2, users_num, suffix): + """Add users to the default suffix + Return the list of added user DNs. + """ + users_list = [] + users = UserAccounts(topo_m2.ms["master1"], suffix, rdn=None) + log.info('Adding %d users' % users_num) + for num in sample(list(range(1000)), users_num): + num_ran = int(round(num)) + USER_NAME = 'test%05d' % num_ran + user = users.create(properties={ + 'uid': USER_NAME, + 'sn': USER_NAME, + 'cn': USER_NAME, + 'uidNumber': '%s' % num_ran, + 'gidNumber': '%s' % num_ran, + 'homeDirectory': '/home/%s' % USER_NAME, + 'mail': '%s@redhat.com' % USER_NAME, + 'userpassword': 'pass%s' % num_ran, + }) + users_list.append(user) + return users_list + + +def config_memberof(server): + # Configure fractional to prevent total init to send memberof + memberof = MemberOfPlugin(server) + memberof.enable() + memberof.set_autoaddoc('nsMemberOf') + server.restart() + ents = server.agreement.list(suffix=DEFAULT_SUFFIX) + for ent in ents: + log.info('update %s to add nsDS5ReplicatedAttributeListTotal' % ent.dn) + server.agreement.setProperties(agmnt_dn=ents[0].dn, + properties={RA_FRAC_EXCLUDE: '(objectclass=*) $ EXCLUDE memberOf', + RA_FRAC_EXCLUDE_TOTAL_UPDATE: '(objectclass=*) $ EXCLUDE '}) + + +def send_updates_now(server): + ents = server.agreement.list(suffix=DEFAULT_SUFFIX) + for ent in ents: + server.agreement.pause(ent.dn) + server.agreement.resume(ent.dn) + + +def _find_memberof(server, member_dn, group_dn): + #To get the specific server's (M1, C1 and H1) user and group + user = UserAccount(server, member_dn) + assert user.exists() + group = Group(server, group_dn) + assert group.exists() + + #test that the user entry should have memberof attribute with sepecified group dn value + assert group._dn in user.get_attr_vals_utf8('memberOf') + + +@pytest.mark.bz1352121 +def test_memberof_with_repl(topo): + """Test that we allowed to enable MemberOf plugin in dedicated consumer + + :id: ef71cd7c-e792-41bf-a3c0-b3b38391cbe5 + :setup: 1 Master - 1 Hub - 1 Consumer + :steps: + 1. Configure replication to EXCLUDE memberof + 2. Enable memberof plugin + 3. Create users/groups + 4. Make user_0 member of group_0 + 5. Checks that user_0 is memberof group_0 on M,H,C + 6. Make group_0 member of group_1 (nest group) + 7. Checks that user_0 is memberof group_0 and group_1 on M,H,C + 8. Check group_0 is memberof group_1 on M,H,C + 9. Remove group_0 from group_1 + 10. 
Check group_0 and user_0 are NOT memberof group_1 on M,H,C + 11. Remove user_0 from group_0 + 12. Check user_0 is not memberof group_0 and group_1 on M,H,C + 13. Disable memberof on C + 14. make user_0 member of group_1 + 15. Checks that user_0 is memberof group_0 on M,H but not on C + 16. Enable memberof on C + 17. Checks that user_0 is memberof group_0 on M,H but not on C + 18. Run memberof fixup task + 19. Checks that user_0 is memberof group_0 on M,H,C + :expectedresults: + 1. Configuration should be successful + 2. Plugin should be enabled + 3. Users and groups should be created + 4. user_0 should be member of group_0 + 5. user_0 should be memberof group_0 on M,H,C + 6. group_0 should be member of group_1 + 7. user_0 should be memberof group_0 and group_1 on M,H,C + 8. group_0 should be memberof group_1 on M,H,C + 9. group_0 from group_1 removal should be successful + 10. group_0 and user_0 should not be memberof group_1 on M,H,C + 11. user_0 from group_0 remove should be successful + 12. user_0 should not be memberof group_0 and group_1 on M,H,C + 13. memberof should be disabled on C + 14. user_0 should be member of group_1 + 15. user_0 should be memberof group_0 on M,H and should not on C + 16. Enable memberof on C should be successful + 17. user_0 should be memberof group_0 on M,H should not on C + 18. memberof fixup task should be successful + 19. user_0 should be memberof group_0 on M,H,C + """ + + M1 = topo.ms["master1"] + H1 = topo.hs["hub1"] + C1 = topo.cs["consumer1"] + + # Step 1 & 2 + M1.config.enable_log('audit') + config_memberof(M1) + M1.restart() + + H1.config.enable_log('audit') + config_memberof(H1) + H1.restart() + + C1.config.enable_log('audit') + config_memberof(C1) + C1.restart() + + #Declare lists of users and groups + test_users = [] + test_groups = [] + + # Step 3 + #In for loop create users and add them in the user list + #it creates user_0 to user_9 (range is fun) + for i in range(10): + CN = '%s%d' % (USER_CN, i) + users = UserAccounts(M1, SUFFIX) + user_props = TEST_USER_PROPERTIES.copy() + user_props.update({'uid': CN, 'cn': CN, 'sn': '_%s' % CN}) + testuser = users.create(properties=user_props) + time.sleep(2) + test_users.append(testuser) + + #In for loop create groups and add them to the group list + #it creates group_0 to group_2 (range is fun) + for i in range(3): + CN = '%s%d' % (GROUP_CN, i) + groups = Groups(M1, SUFFIX) + testgroup = groups.create(properties={'cn': CN}) + time.sleep(2) + test_groups.append(testgroup) + + # Step 4 + #Now start testing by adding differnt user to differn group + if not ds_is_older('1.3.7'): + test_groups[0].remove('objectClass', 'nsMemberOf') + + member_dn = test_users[0].dn + grp0_dn = test_groups[0].dn + grp1_dn = test_groups[1].dn + + test_groups[0].add_member(member_dn) + time.sleep(5) + + # Step 5 + for i in [M1, H1, C1]: + _find_memberof(i, member_dn, grp0_dn) + + # Step 6 + test_groups[1].add_member(test_groups[0].dn) + time.sleep(5) + + # Step 7 + for i in [grp0_dn, grp1_dn]: + for inst in [M1, H1, C1]: + _find_memberof(inst, member_dn, i) + + # Step 8 + for i in [M1, H1, C1]: + _find_memberof(i, grp0_dn, grp1_dn) + + # Step 9 + test_groups[1].remove_member(test_groups[0].dn) + time.sleep(5) + + # Step 10 + # For negative testcase, we are using assertionerror + for inst in [M1, H1, C1]: + for i in [grp0_dn, member_dn]: + with pytest.raises(AssertionError): + _find_memberof(inst, i, grp1_dn) + + # Step 11 + test_groups[0].remove_member(member_dn) + time.sleep(5) + + # Step 12 + for inst in [M1, H1, C1]: + for 
grp in [grp0_dn, grp1_dn]: + with pytest.raises(AssertionError): + _find_memberof(inst, member_dn, grp) + + # Step 13 + C1.plugins.disable(name=PLUGIN_MEMBER_OF) + C1.restart() + + # Step 14 + test_groups[0].add_member(member_dn) + time.sleep(5) + + # Step 15 + for i in [M1, H1]: + _find_memberof(i, member_dn, grp0_dn) + with pytest.raises(AssertionError): + _find_memberof(C1, member_dn, grp0_dn) + + # Step 16 + memberof = MemberOfPlugin(C1) + memberof.enable() + C1.restart() + + # Step 17 + for i in [M1, H1]: + _find_memberof(i, member_dn, grp0_dn) + with pytest.raises(AssertionError): + _find_memberof(C1, member_dn, grp0_dn) + + # Step 18 + memberof.fixup(SUFFIX) + time.sleep(5) + + # Step 19 + for i in [M1, H1, C1]: + _find_memberof(i, member_dn, grp0_dn) + + +@pytest.mark.skipif(ds_is_older('1.3.7'), reason="Not implemented") +def test_scheme_violation_errors_logged(topo_m2): + """Check that ERR messages are verbose enough, if a member entry + doesn't have the appropriate objectclass to support 'memberof' attribute + + :id: e2af0aaa-447e-4e85-a5ce-57ae66260d0b + :setup: Standalone instance + :steps: + 1. Enable memberofPlugin and set autoaddoc to nsMemberOf + 2. Restart the instance + 3. Add a user without nsMemberOf attribute + 4. Create a group and add the user to the group + 5. Check that user has memberOf attribute + 6. Check the error log for ".*oc_check_allowed_sv.*USER_DN.*memberOf.*not allowed.*" + and ".*schema violation caught - repair operation.*" patterns + :expectedresults: + 1. Should be successful + 2. Should be successful + 3. Should be successful + 4. Should be successful + 5. User should have the attribute + 6. Errors should be logged + """ + + inst = topo_m2.ms["master1"] + memberof = MemberOfPlugin(inst) + memberof.enable() + memberof.set_autoaddoc('nsMemberOf') + inst.restart() + + users = UserAccounts(inst, SUFFIX) + user_props = TEST_USER_PROPERTIES.copy() + user_props.update({'uid': USER_CN, 'cn': USER_CN, 'sn': USER_CN}) + testuser = users.create(properties=user_props) + testuser.remove('objectclass', 'nsMemberOf') + + groups = Groups(inst, SUFFIX) + testgroup = groups.create(properties={'cn': GROUP_CN}) + + testgroup.add('member', testuser.dn) + + user_memberof_attr = testuser.get_attr_val_utf8('memberof') + assert user_memberof_attr + log.info('memberOf attr value - {}'.format(user_memberof_attr)) + + pattern = ".*oc_check_allowed_sv.*{}.*memberOf.*not allowed.*".format(testuser.dn) + log.info("pattern = %s" % pattern) + assert inst.ds_error_log.match(pattern) + + pattern = ".*schema violation caught - repair operation.*" + assert inst.ds_error_log.match(pattern) + + +@pytest.mark.bz1192099 +def test_memberof_with_changelog_reset(topo_m2): + """Test that replication does not break, after DS stop-start, due to changelog reset + + :id: 60c11636-55a1-4704-9e09-2c6bcc828de4 + :setup: 2 Masters + :steps: + 1. On M1 and M2, Enable memberof + 2. On M1, add 999 entries allowing memberof + 3. On M1, add a group with these 999 entries as members + 4. Stop M1 in between, + when add the group memerof is called and before it is finished the + add, so step 4 should be executed after memberof has started and + before the add has finished + 5. Check that replication is working fine + :expectedresults: + 1. memberof should be enabled + 2. Entries should be added + 3. Add operation should start + 4. M1 should be stopped + 5. 
Replication should be working fine + """ + m1 = topo_m2.ms["master1"] + m2 = topo_m2.ms["master2"] + + log.info("Configure memberof on M1 and M2") + memberof = MemberOfPlugin(m1) + memberof.enable() + memberof.set_autoaddoc('nsMemberOf') + m1.restart() + + memberof = MemberOfPlugin(m2) + memberof.enable() + memberof.set_autoaddoc('nsMemberOf') + m2.restart() + + log.info("On M1, add 999 test entries allowing memberof") + users_list = add_users(topo_m2, 999, DEFAULT_SUFFIX) + + log.info("On M1, add a group with these 999 entries as members") + dic_of_attributes = {'cn': ensure_bytes('testgroup'), + 'objectclass': ensure_list_bytes(['top', 'groupOfNames'])} + + for user in users_list: + dic_of_attributes.setdefault('member', []) + dic_of_attributes['member'].append(user.dn) + + log.info('Adding the test group using async function') + groupdn = 'cn=testgroup,%s' % DEFAULT_SUFFIX + m1.add(Entry((groupdn, dic_of_attributes))) + + #shutdown the server in-between adding the group + m1.stop() + + #start the server + m1.start() + + log.info("Check the log messages for error") + error_msg = "ERR - NSMMReplicationPlugin - ruv_compare_ruv" + assert not m1.ds_error_log.match(error_msg) + + log.info("Check that the replication is working fine both ways, M1 <-> M2") + repl = ReplicationManager(DEFAULT_SUFFIX) + repl.test_replication_topology(topo_m2) + + +def add_container(inst, dn, name, sleep=False): + """Creates container entry""" + conts = nsContainers(inst, dn) + cont = conts.create(properties={'cn': name}) + if sleep: + time.sleep(1) + return cont + + +def add_member(server, cn, subtree): + dn = subtree + users = UserAccounts(server, dn, rdn=None) + users.create(properties={'uid': 'test_%s' % cn, + 'cn': "%s" % cn, + 'sn': 'SN', + 'description': 'member', + 'uidNumber': '1000', + 'gidNumber': '2000', + 'homeDirectory': '/home/testuser' + }) + + +def add_group(server, cn, subtree): + group = Groups(server, subtree, rdn=None) + group.create(properties={'cn': "%s" % cn, + 'member': ['uid=test_m1,%s' % SUBTREE_1, 'uid=test_m2,%s' % SUBTREE_1], + 'description': 'group'}) + + +def rename_entry(server, cn, from_subtree, to_subtree): + dn = '%s,%s' % (cn, from_subtree) + nrdn = '%s-new' % cn + log.fatal('Renaming user (%s): new %s' % (dn, nrdn)) + server.rename_s(dn, nrdn, newsuperior=to_subtree, delold=0) + + +def _find_memberof_ext(server, user_dn=None, group_dn=None, find_result=True): + assert (server) + assert (user_dn) + assert (group_dn) + ent = server.getEntry(user_dn, ldap.SCOPE_BASE, "(objectclass=*)", ['memberof']) + found = False + if ent.hasAttr('memberof'): + + for val in ent.getValues('memberof'): + server.log.info("!!!!!!! %s: memberof->%s" % (user_dn, val)) + if ensure_str(val) == group_dn: + found = True + break + + if find_result: + assert found + else: + assert (not found) + + +@pytest.mark.ds49161 +def test_memberof_group(topology_st): + """Test memberof does not fail if group is moved into scope + + :id: d1d276ae-6375-4ad8-9437-6a0afcbee7d2 + + :setup: Single instance + + :steps: + 1. Enable memberof plugin and set memberofentryscope + 2. Restart the server + 3. Add test sub-suffixes + 4. Add test users + 5. Add test groups + 6. Check for memberof attribute added to the test users + 7. Rename the group entry + 8. Check the new name is reflected in memberof attribute of user + + :expectedresults: + 1. memberof plugin should be enabled and memberofentryscope should be set + 2. Server should be restarted + 3. Sub-suffixes should be added + 4. Test users should be added + 5. 
Test groups should be added + 6. memberof attribute should be present in the test users + 7. Group entry should be renamed + 8. New group name should be present in memberof attribute of user + """ + + inst = topology_st.standalone + log.info('Enable memberof plugin and set the scope as cn=sub1,dc=example,dc=com') + memberof = MemberOfPlugin(inst) + memberof.enable() + memberof.replace('memberOfEntryScope', SUBTREE_1) + inst.restart() + + add_container(inst, SUFFIX, 'sub1') + add_container(inst, SUFFIX, 'sub2') + add_member(inst, 'm1', SUBTREE_1) + add_member(inst, 'm2', SUBTREE_1) + add_group(inst, 'g1', SUBTREE_1) + add_group(inst, 'g2', SUBTREE_2) + + # _check_memberof + dn1 = '%s,%s' % ('uid=test_m1', SUBTREE_1) + dn2 = '%s,%s' % ('uid=test_m2', SUBTREE_1) + g1 = '%s,%s' % ('cn=g1', SUBTREE_1) + g2 = '%s,%s' % ('cn=g2', SUBTREE_2) + _find_memberof_ext(inst, dn1, g1, True) + _find_memberof_ext(inst, dn2, g1, True) + _find_memberof_ext(inst, dn1, g2, False) + _find_memberof_ext(inst, dn2, g2, False) + + rename_entry(inst, 'cn=g2', SUBTREE_2, SUBTREE_1) + + g2n = '%s,%s' % ('cn=g2-new', SUBTREE_1) + _find_memberof_ext(inst, dn1, g1, True) + _find_memberof_ext(inst, dn2, g1, True) + _find_memberof_ext(inst, dn1, g2n, True) + _find_memberof_ext(inst, dn2, g2n, True) + + +def _config_memberof_entrycache_on_modrdn_failure(server): + + server.plugins.enable(name=PLUGIN_MEMBER_OF) + peoplebase = 'ou=people,%s' % SUFFIX + MEMBEROF_PLUGIN_DN = ('cn=' + PLUGIN_MEMBER_OF + ',cn=plugins,cn=config') + server.modify_s(MEMBEROF_PLUGIN_DN, [(ldap.MOD_REPLACE, 'memberOfAllBackends', b'on'), + (ldap.MOD_REPLACE, 'memberOfEntryScope', peoplebase.encode()), + (ldap.MOD_REPLACE, 'memberOfAutoAddOC', b'nsMemberOf')]) + + +def _disable_auto_oc_memberof(server): + MEMBEROF_PLUGIN_DN = ('cn=' + PLUGIN_MEMBER_OF + ',cn=plugins,cn=config') + server.modify_s(MEMBEROF_PLUGIN_DN, + [(ldap.MOD_REPLACE, 'memberOfAutoAddOC', b'nsContainer')]) + + +@pytest.mark.ds49967 +def test_entrycache_on_modrdn_failure(topology_st): + """This test checks that when a modrdn fails, the destination entry is not returned by a search + This could happen in case the destination entry remains in the entry cache + + :id: a4d8ac0b-2448-406a-9dc2-5a72851e30b6 + :setup: Standalone Instance + :steps: + 1. configure memberof to only scope ou=people,SUFFIX + 2. Creates 10 users + 3. Create groups0 (in peoplebase) that contain user0 and user1 + 4. Check user0 and user1 have memberof=group0.dn + 5. Create group1 (OUT peoplebase) that contain user0 and user1 + 6. Check user0 and user1 have NOT memberof=group1.dn + 7. Move group1 IN peoplebase and check users0 and user1 HAVE memberof=group1.dn + 8. Create group2 (OUT peoplebase) that contain user2 and user3. Group2 contains a specific description value + 9. Check user2 and user3 have NOT memberof=group2.dn + 10. configure memberof so that added objectclass does not allow 'memberof' attribute + 11. Move group2 IN peoplebase and check move failed OPERATIONS_ERROR (because memberof failed) + 12. Search all groups and check that the group, having the specific description value, + has the original DN of group2.dn + :expectedresults: + 1. should succeed + 2. should succeed + 3. should succeed + 4. should succeed + 5. should succeed + 6. should succeed + 7. should succeed + 8. should succeed + 9. should succeed + 10. should succeed + 11. should fail OPERATION_ERROR because memberof plugin fails to add 'memberof' to members. + 12. 
should succeed + + """ + + # only scopes peoplebase + _config_memberof_entrycache_on_modrdn_failure(topology_st.standalone) + topology_st.standalone.restart(timeout=10) + + # create 10 users + peoplebase = 'ou=people,%s' % SUFFIX + for i in range(10): + cn = 'user%d' % i + dn = 'cn=%s,%s' % (cn, peoplebase) + log.fatal('Adding user (%s): ' % dn) + topology_st.standalone.add_s(Entry((dn, {'objectclass': ['top', 'person'], + 'sn': 'user_%s' % cn, + 'description': 'add on standalone'}))) + + # Check that members of group0 (in the scope) have 'memberof + group0_dn = 'cn=group_in0,%s' % peoplebase + topology_st.standalone.add_s(Entry((group0_dn, {'objectclass': ['top', 'groupofnames'], + 'member': [ + 'cn=user0,%s' % peoplebase, + 'cn=user1,%s' % peoplebase, + ], + 'description': 'mygroup'}))) + + # Check the those entries have memberof with group0 + for i in range(2): + user_dn = 'cn=user%d,%s' % (i, peoplebase) + ent = topology_st.standalone.getEntry(user_dn, ldap.SCOPE_BASE, "(objectclass=*)", ['memberof']) + assert ent.hasAttr('memberof') + found = False + for val in ent.getValues('memberof'): + topology_st.standalone.log.info("!!!!!!! %s: memberof->%s (vs %s)" % (user_dn, val, group0_dn.encode().lower())) + if val.lower() == group0_dn.encode().lower(): + found = True + break + assert found + + # Create a group1 out of the scope + group1_dn = 'cn=group_out1,%s' % SUFFIX + topology_st.standalone.add_s(Entry((group1_dn, {'objectclass': ['top', 'groupofnames'], + 'member': [ + 'cn=user0,%s' % peoplebase, + 'cn=user1,%s' % peoplebase, + ], + 'description': 'mygroup'}))) + + # Check the those entries have not memberof with group1 + for i in range(2): + user_dn = 'cn=user%d,%s' % (i, peoplebase) + ent = topology_st.standalone.getEntry(user_dn, ldap.SCOPE_BASE, "(objectclass=*)", ['memberof']) + assert ent.hasAttr('memberof') + found = False + for val in ent.getValues('memberof'): + topology_st.standalone.log.info("!!!!!!! %s: memberof->%s (vs %s)" % (user_dn, val, group1_dn.encode().lower())) + if val.lower() == group1_dn.encode().lower(): + found = True + break + assert not found + + # move group1 into the scope and check user0 and user1 are memberof group1 + topology_st.standalone.rename_s(group1_dn, 'cn=group_in1', newsuperior=peoplebase, delold=0) + new_group1_dn = 'cn=group_in1,%s' % peoplebase + for i in range(2): + user_dn = 'cn=user%d,%s' % (i, peoplebase) + ent = topology_st.standalone.getEntry(user_dn, ldap.SCOPE_BASE, "(objectclass=*)", ['memberof']) + assert ent.hasAttr('memberof') + found = False + for val in ent.getValues('memberof'): + topology_st.standalone.log.info("!!!!!!! 
%s: memberof->%s (vs %s)" % (user_dn, val, new_group1_dn.encode().lower())) + if val.lower() == new_group1_dn.encode().lower(): + found = True + break + assert found + + # Create a group2 out of the scope with a SPECIFIC description value + entry_description = "this is to check that the entry having this description has the appropriate DN" + group2_dn = 'cn=group_out2,%s' % SUFFIX + topology_st.standalone.add_s(Entry((group2_dn, {'objectclass': ['top', 'groupofnames'], + 'member': [ + 'cn=user2,%s' % peoplebase, + 'cn=user3,%s' % peoplebase, + ], + 'description': entry_description}))) + + # Check the those entries have not memberof with group2 + for i in (2, 3): + user_dn = 'cn=user%d,%s' % (i, peoplebase) + ent = topology_st.standalone.getEntry(user_dn, ldap.SCOPE_BASE, "(objectclass=*)", ['memberof']) + assert not ent.hasAttr('memberof') + + # memberof will not add the missing objectclass + _disable_auto_oc_memberof(topology_st.standalone) + topology_st.standalone.restart(timeout=10) + + # move group2 into the scope and check it fails + try: + topology_st.standalone.rename_s(group2_dn, 'cn=group_in2', newsuperior=peoplebase, delold=0) + topology_st.standalone.log.info("This is unexpected, modrdn should fail as the member entry have not the appropriate objectclass") + assert False + except ldap.OBJECT_CLASS_VIOLATION: + pass + + # retrieve the entry having the specific description value + # check that the entry DN is the original group2 DN + ents = topology_st.standalone.search_s(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE, '(cn=gr*)') + found = False + for ent in ents: + topology_st.standalone.log.info("retrieve: %s with desc=%s" % (ent.dn, ent.getValue('description'))) + if ent.getValue('description') == entry_description.encode(): + found = True + assert ent.dn == group2_dn + assert found + + +def _config_memberof_silent_memberof_failure(server): + _config_memberof_entrycache_on_modrdn_failure(server) + + +def test_silent_memberof_failure(topology_st): + """This test checks that if during a MODRDN, the memberof plugin fails + then MODRDN also fails + + :id: 095aee01-581c-43dd-a241-71f9631a18bb + :setup: Standalone Instance + :steps: + 1. configure memberof to only scope ou=people,SUFFIX + 2. Do some cleanup and Creates 10 users + 3. Create groups0 (IN peoplebase) that contain user0 and user1 + 4. Check user0 and user1 have memberof=group0.dn + 5. Create group1 (OUT peoplebase) that contain user0 and user1 + 6. Check user0 and user1 have NOT memberof=group1.dn + 7. Move group1 IN peoplebase and check users0 and user1 HAVE memberof=group1.dn + 8. Create group2 (OUT peoplebase) that contain user2 and user3. + 9. Check user2 and user3 have NOT memberof=group2.dn + 10. configure memberof so that added objectclass does not allow 'memberof' attribute + 11. Move group2 IN peoplebase and check move failed OPERATIONS_ERROR (because memberof failed) + 12. Check user2 and user3 have NOT memberof=group2.dn + 13. ADD group3 (IN peoplebase) with user4 and user5 members and check add failed OPERATIONS_ERROR (because memberof failed) + 14. Check user4 and user5 have NOT memberof=group2.dn + :expectedresults: + 1. should succeed + 2. should succeed + 3. should succeed + 4. should succeed + 5. should succeed + 6. should succeed + 7. should succeed + 8. should succeed + 9. should succeed + 10. should succeed + 11. should fail OPERATION_ERROR because memberof plugin fails to add 'memberof' to members. + 12. should succeed + 14. 
should fail OPERATION_ERROR because memberof plugin fails to add 'memberof' to members + 14. should succeed + """ + # only scopes peoplebase + _config_memberof_silent_memberof_failure(topology_st.standalone) + topology_st.standalone.restart(timeout=10) + + # first do some cleanup + peoplebase = 'ou=people,%s' % SUFFIX + for i in range(10): + cn = 'user%d' % i + dn = 'cn=%s,%s' % (cn, peoplebase) + topology_st.standalone.delete_s(dn) + topology_st.standalone.delete_s('cn=group_in0,%s' % peoplebase) + topology_st.standalone.delete_s('cn=group_in1,%s' % peoplebase) + topology_st.standalone.delete_s('cn=group_out2,%s' % SUFFIX) + + # create 10 users + for i in range(10): + cn = 'user%d' % i + dn = 'cn=%s,%s' % (cn, peoplebase) + log.fatal('Adding user (%s): ' % dn) + topology_st.standalone.add_s(Entry((dn, {'objectclass': ['top', 'person'], + 'sn': 'user_%s' % cn, + 'description': 'add on standalone'}))) + + # Check that members of group0 (in the scope) have 'memberof + group0_dn = 'cn=group_in0,%s' % peoplebase + topology_st.standalone.add_s(Entry((group0_dn, {'objectclass': ['top', 'groupofnames'], + 'member': [ + 'cn=user0,%s' % peoplebase, + 'cn=user1,%s' % peoplebase, + ], + 'description': 'mygroup'}))) + + # Check the those entries have memberof with group0 + for i in range(2): + user_dn = 'cn=user%d,%s' % (i, peoplebase) + ent = topology_st.standalone.getEntry(user_dn, ldap.SCOPE_BASE, "(objectclass=*)", ['memberof']) + assert ent.hasAttr('memberof') + found = False + for val in ent.getValues('memberof'): + topology_st.standalone.log.info("!!!!!!! %s: memberof->%s (vs %s)" % (user_dn, val, group0_dn.encode().lower())) + if val.lower() == group0_dn.encode().lower(): + found = True + break + assert found + + # Create a group1 out of the scope + group1_dn = 'cn=group_out1,%s' % SUFFIX + topology_st.standalone.add_s(Entry((group1_dn, {'objectclass': ['top', 'groupofnames'], + 'member': [ + 'cn=user0,%s' % peoplebase, + 'cn=user1,%s' % peoplebase, + ], + 'description': 'mygroup'}))) + + # Check the those entries have not memberof with group1 + for i in range(2): + user_dn = 'cn=user%d,%s' % (i, peoplebase) + ent = topology_st.standalone.getEntry(user_dn, ldap.SCOPE_BASE, "(objectclass=*)", ['memberof']) + assert ent.hasAttr('memberof') + found = False + for val in ent.getValues('memberof'): + topology_st.standalone.log.info("!!!!!!! %s: memberof->%s (vs %s)" % (user_dn, val, group1_dn.encode().lower())) + if val.lower() == group1_dn.encode().lower(): + found = True + break + assert not found + + # move group1 into the scope and check user0 and user1 are memberof group1 + topology_st.standalone.rename_s(group1_dn, 'cn=group_in1', newsuperior=peoplebase, delold=0) + new_group1_dn = 'cn=group_in1,%s' % peoplebase + for i in range(2): + user_dn = 'cn=user%d,%s' % (i, peoplebase) + ent = topology_st.standalone.getEntry(user_dn, ldap.SCOPE_BASE, "(objectclass=*)", ['memberof']) + assert ent.hasAttr('memberof') + found = False + for val in ent.getValues('memberof'): + topology_st.standalone.log.info("!!!!!!! 
%s: memberof->%s (vs %s)" % (user_dn, val, new_group1_dn.encode().lower())) + if val.lower() == new_group1_dn.encode().lower(): + found = True + break + assert found + + # Create a group2 out of the scope + group2_dn = 'cn=group_out2,%s' % SUFFIX + topology_st.standalone.add_s(Entry((group2_dn, {'objectclass': ['top', 'groupofnames'], + 'member': [ + 'cn=user2,%s' % peoplebase, + 'cn=user3,%s' % peoplebase, + ], + 'description': 'mygroup'}))) + + # Check the those entries have not memberof with group2 + for i in (2, 3): + user_dn = 'cn=user%d,%s' % (i, peoplebase) + ent = topology_st.standalone.getEntry(user_dn, ldap.SCOPE_BASE, "(objectclass=*)", ['memberof']) + assert not ent.hasAttr('memberof') + + # memberof will not add the missing objectclass + _disable_auto_oc_memberof(topology_st.standalone) + topology_st.standalone.restart(timeout=10) + + # move group2 into the scope and check it fails + try: + topology_st.standalone.rename_s(group2_dn, 'cn=group_in2', newsuperior=peoplebase, delold=0) + topology_st.standalone.log.info("This is unexpected, modrdn should fail as the member entry have not the appropriate objectclass") + assert False + except ldap.OBJECT_CLASS_VIOLATION: + pass + + # Check the those entries have not memberof + for i in (2, 3): + user_dn = 'cn=user%d,%s' % (i, peoplebase) + ent = topology_st.standalone.getEntry(user_dn, ldap.SCOPE_BASE, "(objectclass=*)", ['memberof']) + topology_st.standalone.log.info("Should assert %s has memberof is %s" % (user_dn, ent.hasAttr('memberof'))) + assert not ent.hasAttr('memberof') + + # Create a group3 in the scope + group3_dn = 'cn=group3_in,%s' % peoplebase + try: + topology_st.standalone.add_s(Entry((group3_dn, {'objectclass': ['top', 'groupofnames'], + 'member': [ + 'cn=user4,%s' % peoplebase, + 'cn=user5,%s' % peoplebase, + ], + 'description': 'mygroup'}))) + topology_st.standalone.log.info("This is unexpected, ADD should fail as the member entry have not the appropriate objectclass") + assert False + except ldap.OBJECT_CLASS_VIOLATION: + pass + except ldap.OPERATIONS_ERROR: + pass + + # Check the those entries do not have memberof + for i in (4, 5): + user_dn = 'cn=user%d,%s' % (i, peoplebase) + ent = topology_st.standalone.getEntry(user_dn, ldap.SCOPE_BASE, "(objectclass=*)", ['memberof']) + topology_st.standalone.log.info("Should assert %s has memberof is %s" % (user_dn, ent.hasAttr('memberof'))) + assert not ent.hasAttr('memberof') + +if __name__ == '__main__': + # Run isolated + # -s for DEBUG mode + CURRENT_FILE = os.path.realpath(__file__) + pytest.main("-s %s" % CURRENT_FILE) + diff --git a/dirsrvtests/tests/suites/memory_leaks/MMR_double_free_test.py b/dirsrvtests/tests/suites/memory_leaks/MMR_double_free_test.py new file mode 100644 index 0000000..45a921d --- /dev/null +++ b/dirsrvtests/tests/suites/memory_leaks/MMR_double_free_test.py @@ -0,0 +1,166 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2020 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. 
+# --- END COPYRIGHT BLOCK --- +# +import pytest +from lib389.replica import Replicas, Replica +from lib389.tasks import * +from lib389.utils import * +from lib389.paths import Paths +from lib389.topologies import topology_m2 + +from lib389._constants import (DEFAULT_SUFFIX, DN_CONFIG) +from lib389.properties import (REPLICA_PURGE_DELAY, REPLICA_PURGE_INTERVAL) + +from lib389.idm.user import UserAccounts + +pytestmark = pytest.mark.tier2 + +DEBUGGING = os.getenv("DEBUGGING", default=False) +if DEBUGGING: + logging.getLogger(__name__).setLevel(logging.DEBUG) +else: + logging.getLogger(__name__).setLevel(logging.INFO) +log = logging.getLogger(__name__) + +ds_paths = Paths() + +@pytest.fixture(scope="module") +def topology_setup(topology_m2): + """Configure the topology with purge parameters and enable audit logging + + - configure replica purge delay and interval on master1 and master2 + - enable audit log on master1 and master2 + - restart master1 and master2 + """ + m1 = topology_m2.ms["master1"] + m2 = topology_m2.ms["master2"] + + replica1 = Replicas(m1).get(DEFAULT_SUFFIX) + replica2 = Replicas(m2).get(DEFAULT_SUFFIX) + + replica1.set('nsDS5ReplicaPurgeDelay','5') + replica2.set('nsDS5ReplicaPurgeDelay','5') + assert replica1.present('nsDS5ReplicaPurgeDelay') + assert replica2.present('nsDS5ReplicaPurgeDelay') + replica1.display_attr('nsDS5ReplicaPurgeDelay') + replica2.display_attr('nsDS5ReplicaPurgeDelay') + + replica1.set('nsDS5ReplicaTombstonePurgeInterval', '5') + replica2.set('nsDS5ReplicaTombstonePurgeInterval', '5') + assert replica1.present('nsDS5ReplicaTombstonePurgeInterval') + assert replica2.present('nsDS5ReplicaTombstonePurgeInterval') + replica1.display_attr('nsDS5ReplicaTombstonePurgeInterval') + replica2.display_attr('nsDS5ReplicaTombstonePurgeInterval') + + + m1.config.set('nsslapd-auditlog-logging-enabled', 'on') + m2.config.set('nsslapd-auditlog-logging-enabled', 'on') + m1.restart() + m2.restart() + + +@pytest.mark.skipif(not ds_paths.asan_enabled, reason="Don't run if ASAN is not enabled") +@pytest.mark.ds48226 +@pytest.mark.bz1243970 +@pytest.mark.bz1262363 +def test_MMR_double_free(topology_m2, topology_setup, timeout=5): + """Reproduce conditions where a double free occurs and check it does not make + the server crash + + :id: 91580b1c-ad10-49bc-8aed-402edac59f46 + :setup: replicated topology - purge delay and purge interval are configured + :steps: + 1. create an entry on master1 + 2. modify the entry with description add + 3. check the entry is correctly replicated on master2 + 4. stop master2 + 5. delete the entry's description on master1 + 6. stop master1 + 7. start master2 + 8. delete the entry's description on master2 + 9. add an entry's description on master2 + 10. wait the purge delay duration + 11. add again an entry's description on master2 + :expectedresults: + 1. entry exists on master1 + 2. modification is effective + 3. entry exists on master2 and modification is effective + 4. master2 is stopped + 5. description is removed from entry on master1 + 6. master1 is stopped + 7. master2 is started - not synchronized with master1 + 8. description is removed from entry on master2 (same op should be performed too by replication mecanism) + 9. description to entry is added on master2 + 10. Purge delay has expired - changes are erased + 11. 
description to entry is added again on master2 + """ + name = 'test_entry' + + entry_m1 = UserAccounts(topology_m2.ms["master1"], DEFAULT_SUFFIX) + entry = entry_m1.create(properties={ + 'uid': name, + 'sn': name, + 'cn': name, + 'uidNumber': '1001', + 'gidNumber': '1001', + 'homeDirectory': '/home/test_entry', + 'userPassword': 'test_entry_pwd' + }) + + log.info('First do an update that is replicated') + entry.add('description', '5') + + log.info('Check the update in the replicated entry') + entry_m2 = UserAccounts(topology_m2.ms["master2"], DEFAULT_SUFFIX) + + success = 0 + for i in range(0, timeout): + try: + entry_repl = entry_m2.get(name) + out = entry_repl.display_attr('description') + if len(out) > 0: + success = 1 + break + except: + time.sleep(1) + + assert success + + log.info('Stop M2 so that it will not receive the next update') + topology_m2.ms["master2"].stop(10) + + log.info('Perform a del operation that is not replicated') + entry.remove('description', '5') + + log.info("Stop M1 so that it will keep del '5' that is unknown from master2") + topology_m2.ms["master1"].stop(10) + + log.info('start M2 to do the next updates') + topology_m2.ms["master2"].start() + + log.info("del 'description' by '5'") + entry_repl.remove('description', '5') + + log.info("add 'description' by '5'") + entry_repl.add('description', '5') + + log.info('sleep of purge delay so that the next update will purge the CSN_7') + time.sleep(6) + + log.info("add 'description' by '6' that purge the state info") + entry_repl.add('description', '6') + + log.info('Restart master1') + topology_m2.ms["master1"].start(30) + + +if __name__ == '__main__': + # Run isolated + # -s for DEBUG mode + CURRENT_FILE = os.path.realpath(__file__) + pytest.main("-s %s" % CURRENT_FILE) diff --git a/dirsrvtests/tests/suites/memory_leaks/__init__.py b/dirsrvtests/tests/suites/memory_leaks/__init__.py new file mode 100644 index 0000000..c94c077 --- /dev/null +++ b/dirsrvtests/tests/suites/memory_leaks/__init__.py @@ -0,0 +1,3 @@ +""" + :Requirement: 389-ds-base: Test Memory Leaks +""" diff --git a/dirsrvtests/tests/suites/memory_leaks/range_search_test.py b/dirsrvtests/tests/suites/memory_leaks/range_search_test.py new file mode 100644 index 0000000..f228ba2 --- /dev/null +++ b/dirsrvtests/tests/suites/memory_leaks/range_search_test.py @@ -0,0 +1,71 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2016 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. +# --- END COPYRIGHT BLOCK --- +# +import pytest +from lib389.tasks import * +from lib389.utils import * +from lib389.paths import Paths +from lib389.topologies import topology_st +from lib389._constants import * + +pytestmark = pytest.mark.tier2 + +logging.getLogger(__name__).setLevel(logging.DEBUG) +log = logging.getLogger(__name__) + +ds_paths = Paths() + + +@pytest.mark.skipif(not ds_paths.asan_enabled, reason="Don't run if ASAN is not enabled") +def test_range_search(topology_st): + """Add 100 entries, and run a range search. When we encounter an error + we still need to disable valgrind before exiting + + :id: aadccf78-a2a8-48cc-8769-4764c7966189 + :setup: Standalone instance, Retro changelog file, + Enabled Valgrind if the system doesn't have asan + :steps: + 1. Add 100 test entries + 2. Issue a range search with a changenumber filter + 3. There should be no leak + :expectedresults: + 1. 100 test entries should be added + 2. Search should be successful + 3. 
Success + """ + + log.info('Running test_range_search...') + topology_st.standalone.plugins.enable(name=PLUGIN_RETRO_CHANGELOG) + topology_st.standalone.restart() + + success = True + + # Add 100 test entries + for idx in range(1, 100): + idx = str(idx) + USER_DN = 'uid=user' + idx + ',' + DEFAULT_SUFFIX + try: + topology_st.standalone.add_s(Entry((USER_DN, {'objectclass': "top extensibleObject".split(), + 'uid': 'user' + idx}))) + except ldap.LDAPError as e: + log.fatal('test_range_search: Failed to add test user ' + USER_DN + ': error ' + e.message['desc']) + success = False + time.sleep(1) + + # Issue range search + assert success + entries = topology_st.standalone.search_s(RETROCL_SUFFIX, ldap.SCOPE_SUBTREE, + '(&(changenumber>=74)(changenumber<=84))') + assert entries + + +if __name__ == '__main__': + # Run isolated + # -s for DEBUG mode + CURRENT_FILE = os.path.realpath(__file__) + pytest.main("-s %s" % CURRENT_FILE) diff --git a/dirsrvtests/tests/suites/migration/__init__.py b/dirsrvtests/tests/suites/migration/__init__.py new file mode 100644 index 0000000..120786b --- /dev/null +++ b/dirsrvtests/tests/suites/migration/__init__.py @@ -0,0 +1,3 @@ +""" + :Requirement: 389-ds-base: DataBase Import +""" \ No newline at end of file diff --git a/dirsrvtests/tests/suites/migration/export_data_test.py b/dirsrvtests/tests/suites/migration/export_data_test.py new file mode 100644 index 0000000..3ad820a --- /dev/null +++ b/dirsrvtests/tests/suites/migration/export_data_test.py @@ -0,0 +1,82 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2019 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. +# --- END COPYRIGHT BLOCK --- +# + +import logging +import pytest +import os + +from lib389._constants import * +from lib389.topologies import topology_st +from lib389.idm.user import UserAccounts, TEST_USER_PROPERTIES + +pytestmark = pytest.mark.tier3 + +logging.getLogger(__name__).setLevel(logging.DEBUG) +log = logging.getLogger(__name__) + + +@pytest.mark.skipif(os.getenv('MIGRATION') is None, reason="This test is meant to execute in specific test environment") +def test_export_data_from_source_host(topology_st): + """Prepare export file for migration using a single instance of Directory Server + + :id: 47f97d87-60f7-4f80-a72b-e7daa1de0061 + :setup: Standalone + :steps: + 1. Add a test user with employeeNumber and telephoneNumber + 2. Add a test user with escaped DN + 3. Create export file + 4. Check if values of searched attributes are present in exported file + :expectedresults: + 1. Success + 2. Success + 3. Success + 4. 
Success + """ + + standalone = topology_st.standalone + output_file = os.path.join(topology_st.standalone.ds_paths.ldif_dir, "migration_export.ldif") + + log.info("Add a test user") + users = UserAccounts(standalone, DEFAULT_SUFFIX) + test_user = users.create(properties=TEST_USER_PROPERTIES) + test_user.add('employeeNumber', '1000') + test_user.add('telephoneNumber', '1234567890') + + assert test_user.present('employeeNumber', value='1000') + assert test_user.present('telephoneNumber', value='1234567890') + + log.info("Creating user with escaped DN") + users.create(properties={ + 'uid': '\\#\\,\\+"\\\\>:\\=\\<\\<\\>\\;/', + 'cn': 'tuser2', + 'sn': 'user', + 'uidNumber': '1000', + 'gidNumber': '2000', + 'homeDirectory': '/home/tuser2', + }) + + log.info("Exporting LDIF offline...") + standalone.stop() + standalone.db2ldif(bename=DEFAULT_BENAME, suffixes=[DEFAULT_SUFFIX], + excludeSuffixes=None, encrypt=None, repl_data=None, outputfile=output_file) + standalone.start() + + log.info("Check that value of attribute is present in the exported file") + with open(output_file, 'r') as ldif_file: + ldif = ldif_file.read() + assert 'employeeNumber: 1000' in ldif + assert 'telephoneNumber: 1234567890' in ldif + assert 'uid: \\#\\,\\+"\\\\>:\\=\\<\\<\\>\\;/' in ldif + + +if __name__ == '__main__': + # Run isolated + # -s for DEBUG mode + CURRENT_FILE = os.path.realpath(__file__) + pytest.main(["-s", CURRENT_FILE]) diff --git a/dirsrvtests/tests/suites/migration/import_data_test.py b/dirsrvtests/tests/suites/migration/import_data_test.py new file mode 100644 index 0000000..0f03051 --- /dev/null +++ b/dirsrvtests/tests/suites/migration/import_data_test.py @@ -0,0 +1,70 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2019 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. +# --- END COPYRIGHT BLOCK --- +# + +import logging +import pytest +import os + +from lib389._constants import * +from lib389.topologies import topology_st +from lib389.idm.user import UserAccounts + +pytestmark = pytest.mark.tier3 + +logging.getLogger(__name__).setLevel(logging.DEBUG) +log = logging.getLogger(__name__) + + +@pytest.mark.skipif(os.getenv('MIGRATION') is None, reason="This test is meant to execute in specific test environment") +def test_import_data_to_target_host(topology_st): + """Import file created in export_data_test.py using a single instance of Directory Server + + :id: 7e896b0c-6838-49c7-8e1d-5e8114f5fb02 + :setup: Standalone + :steps: + 1. Check that attribute values are present in input file + 2. Import input file + 3. Check imported user data + :expectedresults: + 1. Success + 2. Success + 3. 
Success + """ + + standalone = topology_st.standalone + input_file = os.path.join(topology_st.standalone.ds_paths.ldif_dir, "migration_export.ldif") + + log.info("Check that value of attribute is present in the exported file") + with open(input_file, 'r') as ldif_file: + ldif = ldif_file.read() + assert 'employeeNumber: 1000' in ldif + assert 'telephoneNumber: 1234567890' in ldif + assert 'uid: \\#\\,\\+"\\\\>:\\=\\<\\<\\>\\;/' in ldif + + log.info('Stopping the server and running offline import...') + standalone.stop() + assert standalone.ldif2db(bename=DEFAULT_BENAME, suffixes=[DEFAULT_SUFFIX], encrypt=None, excludeSuffixes=None, + import_file=input_file) + standalone.start() + + log.info("Check imported user data") + users = UserAccounts(standalone, DEFAULT_SUFFIX) + test_user = users.get('testuser') + assert test_user.present('employeeNumber', value='1000') + assert test_user.present('telephoneNumber', value='1234567890') + test_user = users.get('\\#\\,\\+"\\\\>:\\=\\<\\<\\>\\;/') + assert test_user.present('cn', value='tuser2') + assert test_user.present('uid', value='\\#\\,\\+"\\\\>:\\=\\<\\<\\>\\;/') + + +if __name__ == '__main__': + # Run isolated + # -s for DEBUG mode + CURRENT_FILE = os.path.realpath(__file__) + pytest.main(["-s", CURRENT_FILE]) diff --git a/dirsrvtests/tests/suites/monitor/__init__.py b/dirsrvtests/tests/suites/monitor/__init__.py new file mode 100644 index 0000000..080d1ac --- /dev/null +++ b/dirsrvtests/tests/suites/monitor/__init__.py @@ -0,0 +1,3 @@ +""" + :Requirement: 389-ds-base: Status - Performance Monitor +""" diff --git a/dirsrvtests/tests/suites/monitor/monitor_test.py b/dirsrvtests/tests/suites/monitor/monitor_test.py new file mode 100644 index 0000000..5786903 --- /dev/null +++ b/dirsrvtests/tests/suites/monitor/monitor_test.py @@ -0,0 +1,70 @@ +import logging +import pytest +import os +from lib389.monitor import * +from lib389._constants import * +from lib389.topologies import topology_st as topo + +DEBUGGING = os.getenv("DEBUGGING", default=False) +if DEBUGGING: + logging.getLogger(__name__).setLevel(logging.DEBUG) +else: + logging.getLogger(__name__).setLevel(logging.INFO) +log = logging.getLogger(__name__) + +pytestmark = pytest.mark.tier1 +def test_monitor(topo): + """This test displays cn=monitor attributes to check the performance + + :id: f7c8a815-07cf-4e67-9574-d26a0937d3db + :setup: Single instance + :steps: + 1. Get the cn=monitor connections attributes + 2. Print connections attributes + 3. Get the cn=monitor version + 4. Print cn=monitor version + 5. Get the cn=monitor threads attributes + 6. Print cn=monitor threads attributes + 7. Get cn=monitor backends attributes + 8. Print cn=monitor backends attributes + 9. Get cn=monitor operations attributes + 10. Print cn=monitor operations attributes + 11. Get cn=monitor statistics attributes + 12. Print cn=monitor statistics attributes + :expectedresults: + 1. cn=monitor attributes should be fetched and printed successfully. 
+ """ + + #define the monitor object from Monitor class in lib389 + monitor = Monitor(topo.standalone) + + #get monitor connections + connections = monitor.get_connections() + log.info('connection: {0[0]}, currentconnections: {0[1]}, totalconnections: {0[2]}'.format(connections)) + + #get monitor version + version = monitor.get_version() + log.info('version :: %s' %version) + + #get monitor threads + threads = monitor.get_threads() + log.info('threads: {0[0]},currentconnectionsatmaxthreads: {0[1]},maxthreadsperconnhits: {0[2]}'.format(threads)) + + #get monitor backends + backend = monitor.get_backends() + log.info('nbackends: {0[0]}, backendmonitordn: {0[1]}'.format(backend)) + + #get monitor operations + operations = monitor.get_operations() + log.info('opsinitiated: {0[0]}, opscompleted: {0[1]}'.format(operations)) + + #get monitor stats + stats = monitor.get_statistics() + log.info('dtablesize: {0[0]},readwaiters: {0[1]},entriessent: {0[2]},bytessent: {0[3]},currenttime: {0[4]},starttime: {0[5]}'.format(stats)) + + +if __name__ == '__main__': + # Run isolated + # -s for DEBUG mode + CURRENT_FILE = os.path.realpath(__file__) + pytest.main("-s %s" % CURRENT_FILE) diff --git a/dirsrvtests/tests/suites/paged_results/__init__.py b/dirsrvtests/tests/suites/paged_results/__init__.py new file mode 100644 index 0000000..806f40b --- /dev/null +++ b/dirsrvtests/tests/suites/paged_results/__init__.py @@ -0,0 +1,3 @@ +""" + :Requirement: 389-ds-base: Simple Paged Results +""" diff --git a/dirsrvtests/tests/suites/paged_results/paged_results_test.py b/dirsrvtests/tests/suites/paged_results/paged_results_test.py new file mode 100644 index 0000000..9fdceb1 --- /dev/null +++ b/dirsrvtests/tests/suites/paged_results/paged_results_test.py @@ -0,0 +1,1179 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2016 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. 
+# --- END COPYRIGHT BLOCK --- +# +import socket +from random import sample + +import pytest +from ldap.controls import SimplePagedResultsControl, GetEffectiveRightsControl +from lib389.tasks import * +from lib389.utils import * +from lib389.topologies import topology_st +from lib389._constants import DN_LDBM, DN_DM, DEFAULT_SUFFIX, BACKEND_NAME, PASSWORD + +from lib389._controls import SSSRequestControl + +from lib389.idm.user import UserAccounts +from lib389.idm.organization import Organization +from lib389.idm.organizationalunit import OrganizationalUnit +from lib389.backend import Backends + +from lib389._mapped_object import DSLdapObject + +pytestmark = pytest.mark.tier1 + +DEBUGGING = os.getenv('DEBUGGING', False) + +if DEBUGGING: + logging.getLogger(__name__).setLevel(logging.DEBUG) +else: + logging.getLogger(__name__).setLevel(logging.INFO) + +log = logging.getLogger(__name__) + +TEST_USER_PWD = 'simplepaged_test' + +NEW_SUFFIX_1_NAME = 'test_parent' +NEW_SUFFIX_1 = 'o={}'.format(NEW_SUFFIX_1_NAME) +NEW_SUFFIX_2_NAME = 'child' +NEW_SUFFIX_2 = 'ou={},{}'.format(NEW_SUFFIX_2_NAME, NEW_SUFFIX_1) +NEW_BACKEND_1 = 'parent_base' +NEW_BACKEND_2 = 'child_base' + +HOSTNAME = socket.getfqdn() +IP_ADDRESS = socket.gethostbyname(HOSTNAME) + + +@pytest.fixture(scope="module") +def create_user(topology_st, request): + """User for binding operation""" + + log.info('Adding user simplepaged_test') + + users = UserAccounts(topology_st.standalone, DEFAULT_SUFFIX) + user = users.create(properties={ + 'uid': 'simplepaged_test', + 'cn': 'simplepaged_test', + 'sn': 'simplepaged_test', + 'uidNumber': '1234', + 'gidNumber': '1234', + 'homeDirectory': '/home/simplepaged_test', + 'userPassword': TEST_USER_PWD, + }) + + # Now add the ACI so simplepage_test can read the users ... + ACI_BODY = ensure_bytes('(targetattr= "uid || sn || dn")(version 3.0; acl "Allow read for user"; allow (read,search,compare) userdn = "ldap:///all";)') + topology_st.standalone.modify_s(DEFAULT_SUFFIX, [(ldap.MOD_REPLACE, 'aci', ACI_BODY)]) + + def fin(): + log.info('Deleting user simplepaged_test') + user.delete() + + request.addfinalizer(fin) + + return user + +@pytest.fixture(scope="module") +def new_suffixes(topology_st): + """Add two suffixes with backends, one is a parent + of the another + """ + + log.info('Adding suffix:{} and backend: {}'.format(NEW_SUFFIX_1, NEW_BACKEND_1)) + + bes = Backends(topology_st.standalone) + + be_1 = bes.create(properties={ + 'cn': 'NEW_BACKEND_1', + 'nsslapd-suffix': NEW_SUFFIX_1, + }) + # Create the root objects with their ACI + log.info('Adding ACI to allow our test user to search') + ACI_TARGET = '(targetattr != "userPassword || aci")' + ACI_ALLOW = '(version 3.0; acl "Enable anonymous access";allow (read, search, compare)' + ACI_SUBJECT = '(userdn = "ldap:///anyone");)' + ACI_BODY = ACI_TARGET + ACI_ALLOW + ACI_SUBJECT + + o_1 = Organization(topology_st.standalone, NEW_SUFFIX_1) + o_1.create(properties={ + 'o': NEW_SUFFIX_1_NAME, + 'aci': ACI_BODY, + }) + + log.info('Adding suffix:{} and backend: {}'.format(NEW_SUFFIX_2, NEW_BACKEND_2)) + be_2 = bes.create(properties={ + 'cn': 'NEW_BACKEND_2', + 'nsslapd-suffix': NEW_SUFFIX_2, + }) + + # We have to adjust the MT to say that BE_1 is a parent. 
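+    # The backend's mapping tree entry (under cn=mapping tree,cn=config) controls suffix nesting:
+    # pointing the child's parent at NEW_SUFFIX_1 makes NEW_SUFFIX_2 a sub-suffix served by its own backend.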
+ mt = be_2.get_mapping_tree() + mt.set_parent(NEW_SUFFIX_1) + + ou_2 = OrganizationalUnit(topology_st.standalone, NEW_SUFFIX_2) + ou_2.create(properties={ + 'ou': NEW_SUFFIX_2_NAME + }) + + +def add_users(topology_st, users_num, suffix): + """Add users under the given suffix + + Return the list of created user account objects. + """ + + users_list = [] + users = UserAccounts(topology_st.standalone, suffix, rdn=None) + + log.info('Adding %d users' % users_num) + for num in sample(range(1000), users_num): + num_ran = int(round(num)) + USER_NAME = 'test%05d' % num_ran + + user = users.create(properties={ + 'uid': USER_NAME, + 'sn': USER_NAME, + 'cn': USER_NAME, + 'uidNumber': '%s' % num_ran, + 'gidNumber': '%s' % num_ran, + 'homeDirectory': '/home/%s' % USER_NAME, + 'mail': '%s@redhat.com' % USER_NAME, + 'userpassword': 'pass%s' % num_ran, + }) + users_list.append(user) + return users_list + + +def del_users(users_list): + """Delete the user entries in the given list""" + + log.info('Deleting %d users' % len(users_list)) + for user in users_list: + user.delete() + + +def change_conf_attr(topology_st, suffix, attr_name, attr_value): + """Change a configuration attribute under the given suffix. + + Returns the previous attribute value. + """ + + entry = DSLdapObject(topology_st.standalone, suffix) + + attr_value_bck = entry.get_attr_val_bytes(attr_name) + log.info('Set %s to %s. Previous value - %s. Modified suffix - %s.' % ( + attr_name, attr_value, attr_value_bck, suffix)) + if attr_value is None: + entry.remove_all(attr_name) + else: + entry.replace(attr_name, attr_value) + return attr_value_bck + + +def paged_search(conn, suffix, controls, search_flt, searchreq_attrlist): + """Search at the given suffix with ldap.SCOPE_SUBTREE + using the Simple Paged Results control (it should be the + first item in the controls list). + Assert that no cookie is left at the end. + + Return the list of results collected from all pages. + """ + + pages = 0 + pctrls = [] + all_results = [] + req_pr_ctrl = controls[0] + log.info('Running simple paged result search with - ' + 'search suffix: {}; filter: {}; attr list {}; ' + 'page_size = {}; controls: {}.'.format(suffix, search_flt, + searchreq_attrlist, + req_pr_ctrl.size, + str(controls))) + msgid = conn.search_ext(suffix, ldap.SCOPE_SUBTREE, search_flt, searchreq_attrlist, serverctrls=controls) + while True: + log.info('Getting page %d' % (pages,)) + rtype, rdata, rmsgid, rctrls = conn.result3(msgid) + log.debug('Data: {}'.format(rdata)) + all_results.extend(rdata) + pages += 1 + pctrls = [ + c + for c in rctrls + if c.controlType == SimplePagedResultsControl.controlType + ] + + if pctrls: + if pctrls[0].cookie: + # Copy cookie from response control to request control + log.debug('Cookie: {}'.format(pctrls[0].cookie)) + req_pr_ctrl.cookie = pctrls[0].cookie + msgid = conn.search_ext(suffix, ldap.SCOPE_SUBTREE, search_flt, searchreq_attrlist, serverctrls=controls) + else: + break # No more pages available + else: + break + + assert not pctrls[0].cookie + return all_results + + +@pytest.mark.parametrize("page_size,users_num", [(6, 5), (5, 5), (5, 25)]) +def test_search_success(topology_st, create_user, page_size, users_num): + """Verify that search with a simple paged results control + returns all entries it should without errors. + + :id: ddd15b70-64f1-4a85-a793-b24761e50354 + :parametrized: yes + :feature: Simple paged results + :setup: Standalone instance, test user for binding, + varying number of users for the search base + :steps: + 1. Bind as test user + 2. 
Search through added users with a simple paged control + :expectedresults: + 1. Bind should be successful + 2. All users should be found + """ + + users_list = add_users(topology_st, users_num, DEFAULT_SUFFIX) + search_flt = r'(uid=test*)' + searchreq_attrlist = ['dn', 'sn'] + + log.info('Set user bind %s ' % create_user) + conn = create_user.bind(TEST_USER_PWD) + + req_ctrl = SimplePagedResultsControl(True, size=page_size, cookie='') + all_results = paged_search(conn, DEFAULT_SUFFIX, [req_ctrl], search_flt, searchreq_attrlist) + + log.info('%d results' % len(all_results)) + assert len(all_results) == len(users_list) + + del_users(users_list) + + +@pytest.mark.parametrize("page_size,users_num,suffix,attr_name,attr_value,expected_err", [ + (50, 200, 'cn=config,%s' % DN_LDBM, 'nsslapd-idlistscanlimit', '100', + ldap.UNWILLING_TO_PERFORM), + (5, 15, DN_CONFIG, 'nsslapd-timelimit', '20', + ldap.UNAVAILABLE_CRITICAL_EXTENSION), + (21, 50, DN_CONFIG, 'nsslapd-sizelimit', '20', + ldap.SIZELIMIT_EXCEEDED), + (21, 50, DN_CONFIG, 'nsslapd-pagedsizelimit', '5', + ldap.SIZELIMIT_EXCEEDED), + (5, 50, 'cn=config,%s' % DN_LDBM, 'nsslapd-lookthroughlimit', '20', + ldap.ADMINLIMIT_EXCEEDED)]) +def test_search_limits_fail(topology_st, create_user, page_size, users_num, + suffix, attr_name, attr_value, expected_err): + """Verify that search with a simple paged results control + throws expected exceptoins when corresponding limits are + exceeded. + + :id: e3067107-bd6d-493d-9989-3e641a9337b0 + :parametrized: yes + :setup: Standalone instance, test user for binding, + varying number of users for the search base + :steps: + 1. Bind as test user + 2. Set limit attribute to the value that will cause + an expected exception + 3. Search through added users with a simple paged control + :expectedresults: + 1. Bind should be successful + 2. Operation should be successful + 3. 
Should fail with appropriate exception + """ + + users_list = add_users(topology_st, users_num, DEFAULT_SUFFIX) + attr_value_bck = change_conf_attr(topology_st, suffix, attr_name, attr_value) + conf_param_dict = {attr_name: attr_value} + search_flt = r'(uid=test*)' + searchreq_attrlist = ['dn', 'sn'] + controls = [] + + try: + log.info('Set user bind') + conn = create_user.bind(TEST_USER_PWD) + + log.info('Create simple paged results control instance') + req_ctrl = SimplePagedResultsControl(True, size=page_size, cookie='') + controls.append(req_ctrl) + if attr_name == 'nsslapd-idlistscanlimit': + sort_ctrl = SSSRequestControl(True, ['sn']) + controls.append(sort_ctrl) + log.info('Initiate ldapsearch with created control instance') + msgid = conn.search_ext(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE, + search_flt, searchreq_attrlist, serverctrls=controls) + + time_val = conf_param_dict.get('nsslapd-timelimit') + if time_val: + time.sleep(int(time_val) + 10) + + pages = 0 + all_results = [] + pctrls = [] + while True: + log.info('Getting page %d' % (pages,)) + if pages == 0 and (time_val or attr_name in ('nsslapd-lookthroughlimit', + 'nsslapd-pagesizelimit')): + rtype, rdata, rmsgid, rctrls = conn.result3(msgid) + else: + with pytest.raises(expected_err): + rtype, rdata, rmsgid, rctrls = conn.result3(msgid) + all_results.extend(rdata) + pages += 1 + pctrls = [ + c + for c in rctrls + if c.controlType == SimplePagedResultsControl.controlType + ] + + if pctrls: + if pctrls[0].cookie: + # Copy cookie from response control to request control + req_ctrl.cookie = pctrls[0].cookie + msgid = conn.search_ext(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE, + search_flt, searchreq_attrlist, serverctrls=controls) + else: + break # No more pages available + else: + break + finally: + del_users(users_list) + change_conf_attr(topology_st, suffix, attr_name, attr_value_bck) + + +def test_search_sort_success(topology_st, create_user): + """Verify that search with a simple paged results control + and a server side sort control returns all entries + it should without errors. + + :id: 17d8b150-ed43-41e1-b80f-ee9b4ce45155 + :setup: Standalone instance, test user for binding, + varying number of users for the search base + :steps: + 1. Bind as test user + 2. Search through added users with a simple paged control + and a server side sort control + :expectedresults: + 1. Bind should be successful + 2. 
All users should be found and sorted + """ + + users_num = 50 + page_size = 5 + users_list = add_users(topology_st, users_num, DEFAULT_SUFFIX) + search_flt = r'(uid=test*)' + searchreq_attrlist = ['dn', 'sn'] + + try: + conn = create_user.bind(TEST_USER_PWD) + + req_ctrl = SimplePagedResultsControl(True, size=page_size, cookie='') + sort_ctrl = SSSRequestControl(True, ['sn']) + + log.info('Initiate ldapsearch with created control instance') + log.info('Collect data with sorting') + controls = [req_ctrl, sort_ctrl] + results_sorted = paged_search(conn, DEFAULT_SUFFIX, controls, + search_flt, searchreq_attrlist) + + log.info('Substring numbers from user DNs') + # r_nums = map(lambda x: int(x[0][8:13]), results_sorted) + r_nums = [int(x[0][8:13]) for x in results_sorted] + + log.info('Assert that list is sorted') + assert all(r_nums[i] <= r_nums[i + 1] for i in range(len(r_nums) - 1)) + finally: + del_users(users_list) + + +def test_search_abandon(topology_st, create_user): + """Verify that search with simple paged results control + can be abandon + + :id: 0008538b-7585-4356-839f-268828066978 + :setup: Standalone instance, test user for binding, + varying number of users for the search base + :steps: + 1. Bind as test user + 2. Search through added users with a simple paged control + 3. Abandon the search + :expectedresults: + 1. Bind should be successful + 2. Search should be started successfully + 3. It should throw an ldap.TIMEOUT exception + while trying to get the rest of the search results + """ + + users_num = 10 + page_size = 2 + users_list = add_users(topology_st, users_num, DEFAULT_SUFFIX) + search_flt = r'(uid=test*)' + searchreq_attrlist = ['dn', 'sn'] + + try: + log.info('Set user bind') + conn = create_user.bind(TEST_USER_PWD) + + log.info('Create simple paged results control instance') + req_ctrl = SimplePagedResultsControl(True, size=page_size, cookie='') + controls = [req_ctrl] + + log.info('Initiate a search with a paged results control') + msgid = conn.search_ext(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE, + search_flt, searchreq_attrlist, serverctrls=controls) + log.info('Abandon the search') + conn.abandon(msgid) + + log.info('Expect an ldap.TIMEOUT exception, while trying to get the search results') + with pytest.raises(ldap.TIMEOUT): + conn.result3(msgid, timeout=5) + finally: + del_users(users_list) + + +def test_search_with_timelimit(topology_st, create_user): + """Verify that after performing multiple simple paged searches + to completion, each with a timelimit, it wouldn't fail, if we sleep + for a time more than the timelimit. + + :id: 6cd7234b-136c-419f-bf3e-43aa73592cff + :setup: Standalone instance, test user for binding, + varying number of users for the search base + :steps: + 1. Bind as test user + 2. Search through added users with a simple paged control + and timelimit set to 5 + 3. When the returned cookie is empty, wait 10 seconds + 4. Perform steps 2 and 3 three times in a row + :expectedresults: + 1. Bind should be successful + 2. No error should happen + 3. 10 seconds should pass + 4. 
No error should happen + """ + + users_num = 100 + page_size = 50 + timelimit = 5 + users_list = add_users(topology_st, users_num, DEFAULT_SUFFIX) + search_flt = r'(uid=test*)' + searchreq_attrlist = ['dn', 'sn'] + + try: + log.info('Set user bind') + conn = create_user.bind(TEST_USER_PWD) + + log.info('Create simple paged results control instance') + req_ctrl = SimplePagedResultsControl(True, size=page_size, cookie='') + controls = [req_ctrl] + + for ii in range(3): + log.info('Iteration %d' % ii) + msgid = conn.search_ext(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE, search_flt, + searchreq_attrlist, serverctrls=controls, timeout=timelimit) + + pages = 0 + pctrls = [] + while True: + log.info('Getting page %d' % (pages,)) + rtype, rdata, rmsgid, rctrls = conn.result3(msgid) + pages += 1 + pctrls = [ + c + for c in rctrls + if c.controlType == SimplePagedResultsControl.controlType + ] + + if pctrls: + if pctrls[0].cookie: + # Copy cookie from response control to request control + req_ctrl.cookie = pctrls[0].cookie + msgid = conn.search_ext(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE, search_flt, + searchreq_attrlist, serverctrls=controls, timeout=timelimit) + else: + log.info('Done with this search - sleeping %d seconds' % ( + timelimit * 2)) + time.sleep(timelimit * 2) + break # No more pages available + else: + break + finally: + del_users(users_list) + + +@pytest.mark.parametrize('aci_subject', + ('dns = "{}"'.format(HOSTNAME), + 'ip = "{}"'.format(IP_ADDRESS))) +def test_search_dns_ip_aci(topology_st, create_user, aci_subject): + """Verify that after performing multiple simple paged searches + to completion on the suffix with DNS or IP based ACI + + :id: bbfddc46-a8c8-49ae-8c90-7265d05b22a9 + :parametrized: yes + :setup: Standalone instance, test user for binding, + varying number of users for the search base + :steps: + 1. Back up and remove all previous ACI from suffix + 2. Add an anonymous ACI for DNS check + 3. Bind as test user + 4. Search through added users with a simple paged control + 5. Perform steps 4 three times in a row + 6. Return ACI to the initial state + 7. Go through all steps once again, but use IP subject dn + instead of DNS + :expectedresults: + 1. Operation should be successful + 2. Anonymous ACI should be successfully added + 3. Bind should be successful + 4. No error happens, all users should be found and sorted + 5. Results should remain the same + 6. ACI should be successfully returned + 7. 
Results should be the same with ACI with IP subject dn + """ + + users_num = 100 + page_size = 5 + users_list = add_users(topology_st, users_num, DEFAULT_SUFFIX) + search_flt = r'(uid=test*)' + searchreq_attrlist = ['dn', 'sn'] + + try: + log.info('Back up current suffix ACI') + acis_bck = topology_st.standalone.aci.list(DEFAULT_SUFFIX, ldap.SCOPE_BASE) + + log.info('Add test ACI') + ACI_TARGET = '(targetattr != "userPassword")' + ACI_ALLOW = '(version 3.0;acl "Anonymous access within domain"; allow (read,compare,search)' + ACI_SUBJECT = '(userdn = "ldap:///anyone") and (%s);)' % aci_subject + ACI_BODY = ensure_bytes(ACI_TARGET + ACI_ALLOW + ACI_SUBJECT) + topology_st.standalone.modify_s(DEFAULT_SUFFIX, [(ldap.MOD_REPLACE, 'aci', ACI_BODY)]) + log.info('Set user bind') + conn = create_user.bind(TEST_USER_PWD, uri=f'ldap://{IP_ADDRESS}:{topology_st.standalone.port}') + + log.info('Create simple paged results control instance') + req_ctrl = SimplePagedResultsControl(True, size=page_size, cookie='') + controls = [req_ctrl] + + log.info('Initiate three searches with a paged results control') + for ii in range(3): + log.info('%d search' % (ii + 1)) + all_results = paged_search(conn, DEFAULT_SUFFIX, controls, + search_flt, searchreq_attrlist) + log.info('%d results' % len(all_results)) + assert len(all_results) == len(users_list) + log.info('If we are here, then no error has happened. We are good.') + + finally: + log.info('Restore ACI') + topology_st.standalone.modify_s(DEFAULT_SUFFIX, [(ldap.MOD_DELETE, 'aci', None)]) + for aci in acis_bck: + topology_st.standalone.modify_s(DEFAULT_SUFFIX, [(ldap.MOD_ADD, 'aci', aci.getRawAci())]) + del_users(users_list) + + +def test_search_multiple_paging(topology_st, create_user): + """Verify that after performing multiple simple paged searches + on a single connection without a complition, it wouldn't fail. + + :id: 628b29a6-2d47-4116-a88d-00b87405ef7f + :setup: Standalone instance, test user for binding, + varying number of users for the search base + :steps: + 1. Bind as test user + 2. Initiate the search with a simple paged control + 3. Acquire the returned cookie only one time + 4. Perform steps 2 and 3 three times in a row + :expectedresults: + 1. Bind should be successful + 2. Search should be successfully initiated + 3. Cookie should be successfully acquired + 4. 
No error happens + """ + + users_num = 100 + page_size = 30 + users_list = add_users(topology_st, users_num, DEFAULT_SUFFIX) + search_flt = r'(uid=test*)' + searchreq_attrlist = ['dn', 'sn'] + + try: + log.info('Set user bind') + conn = create_user.bind(TEST_USER_PWD) + + log.info('Create simple paged results control instance') + req_ctrl = SimplePagedResultsControl(True, size=page_size, cookie='') + controls = [req_ctrl] + + for ii in range(3): + log.info('Iteration %d' % ii) + msgid = conn.search_ext(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE, + search_flt, searchreq_attrlist, serverctrls=controls) + rtype, rdata, rmsgid, rctrls = conn.result3(msgid) + pctrls = [ + c + for c in rctrls + if c.controlType == SimplePagedResultsControl.controlType + ] + + # Copy cookie from response control to request control + req_ctrl.cookie = pctrls[0].cookie + msgid = conn.search_ext(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE, + search_flt, searchreq_attrlist, serverctrls=controls) + finally: + del_users(users_list) + + +@pytest.mark.parametrize("invalid_cookie", [1000, -1]) +def test_search_invalid_cookie(topology_st, create_user, invalid_cookie): + """Verify that using invalid cookie while performing + search with the simple paged results control throws + a TypeError exception + + :id: 107be12d-4fe4-47fe-ae86-f3e340a56f42 + :parametrized: yes + :setup: Standalone instance, test user for binding, + varying number of users for the search base + :steps: + 1. Bind as test user + 2. Initiate the search with a simple paged control + 3. Put an invalid cookie (-1, 1000) to the control + 4. Continue the search + :expectedresults: + 1. Bind should be successful + 2. Search should be successfully initiated + 3. Cookie should be added + 4. It should throw a TypeError exception + """ + + users_num = 100 + page_size = 50 + users_list = add_users(topology_st, users_num, DEFAULT_SUFFIX) + search_flt = r'(uid=test*)' + searchreq_attrlist = ['dn', 'sn'] + + try: + log.info('Set user bind') + conn = create_user.bind(TEST_USER_PWD) + + log.info('Create simple paged results control instance') + req_ctrl = SimplePagedResultsControl(True, size=page_size, cookie='') + controls = [req_ctrl] + + msgid = conn.search_ext(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE, + search_flt, searchreq_attrlist, serverctrls=controls) + rtype, rdata, rmsgid, rctrls = conn.result3(msgid) + + log.info('Put an invalid cookie (%d) to the control. TypeError is expected' % + invalid_cookie) + req_ctrl.cookie = invalid_cookie + with pytest.raises(TypeError): + msgid = conn.search_ext(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE, + search_flt, searchreq_attrlist, serverctrls=controls) + finally: + del_users(users_list) + + +def test_search_abandon_with_zero_size(topology_st, create_user): + """Verify that search with simple paged results control + can be abandon using page_size = 0 + + :id: d2fd9a10-84e1-4b69-a8a7-36ca1427c171 + :setup: Standalone instance, test user for binding, + varying number of users for the search base + :steps: + 1. Bind as test user + 2. Search through added users with a simple paged control + and page_size = 0 + :expectedresults: + 1. Bind should be successful + 2. 
No cookie should be returned at all + """ + + users_num = 10 + page_size = 0 + users_list = add_users(topology_st, users_num, DEFAULT_SUFFIX) + search_flt = r'(uid=test*)' + searchreq_attrlist = ['dn', 'sn'] + + try: + log.info('Set user bind') + conn = create_user.bind(TEST_USER_PWD) + + log.info('Create simple paged results control instance') + req_ctrl = SimplePagedResultsControl(True, size=page_size, cookie='') + controls = [req_ctrl] + + msgid = conn.search_ext(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE, + search_flt, searchreq_attrlist, serverctrls=controls) + rtype, rdata, rmsgid, rctrls = conn.result3(msgid) + pctrls = [ + c + for c in rctrls + if c.controlType == SimplePagedResultsControl.controlType + ] + assert not pctrls[0].cookie + finally: + del_users(users_list) + + +def test_search_pagedsizelimit_success(topology_st, create_user): + """Verify that search with a simple paged results control + returns all entries it should without errors while + valid value set to nsslapd-pagedsizelimit. + + :id: 88193f10-f6f0-42f5-ae9c-ff34b8f9ee8c + :setup: Standalone instance, test user for binding, + 10 users for the search base + :steps: + 1. Set nsslapd-pagedsizelimit: 20 + 2. Bind as test user + 3. Search through added users with a simple paged control + using page_size = 10 + :expectedresults: + 1. nsslapd-pagedsizelimit should be successfully set + 2. Bind should be successful + 3. All users should be found + """ + + users_num = 10 + page_size = 10 + attr_name = 'nsslapd-pagedsizelimit' + attr_value = '20' + attr_value_bck = change_conf_attr(topology_st, DN_CONFIG, attr_name, attr_value) + users_list = add_users(topology_st, users_num, DEFAULT_SUFFIX) + search_flt = r'(uid=test*)' + searchreq_attrlist = ['dn', 'sn'] + + try: + log.info('Set user bind') + conn = create_user.bind(TEST_USER_PWD) + + req_ctrl = SimplePagedResultsControl(True, size=page_size, cookie='') + controls = [req_ctrl] + + all_results = paged_search(conn, DEFAULT_SUFFIX, controls, search_flt, searchreq_attrlist) + + log.info('%d results' % len(all_results)) + assert len(all_results) == len(users_list) + + finally: + del_users(users_list) + change_conf_attr(topology_st, DN_CONFIG, 'nsslapd-pagedsizelimit', attr_value_bck) + + +@pytest.mark.parametrize('conf_attr,user_attr,expected_rs', + (('5', '15', 'PASS'), ('15', '5', ldap.SIZELIMIT_EXCEEDED))) +def test_search_nspagedsizelimit(topology_st, create_user, + conf_attr, user_attr, expected_rs): + """Verify that nsPagedSizeLimit attribute overrides + nsslapd-pagedsizelimit while performing search with + the simple paged results control. + + :id: b08c6ad2-ba28-447a-9f04-5377c3661d0d + :parametrized: yes + :setup: Standalone instance, test user for binding, + 10 users for the search base + :steps: + 1. Set nsslapd-pagedsizelimit: 5 + 2. Set nsPagedSizeLimit: 15 + 3. Bind as test user + 4. Search through added users with a simple paged control + using page_size = 10 + 5. Bind as Directory Manager + 6. Restore all values + 7. Set nsslapd-pagedsizelimit: 15 + 8. Set nsPagedSizeLimit: 5 + 9. Bind as test user + 10. Search through added users with a simple paged control + using page_size = 10 + :expectedresults: + 1. nsslapd-pagedsizelimit should be successfully set + 2. nsPagedSizeLimit should be successfully set + 3. Bind should be successful + 4. No error happens, all users should be found + 5. Bind should be successful + 6. All values should be restored + 7. nsslapd-pagedsizelimit should be successfully set + 8. nsPagedSizeLimit should be successfully set + 9. 
Bind should be successful + 10. It should throw SIZELIMIT_EXCEEDED exception + """ + + users_num = 10 + page_size = 10 + users_list = add_users(topology_st, users_num, DEFAULT_SUFFIX) + search_flt = r'(uid=test*)' + searchreq_attrlist = ['dn', 'sn'] + conf_attr_bck = change_conf_attr(topology_st, DN_CONFIG, 'nsslapd-pagedsizelimit', conf_attr) + user_attr_bck = change_conf_attr(topology_st, create_user.dn, 'nsPagedSizeLimit', user_attr) + + try: + log.info('Set user bind') + conn = create_user.bind(TEST_USER_PWD) + + req_ctrl = SimplePagedResultsControl(True, size=page_size, cookie='') + controls = [req_ctrl] + + if expected_rs == ldap.SIZELIMIT_EXCEEDED: + log.info('Expect to fail with SIZELIMIT_EXCEEDED') + with pytest.raises(expected_rs): + all_results = paged_search(conn, DEFAULT_SUFFIX, controls, search_flt, searchreq_attrlist) + elif expected_rs == 'PASS': + log.info('Expect to pass') + all_results = paged_search(conn, DEFAULT_SUFFIX, controls, search_flt, searchreq_attrlist) + log.info('%d results' % len(all_results)) + assert len(all_results) == len(users_list) + + finally: + del_users(users_list) + change_conf_attr(topology_st, DN_CONFIG, 'nsslapd-pagedsizelimit', conf_attr_bck) + change_conf_attr(topology_st, create_user.dn, 'nsPagedSizeLimit', user_attr_bck) + + +@pytest.mark.parametrize('conf_attr_values,expected_rs', + ((('5000', '100', '100'), ldap.ADMINLIMIT_EXCEEDED), + (('5000', '120', '122'), 'PASS'))) +def test_search_paged_limits(topology_st, create_user, conf_attr_values, expected_rs): + """Verify that nsslapd-idlistscanlimit and + nsslapd-lookthroughlimit can limit the administrator + search abilities. + + :id: e0f8b916-7276-4bd3-9e73-8696a4468811 + :parametrized: yes + :setup: Standalone instance, test user for binding, + 10 users for the search base + :steps: + 1. Set nsslapd-sizelimit and nsslapd-pagedsizelimit to 5000 + 2. Set nsslapd-idlistscanlimit: 120 + 3. Set nsslapd-lookthroughlimit: 122 + 4. Bind as test user + 5. Search through added users with a simple paged control + using page_size = 10 + 6. Bind as Directory Manager + 7. Set nsslapd-idlistscanlimit: 100 + 8. Set nsslapd-lookthroughlimit: 100 + 9. Bind as test user + 10. Search through added users with a simple paged control + using page_size = 10 + :expectedresults: + 1. nsslapd-sizelimit and nsslapd-pagedsizelimit + should be successfully set + 2. nsslapd-idlistscanlimit should be successfully set + 3. nsslapd-lookthroughlimit should be successfully set + 4. Bind should be successful + 5. No error happens, all users should be found + 6. Bind should be successful + 7. nsslapd-idlistscanlimit should be successfully set + 8. nsslapd-lookthroughlimit should be successfully set + 9. Bind should be successful + 10. 
It should throw ADMINLIMIT_EXCEEDED exception + """ + + users_num = 101 + page_size = 10 + users_list = add_users(topology_st, users_num, DEFAULT_SUFFIX) + search_flt = r'(uid=test*)' + searchreq_attrlist = ['dn', 'sn'] + size_attr_bck = change_conf_attr(topology_st, DN_CONFIG, 'nsslapd-sizelimit', conf_attr_values[0]) + pagedsize_attr_bck = change_conf_attr(topology_st, DN_CONFIG, 'nsslapd-pagedsizelimit', conf_attr_values[0]) + idlistscan_attr_bck = change_conf_attr(topology_st, 'cn=config,%s' % DN_LDBM, 'nsslapd-idlistscanlimit', conf_attr_values[1]) + lookthrough_attr_bck = change_conf_attr(topology_st, 'cn=config,%s' % DN_LDBM, 'nsslapd-lookthroughlimit', conf_attr_values[2]) + + try: + log.info('Set user bind') + conn = create_user.bind(TEST_USER_PWD) + + req_ctrl = SimplePagedResultsControl(True, size=page_size, cookie='') + controls = [req_ctrl] + + if expected_rs == ldap.ADMINLIMIT_EXCEEDED: + log.info('Expect to fail with ADMINLIMIT_EXCEEDED') + with pytest.raises(expected_rs): + all_results = paged_search(conn, DEFAULT_SUFFIX, controls, search_flt, searchreq_attrlist) + elif expected_rs == 'PASS': + log.info('Expect to pass') + all_results = paged_search(conn, DEFAULT_SUFFIX, controls, search_flt, searchreq_attrlist) + log.info('%d results' % len(all_results)) + assert len(all_results) == len(users_list) + finally: + del_users(users_list) + change_conf_attr(topology_st, DN_CONFIG, 'nsslapd-sizelimit', size_attr_bck) + change_conf_attr(topology_st, DN_CONFIG, 'nsslapd-pagedsizelimit', pagedsize_attr_bck) + change_conf_attr(topology_st, 'cn=config,%s' % DN_LDBM, 'nsslapd-lookthroughlimit', lookthrough_attr_bck) + change_conf_attr(topology_st, 'cn=config,%s' % DN_LDBM, 'nsslapd-idlistscanlimit', idlistscan_attr_bck) + + +@pytest.mark.parametrize('conf_attr_values,expected_rs', + ((('1000', '100', '100'), ldap.ADMINLIMIT_EXCEEDED), + (('1000', '120', '122'), 'PASS'))) +def test_search_paged_user_limits(topology_st, create_user, conf_attr_values, expected_rs): + """Verify that nsPagedIDListScanLimit and nsPagedLookthroughLimit + override nsslapd-idlistscanlimit and nsslapd-lookthroughlimit + while performing search with the simple paged results control. + + :id: 69e393e9-1ab8-4f4e-b4a1-06ca63dc7b1b + :parametrized: yes + :setup: Standalone instance, test user for binding, + 10 users for the search base + :steps: + 1. Set nsslapd-idlistscanlimit: 1000 + 2. Set nsslapd-lookthroughlimit: 1000 + 3. Set nsPagedIDListScanLimit: 120 + 4. Set nsPagedLookthroughLimit: 122 + 5. Bind as test user + 6. Search through added users with a simple paged control + using page_size = 10 + 7. Bind as Directory Manager + 8. Set nsPagedIDListScanLimit: 100 + 9. Set nsPagedLookthroughLimit: 100 + 10. Bind as test user + 11. Search through added users with a simple paged control + using page_size = 10 + :expectedresults: + 1. nsslapd-idlistscanlimit should be successfully set + 2. nsslapd-lookthroughlimit should be successfully set + 3. nsPagedIDListScanLimit should be successfully set + 4. nsPagedLookthroughLimit should be successfully set + 5. Bind should be successful + 6. No error happens, all users should be found + 7. Bind should be successful + 8. nsPagedIDListScanLimit should be successfully set + 9. nsPagedLookthroughLimit should be successfully set + 10. Bind should be successful + 11. 
It should throw ADMINLIMIT_EXCEEDED exception + """ + + users_num = 101 + page_size = 10 + users_list = add_users(topology_st, users_num, DEFAULT_SUFFIX) + search_flt = r'(uid=test*)' + searchreq_attrlist = ['dn', 'sn'] + lookthrough_attr_bck = change_conf_attr(topology_st, 'cn=config,%s' % DN_LDBM, 'nsslapd-lookthroughlimit', conf_attr_values[0]) + idlistscan_attr_bck = change_conf_attr(topology_st, 'cn=config,%s' % DN_LDBM, 'nsslapd-idlistscanlimit', conf_attr_values[0]) + user_idlistscan_attr_bck = change_conf_attr(topology_st, create_user.dn, 'nsPagedIDListScanLimit', conf_attr_values[1]) + user_lookthrough_attr_bck = change_conf_attr(topology_st, create_user.dn, 'nsPagedLookthroughLimit', conf_attr_values[2]) + + try: + log.info('Set user bind') + conn = create_user.bind(TEST_USER_PWD) + + req_ctrl = SimplePagedResultsControl(True, size=page_size, cookie='') + controls = [req_ctrl] + + if expected_rs == ldap.ADMINLIMIT_EXCEEDED: + log.info('Expect to fail with ADMINLIMIT_EXCEEDED') + with pytest.raises(expected_rs): + all_results = paged_search(conn, DEFAULT_SUFFIX, controls, search_flt, searchreq_attrlist) + elif expected_rs == 'PASS': + log.info('Expect to pass') + all_results = paged_search(conn, DEFAULT_SUFFIX, controls, search_flt, searchreq_attrlist) + log.info('%d results' % len(all_results)) + assert len(all_results) == len(users_list) + finally: + del_users(users_list) + change_conf_attr(topology_st, 'cn=config,%s' % DN_LDBM, 'nsslapd-lookthroughlimit', lookthrough_attr_bck) + change_conf_attr(topology_st, 'cn=config,%s' % DN_LDBM, 'nsslapd-idlistscanlimit', idlistscan_attr_bck) + change_conf_attr(topology_st, create_user.dn, 'nsPagedIDListScanLimit', user_idlistscan_attr_bck) + change_conf_attr(topology_st, create_user.dn, 'nsPagedLookthroughLimit', user_lookthrough_attr_bck) + + +def test_ger_basic(topology_st, create_user): + """Verify that search with a simple paged results control + and get effective rights control returns all entries + it should without errors. + + :id: 7b0bdfc7-a2f2-4c1a-bcab-f1eb8b330d45 + :setup: Standalone instance, test user for binding, + varying number of users for the search base + :steps: + 1. Search through added users with a simple paged control + and get effective rights control + :expectedresults: + 1. All users should be found, every found entry should have + an 'attributeLevelRights' returned + """ + + users_list = add_users(topology_st, 20, DEFAULT_SUFFIX) + search_flt = r'(uid=test*)' + searchreq_attrlist = ['dn', 'sn'] + page_size = 4 + + try: + spr_ctrl = SimplePagedResultsControl(True, size=page_size, cookie='') + ger_ctrl = GetEffectiveRightsControl(True, ensure_bytes("dn: " + DN_DM)) + + all_results = paged_search(topology_st.standalone, DEFAULT_SUFFIX, [spr_ctrl, ger_ctrl], + search_flt, searchreq_attrlist) + + log.info('{} results'.format(len(all_results))) + assert len(all_results) == len(users_list) + log.info('Check for attributeLevelRights') + assert all(attrs['attributeLevelRights'][0] for dn, attrs in all_results) + finally: + log.info('Remove added users') + del_users(users_list) + + +def test_multi_suffix_search(topology_st, create_user, new_suffixes): + """Verify that page result search returns empty cookie + if there is no returned entry. + + :id: 9712345b-9e38-4df6-8794-05f12c457d39 + :setup: Standalone instance, test user for binding, + two suffixes with backends, one is inserted into another, + 10 users for the search base within each suffix + :steps: + 1. Bind as test user + 2. 
Search through all 20 added users with a simple paged control + using page_size = 4 + 3. Wait some time for the logs to be updated + 4. Check access log + :expectedresults: + 1. Bind should be successful + 2. All users should be found + 3. Some time should pass + 4. The access log should contain the pr_cookie for each page request + and it should be equal 0, except the last one should be equal -1 + """ + + search_flt = r'(uid=test*)' + searchreq_attrlist = ['dn', 'sn'] + page_size = 4 + users_num = 20 + + log.info('Clear the access log') + topology_st.standalone.deleteAccessLogs() + + users_list_1 = add_users(topology_st, 10, NEW_SUFFIX_1) + users_list_2 = add_users(topology_st, 10, NEW_SUFFIX_2) + + try: + req_ctrl = SimplePagedResultsControl(True, size=page_size, cookie='') + + all_results = paged_search(topology_st.standalone, NEW_SUFFIX_1, [req_ctrl], search_flt, searchreq_attrlist) + + log.info('{} results'.format(len(all_results))) + assert len(all_results) == users_num + + log.info('Restart the server to flush the logs') + topology_st.standalone.restart(timeout=10) + + access_log_lines = topology_st.standalone.ds_access_log.match('.*pr_cookie=.*') + pr_cookie_list = ([line.rsplit('=', 1)[-1] for line in access_log_lines]) + pr_cookie_list = [int(pr_cookie) for pr_cookie in pr_cookie_list] + log.info('Assert that last pr_cookie == -1 and others pr_cookie == 0') + pr_cookie_zeros = list(pr_cookie == 0 for pr_cookie in pr_cookie_list[0:-1]) + assert all(pr_cookie_zeros) + assert pr_cookie_list[-1] == -1 + finally: + log.info('Remove added users') + del_users(users_list_1) + del_users(users_list_2) + + +@pytest.mark.parametrize('conf_attr_value', (None, '-1', '1000')) +def test_maxsimplepaged_per_conn_success(topology_st, create_user, conf_attr_value): + """Verify that nsslapd-maxsimplepaged-per-conn acts according design + + :id: 192e2f25-04ee-4ff9-9340-d875dcbe8011 + :parametrized: yes + :setup: Standalone instance, test user for binding, + 20 users for the search base + :steps: + 1. Set nsslapd-maxsimplepaged-per-conn in cn=config + to the next values: no value, -1, some positive + 2. Search through the added users with a simple paged control + using page size = 4 + :expectedresults: + 1. nsslapd-maxsimplepaged-per-conn should be successfully set + 2. If no value or value = -1 - all users should be found, + default behaviour; If the value is positive, + the value is the max simple paged results requests per connection. 
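+           In all cases this test issues a single paged search, so all added users should still be found.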
+ """ + + users_list = add_users(topology_st, 20, DEFAULT_SUFFIX) + search_flt = r'(uid=test*)' + searchreq_attrlist = ['dn', 'sn'] + page_size = 4 + if conf_attr_value: + max_per_con_bck = change_conf_attr(topology_st, DN_CONFIG, 'nsslapd-maxsimplepaged-per-conn', conf_attr_value) + + try: + log.info('Set user bind') + conn = create_user.bind(TEST_USER_PWD) + + req_ctrl = SimplePagedResultsControl(True, size=page_size, cookie='') + + all_results = paged_search(conn, DEFAULT_SUFFIX, [req_ctrl], search_flt, searchreq_attrlist) + + log.info('{} results'.format(len(all_results))) + assert len(all_results) == len(users_list) + finally: + log.info('Remove added users') + del_users(users_list) + if conf_attr_value: + change_conf_attr(topology_st, DN_CONFIG, 'nsslapd-maxsimplepaged-per-conn', max_per_con_bck) + + +@pytest.mark.parametrize('conf_attr_value', ('0', '1')) +def test_maxsimplepaged_per_conn_failure(topology_st, create_user, conf_attr_value): + """Verify that nsslapd-maxsimplepaged-per-conn acts according design + + :id: eb609e63-2829-4331-8439-a35f99694efa + :parametrized: yes + :setup: Standalone instance, test user for binding, + 20 users for the search base + :steps: + 1. Set nsslapd-maxsimplepaged-per-conn = 0 in cn=config + 2. Search through the added users with a simple paged control + using page size = 4 + 3. Set nsslapd-maxsimplepaged-per-conn = 1 in cn=config + 4. Search through the added users with a simple paged control + using page size = 4 two times, but don't close the connections + :expectedresults: + 1. nsslapd-maxsimplepaged-per-conn should be successfully set + 2. UNWILLING_TO_PERFORM should be thrown + 3. Bind should be successful + 4. UNWILLING_TO_PERFORM should be thrown + """ + + users_list = add_users(topology_st, 20, DEFAULT_SUFFIX) + search_flt = r'(uid=test*)' + searchreq_attrlist = ['dn', 'sn'] + page_size = 4 + max_per_con_bck = change_conf_attr(topology_st, DN_CONFIG, 'nsslapd-maxsimplepaged-per-conn', conf_attr_value) + + try: + log.info('Set user bind') + conn = create_user.bind(TEST_USER_PWD) + + log.info('Create simple paged results control instance') + req_ctrl = SimplePagedResultsControl(True, size=page_size, cookie='') + + with pytest.raises(ldap.UNWILLING_TO_PERFORM): + msgid = conn.search_ext(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE, + search_flt, searchreq_attrlist, serverctrls=[req_ctrl]) + rtype, rdata, rmsgid, rctrls = conn.result3(msgid) + + # If nsslapd-maxsimplepaged-per-conn = 1, + # it should pass this point, but failed on the next search + assert conf_attr_value == '1' + msgid = conn.search_ext(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE, + search_flt, searchreq_attrlist, serverctrls=[req_ctrl]) + rtype, rdata, rmsgid, rctrls = conn.result3(msgid) + finally: + log.info('Remove added users') + del_users(users_list) + change_conf_attr(topology_st, DN_CONFIG, 'nsslapd-maxsimplepaged-per-conn', max_per_con_bck) + +if __name__ == '__main__': + # Run isolated + # -s for DEBUG mode + CURRENT_FILE = os.path.realpath(__file__) + pytest.main("-s %s" % CURRENT_FILE) diff --git a/dirsrvtests/tests/suites/password/__init__.py b/dirsrvtests/tests/suites/password/__init__.py new file mode 100644 index 0000000..d48fba6 --- /dev/null +++ b/dirsrvtests/tests/suites/password/__init__.py @@ -0,0 +1,3 @@ +""" + :Requirement: 389-ds-base: Password Policy +""" diff --git a/dirsrvtests/tests/suites/password/password_test.py b/dirsrvtests/tests/suites/password/password_test.py new file mode 100644 index 0000000..3807947 --- /dev/null +++ 
b/dirsrvtests/tests/suites/password/password_test.py @@ -0,0 +1,72 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2016 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. +# --- END COPYRIGHT BLOCK --- +# +import logging + +import pytest +from lib389.tasks import * +from lib389.topologies import topology_st +from lib389._constants import PASSWORD, DEFAULT_SUFFIX + +from lib389.idm.user import UserAccounts, TEST_USER_PROPERTIES + +pytestmark = pytest.mark.tier1 + +logging.getLogger(__name__).setLevel(logging.DEBUG) +log = logging.getLogger(__name__) + + +@pytest.mark.bz918684 +@pytest.mark.ds394 +def test_password_delete_specific_password(topology_st): + """Delete a specific userPassword, and make sure + it is actually deleted from the entry + + :id: 800f432a-52ab-4661-ac66-a2bdd9b984d6 + :setup: Standalone instance + :steps: + 1. Add a user with userPassword attribute in cleartext + 2. Delete the added value of userPassword attribute + 3. Check if the userPassword attribute is deleted + 4. Delete the user + :expectedresults: + 1. The user with userPassword in cleartext should be added successfully + 2. Operation should be successful + 3. UserPassword should be deleted + 4. The user should be successfully deleted + """ + + log.info('Running test_password_delete_specific_password...') + + users = UserAccounts(topology_st.standalone, DEFAULT_SUFFIX) + + user = users.create(properties=TEST_USER_PROPERTIES) + + # + # Add a test user with a password + # + user.set('userpassword', PASSWORD) + + # + # Delete the exact password + # + user.remove('userpassword', PASSWORD) + + # + # Check the password is actually deleted + # + assert not user.present('userPassword') + + log.info('test_password_delete_specific_password: PASSED') + + +if __name__ == '__main__': + # Run isolated + # -s for DEBUG mode + CURRENT_FILE = os.path.realpath(__file__) + pytest.main(["-s", CURRENT_FILE]) diff --git a/dirsrvtests/tests/suites/password/pbkdf2_upgrade_plugin_test.py b/dirsrvtests/tests/suites/password/pbkdf2_upgrade_plugin_test.py new file mode 100644 index 0000000..90dae36 --- /dev/null +++ b/dirsrvtests/tests/suites/password/pbkdf2_upgrade_plugin_test.py @@ -0,0 +1,52 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2018 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. +# --- END COPYRIGHT BLOCK --- +# +import pytest +from lib389.topologies import topology_st +from lib389.password_plugins import PBKDF2Plugin +from lib389.utils import ds_is_older + +pytestmark = pytest.mark.tier1 + +@pytest.mark.skipif(ds_is_older('1.4.1'), reason="Not implemented") +def test_pbkdf2_upgrade(topology_st): + """On upgrade pbkdf2 doesn't ship. We need to be able to + provide this on upgrade to make sure default hashes work. + However, password plugins are special - they need really + early bootstrap so that setting the default hash spec works. + + This tests that the removal of the pbkdf2 plugin causes + it to be re-bootstrapped and added. + + :id: c2198692-7c02-433b-af5b-3be54920571a + :setup: Single instance + :steps: 1. Remove the PBKDF2 plugin + 2. Restart the server + 3. Restart the server + :expectedresults: + 1. Plugin is removed (IE pre-upgrade state) + 2. The plugin is bootstrapped and added + 3. 
No change (already bootstrapped) + + """ + # Remove the pbkdf2 plugin config + p1 = PBKDF2Plugin(topology_st.standalone) + assert(p1.exists()) + p1._protected = False + p1.delete() + # Restart + topology_st.standalone.restart() + # check it's been readded. + p2 = PBKDF2Plugin(topology_st.standalone) + assert(p2.exists()) + # Now restart to make sure we still work from the non-bootstrap form + topology_st.standalone.restart() + p3 = PBKDF2Plugin(topology_st.standalone) + assert(p3.exists()) + + diff --git a/dirsrvtests/tests/suites/password/pwdAdmin_test.py b/dirsrvtests/tests/suites/password/pwdAdmin_test.py new file mode 100644 index 0000000..16869d4 --- /dev/null +++ b/dirsrvtests/tests/suites/password/pwdAdmin_test.py @@ -0,0 +1,363 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2016 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. +# --- END COPYRIGHT BLOCK --- +# +import pytest +from lib389.tasks import * +from lib389.utils import * +from lib389.topologies import topology_st + +from lib389.idm.user import UserAccounts +from lib389.idm.group import Groups +from lib389.idm.domain import Domain + +from lib389._constants import SUFFIX, DN_DM, PASSWORD, DEFAULT_SUFFIX + +pytestmark = pytest.mark.tier1 + +logging.getLogger(__name__).setLevel(logging.DEBUG) +log = logging.getLogger(__name__) + +CONFIG_DN = 'cn=config' +ADMIN_NAME = 'passwd_admin' +ADMIN_DN = 'cn=%s,%s' % (ADMIN_NAME, SUFFIX) +ADMIN2_NAME = 'passwd_admin2' +ADMIN2_DN = 'cn=%s,%s' % (ADMIN2_NAME, SUFFIX) +ADMIN_PWD = 'ntaheonusheoasuhoau_9' +ADMIN_GROUP_DN = 'cn=password admin group,%s' % (SUFFIX) +ENTRY_NAME = 'Joe Schmo' +ENTRY_DN = 'cn=%s,%s' % (ENTRY_NAME, SUFFIX) +INVALID_PWDS = ('2_Short', 'No_Number', 'N0Special', '{SSHA}bBy8UdtPZwu8uZna9QOYG3Pr41RpIRVDl8wddw==') + + +@pytest.fixture(scope="module") +def password_policy(topology_st): + """Set up password policy + Create a Password Admin entry; + Set up password policy attributes in config; + Add an aci to give everyone full access; + Test that the setup works + """ + + log.info('test_pwdAdmin_init: Creating Password Administrator entries...') + + users = UserAccounts(topology_st.standalone, DEFAULT_SUFFIX) + groups = Groups(topology_st.standalone, DEFAULT_SUFFIX) + + # Add Password Admin 1 + admin1_user = users.create(properties={ + 'uid': 'admin1', + 'cn' : 'admin1', + 'sn' : 'strator', + 'uidNumber' : '1000', + 'gidNumber' : '2000', + 'homeDirectory' : '/home/admin1', + 'userPassword': ADMIN_PWD + }) + + # Add Password Admin 2 + + admin2_user = users.create(properties={ + 'uid': 'admin2', + 'cn' : 'admin2', + 'sn' : 'strator', + 'uidNumber' : '1000', + 'gidNumber' : '2000', + 'homeDirectory' : '/home/admin2', + 'userPassword': ADMIN_PWD + }) + + # Add Password Admin Group + + admin_group = groups.create(properties={ + 'cn': 'password admin group' + }) + + admin_group.add_member(admin1_user.dn) + admin_group.add_member(admin2_user.dn) + + # Configure password policy + + log.info('test_pwdAdmin_init: Configuring password policy...') + + topology_st.standalone.config.replace_many( + ('nsslapd-pwpolicy-local', 'on'), + ('passwordCheckSyntax', 'on'), + ('passwordMinCategories', '1'), + ('passwordMinTokenLength', '2'), + ('passwordExp', 'on'), + ('passwordMinDigits', '1'), + ('passwordMinSpecials', '1') + ) + + # + # Add an aci to allow everyone all access (just makes things easier) + # + log.info('Add aci to allow password admin to add/update entries...') + + domain = Domain(topology_st.standalone, 
DEFAULT_SUFFIX) + + ACI_TARGET = "(target = \"ldap:///%s\")" % SUFFIX + ACI_TARGETATTR = "(targetattr = *)" + ACI_ALLOW = "(version 3.0; acl \"Password Admin Access\"; allow (all) " + ACI_SUBJECT = "(userdn = \"ldap:///anyone\");)" + ACI_BODY = ACI_TARGET + ACI_TARGETATTR + ACI_ALLOW + ACI_SUBJECT + + domain.add('aci', ACI_BODY) + + # + # Bind as the future Password Admin + # + log.info('test_pwdAdmin_init: Bind as the Password Administrator (before activating)...') + admin_conn = admin1_user.bind(ADMIN_PWD) + + # + # Setup our test entry, and test password policy is working + # + + # Connect up an admin authed users connection. + admin_users = UserAccounts(admin_conn, DEFAULT_SUFFIX) + + # + # Start by attempting to add an entry with an invalid password + # + log.info('test_pwdAdmin_init: Attempt to add entries with invalid passwords, these adds should fail...') + for passwd in INVALID_PWDS: + with pytest.raises(ldap.CONSTRAINT_VIOLATION): + admin_users.create(properties={ + 'uid': 'example', + 'cn' : 'example', + 'sn' : 'example', + 'uidNumber' : '1000', + 'gidNumber' : '2000', + 'homeDirectory' : '/home/example', + 'userPassword': passwd + }) + + return (admin_group, admin1_user, admin2_user) + +def test_pwdAdmin_bypass(topology_st, password_policy): + """Test that password administrators/root DN can + bypass password syntax/policy + + :id: 743bfe33-a1f7-482b-8807-efeb7aa57348 + :setup: Standalone instance, Password Admin entry, + Password policy configured as below: + nsslapd-pwpolicy-local: on + passwordCheckSyntax: on + passwordMinCategories: 1 + passwordMinTokenLength: 2 + passwordExp: on + passwordMinDigits: 1 + passwordMinSpecials: 1 + :steps: + 1: Add users with invalid passwords + :expectedresults: + 1: Users should be added successfully. + """ + + # + # Now activate a password administator, bind as root dn to do the config + # update, then rebind as the password admin + # + log.info('test_pwdAdmin: Activate the Password Administator...') + + # Extract our fixture data. + + (admin_group, admin1_user, admin2_user) = password_policy + + # Set the password admin + + topology_st.standalone.config.set('passwordAdminDN', admin1_user.dn) + + # + # Get our test entry + # + + admin_conn = admin1_user.bind(ADMIN_PWD) + admin_users = UserAccounts(admin_conn, DEFAULT_SUFFIX) + + # + # Start adding entries with invalid passwords, delete the entry after each pass. + # + for passwd in INVALID_PWDS: + u1 = admin_users.create(properties={ + 'uid': 'example', + 'cn' : 'example', + 'sn' : 'example', + 'uidNumber' : '1000', + 'gidNumber' : '2000', + 'homeDirectory' : '/home/example', + 'userPassword': passwd + }) + u1.delete() + + +def test_pwdAdmin_no_admin(topology_st, password_policy): + """Test that password administrators/root DN can + bypass password syntax/policy + + :id: 74347798-7cc7-4ce7-ad5c-06387ffde02c + :setup: Standalone instance, Password Admin entry, + Password policy configured as below: + nsslapd-pwpolicy-local: on + passwordCheckSyntax: on + passwordMinCategories: 1 + passwordMinTokenLength: 2 + passwordExp: on + passwordMinDigits: 1 + passwordMinSpecials: 1 + :steps: + 1: Create a user + 2: Attempt to set passwords on the user that are invalid + :expectedresults: + 1: Success + 2: The passwords should NOT be set + """ + (admin_group, admin1_user, admin2_user) = password_policy + + # Remove password admin + + # Can't use pytest.raises. 
because this may or may not exist + try: + topology_st.standalone.config.remove_all('passwordAdminDN') + except ldap.NO_SUCH_ATTRIBUTE: + pass + + # + # Add the entry for the next round of testing (modify password) + # + admin_conn = admin1_user.bind(ADMIN_PWD) + admin_users = UserAccounts(admin_conn, DEFAULT_SUFFIX) + + u2 = admin_users.create(properties={ + 'uid': 'example', + 'cn' : 'example', + 'sn' : 'example', + 'uidNumber' : '1000', + 'gidNumber' : '2000', + 'homeDirectory' : '/home/example', + 'userPassword': ADMIN_PWD + }) + + # + # Make invalid password updates that should fail + # + for passwd in INVALID_PWDS: + with pytest.raises(ldap.CONSTRAINT_VIOLATION): + u2.replace('userPassword', passwd) + + +def test_pwdAdmin_modify(topology_st, password_policy): + """Test that password administrators/root DN can modify + passwords rather than adding them. + + :id: 85326527-8eeb-401f-9d1b-4ef55dee45a4 + :setup: Standalone instance, Password Admin entry, + Password policy configured as below: + nsslapd-pwpolicy-local: on + passwordCheckSyntax: on + passwordMinCategories: 1 + passwordMinTokenLength: 2 + passwordExp: on + passwordMinDigits: 1 + passwordMinSpecials: 1 + :steps: + 1: Retrieve the user + 2: Replace the password with invalid content + :expectedresults: + 1: Success + 2: The password should be set + """ + (admin_group, admin1_user, admin2_user) = password_policy + + # Update config - set the password admin + topology_st.standalone.config.set('passwordAdminDN', admin1_user.dn) + + admin_conn = admin1_user.bind(ADMIN_PWD) + admin_users = UserAccounts(admin_conn, DEFAULT_SUFFIX) + + u3 = admin_users.get('example') + # + # Make the same password updates, but this time they should succeed + # + for passwd in INVALID_PWDS: + u3.replace('userPassword', passwd) + +def test_pwdAdmin_group(topology_st, password_policy): + """Test that password admin group can bypass policy. + + :id: 4d62ae34-0f25-486e-b823-afd2b431e9b0 + :setup: Standalone instance, Password Admin entry, + Password policy configured as below: + nsslapd-pwpolicy-local: on + passwordCheckSyntax: on + passwordMinCategories: 1 + passwordMinTokenLength: 2 + passwordExp: on + passwordMinDigits: 1 + passwordMinSpecials: 1 + :steps: + 1: Add group to passwordadmin dn + 2: Attempt to set invalid passwords. + :expectedresults: + 1: Success. + 2: Password should be set. + """ + (admin_group, admin1_user, admin2_user) = password_policy + + # Update config - set the password admin group + topology_st.standalone.config.set('passwordAdminDN', admin_group.dn) + + # Bind as admin2, who is in the group. + + admin2_conn = admin2_user.bind(ADMIN_PWD) + admin2_users = UserAccounts(admin2_conn, DEFAULT_SUFFIX) + + u4 = admin2_users.get('example') + + # Make some invalid password updates, but they should succeed + for passwd in INVALID_PWDS: + u4.replace('userPassword', passwd) + + +def test_pwdAdmin_config_validation(topology_st, password_policy): + """Check passwordAdminDN for valid and invalid values + + :id: f7049482-41e8-438b-ae18-cdd2612c783a + :setup: Standalone instance, Password Admin entry, + Password policy configured as below: + nsslapd-pwpolicy-local: on + passwordCheckSyntax: on + passwordMinCategories: 1 + passwordMinTokenLength: 1 + passwordExp: on + passwordMinDigits: 1 + passwordMinSpecials: 1 + :steps: + 1. Add multiple attributes - one already exists so just try and add the second one + 2. Set passwordAdminDN attribute to an invalid value (ZZZZZ) + :expectedresults: + 1. The operation should fail + 2. 
The operation should fail + """ + + (admin_group, admin1_user, admin2_user) = password_policy + # Add multiple attributes - one already exists so just try and add the second one + topology_st.standalone.config.set('passwordAdminDN', admin_group.dn) + with pytest.raises(ldap.OBJECT_CLASS_VIOLATION): + topology_st.standalone.config.add('passwordAdminDN', admin1_user.dn) + + # Attempt to set invalid DN + with pytest.raises(ldap.INVALID_SYNTAX): + topology_st.standalone.config.set('passwordAdminDN', 'zzzzzzzzzzzz') + +if __name__ == '__main__': + # Run isolated + # -s for DEBUG mode + CURRENT_FILE = os.path.realpath(__file__) + pytest.main("-s %s" % CURRENT_FILE) diff --git a/dirsrvtests/tests/suites/password/pwdModify_test.py b/dirsrvtests/tests/suites/password/pwdModify_test.py new file mode 100644 index 0000000..9e32823 --- /dev/null +++ b/dirsrvtests/tests/suites/password/pwdModify_test.py @@ -0,0 +1,282 @@ +# Copyright (C) 2018 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. +# --- END COPYRIGHT BLOCK --- +# +import pytest +import re +from ldap.controls import LDAPControl +from lib389._constants import * +from lib389.tasks import * +from lib389.utils import * +from lib389.topologies import topology_st as topo +from lib389.idm.user import UserAccounts, TEST_USER_PROPERTIES +from lib389.idm.organizationalunit import OrganizationalUnits +from lib389.pwpolicy import PwPolicyManager + +pytestmark = pytest.mark.tier1 + +DEBUGGING = os.getenv("DEBUGGING", default=False) +if DEBUGGING: + logging.getLogger(__name__).setLevel(logging.DEBUG) +else: + logging.getLogger(__name__).setLevel(logging.INFO) +log = logging.getLogger(__name__) + + +OLD_PASSWD = 'password' +NEW_PASSWD = 'newpassword' +SHORT_PASSWD = 'wd' +TESTPEOPLE_OU = "TestPeople_bug834047" +USER_ACI = '(targetattr="userpassword")(version 3.0; acl "pwp test"; allow (all) userdn="ldap:///self";)' + + +@pytest.fixture(scope="function") +def pwd_policy_setup(topo, request): + """ + Setup to set passwordStorageScheme as CLEAR + passwordHistory to on + passwordStorageScheme to SSHA + passwordHistory off + """ + log.info("Change the pwd storage type to clear and change the password once to refresh it(for the rest of tests") + topo.standalone.simple_bind_s(DN_DM, PASSWORD) + topo.standalone.config.set('passwordStorageScheme', 'CLEAR') + assert topo.standalone.passwd_s(user_2.dn, OLD_PASSWD, NEW_PASSWD) + topo.standalone.config.set('passwordHistory', 'on') + + def fin(): + topo.standalone.simple_bind_s(DN_DM, PASSWORD) + topo.standalone.config.set('passwordStorageScheme', 'SSHA') + topo.standalone.config.set('passwordHistory', 'off') + request.addfinalizer(fin) + + +def test_pwd_modify_with_different_operation(topo): + """Performing various password modify operation, + make sure that password is actually modified + + :id: e36d68a8-0960-48e4-932c-6c2f64abaebc + :setup: Standalone instance and TLS enabled + :steps: + 1. Attempt for Password change for an entry that does not exists + 2. Attempt for Password change for an entry that exists + 3. Attempt for Password change to old for an entry that exists + 4. Attempt for Password Change with Binddn as testuser but with wrong old password + 5. Attempt for Password Change with Binddn as testuser + 6. Attempt for Password Change without giving newpassword + 7. Checking password change Operation using a Non-Secure connection + 8. Testuser attempts to change password for testuser2(userPassword attribute is Set) + 9. 
Directory Manager attempts to change password for testuser2(userPassword attribute is Set) + 10. Create a password syntax policy. Attempt to change to password that violates that policy + 11. userPassword mod with control results in ber decode error + + :expectedresults: + 1. Operation should be successful + 2. Operation should be successful + 3. Operation should be successful + 4. Operation should not be successful + 5. Operation should be successful + 6. Operation should be successful + 7. Operation should not be successful + 8. Operation should not be successful + 9. Operation should be successful + 10. Operation should violates the policy + 11. Operation should be successful + """ + + topo.standalone.enable_tls() + os.environ["LDAPTLS_CACERTDIR"] = topo.standalone.get_ssca_dir() + users = UserAccounts(topo.standalone, DEFAULT_SUFFIX) + TEST_USER_PROPERTIES['userpassword'] = OLD_PASSWD + global user + user = users.create(properties=TEST_USER_PROPERTIES) + ous = OrganizationalUnits(topo.standalone, DEFAULT_SUFFIX) + ou = ous.get('people') + ou.add('aci', USER_ACI) + + with pytest.raises(ldap.NO_SUCH_OBJECT): + log.info("Attempt for Password change for an entry that does not exists") + assert topo.standalone.passwd_s('uid=testuser1,ou=People,dc=example,dc=com', OLD_PASSWD, NEW_PASSWD) + log.info("Attempt for Password change for an entry that exists") + assert topo.standalone.passwd_s(user.dn, OLD_PASSWD, NEW_PASSWD) + log.info("Attempt for Password change to old for an entry that exists") + assert topo.standalone.passwd_s(user.dn, NEW_PASSWD, OLD_PASSWD) + log.info("Attempt for Password Change with Binddn as testuser but with wrong old password") + topo.standalone.simple_bind_s(user.dn, OLD_PASSWD) + with pytest.raises(ldap.INVALID_CREDENTIALS): + topo.standalone.passwd_s(user.dn, NEW_PASSWD, NEW_PASSWD) + log.info("Attempt for Password Change with Binddn as testuser") + assert topo.standalone.passwd_s(user.dn, OLD_PASSWD, NEW_PASSWD) + log.info("Attempt for Password Change without giving newpassword") + assert topo.standalone.passwd_s(user.dn, None, OLD_PASSWD) + assert user.get_attr_val_utf8('uid') == 'testuser' + log.info("Change password to NEW_PASSWD i.e newpassword") + assert topo.standalone.passwd_s(user.dn, None, NEW_PASSWD) + assert topo.standalone.passwd_s(user.dn, NEW_PASSWD, None) + log.info("Check binding with old/new password") + password = [OLD_PASSWD, NEW_PASSWD] + for pass_val in password: + with pytest.raises(ldap.INVALID_CREDENTIALS): + topo.standalone.simple_bind_s(user.dn, pass_val) + log.info("Change password back to OLD_PASSWD i.e password") + topo.standalone.simple_bind_s(DN_DM, PASSWORD) + assert topo.standalone.passwd_s(user.dn, None, NEW_PASSWD) + log.info("Checking password change Operation using a Non-Secure connection") + conn = ldap.initialize("ldap://%s:%s" % (HOST_STANDALONE, PORT_STANDALONE)) + with pytest.raises(ldap.CONFIDENTIALITY_REQUIRED): + conn.passwd_s(user.dn, NEW_PASSWD, OLD_PASSWD) + log.info("Testuser attempts to change password for testuser2(userPassword attribute is Set)") + global user_2 + users = UserAccounts(topo.standalone, DEFAULT_SUFFIX) + user_2 = users.create(properties={ + 'uid': 'testuser2', + 'cn': 'testuser2', + 'sn': 'testuser2', + 'uidNumber': '3000', + 'gidNumber': '4000', + 'homeDirectory': '/home/testuser2', + 'userPassword': OLD_PASSWD + }) + + topo.standalone.simple_bind_s(user.dn, NEW_PASSWD) + with pytest.raises(ldap.INSUFFICIENT_ACCESS): + assert topo.standalone.passwd_s(user_2.dn, OLD_PASSWD, NEW_PASSWD) + 
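+    # Illustrative aside (not part of the original test): passwd_s() is
+    # python-ldap's wrapper around the RFC 3062 Password Modify extended
+    # operation, which is why the old/new password arguments may be passed as
+    # None above and why the self-write ACI on userpassword decides who may
+    # change whose password.  A minimal sketch of the same access check,
+    # assuming the connection is still bound as testuser:
+    #
+    #     with pytest.raises(ldap.INSUFFICIENT_ACCESS):
+    #         topo.standalone.passwd_s(user_2.dn, OLD_PASSWD, NEW_PASSWD)
+    #
+    # The Who Am I? extended operation (topo.standalone.whoami_s()) can be used
+    # to confirm which identity the connection is currently operating as.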
log.info("Directory Manager attempts to change password for testuser2(userPassword attribute is Set)") + topo.standalone.simple_bind_s(DN_DM, PASSWORD) + assert topo.standalone.passwd_s(user_2.dn, OLD_PASSWD, NEW_PASSWD) + log.info("Changing userPassword attribute to Undefined for testuser2") + topo.standalone.modify_s(user_2.dn, [(ldap.MOD_REPLACE, 'userPassword', None)]) + log.info("Testuser attempts to change password for testuser2(userPassword attribute is Undefined)") + with pytest.raises(ldap.INSUFFICIENT_ACCESS): + topo.standalone.simple_bind_s(user.dn, NEW_PASSWD) + assert topo.standalone.passwd_s(user_2.dn, None, NEW_PASSWD) + log.info("Directory Manager attempts to change password for testuser2(userPassword attribute is Undefined)") + topo.standalone.simple_bind_s(DN_DM, PASSWORD) + assert topo.standalone.passwd_s(user_2.dn, None, OLD_PASSWD) + log.info("Create a password syntax policy. Attempt to change to password that violates that policy") + topo.standalone.config.set('PasswordCheckSyntax', 'on') + with pytest.raises(ldap.CONSTRAINT_VIOLATION): + assert topo.standalone.passwd_s(user_2.dn, OLD_PASSWD, SHORT_PASSWD) + log.info("Reset password syntax policy") + topo.standalone.config.set('PasswordCheckSyntax', 'off') + log.info("userPassword mod with control results in ber decode error") + topo.standalone.simple_bind_s(DN_DM, PASSWORD) + assert topo.standalone.modify_ext_s(user.dn, [(ldap.MOD_REPLACE, 'userpassword', b'abcdefg')], + serverctrls=[LDAPControl('2.16.840.1.113730.3.4.2', 1, None)]) + log.info("Reseting the testuser's password") + topo.standalone.passwd_s(user.dn, 'abcdefg', NEW_PASSWD) + + +def test_pwd_modify_with_password_policy(topo, pwd_policy_setup): + """Performing various password modify operation, + with passwordStorageScheme as CLEAR + passwordHistory to on + + :id: 200bf0fd-20ab-4dde-849e-54067e98b917 + :setup: Standalone instance (TLS enabled) with pwd_policy_setup + :steps: + 1. Change the password and check that a new entry has been added to the history + 2. Try changing password to one stored in history + 3. Change the password several times in a row, and try binding after each change + 4. Try to bind using short password + + :expectedresults: + 1. Operation should be successful + 2. Operation should be unsuccessful + 3. Operation should be successful + 4. Operation should be unsuccessful + """ + log.info("Change the password and check that a new entry has been added to the history") + topo.standalone.passwd_s(user_2.dn, NEW_PASSWD, OLD_PASSWD) + regex = re.search('Z(.+)', user_2.get_attr_val_utf8('passwordhistory')) + assert NEW_PASSWD == regex.group(1) + log.info("Try changing password to one stored in history. Should fail") + with pytest.raises(ldap.CONSTRAINT_VIOLATION): + assert topo.standalone.passwd_s(user_2.dn, OLD_PASSWD, NEW_PASSWD) + log.info("Change the password several times in a row, and try binding after each change") + topo.standalone.passwd_s(user.dn, NEW_PASSWD, OLD_PASSWD) + assert topo.standalone.simple_bind_s(user.dn, OLD_PASSWD) + topo.standalone.passwd_s(user.dn, OLD_PASSWD, SHORT_PASSWD) + assert topo.standalone.simple_bind_s(user.dn, SHORT_PASSWD) + with pytest.raises(ldap.CONSTRAINT_VIOLATION): + topo.standalone.passwd_s(user.dn, SHORT_PASSWD, OLD_PASSWD) + + +def test_pwd_modify_with_subsuffix(topo): + """Performing various password modify operation. + + :id: 2255b4e6-3546-4ec5-84a5-cd8b3d894ac5 + :setup: Standalone instance (TLS enabled) + :steps: + 1. Add a new SubSuffix & password policy + 2. 
Add two New users under the SubEntry + 3. Change password of uid=test_user0,ou=TestPeople_bug834047,dc=example,dc=com to newpassword + 4. Try to delete password- case when password is specified + 5. Try to delete password- case when password is not specified + + :expectedresults: + 1. Operation should be successful + 2. Operation should be successful + 3. Operation should be successful + 4. Operation should be successful + 5. Operation should be successful + """ + + log.info("Add a new SubSuffix") + topo.standalone.simple_bind_s(DN_DM, PASSWORD) + + ous = OrganizationalUnits(topo.standalone, DEFAULT_SUFFIX) + ou_temp = ous.create(properties={'ou': TESTPEOPLE_OU}) + ou_temp.add('aci', USER_ACI) + + log.info("Add the container & create password policies") + policy = PwPolicyManager(topo.standalone) + policy.create_subtree_policy(ou_temp.dn, properties={ + 'passwordHistory': 'on', + 'passwordInHistory': '6', + 'passwordChange': 'on', + 'passwordStorageScheme': 'CLEAR'}) + + log.info("Add two New users under the SubEntry") + user = UserAccounts(topo.standalone, DEFAULT_SUFFIX, rdn='ou=TestPeople_bug834047') + test_user0 = user.create(properties={ + 'uid': 'test_user0', + 'cn': 'test0', + 'sn': 'test0', + 'uidNumber': '3002', + 'gidNumber': '4002', + 'homeDirectory': '/home/test_user0', + 'userPassword': OLD_PASSWD + }) + + test_user1 = user.create(properties={ + 'uid': 'test_user1', + 'cn': 'test1', + 'sn': 'test1', + 'uidNumber': '3003', + 'gidNumber': '4003', + 'homeDirectory': '/home/test_user3', + 'userPassword': OLD_PASSWD + }) + + log.info("Changing password of {} to newpassword".format(test_user0.dn)) + test_user0.rebind(OLD_PASSWD) + test_user0.reset_password(NEW_PASSWD) + test_user0.rebind(NEW_PASSWD) + + log.info("Try to delete password- case when password is specified") + test_user0.remove('userPassword', NEW_PASSWD) + + test_user1.rebind(OLD_PASSWD) + log.info("Try to delete password- case when password is not specified") + test_user1.remove_all('userPassword') + + +if __name__ == '__main__': + # Run isolated + # -s for DEBUG mode + CURRENT_FILE = os.path.realpath(__file__) + pytest.main(["-s", CURRENT_FILE]) diff --git a/dirsrvtests/tests/suites/password/pwdPolicy_attribute_test.py b/dirsrvtests/tests/suites/password/pwdPolicy_attribute_test.py new file mode 100644 index 0000000..b37eff7 --- /dev/null +++ b/dirsrvtests/tests/suites/password/pwdPolicy_attribute_test.py @@ -0,0 +1,260 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2016 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. 
+# --- END COPYRIGHT BLOCK --- +# +import pytest +from lib389.tasks import * +from lib389.utils import * +from lib389.topologies import topology_st +from lib389.pwpolicy import PwPolicyManager +from lib389.idm.user import UserAccount, UserAccounts, TEST_USER_PROPERTIES +from lib389.idm.organizationalunit import OrganizationalUnits +from lib389._constants import (DEFAULT_SUFFIX, DN_DM, PASSWORD) + +pytestmark = pytest.mark.tier1 + +OU_PEOPLE = 'ou=people,{}'.format(DEFAULT_SUFFIX) +TEST_USER_NAME = 'simplepaged_test' +TEST_USER_DN = 'uid={},{}'.format(TEST_USER_NAME, OU_PEOPLE) +TEST_USER_PWD = 'simplepaged_test' +PW_POLICY_CONT_USER = 'cn="cn=nsPwPolicyEntry,uid=simplepaged_test,' \ + 'ou=people,dc=example,dc=com",' \ + 'cn=nsPwPolicyContainer,ou=people,dc=example,dc=com' +PW_POLICY_CONT_PEOPLE = 'cn="cn=nsPwPolicyEntry,' \ + 'ou=people,dc=example,dc=com",' \ + 'cn=nsPwPolicyContainer,ou=people,dc=example,dc=com' + +logging.getLogger(__name__).setLevel(logging.INFO) +log = logging.getLogger(__name__) + + +@pytest.fixture(scope="module") +def create_user(topology_st, request): + """User for binding operation""" + topology_st.standalone.config.set('nsslapd-auditlog-logging-enabled', 'on') + log.info('Adding test user {}') + users = UserAccounts(topology_st.standalone, OU_PEOPLE, rdn=None) + user_props = TEST_USER_PROPERTIES.copy() + user_props.update({'uid': TEST_USER_NAME, 'userpassword': TEST_USER_PWD}) + try: + user = users.create(properties=user_props) + except: + pass # debug only + + USER_ACI = '(targetattr="*")(version 3.0; acl "pwp test"; allow (all) userdn="ldap:///%s";)' % user.dn + ous = OrganizationalUnits(topology_st.standalone, DEFAULT_SUFFIX) + ou_people = ous.get('people') + ou_people.add('aci', USER_ACI) + + def fin(): + log.info('Deleting user {}'.format(user.dn)) + topology_st.standalone.simple_bind_s(DN_DM, PASSWORD) + + request.addfinalizer(fin) + + +@pytest.fixture(scope="module") +def password_policy(topology_st, create_user): + """Set up password policy for subtree and user""" + + pwp = PwPolicyManager(topology_st.standalone) + policy_props = {} + log.info('Create password policy for subtree {}'.format(OU_PEOPLE)) + pwp.create_subtree_policy(OU_PEOPLE, policy_props) + + log.info('Create password policy for user {}'.format(TEST_USER_DN)) + pwp.create_user_policy(TEST_USER_DN, policy_props) + + +@pytest.mark.skipif(ds_is_older('1.4.3.3'), reason="Not implemented") +def test_pwd_reset(topology_st, create_user): + """Test new password policy attribute "pwdReset" + + :id: 03db357b-4800-411e-a36e-28a534293004 + :setup: Standalone instance + :steps: + 1. Enable passwordMustChange + 2. Reset user's password + 3. Check that the pwdReset attribute is set to TRUE + 4. Bind as the user and change its password + 5. Check that pwdReset is now set to FALSE + 6. Reset password policy configuration + :expected results: + 1. Success + 2. Success + 3. Success + 4. Success + 5. Success + 6. 
Success + """ + + # Set password policy config + topology_st.standalone.config.replace('passwordMustChange', 'on') + time.sleep(.5) + + # Reset user's password + our_user = UserAccount(topology_st.standalone, TEST_USER_DN) + our_user.replace('userpassword', PASSWORD) + + # Check that pwdReset is TRUE + assert our_user.get_attr_val_utf8('pwdReset') == 'TRUE' + + # Bind as user and change its own password + our_user.rebind(PASSWORD) + our_user.replace('userpassword', PASSWORD) + + # Check that pwdReset is FALSE + topology_st.standalone.simple_bind_s(DN_DM, PASSWORD) + assert our_user.get_attr_val_utf8('pwdReset') == 'FALSE' + + # Reset password policy config + topology_st.standalone.config.replace('passwordMustChange', 'off') + + +@pytest.mark.parametrize('subtree_pwchange,user_pwchange,exception', + [('on', 'off', ldap.UNWILLING_TO_PERFORM), + ('off', 'off', ldap.UNWILLING_TO_PERFORM), + ('off', 'on', False), ('on', 'on', False)]) +def test_change_pwd(topology_st, create_user, password_policy, + subtree_pwchange, user_pwchange, exception): + """Verify that 'passwordChange' attr works as expected + User should have a priority over a subtree. + + :id: 2c884432-2ba1-4662-8e5d-2cd49f77e5fa + :parametrized: yes + :setup: Standalone instance, a test user, + password policy entries for a user and a subtree + :steps: + 1. Set passwordChange on the user and the subtree + to various combinations + 2. Bind as test user + 3. Try to change password + 4. Clean up - change the password to default while bound as DM + :expectedresults: + 1. passwordChange should be successfully set + 2. Bind should be successful + 3. Subtree/User passwordChange - result, accordingly: + off/on, on/on - success; + on/off, off/off - UNWILLING_TO_PERFORM + 4. Operation should be successful + """ + + users = UserAccounts(topology_st.standalone, OU_PEOPLE, rdn=None) + user = users.get(TEST_USER_NAME) + + log.info('Set passwordChange to "{}" - {}'.format(subtree_pwchange, OU_PEOPLE)) + pwp = PwPolicyManager(topology_st.standalone) + subtree_policy = pwp.get_pwpolicy_entry(OU_PEOPLE) + subtree_policy.set('passwordChange', subtree_pwchange) + + time.sleep(1) + + log.info('Set passwordChange to "{}" - {}'.format(user_pwchange, TEST_USER_DN)) + pwp2 = PwPolicyManager(topology_st.standalone) + user_policy = pwp2.get_pwpolicy_entry(TEST_USER_DN) + user_policy.set('passwordChange', user_pwchange) + user_policy.set('passwordExp', 'on') + + time.sleep(1) + + try: + log.info('Bind as user and modify userPassword') + user.rebind(TEST_USER_PWD) + if exception: + with pytest.raises(exception): + user.reset_password('new_pass') + else: + user.reset_password('new_pass') + except ldap.LDAPError as e: + log.error('Failed to change userpassword for {}: error {}'.format( + TEST_USER_DN, e.message['info'])) + raise e + finally: + log.info('Bind as DM') + topology_st.standalone.simple_bind_s(DN_DM, PASSWORD) + user.reset_password(TEST_USER_PWD) + + +def test_pwd_min_age(topology_st, create_user, password_policy): + """If we set passwordMinAge to some value, for example to 10, then it + should not allow the user to change the password within 10 seconds after + his previous change. + + :id: 85b98516-8c82-45bd-b9ec-90bd1245e09c + :setup: Standalone instance, a test user, + password policy entries for a user and a subtree + :steps: + 1. Set passwordMinAge to 10 on the user pwpolicy entry + 2. Set passwordMinAge to 10 on the subtree pwpolicy entry + 3. Set passwordMinAge to 10 on the cn=config entry + 4. Bind as test user + 5. 
Try to change the password two times in a row + 6. Wait 12 seconds + 7. Try to change the password + 8. Clean up - change the password to default while bound as DM + :expectedresults: + 1. passwordMinAge should be successfully set on the user pwpolicy entry + 2. passwordMinAge should be successfully set on the subtree pwpolicy entry + 3. passwordMinAge should be successfully set on the cn=config entry + 4. Bind should be successful + 5. The password should be successfully changed + 6. 12 seconds have passed + 7. Constraint Violation error should be raised + 8. Operation should be successful + """ + + num_seconds = '10' + users = UserAccounts(topology_st.standalone, OU_PEOPLE, rdn=None) + user = users.get(TEST_USER_NAME) + + log.info('Set passwordminage to "{}" - {}'.format(num_seconds, OU_PEOPLE)) + pwp = PwPolicyManager(topology_st.standalone) + subtree_policy = pwp.get_pwpolicy_entry(OU_PEOPLE) + subtree_policy.set('passwordminage', num_seconds) + + log.info('Set passwordminage to "{}" - {}'.format(num_seconds, TEST_USER_DN)) + user_policy = pwp.get_pwpolicy_entry(TEST_USER_DN) + user_policy.set('passwordminage', num_seconds) + + log.info('Set passwordminage to "{}" - {}'.format(num_seconds, DN_CONFIG)) + topology_st.standalone.config.set('passwordminage', num_seconds) + + time.sleep(1) + + log.info('Bind as user and modify userPassword') + user.rebind(TEST_USER_PWD) + user.reset_password('new_pass') + + time.sleep(1) + + log.info('Bind as user and modify userPassword straight away after previous change') + user.rebind('new_pass') + with pytest.raises(ldap.CONSTRAINT_VIOLATION): + user.reset_password('new_new_pass') + + log.info('Wait {} second'.format(int(num_seconds) + 2)) + time.sleep(int(num_seconds) + 2) + + try: + log.info('Bind as user and modify userPassword') + user.rebind('new_pass') + user.reset_password(TEST_USER_PWD) + except ldap.LDAPError as e: + log.error('Failed to change userpassword for {}: error {}'.format( + TEST_USER_DN, e.message['info'])) + raise e + finally: + log.info('Bind as DM') + topology_st.standalone.simple_bind_s(DN_DM, PASSWORD) + user.reset_password(TEST_USER_PWD) + + +if __name__ == '__main__': + # Run isolated + # -s for DEBUG mode + CURRENT_FILE = os.path.realpath(__file__) + pytest.main("-s %s" % CURRENT_FILE) diff --git a/dirsrvtests/tests/suites/password/pwdPolicy_controls_test.py b/dirsrvtests/tests/suites/password/pwdPolicy_controls_test.py new file mode 100644 index 0000000..ab1974c --- /dev/null +++ b/dirsrvtests/tests/suites/password/pwdPolicy_controls_test.py @@ -0,0 +1,292 @@ +import logging +import pytest +import os +import ldap +import time +from ldap.controls.ppolicy import PasswordPolicyControl +from lib389.topologies import topology_st as topo +from lib389.idm.user import UserAccounts +from lib389._constants import (DN_DM, PASSWORD, DEFAULT_SUFFIX) +from lib389.idm.organizationalunit import OrganizationalUnits + +pytestmark = pytest.mark.tier1 + +DEBUGGING = os.getenv("DEBUGGING", default=False) +if DEBUGGING: + logging.getLogger(__name__).setLevel(logging.DEBUG) +else: + logging.getLogger(__name__).setLevel(logging.INFO) +log = logging.getLogger(__name__) + +USER_DN = 'uid=test entry,ou=people,dc=example,dc=com' +USER_PW = b'password123' +USER_ACI = '(targetattr="userpassword")(version 3.0; acl "pwp test"; allow (all) userdn="ldap:///self";)' + + +@pytest.fixture +def init_user(topo, request): + """Initialize a user - Delete and re-add test user + """ + try: + topo.standalone.simple_bind_s(DN_DM, PASSWORD) + users = 
UserAccounts(topo.standalone, DEFAULT_SUFFIX) + user = users.get('test entry') + user.delete() + except ldap.NO_SUCH_OBJECT: + pass + except ldap.LDAPError as e: + log.error("Failed to delete user, error: {}".format(e.message['desc'])) + assert False + + user_data = {'uid': 'test entry', + 'cn': 'test entry', + 'sn': 'test entry', + 'uidNumber': '3000', + 'gidNumber': '4000', + 'homeDirectory': '/home/test_entry', + 'userPassword': USER_PW} + users.create(properties=user_data) + + +def change_passwd(topo): + """Reset users password as the user, then re-bind as Directory Manager + """ + users = UserAccounts(topo.standalone, DEFAULT_SUFFIX) + user = users.get('test entry') + user.rebind(USER_PW) + user.reset_password(USER_PW) + topo.standalone.simple_bind_s(DN_DM, PASSWORD) + + +def bind_and_get_control(topo, err=0): + """Bind as the user, and return any controls + """ + res_type = res_data = res_msgid = res_ctrls = None + result_id = '' + + try: + result_id = topo.standalone.simple_bind(USER_DN, USER_PW, + serverctrls=[PasswordPolicyControl()]) + res_type, res_data, res_msgid, res_ctrls = topo.standalone.result3(result_id) + if err: + log.fatal('Expected an error, but bind succeeded') + assert False + except ldap.LDAPError as e: + if err: + log.debug('Got expected error: {}'.format(str(e))) + pass + else: + log.fatal('Did not expect an error: {}'.format(str(e))) + assert False + + if DEBUGGING and res_ctrls and len(res_ctrls) > 0: + for ctl in res_ctrls: + if ctl.timeBeforeExpiration: + log.debug('control time before expiration: {}'.format(ctl.timeBeforeExpiration)) + if ctl.graceAuthNsRemaining: + log.debug('control grace login remaining: {}'.format(ctl.graceAuthNsRemaining)) + if ctl.error is not None and ctl.error >= 0: + log.debug('control error: {}'.format(ctl.error)) + + topo.standalone.simple_bind_s(DN_DM, PASSWORD) + return res_ctrls + + +def test_pwd_must_change(topo, init_user): + """Test for expiration control when password must be changed because an + admin reset the password + + :id: a3d99be5-0b69-410d-b72f-04eda8821a56 + :setup: Standalone instance, a user for testing + :steps: + 1. Configure password policy and reset password as admin + 2. Bind, and check for expired control withthe proper error code "2" + :expectedresults: + 1. Config update succeeds, adn the password is reset + 2. 
The EXPIRED control is returned, and we the expected error code "2" + """ + + log.info('Configure password policy with paswordMustChange set to "on"') + topo.standalone.config.set('passwordExp', 'on') + topo.standalone.config.set('passwordMaxAge', '200') + topo.standalone.config.set('passwordGraceLimit', '0') + topo.standalone.config.set('passwordWarning', '199') + topo.standalone.config.set('passwordMustChange', 'on') + + ous = OrganizationalUnits(topo.standalone, DEFAULT_SUFFIX) + ou = ous.get('people') + ou.add('aci', USER_ACI) + + log.info('Reset userpassword as Directory Manager') + users = UserAccounts(topo.standalone, DEFAULT_SUFFIX) + user = users.get('test entry') + user.reset_password(USER_PW) + + log.info('Bind should return ctrl with error code 2 (changeAfterReset)') + time.sleep(2) + ctrls = bind_and_get_control(topo) + if ctrls and len(ctrls) > 0: + if ctrls[0].error is None: + log.fatal("Response ctrl error code not set") + assert False + elif ctrls[0].error != 2: + log.fatal("Got unexpected error code: {}".format(ctrls[0].error)) + assert False + else: + log.fatal("We did not get a response ctrl") + assert False + + +def test_pwd_expired_grace_limit(topo, init_user): + """Test for expiration control when password is expired, but there are + remaining grace logins + + :id: a3d99be5-0b69-410d-b72f-04eda8821a51 + :setup: Standalone instance, a user for testing + :steps: + 1. Configure password policy and reset password,adn allow it to expire + 2. Bind, and check for expired control, and grace limit + 3. Bind again, consuming the last grace login, control should be returned + 4. Bind again, it should fail, and no control returned + :expectedresults: + 1. Config update and password reset are successful + 2. The EXPIRED control is returned, and we get the expected number + of grace logins in the control + 3. The response control has the expected value for grace logins + 4. The bind fails with error 49, and no contorl is returned + """ + + log.info('Configure password policy with grace limit set tot 2') + topo.standalone.config.set('passwordExp', 'on') + topo.standalone.config.set('passwordMaxAge', '5') + topo.standalone.config.set('passwordGraceLimit', '2') + + log.info('Change password and wait for it to expire') + change_passwd(topo) + time.sleep(6) + + log.info('Bind and use up one grace login (only one left)') + ctrls = bind_and_get_control(topo) + if ctrls is None or len(ctrls) == 0: + log.fatal('Did not get EXPIRED control in resposne') + assert False + else: + if int(ctrls[0].graceAuthNsRemaining) != 1: + log.fatal('Got unexpected value for grace logins: {}'.format(ctrls[0].graceAuthNsRemaining)) + assert False + + log.info('Use up last grace login, should get control') + ctrls = bind_and_get_control(topo) + if ctrls is None or len(ctrls) == 0: + log.fatal('Did not get control in response') + assert False + + log.info('No grace login available, bind should fail, and no control should be returned') + ctrls = bind_and_get_control(topo, err=49) + if ctrls and len(ctrls) > 0: + log.fatal('Incorrectly got control in response') + assert False + + +def test_pwd_expiring_with_warning(topo, init_user): + """Test expiring control response before and after warning is sent + + :id: 3594431f-e681-4a04-8edb-33ad2d9dad5b + :setup: Standalone instance, a user for testing + :steps: + 1. Configure password policy, and reset password + 2. Check for EXPIRING control, and the "time to expire" + 3. 
Bind again, as a warning has now been sent, and check the "time to expire" + :expectedresults: + 1. Configuration update and password reset are successful + 2. Get the EXPIRING control, and the expected "time to expire" values + 3. Get the EXPIRING control, and the expected "time to expire" values + """ + + log.info('Configure password policy') + topo.standalone.config.set('passwordExp', 'on') + topo.standalone.config.set('passwordMaxAge', '50') + topo.standalone.config.set('passwordWarning', '50') + + log.info('Change password and get controls') + change_passwd(topo) + ctrls = bind_and_get_control(topo) + if ctrls is None or len(ctrls) == 0: + log.fatal('Did not get EXPIRING control in response') + assert False + + if int(ctrls[0].timeBeforeExpiration) < 50: + log.fatal('Got unexpected value for timeBeforeExpiration: {}'.format(ctrls[0].timeBeforeExpiration)) + assert False + + log.info('Warning has been sent, try the bind again, and recheck the expiring time') + time.sleep(5) + ctrls = bind_and_get_control(topo) + if ctrls is None or len(ctrls) == 0: + log.fatal('Did not get EXPIRING control in resposne') + assert False + + if int(ctrls[0].timeBeforeExpiration) > 50: + log.fatal('Got unexpected value for timeBeforeExpiration: {}'.format(ctrls[0].timeBeforeExpiration)) + assert False + + +def test_pwd_expiring_with_no_warning(topo, init_user): + """Test expiring control response when no warning is sent + + :id: a3d99be5-0b69-410d-b72f-04eda8821a54 + :setup: Standalone instance, a user for testing + :steps: + 1. Configure password policy, and reset password + 2. Bind, and check that no controls are returned + 3. Set passwordSendExpiringTime to "on", bind, and check that the + EXPIRING control is returned + :expectedresults: + 1. Configuration update and passwordreset are successful + 2. No control is returned from bind + 3. 
A control is returned after setting "passwordSendExpiringTime" + """ + + log.info('Configure password policy') + topo.standalone.config.set('passwordExp', 'on') + topo.standalone.config.set('passwordMaxAge', '50') + topo.standalone.config.set('passwordWarning', '5') + + log.info('When the warning is less than the max age, we never send expiring control response') + change_passwd(topo) + ctrls = bind_and_get_control(topo) + if len(ctrls) > 0: + log.fatal('Incorrectly got a response control: {}'.format(ctrls)) + assert False + + log.info('Turn on sending expiring control regardless of warning') + topo.standalone.config.set('passwordSendExpiringTime', 'on') + + ctrls = bind_and_get_control(topo) + if ctrls is None or len(ctrls) == 0: + log.fatal('Did not get EXPIRED control in response') + assert False + + if int(ctrls[0].timeBeforeExpiration) < 49: + log.fatal('Got unexpected value for time before expiration: {}'.format(ctrls[0].timeBeforeExpiration)) + assert False + + log.info('Check expiring time again') + time.sleep(6) + ctrls = bind_and_get_control(topo) + if ctrls is None or len(ctrls) == 0: + log.fatal('Did not get EXPIRED control in resposne') + assert False + + if int(ctrls[0].timeBeforeExpiration) > 51: + log.fatal('Got unexpected value for time before expiration: {}'.format(ctrls[0].timeBeforeExpiration)) + assert False + + log.info('Turn off sending expiring control (restore the default setting)') + topo.standalone.config.set('passwordSendExpiringTime', 'off') + +if __name__ == '__main__': + # Run isolated + # -s for DEBUG mode + CURRENT_FILE = os.path.realpath(__file__) + pytest.main("-s %s" % CURRENT_FILE) diff --git a/dirsrvtests/tests/suites/password/pwdPolicy_inherit_global_test.py b/dirsrvtests/tests/suites/password/pwdPolicy_inherit_global_test.py new file mode 100644 index 0000000..1fe57c3 --- /dev/null +++ b/dirsrvtests/tests/suites/password/pwdPolicy_inherit_global_test.py @@ -0,0 +1,212 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2019 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. 
+# --- END COPYRIGHT BLOCK --- +# +import logging +import time + +import ldap +import pytest +from lib389.utils import * +from lib389._constants import * +from lib389.pwpolicy import PwPolicyManager +from lib389.topologies import topology_st +from lib389.idm.organizationalunit import OrganizationalUnits +from lib389.idm.user import UserAccounts, TEST_USER_PROPERTIES + +pytestmark = pytest.mark.tier1 + +logging.getLogger(__name__).setLevel(logging.INFO) +log = logging.getLogger(__name__) + +OU_PEOPLE = 'ou=People,' + DEFAULT_SUFFIX +ATTR_INHERIT_GLOBAL = 'nsslapd-pwpolicy-inherit-global' +ATTR_CHECK_SYNTAX = 'passwordCheckSyntax' + +BN = 'uid=buser,' + OU_PEOPLE +TEMP_USER = 'cn=test{}' +TEMP_USER_DN = '%s,%s' % (TEMP_USER, OU_PEOPLE) + + +@pytest.fixture(scope="module") +def create_user(topology_st, request): + """User for binding operation""" + + log.info('Adding user {}'.format(BN)) + + users = UserAccounts(topology_st.standalone, OU_PEOPLE, rdn=None) + user_props = TEST_USER_PROPERTIES.copy() + user_props.update({'uid': 'buser', 'cn': 'buser', 'userpassword': PASSWORD}) + user = users.create(properties=user_props) + + log.info('Adding an aci for the bind user') + BN_ACI = '(targetattr="*")(version 3.0; acl "pwp test"; allow (all) userdn="ldap:///%s";)' % user.dn + ous = OrganizationalUnits(topology_st.standalone, DEFAULT_SUFFIX) + ou_people = ous.get('people') + ou_people.add('aci', BN_ACI) + + def fin(): + log.info('Deleting user {}'.format(BN)) + user.delete() + ous = OrganizationalUnits(topology_st.standalone, DEFAULT_SUFFIX) + ou_people = ous.get('people') + ou_people.remove('aci', BN_ACI) + + request.addfinalizer(fin) + + +@pytest.fixture(scope="module") +def password_policy(topology_st, create_user): + """Set global password policy. + Then, set fine-grained subtree level password policy + to ou=People with no password syntax. + + Note: do not touch nsslapd-pwpolicy-inherit-global -- off by default + """ + + log.info('Enable fine-grained policy') + pwp = PwPolicyManager(topology_st.standalone) + policy_props = { + 'passwordMustChange': 'off', + 'passwordExp': 'off', + 'passwordMinAge': '0', + 'passwordChange': 'off', + 'passwordStorageScheme': 'ssha' + } + pwp.create_subtree_policy(OU_PEOPLE, policy_props) + check_attr_val(topology_st.standalone, ATTR_INHERIT_GLOBAL, 'off') + check_attr_val(topology_st.standalone, ATTR_CHECK_SYNTAX, 'off') + + +def check_attr_val(inst, attr, expected): + """Check that entry has the value""" + + val = inst.config.get_attr_val_utf8(attr) + assert val == expected, 'Default value of %s is not %s, but %s' % ( + attr, expected, val) + + log.info('Default value of %s is %s' % (attr, expected)) + + +@pytest.mark.parametrize('inherit_value,checksyntax_value', + [('off', 'off'), ('on', 'off'), ('off', 'on')]) +def test_entry_has_no_restrictions(topology_st, password_policy, create_user, + inherit_value, checksyntax_value): + """Make sure an entry added to ou=people has no password syntax restrictions + + :id: 2f07ff40-76ca-45a9-a556-331c94084945 + :parametrized: yes + :setup: Standalone instance, test user, + password policy entries for a subtree + :steps: + 1. Bind as test user + 2. Set 'nsslapd-pwpolicy-inherit-global' and + 'passwordCheckSyntax' accordingly: + 'off' and 'off'; 'on' and 'off'; 'off' and 'on' + 3. Try to add user with a short password + 4. Cleanup - remove temp user bound as DM + :expectedresults: + 1. Bind should be successful + 2. Attributes should be successfully set + 3. No exceptions should occur + 4. 
Operation should be successful + """ + + log.info('Set {} to {}'.format(ATTR_INHERIT_GLOBAL, inherit_value)) + log.info('Set {} to {}'.format(ATTR_CHECK_SYNTAX, checksyntax_value)) + topology_st.standalone.config.set(ATTR_INHERIT_GLOBAL, inherit_value) + topology_st.standalone.config.set(ATTR_CHECK_SYNTAX, checksyntax_value) + + # Wait a second for cn=config to apply + time.sleep(1) + check_attr_val(topology_st.standalone, ATTR_INHERIT_GLOBAL, inherit_value) + check_attr_val(topology_st.standalone, ATTR_CHECK_SYNTAX, checksyntax_value) + + log.info('Bind as test user') + topology_st.standalone.simple_bind_s(BN, PASSWORD) + + log.info('Make sure an entry added to ou=people has ' + 'no password syntax restrictions.') + + users = UserAccounts(topology_st.standalone, OU_PEOPLE, rdn=None) + user_props = TEST_USER_PROPERTIES.copy() + user_props.update({'cn': 'test0', 'userpassword': 'short'}) + user = users.create(properties=user_props) + + topology_st.standalone.simple_bind_s(DN_DM, PASSWORD) + + # Remove test user + user.delete() + + +def test_entry_has_restrictions(topology_st, password_policy, create_user): + """Set 'nsslapd-pwpolicy-inherit-global: on' and 'passwordCheckSyntax: on'. + Make sure that syntax rules work, if set them at both: cn=config and + ou=people policy container. + + :id: 4bb0f474-17c1-40f7-aab4-4ddc17d019e8 + :setup: Standalone instance, test user, + password policy entries for a subtree + :steps: + 1. Bind as test user + 2. Switch 'nsslapd-pwpolicy-inherit-global: on' + 3. Switch 'passwordCheckSyntax: on' + 4. Set 'passwordMinLength: 9' to: + cn=config and ou=people policy container + 5. Try to add user with a short password (<9) + 6. Try to add user with a long password (>9) + 7. Cleanup - remove temp users bound as DM + :expectedresults: + 1. Bind should be successful + 2. nsslapd-pwpolicy-inherit-global should be successfully set + 3. passwordCheckSyntax should be successfully set + 4. passwordMinLength should be successfully set + 5. User should be rejected + 6. User should be rejected + 7. 
Operation should be successful + """ + + log.info('Set {} to {}'.format(ATTR_INHERIT_GLOBAL, 'on')) + log.info('Set {} to {}'.format(ATTR_CHECK_SYNTAX, 'on')) + topology_st.standalone.config.set(ATTR_INHERIT_GLOBAL, 'on') + topology_st.standalone.config.set(ATTR_CHECK_SYNTAX, 'on') + + pwp = PwPolicyManager(topology_st.standalone) + policy = pwp.get_pwpolicy_entry(OU_PEOPLE) + policy.set('passwordMinLength', '9') + + # Wait a second for cn=config to apply + time.sleep(1) + check_attr_val(topology_st.standalone, ATTR_INHERIT_GLOBAL, 'on') + check_attr_val(topology_st.standalone, ATTR_CHECK_SYNTAX, 'on') + + log.info('Bind as test user') + topology_st.standalone.simple_bind_s(BN, PASSWORD) + users = UserAccounts(topology_st.standalone, OU_PEOPLE, rdn=None) + user_props = TEST_USER_PROPERTIES.copy() + + log.info('Try to add user with a short password (<9)') + with pytest.raises(ldap.CONSTRAINT_VIOLATION): + user_props.update({'cn': 'test0', 'userpassword': 'short'}) + user = users.create(properties=user_props) + + log.info('Try to add user with a long password (>9)') + user_props.update({'cn': 'test1', 'userpassword': 'Reallylong1'}) + user = users.create(properties=user_props) + + log.info('Bind as DM user') + topology_st.standalone.simple_bind_s(DN_DM, PASSWORD) + + # Remove test user 1 + user.delete() + + +if __name__ == '__main__': + # Run isolated + # -s for DEBUG mode + CURRENT_FILE = os.path.realpath(__file__) + pytest.main("-s %s" % CURRENT_FILE) diff --git a/dirsrvtests/tests/suites/password/pwdPolicy_syntax_test.py b/dirsrvtests/tests/suites/password/pwdPolicy_syntax_test.py new file mode 100644 index 0000000..82d1a97 --- /dev/null +++ b/dirsrvtests/tests/suites/password/pwdPolicy_syntax_test.py @@ -0,0 +1,292 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2016 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. +# --- END COPYRIGHT BLOCK --- +# +import logging + +import pytest +from lib389.tasks import * +from lib389.utils import * +from lib389.topologies import topology_st +from lib389._constants import DEFAULT_SUFFIX, PASSWORD, DN_DM +from lib389.idm.user import UserAccounts +from lib389.idm.organizationalunit import OrganizationalUnits + +pytestmark = pytest.mark.tier1 + +USER_DN = 'uid=user,ou=People,%s' % DEFAULT_SUFFIX +USER_RDN = 'user' +USER_ACI = '(targetattr="userpassword")(version 3.0; acl "pwp test"; allow (all) userdn="ldap:///self";)' + +logging.getLogger(__name__).setLevel(logging.INFO) +log = logging.getLogger(__name__) + + +@pytest.fixture(scope="module") +def password_policy(topology_st): + """Set global password policy""" + + log.info('Enable global password policy. 
Check for syntax.') + topology_st.standalone.config.set('passwordCheckSyntax', 'on') + topology_st.standalone.config.set('nsslapd-pwpolicy-local', 'off') + topology_st.standalone.config.set('passwordMinCategories', '1') + + +@pytest.fixture(scope="module") +def create_user(topology_st): + """Create the test user.""" + users = UserAccounts(topology_st.standalone, DEFAULT_SUFFIX) + users.create(properties={ + 'uid': USER_RDN, + 'cn': USER_RDN, + 'sn': USER_RDN, + 'uidNumber': '3000', + 'gidNumber': '4000', + 'homeDirectory': '/home/user', + 'description': 'd_e_s_c', + 'userPassword': PASSWORD + }) + + +def setPolicy(inst, attr, value): + """Bind as Root DN, set policy, and then bind as user""" + + inst.simple_bind_s(DN_DM, PASSWORD) + + # Set the policy value + value = str(value) + inst.config.set(attr, value) + + inst.simple_bind_s(USER_DN, PASSWORD) + + +def resetPasswd(inst): + """Reset the user password for the next test""" + + # First, bind as the ROOT DN so we can set the password + inst.simple_bind_s(DN_DM, PASSWORD) + + # Now set the password + users = UserAccounts(inst, DEFAULT_SUFFIX) + user = users.get(USER_RDN) + user.reset_password(PASSWORD) + + +def tryPassword(inst, policy_attr, value, reset_value, pw_bad, pw_good, msg): + """Attempt to change the users password + inst: DirSrv Object + password: password + msg - error message if failure + """ + + setPolicy(inst, policy_attr, value) + users = UserAccounts(inst, DEFAULT_SUFFIX) + user = users.get(USER_RDN) + try: + user.reset_password(pw_bad) + log.fatal('Invalid password was unexpectedly accepted (%s)' % + (policy_attr)) + assert False + except ldap.CONSTRAINT_VIOLATION: + log.info('Invalid password correctly rejected by %s: %s' % + (policy_attr, msg)) + pass + except ldap.LDAPError as e: + log.fatal("Failed to change password: " + str(e)) + assert False + + # Change password that is allowed + user.reset_password(pw_good) + + # Reset for the next test + resetPasswd(inst) + setPolicy(inst, policy_attr, reset_value) + + +def test_basic(topology_st, create_user, password_policy): + """Ensure that on a password change, the policy syntax + is enforced correctly. + + :id: e8de7029-7fa6-4e96-9eb6-4a121f4c8fb3 + :setup: Standalone instance, a test user, + global password policy with: + passwordCheckSyntax - on; nsslapd-pwpolicy-local - off; + passwordMinCategories - 1 + :steps: + 1. Set passwordMinLength to 10 in cn=config + 2. Set userPassword to 'passwd' in cn=config + 3. Set userPassword to 'password123' in cn=config + 4. Set passwordMinLength to 2 in cn=config + 5. Set passwordMinDigits to 2 in cn=config + 6. Set userPassword to 'passwd' in cn=config + 7. Set userPassword to 'password123' in cn=config + 8. Set passwordMinDigits to 0 in cn=config + 9. Set passwordMinAlphas to 2 in cn=config + 10. Set userPassword to 'p123456789' in cn=config + 11. Set userPassword to 'password123' in cn=config + 12. Set passwordMinAlphas to 0 in cn=config + 13. Set passwordMaxRepeats to 2 in cn=config + 14. Set userPassword to 'password' in cn=config + 15. Set userPassword to 'password123' in cn=config + 16. Set passwordMaxRepeats to 0 in cn=config + 17. Set passwordMinSpecials to 2 in cn=config + 18. Set userPassword to 'passwd' in cn=config + 19. Set userPassword to 'password_#$' in cn=config + 20. Set passwordMinSpecials to 0 in cn=config + 21. Set passwordMinLowers to 2 in cn=config + 22. Set userPassword to 'PASSWORD123' in cn=config + 23. Set userPassword to 'password123' in cn=config + 24. Set passwordMinLowers to 0 in cn=config + 25. 
Set passwordMinUppers to 2 in cn=config + 26. Set userPassword to 'password' in cn=config + 27. Set userPassword to 'PASSWORD' in cn=config + 28. Set passwordMinUppers to 0 in cn=config + 29. Test passwordDictCheck + 30. Test passwordPalindrome + 31. Test passwordMaxSequence for forward number sequence + 32. Test passwordMaxSequence for backward number sequence + 33. Test passwordMaxSequence for forward alpha sequence + 34. Test passwordMaxSequence for backward alpha sequence + 35. Test passwordMaxClassChars for digits + 36. Test passwordMaxClassChars for specials + 37. Test passwordMaxClassChars for lowers + 38. Test passwordMaxClassChars for uppers + 39. Test passwordBadWords using 'redhat' and 'fedora' + 40. Test passwordUserAttrs using description attribute + + :expectedresults: + 1. passwordMinLength should be successfully set + 2. Password should be rejected because length too short + 3. Password should be accepted + 4. passwordMinLength should be successfully set + 5. passwordMinDigits should be successfully set + 6. Password should be rejected because + it does not contain minimum number of digits + 7. Password should be accepted + 8. passwordMinDigits should be successfully set + 9. passwordMinAlphas should be successfully set + 10. Password should be rejected because + it does not contain minimum number of alphas + 11. Password should be accepted + 12. passwordMinAlphas should be successfully set + 13. passwordMaxRepeats should be successfully set + 14. Password should be rejected because too many repeating characters + 15. Password should be accepted + 16. passwordMaxRepeats should be successfully set + 17. passwordMinSpecials should be successfully set + 18. Password should be rejected because + it does not contain minimum number of special characters + 19. Password should be accepted + 20. passwordMinSpecials should be successfully set + 21. passwordMinLowers should be successfully set + 22. Password should be rejected because + it does not contain minimum number of lowercase characters + 23. Password should be accepted + 24. passwordMinLowers should be successfully set + 25. passwordMinUppers should be successfully set + 26. Password should be rejected because + it does not contain minimum number of lowercase characters + 27. Password should be accepted + 28. passwordMinUppers should be successfully set + 29. The passwordDictCheck test succeeds + 30. The passwordPalindrome test succeeds + 31. Test passwordMaxSequence for forward number sequence succeeds + 32. Test passwordMaxSequence for backward number sequence succeeds + 33. Test passwordMaxSequence for forward alpha sequence succeeds + 34. Test passwordMaxSequence for backward alpha sequence succeeds + 35. Test passwordMaxClassChars for digits succeeds + 36. Test passwordMaxClassChars for specials succeeds + 37. Test passwordMaxClassChars for lowers succeeds + 38. Test passwordMaxClassChars for uppers succeeds + 39. The passwordBadWords test succeeds + 40. 
The passwordUserAttrs test succeeds + """ + + # + # Test each syntax category + # + ous = OrganizationalUnits(topology_st.standalone, DEFAULT_SUFFIX) + ou = ous.get('people') + ou.add('aci', USER_ACI) + + # Min Length + tryPassword(topology_st.standalone, 'passwordMinLength', 10, 2, 'passwd', + 'password123', 'length too short') + # Min Digit + tryPassword(topology_st.standalone, 'passwordMinDigits', 2, 0, 'passwd', + 'password123', 'does not contain minimum number of digits') + # Min Alphas + tryPassword(topology_st.standalone, 'passwordMinAlphas', 2, 0, 'p123456789', + 'password123', 'does not contain minimum number of alphas') + # Max Repeats + tryPassword(topology_st.standalone, 'passwordMaxRepeats', 2, 0, 'passsword', + 'password123', 'too many repeating characters') + # Min Specials + tryPassword(topology_st.standalone, 'passwordMinSpecials', 2, 0, 'passwd', + 'password_#$', + 'does not contain minimum number of special characters') + # Min Lowers + tryPassword(topology_st.standalone, 'passwordMinLowers', 2, 0, 'PASSWORD123', + 'password123', + 'does not contain minimum number of lowercase characters') + # Min Uppers + tryPassword(topology_st.standalone, 'passwordMinUppers', 2, 0, 'password', + 'PASSWORD', + 'does not contain minimum number of lowercase characters') + # Min 8-bits - "ldap" package only accepts ascii strings at the moment + + if ds_is_newer('1.4.0.13'): + # Dictionary check + tryPassword(topology_st.standalone, 'passwordDictCheck', 'on', 'on', 'PASSWORD', + '13_#Kad472h', 'Password found in dictionary') + + # Palindromes + tryPassword(topology_st.standalone, 'passwordPalindrome', 'on', 'on', 'Za12_#_21aZ', + '13_#Kad472h', 'Password is palindrome') + + # Sequences + tryPassword(topology_st.standalone, 'passwordMaxSequence', 3, 0, 'Za1_1234', + '13_#Kad472h', 'Max montonic sequence is not allowed') + tryPassword(topology_st.standalone, 'passwordMaxSequence', 3, 0, 'Za1_4321', + '13_#Kad472h', 'Max montonic sequence is not allowed') + tryPassword(topology_st.standalone, 'passwordMaxSequence', 3, 0, 'Za1_abcd', + '13_#Kad472h', 'Max montonic sequence is not allowed') + tryPassword(topology_st.standalone, 'passwordMaxSequence', 3, 0, 'Za1_dcba', + '13_#Kad472h', 'Max montonic sequence is not allowed') + + # Sequence Sets + tryPassword(topology_st.standalone, 'passwordMaxSeqSets', 2, 0, 'Za1_123--123', + '13_#Kad472h', 'Max montonic sequence is not allowed') + + # Max characters in a character class + tryPassword(topology_st.standalone, 'passwordMaxClassChars', 3, 0, 'Za1_9376', + '13_#Kad472h', 'Too may consecutive characters from the same class') + tryPassword(topology_st.standalone, 'passwordMaxClassChars', 3, 0, 'Za1_#$&!', + '13_#Kad472h', 'Too may consecutive characters from the same class') + tryPassword(topology_st.standalone, 'passwordMaxClassChars', 3, 0, 'Za1_ahtf', + '13_#Kad472h', 'Too may consecutive characters from the same class') + tryPassword(topology_st.standalone, 'passwordMaxClassChars', 3, 0, 'Za1_HTSE', + '13_#Kad472h', 'Too may consecutive characters from the same class') + + # Bad words + tryPassword(topology_st.standalone, 'passwordBadWords', 'redhat fedora', 'none', 'Za1_redhat', + '13_#Kad472h', 'Too may consecutive characters from the same class') + tryPassword(topology_st.standalone, 'passwordBadWords', 'redhat fedora', 'none', 'Za1_fedora', + '13_#Kad472h', 'Too may consecutive characters from the same class') + + # User Attributes + tryPassword(topology_st.standalone, 'passwordUserAttributes', 'description', 0, 'Za1_d_e_s_c', + 
'13_#Kad472h', 'Password found in user entry') + + log.info('pwdPolicy tests PASSED') + + +if __name__ == '__main__': + # Run isolated + # -s for DEBUG mode + CURRENT_FILE = os.path.realpath(__file__) + pytest.main("-s %s" % CURRENT_FILE) diff --git a/dirsrvtests/tests/suites/password/pwdPolicy_token_test.py b/dirsrvtests/tests/suites/password/pwdPolicy_token_test.py new file mode 100644 index 0000000..bf49506 --- /dev/null +++ b/dirsrvtests/tests/suites/password/pwdPolicy_token_test.py @@ -0,0 +1,83 @@ +import logging +import pytest +import os +import time +import ldap +from lib389._constants import * +from lib389.idm.user import UserAccounts +from lib389.idm.organizationalunit import OrganizationalUnits +from lib389.topologies import topology_st as topo + +pytestmark = pytest.mark.tier1 + +DEBUGGING = os.getenv("DEBUGGING", default=False) +if DEBUGGING: + logging.getLogger(__name__).setLevel(logging.DEBUG) +else: + logging.getLogger(__name__).setLevel(logging.INFO) +log = logging.getLogger(__name__) + +USER_DN = 'uid=Test_user1,ou=People,dc=example,dc=com' +USER_ACI = '(targetattr="userpassword")(version 3.0; acl "pwp test"; allow (all) userdn="ldap:///self";)' +TOKEN = 'test_user1' + +user_properties = { + 'uid': 'Test_user1', + 'cn': 'test_user1', + 'sn': 'test_user1', + 'uidNumber': '1001', + 'gidNumber': '2001', + 'userpassword': PASSWORD, + 'description': 'userdesc', + 'homeDirectory': '/home/{}'.format('test_user')} + + +def pwd_setup(topo): + ous = OrganizationalUnits(topo.standalone, DEFAULT_SUFFIX) + ou = ous.get('people') + ou.add('aci', USER_ACI) + + topo.standalone.config.replace_many(('passwordCheckSyntax', 'on'), + ('passwordMinLength', '4'), + ('passwordMinCategories', '1')) + users = UserAccounts(topo.standalone, DEFAULT_SUFFIX) + return users.create(properties=user_properties) + + +def test_token_lengths(topo): + """Test that password token length is enforced for various lengths including + the same length as the attribute being checked by the policy. + + :id: dae9d916-2a03-4707-b454-9e901d295b13 + :setup: Standalone instance + :steps: + 1. Test token length rejects password of the same length as rdn value + :expectedresults: + 1. Passwords are rejected + """ + user = pwd_setup(topo) + for length in ['4', '6', '10']: + topo.standalone.simple_bind_s(DN_DM, PASSWORD) + topo.standalone.config.set('passwordMinTokenLength', length) + topo.standalone.simple_bind_s(USER_DN, PASSWORD) + time.sleep(1) + + try: + passwd = TOKEN[:int(length)] + log.info("Testing password len {} token ({})".format(length, passwd)) + user.replace('userpassword', passwd) + log.fatal('Password incorrectly allowed!') + assert False + except ldap.CONSTRAINT_VIOLATION as e: + log.info('Password correctly rejected: ' + str(e)) + except ldap.LDAPError as e: + log.fatal('Unexpected failure ' + str(e)) + assert False + + +if __name__ == '__main__': + # Run isolated + # -s for DEBUG mode + CURRENT_FILE = os.path.realpath(__file__) + pytest.main("-s %s" % CURRENT_FILE) + diff --git a/dirsrvtests/tests/suites/password/pwdPolicy_warning_test.py b/dirsrvtests/tests/suites/password/pwdPolicy_warning_test.py new file mode 100644 index 0000000..0dca5f5 --- /dev/null +++ b/dirsrvtests/tests/suites/password/pwdPolicy_warning_test.py @@ -0,0 +1,599 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2016 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. 
+# --- END COPYRIGHT BLOCK --- +# +import pytest +import subprocess +from ldap.controls.ppolicy import PasswordPolicyControl +from lib389.tasks import * +from lib389.utils import * +from lib389.topologies import topology_st +from lib389.idm.user import UserAccounts +from lib389.idm.organizationalunit import OrganizationalUnits +from lib389._constants import (DEFAULT_SUFFIX, DN_CONFIG, PASSWORD, DN_DM, + HOST_STANDALONE, PORT_STANDALONE, SERVERID_STANDALONE) +from dateutil.parser import parse as dt_parse +from lib389.config import Config +import datetime + +pytestmark = pytest.mark.tier1 + +CONFIG_ATTR = 'passwordSendExpiringTime' +USER_DN = 'uid=tuser,ou=people,{}'.format(DEFAULT_SUFFIX) +USER_RDN = 'tuser' +USER_PASSWD = 'secret123' +USER_ACI = '(targetattr="userpassword")(version 3.0; acl "pwp test"; allow (all) userdn="ldap:///self";)' + +logging.getLogger(__name__).setLevel(logging.INFO) +log = logging.getLogger(__name__) + + +@pytest.fixture +def global_policy(topology_st, request): + """Sets the required global + password policy attributes under + cn=config entry + """ + + attrs = {'passwordExp': '', + 'passwordMaxAge': '', + 'passwordWarning': '', + CONFIG_ATTR: ''} + + log.info('Get the default values') + entry = topology_st.standalone.getEntry(DN_CONFIG, ldap.SCOPE_BASE, + '(objectClass=*)', attrs.keys()) + + for key in attrs.keys(): + attrs[key] = entry.getValue(key) + + log.info('Set the new values') + topology_st.standalone.config.replace_many(('passwordExp', 'on'), + ('passwordMaxAge', '172800'), + ('passwordWarning', '86400'), + (CONFIG_ATTR, 'on')) + + def fin(): + """Resets the defaults""" + + log.info('Reset the defaults') + topology_st.standalone.simple_bind_s(DN_DM, PASSWORD) + for key in attrs.keys(): + topology_st.standalone.config.replace(key, attrs[key]) + + request.addfinalizer(fin) + # A short sleep is required after the modifying password policy or cn=config + time.sleep(0.5) + + +@pytest.fixture +def global_policy_default(topology_st, request): + """Sets the required global password policy + attributes for testing the default behavior + of password expiry warning time + """ + + attrs = {'passwordExp': '', + 'passwordMaxAge': '', + 'passwordWarning': '', + CONFIG_ATTR: ''} + + log.info('Get the default values') + entry = topology_st.standalone.getEntry(DN_CONFIG, ldap.SCOPE_BASE, + '(objectClass=*)', attrs.keys()) + for key in attrs.keys(): + attrs[key] = entry.getValue(key) + + log.info('Set the new values') + topology_st.standalone.config.replace_many( + ('passwordExp', 'on'), + ('passwordMaxAge', '8640000'), + ('passwordWarning', '86400'), + (CONFIG_ATTR, 'off')) + + def fin(): + """Resets the defaults""" + + log.info('Reset the defaults') + topology_st.standalone.simple_bind_s(DN_DM, PASSWORD) + for key in attrs.keys(): + topology_st.standalone.config.replace(key, attrs[key]) + + request.addfinalizer(fin) + # A short sleep is required after modifying password policy or cn=config + time.sleep(0.5) + + +@pytest.fixture +def add_user(topology_st, request): + """Adds a user for binding""" + + log.info('Add the user') + + users = UserAccounts(topology_st.standalone, DEFAULT_SUFFIX) + user = users.create(properties={ + 'uid': USER_RDN, + 'cn': USER_RDN, + 'sn': USER_RDN, + 'uidNumber': '3000', + 'gidNumber': '4000', + 'homeDirectory': '/home/user', + 'description': 'd_e_s_c', + 'userPassword': USER_PASSWD + }) + + def fin(): + """Removes the user entry""" + + log.info('Remove the user entry') + topology_st.standalone.simple_bind_s(DN_DM, PASSWORD) + user.delete() + 
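+    # Illustrative note (not part of the original fixture): the local_policy
+    # fixture below creates the per-user fine-grained policy by shelling out to
+    # the legacy ns-newpwpolicy.pl script.  Other suites in this patch
+    # (e.g. pwdPolicy_attribute_test.py) build the same policy entry through
+    # lib389, roughly:
+    #
+    #     from lib389.pwpolicy import PwPolicyManager
+    #     PwPolicyManager(topology_st.standalone).create_user_policy(USER_DN, {})
+    #
+    # Both approaches end up adding the nsPwPolicyEntry container for the user;
+    # the empty property dict mirrors the usage in pwdPolicy_attribute_test.py.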
+ request.addfinalizer(fin) + + +@pytest.fixture +def local_policy(topology_st, add_user): + """Sets fine grained policy for user entry""" + + log.info("Setting fine grained policy for user ({})".format(USER_DN)) + + subprocess.call(['%s/ns-newpwpolicy.pl' % topology_st.standalone.get_sbin_dir(), + '-D', DN_DM, + '-w', PASSWORD, '-h', HOST_STANDALONE, + '-p', str(PORT_STANDALONE), '-U', USER_DN, + '-Z', SERVERID_STANDALONE]) + # A short sleep is required after modifying password policy + time.sleep(0.5) + + +def get_password_warning(topology_st): + """Gets the password expiry warning time for the user""" + + res_type = res_data = res_msgid = res_ctrls = None + result_id = '' + + log.info('Bind with the user and request the password expiry warning time') + + result_id = topology_st.standalone.simple_bind(USER_DN, USER_PASSWD, + serverctrls=[PasswordPolicyControl()]) + res_type, res_data, res_msgid, res_ctrls = \ + topology_st.standalone.result3(result_id) + # Return the control + return res_ctrls + + +def set_conf_attr(topology_st, attr, val): + """Sets the value of a given attribute under cn=config""" + + log.info("Setting {} to {}".format(attr, val)) + topology_st.standalone.config.set(attr, val) + # A short sleep is required after modifying cn=config + time.sleep(0.5) + + +def get_conf_attr(topology_st, attr): + """Gets the value of a given attribute under cn=config entry + """ + return topology_st.standalone.config.get_attr_val_utf8(attr) + + +@pytest.mark.parametrize("value", (' ', 'junk123', 'on', 'off')) +def test_different_values(topology_st, value): + """Try to set passwordSendExpiringTime attribute + to various values both valid and invalid + + :id: 3e6d79fb-b4c8-4860-897e-5b207815a75d + :parametrized: yes + :setup: Standalone instance + :steps: + 1. Try to set passwordSendExpiringTime to 'on' and 'off' + under cn=config entry + 2. Try to set passwordSendExpiringTime to ' ' and 'junk123' + under cn=config entry + 3. Run the search command to check the + value of passwordSendExpiringTime attribute + :expectedresults: + 1. Valid values should be accepted and saved + 2. Should be rejected with an OPERATIONS_ERROR + 3. The attribute should be changed for valid values + and unchanged for invalid + """ + + log.info('Get the default value') + defval = get_conf_attr(topology_st, CONFIG_ATTR) + + if value not in ('on', 'off'): + log.info('An invalid value is being tested') + with pytest.raises(ldap.OPERATIONS_ERROR): + set_conf_attr(topology_st, CONFIG_ATTR, value) + + log.info('Now check the value is unchanged') + assert get_conf_attr(topology_st, CONFIG_ATTR) == defval + + log.info("Invalid value {} was rejected correctly".format(value)) + else: + log.info('A valid value is being tested') + set_conf_attr(topology_st, CONFIG_ATTR, value) + + log.info('Now check that the value has been changed') + assert str(get_conf_attr(topology_st, CONFIG_ATTR)) == value + + log.info("{} is now set to {}".format(CONFIG_ATTR, value)) + + log.info('Set passwordSendExpiringTime back to the default value') + set_conf_attr(topology_st, CONFIG_ATTR, defval) + + +def test_expiry_time(topology_st, global_policy, add_user): + """Test whether the password expiry warning + time for a user is returned appropriately + + :id: 7adfd395-9b25-4cc0-9b71-14710dc1a28c + :setup: Standalone instance, a user entry, + Global password policy configured as below: + passwordExp: on + passwordMaxAge: 172800 + passwordWarning: 86400 + passwordSendExpiringTime: on + :steps: + 1. Bind as the normal user + 2. 
Request password policy control for the user + 3. Bind as DM + :expectedresults: + 1. Bind should be successful + 2. The password expiry warning time for the user should be returned + 3. Bind should be successful + """ + + res_ctrls = None + + ous = OrganizationalUnits(topology_st.standalone, DEFAULT_SUFFIX) + ou = ous.get('people') + ou.add('aci', USER_ACI) + + log.info('Get the password expiry warning time') + log.info("Binding with ({}) and requesting the password expiry warning time" + .format(USER_DN)) + res_ctrls = get_password_warning(topology_st) + + log.info('Check whether the time is returned') + assert res_ctrls + + log.info("user's password will expire in {:d} seconds" + .format(res_ctrls[0].timeBeforeExpiration)) + + log.info("Rebinding as DM") + topology_st.standalone.simple_bind_s(DN_DM, PASSWORD) + + +@pytest.mark.parametrize("attr,val", [(CONFIG_ATTR, 'off'), + ('passwordWarning', '3600')]) +def test_password_warning(topology_st, global_policy, add_user, attr, val): + """Test password expiry warning time by setting passwordSendExpiringTime to off + and setting passwordWarning to a short value + + :id: 39f54b3c-8c80-43ca-856a-174d81c56ce8 + :parametrized: yes + :setup: Standalone instance, a test user, + Global password policy configured as below: + passwordExp: on + passwordMaxAge: 172800 + passwordWarning: 86400 + passwordSendExpiringTime: on + :steps: + 1. Set passwordSendExpiringTime attribute to off or + to on and passwordWarning to a small value (3600) + 2. Bind as the normal user + 3. Request the password expiry warning time + 4. Bind as DM + :expectedresults: + 1. passwordSendExpiringTime and passwordWarning are set successfully + 2. Bind should be successful + 3. Password expiry warning time should be returned for the small value + and should not be returned when passwordSendExpiringTime is off + 4. Bind should be successful + """ + + log.info('Set configuration parameter') + set_conf_attr(topology_st, attr, val) + + log.info("Binding with ({}) and requesting password expiry warning time" + .format(USER_DN)) + res_ctrls = get_password_warning(topology_st) + + log.info('Check the state of the control') + if not res_ctrls: + log.info("Password Expiry warning time is not returned as {} is set to {}" + .format(attr, val)) + else: + log.info("({}) password will expire in {:d} seconds" + .format(USER_DN, res_ctrls[0].timeBeforeExpiration)) + + log.info("Rebinding as DM") + topology_st.standalone.simple_bind_s(DN_DM, PASSWORD) + + +def test_with_different_password_states(topology_st, global_policy, add_user): + """Test the control with different password states + + :id: d297fb1a-661f-4d52-bb43-2a2a340b8b0e + :setup: Standalone instance, a user entry, + Global password policy configured as below: + passwordExp: on + passwordMaxAge: 172800 + passwordWarning: 86400 + passwordSendExpiringTime: on + :steps: + 1. Expire user's password by changing + passwordExpirationTime timestamp + 2. Try to bind to the server with the user entry + 3. Revert back user's passwordExpirationTime + 4. Try to bind with the user entry and request + the control + 5. Bind as DM + :expectedresults: + 1. Operation should be successful + 2. Operation should fail because of Invalid Credentials + 3. passwordExpirationTime is successfully changed + 4. Bind should be successful and the password expiry + warning time should be returned + 5. 
Bind should be successful + """ + + res_ctrls = None + + log.info("Expire user's password by changing passwordExpirationTime timestamp") + users = UserAccounts(topology_st.standalone, DEFAULT_SUFFIX) + user = users.get(USER_RDN) + old_ts = user.get_attr_val_utf8('passwordExpirationTime') + log.info("Old passwordExpirationTime: {}".format(old_ts)) + + new_ts = (dt_parse(old_ts) - datetime.timedelta(31)).strftime('%Y%m%d%H%M%SZ') + log.info("New passwordExpirationTime: {}".format(new_ts)) + user.replace('passwordExpirationTime', new_ts) + + log.info("Attempting to bind with user {} and retrive the password expiry warning time".format(USER_DN)) + with pytest.raises(ldap.INVALID_CREDENTIALS) as ex: + res_ctrls = get_password_warning(topology_st) + + log.info("Bind Failed, error: {}".format(str(ex))) + + log.info("Rebinding as DM") + topology_st.standalone.simple_bind_s(DN_DM, PASSWORD) + + log.info("Reverting back user's passwordExpirationTime") + + user.replace('passwordExpirationTime', old_ts) + + log.info("Rebinding with {} and retrieving the password expiry warning time".format(USER_DN)) + res_ctrls = get_password_warning(topology_st) + + log.info('Check that the control is returned') + assert res_ctrls + + log.info("user's password will expire in {:d} seconds" + .format(res_ctrls[0].timeBeforeExpiration)) + + log.info("Rebinding as DM") + topology_st.standalone.simple_bind_s(DN_DM, PASSWORD) + + +def test_default_behavior(topology_st, global_policy_default, add_user): + """Test the default behavior of password expiry warning time + + :id: c47fa824-ee08-4b78-885f-bca4c42bb655 + :setup: Standalone instance, a user entry, + Global password policy configured as below: + passwordExp: on + passwordMaxAge: 8640000 + passwordWarning: 86400 + passwordSendExpiringTime: off + :steps: + 1. Bind as the normal user + 2. Request the control for the user + 3. Bind as DM + :expectedresults: + 1. Bind should be successful + 2. No control should be returned + 3. Bind should be successful + """ + + res_ctrls = None + + log.info("Binding with {} and requesting the password expiry warning time" + .format(USER_DN)) + res_ctrls = get_password_warning(topology_st) + + log.info('Check that no control is returned') + assert not res_ctrls + + log.info("Rebinding as DM") + topology_st.standalone.simple_bind_s(DN_DM, PASSWORD) + + +def test_when_maxage_and_warning_are_the_same(topology_st, global_policy_default, add_user): + """Test the warning expiry when passwordMaxAge and + passwordWarning are set to the same value. + + :id: e57a1b1c-96fc-11e7-a91b-28d244694824 + :setup: Standalone instance, a user entry, + Global password policy configured as below: + passwordExp: on + passwordMaxAge: 86400 + passwordWarning: 86400 + passwordSendExpiringTime: off + :steps: + 1. Bind as the normal user + 2. Change user's password to reset its password expiration time + 3. Request the control for the user + 4. Bind as DM + :expectedresults: + 1. Bind should be successful + 2. Password should be changed and password's expiration time reset + 3. Password expiry warning time should be returned by the + server since passwordMaxAge and passwordWarning are set + to the same value + 4. 
Bind should be successful + """ + + log.info('Set the new values') + topology_st.standalone.config.set('passwordMaxAge', '86400') + res_ctrls = None + + log.info("First change user's password to reset its password expiration time") + users = UserAccounts(topology_st.standalone, DEFAULT_SUFFIX) + user = users.get(USER_RDN) + user.rebind(USER_PASSWD) + user.reset_password(USER_PASSWD) + + time.sleep(2) + log.info("Binding with {} and requesting the password expiry warning time" + .format(USER_DN)) + res_ctrls = get_password_warning(topology_st) + + log.info('Check that control is returned even' + 'if passwordSendExpiringTime is set to off') + assert res_ctrls + + log.info("user's password will expire in {:d} seconds".format(res_ctrls[0].timeBeforeExpiration)) + + log.info("Rebinding as DM") + topology_st.standalone.simple_bind_s(DN_DM, PASSWORD) + + +def test_with_local_policy(topology_st, global_policy, local_policy): + """Test the attribute with fine grained policy set for the user + + :id: ab7d9f86-8cfe-48c3-8baa-739e599f006a + :setup: Standalone instance, a user entry, + Global password policy configured as below: + passwordExp: on + passwordMaxAge: 172800 + passwordWarning: 86400 + passwordSendExpiringTime: on + Fine grained password policy for the user using ns-newpwpolicy.pl + :steps: + 1. Bind as the normal user + 2. Request the control for the user + 3. Bind as DM + :expectedresults: + 1. Bind should be successful + 2. Password expiry warning time should not be returned for the user + 3. Bind should be successful + """ + + res_ctrls = None + + log.info("Attempting to get password expiry warning time for user {}".format(USER_DN)) + res_ctrls = get_password_warning(topology_st) + + log.info('Check that the control is not returned') + assert not res_ctrls + + log.info("Password expiry warning time is not returned") + + log.info("Rebinding as DM") + topology_st.standalone.simple_bind_s(DN_DM, PASSWORD) + + +@pytest.mark.bz1589144 +@pytest.mark.ds50091 +def test_search_shadowWarning_when_passwordWarning_is_lower(topology_st, global_policy): + """Test if value shadowWarning is present with global password policy + when passwordWarning is set with lower value. + + :id: c1e82de6-1aa3-42c3-844a-9720172158a3 + :setup: Standalone Instance + :steps: + 1. Bind as Directory Manager + 2. Set global password policy + 3. Add test user to instance. + 4. Modify passwordWarning to have smaller value than 86400 + 5. Bind as the new user + 6. Search for shadowWarning attribute + 7. Rebind as Directory Manager + :expectedresults: + 1. Binding should be successful + 2. Setting password policy should be successful + 3. Adding test user should be successful + 4. Modifying passwordWarning should be successful + 5. Binding should be successful + 6. Attribute shadowWarning should be found + 7. 
Binding should be successful
+    """
+
+    users = UserAccounts(topology_st.standalone, DEFAULT_SUFFIX)
+
+    log.info("Bind as %s" % DN_DM)
+    assert topology_st.standalone.simple_bind_s(DN_DM, PASSWORD)
+
+    log.info("Creating test user")
+    testuser = users.create_test_user(1004)
+    testuser.add('objectclass', 'shadowAccount')
+    testuser.set('userPassword', USER_PASSWD)
+
+    log.info("Setting passwordWarning to smaller value than 86400")
+    assert topology_st.standalone.config.set('passwordWarning', '86399')
+
+    log.info("Bind as test user")
+    assert topology_st.standalone.simple_bind_s(testuser.dn, USER_PASSWD)
+
+    log.info("Check if attribute shadowWarning is present")
+    topology_st.standalone.simple_bind_s(DN_DM, PASSWORD)
+    assert testuser.present('shadowWarning')
+
+
+@pytest.mark.bug624080
+def test_password_expire_works(topology_st):
+    """Regression test for bug624080. If passwordMaxAge is set to a
+    value and a new user is added, and passwordMaxAge is later changed
+    to a shorter expiration time and the new user's password is then
+    changed, the passwordExpirationTime for the new user should be
+    changed too. There was a bug in DS 6.2 where the expiration time
+    remained unchanged.
+
+    :id: 1ead6052-4636-11ea-b5af-8c16451d917b
+    :setup: Standalone
+    :steps:
+        1. Set the global password policy and a passwordMaxAge of 5 days
+        2. Add the new user
+        3. Check the user's password expiration time
+        4. Decrease the global passwordMaxAge to 2 days
+        5. Modify the user's password
+        6. Modify the user one more time to make sure the expiration time has been reset
+        7. Turn off the password policy
+    :expectedresults:
+        1. Success
+        2. Success
+        3. Success
+        4. Success
+        5. Success
+        6. Success
+        7. Success
+    """
+    config = Config(topology_st.standalone)
+    config.replace_many(('passwordMaxAge', '432000'),
+                        ('passwordExp', 'on'))
+    user = UserAccounts(topology_st.standalone, DEFAULT_SUFFIX, rdn=None).create_test_user()
+    user.set('userPassword', 'anuj')
+    expire_time = user.get_attr_val_utf8('passwordExpirationTime')
+    config.replace('passwordMaxAge', '172800')
+    user.set('userPassword', 'borah')
+    expire_time2 = user.get_attr_val_utf8('passwordExpirationTime')
+    config.replace('passwordMaxAge', '604800')
+    user.set('userPassword', 'anujagaiin')
+    expire_time3 = user.get_attr_val_utf8('passwordExpirationTime')
+    assert expire_time != expire_time2 != expire_time3
+    config.replace('passwordExp', 'off')
+
+
+if __name__ == '__main__':
+    # Run isolated
+    # -s for DEBUG mode
+    CURRENT_FILE = os.path.realpath(__file__)
+    pytest.main("-s %s" % CURRENT_FILE)
diff --git a/dirsrvtests/tests/suites/password/pwd_algo_test.py b/dirsrvtests/tests/suites/password/pwd_algo_test.py
new file mode 100644
index 0000000..bfd0309
--- /dev/null
+++ b/dirsrvtests/tests/suites/password/pwd_algo_test.py
@@ -0,0 +1,177 @@
+# --- BEGIN COPYRIGHT BLOCK ---
+# Copyright (C) 2019 Red Hat, Inc.
+# All rights reserved.
+#
+# License: GPL (version 3 or any later version).
+# See LICENSE for details.
+# --- END COPYRIGHT BLOCK --- +# +import pytest +from lib389.tasks import * +from lib389.utils import * +from lib389.topologies import topology_st +from lib389._constants import DEFAULT_SUFFIX, HOST_STANDALONE, PORT_STANDALONE +from lib389.idm.user import UserAccounts, TEST_USER_PROPERTIES + +pytestmark = pytest.mark.tier1 + +DEBUGGING = os.getenv('DEBUGGING', False) +USER_DN = 'uid=user,ou=People,%s' % DEFAULT_SUFFIX + +logging.getLogger(__name__).setLevel(logging.INFO) +log = logging.getLogger(__name__) + + +def _test_bind(user, password): + result = True + try: + userconn = user.bind(password) + userconn.unbind_s() + except ldap.INVALID_CREDENTIALS: + result = False + return result + + +def _test_algo(inst, algo_name): + inst.config.set('passwordStorageScheme', algo_name) + + users = UserAccounts(inst, DEFAULT_SUFFIX) + user_props = TEST_USER_PROPERTIES.copy() + user_props.update({'uid': 'user', 'cn': 'buser', 'userpassword': 'Secret123'}) + user = users.create(properties=user_props) + + # Make sure when we read the userPassword field, it is the correct ALGO + pw_field = user.get_attr_val_utf8('userPassword') + + if algo_name != 'CLEAR' and algo_name != 'DEFAULT': + assert (algo_name[:5].lower() in pw_field.lower()) + # Now make sure a bind works + assert (_test_bind(user, 'Secret123')) + # Bind with a wrong shorter password, should fail + assert (not _test_bind(user, 'Wrong')) + # Bind with a wrong longer password, should fail + assert (not _test_bind(user, 'This is even more wrong')) + # Bind with a wrong exact length password. + assert (not _test_bind(user, 'Alsowrong')) + # Bind with a subset password, should fail + assert (not _test_bind(user, 'Secret')) + if not algo_name.startswith('CRYPT'): + # Bind with a subset password that is 1 char shorter, to detect off by 1 in clear + assert (not _test_bind(user, 'Secret12')) + # Bind with a superset password, should fail + assert (not _test_bind(user, 'Secret123456')) + + # Delete the user + user.delete() + + +def _test_bind_for_pbkdf2_algo(inst, password): + result = True + userconn = ldap.initialize("ldap://%s:%s" % (HOST_STANDALONE, PORT_STANDALONE)) + try: + userconn.simple_bind_s(USER_DN, password) + userconn.unbind_s() + except ldap.INVALID_CREDENTIALS: + result = False + return result + + +def _test_algo_for_pbkdf2(inst, algo_name): + inst.config.set('passwordStorageScheme', algo_name) + + if DEBUGGING: + print('Testing %s' % algo_name) + + # Create the user with a password + users = UserAccounts(inst, DEFAULT_SUFFIX) + user_props = TEST_USER_PROPERTIES.copy() + user_props.update({'uid': 'user', 'cn': 'buser', 'userpassword': 'Secret123'}) + user = users.create(properties=user_props) + + # Make sure when we read the userPassword field, it is the correct ALGO + pw_field = user.get_attr_val_utf8_l('userPassword') + + if DEBUGGING: + print(pw_field) + + if algo_name != 'CLEAR': + lalgo_name = algo_name.lower() + assert (pw_field.startswith('{' + lalgo_name + '}')) + + # Now make sure a bind works + assert (_test_bind_for_pbkdf2_algo(inst, 'Secret123')) + # Bind with a wrong shorter password, should fail + assert (not _test_bind_for_pbkdf2_algo(inst, 'Wrong')) + # Bind with a wrong longer password, should fail + assert (not _test_bind_for_pbkdf2_algo(inst, 'This is even more wrong')) + # Bind with a password that has the algo in the name + assert (not _test_bind_for_pbkdf2_algo(inst, '{%s}SomeValues....' % algo_name)) + # Bind with a wrong exact length password. 
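+    # ('Alsowrong' is the same length as 'Secret123', so this also guards against length-only comparison)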
+ assert (not _test_bind_for_pbkdf2_algo(inst, 'Alsowrong')) + # Bind with a subset password, should fail + assert (not _test_bind_for_pbkdf2_algo(inst, 'Secret')) + if algo_name != 'CRYPT': + # Bind with a subset password that is 1 char shorter, to detect off by 1 in clear + assert (not _test_bind_for_pbkdf2_algo(inst, 'Secret12')) + # Bind with a superset password, should fail + assert (not _test_bind_for_pbkdf2_algo(inst, 'Secret123456')) + + # Delete the user + inst.delete_s(USER_DN) + + +@pytest.mark.parametrize("algo", + ('CLEAR', 'CRYPT', 'CRYPT-MD5', 'CRYPT-SHA256', 'CRYPT-SHA512', + 'MD5', 'SHA', 'SHA256', 'SHA384', 'SHA512', 'SMD5', 'SSHA', + 'SSHA256', 'SSHA384', 'SSHA512', 'PBKDF2_SHA256', 'DEFAULT',)) +def test_pwd_algo_test(topology_st, algo): + """Assert that all of our password algorithms correctly PASS and FAIL varying + password conditions. + + :id: fbb308a8-8374-4abd-b786-1f88e56f7650 + :parametrized: yes + """ + if algo == 'DEFAULT': + if ds_is_older('1.4.0'): + pytest.skip("Not implemented") + _test_algo(topology_st.standalone, algo) + log.info('Test %s PASSED' % algo) + + +@pytest.mark.ds397 +def test_pbkdf2_algo(topology_st): + """Changing password storage scheme to PBKDF2_SHA256 + and trying to bind with different password combination + + :id: 112e265b-f468-4758-b8fa-ed8742de0182 + :setup: Standalone instance + :steps: + 1. Change password storage scheme to PBKDF2_SHA256 + 2. Add a test user entry + 3. Bind with correct password + 4. Bind with incorrect password combination(brute-force) + :expectedresults: + 1. Operation should be successful + 2. Operation should be successful + 3. Bind should be successful + 4. Should not allow to bind with incorrect password + """ + if DEBUGGING: + # Add debugging steps(if any)... + log.info("ATTACH NOW") + time.sleep(30) + + # Merge this to the password suite in the future + + for algo in ('PBKDF2_SHA256',): + for i in range(0, 10): + _test_algo_for_pbkdf2(topology_st.standalone, algo) + + log.info('Test PASSED') + + +if __name__ == '__main__': + # Run isolated + # -s for DEBUG mode + CURRENT_FILE = os.path.realpath(__file__) + pytest.main("-s %s" % CURRENT_FILE) diff --git a/dirsrvtests/tests/suites/password/pwd_lockout_bypass_test.py b/dirsrvtests/tests/suites/password/pwd_lockout_bypass_test.py new file mode 100644 index 0000000..a4e0094 --- /dev/null +++ b/dirsrvtests/tests/suites/password/pwd_lockout_bypass_test.py @@ -0,0 +1,82 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2017 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. +# --- END COPYRIGHT BLOCK --- +# +import pytest +from lib389.tasks import * +from lib389.utils import * +from lib389.topologies import topology_st +from lib389.idm.user import UserAccounts, TEST_USER_PROPERTIES +import ldap + +pytestmark = pytest.mark.tier1 + +# The irony of these names is not lost on me. +GOOD_PASSWORD = 'password' +BAD_PASSWORD = 'aontseunao' + +logging.getLogger(__name__).setLevel(logging.INFO) +log = logging.getLogger(__name__) + + +def test_lockout_bypass(topology_st): + """Check basic password lockout functionality + + :id: 2482a992-1719-495c-b75b-78fe5c48c873 + :setup: Standalone instance + :steps: + 1. Set passwordMaxFailure to 1 + 2. Set passwordLockDuration to 7 + 3. Set passwordLockout to 'on' + 4. Create a user + 5. Set a userPassword attribute + 6. Bind as the user with a bad credentials + 7. Bind as the user with a bad credentials + 8. 
Bind as the user with a good credentials + :expectedresults: + 1. passwordMaxFailure should be successfully set + 2. passwordLockDuration should be successfully set + 3. passwordLockout should be successfully set + 4. User should be created + 5. userPassword should be successfully set + 6. Should throw an invalid credentials error + 7. Should throw a constraint violation error + 8. Should throw a constraint violation error + """ + + inst = topology_st.standalone + + # Configure the lock policy + inst.config.set('passwordMaxFailure', '1') + inst.config.set('passwordLockoutDuration', '99999') + inst.config.set('passwordLockout', 'on') + + # Create the account + users = UserAccounts(inst, DEFAULT_SUFFIX) + testuser = users.create(properties=TEST_USER_PROPERTIES) + testuser.set('userPassword', GOOD_PASSWORD) + + conn = testuser.bind(GOOD_PASSWORD) + assert conn != None + conn.unbind_s() + + # Bind with bad creds twice + # This is the failure. + with pytest.raises(ldap.INVALID_CREDENTIALS): + conn = testuser.bind(BAD_PASSWORD) + # Now we should not be able to ATTEMPT the bind. It doesn't matter that + # we disclose that we have hit the rate limit here, what matters is that + # it exists. + with pytest.raises(ldap.CONSTRAINT_VIOLATION): + conn = testuser.bind(BAD_PASSWORD) + + # now bind with good creds + # Should be error 19 still. + with pytest.raises(ldap.CONSTRAINT_VIOLATION): + conn = testuser.bind(GOOD_PASSWORD) + + diff --git a/dirsrvtests/tests/suites/password/pwd_log_test.py b/dirsrvtests/tests/suites/password/pwd_log_test.py new file mode 100644 index 0000000..55ef415 --- /dev/null +++ b/dirsrvtests/tests/suites/password/pwd_log_test.py @@ -0,0 +1,87 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2019 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. +# --- END COPYRIGHT BLOCK --- +# + +import logging +import pytest +from lib389.tasks import * +from lib389.topologies import topology_st +from lib389.idm.user import UserAccounts, TEST_USER_PROPERTIES +from lib389._constants import DEFAULT_SUFFIX + +pytestmark = pytest.mark.tier1 + +logging.getLogger(__name__).setLevel(logging.DEBUG) +log = logging.getLogger(__name__) + + +@pytest.mark.ds365 +def test_hide_unhashed_pwd(topology_st): + """Change userPassword, enable hiding of un-hashed + password and check the audit logs. + + :id: c4a5d08d-f525-459b-82b9-3f68dae6fc71 + :setup: Standalone instance + :steps: + 1. Add a test user entry + 2. Set a new password for user and nsslapd-auditlog-logging-enabled to 'on' + 3. Disable nsslapd-auditlog-logging-hide-unhashed-pw + 4. Check the audit logs + 5. Set a new password for user and nsslapd-auditlog-logging-hide-unhashed-pw to 'on' + 6. Check the audit logs + :expectedresults: + 1. User addition should be successful + 2. New password should be set and audit logs should be enabled + 3. Operation should be successful + 4. Audit logs should show password without hash + 5. Operation should be successful + 6. 
Audit logs should hide password which is un-hashed + """ + + users = UserAccounts(topology_st.standalone, DEFAULT_SUFFIX) + user_props = TEST_USER_PROPERTIES.copy() + user_props.update({'uid': 'user', 'cn': 'buser', 'userpassword': 'Secret123'}) + user = users.create(properties=user_props) + + # Enable the audit log + topology_st.standalone.config.set('nsslapd-auditlog-logging-enabled','on') + + # Allow the unhashed password to be written to audit log + topology_st.standalone.config.set('nsslapd-auditlog-logging-hide-unhashed-pw', 'off') + topology_st.standalone.config.set('nsslapd-unhashed-pw-switch', 'on') + + # Set new password, and check the audit log + user.reset_password('mypassword') + + # Check audit log + time.sleep(1) + if not topology_st.standalone.searchAuditLog('unhashed#user#password: mypassword'): + log.fatal('failed to find unhashed password in auditlog') + assert False + + # Hide unhashed password in audit log + topology_st.standalone.config.set('nsslapd-auditlog-logging-hide-unhashed-pw', 'on') + + # Modify password, and check the audit log + user.reset_password('hidepassword') + + # Check audit log + time.sleep(1) + if topology_st.standalone.searchAuditLog('unhashed#user#password: hidepassword'): + log.fatal('Found unhashed password in auditlog') + assert False + + log.info('Test complete') + + +if __name__ == '__main__': + # Run isolated + # -s for DEBUG mode + CURRENT_FILE = os.path.realpath(__file__) + pytest.main("-s %s" % CURRENT_FILE) + diff --git a/dirsrvtests/tests/suites/password/pwd_upgrade_on_bind.py b/dirsrvtests/tests/suites/password/pwd_upgrade_on_bind.py new file mode 100644 index 0000000..e826e6e --- /dev/null +++ b/dirsrvtests/tests/suites/password/pwd_upgrade_on_bind.py @@ -0,0 +1,140 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2019 William Brown +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. +# --- END COPYRIGHT BLOCK --- +# +import ldap +import pytest +from lib389.topologies import topology_st +from lib389.idm.user import UserAccounts +from lib389._constants import (DEFAULT_SUFFIX, PASSWORD) + +def test_password_hash_on_upgrade(topology_st): + """If a legacy password hash is present, assert that on a correct bind + the hash is "upgraded" to the latest-and-greatest hash format on the + server. + + Assert also that password FAILURE does not alter the password. + + :id: 42cf99e6-454d-46f5-8f1c-8bb699864a07 + :setup: Single instance + :steps: 1. Set a password hash in SSHA256, and hash to pbkdf2 statically + 2. Test a faulty bind + 3. Assert the PW is SSHA256 + 4. Test a correct bind + 5. Assert the PW is PBKDF2 + :expectedresults: + 1. Successfully set the values + 2. The bind fails + 3. The PW is SSHA256 + 4. The bind succeeds + 5. The PW is PBKDF2 + """ + # Make sure the server is set to pkbdf + topology_st.standalone.config.set('passwordStorageScheme', 'PBKDF2_SHA256') + topology_st.standalone.config.set('nsslapd-allow-hashed-passwords', 'on') + topology_st.standalone.config.set('nsslapd-enable-upgrade-hash', 'on') + + users = UserAccounts(topology_st.standalone, DEFAULT_SUFFIX) + user = users.create_test_user() + # Static version of "password" in SSHA256. + user.set('userPassword', "{SSHA256}9eliEQgjfc4Fcj1IXZtc/ne1GRF+OIjz/NfSTX4f7HByGMQrWHLMLA==") + # Attempt to bind with incorrect password. 
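+    # A failed bind must not rewrite the stored hash (checked below)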
+ with pytest.raises(ldap.INVALID_CREDENTIALS): + badconn = user.bind('badpassword') + # Check the pw is SSHA256 + up = user.get_attr_val_utf8('userPassword') + assert up.startswith('{SSHA256}') + + # Bind with correct. + conn = user.bind(PASSWORD) + # Check the pw is now PBKDF2! + up = user.get_attr_val_utf8('userPassword') + assert up.startswith('{PBKDF2_SHA256}') + +def test_password_hash_on_upgrade_clearcrypt(topology_st): + """In some deploymentes, some passwords MAY be in clear or crypt which have + specific possible application integrations allowing the read value to be + processed by other entities. We avoid upgrading these two, to prevent + breaking these integrations. + + :id: 27712492-a4bf-4ea9-977b-b4850ddfb628 + :setup: Single instance + :steps: 1. Set a password hash in CLEAR, and hash to pbkdf2 statically + 2. Test a correct bind + 3. Assert the PW is CLEAR + 4. Set the password to CRYPT + 5. Test a correct bind + 6. Assert the PW is CLEAR + :expectedresults: + 1. Successfully set the values + 2. The bind succeeds + 3. The PW is CLEAR + 4. The set succeeds + 4. The bind succeeds + 5. The PW is CRYPT + """ + # Make sure the server is set to pkbdf + topology_st.standalone.config.set('nsslapd-allow-hashed-passwords', 'on') + topology_st.standalone.config.set('nsslapd-enable-upgrade-hash', 'on') + + users = UserAccounts(topology_st.standalone, DEFAULT_SUFFIX) + user = users.create_test_user(1001) + + topology_st.standalone.config.set('passwordStorageScheme', 'CLEAR') + user.set('userPassword', "password") + topology_st.standalone.config.set('passwordStorageScheme', 'PBKDF2_SHA256') + + conn = user.bind(PASSWORD) + up = user.get_attr_val_utf8('userPassword') + assert up.startswith('password') + + user.set('userPassword', "{crypt}I0S3Ry62CSoFg") + conn = user.bind(PASSWORD) + up = user.get_attr_val_utf8('userPassword') + assert up.startswith('{crypt}') + +def test_password_hash_on_upgrade_disable(topology_st): + """If a legacy password hash is present, assert that on a correct bind + the hash is "upgraded" to the latest-and-greatest hash format on the + server. But some people may not like this, so test that we can disable + the feature too! + + :id: ed315145-a3d1-4f17-b04c-73d3638e7ade + :setup: Single instance + :steps: 1. Set a password hash in SSHA256, and hash to pbkdf2 statically + 2. Test a faulty bind + 3. Assert the PW is SSHA256 + 4. Test a correct bind + 5. Assert the PW is SSHA256 + :expectedresults: + 1. Successfully set the values + 2. The bind fails + 3. The PW is SSHA256 + 4. The bind succeeds + 5. The PW is SSHA256 + """ + # Make sure the server is set to pkbdf + topology_st.standalone.config.set('passwordStorageScheme', 'PBKDF2_SHA256') + topology_st.standalone.config.set('nsslapd-allow-hashed-passwords', 'on') + topology_st.standalone.config.set('nsslapd-enable-upgrade-hash', 'off') + + users = UserAccounts(topology_st.standalone, DEFAULT_SUFFIX) + user = users.create_test_user(1002) + # Static version of "password" in SSHA256. + user.set('userPassword', "{SSHA256}9eliEQgjfc4Fcj1IXZtc/ne1GRF+OIjz/NfSTX4f7HByGMQrWHLMLA==") + # Attempt to bind with incorrect password. + with pytest.raises(ldap.INVALID_CREDENTIALS): + badconn = user.bind('badpassword') + # Check the pw is SSHA256 + up = user.get_attr_val_utf8('userPassword') + assert up.startswith('{SSHA256}') + + # Bind with correct. + conn = user.bind(PASSWORD) + # Check the pw is NOT upgraded! 
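+    # nsslapd-enable-upgrade-hash is off in this test, so the stored SSHA256 value must remain unchanged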
+    up = user.get_attr_val_utf8('userPassword')
+    assert up.startswith('{SSHA256}')
diff --git a/dirsrvtests/tests/suites/password/pwp_history_test.py b/dirsrvtests/tests/suites/password/pwp_history_test.py
new file mode 100644
index 0000000..16e181d
--- /dev/null
+++ b/dirsrvtests/tests/suites/password/pwp_history_test.py
@@ -0,0 +1,263 @@
+# --- BEGIN COPYRIGHT BLOCK ---
+# Copyright (C) 2016 Red Hat, Inc.
+# All rights reserved.
+#
+# License: GPL (version 3 or any later version).
+# See LICENSE for details.
+# --- END COPYRIGHT BLOCK ---
+#
+import pytest
+import time
+import logging
+from lib389.tasks import *
+from lib389.utils import ds_is_newer
+from lib389.topologies import topology_st
+from lib389.idm.user import UserAccounts, TEST_USER_PROPERTIES
+from lib389.idm.directorymanager import DirectoryManager
+from lib389.idm.organizationalunit import OrganizationalUnits
+from lib389._constants import DEFAULT_SUFFIX
+
+pytestmark = pytest.mark.tier1
+
+logging.getLogger(__name__).setLevel(logging.DEBUG)
+log = logging.getLogger(__name__)
+
+
+def test_basic(topology_st):
+    """Test basic password policy history feature functionality
+
+    :id: 83d74f7d-3036-4944-8839-1b40bbf265ff
+    :setup: Standalone instance
+    :steps:
+        1. Configure password history policy as below:
+           passwordHistory: on
+           passwordInHistory: 3
+           passwordChange: on
+           passwordStorageScheme: CLEAR
+        2. Add a test user
+        3. Attempt to change password to the same password
+        4. Change password four times
+        5. Check that we only have 3 passwords stored in history
+        6. Attempt to change the password to previous passwords
+        7. Reset password by Directory Manager (admin reset)
+        8. Try and change the password to the previous password before the reset
+        9. Test passwordInHistory set to "0" rejects only the current password
+        10. Test passwordInHistory set to "2" rejects previous passwords
+
+
+    :expectedresults:
+        1. Password history policy should be configured successfully
+        2. User should be added successfully
+        3. Password change should be correctly rejected
+           with Constraint Violation error
+        4. Password should be successfully changed
+        5. Only 3 passwords should be stored in history
+        6. Password changes should be correctly rejected
+           with Constraint Violation error
+        7. Password should be successfully reset
+        8. Password change should be correctly rejected
+           with Constraint Violation error
+        9. Success
+        10. Success
+    """
+
+    #
+    # Configure password history policy and add a test user
+    #
+    try:
+        topology_st.standalone.config.replace_many(('passwordHistory', 'on'),
+                                                   ('passwordInHistory', '3'),
+                                                   ('passwordChange', 'on'),
+                                                   ('passwordStorageScheme', 'CLEAR'),
+                                                   ('nsslapd-auditlog-logging-enabled', 'on'))
+        log.info('Configured password policy.')
+    except ldap.LDAPError as e:
+        log.fatal('Failed to configure password policy: ' + str(e))
+        assert False
+    time.sleep(1)
+
+    # Add aci so users can change their own password
+    USER_ACI = '(targetattr="userpassword || passwordHistory")(version 3.0; acl "pwp test"; allow (all) userdn="ldap:///self";)'
+    ous = OrganizationalUnits(topology_st.standalone, DEFAULT_SUFFIX)
+    ou = ous.get('people')
+    ou.add('aci', USER_ACI)
+
+    # Create user
+    users = UserAccounts(topology_st.standalone, DEFAULT_SUFFIX)
+    user = users.create(properties=TEST_USER_PROPERTIES)
+    user.set('userpassword', 'password')
+    user.rebind('password')
+
+    #
+    # Test that password history is enforced.
+ # + # Attempt to change password to the same password + try: + user.set('userpassword', 'password') + log.info('Incorrectly able to to set password to existing password.') + assert False + except ldap.CONSTRAINT_VIOLATION: + log.info('Password change correctly rejected') + except ldap.LDAPError as e: + log.fatal('Failed to attempt to change password: ' + str(e)) + assert False + + # + # Keep changing password until we fill the password history (3) + # + user.set('userpassword', 'password1') + user.rebind('password1') + time.sleep(.5) + user.set('userpassword', 'password2') + user.rebind('password2') + time.sleep(.5) + user.set('userpassword', 'password3') + user.rebind('password3') + time.sleep(.5) + user.set('userpassword', 'password4') + user.rebind('password4') + time.sleep(.5) + + # + # Check that we only have 3 passwords stored in history + # + pwds = user.get_attr_vals('passwordHistory') + if len(pwds) != 3: + log.fatal('Incorrect number of passwords stored in history: %d' % + len(pwds)) + log.error('password history: ' + str(pwds)) + assert False + else: + log.info('Correct number of passwords found in history.') + + # + # Attempt to change the password to previous passwords + # + try: + user.set('userpassword', 'password1') + log.fatal('Incorrectly able to to set password to previous password1.') + log.fatal('password history: ' + str(user.get_attr_vals('passwordhistory'))) + assert False + except ldap.CONSTRAINT_VIOLATION: + log.info('Password change correctly rejected') + except ldap.LDAPError as e: + log.fatal('Failed to attempt to change password: ' + str(e)) + assert False + try: + user.set('userpassword', 'password2') + log.fatal('Incorrectly able to to set password to previous password2.') + log.fatal('password history: ' + str(user.get_attr_vals('passwordhistory'))) + assert False + except ldap.CONSTRAINT_VIOLATION: + log.info('Password change correctly rejected') + except ldap.LDAPError as e: + log.fatal('Failed to attempt to change password: ' + str(e)) + assert False + try: + user.set('userpassword', 'password3') + log.fatal('Incorrectly able to to set password to previous password3.') + log.fatal('password history: ' + str(user.get_attr_vals('passwordhistory'))) + assert False + except ldap.CONSTRAINT_VIOLATION: + log.info('Password change correctly rejected') + except ldap.LDAPError as e: + log.fatal('Failed to attempt to change password: ' + str(e)) + assert False + + # + # Reset password by Directory Manager(admin reset) + # + dm = DirectoryManager(topology_st.standalone) + dm.rebind() + time.sleep(.5) + user.set('userpassword', 'password-reset') + time.sleep(1) + + # Try and change the password to the previous password before the reset + try: + user.rebind('password-reset') + user.set('userpassword', 'password4') + log.fatal('Incorrectly able to to set password to previous password4.') + log.fatal('password history: ' + str(user.get_attr_vals('passwordhistory'))) + assert False + except ldap.CONSTRAINT_VIOLATION: + log.info('Password change correctly rejected') + except ldap.LDAPError as e: + log.fatal('Failed to attempt to change password: ' + str(e)) + assert False + + if ds_is_newer("1.4.1.2"): + # + # Test passwordInHistory to 0 + # + dm = DirectoryManager(topology_st.standalone) + dm.rebind() + try: + topology_st.standalone.config.replace('passwordInHistory', '0') + log.info('Configured passwordInHistory to 0.') + except ldap.LDAPError as e: + log.fatal('Failed to configure password policy (passwordInHistory to 0): ' + str(e)) + assert False + time.sleep(1) + 
+ # Verify the older passwords in the entry (passwordhistory) are ignored + user.rebind('password-reset') + user.set('userpassword', 'password4') + time.sleep(.5) + try: + user.set('userpassword', 'password4') + log.fatal('Incorrectly able to to set password to current password4.') + log.fatal('password history: ' + str(user.get_attr_vals('passwordhistory'))) + assert False + except ldap.CONSTRAINT_VIOLATION: + log.info('Password change correctly rejected') + except ldap.LDAPError as e: + log.fatal('Failed to attempt to change password: ' + str(e)) + assert False + + # Need to make one successful update so history list is reset + user.set('userpassword', 'password5') + + # + # Set the history count back to a positive value and make sure things still work + # as expected + # + dm = DirectoryManager(topology_st.standalone) + dm.rebind() + try: + topology_st.standalone.config.replace('passwordInHistory', '2') + log.info('Configured passwordInHistory to 2.') + except ldap.LDAPError as e: + log.fatal('Failed to configure password policy (passwordInHistory to 2): ' + str(e)) + assert False + time.sleep(1) + + try: + user.rebind('password5') + user.set('userpassword', 'password5') + log.fatal('Incorrectly able to to set password to current password5.') + log.fatal('password history: ' + str(user.get_attr_vals('passwordhistory'))) + assert False + except ldap.CONSTRAINT_VIOLATION: + log.info('Password change correctly rejected') + except ldap.LDAPError as e: + log.fatal('Failed to attempt to change password: ' + str(e)) + assert False + + # Test that old password that was in history is not being checked + try: + user.set('userpassword', 'password1') + except ldap.LDAPError as e: + log.fatal('Failed to attempt to change password: ' + str(e)) + log.fatal('password history: ' + str(user.get_attr_vals('passwordhistory'))) + assert False + + # Done + log.info('Test suite PASSED.') + + +if __name__ == '__main__': + # Run isolated + # -s for DEBUG mode + CURRENT_FILE = os.path.realpath(__file__) + pytest.main("-s %s" % CURRENT_FILE) diff --git a/dirsrvtests/tests/suites/password/pwp_test.py b/dirsrvtests/tests/suites/password/pwp_test.py new file mode 100644 index 0000000..cc29f6f --- /dev/null +++ b/dirsrvtests/tests/suites/password/pwp_test.py @@ -0,0 +1,511 @@ +""" +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2020 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. 
+# --- END COPYRIGHT BLOCK ---
+"""
+
+import os
+import pytest
+from lib389.topologies import topology_st as topo
+from lib389.idm.user import UserAccounts, UserAccount
+from lib389._constants import DEFAULT_SUFFIX
+from lib389.config import Config
+from lib389.idm.group import Group
+import ldap
+import time
+
+pytestmark = pytest.mark.tier1
+
+
+def _create_user(topo, uid, cn, uidNumber, userpassword):
+    """
+    Create a test user entry
+    """
+    user = UserAccounts(topo.standalone, DEFAULT_SUFFIX).create(properties={
+        'uid': uid,
+        'sn': cn.split(' ')[-1],
+        'cn': cn,
+        'givenname': cn.split(' ')[0],
+        'uidNumber': uidNumber,
+        'gidNumber': uidNumber,
+        'mail': f'{uid}@example.com',
+        'userpassword': userpassword,
+        'homeDirectory': f'/home/{uid}'
+    })
+    return user
+
+
+def _change_password_with_own(topo, user_dn, password, new_password):
+    """
+    Change the user's password while bound as the user itself
+    """
+    conn = UserAccount(topo.standalone, user_dn).bind(password)
+    real_user = UserAccount(conn, user_dn)
+    real_user.replace('userpassword', new_password)
+
+
+def _change_password_with_root(topo, user_dn, new_password):
+    """
+    Change the user's password while bound as the root DN
+    """
+    UserAccount(topo.standalone, user_dn).replace('userpassword', new_password)
+
+
+@pytest.fixture(scope="function")
+def _fix_password(topo, request):
+    user = _create_user(topo, 'dbyers', 'Danny Byers', '1001', 'dbyers1')
+    user.replace('userpassword', 'dbyers1')
+
+    def fin():
+        user.delete()
+    request.addfinalizer(fin)
+
+
+def test_passwordchange_to_no(topo, _fix_password):
+    """Try to change a user's password even though the password policy does not allow password changes
+
+    :id: 16c64ef0-5a20-11ea-a902-8c16451d917b
+    :setup: Standalone
+    :steps:
+        1. Add a user with uid=dbyers
+        2. Set Password change to Must Not Change After Reset
+        3. Set Password policy to May Not Change Password
+        4. Try to change the user's password even though the policy does not allow it
+        5. Set Password change to May Change Password
+        6. Try to change the user's password
+        7. Try to change the password with invalid credentials. Should see an error message.
+    :expected results:
+        1. Success
+        2. Success
+        3. Success
+        4. Success
+        5. Success
+        6. Success
+        7. Success
+    """
+    # Add a user with uid=dbyers
+    user = f'uid=dbyers,ou=People,{DEFAULT_SUFFIX}'
+    config = Config(topo.standalone)
+    # Set Password change to Must Not Change After Reset
+    config.replace_many(
+        ('passwordmustchange', 'off'),
+        ('passwordchange', 'off'))
+    # Try to change the user's password even though the policy does not allow it
+    with pytest.raises(ldap.UNWILLING_TO_PERFORM):
+        _change_password_with_own(topo, user, 'dbyers1', 'AB')
+    # Set Password change to May Change Password
+    config.replace('passwordchange', 'on')
+    _change_password_with_own(topo, user, 'dbyers1', 'dbyers1')
+    # Try to change the password with invalid credentials. Should see an error message.
+    with pytest.raises(ldap.INVALID_CREDENTIALS):
+        _change_password_with_own(topo, f'uid=dbyers,ou=People,{DEFAULT_SUFFIX}', 'AB', 'dbyers1')
+
+
+def test_password_check_syntax(topo, _fix_password):
+    """Password check syntax
+
+    :id: 1e6fcc9e-5a20-11ea-9659-8c16451d917b
+    :setup: Standalone
+    :steps:
+        1. Sets Password check syntax to on
+        2. Try to change to a password that violates length. Should get error
+        3. Attempt to Modify password to db which is in error to policy
+        4. Change min pw length to 5
+        5. Attempt to Modify password to dby3rs which is in error to policy
+        6.
Attempt to Modify password to danny which is in error to policy + 7. Attempt to Modify password to byers which is in error to policy + 8. Change min pw length to 6 + 9. Try to change the password + 10. Trying to set to a password containing value of sn + 11. Sets policy to not check pw syntax + 12. Test that when checking syntax is off, you can use small passwords + 13. Test that when checking syntax is off, trivial passwords can be used + 14. Changing password minimum length from 6 to 10 + 15. Setting policy to Check Password Syntax again + 16. Try to change to a password that violates length + 17. Reset Password + :expected results: + 1. Success + 2. Success + 3. Success + 4. Success + 5. Success + 6. Success + 7. Success + 8. Success + 9. Success + 10. Success + 11. Success + 12. Success + 13. Success + 14. Success + 15. Success + 16. Fail + 17. Success + """ + config = Config(topo.standalone) + # Sets Password check syntax to on + config.replace('passwordchecksyntax', 'on') + # Try to change to a password that violates length. Should get error + with pytest.raises(ldap.CONSTRAINT_VIOLATION): + _change_password_with_own(topo, f'uid=dbyers,ou=People,{DEFAULT_SUFFIX}', 'dbyers1', 'dbyers2') + # Attempt to Modify password to db which is in error to policy + with pytest.raises(ldap.CONSTRAINT_VIOLATION): + _change_password_with_own(topo, f'uid=dbyers,ou=People,{DEFAULT_SUFFIX}', 'dbyers1', 'db') + # change min pw length to 5 + config.replace('passwordminlength', '5') + # Attempt to Modify password to dby3rs which is in error to policy + # Attempt to Modify password to danny which is in error to policy + # Attempt to Modify password to byers which is in error to policy + for password in ['dbyers', 'Danny', 'byers']: + with pytest.raises(ldap.CONSTRAINT_VIOLATION): + _change_password_with_own(topo, f'uid=dbyers,ou=People,{DEFAULT_SUFFIX}', 'dbyers1', password) + # Change min pw length to 6 + config.replace('passwordminlength', '6') + # Try to change the password + # Trying to set to a password containing value of sn + for password in ['dby3rs1', 'dbyers2', '67Danny89', 'YAByers8']: + with pytest.raises(ldap.CONSTRAINT_VIOLATION): + _change_password_with_own(topo, f'uid=dbyers,ou=People,{DEFAULT_SUFFIX}', 'dbyers1', password) + # Sets policy to not check pw syntax + # Test that when checking syntax is off, you can use small passwords + # Test that when checking syntax is off, trivial passwords can be used + config.replace('passwordchecksyntax', 'off') + for password, new_pass in [('dbyers1', 'db'), ('db', 'dbyers'), ('dbyers', 'dbyers1')]: + _change_password_with_own(topo, f'uid=dbyers,ou=People,{DEFAULT_SUFFIX}', password, new_pass) + # Changing password minimum length from 6 to 10 + # Setting policy to Check Password Syntax again + config.replace_many( + ('passwordminlength', '10'), + ('passwordchecksyntax', 'on')) + # Try to change to a password that violates length + with pytest.raises(ldap.CONSTRAINT_VIOLATION): + _change_password_with_own(topo, f'uid=dbyers,ou=People,{DEFAULT_SUFFIX}', 'dbyers1', 'db') + UserAccount(topo.standalone, f'uid=dbyers,ou=People,{DEFAULT_SUFFIX}').replace('userpassword', 'dbyers1') + + +def test_too_big_password(topo, _fix_password): + """Test for long long password + + :id: 299a3fb4-5a20-11ea-bba8-8c16451d917b + :setup: Standalone + :steps: + 1. Setting policy to keep password histories + 2. Changing number of password in history to 3 + 3. Modify password from dby3rs1 to dby3rs2 + 4. Checking that the passwordhistory attribute has been added + 5. 
Add a password test for long long password + 6. Changing number of password in history to 6 and passwordhistory off + :expected results: + 1. Success + 2. Success + 3. Success + 4. Success + 5. Success + 6. Success + """ + config = Config(topo.standalone) + # Setting policy to keep password histories + config.replace_many( + ('passwordchecksyntax', 'off'), + ('passwordhistory', 'on')) + assert config.get_attr_val_utf8('passwordinhistory') == '6' + # Changing number of password in history to 3 + config.replace('passwordinhistory', '3') + # Modify password from dby3rs1 to dby3rs2 + _change_password_with_own(topo, f'uid=dbyers,ou=People,{DEFAULT_SUFFIX}', 'dbyers1', 'dbyers2') + with pytest.raises(ldap.CONSTRAINT_VIOLATION): + _change_password_with_own(topo, f'uid=dbyers,ou=People,{DEFAULT_SUFFIX}', 'dbyers2', 'dbyers1') + # Checking that the passwordhistory attribute has been added + assert UserAccount(topo.standalone, f'uid=dbyers,ou=People,{DEFAULT_SUFFIX}').get_attr_val_utf8('passwordhistory') + # Add a password test for long long password + long_pass = 50*'0123456789'+'LENGTH=510' + _change_password_with_own(topo, f'uid=dbyers,ou=People,{DEFAULT_SUFFIX}', 'dbyers2', long_pass) + with pytest.raises(ldap.CONSTRAINT_VIOLATION): + _change_password_with_own(topo, f'uid=dbyers,ou=People,{DEFAULT_SUFFIX}', long_pass, long_pass) + _change_password_with_root(topo, f'uid=dbyers,ou=People,{DEFAULT_SUFFIX}', 'dbyers1') + # Changing number of password in history to 6 and passwordhistory off + config.replace_many(('passwordhistory', 'off'), + ('passwordinhistory', '6')) + + +def test_pwminage(topo, _fix_password): + """Test pwminage + + :id: 2df7bf32-5a20-11ea-ad23-8c16451d917b + :setup: Standalone + :steps: + 1. Get pwminage; should be 0 currently + 2. Sets policy to pwminage 3 + 3. Change current password + 4. Try to change password again + 5. Try now after 3 secs is up, should work. + :expected results: + 1. Success + 2. Success + 3. Success + 4. Fail + 5. Success + """ + config = Config(topo.standalone) + # Get pwminage; should be 0 currently + assert config.get_attr_val_utf8('passwordminage') == '0' + # Sets policy to pwminage 3 + config.replace('passwordminage', '3') + # Change current password + _change_password_with_own(topo, f'uid=dbyers,ou=People,{DEFAULT_SUFFIX}', 'dbyers1', 'dbyers2') + # Try to change password again + with pytest.raises(ldap.CONSTRAINT_VIOLATION): + _change_password_with_own(topo, f'uid=dbyers,ou=People,{DEFAULT_SUFFIX}', 'dbyers2', 'dbyers1') + for _ in range(3): + time.sleep(1) + # Try now after 3 secs is up, should work. + _change_password_with_own(topo, f'uid=dbyers,ou=People,{DEFAULT_SUFFIX}', 'dbyers2', 'dbyers1') + config.replace('passwordminage', '0') + + +def test_invalid_credentials(topo, _fix_password): + """Test bind again with valid password: We should be locked + + :id: 3233ca78-5a20-11ea-8d35-8c16451d917b + :setup: Standalone + :steps: + 1. Search if passwordlockout is off + 2. Turns on passwordlockout + 3. sets lockout duration to 3 seconds + 4. Changing pw failure count reset duration to 3 sec and passwordminlength to 10 + 5. Try to bind with invalid credentials + 6. Change password to password lockout forever + 7. Try to bind with invalid credentials + 8. Now bind again with valid password: We should be locked + 9. Delete dby3rs before exiting + 10. Reset server + :expected results: + 1. Success + 2. Success + 3. Success + 4. Success + 5. Fail + 6. Success + 7. Success + 8. Success + 9. Success + 10. 
Success + """ + config = Config(topo.standalone) + # Search if passwordlockout is off + assert config.get_attr_val_utf8('passwordlockout') == 'off' + # Turns on passwordlockout + # sets lockout duration to 3 seconds + # Changing pw failure count reset duration to 3 sec and passwordminlength to 10 + config.replace_many( + ('passwordlockout', 'on'), + ('passwordlockoutduration', '3'), + ('passwordresetfailurecount', '3'), + ('passwordminlength', '10')) + # Try to bind with invalid credentials + for _ in range(3): + with pytest.raises(ldap.INVALID_CREDENTIALS): + _change_password_with_own(topo, f'uid=dbyers,ou=People,{DEFAULT_SUFFIX}', 'Invalid', 'dbyers1') + with pytest.raises(ldap.CONSTRAINT_VIOLATION): + _change_password_with_own(topo, f'uid=dbyers,ou=People,{DEFAULT_SUFFIX}', 'Invalid', 'dbyers1') + for _ in range(3): + time.sleep(1) + _change_password_with_own(topo, f'uid=dbyers,ou=People,{DEFAULT_SUFFIX}', 'dbyers1', 'dbyers1') + # Change password to password lockout forever + config.replace('passwordunlock', 'off') + # Try to bind with invalid credentials + for _ in range(3): + with pytest.raises(ldap.INVALID_CREDENTIALS): + _change_password_with_own(topo, f'uid=dbyers,ou=People,{DEFAULT_SUFFIX}', 'Invalid', 'dbyers1') + with pytest.raises(ldap.CONSTRAINT_VIOLATION): + _change_password_with_own(topo, f'uid=dbyers,ou=People,{DEFAULT_SUFFIX}', 'Invalid', 'dbyers1') + for _ in range(3): + time.sleep(1) + # Now bind again with valid password: We should be locked + with pytest.raises(ldap.CONSTRAINT_VIOLATION): + _change_password_with_own(topo, f'uid=dbyers,ou=People,{DEFAULT_SUFFIX}', 'dbyers1', 'dbyers1') + # Delete dby3rs before exiting + _change_password_with_root(topo, f'uid=dbyers,ou=People,{DEFAULT_SUFFIX}', 'dbyers1') + time.sleep(1) + _change_password_with_own(topo, f'uid=dbyers,ou=People,{DEFAULT_SUFFIX}', 'dbyers1', 'dbyers1') + # Reset server + config.replace_many( + ('passwordinhistory', '6'), + ('passwordlockout', 'off'), + ('passwordlockoutduration', '3600'), + ('passwordminlength', '6'), + ('passwordresetfailurecount', '600'), + ('passwordunlock', 'on')) + + +def test_expiration_date(topo, _fix_password): + """Test check the expiration date is still in the future + + :id: 3691739a-5a20-11ea-8712-8c16451d917b + :setup: Standalone + :steps: + 1. Password expiration + 2. Add a user with a password expiration date + 3. Modify their password + 4. Check the expiration date is still in the future + 5. Modify the password expiration date + 6. Check the expiration date is still in the future + 7. Change policy so that user can change passwords + 8. Deleting user + 9. Adding user + 10. Set password history ON + 11. Modify password Once + 12. Try to change the password with same one + :expected results: + 1. Success + 2. Success + 3. Success + 4. Success + 5. Success + 6. Success + 7. Success + 8. Success + 9. Success + 10. Success + 11. Success + 12. 
Fail + """ + # Add a user with a password expiration date + user = UserAccounts(topo.standalone, DEFAULT_SUFFIX).create_test_user() + user.replace_many( + ('userpassword', 'bind4now'), + ('passwordExpirationTime', '20380119031404Z')) + # Modify their password + user.replace('userPassword', 'secreter') + # Check the expiration date is still in the future + assert user.get_attr_val_utf8('passwordExpirationTime') == '20380119031404Z' + # Modify the password expiration date + user.replace('passwordExpirationTime', '20380119031405Z') + # Check the expiration date is still in the future + assert user.get_attr_val_utf8('passwordExpirationTime') == '20380119031405Z' + config = Config(topo.standalone) + # Change policy so that user can change passwords + config.replace('passwordchange', 'on') + # Deleting user + UserAccount(topo.standalone, f'uid=test_user_1000,ou=People,{DEFAULT_SUFFIX}').delete() + # Adding user + user = UserAccounts(topo.standalone, DEFAULT_SUFFIX).create_test_user() + # Set password history ON + config.replace('passwordhistory', 'on') + # Modify password Once + user.replace('userPassword', 'secreter') + time.sleep(1) + assert 'PBKDF2_SHA256' in user.get_attr_val_utf8('userPassword') + # Try to change the password with same one + for _ in range(3): + with pytest.raises(ldap.CONSTRAINT_VIOLATION): + _change_password_with_own(topo, user.dn, 'secreter', 'secreter') + user.delete() + + +def test_passwordlockout(topo, _fix_password): + """Test adding admin user diradmin to Directory Administrator group + + :id: 3ffcffda-5a20-11ea-a3af-8c16451d917b + :setup: Standalone + :steps: + 1. Account Lockout must be cleared on successful password change + 2. Adding admin user diradmin + 3. Adding admin user diradmin to Directory Administrator group + 4. Turn on passwordlockout + 5. Sets lockout duration to 30 seconds + 6. Sets failure count reset duration to 30 sec + 7. Sets max password bind failure count to 3 + 8. Reset password retry count (to 0) + 9. Try to bind with invalid credentials(3 times) + 10. Try to bind with valid pw, should give lockout error + 11. Reset password using admin login + 12. Try to login as the user to check the unlocking of account. Will also change + the password back to original + 13. Change to account lockout forever until reset + 14. Reset password retry count (to 0) + 15. Try to bind with invalid credentials(3 times) + 16. Try to bind with valid pw, should give lockout error + 17. Reset password using admin login + 18. Try to login as the user to check the unlocking of account. Will also change the + password back to original + :expected results: + 1. Success + 2. Success + 3. Success + 4. Success + 5. Success + 6. Success + 7. Success + 8. Success + 9. Fail + 10. Success + 11. Success + 12. Success + 13. Success + 14. Success + 15. Fail + 16. Success + 17. Success + 18. 
Success + """ + config = Config(topo.standalone) + # Adding admin user diradmin + user = UserAccounts(topo.standalone, DEFAULT_SUFFIX).create_test_user() + user.replace('userpassword', 'dby3rs2') + admin = _create_user(topo, 'diradmin', 'Anuj Borah', '1002', 'diradmin') + # Adding admin user diradmin to Directory Administrator group + Group(topo.standalone, f'cn=Directory Administrators,{DEFAULT_SUFFIX}').add('uniquemember', admin.dn) + # Turn on passwordlockout + # Sets lockout duration to 30 seconds + # Sets failure count reset duration to 30 sec + # Sets max password bind failure count to 3 + # Reset password retry count (to 0) + config.replace_many( + ('passwordlockout', 'on'), + ('passwordlockoutduration', '30'), + ('passwordresetfailurecount', '30'), + ('passwordmaxfailure', '3'), + ('passwordhistory', 'off')) + user.replace('passwordretrycount', '0') + # Try to bind with invalid credentials(3 times) + for _ in range(3): + with pytest.raises(ldap.INVALID_CREDENTIALS): + _change_password_with_own(topo, user.dn, 'Invalid', 'secreter') + # Try to bind with valid pw, should give lockout error + with pytest.raises(ldap.CONSTRAINT_VIOLATION): + _change_password_with_own(topo, user.dn, 'Invalid', 'secreter') + # Reset password using admin login + conn = admin.bind('diradmin') + UserAccount(conn, user.dn).replace('userpassword', 'dby3rs2') + time.sleep(1) + # Try to login as the user to check the unlocking of account. Will also change + # the password back to original + _change_password_with_own(topo, user.dn, 'dby3rs2', 'secreter') + # Change to account lockout forever until reset + # Reset password retry count (to 0) + config.replace('passwordunlock', 'off') + user.replace('passwordretrycount', '0') + # Try to bind with invalid credentials(3 times) + for _ in range(3): + with pytest.raises(ldap.INVALID_CREDENTIALS): + _change_password_with_own(topo, user.dn, 'Invalid', 'secreter') + # Try to bind with valid pw, should give lockout error + with pytest.raises(ldap.CONSTRAINT_VIOLATION): + _change_password_with_own(topo, user.dn, 'Invalid', 'secreter') + # Reset password using admin login + UserAccount(conn, user.dn).replace('userpassword', 'dby3rs2') + time.sleep(1) + # Try to login as the user to check the unlocking of account. Will also change the + # password back to original + _change_password_with_own(topo, user.dn, 'dby3rs2', 'secreter') + + +if __name__ == "__main__": + CURRENT_FILE = os.path.realpath(__file__) + pytest.main("-s -v %s" % CURRENT_FILE) \ No newline at end of file diff --git a/dirsrvtests/tests/suites/password/regression_test.py b/dirsrvtests/tests/suites/password/regression_test.py new file mode 100644 index 0000000..38c32ce --- /dev/null +++ b/dirsrvtests/tests/suites/password/regression_test.py @@ -0,0 +1,325 @@ +# Copyright (C) 2017 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. 
+# --- END COPYRIGHT BLOCK --- +# +import pytest +import time +from lib389._constants import PASSWORD, DN_DM, DEFAULT_SUFFIX +from lib389._constants import SUFFIX, PASSWORD, DN_DM, DN_CONFIG, PLUGIN_RETRO_CHANGELOG, DEFAULT_SUFFIX, DEFAULT_CHANGELOG_DB +from lib389 import Entry +from lib389.topologies import topology_m1 as topo_master +from lib389.idm.user import UserAccounts +from lib389.utils import ldap, os, logging, ensure_bytes, ds_is_newer +from lib389.topologies import topology_st as topo +from lib389.idm.organizationalunit import OrganizationalUnits + +pytestmark = pytest.mark.tier1 + +DEBUGGING = os.getenv("DEBUGGING", default=False) +if DEBUGGING: + logging.getLogger(__name__).setLevel(logging.DEBUG) +else: + logging.getLogger(__name__).setLevel(logging.INFO) +log = logging.getLogger(__name__) + +user_data = {'cn': 'CNpwtest1', 'sn': 'SNpwtest1', 'uid': 'UIDpwtest1', 'mail': 'MAILpwtest1@redhat.com', + 'givenname': 'GNpwtest1'} + +TEST_PASSWORDS = list(user_data.values()) +# Add substring/token values of "CNpwtest1" +TEST_PASSWORDS += ['CNpwtest1ZZZZ', 'ZZZZZCNpwtest1', + 'ZCNpwtest1', 'CNpwtest1Z', 'ZCNpwtest1Z', + 'ZZCNpwtest1', 'CNpwtest1ZZ', 'ZZCNpwtest1ZZ', + 'ZZZCNpwtest1', 'CNpwtest1ZZZ', 'ZZZCNpwtest1ZZZ', + 'ZZZZZZCNpwtest1ZZZZZZZZ'] + +TEST_PASSWORDS2 = ( + 'CN12pwtest31', 'SN3pwtest231', 'UID1pwtest123', 'MAIL2pwtest12@redhat.com', '2GN1pwtest123', 'People123') + +def _check_unhashed_userpw(inst, user_dn, is_present=False): + """Check if unhashed#user#password attribute is present or not in the changelog""" + unhashed_pwd_attribute = 'unhashed#user#password' + + changelog_dbdir = os.path.join(os.path.dirname(inst.dbdir), DEFAULT_CHANGELOG_DB) + for dbfile in os.listdir(changelog_dbdir): + if dbfile.endswith('.db'): + changelog_dbfile = os.path.join(changelog_dbdir, dbfile) + log.info('Changelog dbfile file exist: {}'.format(changelog_dbfile)) + log.info('Running dbscan -f to check {} attr'.format(unhashed_pwd_attribute)) + dbscanOut = inst.dbscan(DEFAULT_CHANGELOG_DB, changelog_dbfile) + for entry in dbscanOut.split(b'dbid: '): + if ensure_bytes('operation: modify') in entry and ensure_bytes(user_dn) in entry and ensure_bytes('userPassword') in entry: + if is_present: + assert ensure_bytes(unhashed_pwd_attribute) in entry + else: + assert ensure_bytes(unhashed_pwd_attribute) not in entry + +@pytest.fixture(scope="module") +def passw_policy(topo, request): + """Configure password policy with PasswordCheckSyntax attribute set to on""" + + log.info('Configure Pwpolicy with PasswordCheckSyntax and nsslapd-pwpolicy-local set to on') + topo.standalone.simple_bind_s(DN_DM, PASSWORD) + topo.standalone.config.set('PasswordExp', 'on') + topo.standalone.config.set('PasswordCheckSyntax', 'off') + topo.standalone.config.set('nsslapd-pwpolicy-local', 'on') + + subtree = 'ou=people,{}'.format(DEFAULT_SUFFIX) + log.info('Configure subtree password policy for {}'.format(subtree)) + topo.standalone.subtreePwdPolicy(subtree, {'passwordchange': b'on', + 'passwordCheckSyntax': b'on', + 'passwordLockout': b'on', + 'passwordResetFailureCount': b'3', + 'passwordLockoutDuration': b'3', + 'passwordMaxFailure': b'2'}) + time.sleep(1) + + def fin(): + log.info('Reset pwpolicy configuration settings') + topo.standalone.simple_bind_s(DN_DM, PASSWORD) + topo.standalone.config.set('PasswordExp', 'off') + topo.standalone.config.set('PasswordCheckSyntax', 'off') + topo.standalone.config.set('nsslapd-pwpolicy-local', 'off') + + request.addfinalizer(fin) + + +@pytest.fixture(scope="module") +def 
create_user(topo, request): + """Add test users using UserAccounts""" + + log.info('Adding user-uid={},ou=people,{}'.format(user_data['uid'], DEFAULT_SUFFIX)) + users = UserAccounts(topo.standalone, DEFAULT_SUFFIX) + user_properties = { + 'uidNumber': '1001', + 'gidNumber': '2001', + 'cn': 'pwtest1', + 'userpassword': PASSWORD, + 'homeDirectory': '/home/pwtest1'} + user_properties.update(user_data) + tuser = users.create(properties=user_properties) + + def fin(): + log.info('Deleting user-{}'.format(tuser.dn)) + tuser.delete() + + request.addfinalizer(fin) + return tuser + + +def test_pwp_local_unlock(topo, passw_policy, create_user): + """Test subtree policies use the same global default for passwordUnlock + + :id: 741a8417-5f65-4012-b9ed-87987ce3ca1b + :setup: Standalone instance + :steps: + 1. Test user can bind + 2. Bind with bad passwords to lockout account, and verify account is locked + 3. Wait for lockout interval, and bind with valid password + :expectedresults: + 1. Bind successful + 2. Entry is locked + 3. Entry can bind with correct password + """ + # Add aci so users can change their own password + USER_ACI = '(targetattr="userpassword")(version 3.0; acl "pwp test"; allow (all) userdn="ldap:///self";)' + ous = OrganizationalUnits(topo.standalone, DEFAULT_SUFFIX) + ou = ous.get('people') + ou.add('aci', USER_ACI) + + log.info("Verify user can bind...") + create_user.bind(PASSWORD) + + log.info('Test passwordUnlock default - user should be able to reset password after lockout') + for i in range(0, 2): + try: + create_user.bind("bad-password") + except ldap.INVALID_CREDENTIALS: + # expected + pass + except ldap.LDAPError as e: + log.fatal("Got unexpected failure: " + str(e)) + raise e + + log.info('Verify account is locked') + with pytest.raises(ldap.CONSTRAINT_VIOLATION): + create_user.bind(PASSWORD) + + log.info('Wait for lockout duration...') + time.sleep(4) + + log.info('Check if user can now bind with correct password') + create_user.bind(PASSWORD) + + +@pytest.mark.bz1465600 +@pytest.mark.parametrize("user_pasw", TEST_PASSWORDS) +def test_trivial_passw_check(topo, passw_policy, create_user, user_pasw): + """PasswordCheckSyntax attribute fails to validate cn, sn, uid, givenname, ou and mail attributes + + :id: bf9fe1ef-56cb-46a3-a6f8-5530398a06dc + :parametrized: yes + :setup: Standalone instance. + :steps: + 1. Configure local password policy with PasswordCheckSyntax set to on. + 2. Add users with cn, sn, uid, givenname, mail and userPassword attributes. + 3. Configure subtree password policy for ou=people subtree. + 4. Reset userPassword with trivial values like cn, sn, uid, givenname, ou and mail attributes. + :expectedresults: + 1. Enabling PasswordCheckSyntax should PASS. + 2. Add users should PASS. + 3. Configure subtree password policy should PASS. + 4. Resetting userPassword to cn, sn, uid and mail should be rejected. 
+ """ + + create_user.rebind(PASSWORD) + log.info('Replace userPassword attribute with {}'.format(user_pasw)) + with pytest.raises(ldap.CONSTRAINT_VIOLATION) as excinfo: + create_user.reset_password(user_pasw) + log.fatal('Failed: Userpassword with {} is accepted'.format(user_pasw)) + assert 'password based off of user entry' in str(excinfo.value) + + # reset password + topo.standalone.simple_bind_s(DN_DM, PASSWORD) + create_user.set('userPassword', PASSWORD) + + +@pytest.mark.parametrize("user_pasw", TEST_PASSWORDS) +def test_global_vs_local(topo, passw_policy, create_user, user_pasw): + """Passwords rejected if its similar to uid, cn, sn, givenname, ou and mail attributes + + :id: dfd6cf5d-8bcd-4895-a691-a43ad9ec1be8 + :parametrized: yes + :setup: Standalone instance + :steps: + 1. Configure global password policy with PasswordCheckSyntax set to off + 2. Add users with cn, sn, uid, mail, givenname and userPassword attributes + 3. Replace userPassword similar to cn, sn, uid, givenname, ou and mail attributes + :expectedresults: + 1. Disabling the local policy should PASS. + 2. Add users should PASS. + 3. Resetting userPasswords similar to cn, sn, uid, givenname, ou and mail attributes should PASS. + """ + + log.info('Configure Pwpolicy with PasswordCheckSyntax and nsslapd-pwpolicy-local set to off') + topo.standalone.simple_bind_s(DN_DM, PASSWORD) + topo.standalone.config.set('nsslapd-pwpolicy-local', 'off') + + create_user.rebind(PASSWORD) + log.info('Replace userPassword attribute with {}'.format(user_pasw)) + create_user.reset_password(user_pasw) + + # reset password + create_user.set('userPassword', PASSWORD) + +@pytest.mark.ds49789 +def test_unhashed_pw_switch(topo_master): + """Check that nsslapd-unhashed-pw-switch works corrently + + :id: e5aba180-d174-424d-92b0-14fe7bb0b92a + :setup: Master Instance + :steps: + 1. A Master is created, enable retrocl (not used here) + 2. create a set of users + 3. update userpassword of user1 and check that unhashed#user#password is not logged (default) + 4. udpate userpassword of user2 and check that unhashed#user#password is not logged ('nolog') + 5. udpate userpassword of user3 and check that unhashed#user#password is logged ('on') + :expectedresults: + 1. Success + 2. Success + 3 Success (unhashed#user#password is not logged in the replication changelog) + 4. Success (unhashed#user#password is not logged in the replication changelog) + 5. Success (unhashed#user#password is logged in the replication changelog) + """ + MAX_USERS = 10 + PEOPLE_DN = ("ou=people," + DEFAULT_SUFFIX) + + inst = topo_master.ms["master1"] + inst.modify_s("cn=Retro Changelog Plugin,cn=plugins,cn=config", + [(ldap.MOD_REPLACE, 'nsslapd-changelogmaxage', b'2m'), + (ldap.MOD_REPLACE, 'nsslapd-changelog-trim-interval', b"5s"), + (ldap.MOD_REPLACE, 'nsslapd-logAccess', b'on')]) + inst.config.loglevel(vals=[256 + 4], service='access') + inst.restart() + # If you need any test suite initialization, + # please, write additional fixture for that (including finalizer). + # Topology for suites are predefined in lib389/topologies.py. + + # enable dynamic plugins, memberof and retro cl plugin + # + log.info('Enable plugins...') + try: + inst.modify_s(DN_CONFIG, + [(ldap.MOD_REPLACE, + 'nsslapd-dynamic-plugins', + b'on')]) + except ldap.LDAPError as e: + ldap.error('Failed to enable dynamic plugins! 
' + e.message['desc']) + assert False + + #topology_st.standalone.plugins.enable(name=PLUGIN_MEMBER_OF) + inst.plugins.enable(name=PLUGIN_RETRO_CHANGELOG) + #topology_st.standalone.modify_s("cn=changelog,cn=ldbm database,cn=plugins,cn=config", [(ldap.MOD_REPLACE, 'nsslapd-cachememsize', str(100000))]) + inst.restart() + + log.info('create users and group...') + for idx in range(1, MAX_USERS): + try: + USER_DN = ("uid=member%d,%s" % (idx, PEOPLE_DN)) + inst.add_s(Entry((USER_DN, + {'objectclass': 'top extensibleObject'.split(), + 'uid': 'member%d' % (idx)}))) + except ldap.LDAPError as e: + log.fatal('Failed to add user (%s): error %s' % (USER_DN, e.message['desc'])) + assert False + + # Check default is that unhashed#user#password is not logged on 1.4.1.6+ + user = "uid=member1,%s" % (PEOPLE_DN) + inst.modify_s(user, [(ldap.MOD_REPLACE, + 'userpassword', + PASSWORD.encode())]) + inst.stop() + if ds_is_newer('1.4.1.6'): + _check_unhashed_userpw(inst, user, is_present=False) + else: + _check_unhashed_userpw(inst, user, is_present=True) + + # Check with nolog that unhashed#user#password is not logged + inst.modify_s(DN_CONFIG, + [(ldap.MOD_REPLACE, + 'nsslapd-unhashed-pw-switch', + b'nolog')]) + inst.restart() + user = "uid=member2,%s" % (PEOPLE_DN) + inst.modify_s(user, [(ldap.MOD_REPLACE, + 'userpassword', + PASSWORD.encode())]) + inst.stop() + _check_unhashed_userpw(inst, user, is_present=False) + + # Check with value 'on' that unhashed#user#password is logged + inst.modify_s(DN_CONFIG, + [(ldap.MOD_REPLACE, + 'nsslapd-unhashed-pw-switch', + b'on')]) + inst.restart() + user = "uid=member3,%s" % (PEOPLE_DN) + inst.modify_s(user, [(ldap.MOD_REPLACE, + 'userpassword', + PASSWORD.encode())]) + inst.stop() + _check_unhashed_userpw(inst, user, is_present=True) + + if DEBUGGING: + # Add debugging steps(if any)... + pass + + +if __name__ == '__main__': + # Run isolated + # -s for DEBUG mode + CURRENT_FILE = os.path.realpath(__file__) + pytest.main(["-s", CURRENT_FILE]) diff --git a/dirsrvtests/tests/suites/password/series_of_bugs_test.py b/dirsrvtests/tests/suites/password/series_of_bugs_test.py new file mode 100644 index 0000000..b34e785 --- /dev/null +++ b/dirsrvtests/tests/suites/password/series_of_bugs_test.py @@ -0,0 +1,134 @@ +""" +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2020 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. +# --- END COPYRIGHT BLOCK --- +""" + +import os +import pytest +from lib389.topologies import topology_st as topo +from lib389.idm.user import UserAccounts, UserAccount +from lib389._constants import DEFAULT_SUFFIX +from lib389.pwpolicy import PwPolicyManager +from lib389.config import Config +from lib389.idm.domain import Domain +import time + +pytestmark = pytest.mark.tier1 + + +def _create_user(topo, uid, ou): + user = UserAccounts(topo.standalone, DEFAULT_SUFFIX, rdn=ou).create(properties={ + 'uid': uid, + 'cn': uid, + 'sn': uid, + 'mail': f'{uid}@example.com', + 'homeDirectory': f'/home/{uid}', + 'uidNumber': '1000', + 'gidNumber': '1000' + }) + return user + + +def change_pwp_parameter(topo, pwp, operation, to_do): + pwp1 = PwPolicyManager(topo.standalone) + user = pwp1.get_pwpolicy_entry(f'{pwp},{DEFAULT_SUFFIX}') + user.replace(operation, to_do) + + +def change_password_of_user(topo, user_password_new_pass_list, pass_to_change): + """ + Will change password with self binding. 
+ """ + for user, password, new_pass in user_password_new_pass_list: + real_user = UserAccount(topo.standalone, f'{user},{DEFAULT_SUFFIX}') + conn = real_user.bind(password) + UserAccount(conn, pass_to_change).replace('userpassword', new_pass) + + +@pytest.mark.bug1044164 +def test_local_password_policy(topo): + """Regression test for bug1044164 part 1. + + :id: d6f4a7fa-473b-11ea-8766-8c16451d917b + :setup: Standalone + :steps: + 1. Add a User as Password Admin + 2. Create a password admin user entry + 3. Add an aci to allow this user all rights + 4. Configure password admin + 5. Create local password policy and enable passwordmustchange + :expected results: + 1. Success + 2. Success + 3. Success + 4. Success + 5. Success + """ + user = _create_user(topo, 'pwadm_admin_1', None) + user.replace('userpassword', 'Secret123') + Domain(topo.standalone, DEFAULT_SUFFIX).set("aci", + f'(targetattr ="userpassword")(version 3.0;acl ' + f'"Allow password admin to write user ' + f'passwords";allow (write)(userdn = "ldap:///{user.dn}");)') + Config(topo.standalone).replace_many( + ('passwordAdminDN', user.dn), + ('passwordMustChange', 'off'), + ('nsslapd-pwpolicy-local', 'on')) + + +@pytest.mark.bug1044164 +def test_admin_user_to_perform_password_update(topo): + """Regression test for bug1044164 part 2. + + :id: 374fadc0-473c-11ea-9291-8c16451d917b + :setup: Standalone + :steps: + 1. Add another generic user but do not include the password (userpassword) + 2. Use admin user to perform a password update on generic user + 3. We don't need this ACI anymore. Delete it + :expected results: + 1. Success + 2. Success + 3. Success + """ + for uid, ou_ou in [('pwadm_user_1', None), ('pwadm_user_2', 'ou=People')]: + _create_user(topo, uid, ou_ou) + real_user = UserAccount(topo.standalone, f'uid=pwadm_admin_1,{DEFAULT_SUFFIX}') + conn = real_user.bind('Secret123') + UserAccount(conn, f'uid=pwadm_user_1,{DEFAULT_SUFFIX}').replace('userpassword', 'hello') + Domain(topo.standalone, DEFAULT_SUFFIX).remove('aci', + '(targetattr ="userpassword")(version 3.0;acl ' + '"Allow password admin to write user ' + 'passwords";allow (write)' + '(userdn = "ldap:///uid=pwadm_admin_1,dc=example,dc=com");)') + + +@pytest.mark.bug1118006 +def test_passwordexpirationtime_attribute(topo): + """Regression test for bug1118006. + + :id: 867472d2-473c-11ea-b583-8c16451d917b + :setup: Standalone + :steps: + 1. Check that the passwordExpirationTime attribute is set to the epoch date + :expected results: + 1. 
Success + """ + Config(topo.standalone).replace('passwordMustChange', 'on') + epoch_date = "19700101000000Z" + time.sleep(1) + UserAccount(topo.standalone, f'uid=pwadm_user_1,{DEFAULT_SUFFIX}').replace('userpassword', 'Secret123') + time.sleep(1) + assert UserAccount(topo.standalone, f'uid=pwadm_user_1,{DEFAULT_SUFFIX}').get_attr_val_utf8('passwordExpirationTime') == epoch_date + Config(topo.standalone).replace('passwordMustChange', 'off') + time.sleep(1) + + +if __name__ == "__main__": + CURRENT_FILE = os.path.realpath(__file__) + pytest.main("-s -v %s" % CURRENT_FILE) \ No newline at end of file diff --git a/dirsrvtests/tests/suites/plugins/__init__.py b/dirsrvtests/tests/suites/plugins/__init__.py new file mode 100644 index 0000000..fe45a34 --- /dev/null +++ b/dirsrvtests/tests/suites/plugins/__init__.py @@ -0,0 +1,3 @@ +""" + :Requirement: 389-ds-base: Directory Server Plugins +""" diff --git a/dirsrvtests/tests/suites/plugins/acceptance_test.py b/dirsrvtests/tests/suites/plugins/acceptance_test.py new file mode 100644 index 0000000..c4b68e2 --- /dev/null +++ b/dirsrvtests/tests/suites/plugins/acceptance_test.py @@ -0,0 +1,1805 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2016 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. +# --- END COPYRIGHT BLOCK --- +# +''' +Created on Dec 09, 2014 + +@author: mreynolds +''' +import logging +import subprocess +import pytest +from lib389.utils import * +from lib389.plugins import * +from lib389._constants import * +from lib389.dseldif import DSEldif +from lib389.idm.user import UserAccounts +from lib389.idm.group import Groups +from lib389.idm.organizationalunit import OrganizationalUnits +from lib389.idm.domain import Domain +from lib389.topologies import create_topology, topology_i2 as topo + +pytestmark = pytest.mark.tier1 + +log = logging.getLogger(__name__) + +USER_DN = 'uid=test_user_1001,ou=people,dc=example,dc=com' +USER_PW = 'password' +GROUP_DN = 'cn=group,' + DEFAULT_SUFFIX +CONFIG_AREA = 'nsslapd-pluginConfigArea' + +if ds_is_older('1.3.7'): + MEMBER_ATTR = 'member' +else: + MEMBER_ATTR = 'memberOf' + +''' + Functional tests for each plugin + + Test: + plugin restarts (test when on and off) + plugin config validation + plugin dependencies + plugin functionality (including plugin tasks) +''' + + +def check_dependency(inst, plugin, online=True): + """Set the "account usability" plugin to depend on this plugin. + This plugin is generic, always enabled, and perfect for our testing + """ + + acct_usability = AccountUsabilityPlugin(inst) + acct_usability.replace('nsslapd-plugin-depends-on-named', plugin.rdn) + + if online: + with pytest.raises(ldap.UNWILLING_TO_PERFORM): + plugin.disable() + # Now undo the change + acct_usability.remove('nsslapd-plugin-depends-on-named', plugin.rdn) + else: + plugin.disable() + with pytest.raises((subprocess.CalledProcessError, ValueError)): + inst.restart() + dse_ldif = DSEldif(inst) + dse_ldif.delete(acct_usability.dn, 'nsslapd-plugin-depends-on-named') + dse_ldif.replace(plugin.dn, 'nsslapd-pluginEnabled', 'on') + inst.start() + + +def test_acctpolicy(topo, args=None): + """Test Account policy basic functionality + + :id: 9b87493b-0493-46f9-8364-6099d0e5d829 + :setup: Standalone Instance + :steps: + 1. Enable the plugin + 2. Restart the instance + 3. Add a config entry for 'lastLoginTime' + 4. Add a user + 5. Bind as the user + 6. Check testLastLoginTime was added to the user + 7. 
Replace 'stateattrname': 'testLastLoginTime' + 8. Bind as the user + 9. Check testLastLoginTime was added to the user + 10. Check nsslapd-plugin-depends-on-named for the plugin + 11. Clean up + :expectedresults: + 1. Success + 2. Success + 3. Success + 4. Success + 5. Success + 6. Success + 7. Success + 8. Success + 9. Success + 10. Success + 11. Success + """ + + inst = topo[0] + + # stop the plugin, and start it + plugin = AccountPolicyPlugin(inst) + plugin.disable() + plugin.enable() + + if args == "restart": + return True + + # If args is None then we run the test suite as pytest standalone and it's not dynamic + if args is None: + inst.restart() + + log.info('Testing {}'.format(PLUGIN_ACCT_POLICY)) + + ############################################################################ + # Configure plugin + ############################################################################ + # Add the config entry + ap_configs = AccountPolicyConfigs(inst) + try: + ap_config = ap_configs.create(properties={'cn': 'config', + 'alwaysrecordlogin': 'yes', + 'stateattrname': 'lastLoginTime'}) + except ldap.ALREADY_EXISTS: + ap_config = ap_configs.get('config') + ap_config.replace_many(('alwaysrecordlogin', 'yes'), + ('stateattrname', 'lastLoginTime')) + + ############################################################################ + # Test plugin + ############################################################################ + # Add an entry + users = UserAccounts(inst, DEFAULT_SUFFIX) + user = users.create_test_user(1000, 2000) + user.add('objectclass', 'extensibleObject') + user.replace('userPassword', USER_PW) + + # Bind as user + user.bind(USER_PW) + time.sleep(1) + + # Check lastLoginTime of USER1 + entries = inst.search_s(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE, 'lastLoginTime=*') + assert entries + + ############################################################################ + # Change config - change the stateAttrName to a new attribute + ############################################################################ + test_attribute = "( 2.16.840.1.113719.1.1.4.1.35999 \ + NAME 'testLastLoginTime' DESC 'Test Last login time' \ + SYNTAX 1.3.6.1.4.1.1466.115.121.1.24 SINGLE-VALUE USAGE \ + directoryOperation X-ORIGIN 'dirsrvtests' )" + Schema(inst).add('attributetypes', test_attribute) + ap_config.replace('stateattrname', 'testLastLoginTime') + + ############################################################################ + # Test plugin + ############################################################################ + # login as user + user.bind(USER_PW) + time.sleep(1) + + # Check testLastLoginTime was added to USER1 + entries = inst.search_s(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE, '(testLastLoginTime=*)') + assert entries + + ############################################################################ + # Test plugin dependency + ############################################################################ + check_dependency(inst, plugin, online=isinstance(args, str)) + + ############################################################################ + # Cleanup + ############################################################################ + user.delete() + + ############################################################################ + # Test passed + ############################################################################ + log.info('test_acctpolicy: PASS\n') + + return + + +def test_attruniq(topo, args=None): + """Test Attribute uniqueness basic functionality + + :id: 9b87493b-0493-46f9-8364-6099d0e5d801 
+ :setup: Standalone Instance + :steps: + 1. Enable the plugin + 2. Restart the instance + 3. Add a user: with 'mail' and 'mailAlternateAddress' attributes + 4. Replace 'uniqueness-attribute-name': 'cn' + 5. Try to add a user with the same 'cn' + 6. Replace 'uniqueness-attribute-name': 'mail' + 7. Try to add a user with the same 'mail' + 8. Add 'uniqueness-attribute-name': 'mailAlternateAddress' + 9. Try to add a user with the same 'mailAlternateAddress' + 10. Check nsslapd-plugin-depends-on-named for the plugin + 11. Clean up + :expectedresults: + 1. Success + 2. Success + 3. Success + 4. Success + 5. Should fail + 6. Success + 7. Should fail + 8. Success + 9. Should fail + 10. Success + 11. Success + """ + + inst = topo[0] + + # stop the plugin, and start it + plugin = AttributeUniquenessPlugin(inst) + plugin.disable() + plugin.enable() + + if args == "restart": + return + + # If args is None then we run the test suite as pytest standalone and it's not dynamic + if args is None: + inst.restart() + + log.info('Testing {}'.format(PLUGIN_ATTR_UNIQUENESS)) + user1_dict = {'objectclass': 'extensibleObject', + 'uid': 'testuser1', + 'cn': 'testuser1', + 'sn': 'user1', + 'uidNumber': '1001', + 'gidNumber': '2001', + 'mail': 'user1@example.com', + 'mailAlternateAddress': 'user1@alt.example.com', + 'homeDirectory': '/home/testuser1', + 'userpassword': 'password'} + user2_dict = {'objectclass': 'extensibleObject', + 'uid': 'testuser2', + 'cn': 'testuser2', + 'sn': 'user2', + 'uidNumber': '1000', + 'gidNumber': '2000', + 'homeDirectory': '/home/testuser2', + 'userpassword': 'password'} + + ############################################################################ + # Configure plugin + ############################################################################ + plugin.replace('uniqueness-attribute-name', 'cn') + if args is None: + inst.restart() + + ############################################################################ + # Test plugin + ############################################################################ + # Add an entry + users = UserAccounts(inst, DEFAULT_SUFFIX) + user1 = users.create(properties=user1_dict) + + # Add an entry with a duplicate "cn" + with pytest.raises(ldap.CONSTRAINT_VIOLATION): + user2_dict['cn'] = 'testuser1' + users.create(properties=user2_dict) + + ############################################################################ + # Change config to use "mail" instead of "uid" + ############################################################################ + + plugin.replace('uniqueness-attribute-name', 'mail') + + ############################################################################ + # Test plugin - Add an entry, that has a duplicate "mail" value + ############################################################################ + with pytest.raises(ldap.CONSTRAINT_VIOLATION): + user2_dict['mail'] = 'user1@example.com' + users.create(properties=user2_dict) + + ############################################################################ + # Reconfigure plugin for mail and mailAlternateAddress + ############################################################################ + plugin.add('uniqueness-attribute-name', 'mailAlternateAddress') + + ############################################################################ + # Test plugin - Add an entry, that has a duplicate "mail" value + ############################################################################ + with pytest.raises(ldap.CONSTRAINT_VIOLATION): + user2_dict['mail'] = 'user1@example.com' + 
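+        # 'mail' is still listed in uniqueness-attribute-name, so this create() must be rejected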
users.create(properties=user2_dict) + + ############################################################################ + # Test plugin - Add an entry, that has a duplicate "mailAlternateAddress" value + ############################################################################ + with pytest.raises(ldap.CONSTRAINT_VIOLATION): + user2_dict['mailAlternateAddress'] = 'user1@alt.example.com' + users.create(properties=user2_dict) + + ############################################################################ + # Test plugin - Add an entry, that has a duplicate "mail" value conflicting mailAlternateAddress + ############################################################################ + with pytest.raises(ldap.CONSTRAINT_VIOLATION): + user2_dict['mail'] = 'user1@alt.example.com' + users.create(properties=user2_dict) + + ############################################################################ + # Test plugin - Add an entry, that has a duplicate "mailAlternateAddress" conflicting mail + ############################################################################ + with pytest.raises(ldap.CONSTRAINT_VIOLATION): + user2_dict['mailAlternateAddress'] = 'user1@example.com' + users.create(properties=user2_dict) + + ############################################################################ + # Test plugin dependency + ############################################################################ + check_dependency(inst, plugin, online=isinstance(args, str)) + + ############################################################################ + # Cleanup + ############################################################################ + user1.delete() + + ############################################################################ + # Test passed + ############################################################################ + log.info('test_attruniq: PASS\n') + return + + +def test_automember(topo, args=None): + """Test Auto Membership basic functionality + + :id: 9b87493b-0493-46f9-8364-6099d0e5d802 + :setup: Standalone Instance + :steps: + 1. Enable the plugin + 2. Restart the instance + 3. Add a group + 4. Add two Organisation Units entries + 5. Add a config entry for the group and one branch + 6. Add a user that should get added to the group + 7. Check the entry is in group + 8. Set groupattr to 'uniquemember:dn' and scope to branch2 + 9. Add a user that should get added to the group + 10. Check the group + 11. Disable plugin and restart + 12. Add an entry that should be picked up by automember + 13. Verify that the entry is not picked up by automember (yet) + 14. Check the group - uniquemember should not exist + 15. Enable plugin and restart + 16. Verify the fixup task worked + 17. Check nsslapd-plugin-depends-on-named for the plugin + 18. Clean up + :expectedresults: + 1. Success + 2. Success + 3. Success + 4. Success + 6. Success + 7. Success + 8. Success + 9. Success + 10. Success + 11. Success + 12. Success + 13. Success + 14. Success + 15. Success + 16. Success + 17. Success + 18. 
Success + """ + + inst = topo[0] + + # stop the plugin, and start it + plugin = AutoMembershipPlugin(inst) + plugin.disable() + plugin.enable() + + if args == "restart": + return + + # If args is None then we run the test suite as pytest standalone and it's not dynamic + if args is None: + inst.restart() + + log.info('Testing ' + PLUGIN_AUTOMEMBER + '...') + + ############################################################################ + # Configure plugin + ############################################################################ + + # Add the automember group + groups = Groups(inst, DEFAULT_SUFFIX) + group = groups.create(properties={'cn': 'group'}) + + ous = OrganizationalUnits(inst, DEFAULT_SUFFIX) + branch1 = ous.create(properties={'ou': 'branch1'}) + branch2 = ous.create(properties={'ou': 'branch2'}) + + # Add the automember config entry + am_configs = AutoMembershipDefinitions(inst) + am_config = am_configs.create(properties={'cn': 'config', + 'autoMemberScope': branch1.dn, + 'autoMemberFilter': 'objectclass=top', + 'autoMemberDefaultGroup': group.dn, + 'autoMemberGroupingAttr': '{}:dn'.format(MEMBER_ATTR)}) + + ############################################################################ + # Test the plugin + ############################################################################ + + users = UserAccounts(inst, DEFAULT_SUFFIX, rdn='ou={}'.format(branch1.rdn)) + # Add a user that should get added to the group + user1 = users.create_test_user(uid=1001) + + # Check the group + group_members = group.get_attr_vals_utf8(MEMBER_ATTR) + assert user1.dn in group_members + + ############################################################################ + # Change config + ############################################################################ + group.add('objectclass', 'groupOfUniqueNames') + am_config.set_groupattr('uniquemember:dn') + am_config.set_scope(branch2.dn) + + ############################################################################ + # Test plugin + ############################################################################ + # Add a user that should get added to the group + users = UserAccounts(inst, DEFAULT_SUFFIX, rdn='ou={}'.format(branch2.rdn)) + user2 = users.create_test_user(uid=1002) + + # Check the group + group_members = group.get_attr_vals_utf8('uniquemember') + assert user2.dn in group_members + + ############################################################################ + # Test Task + ############################################################################ + + # Disable plugin + plugin.disable() + + # If args is None then we run the test suite as pytest standalone and it's not dynamic + if args is None: + inst.restart() + + # Add an entry that should be picked up by automember - verify it is not(yet) + user3 = users.create_test_user(uid=1003) + + # Check the group - uniquemember should not exist + group_members = group.get_attr_vals_utf8('uniquemember') + assert user3.dn not in group_members + + # Enable plugin + plugin.enable() + + # If args is None then we run the test suite as pytest standalone and it's not dynamic + if args is None: + inst.restart() + task = plugin.fixup(branch2.dn, _filter='objectclass=top') + task.wait() + + # Verify the fixup task worked + group_members = group.get_attr_vals_utf8('uniquemember') + assert user3.dn in group_members + + ############################################################################ + # Test plugin dependency + ############################################################################ + 
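+    # check_dependency() (defined above) points the always-enabled Account Usability
+    # plugin at this plugin via nsslapd-plugin-depends-on-named, then verifies the
+    # plugin cannot be disabled online, or that the server refuses to start offline.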
check_dependency(inst, plugin, online=isinstance(args, str)) + + ############################################################################ + # Cleanup + ############################################################################ + user1.delete() + user2.delete() + user3.delete() + branch1.delete() + branch2.delete() + group.delete() + am_config.delete() + + ############################################################################ + # Test passed + ############################################################################ + log.info('test_automember: PASS\n') + return + + +def test_dna(topo, args=None): + """Test DNA basic functionality + + :id: 9b87493b-0493-46f9-8364-6099d0e5d803 + :setup: Standalone Instance + :steps: + 1. Enable the plugin + 2. Restart the instance + 3. Configure plugin for uidNumber + 4. Add a user + 5. See if the entry now has the new uidNumber assignment - uidNumber=1 + 6. Test the magic regen value + 7. See if the entry now has the new uidNumber assignment - uidNumber=2 + 8. Set 'dnaMagicRegen': '-2' + 9. Test the magic regen value + 10. See if the entry now has the new uidNumber assignment - uidNumber=3 + 11. Check nsslapd-plugin-depends-on-named for the plugin + 12. Clean up + :expectedresults: + 1. Success + 2. Success + 3. Success + 4. Success + 5. Success + 6. Success + 7. Success + 8. Success + 9. Success + 10. Success + 12. Success + """ + + inst = topo[0] + + # stop the plugin, and start it + plugin = DNAPlugin(inst) + plugin.disable() + plugin.enable() + + if args == "restart": + return + + # If args is None then we run the test suite as pytest standalone and it's not dynamic + if args is None: + inst.restart() + + log.info('Testing ' + PLUGIN_DNA + '...') + + ############################################################################ + # Configure plugin + ############################################################################ + dna_configs = DNAPluginConfigs(inst, plugin.dn) + try: + dna_config = dna_configs.create(properties={'cn': 'config', + 'dnatype': 'uidNumber', + 'dnafilter': '(objectclass=top)', + 'dnascope': DEFAULT_SUFFIX, + 'dnaMagicRegen': '-1', + 'dnaMaxValue': '50000', + 'dnaNextValue': '1'}) + except ldap.ALREADY_EXISTS: + dna_config = dna_configs.get('config') + dna_config.replace_many(('dnaNextValue', '1'), ('dnaMagicRegen', '-1')) + + ############################################################################ + # Test plugin + ############################################################################ + users = UserAccounts(inst, DEFAULT_SUFFIX) + user1 = users.create_test_user(uid=1) + + # See if the entry now has the new uidNumber assignment - uidNumber=1 + entries = inst.search_s(user1.dn, ldap.SCOPE_BASE, '(uidNumber=1)') + assert entries + + # Test the magic regen value + user1.replace('uidNumber', '-1') + + # See if the entry now has the new uidNumber assignment - uidNumber=2 + entries = inst.search_s(user1.dn, ldap.SCOPE_BASE, '(uidNumber=2)') + assert entries + + ################################################################################ + # Change the config + ################################################################################ + dna_config.replace('dnaMagicRegen', '-2') + + ################################################################################ + # Test plugin + ################################################################################ + + # Test the magic regen value + user1.replace('uidNumber', '-2') + + # See if the entry now has the new uidNumber assignment - uidNumber=3 + 
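+    # after the first assignment (1) and the first regen (2), the next DNA value is 3,
+    # so regenerating with the new magic value '-2' should yield uidNumber=3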
entries = inst.search_s(user1.dn, ldap.SCOPE_BASE, '(uidNumber=3)') + assert entries + + ############################################################################ + # Test plugin dependency + ############################################################################ + check_dependency(inst, plugin, online=isinstance(args, str)) + + ############################################################################ + # Cleanup + ############################################################################ + user1.delete() + dna_config.delete() + plugin.disable() + + # If args is None then we run the test suite as pytest standalone and it's not dynamic + if args is None: + inst.restart() + + ############################################################################ + # Test passed + ############################################################################ + log.info('test_dna: PASS\n') + return + + +def test_linkedattrs(topo, args=None): + """Test Linked Attributes basic functionality + + :id: 9b87493b-0493-46f9-8364-6099d0e5d804 + :setup: Standalone Instance + :steps: + 1. Enable the plugin + 2. Restart the instance + 3. Add a config entry for directReport + 4. Add test entries + 5. Add the linked attrs config entry + 6. User1 - Set "directReport" to user2 + 7. See if manager was added to the other entry + 8. User1 - Remove "directReport" + 9. See if manager was removed + 10. Change the config - using linkType "indirectReport" now + 11. Make sure the old linkType(directManager) is not working + 12. See if manager was added to the other entry, better not be... + 13. Now, set the new linkType "indirectReport", which should add "manager" to the other entry + 14. See if manager was added to the other entry, better not be + 15. Remove "indirectReport" should remove "manager" to the other entry + 16. See if manager was removed + 17. Disable plugin and make some updates that would of triggered the plugin + 18. The entry should not have a manager attribute + 19. Enable the plugin and rerun the task entry + 20. Add the task again + 21. Check if user2 now has a manager attribute now + 22. Check nsslapd-plugin-depends-on-named for the plugin + 23. Clean up + :expectedresults: + 1. Success + 2. Success + 3. Success + 4. Success + 5. Success + 6. Success + 7. Success + 8. Success + 9. Success + 10. Success + 11. Success + 12. Success + 13. Success + 14. Success + 15. Success + 16. Success + 17. Success + 18. Success + 19. Success + 20. Success + 21. Success + 22. Success + 23. 
Success + """ + + inst = topo[0] + + # stop the plugin, and start it + plugin = LinkedAttributesPlugin(inst) + plugin.disable() + plugin.enable() + + if args == "restart": + return + + # If args is None then we run the test suite as pytest standalone and it's not dynamic + if args is None: + inst.restart() + + log.info('Testing ' + PLUGIN_LINKED_ATTRS + '...') + + ############################################################################ + # Configure plugin + ############################################################################ + + # Add test entries + users = UserAccounts(inst, DEFAULT_SUFFIX) + user1 = users.create_test_user(uid=1001) + user1.add('objectclass', 'extensibleObject') + user2 = users.create_test_user(uid=1002) + user2.add('objectclass', 'extensibleObject') + + # Add the linked attrs config entry + la_configs = LinkedAttributesConfigs(inst) + la_config = la_configs.create(properties={'cn': 'config', + 'linkType': 'directReport', + 'managedType': 'manager'}) + + ############################################################################ + # Test plugin + ############################################################################ + # Set "directReport" should add "manager" to the other entry + user1.replace('directReport', user2.dn) + + # See if manager was added to the other entry + entries = inst.search_s(user2.dn, ldap.SCOPE_BASE, '(manager=*)') + assert entries + + # Remove "directReport" should remove "manager" to the other entry + user1.remove_all('directReport') + + # See if manager was removed + entries = inst.search_s(user2.dn, ldap.SCOPE_BASE, '(manager=*)') + assert not entries + + ############################################################################ + # Change the config - using linkType "indirectReport" now + ############################################################################ + la_config.replace('linkType', 'indirectReport') + + ############################################################################ + # Test plugin + ############################################################################ + # Make sure the old linkType(directManager) is not working + user1.replace('directReport', user2.dn) + + # See if manager was added to the other entry, better not be... 
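+    # linkType is now 'indirectReport', so writing 'directReport' must no longer populate 'manager'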
+ entries = inst.search_s(user2.dn, ldap.SCOPE_BASE, '(manager=*)') + assert not entries + + # Now, set the new linkType "indirectReport", which should add "manager" to the other entry + user1.replace('indirectReport', user2.dn) + + # See if manager was added to the other entry, better not be + entries = inst.search_s(user2.dn, ldap.SCOPE_BASE, '(manager=*)') + assert entries + + # Remove "indirectReport" should remove "manager" to the other entry + user1.remove_all('indirectReport') + + # See if manager was removed + entries = inst.search_s(user2.dn, ldap.SCOPE_BASE, '(manager=*)') + assert not entries + + ############################################################################ + # Test Fixup Task + ############################################################################ + # Disable plugin and make some updates that would of triggered the plugin + plugin.disable() + + # If args is None then we run the test suite as pytest standalone and it's not dynamic + if args is None: + inst.restart() + + user1.replace('indirectReport', user2.dn) + + # The entry should not have a manager attribute + entries = inst.search_s(user2.dn, ldap.SCOPE_BASE, '(manager=*)') + assert not entries + + # Enable the plugin and rerun the task entry + plugin.enable() + + # If args is None then we run the test suite as pytest standalone and it's not dynamic + if args is None: + inst.restart() + + # Add the task again + task = plugin.fixup(la_config.dn) + task.wait() + + # Check if user2 now has a manager attribute now + entries = inst.search_s(user2.dn, ldap.SCOPE_BASE, '(manager=*)') + assert entries + + ############################################################################ + # Test plugin dependency + ############################################################################ + check_dependency(inst, plugin, online=isinstance(args, str)) + + ############################################################################ + # Cleanup + ############################################################################ + user1.delete() + user2.delete() + la_config.delete() + + ############################################################################ + # Test passed + ############################################################################ + log.info('test_linkedattrs: PASS\n') + return + + +def test_memberof(topo, args=None): + """Test MemberOf basic functionality + + :id: 9b87493b-0493-46f9-8364-6099d0e5d805 + :setup: Standalone Instance + :steps: + 1. Enable the plugin + 2. Restart the instance + 3. Replace groupattr with 'member' + 4. Add our test entries + 5. Check if the user now has a "memberOf" attribute + 6. Remove "member" should remove "memberOf" from the entry + 7. Check that "memberOf" was removed + 8. Replace 'memberofgroupattr': 'uniquemember' + 9. Replace 'uniquemember': user1 + 10. Check if the user now has a "memberOf" attribute + 11. Remove "uniquemember" should remove "memberOf" from the entry + 12. Check that "memberOf" was removed + 13. The shared config entry uses "member" - the above test uses "uniquemember" + 14. Delete the test entries then read them to start with a clean slate + 15. Check if the user now has a "memberOf" attribute + 16. Check that "memberOf" was removed + 17. Replace 'memberofgroupattr': 'uniquemember' + 18. Check if the user now has a "memberOf" attribute + 19. Remove "uniquemember" should remove "memberOf" from the entry + 20. Check that "memberOf" was removed + 21. Replace 'memberofgroupattr': 'member' + 22. Remove shared config from plugin + 23. 
Check if the user now has a "memberOf" attribute + 24. Remove "uniquemember" should remove "memberOf" from the entry + 25. Check that "memberOf" was removed + 26. First change the plugin to use uniquemember + 27. Add uniquemember, should not update user1 + 28. Check for "memberOf" + 29. Enable memberof plugin + 30. Run the task and validate that it worked + 31. Check for "memberOf" + 32. Check nsslapd-plugin-depends-on-named for the plugin + 33. Clean up + :expectedresults: + 1. Success + 2. Success + 3. Success + 4. Success + 5. Success + 6. Success + 7. Success + 8. Success + 9. Success + 10. Success + 11. Success + 12. Success + 13. Success + 14. Success + 15. Success + 16. Success + 17. Success + 18. Success + 19. Success + 20. Success + 21. Success + 22. Success + 23. Success + 24. Success + 25. Success + 26. Success + 27. Success + 28. Success + 29. Success + 30. Success + 31. Success + 32. Success + 33. Success + """ + + inst = topo[0] + + # stop the plugin, and start it + plugin = MemberOfPlugin(inst) + plugin.disable() + plugin.enable() + + if args == "restart": + return + + # If args is None then we run the test suite as pytest standalone and it's not dynamic + if args is None: + inst.restart() + + log.info('Testing ' + PLUGIN_MEMBER_OF + '...') + + ############################################################################ + # Configure plugin + ############################################################################ + plugin.replace_groupattr('member') + + ############################################################################ + # Test plugin + ############################################################################ + # Add our test entries + users = UserAccounts(inst, DEFAULT_SUFFIX) + user1 = users.create_test_user(uid=1001) + + groups = Groups(inst, DEFAULT_SUFFIX) + group = groups.create(properties={'cn': 'group', + 'member': user1.dn}) + group.add('objectclass', 'groupOfUniqueNames') + + memberof_config = MemberOfSharedConfig(inst, 'cn=memberOf config,{}'.format(DEFAULT_SUFFIX)) + memberof_config.create(properties={'cn': 'memberOf config', + 'memberOfGroupAttr': 'member', + 'memberOfAttr': MEMBER_ATTR}) + + # Check if the user now has a "memberOf" attribute + entries = inst.search_s(user1.dn, ldap.SCOPE_BASE, '({}=*)'.format(MEMBER_ATTR)) + assert entries + + # Remove "member" should remove "memberOf" from the entry + group.remove_all('member') + + # Check that "memberOf" was removed + entries = inst.search_s(user1.dn, ldap.SCOPE_BASE, '({}=*)'.format(MEMBER_ATTR)) + assert not entries + + ############################################################################ + # Change the config + ############################################################################ + plugin.replace('memberofgroupattr', 'uniquemember') + + ############################################################################ + # Test plugin + ############################################################################ + group.replace('uniquemember', user1.dn) + + # Check if the user now has a "memberOf" attribute + entries = inst.search_s(user1.dn, ldap.SCOPE_BASE, '({}=*)'.format(MEMBER_ATTR)) + assert entries + + # Remove "uniquemember" should remove "memberOf" from the entry + group.remove_all('uniquemember') + + # Check that "memberOf" was removed + entries = inst.search_s(user1.dn, ldap.SCOPE_BASE, '({}=*)'.format(MEMBER_ATTR)) + assert not entries + + ############################################################################ + # Set the shared config entry and test the plugin + 
############################################################################ + # The shared config entry uses "member" - the above test uses "uniquemember" + plugin.set_configarea(memberof_config.dn) + if args is None: + inst.restart() + + # Delete the test entries then readd them to start with a clean slate + user1.delete() + group.delete() + + user1 = users.create_test_user(uid=1001) + group = groups.create(properties={'cn': 'group', + 'member': user1.dn}) + group.add('objectclass', 'groupOfUniqueNames') + + # Test the shared config + # Check if the user now has a "memberOf" attribute + entries = inst.search_s(user1.dn, ldap.SCOPE_BASE, '({}=*)'.format(MEMBER_ATTR)) + assert entries + + group.remove_all('member') + + # Check that "memberOf" was removed + entries = inst.search_s(user1.dn, ldap.SCOPE_BASE, '({}=*)'.format(MEMBER_ATTR)) + assert not entries + + ############################################################################ + # Change the shared config entry to use 'uniquemember' and test the plugin + ############################################################################ + memberof_config.replace('memberofgroupattr', 'uniquemember') + + group.replace('uniquemember', user1.dn) + + # Check if the user now has a "memberOf" attribute + entries = inst.search_s(user1.dn, ldap.SCOPE_BASE, '({}=*)'.format(MEMBER_ATTR)) + assert entries + + # Remove "uniquemember" should remove "memberOf" from the entry + group.remove_all('uniquemember') + + # Check that "memberOf" was removed + entries = inst.search_s(user1.dn, ldap.SCOPE_BASE, '({}=*)'.format(MEMBER_ATTR)) + assert not entries + + ############################################################################ + # Remove shared config from plugin, and retest + ############################################################################ + # First change the plugin to use member before we move the shared config that uses uniquemember + plugin.replace('memberofgroupattr', 'member') + + # Remove shared config from plugin + plugin.remove_configarea() + + group.replace('member', user1.dn) + + # Check if the user now has a "memberOf" attribute + entries = inst.search_s(user1.dn, ldap.SCOPE_BASE, '({}=*)'.format(MEMBER_ATTR)) + assert entries + + # Remove "uniquemember" should remove "memberOf" from the entry + group.remove_all('member') + + # Check that "memberOf" was removed + entries = inst.search_s(user1.dn, ldap.SCOPE_BASE, '({}=*)'.format(MEMBER_ATTR)) + assert not entries + + ############################################################################ + # Test Fixup Task + ############################################################################ + plugin.disable() + + # If args is None then we run the test suite as pytest standalone and it's not dynamic + if args is None: + inst.restart() + + # First change the plugin to use uniquemember + plugin.replace('memberofgroupattr', 'uniquemember') + + # Add uniquemember, should not update USER1 + group.replace('uniquemember', user1.dn) + + # Check for "memberOf" + entries = inst.search_s(user1.dn, ldap.SCOPE_BASE, '({}=*)'.format(MEMBER_ATTR)) + assert not entries + + # Enable memberof plugin + plugin.enable() + + # If args is None then we run the test suite as pytest standalone and it's not dynamic + if args is None: + inst.restart() + + ############################################################# + # Test memberOf fixup arg validation: Test the DN and filter + ############################################################# + for basedn, filter in (('{}bad'.format(DEFAULT_SUFFIX), 
'objectclass=top'), + ("bad", 'objectclass=top'), + (DEFAULT_SUFFIX, '(objectclass=top')): + task = plugin.fixup(basedn, filter) + task.wait() + exitcode = task.get_exit_code() + assert exitcode != "0", 'test_memberof: Task with invalid DN still reported success' + + #################################################### + # Test fixup works + #################################################### + # Run the task and validate that it worked + task = plugin.fixup(DEFAULT_SUFFIX, 'objectclass=top') + task.wait() + + # Check for "memberOf" + entries = inst.search_s(user1.dn, ldap.SCOPE_BASE, '({}=*)'.format(MEMBER_ATTR)) + assert entries + + ############################################################################ + # Test plugin dependency + ############################################################################ + check_dependency(inst, plugin, online=isinstance(args, str)) + + ############################################################################ + # Cleanup + ############################################################################ + user1.delete() + group.delete() + memberof_config.delete() + + ############################################################################ + # Test passed + ############################################################################ + log.info('test_memberof: PASS\n') + return + + +def test_mep(topo, args=None): + """Test Managed Entries basic functionality + + :id: 9b87493b-0493-46f9-8364-6099d0e5d806 + :setup: Standalone Instance + :steps: + 1. Enable the plugin + 2. Restart the instance + 3. Add our org units + 4. Set up config entry and template entry for the org units + 5. Add an entry that meets the MEP scope + 6. Check if a managed group entry was created + 7. Add a new template entry + 8. Add an entry that meets the MEP scope + 9. Check if a managed group entry was created + 10. Check nsslapd-plugin-depends-on-named for the plugin + 11. Clean up + :expectedresults: + 1. Success + 2. Success + 3. Success + 4. Success + 5. Success + 6. Success + 7. Success + 8. Success + 9. Success + 10. Success + 11. 
Success + """ + + inst = topo[0] + + # stop the plugin, and start it + plugin = ManagedEntriesPlugin(inst) + plugin.disable() + plugin.enable() + + if args == "restart": + return + + # If args is None then we run the test suite as pytest standalone and it's not dynamic + if args is None: + inst.restart() + + log.info('Testing ' + PLUGIN_MANAGED_ENTRY + '...') + + ############################################################################ + # Configure plugin + ############################################################################ + # Add our org units + ous = OrganizationalUnits(inst, DEFAULT_SUFFIX) + ou_people = ous.create(properties={'ou': 'managed_people'}) + ou_groups = ous.create(properties={'ou': 'managed_groups'}) + + mep_templates = MEPTemplates(inst, DEFAULT_SUFFIX) + mep_template1 = mep_templates.create(properties={ + 'cn': 'MEP template', + 'mepRDNAttr': 'cn', + 'mepStaticAttr': 'objectclass: posixGroup|objectclass: extensibleObject'.split('|'), + 'mepMappedAttr': 'cn: $cn|uid: $cn|gidNumber: $uidNumber'.split('|') + }) + mep_configs = MEPConfigs(inst) + mep_config = mep_configs.create(properties={'cn': 'config', + 'originScope': ou_people.dn, + 'originFilter': 'objectclass=posixAccount', + 'managedBase': ou_groups.dn, + 'managedTemplate': mep_template1.dn}) + if args is None: + inst.restart() + + ############################################################################ + # Test plugin + ############################################################################ + # Add an entry that meets the MEP scope + test_users_m1 = UserAccounts(inst, DEFAULT_SUFFIX, rdn='ou={}'.format(ou_people.rdn)) + test_user1 = test_users_m1.create_test_user(1001) + + # Check if a managed group entry was created + entries = inst.search_s('cn={},{}'.format(test_user1.rdn, ou_groups.dn), ldap.SCOPE_BASE, '(objectclass=top)') + assert len(entries) == 1 + + ############################################################################ + # Change the config + ############################################################################ + # Add a new template entry + mep_template2 = mep_templates.create(properties={ + 'cn': 'MEP template2', + 'mepRDNAttr': 'uid', + 'mepStaticAttr': 'objectclass: posixGroup|objectclass: extensibleObject'.split('|'), + 'mepMappedAttr': 'cn: $cn|uid: $cn|gidNumber: $uidNumber'.split('|') + }) + mep_config.replace('managedTemplate', mep_template2.dn) + + ############################################################################ + # Test plugin + ############################################################################ + # Add an entry that meets the MEP scope + test_user2 = test_users_m1.create_test_user(1002) + + # Check if a managed group entry was created + entries = inst.search_s('uid={},{}'.format(test_user2.rdn, ou_groups.dn), ldap.SCOPE_BASE, '(objectclass=top)') + assert len(entries) == 1 + + ############################################################################ + # Test plugin dependency + ############################################################################ + check_dependency(inst, plugin, online=isinstance(args, str)) + + ############################################################################ + # Cleanup + ############################################################################ + test_user1.delete() + test_user2.delete() + ou_people.delete() + ou_groups.delete() + mep_config.delete() + mep_template1.delete() + mep_template2.delete() + + ############################################################################ + # Test passed + 
############################################################################ + log.info('test_mep: PASS\n') + return + + +def test_passthru(topo, args=None): + """Test Passthrough Authentication basic functionality + + :id: 9b87493b-0493-46f9-8364-6099d0e5d807 + :setup: Standalone Instance + :steps: + 1. Stop the plugin + 2. Restart the instance + 3. Create a second backend + 4. Create the top of the tree + 5. Add user to suffix1 + 6. Configure and start plugin + 7. Login as user + 8. Login as root DN + 9. Replace 'nsslapd-pluginarg0': ldap uri for second instance + 10. Login as user + 11. Login as root DN + 12. Check nsslapd-plugin-depends-on-named for the plugin + 13. Clean up + :expectedresults: + 1. Success + 2. Success + 3. Success + 4. Success + 5. Success + 6. Success + 7. Success + 8. Success + 9. Success + 10. Success + 11. Success + 12. Success + 13. Success + """ + + inst1 = topo[0] + inst2 = topo[1] + + # Passthru is a bit picky about the state of the entry - we can't just restart it + if args == "restart": + return + + # stop the plugin + plugin = PassThroughAuthenticationPlugin(inst1) + plugin.disable() + + # If args is None then we run the test suite as pytest standalone and it's not dynamic + if args is None: + inst1.restart() + + PASS_SUFFIX1 = 'dc=pass1,dc=thru' + PASS_SUFFIX2 = 'dc=pass2,dc=thru' + PASS_BE1 = 'PASS1' + PASS_BE2 = 'PASS2' + + log.info('Testing ' + PLUGIN_PASSTHRU + '...') + + ############################################################################ + # Use a new "remote" instance, and a user for auth + ############################################################################ + # Create a second backend + backend1 = inst2.backends.create(properties={'cn': PASS_BE1, + 'nsslapd-suffix': PASS_SUFFIX1}) + backend2 = inst2.backends.create(properties={'cn': PASS_BE2, + 'nsslapd-suffix': PASS_SUFFIX2}) + + # Create the top of the tree + suffix = Domain(inst2, PASS_SUFFIX1) + pass1 = suffix.create(properties={'dc': 'pass1'}) + suffix = Domain(inst2, PASS_SUFFIX2) + pass2 = suffix.create(properties={'dc': 'pass2'}) + + # Add user to suffix1 + users = UserAccounts(inst2, pass1.dn, None) + test_user1 = users.create_test_user(1001) + test_user1.replace('userpassword', 'password') + + users = UserAccounts(inst2, pass2.dn, None) + test_user2 = users.create_test_user(1002) + test_user2.replace('userpassword', 'password') + + ############################################################################ + # Configure and start plugin + ############################################################################ + plugin.replace('nsslapd-pluginarg0', + 'ldap://{}:{}/{}'.format(inst2.host, inst2.port, pass1.dn)) + plugin.enable() + + # If args is None then we run the test suite as pytest standalone and it's not dynamic + if args is None: + inst1.restart() + + ############################################################################ + # Test plugin + ############################################################################ + # login as user + inst1.simple_bind_s(test_user1.dn, "password") + + ############################################################################ + # Change the config + ############################################################################ + # login as root DN + inst1.simple_bind_s(DN_DM, PASSWORD) + + plugin.replace('nsslapd-pluginarg0', + 'ldap://{}:{}/{}'.format(inst2.host, inst2.port, pass2.dn)) + if args is None: + inst1.restart() + + ############################################################################ + # Test plugin + 
############################################################################ + + # login as user + inst1.simple_bind_s(test_user2.dn, "password") + + # login as root DN + inst1.simple_bind_s(DN_DM, PASSWORD) + + # Clean up + backend1.delete() + backend2.delete() + + ############################################################################ + # Test plugin dependency + ############################################################################ + check_dependency(inst1, plugin, online=isinstance(args, str)) + + ############################################################################ + # Test passed + ############################################################################ + log.info('test_passthru: PASS\n') + return + + +def test_referint(topo, args=None): + """Test Referential Integrity basic functionality + + :id: 9b87493b-0493-46f9-8364-6099d0e5d808 + :setup: Standalone Instance + :steps: + 1. Enable the plugin + 2. Restart the instance + 3. Replace 'referint-membership-attr': 'member' + 4. Add some users and a group + 5. Grab the referint log file from the plugin + 6. Add shared config entry + 7. Delete one user + 8. Check for integrity + 9. Replace 'referint-membership-attr': 'uniquemember' + 10. Delete second user + 11. Check for integrity + 12. The shared config entry uses "member" - the above test used "uniquemember" + 13. Recreate users and a group + 14. Delete one user + 15. Check for integrity + 16. Change the shared config entry to use 'uniquemember' and test the plugin + 17. Delete second user + 18. Check for integrity + 19. First change the plugin to use member before we move the shared config that uses uniquemember + 20. Remove shared config from plugin + 21. Add test user + 22. Add user to group + 23. Delete a user + 24. Check for integrity + 25. Check nsslapd-plugin-depends-on-named for the plugin + 26. Clean up + :expectedresults: + 1. Success + 2. Success + 3. Success + 4. Success + 5. Success + 6. Success + 7. Success + 8. Success + 9. Success + 10. Success + 11. Success + 12. Success + 13. Success + 14. Success + 15. Success + 16. Success + 17. Success + 18. Success + 19. Success + 20. Success + 21. Success + 22. Success + 23. Success + 24. Success + 25. Success + 26. 
Success + """ + + inst = topo[0] + + # stop the plugin, and start it + plugin = ReferentialIntegrityPlugin(inst) + plugin.disable() + plugin.enable() + + if args == "restart": + return + + # If args is None then we run the test suite as pytest standalone and it's not dynamic + if args is None: + inst.restart() + + log.info('Testing ' + PLUGIN_REFER_INTEGRITY + '...') + + ############################################################################ + # Configure plugin + ############################################################################ + plugin.replace('referint-membership-attr', 'member') + + ############################################################################ + # Test plugin + ############################################################################ + # Add some users and a group + users = UserAccounts(inst, DEFAULT_SUFFIX, None) + user1 = users.create_test_user(uid=1001) + user2 = users.create_test_user(uid=1002) + + groups = Groups(inst, DEFAULT_SUFFIX, None) + group = groups.create(properties={'cn': 'group', + MEMBER_ATTR: user1.dn}) + group.add('objectclass', 'groupOfUniqueNames') + group.add('uniquemember', user2.dn) + + # Grab the referint log file from the plugin + referin_logfile = plugin.get_attr_val_utf8('referint-logfile') + + # Add shared config entry + referin_config = ReferentialIntegrityConfig(inst, 'cn=RI config,{}'.format(DEFAULT_SUFFIX)) + referin_config.create(properties={'cn': 'RI config', + 'referint-membership-attr': 'member', + 'referint-update-delay': '0', + 'referint-logfile': referin_logfile}) + + user1.delete() + + # Check for integrity + entry = inst.search_s(group.dn, ldap.SCOPE_BASE, '(member={})'.format(user1.dn)) + assert not entry + + ############################################################################ + # Change the config + ############################################################################ + plugin.replace('referint-membership-attr', 'uniquemember') + + ############################################################################ + # Test plugin + ############################################################################ + + user2.delete() + + # Check for integrity + entry = inst.search_s(group.dn, ldap.SCOPE_BASE, '(uniquemember={})'.format(user2.dn)) + assert not entry + + ############################################################################ + # Set the shared config entry and test the plugin + ############################################################################ + # The shared config entry uses "member" - the above test used "uniquemember" + plugin.set_configarea(referin_config.dn) + group.delete() + + user1 = users.create_test_user(uid=1001) + user2 = users.create_test_user(uid=1002) + group = groups.create(properties={'cn': 'group', + MEMBER_ATTR: user1.dn}) + group.add('objectclass', 'groupOfUniqueNames') + group.add('uniquemember', user2.dn) + + # Delete a user + user1.delete() + + # Check for integrity + entry = inst.search_s(group.dn, ldap.SCOPE_BASE, '(member={})'.format(user1.dn)) + assert not entry + + ############################################################################ + # Change the shared config entry to use 'uniquemember' and test the plugin + ############################################################################ + + referin_config.replace('referint-membership-attr', 'uniquemember') + + # Delete a user + user2.delete() + + # Check for integrity + entry = inst.search_s(group.dn, ldap.SCOPE_BASE, '(uniquemember={})'.format(user2.dn)) + assert not entry + + 
############################################################################ + # Remove shared config from plugin, and retest + ############################################################################ + # First change the plugin to use member before we move the shared config that uses uniquemember + plugin.replace('referint-membership-attr', 'member') + + # Remove shared config from plugin + plugin.remove_configarea() + + # Add test user + user1 = users.create_test_user(uid=1001) + + # Add user to group + group.replace('member', user1.dn) + + # Delete a user + user1.delete() + + # Check for integrity + entry = inst.search_s(group.dn, ldap.SCOPE_BASE, '(member={})'.format(user1.dn)) + assert not entry + + ############################################################################ + # Test plugin dependency + ############################################################################ + check_dependency(inst, plugin, online=isinstance(args, str)) + + ############################################################################ + # Cleanup + ############################################################################ + group.delete() + referin_config.delete() + + ############################################################################ + # Test passed + ############################################################################ + log.info('test_referint: PASS\n') + return + + +def test_retrocl(topo, args=None): + """Test Retro Changelog basic functionality + + :id: 9b87493b-0493-46f9-8364-6099d0e5d810 + :setup: Standalone Instance + :steps: + 1. Enable the plugin + 2. Restart the instance + 3. Gather the current change count (it's not 1 once we start the stability tests) + 4. Add a user + 5. Check we logged this in the retro cl + 6. Change the config - disable plugin + 7. Delete the user + 8. Check we didn't log this in the retro cl + 9. Check nsslapd-plugin-depends-on-named for the plugin + 10. Clean up + :expectedresults: + 1. Success + 2. Success + 3. Success + 4. Success + 5. Success + 6. Success + 7. Success + 8. Success + 9. Success + 10. 
Success + """ + + inst = topo[0] + + # stop the plugin, and start it + plugin = RetroChangelogPlugin(inst) + plugin.disable() + plugin.enable() + + if args == "restart": + return + + # If args is None then we run the test suite as pytest standalone and it's not dynamic + if args is None: + inst.restart() + + log.info('Testing ' + PLUGIN_RETRO_CHANGELOG + '...') + + ############################################################################ + # Configure plugin + ############################################################################ + + # Gather the current change count (it's not 1 once we start the stabilty tests) + entry = inst.search_s(RETROCL_SUFFIX, ldap.SCOPE_SUBTREE, '(changenumber=*)') + entry_count = len(entry) + + ############################################################################ + # Test plugin + ############################################################################ + + # Add a user + users = UserAccounts(inst, DEFAULT_SUFFIX) + user1 = users.create_test_user(uid=1001) + + # Check we logged this in the retro cl + entry = inst.search_s(RETROCL_SUFFIX, ldap.SCOPE_SUBTREE, '(changenumber=*)') + assert entry + assert len(entry) != entry_count + + entry_count += 1 + + ############################################################################ + # Change the config - disable plugin + ############################################################################ + plugin.disable() + + # If args is None then we run the test suite as pytest standalone and it's not dynamic + if args is None: + inst.restart() + + ############################################################################ + # Test plugin + ############################################################################ + user1.delete() + + # Check we didn't logged this in the retro cl + entry = inst.search_s(RETROCL_SUFFIX, ldap.SCOPE_SUBTREE, '(changenumber=*)') + assert len(entry) == entry_count + + plugin.enable() + if args is None: + inst.restart() + + ############################################################################ + # Test plugin dependency + ############################################################################ + check_dependency(inst, plugin, online=isinstance(args, str)) + + ############################################################################ + # Test passed + ############################################################################ + log.info('test_retrocl: PASS\n') + return + + +def _rootdn_restart(inst): + """Special restart wrapper function for rootDN plugin""" + + with pytest.raises(ldap.LDAPError): + inst.restart() + # Bind as the user who can make updates to the config + inst.simple_bind_s(USER_DN, USER_PW) + # We need it online for other operations to work + inst.state = DIRSRV_STATE_ONLINE + + +def test_rootdn(topo, args=None): + """Test Root DNA Access control basic functionality + + :id: 9b87493b-0493-46f9-8364-6099d0e5d811 + :setup: Standalone Instance + :steps: + 1. Enable the plugin + 2. Restart the instance + 3. Add an user and aci to open up cn=config + 4. Set an aci so we can modify the plugin after we deny the root dn + 5. Set allowed IP to an unknown host - blocks root dn + 6. Bind as Root DN + 7. Bind as the user who can make updates to the config + 8. Test that invalid plugin changes are rejected + 9. Remove the restriction + 10. Bind as Root DN + 11. Check nsslapd-plugin-depends-on-named for the plugin + 12. Clean up + :expectedresults: + 1. Success + 2. Success + 3. Success + 4. Success + 5. Success + 6. Success + 7. Success + 8. Success + 9. 
Success + 10. Success + 11. Success + 12. Success + """ + + inst = topo[0] + + # stop the plugin, and start it + plugin = RootDNAccessControlPlugin(inst) + plugin.disable() + plugin.enable() + + if args == "restart": + return + + # If args is None then we run the test suite as pytest standalone and it's not dynamic + if args is None: + inst.restart() + + log.info('Testing ' + PLUGIN_ROOTDN_ACCESS + '...') + + ############################################################################ + # Configure plugin + ############################################################################ + + # Add a user and an aci to open up cn=config + users = UserAccounts(inst, DEFAULT_SUFFIX) + user1 = users.create_test_user(uid=1001) + user1.replace('userpassword', USER_PW) + + # Set an aci so we can modify the plugin after we deny the root dn + ACI = ('(target ="ldap:///cn=config")(targetattr = "*")(version 3.0;acl ' + + '"all access";allow (all)(userdn="ldap:///anyone");)') + inst.config.add('aci', ACI) + + # Set allowed IP to an unknown host - blocks root dn + plugin.replace('rootdn-allow-ip', '10.10.10.10') + + ############################################################################ + # Test plugin + ############################################################################ + # Bind as Root DN + if args is None: + _rootdn_restart(inst) + else: + with pytest.raises(ldap.LDAPError): + inst.simple_bind_s(DN_DM, PASSWORD) + # Bind as the user who can make updates to the config + inst.simple_bind_s(USER_DN, USER_PW) + + ############################################################################ + # Change the config + ############################################################################ + # First, test that invalid plugin changes are rejected + if args is None: + plugin.replace('rootdn-deny-ip', '12.12.ZZZ.12') + with pytest.raises((subprocess.CalledProcessError, ValueError)): + inst.restart() + dse_ldif = DSEldif(inst) + dse_ldif.delete(plugin.dn, 'rootdn-deny-ip') + _rootdn_restart(inst) + + plugin.replace('rootdn-allow-host', 'host._.com') + with pytest.raises((subprocess.CalledProcessError, ValueError)): + inst.restart() + dse_ldif = DSEldif(inst) + dse_ldif.delete(plugin.dn, 'rootdn-allow-host') + _rootdn_restart(inst) + else: + with pytest.raises(ldap.LDAPError): + plugin.replace('rootdn-deny-ip', '12.12.ZZZ.12') + + with pytest.raises(ldap.LDAPError): + plugin.replace('rootdn-allow-host', 'host._.com') + + # Remove the restriction + plugin.remove_all('rootdn-allow-ip') + if args is None: + inst.restart() + + ############################################################################ + # Test plugin + ############################################################################ + # Bind as Root DN + inst.simple_bind_s(DN_DM, PASSWORD) + + ############################################################################ + # Test plugin dependency + ############################################################################ + check_dependency(inst, plugin, online=isinstance(args, str)) + + ############################################################################ + # Cleanup - remove ACI from cn=config and test user + ############################################################################ + inst.config.remove('aci', ACI) + user1.delete() + + ############################################################################ + # Test passed + ############################################################################ + log.info('test_rootdn: PASS\n') + return + + +# Array of test functions 
+func_tests = [test_acctpolicy, test_attruniq, test_automember, test_dna, + test_linkedattrs, test_memberof, test_mep, test_passthru, + test_referint, test_retrocl, test_rootdn] + + +def check_all_plugins(topo, args="online"): + for func in func_tests: + func(topo, args) + + return diff --git a/dirsrvtests/tests/suites/plugins/accpol_test.py b/dirsrvtests/tests/suites/plugins/accpol_test.py new file mode 100644 index 0000000..73e2e54 --- /dev/null +++ b/dirsrvtests/tests/suites/plugins/accpol_test.py @@ -0,0 +1,1094 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2017 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. +# --- END COPYRIGHT BLOCK --- +# +import pytest +import subprocess +from lib389.tasks import * +from lib389.utils import * +from lib389.topologies import topology_st +from lib389.idm.user import (UserAccount, UserAccounts) +from lib389.plugins import (AccountPolicyPlugin, AccountPolicyConfig) +from lib389.cos import (CosTemplate, CosPointerDefinition) +from lib389._constants import (PLUGIN_ACCT_POLICY, DN_PLUGIN, DN_DM, PASSWORD, DEFAULT_SUFFIX, + DN_CONFIG, SERVERID_STANDALONE) + +pytestmark = pytest.mark.tier1 + +LOCL_CONF = 'cn=AccountPolicy1,ou=people,dc=example,dc=com' +TEMPL_COS = 'cn=TempltCoS,ou=people,dc=example,dc=com' +DEFIN_COS = 'cn=DefnCoS,ou=people,dc=example,dc=com' +ACCPOL_DN = "cn={},{}".format(PLUGIN_ACCT_POLICY, DN_PLUGIN) +ACCP_CONF = "{},{}".format(DN_CONFIG, ACCPOL_DN) +USER_PASW = 'Secret1234' +INVL_PASW = 'Invalid234' + + +@pytest.fixture(scope="module") +def accpol_global(topology_st, request): + """Configure Global account policy plugin and restart the server""" + + log.info('Configuring Global account policy plugin, pwpolicy attributes and restarting the server') + plugin = AccountPolicyPlugin(topology_st.standalone) + try: + if DEBUGGING: + topology_st.standalone.config.set('nsslapd-auditlog-logging-enabled', 'on') + plugin.enable() + plugin.set('nsslapd-pluginarg0', ACCP_CONF) + accp = AccountPolicyConfig(topology_st.standalone, dn=ACCP_CONF) + accp.set('alwaysrecordlogin', 'yes') + accp.set('stateattrname', 'lastLoginTime') + accp.set('altstateattrname', 'createTimestamp') + accp.set('specattrname', 'acctPolicySubentry') + accp.set('limitattrname', 'accountInactivityLimit') + accp.set('accountInactivityLimit', '12') + topology_st.standalone.config.set('passwordexp', 'on') + topology_st.standalone.config.set('passwordmaxage', '400') + topology_st.standalone.config.set('passwordwarning', '1') + topology_st.standalone.config.set('passwordlockout', 'on') + topology_st.standalone.config.set('passwordlockoutduration', '5') + topology_st.standalone.config.set('passwordmaxfailure', '3') + topology_st.standalone.config.set('passwordunlock', 'on') + except ldap.LDAPError as e: + log.error('Failed to enable Global Account Policy Plugin and Password policy attributes') + raise e + topology_st.standalone.restart(timeout=10) + + def fin(): + log.info('Disabling Global accpolicy plugin and removing pwpolicy attrs') + try: + plugin = AccountPolicyPlugin(topology_st.standalone) + plugin.disable() + topology_st.standalone.config.set('passwordexp', 'off') + topology_st.standalone.config.set('passwordlockout', 'off') + except ldap.LDAPError as e: + log.error('Failed to disable Global accpolicy plugin, {}'.format(e.message['desc'])) + assert False + topology_st.standalone.restart(timeout=10) + + request.addfinalizer(fin) + + +@pytest.fixture(scope="module") +def accpol_local(topology_st, 
accpol_global, request): + """Configure Local account policy plugin for ou=people subtree and restart the server""" + + log.info('Adding Local account policy plugin configuration entries') + try: + topology_st.standalone.config.set('passwordmaxage', '400') + accp = AccountPolicyConfig(topology_st.standalone, dn=ACCP_CONF) + accp.remove_all('accountInactivityLimit') + locl_conf = AccountPolicyConfig(topology_st.standalone, dn=LOCL_CONF) + locl_conf.create(properties={'cn': 'AccountPolicy1', 'accountInactivityLimit': '10'}) + cos_template = CosTemplate(topology_st.standalone, dn=TEMPL_COS) + cos_template.create(properties={'cn': 'TempltCoS', 'acctPolicySubentry': LOCL_CONF}) + cos_def = CosPointerDefinition(topology_st.standalone, dn=DEFIN_COS) + cos_def.create(properties={ + 'cn': 'DefnCoS', + 'cosTemplateDn': TEMPL_COS, + 'cosAttribute': 'acctPolicySubentry default operational-default'}) + except ldap.LDAPError as e: + log.error('Failed to configure Local account policy plugin') + log.error('Failed to add entry {}, {}, {}:'.format(LOCL_CONF, TEMPL_COS, DEFIN_COS)) + raise e + topology_st.standalone.restart(timeout=10) + + def fin(): + log.info('Disabling Local accpolicy plugin and removing pwpolicy attrs') + try: + topology_st.standalone.plugins.disable(name=PLUGIN_ACCT_POLICY) + for entry_dn in [LOCL_CONF, TEMPL_COS, DEFIN_COS]: + entry = UserAccount(topology_st.standalone, dn=entry_dn) + entry.delete() + except ldap.LDAPError as e: + log.error('Failed to disable Local accpolicy plugin, {}'.format(e.message['desc'])) + assert False + topology_st.standalone.restart(timeout=10) + + request.addfinalizer(fin) + + +def pwacc_lock(topology_st, suffix, subtree, userid, nousrs): + """Lockout user account by attempting invalid password binds""" + + log.info('Lockout user account by attempting invalid password binds') + while (nousrs > 0): + usrrdn = '{}{}'.format(userid, nousrs) + userdn = 'uid={},{},{}'.format(usrrdn, subtree, suffix) + user = UserAccount(topology_st.standalone, dn=userdn) + for i in range(3): + with pytest.raises(ldap.INVALID_CREDENTIALS): + user.bind(INVL_PASW) + log.error('No invalid credentials error for User {}'.format(userdn)) + with pytest.raises(ldap.CONSTRAINT_VIOLATION): + user.bind(USER_PASW) + log.error('User {} is not locked, expected error 19'.format(userdn)) + nousrs = nousrs - 1 + time.sleep(1) + + +def userpw_reset(topology_st, suffix, subtree, userid, nousrs, bindusr, bindpw, newpasw): + """Reset user password""" + + while (nousrs > 0): + usrrdn = '{}{}'.format(userid, nousrs) + userdn = 'uid={},{},{}'.format(usrrdn, subtree, suffix) + user = UserAccount(topology_st.standalone, dn=userdn) + log.info('Reset user password for user-{}'.format(userdn)) + if (bindusr == "DirMgr"): + try: + user.replace('userPassword', newpasw) + except ldap.LDAPError as e: + log.error('Unable to reset userPassword for user-{}'.format(userdn)) + raise e + elif (bindusr == "RegUsr"): + user_conn = user.bind(bindpw) + try: + user_conn.replace('userPassword', newpasw) + except ldap.LDAPError as e: + log.error('Unable to reset userPassword for user-{}'.format(userdn)) + raise e + nousrs = nousrs - 1 + time.sleep(1) + + +def nsact_inact(topology_st, suffix, subtree, userid, nousrs, command, expected): + """Account activate/in-activate/status using ns-activate/inactivate/accountstatus.pl""" + + log.info('Account activate/in-activate/status using ns-activate/inactivate/accountstatus.pl') + while (nousrs > 0): + usrrdn = '{}{}'.format(userid, nousrs) + userdn = 'uid={},{},{}'.format(usrrdn, 
subtree, suffix) + log.info('Running {} for user {}'.format(command, userdn)) + if ds_is_older('1.3'): + action = '{}/{}'.format(inst_dir, command) + try: + output = subprocess.check_output([action, '-D', DN_DM, '-w', PASSWORD, '-I', userdn]) + except subprocess.CalledProcessError as err: + output = err.output + else: + action = '{}/{}'.format(topology_st.standalone.ds_paths.sbin_dir, command) + try: + output = subprocess.check_output( + [action, '-Z', SERVERID_STANDALONE, '-D', DN_DM, '-w', PASSWORD, '-I', userdn]) + except subprocess.CalledProcessError as err: + output = err.output + log.info('output: {}'.format(output)) + assert ensure_bytes(expected) in output + nousrs = nousrs - 1 + time.sleep(1) + + +def modify_attr(topology_st, base_dn, attr_name, attr_val): + """Modify attribute value for a given DN""" + + log.info('Modify attribute value for a given DN') + try: + entry = UserAccount(topology_st.standalone, dn=base_dn) + entry.replace(attr_name, attr_val) + except ldap.LDAPError as e: + log.error('Failed to replace {} attribute for entry {}: {}'.format(attr_name, base_dn, e)) + assert False + time.sleep(1) + + +def check_attr(topology_st, suffix, subtree, userid, nousrs, attr_name): + """Check ModifyTimeStamp attribute present for user""" + + log.info('Check ModifyTimeStamp attribute present for user') + while (nousrs > 0): + usrrdn = '{}{}'.format(userid, nousrs) + userdn = 'uid={},{},{}'.format(usrrdn, subtree, suffix) + user = UserAccount(topology_st.standalone, dn=userdn) + try: + user.get_attr_val(attr_name) + except ldap.LDAPError as e: + log.error('ModifyTimeStamp attribute is not present for user-{} {}'.format(userdn, e.message['desc'])) + assert False + nousrs = nousrs - 1 + + +def add_time_attr(topology_st, suffix, subtree, userid, nousrs, attr_name): + """Enable account by replacing lastLoginTime/createTimeStamp/ModifyTimeStamp attribute""" + + new_attr_val = time.strftime("%Y%m%d%H%M%S", time.gmtime()) + 'Z' + log.info('Enable account by replacing lastLoginTime/createTimeStamp/ModifyTimeStamp attribute') + while (nousrs > 0): + usrrdn = '{}{}'.format(userid, nousrs) + userdn = 'uid={},{},{}'.format(usrrdn, subtree, suffix) + user = UserAccount(topology_st.standalone, dn=userdn) + try: + user.replace(attr_name, new_attr_val) + except ldap.LDAPError as e: + log.error('Failed to add/replace {} attribute to-{}, for user-{}'.format(attr_name, new_attr_val, userdn)) + raise e + nousrs = nousrs - 1 + time.sleep(1) + time.sleep(1) + + +def modusr_attr(topology_st, suffix, subtree, userid, nousrs, attr_name, attr_value): + """Enable account by replacing cn attribute value, value of modifyTimeStamp changed""" + + log.info('Enable account by replacing cn attribute value, value of modifyTimeStamp changed') + while (nousrs > 0): + usrrdn = '{}{}'.format(userid, nousrs) + userdn = 'uid={},{},{}'.format(usrrdn, subtree, suffix) + user = UserAccount(topology_st.standalone, dn=userdn) + try: + user.replace(attr_name, attr_value) + except ldap.LDAPError as e: + log.error('Failed to add/replace {} attribute to-{}, for user-{}'.format(attr_name, attr_value, userdn)) + raise e + nousrs = nousrs - 1 + time.sleep(1) + + +def del_time_attr(topology_st, suffix, subtree, userid, nousrs, attr_name): + """Delete lastLoginTime/createTimeStamp/ModifyTimeStamp attribute from user account""" + + log.info('Delete lastLoginTime/createTimeStamp/ModifyTimeStamp attribute from user account') + while (nousrs > 0): + usrrdn = '{}{}'.format(userid, nousrs) + userdn = 'uid={},{},{}'.format(usrrdn, 
subtree, suffix) + user = UserAccount(topology_st.standalone, dn=userdn) + try: + user.remove_all(attr_name) + except ldap.LDAPError as e: + log.error('Failed to delete {} attribute for user-{}'.format(attr_name, userdn)) + raise e + nousrs = nousrs - 1 + time.sleep(1) + + +def add_users(topology_st, suffix, subtree, userid, nousrs, ulimit): + """Add users to default test instance with given suffix, subtree, userid and nousrs""" + + log.info('add_users: Pass all of these as parameters suffix, subtree, userid and nousrs') + users = UserAccounts(topology_st.standalone, suffix, rdn=subtree) + while (nousrs > ulimit): + usrrdn = '{}{}'.format(userid, nousrs) + user_properties = { + 'uid': usrrdn, + 'cn': usrrdn, + 'sn': usrrdn, + 'uidNumber': '1001', + 'gidNumber': '2001', + 'userpassword': USER_PASW, + 'homeDirectory': '/home/{}'.format(usrrdn)} + users.create(properties=user_properties) + nousrs = nousrs - 1 + + +def del_users(topology_st, suffix, subtree, userid, nousrs): + """Delete users from default test instance with given suffix, subtree, userid and nousrs""" + + log.info('del_users: Pass all of these as parameters suffix, subtree, userid and nousrs') + users = UserAccounts(topology_st.standalone, suffix, rdn=subtree) + while (nousrs > 0): + usrrdn = '{}{}'.format(userid, nousrs) + userdn = users.get(usrrdn) + userdn.delete() + nousrs = nousrs - 1 + + +def account_status(topology_st, suffix, subtree, userid, nousrs, ulimit, tochck): + """Check account status for the given suffix, subtree, userid and nousrs""" + + while (nousrs > ulimit): + usrrdn = '{}{}'.format(userid, nousrs) + userdn = 'uid={},{},{}'.format(usrrdn, subtree, suffix) + user = UserAccount(topology_st.standalone, dn=userdn) + if (tochck == "Enabled"): + try: + user.bind(USER_PASW) + except ldap.LDAPError as e: + log.error('User {} failed to login, expected 0'.format(userdn)) + raise e + elif (tochck == "Expired"): + with pytest.raises(ldap.INVALID_CREDENTIALS): + user.bind(USER_PASW) + log.error('User {} password not expired , expected error 49'.format(userdn)) + elif (tochck == "Disabled"): + with pytest.raises(ldap.CONSTRAINT_VIOLATION): + user.bind(USER_PASW) + log.error('User {} is not inactivated, expected error 19'.format(userdn)) + nousrs = nousrs - 1 + time.sleep(1) + + +def test_glact_inact(topology_st, accpol_global): + """Verify if user account is inactivated when accountInactivityLimit is exceeded. + + :id: 342af084-0ad0-442f-b6f6-5a8b8e5e4c28 + :setup: Standalone instance, Global account policy plugin configuration, + set accountInactivityLimit to few secs. + :steps: + 1. Add few users to ou=people subtree in the default suffix + 2. Check if users are active just before it reaches accountInactivityLimit. + 3. User accounts should not be inactivated, expected 0 + 4. Check if users are inactivated when accountInactivityLimit is exceeded. + 5. User accounts should be inactivated, expected error 19. + :expectedresults: + 1. Success + 2. Success + 3. Success + 4. Success + 5. Should return error code 19 + """ + + suffix = DEFAULT_SUFFIX + subtree = "ou=people" + userid = "glinactusr" + nousrs = 3 + log.info('AccountInactivityLimit set to 12. 
Account will be inactivated if not accessed in 12 secs') + add_users(topology_st, suffix, subtree, userid, nousrs, 0) + + log.info('Sleep for 10 secs to check if account is not inactivated, expected value 0') + time.sleep(10) + log.info('Account should not be inactivated since AccountInactivityLimit not exceeded') + account_status(topology_st, suffix, subtree, userid, 3, 2, "Enabled") + + log.info('Sleep for 3 more secs to check if account is inactivated') + time.sleep(3) + account_status(topology_st, suffix, subtree, userid, 2, 0, "Disabled") + + log.info('Sleep +10 secs to check if account {}3 is inactivated'.format(userid)) + time.sleep(10) + account_status(topology_st, suffix, subtree, userid, 3, 2, "Disabled") + del_users(topology_st, suffix, subtree, userid, nousrs) + + +def test_glremv_lastlogin(topology_st, accpol_global): + """Verify if user account is inactivated by createTimeStamp, if lastLoginTime attribute is missing. + + :id: 8ded5d8e-ed93-4c22-9c8e-78c479189f84 + :setup: Standalone instance, Global account policy plugin configuration, + set accountInactivityLimit to few secs. + :steps: + 1. Add few users to ou=people subtree in the default suffix + 2. Wait for few secs and bind as user to create lastLoginTime attribute. + 3. Remove the lastLoginTime attribute from the user. + 4. Wait till accountInactivityLimit exceeded based on createTimeStamp value + 5. Check if users are inactivated, expected error 19. + 6. Replace lastLoginTime attribute and check if account is activated + 7. User should be activated based on lastLoginTime attribute, expected 0 + :assert: + 1. Success + 2. Success + 3. Success + 4. Success + 5. Success + 6. Success + 7. Should return error code 19 + """ + + suffix = DEFAULT_SUFFIX + subtree = "ou=people" + userid = "nologtimeusr" + nousrs = 1 + log.info('AccountInactivityLimit set to 12. Account will be inactivated if not accessed in 12 secs') + add_users(topology_st, suffix, subtree, userid, nousrs, 0) + log.info('Sleep for 6 secs to check if account is not inactivated, expected value 0') + time.sleep(6) + log.info('Account should not be inactivated since AccountInactivityLimit not exceeded') + account_status(topology_st, suffix, subtree, userid, nousrs, 0, "Enabled") + del_time_attr(topology_st, suffix, subtree, userid, nousrs, 'lastLoginTime') + log.info('Sleep for 7 more secs to check if account is inactivated') + time.sleep(7) + account_status(topology_st, suffix, subtree, userid, nousrs, 0, "Disabled") + add_time_attr(topology_st, suffix, subtree, userid, nousrs, 'lastLoginTime') + log.info('Check if account is activated, expected 0') + account_status(topology_st, suffix, subtree, userid, nousrs, 0, "Enabled") + del_users(topology_st, suffix, subtree, userid, nousrs) + + +def test_glact_login(topology_st, accpol_global): + """Verify if user account can be activated by replacing the lastLoginTime attribute. + + :id: f89897cc-c13e-4824-af08-3dd1039bab3c + :setup: Standalone instance, Global account policy plugin configuration, + set accountInactivityLimit to few secs. + :steps: + 1. Add few users to ou=groups subtree in the default suffix + 2. Wait till accountInactivityLimit exceeded + 3. Run ldapsearch as normal user, expected error 19. + 4. Replace the lastLoginTime attribute and check if account is activated + 5. Run ldapsearch as normal user, expected 0. + :assert: + 1. Success + 2. Success + 3. Success + 4. Success + 5. 
Success + """ + + suffix = DEFAULT_SUFFIX + subtree = "ou=groups" + userid = "glactusr" + nousrs = 3 + log.info('AccountInactivityLimit set to 12. Account will be inactivated if not accessed in 12 secs') + add_users(topology_st, suffix, subtree, userid, nousrs, 0) + log.info('Sleep for 13 secs to check if account is inactivated, expected error 19') + time.sleep(13) + account_status(topology_st, suffix, subtree, userid, nousrs, 0, "Disabled") + add_time_attr(topology_st, suffix, subtree, userid, nousrs, 'lastLoginTime') + log.info('Check if account is activated, expected 0') + account_status(topology_st, suffix, subtree, userid, nousrs, 0, "Enabled") + del_users(topology_st, suffix, subtree, userid, nousrs) + + +def test_glinact_limit(topology_st, accpol_global): + """Verify if account policy plugin functions well when changing accountInactivityLimit value. + + :id: 7fbc373f-a3d7-4774-8d34-89b057c5e74b + :setup: Standalone instance, Global account policy plugin configuration, + set accountInactivityLimit to few secs. + :steps: + 1. Add few users to ou=groups subtree in the default suffix + 2. Check if users are active just before reaching accountInactivityLimit + 3. Modify AccountInactivityLimit to a bigger value + 4. Wait for additional few secs, but check users before it reaches accountInactivityLimit + 5. Wait till accountInactivityLimit exceeded and check users, expected error 19 + 6. Modify accountInactivityLimit to use the min value. + 7. Add few users to ou=groups subtree in the default suffix + 8. Wait till it reaches accountInactivityLimit and check users, expected error 19 + 9. Modify accountInactivityLimit to 10 times(30 secs) bigger than the initial value. + 10. Add few users to ou=groups subtree in the default suffix + 11. Wait for 90 secs and check if account is not inactivated, expected 0 + 12. Wait for +27 secs and check if account is not inactivated, expected 0 + 13. Wait for +30 secs and check if account is inactivated, error 19 + 14. Replace the lastLoginTime attribute and check if account is activated + 15. Modify accountInactivityLimit to 12 secs, which is the default + 16. Run ldapsearch as normal user, expected 0. + :assert: + 1. Success + 2. Success + 3. Success + 4. Success + 5. Success + 6. Success + 7. Success + 8. Success + 9. Success + 10. Success + 11. Success + 12. Success + 13. Success + 14. Success + 15. Success + 16. Success + """ + + suffix = DEFAULT_SUFFIX + subtree = "ou=groups" + userid = "inactestusr" + nousrs = 3 + + log.info('AccountInactivityLimit set to 12. 
Account will be inactivated if not accessed in 12 secs') + add_users(topology_st, suffix, subtree, userid, nousrs, 2) + log.info('Sleep for 9 secs to check if account is not inactivated, expected 0') + time.sleep(9) + account_status(topology_st, suffix, subtree, userid, nousrs, 2, "Enabled") + + modify_attr(topology_st, ACCP_CONF, 'accountInactivityLimit', '20') + time.sleep(17) + account_status(topology_st, suffix, subtree, userid, nousrs, 2, "Enabled") + time.sleep(20) + account_status(topology_st, suffix, subtree, userid, nousrs, 2, "Disabled") + + modify_attr(topology_st, ACCP_CONF, 'accountInactivityLimit', '1') + add_users(topology_st, suffix, subtree, userid, 2, 1) + time.sleep(2) + account_status(topology_st, suffix, subtree, userid, 2, 1, "Disabled") + + modify_attr(topology_st, ACCP_CONF, 'accountInactivityLimit', '30') + add_users(topology_st, suffix, subtree, userid, 1, 0) + time.sleep(27) + account_status(topology_st, suffix, subtree, userid, 1, 0, "Enabled") + time.sleep(30) + account_status(topology_st, suffix, subtree, userid, 1, 0, "Disabled") + + log.info('Check if account is activated, expected 0') + add_time_attr(topology_st, suffix, subtree, userid, nousrs, 'lastLoginTime') + account_status(topology_st, suffix, subtree, userid, nousrs, 0, "Enabled") + + modify_attr(topology_st, ACCP_CONF, 'accountInactivityLimit', '12') + del_users(topology_st, suffix, subtree, userid, nousrs) + + +def test_glnologin_attr(topology_st, accpol_global): + """Verify if user account is inactivated based on createTimeStamp attribute, no lastLoginTime attribute present + + :id: 3032f670-705d-4f69-96f5-d75445cffcfb + :setup: Standalone instance, Local account policy plugin configuration, + set accountInactivityLimit to few secs. + :steps: + 1. Configure Global account policy plugin with createTimestamp as stateattrname + 2. lastLoginTime attribute will not be effective. + 3. Add few users to ou=groups subtree in the default suffix + 4. Wait for 10 secs and check if account is not inactivated, expected 0 + 5. Modify AccountInactivityLimit to 20 secs + 6. Wait for +9 secs and check if account is not inactivated, expected 0 + 7. Wait for +3 secs and check if account is inactivated, error 19 + 8. Modify accountInactivityLimit to 3 secs + 9. Add few users to ou=groups subtree in the default suffix + 10. Wait for 3 secs and check if account is inactivated, error 19 + 11. Modify accountInactivityLimit to 30 secs + 12. Add few users to ou=groups subtree in the default suffix + 13. Wait for 90 secs and check if account is not inactivated, expected 0 + 14. Wait for +28 secs and check if account is not inactivated, expected 0 + 15. Wait for +2 secs and check if account is inactivated, error 19 + 16. Replace the lastLoginTime attribute and check if account is activated + 17. Modify accountInactivityLimit to 12 secs, which is the default + 18. Run ldapsearch as normal user, expected 0. + :assert: + 1. Success + 2. Success + 3. Success + 4. Success + 5. Success + 6. Success + 7. Success + 8. Success + 9. Success + 10. Success + 11. Success + 12. Success + 13. Success + 14. Success + 15. Success + 16. Success + 17. Success + 18. Success + """ + + suffix = DEFAULT_SUFFIX + subtree = "ou=groups" + userid = "nologinusr" + nousrs = 3 + + log.info('AccountInactivityLimit set to 12. 
Account will be inactivated if not accessed in 12 secs') + log.info('Set attribute StateAttrName to createTimestamp, loginTime attr wont be considered') + modify_attr(topology_st, ACCP_CONF, 'stateattrname', 'createTimestamp') + topology_st.standalone.restart(timeout=10) + add_users(topology_st, suffix, subtree, userid, nousrs, 2) + log.info('Sleep for 9 secs to check if account is not inactivated, expected 0') + time.sleep(9) + account_status(topology_st, suffix, subtree, userid, nousrs, 2, "Enabled") + + modify_attr(topology_st, ACCP_CONF, 'accountInactivityLimit', '20') + time.sleep(9) + account_status(topology_st, suffix, subtree, userid, nousrs, 2, "Enabled") + time.sleep(3) + account_status(topology_st, suffix, subtree, userid, nousrs, 2, "Disabled") + + modify_attr(topology_st, ACCP_CONF, 'accountInactivityLimit', '3') + add_users(topology_st, suffix, subtree, userid, 2, 1) + time.sleep(2) + account_status(topology_st, suffix, subtree, userid, 2, 1, "Enabled") + time.sleep(2) + account_status(topology_st, suffix, subtree, userid, 2, 1, "Disabled") + + modify_attr(topology_st, ACCP_CONF, 'accountInactivityLimit', '30') + add_users(topology_st, suffix, subtree, userid, 1, 0) + time.sleep(28) + account_status(topology_st, suffix, subtree, userid, 1, 0, "Enabled") + time.sleep(2) + account_status(topology_st, suffix, subtree, userid, 1, 0, "Disabled") + + modify_attr(topology_st, ACCP_CONF, 'accountInactivityLimit', '12') + log.info('Set attribute StateAttrName to lastLoginTime, the default') + modify_attr(topology_st, ACCP_CONF, 'stateattrname', 'lastLoginTime') + topology_st.standalone.restart(timeout=10) + add_time_attr(topology_st, suffix, subtree, userid, nousrs, 'lastLoginTime') + log.info('Check if account is activated, expected 0') + account_status(topology_st, suffix, subtree, userid, nousrs, 0, "Enabled") + del_users(topology_st, suffix, subtree, userid, nousrs) + + +def test_glnoalt_stattr(topology_st, accpol_global): + """Verify if user account can be inactivated based on lastLoginTime attribute, altstateattrname set to 1.1 + + :id: 8dcc3540-578f-422a-bb44-28c2cf20dbcd + :setup: Standalone instance, Global account policy plugin configuration, + set accountInactivityLimit to few secs. + :steps: + 1. Configure Global account policy plugin with altstateattrname to 1.1 + 2. Add few users to ou=groups subtree in the default suffix + 3. Wait till it reaches accountInactivityLimit + 4. Remove lastLoginTime attribute from the user entry + 5. Run ldapsearch as normal user, expected 0. no lastLoginTime attribute present + 6. Wait till it reaches accountInactivityLimit and check users, expected error 19 + :assert: + 1. Success + 2. Success + 3. Success + 4. Success + 5. Success + 6. 
Success + """ + + suffix = DEFAULT_SUFFIX + subtree = "ou=groups" + userid = "nologinusr" + nousrs = 3 + log.info('Set attribute altStateAttrName to 1.1') + modify_attr(topology_st, ACCP_CONF, 'altstateattrname', '1.1') + topology_st.standalone.restart(timeout=10) + add_users(topology_st, suffix, subtree, userid, nousrs, 0) + log.info('Sleep for 13 secs to check if account is not inactivated, expected 0') + time.sleep(13) + account_status(topology_st, suffix, subtree, userid, nousrs, 0, "Enabled") + log.info('lastLoginTime attribute is added from the above ldap bind by userdn') + time.sleep(13) + account_status(topology_st, suffix, subtree, userid, nousrs, 0, "Disabled") + del_time_attr(topology_st, suffix, subtree, userid, nousrs, 'lastLoginTime') + account_status(topology_st, suffix, subtree, userid, nousrs, 0, "Enabled") + modify_attr(topology_st, ACCP_CONF, 'altstateattrname', 'createTimestamp') + topology_st.standalone.restart(timeout=10) + add_time_attr(topology_st, suffix, subtree, userid, nousrs, 'lastLoginTime') + account_status(topology_st, suffix, subtree, userid, nousrs, 0, "Enabled") + del_users(topology_st, suffix, subtree, userid, nousrs) + + +def test_glattr_modtime(topology_st, accpol_global): + """Verify if user account can be inactivated based on modifyTimeStamp attribute + + :id: 67380839-2966-45dc-848a-167a954153e1 + :setup: Standalone instance, Global account policy plugin configuration, + set accountInactivityLimit to few secs. + :steps: + 1. Configure Global account policy plugin with altstateattrname to modifyTimestamp + 2. Add few users to ou=groups subtree in the default suffix + 3. Wait till the accountInactivityLimit exceeded and check users, expected error 19 + 4. Modify cn attribute for user, ModifyTimeStamp is updated. + 5. Check if user is activated based on ModifyTimeStamp attribute, expected 0 + 6. Change the plugin to use createTimeStamp and remove lastLoginTime attribute + 7. Check if account is inactivated, expected error 19 + :assert: + 1. Success + 2. Success + 3. Success + 4. Success + 5. Success + 6. Success + 7. 
Success + """ + + suffix = DEFAULT_SUFFIX + subtree = "ou=groups" + userid = "modtimeusr" + nousrs = 3 + log.info('Set attribute altStateAttrName to modifyTimestamp') + modify_attr(topology_st, ACCP_CONF, 'altstateattrname', 'modifyTimestamp') + topology_st.standalone.restart(timeout=10) + add_users(topology_st, suffix, subtree, userid, nousrs, 0) + log.info('Sleep for 13 secs to check if account is inactivated, expected 0') + time.sleep(13) + check_attr(topology_st, suffix, subtree, userid, nousrs, "modifyTimeStamp=*") + account_status(topology_st, suffix, subtree, userid, nousrs, 0, "Disabled") + attr_name = "cn" + attr_value = "cnewusr" + modusr_attr(topology_st, suffix, subtree, userid, nousrs, attr_name, attr_value) + account_status(topology_st, suffix, subtree, userid, nousrs, 0, "Enabled") + modify_attr(topology_st, ACCP_CONF, 'altstateattrname', 'createTimestamp') + del_time_attr(topology_st, suffix, subtree, userid, nousrs, 'lastLoginTime') + topology_st.standalone.restart(timeout=10) + account_status(topology_st, suffix, subtree, userid, nousrs, 0, "Disabled") + add_time_attr(topology_st, suffix, subtree, userid, nousrs, 'lastLoginTime') + account_status(topology_st, suffix, subtree, userid, nousrs, 0, "Enabled") + del_users(topology_st, suffix, subtree, userid, nousrs) + + +def test_glnoalt_nologin(topology_st, accpol_global): + """Verify if account policy plugin works if we set altstateattrname set to 1.1 and alwaysrecordlogin to NO + + :id: 49eda7db-84de-47ba-8f81-ac5e4de3a500 + :setup: Standalone instance, Global account policy plugin configuration, + set accountInactivityLimit to few secs. + :steps: + 1. Configure Global account policy plugin with altstateattrname to 1.1 + 2. Set alwaysrecordlogin to NO. + 3. Add few users to ou=groups subtree in the default suffix + 4. Wait till accountInactivityLimit exceeded and check users, expected 0 + 5. Check for lastLoginTime attribute, it should not be present + 6. Wait for few more secs and check if account is not inactivated, expected 0 + 7. Run ldapsearch as normal user, expected 0. no lastLoginTime attribute present + 8. Set altstateattrname to createTimeStamp + 9. Check if user account is inactivated based on createTimeStamp attribute. + 10. Account should be inactivated, expected error 19 + :assert: + 1. Success + 2. Success + 3. Success + 4. Success + 5. Success + 6. Success + 7. Success + 8. Success + 9. Success + 10. 
Success + """ + + suffix = DEFAULT_SUFFIX + subtree = "ou=groups" + userid = "norecrodlogusr" + nousrs = 3 + log.info('Set attribute altStateAttrName to 1.1') + modify_attr(topology_st, ACCP_CONF, 'altstateattrname', '1.1') + log.info('Set attribute alwaysrecordlogin to No') + modify_attr(topology_st, ACCP_CONF, 'alwaysrecordlogin', 'no') + topology_st.standalone.restart(timeout=10) + add_users(topology_st, suffix, subtree, userid, nousrs, 0) + log.info('Sleep for 13 secs to check if account is not inactivated, expected 0') + time.sleep(13) + account_status(topology_st, suffix, subtree, userid, nousrs, 0, "Enabled") + time.sleep(3) + account_status(topology_st, suffix, subtree, userid, nousrs, 0, "Enabled") + log.info('Set attribute altStateAttrName to createTimestamp') + modify_attr(topology_st, ACCP_CONF, 'altstateattrname', 'createTimestamp') + topology_st.standalone.restart(timeout=10) + time.sleep(2) + account_status(topology_st, suffix, subtree, userid, nousrs, 0, "Disabled") + log.info('Reset the default attribute values') + modify_attr(topology_st, ACCP_CONF, 'alwaysrecordlogin', 'yes') + topology_st.standalone.restart(timeout=10) + add_time_attr(topology_st, suffix, subtree, userid, nousrs, 'lastLoginTime') + account_status(topology_st, suffix, subtree, userid, nousrs, 0, "Enabled") + del_users(topology_st, suffix, subtree, userid, nousrs) + + +def test_glinact_nsact(topology_st, accpol_global): + """Verify if user account can be activated using ns-activate.pl script. + + :id: 876a7a7c-0b3f-4cd2-9b45-1dc80846e334 + :setup: Standalone instance, Global account policy plugin configuration, + set accountInactivityLimit to few secs. + :steps: + 1. Configure Global account policy plugin + 2. Add few users to ou=groups subtree in the default suffix + 3. Wait for few secs and inactivate user using ns-inactivate.pl + 4. Wait till accountInactivityLimit exceeded. + 5. Run ldapsearch as normal user, expected error 19. + 6. Activate user using ns-activate.pl script + 7. Check if account is activated, expected error 19 + 8. Replace the lastLoginTime attribute and check if account is activated + 9. Run ldapsearch as normal user, expected 0. + :assert: + 1. Success + 2. Success + 3. Success + 4. Success + 5. Success + 6. Success + 7. Success + 8. Success + 9. Success + """ + + suffix = DEFAULT_SUFFIX + subtree = "ou=groups" + userid = "nsactusr" + nousrs = 1 + log.info('AccountInactivityLimit set to 12. 
Account will be inactivated if not accessed in 12 secs') + add_users(topology_st, suffix, subtree, userid, nousrs, 0) + log.info('Sleep for 3 secs to check if account is not inactivated, expected value 0') + time.sleep(3) + nsact_inact(topology_st, suffix, subtree, userid, nousrs, "ns-activate.pl", "") + log.info('Sleep for 10 secs to check if account is inactivated, expected value 19') + time.sleep(10) + nsact_inact(topology_st, suffix, subtree, userid, nousrs, "ns-activate.pl", "") + account_status(topology_st, suffix, subtree, userid, nousrs, 0, "Disabled") + nsact_inact(topology_st, suffix, subtree, userid, nousrs, "ns-accountstatus.pl", + "- inactivated (inactivity limit exceeded)") + add_time_attr(topology_st, suffix, subtree, userid, nousrs, 'lastLoginTime') + account_status(topology_st, suffix, subtree, userid, nousrs, 0, "Enabled") + nsact_inact(topology_st, suffix, subtree, userid, nousrs, "ns-accountstatus.pl", "- activated") + del_users(topology_st, suffix, subtree, userid, nousrs) + + +def test_glinact_acclock(topology_st, accpol_global): + """Verify if user account is activated when account is unlocked by passwordlockoutduration. + + :id: 43601a61-065c-4c80-a7c2-e4f6ae17beb8 + :setup: Standalone instance, Global account policy plugin configuration, + set accountInactivityLimit to few secs. + :steps: + 1. Add few users to ou=groups subtree in the default suffix + 2. Wait for few secs and attempt invalid binds for user + 3. User account should be locked based on Account Lockout policy. + 4. Wait till accountInactivityLimit exceeded and check users, expected error 19 + 5. Wait for passwordlockoutduration and check if account is active + 6. Check if account is unlocked, expected error 19, since account is inactivated + 7. Replace the lastLoginTime attribute and check users, expected 0 + :assert: + 1. Success + 2. Success + 3. Success + 4. Success + 5. Success + 6. Success + 7. Success + """ + + suffix = DEFAULT_SUFFIX + subtree = "ou=groups" + userid = "pwlockusr" + nousrs = 1 + log.info('AccountInactivityLimit set to 12. 
Account will be inactivated if not accessed in 12 secs') + add_users(topology_st, suffix, subtree, userid, nousrs, 0) + log.info('Sleep for 3 secs and try invalid binds to lockout the user') + time.sleep(3) + + pwacc_lock(topology_st, suffix, subtree, userid, nousrs) + log.info('Sleep for 10 secs to check if account is inactivated, expected value 19') + time.sleep(10) + account_status(topology_st, suffix, subtree, userid, nousrs, 0, "Disabled") + + log.info('Add lastLoginTime to activate the user account') + add_time_attr(topology_st, suffix, subtree, userid, nousrs, 'lastLoginTime') + account_status(topology_st, suffix, subtree, userid, nousrs, 0, "Enabled") + + log.info('Checking if account is unlocked after passwordlockoutduration, but inactivated after accountInactivityLimit') + pwacc_lock(topology_st, suffix, subtree, userid, nousrs) + account_status(topology_st, suffix, subtree, userid, nousrs, 0, "Disabled") + + log.info('Account is expected to be unlocked after 5 secs of passwordlockoutduration') + time.sleep(5) + account_status(topology_st, suffix, subtree, userid, nousrs, 0, "Enabled") + + log.info('Sleep 13s and check if account inactivated based on accountInactivityLimit, expected 19') + time.sleep(13) + account_status(topology_st, suffix, subtree, userid, nousrs, 0, "Disabled") + del_users(topology_st, suffix, subtree, userid, nousrs) + + +def test_glnact_pwexp(topology_st, accpol_global): + """Verify if user account is activated when password is reset after password is expired + + :id: 3bb97992-101a-4e5a-b60a-4cc21adcc76e + :setup: Standalone instance, Global account policy plugin configuration, + set accountInactivityLimit to few secs. + :steps: + 1. Add few users to ou=groups subtree in the default suffix + 2. Set passwordmaxage to few secs + 3. Wait for passwordmaxage to reach and check if password expired + 4. Run ldapsearch as normal user, expected error 19. + 5. Reset the password for user account + 6. Wait till accountInactivityLimit exceeded and check users + 7. Run ldapsearch as normal user, expected error 19. + 8. Replace the lastLoginTime attribute and check if account is activated + 9. Run ldapsearch as normal user, expected 0. + :assert: + 1. Success + 2. Success + 3. Success + 4. Success + 5. Success + 6. Success + 7. Success + 8. Success + 9. Success + """ + + suffix = DEFAULT_SUFFIX + subtree = "ou=groups" + userid = "pwexpusr" + nousrs = 1 + try: + topology_st.standalone.config.set('passwordmaxage', '9') + except ldap.LDAPError as e: + log.error('Failed to change the value of passwordmaxage to 9') + raise e + log.info('AccountInactivityLimit set to 12. Account will be inactivated if not accessed in 12 secs') + log.info('Passwordmaxage is set to 9. 
Password will expire in 9 secs') + add_users(topology_st, suffix, subtree, userid, nousrs, 0) + + log.info('Sleep for 9 secs and check if password expired') + time.sleep(9) + account_status(topology_st, suffix, subtree, userid, nousrs, 0, "Expired") + time.sleep(4) # Passed inactivity + account_status(topology_st, suffix, subtree, userid, nousrs, 0, "Disabled") + + log.info('Add lastLoginTime to activate the user account') + add_time_attr(topology_st, suffix, subtree, userid, nousrs, 'lastLoginTime') + account_status(topology_st, suffix, subtree, userid, nousrs, 0, "Expired") + userpw_reset(topology_st, suffix, subtree, userid, nousrs, "DirMgr", PASSWORD, USER_PASW) + account_status(topology_st, suffix, subtree, userid, nousrs, 0, "Enabled") + + # Allow password to expire again, but inactivity continues + time.sleep(7) + + # reset password to counter expiration, we will test expiration again later + userpw_reset(topology_st, suffix, subtree, userid, nousrs, "DirMgr", PASSWORD, USER_PASW) + log.info('Sleep for 4 secs and check if account is now inactivated, expected error 19') + time.sleep(4) + account_status(topology_st, suffix, subtree, userid, nousrs, 0, "Disabled") + userpw_reset(topology_st, suffix, subtree, userid, nousrs, "DirMgr", PASSWORD, USER_PASW) + account_status(topology_st, suffix, subtree, userid, nousrs, 0, "Disabled") + + # Reset inactivity and check for expiration + add_time_attr(topology_st, suffix, subtree, userid, nousrs, 'lastLoginTime') + account_status(topology_st, suffix, subtree, userid, nousrs, 0, "Enabled") + time.sleep(8) + account_status(topology_st, suffix, subtree, userid, nousrs, 0, "Expired") + + # Reset account + userpw_reset(topology_st, suffix, subtree, userid, nousrs, "DirMgr", PASSWORD, USER_PASW) + account_status(topology_st, suffix, subtree, userid, nousrs, 0, "Enabled") + + # Reset maxage + try: + topology_st.standalone.config.set('passwordmaxage', '400') + except ldap.LDAPError as e: + log.error('Failed to change the value of passwordmaxage to 400') + raise e + del_users(topology_st, suffix, subtree, userid, nousrs) + + +def test_locact_inact(topology_st, accpol_local): + """Verify if user account is inactivated when accountInactivityLimit is exceeded. + + :id: 02140e36-79eb-4d88-ba28-66478689289b + :setup: Standalone instance, ou=people subtree configured for Local account + policy plugin configuration, set accountInactivityLimit to few secs. + :steps: + 1. Add few users to ou=people subtree in the default suffix + 2. Wait for few secs before it reaches accountInactivityLimit and check users. + 3. Run ldapsearch as normal user, expected 0 + 4. Wait till accountInactivityLimit is exceeded + 5. Run ldapsearch as normal user and check if its inactivated, expected error 19. + 6. Replace user's lastLoginTime attribute and check if its activated, expected 0 + :assert: + 1. Success + 2. Success + 3. Success + 4. Success + 5. Success + 6. Should return error code 19 + """ + + suffix = DEFAULT_SUFFIX + subtree = "ou=people" + userid = "inactusr" + nousrs = 3 + log.info('AccountInactivityLimit set to 10. 
Account will be inactivated if not accessed in 10 secs') + add_users(topology_st, suffix, subtree, userid, nousrs, 0) + log.info('Sleep for 9 secs to check if account is not inactivated, expected value 0') + time.sleep(9) + log.info('Account should not be inactivated since AccountInactivityLimit not exceeded') + account_status(topology_st, suffix, subtree, userid, 3, 2, "Enabled") + log.info('Sleep for 2 more secs to check if account is inactivated') + time.sleep(2) + account_status(topology_st, suffix, subtree, userid, 2, 0, "Disabled") + log.info('Sleep +9 secs to check if account {}3 is inactivated'.format(userid)) + time.sleep(9) + account_status(topology_st, suffix, subtree, userid, 3, 2, "Disabled") + log.info('Add lastLoginTime attribute to all users and check if its activated') + add_time_attr(topology_st, suffix, subtree, userid, nousrs, 'lastLoginTime') + account_status(topology_st, suffix, subtree, userid, nousrs, 0, "Enabled") + del_users(topology_st, suffix, subtree, userid, nousrs) + + +def test_locinact_modrdn(topology_st, accpol_local): + """Verify if user account is inactivated when moved from ou=groups to ou=people subtree. + + :id: 5f25bea3-fab0-4db4-b43d-2d47cc6e5ad1 + :setup: Standalone instance, ou=people subtree configured for Local account + policy plugin configuration, set accountInactivityLimit to few secs. + :steps: + 1. Add few users to ou=groups subtree in the default suffix + 2. Plugin configured to ou=people subtree only. + 3. Wait for few secs before it reaches accountInactivityLimit and check users. + 4. Run ldapsearch as normal user, expected 0 + 5. Wait till accountInactivityLimit exceeded + 6. Move users from ou=groups subtree to ou=people subtree + 7. Check if users are inactivated, expected error 19 + :assert: + 1. Success + 2. Success + 3. Success + 4. Success + 5. Success + 6. Success + 7. Should return error code 0 and 19 + """ + + suffix = DEFAULT_SUFFIX + subtree = "ou=groups" + userid = "nolockusr" + nousrs = 1 + log.info('Account should not be inactivated since the subtree is not configured') + add_users(topology_st, suffix, subtree, userid, nousrs, 0) + log.info('Sleep for 11 secs to check if account is not inactivated, expected value 0') + time.sleep(11) + account_status(topology_st, suffix, subtree, userid, nousrs, 0, "Enabled") + log.info('Moving users from ou=groups to ou=people subtree') + user = UserAccount(topology_st.standalone, dn='uid=nolockusr1,ou=groups,dc=example,dc=com') + try: + user.rename('uid=nolockusr1', newsuperior='ou=people,dc=example,dc=com') + except ldap.LDAPError as e: + log.error('Failed to move user uid=nolockusr1 from ou=groups to ou=people') + raise e + subtree = "ou=people" + log.info('Then wait for 11 secs and check if entries are inactivated') + time.sleep(11) + account_status(topology_st, suffix, subtree, userid, nousrs, 0, "Disabled") + add_time_attr(topology_st, suffix, subtree, userid, nousrs, 'lastLoginTime') + account_status(topology_st, suffix, subtree, userid, nousrs, 0, "Enabled") + del_users(topology_st, suffix, subtree, userid, nousrs) + + +def test_locact_modrdn(topology_st, accpol_local): + """Verify if user account is inactivated when users moved from ou=people to ou=groups subtree. + + :id: e821cbae-bfc3-40d3-947d-b228c809987f + :setup: Standalone instance, ou=people subtree configured for Local account + policy plugin configuration, set accountInactivityLimit to few secs. + :steps: + 1. Add few users to ou=people subtree in the default suffix + 2. 
Wait for few secs and check if users not inactivated, expected 0. + 3. Move users from ou=people to ou=groups subtree + 4. Wait till accountInactivityLimit is exceeded + 5. Check if users are active in ou=groups subtree, expected 0 + :assert: + 1. Success + 2. Success + 3. Success + 4. Success + 5. Success + """ + + suffix = DEFAULT_SUFFIX + subtree = "ou=people" + userid = "lockusr" + nousrs = 1 + log.info('Account should be inactivated since the subtree is configured') + add_users(topology_st, suffix, subtree, userid, nousrs, 0) + log.info('Sleep for 11 secs to check if account is inactivated, expected value 19') + time.sleep(11) + account_status(topology_st, suffix, subtree, userid, nousrs, 0, "Disabled") + log.info('Moving users from ou=people to ou=groups subtree') + user = UserAccount(topology_st.standalone, dn='uid=lockusr1,ou=people,dc=example,dc=com') + try: + user.rename('uid=lockusr1', newsuperior='ou=groups,dc=example,dc=com') + except ldap.LDAPError as e: + log.error('Failed to move user uid=lockusr1 from ou=people to ou=groups') + raise e + log.info('Sleep for +2 secs and check users from both ou=people and ou=groups subtree') + time.sleep(2) + subtree = "ou=groups" + account_status(topology_st, suffix, subtree, userid, 1, 0, "Enabled") + del_users(topology_st, suffix, subtree, userid, nousrs) + + +if __name__ == '__main__': + # Run isolated + # -s for DEBUG mode + CURRENT_FILE = os.path.realpath(__file__) + pytest.main("-s {}".format(CURRENT_FILE)) diff --git a/dirsrvtests/tests/suites/plugins/attr_nsslapd-pluginarg_test.py b/dirsrvtests/tests/suites/plugins/attr_nsslapd-pluginarg_test.py new file mode 100644 index 0000000..2afaa3d --- /dev/null +++ b/dirsrvtests/tests/suites/plugins/attr_nsslapd-pluginarg_test.py @@ -0,0 +1,211 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2016 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. +# --- END COPYRIGHT BLOCK --- +# +import pytest +from lib389.tasks import * +from lib389.utils import * +from lib389.topologies import topology_st + +from lib389._constants import DEFAULT_SUFFIX, DN_PLUGIN, SUFFIX, PLUGIN_7_BIT_CHECK + +# Skip on older versions +pytestmark = [pytest.mark.tier1, + pytest.mark.skipif(ds_is_older('1.3'), reason="Not implemented")] + +logging.getLogger(__name__).setLevel(logging.DEBUG) +log = logging.getLogger(__name__) + +DN_7BITPLUGIN = "cn=7-bit check,%s" % DN_PLUGIN +ATTRS = ["uid", "mail", "userpassword", ",", SUFFIX, None] + + +@pytest.fixture(scope="module") +def enable_plugin(topology_st): + """Enabling the 7-bit plugin for the + environment setup""" + log.info("Ticket 47431 - 0: Enable 7bit plugin...") + topology_st.standalone.plugins.enable(name=PLUGIN_7_BIT_CHECK) + + +@pytest.mark.ds47431 +def test_duplicate_values(topology_st, enable_plugin): + """Check 26 duplicate values are treated as one + + :id: b23e04f1-2757-42cc-b3a2-26426c903f6d + :setup: Standalone instance, enable 7bit plugin + :steps: + 1. Modify the entry for cn=7-bit check,cn=plugins,cn=config as : + nsslapd-pluginarg0 : uid + nsslapd-pluginarg1 : mail + nsslapd-pluginarg2 : userpassword + nsslapd-pluginarg3 : , + nsslapd-pluginarg4 : dc=example,dc=com + 2. Set nsslapd-pluginarg2 to 'userpassword' for multiple time (ideally 27) + 3. Check whether duplicate values are treated as one + :expectedresults: + 1. It should be modified successfully + 2. It should be successful + 3. 
It should be successful + """ + + log.info("Ticket 47431 - 1: Check 26 duplicate values are treated as one...") + expected = "str2entry_dupcheck.* duplicate values for attribute type nsslapd-pluginarg2 detected in entry cn=7-bit check,cn=plugins,cn=config." + + log.debug('modify_s %s' % DN_7BITPLUGIN) + topology_st.standalone.modify_s(DN_7BITPLUGIN, + [(ldap.MOD_REPLACE, 'nsslapd-pluginarg0', b"uid"), + (ldap.MOD_REPLACE, 'nsslapd-pluginarg1', b"mail"), + (ldap.MOD_REPLACE, 'nsslapd-pluginarg2', b"userpassword"), + (ldap.MOD_REPLACE, 'nsslapd-pluginarg3', b","), + (ldap.MOD_REPLACE, 'nsslapd-pluginarg4', ensure_bytes(SUFFIX))]) + + arg2 = "nsslapd-pluginarg2: userpassword" + topology_st.standalone.stop() + dse_ldif = topology_st.standalone.confdir + '/dse.ldif' + os.system('mv %s %s.47431' % (dse_ldif, dse_ldif)) + os.system( + 'sed -e "s/\\(%s\\)/\\1\\n\\1\\n\\1\\n\\1\\n\\1\\n\\1\\n\\1\\n\\1\\n\\1\\n\\1\\n\\1\\n\\1\\n\\1\\n\\1\\n\\1\\n\\1\\n\\1\\n\\1\\n\\1\\n\\1\\n\\1\\n\\1\\n\\1\\n\\1\\n\\1\\n\\1\\n\\1/" %s.47431 > %s' % ( + arg2, dse_ldif, dse_ldif)) + topology_st.standalone.start() + + cmdline = 'egrep -i "%s" %s' % (expected, topology_st.standalone.errlog) + p = os.popen(cmdline, "r") + line = p.readline() + if line == "": + log.error('Expected error "%s" not logged in %s' % (expected, topology_st.standalone.errlog)) + assert False + else: + log.debug('line: %s' % line) + log.info('Expected error "%s" logged in %s' % (expected, topology_st.standalone.errlog)) + + log.info("Ticket 47431 - 1: done") + + +@pytest.mark.ds47431 +def test_multiple_value(topology_st, enable_plugin): + """Check two values belonging to one arg is fixed + + :id: 20c802bc-332f-4e8d-bcfb-8cd28123d695 + :setup: Standalone instance, enable 7bit plugin + :steps: + 1. Modify the entry for cn=7-bit check,cn=plugins,cn=config as : + nsslapd-pluginarg0 : uid + nsslapd-pluginarg0 : mail + nsslapd-pluginarg1 : userpassword + nsslapd-pluginarg2 : , + nsslapd-pluginarg3 : dc=example,dc=com + nsslapd-pluginarg4 : None + (Note : While modifying add two attributes entries for nsslapd-pluginarg0) + + 2. Check two values belonging to one arg is fixed + :expectedresults: + 1. Entries should be modified successfully + 2. Operation should be successful + """ + + log.info("Ticket 47431 - 2: Check two values belonging to one arg is fixed...") + + topology_st.standalone.modify_s(DN_7BITPLUGIN, + [(ldap.MOD_REPLACE, 'nsslapd-pluginarg0', b"uid"), + (ldap.MOD_ADD, 'nsslapd-pluginarg0', b"mail"), + (ldap.MOD_REPLACE, 'nsslapd-pluginarg1', b"userpassword"), + (ldap.MOD_REPLACE, 'nsslapd-pluginarg2', b","), + (ldap.MOD_REPLACE, 'nsslapd-pluginarg3', ensure_bytes(SUFFIX)), + (ldap.MOD_DELETE, 'nsslapd-pluginarg4', None)]) + + # PLUGIN LOG LEVEL + topology_st.standalone.modify_s(DN_CONFIG, [(ldap.MOD_REPLACE, 'nsslapd-errorlog-level', b'65536')]) + + topology_st.standalone.restart() + + cmdline = 'egrep -i %s %s' % ("NS7bitAttr_Init", topology_st.standalone.errlog) + p = os.popen(cmdline, "r") + i = 0 + while ATTRS[i]: + line = p.readline() + log.debug('line - %s' % line) + log.debug('ATTRS[%d] %s' % (i, ATTRS[i])) + if line == "": + break + elif line.find(ATTRS[i]) >= 0: + log.debug('%s was logged' % ATTRS[i]) + else: + log.error('%s was not logged.' 
% ATTRS[i]) + assert False + i = i + 1 + + log.info("Ticket 47431 - 2: done") + + +@pytest.mark.ds47431 +def test_missing_args(topology_st, enable_plugin): + """Check missing args are fixed + + :id: b2814399-7ed2-4fe0-981d-b0bdbbe31cfb + :setup: Standalone instance, enable 7bit plugin + :steps: + 1. Modify the entry for cn=7-bit check,cn=plugins,cn=config as : + nsslapd-pluginarg0 : None + nsslapd-pluginarg1 : uid + nsslapd-pluginarg2 : None + nsslapd-pluginarg3 : mail + nsslapd-pluginarg5 : userpassword + nsslapd-pluginarg7 : , + nsslapd-pluginarg9 : dc=example,dc=com + (Note: While modifying add 2 entries as None) + + 2. Change the nsslapd-errorlog-level to 65536 + 3. Check missing agrs are fixed + :expectedresults: + 1. Entries should be modified successfully + 2. Operation should be successful + 3. Operation should be successful + """ + + log.info("Ticket 47431 - 3: Check missing args are fixed...") + + topology_st.standalone.modify_s(DN_7BITPLUGIN, + [(ldap.MOD_DELETE, 'nsslapd-pluginarg0', None), + (ldap.MOD_REPLACE, 'nsslapd-pluginarg1', b"uid"), + (ldap.MOD_DELETE, 'nsslapd-pluginarg2', None), + (ldap.MOD_REPLACE, 'nsslapd-pluginarg3', b"mail"), + (ldap.MOD_REPLACE, 'nsslapd-pluginarg5', b"userpassword"), + (ldap.MOD_REPLACE, 'nsslapd-pluginarg7', b","), + (ldap.MOD_REPLACE, 'nsslapd-pluginarg9', ensure_bytes(SUFFIX))]) + + # PLUGIN LOG LEVEL + topology_st.standalone.modify_s(DN_CONFIG, [(ldap.MOD_REPLACE, 'nsslapd-errorlog-level', b'65536')]) + + topology_st.standalone.stop() + os.system('mv %s %s.47431' % (topology_st.standalone.errlog, topology_st.standalone.errlog)) + os.system('touch %s' % (topology_st.standalone.errlog)) + topology_st.standalone.start() + + cmdline = 'egrep -i %s %s' % ("NS7bitAttr_Init", topology_st.standalone.errlog) + p = os.popen(cmdline, "r") + i = 0 + while ATTRS[i]: + line = p.readline() + if line == "": + break + elif line.find(ATTRS[i]) >= 0: + log.debug('%s was logged' % ATTRS[i]) + else: + log.error('%s was not logged.' % ATTRS[i]) + assert False + i = i + 1 + + log.info("Ticket 47431 - 3: done") + log.info('Test complete') + + +if __name__ == '__main__': + # Run isolated + # -s for DEBUG mode + CURRENT_FILE = os.path.realpath(__file__) + pytest.main("-s %s" % CURRENT_FILE) diff --git a/dirsrvtests/tests/suites/plugins/cos_test.py b/dirsrvtests/tests/suites/plugins/cos_test.py new file mode 100644 index 0000000..82e29a0 --- /dev/null +++ b/dirsrvtests/tests/suites/plugins/cos_test.py @@ -0,0 +1,220 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2016 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. 
+# --- END COPYRIGHT BLOCK --- +# +''' +Created on Nov 27th, 2018 + +@author: tbordaz +''' +import logging +import subprocess +import pytest +from lib389 import Entry +from lib389.utils import * +from lib389.plugins import * +from lib389._constants import * +from lib389.topologies import topology_st as topo + +pytestmark = pytest.mark.tier1 + +def add_user(server, uid, testbase, locality=None, tel=None, title=None): + dn = 'uid=%s,%s' % (uid, testbase) + log.fatal('Adding user (%s): ' % dn) + server.add_s(Entry((dn, {'objectclass': ['top', 'person', 'organizationalPerson', 'inetOrgPerson'], + 'cn': 'user_%s' % uid, + 'sn': 'user_%s' % uid, + 'uid': uid, + 'l': locality, + 'title': title, + 'telephoneNumber': tel, + 'description': 'description real'}))) + +@pytest.mark.ds50053 +def test_cos_operational_default(topo): + """operational-default cosAttribute should not overwrite an existing value + + :id: 12fadff9-e14a-4c64-a3ee-51152cb8fcfb + :setup: Standalone Instance + :steps: + 1. Create a user entry with attribute 'l' and 'telephonenumber' (real attribute with real value) + 2. Create cos that defines 'l' as operational-default (virt. attr. with value != real value) + 3. Create cos that defines 'telephone' as default (virt. attr. with value != real value) + 4. Check that telephone is retrieved with real value + 5. Check that 'l' is retrieved with real value + :expectedresults: + 1. should succeed + 2. should succeed + 3. should succeed + """ + + REAL = 'real' + VIRTUAL = 'virtual' + TEL_REAL = '1234 is %s' % REAL + TEL_VIRT = '4321 is %s' % VIRTUAL + + LOC_REAL = 'here is %s' % REAL + LOC_VIRT = 'there is %s' % VIRTUAL + + TITLE_REAL = 'title is %s' % REAL + + inst = topo[0] + + PEOPLE = 'ou=people,%s' % SUFFIX + add_user(inst, 'user_0', PEOPLE, locality=LOC_REAL, tel=TEL_REAL, title=TITLE_REAL) + + # locality cos operational-default + LOC_COS_TEMPLATE = "cn=locality_template,%s" % PEOPLE + LOC_COS_DEFINITION = "cn=locality_definition,%s" % PEOPLE + inst.add_s(Entry((LOC_COS_TEMPLATE, { + 'objectclass': ['top', 'extensibleObject', 'costemplate', + 'ldapsubentry'], + 'l': LOC_VIRT}))) + + inst.add_s(Entry((LOC_COS_DEFINITION, { + 'objectclass': ['top', 'LdapSubEntry', 'cosSuperDefinition', + 'cosPointerDefinition'], + 'cosTemplateDn': LOC_COS_TEMPLATE, + 'cosAttribute': 'l operational-default'}))) + + # telephone cos default + TEL_COS_TEMPLATE = "cn=telephone_template,%s" % PEOPLE + TEL_COS_DEFINITION = "cn=telephone_definition,%s" % PEOPLE + inst.add_s(Entry((TEL_COS_TEMPLATE, { + 'objectclass': ['top', 'extensibleObject', 'costemplate', + 'ldapsubentry'], + 'telephonenumber': TEL_VIRT}))) + + inst.add_s(Entry((TEL_COS_DEFINITION, { + 'objectclass': ['top', 'LdapSubEntry', 'cosSuperDefinition', + 'cosPointerDefinition'], + 'cosTemplateDn': TEL_COS_TEMPLATE, + 'cosAttribute': 'telephonenumber default'}))) + + # seeAlso cos operational + SEEALSO_VIRT = "dc=%s,dc=example,dc=com" % VIRTUAL + SEEALSO_COS_TEMPLATE = "cn=seealso_template,%s" % PEOPLE + SEEALSO_COS_DEFINITION = "cn=seealso_definition,%s" % PEOPLE + inst.add_s(Entry((SEEALSO_COS_TEMPLATE, { + 'objectclass': ['top', 'extensibleObject', 'costemplate', + 'ldapsubentry'], + 'seealso': SEEALSO_VIRT}))) + + inst.add_s(Entry((SEEALSO_COS_DEFINITION, { + 'objectclass': ['top', 'LdapSubEntry', 'cosSuperDefinition', + 'cosPointerDefinition'], + 'cosTemplateDn': SEEALSO_COS_TEMPLATE, + 'cosAttribute': 'seealso operational'}))) + + # description cos override + DESC_VIRT = "desc is %s" % VIRTUAL + DESC_COS_TEMPLATE = "cn=desc_template,%s" % 
PEOPLE + DESC_COS_DEFINITION = "cn=desc_definition,%s" % PEOPLE + inst.add_s(Entry((DESC_COS_TEMPLATE, { + 'objectclass': ['top', 'extensibleObject', 'costemplate', + 'ldapsubentry'], + 'description': DESC_VIRT}))) + + inst.add_s(Entry((DESC_COS_DEFINITION, { + 'objectclass': ['top', 'LdapSubEntry', 'cosSuperDefinition', + 'cosPointerDefinition'], + 'cosTemplateDn': DESC_COS_TEMPLATE, + 'cosAttribute': 'description override'}))) + + # title cos override + TITLE_VIRT = [] + for i in range(2): + TITLE_VIRT.append("title is %s %d" % (VIRTUAL, i)) + TITLE_COS_TEMPLATE = "cn=title_template,%s" % PEOPLE + TITLE_COS_DEFINITION = "cn=title_definition,%s" % PEOPLE + inst.add_s(Entry((TITLE_COS_TEMPLATE, { + 'objectclass': ['top', 'extensibleObject', 'costemplate', + 'ldapsubentry'], + 'title': TITLE_VIRT}))) + + inst.add_s(Entry((TITLE_COS_DEFINITION, { + 'objectclass': ['top', 'LdapSubEntry', 'cosSuperDefinition', + 'cosPointerDefinition'], + 'cosTemplateDn': TITLE_COS_TEMPLATE, + 'cosAttribute': 'title merge-schemes'}))) + + # note that the search requests both attributes (it is required for operational*) + ents = inst.search_s(SUFFIX, ldap.SCOPE_SUBTREE, "uid=user_0", ["telephonenumber", "l"]) + assert len(ents) == 1 + ent = ents[0] + + # Check telephonenumber (specifier default) with real value => real + assert ent.hasAttr('telephonenumber') + value = ent.getValue('telephonenumber') + log.info('Returned telephonenumber (exp. real): %s' % value) + log.info('Returned telephonenumber: %d' % value.find(REAL.encode())) + assert value.find(REAL.encode()) != -1 + + # Check 'locality' (specifier operational-default) with real value => real + assert ent.hasAttr('l') + value = ent.getValue('l') + log.info('Returned l (exp. real): %s ' % value) + log.info('Returned l: %d' % value.find(REAL.encode())) + assert value.find(REAL.encode()) != -1 + + # Check 'seealso' (specifier operational) without real value => virtual + assert not ent.hasAttr('seealso') + ents = inst.search_s(SUFFIX, ldap.SCOPE_SUBTREE, "uid=user_0", ["seealso"]) + assert len(ents) == 1 + ent = ents[0] + value = ent.getValue('seealso') + log.info('Returned seealso (exp. virtual): %s' % value) + log.info('Returned seealso: %d' % value.find(VIRTUAL.encode())) + assert value.find(VIRTUAL.encode()) != -1 + + # Check 'description' (specifier override) with real value => virtual + assert not ent.hasAttr('description') + ents = inst.search_s(SUFFIX, ldap.SCOPE_SUBTREE, "uid=user_0") + assert len(ents) == 1 + ent = ents[0] + value = ent.getValue('description') + log.info('Returned description (exp. virtual): %s' % value) + log.info('Returned description: %d' % value.find(VIRTUAL.encode())) + assert value.find(VIRTUAL.encode()) != -1 + + # Check 'title' (specifier merge-schemes) with real value => real value returned + ents = inst.search_s(SUFFIX, ldap.SCOPE_SUBTREE, "uid=user_0") + assert len(ents) == 1 + ent = ents[0] + found_real = False + found_virtual = False + for value in ent.getValues('title'): + log.info('Returned title (exp. 
real): %s' % value) + if value.find(VIRTUAL.encode()) != -1: + found_virtual = True + if value.find(REAL.encode()) != -1: + found_real = True + assert not found_virtual + assert found_real + + # Check 'title ((specifier merge-schemes) without real value => real value returned + ents = inst.search_s(SUFFIX, ldap.SCOPE_SUBTREE, "uid=user_0") + assert len(ents) == 1 + inst.modify_s(ents[0].dn,[(ldap.MOD_DELETE, 'title', None)]) + + inst.restart() + ents = inst.search_s(SUFFIX, ldap.SCOPE_SUBTREE, "uid=user_0") + assert len(ents) == 1 + ent = ents[0] + found_real = False + found_virtual = False + count = 0 + for value in ent.getValues('title'): + log.info('Returned title(exp. virt): %s' % value) + count = count + 1 + if value.find(VIRTUAL.encode()) != -1: + found_virtual = True + if value.find(REAL.encode()) != -1: + found_real = True + assert not found_real + assert found_virtual + assert count == 2 diff --git a/dirsrvtests/tests/suites/plugins/deref_aci_test.py b/dirsrvtests/tests/suites/plugins/deref_aci_test.py new file mode 100644 index 0000000..ee64ff1 --- /dev/null +++ b/dirsrvtests/tests/suites/plugins/deref_aci_test.py @@ -0,0 +1,141 @@ +import os +import logging +import pytest +import ldap +from lib389._constants import DEFAULT_SUFFIX, PASSWORD +from lib389.idm.organizationalunit import OrganizationalUnits +from lib389.idm.user import UserAccounts, TEST_USER_PROPERTIES +from lib389.idm.group import Groups +from lib389.topologies import topology_st as topo + +pytestmark = pytest.mark.tier1 + +DEBUGGING = os.getenv("DEBUGGING", default=None) +if DEBUGGING: + logging.getLogger(__name__).setLevel(logging.DEBUG) +else: + logging.getLogger(__name__).setLevel(logging.INFO) +log = logging.getLogger(__name__) + +ACCTS_DN = "ou=accounts,dc=example,dc=com" +USERS_DN = "ou=users,ou=accounts,dc=example,dc=com" +GROUPS_DN = "ou=groups,ou=accounts,dc=example,dc=com" +ADMIN_GROUP_DN = "cn=admins,ou=groups,ou=accounts,dc=example,dc=com" +ADMIN_DN = "uid=admin,ou=users,ou=accounts,dc=example,dc=com" + +ACCTS_ACI = ('(targetattr="userPassword")(version 3.0; acl "allow password ' + + 'search"; allow(search) userdn = "ldap:///all";)') +USERS_ACI = ('(targetattr = "cn || createtimestamp || description || displayname || entryusn || gecos ' + + '|| gidnumber || givenname || homedirectory || initials || ' + + 'loginshell || manager || modifytimestamp || objectclass || sn || title || uid || uidnumber")' + + '(targetfilter = "(objectclass=posixaccount)")' + + '(version 3.0;acl "Read Attributes";allow (compare,read,search) userdn = "ldap:///anyone";)') +GROUPS_ACIS = [ + ( + '(targetattr = "businesscategory || cn || createtimestamp || description |' + + '| entryusn || gidnumber || mepmanagedby || modifytimestamp || o || objectclass || ou || own' + + 'er || seealso")(targetfilter = "(objectclass=posixgroup)")(version 3.0;acl' + + '"permission:System: Read Groups";allow (compare,re' + + 'ad,search) userdn = "ldap:///anyone";)' + ), + ( + '(targetattr = "member || memberof || memberuid")(targetfilter = '+ + '"(objectclass=posixgroup)")(version 3.0;acl' + + '"permission:System: Read Group Membership";allow (compare,read' + + ',search) userdn = "ldap:///all";)' + ) +] + + +def test_deref_and_access_control(topo): + """Test that the deref plugin honors access control rules correctly + + The setup mimics a generic IPA DIT with its ACI's. The userpassword + attribute should not be returned + + :id: bedb6af2-b765-479d-808c-df0348e0ec95 + :setup: Standalone Instance + :steps: + 1. Create container entries with aci's + 2. 
Perform deref search and make sure userpassword is not returned + :expectedresults: + 1. Success + 2. Success + """ + + topo.standalone.config.set('nsslapd-schemacheck', 'off') + if DEBUGGING: + topo.standalone.config.enable_log('audit') + topo.standalone.config.set('nsslapd-errorlog-level', '128') + + # Accounts + ou1 = OrganizationalUnits(topo.standalone, DEFAULT_SUFFIX) + ou1.create(properties={ + 'ou': 'accounts', + 'aci': ACCTS_ACI + }) + + # Users + ou2 = OrganizationalUnits(topo.standalone, ACCTS_DN) + ou2.create(properties={ + 'ou': 'users', + 'aci': USERS_ACI + }) + + # Groups + ou3 = OrganizationalUnits(topo.standalone, ACCTS_DN) + ou3.create(properties={ + 'ou': 'groups', + 'aci': GROUPS_ACIS + }) + + # Create User + users = UserAccounts(topo.standalone, USERS_DN, rdn=None) + user_props = TEST_USER_PROPERTIES.copy() + user_props.update( + { + 'uid': 'user', + 'objectclass': ['posixAccount', 'extensibleObject'], + 'userpassword': PASSWORD + } + ) + user = users.create(properties=user_props) + + # Create Admin user + user_props = TEST_USER_PROPERTIES.copy() + user_props.update( + { + 'uid': 'admin', + 'objectclass': ['posixAccount', 'extensibleObject', 'inetuser'], + 'userpassword': PASSWORD, + 'memberOf': ADMIN_GROUP_DN + } + ) + users.create(properties=user_props) + + # Create Admin group + groups = Groups(topo.standalone, GROUPS_DN, rdn=None) + group_props = { + 'cn': 'admins', + 'gidNumber': '123', + 'objectclass': ['posixGroup', 'extensibleObject'], + 'member': ADMIN_DN + } + groups.create(properties=group_props) + + # Bind as user, then perform deref search on admin user + user.rebind(PASSWORD) + result, control_response = topo.standalone.dereference( + 'member:cn,userpassword', + base=ADMIN_GROUP_DN, + scope=ldap.SCOPE_BASE) + + log.info('Check, that the dereference search result does not have userpassword') + assert result[0][2][0].entry[0]['attrVals'][0]['type'] != 'userpassword' + + +if __name__ == '__main__': + # Run isolated + # -s for DEBUG mode + CURRENT_FILE = os.path.realpath(__file__) + pytest.main(["-s", CURRENT_FILE]) diff --git a/dirsrvtests/tests/suites/plugins/dna_test.py b/dirsrvtests/tests/suites/plugins/dna_test.py new file mode 100644 index 0000000..f6b1f00 --- /dev/null +++ b/dirsrvtests/tests/suites/plugins/dna_test.py @@ -0,0 +1,86 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2019 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. +# --- END COPYRIGHT BLOCK --- +# +"""Test DNA plugin functionality""" + +import logging +import pytest +from lib389._constants import DEFAULT_SUFFIX +from lib389.plugins import DNAPlugin, DNAPluginSharedConfigs, DNAPluginConfigs +from lib389.idm.organizationalunit import OrganizationalUnits +from lib389.idm.user import UserAccounts +from lib389.topologies import topology_st +import ldap + +pytestmark = pytest.mark.tier1 + +log = logging.getLogger(__name__) + + +@pytest.mark.ds47937 +def test_dnatype_only_valid(topology_st): + """Test that DNA plugin only accepts valid attributes for "dnaType" + + :id: 0878ecff-5fdc-47d7-8c8f-edf4556f9746 + :setup: Standalone Instance + :steps: + 1. Create a use entry + 2. Create DNA shared config entry container + 3. Create DNA shared config entry + 4. Add DNA plugin config entry + 5. Enable DNA plugin + 6. Restart the instance + 7. Replace dnaType with invalid value + :expectedresults: + 1. Successful + 2. Successful + 3. Successful + 4. Successful + 5. Successful + 6. Successful + 7. 
Unwilling to perform exception should be raised + """ + + inst = topology_st.standalone + plugin = DNAPlugin(inst) + + log.info("Creating an entry...") + users = UserAccounts(inst, DEFAULT_SUFFIX) + users.create_test_user(uid=1) + + log.info("Creating \"ou=ranges\"...") + ous = OrganizationalUnits(inst, DEFAULT_SUFFIX) + ou_ranges = ous.create(properties={'ou': 'ranges'}) + ou_people = ous.get("People") + + log.info("Creating DNA shared config entry...") + shared_configs = DNAPluginSharedConfigs(inst, ou_ranges.dn) + shared_configs.create(properties={'dnaHostname': str(inst.host), + 'dnaPortNum': str(inst.port), + 'dnaRemainingValues': '9501'}) + + log.info("Add dna plugin config entry...") + configs = DNAPluginConfigs(inst, plugin.dn) + config = configs.create(properties={'cn': 'dna config', + 'dnaType': 'description', + 'dnaMaxValue': '10000', + 'dnaMagicRegen': '0', + 'dnaFilter': '(objectclass=top)', + 'dnaScope': ou_people.dn, + 'dnaNextValue': '500', + 'dnaSharedCfgDN': ou_ranges.dn}) + + log.info("Enable the DNA plugin...") + plugin.enable() + + log.info("Restarting the server...") + inst.restart() + + log.info("Apply an invalid attribute to the DNA config(dnaType: foo)...") + with pytest.raises(ldap.UNWILLING_TO_PERFORM): + config.replace('dnaType', 'foo') diff --git a/dirsrvtests/tests/suites/plugins/memberof_test.py b/dirsrvtests/tests/suites/plugins/memberof_test.py new file mode 100644 index 0000000..bc99eef --- /dev/null +++ b/dirsrvtests/tests/suites/plugins/memberof_test.py @@ -0,0 +1,2827 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2016 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. +# --- END COPYRIGHT BLOCK --- +# +import pytest +from lib389.tasks import * +from lib389.utils import * +from lib389.topologies import topology_st +from lib389._constants import PLUGIN_MEMBER_OF, SUFFIX + +pytestmark = pytest.mark.tier1 + +DEBUGGING = os.getenv('DEBUGGING', False) + +logging.getLogger(__name__).setLevel(logging.DEBUG) +log = logging.getLogger(__name__) + +MEMBEROF_PLUGIN_DN = ('cn=' + PLUGIN_MEMBER_OF + ',cn=plugins,cn=config') +USER1_DN = 'uid=user1,' + DEFAULT_SUFFIX +USER2_DN = 'uid=user2,' + DEFAULT_SUFFIX +GROUP_DN = 'cn=group,' + DEFAULT_SUFFIX + +PLUGIN_TYPE = 'nsslapd-pluginType' +PLUGIN_MEMBEROF_GRP_ATTR = 'memberofgroupattr' +PLUGIN_ENABLED = 'nsslapd-pluginEnabled' + +USER_RDN = "user" +USERS_CONTAINER = "ou=people,%s" % SUFFIX + +GROUP_RDN = "group" +GROUPS_CONTAINER = "ou=groups,%s" % SUFFIX + + +def _set_memberofgroupattr_add(topology_st, values): + topology_st.standalone.modify_s(MEMBEROF_PLUGIN_DN, + [(ldap.MOD_ADD, + PLUGIN_MEMBEROF_GRP_ATTR, + ensure_bytes(values))]) + + +def _get_user_rdn(ext): + return ensure_bytes("uid=%s_%s" % (USER_RDN, ext)) + + +def _get_user_dn(ext): + return ensure_bytes("%s,%s" % (ensure_str(_get_user_rdn(ext)), USERS_CONTAINER)) + + +def _get_group_rdn(ext): + return ensure_bytes("cn=%s_%s" % (GROUP_RDN, ext)) + + +def _get_group_dn(ext): + return ensure_bytes("%s,%s" % (ensure_str(_get_group_rdn(ext)), GROUPS_CONTAINER)) + + +def _create_user(topology_st, ext): + user_dn = ensure_str(_get_user_dn(ext)) + topology_st.standalone.add_s(Entry((user_dn, { + 'objectclass': 'top extensibleObject'.split(), + 'uid': ensure_str(_get_user_rdn(ext)) + }))) + log.info("Create user %s" % user_dn) + return ensure_bytes(user_dn) + + +def _delete_user(topology_st, ext): + user_dn = ensure_str(_get_user_dn(ext)) + topology_st.standalone.delete_s(user_dn) + 
log.info("Delete user %s" % user_dn) + + +def _create_group(topology_st, ext): + group_dn = ensure_str(_get_group_dn(ext)) + topology_st.standalone.add_s(Entry((group_dn, { + 'objectclass': 'top groupOfNames groupOfUniqueNames extensibleObject'.split(), + 'ou': ensure_str(_get_group_rdn(ext)) + }))) + log.info("Create group %s" % group_dn) + return ensure_bytes(group_dn) + + +def _delete_group(topology_st, ext): + group_dn = ensure_str(_get_group_dn(ext)) + topology_st.standalone.delete_s(group_dn) + log.info("Delete group %s" % group_dn) + + +def _check_memberattr(topology_st, entry, memberattr, value): + log.info("Check %s.%s = %s" % (entry, memberattr, value)) + entry = topology_st.standalone.getEntry(ensure_str(entry), ldap.SCOPE_BASE, '(objectclass=*)', [memberattr]) + if not entry.hasAttr(ensure_str(memberattr)): + return False + + found = False + for val in entry.getValues(ensure_str(memberattr)): + log.info("%s: %s" % (memberattr, ensure_str(val))) + if ensure_str(value.lower()) == ensure_str(val.lower()): + found = True + break + return found + + +def _check_memberof(topology_st, member, group): + log.info("Lookup memberof from %s" % member) + entry = topology_st.standalone.getEntry(ensure_str(member), ldap.SCOPE_BASE, '(objectclass=*)', ['memberof']) + if not entry.hasAttr('memberof'): + return False + + found = False + for val in entry.getValues('memberof'): + log.info("memberof: %s" % ensure_str(val)) + if ensure_str(group.lower()) == ensure_str(val.lower()): + found = True + log.info("--> membership verified") + break + return found + + +def test_betxnpostoperation_replace(topology_st): + """Test modify the memberof plugin operation to use the new type + + :id: d222af17-17a6-48a0-8f22-a38306726a91 + :setup: Standalone instance + :steps: + 1. Set plugin type to betxnpostoperation + 2. Check is was changed + :expectedresults: + 1. Success + 2. Success + """ + + topology_st.standalone.modify_s(MEMBEROF_PLUGIN_DN, + [(ldap.MOD_REPLACE, + PLUGIN_TYPE, + b'betxnpostoperation')]) + topology_st.standalone.restart() + ent = topology_st.standalone.getEntry(MEMBEROF_PLUGIN_DN, ldap.SCOPE_BASE, "(objectclass=*)", [PLUGIN_TYPE]) + assert ent.hasAttr(PLUGIN_TYPE) + assert ent.getValue(PLUGIN_TYPE) == b'betxnpostoperation' + + +def test_memberofgroupattr_add(topology_st): + """Check multiple grouping attributes supported + + :id: d222af17-17a6-48a0-8f22-a38306726a92 + :setup: Standalone instance + :steps: + 1. Add memberofgroupattr - 'uniqueMember' + 2. Check we have 'uniqueMember' and 'member' values + :expectedresults: + 1. Success + 2. Success + """ + + _set_memberofgroupattr_add(topology_st, 'uniqueMember') + ent = topology_st.standalone.getEntry(MEMBEROF_PLUGIN_DN, ldap.SCOPE_BASE, "(objectclass=*)", + [PLUGIN_MEMBEROF_GRP_ATTR]) + assert ent.hasAttr(PLUGIN_MEMBEROF_GRP_ATTR) + assert b'member'.lower() in [x.lower() for x in ent.getValues(PLUGIN_MEMBEROF_GRP_ATTR)] + assert b'uniqueMember'.lower() in [x.lower() for x in ent.getValues(PLUGIN_MEMBEROF_GRP_ATTR)] + + +def test_enable(topology_st): + """Check the plug-in is started + + :id: d222af17-17a6-48a0-8f22-a38306726a93 + :setup: Standalone instance + :steps: + 1. Enable the plugin + 2. Restart the instance + :expectedresults: + 1. Success + 2. 
Server should start and plugin should be on + """ + + log.info("Enable MemberOf plugin") + topology_st.standalone.plugins.enable(name=PLUGIN_MEMBER_OF) + topology_st.standalone.restart() + ent = topology_st.standalone.getEntry(MEMBEROF_PLUGIN_DN, ldap.SCOPE_BASE, "(objectclass=*)", [PLUGIN_ENABLED]) + assert ent.hasAttr(PLUGIN_ENABLED) + assert ent.getValue(PLUGIN_ENABLED).lower() == b'on' + + +def test_member_add(topology_st): + """MemberOf attribute should be successfully added to both the users + + :id: d222af17-17a6-48a0-8f22-a38306726a94 + :setup: Standalone instance + :steps: + 1. Create user and groups + 2. Add the users as members to the groups + 3. Check the membership + :expectedresults: + 1. Success + 2. Success + 3. Success + """ + + memofenh1 = _create_user(topology_st, 'memofenh1') + memofenh2 = _create_user(topology_st, 'memofenh2') + + memofegrp1 = _create_group(topology_st, 'memofegrp1') + memofegrp2 = _create_group(topology_st, 'memofegrp2') + + mods = [(ldap.MOD_ADD, 'member', memofenh1), (ldap.MOD_ADD, 'uniqueMember', memofenh2)] + log.info("Update %s is memberof %s (member)" % (memofenh1, memofegrp1)) + log.info("Update %s is memberof %s (uniqueMember)" % (memofenh2, memofegrp1)) + topology_st.standalone.modify_s(ensure_str(memofegrp1), mods) + + log.info("Update %s is memberof %s (member)" % (memofenh1, memofegrp2)) + log.info("Update %s is memberof %s (uniqueMember)" % (memofenh2, memofegrp2)) + topology_st.standalone.modify_s(ensure_str(memofegrp2), mods) + + # assert enh1 is member of grp1 and grp2 + assert _check_memberof(topology_st, member=memofenh1, group=memofegrp1) + assert _check_memberof(topology_st, member=memofenh1, group=memofegrp2) + + # assert enh2 is member of grp1 and grp2 + assert _check_memberof(topology_st, member=memofenh2, group=memofegrp1) + assert _check_memberof(topology_st, member=memofenh2, group=memofegrp2) + + +def test_member_delete_gr1(topology_st): + """Partial removal of memberofgroupattr: removing member attribute from Group1 + + :id: d222af17-17a6-48a0-8f22-a38306726a95 + :setup: Standalone instance + :steps: + 1. Delete a member: enh1 in grp1 + 2. Check the states of the members were changed accordingly + :expectedresults: + 1. Success + 2. Success + """ + + memofenh1 = _get_user_dn('memofenh1') + memofenh2 = _get_user_dn('memofenh2') + + memofegrp1 = _get_group_dn('memofegrp1') + memofegrp2 = _get_group_dn('memofegrp2') + log.info("Update %s is no longer memberof %s (member)" % (memofenh1, memofegrp1)) + mods = [(ldap.MOD_DELETE, 'member', memofenh1)] + topology_st.standalone.modify_s(ensure_str(memofegrp1), mods) + + # assert enh1 is NOT member of grp1 and is member of grp2 + assert not _check_memberof(topology_st, member=memofenh1, group=memofegrp1) + assert _check_memberof(topology_st, member=memofenh1, group=memofegrp2) + + # assert enh2 is member of grp1 and is member of grp2 + assert _check_memberof(topology_st, member=memofenh2, group=memofegrp1) + assert _check_memberof(topology_st, member=memofenh2, group=memofegrp2) + + +def test_member_delete_gr2(topology_st): + """Partial removal of memberofgroupattr: removing uniqueMember attribute from Group2 + + :id: d222af17-17a6-48a0-8f22-a38306726a96 + :setup: Standalone instance + :steps: + 1. Delete a uniqueMember: enh2 in grp2 + 2. Check the states of the members were changed accordingly + :expectedresults: + 1. Success + 2. 
Success + """ + + memofenh1 = _get_user_dn('memofenh1') + memofenh2 = _get_user_dn('memofenh2') + + memofegrp1 = _get_group_dn('memofegrp1') + memofegrp2 = _get_group_dn('memofegrp2') + + log.info("Update %s is no longer memberof %s (uniqueMember)" % (memofenh2, memofegrp2)) + mods = [(ldap.MOD_DELETE, 'uniqueMember', memofenh2)] + topology_st.standalone.modify_s(ensure_str(memofegrp2), mods) + + # assert enh1 is NOT member of grp1 and is member of grp2 + assert not _check_memberof(topology_st, member=memofenh1, group=memofegrp1) + assert _check_memberof(topology_st, member=memofenh1, group=memofegrp2) + + # assert enh2 is member of grp1 and is NOT member of grp2 + assert _check_memberof(topology_st, member=memofenh2, group=memofegrp1) + assert not _check_memberof(topology_st, member=memofenh2, group=memofegrp2) + + +def test_member_delete_all(topology_st): + """Complete removal of memberofgroupattr + + :id: d222af17-17a6-48a0-8f22-a38306726a97 + :setup: Standalone instance + :steps: + 1. Delete the rest of the members + 2. Check the states of the members were changed accordingly + :expectedresults: + 1. Success + 2. Success + """ + + memofenh1 = _get_user_dn('memofenh1') + memofenh2 = _get_user_dn('memofenh2') + + memofegrp1 = _get_group_dn('memofegrp1') + memofegrp2 = _get_group_dn('memofegrp2') + + log.info("Update %s is no longer memberof %s (uniqueMember)" % (memofenh2, memofegrp1)) + mods = [(ldap.MOD_DELETE, 'uniqueMember', memofenh2)] + topology_st.standalone.modify_s(ensure_str(memofegrp1), mods) + + log.info("Update %s is no longer memberof %s (member)" % (memofenh1, memofegrp2)) + mods = [(ldap.MOD_DELETE, 'member', memofenh1)] + topology_st.standalone.modify_s(ensure_str(memofegrp2), mods) + + # assert enh1 is NOT member of grp1 and is NOT member of grp2 + assert not _check_memberof(topology_st, member=memofenh1, group=memofegrp1) + assert not _check_memberof(topology_st, member=memofenh1, group=memofegrp2) + + # assert enh2 is NOT member of grp1 and is NOT member of grp2 + assert not _check_memberof(topology_st, member=memofenh2, group=memofegrp1) + assert not _check_memberof(topology_st, member=memofenh2, group=memofegrp2) + +
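+# A hypothetical alternative to _check_memberof() (illustration only, not part of this suite): +# membership could also be verified with a base-scope search using a memberof filter instead +# of reading the attribute directly; the helper name below is made up for the sketch: +# +# def _check_memberof_filter(topology_st, member, group): +#     ents = topology_st.standalone.search_s(ensure_str(member), ldap.SCOPE_BASE, +#                                            "(memberof=%s)" % ensure_str(group), ['dn']) +#     return len(ents) == 1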
+ + +def test_member_after_restart(topology_st): + """MemberOf attribute should be present on both the users + + :id: d222af17-17a6-48a0-8f22-a38306726a98 + :setup: Standalone instance + :steps: + 1. Add a couple of members to the groups + 2. Restart the instance + 3. Check the states of the members were changed accordingly + :expectedresults: + 1. Success + 2. Success + 3. Success + """ + + memofenh1 = _get_user_dn('memofenh1') + memofenh2 = _get_user_dn('memofenh2') + + memofegrp1 = _get_group_dn('memofegrp1') + memofegrp2 = _get_group_dn('memofegrp2') + + mods = [(ldap.MOD_ADD, 'member', memofenh1)] + log.info("Update %s is memberof %s (member)" % (memofenh1, memofegrp1)) + topology_st.standalone.modify_s(ensure_str(memofegrp1), mods) + + mods = [(ldap.MOD_ADD, 'uniqueMember', memofenh2)] + log.info("Update %s is memberof %s (uniqueMember)" % (memofenh2, memofegrp2)) + topology_st.standalone.modify_s(ensure_str(memofegrp2), mods) + + # assert enh1 is member of grp1 and is NOT member of grp2 + assert _check_memberof(topology_st, member=memofenh1, group=memofegrp1) + assert not _check_memberof(topology_st, member=memofenh1, group=memofegrp2) + + # assert enh2 is NOT member of grp1 and is member of grp2 + assert not _check_memberof(topology_st, member=memofenh2, group=memofegrp1) + assert _check_memberof(topology_st, member=memofenh2, group=memofegrp2) + + log.info("Remove uniqueMember as a memberofgrpattr") + topology_st.standalone.modify_s(MEMBEROF_PLUGIN_DN, + [(ldap.MOD_DELETE, + PLUGIN_MEMBEROF_GRP_ATTR, + [b'uniqueMember'])]) + topology_st.standalone.restart() + + log.info("Assert that this change of configuration did not change the already set values") + # assert enh1 is member of grp1 and is NOT member of grp2 + assert _check_memberof(topology_st, member=memofenh1, group=memofegrp1) + assert not _check_memberof(topology_st, member=memofenh1, group=memofegrp2) + + # assert enh2 is NOT member of grp1 and is member of grp2 + assert not _check_memberof(topology_st, member=memofenh2, group=memofegrp1) + assert _check_memberof(topology_st, member=memofenh2, group=memofegrp2) + + _set_memberofgroupattr_add(topology_st, 'uniqueMember') + topology_st.standalone.restart() + + +def test_memberofgroupattr_uid(topology_st): + """MemberOf attribute should not be added to the user since memberuid is not a DN syntax attribute + + :id: d222af17-17a6-48a0-8f22-a38306726a99 + :setup: Standalone instance + :steps: + 1. Try to add memberUid to the group + :expectedresults: + 1. It should fail with Unwilling to perform error + """ + + try: + _set_memberofgroupattr_add(topology_st, 'memberUid') + log.error("Setting 'memberUid' as memberofgroupattr should be rejected") + assert False + except ldap.UNWILLING_TO_PERFORM: + log.error("Setting 'memberUid' as memberofgroupattr is rejected (expected)") + assert True + + +def test_member_add_duplicate_usr1(topology_st): + """Duplicate member attribute to groups + + :id: d222af17-17a6-48a0-8f22-a38306726a10 + :setup: Standalone instance + :steps: + 1. Try to add a member: enh1 which already exists + :expectedresults: + 1. 
It should fail with Type of value exists error + """ + + memofenh1 = _get_user_dn('memofenh1') + memofegrp1 = _get_group_dn('memofegrp1') + + # assert enh1 is member of grp1 + assert _check_memberof(topology_st, member=memofenh1, group=memofegrp1) + + mods = [(ldap.MOD_ADD, 'member', memofenh1)] + log.info("Try %s is memberof %s (member)" % (memofenh1, memofegrp1)) + try: + topology_st.standalone.modify_s(ensure_str(memofegrp1), mods) + log.error( + "Should not be allowed to add %s member of %s (because it was already member)" % (memofenh1, memofegrp1)) + assert False + except ldap.TYPE_OR_VALUE_EXISTS: + log.error("%s already member of %s --> fail (expected)" % (memofenh1, memofegrp1)) + assert True + + +def test_member_add_duplicate_usr2(topology_st): + """Duplicate uniqueMember attributes to groups + + :id: d222af17-17a6-48a0-8f22-a38306726a11 + :setup: Standalone instance + :steps: + 1. Try to add a uniqueMember: enh2 which already exists + :expectedresults: + 1. It should fail with Type of value exists error + """ + + memofenh1 = _get_user_dn('memofenh1') + memofenh2 = _get_user_dn('memofenh2') + + memofegrp1 = _get_group_dn('memofegrp1') + memofegrp2 = _get_group_dn('memofegrp2') + + log.info("Check initial status") + # assert enh1 is member of grp1 and is NOT member of grp2 + assert _check_memberof(topology_st, member=memofenh1, group=memofegrp1) + assert not _check_memberof(topology_st, member=memofenh1, group=memofegrp2) + + # assert enh2 is NOT member of grp1 and is member of grp2 + assert not _check_memberof(topology_st, member=memofenh2, group=memofegrp1) + assert _check_memberof(topology_st, member=memofenh2, group=memofegrp2) + + mods = [(ldap.MOD_ADD, 'uniqueMember', memofenh2)] + log.info("Try %s is memberof %s (member)" % (memofenh2, memofegrp2)) + try: + topology_st.standalone.modify_s(ensure_str(memofegrp2), mods) + log.error( + "Should not be allowed to add %s member of %s (because it was already member)" % (memofenh2, memofegrp2)) + assert False + except ldap.TYPE_OR_VALUE_EXISTS: + log.error("%s already member of %s --> fail (expected)" % (memofenh2, memofegrp2)) + assert True + + log.info("Check final status") + # assert enh1 is member of grp1 and is NOT member of grp2 + assert _check_memberof(topology_st, member=memofenh1, group=memofegrp1) + assert not _check_memberof(topology_st, member=memofenh1, group=memofegrp2) + + # assert enh2 is NOT member of grp1 and is member of grp2 + assert not _check_memberof(topology_st, member=memofenh2, group=memofegrp1) + assert _check_memberof(topology_st, member=memofenh2, group=memofegrp2) + + +#def test_memberof_MultiGrpAttr_012(topology_st): +# """ +# MemberURL attritbute should reflect the modrdn changes in the group. +# +# This test has been covered in MODRDN test suite +# +# At the beginning: +# memofenh1 is memberof memofegrp1 +# memofenh2 is memberof memofegrp2 +# +# At the end +# memofenh1 is memberof memofegrp1 +# memofenh2 is memberof memofegrp2 +# """ +# pass + + +#def test_memberof_MultiGrpAttr_013(topology_st): +# """ +# MemberURL attritbute should reflect the modrdn changes in the group. 
+# +# This test has been covered in MODRDN test suite +# +# At the beginning: +# memofenh1 is memberof memofegrp1 +# memofenh2 is memberof memofegrp2 +# +# At the end +# memofenh1 is memberof memofegrp1 +# memofenh2 is memberof memofegrp2 +# """ +# pass + + +def test_member_uniquemember_same_user(topology_st): + """Check the situation when both member and uniqueMember + pointing to the same user + + :id: d222af17-17a6-48a0-8f22-a38306726a13 + :setup: Standalone instance, grp3, + enh1 is member of + - grp1 (member) + - not grp2 + enh2 is member of + - not grp1 + - grp2 (uniquemember) + :steps: + 1. Add member: enh1 and uniqueMember: enh1 to grp3 + 2. Assert enh1 is member of + - grp1 (member) + - not grp2 + - grp3 (member uniquemember) + 3. Delete member: enh1 from grp3 + 4. Add member: enh2 to grp3 + 5. Assert enh1 is member of + - grp1 (member) + - not grp2 + - grp3 (uniquemember) + 6. Assert enh2 is member of + - not grp1 + - grp2 (uniquemember) + - grp3 (member) + :expectedresults: + 1. Success + 2. Success + 3. Success + 4. Success + 5. Success + 6. Success + """ + + memofenh1 = _get_user_dn('memofenh1') + memofenh2 = _get_user_dn('memofenh2') + + memofegrp1 = _get_group_dn('memofegrp1') + memofegrp2 = _get_group_dn('memofegrp2') + + log.info("Check initial status") + # assert enh1 is member of grp1 and is NOT member of grp2 + assert _check_memberof(topology_st, member=memofenh1, group=memofegrp1) + assert not _check_memberof(topology_st, member=memofenh1, group=memofegrp2) + + # assert enh2 is NOT member of grp1 and is member of grp2 + assert not _check_memberof(topology_st, member=memofenh2, group=memofegrp1) + assert _check_memberof(topology_st, member=memofenh2, group=memofegrp2) + + memofegrp3 = _create_group(topology_st, 'memofegrp3') + + mods = [(ldap.MOD_ADD, 'member', memofenh1), (ldap.MOD_ADD, 'uniqueMember', memofenh1)] + log.info("Update %s is memberof %s (member)" % (memofenh1, memofegrp3)) + log.info("Update %s is memberof %s (uniqueMember)" % (memofenh1, memofegrp3)) + topology_st.standalone.modify_s(ensure_str(memofegrp3), mods) + + # assert enh1 is member of + # - grp1 (member) + # - not grp2 + # - grp3 (member uniquemember) + assert _check_memberof(topology_st, member=memofenh1, group=memofegrp1) + assert not _check_memberof(topology_st, member=memofenh1, group=memofegrp2) + assert _check_memberof(topology_st, member=memofenh1, group=memofegrp3) + + mods = [(ldap.MOD_DELETE, 'member', memofenh1)] + log.info("Update %s is not memberof %s (member)" % (memofenh1, memofegrp3)) + topology_st.standalone.modify_s(ensure_str(memofegrp3), mods) + + mods = [(ldap.MOD_ADD, 'member', memofenh2)] + log.info("Update %s is memberof %s (member)" % (memofenh2, memofegrp3)) + topology_st.standalone.modify_s(ensure_str(memofegrp3), mods) + + # assert enh1 is member of + # - grp1 (member) + # - not grp2 + # - grp3 (uniquemember) + assert _check_memberof(topology_st, member=memofenh1, group=memofegrp1) + assert not _check_memberof(topology_st, member=memofenh1, group=memofegrp2) + assert _check_memberof(topology_st, member=memofenh1, group=memofegrp3) + + # assert enh2 is member of + # - not grp1 + # - grp2 (uniquemember) + # - grp3 (member) + assert not _check_memberof(topology_st, member=memofenh2, group=memofegrp1) + assert _check_memberof(topology_st, member=memofenh2, group=memofegrp2) + assert _check_memberof(topology_st, member=memofenh2, group=memofegrp3) + + ent = topology_st.standalone.getEntry(ensure_str(memofegrp3), ldap.SCOPE_BASE, "(objectclass=*)", ['member', 
'uniqueMember']) + assert ent.hasAttr('member') + assert ensure_bytes(memofenh1) not in ent.getValues('member') + assert ensure_bytes(memofenh2) in ent.getValues('member') + assert ent.hasAttr('uniqueMember') + assert ensure_bytes(memofenh1) in ent.getValues('uniqueMember') + assert ensure_bytes(memofenh2) not in ent.getValues('uniqueMember') + + log.info("Checking final status") + # assert enh1 is member of + # - grp1 (member) + # - not grp2 + # - grp3 (uniquemember) + assert _check_memberof(topology_st, member=memofenh1, group=memofegrp1) + assert not _check_memberof(topology_st, member=memofenh1, group=memofegrp2) + assert _check_memberof(topology_st, member=memofenh1, group=memofegrp3) + + # assert enh2 is member of + # - not grp1 + # - grp2 (uniquemember) + # - grp3 (member) + assert not _check_memberof(topology_st, member=memofenh2, group=memofegrp1) + assert _check_memberof(topology_st, member=memofenh2, group=memofegrp2) + assert _check_memberof(topology_st, member=memofenh2, group=memofegrp3) + + +def test_member_not_exists(topology_st): + """Check the situation when we add non-existing users to member attribute + + :id: d222af17-17a6-48a0-8f22-a38306726a14 + :setup: Standalone instance, grp015, + enh1 is member of + - grp1 (member) + - not grp2 + - grp3 (uniquemember) + enh2 is member of + - not grp1 + - grp2 (uniquemember) + - grp3 (member) + :steps: + 1. Add member: dummy1 and uniqueMember: dummy2 to grp015 + 2. Assert enh1 is member of + - grp1 (member) + - not grp2 + - grp3 (uniquemember) + - not grp015 + 3. Assert enh2 is member of + - not grp1 + - grp2 (uniquemember) + - grp3 (member) + - not grp015 + :expectedresults: + 1. Success + 2. Success + 3. Success + """ + + memofenh1 = _get_user_dn('memofenh1') + memofenh2 = _get_user_dn('memofenh2') + dummy1 = _get_user_dn('dummy1') + dummy2 = _get_user_dn('dummy2') + + memofegrp1 = _get_group_dn('memofegrp1') + memofegrp2 = _get_group_dn('memofegrp2') + memofegrp3 = _get_group_dn('memofegrp3') + + log.info("Checking Initial status") + # assert enh1 is member of + # - grp1 (member) + # - not grp2 + # - grp3 (uniquemember) + assert _check_memberof(topology_st, member=memofenh1, group=memofegrp1) + assert not _check_memberof(topology_st, member=memofenh1, group=memofegrp2) + assert _check_memberof(topology_st, member=memofenh1, group=memofegrp3) + + # assert enh2 is member of + # - not grp1 + # - grp2 (uniquemember) + # - grp3 (member) + assert not _check_memberof(topology_st, member=memofenh2, group=memofegrp1) + assert _check_memberof(topology_st, member=memofenh2, group=memofegrp2) + assert _check_memberof(topology_st, member=memofenh2, group=memofegrp3) + + memofegrp015 = _create_group(topology_st, 'memofegrp015') + + mods = [(ldap.MOD_ADD, 'member', dummy1), (ldap.MOD_ADD, 'uniqueMember', dummy2)] + log.info("Update %s is memberof %s (member)" % (dummy1, memofegrp015)) + log.info("Update %s is memberof %s (uniqueMember)" % (dummy2, memofegrp015)) + topology_st.standalone.modify_s(ensure_str(memofegrp015), mods) + + ent = topology_st.standalone.getEntry(ensure_str(memofegrp015), ldap.SCOPE_BASE, "(objectclass=*)", ['member', 'uniqueMember']) + assert ent.hasAttr('member') + assert ensure_bytes(dummy1) in ent.getValues('member') + assert ensure_bytes(dummy2) not in ent.getValues('member') + assert ent.hasAttr('uniqueMember') + assert ensure_bytes(dummy1) not in ent.getValues('uniqueMember') + assert ensure_bytes(dummy2) in ent.getValues('uniqueMember') + + # assert enh1 is member of + # - grp1 (member) + # - not grp2 + # - grp3 
(uniquemember) + assert _check_memberof(topology_st, member=memofenh1, group=memofegrp1) + assert not _check_memberof(topology_st, member=memofenh1, group=memofegrp2) + assert _check_memberof(topology_st, member=memofenh1, group=memofegrp3) + assert not _check_memberof(topology_st, member=memofenh1, group=memofegrp015) + + # assert enh2 is member of + # - not grp1 + # - grp2 (uniquemember) + # - grp3 (member) + # + assert not _check_memberof(topology_st, member=memofenh2, group=memofegrp1) + assert _check_memberof(topology_st, member=memofenh2, group=memofegrp2) + assert _check_memberof(topology_st, member=memofenh2, group=memofegrp3) + assert not _check_memberof(topology_st, member=memofenh2, group=memofegrp015) + + +def test_member_not_exists_complex(topology_st): + """Check the situation when we modify non-existing users member attribute + + :id: d222af17-17a6-48a0-8f22-a38306726a15 + :setup: Standalone instance, + enh1 is member of + - grp1 (member) + - not grp2 + - grp3 (uniquemember) + - not grp015 + enh2 is member of + - not grp1 + - grp2 (uniquemember) + - grp3 (member) + - not grp015 + :steps: + 1. Add member: enh1 and uniqueMember: enh1 to grp016 + 2. Assert enh1 is member of + - grp1 (member) + - not grp2 + - grp3 (uniquemember) + - not grp15 + - grp16 (member uniquemember) + 3. Assert enh2 is member of + - not grp1 + - grp2 (uniquemember) + - grp3 (member) + - not grp15 + - not grp16 + 4. Add member: dummy1 and uniqueMember: dummy2 to grp016 + 5. Assert enh1 is member of + - grp1 (member) + - not grp2 + - grp3 (uniquemember) + - not grp15 + - grp16 (member uniquemember) + 6. Assert enh2 is member of + - not grp1 + - grp2 (uniquemember) + - grp3 (member) + - not grp15 + - not grp16 + :expectedresults: + 1. Success + 2. Success + 3. Success + 4. Success + 5. Success + 6. 
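# Illustrative note for the dummy1/dummy2 steps: those DNs never exist as
# entries. The server still accepts them as member/uniqueMember values (no
# referential check is performed on the ADD), so the dangling values sit on
# the group while the memberOf plugin has no target entry to update. A hedged
# sketch of showing the reference is dangling (assumes pytest is imported at
# the top of this module, as elsewhere in the suite; not used by the test):
#
#     with pytest.raises(ldap.NO_SUCH_OBJECT):
#         topology_st.standalone.getEntry(ensure_str(dummy1), ldap.SCOPE_BASE,
#                                         "(objectclass=*)", ['memberOf'])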
Success + """ + + memofenh1 = _get_user_dn('memofenh1') + memofenh2 = _get_user_dn('memofenh2') + dummy1 = _get_user_dn('dummy1') + + memofegrp1 = _get_group_dn('memofegrp1') + memofegrp2 = _get_group_dn('memofegrp2') + memofegrp3 = _get_group_dn('memofegrp3') + memofegrp015 = _get_group_dn('memofegrp015') + + # assert enh1 is member of + # - grp1 (member) + # - not grp2 + # - grp3 (uniquemember) + # - not grp15 + assert _check_memberof(topology_st, member=memofenh1, group=memofegrp1) + assert not _check_memberof(topology_st, member=memofenh1, group=memofegrp2) + assert _check_memberof(topology_st, member=memofenh1, group=memofegrp3) + assert not _check_memberof(topology_st, member=memofenh1, group=memofegrp015) + + # assert enh2 is member of + # - not grp1 + # - grp2 (uniquemember) + # - grp3 (member) + # - not grp15 + assert not _check_memberof(topology_st, member=memofenh2, group=memofegrp1) + assert _check_memberof(topology_st, member=memofenh2, group=memofegrp2) + assert _check_memberof(topology_st, member=memofenh2, group=memofegrp3) + assert not _check_memberof(topology_st, member=memofenh2, group=memofegrp015) + + memofegrp016 = _create_group(topology_st, 'memofegrp016') + + mods = [(ldap.MOD_ADD, 'member', memofenh1), (ldap.MOD_ADD, 'uniqueMember', memofenh1)] + log.info("Update %s is memberof %s (member)" % (memofenh1, memofegrp016)) + log.info("Update %s is memberof %s (uniqueMember)" % (memofenh1, memofegrp016)) + topology_st.standalone.modify_s(ensure_str(memofegrp016), mods) + + # assert enh1 is member of + # - grp1 (member) + # - not grp2 + # - grp3 (uniquemember) + # - not grp15 + # - grp16 (member uniquemember) + assert _check_memberof(topology_st, member=memofenh1, group=memofegrp1) + assert not _check_memberof(topology_st, member=memofenh1, group=memofegrp2) + assert _check_memberof(topology_st, member=memofenh1, group=memofegrp3) + assert not _check_memberof(topology_st, member=memofenh1, group=memofegrp015) + assert _check_memberof(topology_st, member=memofenh1, group=memofegrp016) + + # assert enh2 is member of + # - not grp1 + # - grp2 (uniquemember) + # - grp3 (member) + # - not grp15 + # - not grp16 + assert not _check_memberof(topology_st, member=memofenh2, group=memofegrp1) + assert _check_memberof(topology_st, member=memofenh2, group=memofegrp2) + assert _check_memberof(topology_st, member=memofenh2, group=memofegrp3) + assert not _check_memberof(topology_st, member=memofenh2, group=memofegrp015) + assert not _check_memberof(topology_st, member=memofenh2, group=memofegrp016) + + mods = [(ldap.MOD_ADD, 'member', dummy1), ] + log.info("Update %s is memberof %s (member)" % (dummy1, memofegrp016)) + topology_st.standalone.modify_s(ensure_str(memofegrp016), mods) + + ent = topology_st.standalone.getEntry(ensure_str(memofegrp016), ldap.SCOPE_BASE, "(objectclass=*)", ['member', 'uniqueMember']) + assert ent.hasAttr('member') + assert ensure_bytes(dummy1) in ent.getValues('member') + assert ent.hasAttr('uniqueMember') + assert ensure_bytes(dummy1) not in ent.getValues('uniqueMember') + + mods = [(ldap.MOD_ADD, 'uniqueMember', dummy1), ] + log.info("Update %s is memberof %s (uniqueMember)" % (dummy1, memofegrp016)) + topology_st.standalone.modify_s(ensure_str(memofegrp016), mods) + + ent = topology_st.standalone.getEntry(ensure_str(memofegrp016), ldap.SCOPE_BASE, "(objectclass=*)", ['member', 'uniqueMember']) + assert ent.hasAttr('member') + assert ensure_bytes(dummy1) in ent.getValues('member') + assert ent.hasAttr('uniqueMember') + assert ensure_bytes(dummy1) in 
ent.getValues('uniqueMember') + + # assert enh1 is member of + # - grp1 (member) + # - not grp2 + # - grp3 (uniquemember) + # - not grp15 + # - grp16 (member uniquemember) + assert _check_memberof(topology_st, member=memofenh1, group=memofegrp1) + assert not _check_memberof(topology_st, member=memofenh1, group=memofegrp2) + assert _check_memberof(topology_st, member=memofenh1, group=memofegrp3) + assert not _check_memberof(topology_st, member=memofenh1, group=memofegrp015) + assert _check_memberof(topology_st, member=memofenh1, group=memofegrp016) + + # assert enh2 is member of + # - not grp1 + # - grp2 (uniquemember) + # - grp3 (member) + # - not grp15 + # - not grp16 + assert not _check_memberof(topology_st, member=memofenh2, group=memofegrp1) + assert _check_memberof(topology_st, member=memofenh2, group=memofegrp2) + assert _check_memberof(topology_st, member=memofenh2, group=memofegrp3) + assert not _check_memberof(topology_st, member=memofenh2, group=memofegrp015) + assert not _check_memberof(topology_st, member=memofenh2, group=memofegrp016) + + +def test_complex_group_scenario_1(topology_st): + """Check the situation when user1 and user2 are memberof grp017 + user2 is member of grp017 but not with a memberof attribute (memberUid) + + :id: d222af17-17a6-48a0-8f22-a38306726a16 + :setup: Standalone instance, grp017, + enh1 is member of + - grp1 (member) + - not grp2 + - grp3 (uniquemember) + - not grp015 + - grp016 (member uniquemember) + enh2 is member of + - not grp1 + - grp2 (uniquemember) + - grp3 (member) + - not grp015 + - not grp016 + :steps: + 1. Create user1 as grp17 (member) + 2. Create user2 as grp17 (uniqueMember) + 3. Create user3 as grp17 (memberuid) (not memberof attribute) + 4. Assert enh1 is member of + - grp1 (member) + - not grp2 + - grp3 (uniquemember) + - not grp15 + - grp16 (member uniquemember) + - not grp17 + 5. Assert enh2 is member of + - not grp1 + - grp2 (uniquemember) + - grp3 (member) + - not grp15 + - not grp16 + - not grp17 + 6. Assert user1 is member of + - not grp1 + - not grp2 + - not grp3 + - not grp15 + - not grp16 + - grp17 (member) + 7. Assert user2 is member of + - not grp1 + - not grp2 + - not grp3 + - not grp15 + - not grp16 + - grp17 (uniqueMember) + 8. Assert user3 is member of + - not grp1 + - not grp2 + - not grp3 + - not grp15 + - not grp16 + - NOT grp17 (memberuid) + :expectedresults: + 1. Success + 2. Success + 3. Success + 4. Success + 5. Success + 6. Success + 7. Success + 8. 
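# Note on the memberuid steps above: memberUid is the posixGroup membership
# attribute and holds a plain uid string rather than a DN, and it is not one
# of the attributes the memberOf plugin tracks in this suite (memberofgroupattr,
# presumably member/uniqueMember as configured earlier in this file), so user3
# must NOT receive a memberOf value for grp017. Illustrative sketch for
# inspecting that plugin setting (standard plugin entry DN; not used by the test):
#
#     cfg = topology_st.standalone.getEntry('cn=MemberOf Plugin,cn=plugins,cn=config',
#                                           ldap.SCOPE_BASE, "(objectclass=*)",
#                                           ['memberofgroupattr', 'memberofattr'])
#     log.info("memberofgroupattr: %s" % (cfg.getValues('memberofgroupattr'),))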
Success + """ + + memofenh1 = _get_user_dn('memofenh1') + memofenh2 = _get_user_dn('memofenh2') + + memofegrp1 = _get_group_dn('memofegrp1') + memofegrp2 = _get_group_dn('memofegrp2') + memofegrp3 = _get_group_dn('memofegrp3') + memofegrp015 = _get_group_dn('memofegrp015') + memofegrp016 = _get_group_dn('memofegrp016') + + # assert enh1 is member of + # - grp1 (member) + # - not grp2 + # - grp3 (uniquemember) + # - not grp15 + # - grp16 (member uniquemember) + assert _check_memberof(topology_st, member=memofenh1, group=memofegrp1) + assert not _check_memberof(topology_st, member=memofenh1, group=memofegrp2) + assert _check_memberof(topology_st, member=memofenh1, group=memofegrp3) + assert not _check_memberof(topology_st, member=memofenh1, group=memofegrp015) + assert _check_memberof(topology_st, member=memofenh1, group=memofegrp016) + + # assert enh2 is member of + # - not grp1 + # - grp2 (uniquemember) + # - grp3 (member) + # - not grp15 + # - not grp16 + assert not _check_memberof(topology_st, member=memofenh2, group=memofegrp1) + assert _check_memberof(topology_st, member=memofenh2, group=memofegrp2) + assert _check_memberof(topology_st, member=memofenh2, group=memofegrp3) + assert not _check_memberof(topology_st, member=memofenh2, group=memofegrp015) + assert not _check_memberof(topology_st, member=memofenh2, group=memofegrp016) + + # + # create user1 + # - not grp1 + # - not grp2 + # - not grp3 + # - not grp15 + # - not grp16 + # - grp17 (member) + # + # create user2 + # - not grp1 + # - not grp2 + # - not grp3 + # - not grp15 + # - not grp16 + # - grp17 (uniqueMember) + # + # create user3 + # - not grp1 + # - not grp2 + # - not grp3 + # - not grp15 + # - not grp16 + # - grp17 (memberuid) (not memberof attribute) + memofuser1 = _create_user(topology_st, 'memofuser1') + memofuser2 = _create_user(topology_st, 'memofuser2') + memofuser3 = _create_user(topology_st, 'memofuser3') + memofegrp017 = _create_group(topology_st, 'memofegrp017') + + mods = [(ldap.MOD_ADD, 'member', memofuser1), (ldap.MOD_ADD, 'uniqueMember', memofuser2), + (ldap.MOD_ADD, 'memberuid', memofuser3)] + log.info("Update %s is memberof %s (member)" % (memofuser1, memofegrp017)) + log.info("Update %s is memberof %s (uniqueMember)" % (memofuser2, memofegrp017)) + log.info("Update %s is memberof %s (memberuid)" % (memofuser3, memofegrp017)) + topology_st.standalone.modify_s(ensure_str(memofegrp017), mods) + + # assert enh1 is member of + # - grp1 (member) + # - not grp2 + # - grp3 (uniquemember) + # - not grp15 + # - grp16 (member uniquemember) + # - not grp17 + assert _check_memberof(topology_st, member=memofenh1, group=memofegrp1) + assert not _check_memberof(topology_st, member=memofenh1, group=memofegrp2) + assert _check_memberof(topology_st, member=memofenh1, group=memofegrp3) + assert not _check_memberof(topology_st, member=memofenh1, group=memofegrp015) + assert _check_memberof(topology_st, member=memofenh1, group=memofegrp016) + assert not _check_memberof(topology_st, member=memofenh2, group=memofegrp017) + + # assert enh2 is member of + # - not grp1 + # - grp2 (uniquemember) + # - grp3 (member) + # - not grp15 + # - not grp16 + # - not grp17 + assert not _check_memberof(topology_st, member=memofenh2, group=memofegrp1) + assert _check_memberof(topology_st, member=memofenh2, group=memofegrp2) + assert _check_memberof(topology_st, member=memofenh2, group=memofegrp3) + assert not _check_memberof(topology_st, member=memofenh2, group=memofegrp015) + assert not _check_memberof(topology_st, member=memofenh2, 
group=memofegrp016) + assert not _check_memberof(topology_st, member=memofenh2, group=memofegrp017) + + # assert user1 is member of + # - not grp1 + # - not grp2 + # - not grp3 + # - not grp15 + # - not grp16 + # - grp17 (member) + assert not _check_memberof(topology_st, member=memofuser1, group=memofegrp1) + assert not _check_memberof(topology_st, member=memofuser1, group=memofegrp2) + assert not _check_memberof(topology_st, member=memofuser1, group=memofegrp3) + assert not _check_memberof(topology_st, member=memofuser1, group=memofegrp015) + assert not _check_memberof(topology_st, member=memofuser1, group=memofegrp016) + assert _check_memberof(topology_st, member=memofuser1, group=memofegrp017) + + # assert user2 is member of + # - not grp1 + # - not grp2 + # - not grp3 + # - not grp15 + # - not grp16 + # - grp17 (uniqueMember) + assert not _check_memberof(topology_st, member=memofuser2, group=memofegrp1) + assert not _check_memberof(topology_st, member=memofuser2, group=memofegrp2) + assert not _check_memberof(topology_st, member=memofuser2, group=memofegrp3) + assert not _check_memberof(topology_st, member=memofuser2, group=memofegrp015) + assert not _check_memberof(topology_st, member=memofuser2, group=memofegrp016) + assert _check_memberof(topology_st, member=memofuser2, group=memofegrp017) + + # assert user3 is member of + # - not grp1 + # - not grp2 + # - not grp3 + # - not grp15 + # - not grp16 + # - NOT grp17 (memberuid) + assert not _check_memberof(topology_st, member=memofuser3, group=memofegrp1) + assert not _check_memberof(topology_st, member=memofuser3, group=memofegrp2) + assert not _check_memberof(topology_st, member=memofuser3, group=memofegrp3) + assert not _check_memberof(topology_st, member=memofuser3, group=memofegrp015) + assert not _check_memberof(topology_st, member=memofuser3, group=memofegrp016) + assert not _check_memberof(topology_st, member=memofuser3, group=memofegrp017) + + +def test_complex_group_scenario_2(topology_st): + """Check the situation when user1 and user2 are memberof grp018 + user2 is member of grp018 but not with a memberof attribute (memberUid) + + :id: d222af17-17a6-48a0-8f22-a38306726a17 + :setup: Standalone instance, grp018, + enh1 is member of + - grp1 (member) + - not grp2 + - grp3 (uniquemember) + - not grp015 + - grp016 (member uniquemember) + - not grp17 + enh2 is member of + - not grp1 + - grp2 (uniquemember) + - grp3 (member) + - not grp015 + - not grp016 + - not grp017 + user1 is member of + - not grp1 + - not grp2 + - not grp3 + - not grp015 + - not grp016 + - grp017 (member) + user2 is member of + - not grp1 + - not grp2 + - not grp3 + - not grp015 + - not grp016 + - grp017 (uniquemember) + user3 is member of + - not grp1 + - not grp2 + - not grp3 + - not grp015 + - not grp016 + - not grp017 (memberuid) + :steps: + 1. Add user1 as a member of grp18 (member, uniquemember) + 2. Assert user1 is member of + - not grp1 + - not grp2 + - not grp3 + - not grp15 + - not grp16 + - grp17 (member) + - grp18 (member, uniquemember) + 3. Delete user1 member/uniquemember attributes from grp018 + 4. Assert user1 is member of + - not grp1 + - not grp2 + - not grp3 + - not grp15 + - not grp16 + - grp17 (member) + - NOT grp18 (memberUid) + 5. Delete user1, user2, user3, grp17 entries + 6. Assert enh1 is member of + - grp1 (member) + - not grp2 + - grp3 (uniquemember) + - not grp15 + - grp16 (member uniquemember) + - not grp018 + 7. 
Assert enh2 is member of + - not grp1 + - grp2 (uniquemember) + - grp3 (member) + - not grp15 + - not grp16 + - not grp018 + :expectedresults: + 1. Success + 2. Success + 3. Success + 4. Success + 5. Success + 6. Success + 7. Success + """ + + memofenh1 = _get_user_dn('memofenh1') + memofenh2 = _get_user_dn('memofenh2') + memofuser1 = _get_user_dn('memofuser1') + memofuser2 = _get_user_dn('memofuser2') + memofuser3 = _get_user_dn('memofuser3') + + memofegrp1 = _get_group_dn('memofegrp1') + memofegrp2 = _get_group_dn('memofegrp2') + memofegrp3 = _get_group_dn('memofegrp3') + memofegrp015 = _get_group_dn('memofegrp015') + memofegrp016 = _get_group_dn('memofegrp016') + memofegrp017 = _get_group_dn('memofegrp017') + + # assert enh1 is member of + # - grp1 (member) + # - not grp2 + # - grp3 (uniquemember) + # - not grp15 + # - grp16 (member uniquemember) + # - not grp17 + assert _check_memberof(topology_st, member=memofenh1, group=memofegrp1) + assert not _check_memberof(topology_st, member=memofenh1, group=memofegrp2) + assert _check_memberof(topology_st, member=memofenh1, group=memofegrp3) + assert not _check_memberof(topology_st, member=memofenh1, group=memofegrp015) + assert _check_memberof(topology_st, member=memofenh1, group=memofegrp016) + assert not _check_memberof(topology_st, member=memofenh2, group=memofegrp017) + + # assert enh2 is member of + # - not grp1 + # - grp2 (uniquemember) + # - grp3 (member) + # - not grp15 + # - not grp16 + # - not grp17 + assert not _check_memberof(topology_st, member=memofenh2, group=memofegrp1) + assert _check_memberof(topology_st, member=memofenh2, group=memofegrp2) + assert _check_memberof(topology_st, member=memofenh2, group=memofegrp3) + assert not _check_memberof(topology_st, member=memofenh2, group=memofegrp015) + assert not _check_memberof(topology_st, member=memofenh2, group=memofegrp016) + assert not _check_memberof(topology_st, member=memofenh2, group=memofegrp017) + + # assert user1 is member of + # - not grp1 + # - not grp2 + # - not grp3 + # - not grp15 + # - not grp16 + # - grp17 (member) + assert not _check_memberof(topology_st, member=memofuser1, group=memofegrp1) + assert not _check_memberof(topology_st, member=memofuser1, group=memofegrp2) + assert not _check_memberof(topology_st, member=memofuser1, group=memofegrp3) + assert not _check_memberof(topology_st, member=memofuser1, group=memofegrp015) + assert not _check_memberof(topology_st, member=memofuser1, group=memofegrp016) + assert _check_memberof(topology_st, member=memofuser1, group=memofegrp017) + + # assert user2 is member of + # - not grp1 + # - not grp2 + # - not grp3 + # - not grp15 + # - not grp16 + # - grp17 (uniqueMember) + assert not _check_memberof(topology_st, member=memofuser2, group=memofegrp1) + assert not _check_memberof(topology_st, member=memofuser2, group=memofegrp2) + assert not _check_memberof(topology_st, member=memofuser2, group=memofegrp3) + assert not _check_memberof(topology_st, member=memofuser2, group=memofegrp015) + assert not _check_memberof(topology_st, member=memofuser2, group=memofegrp016) + assert _check_memberof(topology_st, member=memofuser2, group=memofegrp017) + + # assert user3 is member of + # - not grp1 + # - not grp2 + # - not grp3 + # - not grp15 + # - not grp16 + # - NOT grp17 (memberuid) + assert not _check_memberof(topology_st, member=memofuser3, group=memofegrp1) + assert not _check_memberof(topology_st, member=memofuser3, group=memofegrp2) + assert not _check_memberof(topology_st, member=memofuser3, group=memofegrp3) + assert not 
_check_memberof(topology_st, member=memofuser3, group=memofegrp015) + assert not _check_memberof(topology_st, member=memofuser3, group=memofegrp016) + assert not _check_memberof(topology_st, member=memofuser3, group=memofegrp017) + + # + # Create a group grp018 with user1 member/uniquemember + memofegrp018 = _create_group(topology_st, 'memofegrp018') + + mods = [(ldap.MOD_ADD, 'member', memofuser1), (ldap.MOD_ADD, 'uniqueMember', memofuser1), + (ldap.MOD_ADD, 'memberuid', memofuser1)] + log.info("Update %s is memberof %s (member)" % (memofuser1, memofegrp017)) + log.info("Update %s is memberof %s (uniqueMember)" % (memofuser1, memofegrp017)) + log.info("Update %s is memberof %s (memberuid)" % (memofuser1, memofegrp017)) + topology_st.standalone.modify_s(ensure_str(memofegrp018), mods) + + # assert user1 is member of + # - not grp1 + # - not grp2 + # - not grp3 + # - not grp15 + # - not grp16 + # - grp17 (member) + # - grp18 (member, uniquemember) + assert not _check_memberof(topology_st, member=memofuser1, group=memofegrp1) + assert not _check_memberof(topology_st, member=memofuser1, group=memofegrp2) + assert not _check_memberof(topology_st, member=memofuser1, group=memofegrp3) + assert not _check_memberof(topology_st, member=memofuser1, group=memofegrp015) + assert not _check_memberof(topology_st, member=memofuser1, group=memofegrp016) + assert _check_memberof(topology_st, member=memofuser1, group=memofegrp017) + assert _check_memberof(topology_st, member=memofuser1, group=memofegrp018) + + mods = [(ldap.MOD_DELETE, 'member', memofuser1), (ldap.MOD_DELETE, 'uniqueMember', memofuser1)] + log.info("Update %s is no longer memberof %s (member)" % (memofuser1, memofegrp018)) + log.info("Update %s is no longer memberof %s (uniqueMember)" % (memofuser1, memofegrp018)) + topology_st.standalone.modify_s(ensure_str(memofegrp018), mods) + + # assert user1 is member of + # - not grp1 + # - not grp2 + # - not grp3 + # - not grp15 + # - not grp16 + # - grp17 (member) + # - NOT grp18 (memberUid) + assert not _check_memberof(topology_st, member=memofuser1, group=memofegrp1) + assert not _check_memberof(topology_st, member=memofuser1, group=memofegrp2) + assert not _check_memberof(topology_st, member=memofuser1, group=memofegrp3) + assert not _check_memberof(topology_st, member=memofuser1, group=memofegrp015) + assert not _check_memberof(topology_st, member=memofuser1, group=memofegrp016) + assert _check_memberof(topology_st, member=memofuser1, group=memofegrp017) + assert not _check_memberof(topology_st, member=memofuser1, group=memofegrp018) + + # DEL user1, user2, user3, grp17 + topology_st.standalone.delete_s(ensure_str(memofuser1)) + topology_st.standalone.delete_s(ensure_str(memofuser2)) + topology_st.standalone.delete_s(ensure_str(memofuser3)) + topology_st.standalone.delete_s(ensure_str(memofegrp017)) + + # assert enh1 is member of + # - grp1 (member) + # - not grp2 + # - grp3 (uniquemember) + # - not grp15 + # - grp16 (member uniquemember) + # - not grp018 + assert _check_memberof(topology_st, member=memofenh1, group=memofegrp1) + assert not _check_memberof(topology_st, member=memofenh1, group=memofegrp2) + assert _check_memberof(topology_st, member=memofenh1, group=memofegrp3) + assert not _check_memberof(topology_st, member=memofenh1, group=memofegrp015) + assert _check_memberof(topology_st, member=memofenh1, group=memofegrp016) + assert not _check_memberof(topology_st, member=memofenh1, group=memofegrp018) + + # assert enh2 is member of + # - not grp1 + # - grp2 (uniquemember) + # - grp3 
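# After the DEL operations above the memberOf plugin is expected to drop the
# corresponding memberOf values from any surviving entries; the assertions
# below only cover enh1/enh2. A broader, illustrative check that nothing still
# points at the deleted grp017 (SUFFIX as the search base is an assumption
# taken from this suite's usual imports; not used by the test itself):
#
#     stale = topology_st.standalone.search_s(SUFFIX, ldap.SCOPE_SUBTREE,
#                                             '(memberOf=%s)' % ensure_str(memofegrp017))
#     assert stale == []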
(member) + # - not grp15 + # - not grp16 + # - not grp018 + assert not _check_memberof(topology_st, member=memofenh2, group=memofegrp1) + assert _check_memberof(topology_st, member=memofenh2, group=memofegrp2) + assert _check_memberof(topology_st, member=memofenh2, group=memofegrp3) + assert not _check_memberof(topology_st, member=memofenh2, group=memofegrp015) + assert not _check_memberof(topology_st, member=memofenh2, group=memofegrp016) + assert not _check_memberof(topology_st, member=memofenh2, group=memofegrp018) + + +def test_complex_group_scenario_3(topology_st): + """Test a complex memberOf case: + Add user2 to grp19_2, + Add user3 to grp19_3, + Add grp19_2 and grp_19_3 to grp19_1 + + :id: d222af17-17a6-48a0-8f22-a38306726a18 + :setup: Standalone instance, + enh1 is member of + - grp1 (member) + - not grp2 + - grp3 (uniquemember) + - not grp015 + - grp016 (member uniquemember) + - not grp018 + enh2 is member of + - not grp1 + - grp2 (uniquemember) + - grp3 (member) + - not grp015 + - not grp016 + - not grp018 + :steps: + 1. Create user2 and user3 + 2. Create a group grp019_2 with user2 member + 3. Create a group grp019_3 with user3 member + 4. Create a group grp019_1 with memofegrp019_2, memofegrp019_3 member + 5. Assert memofegrp019_1 is member of + - not grp1 + - not grp2 + - not grp3 + - not grp15 + - not grp16 + - not grp018 + - not grp19_1 + - not grp019_2 + - not grp019_3 + + 6. Assert memofegrp019_2 is member of + - not grp1 + - not grp2 + - not grp3 + - not grp15 + - not grp16 + - not grp018 + - grp19_1 + - not grp019_2 + - not grp019_3 + 7. Assert memofegrp019_3 is member of + - not grp1 + - not grp2 + - not grp3 + - not grp15 + - not grp16 + - not grp018 + - grp19_1 + - not grp019_2 + - not grp019_3 + 8. Assert memofuser2 is member of + - not grp1 + - not grp2 + - not grp3 + - not grp15 + - not grp16 + - not grp018 + - grp19_1 + - grp019_2 + - not grp019_3 + 9. Assert memofuser3 is member of + - not grp1 + - not grp2 + - not grp3 + - not grp15 + - not grp16 + - not grp018 + - grp19_1 + - not grp019_2 + - grp019_3 + 10. Delete user2, user3, and all grp19* entries + 11. Assert enh1 is member of + - grp1 (member) + - not grp2 + - grp3 (uniquemember) + - not grp15 + - grp16 (member uniquemember) + - not grp018 + 12. Assert enh2 is member of + - not grp1 + - grp2 (uniquemember) + - grp3 (member) + - not grp15 + - not grp16 + - not grp018 + :expectedresults: + 1. Success + 2. Success + 3. Success + 4. Success + 5. Success + 6. Success + 7. Success + 8. Success + 9. Success + 10. Success + 11. Success + 12. 
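# Note for steps 2-4: for the group entries themselves to carry a memberOf
# value through the nesting, their schema must allow that attribute, which
# plain groupOfNames/groupOfUniqueNames entries do not; the test body below
# therefore adds the auxiliary objectClass inetUser (whose MAY list includes
# memberOf) to grp019_2/grp019_3 before nesting them under grp019_1, e.g.:
#
#     mods = [(ldap.MOD_ADD, 'objectClass', b'inetUser')]
#     topology_st.standalone.modify_s(ensure_str(memofegrp019_2), mods)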
Success + """ + + memofenh1 = _get_user_dn('memofenh1') + memofenh2 = _get_user_dn('memofenh2') + + memofegrp1 = _get_group_dn('memofegrp1') + memofegrp2 = _get_group_dn('memofegrp2') + memofegrp3 = _get_group_dn('memofegrp3') + memofegrp015 = _get_group_dn('memofegrp015') + memofegrp016 = _get_group_dn('memofegrp016') + memofegrp018 = _get_group_dn('memofegrp018') + + # assert enh1 is member of + # - grp1 (member) + # - not grp2 + # - grp3 (uniquemember) + # - not grp15 + # - grp16 (member uniquemember) + # - not grp018 + assert _check_memberof(topology_st, member=memofenh1, group=memofegrp1) + assert not _check_memberof(topology_st, member=memofenh1, group=memofegrp2) + assert _check_memberof(topology_st, member=memofenh1, group=memofegrp3) + assert not _check_memberof(topology_st, member=memofenh1, group=memofegrp015) + assert _check_memberof(topology_st, member=memofenh1, group=memofegrp016) + assert not _check_memberof(topology_st, member=memofenh1, group=memofegrp018) + + # assert enh2 is member of + # - not grp1 + # - grp2 (uniquemember) + # - grp3 (member) + # - not grp15 + # - not grp16 + # - not grp018 + assert not _check_memberof(topology_st, member=memofenh2, group=memofegrp1) + assert _check_memberof(topology_st, member=memofenh2, group=memofegrp2) + assert _check_memberof(topology_st, member=memofenh2, group=memofegrp3) + assert not _check_memberof(topology_st, member=memofenh2, group=memofegrp015) + assert not _check_memberof(topology_st, member=memofenh2, group=memofegrp016) + assert not _check_memberof(topology_st, member=memofenh2, group=memofegrp018) + + memofuser2 = _create_user(topology_st, 'memofuser2') + memofuser3 = _create_user(topology_st, 'memofuser3') + + # Create a group grp019_2 with user2 member + memofegrp019_2 = _create_group(topology_st, 'memofegrp019_2') + mods = [(ldap.MOD_ADD, 'member', memofuser2)] + topology_st.standalone.modify_s(ensure_str(memofegrp019_2), mods) + + # Create a group grp019_3 with user3 member + memofegrp019_3 = _create_group(topology_st, 'memofegrp019_3') + mods = [(ldap.MOD_ADD, 'member', memofuser3)] + topology_st.standalone.modify_s(ensure_str(memofegrp019_3), mods) + + mods = [(ldap.MOD_ADD, 'objectClass', b'inetUser')] + topology_st.standalone.modify_s(ensure_str(memofegrp019_2), mods) + topology_st.standalone.modify_s(ensure_str(memofegrp019_3), mods) + + # Create a group grp019_1 with memofegrp019_2, memofegrp019_3 member + memofegrp019_1 = _create_group(topology_st, 'memofegrp019_1') + mods = [(ldap.MOD_ADD, 'member', memofegrp019_2), (ldap.MOD_ADD, 'member', memofegrp019_3)] + topology_st.standalone.modify_s(ensure_str(memofegrp019_1), mods) + + # assert memofegrp019_1 is member of + # - not grp1 + # - not grp2 + # - not grp3 + # - not grp15 + # - not grp16 + # - not grp018 + # - not grp19_1 + # - not grp019_2 + # - not grp019_3 + assert not _check_memberof(topology_st, member=memofegrp019_1, group=memofegrp1) + assert not _check_memberof(topology_st, member=memofegrp019_1, group=memofegrp2) + assert not _check_memberof(topology_st, member=memofegrp019_1, group=memofegrp3) + assert not _check_memberof(topology_st, member=memofegrp019_1, group=memofegrp015) + assert not _check_memberof(topology_st, member=memofegrp019_1, group=memofegrp016) + assert not _check_memberof(topology_st, member=memofegrp019_1, group=memofegrp018) + assert not _check_memberof(topology_st, member=memofegrp019_1, group=memofegrp019_1) + assert not _check_memberof(topology_st, member=memofegrp019_1, group=memofegrp019_2) + assert not 
_check_memberof(topology_st, member=memofegrp019_1, group=memofegrp019_3) + + # assert memofegrp019_2 is member of + # - not grp1 + # - not grp2 + # - not grp3 + # - not grp15 + # - not grp16 + # - not grp018 + # - grp19_1 + # - not grp019_2 + # - not grp019_3 + assert not _check_memberof(topology_st, member=memofegrp019_2, group=memofegrp1) + assert not _check_memberof(topology_st, member=memofegrp019_2, group=memofegrp2) + assert not _check_memberof(topology_st, member=memofegrp019_2, group=memofegrp3) + assert not _check_memberof(topology_st, member=memofegrp019_2, group=memofegrp015) + assert not _check_memberof(topology_st, member=memofegrp019_2, group=memofegrp016) + assert not _check_memberof(topology_st, member=memofegrp019_2, group=memofegrp018) + assert _check_memberof(topology_st, member=memofegrp019_2, group=memofegrp019_1) + assert not _check_memberof(topology_st, member=memofegrp019_2, group=memofegrp019_2) + assert not _check_memberof(topology_st, member=memofegrp019_2, group=memofegrp019_3) + + # assert memofegrp019_3 is member of + # - not grp1 + # - not grp2 + # - not grp3 + # - not grp15 + # - not grp16 + # - not grp018 + # - grp19_1 + # - not grp019_2 + # - not grp019_3 + assert not _check_memberof(topology_st, member=memofegrp019_2, group=memofegrp1) + assert not _check_memberof(topology_st, member=memofegrp019_2, group=memofegrp2) + assert not _check_memberof(topology_st, member=memofegrp019_2, group=memofegrp3) + assert not _check_memberof(topology_st, member=memofegrp019_2, group=memofegrp015) + assert not _check_memberof(topology_st, member=memofegrp019_2, group=memofegrp016) + assert not _check_memberof(topology_st, member=memofegrp019_2, group=memofegrp018) + assert _check_memberof(topology_st, member=memofegrp019_2, group=memofegrp019_1) + assert not _check_memberof(topology_st, member=memofegrp019_2, group=memofegrp019_2) + assert not _check_memberof(topology_st, member=memofegrp019_2, group=memofegrp019_3) + + # assert memofuser2 is member of + # - not grp1 + # - not grp2 + # - not grp3 + # - not grp15 + # - not grp16 + # - not grp018 + # - grp19_1 + # - grp019_2 + # - not grp019_3 + assert not _check_memberof(topology_st, member=memofuser2, group=memofegrp1) + assert not _check_memberof(topology_st, member=memofuser2, group=memofegrp2) + assert not _check_memberof(topology_st, member=memofuser2, group=memofegrp3) + assert not _check_memberof(topology_st, member=memofuser2, group=memofegrp015) + assert not _check_memberof(topology_st, member=memofuser2, group=memofegrp016) + assert not _check_memberof(topology_st, member=memofuser2, group=memofegrp018) + assert _check_memberof(topology_st, member=memofuser2, group=memofegrp019_1) + assert _check_memberof(topology_st, member=memofuser2, group=memofegrp019_2) + assert not _check_memberof(topology_st, member=memofuser2, group=memofegrp019_3) + + # assert memofuser3 is member of + # - not grp1 + # - not grp2 + # - not grp3 + # - not grp15 + # - not grp16 + # - not grp018 + # - grp19_1 + # - not grp019_2 + # - grp019_3 + assert not _check_memberof(topology_st, member=memofuser3, group=memofegrp1) + assert not _check_memberof(topology_st, member=memofuser3, group=memofegrp2) + assert not _check_memberof(topology_st, member=memofuser3, group=memofegrp3) + assert not _check_memberof(topology_st, member=memofuser3, group=memofegrp015) + assert not _check_memberof(topology_st, member=memofuser3, group=memofegrp016) + assert not _check_memberof(topology_st, member=memofuser3, group=memofegrp018) + assert 
_check_memberof(topology_st, member=memofuser3, group=memofegrp019_1) + assert not _check_memberof(topology_st, member=memofuser3, group=memofegrp019_2) + assert _check_memberof(topology_st, member=memofuser3, group=memofegrp019_3) + + # DEL user2, user3, grp19* + topology_st.standalone.delete_s(ensure_str(memofuser2)) + topology_st.standalone.delete_s(ensure_str(memofuser3)) + topology_st.standalone.delete_s(ensure_str(memofegrp019_1)) + topology_st.standalone.delete_s(ensure_str(memofegrp019_2)) + topology_st.standalone.delete_s(ensure_str(memofegrp019_3)) + + # assert enh1 is member of + # - grp1 (member) + # - not grp2 + # - grp3 (uniquemember) + # - not grp15 + # - grp16 (member uniquemember) + # - not grp018 + assert _check_memberof(topology_st, member=memofenh1, group=memofegrp1) + assert not _check_memberof(topology_st, member=memofenh1, group=memofegrp2) + assert _check_memberof(topology_st, member=memofenh1, group=memofegrp3) + assert not _check_memberof(topology_st, member=memofenh1, group=memofegrp015) + assert _check_memberof(topology_st, member=memofenh1, group=memofegrp016) + assert not _check_memberof(topology_st, member=memofenh1, group=memofegrp018) + + # assert enh2 is member of + # - not grp1 + # - grp2 (uniquemember) + # - grp3 (member) + # - not grp15 + # - not grp16 + # - not grp018 + assert not _check_memberof(topology_st, member=memofenh2, group=memofegrp1) + assert _check_memberof(topology_st, member=memofenh2, group=memofegrp2) + assert _check_memberof(topology_st, member=memofenh2, group=memofegrp3) + assert not _check_memberof(topology_st, member=memofenh2, group=memofegrp015) + assert not _check_memberof(topology_st, member=memofenh2, group=memofegrp016) + assert not _check_memberof(topology_st, member=memofenh2, group=memofegrp018) + + +def test_complex_group_scenario_4(topology_st): + """Test a complex memberOf case: + Add user1 and grp[1-5] + Add user1 member of grp[1-4] + Add grp[1-4] member of grp5 + Check user1 is member of grp[1-5] + + :id: d223af17-17a6-48a0-8f22-a38306726a19 + :setup: Standalone instance, + enh1 is member of + - grp1 (member) + - not grp2 + - grp3 (uniquemember) + - not grp015 + - grp016 (member uniquemember) + - not grp018 + enh2 is member of + - not grp1 + - grp2 (uniquemember) + - grp3 (member) + - not grp015 + - not grp016 + - not grp018 + :steps: + 1. Create user1 + 2. Create grp[1-5] that can be inetUser (having memberof) + 3. Add user1 to grp[1-4] (uniqueMember) + 4. Create grp5 with grp[1-4] as member + 5. Assert user1 is a member grp[1-5] + 6. Delete user1 and all grp20 entries + :expectedresults: + 1. Success + 2. Success + 3. Success + 4. Success + 5. Success + 6. 
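# Steps 3-5 exercise indirect membership: user1 is only a uniqueMember of
# grp020_[1-4], yet must also end up with memberOf: grp020_5, because those
# four groups are in turn members of grp020_5. An alternative, illustrative
# way to list who the plugin currently considers a member of a group is a
# search on the memberOf value itself (DEFAULT_SUFFIX as the search base is
# an assumption taken from this suite's usual imports; not used by the test):
#
#     res = topology_st.standalone.search_s(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE,
#                                           '(memberOf=%s)' % ensure_str(memofegrp020_5),
#                                           ['cn'])
#     assert ensure_str(memofuser1) in [dn for dn, _attrs in res]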
Success + """ + + memofenh1 = _get_user_dn('memofenh1') + memofenh2 = _get_user_dn('memofenh2') + + memofegrp1 = _get_group_dn('memofegrp1') + memofegrp2 = _get_group_dn('memofegrp2') + memofegrp3 = _get_group_dn('memofegrp3') + memofegrp015 = _get_group_dn('memofegrp015') + memofegrp016 = _get_group_dn('memofegrp016') + memofegrp018 = _get_group_dn('memofegrp018') + + # assert enh1 is member of + # - grp1 (member) + # - not grp2 + # - grp3 (uniquemember) + # - not grp15 + # - grp16 (member uniquemember) + # - not grp018 + assert _check_memberof(topology_st, member=memofenh1, group=memofegrp1) + assert not _check_memberof(topology_st, member=memofenh1, group=memofegrp2) + assert _check_memberof(topology_st, member=memofenh1, group=memofegrp3) + assert not _check_memberof(topology_st, member=memofenh1, group=memofegrp015) + assert _check_memberof(topology_st, member=memofenh1, group=memofegrp016) + assert not _check_memberof(topology_st, member=memofenh1, group=memofegrp018) + + # assert enh2 is member of + # - not grp1 + # - grp2 (uniquemember) + # - grp3 (member) + # - not grp15 + # - not grp16 + # - not grp018 + assert not _check_memberof(topology_st, member=memofenh2, group=memofegrp1) + assert _check_memberof(topology_st, member=memofenh2, group=memofegrp2) + assert _check_memberof(topology_st, member=memofenh2, group=memofegrp3) + assert not _check_memberof(topology_st, member=memofenh2, group=memofegrp015) + assert not _check_memberof(topology_st, member=memofenh2, group=memofegrp016) + assert not _check_memberof(topology_st, member=memofenh2, group=memofegrp018) + + # create user1 + memofuser1 = _create_user(topology_st, 'memofuser1') + + # create grp[1-5] that can be inetUser (having memberof) + memofegrp020_1 = _create_group(topology_st, 'memofegrp020_1') + memofegrp020_2 = _create_group(topology_st, 'memofegrp020_2') + memofegrp020_3 = _create_group(topology_st, 'memofegrp020_3') + memofegrp020_4 = _create_group(topology_st, 'memofegrp020_4') + memofegrp020_5 = _create_group(topology_st, 'memofegrp020_5') + mods = [(ldap.MOD_ADD, 'objectClass', b'inetUser')] + for grp in [memofegrp020_1, memofegrp020_2, memofegrp020_3, memofegrp020_4, memofegrp020_5]: + topology_st.standalone.modify_s(ensure_str(grp), mods) + + # add user1 to grp[1-4] (uniqueMember) + mods = [(ldap.MOD_ADD, 'uniqueMember', memofuser1)] + for grp in [memofegrp020_1, memofegrp020_2, memofegrp020_3, memofegrp020_4]: + topology_st.standalone.modify_s(ensure_str(grp), mods) + + # create grp5 with grp[1-4] as member + mods = [] + for grp in [memofegrp020_1, memofegrp020_2, memofegrp020_3, memofegrp020_4]: + mods.append((ldap.MOD_ADD, 'member', grp)) + topology_st.standalone.modify_s(ensure_str(memofegrp020_5), mods) + + assert _check_memberof(topology_st, member=memofuser1, group=memofegrp020_1) + assert _check_memberof(topology_st, member=memofuser1, group=memofegrp020_2) + assert _check_memberof(topology_st, member=memofuser1, group=memofegrp020_3) + assert _check_memberof(topology_st, member=memofuser1, group=memofegrp020_4) + assert _check_memberof(topology_st, member=memofuser1, group=memofegrp020_5) + + # DEL user1, grp20* + topology_st.standalone.delete_s(ensure_str(memofuser1)) + for grp in [memofegrp020_1, memofegrp020_2, memofegrp020_3, memofegrp020_4, memofegrp020_5]: + topology_st.standalone.delete_s(ensure_str(grp)) + + +def test_complex_group_scenario_5(topology_st): + """Test a complex memberOf case: + Add user[1-4] and Grp[1-4] + Add userX as uniquemember of GrpX + Add Grp5 + Grp[1-4] as members of Grp5 
+ user1 as member of Grp5 + Check that user1 is member of Grp1 and Grp5 + Check that user* are members of Grp5 + + :id: d222af17-17a6-48a0-8f22-a38306726a20 + :setup: Standalone instance, + enh1 is member of + - grp1 (member) + - not grp2 + - grp3 (uniquemember) + - not grp015 + - grp016 (member uniquemember) + - not grp018 + enh2 is member of + - not grp1 + - grp2 (uniquemember) + - grp3 (member) + - not grp015 + - not grp016 + - not grp018 + :steps: + 1. Create user1-4 + 2. Create grp[1-4] that can be inetUser (having memberof) + 3. Add userX (uniquemember) to grpX + 4. Create grp5 with grp[1-4] as member + user1 + 5. Assert user[1-4] are member of grp20_5 + 6. Assert userX is uniqueMember of grpX + 7. Check that user[1-4] is only 'uniqueMember' of the grp20_[1-4] + 8. Check that grp20_[1-4] are only 'member' of grp20_5 + 9. Check that user1 are only 'member' of grp20_5 + 10. Assert enh1 is member of + - grp1 (member) + - not grp2 + - grp3 (uniquemember) + - not grp15 + - grp16 (member uniquemember) + - not grp018 + - not grp20* + 11. Assert enh2 is member of + - not grp1 + - grp2 (uniquemember) + - grp3 (member) + - not grp15 + - not grp16 + - not grp018 + - not grp20* + :expectedresults: + 1. Success + 2. Success + 3. Success + 4. Success + 5. Success + 6. Success + 7. Success + 8. Success + 9. Success + 10. Success + 11. Success + """ + + memofenh1 = _get_user_dn('memofenh1') + memofenh2 = _get_user_dn('memofenh2') + + memofegrp1 = _get_group_dn('memofegrp1') + memofegrp2 = _get_group_dn('memofegrp2') + memofegrp3 = _get_group_dn('memofegrp3') + memofegrp015 = _get_group_dn('memofegrp015') + memofegrp016 = _get_group_dn('memofegrp016') + memofegrp018 = _get_group_dn('memofegrp018') + + # assert enh1 is member of + # - grp1 (member) + # - not grp2 + # - grp3 (uniquemember) + # - not grp15 + # - grp16 (member uniquemember) + # - not grp018 + assert _check_memberof(topology_st, member=memofenh1, group=memofegrp1) + assert not _check_memberof(topology_st, member=memofenh1, group=memofegrp2) + assert _check_memberof(topology_st, member=memofenh1, group=memofegrp3) + assert not _check_memberof(topology_st, member=memofenh1, group=memofegrp015) + assert _check_memberof(topology_st, member=memofenh1, group=memofegrp016) + assert not _check_memberof(topology_st, member=memofenh1, group=memofegrp018) + + # assert enh2 is member of + # - not grp1 + # - grp2 (uniquemember) + # - grp3 (member) + # - not grp15 + # - not grp16 + # - not grp018 + assert not _check_memberof(topology_st, member=memofenh2, group=memofegrp1) + assert _check_memberof(topology_st, member=memofenh2, group=memofegrp2) + assert _check_memberof(topology_st, member=memofenh2, group=memofegrp3) + assert not _check_memberof(topology_st, member=memofenh2, group=memofegrp015) + assert not _check_memberof(topology_st, member=memofenh2, group=memofegrp016) + assert not _check_memberof(topology_st, member=memofenh2, group=memofegrp018) + + # create user1-4 + memofuser1 = _create_user(topology_st, 'memofuser1') + memofuser2 = _create_user(topology_st, 'memofuser2') + memofuser3 = _create_user(topology_st, 'memofuser3') + memofuser4 = _create_user(topology_st, 'memofuser4') + + # create grp[1-4] that can be inetUser (having memberof) + # add userX (uniquemember) to grpX + memofegrp020_1 = _create_group(topology_st, 'memofegrp020_1') + memofegrp020_2 = _create_group(topology_st, 'memofegrp020_2') + memofegrp020_3 = _create_group(topology_st, 'memofegrp020_3') + memofegrp020_4 = _create_group(topology_st, 'memofegrp020_4') + for x in 
[(memofegrp020_1, memofuser1), + (memofegrp020_2, memofuser2), + (memofegrp020_3, memofuser3), + (memofegrp020_4, memofuser4)]: + mods = [(ldap.MOD_ADD, 'objectClass', b'inetUser'), (ldap.MOD_ADD, 'uniqueMember', x[1])] + topology_st.standalone.modify_s(ensure_str(x[0]), mods) + + # create grp5 with grp[1-4] as member + user1 + memofegrp020_5 = _create_group(topology_st, 'memofegrp020_5') + mods = [(ldap.MOD_ADD, 'member', memofuser1)] + for grp in [memofegrp020_1, memofegrp020_2, memofegrp020_3, memofegrp020_4]: + mods.append((ldap.MOD_ADD, 'member', grp)) + topology_st.standalone.modify_s(ensure_str(memofegrp020_5), mods) + + # assert user[1-4] are member of grp20_5 + for user in [memofuser1, memofuser2, memofuser3, memofuser4]: + assert _check_memberof(topology_st, member=user, group=memofegrp020_5) + + # assert userX is uniqueMember of grpX + assert _check_memberof(topology_st, member=memofuser1, group=memofegrp020_1) + assert _check_memberof(topology_st, member=memofuser2, group=memofegrp020_2) + assert _check_memberof(topology_st, member=memofuser3, group=memofegrp020_3) + assert _check_memberof(topology_st, member=memofuser4, group=memofegrp020_4) + + # check that user[1-4] is only 'uniqueMember' of the grp20_[1-4] + for x in [(memofegrp020_1, memofuser1), + (memofegrp020_2, memofuser2), + (memofegrp020_3, memofuser3), + (memofegrp020_4, memofuser4)]: + assert _check_memberattr(topology_st, x[0], 'uniqueMember', x[1]) + assert not _check_memberattr(topology_st, x[0], 'member', x[1]) + # check that grp20_[1-4] are only 'member' of grp20_5 + # check that user1 are only 'member' of grp20_5 + for x in [memofuser1, memofegrp020_1, memofegrp020_2, memofegrp020_3, memofegrp020_4]: + assert _check_memberattr(topology_st, memofegrp020_5, 'member', x) + assert not _check_memberattr(topology_st, memofegrp020_5, 'uniqueMember', x) + + for user in [memofuser2, memofuser3, memofuser4]: + assert not _check_memberattr(topology_st, memofegrp020_5, 'member', user) + assert not _check_memberattr(topology_st, memofegrp020_5, 'uniqueMember', user) + + # assert enh1 is member of + # - grp1 (member) + # - not grp2 + # - grp3 (uniquemember) + # - not grp15 + # - grp16 (member uniquemember) + # - not grp018 + # - not grp20* + assert _check_memberof(topology_st, member=memofenh1, group=memofegrp1) + assert not _check_memberof(topology_st, member=memofenh1, group=memofegrp2) + assert _check_memberof(topology_st, member=memofenh1, group=memofegrp3) + assert not _check_memberof(topology_st, member=memofenh1, group=memofegrp015) + assert _check_memberof(topology_st, member=memofenh1, group=memofegrp016) + assert not _check_memberof(topology_st, member=memofenh1, group=memofegrp018) + assert not _check_memberof(topology_st, member=memofenh1, group=memofegrp020_1) + assert not _check_memberof(topology_st, member=memofenh1, group=memofegrp020_2) + assert not _check_memberof(topology_st, member=memofenh1, group=memofegrp020_3) + assert not _check_memberof(topology_st, member=memofenh1, group=memofegrp020_4) + assert not _check_memberof(topology_st, member=memofenh1, group=memofegrp020_5) + + # assert enh2 is member of + # - not grp1 + # - grp2 (uniquemember) + # - grp3 (member) + # - not grp15 + # - not grp16 + # - not grp018 + # - not grp20* + assert not _check_memberof(topology_st, member=memofenh2, group=memofegrp1) + assert _check_memberof(topology_st, member=memofenh2, group=memofegrp2) + assert _check_memberof(topology_st, member=memofenh2, group=memofegrp3) + assert not _check_memberof(topology_st, 
member=memofenh2, group=memofegrp015) + assert not _check_memberof(topology_st, member=memofenh2, group=memofegrp016) + assert not _check_memberof(topology_st, member=memofenh2, group=memofegrp018) + assert not _check_memberof(topology_st, member=memofenh2, group=memofegrp020_1) + assert not _check_memberof(topology_st, member=memofenh2, group=memofegrp020_2) + assert not _check_memberof(topology_st, member=memofenh2, group=memofegrp020_3) + assert not _check_memberof(topology_st, member=memofenh2, group=memofegrp020_4) + assert not _check_memberof(topology_st, member=memofenh2, group=memofegrp020_5) + + +def test_complex_group_scenario_6(topology_st): + """Test a complex memberOf case: + add userX as member/uniqueMember of GrpX + add Grp5 as uniquemember of GrpX (this create a loop) + + :id: d222af17-17a6-48a0-8f22-a38306726a21 + :setup: Standalone instance + enh1 is member of + - grp1 (member) + - not grp2 + - grp3 (uniquemember) + - not grp15 + - grp16 (member uniquemember) + - not grp018 + - not grp20* + enh2 is member of + - not grp1 + - grp2 (uniquemember) + - grp3 (member) + - not grp15 + - not grp16 + - not grp018 + - not grp20* + user1 is member of grp20_5 + userX is uniquemember of grp20_X + grp[1-4] are member of grp20_5 + :steps: + 1. Add user[1-4] (member) to grp020_[1-4] + 2. Check that user[1-4] are 'member' and 'uniqueMember' of the grp20_[1-4] + 3. Add Grp[1-4] (uniqueMember) to grp5 + 4. Assert user[1-4] are member of grp20_[1-4] + 5. Assert that all groups are members of each others because Grp5 is member of all grp20_[1-4] + 6. Assert user[1-5] is uniqueMember of grp[1-5] + 7. Assert enh1 is member of + - grp1 (member) + - not grp2 + - grp3 (uniquemember) + - not grp15 + - grp16 (member uniquemember) + - not grp018 + - not grp20* + 8. Assert enh2 is member of + - not grp1 + - grp2 (uniquemember) + - grp3 (member) + - not grp15 + - not grp16 + - not grp018 + - not grp20* + :expectedresults: + 1. Success + 2. Success + 3. Success + 4. Success + 5. Success + 6. Success + 7. Success + 8. 
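# Step 3 above deliberately creates a membership cycle: grp020_[1-4] are
# already 'member' of grp020_5, and grp020_5 is then added as 'uniqueMember'
# of each grp020_[1-4]. The memberOf plugin has to detect the cycle and stop
# rather than recurse forever; the test body below waits (time.sleep(5)) and
# then expects every group in the cycle to be memberOf every *other* group,
# but never of itself. The same expectation as the nested loops in the test
# body, shown compactly as a reading aid (illustrative only):
#
#     cycle = [memofegrp020_1, memofegrp020_2, memofegrp020_3, memofegrp020_4]
#     for grp in cycle:
#         assert not _check_memberof(topology_st, member=grp, group=grp)
#         for other in cycle:
#             if other != grp:
#                 assert _check_memberof(topology_st, member=grp, group=other)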
Success + """ + + memofenh1 = _get_user_dn('memofenh1') + memofenh2 = _get_user_dn('memofenh2') + + memofegrp1 = _get_group_dn('memofegrp1') + memofegrp2 = _get_group_dn('memofegrp2') + memofegrp3 = _get_group_dn('memofegrp3') + memofegrp015 = _get_group_dn('memofegrp015') + memofegrp016 = _get_group_dn('memofegrp016') + memofegrp018 = _get_group_dn('memofegrp018') + + memofuser1 = _get_user_dn('memofuser1') + memofuser2 = _get_user_dn('memofuser2') + memofuser3 = _get_user_dn('memofuser3') + memofuser4 = _get_user_dn('memofuser4') + + memofegrp020_1 = _get_group_dn('memofegrp020_1') + memofegrp020_2 = _get_group_dn('memofegrp020_2') + memofegrp020_3 = _get_group_dn('memofegrp020_3') + memofegrp020_4 = _get_group_dn('memofegrp020_4') + memofegrp020_5 = _get_group_dn('memofegrp020_5') + + # assert user[1-4] are member of grp20_5 + for user in [memofuser1, memofuser2, memofuser3, memofuser4]: + assert _check_memberof(topology_st, member=user, group=memofegrp020_5) + + # assert userX is member of grpX + assert _check_memberof(topology_st, member=memofuser1, group=memofegrp020_1) + assert _check_memberof(topology_st, member=memofuser2, group=memofegrp020_2) + assert _check_memberof(topology_st, member=memofuser3, group=memofegrp020_3) + assert _check_memberof(topology_st, member=memofuser4, group=memofegrp020_4) + + # assert enh1 is member of + # - grp1 (member) + # - not grp2 + # - grp3 (uniquemember) + # - not grp15 + # - grp16 (member uniquemember) + # - not grp018 + # - not grp20* + assert _check_memberof(topology_st, member=memofenh1, group=memofegrp1) + assert not _check_memberof(topology_st, member=memofenh1, group=memofegrp2) + assert _check_memberof(topology_st, member=memofenh1, group=memofegrp3) + assert not _check_memberof(topology_st, member=memofenh1, group=memofegrp015) + assert _check_memberof(topology_st, member=memofenh1, group=memofegrp016) + assert not _check_memberof(topology_st, member=memofenh1, group=memofegrp018) + assert not _check_memberof(topology_st, member=memofenh1, group=memofegrp020_1) + assert not _check_memberof(topology_st, member=memofenh1, group=memofegrp020_2) + assert not _check_memberof(topology_st, member=memofenh1, group=memofegrp020_3) + assert not _check_memberof(topology_st, member=memofenh1, group=memofegrp020_4) + assert not _check_memberof(topology_st, member=memofenh1, group=memofegrp020_5) + + # assert enh2 is member of + # - not grp1 + # - grp2 (uniquemember) + # - grp3 (member) + # - not grp15 + # - not grp16 + # - not grp018 + # - not grp20* + assert not _check_memberof(topology_st, member=memofenh2, group=memofegrp1) + assert _check_memberof(topology_st, member=memofenh2, group=memofegrp2) + assert _check_memberof(topology_st, member=memofenh2, group=memofegrp3) + assert not _check_memberof(topology_st, member=memofenh2, group=memofegrp015) + assert not _check_memberof(topology_st, member=memofenh2, group=memofegrp016) + assert not _check_memberof(topology_st, member=memofenh2, group=memofegrp018) + assert not _check_memberof(topology_st, member=memofenh2, group=memofegrp020_1) + assert not _check_memberof(topology_st, member=memofenh2, group=memofegrp020_2) + assert not _check_memberof(topology_st, member=memofenh2, group=memofegrp020_3) + assert not _check_memberof(topology_st, member=memofenh2, group=memofegrp020_4) + assert not _check_memberof(topology_st, member=memofenh2, group=memofegrp020_5) + + # check that user[1-4] is only 'uniqueMember' of the grp20_[1-4] + for x in [(memofegrp020_1, memofuser1), + (memofegrp020_2, memofuser2), 
+ (memofegrp020_3, memofuser3), + (memofegrp020_4, memofuser4)]: + assert _check_memberattr(topology_st, x[0], 'uniqueMember', x[1]) + assert not _check_memberattr(topology_st, x[0], 'member', x[1]) + + # check that grp20_[1-4] are only 'member' of grp20_5 + # check that user1 is only 'member' of grp20_5 + for x in [memofuser1, memofegrp020_1, memofegrp020_2, memofegrp020_3, memofegrp020_4]: + assert _check_memberattr(topology_st, memofegrp020_5, 'member', x) + assert not _check_memberattr(topology_st, memofegrp020_5, 'uniqueMember', x) + + # check that user2-4 are neither 'member' nor 'uniquemember' of grp20_5 + for user in [memofuser2, memofuser3, memofuser4]: + assert not _check_memberattr(topology_st, memofegrp020_5, 'member', user) + assert not _check_memberattr(topology_st, memofegrp020_5, 'uniqueMember', user) + + # add userX (member) to grpX + for x in [(memofegrp020_1, memofuser1), + (memofegrp020_2, memofuser2), + (memofegrp020_3, memofuser3), + (memofegrp020_4, memofuser4)]: + mods = [(ldap.MOD_ADD, 'member', x[1])] + topology_st.standalone.modify_s(ensure_str(x[0]), mods) + + # check that user[1-4] are 'member' and 'uniqueMember' of the grp20_[1-4] + for x in [(memofegrp020_1, memofuser1), + (memofegrp020_2, memofuser2), + (memofegrp020_3, memofuser3), + (memofegrp020_4, memofuser4)]: + assert _check_memberattr(topology_st, x[0], 'uniqueMember', x[1]) + assert _check_memberattr(topology_st, x[0], 'member', x[1]) + + # add Grp[1-4] (uniqueMember) to grp5 + # it creates a membership loop !!! + mods = [(ldap.MOD_ADD, 'uniqueMember', memofegrp020_5)] + for grp in [memofegrp020_1, memofegrp020_2, memofegrp020_3, memofegrp020_4]: + topology_st.standalone.modify_s(ensure_str(grp), mods) + + time.sleep(5) + # assert user[1-4] are member of grp20_[1-4] + for user in [memofuser1, memofuser2, memofuser3, memofuser4]: + assert _check_memberof(topology_st, member=user, group=memofegrp020_5) + assert _check_memberof(topology_st, member=user, group=memofegrp020_4) + assert _check_memberof(topology_st, member=user, group=memofegrp020_3) + assert _check_memberof(topology_st, member=user, group=memofegrp020_2) + assert _check_memberof(topology_st, member=user, group=memofegrp020_1) + + # assert that all groups are members of each others because Grp5 + # is member of all grp20_[1-4] + for grp in [memofegrp020_1, memofegrp020_2, memofegrp020_3, memofegrp020_4]: + for owner in [memofegrp020_1, memofegrp020_2, memofegrp020_3, memofegrp020_4]: + if grp == owner: + # no member of itself + assert not _check_memberof(topology_st, member=grp, group=owner) + else: + assert _check_memberof(topology_st, member=grp, group=owner) + for grp in [memofegrp020_1, memofegrp020_2, memofegrp020_3, memofegrp020_4]: + assert _check_memberof(topology_st, member=grp, group=memofegrp020_5) + + # assert userX is uniqueMember of grpX + assert _check_memberof(topology_st, member=memofuser1, group=memofegrp020_1) + assert _check_memberof(topology_st, member=memofuser2, group=memofegrp020_2) + assert _check_memberof(topology_st, member=memofuser3, group=memofegrp020_3) + assert _check_memberof(topology_st, member=memofuser4, group=memofegrp020_4) + assert _check_memberof(topology_st, member=memofuser4, group=memofegrp020_5) + + # assert enh1 is member of + # - grp1 (member) + # - not grp2 + # - grp3 (uniquemember) + # - not grp15 + # - grp16 (member uniquemember) + # - not grp018 + # - not grp20* + assert _check_memberof(topology_st, member=memofenh1, group=memofegrp1) + assert not _check_memberof(topology_st, 
member=memofenh1, group=memofegrp2) + assert _check_memberof(topology_st, member=memofenh1, group=memofegrp3) + assert not _check_memberof(topology_st, member=memofenh1, group=memofegrp015) + assert _check_memberof(topology_st, member=memofenh1, group=memofegrp016) + assert not _check_memberof(topology_st, member=memofenh1, group=memofegrp018) + assert not _check_memberof(topology_st, member=memofenh1, group=memofegrp020_1) + assert not _check_memberof(topology_st, member=memofenh1, group=memofegrp020_2) + assert not _check_memberof(topology_st, member=memofenh1, group=memofegrp020_3) + assert not _check_memberof(topology_st, member=memofenh1, group=memofegrp020_4) + assert not _check_memberof(topology_st, member=memofenh1, group=memofegrp020_5) + + # assert enh2 is member of + # - not grp1 + # - grp2 (uniquemember) + # - grp3 (member) + # - not grp15 + # - not grp16 + # - not grp018 + # - not grp20* + assert not _check_memberof(topology_st, member=memofenh2, group=memofegrp1) + assert _check_memberof(topology_st, member=memofenh2, group=memofegrp2) + assert _check_memberof(topology_st, member=memofenh2, group=memofegrp3) + assert not _check_memberof(topology_st, member=memofenh2, group=memofegrp015) + assert not _check_memberof(topology_st, member=memofenh2, group=memofegrp016) + assert not _check_memberof(topology_st, member=memofenh2, group=memofegrp018) + assert not _check_memberof(topology_st, member=memofenh2, group=memofegrp020_1) + assert not _check_memberof(topology_st, member=memofenh2, group=memofegrp020_2) + assert not _check_memberof(topology_st, member=memofenh2, group=memofegrp020_3) + assert not _check_memberof(topology_st, member=memofenh2, group=memofegrp020_4) + assert not _check_memberof(topology_st, member=memofenh2, group=memofegrp020_5) + + +def verify_post_023(topology_st, memofegrp020_1, memofegrp020_2, memofegrp020_3, memofegrp020_4, memofegrp020_5, + memofuser1, memofuser2, memofuser3, memofuser4): + """ + /----member ---> G1 ---uniqueMember -------\ + / V + G5 ------------------------>member ---------- --->U1 + | + |----member ---> G2 ---member/uniqueMember -> U2 + |<--uniquemember-/ + | + |----member ---> G3 ---member/uniqueMember -> U3 + |<--uniquemember-/ + |----member ---> G4 ---member/uniqueMember -> U4 + |<--uniquemember-/ + """ + for x in [memofegrp020_1, memofegrp020_2, memofegrp020_3, memofegrp020_4]: + assert _check_memberattr(topology_st, memofegrp020_5, 'member', x) + assert not _check_memberattr(topology_st, memofegrp020_5, 'uniqueMember', x) + for x in [memofegrp020_2, memofegrp020_3, memofegrp020_4]: + assert not _check_memberattr(topology_st, x, 'member', memofegrp020_5) + assert _check_memberattr(topology_st, x, 'uniqueMember', memofegrp020_5) + # check that user[1-4] is only 'uniqueMember' of the grp20_[1-4] + for x in [(memofegrp020_2, memofuser2), + (memofegrp020_3, memofuser3), + (memofegrp020_4, memofuser4)]: + assert _check_memberattr(topology_st, x[0], 'uniqueMember', x[1]) + assert _check_memberattr(topology_st, x[0], 'member', x[1]) + assert _check_memberattr(topology_st, memofegrp020_1, 'uniqueMember', memofuser1) + assert not _check_memberattr(topology_st, memofegrp020_1, 'member', memofuser1) + assert not _check_memberattr(topology_st, memofegrp020_1, 'uniqueMember', memofegrp020_5) + assert not _check_memberattr(topology_st, memofegrp020_1, 'member', memofegrp020_5) + + for x in [memofegrp020_1, memofegrp020_2, memofegrp020_3, memofegrp020_4, memofuser1, memofuser2, memofuser3, + memofuser4]: + assert _check_memberof(topology_st, 
member=x, group=memofegrp020_5) + for x in [memofegrp020_2, memofegrp020_3, memofegrp020_4]: + assert _check_memberof(topology_st, member=memofegrp020_5, group=x) + + for user in [memofuser1, memofuser2, memofuser3, memofuser4]: + assert _check_memberof(topology_st, member=user, group=memofegrp020_5) + assert _check_memberof(topology_st, member=user, group=memofegrp020_4) + assert _check_memberof(topology_st, member=user, group=memofegrp020_3) + assert _check_memberof(topology_st, member=user, group=memofegrp020_2) + for grp in [memofegrp020_1, memofegrp020_2, memofegrp020_3, memofegrp020_4, memofegrp020_5]: + assert _check_memberof(topology_st, member=memofuser1, group=grp) + + +def test_complex_group_scenario_7(topology_st): + """Check the user removal from the complex membership topology + + :id: d222af17-17a6-48a0-8f22-a38306726a22 + :setup: Standalone instance, + enh1 is member of + - grp1 (member) + - not grp2 + - grp3 (uniquemember) + - not grp15 + - grp16 (member uniquemember) + - not grp018 + - not grp20* + enh2 is member of + - not grp1 + - grp2 (uniquemember) + - grp3 (member) + - not grp15 + - not grp16 + - not grp018 + - not grp20* + grp[1-4] are member of grp20_5 + user1 is member (member) of group_5 + grp5 is uniqueMember of grp20_[1-4] + user[1-4] is member/uniquemember of grp20_[1-4] + :steps: + 1. Delete user1 as 'member' of grp20_1 + 2. Delete grp020_5 as 'uniqueMember' of grp20_1 + 3. Check the result membership + :expectedresults: + 1. Success + 2. Success + 3. The result should be like this + + :: + + /----member ---> G1 ---uniqueMember -------\ + / V + G5 ------------------------>member ---------- --->U1 + | + |----member ---> G2 ---member/uniqueMember -> U2 + |<--uniquemember-/ + | + |----member ---> G3 ---member/uniqueMember -> U3 + |<--uniquemember-/ + |----member ---> G4 ---member/uniqueMember -> U4 + |<--uniquemember-/ + + """ + + memofenh1 = _get_user_dn('memofenh1') + memofenh2 = _get_user_dn('memofenh2') + + memofegrp1 = _get_group_dn('memofegrp1') + memofegrp2 = _get_group_dn('memofegrp2') + memofegrp3 = _get_group_dn('memofegrp3') + memofegrp015 = _get_group_dn('memofegrp015') + memofegrp016 = _get_group_dn('memofegrp016') + memofegrp018 = _get_group_dn('memofegrp018') + + memofuser1 = _get_user_dn('memofuser1') + memofuser2 = _get_user_dn('memofuser2') + memofuser3 = _get_user_dn('memofuser3') + memofuser4 = _get_user_dn('memofuser4') + + memofegrp020_1 = _get_group_dn('memofegrp020_1') + memofegrp020_2 = _get_group_dn('memofegrp020_2') + memofegrp020_3 = _get_group_dn('memofegrp020_3') + memofegrp020_4 = _get_group_dn('memofegrp020_4') + memofegrp020_5 = _get_group_dn('memofegrp020_5') + + # assert user[1-4] are member of grp20_[1-4] + for user in [memofuser1, memofuser2, memofuser3, memofuser4]: + assert _check_memberof(topology_st, member=user, group=memofegrp020_5) + assert _check_memberof(topology_st, member=user, group=memofegrp020_4) + assert _check_memberof(topology_st, member=user, group=memofegrp020_3) + assert _check_memberof(topology_st, member=user, group=memofegrp020_2) + assert _check_memberof(topology_st, member=user, group=memofegrp020_1) + + # assert that all groups are members of each others because Grp5 + # is member of all grp20_[1-4] + for grp in [memofegrp020_1, memofegrp020_2, memofegrp020_3, memofegrp020_4]: + for owner in [memofegrp020_1, memofegrp020_2, memofegrp020_3, memofegrp020_4]: + if grp == owner: + # no member of itself + assert not _check_memberof(topology_st, member=grp, group=owner) + else: + assert 
_check_memberof(topology_st, member=grp, group=owner) + for grp in [memofegrp020_1, memofegrp020_2, memofegrp020_3, memofegrp020_4]: + assert _check_memberof(topology_st, member=grp, group=memofegrp020_5) + + # assert userX is uniqueMember of grpX + assert _check_memberof(topology_st, member=memofuser1, group=memofegrp020_1) + assert _check_memberof(topology_st, member=memofuser2, group=memofegrp020_2) + assert _check_memberof(topology_st, member=memofuser3, group=memofegrp020_3) + assert _check_memberof(topology_st, member=memofuser4, group=memofegrp020_4) + assert _check_memberof(topology_st, member=memofuser4, group=memofegrp020_5) + + # assert enh1 is member of + # - grp1 (member) + # - not grp2 + # - grp3 (uniquemember) + # - not grp15 + # - grp16 (member uniquemember) + # - not grp018 + # - not grp20* + assert _check_memberof(topology_st, member=memofenh1, group=memofegrp1) + assert not _check_memberof(topology_st, member=memofenh1, group=memofegrp2) + assert _check_memberof(topology_st, member=memofenh1, group=memofegrp3) + assert not _check_memberof(topology_st, member=memofenh1, group=memofegrp015) + assert _check_memberof(topology_st, member=memofenh1, group=memofegrp016) + assert not _check_memberof(topology_st, member=memofenh1, group=memofegrp018) + assert not _check_memberof(topology_st, member=memofenh1, group=memofegrp020_1) + assert not _check_memberof(topology_st, member=memofenh1, group=memofegrp020_2) + assert not _check_memberof(topology_st, member=memofenh1, group=memofegrp020_3) + assert not _check_memberof(topology_st, member=memofenh1, group=memofegrp020_4) + assert not _check_memberof(topology_st, member=memofenh1, group=memofegrp020_5) + + # assert enh2 is member of + # - not grp1 + # - grp2 (uniquemember) + # - grp3 (member) + # - not grp15 + # - not grp16 + # - not grp018 + # - not grp20* + assert not _check_memberof(topology_st, member=memofenh2, group=memofegrp1) + assert _check_memberof(topology_st, member=memofenh2, group=memofegrp2) + assert _check_memberof(topology_st, member=memofenh2, group=memofegrp3) + assert not _check_memberof(topology_st, member=memofenh2, group=memofegrp015) + assert not _check_memberof(topology_st, member=memofenh2, group=memofegrp016) + assert not _check_memberof(topology_st, member=memofenh2, group=memofegrp018) + assert not _check_memberof(topology_st, member=memofenh2, group=memofegrp020_1) + assert not _check_memberof(topology_st, member=memofenh2, group=memofegrp020_2) + assert not _check_memberof(topology_st, member=memofenh2, group=memofegrp020_3) + assert not _check_memberof(topology_st, member=memofenh2, group=memofegrp020_4) + assert not _check_memberof(topology_st, member=memofenh2, group=memofegrp020_5) + + # check that user[1-4] is only 'uniqueMember' of the grp20_[1-4] + for x in [(memofegrp020_1, memofuser1), + (memofegrp020_2, memofuser2), + (memofegrp020_3, memofuser3), + (memofegrp020_4, memofuser4)]: + assert _check_memberattr(topology_st, x[0], 'uniqueMember', x[1]) + assert _check_memberattr(topology_st, x[0], 'member', x[1]) + + # check that grp20_[1-4] are 'uniqueMember' and 'member' of grp20_5 + # check that user1 is only 'member' of grp20_5 + for x in [memofegrp020_1, memofegrp020_2, memofegrp020_3, memofegrp020_4]: + assert _check_memberattr(topology_st, memofegrp020_5, 'member', x) + assert not _check_memberattr(topology_st, memofegrp020_5, 'uniqueMember', x) + assert _check_memberattr(topology_st, memofegrp020_5, 'member', memofuser1) + assert not _check_memberattr(topology_st, memofegrp020_5, 
'uniqueMember', memofuser1) + + # DEL user1 as 'member' of grp20_1 + mods = [(ldap.MOD_DELETE, 'member', memofuser1)] + topology_st.standalone.modify_s(ensure_str(memofegrp020_1), mods) + + mods = [(ldap.MOD_DELETE, 'uniqueMember', memofegrp020_5)] + topology_st.standalone.modify_s(ensure_str(memofegrp020_1), mods) + + """ + /----member ---> G1 ---uniqueMember -------\ + / V + G5 ------------------------>member ---------- --->U1 + | + |----member ---> G2 ---member/uniqueMember -> U2 + |<--uniquemember-/ + | + |----member ---> G3 ---member/uniqueMember -> U3 + |<--uniquemember-/ + |----member ---> G4 ---member/uniqueMember -> U4 + |<--uniquemember-/ + """ + verify_post_023(topology_st, memofegrp020_1, memofegrp020_2, memofegrp020_3, memofegrp020_4, memofegrp020_5, + memofuser1, memofuser2, memofuser3, memofuser4) + + +def verify_post_024(topology_st, memofegrp020_1, memofegrp020_2, memofegrp020_3, memofegrp020_4, memofegrp020_5, + memofuser1, memofuser2, memofuser3, memofuser4): + """ + /----member ---> G1 ---member/uniqueMember -\ + / V + G5 ------------------------>member ---------- --->U1 + | + |----member ---> G2 ---member/uniqueMember -> U2 + |<--uniquemember-/ + | + |----member ---> G3 ---member/uniqueMember -> U3 + |<--uniquemember-/ + |----member ---> G4 ---member/uniqueMember -> U4 + |<--uniquemember-/ + """ + for x in [memofegrp020_1, memofegrp020_2, memofegrp020_3, memofegrp020_4]: + assert _check_memberattr(topology_st, memofegrp020_5, 'member', x) + assert not _check_memberattr(topology_st, memofegrp020_5, 'uniqueMember', x) + for x in [memofegrp020_2, memofegrp020_3, memofegrp020_4]: + assert not _check_memberattr(topology_st, x, 'member', memofegrp020_5) + assert _check_memberattr(topology_st, x, 'uniqueMember', memofegrp020_5) + # check that user[1-4] is only 'uniqueMember' of the grp20_[1-4] + for x in [(memofegrp020_1, memofuser1), + (memofegrp020_2, memofuser2), + (memofegrp020_3, memofuser3), + (memofegrp020_4, memofuser4)]: + assert _check_memberattr(topology_st, x[0], 'uniqueMember', x[1]) + assert _check_memberattr(topology_st, x[0], 'member', x[1]) + assert not _check_memberattr(topology_st, memofegrp020_1, 'uniqueMember', memofegrp020_5) + assert not _check_memberattr(topology_st, memofegrp020_1, 'member', memofegrp020_5) + + for x in [memofegrp020_1, memofegrp020_2, memofegrp020_3, memofegrp020_4, memofuser1, memofuser2, memofuser3, + memofuser4]: + assert _check_memberof(topology_st, member=x, group=memofegrp020_5) + for x in [memofegrp020_2, memofegrp020_3, memofegrp020_4]: + assert _check_memberof(topology_st, member=memofegrp020_5, group=x) + + for user in [memofuser1, memofuser2, memofuser3, memofuser4]: + assert _check_memberof(topology_st, member=user, group=memofegrp020_5) + assert _check_memberof(topology_st, member=user, group=memofegrp020_4) + assert _check_memberof(topology_st, member=user, group=memofegrp020_3) + assert _check_memberof(topology_st, member=user, group=memofegrp020_2) + for grp in [memofegrp020_1, memofegrp020_2, memofegrp020_3, memofegrp020_4, memofegrp020_5]: + assert _check_memberof(topology_st, member=memofuser1, group=grp) + + +def test_complex_group_scenario_8(topology_st): + """Check the user add operation to the complex membership topology + + :id: d222af17-17a6-48a0-8f22-a38306726a23 + :setup: Standalone instance, + + :: + + /----member ---> G1 ---uniqueMember -------\ + / V + G5 ------------------------>member ---------- --->U1 + | + |----member ---> G2 ---member/uniqueMember -> U2 + |<--uniquemember-/ + | + |----member ---> G3 
---member/uniqueMember -> U3 + |<--uniquemember-/ + |----member ---> G4 ---member/uniqueMember -> U4 + |<--uniquemember-/ + + :steps: + 1. Add user1 to grp020_1 + 2. Check the result membership + :expectedresults: + 1. Success + 2. The result should be like this + + :: + + /----member ---> G1 ---member/uniqueMember -\ + / V + G5 ------------------------>member ---------- --->U1 + | + |----member ---> G2 ---member/uniqueMember -> U2 + |<--uniquemember-/ + | + |----member ---> G3 ---member/uniqueMember -> U3 + |<--uniquemember-/ + |----member ---> G4 ---member/uniqueMember -> U4 + |<--uniquemember-/ + + """ + + memofuser1 = _get_user_dn('memofuser1') + memofuser2 = _get_user_dn('memofuser2') + memofuser3 = _get_user_dn('memofuser3') + memofuser4 = _get_user_dn('memofuser4') + + memofegrp020_1 = _get_group_dn('memofegrp020_1') + memofegrp020_2 = _get_group_dn('memofegrp020_2') + memofegrp020_3 = _get_group_dn('memofegrp020_3') + memofegrp020_4 = _get_group_dn('memofegrp020_4') + memofegrp020_5 = _get_group_dn('memofegrp020_5') + verify_post_023(topology_st, memofegrp020_1, memofegrp020_2, memofegrp020_3, memofegrp020_4, memofegrp020_5, + memofuser1, memofuser2, memofuser3, memofuser4) + + # ADD user1 as 'member' of grp20_1 + mods = [(ldap.MOD_ADD, 'member', memofuser1)] + topology_st.standalone.modify_s(ensure_str(memofegrp020_1), mods) + verify_post_024(topology_st, memofegrp020_1, memofegrp020_2, memofegrp020_3, memofegrp020_4, memofegrp020_5, + memofuser1, memofuser2, memofuser3, memofuser4) + + +def verify_post_025(topology_st, memofegrp020_1, memofegrp020_2, memofegrp020_3, memofegrp020_4, memofegrp020_5, + memofuser1, memofuser2, memofuser3, memofuser4): + """ + /----member ---> G1 + / + G5 ------------------------>member ---------- --->U1 + | + |----member ---> G2 + |----member ---> G3 + |----member ---> G4 + + """ + for x in [memofegrp020_1, memofegrp020_2, memofegrp020_3, memofegrp020_4]: + assert _check_memberattr(topology_st, memofegrp020_5, 'member', x) + assert not _check_memberattr(topology_st, memofegrp020_5, 'uniqueMember', x) + for x in [memofegrp020_1, memofegrp020_2, memofegrp020_3, memofegrp020_4]: + assert not _check_memberattr(topology_st, x, 'member', memofegrp020_5) + assert not _check_memberattr(topology_st, x, 'uniqueMember', memofegrp020_5) + # check that user[1-4] is only 'uniqueMember' of the grp20_[1-4] + for x in [(memofegrp020_1, memofuser1), + (memofegrp020_2, memofuser2), + (memofegrp020_3, memofuser3), + (memofegrp020_4, memofuser4)]: + assert not _check_memberattr(topology_st, x[0], 'uniqueMember', x[1]) + assert not _check_memberattr(topology_st, x[0], 'member', x[1]) + + for x in [memofegrp020_1, memofegrp020_2, memofegrp020_3, memofegrp020_4, memofuser1]: + assert _check_memberof(topology_st, member=x, group=memofegrp020_5) + for x in [memofuser2, memofuser3, memofuser4]: + assert not _check_memberof(topology_st, member=x, group=memofegrp020_5) + assert _check_memberof(topology_st, member=memofuser1, group=memofegrp020_5) + for user in [memofuser1, memofuser2, memofuser3, memofuser4]: + for grp in [memofegrp020_1, memofegrp020_2, memofegrp020_3, memofegrp020_4]: + assert not _check_memberof(topology_st, member=user, group=grp) + + +def test_complex_group_scenario_9(topology_st): + """Check the massive user deletion from the complex membership topology + + :id: d222af17-17a6-48a0-8f22-a38306726a24 + :setup: Standalone instance, + + :: + + /----member ---> G1 ---member/uniqueMember -\ + / V + G5 ------------------------>member ---------- --->U1 + | + 
|----member ---> G2 ---member/uniqueMember -> U2 + |<--uniquemember-/ + | + |----member ---> G3 ---member/uniqueMember -> U3 + |<--uniquemember-/ + |----member ---> G4 ---member/uniqueMember -> U4 + |<--uniquemember-/ + + :steps: + 1. Delete user[1-5] as 'member' and 'uniqueMember' from grp20_[1-5] + 2. Check the result membership + :expectedresults: + 1. Success + 2. The result should be like this + + :: + + /----member ---> G1 + / + G5 ------------------------>member ---------- --->U1 + | + |----member ---> G2 + |----member ---> G3 + |----member ---> G4 + + """ + + memofuser1 = _get_user_dn('memofuser1') + memofuser2 = _get_user_dn('memofuser2') + memofuser3 = _get_user_dn('memofuser3') + memofuser4 = _get_user_dn('memofuser4') + + memofegrp020_1 = _get_group_dn('memofegrp020_1') + memofegrp020_2 = _get_group_dn('memofegrp020_2') + memofegrp020_3 = _get_group_dn('memofegrp020_3') + memofegrp020_4 = _get_group_dn('memofegrp020_4') + memofegrp020_5 = _get_group_dn('memofegrp020_5') + verify_post_024(topology_st, memofegrp020_1, memofegrp020_2, memofegrp020_3, memofegrp020_4, memofegrp020_5, + memofuser1, memofuser2, memofuser3, memofuser4) + + # ADD inet + # for user in [memofuser1, memofuser2, memofuser3, memofuser4]: + # mods = [(ldap.MOD_ADD, 'objectClass', 'inetUser')] + # topology_st.standalone.modify_s(user, mods) + for x in [(memofegrp020_1, memofuser1), + (memofegrp020_2, memofuser2), + (memofegrp020_3, memofuser3), + (memofegrp020_4, memofuser4)]: + mods = [(ldap.MOD_DELETE, 'member', x[1]), + (ldap.MOD_DELETE, 'uniqueMember', x[1])] + topology_st.standalone.modify_s(ensure_str(x[0]), mods) + """ + /----member ---> G1 + / + G5 ------------------------>member ---------- --->U1 + | + |----member ---> G2 + |<--uniquemember-/ + | + |----member ---> G3 + |<--uniquemember-/ + |----member ---> G4 + |<--uniquemember-/ + """ + + for x in [memofegrp020_2, memofegrp020_3, memofegrp020_4]: + mods = [(ldap.MOD_DELETE, 'uniqueMember', memofegrp020_5)] + topology_st.standalone.modify_s(ensure_str(x), mods) + """ + /----member ---> G1 + / + G5 ------------------------>member ---------- --->U1 + | + |----member ---> G2 + |----member ---> G3 + |----member ---> G4 + + """ + + verify_post_025(topology_st, memofegrp020_1, memofegrp020_2, memofegrp020_3, memofegrp020_4, memofegrp020_5, + memofuser1, memofuser2, memofuser3, memofuser4) + + +def test_memberof_auto_add_oc(topology_st): + """Test the auto add objectclass (OC) feature. The plugin should add a predefined + objectclass that will allow memberOf to be added to an entry. + + :id: d222af17-17a6-48a0-8f22-a38306726a25 + :setup: Standalone instance + :steps: + 1. Enable dynamic plugins + 2. Enable memberOf plugin + 3. Test that the default add OC works. + 4. Add a group that already includes one user + 5. Assert memberOf on user1 + 6. Delete user1 and the group + 7. Test invalid value (config validation) + 8. Add valid objectclass + 9. Add two users + 10. Add a group that already includes one user + 11. Add a user to the group + :expectedresults: + 1. Success + 2. Success + 3. Success + 4. Success + 5. Success + 6. Success + 7. Success + 8. Success + 9. Success + 10. Success + 11. Success + """ + + # enable dynamic plugins + try: + topology_st.standalone.modify_s(DN_CONFIG, + [(ldap.MOD_REPLACE, + 'nsslapd-dynamic-plugins', + b'on')]) + except ldap.LDAPError as e: + ldap.error('Failed to enable dynamic plugins! 
' + e.message['desc']) + assert False + + # Enable the plugin + topology_st.standalone.plugins.enable(name=PLUGIN_MEMBER_OF) + + # Test that the default add OC works. + + try: + topology_st.standalone.add_s(Entry((USER1_DN, + {'objectclass': 'top', + 'objectclass': 'person', + 'objectclass': 'organizationalPerson', + 'objectclass': 'inetorgperson', + 'sn': 'last', + 'cn': 'full', + 'givenname': 'user1', + 'uid': 'user1' + }))) + except ldap.LDAPError as e: + log.fatal('Failed to add user1 entry, error: ' + e.message['desc']) + assert False + + # Add a group(that already includes one user + try: + topology_st.standalone.add_s(Entry((GROUP_DN, + {'objectclass': 'top', + 'objectclass': 'groupOfNames', + 'cn': 'group', + 'member': USER1_DN + }))) + except ldap.LDAPError as e: + log.fatal('Failed to add group entry, error: ' + e.message['desc']) + assert False + + # Assert memberOf on user1 + _check_memberof(topology_st, USER1_DN, GROUP_DN) + + # Reset for the next test .... + topology_st.standalone.delete_s(USER1_DN) + topology_st.standalone.delete_s(GROUP_DN) + + # Test invalid value (config validation) + topology_st.standalone.plugins.enable(name=PLUGIN_MEMBER_OF) + try: + topology_st.standalone.modify_s(MEMBEROF_PLUGIN_DN, + [(ldap.MOD_REPLACE, + 'memberofAutoAddOC', + b'invalid123')]) + log.fatal('Incorrectly added invalid objectclass!') + assert False + except ldap.UNWILLING_TO_PERFORM: + log.info('Correctly rejected invalid objectclass') + except ldap.LDAPError as e: + ldap.error('Unexpected error adding invalid objectclass - error: ' + e.message['desc']) + assert False + + + # Add valid objectclass + topology_st.standalone.plugins.enable(name=PLUGIN_MEMBER_OF) + try: + topology_st.standalone.modify_s(MEMBEROF_PLUGIN_DN, + [(ldap.MOD_REPLACE, + 'memberofAutoAddOC', + b'inetuser')]) + except ldap.LDAPError as e: + log.fatal('Failed to configure memberOf plugin: error ' + e.message['desc']) + assert False + + # Add two users + try: + topology_st.standalone.add_s(Entry((USER1_DN, + {'objectclass': 'top', + 'objectclass': 'person', + 'objectclass': 'organizationalPerson', + 'objectclass': 'inetorgperson', + 'sn': 'last', + 'cn': 'full', + 'givenname': 'user1', + 'uid': 'user1' + }))) + except ldap.LDAPError as e: + log.fatal('Failed to add user1 entry, error: ' + e.message['desc']) + assert False + + try: + topology_st.standalone.add_s(Entry((USER2_DN, + {'objectclass': 'top', + 'objectclass': 'person', + 'objectclass': 'organizationalPerson', + 'objectclass': 'inetorgperson', + 'sn': 'last', + 'cn': 'full', + 'givenname': 'user2', + 'uid': 'user2' + }))) + except ldap.LDAPError as e: + log.fatal('Failed to add user2 entry, error: ' + e.message['desc']) + assert False + + # Add a group(that already includes one user + try: + topology_st.standalone.add_s(Entry((GROUP_DN, + {'objectclass': 'top', + 'objectclass': 'groupOfNames', + 'cn': 'group', + 'member': USER1_DN + }))) + except ldap.LDAPError as e: + log.fatal('Failed to add group entry, error: ' + e.message['desc']) + assert False + + # Add a user to the group + try: + topology_st.standalone.modify_s(GROUP_DN, + [(ldap.MOD_ADD, + 'member', + ensure_bytes(USER2_DN))]) + except ldap.LDAPError as e: + log.fatal('Failed to add user2 to group: error ' + e.message['desc']) + assert False + + log.info('Test complete.') + + +if __name__ == '__main__': + # Run isolated + # -s for DEBUG mode + CURRENT_FILE = os.path.realpath(__file__) + pytest.main("-s %s" % CURRENT_FILE) diff --git a/dirsrvtests/tests/suites/plugins/pluginpath_validation_test.py 
b/dirsrvtests/tests/suites/plugins/pluginpath_validation_test.py new file mode 100644 index 0000000..660ceac --- /dev/null +++ b/dirsrvtests/tests/suites/plugins/pluginpath_validation_test.py @@ -0,0 +1,111 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2016 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. +# --- END COPYRIGHT BLOCK --- +# +import pytest +from lib389.tasks import * +from lib389.utils import * +from lib389.topologies import topology_st +from lib389.plugins import WhoamiPlugin + +pytestmark = pytest.mark.tier1 + +logging.getLogger(__name__).setLevel(logging.DEBUG) +log = logging.getLogger(__name__) + + +@pytest.mark.ds47384 +@pytest.mark.ds47601 +def test_pluginpath_validation(topology_st): + """Test pluginpath validation: relative and absolute paths + With the inclusion of ticket 47601 - we do allow plugin paths + outside the default location + + :id: 99f1fb2f-051d-4fd9-93d0-592dcd9b4c22 + :setup: Standalone instance + :steps: + 1. Copy the library to a temporary directory + 2. Add valid plugin paths + * using the absolute path to the current library + * using new remote location + 3. Set plugin path back to the default + 4. Check invalid path (no library present) + 5. Check invalid relative path (no library present) + + :expectedresults: + 1. This should pass + 2. This should pass + 3. This should pass + 4. This should fail + 5. This should fail + """ + + inst = topology_st.standalone + whoami = WhoamiPlugin(inst) + # /tmp nowadays comes with noexec bit set on some systems + # so instead let's write somewhere where dirsrv user has access + tmp_dir = inst.get_bak_dir() + plugin_dir = inst.get_plugin_dir() + + # Copy the library to our tmp directory + try: + shutil.copy('%s/libwhoami-plugin.so' % plugin_dir, tmp_dir) + except IOError as e: + log.fatal('Failed to copy %s/libwhoami-plugin.so to the tmp directory %s, error: %s' % ( + plugin_dir, tmp_dir, e.strerror)) + assert False + + # + # Test adding valid plugin paths + # + # Try using the absolute path to the current library + whoami.replace('nsslapd-pluginPath', '%s/libwhoami-plugin' % plugin_dir) + + # Try using new remote location + # If SELinux is enabled, plugin can't be loaded as it's not labeled properly + if selinux_present(): + import selinux + if selinux.is_selinux_enabled(): + with pytest.raises(ldap.UNWILLING_TO_PERFORM): + whoami.replace('nsslapd-pluginPath', '%s/libwhoami-plugin' % tmp_dir) + # Label it with lib_t, so it can be executed + # We can't use selinux.setfilecon() here, because py.test needs to have mac_admin capability + # Instead we can call chcon directly: + subprocess.check_call(['/usr/bin/chcon', '-t', 'lib_t', '%s/libwhoami-plugin.so' % tmp_dir]) + # And try to change the path again + whoami.replace('nsslapd-pluginPath', '%s/libwhoami-plugin' % tmp_dir) + else: + whoami.replace('nsslapd-pluginPath', '%s/libwhoami-plugin' % tmp_dir) + + # Set plugin path back to the default + whoami.replace('nsslapd-pluginPath', 'libwhoami-plugin') + + # + # Test invalid path (no library present) + # + with pytest.raises(ldap.UNWILLING_TO_PERFORM): + whoami.replace('nsslapd-pluginPath', '/bin/libwhoami-plugin') + # No exception?! This is an error + log.error('Invalid plugin path was incorrectly accepted by the server!') + + # + # Test invalid relative path (no library present) + # + with pytest.raises(ldap.UNWILLING_TO_PERFORM): + whoami.replace('nsslapd-pluginPath', '../libwhoami-plugin') + # No exception?! 
This is an error
+    log.error('Invalid plugin path was incorrectly accepted by the server!')
+
+    log.info('Test complete')
+
+
+if __name__ == '__main__':
+    # Run isolated
+    # -s for DEBUG mode
+    CURRENT_FILE = os.path.realpath(__file__)
+    pytest.main("-s %s" % CURRENT_FILE)
+
diff --git a/dirsrvtests/tests/suites/plugins/referint_test.py b/dirsrvtests/tests/suites/plugins/referint_test.py
new file mode 100644
index 0000000..02b9857
--- /dev/null
+++ b/dirsrvtests/tests/suites/plugins/referint_test.py
@@ -0,0 +1,105 @@
+# --- BEGIN COPYRIGHT BLOCK ---
+# Copyright (C) 2016 Red Hat, Inc.
+# All rights reserved.
+#
+# License: GPL (version 3 or any later version).
+# See LICENSE for details.
+# --- END COPYRIGHT BLOCK ---
+#
+'''
+Created on Dec 12, 2019
+
+@author: tbordaz
+'''
+import logging
+import subprocess
+import pytest
+from lib389 import Entry
+from lib389.utils import *
+from lib389.plugins import *
+from lib389._constants import *
+from lib389.idm.user import UserAccounts, UserAccount
+from lib389.idm.group import Groups
+from lib389.topologies import topology_st as topo
+
+pytestmark = pytest.mark.tier1
+
+log = logging.getLogger(__name__)
+
+ESCAPED_RDN_BASE = "foo\\,oo"
+def _user_get_dn(no):
+    uid = '%s%d' % (ESCAPED_RDN_BASE, no)
+    dn = 'uid=%s,%s' % (uid, SUFFIX)
+    return (uid, dn)
+
+def add_escaped_user(server, no):
+    (uid, dn) = _user_get_dn(no)
+    log.fatal('Adding user (%s): ' % dn)
+    server.add_s(Entry((dn, {'objectclass': ['top', 'person', 'organizationalPerson', 'inetOrgPerson'],
+                             'uid': [uid],
+                             'sn' : [uid],
+                             'cn' : [uid]})))
+    return dn
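+
+# Illustration (an assumption, not part of the upstream test): if the lib389
+# SUFFIX constant resolves to the default dc=example,dc=com, the helpers above
+# are expected to build a DN whose RDN keeps the escaped comma literally, e.g.:
+#
+#   >>> _user_get_dn(0)
+#   ('foo\\,oo0', 'uid=foo\\,oo0,dc=example,dc=com')
+#
+# It is this escaped value, sitting last in a long member list, that used to
+# make referential integrity fail on MODRDN.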
+
+@pytest.mark.ds50020
+def test_referential_false_failure(topo):
+    """On MODRDN referential integrity can erroneously fail
+
+    :id: f77aeb80-c4c4-471b-8c1b-4733b714778b
+    :setup: Standalone Instance
+    :steps:
+        1. Configure the plugin
+        2. Create a group
+           - the first member is the one that will be moved
+           - more than 128 members
+           - the last member is a DN containing an escaped character
+        3. Rename the first member
+    :expectedresults:
+        1. should succeed
+        2. should succeed
+        3. should succeed
+    """
+
+    inst = topo[0]
+
+    # Disable the plugin, then re-enable it
+    plugin = ReferentialIntegrityPlugin(inst)
+    plugin.disable()
+    plugin.enable()
+
+    ############################################################################
+    # Configure plugin
+    ############################################################################
+    GROUP_CONTAINER = "ou=groups,%s" % DEFAULT_SUFFIX
+    plugin.replace('referint-membership-attr', 'member')
+    plugin.replace('nsslapd-plugincontainerscope', GROUP_CONTAINER)
+
+    ############################################################################
+    # Create a group whose members include escaped DNs
+    ############################################################################
+    # Add some users and a group
+    users = UserAccounts(inst, DEFAULT_SUFFIX, None)
+    user1 = users.create_test_user(uid=1001)
+    user2 = users.create_test_user(uid=1002)
+
+    groups = Groups(inst, GROUP_CONTAINER, None)
+    group = groups.create(properties={'cn': 'group'})
+    group.add('member', user2.dn)
+    group.add('member', user1.dn)
+
+    # Add more than 128 members so that referint follows the buggy path
+    for i in range(130):
+        escaped_user = add_escaped_user(inst, i)
+        group.add('member', escaped_user)
+
+    ############################################################################
+    # Check that the MODRDN succeeds
+    ###########################################################################
+    # Restart so that the member values are read back in the right order
+    # (the last value is the escaped one)
+    inst.restart()
+
+    # If the bug is fixed, referential integrity is able to update the member value
+    inst.rename_s(user1.dn, 'uid=new_test_user_1001', newsuperior=SUFFIX, delold=0)
+
+
diff --git a/dirsrvtests/tests/suites/plugins/rootdn_plugin_test.py b/dirsrvtests/tests/suites/plugins/rootdn_plugin_test.py
new file mode 100644
index 0000000..a3fc999
--- /dev/null
+++ b/dirsrvtests/tests/suites/plugins/rootdn_plugin_test.py
@@ -0,0 +1,595 @@
+# --- BEGIN COPYRIGHT BLOCK ---
+# Copyright (C) 2019 Red Hat, Inc.
+# All rights reserved.
+#
+# License: GPL (version 3 or any later version).
+# See LICENSE for details.
+# --- END COPYRIGHT BLOCK --- +# +import logging +import socket +import ldap +import pytest +import uuid +import time +from lib389 import DirSrv +from lib389.utils import * +from lib389.tasks import * +from lib389.tools import DirSrvTools +from lib389.topologies import topology_st +from lib389._constants import DEFAULT_SUFFIX, DN_DM, PASSWORD +from lib389.idm.directorymanager import DirectoryManager +from lib389.plugins import RootDNAccessControlPlugin + + +pytestmark = pytest.mark.tier1 + +logging.getLogger(__name__).setLevel(logging.DEBUG) +log = logging.getLogger(__name__) + +localhost = DirSrvTools.getLocalhost() +hostname = socket.gethostname() + + +@pytest.fixture(scope="function") +def rootdn_cleanup(topology_st): + """Do a cleanup of the config area before the test """ + log.info('Cleaning up the config area') + plugin = RootDNAccessControlPlugin(topology_st.standalone) + plugin.remove_all_allow_host() + plugin.remove_all_deny_host() + plugin.remove_all_allow_ip() + plugin.remove_all_deny_ip() + + +@pytest.fixture(scope="module") +def rootdn_setup(topology_st): + """Initialize our setup to test the Root DN Access Control Plugin + + Test the following access control type: + + - Allowed IP address * + - Denied IP address * + - Specific time window + - Days allowed access + - Allowed host * + - Denied host * + + * means multiple valued + """ + + log.info('Initializing root DN test suite...') + + # Enable dynamic plugins + topology_st.standalone.config.set('nsslapd-dynamic-plugins', 'on') + + # Enable the plugin + global plugin + plugin = RootDNAccessControlPlugin(topology_st.standalone) + plugin.enable() + + log.info('test_rootdn_init: Initialized root DN test suite.') + + +def rootdn_bind(inst, uri=None, fail=False): + """Helper function to test root DN bind + """ + newinst = DirSrv(verbose=False) + args = {SER_PORT: inst.port, + SER_SERVERID_PROP: inst.serverid} + newinst.allocate(args) + newinst.open(uri=uri, connOnly=True) # This binds as root dn + + +def test_rootdn_access_specific_time(topology_st, rootdn_setup, rootdn_cleanup, timeout=5): + """Test binding inside and outside of a specific time + + :id: a0ef30e5-538b-46fa-9762-01a4435a15e8 + :setup: Standalone instance, rootdn plugin set up + :steps: + 1. Get the current time, and bump it ahead twohours + 2. Bind as Root DN + 3. Set config to allow the entire day + 4. Bind as Root DN + 5. Cleanup + :expectedresults: + 1. Success + 2. Should fail + 3. Success + 4. Success + 5. 
Success + """ + + log.info('Running test_rootdn_access_specific_time...') + dm = DirectoryManager(topology_st.standalone) + + # Get the current time, and bump it ahead twohours + current_hour = time.strftime("%H") + if int(current_hour) > 12: + open_time = '0200' + close_time = '0400' + else: + open_time = '1600' + close_time = '1800' + + assert plugin.replace_many(('rootdn-open-time', open_time), + ('rootdn-close-time', close_time)) + + attr_updated = 0 + for i in range(0, timeout): + if (plugin.get_attr_val_utf8('rootdn-open-time') == open_time) and (plugin.get_attr_val_utf8('rootdn-close-time') == close_time): + attr_updated = 1 + break + else: + time.sleep(.5) + + if not attr_updated : + raise Exception ("rootdn-open-time and rootdn-close-time were not updated") + + # Bind as Root DN - should fail + with pytest.raises(ldap.UNWILLING_TO_PERFORM): + dm.bind() + + + # Set config to allow the entire day + open_time = '0000' + close_time = '2359' + assert plugin.replace_many(('rootdn-open-time', open_time), + ('rootdn-close-time', close_time)) + + attr_updated = 0 + for i in range(0, timeout): + if (plugin.get_attr_val_utf8('rootdn-open-time') == open_time) and (plugin.get_attr_val_utf8('rootdn-close-time') == close_time): + attr_updated = 1 + break + else: + time.sleep(.5) + + if not attr_updated : + raise Exception ("rootdn-open-time and rootdn-close-time were not updated") + + # Bind as Root DN - should succeed + dm.bind() + + # Cleanup - undo the changes we made so the next test has a clean slate + assert plugin.apply_mods([(ldap.MOD_DELETE, 'rootdn-open-time'), + (ldap.MOD_DELETE, 'rootdn-close-time')]) + + +def test_rootdn_access_day_of_week(topology_st, rootdn_setup, rootdn_cleanup, timeout=5): + """Test the days of week feature + + :id: a0ef30e5-538b-46fa-9762-01a4435a15e1 + :setup: Standalone instance, rootdn plugin set up + :steps: + 1. Set the deny days + 2. Bind as Root DN + 3. Set the allow days + 4. Bind as Root DN + :expectedresults: + 1. Success + 2. Should fail + 3. Success + 4. 
Success + """ + + log.info('Running test_rootdn_access_day_of_week...') + dm = DirectoryManager(topology_st.standalone) + + days = ('Sun', 'Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat') + day = int(time.strftime("%w", time.gmtime())) + + if day == 6: + # Handle the roll over from Saturday into Sunday + deny_days = days[1] + ', ' + days[2] + allow_days = days[6] + ',' + days[0] + elif day > 3: + deny_days = days[0] + ', ' + days[1] + allow_days = days[day] + ',' + days[day - 1] + else: + deny_days = days[4] + ',' + days[5] + allow_days = days[day] + ',' + days[day + 1] + + log.info('Today: ' + days[day]) + log.info('Allowed days: ' + allow_days) + log.info('Deny days: ' + deny_days) + + # Set the deny days + plugin.set_days_allowed(deny_days) + + attr_updated = 0 + for i in range(0, timeout): + if (str(plugin.get_days_allowed()) == deny_days): + attr_updated = 1 + break + else: + time.sleep(.5) + + if not attr_updated : + raise Exception ("rootdn-days-allowed was not updated") + + # Bind as Root DN - should fail + with pytest.raises(ldap.UNWILLING_TO_PERFORM): + dm.bind() + + # Set the allow days + plugin.set_days_allowed(allow_days) + + attr_updated = 0 + for i in range(0, timeout): + if (str(plugin.get_days_allowed()) == allow_days): + attr_updated = 1 + break + else: + time.sleep(.5) + + if not attr_updated : + raise Exception ("rootdn-days-allowed was not updated") + + # Bind as Root DN - should succeed + dm.bind() + + +def test_rootdn_access_denied_ip(topology_st, rootdn_setup, rootdn_cleanup, timeout=5): + """Test denied IP feature - we can just test denying 127.0.0.1 + + :id: a0ef30e5-538b-46fa-9762-01a4435a15e2 + :setup: Standalone instance, rootdn plugin set up + :steps: + 1. Set rootdn-deny-ip to '127.0.0.1' and '::1' + 2. Bind as Root DN + 3. Change the denied IP so root DN succeeds + 4. Bind as Root DN + :expectedresults: + 1. Success + 2. Should fail + 3. Success + 4. Success + """ + + log.info('Running test_rootdn_access_denied_ip...') + plugin.add_deny_ip('127.0.0.1') + plugin.add_deny_ip('::1') + + attr_updated = 0 + for i in range(0, timeout): + if ('127.0.0.1' in str(plugin.get_deny_ip())): + attr_updated = 1 + break + else: + time.sleep(.5) + + if not attr_updated : + raise Exception ("rootdn-deny-ip was not updated") + + # Bind as Root DN - should fail + uri = 'ldap://{}:{}'.format('127.0.0.1', topology_st.standalone.port) + with pytest.raises(ldap.UNWILLING_TO_PERFORM): + rootdn_bind(topology_st.standalone, uri=uri) + + # Change the denied IP so root DN succeeds + plugin.apply_mods([(ldap.MOD_REPLACE, 'rootdn-deny-ip', '255.255.255.255')]) + + attr_updated = 0 + for i in range(0, timeout): + if ('255.255.255.255' in str(plugin.get_deny_ip())): + attr_updated = 1 + break + else: + time.sleep(.5) + + if not attr_updated : + raise Exception ("rootdn-deny-ip was not updated") + + # Bind as Root DN - should succeed + rootdn_bind(topology_st.standalone, uri=uri) + + +def test_rootdn_access_denied_host(topology_st, rootdn_setup, rootdn_cleanup, timeout=5): + """Test denied Host feature - we can just test denying localhost + + :id: a0ef30e5-538b-46fa-9762-01a4435a15e3 + :setup: Standalone instance, rootdn plugin set up + :steps: + 1. Set rootdn-deny-host to hostname (localhost if not accessable) + 2. Bind as Root DN + 3. Change the denied host so root DN succeeds + 4. Bind as Root DN + :expectedresults: + 1. Success + 2. Should fail + 3. Success + 4. 
Success + """ + + log.info('Running test_rootdn_access_denied_host...') + hostname = socket.gethostname() + plugin.add_deny_host(hostname) + if localhost != hostname: + plugin.add_deny_host(localhost) + + attr_updated = 0 + for i in range(0, timeout): + if (str(plugin.get_deny_host()) == hostname) or (str(plugin.get_deny_host()) == localhost): + attr_updated = 1 + break + else: + time.sleep(.5) + + if not attr_updated : + raise Exception ("rootdn-deny-host was not updated") + + # Bind as Root DN - should fail + uri = 'ldap://{}:{}'.format(localhost, topology_st.standalone.port) + with pytest.raises(ldap.UNWILLING_TO_PERFORM): + rootdn_bind(topology_st.standalone, uri=uri) + + # Change the denied host so root DN bind succeeds + rand_host = 'i.dont.exist.{}'.format(uuid.uuid4()) + plugin.apply_mods([(ldap.MOD_REPLACE, 'rootdn-deny-host', rand_host)]) + + attr_updated = 0 + for i in range(0, timeout): + if (plugin.get_deny_host() == rand_host): + attr_updated = 1 + break + else: + time.sleep(.5) + + if not attr_updated : + raise Exception ("rootdn-deny-host was not updated") + + # Bind as Root DN - should succeed + rootdn_bind(topology_st.standalone, uri=uri) + + +def test_rootdn_access_allowed_ip(topology_st, rootdn_setup, rootdn_cleanup, timeout=5): + """Test allowed ip feature + + :id: a0ef30e5-538b-46fa-9762-01a4435a15e4 + :setup: Standalone instance, rootdn plugin set up + :steps: + 1. Set allowed ip to 255.255.255.255 - blocks the Root DN + 2. Bind as Root DN + 3. Allow localhost + 4. Bind as Root DN + :expectedresults: + 1. Success + 2. Should fail + 3. Success + 4. Success + """ + + log.info('Running test_rootdn_access_allowed_ip...') + + # Set allowed ip to 255.255.255.255 - blocks the Root DN + plugin.add_allow_ip('255.255.255.255') + + attr_updated = 0 + for i in range(0, timeout): + if ('255.255.255.255' in plugin.get_allow_ip()): + attr_updated = 1 + break + else: + time.sleep(.5) + + if not attr_updated : + raise Exception ("rootdn-allow-ip was not updated") + + # Bind as Root DN - should fail + uri = 'ldap://{}:{}'.format(localhost, topology_st.standalone.port) + with pytest.raises(ldap.UNWILLING_TO_PERFORM): + rootdn_bind(topology_st.standalone, uri=uri) + + # Allow localhost + plugin.add_allow_ip('127.0.0.1') + plugin.add_allow_ip('::1') + + attr_updated = 0 + for i in range(0, timeout): + if ('127.0.0.1' in plugin.get_allow_ip()): + attr_updated = 1 + break + else: + time.sleep(.5) + + if not attr_updated : + raise Exception ("rootdn-allow-ip was not updated") + + # Bind as Root DN - should succeed + rootdn_bind(topology_st.standalone, uri=uri) + + +def test_rootdn_access_allowed_host(topology_st, rootdn_setup, rootdn_cleanup, timeout=5): + """Test allowed host feature + + :id: a0ef30e5-538b-46fa-9762-01a4435a15e5 + :setup: Standalone instance, rootdn plugin set up + :steps: + 1. Set allowed host to an unknown host - blocks the Root DN + 2. Bind as Root DN + 3. Allow localhost + 4. Bind as Root DN + :expectedresults: + 1. Success + 2. Should fail + 3. Success + 4. 
Success + """ + + log.info('Running test_rootdn_access_allowed_host...') + + # Set allowed host to an unknown host - blocks the Root DN + rand_host = 'i.dont.exist.{}'.format(uuid.uuid4()) + plugin.add_allow_host(rand_host) + + attr_updated = 0 + for i in range(0, timeout): + if (str(plugin.get_allow_host()) == rand_host): + attr_updated = 1 + break + else: + time.sleep(.5) + + if not attr_updated : + raise Exception ("rootdn-allow-host was not updated") + + # Bind as Root DN - should fail + uri = 'ldap://{}:{}'.format(localhost, topology_st.standalone.port) + with pytest.raises(ldap.UNWILLING_TO_PERFORM): + rootdn_bind(topology_st.standalone, uri=uri) + + # Allow localhost + plugin.remove_all_allow_host() + plugin.add_allow_host(localhost) + if hostname != localhost: + plugin.add_allow_host(hostname) + + attr_updated = 0 + for i in range(0, timeout): + if (str(plugin.get_allow_host()) == hostname) or (str(plugin.get_allow_host()) == localhost): + attr_updated = 1 + break + else: + time.sleep(.5) + + if not attr_updated : + raise Exception ("rootdn-allow-host was not updated") + + # Bind as Root DN - should succeed + rootdn_bind(topology_st.standalone, uri=uri) + + +def test_rootdn_config_validate(topology_st, rootdn_setup, rootdn_cleanup): + """Test plugin configuration validation + + :id: a0ef30e5-538b-46fa-9762-01a4435a15e6 + :setup: Standalone instance, rootdn plugin set up + :steps: + 1. Replace 'rootdn-open-time' with '0000' + 2. Add 'rootdn-open-time': '0000' and 'rootdn-open-time': '0001' + 3. Replace 'rootdn-open-time' with '-1' and 'rootdn-close-time' with '0000' + 4. Replace 'rootdn-open-time' with '2400' and 'rootdn-close-time' with '0000' + 5. Replace 'rootdn-open-time' with 'aaaaa' and 'rootdn-close-time' with '0000' + 6. Replace 'rootdn-close-time' with '0000' + 7. Add 'rootdn-close-time': '0000' and 'rootdn-close-time': '0001' + 8. Replace 'rootdn-open-time' with '0000' and 'rootdn-close-time' with '-1' + 9. Replace 'rootdn-open-time' with '0000' and 'rootdn-close-time' with '2400' + 10. Replace 'rootdn-open-time' with '0000' and 'rootdn-close-time' with 'aaaaa' + 11. Add 'rootdn-days-allowed': 'Mon' and 'rootdn-days-allowed': 'Tue' + 12. Replace 'rootdn-days-allowed' with 'Mon1' + 13. Replace 'rootdn-days-allowed' with 'Tue, Mon1' + 14. Replace 'rootdn-days-allowed' with 'm111m' + 15. Replace 'rootdn-days-allowed' with 'Gur' + 16. Replace 'rootdn-allow-ip' with '12.12.Z.12' + 17. Replace 'rootdn-allow-ip' with '123.234.345.456' + 18. Replace 'rootdn-allow-ip' with ':::' + 19. Replace 'rootdn-deny-ip' with '12.12.Z.12' + 20. Replace 'rootdn-deny-ip' with '123.234.345.456' + 21. Replace 'rootdn-deny-ip' with ':::' + 22. Replace 'rootdn-allow-host' with 'host._.com' + 23. Replace 'rootdn-deny-host' with 'host.####.com' + :expectedresults: + 1. Should fail + 2. Should fail + 3. Should fail + 4. Should fail + 5. Should fail + 6. Should fail + 7. Should fail + 8. Should fail + 9. Should fail + 10. Should fail + 11. Should fail + 12. Should fail + 13. Should fail + 14. Should fail + 15. Should fail + 16. Should fail + 17. Should fail + 18. Should fail + 19. Should fail + 20. Should fail + 21. Should fail + 22. Should fail + 23. 
Should fail + """ + + # Test invalid values for all settings + with pytest.raises(ldap.UNWILLING_TO_PERFORM): + log.info('Add just "rootdn-open-time"') + plugin.apply_mods([(ldap.MOD_REPLACE, 'rootdn-open-time', '0000')]) + + log.info('Add multiple "rootdn-open-time"') + plugin.apply_mods([(ldap.MOD_ADD, 'rootdn-open-time', '0000'), + (ldap.MOD_ADD, 'rootdn-open-time', '0001')]) + + log.info('Add invalid "rootdn-open-time" -1 ') + plugin.apply_mods([(ldap.MOD_REPLACE, 'rootdn-open-time', '-1'), + (ldap.MOD_REPLACE, 'rootdn-close-time', '0000')]) + + log.info('Add invalid "rootdn-open-time" 2400') + plugin.apply_mods([(ldap.MOD_REPLACE, 'rootdn-open-time', '2400'), + (ldap.MOD_REPLACE, 'rootdn-close-time', '0000')]) + + log.info('Add invalid "rootdn-open-time" aaaaa') + plugin.apply_mods([(ldap.MOD_REPLACE, 'rootdn-open-time','aaaaa'), + (ldap.MOD_REPLACE, 'rootdn-close-time', '0000')]) + + # Test rootdn-close-time + log.info('Add just "rootdn-close-time"') + plugin.apply_mods([(ldap.MOD_REPLACE, 'rootdn-close-time', '0000')]) + + log.info('Add multiple "rootdn-close-time"') + plugin.apply_mods([(ldap.MOD_ADD, 'rootdn-close-time', '0000'), + (ldap.MOD_ADD, 'rootdn-close-time', '0001')]) + + log.info('Add invalid "rootdn-close-time" -1 ') + plugin.apply_mods([(ldap.MOD_REPLACE, 'rootdn-open-time', '0000'), + (ldap.MOD_REPLACE, 'rootdn-close-time', '-1')]) + + log.info('Add invalid "rootdn-close-time" 2400') + plugin.apply_mods([(ldap.MOD_REPLACE, 'rootdn-open-time', '0000'), + (ldap.MOD_REPLACE, 'rootdn-close-time', '2400')]) + + log.info('Add invalid "rootdn-open-time" aaaaa') + plugin.apply_mods([(ldap.MOD_REPLACE, 'rootdn-open-time','0000'), + (ldap.MOD_REPLACE, 'rootdn-close-time','aaaaa')]) + + # Test days allowed + log.info('Add multiple "rootdn-days-allowed"') + plugin.apply_mods([(ldap.MOD_ADD, 'rootdn-days-allowed', 'Mon'), + (ldap.MOD_ADD, 'rootdn-days-allowed', 'Tue')]) + + log.info('Add invalid "rootdn-days-allowed"') + plugin.apply_mods([(ldap.MOD_REPLACE, 'rootdn-days-allowed', 'Mon1')]) + plugin.apply_mods([(ldap.MOD_REPLACE, 'rootdn-days-allowed', 'Tue, Mon1')]) + plugin.apply_mods([(ldap.MOD_REPLACE, 'rootdn-days-allowed', 'm111m')]) + plugin.apply_mods([(ldap.MOD_REPLACE, 'rootdn-days-allowed', 'Gur')]) + + # Test allow ips + log.info('Add invalid "rootdn-allow-ip"') + plugin.apply_mods([(ldap.MOD_REPLACE, 'rootdn-allow-ip', '12.12.Z.12')]) + plugin.apply_mods([(ldap.MOD_REPLACE, 'rootdn-allow-ip', '123.234.345.456')]) + plugin.apply_mods([(ldap.MOD_REPLACE, 'rootdn-allow-ip', ':::')]) + + # Test deny ips + log.info('Add invalid "rootdn-deny-ip"') + plugin.apply_mods([(ldap.MOD_REPLACE, 'rootdn-deny-ip', '12.12.Z.12')]) + plugin.apply_mods([(ldap.MOD_REPLACE, 'rootdn-deny-ip', '123.234.345.456')]) + plugin.apply_mods([(ldap.MOD_REPLACE, 'rootdn-deny-ip', ':::')]) + + # Test allow hosts + log.info('Add invalid "rootdn-allow-host"') + plugin.apply_mods([(ldap.MOD_REPLACE, 'rootdn-allow-host', 'host._.com')]) + + # Test deny hosts + log.info('Add invalid "rootdn-deny-host"') + plugin.apply_mods([(ldap.MOD_REPLACE, 'rootdn-deny-host', 'host.####.com')]) + + +if __name__ == '__main__': + # Run isolated + # -s for DEBUG mode + CURRENT_FILE = os.path.realpath(__file__) + pytest.main("-s %s" % CURRENT_FILE) diff --git a/dirsrvtests/tests/suites/psearch/__init__.py b/dirsrvtests/tests/suites/psearch/__init__.py new file mode 100644 index 0000000..a928609 --- /dev/null +++ b/dirsrvtests/tests/suites/psearch/__init__.py @@ -0,0 +1,3 @@ +""" + :Requirement: 389-ds-base: Persistent 
Search control +""" diff --git a/dirsrvtests/tests/suites/psearch/psearch_test.py b/dirsrvtests/tests/suites/psearch/psearch_test.py new file mode 100644 index 0000000..e80f272 --- /dev/null +++ b/dirsrvtests/tests/suites/psearch/psearch_test.py @@ -0,0 +1,74 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2016 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. +# --- END COPYRIGHT BLOCK --- +# +import pytest +from lib389._constants import DEFAULT_SUFFIX +from lib389.topologies import topology_st +from lib389.idm.group import Groups +import ldap +from ldap.controls.psearch import PersistentSearchControl,EntryChangeNotificationControl + +pytestmark = pytest.mark.tier1 + +def _run_psearch(inst, msg_id): + """Run a search with EntryChangeNotificationControl""" + + results = [] + while True: + try: + _, data, _, _, _, _ = inst.result4(msgid=msg_id, all=0, timeout=1.0, add_ctrls=1, add_intermediates=1, + resp_ctrl_classes={EntryChangeNotificationControl.controlType:EntryChangeNotificationControl}) + # See if there are any entry changes + for dn, entry, srv_ctrls in data: + ecn_ctrls = filter(lambda c: c.controlType == EntryChangeNotificationControl.controlType, srv_ctrls) + if ecn_ctrls: + inst.log.info('%s has changed!' % dn) + results.append(dn) + except ldap.TIMEOUT: + # There are no more results, so we timeout. + inst.log.info('No more results') + return results + + +def test_psearch(topology_st): + """Check basic Persistent Search control functionality + + :id: 4b395ef4-c3ff-49d1-a680-b9fdffa633bd + :setup: Standalone instance + :steps: + 1. Run an extended search with a Persistent Search control + 2. Create a new group (could be any entry) + 3. Run an extended search with a Persistent Search control again + 4. Check that entry DN is in the result + :expectedresults: + 1. Operation should be successful + 2. Group should be successfully created + 3. Operation should be successful + 4. Entry DN should be in the result + """ + + # Create the search control + psc = PersistentSearchControl() + # do a search extended with the control + msg_id = topology_st.standalone.search_ext(base=DEFAULT_SUFFIX, scope=ldap.SCOPE_SUBTREE, attrlist=['*'], serverctrls=[psc]) + # Get the result for the message id with result4 + _run_psearch(topology_st.standalone, msg_id) + # Change an entry / add one + groups = Groups(topology_st.standalone, DEFAULT_SUFFIX) + group = groups.create(properties={'cn': 'group1', 'description': 'testgroup'}) + # Now run the result again and see what's there. + results = _run_psearch(topology_st.standalone, msg_id) + # assert our group is in the changeset. + assert(group.dn == results[0]) + + +if __name__ == '__main__': + # Run isolated + # -s for DEBUG mode + CURRENT_FILE = os.path.realpath(__file__) + pytest.main("-s %s" % CURRENT_FILE) diff --git a/dirsrvtests/tests/suites/pwp_storage/storage_test.py b/dirsrvtests/tests/suites/pwp_storage/storage_test.py new file mode 100644 index 0000000..888342f --- /dev/null +++ b/dirsrvtests/tests/suites/pwp_storage/storage_test.py @@ -0,0 +1,164 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2019 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. 
+# --- END COPYRIGHT BLOCK ---- + + +""" +This file contains the test for password storage scheme +""" + +import os +import subprocess +import shutil +import pytest + +from lib389.topologies import topology_st as topo +from lib389.idm.user import UserAccounts, UserAccount +from lib389._constants import DEFAULT_SUFFIX +from lib389.config import Config +from lib389.password_plugins import PBKDF2Plugin, SSHA512Plugin + +pytestmark = pytest.mark.tier1 + + +def user_config(topo, field_value): + """ + Will set storage schema and create user. + """ + Config(topo.standalone).replace("passwordStorageScheme", field_value) + user = UserAccounts(topo.standalone, DEFAULT_SUFFIX).create_test_user() + user.set('userpassword', 'ItsMeAnuj') + return user + + +LIST_FOR_PARAMETERIZATION = ["CRYPT", "SHA", "SSHA", "SHA256", "SSHA256", + "SHA384", "SSHA384", "SHA512", "SSHA512", "MD5", "PBKDF2_SHA256"] + + +@pytest.mark.parametrize("value", LIST_FOR_PARAMETERIZATION, ids=LIST_FOR_PARAMETERIZATION) +def test_check_password_scheme(topo, value): + """Check all password scheme. + + :id: 196bccfc-33a6-11ea-a2a5-8c16451d917b + :parametrized: yes + :setup: Standalone + :steps: + 1. Change password scheme and create user with password. + 2. check password scheme is set . + 3. Delete user + :expected results: + 1. Pass + 2. Pass + 3. Pass + """ + user = user_config(topo, value) + assert '{' + f'{value.lower()}' + '}' in \ + UserAccount(topo.standalone, user.dn).get_attr_val_utf8('userpassword').lower() + user.delete() + + +def test_clear_scheme(topo): + """Check clear password scheme. + + :id: 2420aadc-33a6-11ea-b59a-8c16451d917b + :setup: Standalone + :steps: + 1. Change password scheme and create user with password. + 2. check password scheme is set . + 3. Delete user + :expected results: + 1. Pass + 2. Pass + 3. Pass + """ + user = user_config(topo, "CLEAR") + assert "ItsMeAnuj" in UserAccount(topo.standalone, user.dn).get_attr_val_utf8('userpassword') + user.delete() + + +def test_check_two_scheme(topo): + """Check password scheme SHA and CRYPT + + :id: 2b677f1e-33a6-11ea-a371-8c16451d917b + :setup: Standalone + :steps: + 1. Change password scheme and create user with password. + 2. check password scheme is set . + 3. Delete user + :expected results: + 1. Pass + 2. Pass + 3. Pass + """ + for schema, value in [("nsslapd-rootpwstoragescheme", "SHA"), + ("passwordStorageScheme", "CRYPT")]: + Config(topo.standalone).replace(schema, value) + topo.standalone.restart() + user = UserAccounts(topo.standalone, DEFAULT_SUFFIX).create_test_user() + user.set('userpassword', 'ItsMeAnuj') + assert '{' + f'{"CRYPT".lower()}' + '}' \ + in UserAccount(topo.standalone, user.dn).get_attr_val_utf8('userpassword').lower() + user.delete() + + +def test_check_pbkdf2_sha256(topo): + """Check password scheme PBKDF2_SHA256. + + :id: 31612e7e-33a6-11ea-a750-8c16451d917b + :setup: Standalone + :steps: + 1. Try to delete PBKDF2_SHA256. + 2. Should not deleted PBKDF2_SHA256 and server should up. + :expected results: + 1. Pass + 2. Pass + """ + value = 'PBKDF2_SHA256' + user = user_config(topo, value) + assert '{' + f'{value.lower()}' + '}' in \ + UserAccount(topo.standalone, user.dn).get_attr_val_utf8('userpassword').lower() + plg = PBKDF2Plugin(topo.standalone) + plg._protected = False + plg.delete() + topo.standalone.restart() + assert Config(topo.standalone).get_attr_val_utf8('passwordStorageScheme') == 'PBKDF2_SHA256' + assert topo.standalone.status() + user.delete() + + +def test_check_ssha512(topo): + """Check password scheme SSHA512. 
+ + :id: 9db023d2-33a1-11ea-b68c-8c16451d917b + :setup: Standalone + :steps: + 1. Try to delete SSHA512Plugin. + 2. Should deleted SSHA512Plugin and server should not up. + 3. Restore dse file to recover + :expected results: + 1. Pass + 2. Pass + 3. Pass + """ + value = 'SSHA512' + config_dir = topo.standalone.get_config_dir() + user = user_config(topo, value) + assert '{' + f'{value.lower()}' + '}' in \ + UserAccount(topo.standalone, user.dn).get_attr_val_utf8('userpassword').lower() + plg = SSHA512Plugin(topo.standalone) + plg._protected = False + plg.delete() + with pytest.raises(subprocess.CalledProcessError): + topo.standalone.restart() + shutil.copy(config_dir + '/dse.ldif.startOK', config_dir + '/dse.ldif') + topo.standalone.restart() + user.delete() + + +if __name__ == "__main__": + CURRENT_FILE = os.path.realpath(__file__) + pytest.main("-s -v %s" % CURRENT_FILE) diff --git a/dirsrvtests/tests/suites/referint_plugin/rename_test.py b/dirsrvtests/tests/suites/referint_plugin/rename_test.py new file mode 100644 index 0000000..43307b5 --- /dev/null +++ b/dirsrvtests/tests/suites/referint_plugin/rename_test.py @@ -0,0 +1,179 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2019 William Brown +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. +# --- END COPYRIGHT BLOCK --- +# + +import pytest +from lib389._constants import DEFAULT_SUFFIX +from lib389.topologies import topology_m2 + +from lib389.replica import ReplicationManager +from lib389.idm.group import Groups +from lib389.idm.user import nsUserAccounts +from lib389.idm.organizationalunit import OrganizationalUnit as OrganisationalUnit + +from lib389.plugins import AutoMembershipPlugin, ReferentialIntegrityPlugin, AutoMembershipDefinitions, MemberOfPlugin + +pytestmark = pytest.mark.tier2 + +UCOUNT = 400 + +def _enable_plugins(inst, group_dn): + # Enable automember + amp = AutoMembershipPlugin(inst) + amp.enable() + + # Create the automember definition + automembers = AutoMembershipDefinitions(inst) + + automember = automembers.create(properties={ + 'cn': 'testgroup_definition', + 'autoMemberScope': DEFAULT_SUFFIX, + 'autoMemberFilter': 'objectclass=nsAccount', + 'autoMemberDefaultGroup': group_dn, + 'autoMemberGroupingAttr': 'member:dn', + }) + + # Enable MemberOf + mop = MemberOfPlugin(inst) + mop.enable() + + # Enable referint + rip = ReferentialIntegrityPlugin(inst) + # We only need to enable the plugin, the default configuration is sane and + # correctly coveres member as an enforced attribute. + rip.enable() + + # Restart to make sure it's enabled and good to go. + inst.restart() + +def test_rename_large_subtree(topology_m2): + """ + A report stated that the following configuration would lead + to an operation failure: + + ou=int,ou=account,dc=... + ou=s1,ou=int,ou=account,dc=... + ou=s2,ou=int,ou=account,dc=... + + rename ou=s1 to re-parent to ou=account, leaving: + + ou=int,ou=account,dc=... + ou=s1,ou=account,dc=... + ou=s2,ou=account,dc=... + + The ou=s1 if it has < 100 entries below, is able to be reparented. + + If ou=s1 has > 400 entries, it fails. + + Other conditions was the presence of referential integrity - so one would + assume that all users under s1 are a member of some group external to this. + + :id: 5915c38d-b3c2-4b7c-af76-8a1e002e27f7 + + :setup: standalone instance + + :steps: 1. Enable automember plugin + 2. Add UCOUNT users, and ensure they are members of a group. + 3. Enable refer-int plugin + 4. 
Move ou=s1 to a new parent + + :expectedresults: + 1. The plugin is enabled + 2. The users are members of the group + 3. The plugin is enabled + 4. The rename operation of ou=s1 succeeds + """ + + st = topology_m2.ms["master1"] + m2 = topology_m2.ms["master2"] + + # Create a default group + gps = Groups(st, DEFAULT_SUFFIX) + # Keep the group so we can get it's DN out. + group = gps.create(properties={ + 'cn': 'default_group' + }) + + _enable_plugins(st, group.dn) + _enable_plugins(m2, group.dn) + + # Now unlike normal, we bypass the plural-create method, because we need control + # over the exact DN of the OU to create. + # Create the ou=account + + # We don't need to set a DN here because ... + ou_account = OrganisationalUnit(st) + + # It's set in the .create step. + ou_account.create( + basedn = DEFAULT_SUFFIX, + properties={ + 'ou': 'account' + }) + # create the ou=int,ou=account + ou_int = OrganisationalUnit(st) + ou_int.create( + basedn = ou_account.dn, + properties={ + 'ou': 'int' + }) + # Create the ou=s1,ou=int,ou=account + ou_s1 = OrganisationalUnit(st) + ou_s1.create( + basedn = ou_int.dn, + properties={ + 'ou': 's1' + }) + + # Pause replication + repl = ReplicationManager(DEFAULT_SUFFIX) + repl.disable_to_master(m2, [st, ]) + + # Create the users 1 -> UCOUNT in ou=s1 + nsu = nsUserAccounts(st, basedn=ou_s1.dn, rdn=None) + for i in range(1000, 1000 + UCOUNT): + nsu.create_test_user(uid=i) + + # Enable replication + + repl.enable_to_master(m2, [st, ]) + + # Assert they are in the group as we expect + members = group.get_attr_vals_utf8('member') + assert len(members) == UCOUNT + + # Wait for replication + repl.wait_for_replication(st, m2, timeout=60) + + for i in range(0, 5): + # Move ou=s1 to ou=account as parent. We have to provide the rdn, + # even though it's not changing. + ou_s1.rename('ou=s1', newsuperior=ou_account.dn) + + members = group.get_attr_vals_utf8('member') + assert len(members) == UCOUNT + # Check that we really did refer-int properly, and ou=int is not in the members. + for member in members: + assert 'ou=int' not in member + + # Now move it back + ou_s1.rename('ou=s1', newsuperior=ou_int.dn) + members = group.get_attr_vals_utf8('member') + assert len(members) == UCOUNT + for member in members: + assert 'ou=int' in member + + # Check everythig on the other side is good. 
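+    # "The other side" here is master2: once replication has converged, the
+    # replicated copy of default_group is re-read from master2 and its member
+    # list is checked the same way as it was on master1.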
+ repl.wait_for_replication(st, m2, timeout=60) + + group2 = Groups(m2, DEFAULT_SUFFIX).get('default_group') + + members = group2.get_attr_vals_utf8('member') + assert len(members) == UCOUNT + for member in members: + assert 'ou=int' in member diff --git a/dirsrvtests/tests/suites/replication/__init__.py b/dirsrvtests/tests/suites/replication/__init__.py new file mode 100644 index 0000000..2b5d493 --- /dev/null +++ b/dirsrvtests/tests/suites/replication/__init__.py @@ -0,0 +1,21 @@ +""" + :Requirement: 389-ds-base: Replication +""" +import time +import ldap +from lib389._constants import DEFAULT_SUFFIX + + +def get_repl_entries(topo, entry_name, attr_list): + """Get a list of test entries from all masters""" + + entries_list = [] + + time.sleep(10) + + for inst in topo.all_insts.values(): + entries = inst.search_s(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE, "uid={}".format(entry_name), attr_list) + entries_list += entries + + return entries_list + diff --git a/dirsrvtests/tests/suites/replication/acceptance_test.py b/dirsrvtests/tests/suites/replication/acceptance_test.py new file mode 100644 index 0000000..c8e0a4c --- /dev/null +++ b/dirsrvtests/tests/suites/replication/acceptance_test.py @@ -0,0 +1,507 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2017 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. +# --- END COPYRIGHT BLOCK --- +# +import pytest +from lib389.replica import Replicas +from lib389.tasks import * +from lib389.utils import * +from lib389.topologies import topology_m4 as topo_m4 +from . import get_repl_entries +from lib389.idm.user import UserAccount +from lib389.replica import ReplicationManager +from lib389._constants import * + +pytestmark = pytest.mark.tier0 + +TEST_ENTRY_NAME = 'mmrepl_test' +TEST_ENTRY_DN = 'uid={},{}'.format(TEST_ENTRY_NAME, DEFAULT_SUFFIX) +NEW_SUFFIX_NAME = 'test_repl' +NEW_SUFFIX = 'o={}'.format(NEW_SUFFIX_NAME) +NEW_BACKEND = 'repl_base' + +DEBUGGING = os.getenv("DEBUGGING", default=False) +if DEBUGGING: + logging.getLogger(__name__).setLevel(logging.DEBUG) +else: + logging.getLogger(__name__).setLevel(logging.INFO) +log = logging.getLogger(__name__) + + +@pytest.fixture(scope="function") +def create_entry(topo_m4, request): + """Add test entry to master1""" + + log.info('Adding entry {}'.format(TEST_ENTRY_DN)) + + test_user = UserAccount(topo_m4.ms["master1"], TEST_ENTRY_DN) + if test_user.exists(): + log.info('Deleting entry {}'.format(TEST_ENTRY_DN)) + test_user.delete() + test_user.create(properties={ + 'uid': TEST_ENTRY_NAME, + 'cn': TEST_ENTRY_NAME, + 'sn': TEST_ENTRY_NAME, + 'userPassword': TEST_ENTRY_NAME, + 'uidNumber' : '1000', + 'gidNumber' : '2000', + 'homeDirectory' : '/home/mmrepl_test', + }) + +@pytest.fixture(scope="function") +def new_suffix(topo_m4, request): + """Add a new suffix and enable a replication on it""" + + for num in range(1, 5): + log.info('Adding suffix:{} and backend: {} to master{}'.format(NEW_SUFFIX, NEW_BACKEND, num)) + topo_m4.ms["master{}".format(num)].backend.create(NEW_SUFFIX, {BACKEND_NAME: NEW_BACKEND}) + topo_m4.ms["master{}".format(num)].mappingtree.create(NEW_SUFFIX, NEW_BACKEND) + + try: + topo_m4.ms["master{}".format(num)].add_s(Entry((NEW_SUFFIX, { + 'objectclass': 'top', + 'objectclass': 'organization', + 'o': NEW_SUFFIX_NAME, + 'description': NEW_SUFFIX_NAME + }))) + except ldap.LDAPError as e: + log.error('Failed to add suffix ({}): error ({})'.format(NEW_SUFFIX, e.message['desc'])) + raise + + def fin(): + for num in range(1, 
5): + log.info('Deleting suffix:{} and backend: {} from master{}'.format(NEW_SUFFIX, NEW_BACKEND, num)) + topo_m4.ms["master{}".format(num)].mappingtree.delete(NEW_SUFFIX) + topo_m4.ms["master{}".format(num)].backend.delete(NEW_SUFFIX) + + request.addfinalizer(fin) + + +def test_add_entry(topo_m4, create_entry): + """Check that entries are replicated after add operation + + :id: 024250f1-5f7e-4f3b-a9f5-27741e6fd405 + :setup: Four masters replication setup, an entry + :steps: + 1. Check entry on all other masters + :expectedresults: + 1. The entry should be replicated to all masters + """ + + entries = get_repl_entries(topo_m4, TEST_ENTRY_NAME, ["uid"]) + assert all(entries), "Entry {} wasn't replicated successfully".format(TEST_ENTRY_DN) + + +def test_modify_entry(topo_m4, create_entry): + """Check that entries are replicated after modify operation + + :id: 36764053-622c-43c2-a132-d7a3ab7d9aaa + :setup: Four masters replication setup, an entry + :steps: + 1. Modify the entry on master1 - add attribute + 2. Wait for replication to happen + 3. Check entry on all other masters + 4. Modify the entry on master1 - replace attribute + 5. Wait for replication to happen + 6. Check entry on all other masters + 7. Modify the entry on master1 - delete attribute + 8. Wait for replication to happen + 9. Check entry on all other masters + :expectedresults: + 1. Attribute should be successfully added + 2. Some time should pass + 3. The change should be present on all masters + 4. Attribute should be successfully replaced + 5. Some time should pass + 6. The change should be present on all masters + 4. Attribute should be successfully deleted + 8. Some time should pass + 9. The change should be present on all masters + """ + + log.info('Modifying entry {} - add operation'.format(TEST_ENTRY_DN)) + + test_user = UserAccount(topo_m4.ms["master1"], TEST_ENTRY_DN) + test_user.add('mail', '{}@redhat.com'.format(TEST_ENTRY_NAME)) + time.sleep(1) + + all_user = topo_m4.all_get_dsldapobject(TEST_ENTRY_DN, UserAccount) + for u in all_user: + assert "{}@redhat.com".format(TEST_ENTRY_NAME) in u.get_attr_vals_utf8('mail') + + log.info('Modifying entry {} - replace operation'.format(TEST_ENTRY_DN)) + test_user.replace('mail', '{}@greenhat.com'.format(TEST_ENTRY_NAME)) + time.sleep(1) + + all_user = topo_m4.all_get_dsldapobject(TEST_ENTRY_DN, UserAccount) + for u in all_user: + assert "{}@greenhat.com".format(TEST_ENTRY_NAME) in u.get_attr_vals_utf8('mail') + + log.info('Modifying entry {} - delete operation'.format(TEST_ENTRY_DN)) + test_user.remove('mail', '{}@greenhat.com'.format(TEST_ENTRY_NAME)) + time.sleep(1) + + all_user = topo_m4.all_get_dsldapobject(TEST_ENTRY_DN, UserAccount) + for u in all_user: + assert "{}@greenhat.com".format(TEST_ENTRY_NAME) not in u.get_attr_vals_utf8('mail') + + +def test_delete_entry(topo_m4, create_entry): + """Check that entry deletion is replicated after delete operation + + :id: 18437262-9d6a-4b98-a47a-6182501ab9bc + :setup: Four masters replication setup, an entry + :steps: + 1. Delete the entry from master1 + 2. Check entry on all other masters + :expectedresults: + 1. The entry should be deleted + 2. 
The change should be present on all masters + """ + + log.info('Deleting entry {} during the test'.format(TEST_ENTRY_DN)) + topo_m4.ms["master1"].delete_s(TEST_ENTRY_DN) + + entries = get_repl_entries(topo_m4, TEST_ENTRY_NAME, ["uid"]) + assert not entries, "Entry deletion {} wasn't replicated successfully".format(TEST_ENTRY_DN) + + +@pytest.mark.parametrize("delold", [0, 1]) +def test_modrdn_entry(topo_m4, create_entry, delold): + """Check that entries are replicated after modrdn operation + + :id: 02558e6d-a745-45ae-8d88-34fe9b16adc9 + :parametrized: yes + :setup: Four masters replication setup, an entry + :steps: + 1. Make modrdn operation on entry on master1 with both delold 1 and 0 + 2. Check entry on all other masters + :expectedresults: + 1. Modrdn operation should be successful + 2. The change should be present on all masters + """ + + newrdn_name = 'newrdn' + newrdn_dn = 'uid={},{}'.format(newrdn_name, DEFAULT_SUFFIX) + log.info('Modify entry RDN {}'.format(TEST_ENTRY_DN)) + try: + topo_m4.ms["master1"].modrdn_s(TEST_ENTRY_DN, 'uid={}'.format(newrdn_name), delold) + except ldap.LDAPError as e: + log.error('Failed to modrdn entry (%s): error (%s)' % (TEST_ENTRY_DN, + e.message['desc'])) + raise e + + try: + entries_new = get_repl_entries(topo_m4, newrdn_name, ["uid"]) + assert all(entries_new), "Entry {} wasn't replicated successfully".format(newrdn_name) + if delold == 0: + entries_old = get_repl_entries(topo_m4, TEST_ENTRY_NAME, ["uid"]) + assert all(entries_old), "Entry with old rdn {} wasn't replicated successfully".format(TEST_ENTRY_DN) + else: + entries_old = get_repl_entries(topo_m4, TEST_ENTRY_NAME, ["uid"]) + assert not entries_old, "Entry with old rdn {} wasn't removed in replicas successfully".format( + TEST_ENTRY_DN) + finally: + log.info('Remove entry with new RDN {}'.format(newrdn_dn)) + topo_m4.ms["master1"].delete_s(newrdn_dn) + + +def test_modrdn_after_pause(topo_m4): + """Check that changes are properly replicated after replica pause + + :id: 6271dc9c-a993-4a9e-9c6d-05650cdab282 + :setup: Four masters replication setup, an entry + :steps: + 1. Pause all replicas + 2. Make modrdn operation on entry on master1 + 3. Resume all replicas + 4. Wait for replication to happen + 5. Check entry on all other masters + :expectedresults: + 1. Replicas should be paused + 2. Modrdn operation should be successful + 3. Replicas should be resumed + 4. Some time should pass + 5. 
The change should be present on all masters + """ + + newrdn_name = 'newrdn' + newrdn_dn = 'uid={},{}'.format(newrdn_name, DEFAULT_SUFFIX) + + log.info('Adding entry {}'.format(TEST_ENTRY_DN)) + try: + topo_m4.ms["master1"].add_s(Entry((TEST_ENTRY_DN, { + 'objectclass': 'top person'.split(), + 'objectclass': 'organizationalPerson', + 'objectclass': 'inetorgperson', + 'cn': TEST_ENTRY_NAME, + 'sn': TEST_ENTRY_NAME, + 'uid': TEST_ENTRY_NAME + }))) + except ldap.LDAPError as e: + log.error('Failed to add entry (%s): error (%s)' % (TEST_ENTRY_DN, + e.message['desc'])) + raise e + + log.info('Pause all replicas') + topo_m4.pause_all_replicas() + + log.info('Modify entry RDN {}'.format(TEST_ENTRY_DN)) + try: + topo_m4.ms["master1"].modrdn_s(TEST_ENTRY_DN, 'uid={}'.format(newrdn_name)) + except ldap.LDAPError as e: + log.error('Failed to modrdn entry (%s): error (%s)' % (TEST_ENTRY_DN, + e.message['desc'])) + raise e + + log.info('Resume all replicas') + topo_m4.resume_all_replicas() + + log.info('Wait for replication to happen') + time.sleep(3) + + try: + entries_new = get_repl_entries(topo_m4, newrdn_name, ["uid"]) + assert all(entries_new), "Entry {} wasn't replicated successfully".format(newrdn_name) + finally: + log.info('Remove entry with new RDN {}'.format(newrdn_dn)) + topo_m4.ms["master1"].delete_s(newrdn_dn) + + +@pytest.mark.bz842441 +def test_modify_stripattrs(topo_m4): + """Check that we can modify nsds5replicastripattrs + + :id: f36abed8-e262-4f35-98aa-71ae55611aaa + :setup: Four masters replication setup + :steps: + 1. Modify nsds5replicastripattrs attribute on any agreement + 2. Search for the modified attribute + :expectedresults: It should be contain the value + 1. nsds5replicastripattrs should be successfully set + 2. The modified attribute should be the one we set + """ + + m1 = topo_m4.ms["master1"] + agreement = m1.agreement.list(suffix=DEFAULT_SUFFIX)[0].dn + attr_value = b'modifiersname modifytimestamp' + + log.info('Modify nsds5replicastripattrs with {}'.format(attr_value)) + m1.modify_s(agreement, [(ldap.MOD_REPLACE, 'nsds5replicastripattrs', [attr_value])]) + + log.info('Check nsds5replicastripattrs for {}'.format(attr_value)) + entries = m1.search_s(agreement, ldap.SCOPE_BASE, "objectclass=*", ['nsds5replicastripattrs']) + assert attr_value in entries[0].data['nsds5replicastripattrs'] + + +def test_new_suffix(topo_m4, new_suffix): + """Check that we can enable replication on a new suffix + + :id: d44a9ed4-26b0-4189-b0d0-b2b336ddccbd + :setup: Four masters replication setup, a new suffix + :steps: + 1. Enable replication on the new suffix + 2. Check if replication works + 3. Disable replication on the new suffix + :expectedresults: + 1. Replication on the new suffix should be enabled + 2. Replication should work + 3. Replication on the new suffix should be disabled + """ + m1 = topo_m4.ms["master1"] + m2 = topo_m4.ms["master2"] + + repl = ReplicationManager(NEW_SUFFIX) + + repl.create_first_master(m1) + + repl.join_master(m1, m2) + + repl.test_replication(m1, m2) + repl.test_replication(m2, m1) + + repl.remove_master(m1) + repl.remove_master(m2) + +def test_many_attrs(topo_m4, create_entry): + """Check a replication with many attributes (add and delete) + + :id: d540b358-f67a-43c6-8df5-7c74b3cb7523 + :setup: Four masters replication setup, a test entry + :steps: + 1. Add 10 new attributes to the entry + 2. Delete few attributes: one from the beginning, + two from the middle and one from the end + 3. 
Check that the changes were replicated in the right order + :expectedresults: + 1. The attributes should be successfully added + 2. Delete operations should be successful + 3. The changes should be replicated in the right order + """ + + m1 = topo_m4.ms["master1"] + add_list = ensure_list_bytes(map(lambda x: "test{}".format(x), range(10))) + delete_list = ensure_list_bytes(map(lambda x: "test{}".format(x), [0, 4, 7, 9])) + test_user = UserAccount(topo_m4.ms["master1"], TEST_ENTRY_DN) + + log.info('Modifying entry {} - 10 add operations'.format(TEST_ENTRY_DN)) + for add_name in add_list: + test_user.add('description', add_name) + + log.info('Check that everything was properly replicated after an add operation') + entries = get_repl_entries(topo_m4, TEST_ENTRY_NAME, ["description"]) + for entry in entries: + assert all(entry.getValues("description")[i] == add_name for i, add_name in enumerate(add_list)) + + log.info('Modifying entry {} - 4 delete operations for {}'.format(TEST_ENTRY_DN, str(delete_list))) + for delete_name in delete_list: + test_user.remove('description', delete_name) + + log.info('Check that everything was properly replicated after a delete operation') + entries = get_repl_entries(topo_m4, TEST_ENTRY_NAME, ["description"]) + for entry in entries: + for i, value in enumerate(entry.getValues("description")): + assert value == [name for name in add_list if name not in delete_list][i] + assert value not in delete_list + + +def test_double_delete(topo_m4, create_entry): + """Check that double delete of the entry doesn't crash server + + :id: 5b85a5af-df29-42c7-b6cb-965ec5aa478e + :feature: Multi master replication + :setup: Four masters replication setup, a test entry + :steps: 1. Delete the entry + 2. Delete the entry on the second master + 3. Check that server is alive + :expectedresults: Server hasn't crash + """ + + log.info('Deleting entry {} from master1'.format(TEST_ENTRY_DN)) + topo_m4.ms["master1"].delete_s(TEST_ENTRY_DN) + + log.info('Deleting entry {} from master2'.format(TEST_ENTRY_DN)) + try: + topo_m4.ms["master2"].delete_s(TEST_ENTRY_DN) + except ldap.NO_SUCH_OBJECT: + log.info("Entry {} wasn't found master2. It is expected.".format(TEST_ENTRY_DN)) + + log.info('Make searches to check if server is alive') + entries = get_repl_entries(topo_m4, TEST_ENTRY_NAME, ["uid"]) + assert not entries, "Entry deletion {} wasn't replicated successfully".format(TEST_ENTRY_DN) + + +def test_password_repl_error(topo_m4, create_entry): + """Check that error about userpassword replication is properly logged + + :id: d4f12dc0-cd2c-4b92-9b8d-d764a60f0698 + :feature: Multi master replication + :setup: Four masters replication setup, a test entry + :steps: 1. Change userpassword on master 1 + 2. Restart the servers to flush the logs + 3. 
Check the error log for an replication error + :expectedresults: We don't have a replication error in the error log + """ + + m1 = topo_m4.ms["master1"] + m2 = topo_m4.ms["master2"] + TEST_ENTRY_NEW_PASS = 'new_{}'.format(TEST_ENTRY_NAME) + + log.info('Clean the error log') + m2.deleteErrorLogs() + + log.info('Set replication loglevel') + m2.config.loglevel((ErrorLog.REPLICA,)) + + log.info('Modifying entry {} - change userpassword on master 2'.format(TEST_ENTRY_DN)) + test_user_m1 = UserAccount(topo_m4.ms["master1"], TEST_ENTRY_DN) + test_user_m2 = UserAccount(topo_m4.ms["master2"], TEST_ENTRY_DN) + test_user_m3 = UserAccount(topo_m4.ms["master3"], TEST_ENTRY_DN) + test_user_m4 = UserAccount(topo_m4.ms["master4"], TEST_ENTRY_DN) + + test_user_m1.set('userpassword', TEST_ENTRY_NEW_PASS) + + log.info('Restart the servers to flush the logs') + for num in range(1, 5): + topo_m4.ms["master{}".format(num)].restart(timeout=10) + + m1_conn = test_user_m1.bind(TEST_ENTRY_NEW_PASS) + m2_conn = test_user_m2.bind(TEST_ENTRY_NEW_PASS) + m3_conn = test_user_m3.bind(TEST_ENTRY_NEW_PASS) + m4_conn = test_user_m4.bind(TEST_ENTRY_NEW_PASS) + + log.info('Check the error log for the error with {}'.format(TEST_ENTRY_DN)) + assert not m2.ds_error_log.match('.*can.t add a change for uid={}.*'.format(TEST_ENTRY_NAME)) + + +def test_invalid_agmt(topo_m4): + """Test adding that an invalid agreement is properly rejected and does not crash the server + + :id: 92f10f46-1be1-49ca-9358-784359397bc2 + :setup: MMR with four masters + :steps: + 1. Add invalid agreement (nsds5ReplicaEnabled set to invalid value) + 2. Verify the server is still running + :expectedresults: + 1. Invalid repl agreement should be rejected + 2. Server should be still running + """ + m1 = topo_m4.ms["master1"] + + # Add invalid agreement (nsds5ReplicaEnabled set to invalid value) + AGMT_DN = 'cn=whatever,cn=replica,cn="dc=example,dc=com",cn=mapping tree,cn=config' + try: + invalid_props = {RA_ENABLED: 'True', # Invalid value + RA_SCHEDULE: '0001-2359 0123456'} + m1.agreement.create(suffix=DEFAULT_SUFFIX, host='localhost', port=389, properties=invalid_props) + except ldap.UNWILLING_TO_PERFORM: + m1.log.info('Invalid repl agreement correctly rejected') + except ldap.LDAPError as e: + m1.log.fatal('Got unexpected error adding invalid agreement: ' + str(e)) + assert False + else: + m1.log.fatal('Invalid agreement was incorrectly accepted by the server') + assert False + + # Verify the server is still running + try: + m1.simple_bind_s(DN_DM, PASSWORD) + except ldap.LDAPError as e: + m1.log.fatal('Failed to bind: ' + str(e)) + assert False + + +def test_warining_for_invalid_replica(topo_m4): + """Testing logs to indicate the inconsistency when configuration is performed. + + :id: dd689d03-69b8-4bf9-a06e-2acd19d5e2c8 + :setup: MMR with four masters + :steps: + 1. Setup nsds5ReplicaBackoffMin to 20 + 2. Setup nsds5ReplicaBackoffMax to 10 + :expectedresults: + 1. nsds5ReplicaBackoffMin should set to 20 + 2. An error should be generated and also logged in the error logs. 
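+
+        The inconsistency being tested is nsds5ReplicaBackoffMax (10) ending
+        up lower than nsds5ReplicaBackoffMin (20), which the server is
+        expected to reject and report in the error log.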
+ """ + replicas = Replicas(topo_m4.ms["master1"]) + replica = replicas.list()[0] + log.info('Set nsds5ReplicaBackoffMin to 20') + replica.set('nsds5ReplicaBackoffMin', '20') + with pytest.raises(ldap.UNWILLING_TO_PERFORM): + log.info('Set nsds5ReplicaBackoffMax to 10') + replica.set('nsds5ReplicaBackoffMax', '10') + log.info('Resetting configuration: nsds5ReplicaBackoffMin') + replica.remove_all('nsds5ReplicaBackoffMin') + log.info('Check the error log for the error') + assert topo_m4.ms["master1"].ds_error_log.match('.*nsds5ReplicaBackoffMax.*10.*invalid.*') + + +if __name__ == '__main__': + # Run isolated + # -s for DEBUG mode + CURRENT_FILE = os.path.realpath(__file__) + pytest.main("-s %s" % CURRENT_FILE) diff --git a/dirsrvtests/tests/suites/replication/cascading_test.py b/dirsrvtests/tests/suites/replication/cascading_test.py new file mode 100644 index 0000000..5bf4fcb --- /dev/null +++ b/dirsrvtests/tests/suites/replication/cascading_test.py @@ -0,0 +1,152 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2018 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. +# --- END COPYRIGHT BLOCK --- +# +import logging +import pytest +import os +import ldap +from lib389._constants import * +from lib389.replica import ReplicationManager +from lib389.plugins import MemberOfPlugin +from lib389.agreement import Agreements +from lib389.idm.user import UserAccount, TEST_USER_PROPERTIES +from lib389.idm.group import Groups +from lib389.topologies import topology_m1h1c1 as topo + +pytestmark = pytest.mark.tier1 + +DEBUGGING = os.getenv("DEBUGGING", default=False) +if DEBUGGING: + logging.getLogger(__name__).setLevel(logging.DEBUG) +else: + logging.getLogger(__name__).setLevel(logging.INFO) +log = logging.getLogger(__name__) + +BIND_DN = 'uid=tuser1,ou=People,dc=example,dc=com' +BIND_RDN = 'tuser1' + + +def config_memberof(server): + """Configure memberOf plugin and configure fractional + to prevent total init to send memberof + """ + + memberof = MemberOfPlugin(server) + memberof.enable() + memberof.set_autoaddoc('nsMemberOf') + server.restart() + agmts = Agreements(server) + for agmt in agmts.list(): + log.info('update %s to add nsDS5ReplicatedAttributeListTotal' % agmt.dn) + agmt.replace_many(('nsDS5ReplicatedAttributeListTotal', '(objectclass=*) $ EXCLUDE '), + ('nsDS5ReplicatedAttributeList', '(objectclass=*) $ EXCLUDE memberOf')) + + +def test_basic_with_hub(topo): + """Check that basic operations work in cascading replication, this includes + testing plugins that perform internal operatons, and replicated password + policy state attributes. + + :id: 4ac85552-45bc-477b-89a4-226dfff8c6cc + :setup: 1 master, 1 hub, 1 consumer + :steps: + 1. Enable memberOf plugin and set password account lockout settings + 2. Restart the instance + 3. Add a user + 4. Add a group + 5. Test that the replication works + 6. Add the user as a member to the group + 7. Test that the replication works + 8. Issue bad binds to update passwordRetryCount + 9. Test that replicaton works + 10. Check that passwordRetyCount was replicated + :expectedresults: + 1. Should be a success + 2. Should be a success + 3. Should be a success + 4. Should be a success + 5. Should be a success + 6. Should be a success + 7. Should be a success + 8. Should be a success + 9. Should be a success + 10. 
Should be a success + """ + + repl_manager = ReplicationManager(DEFAULT_SUFFIX) + master = topo.ms["master1"] + consumer = topo.cs["consumer1"] + hub = topo.hs["hub1"] + + for inst in topo: + config_memberof(inst) + inst.config.set('passwordlockout', 'on') + inst.config.set('passwordlockoutduration', '60') + inst.config.set('passwordmaxfailure', '3') + inst.config.set('passwordIsGlobalPolicy', 'on') + + # Create user + user1 = UserAccount(master, BIND_DN) + user_props = TEST_USER_PROPERTIES.copy() + user_props.update({'sn': BIND_RDN, + 'cn': BIND_RDN, + 'uid': BIND_RDN, + 'inetUserStatus': '1', + 'objectclass': 'extensibleObject', + 'userpassword': PASSWORD}) + user1.create(properties=user_props, basedn=SUFFIX) + + # Create group + groups = Groups(master, DEFAULT_SUFFIX) + group = groups.create(properties={'cn': 'group'}) + + # Test replication + repl_manager.test_replication(master, consumer) + + # Trigger memberOf plugin by adding user to group + group.replace('member', user1.dn) + + # Test replication once more + repl_manager.test_replication(master, consumer) + + # Issue bad password to update passwordRetryCount + try: + master.simple_bind_s(user1.dn, "badpassword") + except: + pass + + # Test replication one last time + master.simple_bind_s(DN_DM, PASSWORD) + repl_manager.test_replication(master, consumer) + + # Finally check if passwordRetyCount was replicated to the hub and consumer + user1 = UserAccount(hub, BIND_DN) + count = user1.get_attr_val_int('passwordRetryCount') + if count is None: + log.fatal('PasswordRetyCount was not replicated to hub') + assert False + if int(count) != 1: + log.fatal('PasswordRetyCount has unexpected value: {}'.format(count)) + assert False + + user1 = UserAccount(consumer, BIND_DN) + count = user1.get_attr_val_int('passwordRetryCount') + if count is None: + log.fatal('PasswordRetyCount was not replicated to consumer') + assert False + if int(count) != 1: + log.fatal('PasswordRetyCount has unexpected value: {}'.format(count)) + assert False + + +if __name__ == '__main__': + # Run isolated + # -s for DEBUG mode + CURRENT_FILE = os.path.realpath(__file__) + pytest.main(["-s", CURRENT_FILE]) + diff --git a/dirsrvtests/tests/suites/replication/changelog_test.py b/dirsrvtests/tests/suites/replication/changelog_test.py new file mode 100644 index 0000000..e395f0e --- /dev/null +++ b/dirsrvtests/tests/suites/replication/changelog_test.py @@ -0,0 +1,724 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2017 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. 
+# --- END COPYRIGHT BLOCK --- +# +import os +import logging +import ldap +import ldif +import pytest +import time +import subprocess +from lib389.properties import TASK_WAIT +from lib389.replica import Replicas +from lib389.idm.user import UserAccounts +from lib389.topologies import topology_m2 as topo +from lib389._constants import * +from lib389.plugins import RetroChangelogPlugin +from lib389.dseldif import DSEldif +from lib389.tasks import * +from lib389.utils import * + +pytestmark = pytest.mark.tier1 + +TEST_ENTRY_NAME = 'replusr' +NEW_RDN_NAME = 'cl5usr' +CHANGELOG = 'cn=changelog5,cn=config' +RETROCHANGELOG = 'cn=Retro Changelog Plugin,cn=plugins,cn=config' +MAXAGE = 'nsslapd-changelogmaxage' +TRIMINTERVAL = 'nsslapd-changelogtrim-interval' +COMPACTDBINTERVAL = 'nsslapd-changelogcompactdb-interval' +FILTER = '(cn=*)' + +DEBUGGING = os.getenv('DEBUGGING', default=False) +if DEBUGGING: + logging.getLogger(__name__).setLevel(logging.DEBUG) +else: + logging.getLogger(__name__).setLevel(logging.INFO) +log = logging.getLogger(__name__) + + +def _perform_ldap_operations(topo): + """Add a test user, modify description, modrdn user and delete it""" + + log.info('Adding user {}'.format(TEST_ENTRY_NAME)) + users = UserAccounts(topo.ms['master1'], DEFAULT_SUFFIX) + user_properties = { + 'uid': TEST_ENTRY_NAME, + 'cn': TEST_ENTRY_NAME, + 'sn': TEST_ENTRY_NAME, + 'uidNumber': '1001', + 'gidNumber': '2001', + 'userpassword': PASSWORD, + 'description': 'userdesc', + 'homeDirectory': '/home/{}'.format(TEST_ENTRY_NAME)} + tuser = users.create(properties=user_properties) + tuser.replace('description', 'newdesc') + log.info('Modify RDN of user {}'.format(tuser.dn)) + try: + topo.ms['master1'].modrdn_s(tuser.dn, 'uid={}'.format(NEW_RDN_NAME), 0) + except ldap.LDAPError as e: + log.fatal('Failed to modrdn entry {}'.format(tuser.dn)) + raise e + tuser = users.get(NEW_RDN_NAME) + log.info('Deleting user: {}'.format(tuser.dn)) + tuser.delete() + + +def _create_changelog_dump(topo): + """Dump changelog using nss5task and check if ldap operations are logged""" + + log.info('Dump changelog using nss5task and check if ldap operations are logged') + changelog_dir = topo.ms['master1'].get_changelog_dir() + replicas = Replicas(topo.ms["master1"]) + replica = replicas.get(DEFAULT_SUFFIX) + log.info('Remove ldif files, if present in: {}'.format(changelog_dir)) + for files in os.listdir(changelog_dir): + if files.endswith('.ldif'): + changelog_file = os.path.join(changelog_dir, files) + try: + os.remove(changelog_file) + except OSError as e: + log.fatal('Failed to remove ldif file: {}'.format(changelog_file)) + raise e + log.info('Existing changelog ldif file: {} removed'.format(changelog_file)) + else: + log.info('No existing changelog ldif files present') + + log.info('Running nsds5task to dump changelog database to a file') + replica.begin_task_cl2ldif() + + log.info('Check if changelog ldif file exist in: {}'.format(changelog_dir)) + for files in os.listdir(changelog_dir): + if files.endswith('.ldif'): + changelog_ldif = os.path.join(changelog_dir, files) + log.info('Changelog ldif file exist: {}'.format(changelog_ldif)) + return changelog_ldif + else: + log.fatal('Changelog ldif file does not exist in: {}'.format(changelog_dir)) + assert False + + +def _check_changelog_ldif(topo, changelog_ldif): + """Check changelog ldif file for required ldap operations""" + + log.info('Checking changelog ldif file for ldap operations') + assert os.stat(changelog_ldif).st_size > 0, 'Changelog file has no contents' + with 
open(changelog_ldif, 'r') as fh: + content = fh.read() + ldap_operations = set() + log.info('Checking if all required changetype operations are present') + for entry_ldif in content.split('\n\n'): + for line in entry_ldif.split('\n'): + if line.startswith('changetype: '): + ldap_operations.add(line.split(': ')[1]) + valid_operations = set(ldif.valid_changetype_dict.keys()) + log.info('Valid ldap operations: {}'.format(valid_operations)) + log.info('Ldap operations found: {}'.format(ldap_operations)) + assert ldap_operations == valid_operations, 'Changelog ldif file does not contain all \ + changetype operations' + + +def get_ldap_error_msg(e, type): + return e.args[0][type] + + +@pytest.fixture(scope="module") +def changelog_init(topo): + """Initialize the test environment by changing log dir and + enabling cn=Retro Changelog Plugin,cn=plugins,cn=config + """ + log.info('Testing Ticket 47669 - Test duration syntax in the changelogs') + + # bind as directory manager + topo.ms["master1"].log.info("Bind as %s" % DN_DM) + topo.ms["master1"].simple_bind_s(DN_DM, PASSWORD) + + try: + changelogdir = os.path.join(os.path.dirname(topo.ms["master1"].dbdir), 'changelog') + topo.ms["master1"].modify_s(CHANGELOG, [(ldap.MOD_REPLACE, 'nsslapd-changelogdir', + ensure_bytes(changelogdir))]) + except ldap.LDAPError as e: + log.error('Failed to modify ' + CHANGELOG + ': error {}'.format(get_ldap_error_msg(e,'desc'))) + assert False + + try: + topo.ms["master1"].modify_s(RETROCHANGELOG, [(ldap.MOD_REPLACE, 'nsslapd-pluginEnabled', b'on')]) + except ldap.LDAPError as e: + log.error('Failed to enable ' + RETROCHANGELOG + ': error {}'.format(get_ldap_error_msg(e, 'desc'))) + assert False + + # restart the server + topo.ms["master1"].restart(timeout=10) + + +def add_and_check(topo, plugin, attr, val, isvalid): + """ + Helper function to add/replace attr: val and check the added value + """ + if isvalid: + log.info('Test %s: %s -- valid' % (attr, val)) + try: + topo.ms["master1"].modify_s(plugin, [(ldap.MOD_REPLACE, attr, ensure_bytes(val))]) + except ldap.LDAPError as e: + log.error('Failed to add ' + attr + ': ' + val + ' to ' + plugin + ': error {}'.format(get_ldap_error_msg(e,'desc'))) + assert False + else: + log.info('Test %s: %s -- invalid' % (attr, val)) + if plugin == CHANGELOG: + try: + topo.ms["master1"].modify_s(plugin, [(ldap.MOD_REPLACE, attr, ensure_bytes(val))]) + except ldap.LDAPError as e: + log.error('Expectedly failed to add ' + attr + ': ' + val + + ' to ' + plugin + ': error {}'.format(get_ldap_error_msg(e,'desc'))) + else: + try: + topo.ms["master1"].modify_s(plugin, [(ldap.MOD_REPLACE, attr, ensure_bytes(val))]) + except ldap.LDAPError as e: + log.error('Failed to add ' + attr + ': ' + val + ' to ' + plugin + ': error {}'.format(get_ldap_error_msg(e,'desc'))) + + try: + entries = topo.ms["master1"].search_s(plugin, ldap.SCOPE_BASE, FILTER, [attr]) + if isvalid: + if not entries[0].hasValue(attr, val): + log.fatal('%s does not have expected (%s: %s)' % (plugin, attr, val)) + assert False + else: + if plugin == CHANGELOG: + if entries[0].hasValue(attr, val): + log.fatal('%s has unexpected (%s: %s)' % (plugin, attr, val)) + assert False + else: + if not entries[0].hasValue(attr, val): + log.fatal('%s does not have expected (%s: %s)' % (plugin, attr, val)) + assert False + except ldap.LDAPError as e: + log.fatal('Unable to search for entry %s: error %s' % (plugin, e.message['desc'])) + assert False + +def remove_ldif_files_from_changelogdir(topo, extension): + """ + Remove existing ldif files 
from changelog dir + """ + changelog_dir = topo.ms['master1'].get_changelog_dir() + + log.info('Remove %s files, if present in: %s' % (extension, changelog_dir)) + for files in os.listdir(changelog_dir): + if files.endswith(extension): + changelog_file = os.path.join(changelog_dir, files) + try: + os.remove(changelog_file) + except OSError as e: + log.fatal('Failed to remove %s file: %s' % (extension,changelog_file)) + raise e + else: + log.info('Existing changelog %s file: %s removed' % (extension,changelog_file)) + + +@pytest.mark.xfail(ds_is_older('1.3.10.1'), reason="bug bz1685059") +@pytest.mark.bz1685059 +@pytest.mark.ds50498 +@pytest.mark.bz1769296 +def test_cldump_files_removed(topo): + """Verify bz1685059 : cl-dump generated ldif files are removed at the end, -l option is the way to keep them + + :id: fbb2f2a3-167b-4bc6-b513-9e0318b09edc + :setup: Replication with two master, nsslapd-changelogdir is '/var/lib/dirsrv/slapd-master1/changelog' + retrochangelog plugin disabled + :steps: + 1. Clean the changelog directory, removing .ldif files present, if any + 2. Clean the changelog directory, removing .done files present, if any + 3. Perform ldap operations to record replication changes + 4. Try a cl-dump call with invalid arguments to secure the next steps and to check bz1769296 + 5. Launch cl-dump cli without -l option + 6. Wait so that all cl-dump tasks be finished + 7. Check that all .ldif.done generated files have been removed from the changelog dir + 8. Launch cl-dump cli with -l option + 9. Wait so that all cl-dump tasks be finished + 10. Check that the generated .ldif.done files are present in the changelog dir + + :expectedresults: + 1. No remaining .ldif file in the changelog directory + 2. No remaining .ldif.done file in the changelog directory + 3. ldap operations are replicated and recorded in changelog + 4. A result code different from 0 is raised + 5. cl-dump is successfully executed + 6. cl-dump process has finished + 7. No .ldif.done files in the changelog dir + 8. cl-dump is successfully executed + 9. cl-dump process has finished + 10. .ldif.done generated files are present in the changelog dir + """ + + changelog_dir = topo.ms['master1'].get_changelog_dir() + + # Remove existing .ldif files in changelog dir + remove_ldif_files_from_changelogdir(topo, '.ldif') + + # Remove existing .ldif.done files in changelog dir + remove_ldif_files_from_changelogdir(topo, '.done') + + _perform_ldap_operations(topo) + + # This part to make sure that an error in the cl-dump script execution will be detected, + # primary condition before executing the core goal of this case : management of cl-dump generated files. 
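+    # With an unusable port value cl-dump should fail before generating any
+    # ldif files, so the assertion on a non-zero return code below both guards
+    # the rest of the test and verifies the bz1769296 fix.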
+ # As of today the returned code by cl-dump.pl is incorrect when run with invalid arguments (bz1769296) + # This piece of code will serve as reproducer and verification mean for bz1769296 + + log.info("Use cl-dump perl script without -l option : no generated ldif files should remain in %s " % changelog_dir) + cmdline=['/usr/bin/cl-dump', '-h', HOST_MASTER_1, '-p', 'invalid port', '-D', DN_DM, '-w', PASSWORD] + log.info('Command used : %s' % cmdline) + proc = subprocess.Popen(cmdline, stdout=subprocess.PIPE) + msg = proc.communicate() + log.info('output message : %s' % msg[0]) + assert proc.returncode != 0 + + # Now the core goal of the test case + # Using cl-dump without -l option + log.info("Use cl-dump perl script without -l option : no generated ldif files should remain in %s " % changelog_dir) + cmdline=['/usr/bin/cl-dump', '-h', HOST_MASTER_1, '-p', str(PORT_MASTER_1), '-D', DN_DM, '-w', PASSWORD] + log.info('Command used : %s' % cmdline) + proc = subprocess.Popen(cmdline, stdout=subprocess.PIPE) + proc.communicate() + assert proc.returncode == 0 + + log.info('Wait for all cl-dump files to be generated') + time.sleep(1) + + log.info('Check if cl-dump generated .ldif.done files are present - should not') + for files in os.listdir(changelog_dir): + if files.endswith('.done'): + log.fatal('cl-dump generated .ldif.done files are present in %s - they should not' % changelog_dir) + assert False + else: + log.info('All cl-dump generated .ldif files have been successfully removed from %s ' % changelog_dir) + + + # Using cl-dump with -l option + log.info("Use cl-dump perl script with -l option : generated ldif files should be kept in %s " % changelog_dir) + cmdline=['/usr/bin/cl-dump', '-h', HOST_MASTER_1, '-p', str(PORT_MASTER_1), '-D', DN_DM, '-w', PASSWORD, '-l'] + log.info('Command used : %s' % cmdline) + proc = subprocess.Popen(cmdline, stdout=subprocess.PIPE) + msg = proc.communicate() + assert proc.returncode == 0 + + log.info('Wait for all cl-dump files to be generated') + time.sleep(1) + + log.info('Check if cl-dump generated .ldif.done files are present - should be') + for files in os.listdir(changelog_dir): + if files.endswith('.done'): + cldump_file = os.path.join(changelog_dir, files) + log.info('Success : ldif file %s is present' % cldump_file) + break + else: + log.fatal('.ldif.done files are not present in %s - they should be' % changelog_dir) + assert False + +@pytest.mark.skipif(ds_is_older("1.3.10.1"), reason="Not implemented") +def test_dsconf_dump_changelog_files_removed(topo): + """Verify that the python counterpart of cl-dump (using dsconf) has a correct management of generated files + + :id: e41dcf90-098a-4386-acb5-789384579bf7 + :setup: Replication with two master, nsslapd-changelogdir is '/var/lib/dirsrv/slapd-master1/changelog' + retrochangelog plugin disabled + :steps: + 1. Clean the changelog directory, removing .ldif files present, if any + 2. Clean the changelog directory, removing .ldif.done files present, if any + 3. Perform ldap operations to record replication changes + 4. Try a dsconf call with invalid arguments to secure the next steps + 5. Launch dsconf dump-changelog cli without -l option + 6. Wait so that all dsconf tasks be finished + 7. Check that all .ldif.done generated files have been removed from the changelog dir + 8. Launch dsconf dump-changelog cli with -l option + 9. Wait so that all dsconf tasks be finished + 10. Check that the generated .ldif.done files are present in the changelog dir + + :expectedresults: + 1. 
No remaining .ldif file in the changelog directory + 2. No remaining .ldif.done file in the changelog directory + 3. ldap operations are replicated and recorded in changelog + 4. A result code different from 0 is raised + 5. dsconf dump-changelog is successfully executed + 6. dsconf process has finished + 7. No .ldif.done files in the changelog dir + 8. dsconf dump-changelog is successfully executed + 9. dsconf process has finished + 10. .ldif.done generated files are present in the changelog dir + """ + + changelog_dir = topo.ms['master1'].get_changelog_dir() + instance = topo.ms['master1'] + instance_url = 'ldap://%s:%s' % (HOST_MASTER_1, PORT_MASTER_1) + + # Remove existing .ldif files in changelog dir + remove_ldif_files_from_changelogdir(topo, '.ldif') + + # Remove existing .ldif.done files from changelog dir + remove_ldif_files_from_changelogdir(topo, '.done') + + _perform_ldap_operations(topo) + + # This part to make sure that an error in the python dsconf dump-changelog execution will be detected, + # primary condition before executing the core goal of this case : management of generated files. + + log.info("Use dsconf dump-changelog with invalid parameters") + cmdline=['python', '/usr/sbin/dsconf', instance_url, '-D', DN_DM, '-w', 'badpasswd', 'replication', 'dump-changelog'] + log.info('Command used : %s' % cmdline) + proc = subprocess.Popen(cmdline, stdout=subprocess.PIPE) + msg = proc.communicate() + log.info('output message : %s' % msg[0]) + assert proc.returncode != 0 + + # Now the core goal of the test case + # Using dsconf replication changelog without -l option + log.info('Use dsconf replication changelog without -l option: no generated ldif files should be present in %s ' % changelog_dir) + cmdline=['python', '/usr/sbin/dsconf', instance_url, '-D', DN_DM, '-w', PASSWORD, 'replication', 'dump-changelog'] + log.info('Command used : %s' % cmdline) + proc = subprocess.Popen(cmdline, stdout=subprocess.PIPE) + proc.communicate() + assert proc.returncode == 0 + + log.info('Wait for all dsconf dump-changelog files to be generated') + time.sleep(1) + + log.info('Check if dsconf dump-changelog generated .ldif.done files are present - should not') + for files in os.listdir(changelog_dir): + if files.endswith('.done'): + log.fatal('dump-changelog generated .ldif.done files are present in %s - they should not' % changelog_dir) + assert False + else: + log.info('All dsconf dump-changelog generated .ldif files have been successfully removed from %s ' % changelog_dir) + + # Using dsconf replication changelog without -l option + log.info('Use dsconf replication changelog with -l option: generated ldif files should be kept in %s ' % changelog_dir) + cmdline=['python', '/usr/sbin/dsconf', instance_url, '-D', DN_DM, '-w', PASSWORD, 'replication', 'dump-changelog', '-l'] + log.info('Command used : %s' % cmdline) + proc = subprocess.Popen(cmdline, stdout=subprocess.PIPE) + proc.communicate() + assert proc.returncode == 0 + + log.info('Wait for all dsconf dump-changelog files to be generated') + time.sleep(1) + + log.info('Check if dsconf dump-changelog generated .ldif.done files are present - should be') + for files in os.listdir(changelog_dir): + if files.endswith('.done'): + cldump_file = os.path.join(changelog_dir, files) + log.info('Success : ldif file %s is present' % cldump_file) + break + else: + log.fatal('.ldif.done files are not present in %s - they should be' % changelog_dir) + assert False + + +def test_verify_changelog(topo): + """Check if changelog dump file contains required 
ldap operations + + :id: 15ead076-8c18-410b-90eb-c2fe9eab966b + :setup: Replication with two masters. + :steps: 1. Add user to server. + 2. Perform ldap modify, modrdn and delete operations. + 3. Dump the changelog to a file using nsds5task. + 4. Check if changelog is updated with ldap operations. + :expectedresults: + 1. Add user should PASS. + 2. Ldap operations should PASS. + 3. Changelog should be dumped successfully. + 4. Changelog dump file should contain ldap operations + """ + + log.info('LDAP operations add, modify, modrdn and delete') + _perform_ldap_operations(topo) + changelog_ldif = _create_changelog_dump(topo) + _check_changelog_ldif(topo, changelog_ldif) + + +def test_verify_changelog_online_backup(topo): + """Check ldap operations in changelog dump file after online backup + + :id: 4001c34f-35b4-439e-8c2d-fa7e30375219 + :setup: Replication with two masters. + :steps: 1. Add user to server. + 2. Take online backup using db2bak task. + 3. Restore the database using bak2db task. + 4. Perform ldap modify, modrdn and delete operations. + 5. Dump the changelog to a file using nsds5task. + 6. Check if changelog is updated with ldap operations. + :expectedresults: + 1. Add user should PASS. + 2. Backup of database should PASS. + 3. Restore of database should PASS. + 4. Ldap operations should PASS. + 5. Changelog should be dumped successfully. + 6. Changelog dump file should contain ldap operations + """ + + backup_dir = os.path.join(topo.ms['master1'].get_bak_dir(), 'online_backup') + log.info('Run db2bak script to take database backup') + try: + topo.ms['master1'].tasks.db2bak(backup_dir=backup_dir, args={TASK_WAIT: True}) + except ValueError: + log.fatal('test_changelog5: Online backup failed') + assert False + + backup_checkdir = os.path.join(backup_dir, '.repl_changelog_backup', DEFAULT_CHANGELOG_DB) + if os.path.exists(backup_checkdir): + log.info('Database backup is created successfully') + else: + log.fatal('test_changelog5: backup directory does not exist : {}'.format(backup_checkdir)) + assert False + + log.info('Run bak2db to restore directory server') + try: + topo.ms['master1'].tasks.bak2db(backup_dir=backup_dir, args={TASK_WAIT: True}) + except ValueError: + log.fatal('test_changelog5: Online restore failed') + assert False + + log.info('LDAP operations add, modify, modrdn and delete') + _perform_ldap_operations(topo) + changelog_ldif = _create_changelog_dump(topo) + _check_changelog_ldif(topo, changelog_ldif) + + +def test_verify_changelog_offline_backup(topo): + """Check ldap operations in changelog dump file after offline backup + + :id: feed290d-57dd-46e4-9ab3-422c77589867 + :setup: Replication with two masters. + :steps: 1. Add user to server. + 2. Stop server and take offline backup using db2bak. + 3. Restore the database using bak2db. + 4. Perform ldap modify, modrdn and delete operations. + 5. Start the server and dump the changelog using nsds5task. + 6. Check if changelog is updated with ldap operations. + :expectedresults: + 1. Add user should PASS. + 2. Backup of database should PASS. + 3. Restore of database should PASS. + 4. Ldap operations should PASS. + 5. Changelog should be dumped successfully. + 6. 
Changelog dump file should contain ldap operations + """ + + backup_dir = os.path.join(topo.ms['master1'].get_bak_dir(), 'offline_backup') + + topo.ms['master1'].stop() + log.info('Run db2bak to take database backup') + try: + topo.ms['master1'].db2bak(backup_dir) + except ValueError: + log.fatal('test_changelog5: Offline backup failed') + assert False + + log.info('Run bak2db to restore directory server') + try: + topo.ms['master1'].bak2db(backup_dir) + except ValueError: + log.fatal('test_changelog5: Offline restore failed') + assert False + topo.ms['master1'].start() + + backup_checkdir = os.path.join(backup_dir, '.repl_changelog_backup', DEFAULT_CHANGELOG_DB) + if os.path.exists(backup_checkdir): + log.info('Database backup is created successfully') + else: + log.fatal('test_changelog5: backup directory does not exist : {}'.format(backup_checkdir)) + assert False + + log.info('LDAP operations add, modify, modrdn and delete') + _perform_ldap_operations(topo) + changelog_ldif = _create_changelog_dump(topo) + _check_changelog_ldif(topo, changelog_ldif) + + +@pytest.mark.ds47669 +def test_changelog_maxage(topo, changelog_init): + """Check nsslapd-changelog max age values + + :id: d284ff27-03b2-412c-ac74-ac4f2d2fae3b + :setup: Replication with two master, change nsslapd-changelogdir to + '/var/lib/dirsrv/slapd-master1/changelog' and + set cn=Retro Changelog Plugin,cn=plugins,cn=config to 'on' + :steps: + 1. Set nsslapd-changelogmaxage in cn=changelog5,cn=config to values - '12345','10s','30M','12h','2D','4w' + 2. Set nsslapd-changelogmaxage in cn=changelog5,cn=config to values - '-123','xyz' + + :expectedresults: + 1. Operation should be successful + 2. Operation should be unsuccessful + """ + log.info('1. Test nsslapd-changelogmaxage in cn=changelog5,cn=config') + + # bind as directory manager + topo.ms["master1"].log.info("Bind as %s" % DN_DM) + topo.ms["master1"].simple_bind_s(DN_DM, PASSWORD) + + add_and_check(topo, CHANGELOG, MAXAGE, '12345', True) + add_and_check(topo, CHANGELOG, MAXAGE, '10s', True) + add_and_check(topo, CHANGELOG, MAXAGE, '30M', True) + add_and_check(topo, CHANGELOG, MAXAGE, '12h', True) + add_and_check(topo, CHANGELOG, MAXAGE, '2D', True) + add_and_check(topo, CHANGELOG, MAXAGE, '4w', True) + add_and_check(topo, CHANGELOG, MAXAGE, '-123', False) + add_and_check(topo, CHANGELOG, MAXAGE, 'xyz', False) + + +@pytest.mark.ds47669 +def test_ticket47669_changelog_triminterval(topo, changelog_init): + """Check nsslapd-changelog triminterval values + + :id: 8f850c37-7e7c-49dd-a4e0-9344638616d6 + :setup: Replication with two master, change nsslapd-changelogdir to + '/var/lib/dirsrv/slapd-master1/changelog' and + set cn=Retro Changelog Plugin,cn=plugins,cn=config to 'on' + :steps: + 1. Set nsslapd-changelogtrim-interval in cn=changelog5,cn=config to values - + '12345','10s','30M','12h','2D','4w' + 2. Set nsslapd-changelogtrim-interval in cn=changelog5,cn=config to values - '-123','xyz' + + :expectedresults: + 1. Operation should be successful + 2. Operation should be unsuccessful + """ + log.info('2. 
Test nsslapd-changelogtrim-interval in cn=changelog5,cn=config') + + # bind as directory manager + topo.ms["master1"].log.info("Bind as %s" % DN_DM) + topo.ms["master1"].simple_bind_s(DN_DM, PASSWORD) + + add_and_check(topo, CHANGELOG, TRIMINTERVAL, '12345', True) + add_and_check(topo, CHANGELOG, TRIMINTERVAL, '10s', True) + add_and_check(topo, CHANGELOG, TRIMINTERVAL, '30M', True) + add_and_check(topo, CHANGELOG, TRIMINTERVAL, '12h', True) + add_and_check(topo, CHANGELOG, TRIMINTERVAL, '2D', True) + add_and_check(topo, CHANGELOG, TRIMINTERVAL, '4w', True) + add_and_check(topo, CHANGELOG, TRIMINTERVAL, '-123', False) + add_and_check(topo, CHANGELOG, TRIMINTERVAL, 'xyz', False) + + +@pytest.mark.ds47669 +def test_changelog_compactdbinterval(topo, changelog_init): + """Check nsslapd-changelog compactdbinterval values + + :id: 0f4b3118-9dfa-4c2a-945c-72847b42a48c + :setup: Replication with two master, change nsslapd-changelogdir to + '/var/lib/dirsrv/slapd-master1/changelog' and + set cn=Retro Changelog Plugin,cn=plugins,cn=config to 'on' + :steps: + 1. Set nsslapd-changelogcompactdb-interval in cn=changelog5,cn=config to values - + '12345','10s','30M','12h','2D','4w' + 2. Set nsslapd-changelogcompactdb-interval in cn=changelog5,cn=config to values - + '-123','xyz' + + :expectedresults: + 1. Operation should be successful + 2. Operation should be unsuccessful + """ + log.info('3. Test nsslapd-changelogcompactdb-interval in cn=changelog5,cn=config') + + # bind as directory manager + topo.ms["master1"].log.info("Bind as %s" % DN_DM) + topo.ms["master1"].simple_bind_s(DN_DM, PASSWORD) + + add_and_check(topo, CHANGELOG, COMPACTDBINTERVAL, '12345', True) + add_and_check(topo, CHANGELOG, COMPACTDBINTERVAL, '10s', True) + add_and_check(topo, CHANGELOG, COMPACTDBINTERVAL, '30M', True) + add_and_check(topo, CHANGELOG, COMPACTDBINTERVAL, '12h', True) + add_and_check(topo, CHANGELOG, COMPACTDBINTERVAL, '2D', True) + add_and_check(topo, CHANGELOG, COMPACTDBINTERVAL, '4w', True) + add_and_check(topo, CHANGELOG, COMPACTDBINTERVAL, '-123', False) + add_and_check(topo, CHANGELOG, COMPACTDBINTERVAL, 'xyz', False) + + +@pytest.mark.ds47669 +def test_retrochangelog_maxage(topo, changelog_init): + """Check nsslapd-retrochangelog max age values + + :id: 0cb84d81-3e86-4dbf-84a2-66aefd8281db + :setup: Replication with two master, change nsslapd-changelogdir to + '/var/lib/dirsrv/slapd-master1/changelog' and + set cn=Retro Changelog Plugin,cn=plugins,cn=config to 'on' + :steps: + 1. Set nsslapd-changelogmaxage in cn=Retro Changelog Plugin,cn=plugins,cn=config to values - + '12345','10s','30M','12h','2D','4w' + 2. Set nsslapd-changelogmaxage in cn=Retro Changelog Plugin,cn=plugins,cn=config to values - + '-123','xyz' + + :expectedresults: + 1. Operation should be successful + 2. Operation should be unsuccessful + """ + log.info('4. 
Test nsslapd-changelogmaxage in cn=Retro Changelog Plugin,cn=plugins,cn=config') + + # bind as directory manager + topo.ms["master1"].log.info("Bind as %s" % DN_DM) + topo.ms["master1"].simple_bind_s(DN_DM, PASSWORD) + + add_and_check(topo, RETROCHANGELOG, MAXAGE, '12345', True) + add_and_check(topo, RETROCHANGELOG, MAXAGE, '10s', True) + add_and_check(topo, RETROCHANGELOG, MAXAGE, '30M', True) + add_and_check(topo, RETROCHANGELOG, MAXAGE, '12h', True) + add_and_check(topo, RETROCHANGELOG, MAXAGE, '2D', True) + add_and_check(topo, RETROCHANGELOG, MAXAGE, '4w', True) + add_and_check(topo, RETROCHANGELOG, MAXAGE, '-123', False) + add_and_check(topo, RETROCHANGELOG, MAXAGE, 'xyz', False) + + topo.ms["master1"].log.info("ticket47669 was successfully verified.") + +@pytest.mark.ds50736 +def test_retrochangelog_trimming_crash(topo, changelog_init): + """Check that when retroCL nsslapd-retrocthangelog contains invalid + value, then the instance does not crash at shutdown + + :id: 5d9bd7ca-e9bf-4be9-8fc8-902aa5513052 + :setup: Replication with two master, change nsslapd-changelogdir to + '/var/lib/dirsrv/slapd-master1/changelog' and + set cn=Retro Changelog Plugin,cn=plugins,cn=config to 'on' + :steps: + 1. Set nsslapd-changelogmaxage in cn=Retro Changelog Plugin,cn=plugins,cn=config to value '-1' + This value is invalid. To disable retroCL trimming it should be set to 0 + 2. Do several restart + 3. check there is no 'Detected Disorderly Shutdown' message (crash) + 4. restore valid value for nsslapd-changelogmaxage '1w' + + :expectedresults: + 1. Operation should be successful + 2. Operation should be successful + 3. Operation should be successful + 4. Operation should be successful + """ + log.info('1. Test retroCL trimming crash in cn=Retro Changelog Plugin,cn=plugins,cn=config') + + # set the nsslapd-changelogmaxage directly on dse.ldif + # because the set value is invalid + topo.ms["master1"].log.info("ticket50736 start verification") + topo.ms["master1"].stop() + retroPlugin = RetroChangelogPlugin(topo.ms["master1"]) + dse_ldif = DSEldif(topo.ms["master1"]) + dse_ldif.replace(retroPlugin.dn, 'nsslapd-changelogmaxage', '-1') + topo.ms["master1"].start() + + # The crash should be systematic, but just in case do several restart + # with a delay to let all plugin init + for i in range(5): + time.sleep(1) + topo.ms["master1"].stop() + topo.ms["master1"].start() + + assert not topo.ms["master1"].detectDisorderlyShutdown() + + topo.ms["master1"].log.info("ticket 50736 was successfully verified.") + + + +if __name__ == '__main__': + # Run isolated + # -s for DEBUG mode + CURRENT_FILE = os.path.realpath(__file__) + pytest.main("-s %s" % CURRENT_FILE) diff --git a/dirsrvtests/tests/suites/replication/changelog_trimming_test.py b/dirsrvtests/tests/suites/replication/changelog_trimming_test.py new file mode 100644 index 0000000..ed7b279 --- /dev/null +++ b/dirsrvtests/tests/suites/replication/changelog_trimming_test.py @@ -0,0 +1,134 @@ +import logging +import pytest +import os +import ldap +import time +from lib389._constants import * +from lib389.properties import * +from lib389.topologies import topology_m1 as topo +from lib389.replica import Changelog5 +from lib389.idm.domain import Domain + +pytestmark = pytest.mark.tier1 + +DEBUGGING = os.getenv("DEBUGGING", default=False) +if DEBUGGING: + logging.getLogger(__name__).setLevel(logging.DEBUG) +else: + logging.getLogger(__name__).setLevel(logging.INFO) +log = logging.getLogger(__name__) + + +def do_mods(master, num): + """Perform a num of mods on the 
default suffix + """ + domain = Domain(master, DEFAULT_SUFFIX) + for i in range(num): + domain.replace('description', 'change %s' % i) + +@pytest.fixture(scope="module") +def setup_max_entries(topo, request): + """Configure logging and changelog max entries + """ + master = topo.ms["master1"] + + master.config.loglevel((ErrorLog.REPLICA,), 'error') + + cl = Changelog5(master) + cl.set_max_entries('2') + cl.set_trim_interval('300') + +@pytest.fixture(scope="module") +def setup_max_age(topo, request): + """Configure logging and changelog max age + """ + master = topo.ms["master1"] + master.config.loglevel((ErrorLog.REPLICA,), 'error') + + cl = Changelog5(master) + cl.set_max_age('5') + cl.set_trim_interval('300') + +def test_max_age(topo, setup_max_age): + """Test changing the trimming interval works with max age + + :id: b5de04a5-4d92-49ea-a725-1d278a1c647c + :setup: single master + :steps: + 1. Perform modification to populate changelog + 2. Adjust the changelog trimming interval + 3. Check is trimming occurrs within the new interval + + :expectedresults: + 1. Modifications are successful + 2. The changelog trimming interval is correctly lowered + 3. Trimming occurs + + """ + log.info("Testing changelog triming interval with max age...") + + master = topo.ms["master1"] + cl = Changelog5(master) + + # Do mods to build if cl entries + do_mods(master, 10) + time.sleep(6) # 5 seconds + 1 for good measure + + if master.searchErrorsLog("Trimmed") is True: + log.fatal('Trimming event unexpectedly occurred') + assert False + + cl.set_trim_interval('5') + + time.sleep(6) # Trimming should have occured + + if master.searchErrorsLog("Trimmed") is False: + log.fatal('Trimming event did not occur') + assert False + + +def test_max_entries(topo, setup_max_entries): + """Test changing the trimming interval works with max entries + + :id: b5de04a5-4d92-49ea-a725-1d278a1c647d + :setup: single master + :steps: + 1. Perform modification to populate changelog + 2. Adjust the changelog trimming interval + 3. Check is trimming occurrs within the new interval + + :expectedresults: + 1. Modifications are successful + 2. The changelog trimming interval is correctly lowered + 3. Trimming occurs + + """ + + log.info("Testing changelog triming interval with max entries...") + master = topo.ms["master1"] + cl = Changelog5(master) + + # reset errors log + master.deleteErrorLogs() + + # Do mods to build if cl entries + do_mods(master, 10) + + if master.searchErrorsLog("Trimmed") is True: + log.fatal('Trimming event unexpectedly occurred') + assert False + + cl.set_trim_interval('5') + + time.sleep(6) # Trimming should have occured + + if master.searchErrorsLog("Trimmed") is False: + log.fatal('Trimming event did not occur') + assert False + + +if __name__ == '__main__': + # Run isolated + # -s for DEBUG mode + CURRENT_FILE = os.path.realpath(__file__) + pytest.main("-s %s" % CURRENT_FILE) diff --git a/dirsrvtests/tests/suites/replication/cleanallruv_max_tasks_test.py b/dirsrvtests/tests/suites/replication/cleanallruv_max_tasks_test.py new file mode 100644 index 0000000..fb21d4b --- /dev/null +++ b/dirsrvtests/tests/suites/replication/cleanallruv_max_tasks_test.py @@ -0,0 +1,72 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2019 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. 
+# --- END COPYRIGHT BLOCK --- +# +import threading +import pytest +import random +from lib389 import DirSrv +from lib389.tasks import * +from lib389.utils import * +from lib389.topologies import topology_m4, topology_m2 +from lib389._constants import * + +pytestmark = pytest.mark.tier1 + +@pytest.mark.skipif(ds_is_older("1.4.1.6"), reason="Not implemented") +def test_max_tasks(topology_m4): + """Test we can not create more than 64 cleaning tasks + + This test needs to be a standalone test becuase there is no easy way to + "restore" the instance after running this test + + :id: c34d0b40-3c3e-4f53-8656-5e4c2a310a1f + :setup: Replication setup with four masters + :steps: + 1. Stop masters 3 & 4 + 2. Create over 64 tasks between m1 and m2 + 3. Check logs to see if (>64) tasks were rejected + + :expectedresults: + 1. Success + 2. Success + 3. Success + """ + + # Stop masters 3 & 4 + m1 = topology_m4.ms["master1"] + m2 = topology_m4.ms["master2"] + m3 = topology_m4.ms["master3"] + m4 = topology_m4.ms["master4"] + m3.stop() + m4.stop() + + # Add over 64 tasks between master1 & 2 to try to exceed the 64 task limit + for i in range(1, 64): + cruv_task = CleanAllRUVTask(m1) + cruv_task.create(properties={ + 'replica-id': str(i), + 'replica-base-dn': DEFAULT_SUFFIX, + 'replica-force-cleaning': 'no', # This forces these tasks to stick around + }) + cruv_task = CleanAllRUVTask(m2) + cruv_task.create(properties={ + 'replica-id': "10" + str(i), + 'replica-base-dn': DEFAULT_SUFFIX, + 'replica-force-cleaning': 'yes', # This allows the tasks to propagate + }) + + # Check the errors log for our error message in master 1 + assert m1.searchErrorsLog('Exceeded maximum number of active CLEANALLRUV tasks') + + +if __name__ == '__main__': + # Run isolated + # -s for DEBUG mode + CURRENT_FILE = os.path.realpath(__file__) + pytest.main("-s %s" % CURRENT_FILE) + diff --git a/dirsrvtests/tests/suites/replication/cleanallruv_test.py b/dirsrvtests/tests/suites/replication/cleanallruv_test.py new file mode 100644 index 0000000..e93d16a --- /dev/null +++ b/dirsrvtests/tests/suites/replication/cleanallruv_test.py @@ -0,0 +1,827 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2019 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. 
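Because the 'Exceeded maximum number of active CLEANALLRUV tasks' message is written to the errors log asynchronously, a slightly more forgiving version of the final check would poll the log for a short while before failing. A minimal sketch; wait_for_log and its 30 second timeout are illustrative helpers, not lib389 API:

    import time

    def wait_for_log(inst, text, timeout=30):
        """Poll the instance errors log until `text` appears or the timeout expires."""
        for _ in range(timeout):
            if inst.searchErrorsLog(text):
                return True
            time.sleep(1)
        return False

    assert wait_for_log(m1, 'Exceeded maximum number of active CLEANALLRUV tasks')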
+# --- END COPYRIGHT BLOCK --- +# +import threading +import pytest +import random +from lib389 import DirSrv +from lib389.tasks import * +from lib389.utils import * +from lib389.topologies import topology_m4, topology_m2 +from lib389._constants import * + +from lib389.idm.directorymanager import DirectoryManager +from lib389.replica import ReplicationManager, Replicas +from lib389.tasks import CleanAllRUVTask +from lib389.idm.user import UserAccounts +from lib389.config import LDBMConfig +from lib389.config import CertmapLegacy +from lib389.idm.services import ServiceAccounts + +pytestmark = pytest.mark.tier1 + +logging.getLogger(__name__).setLevel(logging.DEBUG) +log = logging.getLogger(__name__) + + +class AddUsers(threading.Thread): + def __init__(self, inst, num_users): + threading.Thread.__init__(self) + self.daemon = True + self.inst = inst + self.num_users = num_users + + def run(self): + """Start adding users""" + + dm = DirectoryManager(self.inst) + conn = dm.bind() + + users = UserAccounts(conn, DEFAULT_SUFFIX) + + u_range = list(range(self.num_users)) + random.shuffle(u_range) + + for idx in u_range: + try: + users.create(properties={ + 'uid': 'testuser%s' % idx, + 'cn' : 'testuser%s' % idx, + 'sn' : 'user%s' % idx, + 'uidNumber' : '%s' % (1000 + idx), + 'gidNumber' : '%s' % (1000 + idx), + 'homeDirectory' : '/home/testuser%s' % idx + }) + # One of the masters was probably put into read only mode - just break out + except ldap.UNWILLING_TO_PERFORM: + break + except ldap.ALREADY_EXISTS: + pass + conn.close() + + +def remove_master4_agmts(msg, topology_m4): + """Remove all the repl agmts to master4. """ + + log.info('%s: remove all the agreements to master 4...' % msg) + repl = ReplicationManager(DEFAULT_SUFFIX) + # This will delete m4 frm the topo *and* remove all incoming agreements + # to m4. + repl.remove_master(topology_m4.ms["master4"], + [topology_m4.ms["master1"], topology_m4.ms["master2"], topology_m4.ms["master3"]]) + + +def check_ruvs(msg, topology_m4, m4rid): + """Check masters 1- 3 for master 4's rid.""" + for inst in (topology_m4.ms["master1"], topology_m4.ms["master2"], topology_m4.ms["master3"]): + clean = False + replicas = Replicas(inst) + replica = replicas.get(DEFAULT_SUFFIX) + + count = 0 + while not clean and count < 10: + ruv = replica.get_ruv() + if m4rid in ruv._rids: + time.sleep(5) + count = count + 1 + else: + clean = True + if not clean: + raise Exception("Master %s was not cleaned in time." % inst.serverid) + return True + + +def task_done(topology_m4, task_dn, timeout=60): + """Check if the task is complete""" + + attrlist = ['nsTaskLog', 'nsTaskStatus', 'nsTaskExitCode', + 'nsTaskCurrentItem', 'nsTaskTotalItems'] + done = False + count = 0 + + while not done and count < timeout: + try: + entry = topology_m4.ms["master1"].getEntry(task_dn, attrlist=attrlist) + if entry is not None: + if entry.hasAttr('nsTaskExitCode'): + done = True + break + else: + done = True + break + except ldap.NO_SUCH_OBJECT: + done = True + break + except ldap.LDAPError: + break + time.sleep(1) + count += 1 + + return done + + +def restore_master4(topology_m4): + """In our tests will always be removing master 4, so we need a common + way to restore it for another test + """ + + # Restart the remaining masters to allow rid 4 to be reused. + for inst in topology_m4.ms.values(): + inst.restart() + + repl = ReplicationManager(DEFAULT_SUFFIX) + repl.join_master(topology_m4.ms["master1"], topology_m4.ms["master4"]) + + # Add the 2,3 -> 4 agmt. 
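+    # ensure_agreement() only creates an agreement when one is not already present,
+    # so this restore step is safe to run after any test, whatever agreements survived.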
+ repl.ensure_agreement(topology_m4.ms["master2"], topology_m4.ms["master4"]) + repl.ensure_agreement(topology_m4.ms["master3"], topology_m4.ms["master4"]) + # And in reverse ... + repl.ensure_agreement(topology_m4.ms["master4"], topology_m4.ms["master2"]) + repl.ensure_agreement(topology_m4.ms["master4"], topology_m4.ms["master3"]) + + log.info('Master 4 has been successfully restored.') + + +@pytest.fixture() +def m4rid(request, topology_m4): + log.debug("Wait a bit before the reset - it is required for the slow machines") + time.sleep(5) + log.debug("-------------- BEGIN RESET of m4 -----------------") + repl = ReplicationManager(DEFAULT_SUFFIX) + repl.test_replication_topology(topology_m4.ms.values()) + # What is master4's rid? + m4rid = repl.get_rid(topology_m4.ms["master4"]) + + def fin(): + try: + # Restart the masters and rerun cleanallruv + for inst in topology_m4.ms.values(): + inst.restart() + + cruv_task = CleanAllRUVTask(topology_m4.ms["master1"]) + cruv_task.create(properties={ + 'replica-id': m4rid, + 'replica-base-dn': DEFAULT_SUFFIX, + 'replica-force-cleaning': 'no', + }) + cruv_task.wait() + except ldap.UNWILLING_TO_PERFORM: + # In some casse we already cleaned rid4, so if we fail, it's okay + pass + restore_master4(topology_m4) + # Make sure everything works. + repl.test_replication_topology(topology_m4.ms.values()) + request.addfinalizer(fin) + log.debug("-------------- FINISH RESET of m4 -----------------") + return m4rid + + +def test_clean(topology_m4, m4rid): + """Check that cleanallruv task works properly + + :id: e9b3ce5c-e17c-409e-aafc-e97d630f2878 + :setup: Replication setup with four masters + :steps: + 1. Check that replication works on all masters + 2. Disable replication on master 4 + 3. Remove agreements to master 4 from other masters + 4. Run a cleanallruv task on master 1 with a 'force' option 'on' + 5. Check that everything was cleaned + :expectedresults: + 1. Replication should work properly on all masters + 2. Operation should be successful + 3. Agreements to master 4 should be removed + 4. Cleanallruv task should be successfully executed + 5. Everything should be cleaned + """ + + log.info('Running test_clean...') + # Disable master 4 + # Remove the agreements from the other masters that point to master 4 + log.info('test_clean: disable master 4...') + remove_master4_agmts("test_clean", topology_m4) + + # Run the task + log.info('test_clean: run the cleanAllRUV task...') + cruv_task = CleanAllRUVTask(topology_m4.ms["master1"]) + cruv_task.create(properties={ + 'replica-id': m4rid, + 'replica-base-dn': DEFAULT_SUFFIX, + 'replica-force-cleaning': 'no' + }) + cruv_task.wait() + + # Check the other master's RUV for 'replica 4' + log.info('test_clean: check all the masters have been cleaned...') + clean = check_ruvs("test_clean", topology_m4, m4rid) + assert clean + + log.info('test_clean PASSED, restoring master 4...') + + +def test_clean_restart(topology_m4, m4rid): + """Check that cleanallruv task works properly after a restart + + :id: c6233bb3-092c-4919-9ac9-80dd02cc6e02 + :setup: Replication setup with four masters + :steps: + 1. Disable replication on master 4 + 2. Remove agreements to master 4 from other masters + 3. Stop master 3 + 4. Run a cleanallruv task on master 1 + 5. Stop master 1 + 6. Start master 3 + 7. Make sure that no crash happened + 8. Start master 1 + 9. Make sure that no crash happened + 10. Check that everything was cleaned + :expectedresults: + 1. Operation should be successful + 2. Agreements to master 4 should be removed + 3. 
Master 3 should be stopped + 4. Cleanallruv task should be successfully executed + 5. Master 1 should be stopped + 6. Master 3 should be started + 7. No crash should happened + 8. Master 1 should be started + 9. No crash should happened + 10. Everything should be cleaned + """ + log.info('Running test_clean_restart...') + + # Disable master 4 + log.info('test_clean: disable master 4...') + # Remove the agreements from the other masters that point to master 4 + remove_master4_agmts("test_clean", topology_m4) + + # Stop master 3 to keep the task running, so we can stop master 1... + topology_m4.ms["master3"].stop() + + # Run the task + log.info('test_clean: run the cleanAllRUV task...') + cruv_task = CleanAllRUVTask(topology_m4.ms["master1"]) + cruv_task.create(properties={ + 'replica-id': m4rid, + 'replica-base-dn': DEFAULT_SUFFIX, + 'replica-force-cleaning': 'no', + 'replica-certify-all': 'yes' + }) + + # Sleep a bit, then stop master 1 + time.sleep(5) + topology_m4.ms["master1"].stop() + + # Now start master 3 & 1, and make sure we didn't crash + topology_m4.ms["master3"].start() + if topology_m4.ms["master3"].detectDisorderlyShutdown(): + log.fatal('test_clean_restart: Master 3 previously crashed!') + assert False + + topology_m4.ms["master1"].start(timeout=30) + if topology_m4.ms["master1"].detectDisorderlyShutdown(): + log.fatal('test_clean_restart: Master 1 previously crashed!') + assert False + + # Check the other master's RUV for 'replica 4' + log.info('test_clean_restart: check all the masters have been cleaned...') + clean = check_ruvs("test_clean_restart", topology_m4, m4rid) + assert clean + + log.info('test_clean_restart PASSED, restoring master 4...') + + +def test_clean_force(topology_m4, m4rid): + """Check that multiple tasks with a 'force' option work properly + + :id: f8810dfe-d2d2-4dd9-ba03-5fc14896fabe + :setup: Replication setup with four masters + :steps: + 1. Stop master 3 + 2. Add a bunch of updates to master 4 + 3. Disable replication on master 4 + 4. Start master 3 + 5. Remove agreements to master 4 from other masters + 6. Run a cleanallruv task on master 1 with a 'force' option 'on' + 7. Check that everything was cleaned + :expectedresults: + 1. Master 3 should be stopped + 2. Operation should be successful + 3. Replication on master 4 should be disabled + 4. Master 3 should be started + 5. Agreements to master 4 should be removed + 6. Operation should be successful + 7. Everything should be cleaned + """ + + log.info('Running test_clean_force...') + + # Stop master 3, while we update master 4, so that 3 is behind the other masters + topology_m4.ms["master3"].stop() + + # Add a bunch of updates to master 4 + m4_add_users = AddUsers(topology_m4.ms["master4"], 1500) + m4_add_users.start() + m4_add_users.join() + + # Start master 3, it should be out of sync with the other replicas... 
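+    # Master 3 missed the 1500 adds above, so its view of replica 4's RUV lags behind;
+    # that is why the cleanAllRUV task below is created with replica-force-cleaning: yes.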
+ topology_m4.ms["master3"].start() + + # Remove the agreements from the other masters that point to master 4 + remove_master4_agmts("test_clean_force", topology_m4) + + # Run the task, use "force" because master 3 is not in sync with the other replicas + # in regards to the replica 4 RUV + log.info('test_clean: run the cleanAllRUV task...') + cruv_task = CleanAllRUVTask(topology_m4.ms["master1"]) + cruv_task.create(properties={ + 'replica-id': m4rid, + 'replica-base-dn': DEFAULT_SUFFIX, + 'replica-force-cleaning': 'yes' + }) + cruv_task.wait() + + # Check the other master's RUV for 'replica 4' + log.info('test_clean_force: check all the masters have been cleaned...') + clean = check_ruvs("test_clean_force", topology_m4, m4rid) + assert clean + + log.info('test_clean_force PASSED, restoring master 4...') + + +def test_abort(topology_m4, m4rid): + """Test the abort task basic functionality + + :id: b09a6887-8de0-4fac-8e41-73ccbaaf7a08 + :setup: Replication setup with four masters + :steps: + 1. Disable replication on master 4 + 2. Remove agreements to master 4 from other masters + 3. Stop master 2 + 4. Run a cleanallruv task on master 1 + 5. Run a cleanallruv abort task on master 1 + :expectedresults: No hanging tasks left + 1. Replication on master 4 should be disabled + 2. Agreements to master 4 should be removed + 3. Master 2 should be stopped + 4. Operation should be successful + 5. Operation should be successful + """ + + log.info('Running test_abort...') + # Remove the agreements from the other masters that point to master 4 + remove_master4_agmts("test_abort", topology_m4) + + # Stop master 2 + log.info('test_abort: stop master 2 to freeze the cleanAllRUV task...') + topology_m4.ms["master2"].stop() + + # Run the task + log.info('test_abort: add the cleanAllRUV task...') + cruv_task = CleanAllRUVTask(topology_m4.ms["master1"]) + cruv_task.create(properties={ + 'replica-id': m4rid, + 'replica-base-dn': DEFAULT_SUFFIX, + 'replica-force-cleaning': 'no', + 'replica-certify-all': 'yes' + }) + # Wait a bit + time.sleep(2) + + # Abort the task + cruv_task.abort() + + # Check master 1 does not have the clean task running + log.info('test_abort: check master 1 no longer has a cleanAllRUV task...') + if not task_done(topology_m4, cruv_task.dn): + log.fatal('test_abort: CleanAllRUV task was not aborted') + assert False + + # Start master 2 + log.info('test_abort: start master 2 to begin the restore process...') + topology_m4.ms["master2"].start() + + log.info('test_abort PASSED, restoring master 4...') + + +def test_abort_restart(topology_m4, m4rid): + """Test the abort task can handle a restart, and then resume + + :id: b66e33d4-fe85-4e1c-b882-75da80f70ab3 + :setup: Replication setup with four masters + :steps: + 1. Disable replication on master 4 + 2. Remove agreements to master 4 from other masters + 3. Stop master 3 + 4. Run a cleanallruv task on master 1 + 5. Run a cleanallruv abort task on master 1 + 6. Restart master 1 + 7. Make sure that no crash happened + 8. Start master 3 + 9. Check master 1 does not have the clean task running + 10. Check that errors log doesn't have 'Aborting abort task' message + :expectedresults: + 1. Replication on master 4 should be disabled + 2. Agreements to master 4 should be removed + 3. Master 3 should be stopped + 4. Operation should be successful + 5. Operation should be successful + 6. Master 1 should be restarted + 7. No crash should happened + 8. Master 3 should be started + 9. Check master 1 shouldn't have the clean task running + 10. 
Errors log shouldn't have 'Aborting abort task' message + """ + + log.info('Running test_abort_restart...') + # Remove the agreements from the other masters that point to master 4 + remove_master4_agmts("test_abort", topology_m4) + + # Stop master 3 + log.info('test_abort_restart: stop master 3 to freeze the cleanAllRUV task...') + topology_m4.ms["master3"].stop() + + # Run the task + log.info('test_abort_restart: add the cleanAllRUV task...') + cruv_task = CleanAllRUVTask(topology_m4.ms["master1"]) + cruv_task.create(properties={ + 'replica-id': m4rid, + 'replica-base-dn': DEFAULT_SUFFIX, + 'replica-force-cleaning': 'no', + 'replica-certify-all': 'yes' + }) + # Wait a bit + time.sleep(2) + + # Abort the task + cruv_task.abort(certify=True) + + # Check master 1 does not have the clean task running + log.info('test_abort_abort: check master 1 no longer has a cleanAllRUV task...') + if not task_done(topology_m4, cruv_task.dn): + log.fatal('test_abort_restart: CleanAllRUV task was not aborted') + assert False + + # Now restart master 1, and make sure the abort process completes + topology_m4.ms["master1"].restart() + if topology_m4.ms["master1"].detectDisorderlyShutdown(): + log.fatal('test_abort_restart: Master 1 previously crashed!') + assert False + + # Start master 3 + topology_m4.ms["master3"].start() + + # Need to wait 5 seconds before server processes any leftover tasks + time.sleep(6) + + # Check master 1 tried to run abort task. We expect the abort task to be aborted. + if not topology_m4.ms["master1"].searchErrorsLog('Aborting abort task'): + log.fatal('test_abort_restart: Abort task did not restart') + assert False + + log.info('test_abort_restart PASSED, restoring master 4...') + + +def test_abort_certify(topology_m4, m4rid): + """Test the abort task with a replica-certify-all option + + :id: 78959966-d644-44a8-b98c-1fcf21b45eb0 + :setup: Replication setup with four masters + :steps: + 1. Disable replication on master 4 + 2. Remove agreements to master 4 from other masters + 3. Stop master 2 + 4. Run a cleanallruv task on master 1 + 5. Run a cleanallruv abort task on master 1 with a replica-certify-all option + :expectedresults: No hanging tasks left + 1. Replication on master 4 should be disabled + 2. Agreements to master 4 should be removed + 3. Master 2 should be stopped + 4. Operation should be successful + 5. 
Operation should be successful + """ + + log.info('Running test_abort_certify...') + + # Remove the agreements from the other masters that point to master 4 + remove_master4_agmts("test_abort_certify", topology_m4) + + # Stop master 2 + log.info('test_abort_certify: stop master 2 to freeze the cleanAllRUV task...') + topology_m4.ms["master2"].stop() + + # Run the task + log.info('test_abort_certify: add the cleanAllRUV task...') + cruv_task = CleanAllRUVTask(topology_m4.ms["master1"]) + cruv_task.create(properties={ + 'replica-id': m4rid, + 'replica-base-dn': DEFAULT_SUFFIX, + 'replica-force-cleaning': 'no', + 'replica-certify-all': 'yes' + }) + # Wait a bit + time.sleep(2) + + # Abort the task + log.info('test_abort_certify: abort the cleanAllRUV task...') + abort_task = cruv_task.abort(certify=True) + + # Wait a while and make sure the abort task is still running + log.info('test_abort_certify...') + + if task_done(topology_m4, abort_task.dn, 10): + log.fatal('test_abort_certify: abort task incorrectly finished') + assert False + + # Now start master 2 so it can be aborted + log.info('test_abort_certify: start master 2 to allow the abort task to finish...') + topology_m4.ms["master2"].start() + + # Wait for the abort task to stop + if not task_done(topology_m4, abort_task.dn, 90): + log.fatal('test_abort_certify: The abort CleanAllRUV task was not aborted') + assert False + + # Check master 1 does not have the clean task running + log.info('test_abort_certify: check master 1 no longer has a cleanAllRUV task...') + if not task_done(topology_m4, cruv_task.dn): + log.fatal('test_abort_certify: CleanAllRUV task was not aborted') + assert False + + log.info('test_abort_certify PASSED, restoring master 4...') + + +def test_stress_clean(topology_m4, m4rid): + """Put each server(m1 - m4) under a stress, and perform the entire clean process + + :id: a8263cd6-f068-4357-86e0-e7c34504c8c5 + :setup: Replication setup with four masters + :steps: + 1. Add a bunch of updates to all masters + 2. Put master 4 to read-only mode + 3. Disable replication on master 4 + 5. Remove agreements to master 4 from other masters + 6. Run a cleanallruv task on master 1 + 7. Check that everything was cleaned + :expectedresults: + 1. Operation should be successful + 2. Master 4 should be put to read-only mode + 3. Replication on master 4 should be disabled + 2. Agreements to master 4 should be removed + 5. Agreements to master 4 should be removed + 6. Operation should be successful + 7. 
Everything should be cleaned + """ + + log.info('Running test_stress_clean...') + log.info('test_stress_clean: put all the masters under load...') + + ldbm_config = LDBMConfig(topology_m4.ms["master4"]) + + # Put all the masters under load + m1_add_users = AddUsers(topology_m4.ms["master1"], 2000) + m1_add_users.start() + m2_add_users = AddUsers(topology_m4.ms["master2"], 2000) + m2_add_users.start() + m3_add_users = AddUsers(topology_m4.ms["master3"], 2000) + m3_add_users.start() + m4_add_users = AddUsers(topology_m4.ms["master4"], 2000) + m4_add_users.start() + + # Allow sometime to get replication flowing in all directions + log.info('test_stress_clean: allow some time for replication to get flowing...') + time.sleep(5) + + # Put master 4 into read only mode + ldbm_config.set('nsslapd-readonly', 'on') + # We need to wait for master 4 to push its changes out + log.info('test_stress_clean: allow some time for master 4 to push changes out (60 seconds)...') + time.sleep(30) + + # Remove the agreements from the other masters that point to master 4 + remove_master4_agmts("test_stress_clean", topology_m4) + + # Run the task + cruv_task = CleanAllRUVTask(topology_m4.ms["master1"]) + cruv_task.create(properties={ + 'replica-id': m4rid, + 'replica-base-dn': DEFAULT_SUFFIX, + 'replica-force-cleaning': 'no' + }) + cruv_task.wait() + + # Wait for the update to finish + log.info('test_stress_clean: wait for all the updates to finish...') + m1_add_users.join() + m2_add_users.join() + m3_add_users.join() + m4_add_users.join() + + # Check the other master's RUV for 'replica 4' + log.info('test_stress_clean: check if all the replicas have been cleaned...') + clean = check_ruvs("test_stress_clean", topology_m4, m4rid) + assert clean + + log.info('test_stress_clean: PASSED, restoring master 4...') + + # Sleep for a bit to replication complete + log.info("Sleep for 120 seconds to allow replication to complete...") + repl = ReplicationManager(DEFAULT_SUFFIX) + repl.test_replication_topology([ + topology_m4.ms["master1"], + topology_m4.ms["master2"], + topology_m4.ms["master3"], + ], timeout=120) + + # Turn off readonly mode + ldbm_config.set('nsslapd-readonly', 'off') + + +def test_multiple_tasks_with_force(topology_m4, m4rid): + """Check that multiple tasks with a 'force' option work properly + + :id: eb76a93d-8d1c-405e-9f25-6e8d5a781098 + :setup: Replication setup with four masters + :steps: + 1. Stop master 3 + 2. Add a bunch of updates to master 4 + 3. Disable replication on master 4 + 4. Start master 3 + 5. Remove agreements to master 4 from other masters + 6. Run a cleanallruv task on master 1 with a 'force' option 'on' + 7. Run one more cleanallruv task on master 1 with a 'force' option 'off' + 8. Check that everything was cleaned + :expectedresults: + 1. Master 3 should be stopped + 2. Operation should be successful + 3. Replication on master 4 should be disabled + 4. Master 3 should be started + 5. Agreements to master 4 should be removed + 6. Operation should be successful + 7. Operation should be successful + 8. Everything should be cleaned + """ + + log.info('Running test_multiple_tasks_with_force...') + + # Stop master 3, while we update master 4, so that 3 is behind the other masters + topology_m4.ms["master3"].stop() + + # Add a bunch of updates to master 4 + m4_add_users = AddUsers(topology_m4.ms["master4"], 1500) + m4_add_users.start() + m4_add_users.join() + + # Start master 3, it should be out of sync with the other replicas... 
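+    # As in test_clean_force, master 3 has deliberately missed the updates above, which is
+    # why the first task below uses replica-force-cleaning: yes, while the second (non-forced)
+    # task may be refused with UNWILLING_TO_PERFORM.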
+ topology_m4.ms["master3"].start() + + # Disable master 4 + # Remove the agreements from the other masters that point to master 4 + remove_master4_agmts("test_multiple_tasks_with_force", topology_m4) + + # Run the task, use "force" because master 3 is not in sync with the other replicas + # in regards to the replica 4 RUV + log.info('test_multiple_tasks_with_force: run the cleanAllRUV task with "force" on...') + cruv_task = CleanAllRUVTask(topology_m4.ms["master1"]) + cruv_task.create(properties={ + 'replica-id': m4rid, + 'replica-base-dn': DEFAULT_SUFFIX, + 'replica-force-cleaning': 'yes', + 'replica-certify-all': 'no' + }) + + log.info('test_multiple_tasks_with_force: run the cleanAllRUV task with "force" off...') + + # NOTE: This must be try not py.test raises, because the above may or may + # not have completed yet .... + try: + cruv_task_fail = CleanAllRUVTask(topology_m4.ms["master1"]) + cruv_task_fail.create(properties={ + 'replica-id': m4rid, + 'replica-base-dn': DEFAULT_SUFFIX, + 'replica-force-cleaning': 'no', + 'replica-certify-all': 'no' + }) + cruv_task_fail.wait() + except ldap.UNWILLING_TO_PERFORM: + pass + # Wait for the force task .... + cruv_task.wait() + + # Check the other master's RUV for 'replica 4' + log.info('test_multiple_tasks_with_force: check all the masters have been cleaned...') + clean = check_ruvs("test_clean_force", topology_m4, m4rid) + assert clean + # Check master 1 does not have the clean task running + log.info('test_abort: check master 1 no longer has a cleanAllRUV task...') + if not task_done(topology_m4, cruv_task.dn): + log.fatal('test_abort: CleanAllRUV task was not aborted') + assert False + + +@pytest.mark.bz1466441 +@pytest.mark.ds50370 +def test_clean_shutdown_crash(topology_m2): + """Check that server didn't crash after shutdown when running CleanAllRUV task + + :id: c34d0b40-3c3e-4f53-8656-5e4c2a310aaf + :setup: Replication setup with two masters + :steps: + 1. Enable TLS on both masters + 2. Reconfigure both agreements to use TLS Client auth + 3. Stop master2 + 4. Run the CleanAllRUV task + 5. Restart master1 + 6. Check if master1 didn't crash + 7. Restart master1 again + 8. Check if master1 didn't crash + + :expectedresults: + 1. Success + 2. Success + 3. Success + 4. Success + 5. Success + 6. Success + 7. Success + 8. 
Success + """ + + m1 = topology_m2.ms["master1"] + m2 = topology_m2.ms["master2"] + + repl = ReplicationManager(DEFAULT_SUFFIX) + + cm_m1 = CertmapLegacy(m1) + cm_m2 = CertmapLegacy(m2) + + certmaps = cm_m1.list() + certmaps['default']['DNComps'] = None + certmaps['default']['CmapLdapAttr'] = 'nsCertSubjectDN' + + cm_m1.set(certmaps) + cm_m2.set(certmaps) + + log.info('Enabling TLS') + [i.enable_tls() for i in topology_m2] + + log.info('Creating replication dns') + services = ServiceAccounts(m1, DEFAULT_SUFFIX) + repl_m1 = services.get('%s:%s' % (m1.host, m1.sslport)) + repl_m1.set('nsCertSubjectDN', m1.get_server_tls_subject()) + + repl_m2 = services.get('%s:%s' % (m2.host, m2.sslport)) + repl_m2.set('nsCertSubjectDN', m2.get_server_tls_subject()) + + log.info('Changing auth type') + replica_m1 = Replicas(m1).get(DEFAULT_SUFFIX) + agmt_m1 = replica_m1.get_agreements().list()[0] + agmt_m1.replace_many( + ('nsDS5ReplicaBindMethod', 'SSLCLIENTAUTH'), + ('nsDS5ReplicaTransportInfo', 'SSL'), + ('nsDS5ReplicaPort', '%s' % m2.sslport), + ) + + agmt_m1.remove_all('nsDS5ReplicaBindDN') + + replica_m2 = Replicas(m2).get(DEFAULT_SUFFIX) + agmt_m2 = replica_m2.get_agreements().list()[0] + + agmt_m2.replace_many( + ('nsDS5ReplicaBindMethod', 'SSLCLIENTAUTH'), + ('nsDS5ReplicaTransportInfo', 'SSL'), + ('nsDS5ReplicaPort', '%s' % m1.sslport), + ) + agmt_m2.remove_all('nsDS5ReplicaBindDN') + + log.info('Stopping master2') + m2.stop() + + log.info('Run the cleanAllRUV task') + cruv_task = CleanAllRUVTask(m1) + cruv_task.create(properties={ + 'replica-id': repl.get_rid(m1), + 'replica-base-dn': DEFAULT_SUFFIX, + 'replica-force-cleaning': 'no', + 'replica-certify-all': 'yes' + }) + + m1.restart() + + log.info('Check if master1 crashed') + assert not m1.detectDisorderlyShutdown() + + log.info('Repeat') + m1.restart() + assert not m1.detectDisorderlyShutdown() + + +if __name__ == '__main__': + # Run isolated + # -s for DEBUG mode + CURRENT_FILE = os.path.realpath(__file__) + pytest.main("-s %s" % CURRENT_FILE) diff --git a/dirsrvtests/tests/suites/replication/conflict_resolve_test.py b/dirsrvtests/tests/suites/replication/conflict_resolve_test.py new file mode 100644 index 0000000..99a0729 --- /dev/null +++ b/dirsrvtests/tests/suites/replication/conflict_resolve_test.py @@ -0,0 +1,880 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2018 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. 
+# --- END COPYRIGHT BLOCK --- +# +import time +import logging +import ldap +import pytest +from itertools import permutations +from lib389._constants import * +from lib389.idm.nscontainer import nsContainers +from lib389.idm.user import UserAccounts +from lib389.idm.group import Groups +from lib389.idm.organizationalunit import OrganizationalUnits +from lib389.replica import ReplicationManager +from lib389.agreement import Agreements +from lib389.plugins import MemberOfPlugin + +pytestmark = pytest.mark.tier1 + +DEBUGGING = os.getenv("DEBUGGING", default=False) +if DEBUGGING: + logging.getLogger(__name__).setLevel(logging.DEBUG) +else: + logging.getLogger(__name__).setLevel(logging.INFO) +log = logging.getLogger(__name__) + + +def _create_user(users, user_num, group_num=2000, sleep=False): + """Creates user entry""" + + user = users.create_test_user(user_num, group_num) + if sleep: + time.sleep(1) + return user + + +def _rename_user(users, user_num, new_num, sleep=False): + """Rename user entry""" + + assert user_num != new_num, "New user number should not be the same as the old one" + + user = users.get('test_user_{}'.format(user_num)) + user.rename('uid=test_user_{}'.format(new_num)) + if sleep: + time.sleep(1) + + +def _modify_user(users, user_num, sleep=False): + """Modify user entry""" + + user = users.get('test_user_{}'.format(user_num)) + user.replace("homeDirectory", "/home/test_user0{}".format(user_num)) + if sleep: + time.sleep(1) + time.sleep(1) + + +def _delete_user(users, user_num, sleep=False): + """Delete user entry""" + + user = users.get('test_user_{}'.format(user_num)) + user.delete() + if sleep: + time.sleep(1) + time.sleep(1) + + +def _create_group(groups, num, member, sleep=False): + """Creates group entry""" + + group_props = {'cn': 'test_group_{}'.format(num), + 'member': member} + group = groups.create(properties=group_props) + if sleep: + time.sleep(1) + return group + + +def _delete_group(groups, num, sleep=False): + """Delete group entry""" + + group = groups.get('test_group_{}'.format(num)) + group.delete() + if sleep: + time.sleep(1) + + +def _create_container(inst, dn, name, sleep=False): + """Creates container entry""" + + conts = nsContainers(inst, dn) + cont = conts.create(properties={'cn': name}) + if sleep: + time.sleep(1) + return cont + + +def _delete_container(cont, sleep=False): + """Deletes container entry""" + + cont.delete() + if sleep: + time.sleep(1) + + +def _test_base(topology): + """Add test container for entries, enable plugin logging, + audit log, error log for replica and access log for internal + """ + + M1 = topology.ms["master1"] + + conts = nsContainers(M1, SUFFIX) + base_m2 = conts.create(properties={'cn': 'test_container'}) + + for inst in topology: + inst.config.loglevel([ErrorLog.DEFAULT, ErrorLog.REPLICA], service='error') + inst.config.loglevel([AccessLog.DEFAULT, AccessLog.INTERNAL], service='access') + inst.config.set('nsslapd-plugin-logging', 'on') + inst.config.enable_log('audit') + inst.restart() + + return base_m2 + + +def _delete_test_base(inst, base_m2_dn): + """Delete test container with entries and entry conflicts""" + + ents = inst.search_s(base_m2_dn, ldap.SCOPE_SUBTREE, filterstr="(|(objectclass=*)(objectclass=ldapsubentry))") + + for ent in sorted(ents, key=lambda e: len(e.dn), reverse=True): + log.debug("Delete entry children {}".format(ent.dn)) + try: + inst.delete_ext_s(ent.dn) + except ldap.NO_SUCH_OBJECT: # For the case with objectclass: glue entries + pass + + +@pytest.fixture +def base_m2(topology_m2, 
request): + tb = _test_base(topology_m2) + + def fin(): + if not DEBUGGING: + _delete_test_base(topology_m2.ms["master1"], tb.dn) + request.addfinalizer(fin) + + return tb + + +@pytest.fixture +def base_m3(topology_m3, request): + tb = _test_base(topology_m3) + + def fin(): + if not DEBUGGING: + _delete_test_base(topology_m3.ms["master1"], tb.dn) + request.addfinalizer(fin) + + return tb + + +class TestTwoMasters: + def test_add_modrdn(self, topology_m2, base_m2): + """Check that conflict properly resolved for create - modrdn operations + + :id: 77f09b18-03d1-45da-940b-1ad2c2908ebb + :setup: Two master replication, test container for entries, enable plugin logging, + audit log, error log for replica and access log for internal + :steps: + 1. Add five users to m1 and wait for replication to happen + 2. Pause replication + 3. Create an entry on m1 and m2 + 4. Create an entry on m1 and rename on m2 + 5. Rename an entry on m1 and create on m2 + 6. Rename an entry on m1 and rename on m2 + 7. Rename an entry on m1 and rename on m2. Use different entries + but rename them to the same entry + 8. Resume replication + 9. Check that the entries on both masters are the same and replication is working + :expectedresults: + 1. It should pass + 2. It should pass + 3. It should pass + 4. It should pass + 5. It should pass + 6. It should pass + 7. It should pass + 8. It should pass + """ + + M1 = topology_m2.ms["master1"] + M2 = topology_m2.ms["master2"] + test_users_m1 = UserAccounts(M1, base_m2.dn, rdn=None) + test_users_m2 = UserAccounts(M2, base_m2.dn, rdn=None) + repl = ReplicationManager(SUFFIX) + + for user_num in range(1000, 1005): + _create_user(test_users_m1, user_num) + + repl.test_replication(M1, M2) + topology_m2.pause_all_replicas() + + log.info("Test create - modrdn") + user_num += 1 + _create_user(test_users_m1, user_num, sleep=True) + _create_user(test_users_m2, user_num, sleep=True) + + user_num += 1 + _create_user(test_users_m1, user_num, sleep=True) + _rename_user(test_users_m2, 1000, user_num, sleep=True) + + user_num += 1 + _rename_user(test_users_m1, 1001, user_num, sleep=True) + _create_user(test_users_m2, user_num, sleep=True) + + user_num += 1 + _rename_user(test_users_m1, 1002, user_num, sleep=True) + _rename_user(test_users_m2, 1002, user_num, sleep=True) + + user_num += 1 + _rename_user(test_users_m1, 1003, user_num, sleep=True) + _rename_user(test_users_m2, 1004, user_num) + + topology_m2.resume_all_replicas() + + repl.test_replication_topology(topology_m2) + + user_dns_m1 = [user.dn for user in test_users_m1.list()] + user_dns_m2 = [user.dn for user in test_users_m2.list()] + assert set(user_dns_m1) == set(user_dns_m2) + + def test_complex_add_modify_modrdn_delete(self, topology_m2, base_m2): + """Check that conflict properly resolved for complex operations + which involve add, modify, modrdn and delete + + :id: 77f09b18-03d1-45da-940b-1ad2c2908eb1 + :setup: Two master replication, test container for entries, enable plugin logging, + audit log, error log for replica and access log for internal + :steps: + 1. Add ten users to m1 and wait for replication to happen + 2. Pause replication + 3. Test add-del on m1 and add on m2 + 4. Test add-mod on m1 and add on m2 + 5. Test add-modrdn on m1 and add on m2 + 6. Test multiple add, modrdn + 7. Test Add-del on both masters + 8. Test modrdn-modrdn + 9. Test modrdn-del + 10. Resume replication + 11. Check that the entries on both masters are the same and replication is working + :expectedresults: + 1. It should pass + 2. 
It should pass + 3. It should pass + 4. It should pass + 5. It should pass + 6. It should pass + 7. It should pass + 8. It should pass + 9. It should pass + 10. It should pass + 11. It should pass + """ + + M1 = topology_m2.ms["master1"] + M2 = topology_m2.ms["master2"] + + test_users_m1 = UserAccounts(M1, base_m2.dn, rdn=None) + test_users_m2 = UserAccounts(M2, base_m2.dn, rdn=None) + repl = ReplicationManager(SUFFIX) + + for user_num in range(1100, 1110): + _create_user(test_users_m1, user_num) + + repl.test_replication(M1, M2) + topology_m2.pause_all_replicas() + + log.info("Test add-del on M1 and add on M2") + user_num += 1 + _create_user(test_users_m1, user_num) + _delete_user(test_users_m1, user_num, sleep=True) + _create_user(test_users_m2, user_num, sleep=True) + + user_num += 1 + _create_user(test_users_m1, user_num, sleep=True) + _create_user(test_users_m2, user_num, sleep=True) + _delete_user(test_users_m1, user_num, sleep=True) + + user_num += 1 + _create_user(test_users_m2, user_num, sleep=True) + _create_user(test_users_m1, user_num) + _delete_user(test_users_m1, user_num) + + log.info("Test add-mod on M1 and add on M2") + user_num += 1 + _create_user(test_users_m1, user_num) + _modify_user(test_users_m1, user_num, sleep=True) + _create_user(test_users_m2, user_num, sleep=True) + + user_num += 1 + _create_user(test_users_m1, user_num, sleep=True) + _create_user(test_users_m2, user_num, sleep=True) + _modify_user(test_users_m1, user_num, sleep=True) + + user_num += 1 + _create_user(test_users_m2, user_num, sleep=True) + _create_user(test_users_m1, user_num) + _modify_user(test_users_m1, user_num) + + log.info("Test add-modrdn on M1 and add on M2") + user_num += 1 + _create_user(test_users_m1, user_num) + _rename_user(test_users_m1, user_num, user_num+20, sleep=True) + _create_user(test_users_m2, user_num, sleep=True) + + user_num += 1 + _create_user(test_users_m1, user_num, sleep=True) + _create_user(test_users_m2, user_num, sleep=True) + _rename_user(test_users_m1, user_num, user_num+20, sleep=True) + + user_num += 1 + _create_user(test_users_m2, user_num, sleep=True) + _create_user(test_users_m1, user_num) + _rename_user(test_users_m1, user_num, user_num+20) + + log.info("Test multiple add, modrdn") + user_num += 1 + _create_user(test_users_m1, user_num, sleep=True) + _create_user(test_users_m2, user_num, sleep=True) + _rename_user(test_users_m1, user_num, user_num+20) + _create_user(test_users_m1, user_num, sleep=True) + _modify_user(test_users_m2, user_num, sleep=True) + + log.info("Add - del on both masters") + user_num += 1 + _create_user(test_users_m1, user_num) + _delete_user(test_users_m1, user_num, sleep=True) + _create_user(test_users_m2, user_num) + _delete_user(test_users_m2, user_num, sleep=True) + + log.info("Test modrdn - modrdn") + user_num += 1 + _rename_user(test_users_m1, 1109, 1129, sleep=True) + _rename_user(test_users_m2, 1109, 1129, sleep=True) + + log.info("Test modrdn - del") + user_num += 1 + _rename_user(test_users_m1, 1100, 1120, sleep=True) + _delete_user(test_users_m2, 1100) + + user_num += 1 + _delete_user(test_users_m2, 1101, sleep=True) + _rename_user(test_users_m1, 1101, 1121) + + topology_m2.resume_all_replicas() + + repl.test_replication_topology(topology_m2) + time.sleep(30) + + user_dns_m1 = [user.dn for user in test_users_m1.list()] + user_dns_m2 = [user.dn for user in test_users_m2.list()] + assert set(user_dns_m1) == set(user_dns_m2) + + def test_memberof_groups(self, topology_m2, base_m2): + """Check that conflict properly resolved 
for operations + with memberOf and groups + + :id: 77f09b18-03d1-45da-940b-1ad2c2908eb3 + :setup: Two master replication, test container for entries, enable plugin logging, + audit log, error log for replica and access log for internal + :steps: + 1. Enable memberOf plugin + 2. Add 30 users to m1 and wait for replication to happen + 3. Pause replication + 4. Create a group on m1 and m2 + 5. Create a group on m1 and m2, delete from m1 + 6. Create a group on m1, delete from m1, and create on m2, + 7. Create a group on m2 and m1, delete from m1 + 8. Create two different groups on m2 + 9. Resume replication + 10. Check that the entries on both masters are the same and replication is working + :expectedresults: + 1. It should pass + 2. It should pass + 3. It should pass + 4. It should pass + 5. It should pass + 6. It should pass + 7. It should pass + 8. It should pass + 9. It should pass + 10. It should pass + """ + + pytest.xfail("Issue 49591 - work in progress") + + M1 = topology_m2.ms["master1"] + M2 = topology_m2.ms["master2"] + test_users_m1 = UserAccounts(M1, base_m2.dn, rdn=None) + test_groups_m1 = Groups(M1, base_m2.dn, rdn=None) + test_groups_m2 = Groups(M2, base_m2.dn, rdn=None) + + repl = ReplicationManager(SUFFIX) + + for inst in topology_m2.ms.values(): + memberof = MemberOfPlugin(inst) + memberof.enable() + agmt = Agreements(inst).list()[0] + agmt.replace_many(('nsDS5ReplicatedAttributeListTotal', + '(objectclass=*) $ EXCLUDE '), + ('nsDS5ReplicatedAttributeList', + '(objectclass=*) $ EXCLUDE memberOf')) + inst.restart() + user_dns = [] + for user_num in range(10): + user_trio = [] + for num in range(0, 30, 10): + user = _create_user(test_users_m1, 1200 + user_num + num) + user_trio.append(user.dn) + user_dns.append(user_trio) + + repl.test_replication(M1, M2) + topology_m2.pause_all_replicas() + + log.info("Check a simple conflict") + group_num = 0 + _create_group(test_groups_m1, group_num, user_dns[group_num], sleep=True) + _create_group(test_groups_m2, group_num, user_dns[group_num], sleep=True) + + log.info("Check a add - del") + group_num += 1 + _create_group(test_groups_m1, group_num, user_dns[group_num], sleep=True) + _create_group(test_groups_m2, group_num, user_dns[group_num], sleep=True) + _delete_group(test_groups_m1, group_num) + + group_num += 1 + _create_group(test_groups_m1, group_num, user_dns[group_num]) + _delete_group(test_groups_m1, group_num, sleep=True) + _create_group(test_groups_m2, group_num, user_dns[group_num]) + + group_num += 1 + _create_group(test_groups_m2, group_num, user_dns[group_num], sleep=True) + _create_group(test_groups_m1, group_num, user_dns[group_num]) + _delete_group(test_groups_m1, group_num, sleep=True) + + group_num += 1 + _create_group(test_groups_m2, group_num, user_dns[group_num]) + group_num += 1 + _create_group(test_groups_m2, group_num, user_dns[group_num]) + + topology_m2.resume_all_replicas() + + repl.test_replication_topology(topology_m2) + + group_dns_m1 = [group.dn for group in test_groups_m1.list()] + group_dns_m2 = [group.dn for group in test_groups_m2.list()] + assert set(group_dns_m1) == set(group_dns_m2) + + def test_managed_entries(self, topology_m2): + """Check that conflict properly resolved for operations + with managed entries + + :id: 77f09b18-03d1-45da-940b-1ad2c2908eb4 + :setup: Two master replication, test container for entries, enable plugin logging, + audit log, error log for replica and access log for internal + :steps: + 1. Create ou=managed_users and ou=managed_groups under test container + 2. 
Configure managed entries plugin and add a template to test container + 3. Add a user to m1 and wait for replication to happen + 4. Pause replication + 5. Create a user on m1 and m2 with a same group ID on both master + 6. Create a user on m1 and m2 with a different group ID on both master + 7. Resume replication + 8. Check that the entries on both masters are the same and replication is working + :expectedresults: + 1. It should pass + 2. It should pass + 3. It should pass + 4. It should pass + 5. It should pass + 6. It should pass + 7. It should pass + 8. It should pass + """ + + pytest.xfail("Issue 49591 - work in progress") + + M1 = topology_m2.ms["master1"] + M2 = topology_m2.ms["master2"] + repl = ReplicationManager(SUFFIX) + + ous = OrganizationalUnits(M1, DEFAULT_SUFFIX) + ou_people = ous.create(properties={'ou': 'managed_people'}) + ou_groups = ous.create(properties={'ou': 'managed_groups'}) + + test_users_m1 = UserAccounts(M1, DEFAULT_SUFFIX, rdn='ou={}'.format(ou_people.rdn)) + test_users_m2 = UserAccounts(M2, DEFAULT_SUFFIX, rdn='ou={}'.format(ou_people.rdn)) + + # TODO: Refactor ManagedPlugin class functionality (also add configs and templates) + conts = nsContainers(M1, SUFFIX) + template = conts.create(properties={ + 'objectclass': 'top mepTemplateEntry extensibleObject'.split(), + 'cn': 'MEP Template', + 'mepRDNAttr': 'cn', + 'mepStaticAttr': ['objectclass: posixGroup', 'objectclass: extensibleObject'], + 'mepMappedAttr': ['cn: $uid', 'uid: $cn', 'gidNumber: $uidNumber'] + }) + repl.test_replication(M1, M2) + + for inst in topology_m2.ms.values(): + conts = nsContainers(inst, "cn={},{}".format(PLUGIN_MANAGED_ENTRY, DN_PLUGIN)) + conts.create(properties={'objectclass': 'top extensibleObject'.split(), + 'cn': 'config', + 'originScope': ou_people.dn, + 'originFilter': 'objectclass=posixAccount', + 'managedBase': ou_groups.dn, + 'managedTemplate': template.dn}) + inst.restart() + + _create_user(test_users_m1, 1, 1) + + topology_m2.pause_all_replicas() + + _create_user(test_users_m1, 2, 2, sleep=True) + _create_user(test_users_m2, 2, 2, sleep=True) + + _create_user(test_users_m1, 3, 3, sleep=True) + _create_user(test_users_m2, 3, 33) + + topology_m2.resume_all_replicas() + + repl.test_replication_topology(topology_m2) + + user_dns_m1 = [user.dn for user in test_users_m1.list()] + user_dns_m2 = [user.dn for user in test_users_m2.list()] + assert set(user_dns_m1) == set(user_dns_m2) + + def test_nested_entries_with_children(self, topology_m2, base_m2): + """Check that conflict properly resolved for operations + with nested entries with children + + :id: 77f09b18-03d1-45da-940b-1ad2c2908eb5 + :setup: Two master replication, test container for entries, enable plugin logging, + audit log, error log for replica and access log for internal + :steps: + 1. Add 15 containers to m1 and wait for replication to happen + 2. Pause replication + 3. Create parent-child on master2 and master1 + 4. Create parent-child on master1 and master2 + 5. Create parent-child on master1 and master2 different child rdn + 6. Create parent-child on master1 and delete parent on master2 + 7. Create parent on master1, delete it and parent-child on master2, delete them + 8. Create parent on master1, delete it and parent-two children on master2 + 9. Create parent-two children on master1 and parent-child on master2, delete them + 10. Create three subsets inside existing container entry, applying only part of changes on m2 + 11. Create more combinations of the subset with parent-child on m1 and parent on m2 + 12. 
Delete container on m1, modify user1 on m1, create parent on m2 and modify user2 on m2 + 13. Resume replication + 14. Check that the entries on both masters are the same and replication is working + :expectedresults: + 1. It should pass + 2. It should pass + 3. It should pass + 4. It should pass + 5. It should pass + 6. It should pass + 7. It should pass + 8. It should pass + 9. It should pass + 10. It should pass + 11. It should pass + 12. It should pass + 13. It should pass + 14. It should pass + """ + + pytest.xfail("Issue 49591 - work in progress") + + M1 = topology_m2.ms["master1"] + M2 = topology_m2.ms["master2"] + repl = ReplicationManager(SUFFIX) + test_users_m1 = UserAccounts(M1, base_m2.dn, rdn=None) + test_users_m2 = UserAccounts(M2, base_m2.dn, rdn=None) + _create_user(test_users_m1, 4000) + _create_user(test_users_m1, 4001) + + cont_list = [] + for num in range(15): + cont = _create_container(M1, base_m2.dn, 'sub{}'.format(num)) + cont_list.append(cont) + + repl.test_replication(M1, M2) + + topology_m2.pause_all_replicas() + + log.info("Create parent-child on master2 and master1") + _create_container(M2, base_m2.dn, 'p0', sleep=True) + cont_p = _create_container(M1, base_m2.dn, 'p0', sleep=True) + _create_container(M1, cont_p.dn, 'c0', sleep=True) + _create_container(M2, cont_p.dn, 'c0', sleep=True) + + log.info("Create parent-child on master1 and master2") + cont_p = _create_container(M1, base_m2.dn, 'p1', sleep=True) + _create_container(M2, base_m2.dn, 'p1', sleep=True) + _create_container(M1, cont_p.dn, 'c1', sleep=True) + _create_container(M2, cont_p.dn, 'c1', sleep=True) + + log.info("Create parent-child on master1 and master2 different child rdn") + cont_p = _create_container(M1, base_m2.dn, 'p2', sleep=True) + _create_container(M2, base_m2.dn, 'p2', sleep=True) + _create_container(M1, cont_p.dn, 'c2', sleep=True) + _create_container(M2, cont_p.dn, 'c3', sleep=True) + + log.info("Create parent-child on master1 and delete parent on master2") + cont_num = 0 + cont_p_m1 = _create_container(M1, cont_list[cont_num].dn, 'p0', sleep=True) + cont_p_m2 = _create_container(M2, cont_list[cont_num].dn, 'p0', sleep=True) + _create_container(M1, cont_p_m1.dn, 'c0', sleep=True) + _delete_container(cont_p_m2) + + cont_num += 1 + cont_p_m2 = _create_container(M2, cont_list[cont_num].dn, 'p0', sleep=True) + cont_p_m1 = _create_container(M1, cont_list[cont_num].dn, 'p0') + _create_container(M1, cont_p_m1.dn, 'c0', sleep=True) + _delete_container(cont_p_m2, sleep=True) + + log.info("Create parent on master1, delete it and parent-child on master2, delete them") + cont_num += 1 + cont_p_m1 = _create_container(M1, cont_list[cont_num].dn, 'p0') + _delete_container(cont_p_m1, sleep=True) + + cont_p_m2 = _create_container(M2, cont_list[cont_num].dn, 'p0') + cont_c_m2 = _create_container(M2, cont_p_m2.dn, 'c0') + _delete_container(cont_c_m2) + _delete_container(cont_p_m2) + + cont_num += 1 + cont_p_m2 = _create_container(M2, cont_list[cont_num].dn, 'p0') + cont_c_m2 = _create_container(M2, cont_p_m2.dn, 'c0') + _delete_container(cont_c_m2) + _delete_container(cont_p_m2, sleep=True) + + cont_p_m1 = _create_container(M1, cont_list[cont_num].dn, 'p0') + _delete_container(cont_p_m1) + + log.info("Create parent on master1, delete it and parent-two children on master2") + cont_num += 1 + cont_p_m1 = _create_container(M1, cont_list[cont_num].dn, 'p0') + _delete_container(cont_p_m1, sleep=True) + + cont_p_m2 = _create_container(M2, cont_list[cont_num].dn, 'p0') + _create_container(M2, cont_p_m2.dn, 'c0') 
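+        # Master 1 has already deleted this parent, so once the child adds from master 2
+        # replicate, the missing parent is expected to be kept as a glue entry on master 1.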
+ _create_container(M2, cont_p_m2.dn, 'c1') + + cont_num += 1 + cont_p_m2 = _create_container(M2, cont_list[cont_num].dn, 'p0') + _create_container(M2, cont_p_m2.dn, 'c0') + _create_container(M2, cont_p_m2.dn, 'c1', sleep=True) + + cont_p_m1 = _create_container(M1, cont_list[cont_num].dn, 'p0') + _delete_container(cont_p_m1, sleep=True) + + log.info("Create parent-two children on master1 and parent-child on master2, delete them") + cont_num += 1 + cont_p_m2 = _create_container(M2, cont_list[cont_num].dn, 'p0') + cont_c_m2 = _create_container(M2, cont_p_m2.dn, 'c0') + _delete_container(cont_c_m2) + _delete_container(cont_p_m2, sleep=True) + + cont_p_m1 = _create_container(M1, cont_list[cont_num].dn, 'p0') + _create_container(M1, cont_p_m1.dn, 'c0') + _create_container(M1, cont_p_m1.dn, 'c1') + + cont_num += 1 + cont_p_m1 = _create_container(M1, cont_list[cont_num].dn, 'p0') + _create_container(M1, cont_p_m1.dn, 'c0') + _create_container(M1, cont_p_m1.dn, 'c1', sleep=True) + + cont_p_m2 = _create_container(M2, cont_list[cont_num].dn, 'p0') + cont_c_m2 = _create_container(M2, cont_p_m2.dn, 'c0') + _delete_container(cont_c_m2) + _delete_container(cont_p_m2, sleep=True) + + log.info("Create three subsets inside existing container entry, applying only part of changes on m2") + cont_num += 1 + cont_p_m1 = _create_container(M1, cont_list[cont_num].dn, 'p0') + _create_container(M1, cont_p_m1.dn, 'c0') + _create_container(M1, cont_p_m1.dn, 'c1', sleep=True) + _create_container(M2, cont_list[cont_num].dn, 'p0', sleep=True) + + cont_num += 1 + cont_p_m1 = _create_container(M1, cont_list[cont_num].dn, 'p0') + _create_container(M1, cont_p_m1.dn, 'c0') + _create_container(M1, cont_p_m1.dn, 'c1', sleep=True) + cont_p_m2 = _create_container(M2, cont_list[cont_num].dn, 'p0') + _create_container(M2, cont_p_m2.dn, 'c0', sleep=True) + + cont_num += 1 + cont_p_m1 = _create_container(M1, cont_list[cont_num].dn, 'p0') + _create_container(M1, cont_p_m1.dn, 'c0') + _create_container(M1, cont_p_m1.dn, 'c1', sleep=True) + cont_p_m2 = _create_container(M2, cont_list[cont_num].dn, 'p0') + cont_c_m2 = _create_container(M2, cont_p_m2.dn, 'c0') + _delete_container(cont_c_m2, sleep=True) + + log.info("Create more combinations of the subset with parent-child on m1 and parent on m2") + cont_num += 1 + cont_p_m1 = _create_container(M1, cont_list[cont_num].dn, 'p0', sleep=True) + cont_p_m2 = _create_container(M2, cont_list[cont_num].dn, 'p0', sleep=True) + _delete_container(cont_p_m1, sleep=True) + cont_c_m2 = _create_container(M2, cont_p_m2.dn, 'c0') + _delete_container(cont_c_m2) + _delete_container(cont_p_m2, sleep=True) + + cont_num += 1 + cont_p_m1 = _create_container(M1, cont_list[cont_num].dn, 'p0', sleep=True) + cont_p_m2 = _create_container(M2, cont_list[cont_num].dn, 'p0', sleep=True) + _delete_container(cont_p_m1, sleep=True) + _create_container(M2, cont_p_m2.dn, 'c0', sleep=True) + + cont_num += 1 + cont_p_m1 = _create_container(M1, cont_list[cont_num].dn, 'p0', sleep=True) + cont_p_m2 = _create_container(M2, cont_list[cont_num].dn, 'p0', sleep=True) + cont_c_m1 = _create_container(M1, cont_p_m1.dn, 'c0', sleep=True) + _create_container(M2, cont_p_m2.dn, 'c0', sleep=True) + _delete_container(cont_c_m1, sleep=True) + _create_container(M2, cont_p_m2.dn, 'c1', sleep=True) + _delete_container(cont_p_m1, sleep=True) + + log.info("Delete container on m1, modify user1 on m1, create parent on m2 and modify user2 on m2") + cont_num += 1 + _delete_container(cont_list[cont_num]) + _modify_user(test_users_m1, 4000, sleep=True) 
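+        # Master 2 recreates below the container that master 1 just deleted, while each side
+        # also carries an unrelated user modification (test_user 4000 on M1, 4001 on M2).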
+ _create_container(M2, cont_list[cont_num].dn, 'p0') + _modify_user(test_users_m2, 4001) + + topology_m2.resume_all_replicas() + + repl.test_replication_topology(topology_m2, timeout=60) + + conts_dns = {} + for num in range(1, 3): + inst = topology_m2.ms["master{}".format(num)] + conts_dns[inst.serverid] = [] + conts = nsContainers(inst, base_m2.dn) + for cont in conts.list(): + conts_p = nsContainers(inst, cont.dn) + for cont_p in conts_p.list(): + conts_c = nsContainers(inst, cont_p.dn) + conts_dns[inst.serverid].extend([cont_c.dn for cont_c in conts_c.list()]) + conts_dns[inst.serverid].extend([cont_p.dn for cont_p in conts_p.list()]) + conts_dns[inst.serverid].extend([cont.dn for cont in conts.list()]) + + assert set(conts_dns[M1.serverid]) == set(conts_dns[M2.serverid]) + + user_dns_m1 = [user.dn for user in test_users_m1.list()] + user_dns_m2 = [user.dn for user in test_users_m2.list()] + assert set(user_dns_m1) == set(user_dns_m2) + + +class TestThreeMasters: + def test_nested_entries(self, topology_m3, base_m3): + """Check that conflict properly resolved for operations + with nested entries with children + + :id: 77f09b18-03d1-45da-940b-1ad2c2908eb6 + :setup: Three master replication, test container for entries, enable plugin logging, + audit log, error log for replica and access log for internal + :steps: + 1. Add 15 containers to m1 and wait for replication to happen + 2. Pause replication + 3. Create two child entries under each of two entries + 4. Create three child entries under each of three entries + 5. Create two parents on m1 and m2, then on m1 - create a child and delete one parent, + on m2 - delete one parent and create a child + 6. Test a few more parent-child combinations with three instances + 7. Resume replication + 8. Check that the entries on both masters are the same and replication is working + :expectedresults: + 1. It should pass + 2. It should pass + 3. It should pass + 4. It should pass + 5. It should pass + 6. It should pass + 7. It should pass + 8. 
It should pass + """ + + pytest.xfail("Issue 49591 - work in progress") + + M1 = topology_m3.ms["master1"] + M2 = topology_m3.ms["master2"] + M3 = topology_m3.ms["master3"] + repl = ReplicationManager(SUFFIX) + + cont_list = [] + for num in range(11): + cont = _create_container(M1, base_m3.dn, 'sub{}'.format(num)) + cont_list.append(cont) + + repl.test_replication(M1, M2) + repl.test_replication(M1, M3) + + topology_m3.pause_all_replicas() + + log.info("Create two child entries under each of two entries") + cont_num = -1 + for num in range(2): + cont_num += 1 + _create_container(M1, cont_list[cont_num].dn, 'p0', sleep=True) + _create_container(M2, cont_list[cont_num].dn, 'p1', sleep=True) + + log.info("Create three child entries under each of three entries") + for num in range(3): + cont_num += 1 + _create_container(M1, cont_list[cont_num].dn, 'p0', sleep=True) + _create_container(M2, cont_list[cont_num].dn, 'p1', sleep=True) + _create_container(M3, cont_list[cont_num].dn, 'p2', sleep=True) + + log.info("Create two parents on m1 and m2, then on m1 - create a child and delete one parent," + "on m2 - delete one parent and create a child") + for inst1, inst2 in ((M1, M2), (M2, M1)): + cont_num += 1 + cont_p_m1_1 = _create_container(inst1, cont_list[cont_num].dn, 'p0') + cont_p_m1_2 = _create_container(inst1, cont_list[cont_num].dn, 'p1', sleep=True) + cont_p_m2_1 = _create_container(inst2, cont_list[cont_num].dn, 'p0') + cont_p_m2_2 = _create_container(inst2, cont_list[cont_num].dn, 'p1', sleep=True) + _create_container(inst1, cont_p_m1_1.dn, 'c0', sleep=True) + _delete_container(cont_p_m2_1, sleep=True) + _delete_container(cont_p_m1_2, sleep=True) + _create_container(inst2, cont_p_m2_2.dn, 'c0', sleep=True) + + log.info("Test a few more parent-child combinations on three instances") + for inst1, inst2, inst3 in ((M1, M2, M3), (M2, M1, M3), (M3, M1, M2)): + cont_num += 1 + cont_p_m1 = _create_container(inst1, cont_list[cont_num].dn, 'p0') + _delete_container(cont_p_m1, sleep=True) + + cont_p_m2 = _create_container(inst2, cont_list[cont_num].dn, 'p0') + cont_c_m2 = _create_container(inst2, cont_p_m2.dn, 'c0') + _delete_container(cont_c_m2) + _delete_container(cont_p_m2, sleep=True) + + cont_p_m3 = _create_container(inst3, cont_list[cont_num].dn, 'p0') + _create_container(inst3, cont_p_m3.dn, 'c0') + _create_container(inst3, cont_p_m3.dn, 'c1', sleep=True) + + topology_m3.resume_all_replicas() + + repl.test_replication_topology(topology_m3) + + conts_dns = {} + for num in range(1, 4): + inst = topology_m3.ms["master{}".format(num)] + conts_dns[inst.serverid] = [] + conts = nsContainers(inst, base_m3.dn) + for cont in conts.list(): + conts_p = nsContainers(inst, cont.dn) + for cont_p in conts_p.list(): + conts_c = nsContainers(inst, cont_p.dn) + conts_dns[inst.serverid].extend([cont_c.dn for cont_c in conts_c.list()]) + conts_dns[inst.serverid].extend([cont_p.dn for cont_p in conts_p.list()]) + conts_dns[inst.serverid].extend([cont.dn for cont in conts.list()]) + + for conts1, conts2 in permutations(conts_dns.values(), 2): + assert set(conts1) == set(conts2) + + +if __name__ == '__main__': + # Run isolated + # -s for DEBUG mode + CURRENT_FILE = os.path.realpath(__file__) + pytest.main("-s %s" % CURRENT_FILE) diff --git a/dirsrvtests/tests/suites/replication/conftest.py b/dirsrvtests/tests/suites/replication/conftest.py new file mode 100644 index 0000000..4749211 --- /dev/null +++ b/dirsrvtests/tests/suites/replication/conftest.py @@ -0,0 +1,53 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 
2018 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. +# --- END COPYRIGHT BLOCK --- +# +import os +import logging +import pytest +from lib389.topologies import create_topology +from lib389._constants import ReplicaRole + +DEBUGGING = os.getenv('DEBUGGING', default=False) +if DEBUGGING: + logging.getLogger(__name__).setLevel(logging.DEBUG) +else: + logging.getLogger(__name__).setLevel(logging.INFO) +log = logging.getLogger(__name__) + + +# Redefine some fixtures so we can use them with class scope +@pytest.fixture(scope="class") +def topology_m2(request): + """Create Replication Deployment with two masters""" + + topology = create_topology({ReplicaRole.MASTER: 2}) + + def fin(): + if DEBUGGING: + [inst.stop() for inst in topology] + else: + [inst.delete() for inst in topology] + request.addfinalizer(fin) + + return topology + + +@pytest.fixture(scope="class") +def topology_m3(request): + """Create Replication Deployment with three masters""" + + topology = create_topology({ReplicaRole.MASTER: 3}) + + def fin(): + if DEBUGGING: + [inst.stop() for inst in topology] + else: + [inst.delete() for inst in topology] + request.addfinalizer(fin) + + return topology diff --git a/dirsrvtests/tests/suites/replication/encryption_cl5_test.py b/dirsrvtests/tests/suites/replication/encryption_cl5_test.py new file mode 100644 index 0000000..d6a5b9e --- /dev/null +++ b/dirsrvtests/tests/suites/replication/encryption_cl5_test.py @@ -0,0 +1,135 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2017 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. +# --- END COPYRIGHT BLOCK --- +# +import logging +import pytest +from lib389.utils import ensure_bytes +from lib389.replica import ReplicationManager +from lib389.dseldif import DSEldif +from lib389.idm.user import UserAccounts, TEST_USER_PROPERTIES +from lib389.topologies import topology_m2 +from lib389._constants import * + +pytestmark = pytest.mark.tier1 + +ATTRIBUTE = 'unhashed#user#password' + +DEBUGGING = os.getenv("DEBUGGING", default=False) +if DEBUGGING: + logging.getLogger(__name__).setLevel(logging.DEBUG) +else: + logging.getLogger(__name__).setLevel(logging.INFO) +log = logging.getLogger(__name__) + + +@pytest.fixture(scope="module") +def topology_with_tls(topology_m2): + """Enable TLS on all masters""" + + [i.enable_tls() for i in topology_m2] + + repl = ReplicationManager(DEFAULT_SUFFIX) + repl.test_replication(topology_m2.ms['master1'], topology_m2.ms['master2']) + + return topology_m2 + + +def _enable_changelog_encryption(inst, encrypt_algorithm): + """Configure changelog encryption for master""" + + dse_ldif = DSEldif(inst) + log.info('Configuring changelog encryption:{} for: {}'.format(inst.serverid, encrypt_algorithm)) + inst.stop() + dse_ldif.replace(DN_CHANGELOG, 'nsslapd-encryptionalgorithm', encrypt_algorithm) + if dse_ldif.get(DN_CHANGELOG, 'nsSymmetricKey'): + dse_ldif.delete(DN_CHANGELOG, 'nsSymmetricKey') + inst.start() + + +def _check_unhashed_userpw_encrypted(inst, change_type, user_dn, user_pw, is_encrypted): + """Check if unhashed#user#password attribute value is encrypted or not""" + + changelog_dbdir = os.path.join(os.path.dirname(inst.dbdir), DEFAULT_CHANGELOG_DB) + for dbfile in os.listdir(changelog_dbdir): + if dbfile.endswith('.db'): + changelog_dbfile = os.path.join(changelog_dbdir, dbfile) + log.info('Changelog dbfile file exist: {}'.format(changelog_dbfile)) + log.info('Running dbscan 
-f to check {} attr'.format(ATTRIBUTE)) + dbscanOut = inst.dbscan(DEFAULT_CHANGELOG_DB, changelog_dbfile) + count = 0 + for entry in dbscanOut.split(b'dbid: '): + if ensure_bytes('operation: {}'.format(change_type)) in entry and\ + ensure_bytes(ATTRIBUTE) in entry and ensure_bytes(user_dn) in entry: + count += 1 + user_pw_attr = ensure_bytes('{}: {}'.format(ATTRIBUTE, user_pw)) + if is_encrypted: + assert user_pw_attr not in entry, 'Changelog entry contains clear text password' + else: + assert user_pw_attr in entry, 'Changelog entry does not contain clear text password' + assert count, 'Operation type and DN of the entry not matched in changelog' + + +@pytest.mark.parametrize("encryption", ["AES", "3DES"]) +def test_algorithm_unhashed(topology_with_tls, encryption): + """Check encryption algowithm AES and 3DES. + And check unhashed#user#password attribute for encryption. + + :id: b7a37bf8-4b2e-4dbd-9891-70117d67558c + :parametrized: yes + :setup: Replication with two masters and SSL configured. + :steps: 1. Enable changelog encrytion on master1 (try AES and 3DES). + 2. Add a user to master1/master2 + 3. Run dbscan -f on m1 to check unhashed#user#password + attribute is encrypted. + 4. Run dbscan -f on m2 to check unhashed#user#password + attribute is in cleartext. + 5. Modify password in master2/master1 + 6. Run dbscan -f on m1 to check unhashed#user#password + attribute is encrypted. + 7. Run dbscan -f on m2 to check unhashed#user#password + attribute is in cleartext. + :expectedresults: + 1. It should pass + 2. It should pass + 3. It should pass + 4. It should pass + 5. It should pass + 6. It should pass + 7. It should pass + """ + + m1 = topology_with_tls.ms['master1'] + m2 = topology_with_tls.ms['master2'] + m1.config.set('nsslapd-unhashed-pw-switch', 'on') + m2.config.set('nsslapd-unhashed-pw-switch', 'on') + test_passw = 'm2Test199' + + _enable_changelog_encryption(m1, encryption) + + for inst1, inst2 in ((m1, m2), (m2, m1)): + user_props = TEST_USER_PROPERTIES.copy() + user_props["userPassword"] = PASSWORD + users = UserAccounts(inst1, DEFAULT_SUFFIX) + tuser = users.create(properties=user_props) + + _check_unhashed_userpw_encrypted(m1, 'add', tuser.dn, PASSWORD, True) + _check_unhashed_userpw_encrypted(m2, 'add', tuser.dn, PASSWORD, False) + + users = UserAccounts(inst2, DEFAULT_SUFFIX) + tuser = users.get(tuser.rdn) + tuser.set('userPassword', test_passw) + _check_unhashed_userpw_encrypted(m1, 'modify', tuser.dn, test_passw, True) + _check_unhashed_userpw_encrypted(m2, 'modify', tuser.dn, test_passw, False) + tuser.delete() + + +if __name__ == '__main__': + # Run isolated + # -s for DEBUG mode + CURRENT_FILE = os.path.realpath(__file__) + pytest.main("-s {}".format(CURRENT_FILE)) diff --git a/dirsrvtests/tests/suites/replication/regression_test.py b/dirsrvtests/tests/suites/replication/regression_test.py new file mode 100644 index 0000000..844d762 --- /dev/null +++ b/dirsrvtests/tests/suites/replication/regression_test.py @@ -0,0 +1,904 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2017 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. 
+# --- END COPYRIGHT BLOCK --- +# +import ldif +import pytest +import subprocess +from lib389.idm.user import TEST_USER_PROPERTIES, UserAccounts +from lib389.pwpolicy import PwPolicyManager +from lib389.utils import * +from lib389.topologies import topology_m2 as topo_m2, TopologyMain, topology_m3 as topo_m3, create_topology, _remove_ssca_db, topology_i2 as topo_i2 +from lib389._constants import * +from lib389.idm.organizationalunit import OrganizationalUnits +from lib389.idm.user import UserAccount +from lib389.idm.group import Groups, Group +from lib389.idm.domain import Domain +from lib389.idm.directorymanager import DirectoryManager +from lib389.replica import Replicas, ReplicationManager, Changelog5, BootstrapReplicationManager +from lib389.agreement import Agreements +from lib389 import pid_from_file + + +pytestmark = pytest.mark.tier1 + +NEW_SUFFIX_NAME = 'test_repl' +NEW_SUFFIX = 'o={}'.format(NEW_SUFFIX_NAME) +NEW_BACKEND = 'repl_base' +MAXAGE_ATTR = 'nsslapd-changelogmaxage' +MAXAGE_STR = '30' +TRIMINTERVAL_STR = '5' +TRIMINTERVAL = 'nsslapd-changelogtrim-interval' + +DEBUGGING = os.getenv("DEBUGGING", default=False) +if DEBUGGING: + logging.getLogger(__name__).setLevel(logging.DEBUG) +else: + logging.getLogger(__name__).setLevel(logging.INFO) +log = logging.getLogger(__name__) + + +def find_start_location(file, no): + log_pattern = re.compile("slapd_daemon - slapd started.") + count = 0 + while True: + line = file.readline() + log.debug("_pattern_errorlog: [%d] %s" % (file.tell(), line)) + found = log_pattern.search(line) + if (found): + count = count + 1 + if (count == no): + return file.tell() + if (line == ''): + break + return -1 + + +def pattern_errorlog(file, log_pattern, start_location=0): + + count = 0 + log.debug("_pattern_errorlog: start from the beginning") + file.seek(start_location) + + # Use a while true iteration because 'for line in file: hit a + # python bug that break file.tell() + while True: + line = file.readline() + log.debug("_pattern_errorlog: [%d] %s" % (file.tell(), line)) + found = log_pattern.search(line) + if (found): + count = count + 1 + if (line == ''): + break + + log.debug("_pattern_errorlog: complete (count=%d)" % count) + return count + + +def _move_ruv(ldif_file): + """ Move RUV entry in an ldif file to the top""" + + with open(ldif_file) as f: + parser = ldif.LDIFRecordList(f) + parser.parse() + + ldif_list = parser.all_records + for dn in ldif_list: + if dn[0].startswith('nsuniqueid=ffffffff-ffffffff-ffffffff-ffffffff'): + ruv_index = ldif_list.index(dn) + ldif_list.insert(0, ldif_list.pop(ruv_index)) + break + + with open(ldif_file, 'w') as f: + ldif_writer = ldif.LDIFWriter(f) + for dn, entry in ldif_list: + ldif_writer.unparse(dn, entry) + + +@pytest.fixture(scope="module") +def topo_with_sigkill(request): + """Create Replication Deployment with two masters""" + + topology = create_topology({ReplicaRole.MASTER: 2}) + + def _kill_ns_slapd(inst): + pid = str(pid_from_file(inst.ds_paths.pid_file)) + cmd = ['kill', '-9', pid] + subprocess.Popen(cmd, stdout=subprocess.PIPE) + + def fin(): + # Kill the hanging process at the end of test to prevent failures in the following tests + if DEBUGGING: + [_kill_ns_slapd(inst) for inst in topology] + else: + [_kill_ns_slapd(inst) for inst in topology] + assert _remove_ssca_db(topology) + [inst.delete() for inst in topology if inst.exists()] + request.addfinalizer(fin) + + return topology + + +@pytest.fixture() +def create_entry(topo_m2, request): + """Add test entry using UserAccounts""" + + 
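+    # Note (descriptive, my reading of the lib389 API): ensure_state() is used
+    # below rather than a plain create() so the fixture stays idempotent - the
+    # test user is created when missing and reset to the given properties when
+    # it already exists.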
log.info('Adding a test entry user') + users = UserAccounts(topo_m2.ms["master1"], DEFAULT_SUFFIX) + tuser = users.ensure_state(properties=TEST_USER_PROPERTIES) + return tuser + + +def add_ou_entry(server, idx, parent): + ous = OrganizationalUnits(server, parent) + name = 'OU%d' % idx + ous.create(properties={'ou': '%s' % name}) + + +def add_user_entry(server, idx, parent): + users = UserAccounts(server, DEFAULT_SUFFIX, rdn=parent) + user_properties = { + 'uid': 'tuser%d' % idx, + 'givenname': 'test', + 'cn': 'Test User%d' % idx, + 'sn': 'user%d' % idx, + 'userpassword': PW_DM, + 'uidNumber' : '1000%d' % idx, + 'gidNumber': '2000%d' % idx, + 'homeDirectory': '/home/{}'.format('tuser%d' % idx) + } + users.create(properties=user_properties) + + +def del_user_entry(server, idx, parent): + users = UserAccounts(server, DEFAULT_SUFFIX, rdn=parent) + test_user = users.get('tuser%d' % idx) + test_user.delete() + + +def rename_entry(server, idx, ou_name, new_parent): + users = UserAccounts(server, DEFAULT_SUFFIX, rdn=ou_name) + name = 'tuser%d' % idx + rdn = 'uid=%s' % name + test_user = users.get(name) + test_user.rename(new_rdn=rdn, newsuperior=new_parent) + + +def add_ldapsubentry(server, parent): + pwp = PwPolicyManager(server) + policy_props = {'passwordStorageScheme': 'ssha', + 'passwordCheckSyntax': 'on', + 'passwordInHistory': '6', + 'passwordChange': 'on', + 'passwordMinAge': '0', + 'passwordExp': 'off', + 'passwordMustChange': 'off',} + log.info('Create password policy for subtree {}'.format(parent)) + pwp.create_subtree_policy(parent, policy_props) + + +def test_special_symbol_replica_agreement(topo_i2): + """ Check if agreement starts with "cn=->..." then + after upgrade does it get removed. + + :id: 68aa0072-4dd4-4e33-b107-cb383a439125 + :setup: two standalone instance + :steps: + 1. Create and Enable Replication on standalone2 and role as consumer + 2. Create and Enable Replication on standalone1 and role as master + 3. Create a Replication agreement starts with "cn=->..." + 4. Perform an upgrade operation over the master + 5. Check if the agreement is still present or not. + :expectedresults: + 1. It should be successful + 2. It should be successful + 3. It should be successful + 4. It should be successful + 5. It should be successful + """ + + master = topo_i2.ins["standalone1"] + consumer = topo_i2.ins["standalone2"] + consumer.replica.enableReplication(suffix=DEFAULT_SUFFIX, role=ReplicaRole.CONSUMER, replicaId=CONSUMER_REPLICAID) + repl = ReplicationManager(DEFAULT_SUFFIX) + repl.create_first_master(master) + + properties = {RA_NAME: '-\\3meTo_{}:{}'.format(consumer.host, + str(consumer.port)), + RA_BINDDN: defaultProperties[REPLICATION_BIND_DN], + RA_BINDPW: defaultProperties[REPLICATION_BIND_PW], + RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD], + RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]} + + master.agreement.create(suffix=SUFFIX, + host=consumer.host, + port=consumer.port, + properties=properties) + + master.agreement.init(SUFFIX, consumer.host, consumer.port) + + replica_server = Replicas(master).get(DEFAULT_SUFFIX) + + master.upgrade('online') + + agmt = replica_server.get_agreements().list()[0] + + assert agmt.get_attr_val_utf8('cn') == '-\\3meTo_{}:{}'.format(consumer.host, + str(consumer.port)) + + + +def test_double_delete(topo_m2, create_entry): + """Check that double delete of the entry doesn't crash server + + :id: 3496c82d-636a-48c9-973c-2455b12164cc + :setup: Two masters replication setup, a test entry + :steps: + 1. 
Delete the entry on the first master + 2. Delete the entry on the second master + 3. Check that server is alive + :expectedresults: + 1. Entry should be successfully deleted from first master + 2. Entry should be successfully deleted from second aster + 3. Server should me alive + """ + + m1 = topo_m2.ms["master1"] + m2 = topo_m2.ms["master2"] + + repl = ReplicationManager(DEFAULT_SUFFIX) + repl.disable_to_master(m1, [m2]) + repl.disable_to_master(m2, [m1]) + + log.info('Deleting entry {} from master1'.format(create_entry.dn)) + topo_m2.ms["master1"].delete_s(create_entry.dn) + + log.info('Deleting entry {} from master2'.format(create_entry.dn)) + topo_m2.ms["master2"].delete_s(create_entry.dn) + + repl.enable_to_master(m2, [m1]) + repl.enable_to_master(m1, [m2]) + + repl.test_replication(m1, m2) + repl.test_replication(m2, m1) + + +@pytest.mark.bz1506831 +def test_repl_modrdn(topo_m2): + """Test that replicated MODRDN does not break replication + + :id: a3e17698-9eb4-41e0-b537-8724b9915fa6 + :setup: Two masters replication setup + :steps: + 1. Add 3 test OrganizationalUnits A, B and C + 2. Add 1 test user under OU=A + 3. Add same test user under OU=B + 4. Stop Replication + 5. Apply modrdn to M1 - move test user from OU A -> C + 6. Apply modrdn on M2 - move test user from OU B -> C + 7. Start Replication + 8. Check that there should be only one test entry under ou=C on both masters + 9. Check that the replication is working fine both ways M1 <-> M2 + :expectedresults: + 1. This should pass + 2. This should pass + 3. This should pass + 4. This should pass + 5. This should pass + 6. This should pass + 7. This should pass + 8. This should pass + 9. This should pass + """ + + master1 = topo_m2.ms["master1"] + master2 = topo_m2.ms["master2"] + + repl = ReplicationManager(DEFAULT_SUFFIX) + + log.info("Add test entries - Add 3 OUs and 2 same users under 2 different OUs") + OUs = OrganizationalUnits(master1, DEFAULT_SUFFIX) + OU_A = OUs.create(properties={ + 'ou': 'A', + 'description': 'A', + }) + OU_B = OUs.create(properties={ + 'ou': 'B', + 'description': 'B', + }) + OU_C = OUs.create(properties={ + 'ou': 'C', + 'description': 'C', + }) + + users = UserAccounts(master1, DEFAULT_SUFFIX, rdn='ou={}'.format(OU_A.rdn)) + tuser_A = users.create(properties=TEST_USER_PROPERTIES) + + users = UserAccounts(master1, DEFAULT_SUFFIX, rdn='ou={}'.format(OU_B.rdn)) + tuser_B = users.create(properties=TEST_USER_PROPERTIES) + + repl.test_replication(master1, master2) + repl.test_replication(master2, master1) + + log.info("Stop Replication") + topo_m2.pause_all_replicas() + + log.info("Apply modrdn to M1 - move test user from OU A -> C") + master1.rename_s(tuser_A.dn, 'uid=testuser1', newsuperior=OU_C.dn, delold=1) + + log.info("Apply modrdn on M2 - move test user from OU B -> C") + master2.rename_s(tuser_B.dn, 'uid=testuser1', newsuperior=OU_C.dn, delold=1) + + log.info("Start Replication") + topo_m2.resume_all_replicas() + + log.info("Wait for sometime for repl to resume") + repl.test_replication(master1, master2) + repl.test_replication(master2, master1) + + log.info("Check that there should be only one test entry under ou=C on both masters") + users = UserAccounts(master1, DEFAULT_SUFFIX, rdn='ou={}'.format(OU_C.rdn)) + assert len(users.list()) == 1 + + users = UserAccounts(master2, DEFAULT_SUFFIX, rdn='ou={}'.format(OU_C.rdn)) + assert len(users.list()) == 1 + + log.info("Check that the replication is working fine both ways, M1 <-> M2") + repl.test_replication(master1, master2) + 
repl.test_replication(master2, master1) + + +def test_password_repl_error(topo_m2, create_entry): + """Check that error about userpassword replication is properly logged + + :id: 714130ff-e4f0-4633-9def-c1f4b24abfef + :setup: Four masters replication setup, a test entry + :steps: + 1. Change userpassword on the first master + 2. Restart the servers to flush the logs + 3. Check the error log for an replication error + :expectedresults: + 1. Password should be successfully changed + 2. Server should be successfully restarted + 3. There should be no replication errors in the error log + """ + + m1 = topo_m2.ms["master1"] + m2 = topo_m2.ms["master2"] + TEST_ENTRY_NEW_PASS = 'new_pass' + + log.info('Clean the error log') + m2.deleteErrorLogs() + + log.info('Set replication loglevel') + m2.config.loglevel((ErrorLog.REPLICA,)) + + log.info('Modifying entry {} - change userpassword on master 1'.format(create_entry.dn)) + + create_entry.set('userpassword', TEST_ENTRY_NEW_PASS) + + repl = ReplicationManager(DEFAULT_SUFFIX) + repl.wait_for_replication(m1, m2) + + log.info('Restart the servers to flush the logs') + for num in range(1, 3): + topo_m2.ms["master{}".format(num)].restart() + + try: + log.info('Check that password works on master 2') + create_entry_m2 = UserAccount(m2, create_entry.dn) + create_entry_m2.bind(TEST_ENTRY_NEW_PASS) + + log.info('Check the error log for the error with {}'.format(create_entry.dn)) + assert not m2.ds_error_log.match('.*can.t add a change for {}.*'.format(create_entry.dn)) + finally: + log.info('Set the default loglevel') + m2.config.loglevel((ErrorLog.DEFAULT,)) + + +def test_invalid_agmt(topo_m2): + """Test adding that an invalid agreement is properly rejected and does not crash the server + + :id: 6c3b2a7e-edcd-4327-a003-6bd878ff722b + :setup: Four masters replication setup + :steps: + 1. Add invalid agreement (nsds5ReplicaEnabled set to invalid value) + 2. Verify the server is still running + :expectedresults: + 1. Invalid repl agreement should be rejected + 2. Server should be still running + """ + + m1 = topo_m2.ms["master1"] + m2 = topo_m2.ms["master2"] + + repl = ReplicationManager(DEFAULT_SUFFIX) + + replicas = Replicas(m1) + replica = replicas.get(DEFAULT_SUFFIX) + agmts = replica.get_agreements() + + # Add invalid agreement (nsds5ReplicaEnabled set to invalid value) + with pytest.raises(ldap.UNWILLING_TO_PERFORM): + agmts.create(properties={ + 'cn': 'whatever', + 'nsDS5ReplicaRoot': DEFAULT_SUFFIX, + 'nsDS5ReplicaBindDN': 'cn=replication manager,cn=config', + 'nsDS5ReplicaBindMethod': 'simple', + 'nsDS5ReplicaTransportInfo': 'LDAP', + 'nsds5replicaTimeout': '5', + 'description': "test agreement", + 'nsDS5ReplicaHost': m2.host, + 'nsDS5ReplicaPort': str(m2.port), + 'nsDS5ReplicaCredentials': 'whatever', + 'nsds5ReplicaEnabled': 'YEAH MATE, LETS REPLICATE' + }) + + # Verify the server is still running + repl = ReplicationManager(DEFAULT_SUFFIX) + repl.test_replication(m1, m2) + repl.test_replication(m2, m1) + + +def test_fetch_bindDnGroup(topo_m2): + """Check the bindDNGroup is fetched on first replication session + + :id: 5f1b1f59-6744-4260-b091-c82d22130025 + :setup: 2 Master Instances + :steps: + 1. Create a replication bound user and group, but the user *not* member of the group + 2. Check that replication is working + 3. 
Some preparation is required because of lib389 magic that already define a replication via group + - define the group as groupDN for replication and 60sec as fetch interval + - pause RA in both direction + - Define the user as bindDn of the RAs + 4. restart servers. + It sets the fetch time to 0, so next session will refetch the group + 5. Before resuming RA, add user to groupDN (on both side as replication is not working at that time) + 6. trigger an update and check replication is working and + there is no failure logged on supplier side 'does not have permission to supply replication updates to the replica' + :expectedresults: + 1. Success + 2. Success + 3. Success + 4. Success + 5. Success + 6. Success + """ + + # If you need any test suite initialization, + # please, write additional fixture for that (including finalizer). + # Topology for suites are predefined in lib389/topologies.py. + + # If you need host, port or any other data about instance, + # Please, use the instance object attributes for that (for example, topo.ms["master1"].serverid) + M1 = topo_m2.ms['master1'] + M2 = topo_m2.ms['master2'] + + # Enable replication log level. Not really necessary + M1.modify_s('cn=config', [(ldap.MOD_REPLACE, 'nsslapd-errorlog-level', b'8192')]) + M2.modify_s('cn=config', [(ldap.MOD_REPLACE, 'nsslapd-errorlog-level', b'8192')]) + + # Create a group and a user + PEOPLE = "ou=People,%s" % SUFFIX + PASSWD = 'password' + REPL_MGR_BOUND_DN = 'repl_mgr_bound_dn' + + uid = REPL_MGR_BOUND_DN.encode() + users = UserAccounts(M1, PEOPLE, rdn=None) + user_props = TEST_USER_PROPERTIES.copy() + user_props.update({'uid': uid, 'cn': uid, 'sn': '_%s' % uid, 'userpassword': PASSWD.encode(), 'description': b'value creation'}) + create_user = users.create(properties=user_props) + + groups_M1 = Groups(M1, DEFAULT_SUFFIX) + group_properties = { + 'cn': 'group1', + 'description': 'testgroup'} + group_M1 = groups_M1.create(properties=group_properties) + group_M2 = Group(M2, group_M1.dn) + assert(not group_M1.is_member(create_user.dn)) + + # Check that M1 and M2 are in sync + repl = ReplicationManager(DEFAULT_SUFFIX) + repl.wait_for_replication(M1, M2, timeout=20) + + # Define the group as the replication manager and fetch interval as 60sec + replicas = Replicas(M1) + replica = replicas.list()[0] + replica.apply_mods([(ldap.MOD_REPLACE, 'nsDS5ReplicaBindDnGroupCheckInterval', '60'), + (ldap.MOD_REPLACE, 'nsDS5ReplicaBindDnGroup', group_M1.dn)]) + + replicas = Replicas(M2) + replica = replicas.list()[0] + replica.apply_mods([(ldap.MOD_REPLACE, 'nsDS5ReplicaBindDnGroupCheckInterval', '60'), + (ldap.MOD_REPLACE, 'nsDS5ReplicaBindDnGroup', group_M1.dn)]) + + # Then pause the replication agreement to prevent them trying to acquire + # while the user is not member of the group + topo_m2.pause_all_replicas() + + # Define the user as the bindDN of the RAs + for inst in (M1, M2): + agmts = Agreements(inst) + agmt = agmts.list()[0] + agmt.replace('nsDS5ReplicaBindDN', create_user.dn.encode()) + agmt.replace('nsds5ReplicaCredentials', PASSWD.encode()) + + # Key step + # The restart will fetch the group/members define in the replica + # + # The user NOT member of the group replication will not work until bindDNcheckInterval + # + # With the fix, the first fetch is not taken into account (fetch time=0) + # so on the first session, the group will be fetched + M1.restart() + M2.restart() + + # Replication being broken here we need to directly do the same update. 
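+    # (the replication agreements are still paused at this point, so the group
+    # membership has to be added locally on each master; the two sides converge
+    # once the agreements are resumed below)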
+ # Sorry not found another solution except total update + group_M1.add_member(create_user.dn) + group_M2.add_member(create_user.dn) + + topo_m2.resume_all_replicas() + + # trigger updates to be sure to have a replication session, giving some time + M1.modify_s(create_user.dn, [(ldap.MOD_ADD, 'description', b'value_1_1')]) + M2.modify_s(create_user.dn, [(ldap.MOD_ADD, 'description', b'value_2_2')]) + time.sleep(10) + + # Check replication is working + ents = M1.search_s(create_user.dn, ldap.SCOPE_BASE, '(objectclass=*)') + for ent in ents: + assert (ent.hasAttr('description')) + found = 0 + for val in ent.getValues('description'): + if (val == b'value_1_1'): + found = found + 1 + elif (val == b'value_2_2'): + found = found + 1 + assert (found == 2) + + ents = M2.search_s(create_user.dn, ldap.SCOPE_BASE, '(objectclass=*)') + for ent in ents: + assert (ent.hasAttr('description')) + found = 0 + for val in ent.getValues('description'): + if (val == b'value_1_1'): + found = found + 1 + elif (val == b'value_2_2'): + found = found + 1 + assert (found == 2) + + # Check in the logs that the member was detected in the group although + # at startup it was not member of the group + regex = re.compile("does not have permission to supply replication updates to the replica.") + errorlog_M1 = open(M1.errlog, "r") + errorlog_M2 = open(M1.errlog, "r") + + # Find the last restart position + restart_location_M1 = find_start_location(errorlog_M1, 2) + assert (restart_location_M1 != -1) + restart_location_M2 = find_start_location(errorlog_M2, 2) + assert (restart_location_M2 != -1) + + # Then check there is no failure to authenticate + count = pattern_errorlog(errorlog_M1, regex, start_location=restart_location_M1) + assert(count <= 1) + count = pattern_errorlog(errorlog_M2, regex, start_location=restart_location_M2) + assert(count <= 1) + + +def test_plugin_bind_dn_tracking_and_replication(topo_m2): + """Testing nsslapd-plugin-binddn-tracking does not cause issues around + access control and reconfiguring replication/repl agmt. + + :id: dd689d03-69b8-4bf9-a06e-2acd19d5e2c9 + :setup: 2 master topology + :steps: + 1. Turn on plugin binddn tracking + 2. Add some users + 3. Make an update as a user + 4. Make an update to the replica config + 5. Make an update to the repliocation agreement + :expectedresults: + 1. Success + 2. Success + 3. Success + 4. Success + 5. Success + """ + + m1 = topo_m2.ms["master1"] + + # Turn on bind dn tracking + m1.config.set('nsslapd-plugin-binddn-tracking', 'on') + + # Add two users + users = UserAccounts(m1, DEFAULT_SUFFIX) + user1 = users.create_test_user(uid=1011) + user1.set('userpassword', PASSWORD) + user2 = users.create_test_user(uid=1012) + + # Add an aci + acival = '(targetattr ="cn")(version 3.0;acl "Test bind dn tracking"' + \ + ';allow (all) (userdn = "ldap:///{}");)'.format(user1.dn) + Domain(m1, DEFAULT_SUFFIX).add('aci', acival) + + # Bind as user and make an update + user1.rebind(PASSWORD) + user2.set('cn', 'new value') + dm = DirectoryManager(m1) + dm.rebind() + + # modify replica + replica = Replicas(m1).get(DEFAULT_SUFFIX) + replica.set(REPL_PROTOCOL_TIMEOUT, "30") + + # modify repl agmt + agmt = replica.get_agreements().list()[0] + agmt.set(REPL_PROTOCOL_TIMEOUT, "20") + + +def test_cleanallruv_repl(topo_m3): + """Test that cleanallruv could not break replication if anchor csn in ruv originated in deleted replica + :id: 46faba9a-897e-45b8-98dc-aec7fa8cec9a + :setup: 3 Masters + :steps: + 1. Configure error log level to 8192 in all masters + 2. 
Modify nsslapd-changelogmaxage=30 and nsslapd-changelogtrim-interval=5 for M1 and M2 + 3. Add test users to 3 masters + 4. Launch ClearRuv but withForce + 5. Check the users after CleanRUV, because of changelog trimming, it will effect the CLs + :expectedresults: + 1. Error logs should be configured successfully + 2. Modify should be successful + 3. Test users should be added successfully + 4. ClearRuv should be launched successfully + 5. Users should be present according to the changelog trimming effect + """ + + M1 = topo_m3.ms["master1"] + M2 = topo_m3.ms["master2"] + M3 = topo_m3.ms["master3"] + + log.info("Change the error log levels for all masters") + for s in (M1, M2, M3): + s.config.replace('nsslapd-errorlog-level', "8192") + + log.info("Get the replication agreements for all 3 masters") + m1_m2 = M1.agreement.list(suffix=SUFFIX, consumer_host=M2.host, consumer_port=M2.port) + m1_m3 = M1.agreement.list(suffix=SUFFIX, consumer_host=M3.host, consumer_port=M3.port) + m3_m1 = M3.agreement.list(suffix=SUFFIX, consumer_host=M1.host, consumer_port=M1.port) + + log.info("Get the changelog enteries for M1 and M2") + changelog_m1 = Changelog5(M1) + changelog_m2 = Changelog5(M2) + + log.info("Modify nsslapd-changelogmaxage=30 and nsslapd-changelogtrim-interval=5 for M1 and M2") + changelog_m1.set_max_age(MAXAGE_STR) + changelog_m1.set_trim_interval(TRIMINTERVAL_STR) + + log.info("Add test users to 3 masters") + users_m1 = UserAccounts(M1, DEFAULT_SUFFIX) + users_m2 = UserAccounts(M2, DEFAULT_SUFFIX) + users_m3 = UserAccounts(M3, DEFAULT_SUFFIX) + user_props = TEST_USER_PROPERTIES.copy() + + user_props.update({'uid': "testuser10"}) + user10 = users_m1.create(properties=user_props) + + user_props.update({'uid': "testuser20"}) + user20 = users_m2.create(properties=user_props) + + user_props.update({'uid': "testuser30"}) + user30 = users_m3.create(properties=user_props) + + # ::important:: the testuser31 is the oldest csn in M2, + # because it will be cleared by changelog trimming + user_props.update({'uid': "testuser31"}) + user31 = users_m3.create(properties=user_props) + + user_props.update({'uid': "testuser11"}) + user11 = users_m1.create(properties=user_props) + + user_props.update({'uid': "testuser21"}) + user21 = users_m2.create(properties=user_props) + # this is to trigger changelog trim and interval values + time.sleep(40) + + # Here M1, M2, M3 should have 11,21,31 and 10,20,30 are CL cleared + M2.stop() + M1.agreement.pause(m1_m2[0].dn) + user_props.update({'uid': "testuser32"}) + user32 = users_m3.create(properties=user_props) + + user_props.update({'uid': "testuser33"}) + user33 = users_m3.create(properties=user_props) + + user_props.update({'uid': "testuser12"}) + user12 = users_m1.create(properties=user_props) + + M3.agreement.pause(m3_m1[0].dn) + M3.agreement.resume(m3_m1[0].dn) + time.sleep(40) + + # Here because of changelog trimming testusers 31 and 32 are CL cleared + # ClearRuv is launched but with Force + M3.stop() + M1.tasks.cleanAllRUV(suffix=SUFFIX, replicaid='3', + force=True, args={TASK_WAIT: False}) + + # here M1 should clear 31 + M2.start() + M1.agreement.pause(m1_m2[0].dn) + M1.agreement.resume(m1_m2[0].dn) + time.sleep(10) + + # Check the users after CleanRUV + expected_m1_users = [user31.dn, user11.dn, user21.dn, user32.dn, user33.dn, user12.dn] + expected_m1_users = [x.lower() for x in expected_m1_users] + expected_m2_users = [user31.dn, user11.dn, user21.dn, user12.dn] + expected_m2_users = [x.lower() for x in expected_m2_users] + + current_m1_users = [user.dn 
for user in users_m1.list()] + current_m1_users = [x.lower() for x in current_m1_users] + current_m2_users = [user.dn for user in users_m2.list()] + current_m2_users = [x.lower() for x in current_m2_users] + + assert set(expected_m1_users).issubset(current_m1_users) + assert set(expected_m2_users).issubset(current_m2_users) + + +@pytest.mark.ds49915 +@pytest.mark.bz1626375 +def test_online_reinit_may_hang(topo_with_sigkill): + """Online reinitialization may hang when the first + entry of the DB is RUV entry instead of the suffix + + :id: cded6afa-66c0-4c65-9651-993ba3f7a49c + :setup: 2 Master Instances + :steps: + 1. Export the database + 2. Move RUV entry to the top in the ldif file + 3. Import the ldif file + 4. Online replica initializaton + :expectedresults: + 1. Ldif file should be created successfully + 2. RUV entry should be on top in the ldif file + 3. Import should be successful + 4. Server should not hang and consume 100% CPU + """ + M1 = topo_with_sigkill.ms["master1"] + M2 = topo_with_sigkill.ms["master2"] + M1.stop() + ldif_file = '%s/master1.ldif' % M1.get_ldif_dir() + M1.db2ldif(bename=DEFAULT_BENAME, suffixes=[DEFAULT_SUFFIX], + excludeSuffixes=None, repl_data=True, + outputfile=ldif_file, encrypt=False) + _move_ruv(ldif_file) + M1.ldif2db(DEFAULT_BENAME, None, None, None, ldif_file) + M1.start() + # After this server may hang + agmt = Agreements(M1).list()[0] + agmt.begin_reinit() + (done, error) = agmt.wait_reinit() + assert done is True + assert error is False + repl = ReplicationManager(DEFAULT_SUFFIX) + repl.test_replication_topology(topo_with_sigkill) + + if DEBUGGING: + # Add debugging steps(if any)... + pass + + +@pytest.mark.bz1314956 +@pytest.mark.ds48755 +def test_moving_entry_make_online_init_fail(topology_m2): + """ + Moving an entry could make the online init fail + + :id: e3895be7-884a-4e9f-80e3-24e9a5167c9e + :setup: Two masters replication setup + :steps: + 1. Generate DIT_0 + 2. Generate password policy for DIT_0 + 3. Create users for DIT_0 + 4. Turn idx % 2 == 0 users into tombstones + 5. Generate DIT_1 + 6. Move 'ou=OU0,ou=OU0,dc=example,dc=com' to DIT_1 + 7. Move 'ou=OU0,dc=example,dc=com' to DIT_1 + 8. Move idx % 2 == 1 users to 'ou=OU0,ou=OU0,ou=OU1,dc=example,dc=com' + 9. Init replicas + 10. Number of entries should match on both masters + + :expectedresults: + 1. Success + 2. Success + 3. Success + 4. Success + 5. Success + 6. Success + 7. Success + 8. Success + 9. Success + 10. 
Success + """ + + M1 = topology_m2.ms["master1"] + M2 = topology_m2.ms["master2"] + + log.info("Generating DIT_0") + idx = 0 + add_ou_entry(M1, idx, DEFAULT_SUFFIX) + log.info("Created entry: ou=OU0, dc=example, dc=com") + + ou0 = 'ou=OU%d' % idx + first_parent = '%s,%s' % (ou0, DEFAULT_SUFFIX) + add_ou_entry(M1, idx, first_parent) + log.info("Created entry: ou=OU0, ou=OU0, dc=example, dc=com") + + add_ldapsubentry(M1, first_parent) + + ou_name = 'ou=OU%d,ou=OU%d' % (idx, idx) + second_parent = 'ou=OU%d,%s' % (idx, first_parent) + for idx in range(0, 9): + add_user_entry(M1, idx, ou_name) + if idx % 2 == 0: + log.info("Turning tuser%d into a tombstone entry" % idx) + del_user_entry(M1, idx, ou_name) + + log.info('%s => %s => %s => 10 USERS' % (DEFAULT_SUFFIX, first_parent, second_parent)) + + log.info("Generating DIT_1") + idx = 1 + add_ou_entry(M1, idx, DEFAULT_SUFFIX) + log.info("Created entry: ou=OU1,dc=example,dc=com") + + third_parent = 'ou=OU%d,%s' % (idx, DEFAULT_SUFFIX) + add_ou_entry(M1, idx, third_parent) + log.info("Created entry: ou=OU1, ou=OU1, dc=example, dc=com") + + add_ldapsubentry(M1, third_parent) + + log.info("Moving %s to DIT_1" % second_parent) + OrganizationalUnits(M1, second_parent).get('OU0').rename(ou0, newsuperior=third_parent) + + log.info("Moving %s to DIT_1" % first_parent) + fourth_parent = '%s,%s' % (ou0, third_parent) + OrganizationalUnits(M1, first_parent).get('OU0').rename(ou0, newsuperior=fourth_parent) + + fifth_parent = '%s,%s' % (ou0, fourth_parent) + + ou_name = 'ou=OU0,ou=OU1' + log.info("Moving USERS to %s" % fifth_parent) + for idx in range(0, 9): + if idx % 2 == 1: + rename_entry(M1, idx, ou_name, fifth_parent) + + log.info('%s => %s => %s => %s => 10 USERS' % (DEFAULT_SUFFIX, third_parent, fourth_parent, fifth_parent)) + + log.info("Run Initialization.") + repl = ReplicationManager(DEFAULT_SUFFIX) + repl.wait_for_replication(M1, M2, timeout=5) + + m1entries = M1.search_s(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE, + '(|(objectclass=ldapsubentry)(objectclass=nstombstone)(nsuniqueid=*))') + m2entries = M2.search_s(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE, + '(|(objectclass=ldapsubentry)(objectclass=nstombstone)(nsuniqueid=*))') + + log.info("m1entry count - %d", len(m1entries)) + log.info("m2entry count - %d", len(m2entries)) + + assert len(m1entries) == len(m2entries) + + +if __name__ == '__main__': + # Run isolated + # -s for DEBUG mode + CURRENT_FILE = os.path.realpath(__file__) + pytest.main("-s %s" % CURRENT_FILE) diff --git a/dirsrvtests/tests/suites/replication/replica_config_test.py b/dirsrvtests/tests/suites/replication/replica_config_test.py new file mode 100644 index 0000000..c2140a2 --- /dev/null +++ b/dirsrvtests/tests/suites/replication/replica_config_test.py @@ -0,0 +1,285 @@ +import logging +import pytest +import copy +import os +import ldap +from lib389._constants import * +from lib389.topologies import topology_st as topo + +from lib389.replica import Replicas +from lib389.agreement import Agreements +from lib389.utils import ds_is_older + +pytestmark = pytest.mark.tier1 + +DEBUGGING = os.getenv("DEBUGGING", default=False) +if DEBUGGING: + logging.getLogger(__name__).setLevel(logging.DEBUG) +else: + logging.getLogger(__name__).setLevel(logging.INFO) +log = logging.getLogger(__name__) + +notnum = 'invalid' +too_big = '9223372036854775807' +overflow = '9999999999999999999999999999999999999999999999999999999999999999999' + +replica_dict = {'nsDS5ReplicaRoot': 'dc=example,dc=com', + 'nsDS5ReplicaType': '3', + 'nsDS5Flags': '1', + 'nsDS5ReplicaId': 
'65534', + 'nsds5ReplicaPurgeDelay': '604800', + 'nsDS5ReplicaBindDN': 'cn=u', + 'cn': 'replica'} + +agmt_dict = {'cn': 'test_agreement', + 'nsDS5ReplicaRoot': 'dc=example,dc=com', + 'nsDS5ReplicaHost': 'localhost.localdomain', + 'nsDS5ReplicaPort': '5555', + 'nsDS5ReplicaBindDN': 'uid=tester', + 'nsds5ReplicaCredentials': 'password', + 'nsDS5ReplicaTransportInfo': 'LDAP', + 'nsDS5ReplicaBindMethod': 'SIMPLE'} + + +repl_add_attrs = [('nsDS5ReplicaType', '-1', '4', overflow, notnum, '1'), + ('nsDS5Flags', '-1', '2', overflow, notnum, '1'), + ('nsDS5ReplicaId', '0', '65536', overflow, notnum, '1'), + ('nsds5ReplicaPurgeDelay', '-2', too_big, overflow, notnum, '1'), + ('nsDS5ReplicaBindDnGroupCheckInterval', '-2', too_big, overflow, notnum, '1'), + ('nsds5ReplicaTombstonePurgeInterval', '-2', too_big, overflow, notnum, '1'), + ('nsds5ReplicaProtocolTimeout', '-1', too_big, overflow, notnum, '1'), + ('nsds5ReplicaReleaseTimeout', '-1', too_big, overflow, notnum, '1'), + ('nsds5ReplicaBackoffMin', '0', too_big, overflow, notnum, '3'), + ('nsds5ReplicaBackoffMax', '0', too_big, overflow, notnum, '6')] + +repl_mod_attrs = [('nsDS5Flags', '-1', '2', overflow, notnum, '1'), + ('nsds5ReplicaPurgeDelay', '-2', too_big, overflow, notnum, '1'), + ('nsDS5ReplicaBindDnGroupCheckInterval', '-2', too_big, overflow, notnum, '1'), + ('nsds5ReplicaTombstonePurgeInterval', '-2', too_big, overflow, notnum, '1'), + ('nsds5ReplicaProtocolTimeout', '-1', too_big, overflow, notnum, '1'), + ('nsds5ReplicaReleaseTimeout', '-1', too_big, overflow, notnum, '1'), + ('nsds5ReplicaBackoffMin', '0', too_big, overflow, notnum, '3'), + ('nsds5ReplicaBackoffMax', '0', too_big, overflow, notnum, '6')] + +agmt_attrs = [ + ('nsds5ReplicaPort', '0', '65535', overflow, notnum, '389'), + ('nsds5ReplicaTimeout', '-1', too_big, overflow, notnum, '6'), + ('nsds5ReplicaBusyWaitTime', '-1', too_big, overflow, notnum, '6'), + ('nsds5ReplicaSessionPauseTime', '-1', too_big, overflow, notnum, '6'), + ('nsds5ReplicaFlowControlWindow', '-1', too_big, overflow, notnum, '6'), + ('nsds5ReplicaFlowControlPause', '-1', too_big, overflow, notnum, '6'), + ('nsds5ReplicaProtocolTimeout', '-1', too_big, overflow, notnum, '6') + ] + +def replica_reset(topo): + """Purge all existing replica details""" + replicas = Replicas(topo.standalone) + for r in replicas.list(): + r.delete() + +def replica_setup(topo): + """Add a valid replica config entry to modify + """ + replicas = Replicas(topo.standalone) + for r in replicas.list(): + r.delete() + return replicas.create(properties=replica_dict) + +def agmt_reset(topo): + """Purge all existing agreements for testing""" + agmts = Agreements(topo.standalone) + for a in agmts.list(): + a.delete() + +def agmt_setup(topo): + """Add a valid replica config entry to modify + """ + # Reset the agreements too. 
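+    # replica_setup() gives us a freshly created replica entry; the loop below
+    # then removes any agreement still hanging off it before creating the one
+    # used by the test.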
+ replica = replica_setup(topo) + agmts = Agreements(topo.standalone, basedn=replica.dn) + for a in agmts.list(): + a.delete() + return agmts.create(properties=agmt_dict) + +def perform_invalid_create(many, properties, attr, value): + my_properties = copy.deepcopy(properties) + my_properties[attr] = value + with pytest.raises(ldap.LDAPError) as ei: + many.create(properties=my_properties) + return ei.value + +def perform_invalid_modify(o, attr, value): + with pytest.raises(ldap.LDAPError) as ei: + o.replace(attr, value) + return ei.value + +@pytest.mark.parametrize("attr, too_small, too_big, overflow, notnum, valid", repl_add_attrs) +def test_replica_num_add(topo, attr, too_small, too_big, overflow, notnum, valid): + """Test all the number values you can set for a replica config entry + + :id: a8b47d4a-a089-4d70-8070-e6181209bf92 + :parametrized: yes + :setup: standalone instance + :steps: + 1. Use a value that is too small + 2. Use a value that is too big + 3. Use a value that overflows the int + 4. Use a value with character value (not a number) + 5. Use a valid value + :expectedresults: + 1. Add is rejected + 2. Add is rejected + 3. Add is rejected + 4. Add is rejected + 5. Add is allowed + """ + replica_reset(topo) + + replicas = Replicas(topo.standalone) + + # Test too small + perform_invalid_create(replicas, replica_dict, attr, too_small) + # Test too big + perform_invalid_create(replicas, replica_dict, attr, too_big) + # Test overflow + perform_invalid_create(replicas, replica_dict, attr, overflow) + # test not a number + perform_invalid_create(replicas, replica_dict, attr, notnum) + # Test valid value + my_replica = copy.deepcopy(replica_dict) + my_replica[attr] = valid + replicas.create(properties=my_replica) + +@pytest.mark.parametrize("attr, too_small, too_big, overflow, notnum, valid", repl_mod_attrs) +def test_replica_num_modify(topo, attr, too_small, too_big, overflow, notnum, valid): + """Test all the number values you can set for a replica config entry + + :id: a8b47d4a-a089-4d70-8070-e6181209bf93 + :parametrized: yes + :setup: standalone instance + :steps: + 1. Replace a value that is too small + 2. Repalce a value that is too big + 3. Replace a value that overflows the int + 4. Replace a value with character value (not a number) + 5. Replace a vlue with a valid value + :expectedresults: + 1. Value is rejected + 2. Value is rejected + 3. Value is rejected + 4. Value is rejected + 5. Value is allowed + """ + replica = replica_setup(topo) + + # Value too small + perform_invalid_modify(replica, attr, too_small) + # Value too big + perform_invalid_modify(replica, attr, too_big) + # Value overflow + perform_invalid_modify(replica, attr, overflow) + # Value not a number + perform_invalid_modify(replica, attr, notnum) + # Value is valid + replica.replace(attr, valid) + + +@pytest.mark.xfail(reason="Agreement validation current does not work.") +@pytest.mark.parametrize("attr, too_small, too_big, overflow, notnum, valid", agmt_attrs) +def test_agmt_num_add(topo, attr, too_small, too_big, overflow, notnum, valid): + """Test all the number values you can set for a replica config entry + + :id: a8b47d4a-a089-4d70-8070-e6181209bf94 + :parametrized: yes + :setup: standalone instance + :steps: + 1. Use a value that is too small + 2. Use a value that is too big + 3. Use a value that overflows the int + 4. Use a value with character value (not a number) + 5. Use a valid value + :expectedresults: + 1. Add is rejected + 2. Add is rejected + 3. Add is rejected + 4. Add is rejected + 5. 
Add is allowed + """ + + agmt_reset(topo) + replica = replica_setup(topo) + + agmts = Agreements(topo.standalone, basedn=replica.dn) + + # Test too small + perform_invalid_create(agmts, agmt_dict, attr, too_small) + # Test too big + perform_invalid_create(agmts, agmt_dict, attr, too_big) + # Test overflow + perform_invalid_create(agmts, agmt_dict, attr, overflow) + # test not a number + perform_invalid_create(agmts, agmt_dict, attr, notnum) + # Test valid value + my_agmt = copy.deepcopy(agmt_dict) + my_agmt[attr] = valid + agmts.create(properties=my_agmt) + + +@pytest.mark.xfail(reason="Agreement validation current does not work.") +@pytest.mark.parametrize("attr, too_small, too_big, overflow, notnum, valid", agmt_attrs) +def test_agmt_num_modify(topo, attr, too_small, too_big, overflow, notnum, valid): + """Test all the number values you can set for a replica config entry + + :id: a8b47d4a-a089-4d70-8070-e6181209bf95 + :parametrized: yes + :setup: standalone instance + :steps: + 1. Replace a value that is too small + 2. Replace a value that is too big + 3. Replace a value that overflows the int + 4. Replace a value with character value (not a number) + 5. Replace a vlue with a valid value + :expectedresults: + 1. Value is rejected + 2. Value is rejected + 3. Value is rejected + 4. Value is rejected + 5. Value is allowed + """ + + agmt = agmt_setup(topo) + + # Value too small + perform_invalid_modify(agmt, attr, too_small) + # Value too big + perform_invalid_modify(agmt, attr, too_big) + # Value overflow + perform_invalid_modify(agmt, attr, overflow) + # Value not a number + perform_invalid_modify(agmt, attr, notnum) + # Value is valid + agmt.replace(attr, valid) + + +@pytest.mark.skipif(ds_is_older('1.4.1.4'), reason="Not implemented") +@pytest.mark.bz1546739 +def test_same_attr_yields_same_return_code(topo): + """Test that various operations with same incorrect attribute value yield same return code + """ + attr = 'nsDS5ReplicaId' + + replica_reset(topo) + replicas = Replicas(topo.standalone) + e = perform_invalid_create(replicas, replica_dict, attr, too_big) + assert type(e) is ldap.UNWILLING_TO_PERFORM + + replica = replica_setup(topo) + e = perform_invalid_modify(replica, attr, too_big) + assert type(e) is ldap.UNWILLING_TO_PERFORM + + +if __name__ == '__main__': + # Run isolated + # -s for DEBUG mode + CURRENT_FILE = os.path.realpath(__file__) + pytest.main(["-s", CURRENT_FILE]) diff --git a/dirsrvtests/tests/suites/replication/ruvstore_test.py b/dirsrvtests/tests/suites/replication/ruvstore_test.py new file mode 100644 index 0000000..599f2a5 --- /dev/null +++ b/dirsrvtests/tests/suites/replication/ruvstore_test.py @@ -0,0 +1,163 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2017 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. 
+# --- END COPYRIGHT BLOCK --- +# +import os +import logging +import ldap +import pytest +from ldif import LDIFParser +from lib389.replica import Replicas +from lib389.idm.user import UserAccounts +from lib389.topologies import topology_m2 as topo +from lib389._constants import * + +pytestmark = pytest.mark.tier1 + +TEST_ENTRY_NAME = 'rep2lusr' +NEW_RDN_NAME = 'ruvusr' +ATTRIBUTES = ['objectClass', 'nsUniqueId', 'nsds50ruv', 'nsruvReplicaLastModified'] +USER_PROPERTIES = { + 'uid': TEST_ENTRY_NAME, + 'cn': TEST_ENTRY_NAME, + 'sn': TEST_ENTRY_NAME, + 'uidNumber': '1001', + 'gidNumber': '2001', + 'userpassword': PASSWORD, + 'description': 'userdesc', + 'homeDirectory': '/home/testuser' +} + +DEBUGGING = os.getenv('DEBUGGING', default=False) +if DEBUGGING: + logging.getLogger(__name__).setLevel(logging.DEBUG) +else: + logging.getLogger(__name__).setLevel(logging.INFO) +log = logging.getLogger(__name__) + + +class MyLDIF(LDIFParser): + def __init__(self, input): + LDIFParser.__init__(self, input) + + def handle(self, dn, entry): + if 'nsuniqueid=' + REPLICA_RUV_UUID in dn: + for attr in ATTRIBUTES: + assert entry.get(attr), 'Failed to find attribute: {}'.format(attr) + log.info('Attribute found in RUV: {}'.format(attr)) + + +def _perform_ldap_operations(topo): + """Add a test user, modify description, modrdn user and delete it""" + + users = UserAccounts(topo.ms['master1'], DEFAULT_SUFFIX) + log.info('Adding user to master1') + tuser = users.create(properties=USER_PROPERTIES) + tuser.replace('description', 'newdesc') + log.info('Modify RDN of user: {}'.format(tuser.dn)) + try: + topo.ms['master1'].modrdn_s(tuser.dn, 'uid={}'.format(NEW_RDN_NAME), 0) + except ldap.LDAPError as e: + log.fatal('Failed to modrdn entry: {}'.format(tuser.dn)) + raise e + tuser = users.get(NEW_RDN_NAME) + log.info('Deleting user: {}'.format(tuser.dn)) + tuser.delete() + + +def _compare_memoryruv_and_databaseruv(topo, operation_type): + """Compare the memoryruv and databaseruv for ldap operations""" + + log.info('Checking memory ruv for ldap: {} operation'.format(operation_type)) + replicas = Replicas(topo.ms['master1']) + replica = replicas.list()[0] + memory_ruv = replica.get_attr_val_utf8('nsds50ruv') + + log.info('Checking database ruv for ldap: {} operation'.format(operation_type)) + entry = replicas.get_ruv_entry(DEFAULT_SUFFIX) + database_ruv = entry.getValues('nsds50ruv')[0] + assert memory_ruv == database_ruv + + +def test_ruv_entry_backup(topo): + """Check if db2ldif stores the RUV details in the backup file + + :id: cbe2c473-8578-4caf-ac0a-841140e41e66 + :setup: Replication with two masters. + :steps: 1. Add user to server. + 2. Perform ldap modify, modrdn and delete operations. + 3. Stop the server and backup the database using db2ldif task. + 4. Start the server and check if correct RUV is stored in the backup file. + :expectedresults: + 1. Add user should PASS. + 2. Ldap operations should PASS. + 3. Database backup using db2ldif task should PASS. + 4. Backup file should contain the correct RUV details. 
+ """ + + log.info('LDAP operations add, modify, modrdn and delete') + _perform_ldap_operations(topo) + + output_file = os.path.join(topo.ms['master1'].get_ldif_dir(), 'master1.ldif') + log.info('Stopping the server instance to run db2ldif task to create backup file') + topo.ms['master1'].stop() + topo.ms['master1'].db2ldif(bename=DEFAULT_BENAME, suffixes=[DEFAULT_SUFFIX], excludeSuffixes=[], + encrypt=False, repl_data=True, outputfile=output_file) + log.info('Starting the server after backup') + topo.ms['master1'].start() + + log.info('Checking if backup file contains RUV and required attributes') + with open(output_file, 'r') as ldif_file: + parser = MyLDIF(ldif_file) + parser.parse() + + +@pytest.mark.xfail(reason="No method to safety access DB ruv currently exists online.") +def test_memoryruv_sync_with_databaseruv(topo): + """Check if memory ruv and database ruv are synced + + :id: 5f38ac5f-6353-460d-bf60-49cafffda5b3 + :setup: Replication with two masters. + :steps: 1. Add user to server and compare memory ruv and database ruv. + 2. Modify description of user and compare memory ruv and database ruv. + 3. Modrdn of user and compare memory ruv and database ruv. + 4. Delete user and compare memory ruv and database ruv. + :expectedresults: + 1. For add user, the memory ruv and database ruv should be the same. + 2. For modify operation, the memory ruv and database ruv should be the same. + 3. For modrdn operation, the memory ruv and database ruv should be the same. + 4. For delete operation, the memory ruv and database ruv should be the same. + """ + + log.info('Adding user: {} to master1'.format(TEST_ENTRY_NAME)) + users = UserAccounts(topo.ms['master1'], DEFAULT_SUFFIX) + tuser = users.create(properties=USER_PROPERTIES) + _compare_memoryruv_and_databaseruv(topo, 'add') + + log.info('Modify user: {} description'.format(TEST_ENTRY_NAME)) + tuser.replace('description', 'newdesc') + _compare_memoryruv_and_databaseruv(topo, 'modify') + + log.info('Modify RDN of user: {}'.format(tuser.dn)) + try: + topo.ms['master1'].modrdn_s(tuser.dn, 'uid={}'.format(NEW_RDN_NAME), 0) + except ldap.LDAPError as e: + log.fatal('Failed to modrdn entry: {}'.format(tuser.dn)) + raise e + _compare_memoryruv_and_databaseruv(topo, 'modrdn') + + tuser = users.get(NEW_RDN_NAME) + log.info('Delete user: {}'.format(tuser.dn)) + tuser.delete() + _compare_memoryruv_and_databaseruv(topo, 'delete') + + +if __name__ == '__main__': + # Run isolated + # -s for DEBUG mode + CURRENT_FILE = os.path.realpath(__file__) + pytest.main('-s {}'.format(CURRENT_FILE)) diff --git a/dirsrvtests/tests/suites/replication/single_master_test.py b/dirsrvtests/tests/suites/replication/single_master_test.py new file mode 100644 index 0000000..78f849d --- /dev/null +++ b/dirsrvtests/tests/suites/replication/single_master_test.py @@ -0,0 +1,159 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2017 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. 
+# --- END COPYRIGHT BLOCK --- +# +import pytest +from lib389.tasks import * +from lib389.utils import * + +from lib389.idm.user import UserAccounts, TEST_USER_PROPERTIES + +from lib389.replica import ReplicationManager, Replicas +from lib389.backend import Backends + +from lib389.topologies import topology_m1c1 as topo_r # Replication +from lib389.topologies import topology_i2 as topo_nr # No replication + +from lib389._constants import (ReplicaRole, DEFAULT_SUFFIX, REPLICAID_MASTER_1, + REPLICATION_BIND_DN, REPLICATION_BIND_PW, + REPLICATION_BIND_METHOD, REPLICATION_TRANSPORT, DEFAULT_BACKUPDIR, + RA_NAME, RA_BINDDN, RA_BINDPW, RA_METHOD, RA_TRANSPORT_PROT, + defaultProperties) +import json + +pytestmark = pytest.mark.tier1 + +DEBUGGING = os.getenv("DEBUGGING", default=False) +if DEBUGGING: + logging.getLogger(__name__).setLevel(logging.DEBUG) +else: + logging.getLogger(__name__).setLevel(logging.INFO) +log = logging.getLogger(__name__) + +def test_mail_attr_repl(topo_r): + """Check that no crash happens during mail attribute replication + + :id: 959edc84-05be-4bf9-a541-53afae482052 + :setup: Replication setup with master and consumer instances, + test user on master + :steps: + 1. Check that user was replicated to consumer + 2. Back up mail database file + 3. Remove mail attribute from the user entry + 4. Restore mail database + 5. Search for the entry with a substring 'mail=user*' + 6. Search for the entry once again to make sure that server is alive + :expectedresults: + 1. The user should be replicated to consumer + 2. Operation should be successful + 3. The mail attribute should be removed + 4. Operation should be successful + 5. Search should be successful + 6. No crash should happen + """ + + master = topo_r.ms["master1"] + consumer = topo_r.cs["consumer1"] + repl = ReplicationManager(DEFAULT_SUFFIX) + + m_users = UserAccounts(topo_r.ms["master1"], DEFAULT_SUFFIX) + m_user = m_users.ensure_state(properties=TEST_USER_PROPERTIES) + m_user.ensure_present('mail', 'testuser@redhat.com') + + log.info("Check that replication is working") + repl.wait_for_replication(master, consumer) + c_users = UserAccounts(topo_r.cs["consumer1"], DEFAULT_SUFFIX) + c_user = c_users.get('testuser') + + c_bes = Backends(consumer) + c_be = c_bes.get(DEFAULT_SUFFIX) + + db_dir = c_be.get_attr_val_utf8('nsslapd-directory') + + mail_db = list(filter(lambda fl: fl.startswith("mail"), os.listdir(db_dir))) + assert mail_db, "mail.* wasn't found in {}" + mail_db_path = os.path.join(db_dir, mail_db[0]) + backup_path = os.path.join(DEFAULT_BACKUPDIR, mail_db[0]) + + consumer.stop() + log.info("Back up {} to {}".format(mail_db_path, backup_path)) + shutil.copyfile(mail_db_path, backup_path) + consumer.start() + + log.info("Remove 'mail' attr from master") + m_user.remove_all('mail') + + log.info("Wait for the replication to happen") + repl.wait_for_replication(master, consumer) + + consumer.stop() + log.info("Restore {} to {}".format(backup_path, mail_db_path)) + shutil.copyfile(backup_path, mail_db_path) + consumer.start() + + log.info("Make a search for mail attribute in attempt to crash server") + c_user.get_attr_val("mail") + + log.info("Make sure that server hasn't crashed") + repl.test_replication(master, consumer) + + +def test_lastupdate_attr_before_init(topo_nr): + """Check that LastUpdate replica attributes show right values + + :id: bc8ce431-ff65-41f5-9331-605cbcaaa887 + :setup: Replication setup with master and consumer instances + without initialization + :steps: + 1. 
Check nsds5replicaLastUpdateStart value + 2. Check nsds5replicaLastUpdateEnd value + 3. Check nsds5replicaLastUpdateStatus value + 4. Check nsds5replicaLastUpdateStatusJSON is parsable + :expectedresults: + 1. nsds5replicaLastUpdateStart should be equal to 0 + 2. nsds5replicaLastUpdateEnd should be equal to 0 + 3. nsds5replicaLastUpdateStatus should not be equal + to "Replica acquired successfully: Incremental update started" + 4. Success + """ + + master = topo_nr.ins["standalone1"] + consumer = topo_nr.ins["standalone2"] + + repl = ReplicationManager(DEFAULT_SUFFIX) + repl.create_first_master(master) + + # Manually create an un-synced consumer. + + consumer_replicas = Replicas(consumer) + consumer_replicas.create(properties={ + 'cn': 'replica', + 'nsDS5ReplicaRoot': DEFAULT_SUFFIX, + 'nsDS5ReplicaId': '65535', + 'nsDS5Flags': '0', + 'nsDS5ReplicaType': '2', + }) + + agmt = repl.ensure_agreement(master, consumer) + with pytest.raises(Exception): + repl.wait_for_replication(master, consumer, timeout=5) + + assert agmt.get_attr_val_utf8('nsds5replicaLastUpdateStart') == "19700101000000Z" + assert agmt.get_attr_val_utf8("nsds5replicaLastUpdateEnd") == "19700101000000Z" + assert "replica acquired successfully" not in agmt.get_attr_val_utf8_l("nsds5replicaLastUpdateStatus") + + # make sure the JSON attribute is parsable + json_status = agmt.get_attr_val_utf8("nsds5replicaLastUpdateStatusJSON") + if json_status is not None: + json_obj = json.loads(json_status) + log.debug("JSON status message: {}".format(json_obj)) + +if __name__ == '__main__': + # Run isolated + # -s for DEBUG mode + CURRENT_FILE = os.path.realpath(__file__) + pytest.main("-s %s" % CURRENT_FILE) diff --git a/dirsrvtests/tests/suites/replication/tls_client_auth_repl_test.py b/dirsrvtests/tests/suites/replication/tls_client_auth_repl_test.py new file mode 100644 index 0000000..e78a435 --- /dev/null +++ b/dirsrvtests/tests/suites/replication/tls_client_auth_repl_test.py @@ -0,0 +1,176 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2017 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. +# --- END COPYRIGHT BLOCK --- +# +import logging +import os +import pytest +from lib389.utils import ds_is_older +from lib389.idm.services import ServiceAccounts +from lib389.config import CertmapLegacy +from lib389._constants import DEFAULT_SUFFIX +from lib389.replica import ReplicationManager, Replicas +from lib389.topologies import topology_m2 as topo_m2 + +pytestmark = pytest.mark.tier1 + +DEBUGGING = os.getenv("DEBUGGING", default=False) +if DEBUGGING: + logging.getLogger(__name__).setLevel(logging.DEBUG) +else: + logging.getLogger(__name__).setLevel(logging.INFO) +log = logging.getLogger(__name__) + + +@pytest.fixture(scope="module") +def tls_client_auth(topo_m2): + """Enable TLS on both masters and reconfigure + both agreements to use TLS Client auth + """ + + m1 = topo_m2.ms['master1'] + m2 = topo_m2.ms['master2'] + + if ds_is_older('1.4.0.6'): + transport = 'SSL' + else: + transport = 'LDAPS' + + # Create the certmap before we restart for enable_tls + cm_m1 = CertmapLegacy(m1) + cm_m2 = CertmapLegacy(m2) + + # We need to configure the same maps for both .... 
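+    # Roughly (my reading of certmap behaviour): clearing DNComps stops the
+    # search base from being derived from the certificate subject components,
+    # and CmapLdapAttr=nsCertSubjectDN makes the server match the whole
+    # certificate subject DN against that attribute, which is set on the
+    # replication service accounts further down in this fixture.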
+ certmaps = cm_m1.list() + certmaps['default']['DNComps'] = None + certmaps['default']['CmapLdapAttr'] = 'nsCertSubjectDN' + + cm_m1.set(certmaps) + cm_m2.set(certmaps) + + [i.enable_tls() for i in topo_m2] + + # Create the replication dns + services = ServiceAccounts(m1, DEFAULT_SUFFIX) + repl_m1 = services.get('%s:%s' % (m1.host, m1.sslport)) + repl_m1.set('nsCertSubjectDN', m1.get_server_tls_subject()) + + repl_m2 = services.get('%s:%s' % (m2.host, m2.sslport)) + repl_m2.set('nsCertSubjectDN', m2.get_server_tls_subject()) + + # Check the replication is "done". + repl = ReplicationManager(DEFAULT_SUFFIX) + repl.wait_for_replication(m1, m2) + # Now change the auth type + + replica_m1 = Replicas(m1).get(DEFAULT_SUFFIX) + agmt_m1 = replica_m1.get_agreements().list()[0] + + agmt_m1.replace_many( + ('nsDS5ReplicaBindMethod', 'SSLCLIENTAUTH'), + ('nsDS5ReplicaTransportInfo', transport), + ('nsDS5ReplicaPort', str(m2.sslport)), + ) + agmt_m1.remove_all('nsDS5ReplicaBindDN') + + replica_m2 = Replicas(m2).get(DEFAULT_SUFFIX) + agmt_m2 = replica_m2.get_agreements().list()[0] + + agmt_m2.replace_many( + ('nsDS5ReplicaBindMethod', 'SSLCLIENTAUTH'), + ('nsDS5ReplicaTransportInfo', transport), + ('nsDS5ReplicaPort', str(m1.sslport)), + ) + agmt_m2.remove_all('nsDS5ReplicaBindDN') + + repl.test_replication_topology(topo_m2) + + return topo_m2 + + +def test_ssl_transport(tls_client_auth): + """Test different combinations for nsDS5ReplicaTransportInfo values + + :id: 922d16f8-662a-4915-a39e-0aecd7c8e6e2 + :setup: Two master replication, enabled TLS client auth + :steps: + 1. Set nsDS5ReplicaTransportInfoCheck: SSL or StartTLS or TLS + 2. Restart the instance + 3. Check that replication works + 4. Set nsDS5ReplicaTransportInfoCheck: LDAPS back + :expectedresults: + 1. Success + 2. Success + 3. Replication works + 4. Success + """ + + m1 = tls_client_auth.ms['master1'] + m2 = tls_client_auth.ms['master2'] + repl = ReplicationManager(DEFAULT_SUFFIX) + replica_m1 = Replicas(m1).get(DEFAULT_SUFFIX) + replica_m2 = Replicas(m2).get(DEFAULT_SUFFIX) + agmt_m1 = replica_m1.get_agreements().list()[0] + agmt_m2 = replica_m2.get_agreements().list()[0] + + if ds_is_older('1.4.0.6'): + check_list = (('TLS', False),) + else: + check_list = (('SSL', True), ('StartTLS', False), ('TLS', False)) + + for transport, secure_port in check_list: + agmt_m1.replace_many(('nsDS5ReplicaTransportInfo', transport), + ('nsDS5ReplicaPort', '{}'.format(m2.port if not secure_port else m2.sslport))) + agmt_m2.replace_many(('nsDS5ReplicaTransportInfo', transport), + ('nsDS5ReplicaPort', '{}'.format(m1.port if not secure_port else m1.sslport))) + repl.test_replication_topology(tls_client_auth) + + if ds_is_older('1.4.0.6'): + agmt_m1.replace_many(('nsDS5ReplicaTransportInfo', 'SSL'), + ('nsDS5ReplicaPort', str(m2.sslport))) + agmt_m2.replace_many(('nsDS5ReplicaTransportInfo', 'SSL'), + ('nsDS5ReplicaPort', str(m1.sslport))) + else: + agmt_m1.replace_many(('nsDS5ReplicaTransportInfo', 'LDAPS'), + ('nsDS5ReplicaPort', str(m2.sslport))) + agmt_m2.replace_many(('nsDS5ReplicaTransportInfo', 'LDAPS'), + ('nsDS5ReplicaPort', str(m1.sslport))) + repl.test_replication_topology(tls_client_auth) + + +def test_extract_pemfiles(tls_client_auth): + """Test TLS client authentication between two masters operates + as expected with 'on' and 'off' options of nsslapd-extract-pemfiles + + :id: 922d16f8-662a-4915-a39e-0aecd7c8e6e1 + :setup: Two master replication, enabled TLS client auth + :steps: + 1. 
Check that the nsslapd-extract-pemfiles default value is correct
+        2. Check that replication works with both 'on' and 'off' values
+    :expectedresults:
+        1. Success
+        2. Replication works
+    """
+
+    m1 = tls_client_auth.ms['master1']
+    m2 = tls_client_auth.ms['master2']
+    repl = ReplicationManager(DEFAULT_SUFFIX)
+
+    if ds_is_older('1.3.7'):
+        default_val = 'off'
+    else:
+        default_val = 'on'
+    attr_val = m1.config.get_attr_val_utf8('nsslapd-extract-pemfiles')
+    log.info("Check that nsslapd-extract-pemfiles is {}".format(default_val))
+    assert attr_val == default_val
+
+    for extract_pemfiles in ('on', 'off'):
+        log.info("Set nsslapd-extract-pemfiles = '{}' and check that replication works".format(extract_pemfiles))
+        m1.config.set('nsslapd-extract-pemfiles', extract_pemfiles)
+        m2.config.set('nsslapd-extract-pemfiles', extract_pemfiles)
+        repl.test_replication_topology(tls_client_auth)
+
diff --git a/dirsrvtests/tests/suites/replication/tombstone_fixup_test.py b/dirsrvtests/tests/suites/replication/tombstone_fixup_test.py
new file mode 100644
index 0000000..b9c6aee
--- /dev/null
+++ b/dirsrvtests/tests/suites/replication/tombstone_fixup_test.py
@@ -0,0 +1,129 @@
+# --- BEGIN COPYRIGHT BLOCK ---
+# Copyright (C) 2017 Red Hat, Inc.
+# All rights reserved.
+#
+# License: GPL (version 3 or any later version).
+# See LICENSE for details.
+# --- END COPYRIGHT BLOCK ---
+#
+import pytest
+from lib389.tasks import *
+from lib389.utils import *
+from lib389.topologies import topology_m1
+from lib389.tombstone import Tombstones
+from lib389.idm.user import UserAccounts, TEST_USER_PROPERTIES
+from lib389.replica import ReplicationManager
+from lib389._constants import (defaultProperties, DEFAULT_SUFFIX, ReplicaRole,
+                               REPLICAID_MASTER_1, REPLICA_PRECISE_PURGING, REPLICA_PURGE_DELAY,
+                               REPLICA_PURGE_INTERVAL)
+
+pytestmark = pytest.mark.tier2
+
+
+def test_precise_tombstone_purging(topology_m1):
+    """ Test precise tombstone purging
+
+    :id: adb86f50-ae76-4ed6-82b4-3cdc30ccab79
+    :setup: master1 instance
+    :steps:
+        1. Create and delete an entry to create a tombstone
+        2. Export the LDIF, edit it, and import it again
+        3. Check that tombstones do not contain nsTombstoneCSN
+        4. Run the fixup task and verify that tombstones now have nsTombstoneCSN
+        5. Configure tombstone purging
+        6. Verify tombstones are purged
+    :expectedresults:
+        1. Success
+        2. Success
+        3. Success
+        4. Success
+        5. Success
+        6. 
Success + """ + + m1 = topology_m1.ms['master1'] + m1_tasks = Tasks(m1) + + # Create tombstone entry + users = UserAccounts(m1, DEFAULT_SUFFIX) + user = users.create_test_user(uid=1001) + user.delete() + + # Verify tombstone was created + tombstones = Tombstones(m1, DEFAULT_SUFFIX) + assert len(tombstones.list()) == 1 + + # Export db, strip nsTombstoneCSN, and import it + ldif_file = "{}/export.ldif".format(m1.get_ldif_dir()) + args = {EXPORT_REPL_INFO: True, + TASK_WAIT: True} + m1_tasks.exportLDIF(DEFAULT_SUFFIX, None, ldif_file, args) + time.sleep(.5) + + # Strip LDIF of nsTombstoneCSN, getthe LDIF lines, the n create new ldif + ldif = open(ldif_file, "r") + lines = ldif.readlines() + ldif.close() + time.sleep(.5) + + ldif = open(ldif_file, "w") + for line in lines: + if not line.lower().startswith('nstombstonecsn'): + ldif.write(line) + ldif.close() + time.sleep(.5) + + # import the new ldif file + log.info('Import replication LDIF file...') + args = {TASK_WAIT: True} + m1_tasks.importLDIF(DEFAULT_SUFFIX, None, ldif_file, args) + time.sleep(.5) + + # Search for the tombstone again + tombstones = Tombstones(m1, DEFAULT_SUFFIX) + assert len(tombstones.list()) == 1 + + # + # Part 3 - test fixup task using the strip option. + # + args = {TASK_WAIT: True, + TASK_TOMB_STRIP: True} + m1_tasks.fixupTombstones(DEFAULT_BENAME, args) + time.sleep(.5) + + # Search for tombstones with nsTombstoneCSN - better not find any + for ts in tombstones.list(): + assert not ts.present("nsTombstoneCSN") + + # Now run the fixup task + args = {TASK_WAIT: True} + m1_tasks.fixupTombstones(DEFAULT_BENAME, args) + time.sleep(.5) + + # Search for tombstones with nsTombstoneCSN - better find some + tombstones = Tombstones(m1, DEFAULT_SUFFIX) + assert len(tombstones.list()) == 1 + + # + # Part 4 - Test tombstone purging + # + args = {REPLICA_PRECISE_PURGING: b'on', + REPLICA_PURGE_DELAY: b'5', + REPLICA_PURGE_INTERVAL: b'5'} + m1.replica.setProperties(DEFAULT_SUFFIX, None, None, args) + + # Wait for the interval to pass + log.info('Wait for tombstone purge interval to pass...') + time.sleep(6) + + # Add an entry to trigger replication + users.create_test_user(uid=1002) + + # Wait for the interval to pass again + log.info('Wait for tombstone purge interval to pass again...') + time.sleep(6) + + # search for tombstones, there should be none + tombstones = Tombstones(m1, DEFAULT_SUFFIX) + assert len(tombstones.list()) == 0 + diff --git a/dirsrvtests/tests/suites/replication/tombstone_test.py b/dirsrvtests/tests/suites/replication/tombstone_test.py new file mode 100644 index 0000000..67f8001 --- /dev/null +++ b/dirsrvtests/tests/suites/replication/tombstone_test.py @@ -0,0 +1,63 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2017 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. +# --- END COPYRIGHT BLOCK --- +# +import pytest +from lib389.tasks import * +from lib389.utils import * +from lib389.topologies import topology_m1 +from lib389.tombstone import Tombstones +from lib389.idm.user import UserAccounts, TEST_USER_PROPERTIES + +pytestmark = pytest.mark.tier1 + + +def test_purge_success(topology_m1): + """Verify that tombstones are created successfully + + :id: adb86f50-ae76-4ed6-82b4-3cdc30ccab78 + :setup: Standalone instance + :steps: + 1. Enable replication to unexisting instance + 2. Add an entry to the replicated suffix + 3. Delete the entry + 4. 
Check that tombstone entry exists (objectclass=nsTombstone) + :expectedresults: Tombstone entry exist + 1. Operation should be successful + 2. The entry should be successfully added + 3. The entry should be successfully deleted + 4. Tombstone entry should exist + """ + m1 = topology_m1.ms['master1'] + + users = UserAccounts(m1, DEFAULT_SUFFIX) + user = users.create(properties=TEST_USER_PROPERTIES) + + tombstones = Tombstones(m1, DEFAULT_SUFFIX) + + assert len(tombstones.list()) == 0 + + user.delete() + + assert len(tombstones.list()) == 1 + assert len(users.list()) == 0 + + ts = tombstones.get('testuser') + assert ts.exists() + + if not ds_is_older('1.4.0'): + ts.revive() + + assert len(users.list()) == 1 + user_revived = users.get('testuser') + + +if __name__ == '__main__': + # Run isolated + # -s for DEBUG mode + CURRENT_FILE = os.path.realpath(__file__) + pytest.main("-s %s" % CURRENT_FILE) diff --git a/dirsrvtests/tests/suites/replication/wait_for_async_feature_test.py b/dirsrvtests/tests/suites/replication/wait_for_async_feature_test.py new file mode 100644 index 0000000..270c41e --- /dev/null +++ b/dirsrvtests/tests/suites/replication/wait_for_async_feature_test.py @@ -0,0 +1,212 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2016 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. +# --- END COPYRIGHT BLOCK --- +# +from collections import Counter + +import pytest +from lib389.tasks import * +from lib389.utils import * +from lib389.topologies import topology_m2 + +from lib389._constants import SUFFIX, DEFAULT_SUFFIX, ErrorLog + +from lib389.agreement import Agreements +from lib389.idm.organizationalunit import OrganizationalUnits + +pytestmark = pytest.mark.tier1 + +logging.getLogger(__name__).setLevel(logging.DEBUG) +log = logging.getLogger(__name__) + +installation1_prefix = None + +@pytest.fixture(params=[(None, (4, 11)), + ('2000', (0, 2)), + ('0', (4, 11)), + ('-5', (4, 11))]) +def waitfor_async_attr(topology_m2, request): + """Sets attribute on all replicas""" + + attr_value = request.param[0] + expected_result = request.param[1] + + # Run through all masters + + for master in topology_m2.ms.values(): + agmt = Agreements(master).list()[0] + + if attr_value: + agmt.set_wait_for_async_results(attr_value) + else: + try: + # Sometimes we can double remove this. + agmt.remove_wait_for_async_results() + except ldap.NO_SUCH_ATTRIBUTE: + pass + + return (attr_value, expected_result) + + +@pytest.fixture +def entries(topology_m2, request): + """Adds entries to the master1""" + + master1 = topology_m2.ms["master1"] + + test_list = [] + + log.info("Add 100 nested entries under replicated suffix on %s" % master1.serverid) + ous = OrganizationalUnits(master1, DEFAULT_SUFFIX) + for i in range(100): + ou = ous.create(properties={ + 'ou' : 'test_ou_%s' % i, + }) + test_list.append(ou) + + log.info("Delete created entries") + for test_ou in test_list: + test_ou.delete() + + def fin(): + log.info("Clear the errors log in the end of the test case") + with open(master1.errlog, 'w') as errlog: + errlog.writelines("") + + request.addfinalizer(fin) + + +def test_not_int_value(topology_m2): + """Tests not integer value + + :id: 67c9994f-9251-425a-8197-8d12ad9beafc + :setup: Replication with two masters + :steps: + 1. Try to set some string value + to nsDS5ReplicaWaitForAsyncResults + :expectedresults: + 1. 
Invalid syntax error should be raised + """ + master1 = topology_m2.ms["master1"] + agmt = Agreements(master1).list()[0] + + with pytest.raises(ldap.INVALID_SYNTAX): + agmt.set_wait_for_async_results("ws2") + +def test_multi_value(topology_m2): + """Tests multi value + + :id: 1932301a-db29-407e-b27e-4466a876d1d3 + :setup: Replication with two masters + :steps: + 1. Set nsDS5ReplicaWaitForAsyncResults to some int + 2. Try to add one more int value + to nsDS5ReplicaWaitForAsyncResults + :expectedresults: + 1. nsDS5ReplicaWaitForAsyncResults should be set + 2. Object class violation error should be raised + """ + + master1 = topology_m2.ms["master1"] + agmt = Agreements(master1).list()[0] + + agmt.set_wait_for_async_results('100') + with pytest.raises(ldap.OBJECT_CLASS_VIOLATION): + agmt.add('nsDS5ReplicaWaitForAsyncResults', '101') + +def test_value_check(topology_m2, waitfor_async_attr): + """Checks that value has been set correctly + + :id: 3e81afe9-5130-410d-a1bb-d798d8ab8519 + :parametrized: yes + :setup: Replication with two masters, + wait for async set on all masters, try: + None, '2000', '0', '-5' + :steps: + 1. Search for nsDS5ReplicaWaitForAsyncResults on master 1 + 2. Search for nsDS5ReplicaWaitForAsyncResults on master 2 + :expectedresults: + 1. nsDS5ReplicaWaitForAsyncResults should be set correctly + 2. nsDS5ReplicaWaitForAsyncResults should be set correctly + """ + + attr_value = waitfor_async_attr[0] + + for master in topology_m2.ms.values(): + agmt = Agreements(master).list()[0] + + server_value = agmt.get_wait_for_async_results_utf8() + assert server_value == attr_value + +def test_behavior_with_value(topology_m2, waitfor_async_attr, entries): + """Tests replication behavior with valid + nsDS5ReplicaWaitForAsyncResults attribute values + + :id: 117b6be2-cdab-422e-b0c7-3b88bbeec036 + :parametrized: yes + :setup: Replication with two masters, + wait for async set on all masters, try: + None, '2000', '0', '-5' + :steps: + 1. Set Replication Debugging loglevel for the errorlog + 2. Set nsslapd-logging-hr-timestamps-enabled to 'off' on both masters + 3. Gather all sync attempts, group by timestamp + 4. Take the most common timestamp and assert it has appeared + in the set range + :expectedresults: + 1. Replication Debugging loglevel should be set + 2. nsslapd-logging-hr-timestamps-enabled should be set + 3. Operation should be successful + 4. Errors log should have all timestamp appear + """ + + master1 = topology_m2.ms["master1"] + master2 = topology_m2.ms["master2"] + + log.info("Set Replication Debugging loglevel for the errorlog") + master1.config.loglevel((ErrorLog.REPLICA,)) + master2.config.loglevel((ErrorLog.REPLICA,)) + + sync_dict = Counter() + min_ap = waitfor_async_attr[1][0] + max_ap = waitfor_async_attr[1][1] + + time.sleep(20) + + log.info("Gather all sync attempts within Counter dict, group by timestamp") + with open(master1.errlog, 'r') as errlog: + errlog_filtered = filter(lambda x: "waitfor_async_results" in x, errlog) + + # Watch only over unsuccessful sync attempts + for line in errlog_filtered: + if line.split()[3] != line.split()[4]: + # A timestamp looks like: + # [03/Jan/2018:14:35:15.806396035 +1000] LOGMESSAGE HERE + # We want to assert a range of "seconds", so we need to reduce + # this to a reasonable amount. IE: + # [03/Jan/2018:14:35:15 + # So to achieve this we split on ] and . IE. 
+ # [03/Jan/2018:14:35:15.806396035 +1000] LOGMESSAGE HERE + # ^ split here first + # ^ now split here + # [03/Jan/2018:14:35:15 + # ^ final result + timestamp = line.split(']')[0].split('.')[0] + sync_dict[timestamp] += 1 + + log.info("Take the most common timestamp and assert it has appeared " \ + "in the range from %s to %s times" % (min_ap, max_ap)) + most_common_val = sync_dict.most_common(1)[0][1] + log.debug("%s <= %s <= %s" % (min_ap, most_common_val, max_ap)) + assert min_ap <= most_common_val <= max_ap + + +if __name__ == '__main__': + # Run isolated + # -s for DEBUG mode + CURRENT_FILE = os.path.realpath(__file__) + pytest.main("-s %s" % CURRENT_FILE) diff --git a/dirsrvtests/tests/suites/resource_limits/__init__.py b/dirsrvtests/tests/suites/resource_limits/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/dirsrvtests/tests/suites/resource_limits/fdlimits_test.py b/dirsrvtests/tests/suites/resource_limits/fdlimits_test.py new file mode 100644 index 0000000..1a2f547 --- /dev/null +++ b/dirsrvtests/tests/suites/resource_limits/fdlimits_test.py @@ -0,0 +1,76 @@ +import logging +import pytest +import os +import ldap +import resource +from lib389._constants import * +from lib389.topologies import topology_st +from lib389.utils import ds_is_older, ensure_str +from subprocess import check_output + +pytestmark = pytest.mark.tier1 + +logging.getLogger(__name__).setLevel(logging.INFO) +log = logging.getLogger(__name__) + +FD_ATTR = "nsslapd-maxdescriptors" +GLOBAL_LIMIT = resource.getrlimit(resource.RLIMIT_NOFILE)[1] +SYSTEMD_LIMIT = ensure_str(check_output("systemctl show -p LimitNOFILE dirsrv@standalone1".split(" ")).strip()).split('=')[1] +CUSTOM_VAL = str(int(SYSTEMD_LIMIT) - 10) +TOO_HIGH_VAL = str(GLOBAL_LIMIT * 2) +TOO_HIGH_VAL2 = str(int(SYSTEMD_LIMIT) * 2) +TOO_LOW_VAL = "0" + +@pytest.mark.skipif(ds_is_older("1.4.1.2"), reason="Not implemented") +def test_fd_limits(topology_st): + """Test the default limits, and custom limits + + :id: fa0a5106-612f-428f-84c0-9c85c34d0433 + :setup: Standalone Instance + :steps: + 1. Check default limit + 2. Change default limit + 3. Check invalid/too high limits are rejected + 4. Check invalid/too low limit is rejected + :expectedresults: + 1. Success + 2. Success + 3. 
Success + 4 Success + """ + + # Check systemd default + max_fd = topology_st.standalone.config.get_attr_val_utf8(FD_ATTR) + assert max_fd == SYSTEMD_LIMIT + + # Check custom value is applied + topology_st.standalone.config.set(FD_ATTR, CUSTOM_VAL) + max_fd = topology_st.standalone.config.get_attr_val_utf8(FD_ATTR) + assert max_fd == CUSTOM_VAL + + # # Attempt to use value that is higher than the global system limit + with pytest.raises(ldap.UNWILLING_TO_PERFORM): + topology_st.standalone.config.set(FD_ATTR, TOO_HIGH_VAL) + max_fd = topology_st.standalone.config.get_attr_val_utf8(FD_ATTR) + assert max_fd == CUSTOM_VAL + + # Attempt to use value that is higher than the value defined in the systemd service + with pytest.raises(ldap.UNWILLING_TO_PERFORM): + topology_st.standalone.config.set(FD_ATTR, TOO_HIGH_VAL2) + max_fd = topology_st.standalone.config.get_attr_val_utf8(FD_ATTR) + assert max_fd == CUSTOM_VAL + + # Attempt to use val that is too low + with pytest.raises(ldap.OPERATIONS_ERROR): + topology_st.standalone.config.set(FD_ATTR, TOO_LOW_VAL) + max_fd = topology_st.standalone.config.get_attr_val_utf8(FD_ATTR) + assert max_fd == CUSTOM_VAL + + log.info("Test PASSED") + + +if __name__ == '__main__': + # Run isolated + # -s for DEBUG mode + CURRENT_FILE = os.path.realpath(__file__) + pytest.main(["-s", CURRENT_FILE]) diff --git a/dirsrvtests/tests/suites/roles/basic_test.py b/dirsrvtests/tests/suites/roles/basic_test.py new file mode 100644 index 0000000..3f1b756 --- /dev/null +++ b/dirsrvtests/tests/suites/roles/basic_test.py @@ -0,0 +1,295 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2019 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. +# --- END COPYRIGHT BLOCK --- + + +""" +Importing necessary Modules. +""" + +import os +import pytest + +from lib389._constants import PW_DM, DEFAULT_SUFFIX +from lib389.idm.user import UserAccount, UserAccounts +from lib389.idm.organization import Organization +from lib389.idm.organizationalunit import OrganizationalUnit +from lib389.topologies import topology_st as topo +from lib389.idm.role import FilteredRoles, ManagedRoles, NestedRoles +from lib389.idm.domain import Domain + +pytestmark = pytest.mark.tier1 + +DNBASE = "o=acivattr,{}".format(DEFAULT_SUFFIX) +ENG_USER = "cn=enguser1,ou=eng,{}".format(DNBASE) +SALES_UESER = "cn=salesuser1,ou=sales,{}".format(DNBASE) +ENG_MANAGER = "cn=engmanager1,ou=eng,{}".format(DNBASE) +SALES_MANAGER = "cn=salesmanager1,ou=sales,{}".format(DNBASE) +SALES_OU = "ou=sales,{}".format(DNBASE) +ENG_OU = "ou=eng,{}".format(DNBASE) +FILTERROLESALESROLE = "cn=FILTERROLESALESROLE,{}".format(DNBASE) +FILTERROLEENGROLE = "cn=FILTERROLEENGROLE,{}".format(DNBASE) + + +def test_filterrole(topo): + ''' + :id: 8ada4064-786b-11e8-8634-8c16451d917b + :setup: server + :steps: + 1. Add test entry + 2. Add ACI + 3. Search nsconsole role + :expectedresults: + 1. Entry should be added + 2. Operation should succeed + 3. 
Operation should succeed + ''' + Organization(topo.standalone).create(properties={"o": "acivattr"}, basedn=DEFAULT_SUFFIX) + properties = { + 'ou': 'eng', + } + + ou_ou = OrganizationalUnit(topo.standalone, "ou=eng,o=acivattr,{}".format(DEFAULT_SUFFIX)) + ou_ou.create(properties=properties) + properties = {'ou': 'sales'} + ou_ou = OrganizationalUnit(topo.standalone, "ou=sales,o=acivattr,{}".format(DEFAULT_SUFFIX)) + ou_ou.create(properties=properties) + + roles = FilteredRoles(topo.standalone, DNBASE) + roles.create(properties={'cn': 'FILTERROLEENGROLE', 'nsRoleFilter': 'cn=eng*'}) + roles.create(properties={'cn': 'FILTERROLESALESROLE', 'nsRoleFilter': 'cn=sales*'}) + + properties = { + 'uid': 'salesuser1', + 'cn': 'salesuser1', + 'sn': 'user', + 'uidNumber': '1000', + 'gidNumber': '2000', + 'homeDirectory': '/home/' + 'salesuser1', + 'userPassword': PW_DM + } + user = UserAccount(topo.standalone, + 'cn=salesuser1,ou=sales,o=acivattr,{}'.format(DEFAULT_SUFFIX)) + user.create(properties=properties) + + properties = { + 'uid': 'salesmanager1', + 'cn': 'salesmanager1', + 'sn': 'user', + 'uidNumber': '1000', + 'gidNumber': '2000', + 'homeDirectory': '/home/' + 'salesmanager1', + 'userPassword': PW_DM, + } + user = UserAccount(topo.standalone, + 'cn=salesmanager1,ou=sales,o=acivattr,{}'.format(DEFAULT_SUFFIX)) + user.create(properties=properties) + + properties = { + 'uid': 'enguser1', + 'cn': 'enguser1', + 'sn': 'user', + 'uidNumber': '1000', + 'gidNumber': '2000', + 'homeDirectory': '/home/' + 'enguser1', + 'userPassword': PW_DM + } + user = UserAccount(topo.standalone, + 'cn=enguser1,ou=eng,o=acivattr,{}'.format(DEFAULT_SUFFIX)) + user.create(properties=properties) + + properties = { + 'uid': 'engmanager1', + 'cn': 'engmanager1', + 'sn': 'user', + 'uidNumber': '1000', + 'gidNumber': '2000', + 'homeDirectory': '/home/' + 'engmanager1', + 'userPassword': PW_DM + } + user = UserAccount(topo.standalone, + 'cn=engmanager1,ou=eng,o=acivattr,{}'.format(DEFAULT_SUFFIX)) + user.create(properties=properties) + + # user with cn=sales* will automatically memeber of nsfilterrole + # cn=filterrolesalesrole,o=acivattr,dc=example,dc=com + assert UserAccount(topo.standalone, + 'cn=salesuser1,ou=sales,o=acivattr,dc=example,dc=com').\ + get_attr_val_utf8('nsrole') == 'cn=filterrolesalesrole,o=acivattr,dc=example,dc=com' + # same goes to SALES_MANAGER + assert UserAccount(topo.standalone, SALES_MANAGER).get_attr_val_utf8( + 'nsrole') == 'cn=filterrolesalesrole,o=acivattr,dc=example,dc=com' + # user with cn=eng* will automatically memeber of nsfilterrole + # cn=filterroleengrole,o=acivattr,dc=example,dc=com + assert UserAccount(topo.standalone, 'cn=enguser1,ou=eng,o=acivattr,dc=example,dc=com').\ + get_attr_val_utf8('nsrole') == 'cn=filterroleengrole,o=acivattr,dc=example,dc=com' + # same goes to ENG_MANAGER + assert UserAccount(topo.standalone, ENG_MANAGER).get_attr_val_utf8( + 'nsrole') == 'cn=filterroleengrole,o=acivattr,dc=example,dc=com' + for dn_dn in [ENG_USER, SALES_UESER, ENG_MANAGER, SALES_MANAGER, + FILTERROLESALESROLE, FILTERROLEENGROLE, ENG_OU, + SALES_OU, DNBASE]: + UserAccount(topo.standalone, dn_dn).delete() + + +def test_managedrole(topo): + ''' + :id: d52a9c00-3bf6-11e9-9b7b-8c16451d917b + :setup: server + :steps: + 1. Add test entry + 2. Add ACI + 3. Search managed role entries + :expectedresults: + 1. Entry should be added + 2. Operation should succeed + 3. 
Operation should succeed + ''' + # Create Managed role entry + roles = ManagedRoles(topo.standalone, DEFAULT_SUFFIX) + role = roles.create(properties={"cn": 'ROLE1'}) + + # Create user and Assign the role to the entry + uas = UserAccounts(topo.standalone, DEFAULT_SUFFIX, rdn=None) + uas.create(properties={ + 'uid': 'Fail', + 'cn': 'Fail', + 'sn': 'user', + 'uidNumber': '1000', + 'gidNumber': '2000', + 'homeDirectory': '/home/' + 'Fail', + 'nsRoleDN': role.dn, + 'userPassword': PW_DM + }) + + # Create user and do not Assign any role to the entry + uas.create( + properties={ + 'uid': 'Success', + 'cn': 'Success', + 'sn': 'user', + 'uidNumber': '1000', + 'gidNumber': '2000', + 'homeDirectory': '/home/' + 'Success', + 'userPassword': PW_DM + }) + + # Assert that Manage role entry is created and its searchable + assert ManagedRoles(topo.standalone, DEFAULT_SUFFIX).list()[0].dn \ + == 'cn=ROLE1,dc=example,dc=com' + + # Set an aci that will deny ROLE1 manage role + Domain(topo.standalone, DEFAULT_SUFFIX).\ + add('aci', '(targetattr=*)(version 3.0; aci "role aci";' + ' deny(all) roledn="ldap:///{}";)'.format(role.dn),) + + # Crate a connection with cn=Fail which is member of ROLE1 + conn = UserAccount(topo.standalone, "uid=Fail,{}".format(DEFAULT_SUFFIX)).bind(PW_DM) + # Access denied to ROLE1 members + assert not ManagedRoles(conn, DEFAULT_SUFFIX).list() + + # Now create a connection with cn=Success which is not a member of ROLE1 + conn = UserAccount(topo.standalone, "uid=Success,{}".format(DEFAULT_SUFFIX)).bind(PW_DM) + # Access allowed here + assert ManagedRoles(conn, DEFAULT_SUFFIX).list() + + for i in uas.list(): + i.delete() + + for i in roles.list(): + i.delete() + + +@pytest.fixture(scope="function") +def _final(request, topo): + """ + Removes and Restores ACIs after the test. + """ + aci_list = Domain(topo.standalone, DEFAULT_SUFFIX).get_attr_vals('aci') + + def finofaci(): + """ + Removes and Restores ACIs and other users after the test. + """ + domain = Domain(topo.standalone, DEFAULT_SUFFIX) + domain.remove_all('aci') + + managed_roles = ManagedRoles(topo.standalone, DEFAULT_SUFFIX) + nested_roles = NestedRoles(topo.standalone, DEFAULT_SUFFIX) + users = UserAccounts(topo.standalone, DEFAULT_SUFFIX) + + for i in managed_roles.list() + nested_roles.list() + users.list(): + i.delete() + + for i in aci_list: + domain.add("aci", i) + + request.addfinalizer(finofaci) + + +def test_nestedrole(topo, _final): + """ + :id: 867b40c0-7fcf-4332-afc7-bd01025b77f2 + :setup: Standalone server + :steps: + 1. Add test entry + 2. Add ACI + 3. Search managed role entries + :expectedresults: + 1. Entry should be added + 2. Operation should succeed + 3. 
Operation should succeed + """ + # Create Managed role entry + managed_roles = ManagedRoles(topo.standalone, DEFAULT_SUFFIX) + managed_role1 = managed_roles.create(properties={"cn": 'managed_role1'}) + managed_role2 = managed_roles.create(properties={"cn": 'managed_role2'}) + + # Create nested role entry + nested_roles = NestedRoles(topo.standalone, DEFAULT_SUFFIX) + nested_role = nested_roles.create(properties={"cn": 'nested_role', + "nsRoleDN": [managed_role1.dn, managed_role2.dn]}) + + # Create user and assign managed role to it + users = UserAccounts(topo.standalone, DEFAULT_SUFFIX) + user1 = users.create_test_user(uid=1, gid=1) + user1.set('nsRoleDN', managed_role1.dn) + user1.set('userPassword', PW_DM) + + # Create another user and assign managed role to it + user2 = users.create_test_user(uid=2, gid=2) + user2.set('nsRoleDN', managed_role2.dn) + user2.set('userPassword', PW_DM) + + # Create another user and do not assign any role to it + user3 = users.create_test_user(uid=3, gid=3) + user3.set('userPassword', PW_DM) + + # Create a ACI with deny access to nested role entry + Domain(topo.standalone, DEFAULT_SUFFIX).\ + add('aci', f'(targetattr=*)(version 3.0; aci ' + f'"role aci"; deny(all) roledn="ldap:///{nested_role.dn}";)') + + # Create connection with 'uid=test_user_1,ou=People,dc=example,dc=com' member of managed_role1 + # and search while bound as the user + conn = users.get('test_user_1').bind(PW_DM) + assert not UserAccounts(conn, DEFAULT_SUFFIX).list() + + # Create connection with 'uid=test_user_2,ou=People,dc=example,dc=com' member of managed_role2 + # and search while bound as the user + conn = users.get('test_user_2').bind(PW_DM) + assert not UserAccounts(conn, DEFAULT_SUFFIX).list() + + # Create connection with 'uid=test_user_3,ou=People,dc=example,dc=com' and + # search while bound as the user + conn = users.get('test_user_3').bind(PW_DM) + assert UserAccounts(conn, DEFAULT_SUFFIX).list() + + +if __name__ == "__main__": + CURRENT_FILE = os.path.realpath(__file__) + pytest.main("-s -v %s" % CURRENT_FILE) diff --git a/dirsrvtests/tests/suites/sasl/__init__.py b/dirsrvtests/tests/suites/sasl/__init__.py new file mode 100644 index 0000000..78c30ff --- /dev/null +++ b/dirsrvtests/tests/suites/sasl/__init__.py @@ -0,0 +1,3 @@ +""" + :Requirement: 389-ds-base: SASL Mechanism +""" diff --git a/dirsrvtests/tests/suites/sasl/allowed_mechs_test.py b/dirsrvtests/tests/suites/sasl/allowed_mechs_test.py new file mode 100644 index 0000000..7c807f3 --- /dev/null +++ b/dirsrvtests/tests/suites/sasl/allowed_mechs_test.py @@ -0,0 +1,183 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2017 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. +# --- END COPYRIGHT BLOCK --- +# + +import pytest +import os +from lib389.topologies import topology_st + +pytestmark = pytest.mark.tier1 + +def test_basic_feature(topology_st): + """Test the alloweed sasl mechanism feature + + :id: b0453b91-9955-4e8f-9d2f-a6bf440022b1 + :setup: Standalone instance + :steps: + 1. Get the default list of mechanisms + 2. Set allowed mechanism PLAIN + 3. Verify the list + 4. Restart the server + 5. Verify that list is still correct + 6. Edit mechanisms to allow just PLAIN and EXTERNAL + 7. Verify the list + 8. Edit mechanisms to allow just PLAIN and GSSAPI + 9. Verify the list + 10. Restart the server + 11. Verify that list is still correct + 12. Edit mechanisms to allow just PLAIN, GSSAPI, and ANONYMOUS + 13. Verify the list + 14. 
Restart the server + 15. Verify that list is still correct + 16. Edit mechanisms to allow just PLAIN and ANONYMOUS + 17. Verify the list + 18. Restart the server + 19. Verify that list is still correct + 20. Reset the allowed list to nothing, + 21. Verify that the returned mechanisms are the default ones + 22. Restart the server + 23. Verify that list is still correct + :expectedresults: + 1. GSSAPI, PLAIN and EXTERNAL mechanisms should be acquired + 2. Operation should be successful + 3. List should have - PLAIN, EXTERNAL; shouldn't have - GSSAPI + 4. Server should be restarted + 5. List should have - PLAIN, EXTERNAL; shouldn't have - GSSAPI + 6. Operation should be successful + 7. List should have - PLAIN, EXTERNAL; shouldn't have - GSSAPI + 8. Operation should be successful + 9. List should have - PLAIN, EXTERNAL, GSSAPI + 10. Server should be restarted + 11. List should have - PLAIN, EXTERNAL, GSSAPI + 12. Operation should be successful + 13. List should have - PLAIN, EXTERNAL, GSSAPI, ANONYMOUS + 14. Server should be restarted + 15. List should have - PLAIN, EXTERNAL, GSSAPI, ANONYMOUS + 16. Operation should be successful + 17. List should have - PLAIN, EXTERNAL, ANONYMOUS; shouldn't have - GSSAPI + 18. Server should be restarted + 19. List should have - PLAIN, EXTERNAL, ANONYMOUS; shouldn't have - GSSAPI + 20. Operation should be successful + 21. List should have - PLAIN, EXTERNAL, GSSAPI + 22. Server should be restarted + 23. List should have - PLAIN, EXTERNAL, GSSAPI + """ + + standalone = topology_st.standalone + + # Get the supported mechanisms. This should contain PLAIN, GSSAPI, EXTERNAL at least + standalone.log.info("Test we have some of the default mechanisms") + orig_mechs = standalone.rootdse.supported_sasl() + print(orig_mechs) + assert('GSSAPI' in orig_mechs) + assert('PLAIN' in orig_mechs) + assert('EXTERNAL' in orig_mechs) + + # Now edit the supported mechanisms. Check them again. + standalone.log.info("Edit mechanisms to allow just PLAIN") + standalone.config.set('nsslapd-allowed-sasl-mechanisms', 'PLAIN') + limit_mechs = standalone.rootdse.supported_sasl() + assert('PLAIN' in limit_mechs) + assert('EXTERNAL' in limit_mechs) # Should always be in the allowed list, even if not set. + assert('GSSAPI' not in limit_mechs) # Should not be there! + + # Restart the server a few times and make sure nothing changes + standalone.log.info("Restart server and make sure we still have correct allowed mechs") + standalone.restart() + standalone.restart() + limit_mechs = standalone.rootdse.supported_sasl() + assert('PLAIN' in limit_mechs) + assert('EXTERNAL' in limit_mechs) + assert('GSSAPI' not in limit_mechs) + + # Set EXTERNAL, even though its always supported + standalone.log.info("Edit mechanisms to allow just PLAIN and EXTERNAL") + standalone.config.set('nsslapd-allowed-sasl-mechanisms', 'PLAIN, EXTERNAL') + limit_mechs = standalone.rootdse.supported_sasl() + assert('PLAIN' in limit_mechs) + assert('EXTERNAL' in limit_mechs) + assert('GSSAPI' not in limit_mechs) + + # Now edit the supported mechanisms. Check them again. 
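+    # (Note added for clarity: EXTERNAL is always advertised even when it is not
+    # listed in nsslapd-allowed-sasl-mechanisms, so the length assertions below
+    # count it as well, e.g. PLAIN + GSSAPI + EXTERNAL == 3.)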
+ standalone.log.info("Edit mechanisms to allow just PLAIN and GSSAPI") + standalone.config.set('nsslapd-allowed-sasl-mechanisms', 'PLAIN, GSSAPI') + limit_mechs = standalone.rootdse.supported_sasl() + assert('PLAIN' in limit_mechs) + assert('EXTERNAL' in limit_mechs) + assert('GSSAPI' in limit_mechs) + assert(len(limit_mechs) == 3) + + # Restart server twice and make sure the allowed list is the same + standalone.restart() + standalone.restart() # For ticket 49379 (test double restart) + limit_mechs = standalone.rootdse.supported_sasl() + assert('PLAIN' in limit_mechs) + assert('EXTERNAL' in limit_mechs) + assert('GSSAPI' in limit_mechs) + assert(len(limit_mechs) == 3) + + # Add ANONYMOUS to the supported mechanisms and test again. + standalone.log.info("Edit mechanisms to allow just PLAIN, GSSAPI, and ANONYMOUS") + standalone.config.set('nsslapd-allowed-sasl-mechanisms', 'PLAIN, GSSAPI, ANONYMOUS') + limit_mechs = standalone.rootdse.supported_sasl() + assert('PLAIN' in limit_mechs) + assert('EXTERNAL' in limit_mechs) + assert('GSSAPI' in limit_mechs) + assert('ANONYMOUS' in limit_mechs) + assert(len(limit_mechs) == 4) + + # Restart server and make sure the allowed list is the same + standalone.restart() + standalone.restart() # For ticket 49379 (test double restart) + limit_mechs = standalone.rootdse.supported_sasl() + assert('PLAIN' in limit_mechs) + assert('EXTERNAL' in limit_mechs) + assert('GSSAPI' in limit_mechs) + assert('ANONYMOUS' in limit_mechs) + assert(len(limit_mechs) == 4) + + # Remove GSSAPI + standalone.log.info("Edit mechanisms to allow just PLAIN and ANONYMOUS") + standalone.config.set('nsslapd-allowed-sasl-mechanisms', 'PLAIN, ANONYMOUS') + limit_mechs = standalone.rootdse.supported_sasl() + assert('PLAIN' in limit_mechs) + assert('EXTERNAL' in limit_mechs) + assert('GSSAPI' not in limit_mechs) + assert('ANONYMOUS' in limit_mechs) + assert(len(limit_mechs) == 3) + + # Restart server and make sure the allowed list is the same + standalone.restart() + limit_mechs = standalone.rootdse.supported_sasl() + assert('PLAIN' in limit_mechs) + assert('EXTERNAL' in limit_mechs) + assert('GSSAPI' not in limit_mechs) + assert('ANONYMOUS' in limit_mechs) + assert(len(limit_mechs) == 3) + + # Do a config reset + standalone.log.info("Reset allowed mechaisms") + standalone.config.reset('nsslapd-allowed-sasl-mechanisms') + + # check the supported list is the same as our first check. + standalone.log.info("Check that we have the original set of mechanisms") + final_mechs = standalone.rootdse.supported_sasl() + assert(set(final_mechs) == set(orig_mechs)) + + # Check it after a restart + standalone.log.info("Check that we have the original set of mechanisms after a restart") + standalone.restart() + final_mechs = standalone.rootdse.supported_sasl() + assert(set(final_mechs) == set(orig_mechs)) + + +if __name__ == '__main__': + # Run isolated + # -s for DEBUG mode + CURRENT_FILE = os.path.realpath(__file__) + pytest.main("-s %s" % CURRENT_FILE) diff --git a/dirsrvtests/tests/suites/sasl/plain_test.py b/dirsrvtests/tests/suites/sasl/plain_test.py new file mode 100644 index 0000000..c7e672f --- /dev/null +++ b/dirsrvtests/tests/suites/sasl/plain_test.py @@ -0,0 +1,95 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2017 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. 
+# --- END COPYRIGHT BLOCK --- +# + +import pytest +import ldap + +from lib389.topologies import topology_st +# This pulls in logging I think +from lib389.utils import * +from lib389._constants import DEFAULT_SUFFIX, DEFAULT_SECURE_PORT +from lib389.sasl import PlainSASL +from lib389.idm.services import ServiceAccounts, ServiceAccount + +pytestmark = pytest.mark.tier1 + +log = logging.getLogger(__name__) + + +def test_basic_feature(topology_st): + """Check basic SASL functionality for PLAIN mechanism + + :id: 75ddc6fa-aa5a-4025-9c71-1abad20c91fc + :setup: Standalone instance + :steps: + 1. Stop the instance + 2. Clean up confdir from previous cert and key files + 3. Create RSA files: CA, key and cert + 4. Start the instance + 5. Create RSA entry + 6. Set nsslapd-secureport to 636 and nsslapd-security to 'on' + 7. Restart the instance + 8. Create a user + 9. Check we can bind + 10. Check that PLAIN is listed in supported mechs + 11. Set up Plain SASL credentials + 12. Try to open a connection without TLS + 13. Try to open a connection with TLS + 14. Try to open a connection with a wrong password + :expectedresults: + 1. The instance should stop + 2. Confdir should be clean + 3. RSA files should be created + 4. The instance should start + 5. RSA entry should be created + 6. nsslapd-secureport and nsslapd-security should be set successfully + 7. The instance should be restarted + 8. User should be created + 9. Bind should be successful + 10. PLAIN should be listed in supported mechs + 11. Plain SASL should be successfully set + 12. AUTH_UNKNOWN exception should be raised + 13. The connection should open + 14. INVALID_CREDENTIALS exception should be raised + """ + + standalone = topology_st.standalone + standalone.enable_tls() + + # Create a user + sas = ServiceAccounts(standalone, DEFAULT_SUFFIX) + sas._basedn = DEFAULT_SUFFIX + sa = sas.create(properties={'cn': 'testaccount', 'userPassword': 'password'}) + # Check we can bind. This will raise exceptions if it fails. + sa.bind('password') + + # Check that PLAIN is listed in supported mechns. + assert(standalone.rootdse.supports_sasl_plain()) + + # The sasl parameters don't change, so set them up now. + # Do we need the sasl map dn:? + auth_tokens = PlainSASL("dn:%s" % sa.dn, 'password') + + # Check that it fails without TLS + with pytest.raises(ldap.AUTH_UNKNOWN): + conn = sa.sasl_bind(uri=standalone.get_ldap_uri(), saslmethod='PLAIN', sasltoken=auth_tokens, connOnly=True) + + # We *have* to use REQCERT NEVER here because python ldap fails cert verification for .... some reason that even + # I can not solve. I think it's leaking state across connections in start_tls_s? + + # Check that it works with TLS + conn = sa.sasl_bind(uri=standalone.get_ldaps_uri(), saslmethod='PLAIN', sasltoken=auth_tokens, connOnly=True) + conn.close() + + # Check that it correct fails our bind if we don't have the password. + auth_tokens = PlainSASL("dn:%s" % sa.dn, 'password-wrong') + with pytest.raises(ldap.INVALID_CREDENTIALS): + conn = sa.sasl_bind(uri=standalone.get_ldaps_uri(), saslmethod='PLAIN', sasltoken=auth_tokens, connOnly=True) + + # Done! diff --git a/dirsrvtests/tests/suites/sasl/regression_test.py b/dirsrvtests/tests/suites/sasl/regression_test.py new file mode 100644 index 0000000..2db76ce --- /dev/null +++ b/dirsrvtests/tests/suites/sasl/regression_test.py @@ -0,0 +1,182 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2016 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. 
+# --- END COPYRIGHT BLOCK --- +# +import base64 +import os +import pytest +import subprocess +from lib389.tasks import * +from lib389.utils import * +from lib389.topologies import topology_m2 +from lib389._constants import * +from lib389.replica import ReplicationManager + +pytestmark = [pytest.mark.tier1, + pytest.mark.skipif(ds_is_older('1.3.5'), reason="Not implemented")] + +logging.getLogger(__name__).setLevel(logging.DEBUG) +log = logging.getLogger(__name__) + +ISSUER = 'cn=CAcert' +CACERT = 'CAcertificate' +M1SERVERCERT = 'Server-Cert1' +M2SERVERCERT = 'Server-Cert2' +M1LDAPSPORT = '41636' +M2LDAPSPORT = '42636' +M1SUBJECT = 'CN=' + os.uname()[1] + ',OU=389 Directory Server' +M2SUBJECT = 'CN=' + os.uname()[1] + ',OU=390 Directory Server' + + +def add_entry(server, name, rdntmpl, start, num): + log.info("\n######################### Adding %d entries to %s ######################\n" % (num, name)) + + for i in range(num): + ii = start + i + dn = '%s%d,%s' % (rdntmpl, ii, DEFAULT_SUFFIX) + server.add_s(Entry((dn, {'objectclass': 'top person extensibleObject'.split(), + 'uid': '%s%d' % (rdntmpl, ii), + 'cn': '%s user%d' % (name, ii), + 'sn': 'user%d' % (ii)}))) + + +def check_pems(confdir, mycacert, myservercert, myserverkey, notexist): + log.info("\n######################### Check PEM files (%s, %s, %s)%s in %s ######################\n" + % (mycacert, myservercert, myserverkey, notexist, confdir)) + global cacert + cacert = '%s/%s.pem' % (confdir, mycacert) + if os.path.isfile(cacert): + if notexist == "": + log.info('%s is successfully generated.' % cacert) + else: + log.info('%s is incorrecly generated.' % cacert) + assert False + else: + if notexist == "": + log.fatal('%s is not generated.' % cacert) + assert False + else: + log.info('%s is correctly not generated.' % cacert) + servercert = '%s/%s.pem' % (confdir, myservercert) + if os.path.isfile(servercert): + if notexist == "": + log.info('%s is successfully generated.' % servercert) + else: + log.info('%s is incorrecly generated.' % servercert) + assert False + else: + if notexist == "": + log.fatal('%s was not generated.' % servercert) + assert False + else: + log.info('%s is correctly not generated.' % servercert) + serverkey = '%s/%s.pem' % (confdir, myserverkey) + if os.path.isfile(serverkey): + if notexist == "": + log.info('%s is successfully generated.' % serverkey) + else: + log.info('%s is incorrectly generated.' % serverkey) + assert False + else: + if notexist == "": + log.fatal('%s was not generated.' % serverkey) + assert False + else: + log.info('%s is correctly not generated.' 
% serverkey) + + +def relocate_pem_files(topology_m2): + log.info("######################### Relocate PEM files on master1 ######################") + mycacert = 'MyCA' + topology_m2.ms["master1"].encryption.set('CACertExtractFile', mycacert) + myservercert = 'MyServerCert1' + myserverkey = 'MyServerKey1' + topology_m2.ms["master1"].rsa.apply_mods([(ldap.MOD_REPLACE, 'ServerCertExtractFile', myservercert), + (ldap.MOD_REPLACE, 'ServerKeyExtractFile', myserverkey)]) + log.info("##### restart master1") + topology_m2.ms["master1"].restart() + m1confdir = topology_m2.ms["master1"].confdir + check_pems(m1confdir, mycacert, myservercert, myserverkey, "") + +@pytest.mark.ds47536 +def test_openldap_no_nss_crypto(topology_m2): + """Check that we allow usage of OpenLDAP libraries + that don't use NSS for crypto + + :id: 0a622f3d-8ba5-4df2-a1de-1fb2237da40a + :setup: Replication with two masters: + master_1 ----- startTLS -----> master_2; + master_1 <-- TLS_clientAuth -- master_2; + nsslapd-extract-pemfiles set to 'on' on both masters + without specifying cert names + :steps: + 1. Add 5 users to master 1 and 2 + 2. Check that the users were successfully replicated + 3. Relocate PEM files on master 1 + 4. Check PEM files in master 1 config directory + 5. Add 5 users more to master 1 and 2 + 6. Check that the users were successfully replicated + 7. Export userRoot on master 1 + :expectedresults: + 1. Users should be successfully added + 2. Users should be successfully replicated + 3. Operation should be successful + 4. PEM files should be found + 5. Users should be successfully added + 6. Users should be successfully replicated + 7. Operation should be successful + """ + + log.info("Ticket 47536 - Allow usage of OpenLDAP libraries that don't use NSS for crypto") + + m1 = topology_m2.ms["master1"] + m2 = topology_m2.ms["master2"] + [i.enable_tls() for i in topology_m2] + repl = ReplicationManager(DEFAULT_SUFFIX) + repl.test_replication(m1, m2) + + add_entry(m1, 'master1', 'uid=m1user', 0, 5) + add_entry(m2, 'master2', 'uid=m2user', 0, 5) + repl.wait_for_replication(m1, m2) + repl.wait_for_replication(m2, m1) + + log.info('##### Searching for entries on master1...') + entries = m1.search_s(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE, '(uid=*)') + assert 10 == len(entries) + + log.info('##### Searching for entries on master2...') + entries = m2.search_s(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE, '(uid=*)') + assert 10 == len(entries) + + relocate_pem_files(topology_m2) + + add_entry(m1, 'master1', 'uid=m1user', 10, 5) + add_entry(m2, 'master2', 'uid=m2user', 10, 5) + + repl.wait_for_replication(m1, m2) + repl.wait_for_replication(m2, m1) + + log.info('##### Searching for entries on master1...') + entries = m1.search_s(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE, '(uid=*)') + assert 20 == len(entries) + + log.info('##### Searching for entries on master2...') + entries = m2.search_s(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE, '(uid=*)') + assert 20 == len(entries) + + output_file = os.path.join(m1.get_ldif_dir(), "master1.ldif") + m1.tasks.exportLDIF(benamebase='userRoot', output_file=output_file, args={'wait': True}) + + log.info("Ticket 47536 - PASSED") + + +if __name__ == '__main__': + # Run isolated + # -s for DEBUG mode + + CURRENT_FILE = os.path.realpath(__file__) + pytest.main("-s %s" % CURRENT_FILE) diff --git a/dirsrvtests/tests/suites/schema/__init__.py b/dirsrvtests/tests/suites/schema/__init__.py new file mode 100644 index 0000000..fe69a45 --- /dev/null +++ b/dirsrvtests/tests/suites/schema/__init__.py @@ -0,0 +1,3 @@ +""" + 
:Requirement: 389-ds-base: Directory Server Schema +""" diff --git a/dirsrvtests/tests/suites/schema/eduperson_test.py b/dirsrvtests/tests/suites/schema/eduperson_test.py new file mode 100644 index 0000000..1ddcc63 --- /dev/null +++ b/dirsrvtests/tests/suites/schema/eduperson_test.py @@ -0,0 +1,90 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2017 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. +# --- END COPYRIGHT BLOCK --- +# + + +import os +import logging +import pytest +import ldap + +from lib389.idm.user import UserAccounts +from lib389.topologies import topology_st as topology +from lib389._constants import DEFAULT_SUFFIX + +pytestmark = pytest.mark.tier1 + +DEBUGGING = os.getenv('DEBUGGING', False) + +if DEBUGGING is not False: + DEBUGGING = True + +if DEBUGGING: + logging.getLogger(__name__).setLevel(logging.DEBUG) +else: + logging.getLogger(__name__).setLevel(logging.INFO) + +log = logging.getLogger(__name__) + + +def test_account_locking(topology): + """Test the eduperson schema works + + :id: f2f15449-a822-4ec6-b4ea-bd6db6240a6c + + :setup: Standalone instance + + :steps: + 1. Add a common user + 2. Extend the user with eduPerson objectClass + 3. Add attributes in eduPerson + + :expectedresults: + 1. User should be added with its properties + 2. User should be extended with eduPerson as the objectClass + 3. eduPerson should be added + """ + + if DEBUGGING: + # Add debugging steps(if any)... + pass + + users = UserAccounts(topology.standalone, DEFAULT_SUFFIX) + + user_properties = { + 'uid': 'testuser', + 'cn' : 'testuser', + 'sn' : 'user', + 'uidNumber' : '1000', + 'gidNumber' : '2000', + 'homeDirectory' : '/home/testuser', + } + testuser = users.create(properties=user_properties) + + # Extend the user with eduPerson + testuser.add('objectClass', 'eduPerson') + + # now add eduPerson attrs + testuser.add('eduPersonAffiliation', 'value') # From 2002 + testuser.add('eduPersonNickName', 'value') # From 2002 + testuser.add('eduPersonOrgDN', 'ou=People,%s' % DEFAULT_SUFFIX) # From 2002 + testuser.add('eduPersonOrgUnitDN', 'ou=People,%s' % DEFAULT_SUFFIX) # From 2002 + testuser.add('eduPersonPrimaryAffiliation', 'value') # From 2002 + testuser.add('eduPersonPrincipalName', 'value') # From 2002 + testuser.add('eduPersonEntitlement', 'value') # From 2002 + testuser.add('eduPersonPrimaryOrgUnitDN', 'ou=People,%s' % DEFAULT_SUFFIX) # From 2002 + testuser.add('eduPersonScopedAffiliation', 'value') # From 2003 + testuser.add('eduPersonTargetedID', 'value') # From 2003 + testuser.add('eduPersonAssurance', 'value') # From 2008 + testuser.add('eduPersonPrincipalNamePrior', 'value') # From 2012 + testuser.add('eduPersonUniqueId', 'value') # From 2013 + testuser.add('eduPersonOrcid', 'value') # From 2016 + + log.info('Test PASSED') + + diff --git a/dirsrvtests/tests/suites/schema/schema_reload_test.py b/dirsrvtests/tests/suites/schema/schema_reload_test.py new file mode 100644 index 0000000..2ece5dd --- /dev/null +++ b/dirsrvtests/tests/suites/schema/schema_reload_test.py @@ -0,0 +1,155 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2017 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. 
+# --- END COPYRIGHT BLOCK --- + +import logging +import pytest +import ldap +import os +from lib389.topologies import topology_st as topo +from lib389._constants import TASK_WAIT + +pytestmark = pytest.mark.tier1 + +DEBUGGING = os.getenv("DEBUGGING", default=False) +if DEBUGGING: + logging.getLogger(__name__).setLevel(logging.DEBUG) +else: + logging.getLogger(__name__).setLevel(logging.INFO) +log = logging.getLogger(__name__) + +INVALID_SCHEMA = 'givenName $ cn $ MoZiLLaATTRiBuTe' + + +def test_valid_schema(topo): + """Test schema-reload task with valid schema + + :id: 2ab304c0-3e58-4d34-b23b-a14b5997c7a8 + :setup: Standalone instance + :steps: + 1. Create schema file with valid schema + 2. Run schema-reload.pl script + 3. Run ldapsearch and check if schema was added + :expectedresults: + 1. File creation should work + 2. The schema reload task should be successful + 3. Searching the server should return the new schema + """ + + log.info("Test schema-reload task with valid schema") + + # Step 1 - Create schema file + log.info("Create valid schema file (99user.ldif)...") + schema_filename = (topo.standalone.schemadir + "/99user.ldif") + try: + with open(schema_filename, 'w') as schema_file: + schema_file.write("dn: cn=schema\n") + schema_file.write("attributetypes: ( 8.9.10.11.12.13.13 NAME " + + "'ValidAttribute' SYNTAX 1.3.6.1.4.1.1466.115.121.1.15" + + " X-ORIGIN 'Mozilla Dummy Schema' )\n") + schema_file.write("objectclasses: ( 1.2.3.4.5.6.7.8 NAME 'TestObject' " + + "SUP top MUST ( objectclass $ cn ) MAY ( givenName $ " + + "sn $ ValidAttribute ) X-ORIGIN 'user defined' )')\n") + except OSError as e: + log.fatal("Failed to create schema file: " + + "{} Error: {}".format(schema_filename, str(e))) + + # Step 2 - Run the schema-reload task + log.info("Run the schema-reload task...") + reload_result = topo.standalone.tasks.schemaReload(args={TASK_WAIT: True}) + if reload_result != 0: + log.fatal("The schema reload task failed") + assert False + else: + log.info("The schema reload task worked as expected") + + # Step 3 - Verify valid schema was added to the server + log.info("Check cn=schema to verify the valid schema was added") + subschema = topo.standalone.schema.get_subschema() + + oc_obj = subschema.get_obj(ldap.schema.ObjectClass, 'TestObject') + assert oc_obj is not None, "The new objectclass was not found on server" + + at_obj = subschema.get_obj(ldap.schema.AttributeType, 'ValidAttribute') + assert at_obj is not None, "The new attribute was not found on server" + + +def test_invalid_schema(topo): + """Test schema-reload task with invalid schema + + :id: 2ab304c0-3e58-4d34-b23b-a14b5997c7a9 + :setup: Standalone instance + :steps: + 1. Create schema files with invalid schema + 2. Run schema-reload.pl script + 3. Run ldapsearch and check if schema was added + :expectedresults: + 1. File creation should work + 2. The schema reload task should return an error + 3. 
Searching the server should not return the invalid schema + """ + log.info("Test schema-reload task with invalid schema") + + # Step 1 - Create schema files: one valid, one invalid + log.info("Create valid schema file (98user.ldif)...") + schema_filename = (topo.standalone.schemadir + "/98user.ldif") + try: + with open(schema_filename, 'w') as schema_file: + schema_file.write("dn: cn=schema\n") + schema_file.write("attributetypes: ( 8.9.10.11.12.13.14 NAME " + + "'MozillaAttribute' SYNTAX 1.3.6.1.4.1.1466.115.121.1.15" + + " X-ORIGIN 'Mozilla Dummy Schema' )\n") + schema_file.write("objectclasses: ( 1.2.3.4.5.6.7 NAME 'MoZiLLaOBJeCT' " + + "SUP top MUST ( objectclass $ cn ) MAY ( givenName $ " + + "sn $ MozillaAttribute ) X-ORIGIN 'user defined' )')\n") + except OSError as e: + log.fatal("Failed to create schema file: " + + "{} Error: {}".format(schema_filename, str(e))) + + log.info("Create invalid schema file (99user.ldif)...") + schema_filename = (topo.standalone.schemadir + "/99user.ldif") + try: + with open(schema_filename, 'w') as schema_file: + schema_file.write("dn: cn=schema\n") + # Same attribute/objclass names, but different OIDs and MAY attributes + schema_file.write("attributetypes: ( 8.9.10.11.12.13.140 NAME " + + "'MozillaAttribute' SYNTAX 1.3.6.1.4.1.1466.115.121.1.15" + + " X-ORIGIN 'Mozilla Dummy Schema' )\n") + schema_file.write("objectclasses: ( 1.2.3.4.5.6.70 NAME 'MoZiLLaOBJeCT' " + + "SUP top MUST ( objectclass $ cn ) MAY ( givenName $ " + + "cn $ MoZiLLaATTRiBuTe ) X-ORIGIN 'user defined' )')\n") + except OSError as e: + log.fatal("Failed to create schema file: " + + "{} Error: {}".format(schema_filename, str(e))) + + # Step 2 - Run the schema-reload task + log.info("Run the schema-reload task, it should fail...") + reload_result = topo.standalone.tasks.schemaReload(args={TASK_WAIT: True}) + if reload_result == 0: + log.fatal("The schema reload task incorectly reported success") + assert False + else: + log.info("The schema reload task failed as expected:" + + " error {}".format(reload_result)) + + # Step 3 - Verify invalid schema was not added to the server + log.info("Check cn=schema to verify the invalid schema was not added") + subschema = topo.standalone.schema.get_subschema() + oc_obj = subschema.get_obj(ldap.schema.ObjectClass, 'MoZiLLaOBJeCT') + if oc_obj is not None and INVALID_SCHEMA in str(oc_obj): + log.fatal("The invalid schema was returned from the server: " + str(oc_obj)) + assert False + else: + log.info("The invalid schema is not present on the server") + + +if __name__ == '__main__': + # Run isolated + # -s for DEBUG mode + CURRENT_FILE = os.path.realpath(__file__) + pytest.main("-s %s" % CURRENT_FILE) + diff --git a/dirsrvtests/tests/suites/schema/schema_replication_test.py b/dirsrvtests/tests/suites/schema/schema_replication_test.py new file mode 100644 index 0000000..bc3170f --- /dev/null +++ b/dirsrvtests/tests/suites/schema/schema_replication_test.py @@ -0,0 +1,702 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2016 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. 
+# --- END COPYRIGHT BLOCK --- +# +''' +Created on Nov 7, 2013 + +@author: tbordaz +''' +import logging +import re +import time + +import ldap +import pytest +from lib389 import Entry +from lib389._constants import * +from lib389.topologies import topology_m1c1 + +from lib389.utils import * + +# Skip on older versions +pytestmark = [pytest.mark.tier1, + pytest.mark.skipif(ds_is_older('1.3'), reason="Not implemented")] +logging.getLogger(__name__).setLevel(logging.DEBUG) +log = logging.getLogger(__name__) + +TEST_REPL_DN = "cn=test_repl, %s" % SUFFIX +ENTRY_DN = "cn=test_entry, %s" % SUFFIX +MUST_OLD = "(postalAddress $ preferredLocale)" +MUST_NEW = "(postalAddress $ preferredLocale $ telexNumber)" +MAY_OLD = "(postalCode $ street)" +MAY_NEW = "(postalCode $ street $ postOfficeBox)" + + +def _header(topology_m1c1, label): + topology_m1c1.ms["master1"].log.info("\n\n###############################################") + topology_m1c1.ms["master1"].log.info("#######") + topology_m1c1.ms["master1"].log.info("####### %s" % label) + topology_m1c1.ms["master1"].log.info("#######") + topology_m1c1.ms["master1"].log.info("###################################################") + + +def pattern_errorlog(file, log_pattern): + try: + pattern_errorlog.last_pos += 1 + except AttributeError: + pattern_errorlog.last_pos = 0 + + found = None + log.debug("_pattern_errorlog: start at offset %d" % pattern_errorlog.last_pos) + file.seek(pattern_errorlog.last_pos) + + # Use a while true iteration because 'for line in file: hit a + # python bug that break file.tell() + while True: + line = file.readline() + log.debug("_pattern_errorlog: [%d] %s" % (file.tell(), line)) + found = log_pattern.search(line) + if ((line == '') or (found)): + break + + log.debug("_pattern_errorlog: end at offset %d" % file.tell()) + pattern_errorlog.last_pos = file.tell() + return found + + +def _oc_definition(oid_ext, name, must=None, may=None): + oid = "1.2.3.4.5.6.7.8.9.10.%d" % oid_ext + desc = 'To test ticket 47490' + sup = 'person' + if not must: + must = MUST_OLD + if not may: + may = MAY_OLD + + new_oc = "( %s NAME '%s' DESC '%s' SUP %s AUXILIARY MUST %s MAY %s )" % (oid, name, desc, sup, must, may) + return new_oc + + +def add_OC(instance, oid_ext, name): + new_oc = _oc_definition(oid_ext, name) + instance.schema.add_schema('objectClasses', ensure_bytes(new_oc)) + + +def mod_OC(instance, oid_ext, name, old_must=None, old_may=None, new_must=None, new_may=None): + old_oc = _oc_definition(oid_ext, name, old_must, old_may) + new_oc = _oc_definition(oid_ext, name, new_must, new_may) + instance.schema.del_schema('objectClasses', ensure_bytes(old_oc)) + instance.schema.add_schema('objectClasses', ensure_bytes(new_oc)) + + +def support_schema_learning(topology_m1c1): + """ + with https://fedorahosted.org/389/ticket/47721, the supplier and consumer can learn + schema definitions when a replication occurs. 
+ Before that ticket: replication of the schema fails requiring administrative operation + In the test the schemaCSN (master consumer) differs + + After that ticket: replication of the schema succeeds (after an initial phase of learning) + In the test the schema CSN (master consumer) are in sync + + This function returns True if 47721 is fixed in the current release + False else + """ + ent = topology_m1c1.cs["consumer1"].getEntry(DN_CONFIG, ldap.SCOPE_BASE, "(cn=config)", ['nsslapd-versionstring']) + if ent.hasAttr('nsslapd-versionstring'): + val = ent.getValue('nsslapd-versionstring') + version = ensure_str(val).split('/')[1].split('.') # something like ['1', '3', '1', '23', 'final_fix'] + major = int(version[0]) + minor = int(version[1]) + if major > 1: + return True + if minor > 3: + # version is 1.4 or after + return True + if minor == 3: + if version[2].isdigit(): + if int(version[2]) >= 3: + return True + return False + + +def trigger_update(topology_m1c1): + """ + It triggers an update on the supplier. This will start a replication + session and a schema push + """ + try: + trigger_update.value += 1 + except AttributeError: + trigger_update.value = 1 + replace = [(ldap.MOD_REPLACE, 'telephonenumber', ensure_bytes(str(trigger_update.value)))] + topology_m1c1.ms["master1"].modify_s(ENTRY_DN, replace) + + # wait 10 seconds that the update is replicated + loop = 0 + while loop <= 10: + try: + ent = topology_m1c1.cs["consumer1"].getEntry(ENTRY_DN, ldap.SCOPE_BASE, "(objectclass=*)", + ['telephonenumber']) + val = ent.telephonenumber or "0" + if int(val) == trigger_update.value: + return + # the expected value is not yet replicated. try again + time.sleep(1) + loop += 1 + log.debug("trigger_update: receive %s (expected %d)" % (val, trigger_update.value)) + except ldap.NO_SUCH_OBJECT: + time.sleep(1) + loop += 1 + + +def trigger_schema_push(topology_m1c1): + ''' + Trigger update to create a replication session. + In case of 47721 is fixed and the replica needs to learn the missing definition, then + the first replication session learn the definition and the second replication session + push the schema (and the schemaCSN. 
+ This is why there is two updates and replica agreement is stopped/start (to create a second session) + ''' + agreements = topology_m1c1.ms["master1"].agreement.list(suffix=SUFFIX, + consumer_host=topology_m1c1.cs["consumer1"].host, + consumer_port=topology_m1c1.cs["consumer1"].port) + assert (len(agreements) == 1) + ra = agreements[0] + trigger_update(topology_m1c1) + topology_m1c1.ms["master1"].agreement.pause(ra.dn) + topology_m1c1.ms["master1"].agreement.resume(ra.dn) + trigger_update(topology_m1c1) + + +@pytest.fixture(scope="module") +def schema_replication_init(topology_m1c1): + """Initialize the test environment + + """ + log.debug("test_schema_replication_init topology_m1c1 %r (master %r, consumer %r" % ( + topology_m1c1, topology_m1c1.ms["master1"], topology_m1c1.cs["consumer1"])) + # check if a warning message is logged in the + # error log of the supplier + topology_m1c1.ms["master1"].errorlog_file = open(topology_m1c1.ms["master1"].errlog, "r") + + # This entry will be used to trigger attempt of schema push + topology_m1c1.ms["master1"].add_s(Entry((ENTRY_DN, { + 'objectclass': "top person".split(), + 'sn': 'test_entry', + 'cn': 'test_entry'}))) + + +@pytest.mark.ds47490 +def test_schema_replication_one(topology_m1c1, schema_replication_init): + """Check supplier schema is a superset (one extra OC) of consumer schema, then + schema is pushed and there is no message in the error log + + :id: d6c6ff30-b3ae-4001-80ff-0fb18563a393 + :setup: Master Consumer, check if a warning message is logged in the + error log of the supplier and add a test entry to trigger attempt of schema push. + :steps: + 1. Update the schema of supplier, so it will be superset of consumer + 2. Push the Schema (no error) + 3. Check both master and consumer has same schemaCSN + 4. Check the startup/final state + :expectedresults: + 1. Operation should be successful + 2. Operation should be successful + 3. Operation should be successful + 4. State at startup: + - supplier default schema + - consumer default schema + Final state + - supplier +masterNewOCA + - consumer +masterNewOCA + """ + + _header(topology_m1c1, "Extra OC Schema is pushed - no error") + + log.debug("test_schema_replication_one topology_m1c1 %r (master %r, consumer %r" % ( + topology_m1c1, topology_m1c1.ms["master1"], topology_m1c1.cs["consumer1"])) + # update the schema of the supplier so that it is a superset of + # consumer. 
Schema should be pushed + add_OC(topology_m1c1.ms["master1"], 2, 'masterNewOCA') + + trigger_schema_push(topology_m1c1) + master_schema_csn = topology_m1c1.ms["master1"].schema.get_schema_csn() + consumer_schema_csn = topology_m1c1.cs["consumer1"].schema.get_schema_csn() + + # Check the schemaCSN was updated on the consumer + log.debug("test_schema_replication_one master_schema_csn=%s", master_schema_csn) + log.debug("ctest_schema_replication_one onsumer_schema_csn=%s", consumer_schema_csn) + assert master_schema_csn == consumer_schema_csn + + # Check the error log of the supplier does not contain an error + regex = re.compile(r"must not be overwritten \(set replication log for additional info\)") + res = pattern_errorlog(topology_m1c1.ms["master1"].errorlog_file, regex) + if res is not None: + assert False + + +@pytest.mark.ds47490 +def test_schema_replication_two(topology_m1c1, schema_replication_init): + """Check consumer schema is a superset (one extra OC) of supplier schema, then + schema is pushed and there is a message in the error log + + :id: b5db9b75-a9a7-458e-86ec-2a8e7bd1c014 + :setup: Master Consumer, check if a warning message is logged in the + error log of the supplier and add a test entry to trigger attempt of schema push. + :steps: + 1. Update the schema of consumer, so it will be superset of supplier + 2. Update the schema of supplier so ti make it's nsSchemaCSN larger than consumer + 3. Push the Schema (error should be generated) + 4. Check supplier learns the missing definition + 5. Check the error logs + 6. Check the startup/final state + :expectedresults: + 1. Operation should be successful + 2. Operation should be successful + 3. Operation should be successful + 4. Operation should be successful + 5. Operation should be successful + 6. State at startup + - supplier +masterNewOCA + - consumer +masterNewOCA + Final state + - supplier +masterNewOCA +masterNewOCB + - consumer +masterNewOCA +consumerNewOCA + """ + + _header(topology_m1c1, "Extra OC Schema is pushed - (ticket 47721 allows to learn missing def)") + + # add this OC on consumer. 
Supplier will not push the schema
+    add_OC(topology_m1c1.cs["consumer1"], 1, 'consumerNewOCA')
+
+    # add a new OC on the supplier so that its nsSchemaCSN is larger than the consumer (wait 2s)
+    time.sleep(2)
+    add_OC(topology_m1c1.ms["master1"], 3, 'masterNewOCB')
+
+    # now push the schema
+    trigger_schema_push(topology_m1c1)
+    master_schema_csn = topology_m1c1.ms["master1"].schema.get_schema_csn()
+    consumer_schema_csn = topology_m1c1.cs["consumer1"].schema.get_schema_csn()
+
+    # Check the schemaCSN was NOT updated on the consumer
+    # with 47721, supplier learns the missing definition
+    log.debug("test_schema_replication_two master_schema_csn=%s", master_schema_csn)
+    log.debug("test_schema_replication_two consumer_schema_csn=%s", consumer_schema_csn)
+    if support_schema_learning(topology_m1c1):
+        assert master_schema_csn == consumer_schema_csn
+    else:
+        assert master_schema_csn != consumer_schema_csn
+
+    # Check the error log of the supplier does not contain an error
+    # This message may happen during the learning phase
+    regex = re.compile(r"must not be overwritten \(set replication log for additional info\)")
+    res = pattern_errorlog(topology_m1c1.ms["master1"].errorlog_file, regex)
+
+
+@pytest.mark.ds47490
+def test_schema_replication_three(topology_m1c1, schema_replication_init):
+    """Check supplier schema is again a superset (one extra OC), then
+    schema is pushed and there is no message in the error log
+
+    :id: 45888895-76bc-4cc3-9f90-33a69d027116
+    :setup: Master Consumer, check if a warning message is logged in the
+        error log of the supplier and add a test entry to trigger attempt of schema push.
+    :steps:
+        1. Update the schema of the master
+        2. Push the Schema (no error)
+        3. Check the schemaCSN was updated on the consumer
+        4. Check the error logs for no errors
+        5. Check the startup/final state
+    :expectedresults:
+        1. Operation should be successful
+        2. Operation should be successful
+        3. Operation should be successful
+        4. Operation should be successful
+        5. State at startup
+             - supplier +masterNewOCA +masterNewOCB
+             - consumer +masterNewOCA +consumerNewOCA
+            Final state
+             - supplier +masterNewOCA +masterNewOCB +consumerNewOCA
+             - consumer +masterNewOCA +masterNewOCB +consumerNewOCA
+    """
+    _header(topology_m1c1, "Extra OC Schema is pushed - no error")
+
+    # Do an update to trigger the schema push attempt
+    # add this OC on consumer. 
Supplier will no push the schema + add_OC(topology_m1c1.ms["master1"], 1, 'consumerNewOCA') + + # now push the scheam + trigger_schema_push(topology_m1c1) + master_schema_csn = topology_m1c1.ms["master1"].schema.get_schema_csn() + consumer_schema_csn = topology_m1c1.cs["consumer1"].schema.get_schema_csn() + + # Check the schemaCSN was NOT updated on the consumer + log.debug("test_schema_replication_three master_schema_csn=%s", master_schema_csn) + log.debug("test_schema_replication_three consumer_schema_csn=%s", consumer_schema_csn) + assert master_schema_csn == consumer_schema_csn + + # Check the error log of the supplier does not contain an error + regex = re.compile(r"must not be overwritten \(set replication log for additional info\)") + res = pattern_errorlog(topology_m1c1.ms["master1"].errorlog_file, regex) + if res is not None: + assert False + + +@pytest.mark.ds47490 +def test_schema_replication_four(topology_m1c1, schema_replication_init): + """Check supplier schema is again a superset (OC with more MUST), then + schema is pushed and there is no message in the error log + + :id: 39304242-2641-4eb8-a9fb-5ff0cf80718f + :setup: Master Consumer, check if a warning message is logged in the + error log of the supplier and add a test entry to trigger attempt of schema push. + :steps: + 1. Add telenumber to 'masterNewOCA' on the master + 2. Push the Schema (no error) + 3. Check the schemaCSN was updated on the consumer + 4. Check the error log of the supplier does not contain an error + 5. Check the startup/final state + :expectedresults: + 1. Operation should be successful + 2. Operation should be successful + 3. Operation should be successful + 4. Operation should be successful + 5. State at startup + - supplier +masterNewOCA +masterNewOCB +consumerNewOCA + - consumer +masterNewOCA +masterNewOCB +consumerNewOCA + Final state + - supplier +masterNewOCA +masterNewOCB +consumerNewOCA + +must=telexnumber + - consumer +masterNewOCA +masterNewOCB +consumerNewOCA + +must=telexnumber + """ + _header(topology_m1c1, "Same OC - extra MUST: Schema is pushed - no error") + + mod_OC(topology_m1c1.ms["master1"], 2, 'masterNewOCA', old_must=MUST_OLD, new_must=MUST_NEW, old_may=MAY_OLD, + new_may=MAY_OLD) + + trigger_schema_push(topology_m1c1) + master_schema_csn = topology_m1c1.ms["master1"].schema.get_schema_csn() + consumer_schema_csn = topology_m1c1.cs["consumer1"].schema.get_schema_csn() + + # Check the schemaCSN was updated on the consumer + log.debug("test_schema_replication_four master_schema_csn=%s", master_schema_csn) + log.debug("ctest_schema_replication_four onsumer_schema_csn=%s", consumer_schema_csn) + assert master_schema_csn == consumer_schema_csn + + # Check the error log of the supplier does not contain an error + regex = re.compile(r"must not be overwritten \(set replication log for additional info\)") + res = pattern_errorlog(topology_m1c1.ms["master1"].errorlog_file, regex) + if res is not None: + assert False + + +@pytest.mark.ds47490 +def test_schema_replication_five(topology_m1c1, schema_replication_init): + """Check consumer schema is a superset (OC with more MUST), then + schema is pushed (fix for 47721) and there is a message in the error log + + :id: 498527df-28c8-4e1a-bc9e-799fd2b7b2bb + :setup: Master Consumer, check if a warning message is logged in the + error log of the supplier and add a test entry to trigger attempt of schema push. + :steps: + 1. Add telenumber to 'consumerNewOCA' on the consumer + 2. 
Add a new OC on the supplier so that its nsSchemaCSN is larger than the consumer + 3. Push the Schema + 4. Check the schemaCSN was NOT updated on the consumer + 5. Check the error log of the supplier contain an error + 6. Check the startup/final state + :expectedresults: + 1. Operation should be successful + 2. Operation should be successful + 3. Operation should be successful + 4. Operation should be successful + 5. Operation should be successful + 6. State at startup + - supplier +masterNewOCA +masterNewOCB +consumerNewOCA + +must=telexnumber + - consumer +masterNewOCA +masterNewOCB +consumerNewOCA + +must=telexnumber + Final state + - supplier +masterNewOCA +masterNewOCB +consumerNewOCA +masterNewOCC + +must=telexnumber + - consumer +masterNewOCA +masterNewOCB +consumerNewOCA + +must=telexnumber +must=telexnumber + + Note: replication log is enabled to get more details + """ + _header(topology_m1c1, "Same OC - extra MUST: Schema is pushed - (fix for 47721)") + + # get more detail why it fails + topology_m1c1.ms["master1"].enableReplLogging() + + # add telenumber to 'consumerNewOCA' on the consumer + mod_OC(topology_m1c1.cs["consumer1"], 1, 'consumerNewOCA', old_must=MUST_OLD, new_must=MUST_NEW, old_may=MAY_OLD, + new_may=MAY_OLD) + # add a new OC on the supplier so that its nsSchemaCSN is larger than the consumer (wait 2s) + time.sleep(2) + add_OC(topology_m1c1.ms["master1"], 4, 'masterNewOCC') + + trigger_schema_push(topology_m1c1) + master_schema_csn = topology_m1c1.ms["master1"].schema.get_schema_csn() + consumer_schema_csn = topology_m1c1.cs["consumer1"].schema.get_schema_csn() + + # Check the schemaCSN was NOT updated on the consumer + # with 47721, supplier learns the missing definition + log.debug("test_schema_replication_five master_schema_csn=%s", master_schema_csn) + log.debug("ctest_schema_replication_five onsumer_schema_csn=%s", consumer_schema_csn) + if support_schema_learning(topology_m1c1): + assert master_schema_csn == consumer_schema_csn + else: + assert master_schema_csn != consumer_schema_csn + + # Check the error log of the supplier does not contain an error + # This message may happen during the learning phase + regex = re.compile(r"must not be overwritten \(set replication log for additional info\)") + res = pattern_errorlog(topology_m1c1.ms["master1"].errorlog_file, regex) + + +@pytest.mark.ds47490 +def test_schema_replication_six(topology_m1c1, schema_replication_init): + """Check supplier schema is again a superset (OC with more MUST), then + schema is pushed and there is no message in the error log + + :id: ed57b0cc-6a10-4f89-94ae-9f18542b1954 + :setup: Master Consumer, check if a warning message is logged in the + error log of the supplier and add a test entry to trigger attempt of schema push. + :steps: + 1. Add telenumber to 'consumerNewOCA' on the master + 2. Push the Schema (no error) + 3. Check the schemaCSN was NOT updated on the consumer + 4. Check the error log of the supplier does not contain an error + 5. Check the startup/final state + :expectedresults: + 1. Operation should be successful + 2. Operation should be successful + 3. Operation should be successful + 4. Operation should be successful + 5. 
State at startup + - supplier +masterNewOCA +masterNewOCB +consumerNewOCA +masterNewOCC + +must=telexnumber + - consumer +masterNewOCA +masterNewOCB +consumerNewOCA + +must=telexnumber +must=telexnumber + Final state + + - supplier +masterNewOCA +masterNewOCB +consumerNewOCA +masterNewOCC + +must=telexnumber +must=telexnumber + - consumer +masterNewOCA +masterNewOCB +consumerNewOCA +masterNewOCC + +must=telexnumber +must=telexnumber + Note: replication log is enabled to get more details + """ + _header(topology_m1c1, "Same OC - extra MUST: Schema is pushed - no error") + + # add telenumber to 'consumerNewOCA' on the consumer + mod_OC(topology_m1c1.ms["master1"], 1, 'consumerNewOCA', old_must=MUST_OLD, new_must=MUST_NEW, old_may=MAY_OLD, + new_may=MAY_OLD) + + trigger_schema_push(topology_m1c1) + master_schema_csn = topology_m1c1.ms["master1"].schema.get_schema_csn() + consumer_schema_csn = topology_m1c1.cs["consumer1"].schema.get_schema_csn() + + # Check the schemaCSN was NOT updated on the consumer + log.debug("test_schema_replication_six master_schema_csn=%s", master_schema_csn) + log.debug("ctest_schema_replication_six onsumer_schema_csn=%s", consumer_schema_csn) + assert master_schema_csn == consumer_schema_csn + + # Check the error log of the supplier does not contain an error + # This message may happen during the learning phase + regex = re.compile(r"must not be overwritten \(set replication log for additional info\)") + res = pattern_errorlog(topology_m1c1.ms["master1"].errorlog_file, regex) + if res is not None: + assert False + + +@pytest.mark.ds47490 +def test_schema_replication_seven(topology_m1c1, schema_replication_init): + """Check supplier schema is again a superset (OC with more MAY), then + schema is pushed and there is no message in the error log + + :id: 8725055a-b3f8-4d1d-a4d6-bb7dccf644d0 + :setup: Master Consumer, check if a warning message is logged in the + error log of the supplier and add a test entry to trigger attempt of schema push. + :steps: + 1. Add telenumber to 'masterNewOCA' on the master + 2. Push the Schema (no error) + 3. Check the schemaCSN was updated on the consumer + 4. Check the error log of the supplier does not contain an error + 5. Check the startup/final state + :expectedresults: + 1. Operation should be successful + 2. Operation should be successful + 3. Operation should be successful + 4. Operation should be successful + 5. 
State at startup + - supplier +masterNewOCA +masterNewOCB +consumerNewOCA +masterNewOCC + +must=telexnumber +must=telexnumber + - consumer +masterNewOCA +masterNewOCB +consumerNewOCA +masterNewOCC + +must=telexnumber +must=telexnumber + Final stat + - supplier +masterNewOCA +masterNewOCB +consumerNewOCA +masterNewOCC + +must=telexnumber +must=telexnumber + +may=postOfficeBox + - consumer +masterNewOCA +masterNewOCB +consumerNewOCA +masterNewOCC + +must=telexnumber +must=telexnumber + +may=postOfficeBox + """ + _header(topology_m1c1, "Same OC - extra MAY: Schema is pushed - no error") + + mod_OC(topology_m1c1.ms["master1"], 2, 'masterNewOCA', old_must=MUST_NEW, new_must=MUST_NEW, old_may=MAY_OLD, + new_may=MAY_NEW) + + trigger_schema_push(topology_m1c1) + master_schema_csn = topology_m1c1.ms["master1"].schema.get_schema_csn() + consumer_schema_csn = topology_m1c1.cs["consumer1"].schema.get_schema_csn() + + # Check the schemaCSN was updated on the consumer + log.debug("test_schema_replication_seven master_schema_csn=%s", master_schema_csn) + log.debug("ctest_schema_replication_seven consumer_schema_csn=%s", consumer_schema_csn) + assert master_schema_csn == consumer_schema_csn + + # Check the error log of the supplier does not contain an error + regex = re.compile(r"must not be overwritten \(set replication log for additional info\)") + res = pattern_errorlog(topology_m1c1.ms["master1"].errorlog_file, regex) + if res is not None: + assert False + + +@pytest.mark.ds47490 +def test_schema_replication_eight(topology_m1c1, schema_replication_init): + """Check consumer schema is a superset (OC with more MAY), then + schema is pushed (fix for 47721) and there is message in the error log + + :id: 2310d150-a71a-498d-add8-4056beeb58c6 + :setup: Master Consumer, check if a warning message is logged in the + error log of the supplier and add a test entry to trigger attempt of schema push. + :steps: + 1. Add telenumber to 'consumerNewOCA' on the consumer + 2. Modify OC on the supplier so that its nsSchemaCSN is larger than the consumer + 3. Push the Schema (no error) + 4. Check the schemaCSN was updated on the consumer + 5. Check the error log of the supplier does not contain an error + 6. Check the startup/final state + :expectedresults: + 1. Operation should be successful + 2. Operation should be successful + 3. Operation should be successful + 4. Operation should be successful + 5. Operation should be successful + 6. 
State at startup
+             - supplier +masterNewOCA +masterNewOCB +consumerNewOCA +masterNewOCC
+                        +must=telexnumber +must=telexnumber
+                        +may=postOfficeBox
+             - consumer +masterNewOCA +masterNewOCB +consumerNewOCA +masterNewOCC
+                        +must=telexnumber +must=telexnumber
+                        +may=postOfficeBox
+            Final state
+             - supplier +masterNewOCA +masterNewOCB +consumerNewOCA +masterNewOCC
+                        +must=telexnumber +must=telexnumber
+                        +may=postOfficeBox +may=postOfficeBox
+             - consumer +masterNewOCA +masterNewOCB +consumerNewOCA +masterNewOCC
+                        +must=telexnumber +must=telexnumber
+                        +may=postOfficeBox +may=postOfficeBox
+    """
+    _header(topology_m1c1, "Same OC - extra MAY: Schema is pushed (fix for 47721)")
+
+    mod_OC(topology_m1c1.cs["consumer1"], 1, 'consumerNewOCA', old_must=MUST_NEW, new_must=MUST_NEW, old_may=MAY_OLD,
+           new_may=MAY_NEW)
+
+    # modify OC on the supplier so that its nsSchemaCSN is larger than the consumer (wait 2s)
+    time.sleep(2)
+    mod_OC(topology_m1c1.ms["master1"], 4, 'masterNewOCC', old_must=MUST_OLD, new_must=MUST_OLD, old_may=MAY_OLD,
+           new_may=MAY_NEW)
+
+    trigger_schema_push(topology_m1c1)
+    master_schema_csn = topology_m1c1.ms["master1"].schema.get_schema_csn()
+    consumer_schema_csn = topology_m1c1.cs["consumer1"].schema.get_schema_csn()
+
+    # Check the schemaCSN was not updated on the consumer
+    # with 47721, supplier learns the missing definition
+    log.debug("test_schema_replication_eight master_schema_csn=%s", master_schema_csn)
+    log.debug("test_schema_replication_eight consumer_schema_csn=%s", consumer_schema_csn)
+    if support_schema_learning(topology_m1c1):
+        assert master_schema_csn == consumer_schema_csn
+    else:
+        assert master_schema_csn != consumer_schema_csn
+
+    # Check the error log of the supplier does not contain an error
+    # This message may happen during the learning phase
+    regex = re.compile(r"must not be overwritten \(set replication log for additional info\)")
+    res = pattern_errorlog(topology_m1c1.ms["master1"].errorlog_file, regex)
+
+
+@pytest.mark.ds47490
+def test_schema_replication_nine(topology_m1c1, schema_replication_init):
+    """Check supplier schema is again a superset (OC with more MAY), then
+    schema is pushed and there is no message in the error log
+
+    :id: 851b24c6-b1e0-466f-9714-aa2940fbfeeb
+    :setup: Master Consumer, check if a warning message is logged in the
+        error log of the supplier and add a test entry to trigger attempt of schema push.
+    :steps:
+        1. Add postOfficeBox to 'consumerNewOCA' on the master
+        2. Push the Schema
+        3. Check the schemaCSN was updated on the consumer
+        4. Check the error log of the supplier does not contain an error
+        5. Check the startup/final state
+    :expectedresults:
+        1. Operation should be successful
+        2. Operation should be successful
+        3. Operation should be successful
+        4. Operation should be successful
+        5. 
State at startup + - supplier +masterNewOCA +masterNewOCB +consumerNewOCA +masterNewOCC + +must=telexnumber +must=telexnumber + +may=postOfficeBox +may=postOfficeBox + - consumer +masterNewOCA +masterNewOCB +consumerNewOCA +masterNewOCC + +must=telexnumber +must=telexnumber + +may=postOfficeBox +may=postOfficeBox + + Final state + + - supplier +masterNewOCA +masterNewOCB +consumerNewOCA +masterNewOCC + +must=telexnumber +must=telexnumber + +may=postOfficeBox +may=postOfficeBox +may=postOfficeBox + - consumer +masterNewOCA +masterNewOCB +consumerNewOCA +masterNewOCC + +must=telexnumber +must=telexnumber + +may=postOfficeBox +may=postOfficeBox +may=postOfficeBox + """ + _header(topology_m1c1, "Same OC - extra MAY: Schema is pushed - no error") + + mod_OC(topology_m1c1.ms["master1"], 1, 'consumerNewOCA', old_must=MUST_NEW, new_must=MUST_NEW, old_may=MAY_OLD, + new_may=MAY_NEW) + + trigger_schema_push(topology_m1c1) + master_schema_csn = topology_m1c1.ms["master1"].schema.get_schema_csn() + consumer_schema_csn = topology_m1c1.cs["consumer1"].schema.get_schema_csn() + + # Check the schemaCSN was updated on the consumer + log.debug("test_schema_replication_nine master_schema_csn=%s", master_schema_csn) + log.debug("ctest_schema_replication_nine onsumer_schema_csn=%s", consumer_schema_csn) + assert master_schema_csn == consumer_schema_csn + + # Check the error log of the supplier does not contain an error + regex = re.compile(r"must not be overwritten \(set replication log for additional info\)") + res = pattern_errorlog(topology_m1c1.ms["master1"].errorlog_file, regex) + if res is not None: + assert False + + log.info('Testcase PASSED') + + +if __name__ == '__main__': + # Run isolated + # -s for DEBUG mode + CURRENT_FILE = os.path.realpath(__file__) + pytest.main("-s %s" % CURRENT_FILE) diff --git a/dirsrvtests/tests/suites/schema/schema_test.py b/dirsrvtests/tests/suites/schema/schema_test.py new file mode 100644 index 0000000..d590624 --- /dev/null +++ b/dirsrvtests/tests/suites/schema/schema_test.py @@ -0,0 +1,173 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2016 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. 
+# --- END COPYRIGHT BLOCK --- +# +''' +Created on Dec 18, 2013 + +@author: rmeggins +''' +import logging + +import ldap +import pytest +import six +from ldap.cidict import cidict +from ldap.schema import SubSchema +from lib389._constants import * +from lib389.topologies import topology_st + +pytestmark = pytest.mark.tier1 + +logging.getLogger(__name__).setLevel(logging.DEBUG) +log = logging.getLogger(__name__) + +attrclass = ldap.schema.models.AttributeType +occlass = ldap.schema.models.ObjectClass +syntax_len_supported = False + + +def ochasattr(subschema, oc, mustormay, attr, key): + """See if the oc and any of its parents and ancestors have the + given attr""" + rc = False + if not key in oc.__dict__: + dd = cidict() + for ii in oc.__dict__[mustormay]: + dd[ii] = ii + oc.__dict__[key] = dd + if attr in oc.__dict__[key]: + rc = True + else: + # look in parents + for noroid in oc.sup: + ocpar = subschema.get_obj(occlass, noroid) + assert (ocpar) + rc = ochasattr(subschema, ocpar, mustormay, attr, key) + if rc: + break + return rc + + +def ochasattrs(subschema, oc, mustormay, attrs): + key = mustormay + "dict" + ret = [] + for attr in attrs: + if not ochasattr(subschema, oc, mustormay, attr, key): + ret.append(attr) + return ret + + +def mycmp(v1, v2): + v1ary, v2ary = [v1], [v2] + if isinstance(v1, list) or isinstance(v1, tuple): + v1ary, v2ary = list(set([x.lower() for x in v1])), list(set([x.lower() for x in v2])) + if not len(v1ary) == len(v2ary): + return False + for v1, v2 in zip(v1ary, v2ary): + if isinstance(v1, six.string_types): + if not len(v1) == len(v2): + return False + if not v1 == v2: + return False + return True + + +def ocgetdiffs(ldschema, oc1, oc2): + fields = ['obsolete', 'names', 'desc', 'must', 'may', 'kind', 'sup'] + ret = '' + for field in fields: + v1, v2 = oc1.__dict__[field], oc2.__dict__[field] + if field == 'may' or field == 'must': + missing = ochasattrs(ldschema, oc1, field, oc2.__dict__[field]) + if missing: + ret = ret + '\t%s is missing %s\n' % (field, missing) + missing = ochasattrs(ldschema, oc2, field, oc1.__dict__[field]) + if missing: + ret = ret + '\t%s is missing %s\n' % (field, missing) + elif not mycmp(v1, v2): + ret = ret + '\t%s differs: [%s] vs. [%s]\n' % (field, oc1.__dict__[field], oc2.__dict__[field]) + return ret + + +def atgetparfield(subschema, at, field): + v = None + for nameoroid in at.sup: + atpar = subschema.get_obj(attrclass, nameoroid) + assert (atpar) + v = atpar.__dict__.get(field, atgetparfield(subschema, atpar, field)) + if v is not None: + break + return v + + +def atgetdiffs(ldschema, at1, at2): + fields = ['names', 'desc', 'obsolete', 'sup', 'equality', 'ordering', 'substr', 'syntax', + 'single_value', 'collective', 'no_user_mod', 'usage'] + if syntax_len_supported: + fields.append('syntax_len') + ret = '' + for field in fields: + v1 = at1.__dict__.get(field) or atgetparfield(ldschema, at1, field) + v2 = at2.__dict__.get(field) or atgetparfield(ldschema, at2, field) + if not mycmp(v1, v2): + ret = ret + '\t%s differs: [%s] vs. 
[%s]\n' % (field, at1.__dict__[field], at2.__dict__[field]) + return ret + + +def test_schema_comparewithfiles(topology_st): + '''Compare the schema from ldap cn=schema with the schema files''' + + log.info('Running test_schema_comparewithfiles...') + + retval = True + schemainst = topology_st.standalone + ldschema = schemainst.schema.get_subschema() + assert ldschema + for fn in schemainst.schema.list_files(): + try: + fschema = schemainst.schema.file_to_subschema(fn) + if fschema is None: + raise Exception("Empty schema file %s" % fn) + except: + log.warning("Unable to parse %s as a schema file - skipping" % fn) + continue + log.info("Parsed %s as a schema file - checking" % fn) + for oid in fschema.listall(occlass): + se = fschema.get_obj(occlass, oid) + assert se + ldse = ldschema.get_obj(occlass, oid) + if not ldse: + log.error("objectclass in %s but not in %s: %s" % (fn, DN_SCHEMA, se)) + retval = False + continue + ret = ocgetdiffs(ldschema, ldse, se) + if ret: + log.error("name %s oid %s\n%s" % (se.names[0], oid, ret)) + retval = False + for oid in fschema.listall(attrclass): + se = fschema.get_obj(attrclass, oid) + assert se + ldse = ldschema.get_obj(attrclass, oid) + if not ldse: + log.error("attributetype in %s but not in %s: %s" % (fn, DN_SCHEMA, se)) + retval = False + continue + ret = atgetdiffs(ldschema, ldse, se) + if ret: + log.error("name %s oid %s\n%s" % (se.names[0], oid, ret)) + retval = False + assert retval + + log.info('test_schema_comparewithfiles: PASSED') + + +if __name__ == '__main__': + # Run isolated + # -s for DEBUG mode + CURRENT_FILE = os.path.realpath(__file__) + pytest.main("-s %s" % CURRENT_FILE) diff --git a/dirsrvtests/tests/suites/setup_ds/__init__.py b/dirsrvtests/tests/suites/setup_ds/__init__.py new file mode 100644 index 0000000..80ce751 --- /dev/null +++ b/dirsrvtests/tests/suites/setup_ds/__init__.py @@ -0,0 +1,12 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2019 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. +# --- END COPYRIGHT BLOCK --- + +""" + :Requirement: 389-ds-base: Basic Directory Server Operations +""" + diff --git a/dirsrvtests/tests/suites/setup_ds/dscreate_test.py b/dirsrvtests/tests/suites/setup_ds/dscreate_test.py new file mode 100644 index 0000000..b8a73dd --- /dev/null +++ b/dirsrvtests/tests/suites/setup_ds/dscreate_test.py @@ -0,0 +1,125 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2016 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. +# --- END COPYRIGHT BLOCK --- + + +import sys +import pytest +from lib389 import DirSrv +from lib389.cli_base import LogCapture +from lib389.instance.setup import SetupDs +from lib389.instance.remove import remove_ds_instance +from lib389.instance.options import General2Base, Slapd2Base +from lib389._constants import * +from lib389.utils import ds_is_older + +import tempfile + +pytestmark = [pytest.mark.tier0, + pytest.mark.skipif(ds_is_older('1.4.1.2'), reason="Needs a compatible systemd unit, see PR#50213")] + +INSTANCE_PORT = 54321 +INSTANCE_SERVERID = 'standalone' + +DEBUGGING = True + +MAJOR, MINOR, _, _, _ = sys.version_info + +class TopologyInstance(object): + def __init__(self, standalone): + # For these tests, we don't want to open the instance. + # instance.open() + self.standalone = standalone + +# Need a teardown to destroy the instance. 
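+# The fixture below only allocates a DirSrv handle for the fixed port and
+# serverid above, removes any instance left over from a previous run, and
+# registers a finalizer that deletes the instance a test created, unless
+# DEBUGGING is set.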
+@pytest.fixture +def topology(request): + instance = DirSrv(verbose=DEBUGGING) + instance.log.debug("Instance allocated") + args = {SER_PORT: INSTANCE_PORT, + SER_SERVERID_PROP: INSTANCE_SERVERID} + instance.allocate(args) + if instance.exists(): + instance.delete() + + def fin(): + if instance.exists() and not DEBUGGING: + instance.delete() + request.addfinalizer(fin) + + return TopologyInstance(instance) + +def test_setup_ds_minimal_dry(topology): + # Create the setupDs + lc = LogCapture() + # Give it the right types. + sds = SetupDs(verbose=DEBUGGING, dryrun=True, log=lc.log) + + # Get the dicts from Type2Base, as though they were from _validate_ds_2_config + # IE get the defaults back just from Slapd2Base.collect + # Override instance name, root password, port and secure port. + + general_options = General2Base(lc.log) + general_options.verify() + general = general_options.collect() + + slapd_options = Slapd2Base(lc.log) + slapd_options.set('instance_name', INSTANCE_SERVERID) + slapd_options.set('port', INSTANCE_PORT) + slapd_options.set('root_password', PW_DM) + slapd_options.verify() + slapd = slapd_options.collect() + + sds.create_from_args(general, slapd, {}, None) + + insts = topology.standalone.list(serverid=INSTANCE_SERVERID) + # Assert we did not change the system. + assert(len(insts) == 0) + +def test_setup_ds_minimal(topology): + # Create the setupDs + lc = LogCapture() + # Give it the right types. + sds = SetupDs(verbose=DEBUGGING, dryrun=False, log=lc.log) + + # Get the dicts from Type2Base, as though they were from _validate_ds_2_config + # IE get the defaults back just from Slapd2Base.collect + # Override instance name, root password, port and secure port. + + general_options = General2Base(lc.log) + general_options.verify() + general = general_options.collect() + + slapd_options = Slapd2Base(lc.log) + slapd_options.set('instance_name', INSTANCE_SERVERID) + slapd_options.set('port', INSTANCE_PORT) + slapd_options.set('root_password', PW_DM) + slapd_options.verify() + slapd = slapd_options.collect() + + sds.create_from_args(general, slapd, {}, None) + insts = topology.standalone.list(serverid=INSTANCE_SERVERID) + # Assert we did change the system. + assert(len(insts) == 1) + # Make sure we can connect + topology.standalone.open() + # Make sure we can start stop. + topology.standalone.stop() + topology.standalone.start() + # Okay, actually remove the instance + remove_ds_instance(topology.standalone) + + +def test_setup_ds_inf_minimal(topology): + if MAJOR < 3: + return + # Write a template inf + # Check it? + # Setup the server + + pass + diff --git a/dirsrvtests/tests/suites/setup_ds/remove_test.py b/dirsrvtests/tests/suites/setup_ds/remove_test.py new file mode 100644 index 0000000..a66ddee --- /dev/null +++ b/dirsrvtests/tests/suites/setup_ds/remove_test.py @@ -0,0 +1,68 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2018 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. 
+# --- END COPYRIGHT BLOCK --- + +import os +import subprocess +import pytest +import logging +from lib389 import DirSrv +from lib389.instance.remove import remove_ds_instance +from lib389._constants import ReplicaRole +from lib389.topologies import create_topology + +pytestmark = pytest.mark.tier0 + + +@pytest.fixture(scope="function") +def topology_st(request): + """Create DS standalone instance""" + + topology = create_topology({ReplicaRole.STANDALONE: 1}) + + def fin(): + if topology.standalone.exists(): + topology.standalone.delete() + request.addfinalizer(fin) + + return topology + + +@pytest.mark.parametrize("simple_allocate", (True, False)) +def test_basic(topology_st, simple_allocate): + """Check that all DS directories and systemd items were removed + + :id: 9e8bbcda-358d-4e9c-a38c-9b4c3b63308e + :parametrized: yes + """ + + inst = topology_st.standalone + + # FreeIPA uses local_simple_allocate for the removal process + if simple_allocate: + inst = DirSrv(verbose=inst.verbose) + inst.local_simple_allocate(topology_st.standalone.serverid) + + remove_ds_instance(inst) + + paths = [inst.ds_paths.backup_dir, + inst.ds_paths.cert_dir, + inst.ds_paths.config_dir, + inst.ds_paths.db_dir, + inst.get_changelog_dir(), + inst.ds_paths.ldif_dir, + inst.ds_paths.lock_dir, + inst.ds_paths.log_dir] + for path in paths: + assert not os.path.exists(path) + + try: + subprocess.check_output(['systemctl', 'is-enabled', 'dirsrv@{}'.format(inst.serverid)], encoding='utf-8') + except subprocess.CalledProcessError as ex: + assert "disabled" in ex.output + + diff --git a/dirsrvtests/tests/suites/setup_ds/setup_ds_test.py b/dirsrvtests/tests/suites/setup_ds/setup_ds_test.py new file mode 100644 index 0000000..aee7576 --- /dev/null +++ b/dirsrvtests/tests/suites/setup_ds/setup_ds_test.py @@ -0,0 +1,84 @@ +import pytest +from lib389.utils import * +from lib389._constants import (DEFAULT_SUFFIX, SER_HOST, SER_PORT, + SER_SERVERID_PROP, SER_CREATION_SUFFIX, SER_INST_SCRIPTS_ENABLED, + args_instance, ReplicaRole) + +from lib389 import DirSrv + +pytestmark = pytest.mark.tier0 + +DEBUGGING = os.getenv("DEBUGGING", default=False) +if DEBUGGING: + logging.getLogger(__name__).setLevel(logging.DEBUG) +else: + logging.getLogger(__name__).setLevel(logging.INFO) +log = logging.getLogger(__name__) + + +def create_instance(config_attr): + log.info('create_instance - Installs the instance and Sets the value of InstScriptsEnabled to true OR false.') + + log.info("Set up the instance and set the config_attr") + instance_data = generate_ds_params(1, ReplicaRole.STANDALONE) + # Create instance + standalone = DirSrv(verbose=False) + + # Args for the instance + args_instance[SER_HOST] = instance_data[SER_HOST] + args_instance[SER_PORT] = instance_data[SER_PORT] + args_instance[SER_SERVERID_PROP] = instance_data[SER_SERVERID_PROP] + args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX + args_instance[SER_INST_SCRIPTS_ENABLED] = config_attr + args_standalone = args_instance.copy() + standalone.allocate(args_standalone) + if standalone.exists(): + standalone.delete() + standalone.create() + standalone.open() + return standalone + + +@pytest.mark.parametrize("config_attr", ('true', 'false')) +def test_slapd_InstScriptsEnabled(config_attr): + """Tests InstScriptsEnabled attribute with "True" and "False" options + + :id: 02faac7f-c44d-4a3e-bf2d-1021e51da1ed + :parametrized: yes + :setup: Standalone instance with slapd.InstScriptsEnabled option as "True" and "False" + + :steps: + 1. 
Execute setup-ds.pl with slapd.InstScriptsEnabled option as "True". + 2. Check if /usr/lib64/dirsrv/slapd-instance instance script directory is created or not. + 3. Execute setup-ds.pl with slapd.InstScriptsEnabled option as "False". + 4. Check if /usr/lib64/dirsrv/slapd-instance instance script directory is created or not. + + :expectedresults: + 1. Instance should be created. + 2. /usr/lib64/dirsrv/slapd-instance instance script directory should be created. + 3. Instance should be created. + 4. /usr/lib64/dirsrv/slapd-instance instance script directory should not be created. + """ + + log.info('set SER_INST_SCRIPTS_ENABLED to {}'.format(config_attr)) + standalone = create_instance(config_attr) + + # Checking the presence of instance script directory when SER_INST_SCRIPTS_ENABLED is set to true and false + if config_attr == 'true': + log.info('checking the presence of instance script directory when SER_INST_SCRIPTS_ENABLED is set to true') + assert os.listdir('/usr/lib64/dirsrv/slapd-standalone1') + + elif config_attr == 'false': + log.info('checking instance script directory does not present when SER_INST_SCRIPTS_ENABLED is set to false') + assert not os.path.exists("/usr/lib64/dirsrv/slapd-standalone1") + + # Remove instance + standalone.delete() + + +if __name__ == '__main__': + # Run isolated + # -s for DEBUG mode + CURRENT_FILE = os.path.realpath(__file__) + pytest.main("-s %s" % CURRENT_FILE) + diff --git a/dirsrvtests/tests/suites/snmp/__init__.py b/dirsrvtests/tests/suites/snmp/__init__.py new file mode 100644 index 0000000..da86e48 --- /dev/null +++ b/dirsrvtests/tests/suites/snmp/__init__.py @@ -0,0 +1,3 @@ +""" + :Requirement: 389-ds-base: SNMP +""" diff --git a/dirsrvtests/tests/suites/stat/__init__.py b/dirsrvtests/tests/suites/stat/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/dirsrvtests/tests/suites/stat/mmt_state_test.py b/dirsrvtests/tests/suites/stat/mmt_state_test.py new file mode 100644 index 0000000..7c1b250 --- /dev/null +++ b/dirsrvtests/tests/suites/stat/mmt_state_test.py @@ -0,0 +1,361 @@ +import os +import logging +import ldap +import pytest +from lib389.idm.user import UserAccounts +from lib389.topologies import topology_m2 as topo +from lib389._constants import * + +pytestmark = pytest.mark.tier1 + +DEBUGGING = os.getenv("DEBUGGING", default=False) +if DEBUGGING: + logging.getLogger(__name__).setLevel(logging.DEBUG) +else: + logging.getLogger(__name__).setLevel(logging.INFO) +log = logging.getLogger(__name__) + +BINVALUE1 = 'thedeadbeef1' +BINVALUE2 = 'thedeadbeef2' +BINVALUE3 = 'thedeadbeef3' + +USER_PROPERTIES = { + 'uid': 'state1usr', + 'cn': 'state1usr', + 'sn': 'state1usr', + 'uidNumber': '1001', + 'gidNumber': '2001', + 'userpassword': PASSWORD, + 'homeDirectory': '/home/testuser' +} + + +def _check_user_oper_attrs(topo, tuser, attr_name, attr_value, oper_type, exp_values, oper_attr): + """Check if list of operational attributes present for a given entry""" + + log.info('Checking if operational attrs vucsn, adcsn and vdcsn present for: {}'.format(tuser)) + entry = topo.ms["master1"].search_s(tuser.dn, ldap.SCOPE_BASE, 'objectclass=*',['nscpentrywsi']) + if oper_attr: + for line in str(entry).split('\n'): + if attr_name + ';' in line: + if not 'DELETE' in oper_type: + assert any(attr in line for attr in exp_values) and oper_attr in line + else: + assert 'deleted' in line and oper_attr in line and attr_value in line + + +@pytest.mark.parametrize("attr_name, attr_value, oper_type, exp_values, oper_attr", + [('description', 
'Test1usr1', 'ldap.MOD_ADD', ['Test1usr1'], 'vucsn'), + ('description', 'Test1usr2', 'ldap.MOD_ADD', ['Test1usr1', + 'Test1usr2'], 'vucsn'), + ('description', 'Test1usr3', 'ldap.MOD_ADD', + ['Test1usr1', 'Test1usr2', 'Test1usr3'], 'vucsn'), + ('description', 'Test1usr4', 'ldap.MOD_REPLACE', ['Test1usr4'], + 'adcsn'), + ('description', 'Test1usr4', 'ldap.MOD_DELETE', [], 'vdcsn')]) +def test_check_desc_attr_state(topo, attr_name, attr_value, oper_type, exp_values, oper_attr): + """Modify user's description attribute and check if description attribute is + added/modified/deleted and operational attributes vucsn, adcsn and vdcsn are present. + + :id: f0830538-02cf-11e9-8be0-8c16451d917b + :parametrized: yes + :setup: Replication with two masters. + :steps: 1. Add user to Master1 without description attribute. + 2. Add description attribute to user. + 3. Check if only one description attribute exist. + 4. Check if operational attribute vucsn exist. + 5. Add second description attribute to user. + 6. Check if two description attributes exist. + 7. Check if operational attribute vucsn exist. + 8. Add third description attribute to user. + 9. Check if three description attributes exist. + 10. Check if operational attribute vucsn exist. + 11. Replace description attribute for the user. + 12. Check if only one description attribute exist. + 13. Check if operational attribute adcsn exist. + 14. Delete description attribute for the user. + 15. Check if no description attribute exist. + 16. Check if no operational attribute vdcsn exist. + :expectedresults: + 1. Add user to M1 should PASS. + 2. Adding description attribute should PASS + 3. Only one description attribute should be present. + 4. Vucsn attribute should be present. + 5. Adding a new description attribute should PASS + 6. Two description attribute should be present. + 7. Vucsn attribute should be present. + 8. Adding a new description attribute should PASS + 9. Three description attribute should be present. + 10. Vucsn attribute should be present. + 11. Replacing new description attribute should PASS + 12. Only one description attribute should be present. + 13. Adcsn attribute should be present. + 14. Deleting description attribute should PASS + 15. No description attribute should be present. + 16. Vdcsn attribute should be present. 
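+
+            Note (illustrative, exact format assumed and may vary by version):
+            the state checked here comes from the nscpentrywsi pseudo-attribute,
+            where values appear roughly as "description;vucsn-<csn>: Test1usr1",
+            and deleted values additionally carry a ";deleted" subtype together
+            with a vdcsn.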
+ """ + + test_entry = 'state1test' + log.info('Add user: {}'.format(test_entry)) + users = UserAccounts(topo.ms['master1'], DEFAULT_SUFFIX) + try: + tuser = users.get(test_entry) + except ldap.NO_SUCH_OBJECT: + USER_PROPERTIES.update(dict.fromkeys(['uid', 'cn'], test_entry)) + tuser = users.create(properties=USER_PROPERTIES) + tuser.set(attr_name, attr_value, eval(oper_type)) + log.info('Check if list of description attrs present for: {}'.format(test_entry)) + assert sorted([i.decode() for i in tuser.get_attr_vals(attr_name)]) == sorted(exp_values) + + log.info('Checking for operational attributes') + _check_user_oper_attrs(topo, tuser, attr_name, attr_value, oper_type, exp_values, oper_attr) + + +@pytest.mark.parametrize("attr_name, attr_value, oper_type, exp_values, oper_attr", + [('cn', 'TestCN1', 'ldap.MOD_ADD', ['TestCN1', 'TestCNusr1'], 'vucsn'), + ('cn', 'TestCN2', 'ldap.MOD_ADD', ['TestCN1', + 'TestCN2', 'TestCNusr1'], 'vucsn'), + ('cn', 'TestnewCN3', 'ldap.MOD_REPLACE', ['TestnewCN3'], 'adcsn'), + ('cn', 'TestnewCN3', 'ldap.MOD_DELETE', None, None)]) +def test_check_cn_attr_state(topo, attr_name, attr_value, oper_type, exp_values, oper_attr): + """Modify user's cn attribute and check if cn attribute is added/modified/deleted and + operational attributes vucsn, adcsn and vdcsn are present. + + :id: 19614bae-02d0-11e9-a295-8c16451d917b + :parametrized: yes + :setup: Replication with two masters. + :steps: 1. Add user to Master1 with cn attribute. + 2. Add a new cn attribute to user. + 3. Check if two cn attributes exist. + 4. Check if operational attribute vucsn exist for each cn attribute. + 5. Add a new cn attribute to user. + 6. Check if three cn attributes exist. + 7. Check if operational attribute vucsn exist for each cn attribute. + 8. Replace cn attribute for the user. + 9. Check if only one cn attribute exist. + 10. Check if operational attribute adcsn exist. + 11. Delete cn attribute from user and check if it fails. + :expectedresults: + 1. Add user to M1 should PASS. + 2. Adding a new cn attribute should PASS + 3. Two cn attribute should be present. + 4. Vucsn attribute should be present. + 5. Adding a new cn attribute should PASS + 6. Three cn attribute should be present. + 7. Vucsn attribute should be present. + 8. Replacing new cn attribute should PASS + 9. Only one cn attribute should be present. + 10. Operational attribute adcsn should be present. + 11. Deleting cn attribute should fail with ObjectClass violation error. 
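+
+            Note: the delete in step 11 is expected to fail because cn is a
+            required (MUST) attribute of the user entry's object classes.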
+ """ + + test_entry = 'TestCNusr1' + log.info('Add user: {}'.format(test_entry)) + users = UserAccounts(topo.ms['master1'], DEFAULT_SUFFIX) + try: + tuser = users.get(test_entry) + except ldap.NO_SUCH_OBJECT: + USER_PROPERTIES.update(dict.fromkeys(['uid', 'cn'], test_entry)) + tuser = users.create(properties=USER_PROPERTIES) + + if 'MOD_DELETE' in oper_type: + with pytest.raises(ldap.OBJECT_CLASS_VIOLATION): + tuser.set(attr_name, attr_value, eval(oper_type)) + else: + tuser.set(attr_name, attr_value, eval(oper_type)) + log.info('Check if list of cn attrs present for: {}'.format(test_entry)) + assert sorted([i.decode() for i in tuser.get_attr_vals(attr_name)]) == sorted(exp_values) + log.info('Checking for operational attributes') + _check_user_oper_attrs(topo, tuser, attr_name, attr_value, oper_type, exp_values, oper_attr) + + +@pytest.mark.parametrize("attr_name, attr_value, oper_type, exp_values, oper_attr", + [('preferredlanguage', 'Chinese', 'ldap.MOD_REPLACE', ['Chinese'], + 'vucsn'), + ('preferredlanguage', 'French', 'ldap.MOD_ADD', None, None), + ('preferredlanguage', 'German', 'ldap.MOD_REPLACE', ['German'], 'adcsn'), + ('preferredlanguage', 'German', 'ldap.MOD_DELETE', [], 'vdcsn')]) +def test_check_single_value_attr_state(topo, attr_name, attr_value, oper_type, + exp_values, oper_attr): + """Modify user's preferredlanguage attribute and check if preferredlanguage attribute is + added/modified/deleted and operational attributes vucsn, adcsn and vdcsn are present. + + :id: 22fd645e-02d0-11e9-a9e4-8c16451d917b + :parametrized: yes + :setup: Replication with two masters. + :steps: 1. Add user to Master1 without preferredlanguage attribute. + 2. Add a new preferredlanguage attribute to user. + 3. Check if one preferredlanguage attributes exist. + 4. Check if operational attribute vucsn exist. + 5. Add a new preferredlanguage attribute for the user and check if its rejected. + 6. Replace preferredlanguage attribute for the user. + 7. Check if only one preferredlanguage attribute exist. + 8. Check if operational attribute adcsn exist with preferredlanguage. + :expectedresults: + 1. Add user to M1 should PASS. + 2. Adding a new preferredlanguage attribute should PASS + 3. Only one preferredlanguage attribute should be present. + 4. Vucsn attribute should be present. + 5. Adding a new preferredlanguage should fail with ObjectClass violation error. + 6. Replace preferredlanguage should PASS. + 7. Only one preferredlanguage attribute should be present. + 8. Operational attribute adcsn should be present with preferredlanguage. 
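+
+            Note: preferredLanguage is defined as a single-valued attribute,
+            which is why adding a second value in step 5 is rejected.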
+ """ + + test_entry = 'Langusr1' + log.info('Add user: {}'.format(test_entry)) + users = UserAccounts(topo.ms['master1'], DEFAULT_SUFFIX) + try: + tuser = users.get(test_entry) + except ldap.NO_SUCH_OBJECT: + USER_PROPERTIES.update(dict.fromkeys(['uid', 'cn'], test_entry)) + tuser = users.create(properties=USER_PROPERTIES) + + if 'MOD_ADD' in oper_type: + with pytest.raises(ldap.OBJECT_CLASS_VIOLATION): + tuser.set(attr_name, attr_value, eval(oper_type)) + else: + tuser.set(attr_name, attr_value, eval(oper_type)) + log.info('Check if list of cn attrs present for: {}'.format(test_entry)) + assert sorted([i.decode() for i in tuser.get_attr_vals(attr_name)]) == sorted(exp_values) + log.info('Checking for operational attributes') + _check_user_oper_attrs(topo, tuser, attr_name, attr_value, oper_type, exp_values, oper_attr) + + +@pytest.mark.parametrize("attr_name, attr_value, oper_type, exp_values, oper_attr", + [('roomnumber;office', 'Tower1', 'ldap.MOD_ADD', ['Tower1'], 'vucsn'), + ('roomnumber;office', 'Tower2', 'ldap.MOD_ADD', ['Tower1', 'Tower2'], + 'vucsn'), + ('roomnumber;office', 'Tower3', 'ldap.MOD_ADD', ['Tower1', 'Tower2', + 'Tower3'], 'vucsn'), + ('roomnumber;office', 'Tower4', 'ldap.MOD_REPLACE', ['Tower4'], 'adcsn'), + ('roomnumber;office', 'Tower4', 'ldap.MOD_DELETE', [], 'vucsn')]) +def test_check_subtype_attr_state(topo, attr_name, attr_value, oper_type, exp_values, oper_attr): + """Modify user's roomnumber;office attribute subtype and check if roomnumber;office attribute + is added/modified/deleted and operational attributes vucsn, adcsn and vdcsn are present. + + :id: 29ab87a4-02d0-11e9-b104-8c16451d917b + :parametrized: yes + :setup: Replication with two masters. + :steps: 1. Add user to Master1 without roomnumber;office attribute. + 2. Add roomnumber;office attribute to user. + 3. Check if only one roomnumber;office attribute exist. + 4. Check if operational attribute vucsn exist. + 5. Add second roomnumber;office attribute to user. + 6. Check if two roomnumber;office attributes exist. + 7. Check if operational attribute vucsn exist. + 8. Add third roomnumber;office attribute to user. + 9. Check if three roomnumber;office attributes exist. + 10. Check if operational attribute vucsn exist. + 11. Replace roomnumber;office attribute for the user. + 12. Check if only one roomnumber;office attribute exist. + 13. Check if operational attribute adcsn exist. + 14. Delete roomnumber;office attribute for the user. + 15. Check if no roomnumber;office attribute exist. + 16. Check if no operational attribute vdcsn exist. + :expectedresults: + 1. Add user to M1 should PASS. + 2. Adding roomnumber;office attribute should PASS + 3. Only one roomnumber;office attribute should be present. + 4. Vucsn attribute should be present. + 5. Adding a new roomnumber;office attribute should PASS + 6. Two roomnumber;office attribute should be present. + 7. Vucsn attribute should be present. + 8. Adding a new roomnumber;office attribute should PASS + 9. Three roomnumber;office attribute should be present. + 10. Vucsn attribute should be present. + 11. Replacing new roomnumber;office attribute should PASS + 12. Only one roomnumber;office attribute should be present. + 13. Adcsn attribute should be present. + 14. Deleting roomnumber;office attribute should PASS + 15. No roomnumber;office attribute should be present. + 16. Vdcsn attribute should be present. 
+ """ + + test_entry = 'roomoffice1usr' + log.info('Add user: {}'.format(test_entry)) + users = UserAccounts(topo.ms['master1'], DEFAULT_SUFFIX) + try: + tuser = users.get(test_entry) + except ldap.NO_SUCH_OBJECT: + USER_PROPERTIES.update(dict.fromkeys(['uid', 'cn'], test_entry)) + tuser = users.create(properties=USER_PROPERTIES) + + tuser.set(attr_name, attr_value, eval(oper_type)) + log.info('Check if list of roomnumber;office attributes are present for a given entry') + assert sorted([i.decode() for i in tuser.get_attr_vals(attr_name)]) == sorted(exp_values) + log.info('Checking if operational attributes are present for cn') + _check_user_oper_attrs(topo, tuser, attr_name, attr_value, oper_type, exp_values, oper_attr) + + +@pytest.mark.parametrize("attr_name, attr_value, oper_type, exp_values, oper_attr", + [('jpegphoto', BINVALUE1, 'ldap.MOD_ADD', [BINVALUE1], 'vucsn'), + ('jpegphoto', BINVALUE2, 'ldap.MOD_ADD', [BINVALUE1, BINVALUE2], + 'vucsn'), + ('jpegphoto', BINVALUE3, 'ldap.MOD_ADD', [BINVALUE1, BINVALUE2, + BINVALUE3], 'vucsn'), + ('jpegphoto', BINVALUE2, 'ldap.MOD_REPLACE', [BINVALUE2], 'adcsn'), + ('jpegphoto', BINVALUE2, 'ldap.MOD_DELETE', [], 'vdcsn')]) +def test_check_jpeg_attr_state(topo, attr_name, attr_value, oper_type, exp_values, oper_attr): + """Modify user's jpegphoto attribute and check if jpegphoto attribute is added/modified/deleted + and operational attributes vucsn, adcsn and vdcsn are present. + + :id: 312ac0d0-02d0-11e9-9d34-8c16451d917b + :parametrized: yes + :setup: Replication with two masters. + :steps: 1. Add user to Master1 without jpegphoto attribute. + 2. Add jpegphoto attribute to user. + 3. Check if only one jpegphoto attribute exist. + 4. Check if operational attribute vucsn exist. + 5. Add second jpegphoto attribute to user. + 6. Check if two jpegphoto attributes exist. + 7. Check if operational attribute vucsn exist. + 8. Add third jpegphoto attribute to user. + 9. Check if three jpegphoto attributes exist. + 10. Check if operational attribute vucsn exist. + 11. Replace jpegphoto attribute for the user. + 12. Check if only one jpegphoto attribute exist. + 13. Check if operational attribute adcsn exist. + 14. Delete jpegphoto attribute for the user. + 15. Check if no jpegphoto attribute exist. + 16. Check if no operational attribute vdcsn exist. + :expectedresults: + 1. Add user to M1 should PASS. + 2. Adding jpegphoto attribute should PASS + 3. Only one jpegphoto attribute should be present. + 4. Vucsn attribute should be present. + 5. Adding a new jpegphoto attribute should PASS + 6. Two jpegphoto attribute should be present. + 7. Vucsn attribute should be present. + 8. Adding a new jpegphoto attribute should PASS + 9. Three jpegphoto attribute should be present. + 10. Vucsn attribute should be present. + 11. Replacing new jpegphoto attribute should PASS + 12. Only one jpegphoto attribute should be present. + 13. Adcsn attribute should be present. + 14. Deleting jpegphoto attribute should PASS + 15. No jpegphoto attribute should be present. + 16. Vdcsn attribute should be present. 
+ """ + + test_entry = 'testJpeg1usr' + log.info('Add user: {}'.format(test_entry)) + users = UserAccounts(topo.ms['master1'], DEFAULT_SUFFIX) + try: + tuser = users.get(test_entry) + except ldap.NO_SUCH_OBJECT: + USER_PROPERTIES.update(dict.fromkeys(['uid', 'cn'], test_entry)) + tuser = users.create(properties=USER_PROPERTIES) + + tuser.set(attr_name, attr_value, eval(oper_type)) + log.info('Check if list of jpeg attributes are present for a given entry') + assert sorted([i.decode() for i in tuser.get_attr_vals(attr_name)]) == sorted(exp_values) + log.info('Checking if operational attributes are present for cn') + _check_user_oper_attrs(topo, tuser, attr_name, attr_value, oper_type, exp_values, oper_attr) + + +if __name__ == "__main__": + # Run isolated + # -s for DEBUG mode + CURRENT_FILE = os.path.realpath(__file__) + pytest.main("-s -v %s" % CURRENT_FILE) diff --git a/dirsrvtests/tests/suites/syntax/__init__.py b/dirsrvtests/tests/suites/syntax/__init__.py new file mode 100644 index 0000000..c083413 --- /dev/null +++ b/dirsrvtests/tests/suites/syntax/__init__.py @@ -0,0 +1,3 @@ +""" + :Requirement: 389-ds-base: Syntax +""" diff --git a/dirsrvtests/tests/suites/syntax/acceptance_test.py b/dirsrvtests/tests/suites/syntax/acceptance_test.py new file mode 100644 index 0000000..db8f63c --- /dev/null +++ b/dirsrvtests/tests/suites/syntax/acceptance_test.py @@ -0,0 +1,112 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2019 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. +# --- END COPYRIGHT BLOCK --- + +import logging +import pytest +import os +from lib389.schema import Schema +from lib389.config import Config +from lib389.idm.user import UserAccounts +from lib389._constants import DEFAULT_SUFFIX +from lib389.topologies import log, topology_st as topo + +pytestmark = pytest.mark.tier0 + +log = log.getChild(__name__) + + +@pytest.fixture(scope="function") +def validate_syntax_off(topo, request): + config = Config(topo.standalone) + config.replace("nsslapd-syntaxcheck", "off") + + def fin(): + config.replace("nsslapd-syntaxcheck", "on") + request.addfinalizer(fin) + + +def test_valid(topo, validate_syntax_off): + """Test syntax-validate task with valid entries + + :id: ec402a5b-bfb1-494d-b751-71b0d31a4d83 + :setup: Standalone instance + :steps: + 1. Set nsslapd-syntaxcheck to off + 2. Clean error log + 3. Run syntax validate task + 4. Assert that there are no errors in the error log + 5. Set nsslapd-syntaxcheck to on + :expectedresults: + 1. It should succeed + 2. It should succeed + 3. It should succeed + 4. It should succeed + 5. It should succeed + """ + + inst = topo.standalone + + log.info('Clean the error log') + inst.deleteErrorLogs() + + schema = Schema(inst) + log.info('Attempting to add task entry...') + validate_task = schema.validate_syntax(DEFAULT_SUFFIX) + validate_task.wait() + exitcode = validate_task.get_exit_code() + assert exitcode == 0 + error_lines = inst.ds_error_log.match('.*Found 0 invalid entries.*') + assert (len(error_lines) == 1) + log.info('Found 0 invalid entries - Success') + + +def test_invalid_uidnumber(topo, validate_syntax_off): + """Test syntax-validate task with invalid uidNumber attribute value + + :id: 30fdcae6-ffa6-4ec4-8da9-6fb138fc1828 + :setup: Standalone instance + :steps: + 1. Set nsslapd-syntaxcheck to off + 2. Clean error log + 3. Add a user with uidNumber attribute set to an invalid value (string) + 4. Run syntax validate task + 5. 
Assert that there is corresponding error in the error log + 6. Set nsslapd-syntaxcheck to on + :expectedresults: + 1. It should succeed + 2. It should succeed + 3. It should succeed + 4. It should succeed + 5. It should succeed + 6. It should succeed + """ + + inst = topo.standalone + + log.info('Clean the error log') + inst.deleteErrorLogs() + + users = UserAccounts(inst, DEFAULT_SUFFIX) + users.create_test_user(uid="invalid_value") + + schema = Schema(inst) + log.info('Attempting to add task entry...') + validate_task = schema.validate_syntax(DEFAULT_SUFFIX) + validate_task.wait() + exitcode = validate_task.get_exit_code() + assert exitcode == 0 + error_lines = inst.ds_error_log.match('.*uidNumber: value #0 invalid per syntax.*') + assert (len(error_lines) == 1) + log.info('Found an invalid entry with wrong uidNumber - Success') + + +if __name__ == '__main__': + # Run isolated + # -s for DEBUG mode + CURRENT_FILE = os.path.realpath(__file__) + pytest.main("-s %s" % CURRENT_FILE) diff --git a/dirsrvtests/tests/suites/syntax/mr_test.py b/dirsrvtests/tests/suites/syntax/mr_test.py new file mode 100644 index 0000000..f622b75 --- /dev/null +++ b/dirsrvtests/tests/suites/syntax/mr_test.py @@ -0,0 +1,61 @@ +import logging +import pytest +import os +import ldap +from lib389.dbgen import dbgen +from lib389._constants import * +from lib389.topologies import topology_st as topo +from lib389._controls import SSSRequestControl + +pytestmark = pytest.mark.tier1 + +DEBUGGING = os.getenv("DEBUGGING", default=False) +if DEBUGGING: + logging.getLogger(__name__).setLevel(logging.DEBUG) +else: + logging.getLogger(__name__).setLevel(logging.INFO) +log = logging.getLogger(__name__) + + +def test_sss_mr(topo): + """Test matching rule/server side sort does not crash DS + + :id: 48c73d76-1694-420f-ab55-187135f2d260 + :setup: Standalone Instance + :steps: + 1. Add sample entries to the database + 2. Perform search using server side control (uid:2.5.13.3) + :expectedresults: + 1. Success + 2. 
Success + """ + + log.info("Creating LDIF...") + ldif_dir = topo.standalone.get_ldif_dir() + ldif_file = os.path.join(ldif_dir, 'mr-crash.ldif') + dbgen(topo.standalone, 5, ldif_file, DEFAULT_SUFFIX) + + log.info("Importing LDIF...") + topo.standalone.stop() + assert topo.standalone.ldif2db(DEFAULT_BENAME, None, None, None, ldif_file) + topo.standalone.start() + + log.info('Search using server side sorting using undefined mr in the attr...') + sort_ctrl = SSSRequestControl(True, ['uid:2.5.13.3']) + controls = [sort_ctrl] + msg_id = topo.standalone.search_ext(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE, + "objectclass=*", serverctrls=controls) + try: + rtype, rdata, rmsgid, response_ctrl = topo.standalone.result3(msg_id) + except ldap.OPERATIONS_ERROR: + pass + + log.info("Test PASSED") + + +if __name__ == '__main__': + # Run isolated + # -s for DEBUG mode + CURRENT_FILE = os.path.realpath(__file__) + pytest.main(["-s", CURRENT_FILE]) + diff --git a/dirsrvtests/tests/suites/tls/__init__.py b/dirsrvtests/tests/suites/tls/__init__.py new file mode 100644 index 0000000..6846c00 --- /dev/null +++ b/dirsrvtests/tests/suites/tls/__init__.py @@ -0,0 +1,3 @@ +""" + :Requirement: 389-ds-base: Transport Layer Security +""" diff --git a/dirsrvtests/tests/suites/tls/cipher_test.py b/dirsrvtests/tests/suites/tls/cipher_test.py new file mode 100644 index 0000000..0589310 --- /dev/null +++ b/dirsrvtests/tests/suites/tls/cipher_test.py @@ -0,0 +1,51 @@ +import pytest +import os +from lib389.config import Encryption +from lib389.topologies import topology_st as topo + + +def test_long_cipher_list(topo): + """Test a long cipher list, and makre sure it is not truncated + + :id: bc400f54-3966-49c8-b640-abbf4fb2377d + :setup: Standalone Instance + :steps: + 1. Set nsSSL3Ciphers to a very long list of ciphers + 2. Ciphers are applied correctly + :expectedresults: + 1. Success + 2. 
Success + """ + ENABLED_CIPHER = "TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384::AES-GCM::AEAD::256" + DISABLED_CIPHER = "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256::AES-GCM::AEAD::128" + CIPHER_LIST = ( + "-all,-SSL_CK_RC4_128_WITH_MD5,-SSL_CK_RC4_128_EXPORT40_WITH_MD5,-SSL_CK_RC2_128_CBC_WITH_MD5," + "-SSL_CK_RC2_128_CBC_EXPORT40_WITH_MD5,-SSL_CK_DES_64_CBC_WITH_MD5,-SSL_CK_DES_192_EDE3_CBC_WITH_MD5," + "-TLS_RSA_WITH_RC4_128_MD5,-TLS_RSA_WITH_RC4_128_SHA,-TLS_RSA_WITH_3DES_EDE_CBC_SHA," + "-TLS_RSA_WITH_DES_CBC_SHA,-SSL_RSA_FIPS_WITH_3DES_EDE_CBC_SHA,-SSL_RSA_FIPS_WITH_DES_CBC_SHA," + "-TLS_RSA_EXPORT_WITH_RC4_40_MD5,-TLS_RSA_EXPORT_WITH_RC2_CBC_40_MD5,-TLS_RSA_WITH_NULL_MD5," + "-TLS_RSA_WITH_NULL_SHA,-TLS_RSA_EXPORT1024_WITH_DES_CBC_SHA,-SSL_FORTEZZA_DMS_WITH_FORTEZZA_CBC_SHA," + "-SSL_FORTEZZA_DMS_WITH_RC4_128_SHA,-SSL_FORTEZZA_DMS_WITH_NULL_SHA,-TLS_DHE_DSS_WITH_DES_CBC_SHA," + "-TLS_DHE_DSS_WITH_3DES_EDE_CBC_SHA,-TLS_DHE_RSA_WITH_DES_CBC_SHA,-TLS_DHE_RSA_WITH_3DES_EDE_CBC_SHA," + "+TLS_RSA_WITH_AES_128_CBC_SHA,-TLS_DHE_DSS_WITH_AES_128_CBC_SHA,-TLS_DHE_RSA_WITH_AES_128_CBC_SHA," + "+TLS_RSA_WITH_AES_256_CBC_SHA,-TLS_DHE_DSS_WITH_AES_256_CBC_SHA,-TLS_DHE_RSA_WITH_AES_256_CBC_SHA," + "-TLS_RSA_EXPORT1024_WITH_RC4_56_SHA,-TLS_DHE_DSS_WITH_RC4_128_SHA,-TLS_ECDHE_RSA_WITH_RC4_128_SHA," + "-TLS_RSA_WITH_NULL_SHA,-TLS_RSA_EXPORT1024_WITH_DES_CBC_SHA,-SSL_CK_DES_192_EDE3_CBC_WITH_MD5," + "-TLS_RSA_WITH_RC4_128_MD5,-TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,-TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA," + "-TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,+TLS_AES_128_GCM_SHA256,+TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384" + ) + + topo.standalone.enable_tls() + enc = Encryption(topo.standalone) + enc.set('nsSSL3Ciphers', CIPHER_LIST) + topo.standalone.restart() + enabled_ciphers = enc.get_attr_vals_utf8('nssslenabledciphers') + assert ENABLED_CIPHER in enabled_ciphers + assert DISABLED_CIPHER not in enabled_ciphers + + +if __name__ == '__main__': + # Run isolated + # -s for DEBUG mode + CURRENT_FILE = os.path.realpath(__file__) + pytest.main(["-s", CURRENT_FILE]) diff --git a/dirsrvtests/tests/suites/tls/ssl_version_test.py b/dirsrvtests/tests/suites/tls/ssl_version_test.py new file mode 100644 index 0000000..acc8b23 --- /dev/null +++ b/dirsrvtests/tests/suites/tls/ssl_version_test.py @@ -0,0 +1,55 @@ +import logging +import pytest +import os +from lib389.config import Encryption +from lib389.topologies import topology_st as topo + +DEBUGGING = os.getenv("DEBUGGING", default=False) +if DEBUGGING: + logging.getLogger(__name__).setLevel(logging.DEBUG) +else: + logging.getLogger(__name__).setLevel(logging.INFO) +log = logging.getLogger(__name__) + + +def test_ssl_version_range(topo): + """Specify a test case purpose or name here + + :id: bc400f54-3966-49c8-b640-abbf4fb2377e + 1. Get current default range + 2. Set sslVersionMin and verify it is applied after a restart + 3. Set sslVersionMax and verify it is applied after a restart + :expectedresults: + 1. Success + 2. Success + 3. 
Success + """ + + topo.standalone.enable_tls() + enc = Encryption(topo.standalone) + default_min = enc.get_attr_val_utf8('sslVersionMin') + default_max = enc.get_attr_val_utf8('sslVersionMax') + log.info(f"default min: {default_min} max: {default_max}") + if DEBUGGING: + topo.standalone.config.set('nsslapd-auditlog-logging-enabled', 'on') + + # Test that setting the min version is applied after a restart + enc.replace('sslVersionMin', default_max) + enc.replace('sslVersionMax', default_max) + topo.standalone.restart() + min = enc.get_attr_val_utf8('sslVersionMin') + assert min == default_max + + # Test that setting the max version is applied after a restart + enc.replace('sslVersionMin', default_min) + enc.replace('sslVersionMax', default_min) + topo.standalone.restart() + max = enc.get_attr_val_utf8('sslVersionMax') + assert max == default_min + + +if __name__ == '__main__': + # Run isolated + # -s for DEBUG mode + CURRENT_FILE = os.path.realpath(__file__) + pytest.main(["-s", CURRENT_FILE]) diff --git a/dirsrvtests/tests/suites/tls/tls_check_crl_test.py b/dirsrvtests/tests/suites/tls/tls_check_crl_test.py new file mode 100644 index 0000000..eb55985 --- /dev/null +++ b/dirsrvtests/tests/suites/tls/tls_check_crl_test.py @@ -0,0 +1,54 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2018 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. +# --- END COPYRIGHT BLOCK --- +# + + +import pytest +import ldap +from lib389.topologies import topology_st + +pytestmark = pytest.mark.tier1 + +def test_tls_check_crl(topology_st): + """Test that TLS check_crl configurations work as expected. + + :id: 9dfc6c62-dcae-44a9-83e8-b15c8e61c609 + :steps: + 1. Enable TLS + 2. Set invalid value + 3. Set valid values + 4. Check config reset + :expectedresults: + 1. TlS is setup + 2. The invalid value is rejected + 3. The valid values are used + 4. The value can be reset + """ + standalone = topology_st.standalone + # Enable TLS + standalone.enable_tls() + # Check all the valid values. 
+ assert(standalone.config.get_attr_val_utf8('nsslapd-tls-check-crl') == 'none') + with pytest.raises(ldap.OPERATIONS_ERROR): + standalone.config.set('nsslapd-tls-check-crl', 'tnhoeutnoeutn') + assert(standalone.config.get_attr_val_utf8('nsslapd-tls-check-crl') == 'none') + + standalone.config.set('nsslapd-tls-check-crl', 'peer') + assert(standalone.config.get_attr_val_utf8('nsslapd-tls-check-crl') == 'peer') + + standalone.config.set('nsslapd-tls-check-crl', 'none') + assert(standalone.config.get_attr_val_utf8('nsslapd-tls-check-crl') == 'none') + + standalone.config.set('nsslapd-tls-check-crl', 'all') + assert(standalone.config.get_attr_val_utf8('nsslapd-tls-check-crl') == 'all') + + standalone.config.remove_all('nsslapd-tls-check-crl') + assert(standalone.config.get_attr_val_utf8('nsslapd-tls-check-crl') == 'none') + + + diff --git a/dirsrvtests/tests/suites/tls/tls_ldaps_only_test.py b/dirsrvtests/tests/suites/tls/tls_ldaps_only_test.py new file mode 100644 index 0000000..4bb5989 --- /dev/null +++ b/dirsrvtests/tests/suites/tls/tls_ldaps_only_test.py @@ -0,0 +1,46 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2020 William Brown 0 + + +if __name__ == "__main__": + # Run isolated + # -s for DEBUG mode + CURRENT_FILE = os.path.realpath(__file__) + pytest.main("-s %s" % CURRENT_FILE) diff --git a/dirsrvtests/tests/tickets/__init__.py b/dirsrvtests/tests/tickets/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/dirsrvtests/tests/tickets/ticket47462_test.py b/dirsrvtests/tests/tickets/ticket47462_test.py new file mode 100644 index 0000000..021fe09 --- /dev/null +++ b/dirsrvtests/tests/tickets/ticket47462_test.py @@ -0,0 +1,296 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2016 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. +# --- END COPYRIGHT BLOCK --- +# +import logging +import time + +import ldap +import pytest +from lib389 import Entry +from lib389._constants import * +from lib389.properties import * +from lib389.topologies import topology_m2 +from lib389.utils import * +from lib389.replica import BootstrapReplicationManager +from lib389.plugins import * + +pytestmark = [pytest.mark.tier2, + pytest.mark.skipif(ds_is_newer('1.4.0'), reason="Upgrade scripts are supported only on versions < 1.4.x")] + + +logging.getLogger(__name__).setLevel(logging.DEBUG) +log = logging.getLogger(__name__) + +DES_PLUGIN = 'cn=DES,cn=Password Storage Schemes,cn=plugins,cn=config' +AES_PLUGIN = 'cn=AES,cn=Password Storage Schemes,cn=plugins,cn=config' +MMR_PLUGIN = 'cn=Multimaster Replication Plugin,cn=plugins,cn=config' +AGMT_DN = '' +USER_DN = 'cn=test_user,' + DEFAULT_SUFFIX +USER1_DN = 'cn=test_user1,' + DEFAULT_SUFFIX +TEST_REPL_DN = 'cn=test repl,' + DEFAULT_SUFFIX +DES2AES_TASK_DN = 'cn=convert,cn=des2aes,cn=tasks,cn=config' + + +def test_ticket47462(topology_m2): + """ + Test that AES properly replaces DES during an update/restart, and that + replication also works correctly. + """ + + # + # First set config as if it's an older version. Set DES to use + # libdes-plugin, MMR to depend on DES, delete the existing AES plugin, + # and set a DES password for the replication agreement. 
+ # + # Add an extra attribute to the DES plugin args + # + plugin_des = Plugin(topology_m2.ms["master1"], DES_PLUGIN) + plugin_des.set('nsslapd-pluginEnabled', 'on') + plugin_des.set('nsslapd-pluginarg2', 'description') + + plugin_mmr = Plugin(topology_m2.ms["master1"], MMR_PLUGIN) + plugin_mmr.remove('nsslapd-plugin-depends-on-named', 'AES') + # + # Delete the AES plugin + # + topology_m2.ms["master1"].delete_s(AES_PLUGIN) + # restart the server so we must use DES plugin + topology_m2.ms["master1"].restart(timeout=10) + + manager = BootstrapReplicationManager(topology_m2.ms["master2"]) + + manager.create(properties={ + 'cn': 'replication manager', + 'userPassword': 'password' + }) + + DN = topology_m2.ms["master2"].replica._get_mt_entry(DEFAULT_SUFFIX) + + topology_m2.ms["master2"].modify_s(DN, [(ldap.MOD_REPLACE, + 'nsDS5ReplicaBindDN', ensure_bytes(defaultProperties[REPLICATION_BIND_DN]))]) + # + # Create repl agreement from the newly promoted master to master1 + + properties = {RA_NAME: 'meTo_{}:{}'.format(topology_m2.ms["master2"].host, + str(topology_m2.ms["master2"].port)), + RA_BINDDN: defaultProperties[REPLICATION_BIND_DN], + RA_BINDPW: defaultProperties[REPLICATION_BIND_PW], + RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD], + RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]} + topology_m2.ms["master1"].agreement.create(suffix=SUFFIX, + host=topology_m2.ms["master2"].host, + port=topology_m2.ms["master2"].port, + properties=properties) + # + # Check replication works with the new DES password + # + try: + topology_m2.ms["master1"].add_s(Entry((USER1_DN, + {'objectclass': "top person".split(), + 'sn': 'sn', + 'description': 'DES value to convert', + 'cn': 'test_user'}))) + loop = 0 + ent = None + while loop <= 10: + try: + ent = topology_m2.ms["master2"].getEntry(USER1_DN, ldap.SCOPE_BASE, + "(objectclass=*)") + break + except ldap.NO_SUCH_OBJECT: + time.sleep(1) + loop += 1 + if not ent: + log.fatal('Replication test failed fo user1!') + assert False + else: + log.info('Replication test passed') + except ldap.LDAPError as e: + log.fatal('Failed to add test user: ' + e.args[0]['desc']) + assert False + + # + # Add a backend (that has no entries) + # + try: + topology_m2.ms["master1"].backend.create("o=empty", {BACKEND_NAME: "empty"}) + except ldap.LDAPError as e: + log.fatal('Failed to create extra/empty backend: ' + e.args[0]['desc']) + assert False + + # + # Run the upgrade... + # + topology_m2.ms["master1"].stop() + topology_m2.ms["master2"].stop() + topology_m2.ms["master1"].upgrade('offline') + topology_m2.ms["master1"].restart() + topology_m2.ms["master2"].restart() + + # + # Check that the restart converted existing DES credentials + # + try: + entry = topology_m2.ms["master1"].search_s('cn=config', ldap.SCOPE_SUBTREE, + 'nsDS5ReplicaCredentials=*') + if entry: + val = entry[0].getValue('nsDS5ReplicaCredentials') + if val.startswith(b'{AES-'): + log.info('The DES credentials have been converted to AES') + else: + log.fatal('Failed to convert credentials from DES to AES!') + assert False + else: + log.fatal('Failed to find entries with nsDS5ReplicaCredentials') + assert False + except ldap.LDAPError as e: + log.fatal('Failed to search for replica credentials: ' + + e.args[0]['desc']) + assert False + + # + # Check that the AES plugin exists, and has all the attributes listed in + # DES plugin. The attributes might not be in the expected order so check + # all the attributes. 
+ # + try: + entry = topology_m2.ms["master1"].search_s(AES_PLUGIN, ldap.SCOPE_BASE, + 'objectclass=*') + if not entry[0].hasValue('nsslapd-pluginarg0', 'description') and \ + not entry[0].hasValue('nsslapd-pluginarg1', 'description') and \ + not entry[0].hasValue('nsslapd-pluginarg2', 'description'): + log.fatal('The AES plugin did not have the DES attribute copied ' + + 'over correctly') + assert False + else: + log.info('The AES plugin was correctly setup') + except ldap.LDAPError as e: + log.fatal('Failed to find AES plugin: ' + e.args[0]['desc']) + assert False + + # + # Check that the MMR plugin was updated + # + try: + entry = topology_m2.ms["master1"].search_s(MMR_PLUGIN, ldap.SCOPE_BASE, + 'objectclass=*') + if not entry[0].hasValue('nsslapd-plugin-depends-on-named', 'AES'): + log.fatal('The MMR Plugin was not correctly updated') + assert False + else: + log.info('The MMR plugin was correctly updated') + except ldap.LDAPError as e: + log.fatal('Failed to find AES plugin: ' + e.args[0]['desc']) + assert False + + # + # Check that the DES plugin was correctly updated + # + try: + entry = topology_m2.ms["master1"].search_s(DES_PLUGIN, ldap.SCOPE_BASE, + 'objectclass=*') + if not entry[0].hasValue('nsslapd-pluginPath', 'libpbe-plugin'): + log.fatal('The DES Plugin was not correctly updated') + assert False + else: + log.info('The DES plugin was correctly updated') + except ldap.LDAPError as e: + log.fatal('Failed to find AES plugin: ' + e.args[0]['desc']) + assert False + + # + # Check replication one last time + # + try: + topology_m2.ms["master1"].add_s(Entry((USER_DN, + {'objectclass': "top person".split(), + 'sn': 'sn', + 'cn': 'test_user'}))) + loop = 0 + ent = None + while loop <= 10: + try: + ent = topology_m2.ms["master2"].getEntry(USER_DN, ldap.SCOPE_BASE, + "(objectclass=*)") + break + except ldap.NO_SUCH_OBJECT: + time.sleep(1) + loop += 1 + if not ent: + log.fatal('Replication test failed!') + assert False + else: + log.info('Replication test passed') + except ldap.LDAPError as e: + log.fatal('Failed to add test user: ' + e.args[0]['desc']) + assert False + + # Check the entry + log.info('Entry before running task...') + try: + entry = topology_m2.ms["master1"].search_s(USER1_DN, + ldap.SCOPE_BASE, + 'objectclass=*') + if entry: + print(str(entry)) + else: + log.fatal('Failed to find entries') + assert False + except ldap.LDAPError as e: + log.fatal('Failed to search for entries: ' + + e.args[0]['desc']) + assert False + + # + # Test the DES2AES Task on USER1_DN + # + try: + topology_m2.ms["master1"].add_s(Entry((DES2AES_TASK_DN, + {'objectclass': ['top', + 'extensibleObject'], + 'suffix': DEFAULT_SUFFIX, + 'cn': 'convert'}))) + except ldap.LDAPError as e: + log.fatal('Failed to add task entry: ' + e.args[0]['desc']) + assert False + + # Wait for task + task_entry = Entry(DES2AES_TASK_DN) + (done, exitCode) = topology_m2.ms["master1"].tasks.checkTask(task_entry, True) + if exitCode: + log.fatal("Error: des2aes task exited with %d" % (exitCode)) + assert False + + # Check the entry + try: + entry = topology_m2.ms["master1"].search_s(USER1_DN, + ldap.SCOPE_BASE, + 'objectclass=*') + if entry: + val = entry[0].getValue('description') + print(str(entry[0])) + if val.startswith(b'{AES-'): + log.info('Task: DES credentials have been converted to AES') + else: + log.fatal('Task: Failed to convert credentials from DES to ' + + 'AES! 
(%s)' % (val)) + assert False + else: + log.fatal('Failed to find entries') + assert False + except ldap.LDAPError as e: + log.fatal('Failed to search for entries: ' + + e.args[0]['desc']) + assert False + + +if __name__ == '__main__': + # Run isolated + # -s for DEBUG mode + CURRENT_FILE = os.path.realpath(__file__) + pytest.main("-s %s" % CURRENT_FILE) diff --git a/dirsrvtests/tests/tickets/ticket47560_test.py b/dirsrvtests/tests/tickets/ticket47560_test.py new file mode 100644 index 0000000..38479bd --- /dev/null +++ b/dirsrvtests/tests/tickets/ticket47560_test.py @@ -0,0 +1,191 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2016 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. +# --- END COPYRIGHT BLOCK --- +# +import logging +import time + +import ldap +import pytest +from lib389 import Entry +from lib389._constants import * +from lib389.properties import * +from lib389.topologies import topology_st +from lib389.utils import * + +pytestmark = pytest.mark.tier2 + +log = logging.getLogger(__name__) + + +def test_ticket47560(topology_st): + """ + This test case does the following: + SETUP + - Create entry cn=group,SUFFIX + - Create entry cn=member,SUFFIX + - Update 'cn=member,SUFFIX' to add "memberOf: cn=group,SUFFIX" + - Enable Memberof Plugins + + # Here the cn=member entry has a 'memberOf' but + # cn=group entry does not contain 'cn=member' in its member + + TEST CASE + - start the fixupmemberof task + - read the cn=member entry + - check 'memberOf is now empty + + TEARDOWN + - Delete entry cn=group,SUFFIX + - Delete entry cn=member,SUFFIX + - Disable Memberof Plugins + """ + + def _enable_disable_mbo(value): + """ + Enable or disable mbo plugin depending on 'value' ('on'/'off') + """ + # enable/disable the mbo plugin + if value == 'on': + topology_st.standalone.plugins.enable(name=PLUGIN_MEMBER_OF) + else: + topology_st.standalone.plugins.disable(name=PLUGIN_MEMBER_OF) + + log.debug("-------------> _enable_disable_mbo(%s)" % value) + + topology_st.standalone.stop(timeout=120) + time.sleep(1) + topology_st.standalone.start(timeout=120) + time.sleep(3) + + # need to reopen a connection toward the instance + topology_st.standalone.open() + + def _test_ticket47560_setup(): + """ + - Create entry cn=group,SUFFIX + - Create entry cn=member,SUFFIX + - Update 'cn=member,SUFFIX' to add "memberOf: cn=group,SUFFIX" + - Enable Memberof Plugins + """ + log.debug("-------- > _test_ticket47560_setup\n") + + # + # By default the memberof plugin is disabled create + # - create a group entry + # - create a member entry + # - set the member entry as memberof the group entry + # + entry = Entry(group_DN) + entry.setValues('objectclass', 'top', 'groupOfNames', 'inetUser') + entry.setValues('cn', 'group') + try: + topology_st.standalone.add_s(entry) + except ldap.ALREADY_EXISTS: + log.debug("Entry %s already exists" % (group_DN)) + + entry = Entry(member_DN) + entry.setValues('objectclass', 'top', 'person', 'organizationalPerson', 'inetorgperson', 'inetUser') + entry.setValues('uid', 'member') + entry.setValues('cn', 'member') + entry.setValues('sn', 'member') + try: + topology_st.standalone.add_s(entry) + except ldap.ALREADY_EXISTS: + log.debug("Entry %s already exists" % (member_DN)) + + replace = [(ldap.MOD_REPLACE, 'memberof', ensure_bytes(group_DN))] + topology_st.standalone.modify_s(member_DN, replace) + + # + # enable the memberof plugin and restart the instance + # + _enable_disable_mbo('on') + + # + # check memberof 
attribute is still present + # + filt = 'uid=member' + ents = topology_st.standalone.search_s(member_DN, ldap.SCOPE_BASE, filt) + assert len(ents) == 1 + ent = ents[0] + # print ent + value = ensure_str(ent.getValue('memberof')) + # print "memberof: %s" % (value) + assert value == group_DN + + def _test_ticket47560_teardown(): + """ + - Delete entry cn=group,SUFFIX + - Delete entry cn=member,SUFFIX + - Disable Memberof Plugins + """ + log.debug("-------- > _test_ticket47560_teardown\n") + # remove the entries group_DN and member_DN + try: + topology_st.standalone.delete_s(group_DN) + except: + log.warning("Entry %s fail to delete" % (group_DN)) + try: + topology_st.standalone.delete_s(member_DN) + except: + log.warning("Entry %s fail to delete" % (member_DN)) + # + # disable the memberof plugin and restart the instance + # + _enable_disable_mbo('off') + + group_DN = "cn=group,%s" % (SUFFIX) + member_DN = "uid=member,%s" % (SUFFIX) + + # + # Initialize the test case + # + _test_ticket47560_setup() + + # + # start the test + # - start the fixup task + # - check the entry is fixed (no longer memberof the group) + # + log.debug("-------- > Start ticket tests\n") + + filt = 'uid=member' + ents = topology_st.standalone.search_s(member_DN, ldap.SCOPE_BASE, filt) + assert len(ents) == 1 + ent = ents[0] + log.debug("Unfixed entry %r\n" % ent) + + # run the fixup task + topology_st.standalone.tasks.fixupMemberOf(suffix=SUFFIX, args={TASK_WAIT: True}) + + ents = topology_st.standalone.search_s(member_DN, ldap.SCOPE_BASE, filt) + assert len(ents) == 1 + ent = ents[0] + log.debug("Fixed entry %r\n" % ent) + + if ensure_str(ent.getValue('memberof')) == group_DN: + log.warning("Error the fixupMemberOf did not fix %s" % (member_DN)) + result_successful = False + else: + result_successful = True + + # + # cleanup up the test case + # + _test_ticket47560_teardown() + + assert result_successful is True + + log.info('Testcase PASSED') + + +if __name__ == '__main__': + # Run isolated + # -s for DEBUG mode + CURRENT_FILE = os.path.realpath(__file__) + pytest.main("-s %s" % CURRENT_FILE) diff --git a/dirsrvtests/tests/tickets/ticket47573_test.py b/dirsrvtests/tests/tickets/ticket47573_test.py new file mode 100644 index 0000000..08dfbb4 --- /dev/null +++ b/dirsrvtests/tests/tickets/ticket47573_test.py @@ -0,0 +1,235 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2016 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. 
+# --- END COPYRIGHT BLOCK --- +# +''' +Created on Nov 7, 2013 + +@author: tbordaz +''' +import logging +import re +import time + +import ldap +import pytest +from lib389 import Entry +from lib389._constants import * +from lib389.topologies import topology_m1c1 +from lib389.utils import * + +pytestmark = pytest.mark.tier2 + +logging.getLogger(__name__).setLevel(logging.DEBUG) +log = logging.getLogger(__name__) + +TEST_REPL_DN = "cn=test_repl, %s" % SUFFIX +ENTRY_DN = "cn=test_entry, %s" % SUFFIX + +MUST_OLD = "(postalAddress $ preferredLocale $ telexNumber)" +MAY_OLD = "(postalCode $ street)" + +MUST_NEW = "(postalAddress $ preferredLocale)" +MAY_NEW = "(telexNumber $ postalCode $ street)" + + +def pattern_errorlog(file, log_pattern): + try: + pattern_errorlog.last_pos += 1 + except AttributeError: + pattern_errorlog.last_pos = 0 + + found = None + log.debug("_pattern_errorlog: start at offset %d" % pattern_errorlog.last_pos) + file.seek(pattern_errorlog.last_pos) + + # Use a while true iteration because 'for line in file: hit a + # python bug that break file.tell() + while True: + line = file.readline() + log.debug("_pattern_errorlog: [%d] %s" % (file.tell(), line)) + found = log_pattern.search(line) + if ((line == '') or (found)): + break + + log.debug("_pattern_errorlog: end at offset %d" % file.tell()) + pattern_errorlog.last_pos = file.tell() + return found + + +def _oc_definition(oid_ext, name, must=None, may=None): + oid = "1.2.3.4.5.6.7.8.9.10.%d" % oid_ext + desc = 'To test ticket 47573' + sup = 'person' + if not must: + must = MUST_OLD + if not may: + may = MAY_OLD + + new_oc = "( %s NAME '%s' DESC '%s' SUP %s AUXILIARY MUST %s MAY %s )" % (oid, name, desc, sup, must, may) + return ensure_bytes(new_oc) + + +def add_OC(instance, oid_ext, name): + new_oc = _oc_definition(oid_ext, name) + instance.schema.add_schema('objectClasses', new_oc) + + +def mod_OC(instance, oid_ext, name, old_must=None, old_may=None, new_must=None, new_may=None): + old_oc = _oc_definition(oid_ext, name, old_must, old_may) + new_oc = _oc_definition(oid_ext, name, new_must, new_may) + instance.schema.del_schema('objectClasses', old_oc) + instance.schema.add_schema('objectClasses', new_oc) + + +def trigger_schema_push(topology_m1c1): + """ + It triggers an update on the supplier. This will start a replication + session and a schema push + """ + try: + trigger_schema_push.value += 1 + except AttributeError: + trigger_schema_push.value = 1 + replace = [(ldap.MOD_REPLACE, 'telephonenumber', ensure_bytes(str(trigger_schema_push.value)))] + topology_m1c1.ms["master1"].modify_s(ENTRY_DN, replace) + + # wait 10 seconds that the update is replicated + loop = 0 + while loop <= 10: + try: + ent = topology_m1c1.cs["consumer1"].getEntry(ENTRY_DN, ldap.SCOPE_BASE, "(objectclass=*)", + ['telephonenumber']) + val = ent.telephonenumber or "0" + if int(val) == trigger_schema_push.value: + return + # the expected value is not yet replicated. 
try again + time.sleep(1) + loop += 1 + log.debug("trigger_schema_push: receive %s (expected %d)" % (val, trigger_schema_push.value)) + except ldap.NO_SUCH_OBJECT: + time.sleep(1) + loop += 1 + + +def test_ticket47573_init(topology_m1c1): + """ + Initialize the test environment + """ + log.debug("test_ticket47573_init topology_m1c1 %r (master %r, consumer %r" % + (topology_m1c1, topology_m1c1.ms["master1"], topology_m1c1.cs["consumer1"])) + # the test case will check if a warning message is logged in the + # error log of the supplier + topology_m1c1.ms["master1"].errorlog_file = open(topology_m1c1.ms["master1"].errlog, "r") + + # This entry will be used to trigger attempt of schema push + topology_m1c1.ms["master1"].add_s(Entry((ENTRY_DN, { + 'objectclass': "top person".split(), + 'sn': 'test_entry', + 'cn': 'test_entry'}))) + + +def test_ticket47573_one(topology_m1c1): + """ + Summary: Add a custom OC with MUST and MAY + MUST = postalAddress $ preferredLocale + MAY = telexNumber $ postalCode $ street + + Final state + - supplier +OCwithMayAttr + - consumer +OCwithMayAttr + + """ + log.debug("test_ticket47573_one topology_m1c1 %r (master %r, consumer %r" % ( + topology_m1c1, topology_m1c1.ms["master1"], topology_m1c1.cs["consumer1"])) + # update the schema of the supplier so that it is a superset of + # consumer. Schema should be pushed + new_oc = _oc_definition(2, 'OCwithMayAttr', + must=MUST_OLD, + may=MAY_OLD) + topology_m1c1.ms["master1"].schema.add_schema('objectClasses', new_oc) + + trigger_schema_push(topology_m1c1) + master_schema_csn = topology_m1c1.ms["master1"].schema.get_schema_csn() + consumer_schema_csn = topology_m1c1.cs["consumer1"].schema.get_schema_csn() + + # Check the schemaCSN was updated on the consumer + log.debug("test_ticket47573_one master_schema_csn=%s", master_schema_csn) + log.debug("ctest_ticket47573_one onsumer_schema_csn=%s", consumer_schema_csn) + assert master_schema_csn == consumer_schema_csn + + # Check the error log of the supplier does not contain an error + regex = re.compile("must not be overwritten \(set replication log for additional info\)") + res = pattern_errorlog(topology_m1c1.ms["master1"].errorlog_file, regex) + assert res is None + + +def test_ticket47573_two(topology_m1c1): + """ + Summary: Change OCwithMayAttr to move a MAY attribute to a MUST attribute + + + Final state + - supplier OCwithMayAttr updated + - consumer OCwithMayAttr updated + + """ + + # Update the objectclass so that a MAY attribute is moved to MUST attribute + mod_OC(topology_m1c1.ms["master1"], 2, 'OCwithMayAttr', old_must=MUST_OLD, new_must=MUST_NEW, old_may=MAY_OLD, + new_may=MAY_NEW) + + # now push the scheam + trigger_schema_push(topology_m1c1) + master_schema_csn = topology_m1c1.ms["master1"].schema.get_schema_csn() + consumer_schema_csn = topology_m1c1.cs["consumer1"].schema.get_schema_csn() + + # Check the schemaCSN was NOT updated on the consumer + log.debug("test_ticket47573_two master_schema_csn=%s", master_schema_csn) + log.debug("test_ticket47573_two consumer_schema_csn=%s", consumer_schema_csn) + assert master_schema_csn == consumer_schema_csn + + # Check the error log of the supplier does not contain an error + regex = re.compile("must not be overwritten \(set replication log for additional info\)") + res = pattern_errorlog(topology_m1c1.ms["master1"].errorlog_file, regex) + assert res is None + + +def test_ticket47573_three(topology_m1c1): + ''' + Create a entry with OCwithMayAttr OC + ''' + # Check replication is working fine + dn = "cn=ticket47573, %s" % 
SUFFIX + topology_m1c1.ms["master1"].add_s(Entry((dn, + {'objectclass': "top person OCwithMayAttr".split(), + 'sn': 'test_repl', + 'cn': 'test_repl', + 'postalAddress': 'here', + 'preferredLocale': 'en', + 'telexNumber': '12$us$21', + 'postalCode': '54321'}))) + loop = 0 + ent = None + while loop <= 10: + try: + ent = topology_m1c1.cs["consumer1"].getEntry(dn, ldap.SCOPE_BASE, "(objectclass=*)") + break + except ldap.NO_SUCH_OBJECT: + time.sleep(1) + loop += 1 + if ent is None: + assert False + + log.info('Testcase PASSED') + + +if __name__ == '__main__': + # Run isolated + # -s for DEBUG mode + CURRENT_FILE = os.path.realpath(__file__) + pytest.main("-s %s" % CURRENT_FILE) diff --git a/dirsrvtests/tests/tickets/ticket47619_test.py b/dirsrvtests/tests/tickets/ticket47619_test.py new file mode 100644 index 0000000..9ffd3eb --- /dev/null +++ b/dirsrvtests/tests/tickets/ticket47619_test.py @@ -0,0 +1,97 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2016 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. +# --- END COPYRIGHT BLOCK --- +# +''' +Created on Nov 7, 2013 + +@author: tbordaz +''' +import logging +import time + +import ldap +import pytest +from lib389 import Entry +from lib389._constants import * +from lib389.properties import * +from lib389.topologies import topology_m1c1 + +pytestmark = pytest.mark.tier2 + +logging.getLogger(__name__).setLevel(logging.DEBUG) +log = logging.getLogger(__name__) + +TEST_REPL_DN = "cn=test_repl, %s" % SUFFIX +ENTRY_DN = "cn=test_entry, %s" % SUFFIX + +OTHER_NAME = 'other_entry' +MAX_OTHERS = 100 + +ATTRIBUTES = ['street', 'countryName', 'description', 'postalAddress', 'postalCode', 'title', 'l', 'roomNumber'] + + +def test_ticket47619_init(topology_m1c1): + """ + Initialize the test environment + """ + topology_m1c1.ms["master1"].plugins.enable(name=PLUGIN_RETRO_CHANGELOG) + # topology_m1c1.ms["master1"].plugins.enable(name=PLUGIN_MEMBER_OF) + # topology_m1c1.ms["master1"].plugins.enable(name=PLUGIN_REFER_INTEGRITY) + topology_m1c1.ms["master1"].stop(timeout=10) + topology_m1c1.ms["master1"].start(timeout=10) + + topology_m1c1.ms["master1"].log.info("test_ticket47619_init topology_m1c1 %r" % (topology_m1c1)) + # the test case will check if a warning message is logged in the + # error log of the supplier + topology_m1c1.ms["master1"].errorlog_file = open(topology_m1c1.ms["master1"].errlog, "r") + + # add dummy entries + for cpt in range(MAX_OTHERS): + name = "%s%d" % (OTHER_NAME, cpt) + topology_m1c1.ms["master1"].add_s(Entry(("cn=%s,%s" % (name, SUFFIX), { + 'objectclass': "top person".split(), + 'sn': name, + 'cn': name}))) + + topology_m1c1.ms["master1"].log.info( + "test_ticket47619_init: %d entries ADDed %s[0..%d]" % (MAX_OTHERS, OTHER_NAME, MAX_OTHERS - 1)) + + # Check the number of entries in the retro changelog + time.sleep(2) + ents = topology_m1c1.ms["master1"].search_s(RETROCL_SUFFIX, ldap.SCOPE_ONELEVEL, "(objectclass=*)") + assert len(ents) == MAX_OTHERS + + +def test_ticket47619_create_index(topology_m1c1): + args = {INDEX_TYPE: 'eq'} + for attr in ATTRIBUTES: + topology_m1c1.ms["master1"].index.create(suffix=RETROCL_SUFFIX, attr=attr, args=args) + topology_m1c1.ms["master1"].restart(timeout=10) + + +def test_ticket47619_reindex(topology_m1c1): + ''' + Reindex all the attributes in ATTRIBUTES + ''' + args = {TASK_WAIT: True} + for attr in ATTRIBUTES: + rc = topology_m1c1.ms["master1"].tasks.reindex(suffix=RETROCL_SUFFIX, attrname=attr, args=args) + assert rc == 0 + 
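For readers who want to see what the index.create() calls above actually register, here is a minimal, hypothetical sketch in plain python-ldap (the connection details, and the assumption that the retro changelog is backed by a backend named "changelog", are illustrative and not taken from this suite):

    import ldap
    import ldap.modlist

    # Assumed default backend name for the retro changelog; adjust if yours differs.
    INDEX_DN = "cn=street,cn=index,cn=changelog,cn=ldbm database,cn=plugins,cn=config"

    conn = ldap.initialize("ldap://localhost:389")          # hypothetical instance
    conn.simple_bind_s("cn=Directory Manager", "password")  # hypothetical credentials

    # One nsIndex entry per attribute; 'eq' requests an equality index.
    attrs = {
        "objectClass": [b"top", b"nsIndex"],
        "cn": [b"street"],
        "nsSystemIndex": [b"false"],
        "nsIndexType": [b"eq"],
    }
    conn.add_s(INDEX_DN, ldap.modlist.addModlist(attrs))
    conn.unbind_s()

Adding the nsIndex entry only registers the configuration; the reindex task run just above is what rebuilds the index files so that entries already present in the retro changelog become searchable through the new index.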
+ +def test_ticket47619_check_indexed_search(topology_m1c1): + for attr in ATTRIBUTES: + ents = topology_m1c1.ms["master1"].search_s(RETROCL_SUFFIX, ldap.SCOPE_SUBTREE, "(%s=hello)" % attr) + assert len(ents) == 0 + + +if __name__ == '__main__': + # Run isolated + # -s for DEBUG mode + CURRENT_FILE = os.path.realpath(__file__) + pytest.main("-s %s" % CURRENT_FILE) diff --git a/dirsrvtests/tests/tickets/ticket47640_test.py b/dirsrvtests/tests/tickets/ticket47640_test.py new file mode 100644 index 0000000..996735f --- /dev/null +++ b/dirsrvtests/tests/tickets/ticket47640_test.py @@ -0,0 +1,82 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2016 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. +# --- END COPYRIGHT BLOCK --- +# +import pytest +from lib389.tasks import * +from lib389.utils import * +from lib389.topologies import topology_st + +from lib389._constants import PLUGIN_LINKED_ATTRS, DEFAULT_SUFFIX + +# Skip on older versions +pytestmark = [pytest.mark.tier2, + pytest.mark.skipif(ds_is_older('1.3.4'), reason="Not implemented")] + +logging.getLogger(__name__).setLevel(logging.DEBUG) +log = logging.getLogger(__name__) + + +def test_ticket47640(topology_st): + ''' + Linked Attrs Plugins - verify that if the plugin fails to update the link entry + that the entire operation is aborted + ''' + + # Enable Dynamic plugins, and the linked Attrs plugin + try: + topology_st.standalone.modify_s(DN_CONFIG, [(ldap.MOD_REPLACE, 'nsslapd-dynamic-plugins', b'on')]) + except ldap.LDAPError as e: + log.fatal('Failed to enable dynamic plugin!' + e.message['desc']) + assert False + + try: + topology_st.standalone.plugins.enable(name=PLUGIN_LINKED_ATTRS) + except ValueError as e: + log.fatal('Failed to enable linked attributes plugin!' + e.message['desc']) + assert False + + # Add the plugin config entry + try: + topology_st.standalone.add_s(Entry(('cn=manager link,cn=Linked Attributes,cn=plugins,cn=config', { + 'objectclass': 'top extensibleObject'.split(), + 'cn': 'Manager Link', + 'linkType': 'seeAlso', + 'managedType': 'seeAlso' + }))) + except ldap.LDAPError as e: + log.fatal('Failed to add linked attr config entry: error ' + e.message['desc']) + assert False + + # Add an entry who has a link to an entry that does not exist + OP_REJECTED = False + try: + topology_st.standalone.add_s(Entry(('uid=manager,' + DEFAULT_SUFFIX, { + 'objectclass': 'top extensibleObject'.split(), + 'uid': 'manager', + 'seeAlso': 'uid=user,dc=example,dc=com' + }))) + except ldap.UNWILLING_TO_PERFORM: + # Success + log.info('Add operation correctly rejected.') + OP_REJECTED = True + except ldap.LDAPError as e: + log.fatal('Add operation incorrectly rejected: error %s - ' + + 'expected "unwilling to perform"' % e.message['desc']) + assert False + if not OP_REJECTED: + log.fatal('Add operation incorrectly allowed') + assert False + + log.info('Test complete') + + +if __name__ == '__main__': + # Run isolated + # -s for DEBUG mode + CURRENT_FILE = os.path.realpath(__file__) + pytest.main("-s %s" % CURRENT_FILE) diff --git a/dirsrvtests/tests/tickets/ticket47653MMR_test.py b/dirsrvtests/tests/tickets/ticket47653MMR_test.py new file mode 100644 index 0000000..b36befe --- /dev/null +++ b/dirsrvtests/tests/tickets/ticket47653MMR_test.py @@ -0,0 +1,348 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2016 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. 
+# --- END COPYRIGHT BLOCK --- +# +''' +Created on Nov 7, 2013 + +@author: tbordaz +''' +import logging +import time + +import ldap +import pytest +from lib389 import Entry +from lib389._constants import * +from lib389.topologies import topology_m2 + +logging.getLogger(__name__).setLevel(logging.DEBUG) +from lib389.utils import * + +# Skip on older versions +pytestmark =[pytest.mark.tier2, + pytest.mark.skipif(ds_is_older('1.3.2'), reason="Not implemented")] +log = logging.getLogger(__name__) + +DEBUGGING = os.getenv("DEBUGGING", default=False) + +TEST_REPL_DN = "cn=test_repl, %s" % SUFFIX +OC_NAME = 'OCticket47653' +MUST = "(postalAddress $ postalCode)" +MAY = "(member $ street)" + +OTHER_NAME = 'other_entry' +MAX_OTHERS = 10 + +BIND_NAME = 'bind_entry' +BIND_DN = 'cn=%s, %s' % (BIND_NAME, SUFFIX) +BIND_PW = 'password' + +ENTRY_NAME = 'test_entry' +ENTRY_DN = 'cn=%s, %s' % (ENTRY_NAME, SUFFIX) +ENTRY_OC = "top person %s" % OC_NAME + + +def _oc_definition(oid_ext, name, must=None, may=None): + oid = "1.2.3.4.5.6.7.8.9.10.%d" % oid_ext + desc = 'To test ticket 47490' + sup = 'person' + if not must: + must = MUST + if not may: + may = MAY + + new_oc = "( %s NAME '%s' DESC '%s' SUP %s AUXILIARY MUST %s MAY %s )" % (oid, name, desc, sup, must, may) + return ensure_bytes(new_oc) + + +def test_ticket47653_init(topology_m2): + """ + It adds + - Objectclass with MAY 'member' + - an entry ('bind_entry') with which we bind to test the 'SELFDN' operation + It deletes the anonymous aci + + """ + + topology_m2.ms["master1"].log.info("Add %s that allows 'member' attribute" % OC_NAME) + new_oc = _oc_definition(2, OC_NAME, must=MUST, may=MAY) + topology_m2.ms["master1"].schema.add_schema('objectClasses', new_oc) + + # entry used to bind with + topology_m2.ms["master1"].log.info("Add %s" % BIND_DN) + topology_m2.ms["master1"].add_s(Entry((BIND_DN, { + 'objectclass': "top person".split(), + 'sn': BIND_NAME, + 'cn': BIND_NAME, + 'userpassword': BIND_PW}))) + + if DEBUGGING: + # enable acl error logging + mod = [(ldap.MOD_REPLACE, 'nsslapd-errorlog-level', ensure_bytes(str(128 + 8192)))] # ACL + REPL + topology_m2.ms["master1"].modify_s(DN_CONFIG, mod) + topology_m2.ms["master2"].modify_s(DN_CONFIG, mod) + + # remove all aci's and start with a clean slate + mod = [(ldap.MOD_DELETE, 'aci', None)] + topology_m2.ms["master1"].modify_s(SUFFIX, mod) + topology_m2.ms["master2"].modify_s(SUFFIX, mod) + + # add dummy entries + for cpt in range(MAX_OTHERS): + name = "%s%d" % (OTHER_NAME, cpt) + topology_m2.ms["master1"].add_s(Entry(("cn=%s,%s" % (name, SUFFIX), { + 'objectclass': "top person".split(), + 'sn': name, + 'cn': name}))) + + +def test_ticket47653_add(topology_m2): + ''' + This test ADD an entry on MASTER1 where 47653 is fixed. Then it checks that entry is replicated + on MASTER2 (even if on MASTER2 47653 is NOT fixed). Then update on MASTER2 and check the update on MASTER1 + + It checks that, bound as bind_entry, + - we can not ADD an entry without the proper SELFDN aci. 
+ - with the proper ACI we can not ADD with 'member' attribute + - with the proper ACI and 'member' it succeeds to ADD + ''' + topology_m2.ms["master1"].log.info("\n\n######################### ADD ######################\n") + + # bind as bind_entry + topology_m2.ms["master1"].log.info("Bind as %s" % BIND_DN) + topology_m2.ms["master1"].simple_bind_s(BIND_DN, BIND_PW) + + # Prepare the entry with multivalued members + entry_with_members = Entry(ENTRY_DN) + entry_with_members.setValues('objectclass', 'top', 'person', 'OCticket47653') + entry_with_members.setValues('sn', ENTRY_NAME) + entry_with_members.setValues('cn', ENTRY_NAME) + entry_with_members.setValues('postalAddress', 'here') + entry_with_members.setValues('postalCode', '1234') + members = [] + for cpt in range(MAX_OTHERS): + name = "%s%d" % (OTHER_NAME, cpt) + members.append("cn=%s,%s" % (name, SUFFIX)) + members.append(BIND_DN) + entry_with_members.setValues('member', members) + + # Prepare the entry with only one member value + entry_with_member = Entry(ENTRY_DN) + entry_with_member.setValues('objectclass', 'top', 'person', 'OCticket47653') + entry_with_member.setValues('sn', ENTRY_NAME) + entry_with_member.setValues('cn', ENTRY_NAME) + entry_with_member.setValues('postalAddress', 'here') + entry_with_member.setValues('postalCode', '1234') + member = [] + member.append(BIND_DN) + entry_with_member.setValues('member', member) + + # entry to add WITH member being BIND_DN but WITHOUT the ACI -> ldap.INSUFFICIENT_ACCESS + try: + topology_m2.ms["master1"].log.info("Try to add Add %s (aci is missing): %r" % (ENTRY_DN, entry_with_member)) + + topology_m2.ms["master1"].add_s(entry_with_member) + except Exception as e: + topology_m2.ms["master1"].log.info("Exception (expected): %s" % type(e).__name__) + assert isinstance(e, ldap.INSUFFICIENT_ACCESS) + + # Ok Now add the proper ACI + topology_m2.ms["master1"].log.info("Bind as %s and add the ADD SELFDN aci" % DN_DM) + topology_m2.ms["master1"].simple_bind_s(DN_DM, PASSWORD) + + ACI_TARGET = "(target = \"ldap:///cn=*,%s\")" % SUFFIX + ACI_TARGETFILTER = "(targetfilter =\"(objectClass=%s)\")" % OC_NAME + ACI_ALLOW = "(version 3.0; acl \"SelfDN add\"; allow (add)" + ACI_SUBJECT = " userattr = \"member#selfDN\";)" + ACI_BODY = ACI_TARGET + ACI_TARGETFILTER + ACI_ALLOW + ACI_SUBJECT + mod = [(ldap.MOD_ADD, 'aci', ensure_bytes(ACI_BODY))] + topology_m2.ms["master1"].modify_s(SUFFIX, mod) + time.sleep(1) + + # bind as bind_entry + topology_m2.ms["master1"].log.info("Bind as %s" % BIND_DN) + topology_m2.ms["master1"].simple_bind_s(BIND_DN, BIND_PW) + + # entry to add WITHOUT member and WITH the ACI -> ldap.INSUFFICIENT_ACCESS + try: + topology_m2.ms["master1"].log.info("Try to add Add %s (member is missing)" % ENTRY_DN) + topology_m2.ms["master1"].add_s(Entry((ENTRY_DN, { + 'objectclass': ENTRY_OC.split(), + 'sn': ENTRY_NAME, + 'cn': ENTRY_NAME, + 'postalAddress': 'here', + 'postalCode': '1234'}))) + except Exception as e: + topology_m2.ms["master1"].log.info("Exception (expected): %s" % type(e).__name__) + assert isinstance(e, ldap.INSUFFICIENT_ACCESS) + time.sleep(1) + + # entry to add WITH memberS and WITH the ACI -> ldap.INSUFFICIENT_ACCESS + # member should contain only one value + try: + topology_m2.ms["master1"].log.info("Try to add Add %s (with several member values)" % ENTRY_DN) + topology_m2.ms["master1"].add_s(entry_with_members) + except Exception as e: + topology_m2.ms["master1"].log.info("Exception (expected): %s" % type(e).__name__) + assert isinstance(e, ldap.INSUFFICIENT_ACCESS) + 
time.sleep(2) + + topology_m2.ms["master1"].log.info("Try to add Add %s should be successful" % ENTRY_DN) + try: + topology_m2.ms["master1"].add_s(entry_with_member) + except ldap.LDAPError as e: + topology_m2.ms["master1"].log.info("Failed to add entry, error: " + e.message['desc']) + assert False + + # + # Now check the entry as been replicated + # + topology_m2.ms["master2"].simple_bind_s(DN_DM, PASSWORD) + topology_m2.ms["master1"].log.info("Try to retrieve %s from Master2" % ENTRY_DN) + loop = 0 + while loop <= 10: + try: + ent = topology_m2.ms["master2"].getEntry(ENTRY_DN, ldap.SCOPE_BASE, "(objectclass=*)") + break + except ldap.NO_SUCH_OBJECT: + time.sleep(1) + loop += 1 + assert loop <= 10 + + # Now update the entry on Master2 (as DM because 47653 is possibly not fixed on M2) + topology_m2.ms["master1"].log.info("Update %s on M2" % ENTRY_DN) + mod = [(ldap.MOD_REPLACE, 'description', b'test_add')] + topology_m2.ms["master2"].modify_s(ENTRY_DN, mod) + time.sleep(1) + + topology_m2.ms["master1"].simple_bind_s(DN_DM, PASSWORD) + loop = 0 + while loop <= 10: + try: + ent = topology_m2.ms["master1"].getEntry(ENTRY_DN, ldap.SCOPE_BASE, "(objectclass=*)") + if ent.hasAttr('description') and (ensure_str(ent.getValue('description')) == 'test_add'): + break + except ldap.NO_SUCH_OBJECT: + time.sleep(1) + loop += 1 + + assert ensure_str(ent.getValue('description')) == 'test_add' + + +def test_ticket47653_modify(topology_m2): + ''' + This test MOD an entry on MASTER1 where 47653 is fixed. Then it checks that update is replicated + on MASTER2 (even if on MASTER2 47653 is NOT fixed). Then update on MASTER2 (bound as BIND_DN). + This update may fail whether or not 47653 is fixed on MASTER2 + + It checks that, bound as bind_entry, + - we can not modify an entry without the proper SELFDN aci. + - adding the ACI, we can modify the entry + ''' + # bind as bind_entry + topology_m2.ms["master1"].log.info("Bind as %s" % BIND_DN) + topology_m2.ms["master1"].simple_bind_s(BIND_DN, BIND_PW) + + topology_m2.ms["master1"].log.info("\n\n######################### MODIFY ######################\n") + + # entry to modify WITH member being BIND_DN but WITHOUT the ACI -> ldap.INSUFFICIENT_ACCESS + try: + topology_m2.ms["master1"].log.info("Try to modify %s (aci is missing)" % ENTRY_DN) + mod = [(ldap.MOD_REPLACE, 'postalCode', b'9876')] + topology_m2.ms["master1"].modify_s(ENTRY_DN, mod) + except Exception as e: + topology_m2.ms["master1"].log.info("Exception (expected): %s" % type(e).__name__) + assert isinstance(e, ldap.INSUFFICIENT_ACCESS) + + # Ok Now add the proper ACI + topology_m2.ms["master1"].log.info("Bind as %s and add the WRITE SELFDN aci" % DN_DM) + topology_m2.ms["master1"].simple_bind_s(DN_DM, PASSWORD) + + ACI_TARGET = "(target = \"ldap:///cn=*,%s\")" % SUFFIX + ACI_TARGETATTR = "(targetattr = *)" + ACI_TARGETFILTER = "(targetfilter =\"(objectClass=%s)\")" % OC_NAME + ACI_ALLOW = "(version 3.0; acl \"SelfDN write\"; allow (write)" + ACI_SUBJECT = " userattr = \"member#selfDN\";)" + ACI_BODY = ACI_TARGET + ACI_TARGETATTR + ACI_TARGETFILTER + ACI_ALLOW + ACI_SUBJECT + mod = [(ldap.MOD_ADD, 'aci', ensure_bytes(ACI_BODY))] + topology_m2.ms["master1"].modify_s(SUFFIX, mod) + time.sleep(2) + + # bind as bind_entry + topology_m2.ms["master1"].log.info("M1: Bind as %s" % BIND_DN) + topology_m2.ms["master1"].simple_bind_s(BIND_DN, BIND_PW) + time.sleep(1) + + # modify the entry and checks the value + topology_m2.ms["master1"].log.info("M1: Try to modify %s. 
It should succeeds" % ENTRY_DN) + mod = [(ldap.MOD_REPLACE, 'postalCode', b'1928')] + topology_m2.ms["master1"].modify_s(ENTRY_DN, mod) + + topology_m2.ms["master1"].log.info("M1: Bind as %s" % DN_DM) + topology_m2.ms["master1"].simple_bind_s(DN_DM, PASSWORD) + + topology_m2.ms["master1"].log.info("M1: Check the update of %s" % ENTRY_DN) + ents = topology_m2.ms["master1"].search_s(ENTRY_DN, ldap.SCOPE_BASE, 'objectclass=*') + assert len(ents) == 1 + assert ensure_str(ents[0].postalCode) == '1928' + + # Now check the update has been replicated on M2 + topology_m2.ms["master1"].log.info("M2: Bind as %s" % DN_DM) + topology_m2.ms["master2"].simple_bind_s(DN_DM, PASSWORD) + topology_m2.ms["master1"].log.info("M2: Try to retrieve %s" % ENTRY_DN) + loop = 0 + while loop <= 10: + try: + ent = topology_m2.ms["master2"].getEntry(ENTRY_DN, ldap.SCOPE_BASE, "(objectclass=*)") + if ent.hasAttr('postalCode') and (ensure_str(ent.getValue('postalCode')) == '1928'): + break + except ldap.NO_SUCH_OBJECT: + time.sleep(1) + loop += 1 + assert loop <= 10 + assert ensure_str(ent.getValue('postalCode')) == '1928' + + # Now update the entry on Master2 bound as BIND_DN (update may fail if 47653 is not fixed on M2) + topology_m2.ms["master1"].log.info("M2: Update %s (bound as %s)" % (ENTRY_DN, BIND_DN)) + topology_m2.ms["master2"].simple_bind_s(BIND_DN, PASSWORD) + time.sleep(1) + fail = False + try: + mod = [(ldap.MOD_REPLACE, 'postalCode', b'1929')] + topology_m2.ms["master2"].modify_s(ENTRY_DN, mod) + fail = False + except ldap.INSUFFICIENT_ACCESS: + topology_m2.ms["master1"].log.info( + "M2: Exception (INSUFFICIENT_ACCESS): that is fine the bug is possibly not fixed on M2") + fail = True + except Exception as e: + topology_m2.ms["master1"].log.info("M2: Exception (not expected): %s" % type(e).__name__) + assert 0 + + if not fail: + # Check the update has been replicaed on M1 + topology_m2.ms["master1"].log.info("M1: Bind as %s" % DN_DM) + topology_m2.ms["master1"].simple_bind_s(DN_DM, PASSWORD) + topology_m2.ms["master1"].log.info("M1: Check %s.postalCode=1929)" % (ENTRY_DN)) + loop = 0 + while loop <= 10: + try: + ent = topology_m2.ms["master1"].getEntry(ENTRY_DN, ldap.SCOPE_BASE, "(objectclass=*)") + if ent.hasAttr('postalCode') and (ensure_str(ent.getValue('postalCode')) == '1929'): + break + except ldap.NO_SUCH_OBJECT: + time.sleep(1) + loop += 1 + assert ensure_str(ent.getValue('postalCode')) == '1929' + + +if __name__ == '__main__': + # Run isolated + # -s for DEBUG mode + CURRENT_FILE = os.path.realpath(__file__) + pytest.main("-s %s" % CURRENT_FILE) diff --git a/dirsrvtests/tests/tickets/ticket47676_test.py b/dirsrvtests/tests/tickets/ticket47676_test.py new file mode 100644 index 0000000..eb0c9fb --- /dev/null +++ b/dirsrvtests/tests/tickets/ticket47676_test.py @@ -0,0 +1,252 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2016 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. 
+# --- END COPYRIGHT BLOCK --- +# +''' +Created on Nov 7, 2013 + +@author: tbordaz +''' +import logging +import time + +import ldap +import pytest +from lib389 import Entry +from lib389._constants import * +from lib389.topologies import topology_m2 +from lib389.replica import ReplicationManager + +logging.getLogger(__name__).setLevel(logging.DEBUG) +from lib389.utils import * + +# Skip on older versions +pytestmark = [pytest.mark.tier2, + pytest.mark.skipif(ds_is_older('1.3.2'), reason="Not implemented")] +log = logging.getLogger(__name__) + +SCHEMA_DN = "cn=schema" +TEST_REPL_DN = "cn=test_repl, %s" % SUFFIX +OC_NAME = 'OCticket47676' +OC_OID_EXT = 2 +MUST = "(postalAddress $ postalCode)" +MAY = "(member $ street)" + +OC2_NAME = 'OC2ticket47676' +OC2_OID_EXT = 3 +MUST_2 = "(postalAddress $ postalCode)" +MAY_2 = "(member $ street)" + +REPL_SCHEMA_POLICY_CONSUMER = "cn=consumerUpdatePolicy,cn=replSchema,cn=config" +REPL_SCHEMA_POLICY_SUPPLIER = "cn=supplierUpdatePolicy,cn=replSchema,cn=config" + +OTHER_NAME = 'other_entry' +MAX_OTHERS = 10 + +BIND_NAME = 'bind_entry' +BIND_DN = 'cn=%s, %s' % (BIND_NAME, SUFFIX) +BIND_PW = 'password' + +ENTRY_NAME = 'test_entry' +ENTRY_DN = 'cn=%s, %s' % (ENTRY_NAME, SUFFIX) +ENTRY_OC = "top person %s" % OC_NAME + +BASE_OID = "1.2.3.4.5.6.7.8.9.10" + + +def _oc_definition(oid_ext, name, must=None, may=None): + oid = "%s.%d" % (BASE_OID, oid_ext) + desc = 'To test ticket 47490' + sup = 'person' + if not must: + must = MUST + if not may: + may = MAY + + new_oc = "( %s NAME '%s' DESC '%s' SUP %s AUXILIARY MUST %s MAY %s )" % (oid, name, desc, sup, must, may) + return ensure_bytes(new_oc) + +def replication_check(topology_m2): + repl = ReplicationManager(SUFFIX) + master1 = topology_m2.ms["master1"] + master2 = topology_m2.ms["master2"] + return repl.test_replication(master1, master2) + +def test_ticket47676_init(topology_m2): + """ + It adds + - Objectclass with MAY 'member' + - an entry ('bind_entry') with which we bind to test the 'SELFDN' operation + It deletes the anonymous aci + + """ + + topology_m2.ms["master1"].log.info("Add %s that allows 'member' attribute" % OC_NAME) + new_oc = _oc_definition(OC_OID_EXT, OC_NAME, must=MUST, may=MAY) + topology_m2.ms["master1"].schema.add_schema('objectClasses', new_oc) + + # entry used to bind with + topology_m2.ms["master1"].log.info("Add %s" % BIND_DN) + topology_m2.ms["master1"].add_s(Entry((BIND_DN, { + 'objectclass': "top person".split(), + 'sn': BIND_NAME, + 'cn': BIND_NAME, + 'userpassword': BIND_PW}))) + + # enable acl error logging + mod = [(ldap.MOD_REPLACE, 'nsslapd-errorlog-level', ensure_bytes(str(128 + 8192)))] # ACL + REPL + topology_m2.ms["master1"].modify_s(DN_CONFIG, mod) + topology_m2.ms["master2"].modify_s(DN_CONFIG, mod) + + # add dummy entries + for cpt in range(MAX_OTHERS): + name = "%s%d" % (OTHER_NAME, cpt) + topology_m2.ms["master1"].add_s(Entry(("cn=%s,%s" % (name, SUFFIX), { + 'objectclass': "top person".split(), + 'sn': name, + 'cn': name}))) + + +def test_ticket47676_skip_oc_at(topology_m2): + ''' + This test ADD an entry on MASTER1 where 47676 is fixed. Then it checks that entry is replicated + on MASTER2 (even if on MASTER2 47676 is NOT fixed). Then update on MASTER2. 
+ If the schema has successfully been pushed, updating Master2 should succeed + ''' + topology_m2.ms["master1"].log.info("\n\n######################### ADD ######################\n") + + # bind as 'cn=Directory manager' + topology_m2.ms["master1"].log.info("Bind as %s and add the add the entry with specific oc" % DN_DM) + topology_m2.ms["master1"].simple_bind_s(DN_DM, PASSWORD) + + # Prepare the entry with multivalued members + entry = Entry(ENTRY_DN) + entry.setValues('objectclass', 'top', 'person', 'OCticket47676') + entry.setValues('sn', ENTRY_NAME) + entry.setValues('cn', ENTRY_NAME) + entry.setValues('postalAddress', 'here') + entry.setValues('postalCode', '1234') + members = [] + for cpt in range(MAX_OTHERS): + name = "%s%d" % (OTHER_NAME, cpt) + members.append("cn=%s,%s" % (name, SUFFIX)) + members.append(BIND_DN) + entry.setValues('member', members) + + topology_m2.ms["master1"].log.info("Try to add Add %s should be successful" % ENTRY_DN) + topology_m2.ms["master1"].add_s(entry) + + # + # Now check the entry as been replicated + # + topology_m2.ms["master2"].simple_bind_s(DN_DM, PASSWORD) + topology_m2.ms["master1"].log.info("Try to retrieve %s from Master2" % ENTRY_DN) + replication_check(topology_m2) + ent = topology_m2.ms["master2"].getEntry(ENTRY_DN, ldap.SCOPE_BASE, "(objectclass=*)") + assert ent + # Now update the entry on Master2 (as DM because 47676 is possibly not fixed on M2) + topology_m2.ms["master1"].log.info("Update %s on M2" % ENTRY_DN) + mod = [(ldap.MOD_REPLACE, 'description', b'test_add')] + topology_m2.ms["master2"].modify_s(ENTRY_DN, mod) + + topology_m2.ms["master1"].simple_bind_s(DN_DM, PASSWORD) + replication_check(topology_m2) + ent = topology_m2.ms["master1"].getEntry(ENTRY_DN, ldap.SCOPE_BASE, "(objectclass=*)") + assert ensure_str(ent.getValue('description')) == 'test_add' + + +def test_ticket47676_reject_action(topology_m2): + topology_m2.ms["master1"].log.info("\n\n######################### REJECT ACTION ######################\n") + + topology_m2.ms["master1"].simple_bind_s(DN_DM, PASSWORD) + topology_m2.ms["master2"].simple_bind_s(DN_DM, PASSWORD) + + # make master1 to refuse to push the schema if OC_NAME is present in consumer schema + mod = [(ldap.MOD_ADD, 'schemaUpdateObjectclassReject', ensure_bytes('%s' % (OC_NAME)))] # ACL + REPL + topology_m2.ms["master1"].modify_s(REPL_SCHEMA_POLICY_SUPPLIER, mod) + + # Restart is required to take into account that policy + topology_m2.ms["master1"].stop(timeout=10) + topology_m2.ms["master1"].start(timeout=10) + + # Add a new OC on M1 so that schema CSN will change and M1 will try to push the schema + topology_m2.ms["master1"].log.info("Add %s on M1" % OC2_NAME) + new_oc = _oc_definition(OC2_OID_EXT, OC2_NAME, must=MUST, may=MAY) + topology_m2.ms["master1"].schema.add_schema('objectClasses', new_oc) + + # Safety checking that the schema has been updated on M1 + topology_m2.ms["master1"].log.info("Check %s is in M1" % OC2_NAME) + ent = topology_m2.ms["master1"].getEntry(SCHEMA_DN, ldap.SCOPE_BASE, "(objectclass=*)", ["objectclasses"]) + assert ent.hasAttr('objectclasses') + found = False + for objectclass in ent.getValues('objectclasses'): + if str(objectclass).find(OC2_NAME) >= 0: + found = True + break + assert found + + # Do an update of M1 so that M1 will try to push the schema + topology_m2.ms["master1"].log.info("Update %s on M1" % ENTRY_DN) + mod = [(ldap.MOD_REPLACE, 'description', b'test_reject')] + topology_m2.ms["master1"].modify_s(ENTRY_DN, mod) + + # Check the replication occured and so also M1 
attempted to push the schema + topology_m2.ms["master1"].log.info("Check updated %s on M2" % ENTRY_DN) + + replication_check(topology_m2) + ent = topology_m2.ms["master2"].getEntry(ENTRY_DN, ldap.SCOPE_BASE, "(objectclass=*)", ['description']) + assert ensure_str(ent.getValue('description')) == 'test_reject' + + # Check that the schema has not been pushed + topology_m2.ms["master1"].log.info("Check %s is not in M2" % OC2_NAME) + ent = topology_m2.ms["master2"].getEntry(SCHEMA_DN, ldap.SCOPE_BASE, "(objectclass=*)", ["objectclasses"]) + assert ent.hasAttr('objectclasses') + found = False + for objectclass in ent.getValues('objectclasses'): + if str(objectclass).find(OC2_NAME) >= 0: + found = True + break + assert not found + + topology_m2.ms["master1"].log.info("\n\n######################### NO MORE REJECT ACTION ######################\n") + + # make master1 to do no specific action on OC_NAME + mod = [(ldap.MOD_DELETE, 'schemaUpdateObjectclassReject', ensure_bytes('%s' % (OC_NAME)))] # ACL + REPL + topology_m2.ms["master1"].modify_s(REPL_SCHEMA_POLICY_SUPPLIER, mod) + + # Restart is required to take into account that policy + topology_m2.ms["master1"].stop(timeout=10) + topology_m2.ms["master1"].start(timeout=10) + + # Do an update of M1 so that M1 will try to push the schema + topology_m2.ms["master1"].log.info("Update %s on M1" % ENTRY_DN) + mod = [(ldap.MOD_REPLACE, 'description', b'test_no_more_reject')] + topology_m2.ms["master1"].modify_s(ENTRY_DN, mod) + + # Check the replication occured and so also M1 attempted to push the schema + topology_m2.ms["master1"].log.info("Check updated %s on M2" % ENTRY_DN) + + replication_check(topology_m2) + ent = topology_m2.ms["master2"].getEntry(ENTRY_DN, ldap.SCOPE_BASE, "(objectclass=*)", ['description']) + assert ensure_str(ent.getValue('description')) == 'test_no_more_reject' + # Check that the schema has been pushed + topology_m2.ms["master1"].log.info("Check %s is in M2" % OC2_NAME) + ent = topology_m2.ms["master2"].getEntry(SCHEMA_DN, ldap.SCOPE_BASE, "(objectclass=*)", ["objectclasses"]) + assert ent.hasAttr('objectclasses') + found = False + for objectclass in ent.getValues('objectclasses'): + if str(objectclass).find(OC2_NAME) >= 0: + found = True + break + assert found + + +if __name__ == '__main__': + # Run isolated + # -s for DEBUG mode + CURRENT_FILE = os.path.realpath(__file__) + pytest.main("-s %s" % CURRENT_FILE) diff --git a/dirsrvtests/tests/tickets/ticket47714_test.py b/dirsrvtests/tests/tickets/ticket47714_test.py new file mode 100644 index 0000000..7a6a564 --- /dev/null +++ b/dirsrvtests/tests/tickets/ticket47714_test.py @@ -0,0 +1,213 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2016 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. +# --- END COPYRIGHT BLOCK --- +# +import logging +import time + +import ldap +import pytest +from lib389 import Entry +from lib389._constants import * +from lib389.topologies import topology_st + +log = logging.getLogger(__name__) + +from lib389.utils import * + +# Skip on older versions +pytestmark = [pytest.mark.tier2, + pytest.mark.skipif(ds_is_older('1.3.3'), reason="Not implemented")] +ACCT_POLICY_CONFIG_DN = ('cn=config,cn=%s,cn=plugins,cn=config' % + PLUGIN_ACCT_POLICY) +ACCT_POLICY_DN = 'cn=Account Inactivation Policy,%s' % SUFFIX +# Set inactivty high to prevent timing issues with debug options or gdb on test runs. 
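+# (Assumption from the test body below: test_ticket47714_run_0 temporarily lowers
+# accountInactivityLimit to 1 second to force the lockout, then restores this value
+# so later tests are unaffected.)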
+INACTIVITY_LIMIT = '3000'
+SEARCHFILTER = '(objectclass=*)'
+
+TEST_USER = 'ticket47714user'
+TEST_USER_DN = 'uid=%s,%s' % (TEST_USER, SUFFIX)
+TEST_USER_PW = '%s' % TEST_USER
+
+
+def _header(topology_st, label):
+    topology_st.standalone.log.info("\n\n###############################################")
+    topology_st.standalone.log.info("#######")
+    topology_st.standalone.log.info("####### %s" % label)
+    topology_st.standalone.log.info("#######")
+    topology_st.standalone.log.info("###############################################")
+
+
+def test_ticket47714_init(topology_st):
+    """
+    1. Add account policy entry to the DB
+    2. Add a test user to the DB
+    """
+    _header(topology_st,
+            'Testing Ticket 47714 - [RFE] Update lastLoginTime also in Account Policy plugin if account lockout is based on passwordExpirationTime.')
+
+    topology_st.standalone.simple_bind_s(DN_DM, PASSWORD)
+
+    log.info("\n######################### Adding Account Policy entry: %s ######################\n" % ACCT_POLICY_DN)
+    topology_st.standalone.add_s(
+        Entry((ACCT_POLICY_DN, {'objectclass': "top ldapsubentry extensibleObject accountpolicy".split(),
+                                'accountInactivityLimit': INACTIVITY_LIMIT})))
+
+    log.info("\n######################### Adding Test User entry: %s ######################\n" % TEST_USER_DN)
+    topology_st.standalone.add_s(
+        Entry((TEST_USER_DN, {'objectclass': "top person organizationalPerson inetOrgPerson".split(),
+                              'cn': TEST_USER,
+                              'sn': TEST_USER,
+                              'givenname': TEST_USER,
+                              'userPassword': TEST_USER_PW,
+                              'acctPolicySubentry': ACCT_POLICY_DN})))
+
+
+def test_ticket47714_run_0(topology_st):
+    """
+    Check this change has no impact on the existing functionality.
+    1. Set account policy config without the new attr alwaysRecordLoginAttr
+    2. Bind as a test user
+    3. Bind as the test user again and check the lastLoginTime is updated
+    4. Wait longer than the accountInactivityLimit time and bind as the test user,
+       which should fail with CONSTRAINT_VIOLATION.
+ """ + _header(topology_st, 'Account Policy - No new attr alwaysRecordLoginAttr in config') + + topology_st.standalone.simple_bind_s(DN_DM, PASSWORD) + + # Modify Account Policy config entry + topology_st.standalone.modify_s(ACCT_POLICY_CONFIG_DN, [(ldap.MOD_REPLACE, 'alwaysrecordlogin', b'yes'), + (ldap.MOD_REPLACE, 'stateattrname', b'lastLoginTime'), + (ldap.MOD_REPLACE, 'altstateattrname', b'createTimestamp'), + (ldap.MOD_REPLACE, 'specattrname', b'acctPolicySubentry'), + (ldap.MOD_REPLACE, 'limitattrname', + b'accountInactivityLimit')]) + + # Enable the plugins + topology_st.standalone.plugins.enable(name=PLUGIN_ACCT_POLICY) + + topology_st.standalone.restart() + + log.info("\n######################### Bind as %s ######################\n" % TEST_USER_DN) + try: + topology_st.standalone.simple_bind_s(TEST_USER_DN, TEST_USER_PW) + except ldap.CONSTRAINT_VIOLATION as e: + log.error('CONSTRAINT VIOLATION {}'.format(e.args[0]['desc'])) + + time.sleep(2) + + topology_st.standalone.simple_bind_s(DN_DM, PASSWORD) + entry = topology_st.standalone.search_s(TEST_USER_DN, ldap.SCOPE_BASE, SEARCHFILTER, ['lastLoginTime']) + + lastLoginTime0 = entry[0].lastLoginTime + + log.info("\n######################### Bind as %s again ######################\n" % TEST_USER_DN) + try: + topology_st.standalone.simple_bind_s(TEST_USER_DN, TEST_USER_PW) + except ldap.CONSTRAINT_VIOLATION as e: + log.error('CONSTRAINT VIOLATION {}'.format(e.args[0]['desc'])) + + time.sleep(2) + + topology_st.standalone.simple_bind_s(DN_DM, PASSWORD) + entry = topology_st.standalone.search_s(TEST_USER_DN, ldap.SCOPE_BASE, SEARCHFILTER, ['lastLoginTime']) + + lastLoginTime1 = entry[0].lastLoginTime + + log.info("First lastLoginTime: %s, Second lastLoginTime: %s" % (lastLoginTime0, lastLoginTime1)) + assert lastLoginTime0 < lastLoginTime1 + + topology_st.standalone.simple_bind_s(DN_DM, PASSWORD) + + # Now, change the inactivity limit, because that should trigger the account to now be locked. This is possible because the check is "delayed" until the usage of the account. + + topology_st.standalone.modify_s(ACCT_POLICY_DN, [(ldap.MOD_REPLACE, 'accountInactivityLimit', b'1'),]) + time.sleep(2) + + entry = topology_st.standalone.search_s(ACCT_POLICY_DN, ldap.SCOPE_BASE, SEARCHFILTER) + log.info("\n######################### %s ######################\n" % ACCT_POLICY_CONFIG_DN) + log.info("accountInactivityLimit: %s" % entry[0].accountInactivityLimit) + log.info("\n######################### %s DONE ######################\n" % ACCT_POLICY_CONFIG_DN) + + log.info("\n######################### Bind as %s again to fail ######################\n" % TEST_USER_DN) + try: + topology_st.standalone.simple_bind_s(TEST_USER_DN, TEST_USER_PW) + except ldap.CONSTRAINT_VIOLATION as e: + log.info('CONSTRAINT VIOLATION {}'.format(e.args[0]['desc'])) + log.info("%s was successfully inactivated." % TEST_USER_DN) + pass + + # Now reset the value high to prevent issues with the next test. + topology_st.standalone.simple_bind_s(DN_DM, PASSWORD) + topology_st.standalone.modify_s(ACCT_POLICY_DN, [(ldap.MOD_REPLACE, 'accountInactivityLimit', ensure_bytes(INACTIVITY_LIMIT)),]) + + +def test_ticket47714_run_1(topology_st): + """ + Verify a new config attr alwaysRecordLoginAttr + 1. Set account policy config with the new attr alwaysRecordLoginAttr: lastLoginTime + Note: bogus attr is set to stateattrname. + altstateattrname type value is used for checking whether the account is idle or not. + 2. Bind as a test user + 3. 
Bind as the test user again and check the alwaysRecordLoginAttr: lastLoginTime is updated + """ + _header(topology_st, 'Account Policy - With new attr alwaysRecordLoginAttr in config') + + topology_st.standalone.simple_bind_s(DN_DM, PASSWORD) + topology_st.standalone.modify_s(TEST_USER_DN, [(ldap.MOD_DELETE, 'lastLoginTime', None)]) + + # Modify Account Policy config entry + topology_st.standalone.modify_s(ACCT_POLICY_CONFIG_DN, [(ldap.MOD_REPLACE, 'alwaysrecordlogin', b'yes'), + (ldap.MOD_REPLACE, 'stateattrname', b'bogus'), + (ldap.MOD_REPLACE, 'altstateattrname', b'modifyTimestamp'), + ( + ldap.MOD_REPLACE, 'alwaysRecordLoginAttr', b'lastLoginTime'), + (ldap.MOD_REPLACE, 'specattrname', b'acctPolicySubentry'), + (ldap.MOD_REPLACE, 'limitattrname', + b'accountInactivityLimit')]) + + # Enable the plugins + topology_st.standalone.plugins.enable(name=PLUGIN_ACCT_POLICY) + + topology_st.standalone.restart() + + log.info("\n######################### Bind as %s ######################\n" % TEST_USER_DN) + try: + topology_st.standalone.simple_bind_s(TEST_USER_DN, TEST_USER_PW) + except ldap.CONSTRAINT_VIOLATION as e: + log.error('CONSTRAINT VIOLATION {}'.format(e.args[0]['desc'])) + + time.sleep(1) + + topology_st.standalone.simple_bind_s(DN_DM, PASSWORD) + entry = topology_st.standalone.search_s(TEST_USER_DN, ldap.SCOPE_BASE, SEARCHFILTER, ['lastLoginTime']) + lastLoginTime0 = entry[0].lastLoginTime + + log.info("\n######################### Bind as %s again ######################\n" % TEST_USER_DN) + try: + topology_st.standalone.simple_bind_s(TEST_USER_DN, TEST_USER_PW) + except ldap.CONSTRAINT_VIOLATION as e: + log.error('CONSTRAINT VIOLATION {}'.format(e.args[0]['desc'])) + + time.sleep(1) + + topology_st.standalone.simple_bind_s(DN_DM, PASSWORD) + entry = topology_st.standalone.search_s(TEST_USER_DN, ldap.SCOPE_BASE, SEARCHFILTER, ['lastLoginTime']) + lastLoginTime1 = entry[0].lastLoginTime + + log.info("First lastLoginTime: %s, Second lastLoginTime: %s" % (lastLoginTime0, lastLoginTime1)) + assert lastLoginTime0 < lastLoginTime1 + + topology_st.standalone.log.info("ticket47714 was successfully verified.") + + +if __name__ == '__main__': + # Run isolated + # -s for DEBUG mode + CURRENT_FILE = os.path.realpath(__file__) + pytest.main("-s %s" % CURRENT_FILE) diff --git a/dirsrvtests/tests/tickets/ticket47721_test.py b/dirsrvtests/tests/tickets/ticket47721_test.py new file mode 100644 index 0000000..44cf38b --- /dev/null +++ b/dirsrvtests/tests/tickets/ticket47721_test.py @@ -0,0 +1,293 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2016 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. 
+# --- END COPYRIGHT BLOCK --- +# +''' +Created on Nov 7, 2013 + +@author: tbordaz +''' +import logging +import time + +import ldap +import pytest +from lib389 import Entry +from lib389._constants import * +from lib389.topologies import topology_m2 +from lib389.replica import ReplicationManager +from lib389.utils import * + +pytestmark = pytest.mark.tier2 + +logging.getLogger(__name__).setLevel(logging.DEBUG) +log = logging.getLogger(__name__) + +SCHEMA_DN = "cn=schema" +TEST_REPL_DN = "cn=test_repl, %s" % SUFFIX +OC_NAME = 'OCticket47721' +OC_OID_EXT = 2 +MUST = "(postalAddress $ postalCode)" +MAY = "(member $ street)" + +OC2_NAME = 'OC2ticket47721' +OC2_OID_EXT = 3 +MUST_2 = "(postalAddress $ postalCode)" +MAY_2 = "(member $ street)" + +REPL_SCHEMA_POLICY_CONSUMER = "cn=consumerUpdatePolicy,cn=replSchema,cn=config" +REPL_SCHEMA_POLICY_SUPPLIER = "cn=supplierUpdatePolicy,cn=replSchema,cn=config" + +OTHER_NAME = 'other_entry' +MAX_OTHERS = 10 + +BIND_NAME = 'bind_entry' +BIND_DN = 'cn=%s, %s' % (BIND_NAME, SUFFIX) +BIND_PW = 'password' + +ENTRY_NAME = 'test_entry' +ENTRY_DN = 'cn=%s, %s' % (ENTRY_NAME, SUFFIX) +ENTRY_OC = "top person %s" % OC_NAME + +BASE_OID = "1.2.3.4.5.6.7.8.9.10" + +SLEEP_INTERVAL = 60 + + +def _add_custom_at_definition(name='ATticket47721'): + new_at = "( %s-oid NAME '%s' DESC 'test AT ticket 47721' SYNTAX 1.3.6.1.4.1.1466.115.121.1.15 X-ORIGIN ( 'Test 47721' 'user defined' ) )" % ( + name, name) + return ensure_bytes(new_at) + + +def _chg_std_at_defintion(): + new_at = "( 2.16.840.1.113730.3.1.569 NAME 'cosPriority' DESC 'Netscape defined attribute type' SYNTAX 1.3.6.1.4.1.1466.115.121.1.27 X-ORIGIN 'Netscape Directory Server' )" + return ensure_bytes(new_at) + + +def _add_custom_oc_defintion(name='OCticket47721'): + new_oc = "( %s-oid NAME '%s' DESC 'An group of related automount objects' SUP top STRUCTURAL MUST ou X-ORIGIN 'draft-howard-rfc2307bis' )" % ( + name, name) + return ensure_bytes(new_oc) + + +def _chg_std_oc_defintion(): + new_oc = "( 5.3.6.1.1.1.2.0 NAME 'trustAccount' DESC 'Sets trust accounts information' SUP top AUXILIARY MUST trustModel MAY ( accessTo $ ou ) X-ORIGIN 'nss_ldap/pam_ldap' )" + return ensure_bytes(new_oc) + +def replication_check(topology_m2): + repl = ReplicationManager(SUFFIX) + master1 = topology_m2.ms["master1"] + master2 = topology_m2.ms["master2"] + return repl.test_replication(master1, master2) + +def test_ticket47721_init(topology_m2): + """ + It adds + - Objectclass with MAY 'member' + - an entry ('bind_entry') with which we bind to test the 'SELFDN' operation + It deletes the anonymous aci + + """ + + # entry used to bind with + topology_m2.ms["master1"].log.info("Add %s" % BIND_DN) + topology_m2.ms["master1"].add_s(Entry((BIND_DN, { + 'objectclass': "top person".split(), + 'sn': BIND_NAME, + 'cn': BIND_NAME, + 'userpassword': BIND_PW}))) + + # enable repl error logging + mod = [(ldap.MOD_REPLACE, 'nsslapd-errorlog-level', ensure_bytes(str(8192)))] # REPL logging + topology_m2.ms["master1"].modify_s(DN_CONFIG, mod) + topology_m2.ms["master2"].modify_s(DN_CONFIG, mod) + + # add dummy entries + for cpt in range(MAX_OTHERS): + name = "%s%d" % (OTHER_NAME, cpt) + topology_m2.ms["master1"].add_s(Entry(("cn=%s,%s" % (name, SUFFIX), { + 'objectclass': "top person".split(), + 'sn': name, + 'cn': name}))) + + +def test_ticket47721_0(topology_m2): + dn = "cn=%s0,%s" % (OTHER_NAME, SUFFIX) + replication_check(topology_m2) + ent = topology_m2.ms["master2"].getEntry(dn, ldap.SCOPE_BASE, "(objectclass=*)") + assert ent + + +def 
test_ticket47721_1(topology_m2): + log.info('Running test 1...') + # topology_m2.ms["master1"].log.info("Attach debugger\n\n") + # time.sleep(30) + + new = _add_custom_at_definition() + topology_m2.ms["master1"].log.info("Add (M2) %s " % new) + topology_m2.ms["master2"].schema.add_schema('attributetypes', new) + + new = _chg_std_at_defintion() + topology_m2.ms["master1"].log.info("Chg (M2) %s " % new) + topology_m2.ms["master2"].schema.add_schema('attributetypes', new) + + new = _add_custom_oc_defintion() + topology_m2.ms["master1"].log.info("Add (M2) %s " % new) + topology_m2.ms["master2"].schema.add_schema('objectClasses', new) + + new = _chg_std_oc_defintion() + topology_m2.ms["master1"].log.info("Chg (M2) %s " % new) + topology_m2.ms["master2"].schema.add_schema('objectClasses', new) + + mod = [(ldap.MOD_REPLACE, 'description', b'Hello world 1')] + dn = "cn=%s0,%s" % (OTHER_NAME, SUFFIX) + topology_m2.ms["master2"].modify_s(dn, mod) + + replication_check(topology_m2) + ent = topology_m2.ms["master1"].getEntry(dn, ldap.SCOPE_BASE, "(objectclass=*)") + assert ensure_str(ent.getValue('description')) == 'Hello world 1' + + time.sleep(2) + schema_csn_master1 = topology_m2.ms["master1"].schema.get_schema_csn() + schema_csn_master2 = topology_m2.ms["master2"].schema.get_schema_csn() + log.debug('Master 1 schemaCSN: %s' % schema_csn_master1) + log.debug('Master 2 schemaCSN: %s' % schema_csn_master2) + + +def test_ticket47721_2(topology_m2): + log.info('Running test 2...') + + mod = [(ldap.MOD_REPLACE, 'description', b'Hello world 2')] + dn = "cn=%s0,%s" % (OTHER_NAME, SUFFIX) + topology_m2.ms["master1"].modify_s(dn, mod) + + replication_check(topology_m2) + ent = topology_m2.ms["master2"].getEntry(dn, ldap.SCOPE_BASE, "(objectclass=*)") + assert ensure_str(ent.getValue('description')) == 'Hello world 2' + + time.sleep(2) + schema_csn_master1 = topology_m2.ms["master1"].schema.get_schema_csn() + schema_csn_master2 = topology_m2.ms["master2"].schema.get_schema_csn() + log.debug('Master 1 schemaCSN: %s' % schema_csn_master1) + log.debug('Master 2 schemaCSN: %s' % schema_csn_master2) + if schema_csn_master1 != schema_csn_master2: + # We need to give the server a little more time, then check it again + log.info('Schema CSNs are not in sync yet: m1 (%s) vs m2 (%s), wait a little...' 
+ % (schema_csn_master1, schema_csn_master2)) + time.sleep(SLEEP_INTERVAL) + schema_csn_master1 = topology_m2.ms["master1"].schema.get_schema_csn() + schema_csn_master2 = topology_m2.ms["master2"].schema.get_schema_csn() + + assert schema_csn_master1 is not None + assert schema_csn_master1 == schema_csn_master2 + + +def test_ticket47721_3(topology_m2): + ''' + Check that the supplier can update its schema from consumer schema + Update M2 schema, then trigger a replication M1->M2 + ''' + log.info('Running test 3...') + + # stop RA M2->M1, so that M1 can only learn being a supplier + ents = topology_m2.ms["master2"].agreement.list(suffix=SUFFIX) + assert len(ents) == 1 + topology_m2.ms["master2"].agreement.pause(ents[0].dn) + + new = _add_custom_at_definition('ATtest3') + topology_m2.ms["master1"].log.info("Update schema (M2) %s " % new) + topology_m2.ms["master2"].schema.add_schema('attributetypes', new) + time.sleep(1) + + new = _add_custom_oc_defintion('OCtest3') + topology_m2.ms["master1"].log.info("Update schema (M2) %s " % new) + topology_m2.ms["master2"].schema.add_schema('objectClasses', new) + time.sleep(1) + + mod = [(ldap.MOD_REPLACE, 'description', b'Hello world 3')] + dn = "cn=%s0,%s" % (OTHER_NAME, SUFFIX) + topology_m2.ms["master1"].modify_s(dn, mod) + + replication_check(topology_m2) + ent = topology_m2.ms["master2"].getEntry(dn, ldap.SCOPE_BASE, "(objectclass=*)") + assert ensure_str(ent.getValue('description')) == 'Hello world 3' + + time.sleep(5) + schema_csn_master1 = topology_m2.ms["master1"].schema.get_schema_csn() + schema_csn_master2 = topology_m2.ms["master2"].schema.get_schema_csn() + log.debug('Master 1 schemaCSN: %s' % schema_csn_master1) + log.debug('Master 2 schemaCSN: %s' % schema_csn_master2) + if schema_csn_master1 == schema_csn_master2: + # We need to give the server a little more time, then check it again + log.info('Schema CSNs are not in sync yet: m1 (%s) vs m2 (%s), wait a little...' + % (schema_csn_master1, schema_csn_master2)) + time.sleep(SLEEP_INTERVAL) + schema_csn_master1 = topology_m2.ms["master1"].schema.get_schema_csn() + schema_csn_master2 = topology_m2.ms["master2"].schema.get_schema_csn() + + assert schema_csn_master1 is not None + # schema csn on M2 is larger that on M1. M1 only took the new definitions + assert schema_csn_master1 != schema_csn_master2 + + +def test_ticket47721_4(topology_m2): + ''' + Here M2->M1 agreement is disabled. + with test_ticket47721_3, M1 schema and M2 should be identical BUT + the nsschemacsn is M2>M1. But as the RA M2->M1 is disabled, M1 keeps its schemacsn. + Update schema on M2 (nsschemaCSN update), update M2. 
Check they have the same schemacsn + ''' + log.info('Running test 4...') + + new = _add_custom_at_definition('ATtest4') + topology_m2.ms["master1"].log.info("Update schema (M1) %s " % new) + topology_m2.ms["master1"].schema.add_schema('attributetypes', new) + + new = _add_custom_oc_defintion('OCtest4') + topology_m2.ms["master1"].log.info("Update schema (M1) %s " % new) + topology_m2.ms["master1"].schema.add_schema('objectClasses', new) + + topology_m2.ms["master1"].log.info("trigger replication M1->M2: to update the schema") + mod = [(ldap.MOD_REPLACE, 'description', b'Hello world 4')] + dn = "cn=%s0,%s" % (OTHER_NAME, SUFFIX) + topology_m2.ms["master1"].modify_s(dn, mod) + + replication_check(topology_m2) + ent = topology_m2.ms["master2"].getEntry(dn, ldap.SCOPE_BASE, "(objectclass=*)") + assert ensure_str(ent.getValue('description')) == 'Hello world 4' + + topology_m2.ms["master1"].log.info("trigger replication M1->M2: to push the schema") + mod = [(ldap.MOD_REPLACE, 'description', b'Hello world 5')] + dn = "cn=%s0,%s" % (OTHER_NAME, SUFFIX) + topology_m2.ms["master1"].modify_s(dn, mod) + + replication_check(topology_m2) + ent = topology_m2.ms["master2"].getEntry(dn, ldap.SCOPE_BASE, "(objectclass=*)") + assert ensure_str(ent.getValue('description')) == 'Hello world 5' + + time.sleep(2) + schema_csn_master1 = topology_m2.ms["master1"].schema.get_schema_csn() + schema_csn_master2 = topology_m2.ms["master2"].schema.get_schema_csn() + log.debug('Master 1 schemaCSN: %s' % schema_csn_master1) + log.debug('Master 2 schemaCSN: %s' % schema_csn_master2) + if schema_csn_master1 != schema_csn_master2: + # We need to give the server a little more time, then check it again + log.info('Schema CSNs are incorrectly in sync, wait a little...') + time.sleep(SLEEP_INTERVAL) + schema_csn_master1 = topology_m2.ms["master1"].schema.get_schema_csn() + schema_csn_master2 = topology_m2.ms["master2"].schema.get_schema_csn() + + assert schema_csn_master1 is not None + assert schema_csn_master1 == schema_csn_master2 + + +if __name__ == '__main__': + # Run isolated + # -s for DEBUG mode + CURRENT_FILE = os.path.realpath(__file__) + pytest.main("-s %s" % CURRENT_FILE) diff --git a/dirsrvtests/tests/tickets/ticket47781_test.py b/dirsrvtests/tests/tickets/ticket47781_test.py new file mode 100644 index 0000000..3f8d808 --- /dev/null +++ b/dirsrvtests/tests/tickets/ticket47781_test.py @@ -0,0 +1,104 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2016 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. +# --- END COPYRIGHT BLOCK --- +# +import logging + +import pytest +from lib389.tasks import * +from lib389.topologies import topology_st +from lib389.replica import ReplicationManager + +from lib389._constants import (defaultProperties, DEFAULT_SUFFIX, ReplicaRole, + REPLICAID_MASTER_1, REPLICATION_BIND_DN, REPLICATION_BIND_PW, + REPLICATION_BIND_METHOD, REPLICATION_TRANSPORT, RA_NAME, + RA_BINDDN, RA_BINDPW, RA_METHOD, RA_TRANSPORT_PROT) + +pytestmark = pytest.mark.tier2 + +log = logging.getLogger(__name__) + + +def test_ticket47781(topology_st): + """ + Testing for a deadlock after doing an online import of an LDIF with + replication data. The replication agreement should be invalid. 
+ """ + + log.info('Testing Ticket 47781 - Testing for deadlock after importing LDIF with replication data') + + master = topology_st.standalone + repl = ReplicationManager(DEFAULT_SUFFIX) + repl.create_first_master(master) + + properties = {RA_NAME: r'meTo_$host:$port', + RA_BINDDN: defaultProperties[REPLICATION_BIND_DN], + RA_BINDPW: defaultProperties[REPLICATION_BIND_PW], + RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD], + RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]} + # The agreement should point to a server that does NOT exist (invalid port) + repl_agreement = master.agreement.create(suffix=DEFAULT_SUFFIX, + host=master.host, + port=5555, + properties=properties) + + # + # add two entries + # + log.info('Adding two entries...') + + master.add_s(Entry(('cn=entry1,dc=example,dc=com', { + 'objectclass': 'top person'.split(), + 'sn': 'user', + 'cn': 'entry1'}))) + + master.add_s(Entry(('cn=entry2,dc=example,dc=com', { + 'objectclass': 'top person'.split(), + 'sn': 'user', + 'cn': 'entry2'}))) + + # + # export the replication ldif + # + log.info('Exporting replication ldif...') + args = {EXPORT_REPL_INFO: True} + exportTask = Tasks(master) + exportTask.exportLDIF(DEFAULT_SUFFIX, None, "/tmp/export.ldif", args) + + # + # Restart the server + # + log.info('Restarting server...') + master.stop() + master.start() + + # + # Import the ldif + # + log.info('Import replication LDIF file...') + importTask = Tasks(master) + args = {TASK_WAIT: True} + importTask.importLDIF(DEFAULT_SUFFIX, None, "/tmp/export.ldif", args) + os.remove("/tmp/export.ldif") + + # + # Search for tombstones - we should not hang/timeout + # + log.info('Search for tombstone entries(should find one and not hang)...') + master.set_option(ldap.OPT_NETWORK_TIMEOUT, 5) + master.set_option(ldap.OPT_TIMEOUT, 5) + entries = master.search_s(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE, 'objectclass=nsTombstone') + if not entries: + log.fatal('Search failed to find any entries.') + assert PR_False + + +if __name__ == '__main__': + # Run isolated + # -s for DEBUG mode + CURRENT_FILE = os.path.realpath(__file__) + pytest.main("-s %s" % CURRENT_FILE) diff --git a/dirsrvtests/tests/tickets/ticket47787_test.py b/dirsrvtests/tests/tickets/ticket47787_test.py new file mode 100644 index 0000000..7b2c104 --- /dev/null +++ b/dirsrvtests/tests/tickets/ticket47787_test.py @@ -0,0 +1,428 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2016 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. 
+# --- END COPYRIGHT BLOCK --- +# +''' +Created on April 14, 2014 + +@author: tbordaz +''' +import logging +import re +import time + +import ldap +import pytest +from lib389 import Entry +from lib389._constants import * +from lib389.topologies import topology_m2 +from lib389.utils import * + +pytestmark = pytest.mark.tier2 + +logging.getLogger(__name__).setLevel(logging.DEBUG) +log = logging.getLogger(__name__) + +# set this flag to False so that it will assert on failure _status_entry_both_server +DEBUG_FLAG = False + +TEST_REPL_DN = "cn=test_repl, %s" % SUFFIX + +STAGING_CN = "staged user" +PRODUCTION_CN = "accounts" +EXCEPT_CN = "excepts" + +STAGING_DN = "cn=%s,%s" % (STAGING_CN, SUFFIX) +PRODUCTION_DN = "cn=%s,%s" % (PRODUCTION_CN, SUFFIX) +PROD_EXCEPT_DN = "cn=%s,%s" % (EXCEPT_CN, PRODUCTION_DN) + +STAGING_PATTERN = "cn=%s*,%s" % (STAGING_CN[:2], SUFFIX) +PRODUCTION_PATTERN = "cn=%s*,%s" % (PRODUCTION_CN[:2], SUFFIX) +BAD_STAGING_PATTERN = "cn=bad*,%s" % (SUFFIX) +BAD_PRODUCTION_PATTERN = "cn=bad*,%s" % (SUFFIX) + +BIND_CN = "bind_entry" +BIND_DN = "cn=%s,%s" % (BIND_CN, SUFFIX) +BIND_PW = "password" + +NEW_ACCOUNT = "new_account" +MAX_ACCOUNTS = 20 + +CONFIG_MODDN_ACI_ATTR = "nsslapd-moddn-aci" + + +def _bind_manager(server): + server.log.info("Bind as %s " % DN_DM) + server.simple_bind_s(DN_DM, PASSWORD) + + +def _bind_normal(server): + server.log.info("Bind as %s " % BIND_DN) + server.simple_bind_s(BIND_DN, BIND_PW) + + +def _header(topology_m2, label): + topology_m2.ms["master1"].log.info("\n\n###############################################") + topology_m2.ms["master1"].log.info("#######") + topology_m2.ms["master1"].log.info("####### %s" % label) + topology_m2.ms["master1"].log.info("#######") + topology_m2.ms["master1"].log.info("###############################################") + + +def _status_entry_both_server(topology_m2, name=None, desc=None, debug=True): + if not name: + return + topology_m2.ms["master1"].log.info("\n\n######################### Tombstone on M1 ######################\n") + attr = 'description' + found = False + attempt = 0 + while not found and attempt < 10: + ent_m1 = _find_tombstone(topology_m2.ms["master1"], SUFFIX, 'sn', name) + if attr in ent_m1.getAttrs(): + found = True + else: + time.sleep(1) + attempt = attempt + 1 + assert ent_m1 + + topology_m2.ms["master1"].log.info("\n\n######################### Tombstone on M2 ######################\n") + ent_m2 = _find_tombstone(topology_m2.ms["master2"], SUFFIX, 'sn', name) + assert ent_m2 + + topology_m2.ms["master1"].log.info("\n\n######################### Description ######################\n%s\n" % desc) + topology_m2.ms["master1"].log.info("M1 only\n") + for attr in ent_m1.getAttrs(): + + if not debug: + assert attr in ent_m2.getAttrs() + + if not attr in ent_m2.getAttrs(): + topology_m2.ms["master1"].log.info(" %s" % attr) + for val in ent_m1.getValues(attr): + topology_m2.ms["master1"].log.info(" %s" % val) + + topology_m2.ms["master1"].log.info("M2 only\n") + for attr in ent_m2.getAttrs(): + + if not debug: + assert attr in ent_m1.getAttrs() + + if not attr in ent_m1.getAttrs(): + topology_m2.ms["master1"].log.info(" %s" % attr) + for val in ent_m2.getValues(attr): + topology_m2.ms["master1"].log.info(" %s" % val) + + topology_m2.ms["master1"].log.info("M1 differs M2\n") + + if not debug: + assert ent_m1.dn == ent_m2.dn + + if ent_m1.dn != ent_m2.dn: + topology_m2.ms["master1"].log.info(" M1[dn] = %s\n M2[dn] = %s" % (ent_m1.dn, ent_m2.dn)) + + for attr1 in ent_m1.getAttrs(): + if attr1 in 
ent_m2.getAttrs(): + for val1 in ent_m1.getValues(attr1): + found = False + for val2 in ent_m2.getValues(attr1): + if val1 == val2: + found = True + break + + if not debug: + assert found + + if not found: + topology_m2.ms["master1"].log.info(" M1[%s] = %s" % (attr1, val1)) + + for attr2 in ent_m2.getAttrs(): + if attr2 in ent_m1.getAttrs(): + for val2 in ent_m2.getValues(attr2): + found = False + for val1 in ent_m1.getValues(attr2): + if val2 == val1: + found = True + break + + if not debug: + assert found + + if not found: + topology_m2.ms["master1"].log.info(" M2[%s] = %s" % (attr2, val2)) + + +def _pause_RAs(topology_m2): + topology_m2.ms["master1"].log.info("\n\n######################### Pause RA M1<->M2 ######################\n") + ents = topology_m2.ms["master1"].agreement.list(suffix=SUFFIX) + assert len(ents) == 1 + topology_m2.ms["master1"].agreement.pause(ents[0].dn) + + ents = topology_m2.ms["master2"].agreement.list(suffix=SUFFIX) + assert len(ents) == 1 + topology_m2.ms["master2"].agreement.pause(ents[0].dn) + + +def _resume_RAs(topology_m2): + topology_m2.ms["master1"].log.info("\n\n######################### resume RA M1<->M2 ######################\n") + ents = topology_m2.ms["master1"].agreement.list(suffix=SUFFIX) + assert len(ents) == 1 + topology_m2.ms["master1"].agreement.resume(ents[0].dn) + + ents = topology_m2.ms["master2"].agreement.list(suffix=SUFFIX) + assert len(ents) == 1 + topology_m2.ms["master2"].agreement.resume(ents[0].dn) + + +def _find_tombstone(instance, base, attr, value): + # + # we can not use a filter with a (&(objeclass=nsTombstone)(sn=name)) because + # tombstone are not index in 'sn' so 'sn=name' will return NULL + # and even if tombstone are indexed for objectclass the '&' will set + # the candidate list to NULL + # + filt = '(objectclass=%s)' % REPLICA_OC_TOMBSTONE + ents = instance.search_s(base, ldap.SCOPE_SUBTREE, filt) + # found = False + for ent in ents: + if ent.hasAttr(attr): + for val in ent.getValues(attr): + if ensure_str(val) == value: + instance.log.debug("tombstone found: %r" % ent) + return ent + return None + + +def _delete_entry(instance, entry_dn, name): + instance.log.info("\n\n######################### DELETE %s (M1) ######################\n" % name) + + # delete the entry + instance.delete_s(entry_dn) + ent = _find_tombstone(instance, SUFFIX, 'sn', name) + assert ent is not None + + +def _mod_entry(instance, entry_dn, attr, value): + instance.log.info("\n\n######################### MOD %s (M2) ######################\n" % entry_dn) + mod = [(ldap.MOD_REPLACE, attr, ensure_bytes(value))] + instance.modify_s(entry_dn, mod) + + +def _modrdn_entry(instance=None, entry_dn=None, new_rdn=None, del_old=0, new_superior=None): + assert instance is not None + assert entry_dn is not None + + if not new_rdn: + pattern = 'cn=(.*),(.*)' + rdnre = re.compile(pattern) + match = rdnre.match(entry_dn) + old_value = match.group(1) + new_rdn_val = "%s_modrdn" % old_value + new_rdn = "cn=%s" % new_rdn_val + + instance.log.info("\n\n######################### MODRDN %s (M2) ######################\n" % new_rdn) + if new_superior: + instance.rename_s(entry_dn, new_rdn, newsuperior=new_superior, delold=del_old) + else: + instance.rename_s(entry_dn, new_rdn, delold=del_old) + + +def _check_entry_exists(instance, entry_dn): + loop = 0 + ent = None + while loop <= 10: + try: + ent = instance.getEntry(entry_dn, ldap.SCOPE_BASE, "(objectclass=*)") + break + except ldap.NO_SUCH_OBJECT: + time.sleep(1) + loop += 1 + if ent is None: + assert False + + +def 
_check_mod_received(instance, base, filt, attr, value): + instance.log.info( + "\n\n######################### Check MOD replicated on %s ######################\n" % instance.serverid) + loop = 0 + while loop <= 10: + ent = instance.getEntry(base, ldap.SCOPE_SUBTREE, filt) + if ent.hasAttr(attr) and ent.getValue(attr) == value: + break + time.sleep(1) + loop += 1 + assert loop <= 10 + + +def _check_replication(topology_m2, entry_dn): + # prepare the filter to retrieve the entry + filt = entry_dn.split(',')[0] + + topology_m2.ms["master1"].log.info("\n######################### Check replicat M1->M2 ######################\n") + loop = 0 + while loop <= 10: + attr = 'description' + value = 'test_value_%d' % loop + mod = [(ldap.MOD_REPLACE, attr, ensure_bytes(value))] + topology_m2.ms["master1"].modify_s(entry_dn, mod) + _check_mod_received(topology_m2.ms["master2"], SUFFIX, filt, attr, value) + loop += 1 + + topology_m2.ms["master1"].log.info("\n######################### Check replicat M2->M1 ######################\n") + loop = 0 + while loop <= 10: + attr = 'description' + value = 'test_value_%d' % loop + mod = [(ldap.MOD_REPLACE, attr, ensure_bytes(value))] + topology_m2.ms["master2"].modify_s(entry_dn, mod) + _check_mod_received(topology_m2.ms["master1"], SUFFIX, filt, attr, value) + loop += 1 + + +def test_ticket47787_init(topology_m2): + """ + Creates + - a staging DIT + - a production DIT + - add accounts in staging DIT + + """ + + topology_m2.ms["master1"].log.info("\n\n######################### INITIALIZATION ######################\n") + + # entry used to bind with + topology_m2.ms["master1"].log.info("Add %s" % BIND_DN) + topology_m2.ms["master1"].add_s(Entry((BIND_DN, { + 'objectclass': "top person".split(), + 'sn': BIND_CN, + 'cn': BIND_CN, + 'userpassword': BIND_PW}))) + + # DIT for staging + topology_m2.ms["master1"].log.info("Add %s" % STAGING_DN) + topology_m2.ms["master1"].add_s(Entry((STAGING_DN, { + 'objectclass': "top organizationalRole".split(), + 'cn': STAGING_CN, + 'description': "staging DIT"}))) + + # DIT for production + topology_m2.ms["master1"].log.info("Add %s" % PRODUCTION_DN) + topology_m2.ms["master1"].add_s(Entry((PRODUCTION_DN, { + 'objectclass': "top organizationalRole".split(), + 'cn': PRODUCTION_CN, + 'description': "production DIT"}))) + + # enable replication error logging + mod = [(ldap.MOD_REPLACE, 'nsslapd-errorlog-level', b'8192')] + topology_m2.ms["master1"].modify_s(DN_CONFIG, mod) + topology_m2.ms["master2"].modify_s(DN_CONFIG, mod) + + # add dummy entries in the staging DIT + for cpt in range(MAX_ACCOUNTS): + name = "%s%d" % (NEW_ACCOUNT, cpt) + topology_m2.ms["master1"].add_s(Entry(("cn=%s,%s" % (name, STAGING_DN), { + 'objectclass': "top person".split(), + 'sn': name, + 'cn': name}))) + + +def test_ticket47787_2(topology_m2): + ''' + Disable replication so that updates are not replicated + Delete an entry on M1. Modrdn it on M2 (chg rdn + delold=0 + same superior). + update a test entry on M2 + Reenable the RA. 
+ checks that entry was deleted on M2 (with the modified RDN) + checks that test entry was replicated on M1 (replication M2->M1 not broken by modrdn) + ''' + + _header(topology_m2, "test_ticket47787_2") + _bind_manager(topology_m2.ms["master1"]) + _bind_manager(topology_m2.ms["master2"]) + + # entry to test the replication is still working + name = "%s%d" % (NEW_ACCOUNT, MAX_ACCOUNTS - 1) + test_rdn = "cn=%s" % (name) + testentry_dn = "%s,%s" % (test_rdn, STAGING_DN) + + name = "%s%d" % (NEW_ACCOUNT, MAX_ACCOUNTS - 2) + test2_rdn = "cn=%s" % (name) + testentry2_dn = "%s,%s" % (test2_rdn, STAGING_DN) + + # value of updates to test the replication both ways + attr = 'description' + value = 'test_ticket47787_2' + + # entry for the modrdn + name = "%s%d" % (NEW_ACCOUNT, 1) + rdn = "cn=%s" % (name) + entry_dn = "%s,%s" % (rdn, STAGING_DN) + + # created on M1, wait the entry exists on M2 + _check_entry_exists(topology_m2.ms["master2"], entry_dn) + _check_entry_exists(topology_m2.ms["master2"], testentry_dn) + + _pause_RAs(topology_m2) + + # Delete 'entry_dn' on M1. + # dummy update is only have a first CSN before the DEL + # else the DEL will be in min_csn RUV and make diagnostic a bit more complex + _mod_entry(topology_m2.ms["master1"], testentry2_dn, attr, 'dummy') + _delete_entry(topology_m2.ms["master1"], entry_dn, name) + _mod_entry(topology_m2.ms["master1"], testentry2_dn, attr, value) + + time.sleep(1) # important to have MOD.csn != DEL.csn + + # MOD 'entry_dn' on M1. + # dummy update is only have a first CSN before the MOD entry_dn + # else the DEL will be in min_csn RUV and make diagnostic a bit more complex + _mod_entry(topology_m2.ms["master2"], testentry_dn, attr, 'dummy') + _mod_entry(topology_m2.ms["master2"], entry_dn, attr, value) + _mod_entry(topology_m2.ms["master2"], testentry_dn, attr, value) + + _resume_RAs(topology_m2) + + topology_m2.ms["master1"].log.info( + "\n\n######################### Check DEL replicated on M2 ######################\n") + loop = 0 + while loop <= 10: + ent = _find_tombstone(topology_m2.ms["master2"], SUFFIX, 'sn', name) + if ent: + break + time.sleep(1) + loop += 1 + assert loop <= 10 + assert ent + + # the following checks are not necessary + # as this bug is only for failing replicated MOD (entry_dn) on M1 + # _check_mod_received(topology_m2.ms["master1"], SUFFIX, "(%s)" % (test_rdn), attr, value) + # _check_mod_received(topology_m2.ms["master2"], SUFFIX, "(%s)" % (test2_rdn), attr, value) + # + # _check_replication(topology_m2, testentry_dn) + + _status_entry_both_server(topology_m2, name=name, desc="DEL M1 - MOD M2", debug=DEBUG_FLAG) + + topology_m2.ms["master1"].log.info( + "\n\n######################### Check MOD replicated on M1 ######################\n") + loop = 0 + while loop <= 10: + ent = _find_tombstone(topology_m2.ms["master1"], SUFFIX, 'sn', name) + if ent: + break + time.sleep(1) + loop += 1 + assert loop <= 10 + assert ent + assert ent.hasAttr(attr) + assert ensure_str(ent.getValue(attr)) == value + + +if __name__ == '__main__': + # Run isolated + # -s for DEBUG mode + CURRENT_FILE = os.path.realpath(__file__) + pytest.main("-s %s" % CURRENT_FILE) diff --git a/dirsrvtests/tests/tickets/ticket47808_test.py b/dirsrvtests/tests/tickets/ticket47808_test.py new file mode 100644 index 0000000..8ca75eb --- /dev/null +++ b/dirsrvtests/tests/tickets/ticket47808_test.py @@ -0,0 +1,101 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2016 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). 
+# See LICENSE for details. +# --- END COPYRIGHT BLOCK --- +# +import logging + +import ldap +import pytest +from lib389 import Entry +from lib389._constants import * +from lib389.topologies import topology_st +from lib389.utils import * + +pytestmark = pytest.mark.tier2 + +log = logging.getLogger(__name__) + +ATTRIBUTE_UNIQUENESS_PLUGIN = 'cn=attribute uniqueness,cn=plugins,cn=config' +ENTRY_NAME = 'test_entry' + + +def test_ticket47808_run(topology_st): + """ + It enables attribute uniqueness plugin with sn as a unique attribute + Add an entry 1 with sn = ENTRY_NAME + Add an entry 2 with sn = ENTRY_NAME + If the second add does not crash the server and the following search found none, + the bug is fixed. + """ + + # bind as directory manager + topology_st.standalone.log.info("Bind as %s" % DN_DM) + topology_st.standalone.simple_bind_s(DN_DM, PASSWORD) + + topology_st.standalone.log.info("\n\n######################### SETUP ATTR UNIQ PLUGIN ######################\n") + + # enable attribute uniqueness plugin + mod = [(ldap.MOD_REPLACE, 'nsslapd-pluginEnabled', b'on'), (ldap.MOD_REPLACE, 'nsslapd-pluginarg0', b'sn'), + (ldap.MOD_REPLACE, 'nsslapd-pluginarg1', ensure_bytes(SUFFIX))] + topology_st.standalone.modify_s(ATTRIBUTE_UNIQUENESS_PLUGIN, mod) + + topology_st.standalone.log.info("\n\n######################### ADD USER 1 ######################\n") + + # Prepare entry 1 + entry_name = '%s 1' % (ENTRY_NAME) + entry_dn_1 = 'cn=%s, %s' % (entry_name, SUFFIX) + entry_1 = Entry(entry_dn_1) + entry_1.setValues('objectclass', 'top', 'person') + entry_1.setValues('sn', ENTRY_NAME) + entry_1.setValues('cn', entry_name) + topology_st.standalone.log.info("Try to add Add %s: %r" % (entry_1, entry_1)) + topology_st.standalone.add_s(entry_1) + + topology_st.standalone.log.info("\n\n######################### Restart Server ######################\n") + topology_st.standalone.stop(timeout=10) + topology_st.standalone.start(timeout=10) + + topology_st.standalone.log.info("\n\n######################### ADD USER 2 ######################\n") + + # Prepare entry 2 having the same sn, which crashes the server + entry_name = '%s 2' % (ENTRY_NAME) + entry_dn_2 = 'cn=%s, %s' % (entry_name, SUFFIX) + entry_2 = Entry(entry_dn_2) + entry_2.setValues('objectclass', 'top', 'person') + entry_2.setValues('sn', ENTRY_NAME) + entry_2.setValues('cn', entry_name) + topology_st.standalone.log.info("Try to add Add %s: %r" % (entry_2, entry_2)) + try: + topology_st.standalone.add_s(entry_2) + except: + topology_st.standalone.log.warning("Adding %s failed" % entry_dn_2) + pass + + topology_st.standalone.log.info("\n\n######################### IS SERVER UP? 
######################\n") + ents = topology_st.standalone.search_s(entry_dn_1, ldap.SCOPE_BASE, '(objectclass=*)') + assert len(ents) == 1 + topology_st.standalone.log.info("Yes, it's up.") + + topology_st.standalone.log.info("\n\n######################### CHECK USER 2 NOT ADDED ######################\n") + topology_st.standalone.log.info("Try to search %s" % entry_dn_2) + try: + ents = topology_st.standalone.search_s(entry_dn_2, ldap.SCOPE_BASE, '(objectclass=*)') + except ldap.NO_SUCH_OBJECT: + topology_st.standalone.log.info("Found none") + + topology_st.standalone.log.info("\n\n######################### DELETE USER 1 ######################\n") + + topology_st.standalone.log.info("Try to delete %s " % entry_dn_1) + topology_st.standalone.delete_s(entry_dn_1) + log.info('Testcase PASSED') + + +if __name__ == '__main__': + # Run isolated + # -s for DEBUG mode + CURRENT_FILE = os.path.realpath(__file__) + pytest.main("-s %s" % CURRENT_FILE) diff --git a/dirsrvtests/tests/tickets/ticket47815_test.py b/dirsrvtests/tests/tickets/ticket47815_test.py new file mode 100644 index 0000000..4263ab7 --- /dev/null +++ b/dirsrvtests/tests/tickets/ticket47815_test.py @@ -0,0 +1,116 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2016 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. +# --- END COPYRIGHT BLOCK --- +# +import logging +import time + +import ldap +import pytest +from lib389 import Entry +from lib389._constants import * +from lib389.topologies import topology_st + +log = logging.getLogger(__name__) +from lib389.utils import * + +# Skip on older versions +pytestmark = [pytest.mark.tier2, + pytest.mark.skipif(ds_is_older('1.3.3') or ds_is_newer('1.3.7'), + reason="Not implemented, or invalid by nsMemberOf")] + +def test_ticket47815(topology_st): + """ + Test betxn plugins reject an invalid option, and make sure that the rejected entry + is not in the entry cache. 
+ + Enable memberOf, automember, and retrocl plugins + Add the automember config entry + Add the automember group + Add a user that will be rejected by a betxn plugin - result error 53 + Attempt the same add again, and it should result in another error 53 (not error 68) + """ + result = 0 + result2 = 0 + + log.info( + 'Testing Ticket 47815 - Add entries that should be rejected by the betxn plugins, and are not left in the entry cache') + + # Enabled the plugins + topology_st.standalone.plugins.enable(name=PLUGIN_MEMBER_OF) + topology_st.standalone.plugins.enable(name=PLUGIN_AUTOMEMBER) + topology_st.standalone.plugins.enable(name=PLUGIN_RETRO_CHANGELOG) + + # configure automember config entry + log.info('Adding automember config') + try: + topology_st.standalone.add_s(Entry(('cn=group cfg,cn=Auto Membership Plugin,cn=plugins,cn=config', { + 'objectclass': 'top autoMemberDefinition'.split(), + 'autoMemberScope': 'dc=example,dc=com', + 'autoMemberFilter': 'cn=user', + 'autoMemberDefaultGroup': 'cn=group,dc=example,dc=com', + 'autoMemberGroupingAttr': 'member:dn', + 'cn': 'group cfg'}))) + except: + log.error('Failed to add automember config') + exit(1) + + topology_st.standalone.restart() + + # need to reopen a connection toward the instance + topology_st.standalone.open() + + # add automember group + log.info('Adding automember group') + try: + topology_st.standalone.add_s(Entry(('cn=group,dc=example,dc=com', { + 'objectclass': 'top groupOfNames'.split(), + 'cn': 'group'}))) + except: + log.error('Failed to add automember group') + exit(1) + + # add user that should result in an error 53 + log.info('Adding invalid entry') + + try: + topology_st.standalone.add_s(Entry(('cn=user,dc=example,dc=com', { + 'objectclass': 'top person'.split(), + 'sn': 'user', + 'cn': 'user'}))) + except ldap.UNWILLING_TO_PERFORM: + log.debug('Adding invalid entry failed as expected') + result = 53 + except ldap.LDAPError as e: + log.error('Unexpected result ' + e.message['desc']) + assert False + if result == 0: + log.error('Add operation unexpectedly succeeded') + assert False + + # Attempt to add user again, should result in error 53 again + try: + topology_st.standalone.add_s(Entry(('cn=user,dc=example,dc=com', { + 'objectclass': 'top person'.split(), + 'sn': 'user', + 'cn': 'user'}))) + except ldap.UNWILLING_TO_PERFORM: + log.debug('2nd add of invalid entry failed as expected') + result2 = 53 + except ldap.LDAPError as e: + log.error('Unexpected result ' + e.message['desc']) + assert False + if result2 == 0: + log.error('2nd Add operation unexpectedly succeeded') + assert False + + +if __name__ == '__main__': + # Run isolated + # -s for DEBUG mode + CURRENT_FILE = os.path.realpath(__file__) + pytest.main("-s %s" % CURRENT_FILE) diff --git a/dirsrvtests/tests/tickets/ticket47823_test.py b/dirsrvtests/tests/tickets/ticket47823_test.py new file mode 100644 index 0000000..07a3d90 --- /dev/null +++ b/dirsrvtests/tests/tickets/ticket47823_test.py @@ -0,0 +1,965 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2016 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. 
+# --- END COPYRIGHT BLOCK --- +# +import logging +import re +import shutil +import subprocess +import time + +import ldap +import pytest +from lib389 import Entry +from lib389._constants import * +from lib389.topologies import topology_st + +log = logging.getLogger(__name__) +from lib389.utils import * + +# Skip on older versions +pytestmark = [pytest.mark.tier2, + pytest.mark.skipif(ds_is_older('1.3.3'), reason="Not implemented")] +PROVISIONING_CN = "provisioning" +PROVISIONING_DN = "cn=%s,%s" % (PROVISIONING_CN, SUFFIX) + +ACTIVE_CN = "accounts" +STAGE_CN = "staged users" +DELETE_CN = "deleted users" +ACTIVE_DN = "cn=%s,%s" % (ACTIVE_CN, SUFFIX) +STAGE_DN = "cn=%s,%s" % (STAGE_CN, PROVISIONING_DN) +DELETE_DN = "cn=%s,%s" % (DELETE_CN, PROVISIONING_DN) + +STAGE_USER_CN = "stage guy" +STAGE_USER_DN = "cn=%s,%s" % (STAGE_USER_CN, STAGE_DN) + +ACTIVE_USER_CN = "active guy" +ACTIVE_USER_DN = "cn=%s,%s" % (ACTIVE_USER_CN, ACTIVE_DN) + +ACTIVE_USER_1_CN = "test_1" +ACTIVE_USER_1_DN = "cn=%s,%s" % (ACTIVE_USER_1_CN, ACTIVE_DN) +ACTIVE_USER_2_CN = "test_2" +ACTIVE_USER_2_DN = "cn=%s,%s" % (ACTIVE_USER_2_CN, ACTIVE_DN) + +STAGE_USER_1_CN = ACTIVE_USER_1_CN +STAGE_USER_1_DN = "cn=%s,%s" % (STAGE_USER_1_CN, STAGE_DN) +STAGE_USER_2_CN = ACTIVE_USER_2_CN +STAGE_USER_2_DN = "cn=%s,%s" % (STAGE_USER_2_CN, STAGE_DN) + +ALL_CONFIG_ATTRS = ['nsslapd-pluginarg0', 'nsslapd-pluginarg1', 'nsslapd-pluginarg2', + 'uniqueness-attribute-name', 'uniqueness-subtrees', 'uniqueness-across-all-subtrees'] + + +def _header(topology_st, label): + topology_st.standalone.log.info("\n\n###############################################") + topology_st.standalone.log.info("#######") + topology_st.standalone.log.info("####### %s" % label) + topology_st.standalone.log.info("#######") + topology_st.standalone.log.info("###############################################") + + +def _uniqueness_config_entry(topology_st, name=None): + if not name: + return None + + ent = topology_st.standalone.getEntry("cn=%s,%s" % (PLUGIN_ATTR_UNIQUENESS, DN_PLUGIN), ldap.SCOPE_BASE, + "(objectclass=nsSlapdPlugin)", + ['objectClass', 'cn', 'nsslapd-pluginPath', 'nsslapd-pluginInitfunc', + 'nsslapd-pluginType', 'nsslapd-pluginEnabled', + 'nsslapd-plugin-depends-on-type', + 'nsslapd-pluginId', 'nsslapd-pluginVersion', 'nsslapd-pluginVendor', + 'nsslapd-pluginDescription']) + ent.dn = "cn=%s uniqueness,%s" % (name, DN_PLUGIN) + return ent + + +def _build_config(topology_st, attr_name='cn', subtree_1=None, subtree_2=None, type_config='old', + across_subtrees=False): + assert topology_st + assert attr_name + assert subtree_1 + + if type_config == 'old': + # enable the 'cn' uniqueness on Active + config = _uniqueness_config_entry(topology_st, attr_name) + config.setValue('nsslapd-pluginarg0', attr_name) + config.setValue('nsslapd-pluginarg1', subtree_1) + if subtree_2: + config.setValue('nsslapd-pluginarg2', subtree_2) + else: + # prepare the config entry + config = _uniqueness_config_entry(topology_st, attr_name) + config.setValue('uniqueness-attribute-name', attr_name) + config.setValue('uniqueness-subtrees', subtree_1) + if subtree_2: + config.setValue('uniqueness-subtrees', subtree_2) + if across_subtrees: + config.setValue('uniqueness-across-all-subtrees', 'on') + return config + + +def _active_container_invalid_cfg_add(topology_st): + ''' + Check uniqueness is not enforced with ADD (invalid config) + ''' + topology_st.standalone.add_s(Entry((ACTIVE_USER_1_DN, { + 'objectclass': "top person".split(), + 'sn': ACTIVE_USER_1_CN, + 'cn': ACTIVE_USER_1_CN}))) 
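+    # The second entry reuses ACTIVE_USER_1_CN as one of its 'cn' values; with the
+    # uniqueness plugin unconfigured or misconfigured (per this helper's docstring),
+    # the duplicate 'cn' is expected to be accepted.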
+ + topology_st.standalone.add_s(Entry((ACTIVE_USER_2_DN, { + 'objectclass': "top person".split(), + 'sn': ACTIVE_USER_2_CN, + 'cn': [ACTIVE_USER_1_CN, ACTIVE_USER_2_CN]}))) + + topology_st.standalone.delete_s(ACTIVE_USER_1_DN) + topology_st.standalone.delete_s(ACTIVE_USER_2_DN) + + +def _active_container_add(topology_st, type_config='old'): + ''' + Check uniqueness in a single container (Active) + Add an entry with a given 'cn', then check we can not add an entry with the same 'cn' value + + ''' + config = _build_config(topology_st, attr_name='cn', subtree_1=ACTIVE_DN, subtree_2=None, type_config=type_config, + across_subtrees=False) + + # remove the 'cn' uniqueness entry + try: + topology_st.standalone.delete_s(config.dn) + + except ldap.NO_SUCH_OBJECT: + pass + topology_st.standalone.restart(timeout=120) + + topology_st.standalone.log.info('Uniqueness not enforced: create the entries') + + topology_st.standalone.add_s(Entry((ACTIVE_USER_1_DN, { + 'objectclass': "top person".split(), + 'sn': ACTIVE_USER_1_CN, + 'cn': ACTIVE_USER_1_CN}))) + + topology_st.standalone.add_s(Entry((ACTIVE_USER_2_DN, { + 'objectclass': "top person".split(), + 'sn': ACTIVE_USER_2_CN, + 'cn': [ACTIVE_USER_1_CN, ACTIVE_USER_2_CN]}))) + + topology_st.standalone.delete_s(ACTIVE_USER_1_DN) + topology_st.standalone.delete_s(ACTIVE_USER_2_DN) + + topology_st.standalone.log.info('Uniqueness enforced: checks second entry is rejected') + + # enable the 'cn' uniqueness on Active + topology_st.standalone.add_s(config) + topology_st.standalone.restart(timeout=120) + topology_st.standalone.add_s(Entry((ACTIVE_USER_1_DN, { + 'objectclass': "top person".split(), + 'sn': ACTIVE_USER_1_CN, + 'cn': ACTIVE_USER_1_CN}))) + + try: + topology_st.standalone.add_s(Entry((ACTIVE_USER_2_DN, { + 'objectclass': "top person".split(), + 'sn': ACTIVE_USER_2_CN, + 'cn': [ACTIVE_USER_1_CN, ACTIVE_USER_2_CN]}))) + except ldap.CONSTRAINT_VIOLATION: + # yes it is expected + pass + + # cleanup the stuff now + topology_st.standalone.delete_s(config.dn) + topology_st.standalone.delete_s(ACTIVE_USER_1_DN) + + +def _active_container_mod(topology_st, type_config='old'): + ''' + Check uniqueness in a single container (active) + Add and entry with a given 'cn', then check we can not modify an entry with the same 'cn' value + + ''' + + config = _build_config(topology_st, attr_name='cn', subtree_1=ACTIVE_DN, subtree_2=None, type_config=type_config, + across_subtrees=False) + + # enable the 'cn' uniqueness on Active + topology_st.standalone.add_s(config) + topology_st.standalone.restart(timeout=120) + + topology_st.standalone.log.info('Uniqueness enforced: checks MOD ADD entry is rejected') + topology_st.standalone.add_s(Entry((ACTIVE_USER_1_DN, { + 'objectclass': "top person".split(), + 'sn': ACTIVE_USER_1_CN, + 'cn': ACTIVE_USER_1_CN}))) + + topology_st.standalone.add_s(Entry((ACTIVE_USER_2_DN, { + 'objectclass': "top person".split(), + 'sn': ACTIVE_USER_2_CN, + 'cn': ACTIVE_USER_2_CN}))) + + try: + topology_st.standalone.modify_s(ACTIVE_USER_2_DN, [(ldap.MOD_ADD, 'cn', ensure_bytes(ACTIVE_USER_1_CN))]) + except ldap.CONSTRAINT_VIOLATION: + # yes it is expected + pass + + topology_st.standalone.log.info('Uniqueness enforced: checks MOD REPLACE entry is rejected') + try: + topology_st.standalone.modify_s(ACTIVE_USER_2_DN, + [(ldap.MOD_REPLACE, 'cn', [ensure_bytes(ACTIVE_USER_1_CN), ensure_bytes(ACTIVE_USER_2_CN)])]) + except ldap.CONSTRAINT_VIOLATION: + # yes it is expected + pass + + # cleanup the stuff now + topology_st.standalone.delete_s(config.dn) + 
topology_st.standalone.delete_s(ACTIVE_USER_1_DN) + topology_st.standalone.delete_s(ACTIVE_USER_2_DN) + + +def _active_container_modrdn(topology_st, type_config='old'): + ''' + Check uniqueness in a single container + Add and entry with a given 'cn', then check we can not modrdn an entry with the same 'cn' value + + ''' + config = _build_config(topology_st, attr_name='cn', subtree_1=ACTIVE_DN, subtree_2=None, type_config=type_config, + across_subtrees=False) + + # enable the 'cn' uniqueness on Active + topology_st.standalone.add_s(config) + topology_st.standalone.restart(timeout=120) + + topology_st.standalone.log.info('Uniqueness enforced: checks MODRDN entry is rejected') + + topology_st.standalone.add_s(Entry((ACTIVE_USER_1_DN, { + 'objectclass': "top person".split(), + 'sn': ACTIVE_USER_1_CN, + 'cn': [ACTIVE_USER_1_CN, 'dummy']}))) + + topology_st.standalone.add_s(Entry((ACTIVE_USER_2_DN, { + 'objectclass': "top person".split(), + 'sn': ACTIVE_USER_2_CN, + 'cn': ACTIVE_USER_2_CN}))) + + try: + topology_st.standalone.rename_s(ACTIVE_USER_2_DN, 'cn=dummy', delold=0) + except ldap.CONSTRAINT_VIOLATION: + # yes it is expected + pass + + # cleanup the stuff now + topology_st.standalone.delete_s(config.dn) + topology_st.standalone.delete_s(ACTIVE_USER_1_DN) + topology_st.standalone.delete_s(ACTIVE_USER_2_DN) + + +def _active_stage_containers_add(topology_st, type_config='old', across_subtrees=False): + ''' + Check uniqueness in several containers + Add an entry on a container with a given 'cn' + with across_subtrees=False check we CAN add an entry with the same 'cn' value on the other container + with across_subtrees=True check we CAN NOT add an entry with the same 'cn' value on the other container + + ''' + config = _build_config(topology_st, attr_name='cn', subtree_1=ACTIVE_DN, subtree_2=STAGE_DN, + type_config=type_config, across_subtrees=False) + + topology_st.standalone.add_s(config) + topology_st.standalone.restart(timeout=120) + topology_st.standalone.add_s(Entry((ACTIVE_USER_1_DN, { + 'objectclass': "top person".split(), + 'sn': ACTIVE_USER_1_CN, + 'cn': ACTIVE_USER_1_CN}))) + try: + + # adding an entry on a separated contains with the same 'cn' + topology_st.standalone.add_s(Entry((STAGE_USER_1_DN, { + 'objectclass': "top person".split(), + 'sn': STAGE_USER_1_CN, + 'cn': ACTIVE_USER_1_CN}))) + except ldap.CONSTRAINT_VIOLATION: + assert across_subtrees + + # cleanup the stuff now + topology_st.standalone.delete_s(config.dn) + topology_st.standalone.delete_s(ACTIVE_USER_1_DN) + topology_st.standalone.delete_s(STAGE_USER_1_DN) + + +def _active_stage_containers_mod(topology_st, type_config='old', across_subtrees=False): + ''' + Check uniqueness in a several containers + Add an entry on a container with a given 'cn', then check we CAN mod an entry with the same 'cn' value on the other container + + ''' + config = _build_config(topology_st, attr_name='cn', subtree_1=ACTIVE_DN, subtree_2=STAGE_DN, + type_config=type_config, across_subtrees=False) + + topology_st.standalone.add_s(config) + topology_st.standalone.restart(timeout=120) + # adding an entry on active with a different 'cn' + topology_st.standalone.add_s(Entry((ACTIVE_USER_1_DN, { + 'objectclass': "top person".split(), + 'sn': ACTIVE_USER_1_CN, + 'cn': ACTIVE_USER_2_CN}))) + + # adding an entry on a stage with a different 'cn' + topology_st.standalone.add_s(Entry((STAGE_USER_1_DN, { + 'objectclass': "top person".split(), + 'sn': STAGE_USER_1_CN, + 'cn': STAGE_USER_1_CN}))) + + try: + + # modify add same value + 
topology_st.standalone.modify_s(STAGE_USER_1_DN, [(ldap.MOD_ADD, 'cn', [ensure_bytes(ACTIVE_USER_2_CN)])]) + except ldap.CONSTRAINT_VIOLATION: + assert across_subtrees + + topology_st.standalone.delete_s(STAGE_USER_1_DN) + topology_st.standalone.add_s(Entry((STAGE_USER_1_DN, { + 'objectclass': "top person".split(), + 'sn': STAGE_USER_1_CN, + 'cn': STAGE_USER_2_CN}))) + try: + # modify replace same value + topology_st.standalone.modify_s(STAGE_USER_1_DN, + [(ldap.MOD_REPLACE, 'cn', [ensure_bytes(STAGE_USER_2_CN), ensure_bytes(ACTIVE_USER_1_CN)])]) + except ldap.CONSTRAINT_VIOLATION: + assert across_subtrees + + # cleanup the stuff now + topology_st.standalone.delete_s(config.dn) + topology_st.standalone.delete_s(ACTIVE_USER_1_DN) + topology_st.standalone.delete_s(STAGE_USER_1_DN) + + +def _active_stage_containers_modrdn(topology_st, type_config='old', across_subtrees=False): + ''' + Check uniqueness in a several containers + Add and entry with a given 'cn', then check we CAN modrdn an entry with the same 'cn' value on the other container + + ''' + + config = _build_config(topology_st, attr_name='cn', subtree_1=ACTIVE_DN, subtree_2=STAGE_DN, + type_config=type_config, across_subtrees=False) + + # enable the 'cn' uniqueness on Active and Stage + topology_st.standalone.add_s(config) + topology_st.standalone.restart(timeout=120) + topology_st.standalone.add_s(Entry((ACTIVE_USER_1_DN, { + 'objectclass': "top person".split(), + 'sn': ACTIVE_USER_1_CN, + 'cn': [ACTIVE_USER_1_CN, 'dummy']}))) + + topology_st.standalone.add_s(Entry((STAGE_USER_1_DN, { + 'objectclass': "top person".split(), + 'sn': STAGE_USER_1_CN, + 'cn': STAGE_USER_1_CN}))) + + try: + + topology_st.standalone.rename_s(STAGE_USER_1_DN, 'cn=dummy', delold=0) + + # check stage entry has 'cn=dummy' + stage_ent = topology_st.standalone.getEntry("cn=dummy,%s" % (STAGE_DN), ldap.SCOPE_BASE, "objectclass=*", + ['cn']) + assert stage_ent.hasAttr('cn') + found = False + for value in stage_ent.getValues('cn'): + if ensure_str(value) == 'dummy': + found = True + assert found + + # check active entry has 'cn=dummy' + active_ent = topology_st.standalone.getEntry(ACTIVE_USER_1_DN, ldap.SCOPE_BASE, "objectclass=*", ['cn']) + assert active_ent.hasAttr('cn') + found = False + for value in stage_ent.getValues('cn'): + if ensure_str(value) == 'dummy': + found = True + assert found + + topology_st.standalone.delete_s("cn=dummy,%s" % (STAGE_DN)) + except ldap.CONSTRAINT_VIOLATION: + assert across_subtrees + topology_st.standalone.delete_s(STAGE_USER_1_DN) + + # cleanup the stuff now + topology_st.standalone.delete_s(config.dn) + topology_st.standalone.delete_s(ACTIVE_USER_1_DN) + + +def _config_file(topology_st, action='save'): + dse_ldif = topology_st.standalone.confdir + '/dse.ldif' + sav_file = topology_st.standalone.confdir + '/dse.ldif.ticket47823' + if action == 'save': + shutil.copy(dse_ldif, sav_file) + else: + shutil.copy(sav_file, dse_ldif) + time.sleep(1) + + +def _pattern_errorlog(file, log_pattern): + try: + _pattern_errorlog.last_pos += 1 + except AttributeError: + _pattern_errorlog.last_pos = 0 + + found = None + log.debug("_pattern_errorlog: start at offset %d" % _pattern_errorlog.last_pos) + file.seek(_pattern_errorlog.last_pos) + + # Use a while true iteration because 'for line in file: hit a + # python bug that break file.tell() + while True: + line = file.readline() + log.debug("_pattern_errorlog: [%d] %s" % (file.tell(), line)) + found = log_pattern.search(line) + if ((line == '') or (found)): + break + + 
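+ # persist the offset on the function object so the next call resumes scanning the error log where this one stopped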
log.debug("_pattern_errorlog: end at offset %d" % file.tell()) + _pattern_errorlog.last_pos = file.tell() + return found + + +def test_ticket47823_init(topology_st): + """ + + """ + + # Enabled the plugins + topology_st.standalone.plugins.enable(name=PLUGIN_ATTR_UNIQUENESS) + topology_st.standalone.restart(timeout=120) + + topology_st.standalone.add_s(Entry((PROVISIONING_DN, {'objectclass': "top nscontainer".split(), + 'cn': PROVISIONING_CN}))) + topology_st.standalone.add_s(Entry((ACTIVE_DN, {'objectclass': "top nscontainer".split(), + 'cn': ACTIVE_CN}))) + topology_st.standalone.add_s(Entry((STAGE_DN, {'objectclass': "top nscontainer".split(), + 'cn': STAGE_CN}))) + topology_st.standalone.add_s(Entry((DELETE_DN, {'objectclass': "top nscontainer".split(), + 'cn': DELETE_CN}))) + topology_st.standalone.errorlog_file = open(topology_st.standalone.errlog, "r") + + topology_st.standalone.stop(timeout=120) + time.sleep(1) + topology_st.standalone.start(timeout=120) + time.sleep(3) + + +def test_ticket47823_one_container_add(topology_st): + ''' + Check uniqueness in a single container + Add and entry with a given 'cn', then check we can not add an entry with the same 'cn' value + + ''' + _header(topology_st, "With former config (args), check attribute uniqueness with 'cn' (ADD) ") + + _active_container_add(topology_st, type_config='old') + + _header(topology_st, "With new config (args), check attribute uniqueness with 'cn' (ADD) ") + + _active_container_add(topology_st, type_config='new') + + +def test_ticket47823_one_container_mod(topology_st): + ''' + Check uniqueness in a single container + Add and entry with a given 'cn', then check we can not modify an entry with the same 'cn' value + + ''' + _header(topology_st, "With former config (args), check attribute uniqueness with 'cn' (MOD)") + + _active_container_mod(topology_st, type_config='old') + + _header(topology_st, "With new config (args), check attribute uniqueness with 'cn' (MOD)") + + _active_container_mod(topology_st, type_config='new') + + +def test_ticket47823_one_container_modrdn(topology_st): + ''' + Check uniqueness in a single container + Add and entry with a given 'cn', then check we can not modrdn an entry with the same 'cn' value + + ''' + _header(topology_st, "With former config (args), check attribute uniqueness with 'cn' (MODRDN)") + + _active_container_modrdn(topology_st, type_config='old') + + _header(topology_st, "With former config (args), check attribute uniqueness with 'cn' (MODRDN)") + + _active_container_modrdn(topology_st, type_config='new') + + +def test_ticket47823_multi_containers_add(topology_st): + ''' + Check uniqueness in a several containers + Add and entry with a given 'cn', then check we can not add an entry with the same 'cn' value + + ''' + _header(topology_st, "With former config (args), check attribute uniqueness with 'cn' (ADD) ") + + _active_stage_containers_add(topology_st, type_config='old', across_subtrees=False) + + _header(topology_st, "With new config (args), check attribute uniqueness with 'cn' (ADD) ") + + _active_stage_containers_add(topology_st, type_config='new', across_subtrees=False) + + +def test_ticket47823_multi_containers_mod(topology_st): + ''' + Check uniqueness in a several containers + Add an entry on a container with a given 'cn', then check we CAN mod an entry with the same 'cn' value on the other container + + ''' + _header(topology_st, "With former config (args), check attribute uniqueness with 'cn' (MOD) on separated container") + + topology_st.standalone.log.info( + 
'Uniqueness not enforced: if same \'cn\' modified (add/replace) on separated containers') + _active_stage_containers_mod(topology_st, type_config='old', across_subtrees=False) + + _header(topology_st, "With new config (args), check attribute uniqueness with 'cn' (MOD) on separated container") + + topology_st.standalone.log.info( + 'Uniqueness not enforced: if same \'cn\' modified (add/replace) on separated containers') + _active_stage_containers_mod(topology_st, type_config='new', across_subtrees=False) + + +def test_ticket47823_multi_containers_modrdn(topology_st): + ''' + Check uniqueness in a several containers + Add and entry with a given 'cn', then check we CAN modrdn an entry with the same 'cn' value on the other container + + ''' + _header(topology_st, + "With former config (args), check attribute uniqueness with 'cn' (MODRDN) on separated containers") + + topology_st.standalone.log.info('Uniqueness not enforced: checks MODRDN entry is accepted on separated containers') + _active_stage_containers_modrdn(topology_st, type_config='old', across_subtrees=False) + + topology_st.standalone.log.info('Uniqueness not enforced: checks MODRDN entry is accepted on separated containers') + _active_stage_containers_modrdn(topology_st, type_config='old') + + +def test_ticket47823_across_multi_containers_add(topology_st): + ''' + Check uniqueness across several containers, uniquely with the new configuration + Add and entry with a given 'cn', then check we can not add an entry with the same 'cn' value + + ''' + _header(topology_st, "With new config (args), check attribute uniqueness with 'cn' (ADD) across several containers") + + _active_stage_containers_add(topology_st, type_config='old', across_subtrees=True) + + +def test_ticket47823_across_multi_containers_mod(topology_st): + ''' + Check uniqueness across several containers, uniquely with the new configuration + Add and entry with a given 'cn', then check we can not modifiy an entry with the same 'cn' value + + ''' + _header(topology_st, "With new config (args), check attribute uniqueness with 'cn' (MOD) across several containers") + + _active_stage_containers_mod(topology_st, type_config='old', across_subtrees=True) + + +def test_ticket47823_across_multi_containers_modrdn(topology_st): + ''' + Check uniqueness across several containers, uniquely with the new configuration + Add and entry with a given 'cn', then check we can not modrdn an entry with the same 'cn' value + + ''' + _header(topology_st, + "With new config (args), check attribute uniqueness with 'cn' (MODRDN) across several containers") + + _active_stage_containers_modrdn(topology_st, type_config='old', across_subtrees=True) + + +def test_ticket47823_invalid_config_1(topology_st): + ''' + Check that an invalid config is detected. 
No uniqueness enforced + Using old config: arg0 is missing + ''' + _header(topology_st, "Invalid config (old): arg0 is missing") + + _config_file(topology_st, action='save') + + # create an invalid config without arg0 + config = _build_config(topology_st, attr_name='cn', subtree_1=ACTIVE_DN, subtree_2=None, type_config='old', + across_subtrees=False) + + del config.data['nsslapd-pluginarg0'] + # replace 'cn' uniqueness entry + try: + topology_st.standalone.delete_s(config.dn) + + except ldap.NO_SUCH_OBJECT: + pass + topology_st.standalone.add_s(config) + + topology_st.standalone.getEntry(config.dn, ldap.SCOPE_BASE, "(objectclass=nsSlapdPlugin)", ALL_CONFIG_ATTRS) + + # Check the server did not restart + topology_st.standalone.modify_s(DN_CONFIG, [(ldap.MOD_REPLACE, 'nsslapd-errorlog-level', b'65536')]) + try: + topology_st.standalone.restart() + ent = topology_st.standalone.getEntry(config.dn, ldap.SCOPE_BASE, "(objectclass=nsSlapdPlugin)", + ALL_CONFIG_ATTRS) + if ent: + # be sure to restore a valid config before assert + _config_file(topology_st, action='restore') + assert not ent + except: + pass + + # Check the expected error message + regex = re.compile("[U|u]nable to parse old style") + res = _pattern_errorlog(topology_st.standalone.errorlog_file, regex) + if not res: + # be sure to restore a valid config before assert + _config_file(topology_st, action='restore') + assert res + + # Check we can restart the server + _config_file(topology_st, action='restore') + topology_st.standalone.start() + try: + topology_st.standalone.getEntry(config.dn, ldap.SCOPE_BASE, "(objectclass=nsSlapdPlugin)", ALL_CONFIG_ATTRS) + except ldap.NO_SUCH_OBJECT: + pass + + +def test_ticket47823_invalid_config_2(topology_st): + ''' + Check that an invalid config is detected. No uniqueness enforced + Using old config: arg1 is missing + ''' + _header(topology_st, "Invalid config (old): arg1 is missing") + + _config_file(topology_st, action='save') + + # create an invalid config without arg0 + config = _build_config(topology_st, attr_name='cn', subtree_1=ACTIVE_DN, subtree_2=None, type_config='old', + across_subtrees=False) + + del config.data['nsslapd-pluginarg1'] + # replace 'cn' uniqueness entry + try: + topology_st.standalone.delete_s(config.dn) + + except ldap.NO_SUCH_OBJECT: + pass + topology_st.standalone.add_s(config) + + topology_st.standalone.getEntry(config.dn, ldap.SCOPE_BASE, "(objectclass=nsSlapdPlugin)", ALL_CONFIG_ATTRS) + + # Check the server did not restart + try: + topology_st.standalone.restart() + ent = topology_st.standalone.getEntry(config.dn, ldap.SCOPE_BASE, "(objectclass=nsSlapdPlugin)", + ALL_CONFIG_ATTRS) + if ent: + # be sure to restore a valid config before assert + _config_file(topology_st, action='restore') + assert not ent + except: + pass + + # Check the expected error message + regex = re.compile("No valid subtree is defined") + res = _pattern_errorlog(topology_st.standalone.errorlog_file, regex) + if not res: + # be sure to restore a valid config before assert + _config_file(topology_st, action='restore') + assert res + + # Check we can restart the server + _config_file(topology_st, action='restore') + topology_st.standalone.start() + try: + topology_st.standalone.getEntry(config.dn, ldap.SCOPE_BASE, "(objectclass=nsSlapdPlugin)", ALL_CONFIG_ATTRS) + except ldap.NO_SUCH_OBJECT: + pass + + +def test_ticket47823_invalid_config_3(topology_st): + ''' + Check that an invalid config is detected. 
No uniqueness enforced + Using old config: arg0 is missing + ''' + _header(topology_st, "Invalid config (old): arg0 is missing but new config attrname exists") + + _config_file(topology_st, action='save') + + # create an invalid config without arg0 + config = _build_config(topology_st, attr_name='cn', subtree_1=ACTIVE_DN, subtree_2=None, type_config='old', + across_subtrees=False) + + del config.data['nsslapd-pluginarg0'] + config.data['uniqueness-attribute-name'] = 'cn' + # replace 'cn' uniqueness entry + try: + topology_st.standalone.delete_s(config.dn) + + except ldap.NO_SUCH_OBJECT: + pass + topology_st.standalone.add_s(config) + + topology_st.standalone.getEntry(config.dn, ldap.SCOPE_BASE, "(objectclass=nsSlapdPlugin)", ALL_CONFIG_ATTRS) + + # Check the server did not restart + topology_st.standalone.modify_s(DN_CONFIG, [(ldap.MOD_REPLACE, 'nsslapd-errorlog-level', b'65536')]) + try: + topology_st.standalone.restart() + ent = topology_st.standalone.getEntry(config.dn, ldap.SCOPE_BASE, "(objectclass=nsSlapdPlugin)", + ALL_CONFIG_ATTRS) + if ent: + # be sure to restore a valid config before assert + _config_file(topology_st, action='restore') + assert not ent + except: + pass + + # Check the expected error message + regex = re.compile("[U|u]nable to parse old style") + res = _pattern_errorlog(topology_st.standalone.errorlog_file, regex) + if not res: + # be sure to restore a valid config before assert + _config_file(topology_st, action='restore') + assert res + + # Check we can restart the server + _config_file(topology_st, action='restore') + topology_st.standalone.start() + try: + topology_st.standalone.getEntry(config.dn, ldap.SCOPE_BASE, "(objectclass=nsSlapdPlugin)", ALL_CONFIG_ATTRS) + except ldap.NO_SUCH_OBJECT: + pass + + +def test_ticket47823_invalid_config_4(topology_st): + ''' + Check that an invalid config is detected. 
No uniqueness enforced + Using old config: arg1 is missing + ''' + _header(topology_st, "Invalid config (old): arg1 is missing but new config exist") + + _config_file(topology_st, action='save') + + # create an invalid config without arg0 + config = _build_config(topology_st, attr_name='cn', subtree_1=ACTIVE_DN, subtree_2=None, type_config='old', + across_subtrees=False) + + del config.data['nsslapd-pluginarg1'] + config.data['uniqueness-subtrees'] = ACTIVE_DN + # replace 'cn' uniqueness entry + try: + topology_st.standalone.delete_s(config.dn) + + except ldap.NO_SUCH_OBJECT: + pass + topology_st.standalone.add_s(config) + + topology_st.standalone.getEntry(config.dn, ldap.SCOPE_BASE, "(objectclass=nsSlapdPlugin)", ALL_CONFIG_ATTRS) + + # Check the server did not restart + try: + topology_st.standalone.restart() + ent = topology_st.standalone.getEntry(config.dn, ldap.SCOPE_BASE, "(objectclass=nsSlapdPlugin)", + ALL_CONFIG_ATTRS) + if ent: + # be sure to restore a valid config before assert + _config_file(topology_st, action='restore') + assert not ent + except: + pass + + # Check the expected error message + regex = re.compile("No valid subtree is defined") + res = _pattern_errorlog(topology_st.standalone.errorlog_file, regex) + if not res: + # be sure to restore a valid config before assert + _config_file(topology_st, action='restore') + assert res + + # Check we can restart the server + _config_file(topology_st, action='restore') + topology_st.standalone.start() + try: + topology_st.standalone.getEntry(config.dn, ldap.SCOPE_BASE, "(objectclass=nsSlapdPlugin)", ALL_CONFIG_ATTRS) + except ldap.NO_SUCH_OBJECT: + pass + + +def test_ticket47823_invalid_config_5(topology_st): + ''' + Check that an invalid config is detected. No uniqueness enforced + Using new config: uniqueness-attribute-name is missing + ''' + _header(topology_st, "Invalid config (new): uniqueness-attribute-name is missing") + + _config_file(topology_st, action='save') + + # create an invalid config without arg0 + config = _build_config(topology_st, attr_name='cn', subtree_1=ACTIVE_DN, subtree_2=None, type_config='new', + across_subtrees=False) + + del config.data['uniqueness-attribute-name'] + # replace 'cn' uniqueness entry + try: + topology_st.standalone.delete_s(config.dn) + + except ldap.NO_SUCH_OBJECT: + pass + topology_st.standalone.add_s(config) + + topology_st.standalone.getEntry(config.dn, ldap.SCOPE_BASE, "(objectclass=nsSlapdPlugin)", ALL_CONFIG_ATTRS) + + # Check the server did not restart + try: + topology_st.standalone.restart() + ent = topology_st.standalone.getEntry(config.dn, ldap.SCOPE_BASE, "(objectclass=nsSlapdPlugin)", + ALL_CONFIG_ATTRS) + if ent: + # be sure to restore a valid config before assert + _config_file(topology_st, action='restore') + assert not ent + except: + pass + + # Check the expected error message + regex = re.compile("[A|a]ttribute name not defined") + res = _pattern_errorlog(topology_st.standalone.errorlog_file, regex) + if not res: + # be sure to restore a valid config before assert + _config_file(topology_st, action='restore') + assert res + + # Check we can restart the server + _config_file(topology_st, action='restore') + topology_st.standalone.start() + try: + topology_st.standalone.getEntry(config.dn, ldap.SCOPE_BASE, "(objectclass=nsSlapdPlugin)", ALL_CONFIG_ATTRS) + except ldap.NO_SUCH_OBJECT: + pass + + +def test_ticket47823_invalid_config_6(topology_st): + ''' + Check that an invalid config is detected. 
No uniqueness enforced + Using new config: uniqueness-subtrees is missing + ''' + _header(topology_st, "Invalid config (new): uniqueness-subtrees is missing") + + _config_file(topology_st, action='save') + + # create an invalid config without arg0 + config = _build_config(topology_st, attr_name='cn', subtree_1=ACTIVE_DN, subtree_2=None, type_config='new', + across_subtrees=False) + + del config.data['uniqueness-subtrees'] + # replace 'cn' uniqueness entry + try: + topology_st.standalone.delete_s(config.dn) + + except ldap.NO_SUCH_OBJECT: + pass + topology_st.standalone.add_s(config) + + topology_st.standalone.getEntry(config.dn, ldap.SCOPE_BASE, "(objectclass=nsSlapdPlugin)", ALL_CONFIG_ATTRS) + + # Check the server did not restart + try: + topology_st.standalone.restart() + ent = topology_st.standalone.getEntry(config.dn, ldap.SCOPE_BASE, "(objectclass=nsSlapdPlugin)", + ALL_CONFIG_ATTRS) + if ent: + # be sure to restore a valid config before assert + _config_file(topology_st, action='restore') + assert not ent + except: + pass + + # Check the expected error message + regex = re.compile("[O|o]bjectclass for subtree entries is not defined") + res = _pattern_errorlog(topology_st.standalone.errorlog_file, regex) + if not res: + # be sure to restore a valid config before assert + _config_file(topology_st, action='restore') + assert res + + # Check we can restart the server + _config_file(topology_st, action='restore') + topology_st.standalone.start() + try: + topology_st.standalone.getEntry(config.dn, ldap.SCOPE_BASE, "(objectclass=nsSlapdPlugin)", ALL_CONFIG_ATTRS) + except ldap.NO_SUCH_OBJECT: + pass + + +def test_ticket47823_invalid_config_7(topology_st): + ''' + Check that an invalid config is detected. No uniqueness enforced + Using new config: uniqueness-subtrees is missing + ''' + _header(topology_st, "Invalid config (new): uniqueness-subtrees are invalid") + + _config_file(topology_st, action='save') + + # create an invalid config without arg0 + config = _build_config(topology_st, attr_name='cn', subtree_1="this_is dummy DN", subtree_2="an other=dummy DN", + type_config='new', across_subtrees=False) + + topology_st.standalone.modify_s(DN_CONFIG, [(ldap.MOD_REPLACE, 'nsslapd-errorlog-level', b'65536')]) + # replace 'cn' uniqueness entry + try: + topology_st.standalone.delete_s(config.dn) + + except ldap.NO_SUCH_OBJECT: + pass + topology_st.standalone.add_s(config) + + topology_st.standalone.getEntry(config.dn, ldap.SCOPE_BASE, "(objectclass=nsSlapdPlugin)", ALL_CONFIG_ATTRS) + + # Check the server did not restart + try: + topology_st.standalone.restart() + ent = topology_st.standalone.getEntry(config.dn, ldap.SCOPE_BASE, "(objectclass=nsSlapdPlugin)", + ALL_CONFIG_ATTRS) + if ent: + # be sure to restore a valid config before assert + _config_file(topology_st, action='restore') + assert not ent + except: + pass + + # Check the expected error message + regex = re.compile("No valid subtree is defined") + res = _pattern_errorlog(topology_st.standalone.errorlog_file, regex) + if not res: + # be sure to restore a valid config before assert + _config_file(topology_st, action='restore') + assert res + + # Check we can restart the server + _config_file(topology_st, action='restore') + topology_st.standalone.start() + try: + topology_st.standalone.getEntry(config.dn, ldap.SCOPE_BASE, "(objectclass=nsSlapdPlugin)", ALL_CONFIG_ATTRS) + except ldap.NO_SUCH_OBJECT: + pass + + +if __name__ == '__main__': + # Run isolated + # -s for DEBUG mode + CURRENT_FILE = os.path.realpath(__file__) + 
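+ # note: newer pytest releases expect the arguments as a list, e.g. pytest.main(["-s", CURRENT_FILE])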
pytest.main("-s %s" % CURRENT_FILE) diff --git a/dirsrvtests/tests/tickets/ticket47828_test.py b/dirsrvtests/tests/tickets/ticket47828_test.py new file mode 100644 index 0000000..e20f753 --- /dev/null +++ b/dirsrvtests/tests/tickets/ticket47828_test.py @@ -0,0 +1,652 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2016 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. +# --- END COPYRIGHT BLOCK --- +# +import logging + +import ldap +import pytest +from lib389 import Entry +from lib389._constants import * +from lib389.topologies import topology_st + +log = logging.getLogger(__name__) + +ACCT_POLICY_CONFIG_DN = 'cn=config,cn=%s,cn=plugins,cn=config' % PLUGIN_ACCT_POLICY +ACCT_POLICY_DN = 'cn=Account Inactivation Policy,%s' % SUFFIX +from lib389.utils import * + +# Skip on older versions +pytestmark = [pytest.mark.tier2, + pytest.mark.skipif(ds_is_older('1.3.3'), reason="Not implemented")] +INACTIVITY_LIMIT = '9' +SEARCHFILTER = '(objectclass=*)' + +DUMMY_CONTAINER = 'cn=dummy container,%s' % SUFFIX +PROVISIONING = 'cn=provisioning,%s' % SUFFIX +ACTIVE_USER1_CN = 'active user1' +ACTIVE_USER1_DN = 'cn=%s,%s' % (ACTIVE_USER1_CN, SUFFIX) +STAGED_USER1_CN = 'staged user1' +STAGED_USER1_DN = 'cn=%s,%s' % (STAGED_USER1_CN, PROVISIONING) +DUMMY_USER1_CN = 'dummy user1' +DUMMY_USER1_DN = 'cn=%s,%s' % (DUMMY_USER1_CN, DUMMY_CONTAINER) + +ALLOCATED_ATTR = 'employeeNumber' + + +def _header(topology_st, label): + topology_st.standalone.log.info("\n\n###############################################") + topology_st.standalone.log.info("#######") + topology_st.standalone.log.info("####### %s" % label) + topology_st.standalone.log.info("#######") + topology_st.standalone.log.info("###############################################") + + +def test_ticket47828_init(topology_st): + """ + Enable DNA + """ + topology_st.standalone.plugins.enable(name=PLUGIN_DNA) + + topology_st.standalone.add_s(Entry((PROVISIONING, {'objectclass': "top nscontainer".split(), + 'cn': 'provisioning'}))) + topology_st.standalone.add_s(Entry((DUMMY_CONTAINER, {'objectclass': "top nscontainer".split(), + 'cn': 'dummy container'}))) + + dn_config = "cn=excluded scope, cn=%s, %s" % (PLUGIN_DNA, DN_PLUGIN) + topology_st.standalone.add_s(Entry((dn_config, {'objectclass': "top extensibleObject".split(), + 'cn': 'excluded scope', + 'dnaType': ALLOCATED_ATTR, + 'dnaNextValue': str(1000), + 'dnaMaxValue': str(2000), + 'dnaMagicRegen': str(-1), + 'dnaFilter': '(&(objectClass=person)(objectClass=organizationalPerson)(objectClass=inetOrgPerson))', + 'dnaScope': SUFFIX}))) + topology_st.standalone.restart(timeout=10) + + +def test_ticket47828_run_0(topology_st): + """ + NO exclude scope: Add an active entry and check its ALLOCATED_ATTR is set + """ + _header(topology_st, 'NO exclude scope: Add an active entry and check its ALLOCATED_ATTR is set') + + topology_st.standalone.add_s( + Entry((ACTIVE_USER1_DN, {'objectclass': "top person organizationalPerson inetOrgPerson".split(), + 'cn': ACTIVE_USER1_CN, + 'sn': ACTIVE_USER1_CN, + ALLOCATED_ATTR: str(-1)}))) + ent = topology_st.standalone.getEntry(ACTIVE_USER1_DN, ldap.SCOPE_BASE, "(objectclass=*)") + assert ent.hasAttr(ALLOCATED_ATTR) + assert ensure_str(ent.getValue(ALLOCATED_ATTR)) != str(-1) + topology_st.standalone.log.debug('%s.%s=%s' % (ACTIVE_USER1_CN, ALLOCATED_ATTR, ensure_str(ent.getValue(ALLOCATED_ATTR)))) + topology_st.standalone.delete_s(ACTIVE_USER1_DN) + + +def test_ticket47828_run_1(topology_st): + """ + NO exclude 
scope: Add an active entry and check its ALLOCATED_ATTR is unchanged (!= magic) + """ + _header(topology_st, 'NO exclude scope: Add an active entry and check its ALLOCATED_ATTR is unchanged (!= magic)') + + topology_st.standalone.add_s( + Entry((ACTIVE_USER1_DN, {'objectclass': "top person organizationalPerson inetOrgPerson".split(), + 'cn': ACTIVE_USER1_CN, + 'sn': ACTIVE_USER1_CN, + ALLOCATED_ATTR: str(20)}))) + ent = topology_st.standalone.getEntry(ACTIVE_USER1_DN, ldap.SCOPE_BASE, "(objectclass=*)") + assert ent.hasAttr(ALLOCATED_ATTR) + assert ensure_str(ent.getValue(ALLOCATED_ATTR)) == str(20) + topology_st.standalone.log.debug('%s.%s=%s' % (ACTIVE_USER1_CN, ALLOCATED_ATTR, ensure_str(ent.getValue(ALLOCATED_ATTR)))) + topology_st.standalone.delete_s(ACTIVE_USER1_DN) + + +def test_ticket47828_run_2(topology_st): + """ + NO exclude scope: Add a staged entry and check its ALLOCATED_ATTR is set + """ + _header(topology_st, 'NO exclude scope: Add a staged entry and check its ALLOCATED_ATTR is set') + + topology_st.standalone.add_s( + Entry((STAGED_USER1_DN, {'objectclass': "top person organizationalPerson inetOrgPerson".split(), + 'cn': STAGED_USER1_CN, + 'sn': STAGED_USER1_CN, + ALLOCATED_ATTR: str(-1)}))) + ent = topology_st.standalone.getEntry(STAGED_USER1_DN, ldap.SCOPE_BASE, "(objectclass=*)") + assert ent.hasAttr(ALLOCATED_ATTR) + assert ensure_str(ent.getValue(ALLOCATED_ATTR)) != str(-1) + topology_st.standalone.log.debug('%s.%s=%s' % (STAGED_USER1_CN, ALLOCATED_ATTR, ensure_str(ent.getValue(ALLOCATED_ATTR)))) + topology_st.standalone.delete_s(STAGED_USER1_DN) + + +def test_ticket47828_run_3(topology_st): + """ + NO exclude scope: Add a staged entry and check its ALLOCATED_ATTR is unchanged (!= magic) + """ + _header(topology_st, 'NO exclude scope: Add a staged entry and check its ALLOCATED_ATTR is unchanged (!= magic)') + + topology_st.standalone.add_s( + Entry((STAGED_USER1_DN, {'objectclass': "top person organizationalPerson inetOrgPerson".split(), + 'cn': STAGED_USER1_CN, + 'sn': STAGED_USER1_CN, + ALLOCATED_ATTR: str(20)}))) + ent = topology_st.standalone.getEntry(STAGED_USER1_DN, ldap.SCOPE_BASE, "(objectclass=*)") + assert ent.hasAttr(ALLOCATED_ATTR) + assert ensure_str(ent.getValue(ALLOCATED_ATTR)) == str(20) + topology_st.standalone.log.debug('%s.%s=%s' % (STAGED_USER1_CN, ALLOCATED_ATTR, ensure_str(ent.getValue(ALLOCATED_ATTR)))) + topology_st.standalone.delete_s(STAGED_USER1_DN) + + +def test_ticket47828_run_4(topology_st): + ''' + Exclude the provisioning container + ''' + _header(topology_st, 'Exclude the provisioning container') + + dn_config = "cn=excluded scope, cn=%s, %s" % (PLUGIN_DNA, DN_PLUGIN) + mod = [(ldap.MOD_REPLACE, 'dnaExcludeScope', ensure_bytes(PROVISIONING))] + topology_st.standalone.modify_s(dn_config, mod) + + +def test_ticket47828_run_5(topology_st): + """ + Provisioning excluded scope: Add an active entry and check its ALLOCATED_ATTR is set + """ + _header(topology_st, 'Provisioning excluded scope: Add an active entry and check its ALLOCATED_ATTR is set') + + topology_st.standalone.add_s( + Entry((ACTIVE_USER1_DN, {'objectclass': "top person organizationalPerson inetOrgPerson".split(), + 'cn': ACTIVE_USER1_CN, + 'sn': ACTIVE_USER1_CN, + ALLOCATED_ATTR: str(-1)}))) + ent = topology_st.standalone.getEntry(ACTIVE_USER1_DN, ldap.SCOPE_BASE, "(objectclass=*)") + assert ent.hasAttr(ALLOCATED_ATTR) + assert ensure_str(ensure_str(ent.getValue(ALLOCATED_ATTR))) != str(-1) + topology_st.standalone.log.debug('%s.%s=%s' % (ACTIVE_USER1_CN, ALLOCATED_ATTR, 
ensure_str(ensure_str(ent.getValue(ALLOCATED_ATTR))))) + topology_st.standalone.delete_s(ACTIVE_USER1_DN) + + +def test_ticket47828_run_6(topology_st): + """ + Provisioning excluded scope: Add an active entry and check its ALLOCATED_ATTR is unchanged (!= magic) + """ + _header(topology_st, + 'Provisioning excluded scope: Add an active entry and check its ALLOCATED_ATTR is unchanged (!= magic)') + + topology_st.standalone.add_s( + Entry((ACTIVE_USER1_DN, {'objectclass': "top person organizationalPerson inetOrgPerson".split(), + 'cn': ACTIVE_USER1_CN, + 'sn': ACTIVE_USER1_CN, + ALLOCATED_ATTR: str(20)}))) + ent = topology_st.standalone.getEntry(ACTIVE_USER1_DN, ldap.SCOPE_BASE, "(objectclass=*)") + assert ent.hasAttr(ALLOCATED_ATTR) + assert ensure_str(ensure_str(ent.getValue(ALLOCATED_ATTR))) == str(20) + topology_st.standalone.log.debug('%s.%s=%s' % (ACTIVE_USER1_CN, ALLOCATED_ATTR, ensure_str(ensure_str(ent.getValue(ALLOCATED_ATTR))))) + topology_st.standalone.delete_s(ACTIVE_USER1_DN) + + +def test_ticket47828_run_7(topology_st): + """ + Provisioning excluded scope: Add a staged entry and check its ALLOCATED_ATTR is not set + """ + _header(topology_st, 'Provisioning excluded scope: Add a staged entry and check its ALLOCATED_ATTR is not set') + + topology_st.standalone.add_s( + Entry((STAGED_USER1_DN, {'objectclass': "top person organizationalPerson inetOrgPerson".split(), + 'cn': STAGED_USER1_CN, + 'sn': STAGED_USER1_CN, + ALLOCATED_ATTR: str(-1)}))) + ent = topology_st.standalone.getEntry(STAGED_USER1_DN, ldap.SCOPE_BASE, "(objectclass=*)") + assert ent.hasAttr(ALLOCATED_ATTR) + assert ensure_str(ent.getValue(ALLOCATED_ATTR)) == str(-1) + topology_st.standalone.log.debug('%s.%s=%s' % (STAGED_USER1_CN, ALLOCATED_ATTR, ensure_str(ent.getValue(ALLOCATED_ATTR)))) + topology_st.standalone.delete_s(STAGED_USER1_DN) + + +def test_ticket47828_run_8(topology_st): + """ + Provisioning excluded scope: Add a staged entry and check its ALLOCATED_ATTR is unchanged (!= magic) + """ + _header(topology_st, + 'Provisioning excluded scope: Add a staged entry and check its ALLOCATED_ATTR is unchanged (!= magic)') + + topology_st.standalone.add_s( + Entry((STAGED_USER1_DN, {'objectclass': "top person organizationalPerson inetOrgPerson".split(), + 'cn': STAGED_USER1_CN, + 'sn': STAGED_USER1_CN, + ALLOCATED_ATTR: str(20)}))) + ent = topology_st.standalone.getEntry(STAGED_USER1_DN, ldap.SCOPE_BASE, "(objectclass=*)") + assert ent.hasAttr(ALLOCATED_ATTR) + assert ensure_str(ent.getValue(ALLOCATED_ATTR)) == str(20) + topology_st.standalone.log.debug('%s.%s=%s' % (STAGED_USER1_CN, ALLOCATED_ATTR, ensure_str(ent.getValue(ALLOCATED_ATTR)))) + topology_st.standalone.delete_s(STAGED_USER1_DN) + + +def test_ticket47828_run_9(topology_st): + """ + Provisioning excluded scope: Add an dummy entry and check its ALLOCATED_ATTR is set + """ + _header(topology_st, 'Provisioning excluded scope: Add an dummy entry and check its ALLOCATED_ATTR is set') + + topology_st.standalone.add_s( + Entry((DUMMY_USER1_DN, {'objectclass': "top person organizationalPerson inetOrgPerson".split(), + 'cn': DUMMY_USER1_CN, + 'sn': DUMMY_USER1_CN, + ALLOCATED_ATTR: str(-1)}))) + ent = topology_st.standalone.getEntry(DUMMY_USER1_DN, ldap.SCOPE_BASE, "(objectclass=*)") + assert ent.hasAttr(ALLOCATED_ATTR) + assert ensure_str(ent.getValue(ALLOCATED_ATTR)) != str(-1) + topology_st.standalone.log.debug('%s.%s=%s' % (DUMMY_USER1_CN, ALLOCATED_ATTR, ensure_str(ent.getValue(ALLOCATED_ATTR)))) + topology_st.standalone.delete_s(DUMMY_USER1_DN) + + +def 
test_ticket47828_run_10(topology_st): + """ + Provisioning excluded scope: Add an dummy entry and check its ALLOCATED_ATTR is unchanged (!= magic) + """ + _header(topology_st, + 'Provisioning excluded scope: Add an dummy entry and check its ALLOCATED_ATTR is unchanged (!= magic)') + + topology_st.standalone.add_s( + Entry((DUMMY_USER1_DN, {'objectclass': "top person organizationalPerson inetOrgPerson".split(), + 'cn': DUMMY_USER1_CN, + 'sn': DUMMY_USER1_CN, + ALLOCATED_ATTR: str(20)}))) + ent = topology_st.standalone.getEntry(DUMMY_USER1_DN, ldap.SCOPE_BASE, "(objectclass=*)") + assert ent.hasAttr(ALLOCATED_ATTR) + assert ensure_str(ent.getValue(ALLOCATED_ATTR)) == str(20) + topology_st.standalone.log.debug('%s.%s=%s' % (DUMMY_USER1_CN, ALLOCATED_ATTR, ensure_str(ent.getValue(ALLOCATED_ATTR)))) + topology_st.standalone.delete_s(DUMMY_USER1_DN) + + +def test_ticket47828_run_11(topology_st): + ''' + Exclude (in addition) the dummy container + ''' + _header(topology_st, 'Exclude (in addition) the dummy container') + + dn_config = "cn=excluded scope, cn=%s, %s" % (PLUGIN_DNA, DN_PLUGIN) + mod = [(ldap.MOD_ADD, 'dnaExcludeScope', ensure_bytes(DUMMY_CONTAINER))] + topology_st.standalone.modify_s(dn_config, mod) + + +def test_ticket47828_run_12(topology_st): + """ + Provisioning/Dummy excluded scope: Add an active entry and check its ALLOCATED_ATTR is set + """ + _header(topology_st, 'Provisioning/Dummy excluded scope: Add an active entry and check its ALLOCATED_ATTR is set') + + topology_st.standalone.add_s( + Entry((ACTIVE_USER1_DN, {'objectclass': "top person organizationalPerson inetOrgPerson".split(), + 'cn': ACTIVE_USER1_CN, + 'sn': ACTIVE_USER1_CN, + ALLOCATED_ATTR: str(-1)}))) + ent = topology_st.standalone.getEntry(ACTIVE_USER1_DN, ldap.SCOPE_BASE, "(objectclass=*)") + assert ent.hasAttr(ALLOCATED_ATTR) + assert ensure_str(ent.getValue(ALLOCATED_ATTR)) != str(-1) + topology_st.standalone.log.debug('%s.%s=%s' % (ACTIVE_USER1_CN, ALLOCATED_ATTR, ensure_str(ent.getValue(ALLOCATED_ATTR)))) + topology_st.standalone.delete_s(ACTIVE_USER1_DN) + + +def test_ticket47828_run_13(topology_st): + """ + Provisioning/Dummy excluded scope: Add an active entry and check its ALLOCATED_ATTR is unchanged (!= magic) + """ + _header(topology_st, + 'Provisioning/Dummy excluded scope: Add an active entry and check its ALLOCATED_ATTR is unchanged (!= magic)') + + topology_st.standalone.add_s( + Entry((ACTIVE_USER1_DN, {'objectclass': "top person organizationalPerson inetOrgPerson".split(), + 'cn': ACTIVE_USER1_CN, + 'sn': ACTIVE_USER1_CN, + ALLOCATED_ATTR: str(20)}))) + ent = topology_st.standalone.getEntry(ACTIVE_USER1_DN, ldap.SCOPE_BASE, "(objectclass=*)") + assert ent.hasAttr(ALLOCATED_ATTR) + assert ensure_str(ent.getValue(ALLOCATED_ATTR)) == str(20) + topology_st.standalone.log.debug('%s.%s=%s' % (ACTIVE_USER1_CN, ALLOCATED_ATTR, ensure_str(ent.getValue(ALLOCATED_ATTR)))) + topology_st.standalone.delete_s(ACTIVE_USER1_DN) + + +def test_ticket47828_run_14(topology_st): + """ + Provisioning/Dummy excluded scope: Add a staged entry and check its ALLOCATED_ATTR is not set + """ + _header(topology_st, + 'Provisioning/Dummy excluded scope: Add a staged entry and check its ALLOCATED_ATTR is not set') + + topology_st.standalone.add_s( + Entry((STAGED_USER1_DN, {'objectclass': "top person organizationalPerson inetOrgPerson".split(), + 'cn': STAGED_USER1_CN, + 'sn': STAGED_USER1_CN, + ALLOCATED_ATTR: str(-1)}))) + ent = topology_st.standalone.getEntry(STAGED_USER1_DN, ldap.SCOPE_BASE, "(objectclass=*)") + assert 
ent.hasAttr(ALLOCATED_ATTR) + assert ensure_str(ent.getValue(ALLOCATED_ATTR)) == str(-1) + topology_st.standalone.log.debug('%s.%s=%s' % (STAGED_USER1_CN, ALLOCATED_ATTR, ensure_str(ent.getValue(ALLOCATED_ATTR)))) + topology_st.standalone.delete_s(STAGED_USER1_DN) + + +def test_ticket47828_run_15(topology_st): + """ + Provisioning/Dummy excluded scope: Add a staged entry and check its ALLOCATED_ATTR is unchanged (!= magic) + """ + _header(topology_st, + 'Provisioning/Dummy excluded scope: Add a staged entry and check its ALLOCATED_ATTR is unchanged (!= magic)') + + topology_st.standalone.add_s( + Entry((STAGED_USER1_DN, {'objectclass': "top person organizationalPerson inetOrgPerson".split(), + 'cn': STAGED_USER1_CN, + 'sn': STAGED_USER1_CN, + ALLOCATED_ATTR: str(20)}))) + ent = topology_st.standalone.getEntry(STAGED_USER1_DN, ldap.SCOPE_BASE, "(objectclass=*)") + assert ent.hasAttr(ALLOCATED_ATTR) + assert ensure_str(ent.getValue(ALLOCATED_ATTR)) == str(20) + topology_st.standalone.log.debug('%s.%s=%s' % (STAGED_USER1_CN, ALLOCATED_ATTR, ensure_str(ent.getValue(ALLOCATED_ATTR)))) + topology_st.standalone.delete_s(STAGED_USER1_DN) + + +def test_ticket47828_run_16(topology_st): + """ + Provisioning/Dummy excluded scope: Add an dummy entry and check its ALLOCATED_ATTR is not set + """ + _header(topology_st, + 'Provisioning/Dummy excluded scope: Add an dummy entry and check its ALLOCATED_ATTR not is set') + + topology_st.standalone.add_s( + Entry((DUMMY_USER1_DN, {'objectclass': "top person organizationalPerson inetOrgPerson".split(), + 'cn': DUMMY_USER1_CN, + 'sn': DUMMY_USER1_CN, + ALLOCATED_ATTR: str(-1)}))) + ent = topology_st.standalone.getEntry(DUMMY_USER1_DN, ldap.SCOPE_BASE, "(objectclass=*)") + assert ent.hasAttr(ALLOCATED_ATTR) + assert ensure_str(ent.getValue(ALLOCATED_ATTR)) == str(-1) + topology_st.standalone.log.debug('%s.%s=%s' % (DUMMY_USER1_CN, ALLOCATED_ATTR, ensure_str(ent.getValue(ALLOCATED_ATTR)))) + topology_st.standalone.delete_s(DUMMY_USER1_DN) + + +def test_ticket47828_run_17(topology_st): + """ + Provisioning/Dummy excluded scope: Add an dummy entry and check its ALLOCATED_ATTR is unchanged (!= magic) + """ + _header(topology_st, + 'Provisioning/Dummy excluded scope: Add an dummy entry and check its ALLOCATED_ATTR is unchanged (!= magic)') + + topology_st.standalone.add_s( + Entry((DUMMY_USER1_DN, {'objectclass': "top person organizationalPerson inetOrgPerson".split(), + 'cn': DUMMY_USER1_CN, + 'sn': DUMMY_USER1_CN, + ALLOCATED_ATTR: str(20)}))) + ent = topology_st.standalone.getEntry(DUMMY_USER1_DN, ldap.SCOPE_BASE, "(objectclass=*)") + assert ent.hasAttr(ALLOCATED_ATTR) + assert ensure_str(ent.getValue(ALLOCATED_ATTR)) == str(20) + topology_st.standalone.log.debug('%s.%s=%s' % (DUMMY_USER1_CN, ALLOCATED_ATTR, ensure_str(ent.getValue(ALLOCATED_ATTR)))) + topology_st.standalone.delete_s(DUMMY_USER1_DN) + + +def test_ticket47828_run_18(topology_st): + ''' + Exclude PROVISIONING and a wrong container + ''' + _header(topology_st, 'Exclude PROVISIONING and a wrong container') + + dn_config = "cn=excluded scope, cn=%s, %s" % (PLUGIN_DNA, DN_PLUGIN) + mod = [(ldap.MOD_REPLACE, 'dnaExcludeScope', ensure_bytes(PROVISIONING))] + topology_st.standalone.modify_s(dn_config, mod) + try: + mod = [(ldap.MOD_ADD, 'dnaExcludeScope', ensure_bytes("invalidDN,%s" % SUFFIX))] + topology_st.standalone.modify_s(dn_config, mod) + raise ValueError("invalid dnaExcludeScope value (not a DN)") + except ldap.INVALID_SYNTAX: + pass + + +def test_ticket47828_run_19(topology_st): + """ + 
Provisioning+wrong container excluded scope: Add an active entry and check its ALLOCATED_ATTR is set + """ + _header(topology_st, + 'Provisioning+wrong container excluded scope: Add an active entry and check its ALLOCATED_ATTR is set') + + topology_st.standalone.add_s( + Entry((ACTIVE_USER1_DN, {'objectclass': "top person organizationalPerson inetOrgPerson".split(), + 'cn': ACTIVE_USER1_CN, + 'sn': ACTIVE_USER1_CN, + ALLOCATED_ATTR: str(-1)}))) + ent = topology_st.standalone.getEntry(ACTIVE_USER1_DN, ldap.SCOPE_BASE, "(objectclass=*)") + assert ent.hasAttr(ALLOCATED_ATTR) + assert ensure_str(ent.getValue(ALLOCATED_ATTR)) != str(-1) + topology_st.standalone.log.debug('%s.%s=%s' % (ACTIVE_USER1_CN, ALLOCATED_ATTR, ensure_str(ent.getValue(ALLOCATED_ATTR)))) + topology_st.standalone.delete_s(ACTIVE_USER1_DN) + + +def test_ticket47828_run_20(topology_st): + """ + Provisioning+wrong container excluded scope: Add an active entry and check its ALLOCATED_ATTR is unchanged (!= magic) + """ + _header(topology_st, + 'Provisioning+wrong container excluded scope: Add an active entry and check its ALLOCATED_ATTR is unchanged (!= magic)') + + topology_st.standalone.add_s( + Entry((ACTIVE_USER1_DN, {'objectclass': "top person organizationalPerson inetOrgPerson".split(), + 'cn': ACTIVE_USER1_CN, + 'sn': ACTIVE_USER1_CN, + ALLOCATED_ATTR: str(20)}))) + ent = topology_st.standalone.getEntry(ACTIVE_USER1_DN, ldap.SCOPE_BASE, "(objectclass=*)") + assert ent.hasAttr(ALLOCATED_ATTR) + assert ensure_str(ent.getValue(ALLOCATED_ATTR)) == str(20) + topology_st.standalone.log.debug('%s.%s=%s' % (ACTIVE_USER1_CN, ALLOCATED_ATTR, ensure_str(ent.getValue(ALLOCATED_ATTR)))) + topology_st.standalone.delete_s(ACTIVE_USER1_DN) + + +def test_ticket47828_run_21(topology_st): + """ + Provisioning+wrong container excluded scope: Add a staged entry and check its ALLOCATED_ATTR is not set + """ + _header(topology_st, + 'Provisioning+wrong container excluded scope: Add a staged entry and check its ALLOCATED_ATTR is not set') + + topology_st.standalone.add_s( + Entry((STAGED_USER1_DN, {'objectclass': "top person organizationalPerson inetOrgPerson".split(), + 'cn': STAGED_USER1_CN, + 'sn': STAGED_USER1_CN, + ALLOCATED_ATTR: str(-1)}))) + ent = topology_st.standalone.getEntry(STAGED_USER1_DN, ldap.SCOPE_BASE, "(objectclass=*)") + assert ent.hasAttr(ALLOCATED_ATTR) + assert ensure_str(ent.getValue(ALLOCATED_ATTR)) == str(-1) + topology_st.standalone.log.debug('%s.%s=%s' % (STAGED_USER1_CN, ALLOCATED_ATTR, ensure_str(ent.getValue(ALLOCATED_ATTR)))) + topology_st.standalone.delete_s(STAGED_USER1_DN) + + +def test_ticket47828_run_22(topology_st): + """ + Provisioning+wrong container excluded scope: Add a staged entry and check its ALLOCATED_ATTR is unchanged (!= magic) + """ + _header(topology_st, + 'Provisioning+wrong container excluded scope: Add a staged entry and check its ALLOCATED_ATTR is unchanged (!= magic)') + + topology_st.standalone.add_s( + Entry((STAGED_USER1_DN, {'objectclass': "top person organizationalPerson inetOrgPerson".split(), + 'cn': STAGED_USER1_CN, + 'sn': STAGED_USER1_CN, + ALLOCATED_ATTR: str(20)}))) + ent = topology_st.standalone.getEntry(STAGED_USER1_DN, ldap.SCOPE_BASE, "(objectclass=*)") + assert ent.hasAttr(ALLOCATED_ATTR) + assert ensure_str(ent.getValue(ALLOCATED_ATTR)) == str(20) + topology_st.standalone.log.debug('%s.%s=%s' % (STAGED_USER1_CN, ALLOCATED_ATTR, ensure_str(ent.getValue(ALLOCATED_ATTR)))) + topology_st.standalone.delete_s(STAGED_USER1_DN) + + +def test_ticket47828_run_23(topology_st): + """ 
+ Provisioning+wrong container excluded scope: Add an dummy entry and check its ALLOCATED_ATTR is set + """ + _header(topology_st, + 'Provisioning+wrong container excluded scope: Add an dummy entry and check its ALLOCATED_ATTR is set') + + topology_st.standalone.add_s( + Entry((DUMMY_USER1_DN, {'objectclass': "top person organizationalPerson inetOrgPerson".split(), + 'cn': DUMMY_USER1_CN, + 'sn': DUMMY_USER1_CN, + ALLOCATED_ATTR: str(-1)}))) + ent = topology_st.standalone.getEntry(DUMMY_USER1_DN, ldap.SCOPE_BASE, "(objectclass=*)") + assert ent.hasAttr(ALLOCATED_ATTR) + assert ensure_str(ent.getValue(ALLOCATED_ATTR)) != str(-1) + topology_st.standalone.log.debug('%s.%s=%s' % (DUMMY_USER1_CN, ALLOCATED_ATTR, ensure_str(ent.getValue(ALLOCATED_ATTR)))) + topology_st.standalone.delete_s(DUMMY_USER1_DN) + + +def test_ticket47828_run_24(topology_st): + """ + Provisioning+wrong container excluded scope: Add an dummy entry and check its ALLOCATED_ATTR is unchanged (!= magic) + """ + _header(topology_st, + 'Provisioning+wrong container excluded scope: Add an dummy entry and check its ALLOCATED_ATTR is unchanged (!= magic)') + + topology_st.standalone.add_s( + Entry((DUMMY_USER1_DN, {'objectclass': "top person organizationalPerson inetOrgPerson".split(), + 'cn': DUMMY_USER1_CN, + 'sn': DUMMY_USER1_CN, + ALLOCATED_ATTR: str(20)}))) + ent = topology_st.standalone.getEntry(DUMMY_USER1_DN, ldap.SCOPE_BASE, "(objectclass=*)") + assert ent.hasAttr(ALLOCATED_ATTR) + assert ensure_str(ent.getValue(ALLOCATED_ATTR)) == str(20) + topology_st.standalone.log.debug('%s.%s=%s' % (DUMMY_USER1_CN, ALLOCATED_ATTR, ensure_str(ent.getValue(ALLOCATED_ATTR)))) + topology_st.standalone.delete_s(DUMMY_USER1_DN) + + +def test_ticket47828_run_25(topology_st): + ''' + Exclude a wrong container + ''' + _header(topology_st, 'Exclude a wrong container') + + dn_config = "cn=excluded scope, cn=%s, %s" % (PLUGIN_DNA, DN_PLUGIN) + + try: + mod = [(ldap.MOD_REPLACE, 'dnaExcludeScope', ensure_bytes("invalidDN,%s" % SUFFIX))] + topology_st.standalone.modify_s(dn_config, mod) + raise ValueError("invalid dnaExcludeScope value (not a DN)") + except ldap.INVALID_SYNTAX: + pass + + +def test_ticket47828_run_26(topology_st): + """ + Wrong container excluded scope: Add an active entry and check its ALLOCATED_ATTR is set + """ + _header(topology_st, 'Wrong container excluded scope: Add an active entry and check its ALLOCATED_ATTR is set') + + topology_st.standalone.add_s( + Entry((ACTIVE_USER1_DN, {'objectclass': "top person organizationalPerson inetOrgPerson".split(), + 'cn': ACTIVE_USER1_CN, + 'sn': ACTIVE_USER1_CN, + ALLOCATED_ATTR: str(-1)}))) + ent = topology_st.standalone.getEntry(ACTIVE_USER1_DN, ldap.SCOPE_BASE, "(objectclass=*)") + assert ent.hasAttr(ALLOCATED_ATTR) + assert ensure_str(ent.getValue(ALLOCATED_ATTR)) != str(-1) + topology_st.standalone.log.debug('%s.%s=%s' % (ACTIVE_USER1_CN, ALLOCATED_ATTR, ensure_str(ent.getValue(ALLOCATED_ATTR)))) + topology_st.standalone.delete_s(ACTIVE_USER1_DN) + + +def test_ticket47828_run_27(topology_st): + """ + Wrong container excluded scope: Add an active entry and check its ALLOCATED_ATTR is unchanged (!= magic) + """ + _header(topology_st, + 'Wrong container excluded scope: Add an active entry and check its ALLOCATED_ATTR is unchanged (!= magic)') + + topology_st.standalone.add_s( + Entry((ACTIVE_USER1_DN, {'objectclass': "top person organizationalPerson inetOrgPerson".split(), + 'cn': ACTIVE_USER1_CN, + 'sn': ACTIVE_USER1_CN, + ALLOCATED_ATTR: str(20)}))) + ent = 
topology_st.standalone.getEntry(ACTIVE_USER1_DN, ldap.SCOPE_BASE, "(objectclass=*)") + assert ent.hasAttr(ALLOCATED_ATTR) + assert ensure_str(ent.getValue(ALLOCATED_ATTR)) == str(20) + topology_st.standalone.log.debug('%s.%s=%s' % (ACTIVE_USER1_CN, ALLOCATED_ATTR, ensure_str(ent.getValue(ALLOCATED_ATTR)))) + topology_st.standalone.delete_s(ACTIVE_USER1_DN) + + +def test_ticket47828_run_28(topology_st): + """ + Wrong container excluded scope: Add a staged entry and check its ALLOCATED_ATTR is not set + """ + _header(topology_st, 'Wrong container excluded scope: Add a staged entry and check its ALLOCATED_ATTR is not set') + + topology_st.standalone.add_s( + Entry((STAGED_USER1_DN, {'objectclass': "top person organizationalPerson inetOrgPerson".split(), + 'cn': STAGED_USER1_CN, + 'sn': STAGED_USER1_CN, + ALLOCATED_ATTR: str(-1)}))) + ent = topology_st.standalone.getEntry(STAGED_USER1_DN, ldap.SCOPE_BASE, "(objectclass=*)") + assert ent.hasAttr(ALLOCATED_ATTR) + assert ensure_str(ent.getValue(ALLOCATED_ATTR)) == str(-1) + topology_st.standalone.log.debug('%s.%s=%s' % (STAGED_USER1_CN, ALLOCATED_ATTR, ensure_str(ent.getValue(ALLOCATED_ATTR)))) + topology_st.standalone.delete_s(STAGED_USER1_DN) + + +def test_ticket47828_run_29(topology_st): + """ + Wrong container excluded scope: Add a staged entry and check its ALLOCATED_ATTR is unchanged (!= magic) + """ + _header(topology_st, + 'Wrong container excluded scope: Add a staged entry and check its ALLOCATED_ATTR is unchanged (!= magic)') + + topology_st.standalone.add_s( + Entry((STAGED_USER1_DN, {'objectclass': "top person organizationalPerson inetOrgPerson".split(), + 'cn': STAGED_USER1_CN, + 'sn': STAGED_USER1_CN, + ALLOCATED_ATTR: str(20)}))) + ent = topology_st.standalone.getEntry(STAGED_USER1_DN, ldap.SCOPE_BASE, "(objectclass=*)") + assert ent.hasAttr(ALLOCATED_ATTR) + assert ensure_str(ent.getValue(ALLOCATED_ATTR)) == str(20) + topology_st.standalone.log.debug('%s.%s=%s' % (STAGED_USER1_CN, ALLOCATED_ATTR, ensure_str(ent.getValue(ALLOCATED_ATTR)))) + topology_st.standalone.delete_s(STAGED_USER1_DN) + + +def test_ticket47828_run_30(topology_st): + """ + Wrong container excluded scope: Add an dummy entry and check its ALLOCATED_ATTR is set + """ + _header(topology_st, 'Wrong container excluded scope: Add an dummy entry and check its ALLOCATED_ATTR is set') + + topology_st.standalone.add_s( + Entry((DUMMY_USER1_DN, {'objectclass': "top person organizationalPerson inetOrgPerson".split(), + 'cn': DUMMY_USER1_CN, + 'sn': DUMMY_USER1_CN, + ALLOCATED_ATTR: str(-1)}))) + ent = topology_st.standalone.getEntry(DUMMY_USER1_DN, ldap.SCOPE_BASE, "(objectclass=*)") + assert ent.hasAttr(ALLOCATED_ATTR) + assert ensure_str(ent.getValue(ALLOCATED_ATTR)) != str(-1) + topology_st.standalone.log.debug('%s.%s=%s' % (DUMMY_USER1_CN, ALLOCATED_ATTR, ensure_str(ent.getValue(ALLOCATED_ATTR)))) + topology_st.standalone.delete_s(DUMMY_USER1_DN) + + +def test_ticket47828_run_31(topology_st): + """ + Wrong container excluded scope: Add an dummy entry and check its ALLOCATED_ATTR is unchanged (!= magic) + """ + _header(topology_st, + 'Wrong container excluded scope: Add an dummy entry and check its ALLOCATED_ATTR is unchanged (!= magic)') + + topology_st.standalone.add_s( + Entry((DUMMY_USER1_DN, {'objectclass': "top person organizationalPerson inetOrgPerson".split(), + 'cn': DUMMY_USER1_CN, + 'sn': DUMMY_USER1_CN, + ALLOCATED_ATTR: str(20)}))) + ent = topology_st.standalone.getEntry(DUMMY_USER1_DN, ldap.SCOPE_BASE, "(objectclass=*)") + assert 
ent.hasAttr(ALLOCATED_ATTR) + assert ensure_str(ent.getValue(ALLOCATED_ATTR)) == str(20) + topology_st.standalone.log.debug('%s.%s=%s' % (DUMMY_USER1_CN, ALLOCATED_ATTR, ensure_str(ent.getValue(ALLOCATED_ATTR)))) + topology_st.standalone.delete_s(DUMMY_USER1_DN) + + +if __name__ == '__main__': + # Run isolated + # -s for DEBUG mode + CURRENT_FILE = os.path.realpath(__file__) + pytest.main("-s %s" % CURRENT_FILE) diff --git a/dirsrvtests/tests/tickets/ticket47829_test.py b/dirsrvtests/tests/tickets/ticket47829_test.py new file mode 100644 index 0000000..64aee67 --- /dev/null +++ b/dirsrvtests/tests/tickets/ticket47829_test.py @@ -0,0 +1,629 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2016 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. +# --- END COPYRIGHT BLOCK --- +# +import logging +import time + +import ldap +import pytest +from lib389 import Entry +from lib389._constants import * +from lib389.topologies import topology_st +from lib389.utils import * + +# Skip on older versions +pytestmark = [pytest.mark.tier2, + pytest.mark.skipif(ds_is_older('1.3.4'), reason="Not implemented")] +SCOPE_IN_CN = 'in' +SCOPE_OUT_CN = 'out' +SCOPE_IN_DN = 'cn=%s,%s' % (SCOPE_IN_CN, SUFFIX) +SCOPE_OUT_DN = 'cn=%s,%s' % (SCOPE_OUT_CN, SUFFIX) + +PROVISIONING_CN = "provisioning" +PROVISIONING_DN = "cn=%s,%s" % (PROVISIONING_CN, SCOPE_IN_DN) + +ACTIVE_CN = "accounts" +STAGE_CN = "staged users" +DELETE_CN = "deleted users" +ACTIVE_DN = "cn=%s,%s" % (ACTIVE_CN, SCOPE_IN_DN) +STAGE_DN = "cn=%s,%s" % (STAGE_CN, PROVISIONING_DN) +DELETE_DN = "cn=%s,%s" % (DELETE_CN, PROVISIONING_DN) + +STAGE_USER_CN = "stage guy" +STAGE_USER_DN = "cn=%s,%s" % (STAGE_USER_CN, STAGE_DN) + +ACTIVE_USER_CN = "active guy" +ACTIVE_USER_DN = "cn=%s,%s" % (ACTIVE_USER_CN, ACTIVE_DN) + +OUT_USER_CN = "out guy" +OUT_USER_DN = "cn=%s,%s" % (OUT_USER_CN, SCOPE_OUT_DN) + +STAGE_GROUP_CN = "stage group" +STAGE_GROUP_DN = "cn=%s,%s" % (STAGE_GROUP_CN, STAGE_DN) + +ACTIVE_GROUP_CN = "active group" +ACTIVE_GROUP_DN = "cn=%s,%s" % (ACTIVE_GROUP_CN, ACTIVE_DN) + +OUT_GROUP_CN = "out group" +OUT_GROUP_DN = "cn=%s,%s" % (OUT_GROUP_CN, SCOPE_OUT_DN) + +INDIRECT_ACTIVE_GROUP_CN = "indirect active group" +INDIRECT_ACTIVE_GROUP_DN = "cn=%s,%s" % (INDIRECT_ACTIVE_GROUP_CN, ACTIVE_DN) + +log = logging.getLogger(__name__) + + +def _header(topology_st, label): + topology_st.standalone.log.info("\n\n###############################################") + topology_st.standalone.log.info("#######") + topology_st.standalone.log.info("####### %s" % label) + topology_st.standalone.log.info("#######") + topology_st.standalone.log.info("###############################################") + + +def _add_user(topology_st, type='active'): + if type == 'active': + topology_st.standalone.add_s(Entry((ACTIVE_USER_DN, { + 'objectclass': "top person inetuser".split(), + 'sn': ACTIVE_USER_CN, + 'cn': ACTIVE_USER_CN}))) + elif type == 'stage': + topology_st.standalone.add_s(Entry((STAGE_USER_DN, { + 'objectclass': "top person inetuser".split(), + 'sn': STAGE_USER_CN, + 'cn': STAGE_USER_CN}))) + else: + topology_st.standalone.add_s(Entry((OUT_USER_DN, { + 'objectclass': "top person inetuser".split(), + 'sn': OUT_USER_CN, + 'cn': OUT_USER_CN}))) + + +def _find_memberof(topology_st, user_dn=None, group_dn=None, find_result=True): + assert (topology_st) + assert (user_dn) + assert (group_dn) + ent = topology_st.standalone.getEntry(user_dn, ldap.SCOPE_BASE, "(objectclass=*)", ['memberof']) + found = False + if 
ent.hasAttr('memberof'): + + for val in ent.getValues('memberof'): + topology_st.standalone.log.info("!!!!!!! %s: memberof->%s" % (user_dn, val)) + if ensure_str(val) == group_dn: + found = True + break + + if find_result: + assert (found) + else: + assert (not found) + + +def _find_member(topology_st, user_dn=None, group_dn=None, find_result=True): + assert (topology_st) + assert (user_dn) + assert (group_dn) + ent = topology_st.standalone.getEntry(group_dn, ldap.SCOPE_BASE, "(objectclass=*)", ['member']) + found = False + if ent.hasAttr('member'): + + for val in ent.getValues('member'): + topology_st.standalone.log.info("!!!!!!! %s: member ->%s" % (group_dn, val)) + if ensure_str(val) == user_dn: + found = True + break + + if find_result: + assert (found) + else: + assert (not found) + + +def _modrdn_entry(topology_st=None, entry_dn=None, new_rdn=None, del_old=0, new_superior=None): + assert topology_st is not None + assert entry_dn is not None + assert new_rdn is not None + + topology_st.standalone.log.info("\n\n######################### MODRDN %s ######################\n" % new_rdn) + try: + if new_superior: + topology_st.standalone.rename_s(entry_dn, new_rdn, newsuperior=new_superior, delold=del_old) + else: + topology_st.standalone.rename_s(entry_dn, new_rdn, delold=del_old) + except ldap.NO_SUCH_ATTRIBUTE: + topology_st.standalone.log.info("accepted failure due to 47833: modrdn reports error.. but succeeds") + attempt = 0 + if new_superior: + dn = "%s,%s" % (new_rdn, new_superior) + base = new_superior + else: + base = ','.join(entry_dn.split(",")[1:]) + dn = "%s, %s" % (new_rdn, base) + myfilter = entry_dn.split(',')[0] + + while attempt < 10: + try: + ent = topology_st.standalone.getEntry(dn, ldap.SCOPE_BASE, myfilter) + break + except ldap.NO_SUCH_OBJECT: + topology_st.standalone.log.info("Accept failure due to 47833: unable to find (base) a modrdn entry") + attempt += 1 + time.sleep(1) + if attempt == 10: + ent = topology_st.standalone.getEntry(base, ldap.SCOPE_SUBTREE, myfilter) + ent = topology_st.standalone.getEntry(dn, ldap.SCOPE_BASE, myfilter) + + +def _check_memberof(topology_st=None, action=None, user_dn=None, group_dn=None, find_result=None): + assert (topology_st) + assert (user_dn) + assert (group_dn) + if action == ldap.MOD_ADD: + txt = 'add' + elif action == ldap.MOD_DELETE: + txt = 'delete' + else: + txt = 'replace' + topology_st.standalone.log.info('\n%s entry %s' % (txt, user_dn)) + topology_st.standalone.log.info('to group %s' % group_dn) + + topology_st.standalone.modify_s(group_dn, [(action, 'member', ensure_bytes(user_dn))]) + time.sleep(1) + _find_memberof(topology_st, user_dn=user_dn, group_dn=group_dn, find_result=find_result) + + +def test_ticket47829_init(topology_st): + topology_st.standalone.add_s(Entry((SCOPE_IN_DN, { + 'objectclass': "top nscontainer".split(), + 'cn': SCOPE_IN_DN}))) + topology_st.standalone.add_s(Entry((SCOPE_OUT_DN, { + 'objectclass': "top nscontainer".split(), + 'cn': SCOPE_OUT_DN}))) + topology_st.standalone.add_s(Entry((PROVISIONING_DN, { + 'objectclass': "top nscontainer".split(), + 'cn': PROVISIONING_CN}))) + topology_st.standalone.add_s(Entry((ACTIVE_DN, { + 'objectclass': "top nscontainer".split(), + 'cn': ACTIVE_CN}))) + topology_st.standalone.add_s(Entry((STAGE_DN, { + 'objectclass': "top nscontainer".split(), + 'cn': STAGE_DN}))) + topology_st.standalone.add_s(Entry((DELETE_DN, { + 'objectclass': "top nscontainer".split(), + 'cn': DELETE_CN}))) + + # add groups + topology_st.standalone.add_s(Entry((ACTIVE_GROUP_DN, { + 
'objectclass': "top groupOfNames inetuser".split(), + 'cn': ACTIVE_GROUP_CN}))) + topology_st.standalone.add_s(Entry((STAGE_GROUP_DN, { + 'objectclass': "top groupOfNames inetuser".split(), + 'cn': STAGE_GROUP_CN}))) + topology_st.standalone.add_s(Entry((OUT_GROUP_DN, { + 'objectclass': "top groupOfNames inetuser".split(), + 'cn': OUT_GROUP_CN}))) + topology_st.standalone.add_s(Entry((INDIRECT_ACTIVE_GROUP_DN, { + 'objectclass': "top groupOfNames".split(), + 'cn': INDIRECT_ACTIVE_GROUP_CN}))) + + # add users + _add_user(topology_st, 'active') + _add_user(topology_st, 'stage') + _add_user(topology_st, 'out') + + # enable memberof of with scope IN except provisioning + topology_st.standalone.plugins.enable(name=PLUGIN_MEMBER_OF) + dn = "cn=%s,%s" % (PLUGIN_MEMBER_OF, DN_PLUGIN) + topology_st.standalone.modify_s(dn, [(ldap.MOD_REPLACE, 'memberOfEntryScope', ensure_bytes(SCOPE_IN_DN))]) + topology_st.standalone.modify_s(dn, [(ldap.MOD_REPLACE, 'memberOfEntryScopeExcludeSubtree', ensure_bytes(PROVISIONING_DN))]) + + # enable RI with scope IN except provisioning + topology_st.standalone.plugins.enable(name=PLUGIN_REFER_INTEGRITY) + dn = "cn=%s,%s" % (PLUGIN_REFER_INTEGRITY, DN_PLUGIN) + topology_st.standalone.modify_s(dn, [(ldap.MOD_REPLACE, 'nsslapd-pluginentryscope', ensure_bytes(SCOPE_IN_DN))]) + topology_st.standalone.modify_s(dn, [(ldap.MOD_REPLACE, 'nsslapd-plugincontainerscope', ensure_bytes(SCOPE_IN_DN))]) + topology_st.standalone.modify_s(dn, [(ldap.MOD_REPLACE, 'nsslapd-pluginExcludeEntryScope', ensure_bytes(PROVISIONING_DN))]) + + topology_st.standalone.restart(timeout=10) + + +def test_ticket47829_mod_active_user_1(topology_st): + _header(topology_st, 'MOD: add an active user to an active group') + + # add active user to active group + _check_memberof(topology_st, action=ldap.MOD_ADD, user_dn=ACTIVE_USER_DN, group_dn=ACTIVE_GROUP_DN, + find_result=True) + _find_member(topology_st, user_dn=ACTIVE_USER_DN, group_dn=ACTIVE_GROUP_DN, find_result=True) + + # remove active user to active group + _check_memberof(topology_st, action=ldap.MOD_DELETE, user_dn=ACTIVE_USER_DN, group_dn=ACTIVE_GROUP_DN, + find_result=False) + + +def test_ticket47829_mod_active_user_2(topology_st): + _header(topology_st, 'MOD: add an Active user to a Stage group') + + # add active user to stage group + _check_memberof(topology_st, action=ldap.MOD_ADD, user_dn=ACTIVE_USER_DN, group_dn=STAGE_GROUP_DN, + find_result=False) + _find_member(topology_st, user_dn=ACTIVE_USER_DN, group_dn=STAGE_GROUP_DN, find_result=True) + + # remove active user to stage group + _check_memberof(topology_st, action=ldap.MOD_DELETE, user_dn=ACTIVE_USER_DN, group_dn=STAGE_GROUP_DN, + find_result=False) + + +def test_ticket47829_mod_active_user_3(topology_st): + _header(topology_st, 'MOD: add an Active user to a out of scope group') + + # add active user to out of scope group + _check_memberof(topology_st, action=ldap.MOD_ADD, user_dn=ACTIVE_USER_DN, group_dn=OUT_GROUP_DN, find_result=False) + _find_member(topology_st, user_dn=ACTIVE_USER_DN, group_dn=OUT_GROUP_DN, find_result=True) + + # remove active user to out of scope group + _check_memberof(topology_st, action=ldap.MOD_DELETE, user_dn=ACTIVE_USER_DN, group_dn=OUT_GROUP_DN, + find_result=False) + + +def test_ticket47829_mod_stage_user_1(topology_st): + _header(topology_st, 'MOD: add an Stage user to a Active group') + + # add stage user to active group + _check_memberof(topology_st, action=ldap.MOD_ADD, user_dn=STAGE_USER_DN, group_dn=ACTIVE_GROUP_DN, + find_result=False) + 
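# (Illustrative sketch only, not part of the upstream patch.) At this point the staged
# user sits under PROVISIONING_DN, which test_ticket47829_init registers in
# memberOfEntryScopeExcludeSubtree, so the memberOf plugin is expected to leave the
# user's 'memberOf' untouched even though the group's 'member' value was accepted above.
# The helper name '_expect_membership' is hypothetical; it only bundles the two checks
# already defined in this file so the expected (member, memberOf) pair reads as one call.
def _expect_membership(topology_st, user_dn, group_dn, member_expected, memberof_expected):
    # 'member' is read from the group entry, 'memberOf' from the user entry
    _find_member(topology_st, user_dn=user_dn, group_dn=group_dn, find_result=member_expected)
    _find_memberof(topology_st, user_dn=user_dn, group_dn=group_dn, find_result=memberof_expected)
# e.g. a staged user added to an active group: 'member' is kept, no 'memberOf' is written
# _expect_membership(topology_st, STAGE_USER_DN, ACTIVE_GROUP_DN, True, False)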
_find_member(topology_st, user_dn=STAGE_USER_DN, group_dn=ACTIVE_GROUP_DN, find_result=True) + + # remove stage user to active group + _check_memberof(topology_st, action=ldap.MOD_DELETE, user_dn=STAGE_USER_DN, group_dn=ACTIVE_GROUP_DN, + find_result=False) + + +def test_ticket47829_mod_stage_user_2(topology_st): + _header(topology_st, 'MOD: add an Stage user to a Stage group') + + # add stage user to stage group + _check_memberof(topology_st, action=ldap.MOD_ADD, user_dn=STAGE_USER_DN, group_dn=STAGE_GROUP_DN, find_result=False) + _find_member(topology_st, user_dn=STAGE_USER_DN, group_dn=STAGE_GROUP_DN, find_result=True) + + # remove stage user to stage group + _check_memberof(topology_st, action=ldap.MOD_DELETE, user_dn=STAGE_USER_DN, group_dn=STAGE_GROUP_DN, + find_result=False) + + +def test_ticket47829_mod_stage_user_3(topology_st): + _header(topology_st, 'MOD: add an Stage user to a out of scope group') + + # add stage user to an out of scope group + _check_memberof(topology_st, action=ldap.MOD_ADD, user_dn=STAGE_USER_DN, group_dn=OUT_GROUP_DN, find_result=False) + _find_member(topology_st, user_dn=STAGE_USER_DN, group_dn=OUT_GROUP_DN, find_result=True) + + # remove stage user to out of scope group + _check_memberof(topology_st, action=ldap.MOD_DELETE, user_dn=STAGE_USER_DN, group_dn=OUT_GROUP_DN, + find_result=False) + + +def test_ticket47829_mod_out_user_1(topology_st): + _header(topology_st, 'MOD: add an out of scope user to an active group') + + # add out of scope user to active group + _check_memberof(topology_st, action=ldap.MOD_ADD, user_dn=OUT_USER_DN, group_dn=ACTIVE_GROUP_DN, find_result=False) + _find_member(topology_st, user_dn=OUT_USER_DN, group_dn=ACTIVE_GROUP_DN, find_result=True) + + # remove out of scope user to active group + _check_memberof(topology_st, action=ldap.MOD_DELETE, user_dn=OUT_USER_DN, group_dn=ACTIVE_GROUP_DN, + find_result=False) + + +def test_ticket47829_mod_out_user_2(topology_st): + _header(topology_st, 'MOD: add an out of scope user to a Stage group') + + # add out of scope user to stage group + _check_memberof(topology_st, action=ldap.MOD_ADD, user_dn=OUT_USER_DN, group_dn=STAGE_GROUP_DN, find_result=False) + _find_member(topology_st, user_dn=OUT_USER_DN, group_dn=STAGE_GROUP_DN, find_result=True) + + # remove out of scope user to stage group + _check_memberof(topology_st, action=ldap.MOD_DELETE, user_dn=OUT_USER_DN, group_dn=STAGE_GROUP_DN, + find_result=False) + + +def test_ticket47829_mod_out_user_3(topology_st): + _header(topology_st, 'MOD: add an out of scope user to an out of scope group') + + # add out of scope user to stage group + _check_memberof(topology_st, action=ldap.MOD_ADD, user_dn=OUT_USER_DN, group_dn=OUT_GROUP_DN, find_result=False) + _find_member(topology_st, user_dn=OUT_USER_DN, group_dn=OUT_GROUP_DN, find_result=True) + + # remove out of scope user to stage group + _check_memberof(topology_st, action=ldap.MOD_DELETE, user_dn=OUT_USER_DN, group_dn=OUT_GROUP_DN, find_result=False) + + +def test_ticket47829_mod_active_user_modrdn_active_user_1(topology_st): + _header(topology_st, 'add an Active user to a Active group. 
Then move Active user to Active') + + # add Active user to active group + _check_memberof(topology_st, action=ldap.MOD_ADD, user_dn=ACTIVE_USER_DN, group_dn=ACTIVE_GROUP_DN, + find_result=True) + _find_member(topology_st, user_dn=ACTIVE_USER_DN, group_dn=ACTIVE_GROUP_DN, find_result=True) + + # move the Active entry to active, expect 'member' and 'memberof' + _modrdn_entry(topology_st, entry_dn=ACTIVE_USER_DN, new_rdn="cn=x%s" % ACTIVE_USER_CN, new_superior=ACTIVE_DN) + _find_memberof(topology_st, user_dn="cn=x%s,%s" % (ACTIVE_USER_CN, ACTIVE_DN), group_dn=ACTIVE_GROUP_DN, + find_result=True) + _find_member(topology_st, user_dn="cn=x%s,%s" % (ACTIVE_USER_CN, ACTIVE_DN), group_dn=ACTIVE_GROUP_DN, + find_result=True) + + # move the Active entry to active, expect 'member' and no 'memberof' + _modrdn_entry(topology_st, entry_dn="cn=x%s,%s" % (ACTIVE_USER_CN, ACTIVE_DN), new_rdn="cn=%s" % ACTIVE_USER_CN, + new_superior=ACTIVE_DN) + _find_memberof(topology_st, user_dn="cn=%s,%s" % (ACTIVE_USER_CN, ACTIVE_DN), group_dn=ACTIVE_GROUP_DN, + find_result=True) + _find_member(topology_st, user_dn="cn=%s,%s" % (ACTIVE_USER_CN, ACTIVE_DN), group_dn=ACTIVE_GROUP_DN, + find_result=True) + + # remove active user to active group + _check_memberof(topology_st, action=ldap.MOD_DELETE, user_dn=ACTIVE_USER_DN, group_dn=ACTIVE_GROUP_DN, + find_result=False) + + +def test_ticket47829_mod_active_user_modrdn_stage_user_1(topology_st): + _header(topology_st, 'add an Active user to a Active group. Then move Active user to Stage') + + # add Active user to active group + _check_memberof(topology_st, action=ldap.MOD_ADD, user_dn=ACTIVE_USER_DN, group_dn=ACTIVE_GROUP_DN, + find_result=True) + _find_member(topology_st, user_dn=ACTIVE_USER_DN, group_dn=ACTIVE_GROUP_DN, find_result=True) + + # move the Active entry to stage, expect no 'member' and 'memberof' + _modrdn_entry(topology_st, entry_dn=ACTIVE_USER_DN, new_rdn="cn=%s" % ACTIVE_USER_CN, new_superior=STAGE_DN) + _find_memberof(topology_st, user_dn="cn=%s,%s" % (ACTIVE_USER_CN, STAGE_DN), group_dn=ACTIVE_GROUP_DN, + find_result=False) + _find_member(topology_st, user_dn="cn=%s,%s" % (ACTIVE_USER_CN, STAGE_DN), group_dn=ACTIVE_GROUP_DN, + find_result=False) + + # move the Active entry to Stage, expect 'member' and no 'memberof' + _modrdn_entry(topology_st, entry_dn="cn=%s,%s" % (ACTIVE_USER_CN, STAGE_DN), new_rdn="cn=%s" % ACTIVE_USER_CN, + new_superior=ACTIVE_DN) + _find_memberof(topology_st, user_dn="cn=%s,%s" % (ACTIVE_USER_CN, ACTIVE_DN), group_dn=ACTIVE_GROUP_DN, + find_result=False) + _find_member(topology_st, user_dn="cn=%s,%s" % (ACTIVE_USER_CN, ACTIVE_DN), group_dn=ACTIVE_GROUP_DN, + find_result=False) + + +def test_ticket47829_mod_active_user_modrdn_out_user_1(topology_st): + _header(topology_st, 'add an Active user to a Active group. 
Then move Active user to out of scope') + + # add Active user to active group + _check_memberof(topology_st, action=ldap.MOD_ADD, user_dn=ACTIVE_USER_DN, group_dn=ACTIVE_GROUP_DN, + find_result=True) + _find_member(topology_st, user_dn=ACTIVE_USER_DN, group_dn=ACTIVE_GROUP_DN, find_result=True) + + # move the Active entry to out of scope, expect no 'member' and no 'memberof' + _modrdn_entry(topology_st, entry_dn=ACTIVE_USER_DN, new_rdn="cn=%s" % ACTIVE_USER_CN, new_superior=OUT_GROUP_DN) + _find_memberof(topology_st, user_dn="cn=%s,%s" % (ACTIVE_USER_CN, OUT_GROUP_DN), group_dn=ACTIVE_GROUP_DN, + find_result=False) + _find_member(topology_st, user_dn="cn=%s,%s" % (ACTIVE_USER_CN, OUT_GROUP_DN), group_dn=ACTIVE_GROUP_DN, + find_result=False) + + # move the Active entry to out of scope, expect no 'member' and no 'memberof' + _modrdn_entry(topology_st, entry_dn="cn=%s,%s" % (ACTIVE_USER_CN, OUT_GROUP_DN), new_rdn="cn=%s" % ACTIVE_USER_CN, + new_superior=ACTIVE_DN) + _find_memberof(topology_st, user_dn="cn=%s,%s" % (ACTIVE_USER_CN, ACTIVE_DN), group_dn=ACTIVE_GROUP_DN, + find_result=False) + _find_member(topology_st, user_dn="cn=%s,%s" % (ACTIVE_USER_CN, ACTIVE_DN), group_dn=ACTIVE_GROUP_DN, + find_result=False) + + +def test_ticket47829_mod_modrdn_1(topology_st): + _header(topology_st, 'add an Stage user to a Active group. Then move Stage user to Active') + + # add Stage user to active group + _check_memberof(topology_st, action=ldap.MOD_ADD, user_dn=STAGE_USER_DN, group_dn=ACTIVE_GROUP_DN, + find_result=False) + _find_member(topology_st, user_dn=STAGE_USER_DN, group_dn=ACTIVE_GROUP_DN, find_result=True) + + # move the Stage entry to active, expect 'member' and 'memberof' + _modrdn_entry(topology_st, entry_dn=STAGE_USER_DN, new_rdn="cn=%s" % STAGE_USER_CN, new_superior=ACTIVE_DN) + _find_memberof(topology_st, user_dn="cn=%s,%s" % (STAGE_USER_CN, ACTIVE_DN), group_dn=ACTIVE_GROUP_DN, + find_result=True) + _find_member(topology_st, user_dn="cn=%s,%s" % (STAGE_USER_CN, ACTIVE_DN), group_dn=ACTIVE_GROUP_DN, + find_result=True) + + # move the Active entry to Stage, expect no 'member' and no 'memberof' + _modrdn_entry(topology_st, entry_dn="cn=%s,%s" % (STAGE_USER_CN, ACTIVE_DN), new_rdn="cn=%s" % STAGE_USER_CN, + new_superior=STAGE_DN) + _find_memberof(topology_st, user_dn="cn=%s,%s" % (STAGE_USER_CN, STAGE_DN), group_dn=ACTIVE_GROUP_DN, + find_result=False) + _find_member(topology_st, user_dn="cn=%s,%s" % (STAGE_USER_CN, ACTIVE_DN), group_dn=ACTIVE_GROUP_DN, + find_result=False) + + +def test_ticket47829_mod_stage_user_modrdn_active_user_1(topology_st): + _header(topology_st, 'add an Stage user to a Active group. 
Then move Stage user to Active') + + stage_user_dn = STAGE_USER_DN + stage_user_rdn = "cn=%s" % STAGE_USER_CN + active_user_dn = "cn=%s,%s" % (STAGE_USER_CN, ACTIVE_DN) + + # add Stage user to active group + _check_memberof(topology_st, action=ldap.MOD_ADD, user_dn=stage_user_dn, group_dn=ACTIVE_GROUP_DN, + find_result=False) + _find_member(topology_st, user_dn=stage_user_dn, group_dn=ACTIVE_GROUP_DN, find_result=True) + + # move the Stage entry to Actve, expect 'member' and 'memberof' + _modrdn_entry(topology_st, entry_dn=stage_user_dn, new_rdn=stage_user_rdn, new_superior=ACTIVE_DN) + _find_memberof(topology_st, user_dn=active_user_dn, group_dn=ACTIVE_GROUP_DN, find_result=True) + _find_member(topology_st, user_dn=active_user_dn, group_dn=ACTIVE_GROUP_DN, find_result=True) + + # move the Active entry to Stage, expect no 'member' and no 'memberof' + _modrdn_entry(topology_st, entry_dn=active_user_dn, new_rdn=stage_user_rdn, new_superior=STAGE_DN) + _find_memberof(topology_st, user_dn=stage_user_dn, group_dn=ACTIVE_GROUP_DN, find_result=False) + _find_member(topology_st, user_dn=stage_user_dn, group_dn=ACTIVE_GROUP_DN, find_result=False) + + +def test_ticket47829_mod_stage_user_modrdn_stage_user_1(topology_st): + _header(topology_st, 'add an Stage user to a Active group. Then move Stage user to Stage') + + _header(topology_st, 'Return because it requires a fix for 47833') + return + + old_stage_user_dn = STAGE_USER_DN + old_stage_user_rdn = "cn=%s" % STAGE_USER_CN + new_stage_user_rdn = "cn=x%s" % STAGE_USER_CN + new_stage_user_dn = "%s,%s" % (new_stage_user_rdn, STAGE_DN) + + # add Stage user to active group + _check_memberof(topology_st, action=ldap.MOD_ADD, user_dn=old_stage_user_dn, group_dn=ACTIVE_GROUP_DN, + find_result=False) + _find_member(topology_st, user_dn=old_stage_user_dn, group_dn=ACTIVE_GROUP_DN, find_result=True) + + # move the Stage entry to Stage, expect no 'member' and 'memberof' + _modrdn_entry(topology_st, entry_dn=old_stage_user_dn, new_rdn=new_stage_user_rdn, new_superior=STAGE_DN) + _find_memberof(topology_st, user_dn=new_stage_user_dn, group_dn=ACTIVE_GROUP_DN, find_result=False) + _find_member(topology_st, user_dn=new_stage_user_dn, group_dn=ACTIVE_GROUP_DN, find_result=False) + + # move the Stage entry to Stage, expect no 'member' and no 'memberof' + _modrdn_entry(topology_st, entry_dn=new_stage_user_dn, new_rdn=old_stage_user_rdn, new_superior=STAGE_DN) + _find_memberof(topology_st, user_dn=old_stage_user_dn, group_dn=ACTIVE_GROUP_DN, find_result=False) + _find_member(topology_st, user_dn=old_stage_user_dn, group_dn=ACTIVE_GROUP_DN, find_result=False) + + +def test_ticket47829_indirect_active_group_1(topology_st): + _header(topology_st, 'add an Active group (G1) to an active group (G0). Then add active user to G1') + + topology_st.standalone.modify_s(INDIRECT_ACTIVE_GROUP_DN, [(ldap.MOD_ADD, 'member', ensure_bytes(ACTIVE_GROUP_DN))]) + + # add an active user to G1. 
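# (Illustrative sketch only, not part of the upstream patch.) With G1 (ACTIVE_GROUP_DN)
# just added as a member of G0 (INDIRECT_ACTIVE_GROUP_DN), the memberOf plugin is
# expected to expand membership transitively: an active user added directly to G1
# should carry 'memberOf' values for both G1 and G0. The helper name
# '_expect_nested_memberof' is hypothetical and only reuses checks defined in this file.
def _expect_nested_memberof(topology_st, user_dn, expected=True):
    # direct group first, then the parent group reached through nesting
    _find_memberof(topology_st, user_dn=user_dn, group_dn=ACTIVE_GROUP_DN, find_result=expected)
    _find_memberof(topology_st, user_dn=user_dn, group_dn=INDIRECT_ACTIVE_GROUP_DN, find_result=expected)
# e.g. _expect_nested_memberof(topology_st, ACTIVE_USER_DN, expected=True)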
Checks that user is memberof G1 + _check_memberof(topology_st, action=ldap.MOD_ADD, user_dn=ACTIVE_USER_DN, group_dn=ACTIVE_GROUP_DN, + find_result=True) + _find_memberof(topology_st, user_dn=ACTIVE_USER_DN, group_dn=INDIRECT_ACTIVE_GROUP_DN, find_result=True) + + # remove G1 from G0 + topology_st.standalone.modify_s(INDIRECT_ACTIVE_GROUP_DN, [(ldap.MOD_DELETE, 'member', ensure_bytes(ACTIVE_GROUP_DN))]) + _find_memberof(topology_st, user_dn=ACTIVE_USER_DN, group_dn=INDIRECT_ACTIVE_GROUP_DN, find_result=False) + _find_memberof(topology_st, user_dn=ACTIVE_USER_DN, group_dn=ACTIVE_GROUP_DN, find_result=True) + + # remove active user from G1 + _check_memberof(topology_st, action=ldap.MOD_DELETE, user_dn=ACTIVE_USER_DN, group_dn=ACTIVE_GROUP_DN, + find_result=False) + + +def test_ticket47829_indirect_active_group_2(topology_st): + _header(topology_st, + 'add an Active group (G1) to an active group (G0). Then add active user to G1. Then move active user to stage') + + topology_st.standalone.modify_s(INDIRECT_ACTIVE_GROUP_DN, [(ldap.MOD_ADD, 'member', ensure_bytes(ACTIVE_GROUP_DN))]) + + # add an active user to G1. Checks that user is memberof G1 + _check_memberof(topology_st, action=ldap.MOD_ADD, user_dn=ACTIVE_USER_DN, group_dn=ACTIVE_GROUP_DN, + find_result=True) + _find_memberof(topology_st, user_dn=ACTIVE_USER_DN, group_dn=INDIRECT_ACTIVE_GROUP_DN, find_result=True) + + # remove G1 from G0 + topology_st.standalone.modify_s(INDIRECT_ACTIVE_GROUP_DN, [(ldap.MOD_DELETE, 'member', ensure_bytes(ACTIVE_GROUP_DN))]) + _find_memberof(topology_st, user_dn=ACTIVE_USER_DN, group_dn=INDIRECT_ACTIVE_GROUP_DN, find_result=False) + _find_memberof(topology_st, user_dn=ACTIVE_USER_DN, group_dn=ACTIVE_GROUP_DN, find_result=True) + + # move active user to stage + _modrdn_entry(topology_st, entry_dn=ACTIVE_USER_DN, new_rdn="cn=%s" % ACTIVE_USER_CN, new_superior=STAGE_DN) + + # stage user is no long member of active group and indirect active group + _find_memberof(topology_st, user_dn="cn=%s,%s" % (ACTIVE_USER_CN, STAGE_DN), group_dn=ACTIVE_GROUP_DN, + find_result=False) + _find_memberof(topology_st, user_dn="cn=%s,%s" % (ACTIVE_USER_CN, STAGE_DN), group_dn=INDIRECT_ACTIVE_GROUP_DN, + find_result=False) + + # active group and indirect active group do no longer have stage user as member + _find_member(topology_st, user_dn="cn=%s,%s" % (ACTIVE_USER_CN, STAGE_DN), group_dn=ACTIVE_GROUP_DN, + find_result=False) + _find_member(topology_st, user_dn="cn=%s,%s" % (ACTIVE_USER_CN, STAGE_DN), group_dn=INDIRECT_ACTIVE_GROUP_DN, + find_result=False) + + # return back the entry to active. It remains not member + _modrdn_entry(topology_st, entry_dn="cn=%s,%s" % (ACTIVE_USER_CN, STAGE_DN), new_rdn="cn=%s" % ACTIVE_USER_CN, + new_superior=ACTIVE_DN) + _find_member(topology_st, user_dn="cn=%s,%s" % (ACTIVE_USER_CN, ACTIVE_DN), group_dn=ACTIVE_GROUP_DN, + find_result=False) + _find_member(topology_st, user_dn="cn=%s,%s" % (ACTIVE_USER_CN, ACTIVE_DN), group_dn=INDIRECT_ACTIVE_GROUP_DN, + find_result=False) + + +def test_ticket47829_indirect_active_group_3(topology_st): + _header(topology_st, + 'add an Active group (G1) to an active group (G0). Then add active user to G1. Then move active user to out of the scope') + + topology_st.standalone.modify_s(INDIRECT_ACTIVE_GROUP_DN, [(ldap.MOD_ADD, 'member', ensure_bytes(ACTIVE_GROUP_DN))]) + + # add an active user to G1. 
Checks that user is memberof G1 + _check_memberof(topology_st, action=ldap.MOD_ADD, user_dn=ACTIVE_USER_DN, group_dn=ACTIVE_GROUP_DN, + find_result=True) + _find_memberof(topology_st, user_dn=ACTIVE_USER_DN, group_dn=INDIRECT_ACTIVE_GROUP_DN, find_result=True) + + # remove G1 from G0 + topology_st.standalone.modify_s(INDIRECT_ACTIVE_GROUP_DN, [(ldap.MOD_DELETE, 'member', ensure_bytes(ACTIVE_GROUP_DN))]) + _find_memberof(topology_st, user_dn=ACTIVE_USER_DN, group_dn=INDIRECT_ACTIVE_GROUP_DN, find_result=False) + _find_memberof(topology_st, user_dn=ACTIVE_USER_DN, group_dn=ACTIVE_GROUP_DN, find_result=True) + + # move active user to out of the scope + _modrdn_entry(topology_st, entry_dn=ACTIVE_USER_DN, new_rdn="cn=%s" % ACTIVE_USER_CN, new_superior=SCOPE_OUT_DN) + + # stage user is no long member of active group and indirect active group + _find_memberof(topology_st, user_dn="cn=%s,%s" % (ACTIVE_USER_CN, SCOPE_OUT_DN), group_dn=ACTIVE_GROUP_DN, + find_result=False) + _find_memberof(topology_st, user_dn="cn=%s,%s" % (ACTIVE_USER_CN, SCOPE_OUT_DN), group_dn=INDIRECT_ACTIVE_GROUP_DN, + find_result=False) + + # active group and indirect active group do no longer have stage user as member + _find_member(topology_st, user_dn="cn=%s,%s" % (ACTIVE_USER_CN, SCOPE_OUT_DN), group_dn=ACTIVE_GROUP_DN, + find_result=False) + _find_member(topology_st, user_dn="cn=%s,%s" % (ACTIVE_USER_CN, SCOPE_OUT_DN), group_dn=INDIRECT_ACTIVE_GROUP_DN, + find_result=False) + + # return back the entry to active. It remains not member + _modrdn_entry(topology_st, entry_dn="cn=%s,%s" % (ACTIVE_USER_CN, SCOPE_OUT_DN), new_rdn="cn=%s" % ACTIVE_USER_CN, + new_superior=ACTIVE_DN) + _find_member(topology_st, user_dn="cn=%s,%s" % (ACTIVE_USER_CN, ACTIVE_DN), group_dn=ACTIVE_GROUP_DN, + find_result=False) + _find_member(topology_st, user_dn="cn=%s,%s" % (ACTIVE_USER_CN, ACTIVE_DN), group_dn=INDIRECT_ACTIVE_GROUP_DN, + find_result=False) + + +def test_ticket47829_indirect_active_group_4(topology_st): + _header(topology_st, + 'add an Active group (G1) to an active group (G0). Then add stage user to G1. Then move user to active. 
Then move it back') + + topology_st.standalone.modify_s(INDIRECT_ACTIVE_GROUP_DN, [(ldap.MOD_ADD, 'member', ensure_bytes(ACTIVE_GROUP_DN))]) + + # add stage user to active group + _check_memberof(topology_st, action=ldap.MOD_ADD, user_dn=STAGE_USER_DN, group_dn=ACTIVE_GROUP_DN, + find_result=False) + _find_member(topology_st, user_dn=STAGE_USER_DN, group_dn=ACTIVE_GROUP_DN, find_result=True) + _find_member(topology_st, user_dn=STAGE_USER_DN, group_dn=INDIRECT_ACTIVE_GROUP_DN, find_result=False) + _find_memberof(topology_st, user_dn=STAGE_USER_DN, group_dn=INDIRECT_ACTIVE_GROUP_DN, find_result=False) + _find_memberof(topology_st, user_dn=STAGE_USER_DN, group_dn=ACTIVE_GROUP_DN, find_result=False) + + # move stage user to active + _modrdn_entry(topology_st, entry_dn=STAGE_USER_DN, new_rdn="cn=%s" % STAGE_USER_CN, new_superior=ACTIVE_DN) + renamed_stage_dn = "cn=%s,%s" % (STAGE_USER_CN, ACTIVE_DN) + _find_member(topology_st, user_dn=renamed_stage_dn, group_dn=ACTIVE_GROUP_DN, find_result=True) + _find_member(topology_st, user_dn=renamed_stage_dn, group_dn=INDIRECT_ACTIVE_GROUP_DN, find_result=False) + _find_memberof(topology_st, user_dn=renamed_stage_dn, group_dn=INDIRECT_ACTIVE_GROUP_DN, find_result=True) + _find_memberof(topology_st, user_dn=renamed_stage_dn, group_dn=ACTIVE_GROUP_DN, find_result=True) + + # move back active to stage + _modrdn_entry(topology_st, entry_dn=renamed_stage_dn, new_rdn="cn=%s" % STAGE_USER_CN, new_superior=STAGE_DN) + _find_member(topology_st, user_dn=STAGE_USER_DN, group_dn=ACTIVE_GROUP_DN, find_result=False) + _find_member(topology_st, user_dn=STAGE_USER_DN, group_dn=INDIRECT_ACTIVE_GROUP_DN, find_result=False) + _find_memberof(topology_st, user_dn=STAGE_USER_DN, group_dn=INDIRECT_ACTIVE_GROUP_DN, find_result=False) + _find_memberof(topology_st, user_dn=STAGE_USER_DN, group_dn=ACTIVE_GROUP_DN, find_result=False) + + +if __name__ == '__main__': + # Run isolated + # -s for DEBUG mode + CURRENT_FILE = os.path.realpath(__file__) + pytest.main("-s %s" % CURRENT_FILE) diff --git a/dirsrvtests/tests/tickets/ticket47833_test.py b/dirsrvtests/tests/tickets/ticket47833_test.py new file mode 100644 index 0000000..bb6e3fb --- /dev/null +++ b/dirsrvtests/tests/tickets/ticket47833_test.py @@ -0,0 +1,220 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2016 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. 
+# --- END COPYRIGHT BLOCK --- +# +import pytest +from lib389.tasks import * +from lib389.utils import * +from lib389.topologies import topology_st + +from lib389._constants import SUFFIX, DEFAULT_SUFFIX, PLUGIN_MEMBER_OF, DN_PLUGIN + +SCOPE_IN_CN = 'in' +SCOPE_OUT_CN = 'out' +SCOPE_IN_DN = 'cn=%s,%s' % (SCOPE_IN_CN, SUFFIX) +SCOPE_OUT_DN = 'cn=%s,%s' % (SCOPE_OUT_CN, SUFFIX) + +PROVISIONING_CN = "provisioning" +PROVISIONING_DN = "cn=%s,%s" % (PROVISIONING_CN, SCOPE_IN_DN) + +# Skip on older versions +pytestmark = [pytest.mark.tier2, + pytest.mark.skipif(ds_is_older('1.3.3'), reason="Not implemented")] + +ACTIVE_CN = "accounts" +STAGE_CN = "staged users" +DELETE_CN = "deleted users" +ACTIVE_DN = "cn=%s,%s" % (ACTIVE_CN, SCOPE_IN_DN) +STAGE_DN = "cn=%s,%s" % (STAGE_CN, PROVISIONING_DN) +DELETE_DN = "cn=%s,%s" % (DELETE_CN, PROVISIONING_DN) + +STAGE_USER_CN = "stage guy" +STAGE_USER_DN = "cn=%s,%s" % (STAGE_USER_CN, STAGE_DN) + +ACTIVE_USER_CN = "active guy" +ACTIVE_USER_DN = "cn=%s,%s" % (ACTIVE_USER_CN, ACTIVE_DN) + +OUT_USER_CN = "out guy" +OUT_USER_DN = "cn=%s,%s" % (OUT_USER_CN, SCOPE_OUT_DN) + +STAGE_GROUP_CN = "stage group" +STAGE_GROUP_DN = "cn=%s,%s" % (STAGE_GROUP_CN, STAGE_DN) + +ACTIVE_GROUP_CN = "active group" +ACTIVE_GROUP_DN = "cn=%s,%s" % (ACTIVE_GROUP_CN, ACTIVE_DN) + +OUT_GROUP_CN = "out group" +OUT_GROUP_DN = "cn=%s,%s" % (OUT_GROUP_CN, SCOPE_OUT_DN) + +logging.getLogger(__name__).setLevel(logging.DEBUG) +log = logging.getLogger(__name__) + + +def _header(topology_st, label): + topology_st.standalone.log.info("\n\n###############################################") + topology_st.standalone.log.info("#######") + topology_st.standalone.log.info("####### %s" % label) + topology_st.standalone.log.info("#######") + topology_st.standalone.log.info("###############################################") + + +def _add_user(topology_st, type='active'): + if type == 'active': + topology_st.standalone.add_s(Entry((ACTIVE_USER_DN, { + 'objectclass': "top person inetuser".split(), + 'sn': ACTIVE_USER_CN, + 'cn': ACTIVE_USER_CN}))) + elif type == 'stage': + topology_st.standalone.add_s(Entry((STAGE_USER_DN, { + 'objectclass': "top person inetuser".split(), + 'sn': STAGE_USER_CN, + 'cn': STAGE_USER_CN}))) + else: + topology_st.standalone.add_s(Entry((OUT_USER_DN, { + 'objectclass': "top person inetuser".split(), + 'sn': OUT_USER_CN, + 'cn': OUT_USER_CN}))) + + +def _find_memberof(topology_st, user_dn=None, group_dn=None, find_result=True): + assert (topology_st) + assert (user_dn) + assert (group_dn) + ent = topology_st.standalone.getEntry(user_dn, ldap.SCOPE_BASE, "(objectclass=*)", ['memberof']) + found = False + if ent.hasAttr('memberof'): + + for val in ent.getValues('memberof'): + topology_st.standalone.log.info("!!!!!!! %s: memberof->%s" % (user_dn, val)) + if val == group_dn: + found = True + break + + if find_result: + assert (found) + else: + assert (not found) + + +def _find_member(topology_st, user_dn=None, group_dn=None, find_result=True): + assert (topology_st) + assert (user_dn) + assert (group_dn) + ent = topology_st.standalone.getEntry(group_dn, ldap.SCOPE_BASE, "(objectclass=*)", ['member']) + found = False + if ent.hasAttr('member'): + + for val in ent.getValues('member'): + topology_st.standalone.log.info("!!!!!!! 
%s: member ->%s" % (group_dn, val)) + if ensure_str(val) == user_dn: + found = True + break + + if find_result: + assert (found) + else: + assert (not found) + + +def _modrdn_entry(topology_st=None, entry_dn=None, new_rdn=None, del_old=0, new_superior=None): + assert topology_st != None + assert entry_dn != None + assert new_rdn != None + + topology_st.standalone.log.info("\n\n######################### MODRDN %s ######################\n" % new_rdn) + if new_superior: + topology_st.standalone.rename_s(entry_dn, new_rdn, newsuperior=new_superior, delold=del_old) + else: + topology_st.standalone.rename_s(entry_dn, new_rdn, delold=del_old) + + +def _check_memberof(topology_st=None, action=None, user_dn=None, group_dn=None, find_result=None): + assert (topology_st) + assert (user_dn) + assert (group_dn) + if action == ldap.MOD_ADD: + txt = 'add' + elif action == ldap.MOD_DELETE: + txt = 'delete' + else: + txt = 'replace' + topology_st.standalone.log.info('\n%s entry %s' % (txt, user_dn)) + topology_st.standalone.log.info('to group %s' % group_dn) + + topology_st.standalone.modify_s(group_dn, [(action, 'member', ensure_bytes(user_dn))]) + time.sleep(1) + _find_memberof(topology_st, user_dn=user_dn, group_dn=group_dn, find_result=find_result) + + +def test_ticket47829_init(topology_st): + topology_st.standalone.add_s(Entry((SCOPE_IN_DN, { + 'objectclass': "top nscontainer".split(), + 'cn': SCOPE_IN_DN}))) + topology_st.standalone.add_s(Entry((SCOPE_OUT_DN, { + 'objectclass': "top nscontainer".split(), + 'cn': SCOPE_OUT_DN}))) + topology_st.standalone.add_s(Entry((PROVISIONING_DN, { + 'objectclass': "top nscontainer".split(), + 'cn': PROVISIONING_CN}))) + topology_st.standalone.add_s(Entry((ACTIVE_DN, { + 'objectclass': "top nscontainer".split(), + 'cn': ACTIVE_CN}))) + topology_st.standalone.add_s(Entry((STAGE_DN, { + 'objectclass': "top nscontainer".split(), + 'cn': STAGE_DN}))) + topology_st.standalone.add_s(Entry((DELETE_DN, { + 'objectclass': "top nscontainer".split(), + 'cn': DELETE_CN}))) + + # add groups + topology_st.standalone.add_s(Entry((ACTIVE_GROUP_DN, { + 'objectclass': "top groupOfNames".split(), + 'cn': ACTIVE_GROUP_CN}))) + topology_st.standalone.add_s(Entry((STAGE_GROUP_DN, { + 'objectclass': "top groupOfNames".split(), + 'cn': STAGE_GROUP_CN}))) + topology_st.standalone.add_s(Entry((OUT_GROUP_DN, { + 'objectclass': "top groupOfNames".split(), + 'cn': OUT_GROUP_CN}))) + + # add users + _add_user(topology_st, 'active') + _add_user(topology_st, 'stage') + _add_user(topology_st, 'out') + + # enable memberof of with scope account + topology_st.standalone.plugins.enable(name=PLUGIN_MEMBER_OF) + dn = "cn=%s,%s" % (PLUGIN_MEMBER_OF, DN_PLUGIN) + topology_st.standalone.modify_s(dn, [(ldap.MOD_REPLACE, 'memberOfEntryScope', ensure_bytes(ACTIVE_DN))]) + + topology_st.standalone.restart(timeout=10) + + +def test_ticket47829_mod_stage_user_modrdn_stage_user_1(topology_st): + _header(topology_st, 'add an Stage user to a Active group. 
Then move Stage user to Stage') + + old_stage_user_dn = STAGE_USER_DN + old_stage_user_rdn = "cn=%s" % STAGE_USER_CN + new_stage_user_rdn = "cn=x%s" % STAGE_USER_CN + new_stage_user_dn = "%s,%s" % (new_stage_user_rdn, STAGE_DN) + + # add Stage user to active group + _check_memberof(topology_st, action=ldap.MOD_ADD, user_dn=old_stage_user_dn, group_dn=ACTIVE_GROUP_DN, + find_result=False) + _find_member(topology_st, user_dn=old_stage_user_dn, group_dn=ACTIVE_GROUP_DN, find_result=True) + + # move the Stage entry to Stage, expect no 'member' and 'memberof' + _modrdn_entry(topology_st, entry_dn=old_stage_user_dn, new_rdn=new_stage_user_rdn, new_superior=STAGE_DN) + _find_memberof(topology_st, user_dn=new_stage_user_dn, group_dn=ACTIVE_GROUP_DN, find_result=False) + _find_member(topology_st, user_dn=new_stage_user_dn, group_dn=ACTIVE_GROUP_DN, find_result=False) + + +if __name__ == '__main__': + # Run isolated + # -s for DEBUG mode + CURRENT_FILE = os.path.realpath(__file__) + pytest.main("-s %s" % CURRENT_FILE) diff --git a/dirsrvtests/tests/tickets/ticket47869MMR_test.py b/dirsrvtests/tests/tickets/ticket47869MMR_test.py new file mode 100644 index 0000000..2e1af22 --- /dev/null +++ b/dirsrvtests/tests/tickets/ticket47869MMR_test.py @@ -0,0 +1,200 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2016 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. +# --- END COPYRIGHT BLOCK --- +# +import logging +import time + +import ldap +import pytest +from lib389 import Entry +from lib389._constants import * +from lib389.topologies import topology_m2 +from lib389.replica import ReplicationManager +from lib389.utils import * + +pytestmark = pytest.mark.tier2 + +logging.getLogger(__name__).setLevel(logging.DEBUG) +log = logging.getLogger(__name__) + +TEST_REPL_DN = "cn=test_repl, %s" % SUFFIX +ENTRY_NAME = 'test_entry' +MAX_ENTRIES = 10 + +BIND_NAME = 'bind_entry' +BIND_DN = 'cn=%s, %s' % (BIND_NAME, SUFFIX) +BIND_PW = 'password' + +def replication_check(topology_m2): + repl = ReplicationManager(SUFFIX) + master1 = topology_m2.ms["master1"] + master2 = topology_m2.ms["master2"] + return repl.test_replication(master1, master2) + +def test_ticket47869_init(topology_m2): + """ + It adds an entry ('bind_entry') and 10 test entries + It sets the anonymous aci + + """ + # enable acl error logging + mod = [(ldap.MOD_REPLACE, 'nsslapd-errorlog-level', ensure_bytes(str(8192)))] # REPL + topology_m2.ms["master1"].modify_s(DN_CONFIG, mod) + topology_m2.ms["master2"].modify_s(DN_CONFIG, mod) + + # entry used to bind with + topology_m2.ms["master1"].log.info("Add %s" % BIND_DN) + topology_m2.ms["master1"].add_s(Entry((BIND_DN, { + 'objectclass': "top person".split(), + 'sn': BIND_NAME, + 'cn': BIND_NAME, + 'userpassword': BIND_PW}))) + replication_check(topology_m2) + ent = topology_m2.ms["master2"].getEntry(BIND_DN, ldap.SCOPE_BASE, "(objectclass=*)") + assert ent + # keep anonymous ACI for use 'read-search' aci in SEARCH test + ACI_ANONYMOUS = "(targetattr!=\"userPassword\")(version 3.0; acl \"Enable anonymous access\"; allow (read, search, compare) userdn=\"ldap:///anyone\";)" + mod = [(ldap.MOD_REPLACE, 'aci', ensure_bytes(ACI_ANONYMOUS))] + topology_m2.ms["master1"].modify_s(SUFFIX, mod) + topology_m2.ms["master2"].modify_s(SUFFIX, mod) + + # add entries + for cpt in range(MAX_ENTRIES): + name = "%s%d" % (ENTRY_NAME, cpt) + mydn = "cn=%s,%s" % (name, SUFFIX) + topology_m2.ms["master1"].add_s(Entry((mydn, + {'objectclass': "top 
person".split(), + 'sn': name, + 'cn': name}))) + replication_check(topology_m2) + ent = topology_m2.ms["master2"].getEntry(mydn, ldap.SCOPE_BASE, "(objectclass=*)") + assert ent + +def test_ticket47869_check(topology_m2): + ''' + On Master 1 and 2: + Bind as Directory Manager. + Search all specifying nscpEntryWsi in the attribute list. + Check nscpEntryWsi is returned. + On Master 1 and 2: + Bind as Bind Entry. + Search all specifying nscpEntryWsi in the attribute list. + Check nscpEntryWsi is not returned. + On Master 1 and 2: + Bind as anonymous. + Search all specifying nscpEntryWsi in the attribute list. + Check nscpEntryWsi is not returned. + ''' + topology_m2.ms["master1"].log.info("\n\n######################### CHECK nscpentrywsi ######################\n") + + topology_m2.ms["master1"].log.info("##### Master1: Bind as %s #####" % DN_DM) + topology_m2.ms["master1"].simple_bind_s(DN_DM, PASSWORD) + + topology_m2.ms["master1"].log.info("Master1: Calling search_ext...") + msgid = topology_m2.ms["master1"].search_ext(SUFFIX, ldap.SCOPE_SUBTREE, 'objectclass=*', ['nscpentrywsi']) + nscpentrywsicnt = 0 + rtype, rdata, rmsgid = topology_m2.ms["master1"].result2(msgid) + topology_m2.ms["master1"].log.info("%d results" % len(rdata)) + + topology_m2.ms["master1"].log.info("Results:") + for dn, attrs in rdata: + topology_m2.ms["master1"].log.info("dn: %s" % dn) + if 'nscpentrywsi' in attrs: + nscpentrywsicnt += 1 + + topology_m2.ms["master1"].log.info("Master1: count of nscpentrywsi: %d" % nscpentrywsicnt) + + topology_m2.ms["master2"].log.info("##### Master2: Bind as %s #####" % DN_DM) + topology_m2.ms["master2"].simple_bind_s(DN_DM, PASSWORD) + + topology_m2.ms["master2"].log.info("Master2: Calling search_ext...") + msgid = topology_m2.ms["master2"].search_ext(SUFFIX, ldap.SCOPE_SUBTREE, 'objectclass=*', ['nscpentrywsi']) + nscpentrywsicnt = 0 + rtype, rdata, rmsgid = topology_m2.ms["master2"].result2(msgid) + topology_m2.ms["master2"].log.info("%d results" % len(rdata)) + + topology_m2.ms["master2"].log.info("Results:") + for dn, attrs in rdata: + topology_m2.ms["master2"].log.info("dn: %s" % dn) + if 'nscpentrywsi' in attrs: + nscpentrywsicnt += 1 + + topology_m2.ms["master2"].log.info("Master2: count of nscpentrywsi: %d" % nscpentrywsicnt) + + # bind as bind_entry + topology_m2.ms["master1"].log.info("##### Master1: Bind as %s #####" % BIND_DN) + topology_m2.ms["master1"].simple_bind_s(BIND_DN, BIND_PW) + + topology_m2.ms["master1"].log.info("Master1: Calling search_ext...") + msgid = topology_m2.ms["master1"].search_ext(SUFFIX, ldap.SCOPE_SUBTREE, 'objectclass=*', ['nscpentrywsi']) + nscpentrywsicnt = 0 + rtype, rdata, rmsgid = topology_m2.ms["master1"].result2(msgid) + topology_m2.ms["master1"].log.info("%d results" % len(rdata)) + + for dn, attrs in rdata: + if 'nscpentrywsi' in attrs: + nscpentrywsicnt += 1 + assert nscpentrywsicnt == 0 + topology_m2.ms["master1"].log.info("Master1: count of nscpentrywsi: %d" % nscpentrywsicnt) + + # bind as bind_entry + topology_m2.ms["master2"].log.info("##### Master2: Bind as %s #####" % BIND_DN) + topology_m2.ms["master2"].simple_bind_s(BIND_DN, BIND_PW) + + topology_m2.ms["master2"].log.info("Master2: Calling search_ext...") + msgid = topology_m2.ms["master2"].search_ext(SUFFIX, ldap.SCOPE_SUBTREE, 'objectclass=*', ['nscpentrywsi']) + nscpentrywsicnt = 0 + rtype, rdata, rmsgid = topology_m2.ms["master2"].result2(msgid) + topology_m2.ms["master2"].log.info("%d results" % len(rdata)) + + for dn, attrs in rdata: + if 'nscpentrywsi' in attrs: + 
nscpentrywsicnt += 1 + assert nscpentrywsicnt == 0 + topology_m2.ms["master2"].log.info("Master2: count of nscpentrywsi: %d" % nscpentrywsicnt) + + # bind as anonymous + topology_m2.ms["master1"].log.info("##### Master1: Bind as anonymous #####") + topology_m2.ms["master1"].simple_bind_s("", "") + + topology_m2.ms["master1"].log.info("Master1: Calling search_ext...") + msgid = topology_m2.ms["master1"].search_ext(SUFFIX, ldap.SCOPE_SUBTREE, 'objectclass=*', ['nscpentrywsi']) + nscpentrywsicnt = 0 + rtype, rdata, rmsgid = topology_m2.ms["master1"].result2(msgid) + topology_m2.ms["master1"].log.info("%d results" % len(rdata)) + + for dn, attrs in rdata: + if 'nscpentrywsi' in attrs: + nscpentrywsicnt += 1 + assert nscpentrywsicnt == 0 + topology_m2.ms["master1"].log.info("Master1: count of nscpentrywsi: %d" % nscpentrywsicnt) + + # bind as bind_entry + topology_m2.ms["master2"].log.info("##### Master2: Bind as anonymous #####") + topology_m2.ms["master2"].simple_bind_s("", "") + + topology_m2.ms["master2"].log.info("Master2: Calling search_ext...") + msgid = topology_m2.ms["master2"].search_ext(SUFFIX, ldap.SCOPE_SUBTREE, 'objectclass=*', ['nscpentrywsi']) + nscpentrywsicnt = 0 + rtype, rdata, rmsgid = topology_m2.ms["master2"].result2(msgid) + topology_m2.ms["master2"].log.info("%d results" % len(rdata)) + + for dn, attrs in rdata: + if 'nscpentrywsi' in attrs: + nscpentrywsicnt += 1 + assert nscpentrywsicnt == 0 + topology_m2.ms["master2"].log.info("Master2: count of nscpentrywsi: %d" % nscpentrywsicnt) + + topology_m2.ms["master1"].log.info("##### ticket47869 was successfully verified. #####") + + +if __name__ == '__main__': + # Run isolated + # -s for DEBUG mode + CURRENT_FILE = os.path.realpath(__file__) + pytest.main("-s %s" % CURRENT_FILE) diff --git a/dirsrvtests/tests/tickets/ticket47871_test.py b/dirsrvtests/tests/tickets/ticket47871_test.py new file mode 100644 index 0000000..6012de3 --- /dev/null +++ b/dirsrvtests/tests/tickets/ticket47871_test.py @@ -0,0 +1,108 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2016 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. 
+# --- END COPYRIGHT BLOCK --- +# +''' +Created on Nov 7, 2013 + +@author: tbordaz +''' +import logging +import time + +import ldap +import pytest +from lib389 import Entry +from lib389._constants import * +from lib389.topologies import topology_m1c1 + +logging.getLogger(__name__).setLevel(logging.DEBUG) +from lib389.utils import * + +# Skip on older versions +pytestmark = [pytest.mark.tier2, + pytest.mark.skipif(ds_is_older('1.3.2'), reason="Not implemented")] +log = logging.getLogger(__name__) + +TEST_REPL_DN = "cn=test_repl, %s" % SUFFIX +ENTRY_DN = "cn=test_entry, %s" % SUFFIX + +OTHER_NAME = 'other_entry' +MAX_OTHERS = 10 + +ATTRIBUTES = ['street', 'countryName', 'description', 'postalAddress', 'postalCode', 'title', 'l', 'roomNumber'] + + +def test_ticket47871_init(topology_m1c1): + """ + Initialize the test environment + """ + topology_m1c1.ms["master1"].plugins.enable(name=PLUGIN_RETRO_CHANGELOG) + mod = [(ldap.MOD_REPLACE, 'nsslapd-changelogmaxage', b"10s"), # 10 second triming + (ldap.MOD_REPLACE, 'nsslapd-changelog-trim-interval', b"5s")] + topology_m1c1.ms["master1"].modify_s("cn=%s,%s" % (PLUGIN_RETRO_CHANGELOG, DN_PLUGIN), mod) + # topology_m1c1.ms["master1"].plugins.enable(name=PLUGIN_MEMBER_OF) + # topology_m1c1.ms["master1"].plugins.enable(name=PLUGIN_REFER_INTEGRITY) + topology_m1c1.ms["master1"].stop(timeout=10) + topology_m1c1.ms["master1"].start(timeout=10) + + topology_m1c1.ms["master1"].log.info("test_ticket47871_init topology_m1c1 %r" % (topology_m1c1)) + # the test case will check if a warning message is logged in the + # error log of the supplier + topology_m1c1.ms["master1"].errorlog_file = open(topology_m1c1.ms["master1"].errlog, "r") + + +def test_ticket47871_1(topology_m1c1): + ''' + ADD entries and check they are all in the retrocl + ''' + # add dummy entries + for cpt in range(MAX_OTHERS): + name = "%s%d" % (OTHER_NAME, cpt) + topology_m1c1.ms["master1"].add_s(Entry(("cn=%s,%s" % (name, SUFFIX), { + 'objectclass': "top person".split(), + 'sn': name, + 'cn': name}))) + + topology_m1c1.ms["master1"].log.info( + "test_ticket47871_init: %d entries ADDed %s[0..%d]" % (MAX_OTHERS, OTHER_NAME, MAX_OTHERS - 1)) + + # Check the number of entries in the retro changelog + time.sleep(1) + ents = topology_m1c1.ms["master1"].search_s(RETROCL_SUFFIX, ldap.SCOPE_ONELEVEL, "(objectclass=*)") + assert len(ents) == MAX_OTHERS + topology_m1c1.ms["master1"].log.info("Added entries are") + for ent in ents: + topology_m1c1.ms["master1"].log.info("%s" % ent.dn) + + +def test_ticket47871_2(topology_m1c1): + ''' + Wait until there is just a last entries + ''' + MAX_TRIES = 10 + TRY_NO = 1 + while TRY_NO <= MAX_TRIES: + time.sleep(6) # at least 1 trimming occurred + ents = topology_m1c1.ms["master1"].search_s(RETROCL_SUFFIX, ldap.SCOPE_ONELEVEL, "(objectclass=*)") + assert len(ents) <= MAX_OTHERS + topology_m1c1.ms["master1"].log.info("\nTry no %d it remains %d entries" % (TRY_NO, len(ents))) + for ent in ents: + topology_m1c1.ms["master1"].log.info("%s" % ent.dn) + if len(ents) > 1: + TRY_NO += 1 + else: + break + assert TRY_NO <= MAX_TRIES + assert len(ents) <= 1 + + +if __name__ == '__main__': + # Run isolated + # -s for DEBUG mode + CURRENT_FILE = os.path.realpath(__file__) + pytest.main("-s %s" % CURRENT_FILE) diff --git a/dirsrvtests/tests/tickets/ticket47900_test.py b/dirsrvtests/tests/tickets/ticket47900_test.py new file mode 100644 index 0000000..ce5ce33 --- /dev/null +++ b/dirsrvtests/tests/tickets/ticket47900_test.py @@ -0,0 +1,212 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# 
Copyright (C) 2016 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. +# --- END COPYRIGHT BLOCK --- +# +import logging + +import ldap +import pytest +from lib389 import Entry +from lib389._constants import * +from lib389.topologies import topology_st +from lib389.utils import * + +pytestmark = pytest.mark.tier2 + +log = logging.getLogger(__name__) + +CONFIG_DN = 'cn=config' +ADMIN_NAME = 'passwd_admin' +ADMIN_DN = 'cn=%s,%s' % (ADMIN_NAME, SUFFIX) +ADMIN_PWD = 'adminPassword_1' +ENTRY_NAME = 'Joe Schmo' +ENTRY_DN = 'cn=%s,%s' % (ENTRY_NAME, SUFFIX) +INVALID_PWDS = ('2_Short', 'No_Number', 'N0Special', '{SSHA}bBy8UdtPZwu8uZna9QOYG3Pr41RpIRVDl8wddw==') + + +def test_ticket47900(topology_st): + """ + Test that password administrators/root DN can + bypass password syntax/policy. + + We need to test how passwords are modified in + existing entries, and when adding new entries. + + Create the Password Admin entry, but do not set + it as an admin yet. Use the entry to verify invalid + passwords are caught. Then activate the password + admin and make sure it can bypass password policy. + """ + + # Prepare the Password Administator + entry = Entry(ADMIN_DN) + entry.setValues('objectclass', 'top', 'person') + entry.setValues('sn', ADMIN_NAME) + entry.setValues('cn', ADMIN_NAME) + entry.setValues('userpassword', ADMIN_PWD) + + topology_st.standalone.log.info("Creating Password Administator entry %s..." % ADMIN_DN) + try: + topology_st.standalone.add_s(entry) + except ldap.LDAPError as e: + topology_st.standalone.log.error('Unexpected result ' + e.args[0]['desc']) + assert False + topology_st.standalone.log.error("Failed to add Password Administator %s, error: %s " + % (ADMIN_DN, e.args[0]['desc'])) + assert False + + topology_st.standalone.log.info("Configuring password policy...") + topology_st.standalone.config.replace_many(('nsslapd-pwpolicy-local', 'on'), + ('passwordCheckSyntax', 'on'), + ('passwordMinCategories', '1'), + ('passwordMinTokenLength', '1'), + ('passwordExp', 'on'), + ('passwordMinDigits', '1'), + ('passwordMinSpecials', '1')) + + # + # Add an aci to allow everyone all access (just makes things easier) + # + topology_st.standalone.log.info("Add aci to allow password admin to add/update entries...") + + ACI_TARGET = "(target = \"ldap:///%s\")" % SUFFIX + ACI_TARGETATTR = "(targetattr = *)" + ACI_ALLOW = "(version 3.0; acl \"Password Admin Access\"; allow (all) " + ACI_SUBJECT = "(userdn = \"ldap:///anyone\");)" + ACI_BODY = ACI_TARGET + ACI_TARGETATTR + ACI_ALLOW + ACI_SUBJECT + mod = [(ldap.MOD_ADD, 'aci', ensure_bytes(ACI_BODY))] + topology_st.standalone.modify_s(SUFFIX, mod) + + # + # Bind as the Password Admin + # + topology_st.standalone.log.info("Bind as the Password Administator (before activating)...") + topology_st.standalone.simple_bind_s(ADMIN_DN, ADMIN_PWD) + + # + # Setup our test entry, and test password policy is working + # + entry = Entry(ENTRY_DN) + entry.setValues('objectclass', 'top', 'person') + entry.setValues('sn', ENTRY_NAME) + entry.setValues('cn', ENTRY_NAME) + + # + # Start by attempting to add an entry with an invalid password + # + topology_st.standalone.log.info("Attempt to add entries with invalid passwords, these adds should fail...") + for passwd in INVALID_PWDS: + failed_as_expected = False + entry.setValues('userpassword', passwd) + topology_st.standalone.log.info("Create a regular user entry %s with password (%s)..." 
% (ENTRY_DN, passwd)) + try: + topology_st.standalone.add_s(entry) + except ldap.LDAPError as e: + # We failed as expected + failed_as_expected = True + topology_st.standalone.log.info('Add failed as expected: password (%s) result (%s)' + % (passwd, e.args[0]['desc'])) + + if not failed_as_expected: + topology_st.standalone.log.error("We were incorrectly able to add an entry " + + "with an invalid password (%s)" % (passwd)) + assert False + + # + # Now activate a password administator, bind as root dn to do the config + # update, then rebind as the password admin + # + topology_st.standalone.log.info("Activate the Password Administator...") + + # Bind as Root DN + topology_st.standalone.simple_bind_s(DN_DM, PASSWORD) + + # Update config + topology_st.standalone.modify_s(CONFIG_DN, [(ldap.MOD_REPLACE, 'passwordAdminDN', ensure_bytes(ADMIN_DN))]) + + # Bind as Password Admin + topology_st.standalone.simple_bind_s(ADMIN_DN, ADMIN_PWD) + + # + # Start adding entries with invalid passwords, delete the entry after each pass. + # + for passwd in INVALID_PWDS: + entry.setValues('userpassword', passwd) + topology_st.standalone.log.info("Create a regular user entry %s with password (%s)..." % (ENTRY_DN, passwd)) + topology_st.standalone.add_s(entry) + + topology_st.standalone.log.info('Succesfully added entry (%s)' % ENTRY_DN) + + # Delete entry for the next pass + topology_st.standalone.delete_s(ENTRY_DN) + # + # Add the entry for the next round of testing (modify password) + # + entry.setValues('userpassword', ADMIN_PWD) + topology_st.standalone.add_s(entry) + + # + # Deactivate the password admin and make sure invalid password updates fail + # + topology_st.standalone.log.info("Deactivate Password Administator and try invalid password updates...") + + # Bind as root DN + topology_st.standalone.simple_bind_s(DN_DM, PASSWORD) + + # Update conf + topology_st.standalone.modify_s(CONFIG_DN, [(ldap.MOD_DELETE, 'passwordAdminDN', None)]) + + # Bind as Password Admin + topology_st.standalone.simple_bind_s(ADMIN_DN, ADMIN_PWD) + + # + # Make invalid password updates that should fail + # + for passwd in INVALID_PWDS: + failed_as_expected = False + entry.setValues('userpassword', passwd) + try: + topology_st.standalone.modify_s(ENTRY_DN, [(ldap.MOD_REPLACE, 'userpassword', ensure_bytes(passwd))]) + except ldap.LDAPError as e: + # We failed as expected + failed_as_expected = True + topology_st.standalone.log.info('Password update failed as expected: password (%s) result (%s)' + % (passwd, e.args[0]['desc'])) + + if not failed_as_expected: + topology_st.standalone.log.error("We were incorrectly able to add an invalid password (%s)" + % (passwd)) + assert False + + # + # Now activate a password administator + # + topology_st.standalone.log.info("Activate Password Administator and try updates again...") + + # Bind as root D + topology_st.standalone.simple_bind_s(DN_DM, PASSWORD) + + # Update config + topology_st.standalone.modify_s(CONFIG_DN, [(ldap.MOD_REPLACE, 'passwordAdminDN', ensure_bytes(ADMIN_DN))]) + + # Bind as Password Admin + topology_st.standalone.simple_bind_s(ADMIN_DN, ADMIN_PWD) + + # + # Make the same password updates, but this time they should succeed + # + for passwd in INVALID_PWDS: + entry.setValues('userpassword', passwd) + topology_st.standalone.modify_s(ENTRY_DN, [(ldap.MOD_REPLACE, 'userpassword', ensure_bytes(passwd))]) + topology_st.standalone.log.info('Password update succeeded (%s)' % passwd) + + +if __name__ == '__main__': + # Run isolated + # -s for DEBUG mode + CURRENT_FILE = 
os.path.realpath(__file__) + pytest.main("-s %s" % CURRENT_FILE) diff --git a/dirsrvtests/tests/tickets/ticket47910_test.py b/dirsrvtests/tests/tickets/ticket47910_test.py new file mode 100644 index 0000000..799d44d --- /dev/null +++ b/dirsrvtests/tests/tickets/ticket47910_test.py @@ -0,0 +1,166 @@ +# Copyright (C) 2016 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. +# --- END COPYRIGHT BLOCK --- +# +import logging +import subprocess +from datetime import datetime, timedelta + +import pytest +from lib389.tasks import * +from lib389.topologies import topology_st + +from lib389._constants import SUFFIX, DEFAULT_SUFFIX + +logging.getLogger(__name__).setLevel(logging.DEBUG) +log = logging.getLogger(__name__) + +from lib389.utils import * + +# Skip on older versions +pytestmark = [pytest.mark.tier2, + pytest.mark.skipif(ds_is_older('1.3.4'), reason="Not implemented")] + + +@pytest.fixture(scope="module") +def log_dir(topology_st): + ''' + Do a search operation + and disable access log buffering + to generate the access log + ''' + + log.info("Diable access log buffering") + topology_st.standalone.setAccessLogBuffering(False) + + log.info("Do a ldapsearch operation") + topology_st.standalone.search_s(SUFFIX, ldap.SCOPE_SUBTREE, "(objectclass=*)") + + log.info("sleep for sometime so that access log file get generated") + time.sleep(1) + + return topology_st.standalone.accesslog + + +def format_time(local_datetime): + formatted_time = (local_datetime.strftime("[%d/%b/%Y:%H:%M:%S]")) + return formatted_time + + +def execute_logconv(inst, start_time_stamp, end_time_stamp, access_log): + ''' + This function will take start time and end time + as input parameter and + assign these values to -S and -E options of logconv + and, it will execute logconv and return result value + ''' + + log.info("Executing logconv.pl with -S current time and -E end time") + cmd = [os.path.join(inst.get_bin_dir(), 'logconv.pl'), '-S', start_time_stamp, '-E', end_time_stamp, access_log] + log.info(" ".join(cmd)) + proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE) + stdout, stderr = proc.communicate() + log.info("standard output" + ensure_str(stdout)) + log.info("standard errors" + ensure_str(stderr)) + return proc.returncode + + +def test_ticket47910_logconv_start_end_positive(topology_st, log_dir): + ''' + Execute logconv.pl with -S and -E(endtime) with random time stamp + This is execute successfully + ''' + # + # Execute logconv.pl -S -E with random timestamp + # + log.info('Running test_ticket47910 - Execute logconv.pl -S -E with random values') + + log.info("taking current time with offset of 2 mins and formatting it to feed -S") + start_time_stamp = (datetime.now() - timedelta(minutes=2)) + formatted_start_time_stamp = format_time(start_time_stamp) + + log.info("taking current time with offset of 2 mins and formatting it to feed -E") + end_time_stamp = (datetime.now() + timedelta(minutes=2)) + formatted_end_time_stamp = format_time(end_time_stamp) + + log.info("Executing logconv.pl with -S and -E") + result = execute_logconv(topology_st.standalone, formatted_start_time_stamp, formatted_end_time_stamp, log_dir) + assert result == 0 + + +def test_ticket47910_logconv_start_end_negative(topology_st, log_dir): + ''' + Execute logconv.pl with -S and -E(endtime) with random time stamp + This is a negative test case, where endtime will be lesser than the + starttime + This should give error message + ''' + + # + # Execute 
logconv.pl -S and -E with random timestamp + # + log.info('Running test_ticket47910 - Execute logconv.pl -S -E with starttime>endtime') + + log.info("taking current time with offset of 2 mins and formatting it to feed -S") + start_time_stamp = (datetime.now() + timedelta(minutes=2)) + formatted_start_time_stamp = format_time(start_time_stamp) + + log.info("taking current time with offset of 2 mins and formatting it to feed -E") + end_time_stamp = (datetime.now() - timedelta(minutes=2)) + formatted_end_time_stamp = format_time(end_time_stamp) + + log.info("Executing logconv.pl with -S and -E") + result = execute_logconv(topology_st.standalone, formatted_start_time_stamp, formatted_end_time_stamp, log_dir) + assert result == 1 + + +def test_ticket47910_logconv_start_end_invalid(topology_st, log_dir): + ''' + Execute logconv.pl with -S and -E(endtime) with invalid time stamp + This is a negative test case, where it should give error message + ''' + # + # Execute logconv.pl -S and -E with invalid timestamp + # + log.info('Running test_ticket47910 - Execute logconv.pl -S -E with invalid timestamp') + log.info("Set start time and end time to invalid values") + start_time_stamp = "invalid" + end_time_stamp = "invalid" + + log.info("Executing logconv.pl with -S and -E") + result = execute_logconv(topology_st.standalone, start_time_stamp, end_time_stamp, log_dir) + assert result == 1 + + +def test_ticket47910_logconv_noaccesslogs(topology_st, log_dir): + ''' + Execute logconv.pl -S(starttime) without specify + access logs location + ''' + + # + # Execute logconv.pl -S with random timestamp and no access log location + # + log.info('Running test_ticket47910 - Execute logconv.pl without access logs') + + log.info("taking current time with offset of 2 mins and formatting it to feed -S") + time_stamp = (datetime.now() - timedelta(minutes=2)) + formatted_time_stamp = format_time(time_stamp) + log.info("Executing logconv.pl with -S current time") + cmd = [os.path.join(topology_st.standalone.get_bin_dir(), 'logconv.pl'), '-S', formatted_time_stamp] + log.info(" ".join(cmd)) + proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE) + stdout, stderr = proc.communicate() + log.info("standard output" + ensure_str(stdout)) + log.info("standard errors" + ensure_str(stderr)) + + assert proc.returncode == 1 + + +if __name__ == '__main__': + # Run isolated + # -s for DEBUG mode + pytest.main("-s ticket47910_test.py") diff --git a/dirsrvtests/tests/tickets/ticket47920_test.py b/dirsrvtests/tests/tickets/ticket47920_test.py new file mode 100644 index 0000000..25203d4 --- /dev/null +++ b/dirsrvtests/tests/tickets/ticket47920_test.py @@ -0,0 +1,130 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2016 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. 
+# --- END COPYRIGHT BLOCK --- +# +import logging + +import ldap +import pytest +from ldap.controls.readentry import PostReadControl +from lib389 import Entry +from lib389._constants import * +from lib389.topologies import topology_st + +SCOPE_IN_CN = 'in' +SCOPE_OUT_CN = 'out' +SCOPE_IN_DN = 'cn=%s,%s' % (SCOPE_IN_CN, SUFFIX) +SCOPE_OUT_DN = 'cn=%s,%s' % (SCOPE_OUT_CN, SUFFIX) +from lib389.utils import * + +# Skip on older versions +pytestmark = [pytest.mark.tier2, + pytest.mark.skipif(ds_is_older('1.3.2'), reason="Not implemented")] +PROVISIONING_CN = "provisioning" +PROVISIONING_DN = "cn=%s,%s" % (PROVISIONING_CN, SCOPE_IN_DN) + +ACTIVE_CN = "accounts" +STAGE_CN = "staged users" +DELETE_CN = "deleted users" +ACTIVE_DN = "cn=%s,%s" % (ACTIVE_CN, SCOPE_IN_DN) +STAGE_DN = "cn=%s,%s" % (STAGE_CN, PROVISIONING_DN) +DELETE_DN = "cn=%s,%s" % (DELETE_CN, PROVISIONING_DN) + +STAGE_USER_CN = "stage guy" +STAGE_USER_DN = "cn=%s,%s" % (STAGE_USER_CN, STAGE_DN) + +ACTIVE_USER_CN = "active guy" +ACTIVE_USER_DN = "cn=%s,%s" % (ACTIVE_USER_CN, ACTIVE_DN) + +OUT_USER_CN = "out guy" +OUT_USER_DN = "cn=%s,%s" % (OUT_USER_CN, SCOPE_OUT_DN) + +STAGE_GROUP_CN = "stage group" +STAGE_GROUP_DN = "cn=%s,%s" % (STAGE_GROUP_CN, STAGE_DN) + +ACTIVE_GROUP_CN = "active group" +ACTIVE_GROUP_DN = "cn=%s,%s" % (ACTIVE_GROUP_CN, ACTIVE_DN) + +OUT_GROUP_CN = "out group" +OUT_GROUP_DN = "cn=%s,%s" % (OUT_GROUP_CN, SCOPE_OUT_DN) + +INDIRECT_ACTIVE_GROUP_CN = "indirect active group" +INDIRECT_ACTIVE_GROUP_DN = "cn=%s,%s" % (INDIRECT_ACTIVE_GROUP_CN, ACTIVE_DN) + +INITIAL_DESC = "inital description" +FINAL_DESC = "final description" + +log = logging.getLogger(__name__) + + +def _header(topology_st, label): + topology_st.standalone.log.info("\n\n###############################################") + topology_st.standalone.log.info("#######") + topology_st.standalone.log.info("####### %s" % label) + topology_st.standalone.log.info("#######") + topology_st.standalone.log.info("###############################################") + + +def _add_user(topology_st, type='active'): + if type == 'active': + topology_st.standalone.add_s(Entry((ACTIVE_USER_DN, { + 'objectclass': "top person inetuser".split(), + 'sn': ACTIVE_USER_CN, + 'cn': ACTIVE_USER_CN, + 'description': INITIAL_DESC}))) + elif type == 'stage': + topology_st.standalone.add_s(Entry((STAGE_USER_DN, { + 'objectclass': "top person inetuser".split(), + 'sn': STAGE_USER_CN, + 'cn': STAGE_USER_CN}))) + else: + topology_st.standalone.add_s(Entry((OUT_USER_DN, { + 'objectclass': "top person inetuser".split(), + 'sn': OUT_USER_CN, + 'cn': OUT_USER_CN}))) + + +def test_ticket47920_init(topology_st): + topology_st.standalone.add_s(Entry((SCOPE_IN_DN, { + 'objectclass': "top nscontainer".split(), + 'cn': SCOPE_IN_DN}))) + topology_st.standalone.add_s(Entry((ACTIVE_DN, { + 'objectclass': "top nscontainer".split(), + 'cn': ACTIVE_CN}))) + + # add users + _add_user(topology_st, 'active') + + +def test_ticket47920_mod_readentry_ctrl(topology_st): + _header(topology_st, 'MOD: with a readentry control') + + topology_st.standalone.log.info("Check the initial value of the entry") + ent = topology_st.standalone.getEntry(ACTIVE_USER_DN, ldap.SCOPE_BASE, "(objectclass=*)", ['description']) + assert ent.hasAttr('description') + assert ensure_str(ent.getValue('description')) == INITIAL_DESC + + pr = PostReadControl(criticality=True, attrList=['cn', 'description']) + _, _, _, resp_ctrls = topology_st.standalone.modify_ext_s(ACTIVE_USER_DN, + [(ldap.MOD_REPLACE, 'description', 
[ensure_bytes(FINAL_DESC)])], + serverctrls=[pr]) + + assert resp_ctrls[0].dn == ACTIVE_USER_DN + assert 'description' in resp_ctrls[0].entry + assert 'cn' in resp_ctrls[0].entry + print(resp_ctrls[0].entry['description']) + + ent = topology_st.standalone.getEntry(ACTIVE_USER_DN, ldap.SCOPE_BASE, "(objectclass=*)", ['description']) + assert ent.hasAttr('description') + assert ensure_str(ent.getValue('description')) == FINAL_DESC + + +if __name__ == '__main__': + # Run isolated + # -s for DEBUG mode + CURRENT_FILE = os.path.realpath(__file__) + pytest.main("-s %s" % CURRENT_FILE) diff --git a/dirsrvtests/tests/tickets/ticket47921_test.py b/dirsrvtests/tests/tickets/ticket47921_test.py new file mode 100644 index 0000000..d52d0c5 --- /dev/null +++ b/dirsrvtests/tests/tickets/ticket47921_test.py @@ -0,0 +1,88 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2016 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. +# --- END COPYRIGHT BLOCK --- +# +import pytest +from lib389.tasks import * +from lib389.utils import * +from lib389.topologies import topology_st + +pytestmark = pytest.mark.tier2 + +logging.getLogger(__name__).setLevel(logging.DEBUG) +log = logging.getLogger(__name__) + + +def test_ticket47921(topology_st): + ''' + Test that indirect cos reflects the current value of the indirect entry + ''' + + INDIRECT_COS_DN = 'cn=cos definition,' + DEFAULT_SUFFIX + MANAGER_DN = 'uid=my manager,ou=people,' + DEFAULT_SUFFIX + USER_DN = 'uid=user,ou=people,' + DEFAULT_SUFFIX + + # Add COS definition + topology_st.standalone.add_s(Entry((INDIRECT_COS_DN, + { + 'objectclass': 'top cosSuperDefinition cosIndirectDefinition ldapSubEntry'.split(), + 'cosIndirectSpecifier': 'manager', + 'cosAttribute': 'roomnumber' + }))) + + # Add manager entry + topology_st.standalone.add_s(Entry((MANAGER_DN, + {'objectclass': 'top extensibleObject'.split(), + 'uid': 'my manager', + 'roomnumber': '1' + }))) + + # Add user entry + topology_st.standalone.add_s(Entry((USER_DN, + {'objectclass': 'top person organizationalPerson inetorgperson'.split(), + 'sn': 'last', + 'cn': 'full', + 'givenname': 'mark', + 'uid': 'user', + 'manager': MANAGER_DN + }))) + + # Test COS is working + entry = topology_st.standalone.search_s(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE, + "uid=user", + ['roomnumber']) + if entry: + if ensure_str(entry[0].getValue('roomnumber')) != '1': + log.fatal('COS is not working.') + assert False + else: + log.fatal('Failed to find user entry') + assert False + + # Modify manager entry + topology_st.standalone.modify_s(MANAGER_DN, [(ldap.MOD_REPLACE, 'roomnumber', b'2')]) + + # Confirm COS is returning the new value + entry = topology_st.standalone.search_s(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE, + "uid=user", + ['roomnumber']) + if entry: + if ensure_str(entry[0].getValue('roomnumber')) != '2': + log.fatal('COS is not working after manager update.') + assert False + else: + log.fatal('Failed to find user entry') + assert False + + log.info('Test complete') + + +if __name__ == '__main__': + # Run isolated + # -s for DEBUG mode + CURRENT_FILE = os.path.realpath(__file__) + pytest.main("-s %s" % CURRENT_FILE) diff --git a/dirsrvtests/tests/tickets/ticket47927_test.py b/dirsrvtests/tests/tickets/ticket47927_test.py new file mode 100644 index 0000000..887fe1a --- /dev/null +++ b/dirsrvtests/tests/tickets/ticket47927_test.py @@ -0,0 +1,267 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2016 Red Hat, Inc. +# All rights reserved. 
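# A self-contained sketch (not from the imported patch) of the Post-Read control
# usage exercised by ticket47920_test.py above, written against plain python-ldap.
# The server URI, bind credentials and target DN are placeholders.
import ldap
from ldap.controls.readentry import PostReadControl

conn = ldap.initialize('ldap://localhost:389')          # placeholder URI
conn.simple_bind_s('cn=Directory Manager', 'password')  # placeholder credentials

pr = PostReadControl(criticality=True, attrList=['cn', 'description'])
_, _, _, resp_ctrls = conn.modify_ext_s(
    'cn=active guy,cn=accounts,cn=in,dc=example,dc=com',  # placeholder DN
    [(ldap.MOD_REPLACE, 'description', [b'final description'])],
    serverctrls=[pr])

# The response control carries the entry as it looks after the modify.
print(resp_ctrls[0].dn)
print(resp_ctrls[0].entry['description'])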
+# +# License: GPL (version 3 or any later version). +# See LICENSE for details. +# --- END COPYRIGHT BLOCK --- +# +import pytest +from lib389.tasks import * +from lib389.utils import * +from lib389.topologies import topology_st + +from lib389._constants import SUFFIX, DEFAULT_SUFFIX, PLUGIN_ATTR_UNIQUENESS + +# Skip on older versions +pytestmark = [pytest.mark.tier2, + pytest.mark.skipif(ds_is_older('1.3.4'), reason="Not implemented")] + +logging.getLogger(__name__).setLevel(logging.DEBUG) +log = logging.getLogger(__name__) + +EXCLUDED_CONTAINER_CN = "excluded_container" +EXCLUDED_CONTAINER_DN = "cn=%s,%s" % (EXCLUDED_CONTAINER_CN, SUFFIX) + +EXCLUDED_BIS_CONTAINER_CN = "excluded_bis_container" +EXCLUDED_BIS_CONTAINER_DN = "cn=%s,%s" % (EXCLUDED_BIS_CONTAINER_CN, SUFFIX) + +ENFORCED_CONTAINER_CN = "enforced_container" +ENFORCED_CONTAINER_DN = "cn=%s,%s" % (ENFORCED_CONTAINER_CN, SUFFIX) + +USER_1_CN = "test_1" +USER_1_DN = "cn=%s,%s" % (USER_1_CN, ENFORCED_CONTAINER_DN) +USER_2_CN = "test_2" +USER_2_DN = "cn=%s,%s" % (USER_2_CN, ENFORCED_CONTAINER_DN) +USER_3_CN = "test_3" +USER_3_DN = "cn=%s,%s" % (USER_3_CN, EXCLUDED_CONTAINER_DN) +USER_4_CN = "test_4" +USER_4_DN = "cn=%s,%s" % (USER_4_CN, EXCLUDED_BIS_CONTAINER_DN) + + +def test_ticket47927_init(topology_st): + topology_st.standalone.plugins.enable(name=PLUGIN_ATTR_UNIQUENESS) + try: + topology_st.standalone.modify_s('cn=' + PLUGIN_ATTR_UNIQUENESS + ',cn=plugins,cn=config', + [(ldap.MOD_REPLACE, 'uniqueness-attribute-name', b'telephonenumber'), + (ldap.MOD_REPLACE, 'uniqueness-subtrees', ensure_bytes(DEFAULT_SUFFIX)), + ]) + except ldap.LDAPError as e: + log.fatal('test_ticket47927: Failed to configure plugin for "telephonenumber": error ' + e.args[0]['desc']) + assert False + topology_st.standalone.restart(timeout=120) + + topology_st.standalone.add_s(Entry((EXCLUDED_CONTAINER_DN, {'objectclass': "top nscontainer".split(), + 'cn': EXCLUDED_CONTAINER_CN}))) + topology_st.standalone.add_s(Entry((EXCLUDED_BIS_CONTAINER_DN, {'objectclass': "top nscontainer".split(), + 'cn': EXCLUDED_BIS_CONTAINER_CN}))) + topology_st.standalone.add_s(Entry((ENFORCED_CONTAINER_DN, {'objectclass': "top nscontainer".split(), + 'cn': ENFORCED_CONTAINER_CN}))) + + # adding an entry on a stage with a different 'cn' + topology_st.standalone.add_s(Entry((USER_1_DN, { + 'objectclass': "top person".split(), + 'sn': USER_1_CN, + 'cn': USER_1_CN}))) + # adding an entry on a stage with a different 'cn' + topology_st.standalone.add_s(Entry((USER_2_DN, { + 'objectclass': "top person".split(), + 'sn': USER_2_CN, + 'cn': USER_2_CN}))) + topology_st.standalone.add_s(Entry((USER_3_DN, { + 'objectclass': "top person".split(), + 'sn': USER_3_CN, + 'cn': USER_3_CN}))) + topology_st.standalone.add_s(Entry((USER_4_DN, { + 'objectclass': "top person".split(), + 'sn': USER_4_CN, + 'cn': USER_4_CN}))) + + +def test_ticket47927_one(topology_st): + ''' + Check that uniqueness is enforce on all SUFFIX + ''' + UNIQUE_VALUE = b'1234' + try: + topology_st.standalone.modify_s(USER_1_DN, + [(ldap.MOD_REPLACE, 'telephonenumber', UNIQUE_VALUE)]) + except ldap.LDAPError as e: + log.fatal('test_ticket47927_one: Failed to set the telephonenumber for %s: %s' % (USER_1_DN, e.args[0]['desc'])) + assert False + + # we expect to fail because user1 is in the scope of the plugin + try: + topology_st.standalone.modify_s(USER_2_DN, + [(ldap.MOD_REPLACE, 'telephonenumber', UNIQUE_VALUE)]) + log.fatal('test_ticket47927_one: unexpected success to set the telephonenumber for %s' % (USER_2_DN)) + assert 
False + except ldap.LDAPError as e: + log.fatal('test_ticket47927_one: Failed (expected) to set the telephonenumber for %s: %s' % ( + USER_2_DN, e.args[0]['desc'])) + pass + + # we expect to fail because user1 is in the scope of the plugin + try: + topology_st.standalone.modify_s(USER_3_DN, + [(ldap.MOD_REPLACE, 'telephonenumber', UNIQUE_VALUE)]) + log.fatal('test_ticket47927_one: unexpected success to set the telephonenumber for %s' % (USER_3_DN)) + assert False + except ldap.LDAPError as e: + log.fatal('test_ticket47927_one: Failed (expected) to set the telephonenumber for %s: %s' % ( + USER_3_DN, e.args[0]['desc'])) + pass + + +def test_ticket47927_two(topology_st): + ''' + Exclude the EXCLUDED_CONTAINER_DN from the uniqueness plugin + ''' + try: + topology_st.standalone.modify_s('cn=' + PLUGIN_ATTR_UNIQUENESS + ',cn=plugins,cn=config', + [(ldap.MOD_REPLACE, 'uniqueness-exclude-subtrees', ensure_bytes(EXCLUDED_CONTAINER_DN))]) + except ldap.LDAPError as e: + log.fatal('test_ticket47927_two: Failed to configure plugin for to exclude %s: error %s' % ( + EXCLUDED_CONTAINER_DN, e.args[0]['desc'])) + assert False + topology_st.standalone.restart(timeout=120) + + +def test_ticket47927_three(topology_st): + ''' + Check that uniqueness is enforced on full SUFFIX except EXCLUDED_CONTAINER_DN + First case: it exists an entry (with the same attribute value) in the scope + of the plugin and we set the value in an entry that is in an excluded scope + ''' + UNIQUE_VALUE = b'9876' + try: + topology_st.standalone.modify_s(USER_1_DN, + [(ldap.MOD_REPLACE, 'telephonenumber', UNIQUE_VALUE)]) + except ldap.LDAPError as e: + log.fatal('test_ticket47927_three: Failed to set the telephonenumber ' + e.args[0]['desc']) + assert False + + # we should not be allowed to set this value (because user1 is in the scope) + try: + topology_st.standalone.modify_s(USER_2_DN, + [(ldap.MOD_REPLACE, 'telephonenumber', UNIQUE_VALUE)]) + log.fatal('test_ticket47927_three: unexpected success to set the telephonenumber for %s' % (USER_2_DN)) + assert False + except ldap.LDAPError as e: + log.fatal('test_ticket47927_three: Failed (expected) to set the telephonenumber for %s: %s' % ( + USER_2_DN, e.args[0]['desc'])) + + # USER_3_DN is in EXCLUDED_CONTAINER_DN so update should be successful + try: + topology_st.standalone.modify_s(USER_3_DN, + [(ldap.MOD_REPLACE, 'telephonenumber', UNIQUE_VALUE)]) + log.fatal('test_ticket47927_three: success to set the telephonenumber for %s' % (USER_3_DN)) + except ldap.LDAPError as e: + log.fatal('test_ticket47927_three: Failed (unexpected) to set the telephonenumber for %s: %s' % ( + USER_3_DN, e.args[0]['desc'])) + assert False + + +def test_ticket47927_four(topology_st): + ''' + Check that uniqueness is enforced on full SUFFIX except EXCLUDED_CONTAINER_DN + Second case: it exists an entry (with the same attribute value) in an excluded scope + of the plugin and we set the value in an entry is in the scope + ''' + UNIQUE_VALUE = b'1111' + # USER_3_DN is in EXCLUDED_CONTAINER_DN so update should be successful + try: + topology_st.standalone.modify_s(USER_3_DN, + [(ldap.MOD_REPLACE, 'telephonenumber', UNIQUE_VALUE)]) + log.fatal('test_ticket47927_four: success to set the telephonenumber for %s' % USER_3_DN) + except ldap.LDAPError as e: + log.fatal('test_ticket47927_four: Failed (unexpected) to set the telephonenumber for %s: %s' % ( + USER_3_DN, e.args[0]['desc'])) + assert False + + # we should be allowed to set this value (because user3 is excluded from scope) + try: + 
topology_st.standalone.modify_s(USER_1_DN, + [(ldap.MOD_REPLACE, 'telephonenumber', UNIQUE_VALUE)]) + except ldap.LDAPError as e: + log.fatal( + 'test_ticket47927_four: Failed to set the telephonenumber for %s: %s' % (USER_1_DN, e.args[0]['desc'])) + assert False + + # we should not be allowed to set this value (because user1 is in the scope) + try: + topology_st.standalone.modify_s(USER_2_DN, + [(ldap.MOD_REPLACE, 'telephonenumber', UNIQUE_VALUE)]) + log.fatal('test_ticket47927_four: unexpected success to set the telephonenumber %s' % USER_2_DN) + assert False + except ldap.LDAPError as e: + log.fatal('test_ticket47927_four: Failed (expected) to set the telephonenumber for %s: %s' % ( + USER_2_DN, e.args[0]['desc'])) + pass + + +def test_ticket47927_five(topology_st): + ''' + Exclude the EXCLUDED_BIS_CONTAINER_DN from the uniqueness plugin + ''' + try: + topology_st.standalone.modify_s('cn=' + PLUGIN_ATTR_UNIQUENESS + ',cn=plugins,cn=config', + [(ldap.MOD_ADD, 'uniqueness-exclude-subtrees', ensure_bytes(EXCLUDED_BIS_CONTAINER_DN))]) + except ldap.LDAPError as e: + log.fatal('test_ticket47927_five: Failed to configure plugin for to exclude %s: error %s' % ( + EXCLUDED_BIS_CONTAINER_DN, e.args[0]['desc'])) + assert False + topology_st.standalone.restart(timeout=120) + topology_st.standalone.getEntry('cn=' + PLUGIN_ATTR_UNIQUENESS + ',cn=plugins,cn=config', ldap.SCOPE_BASE) + + +def test_ticket47927_six(topology_st): + ''' + Check that uniqueness is enforced on full SUFFIX except EXCLUDED_CONTAINER_DN + and EXCLUDED_BIS_CONTAINER_DN + First case: it exists an entry (with the same attribute value) in the scope + of the plugin and we set the value in an entry that is in an excluded scope + ''' + UNIQUE_VALUE = b'222' + try: + topology_st.standalone.modify_s(USER_1_DN, + [(ldap.MOD_REPLACE, 'telephonenumber', UNIQUE_VALUE)]) + except ldap.LDAPError as e: + log.fatal('test_ticket47927_six: Failed to set the telephonenumber ' + e.args[0]['desc']) + assert False + + # we should not be allowed to set this value (because user1 is in the scope) + try: + topology_st.standalone.modify_s(USER_2_DN, + [(ldap.MOD_REPLACE, 'telephonenumber', UNIQUE_VALUE)]) + log.fatal('test_ticket47927_six: unexpected success to set the telephonenumber for %s' % (USER_2_DN)) + assert False + except ldap.LDAPError as e: + log.fatal('test_ticket47927_six: Failed (expected) to set the telephonenumber for %s: %s' % ( + USER_2_DN, e.args[0]['desc'])) + + # USER_3_DN is in EXCLUDED_CONTAINER_DN so update should be successful + try: + topology_st.standalone.modify_s(USER_3_DN, + [(ldap.MOD_REPLACE, 'telephonenumber', UNIQUE_VALUE)]) + log.fatal('test_ticket47927_six: success to set the telephonenumber for %s' % (USER_3_DN)) + except ldap.LDAPError as e: + log.fatal('test_ticket47927_six: Failed (unexpected) to set the telephonenumber for %s: %s' % ( + USER_3_DN, e.args[0]['desc'])) + assert False + # USER_4_DN is in EXCLUDED_CONTAINER_DN so update should be successful + try: + topology_st.standalone.modify_s(USER_4_DN, + [(ldap.MOD_REPLACE, 'telephonenumber', UNIQUE_VALUE)]) + log.fatal('test_ticket47927_six: success to set the telephonenumber for %s' % (USER_4_DN)) + except ldap.LDAPError as e: + log.fatal('test_ticket47927_six: Failed (unexpected) to set the telephonenumber for %s: %s' % ( + USER_4_DN, e.args[0]['desc'])) + assert False + + +if __name__ == '__main__': + # Run isolated + # -s for DEBUG mode + CURRENT_FILE = os.path.realpath(__file__) + pytest.main("-s %s" % CURRENT_FILE) diff --git 
a/dirsrvtests/tests/tickets/ticket47931_test.py b/dirsrvtests/tests/tickets/ticket47931_test.py new file mode 100644 index 0000000..2387992 --- /dev/null +++ b/dirsrvtests/tests/tickets/ticket47931_test.py @@ -0,0 +1,171 @@ +import threading +import time +import pytest +from lib389.tasks import * +from lib389.utils import * +from lib389.topologies import topology_st + +from lib389._constants import DEFAULT_SUFFIX, PLUGIN_RETRO_CHANGELOG, PLUGIN_MEMBER_OF, BACKEND_NAME + +# Skip on older versions +pytestmark = [pytest.mark.tier2, + pytest.mark.skipif(ds_is_older('1.3.3'), reason="Not implemented")] + +logging.getLogger(__name__).setLevel(logging.DEBUG) +log = logging.getLogger(__name__) + +installation1_prefix = None +SECOND_SUFFIX = "dc=deadlock" +SECOND_BACKEND = "deadlock" +RETROCL_PLUGIN_DN = ('cn=' + PLUGIN_RETRO_CHANGELOG + ',cn=plugins,cn=config') +MEMBEROF_PLUGIN_DN = ('cn=' + PLUGIN_MEMBER_OF + ',cn=plugins,cn=config') +GROUP_DN = ("cn=group," + DEFAULT_SUFFIX) +MEMBER_DN_COMP = "uid=member" +TIME_OUT = 5 + + +class modifySecondBackendThread(threading.Thread): + def __init__(self, inst, timeout): + threading.Thread.__init__(self) + self.daemon = True + self.inst = inst + self.timeout = timeout + + def run(self): + conn = self.inst.clone() + conn.set_option(ldap.OPT_TIMEOUT, self.timeout) + log.info('Modify second suffix...') + for x in range(0, 5000): + try: + conn.modify_s(SECOND_SUFFIX, + [(ldap.MOD_REPLACE, + 'description', + b'new description')]) + except ldap.LDAPError as e: + log.fatal('Failed to modify second suffix - error: %s' % + (e.args[0]['desc'])) + assert False + + conn.close() + log.info('Finished modifying second suffix') + + +def test_ticket47931(topology_st): + """Test Retro Changelog and MemberOf deadlock fix. + Verification steps: + - Enable retro cl and memberOf. + - Create two backends: A & B. + - Configure retrocl scoping for backend A. + - Configure memberOf plugin for uniquemember + - Create group in backend A. + - In parallel, add members to the group on A, and make modifications + to entries in backend B. + - Make sure the server does not hang during the updates to both + backends. + + """ + + # Enable dynamic plugins to make plugin configuration easier + try: + topology_st.standalone.modify_s(DN_CONFIG, + [(ldap.MOD_REPLACE, + 'nsslapd-dynamic-plugins', + b'on')]) + except ldap.LDAPError as e: + log.error('Failed to enable dynamic plugins! 
' + e.args[0]['desc']) + assert False + + # Enable the plugins + topology_st.standalone.plugins.enable(name=PLUGIN_MEMBER_OF) + topology_st.standalone.plugins.enable(name=PLUGIN_RETRO_CHANGELOG) + + # Create second backend + topology_st.standalone.backend.create(SECOND_SUFFIX, {BACKEND_NAME: SECOND_BACKEND}) + topology_st.standalone.mappingtree.create(SECOND_SUFFIX, bename=SECOND_BACKEND) + + # Create the root node of the second backend + try: + topology_st.standalone.add_s(Entry((SECOND_SUFFIX, + {'objectclass': 'top domain'.split(), + 'dc': 'deadlock'}))) + except ldap.LDAPError as e: + log.fatal('Failed to create suffix entry: error ' + e.args[0]['desc']) + assert False + + # Configure retrocl scope + try: + topology_st.standalone.modify_s(RETROCL_PLUGIN_DN, + [(ldap.MOD_REPLACE, + 'nsslapd-include-suffix', + ensure_bytes(DEFAULT_SUFFIX))]) + except ldap.LDAPError as e: + log.error('Failed to configure retrocl plugin: ' + e.args[0]['desc']) + assert False + + # Configure memberOf group attribute + try: + topology_st.standalone.modify_s(MEMBEROF_PLUGIN_DN, + [(ldap.MOD_REPLACE, + 'memberofgroupattr', + b'uniquemember')]) + except ldap.LDAPError as e: + log.fatal('Failed to configure memberOf plugin: error ' + e.args[0]['desc']) + assert False + time.sleep(1) + + # Create group + try: + topology_st.standalone.add_s(Entry((GROUP_DN, + {'objectclass': 'top extensibleObject'.split(), + 'cn': 'group'}))) + except ldap.LDAPError as e: + log.fatal('Failed to add grouo: error ' + e.args[0]['desc']) + assert False + + # Create 1500 entries (future members of the group) + for idx in range(1, 1500): + try: + USER_DN = ("uid=member%d,%s" % (idx, DEFAULT_SUFFIX)) + topology_st.standalone.add_s(Entry((USER_DN, + {'objectclass': 'top extensibleObject'.split(), + 'uid': 'member%d' % (idx)}))) + except ldap.LDAPError as e: + log.fatal('Failed to add user (%s): error %s' % (USER_DN, e.args[0]['desc'])) + assert False + + # Modify second backend (separate thread) + mod_backend_thrd = modifySecondBackendThread(topology_st.standalone, TIME_OUT) + mod_backend_thrd.start() + time.sleep(1) + + # Add members to the group - set timeout + log.info('Adding members to the group...') + topology_st.standalone.set_option(ldap.OPT_TIMEOUT, TIME_OUT) + for idx in range(1, 1500): + try: + MEMBER_VAL = ("uid=member%d,%s" % (idx, DEFAULT_SUFFIX)) + topology_st.standalone.modify_s(GROUP_DN, + [(ldap.MOD_ADD, + 'uniquemember', + ensure_bytes(MEMBER_VAL))]) + except ldap.TIMEOUT: + log.fatal('Deadlock! Bug verification failed.') + assert False + except ldap.LDAPError as e: + log.fatal('Failed to update group(not a deadlock) member (%s) - error: %s' % + (MEMBER_VAL, e.args[0]['desc'])) + assert False + log.info('Finished adding members to the group.') + + # Wait for the thread to finish + mod_backend_thrd.join() + + # No timeout, test passed! + log.info('Test complete\n') + + +if __name__ == '__main__': + # Run isolated + # -s for DEBUG mode + CURRENT_FILE = os.path.realpath(__file__) + pytest.main("-s %s" % CURRENT_FILE) diff --git a/dirsrvtests/tests/tickets/ticket47953_test.py b/dirsrvtests/tests/tickets/ticket47953_test.py new file mode 100644 index 0000000..c99a1ca --- /dev/null +++ b/dirsrvtests/tests/tickets/ticket47953_test.py @@ -0,0 +1,73 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2016 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. 
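# A sketch (not from the imported patch) of the deadlock-detection pattern used
# by ticket47931_test.py above: hammer one backend from a background thread while
# the main thread updates another, with a client-side timeout so a server hang
# surfaces as ldap.TIMEOUT instead of blocking forever.  The connection objects
# and DNs are placeholders.
import threading
import ldap

def hammer_suffix(conn, dn, attempts=5000, timeout=5):
    """Repeatedly rewrite 'description' on dn; a hung server raises ldap.TIMEOUT."""
    conn.set_option(ldap.OPT_TIMEOUT, timeout)
    for _ in range(attempts):
        conn.modify_s(dn, [(ldap.MOD_REPLACE, 'description', b'new description')])

# worker = threading.Thread(target=hammer_suffix,
#                           args=(second_conn, 'dc=deadlock'), daemon=True)
# worker.start()
# ...add uniquemember values to the group on the first backend here, also under
#    ldap.OPT_TIMEOUT, treating ldap.TIMEOUT as a detected deadlock...
# worker.join()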
+# --- END COPYRIGHT BLOCK --- +# +import logging +import shutil + +import pytest +from lib389.tasks import * +from lib389.utils import * +from lib389.topologies import topology_st + +from lib389._constants import DATA_DIR, DEFAULT_SUFFIX + +pytestmark = pytest.mark.tier2 + +log = logging.getLogger(__name__) + + +def test_ticket47953(topology_st): + """ + Test that we can delete an aci that has an invalid syntax. + Sart by importing an ldif with a "bad" aci, then simply try + to remove that value without error. + """ + + log.info('Testing Ticket 47953 - Test we can delete aci that has invalid syntax') + + # + # Import an invalid ldif + # + ldif_file = (topology_st.standalone.getDir(__file__, DATA_DIR) + + "ticket47953/ticket47953.ldif") + try: + ldif_dir = topology_st.standalone.get_ldif_dir() + shutil.copy(ldif_file, ldif_dir) + ldif_file = ldif_dir + '/ticket47953.ldif' + except: + log.fatal('Failed to copy ldif to instance ldif dir') + assert False + importTask = Tasks(topology_st.standalone) + args = {TASK_WAIT: True} + try: + importTask.importLDIF(DEFAULT_SUFFIX, None, ldif_file, args) + except ValueError: + assert False + + time.sleep(2) + + # + # Delete the invalid aci + # + acival = '(targetattr ="fffff")(version 3.0;acl "Directory Administrators Group"' + \ + ';allow (all) (groupdn = "ldap:///cn=Directory Administrators, dc=example,dc=com");)' + + log.info('Attempting to remove invalid aci...') + try: + topology_st.standalone.modify_s(DEFAULT_SUFFIX, [(ldap.MOD_DELETE, 'aci', ensure_bytes(acival))]) + log.info('Removed invalid aci.') + except ldap.LDAPError as e: + log.error('Failed to remove invalid aci: ' + e.args[0]['desc']) + assert False + + +if __name__ == '__main__': + # Run isolated + # -s for DEBUG mode + CURRENT_FILE = os.path.realpath(__file__) + pytest.main("-s %s" % CURRENT_FILE) diff --git a/dirsrvtests/tests/tickets/ticket47963_test.py b/dirsrvtests/tests/tickets/ticket47963_test.py new file mode 100644 index 0000000..8736511 --- /dev/null +++ b/dirsrvtests/tests/tickets/ticket47963_test.py @@ -0,0 +1,152 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2016 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. 
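# A sketch (not from the imported patch) of the single-value delete that
# ticket47953_test.py above performs: a MOD_DELETE with an explicit value list
# removes only that one 'aci' value, and the value must match the stored string
# exactly.  The connection and suffix are placeholders.
import ldap

def delete_aci_value(conn, suffix_dn, aci_value):
    """Remove one specific aci value from suffix_dn, leaving any other acis in place."""
    conn.modify_s(suffix_dn, [(ldap.MOD_DELETE, 'aci', [aci_value.encode('utf-8')])])

# delete_aci_value(conn, 'dc=example,dc=com',
#                  '(targetattr ="fffff")(version 3.0;acl "Directory Administrators Group"'
#                  ';allow (all) (groupdn = "ldap:///cn=Directory Administrators, dc=example,dc=com");)')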
+# --- END COPYRIGHT BLOCK --- +# +import logging + +import pytest +from lib389.tasks import * +from lib389.utils import * +from lib389.topologies import topology_st + +from lib389._constants import DEFAULT_SUFFIX, PLUGIN_MEMBER_OF + +pytestmark = pytest.mark.tier2 + +logging.getLogger(__name__).setLevel(logging.DEBUG) +log = logging.getLogger(__name__) + + +def test_ticket47963(topology_st): + ''' + Test that the memberOf plugin works correctly after setting: + + memberofskipnested: on + + ''' + PLUGIN_DN = 'cn=' + PLUGIN_MEMBER_OF + ',cn=plugins,cn=config' + USER_DN = 'uid=test_user,' + DEFAULT_SUFFIX + GROUP_DN1 = 'cn=group1,' + DEFAULT_SUFFIX + GROUP_DN2 = 'cn=group2,' + DEFAULT_SUFFIX + GROUP_DN3 = 'cn=group3,' + DEFAULT_SUFFIX + + # + # Enable the plugin and configure the skiop nest attribute, then restart the server + # + topology_st.standalone.plugins.enable(name=PLUGIN_MEMBER_OF) + try: + topology_st.standalone.modify_s(PLUGIN_DN, [(ldap.MOD_REPLACE, 'memberofskipnested', b'on')]) + except ldap.LDAPError as e: + log.error('test_automember: Failed to modify config entry: error ' + e.args[0]['desc']) + assert False + + topology_st.standalone.restart(timeout=10) + + # + # Add our groups, users, memberships, etc + # + try: + topology_st.standalone.add_s(Entry((USER_DN, { + 'objectclass': 'top extensibleObject'.split(), + 'uid': 'test_user' + }))) + except ldap.LDAPError as e: + log.error('Failed to add teset user: error ' + e.args[0]['desc']) + assert False + + try: + topology_st.standalone.add_s(Entry((GROUP_DN1, { + 'objectclass': 'top groupOfNames groupOfUniqueNames extensibleObject'.split(), + 'cn': 'group1', + 'member': USER_DN + }))) + except ldap.LDAPError as e: + log.error('Failed to add group1: error ' + e.args[0]['desc']) + assert False + + try: + topology_st.standalone.add_s(Entry((GROUP_DN2, { + 'objectclass': 'top groupOfNames groupOfUniqueNames extensibleObject'.split(), + 'cn': 'group2', + 'member': USER_DN + }))) + except ldap.LDAPError as e: + log.error('Failed to add group2: error ' + e.args[0]['desc']) + assert False + + # Add group with no member(yet) + try: + topology_st.standalone.add_s(Entry((GROUP_DN3, { + 'objectclass': 'top groupOfNames groupOfUniqueNames extensibleObject'.split(), + 'cn': 'group' + }))) + except ldap.LDAPError as e: + log.error('Failed to add group3: error ' + e.args[0]['desc']) + assert False + time.sleep(1) + + # + # Test we have the correct memberOf values in the user entry + # + try: + member_filter = ('(&(memberOf=' + GROUP_DN1 + ')(memberOf=' + GROUP_DN2 + '))') + entries = topology_st.standalone.search_s(USER_DN, ldap.SCOPE_BASE, member_filter) + if not entries: + log.fatal('User is missing expected memberOf attrs') + assert False + except ldap.LDAPError as e: + log.fatal('Search for user1 failed: ' + e.args[0]['desc']) + assert False + + # Add the user to the group + try: + topology_st.standalone.modify_s(GROUP_DN3, [(ldap.MOD_ADD, 'member', ensure_bytes(USER_DN))]) + except ldap.LDAPError as e: + log.error('Failed to member to group: error ' + e.args[0]['desc']) + assert False + time.sleep(1) + + # Check that the test user is a "memberOf" all three groups + try: + member_filter = ('(&(memberOf=' + GROUP_DN1 + ')(memberOf=' + GROUP_DN2 + + ')(memberOf=' + GROUP_DN3 + '))') + entries = topology_st.standalone.search_s(USER_DN, ldap.SCOPE_BASE, member_filter) + if not entries: + log.fatal('User is missing expected memberOf attrs') + assert False + except ldap.LDAPError as e: + log.fatal('Search for user1 failed: ' + e.args[0]['desc']) + 
assert False + + # + # Delete group2, and check memberOf values in the user entry + # + try: + topology_st.standalone.delete_s(GROUP_DN2) + except ldap.LDAPError as e: + log.error('Failed to delete test group2: ' + e.args[0]['desc']) + assert False + time.sleep(1) + + try: + member_filter = ('(&(memberOf=' + GROUP_DN1 + ')(memberOf=' + GROUP_DN3 + '))') + entries = topology_st.standalone.search_s(USER_DN, ldap.SCOPE_BASE, member_filter) + if not entries: + log.fatal('User incorrect memberOf attrs') + assert False + except ldap.LDAPError as e: + log.fatal('Search for user1 failed: ' + e.args[0]['desc']) + assert False + + log.info('Test complete') + + +if __name__ == '__main__': + # Run isolated + # -s for DEBUG mode + CURRENT_FILE = os.path.realpath(__file__) + pytest.main("-s %s" % CURRENT_FILE) diff --git a/dirsrvtests/tests/tickets/ticket47970_test.py b/dirsrvtests/tests/tickets/ticket47970_test.py new file mode 100644 index 0000000..f59405d --- /dev/null +++ b/dirsrvtests/tests/tickets/ticket47970_test.py @@ -0,0 +1,89 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2016 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. +# --- END COPYRIGHT BLOCK --- +# +import logging + +import ldap.sasl +import pytest +from lib389.tasks import * +from lib389.topologies import topology_st + +pytestmark = pytest.mark.tier2 + +log = logging.getLogger(__name__) + +USER1_DN = "uid=user1,%s" % DEFAULT_SUFFIX +USER2_DN = "uid=user2,%s" % DEFAULT_SUFFIX + + +def test_ticket47970(topology_st): + """ + Testing that a failed SASL bind does not trigger account lockout - + which would attempt to update the passwordRetryCount on the root dse entry + """ + + log.info('Testing Ticket 47970 - Testing that a failed SASL bind does not trigger account lockout') + + # + # Enable account lockout + # + try: + topology_st.standalone.modify_s("cn=config", [(ldap.MOD_REPLACE, 'passwordLockout', b'on')]) + log.info('account lockout enabled.') + except ldap.LDAPError as e: + log.error('Failed to enable account lockout: ' + e.args[0]['desc']) + assert False + + try: + topology_st.standalone.modify_s("cn=config", [(ldap.MOD_REPLACE, 'passwordMaxFailure', b'5')]) + log.info('passwordMaxFailure set.') + except ldap.LDAPError as e: + log.error('Failed to to set passwordMaxFailure: ' + e.args[0]['desc']) + assert False + + # + # Perform SASL bind that should fail + # + failed_as_expected = False + try: + user_name = "mark" + pw = "secret" + auth_tokens = ldap.sasl.digest_md5(user_name, pw) + topology_st.standalone.sasl_interactive_bind_s("", auth_tokens) + except ldap.INVALID_CREDENTIALS as e: + log.info("SASL Bind failed as expected") + failed_as_expected = True + + if not failed_as_expected: + log.error("SASL bind unexpectedly succeeded!") + assert False + + # + # Check that passwordRetryCount was not set on the root dse entry + # + try: + entry = topology_st.standalone.search_s("", ldap.SCOPE_BASE, + "passwordRetryCount=*", + ['passwordRetryCount']) + except ldap.LDAPError as e: + log.error('Failed to search Root DSE entry: ' + e.args[0]['desc']) + assert False + + if entry: + log.error('Root DSE was incorrectly updated') + assert False + + # We passed + log.info('Root DSE was correctly not updated') + + +if __name__ == '__main__': + # Run isolated + # -s for DEBUG mode + CURRENT_FILE = os.path.realpath(__file__) + pytest.main("-s %s" % CURRENT_FILE) diff --git a/dirsrvtests/tests/tickets/ticket47973_test.py b/dirsrvtests/tests/tickets/ticket47973_test.py 
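# A sketch (not from the imported patch) of the failing DIGEST-MD5 bind that
# ticket47970_test.py above performs, using plain python-ldap.  The URI is a
# placeholder and the credentials are deliberately bogus; the point of the ticket
# is that this failure must not bump passwordRetryCount on the root DSE.
import ldap
import ldap.sasl

conn = ldap.initialize('ldap://localhost:389')   # placeholder URI
tokens = ldap.sasl.digest_md5('mark', 'secret')  # non-existent user / wrong password
try:
    conn.sasl_interactive_bind_s('', tokens)
    raise AssertionError('SASL bind unexpectedly succeeded')
except ldap.INVALID_CREDENTIALS:
    pass  # expected failure path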
new file mode 100644 index 0000000..2712156 --- /dev/null +++ b/dirsrvtests/tests/tickets/ticket47973_test.py @@ -0,0 +1,227 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2016 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. +# --- END COPYRIGHT BLOCK --- +# +import logging +import re + +import ldap.sasl +import pytest +from lib389.tasks import * +from lib389.topologies import topology_st + +from lib389._constants import SUFFIX, DEFAULT_SUFFIX + +pytestmark = pytest.mark.tier2 + +log = logging.getLogger(__name__) + +USER_DN = 'uid=user1,%s' % (DEFAULT_SUFFIX) +SCHEMA_RELOAD_COUNT = 10 + + +def task_complete(conn, task_dn): + finished = False + + try: + task_entry = conn.search_s(task_dn, ldap.SCOPE_BASE, 'objectclass=*') + if not task_entry: + log.fatal('wait_for_task: Search failed to find task: ' + task_dn) + assert False + if task_entry[0].hasAttr('nstaskexitcode'): + # task is done + finished = True + except ldap.LDAPError as e: + log.fatal('wait_for_task: Search failed: ' + e.args[0]['desc']) + assert False + + return finished + + +def test_ticket47973(topology_st): + """ + During the schema reload task there is a small window where the new schema is not loaded + into the asi hashtables - this results in searches not returning entries. + """ + + log.info('Testing Ticket 47973 - Test the searches still work as expected during schema reload tasks') + + # + # Add a user + # + try: + topology_st.standalone.add_s(Entry((USER_DN, { + 'objectclass': 'top extensibleObject'.split(), + 'uid': 'user1' + }))) + except ldap.LDAPError as e: + log.error('Failed to add user1: error ' + e.args[0]['desc']) + assert False + + # + # Run a series of schema_reload tasks while searching for our user. Since + # this is a race condition, run it several times. 
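# Sketch, not from the imported patch: the task-polling idea behind the loop
# below.  ticket47973 queues entries under 'cn=schema reload task,cn=tasks,cn=config'
# and treats the appearance of nstaskexitcode as completion (see task_complete()
# above).  A plain python-ldap version of that wait, with `conn` and `task_dn`
# as placeholders, might look like this:
import time
import ldap

def wait_for_task(conn, task_dn, poll=1):
    """Block until the task entry gains an nstaskexitcode attribute (case-insensitive)."""
    while True:
        _, attrs = conn.search_s(task_dn, ldap.SCOPE_BASE, 'objectclass=*')[0]
        for name, values in attrs.items():
            if name.lower() == 'nstaskexitcode':
                return values[0]
        time.sleep(poll)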
+ # + task_count = 0 + while task_count < SCHEMA_RELOAD_COUNT: + # + # Add a schema reload task + # + + TASK_DN = 'cn=task-' + str(task_count) + ',cn=schema reload task, cn=tasks, cn=config' + try: + topology_st.standalone.add_s(Entry((TASK_DN, { + 'objectclass': 'top extensibleObject'.split(), + 'cn': 'task-' + str(task_count) + }))) + except ldap.LDAPError as e: + log.error('Failed to add task entry: error ' + e.args[0]['desc']) + assert False + + # + # While we wait for the task to complete keep searching for our user + # + search_count = 0 + while search_count < 100: + # + # Now check the user is still being returned + # + try: + entries = topology_st.standalone.search_s(DEFAULT_SUFFIX, + ldap.SCOPE_SUBTREE, + '(uid=user1)') + if not entries or not entries[0]: + log.fatal('User was not returned from search!') + assert False + except ldap.LDAPError as e: + log.fatal('Unable to search for entry %s: error %s' % (USER_DN, e.args[0]['desc'])) + assert False + + # + # Check if task is complete + # + if task_complete(topology_st.standalone, TASK_DN): + break + + search_count += 1 + + task_count += 1 + + +def test_ticket47973_case(topology_st): + log.info('Testing Ticket 47973 (case) - Test the cases in the original schema are preserved.') + + log.info('case 1 - Test the cases in the original schema are preserved.') + + tsfile = topology_st.standalone.schemadir + '/98test.ldif' + tsfd = open(tsfile, "w") + Mozattr0 = "MoZiLLaaTTRiBuTe" + testschema = "dn: cn=schema\nattributetypes: ( 8.9.10.11.12.13.14 NAME '" + Mozattr0 + "' SYNTAX 1.3.6.1.4.1.1466.115.121.1.15 X-ORIGIN 'Mozilla Dummy Schema' )\nobjectclasses: ( 1.2.3.4.5.6.7 NAME 'MozillaObject' SUP top MUST ( objectclass $ cn ) MAY ( " + Mozattr0 + " ) X-ORIGIN 'user defined' )" + tsfd.write(testschema) + tsfd.close() + + try: + # run the schema reload task with the default schemadir + topology_st.standalone.tasks.schemaReload(schemadir=topology_st.standalone.schemadir, + args={TASK_WAIT: False}) + except ValueError: + log.error('Schema Reload task failed.') + assert False + + time.sleep(5) + + try: + schemaentry = topology_st.standalone.search_s("cn=schema", ldap.SCOPE_BASE, + 'objectclass=top', + ["objectclasses"]) + oclist = schemaentry[0].data.get("objectclasses") + except ldap.LDAPError as e: + log.error('Failed to get schema entry: error (%s)' % e.args[0]['desc']) + raise e + + found = 0 + for oc in oclist: + log.info('OC: %s' % oc) + moz = re.findall(Mozattr0, oc.decode('utf-8')) + if moz: + found = 1 + log.info('case 1: %s is in the objectclasses list -- PASS' % Mozattr0) + + if found == 0: + log.error('case 1: %s is not in the objectclasses list -- FAILURE' % Mozattr0) + assert False + + log.info('case 2 - Duplicated schema except cases are not loaded.') + + tsfile = topology_st.standalone.schemadir + '/97test.ldif' + tsfd = open(tsfile, "w") + Mozattr1 = "MOZILLAATTRIBUTE" + testschema = "dn: cn=schema\nattributetypes: ( 8.9.10.11.12.13.14 NAME '" + Mozattr1 + "' SYNTAX 1.3.6.1.4.1.1466.115.121.1.15 X-ORIGIN 'Mozilla Dummy Schema' )\nobjectclasses: ( 1.2.3.4.5.6.7 NAME 'MozillaObject' SUP top MUST ( objectclass $ cn ) MAY ( " + Mozattr1 + " ) X-ORIGIN 'user defined' )" + tsfd.write(testschema) + tsfd.close() + + try: + # run the schema reload task with the default schemadir + topology_st.standalone.tasks.schemaReload(schemadir=topology_st.standalone.schemadir, + args={TASK_WAIT: False}) + except ValueError: + log.error('Schema Reload task failed.') + assert False + + time.sleep(5) + + try: + schemaentry = 
topology_st.standalone.search_s("cn=schema", ldap.SCOPE_BASE, + 'objectclass=top', + ["objectclasses"]) + oclist = schemaentry[0].data.get("objectclasses") + except ldap.LDAPError as e: + log.error('Failed to get schema entry: error (%s)' % e.args[0]['desc']) + raise e + + for oc in oclist: + log.info('OC: %s' % oc) + moz = re.findall(Mozattr1, oc.decode('utf-8')) + if moz: + log.error('case 2: %s is in the objectclasses list -- FAILURE' % Mozattr1) + assert False + + log.info('case 2: %s is not in the objectclasses list -- PASS' % Mozattr1) + + Mozattr2 = "mozillaattribute" + log.info('case 2-1: Use the custom schema with %s' % Mozattr2) + name = "test user" + try: + topology_st.standalone.add_s(Entry(("cn=%s,%s" % (name, SUFFIX), { + 'objectclass': "top person MozillaObject".split(), + 'sn': name, + 'cn': name, + Mozattr2: name}))) + except ldap.LDAPError as e: + log.error('Failed to add a test entry: error (%s)' % e.args[0]['desc']) + raise e + + try: + testentry = topology_st.standalone.search_s(SUFFIX, ldap.SCOPE_SUBTREE, + 'objectclass=mozillaobject', + [Mozattr2]) + except ldap.LDAPError as e: + log.error('Failed to get schema entry: error (%s)' % e.args[0]['desc']) + raise e + + mozattrval = testentry[0].data.get(Mozattr2) + if mozattrval[0] == name: + log.info('case 2-1: %s: %s found-- PASS' % (Mozattr2, name)) + else: + log.info('case 2-1: %s: %s not found-- FAILURE' % (Mozattr2, mozattrval[0])) + + +if __name__ == '__main__': + # Run isolated + # -s for DEBUG mode + CURRENT_FILE = os.path.realpath(__file__) + pytest.main("-s %s" % CURRENT_FILE) diff --git a/dirsrvtests/tests/tickets/ticket47976_test.py b/dirsrvtests/tests/tickets/ticket47976_test.py new file mode 100644 index 0000000..47ea709 --- /dev/null +++ b/dirsrvtests/tests/tickets/ticket47976_test.py @@ -0,0 +1,160 @@ +import pytest +from lib389.tasks import * +from lib389.utils import * +from lib389.topologies import topology_st + +from lib389._constants import SUFFIX, DEFAULT_SUFFIX, PLUGIN_MANAGED_ENTRY, DN_LDBM + +pytestmark = pytest.mark.tier2 + +PEOPLE_OU = 'people' +PEOPLE_DN = "ou=%s,%s" % (PEOPLE_OU, SUFFIX) +GROUPS_OU = 'groups' +GROUPS_DN = "ou=%s,%s" % (GROUPS_OU, SUFFIX) +DEFINITIONS_CN = 'definitions' +DEFINITIONS_DN = "cn=%s,%s" % (DEFINITIONS_CN, SUFFIX) +TEMPLATES_CN = 'templates' +TEMPLATES_DN = "cn=%s,%s" % (TEMPLATES_CN, SUFFIX) +MANAGED_GROUP_TEMPLATES_CN = 'managed group templates' +MANAGED_GROUP_TEMPLATES_DN = 'cn=%s,%s' % (MANAGED_GROUP_TEMPLATES_CN, TEMPLATES_DN) +MANAGED_GROUP_MEP_TMPL_CN = 'UPG' +MANAGED_GROUP_MEP_TMPL_DN = 'cn=%s,%s' % (MANAGED_GROUP_MEP_TMPL_CN, MANAGED_GROUP_TEMPLATES_DN) +MANAGED_GROUP_DEF_CN = 'managed group definition' +MANAGED_GROUP_DEF_DN = 'cn=%s,%s' % (MANAGED_GROUP_DEF_CN, DEFINITIONS_DN) + +MAX_ACCOUNTS = 2 + +logging.getLogger(__name__).setLevel(logging.DEBUG) +log = logging.getLogger(__name__) + + +def test_ticket47976_init(topology_st): + """Create mep definitions and templates""" + + try: + topology_st.standalone.add_s(Entry((PEOPLE_DN, { + 'objectclass': "top extensibleObject".split(), + 'ou': 'people'}))) + except ldap.ALREADY_EXISTS: + pass + try: + topology_st.standalone.add_s(Entry((GROUPS_DN, { + 'objectclass': "top extensibleObject".split(), + 'ou': GROUPS_OU}))) + except ldap.ALREADY_EXISTS: + pass + topology_st.standalone.add_s(Entry((DEFINITIONS_DN, { + 'objectclass': "top nsContainer".split(), + 'cn': DEFINITIONS_CN}))) + topology_st.standalone.add_s(Entry((TEMPLATES_DN, { + 'objectclass': "top nsContainer".split(), + 'cn': TEMPLATES_CN}))) + 
topology_st.standalone.add_s(Entry((MANAGED_GROUP_DEF_DN, { + 'objectclass': "top extensibleObject".split(), + 'cn': MANAGED_GROUP_DEF_CN, + 'originScope': PEOPLE_DN, + 'originFilter': '(objectclass=posixAccount)', + 'managedBase': GROUPS_DN, + 'managedTemplate': MANAGED_GROUP_MEP_TMPL_DN}))) + + topology_st.standalone.add_s(Entry((MANAGED_GROUP_TEMPLATES_DN, { + 'objectclass': "top nsContainer".split(), + 'cn': MANAGED_GROUP_TEMPLATES_CN}))) + + topology_st.standalone.add_s(Entry((MANAGED_GROUP_MEP_TMPL_DN, { + 'objectclass': "top mepTemplateEntry".split(), + 'cn': MANAGED_GROUP_MEP_TMPL_CN, + 'mepRDNAttr': 'cn', + 'mepStaticAttr': ['objectclass: posixGroup', + 'objectclass: extensibleObject'], + 'mepMappedAttr': ['cn: $cn|uid: $cn', + 'gidNumber: $uidNumber']}))) + + topology_st.standalone.plugins.enable(name=PLUGIN_MANAGED_ENTRY) + topology_st.standalone.restart(timeout=10) + + +def test_ticket47976_1(topology_st): + mod = [(ldap.MOD_REPLACE, 'nsslapd-pluginConfigArea', ensure_bytes(DEFINITIONS_DN))] + topology_st.standalone.modify_s('cn=%s,cn=plugins,cn=config' % PLUGIN_MANAGED_ENTRY, mod) + topology_st.standalone.stop(timeout=10) + topology_st.standalone.start(timeout=10) + for cpt in range(MAX_ACCOUNTS): + name = "user%d" % (cpt) + topology_st.standalone.add_s(Entry(("uid=%s,%s" % (name, PEOPLE_DN), { + 'objectclass': 'top posixAccount extensibleObject'.split(), + 'uid': name, + 'cn': name, + 'uidNumber': '1', + 'gidNumber': '1', + 'homeDirectory': '/home/%s' % name + }))) + + +def test_ticket47976_2(topology_st): + """It reimports the database with a very large page size + so all the entries (user and its private group). + """ + + log.info('Test complete') + mod = [(ldap.MOD_REPLACE, 'nsslapd-db-page-size', ensure_bytes(str(128 * 1024)))] + topology_st.standalone.modify_s(DN_LDBM, mod) + + # Get the the full path and name for our LDIF we will be exporting + log.info('Export LDIF file...') + ldif_dir = topology_st.standalone.get_ldif_dir() + ldif_file = ldif_dir + "/export.ldif" + args = {EXPORT_REPL_INFO: False, + TASK_WAIT: True} + exportTask = Tasks(topology_st.standalone) + try: + exportTask.exportLDIF(DEFAULT_SUFFIX, None, ldif_file, args) + except ValueError: + assert False + # import the new ldif file + log.info('Import LDIF file...') + importTask = Tasks(topology_st.standalone) + args = {TASK_WAIT: True} + try: + importTask.importLDIF(DEFAULT_SUFFIX, None, ldif_file, args) + os.remove(ldif_file) + except ValueError: + os.remove(ldif_file) + assert False + + +def test_ticket47976_3(topology_st): + """A single delete of a user should hit 47976, because mep post op will + delete its related group. + """ + + log.info('Testing if the delete will hang or not') + # log.info("\n\nAttach\n\n debugger") + # time.sleep(60) + topology_st.standalone.set_option(ldap.OPT_TIMEOUT, 5) + try: + for cpt in range(MAX_ACCOUNTS): + name = "user%d" % (cpt) + topology_st.standalone.delete_s("uid=%s,%s" % (name, PEOPLE_DN)) + except ldap.TIMEOUT as e: + log.fatal('Timeout... 
likely it hangs (47976)') + assert False + + # check the entry has been deleted + for cpt in range(MAX_ACCOUNTS): + try: + name = "user%d" % (cpt) + topology_st.standalone.getEntry("uid=%s,%s" % (name, PEOPLE_DN), ldap.SCOPE_BASE, 'objectclass=*') + assert False + except ldap.NO_SUCH_OBJECT: + log.info('%s was correctly deleted' % name) + pass + + assert cpt == (MAX_ACCOUNTS - 1) + + +if __name__ == '__main__': + # Run isolated + # -s for DEBUG mode + CURRENT_FILE = os.path.realpath(__file__) + pytest.main("-s %s" % CURRENT_FILE) diff --git a/dirsrvtests/tests/tickets/ticket47980_test.py b/dirsrvtests/tests/tickets/ticket47980_test.py new file mode 100644 index 0000000..61cda2f --- /dev/null +++ b/dirsrvtests/tests/tickets/ticket47980_test.py @@ -0,0 +1,595 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2016 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. +# --- END COPYRIGHT BLOCK --- +# +import logging + +import ldap.sasl +import pytest +from lib389.tasks import * +from lib389.topologies import topology_st + +from lib389._constants import DN_CONFIG, DEFAULT_SUFFIX + +pytestmark = pytest.mark.tier2 + +log = logging.getLogger(__name__) + +BRANCH1 = 'ou=level1,' + DEFAULT_SUFFIX +BRANCH2 = 'ou=level2,ou=level1,' + DEFAULT_SUFFIX +BRANCH3 = 'ou=level3,ou=level2,ou=level1,' + DEFAULT_SUFFIX +BRANCH4 = 'ou=people,' + DEFAULT_SUFFIX +BRANCH5 = 'ou=lower,ou=people,' + DEFAULT_SUFFIX +BRANCH6 = 'ou=lower,ou=lower,ou=people,' + DEFAULT_SUFFIX +USER1_DN = 'uid=user1,%s' % (BRANCH1) +USER2_DN = 'uid=user2,%s' % (BRANCH2) +USER3_DN = 'uid=user3,%s' % (BRANCH3) +USER4_DN = 'uid=user4,%s' % (BRANCH4) +USER5_DN = 'uid=user5,%s' % (BRANCH5) +USER6_DN = 'uid=user6,%s' % (BRANCH6) + +BRANCH1_CONTAINER = 'cn=nsPwPolicyContainer,ou=level1,dc=example,dc=com' +BRANCH1_PWP = 'cn=cn\3DnsPwPolicyEntry\2Cou\3Dlevel1\2Cdc\3Dexample\2Cdc\3Dcom,' + \ + 'cn=nsPwPolicyContainer,ou=level1,dc=example,dc=com' +BRANCH1_COS_TMPL = 'cn=cn\3DnsPwTemplateEntry\2Cou\3Dlevel1\2Cdc\3Dexample\2Cdc\3Dcom,' + \ + 'cn=nsPwPolicyContainer,ou=level1,dc=example,dc=com' +BRANCH1_COS_DEF = 'cn=nsPwPolicy_CoS,ou=level1,dc=example,dc=com' + +BRANCH2_CONTAINER = 'cn=nsPwPolicyContainer,ou=level2,ou=level1,dc=example,dc=com' +BRANCH2_PWP = 'cn=cn\3DnsPwPolicyEntry\2Cou\3Dlevel2\2Cou\3Dlevel1\2Cdc\3Dexample\2Cdc\3Dcom,' + \ + 'cn=nsPwPolicyContainer,ou=level2,ou=level1,dc=example,dc=com' +BRANCH2_COS_TMPL = 'cn=cn\3DnsPwTemplateEntry\2Cou\3Dlevel2\2Cou\3Dlevel1\2Cdc\3Dexample\2Cdc\3Dcom,' + \ + 'cn=nsPwPolicyContainer,ou=level2,ou=level1,dc=example,dc=com' +BRANCH2_COS_DEF = 'cn=nsPwPolicy_CoS,ou=level2,ou=level1,dc=example,dc=com' + +BRANCH3_CONTAINER = 'cn=nsPwPolicyContainer,ou=level3,ou=level2,ou=level1,dc=example,dc=com' +BRANCH3_PWP = 'cn=cn\3DnsPwPolicyEntry\2Cou\3Dlevel3\2Cou\3Dlevel2\2Cou\3Dlevel1\2Cdc\3Dexample\2Cdc\3Dcom,' + \ + 'cn=nsPwPolicyContainer,ou=level3,ou=level2,ou=level1,dc=example,dc=com' +BRANCH3_COS_TMPL = 'cn=cn\3DnsPwTemplateEntry\2Cou\3Dlevel3\2Cou\3Dlevel2\2Cou\3Dlevel1\2Cdc\3Dexample\2Cdc\3Dcom,' + \ + 'cn=nsPwPolicyContainer,ou=level3,ou=level2,ou=level1,dc=example,dc=com' +BRANCH3_COS_DEF = 'cn=nsPwPolicy_CoS,ou=level3,ou=level2,ou=level1,dc=example,dc=com' + +BRANCH4_CONTAINER = 'cn=nsPwPolicyContainer,ou=people,dc=example,dc=com' +BRANCH4_PWP = 'cn=cn\3DnsPwPolicyEntry\2Cou\3DPeople\2Cdc\3Dexample\2Cdc\3Dcom,' + \ + 'cn=nsPwPolicyContainer,ou=People,dc=example,dc=com' +BRANCH4_COS_TMPL = 
'cn=cn\3DnsPwTemplateEntry\2Cou\3DPeople\2Cdc\3Dexample\2Cdc\3Dcom,' + \ + 'cn=nsPwPolicyContainer,ou=People,dc=example,dc=com' +BRANCH4_COS_DEF = 'cn=nsPwPolicy_CoS,ou=people,dc=example,dc=com' + +BRANCH5_CONTAINER = 'cn=nsPwPolicyContainer,ou=lower,ou=people,dc=example,dc=com' +BRANCH5_PWP = 'cn=cn\3DnsPwPolicyEntry\2Cou\3Dlower\2Cou\3DPeople\2Cdc\3Dexample\2Cdc\3Dcom,' + \ + 'cn=nsPwPolicyContainer,ou=lower,ou=People,dc=example,dc=com' +BRANCH5_COS_TMPL = 'cn=cn\3DnsPwTemplateEntry\2Cou\3Dlower\2Cou\3DPeople\2Cdc\3Dexample\2Cdc\3Dcom,' + \ + 'cn=nsPwPolicyContainer,ou=lower,ou=People,dc=example,dc=com' +BRANCH5_COS_DEF = 'cn=nsPwPolicy_CoS,ou=lower,ou=People,dc=example,dc=com' + +BRANCH6_CONTAINER = 'cn=nsPwPolicyContainer,ou=lower,ou=lower,ou=People,dc=example,dc=com' +BRANCH6_PWP = 'cn=cn\3DnsPwPolicyEntry\2Cou\3Dlower\2Cou\3Dlower\2Cou\3DPeople\2Cdc\3Dexample\2Cdc\3Dcom,' + \ + 'cn=nsPwPolicyContainer,ou=lower,ou=lower,ou=People,dc=example,dc=com' +BRANCH6_COS_TMPL = 'cn=cn\3DnsPwTemplateEntry\2Cou\3Dlower\2Cou\3Dlower\2Cou\3DPeople\2Cdc\3Dexample\2Cdc\3Dcom,' + \ + 'cn=nsPwPolicyContainer,ou=lower,ou=lower,ou=People,dc=example,dc=com' +BRANCH6_COS_DEF = 'cn=nsPwPolicy_CoS,ou=lower,ou=lower,ou=People,dc=example,dc=com' + + +def test_ticket47980(topology_st): + """ + Multiple COS pointer definitions that use the same attribute are not correctly ordered. + The cos plugin was incorrectly sorting the attribute indexes based on subtree, which lead + to the wrong cos attribute value being applied to the entry. + """ + + log.info('Testing Ticket 47980 - Testing multiple nested COS pointer definitions are processed correctly') + + # Add our nested branches + try: + topology_st.standalone.add_s(Entry((BRANCH1, { + 'objectclass': 'top extensibleObject'.split(), + 'ou': 'level1' + }))) + except ldap.LDAPError as e: + log.error('Failed to add level1: error ' + e.args[0]['desc']) + assert False + + try: + topology_st.standalone.add_s(Entry((BRANCH2, { + 'objectclass': 'top extensibleObject'.split(), + 'ou': 'level2' + }))) + except ldap.LDAPError as e: + log.error('Failed to add level2: error ' + e.args[0]['desc']) + assert False + + try: + topology_st.standalone.add_s(Entry((BRANCH3, { + 'objectclass': 'top extensibleObject'.split(), + 'uid': 'level3' + }))) + except ldap.LDAPError as e: + log.error('Failed to add level3: error ' + e.args[0]['desc']) + assert False + + # People branch, might already exist + try: + topology_st.standalone.add_s(Entry((BRANCH4, { + 'objectclass': 'top extensibleObject'.split(), + 'ou': 'level4' + }))) + except ldap.ALREADY_EXISTS: + pass + except ldap.LDAPError as e: + log.error('Failed to add level4: error ' + e.args[0]['desc']) + assert False + + try: + topology_st.standalone.add_s(Entry((BRANCH5, { + 'objectclass': 'top extensibleObject'.split(), + 'ou': 'level5' + }))) + except ldap.LDAPError as e: + log.error('Failed to add level5: error ' + e.args[0]['desc']) + assert False + + try: + topology_st.standalone.add_s(Entry((BRANCH6, { + 'objectclass': 'top extensibleObject'.split(), + 'uid': 'level6' + }))) + except ldap.LDAPError as e: + log.error('Failed to add level6: error ' + e.args[0]['desc']) + assert False + + # Add users to each branch + try: + topology_st.standalone.add_s(Entry((USER1_DN, { + 'objectclass': 'top extensibleObject'.split(), + 'uid': 'user1' + }))) + except ldap.LDAPError as e: + log.error('Failed to add user1: error ' + e.args[0]['desc']) + assert False + + try: + topology_st.standalone.add_s(Entry((USER2_DN, { + 'objectclass': 'top 
extensibleObject'.split(), + 'uid': 'user2' + }))) + except ldap.LDAPError as e: + log.error('Failed to add user2: error ' + e.args[0]['desc']) + assert False + + try: + topology_st.standalone.add_s(Entry((USER3_DN, { + 'objectclass': 'top extensibleObject'.split(), + 'uid': 'user3' + }))) + except ldap.LDAPError as e: + log.error('Failed to add user3: error ' + e.args[0]['desc']) + assert False + + try: + topology_st.standalone.add_s(Entry((USER4_DN, { + 'objectclass': 'top extensibleObject'.split(), + 'uid': 'user4' + }))) + except ldap.LDAPError as e: + log.error('Failed to add user4: error ' + e.args[0]['desc']) + assert False + + try: + topology_st.standalone.add_s(Entry((USER5_DN, { + 'objectclass': 'top extensibleObject'.split(), + 'uid': 'user5' + }))) + except ldap.LDAPError as e: + log.error('Failed to add user5: error ' + e.args[0]['desc']) + assert False + + try: + topology_st.standalone.add_s(Entry((USER6_DN, { + 'objectclass': 'top extensibleObject'.split(), + 'uid': 'user6' + }))) + except ldap.LDAPError as e: + log.error('Failed to add user6: error ' + e.args[0]['desc']) + assert False + + # Enable password policy + try: + topology_st.standalone.modify_s(DN_CONFIG, [(ldap.MOD_REPLACE, 'nsslapd-pwpolicy-local', b'on')]) + except ldap.LDAPError as e: + log.error('Failed to set pwpolicy-local: error ' + e.args[0]['desc']) + assert False + + # + # Add subtree policy to branch 1 + # + # Add the container + try: + topology_st.standalone.add_s(Entry((BRANCH1_CONTAINER, { + 'objectclass': 'top nsContainer'.split(), + 'cn': 'nsPwPolicyContainer' + }))) + except ldap.LDAPError as e: + log.error('Failed to add subtree container for level1: error ' + e.args[0]['desc']) + assert False + + # Add the password policy subentry + try: + topology_st.standalone.add_s(Entry((BRANCH1_PWP, { + 'objectclass': 'top ldapsubentry passwordpolicy'.split(), + 'cn': 'cn=nsPwPolicyEntry,ou=level1,dc=example,dc=com', + 'passwordMustChange': 'off', + 'passwordExp': 'off', + 'passwordHistory': 'off', + 'passwordMinAge': '0', + 'passwordChange': 'off', + 'passwordStorageScheme': 'ssha' + }))) + except ldap.LDAPError as e: + log.error('Failed to add passwordpolicy for level1: error ' + e.args[0]['desc']) + assert False + + # Add the COS template + try: + topology_st.standalone.add_s(Entry((BRANCH1_COS_TMPL, { + 'objectclass': 'top ldapsubentry costemplate extensibleObject'.split(), + 'cn': 'cn=nsPwPolicyEntry,ou=level1,dc=example,dc=com', + 'cosPriority': '1', + 'cn': 'cn=nsPwTemplateEntry,ou=level1,dc=example,dc=com', + 'pwdpolicysubentry': BRANCH1_PWP + }))) + except ldap.LDAPError as e: + log.error('Failed to add COS template for level1: error ' + e.args[0]['desc']) + assert False + + # Add the COS definition + try: + topology_st.standalone.add_s(Entry((BRANCH1_COS_DEF, { + 'objectclass': 'top ldapsubentry cosSuperDefinition cosPointerDefinition'.split(), + 'cn': 'cn=nsPwPolicyEntry,ou=level1,dc=example,dc=com', + 'costemplatedn': BRANCH1_COS_TMPL, + 'cosAttribute': 'pwdpolicysubentry default operational-default' + }))) + except ldap.LDAPError as e: + log.error('Failed to add COS def for level1: error ' + e.args[0]['desc']) + assert False + + # + # Add subtree policy to branch 2 + # + # Add the container + try: + topology_st.standalone.add_s(Entry((BRANCH2_CONTAINER, { + 'objectclass': 'top nsContainer'.split(), + 'cn': 'nsPwPolicyContainer' + }))) + except ldap.LDAPError as e: + log.error('Failed to add subtree container for level2: error ' + e.args[0]['desc']) + assert False + + # Add the password policy 
subentry + try: + topology_st.standalone.add_s(Entry((BRANCH2_PWP, { + 'objectclass': 'top ldapsubentry passwordpolicy'.split(), + 'cn': 'cn=nsPwPolicyEntry,ou=level2,dc=example,dc=com', + 'passwordMustChange': 'off', + 'passwordExp': 'off', + 'passwordHistory': 'off', + 'passwordMinAge': '0', + 'passwordChange': 'off', + 'passwordStorageScheme': 'ssha' + }))) + except ldap.LDAPError as e: + log.error('Failed to add passwordpolicy for level2: error ' + e.args[0]['desc']) + assert False + + # Add the COS template + try: + topology_st.standalone.add_s(Entry((BRANCH2_COS_TMPL, { + 'objectclass': 'top ldapsubentry costemplate extensibleObject'.split(), + 'cn': 'cn=nsPwPolicyEntry,ou=level2,dc=example,dc=com', + 'cosPriority': '1', + 'cn': 'cn=nsPwTemplateEntry,ou=level2,dc=example,dc=com', + 'pwdpolicysubentry': BRANCH2_PWP + }))) + except ldap.LDAPError as e: + log.error('Failed to add COS template for level2: error ' + e.args[0]['desc']) + assert False + + # Add the COS definition + try: + topology_st.standalone.add_s(Entry((BRANCH2_COS_DEF, { + 'objectclass': 'top ldapsubentry cosSuperDefinition cosPointerDefinition'.split(), + 'cn': 'cn=nsPwPolicyEntry,ou=level2,dc=example,dc=com', + 'costemplatedn': BRANCH2_COS_TMPL, + 'cosAttribute': 'pwdpolicysubentry default operational-default' + }))) + except ldap.LDAPError as e: + log.error('Failed to add COS def for level2: error ' + e.args[0]['desc']) + assert False + + # + # Add subtree policy to branch 3 + # + # Add the container + try: + topology_st.standalone.add_s(Entry((BRANCH3_CONTAINER, { + 'objectclass': 'top nsContainer'.split(), + 'cn': 'nsPwPolicyContainer' + }))) + except ldap.LDAPError as e: + log.error('Failed to add subtree container for level3: error ' + e.args[0]['desc']) + assert False + + # Add the password policy subentry + try: + topology_st.standalone.add_s(Entry((BRANCH3_PWP, { + 'objectclass': 'top ldapsubentry passwordpolicy'.split(), + 'cn': 'cn=nsPwPolicyEntry,ou=level3,dc=example,dc=com', + 'passwordMustChange': 'off', + 'passwordExp': 'off', + 'passwordHistory': 'off', + 'passwordMinAge': '0', + 'passwordChange': 'off', + 'passwordStorageScheme': 'ssha' + }))) + except ldap.LDAPError as e: + log.error('Failed to add passwordpolicy for level3: error ' + e.args[0]['desc']) + assert False + + # Add the COS template + try: + topology_st.standalone.add_s(Entry((BRANCH3_COS_TMPL, { + 'objectclass': 'top ldapsubentry costemplate extensibleObject'.split(), + 'cn': 'cn=nsPwPolicyEntry,ou=level3,dc=example,dc=com', + 'cosPriority': '1', + 'cn': 'cn=nsPwTemplateEntry,ou=level3,dc=example,dc=com', + 'pwdpolicysubentry': BRANCH3_PWP + }))) + except ldap.LDAPError as e: + log.error('Failed to add COS template for level3: error ' + e.args[0]['desc']) + assert False + + # Add the COS definition + try: + topology_st.standalone.add_s(Entry((BRANCH3_COS_DEF, { + 'objectclass': 'top ldapsubentry cosSuperDefinition cosPointerDefinition'.split(), + 'cn': 'cn=nsPwPolicyEntry,ou=level3,dc=example,dc=com', + 'costemplatedn': BRANCH3_COS_TMPL, + 'cosAttribute': 'pwdpolicysubentry default operational-default' + }))) + except ldap.LDAPError as e: + log.error('Failed to add COS def for level3: error ' + e.args[0]['desc']) + assert False + + # + # Add subtree policy to branch 4 + # + # Add the container + try: + topology_st.standalone.add_s(Entry((BRANCH4_CONTAINER, { + 'objectclass': 'top nsContainer'.split(), + 'cn': 'nsPwPolicyContainer' + }))) + except ldap.LDAPError as e: + log.error('Failed to add subtree container for level3: error ' + 
e.args[0]['desc']) + assert False + + # Add the password policy subentry + try: + topology_st.standalone.add_s(Entry((BRANCH4_PWP, { + 'objectclass': 'top ldapsubentry passwordpolicy'.split(), + 'cn': 'cn=nsPwPolicyEntry,ou=people,dc=example,dc=com', + 'passwordMustChange': 'off', + 'passwordExp': 'off', + 'passwordHistory': 'off', + 'passwordMinAge': '0', + 'passwordChange': 'off', + 'passwordStorageScheme': 'ssha' + }))) + except ldap.LDAPError as e: + log.error('Failed to add passwordpolicy for branch4: error ' + e.args[0]['desc']) + assert False + + # Add the COS template + try: + topology_st.standalone.add_s(Entry((BRANCH4_COS_TMPL, { + 'objectclass': 'top ldapsubentry costemplate extensibleObject'.split(), + 'cn': 'cn=nsPwPolicyEntry,ou=people,dc=example,dc=com', + 'cosPriority': '1', + 'cn': 'cn=nsPwTemplateEntry,ou=people,dc=example,dc=com', + 'pwdpolicysubentry': BRANCH4_PWP + }))) + except ldap.LDAPError as e: + log.error('Failed to add COS template for level3: error ' + e.args[0]['desc']) + assert False + + # Add the COS definition + try: + topology_st.standalone.add_s(Entry((BRANCH4_COS_DEF, { + 'objectclass': 'top ldapsubentry cosSuperDefinition cosPointerDefinition'.split(), + 'cn': 'cn=nsPwPolicyEntry,ou=people,dc=example,dc=com', + 'costemplatedn': BRANCH4_COS_TMPL, + 'cosAttribute': 'pwdpolicysubentry default operational-default' + }))) + except ldap.LDAPError as e: + log.error('Failed to add COS def for branch4: error ' + e.args[0]['desc']) + assert False + + # + # Add subtree policy to branch 5 + # + # Add the container + try: + topology_st.standalone.add_s(Entry((BRANCH5_CONTAINER, { + 'objectclass': 'top nsContainer'.split(), + 'cn': 'nsPwPolicyContainer' + }))) + except ldap.LDAPError as e: + log.error('Failed to add subtree container for branch5: error ' + e.args[0]['desc']) + assert False + + # Add the password policy subentry + try: + topology_st.standalone.add_s(Entry((BRANCH5_PWP, { + 'objectclass': 'top ldapsubentry passwordpolicy'.split(), + 'cn': 'cn=nsPwPolicyEntry,ou=lower,ou=people,dc=example,dc=com', + 'passwordMustChange': 'off', + 'passwordExp': 'off', + 'passwordHistory': 'off', + 'passwordMinAge': '0', + 'passwordChange': 'off', + 'passwordStorageScheme': 'ssha' + }))) + except ldap.LDAPError as e: + log.error('Failed to add passwordpolicy for branch5: error ' + e.args[0]['desc']) + assert False + + # Add the COS template + try: + topology_st.standalone.add_s(Entry((BRANCH5_COS_TMPL, { + 'objectclass': 'top ldapsubentry costemplate extensibleObject'.split(), + 'cn': 'cn=nsPwPolicyEntry,ou=lower,ou=people,dc=example,dc=com', + 'cosPriority': '1', + 'cn': 'cn=nsPwTemplateEntry,ou=lower,ou=people,dc=example,dc=com', + 'pwdpolicysubentry': BRANCH5_PWP + }))) + except ldap.LDAPError as e: + log.error('Failed to add COS template for branch5: error ' + e.args[0]['desc']) + assert False + + # Add the COS definition + try: + topology_st.standalone.add_s(Entry((BRANCH5_COS_DEF, { + 'objectclass': 'top ldapsubentry cosSuperDefinition cosPointerDefinition'.split(), + 'cn': 'cn=nsPwPolicyEntry,ou=lower,ou=people,dc=example,dc=com', + 'costemplatedn': BRANCH5_COS_TMPL, + 'cosAttribute': 'pwdpolicysubentry default operational-default' + }))) + except ldap.LDAPError as e: + log.error('Failed to add COS def for level3: error ' + e.args[0]['desc']) + assert False + + # + # Add subtree policy to branch 6 + # + # Add the container + try: + topology_st.standalone.add_s(Entry((BRANCH6_CONTAINER, { + 'objectclass': 'top nsContainer'.split(), + 'cn': 'nsPwPolicyContainer' + 
}))) + except ldap.LDAPError as e: + log.error('Failed to add subtree container for branch6: error ' + e.args[0]['desc']) + assert False + + # Add the password policy subentry + try: + topology_st.standalone.add_s(Entry((BRANCH6_PWP, { + 'objectclass': 'top ldapsubentry passwordpolicy'.split(), + 'cn': 'cn=nsPwPolicyEntry,ou=level3,dc=example,dc=com', + 'passwordMustChange': 'off', + 'passwordExp': 'off', + 'passwordHistory': 'off', + 'passwordMinAge': '0', + 'passwordChange': 'off', + 'passwordStorageScheme': 'ssha' + }))) + except ldap.LDAPError as e: + log.error('Failed to add passwordpolicy for branch6: error ' + e.args[0]['desc']) + assert False + + # Add the COS template + try: + topology_st.standalone.add_s(Entry((BRANCH6_COS_TMPL, { + 'objectclass': 'top ldapsubentry costemplate extensibleObject'.split(), + 'cn': 'cn=nsPwPolicyEntry,ou=lower,ou=lower,ou=people,dc=example,dc=com', + 'cosPriority': '1', + 'cn': 'cn=nsPwTemplateEntry,ou=lower,ou=lower,ou=people,dc=example,dc=com', + 'pwdpolicysubentry': BRANCH6_PWP + }))) + except ldap.LDAPError as e: + log.error('Failed to add COS template for branch6: error ' + e.args[0]['desc']) + assert False + + # Add the COS definition + try: + topology_st.standalone.add_s(Entry((BRANCH6_COS_DEF, { + 'objectclass': 'top ldapsubentry cosSuperDefinition cosPointerDefinition'.split(), + 'cn': 'cn=nsPwPolicyEntry,ou=lower,ou=lower,ou=people,dc=example,dc=com', + 'costemplatedn': BRANCH6_COS_TMPL, + 'cosAttribute': 'pwdpolicysubentry default operational-default' + }))) + except ldap.LDAPError as e: + log.error('Failed to add COS def for branch6: error ' + e.args[0]['desc']) + assert False + + time.sleep(2) + + # + # Now check that each user has its expected passwordPolicy subentry + # + try: + entries = topology_st.standalone.search_s(USER1_DN, ldap.SCOPE_BASE, '(objectclass=top)', ['pwdpolicysubentry']) + if not entries[0].hasValue('pwdpolicysubentry', BRANCH1_PWP): + log.fatal('User %s does not have expected pwdpolicysubentry!') + assert False + except ldap.LDAPError as e: + log.fatal('Unable to search for entry %s: error %s' % (USER1_DN, e.args[0]['desc'])) + assert False + + try: + entries = topology_st.standalone.search_s(USER2_DN, ldap.SCOPE_BASE, '(objectclass=top)', ['pwdpolicysubentry']) + if not entries[0].hasValue('pwdpolicysubentry', BRANCH2_PWP): + log.fatal('User %s does not have expected pwdpolicysubentry!' % USER2_DN) + assert False + except ldap.LDAPError as e: + log.fatal('Unable to search for entry %s: error %s' % (USER2_DN, e.args[0]['desc'])) + assert False + + try: + entries = topology_st.standalone.search_s(USER3_DN, ldap.SCOPE_BASE, '(objectclass=top)', ['pwdpolicysubentry']) + if not entries[0].hasValue('pwdpolicysubentry', BRANCH3_PWP): + log.fatal('User %s does not have expected pwdpolicysubentry!' % USER3_DN) + assert False + except ldap.LDAPError as e: + log.fatal('Unable to search for entry %s: error %s' % (USER3_DN, e.args[0]['desc'])) + assert False + + try: + entries = topology_st.standalone.search_s(USER4_DN, ldap.SCOPE_BASE, '(objectclass=top)', ['pwdpolicysubentry']) + if not entries[0].hasValue('pwdpolicysubentry', BRANCH4_PWP): + log.fatal('User %s does not have expected pwdpolicysubentry!' 
% USER4_DN) + assert False + except ldap.LDAPError as e: + log.fatal('Unable to search for entry %s: error %s' % (USER4_DN, e.args[0]['desc'])) + assert False + + try: + entries = topology_st.standalone.search_s(USER5_DN, ldap.SCOPE_BASE, '(objectclass=top)', ['pwdpolicysubentry']) + if not entries[0].hasValue('pwdpolicysubentry', BRANCH5_PWP): + log.fatal('User %s does not have expected pwdpolicysubentry!' % USER5_DN) + assert False + except ldap.LDAPError as e: + log.fatal('Unable to search for entry %s: error %s' % (USER5_DN, e.args[0]['desc'])) + assert False + + try: + entries = topology_st.standalone.search_s(USER6_DN, ldap.SCOPE_BASE, '(objectclass=top)', ['pwdpolicysubentry']) + if not entries[0].hasValue('pwdpolicysubentry', BRANCH6_PWP): + log.fatal('User %s does not have expected pwdpolicysubentry!' % USER6_DN) + assert False + except ldap.LDAPError as e: + log.fatal('Unable to search for entry %s: error %s' % (USER6_DN, e.args[0]['desc'])) + assert False + + +if __name__ == '__main__': + # Run isolated + # -s for DEBUG mode + CURRENT_FILE = os.path.realpath(__file__) + pytest.main("-s %s" % CURRENT_FILE) diff --git a/dirsrvtests/tests/tickets/ticket47981_test.py b/dirsrvtests/tests/tickets/ticket47981_test.py new file mode 100644 index 0000000..e8ab9d6 --- /dev/null +++ b/dirsrvtests/tests/tickets/ticket47981_test.py @@ -0,0 +1,228 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2016 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. +# --- END COPYRIGHT BLOCK --- +# +import logging + +import ldap.sasl +import pytest +from lib389.tasks import * +from lib389.topologies import topology_st + +from lib389._constants import DEFAULT_SUFFIX, BACKEND_NAME, DN_CONFIG + +pytestmark = pytest.mark.tier2 + +log = logging.getLogger(__name__) + +BRANCH = 'ou=people,' + DEFAULT_SUFFIX +USER_DN = 'uid=user1,%s' % (BRANCH) +BRANCH_CONTAINER = 'cn=nsPwPolicyContainer,ou=people,dc=example,dc=com' +BRANCH_COS_DEF = 'cn=nsPwPolicy_CoS,ou=people,dc=example,dc=com' +BRANCH_PWP = 'cn=cn\\3DnsPwPolicyEntry\\2Cou\\3DPeople\\2Cdc\\3Dexample\\2Cdc\\3Dcom,' + \ + 'cn=nsPwPolicyContainer,ou=People,dc=example,dc=com' +BRANCH_COS_TMPL = 'cn=cn\\3DnsPwTemplateEntry\\2Cou\\3DPeople\\2Cdc\\3Dexample\\2Cdc\\3Dcom,' + \ + 'cn=nsPwPolicyContainer,ou=People,dc=example,dc=com' +SECOND_SUFFIX = 'o=netscaperoot' +BE_NAME = 'netscaperoot' + + +def addSubtreePwPolicy(inst): + # + # Add subtree policy to the people branch + # + try: + inst.add_s(Entry((BRANCH_CONTAINER, { + 'objectclass': 'top nsContainer'.split(), + 'cn': 'nsPwPolicyContainer' + }))) + except ldap.LDAPError as e: + log.error('Failed to add subtree container for ou=people: error ' + e.args[0]['desc']) + assert False + + # Add the password policy subentry + try: + inst.add_s(Entry((BRANCH_PWP, { + 'objectclass': 'top ldapsubentry passwordpolicy'.split(), + 'cn': 'cn=nsPwPolicyEntry,ou=people,dc=example,dc=com', + 'passwordMustChange': 'off', + 'passwordExp': 'off', + 'passwordHistory': 'off', + 'passwordMinAge': '0', + 'passwordChange': 'off', + 'passwordStorageScheme': 'ssha' + }))) + except ldap.LDAPError as e: + log.error('Failed to add passwordpolicy: error ' + e.args[0]['desc']) + assert False + + # Add the COS template + try: + inst.add_s(Entry((BRANCH_COS_TMPL, { + 'objectclass': 'top ldapsubentry costemplate extensibleObject'.split(), + 'cn': 'cn=nsPwPolicyEntry,ou=people,dc=example,dc=com', + 'cosPriority': '1', + 'cn': 'cn=nsPwTemplateEntry,ou=people,dc=example,dc=com', + 
'pwdpolicysubentry': BRANCH_PWP + }))) + except ldap.LDAPError as e: + log.error('Failed to add COS template: error ' + e.args[0]['desc']) + assert False + + # Add the COS definition + try: + inst.add_s(Entry((BRANCH_COS_DEF, { + 'objectclass': 'top ldapsubentry cosSuperDefinition cosPointerDefinition'.split(), + 'cn': 'cn=nsPwPolicyEntry,ou=people,dc=example,dc=com', + 'costemplatedn': BRANCH_COS_TMPL, + 'cosAttribute': 'pwdpolicysubentry default operational-default' + }))) + except ldap.LDAPError as e: + log.error('Failed to add COS def: error ' + e.args[0]['desc']) + assert False + time.sleep(1) + + +def delSubtreePwPolicy(inst): + try: + inst.delete_s(BRANCH_COS_DEF) + except ldap.LDAPError as e: + log.error('Failed to delete COS def: error ' + e.args[0]['desc']) + assert False + + try: + inst.delete_s(BRANCH_COS_TMPL) + except ldap.LDAPError as e: + log.error('Failed to delete COS template: error ' + e.args[0]['desc']) + assert False + + try: + inst.delete_s(BRANCH_PWP) + except ldap.LDAPError as e: + log.error('Failed to delete COS password policy: error ' + e.args[0]['desc']) + assert False + + try: + inst.delete_s(BRANCH_CONTAINER) + except ldap.LDAPError as e: + log.error('Failed to delete COS container: error ' + e.args[0]['desc']) + assert False + time.sleep(1) + + +def test_ticket47981(topology_st): + """ + If there are multiple suffixes, and the last suffix checked does not contain any COS entries, + while other suffixes do, then the vattr cache is not invalidated as it should be. Then any + cached entries will still contain the old COS attributes/values. + """ + + log.info('Testing Ticket 47981 - Test that COS def changes are correctly reflected in affected users') + + # + # Create a second backend that does not have any COS entries + # + log.info('Adding second suffix that will not contain any COS entries...\n') + + topology_st.standalone.backend.create(SECOND_SUFFIX, {BACKEND_NAME: BE_NAME}) + topology_st.standalone.mappingtree.create(SECOND_SUFFIX, bename=BE_NAME) + try: + topology_st.standalone.add_s(Entry((SECOND_SUFFIX, { + 'objectclass': 'top organization'.split(), + 'o': BE_NAME}))) + except ldap.ALREADY_EXISTS: + pass + except ldap.LDAPError as e: + log.error('Failed to create suffix entry: error ' + e.args[0]['desc']) + assert False + + # + # Add People branch, it might already exist + # + log.info('Add our test entries to the default suffix, and proceed with the test...') + + try: + topology_st.standalone.add_s(Entry((BRANCH, { + 'objectclass': 'top extensibleObject'.split(), + 'ou': 'level4' + }))) + except ldap.ALREADY_EXISTS: + pass + except ldap.LDAPError as e: + log.error('Failed to add ou=people: error ' + e.args[0]['desc']) + assert False + + # + # Add a user to the branch + # + try: + topology_st.standalone.add_s(Entry((USER_DN, { + 'objectclass': 'top extensibleObject'.split(), + 'uid': 'user1' + }))) + except ldap.LDAPError as e: + log.error('Failed to add user1: error ' + e.args[0]['desc']) + assert False + + # + # Enable password policy and add the subtree policy + # + try: + topology_st.standalone.modify_s(DN_CONFIG, [(ldap.MOD_REPLACE, 'nsslapd-pwpolicy-local', b'on')]) + except ldap.LDAPError as e: + log.error('Failed to set pwpolicy-local: error ' + e.args[0]['desc']) + assert False + + addSubtreePwPolicy(topology_st.standalone) + + # + # Now check the user has its expected passwordPolicy subentry + # + try: + entries = topology_st.standalone.search_s(USER_DN, + ldap.SCOPE_BASE, + '(objectclass=top)', + ['pwdpolicysubentry', 'dn']) + if not 
entries[0].hasAttr('pwdpolicysubentry'): + log.fatal('User does not have expected pwdpolicysubentry!') + assert False + except ldap.LDAPError as e: + log.fatal('Unable to search for entry %s: error %s' % (USER_DN, e.args[0]['desc'])) + assert False + + # + # Delete the password policy and make sure it is removed from the same user + # + delSubtreePwPolicy(topology_st.standalone) + try: + entries = topology_st.standalone.search_s(USER_DN, ldap.SCOPE_BASE, '(objectclass=top)', ['pwdpolicysubentry']) + if entries[0].hasAttr('pwdpolicysubentry'): + log.fatal('User unexpectedly does have the pwdpolicysubentry!') + assert False + except ldap.LDAPError as e: + log.fatal('Unable to search for entry %s: error %s' % (USER_DN, e.args[0]['desc'])) + assert False + + # + # Add the subtree policvy back and see if the user now has it + # + addSubtreePwPolicy(topology_st.standalone) + try: + entries = topology_st.standalone.search_s(USER_DN, ldap.SCOPE_BASE, '(objectclass=top)', ['pwdpolicysubentry']) + if not entries[0].hasAttr('pwdpolicysubentry'): + log.fatal('User does not have expected pwdpolicysubentry!') + assert False + except ldap.LDAPError as e: + log.fatal('Unable to search for entry %s: error %s' % (USER_DN, e.args[0]['desc'])) + assert False + + +if __name__ == '__main__': + # Run isolated + # -s for DEBUG mode + CURRENT_FILE = os.path.realpath(__file__) + pytest.main("-s %s" % CURRENT_FILE) diff --git a/dirsrvtests/tests/tickets/ticket47988_test.py b/dirsrvtests/tests/tickets/ticket47988_test.py new file mode 100644 index 0000000..e3aaee2 --- /dev/null +++ b/dirsrvtests/tests/tickets/ticket47988_test.py @@ -0,0 +1,371 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2016 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. 
+# --- END COPYRIGHT BLOCK --- +# +''' +Created on Nov 7, 2013 + +@author: tbordaz +''' +import logging +import shutil +import stat +import tarfile +import time +from random import randint + +import ldap +import pytest +from lib389 import Entry +from lib389._constants import * +from lib389.topologies import topology_m2 +from lib389.utils import * + +pytestmark = pytest.mark.tier2 + +logging.getLogger(__name__).setLevel(logging.DEBUG) +log = logging.getLogger(__name__) + +TEST_REPL_DN = "cn=test_repl, %s" % SUFFIX +OC_NAME = 'OCticket47988' +MUST = "(postalAddress $ postalCode)" +MAY = "(member $ street)" + +OTHER_NAME = 'other_entry' +MAX_OTHERS = 10 + +BIND_NAME = 'bind_entry' +BIND_DN = 'cn=%s, %s' % (BIND_NAME, SUFFIX) +BIND_PW = 'password' + +ENTRY_NAME = 'test_entry' +ENTRY_DN = 'cn=%s, %s' % (ENTRY_NAME, SUFFIX) +ENTRY_OC = "top person %s" % OC_NAME + + +def _oc_definition(oid_ext, name, must=None, may=None): + oid = "1.2.3.4.5.6.7.8.9.10.%d" % oid_ext + desc = 'To test ticket 47490' + sup = 'person' + if not must: + must = MUST + if not may: + may = MAY + + new_oc = "( %s NAME '%s' DESC '%s' SUP %s AUXILIARY MUST %s MAY %s )" % (oid, name, desc, sup, must, may) + return new_oc + + +def _header(topology_m2, label): + topology_m2.ms["master1"].log.info("\n\n###############################################") + topology_m2.ms["master1"].log.info("#######") + topology_m2.ms["master1"].log.info("####### %s" % label) + topology_m2.ms["master1"].log.info("#######") + topology_m2.ms["master1"].log.info("###################################################") + + +def _install_schema(server, tarFile): + server.stop(timeout=10) + + tmpSchema = '/tmp/schema_47988' + if not os.path.isdir(tmpSchema): + os.mkdir(tmpSchema) + + for the_file in os.listdir(tmpSchema): + file_path = os.path.join(tmpSchema, the_file) + if os.path.isfile(file_path): + os.unlink(file_path) + + os.chdir(tmpSchema) + tar = tarfile.open(tarFile, 'r:gz') + for member in tar.getmembers(): + tar.extract(member.name) + + tar.close() + + st = os.stat(server.schemadir) + os.chmod(server.schemadir, st.st_mode | stat.S_IWUSR | stat.S_IXUSR | stat.S_IRUSR) + for the_file in os.listdir(tmpSchema): + schemaFile = os.path.join(server.schemadir, the_file) + if os.path.isfile(schemaFile): + if the_file.startswith('99user.ldif'): + # only replace 99user.ldif, the other standard definition are kept + os.chmod(schemaFile, stat.S_IWUSR | stat.S_IRUSR) + server.log.info("replace %s" % schemaFile) + shutil.copy(the_file, schemaFile) + + else: + server.log.info("add %s" % schemaFile) + shutil.copy(the_file, schemaFile) + os.chmod(schemaFile, stat.S_IRUSR | stat.S_IRGRP) + os.chmod(server.schemadir, st.st_mode | stat.S_IRUSR | stat.S_IRGRP) + + +def test_ticket47988_init(topology_m2): + """ + It adds + - Objectclass with MAY 'member' + - an entry ('bind_entry') with which we bind to test the 'SELFDN' operation + It deletes the anonymous aci + + """ + + _header(topology_m2, 'test_ticket47988_init') + + # enable acl error logging + mod = [(ldap.MOD_REPLACE, 'nsslapd-errorlog-level', ensure_bytes(str(8192)))] # REPL + topology_m2.ms["master1"].modify_s(DN_CONFIG, mod) + topology_m2.ms["master2"].modify_s(DN_CONFIG, mod) + + mod = [(ldap.MOD_REPLACE, 'nsslapd-accesslog-level', ensure_bytes(str(260)))] # Internal op + topology_m2.ms["master1"].modify_s(DN_CONFIG, mod) + topology_m2.ms["master2"].modify_s(DN_CONFIG, mod) + + # add dummy entries + for cpt in range(MAX_OTHERS): + name = "%s%d" % (OTHER_NAME, cpt) + 
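+        # Each dummy entry is added on master1 as a minimal person object; the loop
+        # below then waits for entry 0 to appear on master2, confirming replication
+        # works before the IPA schema tarballs are installed on each master.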
topology_m2.ms["master1"].add_s(Entry(("cn=%s,%s" % (name, SUFFIX), { + 'objectclass': "top person".split(), + 'sn': name, + 'cn': name}))) + + # check that entry 0 is replicated before + loop = 0 + entryDN = "cn=%s0,%s" % (OTHER_NAME, SUFFIX) + while loop <= 10: + try: + ent = topology_m2.ms["master2"].getEntry(entryDN, ldap.SCOPE_BASE, "(objectclass=*)", ['telephonenumber']) + break + except ldap.NO_SUCH_OBJECT: + time.sleep(1) + loop += 1 + assert (loop <= 10) + + topology_m2.ms["master1"].stop(timeout=10) + topology_m2.ms["master2"].stop(timeout=10) + + # install the specific schema M1: ipa3.3, M2: ipa4.1 + schema_file = os.path.join(topology_m2.ms["master1"].getDir(__file__, DATA_DIR), "ticket47988/schema_ipa3.3.tar.gz") + _install_schema(topology_m2.ms["master1"], schema_file) + schema_file = os.path.join(topology_m2.ms["master1"].getDir(__file__, DATA_DIR), "ticket47988/schema_ipa4.1.tar.gz") + _install_schema(topology_m2.ms["master2"], schema_file) + + topology_m2.ms["master1"].start(timeout=10) + topology_m2.ms["master2"].start(timeout=10) + + +def _do_update_schema(server, range=3999): + ''' + Update the schema of the M2 (IPA4.1). to generate a nsSchemaCSN + ''' + postfix = str(randint(range, range + 1000)) + OID = '2.16.840.1.113730.3.8.12.%s' % postfix + NAME = 'thierry%s' % postfix + value = '( %s NAME \'%s\' DESC \'Override for Group Attributes\' STRUCTURAL MUST ( cn ) MAY sn X-ORIGIN ( \'IPA v4.1.2\' \'user defined\' ) )' % ( + OID, NAME) + mod = [(ldap.MOD_ADD, 'objectclasses', ensure_bytes(value))] + server.modify_s('cn=schema', mod) + + +def _do_update_entry(supplier=None, consumer=None, attempts=10): + ''' + This is doing an update on M2 (IPA4.1) and checks the update has been + propagated to M1 (IPA3.3) + ''' + assert (supplier) + assert (consumer) + entryDN = "cn=%s0,%s" % (OTHER_NAME, SUFFIX) + value = str(randint(100, 200)) + mod = [(ldap.MOD_REPLACE, 'telephonenumber', ensure_bytes(value))] + supplier.modify_s(entryDN, mod) + + loop = 0 + while loop <= attempts: + ent = consumer.getEntry(entryDN, ldap.SCOPE_BASE, "(objectclass=*)", ['telephonenumber']) + read_val = ensure_str(ent.telephonenumber) or "0" + if read_val == value: + break + # the expected value is not yet replicated. 
try again + time.sleep(5) + loop += 1 + supplier.log.debug("test_do_update: receive %s (expected %s)" % (read_val, value)) + assert (loop <= attempts) + + +def _pause_M2_to_M1(topology_m2): + topology_m2.ms["master1"].log.info("\n\n######################### Pause RA M2->M1 ######################\n") + ents = topology_m2.ms["master2"].agreement.list(suffix=SUFFIX) + assert len(ents) == 1 + topology_m2.ms["master2"].agreement.pause(ents[0].dn) + + +def _resume_M1_to_M2(topology_m2): + topology_m2.ms["master1"].log.info("\n\n######################### resume RA M1->M2 ######################\n") + ents = topology_m2.ms["master1"].agreement.list(suffix=SUFFIX) + assert len(ents) == 1 + topology_m2.ms["master1"].agreement.resume(ents[0].dn) + + +def _pause_M1_to_M2(topology_m2): + topology_m2.ms["master1"].log.info("\n\n######################### Pause RA M1->M2 ######################\n") + ents = topology_m2.ms["master1"].agreement.list(suffix=SUFFIX) + assert len(ents) == 1 + topology_m2.ms["master1"].agreement.pause(ents[0].dn) + + +def _resume_M2_to_M1(topology_m2): + topology_m2.ms["master1"].log.info("\n\n######################### resume RA M2->M1 ######################\n") + ents = topology_m2.ms["master2"].agreement.list(suffix=SUFFIX) + assert len(ents) == 1 + topology_m2.ms["master2"].agreement.resume(ents[0].dn) + + +def test_ticket47988_1(topology_m2): + ''' + Check that replication is working and pause replication M2->M1 + ''' + _header(topology_m2, 'test_ticket47988_1') + + topology_m2.ms["master1"].log.debug("\n\nCheck that replication is working and pause replication M2->M1\n") + _do_update_entry(supplier=topology_m2.ms["master2"], consumer=topology_m2.ms["master1"], attempts=5) + _pause_M2_to_M1(topology_m2) + + +def test_ticket47988_2(topology_m2): + ''' + Update M1 schema and trigger update M1->M2 + So M1 should learn new/extended definitions that are in M2 schema + ''' + _header(topology_m2, 'test_ticket47988_2') + + topology_m2.ms["master1"].log.debug("\n\nUpdate M1 schema and an entry on M1\n") + master1_schema_csn = topology_m2.ms["master1"].schema.get_schema_csn() + master2_schema_csn = topology_m2.ms["master2"].schema.get_schema_csn() + topology_m2.ms["master1"].log.debug("\nBefore updating the schema on M1\n") + topology_m2.ms["master1"].log.debug("Master1 nsschemaCSN: %s" % master1_schema_csn) + topology_m2.ms["master1"].log.debug("Master2 nsschemaCSN: %s" % master2_schema_csn) + + # Here M1 should no, should check M2 schema and learn + _do_update_schema(topology_m2.ms["master1"]) + master1_schema_csn = topology_m2.ms["master1"].schema.get_schema_csn() + master2_schema_csn = topology_m2.ms["master2"].schema.get_schema_csn() + topology_m2.ms["master1"].log.debug("\nAfter updating the schema on M1\n") + topology_m2.ms["master1"].log.debug("Master1 nsschemaCSN: %s" % master1_schema_csn) + topology_m2.ms["master1"].log.debug("Master2 nsschemaCSN: %s" % master2_schema_csn) + assert (master1_schema_csn) + + # to avoid linger effect where a replication session is reused without checking the schema + _pause_M1_to_M2(topology_m2) + _resume_M1_to_M2(topology_m2) + + # topo.master1.log.debug("\n\nSleep.... 
attach the debugger dse_modify") + # time.sleep(60) + _do_update_entry(supplier=topology_m2.ms["master1"], consumer=topology_m2.ms["master2"], attempts=15) + master1_schema_csn = topology_m2.ms["master1"].schema.get_schema_csn() + master2_schema_csn = topology_m2.ms["master2"].schema.get_schema_csn() + topology_m2.ms["master1"].log.debug("\nAfter a full replication session\n") + topology_m2.ms["master1"].log.debug("Master1 nsschemaCSN: %s" % master1_schema_csn) + topology_m2.ms["master1"].log.debug("Master2 nsschemaCSN: %s" % master2_schema_csn) + assert (master1_schema_csn) + assert (master2_schema_csn) + + +def test_ticket47988_3(topology_m2): + ''' + Resume replication M2->M1 and check replication is still working + ''' + _header(topology_m2, 'test_ticket47988_3') + + _resume_M2_to_M1(topology_m2) + _do_update_entry(supplier=topology_m2.ms["master1"], consumer=topology_m2.ms["master2"], attempts=5) + _do_update_entry(supplier=topology_m2.ms["master2"], consumer=topology_m2.ms["master1"], attempts=5) + + +def test_ticket47988_4(topology_m2): + ''' + Check schemaCSN is identical on both server + And save the nsschemaCSN to later check they do not change unexpectedly + ''' + _header(topology_m2, 'test_ticket47988_4') + + master1_schema_csn = topology_m2.ms["master1"].schema.get_schema_csn() + master2_schema_csn = topology_m2.ms["master2"].schema.get_schema_csn() + topology_m2.ms["master1"].log.debug("\n\nMaster1 nsschemaCSN: %s" % master1_schema_csn) + topology_m2.ms["master1"].log.debug("\n\nMaster2 nsschemaCSN: %s" % master2_schema_csn) + assert (master1_schema_csn) + assert (master2_schema_csn) + assert (master1_schema_csn == master2_schema_csn) + + topology_m2.ms["master1"].saved_schema_csn = master1_schema_csn + topology_m2.ms["master2"].saved_schema_csn = master2_schema_csn + + +def test_ticket47988_5(topology_m2): + ''' + Check schemaCSN do not change unexpectedly + ''' + _header(topology_m2, 'test_ticket47988_5') + + _do_update_entry(supplier=topology_m2.ms["master1"], consumer=topology_m2.ms["master2"], attempts=5) + _do_update_entry(supplier=topology_m2.ms["master2"], consumer=topology_m2.ms["master1"], attempts=5) + master1_schema_csn = topology_m2.ms["master1"].schema.get_schema_csn() + master2_schema_csn = topology_m2.ms["master2"].schema.get_schema_csn() + topology_m2.ms["master1"].log.debug("\n\nMaster1 nsschemaCSN: %s" % master1_schema_csn) + topology_m2.ms["master1"].log.debug("\n\nMaster2 nsschemaCSN: %s" % master2_schema_csn) + assert (master1_schema_csn) + assert (master2_schema_csn) + assert (master1_schema_csn == master2_schema_csn) + + assert (topology_m2.ms["master1"].saved_schema_csn == master1_schema_csn) + assert (topology_m2.ms["master2"].saved_schema_csn == master2_schema_csn) + + +def test_ticket47988_6(topology_m2): + ''' + Update M1 schema and trigger update M2->M1 + So M2 should learn new/extended definitions that are in M1 schema + ''' + + _header(topology_m2, 'test_ticket47988_6') + + topology_m2.ms["master1"].log.debug("\n\nUpdate M1 schema and an entry on M1\n") + master1_schema_csn = topology_m2.ms["master1"].schema.get_schema_csn() + master2_schema_csn = topology_m2.ms["master2"].schema.get_schema_csn() + topology_m2.ms["master1"].log.debug("\nBefore updating the schema on M1\n") + topology_m2.ms["master1"].log.debug("Master1 nsschemaCSN: %s" % master1_schema_csn) + topology_m2.ms["master1"].log.debug("Master2 nsschemaCSN: %s" % master2_schema_csn) + + # Here M1 should no, should check M2 schema and learn + _do_update_schema(topology_m2.ms["master1"], 
range=5999) + master1_schema_csn = topology_m2.ms["master1"].schema.get_schema_csn() + master2_schema_csn = topology_m2.ms["master2"].schema.get_schema_csn() + topology_m2.ms["master1"].log.debug("\nAfter updating the schema on M1\n") + topology_m2.ms["master1"].log.debug("Master1 nsschemaCSN: %s" % master1_schema_csn) + topology_m2.ms["master1"].log.debug("Master2 nsschemaCSN: %s" % master2_schema_csn) + assert (master1_schema_csn) + + # to avoid linger effect where a replication session is reused without checking the schema + _pause_M1_to_M2(topology_m2) + _resume_M1_to_M2(topology_m2) + + # topo.master1.log.debug("\n\nSleep.... attach the debugger dse_modify") + # time.sleep(60) + _do_update_entry(supplier=topology_m2.ms["master2"], consumer=topology_m2.ms["master1"], attempts=15) + master1_schema_csn = topology_m2.ms["master1"].schema.get_schema_csn() + master2_schema_csn = topology_m2.ms["master2"].schema.get_schema_csn() + topology_m2.ms["master1"].log.debug("\nAfter a full replication session\n") + topology_m2.ms["master1"].log.debug("Master1 nsschemaCSN: %s" % master1_schema_csn) + topology_m2.ms["master1"].log.debug("Master2 nsschemaCSN: %s" % master2_schema_csn) + assert (master1_schema_csn) + assert (master2_schema_csn) + + +if __name__ == '__main__': + # Run isolated + # -s for DEBUG mode + CURRENT_FILE = os.path.realpath(__file__) + pytest.main("-s %s" % CURRENT_FILE) diff --git a/dirsrvtests/tests/tickets/ticket48005_test.py b/dirsrvtests/tests/tickets/ticket48005_test.py new file mode 100644 index 0000000..6ad7aaf --- /dev/null +++ b/dirsrvtests/tests/tickets/ticket48005_test.py @@ -0,0 +1,365 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2016 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. 
+# --- END COPYRIGHT BLOCK --- +# +import logging +import re + +import pytest +from lib389.tasks import * +from lib389.topologies import topology_st + +from lib389._constants import (DEFAULT_SUFFIX, SUFFIX, PLUGIN_REFER_INTEGRITY, PLUGIN_AUTOMEMBER, + PLUGIN_MEMBER_OF, PLUGIN_USN) + +pytestmark = pytest.mark.tier2 + +logging.getLogger(__name__).setLevel(logging.DEBUG) +log = logging.getLogger(__name__) + + +def test_ticket48005_setup(topology_st): + ''' + allow dump core + generate a test ldif file using dbgen.pl + import the ldif + ''' + log.info("Ticket 48005 setup...") + if hasattr(topology_st.standalone, 'prefix'): + prefix = topology_st.standalone.prefix + else: + prefix = None + sysconfig_dirsrv = os.path.join(topology_st.standalone.get_initconfig_dir(), 'dirsrv') + cmdline = 'egrep "ulimit -c unlimited" %s' % sysconfig_dirsrv + p = os.popen(cmdline, "r") + ulimitc = p.readline() + if ulimitc == "": + log.info('No ulimit -c in %s' % sysconfig_dirsrv) + log.info('Adding it') + cmdline = 'echo "ulimit -c unlimited" >> %s' % sysconfig_dirsrv + + sysconfig_dirsrv_systemd = sysconfig_dirsrv + ".systemd" + cmdline = 'egrep LimitCORE=infinity %s' % sysconfig_dirsrv_systemd + p = os.popen(cmdline, "r") + lcore = p.readline() + if lcore == "": + log.info('No LimitCORE in %s' % sysconfig_dirsrv_systemd) + log.info('Adding it') + cmdline = 'echo LimitCORE=infinity >> %s' % sysconfig_dirsrv_systemd + + topology_st.standalone.restart(timeout=10) + + ldif_file = topology_st.standalone.get_ldif_dir() + "/ticket48005.ldif" + os.system('ls %s' % ldif_file) + os.system('rm -f %s' % ldif_file) + if hasattr(topology_st.standalone, 'prefix'): + prefix = topology_st.standalone.prefix + else: + prefix = "" + dbgen_prog = prefix + '/bin/dbgen.pl' + log.info('dbgen_prog: %s' % dbgen_prog) + os.system('%s -s %s -o %s -u -n 10000' % (dbgen_prog, SUFFIX, ldif_file)) + cmdline = 'egrep dn: %s | wc -l' % ldif_file + p = os.popen(cmdline, "r") + dnnumstr = p.readline() + num = int(dnnumstr) + log.info("We have %d entries.\n", num) + + importTask = Tasks(topology_st.standalone) + args = {TASK_WAIT: True} + importTask.importLDIF(SUFFIX, None, ldif_file, args) + log.info('Importing %s complete.' % ldif_file) + + +def test_ticket48005_memberof(topology_st): + ''' + Enable memberof and referint plugin + Run fixmemberof task without waiting + Shutdown the server + Check if a core file was generated or not + If no core was found, this test case was successful. 
+ ''' + log.info("Ticket 48005 memberof test...") + topology_st.standalone.plugins.enable(name=PLUGIN_MEMBER_OF) + topology_st.standalone.plugins.enable(name=PLUGIN_REFER_INTEGRITY) + + topology_st.standalone.restart(timeout=10) + + try: + # run the fixup task + topology_st.standalone.tasks.fixupMemberOf(suffix=SUFFIX, args={TASK_WAIT: False}) + except ValueError: + log.error('Some problem occured with a value that was provided') + assert False + + topology_st.standalone.stop(timeout=10) + + mytmp = '/tmp' + logdir = re.sub('errors', '', topology_st.standalone.errlog) + cmdline = 'ls ' + logdir + 'core*' + p = os.popen(cmdline, "r") + lcore = p.readline() + if lcore != "": + s.system('mv %score* %s/core.ticket48005_memberof' % (logdir, mytmp)) + log.error('FixMemberof: Moved core file(s) to %s; Test failed' % mytmp) + assert False + log.info('No core files are found') + + topology_st.standalone.start(timeout=10) + + topology_st.standalone.plugins.disable(name=PLUGIN_REFER_INTEGRITY) + topology_st.standalone.plugins.disable(name=PLUGIN_MEMBER_OF) + + topology_st.standalone.restart(timeout=10) + + log.info("Ticket 48005 memberof test complete") + + +def test_ticket48005_automember(topology_st): + ''' + Enable automember and referint plugin + 1. Run automember rebuild membership task without waiting + Shutdown the server + Check if a core file was generated or not + If no core was found, this test case was successful. + 2. Run automember export updates task without waiting + Shutdown the server + Check if a core file was generated or not + If no core was found, this test case was successful. + 3. Run automember map updates task without waiting + Shutdown the server + Check if a core file was generated or not + If no core was found, this test case was successful. 
+ ''' + log.info("Ticket 48005 automember test...") + topology_st.standalone.plugins.enable(name=PLUGIN_AUTOMEMBER) + topology_st.standalone.plugins.enable(name=PLUGIN_REFER_INTEGRITY) + + # configure automember config entry + log.info('Adding automember config') + try: + topology_st.standalone.add_s(Entry(('cn=group cfg,cn=Auto Membership Plugin,cn=plugins,cn=config', { + 'objectclass': 'top autoMemberDefinition'.split(), + 'autoMemberScope': 'dc=example,dc=com', + 'autoMemberFilter': 'objectclass=inetorgperson', + 'autoMemberDefaultGroup': 'cn=group0,dc=example,dc=com', + 'autoMemberGroupingAttr': 'uniquemember:dn', + 'cn': 'group cfg'}))) + except ValueError: + log.error('Failed to add automember config') + assert False + + topology_st.standalone.restart(timeout=10) + + try: + # run the automember rebuild task + topology_st.standalone.tasks.automemberRebuild(suffix=SUFFIX, args={TASK_WAIT: False}) + except ValueError: + log.error('Automember rebuild task failed.') + assert False + + topology_st.standalone.stop(timeout=10) + + mytmp = '/tmp' + logdir = re.sub('errors', '', topology_st.standalone.errlog) + cmdline = 'ls ' + logdir + 'core*' + p = os.popen(cmdline, "r") + lcore = p.readline() + if lcore != "": + s.system('mv %score* %s/core.ticket48005_automember_rebuild' % (logdir, mytmp)) + log.error('Automember_rebuld: Moved core file(s) to %s; Test failed' % mytmp) + assert False + log.info('No core files are found') + + topology_st.standalone.start(timeout=10) + + ldif_out_file = mytmp + "/ticket48005_automember_exported.ldif" + try: + # run the automember export task + topology_st.standalone.tasks.automemberExport(suffix=SUFFIX, ldif_out=ldif_out_file, args={TASK_WAIT: False}) + except ValueError: + log.error('Automember Export task failed.') + assert False + + topology_st.standalone.stop(timeout=10) + + logdir = re.sub('errors', '', topology_st.standalone.errlog) + cmdline = 'ls ' + logdir + 'core*' + p = os.popen(cmdline, "r") + lcore = p.readline() + if lcore != "": + s.system('mv %score* %s/core.ticket48005_automember_export' % (logdir, mytmp)) + log.error('Automember_export: Moved core file(s) to %s; Test failed' % mytmp) + assert False + log.info('No core files are found') + + topology_st.standalone.start(timeout=10) + + ldif_in_file = topology_st.standalone.get_ldif_dir() + "/ticket48005.ldif" + ldif_out_file = mytmp + "/ticket48005_automember_map.ldif" + try: + # run the automember map task + topology_st.standalone.tasks.automemberMap(ldif_in=ldif_in_file, ldif_out=ldif_out_file, + args={TASK_WAIT: False}) + except ValueError: + log.error('Automember Map task failed.') + assert False + + topology_st.standalone.stop(timeout=10) + + logdir = re.sub('errors', '', topology_st.standalone.errlog) + cmdline = 'ls ' + logdir + 'core*' + p = os.popen(cmdline, "r") + lcore = p.readline() + if lcore != "": + s.system('mv %score* %s/core.ticket48005_automember_map' % (logdir, mytmp)) + log.error('Automember_map: Moved core file(s) to %s; Test failed' % mytmp) + assert False + log.info('No core files are found') + + topology_st.standalone.start(timeout=10) + + topology_st.standalone.plugins.disable(name=PLUGIN_REFER_INTEGRITY) + topology_st.standalone.plugins.enable(name=PLUGIN_AUTOMEMBER) + + topology_st.standalone.restart(timeout=10) + + log.info("Ticket 48005 automember test complete") + + +def test_ticket48005_syntaxvalidate(topology_st): + ''' + Run syntax validate task without waiting + Shutdown the server + Check if a core file was generated or not + If no core was found, this test 
case was successful. + ''' + log.info("Ticket 48005 syntax validate test...") + + try: + # run the fixup task + topology_st.standalone.tasks.syntaxValidate(suffix=SUFFIX, args={TASK_WAIT: False}) + except ValueError: + log.error('Some problem occured with a value that was provided') + assert False + + topology_st.standalone.stop(timeout=10) + + mytmp = '/tmp' + logdir = re.sub('errors', '', topology_st.standalone.errlog) + cmdline = 'ls ' + logdir + 'core*' + p = os.popen(cmdline, "r") + lcore = p.readline() + if lcore != "": + s.system('mv %score* %s/core.ticket48005_syntaxvalidate' % (logdir, mytmp)) + log.error('SyntaxValidate: Moved core file(s) to %s; Test failed' % mytmp) + assert False + log.info('No core files are found') + + topology_st.standalone.start(timeout=10) + + log.info("Ticket 48005 syntax validate test complete") + + +def test_ticket48005_usn(topology_st): + ''' + Enable entryusn + Delete all user entries. + Run USN tombstone cleanup task + Shutdown the server + Check if a core file was generated or not + If no core was found, this test case was successful. + ''' + log.info("Ticket 48005 usn test...") + topology_st.standalone.plugins.enable(name=PLUGIN_USN) + + topology_st.standalone.restart(timeout=10) + + try: + entries = topology_st.standalone.search_s(SUFFIX, ldap.SCOPE_SUBTREE, "(objectclass=inetorgperson)") + if len(entries) == 0: + log.info("No user entries.") + else: + for i in range(len(entries)): + # log.info('Deleting %s' % entries[i].dn) + try: + topology_st.standalone.delete_s(entries[i].dn) + except ValueError: + log.error('delete_s %s failed.' % entries[i].dn) + assert False + except ValueError: + log.error('search_s failed.') + assert False + + try: + # run the usn tombstone cleanup + topology_st.standalone.tasks.usnTombstoneCleanup(suffix=SUFFIX, bename="userRoot", args={TASK_WAIT: False}) + except ValueError: + log.error('Some problem occured with a value that was provided') + assert False + + topology_st.standalone.stop(timeout=10) + + mytmp = '/tmp' + logdir = re.sub('errors', '', topology_st.standalone.errlog) + cmdline = 'ls ' + logdir + 'core*' + p = os.popen(cmdline, "r") + lcore = p.readline() + if lcore != "": + s.system('mv %score* %s/core.ticket48005_usn' % (logdir, mytmp)) + log.error('usnTombstoneCleanup: Moved core file(s) to %s; Test failed' % mytmp) + assert False + log.info('No core files are found') + + topology_st.standalone.start(timeout=10) + + topology_st.standalone.plugins.disable(name=PLUGIN_USN) + + topology_st.standalone.restart(timeout=10) + + log.info("Ticket 48005 usn test complete") + + +def test_ticket48005_schemareload(topology_st): + ''' + Run schema reload task without waiting + Shutdown the server + Check if a core file was generated or not + If no core was found, this test case was successful. 
+ ''' + log.info("Ticket 48005 schema reload test...") + + try: + # run the schema reload task + topology_st.standalone.tasks.schemaReload(args={TASK_WAIT: False}) + except ValueError: + log.error('Schema Reload task failed.') + assert False + + topology_st.standalone.stop(timeout=10) + + logdir = re.sub('errors', '', topology_st.standalone.errlog) + cmdline = 'ls ' + logdir + 'core*' + p = os.popen(cmdline, "r") + lcore = p.readline() + if lcore != "": + mytmp = '/tmp' + s.system('mv %score* %s/core.ticket48005_schema_reload' % (logdir, mytmp)) + log.error('Schema reload: Moved core file(s) to %s; Test failed' % mytmp) + assert False + log.info('No core files are found') + + topology_st.standalone.start(timeout=10) + + log.info("Ticket 48005 schema reload test complete") + + +if __name__ == '__main__': + # Run isolated + # -s for DEBUG mode + CURRENT_FILE = os.path.realpath(__file__) + pytest.main("-s %s" % CURRENT_FILE) diff --git a/dirsrvtests/tests/tickets/ticket48013_test.py b/dirsrvtests/tests/tickets/ticket48013_test.py new file mode 100644 index 0000000..915d589 --- /dev/null +++ b/dirsrvtests/tests/tickets/ticket48013_test.py @@ -0,0 +1,95 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2016 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. +# --- END COPYRIGHT BLOCK --- +# +import ldapurl +import pytest +from ldap.ldapobject import SimpleLDAPObject +from ldap.syncrepl import SyncreplConsumer +from lib389.utils import * +from lib389.topologies import topology_st + +from lib389._constants import (PLUGIN_RETRO_CHANGELOG, DEFAULT_SUFFIX, DN_CONFIG, + DN_DM, PASSWORD, PLUGIN_REPL_SYNC, HOST_STANDALONE, + PORT_STANDALONE) + + +# Skip on older versions +pytestmark = [pytest.mark.tier2, + pytest.mark.skipif(ds_is_older('1.3.4'), reason="Not implemented")] + +logging.getLogger(__name__).setLevel(logging.DEBUG) +log = logging.getLogger(__name__) + + +class SyncObject(SimpleLDAPObject, SyncreplConsumer): + def __init__(self, uri): + # Init the ldap connection + SimpleLDAPObject.__init__(self, uri) + + def sync_search(self, test_cookie): + self.syncrepl_search('dc=example,dc=com', ldap.SCOPE_SUBTREE, + filterstr='(objectclass=*)', mode='refreshOnly', + cookie=test_cookie) + + def poll(self): + self.syncrepl_poll(all=1) + + +def test_ticket48013(topology_st): + ''' + Content Synchonization: Test that invalid cookies are caught + ''' + + cookies = ('#', '##', 'a#a#a', 'a#a#1') + + # Enable dynamic plugins + try: + topology_st.standalone.modify_s(DN_CONFIG, [(ldap.MOD_REPLACE, 'nsslapd-dynamic-plugins', b'on')]) + except ldap.LDAPError as e: + log.error('Failed to enable dynamic plugin! 
{}'.format(e.args[0]['desc'])) + assert False + + # Enable retro changelog + topology_st.standalone.plugins.enable(name=PLUGIN_RETRO_CHANGELOG) + + # Enbale content sync plugin + topology_st.standalone.plugins.enable(name=PLUGIN_REPL_SYNC) + + # Set everything up + ldap_url = ldapurl.LDAPUrl('ldap://%s:%s' % (HOST_STANDALONE, + PORT_STANDALONE)) + ldap_connection = SyncObject(ldap_url.initializeUrl()) + + # Authenticate + try: + ldap_connection.simple_bind_s(DN_DM, PASSWORD) + except ldap.LDAPError as e: + log.error('Login to LDAP server failed: {}'.format(e.args[0]['desc'])) + assert False + + # Test invalid cookies + for invalid_cookie in cookies: + log.info('Testing cookie: %s' % invalid_cookie) + try: + ldap_connection.sync_search(invalid_cookie) + ldap_connection.poll() + log.fatal('Invalid cookie accepted!') + assert False + except Exception as e: + log.info('Invalid cookie correctly rejected: {}'.format(e.args[0]['info'])) + pass + + # Success + log.info('Test complete') + + +if __name__ == '__main__': + # Run isolated + # -s for DEBUG mode + CURRENT_FILE = os.path.realpath(__file__) + pytest.main("-s %s" % CURRENT_FILE) diff --git a/dirsrvtests/tests/tickets/ticket48026_test.py b/dirsrvtests/tests/tickets/ticket48026_test.py new file mode 100644 index 0000000..f00d3d5 --- /dev/null +++ b/dirsrvtests/tests/tickets/ticket48026_test.py @@ -0,0 +1,121 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2016 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. +# --- END COPYRIGHT BLOCK --- +# +import pytest +from lib389.tasks import * +from lib389.utils import * +from lib389.topologies import topology_st + +from lib389._constants import PLUGIN_ATTR_UNIQUENESS, DEFAULT_SUFFIX + +# Skip on older versions +pytestmark = [pytest.mark.tier2, + pytest.mark.skipif(ds_is_older('1.3.4'), reason="Not implemented")] + +logging.getLogger(__name__).setLevel(logging.DEBUG) +log = logging.getLogger(__name__) + +USER1_DN = 'uid=user1,' + DEFAULT_SUFFIX +USER2_DN = 'uid=user2,' + DEFAULT_SUFFIX + + +def test_ticket48026(topology_st): + ''' + Test that multiple attribute uniqueness works correctly. + ''' + # Configure the plugin + inst = topology_st.standalone + inst.plugins.enable(name=PLUGIN_ATTR_UNIQUENESS) + + try: + # This plugin enable / disable doesn't seem to create the nsslapd-pluginId correctly? 
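+        # Point the uniqueness plugin at both attributes: replace any existing
+        # uniqueness-attribute-name value with 'mail', then add 'mailAlternateAddress',
+        # so the adds below must be unique across the two attributes.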
+ inst.modify_s('cn=' + PLUGIN_ATTR_UNIQUENESS + ',cn=plugins,cn=config', + [(ldap.MOD_REPLACE, 'uniqueness-attribute-name', b'mail'), + (ldap.MOD_ADD, 'uniqueness-attribute-name', + b'mailAlternateAddress'), + ]) + except ldap.LDAPError as e: + log.fatal('test_ticket48026: Failed to configure plugin for "mail": error {}'.format(e.args[0]['desc'])) + assert False + + inst.restart(timeout=30) + + # Add an entry + try: + inst.add_s(Entry((USER1_DN, {'objectclass': "top extensibleObject".split(), + 'sn': '1', + 'cn': 'user 1', + 'uid': 'user1', + 'mail': 'user1@example.com', + 'mailAlternateAddress': 'user1@alt.example.com', + 'userpassword': 'password'}))) + except ldap.LDAPError as e: + log.fatal('test_ticket48026: Failed to add test user' + USER1_DN + ': error {}'.format(e.args[0]['desc'])) + assert False + + try: + inst.add_s(Entry((USER2_DN, {'objectclass': "top extensibleObject".split(), + 'sn': '2', + 'cn': 'user 2', + 'uid': 'user2', + 'mail': 'user1@example.com', + 'userpassword': 'password'}))) + except ldap.CONSTRAINT_VIOLATION: + pass + else: + log.error('test_ticket48026: Adding of 1st entry(mail v mail) incorrectly succeeded') + assert False + + try: + inst.add_s(Entry((USER2_DN, {'objectclass': "top extensibleObject".split(), + 'sn': '2', + 'cn': 'user 2', + 'uid': 'user2', + 'mailAlternateAddress': 'user1@alt.example.com', + 'userpassword': 'password'}))) + except ldap.CONSTRAINT_VIOLATION: + pass + else: + log.error( + 'test_ticket48026: Adding of 2nd entry(mailAlternateAddress v mailAlternateAddress) incorrectly succeeded') + assert False + + try: + inst.add_s(Entry((USER2_DN, {'objectclass': "top extensibleObject".split(), + 'sn': '2', + 'cn': 'user 2', + 'uid': 'user2', + 'mail': 'user1@alt.example.com', + 'userpassword': 'password'}))) + except ldap.CONSTRAINT_VIOLATION: + pass + else: + log.error('test_ticket48026: Adding of 3rd entry(mail v mailAlternateAddress) incorrectly succeeded') + assert False + + try: + inst.add_s(Entry((USER2_DN, {'objectclass': "top extensibleObject".split(), + 'sn': '2', + 'cn': 'user 2', + 'uid': 'user2', + 'mailAlternateAddress': 'user1@example.com', + 'userpassword': 'password'}))) + except ldap.CONSTRAINT_VIOLATION: + pass + else: + log.error('test_ticket48026: Adding of 4th entry(mailAlternateAddress v mail) incorrectly succeeded') + assert False + + log.info('Test complete') + + +if __name__ == '__main__': + # Run isolated + # -s for DEBUG mode + CURRENT_FILE = os.path.realpath(__file__) + pytest.main("-s %s" % CURRENT_FILE) diff --git a/dirsrvtests/tests/tickets/ticket48109_test.py b/dirsrvtests/tests/tickets/ticket48109_test.py new file mode 100644 index 0000000..5f9b657 --- /dev/null +++ b/dirsrvtests/tests/tickets/ticket48109_test.py @@ -0,0 +1,338 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2016 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. +# --- END COPYRIGHT BLOCK --- +# +import pytest +from lib389.tasks import * +from lib389.utils import * +from lib389.topologies import topology_st + +from lib389._constants import SUFFIX, DEFAULT_SUFFIX + +pytestmark = pytest.mark.tier2 + +logging.getLogger(__name__).setLevel(logging.DEBUG) +log = logging.getLogger(__name__) + +UID_INDEX = 'cn=uid,cn=index,cn=userRoot,cn=ldbm database,cn=plugins,cn=config' + + +def test_ticket48109(topology_st): + ''' + Set SubStr lengths to cn=uid,cn=index,... 
+ objectClass: extensibleObject + nsIndexType: sub + nsSubStrBegin: 2 + nsSubStrEnd: 2 + ''' + log.info('Test case 0') + # add substr setting to UID_INDEX + try: + topology_st.standalone.modify_s(UID_INDEX, + [(ldap.MOD_ADD, 'objectClass', b'extensibleObject'), + (ldap.MOD_ADD, 'nsIndexType', b'sub'), + (ldap.MOD_ADD, 'nsSubStrBegin', b'2'), + (ldap.MOD_ADD, 'nsSubStrEnd', b'2')]) + except ldap.LDAPError as e: + log.error('Failed to add substr lengths: error {}'.format(e.args[0]['desc'])) + assert False + + # restart the server to apply the indexing + topology_st.standalone.restart(timeout=10) + + # add a test user + UID = 'auser0' + USER_DN = 'uid=%s,%s' % (UID, SUFFIX) + try: + topology_st.standalone.add_s(Entry((USER_DN, { + 'objectclass': 'top person organizationalPerson inetOrgPerson'.split(), + 'cn': 'a user0', + 'sn': 'user0', + 'givenname': 'a', + 'mail': UID}))) + except ldap.LDAPError as e: + log.error('Failed to add ' + USER_DN + ': error {}'.format(e.args[0]['desc'])) + assert False + + entries = topology_st.standalone.search_s(SUFFIX, ldap.SCOPE_SUBTREE, '(uid=a*)') + assert len(entries) == 1 + + # restart the server to check the access log + topology_st.standalone.restart(timeout=10) + + cmdline = 'egrep %s %s | egrep "uid=a\*"' % (SUFFIX, topology_st.standalone.accesslog) + p = os.popen(cmdline, "r") + l0 = p.readline() + if l0 == "": + log.error('Search with "(uid=a*)" is not logged in ' + topology_st.standalone.accesslog) + assert False + else: + # regex = re.compile('\(conn=[0-9]* op=[0-9]*\) SRCH .*') + regex = re.compile(r'.*\s+(conn=\d+ op=\d+)\s+SRCH .*') + match = regex.match(l0) + log.info('match: %s' % match.group(1)) + cmdline = 'egrep "%s" %s | egrep "RESULT"' % (match.group(1), topology_st.standalone.accesslog) + p = os.popen(cmdline, "r") + l1 = p.readline() + if l1 == "": + log.error('Search result of "(uid=a*)" is not logged in ' + topology_st.standalone.accesslog) + assert False + else: + log.info('l1: %s' % l1) + regex = re.compile(r'.*nentries=(\d+)\s+.*') + match = regex.match(l1) + log.info('match: nentires=%s' % match.group(1)) + if match.group(1) == "0": + log.error('Entry uid=a* not found.') + assert False + else: + log.info('Entry uid=a* found.') + regex = re.compile(r'.*(notes=[AU]).*') + match = regex.match(l1) + if match: + log.error('%s - substr index was not used' % match.group(1)) + assert False + else: + log.info('Test case 0 - OK - substr index used') + + # clean up substr setting to UID_INDEX + try: + topology_st.standalone.modify_s(UID_INDEX, + [(ldap.MOD_DELETE, 'objectClass', b'extensibleObject'), + (ldap.MOD_DELETE, 'nsIndexType', b'sub'), + (ldap.MOD_DELETE, 'nsSubStrBegin', b'2'), + (ldap.MOD_DELETE, 'nsSubStrEnd', b'2')]) + except ldap.LDAPError as e: + log.error('Failed to delete substr lengths: error {}'.format(e.args[0]['desc'])) + assert False + + ''' + Set SubStr lengths to cn=uid,cn=index,... 
+ nsIndexType: sub + nsMatchingRule: nsSubStrBegin=2 + nsMatchingRule: nsSubStrEnd=2 + ''' + log.info('Test case 1') + # add substr setting to UID_INDEX + try: + topology_st.standalone.modify_s(UID_INDEX, + [(ldap.MOD_ADD, 'nsIndexType', b'sub'), + (ldap.MOD_ADD, 'nsMatchingRule', b'nssubstrbegin=2'), + (ldap.MOD_ADD, 'nsMatchingRule', b'nssubstrend=2')]) + except ldap.LDAPError as e: + log.error('Failed to add substr lengths: error {}'.format(e.args[0]['desc'])) + assert False + + # restart the server to apply the indexing + topology_st.standalone.restart(timeout=10) + + # add a test user + UID = 'buser1' + USER_DN = 'uid=%s,%s' % (UID, SUFFIX) + try: + topology_st.standalone.add_s(Entry((USER_DN, { + 'objectclass': 'top person organizationalPerson inetOrgPerson'.split(), + 'cn': 'b user1', + 'sn': 'user1', + 'givenname': 'b', + 'mail': UID}))) + except ldap.LDAPError as e: + log.error('Failed to add ' + USER_DN + ': error {}'.format(e.args[0]['desc'])) + assert False + + entries = topology_st.standalone.search_s(SUFFIX, ldap.SCOPE_SUBTREE, '(uid=b*)') + assert len(entries) == 1 + + # restart the server to check the access log + topology_st.standalone.restart(timeout=10) + + cmdline = 'egrep %s %s | egrep "uid=b\*"' % (SUFFIX, topology_st.standalone.accesslog) + p = os.popen(cmdline, "r") + l0 = p.readline() + if l0 == "": + log.error('Search with "(uid=b*)" is not logged in ' + topology_st.standalone.accesslog) + assert False + else: + # regex = re.compile('\(conn=[0-9]* op=[0-9]*\) SRCH .*') + regex = re.compile(r'.*\s+(conn=\d+ op=\d+)\s+SRCH .*') + match = regex.match(l0) + log.info('match: %s' % match.group(1)) + cmdline = 'egrep "%s" %s | egrep "RESULT"' % (match.group(1), topology_st.standalone.accesslog) + p = os.popen(cmdline, "r") + l1 = p.readline() + if l1 == "": + log.error('Search result of "(uid=*b)" is not logged in ' + topology_st.standalone.accesslog) + assert False + else: + log.info('l1: %s' % l1) + regex = re.compile(r'.*nentries=(\d+)\s+.*') + match = regex.match(l1) + log.info('match: nentires=%s' % match.group(1)) + if match.group(1) == "0": + log.error('Entry uid=*b not found.') + assert False + else: + log.info('Entry uid=*b found.') + regex = re.compile(r'.*(notes=[AU]).*') + match = regex.match(l1) + if match: + log.error('%s - substr index was not used' % match.group(1)) + assert False + else: + log.info('Test case 1 - OK - substr index used') + + # clean up substr setting to UID_INDEX + try: + topology_st.standalone.modify_s(UID_INDEX, + [(ldap.MOD_DELETE, 'nsIndexType', b'sub'), + (ldap.MOD_DELETE, 'nsMatchingRule', b'nssubstrbegin=2'), + (ldap.MOD_DELETE, 'nsMatchingRule', b'nssubstrend=2')]) + except ldap.LDAPError as e: + log.error('Failed to delete substr lengths: error {}'.format(e.args[0]['desc'])) + assert False + + ''' + Set SubStr conflict formats/lengths to cn=uid,cn=index,... + objectClass: extensibleObject + nsIndexType: sub + nsMatchingRule: nsSubStrBegin=3 + nsMatchingRule: nsSubStrEnd=3 + nsSubStrBegin: 2 + nsSubStrEnd: 2 + nsSubStr{Begin,End} are honored. 
+ ''' + log.info('Test case 2') + + # add substr setting to UID_INDEX + try: + topology_st.standalone.modify_s(UID_INDEX, + [(ldap.MOD_ADD, 'nsIndexType', b'sub'), + (ldap.MOD_ADD, 'nsMatchingRule', b'nssubstrbegin=3'), + (ldap.MOD_ADD, 'nsMatchingRule', b'nssubstrend=3'), + (ldap.MOD_ADD, 'objectClass', b'extensibleObject'), + (ldap.MOD_ADD, 'nsSubStrBegin', b'2'), + (ldap.MOD_ADD, 'nsSubStrEnd', b'2')]) + except ldap.LDAPError as e: + log.error('Failed to add substr lengths: error {}'.format(e.args[0]['desc'])) + assert False + + # restart the server to apply the indexing + topology_st.standalone.restart(timeout=10) + + # add a test user + UID = 'cuser2' + USER_DN = 'uid=%s,%s' % (UID, SUFFIX) + try: + topology_st.standalone.add_s(Entry((USER_DN, { + 'objectclass': 'top person organizationalPerson inetOrgPerson'.split(), + 'cn': 'c user2', + 'sn': 'user2', + 'givenname': 'c', + 'mail': UID}))) + except ldap.LDAPError as e: + log.error('Failed to add ' + USER_DN + ': error {}'.format(e.args[0]['desc'])) + assert False + + entries = topology_st.standalone.search_s(SUFFIX, ldap.SCOPE_SUBTREE, '(uid=c*)') + assert len(entries) == 1 + + entries = topology_st.standalone.search_s(SUFFIX, ldap.SCOPE_SUBTREE, '(uid=*2)') + assert len(entries) == 1 + + # restart the server to check the access log + topology_st.standalone.restart(timeout=10) + + cmdline = 'egrep %s %s | egrep "uid=c\*"' % (SUFFIX, topology_st.standalone.accesslog) + p = os.popen(cmdline, "r") + l0 = p.readline() + if l0 == "": + log.error('Search with "(uid=c*)" is not logged in ' + topology_st.standalone.accesslog) + assert False + else: + # regex = re.compile('\(conn=[0-9]* op=[0-9]*\) SRCH .*') + regex = re.compile(r'.*\s+(conn=\d+ op=\d+)\s+SRCH .*') + match = regex.match(l0) + log.info('match: %s' % match.group(1)) + cmdline = 'egrep "%s" %s | egrep "RESULT"' % (match.group(1), topology_st.standalone.accesslog) + p = os.popen(cmdline, "r") + l1 = p.readline() + if l1 == "": + log.error('Search result of "(uid=c*)" is not logged in ' + topology_st.standalone.accesslog) + assert False + else: + log.info('l1: %s' % l1) + regex = re.compile(r'.*nentries=(\d+)\s+.*') + match = regex.match(l1) + log.info('match: nentires=%s' % match.group(1)) + if match.group(1) == "0": + log.error('Entry uid=c* not found.') + assert False + else: + log.info('Entry uid=c* found.') + regex = re.compile(r'.*(notes=[AU]).*') + match = regex.match(l1) + if match: + log.error('%s - substr index was not used' % match.group(1)) + assert False + else: + log.info('Test case 2-1 - OK - correct substr index used') + + cmdline = 'egrep %s %s | egrep "uid=\*2"' % (SUFFIX, topology_st.standalone.accesslog) + p = os.popen(cmdline, "r") + l0 = p.readline() + if l0 == "": + log.error('Search with "(uid=*2)" is not logged in ' + topology_st.standalone.accesslog) + assert False + else: + # regex = re.compile('\(conn=[0-9]* op=[0-9]*\) SRCH .*') + regex = re.compile(r'.*\s+(conn=\d+ op=\d+)\s+SRCH .*') + match = regex.match(l0) + log.info('match: %s' % match.group(1)) + cmdline = 'egrep "%s" %s | egrep "RESULT"' % (match.group(1), topology_st.standalone.accesslog) + p = os.popen(cmdline, "r") + l1 = p.readline() + if l1 == "": + log.error('Search result of "(uid=*2)" is not logged in ' + topology_st.standalone.accesslog) + assert False + else: + log.info('l1: %s' % l1) + regex = re.compile(r'.*nentries=(\d+)\s+.*') + match = regex.match(l1) + log.info('match: nentires=%s' % match.group(1)) + if match.group(1) == "0": + log.error('Entry uid=*2 not found.') + assert 
False + else: + log.info('Entry uid=*2 found.') + regex = re.compile(r'.*(notes=[AU]).*') + match = regex.match(l1) + if match: + log.error('%s - substr index was not used' % match.group(1)) + assert False + else: + log.info('Test case 2-2 - OK - correct substr index used') + + # clean up substr setting to UID_INDEX + try: + topology_st.standalone.modify_s(UID_INDEX, + [(ldap.MOD_DELETE, 'nsIndexType', b'sub'), + (ldap.MOD_DELETE, 'nsMatchingRule', b'nssubstrbegin=3'), + (ldap.MOD_DELETE, 'nsMatchingRule', b'nssubstrend=3'), + (ldap.MOD_DELETE, 'objectClass', b'extensibleObject'), + (ldap.MOD_DELETE, 'nsSubStrBegin', b'2'), + (ldap.MOD_DELETE, 'nsSubStrEnd', b'2')]) + except ldap.LDAPError as e: + log.error('Failed to delete substr lengths: error {}'.format(e.args[0]['desc'])) + assert False + log.info('Testcase PASSED') + + +if __name__ == '__main__': + # Run isolated + # -s for DEBUG mode + CURRENT_FILE = os.path.realpath(__file__) + pytest.main("-s %s" % CURRENT_FILE) diff --git a/dirsrvtests/tests/tickets/ticket48170_test.py b/dirsrvtests/tests/tickets/ticket48170_test.py new file mode 100644 index 0000000..e3c8a27 --- /dev/null +++ b/dirsrvtests/tests/tickets/ticket48170_test.py @@ -0,0 +1,43 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2016 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. +# --- END COPYRIGHT BLOCK --- +# +import pytest +from lib389.utils import * +from lib389.topologies import topology_st + +pytestmark = pytest.mark.tier2 + +logging.getLogger(__name__).setLevel(logging.DEBUG) +log = logging.getLogger(__name__) + + +def test_ticket48170(topology_st): + ''' + Attempt to add a nsIndexType wikth an invalid value: "eq,pres" + ''' + + INDEX_DN = 'cn=cn,cn=index,cn=userroot,cn=ldbm database,cn=plugins,cn=config' + REJECTED = False + try: + topology_st.standalone.modify_s(INDEX_DN, [(ldap.MOD_ADD, 'nsINdexType', b'eq,pres')]) + except ldap.UNWILLING_TO_PERFORM: + log.info('Index update correctly rejected') + REJECTED = True + + if not REJECTED: + log.fatal('Invalid nsIndexType value was incorrectly accepted.') + assert False + + log.info('Test complete') + + +if __name__ == '__main__': + # Run isolated + # -s for DEBUG mode + CURRENT_FILE = os.path.realpath(__file__) + pytest.main("-s %s" % CURRENT_FILE) diff --git a/dirsrvtests/tests/tickets/ticket48194_test.py b/dirsrvtests/tests/tickets/ticket48194_test.py new file mode 100644 index 0000000..4431ebd --- /dev/null +++ b/dirsrvtests/tests/tickets/ticket48194_test.py @@ -0,0 +1,352 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2016 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. 
+# --- END COPYRIGHT BLOCK --- +# +import logging +import subprocess +import time + +import ldap +import pytest +from lib389 import Entry +from lib389._constants import * +from lib389.topologies import topology_st +from lib389.nss_ssl import NssSsl + +log = logging.getLogger(__name__) + +CONFIG_DN = 'cn=config' +from lib389.utils import * + +# Skip on older versions +pytestmark = [pytest.mark.tier2, + pytest.mark.skipif(ds_is_older('1.3.4'), reason="Not implemented")] +ENCRYPTION_DN = 'cn=encryption,%s' % CONFIG_DN +RSA = 'RSA' +RSA_DN = 'cn=%s,%s' % (RSA, ENCRYPTION_DN) +LDAPSPORT = str(SECUREPORT_STANDALONE) +SERVERCERT = 'Server-Cert' +plus_all_ecount = 0 +plus_all_dcount = 0 +plus_all_ecount_noweak = 0 +plus_all_dcount_noweak = 0 + + +def _header(topology_st, label): + topology_st.standalone.log.info("\n\n###############################################") + topology_st.standalone.log.info("####### %s" % label) + topology_st.standalone.log.info("###############################################") + + +def test_init(topology_st): + """ + Generate self signed cert and import it to the DS cert db. + Enable SSL + """ + _header(topology_st, 'Testing Ticket 48194 - harden the list of ciphers available by default') + + nss_ssl = NssSsl(dbpath=topology_st.standalone.get_cert_dir()) + nss_ssl.reinit() + nss_ssl.create_rsa_ca() + nss_ssl.create_rsa_key_and_cert() + + log.info("\n######################### enable SSL in the directory server with all ciphers ######################\n") + topology_st.standalone.simple_bind_s(DN_DM, PASSWORD) + topology_st.standalone.modify_s(ENCRYPTION_DN, [(ldap.MOD_REPLACE, 'nsSSL3', b'off'), + (ldap.MOD_REPLACE, 'nsTLS1', b'on'), + (ldap.MOD_REPLACE, 'nsSSLClientAuth', b'allowed'), + (ldap.MOD_REPLACE, 'allowWeakCipher', b'on'), + (ldap.MOD_REPLACE, 'nsSSL3Ciphers', b'+all')]) + + topology_st.standalone.modify_s(CONFIG_DN, [(ldap.MOD_REPLACE, 'nsslapd-security', b'on'), + (ldap.MOD_REPLACE, 'nsslapd-ssl-check-hostname', b'off'), + (ldap.MOD_REPLACE, 'nsslapd-secureport', ensure_bytes(LDAPSPORT))]) + + if ds_is_older('1.4.0'): + topology_st.standalone.add_s(Entry((RSA_DN, {'objectclass': "top nsEncryptionModule".split(), + 'cn': RSA, + 'nsSSLPersonalitySSL': SERVERCERT, + 'nsSSLToken': 'internal (software)', + 'nsSSLActivation': 'on'}))) + + +def connectWithOpenssl(topology_st, cipher, expect): + """ + Connect with the given cipher + Condition: + If expect is True, the handshake should be successful. + If expect is False, the handshake should be refused with + access log: "Cannot communicate securely with peer: + no common encryption algorithm(s)." + """ + log.info("Testing %s -- expect to handshake %s", cipher, "successfully" if expect else "failed") + + myurl = 'localhost:%s' % LDAPSPORT + cmdline = ['/usr/bin/openssl', 's_client', '-connect', myurl, '-cipher', cipher] + + strcmdline = " ".join(cmdline) + log.info("Running cmdline: %s", strcmdline) + + try: + proc = subprocess.Popen(cmdline, stdout=subprocess.PIPE, stdin=subprocess.PIPE, stderr=subprocess.STDOUT) + except ValueError: + log.info("%s failed: %s", cmdline, ValueError) + proc.kill() + + while True: + l = proc.stdout.readline() + if l == b"": + break + if b'Cipher is' in l: + log.info("Found: %s", l) + if expect: + if b'(NONE)' in l: + assert False + else: + proc.stdin.close() + assert True + else: + if b'(NONE)' in l: + assert True + else: + proc.stdin.close() + assert False + + +def test_run_0(topology_st): + """ + Check nsSSL3Ciphers: +all + All ciphers are enabled except null. 
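+    Expected outcome: both the weak DES-CBC3-SHA handshake and the strong AES256-SHA256 handshake succeed.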
+ Note: allowWeakCipher: on + """ + _header(topology_st, 'Test Case 1 - Check the ciphers availability for "+all"; allowWeakCipher: on') + + topology_st.standalone.simple_bind_s(DN_DM, PASSWORD) + topology_st.standalone.modify_s(CONFIG_DN, [(ldap.MOD_REPLACE, 'nsslapd-errorlog-level', b'64')]) + + log.info("\n######################### Restarting the server ######################\n") + topology_st.standalone.restart(timeout=120) + + connectWithOpenssl(topology_st, 'DES-CBC3-SHA', True) + connectWithOpenssl(topology_st, 'AES256-SHA256', True) + + +def test_run_1(topology_st): + """ + Check nsSSL3Ciphers: +all + All ciphers are enabled except null. + Note: default allowWeakCipher (i.e., off) for +all + """ + _header(topology_st, 'Test Case 2 - Check the ciphers availability for "+all" with default allowWeakCiphers') + + topology_st.standalone.simple_bind_s(DN_DM, PASSWORD) + topology_st.standalone.modify_s(CONFIG_DN, [(ldap.MOD_REPLACE, 'nsslapd-errorlog-level', b'64')]) + # Make sure allowWeakCipher is not set. + topology_st.standalone.modify_s(ENCRYPTION_DN, [(ldap.MOD_DELETE, 'allowWeakCipher', None)]) + + log.info("\n######################### Restarting the server ######################\n") + topology_st.standalone.stop(timeout=10) + os.system('mv %s %s.48194_0' % (topology_st.standalone.errlog, topology_st.standalone.errlog)) + os.system('touch %s' % (topology_st.standalone.errlog)) + time.sleep(2) + topology_st.standalone.start(timeout=120) + + connectWithOpenssl(topology_st, 'DES-CBC3-SHA', False) + connectWithOpenssl(topology_st, 'AES256-SHA256', True) + + +def test_run_2(topology_st): + """ + Check nsSSL3Ciphers: +rsa_aes_128_sha,+rsa_aes_256_sha + rsa_aes_128_sha, tls_rsa_aes_128_sha, rsa_aes_256_sha, tls_rsa_aes_256_sha are enabled. + default allowWeakCipher + """ + _header(topology_st, + 'Test Case 3 - Check the ciphers availability for "+rsa_aes_128_sha,+rsa_aes_256_sha" with default allowWeakCipher') + + topology_st.standalone.simple_bind_s(DN_DM, PASSWORD) + topology_st.standalone.modify_s(ENCRYPTION_DN, + [(ldap.MOD_REPLACE, 'nsSSL3Ciphers', b'+rsa_aes_128_sha,+rsa_aes_256_sha')]) + + log.info("\n######################### Restarting the server ######################\n") + topology_st.standalone.stop(timeout=10) + os.system('mv %s %s.48194_1' % (topology_st.standalone.errlog, topology_st.standalone.errlog)) + os.system('touch %s' % (topology_st.standalone.errlog)) + time.sleep(2) + topology_st.standalone.start(timeout=120) + + connectWithOpenssl(topology_st, 'DES-CBC3-SHA', False) + connectWithOpenssl(topology_st, 'AES256-SHA256', False) + connectWithOpenssl(topology_st, 'AES128-SHA', True) + connectWithOpenssl(topology_st, 'AES256-SHA', True) + + +def test_run_3(topology_st): + """ + Check nsSSL3Ciphers: -all + All ciphers are disabled. 
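+    Expected outcome: both the DES-CBC3-SHA and AES256-SHA256 handshakes are refused.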
+ default allowWeakCipher + """ + _header(topology_st, 'Test Case 4 - Check the ciphers availability for "-all"') + + topology_st.standalone.simple_bind_s(DN_DM, PASSWORD) + topology_st.standalone.modify_s(ENCRYPTION_DN, [(ldap.MOD_REPLACE, 'nsSSL3Ciphers', b'-all')]) + + log.info("\n######################### Restarting the server ######################\n") + topology_st.standalone.stop(timeout=10) + os.system('mv %s %s.48194_2' % (topology_st.standalone.errlog, topology_st.standalone.errlog)) + os.system('touch %s' % (topology_st.standalone.errlog)) + time.sleep(1) + topology_st.standalone.start(timeout=120) + + connectWithOpenssl(topology_st, 'DES-CBC3-SHA', False) + connectWithOpenssl(topology_st, 'AES256-SHA256', False) + + +def test_run_4(topology_st): + """ + Check no nsSSL3Ciphers + Default ciphers are enabled. + default allowWeakCipher + """ + _header(topology_st, 'Test Case 5 - Check no nsSSL3Ciphers (-all) with default allowWeakCipher') + + topology_st.standalone.simple_bind_s(DN_DM, PASSWORD) + topology_st.standalone.modify_s(ENCRYPTION_DN, [(ldap.MOD_DELETE, 'nsSSL3Ciphers', b'-all')]) + + log.info("\n######################### Restarting the server ######################\n") + topology_st.standalone.stop(timeout=10) + os.system('mv %s %s.48194_3' % (topology_st.standalone.errlog, topology_st.standalone.errlog)) + os.system('touch %s' % (topology_st.standalone.errlog)) + time.sleep(2) + topology_st.standalone.start(timeout=120) + + connectWithOpenssl(topology_st, 'DES-CBC3-SHA', False) + connectWithOpenssl(topology_st, 'AES256-SHA256', True) + + +def test_run_5(topology_st): + """ + Check nsSSL3Ciphers: default + Default ciphers are enabled. + default allowWeakCipher + """ + _header(topology_st, 'Test Case 6 - Check default nsSSL3Ciphers (default setting) with default allowWeakCipher') + + topology_st.standalone.simple_bind_s(DN_DM, PASSWORD) + topology_st.standalone.modify_s(ENCRYPTION_DN, [(ldap.MOD_REPLACE, 'nsSSL3Ciphers', b'default')]) + + log.info("\n######################### Restarting the server ######################\n") + topology_st.standalone.stop(timeout=10) + os.system('mv %s %s.48194_4' % (topology_st.standalone.errlog, topology_st.standalone.errlog)) + os.system('touch %s' % (topology_st.standalone.errlog)) + time.sleep(2) + topology_st.standalone.start(timeout=120) + + connectWithOpenssl(topology_st, 'DES-CBC3-SHA', False) + connectWithOpenssl(topology_st, 'AES256-SHA256', True) + + +def test_run_6(topology_st): + """ + Check nsSSL3Ciphers: +all,-TLS_RSA_WITH_AES_256_CBC_SHA256 + All ciphers are disabled. 
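+    Expected outcome: only AES256-SHA256 (TLS_RSA_WITH_AES_256_CBC_SHA256) is refused; AES128-SHA from the +all set still succeeds.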
+ default allowWeakCipher + """ + _header(topology_st, + 'Test Case 7 - Check nsSSL3Ciphers: +all,-TLS_RSA_WITH_AES_256_CBC_SHA256 with default allowWeakCipher') + + topology_st.standalone.simple_bind_s(DN_DM, PASSWORD) + topology_st.standalone.modify_s(ENCRYPTION_DN, + [(ldap.MOD_REPLACE, 'nsSSL3Ciphers', b'+all,-TLS_RSA_WITH_AES_256_CBC_SHA256')]) + + log.info("\n######################### Restarting the server ######################\n") + topology_st.standalone.stop(timeout=10) + os.system('mv %s %s.48194_5' % (topology_st.standalone.errlog, topology_st.standalone.errlog)) + os.system('touch %s' % (topology_st.standalone.errlog)) + time.sleep(2) + topology_st.standalone.start(timeout=120) + + connectWithOpenssl(topology_st, 'DES-CBC3-SHA', False) + connectWithOpenssl(topology_st, 'AES256-SHA256', False) + connectWithOpenssl(topology_st, 'AES128-SHA', True) + + +def test_run_8(topology_st): + """ + Check nsSSL3Ciphers: default + allowWeakCipher: off + Strong Default ciphers are enabled. + """ + _header(topology_st, 'Test Case 9 - Check default nsSSL3Ciphers (default setting + allowWeakCipher: off)') + + topology_st.standalone.simple_bind_s(DN_DM, PASSWORD) + topology_st.standalone.modify_s(ENCRYPTION_DN, [(ldap.MOD_REPLACE, 'nsSSL3Ciphers', b'default'), + (ldap.MOD_REPLACE, 'allowWeakCipher', b'off')]) + + log.info("\n######################### Restarting the server ######################\n") + topology_st.standalone.stop(timeout=10) + os.system('mv %s %s.48194_7' % (topology_st.standalone.errlog, topology_st.standalone.errlog)) + os.system('touch %s' % (topology_st.standalone.errlog)) + time.sleep(2) + topology_st.standalone.start(timeout=120) + + connectWithOpenssl(topology_st, 'DES-CBC3-SHA', False) + connectWithOpenssl(topology_st, 'AES256-SHA256', True) + + +def test_run_9(topology_st): + """ + Check no nsSSL3Ciphers + Default ciphers are enabled. 
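+    Expected outcome: with allowWeakCipher back on, both the DES-CBC3-SHA and AES256-SHA256 handshakes succeed.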
+ allowWeakCipher: on + nsslapd-errorlog-level: 0 + """ + _header(topology_st, + 'Test Case 10 - Check no nsSSL3Ciphers (default setting) with no errorlog-level & allowWeakCipher on') + + topology_st.standalone.simple_bind_s(DN_DM, PASSWORD) + topology_st.standalone.modify_s(ENCRYPTION_DN, [(ldap.MOD_REPLACE, 'nsSSL3Ciphers', None), + (ldap.MOD_REPLACE, 'allowWeakCipher', b'on')]) + topology_st.standalone.modify_s(CONFIG_DN, [(ldap.MOD_REPLACE, 'nsslapd-errorlog-level', None)]) + + log.info("\n######################### Restarting the server ######################\n") + topology_st.standalone.stop(timeout=10) + os.system('mv %s %s.48194_8' % (topology_st.standalone.errlog, topology_st.standalone.errlog)) + os.system('touch %s' % (topology_st.standalone.errlog)) + time.sleep(2) + topology_st.standalone.start(timeout=120) + + connectWithOpenssl(topology_st, 'DES-CBC3-SHA', True) + connectWithOpenssl(topology_st, 'AES256-SHA256', True) + + +def test_run_11(topology_st): + """ + Check nsSSL3Ciphers: +fortezza + SSL_GetImplementedCiphers does not return this as a secuire cipher suite + """ + _header(topology_st, 'Test Case 12 - Check nsSSL3Ciphers: +fortezza, which is not supported') + + topology_st.standalone.simple_bind_s(DN_DM, PASSWORD) + topology_st.standalone.modify_s(ENCRYPTION_DN, [(ldap.MOD_REPLACE, 'nsSSL3Ciphers', b'+fortezza')]) + + log.info("\n######################### Restarting the server ######################\n") + topology_st.standalone.stop(timeout=10) + os.system('mv %s %s.48194_10' % (topology_st.standalone.errlog, topology_st.standalone.errlog)) + os.system('touch %s' % (topology_st.standalone.errlog)) + time.sleep(1) + topology_st.standalone.start(timeout=120) + + connectWithOpenssl(topology_st, 'DES-CBC3-SHA', False) + connectWithOpenssl(topology_st, 'AES256-SHA256', False) + + +if __name__ == '__main__': + # Run isolated + # -s for DEBUG mode + CURRENT_FILE = os.path.realpath(__file__) + pytest.main("-s %s" % CURRENT_FILE) diff --git a/dirsrvtests/tests/tickets/ticket48212_test.py b/dirsrvtests/tests/tickets/ticket48212_test.py new file mode 100644 index 0000000..ee58a82 --- /dev/null +++ b/dirsrvtests/tests/tickets/ticket48212_test.py @@ -0,0 +1,134 @@ +import pytest +from lib389.tasks import * +from lib389.utils import * +from lib389.topologies import topology_st + +from lib389._constants import DN_DM, PASSWORD, DEFAULT_SUFFIX, DATA_DIR + +pytestmark = pytest.mark.tier2 + +log = logging.getLogger(__name__) + +MYSUFFIX = 'dc=example,dc=com' +MYSUFFIXBE = 'userRoot' +_MYLDIF = 'example1k_posix.ldif' +UIDNUMBERDN = "cn=uidnumber,cn=index,cn=userroot,cn=ldbm database,cn=plugins,cn=config" + + +def runDbVerify(topology_st): + topology_st.standalone.log.info("\n\n +++++ dbverify +++++\n") + sbin_dir = get_sbin_dir() + dbverifyCMD = sbin_dir + "/dbverify -Z " + topology_st.standalone.serverid + " -V" + dbverifyOUT = os.popen(dbverifyCMD, "r") + topology_st.standalone.log.info("Running %s" % dbverifyCMD) + running = True + error = False + while running: + l = dbverifyOUT.readline() + if l == "": + running = False + elif "libdb:" in l: + running = False + error = True + topology_st.standalone.log.info("%s" % l) + elif "verify failed" in l: + error = True + running = False + topology_st.standalone.log.info("%s" % l) + + if error: + topology_st.standalone.log.fatal("dbverify failed") + assert False + else: + topology_st.standalone.log.info("dbverify passed") + + +def reindexUidNumber(topology_st): + topology_st.standalone.log.info("\n\n +++++ reindex uidnumber +++++\n") + try: + 
args = {TASK_WAIT: True} + topology_st.standalone.tasks.reindex(suffix=MYSUFFIX, attrname='uidNumber', args=args) + except: + topology_st.standalone.log.fatal("Reindexing failed") + assert False + + +def test_ticket48212(topology_st): + """ + Import posixAccount entries. + Index uidNumber + add nsMatchingRule: integerOrderingMatch + run dbverify to see if it reports the db corruption or not + delete nsMatchingRule: integerOrderingMatch + run dbverify to see if it reports the db corruption or not + if no corruption is reported, the bug fix was verified. + """ + log.info( + 'Testing Ticket 48212 - Dynamic nsMatchingRule changes had no effect on the attrinfo thus following reindexing, as well.') + + # bind as directory manager + topology_st.standalone.log.info("Bind as %s" % DN_DM) + topology_st.standalone.simple_bind_s(DN_DM, PASSWORD) + + data_dir_path = topology_st.standalone.getDir(__file__, DATA_DIR) + ldif_file = f"{data_dir_path}ticket48212/{_MYLDIF}" + try: + ldif_dir = topology_st.standalone.get_ldif_dir() + shutil.copy(ldif_file, ldif_dir) + ldif_file = ldif_dir + '/' + _MYLDIF + except: + log.fatal('Failed to copy ldif to instance ldif dir') + assert False + + topology_st.standalone.log.info( + "\n\n######################### Import Test data (%s) ######################\n" % ldif_file) + args = {TASK_WAIT: True} + importTask = Tasks(topology_st.standalone) + importTask.importLDIF(MYSUFFIX, MYSUFFIXBE, ldif_file, args) + args = {TASK_WAIT: True} + + runDbVerify(topology_st) + + topology_st.standalone.log.info("\n\n######################### Add index by uidnumber ######################\n") + try: + topology_st.standalone.add_s(Entry((UIDNUMBERDN, {'objectclass': "top nsIndex".split(), + 'cn': 'uidnumber', + 'nsSystemIndex': 'false', + 'nsIndexType': "pres eq".split()}))) + except ValueError: + topology_st.standalone.log.fatal("add_s failed: %s", ValueError) + + topology_st.standalone.log.info("\n\n######################### reindexing... ######################\n") + reindexUidNumber(topology_st) + + runDbVerify(topology_st) + + topology_st.standalone.log.info("\n\n######################### Add nsMatchingRule ######################\n") + try: + topology_st.standalone.modify_s(UIDNUMBERDN, [(ldap.MOD_ADD, 'nsMatchingRule', b'integerOrderingMatch')]) + except ValueError: + topology_st.standalone.log.fatal("modify_s failed: %s", ValueError) + + topology_st.standalone.log.info("\n\n######################### reindexing... 
######################\n") + reindexUidNumber(topology_st) + + runDbVerify(topology_st) + + topology_st.standalone.log.info("\n\n######################### Delete nsMatchingRule ######################\n") + try: + topology_st.standalone.modify_s(UIDNUMBERDN, [(ldap.MOD_DELETE, 'nsMatchingRule', b'integerOrderingMatch')]) + except ValueError: + topology_st.standalone.log.fatal("modify_s failed: %s", ValueError) + + reindexUidNumber(topology_st) + + runDbVerify(topology_st) + + log.info('Testcase PASSED') + + +if __name__ == '__main__': + # Run isolated + # -s for DEBUG mode + CURRENT_FILE = os.path.realpath(__file__) + pytest.main("-s %s" % CURRENT_FILE) diff --git a/dirsrvtests/tests/tickets/ticket48214_test.py b/dirsrvtests/tests/tickets/ticket48214_test.py new file mode 100644 index 0000000..a2e71f5 --- /dev/null +++ b/dirsrvtests/tests/tickets/ticket48214_test.py @@ -0,0 +1,105 @@ +import logging + +import pytest +from lib389.tasks import * +from lib389.topologies import topology_st + +from lib389._constants import DEFAULT_SUFFIX, DN_DM, PASSWORD + +pytestmark = pytest.mark.tier2 + +log = logging.getLogger(__name__) + +MYSUFFIX = 'dc=example,dc=com' +MYSUFFIXBE = 'userRoot' + + +def getMaxBerSizeFromDseLdif(topology_st): + topology_st.standalone.log.info(" +++++ Get maxbersize from dse.ldif +++++\n") + dse_ldif = topology_st.standalone.confdir + '/dse.ldif' + grepMaxBerCMD = "egrep nsslapd-maxbersize " + dse_ldif + topology_st.standalone.log.info(" Run CMD: %s\n" % grepMaxBerCMD) + grepMaxBerOUT = os.popen(grepMaxBerCMD, "r") + running = True + maxbersize = -1 + while running: + l = grepMaxBerOUT.readline() + if l == "": + topology_st.standalone.log.info(" Empty: %s\n" % l) + running = False + elif "nsslapd-maxbersize:" in l.lower(): + running = False + fields = l.split() + if len(fields) >= 2: + maxbersize = fields[1] + topology_st.standalone.log.info(" Right format - %s %s\n" % (fields[0], fields[1])) + else: + topology_st.standalone.log.info(" Wrong format - %s\n" % l) + else: + topology_st.standalone.log.info(" Else?: %s\n" % l) + return maxbersize + + +def checkMaxBerSize(topology_st): + topology_st.standalone.log.info(" +++++ Check Max Ber Size +++++\n") + maxbersizestr = getMaxBerSizeFromDseLdif(topology_st) + maxbersize = int(maxbersizestr) + isdefault = True + defaultvalue = 2097152 + if maxbersize < 0: + topology_st.standalone.log.info(" No nsslapd-maxbersize found in dse.ldif\n") + elif maxbersize == 0: + topology_st.standalone.log.info(" nsslapd-maxbersize: %d\n" % maxbersize) + else: + isdefault = False + topology_st.standalone.log.info(" nsslapd-maxbersize: %d\n" % maxbersize) + + try: + entry = topology_st.standalone.search_s('cn=config', ldap.SCOPE_BASE, + "(cn=*)", + ['nsslapd-maxbersize']) + if entry: + searchedsize = entry[0].getValue('nsslapd-maxbersize') + topology_st.standalone.log.info(" ldapsearch returned nsslapd-maxbersize: %s\n" % searchedsize) + else: + topology_st.standalone.log.fatal('ERROR: cn=config is not found?') + assert False + except ldap.LDAPError as e: + topology_st.standalone.log.error('ERROR: Failed to search for user entry: ' + e.message['desc']) + assert False + + if isdefault: + topology_st.standalone.log.info(" Checking %d vs %d\n" % (int(searchedsize), defaultvalue)) + assert int(searchedsize) == defaultvalue + + +def test_ticket48214_run(topology_st): + """ + Check ldapsearch returns the correct maxbersize when it is not explicitly set. 
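+    Both with no explicit setting and with nsslapd-maxbersize set to 0, ldapsearch should report the default of 2097152 (2 MB).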
+ """ + log.info('Testing Ticket 48214 - ldapsearch on nsslapd-maxbersize returns 0 instead of current value') + + # bind as directory manager + topology_st.standalone.log.info("Bind as %s" % DN_DM) + topology_st.standalone.simple_bind_s(DN_DM, PASSWORD) + + topology_st.standalone.log.info("\n\n######################### Out of Box ######################\n") + checkMaxBerSize(topology_st) + + topology_st.standalone.log.info("\n\n######################### Add nsslapd-maxbersize: 0 ######################\n") + topology_st.standalone.modify_s('cn=config', [(ldap.MOD_REPLACE, 'nsslapd-maxbersize', b'0')]) + checkMaxBerSize(topology_st) + + topology_st.standalone.log.info( + "\n\n######################### Add nsslapd-maxbersize: 10000 ######################\n") + topology_st.standalone.modify_s('cn=config', [(ldap.MOD_REPLACE, 'nsslapd-maxbersize', b'10000')]) + checkMaxBerSize(topology_st) + + topology_st.standalone.log.info("ticket48214 was successfully verified.") + + +if __name__ == '__main__': + # Run isolated + # -s for DEBUG mode + CURRENT_FILE = os.path.realpath(__file__) + pytest.main("-s %s" % CURRENT_FILE) diff --git a/dirsrvtests/tests/tickets/ticket48228_test.py b/dirsrvtests/tests/tickets/ticket48228_test.py new file mode 100644 index 0000000..530cd51 --- /dev/null +++ b/dirsrvtests/tests/tickets/ticket48228_test.py @@ -0,0 +1,274 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2016 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. +# --- END COPYRIGHT BLOCK --- +# +import logging +import time + +import pytest +from lib389.tasks import * +from lib389.topologies import topology_st + +from lib389._constants import DEFAULT_SUFFIX, DN_DM, PASSWORD, DN_CONFIG + +pytestmark = pytest.mark.tier2 + +log = logging.getLogger(__name__) + +# Assuming DEFAULT_SUFFIX is "dc=example,dc=com", otherwise it does not work... 
:( +SUBTREE_CONTAINER = 'cn=nsPwPolicyContainer,' + DEFAULT_SUFFIX +SUBTREE_PWPDN = 'cn=nsPwPolicyEntry,' + DEFAULT_SUFFIX +SUBTREE_PWP = 'cn=cn\3DnsPwPolicyEntry\2Cdc\3Dexample\2Cdc\3Dcom,' + SUBTREE_CONTAINER +SUBTREE_COS_TMPLDN = 'cn=nsPwTemplateEntry,' + DEFAULT_SUFFIX +SUBTREE_COS_TMPL = 'cn=cn\3DnsPwTemplateEntry\2Cdc\3Dexample\2Cdc\3Dcom,' + SUBTREE_CONTAINER +SUBTREE_COS_DEF = 'cn=nsPwPolicy_CoS,' + DEFAULT_SUFFIX + +USER1_DN = 'uid=user1,' + DEFAULT_SUFFIX +USER2_DN = 'uid=user2,' + DEFAULT_SUFFIX + + +def set_global_pwpolicy(topology_st, inhistory): + log.info(" +++++ Enable global password policy +++++\n") + topology_st.standalone.simple_bind_s(DN_DM, PASSWORD) + # Enable password policy + try: + topology_st.standalone.modify_s(DN_CONFIG, [(ldap.MOD_REPLACE, 'nsslapd-pwpolicy-local', b'on')]) + except ldap.LDAPError as e: + log.error('Failed to set pwpolicy-local: error ' + e.message['desc']) + assert False + + log.info(" Set global password history on\n") + try: + topology_st.standalone.modify_s(DN_CONFIG, [(ldap.MOD_REPLACE, 'passwordHistory', b'on')]) + except ldap.LDAPError as e: + log.error('Failed to set passwordHistory: error ' + e.message['desc']) + assert False + + log.info(" Set global passwords in history\n") + try: + count = "%d" % inhistory + topology_st.standalone.modify_s(DN_CONFIG, [(ldap.MOD_REPLACE, 'passwordInHistory', count.encode())]) + except ldap.LDAPError as e: + log.error('Failed to set passwordInHistory: error ' + e.message['desc']) + assert False + time.sleep(1) + + +def set_subtree_pwpolicy(topology_st): + log.info(" +++++ Enable subtree level password policy +++++\n") + topology_st.standalone.simple_bind_s(DN_DM, PASSWORD) + log.info(" Add the container") + try: + topology_st.standalone.add_s(Entry((SUBTREE_CONTAINER, {'objectclass': 'top nsContainer'.split(), + 'cn': 'nsPwPolicyContainer'}))) + except ldap.LDAPError as e: + log.error('Failed to add subtree container: error ' + e.message['desc']) + assert False + + log.info(" Add the password policy subentry {passwordHistory: on, passwordInHistory: 6}") + try: + topology_st.standalone.add_s(Entry((SUBTREE_PWP, {'objectclass': 'top ldapsubentry passwordpolicy'.split(), + 'cn': SUBTREE_PWPDN, + 'passwordMustChange': 'off', + 'passwordExp': 'off', + 'passwordHistory': 'on', + 'passwordInHistory': '6', + 'passwordMinAge': '0', + 'passwordChange': 'on', + 'passwordStorageScheme': 'clear'}))) + except ldap.LDAPError as e: + log.error('Failed to add passwordpolicy: error ' + e.message['desc']) + assert False + + log.info(" Add the COS template") + try: + topology_st.standalone.add_s( + Entry((SUBTREE_COS_TMPL, {'objectclass': 'top ldapsubentry costemplate extensibleObject'.split(), + 'cn': SUBTREE_PWPDN, + 'cosPriority': '1', + 'cn': SUBTREE_COS_TMPLDN, + 'pwdpolicysubentry': SUBTREE_PWP}))) + except ldap.LDAPError as e: + log.error('Failed to add COS template: error ' + e.message['desc']) + assert False + + log.info(" Add the COS definition") + try: + topology_st.standalone.add_s( + Entry((SUBTREE_COS_DEF, {'objectclass': 'top ldapsubentry cosSuperDefinition cosPointerDefinition'.split(), + 'cn': SUBTREE_PWPDN, + 'costemplatedn': SUBTREE_COS_TMPL, + 'cosAttribute': 'pwdpolicysubentry default operational-default'}))) + except ldap.LDAPError as e: + log.error('Failed to add COS def: error ' + e.message['desc']) + assert False + time.sleep(1) + + +def check_passwd_inhistory(topology_st, user, cpw, passwd): + + inhistory = 0 + log.info(" Bind as {%s,%s}" % (user, cpw)) + 
topology_st.standalone.simple_bind_s(user, cpw) + time.sleep(1) + try: + topology_st.standalone.modify_s(user, [(ldap.MOD_REPLACE, 'userpassword', passwd.encode())]) + except ldap.LDAPError as e: + log.info(' The password ' + passwd + ' of user' + USER1_DN + ' in history: error {0}'.format(e)) + inhistory = 1 + time.sleep(1) + return inhistory + + +def update_passwd(topology_st, user, passwd, times): + # Set the default value + cpw = passwd + for i in range(times): + log.info(" Bind as {%s,%s}" % (user, cpw)) + topology_st.standalone.simple_bind_s(user, cpw) + # Now update the value for this iter. + cpw = 'password%d' % i + try: + topology_st.standalone.modify_s(user, [(ldap.MOD_REPLACE, 'userpassword', cpw.encode())]) + except ldap.LDAPError as e: + log.fatal( + 'test_ticket48228: Failed to update the password ' + cpw + ' of user ' + user + ': error ' + e.message[ + 'desc']) + assert False + + # checking the first password, which is supposed to be in history + inhistory = check_passwd_inhistory(topology_st, user, cpw, passwd) + assert inhistory == 1 + + +def test_ticket48228_test_global_policy(topology_st): + """ + Check global password policy + """ + log.info(' Set inhistory = 6') + set_global_pwpolicy(topology_st, 6) + + log.info(' Bind as directory manager') + log.info("Bind as %s" % DN_DM) + topology_st.standalone.simple_bind_s(DN_DM, PASSWORD) + + log.info(' Add an entry' + USER1_DN) + try: + topology_st.standalone.add_s( + Entry((USER1_DN, {'objectclass': "top person organizationalPerson inetOrgPerson".split(), + 'sn': '1', + 'cn': 'user 1', + 'uid': 'user1', + 'givenname': 'user', + 'mail': 'user1@example.com', + 'userpassword': 'password'}))) + except ldap.LDAPError as e: + log.fatal('test_ticket48228: Failed to add user' + USER1_DN + ': error ' + e.message['desc']) + assert False + + log.info(' Update the password of ' + USER1_DN + ' 6 times') + update_passwd(topology_st, USER1_DN, 'password', 6) + + log.info(' Set inhistory = 4') + set_global_pwpolicy(topology_st, 4) + + log.info(' checking the first password, which is supposed NOT to be in history any more') + cpw = 'password%d' % 5 + tpw = 'password' + inhistory = check_passwd_inhistory(topology_st, USER1_DN, cpw, tpw) + assert inhistory == 0 + + log.info(' checking the second password, which is supposed NOT to be in history any more') + cpw = tpw + tpw = 'password%d' % 0 + inhistory = check_passwd_inhistory(topology_st, USER1_DN, cpw, tpw) + assert inhistory == 0 + + log.info(' checking the third password, which is supposed NOT to be in history any more') + cpw = tpw + tpw = 'password%d' % 1 + inhistory = check_passwd_inhistory(topology_st, USER1_DN, cpw, tpw) + assert inhistory == 0 + + log.info(' checking the sixth password, which is supposed to be in history') + cpw = tpw + tpw = 'password%d' % 5 + inhistory = check_passwd_inhistory(topology_st, USER1_DN, cpw, tpw) + assert inhistory == 1 + + log.info("Global policy was successfully verified.") + + +def text_ticket48228_text_subtree_policy(topology_st): + """ + Check subtree level password policy + """ + + log.info(' Set inhistory = 6') + set_subtree_pwpolicy(topology_st) + + log.info(' Bind as directory manager') + log.info("Bind as %s" % DN_DM) + topology_st.standalone.simple_bind_s(DN_DM, PASSWORD) + + log.info(' Add an entry' + USER2_DN) + try: + topology_st.standalone.add_s( + Entry((USER2_DN, {'objectclass': "top person organizationalPerson inetOrgPerson".split(), + 'sn': '2', + 'cn': 'user 2', + 'uid': 'user2', + 'givenname': 'user', + 'mail': 'user2@example.com', + 
'userpassword': 'password'}))) + except ldap.LDAPError as e: + log.fatal('test_ticket48228: Failed to add user' + USER2_DN + ': error ' + e.message['desc']) + assert False + + log.info(' Update the password of ' + USER2_DN + ' 6 times') + update_passwd(topology_st, USER2_DN, 'password', 6) + + log.info(' Set inhistory = 4') + topology_st.standalone.simple_bind_s(DN_DM, PASSWORD) + try: + topology_st.standalone.modify_s(SUBTREE_PWP, [(ldap.MOD_REPLACE, 'passwordInHistory', b'4')]) + except ldap.LDAPError as e: + log.error('Failed to set pwpolicy-local: error ' + e.message['desc']) + assert False + + log.info(' checking the first password, which is supposed NOT to be in history any more') + cpw = 'password%d' % 5 + tpw = 'password' + inhistory = check_passwd_inhistory(topology_st, USER2_DN, cpw, tpw) + assert inhistory == 0 + + log.info(' checking the second password, which is supposed NOT to be in history any more') + cpw = tpw + tpw = 'password%d' % 1 + inhistory = check_passwd_inhistory(topology_st, USER2_DN, cpw, tpw) + assert inhistory == 0 + + log.info(' checking the third password, which is supposed NOT to be in history any more') + cpw = tpw + tpw = 'password%d' % 2 + inhistory = check_passwd_inhistory(topology_st, USER2_DN, cpw, tpw) + assert inhistory == 0 + + log.info(' checking the six password, which is supposed to be in history') + cpw = tpw + tpw = 'password%d' % 5 + inhistory = check_passwd_inhistory(topology_st, USER2_DN, cpw, tpw) + assert inhistory == 1 + + log.info("Subtree level policy was successfully verified.") + + +if __name__ == '__main__': + # Run isolated + # -s for DEBUG mode + CURRENT_FILE = os.path.realpath(__file__) + pytest.main("-s %s" % CURRENT_FILE) diff --git a/dirsrvtests/tests/tickets/ticket48233_test.py b/dirsrvtests/tests/tickets/ticket48233_test.py new file mode 100644 index 0000000..3eee70f --- /dev/null +++ b/dirsrvtests/tests/tickets/ticket48233_test.py @@ -0,0 +1,61 @@ +import pytest +from lib389.utils import * +from lib389.topologies import topology_st + +from lib389._constants import DEFAULT_SUFFIX + +pytestmark = pytest.mark.tier2 + +logging.getLogger(__name__).setLevel(logging.DEBUG) +log = logging.getLogger(__name__) + + +def test_ticket48233(topology_st): + """Test that ACI's that use IP restrictions do not crash the server at + shutdown + """ + + # Add aci to restrict access my ip + aci_text = ('(targetattr != "userPassword")(version 3.0;acl ' + + '"Enable anonymous access - IP"; allow (read,compare,search)' + + '(userdn = "ldap:///anyone") and (ip="127.0.0.1");)') + + try: + topology_st.standalone.modify_s(DEFAULT_SUFFIX, [(ldap.MOD_ADD, 'aci', ensure_bytes(aci_text))]) + except ldap.LDAPError as e: + log.error('Failed to add aci: ({}) error {}'.format(aci_text,e.args[0]['desc'])) + assert False + time.sleep(1) + + # Anonymous search to engage the aci + try: + topology_st.standalone.simple_bind_s("", "") + except ldap.LDAPError as e: + log.error('Failed to anonymously bind -error {}'.format(e.args[0]['desc'])) + assert False + + try: + entries = topology_st.standalone.search_s(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE, 'objectclass=*') + if not entries: + log.fatal('Failed return an entries from search') + assert False + except ldap.LDAPError as e: + log.fatal('Search failed: ' + e.message['desc']) + assert False + + # Restart the server + topology_st.standalone.restart(timeout=10) + + # Check for crash + if topology_st.standalone.detectDisorderlyShutdown(): + log.fatal('Server crashed!') + assert False + + log.info('Test complete') + + +if 
__name__ == '__main__': + # Run isolated + # -s for DEBUG mode + CURRENT_FILE = os.path.realpath(__file__) + pytest.main("-s %s" % CURRENT_FILE) diff --git a/dirsrvtests/tests/tickets/ticket48234_test.py b/dirsrvtests/tests/tickets/ticket48234_test.py new file mode 100644 index 0000000..238a2bd --- /dev/null +++ b/dirsrvtests/tests/tickets/ticket48234_test.py @@ -0,0 +1,99 @@ +import pytest +from lib389.tasks import * +from lib389.utils import * +from lib389.topologies import topology_st + +from lib389._constants import DEFAULT_SUFFIX, DN_DM, PASSWORD + +pytestmark = pytest.mark.tier2 + +logging.getLogger(__name__).setLevel(logging.DEBUG) +log = logging.getLogger(__name__) + + +def add_ou_entry(server, name, myparent): + dn = 'ou=%s,%s' % (name, myparent) + server.add_s(Entry((dn, {'objectclass': ['top', 'organizationalunit'], + 'ou': name}))) + + +def add_user_entry(server, name, pw, myparent): + dn = 'cn=%s,%s' % (name, myparent) + server.add_s(Entry((dn, {'objectclass': ['top', 'person'], + 'sn': name, + 'cn': name, + 'telephonenumber': '+1 222 333-4444', + 'userpassword': pw}))) + + +def test_ticket48234(topology_st): + """ + Test aci which contains an extensible filter. + shutdown + """ + + log.info('Bind as root DN') + try: + topology_st.standalone.simple_bind_s(DN_DM, PASSWORD) + except ldap.LDAPError as e: + topology_st.standalone.log.error('Root DN failed to authenticate: ' + e.args[0]['desc']) + assert False + + ouname = 'outest' + username = 'admin' + passwd = 'Password' + deniedattr = 'telephonenumber' + log.info('Add aci which contains extensible filter.') + aci_text = ('(targetattr = "%s")' % (deniedattr) + + '(target = "ldap:///%s")' % (DEFAULT_SUFFIX) + + '(version 3.0;acl "admin-tel-matching-rule-outest";deny (all)' + + '(userdn = "ldap:///%s??sub?(&(cn=%s)(ou:dn:=%s))");)' % (DEFAULT_SUFFIX, username, ouname)) + + try: + topology_st.standalone.modify_s(DEFAULT_SUFFIX, [(ldap.MOD_ADD, 'aci', ensure_bytes(aci_text))]) + except ldap.LDAPError as e: + log.error('Failed to add aci: (%s) error %s' % (aci_text, e.args[0]['desc'])) + assert False + + log.info('Add entries ...') + for idx in range(0, 2): + ou0 = 'OU%d' % idx + log.info('adding %s under %s...' % (ou0, DEFAULT_SUFFIX)) + add_ou_entry(topology_st.standalone, ou0, DEFAULT_SUFFIX) + parent = 'ou=%s,%s' % (ou0, DEFAULT_SUFFIX) + log.info('adding %s under %s...' % (ouname, parent)) + add_ou_entry(topology_st.standalone, ouname, parent) + + for idx in range(0, 2): + parent = 'ou=%s,ou=OU%d,%s' % (ouname, idx, DEFAULT_SUFFIX) + log.info('adding %s under %s...' 
% (username, parent))
+        add_user_entry(topology_st.standalone, username, passwd, parent)
+
+    binddn = 'cn=%s,%s' % (username, parent)
+    log.info('Bind as user %s' % binddn)
+    try:
+        topology_st.standalone.simple_bind_s(binddn, passwd)
+    except ldap.LDAPError as e:
+        topology_st.standalone.log.error(binddn + ' failed to authenticate: ' + e.args[0]['desc'])
+        assert False
+
+    filter = '(cn=%s)' % username
+    try:
+        entries = topology_st.standalone.search_s(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE, filter, [deniedattr, 'dn'])
+        assert 2 == len(entries)
+        for idx in range(0, 1):
+            if entries[idx].hasAttr(deniedattr):
+                log.fatal('aci with extensible filter failed -- %s' % entries[idx].dn)
+                assert False
+    except ldap.LDAPError as e:
+        topology_st.standalone.log.error('Search (%s, %s) failed: ' % (DEFAULT_SUFFIX, filter) + e.args[0]['desc'])
+        assert False
+
+    log.info('Test complete')
+
+
+if __name__ == '__main__':
+    # Run isolated
+    # -s for DEBUG mode
+    CURRENT_FILE = os.path.realpath(__file__)
+    pytest.main("-s %s" % CURRENT_FILE)
diff --git a/dirsrvtests/tests/tickets/ticket48252_test.py b/dirsrvtests/tests/tickets/ticket48252_test.py
new file mode 100644
index 0000000..05419ba
--- /dev/null
+++ b/dirsrvtests/tests/tickets/ticket48252_test.py
@@ -0,0 +1,120 @@
+# --- BEGIN COPYRIGHT BLOCK ---
+# Copyright (C) 2016 Red Hat, Inc.
+# All rights reserved.
+#
+# License: GPL (version 3 or any later version).
+# See LICENSE for details.
+# --- END COPYRIGHT BLOCK ---
+#
+import logging
+
+import pytest
+from lib389.tasks import *
+from lib389.utils import *
+from lib389.topologies import topology_st
+from lib389.idm.user import UserAccounts
+
+from lib389._constants import DEFAULT_SUFFIX, SUFFIX, DEFAULT_BENAME, PLUGIN_USN
+
+pytestmark = pytest.mark.tier2
+
+log = logging.getLogger(__name__)
+
+# Assuming DEFAULT_SUFFIX is "dc=example,dc=com", otherwise it does not work... :(
+USER_NUM = 10
+TEST_USER = "test_user"
+
+
+def test_ticket48252_setup(topology_st):
+    """
+    Enable USN plug-in for enabling tombstones
+    Add test entries
+    """
+
+    log.info("Enable the USN plugin...")
+    try:
+        topology_st.standalone.plugins.enable(name=PLUGIN_USN)
+    except ldap.LDAPError as e:
+        log.error("Failed to enable USN Plugin: error " + e.args[0]['desc'])
+        assert False
+
+    log.info("Adding test entries...")
+    ua = UserAccounts(topology_st.standalone, DEFAULT_SUFFIX)
+    for i in range(USER_NUM):
+        ua.create(properties={
+            'uid': "%s%d" % (TEST_USER, i),
+            'cn': "%s%d" % (TEST_USER, i),
+            'sn': 'user',
+            'uidNumber': '1000',
+            'gidNumber': '2000',
+            'homeDirectory': '/home/testuser'
+        })
+
+
+def in_index_file(topology_st, id, index):
+    key = "%s%s" % (TEST_USER, id)
+    log.info("  dbscan - checking %s is in index file %s..." % (key, index))
+    dbscanOut = topology_st.standalone.dbscan(DEFAULT_BENAME, index)
+    if ensure_bytes(key) in ensure_bytes(dbscanOut):
+        found = True
+        topology_st.standalone.log.info("Found key %s in dbscan output" % key)
+    else:
+        found = False
+        topology_st.standalone.log.info("Did not find key %s in dbscan output" % key)
+
+    return found
+
+
+def test_ticket48252_run_0(topology_st):
+    """
+    Delete an entry uid=test_user0
+    Check it is not in the 'cn' index file
+    """
+    log.info("Case 1 - Check deleted entry is not in the 'cn' index file")
+    uas = UserAccounts(topology_st.standalone, DEFAULT_SUFFIX)
+    del_rdn = "uid=%s0" % TEST_USER
+    del_entry = uas.get('%s0' % TEST_USER)
+    log.info("  Deleting a test entry %s..."
% del_entry) + del_entry.delete() + + assert in_index_file(topology_st, 0, 'cn') is False + log.info(" db2index - reindexing %s ..." % 'cn') + topology_st.standalone.stop() + assert topology_st.standalone.db2index(suffixes=[DEFAULT_SUFFIX], attrs=['cn']) + topology_st.standalone.start() + assert in_index_file(topology_st, 0, 'cn') is False + log.info(" entry %s is not in the cn index file after reindexed." % del_rdn) + log.info('Case 1 - PASSED') + + +def test_ticket48252_run_1(topology_st): + """ + Delete an entry cn=test_entry1 + Check it is in the 'objectclass' index file as a tombstone entry + """ + log.info("Case 2 - Check deleted entry is in the 'objectclass' index file as a tombstone entry") + uas = UserAccounts(topology_st.standalone, DEFAULT_SUFFIX) + del_rdn = "uid=%s1" % TEST_USER + del_entry = uas.get('%s1' % TEST_USER) + log.info(" Deleting a test entry %s..." % del_rdn) + del_entry.delete() + + entry = topology_st.standalone.search_s(SUFFIX, ldap.SCOPE_SUBTREE, '(&(objectclass=nstombstone)(%s))' % del_rdn) + assert len(entry) == 1 + log.info(" entry %s is in the objectclass index file." % del_rdn) + + log.info(" db2index - reindexing %s ..." % 'objectclass') + topology_st.standalone.stop() + assert topology_st.standalone.db2index(suffixes=[DEFAULT_SUFFIX], attrs=['objectclass']) + topology_st.standalone.start() + entry = topology_st.standalone.search_s(SUFFIX, ldap.SCOPE_SUBTREE, '(&(objectclass=nstombstone)(%s))' % del_rdn) + assert len(entry) == 1 + log.info(" entry %s is in the objectclass index file after reindexed." % del_rdn) + log.info('Case 2 - PASSED') + + +if __name__ == '__main__': + # Run isolated + # -s for DEBUG mode + CURRENT_FILE = os.path.realpath(__file__) + pytest.main("-s %s" % CURRENT_FILE) diff --git a/dirsrvtests/tests/tickets/ticket48265_test.py b/dirsrvtests/tests/tickets/ticket48265_test.py new file mode 100644 index 0000000..1652b7a --- /dev/null +++ b/dirsrvtests/tests/tickets/ticket48265_test.py @@ -0,0 +1,76 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2016 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. +# --- END COPYRIGHT BLOCK --- +# +import pytest +from lib389.tasks import * +from lib389.utils import * +from lib389.topologies import topology_st + +from lib389._constants import SUFFIX, DEFAULT_SUFFIX + +pytestmark = pytest.mark.tier2 + +logging.getLogger(__name__).setLevel(logging.DEBUG) +log = logging.getLogger(__name__) + +USER_NUM = 20 +TEST_USER = 'test_user' + + +def test_ticket48265_test(topology_st): + """ + Complex filter issues + Ticket 47521 type complex filter: + (&(|(uid=tuser*)(cn=Test user*))(&(givenname=test*3))(mail=tuser@example.com)(&(description=*))) + Ticket 48264 type complex filter: + (&(&(|(l=EU)(l=AP)(l=NA))(|(c=SE)(c=DE)))(|(uid=*test*)(cn=*test*))(l=eu)) + """ + + log.info("Adding %d test entries..." 
% USER_NUM) + for id in range(USER_NUM): + name = "%s%d" % (TEST_USER, id) + mail = "%s@example.com" % name + secretary = "cn=%s,ou=secretary,%s" % (name, SUFFIX) + topology_st.standalone.add_s(Entry(("cn=%s,%s" % (name, SUFFIX), { + 'objectclass': "top person organizationalPerson inetOrgPerson".split(), + 'sn': name, + 'cn': name, + 'uid': name, + 'givenname': 'test', + 'mail': mail, + 'description': 'description', + 'secretary': secretary, + 'l': 'MV', + 'title': 'Engineer'}))) + + log.info("Search with Ticket 47521 type complex filter") + for id in range(USER_NUM): + name = "%s%d" % (TEST_USER, id) + mail = "%s@example.com" % name + filter47521 = '(&(|(uid=%s*)(cn=%s*))(&(givenname=test))(mail=%s)(&(description=*)))' % ( + TEST_USER, TEST_USER, mail) + entry = topology_st.standalone.search_s(SUFFIX, ldap.SCOPE_SUBTREE, filter47521) + assert len(entry) == 1 + + log.info("Search with Ticket 48265 type complex filter") + for id in range(USER_NUM): + name = "%s%d" % (TEST_USER, id) + mail = "%s@example.com" % name + filter48265 = '(&(&(|(l=AA)(l=BB)(l=MV))(|(title=admin)(title=engineer)))(|(uid=%s)(mail=%s))(description=description))' % ( + name, mail) + entry = topology_st.standalone.search_s(SUFFIX, ldap.SCOPE_SUBTREE, filter48265) + assert len(entry) == 1 + + log.info('Test 48265 complete\n') + + +if __name__ == '__main__': + # Run isolated + # -s for DEBUG mode + CURRENT_FILE = os.path.realpath(__file__) + pytest.main("-s %s" % CURRENT_FILE) diff --git a/dirsrvtests/tests/tickets/ticket48266_test.py b/dirsrvtests/tests/tickets/ticket48266_test.py new file mode 100644 index 0000000..e0ed9de --- /dev/null +++ b/dirsrvtests/tests/tickets/ticket48266_test.py @@ -0,0 +1,280 @@ +import pytest +from lib389.tasks import * +from lib389.utils import * +from lib389.topologies import topology_m2 +from lib389.replica import ReplicationManager + +from lib389._constants import SUFFIX, DEFAULT_SUFFIX, HOST_MASTER_2, PORT_MASTER_2 + +pytestmark = pytest.mark.tier2 + +logging.getLogger(__name__).setLevel(logging.DEBUG) +log = logging.getLogger(__name__) + +NEW_ACCOUNT = "new_account" +MAX_ACCOUNTS = 20 + + +@pytest.fixture(scope="module") +def entries(topology_m2): + # add dummy entries in the staging DIT + for cpt in range(MAX_ACCOUNTS): + name = "%s%d" % (NEW_ACCOUNT, cpt) + topology_m2.ms["master1"].add_s(Entry(("cn=%s,%s" % (name, SUFFIX), { + 'objectclass': "top person".split(), + 'sn': name, + 'cn': name}))) + topology_m2.ms["master1"].config.set('nsslapd-accesslog-logbuffering', 'off') + topology_m2.ms["master1"].config.set('nsslapd-errorlog-level', '8192') + # 256 + 4 + topology_m2.ms["master1"].config.set('nsslapd-accesslog-level', '260') + + topology_m2.ms["master2"].config.set('nsslapd-accesslog-logbuffering', 'off') + topology_m2.ms["master2"].config.set('nsslapd-errorlog-level', '8192') + # 256 + 4 + topology_m2.ms["master2"].config.set('nsslapd-accesslog-level', '260') + + +def test_ticket48266_fractional(topology_m2, entries): + ents = topology_m2.ms["master1"].agreement.list(suffix=SUFFIX) + assert len(ents) == 1 + + mod = [(ldap.MOD_REPLACE, 'nsDS5ReplicatedAttributeList', [b'(objectclass=*) $ EXCLUDE telephonenumber']), + (ldap.MOD_REPLACE, 'nsds5ReplicaStripAttrs', [b'modifiersname modifytimestamp'])] + ents = topology_m2.ms["master1"].agreement.list(suffix=SUFFIX) + assert len(ents) == 1 + m1_m2_agmt = ents[0].dn + topology_m2.ms["master1"].modify_s(ents[0].dn, mod) + + ents = topology_m2.ms["master2"].agreement.list(suffix=SUFFIX) + assert len(ents) == 1 + 
topology_m2.ms["master2"].modify_s(ents[0].dn, mod) + + topology_m2.ms["master1"].restart() + topology_m2.ms["master2"].restart() + + repl = ReplicationManager(DEFAULT_SUFFIX) + repl.ensure_agreement(topology_m2.ms["master1"], topology_m2.ms["master2"]) + repl.test_replication(topology_m2.ms["master1"], topology_m2.ms["master2"]) + + +def test_ticket48266_check_repl_desc(topology_m2, entries): + name = "cn=%s1,%s" % (NEW_ACCOUNT, SUFFIX) + value = 'check repl. description' + mod = [(ldap.MOD_REPLACE, 'description', ensure_bytes(value))] + topology_m2.ms["master1"].modify_s(name, mod) + + loop = 0 + while loop <= 10: + ent = topology_m2.ms["master2"].getEntry(name, ldap.SCOPE_BASE, "(objectclass=*)") + if ent.hasAttr('description') and ent.getValue('description') == ensure_bytes(value): + break + time.sleep(1) + loop += 1 + assert loop <= 10 + + +# will use this CSN as a starting point on error log +# after this is one 'Skipped' then the first csn _get_first_not_replicated_csn +# should no longer be Skipped in the error log +def _get_last_not_replicated_csn(topology_m2): + name = "cn=%s5,%s" % (NEW_ACCOUNT, SUFFIX) + + # read the first CSN that will not be replicated + mod = [(ldap.MOD_REPLACE, 'telephonenumber', ensure_bytes('123456'))] + topology_m2.ms["master1"].modify_s(name, mod) + msgid = topology_m2.ms["master1"].search_ext(name, ldap.SCOPE_SUBTREE, 'objectclass=*', ['nscpentrywsi']) + rtype, rdata, rmsgid = topology_m2.ms["master1"].result2(msgid) + attrs = None + for dn, raw_attrs in rdata: + topology_m2.ms["master1"].log.info("dn: %s" % dn) + if 'nscpentrywsi' in raw_attrs: + attrs = raw_attrs['nscpentrywsi'] + assert attrs + for attr in attrs: + if ensure_str(attr.lower()).startswith('telephonenumber'): + break + assert attr + + log.info("############# %s " % name) + # now retrieve the CSN of the operation we are looking for + csn = None + found_ops = topology_m2.ms['master1'].ds_access_log.match(".*MOD dn=\"%s\".*" % name) + assert(len(found_ops) > 0) + found_op = topology_m2.ms['master1'].ds_access_log.parse_line(found_ops[-1]) + log.info(found_op) + + # Now look for the related CSN + found_csns = topology_m2.ms['master1'].ds_access_log.match(".*conn=%s op=%s RESULT.*" % (found_op['conn'], found_op['op'])) + assert(len(found_csns) > 0) + found_csn = topology_m2.ms['master1'].ds_access_log.parse_line(found_csns[-1]) + log.info(found_csn) + return found_csn['csn'] + + +def _get_first_not_replicated_csn(topology_m2): + name = "cn=%s2,%s" % (NEW_ACCOUNT, SUFFIX) + + # read the first CSN that will not be replicated + mod = [(ldap.MOD_REPLACE, 'telephonenumber', ensure_bytes('123456'))] + topology_m2.ms["master1"].modify_s(name, mod) + msgid = topology_m2.ms["master1"].search_ext(name, ldap.SCOPE_SUBTREE, 'objectclass=*', ['nscpentrywsi']) + rtype, rdata, rmsgid = topology_m2.ms["master1"].result2(msgid) + attrs = None + for dn, raw_attrs in rdata: + topology_m2.ms["master1"].log.info("dn: %s" % dn) + if 'nscpentrywsi' in raw_attrs: + attrs = raw_attrs['nscpentrywsi'] + assert attrs + for attr in attrs: + if ensure_str(attr.lower()).startswith('telephonenumber'): + break + assert attr + + log.info("############# %s " % name) + # now retrieve the CSN of the operation we are looking for + csn = None + found_ops = topology_m2.ms['master1'].ds_access_log.match(".*MOD dn=\"%s\".*" % name) + assert(len(found_ops) > 0) + found_op = topology_m2.ms['master1'].ds_access_log.parse_line(found_ops[-1]) + log.info(found_op) + + # Now look for the related CSN + found_csns = 
topology_m2.ms['master1'].ds_access_log.match(".*conn=%s op=%s RESULT.*" % (found_op['conn'], found_op['op'])) + assert(len(found_csns) > 0) + found_csn = topology_m2.ms['master1'].ds_access_log.parse_line(found_csns[-1]) + log.info(found_csn) + return found_csn['csn'] + + +def _count_full_session(topology_m2): + # + # compute the number of 'No more updates' + # + file_obj = open(topology_m2.ms["master1"].errlog, "r") + # pattern to find + pattern = ".*No more updates to send.*" + regex = re.compile(pattern) + no_more_updates = 0 + + # check initiation number of 'No more updates + while True: + line = file_obj.readline() + found = regex.search(line) + if (found): + no_more_updates = no_more_updates + 1 + if (line == ''): + break + file_obj.close() + + return no_more_updates + + +def test_ticket48266_count_csn_evaluation(topology_m2, entries): + ents = topology_m2.ms["master1"].agreement.list(suffix=SUFFIX) + assert len(ents) == 1 + first_csn = _get_first_not_replicated_csn(topology_m2) + name = "cn=%s3,%s" % (NEW_ACCOUNT, SUFFIX) + NB_SESSION = 102 + + no_more_update_cnt = _count_full_session(topology_m2) + topology_m2.ms["master1"].agreement.pause(ents[0].dn) + # now do a set of updates that will NOT be replicated + for telNumber in range(NB_SESSION): + mod = [(ldap.MOD_REPLACE, 'telephonenumber', ensure_bytes(str(telNumber)))] + topology_m2.ms["master1"].modify_s(name, mod) + + topology_m2.ms["master1"].agreement.resume(ents[0].dn) + + # let's wait all replication session complete + MAX_LOOP = 10 + cnt = 0 + current_no_more_update = _count_full_session(topology_m2) + while (current_no_more_update == no_more_update_cnt): + cnt = cnt + 1 + if (cnt > MAX_LOOP): + break + time.sleep(5) + current_no_more_update = _count_full_session(topology_m2) + + log.info('after %d MODs we have completed %d replication sessions' % ( + NB_SESSION, (current_no_more_update - no_more_update_cnt))) + no_more_update_cnt = current_no_more_update + + # At this point, with the fix a dummy update was made BUT may be not sent it + # make sure it was sent so that the consumer CSN will be updated + last_csn = _get_last_not_replicated_csn(topology_m2) + + # let's wait all replication session complete + MAX_LOOP = 10 + cnt = 0 + current_no_more_update = _count_full_session(topology_m2) + while (current_no_more_update == no_more_update_cnt): + cnt = cnt + 1 + if (cnt > MAX_LOOP): + break + time.sleep(5) + current_no_more_update = _count_full_session(topology_m2) + + log.info('This MODs %s triggered the send of the dummy update completed %d replication sessions' % ( + last_csn, (current_no_more_update - no_more_update_cnt))) + no_more_update_cnt = current_no_more_update + + # so we should no longer see the first_csn in the log + # Let's create a new csn (last_csn) and check there is no longer first_csn + topology_m2.ms["master1"].agreement.pause(ents[0].dn) + last_csn = _get_last_not_replicated_csn(topology_m2) + topology_m2.ms["master1"].agreement.resume(ents[0].dn) + + # let's wait for the session to complete + MAX_LOOP = 10 + cnt = 0 + while (current_no_more_update == no_more_update_cnt): + cnt = cnt + 1 + if (cnt > MAX_LOOP): + break + time.sleep(5) + current_no_more_update = _count_full_session(topology_m2) + + log.info('This MODs %s completed in %d replication sessions, should be sent without evaluating %s' % ( + last_csn, (current_no_more_update - no_more_update_cnt), first_csn)) + no_more_update_cnt = current_no_more_update + + # Now determine how many times we have skipped 'csn' + # no need to stop the server to 
check the error log + file_obj = open(topology_m2.ms["master1"].errlog, "r") + + # find where the last_csn operation was processed + pattern = ".*ruv_add_csn_inprogress: successfully inserted csn %s.*" % last_csn + regex = re.compile(pattern) + cnt = 0 + + while True: + line = file_obj.readline() + found = regex.search(line) + if ((line == '') or (found)): + break + if (found): + log.info('last operation was found at %d' % file_obj.tell()) + log.info(line) + log.info('Now check the we can not find the first csn %s in the log' % first_csn) + + pattern = ".*Skipping update operation.*CSN %s.*" % first_csn + regex = re.compile(pattern) + found = False + while True: + line = file_obj.readline() + found = regex.search(line) + if ((line == '') or (found)): + break + if (found): + log.info('Unexpected found %s' % line) + assert not found + + +if __name__ == '__main__': + # Run isolated + # -s for DEBUG mode + + CURRENT_FILE = os.path.realpath(__file__) + pytest.main("-s %s" % CURRENT_FILE) diff --git a/dirsrvtests/tests/tickets/ticket48270_test.py b/dirsrvtests/tests/tickets/ticket48270_test.py new file mode 100644 index 0000000..3dcbfbf --- /dev/null +++ b/dirsrvtests/tests/tickets/ticket48270_test.py @@ -0,0 +1,118 @@ +import pytest +from lib389.tasks import * +from lib389.utils import * +from lib389.topologies import topology_st + +from lib389._constants import SUFFIX, DEFAULT_SUFFIX + +pytestmark = pytest.mark.tier2 + +logging.getLogger(__name__).setLevel(logging.DEBUG) +log = logging.getLogger(__name__) + +NEW_ACCOUNT = "new_account" +MAX_ACCOUNTS = 20 + +MIXED_VALUE = "/home/mYhOmEdIrEcToRy" +LOWER_VALUE = "/home/myhomedirectory" +HOMEDIRECTORY_INDEX = 'cn=homeDirectory,cn=index,cn=userRoot,cn=ldbm database,cn=plugins,cn=config' +HOMEDIRECTORY_CN = "homedirectory" +MATCHINGRULE = 'nsMatchingRule' +UIDNUMBER_INDEX = 'cn=uidnumber,cn=index,cn=userRoot,cn=ldbm database,cn=plugins,cn=config' +UIDNUMBER_CN = "uidnumber" + + +def test_ticket48270_init(topology_st): + log.info("Initialization: add dummy entries for the tests") + for cpt in range(MAX_ACCOUNTS): + name = "%s%d" % (NEW_ACCOUNT, cpt) + topology_st.standalone.add_s(Entry(("uid=%s,%s" % (name, SUFFIX), { + 'objectclass': "top posixAccount".split(), + 'uid': name, + 'cn': name, + 'uidnumber': str(111), + 'gidnumber': str(222), + 'homedirectory': "/home/tbordaz_%d" % cpt}))) + + +def test_ticket48270_homeDirectory_indexed_cis(topology_st): + log.info("\n\nindex homeDirectory in caseIgnoreIA5Match and caseExactIA5Match") + try: + ent = topology_st.standalone.getEntry(HOMEDIRECTORY_INDEX, ldap.SCOPE_BASE) + except ldap.NO_SUCH_OBJECT: + topology_st.standalone.add_s(Entry((HOMEDIRECTORY_INDEX, { + 'objectclass': "top nsIndex".split(), + 'cn': HOMEDIRECTORY_CN, + 'nsSystemIndex': 'false', + 'nsIndexType': 'eq'}))) + # log.info("attach debugger") + # time.sleep(60) + + IGNORE_MR_NAME = b'caseIgnoreIA5Match' + EXACT_MR_NAME = b'caseExactIA5Match' + mod = [(ldap.MOD_REPLACE, MATCHINGRULE, (IGNORE_MR_NAME, EXACT_MR_NAME))] + topology_st.standalone.modify_s(HOMEDIRECTORY_INDEX, mod) + + # topology_st.standalone.stop(timeout=10) + log.info("successfully checked that filter with exact mr , a filter with lowercase eq is failing") + # assert topology_st.standalone.db2index(bename=DEFAULT_BENAME, suffixes=None, attrs=['homeDirectory']) + # topology_st.standalone.start(timeout=10) + args = {TASK_WAIT: True} + topology_st.standalone.tasks.reindex(suffix=SUFFIX, attrname='homeDirectory', args=args) + + log.info("Check indexing succeeded with a specified 
matching rule") + file_obj = open(topology_st.standalone.errlog, "r") + + # Check if the MR configuration failure occurs + regex = re.compile("unknown or invalid matching rule") + while True: + line = file_obj.readline() + found = regex.search(line) + if ((line == '') or (found)): + break + + if (found): + log.info("The configuration of a specific MR fails") + log.info(line) + # assert not found + + +def test_ticket48270_homeDirectory_mixed_value(topology_st): + # Set a homedirectory value with mixed case + name = "uid=%s1,%s" % (NEW_ACCOUNT, SUFFIX) + mod = [(ldap.MOD_REPLACE, 'homeDirectory', ensure_bytes(MIXED_VALUE))] + topology_st.standalone.modify_s(name, mod) + + +def test_ticket48270_extensible_search(topology_st): + name = "uid=%s1,%s" % (NEW_ACCOUNT, SUFFIX) + + # check with the exact stored value + log.info("Default: can retrieve an entry filter syntax with exact stored value") + ent = topology_st.standalone.getEntry(name, ldap.SCOPE_BASE, "(homeDirectory=%s)" % MIXED_VALUE) + log.info("Default: can retrieve an entry filter caseExactIA5Match with exact stored value") + ent = topology_st.standalone.getEntry(name, ldap.SCOPE_BASE, "(homeDirectory:caseExactIA5Match:=%s)" % MIXED_VALUE) + + # check with a lower case value that is different from the stored value + log.info("Default: can not retrieve an entry filter syntax match with lowered stored value") + try: + ent = topology_st.standalone.getEntry(name, ldap.SCOPE_BASE, "(homeDirectory=%s)" % LOWER_VALUE) + assert ent is None + except ldap.NO_SUCH_OBJECT: + pass + log.info("Default: can not retrieve an entry filter caseExactIA5Match with lowered stored value") + try: + ent = topology_st.standalone.getEntry(name, ldap.SCOPE_BASE, + "(homeDirectory:caseExactIA5Match:=%s)" % LOWER_VALUE) + assert ent is None + except ldap.NO_SUCH_OBJECT: + pass + log.info("Default: can retrieve an entry filter caseIgnoreIA5Match with lowered stored value") + ent = topology_st.standalone.getEntry(name, ldap.SCOPE_BASE, "(homeDirectory:caseIgnoreIA5Match:=%s)" % LOWER_VALUE) + + +if __name__ == '__main__': + # Run isolated + # -s for DEBUG mode + CURRENT_FILE = os.path.realpath(__file__) + pytest.main("-s %s" % CURRENT_FILE) diff --git a/dirsrvtests/tests/tickets/ticket48272_test.py b/dirsrvtests/tests/tickets/ticket48272_test.py new file mode 100644 index 0000000..5d79d28 --- /dev/null +++ b/dirsrvtests/tests/tickets/ticket48272_test.py @@ -0,0 +1,136 @@ +import pytest +from lib389.tasks import * +from lib389.utils import * +from lib389.topologies import topology_st + +from lib389._constants import DEFAULT_SUFFIX, HOST_STANDALONE, PORT_STANDALONE + +DEBUGGING = os.getenv('DEBUGGING', False) + +if DEBUGGING: + logging.getLogger(__name__).setLevel(logging.DEBUG) +else: + logging.getLogger(__name__).setLevel(logging.INFO) + +log = logging.getLogger(__name__) +# Skip on older versions +pytestmark = [pytest.mark.tier2, + pytest.mark.skipif(ds_is_older('1.3.6'), reason="Not implemented")] + + +USER1 = 'user1' +USER1_DOMAIN = 'user1@example.com' +PW = 'password' +USER1_DN = 'uid=user1,ou=People,%s' % DEFAULT_SUFFIX +USER1_CONFLICT_DN = 'uid=user1,%s' % DEFAULT_SUFFIX + + +def _create_user(inst, name, dn): + inst.add_s(Entry(( + dn, { + 'objectClass': 'top account simplesecurityobject'.split(), + 'uid': name, + 'userpassword': PW + }))) + + +def _bind(name, cred): + # Returns true or false if it worked. 
+ if DEBUGGING: + print('test 48272 BINDING AS %s:%s' % (name, cred)) + status = True + conn = ldap.initialize("ldap://%s:%s" % (HOST_STANDALONE, PORT_STANDALONE)) + try: + conn.simple_bind_s(name, cred) + conn.unbind_s() + except ldap.INVALID_CREDENTIALS: + status = False + return status + + +def test_ticket48272(topology_st): + """ + Test the functionality of the addn bind plugin. This should allow users + of the type "name" or "name@domain.com" to bind. + """ + + # There will be a better way to do this in the future. + topology_st.standalone.add_s(Entry(( + "cn=addn,cn=plugins,cn=config", { + "objectClass": "top nsSlapdPlugin extensibleObject".split(), + "cn": "addn", + "nsslapd-pluginPath": "libaddn-plugin", + "nsslapd-pluginInitfunc": "addn_init", + "nsslapd-pluginType": "preoperation", + "nsslapd-pluginEnabled": "on", + "nsslapd-pluginId": "addn", + "nsslapd-pluginVendor": "389 Project", + "nsslapd-pluginVersion": "1.3.6.0", + "nsslapd-pluginDescription": "Allow AD DN style bind names to LDAP", + "addn_default_domain": "example.com", + } + ))) + + topology_st.standalone.add_s(Entry(( + "cn=example.com,cn=addn,cn=plugins,cn=config", { + "objectClass": "top extensibleObject".split(), + "cn": "example.com", + "addn_base": "ou=People,%s" % DEFAULT_SUFFIX, + "addn_filter": "(&(objectClass=account)(uid=%s))", + } + ))) + + topology_st.standalone.restart(60) + + # Add a user + _create_user(topology_st.standalone, USER1, USER1_DN) + + if DEBUGGING is not False: + print("Attach now") + time.sleep(20) + + # Make sure our binds still work. + assert (_bind(USER1_DN, PW)) + # Test an anonymous bind + for i in range(0, 10): + # Test bind as name + assert (_bind(USER1, PW)) + + # Make sure that name@fakedom fails + assert (_bind(USER1_DOMAIN, PW)) + + # Add a conflicting user to an alternate subtree + _create_user(topology_st.standalone, USER1, USER1_CONFLICT_DN) + # Change the plugin to search from the rootdn instead + # This means we have a conflicting user in scope now! + + topology_st.standalone.modify_s("cn=example.com,cn=addn,cn=plugins,cn=config", + [(ldap.MOD_REPLACE, 'addn_base', ensure_bytes(DEFAULT_SUFFIX))]) + topology_st.standalone.restart(60) + + # Make sure our binds still work. + assert (_bind(USER1_DN, PW)) + assert (_bind(USER1_CONFLICT_DN, PW)) + for i in range(0, 10): + + # Test bind as name fails + try: + _bind(USER1, PW) + assert (False) + except: + pass + # Test bind as name@domain fails too + try: + _bind(USER1_DOMAIN, PW) + assert (False) + except: + pass + + log.info('Test PASSED') + + +if __name__ == '__main__': + # Run isolated + # -s for DEBUG mode + CURRENT_FILE = os.path.realpath(__file__) + pytest.main("-s %s" % CURRENT_FILE) diff --git a/dirsrvtests/tests/tickets/ticket48294_test.py b/dirsrvtests/tests/tickets/ticket48294_test.py new file mode 100644 index 0000000..73df896 --- /dev/null +++ b/dirsrvtests/tests/tickets/ticket48294_test.py @@ -0,0 +1,220 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2016 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. 
+# --- END COPYRIGHT BLOCK --- +# +import logging +import time + +import ldap +import pytest +from lib389 import Entry +from lib389._constants import * +from lib389.topologies import topology_st +from lib389.utils import * + +pytestmark = pytest.mark.tier2 + +log = logging.getLogger(__name__) + +LINKEDATTR_PLUGIN = 'cn=Linked Attributes,cn=plugins,cn=config' +MANAGER_LINK = 'cn=Manager Link,' + LINKEDATTR_PLUGIN +OU_PEOPLE = 'ou=People,' + DEFAULT_SUFFIX +LINKTYPE = 'directReport' +MANAGEDTYPE = 'manager' + + +def _header(topology_st, label): + topology_st.standalone.log.info("###############################################") + topology_st.standalone.log.info("####### %s" % label) + topology_st.standalone.log.info("###############################################") + + +def check_attr_val(topology_st, dn, attr, expected): + try: + centry = topology_st.standalone.search_s(dn, ldap.SCOPE_BASE, 'uid=*') + if centry: + val = centry[0].getValue(attr) + if val.lower() == expected.lower(): + log.info('Value of %s is %s' % (attr, expected)) + else: + log.info('Value of %s is not %s, but %s' % (attr, expected, val)) + assert False + else: + log.fatal('Failed to get %s' % dn) + assert False + except ldap.LDAPError as e: + log.fatal('Failed to search ' + dn + ': ' + e.args[0]['desc']) + assert False + + +def _modrdn_entry(topology_st=None, entry_dn=None, new_rdn=None, del_old=0, new_superior=None): + assert topology_st is not None + assert entry_dn is not None + assert new_rdn is not None + + topology_st.standalone.log.info("\n\n######################### MODRDN %s ######################\n" % new_rdn) + try: + if new_superior: + topology_st.standalone.rename_s(entry_dn, new_rdn, newsuperior=new_superior, delold=del_old) + else: + topology_st.standalone.rename_s(entry_dn, new_rdn, delold=del_old) + except ldap.NO_SUCH_ATTRIBUTE: + topology_st.standalone.log.info("accepted failure due to 47833: modrdn reports error.. but succeeds") + attempt = 0 + if new_superior: + dn = "%s,%s" % (new_rdn, new_superior) + base = new_superior + else: + base = ','.join(entry_dn.split(",")[1:]) + dn = "%s, %s" % (new_rdn, base) + myfilter = entry_dn.split(',')[0] + + while attempt < 10: + try: + ent = topology_st.standalone.getEntry(dn, ldap.SCOPE_BASE, myfilter) + break + except ldap.NO_SUCH_OBJECT: + topology_st.standalone.log.info("Accept failure due to 47833: unable to find (base) a modrdn entry") + attempt += 1 + time.sleep(1) + if attempt == 10: + ent = topology_st.standalone.getEntry(base, ldap.SCOPE_SUBTREE, myfilter) + ent = topology_st.standalone.getEntry(dn, ldap.SCOPE_BASE, myfilter) + + +def test_48294_init(topology_st): + """ + Set up Linked Attribute + """ + _header(topology_st, + 'Testing Ticket 48294 - Linked Attributes plug-in - won\'t update links after MODRDN operation') + + log.info('Enable Dynamic plugins, and the linked Attrs plugin') + try: + topology_st.standalone.modify_s(DN_CONFIG, [(ldap.MOD_REPLACE, 'nsslapd-dynamic-plugins', b'on')]) + except ldap.LDAPError as e: + log.fatal('Failed to enable dynamic plugin!' + e.args[0]['desc']) + assert False + + try: + topology_st.standalone.plugins.enable(name=PLUGIN_LINKED_ATTRS) + except ValueError as e: + log.fatal('Failed to enable linked attributes plugin!' 
+ e.args[0]['desc']) + assert False + + log.info('Add the plugin config entry') + try: + topology_st.standalone.add_s(Entry((MANAGER_LINK, { + 'objectclass': 'top extensibleObject'.split(), + 'cn': 'Manager Link', + 'linkType': LINKTYPE, + 'managedType': MANAGEDTYPE + }))) + except ldap.LDAPError as e: + log.fatal('Failed to add linked attr config entry: error ' + e.args[0]['desc']) + assert False + + log.info('Add 2 entries: manager1 and employee1') + try: + topology_st.standalone.add_s(Entry(('uid=manager1,%s' % OU_PEOPLE, { + 'objectclass': 'top extensibleObject'.split(), + 'uid': 'manager1'}))) + except ldap.LDAPError as e: + log.fatal('Add manager1 failed: error ' + e.args[0]['desc']) + assert False + + try: + topology_st.standalone.add_s(Entry(('uid=employee1,%s' % OU_PEOPLE, { + 'objectclass': 'top extensibleObject'.split(), + 'uid': 'employee1'}))) + except ldap.LDAPError as e: + log.fatal('Add employee1 failed: error ' + e.args[0]['desc']) + assert False + + log.info('Add linktype to manager1') + topology_st.standalone.modify_s('uid=manager1,%s' % OU_PEOPLE, + [(ldap.MOD_ADD, LINKTYPE, ensure_bytes('uid=employee1,%s' % OU_PEOPLE))]) + + log.info('Check managed attribute') + check_attr_val(topology_st, 'uid=employee1,%s' % OU_PEOPLE, MANAGEDTYPE, ensure_bytes('uid=manager1,%s' % OU_PEOPLE)) + + log.info('PASSED') + + +def test_48294_run_0(topology_st): + """ + Rename employee1 to employee2 and adjust the value of directReport by replace + """ + _header(topology_st, 'Case 0 - Rename employee1 and adjust the link type value by replace') + + log.info('Rename employee1 to employee2') + _modrdn_entry(topology_st, entry_dn='uid=employee1,%s' % OU_PEOPLE, new_rdn='uid=employee2') + + log.info('Modify the value of directReport to uid=employee2') + try: + topology_st.standalone.modify_s('uid=manager1,%s' % OU_PEOPLE, + [(ldap.MOD_REPLACE, LINKTYPE, ensure_bytes('uid=employee2,%s' % OU_PEOPLE))]) + except ldap.LDAPError as e: + log.fatal('Failed to replace uid=employee1 with employee2: ' + e.args[0]['desc']) + assert False + + log.info('Check managed attribute') + check_attr_val(topology_st, 'uid=employee2,%s' % OU_PEOPLE, MANAGEDTYPE, ensure_bytes('uid=manager1,%s' % OU_PEOPLE)) + + log.info('PASSED') + + +def test_48294_run_1(topology_st): + """ + Rename employee2 to employee3 and adjust the value of directReport by delete and add + """ + _header(topology_st, 'Case 1 - Rename employee2 and adjust the link type value by delete and add') + + log.info('Rename employee2 to employee3') + _modrdn_entry(topology_st, entry_dn='uid=employee2,%s' % OU_PEOPLE, new_rdn='uid=employee3') + + log.info('Modify the value of directReport to uid=employee3') + try: + topology_st.standalone.modify_s('uid=manager1,%s' % OU_PEOPLE, + [(ldap.MOD_DELETE, LINKTYPE, ensure_bytes('uid=employee2,%s' % OU_PEOPLE))]) + except ldap.LDAPError as e: + log.fatal('Failed to delete employee2: ' + e.args[0]['desc']) + assert False + + try: + topology_st.standalone.modify_s('uid=manager1,%s' % OU_PEOPLE, + [(ldap.MOD_ADD, LINKTYPE, ensure_bytes('uid=employee3,%s' % OU_PEOPLE))]) + except ldap.LDAPError as e: + log.fatal('Failed to add employee3: ' + e.args[0]['desc']) + assert False + + log.info('Check managed attribute') + check_attr_val(topology_st, 'uid=employee3,%s' % OU_PEOPLE, MANAGEDTYPE, ensure_bytes('uid=manager1,%s' % OU_PEOPLE)) + + log.info('PASSED') + + +def test_48294_run_2(topology_st): + """ + Rename manager1 to manager2 and make sure the managed attribute value is updated + """ + _header(topology_st, 'Case 2 - 
Rename manager1 to manager2 and make sure the managed attribute value is updated') + + log.info('Rename manager1 to manager2') + _modrdn_entry(topology_st, entry_dn='uid=manager1,%s' % OU_PEOPLE, new_rdn='uid=manager2') + + log.info('Check managed attribute') + check_attr_val(topology_st, 'uid=employee3,%s' % OU_PEOPLE, MANAGEDTYPE, ensure_bytes('uid=manager2,%s' % OU_PEOPLE)) + + log.info('PASSED') + + +if __name__ == '__main__': + # Run isolated + # -s for DEBUG mode + CURRENT_FILE = os.path.realpath(__file__) + pytest.main("-s %s" % CURRENT_FILE) diff --git a/dirsrvtests/tests/tickets/ticket48295_test.py b/dirsrvtests/tests/tickets/ticket48295_test.py new file mode 100644 index 0000000..c175b21 --- /dev/null +++ b/dirsrvtests/tests/tickets/ticket48295_test.py @@ -0,0 +1,144 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2016 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. +# --- END COPYRIGHT BLOCK --- +# +import logging + +import ldap +import pytest +from lib389 import Entry +from lib389._constants import * +from lib389.topologies import topology_st +from lib389.utils import * + +pytestmark = pytest.mark.tier2 + +log = logging.getLogger(__name__) + +LINKEDATTR_PLUGIN = 'cn=Linked Attributes,cn=plugins,cn=config' +MANAGER_LINK = 'cn=Manager Link,' + LINKEDATTR_PLUGIN +OU_PEOPLE = 'ou=People,' + DEFAULT_SUFFIX +LINKTYPE = 'directReport' +MANAGEDTYPE = 'manager' + + +def _header(topology_st, label): + topology_st.standalone.log.info("###############################################") + topology_st.standalone.log.info("####### %s" % label) + topology_st.standalone.log.info("###############################################") + + +def check_attr_val(topology_st, dn, attr, expected, revert): + try: + centry = topology_st.standalone.search_s(dn, ldap.SCOPE_BASE, 'uid=*') + if centry: + val = centry[0].getValue(attr) + if val: + if val.lower() == expected.lower(): + if revert: + log.info('Value of %s %s exists, which should not.' % (attr, expected)) + assert False + else: + log.info('Value of %s is %s' % (attr, expected)) + else: + if revert: + log.info('NEEDINFO: Value of %s is not %s, but %s' % (attr, expected, val)) + else: + log.info('Value of %s is not %s, but %s' % (attr, expected, val)) + assert False + else: + if revert: + log.info('Value of %s does not expectedly exist' % attr) + else: + log.info('Value of %s does not exist' % attr) + assert False + else: + log.fatal('Failed to get %s' % dn) + assert False + except ldap.LDAPError as e: + log.fatal('Failed to search ' + dn + ': ' + e.args[0]['desc']) + assert False + + +def test_48295_init(topology_st): + """ + Set up Linked Attribute + """ + _header(topology_st, + 'Testing Ticket 48295 - Entry cache is not rolled back -- Linked Attributes plug-in - wrong behaviour when adding valid and broken links') + + log.info('Enable Dynamic plugins, and the linked Attrs plugin') + try: + topology_st.standalone.modify_s(DN_CONFIG, [(ldap.MOD_REPLACE, 'nsslapd-dynamic-plugins', b'on')]) + except ldap.LDAPError as e: + log.fatal('Failed to enable dynamic plugin!' + e.args[0]['desc']) + assert False + + try: + topology_st.standalone.plugins.enable(name=PLUGIN_LINKED_ATTRS) + except ValueError as e: + log.fatal('Failed to enable linked attributes plugin!' 
+ e.args[0]['desc']) + assert False + + log.info('Add the plugin config entry') + try: + topology_st.standalone.add_s(Entry((MANAGER_LINK, { + 'objectclass': 'top extensibleObject'.split(), + 'cn': 'Manager Link', + 'linkType': LINKTYPE, + 'managedType': MANAGEDTYPE + }))) + except ldap.LDAPError as e: + log.fatal('Failed to add linked attr config entry: error ' + e.args[0]['desc']) + assert False + + log.info('Add 2 entries: manager1 and employee1') + try: + topology_st.standalone.add_s(Entry(('uid=manager1,%s' % OU_PEOPLE, { + 'objectclass': 'top extensibleObject'.split(), + 'uid': 'manager1'}))) + except ldap.LDAPError as e: + log.fatal('Add manager1 failed: error ' + e.args[0]['desc']) + assert False + + try: + topology_st.standalone.add_s(Entry(('uid=employee1,%s' % OU_PEOPLE, { + 'objectclass': 'top extensibleObject'.split(), + 'uid': 'employee1'}))) + except ldap.LDAPError as e: + log.fatal('Add employee1 failed: error ' + e.args[0]['desc']) + assert False + + log.info('PASSED') + + +def test_48295_run(topology_st): + """ + Add 2 linktypes - one exists, another does not + """ + + _header(topology_st, + 'Add 2 linktypes to manager1 - one exists, another does not to make sure the managed entry does not have managed type.') + try: + topology_st.standalone.modify_s('uid=manager1,%s' % OU_PEOPLE, + [(ldap.MOD_ADD, LINKTYPE, ensure_bytes('uid=employee1,%s' % OU_PEOPLE)), + (ldap.MOD_ADD, LINKTYPE, ensure_bytes('uid=doNotExist,%s' % OU_PEOPLE))]) + except ldap.UNWILLING_TO_PERFORM: + log.info('Add uid=employee1 and uid=doNotExist expectedly failed.') + pass + + log.info('Check managed attribute does not exist.') + check_attr_val(topology_st, 'uid=employee1,%s' % OU_PEOPLE, MANAGEDTYPE, ensure_bytes('uid=manager1,%s' % OU_PEOPLE), True) + + log.info('PASSED') + + +if __name__ == '__main__': + # Run isolated + # -s for DEBUG mode + CURRENT_FILE = os.path.realpath(__file__) + pytest.main("-s %s" % CURRENT_FILE) diff --git a/dirsrvtests/tests/tickets/ticket48312_test.py b/dirsrvtests/tests/tickets/ticket48312_test.py new file mode 100644 index 0000000..d7da6a5 --- /dev/null +++ b/dirsrvtests/tests/tickets/ticket48312_test.py @@ -0,0 +1,124 @@ +import pytest +from lib389.tasks import * +from lib389.utils import * +from lib389.topologies import topology_st + +from lib389._constants import DEFAULT_SUFFIX, PLUGIN_MANAGED_ENTRY, DN_CONFIG + +pytestmark = pytest.mark.tier2 + +logging.getLogger(__name__).setLevel(logging.DEBUG) +log = logging.getLogger(__name__) + + +def test_ticket48312(topology_st): + """ + Configure managed entries plugins(tempalte/definition), then perform a + modrdn(deleteoldrdn 1), and make sure the server does not crash. + """ + + GROUP_OU = 'ou=groups,' + DEFAULT_SUFFIX + PEOPLE_OU = 'ou=people,' + DEFAULT_SUFFIX + USER_DN = 'uid=user1,ou=people,' + DEFAULT_SUFFIX + CONFIG_DN = 'cn=config,cn=' + PLUGIN_MANAGED_ENTRY + ',cn=plugins,cn=config' + TEMPLATE_DN = 'cn=MEP Template,' + DEFAULT_SUFFIX + USER_NEWRDN = 'uid=\+user1' + + # + # First enable dynamic plugins + # + try: + topology_st.standalone.modify_s(DN_CONFIG, [(ldap.MOD_REPLACE, 'nsslapd-dynamic-plugins', b'on')]) + except ldap.LDAPError as e: + log.fatal('Failed to enable dynamic plugin!' 
+ e.args[0]['desc']) + assert False + topology_st.standalone.plugins.enable(name=PLUGIN_MANAGED_ENTRY) + + # + # Add our org units (they should already exist, but do it just in case) + # + try: + topology_st.standalone.add_s(Entry((PEOPLE_OU, { + 'objectclass': 'top extensibleObject'.split(), + 'ou': 'people'}))) + except ldap.ALREADY_EXISTS: + pass + except ldap.LDAPError as e: + log.fatal('test_mep: Failed to add people org unit: error ' + e.args[0]['desc']) + assert False + + try: + topology_st.standalone.add_s(Entry((GROUP_OU, { + 'objectclass': 'top extensibleObject'.split(), + 'ou': 'people'}))) + except ldap.ALREADY_EXISTS: + pass + except ldap.LDAPError as e: + log.fatal('test_mep: Failed to add people org unit: error ' + e.args[0]['desc']) + assert False + + # + # Add the template entry + # + try: + topology_st.standalone.add_s(Entry((TEMPLATE_DN, { + 'objectclass': 'top mepTemplateEntry extensibleObject'.split(), + 'cn': 'MEP Template', + 'mepRDNAttr': 'cn', + 'mepStaticAttr': ['objectclass: posixGroup', 'objectclass: extensibleObject'], + 'mepMappedAttr': ['cn: $uid', 'uid: $cn', 'gidNumber: $uidNumber'] + }))) + except ldap.LDAPError as e: + log.fatal('test_mep: Failed to add template entry: error ' + e.args[0]['desc']) + assert False + + # + # Add the definition entry + # + try: + topology_st.standalone.add_s(Entry((CONFIG_DN, { + 'objectclass': 'top extensibleObject'.split(), + 'cn': 'config', + 'originScope': PEOPLE_OU, + 'originFilter': 'objectclass=posixAccount', + 'managedBase': GROUP_OU, + 'managedTemplate': TEMPLATE_DN + }))) + except ldap.LDAPError as e: + log.fatal('test_mep: Failed to add config entry: error ' + e.args[0]['desc']) + assert False + + # + # Add an entry that meets the MEP scope + # + try: + topology_st.standalone.add_s(Entry((USER_DN, { + 'objectclass': 'top posixAccount extensibleObject'.split(), + 'uid': 'user1', + 'cn': 'user1', + 'uidNumber': '1', + 'gidNumber': '1', + 'homeDirectory': '/home/user1', + 'description': 'uiser description' + }))) + except ldap.LDAPError as e: + log.fatal('test_mep: Failed to user1: error ' + e.args[0]['desc']) + assert False + + # + # Perform a modrdn on USER_DN + # + try: + topology_st.standalone.rename_s(USER_DN, USER_NEWRDN, delold=1) + except ldap.LDAPError as e: + log.error('Failed to modrdn: error ' + e.args[0]['desc']) + assert False + + log.info('Test complete') + + +if __name__ == '__main__': + # Run isolated + # -s for DEBUG mode + CURRENT_FILE = os.path.realpath(__file__) + pytest.main("-s %s" % CURRENT_FILE) diff --git a/dirsrvtests/tests/tickets/ticket48325_test.py b/dirsrvtests/tests/tickets/ticket48325_test.py new file mode 100644 index 0000000..5108e61 --- /dev/null +++ b/dirsrvtests/tests/tickets/ticket48325_test.py @@ -0,0 +1,132 @@ +import pytest +from lib389.utils import * +from lib389.tasks import * +from lib389.topologies import topology_m1h1c1 +from lib389.replica import ReplicationManager + +from lib389._constants import (DEFAULT_SUFFIX, REPLICA_RUV_FILTER, defaultProperties, + REPLICATION_BIND_DN, REPLICATION_BIND_PW, REPLICATION_BIND_METHOD, + REPLICATION_TRANSPORT, RA_NAME, RA_BINDDN, RA_BINDPW, + RA_METHOD, RA_TRANSPORT_PROT, SUFFIX) + +pytestmark = pytest.mark.tier2 + +logging.getLogger(__name__).setLevel(logging.DEBUG) +log = logging.getLogger(__name__) + + +def checkFirstElement(ds, rid): + """ + Return True if the first RUV element is for the specified rid + """ + try: + entry = ds.search_s(DEFAULT_SUFFIX, + ldap.SCOPE_SUBTREE, + REPLICA_RUV_FILTER, + ['nsds50ruv']) + assert entry + 
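+        # The entry found above is the replica entry for the suffix; its
+        # nsds50ruv values are read below.  Value [0] is the
+        # {replicageneration} element, so value [1] is the first per-replica
+        # element, which is the one expected to carry the given rid.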
entry = entry[0] + except ldap.LDAPError as e: + log.fatal('Failed to retrieve RUV entry: %s' % str(e)) + assert False + + ruv_elements = entry.getValues('nsds50ruv') + if ('replica %s ' % rid) in ensure_str(ruv_elements[1]): + return True + else: + return False + + +def test_ticket48325(topology_m1h1c1): + """ + Test that the RUV element order is correctly maintained when promoting + a hub or consumer. + """ + + # + # Promote consumer to master + # + C1 = topology_m1h1c1.cs["consumer1"] + M1 = topology_m1h1c1.ms["master1"] + H1 = topology_m1h1c1.hs["hub1"] + repl = ReplicationManager(DEFAULT_SUFFIX) + repl._ensure_changelog(C1) + DN = topology_m1h1c1.cs["consumer1"].replica._get_mt_entry(DEFAULT_SUFFIX) + topology_m1h1c1.cs["consumer1"].modify_s(DN, [(ldap.MOD_REPLACE, + 'nsDS5ReplicaType', + b'3'), + (ldap.MOD_REPLACE, + 'nsDS5ReplicaID', + b'1234'), + (ldap.MOD_REPLACE, + 'nsDS5Flags', + b'1')]) + time.sleep(1) + + # + # Check ruv has been reordered + # + if not checkFirstElement(topology_m1h1c1.cs["consumer1"], '1234'): + log.fatal('RUV was not reordered') + assert False + + topology_m1h1c1.ms["master1"].add_s(Entry((defaultProperties[REPLICATION_BIND_DN], + {'objectclass': 'top netscapeServer'.split(), + 'cn': 'replication manager', + 'userPassword': 'password'}))) + + DN = topology_m1h1c1.ms["master1"].replica._get_mt_entry(DEFAULT_SUFFIX) + topology_m1h1c1.ms["master1"].modify_s(DN, [(ldap.MOD_REPLACE, + 'nsDS5ReplicaBindDN', ensure_bytes(defaultProperties[REPLICATION_BIND_DN]))]) + # + # Create repl agreement from the newly promoted master to master1 + + properties = {RA_NAME: 'meTo_{}:{}'.format(topology_m1h1c1.ms["master1"].host, + str(topology_m1h1c1.ms["master1"].port)), + RA_BINDDN: defaultProperties[REPLICATION_BIND_DN], + RA_BINDPW: defaultProperties[REPLICATION_BIND_PW], + RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD], + RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]} + new_agmt = topology_m1h1c1.cs["consumer1"].agreement.create(suffix=SUFFIX, + host=topology_m1h1c1.ms["master1"].host, + port=topology_m1h1c1.ms["master1"].port, + properties=properties) + + if not new_agmt: + log.fatal("Fail to create new agmt from old consumer to the master") + assert False + + # Test replication is working + repl.test_replication(C1, M1) + + # + # Promote hub to master + # + DN = topology_m1h1c1.hs["hub1"].replica._get_mt_entry(DEFAULT_SUFFIX) + topology_m1h1c1.hs["hub1"].modify_s(DN, [(ldap.MOD_REPLACE, + 'nsDS5ReplicaType', + b'3'), + (ldap.MOD_REPLACE, + 'nsDS5ReplicaID', + b'5678')]) + time.sleep(1) + + # + # Check ruv has been reordered + # + if not checkFirstElement(topology_m1h1c1.hs["hub1"], '5678'): + log.fatal('RUV was not reordered') + assert False + + # Test replication is working + repl.test_replication(M1, H1) + + # Done + log.info('Test complete') + + +if __name__ == '__main__': + # Run isolated + # -s for DEBUG mode + CURRENT_FILE = os.path.realpath(__file__) + pytest.main("-s %s" % CURRENT_FILE) diff --git a/dirsrvtests/tests/tickets/ticket48342_test.py b/dirsrvtests/tests/tickets/ticket48342_test.py new file mode 100644 index 0000000..0db6b5f --- /dev/null +++ b/dirsrvtests/tests/tickets/ticket48342_test.py @@ -0,0 +1,142 @@ +import pytest +from lib389.tasks import * +from lib389.utils import * +from lib389.topologies import topology_m3 + +from lib389._constants import SUFFIX, DEFAULT_SUFFIX, PLUGIN_DNA + +pytestmark = pytest.mark.tier2 + +logging.getLogger(__name__).setLevel(logging.DEBUG) +log = logging.getLogger(__name__) + +PEOPLE_OU = 'people' 
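+# ou holding the test accounts; the DNA configuration added below scopes
+# dnaScope to this subtree.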
+PEOPLE_DN = "ou=%s,%s" % (PEOPLE_OU, SUFFIX) +MAX_ACCOUNTS = 5 + + +def _dna_config(server, nextValue=500, maxValue=510): + log.info("Add dna plugin config entry...%s" % server) + + try: + server.add_s(Entry(('cn=dna config,cn=Distributed Numeric Assignment Plugin,cn=plugins,cn=config', { + 'objectclass': 'top dnaPluginConfig'.split(), + 'dnaType': 'description', + 'dnaMagicRegen': '-1', + 'dnaFilter': '(objectclass=posixAccount)', + 'dnaScope': 'ou=people,%s' % SUFFIX, + 'dnaNextValue': str(nextValue), + 'dnaMaxValue': str(nextValue + maxValue), + 'dnaSharedCfgDN': 'ou=ranges,%s' % SUFFIX + }))) + + except ldap.LDAPError as e: + log.error('Failed to add DNA config entry: error ' + e.args[0]['desc']) + assert False + + log.info("Enable the DNA plugin...") + try: + server.plugins.enable(name=PLUGIN_DNA) + except e: + log.error("Failed to enable DNA Plugin: error " + e.args[0]['desc']) + assert False + + log.info("Restarting the server...") + server.stop(timeout=120) + time.sleep(1) + server.start(timeout=120) + time.sleep(3) + + +def test_ticket4026(topology_m3): + """Write your replication testcase here. + + To access each DirSrv instance use: topology_m3.ms["master1"], topology_m3.ms["master2"], + ..., topology_m3.hub1, ..., topology_m3.consumer1, ... + + Also, if you need any testcase initialization, + please, write additional fixture for that(include finalizer). + """ + + try: + topology_m3.ms["master1"].add_s(Entry((PEOPLE_DN, { + 'objectclass': "top extensibleObject".split(), + 'ou': 'people'}))) + except ldap.ALREADY_EXISTS: + pass + + topology_m3.ms["master1"].add_s(Entry(('ou=ranges,' + SUFFIX, { + 'objectclass': 'top organizationalunit'.split(), + 'ou': 'ranges' + }))) + for cpt in range(MAX_ACCOUNTS): + name = "user%d" % (cpt) + topology_m3.ms["master1"].add_s(Entry(("uid=%s,%s" % (name, PEOPLE_DN), { + 'objectclass': 'top posixAccount extensibleObject'.split(), + 'uid': name, + 'cn': name, + 'uidNumber': '1', + 'gidNumber': '1', + 'homeDirectory': '/home/%s' % name + }))) + + # make master3 having more free slots that master2 + # so master1 will contact master3 + _dna_config(topology_m3.ms["master1"], nextValue=100, maxValue=10) + _dna_config(topology_m3.ms["master2"], nextValue=200, maxValue=10) + _dna_config(topology_m3.ms["master3"], nextValue=300, maxValue=3000) + + # Turn on lots of error logging now. 
+ + mod = [(ldap.MOD_REPLACE, 'nsslapd-errorlog-level', b'16384')] + # mod = [(ldap.MOD_REPLACE, 'nsslapd-errorlog-level', '1')] + topology_m3.ms["master1"].modify_s('cn=config', mod) + topology_m3.ms["master2"].modify_s('cn=config', mod) + topology_m3.ms["master3"].modify_s('cn=config', mod) + + # We need to wait for the event in dna.c to fire to start the servers + # see dna.c line 899 + time.sleep(60) + + # add on master1 users with description DNA + for cpt in range(10): + name = "user_with_desc1_%d" % (cpt) + topology_m3.ms["master1"].add_s(Entry(("uid=%s,%s" % (name, PEOPLE_DN), { + 'objectclass': 'top posixAccount extensibleObject'.split(), + 'uid': name, + 'cn': name, + 'description': '-1', + 'uidNumber': '1', + 'gidNumber': '1', + 'homeDirectory': '/home/%s' % name + }))) + # give time to negociate master1 <--> master3 + time.sleep(10) + # add on master1 users with description DNA + for cpt in range(11, 20): + name = "user_with_desc1_%d" % (cpt) + topology_m3.ms["master1"].add_s(Entry(("uid=%s,%s" % (name, PEOPLE_DN), { + 'objectclass': 'top posixAccount extensibleObject'.split(), + 'uid': name, + 'cn': name, + 'description': '-1', + 'uidNumber': '1', + 'gidNumber': '1', + 'homeDirectory': '/home/%s' % name + }))) + log.info('Test complete') + # add on master1 users with description DNA + mod = [(ldap.MOD_REPLACE, 'nsslapd-errorlog-level', b'16384')] + # mod = [(ldap.MOD_REPLACE, 'nsslapd-errorlog-level', '1')] + topology_m3.ms["master1"].modify_s('cn=config', mod) + topology_m3.ms["master2"].modify_s('cn=config', mod) + topology_m3.ms["master3"].modify_s('cn=config', mod) + + log.info('Test complete') + + +if __name__ == '__main__': + # Run isolated + # -s for DEBUG mode + CURRENT_FILE = os.path.realpath(__file__) + pytest.main("-s %s" % CURRENT_FILE) diff --git a/dirsrvtests/tests/tickets/ticket48354_test.py b/dirsrvtests/tests/tickets/ticket48354_test.py new file mode 100644 index 0000000..73cf307 --- /dev/null +++ b/dirsrvtests/tests/tickets/ticket48354_test.py @@ -0,0 +1,57 @@ +import pytest +from lib389.utils import * +from lib389.topologies import topology_st + +from lib389._constants import DEFAULT_SUFFIX, HOST_STANDALONE, PORT_STANDALONE + +pytestmark = pytest.mark.tier2 + +DEBUGGING = os.getenv('DEBUGGING', False) + +if DEBUGGING: + logging.getLogger(__name__).setLevel(logging.DEBUG) +else: + logging.getLogger(__name__).setLevel(logging.INFO) + +log = logging.getLogger(__name__) + + +def _attr_present(conn, name): + results = conn.search_s(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE, '(%s=*)' % name, [name, ]) + if DEBUGGING: + print(results) + if len(results) > 0: + return True + return False + + +def test_ticket48354(topology_st): + """ + Test that we cannot view ACIs, userPassword, or certain other attributes as anonymous. + """ + + if DEBUGGING: + # Add debugging steps(if any)... + pass + + # Do an anonymous bind + conn = ldap.initialize("ldap://%s:%s" % (HOST_STANDALONE, PORT_STANDALONE)) + conn.simple_bind_s() + + # Make sure that we cannot see: + # * userPassword + assert (not _attr_present(conn, 'userPassword')) + # * aci + assert (not _attr_present(conn, 'aci')) + # * anything else? 
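+    # A possible extension, not part of the original test: any further
+    # attribute expected to be hidden from anonymous clients could be checked
+    # with the same helper, e.g.
+    #     assert (not _attr_present(conn, 'someSensitiveAttr'))  # hypothetical attribute name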
+ + conn.unbind_s() + + log.info('Test PASSED') + + +if __name__ == '__main__': + # Run isolated + # -s for DEBUG mode + CURRENT_FILE = os.path.realpath(__file__) + pytest.main("-s %s" % CURRENT_FILE) diff --git a/dirsrvtests/tests/tickets/ticket48362_test.py b/dirsrvtests/tests/tickets/ticket48362_test.py new file mode 100644 index 0000000..ad34801 --- /dev/null +++ b/dirsrvtests/tests/tickets/ticket48362_test.py @@ -0,0 +1,169 @@ +import pytest +from lib389.tasks import * +from lib389.utils import * +from lib389.topologies import topology_m2 + +from lib389._constants import SUFFIX, DEFAULT_SUFFIX, PLUGIN_DNA + +# Skip on older versions +pytestmark = [pytest.mark.tier2, + pytest.mark.skipif(ds_is_older('1.3.4'), reason="Not implemented")] + +logging.getLogger(__name__).setLevel(logging.DEBUG) +log = logging.getLogger(__name__) + +PEOPLE_OU = 'people' +PEOPLE_DN = "ou=%s,%s" % (PEOPLE_OU, SUFFIX) +MAX_ACCOUNTS = 5 + +BINDMETHOD_ATTR = 'dnaRemoteBindMethod' +BINDMETHOD_VALUE = b'SASL/GSSAPI' +PROTOCOLE_ATTR = 'dnaRemoteConnProtocol' +PROTOCOLE_VALUE = b'LDAP' + +SHARE_CFG_BASE = 'ou=ranges,' + SUFFIX + + +def _dna_config(server, nextValue=500, maxValue=510): + log.info("Add dna plugin config entry...%s" % server) + + cfg_base_dn = 'cn=dna config,cn=Distributed Numeric Assignment Plugin,cn=plugins,cn=config' + + try: + server.add_s(Entry((cfg_base_dn, { + 'objectclass': 'top dnaPluginConfig'.split(), + 'dnaType': 'description', + 'dnaMagicRegen': '-1', + 'dnaFilter': '(objectclass=posixAccount)', + 'dnaScope': 'ou=people,%s' % SUFFIX, + 'dnaNextValue': str(nextValue), + 'dnaMaxValue': str(nextValue + maxValue), + 'dnaSharedCfgDN': 'ou=ranges,%s' % SUFFIX + }))) + + except ldap.LDAPError as e: + log.error('Failed to add DNA config entry: error ' + e.message['desc']) + assert False + + log.info("Enable the DNA plugin...") + try: + server.plugins.enable(name=PLUGIN_DNA) + except e: + log.error("Failed to enable DNA Plugin: error " + e.message['desc']) + assert False + + log.info("Restarting the server...") + server.stop(timeout=120) + time.sleep(1) + server.start(timeout=120) + time.sleep(3) + + +def _wait_shared_cfg_servers(server, expected): + attempts = 0 + ents = [] + try: + ents = server.search_s(SHARE_CFG_BASE, ldap.SCOPE_ONELEVEL, "(objectclass=*)") + except ldap.NO_SUCH_OBJECT: + pass + except lib389.NoSuchEntryError: + pass + while (len(ents) != expected): + assert attempts < 10 + time.sleep(5) + try: + ents = server.search_s(SHARE_CFG_BASE, ldap.SCOPE_ONELEVEL, "(objectclass=*)") + except ldap.NO_SUCH_OBJECT: + pass + except lib389.NoSuchEntryError: + pass + + +def _shared_cfg_server_update(server, method=BINDMETHOD_VALUE, transport=PROTOCOLE_VALUE): + log.info('\n======================== Update dnaPortNum=%d ============================\n' % server.port) + try: + ent = server.getEntry(SHARE_CFG_BASE, ldap.SCOPE_ONELEVEL, "(dnaPortNum=%d)" % server.port) + mod = [(ldap.MOD_REPLACE, BINDMETHOD_ATTR, ensure_bytes(method)), + (ldap.MOD_REPLACE, PROTOCOLE_ATTR, ensure_bytes(transport))] + server.modify_s(ent.dn, mod) + + log.info('\n======================== Update done\n') + ent = server.getEntry(SHARE_CFG_BASE, ldap.SCOPE_ONELEVEL, "(dnaPortNum=%d)" % server.port) + except ldap.NO_SUCH_OBJECT: + log.fatal("Unknown host") + assert False + + +def test_ticket48362(topology_m2): + """Write your replication testcase here. + + To access each DirSrv instance use: topology_m2.ms["master1"], topology_m2.ms["master2"], + ..., topology_m2.hub1, ..., topology_m2.consumer1, ... 
+ + Also, if you need any testcase initialization, + please, write additional fixture for that(include finalizer). + """ + + try: + topology_m2.ms["master1"].add_s(Entry((PEOPLE_DN, { + 'objectclass': "top extensibleObject".split(), + 'ou': 'people'}))) + except ldap.ALREADY_EXISTS: + pass + + topology_m2.ms["master1"].add_s(Entry((SHARE_CFG_BASE, { + 'objectclass': 'top organizationalunit'.split(), + 'ou': 'ranges' + }))) + # master 1 will have a valid remaining range (i.e. 101) + # master 2 will not have a valid remaining range (i.e. 0) so dna servers list on master2 + # will not contain master 2. So at restart, master 2 is recreated without the method/protocol attribute + _dna_config(topology_m2.ms["master1"], nextValue=1000, maxValue=100) + _dna_config(topology_m2.ms["master2"], nextValue=2000, maxValue=-1) + + # check we have all the servers available + _wait_shared_cfg_servers(topology_m2.ms["master1"], 2) + _wait_shared_cfg_servers(topology_m2.ms["master2"], 2) + + # now force the method/transport on the servers entry + _shared_cfg_server_update(topology_m2.ms["master1"]) + _shared_cfg_server_update(topology_m2.ms["master2"]) + + log.info('\n======================== BEFORE RESTART ============================\n') + ent = topology_m2.ms["master1"].getEntry(SHARE_CFG_BASE, ldap.SCOPE_ONELEVEL, + "(dnaPortNum=%d)" % topology_m2.ms["master1"].port) + log.info('\n======================== BEFORE RESTART ============================\n') + assert (ent.hasAttr(BINDMETHOD_ATTR) and ent.getValue(BINDMETHOD_ATTR) == BINDMETHOD_VALUE) + assert (ent.hasAttr(PROTOCOLE_ATTR) and ent.getValue(PROTOCOLE_ATTR) == PROTOCOLE_VALUE) + + ent = topology_m2.ms["master2"].getEntry(SHARE_CFG_BASE, ldap.SCOPE_ONELEVEL, + "(dnaPortNum=%d)" % topology_m2.ms["master2"].port) + log.info('\n======================== BEFORE RESTART ============================\n') + assert (ent.hasAttr(BINDMETHOD_ATTR) and ent.getValue(BINDMETHOD_ATTR) == BINDMETHOD_VALUE) + assert (ent.hasAttr(PROTOCOLE_ATTR) and ent.getValue(PROTOCOLE_ATTR) == PROTOCOLE_VALUE) + topology_m2.ms["master1"].restart(10) + topology_m2.ms["master2"].restart(10) + + # to allow DNA plugin to recreate the local host entry + time.sleep(40) + + log.info('\n=================== AFTER RESTART =================================\n') + ent = topology_m2.ms["master1"].getEntry(SHARE_CFG_BASE, ldap.SCOPE_ONELEVEL, + "(dnaPortNum=%d)" % topology_m2.ms["master1"].port) + log.info('\n=================== AFTER RESTART =================================\n') + assert (ent.hasAttr(BINDMETHOD_ATTR) and ent.getValue(BINDMETHOD_ATTR) == BINDMETHOD_VALUE) + assert (ent.hasAttr(PROTOCOLE_ATTR) and ent.getValue(PROTOCOLE_ATTR) == PROTOCOLE_VALUE) + + ent = topology_m2.ms["master2"].getEntry(SHARE_CFG_BASE, ldap.SCOPE_ONELEVEL, + "(dnaPortNum=%d)" % topology_m2.ms["master2"].port) + log.info('\n=================== AFTER RESTART =================================\n') + assert (ent.hasAttr(BINDMETHOD_ATTR) and ent.getValue(BINDMETHOD_ATTR) == BINDMETHOD_VALUE) + assert (ent.hasAttr(PROTOCOLE_ATTR) and ent.getValue(PROTOCOLE_ATTR) == PROTOCOLE_VALUE) + log.info('Test complete') + + +if __name__ == '__main__': + # Run isolated + # -s for DEBUG mode + CURRENT_FILE = os.path.realpath(__file__) + pytest.main("-s %s" % CURRENT_FILE) diff --git a/dirsrvtests/tests/tickets/ticket48366_test.py b/dirsrvtests/tests/tickets/ticket48366_test.py new file mode 100644 index 0000000..d30697d --- /dev/null +++ b/dirsrvtests/tests/tickets/ticket48366_test.py @@ -0,0 +1,148 @@ +# --- BEGIN COPYRIGHT BLOCK 
--- +# Copyright (C) 2016 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. +# --- END COPYRIGHT BLOCK --- +# +import logging + +import ldap +import pytest +from ldap.controls.simple import ProxyAuthzControl +from lib389 import Entry +from lib389._constants import * +from lib389.topologies import topology_st + +log = logging.getLogger(__name__) + +from lib389.utils import * + +# Skip on older versions +pytestmark = [pytest.mark.tier2, + pytest.mark.skipif(ds_is_older('1.3.5'), reason="Not implemented")] +PROXY_USER_DN = 'cn=proxy,ou=people,%s' % SUFFIX +TEST_USER_DN = 'cn=test,ou=people,%s' % SUFFIX +USER_PW = 'password' + +# subtrees used in test +SUBTREE_GREEN = "ou=green,%s" % SUFFIX +SUBTREE_RED = "ou=red,%s" % SUFFIX +SUBTREES = (SUBTREE_GREEN, SUBTREE_RED) + + +def test_ticket48366_init(topology_st): + """ + It creates identical entries in 3 subtrees + It creates aci which allow access to a set of attrs + in two of these subtrees for bound users + It creates a user to be used for test + + """ + + topology_st.standalone.log.info("Add subtree: %s" % SUBTREE_GREEN) + topology_st.standalone.add_s(Entry((SUBTREE_GREEN, { + 'objectclass': "top organizationalunit".split(), + 'ou': "green_one"}))) + topology_st.standalone.log.info("Add subtree: %s" % SUBTREE_RED) + topology_st.standalone.add_s(Entry((SUBTREE_RED, { + 'objectclass': "top organizationalunit".split(), + 'ou': "red"}))) + + # add proxy user and test user + topology_st.standalone.log.info("Add %s" % TEST_USER_DN) + topology_st.standalone.add_s(Entry((TEST_USER_DN, { + 'objectclass': "top person".split(), + 'sn': 'test', + 'cn': 'test', + 'userpassword': USER_PW}))) + topology_st.standalone.log.info("Add %s" % PROXY_USER_DN) + topology_st.standalone.add_s(Entry((PROXY_USER_DN, { + 'objectclass': "top person".split(), + 'sn': 'proxy', + 'cn': 'proxy', + 'userpassword': USER_PW}))) + + # enable acl error logging + # mod = [(ldap.MOD_REPLACE, 'nsslapd-errorlog-level', '128')] + # topology_st.standalone.modify_s(DN_CONFIG, mod) + + # get rid of default ACIs + mod = [(ldap.MOD_DELETE, 'aci', None)] + topology_st.standalone.modify_s(SUFFIX, mod) + + # Ok Now add the proper ACIs + ACI_TARGET = "(target = \"ldap:///%s\")" % SUBTREE_GREEN + ACI_TARGETATTR = "(targetattr = \"objectclass || cn || sn || uid || givenname \")" + ACI_ALLOW = "(version 3.0; acl \"Allow search-read to green subtree\"; allow (read, search, compare)" + ACI_SUBJECT = " userdn = \"ldap:///%s\";)" % TEST_USER_DN + ACI_BODY = ACI_TARGET + ACI_TARGETATTR + ACI_ALLOW + ACI_SUBJECT + mod = [(ldap.MOD_ADD, 'aci', ensure_bytes(ACI_BODY))] + topology_st.standalone.modify_s(SUFFIX, mod) + + ACI_ALLOW = "(version 3.0; acl \"Allow use pf proxy auth to green subtree\"; allow (proxy)" + ACI_SUBJECT = " userdn = \"ldap:///%s\";)" % PROXY_USER_DN + ACI_BODY = ACI_TARGET + ACI_TARGETATTR + ACI_ALLOW + ACI_SUBJECT + mod = [(ldap.MOD_ADD, 'aci', ensure_bytes(ACI_BODY))] + topology_st.standalone.modify_s(SUFFIX, mod) + + log.info("Adding %d test entries...") + for id in range(2): + name = "%s%d" % ('test', id) + mail = "%s@example.com" % name + for subtree in SUBTREES: + topology_st.standalone.add_s(Entry(("cn=%s,%s" % (name, subtree), { + 'objectclass': "top person organizationalPerson inetOrgPerson".split(), + 'sn': name, + 'cn': name, + 'uid': name, + 'givenname': 'test', + 'mail': mail, + 'description': 'description', + 'employeenumber': "%d" % id, + 'telephonenumber': "%d%d%d" % (id, id, id), + 'mobile': "%d%d%d" 
% (id, id, id), + 'l': 'MV', + 'title': 'Engineer'}))) + + +def test_ticket48366_search_user(topology_st): + proxy_ctrl = ProxyAuthzControl(criticality=True, authzId=ensure_bytes("dn: " + TEST_USER_DN)) + # searching as test user should return one entry from the green subtree + topology_st.standalone.simple_bind_s(TEST_USER_DN, PASSWORD) + ents = topology_st.standalone.search_s(SUFFIX, ldap.SCOPE_SUBTREE, 'uid=test1') + assert (len(ents) == 1) + + # searching as proxy user should return no entry + topology_st.standalone.simple_bind_s(PROXY_USER_DN, PASSWORD) + ents = topology_st.standalone.search_s(SUFFIX, ldap.SCOPE_SUBTREE, 'uid=test1') + assert (len(ents) == 0) + + # serching as proxy user, authorizing as test user should return 1 entry + ents = topology_st.standalone.search_ext_s(SUFFIX, ldap.SCOPE_SUBTREE, 'uid=test1', serverctrls=[proxy_ctrl]) + assert (len(ents) == 1) + + +def test_ticket48366_search_dm(topology_st): + # searching as directory manager should return one entries from both subtrees + topology_st.standalone.simple_bind_s(DN_DM, PASSWORD) + ents = topology_st.standalone.search_s(SUFFIX, ldap.SCOPE_SUBTREE, 'uid=test1') + assert (len(ents) == 2) + + # searching as directory manager proxying test user should return one entry + proxy_ctrl = ProxyAuthzControl(criticality=True, authzId=ensure_bytes("dn: " + TEST_USER_DN)) + ents = topology_st.standalone.search_ext_s(SUFFIX, ldap.SCOPE_SUBTREE, 'uid=test1', serverctrls=[proxy_ctrl]) + assert (len(ents) == 1) + + # searching as directory manager proxying proxy user should return no entry + proxy_ctrl = ProxyAuthzControl(criticality=True, authzId=ensure_bytes("dn: " + PROXY_USER_DN)) + ents = topology_st.standalone.search_ext_s(SUFFIX, ldap.SCOPE_SUBTREE, 'uid=test1', serverctrls=[proxy_ctrl]) + assert (len(ents) == 0) + + +if __name__ == '__main__': + # Run isolated + # -s for DEBUG mode + CURRENT_FILE = os.path.realpath(__file__) + pytest.main("-s %s" % CURRENT_FILE) diff --git a/dirsrvtests/tests/tickets/ticket48370_test.py b/dirsrvtests/tests/tickets/ticket48370_test.py new file mode 100644 index 0000000..836888f --- /dev/null +++ b/dirsrvtests/tests/tickets/ticket48370_test.py @@ -0,0 +1,194 @@ +import pytest +from lib389.tasks import * +from lib389.utils import * +from lib389.topologies import topology_st + +pytestmark = pytest.mark.tier2 + +logging.getLogger(__name__).setLevel(logging.DEBUG) +log = logging.getLogger(__name__) + + +def test_ticket48370(topology_st): + """ + Deleting attirbute values and readding a value does not properly update + the pres index. 
The values are not actually deleted from the index + """ + + DN = 'uid=user0099,' + DEFAULT_SUFFIX + + # + # Add an entry + # + topology_st.standalone.add_s(Entry((DN, { + 'objectclass': ['top', 'person', + 'organizationalPerson', + 'inetorgperson', + 'posixAccount'], + 'givenname': 'test', + 'sn': 'user', + 'loginshell': '/bin/bash', + 'uidNumber': '10099', + 'gidNumber': '10099', + 'gecos': 'Test User', + 'mail': ['user0099@dev.null', + 'alias@dev.null', + 'user0099@redhat.com'], + 'cn': 'Test User', + 'homeDirectory': '/home/user0099', + 'uid': 'admin2', + 'userpassword': 'password'}))) + + # + # Perform modify (delete & add mail attributes) + # + try: + topology_st.standalone.modify_s(DN, [(ldap.MOD_DELETE, + 'mail', + b'user0099@dev.null'), + (ldap.MOD_DELETE, + 'mail', + b'alias@dev.null'), + (ldap.MOD_ADD, + 'mail', b'user0099@dev.null')]) + except ldap.LDAPError as e: + log.fatal('Failedto modify user: ' + str(e)) + assert False + + # + # Search using deleted attribute value- no entries should be returned + # + try: + entry = topology_st.standalone.search_s(DEFAULT_SUFFIX, + ldap.SCOPE_SUBTREE, + 'mail=alias@dev.null') + if entry: + log.fatal('Entry incorrectly returned') + assert False + except ldap.LDAPError as e: + log.fatal('Failed to search for user: ' + str(e)) + assert False + + # + # Search using existing attribute value - the entry should be returned + # + try: + entry = topology_st.standalone.search_s(DEFAULT_SUFFIX, + ldap.SCOPE_SUBTREE, + 'mail=user0099@dev.null') + if entry is None: + log.fatal('Entry not found, but it should have been') + assert False + except ldap.LDAPError as e: + log.fatal('Failed to search for user: ' + str(e)) + assert False + + # + # Delete the last values + # + try: + topology_st.standalone.modify_s(DN, [(ldap.MOD_DELETE, + 'mail', + b'user0099@dev.null'), + (ldap.MOD_DELETE, + 'mail', + b'user0099@redhat.com') + ]) + except ldap.LDAPError as e: + log.fatal('Failed to modify user: ' + str(e)) + assert False + + # + # Search using deleted attribute value - no entries should be returned + # + try: + entry = topology_st.standalone.search_s(DEFAULT_SUFFIX, + ldap.SCOPE_SUBTREE, + 'mail=user0099@redhat.com') + if entry: + log.fatal('Entry incorrectly returned') + assert False + except ldap.LDAPError as e: + log.fatal('Failed to search for user: ' + str(e)) + assert False + + # + # Make sure presence index is correctly updated - no entries should be + # returned + # + try: + entry = topology_st.standalone.search_s(DEFAULT_SUFFIX, + ldap.SCOPE_SUBTREE, + 'mail=*') + if entry: + log.fatal('Entry incorrectly returned') + assert False + except ldap.LDAPError as e: + log.fatal('Failed to search for user: ' + str(e)) + assert False + + # + # Now add the attributes back, and lets run a different set of tests with + # a different number of attributes + # + try: + topology_st.standalone.modify_s(DN, [(ldap.MOD_ADD, + 'mail', + [b'user0099@dev.null', + b'alias@dev.null'])]) + except ldap.LDAPError as e: + log.fatal('Failedto modify user: ' + str(e)) + assert False + + # + # Remove and readd some attibutes + # + try: + topology_st.standalone.modify_s(DN, [(ldap.MOD_DELETE, + 'mail', + b'alias@dev.null'), + (ldap.MOD_DELETE, + 'mail', + b'user0099@dev.null'), + (ldap.MOD_ADD, + 'mail', b'user0099@dev.null')]) + except ldap.LDAPError as e: + log.fatal('Failedto modify user: ' + str(e)) + assert False + + # + # Search using deleted attribute value - no entries should be returned + # + try: + entry = topology_st.standalone.search_s(DEFAULT_SUFFIX, + 
ldap.SCOPE_SUBTREE, + 'mail=alias@dev.null') + if entry: + log.fatal('Entry incorrectly returned') + assert False + except ldap.LDAPError as e: + log.fatal('Failed to search for user: ' + str(e)) + assert False + + # + # Search using existing attribute value - the entry should be returned + # + try: + entry = topology_st.standalone.search_s(DEFAULT_SUFFIX, + ldap.SCOPE_SUBTREE, + 'mail=user0099@dev.null') + if entry is None: + log.fatal('Entry not found, but it should have been') + assert False + except ldap.LDAPError as e: + log.fatal('Failed to search for user: ' + str(e)) + assert False + + log.info('Test PASSED') + + +if __name__ == '__main__': + # Run isolated + # -s for DEBUG mode + CURRENT_FILE = os.path.realpath(__file__) + pytest.main("-s %s" % CURRENT_FILE) diff --git a/dirsrvtests/tests/tickets/ticket48383_test.py b/dirsrvtests/tests/tickets/ticket48383_test.py new file mode 100644 index 0000000..7d52a8f --- /dev/null +++ b/dirsrvtests/tests/tickets/ticket48383_test.py @@ -0,0 +1,95 @@ +import random +import string + +import pytest +from lib389.tasks import * +from lib389.utils import * +from lib389.topologies import topology_st + +from lib389._constants import DEFAULT_SUFFIX, SERVERID_STANDALONE + +pytestmark = pytest.mark.tier2 + +logging.getLogger(__name__).setLevel(logging.DEBUG) +log = logging.getLogger(__name__) + + +def test_ticket48383(topology_st): + """ + This test case will check that we re-alloc buffer sizes on import.c + + We achieve this by setting the servers dbcachesize to a stupid small value + and adding huge objects to ds. + + Then when we run db2index, either: + * If we are not using the re-alloc code, it will FAIL (Bad) + * If we re-alloc properly, it all works regardless. + """ + + topology_st.standalone.config.set('nsslapd-maxbersize', '200000000') + topology_st.standalone.restart() + + # Create some stupid huge objects / attributes in DS. + # seeAlso is indexed by default. Lets do that! + # This will take a while ... + data = [random.choice(string.ascii_letters) for x in range(10000000)] + s = "".join(data) + + # This was here for an iteration test. + i = 1 + USER_DN = 'uid=user%s,ou=people,%s' % (i, DEFAULT_SUFFIX) + padding = ['%s' % n for n in range(400)] + + user = Entry((USER_DN, { + 'objectclass': 'top posixAccount person extensibleObject'.split(), + 'uid': 'user%s' % (i), + 'cn': 'user%s' % (i), + 'uidNumber': '%s' % (i), + 'gidNumber': '%s' % (i), + 'homeDirectory': '/home/user%s' % (i), + 'description': 'user description', + 'sn': s, + 'padding': padding, + })) + + topology_st.standalone.add_s(user) + + # Set the dbsize really low. + try: + topology_st.standalone.modify_s(DEFAULT_BENAME, [(ldap.MOD_REPLACE, + 'nsslapd-cachememsize', b'1')]) + except ldap.LDAPError as e: + log.fatal('Failed to change nsslapd-cachememsize {}'.format(e.args[0]['desc'])) + + ## Does ds try and set a minimum possible value for this? + ## Yes: [16/Feb/2016:16:39:18 +1000] - WARNING: cache too small, increasing to 500K bytes + # Given the formula, by default, this means DS will make the buffsize 400k + # So an object with a 1MB attribute should break indexing + + ldifpath = os.path.join(topology_st.standalone.get_ldif_dir(), "%s.ldif" % SERVERID_STANDALONE) + + # stop the server + topology_st.standalone.stop() + # Now export and import the DB. It's easier than db2index ... 
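+    # (Descriptive note) The db2ldif/ldif2db round trip below exercises the
+    # same import.c buffer re-allocation that the docstring describes: if the
+    # import buffers were not grown, importing the oversized attribute values
+    # added above would fail and the later search for user1 would find nothing.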
+ topology_st.standalone.db2ldif(bename=DEFAULT_BENAME, suffixes=[DEFAULT_SUFFIX], excludeSuffixes=[], + encrypt=False, repl_data=True, outputfile=ldifpath) + + result = topology_st.standalone.ldif2db(DEFAULT_BENAME, None, None, False, ldifpath) + + assert (result) + topology_st.standalone.start() + + # see if user1 exists at all .... + + result_user = topology_st.standalone.search_s(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE, '(uid=user1)') + + assert (len(result_user) > 0) + + log.info('Test complete') + + +if __name__ == '__main__': + # Run isolated + # -s for DEBUG mode + CURRENT_FILE = os.path.realpath(__file__) + pytest.main("-s %s" % CURRENT_FILE) diff --git a/dirsrvtests/tests/tickets/ticket48497_test.py b/dirsrvtests/tests/tickets/ticket48497_test.py new file mode 100644 index 0000000..df10b6b --- /dev/null +++ b/dirsrvtests/tests/tickets/ticket48497_test.py @@ -0,0 +1,114 @@ +import pytest +from lib389.tasks import * +from lib389.utils import * +from lib389.topologies import topology_st + +from lib389._constants import DEFAULT_SUFFIX, SUFFIX + +pytestmark = pytest.mark.tier2 + +logging.getLogger(__name__).setLevel(logging.DEBUG) +log = logging.getLogger(__name__) + +NEW_ACCOUNT = "new_account" +MAX_ACCOUNTS = 20 + +MIXED_VALUE = "/home/mYhOmEdIrEcToRy" +LOWER_VALUE = "/home/myhomedirectory" +HOMEDIRECTORY_INDEX = 'cn=homeDirectory,cn=index,cn=userRoot,cn=ldbm database,cn=plugins,cn=config' +HOMEDIRECTORY_CN = "homedirectory" +MATCHINGRULE = 'nsMatchingRule' +UIDNUMBER_INDEX = 'cn=uidnumber,cn=index,cn=userRoot,cn=ldbm database,cn=plugins,cn=config' +UIDNUMBER_CN = "uidnumber" + + +def test_ticket48497_init(topology_st): + log.info("Initialization: add dummy entries for the tests") + for cpt in range(MAX_ACCOUNTS): + name = "%s%d" % (NEW_ACCOUNT, cpt) + topology_st.standalone.add_s(Entry(("uid=%s,%s" % (name, SUFFIX), { + 'objectclass': "top posixAccount".split(), + 'uid': name, + 'cn': name, + 'uidnumber': str(111), + 'gidnumber': str(222), + 'homedirectory': "/home/tb_%d" % cpt}))) + + +def test_ticket48497_homeDirectory_mixed_value(topology_st): + # Set a homedirectory value with mixed case + name = "uid=%s1,%s" % (NEW_ACCOUNT, SUFFIX) + mod = [(ldap.MOD_REPLACE, 'homeDirectory', ensure_bytes(MIXED_VALUE))] + topology_st.standalone.modify_s(name, mod) + + +def test_ticket48497_extensible_search(topology_st): + name = "uid=%s1,%s" % (NEW_ACCOUNT, SUFFIX) + + # check with the exact stored value + log.info("Default: can retrieve an entry filter syntax with exact stored value") + ent = topology_st.standalone.getEntry(name, ldap.SCOPE_BASE, "(homeDirectory=%s)" % MIXED_VALUE) + log.info("Default: can retrieve an entry filter caseExactIA5Match with exact stored value") + ent = topology_st.standalone.getEntry(name, ldap.SCOPE_BASE, "(homeDirectory:caseExactIA5Match:=%s)" % MIXED_VALUE) + + # check with a lower case value that is different from the stored value + log.info("Default: can not retrieve an entry filter syntax match with lowered stored value") + try: + ent = topology_st.standalone.getEntry(name, ldap.SCOPE_BASE, "(homeDirectory=%s)" % LOWER_VALUE) + assert ent is None + except ldap.NO_SUCH_OBJECT: + pass + log.info("Default: can not retrieve an entry filter caseExactIA5Match with lowered stored value") + try: + ent = topology_st.standalone.getEntry(name, ldap.SCOPE_BASE, + "(homeDirectory:caseExactIA5Match:=%s)" % LOWER_VALUE) + assert ent is None + except ldap.NO_SUCH_OBJECT: + pass + log.info("Default: can retrieve an entry filter caseIgnoreIA5Match with lowered stored value") + 
ent = topology_st.standalone.getEntry(name, ldap.SCOPE_BASE, "(homeDirectory:caseIgnoreIA5Match:=%s)" % LOWER_VALUE) + + +def test_ticket48497_homeDirectory_index_cfg(topology_st): + log.info("\n\nindex homeDirectory in caseIgnoreIA5Match and caseExactIA5Match") + try: + ent = topology_st.standalone.getEntry(HOMEDIRECTORY_INDEX, ldap.SCOPE_BASE) + except ldap.NO_SUCH_OBJECT: + topology_st.standalone.add_s(Entry((HOMEDIRECTORY_INDEX, { + 'objectclass': "top nsIndex".split(), + 'cn': HOMEDIRECTORY_CN, + 'nsSystemIndex': 'false', + 'nsIndexType': 'eq'}))) + + IGNORE_MR_NAME = b'caseIgnoreIA5Match' + EXACT_MR_NAME = b'caseExactIA5Match' + mod = [(ldap.MOD_REPLACE, MATCHINGRULE, (IGNORE_MR_NAME, EXACT_MR_NAME))] + topology_st.standalone.modify_s(HOMEDIRECTORY_INDEX, mod) + + +def test_ticket48497_homeDirectory_index_run(topology_st): + args = {TASK_WAIT: True} + topology_st.standalone.tasks.reindex(suffix=SUFFIX, attrname='homeDirectory', args=args) + + log.info("Check indexing succeeded with a specified matching rule") + file_obj = open(topology_st.standalone.errlog, "r") + + # Check if the MR configuration failure occurs + regex = re.compile("unknown or invalid matching rule") + while True: + line = file_obj.readline() + found = regex.search(line) + if ((line == '') or (found)): + break + + if (found): + log.info("The configuration of a specific MR fails") + log.info(line) + assert 0 + + +if __name__ == '__main__': + # Run isolated + # -s for DEBUG mode + CURRENT_FILE = os.path.realpath(__file__) + pytest.main("-s %s" % CURRENT_FILE) diff --git a/dirsrvtests/tests/tickets/ticket48637_test.py b/dirsrvtests/tests/tickets/ticket48637_test.py new file mode 100644 index 0000000..d33c861 --- /dev/null +++ b/dirsrvtests/tests/tickets/ticket48637_test.py @@ -0,0 +1,150 @@ +import pytest +from lib389.tasks import * +from lib389.utils import * +from lib389.topologies import topology_st + +pytestmark = pytest.mark.tier2 + +DEBUGGING = os.getenv('DEBUGGING', False) + +if DEBUGGING: + logging.getLogger(__name__).setLevel(logging.DEBUG) +else: + logging.getLogger(__name__).setLevel(logging.INFO) + +log = logging.getLogger(__name__) + +USER_DN = "uid=test,ou=people,dc=example,dc=com" +GROUP_DN = "cn=group,dc=example,dc=com" +GROUP_OU = "ou=groups,dc=example,dc=com" +PEOPLE_OU = "ou=people,dc=example,dc=com" +MEP_OU = "ou=mep,dc=example,dc=com" +MEP_TEMPLATE = "cn=mep template,dc=example,dc=com" +AUTO_DN = "cn=All Users,cn=Auto Membership Plugin,cn=plugins,cn=config" +MEP_DN = "cn=MEP Definition,cn=Managed Entries,cn=plugins,cn=config" + + +def test_ticket48637(topology_st): + """Test for entry cache corruption + + This requires automember and managed entry plugins to be configured. + + Then remove the group that automember would use to trigger a failure when + adding a new entry. Automember fails, and then managed entry also fails. + + Make sure a base search on the entry returns error 32 + """ + + if DEBUGGING: + # Add debugging steps(if any)... 
+ pass + + # + # Add our setup entries + # + try: + topology_st.standalone.add_s(Entry((PEOPLE_OU, { + 'objectclass': 'top organizationalunit'.split(), + 'ou': 'people'}))) + except ldap.ALREADY_EXISTS: + pass + except ldap.LDAPError as e: + log.fatal('Failed to add people ou: ' + str(e)) + assert False + + try: + topology_st.standalone.add_s(Entry((GROUP_OU, { + 'objectclass': 'top organizationalunit'.split(), + 'ou': 'groups'}))) + except ldap.ALREADY_EXISTS: + pass + except ldap.LDAPError as e: + log.fatal('Failed to add groups ou: ' + str(e)) + assert False + + try: + topology_st.standalone.add_s(Entry((MEP_OU, { + 'objectclass': 'top extensibleObject'.split(), + 'ou': 'mep'}))) + except ldap.LDAPError as e: + log.fatal('Failed to add MEP ou: ' + str(e)) + assert False + + try: + topology_st.standalone.add_s(Entry((MEP_TEMPLATE, { + 'objectclass': 'top mepTemplateEntry'.split(), + 'cn': 'mep template', + 'mepRDNAttr': 'cn', + 'mepStaticAttr': 'objectclass: groupofuniquenames', + 'mepMappedAttr': 'cn: $uid'}))) + except ldap.LDAPError as e: + log.fatal('Failed to add MEP ou: ' + str(e)) + assert False + + # + # Configure automember + # + try: + topology_st.standalone.add_s(Entry((AUTO_DN, { + 'cn': 'All Users', + 'objectclass': ['top', 'autoMemberDefinition'], + 'autoMemberScope': 'dc=example,dc=com', + 'autoMemberFilter': 'objectclass=person', + 'autoMemberDefaultGroup': GROUP_DN, + 'autoMemberGroupingAttr': 'uniquemember:dn'}))) + except ldap.LDAPError as e: + log.fatal('Failed to configure automember plugin : ' + str(e)) + assert False + + # + # Configure managed entry plugin + # + try: + topology_st.standalone.add_s(Entry((MEP_DN, { + 'cn': 'MEP Definition', + 'objectclass': ['top', 'extensibleObject'], + 'originScope': 'ou=people,dc=example,dc=com', + 'originFilter': 'objectclass=person', + 'managedBase': 'ou=groups,dc=example,dc=com', + 'managedTemplate': MEP_TEMPLATE}))) + except ldap.LDAPError as e: + log.fatal('Failed to configure managed entry plugin : ' + str(e)) + assert False + + # + # Restart DS + # + topology_st.standalone.restart(timeout=30) + + # + # Add entry that should fail since the automember group does not exist + # + try: + topology_st.standalone.add_s(Entry((USER_DN, { + 'uid': 'test', + 'objectclass': ['top', 'person', 'extensibleObject'], + 'sn': 'test', + 'cn': 'test'}))) + except ldap.LDAPError as e: + pass + + # + # Search for the entry - it should not be returned + # + try: + entry = topology_st.standalone.search_s(USER_DN, ldap.SCOPE_SUBTREE, + 'objectclass=*') + if entry: + log.fatal('Entry was incorrectly returned') + assert False + except ldap.NO_SUCH_OBJECT: + pass + + log.info('Test PASSED') + + +if __name__ == '__main__': + # Run isolated + # -s for DEBUG mode + CURRENT_FILE = os.path.realpath(__file__) + pytest.main("-s %s" % CURRENT_FILE) diff --git a/dirsrvtests/tests/tickets/ticket48665_test.py b/dirsrvtests/tests/tickets/ticket48665_test.py new file mode 100644 index 0000000..4216a3b --- /dev/null +++ b/dirsrvtests/tests/tickets/ticket48665_test.py @@ -0,0 +1,72 @@ +import pytest +from lib389.utils import * +from lib389.topologies import topology_st + +from lib389._constants import DEFAULT_SUFFIX, DEFAULT_BENAME + +pytestmark = pytest.mark.tier2 + +logging.getLogger(__name__).setLevel(logging.DEBUG) +log = logging.getLogger(__name__) + + +def test_ticket48665(topology_st): + """ + This tests deletion of certain cn=config values. + + First, it should be able to delete, and not crash the server. 
+ + Second, we might be able to delete then add to replace values. + + We should also still be able to mod replace the values and keep the server alive. + """ + # topology_st.standalone.config.enable_log('audit') + # topology_st.standalone.config.enable_log('auditfail') + # This will trigger a mod delete then add. + + topology_st.standalone.modify_s('cn=config,cn=ldbm database,cn=plugins,cn=config', + [(ldap.MOD_REPLACE, 'nsslapd-cache-autosize', b'0')]) + + try: + modlist = [(ldap.MOD_DELETE, 'nsslapd-cachememsize', None), (ldap.MOD_ADD, 'nsslapd-cachememsize', b'1')] + topology_st.standalone.modify_s("cn=%s,cn=ldbm database,cn=plugins,cn=config" % DEFAULT_BENAME, + modlist) + except: + pass + + # Check the server has not commited seppuku. + entries = topology_st.standalone.search_s(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE, '(cn=*)') + assert len(entries) > 0 + log.info('{} entries are returned from the server.'.format(len(entries))) + + # This has a magic hack to determine if we are in cn=config. + try: + topology_st.standalone.modify_s(DEFAULT_BENAME, [(ldap.MOD_REPLACE, + 'nsslapd-cachememsize', b'1')]) + except ldap.LDAPError as e: + log.fatal('Failed to change nsslapd-cachememsize ' + e.args[0]['desc']) + + # Check the server has not commited seppuku. + entries = topology_st.standalone.search_s(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE, '(cn=*)') + assert len(entries) > 0 + log.info('{} entries are returned from the server.'.format(len(entries))) + + # Now try with mod_replace. This should be okay. + + modlist = [(ldap.MOD_REPLACE, 'nsslapd-cachememsize', b'1')] + topology_st.standalone.modify_s("cn=%s,cn=ldbm database,cn=plugins,cn=config" % DEFAULT_BENAME, + modlist) + + # Check the server has not commited seppuku. + entries = topology_st.standalone.search_s(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE, '(cn=*)') + assert len(entries) > 0 + log.info('{} entries are returned from the server.'.format(len(entries))) + + log.info('Test complete') + + +if __name__ == '__main__': + # Run isolated + # -s for DEBUG mode + CURRENT_FILE = os.path.realpath(__file__) + pytest.main("-s %s" % CURRENT_FILE) diff --git a/dirsrvtests/tests/tickets/ticket48745_test.py b/dirsrvtests/tests/tickets/ticket48745_test.py new file mode 100644 index 0000000..f0bcaa1 --- /dev/null +++ b/dirsrvtests/tests/tickets/ticket48745_test.py @@ -0,0 +1,128 @@ +import pytest +from lib389.tasks import * +from lib389.utils import * +from lib389.topologies import topology_st + +from lib389._constants import SUFFIX, DEFAULT_SUFFIX + +pytestmark = pytest.mark.tier2 + +logging.getLogger(__name__).setLevel(logging.DEBUG) +log = logging.getLogger(__name__) + +NEW_ACCOUNT = "new_account" +MAX_ACCOUNTS = 20 + +MIXED_VALUE = "/home/mYhOmEdIrEcToRy" +LOWER_VALUE = "/home/myhomedirectory" +HOMEDIRECTORY_INDEX = 'cn=homeDirectory,cn=index,cn=userRoot,cn=ldbm database,cn=plugins,cn=config' +HOMEDIRECTORY_CN = "homedirectory" +MATCHINGRULE = 'nsMatchingRule' +UIDNUMBER_INDEX = 'cn=uidnumber,cn=index,cn=userRoot,cn=ldbm database,cn=plugins,cn=config' +UIDNUMBER_CN = "uidnumber" + + +def test_ticket48745_init(topology_st): + log.info("Initialization: add dummy entries for the tests") + for cpt in range(MAX_ACCOUNTS): + name = "%s%d" % (NEW_ACCOUNT, cpt) + topology_st.standalone.add_s(Entry(("uid=%s,%s" % (name, SUFFIX), { + 'objectclass': "top posixAccount".split(), + 'uid': name, + 'cn': name, + 'uidnumber': str(111), + 'gidnumber': str(222), + 'homedirectory': "/home/tbordaz_%d" % cpt}))) + + +def test_ticket48745_homeDirectory_indexed_cis(topology_st): + 
log.info("\n\nindex homeDirectory in caseIgnoreIA5Match and caseExactIA5Match") + try: + ent = topology_st.standalone.getEntry(HOMEDIRECTORY_INDEX, ldap.SCOPE_BASE) + except ldap.NO_SUCH_OBJECT: + topology_st.standalone.add_s(Entry((HOMEDIRECTORY_INDEX, { + 'objectclass': "top nsIndex".split(), + 'cn': HOMEDIRECTORY_CN, + 'nsSystemIndex': 'false', + 'nsIndexType': 'eq'}))) + # log.info("attach debugger") + # time.sleep(60) + + IGNORE_MR_NAME = b'caseIgnoreIA5Match' + EXACT_MR_NAME = b'caseExactIA5Match' + mod = [(ldap.MOD_REPLACE, MATCHINGRULE, (IGNORE_MR_NAME, EXACT_MR_NAME))] + topology_st.standalone.modify_s(HOMEDIRECTORY_INDEX, mod) + + # topology_st.standalone.stop(timeout=10) + log.info("successfully checked that filter with exact mr , a filter with lowercase eq is failing") + # assert topology_st.standalone.db2index(bename=DEFAULT_BENAME, suffixes=None, attrs=['homeDirectory']) + # topology_st.standalone.start(timeout=10) + args = {TASK_WAIT: True} + topology_st.standalone.tasks.reindex(suffix=SUFFIX, attrname='homeDirectory', args=args) + + log.info("Check indexing succeeded with a specified matching rule") + file_obj = open(topology_st.standalone.errlog, "r") + + # Check if the MR configuration failure occurs + regex = re.compile("unknown or invalid matching rule") + while True: + line = file_obj.readline() + found = regex.search(line) + if ((line == '') or (found)): + break + + if (found): + log.info("The configuration of a specific MR fails") + log.info(line) + assert 0 + + +def test_ticket48745_homeDirectory_mixed_value(topology_st): + # Set a homedirectory value with mixed case + name = "uid=%s1,%s" % (NEW_ACCOUNT, SUFFIX) + mod = [(ldap.MOD_REPLACE, 'homeDirectory', ensure_bytes(MIXED_VALUE))] + topology_st.standalone.modify_s(name, mod) + + +def test_ticket48745_extensible_search_after_index(topology_st): + name = "uid=%s1,%s" % (NEW_ACCOUNT, SUFFIX) + + # check with the exact stored value + log.info("Default: can retrieve an entry filter syntax with exact stored value") + ent = topology_st.standalone.getEntry(SUFFIX, ldap.SCOPE_SUBTREE, "(homeDirectory=%s)" % MIXED_VALUE) + # log.info("attach debugger") + # time.sleep(60) + + # This search will fail because a + # subtree search with caseExactIA5Match will find a key + # where the value has been lowercase + log.info("Default: can retrieve an entry filter caseExactIA5Match with exact stored value") + ent = topology_st.standalone.getEntry(SUFFIX, ldap.SCOPE_SUBTREE, + "(homeDirectory:caseExactIA5Match:=%s)" % MIXED_VALUE) + assert ent + + # But do additional searches.. 
just for more tests + # check with a lower case value that is different from the stored value + log.info("Default: can not retrieve an entry filter syntax match with lowered stored value") + try: + ent = topology_st.standalone.getEntry(SUFFIX, ldap.SCOPE_SUBTREE, "(homeDirectory=%s)" % LOWER_VALUE) + assert ent is None + except ldap.NO_SUCH_OBJECT: + pass + log.info("Default: can not retrieve an entry filter caseExactIA5Match with lowered stored value") + try: + ent = topology_st.standalone.getEntry(SUFFIX, ldap.SCOPE_SUBTREE, + "(homeDirectory:caseExactIA5Match:=%s)" % LOWER_VALUE) + assert ent is None + except ldap.NO_SUCH_OBJECT: + pass + log.info("Default: can retrieve an entry filter caseIgnoreIA5Match with lowered stored value") + ent = topology_st.standalone.getEntry(SUFFIX, ldap.SCOPE_SUBTREE, + "(homeDirectory:caseIgnoreIA5Match:=%s)" % LOWER_VALUE) + + +if __name__ == '__main__': + # Run isolated + # -s for DEBUG mode + CURRENT_FILE = os.path.realpath(__file__) + pytest.main("-s %s" % CURRENT_FILE) diff --git a/dirsrvtests/tests/tickets/ticket48746_test.py b/dirsrvtests/tests/tickets/ticket48746_test.py new file mode 100644 index 0000000..f574c49 --- /dev/null +++ b/dirsrvtests/tests/tickets/ticket48746_test.py @@ -0,0 +1,148 @@ +import pytest +from lib389.tasks import * +from lib389.utils import * +from lib389.topologies import topology_st + +from lib389._constants import SUFFIX, DEFAULT_SUFFIX, DEFAULT_BENAME + +pytestmark = pytest.mark.tier2 + +logging.getLogger(__name__).setLevel(logging.DEBUG) +log = logging.getLogger(__name__) + +NEW_ACCOUNT = "new_account" +MAX_ACCOUNTS = 20 + +MIXED_VALUE = "/home/mYhOmEdIrEcToRy" +LOWER_VALUE = "/home/myhomedirectory" +HOMEDIRECTORY_INDEX = 'cn=homeDirectory,cn=index,cn=userRoot,cn=ldbm database,cn=plugins,cn=config' +HOMEDIRECTORY_CN = "homedirectory" +MATCHINGRULE = 'nsMatchingRule' +UIDNUMBER_INDEX = 'cn=uidnumber,cn=index,cn=userRoot,cn=ldbm database,cn=plugins,cn=config' +UIDNUMBER_CN = "uidnumber" + + +def test_ticket48746_init(topology_st): + log.info("Initialization: add dummy entries for the tests") + for cpt in range(MAX_ACCOUNTS): + name = "%s%d" % (NEW_ACCOUNT, cpt) + topology_st.standalone.add_s(Entry(("uid=%s,%s" % (name, SUFFIX), { + 'objectclass': "top posixAccount".split(), + 'uid': name, + 'cn': name, + 'uidnumber': str(111), + 'gidnumber': str(222), + 'homedirectory': "/home/tbordaz_%d" % cpt}))) + + +def test_ticket48746_homeDirectory_indexed_cis(topology_st): + log.info("\n\nindex homeDirectory in caseIgnoreIA5Match and caseExactIA5Match") + try: + ent = topology_st.standalone.getEntry(HOMEDIRECTORY_INDEX, ldap.SCOPE_BASE) + except ldap.NO_SUCH_OBJECT: + topology_st.standalone.add_s(Entry((HOMEDIRECTORY_INDEX, { + 'objectclass': "top nsIndex".split(), + 'cn': HOMEDIRECTORY_CN, + 'nsSystemIndex': 'false', + 'nsIndexType': 'eq'}))) + # log.info("attach debugger") + # time.sleep(60) + + IGNORE_MR_NAME = b'caseIgnoreIA5Match' + EXACT_MR_NAME = b'caseExactIA5Match' + mod = [(ldap.MOD_REPLACE, MATCHINGRULE, (IGNORE_MR_NAME, EXACT_MR_NAME))] + topology_st.standalone.modify_s(HOMEDIRECTORY_INDEX, mod) + + # topology_st.standalone.stop(timeout=10) + log.info("successfully checked that filter with exact mr , a filter with lowercase eq is failing") + # assert topology_st.standalone.db2index(bename=DEFAULT_BENAME, suffixes=None, attrs=['homeDirectory']) + # topology_st.standalone.start(timeout=10) + args = {TASK_WAIT: True} + topology_st.standalone.tasks.reindex(suffix=SUFFIX, attrname='homeDirectory', args=args) + + 
log.info("Check indexing succeeded with a specified matching rule") + file_obj = open(topology_st.standalone.errlog, "r") + + # Check if the MR configuration failure occurs + regex = re.compile("unknown or invalid matching rule") + while True: + line = file_obj.readline() + found = regex.search(line) + if ((line == '') or (found)): + break + + if (found): + log.info("The configuration of a specific MR fails") + log.info(line) + assert not found + + +def test_ticket48746_homeDirectory_mixed_value(topology_st): + # Set a homedirectory value with mixed case + name = "uid=%s1,%s" % (NEW_ACCOUNT, SUFFIX) + mod = [(ldap.MOD_REPLACE, 'homeDirectory', ensure_bytes(MIXED_VALUE))] + topology_st.standalone.modify_s(name, mod) + + +def test_ticket48746_extensible_search_after_index(topology_st): + name = "uid=%s1,%s" % (NEW_ACCOUNT, SUFFIX) + + # check with the exact stored value + # log.info("Default: can retrieve an entry filter syntax with exact stored value") + # ent = topology_st.standalone.getEntry(name, ldap.SCOPE_BASE, "(homeDirectory=%s)" % MIXED_VALUE) + # log.info("attach debugger") + # time.sleep(60) + + # This search is enought to trigger the crash + # because it loads a registered filter MR plugin that has no indexer create function + # following index will trigger the crash + log.info("Default: can retrieve an entry filter caseExactIA5Match with exact stored value") + ent = topology_st.standalone.getEntry(name, ldap.SCOPE_BASE, "(homeDirectory:caseExactIA5Match:=%s)" % MIXED_VALUE) + + +def test_ticket48746_homeDirectory_indexed_ces(topology_st): + log.info("\n\nindex homeDirectory in caseExactIA5Match, this would trigger the crash") + try: + ent = topology_st.standalone.getEntry(HOMEDIRECTORY_INDEX, ldap.SCOPE_BASE) + except ldap.NO_SUCH_OBJECT: + topology_st.standalone.add_s(Entry((HOMEDIRECTORY_INDEX, { + 'objectclass': "top nsIndex".split(), + 'cn': HOMEDIRECTORY_CN, + 'nsSystemIndex': 'false', + 'nsIndexType': 'eq'}))) + # log.info("attach debugger") + # time.sleep(60) + + EXACT_MR_NAME = b'caseExactIA5Match' + mod = [(ldap.MOD_REPLACE, MATCHINGRULE, (EXACT_MR_NAME))] + topology_st.standalone.modify_s(HOMEDIRECTORY_INDEX, mod) + + # topology_st.standalone.stop(timeout=10) + log.info("successfully checked that filter with exact mr , a filter with lowercase eq is failing") + # assert topology_st.standalone.db2index(bename=DEFAULT_BENAME, suffixes=None, attrs=['homeDirectory']) + # topology_st.standalone.start(timeout=10) + args = {TASK_WAIT: True} + topology_st.standalone.tasks.reindex(suffix=SUFFIX, attrname='homeDirectory', args=args) + + log.info("Check indexing succeeded with a specified matching rule") + file_obj = open(topology_st.standalone.errlog, "r") + + # Check if the MR configuration failure occurs + regex = re.compile("unknown or invalid matching rule") + while True: + line = file_obj.readline() + found = regex.search(line) + if ((line == '') or (found)): + break + + if (found): + log.info("The configuration of a specific MR fails") + log.info(line) + assert not found + + +if __name__ == '__main__': + # Run isolated + # -s for DEBUG mode + CURRENT_FILE = os.path.realpath(__file__) + pytest.main("-s %s" % CURRENT_FILE) diff --git a/dirsrvtests/tests/tickets/ticket48759_test.py b/dirsrvtests/tests/tickets/ticket48759_test.py new file mode 100644 index 0000000..f9b2e3b --- /dev/null +++ b/dirsrvtests/tests/tickets/ticket48759_test.py @@ -0,0 +1,227 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2016 Red Hat, Inc. +# All rights reserved. 
+# +# License: GPL (version 3 or any later version). +# See LICENSE for details. +# --- END COPYRIGHT BLOCK --- +# +import logging + +import pytest +from lib389.tasks import * +from lib389.utils import * +from lib389.topologies import topology_st +from lib389.replica import ReplicationManager,Replicas + +from lib389._constants import (PLUGIN_MEMBER_OF, DEFAULT_SUFFIX, ReplicaRole, REPLICAID_MASTER_1, + PLUGIN_RETRO_CHANGELOG, REPLICA_PRECISE_PURGING, REPLICA_PURGE_DELAY, + REPLICA_PURGE_INTERVAL) + +pytestmark = pytest.mark.tier2 + +log = logging.getLogger(__name__) + +MEMBEROF_PLUGIN_DN = ('cn=' + PLUGIN_MEMBER_OF + ',cn=plugins,cn=config') +GROUP_DN = ("cn=group," + DEFAULT_SUFFIX) +MEMBER_DN_COMP = "uid=member" + + +def _add_group_with_members(topology_st): + # Create group + try: + topology_st.standalone.add_s(Entry((GROUP_DN, + {'objectclass': 'top groupofnames'.split(), + 'cn': 'group'}))) + except ldap.LDAPError as e: + log.fatal('Failed to add group: error ' + e.args[0]['desc']) + assert False + + # Add members to the group - set timeout + log.info('Adding members to the group...') + for idx in range(1, 5): + try: + MEMBER_VAL = ("uid=member%d,%s" % (idx, DEFAULT_SUFFIX)) + topology_st.standalone.modify_s(GROUP_DN, + [(ldap.MOD_ADD, + 'member', + ensure_bytes(MEMBER_VAL))]) + except ldap.LDAPError as e: + log.fatal('Failed to update group: member (%s) - error: %s' % + (MEMBER_VAL, e.args[0]['desc'])) + assert False + + +def _find_retrocl_changes(topology_st, user_dn=None): + ents = topology_st.standalone.search_s('cn=changelog', ldap.SCOPE_SUBTREE, '(targetDn=%s)' % user_dn) + return len(ents) + + +def _find_memberof(topology_st, user_dn=None, group_dn=None, find_result=True): + ent = topology_st.standalone.getEntry(user_dn, ldap.SCOPE_BASE, "(objectclass=*)", ['memberof']) + found = False + if ent.hasAttr('memberof'): + + for val in ent.getValues('memberof'): + topology_st.standalone.log.info("!!!!!!! %s: memberof->%s" % (user_dn, val)) + if ensure_str(val) == group_dn: + found = True + break + + if find_result: + assert (found) + else: + assert (not found) + + +def test_ticket48759(topology_st): + """ + The fix for ticket 48759 has to prevent plugin calls for tombstone purging + + The test uses the memberof and retrocl plugins to verify this. 
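+    (Here retrocl refers to the Retro Changelog plugin; _find_retrocl_changes
+    below counts its records under cn=changelog.)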
+ In tombstone purging without the fix the mmeberof plugin is called, + if the tombstone entry is a group, + it modifies the user entries for the group + and if retrocl is enabled this mod is written to the retrocl + + The test sequence is: + - enable replication + - enable memberof and retro cl plugin + - add user entries + - add a group and add the users as members + - verify memberof is set to users + - delete the group + - verify memberof is removed from users + - add group again + - verify memberof is set to users + - get number of changes in retro cl for one user + - configure tombstone purging + - wait for purge interval to pass + - add a dummy entry to increase maxcsn + - wait for purge interval to pass two times + - get number of changes in retro cl for user again + - assert there was no additional change + """ + + log.info('Testing Ticket 48759 - no plugin calls for tombstone purging') + + # + # Setup Replication + # + log.info('Setting up replication...') + repl = ReplicationManager(DEFAULT_SUFFIX) + repl.create_first_master(topology_st.standalone) + # + # enable dynamic plugins, memberof and retro cl plugin + # + log.info('Enable plugins...') + try: + topology_st.standalone.config.set('nsslapd-dynamic-plugins', 'on') + except ldap.LDAPError as e: + ldap.error('Failed to enable dynamic plugins! ' + e.args[0]['desc']) + assert False + + topology_st.standalone.plugins.enable(name=PLUGIN_MEMBER_OF) + topology_st.standalone.plugins.enable(name=PLUGIN_RETRO_CHANGELOG) + # Configure memberOf group attribute + try: + topology_st.standalone.modify_s(MEMBEROF_PLUGIN_DN, + [(ldap.MOD_REPLACE, + 'memberofgroupattr', + b'member')]) + except ldap.LDAPError as e: + log.fatal('Failed to configure memberOf plugin: error ' + e.args[0]['desc']) + assert False + + # + # create some users and a group + # + log.info('create users and group...') + for idx in range(1, 5): + try: + USER_DN = ("uid=member%d,%s" % (idx, DEFAULT_SUFFIX)) + topology_st.standalone.add_s(Entry((USER_DN, + {'objectclass': 'top extensibleObject'.split(), + 'uid': 'member%d' % (idx)}))) + except ldap.LDAPError as e: + log.fatal('Failed to add user (%s): error %s' % (USER_DN, e.args[0]['desc'])) + assert False + + _add_group_with_members(topology_st) + + MEMBER_VAL = ("uid=member2,%s" % DEFAULT_SUFFIX) + time.sleep(1) + _find_memberof(topology_st, MEMBER_VAL, GROUP_DN, True) + + # delete group + log.info('delete group...') + try: + topology_st.standalone.delete_s(GROUP_DN) + except ldap.LDAPError as e: + log.error('Failed to delete entry: ' + e.args[0]['desc']) + assert False + + time.sleep(1) + _find_memberof(topology_st, MEMBER_VAL, GROUP_DN, False) + + # add group again + log.info('add group again') + _add_group_with_members(topology_st) + time.sleep(1) + _find_memberof(topology_st, MEMBER_VAL, GROUP_DN, True) + + # + # get number of changelog records for one user entry + log.info('get number of changes for %s before tombstone purging' % MEMBER_VAL) + changes_pre = _find_retrocl_changes(topology_st, MEMBER_VAL) + + # configure tombstone purging + args = {REPLICA_PRECISE_PURGING: 'on', + REPLICA_PURGE_DELAY: '5', + REPLICA_PURGE_INTERVAL: '5'} + try: + Repl_DN = 'cn=replica,cn=dc\\3Dexample\\2Cdc\\3Dcom,cn=mapping tree,cn=config' + topology_st.standalone.modify_s(Repl_DN, + [(ldap.MOD_ADD, 'nsDS5ReplicaPreciseTombstonePurging', b'on'), + (ldap.MOD_ADD, 'nsDS5ReplicaPurgeDelay', b'5'), + (ldap.MOD_ADD, 'nsDS5ReplicaTombstonePurgeInterval', b'5')]) + except: + log.fatal('Failed to configure replica') + assert False + + # Wait 
for the interval to pass + log.info('Wait for tombstone purge interval to pass ...') + time.sleep(6) + + # Add an entry to trigger replication + log.info('add dummy entry') + try: + topology_st.standalone.add_s(Entry(('cn=test_entry,dc=example,dc=com', { + 'objectclass': 'top person'.split(), + 'sn': 'user', + 'cn': 'entry1'}))) + except ldap.LDAPError as e: + log.error('Failed to add entry: ' + e.args[0]['desc']) + assert False + + # check memberof is still correct + time.sleep(1) + _find_memberof(topology_st, MEMBER_VAL, GROUP_DN, True) + + # Wait for the interval to pass again + log.info('Wait for tombstone purge interval to pass again...') + time.sleep(10) + + # + # get number of changelog records for one user entry + log.info('get number of changes for %s before tombstone purging' % MEMBER_VAL) + changes_post = _find_retrocl_changes(topology_st, MEMBER_VAL) + + assert (changes_pre == changes_post) + + +if __name__ == '__main__': + # Run isolated + # -s for DEBUG mode + CURRENT_FILE = os.path.realpath(__file__) + pytest.main("-s %s" % CURRENT_FILE) diff --git a/dirsrvtests/tests/tickets/ticket48784_test.py b/dirsrvtests/tests/tickets/ticket48784_test.py new file mode 100644 index 0000000..8e5b204 --- /dev/null +++ b/dirsrvtests/tests/tickets/ticket48784_test.py @@ -0,0 +1,141 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2016 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. +# --- END COPYRIGHT BLOCK --- +# +import pytest +from lib389.tasks import * + +from lib389.utils import * +# Skip on older versions +pytestmark = [pytest.mark.tier2, + pytest.mark.skipif(ds_is_older('1.3.5'), reason="Not implemented")] + +from lib389.topologies import topology_m2 + +from lib389._constants import DEFAULT_SUFFIX, DN_DM, PASSWORD + +logging.getLogger(__name__).setLevel(logging.DEBUG) +log = logging.getLogger(__name__) + +CONFIG_DN = 'cn=config' +ENCRYPTION_DN = 'cn=encryption,%s' % CONFIG_DN +RSA = 'RSA' +RSA_DN = 'cn=%s,%s' % (RSA, ENCRYPTION_DN) +ISSUER = 'cn=CAcert' +CACERT = 'CAcertificate' +SERVERCERT = 'Server-Cert' + + +@pytest.fixture(scope="module") +def add_entry(server, name, rdntmpl, start, num): + log.info("\n######################### Adding %d entries to %s ######################" % (num, name)) + + for i in range(num): + ii = start + i + dn = '%s%d,%s' % (rdntmpl, ii, DEFAULT_SUFFIX) + try: + server.add_s(Entry((dn, {'objectclass': 'top person extensibleObject'.split(), + 'uid': '%s%d' % (rdntmpl, ii), + 'cn': '%s user%d' % (name, ii), + 'sn': 'user%d' % (ii)}))) + except ldap.LDAPError as e: + log.error('Failed to add %s ' % dn + e.message['desc']) + assert False + +def config_tls_agreements(topology_m2): + log.info("######################### Configure SSL/TLS agreements ######################") + log.info("######################## master1 <-- startTLS -> master2 #####################") + + log.info("##### Update the agreement of master1") + m1 = topology_m2.ms["master1"] + m1_m2_agmt = m1.agreement.list(suffix=DEFAULT_SUFFIX)[0].dn + topology_m2.ms["master1"].modify_s(m1_m2_agmt, [(ldap.MOD_REPLACE, 'nsDS5ReplicaTransportInfo', b'TLS')]) + + log.info("##### Update the agreement of master2") + m2 = topology_m2.ms["master2"] + m2_m1_agmt = m2.agreement.list(suffix=DEFAULT_SUFFIX)[0].dn + topology_m2.ms["master2"].modify_s(m2_m1_agmt, [(ldap.MOD_REPLACE, 'nsDS5ReplicaTransportInfo', b'TLS')]) + + time.sleep(1) + + topology_m2.ms["master1"].restart(10) + topology_m2.ms["master2"].restart(10) + + 
log.info("\n######################### Configure SSL/TLS agreements Done ######################\n") + + +def set_ssl_Version(server, name, version): + log.info("\n######################### Set %s on %s ######################\n" % + (version, name)) + server.simple_bind_s(DN_DM, PASSWORD) + server.modify_s(ENCRYPTION_DN, [(ldap.MOD_REPLACE, 'nsSSL3', b'off'), + (ldap.MOD_REPLACE, 'nsTLS1', b'on'), + (ldap.MOD_REPLACE, 'sslVersionMin', ensure_bytes(version)), + (ldap.MOD_REPLACE, 'sslVersionMax', ensure_bytes(version))]) + + +def test_ticket48784(topology_m2): + """ + Set up 2way MMR: + master_1 <----- startTLS -----> master_2 + + Make sure the replication is working. + Then, stop the servers and set only TLS1.0 on master_1 while TLS1.2 on master_2 + Replication is supposed to fail. + """ + log.info("Ticket 48784 - Allow usage of OpenLDAP libraries that don't use NSS for crypto") + + #create_keys_certs(topology_m2) + [i.enable_tls() for i in topology_m2] + + config_tls_agreements(topology_m2) + + add_entry(topology_m2.ms["master1"], 'master1', 'uid=m1user', 0, 5) + add_entry(topology_m2.ms["master2"], 'master2', 'uid=m2user', 0, 5) + + time.sleep(10) + + log.info('##### Searching for entries on master1...') + entries = topology_m2.ms["master1"].search_s(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE, '(uid=*)') + assert 10 == len(entries) + + log.info('##### Searching for entries on master2...') + entries = topology_m2.ms["master2"].search_s(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE, '(uid=*)') + assert 10 == len(entries) + + log.info("##### openldap client just accepts sslVersionMin not Max.") + set_ssl_Version(topology_m2.ms["master1"], 'master1', 'TLS1.0') + set_ssl_Version(topology_m2.ms["master2"], 'master2', 'TLS1.2') + + log.info("##### restart master[12]") + topology_m2.ms["master1"].restart(timeout=10) + topology_m2.ms["master2"].restart(timeout=10) + + log.info("##### replication from master_1 to master_2 should be ok.") + add_entry(topology_m2.ms["master1"], 'master1', 'uid=m1user', 10, 1) + log.info("##### replication from master_2 to master_1 should fail.") + add_entry(topology_m2.ms["master2"], 'master2', 'uid=m2user', 10, 1) + + time.sleep(10) + + log.info('##### Searching for entries on master1...') + entries = topology_m2.ms["master1"].search_s(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE, '(uid=*)') + assert 11 == len(entries) # This is supposed to be "1" less than master 2's entry count + + log.info('##### Searching for entries on master2...') + entries = topology_m2.ms["master2"].search_s(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE, '(uid=*)') + assert 12 == len(entries) + + log.info("Ticket 48784 - PASSED") + + +if __name__ == '__main__': + # Run isolated + # -s for DEBUG mode + + CURRENT_FILE = os.path.realpath(__file__) + pytest.main("-s %s" % CURRENT_FILE) diff --git a/dirsrvtests/tests/tickets/ticket48798_test.py b/dirsrvtests/tests/tickets/ticket48798_test.py new file mode 100644 index 0000000..a513292 --- /dev/null +++ b/dirsrvtests/tests/tickets/ticket48798_test.py @@ -0,0 +1,65 @@ +from subprocess import check_output + +import pytest +from lib389.tasks import * +from lib389.utils import * +from lib389.topologies import topology_st +from lib389.config import Encryption + +from lib389._constants import DEFAULT_SUFFIX, DEFAULT_SECURE_PORT + +pytestmark = pytest.mark.tier2 + +logging.getLogger(__name__).setLevel(logging.DEBUG) +log = logging.getLogger(__name__) + + +def check_socket_dh_param_size(hostname, port): + ### You know why we have to do this? + # Because TLS and SSL suck. Hard. 
They are impossible. It's all terrible, burn it all down. + cmd = "echo quit | openssl s_client -connect {HOSTNAME}:{PORT} -msg -cipher DH | grep -A 1 ServerKeyExchange".format( + HOSTNAME=hostname, + PORT=port) + output = check_output(cmd, shell=True) + dhheader = output.split(b'\n')[1] + # Get rid of all the other whitespace. + dhheader = dhheader.replace(b' ', b'') + # Example is 0c00040b0100ffffffffffffffffadf8 + # We need the bits 0100 here. Which means 256 bytes aka 256 * 8, for 2048 bit. + dhheader = dhheader[8:12] + # make it an int, and times 8 + i = int(dhheader, 16) * 8 + return i + + +def test_ticket48798(topology_st): + """ + Test DH param sizes offered by DS. + + """ + topology_st.standalone.enable_tls() + + # Confirm that we have a connection, and that it has DH + + # Open a socket to the port. + # Check the security settings. + size = check_socket_dh_param_size(topology_st.standalone.host, topology_st.standalone.sslport) + + assert size == 2048 + + # Now toggle the settings. + enc = Encryption(topology_st.standalone) + enc.set('allowWeakDHParam', 'on') + + topology_st.standalone.restart() + + # Check the DH params are less than 1024. + size = check_socket_dh_param_size(topology_st.standalone.host, topology_st.standalone.sslport) + + assert size == 1024 + +if __name__ == '__main__': + # Run isolated + # -s for DEBUG mode + CURRENT_FILE = os.path.realpath(__file__) + pytest.main("-s %s" % CURRENT_FILE) diff --git a/dirsrvtests/tests/tickets/ticket48799_test.py b/dirsrvtests/tests/tickets/ticket48799_test.py new file mode 100644 index 0000000..8d9b7e9 --- /dev/null +++ b/dirsrvtests/tests/tickets/ticket48799_test.py @@ -0,0 +1,87 @@ +import pytest +from lib389.tasks import * +from lib389.utils import * +from lib389.topologies import topology_m1c1 + +pytestmark = pytest.mark.tier2 + +logging.getLogger(__name__).setLevel(logging.DEBUG) +log = logging.getLogger(__name__) + + +def _add_custom_schema(server): + attr_value = b"( 10.0.9.2342.19200300.100.1.1 NAME 'customManager' EQUALITY distinguishedNameMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.12 X-ORIGIN 'user defined' )" + mod = [(ldap.MOD_ADD, 'attributeTypes', attr_value)] + server.modify_s('cn=schema', mod) + + oc_value = b"( 1.3.6.1.4.1.4843.2.1 NAME 'customPerson' SUP inetorgperson STRUCTURAL MAY (customManager) X-ORIGIN 'user defined' )" + mod = [(ldap.MOD_ADD, 'objectclasses', oc_value)] + server.modify_s('cn=schema', mod) + + +def _create_user(server): + server.add_s(Entry(( + "uid=testuser,ou=People,%s" % DEFAULT_SUFFIX, + { + 'objectClass': "top account posixaccount".split(), + 'uid': 'testuser', + 'gecos': 'Test User', + 'cn': 'testuser', + 'homedirectory': '/home/testuser', + 'passwordexpirationtime': '20160710184141Z', + 'userpassword': '!', + 'uidnumber': '1111212', + 'gidnumber': '1111212', + 'loginshell': '/bin/bash' + } + ))) + + +def _modify_user(server): + mod = [ + (ldap.MOD_ADD, 'objectClass', [b'customPerson']), + (ldap.MOD_ADD, 'sn', [b'User']), + (ldap.MOD_ADD, 'customManager', [b'cn=manager']), + ] + server.modify("uid=testuser,ou=People,%s" % DEFAULT_SUFFIX, mod) + + +def test_ticket48799(topology_m1c1): + """Write your replication testcase here. + + To access each DirSrv instance use: topology_m1c1.ms["master1"], topology_m1c1.ms["master1"]2, + ..., topology_m1c1.hub1, ..., topology_m1c1.cs["consumer1"],... + + Also, if you need any testcase initialization, + please, write additional fixture for that(include finalizer). + """ + + # Add the new schema element. 
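+    # (_add_custom_schema defines the 'customManager' attribute type and the
+    # 'customPerson' objectclass that _modify_user relies on below.)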
+ _add_custom_schema(topology_m1c1.ms["master1"]) + _add_custom_schema(topology_m1c1.cs["consumer1"]) + + # Add a new user on the master. + _create_user(topology_m1c1.ms["master1"]) + # Modify the user on the master. + _modify_user(topology_m1c1.ms["master1"]) + + # We need to wait for replication here. + time.sleep(15) + + # Now compare the master vs consumer, and see if the objectClass was dropped. + + master_entry = topology_m1c1.ms["master1"].search_s("uid=testuser,ou=People,%s" % DEFAULT_SUFFIX, ldap.SCOPE_BASE, + '(objectclass=*)', ['objectClass']) + consumer_entry = topology_m1c1.cs["consumer1"].search_s("uid=testuser,ou=People,%s" % DEFAULT_SUFFIX, + ldap.SCOPE_BASE, '(objectclass=*)', ['objectClass']) + + assert (master_entry == consumer_entry) + + log.info('Test complete') + + +if __name__ == '__main__': + # Run isolated + # -s for DEBUG mode + CURRENT_FILE = os.path.realpath(__file__) + pytest.main("-s %s" % CURRENT_FILE) diff --git a/dirsrvtests/tests/tickets/ticket48808_test.py b/dirsrvtests/tests/tickets/ticket48808_test.py new file mode 100644 index 0000000..7ac5a76 --- /dev/null +++ b/dirsrvtests/tests/tickets/ticket48808_test.py @@ -0,0 +1,303 @@ +from random import sample + +import pytest +from ldap.controls import SimplePagedResultsControl +from lib389.tasks import * +from lib389.utils import * +from lib389.topologies import topology_st + +from lib389._constants import DEFAULT_SUFFIX, DN_DM, PASSWORD + +pytestmark = pytest.mark.tier2 + +logging.getLogger(__name__).setLevel(logging.DEBUG) +log = logging.getLogger(__name__) + +TEST_USER_NAME = 'simplepaged_test' +TEST_USER_DN = 'uid=%s,%s' % (TEST_USER_NAME, DEFAULT_SUFFIX) +TEST_USER_PWD = 'simplepaged_test' + + +@pytest.fixture(scope="module") +def create_user(topology_st): + """User for binding operation""" + + try: + topology_st.standalone.add_s(Entry((TEST_USER_DN, { + 'objectclass': 'top person'.split(), + 'objectclass': 'organizationalPerson', + 'objectclass': 'inetorgperson', + 'cn': TEST_USER_NAME, + 'sn': TEST_USER_NAME, + 'userpassword': TEST_USER_PWD, + 'mail': '%s@redhat.com' % TEST_USER_NAME, + 'uid': TEST_USER_NAME + }))) + except ldap.LDAPError as e: + log.error('Failed to add user (%s): error (%s)' % (TEST_USER_DN, + e.args[0]['desc'])) + raise e + + +def add_users(topology_st, users_num): + """Add users to the default suffix + and return a list of added user DNs. 
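+    User names are derived from sample(range(1000), users_num), so users_num
+    must not exceed 1000.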
+ """ + + users_list = [] + log.info('Adding %d users' % users_num) + for num in sample(range(1000), users_num): + num_ran = int(round(num)) + USER_NAME = 'test%05d' % num_ran + USER_DN = 'uid=%s,%s' % (USER_NAME, DEFAULT_SUFFIX) + users_list.append(USER_DN) + try: + topology_st.standalone.add_s(Entry((USER_DN, { + 'objectclass': 'top person'.split(), + 'objectclass': 'organizationalPerson', + 'objectclass': 'inetorgperson', + 'cn': USER_NAME, + 'sn': USER_NAME, + 'userpassword': 'pass%s' % num_ran, + 'mail': '%s@redhat.com' % USER_NAME, + 'uid': USER_NAME + }))) + except ldap.LDAPError as e: + log.error('Failed to add user (%s): error (%s)' % (USER_DN, + e.args[0]['desc'])) + raise e + return users_list + + +def del_users(topology_st, users_list): + """Delete users with DNs from given list""" + + log.info('Deleting %d users' % len(users_list)) + for user_dn in users_list: + try: + topology_st.standalone.delete_s(user_dn) + except ldap.LDAPError as e: + log.error('Failed to delete user (%s): error (%s)' % (user_dn, + e.args[0]['desc'])) + raise e + + +def change_conf_attr(topology_st, suffix, attr_name, attr_value): + """Change configurational attribute in the given suffix. + Funtion returns previous attribute value. + """ + + try: + entries = topology_st.standalone.search_s(suffix, ldap.SCOPE_BASE, + 'objectclass=top', + [attr_name]) + attr_value_bck = entries[0].data.get(attr_name) + log.info('Set %s to %s. Previous value - %s. Modified suffix - %s.' % ( + attr_name, attr_value, attr_value_bck, suffix)) + if attr_value is None: + topology_st.standalone.modify_s(suffix, [(ldap.MOD_DELETE, + attr_name, + attr_value)]) + else: + topology_st.standalone.modify_s(suffix, [(ldap.MOD_REPLACE, + attr_name, + attr_value)]) + except ldap.LDAPError as e: + log.error('Failed to change attr value (%s): error (%s)' % (attr_name, + e.args[0]['desc'])) + raise e + + return attr_value_bck + + +def paged_search(topology_st, controls, search_flt, searchreq_attrlist): + """Search at the DEFAULT_SUFFIX with ldap.SCOPE_SUBTREE + using Simple Paged Control(should the first item in the + list controls. 
+ Return the list with results summarized from all pages + """ + + pages = 0 + pctrls = [] + all_results = [] + req_ctrl = controls[0] + msgid = topology_st.standalone.search_ext(DEFAULT_SUFFIX, + ldap.SCOPE_SUBTREE, + search_flt, + searchreq_attrlist, + serverctrls=controls) + while True: + log.info('Getting page %d' % (pages,)) + rtype, rdata, rmsgid, rctrls = topology_st.standalone.result3(msgid) + all_results.extend(rdata) + pages += 1 + pctrls = [ + c + for c in rctrls + if c.controlType == SimplePagedResultsControl.controlType + ] + + if pctrls: + if pctrls[0].cookie: + # Copy cookie from response control to request control + req_ctrl.cookie = pctrls[0].cookie + msgid = topology_st.standalone.search_ext(DEFAULT_SUFFIX, + ldap.SCOPE_SUBTREE, + search_flt, + searchreq_attrlist, + serverctrls=controls) + else: + break # no more pages available + else: + break + + assert not pctrls[0].cookie + return all_results + + +def test_ticket48808(topology_st, create_user): + log.info('Run multiple paging controls on a single connection') + users_num = 100 + page_size = 30 + users_list = add_users(topology_st, users_num) + search_flt = r'(uid=test*)' + searchreq_attrlist = ['dn', 'sn'] + + log.info('Set user bind') + topology_st.standalone.simple_bind_s(TEST_USER_DN, TEST_USER_PWD) + + log.info('Create simple paged results control instance') + req_ctrl = SimplePagedResultsControl(True, size=page_size, cookie='') + controls = [req_ctrl] + + for ii in range(3): + log.info('Iteration %d' % ii) + msgid = topology_st.standalone.search_ext(DEFAULT_SUFFIX, + ldap.SCOPE_SUBTREE, + search_flt, + searchreq_attrlist, + serverctrls=controls) + rtype, rdata, rmsgid, rctrls = topology_st.standalone.result3(msgid) + pctrls = [ + c + for c in rctrls + if c.controlType == SimplePagedResultsControl.controlType + ] + + req_ctrl.cookie = pctrls[0].cookie + msgid = topology_st.standalone.search_ext(DEFAULT_SUFFIX, + ldap.SCOPE_SUBTREE, + search_flt, + searchreq_attrlist, + serverctrls=controls) + log.info('Set Directory Manager bind back') + topology_st.standalone.simple_bind_s(DN_DM, PASSWORD) + del_users(topology_st, users_list) + + log.info('Abandon the search') + users_num = 10 + page_size = 0 + users_list = add_users(topology_st, users_num) + search_flt = r'(uid=test*)' + searchreq_attrlist = ['dn', 'sn'] + + log.info('Set user bind') + topology_st.standalone.simple_bind_s(TEST_USER_DN, TEST_USER_PWD) + + log.info('Create simple paged results control instance') + req_ctrl = SimplePagedResultsControl(True, size=page_size, cookie='') + controls = [req_ctrl] + + msgid = topology_st.standalone.search_ext(DEFAULT_SUFFIX, + ldap.SCOPE_SUBTREE, + search_flt, + searchreq_attrlist, + serverctrls=controls) + rtype, rdata, rmsgid, rctrls = topology_st.standalone.result3(msgid) + pctrls = [ + c + for c in rctrls + if c.controlType == SimplePagedResultsControl.controlType + ] + assert not pctrls[0].cookie + + log.info('Set Directory Manager bind back') + topology_st.standalone.simple_bind_s(DN_DM, PASSWORD) + del_users(topology_st, users_list) + + log.info("Search should fail with 'nsPagedSizeLimit = 5'" + "and 'nsslapd-pagedsizelimit = 15' with 10 users") + conf_attr = b'15' + user_attr = b'5' + expected_rs = ldap.SIZELIMIT_EXCEEDED + users_num = 10 + page_size = 10 + users_list = add_users(topology_st, users_num) + search_flt = r'(uid=test*)' + searchreq_attrlist = ['dn', 'sn'] + conf_attr_bck = change_conf_attr(topology_st, DN_CONFIG, + 'nsslapd-pagedsizelimit', conf_attr) + user_attr_bck = change_conf_attr(topology_st, 
TEST_USER_DN, + 'nsPagedSizeLimit', user_attr) + + log.info('Set user bind') + topology_st.standalone.simple_bind_s(TEST_USER_DN, TEST_USER_PWD) + + log.info('Create simple paged results control instance') + req_ctrl = SimplePagedResultsControl(True, size=page_size, cookie='') + controls = [req_ctrl] + + log.info('Expect to fail with SIZELIMIT_EXCEEDED') + with pytest.raises(expected_rs): + all_results = paged_search(topology_st, controls, + search_flt, searchreq_attrlist) + + log.info('Set Directory Manager bind back') + topology_st.standalone.simple_bind_s(DN_DM, PASSWORD) + del_users(topology_st, users_list) + change_conf_attr(topology_st, DN_CONFIG, + 'nsslapd-pagedsizelimit', conf_attr_bck) + change_conf_attr(topology_st, TEST_USER_DN, + 'nsPagedSizeLimit', user_attr_bck) + + log.info("Search should pass with 'nsPagedSizeLimit = 15'" + "and 'nsslapd-pagedsizelimit = 5' with 10 users") + conf_attr = b'5' + user_attr = b'15' + users_num = 10 + page_size = 10 + users_list = add_users(topology_st, users_num) + search_flt = r'(uid=test*)' + searchreq_attrlist = ['dn', 'sn'] + conf_attr_bck = change_conf_attr(topology_st, DN_CONFIG, + 'nsslapd-pagedsizelimit', conf_attr) + user_attr_bck = change_conf_attr(topology_st, TEST_USER_DN, + 'nsPagedSizeLimit', user_attr) + + log.info('Set user bind') + topology_st.standalone.simple_bind_s(TEST_USER_DN, TEST_USER_PWD) + + log.info('Create simple paged results control instance') + req_ctrl = SimplePagedResultsControl(True, size=page_size, cookie='') + controls = [req_ctrl] + + log.info('Search should PASS') + all_results = paged_search(topology_st, controls, + search_flt, searchreq_attrlist) + log.info('%d results' % len(all_results)) + assert len(all_results) == len(users_list) + + log.info('Set Directory Manager bind back') + topology_st.standalone.simple_bind_s(DN_DM, PASSWORD) + del_users(topology_st, users_list) + change_conf_attr(topology_st, DN_CONFIG, + 'nsslapd-pagedsizelimit', conf_attr_bck) + change_conf_attr(topology_st, TEST_USER_DN, + 'nsPagedSizeLimit', user_attr_bck) + + +if __name__ == '__main__': + # Run isolated + # -s for DEBUG mode + CURRENT_FILE = os.path.realpath(__file__) + pytest.main("-s %s" % CURRENT_FILE) diff --git a/dirsrvtests/tests/tickets/ticket48844_test.py b/dirsrvtests/tests/tickets/ticket48844_test.py new file mode 100644 index 0000000..59e4cbd --- /dev/null +++ b/dirsrvtests/tests/tickets/ticket48844_test.py @@ -0,0 +1,136 @@ +import pytest +from lib389.tasks import * +from lib389.utils import * +from lib389.topologies import topology_st + +from lib389._constants import DEFAULT_SUFFIX, BACKEND_NAME + +pytestmark = pytest.mark.tier2 + +logging.getLogger(__name__).setLevel(logging.DEBUG) +log = logging.getLogger(__name__) + +PLUGIN_BITWISE = 'Bitwise Plugin' +TESTBASEDN = "dc=bitwise,dc=com" +TESTBACKEND_NAME = "TestBitw" + +F1 = 'objectclass=testperson' +BITWISE_F2 = '(&(%s)(testUserAccountControl:1.2.840.113556.1.4.803:=514))' % F1 +BITWISE_F3 = '(&(%s)(testUserAccountControl:1.2.840.113556.1.4.803:=513))' % F1 +BITWISE_F6 = '(&(%s)(testUserAccountControl:1.2.840.113556.1.4.803:=16777216))' % F1 + + +def _addBitwiseEntries(topology_st): + users = [ + ('testuser2', '65536', 'PasswordNeverExpired'), + ('testuser3', '8388608', 'PasswordExpired'), + ('testuser4', '256', 'TempDuplicateAccount'), + ('testuser5', '16777216', 'TrustedAuthDelegation'), + ('testuser6', '528', 'AccountLocked'), + ('testuser7', '513', 'AccountActive'), + ('testuser8', '98536 99512 99528'.split(), 'AccountActive PasswordExxpired 
AccountLocked'.split()), + ('testuser9', '87536 912'.split(), 'AccountActive PasswordNeverExpired'.split()), + ('testuser10', '89536 97546 96579'.split(), 'TestVerify1 TestVerify2 TestVerify3'.split()), + ('testuser11', '655236', 'TestStatus1'), + ('testuser12', '665522', 'TestStatus2'), + ('testuser13', '266552', 'TestStatus3')] + try: + topology_st.standalone.add_s(Entry((TESTBASEDN, + {'objectclass': "top dcobject".split(), + 'dc': 'bitwise', + 'aci': '(target =\"ldap:///dc=bitwise,dc=com\")' + \ + '(targetattr != \"userPassword\")' + \ + '(version 3.0;acl \"Anonymous read-search access\";' + \ + 'allow (read, search, compare)(userdn = \"ldap:///anyone\");)'}))) + + topology_st.standalone.add_s(Entry(('uid=btestuser1,%s' % TESTBASEDN, + {'objectclass': 'top testperson organizationalPerson inetorgperson'.split(), + 'mail': 'btestuser1@redhat.com', + 'uid': 'btestuser1', + 'givenName': 'bit', + 'sn': 'testuser1', + 'userPassword': 'testuser1', + 'testUserAccountControl': '514', + 'testUserStatus': 'Disabled', + 'cn': 'bit tetsuser1'}))) + for (userid, accCtl, accStatus) in users: + topology_st.standalone.add_s(Entry(('uid=b%s,%s' % (userid, TESTBASEDN), + { + 'objectclass': 'top testperson organizationalPerson inetorgperson'.split(), + 'mail': '%s@redhat.com' % userid, + 'uid': 'b%s' % userid, + 'givenName': 'bit', + 'sn': userid, + 'userPassword': userid, + 'testUserAccountControl': accCtl, + 'testUserStatus': accStatus, + 'cn': 'bit %s' % userid}))) + except ValueError: + topology_st.standalone.log.fatal("add_s failed: %s", ValueError) + + +def test_ticket48844_init(topology_st): + # create a suffix where test entries will be stored + BITW_SCHEMA_AT_1 = '( NAME \'testUserAccountControl\' DESC \'Attribute Bitwise filteri-Multi-Valued\' SYNTAX 1.3.6.1.4.1.1466.115.121.1.27 )' + BITW_SCHEMA_AT_2 = '( NAME \'testUserStatus\' DESC \'State of User account active/disabled\' SYNTAX 1.3.6.1.4.1.1466.115.121.1.15 )' + BITW_SCHEMA_OC_1 = '( NAME \'testperson\' SUP top STRUCTURAL MUST ( sn $ cn $ testUserAccountControl $ testUserStatus )' + \ + ' MAY ( userPassword $ telephoneNumber $ seeAlso $ description ) X-ORIGIN \'BitWise\' )' + topology_st.standalone.schema.add_schema('attributetypes', [ensure_bytes(BITW_SCHEMA_AT_1), ensure_bytes(BITW_SCHEMA_AT_2)]) + topology_st.standalone.schema.add_schema('objectClasses', ensure_bytes(BITW_SCHEMA_OC_1)) + + topology_st.standalone.backend.create(TESTBASEDN, {BACKEND_NAME: TESTBACKEND_NAME}) + topology_st.standalone.mappingtree.create(TESTBASEDN, bename=TESTBACKEND_NAME, parent=None) + _addBitwiseEntries(topology_st) + + +def test_ticket48844_bitwise_on(topology_st): + """ + Check that bitwise plugin (old style MR plugin) that defines + Its own indexer create function, is selected to evaluate the filter + """ + + topology_st.standalone.plugins.enable(name=PLUGIN_BITWISE) + topology_st.standalone.restart(timeout=10) + ents = topology_st.standalone.search_s('cn=%s,cn=plugins,cn=config' % PLUGIN_BITWISE, ldap.SCOPE_BASE, + 'objectclass=*') + assert (ents[0].hasValue('nsslapd-pluginEnabled', 'on')) + + expect = 2 + ents = topology_st.standalone.search_s(TESTBASEDN, ldap.SCOPE_SUBTREE, BITWISE_F2) + assert (len(ents) == expect) + + expect = 1 + ents = topology_st.standalone.search_s(TESTBASEDN, ldap.SCOPE_SUBTREE, BITWISE_F3) + assert (len(ents) == expect) + assert (ents[0].hasAttr('testUserAccountControl')) + + expect = 1 + ents = topology_st.standalone.search_s(TESTBASEDN, ldap.SCOPE_SUBTREE, BITWISE_F6) + assert (len(ents) == expect) + assert 
(ents[0].hasAttr('testUserAccountControl')) + + +def test_ticket48844_bitwise_off(topology_st): + """ + Check that when bitwise plugin is not enabled, no plugin + is identified to evaluate the filter -> ldap.UNAVAILABLE_CRITICAL_EXTENSION: + """ + topology_st.standalone.plugins.disable(name=PLUGIN_BITWISE) + topology_st.standalone.restart(timeout=10) + ents = topology_st.standalone.search_s('cn=%s,cn=plugins,cn=config' % PLUGIN_BITWISE, ldap.SCOPE_BASE, + 'objectclass=*') + assert (ents[0].hasValue('nsslapd-pluginEnabled', 'off')) + + res = 0 + try: + ents = topology_st.standalone.search_s(TESTBASEDN, ldap.SCOPE_SUBTREE, BITWISE_F2) + except ldap.UNAVAILABLE_CRITICAL_EXTENSION: + res = 12 + assert (res == 12) + + +if __name__ == '__main__': + # Run isolated + # -s for DEBUG mode + CURRENT_FILE = os.path.realpath(__file__) + pytest.main("-s %s" % CURRENT_FILE) diff --git a/dirsrvtests/tests/tickets/ticket48891_test.py b/dirsrvtests/tests/tickets/ticket48891_test.py new file mode 100644 index 0000000..041ce21 --- /dev/null +++ b/dirsrvtests/tests/tickets/ticket48891_test.py @@ -0,0 +1,102 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2016 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. +# --- END COPYRIGHT BLOCK --- +# +import fnmatch +import logging + +import pytest +from lib389.tasks import * +from lib389.topologies import topology_st + +from lib389._constants import DN_DM, PASSWORD, DEFAULT_SUFFIX, BACKEND_NAME, SUFFIX + +pytestmark = pytest.mark.tier2 + +log = logging.getLogger(__name__) + +CONFIG_DN = 'cn=config' +RDN_VAL_SUFFIX = 'ticket48891.org' +MYSUFFIX = 'dc=%s' % RDN_VAL_SUFFIX +MYSUFFIXBE = 'ticket48891' + +SEARCHFILTER = '(objectclass=person)' + +OTHER_NAME = 'other_entry' +MAX_OTHERS = 10 + + +def test_ticket48891_setup(topology_st): + """ + Check there is no core + Create a second backend + stop DS (that should trigger the core) + check there is no core + """ + log.info('Testing Ticket 48891 - ns-slapd crashes during the shutdown after adding attribute with a matching rule') + + # bind as directory manager + topology_st.standalone.log.info("Bind as %s" % DN_DM) + topology_st.standalone.simple_bind_s(DN_DM, PASSWORD) + + # check there is no core + path = topology_st.standalone.config.get_attr_val_utf8('nsslapd-errorlog').replace('errors', '') + log.debug('Looking for a core file in: ' + path) + cores = fnmatch.filter(os.listdir(path), 'core.*') + assert len(cores) == 0 + + topology_st.standalone.log.info( + "\n\n######################### SETUP SUFFIX o=ticket48891.org ######################\n") + + topology_st.standalone.backend.create(MYSUFFIX, {BACKEND_NAME: MYSUFFIXBE}) + topology_st.standalone.mappingtree.create(MYSUFFIX, bename=MYSUFFIXBE) + topology_st.standalone.add_s(Entry((MYSUFFIX, { + 'objectclass': "top domain".split(), + 'dc': RDN_VAL_SUFFIX}))) + + topology_st.standalone.log.info("\n\n######################### Generate Test data ######################\n") + + # add dummy entries on both backends + for cpt in range(MAX_OTHERS): + name = "%s%d" % (OTHER_NAME, cpt) + topology_st.standalone.add_s(Entry(("cn=%s,%s" % (name, SUFFIX), { + 'objectclass': "top person".split(), + 'sn': name, + 'cn': name}))) + for cpt in range(MAX_OTHERS): + name = "%s%d" % (OTHER_NAME, cpt) + topology_st.standalone.add_s(Entry(("cn=%s,%s" % (name, MYSUFFIX), { + 'objectclass': "top person".split(), + 'sn': name, + 'cn': name}))) + + topology_st.standalone.log.info("\n\n######################### SEARCH ALL 
######################\n") + topology_st.standalone.log.info("Bind as %s and add the READ/SEARCH SELFDN aci" % DN_DM) + topology_st.standalone.simple_bind_s(DN_DM, PASSWORD) + + entries = topology_st.standalone.search_s(MYSUFFIX, ldap.SCOPE_SUBTREE, SEARCHFILTER) + topology_st.standalone.log.info("Returned %d entries.\n", len(entries)) + + assert MAX_OTHERS == len(entries) + + topology_st.standalone.log.info('%d person entries are successfully created under %s.' % (len(entries), MYSUFFIX)) + topology_st.standalone.stop(timeout=1) + + cores = fnmatch.filter(os.listdir(path), 'core.*') + for core in cores: + core = os.path.join(path, core) + topology_st.standalone.log.info('cores are %s' % core) + assert not os.path.isfile(core) + + log.info('Testcase PASSED') + + +if __name__ == '__main__': + # Run isolated + # -s for DEBUG mode + CURRENT_FILE = os.path.realpath(__file__) + pytest.main("-s %s" % CURRENT_FILE) diff --git a/dirsrvtests/tests/tickets/ticket48893_test.py b/dirsrvtests/tests/tickets/ticket48893_test.py new file mode 100644 index 0000000..7b811d9 --- /dev/null +++ b/dirsrvtests/tests/tickets/ticket48893_test.py @@ -0,0 +1,53 @@ +import pytest +from lib389.utils import * +from lib389.topologies import topology_st + +from lib389._constants import DEFAULT_SUFFIX, HOST_STANDALONE, PORT_STANDALONE + +pytestmark = pytest.mark.tier2 + +DEBUGGING = os.getenv('DEBUGGING', False) + +if DEBUGGING: + logging.getLogger(__name__).setLevel(logging.DEBUG) +else: + logging.getLogger(__name__).setLevel(logging.INFO) + +log = logging.getLogger(__name__) + + +def _attr_present(conn): + results = conn.search_s('cn=config', ldap.SCOPE_SUBTREE, '(objectClass=*)') + if DEBUGGING: + print(results) + if len(results) > 0: + return True + return False + + +def test_ticket48893(topology_st): + """ + Test that anonymous has NO VIEW to cn=config + """ + + if DEBUGGING: + # Add debugging steps(if any)... + pass + + # Do an anonymous bind + conn = ldap.initialize("ldap://%s:%s" % (HOST_STANDALONE, PORT_STANDALONE)) + conn.simple_bind_s() + + # Make sure that we cannot see what's in cn=config as anonymous + assert (not _attr_present(conn)) + + conn.unbind_s() + + log.info('Test PASSED') + + +if __name__ == '__main__': + # Run isolated + # -s for DEBUG mode + CURRENT_FILE = os.path.realpath(__file__) + pytest.main("-s %s" % CURRENT_FILE) diff --git a/dirsrvtests/tests/tickets/ticket48896_test.py b/dirsrvtests/tests/tickets/ticket48896_test.py new file mode 100644 index 0000000..a189758 --- /dev/null +++ b/dirsrvtests/tests/tickets/ticket48896_test.py @@ -0,0 +1,139 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2016 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. 
+# --- END COPYRIGHT BLOCK --- +# +import pytest +from lib389.tasks import * +from lib389.utils import * +from lib389.topologies import topology_st + +from lib389._constants import DEFAULT_SUFFIX, DN_DM, PASSWORD + +# Skip on older versions +pytestmark = [pytest.mark.tier2, + pytest.mark.skipif(ds_is_older('1.3.6'), reason="Not implemented")] + +logging.getLogger(__name__).setLevel(logging.DEBUG) +log = logging.getLogger(__name__) + +CONFIG_DN = 'cn=config' +UID = 'buser123' +TESTDN = 'uid=%s,' % UID + DEFAULT_SUFFIX + + +def check_attr_val(topology_st, dn, attr, expected): + try: + centry = topology_st.standalone.search_s(dn, ldap.SCOPE_BASE, 'cn=*') + if centry: + val = centry[0].getValue(attr) + if val == expected: + log.info('Default value of %s is %s' % (attr, expected)) + else: + log.info('Default value of %s is not %s, but %s' % (attr, expected, val)) + assert False + else: + log.fatal('Failed to get %s' % dn) + assert False + except ldap.LDAPError as e: + log.fatal('Failed to search ' + dn + ': ' + e.message['desc']) + assert False + + +def replace_pw(server, curpw, newpw, expstr, rc): + log.info('Binding as {%s, %s}' % (TESTDN, curpw)) + server.simple_bind_s(TESTDN, curpw) + + hit = 0 + log.info('Replacing password: %s -> %s, which should %s' % (curpw, newpw, expstr)) + try: + server.modify_s(TESTDN, [(ldap.MOD_REPLACE, 'userPassword', ensure_bytes(newpw))]) + except Exception as e: + log.info("Exception (expected): %s" % type(e).__name__) + hit = 1 + assert isinstance(e, rc) + + if (0 != rc) and (0 == hit): + log.info('Expected to fail with %s, but passed' % rc.__name__) + assert False + + log.info('PASSED') + + +def test_ticket48896(topology_st): + """ + """ + log.info('Testing Ticket 48896 - Default Setting for passwordMinTokenLength does not work') + + log.info("Setting global password policy with password syntax.") + topology_st.standalone.simple_bind_s(DN_DM, PASSWORD) + topology_st.standalone.modify_s(CONFIG_DN, [(ldap.MOD_REPLACE, 'passwordCheckSyntax', b'on'), + (ldap.MOD_REPLACE, 'nsslapd-pwpolicy-local', b'on')]) + + config = topology_st.standalone.search_s(CONFIG_DN, ldap.SCOPE_BASE, 'cn=*') + mintokenlen = config[0].getValue('passwordMinTokenLength') + history = config[0].getValue('passwordInHistory') + + log.info('Default passwordMinTokenLength == %s' % mintokenlen) + log.info('Default passwordInHistory == %s' % history) + + log.info('Adding a user.') + curpw = 'password' + topology_st.standalone.add_s(Entry((TESTDN, + {'objectclass': "top person organizationalPerson inetOrgPerson".split(), + 'cn': 'test user', + 'sn': 'user', + 'userPassword': curpw}))) + + newpw = 'Abcd012+' + exp = 'be ok' + rc = 0 + replace_pw(topology_st.standalone, curpw, newpw, exp, rc) + + curpw = 'Abcd012+' + newpw = 'user' + exp = 'fail' + rc = ldap.CONSTRAINT_VIOLATION + replace_pw(topology_st.standalone, curpw, newpw, exp, rc) + + curpw = 'Abcd012+' + newpw = UID + exp = 'fail' + rc = ldap.CONSTRAINT_VIOLATION + replace_pw(topology_st.standalone, curpw, newpw, exp, rc) + + curpw = 'Abcd012+' + newpw = 'Tuse!1234' + exp = 'fail' + rc = ldap.CONSTRAINT_VIOLATION + replace_pw(topology_st.standalone, curpw, newpw, exp, rc) + + curpw = 'Abcd012+' + newpw = 'Tuse!0987' + exp = 'fail' + rc = ldap.CONSTRAINT_VIOLATION + replace_pw(topology_st.standalone, curpw, newpw, exp, rc) + + curpw = 'Abcd012+' + newpw = 'Tabc!1234' + exp = 'fail' + rc = ldap.CONSTRAINT_VIOLATION + replace_pw(topology_st.standalone, curpw, newpw, exp, rc) + + curpw = 'Abcd012+' + newpw = 'Direc+ory389' + exp = 'be ok' + rc = 
0 + replace_pw(topology_st.standalone, curpw, newpw, exp, rc) + + log.info('SUCCESS') + + +if __name__ == '__main__': + # Run isolated + # -s for DEBUG mode + CURRENT_FILE = os.path.realpath(__file__) + pytest.main("-s %s" % CURRENT_FILE) diff --git a/dirsrvtests/tests/tickets/ticket48906_test.py b/dirsrvtests/tests/tickets/ticket48906_test.py new file mode 100644 index 0000000..9a20c1d --- /dev/null +++ b/dirsrvtests/tests/tickets/ticket48906_test.py @@ -0,0 +1,302 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2016 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. +# --- END COPYRIGHT BLOCK --- +# +import fnmatch +import logging +import shutil + +import pytest +from lib389.tasks import * +from lib389.topologies import topology_st +from lib389.utils import * + +from lib389._constants import DEFAULT_SUFFIX, DN_LDBM, DN_DM, PASSWORD, SUFFIX + +# Skip on older versions +pytestmark = [pytest.mark.tier2, + pytest.mark.skipif(ds_is_older('1.3.6'), reason="Not implemented")] + +log = logging.getLogger(__name__) + +CONFIG_DN = 'cn=config' +RDN_VAL_SUFFIX = 'ticket48906.org' +MYSUFFIX = 'dc=%s' % RDN_VAL_SUFFIX +MYSUFFIXBE = 'ticket48906' + +SEARCHFILTER = '(objectclass=person)' + +OTHER_NAME = 'other_entry' +MAX_OTHERS = 10 +DBLOCK_DEFAULT = "10000" +DBLOCK_LDAP_UPDATE = "20000" +DBLOCK_EDIT_UPDATE = "40000" +DBLOCK_MIN_UPDATE = DBLOCK_DEFAULT +DBLOCK_ATTR_CONFIG = "nsslapd-db-locks" +DBLOCK_ATTR_MONITOR = "nsslapd-db-configured-locks" +DBLOCK_ATTR_GUARDIAN = "locks" + +DBCACHE_LDAP_UPDATE = "20000000" +DBCACHE_EDIT_UPDATE = "40000000" +DBCACHE_ATTR_CONFIG = "nsslapd-dbcachesize" +DBCACHE_ATTR_GUARDIAN = "cachesize" + +ldbm_config = "cn=config,%s" % (DN_LDBM) +bdb_ldbm_config = "cn=bdb,cn=config,%s" % (DN_LDBM) +ldbm_monitor = "cn=database,cn=monitor,%s" % (DN_LDBM) + + +def test_ticket48906_setup(topology_st): + """ + Check there is no core + Create a second backend + stop DS (that should trigger the core) + check there is no core + """ + log.info('Testing Ticket 48906 - ns-slapd crashes during the shutdown after adding attribute with a matching rule') + + # bind as directory manager + topology_st.standalone.log.info("Bind as %s" % DN_DM) + topology_st.standalone.simple_bind_s(DN_DM, PASSWORD) + + # check there is no core + entry = topology_st.standalone.search_s(CONFIG_DN, ldap.SCOPE_BASE, "(cn=config)", ['nsslapd-workingdir']) + assert entry + assert entry[0] + assert entry[0].hasAttr('nsslapd-workingdir') + path = entry[0].getValue('nsslapd-workingdir') + cores = fnmatch.filter(os.listdir(path), b'core.*') + assert len(cores) == 0 + + # add dummy entries on backend + for cpt in range(MAX_OTHERS): + name = "%s%d" % (OTHER_NAME, cpt) + topology_st.standalone.add_s(Entry(("cn=%s,%s" % (name, SUFFIX), { + 'objectclass': "top person".split(), + 'sn': name, + 'cn': name}))) + + topology_st.standalone.log.info("\n\n######################### SEARCH ALL ######################\n") + topology_st.standalone.log.info("Bind as %s and add the READ/SEARCH SELFDN aci" % DN_DM) + topology_st.standalone.simple_bind_s(DN_DM, PASSWORD) + + entries = topology_st.standalone.search_s(SUFFIX, ldap.SCOPE_SUBTREE, SEARCHFILTER) + topology_st.standalone.log.info("Returned %d entries.\n", len(entries)) + + assert MAX_OTHERS == len(entries) + + topology_st.standalone.log.info('%d person entries are successfully created under %s.' 
% (len(entries), SUFFIX)) + + +def _check_configured_value(topology_st, attr=DBLOCK_ATTR_CONFIG, expected_value=None, required=False): + entries = topology_st.standalone.search_s(bdb_ldbm_config, ldap.SCOPE_BASE, 'cn=bdb') + if required: + assert (entries[0].hasValue(attr)) + elif entries[0].hasValue(attr): + assert (entries[0].getValue(attr) == ensure_bytes(expected_value)) + + +def _check_monitored_value(topology_st, expected_value): + entries = topology_st.standalone.search_s(ldbm_monitor, ldap.SCOPE_BASE, '(objectclass=*)') + assert (entries[0].hasValue(DBLOCK_ATTR_MONITOR) and entries[0].getValue(DBLOCK_ATTR_MONITOR) == ensure_bytes(expected_value)) + + +def _check_dse_ldif_value(topology_st, attr=DBLOCK_ATTR_CONFIG, expected_value=DBLOCK_LDAP_UPDATE): + dse_ref_ldif = topology_st.standalone.confdir + '/dse.ldif' + dse_ref = open(dse_ref_ldif, "r") + + # Check the DBLOCK in dse.ldif + value = None + while True: + line = dse_ref.readline() + if (line == ''): + break + elif attr in line.lower(): + value = line.split()[1] + assert (value == expected_value) + break + assert (value) + + +def _check_guardian_value(topology_st, attr=DBLOCK_ATTR_CONFIG, expected_value=None): + guardian_file = os.path.join(topology_st.standalone.dbdir, 'guardian') + assert (os.path.exists(guardian_file)) + guardian = open(guardian_file, "r") + + value = None + while True: + line = guardian.readline() + if (line == ''): + break + elif attr in line.lower(): + value = line.split(':')[1].replace("\n", "") + print("line") + print(line) + print("expected_value") + print(expected_value) + print("value") + print(value) + assert (str(value) == str(expected_value)) + break + assert (value) + + +def test_ticket48906_dblock_default(topology_st): + topology_st.standalone.log.info('###################################') + topology_st.standalone.log.info('###') + topology_st.standalone.log.info('### Check that before any change config/monitor') + topology_st.standalone.log.info('### contains the default value') + topology_st.standalone.log.info('###') + topology_st.standalone.log.info('###################################') + _check_monitored_value(topology_st, DBLOCK_DEFAULT) + _check_configured_value(topology_st, attr=DBLOCK_ATTR_CONFIG, expected_value=DBLOCK_DEFAULT, required=False) + + +def test_ticket48906_dblock_ldap_update(topology_st): + topology_st.standalone.log.info('###################################') + topology_st.standalone.log.info('###') + topology_st.standalone.log.info('### Check that after ldap update') + topology_st.standalone.log.info('### - monitor contains DEFAULT') + topology_st.standalone.log.info('### - configured contains DBLOCK_LDAP_UPDATE') + topology_st.standalone.log.info('### - After stop dse.ldif contains DBLOCK_LDAP_UPDATE') + topology_st.standalone.log.info('### - After stop guardian contains DEFAULT') + topology_st.standalone.log.info('### In fact guardian should differ from config to recreate the env') + topology_st.standalone.log.info('### Check that after restart (DBenv recreated)') + topology_st.standalone.log.info('### - monitor contains DBLOCK_LDAP_UPDATE ') + topology_st.standalone.log.info('### - configured contains DBLOCK_LDAP_UPDATE') + topology_st.standalone.log.info('### - dse.ldif contains DBLOCK_LDAP_UPDATE') + topology_st.standalone.log.info('###') + topology_st.standalone.log.info('###################################') + + topology_st.standalone.modify_s(ldbm_config, [(ldap.MOD_REPLACE, DBLOCK_ATTR_CONFIG, ensure_bytes(DBLOCK_LDAP_UPDATE))]) + 
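+ # Sketch (hedged, reusing only names already defined in this file): right
+ # after the LDAP update, cn=config should already carry DBLOCK_LDAP_UPDATE
+ # while the monitor still reports the default, because the new lock count
+ # is only applied once the DB environment is re-created at restart.  The
+ # helpers called below verify exactly this split state; a hand-rolled
+ # equivalent would be roughly:
+ #
+ #   cfg = topology_st.standalone.search_s(bdb_ldbm_config, ldap.SCOPE_BASE, 'cn=bdb')
+ #   mon = topology_st.standalone.search_s(ldbm_monitor, ldap.SCOPE_BASE, '(objectclass=*)')
+ #   assert cfg[0].getValue(DBLOCK_ATTR_CONFIG) == ensure_bytes(DBLOCK_LDAP_UPDATE)
+ #   assert mon[0].getValue(DBLOCK_ATTR_MONITOR) == ensure_bytes(DBLOCK_DEFAULT)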
_check_monitored_value(topology_st, DBLOCK_DEFAULT) + _check_configured_value(topology_st, attr=DBLOCK_ATTR_CONFIG, expected_value=DBLOCK_LDAP_UPDATE, required=True) + + topology_st.standalone.stop(timeout=10) + _check_dse_ldif_value(topology_st, attr=DBLOCK_ATTR_CONFIG, expected_value=DBLOCK_LDAP_UPDATE) + _check_guardian_value(topology_st, attr=DBLOCK_ATTR_GUARDIAN, expected_value=DBLOCK_DEFAULT) + + # Check that the value is the same after restart and recreate + topology_st.standalone.start(timeout=10) + _check_monitored_value(topology_st, DBLOCK_LDAP_UPDATE) + _check_configured_value(topology_st, attr=DBLOCK_ATTR_CONFIG, expected_value=DBLOCK_LDAP_UPDATE, required=True) + _check_dse_ldif_value(topology_st, attr=DBLOCK_ATTR_CONFIG, expected_value=DBLOCK_LDAP_UPDATE) + + +def test_ticket48906_dblock_edit_update(topology_st): + topology_st.standalone.log.info('###################################') + topology_st.standalone.log.info('###') + topology_st.standalone.log.info('### Check that after stop') + topology_st.standalone.log.info('### - dse.ldif contains DBLOCK_LDAP_UPDATE') + topology_st.standalone.log.info('### - guardian contains DBLOCK_LDAP_UPDATE') + topology_st.standalone.log.info('### Check that edit dse+restart') + topology_st.standalone.log.info('### - monitor contains DBLOCK_EDIT_UPDATE') + topology_st.standalone.log.info('### - configured contains DBLOCK_EDIT_UPDATE') + topology_st.standalone.log.info('### Check that after stop') + topology_st.standalone.log.info('### - dse.ldif contains DBLOCK_EDIT_UPDATE') + topology_st.standalone.log.info('### - guardian contains DBLOCK_EDIT_UPDATE') + topology_st.standalone.log.info('###') + topology_st.standalone.log.info('###################################') + + topology_st.standalone.stop(timeout=10) + _check_dse_ldif_value(topology_st, attr=DBLOCK_ATTR_CONFIG, expected_value=DBLOCK_LDAP_UPDATE) + _check_guardian_value(topology_st, attr=DBLOCK_ATTR_GUARDIAN, expected_value=DBLOCK_LDAP_UPDATE) + + dse_ref_ldif = topology_st.standalone.confdir + '/dse.ldif' + dse_new_ldif = topology_st.standalone.confdir + '/dse.ldif.new' + dse_ref = open(dse_ref_ldif, "r") + dse_new = open(dse_new_ldif, "w") + + # Change the DBLOCK in dse.ldif + value = None + while True: + line = dse_ref.readline() + if (line == ''): + break + elif DBLOCK_ATTR_CONFIG in line.lower(): + value = line.split()[1] + assert (value == DBLOCK_LDAP_UPDATE) + new_value = [line.split()[0], DBLOCK_EDIT_UPDATE, ] + new_line = "%s\n" % " ".join(new_value) + else: + new_line = line + dse_new.write(new_line) + + assert (value) + dse_ref.close() + dse_new.close() + shutil.move(dse_new_ldif, dse_ref_ldif) + + # Check that the value is the same after restart + topology_st.standalone.start(timeout=10) + _check_monitored_value(topology_st, DBLOCK_EDIT_UPDATE) + _check_configured_value(topology_st, attr=DBLOCK_ATTR_CONFIG, expected_value=DBLOCK_EDIT_UPDATE, required=True) + + topology_st.standalone.stop(timeout=10) + _check_dse_ldif_value(topology_st, attr=DBLOCK_ATTR_CONFIG, expected_value=DBLOCK_EDIT_UPDATE) + _check_guardian_value(topology_st, attr=DBLOCK_ATTR_GUARDIAN, expected_value=DBLOCK_EDIT_UPDATE) + + +def test_ticket48906_dblock_robust(topology_st): + topology_st.standalone.log.info('###################################') + topology_st.standalone.log.info('###') + topology_st.standalone.log.info('### Check that the following values are rejected') + topology_st.standalone.log.info('### - negative value') + topology_st.standalone.log.info('### - insuffisant value') + 
topology_st.standalone.log.info('### - invalid value') + topology_st.standalone.log.info('### Check that minimum value is accepted') + topology_st.standalone.log.info('###') + topology_st.standalone.log.info('###################################') + + topology_st.standalone.start(timeout=10) + _check_monitored_value(topology_st, DBLOCK_EDIT_UPDATE) + _check_configured_value(topology_st, attr=DBLOCK_ATTR_CONFIG, expected_value=DBLOCK_EDIT_UPDATE, required=True) + + # Check negative value + try: + topology_st.standalone.modify_s(ldbm_config, [(ldap.MOD_REPLACE, DBLOCK_ATTR_CONFIG, b"-1")]) + except ldap.UNWILLING_TO_PERFORM: + pass + _check_monitored_value(topology_st, DBLOCK_EDIT_UPDATE) + _check_configured_value(topology_st, attr=DBLOCK_ATTR_CONFIG, expected_value=DBLOCK_LDAP_UPDATE, required=True) + + # Check insuffisant value + too_small = int(DBLOCK_MIN_UPDATE) - 1 + try: + topology_st.standalone.modify_s(ldbm_config, [(ldap.MOD_REPLACE, DBLOCK_ATTR_CONFIG, ensure_bytes(str(too_small)))]) + except ldap.UNWILLING_TO_PERFORM: + pass + _check_monitored_value(topology_st, DBLOCK_EDIT_UPDATE) + _check_configured_value(topology_st, attr=DBLOCK_ATTR_CONFIG, expected_value=DBLOCK_LDAP_UPDATE, required=True) + + # Check invalid value + try: + topology_st.standalone.modify_s(ldbm_config, [(ldap.MOD_REPLACE, DBLOCK_ATTR_CONFIG, b"dummy")]) + except ldap.UNWILLING_TO_PERFORM: + pass + _check_monitored_value(topology_st, DBLOCK_EDIT_UPDATE) + _check_configured_value(topology_st, attr=DBLOCK_ATTR_CONFIG, expected_value=DBLOCK_LDAP_UPDATE, required=True) + + # now check the minimal value + topology_st.standalone.modify_s(ldbm_config, [(ldap.MOD_REPLACE, DBLOCK_ATTR_CONFIG, ensure_bytes(DBLOCK_MIN_UPDATE))]) + _check_monitored_value(topology_st, DBLOCK_EDIT_UPDATE) + _check_configured_value(topology_st, attr=DBLOCK_ATTR_CONFIG, expected_value=DBLOCK_MIN_UPDATE, required=True) + + topology_st.standalone.stop(timeout=10) + _check_dse_ldif_value(topology_st, attr=DBLOCK_ATTR_CONFIG, expected_value=DBLOCK_MIN_UPDATE) + _check_guardian_value(topology_st, attr=DBLOCK_ATTR_GUARDIAN, expected_value=DBLOCK_EDIT_UPDATE) + + topology_st.standalone.start(timeout=10) + _check_monitored_value(topology_st, DBLOCK_MIN_UPDATE) + _check_configured_value(topology_st, attr=DBLOCK_ATTR_CONFIG, expected_value=DBLOCK_MIN_UPDATE, required=True) + + +if __name__ == '__main__': + # Run isolated + # -s for DEBUG mode + CURRENT_FILE = os.path.realpath(__file__) + pytest.main("-s %s" % CURRENT_FILE) diff --git a/dirsrvtests/tests/tickets/ticket48916_test.py b/dirsrvtests/tests/tickets/ticket48916_test.py new file mode 100644 index 0000000..3f2da4a --- /dev/null +++ b/dirsrvtests/tests/tickets/ticket48916_test.py @@ -0,0 +1,135 @@ +import pytest +from lib389.tasks import * +from lib389.utils import * +from lib389.topologies import topology_m2 + +DEBUGGING = os.getenv('DEBUGGING', False) + +if DEBUGGING: + logging.getLogger(__name__).setLevel(logging.DEBUG) +else: + logging.getLogger(__name__).setLevel(logging.INFO) + +log = logging.getLogger(__name__) +# Skip on older versions +pytestmark = [pytest.mark.tier2, + pytest.mark.skipif(ds_is_older('1.3.5'), reason="Not implemented")] + + + +def _create_user(inst, idnum): + inst.add_s(Entry( + ('uid=user%s,ou=People,%s' % (idnum, DEFAULT_SUFFIX), { + 'objectClass': 'top account posixAccount'.split(' '), + 'cn': 'user', + 'uid': 'user%s' % idnum, + 'homeDirectory': '/home/user%s' % idnum, + 'loginShell': '/bin/nologin', + 'gidNumber': '-1', + 'uidNumber': '-1', + }) + )) + + +def 
test_ticket48916(topology_m2): + """ + https://bugzilla.redhat.com/show_bug.cgi?id=1353629 + + This is an issue with ID exhaustion in DNA causing a crash. + + To access each DirSrv instance use: topology_m2.ms["master1"], topology_m2.ms["master2"], + ..., topology_m2.hub1, ..., topology_m2.consumer1,... + + + """ + + if DEBUGGING: + # Add debugging steps(if any)... + pass + + # Enable the plugin on both servers + + dna_m1 = topology_m2.ms["master1"].plugins.get('Distributed Numeric Assignment Plugin') + dna_m2 = topology_m2.ms["master2"].plugins.get('Distributed Numeric Assignment Plugin') + + # Configure it + # Create the container for the ranges to go into. + + topology_m2.ms["master1"].add_s(Entry( + ('ou=Ranges,%s' % DEFAULT_SUFFIX, { + 'objectClass': 'top organizationalUnit'.split(' '), + 'ou': 'Ranges', + }) + )) + + # Create the dnaAdmin? + + # For now we just pinch the dn from the dna_m* types, and add the relevant child config + # but in the future, this could be a better plugin template type from lib389 + + config_dn = dna_m1.dn + + topology_m2.ms["master1"].add_s(Entry( + ('cn=uids,%s' % config_dn, { + 'objectClass': 'top dnaPluginConfig'.split(' '), + 'cn': 'uids', + 'dnatype': 'uidNumber gidNumber'.split(' '), + 'dnafilter': '(objectclass=posixAccount)', + 'dnascope': '%s' % DEFAULT_SUFFIX, + 'dnaNextValue': '1', + 'dnaMaxValue': '50', + 'dnasharedcfgdn': 'ou=Ranges,%s' % DEFAULT_SUFFIX, + 'dnaThreshold': '0', + 'dnaRangeRequestTimeout': '60', + 'dnaMagicRegen': '-1', + 'dnaRemoteBindDN': 'uid=dnaAdmin,ou=People,%s' % DEFAULT_SUFFIX, + 'dnaRemoteBindCred': 'secret123', + 'dnaNextRange': '80-90' + }) + )) + + topology_m2.ms["master2"].add_s(Entry( + ('cn=uids,%s' % config_dn, { + 'objectClass': 'top dnaPluginConfig'.split(' '), + 'cn': 'uids', + 'dnatype': 'uidNumber gidNumber'.split(' '), + 'dnafilter': '(objectclass=posixAccount)', + 'dnascope': '%s' % DEFAULT_SUFFIX, + 'dnaNextValue': '61', + 'dnaMaxValue': '70', + 'dnasharedcfgdn': 'ou=Ranges,%s' % DEFAULT_SUFFIX, + 'dnaThreshold': '2', + 'dnaRangeRequestTimeout': '60', + 'dnaMagicRegen': '-1', + 'dnaRemoteBindDN': 'uid=dnaAdmin,ou=People,%s' % DEFAULT_SUFFIX, + 'dnaRemoteBindCred': 'secret123', + }) + )) + + # Enable the plugins + dna_m1.enable() + dna_m2.enable() + + # Restart the instances + topology_m2.ms["master1"].restart(60) + topology_m2.ms["master2"].restart(60) + + # Wait for a replication ..... + time.sleep(40) + + # Allocate the 10 members to exhaust + + for i in range(1, 11): + _create_user(topology_m2.ms["master2"], i) + + # Allocate the 11th + _create_user(topology_m2.ms["master2"], 11) + + log.info('Test PASSED') + + +if __name__ == '__main__': + # Run isolated + # -s for DEBUG mode + CURRENT_FILE = os.path.realpath(__file__) + pytest.main("-s %s" % CURRENT_FILE) diff --git a/dirsrvtests/tests/tickets/ticket48944_test.py b/dirsrvtests/tests/tickets/ticket48944_test.py new file mode 100644 index 0000000..c9c5152 --- /dev/null +++ b/dirsrvtests/tests/tickets/ticket48944_test.py @@ -0,0 +1,211 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2017 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. 
+# --- END COPYRIGHT BLOCK --- +# +import pytest +from lib389.tasks import * +from lib389.utils import * +from lib389.topologies import topology_m2c2 as topo + +from lib389._constants import (PLUGIN_ACCT_POLICY, DN_PLUGIN, DN_CONFIG, DN_DM, PASSWORD, + DEFAULT_SUFFIX, SUFFIX) + +pytestmark = pytest.mark.tier2 + +DEBUGGING = os.getenv("DEBUGGING", default=False) +if DEBUGGING: + logging.getLogger(__name__).setLevel(logging.DEBUG) +else: + logging.getLogger(__name__).setLevel(logging.INFO) +log = logging.getLogger(__name__) + +ACCPOL_DN = "cn={},{}".format(PLUGIN_ACCT_POLICY, DN_PLUGIN) +ACCP_CONF = "{},{}".format(DN_CONFIG, ACCPOL_DN) +USER_PW = 'Secret123' + + +def _last_login_time(topo, userdn, inst_name, last_login): + """Find lastLoginTime attribute value for a given master/consumer""" + + if 'master' in inst_name: + if (last_login == 'bind_n_check'): + topo.ms[inst_name].simple_bind_s(userdn, USER_PW) + topo.ms[inst_name].simple_bind_s(DN_DM, PASSWORD) + entry = topo.ms[inst_name].search_s(userdn, ldap.SCOPE_BASE, 'objectClass=*', ['lastLoginTime']) + else: + if (last_login == 'bind_n_check'): + topo.cs[inst_name].simple_bind_s(userdn, USER_PW) + topo.cs[inst_name].simple_bind_s(DN_DM, PASSWORD) + entry = topo.cs[inst_name].search_s(userdn, ldap.SCOPE_BASE, 'objectClass=*', ['lastLoginTime']) + lastLogin = entry[0].lastLoginTime + time.sleep(1) + return lastLogin + + +def _enable_plugin(topo, inst_name): + """Enable account policy plugin and configure required attributes""" + + log.info('Enable account policy plugin and configure required attributes') + if 'master' in inst_name: + log.info('Configure Account policy plugin on {}'.format(inst_name)) + topo.ms[inst_name].simple_bind_s(DN_DM, PASSWORD) + try: + topo.ms[inst_name].plugins.enable(name=PLUGIN_ACCT_POLICY) + topo.ms[inst_name].modify_s(ACCPOL_DN, [(ldap.MOD_REPLACE, 'nsslapd-pluginarg0', ensure_bytes(ACCP_CONF))]) + topo.ms[inst_name].modify_s(ACCP_CONF, [(ldap.MOD_REPLACE, 'alwaysrecordlogin', b'yes')]) + topo.ms[inst_name].modify_s(ACCP_CONF, [(ldap.MOD_REPLACE, 'stateattrname', b'lastLoginTime')]) + topo.ms[inst_name].modify_s(ACCP_CONF, [(ldap.MOD_REPLACE, 'altstateattrname', b'createTimestamp')]) + topo.ms[inst_name].modify_s(ACCP_CONF, [(ldap.MOD_REPLACE, 'specattrname', b'acctPolicySubentry')]) + topo.ms[inst_name].modify_s(ACCP_CONF, [(ldap.MOD_REPLACE, 'limitattrname', b'accountInactivityLimit')]) + topo.ms[inst_name].modify_s(ACCP_CONF, [(ldap.MOD_REPLACE, 'accountInactivityLimit', b'3600')]) + except ldap.LDAPError as e: + log.error('Failed to configure {} plugin for inst-{} error: {}'.format(PLUGIN_ACCT_POLICY, inst_name, str(e))) + topo.ms[inst_name].restart(timeout=10) + else: + log.info('Configure Account policy plugin on {}'.format(inst_name)) + topo.cs[inst_name].simple_bind_s(DN_DM, PASSWORD) + try: + topo.cs[inst_name].plugins.enable(name=PLUGIN_ACCT_POLICY) + topo.cs[inst_name].modify_s(ACCPOL_DN, [(ldap.MOD_REPLACE, 'nsslapd-pluginarg0', ensure_bytes(ACCP_CONF))]) + topo.cs[inst_name].modify_s(ACCP_CONF, [(ldap.MOD_REPLACE, 'alwaysrecordlogin', b'yes')]) + topo.cs[inst_name].modify_s(ACCP_CONF, [(ldap.MOD_REPLACE, 'stateattrname', b'lastLoginTime')]) + topo.cs[inst_name].modify_s(ACCP_CONF, [(ldap.MOD_REPLACE, 'altstateattrname', b'createTimestamp')]) + topo.cs[inst_name].modify_s(ACCP_CONF, [(ldap.MOD_REPLACE, 'specattrname', b'acctPolicySubentry')]) + topo.cs[inst_name].modify_s(ACCP_CONF, [(ldap.MOD_REPLACE, 'limitattrname', b'accountInactivityLimit')]) + topo.cs[inst_name].modify_s(ACCP_CONF, 
[(ldap.MOD_REPLACE, 'accountInactivityLimit', b'3600')]) + except ldap.LDAPError as e: + log.error('Failed to configure {} plugin for inst-{} error {}'.format(PLUGIN_ACCT_POLICY, inst_name, str(e))) + topo.cs[inst_name].restart(timeout=10) + + +def test_ticket48944(topo): + """On a read only replica invalid state info can accumulate + + :id: 833be131-f3bf-493e-97c6-3121438a07b1 + :feature: Account Policy Plugin + :setup: Two master and two consumer setup + :steps: 1. Configure Account policy plugin with alwaysrecordlogin set to yes + 2. Check if entries are synced across masters and consumers + 3. Stop all masters and consumers + 4. Start master1 and bind as user1 to create lastLoginTime attribute + 5. Start master2 and wait for the sync of lastLoginTime attribute + 6. Stop master1 and bind as user1 from master2 + 7. Check if lastLoginTime attribute is updated and greater than master1 + 8. Stop master2, start consumer1, consumer2 and then master2 + 9. Check if lastLoginTime attribute is updated on both consumers + 10. Bind as user1 to both consumers and check the value is updated + 11. Check if lastLoginTime attribute is not updated from consumers + 12. Start master1 and make sure the lastLoginTime attribute is not updated on consumers + 13. Bind as user1 from master1 and check if all masters and consumers have the same value + 14. Check error logs of consumers for "deletedattribute;deleted" message + :expectedresults: No accumulation of replica invalid state info on consumers + """ + + log.info("Ticket 48944 - On a read only replica invalid state info can accumulate") + user_name = 'newbzusr' + tuserdn = 'uid={}1,ou=people,{}'.format(user_name, SUFFIX) + inst_list = ['master1', 'master2', 'consumer1', 'consumer2'] + for inst_name in inst_list: + _enable_plugin(topo, inst_name) + + log.info('Sleep for 10secs for the server to come up') + time.sleep(10) + log.info('Add few entries to server and check if entries are replicated') + for nos in range(10): + userdn = 'uid={}{},ou=people,{}'.format(user_name, nos, SUFFIX) + try: + topo.ms['master1'].add_s(Entry((userdn, { + 'objectclass': 'top person'.split(), + 'objectclass': 'inetorgperson', + 'cn': user_name, + 'sn': user_name, + 'userpassword': USER_PW, + 'mail': '{}@redhat.com'.format(user_name)}))) + except ldap.LDAPError as e: + log.error('Failed to add {} user: error {}'.format(userdn, e.message['desc'])) + raise e + + log.info('Checking if entries are synced across masters and consumers') + entries_m1 = topo.ms['master1'].search_s(SUFFIX, ldap.SCOPE_SUBTREE, 'uid={}*'.format(user_name), ['uid=*']) + exp_entries = str(entries_m1).count('dn: uid={}*'.format(user_name)) + entries_m2 = topo.ms['master2'].search_s(SUFFIX, ldap.SCOPE_SUBTREE, 'uid={}*'.format(user_name), ['uid=*']) + act_entries = str(entries_m2).count('dn: uid={}*'.format(user_name)) + assert act_entries == exp_entries + inst_list = ['consumer1', 'consumer2'] + for inst in inst_list: + entries_other = topo.cs[inst].search_s(SUFFIX, ldap.SCOPE_SUBTREE, 'uid={}*'.format(user_name), ['uid=*']) + act_entries = str(entries_other).count('dn: uid={}*'.format(user_name)) + assert act_entries == exp_entries + + topo.ms['master2'].stop(timeout=10) + topo.ms['master1'].stop(timeout=10) + topo.cs['consumer1'].stop(timeout=10) + topo.cs['consumer2'].stop(timeout=10) + + topo.ms['master1'].start(timeout=10) + lastLogin_m1_1 = _last_login_time(topo, tuserdn, 'master1', 'bind_n_check') + + log.info('Start master2 to sync lastLoginTime attribute from master1') + 
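+ # Note (assumption, stated for the asserts that follow): lastLoginTime is
+ # stored as a fixed-width GeneralizedTime string such as b'20170106120000Z',
+ # so comparing the raw values with ">" or "==" orders them the same way as
+ # the actual login times.  Minimal illustration of that property:
+ #
+ #   t1 = b'20170106120000Z'
+ #   t2 = b'20170106120101Z'   # a later login
+ #   assert t2 > t1            # byte-wise order matches chronological order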
topo.ms['master2'].start(timeout=10) + time.sleep(5) + log.info('Stop master1') + topo.ms['master1'].stop(timeout=10) + log.info('Bind as user1 to master2 and check if lastLoginTime attribute is greater than master1') + lastLogin_m2_1 = _last_login_time(topo, tuserdn, 'master2', 'bind_n_check') + assert lastLogin_m2_1 > lastLogin_m1_1 + + log.info('Start all servers except master1') + topo.ms['master2'].stop(timeout=10) + topo.cs['consumer1'].start(timeout=10) + topo.cs['consumer2'].start(timeout=10) + topo.ms['master2'].start(timeout=10) + time.sleep(10) + log.info('Check if consumers are updated with lastLoginTime attribute value from master2') + lastLogin_c1_1 = _last_login_time(topo, tuserdn, 'consumer1', 'check') + assert lastLogin_c1_1 == lastLogin_m2_1 + + lastLogin_c2_1 = _last_login_time(topo, tuserdn, 'consumer2', 'check') + assert lastLogin_c2_1 == lastLogin_m2_1 + + log.info('Check if lastLoginTime update in consumers not synced to master2') + lastLogin_c1_2 = _last_login_time(topo, tuserdn, 'consumer1', 'bind_n_check') + assert lastLogin_c1_2 > lastLogin_m2_1 + + lastLogin_c2_2 = _last_login_time(topo, tuserdn, 'consumer2', 'bind_n_check') + assert lastLogin_c2_2 > lastLogin_m2_1 + + time.sleep(10) # Allow replication to kick in + lastLogin_m2_2 = _last_login_time(topo, tuserdn, 'master2', 'check') + assert lastLogin_m2_2 == lastLogin_m2_1 + + log.info('Start master1 and check if its updating its older lastLoginTime attribute to consumers') + topo.ms['master1'].start(timeout=10) + time.sleep(10) + lastLogin_c1_3 = _last_login_time(topo, tuserdn, 'consumer1', 'check') + assert lastLogin_c1_3 == lastLogin_c1_2 + + lastLogin_c2_3 = _last_login_time(topo, tuserdn, 'consumer2', 'check') + assert lastLogin_c2_3 == lastLogin_c2_2 + + log.info('Check if lastLoginTime update from master2 is synced to all masters and consumers') + lastLogin_m2_3 = _last_login_time(topo, tuserdn, 'master2', 'bind_n_check') + time.sleep(10) # Allow replication to kick in + lastLogin_m1_2 = _last_login_time(topo, tuserdn, 'master1', 'check') + lastLogin_c1_4 = _last_login_time(topo, tuserdn, 'consumer1', 'check') + lastLogin_c2_4 = _last_login_time(topo, tuserdn, 'consumer2', 'check') + assert lastLogin_m2_3 == lastLogin_m1_2 == lastLogin_c2_4 == lastLogin_c1_4 + + log.info('Checking consumer error logs for replica invalid state info') + assert not topo.cs['consumer2'].ds_error_log.match('.*deletedattribute;deleted.*') + assert not topo.cs['consumer1'].ds_error_log.match('.*deletedattribute;deleted.*') + + +if __name__ == '__main__': + # Run isolated + # -s for DEBUG mode + CURRENT_FILE = os.path.realpath(__file__) + pytest.main("-s %s" % CURRENT_FILE) diff --git a/dirsrvtests/tests/tickets/ticket48956_test.py b/dirsrvtests/tests/tickets/ticket48956_test.py new file mode 100644 index 0000000..a2a1b3a --- /dev/null +++ b/dirsrvtests/tests/tickets/ticket48956_test.py @@ -0,0 +1,128 @@ +import pytest +import subprocess +from lib389.tasks import * +from lib389.utils import * +from lib389.topologies import topology_st + +from lib389._constants import (PLUGIN_ACCT_POLICY, DEFAULT_SUFFIX, DN_DM, PASSWORD, SUFFIX, + BACKEND_NAME) + +pytestmark = pytest.mark.tier2 + +DEBUGGING = os.getenv('DEBUGGING', False) + +RDN_LONG_SUFFIX = 'this' +LONG_SUFFIX = "dc=%s,dc=is,dc=a,dc=very,dc=long,dc=suffix,dc=so,dc=long,dc=suffix,dc=extremely,dc=long,dc=suffix" % RDN_LONG_SUFFIX +LONG_SUFFIX_BE = 'ticket48956' + +ACCT_POLICY_PLUGIN_DN = 'cn=%s,cn=plugins,cn=config' % PLUGIN_ACCT_POLICY +ACCT_POLICY_CONFIG_DN = 'cn=config,%s' % 
ACCT_POLICY_PLUGIN_DN + +INACTIVITY_LIMIT = '9' +SEARCHFILTER = '(objectclass=*)' + +TEST_USER = 'ticket48956user' +TEST_USER_PW = '%s' % TEST_USER + +if DEBUGGING: + logging.getLogger(__name__).setLevel(logging.DEBUG) +else: + logging.getLogger(__name__).setLevel(logging.INFO) + +log = logging.getLogger(__name__) + + +def _check_status(topology_st, user, expected): + nsaccountstatus = os.path.join(topology_st.standalone.ds_paths.sbin_dir, "ns-accountstatus.pl") + + try: + output = subprocess.check_output([nsaccountstatus, '-Z', topology_st.standalone.serverid, + '-D', DN_DM, '-w', PASSWORD, + '-p', str(topology_st.standalone.port), '-I', user]) + except subprocess.CalledProcessError as err: + output = err.output + + log.info("output: %s" % output) + + if expected in output: + return True + return False + + +def _check_inactivity(topology_st, mysuffix): + ACCT_POLICY_DN = 'cn=Account Inactivation Policy,%s' % mysuffix + log.info("\n######################### Adding Account Policy entry: %s ######################\n" % ACCT_POLICY_DN) + topology_st.standalone.add_s( + Entry((ACCT_POLICY_DN, {'objectclass': "top ldapsubentry extensibleObject accountpolicy".split(), + 'accountInactivityLimit': INACTIVITY_LIMIT}))) + time.sleep(1) + + TEST_USER_DN = 'uid=%s,%s' % (TEST_USER, mysuffix) + log.info("\n######################### Adding Test User entry: %s ######################\n" % TEST_USER_DN) + topology_st.standalone.add_s( + Entry((TEST_USER_DN, {'objectclass': "top person organizationalPerson inetOrgPerson".split(), + 'cn': TEST_USER, + 'sn': TEST_USER, + 'givenname': TEST_USER, + 'userPassword': TEST_USER_PW, + 'acctPolicySubentry': ACCT_POLICY_DN}))) + time.sleep(1) + + # Setting the lastLoginTime + try: + topology_st.standalone.simple_bind_s(TEST_USER_DN, TEST_USER_PW) + except ldap.CONSTRAINT_VIOLATION as e: + log.error('CONSTRAINT VIOLATION ' + e.message['desc']) + topology_st.standalone.simple_bind_s(DN_DM, PASSWORD) + + assert (_check_status(topology_st, TEST_USER_DN, b'- activated')) + + time.sleep(int(INACTIVITY_LIMIT) + 5) + assert (_check_status(topology_st, TEST_USER_DN, b'- inactivated (inactivity limit exceeded')) + + +def test_ticket48956(topology_st): + """Write your testcase here... + + Also, if you need any testcase initialization, + please, write additional fixture for that(include finalizer). + + """ + + topology_st.standalone.modify_s(ACCT_POLICY_PLUGIN_DN, + [(ldap.MOD_REPLACE, 'nsslapd-pluginarg0', ensure_bytes(ACCT_POLICY_CONFIG_DN))]) + + topology_st.standalone.modify_s(ACCT_POLICY_CONFIG_DN, [(ldap.MOD_REPLACE, 'alwaysrecordlogin', b'yes'), + (ldap.MOD_REPLACE, 'stateattrname', b'lastLoginTime'), + (ldap.MOD_REPLACE, 'altstateattrname', b'createTimestamp'), + (ldap.MOD_REPLACE, 'specattrname', b'acctPolicySubentry'), + (ldap.MOD_REPLACE, 'limitattrname', + b'accountInactivityLimit')]) + + # Enable the plugins + topology_st.standalone.plugins.enable(name=PLUGIN_ACCT_POLICY) + topology_st.standalone.restart(timeout=10) + + # Check inactivity on standard suffix (short) + _check_inactivity(topology_st, SUFFIX) + + # Check inactivity on a long suffix + topology_st.standalone.backend.create(LONG_SUFFIX, {BACKEND_NAME: LONG_SUFFIX_BE}) + topology_st.standalone.mappingtree.create(LONG_SUFFIX, bename=LONG_SUFFIX_BE) + topology_st.standalone.add_s(Entry((LONG_SUFFIX, { + 'objectclass': "top domain".split(), + 'dc': RDN_LONG_SUFFIX}))) + _check_inactivity(topology_st, LONG_SUFFIX) + + if DEBUGGING: + # Add debugging steps(if any)... 
+ pass + + log.info('Test PASSED') + + +if __name__ == '__main__': + # Run isolated + # -s for DEBUG mode + CURRENT_FILE = os.path.realpath(__file__) + pytest.main("-s %s" % CURRENT_FILE) diff --git a/dirsrvtests/tests/tickets/ticket48961_test.py b/dirsrvtests/tests/tickets/ticket48961_test.py new file mode 100644 index 0000000..7011e61 --- /dev/null +++ b/dirsrvtests/tests/tickets/ticket48961_test.py @@ -0,0 +1,145 @@ +import pytest +from lib389.utils import * +from lib389.topologies import topology_st + +DEBUGGING = os.getenv('DEBUGGING', False) + +if DEBUGGING: + logging.getLogger(__name__).setLevel(logging.DEBUG) +else: + logging.getLogger(__name__).setLevel(logging.INFO) + +log = logging.getLogger(__name__) + +# Skip on older versions +pytestmark = [pytest.mark.tier2, + pytest.mark.skipif(ds_is_older('1.3.6'), reason="Not implemented")] + + +def test_ticket48961_storagescheme(topology_st): + """ + Test deleting of the storage scheme. + """ + + default = topology_st.standalone.config.get_attr_val('passwordStorageScheme') + # Change it + topology_st.standalone.config.set('passwordStorageScheme', 'CLEAR') + # Now delete it + topology_st.standalone.config.remove('passwordStorageScheme', None) + # Now check it's been reset. + assert (default == topology_st.standalone.config.get_attr_val('passwordStorageScheme')) + log.info(default) + log.info('Test PASSED') + + +def _reset_config_value(inst, attrname): + # None to value here means remove all instances of the attr. + inst.config.remove(attrname, None) + newval = inst.config.get_attr_val(attrname) + log.info("Reset %s to %s" % (attrname, newval)) + + +def test_ticket48961_deleteall(topology_st): + """ + Test that we can delete all valid attrs, and that a few are rejected. + """ + attr_to_test = { + 'nsslapd-listenhost': 'localhost', + 'nsslapd-securelistenhost': 'localhost', + 'nsslapd-allowed-sasl-mechanisms': 'GSSAPI', + 'nsslapd-svrtab': 'Some bogus data', # This one could reset? + } + attr_to_fail = { + # These are the values that should always be dn dse.ldif too + 'nsslapd-localuser': 'dirsrv', + 'nsslapd-defaultnamingcontext': 'dc=example,dc=com', # Can't delete + 'nsslapd-accesslog': '/opt/dirsrv/var/log/dirsrv/slapd-standalone/access', + 'nsslapd-auditlog': '/opt/dirsrv/var/log/dirsrv/slapd-standalone/audit', + 'nsslapd-errorlog': '/opt/dirsrv/var/log/dirsrv/slapd-standalone/errors', + 'nsslapd-tmpdir': '/tmp', + 'nsslapd-rundir': '/opt/dirsrv/var/run/dirsrv', + 'nsslapd-bakdir': '/opt/dirsrv/var/lib/dirsrv/slapd-standalone/bak', + 'nsslapd-certdir': '/opt/dirsrv/etc/dirsrv/slapd-standalone', + 'nsslapd-instancedir': '/opt/dirsrv/lib/dirsrv/slapd-standalone', + 'nsslapd-ldifdir': '/opt/dirsrv/var/lib/dirsrv/slapd-standalone/ldif', + 'nsslapd-lockdir': '/opt/dirsrv/var/lock/dirsrv/slapd-standalone', + 'nsslapd-schemadir': '/opt/dirsrv/etc/dirsrv/slapd-standalone/schema', + 'nsslapd-workingdir': '/opt/dirsrv/var/log/dirsrv/slapd-standalone', + 'nsslapd-localhost': 'localhost.localdomain', + # These can't be reset, but might be in dse.ldif. Probably in libglobs. + 'nsslapd-certmap-basedn': 'cn=certmap,cn=config', + 'nsslapd-port': '38931', # Can't delete + 'nsslapd-secureport': '636', # Can't delete + 'nsslapd-conntablesize': '1048576', + 'nsslapd-rootpw': '{SSHA512}...', + # These are hardcoded server magic. 
+ 'nsslapd-hash-filters': 'off', # Can't delete + 'nsslapd-requiresrestart': 'cn=config:nsslapd-port', # Can't change + 'nsslapd-plugin': 'cn=case ignore string syntax,cn=plugins,cn=config', # Can't change + 'nsslapd-privatenamespaces': 'cn=schema', # Can't change + 'nsslapd-allowed-to-delete-attrs': 'None', # Can't delete + 'nsslapd-accesslog-list': 'List!', # Can't delete + 'nsslapd-auditfaillog-list': 'List!', + 'nsslapd-auditlog-list': 'List!', + 'nsslapd-errorlog-list': 'List!', + 'nsslapd-config': 'cn=config', + 'nsslapd-versionstring': '389-Directory/1.3.6.0', + 'objectclass': '', + 'cn': '', + # These are the odd values + 'nsslapd-backendconfig': 'cn=config,cn=userRoot,cn=ldbm database,cn=plugins,cn=config', # Doesn't exist? + 'nsslapd-betype': 'ldbm database', # Doesn't exist? + 'nsslapd-connection-buffer': 1, # Has an ldap problem + 'nsslapd-malloc-mmap-threshold': '-10', # Defunct anyway + 'nsslapd-malloc-mxfast': '-10', + 'nsslapd-malloc-trim-threshold': '-10', + 'nsslapd-referralmode': '', + 'nsslapd-saslpath': '', + 'passwordadmindn': '', + } + + config_entry = topology_st.standalone.config.raw_entry() + + for attr in config_entry.getAttrs(): + if attr.lower() in attr_to_fail: + # We know this will fail, so skip + pass + else: + log.info("Reseting %s" % (attr)) + # Check if we have to do some override of this attr. + # Some attributes need specific syntax, so we override just these. + newval = topology_st.standalone.config.get_attr_vals(attr) + log.info(" --> %s" % newval) + if attr.lower() in attr_to_test: + newval = attr_to_test[attr] + log.info("override --> %s" % newval) + # We need to set the attr to its own value + # so that it's "written". + topology_st.standalone.config.set(attr, newval) + # Now we can really reset + _reset_config_value(topology_st.standalone, attr) + + for attr in sorted(attr_to_fail): + log.info("Removing %s" % attr) + try: + _reset_config_value(topology_st.standalone, attr) + # Shouldn't reach here, the reset should fail! 
+ assert (False) + except ldap.UNWILLING_TO_PERFORM: + log.info('Change was rejected') + except ldap.OPERATIONS_ERROR: + log.info('Change was rejected') + except ldap.OBJECT_CLASS_VIOLATION: + log.info('Change was rejected') + except ldap.NO_SUCH_ATTRIBUTE: + log.info("This attribute isn't part of cn=config, so is already default!") + pass + + topology_st.standalone.restart() + + +if __name__ == '__main__': + # Run isolated + # -s for DEBUG mode + CURRENT_FILE = os.path.realpath(__file__) + pytest.main("-s %s" % CURRENT_FILE) diff --git a/dirsrvtests/tests/tickets/ticket48973_test.py b/dirsrvtests/tests/tickets/ticket48973_test.py new file mode 100644 index 0000000..5adca3d --- /dev/null +++ b/dirsrvtests/tests/tickets/ticket48973_test.py @@ -0,0 +1,306 @@ +import os +import sys +import time +import ldap +import logging +import pytest +from lib389 import DirSrv, Entry +from lib389._constants import * +from lib389.properties import * +from lib389.tasks import * +from lib389.utils import * + +pytestmark = pytest.mark.tier2 + +logging.getLogger(__name__).setLevel(logging.DEBUG) +log = logging.getLogger(__name__) + +installation1_prefix = None + +NEW_ACCOUNT = "new_account" +MAX_ACCOUNTS = 100 +HOMEHEAD = "/home/xyz_" + +MIXED_VALUE="/home/mYhOmEdIrEcToRy" +LOWER_VALUE="/home/myhomedirectory" +HOMEDIRECTORY_INDEX = 'cn=homeDirectory,cn=index,cn=userRoot,cn=ldbm database,cn=plugins,cn=config' +HOMEDIRECTORY_CN="homedirectory" +MATCHINGRULE = 'nsMatchingRule' +UIDNUMBER_INDEX = 'cn=uidnumber,cn=index,cn=userRoot,cn=ldbm database,cn=plugins,cn=config' +UIDNUMBER_CN="uidnumber" + + +class TopologyStandalone(object): + def __init__(self, standalone): + standalone.open() + self.standalone = standalone + + +@pytest.fixture(scope="module") +def topology(request): + # Creating standalone instance ... 
+ standalone = DirSrv(verbose=False) + args_instance[SER_HOST] = HOST_STANDALONE + args_instance[SER_PORT] = PORT_STANDALONE + args_instance[SER_SERVERID_PROP] = SERVERID_STANDALONE + args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX + args_standalone = args_instance.copy() + standalone.allocate(args_standalone) + instance_standalone = standalone.exists() + if instance_standalone: + standalone.delete() + standalone.create() + standalone.open() + + # Delete each instance in the end + def fin(): + #standalone.delete() + pass + request.addfinalizer(fin) + + return TopologyStandalone(standalone) + +def _find_notes_accesslog(file, log_pattern): + try: + _find_notes_accesslog.last_pos += 1 + except AttributeError: + _find_notes_accesslog.last_pos = 0 + + + #position to the where we were last time + found = None + file.seek(_find_notes_accesslog.last_pos) + + while True: + line = file.readline() + found = log_pattern.search(line) + if ((line == '') or (found)): + break + + + if found: + # assuming that the result is the next line of the search + line = file.readline() + _find_notes_accesslog.last_pos = file.tell() + return line + else: + _find_notes_accesslog.last_pos = file.tell() + return None + +def _find_next_notes(topology, Filter): + topology.standalone.stop(timeout=10) + file_path = topology.standalone.accesslog + file_obj = open(file_path, "r") + regex = re.compile("filter=\"\(%s" % Filter) + result = _find_notes_accesslog(file_obj, regex) + file_obj.close() + topology.standalone.start(timeout=10) + + return result + +# +# find the next message showing an indexing failure +# (starting at the specified posistion) +# and return the position in the error log +# If there is not such message -> return None +def _find_next_indexing_failure(topology, pattern, position): + file_path = topology.standalone.errlog + file_obj = open(file_path, "r") + + try: + file_obj.seek(position + 1) + except: + file_obj.close() + return None + + # Check if the MR configuration failure occurs + regex = re.compile(pattern) + while True: + line = file_obj.readline() + found = regex.search(line) + if ((line == '') or (found)): + break + + + + if (found): + log.info("The configuration of a specific MR fails") + log.info(line) + result = file_obj.tell() + file_obj.close() + return result + else: + file_obj.close() + result = None + + return result +# +# find the first message showing an indexing failure +# and return the position in the error log +# If there is not such message -> return None +def _find_first_indexing_failure(topology, pattern): + file_path = topology.standalone.errlog + file_obj = open(file_path, "r") + + # Check if the MR configuration failure occurs + regex = re.compile(pattern) + while True: + line = file_obj.readline() + found = regex.search(line) + if ((line == '') or (found)): + break + + + + if (found): + log.info("pattern is found: \"%s\"") + log.info(line) + result = file_obj.tell() + file_obj.close() + else: + result = None + + return result + +def _check_entry(topology, filterHead=None, filterValueUpper=False, entry_ext=None, found=False, indexed=False): + # Search with CES with exact value -> find an entry + indexed + if filterValueUpper: + homehead = HOMEHEAD.upper() + else: + homehead = HOMEHEAD + searchedHome = "%s%d" % (homehead, entry_ext) + Filter = "(%s=%s)" % (filterHead, searchedHome) + log.info("Search %s" % Filter) + ents = topology.standalone.search_s(SUFFIX, ldap.SCOPE_SUBTREE, Filter) + if found: + assert len(ents) == 1 + assert ents[0].hasAttr('homedirectory') + valueHome 
= ensure_bytes("%s%d" % (HOMEHEAD, entry_ext)) + assert valueHome in ents[0].getValues('homedirectory') + else: + assert len(ents) == 0 + + result = _find_next_notes(topology, Filter) + log.info("result=%s" % result) + if indexed: + assert not "notes=U" in result + else: + assert "notes=U" in result + +def test_ticket48973_init(topology): + log.info("Initialization: add dummy entries for the tests") + for cpt in range(MAX_ACCOUNTS): + name = "%s%d" % (NEW_ACCOUNT, cpt) + topology.standalone.add_s(Entry(("uid=%s,%s" % (name, SUFFIX), { + 'objectclass': "top posixAccount".split(), + 'uid': name, + 'cn': name, + 'uidnumber': str(111), + 'gidnumber': str(222), + 'homedirectory': "%s%d" % (HOMEHEAD, cpt)}))) + +def test_ticket48973_ces_not_indexed(topology): + """ + Check that homedirectory is not indexed + - do a search unindexed + """ + + entry_ext = 0 + searchedHome = "%s%d" % (HOMEHEAD, entry_ext) + Filter = "(homeDirectory=%s)" % searchedHome + log.info("Search %s" % Filter) + ents = topology.standalone.search_s(SUFFIX, ldap.SCOPE_SUBTREE, Filter) + assert len(ents) == 1 + assert ents[0].hasAttr('homedirectory') + assert ensure_bytes(searchedHome) in ents[0].getValues('homedirectory') + + result = _find_next_notes(topology, Filter) + log.info("result=%s" % result) + assert "notes=U" in result + + +def test_ticket48973_homeDirectory_indexing(topology): + """ + Check that homedirectory is indexed with syntax (ces) + - triggers index + - no failure on index + - do a search indexed with exact value (ces) and no default_mr_indexer_create warning + - do a search indexed with uppercase value (ces) and no default_mr_indexer_create warning + """ + entry_ext = 1 + + try: + ent = topology.standalone.getEntry(HOMEDIRECTORY_INDEX, ldap.SCOPE_BASE) + except ldap.NO_SUCH_OBJECT: + topology.standalone.add_s(Entry((HOMEDIRECTORY_INDEX, { + 'objectclass': "top nsIndex".split(), + 'cn': HOMEDIRECTORY_CN, + 'nsSystemIndex': 'false', + 'nsIndexType': 'eq'}))) + + args = {TASK_WAIT: True} + topology.standalone.tasks.reindex(suffix=SUFFIX, attrname='homeDirectory', args=args) + + log.info("Check indexing succeeded with no specified matching rule") + assert not _find_first_indexing_failure(topology, "unknown or invalid matching rule") + assert not _find_first_indexing_failure(topology, "default_mr_indexer_create: warning") + assert not _find_first_indexing_failure(topology, "default_mr_indexer_create - Plugin .* does not handle") + + _check_entry(topology, filterHead="homeDirectory", filterValueUpper=False, entry_ext=entry_ext,found=True, indexed=True) + + _check_entry(topology, filterHead="homeDirectory:caseExactIA5Match:", filterValueUpper=False, entry_ext=entry_ext, found=True, indexed=False) + + _check_entry(topology, filterHead="homeDirectory:caseIgnoreIA5Match:", filterValueUpper=False, entry_ext=entry_ext, found=True, indexed=False) + + _check_entry(topology, filterHead="homeDirectory", filterValueUpper=True, entry_ext=entry_ext, found=False, indexed=True) + + _check_entry(topology, filterHead="homeDirectory:caseExactIA5Match:", filterValueUpper=True, entry_ext=entry_ext, found=False, indexed=False) + + _check_entry(topology, filterHead="homeDirectory:caseIgnoreIA5Match:", filterValueUpper=True, entry_ext=entry_ext, found=True, indexed=False) + + + +def test_ticket48973_homeDirectory_caseExactIA5Match_caseIgnoreIA5Match_indexing(topology): + """ + Check that homedirectory is indexed with syntax (ces && cis) + - triggers index + - no failure on index + - do a search indexed (ces) and no 
default_mr_indexer_create warning + - do a search indexed (cis) and no default_mr_indexer_create warning + """ + entry_ext = 4 + + log.info("\n\nindex homeDirectory in caseExactIA5Match and caseIgnoreIA5Match") + EXACTIA5_MR_NAME=b'caseExactIA5Match' + IGNOREIA5_MR_NAME=b'caseIgnoreIA5Match' + EXACT_MR_NAME=b'caseExactMatch' + IGNORE_MR_NAME=b'caseIgnoreMatch' + mod = [(ldap.MOD_REPLACE, MATCHINGRULE, (EXACT_MR_NAME, IGNORE_MR_NAME, EXACTIA5_MR_NAME, IGNOREIA5_MR_NAME))] + topology.standalone.modify_s(HOMEDIRECTORY_INDEX, mod) + + args = {TASK_WAIT: True} + topology.standalone.tasks.reindex(suffix=SUFFIX, attrname='homeDirectory', args=args) + + log.info("Check indexing succeeded with no specified matching rule") + assert not _find_first_indexing_failure(topology, "unknown or invalid matching rule") + assert not _find_first_indexing_failure(topology, "default_mr_indexer_create: warning") + assert not _find_first_indexing_failure(topology, "default_mr_indexer_create - Plugin .* does not handle") + + _check_entry(topology, filterHead="homeDirectory", filterValueUpper=False, entry_ext=entry_ext, found=True, indexed=True) + + _check_entry(topology, filterHead="homeDirectory:caseExactIA5Match:", filterValueUpper=False, entry_ext=entry_ext, found=True, indexed=True) + + _check_entry(topology, filterHead="homeDirectory:caseIgnoreIA5Match:", filterValueUpper=False, entry_ext=entry_ext, found=True, indexed=True) + + _check_entry(topology, filterHead="homeDirectory", filterValueUpper=True, entry_ext=entry_ext, found=False, indexed=True) + + _check_entry(topology, filterHead="homeDirectory:caseExactIA5Match:", filterValueUpper=True, entry_ext=entry_ext, found=False, indexed=True) + + _check_entry(topology, filterHead="homeDirectory:caseIgnoreIA5Match:", filterValueUpper=True, entry_ext=entry_ext, found=True, indexed=True) + + +if __name__ == '__main__': + # Run isolated + # -s for DEBUG mode + CURRENT_FILE = os.path.realpath(__file__) + pytest.main("-s %s" % CURRENT_FILE) diff --git a/dirsrvtests/tests/tickets/ticket49008_test.py b/dirsrvtests/tests/tickets/ticket49008_test.py new file mode 100644 index 0000000..970f42b --- /dev/null +++ b/dirsrvtests/tests/tickets/ticket49008_test.py @@ -0,0 +1,125 @@ +import pytest +from lib389.tasks import * +from lib389.utils import * +from lib389.topologies import topology_m3 as T + +from lib389._constants import DEFAULT_SUFFIX, PLUGIN_MEMBER_OF + +# Skip on older versions +pytestmark = [pytest.mark.tier2, + pytest.mark.skipif(ds_is_older('1.3.6'), reason="Not implemented")] + +DEBUGGING = os.getenv("DEBUGGING", default=False) +if DEBUGGING: + logging.getLogger(__name__).setLevel(logging.DEBUG) +else: + logging.getLogger(__name__).setLevel(logging.INFO) +log = logging.getLogger(__name__) + + +def test_ticket49008(T): + A = T.ms['master1'] + B = T.ms['master2'] + C = T.ms['master3'] + + A.enableReplLogging() + B.enableReplLogging() + C.enableReplLogging() + + AtoB = A.agreement.list(suffix=DEFAULT_SUFFIX)[0].dn + AtoC = A.agreement.list(suffix=DEFAULT_SUFFIX)[1].dn + CtoA = C.agreement.list(suffix=DEFAULT_SUFFIX)[0].dn + CtoB = C.agreement.list(suffix=DEFAULT_SUFFIX)[1].dn + + # we want replication in a line A <==> B <==> C + A.agreement.pause(AtoC) + C.agreement.pause(CtoA) + + # Enable memberOf on Master B + B.plugins.enable(name=PLUGIN_MEMBER_OF) + + # Set the auto OC to an objectclass that does NOT allow memberOf + B.modify_s('cn=MemberOf Plugin,cn=plugins,cn=config', + [(ldap.MOD_REPLACE, 'memberofAutoAddOC', b'referral')]) + B.restart(timeout=10) + + # add 
a few entries allowing memberof + for i in range(1, 6): + name = "userX{}".format(i) + dn = "cn={},{}".format(name, DEFAULT_SUFFIX) + A.add_s(Entry((dn, {'objectclass': "top person inetuser".split(), + 'sn': name, 'cn': name}))) + + # add a few entries not allowing memberof + for i in range(1, 6): + name = "userY{}".format(i) + dn = "cn={},{}".format(name, DEFAULT_SUFFIX) + A.add_s(Entry((dn, {'objectclass': "top person".split(), + 'sn': name, 'cn': name}))) + + time.sleep(15) + + A_entries = A.search_s(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE, + '(objectClass=person)') + B_entries = B.search_s(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE, + '(objectClass=person)') + C_entries = C.search_s(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE, + '(objectClass=person)') + + log.debug("A contains: %s", A_entries) + log.debug("B contains: %s", B_entries) + log.debug("C contains: %s", C_entries) + + assert len(A_entries) == len(B_entries) + assert len(B_entries) == len(C_entries) + + # add a group with members allowing memberof + dn = "cn=g1,{}".format(DEFAULT_SUFFIX) + A.add_s(Entry((dn, {'objectclass': "top groupOfNames".split(), + 'description': "Test Owned Group {}".format(name), + 'member': "cn=userX1,{}".format(DEFAULT_SUFFIX), + 'cn': "g1"}))) + + # check ruv on m2 before applying failing op + time.sleep(10) + B_RUV = B.search_s("cn=config", ldap.SCOPE_SUBTREE, + "(&(objectclass=nsds5replica)(nsDS5ReplicaRoot={}))".format(DEFAULT_SUFFIX), + ['nsds50ruv']) + elements = B_RUV[0].getValues('nsds50ruv') + ruv_before = 'ruv_before' + for ruv in elements: + if b'replica 2' in ruv: + ruv_before = ruv + + # add a group with members allowing memberof and members which don't + # the op will fail on M2 + dn = "cn=g2,{}".format(DEFAULT_SUFFIX) + A.add_s(Entry((dn, {'objectclass': "top groupOfNames".split(), + 'description': "Test Owned Group {}".format(name), + 'member': ["cn=userX1,{}".format(DEFAULT_SUFFIX), + "cn=userX2,{}".format(DEFAULT_SUFFIX), + "cn=userY1,{}".format(DEFAULT_SUFFIX)], + 'cn': "g2"}))) + + # check ruv on m2 after applying failing op + time.sleep(10) + B_RUV = B.search_s("cn=config", ldap.SCOPE_SUBTREE, + "(&(objectclass=nsds5replica)(nsDS5ReplicaRoot={}))".format(DEFAULT_SUFFIX), + ['nsds50ruv']) + elements = B_RUV[0].getValues('nsds50ruv') + ruv_after = 'ruv_after' + for ruv in elements: + if b'replica 2' in ruv: + ruv_after = ruv + + log.info('ruv before fail: {}'.format(ruv_before)) + log.info('ruv after fail: {}'.format(ruv_after)) + # the ruv should not have changed + assert ruv_before == ruv_after + + +if __name__ == '__main__': + # Run isolated + # -s for DEBUG mode + CURRENT_FILE = os.path.realpath(__file__) + pytest.main("-s {}".format(CURRENT_FILE)) diff --git a/dirsrvtests/tests/tickets/ticket49020_test.py b/dirsrvtests/tests/tickets/ticket49020_test.py new file mode 100644 index 0000000..740c542 --- /dev/null +++ b/dirsrvtests/tests/tickets/ticket49020_test.py @@ -0,0 +1,73 @@ +import time +import ldap +import logging +import pytest +from lib389 import DirSrv, Entry, tools, tasks +from lib389.tools import DirSrvTools +from lib389._constants import * +from lib389.properties import * +from lib389.tasks import * +from lib389.utils import * +from lib389.topologies import topology_m3 as T +import socket + +# Skip on older versions +pytestmark = [pytest.mark.tier2, + pytest.mark.skipif(ds_is_older('1.3.6'), reason="Not implemented")] + +DEBUGGING = os.getenv("DEBUGGING", default=False) +if DEBUGGING: + logging.getLogger(__name__).setLevel(logging.DEBUG) +else: + 
logging.getLogger(__name__).setLevel(logging.INFO) +log = logging.getLogger(__name__) + + +def test_ticket49020(T): + A = T.ms['master1'] + B = T.ms['master2'] + C = T.ms['master3'] + + A.enableReplLogging() + B.enableReplLogging() + C.enableReplLogging() + + AtoB = A.agreement.list(suffix=DEFAULT_SUFFIX)[0].dn + AtoC = A.agreement.list(suffix=DEFAULT_SUFFIX)[1].dn + CtoB = C.agreement.list(suffix=DEFAULT_SUFFIX)[1].dn + + A.agreement.pause(AtoB) + C.agreement.pause(CtoB) + time.sleep(5) + name = "userX" + dn = "cn={},{}".format(name, DEFAULT_SUFFIX) + A.add_s(Entry((dn, {'objectclass': "top person".split(), + 'sn': name,'cn': name}))) + + A.agreement.init(DEFAULT_SUFFIX, socket.gethostname(), PORT_MASTER_3) + time.sleep(5) + for i in range(1,11): + name = "userY{}".format(i) + dn = "cn={},{}".format(name, DEFAULT_SUFFIX) + A.add_s(Entry((dn, {'objectclass': "top person".split(), + 'sn': name,'cn': name}))) + time.sleep(5) + C.agreement.resume(CtoB) + + time.sleep(5) + A_entries = A.search_s(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE, + '(objectClass=person)') + B_entries = B.search_s(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE, + '(objectClass=person)') + C_entries = C.search_s(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE, + '(objectClass=person)') + + assert len(A_entries) == len(C_entries) + assert len(B_entries) == len(A_entries) - 11 + + +if __name__ == '__main__': + # Run isolated + # -s for DEBUG mode + CURRENT_FILE = os.path.realpath(__file__) + pytest.main("-s %s" % CURRENT_FILE) diff --git a/dirsrvtests/tests/tickets/ticket49039_test.py b/dirsrvtests/tests/tickets/ticket49039_test.py new file mode 100644 index 0000000..8938b14 --- /dev/null +++ b/dirsrvtests/tests/tickets/ticket49039_test.py @@ -0,0 +1,119 @@ +import time +import ldap +import logging +import pytest +import os +from lib389 import Entry +from lib389._constants import * +from lib389.properties import * +from lib389.tasks import * +from lib389.utils import * +from lib389.topologies import topology_st as topo +from lib389.pwpolicy import PwPolicyManager + + +pytestmark = pytest.mark.tier2 + +DEBUGGING = os.getenv("DEBUGGING", default=False) +if DEBUGGING: + logging.getLogger(__name__).setLevel(logging.DEBUG) +else: + logging.getLogger(__name__).setLevel(logging.INFO) +log = logging.getLogger(__name__) + +USER_DN = 'uid=user,dc=example,dc=com' + + +def test_ticket49039(topo): + """Test "password must change" verses "password min age". Min age should not + block password update if the password was reset. 
+ """ + + # Setup SSL (for ldappasswd test) + topo.standalone.enable_tls() + + # Configure password policy + try: + policy = PwPolicyManager(topo.standalone) + policy.set_global_policy(properties={'nsslapd-pwpolicy-local': 'on', + 'passwordMustChange': 'on', + 'passwordExp': 'on', + 'passwordMaxAge': '86400000', + 'passwordMinAge': '8640000', + 'passwordChange': 'on'}) + except ldap.LDAPError as e: + log.fatal('Failed to set password policy: ' + str(e)) + + # Add user, bind, and set password + try: + topo.standalone.add_s(Entry((USER_DN, { + 'objectclass': 'top extensibleObject'.split(), + 'uid': 'user1', + 'userpassword': PASSWORD + }))) + except ldap.LDAPError as e: + log.fatal('Failed to add user: error ' + e.args[0]['desc']) + assert False + + # Reset password as RootDN + try: + topo.standalone.modify_s(USER_DN, [(ldap.MOD_REPLACE, 'userpassword', ensure_bytes(PASSWORD))]) + except ldap.LDAPError as e: + log.fatal('Failed to bind: error ' + e.args[0]['desc']) + assert False + + time.sleep(1) + + # Reset password as user + try: + topo.standalone.simple_bind_s(USER_DN, PASSWORD) + except ldap.LDAPError as e: + log.fatal('Failed to bind: error ' + e.args[0]['desc']) + assert False + + try: + topo.standalone.modify_s(USER_DN, [(ldap.MOD_REPLACE, 'userpassword', ensure_bytes(PASSWORD))]) + except ldap.LDAPError as e: + log.fatal('Failed to change password: error ' + e.args[0]['desc']) + assert False + + ################################### + # Make sure ldappasswd also works + ################################### + + # Reset password as RootDN + try: + topo.standalone.simple_bind_s(DN_DM, PASSWORD) + except ldap.LDAPError as e: + log.fatal('Failed to bind as rootdn: error ' + e.args[0]['desc']) + assert False + + try: + topo.standalone.modify_s(USER_DN, [(ldap.MOD_REPLACE, 'userpassword', ensure_bytes(PASSWORD))]) + except ldap.LDAPError as e: + log.fatal('Failed to bind: error ' + e.args[0]['desc']) + assert False + + time.sleep(1) + + # Run ldappasswd as the User. + os.environ["LDAPTLS_CACERTDIR"] = topo.standalone.get_cert_dir() + cmd = ('ldappasswd' + ' -h ' + topo.standalone.host + ' -Z -p 38901 -D ' + USER_DN + + ' -w password -a password -s password2 ' + USER_DN) + os.system(cmd) + time.sleep(1) + + try: + topo.standalone.simple_bind_s(USER_DN, "password2") + except ldap.LDAPError as e: + log.fatal('Failed to bind: error ' + e.args[0]['desc']) + assert False + + log.info('Test Passed') + + +if __name__ == '__main__': + # Run isolated + # -s for DEBUG mode + CURRENT_FILE = os.path.realpath(__file__) + pytest.main("-s %s" % CURRENT_FILE) diff --git a/dirsrvtests/tests/tickets/ticket49072_test.py b/dirsrvtests/tests/tickets/ticket49072_test.py new file mode 100644 index 0000000..c91ae24 --- /dev/null +++ b/dirsrvtests/tests/tickets/ticket49072_test.py @@ -0,0 +1,114 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2017 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. 
+# --- END COPYRIGHT BLOCK --- +# +import pytest +import subprocess +from lib389.tasks import * +from lib389.utils import * +from lib389.topologies import topology_st as topo + +from lib389._constants import (DEFAULT_SUFFIX, PLUGIN_MEMBER_OF, DN_DM, PASSWORD, SERVERID_STANDALONE, + SUFFIX) + +pytestmark = pytest.mark.tier2 + +logging.getLogger(__name__).setLevel(logging.DEBUG) +log = logging.getLogger(__name__) + +TEST_FILTER = '(objectClass=person' +TEST_BASEDN = 'dc=testdb,dc=com' +FILTER = '(objectClass=person)' +FIXUP_MEMOF = 'fixup-memberof.pl' + + +def test_ticket49072_basedn(topo): + """memberOf fixup task does not validate args + + :id: dce9b898-119d-42b8-a236-1130e59bfe18 + :feature: memberOf + :setup: Standalone instance, with memberOf plugin + :steps: 1. Run fixup-memberOf.pl with invalid DN entry + 2. Check if error log reports "Failed to get be backend" + :expectedresults: Fixup-memberOf.pl task should complete, but errors logged. + """ + + log.info("Ticket 49072 memberof fixup task with invalid basedn...") + topo.standalone.plugins.enable(name=PLUGIN_MEMBER_OF) + topo.standalone.restart(timeout=10) + + if ds_is_older('1.3'): + inst_dir = topo.standalone.get_inst_dir() + memof_task = os.path.join(inst_dir, FIXUP_MEMOF) + try: + output = subprocess.check_output([memof_task, '-D', DN_DM, '-w', PASSWORD, '-b', TEST_BASEDN, '-f', FILTER]) + except subprocess.CalledProcessError as err: + output = err.output + else: + sbin_dir = topo.standalone.get_sbin_dir() + memof_task = os.path.join(sbin_dir, FIXUP_MEMOF) + try: + output = subprocess.check_output( + [memof_task, '-D', DN_DM, '-w', PASSWORD, '-b', TEST_BASEDN, '-Z', SERVERID_STANDALONE, '-f', FILTER]) + except subprocess.CalledProcessError as err: + output = err.output + log.info('output: {}'.format(output)) + expected = b"Successfully added task entry" + assert expected in output + log_entry = topo.standalone.ds_error_log.match('.*Failed to get be backend.*') + log.info('Error log out: {}'.format(log_entry)) + assert topo.standalone.ds_error_log.match('.*Failed to get be backend.*') + + +def test_ticket49072_filter(topo): + """memberOf fixup task does not validate args + + :id: dde9e893-119d-42c8-a236-1190e56bfe98 + :feature: memberOf + :setup: Standalone instance, with memberOf plugin + :steps: 1. Run fixup-memberOf.pl with invalid filter + 2. Check if error log reports "Bad search filter" + :expectedresults: Fixup-memberOf.pl task should complete, but errors logged. 
+ """ + log.info("Ticket 49072 memberof fixup task with invalid filter...") + log.info('Wait for 10 secs and check if task is completed') + time.sleep(10) + task_memof = 'cn=memberOf task,cn=tasks,cn=config' + if topo.standalone.search_s(task_memof, ldap.SCOPE_SUBTREE, 'cn=memberOf_fixup*', ['dn:']): + log.info('memberof task is still running, wait for +10 secs') + time.sleep(10) + + if ds_is_older('1.3'): + inst_dir = topo.standalone.get_inst_dir() + memof_task = os.path.join(inst_dir, FIXUP_MEMOF) + try: + output = subprocess.check_output([memof_task, '-D', DN_DM, '-w', PASSWORD, '-b', SUFFIX, '-f', TEST_FILTER]) + except subprocess.CalledProcessError as err: + output = err.output + else: + sbin_dir = topo.standalone.get_sbin_dir() + memof_task = os.path.join(sbin_dir, FIXUP_MEMOF) + try: + output = subprocess.check_output( + [memof_task, '-D', DN_DM, '-w', PASSWORD, '-b', SUFFIX, '-Z', SERVERID_STANDALONE, '-f', TEST_FILTER]) + except subprocess.CalledProcessError as err: + output = err.output + log.info('output: {}'.format(output)) + expected = b"Successfully added task entry" + assert expected in output + log_entry = topo.standalone.ds_error_log.match('.*Bad search filter.*') + log.info('Error log out: {}'.format(log_entry)) + assert topo.standalone.ds_error_log.match('.*Bad search filter.*') + + log.info("Ticket 49072 complete: memberOf fixup task does not validate args") + + +if __name__ == '__main__': + # Run isolated + # -s for DEBUG mode + CURRENT_FILE = os.path.realpath(__file__) + pytest.main("-s %s" % CURRENT_FILE) diff --git a/dirsrvtests/tests/tickets/ticket49073_test.py b/dirsrvtests/tests/tickets/ticket49073_test.py new file mode 100644 index 0000000..23dcff2 --- /dev/null +++ b/dirsrvtests/tests/tickets/ticket49073_test.py @@ -0,0 +1,150 @@ +import pytest +from lib389.tasks import * +from lib389.utils import * +from lib389.topologies import topology_m2 + +from lib389._constants import (PLUGIN_MEMBER_OF, DEFAULT_SUFFIX, SUFFIX, HOST_MASTER_2, + PORT_MASTER_2) + +# Skip on older versions +pytestmark = [pytest.mark.tier2, + pytest.mark.skipif(ds_is_older('1.3.6'), reason="Not implemented")] + +DEBUGGING = os.getenv('DEBUGGING', False) +GROUP_DN = ("cn=group," + DEFAULT_SUFFIX) + +if DEBUGGING: + logging.getLogger(__name__).setLevel(logging.DEBUG) +else: + logging.getLogger(__name__).setLevel(logging.INFO) +log = logging.getLogger(__name__) + + +def _add_group_with_members(topology_m2): + # Create group + try: + topology_m2.ms["master1"].add_s(Entry((GROUP_DN, + {'objectclass': 'top groupofnames'.split(), + 'cn': 'group'}))) + except ldap.LDAPError as e: + log.fatal('Failed to add group: error ' + e.message['desc']) + assert False + + # Add members to the group - set timeout + log.info('Adding members to the group...') + for idx in range(1, 5): + try: + MEMBER_VAL = ("uid=member%d,%s" % (idx, DEFAULT_SUFFIX)) + topology_m2.ms["master1"].modify_s(GROUP_DN, + [(ldap.MOD_ADD, + 'member', + MEMBER_VAL)]) + except ldap.LDAPError as e: + log.fatal('Failed to update group: member (%s) - error: %s' % + (MEMBER_VAL, e.message['desc'])) + assert False + + +def _check_memberof(master, presence_flag): + # Check that members have memberof attribute on M1 + for idx in range(1, 5): + try: + USER_DN = ("uid=member%d,%s" % (idx, DEFAULT_SUFFIX)) + ent = master.getEntry(USER_DN, ldap.SCOPE_BASE, "(objectclass=*)") + if presence_flag: + assert ent.hasAttr('memberof') and ent.getValue('memberof') == GROUP_DN + else: + assert not ent.hasAttr('memberof') + except ldap.LDAPError as e: + 
log.fatal('Failed to retrieve user (%s): error %s' % (USER_DN, e.args[0]['desc']))
+            assert False
+
+
+def _check_entry_exist(master, dn):
+    attempt = 0
+    while attempt <= 10:
+        try:
+            ent = master.getEntry(dn, ldap.SCOPE_BASE, "(objectclass=*)")
+            break
+        except ldap.NO_SUCH_OBJECT:
+            attempt = attempt + 1
+            time.sleep(1)
+        except ldap.LDAPError as e:
+            log.fatal('Failed to retrieve user (%s): error %s' % (dn, e.args[0]['desc']))
+            assert False
+    # attempt only exceeds 10 when the entry never showed up
+    assert attempt <= 10
+
+
+def test_ticket49073(topology_m2):
+    """Check that memberOf values are present on both masters after a total
+    init when memberOf is excluded from incremental replication but not
+    from the total init attribute list.
+    """
+    topology_m2.ms["master1"].plugins.enable(name=PLUGIN_MEMBER_OF)
+    topology_m2.ms["master1"].restart(timeout=10)
+    topology_m2.ms["master2"].plugins.enable(name=PLUGIN_MEMBER_OF)
+    topology_m2.ms["master2"].restart(timeout=10)
+
+    # Configure fractional replication: exclude memberOf from incremental
+    # updates, but keep it in the total init attribute list
+    ents = topology_m2.ms["master1"].agreement.list(suffix=SUFFIX)
+    assert len(ents) == 1
+    log.info('update %s to add nsDS5ReplicatedAttributeListTotal' % ents[0].dn)
+    topology_m2.ms["master1"].modify_s(ents[0].dn,
+                                       [(ldap.MOD_REPLACE,
+                                         'nsDS5ReplicatedAttributeListTotal',
+                                         b'(objectclass=*) $ EXCLUDE '),
+                                        (ldap.MOD_REPLACE,
+                                         'nsDS5ReplicatedAttributeList',
+                                         b'(objectclass=*) $ EXCLUDE memberOf')])
+    topology_m2.ms["master1"].restart(timeout=10)
+
+    #
+    # create some users and a group
+    #
+    log.info('create users and group...')
+    for idx in range(1, 5):
+        try:
+            USER_DN = ("uid=member%d,%s" % (idx, DEFAULT_SUFFIX))
+            topology_m2.ms["master1"].add_s(Entry((USER_DN,
+                                                   {'objectclass': 'top extensibleObject'.split(),
+                                                    'uid': 'member%d' % (idx)})))
+        except ldap.LDAPError as e:
+            log.fatal('Failed to add user (%s): error %s' % (USER_DN, e.args[0]['desc']))
+            assert False
+
+    _check_entry_exist(topology_m2.ms["master2"], "uid=member4,%s" % (DEFAULT_SUFFIX))
+    _add_group_with_members(topology_m2)
+    _check_entry_exist(topology_m2.ms["master2"], GROUP_DN)
+
+    # Check that for a regular update memberOf is set on both sides (the plugin is enabled on both)
+    time.sleep(5)
+    _check_memberof(topology_m2.ms["master1"], True)
+    _check_memberof(topology_m2.ms["master2"], True)
+
+    # reinit with fractional definition
+    ents = topology_m2.ms["master1"].agreement.list(suffix=SUFFIX)
+    assert len(ents) == 1
+    topology_m2.ms["master1"].agreement.init(SUFFIX, HOST_MASTER_2, PORT_MASTER_2)
+    topology_m2.ms["master1"].waitForReplInit(ents[0].dn)
+
+    # Check that after a total update memberOf is set on both sides,
+    # because memberOf is NOT excluded from the total init
+    time.sleep(5)
+    _check_memberof(topology_m2.ms["master1"], True)
+    _check_memberof(topology_m2.ms["master2"], True)
+
+    if DEBUGGING:
+        # Add debugging steps(if any)...
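+        # Minimal debugging sketch (not part of the original test): dump the
+        # memberOf state of every member on both masters so a failure can be
+        # inspected by hand.  Assumes the member entries added above exist.
+        for name, inst in topology_m2.ms.items():
+            for idx in range(1, 5):
+                member_dn = "uid=member%d,%s" % (idx, DEFAULT_SUFFIX)
+                ent = inst.getEntry(member_dn, ldap.SCOPE_BASE, "(objectclass=*)")
+                if ent.hasAttr('memberof'):
+                    log.debug('%s on %s: memberof=%s' % (member_dn, name, ent.getValues('memberof')))
+                else:
+                    log.debug('%s on %s: no memberof' % (member_dn, name))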
+ pass + + +if __name__ == '__main__': + # Run isolated + # -s for DEBUG mode + CURRENT_FILE = os.path.realpath(__file__) + pytest.main("-s %s" % CURRENT_FILE) diff --git a/dirsrvtests/tests/tickets/ticket49076_test.py b/dirsrvtests/tests/tickets/ticket49076_test.py new file mode 100644 index 0000000..74b6312 --- /dev/null +++ b/dirsrvtests/tests/tickets/ticket49076_test.py @@ -0,0 +1,105 @@ +import time +import ldap +import logging +import pytest +from lib389 import DirSrv, Entry, tools, tasks +from lib389.tools import DirSrvTools +from lib389._constants import * +from lib389.properties import * +from lib389.tasks import * +from lib389.utils import * +from lib389.topologies import topology_st as topo + +pytestmark = pytest.mark.tier2 + +DEBUGGING = os.getenv("DEBUGGING", default=False) +if DEBUGGING: + logging.getLogger(__name__).setLevel(logging.DEBUG) +else: + logging.getLogger(__name__).setLevel(logging.INFO) +log = logging.getLogger(__name__) + +ldbm_config = "cn=config,%s" % (DN_LDBM) +txn_begin_flag = "nsslapd-db-transaction-wait" +TEST_USER_DN = 'cn=test,%s' % SUFFIX +TEST_USER = "test" + +def _check_configured_value(topology_st, attr=txn_begin_flag, expected_value=None, required=False): + entries = topology_st.standalone.search_s(ldbm_config, ldap.SCOPE_BASE, 'cn=config') + if required: + assert (entries[0].hasValue(attr)) + if entries[0].hasValue(attr): + topology_st.standalone.log.info('Current value is %s' % entries[0].getValue(attr)) + assert (entries[0].getValue(attr) == ensure_bytes(expected_value)) + +def _update_db(topology_st): + topology_st.standalone.add_s( + Entry((TEST_USER_DN, {'objectclass': "top person organizationalPerson inetOrgPerson".split(), + 'cn': TEST_USER, + 'sn': TEST_USER, + 'givenname': TEST_USER}))) + topology_st.standalone.delete_s(TEST_USER_DN) + +def test_ticket49076(topo): + """Write your testcase here... + + Also, if you need any testcase initialization, + please, write additional fixture for that(include finalizer). + """ + + # check default value is DB_TXN_NOWAIT + _check_configured_value(topo, expected_value="off") + + # tests we are able to update DB + _update_db(topo) + + # switch to wait mode + topo.standalone.modify_s(ldbm_config, + [(ldap.MOD_REPLACE, txn_begin_flag, b"on")]) + # check default value is DB_TXN_NOWAIT + _check_configured_value(topo, expected_value="on") + _update_db(topo) + + + # switch back to "normal mode" + topo.standalone.modify_s(ldbm_config, + [(ldap.MOD_REPLACE, txn_begin_flag, b"off")]) + # check default value is DB_TXN_NOWAIT + _check_configured_value(topo, expected_value="off") + # tests we are able to update DB + _update_db(topo) + + # check that settings are not reset by restart + topo.standalone.modify_s(ldbm_config, + [(ldap.MOD_REPLACE, txn_begin_flag, b"on")]) + # check default value is DB_TXN_NOWAIT + _check_configured_value(topo, expected_value="on") + _update_db(topo) + topo.standalone.restart(timeout=10) + _check_configured_value(topo, expected_value="on") + _update_db(topo) + + # switch default value + topo.standalone.modify_s(ldbm_config, + [(ldap.MOD_DELETE, txn_begin_flag, None)]) + # check default value is DB_TXN_NOWAIT + _check_configured_value(topo, expected_value="off") + # tests we are able to update DB + _update_db(topo) + topo.standalone.restart(timeout=10) + _check_configured_value(topo, expected_value="off") + # tests we are able to update DB + _update_db(topo) + + + if DEBUGGING: + # Add debugging steps(if any)... 
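+        # Minimal debugging sketch (not part of the original test): log the
+        # final state of the transaction-wait flag so it can be compared with
+        # the values set above.
+        entries = topo.standalone.search_s(ldbm_config, ldap.SCOPE_BASE, 'cn=config')
+        if entries[0].hasValue(txn_begin_flag):
+            log.debug('%s is currently: %s' % (txn_begin_flag, entries[0].getValue(txn_begin_flag)))
+        else:
+            log.debug('%s is not set, the server default applies' % txn_begin_flag)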
+ pass + + +if __name__ == '__main__': + # Run isolated + # -s for DEBUG mode + CURRENT_FILE = os.path.realpath(__file__) + pytest.main("-s %s" % CURRENT_FILE) + diff --git a/dirsrvtests/tests/tickets/ticket49095_test.py b/dirsrvtests/tests/tickets/ticket49095_test.py new file mode 100644 index 0000000..3c49fca --- /dev/null +++ b/dirsrvtests/tests/tickets/ticket49095_test.py @@ -0,0 +1,87 @@ +import time +import ldap +import logging +import pytest +from lib389 import DirSrv, Entry, tools, tasks +from lib389.tools import DirSrvTools +from lib389._constants import * +from lib389.properties import * +from lib389.tasks import * +from lib389.utils import * +from lib389.topologies import topology_st as topo + +pytestmark = pytest.mark.tier2 + +DEBUGGING = os.getenv("DEBUGGING", default=False) +if DEBUGGING: + logging.getLogger(__name__).setLevel(logging.DEBUG) +else: + logging.getLogger(__name__).setLevel(logging.INFO) +log = logging.getLogger(__name__) + +USER_DN = 'uid=testuser,dc=example,dc=com' +acis = ['(targetattr != "tele*") (version 3.0;acl "test case";allow (read,compare,search)(userdn = "ldap:///anyone");)', + '(targetattr != "TELE*") (version 3.0;acl "test case";allow (read,compare,search)(userdn = "ldap:///anyone");)', + '(targetattr != "telephonenum*") (version 3.0;acl "test case";allow (read,compare,search)(userdn = "ldap:///anyone");)', + '(targetattr != "TELEPHONENUM*") (version 3.0;acl "test case";allow (read,compare,search)(userdn = "ldap:///anyone");)'] + + +def test_ticket49095(topo): + """Check that target attrbiutes with wildcards are case insensitive + """ + + # Add an entry + try: + topo.standalone.add_s(Entry((USER_DN, { + 'objectclass': 'top extensibleObject'.split(), + 'uid': 'testuser', + 'telephonenumber': '555-555-5555' + }))) + except ldap.LDAPError as e: + log.fatal('Failed to add test user: ' + e.args[0]['desc']) + assert False + + for aci in acis: + # Add ACI + try: + topo.standalone.modify_s(DEFAULT_SUFFIX, + [(ldap.MOD_REPLACE, 'aci', ensure_bytes(aci))]) + + except ldap.LDAPError as e: + log.fatal('Failed to set aci: ' + aci + ': ' + e.args[0]['desc']) + assert False + + # Set Anonymous Bind to test aci + try: + topo.standalone.simple_bind_s("", "") + except ldap.LDAPError as e: + log.fatal('Failed to bind anonymously: ' + e.args[0]['desc']) + assert False + + # Search for entry - should not get any results + try: + entry = topo.standalone.search_s(DEFAULT_SUFFIX, ldap.SCOPE_BASE, + 'telephonenumber=*') + if entry: + log.fatal('The entry was incorrectly returned') + assert False + except ldap.LDAPError as e: + log.fatal('Failed to search anonymously: ' + e.args[0]['desc']) + assert False + + # Set root DN Bind so we can update aci's + try: + topo.standalone.simple_bind_s(DN_DM, PASSWORD) + except ldap.LDAPError as e: + log.fatal('Failed to bind anonymously: ' + e.args[0]['desc']) + assert False + + log.info("Test Passed") + + +if __name__ == '__main__': + # Run isolated + # -s for DEBUG mode + CURRENT_FILE = os.path.realpath(__file__) + pytest.main("-s %s" % CURRENT_FILE) + diff --git a/dirsrvtests/tests/tickets/ticket49104_test.py b/dirsrvtests/tests/tickets/ticket49104_test.py new file mode 100644 index 0000000..08458ab --- /dev/null +++ b/dirsrvtests/tests/tickets/ticket49104_test.py @@ -0,0 +1,88 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2017 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. 
+# --- END COPYRIGHT BLOCK --- +# +import logging +import subprocess + +import pytest +from lib389.tasks import * +from lib389.utils import * +from lib389.topologies import topology_st + +# Skip on older versions +pytestmark = [pytest.mark.tier2, + pytest.mark.skipif(ds_is_older('1.3.6'), reason="Not implemented")] +log = logging.getLogger(__name__) + +def test_ticket49104_setup(topology_st): + """ + Generate an ldif file having 10K entries and import it. + """ + # Generate a test ldif (100k entries) + ldif_dir = topology_st.standalone.get_ldif_dir() + import_ldif = ldif_dir + '/49104.ldif' + try: + topology_st.standalone.buildLDIF(100000, import_ldif) + except OSError as e: + log.fatal('ticket 49104: failed to create test ldif,\ + error: %s - %s' % (e.errno, e.strerror)) + assert False + + # Online + try: + topology_st.standalone.tasks.importLDIF(suffix=DEFAULT_SUFFIX, + input_file=import_ldif, + args={TASK_WAIT: True}) + except ValueError: + log.fatal('ticket 49104: Online import failed') + assert False + +def test_ticket49104(topology_st): + """ + Run dbscan with valgrind changing the truncate size. + If there is no Invalid report, we can claim the test has passed. + """ + log.info("Test ticket 49104 -- dbscan crashes by memory corruption") + myvallog = '/tmp/val49104.out' + if os.path.exists(myvallog): + os.remove(myvallog) + prog = os.path.join(topology_st.standalone.get_bin_dir(), 'dbscan-bin') + valcmd = 'valgrind --tool=memcheck --leak-check=yes --num-callers=40 --log-file=%s ' % myvallog + if topology_st.standalone.has_asan(): + valcmd = '' + id2entry = os.path.join(topology_st.standalone.dbdir, DEFAULT_BENAME, 'id2entry.db') + + for i in range(20, 30): + cmd = valcmd + '%s -f %s -t %d -R' % (prog, id2entry , i) + log.info('Running script: %s' % cmd) + proc = subprocess.Popen(cmd.split(), stdout=subprocess.PIPE) + outs = '' + try: + outs = proc.communicate() + except OSError as e: + log.exception('dbscan: error executing (%s): error %d - %s' % + (cmd, e.errno, e.strerror)) + raise e + + # If we have asan, this fails in other spectacular ways instead + if not topology_st.standalone.has_asan(): + grep = 'egrep "Invalid read|Invalid write" %s' % myvallog + p = os.popen(grep, "r") + l = p.readline() + if 'Invalid' in l: + log.fatal('ERROR: valgrind reported invalid read/write: %s' % l) + assert False + + log.info('ticket 49104 - PASSED') + + +if __name__ == '__main__': + # Run isolated + # -s for DEBUG mode + CURRENT_FILE = os.path.realpath(__file__) + pytest.main("-s %s" % CURRENT_FILE) diff --git a/dirsrvtests/tests/tickets/ticket49121_test.py b/dirsrvtests/tests/tickets/ticket49121_test.py new file mode 100644 index 0000000..c597085 --- /dev/null +++ b/dirsrvtests/tests/tickets/ticket49121_test.py @@ -0,0 +1,206 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2017 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. 
+# --- END COPYRIGHT BLOCK --- +# +import pytest +import codecs +from lib389.tasks import * +from lib389.utils import * +from lib389.topologies import topology_m2 + +from lib389._constants import DATA_DIR, DEFAULT_SUFFIX, VALGRIND_INVALID_STR + +# Skip on older versions +pytestmark = [pytest.mark.tier2, + pytest.mark.skipif(ds_is_older('1.3.6'), reason="Not implemented")] + +DEBUGGING = os.getenv('DEBUGGING', False) + +if DEBUGGING: + logging.getLogger(__name__).setLevel(logging.DEBUG) +else: + logging.getLogger(__name__).setLevel(logging.INFO) +log = logging.getLogger(__name__) + +ds_paths = Paths() + + +@pytest.mark.skipif(not ds_paths.asan_enabled, reason="Don't run if ASAN is not enabled") +def test_ticket49121(topology_m2): + """ + Creating some users. + Deleting quite a number of attributes which may or may not be in the entry. + The attribute type names are to be long. + Under the conditions, it did not estimate the size of string format entry + shorter than the real size and caused the Invalid write / server crash. + """ + + utf8file = os.path.join(topology_m2.ms["master1"].getDir(__file__, DATA_DIR), "ticket49121/utf8str.txt") + utf8obj = codecs.open(utf8file, 'r', 'utf-8') + utf8strorig = utf8obj.readline() + utf8str = ensure_bytes(utf8strorig).rstrip(b'\n') + utf8obj.close() + assert (utf8str) + + # Get the sbin directory so we know where to replace 'ns-slapd' + sbin_dir = topology_m2.ms["master1"].get_sbin_dir() + log.info('sbin_dir: %s' % sbin_dir) + + # stop M1 to do the next updates + topology_m2.ms["master1"].stop(30) + topology_m2.ms["master2"].stop(30) + + # wait for the servers shutdown + time.sleep(5) + + # start M1 to do the next updates + topology_m2.ms["master1"].start() + topology_m2.ms["master2"].start() + + for idx in range(1, 10): + try: + USER_DN = 'CN=user%d,ou=People,%s' % (idx, DEFAULT_SUFFIX) + log.info('adding user %s...' % (USER_DN)) + topology_m2.ms["master1"].add_s(Entry((USER_DN, + {'objectclass': 'top person extensibleObject'.split(' '), + 'cn': 'user%d' % idx, + 'sn': 'SN%d-%s' % (idx, utf8str)}))) + except ldap.LDAPError as e: + log.fatal('Failed to add user (%s): error %s' % (USER_DN, e.args[0]['desc'])) + assert False + + for i in range(1, 3): + time.sleep(3) + for idx in range(1, 10): + try: + USER_DN = 'CN=user%d,ou=People,%s' % (idx, DEFAULT_SUFFIX) + log.info('[%d] modify user %s - replacing attrs...' 
% (i, USER_DN)) + topology_m2.ms["master1"].modify_s( + USER_DN, [(ldap.MOD_REPLACE, 'cn', b'user%d' % idx), + (ldap.MOD_REPLACE, 'ABCDEFGH_ID', [b'239001ad-06dd-e011-80fa-c00000ad5174', + b'240f0878-c552-e411-b0f3-000006040037']), + (ldap.MOD_REPLACE, 'attr1', b'NEW_ATTR'), + (ldap.MOD_REPLACE, 'attr20000000000000000000000000000000000000000', None), + (ldap.MOD_REPLACE, 'attr30000000000000000000000000000000000000000000000', None), + (ldap.MOD_REPLACE, 'attr40000000000000000000000000000', None), + (ldap.MOD_REPLACE, 'attr50000000000000000000000000000000000000', None), + (ldap.MOD_REPLACE, 'attr600000000000000000000000000000000000000000', None), + (ldap.MOD_REPLACE, 'attr7000000000000000000000000000000', None), + (ldap.MOD_REPLACE, 'attr8000000000000000000000000000000000', None), + (ldap.MOD_REPLACE, 'attr900000000000000000', None), + (ldap.MOD_REPLACE, 'attr1000000000000000000000', None), + (ldap.MOD_REPLACE, 'attr110000000000000', None), + (ldap.MOD_REPLACE, 'attr120000000000000', None), + (ldap.MOD_REPLACE, 'attr130000000000000000000000000000000000000000000000', None), + (ldap.MOD_REPLACE, 'attr140000000000000000000000000000', None), + (ldap.MOD_REPLACE, 'attr150000000000000000000000000000000000000000000000000000000000000', + None), + (ldap.MOD_REPLACE, 'attr1600000000000000000000000000000000000000000', None), + (ldap.MOD_REPLACE, 'attr17000000000000000000000000000000', None), + (ldap.MOD_REPLACE, 'attr18000000000000000000000000000000000', None), + (ldap.MOD_REPLACE, 'attr1900000000000000000', None), + (ldap.MOD_REPLACE, 'attr2000000000000000000000', None), + (ldap.MOD_REPLACE, 'attr210000000000000000000000000000000000000000', None), + (ldap.MOD_REPLACE, 'attr220000000000000000000000000000000000000000', None), + (ldap.MOD_REPLACE, 'attr230000000000000000000000000000000000000000', None), + (ldap.MOD_REPLACE, 'attr240000000000000000000000000000000000000000', None), + (ldap.MOD_REPLACE, 'attr25000000000000000000000000000000', None), + (ldap.MOD_REPLACE, 'attr260000000000000000000000000000000000000000', None), + (ldap.MOD_REPLACE, + 'attr270000000000000000000000000000000000000000000000000000000000000000000000', None), + (ldap.MOD_REPLACE, 'attr280000000000000000000000000000000000000000', None), + (ldap.MOD_REPLACE, 'attr29000000000000000000000000000000000000000000000000000000000', + None), + (ldap.MOD_REPLACE, 'attr3000000000000000000000000', None), + (ldap.MOD_REPLACE, 'attr310000000000000000000000000000000000000000000000', None), + (ldap.MOD_REPLACE, 'attr320000000000000000000000000000000000000000000000', None), + (ldap.MOD_REPLACE, 'attr330000000000000000000000000000000000000000000000', None), + (ldap.MOD_REPLACE, 'attr340000000000000000000000000000000000000000000000', None), + (ldap.MOD_REPLACE, 'attr350000000000000000000000000000000000000000', None), + (ldap.MOD_REPLACE, 'attr360000000000000000000000000000000000000000000000', None), + (ldap.MOD_REPLACE, 'attr370000000000000000000000000000000000000000000000', None), + (ldap.MOD_REPLACE, 'attr380000000000000000000000000000000000000000000000', None), + (ldap.MOD_REPLACE, 'attr390000000000000000000000000000000000000000000000', None), + (ldap.MOD_REPLACE, 'attr4000000000000000000000000', None), + (ldap.MOD_REPLACE, 'attr410000000000000000000000000000', None), + (ldap.MOD_REPLACE, 'attr420000000000000000000000000000', None), + (ldap.MOD_REPLACE, 'attr430000000000000000000000000000', None), + (ldap.MOD_REPLACE, 'attr440000000000000000000000000000', None), + (ldap.MOD_REPLACE, 'attr4500000000000000000000000000000000000000', None), + 
(ldap.MOD_REPLACE, 'attr460000000000000000000000000000', None), + (ldap.MOD_REPLACE, 'attr470000000000000000000000000000000000000000000000000000000000', + None), + (ldap.MOD_REPLACE, 'attr480000000000000000000000000000', None), + (ldap.MOD_REPLACE, 'attr49000000000000000000000000000000000000000000000', None), + (ldap.MOD_REPLACE, 'attr5000000000000000000000000', None), + (ldap.MOD_REPLACE, 'attr510000000000000', None), + (ldap.MOD_REPLACE, 'attr520000000000000', None), + (ldap.MOD_REPLACE, 'attr530000000000000', None), + (ldap.MOD_REPLACE, 'attr540000000000000', None), + (ldap.MOD_REPLACE, 'attr550000000000000000000000000000000000000', None), + (ldap.MOD_REPLACE, 'attr5600000000000000000000000000000000000000000', None), + (ldap.MOD_REPLACE, 'attr57000000000000000000000000000000', None), + (ldap.MOD_REPLACE, 'attr58000000000000000000000000000000000', None), + (ldap.MOD_REPLACE, 'attr5900000000000000000', None), + (ldap.MOD_REPLACE, 'attr6000000000000000000000000', None), + (ldap.MOD_REPLACE, 'attr6100000000000000000000000000000000000000000', None), + (ldap.MOD_REPLACE, 'attr6200000000000000000000000000000000000000000', None), + (ldap.MOD_REPLACE, 'attr6300000000000000000000000000000000000000000', None), + (ldap.MOD_REPLACE, 'attr6400000000000000000000000000000000000000000', None), + (ldap.MOD_REPLACE, + 'attr65000000000000000000000000000000000000000000000000000000000000000000000000000000', + None), + (ldap.MOD_REPLACE, 'attr6600000000000000000000000000000000000000000', None), + (ldap.MOD_REPLACE, 'attr6700000000000000000000000000000000000000000', None), + (ldap.MOD_REPLACE, 'attr6800000000000000000000000000000000000000000', None), + (ldap.MOD_REPLACE, 'attr690000000000000000000000000000000000000000000000000000000000', + None), + (ldap.MOD_REPLACE, 'attr7000000000000000000000000', None), + (ldap.MOD_REPLACE, 'attr71000000000000000000000000000000', None), + (ldap.MOD_REPLACE, 'attr72000000000000000000000000000000', None), + (ldap.MOD_REPLACE, 'attr73000000000000000000000000000000', None), + (ldap.MOD_REPLACE, 'attr74000000000000000000000000000000', None), + (ldap.MOD_REPLACE, 'attr750000000000000000000000000000000000000000000', None), + (ldap.MOD_REPLACE, 'attr7600000000000000000000000000000000000000000', None), + (ldap.MOD_REPLACE, 'attr77000000000000000000000000000000', None), + ( + ldap.MOD_REPLACE, 'attr78000000000000000000000000000000000000000000000000000000000000000', + None), + (ldap.MOD_REPLACE, 'attr79000000000000000000000000000000', None), + (ldap.MOD_REPLACE, 'attr800000000000000000000000000000000000000000000000', None), + (ldap.MOD_REPLACE, 'attr81000000000000000000000000000000000', None), + (ldap.MOD_REPLACE, 'attr82000000000000000000000000000000000', None), + (ldap.MOD_REPLACE, 'attr83000000000000000000000000000000000', None), + (ldap.MOD_REPLACE, 'attr84000000000000000000000000000000000', None), + (ldap.MOD_REPLACE, 'attr85000000000000000000000000000000000000000000000000000', None), + (ldap.MOD_REPLACE, 'attr8600000000000000000000000000000000000000000', None), + (ldap.MOD_REPLACE, 'attr87000000000000000000000000000000', None), + (ldap.MOD_REPLACE, 'attr88000000000000000000000000000000000', None), + (ldap.MOD_REPLACE, 'attr89000000000000000000000000000000000', None), + (ldap.MOD_REPLACE, 'attr9000000000000000000000000000000000000000000000000000', None)]) + except ldap.LDAPError as e: + log.fatal('Failed to modify user - deleting attrs (%s): error %s' % (USER_DN, e.args[0]['desc'])) + + # Stop master2 + topology_m2.ms["master1"].stop(30) + topology_m2.ms["master2"].stop(30) + + # 
start M1 to do the next updates + topology_m2.ms["master1"].start() + topology_m2.ms["master2"].start() + + log.info('Testcase PASSED') + if DEBUGGING: + # Add debugging steps(if any)... + pass + + +if __name__ == '__main__': + # Run isolated + # -s for DEBUG mode + CURRENT_FILE = os.path.realpath(__file__) + pytest.main("-s %s" % CURRENT_FILE) diff --git a/dirsrvtests/tests/tickets/ticket49122_test.py b/dirsrvtests/tests/tickets/ticket49122_test.py new file mode 100644 index 0000000..651cd50 --- /dev/null +++ b/dirsrvtests/tests/tickets/ticket49122_test.py @@ -0,0 +1,94 @@ +import time +import ldap +import logging +import pytest +from lib389 import Entry +from lib389._constants import * +from lib389.properties import * +from lib389.tasks import * +from lib389.utils import * +from lib389.topologies import topology_st as topo + +pytestmark = pytest.mark.tier2 + +DEBUGGING = os.getenv("DEBUGGING", default=False) +if DEBUGGING: + logging.getLogger(__name__).setLevel(logging.DEBUG) +else: + logging.getLogger(__name__).setLevel(logging.INFO) +log = logging.getLogger(__name__) + +USER_DN = 'uid=user,' + DEFAULT_SUFFIX +ROLE_DN = 'cn=Filtered_Role_That_Includes_Empty_Role,' + DEFAULT_SUFFIX +filters = ['nsrole=cn=empty,dc=example,dc=com', + '(nsrole=cn=empty,dc=example,dc=com)', + '(&(nsrole=cn=empty,dc=example,dc=com))', + '(!(nsrole=cn=empty,dc=example,dc=com))', + '(&(|(objectclass=person)(sn=app*))(userpassword=*))', + '(&(|(objectclass=person)(nsrole=cn=empty,dc=example,dc=com))(userpassword=*))', + '(&(|(nsrole=cn=empty,dc=example,dc=com)(sn=app*))(userpassword=*))', + '(&(|(objectclass=person)(sn=app*))(nsrole=cn=empty,dc=example,dc=com))', + '(&(|(&(cn=*)(objectclass=person)(nsrole=cn=empty,dc=example,dc=com)))(uid=*))'] + + +def test_ticket49122(topo): + """Search for non-existant role and make sure the server does not crash + """ + + # Enable roles plugin + topo.standalone.plugins.enable(name=PLUGIN_ROLES) + topo.standalone.restart() + + # Add test user + try: + topo.standalone.add_s(Entry(( + USER_DN, {'objectclass': "top extensibleObject".split(), + 'uid': 'user'}))) + except ldap.LDAPError as e: + topo.standalone.log.fatal('Failed to add test user: error ' + str(e)) + assert False + + if DEBUGGING: + print("Attach gdb") + time.sleep(20) + + # Loop over filters + for role_filter in filters: + log.info('Testing filter: ' + role_filter) + + # Add invalid role + try: + topo.standalone.add_s(Entry(( + ROLE_DN, {'objectclass': ['top', 'ldapsubentry', 'nsroledefinition', + 'nscomplexroledefinition', 'nsfilteredroledefinition'], + 'cn': 'Filtered_Role_That_Includes_Empty_Role', + 'nsRoleFilter': role_filter, + 'description': 'A filtered role with filter that will crash the server'}))) + except ldap.LDAPError as e: + topo.standalone.log.fatal('Failed to add filtered role: error ' + e.message['desc']) + assert False + + # Search for the role + try: + topo.standalone.search_s(USER_DN, ldap.SCOPE_SUBTREE, 'objectclass=*', ['nsrole']) + except ldap.LDAPError as e: + topo.standalone.log.fatal('Search failed: error ' + str(e)) + assert False + + # Cleanup + try: + topo.standalone.delete_s(ROLE_DN) + except ldap.LDAPError as e: + topo.standalone.log.fatal('delete failed: error ' + str(e)) + assert False + time.sleep(1) + + topo.standalone.log.info('Test Passed') + + +if __name__ == '__main__': + # Run isolated + # -s for DEBUG mode + CURRENT_FILE = os.path.realpath(__file__) + pytest.main("-s %s" % CURRENT_FILE) + diff --git a/dirsrvtests/tests/tickets/ticket49180_test.py 
b/dirsrvtests/tests/tickets/ticket49180_test.py new file mode 100644 index 0000000..4c8f7de --- /dev/null +++ b/dirsrvtests/tests/tickets/ticket49180_test.py @@ -0,0 +1,124 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2016 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. +# --- END COPYRIGHT BLOCK --- +# +import threading + +import pytest +from lib389.tasks import * +from lib389.utils import * +from lib389.topologies import topology_m4 +from lib389.replica import ReplicationManager + +from lib389._constants import (DEFAULT_SUFFIX, SUFFIX) + +from lib389 import DirSrv + +pytestmark = pytest.mark.tier2 + +logging.getLogger(__name__).setLevel(logging.DEBUG) +log = logging.getLogger(__name__) + + +def remove_master4_agmts(msg, topology_m4): + """Remove all the repl agmts to master4. """ + + log.info('%s: remove all the agreements to master 4...' % msg) + for num in range(1, 4): + try: + topology_m4.ms["master{}".format(num)].agreement.delete(DEFAULT_SUFFIX, + topology_m4.ms["master4"].host, + topology_m4.ms["master4"].port) + except ldap.LDAPError as e: + log.fatal('{}: Failed to delete agmt(m{} -> m4), error: {}'.format(msg, num, str(e))) + assert False + + +def restore_master4(topology_m4): + """In our tests will always be removing master 4, so we need a common + way to restore it for another test + """ + + log.info('Restoring master 4...') + + # Enable replication on master 4 + M4 = topology_m4.ms["master4"] + M1 = topology_m4.ms["master1"] + repl = ReplicationManager(SUFFIX) + repl.join_master(M1, M4) + repl.ensure_agreement(M4, M1) + repl.ensure_agreement(M1, M4) + + # Test Replication is working + for num in range(2, 5): + if topology_m4.ms["master1"].testReplication(DEFAULT_SUFFIX, topology_m4.ms["master{}".format(num)]): + log.info('Replication is working m1 -> m{}.'.format(num)) + else: + log.fatal('restore_master4: Replication is not working from m1 -> m{}.'.format(num)) + assert False + time.sleep(1) + + # Check replication is working from master 4 to master1... 
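+    # (testReplication() is expected to push a small change over the agreement
+    # and confirm it arrives on the other side; True means replication works.)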
+ if topology_m4.ms["master4"].testReplication(DEFAULT_SUFFIX, topology_m4.ms["master1"]): + log.info('Replication is working m4 -> m1.') + else: + log.fatal('restore_master4: Replication is not working from m4 -> 1.') + assert False + time.sleep(5) + + log.info('Master 4 has been successfully restored.') + + +def test_ticket49180(topology_m4): + + log.info('Running test_ticket49180...') + + log.info('Check that replication works properly on all masters') + agmt_nums = {"master1": ("2", "3", "4"), + "master2": ("1", "3", "4"), + "master3": ("1", "2", "4"), + "master4": ("1", "2", "3")} + + for inst_name, agmts in agmt_nums.items(): + for num in agmts: + if not topology_m4.ms[inst_name].testReplication(DEFAULT_SUFFIX, topology_m4.ms["master{}".format(num)]): + log.fatal( + 'test_replication: Replication is not working between {} and master {}.'.format(inst_name, + num)) + assert False + + # Disable master 4 + log.info('test_clean: disable master 4...') + topology_m4.ms["master4"].replica.disableReplication(DEFAULT_SUFFIX) + + # Remove the agreements from the other masters that point to master 4 + remove_master4_agmts("test_clean", topology_m4) + + # Cleanup - restore master 4 + restore_master4(topology_m4) + + attr_errors = os.popen('egrep "attrlist_replace" %s | wc -l' % topology_m4.ms["master1"].errlog) + ecount = int(attr_errors.readline().rstrip()) + log.info("Errors found on m1: %d" % ecount) + assert (ecount == 0) + + attr_errors = os.popen('egrep "attrlist_replace" %s | wc -l' % topology_m4.ms["master2"].errlog) + ecount = int(attr_errors.readline().rstrip()) + log.info("Errors found on m2: %d" % ecount) + assert (ecount == 0) + + attr_errors = os.popen('egrep "attrlist_replace" %s | wc -l' % topology_m4.ms["master3"].errlog) + ecount = int(attr_errors.readline().rstrip()) + log.info("Errors found on m3: %d" % ecount) + assert (ecount == 0) + + +if __name__ == '__main__': + # Run isolated + # -s for DEBUG mode + CURRENT_FILE = os.path.realpath(__file__) + pytest.main("-s %s" % CURRENT_FILE) diff --git a/dirsrvtests/tests/tickets/ticket49184_test.py b/dirsrvtests/tests/tickets/ticket49184_test.py new file mode 100644 index 0000000..4ec78b3 --- /dev/null +++ b/dirsrvtests/tests/tickets/ticket49184_test.py @@ -0,0 +1,148 @@ +import time +import ldap +import logging +import pytest +from lib389 import DirSrv, Entry, tools, tasks +from lib389.tools import DirSrvTools +from lib389._constants import * +from lib389.properties import * +from lib389.tasks import * +from lib389.utils import * +from lib389.topologies import topology_st as topo + +pytestmark = pytest.mark.tier2 + +DEBUGGING = os.getenv("DEBUGGING", default=False) +GROUP_DN_1 = ("cn=group1," + DEFAULT_SUFFIX) +GROUP_DN_2 = ("cn=group2," + DEFAULT_SUFFIX) +SUPER_GRP1 = ("cn=super_grp1," + DEFAULT_SUFFIX) +SUPER_GRP2 = ("cn=super_grp2," + DEFAULT_SUFFIX) +SUPER_GRP3 = ("cn=super_grp3," + DEFAULT_SUFFIX) + +if DEBUGGING: + logging.getLogger(__name__).setLevel(logging.DEBUG) +else: + logging.getLogger(__name__).setLevel(logging.INFO) +log = logging.getLogger(__name__) + +def _add_group_with_members(topo, group_dn): + # Create group + try: + topo.standalone.add_s(Entry((group_dn, + {'objectclass': 'top groupofnames extensibleObject'.split(), + 'cn': 'group'}))) + except ldap.LDAPError as e: + log.fatal('Failed to add group: error ' + e.args[0]['desc']) + assert False + + # Add members to the group - set timeout + log.info('Adding members to the group...') + for idx in range(1, 5): + try: + MEMBER_VAL = ("uid=member%d,%s" % (idx, 
DEFAULT_SUFFIX)) + topo.standalone.modify_s(group_dn, + [(ldap.MOD_ADD, + 'member', + ensure_bytes(MEMBER_VAL))]) + except ldap.LDAPError as e: + log.fatal('Failed to update group: member (%s) - error: %s' % + (MEMBER_VAL, e.args[0]['desc'])) + assert False + +def _check_memberof(topo, member=None, memberof=True, group_dn=None): + # Check that members have memberof attribute on M1 + for idx in range(1, 5): + try: + USER_DN = ("uid=member%d,%s" % (idx, DEFAULT_SUFFIX)) + ent = topo.standalone.getEntry(USER_DN, ldap.SCOPE_BASE, "(objectclass=*)") + if presence_flag: + assert ent.hasAttr('memberof') and ent.getValue('memberof') == ensure_bytes(group_dn) + else: + assert not ent.hasAttr('memberof') + except ldap.LDAPError as e: + log.fatal('Failed to retrieve user (%s): error %s' % (USER_DN, e.args[0]['desc'])) + assert False + +def _check_memberof(topo, member=None, memberof=True, group_dn=None): + ent = topo.standalone.getEntry(member, ldap.SCOPE_BASE, "(objectclass=*)") + if memberof: + assert group_dn + assert ent.hasAttr('memberof') and ensure_bytes(group_dn) in ent.getValues('memberof') + else: + if ent.hasAttr('memberof'): + assert ensure_bytes(group_dn) not in ent.getValues('memberof') + + +def test_ticket49184(topo): + """Write your testcase here... + + Also, if you need any testcase initialization, + please, write additional fixture for that(include finalizer). + """ + + topo.standalone.plugins.enable(name=PLUGIN_MEMBER_OF) + topo.standalone.restart(timeout=10) + + # + # create some users and a group + # + log.info('create users and group...') + for idx in range(1, 5): + try: + USER_DN = ("uid=member%d,%s" % (idx, DEFAULT_SUFFIX)) + topo.standalone.add_s(Entry((USER_DN, + {'objectclass': 'top extensibleObject'.split(), + 'uid': 'member%d' % (idx)}))) + except ldap.LDAPError as e: + log.fatal('Failed to add user (%s): error %s' % (USER_DN, e.args[0]['desc'])) + assert False + + # add all users in GROUP_DN_1 and checks each users is memberof GROUP_DN_1 + _add_group_with_members(topo, GROUP_DN_1) + for idx in range(1, 5): + USER_DN = ("uid=member%d,%s" % (idx, DEFAULT_SUFFIX)) + _check_memberof(topo, member=USER_DN, memberof=True, group_dn=GROUP_DN_1 ) + + # add all users in GROUP_DN_2 and checks each users is memberof GROUP_DN_2 + _add_group_with_members(topo, GROUP_DN_2) + for idx in range(1, 5): + USER_DN = ("uid=member%d,%s" % (idx, DEFAULT_SUFFIX)) + _check_memberof(topo, member=USER_DN, memberof=True, group_dn=GROUP_DN_2 ) + + # add the level 2, 3 and 4 group + for super_grp in (SUPER_GRP1, SUPER_GRP2, SUPER_GRP3): + topo.standalone.add_s(Entry((super_grp, + {'objectclass': 'top groupofnames extensibleObject'.split(), + 'cn': 'super_grp'}))) + topo.standalone.modify_s(SUPER_GRP1, + [(ldap.MOD_ADD, + 'member', + ensure_bytes(GROUP_DN_1)), + (ldap.MOD_ADD, + 'member', + ensure_bytes(GROUP_DN_2))]) + topo.standalone.modify_s(SUPER_GRP2, + [(ldap.MOD_ADD, + 'member', + ensure_bytes(GROUP_DN_1)), + (ldap.MOD_ADD, + 'member', + ensure_bytes(GROUP_DN_2))]) + return + topo.standalone.delete_s(GROUP_DN_2) + for idx in range(1, 5): + USER_DN = ("uid=member%d,%s" % (idx, DEFAULT_SUFFIX)) + _check_memberof(topo, member=USER_DN, memberof=True, group_dn=GROUP_DN_1 ) + _check_memberof(topo, member=USER_DN, memberof=False, group_dn=GROUP_DN_2 ) + + if DEBUGGING: + # Add debugging steps(if any)... 
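+        # Minimal debugging sketch (not part of the original test): dump the
+        # memberOf values of every member so the nested-group state can be
+        # inspected by hand when the checks above fail.
+        for idx in range(1, 5):
+            member_dn = "uid=member%d,%s" % (idx, DEFAULT_SUFFIX)
+            ent = topo.standalone.getEntry(member_dn, ldap.SCOPE_BASE, "(objectclass=*)")
+            if ent.hasAttr('memberof'):
+                log.debug('%s: memberof=%s' % (member_dn, ent.getValues('memberof')))
+            else:
+                log.debug('%s: no memberof' % member_dn)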
+ pass + + +if __name__ == '__main__': + # Run isolated + # -s for DEBUG mode + CURRENT_FILE = os.path.realpath(__file__) + pytest.main("-s %s" % CURRENT_FILE) + diff --git a/dirsrvtests/tests/tickets/ticket49192_test.py b/dirsrvtests/tests/tickets/ticket49192_test.py new file mode 100644 index 0000000..21be4eb --- /dev/null +++ b/dirsrvtests/tests/tickets/ticket49192_test.py @@ -0,0 +1,179 @@ +import time +import ldap +import logging +import pytest +from lib389 import Entry +from lib389._constants import * +from lib389.properties import * +from lib389.tasks import * +from lib389.utils import * +from lib389.topologies import topology_st as topo + +pytestmark = pytest.mark.tier2 + +DEBUGGING = os.getenv("DEBUGGING", default=False) +if DEBUGGING: + logging.getLogger(__name__).setLevel(logging.DEBUG) +else: + logging.getLogger(__name__).setLevel(logging.INFO) +log = logging.getLogger(__name__) + +INDEX_DN = 'cn=index,cn=Second_Backend,cn=ldbm database,cn=plugins,cn=config' +SUFFIX_DN = 'cn=Second_Backend,cn=ldbm database,cn=plugins,cn=config' +MY_SUFFIX = "o=hang.com" +USER_DN = 'uid=user,' + MY_SUFFIX + + +def test_ticket49192(topo): + """Trigger deadlock when removing suffix + """ + + # + # Create a second suffix/backend + # + log.info('Creating second backend...') + topo.standalone.backends.create(None, properties={ + BACKEND_NAME: "Second_Backend", + 'suffix': "o=hang.com", + }) + try: + topo.standalone.add_s(Entry(("o=hang.com", { + 'objectclass': 'top organization'.split(), + 'o': 'hang.com'}))) + except ldap.LDAPError as e: + log.fatal('Failed to create 2nd suffix: error ' + e.args[0]['desc']) + assert False + + # + # Add roles + # + log.info('Adding roles...') + try: + topo.standalone.add_s(Entry(('cn=nsManagedDisabledRole,' + MY_SUFFIX, { + 'objectclass': ['top', 'LdapSubEntry', + 'nsRoleDefinition', + 'nsSimpleRoleDefinition', + 'nsManagedRoleDefinition'], + 'cn': 'nsManagedDisabledRole'}))) + except ldap.LDAPError as e: + log.fatal('Failed to add managed role: error ' + e.args[0]['desc']) + assert False + + try: + topo.standalone.add_s(Entry(('cn=nsDisabledRole,' + MY_SUFFIX, { + 'objectclass': ['top', 'LdapSubEntry', + 'nsRoleDefinition', + 'nsComplexRoleDefinition', + 'nsNestedRoleDefinition'], + 'cn': 'nsDisabledRole', + 'nsRoledn': 'cn=nsManagedDisabledRole,' + MY_SUFFIX}))) + except ldap.LDAPError as e: + log.fatal('Failed to add nested role: error ' + e.args[0]['desc']) + assert False + + try: + topo.standalone.add_s(Entry(('cn=nsAccountInactivationTmp,' + MY_SUFFIX, { + 'objectclass': ['top', 'nsContainer'], + 'cn': 'nsAccountInactivationTmp'}))) + except ldap.LDAPError as e: + log.fatal('Failed to add container: error ' + e.args[0]['desc']) + assert False + + try: + topo.standalone.add_s(Entry(('cn=\"cn=nsDisabledRole,' + MY_SUFFIX + '\",cn=nsAccountInactivationTmp,' + MY_SUFFIX, { + 'objectclass': ['top', 'extensibleObject', 'costemplate', + 'ldapsubentry'], + 'nsAccountLock': 'true'}))) + except ldap.LDAPError as e: + log.fatal('Failed to add cos1: error ' + e.args[0]['desc']) + assert False + + try: + topo.standalone.add_s(Entry(('cn=nsAccountInactivation_cos,' + MY_SUFFIX, { + 'objectclass': ['top', 'LdapSubEntry', 'cosSuperDefinition', + 'cosClassicDefinition'], + 'cn': 'nsAccountInactivation_cos', + 'cosTemplateDn': 'cn=nsAccountInactivationTmp,' + MY_SUFFIX, + 'cosSpecifier': 'nsRole', + 'cosAttribute': 'nsAccountLock operational'}))) + except ldap.LDAPError as e: + log.fatal('Failed to add cos2 : error ' + e.args[0]['desc']) + assert False + + # + # Add test entry + 
# + try: + topo.standalone.add_s(Entry((USER_DN, { + 'objectclass': 'top extensibleObject'.split(), + 'uid': 'user', + 'userpassword': 'password', + }))) + except ldap.LDAPError as e: + log.fatal('Failed to add user: error ' + e.args[0]['desc']) + assert False + + # + # Inactivate the user account + # + try: + topo.standalone.modify_s(USER_DN, + [(ldap.MOD_ADD, + 'nsRoleDN', + ensure_bytes('cn=nsManagedDisabledRole,' + MY_SUFFIX))]) + except ldap.LDAPError as e: + log.fatal('Failed to disable user: error ' + e.args[0]['desc']) + assert False + + time.sleep(1) + + # Bind as user (should fail) + try: + topo.standalone.simple_bind_s(USER_DN, 'password') + log.error("Bind incorrectly worked") + assert False + except ldap.UNWILLING_TO_PERFORM: + log.info('Got error 53 as expected') + except ldap.LDAPError as e: + log.fatal('Bind has unexpected error ' + e.args[0]['desc']) + assert False + + # Bind as root DN + try: + topo.standalone.simple_bind_s(DN_DM, PASSWORD) + except ldap.LDAPError as e: + log.fatal('RootDN Bind has unexpected error ' + e.args[0]['desc']) + assert False + + # + # Delete suffix + # + log.info('Delete the suffix and children...') + try: + index_entries = topo.standalone.search_s( + SUFFIX_DN, ldap.SCOPE_SUBTREE, 'objectclass=top') + except ldap.LDAPError as e: + log.error('Failed to search: %s - error %s' % (SUFFIX_DN, str(e))) + + for entry in reversed(index_entries): + try: + log.info("Deleting: " + entry.dn) + if entry.dn != SUFFIX_DN and entry.dn != INDEX_DN: + topo.standalone.search_s(entry.dn, + ldap.SCOPE_ONELEVEL, + 'objectclass=top') + topo.standalone.delete_s(entry.dn) + except ldap.LDAPError as e: + log.fatal('Failed to delete entry: %s - error %s' % + (entry.dn, str(e))) + assert False + + log.info("Test Passed") + + +if __name__ == '__main__': + # Run isolated + # -s for DEBUG mode + CURRENT_FILE = os.path.realpath(__file__) + pytest.main("-s %s" % CURRENT_FILE) + diff --git a/dirsrvtests/tests/tickets/ticket49227_test.py b/dirsrvtests/tests/tickets/ticket49227_test.py new file mode 100644 index 0000000..c828c2d --- /dev/null +++ b/dirsrvtests/tests/tickets/ticket49227_test.py @@ -0,0 +1,149 @@ +import os +import time +import ldap +import logging +import pytest +from lib389._constants import * +from lib389.properties import * +from lib389.tasks import * +from lib389.utils import * +from lib389.topologies import topology_st as topo + +pytestmark = pytest.mark.tier2 + +DEBUGGING = os.getenv("DEBUGGING", default=False) +if DEBUGGING: + logging.getLogger(__name__).setLevel(logging.DEBUG) +else: + logging.getLogger(__name__).setLevel(logging.INFO) +log = logging.getLogger(__name__) + +DEFAULT_LEVEL = b"16384" +COMB_LEVEL = b"73864" # 65536+8192+128+8 = 73864 +COMB_DEFAULT_LEVEL = b"90248" # 65536+8192+128+8+16384 = 90248 + + +def set_level(topo, level): + ''' Set the error log level + ''' + try: + topo.standalone.modify_s("cn=config", [(ldap.MOD_REPLACE, 'nsslapd-errorlog-level', ensure_bytes(level))]) + time.sleep(1) + except ldap.LDAPError as e: + log.fatal('Failed to set loglevel to %s - error: %s' % (level, str(e))) + assert False + + +def get_level(topo): + ''' Set the error log level + ''' + try: + config = topo.standalone.search_s("cn=config", ldap.SCOPE_BASE, "objectclass=top") + time.sleep(1) + return config[0].getValue('nsslapd-errorlog-level') + except ldap.LDAPError as e: + log.fatal('Failed to get loglevel - error: %s' % (str(e))) + assert False + + +def get_log_size(topo): + ''' Get the errors log size + ''' + statinfo = os.stat(topo.standalone.errlog) 
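+    # st_size is the error log's current size in bytes; the test compares the
+    # sizes taken before and after each config change to tell whether the new
+    # log level actually produced output.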
+ return statinfo.st_size + + +def test_ticket49227(topo): + """Set the error log to varying levels, and make sure a search for that value + reflects the expected value (not the bitmasked value. + """ + log_size = get_log_size(topo) + + # Check the default level + level = get_level(topo) + if level != DEFAULT_LEVEL: + log.fatal('Incorrect default logging level: %s' % (level)) + assert False + + # Set connection logging + set_level(topo, '8') + level = get_level(topo) + if level != b'8': + log.fatal('Incorrect connection logging level: %s' % (level)) + assert False + + # Check the actual log + new_size = get_log_size(topo) + if new_size == log_size: + # Size should be different + log.fatal('Connection logging is not working') + assert False + + # Set default logging using zero + set_level(topo, '0') + log_size = get_log_size(topo) + level = get_level(topo) + if level != DEFAULT_LEVEL: + log.fatal('Incorrect default logging level: %s' % (level)) + assert False + + # Check the actual log + new_size = get_log_size(topo) + if new_size != log_size: + # Size should be the size + log.fatal('Connection logging is still on') + assert False + + # Set default logging using the default value + set_level(topo, DEFAULT_LEVEL) + level = get_level(topo) + if level != DEFAULT_LEVEL: + log.fatal('Incorrect default logging level: %s' % (level)) + assert False + + # Check the actual log + new_size = get_log_size(topo) + if new_size != log_size: + # Size should be the size + log.fatal('Connection logging is still on') + assert False + + # Set a combined level that includes the default level + set_level(topo, COMB_DEFAULT_LEVEL) + level = get_level(topo) + if level != COMB_DEFAULT_LEVEL: + log.fatal('Incorrect combined logging level with default level: %s expected %s' % + (level, COMB_DEFAULT_LEVEL)) + assert False + + # Set a combined level that does not includes the default level + set_level(topo, COMB_LEVEL) + level = get_level(topo) + if level != COMB_LEVEL: + log.fatal('Incorrect combined logging level without default level: %s expected %s' % + (level, COMB_LEVEL)) + assert False + + # Check our level is present after a restart - previous level was COMB_LEVEL + topo.standalone.restart() + log_size = get_log_size(topo) # Grab the log size for our next check + level = get_level(topo) # This should trigger connection logging + if level != COMB_LEVEL: + log.fatal('Incorrect combined logging level with default level: %s expected %s' % + (level, COMB_LEVEL)) + assert False + + # Now check the actual levels are still working + new_size = get_log_size(topo) + if new_size == log_size: + # Size should be different + log.fatal('Combined logging is not working') + assert False + + +if __name__ == '__main__': + # Run isolated + # -s for DEBUG mode + CURRENT_FILE = os.path.realpath(__file__) + pytest.main("-s %s" % CURRENT_FILE) + diff --git a/dirsrvtests/tests/tickets/ticket49249_test.py b/dirsrvtests/tests/tickets/ticket49249_test.py new file mode 100644 index 0000000..83d2259 --- /dev/null +++ b/dirsrvtests/tests/tickets/ticket49249_test.py @@ -0,0 +1,142 @@ +import time +import ldap +import logging +import pytest +from lib389 import DirSrv, Entry, tools, tasks +from lib389.tools import DirSrvTools +from lib389._constants import * +from lib389.properties import * +from lib389.tasks import * +from lib389.utils import * +from lib389.topologies import topology_st as topo + +pytestmark = pytest.mark.tier2 + +DEBUGGING = os.getenv("DEBUGGING", default=False) +if DEBUGGING: + 
logging.getLogger(__name__).setLevel(logging.DEBUG) +else: + logging.getLogger(__name__).setLevel(logging.INFO) +log = logging.getLogger(__name__) + +COS_BRANCH = 'ou=cos_scope,' + DEFAULT_SUFFIX +COS_DEF = 'cn=cos_definition,' + COS_BRANCH +COS_TEMPLATE = 'cn=cos_template,' + COS_BRANCH +INVALID_USER_WITH_COS = 'cn=cos_user_no_mail,' + COS_BRANCH +VALID_USER_WITH_COS = 'cn=cos_user_with_mail,' + COS_BRANCH + +NO_COS_BRANCH = 'ou=no_cos_scope,' + DEFAULT_SUFFIX +INVALID_USER_WITHOUT_COS = 'cn=no_cos_user_no_mail,' + NO_COS_BRANCH +VALID_USER_WITHOUT_COS = 'cn=no_cos_user_with_mail,' + NO_COS_BRANCH + +def test_ticket49249(topo): + """Write your testcase here... + + Also, if you need any testcase initialization, + please, write additional fixture for that(include finalizer). + """ + # Add the branches + try: + topo.standalone.add_s(Entry((COS_BRANCH, { + 'objectclass': 'top extensibleObject'.split(), + 'ou': 'cos_scope' + }))) + except ldap.LDAPError as e: + log.error('Failed to add cos_scope: error ' + e.message['desc']) + assert False + + try: + topo.standalone.add_s(Entry((NO_COS_BRANCH, { + 'objectclass': 'top extensibleObject'.split(), + 'ou': 'no_cos_scope' + }))) + except ldap.LDAPError as e: + log.error('Failed to add no_cos_scope: error ' + e.message['desc']) + assert False + + try: + topo.standalone.add_s(Entry((COS_TEMPLATE, { + 'objectclass': 'top ldapsubentry costemplate extensibleObject'.split(), + 'cn': 'cos_template', + 'cosPriority': '1', + 'cn': 'cn=nsPwTemplateEntry,ou=level1,dc=example,dc=com', + 'mailAlternateAddress': 'hello@world' + }))) + except ldap.LDAPError as e: + log.error('Failed to add cos_template: error ' + e.message['desc']) + assert False + + try: + topo.standalone.add_s(Entry((COS_DEF, { + 'objectclass': 'top ldapsubentry cosSuperDefinition cosPointerDefinition'.split(), + 'cn': 'cos_definition', + 'costemplatedn': COS_TEMPLATE, + 'cosAttribute': 'mailAlternateAddress default' + }))) + except ldap.LDAPError as e: + log.error('Failed to add cos_definition: error ' + e.message['desc']) + assert False + + try: + # This entry is not allowed to have mailAlternateAddress + topo.standalone.add_s(Entry((INVALID_USER_WITH_COS, { + 'objectclass': 'top person'.split(), + 'cn': 'cos_user_no_mail', + 'sn': 'cos_user_no_mail' + }))) + except ldap.LDAPError as e: + log.error('Failed to add cos_user_no_mail: error ' + e.message['desc']) + assert False + + try: + # This entry is allowed to have mailAlternateAddress + topo.standalone.add_s(Entry((VALID_USER_WITH_COS, { + 'objectclass': 'top mailGroup'.split(), + 'cn': 'cos_user_with_mail' + }))) + except ldap.LDAPError as e: + log.error('Failed to add cos_user_no_mail: error ' + e.message['desc']) + assert False + + try: + # This entry is not allowed to have mailAlternateAddress + topo.standalone.add_s(Entry((INVALID_USER_WITHOUT_COS, { + 'objectclass': 'top person'.split(), + 'cn': 'no_cos_user_no_mail', + 'sn': 'no_cos_user_no_mail' + }))) + except ldap.LDAPError as e: + log.error('Failed to add no_cos_user_no_mail: error ' + e.message['desc']) + assert False + + try: + # This entry is allowed to have mailAlternateAddress + topo.standalone.add_s(Entry((VALID_USER_WITHOUT_COS, { + 'objectclass': 'top mailGroup'.split(), + 'cn': 'no_cos_user_with_mail' + }))) + except ldap.LDAPError as e: + log.error('Failed to add no_cos_user_with_mail: error ' + e.message['desc']) + assert False + + try: + entries = topo.standalone.search_s(SUFFIX, ldap.SCOPE_SUBTREE, '(mailAlternateAddress=*)') + assert len(entries) == 1 + assert 
entries[0].hasValue('mailAlternateAddress', 'hello@world')
+    except ldap.LDAPError as e:
+        log.fatal('Unable to retrieve %s (the only entry with mailAlternateAddress): error %s' % (VALID_USER_WITH_COS, str(e)))
+        assert False
+
+    assert not topo.standalone.ds_error_log.match(".*cos attribute mailAlternateAddress failed schema.*")
+
+    if DEBUGGING:
+        # Add debugging steps(if any)...
+        pass
+
+
+if __name__ == '__main__':
+    # Run isolated
+    # -s for DEBUG mode
+    CURRENT_FILE = os.path.realpath(__file__)
+    pytest.main("-s %s" % CURRENT_FILE)
+
diff --git a/dirsrvtests/tests/tickets/ticket49273_test.py b/dirsrvtests/tests/tickets/ticket49273_test.py
new file mode 100644
index 0000000..e3213bd
--- /dev/null
+++ b/dirsrvtests/tests/tickets/ticket49273_test.py
@@ -0,0 +1,52 @@
+# --- BEGIN COPYRIGHT BLOCK ---
+# Copyright (C) 2017 Red Hat, Inc.
+# All rights reserved.
+#
+# License: GPL (version 3 or any later version).
+# See LICENSE for details.
+# --- END COPYRIGHT BLOCK ---
+#
+
+import pytest
+import ldap
+
+from lib389.topologies import topology_st
+# This pulls in logging I think
+from lib389.utils import *
+from lib389.sasl import PlainSASL
+from lib389.idm.services import ServiceAccounts
+
+pytestmark = pytest.mark.tier2
+
+log = logging.getLogger(__name__)
+
+def test_49273_corrupt_dbversion(topology_st):
+    """
+    Ticket 49273 was caused by a full disk, which corrupted the user
+    backend's DBVERSION file. We can't prevent this, but we can handle
+    the error better than crashing.
+    """
+
+    standalone = topology_st.standalone
+
+    # Stop the instance
+    standalone.stop()
+    # Corrupt userRoot dbversion
+    dbvf = os.path.join(standalone.ds_paths.db_dir, 'userRoot/DBVERSION')
+    with open(dbvf, 'w') as f:
+        # This will truncate the file
+        f.write('')
+    # Start up
+    try:
+        # post_open false, means ds state is OFFLINE, which allows
+        # dspaths below to use defaults rather than ldap check.
+        standalone.start(timeout=20, post_open=False)
+    except:
+        pass
+    # Trigger an update of the running server state, to move it OFFLINE.
+    standalone.status()
+
+    # Check the error log for the expected parse failure
+    error_lines = standalone.ds_error_log.match('.*Could not parse file.*')
+    assert(len(error_lines) > 0)
+
diff --git a/dirsrvtests/tests/tickets/ticket49287_test.py b/dirsrvtests/tests/tickets/ticket49287_test.py
new file mode 100644
index 0000000..28fd8db
--- /dev/null
+++ b/dirsrvtests/tests/tickets/ticket49287_test.py
@@ -0,0 +1,347 @@
+# --- BEGIN COPYRIGHT BLOCK ---
+# Copyright (C) 2017 Red Hat, Inc.
+# All rights reserved.
+#
+# License: GPL (version 3 or any later version).
+# See LICENSE for details.
+# --- END COPYRIGHT BLOCK --- +# +import pytest +from lib389.tasks import * +from lib389.utils import * +from lib389.properties import RA_NAME, RA_BINDDN, RA_BINDPW, RA_METHOD, RA_TRANSPORT_PROT, BACKEND_NAME +from lib389.topologies import topology_m2 +from lib389._constants import * +from lib389.replica import ReplicationManager + +pytestmark = pytest.mark.tier2 + +DEBUGGING = os.getenv('DEBUGGING', False) +GROUP_DN = ("cn=group," + DEFAULT_SUFFIX) + +if DEBUGGING: + logging.getLogger(__name__).setLevel(logging.DEBUG) +else: + logging.getLogger(__name__).setLevel(logging.INFO) +log = logging.getLogger(__name__) + + +def _add_repl_backend(s1, s2, be): + suffix = 'ou=%s,dc=test,dc=com' % be + create_backend(s1, s2, suffix, be) + add_ou(s1, suffix) + replicate_backend(s1, s2, suffix) + + +def _wait_for_sync(s1, s2, testbase, final_db): + + now = time.time() + cn1 = 'sync-%s-%d' % (now, 1) + cn2 = 'sync-%s-%d' % (now, 2) + add_user(s1, cn1, testbase, 'add on m1', sleep=False) + add_user(s2, cn2, testbase, 'add on m2', sleep=False) + dn1 = 'cn=%s,%s' % (cn1, testbase) + dn2 = 'cn=%s,%s' % (cn2, testbase) + if final_db: + final_db.append(dn1) + final_db.append(dn2) + _check_entry_exist(s2, dn1, 10, 5) + _check_entry_exist(s1, dn2, 10, 5) + + +def _check_entry_exist(master, dn, loops=10, wait=1): + attempt = 0 + while attempt <= loops: + try: + dn + ent = master.getEntry(dn, ldap.SCOPE_BASE, "(objectclass=*)") + break + except ldap.NO_SUCH_OBJECT: + attempt = attempt + 1 + time.sleep(wait) + except ldap.LDAPError as e: + log.fatal('Failed to retrieve user (%s): error %s' % (dn, e.message['desc'])) + assert False + assert attempt <= loops + + +def config_memberof(server): + + server.plugins.enable(name=PLUGIN_MEMBER_OF) + MEMBEROF_PLUGIN_DN = ('cn=' + PLUGIN_MEMBER_OF + ',cn=plugins,cn=config') + server.modify_s(MEMBEROF_PLUGIN_DN, [(ldap.MOD_REPLACE, + 'memberOfAllBackends', + b'on')]) + # Configure fractional to prevent total init to send memberof + ents = server.agreement.list(suffix=DEFAULT_SUFFIX) + log.info('update %s to add nsDS5ReplicatedAttributeListTotal' % ents[0].dn) + for ent in ents: + server.modify_s(ent.dn, + [(ldap.MOD_REPLACE, + 'nsDS5ReplicatedAttributeListTotal', + b'(objectclass=*) $ EXCLUDE '), + (ldap.MOD_REPLACE, + 'nsDS5ReplicatedAttributeList', + b'(objectclass=*) $ EXCLUDE memberOf')]) + + +def _disable_auto_oc_memberof(server): + MEMBEROF_PLUGIN_DN = ('cn=' + PLUGIN_MEMBER_OF + ',cn=plugins,cn=config') + server.modify_s(MEMBEROF_PLUGIN_DN, + [(ldap.MOD_REPLACE, 'memberOfAutoAddOC', b'nsContainer')]) + + +def _enable_auto_oc_memberof(server): + MEMBEROF_PLUGIN_DN = ('cn=' + PLUGIN_MEMBER_OF + ',cn=plugins,cn=config') + server.modify_s(MEMBEROF_PLUGIN_DN, + [(ldap.MOD_REPLACE, 'memberOfAutoAddOC', b'nsMemberOf')]) + + +def add_dc(server, dn): + server.add_s(Entry((dn, {'objectclass': ['top', 'domain']}))) + + +def add_ou(server, dn): + server.add_s(Entry((dn, {'objectclass': ['top', 'organizationalunit']}))) + + +def add_container(server, dn): + server.add_s(Entry((dn, {'objectclass': ['top', 'nscontainer']}))) + + +def add_user(server, cn, testbase, desc, sleep=True): + dn = 'cn=%s,%s' % (cn, testbase) + log.fatal('Adding user (%s): ' % dn) + server.add_s(Entry((dn, {'objectclass': ['top', 'person', 'inetuser'], + 'sn': 'user_%s' % cn, + 'description': desc}))) + if sleep: + time.sleep(2) + + +def add_person(server, cn, testbase, desc, sleep=True): + dn = 'cn=%s,%s' % (cn, testbase) + log.fatal('Adding user (%s): ' % dn) + server.add_s(Entry((dn, {'objectclass': 
['top', 'person'], + 'sn': 'user_%s' % cn, + 'description': desc}))) + if sleep: + time.sleep(2) + + +def add_multi_member(server, cn, mem_id, mem_usr, testbase, sleep=True): + dn = 'cn=%s,ou=groups,%s' % (cn, testbase) + members = [] + for usr in mem_usr: + members.append('cn=a%d,ou=be_%d,%s' % (mem_id, usr, testbase)) + for mem in members: + mod = [(ldap.MOD_ADD, 'member', ensure_bytes(mem))] + try: + server.modify_s(dn, mod) + except ldap.OBJECT_CLASS_VIOLATION: + log.info('objectclass violation') + + if sleep: + time.sleep(2) + + +def add_member(server, cn, mem, testbase, sleep=True): + dn = 'cn=%s,ou=groups,%s' % (cn, testbase) + mem_dn = 'cn=%s,ou=people,%s' % (mem, testbase) + mod = [(ldap.MOD_ADD, 'member', ensure_bytes(mem_dn))] + server.modify_s(dn, mod) + if sleep: + time.sleep(2) + + +def add_group(server, testbase, nr, sleep=True): + + dn = 'cn=g%d,ou=groups,%s' % (nr, testbase) + server.add_s(Entry((dn, {'objectclass': ['top', 'groupofnames'], + 'member': [ + 'cn=m1_%d,%s' % (nr, testbase), + 'cn=m2_%d,%s' % (nr, testbase), + 'cn=m3_%d,%s' % (nr, testbase) + ], + 'description': 'group %d' % nr}))) + if sleep: + time.sleep(2) + + +def del_group(server, testbase, nr, sleep=True): + + dn = 'cn=g%d,%s' % (nr, testbase) + server.delete_s(dn) + if sleep: + time.sleep(2) + + +def mod_entry(server, cn, testbase, desc): + dn = 'cn=%s,%s' % (cn, testbase) + mod = [(ldap.MOD_ADD, 'description', ensure_bytes(desc))] + server.modify_s(dn, mod) + time.sleep(2) + + +def del_entry(server, testbase, cn): + dn = 'cn=%s,%s' % (cn, testbase) + server.delete_s(dn) + time.sleep(2) + + +def _disable_nunc_stans(server): + server.config.set('nsslapd-enable-nunc-stans', 'off') + + +def _enable_spec_logging(server): + server.config.replace_many(('nsslapd-accesslog-level', '260'), + ('nsslapd-errorlog-level', str(8192 + 65536)), + ('nsslapd-plugin-logging', 'on'), + ('nsslapd-auditlog-logging-enabled', 'on')) + + +def create_backend(s1, s2, beSuffix, beName): + s1.mappingtree.create(beSuffix, beName) + s1.backend.create(beSuffix, {BACKEND_NAME: beName}) + s2.mappingtree.create(beSuffix, beName) + s2.backend.create(beSuffix, {BACKEND_NAME: beName}) + + +def replicate_backend(s1, s2, beSuffix): + repl = ReplicationManager(beSuffix) + repl.create_first_master(s1) + repl.join_master(s1, s2) + repl.ensure_agreement(s1, s2) + repl.ensure_agreement(s2, s2) + # agreement m2_m1_agmt is not needed... 
:p + # + + +def check_group_mods(server1, server2, group, testbase): + # add members to group + add_multi_member(server1, group, 1, [1,2,3,4,5], testbase, sleep=False) + add_multi_member(server1, group, 2, [3,4,5], testbase, sleep=False) + add_multi_member(server1, group, 3, [0], testbase, sleep=False) + add_multi_member(server1, group, 4, [1,3,5], testbase, sleep=False) + add_multi_member(server1, group, 5, [2,0], testbase, sleep=False) + add_multi_member(server1, group, 6, [2,3,4], testbase, sleep=False) + # check that replication is working + # for main backend and some member backends + _wait_for_sync(server1, server2, testbase, None) + for i in range(6): + be = "be_%d" % i + _wait_for_sync(server1, server2, 'ou=%s,dc=test,dc=com' % be, None) + + +def check_multi_group_mods(server1, server2, group1, group2, testbase): + # add members to group + add_multi_member(server2, group1, 1, [1,2,3,4,5], testbase, sleep=False) + add_multi_member(server1, group2, 1, [1,2,3,4,5], testbase, sleep=False) + add_multi_member(server2, group1, 2, [3,4,5], testbase, sleep=False) + add_multi_member(server1, group2, 2, [3,4,5], testbase, sleep=False) + add_multi_member(server2, group1, 3, [0], testbase, sleep=False) + add_multi_member(server1, group2, 3, [0], testbase, sleep=False) + add_multi_member(server2, group1, 4, [1,3,5], testbase, sleep=False) + add_multi_member(server1, group2, 4, [1,3,5], testbase, sleep=False) + add_multi_member(server2, group1, 5, [2,0], testbase, sleep=False) + add_multi_member(server1, group2, 5, [2,0], testbase, sleep=False) + add_multi_member(server2, group1, 6, [2,3,4], testbase, sleep=False) + add_multi_member(server1, group2, 6, [2,3,4], testbase, sleep=False) + # check that replication is working + # for main backend and some member backends + _wait_for_sync(server1, server2, testbase, None) + for i in range(6): + be = "be_%d" % i + _wait_for_sync(server1, server2, 'ou=%s,dc=test,dc=com' % be, None) + + +def test_ticket49287(topology_m2): + """ + test case for memberof and conflict entries + + """ + + # return + M1 = topology_m2.ms["master1"] + M2 = topology_m2.ms["master2"] + + config_memberof(M1) + config_memberof(M2) + + _enable_spec_logging(M1) + _enable_spec_logging(M2) + + _disable_nunc_stans(M1) + _disable_nunc_stans(M2) + + M1.restart(timeout=10) + M2.restart(timeout=10) + + testbase = 'dc=test,dc=com' + bename = 'test' + create_backend(M1, M2, testbase, bename) + add_dc(M1, testbase) + add_ou(M1, 'ou=groups,%s' % testbase) + replicate_backend(M1, M2, testbase) + + peoplebase = 'ou=people,dc=test,dc=com' + peoplebe = 'people' + create_backend(M1, M2, peoplebase, peoplebe) + add_ou(M1, peoplebase) + replicate_backend(M1, M2, peoplebase) + + for i in range(10): + cn = 'a%d' % i + add_user(M1, cn, peoplebase, 'add on m1', sleep=False) + time.sleep(2) + add_group(M1, testbase, 1) + for i in range(10): + cn = 'a%d' % i + add_member(M1, 'g1', cn, testbase, sleep=False) + cn = 'b%d' % i + add_user(M1, cn, peoplebase, 'add on m1', sleep=False) + time.sleep(2) + + _wait_for_sync(M1, M2, testbase, None) + _wait_for_sync(M1, M2, peoplebase, None) + + # test group with members in multiple backends + for i in range(7): + be = "be_%d" % i + _add_repl_backend(M1, M2, be) + + # add entries akllowing meberof + for i in range(1, 7): + be = "be_%d" % i + for i in range(10): + cn = 'a%d' % i + add_user(M1, cn, 'ou=%s,dc=test,dc=com' % be, 'add on m1', sleep=False) + # add entries not allowing memberof + be = 'be_0' + for i in range(10): + cn = 'a%d' % i + add_person(M1, cn, 
'ou=%s,dc=test,dc=com' % be, 'add on m1', sleep=False) + + _disable_auto_oc_memberof(M1) + _disable_auto_oc_memberof(M2) + add_group(M1, testbase, 2) + check_group_mods(M1, M2, 'g2', testbase) + + _enable_auto_oc_memberof(M1) + add_group(M1, testbase, 3) + check_group_mods(M1, M2, 'g3', testbase) + + _enable_auto_oc_memberof(M2) + add_group(M1, testbase, 4) + check_group_mods(M1, M2, 'g4', testbase) + + add_group(M1, testbase, 5) + add_group(M1, testbase, 6) + check_multi_group_mods(M1, M2, 'g5', 'g6', testbase) + + +if __name__ == '__main__': + # Run isolated + # -s for DEBUG mode + CURRENT_FILE = os.path.realpath(__file__) + pytest.main("-s %s" % CURRENT_FILE) diff --git a/dirsrvtests/tests/tickets/ticket49290_test.py b/dirsrvtests/tests/tickets/ticket49290_test.py new file mode 100644 index 0000000..fe47d18 --- /dev/null +++ b/dirsrvtests/tests/tickets/ticket49290_test.py @@ -0,0 +1,68 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2017 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. +# --- END COPYRIGHT BLOCK --- +# + +import pytest +import ldap + +from lib389.topologies import topology_st +from lib389._constants import DEFAULT_SUFFIX, DEFAULT_BENAME + +from lib389.backend import Backends + +pytestmark = pytest.mark.tier2 + +def test_49290_range_unindexed_notes(topology_st): + """ + Ticket 49290 had a small collection of issues - the primary issue is + that range requests on an attribute that is unindexed was not reporting + notes=U. This asserts that: + + * When unindexed, the attr shows notes=U + * when indexed, the attr does not + """ + + # First, assert that modifyTimestamp does not have an index. If it does, + # delete it. + topology_st.standalone.config.set('nsslapd-accesslog-logbuffering', 'off') + backends = Backends(topology_st.standalone) + backend = backends.get(DEFAULT_BENAME) + indexes = backend.get_indexes() + + for i in indexes.list(): + i_cn = i.get_attr_val_utf8('cn') + if i_cn.lower() == 'modifytimestamp': + i.delete() + topology_st.standalone.restart() + + # Now restart the server, and perform a modifyTimestamp range operation. + # in access, we should see notes=U (or notes=A) + results = topology_st.standalone.search_s(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE, '(modifyTimestamp>=0)', ['nsUniqueId',]) + access_lines_unindexed = topology_st.standalone.ds_access_log.match('.*notes=U.*') + assert len(access_lines_unindexed) == 1 + + # Now add the modifyTimestamp index and run db2index. This will restart + # the server + indexes.create(properties={ + 'cn': 'modifytimestamp', + 'nsSystemIndex': 'false', + 'nsIndexType' : 'eq', + }) + topology_st.standalone.stop() + assert topology_st.standalone.db2index(DEFAULT_BENAME, attrs=['modifytimestamp'] ) + topology_st.standalone.start() + + # Now run the modifyTimestamp range query again. Assert that there is no + # notes=U/A in the log + results = topology_st.standalone.search_s(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE, '(modifyTimestamp>=0)', ['nsUniqueId',]) + access_lines_indexed = topology_st.standalone.ds_access_log.match('.*notes=U.*') + # Remove the old lines too. + access_lines_final = set(access_lines_unindexed) - set(access_lines_indexed) + # Make sure we have no unindexed notes in the log. 
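+    # (Editor's note: illustrative comment, not part of the original test.)  The
+    # RESULT line of an unindexed search carries a "notes=" hint; the comment
+    # earlier in this test points out that notes=A may appear instead of notes=U.
+    # If both flags should be caught, the pattern used to collect
+    # access_lines_unindexed and access_lines_indexed above could be widened to
+    # '.*notes=(U|A).*' before taking the same set difference.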
+ assert len(access_lines_final) == 0 + diff --git a/dirsrvtests/tests/tickets/ticket49303_test.py b/dirsrvtests/tests/tickets/ticket49303_test.py new file mode 100644 index 0000000..2ee7eb5 --- /dev/null +++ b/dirsrvtests/tests/tickets/ticket49303_test.py @@ -0,0 +1,113 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2017 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. +# --- END COPYRIGHT BLOCK --- +# +import time +import logging +import os +import subprocess +import pytest +from lib389.topologies import topology_st as topo +from lib389.nss_ssl import NssSsl + +from lib389._constants import SECUREPORT_STANDALONE1, HOST_STANDALONE1 + +pytestmark = pytest.mark.tier2 + +DEBUGGING = os.getenv("DEBUGGING", default=False) +if DEBUGGING: + logging.getLogger(__name__).setLevel(logging.DEBUG) +else: + logging.getLogger(__name__).setLevel(logging.INFO) +log = logging.getLogger(__name__) + + +def try_reneg(host, port): + """ + Connect to the specified host and port with openssl, and attempt to + initiate a renegotiation. Returns true if successful, false if not. + """ + + cmd = [ + '/usr/bin/openssl', + 's_client', + '-connect', + '%s:%d' % (host, port), + ] + + try: + proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, + stdin=subprocess.PIPE, + stderr=subprocess.PIPE) + except ValueError as e: + log.info("openssl failed: %s", e) + proc.kill() + + # This 'R' command is intercepted by openssl and triggers a renegotiation + proc.communicate(b'R\n') + + # We rely on openssl returning 0 if no errors occured, and 1 if any did + # (for example, the server rejecting renegotiation and terminating the + # connection) + return proc.returncode == 0 + + +def enable_ssl(server, ldapsport): + server.stop() + nss_ssl = NssSsl(dbpath=server.get_cert_dir()) + nss_ssl.reinit() + nss_ssl.create_rsa_ca() + nss_ssl.create_rsa_key_and_cert() + server.start() + server.config.set('nsslapd-secureport', '%s' % ldapsport) + server.config.set('nsslapd-security', 'on') + server.sslport = SECUREPORT_STANDALONE1 + server.restart() + + +def set_reneg(server, state): + server.encryption.set('nsTLSAllowClientRenegotiation', state) + time.sleep(1) + server.restart() + + +def test_ticket49303(topo): + """ + Test the nsTLSAllowClientRenegotiation setting. 
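+
+    :setup: Standalone instance; TLS is enabled by the test itself
+    :steps:
+        1. Enable TLS and probe renegotiation with the default settings
+        2. Set nsTLSAllowClientRenegotiation to "off" and probe again
+        3. Set it back to "on" and probe again
+        4. Set it to an invalid value and probe again
+    :expectedresults:
+        1. The openssl s_client 'R' renegotiation probe succeeds (allowed by default)
+        2. The probe is rejected
+        3. The probe succeeds again
+        4. The probe succeeds (an invalid value falls back to allowing renegotiation)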
+ """ + sslport = SECUREPORT_STANDALONE1 + + log.info("Ticket 49303 - Allow disabling of SSL renegotiation") + + # No value set, defaults to reneg allowed + enable_ssl(topo.standalone, sslport) + assert try_reneg(HOST_STANDALONE1, sslport) is True + log.info("Renegotiation allowed by default - OK") + + # Turn reneg off + set_reneg(topo.standalone, 'off') + assert try_reneg(HOST_STANDALONE1, sslport) is False + log.info("Renegotiation disallowed - OK") + + # Explicitly enable + set_reneg(topo.standalone, 'on') + assert try_reneg(HOST_STANDALONE1, sslport) is True + log.info("Renegotiation explicitly allowed - OK") + + # Set to an invalid value, defaults to allowed + set_reneg(topo.standalone, 'invalid') + assert try_reneg(HOST_STANDALONE1, sslport) is True + log.info("Renegotiation allowed when option is invalid - OK") + + log.info("Ticket 49303 - PASSED") + + +if __name__ == '__main__': + # Run isolated + # -s for DEBUG mode + CURRENT_FILE = os.path.realpath(__file__) + pytest.main("-s %s" % CURRENT_FILE) diff --git a/dirsrvtests/tests/tickets/ticket49386_test.py b/dirsrvtests/tests/tickets/ticket49386_test.py new file mode 100644 index 0000000..b416e9e --- /dev/null +++ b/dirsrvtests/tests/tickets/ticket49386_test.py @@ -0,0 +1,151 @@ +import logging +import pytest +import os +import ldap +import time +from lib389.utils import * +from lib389.topologies import topology_st as topo +from lib389._constants import * +from lib389.config import Config +from lib389 import Entry + +pytestmark = [pytest.mark.tier2, + pytest.mark.skipif(ds_is_older('1.3.7'), reason="Not implemented")] + +USER_CN='user_' +GROUP_CN='group_' + +DEBUGGING = os.getenv("DEBUGGING", default=False) +if DEBUGGING: + logging.getLogger(__name__).setLevel(logging.DEBUG) +else: + logging.getLogger(__name__).setLevel(logging.INFO) +log = logging.getLogger(__name__) + +def add_user(server, no, desc='dummy', sleep=True): + cn = '%s%d' % (USER_CN, no) + dn = 'cn=%s,ou=people,%s' % (cn, SUFFIX) + log.fatal('Adding user (%s): ' % dn) + server.add_s(Entry((dn, {'objectclass': ['top', 'person', 'inetuser'], + 'sn': ['_%s' % cn], + 'description': [desc]}))) + if sleep: + time.sleep(2) + +def add_group(server, nr, sleep=True): + cn = '%s%d' % (GROUP_CN, nr) + dn = 'cn=%s,ou=groups,%s' % (cn, SUFFIX) + server.add_s(Entry((dn, {'objectclass': ['top', 'groupofnames'], + 'description': 'group %d' % nr}))) + if sleep: + time.sleep(2) + +def update_member(server, member_dn, group_dn, op, sleep=True): + mod = [(op, 'member', ensure_bytes(member_dn))] + server.modify_s(group_dn, mod) + if sleep: + time.sleep(2) + +def config_memberof(server): + + server.plugins.enable(name=PLUGIN_MEMBER_OF) + MEMBEROF_PLUGIN_DN = ('cn=' + PLUGIN_MEMBER_OF + ',cn=plugins,cn=config') + server.modify_s(MEMBEROF_PLUGIN_DN, [(ldap.MOD_REPLACE, + 'memberOfAllBackends', + b'on'), + (ldap.MOD_REPLACE, 'memberOfAutoAddOC', b'nsMemberOf')]) + + +def _find_memberof(server, member_dn, group_dn, find_result=True): + ent = server.getEntry(member_dn, ldap.SCOPE_BASE, "(objectclass=*)", ['memberof']) + found = False + if ent.hasAttr('memberof'): + + for val in ent.getValues('memberof'): + server.log.info("!!!!!!! %s: memberof->%s" % (member_dn, val)) + server.log.info("!!!!!!! %s" % (val)) + server.log.info("!!!!!!! 
%s" % (group_dn)) + if val.lower() == ensure_bytes(group_dn.lower()): + found = True + break + + if find_result: + assert (found) + else: + assert (not found) + +def test_ticket49386(topo): + """Specify a test case purpose or name here + + :id: ceb1e2b7-42cb-49f9-8ddd-bc752aa4a589 + :setup: Fill in set up configuration here + :steps: + 1. Configure memberof + 2. Add users (user_1) + 3. Add groups (group_1) + 4. Make user_1 member of group_1 + 5. Check that user_1 has the memberof attribute to group_1 + 6. Enable plugin log to capture memberof modrdn callback notification + 7. Rename group_1 in itself + 8. Check that the operation was skipped by memberof + + :expectedresults: + 1. memberof modrdn callbackk to log notfication that the update is skipped + """ + + S1 = topo.standalone + + # Step 1 + config_memberof(S1) + S1.restart() + + # Step 2 + for i in range(10): + add_user(S1, i, desc='add on S1') + + # Step 3 + for i in range(3): + add_group(S1, i) + + # Step 4 + member_dn = 'cn=%s%d,ou=people,%s' % (USER_CN, 1, SUFFIX) + group_parent_dn = 'ou=groups,%s' % (SUFFIX) + group_rdn = 'cn=%s%d' % (GROUP_CN, 1) + group_dn = '%s,%s' % (group_rdn, group_parent_dn) + update_member(S1, member_dn, group_dn, ldap.MOD_ADD, sleep=False) + + # Step 5 + _find_memberof(S1, member_dn, group_dn, find_result=True) + + # Step 6 + S1.config.loglevel(vals=[LOG_PLUGIN, LOG_DEFAULT], service='error') + + # Step 7 + S1.rename_s(group_dn, group_rdn, newsuperior=group_parent_dn, delold=0) + + # Step 8 + time.sleep(2) # should not be useful.. + found = False + for i in S1.ds_error_log.match('.*Skip modrdn operation because src/dst identical.*'): + log.info('memberof log found: %s' % i) + found = True + assert(found) + + # If you need any test suite initialization, + # please, write additional fixture for that (including finalizer). + # Topology for suites are predefined in lib389/topologies.py. + + # If you need host, port or any other data about instance, + # Please, use the instance object attributes for that (for example, topo.ms["master1"].serverid) + + if DEBUGGING: + # Add debugging steps(if any)... + pass + + +if __name__ == '__main__': + # Run isolated + # -s for DEBUG mode + CURRENT_FILE = os.path.realpath(__file__) + pytest.main("-s %s" % CURRENT_FILE) + diff --git a/dirsrvtests/tests/tickets/ticket49412_test.py b/dirsrvtests/tests/tickets/ticket49412_test.py new file mode 100644 index 0000000..c529913 --- /dev/null +++ b/dirsrvtests/tests/tickets/ticket49412_test.py @@ -0,0 +1,67 @@ +import logging +import pytest +import os +import ldap +import time +from lib389._constants import * +from lib389.topologies import topology_m1c1 as topo +from lib389._constants import * +from lib389 import Entry + +pytestmark = pytest.mark.tier2 + +DEBUGGING = os.getenv("DEBUGGING", default=False) +if DEBUGGING: + logging.getLogger(__name__).setLevel(logging.DEBUG) +else: + logging.getLogger(__name__).setLevel(logging.INFO) +log = logging.getLogger(__name__) +CHANGELOG = 'cn=changelog5,cn=config' +MAXAGE_ATTR = 'nsslapd-changelogmaxage' +TRIMINTERVAL = 'nsslapd-changelogtrim-interval' + + + +def test_ticket49412(topo): + """Specify a test case purpose or name here + + :id: 4c7681ff-0511-4256-9589-bdcad84c13e6 + :setup: Fill in set up configuration here + :steps: + 1. Fill in test case steps here + 2. And indent them like this (RST format requirement) + :expectedresults: + 1. Fill in the result that is expected + 2. 
For each test step + """ + + M1 = topo.ms["master1"] + + # wrong call with invalid value (should be str(60) + # that create replace with NULL value + # it should fail with UNWILLING_TO_PERFORM + try: + M1.modify_s(CHANGELOG, [(ldap.MOD_REPLACE, MAXAGE_ATTR, 60), + (ldap.MOD_REPLACE, TRIMINTERVAL, 10)]) + assert(False) + except ldap.UNWILLING_TO_PERFORM: + pass + + # If you need any test suite initialization, + # please, write additional fixture for that (including finalizer). + # Topology for suites are predefined in lib389/topologies.py. + + # If you need host, port or any other data about instance, + # Please, use the instance object attributes for that (for example, topo.ms["master1"].serverid) + + if DEBUGGING: + # Add debugging steps(if any)... + pass + + +if __name__ == '__main__': + # Run isolated + # -s for DEBUG mode + CURRENT_FILE = os.path.realpath(__file__) + pytest.main("-s %s" % CURRENT_FILE) + diff --git a/dirsrvtests/tests/tickets/ticket49441_test.py b/dirsrvtests/tests/tickets/ticket49441_test.py new file mode 100644 index 0000000..7beda62 --- /dev/null +++ b/dirsrvtests/tests/tickets/ticket49441_test.py @@ -0,0 +1,76 @@ +import logging +import pytest +import os +import ldap +from lib389._constants import * +from lib389.topologies import topology_st as topo +from lib389.utils import * + +pytestmark = pytest.mark.tier2 + +DEBUGGING = os.getenv("DEBUGGING", default=False) +if DEBUGGING: + logging.getLogger(__name__).setLevel(logging.DEBUG) +else: + logging.getLogger(__name__).setLevel(logging.INFO) +log = logging.getLogger(__name__) + + +def test_ticket49441(topo): + """Import ldif with large indexed binary attributes, the server should not + crash + + :id: 4e5df145-cbd1-4955-8f77-6a7eaa14beba + :setup: standalone topology + :steps: + 1. Add indexes for binary attributes + 2. Perform online import + 3. Verify server is still running + :expectedresults: + 1. Indexes are successfully added + 2. Import succeeds + 3. 
Server is still running + """ + + log.info('Position ldif files, and add indexes...') + ldif_dir = topo.standalone.get_ldif_dir() + "binary.ldif" + ldif_file = (topo.standalone.getDir(__file__, DATA_DIR) + + "ticket49441/binary.ldif") + shutil.copyfile(ldif_file, ldif_dir) + args = {INDEX_TYPE: ['eq', 'pres']} + for attr in ('usercertificate', 'authorityrevocationlist', + 'certificaterevocationlist', 'crosscertificatepair', + 'cacertificate'): + try: + topo.standalone.index.create(suffix=DEFAULT_SUFFIX, + be_name='userroot', + attr=attr, args=args) + except ldap.LDAPError as e: + log.fatal("Failed to add index '{}' error: {}".format(attr, str(e))) + raise e + + log.info('Import LDIF with large indexed binary attributes...') + try: + topo.standalone.tasks.importLDIF(suffix=DEFAULT_SUFFIX, + input_file=ldif_dir, + args={TASK_WAIT: True}) + except: + log.fatal('Import failed!') + assert False + + log.info('Verify server is still running...') + try: + topo.standalone.search_s("", ldap.SCOPE_BASE, "objectclass=*") + except ldap.LDAPError as e: + log.fatal('Server is not alive: ' + str(e)) + assert False + + log.info('Test PASSED') + + +if __name__ == '__main__': + # Run isolated + # -s for DEBUG mode + CURRENT_FILE = os.path.realpath(__file__) + pytest.main("-s %s" % CURRENT_FILE) + diff --git a/dirsrvtests/tests/tickets/ticket49460_test.py b/dirsrvtests/tests/tickets/ticket49460_test.py new file mode 100644 index 0000000..b642663 --- /dev/null +++ b/dirsrvtests/tests/tickets/ticket49460_test.py @@ -0,0 +1,117 @@ +import time +import ldap +import logging +import pytest +import os +import re +from lib389._constants import * +from lib389.config import Config +from lib389 import DirSrv, Entry +from lib389.topologies import topology_m3 as topo + +pytestmark = pytest.mark.tier2 + +DEBUGGING = os.getenv("DEBUGGING", default=False) +if DEBUGGING: + logging.getLogger(__name__).setLevel(logging.DEBUG) +else: + logging.getLogger(__name__).setLevel(logging.INFO) +log = logging.getLogger(__name__) + +USER_CN="user" + +def add_user(server, no, desc='dummy', sleep=True): + cn = '%s%d' % (USER_CN, no) + dn = 'cn=%s,ou=people,%s' % (cn, SUFFIX) + log.fatal('Adding user (%s): ' % dn) + server.add_s(Entry((dn, {'objectclass': ['top', 'person', 'inetuser', 'userSecurityInformation'], + 'sn': ['_%s' % cn], + 'description': [desc]}))) + time.sleep(1) + +def check_user(server, no, timeout=10): + + cn = '%s%d' % (USER_CN, no) + dn = 'cn=%s,ou=people,%s' % (cn, SUFFIX) + found = False + cpt = 0 + while cpt < timeout: + try: + server.getEntry(dn, ldap.SCOPE_BASE, "(objectclass=*)") + found = True + break + except ldap.NO_SUCH_OBJECT: + time.sleep(1) + cpt += 1 + return found + +def pattern_errorlog(server, log_pattern): + file_obj = open(server.errlog, "r") + + found = None + # Use a while true iteration because 'for line in file: hit a + while True: + line = file_obj.readline() + found = log_pattern.search(line) + if ((line == '') or (found)): + break + + return found + +def test_ticket_49460(topo): + """Specify a test case purpose or name here + + :id: d1aa2e8b-e6ab-4fc6-9c63-c6f622544f2d + :setup: Fill in set up configuration here + :steps: + 1. Enable replication logging + 2. Do few updates to generatat RUV update + :expectedresults: + 1. 
No report of failure when the RUV is updated + """ + + M1 = topo.ms["master1"] + M2 = topo.ms["master2"] + M3 = topo.ms["master3"] + + for i in (M1, M2, M3): + i.config.loglevel(vals=[256 + 4], service='access') + i.config.loglevel(vals=[LOG_REPLICA, LOG_DEFAULT], service='error') + + add_user(M1, 11, desc="add to M1") + add_user(M2, 21, desc="add to M2") + add_user(M3, 31, desc="add to M3") + + for i in (M1, M2, M3): + assert check_user(i, 11) + assert check_user(i, 21) + assert check_user(i, 31) + + time.sleep(10) + + #M1.tasks.cleanAllRUV(suffix=SUFFIX, replicaid='3', + # force=False, args={TASK_WAIT: True}) + #time.sleep(10) + regex = re.compile(".*Failed to update RUV tombstone.*LDAP error - 0") + assert not pattern_errorlog(M1, regex) + assert not pattern_errorlog(M2, regex) + assert not pattern_errorlog(M3, regex) + + # If you need any test suite initialization, + # please, write additional fixture for that (including finalizer). + # Topology for suites are predefined in lib389/topologies.py. + + # If you need host, port or any other data about instance, + # Please, use the instance object attributes for that (for example, topo.ms["master1"].serverid) + + if DEBUGGING: + # Add debugging steps(if any)... + pass + + +if __name__ == '__main__': + # Run isolated + # -s for DEBUG mode + CURRENT_FILE = os.path.realpath(__file__) + pytest.main("-s %s" % CURRENT_FILE) + diff --git a/dirsrvtests/tests/tickets/ticket49463_test.py b/dirsrvtests/tests/tickets/ticket49463_test.py new file mode 100644 index 0000000..8fc114e --- /dev/null +++ b/dirsrvtests/tests/tickets/ticket49463_test.py @@ -0,0 +1,228 @@ +import time +import ldap +import logging +import pytest +import os +import re +from lib389._constants import DEFAULT_SUFFIX, SUFFIX, LOG_REPLICA, LOG_DEFAULT +from lib389.config import Config +from lib389 import DirSrv, Entry +from lib389.topologies import topology_m4 as topo +from lib389.replica import Replicas, ReplicationManager +from lib389.idm.user import UserAccounts, UserAccount +from lib389.tasks import * +from lib389.utils import * + +pytestmark = pytest.mark.tier2 + +USER_CN = "test_user" + + +def add_user(server, no, desc='dummy'): + user = UserAccounts(server, DEFAULT_SUFFIX) + users = user.create_test_user(uid=no) + users.add('description', [desc]) + users.add('objectclass', 'userSecurityInformation') + + +def pattern_errorlog(server, log_pattern): + for i in range(10): + time.sleep(5) + found = server.ds_error_log.match(log_pattern) + if found == '' or found: + return found + break + + +def fractional_server_to_replica(server, replica): + repl = ReplicationManager(DEFAULT_SUFFIX) + repl.ensure_agreement(server, replica) + replica_server = Replicas(server).get(DEFAULT_SUFFIX) + agmt_server = replica_server.get_agreements().list()[0] + agmt_server.replace_many( + ('nsDS5ReplicatedAttributeListTotal', '(objectclass=*) $ EXCLUDE telephoneNumber'), + ('nsDS5ReplicatedAttributeList', '(objectclass=*) $ EXCLUDE telephoneNumber'), + ('nsds5ReplicaStripAttrs', 'modifiersname modifytimestamp'), + ) + + +def count_pattern_accesslog(server, log_pattern): + count = 0 + server.config.set('nsslapd-accesslog-logbuffering', 'off') + if server.ds_access_log.match(log_pattern): + count = count + 1 + + return count + + +def test_ticket_49463(topo): + """Specify a test case purpose or name here + + :id: 2a68e8be-387d-4ac7-9452-1439e8483c13 + :setup: Fill in set up configuration here + :steps: + 1. Enable fractional replication + 2. Enable replication logging + 3. 
Check that replication is working fine + 4. Generate skipped updates to create keep alive entries + 5. Remove M3 from the topology + 6. issue cleanAllRuv FORCE that will run on M1 then propagated M2 and M4 + 7. Check that Number DEL keep alive '3' is <= 1 + 8. Check M1 is the originator of cleanAllRuv and M2/M4 the propagated ones + 9. Check replication M1,M2 and M4 can recover + 10. Remove M4 from the topology + 11. Issue cleanAllRuv not force while M2 is stopped (that hangs the cleanAllRuv) + 12. Check that nsds5ReplicaCleanRUV is correctly encoded on M1 (last value: 1) + 13. Check that nsds5ReplicaCleanRUV encoding survives M1 restart + 14. Check that nsds5ReplicaCleanRUV encoding is valid on M2 (last value: 0) + 15. Check that (for M4 cleanAllRUV) M1 is Originator and M2 propagation + :expectedresults: + 1. No report of failure when the RUV is updated + """ + + # Step 1 - Configure fractional (skip telephonenumber) replication + M1 = topo.ms["master1"] + M2 = topo.ms["master2"] + M3 = topo.ms["master3"] + M4 = topo.ms["master4"] + repl = ReplicationManager(DEFAULT_SUFFIX) + fractional_server_to_replica(M1, M2) + fractional_server_to_replica(M1, M3) + fractional_server_to_replica(M1, M4) + + fractional_server_to_replica(M2, M1) + fractional_server_to_replica(M2, M3) + fractional_server_to_replica(M2, M4) + + fractional_server_to_replica(M3, M1) + fractional_server_to_replica(M3, M2) + fractional_server_to_replica(M3, M4) + + fractional_server_to_replica(M4, M1) + fractional_server_to_replica(M4, M2) + fractional_server_to_replica(M4, M3) + + # Step 2 - enable internal op logging and replication debug + for i in (M1, M2, M3, M4): + i.config.loglevel(vals=[256 + 4], service='access') + i.config.loglevel(vals=[LOG_REPLICA, LOG_DEFAULT], service='error') + + # Step 3 - Check that replication is working fine + add_user(M1, 11, desc="add to M1") + add_user(M2, 21, desc="add to M2") + add_user(M3, 31, desc="add to M3") + add_user(M4, 41, desc="add to M4") + + for i in (M1, M2, M3, M4): + for j in (M1, M2, M3, M4): + if i == j: + continue + repl.wait_for_replication(i, j) + + # Step 4 - Generate skipped updates to create keep alive entries + for i in (M1, M2, M3, M4): + cn = '%s_%d' % (USER_CN, 11) + dn = 'uid=%s,ou=People,%s' % (cn, SUFFIX) + users = UserAccount(i, dn) + for j in range(110): + users.set('telephoneNumber', str(j)) + + # Step 5 - Remove M3 from the topology + M3.stop() + M1.agreement.delete(suffix=SUFFIX, consumer_host=M3.host, consumer_port=M3.port) + M2.agreement.delete(suffix=SUFFIX, consumer_host=M3.host, consumer_port=M3.port) + M4.agreement.delete(suffix=SUFFIX, consumer_host=M3.host, consumer_port=M3.port) + + # Step 6 - Then issue cleanAllRuv FORCE that will run on M1, M2 and M4 + M1.tasks.cleanAllRUV(suffix=SUFFIX, replicaid='3', + force=True, args={TASK_WAIT: True}) + + # Step 7 - Count the number of received DEL of the keep alive 3 + for i in (M1, M2, M4): + i.restart() + regex = re.compile(".*DEL dn=.cn=repl keep alive 3.*") + for i in (M1, M2, M4): + count = count_pattern_accesslog(M1, regex) + log.debug("count on %s = %d" % (i, count)) + + # check that DEL is replicated once (If DEL is kept in the fix) + # check that DEL is is not replicated (If DEL is finally no long done in the fix) + assert ((count == 1) or (count == 0)) + + # Step 8 - Check that M1 is Originator of cleanAllRuv and M2, M4 propagation + regex = re.compile(".*Original task deletes Keep alive entry .3.*") + assert pattern_errorlog(M1, regex) + + regex = re.compile(".*Propagated task does not delete 
Keep alive entry .3.*") + assert pattern_errorlog(M2, regex) + assert pattern_errorlog(M4, regex) + + # Step 9 - Check replication M1,M2 and M4 can recover + add_user(M1, 12, desc="add to M1") + add_user(M2, 22, desc="add to M2") + for i in (M1, M2, M4): + for j in (M1, M2, M4): + if i == j: + continue + repl.wait_for_replication(i, j) + + # Step 10 - Remove M4 from the topology + M4.stop() + M1.agreement.delete(suffix=SUFFIX, consumer_host=M4.host, consumer_port=M4.port) + M2.agreement.delete(suffix=SUFFIX, consumer_host=M4.host, consumer_port=M4.port) + + # Step 11 - Issue cleanAllRuv not force while M2 is stopped (that hangs the cleanAllRuv) + M2.stop() + M1.tasks.cleanAllRUV(suffix=SUFFIX, replicaid='4', + force=False, args={TASK_WAIT: False}) + + # Step 12 + # CleanAllRuv is hanging waiting for M2 to restart + # Check that nsds5ReplicaCleanRUV is correctly encoded on M1 + replicas = Replicas(M1) + replica = replicas.list()[0] + time.sleep(0.5) + replica.present('nsds5ReplicaCleanRUV') + log.info("M1: nsds5ReplicaCleanRUV=%s" % replica.get_attr_val_utf8('nsds5replicacleanruv')) + regex = re.compile("^4:.*:no:1$") + assert regex.match(replica.get_attr_val_utf8('nsds5replicacleanruv')) + + # Step 13 + # Check that it encoding survives restart + M1.restart() + assert replica.present('nsds5ReplicaCleanRUV') + assert regex.match(replica.get_attr_val_utf8('nsds5replicacleanruv')) + + # Step 14 - Check that nsds5ReplicaCleanRUV encoding is valid on M2 + M1.stop() + M2.start() + replicas = Replicas(M2) + replica = replicas.list()[0] + M1.start() + time.sleep(0.5) + if replica.present('nsds5ReplicaCleanRUV'): + log.info("M2: nsds5ReplicaCleanRUV=%s" % replica.get_attr_val_utf8('nsds5replicacleanruv')) + regex = re.compile("^4:.*:no:0$") + assert regex.match(replica.get_attr_val_utf8('nsds5replicacleanruv')) + + # time to run cleanAllRuv + for i in (M1, M2): + for j in (M1, M2): + if i == j: + continue + repl.wait_for_replication(i, j) + + # Step 15 - Check that M1 is Originator of cleanAllRuv and M2 propagation + regex = re.compile(".*Original task deletes Keep alive entry .4.*") + assert pattern_errorlog(M1, regex) + + regex = re.compile(".*Propagated task does not delete Keep alive entry .4.*") + assert pattern_errorlog(M2, regex) + + +if __name__ == '__main__': + # Run isolated + # -s for DEBUG mode + CURRENT_FILE = os.path.realpath(__file__) + pytest.main("-s %s" % CURRENT_FILE) + diff --git a/dirsrvtests/tests/tickets/ticket49471_test.py b/dirsrvtests/tests/tickets/ticket49471_test.py new file mode 100644 index 0000000..dcab15f --- /dev/null +++ b/dirsrvtests/tests/tickets/ticket49471_test.py @@ -0,0 +1,81 @@ +import logging +import pytest +import os +import time +import ldap +from lib389._constants import * +from lib389.topologies import topology_st as topo +from lib389 import Entry + +pytestmark = pytest.mark.tier2 + +DEBUGGING = os.getenv("DEBUGGING", default=False) +if DEBUGGING: + logging.getLogger(__name__).setLevel(logging.DEBUG) +else: + logging.getLogger(__name__).setLevel(logging.INFO) +log = logging.getLogger(__name__) + + +USER_CN='user_' +def _user_get_dn(no): + cn = '%s%d' % (USER_CN, no) + dn = 'cn=%s,ou=people,%s' % (cn, SUFFIX) + return (cn, dn) + +def add_user(server, no, desc='dummy', sleep=True): + (cn, dn) = _user_get_dn(no) + log.fatal('Adding user (%s): ' % dn) + server.add_s(Entry((dn, {'objectclass': ['top', 'person', 'inetuser', 'userSecurityInformation'], + 'cn': [cn], + 'description': [desc], + 'sn': [cn], + 'description': ['add on that host']}))) + if sleep: + 
time.sleep(2) + +def test_ticket49471(topo): + """Specify a test case purpose or name here + + :id: 457ab172-9455-4eb2-89a0-150e3de5993f + :setup: Fill in set up configuration here + :steps: + 1. Fill in test case steps here + 2. And indent them like this (RST format requirement) + :expectedresults: + 1. Fill in the result that is expected + 2. For each test step + """ + + # If you need any test suite initialization, + # please, write additional fixture for that (including finalizer). + # Topology for suites are predefined in lib389/topologies.py. + + # If you need host, port or any other data about instance, + # Please, use the instance object attributes for that (for example, topo.ms["master1"].serverid) + + S1 = topo.standalone + add_user(S1, 1) + + Filter = "(description:2.16.840.1.113730.3.3.2.1.1.6:=\*on\*)" + ents = S1.search_s(SUFFIX, ldap.SCOPE_SUBTREE, Filter) + assert len(ents) == 1 + + # + # The following is for the test 49491 + # skipped here else it crashes in ASAN + #Filter = "(description:2.16.840.1.113730.3.3.2.1.1.6:=\*host)" + #ents = S1.search_s(SUFFIX, ldap.SCOPE_SUBTREE, Filter) + #assert len(ents) == 1 + + if DEBUGGING: + # Add debugging steps(if any)... + pass + + +if __name__ == '__main__': + # Run isolated + # -s for DEBUG mode + CURRENT_FILE = os.path.realpath(__file__) + pytest.main("-s %s" % CURRENT_FILE) + diff --git a/dirsrvtests/tests/tickets/ticket49540_test.py b/dirsrvtests/tests/tickets/ticket49540_test.py new file mode 100644 index 0000000..36fd967 --- /dev/null +++ b/dirsrvtests/tests/tickets/ticket49540_test.py @@ -0,0 +1,135 @@ +import logging +import pytest +import os +import ldap +import time +import re +from lib389._constants import * +from lib389.tasks import * +from lib389.topologies import topology_st as topo +from lib389.idm.user import UserAccount, UserAccounts, TEST_USER_PROPERTIES +from lib389 import Entry + +pytestmark = pytest.mark.tier2 + +DEBUGGING = os.getenv("DEBUGGING", default=False) +if DEBUGGING: + logging.getLogger(__name__).setLevel(logging.DEBUG) +else: + logging.getLogger(__name__).setLevel(logging.INFO) +log = logging.getLogger(__name__) + +HOMEDIRECTORY_INDEX = 'cn=homeDirectory,cn=index,cn=userRoot,cn=ldbm database,cn=plugins,cn=config' +HOMEDIRECTORY_CN = "homedirectory" +MATCHINGRULE = 'nsMatchingRule' +USER_CN = 'user_' + +def create_index_entry(topo): + log.info("\n\nindex homeDirectory") + try: + ent = topo.getEntry(HOMEDIRECTORY_INDEX, ldap.SCOPE_BASE) + except ldap.NO_SUCH_OBJECT: + topo.add_s(Entry((HOMEDIRECTORY_INDEX, { + 'objectclass': "top nsIndex".split(), + 'cn': HOMEDIRECTORY_CN, + 'nsSystemIndex': 'false', + MATCHINGRULE: ['caseIgnoreIA5Match', 'caseExactIA5Match' ], + 'nsIndexType': ['eq', 'sub', 'pres']}))) + + +def provision_users(topo): + test_users = [] + homeValue = b'x' * (32 * 1024) # just to slow down indexing + for i in range(100): + CN = '%s%d' % (USER_CN, i) + users = UserAccounts(topo, SUFFIX) + user_props = TEST_USER_PROPERTIES.copy() + user_props.update({'uid': CN, 'cn': CN, 'sn': '_%s' % CN, HOMEDIRECTORY_CN: homeValue}) + testuser = users.create(properties=user_props) + test_users.append(testuser) + return test_users + +def start_start_status(server): + args = {TASK_WAIT: False} + indexTask = Tasks(server) + indexTask.reindex(suffix=SUFFIX, attrname='homeDirectory', args=args) + return indexTask + +def check_task_status(server, indexTask, test_entry): + finish_pattern = re.compile(".*Finished indexing.*") + mod = [(ldap.MOD_REPLACE, 'sn', b'foo')] + for i in range(10): + 
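+        # (Editor's note) Each iteration polls the reindex task entry: while
+        # nsTaskStatus does not yet contain "Finished indexing", a MOD against
+        # the test entry is expected to be refused with UNWILLING_TO_PERFORM;
+        # once the status reports completion, the same MOD must succeed.  The
+        # code below also tolerates the race where indexing finishes between
+        # the status read and the MOD.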
log.info("check_task_status =========> %d th loop" % i) + try: + ent = server.getEntry(indexTask.dn, ldap.SCOPE_BASE) + if ent.hasAttr('nsTaskStatus'): + value = str(ent.getValue('nsTaskStatus')) + finish = finish_pattern.search(value) + log.info("%s ---> %s" % (indexTask.dn, value)) + else: + finish = None + log.info("%s ---> NO STATUS" % (indexTask.dn)) + + if not finish: + # This is not yet finished try an update + try: + server.modify_s(test_entry, mod) + + # weird, may be indexing just complete + ent = server.getEntry(indexTask.dn, ldap.SCOPE_BASE, ['nsTaskStatus']) + assert (ent.hasAttr('nsTaskStatus') and regex.search(ent.getValue('nsTaskStatus'))) + log.info("Okay, it just finished so the MOD was successful") + except ldap.UNWILLING_TO_PERFORM: + log.info("=========> Great it was expected in the middle of index") + else: + # The update should be successful + server.modify_s(test_entry, mod) + + except ldap.NO_SUCH_OBJECT: + log.info("%s: no found" % (indexTask.dn)) + + time.sleep(1) + +def test_ticket49540(topo): + """Specify a test case purpose or name here + + :id: 1df16d5a-1b92-46b7-8435-876b87545748 + :setup: Standalone Instance + :steps: + 1. Create homeDirectory index (especially with substring) + 2. Creates 100 users with large homeDirectory value => long to index + 3. Start an indexing task WITHOUT waiting for its completion + 4. Monitor that until task.status = 'Finish', any update -> UNWILLING to perform + :expectedresults: + 1. Index configuration succeeds + 2. users entry are successfully created + 3. Indexing task is started + 4. If the task.status does not contain 'Finished indexing', any update should return UNWILLING_TO_PERFORM + When it contains 'Finished indexing', updates should be successful + """ + + server = topo.standalone + create_index_entry(server) + test_users = provision_users(server) + + indexTask = start_start_status(server) + check_task_status(server, indexTask, test_users[0].dn) + + # If you need any test suite initialization, + # please, write additional fixture for that (including finalizer). + # Topology for suites are predefined in lib389/topologies.py. + + # If you need host, port or any other data about instance, + # Please, use the instance object attributes for that (for example, topo.ms["master1"].serverid) + + if DEBUGGING: + # Add debugging steps(if any)... + pass + + +if __name__ == '__main__': + # Run isolated + # -s for DEBUG mode + CURRENT_FILE = os.path.realpath(__file__) + pytest.main(["-s", CURRENT_FILE]) + diff --git a/dirsrvtests/tests/tickets/ticket49623_2_test.py b/dirsrvtests/tests/tickets/ticket49623_2_test.py new file mode 100644 index 0000000..1d3167d --- /dev/null +++ b/dirsrvtests/tests/tickets/ticket49623_2_test.py @@ -0,0 +1,66 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2020 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. 
+# --- END COPYRIGHT BLOCK --- +# +import os +import ldap +import pytest +import subprocess +from lib389.tasks import * +from lib389.utils import * +from lib389.topologies import topology_m1 +from lib389.idm.user import UserAccounts +from lib389._constants import DEFAULT_SUFFIX +from contextlib import contextmanager + +pytestmark = pytest.mark.tier1 + +logging.getLogger(__name__).setLevel(logging.DEBUG) +log = logging.getLogger(__name__) + + +@pytest.mark.ds49623 +@pytest.mark.bz1790986 +def test_modrdn_loop(topology_m1): + """Test that renaming the same entry multiple times reusing the same + RDN multiple times does not result in cenotaph error messages + + :id: 631b2be9-5c03-44c7-9853-a87c923d5b30 + + :setup: Single master instance + + :steps: 1. Add an entry with RDN start rdn + 2. Rename the entry to rdn change + 3. Rename the entry to start again + 4. Rename the entry to rdn change + 5. check for cenotaph error messages + :expectedresults: + 1. No error messages + """ + + topo = topology_m1.ms['master1'] + TEST_ENTRY_RDN_START = 'start' + TEST_ENTRY_RDN_CHANGE = 'change' + TEST_ENTRY_NAME = 'tuser' + users = UserAccounts(topo, DEFAULT_SUFFIX) + user_properties = { + 'uid': TEST_ENTRY_RDN_START, + 'cn': TEST_ENTRY_NAME, + 'sn': TEST_ENTRY_NAME, + 'uidNumber': '1001', + 'gidNumber': '2001', + 'homeDirectory': '/home/{}'.format(TEST_ENTRY_NAME) + } + + tuser = users.create(properties=user_properties) + tuser.rename('uid={}'.format(TEST_ENTRY_RDN_CHANGE), newsuperior=None, deloldrdn=True) + tuser.rename('uid={}'.format(TEST_ENTRY_RDN_START), newsuperior=None, deloldrdn=True) + tuser.rename('uid={}'.format(TEST_ENTRY_RDN_CHANGE), newsuperior=None, deloldrdn=True) + + log.info("Check the log messages for cenotaph error") + error_msg = ".*urp_fixup_add_cenotaph - failed to add cenotaph, err= 68" + assert not topo.ds_error_log.match(error_msg) diff --git a/dirsrvtests/tests/tickets/ticket49658_test.py b/dirsrvtests/tests/tickets/ticket49658_test.py new file mode 100644 index 0000000..335739f --- /dev/null +++ b/dirsrvtests/tests/tickets/ticket49658_test.py @@ -0,0 +1,4266 @@ +import logging +import pytest +import os +import ldap +import time +import sys +print(sys.path) +from lib389 import Entry +from lib389._constants import DEFAULT_SUFFIX +from lib389.idm.user import UserAccounts, TEST_USER_PROPERTIES +from lib389.topologies import topology_m3 as topo + +pytestmark = pytest.mark.tier2 + +DEBUGGING = os.getenv("DEBUGGING", default=False) +if DEBUGGING: + logging.getLogger(__name__).setLevel(logging.DEBUG) +else: + logging.getLogger(__name__).setLevel(logging.INFO) +log = logging.getLogger(__name__) + + +MAX_EMPLOYEENUMBER_USER = 20 +MAX_STANDARD_USER = 100 +MAX_USER = MAX_STANDARD_USER + MAX_EMPLOYEENUMBER_USER +EMPLOYEENUMBER_RDN_START = 0 + +USER_UID='user_' +BASE_DISTINGUISHED = 'ou=distinguished,ou=people,%s' % (DEFAULT_SUFFIX) +BASE_REGULAR = 'ou=regular,ou=people,%s' % (DEFAULT_SUFFIX) + +def _user_get_dn(no): + uid = '%s%d' % (USER_UID, no) + dn = 'uid=%s,%s' % (uid, BASE_REGULAR) + return (uid, dn) + +def add_user(server, no, init_val): + (uid, dn) = _user_get_dn(no) + log.fatal('Adding user (%s): ' % dn) + server.add_s(Entry((dn, {'objectclass': ['top', 'person', 'organizationalPerson', 'inetOrgPerson'], + 'uid': [uid], + 'sn' : [uid], + 'cn' : [uid], + 'employeeNumber': init_val}))) + return dn + +def _employeenumber_user_get_dn(no): + employeeNumber = str(no) + dn = 'employeeNumber=%s,%s' % (employeeNumber, BASE_DISTINGUISHED) + return (employeeNumber, dn) + +def 
add_employeenumber_user(server, no): + (uid, dn) = _employeenumber_user_get_dn(EMPLOYEENUMBER_RDN_START + no) + log.fatal('Adding user (%s): ' % dn) + server.add_s(Entry((dn, {'objectclass': ['top', 'person', 'organizationalPerson', 'inetOrgPerson'], + 'uid': [uid], + 'sn' : [uid], + 'cn' : [uid], + 'employeeNumber': str(EMPLOYEENUMBER_RDN_START + no)}))) + return dn + +def save_stuff(): + M1 = topo.ms["master1"] + M2 = topo.ms["master2"] + M3 = topo.ms["master3"] + value_11 = '11'.encode() + value_1000 = '1000'.encode() + value_13 = '13'.encode() + value_14 = '14'.encode() + + # Step 2 + test_user_dn= add_user(M3, 0, value_11) + log.info('Adding %s on M3' % test_user_dn) + M3.modify_s(test_user_dn, [(ldap.MOD_DELETE, 'employeeNumber', value_11), (ldap.MOD_ADD, 'employeeNumber', value_1000)]) + ents = M3.search_s(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE, '(employeeNumber=*)') + assert len(ents) == 1 + + + # Step 3 + # Check the entry is replicated on M1 + for j in range(30): + try: + ent = M1.getEntry(test_user_dn, ldap.SCOPE_BASE,) + if not ent.hasAttr('employeeNumber'): + # wait for the MOD + log.info('M1 waiting for employeeNumber') + time.sleep(1) + continue; + break; + except ldap.NO_SUCH_OBJECT: + time.sleep(1) + pass + time.sleep(1) + ents = M1.search_s(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE, '(employeeNumber=*)') + assert len(ents) == 1 + + # Check the entry is replicated on M2 + for j in range(30): + try: + ent = M2.getEntry(test_user_dn, ldap.SCOPE_BASE,) + if not ent.hasAttr('employeeNumber'): + # wait for the MOD + log.info('M2 waiting for employeeNumber') + time.sleep(1) + continue; + + break; + except ldap.NO_SUCH_OBJECT: + time.sleep(1) + pass + time.sleep(1) + ents = M2.search_s(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE, '(employeeNumber=*)') + assert len(ents) == 1 + +def test_ticket49658_init(topo): + """Specify a test case purpose or name here + + :id: f8d43cef-c385-46a2-b32b-fdde2114b45e + :setup: 3 Master Instances + :steps: + 1. Create 3 suppliers + 2. Create on M3 MAX_USER test entries having a single-value attribute employeeNumber=11 + and update it MOD_DEL 11 + MOD_ADD 1000 + 3. Check they are replicated on M1 and M2 + :expectedresults: + 1. Fill in the result that is expected + 2. For each test step + """ + + # If you need any test suite initialization, + # please, write additional fixture for that (including finalizer). + # Topology for suites are predefined in lib389/topologies.py. + + # If you need host, port or any other data about instance, + # Please, use the instance object attributes for that (for example, topo.ms["master1"].serverid) + + if DEBUGGING: + # Add debugging steps(if any)... 
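+        # (Editor's illustrative suggestion, commented out) e.g. dump how many
+        # employeeNumber entries each supplier already holds:
+        #   for inst in (topo.ms["master1"], topo.ms["master2"], topo.ms["master3"]):
+        #       ents = inst.search_s(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE, '(employeeNumber=*)')
+        #       log.debug('%s: %d employeeNumber entries' % (inst.serverid, len(ents)))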
+ pass + M1 = topo.ms["master1"] + M2 = topo.ms["master2"] + M3 = topo.ms["master3"] + value_11 = '11'.encode() + value_1000 = '1000'.encode() + + # Step 2 + M3.add_s(Entry((BASE_DISTINGUISHED, {'objectclass': ['top', 'organizationalUnit'], + 'ou': ['distinguished']}))) + for i in range(MAX_EMPLOYEENUMBER_USER): + test_user_dn= add_employeenumber_user(M3, i) + log.info('Adding %s on M3' % test_user_dn) + ents = M3.search_s(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE, '(employeeNumber=*)') + assert len(ents) == (i + 1) + + M3.add_s(Entry((BASE_REGULAR, {'objectclass': ['top', 'organizationalUnit'], + 'ou': ['regular']}))) + for i in range(MAX_STANDARD_USER): + test_user_dn= add_user(M3, i, value_11) + log.info('Adding %s on M3' % test_user_dn) + M3.modify_s(test_user_dn, [(ldap.MOD_DELETE, 'employeeNumber', value_11), (ldap.MOD_ADD, 'employeeNumber', value_1000)]) + ents = M3.search_s(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE, '(employeeNumber=*)') + assert len(ents) == (MAX_EMPLOYEENUMBER_USER + i + 1) + + + # Step 3 + # Check the last entry is replicated on M1 + (uid, test_user_dn) = _user_get_dn(MAX_STANDARD_USER - 1) + for j in range(30): + try: + ent = M1.getEntry(test_user_dn, ldap.SCOPE_BASE,) + if not ent.hasAttr('employeeNumber'): + # wait for the MOD + log.info('M1 waiting for employeeNumber') + time.sleep(1) + continue; + break; + except ldap.NO_SUCH_OBJECT: + time.sleep(1) + pass + time.sleep(1) + ents = M1.search_s(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE, '(employeeNumber=*)') + assert len(ents) == MAX_USER + + # Check the last entry is replicated on M2 + for j in range(30): + try: + ent = M2.getEntry(test_user_dn, ldap.SCOPE_BASE,) + if not ent.hasAttr('employeeNumber'): + # wait for the MOD + log.info('M2 waiting for employeeNumber') + time.sleep(1) + continue; + + break; + except ldap.NO_SUCH_OBJECT: + time.sleep(1) + pass + time.sleep(1) + ents = M2.search_s(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE, '(employeeNumber=*)') + assert len(ents) == MAX_USER + +def test_ticket49658_0(topo): + """Do MOD(DEL+ADD) and replicate MOST RECENT first + M1: MOD(DEL+ADD) -> V1 + M2: MOD(DEL+ADD) -> V1 + expected: V1 + + :id: 5360b304-9b33-4d37-935f-ab73e0baa1aa + :setup: 3 Master Instances + 1. using user_0 where employNumber=1000 + :steps: + 1. Create 3 suppliers + 2. Isolate M1 and M2 by pausing the replication agreements + 3. On M1 do MOD_DEL 1000 + MOD_ADD_13 + 4. On M2 do MOD_DEL 1000 + MOD_ADD_13 + 5. Enable replication agreement M2 -> M3, so that update step 6 is replicated first + 6. Enable replication agreement M1 -> M3, so that update step 5 is replicated second + 7. Check that the employeeNumber is 13 on all servers + :expectedresults: + 1. Fill in the result that is expected + 2. For each test step + """ + + # If you need any test suite initialization, + # please, write additional fixture for that (including finalizer). + # Topology for suites are predefined in lib389/topologies.py. + + # If you need host, port or any other data about instance, + # Please, use the instance object attributes for that (for example, topo.ms["master1"].serverid) + + if DEBUGGING: + # Add debugging steps(if any)... 
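+        # (Editor's illustrative suggestion, commented out) e.g. raise the error
+        # log level to include replication debugging (8192) before the agreement
+        # pause/resume sequence below, to trace how M3 resolves the two MODs:
+        #   topo.ms["master3"].config.set('nsslapd-errorlog-level', '8192')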
+ pass + M1 = topo.ms["master1"] + M2 = topo.ms["master2"] + M3 = topo.ms["master3"] + value_1000 = '1000'.encode() + last = '0' + value_end = last.encode() + theFilter = '(employeeNumber=%s)' % last + (uid, test_user_dn) = _user_get_dn(int(last)) + + # + # Step 2 + # + # disable all RA from M1 and M2 + # only M3 can replicate the update + # + agreement_m1_m2 = M1.agreement.list(suffix=DEFAULT_SUFFIX, consumer_host=M2.host, consumer_port=M2.port) + agreement_m1_m3 = M1.agreement.list(suffix=DEFAULT_SUFFIX, consumer_host=M3.host, consumer_port=M3.port) + agreement_m2_m1 = M2.agreement.list(suffix=DEFAULT_SUFFIX, consumer_host=M1.host, consumer_port=M1.port) + agreement_m2_m3 = M2.agreement.list(suffix=DEFAULT_SUFFIX, consumer_host=M3.host, consumer_port=M3.port) + + M1.agreement.pause(agreement_m1_m2[0].dn) + M1.agreement.pause(agreement_m1_m3[0].dn) + M2.agreement.pause(agreement_m2_m1[0].dn) + M2.agreement.pause(agreement_m2_m3[0].dn) + + # Step 3 + # Oldest update + # check that the entry on M1 contains employeeNumber= + M1.modify_s(test_user_dn, [(ldap.MOD_DELETE, 'employeeNumber', value_1000), (ldap.MOD_ADD, 'employeeNumber', value_end)]) + ents = M1.search_s(BASE_REGULAR, ldap.SCOPE_SUBTREE, theFilter) + assert len(ents) == 1 + time.sleep(1) + + # Step 4 + # More recent update + # check that the entry on M2 contains employeeNumber= + M2.modify_s(test_user_dn, [(ldap.MOD_DELETE, 'employeeNumber', value_1000), (ldap.MOD_ADD, 'employeeNumber', value_end)]) + ents = M2.search_s(BASE_REGULAR, ldap.SCOPE_SUBTREE, theFilter) + assert len(ents) == 1 + + #time.sleep(60) + # Step 7 + # Renable M2 before M1 so that on M3, the most recent update is replicated before + for ra in agreement_m2_m1, agreement_m2_m3: + M2.agreement.resume(ra[0].dn) + + # Step 8 + # Renable M1 so that on M3 oldest update is now replicated + time.sleep(4) + for ra in agreement_m1_m2, agreement_m1_m3: + M1.agreement.resume(ra[0].dn) + + # Step 9 + # Check that M1 still contains employeeNumber= + ents = M1.search_s(BASE_REGULAR, ldap.SCOPE_SUBTREE, theFilter) + log.info('Search M1 employeeNumber=%s (vs. %s)' % (ents[0].getValue('employeeNumber'), value_end)) + assert len(ents) == 1 + assert ents[0].hasAttr('employeeNumber') and ents[0].getValue('employeeNumber') == value_end + + # Check that M2 still contains employeeNumber= + ents = M2.search_s(BASE_REGULAR, ldap.SCOPE_SUBTREE, theFilter) + log.info('Search M2 employeeNumber=%s (vs. %s)' % (ents[0].getValue('employeeNumber'), value_end)) + assert len(ents) == 1 + assert ents[0].hasAttr('employeeNumber') and ents[0].getValue('employeeNumber') == value_end + + # Check that M3 still contain employeeNumber and it contains employeeNumber= + ents = M3.search_s(BASE_REGULAR, ldap.SCOPE_SUBTREE, '(employeeNumber=*)') + assert len(ents) == MAX_STANDARD_USER + ents = M3.search_s(BASE_REGULAR, ldap.SCOPE_SUBTREE, theFilter) + log.info('Search M3 employeeNumber=%s (vs. %s)' % (ents[0].getValue('employeeNumber'), value_end)) + assert len(ents) == 1 + assert ents[0].hasAttr('employeeNumber') and ents[0].getValue('employeeNumber') == value_end + +def test_ticket49658_1(topo): + """Do MOD(DEL+ADD) and replicate OLDEST first + M2: MOD(DEL+ADD) -> V1 + M1: MOD(DEL+ADD) -> V1 + expected: V1 + + :id: bc6620d9-eae1-48af-8a4f-bc14405ea6b6 + :setup: 3 Master Instances + 1. using user_1 where employNumber=1000 + :steps: + 1. Isolate M1 and M2 by pausing the replication agreements + 2. On M2 do MOD_DEL 1000 + MOD_ADD_13 + 3. On M1 do MOD_DEL 1000 + MOD_ADD_13 + 4. 
Enable replication agreement M2 -> M3, so that update step 2 is replicated first + 5. Enable replication agreement M1 -> M3, so that update step 3 is replicated second + 6. Check that the employeeNumber is 13 on all servers + :expectedresults: + 1. Fill in the result that is expected + 2. For each test step + """ + + # If you need any test suite initialization, + # please, write additional fixture for that (including finalizer). + # Topology for suites are predefined in lib389/topologies.py. + + # If you need host, port or any other data about instance, + # Please, use the instance object attributes for that (for example, topo.ms["master1"].serverid) + + if DEBUGGING: + # Add debugging steps(if any)... + pass + M1 = topo.ms["master1"] + M2 = topo.ms["master2"] + M3 = topo.ms["master3"] + value_1000 = '1000'.encode() + last = '1' + value_end = last.encode() + theFilter = '(employeeNumber=%s)' % last + + # This test takes the user_1 + (uid, test_user_dn) = _user_get_dn(int(1)) + + # + # Step 4 + # + # disable all RA from M1 and M2 + # only M3 can replicate the update + # + agreement_m1_m2 = M1.agreement.list(suffix=DEFAULT_SUFFIX, consumer_host=M2.host, consumer_port=M2.port) + agreement_m1_m3 = M1.agreement.list(suffix=DEFAULT_SUFFIX, consumer_host=M3.host, consumer_port=M3.port) + agreement_m2_m1 = M2.agreement.list(suffix=DEFAULT_SUFFIX, consumer_host=M1.host, consumer_port=M1.port) + agreement_m2_m3 = M2.agreement.list(suffix=DEFAULT_SUFFIX, consumer_host=M3.host, consumer_port=M3.port) + + M1.agreement.pause(agreement_m1_m2[0].dn) + M1.agreement.pause(agreement_m1_m3[0].dn) + M2.agreement.pause(agreement_m2_m1[0].dn) + M2.agreement.pause(agreement_m2_m3[0].dn) + + # Step 5 + # Oldest update + # check that the entry on M2 contains employeeNumber= + M2.modify_s(test_user_dn, [(ldap.MOD_DELETE, 'employeeNumber', value_1000), (ldap.MOD_ADD, 'employeeNumber', value_end)]) + ents = M2.search_s(BASE_REGULAR, ldap.SCOPE_SUBTREE, theFilter) + assert len(ents) == 1 + time.sleep(1) + + # Step 6 + # More recent update + # check that the entry on M2 contains employeeNumber= + M1.modify_s(test_user_dn, [(ldap.MOD_DELETE, 'employeeNumber', value_1000), (ldap.MOD_ADD, 'employeeNumber', value_end)]) + ents = M1.search_s(BASE_REGULAR, ldap.SCOPE_SUBTREE, theFilter) + assert len(ents) == 1 + + #time.sleep(60) + # Step 7 + # Renable M2 before M1 so that on M3, the most recent update is replicated before + for ra in agreement_m2_m1, agreement_m2_m3: + M2.agreement.resume(ra[0].dn) + + # Step 8 + # Renable M1 so that on M3 oldest update is now replicated + time.sleep(4) + for ra in agreement_m1_m2, agreement_m1_m3: + M1.agreement.resume(ra[0].dn) + + # Step 9 + # Check that M1 still contains employeeNumber= + ents = M1.search_s(BASE_REGULAR, ldap.SCOPE_SUBTREE, theFilter) + log.info('Search M1 employeeNumber=%s (vs. %s)' % (ents[0].getValue('employeeNumber'), value_end)) + assert len(ents) == 1 + assert ents[0].hasAttr('employeeNumber') and ents[0].getValue('employeeNumber') == value_end + + # Check that M2 still contains employeeNumber= + ents = M2.search_s(BASE_REGULAR, ldap.SCOPE_SUBTREE, theFilter) + log.info('Search M2 employeeNumber=%s (vs. 
%s)' % (ents[0].getValue('employeeNumber'), value_end)) + assert len(ents) == 1 + assert ents[0].hasAttr('employeeNumber') and ents[0].getValue('employeeNumber') == value_end + + # Check that M3 still contain employeeNumber and it contains employeeNumber= + ents = M3.search_s(BASE_REGULAR, ldap.SCOPE_SUBTREE, '(employeeNumber=*)') + assert len(ents) == MAX_STANDARD_USER + ents = M3.search_s(BASE_REGULAR, ldap.SCOPE_SUBTREE, theFilter) + log.info('Search M3 employeeNumber=%s (vs. %s)' % (ents[0].getValue('employeeNumber'), value_end)) + assert len(ents) == 1 + assert ents[0].hasAttr('employeeNumber') and ents[0].getValue('employeeNumber') == value_end + +def test_ticket49658_2(topo): + """Do MOD(ADD+DEL) and replicate OLDEST first + M2: MOD(ADD+DEL) -> V1 + M1: MOD(ADD+DEL) -> V1 + expected: V1 + + :id: 672ff689-5b76-4107-92be-fb95d08400b3 + :setup: 3 Master Instances + 1. using user_2 where employNumber=1000 + :steps: + 1. Isolate M1 and M2 by pausing the replication agreements + 2. On M2 do MOD_DEL 1000 + MOD_ADD_13 + 3. On M1 do MOD_DEL 1000 + MOD_ADD_13 + 4. Enable replication agreement M2 -> M3, so that update step 2 is replicated first + 5. Enable replication agreement M1 -> M3, so that update step 3 is replicated second + 6. Check that the employeeNumber is 13 on all servers + :expectedresults: + 1. Fill in the result that is expected + 2. For each test step + """ + + # If you need any test suite initialization, + # please, write additional fixture for that (including finalizer). + # Topology for suites are predefined in lib389/topologies.py. + + # If you need host, port or any other data about instance, + # Please, use the instance object attributes for that (for example, topo.ms["master1"].serverid) + + if DEBUGGING: + # Add debugging steps(if any)... 
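+        # (Editor's illustrative suggestion, commented out) e.g. when bound as
+        # Directory Manager, the operational attribute nscpentrywsi exposes the
+        # per-value CSN state and can help trace how the conflicting MODs on
+        # user_2 were resolved:
+        #   ent = topo.ms["master3"].getEntry(_user_get_dn(2)[1], ldap.SCOPE_BASE,
+        #                                     '(objectclass=*)', ['nscpentrywsi'])
+        #   log.debug(ent.getValue('nscpentrywsi'))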
+ pass + M1 = topo.ms["master1"] + M2 = topo.ms["master2"] + M3 = topo.ms["master3"] + value_1000 = '1000'.encode() + last = '2' + value_end = last.encode() + theFilter = '(employeeNumber=%s)' % last + + # This test takes the user_1 + (uid, test_user_dn) = _user_get_dn(int(last)) + + # + # Step 4 + # + # disable all RA from M1 and M2 + # only M3 can replicate the update + # + agreement_m1_m2 = M1.agreement.list(suffix=DEFAULT_SUFFIX, consumer_host=M2.host, consumer_port=M2.port) + agreement_m1_m3 = M1.agreement.list(suffix=DEFAULT_SUFFIX, consumer_host=M3.host, consumer_port=M3.port) + agreement_m2_m1 = M2.agreement.list(suffix=DEFAULT_SUFFIX, consumer_host=M1.host, consumer_port=M1.port) + agreement_m2_m3 = M2.agreement.list(suffix=DEFAULT_SUFFIX, consumer_host=M3.host, consumer_port=M3.port) + + M1.agreement.pause(agreement_m1_m2[0].dn) + M1.agreement.pause(agreement_m1_m3[0].dn) + M2.agreement.pause(agreement_m2_m1[0].dn) + M2.agreement.pause(agreement_m2_m3[0].dn) + + # Step 5 + # Oldest update + # check that the entry on M2 contains employeeNumber= + M2.modify_s(test_user_dn, [(ldap.MOD_ADD, 'employeeNumber', value_end),(ldap.MOD_DELETE, 'employeeNumber', value_1000)]) + ents = M2.search_s(BASE_REGULAR, ldap.SCOPE_SUBTREE, theFilter) + assert len(ents) == 1 + time.sleep(1) + + # Step 6 + # More recent update + # check that the entry on M2 contains employeeNumber= + M1.modify_s(test_user_dn, [(ldap.MOD_ADD, 'employeeNumber', value_end), (ldap.MOD_DELETE, 'employeeNumber', value_1000)]) + ents = M1.search_s(BASE_REGULAR, ldap.SCOPE_SUBTREE, theFilter) + assert len(ents) == 1 + + #time.sleep(60) + # Step 7 + # Renable M2 before M1 so that on M3, the most recent update is replicated before + for ra in agreement_m2_m1, agreement_m2_m3: + M2.agreement.resume(ra[0].dn) + + # Step 8 + # Renable M1 so that on M3 oldest update is now replicated + time.sleep(4) + for ra in agreement_m1_m2, agreement_m1_m3: + M1.agreement.resume(ra[0].dn) + + # Step 9 + # Check that M1 still contains employeeNumber= + ents = M1.search_s(BASE_REGULAR, ldap.SCOPE_SUBTREE, theFilter) + log.info('Search M1 employeeNumber=%s (vs. %s)' % (ents[0].getValue('employeeNumber'), value_end)) + assert len(ents) == 1 + assert ents[0].hasAttr('employeeNumber') and ents[0].getValue('employeeNumber') == value_end + + # Check that M2 still contains employeeNumber= + ents = M2.search_s(BASE_REGULAR, ldap.SCOPE_SUBTREE, theFilter) + log.info('Search M2 employeeNumber=%s (vs. %s)' % (ents[0].getValue('employeeNumber'), value_end)) + assert len(ents) == 1 + assert ents[0].hasAttr('employeeNumber') and ents[0].getValue('employeeNumber') == value_end + + # Check that M3 still contain employeeNumber and it contains employeeNumber= + ents = M3.search_s(BASE_REGULAR, ldap.SCOPE_SUBTREE, '(employeeNumber=*)') + assert len(ents) == MAX_STANDARD_USER + ents = M3.search_s(BASE_REGULAR, ldap.SCOPE_SUBTREE, theFilter) + log.info('Search M3 employeeNumber=%s (vs. %s)' % (ents[0].getValue('employeeNumber'), value_end)) + assert len(ents) == 1 + assert ents[0].hasAttr('employeeNumber') and ents[0].getValue('employeeNumber') == value_end + +def test_ticket49658_3(topo): + """Do MOD(ADD+DEL) and replicate MOST RECENT first + M1: MOD(ADD+DEL) -> V1 + M2: MOD(ADD+DEL) -> V1 + expected: V1 + :id: b25e508a-8bf2-4351-88f6-3b6c098ccc44 + :setup: 3 Master Instances + 1. using user_2 where employNumber=1000 + :steps: + 1. Isolate M1 and M2 by pausing the replication agreements + 2. On M1 do MOD_DEL 1000 + MOD_ADD_13 + 3. 
On M2 do MOD_DEL 1000 + MOD_ADD_13 + 4. Enable replication agreement M2 -> M3, so that update step 2 is replicated first + 5. Enable replication agreement M1 -> M3, so that update step 3 is replicated second + 6. Check that the employeeNumber is 13 on all servers + :expectedresults: + 1. Fill in the result that is expected + 2. For each test step + """ + + # If you need any test suite initialization, + # please, write additional fixture for that (including finalizer). + # Topology for suites are predefined in lib389/topologies.py. + + # If you need host, port or any other data about instance, + # Please, use the instance object attributes for that (for example, topo.ms["master1"].serverid) + + if DEBUGGING: + # Add debugging steps(if any)... + pass + M1 = topo.ms["master1"] + M2 = topo.ms["master2"] + M3 = topo.ms["master3"] + value_1000 = '1000'.encode() + last = '3' + value_end = last.encode() + theFilter = '(employeeNumber=%s)' % last + + # This test takes the user_1 + (uid, test_user_dn) = _user_get_dn(int(last)) + + # + # Step 4 + # + # disable all RA from M1 and M2 + # only M3 can replicate the update + # + agreement_m1_m2 = M1.agreement.list(suffix=DEFAULT_SUFFIX, consumer_host=M2.host, consumer_port=M2.port) + agreement_m1_m3 = M1.agreement.list(suffix=DEFAULT_SUFFIX, consumer_host=M3.host, consumer_port=M3.port) + agreement_m2_m1 = M2.agreement.list(suffix=DEFAULT_SUFFIX, consumer_host=M1.host, consumer_port=M1.port) + agreement_m2_m3 = M2.agreement.list(suffix=DEFAULT_SUFFIX, consumer_host=M3.host, consumer_port=M3.port) + + M1.agreement.pause(agreement_m1_m2[0].dn) + M1.agreement.pause(agreement_m1_m3[0].dn) + M2.agreement.pause(agreement_m2_m1[0].dn) + M2.agreement.pause(agreement_m2_m3[0].dn) + + # Step 5 + # Oldest update + # check that the entry on M1 contains employeeNumber= + M1.modify_s(test_user_dn, [(ldap.MOD_ADD, 'employeeNumber', value_end),(ldap.MOD_DELETE, 'employeeNumber', value_1000)]) + ents = M1.search_s(BASE_REGULAR, ldap.SCOPE_SUBTREE, theFilter) + assert len(ents) == 1 + time.sleep(1) + + # Step 6 + # More recent update + # check that the entry on M2 contains employeeNumber= + M2.modify_s(test_user_dn, [(ldap.MOD_ADD, 'employeeNumber', value_end), (ldap.MOD_DELETE, 'employeeNumber', value_1000)]) + ents = M2.search_s(BASE_REGULAR, ldap.SCOPE_SUBTREE, theFilter) + assert len(ents) == 1 + + #time.sleep(60) + # Step 7 + # Renable M2 before M1 so that on M3, the most recent update is replicated before + for ra in agreement_m2_m1, agreement_m2_m3: + M2.agreement.resume(ra[0].dn) + + # Step 8 + # Renable M1 so that on M3 oldest update is now replicated + time.sleep(4) + for ra in agreement_m1_m2, agreement_m1_m3: + M1.agreement.resume(ra[0].dn) + + # Step 9 + # Check that M1 still contains employeeNumber= + ents = M1.search_s(BASE_REGULAR, ldap.SCOPE_SUBTREE, theFilter) + log.info('Search M1 employeeNumber=%s (vs. %s)' % (ents[0].getValue('employeeNumber'), value_end)) + assert len(ents) == 1 + assert ents[0].hasAttr('employeeNumber') and ents[0].getValue('employeeNumber') == value_end + + # Check that M2 still contains employeeNumber= + ents = M2.search_s(BASE_REGULAR, ldap.SCOPE_SUBTREE, theFilter) + log.info('Search M2 employeeNumber=%s (vs. 
%s)' % (ents[0].getValue('employeeNumber'), value_end)) + assert len(ents) == 1 + assert ents[0].hasAttr('employeeNumber') and ents[0].getValue('employeeNumber') == value_end + + # Check that M3 still contain employeeNumber and it contains employeeNumber= + ents = M3.search_s(BASE_REGULAR, ldap.SCOPE_SUBTREE, '(employeeNumber=*)') + assert len(ents) == MAX_STANDARD_USER + ents = M3.search_s(BASE_REGULAR, ldap.SCOPE_SUBTREE, theFilter) + log.info('Search M3 employeeNumber=%s (vs. %s)' % (ents[0].getValue('employeeNumber'), value_end)) + assert len(ents) == 1 + assert ents[0].hasAttr('employeeNumber') and ents[0].getValue('employeeNumber') == value_end + +def test_ticket49658_4(topo): + """Do MOD(ADD+DEL) MOD(REPL) and replicate MOST RECENT first + M1: MOD(ADD+DEL) -> V1 + M2: MOD(REPL) -> V1 + expected: V1 + + :id: 8f7ce9ff-e36f-48cd-b0ed-b7077a3e7341 + :setup: 3 Master Instances + 1. using user_2 where employNumber=1000 + :steps: + 1. Isolate M1 and M2 by pausing the replication agreements + 2. On M1 do MOD_DEL 1000 + MOD_ADD_13 + 3. On M2 do MOD_REPL _13 + 4. Enable replication agreement M2 -> M3, so that update step 2 is replicated first + 5. Enable replication agreement M1 -> M3, so that update step 3 is replicated second + 6. Check that the employeeNumber is 13 on all servers + :expectedresults: + 1. Fill in the result that is expected + 2. For each test step + """ + + # If you need any test suite initialization, + # please, write additional fixture for that (including finalizer). + # Topology for suites are predefined in lib389/topologies.py. + + # If you need host, port or any other data about instance, + # Please, use the instance object attributes for that (for example, topo.ms["master1"].serverid) + + if DEBUGGING: + # Add debugging steps(if any)... 
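+ # Scenario for this test: M1 applies the oldest update (MOD ADD 4 / DEL 1000),
+ # then M2 applies MOD REPLACE 4. M2 is resumed before M1, so M3 receives the
+ # most recent update first; every master must end up with employeeNumber=4.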
+ pass + M1 = topo.ms["master1"] + M2 = topo.ms["master2"] + M3 = topo.ms["master3"] + value_1000 = '1000'.encode() + last = '4' + value_end = last.encode() + theFilter = '(employeeNumber=%s)' % last + + # This test takes the user_1 + (uid, test_user_dn) = _user_get_dn(int(last)) + + # + # Step 4 + # + # disable all RA from M1 and M2 + # only M3 can replicate the update + # + agreement_m1_m2 = M1.agreement.list(suffix=DEFAULT_SUFFIX, consumer_host=M2.host, consumer_port=M2.port) + agreement_m1_m3 = M1.agreement.list(suffix=DEFAULT_SUFFIX, consumer_host=M3.host, consumer_port=M3.port) + agreement_m2_m1 = M2.agreement.list(suffix=DEFAULT_SUFFIX, consumer_host=M1.host, consumer_port=M1.port) + agreement_m2_m3 = M2.agreement.list(suffix=DEFAULT_SUFFIX, consumer_host=M3.host, consumer_port=M3.port) + + M1.agreement.pause(agreement_m1_m2[0].dn) + M1.agreement.pause(agreement_m1_m3[0].dn) + M2.agreement.pause(agreement_m2_m1[0].dn) + M2.agreement.pause(agreement_m2_m3[0].dn) + + # Step 5 + # Oldest update + # check that the entry on M1 contains employeeNumber= + M1.modify_s(test_user_dn, [(ldap.MOD_ADD, 'employeeNumber', value_end),(ldap.MOD_DELETE, 'employeeNumber', value_1000)]) + ents = M1.search_s(BASE_REGULAR, ldap.SCOPE_SUBTREE, theFilter) + assert len(ents) == 1 + time.sleep(1) + + # Step 6 + # More recent update + # check that the entry on M2 contains employeeNumber= + M2.modify_s(test_user_dn, [(ldap.MOD_REPLACE, 'employeeNumber', value_end)]) + ents = M2.search_s(BASE_REGULAR, ldap.SCOPE_SUBTREE, theFilter) + assert len(ents) == 1 + + #time.sleep(60) + # Step 7 + # Renable M2 before M1 so that on M3, the most recent update is replicated before + for ra in agreement_m2_m1, agreement_m2_m3: + M2.agreement.resume(ra[0].dn) + + # Step 8 + # Renable M1 so that on M3 oldest update is now replicated + time.sleep(4) + for ra in agreement_m1_m2, agreement_m1_m3: + M1.agreement.resume(ra[0].dn) + + # Step 9 + # Check that M1 still contains employeeNumber= + ents = M1.search_s(BASE_REGULAR, ldap.SCOPE_SUBTREE, theFilter) + log.info('Search M1 employeeNumber=%s (vs. %s)' % (ents[0].getValue('employeeNumber'), value_end)) + assert len(ents) == 1 + assert ents[0].hasAttr('employeeNumber') and ents[0].getValue('employeeNumber') == value_end + + # Check that M2 still contains employeeNumber= + ents = M2.search_s(BASE_REGULAR, ldap.SCOPE_SUBTREE, theFilter) + log.info('Search M2 employeeNumber=%s (vs. %s)' % (ents[0].getValue('employeeNumber'), value_end)) + assert len(ents) == 1 + assert ents[0].hasAttr('employeeNumber') and ents[0].getValue('employeeNumber') == value_end + + # Check that M3 still contain employeeNumber and it contains employeeNumber= + ents = M3.search_s(BASE_REGULAR, ldap.SCOPE_SUBTREE, '(employeeNumber=*)') + assert len(ents) == MAX_STANDARD_USER + ents = M3.search_s(BASE_REGULAR, ldap.SCOPE_SUBTREE, theFilter) + log.info('Search M3 employeeNumber=%s (vs. %s)' % (ents[0].getValue('employeeNumber'), value_end)) + assert len(ents) == 1 + assert ents[0].hasAttr('employeeNumber') and ents[0].getValue('employeeNumber') == value_end + +def test_ticket49658_5(topo): + """Do MOD(REPL) MOD(ADD+DEL) and replicate MOST RECENT first + M1: MOD(REPL) -> V1 + M2: MOD(ADD+DEL) -> V1 + expected: V1 + :id: d6b88e3c-a509-4d3e-8e5d-849237993f47 + :setup: 3 Master Instances + 1. using user_2 where employNumber=1000 + :steps: + 1. Isolate M1 and M2 by pausing the replication agreements + 2. On M1 do MOD_DEL 1000 + MOD_ADD_13 + 3. On M2 do MOD_REPL _13 + 4. 
Enable replication agreement M2 -> M3, so that update step 2 is replicated first + 5. Enable replication agreement M1 -> M3, so that update step 3 is replicated second + 6. Check that the employeeNumber is 13 on all servers + :expectedresults: + 1. Fill in the result that is expected + 2. For each test step + """ + + # If you need any test suite initialization, + # please, write additional fixture for that (including finalizer). + # Topology for suites are predefined in lib389/topologies.py. + + # If you need host, port or any other data about instance, + # Please, use the instance object attributes for that (for example, topo.ms["master1"].serverid) + + if DEBUGGING: + # Add debugging steps(if any)... + pass + M1 = topo.ms["master1"] + M2 = topo.ms["master2"] + M3 = topo.ms["master3"] + value_1000 = '1000'.encode() + last = '5' + value_end = last.encode() + theFilter = '(employeeNumber=%s)' % last + + # This test takes the user_1 + (uid, test_user_dn) = _user_get_dn(int(last)) + + # + # Step 4 + # + # disable all RA from M1 and M2 + # only M3 can replicate the update + # + agreement_m1_m2 = M1.agreement.list(suffix=DEFAULT_SUFFIX, consumer_host=M2.host, consumer_port=M2.port) + agreement_m1_m3 = M1.agreement.list(suffix=DEFAULT_SUFFIX, consumer_host=M3.host, consumer_port=M3.port) + agreement_m2_m1 = M2.agreement.list(suffix=DEFAULT_SUFFIX, consumer_host=M1.host, consumer_port=M1.port) + agreement_m2_m3 = M2.agreement.list(suffix=DEFAULT_SUFFIX, consumer_host=M3.host, consumer_port=M3.port) + + M1.agreement.pause(agreement_m1_m2[0].dn) + M1.agreement.pause(agreement_m1_m3[0].dn) + M2.agreement.pause(agreement_m2_m1[0].dn) + M2.agreement.pause(agreement_m2_m3[0].dn) + + # Step 5 + # Oldest update + # check that the entry on M1 contains employeeNumber= + M1.modify_s(test_user_dn, [(ldap.MOD_REPLACE, 'employeeNumber', value_end)]) + ents = M1.search_s(BASE_REGULAR, ldap.SCOPE_SUBTREE, theFilter) + assert len(ents) == 1 + time.sleep(1) + + # Step 6 + # More recent update + # check that the entry on M2 contains employeeNumber= + M2.modify_s(test_user_dn, [(ldap.MOD_ADD, 'employeeNumber', value_end),(ldap.MOD_DELETE, 'employeeNumber', value_1000)]) + ents = M2.search_s(BASE_REGULAR, ldap.SCOPE_SUBTREE, theFilter) + assert len(ents) == 1 + + #time.sleep(60) + # Step 7 + # Renable M2 before M1 so that on M3, the most recent update is replicated before + for ra in agreement_m2_m1, agreement_m2_m3: + M2.agreement.resume(ra[0].dn) + + # Step 8 + # Renable M1 so that on M3 oldest update is now replicated + time.sleep(4) + for ra in agreement_m1_m2, agreement_m1_m3: + M1.agreement.resume(ra[0].dn) + + # Step 9 + # Check that M1 still contains employeeNumber= + ents = M1.search_s(BASE_REGULAR, ldap.SCOPE_SUBTREE, theFilter) + log.info('Search M1 employeeNumber=%s (vs. %s)' % (ents[0].getValue('employeeNumber'), value_end)) + assert len(ents) == 1 + assert ents[0].hasAttr('employeeNumber') and ents[0].getValue('employeeNumber') == value_end + + # Check that M2 still contains employeeNumber= + ents = M2.search_s(BASE_REGULAR, ldap.SCOPE_SUBTREE, theFilter) + log.info('Search M2 employeeNumber=%s (vs. 
%s)' % (ents[0].getValue('employeeNumber'), value_end)) + assert len(ents) == 1 + assert ents[0].hasAttr('employeeNumber') and ents[0].getValue('employeeNumber') == value_end + + # Check that M3 still contain employeeNumber and it contains employeeNumber= + ents = M3.search_s(BASE_REGULAR, ldap.SCOPE_SUBTREE, '(employeeNumber=*)') + assert len(ents) == MAX_STANDARD_USER + ents = M3.search_s(BASE_REGULAR, ldap.SCOPE_SUBTREE, theFilter) + log.info('Search M3 employeeNumber=%s (vs. %s)' % (ents[0].getValue('employeeNumber'), value_end)) + assert len(ents) == 1 + assert ents[0].hasAttr('employeeNumber') and ents[0].getValue('employeeNumber') == value_end + +def test_ticket49658_6(topo): + """Do + M1: MOD(REPL) -> V1 + M2: MOD(ADD+DEL) -> V2 + expected: V2 + + :id: 5eb67db1-2ff2-4c17-85af-e124b45aace3 + :setup: 3 Master Instances + 1. using user_2 where employNumber=1000 + :steps: + 1. Isolate M1 and M2 by pausing the replication agreements + 2. On M1 do MOD_DEL 1000 + MOD_ADD_13 + 3. On M2 do MOD_REPL _13 + 4. Enable replication agreement M2 -> M3, so that update step 2 is replicated first + 5. Enable replication agreement M1 -> M3, so that update step 3 is replicated second + 6. Check that the employeeNumber is 13 on all servers + :expectedresults: + 1. Fill in the result that is expected + 2. For each test step + """ + + # If you need any test suite initialization, + # please, write additional fixture for that (including finalizer). + # Topology for suites are predefined in lib389/topologies.py. + + # If you need host, port or any other data about instance, + # Please, use the instance object attributes for that (for example, topo.ms["master1"].serverid) + + + + if DEBUGGING: + # Add debugging steps(if any)... + pass + M1 = topo.ms["master1"] + M2 = topo.ms["master2"] + M3 = topo.ms["master3"] + value_1000 = '1000' + last = '6' + value_S1 = '6.1' + value_S2 = '6.2' + + description = { + "S1": M1, + "S2": M2, + "S1_MOD": [(ldap.MOD_REPLACE, 'employeeNumber', value_S1.encode())], + "S2_MOD": [(ldap.MOD_ADD, 'employeeNumber', value_S2.encode()),(ldap.MOD_DELETE, 'employeeNumber', value_1000.encode())], + "expected": value_S2} + + # This test takes the user_1 + (uid, test_user_dn) = _user_get_dn(int(last)) + + # + # Step 4 + # + # disable all RA from M1 and M2 + # only M3 can replicate the update + # + agreement_m1_m2 = M1.agreement.list(suffix=DEFAULT_SUFFIX, consumer_host=M2.host, consumer_port=M2.port) + agreement_m1_m3 = M1.agreement.list(suffix=DEFAULT_SUFFIX, consumer_host=M3.host, consumer_port=M3.port) + agreement_m2_m1 = M2.agreement.list(suffix=DEFAULT_SUFFIX, consumer_host=M1.host, consumer_port=M1.port) + agreement_m2_m3 = M2.agreement.list(suffix=DEFAULT_SUFFIX, consumer_host=M3.host, consumer_port=M3.port) + + M1.agreement.pause(agreement_m1_m2[0].dn) + M1.agreement.pause(agreement_m1_m3[0].dn) + M2.agreement.pause(agreement_m2_m1[0].dn) + M2.agreement.pause(agreement_m2_m3[0].dn) + + # Step 5 + # Oldest update + # check that the entry on M1 contains employeeNumber= + description["S1"].modify_s(test_user_dn, description["S1_MOD"]) + ents = description["S1"].search_s(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE, '(employeeNumber=%s)' % value_S1) + assert len(ents) == 1 + time.sleep(1) + + # Step 6 + # More recent update + # check that the entry on M2 contains employeeNumber= + description["S2"].modify_s(test_user_dn, description["S2_MOD"]) + ents = description["S2"].search_s(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE, '(employeeNumber=%s)' % value_S2) + assert len(ents) == 1 + + #time.sleep(60) + # 
Step 7 + # Renable M2 before M1 so that on M3, the most recent update is replicated before + for ra in agreement_m2_m1, agreement_m2_m3: + M2.agreement.resume(ra[0].dn) + + # Step 8 + # Renable M1 so that on M3 oldest update is now replicated + time.sleep(4) + for ra in agreement_m1_m2, agreement_m1_m3: + M1.agreement.resume(ra[0].dn) + + # Step 9 + # Check that M1 still contains employeeNumber= + ents = M1.search_s(BASE_REGULAR, ldap.SCOPE_SUBTREE, '(employeeNumber=%s)' % description["expected"]) + log.info('Search M1 employeeNumber=%s (vs. %s)' % (ents[0].getValue('employeeNumber'), description["expected"])) + assert len(ents) == 1 + assert ents[0].hasAttr('employeeNumber') and ents[0].getValue('employeeNumber') == description["expected"].encode() + + # Check that M2 still contains employeeNumber= + ents = M2.search_s(BASE_REGULAR, ldap.SCOPE_SUBTREE, '(employeeNumber=%s)' % description["expected"]) + log.info('Search M2 employeeNumber=%s (vs. %s)' % (ents[0].getValue('employeeNumber'), description["expected"])) + assert len(ents) == 1 + assert ents[0].hasAttr('employeeNumber') and ents[0].getValue('employeeNumber') == description["expected"].encode() + + # Check that M3 still contain employeeNumber and it contains employeeNumber= + ents = M3.search_s(BASE_REGULAR, ldap.SCOPE_SUBTREE, '(employeeNumber=*)') + assert len(ents) == MAX_STANDARD_USER + ents = M3.search_s(BASE_REGULAR, ldap.SCOPE_SUBTREE, '(employeeNumber=%s)' % description["expected"]) + log.info('Search M3 employeeNumber=%s (vs. %s)' % (ents[0].getValue('employeeNumber'), description["expected"])) + assert len(ents) == 1 + assert ents[0].hasAttr('employeeNumber') and ents[0].getValue('employeeNumber') == description["expected"].encode() + +def test_ticket49658_7(topo): + """Do + M1: MOD(ADD+DEL) -> V1 + M2: MOD(REPL) -> V2 + expected: V2 + + :id: a79036ca-0e1b-453e-9524-fb44e1d7c929 + :setup: 3 Master Instances + :steps: + :expectedresults: + 1. Fill in the result that is expected + 2. For each test step + """ + + # If you need any test suite initialization, + # please, write additional fixture for that (including finalizer). + # Topology for suites are predefined in lib389/topologies.py. + + # If you need host, port or any other data about instance, + # Please, use the instance object attributes for that (for example, topo.ms["master1"].serverid) + + + + if DEBUGGING: + # Add debugging steps(if any)... 
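+ # Scenario for this test: M1 applies the oldest update (MOD ADD 7.1 / DEL 1000),
+ # then M2 applies MOD REPLACE 7.2. M2 is resumed before M1, so M3 receives the
+ # most recent update first; every master must end up with employeeNumber=7.2.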
+ pass + M1 = topo.ms["master1"] + M2 = topo.ms["master2"] + M3 = topo.ms["master3"] + value_1000 = '1000' + last = '7' + value_S1 = '7.1' + value_S2 = '7.2' + + description = { + "S1": M1, + "S2": M2, + "S1_MOD": [(ldap.MOD_ADD, 'employeeNumber', value_S1.encode()),(ldap.MOD_DELETE, 'employeeNumber', value_1000.encode())], + "S2_MOD": [(ldap.MOD_REPLACE, 'employeeNumber', value_S2.encode())], + "expected": value_S2} + + # This test takes the user_1 + (uid, test_user_dn) = _user_get_dn(int(last)) + + # + # Step 4 + # + # disable all RA from M1 and M2 + # only M3 can replicate the update + # + agreement_m1_m2 = M1.agreement.list(suffix=DEFAULT_SUFFIX, consumer_host=M2.host, consumer_port=M2.port) + agreement_m1_m3 = M1.agreement.list(suffix=DEFAULT_SUFFIX, consumer_host=M3.host, consumer_port=M3.port) + agreement_m2_m1 = M2.agreement.list(suffix=DEFAULT_SUFFIX, consumer_host=M1.host, consumer_port=M1.port) + agreement_m2_m3 = M2.agreement.list(suffix=DEFAULT_SUFFIX, consumer_host=M3.host, consumer_port=M3.port) + + M1.agreement.pause(agreement_m1_m2[0].dn) + M1.agreement.pause(agreement_m1_m3[0].dn) + M2.agreement.pause(agreement_m2_m1[0].dn) + M2.agreement.pause(agreement_m2_m3[0].dn) + + # Step 5 + # Oldest update + # check that the entry on M1 contains employeeNumber= + description["S1"].modify_s(test_user_dn, description["S1_MOD"]) + ents = description["S1"].search_s(BASE_REGULAR, ldap.SCOPE_SUBTREE, '(employeeNumber=%s)' % value_S1) + assert len(ents) == 1 + time.sleep(1) + + # Step 6 + # More recent update + # check that the entry on M2 contains employeeNumber= + description["S2"].modify_s(test_user_dn, description["S2_MOD"]) + ents = description["S2"].search_s(BASE_REGULAR, ldap.SCOPE_SUBTREE, '(employeeNumber=%s)' % value_S2) + assert len(ents) == 1 + + #time.sleep(60) + # Step 7 + # Renable M2 before M1 so that on M3, the most recent update is replicated before + for ra in agreement_m2_m1, agreement_m2_m3: + M2.agreement.resume(ra[0].dn) + + # Step 8 + # Renable M1 so that on M3 oldest update is now replicated + time.sleep(4) + for ra in agreement_m1_m2, agreement_m1_m3: + M1.agreement.resume(ra[0].dn) + + # Step 9 + # Check that M1 still contains employeeNumber= + ents = M1.search_s(BASE_REGULAR, ldap.SCOPE_SUBTREE, '(employeeNumber=%s)' % description["expected"]) + log.info('Search M1 employeeNumber=%s (vs. %s)' % (ents[0].getValue('employeeNumber'), description["expected"])) + assert len(ents) == 1 + assert ents[0].hasAttr('employeeNumber') and ents[0].getValue('employeeNumber') == description["expected"].encode() + + # Check that M2 still contains employeeNumber= + ents = M2.search_s(BASE_REGULAR, ldap.SCOPE_SUBTREE, '(employeeNumber=%s)' % description["expected"]) + log.info('Search M2 employeeNumber=%s (vs. %s)' % (ents[0].getValue('employeeNumber'), description["expected"])) + assert len(ents) == 1 + assert ents[0].hasAttr('employeeNumber') and ents[0].getValue('employeeNumber') == description["expected"].encode() + + # Check that M3 still contain employeeNumber and it contains employeeNumber= + ents = M3.search_s(BASE_REGULAR, ldap.SCOPE_SUBTREE, '(employeeNumber=*)') + assert len(ents) == MAX_STANDARD_USER + ents = M3.search_s(BASE_REGULAR, ldap.SCOPE_SUBTREE, '(employeeNumber=%s)' % description["expected"]) + log.info('Search M3 employeeNumber=%s (vs. 
%s)' % (ents[0].getValue('employeeNumber'), description["expected"])) + assert len(ents) == 1 + assert ents[0].hasAttr('employeeNumber') and ents[0].getValue('employeeNumber') == description["expected"].encode() + +def test_ticket49658_8(topo): + """Do + M1: MOD(DEL+ADD) -> V1 + M2: MOD(REPL) -> V2 + expected: V2 + + :id: 06acb988-b735-424a-9886-b0557ee12a9a + :setup: 3 Master Instances + :steps: + :expectedresults: + 1. Fill in the result that is expected + 2. For each test step + """ + + # If you need any test suite initialization, + # please, write additional fixture for that (including finalizer). + # Topology for suites are predefined in lib389/topologies.py. + + # If you need host, port or any other data about instance, + # Please, use the instance object attributes for that (for example, topo.ms["master1"].serverid) + + + + if DEBUGGING: + # Add debugging steps(if any)... + pass + M1 = topo.ms["master1"] + M2 = topo.ms["master2"] + M3 = topo.ms["master3"] + value_1000 = '1000' + last = '8' + value_S1 = '8.1' + value_S2 = '8.2' + + description = { + "S1": M1, + "S2": M2, + "S1_MOD": [(ldap.MOD_DELETE, 'employeeNumber', value_1000.encode()),(ldap.MOD_ADD, 'employeeNumber', value_S1.encode())], + "S2_MOD": [(ldap.MOD_REPLACE, 'employeeNumber', value_S2.encode())], + "expected": value_S2} + + # This test takes the user_1 + (uid, test_user_dn) = _user_get_dn(int(last)) + + # + # Step 4 + # + # disable all RA from M1 and M2 + # only M3 can replicate the update + # + agreement_m1_m2 = M1.agreement.list(suffix=DEFAULT_SUFFIX, consumer_host=M2.host, consumer_port=M2.port) + agreement_m1_m3 = M1.agreement.list(suffix=DEFAULT_SUFFIX, consumer_host=M3.host, consumer_port=M3.port) + agreement_m2_m1 = M2.agreement.list(suffix=DEFAULT_SUFFIX, consumer_host=M1.host, consumer_port=M1.port) + agreement_m2_m3 = M2.agreement.list(suffix=DEFAULT_SUFFIX, consumer_host=M3.host, consumer_port=M3.port) + + M1.agreement.pause(agreement_m1_m2[0].dn) + M1.agreement.pause(agreement_m1_m3[0].dn) + M2.agreement.pause(agreement_m2_m1[0].dn) + M2.agreement.pause(agreement_m2_m3[0].dn) + + # Step 5 + # Oldest update + # check that the entry on M1 contains employeeNumber= + description["S1"].modify_s(test_user_dn, description["S1_MOD"]) + ents = description["S1"].search_s(BASE_REGULAR, ldap.SCOPE_SUBTREE, '(employeeNumber=%s)' % value_S1) + assert len(ents) == 1 + time.sleep(1) + + # Step 6 + # More recent update + # check that the entry on M2 contains employeeNumber= + description["S2"].modify_s(test_user_dn, description["S2_MOD"]) + ents = description["S2"].search_s(BASE_REGULAR, ldap.SCOPE_SUBTREE, '(employeeNumber=%s)' % value_S2) + assert len(ents) == 1 + + #time.sleep(60) + # Step 7 + # Renable M2 before M1 so that on M3, the most recent update is replicated before + for ra in agreement_m2_m1, agreement_m2_m3: + M2.agreement.resume(ra[0].dn) + + # Step 8 + # Renable M1 so that on M3 oldest update is now replicated + time.sleep(4) + for ra in agreement_m1_m2, agreement_m1_m3: + M1.agreement.resume(ra[0].dn) + + # Step 9 + # Check that M1 still contains employeeNumber= + ents = M1.search_s(BASE_REGULAR, ldap.SCOPE_SUBTREE, '(employeeNumber=%s)' % description["expected"]) + log.info('Search M1 employeeNumber=%s (vs. 
%s)' % (ents[0].getValue('employeeNumber'), description["expected"])) + assert len(ents) == 1 + assert ents[0].hasAttr('employeeNumber') and ents[0].getValue('employeeNumber') == description["expected"].encode() + + # Check that M2 still contains employeeNumber= + ents = M2.search_s(BASE_REGULAR, ldap.SCOPE_SUBTREE, '(employeeNumber=%s)' % description["expected"]) + log.info('Search M2 employeeNumber=%s (vs. %s)' % (ents[0].getValue('employeeNumber'), description["expected"])) + assert len(ents) == 1 + assert ents[0].hasAttr('employeeNumber') and ents[0].getValue('employeeNumber') == description["expected"].encode() + + # Check that M3 still contain employeeNumber and it contains employeeNumber= + ents = M3.search_s(BASE_REGULAR, ldap.SCOPE_SUBTREE, '(employeeNumber=*)') + assert len(ents) == MAX_STANDARD_USER + ents = M3.search_s(BASE_REGULAR, ldap.SCOPE_SUBTREE, '(employeeNumber=%s)' % description["expected"]) + log.info('Search M3 employeeNumber=%s (vs. %s)' % (ents[0].getValue('employeeNumber'), description["expected"])) + assert len(ents) == 1 + assert ents[0].hasAttr('employeeNumber') and ents[0].getValue('employeeNumber') == description["expected"].encode() + + +def test_ticket49658_9(topo): + """Do + M1: MOD(REPL) -> V1 + M2: MOD(DEL+ADD) -> V2 + expected: V2 + + :id: 3a4c1be3-e3b9-44fe-aa5a-72a3b1a8985c + :setup: 3 Master Instances + :steps: + :expectedresults: + 1. Fill in the result that is expected + 2. For each test step + """ + + # If you need any test suite initialization, + # please, write additional fixture for that (including finalizer). + # Topology for suites are predefined in lib389/topologies.py. + + # If you need host, port or any other data about instance, + # Please, use the instance object attributes for that (for example, topo.ms["master1"].serverid) + + + + if DEBUGGING: + # Add debugging steps(if any)... 
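+ # Scenario for this test: M1 applies the oldest update (MOD REPLACE 9.1), then
+ # M2 applies MOD DEL 1000 / ADD 9.2. M2 is resumed before M1, so M3 receives
+ # the most recent update first; every master must end up with employeeNumber=9.2.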
+ pass + M1 = topo.ms["master1"] + M2 = topo.ms["master2"] + M3 = topo.ms["master3"] + value_1000 = '1000' + last = '9' + value_S1 = '9.1' + value_S2 = '9.2' + + description = { + "S1": M1, + "S2": M2, + "S1_MOD": [(ldap.MOD_REPLACE, 'employeeNumber', value_S1.encode())], + "S2_MOD": [(ldap.MOD_DELETE, 'employeeNumber', value_1000.encode()),(ldap.MOD_ADD, 'employeeNumber', value_S2.encode())], + "expected": value_S2} + + # This test takes the user_1 + (uid, test_user_dn) = _user_get_dn(int(last)) + + # + # Step 4 + # + # disable all RA from M1 and M2 + # only M3 can replicate the update + # + agreement_m1_m2 = M1.agreement.list(suffix=DEFAULT_SUFFIX, consumer_host=M2.host, consumer_port=M2.port) + agreement_m1_m3 = M1.agreement.list(suffix=DEFAULT_SUFFIX, consumer_host=M3.host, consumer_port=M3.port) + agreement_m2_m1 = M2.agreement.list(suffix=DEFAULT_SUFFIX, consumer_host=M1.host, consumer_port=M1.port) + agreement_m2_m3 = M2.agreement.list(suffix=DEFAULT_SUFFIX, consumer_host=M3.host, consumer_port=M3.port) + + M1.agreement.pause(agreement_m1_m2[0].dn) + M1.agreement.pause(agreement_m1_m3[0].dn) + M2.agreement.pause(agreement_m2_m1[0].dn) + M2.agreement.pause(agreement_m2_m3[0].dn) + + # Step 5 + # Oldest update + # check that the entry on M1 contains employeeNumber= + description["S1"].modify_s(test_user_dn, description["S1_MOD"]) + ents = description["S1"].search_s(BASE_REGULAR, ldap.SCOPE_SUBTREE, '(employeeNumber=%s)' % value_S1) + assert len(ents) == 1 + time.sleep(1) + + # Step 6 + # More recent update + # check that the entry on M2 contains employeeNumber= + description["S2"].modify_s(test_user_dn, description["S2_MOD"]) + ents = description["S2"].search_s(BASE_REGULAR, ldap.SCOPE_SUBTREE, '(employeeNumber=%s)' % value_S2) + assert len(ents) == 1 + + #time.sleep(60) + # Step 7 + # Renable M2 before M1 so that on M3, the most recent update is replicated before + for ra in agreement_m2_m1, agreement_m2_m3: + M2.agreement.resume(ra[0].dn) + + # Step 8 + # Renable M1 so that on M3 oldest update is now replicated + time.sleep(4) + for ra in agreement_m1_m2, agreement_m1_m3: + M1.agreement.resume(ra[0].dn) + + # Step 9 + # Check that M1 still contains employeeNumber= + ents = M1.search_s(BASE_REGULAR, ldap.SCOPE_SUBTREE, '(employeeNumber=%s)' % description["expected"]) + log.info('Search M1 employeeNumber=%s (vs. %s)' % (ents[0].getValue('employeeNumber'), description["expected"])) + assert len(ents) == 1 + assert ents[0].hasAttr('employeeNumber') and ents[0].getValue('employeeNumber') == description["expected"].encode() + + # Check that M2 still contains employeeNumber= + ents = M2.search_s(BASE_REGULAR, ldap.SCOPE_SUBTREE, '(employeeNumber=%s)' % description["expected"]) + log.info('Search M2 employeeNumber=%s (vs. %s)' % (ents[0].getValue('employeeNumber'), description["expected"])) + assert len(ents) == 1 + assert ents[0].hasAttr('employeeNumber') and ents[0].getValue('employeeNumber') == description["expected"].encode() + + # Check that M3 still contain employeeNumber and it contains employeeNumber= + ents = M3.search_s(BASE_REGULAR, ldap.SCOPE_SUBTREE, '(employeeNumber=*)') + assert len(ents) == MAX_STANDARD_USER + ents = M3.search_s(BASE_REGULAR, ldap.SCOPE_SUBTREE, '(employeeNumber=%s)' % description["expected"]) + log.info('Search M3 employeeNumber=%s (vs. 
%s)' % (ents[0].getValue('employeeNumber'), description["expected"])) + assert len(ents) == 1 + assert ents[0].hasAttr('employeeNumber') and ents[0].getValue('employeeNumber') == description["expected"].encode() + + + +def test_ticket49658_10(topo): + """Do + M1: MOD(REPL) -> V1 + M2: MOD(REPL) -> V2 + expected: V2 + + :id: 1413341a-45e6-422a-b6cc-9fde6fc9bb15 + :setup: 3 Master Instances + :steps: + :expectedresults: + 1. Fill in the result that is expected + 2. For each test step + """ + + # If you need any test suite initialization, + # please, write additional fixture for that (including finalizer). + # Topology for suites are predefined in lib389/topologies.py. + + # If you need host, port or any other data about instance, + # Please, use the instance object attributes for that (for example, topo.ms["master1"].serverid) + + + + if DEBUGGING: + # Add debugging steps(if any)... + pass + M1 = topo.ms["master1"] + M2 = topo.ms["master2"] + M3 = topo.ms["master3"] + value_1000 = '1000' + last = '10' + value_S1 = '10.1' + value_S2 = '10.2' + + description = { + "S1": M1, + "S2": M2, + "S1_MOD": [(ldap.MOD_REPLACE, 'employeeNumber', value_S1.encode())], + "S2_MOD": [(ldap.MOD_REPLACE, 'employeeNumber', value_S2.encode())], + "expected": value_S2} + + # This test takes the user_1 + (uid, test_user_dn) = _user_get_dn(int(last)) + + # + # Step 4 + # + # disable all RA from M1 and M2 + # only M3 can replicate the update + # + agreement_m1_m2 = M1.agreement.list(suffix=DEFAULT_SUFFIX, consumer_host=M2.host, consumer_port=M2.port) + agreement_m1_m3 = M1.agreement.list(suffix=DEFAULT_SUFFIX, consumer_host=M3.host, consumer_port=M3.port) + agreement_m2_m1 = M2.agreement.list(suffix=DEFAULT_SUFFIX, consumer_host=M1.host, consumer_port=M1.port) + agreement_m2_m3 = M2.agreement.list(suffix=DEFAULT_SUFFIX, consumer_host=M3.host, consumer_port=M3.port) + + M1.agreement.pause(agreement_m1_m2[0].dn) + M1.agreement.pause(agreement_m1_m3[0].dn) + M2.agreement.pause(agreement_m2_m1[0].dn) + M2.agreement.pause(agreement_m2_m3[0].dn) + + # Step 5 + # Oldest update + # check that the entry on M1 contains employeeNumber= + description["S1"].modify_s(test_user_dn, description["S1_MOD"]) + ents = description["S1"].search_s(BASE_REGULAR, ldap.SCOPE_SUBTREE, '(employeeNumber=%s)' % value_S1) + assert len(ents) == 1 + time.sleep(1) + + # Step 6 + # More recent update + # check that the entry on M2 contains employeeNumber= + description["S2"].modify_s(test_user_dn, description["S2_MOD"]) + ents = description["S2"].search_s(BASE_REGULAR, ldap.SCOPE_SUBTREE, '(employeeNumber=%s)' % value_S2) + assert len(ents) == 1 + + #time.sleep(60) + # Step 7 + # Renable M2 before M1 so that on M3, the most recent update is replicated before + for ra in agreement_m2_m1, agreement_m2_m3: + M2.agreement.resume(ra[0].dn) + + # Step 8 + # Renable M1 so that on M3 oldest update is now replicated + time.sleep(4) + for ra in agreement_m1_m2, agreement_m1_m3: + M1.agreement.resume(ra[0].dn) + + # Step 9 + # Check that M1 still contains employeeNumber= + ents = M1.search_s(BASE_REGULAR, ldap.SCOPE_SUBTREE, '(employeeNumber=%s)' % description["expected"]) + log.info('Search M1 employeeNumber=%s (vs. 
%s)' % (ents[0].getValue('employeeNumber'), description["expected"])) + assert len(ents) == 1 + assert ents[0].hasAttr('employeeNumber') and ents[0].getValue('employeeNumber') == description["expected"].encode() + + # Check that M2 still contains employeeNumber= + ents = M2.search_s(BASE_REGULAR, ldap.SCOPE_SUBTREE, '(employeeNumber=%s)' % description["expected"]) + log.info('Search M2 employeeNumber=%s (vs. %s)' % (ents[0].getValue('employeeNumber'), description["expected"])) + assert len(ents) == 1 + assert ents[0].hasAttr('employeeNumber') and ents[0].getValue('employeeNumber') == description["expected"].encode() + + # Check that M3 still contain employeeNumber and it contains employeeNumber= + ents = M3.search_s(BASE_REGULAR, ldap.SCOPE_SUBTREE, '(employeeNumber=*)') + assert len(ents) == MAX_STANDARD_USER + ents = M3.search_s(BASE_REGULAR, ldap.SCOPE_SUBTREE, '(employeeNumber=%s)' % description["expected"]) + log.info('Search M3 employeeNumber=%s (vs. %s)' % (ents[0].getValue('employeeNumber'), description["expected"])) + assert len(ents) == 1 + assert ents[0].hasAttr('employeeNumber') and ents[0].getValue('employeeNumber') == description["expected"].encode() + + + +def test_ticket49658_11(topo): + """Do + M2: MOD(REPL) -> V2 + M1: MOD(REPL) -> V1 + expected: V1 + + :id: a2810403-418b-41d7-948c-6f8ca46e2f29 + :setup: 3 Master Instances + :steps: + :expectedresults: + 1. Fill in the result that is expected + 2. For each test step + """ + + # If you need any test suite initialization, + # please, write additional fixture for that (including finalizer). + # Topology for suites are predefined in lib389/topologies.py. + + # If you need host, port or any other data about instance, + # Please, use the instance object attributes for that (for example, topo.ms["master1"].serverid) + + + + if DEBUGGING: + # Add debugging steps(if any)... 
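+ # Scenario for this test: M2 applies the oldest update (MOD REPLACE 11.2), then
+ # M1 applies MOD REPLACE 11.1. M2 is resumed before M1, so M3 receives the
+ # oldest update first; every master must end up with employeeNumber=11.1.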
+ pass + M1 = topo.ms["master1"] + M2 = topo.ms["master2"] + M3 = topo.ms["master3"] + value_1000 = '1000' + last = '11' + value_S1 = '11.1' + value_S2 = '11.2' + + description = { + "S1": M1, + "S2": M2, + "S1_MOD": [(ldap.MOD_REPLACE, 'employeeNumber', value_S1.encode())], + "S2_MOD": [(ldap.MOD_REPLACE, 'employeeNumber', value_S2.encode())], + "expected": value_S1} + + # This test takes the user_1 + (uid, test_user_dn) = _user_get_dn(int(last)) + + # + # Step 4 + # + # disable all RA from M1 and M2 + # only M3 can replicate the update + # + agreement_m1_m2 = M1.agreement.list(suffix=DEFAULT_SUFFIX, consumer_host=M2.host, consumer_port=M2.port) + agreement_m1_m3 = M1.agreement.list(suffix=DEFAULT_SUFFIX, consumer_host=M3.host, consumer_port=M3.port) + agreement_m2_m1 = M2.agreement.list(suffix=DEFAULT_SUFFIX, consumer_host=M1.host, consumer_port=M1.port) + agreement_m2_m3 = M2.agreement.list(suffix=DEFAULT_SUFFIX, consumer_host=M3.host, consumer_port=M3.port) + + M1.agreement.pause(agreement_m1_m2[0].dn) + M1.agreement.pause(agreement_m1_m3[0].dn) + M2.agreement.pause(agreement_m2_m1[0].dn) + M2.agreement.pause(agreement_m2_m3[0].dn) + + # Step 5 + # Oldest update + # check that the entry on M1 contains employeeNumber= + description["S2"].modify_s(test_user_dn, description["S2_MOD"]) + ents = description["S2"].search_s(BASE_REGULAR, ldap.SCOPE_SUBTREE, '(employeeNumber=%s)' % value_S2) + assert len(ents) == 1 + time.sleep(1) + + # Step 6 + # More recent update + # check that the entry on M2 contains employeeNumber= + description["S1"].modify_s(test_user_dn, description["S1_MOD"]) + ents = description["S1"].search_s(BASE_REGULAR, ldap.SCOPE_SUBTREE, '(employeeNumber=%s)' % value_S1) + assert len(ents) == 1 + + #time.sleep(60) + # Step 7 + # Renable M2 before M1 so that on M3, the most recent update is replicated before + for ra in agreement_m2_m1, agreement_m2_m3: + M2.agreement.resume(ra[0].dn) + + # Step 8 + # Renable M1 so that on M3 oldest update is now replicated + time.sleep(4) + for ra in agreement_m1_m2, agreement_m1_m3: + M1.agreement.resume(ra[0].dn) + + # Step 9 + # Check that M1 still contains employeeNumber= + ents = M1.search_s(BASE_REGULAR, ldap.SCOPE_SUBTREE, '(employeeNumber=%s)' % description["expected"]) + log.info('Search M1 employeeNumber=%s (vs. %s)' % (ents[0].getValue('employeeNumber'), description["expected"])) + assert len(ents) == 1 + assert ents[0].hasAttr('employeeNumber') and ents[0].getValue('employeeNumber') == description["expected"].encode() + + # Check that M2 still contains employeeNumber= + ents = M2.search_s(BASE_REGULAR, ldap.SCOPE_SUBTREE, '(employeeNumber=%s)' % description["expected"]) + log.info('Search M2 employeeNumber=%s (vs. %s)' % (ents[0].getValue('employeeNumber'), description["expected"])) + assert len(ents) == 1 + assert ents[0].hasAttr('employeeNumber') and ents[0].getValue('employeeNumber') == description["expected"].encode() + + # Check that M3 still contain employeeNumber and it contains employeeNumber= + ents = M3.search_s(BASE_REGULAR, ldap.SCOPE_SUBTREE, '(employeeNumber=*)') + assert len(ents) == MAX_STANDARD_USER + ents = M3.search_s(BASE_REGULAR, ldap.SCOPE_SUBTREE, '(employeeNumber=%s)' % description["expected"]) + log.info('Search M3 employeeNumber=%s (vs. 
%s)' % (ents[0].getValue('employeeNumber'), description["expected"])) + assert len(ents) == 1 + assert ents[0].hasAttr('employeeNumber') and ents[0].getValue('employeeNumber') == description["expected"].encode() + +def test_ticket49658_12(topo): + """Do + M2: MOD(ADD+DEL) -> V2 + M1: MOD(REPL) -> V1 + expected: V1 + + :id: daba6f3c-e060-4d3f-8f9c-25ea4c1bca48 + :setup: 3 Master Instances + 1. using user_2 where employNumber=1000 + :steps: + 1. Isolate M1 and M2 by pausing the replication agreements + 2. On M1 do MOD_DEL 1000 + MOD_ADD_13 + 3. On M2 do MOD_REPL _13 + 4. Enable replication agreement M2 -> M3, so that update step 2 is replicated first + 5. Enable replication agreement M1 -> M3, so that update step 3 is replicated second + 6. Check that the employeeNumber is 13 on all servers + :expectedresults: + 1. Fill in the result that is expected + 2. For each test step + """ + + # If you need any test suite initialization, + # please, write additional fixture for that (including finalizer). + # Topology for suites are predefined in lib389/topologies.py. + + # If you need host, port or any other data about instance, + # Please, use the instance object attributes for that (for example, topo.ms["master1"].serverid) + + + + if DEBUGGING: + # Add debugging steps(if any)... + pass + M1 = topo.ms["master1"] + M2 = topo.ms["master2"] + M3 = topo.ms["master3"] + value_1000 = '1000' + last = '12' + value_S1 = '12.1' + value_S2 = '12.2' + + description = { + "S1": M1, + "S2": M2, + "S1_MOD": [(ldap.MOD_REPLACE, 'employeeNumber', value_S1.encode())], + "S2_MOD": [(ldap.MOD_ADD, 'employeeNumber', value_S2.encode()),(ldap.MOD_DELETE, 'employeeNumber', value_1000.encode())], + "expected": value_S1} + + # This test takes the user_1 + (uid, test_user_dn) = _user_get_dn(int(last)) + + # + # Step 4 + # + # disable all RA from M1 and M2 + # only M3 can replicate the update + # + agreement_m1_m2 = M1.agreement.list(suffix=DEFAULT_SUFFIX, consumer_host=M2.host, consumer_port=M2.port) + agreement_m1_m3 = M1.agreement.list(suffix=DEFAULT_SUFFIX, consumer_host=M3.host, consumer_port=M3.port) + agreement_m2_m1 = M2.agreement.list(suffix=DEFAULT_SUFFIX, consumer_host=M1.host, consumer_port=M1.port) + agreement_m2_m3 = M2.agreement.list(suffix=DEFAULT_SUFFIX, consumer_host=M3.host, consumer_port=M3.port) + + M1.agreement.pause(agreement_m1_m2[0].dn) + M1.agreement.pause(agreement_m1_m3[0].dn) + M2.agreement.pause(agreement_m2_m1[0].dn) + M2.agreement.pause(agreement_m2_m3[0].dn) + + # Step 5 + # Oldest update + # check that the entry on M1 contains employeeNumber= + description["S2"].modify_s(test_user_dn, description["S2_MOD"]) + ents = description["S2"].search_s(BASE_REGULAR, ldap.SCOPE_SUBTREE, '(employeeNumber=%s)' % value_S2) + assert len(ents) == 1 + time.sleep(1) + + # Step 6 + # More recent update + # check that the entry on M2 contains employeeNumber= + description["S1"].modify_s(test_user_dn, description["S1_MOD"]) + ents = description["S1"].search_s(BASE_REGULAR, ldap.SCOPE_SUBTREE, '(employeeNumber=%s)' % value_S1) + assert len(ents) == 1 + + #time.sleep(60) + # Step 7 + # Renable M2 before M1 so that on M3, the most recent update is replicated before + for ra in agreement_m2_m1, agreement_m2_m3: + M2.agreement.resume(ra[0].dn) + + # Step 8 + # Renable M1 so that on M3 oldest update is now replicated + time.sleep(4) + for ra in agreement_m1_m2, agreement_m1_m3: + M1.agreement.resume(ra[0].dn) + + # Step 9 + # Check that M1 still contains employeeNumber= + ents = M1.search_s(BASE_REGULAR, 
ldap.SCOPE_SUBTREE, '(employeeNumber=%s)' % description["expected"]) + log.info('Search M1 employeeNumber=%s (vs. %s)' % (ents[0].getValue('employeeNumber'), description["expected"])) + assert len(ents) == 1 + assert ents[0].hasAttr('employeeNumber') and ents[0].getValue('employeeNumber') == description["expected"].encode() + + # Check that M2 still contains employeeNumber= + ents = M2.search_s(BASE_REGULAR, ldap.SCOPE_SUBTREE, '(employeeNumber=%s)' % description["expected"]) + log.info('Search M2 employeeNumber=%s (vs. %s)' % (ents[0].getValue('employeeNumber'), description["expected"])) + assert len(ents) == 1 + assert ents[0].hasAttr('employeeNumber') and ents[0].getValue('employeeNumber') == description["expected"].encode() + + # Check that M3 still contain employeeNumber and it contains employeeNumber= + ents = M3.search_s(BASE_REGULAR, ldap.SCOPE_SUBTREE, '(employeeNumber=*)') + assert len(ents) == MAX_STANDARD_USER + ents = M3.search_s(BASE_REGULAR, ldap.SCOPE_SUBTREE, '(employeeNumber=%s)' % description["expected"]) + log.info('Search M3 employeeNumber=%s (vs. %s)' % (ents[0].getValue('employeeNumber'), description["expected"])) + assert len(ents) == 1 + assert ents[0].hasAttr('employeeNumber') and ents[0].getValue('employeeNumber') == description["expected"].encode() + +def test_ticket49658_13(topo): + """Do + M2: MOD(DEL+ADD) -> V2 + M1: MOD(REPL) -> V1 + expected: V1 + + :id: 50006b1f-d17c-47a1-86a5-4d78b2a6eab1 + :setup: 3 Master Instances + 1. using user_2 where employNumber=1000 + :steps: + 1. Isolate M1 and M2 by pausing the replication agreements + 2. On M1 do MOD_DEL 1000 + MOD_ADD_13 + 3. On M2 do MOD_REPL _13 + 4. Enable replication agreement M2 -> M3, so that update step 2 is replicated first + 5. Enable replication agreement M1 -> M3, so that update step 3 is replicated second + 6. Check that the employeeNumber is 13 on all servers + :expectedresults: + 1. Fill in the result that is expected + 2. For each test step + """ + + # If you need any test suite initialization, + # please, write additional fixture for that (including finalizer). + # Topology for suites are predefined in lib389/topologies.py. + + # If you need host, port or any other data about instance, + # Please, use the instance object attributes for that (for example, topo.ms["master1"].serverid) + + + + if DEBUGGING: + # Add debugging steps(if any)... 
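+ # Scenario for this test: M2 applies the oldest update (MOD DEL 1000 / ADD 13.2),
+ # then M1 applies MOD REPLACE 13.1. M2 is resumed before M1, so M3 receives the
+ # oldest update first; every master must end up with employeeNumber=13.1.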
+ pass + M1 = topo.ms["master1"] + M2 = topo.ms["master2"] + M3 = topo.ms["master3"] + value_1000 = '1000' + last = '13' + value_S1 = '13.1' + value_S2 = '13.2' + + description = { + "S1": M1, + "S2": M2, + "S1_MOD": [(ldap.MOD_REPLACE, 'employeeNumber', value_S1.encode())], + "S2_MOD": [(ldap.MOD_DELETE, 'employeeNumber', value_1000.encode()),(ldap.MOD_ADD, 'employeeNumber', value_S2.encode())], + "expected": value_S1} + + # This test takes the user_1 + (uid, test_user_dn) = _user_get_dn(int(last)) + + # + # Step 4 + # + # disable all RA from M1 and M2 + # only M3 can replicate the update + # + agreement_m1_m2 = M1.agreement.list(suffix=DEFAULT_SUFFIX, consumer_host=M2.host, consumer_port=M2.port) + agreement_m1_m3 = M1.agreement.list(suffix=DEFAULT_SUFFIX, consumer_host=M3.host, consumer_port=M3.port) + agreement_m2_m1 = M2.agreement.list(suffix=DEFAULT_SUFFIX, consumer_host=M1.host, consumer_port=M1.port) + agreement_m2_m3 = M2.agreement.list(suffix=DEFAULT_SUFFIX, consumer_host=M3.host, consumer_port=M3.port) + + M1.agreement.pause(agreement_m1_m2[0].dn) + M1.agreement.pause(agreement_m1_m3[0].dn) + M2.agreement.pause(agreement_m2_m1[0].dn) + M2.agreement.pause(agreement_m2_m3[0].dn) + + # Step 5 + # Oldest update + # check that the entry on M1 contains employeeNumber= + description["S2"].modify_s(test_user_dn, description["S2_MOD"]) + ents = description["S2"].search_s(BASE_REGULAR, ldap.SCOPE_SUBTREE, '(employeeNumber=%s)' % value_S2) + assert len(ents) == 1 + time.sleep(1) + + # Step 6 + # More recent update + # check that the entry on M2 contains employeeNumber= + description["S1"].modify_s(test_user_dn, description["S1_MOD"]) + ents = description["S1"].search_s(BASE_REGULAR, ldap.SCOPE_SUBTREE, '(employeeNumber=%s)' % value_S1) + assert len(ents) == 1 + + #time.sleep(60) + # Step 7 + # Renable M2 before M1 so that on M3, the most recent update is replicated before + for ra in agreement_m2_m1, agreement_m2_m3: + M2.agreement.resume(ra[0].dn) + + # Step 8 + # Renable M1 so that on M3 oldest update is now replicated + time.sleep(4) + for ra in agreement_m1_m2, agreement_m1_m3: + M1.agreement.resume(ra[0].dn) + + # Step 9 + # Check that M1 still contains employeeNumber= + ents = M1.search_s(BASE_REGULAR, ldap.SCOPE_SUBTREE, '(employeeNumber=%s)' % description["expected"]) + log.info('Search M1 employeeNumber=%s (vs. %s)' % (ents[0].getValue('employeeNumber'), description["expected"])) + assert len(ents) == 1 + assert ents[0].hasAttr('employeeNumber') and ents[0].getValue('employeeNumber') == description["expected"].encode() + + # Check that M2 still contains employeeNumber= + ents = M2.search_s(BASE_REGULAR, ldap.SCOPE_SUBTREE, '(employeeNumber=%s)' % description["expected"]) + log.info('Search M2 employeeNumber=%s (vs. %s)' % (ents[0].getValue('employeeNumber'), description["expected"])) + assert len(ents) == 1 + assert ents[0].hasAttr('employeeNumber') and ents[0].getValue('employeeNumber') == description["expected"].encode() + + # Check that M3 still contain employeeNumber and it contains employeeNumber= + ents = M3.search_s(BASE_REGULAR, ldap.SCOPE_SUBTREE, '(employeeNumber=*)') + assert len(ents) == MAX_STANDARD_USER + ents = M3.search_s(BASE_REGULAR, ldap.SCOPE_SUBTREE, '(employeeNumber=%s)' % description["expected"]) + log.info('Search M3 employeeNumber=%s (vs. 
%s)' % (ents[0].getValue('employeeNumber'), description["expected"])) + assert len(ents) == 1 + assert ents[0].hasAttr('employeeNumber') and ents[0].getValue('employeeNumber') == description["expected"].encode() + + +def test_ticket49658_14(topo): + """Do + M2: MOD(DEL+ADD) -> V2 + M1: MOD(DEL+ADD) -> V1 + expected: V1 + + :id: d45c58f1-c95e-4314-9cdd-53a2dd391218 + :setup: 3 Master Instances + 1. using user_2 where employNumber=1000 + :steps: + 1. Isolate M1 and M2 by pausing the replication agreements + 2. On M1 do MOD_DEL 1000 + MOD_ADD_13 + 3. On M2 do MOD_REPL _13 + 4. Enable replication agreement M2 -> M3, so that update step 2 is replicated first + 5. Enable replication agreement M1 -> M3, so that update step 3 is replicated second + 6. Check that the employeeNumber is 13 on all servers + :expectedresults: + 1. Fill in the result that is expected + 2. For each test step + """ + + # If you need any test suite initialization, + # please, write additional fixture for that (including finalizer). + # Topology for suites are predefined in lib389/topologies.py. + + # If you need host, port or any other data about instance, + # Please, use the instance object attributes for that (for example, topo.ms["master1"].serverid) + + + + if DEBUGGING: + # Add debugging steps(if any)... + pass + M1 = topo.ms["master1"] + M2 = topo.ms["master2"] + M3 = topo.ms["master3"] + value_1000 = '1000' + last = '14' + value_S1 = '14.1' + value_S2 = '14.2' + + description = { + "S1": M1, + "S2": M2, + "S1_MOD": [(ldap.MOD_DELETE, 'employeeNumber', value_1000.encode()),(ldap.MOD_ADD, 'employeeNumber', value_S1.encode())], + "S2_MOD": [(ldap.MOD_DELETE, 'employeeNumber', value_1000.encode()),(ldap.MOD_ADD, 'employeeNumber', value_S2.encode())], + "expected": value_S1} + + # This test takes the user_1 + (uid, test_user_dn) = _user_get_dn(int(last)) + + # + # Step 4 + # + # disable all RA from M1 and M2 + # only M3 can replicate the update + # + agreement_m1_m2 = M1.agreement.list(suffix=DEFAULT_SUFFIX, consumer_host=M2.host, consumer_port=M2.port) + agreement_m1_m3 = M1.agreement.list(suffix=DEFAULT_SUFFIX, consumer_host=M3.host, consumer_port=M3.port) + agreement_m2_m1 = M2.agreement.list(suffix=DEFAULT_SUFFIX, consumer_host=M1.host, consumer_port=M1.port) + agreement_m2_m3 = M2.agreement.list(suffix=DEFAULT_SUFFIX, consumer_host=M3.host, consumer_port=M3.port) + + M1.agreement.pause(agreement_m1_m2[0].dn) + M1.agreement.pause(agreement_m1_m3[0].dn) + M2.agreement.pause(agreement_m2_m1[0].dn) + M2.agreement.pause(agreement_m2_m3[0].dn) + + # Step 5 + # Oldest update + # check that the entry on M1 contains employeeNumber= + description["S2"].modify_s(test_user_dn, description["S2_MOD"]) + ents = description["S2"].search_s(BASE_REGULAR, ldap.SCOPE_SUBTREE, '(employeeNumber=%s)' % value_S2) + assert len(ents) == 1 + time.sleep(1) + + # Step 6 + # More recent update + # check that the entry on M2 contains employeeNumber= + description["S1"].modify_s(test_user_dn, description["S1_MOD"]) + ents = description["S1"].search_s(BASE_REGULAR, ldap.SCOPE_SUBTREE, '(employeeNumber=%s)' % value_S1) + assert len(ents) == 1 + + #time.sleep(60) + # Step 7 + # Renable M2 before M1 so that on M3, the most recent update is replicated before + for ra in agreement_m2_m1, agreement_m2_m3: + M2.agreement.resume(ra[0].dn) + + # Step 8 + # Renable M1 so that on M3 oldest update is now replicated + time.sleep(4) + for ra in agreement_m1_m2, agreement_m1_m3: + M1.agreement.resume(ra[0].dn) + + # Step 9 + # Check that M1 still contains 
employeeNumber= + ents = M1.search_s(BASE_REGULAR, ldap.SCOPE_SUBTREE, '(employeeNumber=%s)' % description["expected"]) + log.info('Search M1 employeeNumber=%s (vs. %s)' % (ents[0].getValue('employeeNumber'), description["expected"])) + assert len(ents) == 1 + assert ents[0].hasAttr('employeeNumber') and ents[0].getValue('employeeNumber') == description["expected"].encode() + + # Check that M2 still contains employeeNumber= + ents = M2.search_s(BASE_REGULAR, ldap.SCOPE_SUBTREE, '(employeeNumber=%s)' % description["expected"]) + log.info('Search M2 employeeNumber=%s (vs. %s)' % (ents[0].getValue('employeeNumber'), description["expected"])) + assert len(ents) == 1 + assert ents[0].hasAttr('employeeNumber') and ents[0].getValue('employeeNumber') == description["expected"].encode() + + # Check that M3 still contain employeeNumber and it contains employeeNumber= + ents = M3.search_s(BASE_REGULAR, ldap.SCOPE_SUBTREE, '(employeeNumber=*)') + assert len(ents) == MAX_STANDARD_USER + ents = M3.search_s(BASE_REGULAR, ldap.SCOPE_SUBTREE, '(employeeNumber=%s)' % description["expected"]) + log.info('Search M3 employeeNumber=%s (vs. %s)' % (ents[0].getValue('employeeNumber'), description["expected"])) + assert len(ents) == 1 + assert ents[0].hasAttr('employeeNumber') and ents[0].getValue('employeeNumber') == description["expected"].encode() + +def test_ticket49658_15(topo): + """Do + M2: MOD(ADD+DEL) -> V2 + M1: MOD(DEL+ADD) -> V1 + expected: V1 + + :id: e077f312-e0af-497a-8a31-3395873512d8 + :setup: 3 Master Instances + 1. using user_2 where employNumber=1000 + :steps: + 1. Isolate M1 and M2 by pausing the replication agreements + 2. On M1 do MOD_DEL 1000 + MOD_ADD_13 + 3. On M2 do MOD_REPL _13 + 4. Enable replication agreement M2 -> M3, so that update step 2 is replicated first + 5. Enable replication agreement M1 -> M3, so that update step 3 is replicated second + 6. Check that the employeeNumber is 13 on all servers + :expectedresults: + 1. Fill in the result that is expected + 2. For each test step + """ + + # If you need any test suite initialization, + # please, write additional fixture for that (including finalizer). + # Topology for suites are predefined in lib389/topologies.py. + + # If you need host, port or any other data about instance, + # Please, use the instance object attributes for that (for example, topo.ms["master1"].serverid) + + + + if DEBUGGING: + # Add debugging steps(if any)... 
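+ # Scenario for this test: M2 applies the oldest update (MOD ADD 15.2 / DEL 1000),
+ # then M1 applies MOD DEL 1000 / ADD 15.1. M2 is resumed before M1, so M3 receives
+ # the oldest update first; every master must end up with employeeNumber=15.1.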
+ pass + M1 = topo.ms["master1"] + M2 = topo.ms["master2"] + M3 = topo.ms["master3"] + value_1000 = '1000' + last = '15' + value_S1 = '15.1' + value_S2 = '15.2' + + description = { + "S1": M1, + "S2": M2, + "S1_MOD": [(ldap.MOD_DELETE, 'employeeNumber', value_1000.encode()),(ldap.MOD_ADD, 'employeeNumber', value_S1.encode())], + "S2_MOD": [(ldap.MOD_ADD, 'employeeNumber', value_S2.encode()),(ldap.MOD_DELETE, 'employeeNumber', value_1000.encode())], + "expected": value_S1} + + # This test takes the user_1 + (uid, test_user_dn) = _user_get_dn(int(last)) + + # + # Step 4 + # + # disable all RA from M1 and M2 + # only M3 can replicate the update + # + agreement_m1_m2 = M1.agreement.list(suffix=DEFAULT_SUFFIX, consumer_host=M2.host, consumer_port=M2.port) + agreement_m1_m3 = M1.agreement.list(suffix=DEFAULT_SUFFIX, consumer_host=M3.host, consumer_port=M3.port) + agreement_m2_m1 = M2.agreement.list(suffix=DEFAULT_SUFFIX, consumer_host=M1.host, consumer_port=M1.port) + agreement_m2_m3 = M2.agreement.list(suffix=DEFAULT_SUFFIX, consumer_host=M3.host, consumer_port=M3.port) + + M1.agreement.pause(agreement_m1_m2[0].dn) + M1.agreement.pause(agreement_m1_m3[0].dn) + M2.agreement.pause(agreement_m2_m1[0].dn) + M2.agreement.pause(agreement_m2_m3[0].dn) + + # Step 5 + # Oldest update + # check that the entry on M1 contains employeeNumber= + description["S2"].modify_s(test_user_dn, description["S2_MOD"]) + ents = description["S2"].search_s(BASE_REGULAR, ldap.SCOPE_SUBTREE, '(employeeNumber=%s)' % value_S2) + assert len(ents) == 1 + time.sleep(1) + + # Step 6 + # More recent update + # check that the entry on M2 contains employeeNumber= + description["S1"].modify_s(test_user_dn, description["S1_MOD"]) + ents = description["S1"].search_s(BASE_REGULAR, ldap.SCOPE_SUBTREE, '(employeeNumber=%s)' % value_S1) + assert len(ents) == 1 + + #time.sleep(60) + # Step 7 + # Renable M2 before M1 so that on M3, the most recent update is replicated before + for ra in agreement_m2_m1, agreement_m2_m3: + M2.agreement.resume(ra[0].dn) + + # Step 8 + # Renable M1 so that on M3 oldest update is now replicated + time.sleep(4) + for ra in agreement_m1_m2, agreement_m1_m3: + M1.agreement.resume(ra[0].dn) + + # Step 9 + # Check that M1 still contains employeeNumber= + ents = M1.search_s(BASE_REGULAR, ldap.SCOPE_SUBTREE, '(employeeNumber=%s)' % description["expected"]) + log.info('Search M1 employeeNumber=%s (vs. %s)' % (ents[0].getValue('employeeNumber'), description["expected"])) + assert len(ents) == 1 + assert ents[0].hasAttr('employeeNumber') and ents[0].getValue('employeeNumber') == description["expected"].encode() + + # Check that M2 still contains employeeNumber= + ents = M2.search_s(BASE_REGULAR, ldap.SCOPE_SUBTREE, '(employeeNumber=%s)' % description["expected"]) + log.info('Search M2 employeeNumber=%s (vs. %s)' % (ents[0].getValue('employeeNumber'), description["expected"])) + assert len(ents) == 1 + assert ents[0].hasAttr('employeeNumber') and ents[0].getValue('employeeNumber') == description["expected"].encode() + + # Check that M3 still contain employeeNumber and it contains employeeNumber= + ents = M3.search_s(BASE_REGULAR, ldap.SCOPE_SUBTREE, '(employeeNumber=*)') + assert len(ents) == MAX_STANDARD_USER + ents = M3.search_s(BASE_REGULAR, ldap.SCOPE_SUBTREE, '(employeeNumber=%s)' % description["expected"]) + log.info('Search M3 employeeNumber=%s (vs. 
%s)' % (ents[0].getValue('employeeNumber'), description["expected"])) + assert len(ents) == 1 + assert ents[0].hasAttr('employeeNumber') and ents[0].getValue('employeeNumber') == description["expected"].encode() + + +def _resume_ra_M1_then_M2(M1, M2, M3): + agreement_m1_m2 = M1.agreement.list(suffix=DEFAULT_SUFFIX, consumer_host=M2.host, consumer_port=M2.port) + agreement_m1_m3 = M1.agreement.list(suffix=DEFAULT_SUFFIX, consumer_host=M3.host, consumer_port=M3.port) + agreement_m2_m1 = M2.agreement.list(suffix=DEFAULT_SUFFIX, consumer_host=M1.host, consumer_port=M1.port) + agreement_m2_m3 = M2.agreement.list(suffix=DEFAULT_SUFFIX, consumer_host=M3.host, consumer_port=M3.port) + + for ra in agreement_m1_m2, agreement_m1_m3: + M1.agreement.resume(ra[0].dn) + + time.sleep(4) + for ra in agreement_m2_m1, agreement_m2_m3: + M2.agreement.resume(ra[0].dn) + time.sleep(4) + +def _resume_ra_M2_then_M1(M1, M2, M3): + agreement_m1_m2 = M1.agreement.list(suffix=DEFAULT_SUFFIX, consumer_host=M2.host, consumer_port=M2.port) + agreement_m1_m3 = M1.agreement.list(suffix=DEFAULT_SUFFIX, consumer_host=M3.host, consumer_port=M3.port) + agreement_m2_m1 = M2.agreement.list(suffix=DEFAULT_SUFFIX, consumer_host=M1.host, consumer_port=M1.port) + agreement_m2_m3 = M2.agreement.list(suffix=DEFAULT_SUFFIX, consumer_host=M3.host, consumer_port=M3.port) + + for ra in agreement_m2_m1, agreement_m2_m3: + M2.agreement.resume(ra[0].dn) + + time.sleep(4) + for ra in agreement_m1_m2, agreement_m1_m3: + M1.agreement.resume(ra[0].dn) + time.sleep(4) + + +def test_ticket49658_16(topo): + """Do + M1: MODRDN -> V1 + M2: MODRDN -> V1 + expected: V1 + resume order: M2, M1 + + :id: 131b4e4c-0a6d-45df-88aa-cb26a1cd6fa6 + :setup: 3 Master Instances + 1. Use employeenumber=1000,ou=distinguished,ou=people, + :steps: + 1. Isolate M1 and M2 by pausing the replication agreements + 2. On M1 do DEL+ADD 1000 + MOD_ADD_13 + 3. On M2 do DEL+ADD 1000 + MOD_ADD_13 + 4. Enable replication agreement M2 -> M3, so that update step 2 is replicated first + 5. Enable replication agreement M1 -> M3, so that update step 3 is replicated second + 6. Check that the employeeNumber is 13 on all servers + :expectedresults: + 1. Fill in the result that is expected + 2. For each test step + """ + + # If you need any test suite initialization, + # please, write additional fixture for that (including finalizer). + # Topology for suites are predefined in lib389/topologies.py. + + # If you need host, port or any other data about instance, + # Please, use the instance object attributes for that (for example, topo.ms["master1"].serverid) + + + + if DEBUGGING: + # Add debugging steps(if any)... 
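+ # Hedged debugging sketch (assumption, not part of the original test): log the DNs
+ # of the four agreements this scenario pauses, so a failure can be correlated with
+ # the resume order later applied by _resume_ra_M2_then_M1(). Only calls already used
+ # in this module are involved (agreement.list, log.info); the loop names are illustrative.
+ for src, dst in ((topo.ms["master1"], topo.ms["master2"]),
+                  (topo.ms["master1"], topo.ms["master3"]),
+                  (topo.ms["master2"], topo.ms["master1"]),
+                  (topo.ms["master2"], topo.ms["master3"])):
+     for agmt in src.agreement.list(suffix=DEFAULT_SUFFIX, consumer_host=dst.host, consumer_port=dst.port):
+         log.info('agreement %s -> %s: %s' % (src.serverid, dst.serverid, agmt.dn))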
+ pass + M1 = topo.ms["master1"] + M2 = topo.ms["master2"] + M3 = topo.ms["master3"] + value_init = '1' + last = '1' + value_S1 = '1.1' + value_S2 = value_S1 + + description = { + "S1": M1, + "S2": M2, + "S1_MODRDN": value_S1, + "S2_MODRDN": value_S2, + "expected": value_S1} + + # This test takes the user_1 + (uid, test_user_dn) = _employeenumber_user_get_dn(int(last)) + + # + # Step 4 + # + # disable all RA from M1 and M2 + # only M3 can replicate the update + # + agreement_m1_m2 = M1.agreement.list(suffix=DEFAULT_SUFFIX, consumer_host=M2.host, consumer_port=M2.port) + agreement_m1_m3 = M1.agreement.list(suffix=DEFAULT_SUFFIX, consumer_host=M3.host, consumer_port=M3.port) + agreement_m2_m1 = M2.agreement.list(suffix=DEFAULT_SUFFIX, consumer_host=M1.host, consumer_port=M1.port) + agreement_m2_m3 = M2.agreement.list(suffix=DEFAULT_SUFFIX, consumer_host=M3.host, consumer_port=M3.port) + + M1.agreement.pause(agreement_m1_m2[0].dn) + M1.agreement.pause(agreement_m1_m3[0].dn) + M2.agreement.pause(agreement_m2_m1[0].dn) + M2.agreement.pause(agreement_m2_m3[0].dn) + + # Step 5 + # Oldest update + # check that the entry on M1 contains employeeNumber= + description["S1"].rename_s(test_user_dn, 'employeeNumber=%s' % description["S1_MODRDN"], newsuperior=BASE_DISTINGUISHED, delold=1) + ents = description["S1"].search_s(BASE_DISTINGUISHED, ldap.SCOPE_SUBTREE, '(employeeNumber=%s)' % description["S1_MODRDN"]) + assert len(ents) == 1 + time.sleep(1) + + # Step 6 + # More recent update + # check that the entry on M2 contains employeeNumber= + description["S2"].rename_s(test_user_dn, 'employeeNumber=%s' % description["S2_MODRDN"], newsuperior=BASE_DISTINGUISHED, delold=1) + ents = description["S2"].search_s(BASE_DISTINGUISHED, ldap.SCOPE_SUBTREE, '(employeeNumber=%s)' % description["S2_MODRDN"]) + assert len(ents) == 1 + + _resume_ra_M2_then_M1(M1, M2, M3) + + # Step 9 + # Check that M1 still contains employeeNumber= + ents = M1.search_s(BASE_DISTINGUISHED, ldap.SCOPE_SUBTREE, '(employeeNumber=%s)' % description["expected"]) + log.info('Search M1 employeeNumber=%s (vs. %s)' % (ents[0].getValue('employeeNumber'), description["expected"])) + assert len(ents) == 1 + assert ents[0].hasAttr('employeeNumber') and ents[0].getValue('employeeNumber') == description["expected"].encode() + + # Check that M2 still contains employeeNumber= + ents = M2.search_s(BASE_DISTINGUISHED, ldap.SCOPE_SUBTREE, '(employeeNumber=%s)' % description["expected"]) + log.info('Search M2 employeeNumber=%s (vs. %s)' % (ents[0].getValue('employeeNumber'), description["expected"])) + assert len(ents) == 1 + assert ents[0].hasAttr('employeeNumber') and ents[0].getValue('employeeNumber') == description["expected"].encode() + + # Check that M3 still contain employeeNumber and it contains employeeNumber= + ents = M3.search_s(BASE_DISTINGUISHED, ldap.SCOPE_SUBTREE, '(employeeNumber=*)') + assert len(ents) == MAX_EMPLOYEENUMBER_USER + ents = M3.search_s(BASE_DISTINGUISHED, ldap.SCOPE_SUBTREE, '(employeeNumber=%s)' % description["expected"]) + log.info('Search M3 employeeNumber=%s (vs. %s)' % (ents[0].getValue('employeeNumber'), description["expected"])) + assert len(ents) == 1 + assert ents[0].hasAttr('employeeNumber') and ents[0].getValue('employeeNumber') == description["expected"].encode() + + +def test_ticket49658_17(topo): + """Do + M1: MODRDN -> V1 + M2: MODRDN -> V2 + expected: V2 + resume order: M2 then M1 + + :id: 1d3423ec-a2f3-4c03-9765-ec0924f03cb2 + :setup: 3 Master Instances + 1. 
Use employeenumber=1000,ou=distinguished,ou=people, + :steps: + 1. Isolate M1 and M2 by pausing the replication agreements + 2. On M1 do DEL+ADD 1000 + MOD_ADD_13 + 3. On M2 do DEL+ADD 1000 + MOD_ADD_13 + 4. Enable replication agreement M2 -> M3, so that update step 2 is replicated first + 5. Enable replication agreement M1 -> M3, so that update step 3 is replicated second + 6. Check that the employeeNumber is 13 on all servers + :expectedresults: + 1. Fill in the result that is expected + 2. For each test step + """ + + # If you need any test suite initialization, + # please, write additional fixture for that (including finalizer). + # Topology for suites are predefined in lib389/topologies.py. + + # If you need host, port or any other data about instance, + # Please, use the instance object attributes for that (for example, topo.ms["master1"].serverid) + + + + if DEBUGGING: + # Add debugging steps(if any)... + pass + M1 = topo.ms["master1"] + M2 = topo.ms["master2"] + M3 = topo.ms["master3"] + value_init = '2' + last = '2' + value_S1 = '2.1' + value_S2 = '2.2' + + description = { + "S1": M1, + "S2": M2, + "S1_MODRDN": value_S1, + "S2_MODRDN": value_S2, + "expected": value_S2} + + # This test takes the user_1 + (uid, test_user_dn) = _employeenumber_user_get_dn(int(last)) + + # + # Step 4 + # + # disable all RA from M1 and M2 + # only M3 can replicate the update + # + agreement_m1_m2 = M1.agreement.list(suffix=DEFAULT_SUFFIX, consumer_host=M2.host, consumer_port=M2.port) + agreement_m1_m3 = M1.agreement.list(suffix=DEFAULT_SUFFIX, consumer_host=M3.host, consumer_port=M3.port) + agreement_m2_m1 = M2.agreement.list(suffix=DEFAULT_SUFFIX, consumer_host=M1.host, consumer_port=M1.port) + agreement_m2_m3 = M2.agreement.list(suffix=DEFAULT_SUFFIX, consumer_host=M3.host, consumer_port=M3.port) + + M1.agreement.pause(agreement_m1_m2[0].dn) + M1.agreement.pause(agreement_m1_m3[0].dn) + M2.agreement.pause(agreement_m2_m1[0].dn) + M2.agreement.pause(agreement_m2_m3[0].dn) + + # Step 5 + # Oldest update + # check that the entry on M1 contains employeeNumber= + description["S1"].rename_s(test_user_dn, 'employeeNumber=%s' % description["S1_MODRDN"], newsuperior=BASE_DISTINGUISHED, delold=1) + ents = description["S1"].search_s(BASE_DISTINGUISHED, ldap.SCOPE_SUBTREE, '(employeeNumber=%s)' % description["S1_MODRDN"]) + assert len(ents) == 1 + time.sleep(1) + + # Step 6 + # More recent update + # check that the entry on M2 contains employeeNumber= + description["S2"].rename_s(test_user_dn, 'employeeNumber=%s' % description["S2_MODRDN"], newsuperior=BASE_DISTINGUISHED, delold=1) + ents = description["S2"].search_s(BASE_DISTINGUISHED, ldap.SCOPE_SUBTREE, '(employeeNumber=%s)' % description["S2_MODRDN"]) + assert len(ents) == 1 + + _resume_ra_M2_then_M1(M1, M2, M3) + + # Step 9 + # Check that M1 still contains employeeNumber= + ents = M1.search_s(BASE_DISTINGUISHED, ldap.SCOPE_SUBTREE, '(employeeNumber=%s)' % description["expected"]) + log.info('Search M1 employeeNumber=%s (vs. %s)' % (ents[0].getValue('employeeNumber'), description["expected"])) + assert len(ents) == 1 + assert ents[0].hasAttr('employeeNumber') and ents[0].getValue('employeeNumber') == description["expected"].encode() + + # Check that M2 still contains employeeNumber= + ents = M2.search_s(BASE_DISTINGUISHED, ldap.SCOPE_SUBTREE, '(employeeNumber=%s)' % description["expected"]) + log.info('Search M2 employeeNumber=%s (vs. 
%s)' % (ents[0].getValue('employeeNumber'), description["expected"])) + assert len(ents) == 1 + assert ents[0].hasAttr('employeeNumber') and ents[0].getValue('employeeNumber') == description["expected"].encode() + + # Check that M3 still contain employeeNumber and it contains employeeNumber= + ents = M3.search_s(BASE_DISTINGUISHED, ldap.SCOPE_SUBTREE, '(employeeNumber=*)') + assert len(ents) == MAX_EMPLOYEENUMBER_USER + ents = M3.search_s(BASE_DISTINGUISHED, ldap.SCOPE_SUBTREE, '(employeeNumber=%s)' % description["expected"]) + log.info('Search M3 employeeNumber=%s (vs. %s)' % (ents[0].getValue('employeeNumber'), description["expected"])) + assert len(ents) == 1 + assert ents[0].hasAttr('employeeNumber') and ents[0].getValue('employeeNumber') == description["expected"].encode() + +def test_ticket49658_18(topo): + """Do + M1: MODRDN -> V1 + M2: MODRDN -> V2 + expected: V2 + resume order: M1 then M2 + + :id: c50ea634-ba35-4943-833b-0524a446214f + :setup: 3 Master Instances + 1. Use employeenumber=1000,ou=distinguished,ou=people, + :steps: + 1. Isolate M1 and M2 by pausing the replication agreements + 2. On M1 do DEL+ADD 1000 + MOD_ADD_13 + 3. On M2 do DEL+ADD 1000 + MOD_ADD_13 + 4. Enable replication agreement M2 -> M3, so that update step 2 is replicated first + 5. Enable replication agreement M1 -> M3, so that update step 3 is replicated second + 6. Check that the employeeNumber is 13 on all servers + :expectedresults: + 1. Fill in the result that is expected + 2. For each test step + """ + + # If you need any test suite initialization, + # please, write additional fixture for that (including finalizer). + # Topology for suites are predefined in lib389/topologies.py. + + # If you need host, port or any other data about instance, + # Please, use the instance object attributes for that (for example, topo.ms["master1"].serverid) + + + + if DEBUGGING: + # Add debugging steps(if any)... 
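+ # Hedged debugging sketch (assumption, not part of the original test): check the
+ # precondition that the two candidate RDN values used below ('3.1' and '3.2') are
+ # not yet present on any master before the conflicting MODRDN operations are issued.
+ # Only search_s/log.info are used and nothing is asserted here.
+ for name in ('master1', 'master2', 'master3'):
+     for val in ('3.1', '3.2'):
+         found = topo.ms[name].search_s(BASE_DISTINGUISHED, ldap.SCOPE_SUBTREE, '(employeeNumber=%s)' % val)
+         log.info('%s: %d pre-existing entries with employeeNumber=%s' % (name, len(found), val))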
+ pass + M1 = topo.ms["master1"] + M2 = topo.ms["master2"] + M3 = topo.ms["master3"] + value_init = '2' + last = '3' + value_S1 = '3.1' + value_S2 = '3.2' + + description = { + "S1": M1, + "S2": M2, + "S1_MODRDN": value_S1, + "S2_MODRDN": value_S2, + "expected": value_S2} + + # This test takes the user_1 + (uid, test_user_dn) = _employeenumber_user_get_dn(int(last)) + + # + # Step 4 + # + # disable all RA from M1 and M2 + # only M3 can replicate the update + # + agreement_m1_m2 = M1.agreement.list(suffix=DEFAULT_SUFFIX, consumer_host=M2.host, consumer_port=M2.port) + agreement_m1_m3 = M1.agreement.list(suffix=DEFAULT_SUFFIX, consumer_host=M3.host, consumer_port=M3.port) + agreement_m2_m1 = M2.agreement.list(suffix=DEFAULT_SUFFIX, consumer_host=M1.host, consumer_port=M1.port) + agreement_m2_m3 = M2.agreement.list(suffix=DEFAULT_SUFFIX, consumer_host=M3.host, consumer_port=M3.port) + + M1.agreement.pause(agreement_m1_m2[0].dn) + M1.agreement.pause(agreement_m1_m3[0].dn) + M2.agreement.pause(agreement_m2_m1[0].dn) + M2.agreement.pause(agreement_m2_m3[0].dn) + + # Step 5 + # Oldest update + # check that the entry on M1 contains employeeNumber= + description["S1"].rename_s(test_user_dn, 'employeeNumber=%s' % description["S1_MODRDN"], newsuperior=BASE_DISTINGUISHED, delold=1) + ents = description["S1"].search_s(BASE_DISTINGUISHED, ldap.SCOPE_SUBTREE, '(employeeNumber=%s)' % description["S1_MODRDN"]) + assert len(ents) == 1 + time.sleep(1) + + # Step 6 + # More recent update + # check that the entry on M2 contains employeeNumber= + description["S2"].rename_s(test_user_dn, 'employeeNumber=%s' % description["S2_MODRDN"], newsuperior=BASE_DISTINGUISHED, delold=1) + ents = description["S2"].search_s(BASE_DISTINGUISHED, ldap.SCOPE_SUBTREE, '(employeeNumber=%s)' % description["S2_MODRDN"]) + assert len(ents) == 1 + + _resume_ra_M1_then_M2(M1, M2, M3) + + # Step 9 + # Check that M1 still contains employeeNumber= + ents = M1.search_s(BASE_DISTINGUISHED, ldap.SCOPE_SUBTREE, '(employeeNumber=%s)' % description["expected"]) + log.info('Search M1 employeeNumber=%s (vs. %s)' % (ents[0].getValue('employeeNumber'), description["expected"])) + assert len(ents) == 1 + assert ents[0].hasAttr('employeeNumber') and ents[0].getValue('employeeNumber') == description["expected"].encode() + + # Check that M2 still contains employeeNumber= + ents = M2.search_s(BASE_DISTINGUISHED, ldap.SCOPE_SUBTREE, '(employeeNumber=%s)' % description["expected"]) + log.info('Search M2 employeeNumber=%s (vs. %s)' % (ents[0].getValue('employeeNumber'), description["expected"])) + assert len(ents) == 1 + assert ents[0].hasAttr('employeeNumber') and ents[0].getValue('employeeNumber') == description["expected"].encode() + + # Check that M3 still contain employeeNumber and it contains employeeNumber= + ents = M3.search_s(BASE_DISTINGUISHED, ldap.SCOPE_SUBTREE, '(employeeNumber=*)') + assert len(ents) == MAX_EMPLOYEENUMBER_USER + ents = M3.search_s(BASE_DISTINGUISHED, ldap.SCOPE_SUBTREE, '(employeeNumber=%s)' % description["expected"]) + log.info('Search M3 employeeNumber=%s (vs. %s)' % (ents[0].getValue('employeeNumber'), description["expected"])) + assert len(ents) == 1 + assert ents[0].hasAttr('employeeNumber') and ents[0].getValue('employeeNumber') == description["expected"].encode() + +def test_ticket49658_19(topo): + """Do + M1: MODRDN -> V1 + M2: MODRDN -> V2 + M1: MOD(REPL) -> V1 + Replicate order: M2 then M1 + expected: V1 + + :id: 787db943-fc95-4fbb-b066-5e8895cfd296 + :setup: 3 Master Instances + 1. 
Use employeenumber=1000,ou=distinguished,ou=people, + :steps: + 1. Isolate M1 and M2 by pausing the replication agreements + 2. On M1 do DEL+ADD 1000 + MOD_ADD_13 + 3. On M2 do DEL+ADD 1000 + MOD_ADD_13 + 4. Enable replication agreement M2 -> M3, so that update step 2 is replicated first + 5. Enable replication agreement M1 -> M3, so that update step 3 is replicated second + 6. Check that the employeeNumber is 13 on all servers + :expectedresults: + 1. Fill in the result that is expected + 2. For each test step + """ + + # If you need any test suite initialization, + # please, write additional fixture for that (including finalizer). + # Topology for suites are predefined in lib389/topologies.py. + + # If you need host, port or any other data about instance, + # Please, use the instance object attributes for that (for example, topo.ms["master1"].serverid) + + + + if DEBUGGING: + # Add debugging steps(if any)... + pass + M1 = topo.ms["master1"] + M2 = topo.ms["master2"] + M3 = topo.ms["master3"] + value_init = '3' + last = '4' + value_S1 = '4.1' + value_S2 = '4.2' + + description = { + "S1": M1, + "S2": M2, + "S1_MODRDN": value_S1, + "S1_MOD": [(ldap.MOD_REPLACE, 'employeeNumber', value_S1.encode())], + "S2_MODRDN": value_S2, + "expected": value_S1} + + # This test takes the user_1 + (uid, test_user_dn) = _employeenumber_user_get_dn(int(last)) + + # + # Step 4 + # + # disable all RA from M1 and M2 + # only M3 can replicate the update + # + agreement_m1_m2 = M1.agreement.list(suffix=DEFAULT_SUFFIX, consumer_host=M2.host, consumer_port=M2.port) + agreement_m1_m3 = M1.agreement.list(suffix=DEFAULT_SUFFIX, consumer_host=M3.host, consumer_port=M3.port) + agreement_m2_m1 = M2.agreement.list(suffix=DEFAULT_SUFFIX, consumer_host=M1.host, consumer_port=M1.port) + agreement_m2_m3 = M2.agreement.list(suffix=DEFAULT_SUFFIX, consumer_host=M3.host, consumer_port=M3.port) + + M1.agreement.pause(agreement_m1_m2[0].dn) + M1.agreement.pause(agreement_m1_m3[0].dn) + M2.agreement.pause(agreement_m2_m1[0].dn) + M2.agreement.pause(agreement_m2_m3[0].dn) + + # Step 5 + # Oldest update + # check that the entry on M1 contains employeeNumber= + description["S1"].rename_s(test_user_dn, 'employeeNumber=%s' % description["S1_MODRDN"], newsuperior=BASE_DISTINGUISHED, delold=1) + ents = description["S1"].search_s(BASE_DISTINGUISHED, ldap.SCOPE_SUBTREE, '(employeeNumber=%s)' % description["S1_MODRDN"]) + assert len(ents) == 1 + time.sleep(1) + + # Step 6 + # More recent update + # check that the entry on M2 contains employeeNumber= + description["S2"].rename_s(test_user_dn, 'employeeNumber=%s' % description["S2_MODRDN"], newsuperior=BASE_DISTINGUISHED, delold=1) + ents = description["S2"].search_s(BASE_DISTINGUISHED, ldap.SCOPE_SUBTREE, '(employeeNumber=%s)' % description["S2_MODRDN"]) + assert len(ents) == 1 + time.sleep(1) + + (no, new_test_user_dn) = _employeenumber_user_get_dn(description["S1_MODRDN"]) + description["S1"].modify_s(new_test_user_dn, description["S1_MOD"]) + ents = description["S1"].search_s(BASE_DISTINGUISHED, ldap.SCOPE_SUBTREE, '(employeeNumber=%s)' % value_S1) + assert len(ents) == 1 + + _resume_ra_M2_then_M1(M1, M2, M3) + + + #time.sleep(3600) + # Step 9 + # Check that M1 still contains employeeNumber= + ents = M1.search_s(BASE_DISTINGUISHED, ldap.SCOPE_SUBTREE, '(employeeNumber=%s)' % description["expected"]) + log.info('Search M1 employeeNumber=%s (vs. 
%s)' % (ents[0].getValue('employeeNumber'), description["expected"])) + assert len(ents) == 1 + assert ents[0].hasAttr('employeeNumber') and ents[0].getValue('employeeNumber') == description["expected"].encode() + + # Check that M2 still contains employeeNumber= + ents = M2.search_s(BASE_DISTINGUISHED, ldap.SCOPE_SUBTREE, '(employeeNumber=%s)' % description["expected"]) + log.info('Search M2 employeeNumber=%s (vs. %s)' % (ents[0].getValue('employeeNumber'), description["expected"])) + assert len(ents) == 1 + assert ents[0].hasAttr('employeeNumber') and ents[0].getValue('employeeNumber') == description["expected"].encode() + + # Check that M3 still contain employeeNumber and it contains employeeNumber= + ents = M3.search_s(BASE_DISTINGUISHED, ldap.SCOPE_SUBTREE, '(employeeNumber=*)') + assert len(ents) == MAX_EMPLOYEENUMBER_USER + ents = M3.search_s(BASE_DISTINGUISHED, ldap.SCOPE_SUBTREE, '(employeeNumber=%s)' % description["expected"]) + log.info('Search M3 employeeNumber=%s (vs. %s)' % (ents[0].getValue('employeeNumber'), description["expected"])) + assert len(ents) == 1 + assert ents[0].hasAttr('employeeNumber') and ents[0].getValue('employeeNumber') == description["expected"].encode() + +def test_ticket49658_20(topo): + """Do + M1: MODRDN -> V1 + M2: MODRDN -> V2 + M1: MOD(REPL) -> V1 + Replicate order: M1 then M2 + expected: V1 + + :id: a3df2f72-b8b1-4bb8-b0ca-ebd306539c8b + :setup: 3 Master Instances + 1. Use employeenumber=1000,ou=distinguished,ou=people, + :steps: + 1. Isolate M1 and M2 by pausing the replication agreements + 2. On M1 do DEL+ADD 1000 + MOD_ADD_13 + 3. On M2 do DEL+ADD 1000 + MOD_ADD_13 + 4. Enable replication agreement M2 -> M3, so that update step 2 is replicated first + 5. Enable replication agreement M1 -> M3, so that update step 3 is replicated second + 6. Check that the employeeNumber is 13 on all servers + :expectedresults: + 1. Fill in the result that is expected + 2. For each test step + """ + + # If you need any test suite initialization, + # please, write additional fixture for that (including finalizer). + # Topology for suites are predefined in lib389/topologies.py. + + # If you need host, port or any other data about instance, + # Please, use the instance object attributes for that (for example, topo.ms["master1"].serverid) + + + + if DEBUGGING: + # Add debugging steps(if any)... 
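+ # Hedged debugging sketch (assumption, not part of the original test): a small
+ # poll-until-found helper that could stand in for the fixed time.sleep() pauses used
+ # below; it relies only on search_s and time.sleep, both already used by this module.
+ # The helper name and timeout are illustrative, e.g.
+ # _wait_for_employeenumber(topo.ms["master3"], BASE_DISTINGUISHED, '5.1').
+ def _wait_for_employeenumber(inst, base, value, timeout=30):
+     for _ in range(timeout):
+         if len(inst.search_s(base, ldap.SCOPE_SUBTREE, '(employeeNumber=%s)' % value)) == 1:
+             return True
+         time.sleep(1)
+     return False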
+ pass + M1 = topo.ms["master1"] + M2 = topo.ms["master2"] + M3 = topo.ms["master3"] + value_init = '3' + last = '5' + value_S1 = '5.1' + value_S2 = '5.2' + + description = { + "S1": M1, + "S2": M2, + "S1_MODRDN": value_S1, + "S1_MOD": [(ldap.MOD_REPLACE, 'employeeNumber', value_S1.encode())], + "S2_MODRDN": value_S2, + "expected": value_S1} + + # This test takes the user_1 + (uid, test_user_dn) = _employeenumber_user_get_dn(int(last)) + + # + # Step 4 + # + # disable all RA from M1 and M2 + # only M3 can replicate the update + # + agreement_m1_m2 = M1.agreement.list(suffix=DEFAULT_SUFFIX, consumer_host=M2.host, consumer_port=M2.port) + agreement_m1_m3 = M1.agreement.list(suffix=DEFAULT_SUFFIX, consumer_host=M3.host, consumer_port=M3.port) + agreement_m2_m1 = M2.agreement.list(suffix=DEFAULT_SUFFIX, consumer_host=M1.host, consumer_port=M1.port) + agreement_m2_m3 = M2.agreement.list(suffix=DEFAULT_SUFFIX, consumer_host=M3.host, consumer_port=M3.port) + + M1.agreement.pause(agreement_m1_m2[0].dn) + M1.agreement.pause(agreement_m1_m3[0].dn) + M2.agreement.pause(agreement_m2_m1[0].dn) + M2.agreement.pause(agreement_m2_m3[0].dn) + + # Step 5 + # Oldest update + # check that the entry on M1 contains employeeNumber= + description["S1"].rename_s(test_user_dn, 'employeeNumber=%s' % description["S1_MODRDN"], newsuperior=BASE_DISTINGUISHED, delold=1) + ents = description["S1"].search_s(BASE_DISTINGUISHED, ldap.SCOPE_SUBTREE, '(employeeNumber=%s)' % description["S1_MODRDN"]) + assert len(ents) == 1 + time.sleep(1) + + # Step 6 + # More recent update + # check that the entry on M2 contains employeeNumber= + description["S2"].rename_s(test_user_dn, 'employeeNumber=%s' % description["S2_MODRDN"], newsuperior=BASE_DISTINGUISHED, delold=1) + ents = description["S2"].search_s(BASE_DISTINGUISHED, ldap.SCOPE_SUBTREE, '(employeeNumber=%s)' % description["S2_MODRDN"]) + assert len(ents) == 1 + time.sleep(1) + + (no, new_test_user_dn) = _employeenumber_user_get_dn(description["S1_MODRDN"]) + description["S1"].modify_s(new_test_user_dn, description["S1_MOD"]) + ents = description["S1"].search_s(BASE_DISTINGUISHED, ldap.SCOPE_SUBTREE, '(employeeNumber=%s)' % value_S1) + assert len(ents) == 1 + + _resume_ra_M1_then_M2(M1, M2, M3) + + #time.sleep(3600) + # Step 9 + # Check that M1 still contains employeeNumber= + ents = M1.search_s(BASE_DISTINGUISHED, ldap.SCOPE_SUBTREE, '(employeeNumber=%s)' % description["expected"]) + log.info('Search M1 employeeNumber=%s (vs. %s)' % (ents[0].getValue('employeeNumber'), description["expected"])) + assert len(ents) == 1 + assert ents[0].hasAttr('employeeNumber') and ents[0].getValue('employeeNumber') == description["expected"].encode() + + # Check that M2 still contains employeeNumber= + ents = M2.search_s(BASE_DISTINGUISHED, ldap.SCOPE_SUBTREE, '(employeeNumber=%s)' % description["expected"]) + log.info('Search M2 employeeNumber=%s (vs. %s)' % (ents[0].getValue('employeeNumber'), description["expected"])) + assert len(ents) == 1 + assert ents[0].hasAttr('employeeNumber') and ents[0].getValue('employeeNumber') == description["expected"].encode() + + # Check that M3 still contain employeeNumber and it contains employeeNumber= + ents = M3.search_s(BASE_DISTINGUISHED, ldap.SCOPE_SUBTREE, '(employeeNumber=*)') + assert len(ents) == MAX_EMPLOYEENUMBER_USER + ents = M3.search_s(BASE_DISTINGUISHED, ldap.SCOPE_SUBTREE, '(employeeNumber=%s)' % description["expected"]) + log.info('Search M3 employeeNumber=%s (vs. 
%s)' % (ents[0].getValue('employeeNumber'), description["expected"])) + assert len(ents) == 1 + assert ents[0].hasAttr('employeeNumber') and ents[0].getValue('employeeNumber') == description["expected"].encode() + +def test_ticket49658_21(topo): + """Do + M1: MODRDN -> V1 + M2: MODRDN -> V2 + M1: MOD(DEL/ADD) -> V1 + Replicate order: M2 then M1 + expected: V1 + + :id: f338188c-6877-4a2e-bbb1-14b81ac7668a + :setup: 3 Master Instances + 1. Use employeenumber=1000,ou=distinguished,ou=people, + :steps: + 1. Isolate M1 and M2 by pausing the replication agreements + 2. On M1 do DEL+ADD 1000 + MOD_ADD_13 + 3. On M2 do DEL+ADD 1000 + MOD_ADD_13 + 4. Enable replication agreement M2 -> M3, so that update step 2 is replicated first + 5. Enable replication agreement M1 -> M3, so that update step 3 is replicated second + 6. Check that the employeeNumber is 13 on all servers + :expectedresults: + 1. Fill in the result that is expected + 2. For each test step + """ + + # If you need any test suite initialization, + # please, write additional fixture for that (including finalizer). + # Topology for suites are predefined in lib389/topologies.py. + + # If you need host, port or any other data about instance, + # Please, use the instance object attributes for that (for example, topo.ms["master1"].serverid) + + + + if DEBUGGING: + # Add debugging steps(if any)... + pass + M1 = topo.ms["master1"] + M2 = topo.ms["master2"] + M3 = topo.ms["master3"] + value_init = '3' + last = '6' + value_S1 = '6.1' + value_S2 = '6.2' + + description = { + "S1": M1, + "S2": M2, + "S1_MODRDN": value_S1, + "S1_MOD": [(ldap.MOD_DELETE, 'employeeNumber', value_S1.encode()),(ldap.MOD_ADD, 'employeeNumber', value_S1.encode())], + "S2_MODRDN": value_S2, + "expected": value_S1} + + # This test takes the user_1 + (uid, test_user_dn) = _employeenumber_user_get_dn(int(last)) + + # + # Step 4 + # + # disable all RA from M1 and M2 + # only M3 can replicate the update + # + agreement_m1_m2 = M1.agreement.list(suffix=DEFAULT_SUFFIX, consumer_host=M2.host, consumer_port=M2.port) + agreement_m1_m3 = M1.agreement.list(suffix=DEFAULT_SUFFIX, consumer_host=M3.host, consumer_port=M3.port) + agreement_m2_m1 = M2.agreement.list(suffix=DEFAULT_SUFFIX, consumer_host=M1.host, consumer_port=M1.port) + agreement_m2_m3 = M2.agreement.list(suffix=DEFAULT_SUFFIX, consumer_host=M3.host, consumer_port=M3.port) + + M1.agreement.pause(agreement_m1_m2[0].dn) + M1.agreement.pause(agreement_m1_m3[0].dn) + M2.agreement.pause(agreement_m2_m1[0].dn) + M2.agreement.pause(agreement_m2_m3[0].dn) + + # Step 5 + # Oldest update + # check that the entry on M1 contains employeeNumber= + description["S1"].rename_s(test_user_dn, 'employeeNumber=%s' % description["S1_MODRDN"], newsuperior=BASE_DISTINGUISHED, delold=1) + ents = description["S1"].search_s(BASE_DISTINGUISHED, ldap.SCOPE_SUBTREE, '(employeeNumber=%s)' % description["S1_MODRDN"]) + assert len(ents) == 1 + time.sleep(1) + + # Step 6 + # More recent update + # check that the entry on M2 contains employeeNumber= + description["S2"].rename_s(test_user_dn, 'employeeNumber=%s' % description["S2_MODRDN"], newsuperior=BASE_DISTINGUISHED, delold=1) + ents = description["S2"].search_s(BASE_DISTINGUISHED, ldap.SCOPE_SUBTREE, '(employeeNumber=%s)' % description["S2_MODRDN"]) + assert len(ents) == 1 + time.sleep(1) + + (no, new_test_user_dn) = _employeenumber_user_get_dn(description["S1_MODRDN"]) + description["S1"].modify_s(new_test_user_dn, description["S1_MOD"]) + ents = description["S1"].search_s(BASE_DISTINGUISHED, 
ldap.SCOPE_SUBTREE, '(employeeNumber=%s)' % value_S1) + assert len(ents) == 1 + + _resume_ra_M2_then_M1(M1, M2, M3) + + #time.sleep(3600) + # Step 9 + # Check that M1 still contains employeeNumber= + ents = M1.search_s(BASE_DISTINGUISHED, ldap.SCOPE_SUBTREE, '(employeeNumber=%s)' % description["expected"]) + log.info('Search M1 employeeNumber=%s (vs. %s)' % (ents[0].getValue('employeeNumber'), description["expected"])) + assert len(ents) == 1 + assert ents[0].hasAttr('employeeNumber') and ents[0].getValue('employeeNumber') == description["expected"].encode() + + # Check that M2 still contains employeeNumber= + ents = M2.search_s(BASE_DISTINGUISHED, ldap.SCOPE_SUBTREE, '(employeeNumber=%s)' % description["expected"]) + log.info('Search M2 employeeNumber=%s (vs. %s)' % (ents[0].getValue('employeeNumber'), description["expected"])) + assert len(ents) == 1 + assert ents[0].hasAttr('employeeNumber') and ents[0].getValue('employeeNumber') == description["expected"].encode() + + # Check that M3 still contain employeeNumber and it contains employeeNumber= + ents = M3.search_s(BASE_DISTINGUISHED, ldap.SCOPE_SUBTREE, '(employeeNumber=*)') + assert len(ents) == MAX_EMPLOYEENUMBER_USER + ents = M3.search_s(BASE_DISTINGUISHED, ldap.SCOPE_SUBTREE, '(employeeNumber=%s)' % description["expected"]) + log.info('Search M3 employeeNumber=%s (vs. %s)' % (ents[0].getValue('employeeNumber'), description["expected"])) + assert len(ents) == 1 + assert ents[0].hasAttr('employeeNumber') and ents[0].getValue('employeeNumber') == description["expected"].encode() + +def test_ticket49658_22(topo): + """Do + M1: MODRDN -> V1 + M2: MODRDN -> V2 + M1: MOD(DEL/ADD) -> V1 + Replicate: M1 then M2 + expected: V1 + + :id: f3b33f52-d5c7-4b49-89cf-3cbe4b060674 + :setup: 3 Master Instances + 1. Use employeenumber=1000,ou=distinguished,ou=people, + :steps: + 1. Isolate M1 and M2 by pausing the replication agreements + 2. On M1 do DEL+ADD 1000 + MOD_ADD_13 + 3. On M2 do DEL+ADD 1000 + MOD_ADD_13 + 4. Enable replication agreement M2 -> M3, so that update step 2 is replicated first + 5. Enable replication agreement M1 -> M3, so that update step 3 is replicated second + 6. Check that the employeeNumber is 13 on all servers + :expectedresults: + 1. Fill in the result that is expected + 2. For each test step + """ + + # If you need any test suite initialization, + # please, write additional fixture for that (including finalizer). + # Topology for suites are predefined in lib389/topologies.py. + + # If you need host, port or any other data about instance, + # Please, use the instance object attributes for that (for example, topo.ms["master1"].serverid) + + + + if DEBUGGING: + # Add debugging steps(if any)... 
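+ # Hedged debugging sketch (assumption, not part of the original test): show which
+ # entry this scenario is about to rename, resolved the same way as in the test body
+ # below via _employeenumber_user_get_dn(). The dbg_* names are illustrative only.
+ (dbg_uid, dbg_dn) = _employeenumber_user_get_dn(7)
+ log.info('test_ticket49658_22 will rename %s (uid %s)' % (dbg_dn, dbg_uid))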
+ pass + M1 = topo.ms["master1"] + M2 = topo.ms["master2"] + M3 = topo.ms["master3"] + value_init = '3' + last = '7' + value_S1 = '7.1' + value_S2 = '7.2' + + description = { + "S1": M1, + "S2": M2, + "S1_MODRDN": value_S1, + "S1_MOD": [(ldap.MOD_DELETE, 'employeeNumber', value_S1.encode()),(ldap.MOD_ADD, 'employeeNumber', value_S1.encode())], + "S2_MODRDN": value_S2, + "expected": value_S1} + + # This test takes the user_1 + (uid, test_user_dn) = _employeenumber_user_get_dn(int(last)) + + # + # Step 4 + # + # disable all RA from M1 and M2 + # only M3 can replicate the update + # + agreement_m1_m2 = M1.agreement.list(suffix=DEFAULT_SUFFIX, consumer_host=M2.host, consumer_port=M2.port) + agreement_m1_m3 = M1.agreement.list(suffix=DEFAULT_SUFFIX, consumer_host=M3.host, consumer_port=M3.port) + agreement_m2_m1 = M2.agreement.list(suffix=DEFAULT_SUFFIX, consumer_host=M1.host, consumer_port=M1.port) + agreement_m2_m3 = M2.agreement.list(suffix=DEFAULT_SUFFIX, consumer_host=M3.host, consumer_port=M3.port) + + M1.agreement.pause(agreement_m1_m2[0].dn) + M1.agreement.pause(agreement_m1_m3[0].dn) + M2.agreement.pause(agreement_m2_m1[0].dn) + M2.agreement.pause(agreement_m2_m3[0].dn) + + # Step 5 + # Oldest update + # check that the entry on M1 contains employeeNumber= + description["S1"].rename_s(test_user_dn, 'employeeNumber=%s' % description["S1_MODRDN"], newsuperior=BASE_DISTINGUISHED, delold=1) + ents = description["S1"].search_s(BASE_DISTINGUISHED, ldap.SCOPE_SUBTREE, '(employeeNumber=%s)' % description["S1_MODRDN"]) + assert len(ents) == 1 + time.sleep(1) + + # Step 6 + # More recent update + # check that the entry on M2 contains employeeNumber= + description["S2"].rename_s(test_user_dn, 'employeeNumber=%s' % description["S2_MODRDN"], newsuperior=BASE_DISTINGUISHED, delold=1) + ents = description["S2"].search_s(BASE_DISTINGUISHED, ldap.SCOPE_SUBTREE, '(employeeNumber=%s)' % description["S2_MODRDN"]) + assert len(ents) == 1 + time.sleep(1) + + (no, new_test_user_dn) = _employeenumber_user_get_dn(description["S1_MODRDN"]) + description["S1"].modify_s(new_test_user_dn, description["S1_MOD"]) + ents = description["S1"].search_s(BASE_DISTINGUISHED, ldap.SCOPE_SUBTREE, '(employeeNumber=%s)' % value_S1) + assert len(ents) == 1 + + _resume_ra_M1_then_M2(M1, M2, M3) + + #time.sleep(3600) + # Step 9 + # Check that M1 still contains employeeNumber= + ents = M1.search_s(BASE_DISTINGUISHED, ldap.SCOPE_SUBTREE, '(employeeNumber=%s)' % description["expected"]) + log.info('Search M1 employeeNumber=%s (vs. %s)' % (ents[0].getValue('employeeNumber'), description["expected"])) + assert len(ents) == 1 + assert ents[0].hasAttr('employeeNumber') and ents[0].getValue('employeeNumber') == description["expected"].encode() + + # Check that M2 still contains employeeNumber= + ents = M2.search_s(BASE_DISTINGUISHED, ldap.SCOPE_SUBTREE, '(employeeNumber=%s)' % description["expected"]) + log.info('Search M2 employeeNumber=%s (vs. %s)' % (ents[0].getValue('employeeNumber'), description["expected"])) + assert len(ents) == 1 + assert ents[0].hasAttr('employeeNumber') and ents[0].getValue('employeeNumber') == description["expected"].encode() + + # Check that M3 still contain employeeNumber and it contains employeeNumber= + ents = M3.search_s(BASE_DISTINGUISHED, ldap.SCOPE_SUBTREE, '(employeeNumber=*)') + assert len(ents) == MAX_EMPLOYEENUMBER_USER + ents = M3.search_s(BASE_DISTINGUISHED, ldap.SCOPE_SUBTREE, '(employeeNumber=%s)' % description["expected"]) + log.info('Search M3 employeeNumber=%s (vs. 
%s)' % (ents[0].getValue('employeeNumber'), description["expected"])) + assert len(ents) == 1 + assert ents[0].hasAttr('employeeNumber') and ents[0].getValue('employeeNumber') == description["expected"].encode() + +def test_ticket49658_23(topo): + """Do + M1: MODRDN -> V1 + M2: MODRDN -> V2 + M1: MOD(REPL) -> V1 + M2: MOD(REPL) -> V2 + Replicate order: M2 then M1 + expected: V2 + + :id: 2c550174-33a0-4666-8abf-f3362e19ae29 + :setup: 3 Master Instances + 1. Use employeenumber=1000,ou=distinguished,ou=people, + :steps: + 1. Isolate M1 and M2 by pausing the replication agreements + 2. On M1 do DEL+ADD 1000 + MOD_ADD_13 + 3. On M2 do DEL+ADD 1000 + MOD_ADD_13 + 4. Enable replication agreement M2 -> M3, so that update step 2 is replicated first + 5. Enable replication agreement M1 -> M3, so that update step 3 is replicated second + 6. Check that the employeeNumber is 13 on all servers + :expectedresults: + 1. Fill in the result that is expected + 2. For each test step + """ + + # If you need any test suite initialization, + # please, write additional fixture for that (including finalizer). + # Topology for suites are predefined in lib389/topologies.py. + + # If you need host, port or any other data about instance, + # Please, use the instance object attributes for that (for example, topo.ms["master1"].serverid) + + + + if DEBUGGING: + # Add debugging steps(if any)... + pass + M1 = topo.ms["master1"] + M2 = topo.ms["master2"] + M3 = topo.ms["master3"] + value_init = '7' + last = '8' + value_S1 = '8.1' + value_S2 = '8.2' + + description = { + "S1": M1, + "S2": M2, + "S1_MODRDN": value_S1, + "S1_MOD": [(ldap.MOD_REPLACE, 'employeeNumber', value_S1.encode())], + "S2_MOD": [(ldap.MOD_REPLACE, 'employeeNumber', value_S2.encode())], + "S2_MODRDN": value_S2, + "expected": value_S2} + + # This test takes the user_1 + (uid, test_user_dn) = _employeenumber_user_get_dn(int(last)) + + # + # Step 4 + # + # disable all RA from M1 and M2 + # only M3 can replicate the update + # + agreement_m1_m2 = M1.agreement.list(suffix=DEFAULT_SUFFIX, consumer_host=M2.host, consumer_port=M2.port) + agreement_m1_m3 = M1.agreement.list(suffix=DEFAULT_SUFFIX, consumer_host=M3.host, consumer_port=M3.port) + agreement_m2_m1 = M2.agreement.list(suffix=DEFAULT_SUFFIX, consumer_host=M1.host, consumer_port=M1.port) + agreement_m2_m3 = M2.agreement.list(suffix=DEFAULT_SUFFIX, consumer_host=M3.host, consumer_port=M3.port) + + M1.agreement.pause(agreement_m1_m2[0].dn) + M1.agreement.pause(agreement_m1_m3[0].dn) + M2.agreement.pause(agreement_m2_m1[0].dn) + M2.agreement.pause(agreement_m2_m3[0].dn) + + # Step 5 + # Oldest update + # check that the entry on M1 contains employeeNumber= + description["S1"].rename_s(test_user_dn, 'employeeNumber=%s' % description["S1_MODRDN"], newsuperior=BASE_DISTINGUISHED, delold=1) + ents = description["S1"].search_s(BASE_DISTINGUISHED, ldap.SCOPE_SUBTREE, '(employeeNumber=%s)' % description["S1_MODRDN"]) + assert len(ents) == 1 + time.sleep(1) + + # Step 6 + # More recent update + # check that the entry on M2 contains employeeNumber= + description["S2"].rename_s(test_user_dn, 'employeeNumber=%s' % description["S2_MODRDN"], newsuperior=BASE_DISTINGUISHED, delold=1) + ents = description["S2"].search_s(BASE_DISTINGUISHED, ldap.SCOPE_SUBTREE, '(employeeNumber=%s)' % description["S2_MODRDN"]) + assert len(ents) == 1 + time.sleep(1) + + (no, new_test_user_dn) = _employeenumber_user_get_dn(description["S1_MODRDN"]) + description["S1"].modify_s(new_test_user_dn, description["S1_MOD"]) + ents = 
description["S1"].search_s(BASE_DISTINGUISHED, ldap.SCOPE_SUBTREE, '(employeeNumber=%s)' % value_S1) + assert len(ents) == 1 + time.sleep(1) + + (no, new_test_user_dn) = _employeenumber_user_get_dn(description["S2_MODRDN"]) + description["S2"].modify_s(new_test_user_dn, description["S2_MOD"]) + ents = description["S2"].search_s(BASE_DISTINGUISHED, ldap.SCOPE_SUBTREE, '(employeeNumber=%s)' % value_S2) + assert len(ents) == 1 + + _resume_ra_M2_then_M1(M1, M2, M3) + + #time.sleep(3600) + # Step 9 + # Check that M1 still contains employeeNumber= + ents = M1.search_s(BASE_DISTINGUISHED, ldap.SCOPE_SUBTREE, '(employeeNumber=%s)' % description["expected"]) + log.info('Search M1 employeeNumber=%s (vs. %s)' % (ents[0].getValue('employeeNumber'), description["expected"])) + assert len(ents) == 1 + assert ents[0].hasAttr('employeeNumber') and ents[0].getValue('employeeNumber') == description["expected"].encode() + + # Check that M2 still contains employeeNumber= + ents = M2.search_s(BASE_DISTINGUISHED, ldap.SCOPE_SUBTREE, '(employeeNumber=%s)' % description["expected"]) + log.info('Search M2 employeeNumber=%s (vs. %s)' % (ents[0].getValue('employeeNumber'), description["expected"])) + assert len(ents) == 1 + assert ents[0].hasAttr('employeeNumber') and ents[0].getValue('employeeNumber') == description["expected"].encode() + + # Check that M3 still contain employeeNumber and it contains employeeNumber= + ents = M3.search_s(BASE_DISTINGUISHED, ldap.SCOPE_SUBTREE, '(employeeNumber=*)') + assert len(ents) == MAX_EMPLOYEENUMBER_USER + ents = M3.search_s(BASE_DISTINGUISHED, ldap.SCOPE_SUBTREE, '(employeeNumber=%s)' % description["expected"]) + log.info('Search M3 employeeNumber=%s (vs. %s)' % (ents[0].getValue('employeeNumber'), description["expected"])) + assert len(ents) == 1 + assert ents[0].hasAttr('employeeNumber') and ents[0].getValue('employeeNumber') == description["expected"].encode() + +def test_ticket49658_24(topo): + """Do + M1: MODRDN -> V1 + M2: MODRDN -> V2 + M1: MOD(REPL) -> V1 + M2: MOD(REPL) -> V2 + Replicate order: M1 then M2 + expected: V2 + + :id: af6a472c-29e3-4833-a5dc-d96c684d33f9 + :setup: 3 Master Instances + 1. Use employeenumber=1000,ou=distinguished,ou=people, + :steps: + 1. Isolate M1 and M2 by pausing the replication agreements + 2. On M1 do DEL+ADD 1000 + MOD_ADD_13 + 3. On M2 do DEL+ADD 1000 + MOD_ADD_13 + 4. Enable replication agreement M2 -> M3, so that update step 2 is replicated first + 5. Enable replication agreement M1 -> M3, so that update step 3 is replicated second + 6. Check that the employeeNumber is 13 on all servers + :expectedresults: + 1. Fill in the result that is expected + 2. For each test step + """ + + # If you need any test suite initialization, + # please, write additional fixture for that (including finalizer). + # Topology for suites are predefined in lib389/topologies.py. + + # If you need host, port or any other data about instance, + # Please, use the instance object attributes for that (for example, topo.ms["master1"].serverid) + + + + if DEBUGGING: + # Add debugging steps(if any)... 
+ pass + M1 = topo.ms["master1"] + M2 = topo.ms["master2"] + M3 = topo.ms["master3"] + value_init = '7' + last = '9' + value_S1 = '9.1' + value_S2 = '9.2' + + description = { + "S1": M1, + "S2": M2, + "S1_MODRDN": value_S1, + "S1_MOD": [(ldap.MOD_REPLACE, 'employeeNumber', value_S1.encode())], + "S2_MOD": [(ldap.MOD_REPLACE, 'employeeNumber', value_S2.encode())], + "S2_MODRDN": value_S2, + "expected": value_S2} + + # This test takes the user_1 + (uid, test_user_dn) = _employeenumber_user_get_dn(int(last)) + + # + # Step 4 + # + # disable all RA from M1 and M2 + # only M3 can replicate the update + # + agreement_m1_m2 = M1.agreement.list(suffix=DEFAULT_SUFFIX, consumer_host=M2.host, consumer_port=M2.port) + agreement_m1_m3 = M1.agreement.list(suffix=DEFAULT_SUFFIX, consumer_host=M3.host, consumer_port=M3.port) + agreement_m2_m1 = M2.agreement.list(suffix=DEFAULT_SUFFIX, consumer_host=M1.host, consumer_port=M1.port) + agreement_m2_m3 = M2.agreement.list(suffix=DEFAULT_SUFFIX, consumer_host=M3.host, consumer_port=M3.port) + + M1.agreement.pause(agreement_m1_m2[0].dn) + M1.agreement.pause(agreement_m1_m3[0].dn) + M2.agreement.pause(agreement_m2_m1[0].dn) + M2.agreement.pause(agreement_m2_m3[0].dn) + + # Step 5 + # Oldest update + # check that the entry on M1 contains employeeNumber= + description["S1"].rename_s(test_user_dn, 'employeeNumber=%s' % description["S1_MODRDN"], newsuperior=BASE_DISTINGUISHED, delold=1) + ents = description["S1"].search_s(BASE_DISTINGUISHED, ldap.SCOPE_SUBTREE, '(employeeNumber=%s)' % description["S1_MODRDN"]) + assert len(ents) == 1 + time.sleep(1) + + # Step 6 + # More recent update + # check that the entry on M2 contains employeeNumber= + description["S2"].rename_s(test_user_dn, 'employeeNumber=%s' % description["S2_MODRDN"], newsuperior=BASE_DISTINGUISHED, delold=1) + ents = description["S2"].search_s(BASE_DISTINGUISHED, ldap.SCOPE_SUBTREE, '(employeeNumber=%s)' % description["S2_MODRDN"]) + assert len(ents) == 1 + time.sleep(1) + + (no, new_test_user_dn) = _employeenumber_user_get_dn(description["S1_MODRDN"]) + description["S1"].modify_s(new_test_user_dn, description["S1_MOD"]) + ents = description["S1"].search_s(BASE_DISTINGUISHED, ldap.SCOPE_SUBTREE, '(employeeNumber=%s)' % value_S1) + assert len(ents) == 1 + time.sleep(1) + + (no, new_test_user_dn) = _employeenumber_user_get_dn(description["S2_MODRDN"]) + description["S2"].modify_s(new_test_user_dn, description["S2_MOD"]) + ents = description["S2"].search_s(BASE_DISTINGUISHED, ldap.SCOPE_SUBTREE, '(employeeNumber=%s)' % value_S2) + assert len(ents) == 1 + + _resume_ra_M1_then_M2(M1, M2, M3) + + #time.sleep(3600) + # Step 9 + # Check that M1 still contains employeeNumber= + ents = M1.search_s(BASE_DISTINGUISHED, ldap.SCOPE_SUBTREE, '(employeeNumber=%s)' % description["expected"]) + log.info('Search M1 employeeNumber=%s (vs. %s)' % (ents[0].getValue('employeeNumber'), description["expected"])) + assert len(ents) == 1 + assert ents[0].hasAttr('employeeNumber') and ents[0].getValue('employeeNumber') == description["expected"].encode() + + # Check that M2 still contains employeeNumber= + ents = M2.search_s(BASE_DISTINGUISHED, ldap.SCOPE_SUBTREE, '(employeeNumber=%s)' % description["expected"]) + log.info('Search M2 employeeNumber=%s (vs. 
%s)' % (ents[0].getValue('employeeNumber'), description["expected"])) + assert len(ents) == 1 + assert ents[0].hasAttr('employeeNumber') and ents[0].getValue('employeeNumber') == description["expected"].encode() + + # Check that M3 still contain employeeNumber and it contains employeeNumber= + ents = M3.search_s(BASE_DISTINGUISHED, ldap.SCOPE_SUBTREE, '(employeeNumber=*)') + assert len(ents) == MAX_EMPLOYEENUMBER_USER + ents = M3.search_s(BASE_DISTINGUISHED, ldap.SCOPE_SUBTREE, '(employeeNumber=%s)' % description["expected"]) + log.info('Search M3 employeeNumber=%s (vs. %s)' % (ents[0].getValue('employeeNumber'), description["expected"])) + assert len(ents) == 1 + assert ents[0].hasAttr('employeeNumber') and ents[0].getValue('employeeNumber') == description["expected"].encode() + +def test_ticket49658_25(topo): + """Do + M1: MODRDN -> V1 + M2: MODRDN -> V2 + M1: MOD(REPL) -> V1 + M2: MOD(DEL/ADD) -> V2 + Replicate order: M1 then M2 + expected: V2 + + :id: df2cba7c-7afa-44b3-b1df-261e8bf0c9b4 + :setup: 3 Master Instances + 1. Use employeenumber=1000,ou=distinguished,ou=people, + :steps: + 1. Isolate M1 and M2 by pausing the replication agreements + 2. On M1 do DEL+ADD 1000 + MOD_ADD_13 + 3. On M2 do DEL+ADD 1000 + MOD_ADD_13 + 4. Enable replication agreement M2 -> M3, so that update step 2 is replicated first + 5. Enable replication agreement M1 -> M3, so that update step 3 is replicated second + 6. Check that the employeeNumber is 13 on all servers + :expectedresults: + 1. Fill in the result that is expected + 2. For each test step + """ + + # If you need any test suite initialization, + # please, write additional fixture for that (including finalizer). + # Topology for suites are predefined in lib389/topologies.py. + + # If you need host, port or any other data about instance, + # Please, use the instance object attributes for that (for example, topo.ms["master1"].serverid) + + + + if DEBUGGING: + # Add debugging steps(if any)... 
+ pass + M1 = topo.ms["master1"] + M2 = topo.ms["master2"] + M3 = topo.ms["master3"] + value_init = '7' + last = '10' + value_S1 = '10.1' + value_S2 = '10.2' + + description = { + "S1": M1, + "S2": M2, + "S1_MODRDN": value_S1, + "S1_MOD": [(ldap.MOD_REPLACE, 'employeeNumber', value_S1.encode())], + "S2_MOD": [(ldap.MOD_DELETE, 'employeeNumber', value_S2.encode()),(ldap.MOD_ADD, 'employeeNumber', value_S2.encode())], + "S2_MODRDN": value_S2, + "expected": value_S2} + + # This test takes the user_1 + (uid, test_user_dn) = _employeenumber_user_get_dn(int(last)) + + # + # Step 4 + # + # disable all RA from M1 and M2 + # only M3 can replicate the update + # + agreement_m1_m2 = M1.agreement.list(suffix=DEFAULT_SUFFIX, consumer_host=M2.host, consumer_port=M2.port) + agreement_m1_m3 = M1.agreement.list(suffix=DEFAULT_SUFFIX, consumer_host=M3.host, consumer_port=M3.port) + agreement_m2_m1 = M2.agreement.list(suffix=DEFAULT_SUFFIX, consumer_host=M1.host, consumer_port=M1.port) + agreement_m2_m3 = M2.agreement.list(suffix=DEFAULT_SUFFIX, consumer_host=M3.host, consumer_port=M3.port) + + M1.agreement.pause(agreement_m1_m2[0].dn) + M1.agreement.pause(agreement_m1_m3[0].dn) + M2.agreement.pause(agreement_m2_m1[0].dn) + M2.agreement.pause(agreement_m2_m3[0].dn) + + # Step 5 + # Oldest update + # check that the entry on M1 contains employeeNumber= + description["S1"].rename_s(test_user_dn, 'employeeNumber=%s' % description["S1_MODRDN"], newsuperior=BASE_DISTINGUISHED, delold=1) + ents = description["S1"].search_s(BASE_DISTINGUISHED, ldap.SCOPE_SUBTREE, '(employeeNumber=%s)' % description["S1_MODRDN"]) + assert len(ents) == 1 + time.sleep(1) + + # Step 6 + # More recent update + # check that the entry on M2 contains employeeNumber= + description["S2"].rename_s(test_user_dn, 'employeeNumber=%s' % description["S2_MODRDN"], newsuperior=BASE_DISTINGUISHED, delold=1) + ents = description["S2"].search_s(BASE_DISTINGUISHED, ldap.SCOPE_SUBTREE, '(employeeNumber=%s)' % description["S2_MODRDN"]) + assert len(ents) == 1 + time.sleep(1) + + (no, new_test_user_dn) = _employeenumber_user_get_dn(description["S1_MODRDN"]) + description["S1"].modify_s(new_test_user_dn, description["S1_MOD"]) + ents = description["S1"].search_s(BASE_DISTINGUISHED, ldap.SCOPE_SUBTREE, '(employeeNumber=%s)' % value_S1) + assert len(ents) == 1 + time.sleep(1) + + (no, new_test_user_dn) = _employeenumber_user_get_dn(description["S2_MODRDN"]) + description["S2"].modify_s(new_test_user_dn, description["S2_MOD"]) + ents = description["S2"].search_s(BASE_DISTINGUISHED, ldap.SCOPE_SUBTREE, '(employeeNumber=%s)' % value_S2) + assert len(ents) == 1 + + _resume_ra_M1_then_M2(M1, M2, M3) + + #time.sleep(3600) + # Step 9 + # Check that M1 still contains employeeNumber= + ents = M1.search_s(BASE_DISTINGUISHED, ldap.SCOPE_SUBTREE, '(employeeNumber=%s)' % description["expected"]) + log.info('Search M1 employeeNumber=%s (vs. %s)' % (ents[0].getValue('employeeNumber'), description["expected"])) + assert len(ents) == 1 + assert ents[0].hasAttr('employeeNumber') and ents[0].getValue('employeeNumber') == description["expected"].encode() + + # Check that M2 still contains employeeNumber= + ents = M2.search_s(BASE_DISTINGUISHED, ldap.SCOPE_SUBTREE, '(employeeNumber=%s)' % description["expected"]) + log.info('Search M2 employeeNumber=%s (vs. 
%s)' % (ents[0].getValue('employeeNumber'), description["expected"])) + assert len(ents) == 1 + assert ents[0].hasAttr('employeeNumber') and ents[0].getValue('employeeNumber') == description["expected"].encode() + + # Check that M3 still contain employeeNumber and it contains employeeNumber= + ents = M3.search_s(BASE_DISTINGUISHED, ldap.SCOPE_SUBTREE, '(employeeNumber=*)') + assert len(ents) == MAX_EMPLOYEENUMBER_USER + ents = M3.search_s(BASE_DISTINGUISHED, ldap.SCOPE_SUBTREE, '(employeeNumber=%s)' % description["expected"]) + log.info('Search M3 employeeNumber=%s (vs. %s)' % (ents[0].getValue('employeeNumber'), description["expected"])) + assert len(ents) == 1 + assert ents[0].hasAttr('employeeNumber') and ents[0].getValue('employeeNumber') == description["expected"].encode() + +def test_ticket49658_26(topo): + """Do + M1: MODRDN -> V1 + M2: MODRDN -> V2 + M1: MOD(REPL) -> V1 + M2: MOD(DEL/ADD) -> V2 + Replicate order: M2 then M1 + expected: V2 + + :id: 8e9f85d3-22cc-4a84-a828-cec29202821f + :setup: 3 Master Instances + 1. Use employeenumber=1000,ou=distinguished,ou=people, + :steps: + 1. Isolate M1 and M2 by pausing the replication agreements + 2. On M1 do DEL+ADD 1000 + MOD_ADD_13 + 3. On M2 do DEL+ADD 1000 + MOD_ADD_13 + 4. Enable replication agreement M2 -> M3, so that update step 2 is replicated first + 5. Enable replication agreement M1 -> M3, so that update step 3 is replicated second + 6. Check that the employeeNumber is 13 on all servers + :expectedresults: + 1. Fill in the result that is expected + 2. For each test step + """ + + # If you need any test suite initialization, + # please, write additional fixture for that (including finalizer). + # Topology for suites are predefined in lib389/topologies.py. + + # If you need host, port or any other data about instance, + # Please, use the instance object attributes for that (for example, topo.ms["master1"].serverid) + + + + if DEBUGGING: + # Add debugging steps(if any)... 
+ pass + M1 = topo.ms["master1"] + M2 = topo.ms["master2"] + M3 = topo.ms["master3"] + value_init = '7' + last = '11' + value_S1 = '11.1' + value_S2 = '11.2' + + description = { + "S1": M1, + "S2": M2, + "S1_MODRDN": value_S1, + "S1_MOD": [(ldap.MOD_REPLACE, 'employeeNumber', value_S1.encode())], + "S2_MOD": [(ldap.MOD_DELETE, 'employeeNumber', value_S2.encode()),(ldap.MOD_ADD, 'employeeNumber', value_S2.encode())], + "S2_MODRDN": value_S2, + "expected": value_S2} + + # This test takes the user_1 + (uid, test_user_dn) = _employeenumber_user_get_dn(int(last)) + + # + # Step 4 + # + # disable all RA from M1 and M2 + # only M3 can replicate the update + # + agreement_m1_m2 = M1.agreement.list(suffix=DEFAULT_SUFFIX, consumer_host=M2.host, consumer_port=M2.port) + agreement_m1_m3 = M1.agreement.list(suffix=DEFAULT_SUFFIX, consumer_host=M3.host, consumer_port=M3.port) + agreement_m2_m1 = M2.agreement.list(suffix=DEFAULT_SUFFIX, consumer_host=M1.host, consumer_port=M1.port) + agreement_m2_m3 = M2.agreement.list(suffix=DEFAULT_SUFFIX, consumer_host=M3.host, consumer_port=M3.port) + + M1.agreement.pause(agreement_m1_m2[0].dn) + M1.agreement.pause(agreement_m1_m3[0].dn) + M2.agreement.pause(agreement_m2_m1[0].dn) + M2.agreement.pause(agreement_m2_m3[0].dn) + + # Step 5 + # Oldest update + # check that the entry on M1 contains employeeNumber= + description["S1"].rename_s(test_user_dn, 'employeeNumber=%s' % description["S1_MODRDN"], newsuperior=BASE_DISTINGUISHED, delold=1) + ents = description["S1"].search_s(BASE_DISTINGUISHED, ldap.SCOPE_SUBTREE, '(employeeNumber=%s)' % description["S1_MODRDN"]) + assert len(ents) == 1 + time.sleep(1) + + # Step 6 + # More recent update + # check that the entry on M2 contains employeeNumber= + description["S2"].rename_s(test_user_dn, 'employeeNumber=%s' % description["S2_MODRDN"], newsuperior=BASE_DISTINGUISHED, delold=1) + ents = description["S2"].search_s(BASE_DISTINGUISHED, ldap.SCOPE_SUBTREE, '(employeeNumber=%s)' % description["S2_MODRDN"]) + assert len(ents) == 1 + time.sleep(1) + + (no, new_test_user_dn) = _employeenumber_user_get_dn(description["S1_MODRDN"]) + description["S1"].modify_s(new_test_user_dn, description["S1_MOD"]) + ents = description["S1"].search_s(BASE_DISTINGUISHED, ldap.SCOPE_SUBTREE, '(employeeNumber=%s)' % value_S1) + assert len(ents) == 1 + time.sleep(1) + + (no, new_test_user_dn) = _employeenumber_user_get_dn(description["S2_MODRDN"]) + description["S2"].modify_s(new_test_user_dn, description["S2_MOD"]) + ents = description["S2"].search_s(BASE_DISTINGUISHED, ldap.SCOPE_SUBTREE, '(employeeNumber=%s)' % value_S2) + assert len(ents) == 1 + + _resume_ra_M2_then_M1(M1, M2, M3) + + #time.sleep(3600) + # Step 9 + # Check that M1 still contains employeeNumber= + ents = M1.search_s(BASE_DISTINGUISHED, ldap.SCOPE_SUBTREE, '(employeeNumber=%s)' % description["expected"]) + log.info('Search M1 employeeNumber=%s (vs. %s)' % (ents[0].getValue('employeeNumber'), description["expected"])) + assert len(ents) == 1 + assert ents[0].hasAttr('employeeNumber') and ents[0].getValue('employeeNumber') == description["expected"].encode() + + # Check that M2 still contains employeeNumber= + ents = M2.search_s(BASE_DISTINGUISHED, ldap.SCOPE_SUBTREE, '(employeeNumber=%s)' % description["expected"]) + log.info('Search M2 employeeNumber=%s (vs. 
%s)' % (ents[0].getValue('employeeNumber'), description["expected"])) + assert len(ents) == 1 + assert ents[0].hasAttr('employeeNumber') and ents[0].getValue('employeeNumber') == description["expected"].encode() + + # Check that M3 still contain employeeNumber and it contains employeeNumber= + ents = M3.search_s(BASE_DISTINGUISHED, ldap.SCOPE_SUBTREE, '(employeeNumber=*)') + assert len(ents) == MAX_EMPLOYEENUMBER_USER + ents = M3.search_s(BASE_DISTINGUISHED, ldap.SCOPE_SUBTREE, '(employeeNumber=%s)' % description["expected"]) + log.info('Search M3 employeeNumber=%s (vs. %s)' % (ents[0].getValue('employeeNumber'), description["expected"])) + assert len(ents) == 1 + assert ents[0].hasAttr('employeeNumber') and ents[0].getValue('employeeNumber') == description["expected"].encode() + +def test_ticket49658_27(topo): + """Do + M1: MODRDN -> V1 + M2: MODRDN -> V2 + M1: MOD(DEL/ADD) -> V1 + M2: MOD(REPL) -> V2 + Replicate order: M1 then M2 + expected: V2 + + :id: d85bd9ef-b257-4027-a29c-dfba87c0bf51 + :setup: 3 Master Instances + 1. Use employeenumber=1000,ou=distinguished,ou=people, + :steps: + 1. Isolate M1 and M2 by pausing the replication agreements + 2. On M1 do DEL+ADD 1000 + MOD_ADD_13 + 3. On M2 do DEL+ADD 1000 + MOD_ADD_13 + 4. Enable replication agreement M2 -> M3, so that update step 2 is replicated first + 5. Enable replication agreement M1 -> M3, so that update step 3 is replicated second + 6. Check that the employeeNumber is 13 on all servers + :expectedresults: + 1. Fill in the result that is expected + 2. For each test step + """ + + # If you need any test suite initialization, + # please, write additional fixture for that (including finalizer). + # Topology for suites are predefined in lib389/topologies.py. + + # If you need host, port or any other data about instance, + # Please, use the instance object attributes for that (for example, topo.ms["master1"].serverid) + + + + if DEBUGGING: + # Add debugging steps(if any)... 
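+ # Hedged debugging sketch (assumption, not part of the original test): summarise the
+ # two competing update sequences of this scenario in the log, so the expected winner
+ # is visible next to the later search results ('12.1'/'12.2' match the values set below).
+ log.info('test_ticket49658_27: M1 MODRDN+MOD(DEL/ADD) -> 12.1, M2 MODRDN+MOD(REPL) -> 12.2, expected winner 12.2')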
+ pass + M1 = topo.ms["master1"] + M2 = topo.ms["master2"] + M3 = topo.ms["master3"] + value_init = '7' + last = '12' + value_S1 = '12.1' + value_S2 = '12.2' + + description = { + "S1": M1, + "S2": M2, + "S1_MODRDN": value_S1, + "S1_MOD": [(ldap.MOD_DELETE, 'employeeNumber', value_S1.encode()),(ldap.MOD_ADD, 'employeeNumber', value_S1.encode())], + "S2_MOD": [(ldap.MOD_REPLACE, 'employeeNumber', value_S2.encode())], + "S2_MODRDN": value_S2, + "expected": value_S2} + + # This test takes the user_1 + (uid, test_user_dn) = _employeenumber_user_get_dn(int(last)) + + # + # Step 4 + # + # disable all RA from M1 and M2 + # only M3 can replicate the update + # + agreement_m1_m2 = M1.agreement.list(suffix=DEFAULT_SUFFIX, consumer_host=M2.host, consumer_port=M2.port) + agreement_m1_m3 = M1.agreement.list(suffix=DEFAULT_SUFFIX, consumer_host=M3.host, consumer_port=M3.port) + agreement_m2_m1 = M2.agreement.list(suffix=DEFAULT_SUFFIX, consumer_host=M1.host, consumer_port=M1.port) + agreement_m2_m3 = M2.agreement.list(suffix=DEFAULT_SUFFIX, consumer_host=M3.host, consumer_port=M3.port) + + M1.agreement.pause(agreement_m1_m2[0].dn) + M1.agreement.pause(agreement_m1_m3[0].dn) + M2.agreement.pause(agreement_m2_m1[0].dn) + M2.agreement.pause(agreement_m2_m3[0].dn) + + # Step 5 + # Oldest update + # check that the entry on M1 contains employeeNumber= + description["S1"].rename_s(test_user_dn, 'employeeNumber=%s' % description["S1_MODRDN"], newsuperior=BASE_DISTINGUISHED, delold=1) + ents = description["S1"].search_s(BASE_DISTINGUISHED, ldap.SCOPE_SUBTREE, '(employeeNumber=%s)' % description["S1_MODRDN"]) + assert len(ents) == 1 + time.sleep(1) + + # Step 6 + # More recent update + # check that the entry on M2 contains employeeNumber= + description["S2"].rename_s(test_user_dn, 'employeeNumber=%s' % description["S2_MODRDN"], newsuperior=BASE_DISTINGUISHED, delold=1) + ents = description["S2"].search_s(BASE_DISTINGUISHED, ldap.SCOPE_SUBTREE, '(employeeNumber=%s)' % description["S2_MODRDN"]) + assert len(ents) == 1 + time.sleep(1) + + (no, new_test_user_dn) = _employeenumber_user_get_dn(description["S1_MODRDN"]) + description["S1"].modify_s(new_test_user_dn, description["S1_MOD"]) + ents = description["S1"].search_s(BASE_DISTINGUISHED, ldap.SCOPE_SUBTREE, '(employeeNumber=%s)' % value_S1) + assert len(ents) == 1 + time.sleep(1) + + (no, new_test_user_dn) = _employeenumber_user_get_dn(description["S2_MODRDN"]) + description["S2"].modify_s(new_test_user_dn, description["S2_MOD"]) + ents = description["S2"].search_s(BASE_DISTINGUISHED, ldap.SCOPE_SUBTREE, '(employeeNumber=%s)' % value_S2) + assert len(ents) == 1 + + _resume_ra_M1_then_M2(M1, M2, M3) + + #time.sleep(3600) + # Step 9 + # Check that M1 still contains employeeNumber= + ents = M1.search_s(BASE_DISTINGUISHED, ldap.SCOPE_SUBTREE, '(employeeNumber=%s)' % description["expected"]) + log.info('Search M1 employeeNumber=%s (vs. %s)' % (ents[0].getValue('employeeNumber'), description["expected"])) + assert len(ents) == 1 + assert ents[0].hasAttr('employeeNumber') and ents[0].getValue('employeeNumber') == description["expected"].encode() + + # Check that M2 still contains employeeNumber= + ents = M2.search_s(BASE_DISTINGUISHED, ldap.SCOPE_SUBTREE, '(employeeNumber=%s)' % description["expected"]) + log.info('Search M2 employeeNumber=%s (vs. 
%s)' % (ents[0].getValue('employeeNumber'), description["expected"])) + assert len(ents) == 1 + assert ents[0].hasAttr('employeeNumber') and ents[0].getValue('employeeNumber') == description["expected"].encode() + + # Check that M3 still contain employeeNumber and it contains employeeNumber= + ents = M3.search_s(BASE_DISTINGUISHED, ldap.SCOPE_SUBTREE, '(employeeNumber=*)') + assert len(ents) == MAX_EMPLOYEENUMBER_USER + ents = M3.search_s(BASE_DISTINGUISHED, ldap.SCOPE_SUBTREE, '(employeeNumber=%s)' % description["expected"]) + log.info('Search M3 employeeNumber=%s (vs. %s)' % (ents[0].getValue('employeeNumber'), description["expected"])) + assert len(ents) == 1 + assert ents[0].hasAttr('employeeNumber') and ents[0].getValue('employeeNumber') == description["expected"].encode() + +def test_ticket49658_28(topo): + """Do + M1: MODRDN -> V1 + M2: MODRDN -> V2 + M1: MOD(DEL/ADD) -> V1 + M2: MOD(REPL) -> V2 + Replicate order: M2 then M1 + expected: V2 + + :id: 286cd17e-225e-490f-83c9-20618b9407a9 + :setup: 3 Master Instances + 1. Use employeenumber=1000,ou=distinguished,ou=people, + :steps: + 1. Isolate M1 and M2 by pausing the replication agreements + 2. On M1 do DEL+ADD 1000 + MOD_ADD_13 + 3. On M2 do DEL+ADD 1000 + MOD_ADD_13 + 4. Enable replication agreement M2 -> M3, so that update step 2 is replicated first + 5. Enable replication agreement M1 -> M3, so that update step 3 is replicated second + 6. Check that the employeeNumber is 13 on all servers + :expectedresults: + 1. Fill in the result that is expected + 2. For each test step + """ + + # If you need any test suite initialization, + # please, write additional fixture for that (including finalizer). + # Topology for suites are predefined in lib389/topologies.py. + + # If you need host, port or any other data about instance, + # Please, use the instance object attributes for that (for example, topo.ms["master1"].serverid) + + + + if DEBUGGING: + # Add debugging steps(if any)... 
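+    # The two conflicting modifies differ in shape: S1 deletes and re-adds the
+    # value in one operation, S2 replaces it.  With python-ldap both are plain
+    # modlists of (op, attribute, value) tuples passed to modify_s(), e.g. for
+    # this case:
+    #
+    #     M1.modify_s(dn, [(ldap.MOD_DELETE, 'employeeNumber', b'13.1'),
+    #                      (ldap.MOD_ADD, 'employeeNumber', b'13.1')])
+    #     M2.modify_s(dn, [(ldap.MOD_REPLACE, 'employeeNumber', b'13.2')])
+    #
+    # Per the docstring, both replication orders are expected to converge on
+    # V2, the value written last.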
+ pass + M1 = topo.ms["master1"] + M2 = topo.ms["master2"] + M3 = topo.ms["master3"] + value_init = '7' + last = '13' + value_S1 = '13.1' + value_S2 = '13.2' + + description = { + "S1": M1, + "S2": M2, + "S1_MODRDN": value_S1, + "S1_MOD": [(ldap.MOD_DELETE, 'employeeNumber', value_S1.encode()),(ldap.MOD_ADD, 'employeeNumber', value_S1.encode())], + "S2_MOD": [(ldap.MOD_REPLACE, 'employeeNumber', value_S2.encode())], + "S2_MODRDN": value_S2, + "expected": value_S2} + + # This test takes the user_1 + (uid, test_user_dn) = _employeenumber_user_get_dn(int(last)) + + # + # Step 4 + # + # disable all RA from M1 and M2 + # only M3 can replicate the update + # + agreement_m1_m2 = M1.agreement.list(suffix=DEFAULT_SUFFIX, consumer_host=M2.host, consumer_port=M2.port) + agreement_m1_m3 = M1.agreement.list(suffix=DEFAULT_SUFFIX, consumer_host=M3.host, consumer_port=M3.port) + agreement_m2_m1 = M2.agreement.list(suffix=DEFAULT_SUFFIX, consumer_host=M1.host, consumer_port=M1.port) + agreement_m2_m3 = M2.agreement.list(suffix=DEFAULT_SUFFIX, consumer_host=M3.host, consumer_port=M3.port) + + M1.agreement.pause(agreement_m1_m2[0].dn) + M1.agreement.pause(agreement_m1_m3[0].dn) + M2.agreement.pause(agreement_m2_m1[0].dn) + M2.agreement.pause(agreement_m2_m3[0].dn) + + # Step 5 + # Oldest update + # check that the entry on M1 contains employeeNumber= + description["S1"].rename_s(test_user_dn, 'employeeNumber=%s' % description["S1_MODRDN"], newsuperior=BASE_DISTINGUISHED, delold=1) + ents = description["S1"].search_s(BASE_DISTINGUISHED, ldap.SCOPE_SUBTREE, '(employeeNumber=%s)' % description["S1_MODRDN"]) + assert len(ents) == 1 + time.sleep(1) + + # Step 6 + # More recent update + # check that the entry on M2 contains employeeNumber= + description["S2"].rename_s(test_user_dn, 'employeeNumber=%s' % description["S2_MODRDN"], newsuperior=BASE_DISTINGUISHED, delold=1) + ents = description["S2"].search_s(BASE_DISTINGUISHED, ldap.SCOPE_SUBTREE, '(employeeNumber=%s)' % description["S2_MODRDN"]) + assert len(ents) == 1 + time.sleep(1) + + (no, new_test_user_dn) = _employeenumber_user_get_dn(description["S1_MODRDN"]) + description["S1"].modify_s(new_test_user_dn, description["S1_MOD"]) + ents = description["S1"].search_s(BASE_DISTINGUISHED, ldap.SCOPE_SUBTREE, '(employeeNumber=%s)' % value_S1) + assert len(ents) == 1 + time.sleep(1) + + (no, new_test_user_dn) = _employeenumber_user_get_dn(description["S2_MODRDN"]) + description["S2"].modify_s(new_test_user_dn, description["S2_MOD"]) + ents = description["S2"].search_s(BASE_DISTINGUISHED, ldap.SCOPE_SUBTREE, '(employeeNumber=%s)' % value_S2) + assert len(ents) == 1 + + _resume_ra_M2_then_M1(M1, M2, M3) + + #time.sleep(3600) + # Step 9 + # Check that M1 still contains employeeNumber= + ents = M1.search_s(BASE_DISTINGUISHED, ldap.SCOPE_SUBTREE, '(employeeNumber=%s)' % description["expected"]) + log.info('Search M1 employeeNumber=%s (vs. %s)' % (ents[0].getValue('employeeNumber'), description["expected"])) + assert len(ents) == 1 + assert ents[0].hasAttr('employeeNumber') and ents[0].getValue('employeeNumber') == description["expected"].encode() + + # Check that M2 still contains employeeNumber= + ents = M2.search_s(BASE_DISTINGUISHED, ldap.SCOPE_SUBTREE, '(employeeNumber=%s)' % description["expected"]) + log.info('Search M2 employeeNumber=%s (vs. 
%s)' % (ents[0].getValue('employeeNumber'), description["expected"])) + assert len(ents) == 1 + assert ents[0].hasAttr('employeeNumber') and ents[0].getValue('employeeNumber') == description["expected"].encode() + + # Check that M3 still contain employeeNumber and it contains employeeNumber= + ents = M3.search_s(BASE_DISTINGUISHED, ldap.SCOPE_SUBTREE, '(employeeNumber=*)') + assert len(ents) == MAX_EMPLOYEENUMBER_USER + ents = M3.search_s(BASE_DISTINGUISHED, ldap.SCOPE_SUBTREE, '(employeeNumber=%s)' % description["expected"]) + log.info('Search M3 employeeNumber=%s (vs. %s)' % (ents[0].getValue('employeeNumber'), description["expected"])) + assert len(ents) == 1 + assert ents[0].hasAttr('employeeNumber') and ents[0].getValue('employeeNumber') == description["expected"].encode() + + +def test_ticket49658_29(topo): + """Do + M1: MODRDN -> V1 + M2: MODRDN -> V2 + M1: MOD(DEL/ADD) -> V1 + M2: MOD(DEL/ADD) -> V2 + Replicate order: M1 then M2 + expected: V2 + + :id: b81f3885-7965-48fe-8dbf-692d1150d061 + :setup: 3 Master Instances + 1. Use employeenumber=1000,ou=distinguished,ou=people, + :steps: + 1. Isolate M1 and M2 by pausing the replication agreements + 2. On M1 do DEL+ADD 1000 + MOD_ADD_13 + 3. On M2 do DEL+ADD 1000 + MOD_ADD_13 + 4. Enable replication agreement M2 -> M3, so that update step 2 is replicated first + 5. Enable replication agreement M1 -> M3, so that update step 3 is replicated second + 6. Check that the employeeNumber is 13 on all servers + :expectedresults: + 1. Fill in the result that is expected + 2. For each test step + """ + + # If you need any test suite initialization, + # please, write additional fixture for that (including finalizer). + # Topology for suites are predefined in lib389/topologies.py. + + # If you need host, port or any other data about instance, + # Please, use the instance object attributes for that (for example, topo.ms["master1"].serverid) + + + + if DEBUGGING: + # Add debugging steps(if any)... 
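+    # rename_s() below performs the MODRDN: the entry stays under
+    # BASE_DISTINGUISHED (passed as newsuperior) and receives a new RDN of the
+    # form employeeNumber=<value>; delold=1 asks the server to remove the old
+    # RDN value from the entry, leaving only the new employeeNumber.  The
+    # relevant part of the python-ldap signature is:
+    #
+    #     rename_s(dn, newrdn, newsuperior=None, delold=1)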
+ pass + M1 = topo.ms["master1"] + M2 = topo.ms["master2"] + M3 = topo.ms["master3"] + value_init = '7' + last = '14' + value_S1 = '14.1' + value_S2 = '14.2' + + description = { + "S1": M1, + "S2": M2, + "S1_MODRDN": value_S1, + "S1_MOD": [(ldap.MOD_DELETE, 'employeeNumber', value_S1.encode()),(ldap.MOD_ADD, 'employeeNumber', value_S1.encode())], + "S2_MOD": [(ldap.MOD_DELETE, 'employeeNumber', value_S2.encode()),(ldap.MOD_ADD, 'employeeNumber', value_S2.encode())], + "S2_MODRDN": value_S2, + "expected": value_S2} + + # This test takes the user_1 + (uid, test_user_dn) = _employeenumber_user_get_dn(int(last)) + + # + # Step 4 + # + # disable all RA from M1 and M2 + # only M3 can replicate the update + # + agreement_m1_m2 = M1.agreement.list(suffix=DEFAULT_SUFFIX, consumer_host=M2.host, consumer_port=M2.port) + agreement_m1_m3 = M1.agreement.list(suffix=DEFAULT_SUFFIX, consumer_host=M3.host, consumer_port=M3.port) + agreement_m2_m1 = M2.agreement.list(suffix=DEFAULT_SUFFIX, consumer_host=M1.host, consumer_port=M1.port) + agreement_m2_m3 = M2.agreement.list(suffix=DEFAULT_SUFFIX, consumer_host=M3.host, consumer_port=M3.port) + + M1.agreement.pause(agreement_m1_m2[0].dn) + M1.agreement.pause(agreement_m1_m3[0].dn) + M2.agreement.pause(agreement_m2_m1[0].dn) + M2.agreement.pause(agreement_m2_m3[0].dn) + + # Step 5 + # Oldest update + # check that the entry on M1 contains employeeNumber= + description["S1"].rename_s(test_user_dn, 'employeeNumber=%s' % description["S1_MODRDN"], newsuperior=BASE_DISTINGUISHED, delold=1) + ents = description["S1"].search_s(BASE_DISTINGUISHED, ldap.SCOPE_SUBTREE, '(employeeNumber=%s)' % description["S1_MODRDN"]) + assert len(ents) == 1 + time.sleep(1) + + # Step 6 + # More recent update + # check that the entry on M2 contains employeeNumber= + description["S2"].rename_s(test_user_dn, 'employeeNumber=%s' % description["S2_MODRDN"], newsuperior=BASE_DISTINGUISHED, delold=1) + ents = description["S2"].search_s(BASE_DISTINGUISHED, ldap.SCOPE_SUBTREE, '(employeeNumber=%s)' % description["S2_MODRDN"]) + assert len(ents) == 1 + time.sleep(1) + + (no, new_test_user_dn) = _employeenumber_user_get_dn(description["S1_MODRDN"]) + description["S1"].modify_s(new_test_user_dn, description["S1_MOD"]) + ents = description["S1"].search_s(BASE_DISTINGUISHED, ldap.SCOPE_SUBTREE, '(employeeNumber=%s)' % value_S1) + assert len(ents) == 1 + time.sleep(1) + + (no, new_test_user_dn) = _employeenumber_user_get_dn(description["S2_MODRDN"]) + description["S2"].modify_s(new_test_user_dn, description["S2_MOD"]) + ents = description["S2"].search_s(BASE_DISTINGUISHED, ldap.SCOPE_SUBTREE, '(employeeNumber=%s)' % value_S2) + assert len(ents) == 1 + + _resume_ra_M1_then_M2(M1, M2, M3) + + #time.sleep(3600) + # Step 9 + # Check that M1 still contains employeeNumber= + ents = M1.search_s(BASE_DISTINGUISHED, ldap.SCOPE_SUBTREE, '(employeeNumber=%s)' % description["expected"]) + log.info('Search M1 employeeNumber=%s (vs. %s)' % (ents[0].getValue('employeeNumber'), description["expected"])) + assert len(ents) == 1 + assert ents[0].hasAttr('employeeNumber') and ents[0].getValue('employeeNumber') == description["expected"].encode() + + # Check that M2 still contains employeeNumber= + ents = M2.search_s(BASE_DISTINGUISHED, ldap.SCOPE_SUBTREE, '(employeeNumber=%s)' % description["expected"]) + log.info('Search M2 employeeNumber=%s (vs. 
%s)' % (ents[0].getValue('employeeNumber'), description["expected"])) + assert len(ents) == 1 + assert ents[0].hasAttr('employeeNumber') and ents[0].getValue('employeeNumber') == description["expected"].encode() + + # Check that M3 still contain employeeNumber and it contains employeeNumber= + ents = M3.search_s(BASE_DISTINGUISHED, ldap.SCOPE_SUBTREE, '(employeeNumber=*)') + assert len(ents) == MAX_EMPLOYEENUMBER_USER + ents = M3.search_s(BASE_DISTINGUISHED, ldap.SCOPE_SUBTREE, '(employeeNumber=%s)' % description["expected"]) + log.info('Search M3 employeeNumber=%s (vs. %s)' % (ents[0].getValue('employeeNumber'), description["expected"])) + assert len(ents) == 1 + assert ents[0].hasAttr('employeeNumber') and ents[0].getValue('employeeNumber') == description["expected"].encode() + +def test_ticket49658_30(topo): + """Do + M1: MODRDN -> V1 + M2: MODRDN -> V2 + M1: MOD(DEL/ADD) -> V1 + M2: MOD(DEL/ADD) -> V2 + Replicate order: M2 then M1 + expected: V2 + + :id: 4dce88f8-31db-488b-aeb4-fce4173e3f12 + :setup: 3 Master Instances + 1. Use employeenumber=1000,ou=distinguished,ou=people, + :steps: + 1. Isolate M1 and M2 by pausing the replication agreements + 2. On M1 do DEL+ADD 1000 + MOD_ADD_13 + 3. On M2 do DEL+ADD 1000 + MOD_ADD_13 + 4. Enable replication agreement M2 -> M3, so that update step 2 is replicated first + 5. Enable replication agreement M1 -> M3, so that update step 3 is replicated second + 6. Check that the employeeNumber is 13 on all servers + :expectedresults: + 1. Fill in the result that is expected + 2. For each test step + """ + + # If you need any test suite initialization, + # please, write additional fixture for that (including finalizer). + # Topology for suites are predefined in lib389/topologies.py. + + # If you need host, port or any other data about instance, + # Please, use the instance object attributes for that (for example, topo.ms["master1"].serverid) + + + + if DEBUGGING: + # Add debugging steps(if any)... 
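+    # Note on the assertions used throughout these cases: under Python 3,
+    # python-ldap returns attribute values as bytes, so getValue('employeeNumber')
+    # yields e.g. b'15.2' here; the expected value is therefore compared with
+    # description["expected"].encode() rather than with the plain string.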
+ pass + M1 = topo.ms["master1"] + M2 = topo.ms["master2"] + M3 = topo.ms["master3"] + value_init = '7' + last = '15' + value_S1 = '15.1' + value_S2 = '15.2' + + description = { + "S1": M1, + "S2": M2, + "S1_MODRDN": value_S1, + "S1_MOD": [(ldap.MOD_DELETE, 'employeeNumber', value_S1.encode()),(ldap.MOD_ADD, 'employeeNumber', value_S1.encode())], + "S2_MOD": [(ldap.MOD_DELETE, 'employeeNumber', value_S2.encode()),(ldap.MOD_ADD, 'employeeNumber', value_S2.encode())], + "S2_MODRDN": value_S2, + "expected": value_S2} + + # This test takes the user_1 + (uid, test_user_dn) = _employeenumber_user_get_dn(int(last)) + + # + # Step 4 + # + # disable all RA from M1 and M2 + # only M3 can replicate the update + # + agreement_m1_m2 = M1.agreement.list(suffix=DEFAULT_SUFFIX, consumer_host=M2.host, consumer_port=M2.port) + agreement_m1_m3 = M1.agreement.list(suffix=DEFAULT_SUFFIX, consumer_host=M3.host, consumer_port=M3.port) + agreement_m2_m1 = M2.agreement.list(suffix=DEFAULT_SUFFIX, consumer_host=M1.host, consumer_port=M1.port) + agreement_m2_m3 = M2.agreement.list(suffix=DEFAULT_SUFFIX, consumer_host=M3.host, consumer_port=M3.port) + + M1.agreement.pause(agreement_m1_m2[0].dn) + M1.agreement.pause(agreement_m1_m3[0].dn) + M2.agreement.pause(agreement_m2_m1[0].dn) + M2.agreement.pause(agreement_m2_m3[0].dn) + + # Step 5 + # Oldest update + # check that the entry on M1 contains employeeNumber= + description["S1"].rename_s(test_user_dn, 'employeeNumber=%s' % description["S1_MODRDN"], newsuperior=BASE_DISTINGUISHED, delold=1) + ents = description["S1"].search_s(BASE_DISTINGUISHED, ldap.SCOPE_SUBTREE, '(employeeNumber=%s)' % description["S1_MODRDN"]) + assert len(ents) == 1 + time.sleep(1) + + # Step 6 + # More recent update + # check that the entry on M2 contains employeeNumber= + description["S2"].rename_s(test_user_dn, 'employeeNumber=%s' % description["S2_MODRDN"], newsuperior=BASE_DISTINGUISHED, delold=1) + ents = description["S2"].search_s(BASE_DISTINGUISHED, ldap.SCOPE_SUBTREE, '(employeeNumber=%s)' % description["S2_MODRDN"]) + assert len(ents) == 1 + time.sleep(1) + + (no, new_test_user_dn) = _employeenumber_user_get_dn(description["S1_MODRDN"]) + description["S1"].modify_s(new_test_user_dn, description["S1_MOD"]) + ents = description["S1"].search_s(BASE_DISTINGUISHED, ldap.SCOPE_SUBTREE, '(employeeNumber=%s)' % value_S1) + assert len(ents) == 1 + time.sleep(1) + + (no, new_test_user_dn) = _employeenumber_user_get_dn(description["S2_MODRDN"]) + description["S2"].modify_s(new_test_user_dn, description["S2_MOD"]) + ents = description["S2"].search_s(BASE_DISTINGUISHED, ldap.SCOPE_SUBTREE, '(employeeNumber=%s)' % value_S2) + assert len(ents) == 1 + + _resume_ra_M2_then_M1(M1, M2, M3) + + #time.sleep(3600) + # Step 9 + # Check that M1 still contains employeeNumber= + ents = M1.search_s(BASE_DISTINGUISHED, ldap.SCOPE_SUBTREE, '(employeeNumber=%s)' % description["expected"]) + log.info('Search M1 employeeNumber=%s (vs. %s)' % (ents[0].getValue('employeeNumber'), description["expected"])) + assert len(ents) == 1 + assert ents[0].hasAttr('employeeNumber') and ents[0].getValue('employeeNumber') == description["expected"].encode() + + # Check that M2 still contains employeeNumber= + ents = M2.search_s(BASE_DISTINGUISHED, ldap.SCOPE_SUBTREE, '(employeeNumber=%s)' % description["expected"]) + log.info('Search M2 employeeNumber=%s (vs. 
%s)' % (ents[0].getValue('employeeNumber'), description["expected"])) + assert len(ents) == 1 + assert ents[0].hasAttr('employeeNumber') and ents[0].getValue('employeeNumber') == description["expected"].encode() + + # Check that M3 still contain employeeNumber and it contains employeeNumber= + ents = M3.search_s(BASE_DISTINGUISHED, ldap.SCOPE_SUBTREE, '(employeeNumber=*)') + assert len(ents) == MAX_EMPLOYEENUMBER_USER + ents = M3.search_s(BASE_DISTINGUISHED, ldap.SCOPE_SUBTREE, '(employeeNumber=%s)' % description["expected"]) + log.info('Search M3 employeeNumber=%s (vs. %s)' % (ents[0].getValue('employeeNumber'), description["expected"])) + assert len(ents) == 1 + assert ents[0].hasAttr('employeeNumber') and ents[0].getValue('employeeNumber') == description["expected"].encode() + +def test_ticket49658_31(topo): + """Do + M1: MODRDN -> V1 + M2: MODRDN -> V2 + M1: MOD(REPL) -> V1 + M2: MOD(REPL) -> V2 + M2: MODRDN -> V1 + Replicate order: M2 then M1 + expected: V1 + + :id: 2791a3df-25a2-4e6e-a5e9-514d76af43fb + :setup: 3 Master Instances + 1. Use employeenumber=1000,ou=distinguished,ou=people, + :steps: + 1. Isolate M1 and M2 by pausing the replication agreements + 2. On M1 do DEL+ADD 1000 + MOD_ADD_13 + 3. On M2 do DEL+ADD 1000 + MOD_ADD_13 + 4. Enable replication agreement M2 -> M3, so that update step 2 is replicated first + 5. Enable replication agreement M1 -> M3, so that update step 3 is replicated second + 6. Check that the employeeNumber is 13 on all servers + :expectedresults: + 1. Fill in the result that is expected + 2. For each test step + """ + + # If you need any test suite initialization, + # please, write additional fixture for that (including finalizer). + # Topology for suites are predefined in lib389/topologies.py. + + # If you need host, port or any other data about instance, + # Please, use the instance object attributes for that (for example, topo.ms["master1"].serverid) + + + + if DEBUGGING: + # Add debugging steps(if any)... 
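+    # Unlike cases 27-30, M2 issues a second MODRDN that renames the entry back
+    # to the S1 value after its MOD(REPL), so the most recent change in the
+    # whole sequence is a rename to V1.  The expected converged value is
+    # therefore V1 (description["expected"]), whichever agreement is resumed
+    # first.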
+ pass + M1 = topo.ms["master1"] + M2 = topo.ms["master2"] + M3 = topo.ms["master3"] + value_init = '7' + last = '16' + value_S1 = '16.1' + value_S2 = '16.2' + + description = { + "S1": M1, + "S2": M2, + "S1_MODRDN": value_S1, + "S1_MOD": [(ldap.MOD_REPLACE, 'employeeNumber', value_S1.encode())], + "S2_MOD": [(ldap.MOD_REPLACE, 'employeeNumber', value_S2.encode())], + "S2_MODRDN_1": value_S2, + "S2_MODRDN_2": value_S1, + "expected": value_S1} + + # This test takes the user_1 + (uid, test_user_dn) = _employeenumber_user_get_dn(int(last)) + + # + # Step 4 + # + # disable all RA from M1 and M2 + # only M3 can replicate the update + # + agreement_m1_m2 = M1.agreement.list(suffix=DEFAULT_SUFFIX, consumer_host=M2.host, consumer_port=M2.port) + agreement_m1_m3 = M1.agreement.list(suffix=DEFAULT_SUFFIX, consumer_host=M3.host, consumer_port=M3.port) + agreement_m2_m1 = M2.agreement.list(suffix=DEFAULT_SUFFIX, consumer_host=M1.host, consumer_port=M1.port) + agreement_m2_m3 = M2.agreement.list(suffix=DEFAULT_SUFFIX, consumer_host=M3.host, consumer_port=M3.port) + + M1.agreement.pause(agreement_m1_m2[0].dn) + M1.agreement.pause(agreement_m1_m3[0].dn) + M2.agreement.pause(agreement_m2_m1[0].dn) + M2.agreement.pause(agreement_m2_m3[0].dn) + + # Step 5 + # Oldest update + # check that the entry on M1 contains employeeNumber= + description["S1"].rename_s(test_user_dn, 'employeeNumber=%s' % description["S1_MODRDN"], newsuperior=BASE_DISTINGUISHED, delold=1) + ents = description["S1"].search_s(BASE_DISTINGUISHED, ldap.SCOPE_SUBTREE, '(employeeNumber=%s)' % description["S1_MODRDN"]) + assert len(ents) == 1 + time.sleep(1) + + # Step 6 + # More recent update + # check that the entry on M2 contains employeeNumber= + description["S2"].rename_s(test_user_dn, 'employeeNumber=%s' % description["S2_MODRDN_1"], newsuperior=BASE_DISTINGUISHED, delold=1) + ents = description["S2"].search_s(BASE_DISTINGUISHED, ldap.SCOPE_SUBTREE, '(employeeNumber=%s)' % description["S2_MODRDN_1"]) + assert len(ents) == 1 + time.sleep(1) + + (no, new_test_user_dn) = _employeenumber_user_get_dn(description["S1_MODRDN"]) + description["S1"].modify_s(new_test_user_dn, description["S1_MOD"]) + ents = description["S1"].search_s(BASE_DISTINGUISHED, ldap.SCOPE_SUBTREE, '(employeeNumber=%s)' % value_S1) + assert len(ents) == 1 + time.sleep(1) + + + (no, new_test_user_dn) = _employeenumber_user_get_dn(description["S2_MODRDN_1"]) + description["S2"].modify_s(new_test_user_dn, description["S2_MOD"]) + ents = description["S2"].search_s(BASE_DISTINGUISHED, ldap.SCOPE_SUBTREE, '(employeeNumber=%s)' % value_S2) + assert len(ents) == 1 + + description["S2"].rename_s(new_test_user_dn, 'employeeNumber=%s' % description["S2_MODRDN_2"], newsuperior=BASE_DISTINGUISHED, delold=1) + ents = description["S2"].search_s(BASE_DISTINGUISHED, ldap.SCOPE_SUBTREE, '(employeeNumber=%s)' % description["S2_MODRDN_2"]) + assert len(ents) == 1 + time.sleep(1) + + _resume_ra_M2_then_M1(M1, M2, M3) + + #time.sleep(3600) + # Step 9 + # Check that M1 still contains employeeNumber= + ents = M1.search_s(BASE_DISTINGUISHED, ldap.SCOPE_SUBTREE, '(employeeNumber=%s)' % description["expected"]) + log.info('Search M1 employeeNumber=%s (vs. 
%s)' % (ents[0].getValue('employeeNumber'), description["expected"])) + assert len(ents) == 1 + assert ents[0].hasAttr('employeeNumber') and ents[0].getValue('employeeNumber') == description["expected"].encode() + + # Check that M2 still contains employeeNumber= + ents = M2.search_s(BASE_DISTINGUISHED, ldap.SCOPE_SUBTREE, '(employeeNumber=%s)' % description["expected"]) + log.info('Search M2 employeeNumber=%s (vs. %s)' % (ents[0].getValue('employeeNumber'), description["expected"])) + assert len(ents) == 1 + assert ents[0].hasAttr('employeeNumber') and ents[0].getValue('employeeNumber') == description["expected"].encode() + + # Check that M3 still contain employeeNumber and it contains employeeNumber= + ents = M3.search_s(BASE_DISTINGUISHED, ldap.SCOPE_SUBTREE, '(employeeNumber=*)') + assert len(ents) == MAX_EMPLOYEENUMBER_USER + ents = M3.search_s(BASE_DISTINGUISHED, ldap.SCOPE_SUBTREE, '(employeeNumber=%s)' % description["expected"]) + log.info('Search M3 employeeNumber=%s (vs. %s)' % (ents[0].getValue('employeeNumber'), description["expected"])) + assert len(ents) == 1 + assert ents[0].hasAttr('employeeNumber') and ents[0].getValue('employeeNumber') == description["expected"].encode() + + +def test_ticket49658_32(topo): + """Do + M1: MODRDN -> V1 + M2: MODRDN -> V2 + M1: MOD(REPL) -> V1 + M2: MOD(REPL) -> V2 + M2: MODRDN -> V1 + Replicate order: M1 then M2 + expected: V1 + + :id: 6af57e2e-a325-474a-9c9d-f07cd2244657 + :setup: 3 Master Instances + 1. Use employeenumber=1000,ou=distinguished,ou=people, + :steps: + 1. Isolate M1 and M2 by pausing the replication agreements + 2. On M1 do DEL+ADD 1000 + MOD_ADD_13 + 3. On M2 do DEL+ADD 1000 + MOD_ADD_13 + 4. Enable replication agreement M2 -> M3, so that update step 2 is replicated first + 5. Enable replication agreement M1 -> M3, so that update step 3 is replicated second + 6. Check that the employeeNumber is 13 on all servers + :expectedresults: + 1. Fill in the result that is expected + 2. For each test step + """ + + # If you need any test suite initialization, + # please, write additional fixture for that (including finalizer). + # Topology for suites are predefined in lib389/topologies.py. + + # If you need host, port or any other data about instance, + # Please, use the instance object attributes for that (for example, topo.ms["master1"].serverid) + + + + if DEBUGGING: + # Add debugging steps(if any)... 
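+    # _employeenumber_user_get_dn() is a helper defined earlier in this module;
+    # from the way it is used here it returns a tuple whose second element is
+    # the DN 'employeeNumber=<value>,<BASE_DISTINGUISHED>' for the given value,
+    # which is why the test recomputes the DN after every MODRDN before issuing
+    # the next modify_s()/rename_s().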
+ pass + M1 = topo.ms["master1"] + M2 = topo.ms["master2"] + M3 = topo.ms["master3"] + value_init = '7' + last = '17' + value_S1 = '17.1' + value_S2 = '17.2' + + description = { + "S1": M1, + "S2": M2, + "S1_MODRDN": value_S1, + "S1_MOD": [(ldap.MOD_REPLACE, 'employeeNumber', value_S1.encode())], + "S2_MOD": [(ldap.MOD_REPLACE, 'employeeNumber', value_S2.encode())], + "S2_MODRDN_1": value_S2, + "S2_MODRDN_2": value_S1, + "expected": value_S1} + + # This test takes the user_1 + (uid, test_user_dn) = _employeenumber_user_get_dn(int(last)) + + # + # Step 4 + # + # disable all RA from M1 and M2 + # only M3 can replicate the update + # + agreement_m1_m2 = M1.agreement.list(suffix=DEFAULT_SUFFIX, consumer_host=M2.host, consumer_port=M2.port) + agreement_m1_m3 = M1.agreement.list(suffix=DEFAULT_SUFFIX, consumer_host=M3.host, consumer_port=M3.port) + agreement_m2_m1 = M2.agreement.list(suffix=DEFAULT_SUFFIX, consumer_host=M1.host, consumer_port=M1.port) + agreement_m2_m3 = M2.agreement.list(suffix=DEFAULT_SUFFIX, consumer_host=M3.host, consumer_port=M3.port) + + M1.agreement.pause(agreement_m1_m2[0].dn) + M1.agreement.pause(agreement_m1_m3[0].dn) + M2.agreement.pause(agreement_m2_m1[0].dn) + M2.agreement.pause(agreement_m2_m3[0].dn) + + # Step 5 + # Oldest update + # check that the entry on M1 contains employeeNumber= + description["S1"].rename_s(test_user_dn, 'employeeNumber=%s' % description["S1_MODRDN"], newsuperior=BASE_DISTINGUISHED, delold=1) + ents = description["S1"].search_s(BASE_DISTINGUISHED, ldap.SCOPE_SUBTREE, '(employeeNumber=%s)' % description["S1_MODRDN"]) + assert len(ents) == 1 + time.sleep(1) + + # Step 6 + # More recent update + # check that the entry on M2 contains employeeNumber= + description["S2"].rename_s(test_user_dn, 'employeeNumber=%s' % description["S2_MODRDN_1"], newsuperior=BASE_DISTINGUISHED, delold=1) + ents = description["S2"].search_s(BASE_DISTINGUISHED, ldap.SCOPE_SUBTREE, '(employeeNumber=%s)' % description["S2_MODRDN_1"]) + assert len(ents) == 1 + time.sleep(1) + + (no, new_test_user_dn) = _employeenumber_user_get_dn(description["S1_MODRDN"]) + description["S1"].modify_s(new_test_user_dn, description["S1_MOD"]) + ents = description["S1"].search_s(BASE_DISTINGUISHED, ldap.SCOPE_SUBTREE, '(employeeNumber=%s)' % value_S1) + assert len(ents) == 1 + time.sleep(1) + + + (no, new_test_user_dn) = _employeenumber_user_get_dn(description["S2_MODRDN_1"]) + description["S2"].modify_s(new_test_user_dn, description["S2_MOD"]) + ents = description["S2"].search_s(BASE_DISTINGUISHED, ldap.SCOPE_SUBTREE, '(employeeNumber=%s)' % value_S2) + assert len(ents) == 1 + + description["S2"].rename_s(new_test_user_dn, 'employeeNumber=%s' % description["S2_MODRDN_2"], newsuperior=BASE_DISTINGUISHED, delold=1) + ents = description["S2"].search_s(BASE_DISTINGUISHED, ldap.SCOPE_SUBTREE, '(employeeNumber=%s)' % description["S2_MODRDN_2"]) + assert len(ents) == 1 + time.sleep(1) + + _resume_ra_M1_then_M2(M1, M2, M3) + + #time.sleep(3600) + # Step 9 + # Check that M1 still contains employeeNumber= + ents = M1.search_s(BASE_DISTINGUISHED, ldap.SCOPE_SUBTREE, '(employeeNumber=%s)' % description["expected"]) + log.info('Search M1 employeeNumber=%s (vs. 
%s)' % (ents[0].getValue('employeeNumber'), description["expected"])) + assert len(ents) == 1 + assert ents[0].hasAttr('employeeNumber') and ents[0].getValue('employeeNumber') == description["expected"].encode() + + # Check that M2 still contains employeeNumber= + ents = M2.search_s(BASE_DISTINGUISHED, ldap.SCOPE_SUBTREE, '(employeeNumber=%s)' % description["expected"]) + log.info('Search M2 employeeNumber=%s (vs. %s)' % (ents[0].getValue('employeeNumber'), description["expected"])) + assert len(ents) == 1 + assert ents[0].hasAttr('employeeNumber') and ents[0].getValue('employeeNumber') == description["expected"].encode() + + # Check that M3 still contain employeeNumber and it contains employeeNumber= + ents = M3.search_s(BASE_DISTINGUISHED, ldap.SCOPE_SUBTREE, '(employeeNumber=*)') + assert len(ents) == MAX_EMPLOYEENUMBER_USER + ents = M3.search_s(BASE_DISTINGUISHED, ldap.SCOPE_SUBTREE, '(employeeNumber=%s)' % description["expected"]) + log.info('Search M3 employeeNumber=%s (vs. %s)' % (ents[0].getValue('employeeNumber'), description["expected"])) + assert len(ents) == 1 + assert ents[0].hasAttr('employeeNumber') and ents[0].getValue('employeeNumber') == description["expected"].encode() + +def test_ticket49658_33(topo): + """Do + M1: MODRDN -> V1 + M2: MODRDN -> V2 + M1: MOD(REPL) -> V1 + M2: MODRDN -> V1 + Replicate order: M2 then M1 + expected: V1 + + :id: 81100b04-d3b6-47df-90eb-d96ef14a3722 + :setup: 3 Master Instances + 1. Use employeenumber=1000,ou=distinguished,ou=people, + :steps: + 1. Isolate M1 and M2 by pausing the replication agreements + 2. On M1 do DEL+ADD 1000 + MOD_ADD_13 + 3. On M2 do DEL+ADD 1000 + MOD_ADD_13 + 4. Enable replication agreement M2 -> M3, so that update step 2 is replicated first + 5. Enable replication agreement M1 -> M3, so that update step 3 is replicated second + 6. Check that the employeeNumber is 13 on all servers + :expectedresults: + 1. Fill in the result that is expected + 2. For each test step + """ + + # If you need any test suite initialization, + # please, write additional fixture for that (including finalizer). + # Topology for suites are predefined in lib389/topologies.py. + + # If you need host, port or any other data about instance, + # Please, use the instance object attributes for that (for example, topo.ms["master1"].serverid) + + + + if DEBUGGING: + # Add debugging steps(if any)... 
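+    # Cases 33 and 34 drop the MOD on the second master entirely: M2 only
+    # renames the entry twice (to its own value, then back to the S1 value),
+    # while M1 renames once and then applies a MOD(REPL).  Per the docstring,
+    # both replication orders are still expected to converge on V1, since the
+    # last operation applied anywhere is the MODRDN back to V1.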
+ pass + M1 = topo.ms["master1"] + M2 = topo.ms["master2"] + M3 = topo.ms["master3"] + value_init = '7' + last = '18' + value_S1 = '18.1' + value_S2 = '18.2' + + description = { + "S1": M1, + "S2": M2, + "S1_MODRDN": value_S1, + "S1_MOD": [(ldap.MOD_REPLACE, 'employeeNumber', value_S1.encode())], + "S2_MODRDN_1": value_S2, + "S2_MODRDN_2": value_S1, + "expected": value_S1} + + # This test takes the user_1 + (uid, test_user_dn) = _employeenumber_user_get_dn(int(last)) + + # + # Step 4 + # + # disable all RA from M1 and M2 + # only M3 can replicate the update + # + agreement_m1_m2 = M1.agreement.list(suffix=DEFAULT_SUFFIX, consumer_host=M2.host, consumer_port=M2.port) + agreement_m1_m3 = M1.agreement.list(suffix=DEFAULT_SUFFIX, consumer_host=M3.host, consumer_port=M3.port) + agreement_m2_m1 = M2.agreement.list(suffix=DEFAULT_SUFFIX, consumer_host=M1.host, consumer_port=M1.port) + agreement_m2_m3 = M2.agreement.list(suffix=DEFAULT_SUFFIX, consumer_host=M3.host, consumer_port=M3.port) + + M1.agreement.pause(agreement_m1_m2[0].dn) + M1.agreement.pause(agreement_m1_m3[0].dn) + M2.agreement.pause(agreement_m2_m1[0].dn) + M2.agreement.pause(agreement_m2_m3[0].dn) + + # Step 5 + # Oldest update + # check that the entry on M1 contains employeeNumber= + description["S1"].rename_s(test_user_dn, 'employeeNumber=%s' % description["S1_MODRDN"], newsuperior=BASE_DISTINGUISHED, delold=1) + ents = description["S1"].search_s(BASE_DISTINGUISHED, ldap.SCOPE_SUBTREE, '(employeeNumber=%s)' % description["S1_MODRDN"]) + assert len(ents) == 1 + time.sleep(1) + + # Step 6 + # More recent update + # check that the entry on M2 contains employeeNumber= + description["S2"].rename_s(test_user_dn, 'employeeNumber=%s' % description["S2_MODRDN_1"], newsuperior=BASE_DISTINGUISHED, delold=1) + ents = description["S2"].search_s(BASE_DISTINGUISHED, ldap.SCOPE_SUBTREE, '(employeeNumber=%s)' % description["S2_MODRDN_1"]) + assert len(ents) == 1 + time.sleep(1) + + (no, new_test_user_dn) = _employeenumber_user_get_dn(description["S1_MODRDN"]) + description["S1"].modify_s(new_test_user_dn, description["S1_MOD"]) + ents = description["S1"].search_s(BASE_DISTINGUISHED, ldap.SCOPE_SUBTREE, '(employeeNumber=%s)' % value_S1) + assert len(ents) == 1 + time.sleep(1) + + (no, new_test_user_dn) = _employeenumber_user_get_dn(description["S2_MODRDN_1"]) + description["S2"].rename_s(new_test_user_dn, 'employeeNumber=%s' % description["S2_MODRDN_2"], newsuperior=BASE_DISTINGUISHED, delold=1) + ents = description["S2"].search_s(BASE_DISTINGUISHED, ldap.SCOPE_SUBTREE, '(employeeNumber=%s)' % description["S2_MODRDN_2"]) + assert len(ents) == 1 + time.sleep(1) + + _resume_ra_M2_then_M1(M1, M2, M3) + + #time.sleep(3600) + # Step 9 + # Check that M1 still contains employeeNumber= + ents = M1.search_s(BASE_DISTINGUISHED, ldap.SCOPE_SUBTREE, '(employeeNumber=%s)' % description["expected"]) + log.info('Search M1 employeeNumber=%s (vs. %s)' % (ents[0].getValue('employeeNumber'), description["expected"])) + assert len(ents) == 1 + assert ents[0].hasAttr('employeeNumber') and ents[0].getValue('employeeNumber') == description["expected"].encode() + + # Check that M2 still contains employeeNumber= + ents = M2.search_s(BASE_DISTINGUISHED, ldap.SCOPE_SUBTREE, '(employeeNumber=%s)' % description["expected"]) + log.info('Search M2 employeeNumber=%s (vs. 
%s)' % (ents[0].getValue('employeeNumber'), description["expected"])) + assert len(ents) == 1 + assert ents[0].hasAttr('employeeNumber') and ents[0].getValue('employeeNumber') == description["expected"].encode() + + # Check that M3 still contain employeeNumber and it contains employeeNumber= + ents = M3.search_s(BASE_DISTINGUISHED, ldap.SCOPE_SUBTREE, '(employeeNumber=*)') + assert len(ents) == MAX_EMPLOYEENUMBER_USER + ents = M3.search_s(BASE_DISTINGUISHED, ldap.SCOPE_SUBTREE, '(employeeNumber=%s)' % description["expected"]) + log.info('Search M3 employeeNumber=%s (vs. %s)' % (ents[0].getValue('employeeNumber'), description["expected"])) + assert len(ents) == 1 + assert ents[0].hasAttr('employeeNumber') and ents[0].getValue('employeeNumber') == description["expected"].encode() + +def test_ticket49658_34(topo): + """Do + M1: MODRDN -> V1 + M2: MODRDN -> V2 + M1: MOD(REPL) -> V1 + M2: MODRDN -> V1 + Replicate order: M1 then M2 + expected: V1 + + :id: 796d3d77-2401-49f5-89fa-80b231d3e758 + :setup: 3 Master Instances + 1. Use employeenumber=1000,ou=distinguished,ou=people, + :steps: + 1. Isolate M1 and M2 by pausing the replication agreements + 2. On M1 do DEL+ADD 1000 + MOD_ADD_13 + 3. On M2 do DEL+ADD 1000 + MOD_ADD_13 + 4. Enable replication agreement M2 -> M3, so that update step 2 is replicated first + 5. Enable replication agreement M1 -> M3, so that update step 3 is replicated second + 6. Check that the employeeNumber is 13 on all servers + :expectedresults: + 1. Fill in the result that is expected + 2. For each test step + """ + + # If you need any test suite initialization, + # please, write additional fixture for that (including finalizer). + # Topology for suites are predefined in lib389/topologies.py. + + # If you need host, port or any other data about instance, + # Please, use the instance object attributes for that (for example, topo.ms["master1"].serverid) + + + + if DEBUGGING: + # Add debugging steps(if any)... 
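+    # The '(employeeNumber=*)' search against M3 in the final checks acts as a
+    # sanity guard: the number of entries under BASE_DISTINGUISHED must still
+    # be MAX_EMPLOYEENUMBER_USER, i.e. resolving the conflicting renames must
+    # not have produced an extra entry on the master that only saw the updates
+    # through replication.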
+ pass + M1 = topo.ms["master1"] + M2 = topo.ms["master2"] + M3 = topo.ms["master3"] + value_init = '7' + last = '19' + value_S1 = '19.1' + value_S2 = '19.2' + + description = { + "S1": M1, + "S2": M2, + "S1_MODRDN": value_S1, + "S1_MOD": [(ldap.MOD_REPLACE, 'employeeNumber', value_S1.encode())], + "S2_MODRDN_1": value_S2, + "S2_MODRDN_2": value_S1, + "expected": value_S1} + + # This test takes the user_1 + (uid, test_user_dn) = _employeenumber_user_get_dn(int(last)) + + # + # Step 4 + # + # disable all RA from M1 and M2 + # only M3 can replicate the update + # + agreement_m1_m2 = M1.agreement.list(suffix=DEFAULT_SUFFIX, consumer_host=M2.host, consumer_port=M2.port) + agreement_m1_m3 = M1.agreement.list(suffix=DEFAULT_SUFFIX, consumer_host=M3.host, consumer_port=M3.port) + agreement_m2_m1 = M2.agreement.list(suffix=DEFAULT_SUFFIX, consumer_host=M1.host, consumer_port=M1.port) + agreement_m2_m3 = M2.agreement.list(suffix=DEFAULT_SUFFIX, consumer_host=M3.host, consumer_port=M3.port) + + M1.agreement.pause(agreement_m1_m2[0].dn) + M1.agreement.pause(agreement_m1_m3[0].dn) + M2.agreement.pause(agreement_m2_m1[0].dn) + M2.agreement.pause(agreement_m2_m3[0].dn) + + # Step 5 + # Oldest update + # check that the entry on M1 contains employeeNumber= + description["S1"].rename_s(test_user_dn, 'employeeNumber=%s' % description["S1_MODRDN"], newsuperior=BASE_DISTINGUISHED, delold=1) + ents = description["S1"].search_s(BASE_DISTINGUISHED, ldap.SCOPE_SUBTREE, '(employeeNumber=%s)' % description["S1_MODRDN"]) + assert len(ents) == 1 + time.sleep(1) + + # Step 6 + # More recent update + # check that the entry on M2 contains employeeNumber= + description["S2"].rename_s(test_user_dn, 'employeeNumber=%s' % description["S2_MODRDN_1"], newsuperior=BASE_DISTINGUISHED, delold=1) + ents = description["S2"].search_s(BASE_DISTINGUISHED, ldap.SCOPE_SUBTREE, '(employeeNumber=%s)' % description["S2_MODRDN_1"]) + assert len(ents) == 1 + time.sleep(1) + + (no, new_test_user_dn) = _employeenumber_user_get_dn(description["S1_MODRDN"]) + description["S1"].modify_s(new_test_user_dn, description["S1_MOD"]) + ents = description["S1"].search_s(BASE_DISTINGUISHED, ldap.SCOPE_SUBTREE, '(employeeNumber=%s)' % value_S1) + assert len(ents) == 1 + time.sleep(1) + + (no, new_test_user_dn) = _employeenumber_user_get_dn(description["S2_MODRDN_1"]) + description["S2"].rename_s(new_test_user_dn, 'employeeNumber=%s' % description["S2_MODRDN_2"], newsuperior=BASE_DISTINGUISHED, delold=1) + ents = description["S2"].search_s(BASE_DISTINGUISHED, ldap.SCOPE_SUBTREE, '(employeeNumber=%s)' % description["S2_MODRDN_2"]) + assert len(ents) == 1 + time.sleep(1) + + _resume_ra_M1_then_M2(M1, M2, M3) + + #time.sleep(3600) + # Step 9 + # Check that M1 still contains employeeNumber= + ents = M1.search_s(BASE_DISTINGUISHED, ldap.SCOPE_SUBTREE, '(employeeNumber=%s)' % description["expected"]) + log.info('Search M1 employeeNumber=%s (vs. %s)' % (ents[0].getValue('employeeNumber'), description["expected"])) + assert len(ents) == 1 + assert ents[0].hasAttr('employeeNumber') and ents[0].getValue('employeeNumber') == description["expected"].encode() + + # Check that M2 still contains employeeNumber= + ents = M2.search_s(BASE_DISTINGUISHED, ldap.SCOPE_SUBTREE, '(employeeNumber=%s)' % description["expected"]) + log.info('Search M2 employeeNumber=%s (vs. 
%s)' % (ents[0].getValue('employeeNumber'), description["expected"])) + assert len(ents) == 1 + assert ents[0].hasAttr('employeeNumber') and ents[0].getValue('employeeNumber') == description["expected"].encode() + + # Check that M3 still contain employeeNumber and it contains employeeNumber= + ents = M3.search_s(BASE_DISTINGUISHED, ldap.SCOPE_SUBTREE, '(employeeNumber=*)') + assert len(ents) == MAX_EMPLOYEENUMBER_USER + ents = M3.search_s(BASE_DISTINGUISHED, ldap.SCOPE_SUBTREE, '(employeeNumber=%s)' % description["expected"]) + log.info('Search M3 employeeNumber=%s (vs. %s)' % (ents[0].getValue('employeeNumber'), description["expected"])) + assert len(ents) == 1 + assert ents[0].hasAttr('employeeNumber') and ents[0].getValue('employeeNumber') == description["expected"].encode() +if __name__ == '__main__': + # Run isolated + # -s for DEBUG mode + CURRENT_FILE = os.path.realpath(__file__) + pytest.main(["-s", CURRENT_FILE]) + diff --git a/dirsrvtests/tests/tickets/ticket49788_test.py b/dirsrvtests/tests/tickets/ticket49788_test.py new file mode 100644 index 0000000..6637957 --- /dev/null +++ b/dirsrvtests/tests/tickets/ticket49788_test.py @@ -0,0 +1,88 @@ +import logging +import time + +import ldap +import base64 +import pytest +import os + +from lib389 import Entry +from lib389.tasks import * +from lib389.utils import * +from lib389.properties import * +from lib389.topologies import topology_st +from lib389._constants import DEFAULT_SUFFIX, DN_CONFIG, DN_DM, PASSWORD, DEFAULT_SUFFIX_ESCAPED + +pytestmark = pytest.mark.tier2 + +logging.getLogger(__name__).setLevel(logging.DEBUG) +log = logging.getLogger(__name__) + +VALID_STRINGS = [ + 'dHJpdmlhbCBzdHJpbmc=' # trivial string + '8J+YjQ==', # 😍 + 'aGVsbG8g8J+YjQ==', # hello 😍 + '8J+krCBTbyB0aGVyZSEg8J+YoQ==', # 🤬 So there! 😡 + 'YnJvY2NvbGkgYmVlZg==', # broccoli beef + 'Y2FybmUgZGUgYnLDs2NvbGk=', # carne de brócoli + '2YTYrdmFINio2YLYsdmKINio2LHZiNmD2YTZig==', # لحم بقري بروكلي + '6KW/5YWw6Iqx54mb6IKJ', # 西兰花牛肉 + '6KW/6Jit6Iqx54mb6IKJ', # 西蘭花牛肉 + '0LPQvtCy0LXQtNGB0LrQviDQvNC10YHQviDQvtC0INCx0YDQvtC60YPQu9Cw', # говедско месо од брокула +] + +INVALID_STRINGS = [ + '0LPQxtCy0LXQtNGB0LrQviDQvNC10YHQviDQvtC0INCx0YDQvtC60YPQu9Cw', + '8R+KjQ==', +] + +USER_DN = 'cn=test_user,' + DEFAULT_SUFFIX + +def test_ticket49781(topology_st): + """ + Test that four-byte UTF-8 characters are accepted by the + directory string syntax. 
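+
+    The VALID_STRINGS/INVALID_STRINGS vectors defined above are stored
+    base64-encoded and decoded at runtime before being written into the
+    description attribute.  A new vector can be generated with, for example:
+
+        base64.b64encode('hello 😍'.encode('utf-8'))  # -> b'aGVsbG8g8J+YjQ=='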
+ """ + + # Add a test user + try: + topology_st.standalone.add_s(Entry((USER_DN, + {'objectclass': ['top', 'person'], + 'sn': 'sn', + 'description': 'Four-byte UTF8 test', + 'cn': 'test_user'}))) + except ldap.LDAPError as e: + log.fatal('Failed to add test user') + assert False + + try: + topology_st.standalone.modify_s(USER_DN, [(ldap.MOD_REPLACE, 'description', b'something else')]) + except ldap.LDAPError as e: + log.fatal('trivial test failed!') + assert False + + # Iterate over valid tests + for s in VALID_STRINGS: + decoded = base64.b64decode(s) + try: + topology_st.standalone.modify_s(USER_DN, [(ldap.MOD_REPLACE, 'description', decoded)]) + except ldap.LDAPError as e: + log.fatal('description: ' + decoded.decode('UTF-8') + ' failed') + assert False + + # Iterate over invalid tests + for s in INVALID_STRINGS: + decoded = base64.b64decode(s) + try: + topology_st.standalone.modify_s(USER_DN, [(ldap.MOD_REPLACE, 'description', decoded)]) + log.fatal('base64-decoded string ' + s + " was accepted, when it shouldn't have been!") + assert False + except ldap.LDAPError as e: + pass + + +if __name__ == '__main__': + # Run isolated + # -s for DEBUG mode + CURRENT_FILE = os.path.realpath(__file__) + pytest.main("-s %s" % CURRENT_FILE) diff --git a/dirsrvtests/tests/tickets/ticket50078_test.py b/dirsrvtests/tests/tickets/ticket50078_test.py new file mode 100644 index 0000000..1316467 --- /dev/null +++ b/dirsrvtests/tests/tickets/ticket50078_test.py @@ -0,0 +1,70 @@ +import pytest +from lib389.utils import * +from lib389.topologies import topology_m1h1c1 +from lib389.idm.user import UserAccounts + +from lib389._constants import (DEFAULT_SUFFIX, REPLICA_RUV_FILTER, defaultProperties, + REPLICATION_BIND_DN, REPLICATION_BIND_PW, REPLICATION_BIND_METHOD, + REPLICATION_TRANSPORT, SUFFIX, RA_NAME, RA_BINDDN, RA_BINDPW, + RA_METHOD, RA_TRANSPORT_PROT, SUFFIX) + +pytestmark = pytest.mark.tier2 + +logging.getLogger(__name__).setLevel(logging.DEBUG) +log = logging.getLogger(__name__) + +TEST_USER = "test_user" + +def test_ticket50078(topology_m1h1c1): + """ + Test that for a MODRDN operation the cenotaph entry is created on + a hub or consumer. + """ + + M1 = topology_m1h1c1.ms["master1"] + H1 = topology_m1h1c1.hs["hub1"] + C1 = topology_m1h1c1.cs["consumer1"] + # + # Test replication is working + # + if M1.testReplication(DEFAULT_SUFFIX, topology_m1h1c1.cs["consumer1"]): + log.info('Replication is working.') + else: + log.fatal('Replication is not working.') + assert False + + ua = UserAccounts(M1, DEFAULT_SUFFIX) + ua.create(properties={ + 'uid': "%s%d" % (TEST_USER, 1), + 'cn' : "%s%d" % (TEST_USER, 1), + 'sn' : 'user', + 'uidNumber' : '1000', + 'gidNumber' : '2000', + 'homeDirectory' : '/home/testuser' + }) + + user = ua.get('%s1' % TEST_USER) + log.info(" Rename the test entry %s..." 
% user) + user.rename('uid=test_user_new') + + # wait until replication is in sync + if M1.testReplication(DEFAULT_SUFFIX, topology_m1h1c1.cs["consumer1"]): + log.info('Replication is working.') + else: + log.fatal('Replication is not working.') + assert False + + # check if cenotaph was created on hub and consumer + ents = H1.search_s(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE, filterstr="(&(objectclass=nstombstone)(cenotaphid=*))") + assert len(ents) == 1 + + ents = C1.search_s(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE, filterstr="(&(objectclass=nstombstone)(cenotaphid=*))") + assert len(ents) == 1 + + + +if __name__ == '__main__': + # Run isolated + # -s for DEBUG mode + CURRENT_FILE = os.path.realpath(__file__) + pytest.main("-s %s" % CURRENT_FILE) diff --git a/dirsrvtests/tests/tickets/ticket50232_test.py b/dirsrvtests/tests/tickets/ticket50232_test.py new file mode 100644 index 0000000..64d2c21 --- /dev/null +++ b/dirsrvtests/tests/tickets/ticket50232_test.py @@ -0,0 +1,165 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2016 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. +# --- END COPYRIGHT BLOCK --- +# +import logging + +import pytest +# from lib389.tasks import * +# from lib389.utils import * +from lib389.topologies import topology_st +from lib389.replica import ReplicationManager,Replicas + +from lib389._constants import DEFAULT_SUFFIX, BACKEND_NAME + +from lib389.idm.user import UserAccounts +from lib389.idm.organization import Organization +from lib389.idm.organizationalunit import OrganizationalUnit + +pytestmark = pytest.mark.tier2 + +log = logging.getLogger(__name__) + +NORMAL_SUFFIX = 'o=normal' +NORMAL_BACKEND_NAME = 'normal' +REVERSE_SUFFIX = 'o=reverse' +REVERSE_BACKEND_NAME = 'reverse' + +def _enable_replica(instance, suffix): + + repl = ReplicationManager(DEFAULT_SUFFIX) + repl._ensure_changelog(instance) + replicas = Replicas(instance) + replicas.create(properties={ + 'cn': 'replica', + 'nsDS5ReplicaRoot': suffix, + 'nsDS5ReplicaId': '1', + 'nsDS5Flags': '1', + 'nsDS5ReplicaType': '3' + }) + +def _populate_suffix(instance, suffixname): + + o = Organization(instance, 'o={}'.format(suffixname)) + o.create(properties={ + 'o': suffixname, + 'description': 'test' + }) + ou = OrganizationalUnit(instance, 'ou=people,o={}'.format(suffixname)) + ou.create(properties={ + 'ou': 'people' + }) + +def _get_replica_generation(instance, suffix): + + replicas = Replicas(instance) + replica = replicas.get(suffix) + ruv = replica.get_ruv() + return ruv._data_generation + +def _test_export_import(instance, suffix, backend): + + before_generation = _get_replica_generation(instance, suffix) + + instance.stop() + instance.db2ldif( + bename=backend, + suffixes=[suffix], + excludeSuffixes=[], + encrypt=False, + repl_data=True, + outputfile="/tmp/output_file", + ) + instance.ldif2db( + bename=None, + excludeSuffixes=None, + encrypt=False, + suffixes=[suffix], + import_file="/tmp/output_file", + ) + instance.start() + after_generation = _get_replica_generation(instance, suffix) + + assert (before_generation == after_generation) + +def test_ticket50232_normal(topology_st): + """ + The fix for ticket 50232 + + + The test sequence is: + - create suffix + - add suffix entry and some child entries + - "normally" done after populating suffix: enable replication + - get RUV and database generation + - export -r + - import + - get RUV and database generation + - assert database generation has not changed + """ + + log.info('Testing Ticket 
50232 - export creates non-importable ldif file, normal creation order')
+
+    topology_st.standalone.backend.create(NORMAL_SUFFIX, {BACKEND_NAME: NORMAL_BACKEND_NAME})
+    topology_st.standalone.mappingtree.create(NORMAL_SUFFIX, bename=NORMAL_BACKEND_NAME, parent=None)
+
+    _populate_suffix(topology_st.standalone, NORMAL_BACKEND_NAME)
+
+    repl = ReplicationManager(DEFAULT_SUFFIX)
+    repl._ensure_changelog(topology_st.standalone)
+    replicas = Replicas(topology_st.standalone)
+    replicas.create(properties={
+        'cn': 'replica',
+        'nsDS5ReplicaRoot': NORMAL_SUFFIX,
+        'nsDS5ReplicaId': '1',
+        'nsDS5Flags': '1',
+        'nsDS5ReplicaType': '3'
+        })
+
+    _test_export_import(topology_st.standalone, NORMAL_SUFFIX, NORMAL_BACKEND_NAME)
+
+def test_ticket50232_reverse(topology_st):
+    """
+    The fix for ticket 50232
+
+
+    The test sequence is:
+    - create suffix
+    - enable replication before suffix entry is added
+    - add suffix entry and some child entries
+    - get RUV and database generation
+    - export -r
+    - import
+    - get RUV and database generation
+    - assert database generation has not changed
+    """
+
+    log.info('Testing Ticket 50232 - export creates non-importable ldif file, reverse creation order')
+
+    #
+    # Setup Replication
+    #
+    log.info('Setting up replication...')
+    repl = ReplicationManager(DEFAULT_SUFFIX)
+    # repl.create_first_master(topology_st.standalone)
+    #
+    # enable dynamic plugins, memberof and retro cl plugin
+    #
+    topology_st.standalone.backend.create(REVERSE_SUFFIX, {BACKEND_NAME: REVERSE_BACKEND_NAME})
+    topology_st.standalone.mappingtree.create(REVERSE_SUFFIX, bename=REVERSE_BACKEND_NAME, parent=None)
+
+    _enable_replica(topology_st.standalone, REVERSE_SUFFIX)
+
+    _populate_suffix(topology_st.standalone, REVERSE_BACKEND_NAME)
+
+    _test_export_import(topology_st.standalone, REVERSE_SUFFIX, REVERSE_BACKEND_NAME)
+
+if __name__ == '__main__':
+    # Run isolated
+    # -s for DEBUG mode
+    CURRENT_FILE = os.path.realpath(__file__)
+    pytest.main("-s %s" % CURRENT_FILE)
diff --git a/dirsrvtests/tests/tickets/ticket50234_test.py b/dirsrvtests/tests/tickets/ticket50234_test.py
new file mode 100644
index 0000000..ac936d4
--- /dev/null
+++ b/dirsrvtests/tests/tickets/ticket50234_test.py
@@ -0,0 +1,72 @@
+# --- BEGIN COPYRIGHT BLOCK ---
+# Copyright (C) 2019 Red Hat, Inc.
+# All rights reserved.
+#
+# License: GPL (version 3 or any later version).
+# See LICENSE for details.
+# --- END COPYRIGHT BLOCK ---
+#
+import logging
+import time
+import ldap
+import pytest
+
+from lib389.topologies import topology_st
+
+from lib389._constants import DEFAULT_SUFFIX
+
+from lib389.idm.user import UserAccount, UserAccounts
+from lib389.idm.organizationalunit import OrganizationalUnit
+
+pytestmark = pytest.mark.tier2
+
+log = logging.getLogger(__name__)
+
+def test_ticket50234(topology_st):
+    """
+    The fix for ticket 50234
+
+
+    The test sequence is:
+    - create more than 10 organizationalUnit entries ou=org{}
+    - add an Account in one of them, e.g. below ou=org5
+    - do searches with search base ou=org5 and search filter "objectclass=organizationalunit"
+    - a subtree search should return 1 entry, the base entry
+    - a onelevel search should return no entry
+    """
+
+    log.info('Testing Ticket 50234 - onelevel search returns non-matching entry')
+
+    for i in range(1,15):
+        ou = OrganizationalUnit(topology_st.standalone, "ou=Org{},{}".format(i, DEFAULT_SUFFIX))
+        ou.create(properties={'ou': 'Org{}'.format(i)})
+
+    properties = {
+        'uid': 'Jeff Vedder',
+        'cn': 'Jeff Vedder',
+        'sn': 'user',
+        'uidNumber': '1000',
+        'gidNumber': '2000',
+        'homeDirectory': '/home/' + 'JeffVedder',
+        'userPassword': 'password'
+    }
+    user = UserAccount(topology_st.standalone, "cn=Jeff Vedder,ou=org5,{}".format(DEFAULT_SUFFIX))
+    user.create(properties=properties)
+
+    # in a subtree search the entry used as search base matches the filter and should be returned
+    ent = topology_st.standalone.getEntry("ou=org5,{}".format(DEFAULT_SUFFIX), ldap.SCOPE_SUBTREE, "(objectclass=organizationalunit)")
+
+    # in a onelevel search the only child is a user account which does not match the filter;
+    # no entry should be returned, which would cause getEntry to raise an exception we need to handle
+    found = 1
+    try:
+        ent = topology_st.standalone.getEntry("ou=org5,{}".format(DEFAULT_SUFFIX), ldap.SCOPE_ONELEVEL, "(objectclass=organizationalunit)")
+    except ldap.NO_SUCH_OBJECT:
+        found = 0
+    assert (found == 0)
+
+if __name__ == '__main__':
+    # Run isolated
+    # -s for DEBUG mode
+    CURRENT_FILE = os.path.realpath(__file__)
+    pytest.main("-s %s" % CURRENT_FILE)
diff --git a/dirsrvtests/tests/tickets/ticket548_test.py b/dirsrvtests/tests/tickets/ticket548_test.py
new file mode 100644
index 0000000..cac3cc5
--- /dev/null
+++ b/dirsrvtests/tests/tickets/ticket548_test.py
@@ -0,0 +1,408 @@
+# --- BEGIN COPYRIGHT BLOCK ---
+# Copyright (C) 2016 Red Hat, Inc.
+# All rights reserved.
+#
+# License: GPL (version 3 or any later version).
+# See LICENSE for details.
+# --- END COPYRIGHT BLOCK ---
+#
+import pytest
+from lib389.tasks import *
+from lib389.utils import *
+from lib389.topologies import topology_st
+
+from lib389._constants import DEFAULT_SUFFIX, DN_CONFIG, DN_DM, PASSWORD, DEFAULT_SUFFIX_ESCAPED
+
+# Skip on older versions
+pytestmark = [pytest.mark.tier2,
+              pytest.mark.skipif(ds_is_older('1.3.6'), reason="Not implemented")]
+
+log = logging.getLogger(__name__)
+
+# Assuming DEFAULT_SUFFIX is "dc=example,dc=com", otherwise it does not work...
:( +SUBTREE_CONTAINER = 'cn=nsPwPolicyContainer,' + DEFAULT_SUFFIX +SUBTREE_PWPDN = 'cn=nsPwPolicyEntry,' + DEFAULT_SUFFIX +SUBTREE_PWP = 'cn=cn\3DnsPwPolicyEntry\2C' + DEFAULT_SUFFIX_ESCAPED + ',' + SUBTREE_CONTAINER +SUBTREE_COS_TMPLDN = 'cn=nsPwTemplateEntry,' + DEFAULT_SUFFIX +SUBTREE_COS_TMPL = 'cn=cn\3DnsPwTemplateEntry\2C' + DEFAULT_SUFFIX_ESCAPED + ',' + SUBTREE_CONTAINER +SUBTREE_COS_DEF = 'cn=nsPwPolicy_CoS,' + DEFAULT_SUFFIX + +USER1_DN = 'uid=user1,' + DEFAULT_SUFFIX +USER2_DN = 'uid=user2,' + DEFAULT_SUFFIX +USER3_DN = 'uid=user3,' + DEFAULT_SUFFIX +USER_PW = 'password' + + +def days_to_secs(days): + # Value of 60 * 60 * 24 + return days * 86400 + + +# Values are in days +def set_global_pwpolicy(topology_st, min_=1, max_=10, warn=3): + log.info(" +++++ Enable global password policy +++++\n") + # Enable password policy + try: + topology_st.standalone.modify_s(DN_CONFIG, [(ldap.MOD_REPLACE, 'nsslapd-pwpolicy-local', b'on')]) + except ldap.LDAPError as e: + log.error('Failed to set pwpolicy-local: error ' + e.message['desc']) + assert False + + # Convert our values to seconds + min_secs = days_to_secs(min_) + max_secs = days_to_secs(max_) + warn_secs = days_to_secs(warn) + + log.info(" Set global password Min Age -- %s day\n" % min_) + try: + topology_st.standalone.modify_s(DN_CONFIG, [(ldap.MOD_REPLACE, 'passwordMinAge', ('%s' % min_secs).encode())]) + except ldap.LDAPError as e: + log.error('Failed to set passwordMinAge: error ' + e.message['desc']) + assert False + + log.info(" Set global password Expiration -- on\n") + try: + topology_st.standalone.modify_s(DN_CONFIG, [(ldap.MOD_REPLACE, 'passwordExp', b'on')]) + except ldap.LDAPError as e: + log.error('Failed to set passwordExp: error ' + e.message['desc']) + assert False + + log.info(" Set global password Max Age -- %s days\n" % max_) + try: + topology_st.standalone.modify_s(DN_CONFIG, [(ldap.MOD_REPLACE, 'passwordMaxAge', ('%s' % max_secs).encode())]) + except ldap.LDAPError as e: + log.error('Failed to set passwordMaxAge: error ' + e.message['desc']) + assert False + + log.info(" Set global password Warning -- %s days\n" % warn) + try: + topology_st.standalone.modify_s(DN_CONFIG, [(ldap.MOD_REPLACE, 'passwordWarning', ('%s' % warn_secs).encode())]) + except ldap.LDAPError as e: + log.error('Failed to set passwordWarning: error ' + e.message['desc']) + assert False + + +def set_subtree_pwpolicy(topology_st, min_=2, max_=20, warn=6): + log.info(" +++++ Enable subtree level password policy +++++\n") + + # Convert our values to seconds + min_secs = days_to_secs(min_) + max_secs = days_to_secs(max_) + warn_secs = days_to_secs(warn) + + log.info(" Add the container") + try: + topology_st.standalone.add_s(Entry((SUBTREE_CONTAINER, {'objectclass': 'top nsContainer'.split(), + 'cn': 'nsPwPolicyContainer'}))) + except ldap.ALREADY_EXISTS: + pass + except ldap.LDAPError as e: + log.error('Failed to add subtree container: error ' + e.message['desc']) + # assert False + + try: + # Purge the old policy + topology_st.standalone.delete_s(SUBTREE_PWP) + except: + pass + + log.info( + " Add the password policy subentry {passwordMustChange: on, passwordMinAge: %s, passwordMaxAge: %s, passwordWarning: %s}" % ( + min_, max_, warn)) + try: + topology_st.standalone.add_s(Entry((SUBTREE_PWP, {'objectclass': 'top ldapsubentry passwordpolicy'.split(), + 'cn': SUBTREE_PWPDN, + 'passwordMustChange': 'on', + 'passwordExp': 'on', + 'passwordMinAge': '%s' % min_secs, + 'passwordMaxAge': '%s' % max_secs, + 'passwordWarning': '%s' % warn_secs, + 
'passwordChange': 'on', + 'passwordStorageScheme': 'clear'}))) + except ldap.LDAPError as e: + log.error('Failed to add passwordpolicy: error ' + e.message['desc']) + assert False + + log.info(" Add the COS template") + try: + topology_st.standalone.add_s( + Entry((SUBTREE_COS_TMPL, {'objectclass': 'top ldapsubentry costemplate extensibleObject'.split(), + 'cn': SUBTREE_PWPDN, + 'cosPriority': '1', + 'cn': SUBTREE_COS_TMPLDN, + 'pwdpolicysubentry': SUBTREE_PWP}))) + except ldap.ALREADY_EXISTS: + pass + except ldap.LDAPError as e: + log.error('Failed to add COS template: error ' + e.message['desc']) + # assert False + + log.info(" Add the COS definition") + try: + topology_st.standalone.add_s( + Entry((SUBTREE_COS_DEF, {'objectclass': 'top ldapsubentry cosSuperDefinition cosPointerDefinition'.split(), + 'cn': SUBTREE_PWPDN, + 'costemplatedn': SUBTREE_COS_TMPL, + 'cosAttribute': 'pwdpolicysubentry default operational-default'}))) + except ldap.ALREADY_EXISTS: + pass + except ldap.LDAPError as e: + log.error('Failed to add COS def: error ' + e.message['desc']) + # assert False + + time.sleep(1) + + +def update_passwd(topology_st, user, passwd, newpasswd): + log.info(" Bind as {%s,%s}" % (user, passwd)) + topology_st.standalone.simple_bind_s(user, passwd) + try: + topology_st.standalone.modify_s(user, [(ldap.MOD_REPLACE, 'userpassword', newpasswd.encode())]) + except ldap.LDAPError as e: + log.fatal('test_ticket548: Failed to update the password ' + cpw + ' of user ' + user + ': error ' + e.message[ + 'desc']) + assert False + + time.sleep(1) + + +def check_shadow_attr_value(entry, attr_type, expected, dn): + if entry.hasAttr(attr_type): + actual = entry.getValue(attr_type) + if int(actual) == expected: + log.info('%s of entry %s has expected value %s' % (attr_type, dn, actual)) + assert True + else: + log.fatal('%s %s of entry %s does not have expected value %s' % (attr_type, actual, dn, expected)) + assert False + else: + log.fatal('entry %s does not have %s attr' % (dn, attr_type)) + assert False + + +def test_ticket548_test_with_no_policy(topology_st): + """ + Check shadowAccount under no password policy + """ + log.info("Case 1. No password policy") + + log.info("Bind as %s" % DN_DM) + topology_st.standalone.simple_bind_s(DN_DM, PASSWORD) + + log.info('Add an entry' + USER1_DN) + try: + topology_st.standalone.add_s( + Entry((USER1_DN, {'objectclass': "top person organizationalPerson inetOrgPerson shadowAccount".split(), + 'sn': '1', + 'cn': 'user 1', + 'uid': 'user1', + 'givenname': 'user', + 'mail': 'user1@' + DEFAULT_SUFFIX, + 'userpassword': USER_PW}))) + except ldap.LDAPError as e: + log.fatal('test_ticket548: Failed to add user' + USER1_DN + ': error ' + e.message['desc']) + assert False + + edate = int(time.time() / (60 * 60 * 24)) + log.info('Search entry %s' % USER1_DN) + + log.info("Bind as %s" % USER1_DN) + topology_st.standalone.simple_bind_s(USER1_DN, USER_PW) + entry = topology_st.standalone.getEntry(USER1_DN, ldap.SCOPE_BASE, "(objectclass=*)", ['shadowLastChange']) + check_shadow_attr_value(entry, 'shadowLastChange', edate, USER1_DN) + + log.info("Check shadowAccount with no policy was successfully verified.") + + +def test_ticket548_test_global_policy(topology_st): + """ + Check shadowAccount with global password policy + """ + + log.info("Case 2. 
Check shadowAccount with global password policy") + + log.info("Bind as %s" % DN_DM) + topology_st.standalone.simple_bind_s(DN_DM, PASSWORD) + + set_global_pwpolicy(topology_st) + + log.info('Add an entry' + USER2_DN) + try: + topology_st.standalone.add_s( + Entry((USER2_DN, {'objectclass': "top person organizationalPerson inetOrgPerson shadowAccount".split(), + 'sn': '2', + 'cn': 'user 2', + 'uid': 'user2', + 'givenname': 'user', + 'mail': 'user2@' + DEFAULT_SUFFIX, + 'userpassword': USER_PW}))) + except ldap.LDAPError as e: + log.fatal('test_ticket548: Failed to add user' + USER2_DN + ': error ' + e.message['desc']) + assert False + + edate = int(time.time() / (60 * 60 * 24)) + + log.info("Bind as %s" % USER1_DN) + topology_st.standalone.simple_bind_s(USER1_DN, USER_PW) + + log.info('Search entry %s' % USER1_DN) + entry = topology_st.standalone.getEntry(USER1_DN, ldap.SCOPE_BASE, "(objectclass=*)") + check_shadow_attr_value(entry, 'shadowLastChange', edate, USER1_DN) + + # passwordMinAge -- 1 day + check_shadow_attr_value(entry, 'shadowMin', 1, USER1_DN) + + # passwordMaxAge -- 10 days + check_shadow_attr_value(entry, 'shadowMax', 10, USER1_DN) + + # passwordWarning -- 3 days + check_shadow_attr_value(entry, 'shadowWarning', 3, USER1_DN) + + log.info("Bind as %s" % USER2_DN) + topology_st.standalone.simple_bind_s(USER2_DN, USER_PW) + + log.info('Search entry %s' % USER2_DN) + entry = topology_st.standalone.getEntry(USER2_DN, ldap.SCOPE_BASE, "(objectclass=*)") + check_shadow_attr_value(entry, 'shadowLastChange', edate, USER2_DN) + + # passwordMinAge -- 1 day + check_shadow_attr_value(entry, 'shadowMin', 1, USER2_DN) + + # passwordMaxAge -- 10 days + check_shadow_attr_value(entry, 'shadowMax', 10, USER2_DN) + + # passwordWarning -- 3 days + check_shadow_attr_value(entry, 'shadowWarning', 3, USER2_DN) + + # Bind as DM again, change policy + log.info("Bind as %s" % DN_DM) + topology_st.standalone.simple_bind_s(DN_DM, PASSWORD) + set_global_pwpolicy(topology_st, 3, 30, 9) + + # change the user password, then check again. + log.info("Bind as %s" % USER2_DN) + topology_st.standalone.simple_bind_s(USER2_DN, USER_PW) + + newpasswd = USER_PW + '2' + update_passwd(topology_st, USER2_DN, USER_PW, newpasswd) + + log.info("Re-bind as %s with new password" % USER2_DN) + topology_st.standalone.simple_bind_s(USER2_DN, newpasswd) + + ## This tests if we update the shadow values on password change. + log.info('Search entry %s' % USER2_DN) + entry = topology_st.standalone.getEntry(USER2_DN, ldap.SCOPE_BASE, "(objectclass=*)") + + # passwordMinAge -- 1 day + check_shadow_attr_value(entry, 'shadowMin', 3, USER2_DN) + + # passwordMaxAge -- 10 days + check_shadow_attr_value(entry, 'shadowMax', 30, USER2_DN) + + # passwordWarning -- 3 days + check_shadow_attr_value(entry, 'shadowWarning', 9, USER2_DN) + + log.info("Check shadowAccount with global policy was successfully verified.") + + +def test_ticket548_test_subtree_policy(topology_st): + """ + Check shadowAccount with subtree level password policy + """ + + log.info("Case 3. 
Check shadowAccount with subtree level password policy") + + log.info("Bind as %s" % DN_DM) + topology_st.standalone.simple_bind_s(DN_DM, PASSWORD) + # Check the global policy values + + set_subtree_pwpolicy(topology_st, 2, 20, 6) + + log.info('Add an entry' + USER3_DN) + try: + topology_st.standalone.add_s( + Entry((USER3_DN, {'objectclass': "top person organizationalPerson inetOrgPerson shadowAccount".split(), + 'sn': '3', + 'cn': 'user 3', + 'uid': 'user3', + 'givenname': 'user', + 'mail': 'user3@' + DEFAULT_SUFFIX, + 'userpassword': USER_PW}))) + except ldap.LDAPError as e: + log.fatal('test_ticket548: Failed to add user' + USER3_DN + ': error ' + e.message['desc']) + assert False + + log.info('Search entry %s' % USER3_DN) + entry0 = topology_st.standalone.getEntry(USER3_DN, ldap.SCOPE_BASE, "(objectclass=*)") + + log.info('Expecting shadowLastChange 0 since passwordMustChange is on') + check_shadow_attr_value(entry0, 'shadowLastChange', 0, USER3_DN) + + # passwordMinAge -- 2 day + check_shadow_attr_value(entry0, 'shadowMin', 2, USER3_DN) + + # passwordMaxAge -- 20 days + check_shadow_attr_value(entry0, 'shadowMax', 20, USER3_DN) + + # passwordWarning -- 6 days + check_shadow_attr_value(entry0, 'shadowWarning', 6, USER3_DN) + + log.info("Bind as %s" % USER3_DN) + topology_st.standalone.simple_bind_s(USER3_DN, USER_PW) + + log.info('Search entry %s' % USER3_DN) + try: + entry1 = topology_st.standalone.getEntry(USER3_DN, ldap.SCOPE_BASE, "(objectclass=*)") + except ldap.UNWILLING_TO_PERFORM: + log.info('test_ticket548: Search by' + USER3_DN + ' failed by UNWILLING_TO_PERFORM as expected') + except ldap.LDAPError as e: + log.fatal('test_ticket548: Failed to serch user' + USER3_DN + ' by self: error ' + e.message['desc']) + assert False + + log.info("Bind as %s and updating the password with a new one" % USER3_DN) + topology_st.standalone.simple_bind_s(USER3_DN, USER_PW) + + # Bind as DM again, change policy + log.info("Bind as %s" % DN_DM) + topology_st.standalone.simple_bind_s(DN_DM, PASSWORD) + + set_subtree_pwpolicy(topology_st, 4, 40, 12) + + newpasswd = USER_PW + '0' + update_passwd(topology_st, USER3_DN, USER_PW, newpasswd) + + log.info("Re-bind as %s with new password" % USER3_DN) + topology_st.standalone.simple_bind_s(USER3_DN, newpasswd) + + try: + entry2 = topology_st.standalone.getEntry(USER3_DN, ldap.SCOPE_BASE, "(objectclass=*)") + except ldap.LDAPError as e: + log.fatal('test_ticket548: Failed to serch user' + USER3_DN + ' by self: error ' + e.message['desc']) + assert False + + edate = int(time.time() / (60 * 60 * 24)) + + log.info('Expecting shadowLastChange %d once userPassword is updated', edate) + check_shadow_attr_value(entry2, 'shadowLastChange', edate, USER3_DN) + + log.info('Search entry %s' % USER3_DN) + entry = topology_st.standalone.getEntry(USER3_DN, ldap.SCOPE_BASE, "(objectclass=*)") + check_shadow_attr_value(entry, 'shadowLastChange', edate, USER3_DN) + + # passwordMinAge -- 1 day + check_shadow_attr_value(entry, 'shadowMin', 4, USER3_DN) + + # passwordMaxAge -- 10 days + check_shadow_attr_value(entry, 'shadowMax', 40, USER3_DN) + + # passwordWarning -- 3 days + check_shadow_attr_value(entry, 'shadowWarning', 12, USER3_DN) + + log.info("Check shadowAccount with subtree level policy was successfully verified.") + + +if __name__ == '__main__': + # Run isolated + # -s for DEBUG mode + CURRENT_FILE = os.path.realpath(__file__) + pytest.main("-s %s" % CURRENT_FILE) diff --git a/dirsrvtests/tests/tmp/README b/dirsrvtests/tests/tmp/README new file mode 100644 index 
0000000..0e8f416 --- /dev/null +++ b/dirsrvtests/tests/tmp/README @@ -0,0 +1,10 @@ +TMP DIRECTORY README + +This directory is used to store files(LDIFs, etc) that are created during the ticket script runtime. The script is also responsible for removing any files it places in this directory. This directory can be retrieved via getDir() from the DirSrv class. + +Example: + + tmp_dir_path = topology.standalone.getDir(__file__, TMP_DIR) + + new_ldif = tmp_dir_path + "export.ldif" + diff --git a/dirsrvtests/tests/tmp/__init__.py b/dirsrvtests/tests/tmp/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/docker.mk b/docker.mk new file mode 100644 index 0000000..0f42b0b --- /dev/null +++ b/docker.mk @@ -0,0 +1,6 @@ + +suse: + docker build -t 389-ds-suse:master -f docker/389-ds-suse/Dockerfile . + +fedora: + docker build -t 389-ds-fedora:master -f docker/389-ds-fedora/Dockerfile . diff --git a/docker/389-ds-fedora/Dockerfile b/docker/389-ds-fedora/Dockerfile new file mode 100644 index 0000000..bdd56fd --- /dev/null +++ b/docker/389-ds-fedora/Dockerfile @@ -0,0 +1,51 @@ +# --- BEGIN COPYRIGHT BLOCK --- +# Copyright (C) 2017 Red Hat, Inc. +# All rights reserved. +# +# License: GPL (version 3 or any later version). +# See LICENSE for details. +# --- END COPYRIGHT BLOCK --- + +FROM fedora:latest +MAINTAINER 389-devel@lists.fedoraproject.org +EXPOSE 3389 3636 + +ADD ./ /usr/local/src/389-ds-base +WORKDIR /usr/local/src/389-ds-base + +# install dependencies +RUN dnf upgrade -y \ + && dnf install --setopt=strict=False -y @buildsys-build rpm-build make bzip2 git rsync \ + `grep -E "^(Build)?Requires" rpm/389-ds-base.spec.in \ + | grep -v -E '(name|MODULE)' \ + | awk '{ print $2 }' \ + | sed 's/%{python3_pkgversion}/3/g' \ + | grep -v "^/" \ + | grep -v pkgversion \ + | sort | uniq \ + | tr '\n' ' '` \ + && dnf clean all + +# build +RUN make -f rpm.mk rpms || sh -c 'echo "build failed, sleeping for some time to allow you debug" ; sleep 3600' + +RUN dnf install -y dist/rpms/*389*.rpm && \ + dnf clean all + +# Link some known static locations to point to /data +RUN mkdir -p /data/config && \ + mkdir -p /data/ssca && \ + mkdir -p /data/run && \ + mkdir -p /var/run/dirsrv && \ + ln -s /data/config /etc/dirsrv/slapd-localhost && \ + ln -s /data/ssca /etc/dirsrv/ssca && \ + ln -s /data/run /var/run/dirsrv + +VOLUME /data + +#USER dirsrv + +HEALTHCHECK --start-period=5m --timeout=5s --interval=5s --retries=2 \ + CMD /usr/libexec/dirsrv/dscontainer -H + +CMD [ "/usr/libexec/dirsrv/dscontainer", "-r" ] diff --git a/docker/389-ds-suse/Dockerfile b/docker/389-ds-suse/Dockerfile new file mode 100644 index 0000000..6022d04 --- /dev/null +++ b/docker/389-ds-suse/Dockerfile @@ -0,0 +1,82 @@ +#!BuildTag: 389-ds-container +FROM opensuse/leap:15.1 +MAINTAINER wbrown@suse.de + +EXPOSE 3389 3636 + +# RUN zypper ar -G obs://network:ldap network:ldap && \ +RUN zypper ar http://download.opensuse.org/update/leap/15.1/oss/ u && \ + zypper ar http://download.opensuse.org/distribution/leap/15.1/repo/oss/ m && \ + zypper ar http://download.opensuse.org/repositories/network:ldap/openSUSE_Leap_15.1/ "network:ldap" && \ + zypper mr -p 97 "network:ldap" && \ + zypper --gpg-auto-import-keys ref + +RUN zypper --non-interactive si --build-deps-only 389-ds && \ + zypper in -y acl cargo cyrus-sasl cyrus-sasl-plain db48-utils krb5-client libLLVM7 libedit0 libgit2-26 libhttp_parser2_7_1 libssh2-1 mozilla-nss-tools rust + +# Install build dependencies +# RUN zypper in -C -y autoconf automake cracklib-devel cyrus-sasl-devel 
db-devel doxygen gcc-c++ \ +# gdb krb5-devel libcmocka-devel libevent-devel libtalloc-devel libtevent-devel libtool \ +# net-snmp-devel openldap2-devel pam-devel pkgconfig python-rpm-macros "pkgconfig(icu-i18n)" \ +# "pkgconfig(icu-uc)" "pkgconfig(libcap)" "pkgconfig(libpcre)" "pkgconfig(libsystemd)" \ +# "pkgconfig(nspr)" "pkgconfig(nss)" rsync cargo rust rust-std acl cyrus-sasl-plain db-utils \ +# bind-utils krb5 fillup shadow openldap2-devel pkgconfig "pkgconfig(nspr)" "pkgconfig(nss)" \ +# "pkgconfig(systemd)" python3-argcomplete python3-argparse-manpage python3-ldap \ +# python3-pyasn1 python3-pyasn1-modules python3-python-dateutil python3-six krb5-client \ +# mozilla-nss-tools + +# Push source code to the container +ADD ./ /usr/local/src/389-ds-base +WORKDIR /usr/local/src/389-ds-base + + +# Build and install +# Derived from rpm --eval '%configure' on opensuse. +RUN autoreconf -fiv && \ + ./configure --host=x86_64-suse-linux-gnu --build=x86_64-suse-linux-gnu \ + --program-prefix= \ + --disable-dependency-tracking \ + --prefix=/usr \ + --exec-prefix=/usr \ + --bindir=/usr/bin \ + --sbindir=/usr/sbin \ + --sysconfdir=/etc \ + --datadir=/usr/share \ + --includedir=/usr/include \ + --libdir=/usr/lib64 \ + --libexecdir=/usr/lib \ + --localstatedir=/var \ + --sharedstatedir=/var/lib \ + --mandir=/usr/share/man \ + --infodir=/usr/share/info \ + --disable-dependency-tracking \ + --enable-debug \ + --enable-gcc-security --enable-autobind --enable-auto-dn-suffix --with-openldap \ + --enable-cmocka --enable-rust --disable-perl --with-pythonexec="python3" --without-systemd \ + --libexecdir=/usr/lib/dirsrv/ --prefix=/ && \ + make -j 12 && \ + make install && \ + make lib389 && \ + make lib389-install + +# Link some known static locations to point to /data +RUN mkdir -p /data/config && \ + mkdir -p /data/ssca && \ + mkdir -p /data/run && \ + mkdir -p /var/run/dirsrv && \ + ln -s /data/config /etc/dirsrv/slapd-localhost && \ + ln -s /data/ssca /etc/dirsrv/ssca && \ + ln -s /data/run /var/run/dirsrv + +# Temporal volumes for each instance + +VOLUME /data + +# Set the userup correctly. +# USER dirsrv + +HEALTHCHECK --start-period=5m --timeout=5s --interval=5s --retries=2 \ + CMD /usr/libexec/dirsrv/dscontainer -H + +CMD [ "/usr/libexec/dirsrv/dscontainer", "-r" ] + diff --git a/docker/389-ds-suse/Dockerfile.release b/docker/389-ds-suse/Dockerfile.release new file mode 100644 index 0000000..6f4adf7 --- /dev/null +++ b/docker/389-ds-suse/Dockerfile.release @@ -0,0 +1,72 @@ +#!BuildTag: 389-ds-container +FROM opensuse/leap:15.1 +MAINTAINER wbrown@suse.de + +EXPOSE 3389 3636 + +# RUN zypper ar -G obs://network:ldap network:ldap && \ +RUN zypper ar http://download.opensuse.org/update/leap/15.1/oss/ u && \ + zypper ar http://download.opensuse.org/distribution/leap/15.1/repo/oss/ m && \ + zypper ar http://download.opensuse.org/repositories/network:ldap/openSUSE_Leap_15.1/ "network:ldap" && \ + zypper mr -p 97 "network:ldap" && \ + zypper --gpg-auto-import-keys ref + +# Push source code to the container - we do this early because we want the zypper and +# build instructions in a single RUN stanza to minimise the container final size. +ADD ./ /usr/local/src/389-ds-base +WORKDIR /usr/local/src/389-ds-base + + +# Build and install +# Derived from rpm --eval '%configure' on opensuse. 
+ +RUN zypper --non-interactive si --build-deps-only 389-ds && \ + zypper in -y 389-ds rust cargo rust-std && \ + zypper rm -y 389-ds lib389 && \ + autoreconf -fiv && \ + ./configure --host=x86_64-suse-linux-gnu --build=x86_64-suse-linux-gnu \ + --program-prefix= \ + --disable-dependency-tracking \ + --prefix=/usr \ + --exec-prefix=/usr \ + --bindir=/usr/bin \ + --sbindir=/usr/sbin \ + --sysconfdir=/etc \ + --datadir=/usr/share \ + --includedir=/usr/include \ + --libdir=/usr/lib64 \ + --libexecdir=/usr/lib \ + --localstatedir=/var \ + --sharedstatedir=/var/lib \ + --mandir=/usr/share/man \ + --infodir=/usr/share/info \ + --disable-dependency-tracking \ + --enable-gcc-security --enable-autobind --enable-auto-dn-suffix --with-openldap \ + --enable-rust --disable-perl --with-pythonexec="python3" --without-systemd \ + --libexecdir=/usr/lib/dirsrv/ --prefix=/ && \ + make -j 12 && \ + make install && \ + make lib389 && \ + make lib389-install && \ + make clean && \ + zypper rm -y -u rust cargo rust-std gcc gcc-c++ automake autoconf + +# Link some known static locations to point to /data +RUN mkdir -p /data/config && \ + mkdir -p /data/ssca && \ + mkdir -p /data/run && \ + mkdir -p /var/run/dirsrv && \ + ln -s /data/config /etc/dirsrv/slapd-localhost && \ + ln -s /data/ssca /etc/dirsrv/ssca && \ + ln -s /data/run /var/run/dirsrv + +# Temporal volumes for each instance + +VOLUME /data + +# Set the userup correctly. This was created as part of the 389ds in above. +# For k8s we'll need 389 to not drop privs? I think we don't specify a user +# here and ds should do the right thing if a non root user runs the server. +# USER dirsrv + +CMD [ "/usr/libexec/dirsrv/dscontainer", "-r" ] diff --git a/docker/README.md b/docker/README.md new file mode 100644 index 0000000..70adf2b --- /dev/null +++ b/docker/README.md @@ -0,0 +1,61 @@ + +#### Issue Description +This folder contains proof of concept dockerfiles for 389 Directory Server. This utilises many of our latest +developments for installing instances and configuring them. We have developed native, clean, and powerful container +integration. This container image is usable on CentOS / RHEL / Fedora atomic host, and pure docker implementations. +Please note this image will not currently work in openshift due to a reliance on volume features that openshift does +not support, but we will correct this. + + +#### Using the files +These docker files are designed to be build from docker hub as the will do a remote git fetch during the build process. +They are not currently designed to operate on a local source tree (we may add this later). + +``` +cd docker/389ds_poc; +docker build -t 389ds_poc:latest . +``` + +#### Deploying and using the final product + +``` +docker create -h ldap.example.com 389ds_poc:latest +docker start +docker inspect | grep IPAddress +ldapsearch -H ldap://
<container_ip>
-b '' -s base -x + +.... +supportedLDAPVersion: 3 +vendorName: 389 Project +vendorVersion: 389-Directory/1.3.6.3 B2017.093.354 + +``` + +To expose the ports you may consider adding: + +``` +-P +OR +-p 127.0.0.1:$HOSTPORT:$CONTAINERPORT +``` + +You can not currently use a persistent volume with the 389ds_poc image due to an issue with docker volumes. This will be +corrected by https://pagure.io/389-ds-base/issue/49213 + +#### Warnings + +The 389ds_poc container is supplied with a static Directory Manager password. This is HIGHLY INSECURE and should not be +used in production. The password is "directory manager password". + +The 389ds_poc container has some issues with volume over-rides due to our use of a pre-built instance. We are working to +resolve this, but until a solution is derived, you can not override the datavolumes. + +#### Other ideas + +* We could develop a dockerfile that builds and runs DS tests in an isolated environment. +* Make a container image that allows mounting an arbitrary 389-ds repo into it for simple development purposes. + +#### NOTE of 389 DS project support + +This is not a "supported" method of deployment to a production system and may result in data loss. This should be +considered an experimental deployment method until otherwise announced. + diff --git a/docs/CREDITS.artwork b/docs/CREDITS.artwork new file mode 100644 index 0000000..e9c2b26 --- /dev/null +++ b/docs/CREDITS.artwork @@ -0,0 +1 @@ +Tops artwork by Logan Megginson diff --git a/docs/custom.css b/docs/custom.css new file mode 100644 index 0000000..16d91cd --- /dev/null +++ b/docs/custom.css @@ -0,0 +1,1366 @@ +/* The standard CSS for doxygen 1.8.6 */ + +body, table, div, p, dl { + font: 400 14px/22px Liberation Sans,DejaVu Sans,Roboto,sans-serif; +} + +/* @group Heading Levels */ + +h1.groupheader { + font-size: 150%; +} + +.title { + font: 400 14px/28px Liberation Sans,DejaVu Sans,Roboto,sans-serif; + font-size: 150%; + font-weight: bold; + margin: 10px 2px; +} + +h2.groupheader { + border-bottom: 1px solid #879ECB; + color: #354C7B; + font-size: 150%; + font-weight: normal; + margin-top: 1.75em; + padding-top: 8px; + padding-bottom: 4px; + width: 100%; +} + +h3.groupheader { + font-size: 100%; +} + +h1, h2, h3, h4, h5, h6 { + -webkit-transition: text-shadow 0.5s linear; + -moz-transition: text-shadow 0.5s linear; + -ms-transition: text-shadow 0.5s linear; + -o-transition: text-shadow 0.5s linear; + transition: text-shadow 0.5s linear; + margin-right: 15px; +} + +h1.glow, h2.glow, h3.glow, h4.glow, h5.glow, h6.glow { + text-shadow: 0 0 15px cyan; +} + +dt { + font-weight: bold; +} + +div.multicol { + -moz-column-gap: 1em; + -webkit-column-gap: 1em; + -moz-column-count: 3; + -webkit-column-count: 3; +} + +p.startli, p.startdd { + margin-top: 2px; +} + +p.starttd { + margin-top: 0px; +} + +p.endli { + margin-bottom: 0px; +} + +p.enddd { + margin-bottom: 4px; +} + +p.endtd { + margin-bottom: 2px; +} + +/* @end */ + +caption { + font-weight: bold; +} + +span.legend { + font-size: 70%; + text-align: center; +} + +h3.version { + font-size: 90%; + text-align: center; +} + +div.qindex, div.navtab{ + background-color: #EBEFF6; + border: 1px solid #A3B4D7; + text-align: center; +} + +div.qindex, div.navpath { + width: 100%; + line-height: 140%; +} + +div.navtab { + margin-right: 15px; +} + +/* @group Link Styling */ + +a { + color: #3D578C; + font-weight: normal; + text-decoration: none; +} + +.contents a:visited { + color: #4665A2; +} + +a:hover { + text-decoration: underline; +} + +a.qindex { + 
font-weight: bold; +} + +a.qindexHL { + font-weight: bold; + background-color: #9CAFD4; + color: #ffffff; + border: 1px double #869DCA; +} + +.contents a.qindexHL:visited { + color: #ffffff; +} + +a.el { + font-weight: bold; +} + +a.elRef { +} + +a.code, a.code:visited, a.line, a.line:visited { + color: #4665A2; +} + +a.codeRef, a.codeRef:visited, a.lineRef, a.lineRef:visited { + color: #4665A2; +} + +/* @end */ + +dl.el { + margin-left: -1cm; +} + +pre.fragment { + border: 1px solid #C4CFE5; + background-color: #FBFCFD; + padding: 4px 6px; + margin: 4px 8px 4px 2px; + overflow: auto; + word-wrap: break-word; + font-size: 9pt; + line-height: 125%; + font-family: monospace, fixed; + font-size: 105%; +} + +div.fragment { + padding: 4px 6px; + margin: 4px 8px 4px 2px; + background-color: #FBFCFD; + border: 1px solid #C4CFE5; +} + +div.line { + font-family: monospace, fixed; + font-size: 13px; + min-height: 13px; + line-height: 1.0; + text-wrap: unrestricted; + white-space: -moz-pre-wrap; /* Moz */ + white-space: -pre-wrap; /* Opera 4-6 */ + white-space: -o-pre-wrap; /* Opera 7 */ + white-space: pre-wrap; /* CSS3 */ + word-wrap: break-word; /* IE 5.5+ */ + text-indent: -53px; + padding-left: 53px; + padding-bottom: 0px; + margin: 0px; + -webkit-transition-property: background-color, box-shadow; + -webkit-transition-duration: 0.5s; + -moz-transition-property: background-color, box-shadow; + -moz-transition-duration: 0.5s; + -ms-transition-property: background-color, box-shadow; + -ms-transition-duration: 0.5s; + -o-transition-property: background-color, box-shadow; + -o-transition-duration: 0.5s; + transition-property: background-color, box-shadow; + transition-duration: 0.5s; +} + +div.line.glow { + background-color: cyan; + box-shadow: 0 0 10px cyan; +} + + +span.lineno { + padding-right: 4px; + text-align: right; + border-right: 2px solid #0F0; + background-color: #E8E8E8; + white-space: pre; +} +span.lineno a { + background-color: #D8D8D8; +} + +span.lineno a:hover { + background-color: #C8C8C8; +} + +div.ah { + background-color: black; + font-weight: bold; + color: #ffffff; + margin-bottom: 3px; + margin-top: 3px; + padding: 0.2em; + border: solid thin #333; + border-radius: 0.5em; + -webkit-border-radius: .5em; + -moz-border-radius: .5em; + box-shadow: 2px 2px 3px #999; + -webkit-box-shadow: 2px 2px 3px #999; + -moz-box-shadow: rgba(0, 0, 0, 0.15) 2px 2px 2px; + background-image: -webkit-gradient(linear, left top, left bottom, from(#eee), to(#000),color-stop(0.3, #444)); + background-image: -moz-linear-gradient(center top, #eee 0%, #444 40%, #000); +} + +div.groupHeader { + margin-left: 16px; + margin-top: 12px; + font-weight: bold; +} + +div.groupText { + margin-left: 16px; + font-style: italic; +} + +body { + background-color: white; + color: black; + margin: 0; +} + +div.contents { + margin-top: 10px; + margin-left: 12px; + margin-right: 8px; +} + +td.indexkey { + background-color: #EBEFF6; + font-weight: bold; + border: 1px solid #C4CFE5; + margin: 2px 0px 2px 0; + padding: 2px 10px; + white-space: nowrap; + vertical-align: top; +} + +td.indexvalue { + background-color: #EBEFF6; + border: 1px solid #C4CFE5; + padding: 2px 10px; + margin: 2px 0px; +} + +tr.memlist { + background-color: #EEF1F7; +} + +p.formulaDsp { + text-align: center; +} + +img.formulaDsp { + +} + +img.formulaInl { + vertical-align: middle; +} + +div.center { + text-align: center; + margin-top: 0px; + margin-bottom: 0px; + padding: 0px; +} + +div.center img { + border: 0px; +} + +address.footer { + text-align: right; 
+ padding-right: 12px; +} + +img.footer { + border: 0px; + vertical-align: middle; +} + +/* @group Code Colorization */ + +span.keyword { + color: #008000 +} + +span.keywordtype { + color: #604020 +} + +span.keywordflow { + color: #e08000 +} + +span.comment { + color: #800000 +} + +span.preprocessor { + color: #806020 +} + +span.stringliteral { + color: #002080 +} + +span.charliteral { + color: #008080 +} + +span.vhdldigit { + color: #ff00ff +} + +span.vhdlchar { + color: #000000 +} + +span.vhdlkeyword { + color: #700070 +} + +span.vhdllogic { + color: #ff0000 +} + +blockquote { + background-color: #F7F8FB; + border-left: 2px solid #9CAFD4; + margin: 0 24px 0 4px; + padding: 0 12px 0 16px; +} + +/* @end */ + +/* +.search { + color: #003399; + font-weight: bold; +} + +form.search { + margin-bottom: 0px; + margin-top: 0px; +} + +input.search { + font-size: 75%; + color: #000080; + font-weight: normal; + background-color: #e8eef2; +} +*/ + +td.tiny { + font-size: 75%; +} + +.dirtab { + padding: 4px; + border-collapse: collapse; + border: 1px solid #A3B4D7; +} + +th.dirtab { + background: #EBEFF6; + font-weight: bold; +} + +hr { + height: 0px; + border: none; + border-top: 1px solid #4A6AAA; +} + +hr.footer { + height: 1px; +} + +/* @group Member Descriptions */ + +table.memberdecls { + border-spacing: 0px; + padding: 0px; +} + +.memberdecls td, .fieldtable tr { + -webkit-transition-property: background-color, box-shadow; + -webkit-transition-duration: 0.5s; + -moz-transition-property: background-color, box-shadow; + -moz-transition-duration: 0.5s; + -ms-transition-property: background-color, box-shadow; + -ms-transition-duration: 0.5s; + -o-transition-property: background-color, box-shadow; + -o-transition-duration: 0.5s; + transition-property: background-color, box-shadow; + transition-duration: 0.5s; +} + +.memberdecls td.glow, .fieldtable tr.glow { + background-color: cyan; + box-shadow: 0 0 15px cyan; +} + +.mdescLeft, .mdescRight, +.memItemLeft, .memItemRight, +.memTemplItemLeft, .memTemplItemRight, .memTemplParams { + background-color: #F9FAFC; + border: none; + margin: 4px; + padding: 1px 0 0 8px; +} + +.mdescLeft, .mdescRight { + padding: 0px 8px 4px 8px; + color: #555; +} + +.memSeparator { + border-bottom: 1px solid #DEE4F0; + line-height: 1px; + margin: 0px; + padding: 0px; +} + +.memItemLeft, .memTemplItemLeft { + white-space: nowrap; +} + +.memItemRight { + width: 100%; +} + +.memTemplParams { + color: #4665A2; + white-space: nowrap; + font-size: 80%; +} + +/* @end */ + +/* @group Member Details */ + +/* Styles for detailed member documentation */ + +.memtemplate { + font-size: 80%; + color: #4665A2; + font-weight: normal; + margin-left: 9px; +} + +.memnav { + background-color: #EBEFF6; + border: 1px solid #A3B4D7; + text-align: center; + margin: 2px; + margin-right: 15px; + padding: 2px; +} + +.mempage { + width: 100%; +} + +.memitem { + padding: 0; + margin-bottom: 10px; + margin-right: 5px; + -webkit-transition: box-shadow 0.5s linear; + -moz-transition: box-shadow 0.5s linear; + -ms-transition: box-shadow 0.5s linear; + -o-transition: box-shadow 0.5s linear; + transition: box-shadow 0.5s linear; + display: table !important; + width: 100%; +} + +.memitem.glow { + box-shadow: 0 0 15px cyan; +} + +.memname { + font-weight: bold; + margin-left: 6px; +} + +.memname td { + vertical-align: bottom; +} + +.memproto, dl.reflist dt { + border-top: 1px solid #A8B8D9; + border-left: 1px solid #A8B8D9; + border-right: 1px solid #A8B8D9; + padding: 6px 0px 6px 0px; + color: #253555; + 
font-weight: bold; + text-shadow: 0px 1px 1px rgba(255, 255, 255, 0.9); + background-image:url('nav_f.png'); + background-repeat:repeat-x; + background-color: #E2E8F2; + /* opera specific markup */ + box-shadow: 5px 5px 5px rgba(0, 0, 0, 0.15); + border-top-right-radius: 4px; + border-top-left-radius: 4px; + /* firefox specific markup */ + -moz-box-shadow: rgba(0, 0, 0, 0.15) 5px 5px 5px; + -moz-border-radius-topright: 4px; + -moz-border-radius-topleft: 4px; + /* webkit specific markup */ + -webkit-box-shadow: 5px 5px 5px rgba(0, 0, 0, 0.15); + -webkit-border-top-right-radius: 4px; + -webkit-border-top-left-radius: 4px; + +} + +.memdoc, dl.reflist dd { + border-bottom: 1px solid #A8B8D9; + border-left: 1px solid #A8B8D9; + border-right: 1px solid #A8B8D9; + padding: 6px 10px 2px 10px; + background-color: #FBFCFD; + border-top-width: 0; + background-image:url('nav_g.png'); + background-repeat:repeat-x; + background-color: #FFFFFF; + /* opera specific markup */ + border-bottom-left-radius: 4px; + border-bottom-right-radius: 4px; + box-shadow: 5px 5px 5px rgba(0, 0, 0, 0.15); + /* firefox specific markup */ + -moz-border-radius-bottomleft: 4px; + -moz-border-radius-bottomright: 4px; + -moz-box-shadow: rgba(0, 0, 0, 0.15) 5px 5px 5px; + /* webkit specific markup */ + -webkit-border-bottom-left-radius: 4px; + -webkit-border-bottom-right-radius: 4px; + -webkit-box-shadow: 5px 5px 5px rgba(0, 0, 0, 0.15); +} + +dl.reflist dt { + padding: 5px; +} + +dl.reflist dd { + margin: 0px 0px 10px 0px; + padding: 5px; +} + +.paramkey { + text-align: right; +} + +.paramtype { + white-space: nowrap; +} + +.paramname { + color: #602020; + white-space: nowrap; +} +.paramname em { + font-style: normal; +} +.paramname code { + line-height: 14px; +} + +.params, .retval, .exception, .tparams { + margin-left: 0px; + padding-left: 0px; +} + +.params .paramname, .retval .paramname { + font-weight: bold; + vertical-align: top; +} + +.params .paramtype { + font-style: italic; + vertical-align: top; +} + +.params .paramdir { + font-family: "courier new",courier,monospace; + vertical-align: top; +} + +table.mlabels { + border-spacing: 0px; +} + +td.mlabels-left { + width: 100%; + padding: 0px; +} + +td.mlabels-right { + vertical-align: bottom; + padding: 0px; + white-space: nowrap; +} + +span.mlabels { + margin-left: 8px; +} + +span.mlabel { + background-color: #728DC1; + border-top:1px solid #5373B4; + border-left:1px solid #5373B4; + border-right:1px solid #C4CFE5; + border-bottom:1px solid #C4CFE5; + text-shadow: none; + color: white; + margin-right: 4px; + padding: 2px 3px; + border-radius: 3px; + font-size: 7pt; + white-space: nowrap; + vertical-align: middle; +} + + + +/* @end */ + +/* these are for tree view when not used as main index */ + +div.directory { + margin: 10px 0px; + border-top: 1px solid #A8B8D9; + border-bottom: 1px solid #A8B8D9; + width: 100%; +} + +.directory table { + border-collapse:collapse; +} + +.directory td { + margin: 0px; + padding: 0px; + vertical-align: top; +} + +.directory td.entry { + white-space: nowrap; + padding-right: 6px; + padding-top: 3px; +} + +.directory td.entry a { + outline:none; +} + +.directory td.entry a img { + border: none; +} + +.directory td.desc { + width: 100%; + padding-left: 6px; + padding-right: 6px; + padding-top: 3px; + border-left: 1px solid rgba(0,0,0,0.05); +} + +.directory tr.even { + padding-left: 6px; + background-color: #F7F8FB; +} + +.directory img { + vertical-align: -30%; +} + +.directory .levels { + white-space: nowrap; + width: 100%; + text-align: 
right; + font-size: 9pt; +} + +.directory .levels span { + cursor: pointer; + padding-left: 2px; + padding-right: 2px; + color: #3D578C; +} + +div.dynheader { + margin-top: 8px; + -webkit-touch-callout: none; + -webkit-user-select: none; + -khtml-user-select: none; + -moz-user-select: none; + -ms-user-select: none; + user-select: none; +} + +address { + font-style: normal; + color: #2A3D61; +} + +table.doxtable { + border-collapse:collapse; + margin-top: 4px; + margin-bottom: 4px; +} + +table.doxtable td, table.doxtable th { + border: 1px solid #2D4068; + padding: 3px 7px 2px; +} + +table.doxtable th { + background-color: #374F7F; + color: #FFFFFF; + font-size: 110%; + padding-bottom: 4px; + padding-top: 5px; +} + +table.fieldtable { + /*width: 100%;*/ + margin-bottom: 10px; + border: 1px solid #A8B8D9; + border-spacing: 0px; + -moz-border-radius: 4px; + -webkit-border-radius: 4px; + border-radius: 4px; + -moz-box-shadow: rgba(0, 0, 0, 0.15) 2px 2px 2px; + -webkit-box-shadow: 2px 2px 2px rgba(0, 0, 0, 0.15); + box-shadow: 2px 2px 2px rgba(0, 0, 0, 0.15); +} + +.fieldtable td, .fieldtable th { + padding: 3px 7px 2px; +} + +.fieldtable td.fieldtype, .fieldtable td.fieldname { + white-space: nowrap; + border-right: 1px solid #A8B8D9; + border-bottom: 1px solid #A8B8D9; + vertical-align: top; +} + +.fieldtable td.fieldname { + padding-top: 3px; +} + +.fieldtable td.fielddoc { + border-bottom: 1px solid #A8B8D9; + /*width: 100%;*/ +} + +.fieldtable td.fielddoc p:first-child { + margin-top: 0px; +} + +.fieldtable td.fielddoc p:last-child { + margin-bottom: 2px; +} + +.fieldtable tr:last-child td { + border-bottom: none; +} + +.fieldtable th { + background-image:url('nav_f.png'); + background-repeat:repeat-x; + background-color: #E2E8F2; + font-size: 90%; + color: #253555; + padding-bottom: 4px; + padding-top: 5px; + text-align:left; + -moz-border-radius-topleft: 4px; + -moz-border-radius-topright: 4px; + -webkit-border-top-left-radius: 4px; + -webkit-border-top-right-radius: 4px; + border-top-left-radius: 4px; + border-top-right-radius: 4px; + border-bottom: 1px solid #A8B8D9; +} + + +.tabsearch { + top: 0px; + left: 10px; + height: 36px; + background-image: url('tab_b.png'); + z-index: 101; + overflow: hidden; + font-size: 13px; +} + +.navpath ul +{ + font-size: 11px; + background-image:url('tab_b.png'); + background-repeat:repeat-x; + background-position: 0 -5px; + height:30px; + line-height:30px; + color:#8AA0CC; + border:solid 1px #C2CDE4; + overflow:hidden; + margin:0px; + padding:0px; +} + +.navpath li +{ + list-style-type:none; + float:left; + padding-left:10px; + padding-right:15px; + background-image:url('bc_s.png'); + background-repeat:no-repeat; + background-position:right; + color:#364D7C; +} + +.navpath li.navelem a +{ + height:32px; + display:block; + text-decoration: none; + outline: none; + color: #283A5D; + font-family: 'Lucida Grande',Geneva,Helvetica,Arial,sans-serif; + text-shadow: 0px 1px 1px rgba(255, 255, 255, 0.9); + text-decoration: none; +} + +.navpath li.navelem a:hover +{ + color:#6884BD; +} + +.navpath li.footer +{ + list-style-type:none; + float:right; + padding-left:10px; + padding-right:15px; + background-image:none; + background-repeat:no-repeat; + background-position:right; + color:#364D7C; + font-size: 8pt; +} + + +div.summary +{ + float: right; + font-size: 8pt; + padding-right: 5px; + width: 50%; + text-align: right; +} + +div.summary a +{ + white-space: nowrap; +} + +div.ingroups +{ + font-size: 8pt; + width: 50%; + text-align: left; +} + +div.ingroups a +{ 
+ white-space: nowrap; +} + +div.header +{ + background-image:url('nav_h.png'); + background-repeat:repeat-x; + background-color: #F9FAFC; + margin: 0px; + border-bottom: 1px solid #C4CFE5; +} + +div.headertitle +{ + padding: 5px 5px 5px 10px; +} + +dl +{ + padding: 0 0 0 10px; +} + +/* dl.note, dl.warning, dl.attention, dl.pre, dl.post, dl.invariant, dl.deprecated, dl.todo, dl.test, dl.bug */ +dl.section +{ + margin-left: 0px; + padding-left: 0px; +} + +dl.note +{ + margin-left:-7px; + padding-left: 3px; + border-left:4px solid; + border-color: #D0C000; +} + +dl.warning, dl.attention +{ + margin-left:-7px; + padding-left: 3px; + border-left:4px solid; + border-color: #FF0000; +} + +dl.pre, dl.post, dl.invariant +{ + margin-left:-7px; + padding-left: 3px; + border-left:4px solid; + border-color: #00D000; +} + +dl.deprecated +{ + margin-left:-7px; + padding-left: 3px; + border-left:4px solid; + border-color: #505050; +} + +dl.todo +{ + margin-left:-7px; + padding-left: 3px; + border-left:4px solid; + border-color: #00C0E0; +} + +dl.test +{ + margin-left:-7px; + padding-left: 3px; + border-left:4px solid; + border-color: #3030E0; +} + +dl.bug +{ + margin-left:-7px; + padding-left: 3px; + border-left:4px solid; + border-color: #C08050; +} + +dl.section dd { + margin-bottom: 6px; +} + + +#projectlogo +{ + text-align: center; + vertical-align: bottom; + border-collapse: separate; +} + +#projectlogo img +{ + border: 0px none; +} + +#projectname +{ + font: 300% Tahoma, Arial,sans-serif; + margin: 0px; + padding: 2px 0px; +} + +#projectbrief +{ + font: 120% Tahoma, Arial,sans-serif; + margin: 0px; + padding: 0px; +} + +#projectnumber +{ + font: 50% Tahoma, Arial,sans-serif; + margin: 0px; + padding: 0px; +} + +#titlearea +{ + padding: 0px; + margin: 0px; + width: 100%; + border-bottom: 1px solid #5373B4; +} + +.image +{ + text-align: center; +} + +.dotgraph +{ + text-align: center; +} + +.mscgraph +{ + text-align: center; +} + +.diagraph +{ + text-align: center; +} + +.caption +{ + font-weight: bold; +} + +div.zoom +{ + border: 1px solid #90A5CE; +} + +dl.citelist { + margin-bottom:50px; +} + +dl.citelist dt { + color:#334975; + float:left; + font-weight:bold; + margin-right:10px; + padding:5px; +} + +dl.citelist dd { + margin:2px 0; + padding:5px 0; +} + +div.toc { + padding: 14px 25px; + background-color: #F4F6FA; + border: 1px solid #D8DFEE; + border-radius: 7px 7px 7px 7px; + float: right; + height: auto; + margin: 0 20px 10px 10px; + width: 200px; +} + +div.toc li { + background: url("bdwn.png") no-repeat scroll 0 5px transparent; + font: 10px/1.2 Verdana,DejaVu Sans,Geneva,sans-serif; + margin-top: 5px; + padding-left: 10px; + padding-top: 2px; +} + +div.toc h3 { + font: bold 12px/1.2 Arial,FreeSans,sans-serif; + color: #4665A2; + border-bottom: 0 none; + margin: 0; +} + +div.toc ul { + list-style: none outside none; + border: medium none; + padding: 0px; +} + +div.toc li.level1 { + margin-left: 0px; +} + +div.toc li.level2 { + margin-left: 15px; +} + +div.toc li.level3 { + margin-left: 30px; +} + +div.toc li.level4 { + margin-left: 45px; +} + +.inherit_header { + font-weight: bold; + color: gray; + cursor: pointer; + -webkit-touch-callout: none; + -webkit-user-select: none; + -khtml-user-select: none; + -moz-user-select: none; + -ms-user-select: none; + user-select: none; +} + +.inherit_header td { + padding: 6px 0px 2px 5px; +} + +.inherit { + display: none; +} + +tr.heading h2 { + margin-top: 12px; + margin-bottom: 4px; +} + +/* tooltip related style info */ + +.ttc { + position: 
absolute; + display: none; +} + +#powerTip { + cursor: default; + white-space: nowrap; + background-color: white; + border: 1px solid gray; + border-radius: 4px 4px 4px 4px; + box-shadow: 1px 1px 7px gray; + display: none; + font-size: smaller; + max-width: 80%; + opacity: 0.9; + padding: 1ex 1em 1em; + position: absolute; + z-index: 2147483647; +} + +#powerTip div.ttdoc { + color: grey; + font-style: italic; +} + +#powerTip div.ttname a { + font-weight: bold; +} + +#powerTip div.ttname { + font-weight: bold; +} + +#powerTip div.ttdeci { + color: #006318; +} + +#powerTip div { + margin: 0px; + padding: 0px; + font: 12px/16px Roboto,sans-serif; +} + +#powerTip:before, #powerTip:after { + content: ""; + position: absolute; + margin: 0px; +} + +#powerTip.n:after, #powerTip.n:before, +#powerTip.s:after, #powerTip.s:before, +#powerTip.w:after, #powerTip.w:before, +#powerTip.e:after, #powerTip.e:before, +#powerTip.ne:after, #powerTip.ne:before, +#powerTip.se:after, #powerTip.se:before, +#powerTip.nw:after, #powerTip.nw:before, +#powerTip.sw:after, #powerTip.sw:before { + border: solid transparent; + content: " "; + height: 0; + width: 0; + position: absolute; +} + +#powerTip.n:after, #powerTip.s:after, +#powerTip.w:after, #powerTip.e:after, +#powerTip.nw:after, #powerTip.ne:after, +#powerTip.sw:after, #powerTip.se:after { + border-color: rgba(255, 255, 255, 0); +} + +#powerTip.n:before, #powerTip.s:before, +#powerTip.w:before, #powerTip.e:before, +#powerTip.nw:before, #powerTip.ne:before, +#powerTip.sw:before, #powerTip.se:before { + border-color: rgba(128, 128, 128, 0); +} + +#powerTip.n:after, #powerTip.n:before, +#powerTip.ne:after, #powerTip.ne:before, +#powerTip.nw:after, #powerTip.nw:before { + top: 100%; +} + +#powerTip.n:after, #powerTip.ne:after, #powerTip.nw:after { + border-top-color: #ffffff; + border-width: 10px; + margin: 0px -10px; +} +#powerTip.n:before { + border-top-color: #808080; + border-width: 11px; + margin: 0px -11px; +} +#powerTip.n:after, #powerTip.n:before { + left: 50%; +} + +#powerTip.nw:after, #powerTip.nw:before { + right: 14px; +} + +#powerTip.ne:after, #powerTip.ne:before { + left: 14px; +} + +#powerTip.s:after, #powerTip.s:before, +#powerTip.se:after, #powerTip.se:before, +#powerTip.sw:after, #powerTip.sw:before { + bottom: 100%; +} + +#powerTip.s:after, #powerTip.se:after, #powerTip.sw:after { + border-bottom-color: #ffffff; + border-width: 10px; + margin: 0px -10px; +} + +#powerTip.s:before, #powerTip.se:before, #powerTip.sw:before { + border-bottom-color: #808080; + border-width: 11px; + margin: 0px -11px; +} + +#powerTip.s:after, #powerTip.s:before { + left: 50%; +} + +#powerTip.sw:after, #powerTip.sw:before { + right: 14px; +} + +#powerTip.se:after, #powerTip.se:before { + left: 14px; +} + +#powerTip.e:after, #powerTip.e:before { + left: 100%; +} +#powerTip.e:after { + border-left-color: #ffffff; + border-width: 10px; + top: 50%; + margin-top: -10px; +} +#powerTip.e:before { + border-left-color: #808080; + border-width: 11px; + top: 50%; + margin-top: -11px; +} + +#powerTip.w:after, #powerTip.w:before { + right: 100%; +} +#powerTip.w:after { + border-right-color: #ffffff; + border-width: 10px; + top: 50%; + margin-top: -10px; +} +#powerTip.w:before { + border-right-color: #808080; + border-width: 11px; + top: 50%; + margin-top: -11px; +} + +@media print +{ + #top { display: none; } + #side-nav { display: none; } + #nav-path { display: none; } + body { overflow:visible; } + h1, h2, h3, h4, h5, h6 { page-break-after: avoid; } + .summary { display: none; } + 
.memitem { page-break-inside: avoid; } + #doc-content + { + margin-left:0 !important; + height:auto !important; + width:auto !important; + overflow:inherit; + display:inline; + } +} + diff --git a/docs/doc_header.html b/docs/doc_header.html new file mode 100644 index 0000000..aad2db5 --- /dev/null +++ b/docs/doc_header.html @@ -0,0 +1,47 @@ + + + + + + + +$projectname: $title +$title + + + +$treeview +$search +$mathjax + +$extrastylesheet + + +
+ $projectbrief
+ $searchbox
+ + diff --git a/docs/intro.md b/docs/intro.md new file mode 100644 index 0000000..c21df21 --- /dev/null +++ b/docs/intro.md @@ -0,0 +1,143 @@ +Nunc Stans +========== +Nunc Stans is an event framework wrapper that provides a thread pool for event +callback execution. It provides thread safety to event frameworks by isolating +and protecting the thread safe parts from the non-thread safe parts, and allows +multi-threaded applications to use event frameworks that are not thread safe. +It has been primarily developed using [libevent](http://libevent.org "libevent +home page") , but has also been tested with [tevent](https://tevent.samba.org +"tevent home page"). Nunc Stans uses lock free data structures where possible, +to avoid mutex contention. The ​[liblfds](http://liblfds.org "Lock Free Data Structure") +library is used. + +There are two main components: the *event loop thread and queue*, and the +*worker threads and queues*. The basic concept is the +[Thread Pool Pattern](https://en.wikipedia.org/wiki/Thread_pool_pattern "Thread +Pool Pattern description"), where the primary source of tasks (*job* in nunc +stans) for the task queue (the *work queue* in nunc stans) is provided by the +event framework for I/O, timer, and signal events. + +License +------- +Nunc Stans is licensed under the GNU General Public License version 3 or later. +Nunc Stans also provides an exception for the use of OpenSSL. See the files +'COPYING', 'COPYING.openssl', and 'COPYING.liblfds' for more information. + +Event Loop Thread And Queue +--------------------------- + +The event queue is essentially the "main loop" of the application. It runs in +its own thread. The event queue thread is the only thread that interfaces with +the event framework - adding events, removing events, and +issuing the callbacks when the event is triggered. This guarantees that all +interactions with the event framework are performed in a thread safe manner. +When a threaded application wants to be called back because of some event (I/O, +timer, signal), it posts the event and callback data to the event queue. All +interaction with the event queue is thread safe - multiple threads can post +requests to the event queue at the same time. The event loop thread dequeues +all of the event requests from the event queue, creates/removes +events, then calls the event waiting function of the event framework. This +assumes the underlying event framework has a function that allows waiting for a +single event - something like `event_base_loop()` in libevent, or +`tevent_loop_wait()` in tevent. + +When the application wants events to be triggered as soon as possible, but the +event framework is waiting for very long lived events, the event queue has a +persistent I/O listener called the *event_q_wakeup_pipe*. When the application +adds an event, nunc-stans will write to the pipe, which will cause the event +framework to immediately wake up and add the pending events, then do a thread +yield to allow the event framework thread to execute. + +When an event is triggered by I/O, timer, or signal, the event callback is +called. The callback can either be run in the event loop thread, or can be +handed off to the *work queue* for execution in a *worker thread*. The +application uses the flag *NS_JOB_THREAD* to specify that a job will be +executed in a worker thread. + +**NOTE:** Jobs executed in the event loop thread don't need locking if they +don't use resources shared with other threads. 
This corresponds to a single +threaded app where all jobs are run inside the main loop and no locking is +required. However, just as in that case, jobs run in the event loop thread +must be very careful to execute very quickly and not block on I/O or other +resources. This can lead to event starvation. + +Worker Threads and Queues +------------------------- + +When a job is placed on the *work queue*, it will be executed in a *worker +thread*. The number of worker threads is specified when nunc stans is +initialized. Each worker thread sleeps on a condition variable +(e.g. `pthread_cond_wait()`). When a job is placed on the work queue, nunc +stans will notify the condition variable, waking up one of the worker threads. +This worker thread will dequeue the job from the work queue and execute it. +The work queue is thread safe - the event loop thread can enqueue jobs at the +same time as the worker threads dequeue jobs. Note that the worker threads +only execute jobs which have the *NS_JOB_THREAD* flag. Jobs without this flag +will be executed in the event loop thread. + +Diagram +------- +![Nunc Stans Diagram](nunc-stans-intro.png "Nunc Stans Diagram") + +Diagram Explanation +------------------- + +The solid thick lines represent the flow of data, typically an `ns_job_t` +object. The small dotted lines represent the flow of the program, or the flow +of control. In the case of the signal and notification events, these represent +the program sending a signal or notification, but not yielding control. The +thick dashed lines represent the flow of data and program i.e. a function that +takes an `ns_job_t` object and is the primary program path. The *event queue* +and the *work queue* are thread safe FIFO/queue objects. The bottom of the +stack of ellipses is the tail and the top is the head, labeled "head". The +shaded box labeled "event framework" is the event framework (e.g. libevent). +The boxes that are partially in and partially outside of the event framework +are functions that take nunc stans objects and convert them into the format +used by the event framework. Note that the "add/remove event in +framework" function will pass ownership of the job into the event framework, so +that the event framework will opaquely own that data in the case of add events. +The shaded box labeled "event loop callback" is called by the +event framework for each triggered event. The event loop callback will either +execute the job immediately (for non-threaded jobs) or queue the job on the +work queue for execution by a worker thread (for threaded jobs - the +`NS_JOB_THREADED` job flag). + +The event loop thread and the worker threads are represented by large boxes. +Everything in the box happens inside that thread. The boxes that are partly +inside and partly outside represent functions (e.g. the functions to +add/delete an event job) and data structures (the event queue, the +wakeup fd) that are thread safe or are otherwise protected and can be accessed +both from within and outside of the thread. Although the diagram shows only 1 +worker thread, there will usually be more than one, and they all share the same +work queue, which is thread safe. + +The usual starting point is the application represented by the **APP** icon on +the left side. The application will typically create a new event job (e.g. a +network socket listener). The job will be handed off to the event queue for +processing by the event loop thread. 
If this is not happening inside the event +loop thread, the event framework will be notified. This is necessary because +the event framework could be waiting for a very long time if there are no I/O +or signals happening, or if the timer jobs are very long lived. This will +wakeup the event framework immediately so that it will loop back around to +process the events in the event queue. The event loop will dequeue all of the +jobs from the event queue and perform the appropriate add/remove job in +the event framework. This ensures that only the single event loop thread, not +multiple threads, will interact with the event framework. Then the event +framework will wait for events. Once an event is triggered, the event +framework will iterate through all of the triggered events and call the event +loop callback for each one. This callback will either execute the job +immediately or add the job to the work queue for a worker thread. This will +also signal the worker threads (e.g. something like `pthread_cond_wait`) to +notify them that there is a new job for processing. Once all of the events are +processed, the event loop goes back to the top to see if there are more events +to process. The worker thread signal will typically wake up 1 of the worker +threads, which will dequeue the job and execute it. + +Note that the job callback is called both with the data (the `ns_job_t` object) +and the program flow. This callback is entry point into the application. It +is the responsibility of the callback to manage the `ns_job_t` object, either +by calling `ns_job_done` to dispose of it safely, or by calling `ns_job_rearm` to +"re-arm" the event. If the +job is not a threaded job, it is executed in the event loop thread, and can +block all other events from being processed, so great care must be taken not to +perform any long running task or otherwise block the thread. diff --git a/docs/job-safety.md b/docs/job-safety.md new file mode 100644 index 0000000..b122afa --- /dev/null +++ b/docs/job-safety.md @@ -0,0 +1,90 @@ +Nunc Stans Job Safety +===================== + +Nunc Stans 0.2.0 comes with many improvements for job safety. Most consumers of +this framework will not notice the difference if they are using it "correctly", +but in other cases, you may find you have error conditions. + +Jobs now flow through a set of states in their lifetime. + +States +------ + +* WAITING: This represents a job that is idle, and not owned by a worker or event thread. Any thread can alter this job. +* NEEDS_DELETE: This represents a job that is marked for deletion. It cannot be accessed again! +* DELETED: This represents a job that is deleted. In theory, you can never access a job in this state. +* NEEDS_ARM: This is a job that is about to be placed into the event or work queue for arming, but has not yet been queued. +* ARMED: This is a job that is currently in the event queue or work queue waiting to be executed. +* RUNNING: This is a job that is in the process of executing it's callback right now. + +Diagram +------- + +![Nunc Stans Job States](nunc-stans-job-states.png "Nunc Stans Job States") + +WAITING +------- + +All jobs start in the WAITING state. At this point, the job can have two transitions. It is sent to ns_job_done, and marked as NEEDS_DELETE, or it can be sent to ns_job_rearm, and marked as NEEDS_ARM. A job that is WAITING can be safely modify with ns_job_set_* and accessed with ns_job_get_* from any thread. 
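+
+A minimal sketch of this contract in C, assuming `ns_job_set_data` is the concrete
+ns_job_set_* call used, that it returns an NSPR PRStatus (as the PR_FAILURE note
+further down suggests), and that `nspr.h` and `nunc-stans.h` are the right headers;
+`conn_ctx` and `reconfigure_idle_job` are made-up names:
+
+    #include <nspr.h>
+    #include <nunc-stans.h>
+
+    /* Hypothetical per-job context owned by the job. */
+    struct conn_ctx {
+        int fd;
+    };
+
+    /* May be called from any thread, but only while the job is known to be
+     * WAITING (idle, not owned by a worker or the event thread). */
+    void
+    reconfigure_idle_job(ns_job_t *job, struct conn_ctx *new_ctx)
+    {
+        /* A WAITING job may be read and altered from any thread. */
+        if (ns_job_set_data(job, new_ctx) != PR_SUCCESS) {
+            return; /* The job was not in a state that allows modification. */
+        }
+        /* Queue it again: the job moves from WAITING to NEEDS_ARM, then ARMED.
+         * From here on, leave the job alone until its callback runs. */
+        ns_job_rearm(job);
+    }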
+
+NEEDS_ARM
+---------
+
+Once a job is in the NEEDS_ARM state, it cannot be altered by ns_job_set_*. It can be read from with ns_job_get_*. It can be sent to ns_job_done (which moves it to NEEDS_DELETE), but generally this is only done from within the job callback, with code like the following.
+
+    callback(ns_job_t *job) {
+        ns_job_rearm(job);
+        ns_job_done(job);
+    }
+
+
+NEEDS_ARM in most cases will quickly move to the next state, ARMED.
+
+ARMED
+-----
+
+The ARMED state means that the job has been successfully queued into the event *or* work queue. In the ARMED state, the job can be read from with ns_job_get_*, but it cannot be altered with ns_job_set_*. If a job could be altered while queued, this could cause issues with the intent of what the job should do (set_data, set_cb, set_done_cb, etc.).
+
+A job that is ARMED and queued can NOT be removed from the queue, or stopped from running. This is a point of no return!
+
+RUNNING
+-------
+
+In the RUNNING state, the job is in the process of executing the callback that the job contains. While RUNNING, the thread that is executing the callback may call ns_job_done, ns_job_rearm, ns_job_get_* and ns_job_set_* upon the job. Note that if you call both ns_job_done and ns_job_rearm from the callback, 'done' is the 'stronger' action: we will delete the job even though rearm was also called.
+
+While RUNNING, other threads (i.e., not the worker thread executing the callback) may only call ns_job_get_* upon the job. Due to the design of the synchronisation underneath, this will block until the execution of the callback completes, so for all intents and purposes, by the time the external thread is able to call ns_job_get_*, the job will have moved to NEEDS_DELETE, NEEDS_ARM or WAITING.
+
+NEEDS_DELETE
+------------
+
+When you call ns_job_done, this marks the job as NEEDS_DELETE. The deletion actually occurs at "some later point". When a job is set to NEEDS_DELETE, you may *not* call any of the ns_job_get_* and ns_job_set_* functions on the job.
+
+DELETED
+-------
+
+This state only exists on the job briefly. This means we are in the process of deleting the job internally. We execute the ns_job_done_cb at this point, so that the user may clean up and free any data as required. Only the ns_job_done_cb thread may access the job at this point.
+
+
+Putting it all together
+-----------------------
+
+This state machine encourages certain types of work flows with jobs. This is because the current states are opaque to the caller, and are enforced inside of nunc-stans. The most obvious side effect of a state machine violation is an ASSERT failure with -DDEBUG, or PR_FAILURE from get()/set(). This encourages certain practices:
+
+* Only single threads should be accessing jobs. This prevents races and sync issues.
+* Data and variables should exist in a single job. Avoid shared (heap) memory locations!
+* Changing jobs should only happen from within the callback, as you can guarantee a consistent state without needing to spin/block on ns_job_set_*.
+* You may not need mutexes on your data or thread locals, as the job provides the correct CPU synchronisation guarantees. Consider that each job takes a "root" data node, then all other allocated variables are referenced there only by the single thread. You can now dispose of mutexes, as the job will guarantee the synchronisation of this data (see the sketch after this list).
+* Jobs work well if stack variables are used inside the callback functions, rather than heap.
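+
+The "root" data node practice can be sketched as follows, again with made-up names
+(`conn_state`, `conn_callback`) and with `ns_job_get_data` assumed to be the
+ns_job_get_* call, using a PRStatus/out-parameter form; exact prototypes may differ:
+
+    #include <stdlib.h>
+    #include <nspr.h>
+    #include <nunc-stans.h>
+
+    /* Hypothetical root data node: everything this job touches hangs off this
+     * one allocation, so it needs no mutex. */
+    struct conn_state {
+        int msg_count;
+    };
+
+    /* Job callback: the only thread reading or writing `state` (RUNNING state). */
+    void
+    conn_callback(ns_job_t *job)
+    {
+        void *data = NULL;
+        if (ns_job_get_data(job, &data) != PR_SUCCESS) {
+            return;
+        }
+        struct conn_state *state = data;
+        state->msg_count++;
+
+        if (state->msg_count < 10) {
+            /* Keep going: the job is queued again (NEEDS_ARM, then ARMED). */
+            ns_job_rearm(job);
+        } else {
+            /* Finished: free the root data node while still RUNNING, then mark
+             * the job NEEDS_DELETE. Cleanup could instead be done from an
+             * ns_job_done_cb, as described in the DELETED section above. */
+            free(state);
+            ns_job_done(job);
+        }
+    }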
+ +Some work flows that don't work well here: + +* Having threads alter in-flight jobs. This causes race conditions and inconsistencies. +* Sharing heap data via pointers in jobs. This means you need a mutex on the data, which causes a serialisation point: Why bother with thread pools if you are just going to serialise on some data points anyway! +* Modifying jobs and what they handle. Don't do it! Just ns_job_done on the job, and create a new one that matches what you want to do. +* Map reduce: Nunc-Stans doesn't provide a good way to aggregate data on the return, IE reduce. You may need to provide a queue or some other method to reduce if you were interested in this. + +Examples +-------- + +Inside of the nunc-stans project, the tests/cmocka/stress_test.c code is a good example of a socket server and socket client using nunc-stans that adheres to these principles. + diff --git a/docs/logo-banner.png b/docs/logo-banner.png new file mode 100644 index 0000000000000000000000000000000000000000..96ea1761f4c247a8756dd15676c763c8bc3f56dd GIT binary patch literal 6193 zcmZu#WmuHY+g%Xp?rsF>?)uRoDN>TsEe$S>!~!DST_WA6tb_#2W&^A^G=(0?Emv1P?KM)U}i`meJ@4P#E>>p|0Q&os)sFkD`a0 zo1ME4_!9zA^s=+|v9o3Lck*#$R8`l~wdJ+L01Kt7EB$8>Fn?eUH82>UdN_!XMtdCS zOqdn}D`7+@3R=MtKw+5qIWrY!I^R#O*fQ#rdzP&9ciXAvqh5;ybxn?q%?$NaX{!V_ zdY~i14OgWU&RHG77!TiQtgjF4KAc;GQurM$DNkJI3t?Pz_;+SY`5$_8W?$Q@C@7(! zqoYSV5b7JE;o{;ZK&U9eKoR5z1J%|F474BJU|=SNgF(dj2n-f^@DX&CFz|j{MZCly z1(?z|sJDUw>R7%MI|q|&{)O?|S2_4Ew*`eK=jLF|4&!UPi0Y(4+se8+6-`YXQ&ZC- z1J2dWO>{9av8$UKW^r-Ko}Qj8=|DmIk*x5)e_xA>i#I6PZWr=hWqiwKk%wH}?9~%d zQAu!M=`ttF+cDGhtG!lRUGV7Y150|IZR@qPwP8^iH2DMHN6&1akJaF(D)GL;6LdcMt<S&U!Xg>m+}zZ(wN*Sl8_mzao2>8?f2!zmrq4Dz5HvM6o9qb8 z;+E^f`r;|pr^+>d5AAHnUY~B+%r)7?CM0}+i$%ikwV{T~AWO@`!{g}e{Ov3Bk}-7< zymWPS!)|Wg+`hzf&AXm!g`ji~3}ASDrmd^18$q>gz%ns03E0Z_7e7f$BfYyG4{dY4 z)2>F8w6u^G247Gm(n^%DD+(;j(Kv*Bu}Y>%h>3}rJnHXP3A#HTs5zr_RWLWFYja;5 zN`BZB>-x{aq5zRfq5qN8dvl~Gkxq&+bNKjpfWf9efvRMyqrWJ*R>0{sRe=J|-;QM@ z@5$-u#&kY*c8u)oY?UT(zog{kareB& ze33oBF}|TdO8=e=^G(xuz9rsUQ;f`0j>7imMg|723s_iKdbbNoN|-Bhz+wy2Z}m-2Hdozbk3qJX{UPIr2Y(d`LVu%A`W)<8Fd{&?!tL zVb^997AB>Br^091;z%@BAmc)T1CYze&w+{X3M_m>r{!Z16-|xAudRxh$97>B=D_s& z{M&WO*~Nv3kkE=%$Omt}GgxZr%^pvwR}EqqwHI9RBxtPE2q zmUnUCip8|Bu=sm&qM)XRxjC8}1E9(xDfvcz=)ZFP)zfjgfsfBW{9S&yrvN~F&mb2a z6N3>H985$+6kb-wT4UUZT2N3R`hh)j*z@kn)2uUy%FfPCmxI*N$thf=G`zW4$jsav z+&iA7rDXz*7(OUPPz6e6X0%(-)_GK192^$rl5{R3ue$W~^tnD%GoUL9691eHf8q3eK z$SSX_sw%M?&PX=kBr}V-4Kmck2Gti68>^wAA^z)U?MnwSR8j@2=tV18V}+vNKeb=K z=AY8WQ}9*bQ1B*2Mn+m}Zf>doTpH-=%D1+*mKkuW7wb*{{5VcmXu%oeAR0P46lR`X zS8F)bqOO$nOGQPDZ}w{2x7YgOeJ&5pcMlIMKn-znag_o>&^Itho5Ma^XLtzc#%!po zbJr!zFDxvaycNa`LgUz2Z$6VEBK}!tuDQ6lSVFwAKJb1g-N=F{tq`E%kAfhqG8<=ue`d?JP1{G)+afz9Sfws7Ey zE%FE)!X)zc@^-|?#DwY{l~8o)a9>{(DRvYhy{)Y+BPb{+U^uWh8gFKDa`Gr^4|?=f zl^FKcXn?&ih*@$FEjwXvo{Nr&AsDA$AE$rnoPs$ZvaW7BB(x-Wgr&+1=FCQP$a$46X@)#Hx$kwh0)6>&H-~HvT z2UWWT5qhEj*tkVks#hmP7;o_2hg?_OeB-WM$P@b8alURA{przB(aNN=&AWCbo z!)2at#+g19&)2Eeb$?#J10dtGGsX7B|A@Y|r6ozDyaY6a$%BRVogc|8X*oI0KTp~( zkh!;pp(m+@lt_Gh{EGd3*DpcWf{0(gp8NZYArOeko#_uwh^iNGNmgPkGIH{Z+z?e& z43BlpFk%vto$l~Q28MR{P<|#ypN8@{RT@?oo7U#+w&W}Y$Q(xzT3Ocm z08Alx+=GIoRmaCeE`+Lm($dq9P2IsglBYJ}Q@`uqznboe#874{UhMEct!Q?fED7Op zd^tEiZ%wB8qqggohsq-{F0KUp_oWPZ(AF4Tj$EiL-;NnPK(EF9lM-{W|8j8q@|%Lp zOjT`dZ8$vv(t5cwI4@vqY|JnalOtV2GUQGiYzd|rrlhpc=C*FO-0ra)MpR^0UD07G zvdAVfsIKKz8-3^ueP9tCH77)l@g=NZDS6AOAwEOJnYs0}lE(bsNWPPa-77=0M=O?72 zqkCf#5Fp-py-CxIU!(BbigPqzPLpdG>Ve8SbJ~7Mpms?B5rL?h6B92U70|E}%ahzl 
zcR^L;-0_0O!SzNCXY%2AnZRH$ft1JS=n6~kZ{Oo(P6Uv?UUzvgZGxU-s01X{kh0r7SP&pD5ySuhcVjk{r$|6lGFgYB^^WCLpxz@ zAGF(rMb3}bTB?wOt@0iWiduTcV>>hAhfO-rkF zknc<<^d|L%CoL^)Qcg~s+d>=Fpq-#e*ZunnN-4h@^C3-s4AYh;+Al@F|4|JJf)@p_s3vawn4h2LP8@HK$A*k#3pxC#QJMb=6$95`Ll!O~D#|P; z$B--Lp#pU79TIuGTV0Rc^i2zv6r03Lmk7>0ARusOkei#iLZ`a^PHRSp znj|v=S!}E;-AU(Lb8S^;-qFuRAYwu!G@G299DZ`*Rb$!JIo`UoIWa#U2bh~hRFs^{ zutwg_uDnv|lWV`(GcVt$_Q*)RfPgM+f+Hhq*rg-)F+EZVrsGk3;>tW(h(Iuq^O&-7 zSOA23tRpDErX&PAwS)>dP*A%ov~tCp!W&Q#jFqq=rd6Cx1hFUAmb@FIRa{OEwZ-+- z@w&haAthxjf!rOTudnZHy9WaReC@oKRty|2t)#3><(uusrZBA~;yyU+scGTP3bk0f zQi9djf4N%ed6esullXigCG$Qyzn{)Pummk4OUlp=a#2yD)G5pAIy=&4>5oS(nuNf5Kkpv1uv z6Y=vTywQ#7EtEkM1U-2Y9Bo^hsC3BP>?;{CEx8Bzun_|pc2tpyMK!_;zcNLQb;7yT ze*H`=laPWU=Ff1Z&FZ(P*u=!>($e)*mhRub&F;=V$cdzUMP4k-!ULN}89sl`wh{o{ zuuQtSJA>Pm;ThWY;o;aVQ%h@W&*CDT4vR1~Ug!Od3lO=?e0&7T%F6b`!w3XfCa*cM zPZEWt`HW$DX;zvad-v|Hv+EL^ie0Bb2@35Py~)eVn^;`r`6OF~aPDD0!1}lhc!xY+ zUgFhvOm5=_2o@HWaf1~~#k1^Jh>gNgv{?iq3doVtY#N}_e*Ez&{m)GROBa_o4cjN2 z$`x&Ud;3SnX@W{=;{q>lBp%?29w3N3SGrM_ni}kXmE12qmpFe`NR?7FyV35pAPOK| zTwSd>f(LoiCRFsZ1h^0K8GY3*<4 zCrw;&ZB1YGoP9#2LmztY!EX&n=IZWrJdh?T6e#494`Kq_fzNP>-{0ZWOFLe4WQ%(; zi@N`g1txF}$hA*E1mxuAP9hNHg#iM%I*+0W7!u7p{8oXm4mceZt37$Vzr8JQXh=@Z zXW`mn)ys<}{^rc_ZUsY*KV^ru6tBbiLU687NzA%|q)6n^h7Jh;eiQ?8Tb1@Y(=z0# zDfPSXb!9f$B8QQSQL*I>WSe%rWVsxF2+$5UTCy_I(kuJ>Bd}5f&cZ5xYFgR^z;dgH ziyfe0paXCJ4%owH-(|wB@xlSVkYk)%;-WDhv8ZBA+~xSDv?TOzf;3fIk^xZY>5z+y zOX1uh$#NXV^dDn|y|q(f)hntqzq`CVrH@j9fq{7@rvCo^q!bi3gDGr=J8#amC!gQC z_xAQ4&%4wEo+iMKLM<;ZzXAd)42$GR);sU3zt+w7O(jHD1_q?SPB^eiK`&~Sl>0O6 zG$0dJQ&n?J+2D5jhVlnuTSR=udUL~0}{N-gU* zeINo#Doeuq2{6`r78Z%2$UVy}a2rQoG4N>{_)v11Q6Ek+yCB=yb!}u9j8)N0OXgXU zzMSRNX)g^iqkPJHO84@(^z>+(y;~1A>i%Q)q(fx;EDx<`I}qx#As1Za_gp zEiPExQJ9vNCf$6r1j_bho9pV2(b_EHbV#}WQ;SffFN+C~e;0>~T+WY4BhhFIQo`3h zeG*?HV_;)ri*ShWmCd`qzt7^gMy&vT5y;^h0424wLEz_H#87A6sqbZc+xdx{$Ch<) zVCq!?N<`dmpR&O2uiLX%?vX^3GIrC`(-O@>|BlAZg3HD6@h)bKz-u_-4fUe|n&f5O zw)?PxIp1ljfFcL-D_{g8d^WeXN;^7e@bK^|Yiiy;d!qQ?f8l+aAGwvCovX~z_kqnD zOFll(5S;BexWwJ~S zgEidw1ij=9dAMC(H^Eqr#@ZZ1ZyPrKI5P4qQK%~EXN?@!g#Ih8Cu~PDii*aYhc^9G^NnJ2wPfdnGdb zE>ln4e}yuJm&=^P`O!HKk(6YW zVc;s;6-f{jQ>j>XN(j;2nQgpzEx%Aq@TW`^JNg&ydfEm3zU}+S(!#f$AiOy_JKI2{ zXJn`x92_7E*c}IS7jn8XS%cm7!vcT|$v0<}eIL*OtAkdyCKIFdBo*fW zB$f?CCkO2aGUwo(6QlVBH~z)UQCgp+TS=~|pJ#mmZ!0yC>A&^C$JuelhcXD!qyBx<1*<)5*LIVO;$}=s# z0c;S_(8Py@K>%I#&dpJQ=nX^{@_Kqi__X3-K!bUmo%?3M7C{^RmluLP#%hNtsOfT_ z(2SsAp1IOq192ADM)eJ>m7JT)6~M(>jK+Zu$eiPbN&ABJU+et-{rlf9$HsB9v&I`+ zTc_?(q6SzK`8tq(9M zOYYu`)ehFxU#266Of#&vaPy-bPaz~DQ0al|&_x7DmLPVg+>BzXWvV<8uM%h3m5%VW;lhY?&|J6+oYfd#<4Y zg7pNZ68UB`nML(q5)6`)UQNEQZ_70^u_e`&ZOUZdx_}i-T{t(Rw%o8;;%j~D8#)Oos9(^4#kCrH}kj kT-g7cV*lURoATyL4q2}FKzqIb`p-8%n@=v6ZMRm0mohlW7>I~X>ghzNKykHVm zNoyOij&?el#-0SA?2^j4^E?e(F)^DnZdS{ER`# zy?PU4RB(+&aiL5=DNE3#HxBw=!@aN(&DJ(FuU;Jtw}EFQEWM^B7-$PF_kSnUw$lH# zMPFY`l47MAS{-N%);6|;mJ?>Hnw0xLouI1gTGs>`Lg7aLjO4QbwvPrTu-diZP@6v#Zde&? 
zrTWb)R`@pmAfB8FYRI?aiPIGU&n#+6S82or*$rb5Q7cbR`4C?XdK# zuSw8;+{t3->i1=1`h&z5N`qBM!7SYpOqRQG!MFsKPErl}Hh1Ze~P1%7GiG@$BU)f-Ohm1G%a+loG-Qrt zM;VJ6QNzlSmf(V;S$9-p?x=|L>3t%?JzP7j^*Pw!iQW?@dL!z;-fMyDjk=WfNYAlj zJw3;Ia(no(o&=_MYe#tZk#4P9MDSxpy1S1gL?scYLHp2qhav?LaVUXn2gH8j4O*^(bewQf@#l2$pvdZ?mqhs0KifYF*H9%QzuX7^XRh2Q73#Mv#aS#bgd)dY3n15SQz4qBD{k z5uLz+^iBhas^^6hwXVEQtxID?iq27V320Dktkjv&sk8CY%?hLw zQ*Jf^(PUx@IniY5CZ0xd_32bHn#`Gt43; zOiU45nkGdOv&jO(G+?*eL1)oXzXH~F31MNj9E+-3_Kk%gL{c|nJP}uhz}~6v=cK>@GCOlFX3Za+*qGQ}pNJqs?WuAbV6El@LidHcr5-}tGZ^VqSxz2?xJ<{M#M8s^) zZT!Q3oyXNZ5dnrRxNX=&N^ILa0TOvx zd>p(A9TH`Pj-3%YXd)FlpwpoPrF7_kfqRmnV`qmB*zC};KNdQ$<>R44=6x)5?5xm< z*}1leWrt48W9zZ3(1{s9GD9bp5ju)ip%Y7lPAnNZ9d18uyW3D^q0qLQv&8QYgj+#j zh4_4S=D(+h;Tmmcrj(A-wbi&gIaph8U0ZEapgq_^bzrjgjcKa`Evwe9@tgGyv7jDl zPT1cX`fl*5rRJPDGq0Sx)Lh_nto{m;EvUaDp=u)6SE~=9%>0Tw1t|( z{-(AT)cbgvn999ANl=FO?X|6Kp%v}^rr?S;u*;g))vj({9z;I8E=YMN0pAHNN5(p1 z?%cH11g{K+8dtXY=Ok0egtp26Q{7k@LO+l|s!q_rCN6JBf=I>)(jhM#Hl_(>UAD}- z9ZAtSOIob_+^lVwbZy z%=SNK_J*>>kX&LqHEY> z=8+dZ+SXWC=5ddyxph-7Iv_T}2jivK#J<4nnGby=OC&#j3!BEQ+*_YIO@s4pO995C zU!N0t_|5-1YYm==?Ed-k8szF((c{|7&W~I2;};K|8XE5X!%MBRSUDST-@N5OzjfyD zvw>nnf&IlF?57cZ`+At_LQci+JdP2y&o(2?VtDQr5A%}UEdN>PiKl>;O1|5Jga;#? zvlGtAWtu$C-}&*K^ClL#il_eNwm5ZdgVGTHfQ)9S`|deqp7QIS0gk@E+cXAG=97L) z<09%ndypJ;c#zcd8%Ll&o^jX9G>*?GnOKGq6JZF-881GIJCBzGa!c_z6fENi!nmA? zd#TFp5WujZv6PYB?+EHhuu%!bljx3py@Ml%l;RBszmxDw99$19 zUVi_?0OhZz9BC;@bx1Li=GF_+C&h>(F<1eH1>I~P!qm)zO-10*kByk2Nj8UxzlOhu z4{y4yZlW)T8Bj~gJebWVVF*8QJz7WFV`PZL9VEFh8J$FQW=WnQ4CYnmvn4-;g#WP!@xx2^ zAuY_PG#`!g*GkC$SM0~akQ8Bfc0v?8(LTK}XVQ&NTX4O2GQ$$IWuB$w;caA&scN~5 z*PlVK+OHH-(vfV9LhfIXQN~ZM((&4y7I@jftOvS!M@IVgJbIG{>uE>M=5*1Ivi|DU zUmZR@Vh^5q|IJ^lUosINOlqoQzmb>sPf;1?@H=&m8zWFU6wiK6O9K?1S#}2$`qsOZWP|K_709y!% zk%q?s_7R{K!J1S zZC`@M3b?&U^%L)hHFxh5)Y1L7Ah$X{X7P_+IMg>B?~AX6*>xEPZ#}fXv5rzm~_c6V8<1gW5rL9TT&z+DMu(1jOqKs z6v_MYu*?yRdD2hwYekCZ|JYAk@*z!ae5ApDvw>8uO9A%pCfT%h??@vbu;)mp7Gdc$ zYxlAFgglW#tO3UMP}9TXF(@M-i729_q3Tui839C{_b=}4WzqIk+PYuV8z4<^L7Nj9< z&Rx$P>>as)02EX3Sn&`>gV+BK+c)1e1t%AFPPiM*;_(#_$G0%H)~2QTOdazvKKn3s zAINCuuz$_kC`I^i3?<`BhR_5gUP#Wpa+NLRz~(PcaF<@SanRWU3K;(?<*y@0@2{)$ z_{acy5g6(EgOo2-Ae?w@FL*vw3K5?`?-l&PJUkSiq1=)`VIX|P)zsE|GD$jCcdJ#bcB-)Cy{7uc4&kDpFrX!o8&SI92=xG7Y>A?4jIjHi>#Lfw<=!)jO>@ z!taq#y`h|gP<)l@J%ddj^%jsQkLn2$(Yv2$azs^X9l&$`iNHk;=uLv}5LlzYA~XcG z6NHpyA_IgJWFqQyOV30`NQZJ1%6?Tl>aE9@iF`mh6`9D}gv`!F)L`74iG)e#yO~Hm zA+Ka0+(lU`AIMYVM9;?n|BSR6Gm#lUcq9)_aE!!|QHqImD2?#4k4WoDFJAE+EriKU z0@%p$`{z}-eQN!rMlTrUw9P3nB;xT_TGI8kur}Bxv$%>>yH=g3#MqCBWQVHO6OZrYtQsGxox`rm)L~a8Fz2xA;#8UBWd_EZPP^!kLp$d_h_jc2?q3=h zZ{X6Sj<77v@<`1x=<`0#xpJgoxuwhEh%6lyT^9C9NZoXj(wV!_yi*TLsOwbgs{367e3GM`-{9+n755dWi@{3tnDRdlLI({)tVWb(EV8}00 z3QbAKFAjs_GV%);oYXjek@$4ZL6cw5GAQ!Pr%?E|PkzBJqsIRechn}on4jVPzv~yw zn@V>rJX7l){Bt}L%RLUy9M^CVpk_?WbsU~iyz)#;94F5xAmy2uW~V$8(_6AXFAwqu%-kY^kQBSxM9lk$xHSv(VSJQKs6Jn)J8XYfeUgN|o9+~33rNY>D;lMP_gQ8u7Fg7S2>eDY#oOHo#$ hw4vNX@(v%9e{MnbEhzX41a+Z(WR1=>4L7o%{Wn)DCQASS literal 0 HcmV?d00001 diff --git a/docs/logo-square.xcf b/docs/logo-square.xcf new file mode 100644 index 0000000000000000000000000000000000000000..ee08202a4927f4c8c767ad857084fcd2d700fb20 GIT binary patch literal 9136 zcmds74Rlr2l|J|0cVAxqf54E05T1_|Nf%N6i5OY_EL+j3qWC)t?S$kJUI{OlgjG^) zBeo+m9okT;OY0f~sHL`c>JQ^|6tLnr(V>V1DvZxJp#%&+ff^urN#4EZocZ>-_r1Jm zWYt--Tx;gS$=Unc`<#2u+57CX&rRw>t5*Bh)-Lxi4>bfCo-HU1MBvFs(KP%RhvK_# zBV$zXj7RaH6r&WnH0fOe{ja0Wu1B|(wT-J*1;Z`i83_xnZwl751nd0Y4z;ZCe|^z6 z7Lz1TsfJe7)CVi;n?iMjL0e z`3&`pLCkbT6==F_jHU^>|MLlIy0&?BO>HP#@1L1?7r@rBKo_fA6ArcbL*d#L!DedT 
zxO{nYuq8P_YW-QrK}I*Fb#Nc;4aU+QWGuG`D}F6wf%_5?)a58sQD}v;uWtygUe;LC zROg>E3p~^52Ocj9*-N7MT$&oByEH0RnnGu!(3vT8RtlY+Lg#{}e*Gv{p`@YE`{+sa z6$fEDCZnFj;GwIxkH+-P#OF$bl}W)Y-4aZe%3&6@tSC`}MfIq{E=CnfUT84HZr2o_t9a4A-E@I*1VoKaC`U9lL0T-@+1a6Y@CcJd(q_?UhcUuL zT&5V3NTfqkl9D8Yd6&pRl5}<|nzKl!-pN5vq?J*qTBEV44kjmI1Rbc$#;VZ|;n6Qp zPsaT6q+=zMO~NPw@K47+0X^W_=kuwgeF9`3lbM%k0qrtVB(1}283@3+y$}k?G8jw) z%PB@#hIyzNEwk{QF^lyxnsDtRJZHScn&OpRgg8@dx`47vz$k?>nvz`trKLg_Fu$@( zARvqpM!F2yMM|M53E9PAFki9@n9@>3!);3SQVyEzf}Yiid@hAmBD)CGwEmx|(VB3q z!(Uwg@7e|Xrqry2Wh%{q|A}Q{zT>dWaV_RrCgwQ~%P3x1CMJ%PWfYLKOiZ(rmWk;x zj@70e69~kN5yo~IvW#uVG$kR+I1E;dECVKK8T*S^CgxZshMFw!nfeP@Bw<0vGHvEP z6pQ^R6(|&oGm^m@%$F9iv0bsL#*Ww~4a9kTl`i2&O(RCc_FX#~^W8=~McI;M#4buk z?4o4EzBrXHMVy%`U|W4$3aLphF?k9GUH`LS*n)4Q}Iyz5Ap)+HkNVMV&Sj<}-yh|{5c=z~L% ztcW<|;@SbRpZEibF7{OQqw4lTt2}G^W!U zI}Bop;jDOW1gpcO2aDB-BQqN;?5nn`f00)-tw9-1f5xqkpnGT|N7(i4zb#Qj3cW9j&%c0=inWpG` z(4g3OsWYQPM`SixmUMi|WW~wzg?z|4RW42?ya~rkiZ_*MWy+fZNpAu|-c*Kt2X6}C zq)BI@jAb(;ki$d(TZGOb@+4-{31Ron4I61&skd6Ud_m2Hp;zL7gu>0p%y?Bo`{FwBs{S%7v2S zGcE($k6q`;_b0qg*Fu`)m;#?K&~9t;a^WPRsz3>N>F$*HN@`9b_fSbz%wEX*2zF zIB!FljY5ZW+7iFNCfp3_%Q=}Pl1aRaPTjHYQgk1uyY?#7&IP(9xVELTp{6z11c-*L z-8OAiP1DLXtNnp$hgevRJ8Ib99Qsc1>ZO6Xb7x&SZ)sqmGqLI`NVc&03Rg9Ni|X>m zaEm{%1k+A%mDJWhvnp}7c61c#TwYg))&?8s-dhuH3Dq=&YMTA6>K@!s6Rux_E51Jv z3|FpE9WJYB4p!EMnu4`0p~kSkp`{7!K7GeeUYtKm(DnRVYnod^%Uk^o!R0Mrmo=`f zT-8_?#K(-aLHcMwz_&wn_}nma-n@}f6TBiAs$bFKpPRV%%Cjm@l;(Mt0OXWDchV|+nokVg_&Uo>VL}KvwEQi_NrT+m$GyvdlX>2|duaoR{ zR)utls3M{Uh%%Fu!j2Ml4`KfdiXZS%;|6-&juJMH@!UUyw)&r3ptCtltVCdo_rAFH zh9WkTiJ!_0a62Eq&IgC)-x|c+>~l}eqnS>6au}yOf3X~lRSEXTYh{e%VWG^n2?1)um-=YLVmy} ze*}mhGJqka+O^n=4xECY)mIgI&2bfXZS27WL_2ISL5hv+E6kqt z_$M-7^5b{1Y0UE7_3UX{ocEiuupWK-+|Uzm|JPY-;6!BiPwOi1nP-&uc=odMmn`|= z%ZE-44)y%;mFC&3i1nLyY&p|4It={@jC)?Cb$njQ#4@ay z2t!cBc-~pmJYEFIl;UY9SjG{!aS;=bP?LKhz`zE_Q%3fvBd8)lyAp_JFdh452S-qs z;w=ZipYTf^Tqiu@;1z`D0?%Ya8B5l``r(Ow%3n`8(gKp|kYXhRTQA@{hZuGw2Ft>- zV4AIi*qZ4WQx15{qa8am!KN|sH?a56;f;4!P0UThXDCln=EZJ4iGlDFH=}o?Jw}R1 z+((iNQ!q(*=P1eZH28HGmzX|U@Sh}@L4t3N7W|n6IV5;$v>;9bvNX@=8!b6S5=wf- zDvUod9WN8_kzgeT%qN{6E%`Ad{7*UXA6~EzX<x}HQ$+tdd!Sv#pl#!sXz*!0p?;>+dHuEuFeFn~Izm`WyN1``!xqm@M89%uaUp-R< znXG^I`p%x=;odzzyWNX;+LpFyr07RkUs>}n51$^k2hM!>_RrTXnTW65cHk9`jffF? zXtH+>AR`C!7>v(vwBQFA8o~gKn0^$mI{$i2Wv-D~a#d(sAG`&y+=ZVaNQ*LbJ6hT1 z9q$0KrMML9Ahy#ww&Bw#X1t036Y;B0rTE5RUj+P)c^;)D#W z^I*KX#5TKh{V^c?jSO_D%WP(Swc({cx(9AmKpwW}mEVuxRd7EKG{!T&=rG12HYxTJ z#WYyN;n^RybRm*U&Ixh{9vATb&=b z`0rji)H@XKjlA^TIan<`OHk-H9`w*R5c(QU5B*MLJ_;p@<}LnkEoyZ7C46#6n#k#8 zc*aT-k(H=4@h7N1FHKx?@icMX%sF#rAWd{~M@k8iJN{K@S+J=_rETA63^zhdvrgua z|KC%|IdjHNCFhQY|FTqa;(D?ko`e^{XcM2sCx#nP)EBi8*~A@;mEFbIrS;UHE&I)p zqW>)&m^8X=T$x~O-|r_$-kXjvM=PjP0SWhbCaNI9is? 
z*uA9ohzsG@F31*+kyaI-RK*kQ=C3M-o9KJ?IV;7iQHrpjuK|hhGmR5biroTBngPxfqTBm9+#@ z5MD$%koX`8AuLm-YTbx$m#Aq7fGF}=uQS%~LP#y%1ZhuaXGV})33-|wNg`a2qH}ww zaIXs?wFrac57Q*6Yn*0T%Alv7brGEM%AUuPJcl{GN*hVT3nSzLmqZZWgK$sXfxI_K zlA%1ZO{*DkOL9@q-E82h_J2P$6g&3z`YHsZL2d2>FC6R{z5oXl<#?@l9IL^re~;sv z?<&Wyo$a)67rMpkOCXMKVQh^}q4`V|%Vm7dVVpjY(aB-|nzc}h@bNfG#+MAD3rI2{ zIrrMtwv_#wt}QkTu5KT2j({x2zef4%@X<%BO1-%>0J;bacm7ezS4!Ycys`&8A1r`~ z7t_6hKbDS{;xm*f`DO#|E3Tuy>Zt9{iQ0e@7qa)g*fZiSv=&cM(oiX=qrHZbimOR| zB?GIl>1?sBS3dAQWGtcF*I`N4te2N~d;$ zM0EEP4UTA%iUWAhZwOrEfSC#KJpwBfn1ha>c7l+?RHUDftW-o@xAatGm~<#dq3l=n zqi#L9smMp9Q<93jOURs5L@mZ0sYsY~zLSbn6Y^RL!abCw^8R$SPV{~(@J~pqJ{6e> zgh$e01ZR*~GDu+C{^ZJr9R zGNnD(im%d$%zPW76SFHr?_)b54k2&U*3fWp0f+mw(qOwyz+B%?=r;)s67@2up|Zh` z5AJ?(Bi?%w%+RTi_U_tB8a1TxuhdN2OXLcWq8Sv0f4tB0pnAuBh%@7*-y^P9@hWBllz$L`8ui|8;y^kNf%FlVo03Q@gUh zsc}uXjznKdhD0xNpRqC|Ton?}lsuQ(f|`bGFg9g6%9AM1jh0PW3~VXN3KaZ-%an&v lHjqvmE+u0YAMixU1U*(UAC_%yL05POzN|RUG*oJN{~M1RF+2bO literal 0 HcmV?d00001 diff --git a/docs/nunc-stans-intro.dia b/docs/nunc-stans-intro.dia new file mode 100644 index 0000000000000000000000000000000000000000..2cd0899af81519e6345cb1f2d9f8f1e9bae66424 GIT binary patch literal 8292 zcmV-qAe-MGiwFP!000021MOX1cOu8pzSpndD7S2HcXjnwh@ImcZ%!`W*UpKZi{xex zE3pzW0tKO!_vZcWZ`BOa3YdX`o*seJ^09X%mnaH~r>pDZsXzVs_nS%Y%V>T#o=v|y zS3;imM$?Pg<#>Aa-T6O%{p&k_{^#${{&YDW{*nH@nh$S!^c&MV{_eZ;>&4>skMG|< zJUocW*Sq0jHW!oey|^2_|G(j6GJH=$@6W$K>-C;Lz~yi;fwnvaIR z)jzbPuMiG-=-bhJ{b6rzXLn-~S$w_S5UF__{_iuHrPLh>O|QQH!w>j}@+mEU@a?gx zo@lL-#m#VjHJ)zjq1R?oz7QZh0MLtJf)CQ6Q;)C7{^~w;t8UV*x_P(i#&;iYXY<8; zI9_b3{4|?QM#E{Tbuqsm9j@o@VmKie+h$gT6`#k8#cap(|2&-BH8G(4@zbYm-%sw< ze0;h6gS#S8^C%DF%fq5n3_;oa2 zKKx(49n-z#8@xoE-tb~E{_^{ z%kR#Ama8w;x_R_qgo7Ke*6r-cr4WQReJRxXORxzbbqYRxT7UdGSumVlO-A2zkP2xA zR?7`Rl>Lh<$3Jq*0Ae<+H~v+Cols0UU+kORI%aQHSTQ~F-g?`q<}to zpNddg?cz!7t90{C-=hyxq>c^u-caHNtbCX}L|iLsojl?Sv+3$PT>HahGQOqD#io1f zmfYK*>9g;3;oukwU0~S9B`bEFxOlvcO`UuV%yn{?SHha-kPfO_H^esG5QPc8Ge;zC z=$|~%3W(9YuQx`Tn*FiHA5EM7sD*!it4QpRqz&zpKhjf((Y>!fMw*)avBn>5yZ)%2 zUp$lhqe6W0M5r~We*|(A=*%BU8^SYxR57~u^~Xq4vp?4Oqi@q6 zm2jamcO+%s&{5ORx^u=jJ72pmmOpuxKH7B6zfNWk7uUo2f_(GG@$hC=R0r05GsGrF z0fbh;_W8<-4ihFvNfX#olSKAQC%iM{Dw4uTjVbe_tr*!PS4z$=jStdGYb^PufrPnI zvc`MsmC^y7(;f~?%zm29FGurTrb(e5Q?eMOQdYrHq-JY-NG>l|OYXJyM5dY-qu)2) zr1f&-noQj*Q#7Ki$gTCPjJ}Mfi?jdUkM2ijqnq2s*FV?vuxx?9x-Np*bWx?6f8P5% zyctiv(lZ=R@6LO7i?3XvBn&El^>3rem(gN;F?^1N`#*yU^sRW;{Mx&QaZ1Nrq>NKEoQ$ugH>2wJbTqxcspu8hc4k%;yxXbx{mP$>XMJ;z zCBNvRHBiK(!E!93K-rW>+oL8r%D~zIpfIY`4DL|PUE<15KffT3_)Q>r_m|n^@^zv~ zom0LBG?59=q^>2WcuMP^k8e*!x6qdGJOr=lONeev03|eqQ=Ht_7*h7vuqG@uK?rI9 zp1j}C_d&=IRMPmn4I`Vx-%e~Vu=cAk17m9y57VeFw zz0Y&{<%ik)w;T(16AN2z{lI`K$HE=O!q7(ln=5j}!KLniEA?nUc>IP?A5H*U&gd&c zNNNbF@x&bdb{hUZy-?FBeSisU?lwMTkN>B4P4KjI(8a9iTI_xMzjS@uY>Que(3U_@ zpEL++y6>F;2xb7x0GI*rH~|o&0niBB55gn`Ki1(zFhvKglH}?^xDzR@^or6-&V_b_ zgq!Y|QSj)ppbdb@v=Eue@=WxpBeSI*pvcS%TSjK`6e6?h(eN@yXB|XmRi2n}>6mfp z-{Y%)53l}bJRMb`lZ~#?JZ3$mFsPIENTF~_de($N1JG>T^3n2^(dEFikx z>O&jerk5Xw^ZD$d{*eJ9ANu2DG`-s1c4lH#>}+f^3Ga%b_O4j}yXx_tDOZ0P+P-LR z$4%E7Zt^5qTGZ6eAnVb}0i&j~!E@ok1**!EHy)EcT9uZ z|5(>`3#6g7K29fWNZZFLne@pCqaFAGfIzK~^}4t?aCfX*XJREd`o?mO?)*Xet#Zo& zt_{(*0;4%Y@M=+WJ%n`oz~)+dLA4Wq?rwYs*#cmeaZyCduVLG}T@GA$GKa3$ zn7HLV*1RSS?^ZY$S|EMQ4ev)<^}8GuEeXB<%s%x#&u2Gh$I++o*81Hw-3Lu>ckia% z9aL+X=cIx7w~84`Gk8|RQn}GHgGZ$k=01WG>~DhT@f!*|B?8}Ht+B?Z1f?fU(kkNz z5D1)#@0pXtA3rYr>%%X(|FR>1s`JW>Qm2Sg(ep7*1k*P{>69&)g%akq9W2#>&(Ttj zmMWbup0k*|U_f8449k9}o9;2Q(=`{V}t?z=9F2cafNS7JuUix6XyS`ss z&K{h=X$VRK&AaJ}Lsu(dtu+|_TJDODo+X8#1DKZ5*b;h(*L||DaOtF{> 
zMT(nGB43)RJ3FNAtn|p7r8vcGscR)$`pyba$)CuJi(>@iHu!hc(puGx?gKNhF>%!W^tWR||ZWGm8 z#g^ydA!3=kBo}L5S+NH98iEgrhv3(+@-fcW(E~TU>8$9Xl3-FICFzmDc~%t=&<&I( z9MPu#j<7Qr37xiw}bg2dnM!mh|IrG3;@ABv&!gS1~FbGN%iVxsFj6 zM(8%eh~R)vb7Ox59S)Dxs7Jw>!)p}10zn;xvW%YfzY^V!vWc#~t6PGXiC zx6CoiF~=;`6;5qhU{DQ)mndUe^3=dOv>9k6uuuOS;TT2ims4xDm*OfpZRN={5qEYH*;fK-C8=d|GO` zXw&j4_1p@OSE;{vUx4L3ps?0KnR^409xSefgT-PWj8K^NTrq*Q$iT;nvE(Q&j!Cr1 zxe{ktvt#Ap4S)G0IntBlVmO)f$}{BY069CK%x1T}+4N#`-urw%EsECpROzDItTs z@kr$uxcL$FFdWm7L2y=_Rd+ek?ci|I-W6$3mm}S-@+T-;I7+nXYkK!mpCqgV!a$?I zJ|-@hMO)RVVuoV%L*^15B3|HgIzr__u9)RxG1Gt?;kZ2`53m2rhhW=>U_HYzwy~o| zdmsAR2)ffl+C?>5mzs=67-%Viv7wLjr$>|YTx`S0hCIj0OH$!Ld1n!HQl4WiNtZDw zwMBLCcueLwJ{g{4-oG}!J6l|n9halaOn2PHb`ph-pu-F4LO$Huu4U8=aUirX#G!&! zeV$Y>Ht7mB#Z#Zk7OmO_0vev-@ThsN98et!l>3Zu)R7PI+QWR;O#&$4PK zfT97qwkBOQ@}x$=VEK1Ejb$Te^p)`=dG4gr4RiYB1UHVLJ6%j7&(8~xG9G0-Iz>F% zO4*LSEy{Ktl@8K(K-tdAZb%b@Q!#bvO*e$?8y0R8UEMuU@j1e#Qe+{9tzwo-;p=k8 zNbJ7t_cG~wCVk(7^gTpyzH3*z-M1ai_wA4yTH?cq_`ElQTi$c%fv0@{JPcTCV6BPQ znO7NCiL~fVb2+tAmaCAnLhE(0)-m)*aWXG5f2}aN4s~uXoD46ClSa0{@KDYxf=Id{ zgC0MCWi`B{Qxkp#`VNH(K_!LCYWubb(sNVV?GOd;{9d9cq<3#{r%;B?R|}hK$fxu^ zxM^VZw`I4usan9z@`FvcSt}FsIKf^h6Fo{sL8!+9G|(pnr==b%eBkUvfI_L&Cjd@A{hYC>BW$Yn#*9rT+ZSb` zCzJSIE;*DT&~VzP#i(Fqv7p_9@&oOeo{U2%lQe-^XW$l5dQMwAndq4}6XGrJc1MO) zId7zoFq)=*dT`K2(ce`WG{hnf7DtzggFZ{9!7=F-6?aF zcFa*)%_<)Yt?j7u0@6|%^BP|#HQyU8KskIFfGLfoI<$4D@fx36Gb?W~pj<(%uMg&0 z&P#l9IF&Y>TF^aOwy;FJ3hF zg_gfGN)Ile3LQ{yuexQ9r;Z_>;-0PB6~%RcRoyITas4UnMy$$ZJt3HWSjppTdp#Rr zSM55YfQ7DN8<7h|u1QBIuI?I4WMyUNV8vdd)%{)v=%~PvjRr+nS$*W;aFz8RG(YNS~TA=RijAlAW{N{{GcXvuh+L@iP&KX0C&IrP3 z=*k&ESPzfRNbg*X<~^M;($4IRHO}Z{tIjBOSH7r3c#0UcaJoZJlsY?NogZDnJ@ejTjBwQV>o6jvi+7-+KSOAKj1gz+p$wwBd~zmQHro z&_#1#Y9=mo9kjCZM(R?LBt-+x<|4&S^rzJhQcfDynOHB$?OcfjWW)VlWS5Awi!qauUAkzpOD@=Z@B4W85ONbpE&2jVH8=n|*PZr#=E6zIbQy13 zvjsy+uQrk7pqwl@-X0=~lR;=L(7ttRpox47jk(I>~XAPa3CJTURgVgx@Y_=!haSn#$4RI_YtT zYE>zkN$WE)$y*BSosF5K)vLd3x(e*0i7mabX!z5gbePr`*#*)91i!juCQ%_S-rgD4 zN1N6ptfzwo9pzgg5 zcF}~S6rU<*eV@5Ol}SnWM|VG4YLcEb4Jgpp;Gvv0aY+|bw_;?IBZe*qR`OzM2lRoU zvl9{?F_ufN)@lPLSa0m1WchhD^~tQJ9#5YwYdI?!*z%6*E)F=!J1LYDbOF@)gzGYM z<)bNW6$oYeC|t^W6VC>*5^0^In-gy@v3*150f0)HxT-L&LW*v3HsC&TtIcEel^7Sb zMzSTY+gzf~CF&E*WO)-ovAdoIGi^6YZ--^oR-xL`?J;G)G@aT(TSa=LHH^%_b!)n- z2CdUVbG3n$s7n`U-CS_vB2yo;#9i#(E<|V4JemA|*b6m7;>A|mQ8RkP?&&U~g+9D@ zu_10cQ;t%32Ln=!URuu5qftI7aMszlMHB_@kR#;u&E+D>EGN8tFoWi+1YWfu0 z9CjB?ee_ITpfJz$L8iU@hhmQsY2ls~AYDM;X;Pgss~v4xlZ-Y6oWYs+I%Y1K9%)Ls zOd_9s9gik?HnWpVa?qYvMEP7Q?KqK)EyUhsp_|E?6Gz@!QBdne2u0v+jAf+v=>zX7 z1Gft9=x1^;w3sW2X#r%_B$218xdC9GAyU9yn?}+O8oj z@wSCHn^PM~C*G3&O^+_}Hiq~7V&0OTNlO}dV`eTnI_8pp{_p<~Ed4;y@R#x8YgJSm zKLtr~CST7}q7pZBkq&DHM)33v0n>qibTe8EcLCDn_+pWCWWBv~6qRm<$^UgiW19!2^%_Z;VZ^s)9FyS04uUg>C)f)=YWe6}|3 zNN58(DdiyWioXKZ6jKh8;$r%d2P{lVEUmS0izq#(UC`S|284NgmAlWw$z3kpy@{T= z{fI`^f!20JWFMjj=4`}`ATSv%Jix)l+ItIfE?I~nl|MlUQd{6D>o?7XMIq`b3 zl+i)8)XxiP$xR=FHxSx;7zAOkP^vIx=vHZ4b*R-yC~-WNyQOl-)&A{N*IqD>AmZ8% zZt=5Sn$A0}_&8yca%_?wTX zUUiHf`q~QR{Bqg5@~qb;O~uw2xJA$r%*>gC8auNT@wPQ1pfS3Q#6g~kJ<=@29bH3* zlQUk`F`6$q&F^x~BKa!rGSJW>aEn=yEKOTrjJK^}=x#$-bT&$aR@V9C(0#uGKSOdsEC_Tn@rt zaGSW2*amERP68yQQDDgAIWE?N1z_?NSHY){9x*q}s}=JrsgSxpjHY4{jbj-qGgKZQ zRB{0^VwGV?vP8T&R+_d#OwL|_cZu;*`GR%XIqwR@L`FDGB%7H<~I#W&&S(S9XNR4&d1h+Rf zSl;2?ObW%;btcP-UsEaYsPS~CIP<+e<8@njy#~2vFT4&(COmhqIZ@KLaG~ec1ebWx z$x@BwO(#YPZ&+nsb&^+|?DoaH>*Sbsozx;zXaSjc^Nqq)LfI71WXj#<9vPdgE0h|4 z0^s!1&l#IK!=@^4%;0o%;M8CWOhr$D8B-9y@rKEijiaQ7N$q%pW*Ar{WO1K_gWJ`c z^^L|G*Q{~W(S7|{cV#zYU`u6?8%JN+)Y*rhe(e4Fw|{*2;m3R?c626sp+NhhU)1Lr zv~Jr)RrCy+(R7~3qTQGGGp9VpNK-NMAi1Cp13F+d)BclYle%BD&d5qp4*mBL+};DV 
zJVw>gPnR88vNUHjQewGJ8yC}p#2)yMpj^b35gX$dP75 zE{2mykq+U%@N1UyAYJ4^ir6Cvl}UEH&V#r%W*dq$mPt(S7$}l0)aasvSQ%Zg&!Tpa zn5Ur1zAb`QIJ+}#QFwfF$%r%i?Q#|D3^Q?waVA)a7SZRF$*b(rSP$Cw&=jY)9}IJ$|OjT{zkct&=v# zk|KEAG0kKrUim`n;Vrq1g2y`hDkWhL!6Vutoi|12c&_7J3Z^y(`(m4-8-pguw(__^ zByBq=cwZ=rgC*~a#ZqTTP5ZKGTOCnLL$L;Z37xUEWVFm^+4b$$M!@CE(XN1MV~b?5 zH&}r)>DNfgA~R`tc{b_P2kY#5EA43BTU?VjUcSkjFwc8?tu+L&Qeyy@`XXH8miZ?9 iqBr64`^oU@X#V|K`3?PhH6Px5fA;@C>ZOO^X#xOXG+=}P literal 0 HcmV?d00001 diff --git a/docs/nunc-stans-intro.png b/docs/nunc-stans-intro.png new file mode 100644 index 0000000000000000000000000000000000000000..c541dfaf95f86fdec90262b7428f93b1922009f4 GIT binary patch literal 74103 zcmagGbwCwu*F8Ka>X9x%=>rNVB_W|SC`bsRv~);^lr$m|(vpId5+WcC0@5HYB}g|S z0@7XIKKi`%yPx-a=MOj>&diy)=DPM?d#$x6Q0bmDJ`N=gf*|;JWu%l51iJ)5PDi3n z!8>2JGTPwRX?uyg4^b%8$duwFg3u#(rNkdTeX}&?@<^qBTy)j(qA%k$ZWd`Jak1M( z7yR2?KQyDo?b@12sGo4)<2SBIa(cAm3Vt8%DS}|` zK%e(5LQ4~Li{C3MdF;LjCw3hW>LfT0Xt?Eizz>fdhlkY7#E240wCok93kcHaKSY3d z2fk_i2!%dbg+mk?J$bIRaLq0!f6Ciy4L8AATi-)KNBBx^+ZJJgl)W zH9cs>nW(7|D_U#veF6wO^uX*+^xx2gL;Naj{iAnbJWw+Yd*=dHD+ebd3 z<~QrTJchSYb0Wf8laf-~-0XjHt;URLixBbXet{qzJ{vH~REyv96crVjeJzKJvkD6f zOG~$B!bF*TqoSj$wudcz!{?LSIOyrUiJtuExa7M2YamzaaaYn6Ux$@(k6+)iGRW#j zYrJaSzBPa$-JIa8-u0tMObOz9AuFX=!=)?p+NH4NFTl^z3kn#o|CN zk1!z&L{pcNhDN#Pk*j33bR#}b~C-{a-E%>OH~<`y{oIMa4&?PuBKF1A2-u^ zr3F?xEgLj_)UBy5EPTaRUi@eK@S*LeA7&ojA`)UagG{k3S&YAvCX|$XoteqpS!6eJ zC%(|*z$tX5)N)ATWUSaMGVn#dS5aZ1KxlSuZu&_@Ma4R+;WW#KgpfP=zTw$wdA(## z_f2rlQ6yKYsiOY5pD(?YP0`TsU3NBWO$Q&pnS?}rlCT?Il`O@hM~^f$H8)EG zSl$ypUnS$iJB18VedIiY4C!UYeblbZaQhTgJv%eQWB3J6qGhfw1k14ytQw=W4NKms$tGtr5E(K!_#Am*nU0(Ocy#^_L4TiUWn^=cb#2OcLYc1nd~J)vMiIU>tL`%5*%o zRQ`1e4i3e_AFQMj5~BPPeTftmyY~0@`?J*+$?HETXgyR_Ep^{}lJJla&Z0wa>%s?w zUK{5iexkH^NE7_}u_yuru!d$?pVD>DfK(-SAB-`}$O!qJj3oVxeA{J3E4 zA|pLKJRmNJIxP)$bjZ(8DcO`Lm=&&>P)?-v_xA^pv;P_{$wY0X!|H=X=arb{E3$kfR%x59VVXvWo8q{PniGP>~)F%G(-WdsZ zRG}{mvG*iqwjiZ`yGk#AzUgXhZSCvhxpvKbt}WX6_bAt#c1*5JHS?U2rWLZ7x`D|b z!t^3hETJ=+T3T{)a-xT;4S!eI1v0Whv)jSKZDs6L(T!4~#rKctf#36VlW#hH2n!2?VT!u?;M%ooEi24~d!*7;Lmj={-QiJD zX+5UAlnTnq%94_&(U*C7d3ksuO$6L`uWPq-bl~CRmsFIiibtVdW{$KHQ$v#XwnB?U z`Jcfhci$OvcGxSQPm+_7Vd3DYcG)ly6RZ88lHIb>Cv|Kl2H#LVqvLN#+uYpz z_U)mBjDy1_q3EF)>M93^4rDS%#~bL`!@UhIF0P6z@0V(yCK6xqVMsOnlSdy49I}aD zi{75^{q}8kYN}^&@O|_~-SczA#KaI`Tqn}`69!)Hp7fR#J&}-*s6O6bTwS%5Xo+T3 zV^U0d6B%j0KHJ(QaQ@wc^==3yIvVh8WfZu?B;au-aUJJd-z&8Ib4PB!xofn zCO5sjPCkDYs(=e4B_a|-Ep(?!+S;xb7&d=1cZ7^&Z@&>p$_xV`BqWq$jz)+iI)v7~ ziWWXMX1i6-jX;c?mb?q;p~_(a?sqSYg($%bloG2x3{)B zzkW42f$yx?-Q8_$Y@GhH!HeD9r}4C;q>*pl2-*LTmuLyT#1q4&@pk8jwiI#ua5Tim2e75N<(3wk z6t;iQuf1ZJj!-`{K27N-jvz&ohv*i+@yYI-#d}z3txC2sn=f8y($K2C#!Pv9v zyx`&z_}2CuSGG=nYKQRe4u}}G$r*n(=FZ@&a8r=M^Ozu#szF4}%_b!kg$g|z8;&_^ zK%d?zIp+yV#si2j0mTw`4Q(3P6HJ<^B<3`zSfJrTiH&9upby?dkW*q1YWx>YwY;$M z%A`UwU=B+?ElKs@=-#`YjwIpA!b0UG%1iI|84I!v`Hm0OL$}WP8;nebO9Y?@&g{^g z!hB!qau3a5QxL`CuU|SYa~BG-`_6_+FIA}H-e-^@U3|U08-td@KoTkmcT-L*EG$3) zH{G_`j?~hXttP6gq{C?84&&qF12Cctx$~~)@|7z$czL^p;H@AE&h51|h%{QImik4) z$8KhATZ`W_?Z)VCQ5ZGa2srw@gL45=~+E z-BkcF@gq1@Dsa5cj*hfT1auNiMEC@c&`kO`b>v?iHNDO2~PdJ zEZeWiq99x%Q#+A3Vm7oZizRhg%=W#Ch^q*U zr@CaicQ($I+v)o!2@onB_SWZo!vR+HZueLV^dFbo3XlKRxm2KOeBndReBD#drN zz7k{F6nA=!P#QBs(q-Z@qq;G*s>32KdR;)xv_+LnT*L zO+}@=vXWHd*WzN;=g+@>{W9IJa@!dl9W|}c%r!w1xp79v#B67Z9xVVy2Kekt^!V}o zj~^!M&D|e6y|yw;ul&mW)!!GV4DtckHPB)vf+p7=B9 zx5RN518?Uj;iDP6$?=|JV9Hi{sdVy|vX0I%%%$GRaDxrL{}DoKdp9;V7P#9}wsfv> za9D3I4G9Vg0x*q^j%H_PcQOwe|KRkq8!aHP3pv%i48cE8wyuk>g_pyO$+G&l&sGb4I+ko zI#GJt+t2jKBuJg*rnKltpg zG#=t}=a}mz!>0v82NYHsUSh&%vECOX#pqMMEbk&RGJi)~l8DYnXcEhTp7;qN!ByV?dTD}6 
zM%!AZ)84Ctc;|NLH1B1pdB~J&KNY|Vkh=s&bt8uJ7NeM!45yO{q{q2(#-B_=DjcdG zvW?}_NI2WoFa8eOE3>n+3k#PnTquFXe9QTF+&Tx=uK}k16<*5DrJ*8LR@SDE5nLZc zNh)vth%ihPua2UiRxU<LQy8@Tv!}iB=9KJddVsE-5XAIpw)K%4SA2s>(}}D_?RwV2DD(x8xOS%Wu7~rL8!&PCJD>6k3v)kqZNh{5QY9{ zhA0En$)@u;E~dt~o!s+VqgQ2WM%Kd*5GT%_J6Bm=zO^!Ol9}uY1cXwuh=-}lC5Y2fTrW*VgBuq?9OikaP$<9A7Pg(03Me}FK1oKZ9h^in+IQ89T{|=~W zstdVUq!^NSc0`=@eiKvy|P)kVJ+6I_mA-?pzXgNa%i~hmEpTB;6NZ2?&I)Dtt zzsXTGG~6MHy2{G>F7U>U8|EePbLY>WZ-w01e(BOB*Y7PPa zYvXQ@%7_fd#etCiQ5nfyhsra_;~zq)1=nMBJ<-0@0MOrsiR}M;7luNg>^6(u38l^| zb6;Fj-2>1iXsqEKOL5KinNtcw?dh77$NCUEs zAOI;gVNua!jv~AY8~(E4fo`pSfIZmf&e7d+lFwCb{_=&7pFbIxg3Fg18yXrsZ(tF< zGCYfkD-hA7nHF~Qf!~7U39#!NA+7sHC%>5ia5(_3pbUVVsq4OWJ7(zncm7+q5{@ib zsFJU`>}Dt?S6dDh0{I79TH58HkqdNm$#HS&z^pua^d>IuA{|{Yc9htudc`~*=?XRGU~hOX8pzWFk}Lw4q{-!TuE2 z|ESpDTJ#5i@2AfZJSs4_?cyTnwgor}NFoSY%R@!XRj!d4HY26EP#G-gN)GLA{y^W1 zeN;FHl*jELa-bdnS~#}Hu)DVNUMcwa(Hf8f)Df@&kCvOAZMXm_l!5&O){s%zBlD^; zrM}>Aqm3^Ym?%H|0`vtJWIa*cY4^y*r93F89oQhpcC^=MVnSt(_gA)?mY0fBIM?nz zwvb7qd^w96Q5~>~a-1{1CM5L6dK+%(pECUD@DPQDZ{$5)>BGjr;FBTrbXBi2{szD< z^OE7QF<}_$D_7=V;ccq$#SDD^&J3&%YkRTI+Y=xqZr`~>B7vbslajvLJ+iV|{t`m< z?(JLH2Z%!o5|VWY)2ol0?%;ekFr99Up`xN9B_)NtFbZ+>id@XMfr0o>F?_7$`VD9I zouXy?%kAe+_VZ349)_!KK`^AGqFS7qnp#{;c=Kj-aF8X@BYb98F^{N{DP;nKXLYJv z1GRA@7HHb>@NH8lO2Zbq6E|Pg^F>I|M9J!m3=|HxWrt`QX}i4Etnm~cTLR8yUVuYb zIB}VVo10r8)D0^Ag&LdBYiFD7;H(x#%JP19qycYx;lc%Kn|Z(ynI-S0PPEk3WsTy8_M-lfw4Ui$!bx`RmQUDQPA%QyC`SO!n;jtj_P+P!;5K9;s8bZKpYiolS z=h5FkYHO8Q)6~pT@moKtaL6hz*Kn4$hXR2)*`1r2`SI#x9REaL;a^;Q2bw|HBzelo5@U#3{G_|MR`plCBAWdZWix zy~TV(jJxHz?feGPUHg-*zJfx?U_UWw#YY7_t=IKTiEX7L8s^FIpC)cO7#`U9 zTCpzjwNMZxX^H04Ro|&1cdcNYtV(+b2MU6n3hR&En;fys8#J^f zy-Zq(f)BGS1>X#p@+J_ik+D`39Q|h!*c;B*Z-6Cm9!=+UXa~H7w)R*E6~8M`g8(mJ z2znN;^IHyvzj`HsvU7B_vbJXO{r3GkAcCGVC7KGl=9ZR5&zX;I*c`tcl!KxXki|@M z*vZ7nk#0;T><&@T?^^4cC!xH}RK+I23|m1~R%=t!ix~mH^Dq1 z?@I(M^**oXn3YWT$(hEp)Q@y=p45we#?lk+F{FJ)1JsO=(4$<{KDoFK*Sz0SSYmpg zzFzXJe zM4_w)nI>P~3=I#WJnlzZ-+>qhy0fCG zQ1^M?m>)e(qdz@i0AAe>>eZwMMbRS+KMhj|0%QW0pJxVX3oX*X93m_AaxTJQT$^owZ3M zow}zv(MxcLziN~%1V@>qiIJ*3HlmY`l@6tbQU9RDrmd9q(7dDq)*94k znE59s7qnn86>FHb&Q=5oucqb%!~$66@>!xKhRp==pX}%F0q@fJol-a6MQ|hh*2|r3<7==*({j z$t{dw7cbcdQ+x~>zn!HLV0}I#?Yfof6BzNsD3}?)Vn9TjpPOsy@|%x+IW1Zxu@sMZ z)973Fx3^W@E3+QMXoIj2D9Fiy+DMMZ6kLaghyH|g)oWj<6ouBX<-Ti{jBf&G;Ia9$ zr>Ut4$Wq2T!FqXs$DQCE_>BeC9k@!MfP^d8-^0oS zxNiTW12~T!ElpL`0Z{8!5B#V1|Kv|ZPx_*kqee~pLeINqxK1EQvQ*e^Q9OL`;GJr= zMujS0)dFw<0R0^V85y5fQ38Ng%zTVed?UbRHOw12GY)JKhjvA`ecZpGafq59<#oMU zn9pYVGcNcA`YQ}K9dvIL5HWIw(A)x^=DEUBafbBaA#lgsXrR!Va{3o?5z&Ym_mg9f zdN-7QLg(Sg#PJ=}Uh>Ia^3?|!L=x3U+gg$<-0AB~A0L{tW#@h#et&x)`W)h-e>!xQ zHsTz@=lgiVte{oc9}15VEeK5@hq}7CtpKY5%&XLV;JYPV(!PFkN618#mACee%B?n4 zaveB1^FVljX$2Gm896z@#p`-~8455`kQY&ScpYtR>#M8R3zgeiTLXnpYAN|XKkMse z53tmFT}aM2^ivMVYsfwN`ug)-$r2skp8kgy!1>7&gFVNIxajB$0HDaoqCk7v|Fx;= zo!p{Z?RDZg|246ys%jz6vt+2_ZX{D8nmdo ztGWI=?1dkNkQUq|T^uAdK%pqdg{cjuX3hSuY(P<#5UYUK`oaS`Q&Dmx;4pHz?;pg( z#E{DJEBySi=?4b~MVoKlQ144>fyj*_AP{j{iqp&u4GDpaBVso#B_o5vCm1o>m<&G& zzSJIrui(Z0)HUQ+*e<6TZ#;&n-0V|asXq9)CIKZe3Z0&wZjxKkiF3WAqy(T8v#)#t zUuRSqtk$HPj@P)k*MW>WUO72l0k{b2%&#Av34D2<8`r{YPb1!m82dr9fkyx!@MM6F zUTc9z_oC2}et0hPiOwK%t#54Tx-MLa`3-O{F|qSFQN&{(01~(ZU;{M2yC^F!&&Gk- z?m@{2%FhA>I1rsS363xEn6POSUS?wY(zvg6@>%Js=)THIKX*q_mk4HRLmpb^ujX+5 z(K#_O@#6x$@B4?>Ko#I0f6Bo3~+As$+4Siit3&N8fxK!X4-dzx6*5f<@?I4Mh435A8*@u zcpL$9`uX$cKTZTsFgJ`rJ+rU@iAGyfO-;IwQ@dhzdin_j*3g-@_IB+@kHFH9Q|a{J z_(p7^6Wzc{Zm`iz%|kFnP*V$84qlIWfA9rL2V%EzFLe;(ZKot8T41n8a)AA;%FE09 zeHOzu!fWGb8~gNLqyIjMsBA$M1f_8N0%MvxiG1NwagT5LNqE&}J7B`{~B>C7M?Izd9;T@;`p$U}1S*UM|SW8gv{`nz~OX;{Ftr 
zP+)bBw?}L=reTMTG3N;ycpKmc`NQ$35GL_YC!jWYVnj)Sn|yQiD@6%1EdlXfY<+AQ zqsRR2yS8H!rcO_ufaheFaV?y z#lIcWPe7uTi)ssMn$`hsDx+af61mb)FwZx|_BL>-EVcNpz}R|E$Z|7rX>QI4%VOJU z@6RBHAP8C@$dppwNAeBLsTsDXN$fzJfdT=i<{R+kyzR>i5%8LR$w4fh2;>8LDc%5W ztGE1~U?E77O$X-0peAc@c$o6S1+cXI=t|D(^*5=6Bmk8k=mx+81M|;g)IwCJiM{?} z29Q)PxcGK@aykIc7 zLrc!C=>YkRP-HiyqGBJoGk_0H*7%6G@2WHJ;DYzJ$vs{~*6%S@9Bx>4s{8~7+W^9W z!m|oMQL~-L{-&9^dBxK;0~wish~M9#vO3sViH(YiTj%-x^UF44o>kC98Dcz1m=8yD z$j55F4XGHBK^-LCjBc|dOn!Ys_dGtnFfg&OhAOScZidc)#u`c|N=rgwaHy@LqqDOz zK?eaJU^yhx*0q`Dz6k^yQR2(Rig^(b`neA)OI#OQiJRK3G_Jw=W;=mNo_c}7!S*tB zGrMlJ`^Jw>&D(1|24072&A{>kkrcvIQC?0X^wbt;;yWK^y^~)c)cPZB7&IdElbg48 znvFpQi^>*m3d^M*GX&he15`#1h6~Ub7qxpS+Y`Pj||B#c|z%gi;KGm zK^YQ8;FagOEfyow$CBZk56-Il56^gG7hs+10z1a_7CTz=Ypb7UF%fS%h!fT>6t_SZ zf;`G&5{xXKL5Pw9Pv^`m*@;bqye`&iyEMc^<|Y4~37^;DSP8fDy4`dmD98{-`FVMP zS%mSk8U57L-rnB$s!sJkFI?#dLH^5$8E>PhePzVoyZl8i%Ts^$ql{FlkuXL}Ese7K zkPi(O5Y{98OJwIU^l_YcR_u`;`6+G*ucvpGG?BQrX)M(&H|lWQ!($+g??&ygs;uJ> za4DWbE)_iSRvD)~XM{Zu3Z8s=nqdX}2MYxRY%0(J`HP!53k6HdID<2PSC03kC_8yI zow`)EYv&QMq3w_NEBYd~VE=QN{W}$Y#VLk0TR$OA^vOX+9un{17=LP*hsuLf8=pYk=yvwF`zDfT8{3i(8<-DpAg``*r zz?#^%HktAg30i=EMv(SaW1A4&p|fXdn;hxIPwB>@W8q(vn15xenn3 z%X03Sr*S~ZL~1o(jcLvJNh*;%qQQT~FpaPdB@(cl#_U2BJ~>WV2Pc^rAW|!9n|b=> zGt3TrmrKgXJ$|3kvxneyS%TqoPMXurl*^bCv%JILW)au{L{OBp^64BYcpqb!K`ifX zR+Xgv-~d!|Kq&4&LalduV!?YIllStuh<64^-c%w8%X6c1>b?PbTHwK`?U_o*iT!p* z2eG%KmaZAQ7A>29%r;bx>#Z;ut0^W$LZ^3oXu(@~3Zj;`Hp*R0ZQ33@5qqzikf5Rf zJyroPmh;NA*4TdH_)${%rz;BMDiQVbV^mV;cUa_)iR+j!#*W%hY=IQkcjdT<_vaRC z?8}>K*AN*V<5;MaBzfkyMnQbNaRDgH3|FQGZL<4kixdW zTnABrm86tLmo~!YJt2x+$D-$bI{-zFktpb?$r&`u8&qpY&mdy)WIUK%+W+hlwROv~ z2}Y~K2Sx=Mz{~$jZaeHj#pQtJ;o;%sjpBes0Fcm(7xdN0sHkt<-L5fC)% z>HyY%u*%IDmr?@c3*0}4jlXfra+`DtMF0ce-uBL>r>6%DC8yD8pFTZ>BnXA2Z#Z!N z&5e!SctC`}T9g0F`GJ8>LBV)M*z5RE6nO4|0aeduz$sGRa#{iq&Tc_q9Qpj!wP--vpN+`JiYA^@(5P?}p6P=G*u0;>|O;1k~}XHY1PkB<*~ zomR$=dgV9){I~)ljOq6;Xw_&0Tc9WKzplF1*x0D~EC(UPiGXE~E^nxwNtU5$qj0pRqyn;N}0#^I>W$ zgIe3!>B8{MkmZ0@u5i@DDBp9jy1<_G$6s61a3No7AQXL=-46v%PX#K5=*jkyhC~aL zJnFK4jEpv32MI46&)f&U148s5;g%(QGqP+EMKMY^AUruSAF&OzW8*Otpa!Jk58ov5)AOVkQ2^fW2dF3<5LTSS8M9())+Ls zgc^@nBFXD`k6o*@+d@I*+VXipxpJWXfg}NUq@{|ast8y4)L{qA`IDuhE{x<4mZ6wI zpjv?q1VXbwI+vV+V&`YiJNO%*d}*%WAmKp@G2Kf%$r^G?qLKRTDB>|CqB^wqmXR$80kv$_Y{m8Mguw3M`yojE~SaGKVnK>SP z0-GTlROP^EmVjf`$?DV~QIg_mgkDg<9&&`uMZHVyN$Nb{l>nM=;{jA7p1>c039B>l z*7uPSjHXpkP;dfV4iR-5*;lwE|Fb23mjSgt1aZki+Or_epi9bAkL{UepalqqjUV{Dmq0qcUcWhbcH{vkc9-{tzaEvqH*7JembVqX_8(YVfBY?%2cHKM zHK=J9NlC%Xlo=Nnhfl>96&stURd#RJ^%T+)KOX6uA-QUZ`pR(X`G!A30tA1wcs;ZKMKt@jPbbROnRH3IL-)4gn#2t{7{BRzA`GQ51 zm{X$!^iSfd^Cv&g>jOlVtr$Gb2;kb3%I_@F_dg560XdjhzTr?X<&#yo&wr&sTcD5> zH@+#c)i!Rx0Cp7bbv@U z-dpyS4EVW9MArrgeYng{Njr*@(i(AXu)Ko@Rwij%IAxu}V zW~HV+t11SeR#a3JN>tD%c=`B-vtA>G`M{Eay<({7@djyg@FhvqUd_pYXv`D9*cjJe zac%!;LP;46N8QnfRVkv(=Mypj_VM!@Cqqw436 zA7!)>Qqs%=L#I=6PRLN`NM^;aU0q(gt5Z}UNi>owc5hmZl!j2=xW>u30;vfOC4cYU z4}sqh_{NJK6E@oc-35fE`|2cCv)x~Is~av0JoC%2${|O=b|?5^0igGo$A9@Upv<6D z)2Z>Sx?weCSs*Tj?lTXPxvM3g%?=7@QBe_(D3_?{#COcL111?lS|I`ZAHjKfnA``@ zSm7!Wl7n94jLKoPxia8%9HkOZIu;%t4%xNhO+^11FJ5R@xkhtLR5-F; zzusMVxSdgXX}JKecP*JSe(**F|6()!Vh!3i9JJAjM}Ih{g~IWkS#Y3W5{$s_<+6$2 zV9JN$2vnNmy?MnXp{z2aQfi=~=njBTSEr2uPV_>IwSv7b9e3KQ;7!UaR$ODHIog$~ ztZ)SD}Lb6$4#yLKp@ z(sFl|a#c4heuBkT!xPKY>W&-RbBFw{GhLh|FUR|Sn^-9?(kWzYVc*}jfkI}1{x;;J zlHy_^FqMKeDL>z~)*CB<&q@|FbtujC^z>9!WBmR7BVPR{1BFhjt{!i14g*n3|cP zeW959_m-cEx>WN2=^xTDSO9(U(4joCWgaZSlis_`yg4qhNaZ?svpBU%bD(AedG+em zz;wb(+_QbhZu9h>1dr6jSsZ zRHgO+UBd*PnjamneZ&8gz{%+Bot~KinQ+0wFEB80ZEbDo3Lg^(*z2GybTSWBmpY-Y 
z!%F>*_0w3R7U4EFy3Fg)AEvsacQWiIrEvG|c>;pa;9zQ@r`J8`FO}F#h`2WFfey55 zp9`7w-WvhCwy3L#Hyz`-49_M!c%3leY5SwQ4vMSnIDzSlLoM5t)x{8Epgafi7_Mif zFS84H#T(F_@yBof@3VCG(^l}uv#RC@I4wb11H;)wp)qb%#zQa={&k*#VgtP%klBIp zE&9YRCHp=0SK;FPPt*LS;My-(`DhKVRM06|FPT*i=xFg1o>bNQ-_#`K84|bsnLkJ& zJxiN&a3*G^FfcgSj^yxcX>swV%1ZtlH_m_7$-RF!U$L+4)}_k_g0keL7lF;#p}R>Y zX*Iu+r!dNW>YPwvC?PTRP3}=ImQya;JWdO}z^BNYO0Mryi<1v8J^bm2OqvGk-m@HN z`r@N)dd*qltp8x%-Ph1dWGnDQkX*o^F#u3UfHv;JtM+*m_ghE45uYG3Bnn^hqa;^x z@{M$=pVhJWxu@QYMw&@Cy_VDibCD+ia04}QB@=CfYvht{H-dJY3aiiFxw|{2==^Ms zxIz;_a2SjNSZ>vy^WR0K;eC%CMk)kk!Z#bBt}2@J;k_P_nyqM;nxz8>K+`vrP{pf} zK-IcOq$g-@)I)7A4%8texB=379eaz6T`7kOhk;f%XPv;Qbvfz-2XGy~P| zacc9*yCGJYSO1OA+|u??(eLefa?RO<@ewjH7{|6%mw;bx{CwhS96+h zk6&4tA}5FTr4>M(?SoIK#w(c!a^}MO_bkrNJJ0}@Q&Q4jSUSVCq=(1M7WlA1HD{t_ zfH(Q%3m7CC8>Ju*i;C8OH5-~~;`QM=LF3ul-UbZ^n#;Pi?nz2E0Cl;vyu3U=FYVP= zFY~$>io~6>XV3N)Tm{i0qpZ02^VWctxVSjbRZzlt?5?I7hrGI1M)t%0+Y^vwAgT7v z{a5y?Q`KkD&X%ddVn}Qnvb&>$^Jz%=nUx5)**a)T- zuK44lg#NJ}xMz=7jOV<--~zD0!pto71XJEX3jCo)|6fv-oogdr3lqkQu>K2JqqwsiKg0AAU&m z4?fx5WBT9oEq0>-Hg7(el>x0uGZc`30+fOOks2*8`1O{2XpAXY$A}F=u6_8?hG{JJ zt@}YXYHb&(u+k*|jK{dFMCq6hnxBnr33}li_Mia`h|8S!;9IJYSG)$6kpi!N^Ba$M zCv#NKfIZo1wuLx!2BT@hjf;<`JkEx8yIm+1I!$L+vcqLEU&-9?4)b8y)ypwZkgDUX zn=UCbu6pc{FU#y%V=#BIqI2wz7iuKx zErKlJKv{j1FI*kykpl(Q-rk<2!B}sv!N4zQXd09d6Z0Nd0WGfpmQ=QC?mi$&9nfBK zNjj=Fh{CR&=?gqO@YfXeWpREh@xlF9{J*ZQbvd?p+ATi{K&#^F^kLk4EI<^Jo2K<4;(_6YnWeGDMim_}}Q9 zui=7Ti!oLV6LZ62pc~KdOK@tYD@5G}Yx-}6PRp7xRrTLh?#2kA3DnEhu7uPVYwCVDh(~REut@|UF7u&S%=9DG~oPr3n zG+NHX#@5l<$qoV;kmXCun&}J2a-Lr9DbS1xNNH%}1lo5ng3+;7HK^9G6d)}R!IwlHPHTwr7A$bn_TfKXci^yYkN zZq~AA^~dg__Ax5pq?wkC&=5KVX#5wY45ca?yjrMMGVlKl{V;I8g~339BTki}wHb~6pvTdgZT9sDc!kEw^X{nC?fIAV?FD^C+nYHb$7o!P$q95=}{cbG?9Yf%K*b zw$>#s1MDMqH%!@f>q!qY3-Zdx=9wh1uxI`R%2L)$_2d2NIMxD#$8rtb)Pbk|3#+`e~p<$d@d z%g@u9qMQZ3QVKG%G}UaN{xB8O`hiYe%Xo1?0T&uw*W3)=Q_Hc6Xyd-NuHzepPDSg_ z(d0`lL#9{f&f_5740na3qO@%T_G+SlEb+D-cU!5#Goh4Rd?B>SgB%^-RHfWZFWV3z=`1j;Wt6u2V1p!*CL z2M6O7q@hVnNLZYn?u8x5RzEt2P%ZLVw|OKL1~CT{cjz6O%kI z`;J0SL+6M11iD?z%3D!O(f1IfjVWSEt-20fcjVvecR+~cTl92o`UNh9$>Yc1fj2QS zYHVui*#v7W#v=-J0H*E!FTJmNg1JMURJ(iqQM3tc#OGXXJpjA#z&6^CI~dt#)$(&63L%d@m=28XC*mMvONKQ zZ?A;hf)5477^l`FtNQ`Rj5VQs8!r}UpDg(z_2K^tgdA2Bp1Uevyg6gLzNh9BHr<}Q z@Qx1go*}a2{Ax#ifG>|%gdje|Pj^-B*vic6dKqIPP(IzUI^!z0bs(1Zsg)+sBd4c) zPs>E=HRKuiNQMif&Paky&&kK-xki5Kl0sCK0Jx%+@Lcc>PT|$y;1Y0IMRE((kRi2S zq(*~^ZcM-7V<9{_kCXDW(CrcXV)FKTzMzcN3kViaj7lg6_Ha}W;Tj02h`0Jrl;y~j zT14hB*#Py6I!x3_AQl$*rdjzpwEFZ#=1Nl8$v8_l^7|2(`1e~B`$a8+^EMLtVt#oS z1t9dgg|wdrHKYt~OI|KfED)3*NyF>njK-!WIum~D{O=aHkopv=TV&RceZ>P%PndV$6uj9` z#8;{nv8260G(umuLGHI0OIwFY|AGP~%-}3*k%UzF(t{j-vxF$<-bN7eSKmY5crn~x zszB6Fi5~w@isDs&y(@3#viv}-4x~jE>MBe@ zJ)W0>z2~i7t%Ope8Cocy0Mg*$K5TPNKJ^kK&sJ z-14$87+K=u;~}&`1)u)m!^^pHx1Ep4$+{p_Tbq<_gIoqIS62To0QkD)PhP0BPG=sz z-jxwX0+o7Cv)9$F?J&j>Kl_HFq5FVG^of%Gvc8Cc!{vNdi_rCIaOVQ?eQBj z@$mrM=iw_Ie|>|PUL*wG0a)4<(3@&)5;G5W1o&|F`}Y^2umOXMx^a%nPf?hl;Y{cD z1WBb$-|57a-hJI2dtqvim*93wnARo!YcLbd3fD?x1;XnPfP!u(I}<^7@7k4}%yo0Y44iWlPfRQ8FtDX%Ifg zpmyrqWvCAipwr<%-~v2*L{)dlcq!bs|X z@OSF;X^$i1ibQ;DP=;%1WD~k&}I1*s-zj;1V zAbt0dRYlH&vj^O`o6s)}Pd*WG+qTrxOA)}oPMTq*?AJ`<&Hh@TJgq9!>t~B<`xE~S ze`Ui+QVTGlVS1f`@mAfRINmOU?$eXQjV`EwCW;;_LjOoahvxkGp&IC5h0I6(UBTjl z0~XEMwAMl_g@J|?42Eec(=K}kOy>XYAIa_cPyfj7!2zzSi_+zuuXJFHIK#7JKTCJB zu%Fu-zu;MK8$s#+nKDx9QX@M2cK7!ot!O=$jfbG0APq_xhK&i^{W&e5ZDJco_aqA^{*v z`T=x4$)Ui8OjJp!x{4qR^!Hhmt2^t!DL0e(Q&_||G?U^ZXEf^E zgf+RY>C2#ZVm?MkjxmmNxwH9JHrCcoj*jWLAT{w8bmra%h6tYg0)-l&c?9PON%nEHs7zPEN z$p9^}pp!Q;<%0d8H=fTMjq!&=OaFafT?PlA0i8ic>G;H($=@US)?kf$)f6V_TDy8 
ze857Tu1mGsKE`R^oMs`Sg=}&~-0N&>{Z_gjUprG@oKM23dBR-%&LUQ}r|;fzSx5@j zH{GWff+sLVHP(qfI%iYO8|dV5by~8#X#F5~s#c)Ue;{9P5iP)r%SX`?k>f88Xhk-3 z)JfXAOU-A{f0|+SWe=fI=RPOTk)TvG4u7@J@|u@rUsUx=)8_ zPn@sz6`P}-*GJS09F^x1`i_Y2Nsh_KP3Hg4ia>?cy+2p}pXrCPeSL(`n)xiTelwP= z&oSOB&?|gPhuoQR*?s(#tFNN5$o}I>-Dbm0GysvLEelicvDsrJ29$Ogj%g7Jl z;rflYlzs@;wuTT1T@tKUG22~PS%Zatx@xQc8JIO1A zNWQo+gz7j+$D1bzqIYR3>_Q&%-Nj%n@kB-Tt&b%?fvQW1l9L#Y} zZGU}la}A5^EY11g2j+xRPeqH;qB-1+rP97_G_-;GzIt-kXx;z8O+Bvm-wGs!?XAwz zED>v1)Bu7VSOm23=*d&N=nRZvneHka+sdB9J|69v3y%D@i@bbRbZ#==K@oj(lA;bP zfsGL`q4JxZAjgw>pR1<s|t^YIs_QU+Xeg%G;IJw3C~aI9UN>ium91fD7^Lm zQT841ShxHCH)SR>dqqNaB{PwomC&$?LRNN}h0HQSLr7MNkd=_kC?iFgk&#(ONf~AQ z-}m#J^ZWmP=bYDn+|TRv98X8w_xF2!uj})9uTK}+-?t~c7bo>WHKL$oQboJS&CEP8 zKflp;?glL1W`@4(+i6Ga>5Z-?6a`z(UMJY2RWf1U>2`qY-YoH*amVZkCv#G5M7atC z)^ixgBF$x2=0zbds zP=%u@SNDlX`l1T=9MUdY8fs;S1NQIT+xg+c%>roX(f7(6IM6+q+~(p&SZ(OvB4yC8 z6k6-aa5+$jde7{1J$(l6%b=+bkp?}oTXhS5tZj&;nk^i;5S65@(EUv8^wlJu)W*&ZK>~Z=8ks(LUp90cCDs;qs3knHmnZ zunIJ$ZTx%$4V`JJ-F8U6(SWQ`qX|#)ch7zK_Lz#;)fj_+R5p9c=AMU$ho6a~T*`jA z9zmiEsOcjw;?XVzzY`7b28>^~vV^juf@N3Tl9DOQJwyJV8& z;^C(azF=dkw{a>`xc&aQoSRXqSC1Pdea}t0t1mS<{{qPH3%AjR+1c4wuUs-*H3m|4?%^JktRHjw7rES=k*CuZHysM?f0X#BLb*wXAxa8iL_2wMQB8M>S zZ}8Rcmt%6p-!S$X@LcCPlw;^=;J$yhTChr|5lfeDGM!DUk)1yFu`#rsg^4qc*6VPX z@zVIwpFIn7mmRp*jK|5#?+?nx5-i{TOsYqbfO`V}0SnKVXWRnG1DeZp z(Z-r|#a2bP8%}EyjD|Y5yygA)3cG`e2yng~{?1m2U_sNNlbx&_SQD@1Ma)O|w*uaX zHzIj$)QXex%@fPFja%N}I${?ob7rPJa40B&aB-NtlOwq9;M&)6uc%U zPo^pP%Rq+myYn6zzp<&=TJTM!kNmX=Uv*SlMw14jQXary-~oy>g!v_vQrB6!I?dmv9NKXQP}_lHox zz-rdB9rus!`utd5V4WjbRkPa=Y0cJ{^DCh9x(4_Y7;eF?_hG!qIagkCuIKfYbQSA5s_rH+A! zi@i1H-|_sFVu-g`Zr9GC+6+upF4nxriiFPw#f|g;*bOQ^oO{wgSWawglYC)#{P;!C zhpO7OSJr6?l5d<|D^Y@E@Bm;l=f)(8<(M$8}ZM|h*LSz;3+*+QafO?Y<=Rt$shJ;j9PdV(rr`NPtIaXgrxW{>n<^u z0=$%HX+JS|0s@@{sEMmqL|6t`H$?A$KoxEPlb!x7UMypd5|r){nw z5UQ6|%J!aJgm85sf@Ud@@^{42p24qHyG?_{*#fnrz!TKM$Alsq#S9XOLgU~nlq(}M zVUaQQO6zK>4#)wFRvYr}*H18MhyEb&6r?**IlCIzbTZxM`1NqH^nE`r+&^Tnv^{pL%%7Okz%J3R;m++dDtzC`zesVtJ z15tLp7FjRxQqr3FLY7b)g9&ZAhR7wh%&e^K;rs_d#y`Z>9deoZ&iV7 zRoSGVs0bzN0XUc-f`s22b=J*ItoX0+#-2o|WkiJS%uHe6dLG2#aO|S+u&ws?FJvjT zx8AmS!24Ii{`0*?JfiCqVWQ@b4W!demo$Y**QMR)nJHhG8@JdUE2*sV9`YsX5s^EH z7j#hwVj2(p2q{f@D}YCCQwqg!`AcB zSq6PeDzQOr6^DD4R#h9D2GZ8xa^@2d71gci2jqb5ijGCm534~*NeQI`{<`tZ_sAD! z(Q=y!QY}tLboEY39lMr$ygxI6!h7ZDr_4**D*OwNgKvwdwY&;eT+-MoX4hIH>1}u@ zhh}%IXqS>_)+d5qT%2yUt+VOx2q^w{g&zFXWei+!abW?Ub{+~-SbotoQS!kHI>j<} z`Zpp%4wUu-2suItIUIw_(w}d&<$ax~BxuOhK$;&r7T^AzhFjqlJ({zIb#o&lemrP? 
zejjO8abc+V1}Ty%6FF$u_l2I(YcS)#HYWJ9_gv1`!{=7GvVZ<@nEkZ+}`FXsIY+F0zd=Ade#3$XZJ=u2u zLHW#Ze4mr;bGpEvQu6|ghBn=ViBz*PxXDm z%&k#F27vVgq+5x^G|*9?rHGnoXc%fx^CR)5^q{B+R;*eESb4WnoK}JTspLINUKbM*- z2tZxj3&@r5^7B&&r1~%!5K>=+mgwuHzbJZ;jxlZT7QXTEz1D@gdzs&pQWBP^ zgF@_G)(61jYf-}G*?^agXI}L@}KI>&zddWk(7`SB<59DKY8-jS%#=Wp6(2xH?pvJBl)4H=T1_R zFUV)km{%zEAdW7`%6d6u7v>3LeAB1U!56-zzn;uho9Q*^HV72%7T;@m_`)C4hR2qndQ@>yirsL?5T{rcJs zO0}=A4`JKznv|87qvc!&O@XshhLxp1K4gpzMqpy9C!EamL`4tL3<B^=RcC9+0ym-^(>ez@1V7B@XYm?-)15CI)WO}hj-ei?ThnO0F^&aPUt zOLQ@s=Wytjm&@`!Kp8VUoHh{EzDgdF0dcOM_3IX+o0);rVwacbNdNQXv!4-$kePB`33Uwek2l=h)|FJXv6A5*Aa0OOSQwwT z4UCQKUjQ;Tp*(xq%*=6LQsdO(eqvZyL7lCAV?+PDM-nAd<4woIl{VzmDNthP5bCYaEJlAB2WnS%^AAuX^8iG6`$_`nxn_i=g#GMTJmJ_;Y-Z zENs+%(_$doJ9g>!KeYh0jg+w+-_Hr5L{qv-Q+ATztlC57erue5QpEfOhYZJ|PA9@E zUZd-kzqPG@^!OzhbB@uaW=}0ds7kQ7bP+`KW|Tgrb9O+=l+q zS=vs$%etN~Uo9{x{VsR0U8En`;M0&c<}KdFI-p`rYbS3)OW68tu8Y0Qh1Ds)M&l*9 zK;4QO$1#s2tt2Z$M$_0=Z>eYBAOG#`dzrPLQTm9`4Jv}UwCV7BU)Ik4xDFOw^{z)?edJy;y!u#ek&-;a1R z?U4HK%A1SIh&aLm7L6ZSQ9UjHwR30lNb0*Yb_PwK04*tO`V}{;1tWpUm@=HG$d;qc3G1 zcNw$L@d&R4e0l2d0nx6+od(PXFbMc0x<6I1ga@b8Ez^B!uW#(Uz(HM_ImszXXmxvb zsPn@580Bszt<}qWo+&aPi9*2Z7fGx^Y+IW%XEyhxv_lTz?Bs-#;Vc^OX6vsAK!;-} zFE0-o87E1B*xEGB#RyH`9;&u?$9`{Q^wWlg99EwA#{c8p!)?nII6@*LDYH7ET<=3r z{FN5`-Q}6ycgX9Yf8XDavrgWs4g%vKL_5BErRh?Pg6iEp`Tc08-@S8%W)2kR;2KZ{ zC=|)a$&qM76kZJXqV`52%@=UUz==25$;qat^6Th7o!Hhxb|&o$!7r$aLWQbEKR@F4 zof{90vn3T2e#7vepO;6>OGgGMvU-R7W(uGcUMNGN92Fn2q(#8B8wfHmz$ttr)YfZ! z8e-S4H(08>c(Yi`AwLTwC%ScHL+R*HD=W*&F2k*7A?boj3-l%yh6J`QCnx*{rSdn( zzyVNs0*8~Ssl&b`xU*ii55q8sBN^GL2)#isV$BeZjUQD8!f-kZ;wLhgX`dFGQV|G; z6B!3p(v}|zcrg(7dm{0QydB*9ZxCiL!@5HmSSIf{i*AAkiwsT;;%7Ot{`DlVn0IgA z-r37Ca>%FV)+L=wR{3d?1-ZHK+V2j9Y)MjBcpBZ&;2N5RAMfG;v#((LLjop05i30O zvqP=tMmUB`oQH%G5}aM3>RMVv4HN)>zEEP63GX3`iVvui^I`IXv%)225AR|0EgwIx z8l4|qHPV_s_?xh#Ey{tuO_M{HD@NnB>08ZOq>|wM4~9ZI9>SUX_^|?31I}{95J8|? z@i5)kzjxKzgExLkH+z|GPi6R{?Af!01mPZy9lU|fn@&<}=o(fHe?b_Kn|tld8-(&^ zWrffuBqo+Sc0Wei1>%zFi3v(bI-=s!%qm8WG%%1t52=>3G)I{K!uRinlcPqaM$GGK z=QA*{gc_ZsW-@zVR<|A&5jmQ1BsmLW`7+1?E&U#(VrPSJXn7y| zI&Kl%)x;xxG2lKq(JxE1oR8{QRCb_3V(lwARhwpSyje z0^ypFY`&g63g;cHT!?h(!`^iIK4_*x{;AcStSI}% z6EXj}6+IeGAM)qgFFJ9N&k~AukBeAE^zUm7e$udj$qvuMy-!yh9ls*o;{2PtNa9lj zp$=`n%LZCQi2c_~)7G6agaPqnQ;En|y#=y`w#Ya}cYF$e+T*{oA&>QAm0ZSXP9@!pshKO=^J01qj^}}! 
z8)#A?Mr;2pG@p_Nuk;$1%Wt4oI^py|1{MAU9o7Bmy413gl}t)sNfi30c^w~8?(Mzd z@k0NNMORmLlGt`HB&|I{2xTwkw(Jd%Ma8rBRJK{)b5Ua?fXa&LI=Ob%iTx_KXMfLE zeWPXH*Jv*t%W>c<;013-%d+Iek{uU9#2eCPtB=N}pmLi=JBd#Q5Dsm!&9{r!WS02{ zW3{_h!qCv-DUb|YZG#e1k@3n~6kC-^fh#Y0#dl%@^U==^_AB4^h_qT7Xx?b zTfV5fPGktB{xvt?jTsCLNrT@%qVo$)^Mp!*I5j4}p?*txoShlRh@i&=ef5DY;0u&( zp&XQ!MwbdF;p_fxW~b90LVo>cxX*s87Eu3r*&{R#xHw8Dtm7fv6=GG3p$^~2F7ynP zuNH>zy}+gfg*4J&p4zl9QoMAPNr8wDx$1aT0x*nHQeg8D>f082r-)IKD%fVXqocd` z?>;ddIevaFJubob9sF^dYg5Xr-^e^YQRaofIDGXg(FF$m5)!;nC4jqnl$oiTy>rWS zeCaPLre8PAZs~EEF72*l{|=()!gzB$W+GtQR>tXHTZU~CSo1I}$8YZL#Z$c&5S5yI zp^P5*!4*tNfp0rBG}L+MMN-=E;2;uTSx^-+GA7))(}S%}CI!6$T7*inA&^ooFbqnG zi0B-YdVw>Qc;@y;E^>M=7f+VEW_g~?^Z&d;KDUk*aB6JK81?mjLc;`c5(|@R&lAg; zu6bv26**%$)ZZ6WBh%8-LOR!=+o1eQ81WZ^0BKW6`JG+C-}X^&RtDyb&WHv?_dVY= zO1WLD{UXQP_sK%}k528y_JJDcw44w>|L2CVZRjlkv7)&>RpB;@QiUi;Cz6h&R4R0n zJWPbiiuW{kWnG7GI1pP!aK8Z%m#?j%{hJ&g@9OS`!8H59gGlFkv1wTkPs9iN0G9_H z_~OM2T=4I@E(j0Cp$?A;UdqaWh@}jTLxY8%M6dez@nej!K=Y?d+0x#Qo#FAWV1)dl z-Q+0WL$$;H^p8E~7N;w+uJ{ns12xAGd;C0L%>$Ct&^QQCeYfmvY)Y=pAIyeug+UcR z3a{0`3kgEsX)l={Pu$sIp!+U9Rk- znFeWx2Lfr;;?GQtjbG}61w<~zm71orGE-U_8rXQ_ z$aYT5bLmM$9S43Lh<%bl^^Sz)SM3#8u)dCt?uE8#ei-@UzJ7il@A!{wd*QR=ja_Ms z;(fCTAKG*A&(2MWH5t8pPLuNMSh9epl)KmcaD^L(f|TB04=;7fAY8ZF?QY>pZt=or zp+zW-hK7ci@ua*u&UB@e#oyhrro7zS*qBMSeLG#LJQg@U%`lt@2M=Q15~*E8s^MPk zPiaT%3os>X#Xx?r<4|w^?9-fv`t(by#vUFeV2XjCw!M3Ly11w)j-HW$Vf62H`hi~) zfoN-;JLooX5K!_JRxfcZ6A>g$O4Dk;vhg}^cCvxt@(zKSEoz)6T^=qLzZ_HW$=`AJ z+_9yCqXIsRbU{U{X^RYedbuKS>X! zt=rU`0!cL4uN$?+@G8FyUPd)piyN_UY^~H9ViL+V4d-pC3xYk~9 za@rN@Uf)#?#znb6X zc4EpJEFF_?Q*|j3-u)Is%)&zGnkr9M41P-Y7+3!m`$`3$nz$Dex$quk~X zx&wbmqp_EWetY7JR1IXng3ZM3xCjz@r2){wY2=k*BtuyB41gTsn3?tEVegY8?U}OD zmhrc5H(l_>Hrm5VBZnOl2P}F8eE!eMElv9I*%9BHzsCGE^*)twV8_bj*VHV93)wyUj1;A93WyZQ9PA+Q;uW$=Zo{)!U|=aeAkq!B$i)w8$$ zTqTv=>^^x0GBMdrS1440ch$m9c<6HIH^~c_Ag8ZCF?0sxT&uE^9{>eFHK;LO0zc~M zxir%=hXU!@P}QzY1>F|oqlu3WQmFBlaLFLTU@+RtxBAHv=Py9{c7XWcQov|kiccG-zN^Tu%XxW7&3JbY%e$@A=f8?9 za8Fr%o|f8P^`a*)uZvK0#H@W})=VKkkn8o?&(o#H#;I+MqTlJVJW`%XEx9{$*DAGW zQ(oBRD=#tp1x&O>+6o}~8At#isB_*gjU9Rxg(6T#fsKiZ6@!YgwuAeQcgSyTFS_z394ZAXf^g^n z<<2An4vi;<*@wf<9e%V~6)5~|v>&n)C?&DJD+95qD=H{JZYgRJUgai-_7twLnxF4= z3t#-WzDqgSSURwtCRMGVA%=|5{*rqOy^ud`X8_)8t89*2=A)#wF*?wM;7Vb@7n`J@ zpg`T)4$q2>H?;-FgVnEDDGd+14R&l_0A6_Bdd)iVIae+n!JvLcjeW1eMu%VNeBZt* zlYp0LyZqJTy_hs!^D3+)(HcezO5V-5(knA~iqLUzP>FEdz6AHJjKf5(^Ml^T#wglD zDn2}TsGVy0U2{o3zs+oLHRUYh3-sM`V4M97sgt`Rwoqp1QkAH%#z*AMS>3FvaXl<< zsnL^6o_X~64wHbIuFK~SaPWM8_*GbxP6#fc@9S#a4r1=oEZpaFIWHeP^H>2LT2(HK za8afT$%q6+(Vrg@pL!T{PLFx5-Bf?tIdt2qy)p8(*w$NWn(vvVuMg^!Zx?ZE4E!D8 zC(M)V;9PmNa66Upud74zIbKO8KPA%kRGrv}*`pxeO7MF%=s+obB+XKtLnrMC<30IL zk?S{H9GJ`Gjf#WzE;H|3_oAJLp*slADwCfwUuinRz$2Rd8;PG3(&b zEKJeON+V~kP}O>un3=@iXvlhu?qX8=n96i7fwq1c}I;@s}Am& z$pPX$>)-rE_si2ib&^uV!sKqw_T0y9(8p^l`(M@|5WKT$!@an&zOflIa%lW`e<_4| z+}3x~a50-SRJd^a0gFV3G0y4vKlapFRHT5?x<74saW0zbmh5$l!`2}>AEP1rf6!Fkk+E-0guKf6igxSi7qiuR?1Xe(nu~ibZp7}rbZkJ_BP^WLWJFLI zMb#W@ce+WRn)QX8?&o^vl+?C03|y0t8Dn!KBMp*H78?<(ITvl_NOq$^GhQ@DX)rcz z+F@HIOa9v3Id9WxhkHrm^t`*?f5f@ACl#}{I!elnJEn-UBe%YISKd}GTJ%2Bo7 zQ5pL~ji#$)VZ-Pa_2ta*3a4K=4hK#zRoqSWO3)$rJq#A@6pQ^?+JCZZfW4LBmNY)r zs&)2$le&c>`QFJO{_yYRDpba+@0?}wIL*3mC`M_uGX(6{Uh=8@qTNzrdZ;G)#4dsg z=V*(iB@5r)&6Mkh6Y3QDem;=eZsNM5QZ;Y8h++8L2H&bfu2oJ=(UX$nG&E5r`P<9P z5*I8D+8 zs+Q?|r7ddRsXa*^#x=0Mkv@}%3qYG7vthXE#FXzTz>i~yQ_d~jE|W&@-28m-casS} zGL5-ug7s{lg3A0$!>dwV>8hToc4*G!?PT4C?btPrue9rQhtqW1dJzL-{z>sTTU+zV z5>rt2@Am}zix(bPP*K9h{!9k;Qi{SB_21?E{^MAplhNKq1Sf%c?e3%hhjcU6}-mp17Qkd6N zA@4v3^ZET5O+Q>(U%H4y&-djg2p#bMlTDGT&#IudJNE5Zp6A-(V^bR$MH-P&6t_G- 
zknLxg*{9&1s-~(9e+axP6 z5r8O!tCe3c70C9SRfxGH=Yap+o*}hLmbL+ZTh`6O>N&Zp7wutGd?;ejUm~RrFMNUj zTsw*g16qU;(a=~RTinShuRZ2_qBH1ophQp|PmPZ9m6(Fq12_daG6Wzf(_v6s0;I_$ z=eBcM+Y@fML@HVVunmql)OU~DQUy{tJi-o!0NlkNR zUwCLl1l%@@As#sDDpv8U(Hu)+MxU!|EpmR9hi&#*!sC+yMAKNPL#KFIs-QZ;BlYr1 z&(NiDvnxPXkPQSA1YS^AA^&bX5T#-ln19T}+yn%U^0(!zMyXYotgO)AKq;8b&nZe* z)>cqOz_f5PApz1|qG$(F`=^j5fM}ZHbs#qkGPBf?O?`Zdo!jaYxm3j~?qCzfzM=p5X-CzI`1u zdrC?Qs&(R!yeq4K#PyT*!QX_JBOIDP$WPs+?8)u4>Ql4F_D^t|O!Xw!PyU#1t6RFBu#33?7$6YR3nygQi5hx%R&W(G(JH#TPVNsuR8lT&ybU+n1;Q_^I}v@r~Lg9r-u zmp*#&A^-_$lGQ)o6N48hk+*yFME)sv5NUtrt6cFvMEyzm!DZ)B5d3JomVr?oS|0S= zM}8_`_WCXdVQVe^=>JacB{|)RPdk*ZO9}9FWWMmD`OxXl0|T+8ZRYARXL|V$BGqf~ z>pz&kn(8?=n(()84Gyhq$T;oqli4hLSj@YY;vK1oSe3=2YTouj?| z&QKpezh~v;On?6R-0n`a;c*)u)js)q=MT$2*j)KaA82^%N=?^kX0?|}er{U>TfLUz z7V@SGBwyEe&mafM3B5ir zK>S(l`kd>hE_p95Z|*jIx?TN$KbuQ;mY0?$hsf0_iGzm|2i!0D`U9zJi+J?(X_w8u z1?7cB5RUx!TyIES(2&_SLi^FD%kUB#cO89{Q?kz4YENxp9uu;yU|1V;j>Q}DIi~Y_ zhK(B>_+aybf2S*QaQ;VwwYZ3n{~S#oATt1sfQmvxw=PT&J?Mb&Aq9qw2<+g4HsVg9 z1UUxCn8BBI_&2^@I~pK|QG3AL$4p8homqky{c<$sj#J|FfXD?YW`GS)r%oo8pFCU{ z@_k|d`N{)UB1YL!Ig2vP!iQD)5~ClZlI2o%gym_AWBa6boSk3L)p}y(@JA#udpVSXtSgO5;kYlLf&`6^AxDa=Xj4?ouY`J`MfAYhD(y&s0N~J)vMMk4Yq2 z`KjpvO8-3d{;I_hg{Ynv2gHxtBv3i2Scmrxg%~xkE$l6`KPTUuX-7YtI0=6ORTjx% z|GUCXeusQbRg5BMN;Jk5V#;!#AM6ad{(R31c0MlKY7Vz{vx|Rm84QhxuSTIlr`ycA6wAdT%+;!%_}GGOy+TshB`CqmY&iHue$`clFI<;a19E-BTt0E16|Ll=t8H4$ocD38yG@Z+ciGKQMe}{h zP0@FWW^Zm4d0*;}E=hQoSfab+cl0K6F6a$~zbM}hg{_WcL8`M4uQge!bKlp>E}l2| z5J+C;t!1(rM+h3~FzlWl*W}-R!>f2mI6*Y8D2298qWgD5`rZZ!er2t0`r^jP3NpXn z*A1!#F!G&&rp!b9zFSXMGorXjwFid~`8%*k_a+4665(q)3J5w1WSZ z!P^0hWo>P*e$0RTb@*DdGeIFk=k=S<+~?M~=6*5GRcp~(synEk?iitz&QZ~L{EIIB z(*YWa-iu4&HIZ+QiAhlxRhYTnAot)fvrISF8xMGKPOl>Rhby^}hxh!>%BY-(w;3|6 zu_uVco^+g`9>smT9;%s|4L&A{_-iVpK`**QR(k%F`FvBzD)6O9)BEznC^f}C{g}D{ zYS4Cr^xdss#8t`%-R%xk#mpYQO@CQJVCl)AXh17O;m~7pK_!b9l1u=!K+)yq=7!n8 zwR-pxvIFQQel-k|l80EsbqKUdGs_*lD`bP}-)|*X3pif5VEN416rpbR_S0YvooikL znQIf!H|#iYQ%ofiNoOOYgxcELUkz*fAgOx;w^Cjn_FzoV;=9VAzx25b`Y05#Q+Q2G zq{?A3jp2kPkBVj2QkC;kq4QFsiM6D_G>gcqdWp=#vZ-q6sRzBe9x#TJe29I-S@t}d zGHrpV_wy_O3byp+10)&KP)Y!XBMuWVG&ICjw6)Fl50SXmo@tXxnJiz!=#?bL?_~O@ z!1zysuAZJAQek8-gWtv0l+sSr2V)0$_DrUtnK&c5-4zm0k$eWaT*U3mWEDezE0$%turfS|7kCNhIi{@#Opx22Cx#J%~1 z?_60g$Tt@J@heW(99Fl1P*&2_Oo8awUkShWxXWZY?{}Xqm~l2cVxFgq`p((W@$=>{ zP%YEbJ3|{^zkZjd90(CEu(Kgzf=z0wcuCRdqg_W=0=H-GGWwtA_ssF@&h7ox1LmhS zT@-`~v7Rza)QO3rTk`z39eeUUWpll+Hgx;PgO&;v#^;wc_WQ?>e3D`O$#g~#yK32# zu7l5K(ykxBX9^*Pf?Cn*(j~JsuOH(_RFMsIQ29JY*oDTSrqFAle}2QDq&li|$LZHb zmOef{m{S&bG3MK0NL|j`+A^r3$vE-EVoyzx(q}4YgP%aaqmKo)un1BZBHq{fXOI@z zmab5BFo@Kvo2JM+_5QER!KGpnyUq1xzFP5lzNh(yImGflHK^u3)&7-}374$hf-$m# z`2>hrS}*m19vb(=iL`#^PkLsq(a=XFSy$@x{~`U!si^21Dnt>D_);Xx;f2b4ZHbl6FKTV+x2$La$lTMPJZv`)8c9Vuce-pPd(3UCsXLM zeaI*gckdpe!`4C3Q=f-wfl(Ei5w8jxs_#t#jr2TIw7b;wR^w(_`5p4Us6@Bekm5Uc zzJrONZe-rh)Q^-uov)JyGBH-=RZS@Wat?gMPXPTNdWX9I>A2xX2>=lS+TMt>m>GWx+0tV^HMy zs@c6IohDV?*iz1ww5+d+W4rW+fhH-|Z@kxC4YfZBTvRf7JEYvWn4KIPLb}iBp&h+% zjY$J6LZ=#!HBISWjMB}6Eqi0mHE`@hJO2b|9wOJ0de9g8YY90yAHb=YyLT4IZ5MUv zRyjpO?e!^L5^0qs$cV%T#%mI0VlA@*bIj$0(-m0%L@Ygz;u4>@;J=Wt{vdoxSAL1V z`B6945{?MFYBxB~0@r&3F(C-%4tqqw5a~$b#r*QcceH^l{CQ-ua*deL=Bg3LeMXY> zJMFSMRP2MAs#Z1;k6Z}SH?G+QRf`;}AgmWpCuoSxrh4t5VTeO=9P<9CY6IH-eEN39 z8?+1}f%Q@qx_f)QVfF?NkJAA?(aA&i5=PaJ?thU+KK`KmrQ>3#NL%)aN0-DUm%~`T zJ?iA++EJvXIuv&&^`2$hg2Lc)9y`%IMV>3YGv6itr12|WymTgijQ_N;F)?@mqy-O zE{`Kr3J5_dBh6;?f{{h_8P%aBzQ3nFGLeUPCT2dpB}M?AaWTE=qLNb9nl_1NT$cSq zGgM%7&N5=5+i3;Ov@J`n2;FbDy>(5H!;@f^trNCqjz7`PeoR0^=z)l#ZrKK(PS}aj 
zX0uWQrYNCAw#%U{lTHiytT)|Ac*GeA?dNiW7A@5qOo}WY{JwTX`AET0KT$*HdczZZmYRW;L&mKb#op2m)V&BKcfTQ3ps6^+&j z(wSKPjNHPUfLe{TRjssQ@=s@{ZLEpeqGZ2 zeACAX+jCuC2${{+djpGVN~}ED$<(%5alYcc`o_(spmMB4RBew~hIPH+tj&XWr$SsDMr?w;qe-;G zEwuIzVnOc;Fi(ouPeQ1O{3x+|XfHV}qbu`BBw;sJkWKh5r^8brf3nM_axRcPXa4Ax zE}HzN55Fw>6qS&6);lI8QMXO{y1DNcY%i*j5%d1I+uHEL2*r3 zv*8tj611Sii$oS+ZmNo>_S(6d<0ld)jmduoU2~51j$XK>d|GduK2A?a$WPNZhnqkU zpYez$QI?2zU@0zIzF>>XLEx~FN$LKYJ^hT_xR{|ZW-&{tK|q6-${yO*o%^{=omQTe zlur3PdtcA()=MgVBjYC7(B6kG+A25W3hg0pB|NS#zg^GcMN5+3*`fBKCCKs9iNlZ8 zEs|2d@(o|L`1?|clGGG9d){c)C2+hlBzdj)%H&%931c?7j}lt>FR|vXk6;=N{~Vvu z1Lf&EW?3UEmSf!KS|cbdjeEZ=xkwRZw8ZyfI=wc~+N?}Se%;v8FvRqWC4^M?hDIXS zYR^>zo_w;Vor-@gFt#99QK#Fb<@D}eD(||!Mvrozs~{mu7X`Jp?`3>hxT~nHd02)V zX;7)OVI9_JdFFnTovin!9m4TAyM(qqIe2f&toy;6JGUR6$`L2UJylHKB&4%ok2`6~ zr8BcDZ}D8g{GVYHp}Faq{RIRsjhm0iiFfR9bA&WLPFzI$ekGSw_rGqvs)nWo1ZJvghx9kE3ppJYn;M=%Rj(BxjQAF+FnlcuLV=n<-B7Jg=h_&P^? zLy^^|M9q(+-|dBI71#~+U#=T*FY$UMR0UmG+2Cs?*Ees$H#I$@A>6XcVGr_-b~sS- zw3zKm=A2FMrP)&vxb!=)l1N&%S=|uf-XDe4>-n@-_o?qOMoV=9H_^4?b0J=k>c*bf zLeV$Mw$(xP7X-FyURqKmB_!9Y?@wmX;c)@YdMSdn4Mj!f&ibjUw)7}Q-EjGxT&=Q~uBvreMI zLi1_%tH+M=X{QDYem*I$v%eq61Aa%)j~m@B3`sdp6h)1I!9b4-+y9LlM^Pw3>2|?| z598G;Do$mLkgnZwTvvM-r4(eN(6SUDon5BU=h#RH+WOp)e1!X)9}~>3=H@AJaVnHY zw6!Y$k7Cft=FGpzGRrH&5ZrTvQ`X?6?bf85c8`@7!#TH=sO(c{)~B*9`Xxe2;pADs zb0Edx>_VQHyVm6S9kg6-0^?g9l3w?hg|=1snsbEn=qb}i z(rzU}MTiL91Jgn6E>337g^zN|q(zh%CS=H)bwkQDt#X1|+58KEz$n#4Gjc{vpN*r`&&c5zx@3ld?bYGW5p4#lV^A<|jd+(Sn5a5(8Dps5iaRBc6hXQf`Rv{OhUKVanY;SG~3S z@=*`7nUcV$k6Z+k48Mt80iV_U3lJ6&&NY`UMGz%cW%9A-eWG?3{BhQa{rK8xhM;n3 zR%T16 zsz<|iC0#6Ypf}uW#qS&*_xN|3mUWzprZg*4N0U>9^c(%7&-(n8Qy}$4N(4lAr zk`;pDBvS3r1KjnmSL?0%{*_*>&CcEp0jl(0FD0ew?Cdbgp*MDWH-^q`Kr#0l(H;v` zIr;gR*8L6qOW2>e|2ud`Df`;bH~zbM#7)M1%JW}1s6%Y*7H_%XDZ{gdK=9wItL=C9 zW}&f2L4MqG5UDWVb#!zfZT)-OK*~`{;?sHVhLR@bUN#z{bH2AQEh2(~p?wny)R^e# zpFe(ZVD1tGwTKgGN9LDDQ^tb_q2_O%%SG*w_5VYf&K|~Gqhxu)kGwpdo(+@!qLsU% z{*Bqi%x6QGG(ha(m6k@#nfmcfRH28W`RoQ1l^B`S1d?XH7r|CAIkmrgceJ7vdKv(h zh)S`<4A-8XXOJqxje(DamMQIw-R8zRP!YiQGKs8IwbgD{$qoCc?_oQKwh;Tih^Q!( zz6eCFAd-NXH|T+3@5s!;4!SZi*N(gqpjfj2@hSr$s{(xW?)I+uwLqjOL7k0$5~CNJ zUcV+vJ=4I>K@SF>poGyqjq54RHH2Fkc^}PN+H5{n2xxiTQS*vyU{YF(Fh02+EJMqq zuBxrCzg-p+ATVF0@=J4bGiV=|=NMBVB69uA(AkSAi;&lYbi5#w0uq~xg{24IT0D$C zGa-S|zXA0llsb?zZCn;EO2(>@gL)>QC%-1PX}> zykifsV~2)C(j`={!t+6=^pjpzpR&^Lmq@K?{2Hu-!=4`oV-CHJDy{x(=X!?PO8O{i z)xCC)=1hJ8q1eznk3=X`?>sz^y2GGFqLW=Db&T%1BRb{rx-Ap(cca78YW9b0_S#F! 
zNJ|63VNy*_Nm*ZC2M4Wwr~GapS~~RQ;CY};bOm;dkr*t0=8&&kei-10mX;O;kp#N} z{WOH)R{#dx2diOf`zFigN71)3Q7P4z_o%$|WxSVp%E0ZCR>h;v-BeoocOOfq zRU8QTd29x*R@I1wi03`HRIrb`eLxD0;LU3(mx`|3gV!8WiC}{9#3&_VeZil{wA{4!Eov4IQ`9|5i617$N#3%2G=ld9N4hL zNm(`;3^M+NwT!@r4$QrtA%i6LUnx454QxPo-=+9ooZ~UDh_y}3xl7%v=p5G=GV>{lsX@=H8Lb=RCObl)GN zu*qG*v45>|zwT|#b@d_k#Wb6)kfb3W3Gg>w_=2dT{ij}$!Q`||IrM?h&<0@rd1j^L zgWw{jf@4}O7*LoKpn4n)RB#&!f*%n|OK#Jl(<|Z)d+w9|elr)XIT&vx%2ft~1s#WU z4z?pa41f~Gy)l8|9yof;s)Dl#D=zBY=3?6BL8Oyg?+C4Oli4(1b1IZexxg$NU-HEz zdh&=BYn7>i?sgd}hEA3I!&_q|!~OymosE^)@e}Ufz1uvTn||jjRW{3Zt$R&5ka5#s z`j5D{{#;PbwWci;^D1*IYC3(m20!@X$wbHKZUZYI^~9NI(cRx)3I79V0qCI3=ZXkL z^=H3hj4B!30}_I8dWxCit6z*g5H4fjGS{JiyMLa_21P&hJAQm6lOy$mx#$`Hr2dQA zObkoZkz=lNJ%skxCW{O^9(6NS%jBRJ=qWz+AdL6b&du1Xq8~yvUdfpNu$f zOJ@PMCA?IpFmAnk_%W$H&tpUX#=^iaUs*DreR)ASE%RBg?q=|F;AZU6o@B~TUb#wbAM0I_H}ClNwR+HA3NpP$OV>mqR&-qI;#H!7 zw?tp{3$zYZ?qlKD@drpJSf+V(vPL66&>vvu{dxAO72{93Ikk|xjLx?5Kk39zSW7${ z*~xW=I(JxE{aIXCsEv)+G-=qnn&1y9rr=xQ;W;g*lG()XZe3UV)1ka0;v27-=hrW$ zf@d4mHan6hcTv8nU#7cwUGaMZ9~XEmNPI!Hj2$v|{|^V@8E0_s5U}GRM^&zfQvd;2 z@@^w4e|rJ32diqsSZrrE8AP&$l#~><6pT2f;zMKzq$PuzaJ2pJj>W~n;mFaW!2j{m zKYG;F!J(L4YN#oS73vA_3xi1p>D)d2CE&s3&(Od^2*!s2!=wO0K`p0UEPfx zv(w`GMD$rgv)}k#oY|c#a<|c-|LI97s#d{@wEM4lGg(Op*N+thMkWU2T-EiAYJAt; zj!}*Q=g$lA*z*%i4GM=#tF^v^%ZnptGU_{a9cb1TNyRzWF}<2jTm z*RslYAO__C?_qbpn!%TZ{Y82Who7FhPw(;T>+uxbA;j5=iAA58g=}Q%VdWn>P-knk znuv9m!RSjkQ{DE%>of#zu(Uq_2NX&e5N|-&G2~;~k zqv;b;Qv7j5pitG0-7_86?tFw2Pui4bE0$`3MWs=Iwg2F6=ji8TH~S=0dWtpsx7v;b zOJgGg;<2XwFYRTjnfHI>+~Blny3UMVeRn7T17dU9u-D+<&*i0Oyeuqt@P^+vQKtF-g=`aoxAG2RuleUQ|_LIdSt=$~Jxi-7JE_tbvpCj|v_Jp}{3X9C*` zFe1#{>j-%afuy@70U{!mmb)Y(i80c%-K=ys99a^9mfB5*Xwz(OD|B64J+GlCUhPK z=`fXyOZvsP7nd7xd3{%*)O%n~OMbib=88g4iFrxt*N5JdVs zx9psSBuNh+-4=p&_qaccsh#W0L?n!WW9 z5i!QMv89i4>ZHpy3^DlWsBofVBE2VhJoupA7Mm*s%?xTX_h!D2;X;&u_xNA!=r#S%#oA<7&=gOnsgp;9Dc#v&9M z%G4xfN`*?wJXXd+DpN8<$XF!vJpArgd*1td&biLHuCxB=+PhfmUGMw(e4gjIpZmU_ zuT|!?(to~ZVf^>+mKolpswFqdA3BqjnLmig3dr}q-TOd;(d)0Dv3UQ2MTq-i=1a{+ zA`N)4(qM7?_0C<|)=9Q5?pk}7z{sn(y$ij}C#af#U_3i2yCY~ff4tU|Ag-AJ z`u8_cEiZ6l0=?^@voh(XH(d>hwESMvoazy25|(J+TSSgKkW^b#-9w=!6z%#IQFE~^ zcB9-8Qc#Z+?Bs~O@#vFX%P7nLNnQ*8XkZLTGywb^O*Y+e@}%H0_v1S{H{TzVXf6Ru zLLRCk1g{tq@FQu~-*^nV0eEcggw@9EkVgBJ#>PfM*6i;e4yz5QHG!#Y$8cs)F3{@x z4n@dUdm$frbkXp;V2O~AOGjt~a&0+YLjP<(6!8Ez{dhgXjU>QweVh;tz%}^FmGi^p zLa28y7bdex6GFuG>z9T^Sh7FaPxj-vZ+-WL($iH04zGHV?#Fg~9=rm;byl3|PXh~A zLrv`+T!bn;=N&MDiP8zX_O{zZYR^DHCC-0li0LPC0gSN(@fM^KMIZ~1X!*CPUIS}k zJbFz=);q!+$+}4whE0{Tpw%u6g?Qc!3wvgmu~AOW4Zm6R0|nVMoOz~-dFS`*=t!Uq zil7YUPyY1xL_}loMK`_f*hhb7n?0@|W?@!{@(@0681e&%qi^$B$($v>uveU*1eY8f zAy84#;~ze%7E#ze`*UI5_W1FyVDM#HHGZwKr%qDE3Rb4Vm*!hGoa}*_Fc~jjUO-}q z8hm181j2bxIpCfEK^785RMt=!VTee!{W=+$`p=(daA(;KuD14rmIxohCPX>H?Z77# zHz1em14xD^VOW4tb~X|gXh_7*pF4+XP*=%kzkd{NA4Z}ETB9fAXxW~JlML&SMZt7o z6|*mFWQVdjRA4EjOuyZ2T!bP3GgPDKh)GZfrj}&dwthk30Yn5!T5#ea`@*-xuo-{a z2=Z{FdCVM4`K@eiVdBJF-?}tt2R5k12O@)Q&Jb5A1Mlax7L5FNYG^pr-WD@Kx_1hW z`+&}%pCzd|wJs}g=jfCtDB$?Yswyg&B7nE}($w@lT0fU_^F$R; z;T@+o4Bk@!V#Go*1JSE_By^p6AfJz}t}YCde+&NbmJ#rA2fhNIJv{l5v9T4zeM~-9 zoE?B=!me^esT317)h2(!ZU%Gs}A8obGX z(kQY7k|G36tXCID$Ik9<_&DJOXM1NyM~osTxVLzEKA|wkRw;vWF^A23#J0c$gTgDYG8H6J^ z_$BTzH3d_Djf@RX>xkpVhRD{1`H5fk5{MJQR;K-?sE9hYEb$)@$8eP9~d*bD{9@skE^IVFPd$*h1YsSk{=;X!kw=xGI1c zr&V5%LfD`zU^LlI-G~-^_LBRc~o04KvVbp=Vu5oB|?UL1|YfGQlz43wqb z6bj`cxkzEzP1s}fFkZ_jflA+xJPe6r#cNZPHDnb+!;iaq+xYyhCiRjNghY8p7m;s`$?hEJ*1`|$BW5oF7J9oadw_l5n{?hdfUo;O+Yye3$*javm zgIPTQ!Zc&n29rtNibcH4W?shQ z7?vJz8v9zfOmHr3!%;m;`@q)q4MH~>|7G6KOD`{p?tZ(bz#jKa5wdR#wZI|5xn|8A 
z#vb%^cLT(_J2Qr|Y-n`U%Eo4VbTksi!QN*A^uZgkvm5kWnDESV{nU!FafB5Rx=OIX z!)w=Pe^(I8H*JyBqv8M77f%)X`Gmp=$i&1129Z$Spvq@DjJYTkD8yX{ z`IKzz{!wGJJ=@N%vi2V&dbR9pfq~2erxy?pwmWoZ%RITKv~;wk(CKJ&^0oILRf>Ba z=7_gZaoyvsr`%3_LBC1;fk0SW)a?jG#aziBksWuzcrx5LabSUovmE+Itg zguTPa>T_KXc0o#=nVAWr2ec>L&Yde~7ZyCgCD6tO?i(M?C|g%iRh)fki14&u0V4=v zIBh=L&1x4VY!vDC%1imo92cuC$jSZnt0`A+rMSCL?c+oAC$HTX*DT;&o@bkQPz3yL zk{S38J7uK}72cK=lT2^CpAL;pRC=q70A^N6DavF_jrkrrDRnRj+5Y%`C3@I@K6JfGxXAK-OfyGkwZHZm^4jFY@7y+j zbDNj%B5O)N#d4l}M)A^HyCIIjHa>R~wJp=`F=@1#j0P6lR<$hO>h}6zvG6XTUy1Z$ zK~Z&+)Cht)smQ2Xzy=bpp_PtT9 zb)eSnO3D)A%b?d!d>JXh!WCe1uoj`qD{y=>TiOak(x+XnS+AanAHH0(tGN78`y(pA zZr^_Y1^LjWS9a7dA7%c%*sy;jsK#e+7Ta$2(e9&H+gY}j#qvk@gv6bXtd|6vW$Yr0 ztmUKxcTkm`A`uzbHN$h;WMy*+%CiT|#j^tdH$k>$GmT^yB7aynjE;;%Y&oHR^k~%U zm4qQR%JN&ccG7?QSEuZO&Jc-oR8>-WCHFpc77Z5qH@r1kpk0wP(LZ+Uez!_R6RL|!;x!le%;JV5DvLXl>{5)652F1i9qN!CCbuV~P>EpRgy zkPi(r$YSOQ{o5V0<+V0;6(6w> zeIG2Pz3tXZJcmFe#$S={(o;}9{9+EgfanUL6`X=H(a_tsWlq;T^ztHk7q<2EV6t)X z^E}u|PL1^7Fu5CZ5XOxh9L0xfp#TI8|Dd$A?r*JJ`}@@Qcox6gV%_)gZ82Put^@`W zmZpK~9dJx(`urL1!Hu~F9UaA@P}=;>ehSe4U>~xuwTkYpfGTGr!5bzV7Vm6nFM4+H zWw>1S#Bb`+>T`LuW)cS`pKw))*Y(j^SJ_EeZnIPdaAK3~NzZy9KKRr49pyI%TabYr zeyYF4W#EWPS>$$yFfo%4Y>EOtQqs~y#y2^MxGZA)LO=VszP>&ZHV_14teZ;S8{|5E zgGu_8t5-oIfSL}Vi5UY$(5GkZwUWxp%8-zd!vbnL*?C9qnbSX2{-Zj$mM$-{XJ);$ zG)};8`4wLvTt|6;BByg2AKowR(wKHEn#eX5Z9JV3tRsoGiw!7f-mcd{Ph{3O6`J!smGTRqtAKD58zhNyrN_ZexObpd zgNqmvNz8_kR#ikLi9%u?qDvTg5gUR>Tb=d2%-J1>NB2{0RQ~Na>stGE`$*2Z?T_#o+k(TI8lXc2FsKBKd>M?n2B+a5Ieph&+BWbsJGW!;;52x$4SUkOJF_yAL==AYmp-i@tJzFzk z1J`R=37uyQyvIr}>n}hr%=mV0qICMCQf-b^cdGd*lUFfH?wM!C`(J*FzVrS?+NZpg z#o~a0k>bcX_xlF~FksAU$iid@y2{lpFoPU=z&p1AQ&{=<+WeGe+tE|sQ2}m}=X7n% zQaU<}oUjM`e*A@?&&UV+XX~3|)Y{X7y&v3ioKqjg7(7TLSlHMIKOc_udne!9dPvI4 z&vx)GOaku+lUu-l;c1#`TN|ML`I}xK#K%n>Z^n@11!67Xvs_|fYW@gQgfdLx~J5Ssi5*X+XqZXKMpGcGl zOIqjb$d;L&o;+h`JF;i_&vVJ0(@Z@j%Od3-ySAPfZx(o6BRgX|c}m;#MyU8@l4^ zoX?)z#ioH6AO?}6P3h~`cYF^4lg^AjnX|VH-2fY7*i5-T3%|1uR|E&wiSKYlcG?gX#_G3IpC zw#}Oda6vdi_y|YD4|lg#L|XUb&FMqy|Fj2!K8p4=MX#M)LFri``&(}Mnh}A-@ zx~ZNR-Ys15^&q`mUZ*yCe3+7StDtqnbeBceaaR5(Pgd1NByjcFV#pDdvHyg(@Ui}@ zmu6LyZzn~w)b3khv}RpqF3M-ay)%~b)y%K%jFp9C|DbA4d^s|fBG$LVsXoXK}`y_0-nN@^V5_ z>*=XPDfw=;J=5-p-RS1K_7kOa^skl7^D__V|R}BTd#I48EM3#UTvSqXs z-JY1AxkJt{OwxIbhuq)TxH9dom{Fz({1(K;2jfuA^+dV!KaLg_+N@akRGN>O9$XrE zQpcW?r5{-5;HjmaScg&5X_cw6!|$KY0=IySoNzX}wYv*_IEd4n2nkdf##jgQY{w~% z!+W@hNo*F@)(MpxMMcqFd5fxc2faatWgTYP2vrA(Ir-PwN}u8k%WlpED+m*{n?GIh zXzmdB(Y{~s{ave7cT8Pl^HMtYi1Ma-1~BC&$oa-Hjs%rwZp$4VOQu1U7rc8JM&>7V$lK)PjNUbT=Sr!22`v!&>eX_z4=C~C%sI}b=%KrkhS>^pQRTZhJKycT zen-(VvH8>SzDzwUUTlSH>ogz*#&*F?eK_aW+FyTAW@}iA{=SzODFk^4>S@Ny$8mrU zY!mjs`}BA|_$kC#OaTD_adB};N#Gk%VV|eN?cCjwO-?>vn11d<_B{rtJKj~RcGd2{ zq($hA(X#AB&;y&$#l@v|Zj~=#z-wjI4`AXHVhP|C!aWHmz!SvWquH3oa8QGVlYu>kYS3B1j=Vs-99y? 
z9C}t~No1qQS=sY$PaWgxM&5d3y%xXE=;$b>Wf~ooD*Mj32`YD_&1jFK=3mRj6)th| zie4f{U8b7vqR8x+kEPQTHrL*5ZO_pYR&o7}G(GLieOF1vZ9!4Ixx)CX>cr6CYUPAl>z3Rxd{xKnZekkI(xv`CLIRS8QKd>U zrx9Zg=!?ZM<)FW%&T0uC4m#NrX-NjJ-p!wJegj(8`E($ET|GUBd{Oe}2m&%DQM7Hl zP3PaMCvyME-D^5~1>-Wa1MFT2@+;mH9p70vsY41V=I1AgXw>6Ad|1z`6W@BlT6E+@ z0zlL8KpdNEJ|*fU!o>Dhb_hw7>-O6&$C5XQ3AnEkY?hKtj~hL>cTZ;~i%Q4)$^G;{ zHCNeiRDR9Qspx$5(+SlG)VET3dz19s@?`mTUoThc9~fYzryrb{z?e(pJZDiO8|=c2 z7inoDutGtnFys6cNTC4N$0J^VSQ{VjySeFfeq}vvfaPvFu zb#iOh4!& zuE(O!I9W8(E z`rJ>DG+}augwvCJgEzO|Abu57295tsvthP#)m2r>^)V#28+KUkd_4Ya$IJi_=FfJC zeXFVT2{pkq+l{w2!sFSI*DdVy_*a!5_^}P_A2yE&EPkn=z>f+W+I_t@@Gb0jj;tt| z#ORU7dWmSMpyxd{hY>EgvFO?euHWv%vqvSZ=j+K>=b=;A8&sDksm{k{^DU5@cPy9L zv|H`a)^%~!zBn*Vw#gRT2NY~-$P@gk+Gsx@)`c1oUMG4ljTbTLBpMXz<07rQW2gSKqb+t2za(Lcpq2y^!v9bvg8glA5KaYif7-` z7wOOTx<=}-KFhMW7BjNMJ8ha^q6mV)+e_4bXttQ4tsExTkhWKHr6nC%czX}8?9IV( zgbxg8YwwGSz*Un9Xw=>EsQFl#i@~iRP}2G7KEfcSi`DT?H=yi8k9EJm-WFA3{K2OE zj&a{)zPNgmFI{fB$ZnO7F-CjYk7it?ajCzfT4)SN5*66wqno3YE8V|xr8nGb*GkWn ztFk67ZC72OG@OCkA!aF-S5{_CKT@cWIuYv}>IGZTRZIPbtuihmuZH*XG3I%I46+%QRR0})4(^iYEs>b?7D9&-VbaeJ6B zx|yst|MOHi7~gt%@FhJh^$;b{3Sxa6@<=X0VEpoo$?JBxBu8amQs$8zLeP4G>456# z#Kd-pJ1|Ia|9*1mSvmN9>;Nc&V1*%p*p%gmP0ITF!&BXJg%ka1EHmIP8QVvbq@Qve zp;i6C5WGUc$wM)8!UZ*KGw@IZIkevhqi17dLe>hSNpLEwI1H?HfgVF9K~I=q-@6xW zzxCelT!1$48%6`soiGb|ai)IrOXKIY2vN zFt0>KlVW3!fei$S307CNWd@Rco{AqRMK&&HRMFJdCaeQN=vpPb5PIR`r`n`}ck@|~ z9}6SN7*0kdgGY)6N*pl`%NCuj(K1kV!h}+r9uX}{7}_8>iEP@0Q!+;V!MXXVp0q`c zc@9i`&-2IWoCEIUd@L6DuXmxT!4}_R@F#v)Z23TI0ONL_7Dq@DvtKsgH@+U??;)NuE4u?rw2rNz{MEu7*Yl*Ap~KaviFP z+F7}hGp(_)l<`eZ_a%JBxJXpNy~zzI%vqS316|FgO%j6u#UD^nQE_k>K~p^s?*2jQ zBWTZpxQm{23x;V7oX$*!%|R7<-0wH4S0a^*fDNXz_*stE@2{{QIP{)R(XltV&FQ%3 zZR$^Od>~aPXIu(4Op3pA7WR0(^|dM)?p%oN}hR$;?*Wz)cvEP+d9YoZtuLs_BBPvR(0`3P@ zb#+eY%%W2!i0b91)gC-|po(s*mFX!D^YY|iq1D=&pOZrjn`Bj}*L$!MeHJuLKm%qc z`ax)8o81NY1Dmu1k9CQ5+Y4#GjhFlik924L&XFYKeejL0M}IzB}Nu-Fd$ z9T*szsQUzza5v}7Cm7WTo;l6MQaHa<>?6!bsHNpb*mhX>*x9Ds@PA_Ii!|(l--?;n*8qdp6cDF9iS%NL_s(;a_KFSd z@l^CYUqKP2b5-D|+|=1>n$hlU$>GKNwAgQr%zm&1&q{Qy*(@a#B2Xp1Bxc*H}_E&-I zHr!qX6x_Jv;PLP~abNDt*MDcdF5oA9X^>({syx9JAhKff9be++-dxkXhQxDxD}9OS z9unuV9oD~H&*%`nCa0ydze3S!a8650i{6BJ&w^wGedyftq>gR5`E^kY6tq$;GYn(*<1EiGZ2A1TLNOgR9P|jj zFwSmD$JEhEs8mwDa05~Lv0Obgs8V#h+DNO`f^ao``s~@KZ;tQ@H?P3pM~nc(^oB-M zzpbt4%e{S>FPa%#TtAP&F*SW-vQhgIY7ZSgd`60UJQmdfsNos8xzo66EXlPgPxy?= z%$I~pP#*SG$@=F`lv@tRJ~HauFww<(Jf8P}L)VwDB`xJStM8I+ygzIS3bv{<5lKo* z5Y>w*dYLA7mzR^%1z0Ag>5^3}&zylj-_8)^)FuZGYykyZMbpUWGtgLg(rz1(_3X z!5zKx@q1&==*1wtpLX;;1~QJ7!3U-xkGBAeWRH7*f3_@qpV`|m*F*{9_E+PK-*?v> zUS(oW%On=WlYH%Amhr%^YQL%S6UzNCm|q`-R|AOaALd}1gZK!}JjTa?`t4t8@91fS ze6wL1e!8_v##ZM}oLJ`(LBqP3vy!xL*nN!@+BGS0KJoOg#NJV7bB5vjFk4I}1Qi4r zCEgKiJplI2GoBr*TV2s=%gxIpW?X^Tu`C2%CKEuyBi)U}DZ_ne?qp4AUR!~xO-Dqgr{;jFVOAr`LaeXFn&eeA96;@E|GtJ^!PL&K zs=hueKfm>c(AISYoYawCvp8c)t3=ewOEvWtkA=x`VF!AZr@78QxZk*t^2J{*ClzcS zWfPxgrWvFrPNz2&Wq~Ep32yOE?uP>OLe6YkPc5)y4h^hz;#Xnn(ol1gIf9wmXJDNv zgS6V{?92#~jkSvFG}lgpV3?g;*gkmp-CL>xhWMEEcHEoSkp@|ZC?qeZ_#4i5J!m9Z zmYrl5tP!q$d6X$su0*9RlE{@|G;!(2St-FQyDxPv9!^D1KUei1a1@}K~D_Zhif=Wr2vYJ2Pq z@0QG98kdqYP5N>XynyEbqJ^xI^u7H(=T^_{(zeS2t$q?dW1=L|yY0(Y-5pT3eVlr9 zj=F$ha$`A@!BO>u)t~F?1h2G_mHCK!^v{(;f>Kx32&~QPY3ZC0I18tqd5WHR* zYiAJ(-rU&wKJbZqrAq@E-iEIoL>h6`-+!Cy;JuSq(Ix;&NOYV@P$M2 zzmXNWETu9!EaXtRv~w1bGb=9zs*(Ks1?~5>33#qLx^Tctv88_-LMwO$DCYll)H%nX z4nF$AHRZtraI;*{!v?_v{1*#bTa#3?a^$n`KU@zBbN*H%sDdF#A{#c`JG0Lsqvz`%3hDMZvVPLh6e(f@1XKRVnWG0G{BV=_K2N4w=RaYP8}oITIv%BEA0k6TXyP zE}o%l4+NiGN|GZMf7_Iwp7{o+%L0<@*F5Q8sLr?hci!=|SUGT3=KY862*I%}%z$;5 
zuUVr-e_#=4y|d-SQp!U(LH%|nf=+`DodA)FhQR3?j+-j6_Z9%304v_2@ESBg3|u10 z8T37YZ)iWLmd8*?iDtJ(iO1_@ro~Z+O+KU{ zO1QnOA`dtB{Yv0nef|BI;Uee*;6wnfSjb}#CWdf81MWd*4LY%y3*(JQm(NJu#iTv7 zcZte^Ah-)1-=w+#Wd$(p9=cV3--#!zATL@7$i0ud#s7Tm;+5PX1 zyS~(~SxBN!qRuh{LU2rTf1cz3GFh4*Y#&W4a0E9RzlWVzS{Et;Bw45GS&w-uXiU&e zDI?%N|Ffr@v>r2E4yAL0nZ9GHSO8iKC z>jnm-(*=Tp)ean9E1+7GLUye{vk{N;#^MVN3pj&DDx%&pG%{Kp6px`{KOvY53+rwU zK)}H2{enTSXkzOt8vX;6P3{6jM`%ir3u8#!X$uVMNp}2JgAx|XON1I}DJMr)J_)hljr_n=Pnll4fLs$DOrEq~;Hj zG`n-qSxRdiM|^s{;!HqdCf}jSaR#7{{%lmrZ(n?|wzv{?Jh&`ggSB9#<2EAwA+UJz zHhi9!)KKd8mvZ5s)I@e&KwzaPBisnK&pOeRD=(m_A9^pBMKpHZGG#B`y<4s?qC?2y zaF%OcDOjg^kpHdb^0O_1TFGQ>9J z$45&f5Q8M*VkP81s1}P8a5ok=F}e@LRw#yhVJ{GgVIjX@39Oa>$}(M`lm!bJG7F#3 z|43DzWehLKgxc`Q(|lXjBCk^<@RyXI%;Ms>IvTY-5P4uDShmE(LNT#?yLJ^fw;2q8 zfH#0Ped9kt=HyE3GMq=NxVia#-~)UQs}5=?d^$|}($jC={?71lT}oWslHwzUeZOi1 zZzEAJ9|nr2_ou?K-l8=$yI1*Q;_4UlGw3N$!r;KbC}YOUm*z27Cp{rS{?}19=ZCW_eRsTx7Wgkn#mMkE{q#1O$<);HAX(1+Rwq&MDrj*y zjaVRwjHv+}4_9d5 zcX^K$jmtIF<|*`4!)!Zt`&th=za%AtAVVLb%&%$*2_2EXoINW+=`2;6x8JoZpsodj zDPRhLDSpUJ6~JJ{DUzL`*Q-gJa$~SSQ~Qs7hxM$}J(mCc)STbA&z2hFhESL2a?Ryd z^OOh)%P_f(v@n+1<>4_8b6Fp~EjGc%yE=Ju*@oEy)FRZ6xeL>?%)}N0W4BwEmhaHq zH@VBB&dsHOJK)mTZ;)h^6_anx&>!bR;anRdNuIsv=krVt{;hq1DrUX>wp!MfO1s8+ z&qm6i!A1HiTi!a=H^;b8qtIaYQkA>)- zvobS-&8`l)^O7Zq%^@qlyg26oq>W&8Uc3l*GPGr!*IMIr1p^ZyoRyUorI{0&N*Wra zJ`5bPa&nCq(9Z%@M{|$JgpiLovjehnsDKd5K0&44cPt#*0Z^5LQJ0~2Ob~tvS)_@UcRGQHZ4MX%&`tV?4!++(ej0kl6W&>UO8`WX%-%^w zCO5|~I9-UyW3`guoZp=LVU(LOd7)|_v{@j7KSO?Gh52?t>2^?8L8O8I1U|^X#s=nQ zM3jac#Fi*v@x#dDKnVcO0dgleUeL6?8F0WB0!IavTjEUZ1w{xh6ZmO+S4YBQ+)bTxravCGd zHLN~>2nH&$ns(>UpSEK!^fqB?%r{~NCkQXNpV+m43((1Rhi3xvWH7T;{xi|k{}gmA z9Kw{-Qg_ihg*MV5Utf*cn{?Sk)hHC)%4i={V+P4e3sv*x+Yi$n%%tYJww}M~#`7^^ z>L_?8*%;~K?hc{#%W)VAMzk^}f4;=R8qhKn8xvy!)Eqsi)3JX7jvK0e0ghz3uf-q; zDSFI?&pi(bscdWGqnII8hhzjfEYMSVL^fa|zz*cH4cg4j-M2|emAukfCtdozuyMgK zsq_CXuRO&$4kMZXX5F-hpTnx<47ah+(pf%x`yPfA2&;JBDYuYd~YS9NmGmB-Z z_ctcXPmJjOq0+?Tmet+FRS8=?w%X7z!}1XB60i!E zgww}x24T243JzevXr%s7Y61ZZ)x!TZZW@g5hoBA!gH^hEs@v}HSz!KXu^Z={6{HBj zSRKL+tKt8MNLFi<`1w^5>j;q)c$LAQg1j4rxc{0GAzKO`nbr<};UXX_wJRWL)o%Zc zUAsjRDCeM$LWWDsXTzqxfcA5I9Zpnz6n;^~J3334D=~?&M_;kY-U17gu8O64}PdoBM$EnVQ5i zZR_uN^GxT=t$6Dxg1*q-Kq%tDWdQOXpO6r-_U-G!xN+tUaksCwR$aS&0{v})ftWjc z@BHW}nJdjP3Ue)HE+Mwe11qN|d$P;#AdS?LHD2i)5t-1tH8t ze8{{pp1Gy67Y?oAKKT9BtZJ>n-l&G|tlfOdJXyi*?Z;|0qVmf5Oi98!khFZVussZr zTHW!1i-o@m&+~ITc3eca52}4U5fpfqvR%H0NyZ>3pmP7w*tk(vwVbe4?Ms6P4hkAj zQ9;ph`EstbTG~)0i{gK&%0u@$Lv_oWA3}?WH&UvHd%36l!j4pGE??3Uf8SnXo+5VS zosyZ=b2Tx`KQkD$CfVB0^cwzUd(hCfATS&oH6YiQfw`b6qW1EqJCk!FNG-NqzmWPN zF-&{IrcDlgLe(c?mIWJK(N#(?7W(pe(qEuiigfWtcV!AmnW4SM*=3YBRiN?OhA%5b zvbTTq)O2IgnqhwZ+0&wNLMjrPh(_^w{9tn(% zkK+s@GJ*{sEuCy_#ay-*i^C-W%;Muv1OPHX>f5ihzE~X9Vo*T9&@a-9wY8`yFslui zQ4W0wBO|8E@QMN@XhPzG-~_fJrltZKS2%9eyU9j6C83Lzwa%v_JXpHc>68oyr&26E zm-23|)h~{U5hahr`e)lhBO2t`tF0~aG1U;63xR5XLl<`|H;c=5N+)U(ykpX{XAnQU z0c`R^Y5k%m5de*)tt~ov=v^s@q8pb7^B=^Im;VjntVt0arLpJ{s{U}-j;FTz z)bP&TS=_PL1~YEi)skrUJ^jW*GG!~rs%1Z_eqT3@MRaJZ=VP76M5J1rHwo#~FRt8! 
z$YWeU#45(l&%wCGB1bs52V~*DHx?;ou-OGL1>&ldf+n(Q_@9A1O;16m2XA=V(lRyw zX!C;8qxG4rUo9kmP-goT+v}@VcJl5HY_z@2^ZfCg8}*?7OCx?n0*RhaS!5@7&G~b} z&KFz1x~|&4ai#m~e~@K}iQovgxVUhfe@0;uUtOQoDOWgpej@tAA^ja4lH0afc;;m< z^zGLIGcEd5lj>yk?Zp&%DAv_ThX32v_n#V8)*57IyO8WQfj&Tz4!EEjvO$7<>MgK{ zso7#Qpa^Q5wK!M9o2HFIKFM9+x9f?qmO3PXBc9&x6L{rHE?9Achx-{=MW?2xQ!lu{ z2AbF2GjVImSf)$*9hG%8)g7ef&~x-E-jihIS0!ud&T^bp=TBuh3LWQPxP74YHh1}i zVr2LE$AEq?uex6;UNliJY2$_s7J#|?`$a@V9$sE8ZKa=fN(<>2+6BnEz`o~5nt3Dh zYUu5z=Z-Eeu)22x>tWr1ZN~avAuLQzu4yk>k)ibs2s*zyRNb^(`?qU&tqEvu@lEgh zi8?6dTNZ!XyfSyE!20z_^x>$pV&%#&c`lWseAh#^|3`D~D732ofXgiwwpa^e{^_n= zcVuj%!CJg><^E-i)kHXZ|Lz?|kd{?cIBcGlMOPMQ7gA)H>>yv=_H|zGjLwlG@V+@d z6;0eSZ!a{m|2|#^9yVKce7PW7J z7+ef;1u^s%cu@0EPtKfh&t56N)Ncg`inQ4|JXox8z+O928#x)LbNn#LhZ*IZ>5Wm3 zhZE;D?yENX61;AgA5C=_>^fDo>G_MAFJE3^cnzEuf&L}L$D?U6hOR&ZNVRz(ooF;> z3P`3yWLt*gzwGwtS7nB_RLR%?RjG^>mvwZEf-nRmAU>BhGf)g&ssW0sG zIXgS?sUC|^o7%Ia(A8KCaZOTa%M?7Pw21=SDTw6pc<6FQgJ0~-YLJEUbvG=x%xFp< z%xj(7OzDKE)eN6H@oGXLlNJ!pAsrU*IBA!lC+%KNVeRY=4pyt}$d>p}+uFL2QEv=v zZ;L3}<9jfX0sXuf$P{s?&P`Q%V!ja*T;)?wULD!GlvI6v(E3P|R>w(Rl1$;#R#ex` zvu!m-t;f}@{35O7s4--5`vlHa1rgAmvR&uY?SQ3DQoi<%JDuv&Sk9R>GBEJ5TxY{qCqP=# zuwZ*XYr^)s*UBn%;n4Bn8oo>sQi_ayO-V}+5cpmAjj52)aTftbN$9dpl3WEoN3fH; zguqEtekE=oPd5;(=D3>Li>x^*y}3O;MZ|NsAcT*Pwif`I!{m%w<9E&Fu4d?Rgl@dU zW=$duB55HFUw&>dZgKkbF;%s+vto6c%>u0yQrj-}S3!HSPsf|p>?L#Jr%YLuY9hA$ z&=j~7!o0*-ft1i5Px}jz>}0aP$3ecZU2F0->n5p7y}v&DS1(-LFTYfeLFq*-HlNOA zFDiLe*5bCI?bNJTA)QQ~Ye^(Et)eV@a5{fWdS*3kZ|P5W`IxAZdfMV?qXp_FB6!Yg z6zhW4fz%IXg1D5F+8(q=(5ZtJ**sxw)y$o11iTFmTmnpo0RWmZz=f*Z zf;QK8^hQMsi4AXZRxA#mkN zPR_%+x-Ve1hz#jGer6p%zg1MxsDa8U?m22r2I{3G5}T>ogeaNRFXVIWemd(~|EDNI z!Gf5CHPi*D_i?qYQy*NWXJ#;1)*D=^;MiEOU|<#6j#LvA%H9F)^S=6T3K_}WVk@@w z*jOw=ZBkq-Pt3jE^Wx_!H_be!HpcOn7g1`81J_s!f;pD&WVyCSDxg15i?DdXB3mab zdw0fQVOVTo7_wd*i3@FXO5q5gY#xK&3>@qATMmlYBVN(B@l?0mB#}z!f$ZqKS@W&& z{LR+MsuOd5AHh`e9EoUUW@CG9l=Tb-hAR!%*zQVngq^^`Y~{k!$B$z#gUmLHL8U;q zL?<}+xYO@bU5FJ37^M?CZnCf#2F0YciVok>eEI955v1NmUS(@JKo@fP5a>kdm4o<{ z2kJrhFR$};MjcP+EsWt<0KxHDP=#cpK1gOb(Uqj^Q+3~9$7N#qD^v7mYf)=TqB)%W zlLdj5&*XTd-zl{R8Vd`@M!;AP*>z*R0)}D1rfj_rqE=qqDPUGNI;|tG6S)ZiaEITh z6S$}}T^)iB&)jQHlhKI@FkLuQu%67Fv{T+OZ45i7N5z9hidaDu2=3+9p#cVt8s3j^ zToXDEpicIFJ1;8=RIh-`$&Cj($71~I*OMSD{*$H z4tekwv@}dCMZXpsOTgz;300h?7Hwz`xZf`7 zmshoXBqO#QTAr3bP){B$mdzy>Rf?UN4b^@7J*&^rlSFW~@BEmKWEx_6>?v69Aw#Jy z`ZqzJ<9UFf{+<_$wj%1lK@wCDuz^0-*O%HlkQPfEgCx%l?!D5qBr;I%R1|NA z_4I&4fQDB2q=f;- zhDVQH0ND=xvyo5iBo%t1c9b5NWibi?iQ(s0dV2d+EsP_zn%5uIHVEAC-0*CSZ4KDE z!Uv%RhK%F%8NWsSh!2P#>~uW||J8F8XMRxMcRJSixk;wgokCMPlRjm|UpKrU(qDZH zWE9ui)fRzQuS#zE_{|X%0qBiSajh2@PtM4w3s~3sr*n%tULF%`5-@0)_wE`(3 z03N(Bejd5v;M$_}`s2iz&D)KzRly zh&bJiFf!fgahGav1+kY;et56l5iHC1!v1DsDCLMUTfwZZ&>2y~UMz(Y>yK zYc*s2MN=Qp2)q&&bD@hR7fGZx9_$CLi0*c47e|T&bMd%#YJy~o^IRFpeFqU&Xt$c~ z;;SRsE~}uOz?!J>!p`m7qn5abisU8O{D#WQWicA}rqbYFLxmt?D7!6x7&NL-=H@pq zE#NPbo-PG&8(BK?zVyL=b7XrI%V7Kg@Ecr#ROq0<*BtQHS6J8g2b+NJc@ph*g3Z+} zZpwhmH$u+TRdE}_CiIbm)Ypj~Njd%+Q8GxqgW!M{7h@dHn+t>&%ikza-HQHok~$z? 
z!Ld)teKL7>+}FHVKDqE=dgZs-=66%ZHsp`gRz29BqDXbLo@!#l9f#?;4%!!80=xEe z*lf2fdCQdJ6rV52nC6?=aPCmT-a9(R#)=NzdyV8EH-a#<`K8+!Kb=3-2j`J*`>@%+<8=9Hf-xLsb`ne}m4wVjgU29@F+BS;5WTBw^> z`GQ7lWo_N12+@g1%l?hG`q_?U67L1VI?QH(osQG`k;ai16RUuVq{fnEFSb~B4OGAv zrm_-lqKHtCBXFTBr3mlsb$L7d#O-)WzOQ1S07?h30~w>!V+E4Q;ixk!|AWIAe+ zKetVXG4QQFFu7H*pkWH(RWd$uL>^{^C?Jrh2DyeJ#G|kRH)_vgT}aoiHq)h?SKs(o zP>s%0i}p^@tkhy6EH&_m20N$6)PvjZ+9UH0Y%^m@XX1C+(^;%XE>4{$DF+8^>YrFa zHUD*UC^$FfC?8@{})YQ(OP%1a|Vh=%l&V7-(0 zws9qaze(cL#v%_DJ1y`GTJ--PdOeudG5a$ApY)uZidDWu#q-zL(-WU)HYT#TDxMM( zHTn6!LL}B~PD`?>!T0dGhn-Gx_{yEEtEcA-3=q-|RO}C`HJPBHPJtiugKAd*#bCWb z63}=qoraE87d;xpEh#Ca6t}~|pdI~PIiv{I9b7M17#UM@m0EgW1od~&<)KN3`mAEs zKiezqn}vLiyl@?YSQ@q`kn|?!&aV(VT>jm_Y7Lg+V|*omlCX=_kJGV_S%&*9&)jE3 z(!Ar)XEZ1`m62DU1^Li!Om>Av$NC2-ythI_?O;%8xaz#JbMfB)LQ_Dl*WR<|jD-c4 z!ue59ju9Hu4MmT-z5PDVT>LNEx7*fj#;nt!{|~^O?fHkxqtn9I;<}*+bL^ns<0UDZ$6!7jtH-D1s`a8!tg}TeCzK z{$3X^QXSWD--h||*gPm0g72o-%*H zCh!Ep))=4~#+d?NAX8JUAv&oyX&Sn^AAzg-jP(p=0^7VR5;gMZ??M2w$ZuS;u?#c9i~l5Y3|^L0T0tGKY4lzDK9S%fIQ^fm?H*B$(e;}*drt($U^u5%m9^0f4`OCE7QVD z$ZyccLQ1uH%NBcF4`j7TiHRLwfXM&73!YuUK*1@7sm6HWKhB*y7gpNYwb4M~Y7!A< zC<4E?B$_Rw1+e&BIf0K(u^;BK*aJv@_wKjo7~I?~_#ShXd0-opo$U&#Kdi1ty5FZQ zSo{OPkqj#QXqmA)4&44!2hT*f4L|~!Ertf&Ke;-@bs?*Q2MyfH!*m|Q3!PImq`cx# z>iLyM|2p|f{e_m0Sby8ns2$&S;;`n~9>fm%Bu>Ayq)^Y^6)LZuJiFoCSFobr?L>Ok z8ETTTw-UVd?)!e;_KV>dUL0~2rm-Z4DvK|Ff>`Osi6)aHp@sW18Pvu#_5 z>)69*&lkRfho0nIWX(fz^&5&q@lO**yjdp$D#l^KN9QcOE)#z(w0IYY#mV!^lK6o# z4q_>&@NC%ID@03@Tf7S(ypqC+KahyXZ<*|3P%g?oc+g7Wt*r`^iWZr3*5D@B0rTD< z{d1J852M{;@3^VGgvUKDEq<jMwnu!z{#)pF2LdldN1cSSvv)i+p73&HhMU zux34}jM1$a_nY_$5{XQFgR|?uo5Tw;_aVz86I3a_Dk5bdUOxA*DnAU~NE2_o>sak* zw2edb?A}*bWVpQMB4=K-9b8I!0e#o6pCw>@qsI?;3{(<2x9_0JR{ z*h^rFCc`g_zwD>#OJ-a^XL+y2li;0%+WOx&+e8DG|gD-}Z>5sf% z*`4k3p(SSjtRXGm=KdCTf+Et!?mj|Oexo7cNYM^4QZ{j8hcA~bDj&ozFMbBYqQF6O zDX~V1>RpJ77r#u<_>WxT<;MRA&@BEAzTD!!{Q4by5#nDKw^pAV@h{;-W=q;!$VNPh z>x*eqZ?dxOg&d9V>qsW#`$()!P{0#EN<0PP)^VuBsP5TwH7k+yjFF8k%P^R;p2q5^ zdnk?XF&|=A1Q6-mpmr~(&BapbQ@5n%#D0$P4tY`M>yfB0+Od+RakYDp2mfeP6_ zlyUtw$RU{6*bs6%Tvtv#@f*Zn0@p?ksimo@59vAba{pZd9s9-Z>m|;?YLAPPGeLJF z=0_tw__I}aEDX5h@6pmiUkR-BDF`Yn?XRHU1aa#x#2xsuo~~}yaO-Th3?KjmESN8V zsZ<1BK>t(8(#P9_PbYGVMD3G-yC$+30u^@tM62wp2&2r04+X%afnE%@xlt=qJGK&* z$yN>IU)<#W#^Nx^e=}}|{{s*h5v3+A5<`v0%Y|Z)K;6&JUx1lH2joGp){1r;$Z4G$ za6vih6FTw5Eu>|{6V_#mK0`c}$6XZd+6vII0<##g%?)0Q;EZzISepdmk(^r=?gXeC zbaZs^^M>a?#)YJx?b^*vcUE3)4*JFmGe5ExfII`QkyM&#tRzrqeTCNxbFB-au)9g9 zC5F0!WI`w-YGdZrc+p3w2C{7KU6>4*opmv@ImaINK1QW?8W9M!`>x67)Or=ATkD1cE(FI1>=Fl7P4wU7R^ zt?fcn{i~I5Xo9XCu6KPXe`KR<;5tY&JVDF>I(`9)EH5vTQDv_Cl*A7%d2qG~J5q?Y z2*Lz`;vrKBSd*3%{v-6-wV^BwSjY4WNS4?8%z~;?QC3EXb`3D%VTi0iKNGqy!3gUZ z;T`FTNM-n~TVsTvA0B{K^^f8T3k$*Rz&gsGAA25lD?KviP4vNtHMM$Md#M1HEf75< zpHNVW#0=BR^b`tmw+7|tKturWLjwe8@@(D#%-SY%OI92+9kdTChxYbC- zo7wJ!kyUy*3v+X26B8!kDoIaio&J>d?kzeBdFbcB-w@{A@J3hB1S37FF4M^p{E8<_ zGmw!X9jiBXdoQt#Ijiu)6KYLNblQn_D_mAlR${P52X~pdn0D#XCQn`*+0=_^*S>iQSkdD^%1-t@{rfF(Pc70)O)v zeC%6Wwdse3wk$jYs)c#F-Kf_A3Wmh1JM{hC8X(EYl9@EPf+%DKL&w+%hXw%lKbmt0 zUJqy%IID?4Xk}&hwnA69-Un4Iy9y{xjf@WRS3gTu%B#{7eXyvyO(G(rLqm<&?|b%C z;0eMA<6B3^1HHr?-urub`F)^AN=#^g6(e{E#^=Aqf!#j|<`s_mQ9eftqIJnD@0twv z1-VV{F8o?9XMne(&$fj-2n}x3K4u6-Xnsfn}RUY8#$o zA3_fjeIQ<*069zpUP=-`oy1RdujaiSs^3=Ghv^FMaPQse&!!>pZW162F$@|YzS2~= zb~IW<1S~YWHfA+;QgX?cRlqRTBa+(y9O7BQ!6AuffL)6*c-1@FNXP_`N(ggQOzM2Q zc4&d(x@?+w*Ll^SD#+9Hi9ftnmv59PLDml^^MctpB2^{~+P&C|-qn!!jEG~2bo*Wm zG9G0KB!YY<)CP+hQW7oouM7%27!pbI7l=uv*e`#%77b1q`c z(;|7jyY($$Lk;gKl%A}R!yvJM<10=MP>u!$2G$PcptD%mf-5;vbDW^6wh{VS#4Uaw zn3BOn8Vd5oDiro8-&p^p9vaX2gHjq#N5quhbwG7^_#mKSp44i%sm;t_!a+_96RAz= 
zDbaUF`>wgrN?Ew{s>MQd8_!eEZTulgp05AzNSRd!rM7N`DRV^hD2(H{gW~VsKaQ2_ z6Z)UlP-8i>(r;G~{lg;-ax{3-!l;r4OgFXLsk!Ey^P99&TYHs_SK&OoWUHE+XK|>* zSkMT|9lbjkPQeFsVgejvc&YrG&3^t06yvoPSB{0FB!Bu;Bs0VwC$^*c+tN~V2=~4_ zOj+ODge=^j$<4d~>tafUs18g3y>hNzJ%h0lzz>j&xB@^x>^;2kmee)@AD_#Yw@ORb zSVQ|hEqC>#SHd4c?&;QF1WL97&&C-nRMIZB9lD;Em-=N!<4!Z;EW=A)KijSys4;W} zyXea+Dn6&5=7{*G0-KQG1grj4ZWBRH{SWeco+#GsVs(lVhBOG)5+ZTXQdILaFzED!cNynEJPUT6ij{iS|Y{5tVk8ineKlv`a)g+Ek*n zr%+AlK}ZPgn=DC4+E7SJC8Q{&l@MhqrFpLt&*%L;Kkq;9{1daBbLPxgnM90rS`s9TB#Qxm|=sS7$x>tVuXcQ=kNfW*PR zI*q{FuruJFIro1HMgmL!42aLAeE<5@_tj0cMT;1jR_|d6t=C4H&nGAM4GxOTpFj3v zxF|gD86mv^_Z*qU4P9=O;M9$xJeN)0`|#iR;TAXxK|-aDLO2)MnCIVIarsV$?2@s!yknNm1)_1#;BSxc2{$KQN_thWYy)#8)%JA>X zv_*fIwk4)%C@8=zM-Dw_)YXqYFe$6Mwp{2x;yYraecXO7KBECo|214$01Hc?OCcnIM$$A!ersd8fq}O6Dc4ho_MSht+YHw)$TruNqJZ$C7f?IE-o=X- zTcAVwHD>|hHuubkCl}y~D^b%nt$OGgM{c)MCm@~PoCcUbJ$CxSo;W0!|B*6sY?Fze9d? z4H#T6ETD0hEX~c&E#E0w9)(N&x8C^=r?n6-0isTrET)ofIMTkS(%l(pr_)Uw$ozT} zx#t2->rQ``V1_Aas5tX24LtqhDFCGnEAZbskx~AAtMgoGbOz?-(V5KsvKt3Is8)UE z88tj3?ObQT>2zuqlS=?cshxvjJ)zxnbudYqfmuyaB11HQZ~jN0bvIw+q{;lAyqNI* zoJ{xrU!Q`P%gl0cR?E%b^PAkv!Dkpy#Npy)+8Qtan)%$P859Tv@%dC9zQ%8icmN5~ z{xfsU$pQzPTsU(|^?yzo-+Cvry@VozAMZDlmS9BB-lII7VVM3{)hr|=1a1iinjHQH zy!ZY>rpx+u5$|i)`PXGVFjA43s{Zy-ORjMEQ}4L_wYd@(Y_IIvqb4eSoIUfj6}8nY zFQt22`%sEE`!U-GT6to&w|DW(ZZXT+!O4AUt%tg1P-;$xMpd(0;lo<(tX?zUjpvNC zv^byV-(wy;=6%#-yjuIi&z&iq+k@Mg;Ujwuik_DQH|Yhde7{v4{sloV%tGF5B`4-sve#J_Lo-js33yg2_M8v_A>mWB!21}`zgZAbap7%C<0K3b%P z)qYTL(VP=eH(=kGl0x<`#3+Mg0i`eG5r-ICYnJ%*G;dd8;Pf(wBe)nUX;E^dofsm$ zfXS4ao=q^HhkmaMZ8RaU2Y?N|j^d^(h7SbP8t_3!nBHghEQkxFvByh~(xN@TBi$xF zrNBEb@fZU>VE1kmaQwO|>>V8!Ob5joG45tQ5>JT7{y~|b#5*+@ zB%?{o{HXc- zBwegJNA?bmK1U)QO97M4G$A4q0jB>ARbClMKF8-47){bLbxgxAZqu1=NZ|0mB8~X_ zL_lanM8&wfcyiJ9GQWaF)LYl~#dCFzb*)_^B7?mNWll7006>ABB{?~@WuHh+CFKqD zSsP9JhSR6|mh|{Ka-CP8n$FzFdl_j`oWy<$r@h|-PlZP0YHU7Zh`DI6m`SSyYnrV>?5urHa*e9X$+70HN zI38{lTW+Z*6P`78w=CQrjKi{X%6d!%p9&QRE!F)>Y~6GE6)}XqG!#XM&fR+2;aJ={ zeL=b{JY=3d(-9$01#kt`8DKG_2nGosbdc%LY2CMD9!#ytM6W-A34ew{6I0p4WiUt~ zibWo~jv0LeW38aOjXztif!SjAv&V`jfZ>3Afh7I`CxE>GiE?qN!Mzo#g)@yQ>FKN< zrW7A5-e(gCV2FNT-})K9PdJu6q(>wRT~ypmpL*1LEK?=0Ib?128G<3yA5(*1&=6h}ap|Tk4^2 z>UY=yqM6uSsiceJ@Sco9nu=t~4CW|Ll>$vzJ@MCey9d0qxTMjg1d!3!+Cm$Jqj`kk zxThMGC%-)SRO5iv>RQO$ajVHLCh$FhAu-7v#1RzYDlf*|?z zWY>3khXp~#}Lh?)e zSI>G4?ZUS5{LP|+y3;~8T0y#iRR2J zu{CwmS+dwtC=Pwb(qXVHh`r>ocC7&01MbTgCoRMRw151RxLBkYSZ>*FCL^Pvbau%$ z;?w>JE<>UCs5|psaeH}Rco6=RocAUPs1;+Z(-edPjSc6L*ci*mJF&0b zhi{d3;Pf2`+DAcFpr&VC?uh0BuqS+M(oHAiB$0*2ga zv~yAL#eF8Y)8wwh#_5T2Ac&IMVP6V5{ercrSIG_%fY11ty--8CmxLmYVtYzWrp{9w zaJ^gN^g8x~T`p!W3CaL|0>lO_=XH-JN&BjRL5qn-=zAm4$h~VR88-r-WqmD*(gQiu zXv82n9-9Ma!|i(th7T$XwzT&mJqDcT`8QFq`1(M~c>OJ%5tOMHmv~{wCfZcoI~kxg&QTH8lbC2}Sx3D{Bk@ z1j+0LI&wOTTvFG>$-M8e;w)wVR9=~#oqg?^tuSbE7BG~Dc`;xNQm^Nt`Ijt{ z(`2cU%oNTC*2aBETcLQs1G~q;gFNh>b)BYVc+k#8y}+W47JyZ(=}9C2c$2ye>_a>OYzLJ`Lac(Opw8t%mmXa+b{>!l z3JzugJtx~{#!$dB@xiw5<8u++&XFxcW&yDUTs(YzKft79aAaf8cH6>{2_rk1ll-h5 z0{8v3RaGex5=$q4XqZ0;5I7CAR#bB9Y%H|eQ~&|5EB9e|19j ztRBI4VA!f60~UDruK`{P=pb z8;TQ(W#cf<-2G06#u2|8y9{D(Bn-GyXoVDRM>YdU%8a$2*8feT~B?qhA)Z; z5Ya9)DfX5}p?$~33^P8p+VgSChl#THj*484@!coGUtE!5}4hB3Ix2IA4U(;sO&`d9A!F3*g#uMidDm2YfDz< z%fT(2{)yt6If>R?1^4?e%(2|LjhI}g3`>T)*yD0?a}l%^UA?M)h3OlTQ0x67GyBe% zl1a{q(8!gq*$uaSj+jGGd|ADdtDpL}wYGebsXqeSs==lzV6Q0YftX5ii4mSA%VOB( zo1YP$KYw8icwO6#_JcBCK8O<6O`yE zKx}B!03D^qt~a%*sp9rS@{2;yi&hg+Modf$RyE*+VMVnc#D&P3k(n8Qz(G%~`q#;# zyVzDULWN%Uuib&xRBs_O#udRajWz7d+ANP=*J!$xY_=V0RYY$qVCac22s1Yb+WOQ{ zqf}B**sV>4d|m5IBh*7l{lba4aJ(okhD#*6R%|-ox?V3WKWO;a74J~z7N 
z`b}&>UiahvxQ$^GFHO`LE>zDE+E-ws`TY4aVm$ zot5B6^@K*e#o%RqVj6JKe)nDW?W$;BC+pWHw{!|^k+33_A+&(a&jSq zXjNMrY4J*SKDzI7Zgid;bL9l3xx}gu-AVU`05XCfNkRUsv&Q@LBRosmx6QlK?gO_F zZ!fRUHVH2^#d+i4Px#WU7jw^(qyl`|X7b z>+BU5FLrlvG30W+z8On2+H0ewA1y`{Js7~4rQG$@fax_@O*}MsrNQgs;&bxn*(f{~ z?01G`g2}Dmw#3JG|*get)iY98kONg@{>LW@of~LxR)ule!a*alQ{Ke^T80 z>pUp$zJFQTG~u+EmNhE2W7?z^oPV-g!RzZ4Am@|b{V1g%l4*i>NP4=;qZKPwEJ3p` zG4O;EoP0G8Emb{Rx>E-?7x-J(Yuj>tRM=#$$nNAV`tit7CLYmy`Q1EKz~VL zLBYCF)S%Gc8>Kw`B}+V@7q1^hM+A6cVgo|5if#Va^(3a@MwlF!U`D6Kx=p)c)?vf> zfo>CUb+@y#tG^utFHTIH_7C4+rf?C;O%!>RTlpLZq<~LVVssW2si1U!6Bp3FL?xRC6G z6=Rpnr>ew<139dY>(IxHgcW<=mlu%>s&;wtUBH4h#M&(g^ zIFF96cxaWF+_;F1olp%U$+~!8+dI0WX1Scp&)_y*jH=cK0&-``AZ(-o&KV#La z6=;eF%#cRAvrQdNkV_whD+CWjtbvIJR@PHyi;q7<6Hd5HWLIE6{MXWY>r(B+c`c_x zb^-}2P%4cD0&jI<#Niist)QT;$KRnU0*DCA3yo5?D5gM04I1f%XJO-?T(^f>_NSqN z<}>#adzhR)>v-`3%VcsXS+_wBh`u~f!<+;cAYdYcYOkxIk;AL@)kcU2xns4*u&K|| zTZ%uRTKG9tGV`*P`x`G$=Og;mcYCHYUn$T1Q17yiD?_a8_x_4ncZ;2AT>Qva>m{70x0U6G zRoM2ITkl=A=c!5 z9iD5bHQ&h2Hs(@;H6C(6T1<=Q`=O!uLx+Uu(C2`8ZeNnW`(%n-tnwqI>cv>dlv|%Y z)2w*4UeBp;bOFE_g?d^ON+$8T;^Lm-7hQAVD>3_V$p{_Plqlqr2ozDJ1tuUaDw@>q zUefMY`lt=XiTcsBdcD&yd%79{#?RQExw)_WO3N#|dai!>=f|qLlR+l@8&=O3Xt{mX zcw8liiT3b=s6wJJLpH!h^FjH3*li*jJt49HE(#`!-*cdQLA&Z6gsq3M8lo*`zfx`* zBrzy;^rW9d>sc&6k>`1c3V=6;$u^-3UO#P(aMd5FnHDtD1PDu%RaDRnB?8D4=?FOr zKM}Z9LSm2hSP1n0-t_SyPT%_E35v{kfa(KIWO6BdqG67V-wPR>6S~Xwq$~YeYffDH zHZv%u5DxMxq}h17&@*%k0+ijF!}%&1MrAx@(ZbGiLRYZmAJ7YPnp772p3i*{Djrm- zP}Ho`s=4&*XMOW}S+syWbLz#KQV4O@LSL=!F3Rg&<|ca3HTUs`(~a&YBW87ip-`F# z6k#-&%WwlpcxtiOz+rwmAf949Xxj%~39YLeyAubc3LQey64z)`Wy{Vy z4gH&$>fELxGd*SeOy8wujwXwrU3w_lIE}n23D==ji>NX#`qTD@)n8&xAU~b>0qpU( zijUS7n7KuC{*fy`sX*flBsceJZ%#MsM*avyK8cCtSdYyn@!&zkbx5mW!+mw1@|MD> z07z6ZbolkYSOM|^K?(a1#}-M#0f{W}QGWRZ?<$6^ZW1?>D>mcr#mMPC0&<9@@G-Yy z=NPpY1RemO<@lUG7lP$aWu7$Q49-(I|l+^i)eF4+ykmvObv z7Pz@1v^y-RhpX>^g($v3w=%!9x7W^V9K`GMp@Ed%b()KY4v4l--kEp3cU`4JiT{Mw z%5$?Lb@Z@OjC7WNhfMd?OulmHp4-H;qP<^HD$;|EqQTF%ma!D%jBq|Ov$PbUqaKSg z=FQyPRd(bu0{Ch0S>?}fFNG8P(X$y1NJOhHL$&~erQAZqPwiSK_nogdz*y&_YJaL* zc!Z0FZCVZ6AS5WWuT+S5PfKwcHp|b1PRle|BNp`L6zKs5Z%d1eY!u9!zEMYAk5nw7$PL#+Y+s z{73U2=SCuy=;%yjAJJDAKLlI$_ah^$N6ypmj+Lp&q_}}&6;$q6<>%#P{qfzRSF$ev z!2_McCR6Tsvfan7&>Qu#dPMrT(Mz1MlRL^s0h3U1xS6rikj@=Gt;|m-V=55oe~%v4 ze06BHk>HI7!^pl7LG6KaQ3WLnbOzWc*Y_FI7;vcBwX3*>T%Azl0ms^@(DXmUl(_10 z_<1~eVxzOLG$)54&jO|JCPoKjFSzsySf`jykS$-)?<>t9stOJGkS+kN-lz2YDx7Cg z1-O~b1xO;$zQ8!|JY0p5@gmsT7#a-a9XJH23FCqN2BA&Yjx*>CN;((;Rspip*?;N3 z);qcWk&3a$7^+LmQ_+>EmulX2(H>O2roR4uw1>;Qa>XhkZ{{YPisBkNo$7H!^|4j? zRHTMXk7{66&~)aA$ugGe{->WR7$>4u5&IJXxkVEb&#(+|5{Aa)ovWS@#WfRz=N!ZW zlz0Jh_23fC)@5VxkWWaf2psk2BBF&+zaaJnNc?&)88@gZDF1pXhi#(Y)p2tI)^Y>r!5O~nJ)U=+ErR;d#{scvWNWgN0 zM>K`3(L~E3ycK-A_;0hIo!(jJiNFK@yb8>ad>2r5@-LUsufJ=4KTW~k1fKOj@lp-R z1t8zLtvl~w^x^XpT|+BV#JPm`NcJjZ4zza1YjlPX9a(4n$uNmL6s`8fn&pIH@|%O& z_*BPlVlo9lBl&U?c?0pF{r(H2k^Jxf_vIK-iMO)}E}a8+C~rZnVXxJp8fv%v;y_c- lZl<}}D(5A6U?RSP#bLYY@RF7QKL_&vYgbt?F4JB1{|^iF^|=55 literal 0 HcmV?d00001 diff --git a/docs/nunc-stans-job-states.dia b/docs/nunc-stans-job-states.dia new file mode 100644 index 0000000000000000000000000000000000000000..248d9a7a045931e3431cf62865eb22e4226b7bb2 GIT binary patch literal 1766 zcmV^>BDTJNrj#4_fngA ztHd-4#}}_I>8qkk^BW6WWgV>qNvCc+_QOpaGd4jn5Ew{|u^9}eq=JG9^f)HZhZ{Bv z7c>hOHw)({gIN@(vFoRski#env>O(vX?&~q$4N$RV5ZnMw=BH4@zXSVqW?E;knG|> zar+nBp3B^E?0egZyV9b<%DwNUlfjRghO4mtP?PmLKk-Fj_2n8 zSje<9f5G#JLzkB?&G$zsTKRrt4&VC4@`_aUWQ|GmF6$JjwP<@ezm^8%q&O86W z)e&3w(O5LWaAU}3(XV-h34oNVt^|rmzCIY6p`;3BI9vybjW2FE4)h!d1Eq2xDJAb? 
z1)$=jizwhs?uV-bJspgqINZz{4(wr$l*gD&FCXIzGyW-}IFcMGLX@NySQG7!C+RjY z&J`tsaLK6Pn6v%Bl1X$wD9I?Fmdod$%v{e~mC7}DARvq|W=73q00avn)P6{*{;@G? z>tSTAP1VTc3FJzgxTM!TYOES6{Cxvd%4`x=Qz>Ff9Yk?mIW`)DIp^-`p_i zI7#}D#S{|?ibwy_!JSV1k-HrFp5KFwsaN{GBwW3y9q4M1Dy1>cXw16^0mUV;gIg7` zEtL+0c0eXY9EVd~{!VpxJ1tT7ZTem&QB^YATWE@%$RJyhp(X@F*2M@=QKQ6ABQdOi z!F{TL?BZIL#on!5bcF*!sajl+)Tk@mxUz(01iRIhrgi1=^WRtRTS2K?P$&mP2t!{A z%3`E)^2iw)iTdGskKZ|a9K`B zt0qmWNl~A)deTIJf?y#hs#TNb)Px+MIw6p2%1Yy8f z&2@^sgS%-9QN}-t8AKQ=0a2}`3X)O^1IQuak`vL2%W;;#Ou^F1O9SN!gP3#`;1Dw2 zDok?!5us2t2%IS4=^-(nE8N{4~L1x%kPpt@KBSZhsU@7Qhy2^Y(I zCj!@6kYKv9cS2i5XAE=7iqBjpOKk-SA?jVr*%85a@8cO+X=Fwh&K9^v(#`+c$H}5F*(S zlgGa}6GNeS6i%c3e5AAZt9cchbD`v6G=CUZKm5r3wJtrA=9lrPgyq6IX?%>$6v$fB zMniHXIL*#v2}t@GuEa9m0-=o5O!6u(w;ml6g6!L`+xSioJU=l5Fw)iYxn=wD*v@R& z4v)ijY0dV>+8)(eAPQ4SDb-bg5Jp8mTa6Z&=CFHj^XyLb5*N3vkWP65z6Lg7lL8nCa4W>jC=ct_GlVEu(<}J%lR?Lz0|6?6Q#CAK3!T^ynmvqJu z%O#zelHKIff7 zLot1yNXXhqxc9O~t-*}Gm|exJRAD@eDUhN*kpPPVRuRNbW>e8NwjV!hAuUyF+)g|e z&)JLOIVCjTQ{g{kCH9IsXc|i?fyQn7cUp9}o$U IQ-@Xn0B4e7|`X@NbDf9J*o)>vY(oy z)$#;&XCw(N|Naqq@%F{0>xhZ@uwYx3$e`EZ`odQ}j#B}SO=0b}LvK*PMx8d3K!LGb z2^whDx?UTV)%EFnDhv(O(J%Sw@P^=Crnt9M3<(M#)+#19O-;I9L&icuN^(28EUwmS z4-pt~z$XbOZF^Jxi9BL7JPc4jLVhK65Xqky0YeKwx_qfW< zhv9mUkBJ&mZmpmtVxpC<$Z>_s7sNIYP?cvPL*!?Az?SZ2-*2j<>89FUj2~9ay$D*b zr=MYHOU%8iROGCx(hyMeI|Xs1cqq{JZiPxxx(HZG`up^}5;A%!FsKoSmd`p2j6nZE z(Iunf`k!v#mAvf!9C0Mb0n1StRvNqlbi8$>3oG+-YWy!&^|mL?&16fJUE1BH(Qbd+xtt z7fXN{q*aUJ=1a@PLqUvP=n|Rf4b*UuW+X<0rrlfu@TS*<&>FLP)_+47l}!8W)c})f zn%PBhv%|n(tZLCX+Cn=S2yj-wNADvFPp%qaaYZBu)I|;lfsBF0GIk`V$QHjg0KOM} z4x9=36TsLLThHQ@zn8$${?iA<2F&k2CRUI4V~4zYjao(e;$%phK$KVoxG)_dOzV(GJjhN z>W(BjoNHw9nGJsWl3A-}?XU9)sj!cSqT)#Xl>W_1B+Evk-As*HMn=X!GE-y{lS9ud zi@{8xoBhVc=mpmW&rONlm7Dv^X(^i)nTafB+i!;>O8KXg) z;I94yOyVOM3CV1$_eF_X>652VX+L~9TJS!^aG&>Bje7dx&2)?BDQ%WdjY)5Gntj_< z$5XOFulbyu90CS)!IM5p291h0BqXwgR=v@r__PY?92N>^70aFB_Oo?#w~L!u{`ZM0 z3UsqL1eda$C&>IQ198zOh=fRbBPJM^w7>tubhSyN{o>nJwsWZy1A-Rc?ce#0f1Tux z+}+(TfY&Nl8K!aDrSdu=4{s7^6&G4Oop=Vkp(M6Pu^ydZ#q}1*x@8RcS?&^+btU+hL`C>vc(Kg=||_;G?Gp-#0TOnVX8CRmSqSUWBQ-%T1fIZ70c+iK)rb1+`mzEWy09NjAtSYx zV@PVb9#dqPIdD!}L#la>(0=6Q59^T97m z72b!f?ww)St36Qy`*nXRzO6(I4-W&r%dQo+K9Dk!#>3*a`qJ+xc)XE!(Xp3QQScZx zu`@Y=H}PHX60-@*)kP%~ALBYeUHg{G!5JCFvZSM_J?x{E{BT92^4k|%jhm0ROO(zP zzq|g=d3LH%1+TKrYU~?U4eLX${K!dJ$kYlIM6c|dy8C=0_~i@2GYc1%@=WM(k4%7) z73}zSDI2t-_5lJ!1JNRB2@g37?89@hJZ}>`dE}spfKe!%@V%y&Xp4u* z%YZmbEs<47#^Gxv3UN>#J?DkvL@DphY{YE<-Lw>(ab}6yt(8|nLen8M786ZL{szY8 z>t`FGg#4Hw~vX3(U znDSf7Z!>zHz~-pTzt1sHkXtW=m7l0SDQmLc&*H6X-dkej$JTit>$IJ%N7yAA2IW)M zNVzleEZjx=|J+eTThTZ$rGhlhg220mwAabyAsU< zN-b)7)3Z+!jW`j?dDMR@b3p-Sns*v1CG$YwOJ=x%vxfAym8bs;?dNSf&;_qv{7`pT zAK}f1SD+-~05dgAj7ccyuXqszUY-ODsaOh<6-#PQ3J0P-j(C1KzrhKB6ST`G2fuRE z3P(@GKHCw-yaV&j(z3)2nOAQ#D^wIY04{TVJ|Uu|Z|&gV@H6DuVe5sy!&DT{g2&R2z@aoA|C3*|0&aWXt~S!he2zNOr-r;T(=}K$ zL(AG31o=SnIP}HVJqV&Z{mO zCYfS}EYUr7-{_4Y^WV&HXV$FB_Ivh%l)nyu6ZJMz|AMwo<45d|>B7F(0MKA=-mA&X z%zP<)xwI+ra8+mZhhFd{2!eY6fZ*FN5qV}>C`Deth~Y@4(3noYzLZtHHYq7-u4Gix z-T9m%|8^eW1}1}F*gEup!#fPae%%xG+I`llP|vZO06!F4BX!Bin%NtE4w`{&!1arYSmL%+Hn4Ryqr6lc$Yp0=nn#?x$!NP=zw z51^52_S}5ci^(v=+QKPpLzYq_SUT}Z7xNwUt8p7Nm2trYxeeJl|KFd+ZN&Ryp+ld} zhZ%>?pZ{RD8(9MjBivUiJv?SV%afXa#%JcSn&3Z|v0F%Qe7$z&7mXX@Y<-F4nHEEH zI5G~TBa=|zCHvHLIogomN5&)rt?5R6? 
zzDoXxpClNWQguJx#Wyi)JdWtQ$?#0A6keSjebDug+jYiH21BJgzA8BdNPAJ>=U2j| zO{zZ#4p~gBU}m_R4aCn%m|&}I%IPvrnomWR(z#BL&D9`7qtVJaoq9T)sp!S(78i7=*Q|oJzwvoRf+%6ZbJPsivzy4{Mf|OB$j}R&f=v z4EgH3le<3#7ipuXji`mIxtiVQ^G`2 zNdA6`;TK0qkDPvvv{tL?=DMrOoG+CuMm|f_x4)WvmdW4`2*SGot(K5q(Ir>wUki{G z!=Kzdxh+pU6B+jcB*sW4h>yjRr$b6NIpJ4x4!NiGVQPOo*F#j*7TUUvqLEx7qYk9m zSHHZIZ;hoCvs@3FhPeHPB*F%45+m#}C%?0l~#C8D&KDmjs1r1jmzP=}RZ} zm-u((9p^55v&6DwCz#Ed>8UF=q1{FLYjQ8#qfP^0#n&-u!spoiWTo&I+|f8Y&JqtZ z4MCduE^lj7>qHRBQu8q#yMM*jWy)RS267bY<8k_(x(_nT9!Rtk&S@2_@s7IbQ`5DI z;A=G&!nz3ctaYT?WYy_qzORT+VOwgdlt)le zJ(q?UaZQytV`}8N>B_=I4J)SSO=>NbZOW5r(D3qwI}B*cj97{*Rr5c4xEEmTY(~)F z+fb5Jdaq(g<0UN>EcvX#P=?~JyYIkHwfEH=UxM}rKW}i2^VQFHI`|k-#Qr54``z$py;N&&I;8tH zU$P^&XZe%zBuT0w20DKCu|s@~N63w7y@PKM#FrhWHBnD_K~opyukP(e{%ov za~!#`mvs9xF+9dAwdI$`B%Y$ zb#B5U$GOkVz7o^o$TiEz?-)%M)}(VWbXu9vO{5N#F+S^Sb}^O3>Kk1np;pUyDcF+Zdv)?IEurbUWbZm_I`21%P!(Bt z_!;noUM&g_fQ^ylD1&wM*}t>_IiL~(Hb$C*{sTf4frd$tO+pBbh=ZDwtIir9#P*&= zu|VG=iG!8UWS6Pl3*mKBg@BcCO!9E4B8-5}yk=OC9+LnV+smBzuKO@sb3IwBlJqlC zyl&|b@F?MFvlmrFF)@=UJz*L4JNR?+Ny*2cUBEh}Y5=DHR>^ z;63j;73xnW>~kq+aQ(Z@_vWWgGLz2a%*=NHm27dq{Hug$t^1{VDkC8wAt1e*tuc%IQj+=^KFW*N{q{MnCv)573Xq9D@%O(y?tRVk@o&4o z|55vh(8XT>2eCV?^)2~_hlQz^X&(1c#+2!Pce1pEo(tY@8vE1!Pksbd@lxd00l-P~ z^Yc0lwtOB(OLO+^Lcp61?sha#rB+V~q4Fp4=3)e}>eT>sn&yLl*7s&=gWC-}mN7<7 zGyU(cmi#MQPWm@9y_tR13gpvh6w(2}we~B4$9~}rlTq1^>Xaje!7ums_W+*zxY#Ci z74Y486JUMv{PO#Mw*+$uJm$22W2Tv_`E&EGi(StbHSuuSD{_7?zw5a$S+4WXc)3oa zSLVfXINAMa*26y%qf<)weA>i)-ZceC$Tx-tv`rz$-7J2N>-|ZM3tqeb!05D||79oH z$L9Tp{t;WAEDYIPX^1xt{fUdK>%2oJ_U=^Wg`oZJWckISzke(gcxXQEdEMO96cmm^ zp<2&pZFuHg5;i>-uZG?Az4z-g1l-mA#+F=e{@RzE=YVFCKW@gnHz4 zW2YV?j0t$&OAOgM&Dk}U>ouQmjpg3m-4Pl19QTs>`vCj|fC(j2uo%?KrW@@I01l{h zV&~vs0bqzC2pD!+wnqHUYenw16)W4WtXf# zb5=NnEl|wzd$_xhB7(l=w*3OU=?k05Pn}`mn?0EX8X?rUimSfM?d7pM0F#3- zuzu>i^;il7;53u&H;0?^{ovqWU>5+MtK<|L!ER2y^drXBOfDx~5>20WQ1Pu&6S{xHCGL z+G$kH*OA_BG$;(o8>kfMt4V12309pxqUJUI>As8>uiGyKY}<`4!9x{+k4Z2ca>r~J z2dyAi1@%cHdAnAe`4=W=(u4=lePx5F@Vc1>TUbf-G@Pj-oG}T$L}NrWNJHFl=~=OE z^sXu4x~A)DUX%JKTdYaWrct6u-u`~2K$K>-Wi-r@C5PxU4DrXS^Jv{N&RpA`n2+9|<;#V@!@x zjVQfA1eugQBS@x0ath=)KbD6fmv|MPGho2d6l^5JMKdTyqM@juI8u%q!iJPf)$U@4!NEjWR`jJ)qnzbZ$)kb8{P zl$9n8F|9F|Y{&-pZ#jr%^0E;jf&>e~!OkjNQFUqG=buyHVT67Wn3aDik>sNJ&H>L5 zFkz?ZoV0LuGSK5kv_lCW&D>u7o2ryZYy0ZFS?MVvNc_X10J{)m!mu5SD-H_nJyT+9 zygk`E@|2togo(ik_egQ+Wm3DIVL=39q0o*XrDtFC{otHdPglA~sKEVfzDGLv5h$)U z;U%2uaG01?!67gpQ^C>rnh%VWhNxN`eBSdY+Tuv#(Fc8!KxTun%uTUMl%ifZu4tz< z=hkkc|4?l^OcQ1anRAc!G1LiX94jP_4deosaNGsS{)p?2_;~t_raM5B3jCPWm$^Y$ z^$dZpqw`N0sn_Hnl43CS(nBIA#%L8tSe{W)z?D+CYJ(4NCLf!U^9S!be>tOj7D5Rp z4*LT}PUn((8{|x@z4PUE zh(Dh6h>JfPG?W%cih>8kyXEF4ZH~qTxaD^j$FEax#ABg2jJIxtH{#T>^`^&EBWcC7 zH>r)RH+mHmaPmwHgR2Sk9Rb{sLOFE~5;iHF#iy|_zz_pVQC@^zlj5DuCDyC1QR7?^ zJPfDrIP^JXF1RNl=0@!U@>F2c>iZzswtltA%~sPSzUIYsbB&mlQ)q18@z^>I7G5=a zA$?jfEKrx*cRdANxw5OM>-jDX9!3PQkO-1Ix@J`xB5kp#;U^PsOIE8rICRGhi-Rf! 
z()EzOu}J}*`pXhNCe>*7BoeOf|2&e0Ksy&wx_JvQW1&jn_|uG695r;OT)jk8;KY@o zu5+VyEWjOy$W_H9QpM*sem59eK3@EZKB0SQzM^bwaq%SM5!S8@!x!eWk8V9uZE`THC3VG==M(vYptR+e(hG>1`q1iCyfebg#WV)qpe z0)5{>vg{FaRyH~w2LFn#B7WC1SqgYImv}xdV1x@W((N({QGSyY2OTBFPnQK6NvXhR z-R|=|p}haTuiF?OpS+xi?7;&4iG#8}8i34L8jVXKMcvcc8Gfwe!{7L?3LwxQ@e0c( zQ7D*N)@vWQ=;2||I9iyNbgd`wpFHZUe=CRl@qExd{u}qYO#EQd7Sq9*J|0F-mG@z3 zTxd|RN~XhDfwOfm0|5g4lvmhNf%5|0Rf}~5Db^hun#E{k0k0nPXwUd!tJ8wSu@*zYm|O%*$gXV z*}Du_GIL3r^0-p6Xuq+6Om}v*vJ_)6uE4_9C{$*CR#);z-;G^XbK5`e`NE43eZo~+ zcyv!wIek6I?qU0|Y#ynjsZ@Lj3pc|2YyFK%Bp)@l^d#3jrqDo8050FDu= z9+aYfo~Be_$9J-;_%2-7ggO`7ntfKe8?GUu5;R-s3eyU@Xd5>-Iw;Y4UnT ziC!m#MyAH5gdcwPX5T9rZ1&arjZJboZ*gbGWP*m71j4vqi6=i@x@L*9)`{SokMh@G zN4!{+@cZs5rq8^#Jp|K^$)uGv6BG(1j@)v{5T2mFFgCBE*LogOnm(Vw>QTJKVZ6@} zc3>@p@sLtu!Mk%uBjvuzTr4K?NBe}7RbqKm5x!3c+elsw6@M8iVf3vbnPo~UGLJiv zC_x)|@ugdhBQlLcylnC#x43Ve&I^htN_My|uQu~I+=97Lgeul8voYiRajS~xCkAttdh%3f z5BV_==y~M7S^yKRE${qM|NfFc9oEzltKkuW(Jk15rI(^)|VsKX*1IaiI&@1U*uNcINxm=AX zJbRx#f3$+=X)(!cIVKJIE8RJF*TRC-KIhQuFZBzmnh;vb)~(--w?!~#LfGw=VClrD2Ww?-S{v-tm~*~=7bni)Cz{tD6nXz{`fw^<+q z@;gb$+5^&s8Pl;Kj7AF~HEX7tOsdWEvUk4<3TC*LRn1Tq*UO`YxBp#c_wcUsc=YHI zkob-@XZl@l%dxa_w)~R}Mg&hk+~=iu-Tcm%v;V%HJe!0=F8EG%3hMXqM*$(*CLrJp zUkg0^I*oknylyX$CoFt<2xvkk^DIsKv99)Ke5YAmr!9aeJD4T%uva_smfKbn5O~V8 zY8z~4l)o2Pj+(>>xlQT0du9R|atk1#(vlqk`QgI~QTt^NPaAIQ@jvNdq1C(5Zf?VK z0Z>SM0OiNtcUOl%{|*pq4zeC@H)SzWdNjUCBIfb1r_0du7BUTH*IK)?bj>zOt=!~` zDs3;FZY~ZDnq0QKi44+P7_K}-b|2%6el(|~;y_g5|`4=x| zU-8r^dX1DZU&XASEI<_jH;P5O4*G52#^+)6|0Ct!mo8QOcQOASG0%uvSKcgLSkNDv zC=ri@A)eSCYYuxlOp1(goT6S4gE_xIfr5~4|Co%XE##WsuimE*zT5dJH1eq~KsCek z|C-6!`T42t22NN8puEYfWobk|{al7!f)m(QbU{$&@ zRN-DH3#Ld@AsL`{Am@E}{Cb)G#L0(4Z~jDo)Xqe$#Ypr#ugzo`m(AoQLfGvNHC3tm zo&B@K;SV(l?RN07WT4jgB!WnX z`FG2#{uGoOv%YIFlL3~0y#ew zqi;w>F?FA{S$JLT3ouE%Z+JQ0Iac$`P+!N5ZH;8)`(njn5I z!x0HEkh*ac`acta@5+K*e_RtQcjJemvAkw_j@iK{yBbdm1qAaygh`fmpmU=stP)4% z8HkR~!laiKbN!5>G(TJgx5EgZ7?uv|v9KF3&T@o3f&U7fbjyiO?X%iRge_BPznz`T#Z5J_ z8JN>%8OrDOUP$s}H-Kp4DFp-;i26|$(t^LTJSrzyu`ZEq&`*e|X?7#&w?5)Nd}}^9 zCG{E|?2w8SovTe4gi)M!BZW+eAFRJsi3SRvOhP|jT@#PkjdiR!mfL-;0T4_Y!S$e^ zR1ts>$;3*-g7*hU@hc1y8e|fySe!!o!8(H-$GU{Aa!+`4l9p-I<~HowC6_TL#CF$n zeNCOKLT7I9DlQLvbM#oEm9%t)>7o~?9|?`-sytIJc{SjB!75dRsSN-hd5HG!V`x&2 zVFIB%gt-U;aX5(TH^h#BsD*}AyX$K&uJ)atMcgsWMt>mJ+ibwq{kmp zPVsnXOUCkX2OxXMlnbbyPxSFpHAzuQFYS#ct2)vlR3L} zaS8lvr;@x(cuy7>-~1yx_m2nMRVFbu$ll>kc|T6+v6xzfR=(@kVei-1iSo@b z1TG$H>J)DLmY~+IF8A6W%j?MkDvEi0j3Sv*c%hn+lFH6i#onXYzDPnTA;l z&^YalY`K_5kPL*E)`CH8M?efFBvO~Z5#Hi*AVCFA?-39qk11L(QzCi?4={OLfYk~9 e_@6S+TQZ{c;)F^ZhZ>+_86+d20ILu;4Ei4j*L1xA literal 0 HcmV?d00001 diff --git a/docs/slapi.doxy.in b/docs/slapi.doxy.in new file mode 100644 index 0000000..b1e4810 --- /dev/null +++ b/docs/slapi.doxy.in @@ -0,0 +1,2366 @@ +# Doxyfile 1.8.9.1 + +# This file describes the settings to be used by the documentation system +# doxygen (www.doxygen.org) for a project. +# +# All text after a double hash (##) is considered a comment and is placed in +# front of the TAG it is preceding. +# +# All text after a single hash (#) is considered a comment and will be ignored. +# The format is: +# TAG = value [value, ...] +# For lists, items can also be appended using: +# TAG += value [value, ...] +# Values that contain spaces should be placed between quotes (\" \"). 
+ +#--------------------------------------------------------------------------- +# Project related configuration options +#--------------------------------------------------------------------------- + +# This tag specifies the encoding used for all characters in the config file +# that follow. The default is UTF-8 which is also the encoding used for all text +# before the first occurrence of this tag. Doxygen uses libiconv (or the iconv +# built into libc) for the transcoding. See http://www.gnu.org/software/libiconv +# for the list of possible encodings. +# The default value is: UTF-8. + +DOXYFILE_ENCODING = UTF-8 + +# The PROJECT_NAME tag is a single word (or a sequence of words surrounded by +# double-quotes, unless you are using Doxywizard) that should identify the +# project for which the documentation is generated. This name is used in the +# title of most generated pages and in a few other places. +# The default value is: My Project. + +PROJECT_NAME = @PACKAGE_NAME@ + +# The PROJECT_NUMBER tag can be used to enter a project or revision number. This +# could be handy for archiving the generated documentation or if some version +# control system is used. + +PROJECT_NUMBER = @PACKAGE_VERSION@ + +# Using the PROJECT_BRIEF tag one can provide an optional one line description +# for a project that appears at the top of each page and should give viewer a +# quick idea about the purpose of the project. Keep the description short. + +PROJECT_BRIEF = + +# With the PROJECT_LOGO tag one can specify a logo or an icon that is included +# in the documentation. The maximum height of the logo should not exceed 55 +# pixels and the maximum width should not exceed 200 pixels. Doxygen will copy +# the logo to the output directory. + +PROJECT_LOGO = + +# The OUTPUT_DIRECTORY tag is used to specify the (relative or absolute) path +# into which the generated documentation will be written. If a relative path is +# entered, it will be relative to the location where doxygen was started. If +# left blank the current directory will be used. + +OUTPUT_DIRECTORY = @abs_top_builddir@ + +# If the CREATE_SUBDIRS tag is set to YES then doxygen will create 4096 sub- +# directories (in 2 levels) under the output directory of each output format and +# will distribute the generated files over these directories. Enabling this +# option can be useful when feeding doxygen a huge amount of source files, where +# putting all generated files in the same directory would otherwise causes +# performance problems for the file system. +# The default value is: NO. + +CREATE_SUBDIRS = NO + +# If the ALLOW_UNICODE_NAMES tag is set to YES, doxygen will allow non-ASCII +# characters to appear in the names of generated files. If set to NO, non-ASCII +# characters will be escaped, for example _xE3_x81_x84 will be used for Unicode +# U+3044. +# The default value is: NO. + +ALLOW_UNICODE_NAMES = NO + +# The OUTPUT_LANGUAGE tag is used to specify the language in which all +# documentation generated by doxygen is written. Doxygen will use this +# information to generate all constant output in the proper language. 
+# Possible values are: Afrikaans, Arabic, Armenian, Brazilian, Catalan, Chinese, +# Chinese-Traditional, Croatian, Czech, Danish, Dutch, English (United States), +# Esperanto, Farsi (Persian), Finnish, French, German, Greek, Hungarian, +# Indonesian, Italian, Japanese, Japanese-en (Japanese with English messages), +# Korean, Korean-en (Korean with English messages), Latvian, Lithuanian, +# Macedonian, Norwegian, Persian (Farsi), Polish, Portuguese, Romanian, Russian, +# Serbian, Serbian-Cyrillic, Slovak, Slovene, Spanish, Swedish, Turkish, +# Ukrainian and Vietnamese. +# The default value is: English. + +OUTPUT_LANGUAGE = English + +# If the BRIEF_MEMBER_DESC tag is set to YES, doxygen will include brief member +# descriptions after the members that are listed in the file and class +# documentation (similar to Javadoc). Set to NO to disable this. +# The default value is: YES. + +BRIEF_MEMBER_DESC = YES + +# If the REPEAT_BRIEF tag is set to YES, doxygen will prepend the brief +# description of a member or function before the detailed description +# +# Note: If both HIDE_UNDOC_MEMBERS and BRIEF_MEMBER_DESC are set to NO, the +# brief descriptions will be completely suppressed. +# The default value is: YES. + +REPEAT_BRIEF = YES + +# This tag implements a quasi-intelligent brief description abbreviator that is +# used to form the text in various listings. Each string in this list, if found +# as the leading text of the brief description, will be stripped from the text +# and the result, after processing the whole list, is used as the annotated +# text. Otherwise, the brief description is used as-is. If left blank, the +# following values are used ($name is automatically replaced with the name of +# the entity):The $name class, The $name widget, The $name file, is, provides, +# specifies, contains, represents, a, an and the. + +ABBREVIATE_BRIEF = + +# If the ALWAYS_DETAILED_SEC and REPEAT_BRIEF tags are both set to YES then +# doxygen will generate a detailed section even if there is only a brief +# description. +# The default value is: NO. + +ALWAYS_DETAILED_SEC = NO + +# If the INLINE_INHERITED_MEMB tag is set to YES, doxygen will show all +# inherited members of a class in the documentation of that class as if those +# members were ordinary class members. Constructors, destructors and assignment +# operators of the base classes will not be shown. +# The default value is: NO. + +INLINE_INHERITED_MEMB = NO + +# If the FULL_PATH_NAMES tag is set to YES, doxygen will prepend the full path +# before files name in the file list and in the header files. If set to NO the +# shortest path that makes the file name unique will be used +# The default value is: YES. + +FULL_PATH_NAMES = NO + +# The STRIP_FROM_PATH tag can be used to strip a user-defined part of the path. +# Stripping is only done if one of the specified strings matches the left-hand +# part of the path. The tag can be used to show relative paths in the file list. +# If left blank the directory from which doxygen is run is used as the path to +# strip. +# +# Note that you can specify absolute paths here, but also relative paths, which +# will be relative from the directory where doxygen is started. +# This tag requires that the tag FULL_PATH_NAMES is set to YES. + +STRIP_FROM_PATH = + +# The STRIP_FROM_INC_PATH tag can be used to strip a user-defined part of the +# path mentioned in the documentation of a class, which tells the reader which +# header file to include in order to use a class. 
If left blank only the name of +# the header file containing the class definition is used. Otherwise one should +# specify the list of include paths that are normally passed to the compiler +# using the -I flag. + +STRIP_FROM_INC_PATH = + +# If the SHORT_NAMES tag is set to YES, doxygen will generate much shorter (but +# less readable) file names. This can be useful is your file systems doesn't +# support long names like on DOS, Mac, or CD-ROM. +# The default value is: NO. + +SHORT_NAMES = NO + +# If the JAVADOC_AUTOBRIEF tag is set to YES then doxygen will interpret the +# first line (until the first dot) of a Javadoc-style comment as the brief +# description. If set to NO, the Javadoc-style will behave just like regular Qt- +# style comments (thus requiring an explicit @brief command for a brief +# description.) +# The default value is: NO. + +JAVADOC_AUTOBRIEF = NO + +# If the QT_AUTOBRIEF tag is set to YES then doxygen will interpret the first +# line (until the first dot) of a Qt-style comment as the brief description. If +# set to NO, the Qt-style will behave just like regular Qt-style comments (thus +# requiring an explicit \brief command for a brief description.) +# The default value is: NO. + +QT_AUTOBRIEF = NO + +# The MULTILINE_CPP_IS_BRIEF tag can be set to YES to make doxygen treat a +# multi-line C++ special comment block (i.e. a block of //! or /// comments) as +# a brief description. This used to be the default behavior. The new default is +# to treat a multi-line C++ comment block as a detailed description. Set this +# tag to YES if you prefer the old behavior instead. +# +# Note that setting this tag to YES also means that rational rose comments are +# not recognized any more. +# The default value is: NO. + +MULTILINE_CPP_IS_BRIEF = NO + +# If the INHERIT_DOCS tag is set to YES then an undocumented member inherits the +# documentation from any documented member that it re-implements. +# The default value is: YES. + +INHERIT_DOCS = YES + +# If the SEPARATE_MEMBER_PAGES tag is set to YES then doxygen will produce a new +# page for each member. If set to NO, the documentation of a member will be part +# of the file/class/namespace that contains it. +# The default value is: NO. + +SEPARATE_MEMBER_PAGES = NO + +# The TAB_SIZE tag can be used to set the number of spaces in a tab. Doxygen +# uses this value to replace tabs by spaces in code fragments. +# Minimum value: 1, maximum value: 16, default value: 4. + +TAB_SIZE = 4 + +# This tag can be used to specify a number of aliases that act as commands in +# the documentation. An alias has the form: +# name=value +# For example adding +# "sideeffect=@par Side Effects:\n" +# will allow you to put the command \sideeffect (or @sideeffect) in the +# documentation, which will result in a user-defined paragraph with heading +# "Side Effects:". You can put \n's in the value part of an alias to insert +# newlines. + +ALIASES = + +# This tag can be used to specify a number of word-keyword mappings (TCL only). +# A mapping has the form "name=value". For example adding "class=itcl::class" +# will allow you to use the command class in the itcl::class meaning. + +TCL_SUBST = + +# Set the OPTIMIZE_OUTPUT_FOR_C tag to YES if your project consists of C sources +# only. Doxygen will then generate output that is more tailored for C. For +# instance, some of the names that are used will be different. The list of all +# members will be omitted, etc. +# The default value is: NO. 
+ +OPTIMIZE_OUTPUT_FOR_C = YES + +# Set the OPTIMIZE_OUTPUT_JAVA tag to YES if your project consists of Java or +# Python sources only. Doxygen will then generate output that is more tailored +# for that language. For instance, namespaces will be presented as packages, +# qualified scopes will look different, etc. +# The default value is: NO. + +OPTIMIZE_OUTPUT_JAVA = NO + +# Set the OPTIMIZE_FOR_FORTRAN tag to YES if your project consists of Fortran +# sources. Doxygen will then generate output that is tailored for Fortran. +# The default value is: NO. + +OPTIMIZE_FOR_FORTRAN = NO + +# Set the OPTIMIZE_OUTPUT_VHDL tag to YES if your project consists of VHDL +# sources. Doxygen will then generate output that is tailored for VHDL. +# The default value is: NO. + +OPTIMIZE_OUTPUT_VHDL = NO + +# Doxygen selects the parser to use depending on the extension of the files it +# parses. With this tag you can assign which parser to use for a given +# extension. Doxygen has a built-in mapping, but you can override or extend it +# using this tag. The format is ext=language, where ext is a file extension, and +# language is one of the parsers supported by doxygen: IDL, Java, Javascript, +# C#, C, C++, D, PHP, Objective-C, Python, Fortran (fixed format Fortran: +# FortranFixed, free formatted Fortran: FortranFree, unknown formatted Fortran: +# Fortran. In the later case the parser tries to guess whether the code is fixed +# or free formatted code, this is the default for Fortran type files), VHDL. For +# instance to make doxygen treat .inc files as Fortran files (default is PHP), +# and .f files as C (default is Fortran), use: inc=Fortran f=C. +# +# Note: For files without extension you can use no_extension as a placeholder. +# +# Note that for custom extensions you also need to set FILE_PATTERNS otherwise +# the files are not read by doxygen. + +EXTENSION_MAPPING = + +# If the MARKDOWN_SUPPORT tag is enabled then doxygen pre-processes all comments +# according to the Markdown format, which allows for more readable +# documentation. See http://daringfireball.net/projects/markdown/ for details. +# The output of markdown processing is further processed by doxygen, so you can +# mix doxygen, HTML, and XML commands with Markdown formatting. Disable only in +# case of backward compatibilities issues. +# The default value is: YES. + +MARKDOWN_SUPPORT = YES + +# When enabled doxygen tries to link words that correspond to documented +# classes, or namespaces to their corresponding documentation. Such a link can +# be prevented in individual cases by putting a % sign in front of the word or +# globally by setting AUTOLINK_SUPPORT to NO. +# The default value is: YES. + +AUTOLINK_SUPPORT = YES + +# If you use STL classes (i.e. std::string, std::vector, etc.) but do not want +# to include (a tag file for) the STL sources as input, then you should set this +# tag to YES in order to let doxygen match functions declarations and +# definitions whose arguments contain STL classes (e.g. func(std::string); +# versus func(std::string) {}). This also make the inheritance and collaboration +# diagrams that involve STL classes more complete and accurate. +# The default value is: NO. + +BUILTIN_STL_SUPPORT = NO + +# If you use Microsoft's C++/CLI language, you should set this option to YES to +# enable parsing support. +# The default value is: NO. + +CPP_CLI_SUPPORT = NO + +# Set the SIP_SUPPORT tag to YES if your project consists of sip (see: +# http://www.riverbankcomputing.co.uk/software/sip/intro) sources only. 
Doxygen +# will parse them like normal C++ but will assume all classes use public instead +# of private inheritance when no explicit protection keyword is present. +# The default value is: NO. + +SIP_SUPPORT = NO + +# For Microsoft's IDL there are propget and propput attributes to indicate +# getter and setter methods for a property. Setting this option to YES will make +# doxygen to replace the get and set methods by a property in the documentation. +# This will only work if the methods are indeed getting or setting a simple +# type. If this is not the case, or you want to show the methods anyway, you +# should set this option to NO. +# The default value is: YES. + +IDL_PROPERTY_SUPPORT = YES + +# If member grouping is used in the documentation and the DISTRIBUTE_GROUP_DOC +# tag is set to YES then doxygen will reuse the documentation of the first +# member in the group (if any) for the other members of the group. By default +# all members of a group must be documented explicitly. +# The default value is: NO. + +DISTRIBUTE_GROUP_DOC = NO + +# Set the SUBGROUPING tag to YES to allow class member groups of the same type +# (for instance a group of public functions) to be put as a subgroup of that +# type (e.g. under the Public Functions section). Set it to NO to prevent +# subgrouping. Alternatively, this can be done per class using the +# \nosubgrouping command. +# The default value is: YES. + +SUBGROUPING = YES + +# When the INLINE_GROUPED_CLASSES tag is set to YES, classes, structs and unions +# are shown inside the group in which they are included (e.g. using \ingroup) +# instead of on a separate page (for HTML and Man pages) or section (for LaTeX +# and RTF). +# +# Note that this feature does not work in combination with +# SEPARATE_MEMBER_PAGES. +# The default value is: NO. + +INLINE_GROUPED_CLASSES = NO + +# When the INLINE_SIMPLE_STRUCTS tag is set to YES, structs, classes, and unions +# with only public data fields or simple typedef fields will be shown inline in +# the documentation of the scope in which they are defined (i.e. file, +# namespace, or group documentation), provided this scope is documented. If set +# to NO, structs, classes, and unions are shown on a separate page (for HTML and +# Man pages) or section (for LaTeX and RTF). +# The default value is: NO. + +INLINE_SIMPLE_STRUCTS = NO + +# When TYPEDEF_HIDES_STRUCT tag is enabled, a typedef of a struct, union, or +# enum is documented as struct, union, or enum with the name of the typedef. So +# typedef struct TypeS {} TypeT, will appear in the documentation as a struct +# with name TypeT. When disabled the typedef will appear as a member of a file, +# namespace, or class. And the struct will be named TypeS. This can typically be +# useful for C code in case the coding convention dictates that all compound +# types are typedef'ed and only the typedef is referenced, never the tag name. +# The default value is: NO. + +TYPEDEF_HIDES_STRUCT = YES + +# The size of the symbol lookup cache can be set using LOOKUP_CACHE_SIZE. This +# cache is used to resolve symbols given their name and scope. Since this can be +# an expensive process and often the same symbol appears multiple times in the +# code, doxygen keeps a cache of pre-resolved symbols. If the cache is too small +# doxygen will become slower. If the cache is too large, memory is wasted. The +# cache size is given by this formula: 2^(16+LOOKUP_CACHE_SIZE). The valid range +# is 0..9, the default is 0, corresponding to a cache size of 2^16=65536 +# symbols. 
At the end of a run doxygen will report the cache usage and suggest +# the optimal cache size from a speed point of view. +# Minimum value: 0, maximum value: 9, default value: 0. + +LOOKUP_CACHE_SIZE = 0 + +#--------------------------------------------------------------------------- +# Build related configuration options +#--------------------------------------------------------------------------- + +# If the EXTRACT_ALL tag is set to YES, doxygen will assume all entities in +# documentation are documented, even if no documentation was available. Private +# class members and static file members will be hidden unless the +# EXTRACT_PRIVATE respectively EXTRACT_STATIC tags are set to YES. +# Note: This will also disable the warnings about undocumented members that are +# normally produced when WARNINGS is set to YES. +# The default value is: NO. + +EXTRACT_ALL = NO + +# If the EXTRACT_PRIVATE tag is set to YES, all private members of a class will +# be included in the documentation. +# The default value is: NO. + +EXTRACT_PRIVATE = NO + +# If the EXTRACT_PACKAGE tag is set to YES, all members with package or internal +# scope will be included in the documentation. +# The default value is: NO. + +EXTRACT_PACKAGE = NO + +# If the EXTRACT_STATIC tag is set to YES, all static members of a file will be +# included in the documentation. +# The default value is: NO. + +EXTRACT_STATIC = NO + +# If the EXTRACT_LOCAL_CLASSES tag is set to YES, classes (and structs) defined +# locally in source files will be included in the documentation. If set to NO, +# only classes defined in header files are included. Does not have any effect +# for Java sources. +# The default value is: YES. + +EXTRACT_LOCAL_CLASSES = YES + +# This flag is only useful for Objective-C code. If set to YES, local methods, +# which are defined in the implementation section but not in the interface are +# included in the documentation. If set to NO, only methods in the interface are +# included. +# The default value is: NO. + +EXTRACT_LOCAL_METHODS = NO + +# If this flag is set to YES, the members of anonymous namespaces will be +# extracted and appear in the documentation as a namespace called +# 'anonymous_namespace{file}', where file will be replaced with the base name of +# the file that contains the anonymous namespace. By default anonymous namespace +# are hidden. +# The default value is: NO. + +EXTRACT_ANON_NSPACES = NO + +# If the HIDE_UNDOC_MEMBERS tag is set to YES, doxygen will hide all +# undocumented members inside documented classes or files. If set to NO these +# members will be included in the various overviews, but no documentation +# section is generated. This option has no effect if EXTRACT_ALL is enabled. +# The default value is: NO. + +HIDE_UNDOC_MEMBERS = NO + +# If the HIDE_UNDOC_CLASSES tag is set to YES, doxygen will hide all +# undocumented classes that are normally visible in the class hierarchy. If set +# to NO, these classes will be included in the various overviews. This option +# has no effect if EXTRACT_ALL is enabled. +# The default value is: NO. + +HIDE_UNDOC_CLASSES = NO + +# If the HIDE_FRIEND_COMPOUNDS tag is set to YES, doxygen will hide all friend +# (class|struct|union) declarations. If set to NO, these declarations will be +# included in the documentation. +# The default value is: NO. + +HIDE_FRIEND_COMPOUNDS = NO + +# If the HIDE_IN_BODY_DOCS tag is set to YES, doxygen will hide any +# documentation blocks found inside the body of a function. 
If set to NO, these +# blocks will be appended to the function's detailed documentation block. +# The default value is: NO. + +HIDE_IN_BODY_DOCS = NO + +# The INTERNAL_DOCS tag determines if documentation that is typed after a +# \internal command is included. If the tag is set to NO then the documentation +# will be excluded. Set it to YES to include the internal documentation. +# The default value is: NO. + +INTERNAL_DOCS = NO + +# If the CASE_SENSE_NAMES tag is set to NO then doxygen will only generate file +# names in lower-case letters. If set to YES, upper-case letters are also +# allowed. This is useful if you have classes or files whose names only differ +# in case and if your file system supports case sensitive file names. Windows +# and Mac users are advised to set this option to NO. +# The default value is: system dependent. + +CASE_SENSE_NAMES = YES + +# If the HIDE_SCOPE_NAMES tag is set to NO then doxygen will show members with +# their full class and namespace scopes in the documentation. If set to YES, the +# scope will be hidden. +# The default value is: NO. + +HIDE_SCOPE_NAMES = NO + +# If the HIDE_COMPOUND_REFERENCE tag is set to NO (default) then doxygen will +# append additional text to a page's title, such as Class Reference. If set to +# YES the compound reference will be hidden. +# The default value is: NO. + +HIDE_COMPOUND_REFERENCE= NO + +# If the SHOW_INCLUDE_FILES tag is set to YES then doxygen will put a list of +# the files that are included by a file in the documentation of that file. +# The default value is: YES. + +SHOW_INCLUDE_FILES = YES + +# If the SHOW_GROUPED_MEMB_INC tag is set to YES then Doxygen will add for each +# grouped member an include statement to the documentation, telling the reader +# which file to include in order to use the member. +# The default value is: NO. + +SHOW_GROUPED_MEMB_INC = NO + +# If the FORCE_LOCAL_INCLUDES tag is set to YES then doxygen will list include +# files with double quotes in the documentation rather than with sharp brackets. +# The default value is: NO. + +FORCE_LOCAL_INCLUDES = NO + +# If the INLINE_INFO tag is set to YES then a tag [inline] is inserted in the +# documentation for inline members. +# The default value is: YES. + +INLINE_INFO = YES + +# If the SORT_MEMBER_DOCS tag is set to YES then doxygen will sort the +# (detailed) documentation of file and class members alphabetically by member +# name. If set to NO, the members will appear in declaration order. +# The default value is: YES. + +SORT_MEMBER_DOCS = YES + +# If the SORT_BRIEF_DOCS tag is set to YES then doxygen will sort the brief +# descriptions of file, namespace and class members alphabetically by member +# name. If set to NO, the members will appear in declaration order. Note that +# this will also influence the order of the classes in the class list. +# The default value is: NO. + +SORT_BRIEF_DOCS = NO + +# If the SORT_MEMBERS_CTORS_1ST tag is set to YES then doxygen will sort the +# (brief and detailed) documentation of class members so that constructors and +# destructors are listed first. If set to NO the constructors will appear in the +# respective orders defined by SORT_BRIEF_DOCS and SORT_MEMBER_DOCS. +# Note: If SORT_BRIEF_DOCS is set to NO this option is ignored for sorting brief +# member documentation. +# Note: If SORT_MEMBER_DOCS is set to NO this option is ignored for sorting +# detailed member documentation. +# The default value is: NO. 
+ +SORT_MEMBERS_CTORS_1ST = NO + +# If the SORT_GROUP_NAMES tag is set to YES then doxygen will sort the hierarchy +# of group names into alphabetical order. If set to NO the group names will +# appear in their defined order. +# The default value is: NO. + +SORT_GROUP_NAMES = NO + +# If the SORT_BY_SCOPE_NAME tag is set to YES, the class list will be sorted by +# fully-qualified names, including namespaces. If set to NO, the class list will +# be sorted only by class name, not including the namespace part. +# Note: This option is not very useful if HIDE_SCOPE_NAMES is set to YES. +# Note: This option applies only to the class list, not to the alphabetical +# list. +# The default value is: NO. + +SORT_BY_SCOPE_NAME = NO + +# If the STRICT_PROTO_MATCHING option is enabled and doxygen fails to do proper +# type resolution of all parameters of a function it will reject a match between +# the prototype and the implementation of a member function even if there is +# only one candidate or it is obvious which candidate to choose by doing a +# simple string match. By disabling STRICT_PROTO_MATCHING doxygen will still +# accept a match between prototype and implementation in such cases. +# The default value is: NO. + +STRICT_PROTO_MATCHING = NO + +# The GENERATE_TODOLIST tag can be used to enable (YES) or disable (NO) the todo +# list. This list is created by putting \todo commands in the documentation. +# The default value is: YES. + +GENERATE_TODOLIST = YES + +# The GENERATE_TESTLIST tag can be used to enable (YES) or disable (NO) the test +# list. This list is created by putting \test commands in the documentation. +# The default value is: YES. + +GENERATE_TESTLIST = YES + +# The GENERATE_BUGLIST tag can be used to enable (YES) or disable (NO) the bug +# list. This list is created by putting \bug commands in the documentation. +# The default value is: YES. + +GENERATE_BUGLIST = YES + +# The GENERATE_DEPRECATEDLIST tag can be used to enable (YES) or disable (NO) +# the deprecated list. This list is created by putting \deprecated commands in +# the documentation. +# The default value is: YES. + +GENERATE_DEPRECATEDLIST= YES + +# The ENABLED_SECTIONS tag can be used to enable conditional documentation +# sections, marked by \if ... \endif and \cond +# ... \endcond blocks. + +ENABLED_SECTIONS = + +# The MAX_INITIALIZER_LINES tag determines the maximum number of lines that the +# initial value of a variable or macro / define can have for it to appear in the +# documentation. If the initializer consists of more lines than specified here +# it will be hidden. Use a value of 0 to hide initializers completely. The +# appearance of the value of individual variables and macros / defines can be +# controlled using \showinitializer or \hideinitializer command in the +# documentation regardless of this setting. +# Minimum value: 0, maximum value: 10000, default value: 30. + +MAX_INITIALIZER_LINES = 30 + +# Set the SHOW_USED_FILES tag to NO to disable the list of files generated at +# the bottom of the documentation of classes and structs. If set to YES, the +# list will mention the files that were used to generate the documentation. +# The default value is: YES. + +SHOW_USED_FILES = NO + +# Set the SHOW_FILES tag to NO to disable the generation of the Files page. This +# will remove the Files entry from the Quick Index and from the Folder Tree View +# (if specified). +# The default value is: YES. + +SHOW_FILES = YES + +# Set the SHOW_NAMESPACES tag to NO to disable the generation of the Namespaces +# page. 
This will remove the Namespaces entry from the Quick Index and from the +# Folder Tree View (if specified). +# The default value is: YES. + +SHOW_NAMESPACES = NO + +# The FILE_VERSION_FILTER tag can be used to specify a program or script that +# doxygen should invoke to get the current version for each file (typically from +# the version control system). Doxygen will invoke the program by executing (via +# popen()) the command command input-file, where command is the value of the +# FILE_VERSION_FILTER tag, and input-file is the name of an input file provided +# by doxygen. Whatever the program writes to standard output is used as the file +# version. For an example see the documentation. + +FILE_VERSION_FILTER = + +# The LAYOUT_FILE tag can be used to specify a layout file which will be parsed +# by doxygen. The layout file controls the global structure of the generated +# output files in an output format independent way. To create the layout file +# that represents doxygen's defaults, run doxygen with the -l option. You can +# optionally specify a file name after the option, if omitted DoxygenLayout.xml +# will be used as the name of the layout file. +# +# Note that if you run doxygen from a directory containing a file called +# DoxygenLayout.xml, doxygen will parse it automatically even if the LAYOUT_FILE +# tag is left empty. + +LAYOUT_FILE = + +# The CITE_BIB_FILES tag can be used to specify one or more bib files containing +# the reference definitions. This must be a list of .bib files. The .bib +# extension is automatically appended if omitted. This requires the bibtex tool +# to be installed. See also http://en.wikipedia.org/wiki/BibTeX for more info. +# For LaTeX the style of the bibliography can be controlled using +# LATEX_BIB_STYLE. To use this feature you need bibtex and perl available in the +# search path. See also \cite for info how to create references. + +CITE_BIB_FILES = + +#--------------------------------------------------------------------------- +# Configuration options related to warning and progress messages +#--------------------------------------------------------------------------- + +# The QUIET tag can be used to turn on/off the messages that are generated to +# standard output by doxygen. If QUIET is set to YES this implies that the +# messages are off. +# The default value is: NO. + +QUIET = NO + +# The WARNINGS tag can be used to turn on/off the warning messages that are +# generated to standard error (stderr) by doxygen. If WARNINGS is set to YES +# this implies that the warnings are on. +# +# Tip: Turn warnings on while writing the documentation. +# The default value is: YES. + +WARNINGS = YES + +# If the WARN_IF_UNDOCUMENTED tag is set to YES then doxygen will generate +# warnings for undocumented members. If EXTRACT_ALL is set to YES then this flag +# will automatically be disabled. +# The default value is: YES. + +WARN_IF_UNDOCUMENTED = YES + +# If the WARN_IF_DOC_ERROR tag is set to YES, doxygen will generate warnings for +# potential errors in the documentation, such as not documenting some parameters +# in a documented function, or documenting parameters that don't exist or using +# markup commands wrongly. +# The default value is: YES. + +WARN_IF_DOC_ERROR = YES + +# This WARN_NO_PARAMDOC option can be enabled to get warnings for functions that +# are documented, but have no documentation for their parameters or return +# value. 
If set to NO, doxygen will only warn about wrong or incomplete +# parameter documentation, but not about the absence of documentation. +# The default value is: NO. + +WARN_NO_PARAMDOC = NO + +# The WARN_FORMAT tag determines the format of the warning messages that doxygen +# can produce. The string should contain the $file, $line, and $text tags, which +# will be replaced by the file and line number from which the warning originated +# and the warning text. Optionally the format may contain $version, which will +# be replaced by the version of the file (if it could be obtained via +# FILE_VERSION_FILTER) +# The default value is: $file:$line: $text. + +WARN_FORMAT = "$file:$line: $text" + +# The WARN_LOGFILE tag can be used to specify a file to which warning and error +# messages should be written. If left blank the output is written to standard +# error (stderr). + +WARN_LOGFILE = + +#--------------------------------------------------------------------------- +# Configuration options related to the input files +#--------------------------------------------------------------------------- + +# The INPUT tag is used to specify the files and/or directories that contain +# documented source files. You may enter file names like myfile.cpp or +# directories like /usr/src/myproject. Separate the files or directories with +# spaces. +# Note: If this tag is empty the current directory is searched. + +INPUT = src/libsds/include/sds.h \ + docs/job-safety.md \ + # ldap/servers/slapd/slapi-plugin.h \ + +# This tag can be used to specify the character encoding of the source files +# that doxygen parses. Internally doxygen uses the UTF-8 encoding. Doxygen uses +# libiconv (or the iconv built into libc) for the transcoding. See the libiconv +# documentation (see: http://www.gnu.org/software/libiconv) for the list of +# possible encodings. +# The default value is: UTF-8. + +INPUT_ENCODING = UTF-8 + +# If the value of the INPUT tag contains directories, you can use the +# FILE_PATTERNS tag to specify one or more wildcard patterns (like *.cpp and +# *.h) to filter out the source-files in the directories. If left blank the +# following patterns are tested:*.c, *.cc, *.cxx, *.cpp, *.c++, *.java, *.ii, +# *.ixx, *.ipp, *.i++, *.inl, *.idl, *.ddl, *.odl, *.h, *.hh, *.hxx, *.hpp, +# *.h++, *.cs, *.d, *.php, *.php4, *.php5, *.phtml, *.inc, *.m, *.markdown, +# *.md, *.mm, *.dox, *.py, *.f90, *.f, *.for, *.tcl, *.vhd, *.vhdl, *.ucf, +# *.qsf, *.as and *.js. + +FILE_PATTERNS = + +# The RECURSIVE tag can be used to specify whether or not subdirectories should +# be searched for input files as well. +# The default value is: NO. + +RECURSIVE = NO + +# The EXCLUDE tag can be used to specify files and/or directories that should be +# excluded from the INPUT source files. This way you can easily exclude a +# subdirectory from a directory tree whose root is specified with the INPUT tag. +# +# Note that relative paths are relative to the directory from which doxygen is +# run. + +EXCLUDE = + +# The EXCLUDE_SYMLINKS tag can be used to select whether or not files or +# directories that are symbolic links (a Unix file system feature) are excluded +# from the input. +# The default value is: NO. + +EXCLUDE_SYMLINKS = NO + +# If the value of the INPUT tag contains directories, you can use the +# EXCLUDE_PATTERNS tag to specify one or more wildcard patterns to exclude +# certain files from those directories. 
+#
+# Note that the wildcards are matched against the file with absolute path, so to
+# exclude all test directories for example use the pattern */test/*
+
+EXCLUDE_PATTERNS =
+
+# The EXCLUDE_SYMBOLS tag can be used to specify one or more symbol names
+# (namespaces, classes, functions, etc.) that should be excluded from the
+# output. The symbol name can be a fully qualified name, a word, or if the
+# wildcard * is used, a substring. Examples: ANamespace, AClass,
+# AClass::ANamespace, ANamespace::*Test
+#
+# Note that the wildcards are matched against the file with absolute path, so to
+# exclude all test directories use the pattern */test/*
+
+EXCLUDE_SYMBOLS =
+
+# The EXAMPLE_PATH tag can be used to specify one or more files or directories
+# that contain example code fragments that are included (see the \include
+# command).
+
+EXAMPLE_PATH =
+
+# If the value of the EXAMPLE_PATH tag contains directories, you can use the
+# EXAMPLE_PATTERNS tag to specify one or more wildcard patterns (like *.cpp and
+# *.h) to filter out the source-files in the directories. If left blank all
+# files are included.
+
+EXAMPLE_PATTERNS =
+
+# If the EXAMPLE_RECURSIVE tag is set to YES then subdirectories will be
+# searched for input files to be used with the \include or \dontinclude commands
+# irrespective of the value of the RECURSIVE tag.
+# The default value is: NO.
+
+EXAMPLE_RECURSIVE = NO
+
+# The IMAGE_PATH tag can be used to specify one or more files or directories
+# that contain images that are to be included in the documentation (see the
+# \image command).
+
+IMAGE_PATH =
+
+# The INPUT_FILTER tag can be used to specify a program that doxygen should
+# invoke to filter for each input file. Doxygen will invoke the filter program
+# by executing (via popen()) the command:
+#
+# <filter> <input-file>
+#
+# where <filter> is the value of the INPUT_FILTER tag, and <input-file> is the
+# name of an input file. Doxygen will then use the output that the filter
+# program writes to standard output. If FILTER_PATTERNS is specified, this tag
+# will be ignored.
+#
+# Note that the filter must not add or remove lines; it is applied before the
+# code is scanned, but not when the output code is generated. If lines are added
+# or removed, the anchors will not be placed correctly.
+
+INPUT_FILTER =
+
+# The FILTER_PATTERNS tag can be used to specify filters on a per file pattern
+# basis. Doxygen will compare the file name with each pattern and apply the
+# filter if there is a match. The filters are a list of the form: pattern=filter
+# (like *.cpp=my_cpp_filter). See INPUT_FILTER for further information on how
+# filters are used. If the FILTER_PATTERNS tag is empty or if none of the
+# patterns match the file name, INPUT_FILTER is applied.
+
+FILTER_PATTERNS =
+
+# If the FILTER_SOURCE_FILES tag is set to YES, the input filter (if set using
+# INPUT_FILTER) will also be used to filter the input files that are used for
+# producing the source files to browse (i.e. when SOURCE_BROWSER is set to YES).
+# The default value is: NO.
+
+FILTER_SOURCE_FILES = NO
+
+# The FILTER_SOURCE_PATTERNS tag can be used to specify source filters per file
+# pattern. A pattern will override the setting for FILTER_PATTERN (if any) and
+# it is also possible to disable source filtering for a specific pattern using
+# *.ext= (so without naming a filter).
+# This tag requires that the tag FILTER_SOURCE_FILES is set to YES.
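+#
+# Purely illustrative sketch (not used by this configuration, which leaves all
+# filtering disabled): if the header sources ever needed pre-processing for the
+# source browser, a pattern could route them through a wrapper script, with
+# FILTER_SOURCE_FILES enabled as required above:
+#
+#   FILTER_SOURCE_FILES    = YES
+#   FILTER_SOURCE_PATTERNS = *.h=./docs/filter_headers.sh
+#
+# The script path docs/filter_headers.sh is an assumed placeholder for the sake
+# of the example, not something shipped in this tree.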
+ +FILTER_SOURCE_PATTERNS = + +# If the USE_MDFILE_AS_MAINPAGE tag refers to the name of a markdown file that +# is part of the input, its contents will be placed on the main page +# (index.html). This can be useful if you have a project on for instance GitHub +# and want to reuse the introduction page also for the doxygen output. + +USE_MDFILE_AS_MAINPAGE = + +#--------------------------------------------------------------------------- +# Configuration options related to source browsing +#--------------------------------------------------------------------------- + +# If the SOURCE_BROWSER tag is set to YES then a list of source files will be +# generated. Documented entities will be cross-referenced with these sources. +# +# Note: To get rid of all source code in the generated output, make sure that +# also VERBATIM_HEADERS is set to NO. +# The default value is: NO. + +SOURCE_BROWSER = NO + +# Setting the INLINE_SOURCES tag to YES will include the body of functions, +# classes and enums directly into the documentation. +# The default value is: NO. + +INLINE_SOURCES = NO + +# Setting the STRIP_CODE_COMMENTS tag to YES will instruct doxygen to hide any +# special comment blocks from generated source code fragments. Normal C, C++ and +# Fortran comments will always remain visible. +# The default value is: YES. + +STRIP_CODE_COMMENTS = YES + +# If the REFERENCED_BY_RELATION tag is set to YES then for each documented +# function all documented functions referencing it will be listed. +# The default value is: NO. + +REFERENCED_BY_RELATION = NO + +# If the REFERENCES_RELATION tag is set to YES then for each documented function +# all documented entities called/used by that function will be listed. +# The default value is: NO. + +REFERENCES_RELATION = NO + +# If the REFERENCES_LINK_SOURCE tag is set to YES and SOURCE_BROWSER tag is set +# to YES then the hyperlinks from functions in REFERENCES_RELATION and +# REFERENCED_BY_RELATION lists will link to the source code. Otherwise they will +# link to the documentation. +# The default value is: YES. + +REFERENCES_LINK_SOURCE = YES + +# If SOURCE_TOOLTIPS is enabled (the default) then hovering a hyperlink in the +# source code will show a tooltip with additional information such as prototype, +# brief description and links to the definition and documentation. Since this +# will make the HTML file larger and loading of large files a bit slower, you +# can opt to disable this feature. +# The default value is: YES. +# This tag requires that the tag SOURCE_BROWSER is set to YES. + +SOURCE_TOOLTIPS = YES + +# If the USE_HTAGS tag is set to YES then the references to source code will +# point to the HTML generated by the htags(1) tool instead of doxygen built-in +# source browser. The htags tool is part of GNU's global source tagging system +# (see http://www.gnu.org/software/global/global.html). You will need version +# 4.8.6 or higher. +# +# To use it do the following: +# - Install the latest version of global +# - Enable SOURCE_BROWSER and USE_HTAGS in the config file +# - Make sure the INPUT points to the root of the source tree +# - Run doxygen as normal +# +# Doxygen will invoke htags (and that will in turn invoke gtags), so these +# tools must be available from the command line (i.e. in the search path). +# +# The result: instead of the source browser generated by doxygen, the links to +# source code will now point to the output of htags. +# The default value is: NO. +# This tag requires that the tag SOURCE_BROWSER is set to YES. 
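+#
+# As a purely illustrative aside (this configuration keeps the built-in source
+# browser and the values below unchanged), the switch described above would
+# amount to setting, in this same file, with GNU global installed so that
+# htags and gtags are on the search path:
+#
+#   SOURCE_BROWSER = YES
+#   USE_HTAGS      = YES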
+
+USE_HTAGS = NO
+
+# If the VERBATIM_HEADERS tag is set to YES then doxygen will generate a
+# verbatim copy of the header file for each class for which an include is
+# specified. Set to NO to disable this.
+# See also: Section \class.
+# The default value is: YES.
+
+VERBATIM_HEADERS = YES
+
+#---------------------------------------------------------------------------
+# Configuration options related to the alphabetical class index
+#---------------------------------------------------------------------------
+
+# If the ALPHABETICAL_INDEX tag is set to YES, an alphabetical index of all
+# compounds will be generated. Enable this if the project contains a lot of
+# classes, structs, unions or interfaces.
+# The default value is: YES.
+
+ALPHABETICAL_INDEX = YES
+
+# The COLS_IN_ALPHA_INDEX tag can be used to specify the number of columns in
+# which the alphabetical index list will be split.
+# Minimum value: 1, maximum value: 20, default value: 5.
+# This tag requires that the tag ALPHABETICAL_INDEX is set to YES.
+
+COLS_IN_ALPHA_INDEX = 5
+
+# In case all classes in a project start with a common prefix, all classes will
+# be put under the same header in the alphabetical index. The IGNORE_PREFIX tag
+# can be used to specify a prefix (or a list of prefixes) that should be ignored
+# while generating the index headers.
+# This tag requires that the tag ALPHABETICAL_INDEX is set to YES.
+
+IGNORE_PREFIX =
+
+#---------------------------------------------------------------------------
+# Configuration options related to the HTML output
+#---------------------------------------------------------------------------
+
+# If the GENERATE_HTML tag is set to YES, doxygen will generate HTML output
+# The default value is: NO.
+
+GENERATE_HTML = YES
+
+# The HTML_OUTPUT tag is used to specify where the HTML docs will be put. If a
+# relative path is entered the value of OUTPUT_DIRECTORY will be put in front of
+# it.
+# The default directory is: html.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+HTML_OUTPUT = html
+
+# The HTML_FILE_EXTENSION tag can be used to specify the file extension for each
+# generated HTML page (for example: .htm, .php, .asp).
+# The default value is: .html.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+HTML_FILE_EXTENSION = .html
+
+# The HTML_HEADER tag can be used to specify a user-defined HTML header file for
+# each generated HTML page. If the tag is left blank doxygen will generate a
+# standard header.
+#
+# To get valid HTML, the header file must include any scripts and style sheets
+# that doxygen needs, which depend on the configuration options used (e.g.
+# the setting GENERATE_TREEVIEW). It is highly recommended to start with a
+# default header using
+# doxygen -w html new_header.html new_footer.html new_stylesheet.css
+# YourConfigFile
+# and then modify the file new_header.html. See also section "Doxygen usage"
+# for information on how to generate the default header that doxygen normally
+# uses.
+# Note: The header is subject to change so you typically have to regenerate the
+# default header when upgrading to a newer version of doxygen. For a description
+# of the possible markers and block names see the documentation.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+HTML_HEADER =
+# HTML_HEADER = docs/doc_header.html
+
+# The HTML_FOOTER tag can be used to specify a user-defined HTML footer for each
+# generated HTML page.
If the tag is left blank doxygen will generate a standard +# footer. See HTML_HEADER for more information on how to generate a default +# footer and what special commands can be used inside the footer. See also +# section "Doxygen usage" for information on how to generate the default footer +# that doxygen normally uses. +# This tag requires that the tag GENERATE_HTML is set to YES. + +HTML_FOOTER = + +# The HTML_STYLESHEET tag can be used to specify a user-defined cascading style +# sheet that is used by each HTML page. It can be used to fine-tune the look of +# the HTML output. If left blank doxygen will generate a default style sheet. +# See also section "Doxygen usage" for information on how to generate the style +# sheet that doxygen normally uses. +# Note: It is recommended to use HTML_EXTRA_STYLESHEET instead of this tag, as +# it is more robust and this tag (HTML_STYLESHEET) will in the future become +# obsolete. +# This tag requires that the tag GENERATE_HTML is set to YES. + +HTML_STYLESHEET = + +# The HTML_EXTRA_STYLESHEET tag can be used to specify additional user-defined +# cascading style sheets that are included after the standard style sheets +# created by doxygen. Using this option one can overrule certain style aspects. +# This is preferred over using HTML_STYLESHEET since it does not replace the +# standard style sheet and is therefore more robust against future updates. +# Doxygen will copy the style sheet files to the output directory. +# Note: The order of the extra style sheet files is of importance (e.g. the last +# style sheet in the list overrules the setting of the previous ones in the +# list). For an example see the documentation. +# This tag requires that the tag GENERATE_HTML is set to YES. + +HTML_EXTRA_STYLESHEET = docs/custom.css + +# The HTML_EXTRA_FILES tag can be used to specify one or more extra images or +# other source files which should be copied to the HTML output directory. Note +# that these files will be copied to the base HTML output directory. Use the +# $relpath^ marker in the HTML_HEADER and/or HTML_FOOTER files to load these +# files. In the HTML_STYLESHEET file, use the file name only. Also note that the +# files will be copied as-is; there are no commands or markers available. +# This tag requires that the tag GENERATE_HTML is set to YES. + +# HTML_EXTRA_FILES = docs/nunc-stans-intro.png \ +# docs/nunc-stans-job-states.png + +# The HTML_COLORSTYLE_HUE tag controls the color of the HTML output. Doxygen +# will adjust the colors in the style sheet and background images according to +# this color. Hue is specified as an angle on a colorwheel, see +# http://en.wikipedia.org/wiki/Hue for more information. For instance the value +# 0 represents red, 60 is yellow, 120 is green, 180 is cyan, 240 is blue, 300 +# purple, and 360 is red again. +# Minimum value: 0, maximum value: 359, default value: 220. +# This tag requires that the tag GENERATE_HTML is set to YES. + +HTML_COLORSTYLE_HUE = 195 + +# The HTML_COLORSTYLE_SAT tag controls the purity (or saturation) of the colors +# in the HTML output. For a value of 0 the output will use grayscales only. A +# value of 255 will produce the most vivid colors. +# Minimum value: 0, maximum value: 255, default value: 100. +# This tag requires that the tag GENERATE_HTML is set to YES. + +HTML_COLORSTYLE_SAT = 96 + +# The HTML_COLORSTYLE_GAMMA tag controls the gamma correction applied to the +# luminance component of the colors in the HTML output. 
Values below 100 +# gradually make the output lighter, whereas values above 100 make the output +# darker. The value divided by 100 is the actual gamma applied, so 80 represents +# a gamma of 0.8, The value 220 represents a gamma of 2.2, and 100 does not +# change the gamma. +# Minimum value: 40, maximum value: 240, default value: 80. +# This tag requires that the tag GENERATE_HTML is set to YES. + +HTML_COLORSTYLE_GAMMA = 80 + +# If the HTML_TIMESTAMP tag is set to YES then the footer of each generated HTML +# page will contain the date and time when the page was generated. Setting this +# to NO can help when comparing the output of multiple runs. +# The default value is: NO. +# This tag requires that the tag GENERATE_HTML is set to YES. + +HTML_TIMESTAMP = NO + +# If the HTML_DYNAMIC_SECTIONS tag is set to YES then the generated HTML +# documentation will contain sections that can be hidden and shown after the +# page has loaded. +# The default value is: NO. +# This tag requires that the tag GENERATE_HTML is set to YES. + +HTML_DYNAMIC_SECTIONS = NO + +# With HTML_INDEX_NUM_ENTRIES one can control the preferred number of entries +# shown in the various tree structured indices initially; the user can expand +# and collapse entries dynamically later on. Doxygen will expand the tree to +# such a level that at most the specified number of entries are visible (unless +# a fully collapsed tree already exceeds this amount). So setting the number of +# entries 1 will produce a full collapsed tree by default. 0 is a special value +# representing an infinite number of entries and will result in a full expanded +# tree by default. +# Minimum value: 0, maximum value: 9999, default value: 100. +# This tag requires that the tag GENERATE_HTML is set to YES. + +HTML_INDEX_NUM_ENTRIES = 100 + +# If the GENERATE_DOCSET tag is set to YES, additional index files will be +# generated that can be used as input for Apple's Xcode 3 integrated development +# environment (see: http://developer.apple.com/tools/xcode/), introduced with +# OSX 10.5 (Leopard). To create a documentation set, doxygen will generate a +# Makefile in the HTML output directory. Running make will produce the docset in +# that directory and running make install will install the docset in +# ~/Library/Developer/Shared/Documentation/DocSets so that Xcode will find it at +# startup. See http://developer.apple.com/tools/creatingdocsetswithdoxygen.html +# for more information. +# The default value is: NO. +# This tag requires that the tag GENERATE_HTML is set to YES. + +GENERATE_DOCSET = NO + +# This tag determines the name of the docset feed. A documentation feed provides +# an umbrella under which multiple documentation sets from a single provider +# (such as a company or product suite) can be grouped. +# The default value is: Doxygen generated docs. +# This tag requires that the tag GENERATE_DOCSET is set to YES. + +DOCSET_FEEDNAME = "Doxygen generated docs" + +# This tag specifies a string that should uniquely identify the documentation +# set bundle. This should be a reverse domain-name style string, e.g. +# com.mycompany.MyDocSet. Doxygen will append .docset to the name. +# The default value is: org.doxygen.Project. +# This tag requires that the tag GENERATE_DOCSET is set to YES. + +DOCSET_BUNDLE_ID = org.doxygen.Project + +# The DOCSET_PUBLISHER_ID tag specifies a string that should uniquely identify +# the documentation publisher. This should be a reverse domain-name style +# string, e.g. com.mycompany.MyDocSet.documentation. 
+# The default value is: org.doxygen.Publisher. +# This tag requires that the tag GENERATE_DOCSET is set to YES. + +DOCSET_PUBLISHER_ID = org.doxygen.Publisher + +# The DOCSET_PUBLISHER_NAME tag identifies the documentation publisher. +# The default value is: Publisher. +# This tag requires that the tag GENERATE_DOCSET is set to YES. + +DOCSET_PUBLISHER_NAME = Publisher + +# If the GENERATE_HTMLHELP tag is set to YES then doxygen generates three +# additional HTML index files: index.hhp, index.hhc, and index.hhk. The +# index.hhp is a project file that can be read by Microsoft's HTML Help Workshop +# (see: http://www.microsoft.com/en-us/download/details.aspx?id=21138) on +# Windows. +# +# The HTML Help Workshop contains a compiler that can convert all HTML output +# generated by doxygen into a single compiled HTML file (.chm). Compiled HTML +# files are now used as the Windows 98 help format, and will replace the old +# Windows help format (.hlp) on all Windows platforms in the future. Compressed +# HTML files also contain an index, a table of contents, and you can search for +# words in the documentation. The HTML workshop also contains a viewer for +# compressed HTML files. +# The default value is: NO. +# This tag requires that the tag GENERATE_HTML is set to YES. + +GENERATE_HTMLHELP = NO + +# The CHM_FILE tag can be used to specify the file name of the resulting .chm +# file. You can add a path in front of the file if the result should not be +# written to the html output directory. +# This tag requires that the tag GENERATE_HTMLHELP is set to YES. + +CHM_FILE = + +# The HHC_LOCATION tag can be used to specify the location (absolute path +# including file name) of the HTML help compiler (hhc.exe). If non-empty, +# doxygen will try to run the HTML help compiler on the generated index.hhp. +# The file has to be specified with full path. +# This tag requires that the tag GENERATE_HTMLHELP is set to YES. + +HHC_LOCATION = + +# The GENERATE_CHI flag controls if a separate .chi index file is generated +# (YES) or that it should be included in the master .chm file (NO). +# The default value is: NO. +# This tag requires that the tag GENERATE_HTMLHELP is set to YES. + +GENERATE_CHI = NO + +# The CHM_INDEX_ENCODING is used to encode HtmlHelp index (hhk), content (hhc) +# and project file content. +# This tag requires that the tag GENERATE_HTMLHELP is set to YES. + +CHM_INDEX_ENCODING = + +# The BINARY_TOC flag controls whether a binary table of contents is generated +# (YES) or a normal table of contents (NO) in the .chm file. Furthermore it +# enables the Previous and Next buttons. +# The default value is: NO. +# This tag requires that the tag GENERATE_HTMLHELP is set to YES. + +BINARY_TOC = NO + +# The TOC_EXPAND flag can be set to YES to add extra items for group members to +# the table of contents of the HTML help documentation and to the tree view. +# The default value is: NO. +# This tag requires that the tag GENERATE_HTMLHELP is set to YES. + +TOC_EXPAND = NO + +# If the GENERATE_QHP tag is set to YES and both QHP_NAMESPACE and +# QHP_VIRTUAL_FOLDER are set, an additional index file will be generated that +# can be used as input for Qt's qhelpgenerator to generate a Qt Compressed Help +# (.qch) of the generated HTML documentation. +# The default value is: NO. +# This tag requires that the tag GENERATE_HTML is set to YES. + +GENERATE_QHP = NO + +# If the QHG_LOCATION tag is specified, the QCH_FILE tag can be used to specify +# the file name of the resulting .qch file. 
The path specified is relative to +# the HTML output folder. +# This tag requires that the tag GENERATE_QHP is set to YES. + +QCH_FILE = + +# The QHP_NAMESPACE tag specifies the namespace to use when generating Qt Help +# Project output. For more information please see Qt Help Project / Namespace +# (see: http://qt-project.org/doc/qt-4.8/qthelpproject.html#namespace). +# The default value is: org.doxygen.Project. +# This tag requires that the tag GENERATE_QHP is set to YES. + +QHP_NAMESPACE = org.doxygen.Project + +# The QHP_VIRTUAL_FOLDER tag specifies the namespace to use when generating Qt +# Help Project output. For more information please see Qt Help Project / Virtual +# Folders (see: http://qt-project.org/doc/qt-4.8/qthelpproject.html#virtual- +# folders). +# The default value is: doc. +# This tag requires that the tag GENERATE_QHP is set to YES. + +QHP_VIRTUAL_FOLDER = doc + +# If the QHP_CUST_FILTER_NAME tag is set, it specifies the name of a custom +# filter to add. For more information please see Qt Help Project / Custom +# Filters (see: http://qt-project.org/doc/qt-4.8/qthelpproject.html#custom- +# filters). +# This tag requires that the tag GENERATE_QHP is set to YES. + +QHP_CUST_FILTER_NAME = + +# The QHP_CUST_FILTER_ATTRS tag specifies the list of the attributes of the +# custom filter to add. For more information please see Qt Help Project / Custom +# Filters (see: http://qt-project.org/doc/qt-4.8/qthelpproject.html#custom- +# filters). +# This tag requires that the tag GENERATE_QHP is set to YES. + +QHP_CUST_FILTER_ATTRS = + +# The QHP_SECT_FILTER_ATTRS tag specifies the list of the attributes this +# project's filter section matches. Qt Help Project / Filter Attributes (see: +# http://qt-project.org/doc/qt-4.8/qthelpproject.html#filter-attributes). +# This tag requires that the tag GENERATE_QHP is set to YES. + +QHP_SECT_FILTER_ATTRS = + +# The QHG_LOCATION tag can be used to specify the location of Qt's +# qhelpgenerator. If non-empty doxygen will try to run qhelpgenerator on the +# generated .qhp file. +# This tag requires that the tag GENERATE_QHP is set to YES. + +QHG_LOCATION = + +# If the GENERATE_ECLIPSEHELP tag is set to YES, additional index files will be +# generated, together with the HTML files, they form an Eclipse help plugin. To +# install this plugin and make it available under the help contents menu in +# Eclipse, the contents of the directory containing the HTML and XML files needs +# to be copied into the plugins directory of eclipse. The name of the directory +# within the plugins directory should be the same as the ECLIPSE_DOC_ID value. +# After copying Eclipse needs to be restarted before the help appears. +# The default value is: NO. +# This tag requires that the tag GENERATE_HTML is set to YES. + +GENERATE_ECLIPSEHELP = NO + +# A unique identifier for the Eclipse help plugin. When installing the plugin +# the directory name containing the HTML and XML files should also have this +# name. Each documentation set should have its own identifier. +# The default value is: org.doxygen.Project. +# This tag requires that the tag GENERATE_ECLIPSEHELP is set to YES. + +ECLIPSE_DOC_ID = org.doxygen.Project + +# If you want full control over the layout of the generated HTML pages it might +# be necessary to disable the index and replace it with your own. The +# DISABLE_INDEX tag can be used to turn on/off the condensed index (tabs) at top +# of each HTML page. A value of NO enables the index and the value YES disables +# it. 
Since the tabs in the index contain the same information as the navigation
+# tree, you can set this option to YES if you also set GENERATE_TREEVIEW to YES.
+# The default value is: NO.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+DISABLE_INDEX = NO
+
+# The GENERATE_TREEVIEW tag is used to specify whether a tree-like index
+# structure should be generated to display hierarchical information. If the tag
+# value is set to YES, a side panel will be generated containing a tree-like
+# index structure (just like the one that is generated for HTML Help). For this
+# to work a browser that supports JavaScript, DHTML, CSS and frames is required
+# (i.e. any modern browser). Windows users are probably better off using the
+# HTML help feature. Via custom style sheets (see HTML_EXTRA_STYLESHEET) one can
+# further fine-tune the look of the index. As an example, the default style
+# sheet generated by doxygen has an example that shows how to put an image at
+# the root of the tree instead of the PROJECT_NAME. Since the tree basically has
+# the same information as the tab index, you could consider setting
+# DISABLE_INDEX to YES when enabling this option.
+# The default value is: NO.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+GENERATE_TREEVIEW = YES
+
+# The ENUM_VALUES_PER_LINE tag can be used to set the number of enum values that
+# doxygen will group on one line in the generated HTML documentation.
+#
+# Note that a value of 0 will completely suppress the enum values from appearing
+# in the overview section.
+# Minimum value: 0, maximum value: 20, default value: 4.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+ENUM_VALUES_PER_LINE = 4
+
+# If the treeview is enabled (see GENERATE_TREEVIEW) then this tag can be used
+# to set the initial width (in pixels) of the frame in which the tree is shown.
+# Minimum value: 0, maximum value: 1500, default value: 250.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+TREEVIEW_WIDTH = 250
+
+# If the EXT_LINKS_IN_WINDOW option is set to YES, doxygen will open links to
+# external symbols imported via tag files in a separate window.
+# The default value is: NO.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+EXT_LINKS_IN_WINDOW = NO
+
+# Use this tag to change the font size of LaTeX formulas included as images in
+# the HTML documentation. When you change the font size after a successful
+# doxygen run you need to manually remove any form_*.png images from the HTML
+# output directory to force them to be regenerated.
+# Minimum value: 8, maximum value: 50, default value: 10.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+FORMULA_FONTSIZE = 10
+
+# Use the FORMULA_TRANSPARENT tag to determine whether or not the images
+# generated for formulas are transparent PNGs. Transparent PNGs are not
+# supported properly for IE 6.0, but are supported on all modern browsers.
+#
+# Note that when changing this option you need to delete any form_*.png files in
+# the HTML output directory before the changes have effect.
+# The default value is: YES.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+FORMULA_TRANSPARENT = YES
+
+# Enable the USE_MATHJAX option to render LaTeX formulas using MathJax (see
+# http://www.mathjax.org) which uses client side Javascript for the rendering
+# instead of using pre-rendered bitmaps. Use this if you do not have LaTeX
+# installed or if you want formulas to look prettier in the HTML output. When
+# enabled you may also need to install MathJax separately and configure the path
+# to it using the MATHJAX_RELPATH option.
+# The default value is: NO.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+USE_MATHJAX = NO
+
+# When MathJax is enabled you can set the default output format to be used for
+# the MathJax output. See the MathJax site (see:
+# http://docs.mathjax.org/en/latest/output.html) for more details.
+# Possible values are: HTML-CSS (which is slower, but has the best
+# compatibility), NativeMML (i.e. MathML) and SVG.
+# The default value is: HTML-CSS.
+# This tag requires that the tag USE_MATHJAX is set to YES.
+
+MATHJAX_FORMAT = HTML-CSS
+
+# When MathJax is enabled you need to specify the location relative to the HTML
+# output directory using the MATHJAX_RELPATH option. The destination directory
+# should contain the MathJax.js script. For instance, if the mathjax directory
+# is located at the same level as the HTML output directory, then
+# MATHJAX_RELPATH should be ../mathjax. The default value points to the MathJax
+# Content Delivery Network so you can quickly see the result without installing
+# MathJax. However, it is strongly recommended to install a local copy of
+# MathJax from http://www.mathjax.org before deployment.
+# The default value is: http://cdn.mathjax.org/mathjax/latest.
+# This tag requires that the tag USE_MATHJAX is set to YES.
+
+MATHJAX_RELPATH = http://cdn.mathjax.org/mathjax/latest
+
+# The MATHJAX_EXTENSIONS tag can be used to specify one or more MathJax
+# extension names that should be enabled during MathJax rendering. For example
+# MATHJAX_EXTENSIONS = TeX/AMSmath TeX/AMSsymbols
+# This tag requires that the tag USE_MATHJAX is set to YES.
+
+MATHJAX_EXTENSIONS =
+
+# The MATHJAX_CODEFILE tag can be used to specify a file with javascript pieces
+# of code that will be used on startup of the MathJax code. See the MathJax site
+# (see: http://docs.mathjax.org/en/latest/output.html) for more details. For an
+# example see the documentation.
+# This tag requires that the tag USE_MATHJAX is set to YES.
+
+MATHJAX_CODEFILE =
+
+# When the SEARCHENGINE tag is enabled doxygen will generate a search box for
+# the HTML output. The underlying search engine uses javascript and DHTML and
+# should work on any modern browser. Note that when using HTML help
+# (GENERATE_HTMLHELP), Qt help (GENERATE_QHP), or docsets (GENERATE_DOCSET)
+# there is already a search function so this one should typically be disabled.
+# For large projects the javascript based search engine can be slow, then
+# enabling SERVER_BASED_SEARCH may provide a better solution. It is possible to
+# search using the keyboard; to jump to the search box use <access key> + S
+# (what the <access key> is depends on the OS and browser, but it is typically
+# <CTRL>, <ALT>/