[Pkg-fedora-ds-maintainers] 389-ds-base: Changes to 'upstream'

Timo Aaltonen tjaalton at moszumanska.debian.org
Tue Dec 15 07:09:49 UTC 2015


Rebased ref, commits from common ancestor:
commit e621e1f185eb3b5a2f770d11a649605b278413ba
Author: Noriko Hosoi <nhosoi at redhat.com>
Date:   Wed Nov 18 15:56:23 2015 -0800

    bump version to 1.3.4.5

diff --git a/VERSION.sh b/VERSION.sh
index 79d7624..320d597 100644
--- a/VERSION.sh
+++ b/VERSION.sh
@@ -10,7 +10,7 @@ vendor="389 Project"
 # PACKAGE_VERSION is constructed from these
 VERSION_MAJOR=1
 VERSION_MINOR=3
-VERSION_MAINT=4.4
+VERSION_MAINT=4.5
 # if this is a PRERELEASE, set VERSION_PREREL
 # otherwise, comment it out
 # be sure to include the dot prefix in the prerel

commit 36245abd78f7abfed8219a5ac4a4cf50c1c0237c
Author: Noriko Hosoi <nhosoi at redhat.com>
Date:   Wed Nov 18 11:44:35 2015 -0800

    Ticket #48338 - SimplePagedResults -- abandon could happen between the abandon check and sending results
    
    Description: commit 390b8bd9076e8976facc0858e60985d6b4fac05c introduced
    a self deadlock (see also bz1282607: 389-ds-base-1.2.11.15-67.el6_7 hang)
    
    First phase of the following approach:
      Fix design by Ludwig Krispenz and Rich Megginson (Thanks!)
      Investigate the connection params used in the pblock access one by one:
      - for fields not used, either remove the access or leave it as is, with
        a big ASSERT to flag cases where the field is ever used, and a plan
        to deprecate and remove the field;
      - for fields with atomic access, like c_isreplication_session, remove
        the mutex;
      - for fields requiring copying, define them directly in the pblock and,
        when the pblock is created, populate them from the connection; the
        pblock access would then no longer need the c_mutex.
    Removing PR_Lock(c_mutex) from slapi_pblock_get(SLAPI_CONN_CLIENTNETADDR) since
    acquiring the lock is not necessary for the atomic reads.  This change solves
    the self deadlock.
    
    https://fedorahosted.org/389/ticket/48338#comment:11
    
    Reviewed by nkinder at redhat.com and mreynolds at redhat.com (Thank you, Nathan and Mark!)
    
    (cherry picked from commit 79ca67d1fc5d50d8a9ae6b686b9564f3960f8592)

diff --git a/ldap/servers/slapd/pblock.c b/ldap/servers/slapd/pblock.c
index bf57a33..f2017be 100644
--- a/ldap/servers/slapd/pblock.c
+++ b/ldap/servers/slapd/pblock.c
@@ -223,14 +223,12 @@ slapi_pblock_get( Slapi_PBlock *pblock, int arg, void *value )
 			memset( value, 0, sizeof( PRNetAddr ));
 			break;
 		}
-		PR_Lock( pblock->pb_conn->c_mutex );
+		/* For fields with atomic access, remove the PR_Lock(c_mutex) */
 		if ( pblock->pb_conn->cin_addr == NULL ) {
 			memset( value, 0, sizeof( PRNetAddr ));
 		} else {
-			(*(PRNetAddr *)value) =
-			    *(pblock->pb_conn->cin_addr);
+			(*(PRNetAddr *)value) = *(pblock->pb_conn->cin_addr);
 		}
-		PR_Unlock( pblock->pb_conn->c_mutex );
 		break;
 	case SLAPI_CONN_SERVERNETADDR:
 		if (pblock->pb_conn == NULL)
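
The change above relies on cin_addr being written once at connection setup and never modified afterwards, so copying it needs no mutex. A minimal stand-alone sketch of that pattern, with hypothetical NetAddr/Conn stand-ins for the real PRNetAddr and Connection types:

    #include <stdio.h>
    #include <string.h>

    /* Hypothetical stand-ins for the real Connection and PRNetAddr types. */
    typedef struct { int family; char ip[16]; } NetAddr;
    typedef struct { NetAddr *cin_addr; /* set once at accept time */ } Conn;

    /* Copy the client address without taking the connection mutex: the
     * pointer is written once before the connection is visible to worker
     * threads and never changes afterwards, so a plain read is safe. */
    static void get_client_addr(const Conn *c, NetAddr *out)
    {
        if (c->cin_addr == NULL) {
            memset(out, 0, sizeof(*out));
        } else {
            *out = *c->cin_addr;  /* no PR_Lock/PR_Unlock needed */
        }
    }

    int main(void)
    {
        NetAddr addr = { 2, "192.0.2.7" };
        Conn c = { &addr };
        NetAddr copy;

        get_client_addr(&c, &copy);
        printf("client ip: %s\n", copy.ip);
        return 0;
    }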

commit c0d35f7e107d80a3fb173dfbce001243fe78c254
Author: Noriko Hosoi <nhosoi at redhat.com>
Date:   Mon Nov 16 11:32:27 2015 -0800

    Ticket #48316 - Perl-5.20.3-328: Use of literal control characters in variable names is deprecated
    
    Description: "$^O" issues a warning "Use of literal control characters in
    variable names is deprecated at /usr/lib64/dirsrv/perl/DSCreate.pm line 839."
    
    This patch replaces "$^O" with "$Config{'osname'}".
    
    https://fedorahosted.org/389/ticket/48316
    
    Reviewed by rmeggins at redhat.com and wibrown at redhat.com (Thank you, Rich and William!!)
    
    (cherry picked from commit 65362db30f55a41de1df97c8bd92047cac52ce83)

diff --git a/ldap/admin/src/scripts/DSCreate.pm.in b/ldap/admin/src/scripts/DSCreate.pm.in
index 3ce5a73..50498fe 100644
--- a/ldap/admin/src/scripts/DSCreate.pm.in
+++ b/ldap/admin/src/scripts/DSCreate.pm.in
@@ -18,6 +18,7 @@ package DSCreate;
 use DSUtil;
 use Inf;
 use FileConn;
+use Config;
 
 use Sys::Hostname;
 # tempfiles
@@ -836,7 +837,7 @@ sub setDefaults {
     }
 
     if (!defined($inf->{slapd}->{sasl_path})) {
-        if ($^O ne "linux") {
+        if ($Config{'osname'} ne "linux") {
             $inf->{slapd}->{sasl_path} = "$inf->{General}->{prefix}@libdir@/sasl2";
         }
     }

commit 9cf628467ad33a46c6c4cb14c6535881e66bd122
Author: Noriko Hosoi <nhosoi at redhat.com>
Date:   Mon Nov 16 11:24:03 2015 -0800

    Ticket #48348 - Running /usr/sbin/setup-ds.pl fails with Can't locate bigint.pm, plus two warnings
    
    Description: bigint.pm is deprecated and no longer packaged in perl-5.22.0
    on FC23.  This patch replaces bigint with more basic Math::BigInt.
    
    https://fedorahosted.org/389/ticket/48348
    
    Reviewed by rmeggins at redhat.com and wibrown at redhat.com (Thank you, Rich and William!!)
    
    (cherry picked from commit dfc9ad00a6bf0f8b86c4c24a01e06e3434a7ca41)

diff --git a/ldap/admin/src/scripts/50fixNsState.pl b/ldap/admin/src/scripts/50fixNsState.pl
index b331e9c..bd20708 100644
--- a/ldap/admin/src/scripts/50fixNsState.pl
+++ b/ldap/admin/src/scripts/50fixNsState.pl
@@ -4,7 +4,7 @@ use Mozilla::LDAP::Utils qw(normalizeDN);
 use Mozilla::LDAP::API qw(:constant ldap_url_parse ldap_explode_dn);
 use DSUtil qw(debug);
 use Config;
-use bigint;
+use Math::BigInt;
 
 # # Determine the endianness of your system
 my $packfmt32 = "VVA6vCx3"; # must be 20 bytes

commit a4c0a9eeba031b9304d63ca05d8b9fab9ebca1b2
Author: Noriko Hosoi <nhosoi at redhat.com>
Date:   Thu Nov 5 13:08:56 2015 -0800

    Ticket #48339 - Share nsslapd-threadnumber in the case nunc-stans is enabled, as well.
    
    Description:  When nunc-stans is enabled, instead of getting the
    thread number from the environment variable MAX_THREADS, use the
    value of config parameter nsslapd-threadnumber.
    
    https://fedorahosted.org/389/ticket/48339
    
    Reviewed by rmeggins at redhat.com (Thank you, Rich!!)
    
    (cherry picked from commit ab8ed9a5ebb0d15b55d7525ed1d5dbeebd8c7563)

diff --git a/ldap/servers/slapd/daemon.c b/ldap/servers/slapd/daemon.c
index bd3bfb2..5d70647 100644
--- a/ldap/servers/slapd/daemon.c
+++ b/ldap/servers/slapd/daemon.c
@@ -1199,10 +1199,7 @@ void slapd_daemon( daemon_ports_t *ports )
 #ifdef ENABLE_NUNC_STANS
 	if (enable_nunc_stans && !g_get_shutdown()) {
 		int ii;
-		PRInt32 maxthreads = 3;
-		if (getenv("MAX_THREADS")) {
-			maxthreads = atoi(getenv("MAX_THREADS"));
-		}
+		PRInt32 maxthreads = (PRInt32)config_get_threadnumber();
 		/* Set the nunc-stans thread pool config */
 		ns_thrpool_config_init(&tp_config);
 

commit b03987689c3a2477630e2a3452e64cc7759ba5f3
Author: William Brown <firstyear at redhat.com>
Date:   Fri Nov 6 14:56:44 2015 +1000

    Ticket 48311 - nunc-stans: Attempt to release connection that is not acquired
    https://fedorahosted.org/389/ticket/48311
    
    Bug Description: DS with nunc stans enabled produces lots of messages like
    [13/Oct/2015:11:29:24 -0400] connection - conn=98 fd=161 Attempt to release
    connection that is not acquired
    
    Fix Description: From the original patch:
     * Do not call connection_acquire_nolock() inside a PR_ASSERT call.
     * Also changed other PR_ASSERTs to only be called if DEBUG is set
    
    This additionally guarantees the return codes of these functions since we have
    removed the PR_ASSERT that previously wrapped these function calls. If these
    assertions fail, we log to the error log in all cases.
    
    Author: wibrown
    
    Review by: mreynolds, nhosoi (Thanks!)
    
    (cherry picked from commit 49aaf98732d1e16dde3edb81272de8203aded21c)

diff --git a/ldap/servers/slapd/daemon.c b/ldap/servers/slapd/daemon.c
index 82099bc..bd3bfb2 100644
--- a/ldap/servers/slapd/daemon.c
+++ b/ldap/servers/slapd/daemon.c
@@ -1839,7 +1839,12 @@ ns_handle_closure(struct ns_job_t *job)
 #ifdef DEBUG
 	PR_ASSERT(0 == NS_JOB_IS_THREAD(ns_job_get_type(job)));
 #else
-	NS_JOB_IS_THREAD(ns_job_get_type(job));
+    /* This doesn't actually confirm it's in the event loop thread, but it's a start */
+	if (NS_JOB_IS_THREAD(ns_job_get_type(job)) != 0) {
+		LDAPDebug2Args(LDAP_DEBUG_ANY, "ns_handle_closure: Attempt to close outside of event loop thread %" NSPRIu64 " for fd=%d\n",
+			c->c_connid, c->c_sd);
+		return;
+	}
 #endif
 	PR_Lock(c->c_mutex);
 	connection_release_nolock_ext(c, 1); /* release ref acquired for event framework */
@@ -1896,7 +1901,14 @@ ns_connection_post_io_or_closing(Connection *conn)
 #ifdef DEBUG
 		PR_ASSERT(0 == connection_acquire_nolock(conn));
 #else
-		connection_acquire_nolock(conn); /* event framework now has a reference */
+		if (connection_acquire_nolock(conn) != 0) { /* event framework now has a reference */
+			/* 
+			 * This has already been logged as an error in ./ldap/servers/slapd/connection.c
+			 * The error occurs when we get a connection in a closing state.
+			 * For now we return, but there is probably a better way to handle the error case.
+			 */
+			return;
+		}
 #endif
 		ns_add_io_timeout_job(conn->c_tp, conn->c_prfd, &tv,
 				      NS_JOB_READ|NS_JOB_PRESERVE_FD,
@@ -1922,7 +1934,12 @@ ns_handle_pr_read_ready(struct ns_job_t *job)
 #ifdef DEBUG
 	PR_ASSERT(0 == NS_JOB_IS_THREAD(ns_job_get_type(job)));
 #else
-	NS_JOB_IS_THREAD(ns_job_get_type(job));
+    /* This doesn't actually confirm it's in the event loop thread, but it's a start */
+	if (NS_JOB_IS_THREAD(ns_job_get_type(job)) != 0) {
+		LDAPDebug2Args(LDAP_DEBUG_ANY, "ns_handle_pr_read_ready: Attempt to handle read ready outside of event loop thread %" NSPRIu64 " for fd=%d\n",
+			c->c_connid, c->c_sd);
+		return;
+	}
 #endif
 
 	PR_Lock(c->c_mutex);
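
The underlying hazard is that PR_ASSERT, like the standard assert below, expands to nothing in optimized (non-DEBUG) builds, so a side-effecting call wrapped in it is silently dropped. A minimal sketch, using the standard assert macro and a hypothetical acquire() in place of connection_acquire_nolock():

    #include <assert.h>
    #include <stdio.h>

    static int acquired = 0;

    /* Stand-in for connection_acquire_nolock(): it has a side effect. */
    static int acquire(void)
    {
        acquired++;
        return 0; /* 0 == success */
    }

    int main(void)
    {
        /* BAD: if NDEBUG is defined (optimized build), the whole expression
         * is compiled out and acquire() never runs. */
        assert(0 == acquire());

        /* GOOD: the call always runs; only the error handling varies. */
        if (acquire() != 0) {
            fprintf(stderr, "acquire failed\n");
            return 1;
        }
        printf("acquired %d time(s)\n", acquired);
        return 0;
    }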

commit d192435486cf6f31f3684828490c610c9521b80e
Author: Mark Reynolds <mreynolds at redhat.com>
Date:   Tue Nov 10 13:54:30 2015 -0500

    Ticket 48325 - Add lib389 test script
    
    Description: Add test script to test replication promotion
    
    https://fedorahosted.org/389/ticket/48325
    
    Reviewed by: wibrown(Thanks!)
    
    (cherry picked from commit a534583fdc7aaaeda11d87fdaf09cfaa603fb48f)

diff --git a/dirsrvtests/tickets/ticket48325_test.py b/dirsrvtests/tickets/ticket48325_test.py
new file mode 100644
index 0000000..3505d1a
--- /dev/null
+++ b/dirsrvtests/tickets/ticket48325_test.py
@@ -0,0 +1,270 @@
+import os
+import sys
+import time
+import ldap
+import logging
+import pytest
+from lib389 import DirSrv, Entry, tools, tasks
+from lib389.tools import DirSrvTools
+from lib389._constants import *
+from lib389.properties import *
+from lib389.tasks import *
+from lib389.utils import *
+
+logging.getLogger(__name__).setLevel(logging.DEBUG)
+log = logging.getLogger(__name__)
+
+installation1_prefix = None
+
+
+class TopologyReplication(object):
+    def __init__(self, master1, hub1, consumer1):
+        master1.open()
+        self.master1 = master1
+        hub1.open()
+        self.hub1 = hub1
+        consumer1.open()
+        self.consumer1 = consumer1
+
+
+ at pytest.fixture(scope="module")
+def topology(request):
+    global installation1_prefix
+    if installation1_prefix:
+        args_instance[SER_DEPLOYED_DIR] = installation1_prefix
+
+    # Creating master 1...
+    master1 = DirSrv(verbose=False)
+    args_instance[SER_HOST] = HOST_MASTER_1
+    args_instance[SER_PORT] = PORT_MASTER_1
+    args_instance[SER_SERVERID_PROP] = SERVERID_MASTER_1
+    args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX
+    args_master = args_instance.copy()
+    master1.allocate(args_master)
+    instance_master1 = master1.exists()
+    if instance_master1:
+        master1.delete()
+    master1.create()
+    master1.open()
+    master1.replica.enableReplication(suffix=SUFFIX, role=REPLICAROLE_MASTER,
+                                      replicaId=REPLICAID_MASTER_1)
+
+    # Creating hub 1...
+    hub1 = DirSrv(verbose=False)
+    args_instance[SER_HOST] = HOST_HUB_1
+    args_instance[SER_PORT] = PORT_HUB_1
+    args_instance[SER_SERVERID_PROP] = SERVERID_HUB_1
+    args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX
+    args_hub = args_instance.copy()
+    hub1.allocate(args_hub)
+    instance_hub1 = hub1.exists()
+    if instance_hub1:
+        hub1.delete()
+    hub1.create()
+    hub1.open()
+    hub1.replica.enableReplication(suffix=SUFFIX, role=REPLICAROLE_HUB,
+                                   replicaId=REPLICAID_HUB_1)
+
+    # Creating consumer 1...
+    consumer1 = DirSrv(verbose=False)
+    args_instance[SER_HOST] = HOST_CONSUMER_1
+    args_instance[SER_PORT] = PORT_CONSUMER_1
+    args_instance[SER_SERVERID_PROP] = SERVERID_CONSUMER_1
+    args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX
+    args_consumer = args_instance.copy()
+    consumer1.allocate(args_consumer)
+    instance_consumer1 = consumer1.exists()
+    if instance_consumer1:
+        consumer1.delete()
+    consumer1.create()
+    consumer1.open()
+    consumer1.changelog.create()
+    consumer1.replica.enableReplication(suffix=SUFFIX,
+                                        role=REPLICAROLE_CONSUMER,
+                                        replicaId=CONSUMER_REPLICAID)
+
+    #
+    # Create all the agreements
+    #
+    # Creating agreement from master 1 to hub 1
+    properties = {RA_NAME: r'meTo_$host:$port',
+                  RA_BINDDN: defaultProperties[REPLICATION_BIND_DN],
+                  RA_BINDPW: defaultProperties[REPLICATION_BIND_PW],
+                  RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD],
+                  RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]}
+    m1_h1_agmt = master1.agreement.create(suffix=SUFFIX, host=hub1.host,
+                                          port=hub1.port,
+                                          properties=properties)
+    if not m1_h1_agmt:
+        log.fatal("Fail to create a master -> hub replica agreement")
+        sys.exit(1)
+    log.debug("%s created" % m1_h1_agmt)
+
+    # Creating agreement from hub 1 to consumer 1
+    properties = {RA_NAME: r'meTo_$host:$port',
+                  RA_BINDDN: defaultProperties[REPLICATION_BIND_DN],
+                  RA_BINDPW: defaultProperties[REPLICATION_BIND_PW],
+                  RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD],
+                  RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]}
+    h1_c1_agmt = hub1.agreement.create(suffix=SUFFIX, host=consumer1.host,
+                                       port=consumer1.port,
+                                       properties=properties)
+    if not h1_c1_agmt:
+        log.fatal("Fail to create a hub -> consumer replica agreement")
+        sys.exit(1)
+    log.debug("%s created" % h1_c1_agmt)
+
+    # Allow the replicas to get situated with the new agreements...
+    time.sleep(5)
+
+    #
+    # Initialize all the agreements
+    #
+    master1.agreement.init(SUFFIX, HOST_HUB_1, PORT_HUB_1)
+    master1.waitForReplInit(m1_h1_agmt)
+    hub1.agreement.init(SUFFIX, HOST_CONSUMER_1, PORT_CONSUMER_1)
+    hub1.waitForReplInit(h1_c1_agmt)
+
+    # Check replication is working...
+    if master1.testReplication(DEFAULT_SUFFIX, consumer1):
+        log.info('Replication is working.')
+    else:
+        log.fatal('Replication is not working.')
+        assert False
+
+    # Delete each instance in the end
+    def fin():
+        master1.delete()
+        hub1.delete()
+        consumer1.delete()
+        pass
+
+    request.addfinalizer(fin)
+
+    # Clear out the tmp dir
+    master1.clearTmpDir(__file__)
+
+    return TopologyReplication(master1, hub1, consumer1)
+
+
+def checkFirstElement(ds, rid):
+    """
+    Return True if the first RUV element is for the specified rid
+    """
+    try:
+        entry = ds.search_s(DEFAULT_SUFFIX,
+                            ldap.SCOPE_SUBTREE,
+                            REPLICA_RUV_FILTER,
+                            ['nsds50ruv'])
+        assert entry
+        entry = entry[0]
+    except ldap.LDAPError as e:
+        log.fatal('Failed to retrieve RUV entry: %s' % str(e))
+        assert False
+
+    ruv_elements = entry.getValues('nsds50ruv')
+    if ('replica %s ' % rid) in ruv_elements[1]:
+        return True
+    else:
+        return False
+
+
+def test_ticket48325(topology):
+    """
+    Test that the RUV element order is correctly maintained when promoting
+    a hub or consumer.
+    """
+
+    #
+    # Promote consumer to master
+    #
+    try:
+        DN = topology.consumer1.replica._get_mt_entry(DEFAULT_SUFFIX)
+        topology.consumer1.modify_s(DN, [(ldap.MOD_REPLACE,
+                                          'nsDS5ReplicaType',
+                                          '3'),
+                                         (ldap.MOD_REPLACE,
+                                          'nsDS5ReplicaID',
+                                          '1234'),
+                                         (ldap.MOD_REPLACE,
+                                          'nsDS5Flags',
+                                          '1')])
+    except ldap.LDAPError as e:
+        log.fatal('Failed to promote consumer to master: error %s' % str(e))
+        assert False
+    time.sleep(1)
+
+    #
+    # Check ruv has been reordered
+    #
+    if not checkFirstElement(topology.consumer1, '1234'):
+        log.fatal('RUV was not reordered')
+        assert False
+
+    #
+    # Create repl agreement from the newly promoted master to master1
+    #
+    properties = {RA_NAME: r'meTo_$host:$port',
+                  RA_BINDDN: defaultProperties[REPLICATION_BIND_DN],
+                  RA_BINDPW: defaultProperties[REPLICATION_BIND_PW],
+                  RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD],
+                  RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]}
+    new_agmt = topology.consumer1.agreement.create(suffix=SUFFIX,
+                                                   host=topology.master1.host,
+                                                   port=topology.master1.port,
+                                                   properties=properties)
+
+    if not new_agmt:
+        log.fatal("Fail to create new agmt from old consumer to the master")
+        assert False
+
+    #
+    # Test replication is working
+    #
+    if topology.consumer1.testReplication(DEFAULT_SUFFIX, topology.master1):
+        log.info('Replication is working.')
+    else:
+        log.fatal('Replication is not working.')
+        assert False
+
+    #
+    # Promote hub to master
+    #
+    try:
+        DN = topology.hub1.replica._get_mt_entry(DEFAULT_SUFFIX)
+        topology.hub1.modify_s(DN, [(ldap.MOD_REPLACE,
+                                     'nsDS5ReplicaType',
+                                     '3'),
+                                    (ldap.MOD_REPLACE,
+                                     'nsDS5ReplicaID',
+                                     '5678')])
+    except ldap.LDAPError as e:
+        log.fatal('Failed to promote hub to master: error %s' % str(e))
+        assert False
+    time.sleep(1)
+
+    #
+    # Check ruv has been reordered
+    #
+    if not checkFirstElement(topology.hub1, '5678'):
+        log.fatal('RUV was not reordered')
+        assert False
+
+    #
+    # Test replication is working
+    #
+    if topology.hub1.testReplication(DEFAULT_SUFFIX, topology.master1):
+        log.info('Replication is working.')
+    else:
+        log.fatal('Replication is not working.')
+        assert False
+
+    # Done
+    log.info('Test complete')
+
+
+if __name__ == '__main__':
+    # Run isolated
+    # -s for DEBUG mode
+    CURRENT_FILE = os.path.realpath(__file__)
+    pytest.main("-s %s" % CURRENT_FILE)
\ No newline at end of file

commit 1a6390d6ffa743f38be206f7ed7bb0ac3bcfe26b
Author: Noriko Hosoi <nhosoi at redhat.com>
Date:   Tue Nov 10 15:35:41 2015 -0800

    Ticket #48344 - acl - regression - trailing ', (comma)' in macro matched value is not removed.
    
    Description: acl_match_macro_in_target in the acl plug-in returns the matched
    value with a trailing comma, e.g., "o=kaki.com,".  It's used to create a group
    DN, e.g., "cn=Domain Administrators,ou=Groups,o=kaki.com,,o=ace industry,c=us".
    
    Due to the duplicated commas, the bind unexpectedly fails with 50 (insufficient
    access).
    
    In getting the matched value from the target DN, the code checks whether the
    character at the end position is a comma; if it is, '\0' is set there.  The
    position checked was one byte too far ahead.  The bug was introduced by
    #48141 - aci with wildcard and macro not correctly evaluated.
    
    https://fedorahosted.org/389/ticket/48344
    
    Reviewed by mreynolds at redhat.com (Thank you, Mark!!)
    
    (cherry picked from commit 8e421fb9af2752144cc93e62090fd873524c5633)

diff --git a/ldap/servers/plugins/acl/aclutil.c b/ldap/servers/plugins/acl/aclutil.c
index 2f37107..308cf8b 100644
--- a/ldap/servers/plugins/acl/aclutil.c
+++ b/ldap/servers/plugins/acl/aclutil.c
@@ -935,7 +935,7 @@ acl_match_macro_in_target( const char *ndn, char * match_this,
 
 					matched_val_len = ndn_len-macro_suffix_len-
 										ndn_prefix_end;
-					if (ndn[ndn_len - macro_suffix_len] == ',')
+					if (ndn[ndn_len - macro_suffix_len - 1] == ',')
 						matched_val_len -= 1;
 					
 					matched_val = (char *)slapi_ch_malloc(matched_val_len + 1);
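
To see the off-by-one: ndn[ndn_len - macro_suffix_len] is the first character of the macro suffix itself, while the last character of the matched value sits one byte earlier. A self-contained sketch of the corrected arithmetic, with hypothetical names reduced from acl_match_macro_in_target:

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    /* Hypothetical reduction of the macro-matching fix: extract the part of
     * "ndn" between prefix_end and the macro suffix, dropping a trailing
     * comma.  prefix_end is taken as given here; the real code computes it. */
    static char *matched_value(const char *ndn, size_t prefix_end,
                               const char *suffix)
    {
        size_t ndn_len = strlen(ndn);
        size_t suffix_len = strlen(suffix);
        size_t len = ndn_len - suffix_len - prefix_end;
        char *val;

        /* The last character of the candidate value is at index
         * ndn_len - suffix_len - 1; index ndn_len - suffix_len is already
         * the first character of the suffix. */
        if (ndn[ndn_len - suffix_len - 1] == ',')
            len -= 1;

        val = malloc(len + 1);
        memcpy(val, ndn + prefix_end, len);
        val[len] = '\0';
        return val;
    }

    int main(void)
    {
        char *v = matched_value("cn=x,o=kaki.com,o=ace industry,c=us", 5,
                                "o=ace industry,c=us");
        printf("matched: \"%s\"\n", v); /* "o=kaki.com", no trailing comma */
        free(v);
        return 0;
    }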

commit 6180b91c3f65e9c5e375816a72baa95678458a0a
Author: Mark Reynolds <mreynolds at redhat.com>
Date:   Fri Nov 6 14:41:36 2015 -0500

    Ticket 48325 - Replica promotion leaves RUV out of order
    
    Bug Description:  When promoting a consumer to a master the new RUV
                      element is appended to the RUV.  However, when trying
                      to replicate from the newly promoted replica the
                      remote replica checks the first element in the RUV
                      and sees that it's the same replica ID, and aborts the
                      replication session.  Essentially this completely
                      breaks replication between the two servers, and can
                      actually corrupt other RUVs on other replicas.
    
    Fix Description:  When promoting a replica to a master, reorder the RUV
                      so that the new element is the first in the list.
    
    https://fedorahosted.org/389/ticket/48325
    
    Reviewed by: nhosoi(Thanks!)
    
    (cherry picked from commit b896840d270a540698f35a4aac4f7a91742952b0)

diff --git a/ldap/servers/plugins/replication/repl5_replica_config.c b/ldap/servers/plugins/replication/repl5_replica_config.c
index e85ae3e..4d7135c 100644
--- a/ldap/servers/plugins/replication/repl5_replica_config.c
+++ b/ldap/servers/plugins/replication/repl5_replica_config.c
@@ -1003,6 +1003,7 @@ replica_config_change_type_and_id (Replica *r, const char *new_type,
                 csngen_rewrite_rid(gen, rid);
                 if(purl && type == REPLICA_TYPE_UPDATABLE){
                     ruv_add_replica(ruv, rid, purl);
+                    ruv_move_local_supplier_to_first(ruv, rid);
                     replica_reset_csn_pl(r);
                 }
                 ruv_delete_replica(ruv, oldrid);
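
A toy illustration of what ruv_move_local_supplier_to_first() has to achieve; real RUV elements carry more than a replica ID, so the array of ints and the helper below are hypothetical reductions:

    #include <stdio.h>

    /* Promotion appends the new rid to the RUV; this moves it to the front
     * so peers see the local supplier's element first. */
    static void move_rid_to_first(int *rids, int n, int rid)
    {
        int i, j;
        for (i = 0; i < n; i++) {
            if (rids[i] == rid) {
                for (j = i; j > 0; j--)
                    rids[j] = rids[j - 1]; /* shift earlier elements down */
                rids[0] = rid;
                return;
            }
        }
    }

    int main(void)
    {
        int ruv[] = { 1, 2, 1234 };      /* 1234 appended by promotion */
        int i;

        move_rid_to_first(ruv, 3, 1234);
        for (i = 0; i < 3; i++)
            printf("%d ", ruv[i]);       /* prints: 1234 1 2 */
        printf("\n");
        return 0;
    }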

commit 8f49d33d30fade7b579062414250a0ddb1a66c62
Author: Noriko Hosoi <nhosoi at redhat.com>
Date:   Thu Nov 5 10:44:08 2015 -0800

    Ticket #48338 - SimplePagedResults -- abandon could happen between the abandon check and sending results
    
    Description: An abandon request for a SimplePagedResults request could
    arrive between the abandon check and the code for sending the search
    results.  The abandon frees the search results although the code that
    sends the results still refers to them.
    
    Fix description: The code in op_shared_search (from getting the search
    results through sending them) is now protected by c_mutex locking.
    
    https://fedorahosted.org/389/ticket/48338
    
    Reviewed by rmeggins at redhat.com (Thank you, Rich!!)
    
    (cherry picked from commit 390b8bd9076e8976facc0858e60985d6b4fac05c)

diff --git a/ldap/servers/slapd/opshared.c b/ldap/servers/slapd/opshared.c
index dcdbb04..586ca1f 100644
--- a/ldap/servers/slapd/opshared.c
+++ b/ldap/servers/slapd/opshared.c
@@ -500,7 +500,7 @@ op_shared_search (Slapi_PBlock *pb, int send_result)
                       be = be_list[index];
                   }
               }
-              pr_search_result = pagedresults_get_search_result(pb->pb_conn, operation, pr_idx);
+              pr_search_result = pagedresults_get_search_result(pb->pb_conn, operation, 0/*not locked*/, pr_idx);
               estimate = pagedresults_get_search_result_set_size_estimate(pb->pb_conn, operation, pr_idx);
               if (pagedresults_get_unindexed(pb->pb_conn, operation, pr_idx)) {
                   opnote |= SLAPI_OP_NOTE_UNINDEXED;
@@ -675,13 +675,15 @@ op_shared_search (Slapi_PBlock *pb, int send_result)
        * In async paged result case, the search result might be released
        * by other theads.  We need to double check it in the locked region.
        */
-      pr_search_result = pagedresults_get_search_result(pb->pb_conn, operation, pr_idx);
+      PR_Lock(pb->pb_conn->c_mutex);
+      pr_search_result = pagedresults_get_search_result(pb->pb_conn, operation, 1/*locked*/, pr_idx);
       if (pr_search_result) {
-        if (pagedresults_is_abandoned_or_notavailable(pb->pb_conn, pr_idx)) {
+        if (pagedresults_is_abandoned_or_notavailable(pb->pb_conn, 1/*locked*/, pr_idx)) {
           pagedresults_unlock(pb->pb_conn, pr_idx);
           /* Previous operation was abandoned and the simplepaged object is not in use. */
           send_ldap_result(pb, 0, NULL, "Simple Paged Results Search abandoned", 0, NULL);
           rc = LDAP_SUCCESS;
+          PR_Unlock(pb->pb_conn->c_mutex);
           goto free_and_return;
         } else {
           slapi_pblock_set( pb, SLAPI_SEARCH_RESULT_SET, pr_search_result );
@@ -689,7 +691,8 @@ op_shared_search (Slapi_PBlock *pb, int send_result)
 
           /* search result could be reset in the backend/dse */
           slapi_pblock_get(pb, SLAPI_SEARCH_RESULT_SET, &sr);
-          pagedresults_set_search_result(pb->pb_conn, operation, sr, 0, pr_idx);
+          pagedresults_set_search_result(pb->pb_conn, operation, sr, 1/*locked*/, pr_idx);
+          PR_Unlock(pb->pb_conn->c_mutex);
         }
       } else {
         pr_stat = PAGEDRESULTS_SEARCH_END;
@@ -720,7 +723,7 @@ op_shared_search (Slapi_PBlock *pb, int send_result)
       if (PAGEDRESULTS_SEARCH_END == pr_stat) {
         pagedresults_lock(pb->pb_conn, pr_idx);
         slapi_pblock_set(pb, SLAPI_SEARCH_RESULT_SET, NULL);
-        if (!pagedresults_is_abandoned_or_notavailable(pb->pb_conn, pr_idx)) {
+        if (!pagedresults_is_abandoned_or_notavailable(pb->pb_conn, 0/*not locked*/, pr_idx)) {
           pagedresults_free_one(pb->pb_conn, operation, pr_idx);
         }
         pagedresults_unlock(pb->pb_conn, pr_idx);
diff --git a/ldap/servers/slapd/pagedresults.c b/ldap/servers/slapd/pagedresults.c
index 87447c4..4458cfb 100644
--- a/ldap/servers/slapd/pagedresults.c
+++ b/ldap/servers/slapd/pagedresults.c
@@ -395,20 +395,25 @@ pagedresults_set_current_be(Connection *conn, Slapi_Backend *be, int index, int
 }
 
 void *
-pagedresults_get_search_result(Connection *conn, Operation *op, int index)
+pagedresults_get_search_result(Connection *conn, Operation *op, int locked, int index)
 {
     void *sr = NULL;
     if (!op_is_pagedresults(op)) {
         return sr; /* noop */
     }
-    LDAPDebug1Arg(LDAP_DEBUG_TRACE,
-                  "--> pagedresults_get_search_result: idx=%d\n", index);
+    LDAPDebug2Args(LDAP_DEBUG_TRACE,
+                   "--> pagedresults_get_search_result(%s): idx=%d\n",
+                   locked?"locked":"not locked", index);
     if (conn && (index > -1)) {
-        PR_Lock(conn->c_mutex);
+        if (!locked) {
+            PR_Lock(conn->c_mutex);
+        }
         if (index < conn->c_pagedresults.prl_maxlen) {
             sr = conn->c_pagedresults.prl_list[index].pr_search_result_set;
         }
-        PR_Unlock(conn->c_mutex);
+        if (!locked) {
+            PR_Unlock(conn->c_mutex);
+        }
     }
     LDAPDebug1Arg(LDAP_DEBUG_TRACE,
                   "<-- pagedresults_get_search_result: %p\n", sr);
@@ -416,8 +421,7 @@ pagedresults_get_search_result(Connection *conn, Operation *op, int index)
 }
 
 int
-pagedresults_set_search_result(Connection *conn, Operation *op, void *sr, 
-                               int locked, int index)
+pagedresults_set_search_result(Connection *conn, Operation *op, void *sr, int locked, int index)
 {
     int rc = -1;
     if (!op_is_pagedresults(op)) {
@@ -1003,15 +1007,19 @@ pagedresults_unlock( Connection *conn, int index )
 }
 
 int
-pagedresults_is_abandoned_or_notavailable( Connection *conn, int index )
+pagedresults_is_abandoned_or_notavailable(Connection *conn, int locked, int index)
 {
     PagedResults *prp;
     if (!conn || (index < 0) || (index >= conn->c_pagedresults.prl_maxlen)) {
         return 1; /* not abandoned, but do not want to proceed paged results op. */
     }
-    PR_Lock(conn->c_mutex);
+    if (!locked) {
+        PR_Lock(conn->c_mutex);
+    }
     prp = conn->c_pagedresults.prl_list + index;
-    PR_Unlock(conn->c_mutex);
+    if (!locked) {
+        PR_Unlock(conn->c_mutex);
+    }
     return prp->pr_flags & CONN_FLAG_PAGEDRESULTS_ABANDONED;
 }
 
diff --git a/ldap/servers/slapd/proto-slap.h b/ldap/servers/slapd/proto-slap.h
index b10c1eb..e1cb53e 100644
--- a/ldap/servers/slapd/proto-slap.h
+++ b/ldap/servers/slapd/proto-slap.h
@@ -1445,8 +1445,7 @@ void pagedresults_set_response_control(Slapi_PBlock *pb, int iscritical,
                                        int curr_search_count, int index);
 Slapi_Backend *pagedresults_get_current_be(Connection *conn, int index);
 int pagedresults_set_current_be(Connection *conn, Slapi_Backend *be, int index, int nolock);
-void *pagedresults_get_search_result(Connection *conn, Operation *op,
-                                     int index);
+void *pagedresults_get_search_result(Connection *conn, Operation *op, int locked, int index);
 int pagedresults_set_search_result(Connection *conn, Operation *op, void *sr, 
                                    int locked, int index);
 int pagedresults_get_search_result_count(Connection *conn, Operation *op,
@@ -1487,7 +1486,7 @@ int pagedresults_cleanup_all(Connection *conn, int needlock);
 void op_set_pagedresults(Operation *op);
 void pagedresults_lock(Connection *conn, int index);
 void pagedresults_unlock(Connection *conn, int index);
-int pagedresults_is_abandoned_or_notavailable(Connection *conn, int index);
+int pagedresults_is_abandoned_or_notavailable(Connection *conn, int locked, int index);
 int pagedresults_set_search_result_pb(Slapi_PBlock *pb, void *sr, int locked);
 
 /*
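
The locked parameter threaded through these functions is the usual caller-owns-the-lock pattern: the caller states whether c_mutex is already held so the helper does not try to take it a second time (and self-deadlock). A minimal pthread-based sketch with hypothetical types, not the real Connection:

    #include <pthread.h>
    #include <stdio.h>

    typedef struct { pthread_mutex_t mutex; void *search_result; } Conn;

    /* The caller tells the helper whether conn->mutex is already held, so
     * the same helper works inside and outside a locked region. */
    static void *get_search_result(Conn *conn, int locked)
    {
        void *sr;
        if (!locked)
            pthread_mutex_lock(&conn->mutex);
        sr = conn->search_result;
        if (!locked)
            pthread_mutex_unlock(&conn->mutex);
        return sr;
    }

    int main(void)
    {
        Conn c = { PTHREAD_MUTEX_INITIALIZER, "result-set" };

        /* Outside any locked region: the helper takes the lock itself. */
        printf("%s\n", (char *)get_search_result(&c, 0));

        /* Inside a locked region: the caller holds the mutex across several
         * accesses, and the helper must not try to lock again. */
        pthread_mutex_lock(&c.mutex);
        printf("%s\n", (char *)get_search_result(&c, 1));
        pthread_mutex_unlock(&c.mutex);
        return 0;
    }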

commit 55434d308b4e459ba3a169eff94568312dba767c
Author: Thierry Bordaz <tbordaz at redhat.com>
Date:   Tue Nov 3 15:59:54 2015 +0100

    Ticket 47976: deadlock in mep delete post op
    
    Bug Description:
    	When deleting the original entry, some DB pages are acquired for writing.
    	The deadlock occurs because, when reading the parent entry of the MEP entry,
    	the MEP plugin requires read access to one of the pages acquired by the
    	deletion of the original entry.  The read access can be granted if the read
    	uses the parent txn.  The bug requires that the parent entry of the MEP
    	entry is not found in the entry cache, so that reading it requires database
    	access.
    
    Fix Description:
    	Fix ldbm_delete, so that it reads id2entry db with parent txn
    
    https://fedorahosted.org/389/ticket/47976
    
    Reviewed by: Ludwig Krispenz, Rich Megginson (Thank you!!)
    
    Platforms tested: F17
    
    Flag Day: no
    
    Doc impact: no

diff --git a/ldap/servers/slapd/back-ldbm/ldbm_delete.c b/ldap/servers/slapd/back-ldbm/ldbm_delete.c
index f31d545..100a71d 100644
--- a/ldap/servers/slapd/back-ldbm/ldbm_delete.c
+++ b/ldap/servers/slapd/back-ldbm/ldbm_delete.c
@@ -477,7 +477,7 @@ ldbm_back_delete( Slapi_PBlock *pb )
 					 * the parent.  If we fail to lock the entry, just try again.
 					 */
 					while(1){
-						parent = id2entry(be, pid ,NULL, &retval);
+						parent = id2entry(be, pid ,&txn, &retval);
 						if (parent && (cache_retry = cache_lock_entry(&inst->inst_cache, parent))) {
 							/* Failed to obtain parent entry's entry lock */
 							if(cache_retry == RETRY_CACHE_LOCK &&
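
Why the parent txn matters: a page write-locked by a transaction blocks readers from other transactions, but not a read performed under that same transaction. A toy model of that rule (the lock table below is an assumption about BDB-style lock inheritance, not the real libdb API):

    #include <stdio.h>

    /* One write-locked page; txn 0 models "no txn" (the old NULL argument). */
    typedef struct { int page; int write_owner; } LockTable;

    static int can_read(const LockTable *lt, int page, int txn)
    {
        if (lt->page != page || lt->write_owner == 0)
            return 1;                  /* page not write-locked */
        return lt->write_owner == txn; /* only the owning txn may read */
    }

    int main(void)
    {
        LockTable lt = { 42, 7 };  /* page 42 write-locked by txn 7 (the delete) */

        /* Reading the parent entry with a NULL txn blocks behind the
         * delete's own lock: the deadlock. */
        printf("read with NULL txn:   %s\n",
               can_read(&lt, 42, 0) ? "ok" : "blocks");

        /* Reading with the delete's parent txn is granted. */
        printf("read with parent txn: %s\n",
               can_read(&lt, 42, 7) ? "ok" : "blocks");
        return 0;
    }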

commit a8d30b356f312b24132f4ced324a67601b7cfb9b
Author: Mark Reynolds <mreynolds at redhat.com>
Date:   Fri Oct 23 15:17:44 2015 -0400

    Ticket 48311 - nunc-stans: Attempt to release connection that
      is not acquired
    
    Bug Description:  ns_connection_post_io_or_closing() was not acquiring the
                      connection in the optimized build, which led to the connection
                      ref count getting out of sequence.
    
    Fix Description:  Do not call connection_acquire_nolock() inside a PR_ASSERT call.
    
                      Also changed other PR_ASSERTs to only be called if DEBUG is set.
    
    https://fedorahosted.org/389/ticket/48311
    
    Reviewed by: nhosoi(Thanks!)
    
    (cherry picked from commit 97946bd212c5094a490d6a3429e0d5763ccd39ce)

diff --git a/ldap/servers/slapd/daemon.c b/ldap/servers/slapd/daemon.c
index ba73da3..82099bc 100644
--- a/ldap/servers/slapd/daemon.c
+++ b/ldap/servers/slapd/daemon.c
@@ -1836,7 +1836,11 @@ ns_handle_closure(struct ns_job_t *job)
 	int do_yield = 0;
 
 	/* this function must be called from the event loop thread */
+#ifdef DEBUG
 	PR_ASSERT(0 == NS_JOB_IS_THREAD(ns_job_get_type(job)));
+#else
+	NS_JOB_IS_THREAD(ns_job_get_type(job));
+#endif
 	PR_Lock(c->c_mutex);
 	connection_release_nolock_ext(c, 1); /* release ref acquired for event framework */
 	PR_ASSERT(c->c_ns_close_jobs == 1); /* should be exactly 1 active close job - this one */
@@ -1889,7 +1893,11 @@ ns_connection_post_io_or_closing(Connection *conn)
 		/* process event normally - wait for I/O until idletimeout */
 		tv.tv_sec = conn->c_idletimeout;
 		tv.tv_usec = 0;
-		PR_ASSERT(0 == connection_acquire_nolock(conn)); /* event framework now has a reference */
+#ifdef DEBUG
+		PR_ASSERT(0 == connection_acquire_nolock(conn));
+#else
+		connection_acquire_nolock(conn); /* event framework now has a reference */
+#endif
 		ns_add_io_timeout_job(conn->c_tp, conn->c_prfd, &tv,
 				      NS_JOB_READ|NS_JOB_PRESERVE_FD,
 				      ns_handle_pr_read_ready, conn, NULL);
@@ -1911,7 +1919,11 @@ ns_handle_pr_read_ready(struct ns_job_t *job)
 	Connection *c = (Connection *)ns_job_get_data(job);
 
 	/* this function must be called from the event loop thread */
+#ifdef DEBUG
 	PR_ASSERT(0 == NS_JOB_IS_THREAD(ns_job_get_type(job)));
+#else
+	NS_JOB_IS_THREAD(ns_job_get_type(job));
+#endif
 
 	PR_Lock(c->c_mutex);
 	LDAPDebug2Args(LDAP_DEBUG_CONNS, "activity on conn %" NSPRIu64 " for fd=%d\n",

commit f27c164a1e04d76ec2cff0422e3073077d2f7cb8
Author: Thierry Bordaz <tbordaz at redhat.com>
Date:   Fri Oct 16 18:18:01 2015 +0200

    Ticket 47978: Deadlock between two MODs on the same entry between entry cache and backend lock
    
    Bug Description:
    	During a modify, the modified entry gets into the entry cache and is locked.
    	If, after the be_txn_postop/txn_commit and before the modify returns, the
    	modified entry gets out of the entry cache, the entry is not unlocked.
    	This can lead to a hang as soon as another write operation hits that
    	still-locked entry.
    
    	This is a side effect of fix:
    		#47834 - Tombstone_to_glue: if parents are also converted to glue, the target entry's DN must be adjusted.
    
    Fix Description:
    	When the entry is locked, set a flag so that it can later be unlocked
    	independently of its presence in the entry cache.
    
    https://fedorahosted.org/389/ticket/47978
    
    Reviewed by: Noriko Hosoi (Thanks Noriko)
    
    Platforms tested: F22 (IPA CI test test_integration/test_vault.py, one failure out 2-4)
    
    Flag Day: no
    
    Doc impact: no

diff --git a/ldap/servers/slapd/back-ldbm/ldbm_modify.c b/ldap/servers/slapd/back-ldbm/ldbm_modify.c
index 2682bcf..83b7b55 100644
--- a/ldap/servers/slapd/back-ldbm/ldbm_modify.c
+++ b/ldap/servers/slapd/back-ldbm/ldbm_modify.c
@@ -391,6 +391,7 @@ ldbm_back_modify( Slapi_PBlock *pb )
 	int mod_count = 0;
 	int not_an_error = 0;
 	int fixup_tombstone = 0;
+	int ec_locked = 0;
 
 	slapi_pblock_get( pb, SLAPI_BACKEND, &be);
 	slapi_pblock_get( pb, SLAPI_PLUGIN_PRIVATE, &li );
@@ -799,6 +800,7 @@ ldbm_back_modify( Slapi_PBlock *pb )
 	CACHE_RETURN( &inst->inst_cache, &e );
 	/* lock new entry in cache to prevent usage until we are complete */
 	cache_lock_entry( &inst->inst_cache, ec );
+	ec_locked = 1;
 	postentry = slapi_entry_dup( ec->ep_entry );
 	slapi_pblock_set( pb, SLAPI_ENTRY_POST_OP, postentry );
 
@@ -919,7 +921,7 @@ common_return:
 	slapi_mods_done(&smods);
 	
 	if (inst) {
-		if (cache_is_in_cache(&inst->inst_cache, ec)) {
+		if (ec_locked || cache_is_in_cache(&inst->inst_cache, ec)) {
 			cache_unlock_entry(&inst->inst_cache, ec);
 		} else if (e) {
 			/* if ec was not in cache, cache_replace was not done.
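
The new ec_locked flag separates "did we lock ec" from "is ec still in the cache". A reduced sketch, with hypothetical types, of why keying the unlock on cache membership alone leaks the lock when the entry is evicted in between:

    #include <stdio.h>

    typedef struct { int locked; int in_cache; } Entry;

    static void lock_entry(Entry *e)   { e->locked = 1; }
    static void unlock_entry(Entry *e) { e->locked = 0; }

    /* Cleanup keyed on a flag set at lock time.  Testing cache membership
     * instead (the old behaviour) skips the unlock if the entry was evicted
     * between locking and cleanup, leaving it locked forever. */
    static void cleanup(Entry *e, int ec_locked)
    {
        if (ec_locked || e->in_cache)
            unlock_entry(e);
    }

    int main(void)
    {
        Entry e = { 0, 1 };
        int ec_locked = 0;

        lock_entry(&e);
        ec_locked = 1;
        e.in_cache = 0;            /* entry evicted before cleanup */
        cleanup(&e, ec_locked);
        printf("locked after cleanup: %d\n", e.locked); /* 0: no hang */
        return 0;
    }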

commit 2a05a3a75ce082a28375cb2f687739d7474164a1
Author: Noriko Hosoi <nhosoi at redhat.com>
Date:   Thu Oct 8 11:43:36 2015 -0700

    Ticket #48305 - perl module conditional test is not conditional when checking SELinux policies
    
    Description: If sestatus reports SELinux is disabled, SELinux commands
    such as semanage and restorecon fail.  This patch checks the availability
    of sestatus and calls the SELinux commands only if the status says
    SELinux is enabled.
    
    https://fedorahosted.org/389/ticket/48305
    
    Reviewed by mreynolds at redhat.com (Thank you, Mark!!)
    
    (cherry picked from commit 9fefc13c02c9ae037fad053152193794706aaa31)

diff --git a/ldap/admin/src/scripts/DSCreate.pm.in b/ldap/admin/src/scripts/DSCreate.pm.in
index cdde339..3ce5a73 100644
--- a/ldap/admin/src/scripts/DSCreate.pm.in
+++ b/ldap/admin/src/scripts/DSCreate.pm.in
@@ -956,9 +956,11 @@ sub setDefaults {
 
 sub updateSelinuxPolicy {
     my $inf = shift;
+    my $mydevnull = (-f "/dev/null" ? " /dev/null " : " NUL ");
 
     # if selinux is not available, do nothing
-    if ((getLogin() eq 'root') and "@with_selinux@") {
+    if ((getLogin() eq 'root') and "@with_selinux@" and
+        -f "@sbindir@/sestatus" and !system ("@sbindir@/sestatus | egrep -i \"selinux status:\\s*enabled\" > $mydevnull 2>&1")) {
         my $localstatedir = $inf->{slapd}->{localstatedir};
 
         # run restorecon on all of the parent directories we
@@ -1404,7 +1406,9 @@ sub removeDSInstance {
     }
 
     # remove the selinux label from the ports if needed
-    if ((getLogin() eq 'root') and "@with_selinux@") {
+    my $mydevnull = (-f "/dev/null" ? " /dev/null " : " NUL ");
+    if ((getLogin() eq 'root') and "@with_selinux@" and
+        -f "@sbindir@/sestatus" and !system ("@sbindir@/sestatus | egrep -i \"selinux status:\\s*enabled\" > $mydevnull 2>&1")) {
         foreach my $port (@{$entry->{"nsslapd-port"}}) 
         {
             my $semanage_err;

commit 7ee822e1a246d6d32a52dce5f7ad3ed9e491490d
Author: Simon Pichugin <spichugi at redhat.com>
Date:   Tue Oct 13 14:10:59 2015 +0200

    Ticket 47957 - Add replication test suite for a wait async feature
    
    Description: Test new attribute "nsDS5ReplicaWaitForAsyncResults".
                 After setting the attribute, the supplier will sleep the
                 configured number of milliseconds if it finds the response
                 from the consumer is not ready.
    
    Tests:
    - non-integer value;
    - multiple values;
    - check that the value has been set correctly [None, 2000, 0, -5];


