Forward-port 3.1.8 upgrade script
author     Dan Wells <dbw2@calvin.edu>
           Tue, 27 Nov 2018 22:02:59 +0000 (17:02 -0500)
committer  Dan Wells <dbw2@calvin.edu>
           Tue, 27 Nov 2018 22:02:59 +0000 (17:02 -0500)
Signed-off-by: Dan Wells <dbw2@calvin.edu>

Open-ILS/src/sql/Pg/version-upgrade/3.1.7-3.1.8-upgrade-db.sql [new file with mode: 0644]

diff --git a/Open-ILS/src/sql/Pg/version-upgrade/3.1.7-3.1.8-upgrade-db.sql b/Open-ILS/src/sql/Pg/version-upgrade/3.1.7-3.1.8-upgrade-db.sql
new file mode 100644
index 0000000..fa0a6be
--- /dev/null
@@ -0,0 +1,560 @@
+--Upgrade Script for 3.1.7 to 3.1.8
+\set eg_version '''3.1.8'''
+BEGIN;
+INSERT INTO config.upgrade_log (version, applied_to) VALUES ('3.1.8', :eg_version);
+
+SELECT evergreen.upgrade_deps_block_check('1134', :eg_version);
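+-- upgrade_deps_block_check() is Evergreen's standard upgrade guard: it consults
+-- config.upgrade_log (and config.db_patch_dependencies) and raises an exception
+-- if patch 1134 has already been applied or conflicts with one that has,
+-- aborting the transaction instead of double-applying the change.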
+
+CREATE OR REPLACE FUNCTION metabib.staged_browse(query text, fields integer[], context_org integer, context_locations integer[], staff boolean, browse_superpage_size integer, count_up_from_zero boolean, result_limit integer, next_pivot_pos integer)
+ RETURNS SETOF metabib.flat_browse_entry_appearance
+AS $f$
+DECLARE
+    curs                    REFCURSOR;
+    rec                     RECORD;
+    qpfts_query             TEXT;
+    aqpfts_query            TEXT;
+    afields                 INT[];
+    bfields                 INT[];
+    result_row              metabib.flat_browse_entry_appearance%ROWTYPE;
+    results_skipped         INT := 0;
+    row_counter             INT := 0;
+    row_number              INT;
+    slice_start             INT;
+    slice_end               INT;
+    full_end                INT;
+    all_records             BIGINT[];
+    all_brecords             BIGINT[];
+    all_arecords            BIGINT[];
+    superpage_of_records    BIGINT[];
+    superpage_size          INT;
+    c_tests                 TEXT := '';
+    b_tests                 TEXT := '';
+    c_orgs                  INT[];
+    unauthorized_entry      RECORD;
+BEGIN
+    IF count_up_from_zero THEN
+        row_number := 0;
+    ELSE
+        row_number := -1;
+    END IF;
+
+    IF NOT staff THEN
+        SELECT x.c_attrs, x.b_attrs INTO c_tests, b_tests FROM asset.patron_default_visibility_mask() x;
+    END IF;
+
+    -- b_tests supplies its own query_int operator, c_tests does not
+    IF c_tests <> '' THEN c_tests := c_tests || '&'; END IF;
+
+    SELECT ARRAY_AGG(id) INTO c_orgs FROM actor.org_unit_descendants(context_org);
+
+    c_tests := c_tests || search.calculate_visibility_attribute_test('circ_lib',c_orgs)
+               || '&' || search.calculate_visibility_attribute_test('owning_lib',c_orgs);
+
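+    -- Rough sketch of the shape these strings take (attribute ids below are
+    -- illustrative only): calculate_visibility_attribute_test() returns a
+    -- query_int fragment such as '(151|152|153)', so c_tests ends up looking
+    -- like '(151|152)&(251|252)', i.e. one of these circ_lib attributes AND
+    -- one of these owning_lib attributes, once cast with ::query_int and
+    -- matched against vis_attr_vector via @@ below.
+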
+    PERFORM 1 FROM config.internal_flag WHERE enabled AND name = 'opac.located_uri.act_as_copy';
+    IF FOUND THEN
+        b_tests := b_tests || search.calculate_visibility_attribute_test(
+            'luri_org',
+            (SELECT ARRAY_AGG(id) FROM actor.org_unit_full_path(context_org) x)
+        );
+    ELSE
+        b_tests := b_tests || search.calculate_visibility_attribute_test(
+            'luri_org',
+            (SELECT ARRAY_AGG(id) FROM actor.org_unit_ancestors(context_org) x)
+        );
+    END IF;
+
+    IF context_locations THEN
+        IF c_tests <> '' THEN c_tests := c_tests || '&'; END IF;
+        c_tests := c_tests || search.calculate_visibility_attribute_test('location',context_locations);
+    END IF;
+
+    OPEN curs NO SCROLL FOR EXECUTE query;
+
+    LOOP
+        FETCH curs INTO rec;
+        IF NOT FOUND THEN
+            IF result_row.pivot_point IS NOT NULL THEN
+                RETURN NEXT result_row;
+            END IF;
+            RETURN;
+        END IF;
+
+        -- Is this entry an unauthorized (variant) heading?
+        SELECT INTO unauthorized_entry *
+        FROM metabib.browse_entry_simple_heading_map mbeshm
+        INNER JOIN authority.simple_heading ash ON ( mbeshm.simple_heading = ash.id )
+        INNER JOIN authority.control_set_authority_field acsaf ON ( acsaf.id = ash.atag )
+        JOIN authority.heading_field ahf ON (ahf.id = acsaf.heading_field)
+        WHERE mbeshm.entry = rec.id
+        AND   ahf.heading_purpose = 'variant';
+
+        -- Gather aggregate data based on the MBE row we're looking at now, authority axis
+        IF (unauthorized_entry.record IS NOT NULL) THEN
+            --unauthorized term belongs to an auth linked to a bib?
+            SELECT INTO all_arecords, result_row.sees, afields
+                    ARRAY_AGG(DISTINCT abl.bib),
+                    STRING_AGG(DISTINCT abl.authority::TEXT, $$,$$),
+                    ARRAY_AGG(DISTINCT map.metabib_field)
+            FROM authority.bib_linking abl
+            INNER JOIN authority.control_set_auth_field_metabib_field_map_refs map ON (
+                    map.authority_field = unauthorized_entry.atag
+                    AND map.metabib_field = ANY(fields)
+            )
+            WHERE abl.authority = unauthorized_entry.record;
+        ELSE
+            --do usual procedure
+            SELECT INTO all_arecords, result_row.sees, afields
+                    ARRAY_AGG(DISTINCT abl.bib), -- bibs to check for visibility
+                    STRING_AGG(DISTINCT aal.source::TEXT, $$,$$), -- authority record ids
+                    ARRAY_AGG(DISTINCT map.metabib_field) -- authority-tag-linked CMF rows
+
+            FROM  metabib.browse_entry_simple_heading_map mbeshm
+                    JOIN authority.simple_heading ash ON ( mbeshm.simple_heading = ash.id )
+                    JOIN authority.authority_linking aal ON ( ash.record = aal.source )
+                    JOIN authority.bib_linking abl ON ( aal.target = abl.authority )
+                    JOIN authority.control_set_auth_field_metabib_field_map_refs map ON (
+                        ash.atag = map.authority_field
+                        AND map.metabib_field = ANY(fields)
+                    )
+                    JOIN authority.control_set_authority_field acsaf ON (
+                        map.authority_field = acsaf.id
+                    )
+                    JOIN authority.heading_field ahf ON (ahf.id = acsaf.heading_field)
+              WHERE mbeshm.entry = rec.id
+              AND   ahf.heading_purpose = 'variant';
+
+        END IF;
+
+        -- Gather aggregate data based on the MBE row we're looking at now, bib axis
+        SELECT INTO all_brecords, result_row.authorities, bfields
+                ARRAY_AGG(DISTINCT source),
+                STRING_AGG(DISTINCT authority::TEXT, $$,$$),
+                ARRAY_AGG(DISTINCT def)
+          FROM  metabib.browse_entry_def_map
+          WHERE entry = rec.id
+                AND def = ANY(fields);
+
+        SELECT INTO result_row.fields STRING_AGG(DISTINCT x::TEXT, $$,$$) FROM UNNEST(afields || bfields) x;
+
+        result_row.sources := 0;
+        result_row.asources := 0;
+
+        -- Bib-linked vis checking
+        IF ARRAY_UPPER(all_brecords,1) IS NOT NULL THEN
+
+            SELECT  INTO result_row.sources COUNT(DISTINCT b.id)
+              FROM  biblio.record_entry b
+                    LEFT JOIN asset.copy_vis_attr_cache acvac ON (acvac.record = b.id)
+              WHERE b.id = ANY(all_brecords[1:browse_superpage_size])
+                    AND (
+                        acvac.vis_attr_vector @@ c_tests::query_int
+                        OR b.vis_attr_vector @@ b_tests::query_int
+                    );
+
+            result_row.accurate := TRUE;
+
+        END IF;
+
+        -- Authority-linked vis checking
+        IF ARRAY_UPPER(all_arecords,1) IS NOT NULL THEN
+
+            SELECT  INTO result_row.asources COUNT(DISTINCT b.id)
+              FROM  biblio.record_entry b
+                    LEFT JOIN asset.copy_vis_attr_cache acvac ON (acvac.record = b.id)
+              WHERE b.id = ANY(all_arecords[1:browse_superpage_size])
+                    AND (
+                        acvac.vis_attr_vector @@ c_tests::query_int
+                        OR b.vis_attr_vector @@ b_tests::query_int
+                    );
+
+            result_row.aaccurate := TRUE;
+
+        END IF;
+
+        IF result_row.sources > 0 OR result_row.asources > 0 THEN
+
+            -- The function that calls this function needs row_number in order
+            -- to correctly order results from two different runs of this
+            -- function.
+            result_row.row_number := row_number;
+
+            -- Now, if row_counter is still less than limit, return a row.  If
+            -- not, but it is less than next_pivot_pos, continue on without
+            -- returning actual result rows until we find
+            -- that next pivot, and return it.
+
+            IF row_counter < result_limit THEN
+                result_row.browse_entry := rec.id;
+                result_row.value := rec.value;
+
+                RETURN NEXT result_row;
+            ELSE
+                result_row.browse_entry := NULL;
+                result_row.authorities := NULL;
+                result_row.fields := NULL;
+                result_row.value := NULL;
+                result_row.sources := NULL;
+                result_row.sees := NULL;
+                result_row.accurate := NULL;
+                result_row.aaccurate := NULL;
+                result_row.pivot_point := rec.id;
+
+                IF row_counter >= next_pivot_pos THEN
+                    RETURN NEXT result_row;
+                    RETURN;
+                END IF;
+            END IF;
+
+            IF count_up_from_zero THEN
+                row_number := row_number + 1;
+            ELSE
+                row_number := row_number - 1;
+            END IF;
+
+            -- row_counter is different from row_number.
+            -- It simply counts up from zero so that we know when
+            -- we've reached our limit.
+            row_counter := row_counter + 1;
+        END IF;
+    END LOOP;
+END;
+$f$ LANGUAGE plpgsql ROWS 10;
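+-- Illustrative call shape only (argument values are invented; in practice the
+-- metabib browse machinery builds the cursor query and calls this function):
+--   SELECT * FROM metabib.staged_browse(
+--       $q$SELECT * FROM metabib.browse_entry WHERE ...$q$,  -- cursor query
+--       ARRAY[8,9],  -- metabib_field ids
+--       1,           -- context_org
+--       NULL,        -- context_locations
+--       FALSE,       -- staff
+--       100,         -- browse_superpage_size
+--       TRUE,        -- count_up_from_zero
+--       10,          -- result_limit
+--       20           -- next_pivot_pos
+--   );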
+
+
+SELECT evergreen.upgrade_deps_block_check('1136', :eg_version);
+
+-- update mods33 data entered by 1100 with a format of 'mods32'
+-- harmless if you have not run 1100 yet
+UPDATE config.metabib_field SET format = 'mods33' WHERE format = 'mods32' and id in (38, 39, 40, 41, 42, 43, 44, 46, 47, 48, 49, 50);
+
+-- change the default format to 'mods33'
+ALTER TABLE config.metabib_field ALTER COLUMN format SET DEFAULT 'mods33'::text;
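+
+-- Optional sanity check (illustrative only, not required by the upgrade):
+--   SELECT id, field_class, name, format FROM config.metabib_field
+--   WHERE id IN (38,39,40,41,42,43,44,46,47,48,49,50);
+-- should now report 'mods33' for any of these rows that previously read 'mods32'.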
+
+
+
+SELECT evergreen.upgrade_deps_block_check('1137', :eg_version);
+
+-- This function upgrade is only for rel_3_1.  The next upgrade script 1138 in master/rel_3_2 is for future releases
+CREATE OR REPLACE FUNCTION actor.usr_merge( src_usr INT, dest_usr INT, del_addrs BOOLEAN, del_cards BOOLEAN, deactivate_cards BOOLEAN ) RETURNS VOID AS $$
+DECLARE
+       suffix TEXT;
+       bucket_row RECORD;
+       picklist_row RECORD;
+       queue_row RECORD;
+       folder_row RECORD;
+BEGIN
+
+    -- Bail if src_usr equals dest_usr because the result of merging a
+    -- user with itself is not what you want.
+    IF src_usr = dest_usr THEN
+        RETURN;
+    END IF;
+
+    -- do some initial cleanup 
+    UPDATE actor.usr SET card = NULL WHERE id = src_usr;
+    UPDATE actor.usr SET mailing_address = NULL WHERE id = src_usr;
+    UPDATE actor.usr SET billing_address = NULL WHERE id = src_usr;
+
+    -- actor.*
+    IF del_cards THEN
+        DELETE FROM actor.card where usr = src_usr;
+    ELSE
+        IF deactivate_cards THEN
+            UPDATE actor.card SET active = 'f' WHERE usr = src_usr;
+        END IF;
+        UPDATE actor.card SET usr = dest_usr WHERE usr = src_usr;
+    END IF;
+
+
+    IF del_addrs THEN
+        DELETE FROM actor.usr_address WHERE usr = src_usr;
+    ELSE
+        UPDATE actor.usr_address SET usr = dest_usr WHERE usr = src_usr;
+    END IF;
+
+    UPDATE actor.usr_note SET usr = dest_usr WHERE usr = src_usr;
+    -- dupes are technically OK in actor.usr_standing_penalty, should manually delete them...
+    UPDATE actor.usr_standing_penalty SET usr = dest_usr WHERE usr = src_usr;
+    PERFORM actor.usr_merge_rows('actor.usr_org_unit_opt_in', 'usr', src_usr, dest_usr);
+    PERFORM actor.usr_merge_rows('actor.usr_setting', 'usr', src_usr, dest_usr);
+
+    -- permission.*
+    PERFORM actor.usr_merge_rows('permission.usr_perm_map', 'usr', src_usr, dest_usr);
+    PERFORM actor.usr_merge_rows('permission.usr_object_perm_map', 'usr', src_usr, dest_usr);
+    PERFORM actor.usr_merge_rows('permission.usr_grp_map', 'usr', src_usr, dest_usr);
+    PERFORM actor.usr_merge_rows('permission.usr_work_ou_map', 'usr', src_usr, dest_usr);
+
+
+    -- container.*
+       
+       -- For each *_bucket table: transfer every bucket belonging to src_usr
+       -- into the custody of dest_usr.
+       --
+       -- In order to avoid colliding with an existing bucket owned by
+       -- the destination user, append the source user's id (in parentheses)
+       -- to the name.  If you still get a collision, add successive
+       -- spaces to the name and keep trying until you succeed.
+       --
+       FOR bucket_row in
+               SELECT id, name
+               FROM   container.biblio_record_entry_bucket
+               WHERE  owner = src_usr
+       LOOP
+               suffix := ' (' || src_usr || ')';
+               LOOP
+                       BEGIN
+                               UPDATE  container.biblio_record_entry_bucket
+                               SET     owner = dest_usr, name = name || suffix
+                               WHERE   id = bucket_row.id;
+                       EXCEPTION WHEN unique_violation THEN
+                               suffix := suffix || ' ';
+                               CONTINUE;
+                       END;
+                       EXIT;
+               END LOOP;
+       END LOOP;
+
+       FOR bucket_row in
+               SELECT id, name
+               FROM   container.call_number_bucket
+               WHERE  owner = src_usr
+       LOOP
+               suffix := ' (' || src_usr || ')';
+               LOOP
+                       BEGIN
+                               UPDATE  container.call_number_bucket
+                               SET     owner = dest_usr, name = name || suffix
+                               WHERE   id = bucket_row.id;
+                       EXCEPTION WHEN unique_violation THEN
+                               suffix := suffix || ' ';
+                               CONTINUE;
+                       END;
+                       EXIT;
+               END LOOP;
+       END LOOP;
+
+       FOR bucket_row in
+               SELECT id, name
+               FROM   container.copy_bucket
+               WHERE  owner = src_usr
+       LOOP
+               suffix := ' (' || src_usr || ')';
+               LOOP
+                       BEGIN
+                               UPDATE  container.copy_bucket
+                               SET     owner = dest_usr, name = name || suffix
+                               WHERE   id = bucket_row.id;
+                       EXCEPTION WHEN unique_violation THEN
+                               suffix := suffix || ' ';
+                               CONTINUE;
+                       END;
+                       EXIT;
+               END LOOP;
+       END LOOP;
+
+       FOR bucket_row in
+               SELECT id, name
+               FROM   container.user_bucket
+               WHERE  owner = src_usr
+       LOOP
+               suffix := ' (' || src_usr || ')';
+               LOOP
+                       BEGIN
+                               UPDATE  container.user_bucket
+                               SET     owner = dest_usr, name = name || suffix
+                               WHERE   id = bucket_row.id;
+                       EXCEPTION WHEN unique_violation THEN
+                               suffix := suffix || ' ';
+                               CONTINUE;
+                       END;
+                       EXIT;
+               END LOOP;
+       END LOOP;
+
+       UPDATE container.user_bucket_item SET target_user = dest_usr WHERE target_user = src_usr;
+
+    -- vandelay.*
+       -- transfer queues the same way we transfer buckets (see above)
+       FOR queue_row in
+               SELECT id, name
+               FROM   vandelay.queue
+               WHERE  owner = src_usr
+       LOOP
+               suffix := ' (' || src_usr || ')';
+               LOOP
+                       BEGIN
+                               UPDATE  vandelay.queue
+                               SET     owner = dest_usr, name = name || suffix
+                               WHERE   id = queue_row.id;
+                       EXCEPTION WHEN unique_violation THEN
+                               suffix := suffix || ' ';
+                               CONTINUE;
+                       END;
+                       EXIT;
+               END LOOP;
+       END LOOP;
+
+    -- money.*
+    PERFORM actor.usr_merge_rows('money.collections_tracker', 'usr', src_usr, dest_usr);
+    PERFORM actor.usr_merge_rows('money.collections_tracker', 'collector', src_usr, dest_usr);
+    UPDATE money.billable_xact SET usr = dest_usr WHERE usr = src_usr;
+    UPDATE money.billing SET voider = dest_usr WHERE voider = src_usr;
+    UPDATE money.bnm_payment SET accepting_usr = dest_usr WHERE accepting_usr = src_usr;
+
+    -- action.*
+    UPDATE action.circulation SET usr = dest_usr WHERE usr = src_usr;
+    UPDATE action.circulation SET circ_staff = dest_usr WHERE circ_staff = src_usr;
+    UPDATE action.circulation SET checkin_staff = dest_usr WHERE checkin_staff = src_usr;
+    UPDATE action.usr_circ_history SET usr = dest_usr WHERE usr = src_usr;
+
+    UPDATE action.hold_request SET usr = dest_usr WHERE usr = src_usr;
+    UPDATE action.hold_request SET fulfillment_staff = dest_usr WHERE fulfillment_staff = src_usr;
+    UPDATE action.hold_request SET requestor = dest_usr WHERE requestor = src_usr;
+    UPDATE action.hold_notification SET notify_staff = dest_usr WHERE notify_staff = src_usr;
+
+    UPDATE action.in_house_use SET staff = dest_usr WHERE staff = src_usr;
+    UPDATE action.non_cataloged_circulation SET staff = dest_usr WHERE staff = src_usr;
+    UPDATE action.non_cataloged_circulation SET patron = dest_usr WHERE patron = src_usr;
+    UPDATE action.non_cat_in_house_use SET staff = dest_usr WHERE staff = src_usr;
+    UPDATE action.survey_response SET usr = dest_usr WHERE usr = src_usr;
+
+    -- acq.*
+    UPDATE acq.fund_allocation SET allocator = dest_usr WHERE allocator = src_usr;
+       UPDATE acq.fund_transfer SET transfer_user = dest_usr WHERE transfer_user = src_usr;
+
+       -- transfer picklists the same way we transfer buckets (see above)
+       FOR picklist_row in
+               SELECT id, name
+               FROM   acq.picklist
+               WHERE  owner = src_usr
+       LOOP
+               suffix := ' (' || src_usr || ')';
+               LOOP
+                       BEGIN
+                               UPDATE  acq.picklist
+                               SET     owner = dest_usr, name = name || suffix
+                               WHERE   id = picklist_row.id;
+                       EXCEPTION WHEN unique_violation THEN
+                               suffix := suffix || ' ';
+                               CONTINUE;
+                       END;
+                       EXIT;
+               END LOOP;
+       END LOOP;
+
+    UPDATE acq.purchase_order SET owner = dest_usr WHERE owner = src_usr;
+    UPDATE acq.po_note SET creator = dest_usr WHERE creator = src_usr;
+    UPDATE acq.po_note SET editor = dest_usr WHERE editor = src_usr;
+    UPDATE acq.provider_note SET creator = dest_usr WHERE creator = src_usr;
+    UPDATE acq.provider_note SET editor = dest_usr WHERE editor = src_usr;
+    UPDATE acq.lineitem_note SET creator = dest_usr WHERE creator = src_usr;
+    UPDATE acq.lineitem_note SET editor = dest_usr WHERE editor = src_usr;
+    UPDATE acq.lineitem_usr_attr_definition SET usr = dest_usr WHERE usr = src_usr;
+
+    -- asset.*
+    UPDATE asset.copy SET creator = dest_usr WHERE creator = src_usr;
+    UPDATE asset.copy SET editor = dest_usr WHERE editor = src_usr;
+    UPDATE asset.copy_note SET creator = dest_usr WHERE creator = src_usr;
+    UPDATE asset.call_number SET creator = dest_usr WHERE creator = src_usr;
+    UPDATE asset.call_number SET editor = dest_usr WHERE editor = src_usr;
+    UPDATE asset.call_number_note SET creator = dest_usr WHERE creator = src_usr;
+
+    -- serial.*
+    UPDATE serial.record_entry SET creator = dest_usr WHERE creator = src_usr;
+    UPDATE serial.record_entry SET editor = dest_usr WHERE editor = src_usr;
+
+    -- reporter.*
+    -- It's not uncommon to define the reporter schema in a replica 
+    -- DB only, so don't assume these tables exist in the write DB.
+    BEGIN
+       UPDATE reporter.template SET owner = dest_usr WHERE owner = src_usr;
+    EXCEPTION WHEN undefined_table THEN
+        -- do nothing
+    END;
+    BEGIN
+       UPDATE reporter.report SET owner = dest_usr WHERE owner = src_usr;
+    EXCEPTION WHEN undefined_table THEN
+        -- do nothing
+    END;
+    BEGIN
+       UPDATE reporter.schedule SET runner = dest_usr WHERE runner = src_usr;
+    EXCEPTION WHEN undefined_table THEN
+        -- do nothing
+    END;
+    BEGIN
+               -- transfer folders the same way we transfer buckets (see above)
+               FOR folder_row in
+                       SELECT id, name
+                       FROM   reporter.template_folder
+                       WHERE  owner = src_usr
+               LOOP
+                       suffix := ' (' || src_usr || ')';
+                       LOOP
+                               BEGIN
+                                       UPDATE  reporter.template_folder
+                                       SET     owner = dest_usr, name = name || suffix
+                                       WHERE   id = folder_row.id;
+                               EXCEPTION WHEN unique_violation THEN
+                                       suffix := suffix || ' ';
+                                       CONTINUE;
+                               END;
+                               EXIT;
+                       END LOOP;
+               END LOOP;
+    EXCEPTION WHEN undefined_table THEN
+        -- do nothing
+    END;
+    BEGIN
+               -- transfer folders the same way we transfer buckets (see above)
+               FOR folder_row in
+                       SELECT id, name
+                       FROM   reporter.report_folder
+                       WHERE  owner = src_usr
+               LOOP
+                       suffix := ' (' || src_usr || ')';
+                       LOOP
+                               BEGIN
+                                       UPDATE  reporter.report_folder
+                                       SET     owner = dest_usr, name = name || suffix
+                                       WHERE   id = folder_row.id;
+                               EXCEPTION WHEN unique_violation THEN
+                                       suffix := suffix || ' ';
+                                       CONTINUE;
+                               END;
+                               EXIT;
+                       END LOOP;
+               END LOOP;
+    EXCEPTION WHEN undefined_table THEN
+        -- do nothing
+    END;
+    BEGIN
+               -- transfer folders the same way we transfer buckets (see above)
+               FOR folder_row in
+                       SELECT id, name
+                       FROM   reporter.output_folder
+                       WHERE  owner = src_usr
+               LOOP
+                       suffix := ' (' || src_usr || ')';
+                       LOOP
+                               BEGIN
+                                       UPDATE  reporter.output_folder
+                                       SET     owner = dest_usr, name = name || suffix
+                                       WHERE   id = folder_row.id;
+                               EXCEPTION WHEN unique_violation THEN
+                                       suffix := suffix || ' ';
+                                       CONTINUE;
+                               END;
+                               EXIT;
+                       END LOOP;
+               END LOOP;
+    EXCEPTION WHEN undefined_table THEN
+        -- do nothing
+    END;
+
+    -- Finally, delete the source user
+    PERFORM actor.usr_delete(src_usr,dest_usr);
+
+END;
+$$ LANGUAGE plpgsql;
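+-- Illustrative usage only (ids are invented): merge patron 123 into patron 456,
+-- transferring addresses, deleting the source user's cards, with no card deactivation:
+--   SELECT actor.usr_merge(123, 456, FALSE, TRUE, FALSE);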
+
+COMMIT;