| 36 |
|
self.tagNameMatrix = None |
| 37 |
|
self.tagGroupToTagNameMatrix = None |
| 38 |
|
self._UpdateCaseMatrix() |
| 39 |
< |
print "\nEclipse SQL Library Initialized.\n\nCreated by Emanuel Borges\nVersion %s\n\n"% self.version |
| 39 |
> |
print("\nEclipse SQL Library Initialized.\n\nCreated by Emanuel Borges\nVersion %s\n\n"% self.version) |
| 40 |
|
|
| 41 |
|
def _GetNewCnxn(self, internalCaseName): |
| 42 |
|
"""A simple funtion, not method and does not update any instance vars, to grab a new cnxn connection. Just to have one place to keep this line updated""" |
| 59 |
|
|
| 60 |
|
|
| 61 |
|
self.caseMatrix = matrix |
| 62 |
< |
self.caseList = self.caseMatrix.keys() |
| 62 |
> |
self.caseList = list(self.caseMatrix.keys()) |
| 63 |
|
self.caseList.sort() |
| 64 |
|
cnxn.close() |
| 65 |
|
|
| 104 |
|
for row in cursor: |
| 105 |
|
tagGroupMatrix[row.TagGroupName] = row.TagGroupId |
| 106 |
|
tagNameMatrix[row.TagName] = row.TagId |
| 107 |
< |
if row.TagGroupName in tagGroupToTagNameMatrix.keys(): |
| 107 |
> |
if row.TagGroupName in tagGroupToTagNameMatrix: |
| 108 |
|
tagGroupToTagNameMatrix[row.TagGroupName].append(row.TagName) |
| 109 |
|
else: |
| 110 |
|
tagGroupToTagNameMatrix[row.TagGroupName] = [row.TagName,] |
| 164 |
|
matrix[row.FieldName] = (row.TableName,row.ColumnName) |
| 165 |
|
|
| 166 |
|
self.fieldMatrix = matrix |
| 167 |
< |
self.fieldList = self.fieldMatrix.keys() |
| 167 |
> |
self.fieldList = list(self.fieldMatrix.keys()) |
| 168 |
|
self.fieldList.sort() |
| 169 |
|
|
| 170 |
|
cnxn.close() |
| 217 |
|
docKey = row.DocumentKey |
| 218 |
|
else: |
| 219 |
|
docKey = "No Document" |
| 220 |
< |
if row.CategoryName in matrix.keys(): |
| 220 |
> |
if row.CategoryName in matrix: |
| 221 |
|
matrix[row.CategoryName].append((docKey,docText)) |
| 222 |
|
else: |
| 223 |
|
matrix[row.CategoryName] = [(docKey,docText),] |
| 259 |
|
if count3: |
| 260 |
|
if finalCount < 0.01: |
| 261 |
|
finalCount = 0.01 |
| 262 |
< |
print count3,finalCount |
| 262 |
> |
print(count3,finalCount) |
| 263 |
|
return finalCount |
| 264 |
|
|
| 265 |
|
|
| 285 |
|
|
| 286 |
|
|
| 287 |
|
if queryOnly: |
| 288 |
< |
print "running test only" |
| 288 |
> |
print("running test only") |
| 289 |
|
cursor.execute("SELECT Review.BatchDocument.ReviewStatusId, Tags.TagName, Review.ReviewPass.ReviewPassName FROM TagGroupsDetail INNER JOIN TagGroups ON TagGroupsDetail.TagGroupId = TagGroups.TagGroupId INNER JOIN Review.BatchDocument INNER JOIN DocumentTags ON Review.BatchDocument.DocId = DocumentTags.DocId INNER JOIN DocumentFields_0002 ON Review.BatchDocument.DocId = DocumentFields_0002.DocId ON TagGroupsDetail.TagId = DocumentTags.TagId INNER JOIN Tags ON TagGroupsDetail.TagId = Tags.TagId AND DocumentTags.TagId = Tags.TagId INNER JOIN Review.Batch ON Review.BatchDocument.BatchId = Review.Batch.BatchId AND Review.BatchDocument.BatchId = Review.Batch.BatchId INNER JOIN Review.ReviewPass ON Review.Batch.ReviewPassId = Review.ReviewPass.ReviewPassId AND Review.Batch.ReviewPassId = Review.ReviewPass.ReviewPassId AND Review.Batch.ReviewPassId = Review.ReviewPass.ReviewPassId AND Review.Batch.ReviewPassId = Review.ReviewPass.ReviewPassId AND Review.Batch.ReviewPassId = Review.ReviewPass.ReviewPassId WHERE (Review.BatchDocument.ReviewStatusId = '0') AND (TagGroups.TagGroupName = N'Doc Review Designation') AND (Tags.TagName IS NOT NULL) AND (Review.ReviewPass.ReviewPassName = N'%s')"% batchSetName) |
| 290 |
|
toBeUpdatedCount = len(cursor.fetchall()) |
| 291 |
|
else: |
| 292 |
< |
print "performing the update" |
| 292 |
> |
print("performing the update") |
| 293 |
|
cursor.execute("UPDATE Review.BatchDocument SET ReviewStatusId = '1' FROM TagGroupsDetail INNER JOIN TagGroups ON TagGroupsDetail.TagGroupId = TagGroups.TagGroupId INNER JOIN Review.BatchDocument INNER JOIN DocumentTags ON Review.BatchDocument.DocId = DocumentTags.DocId INNER JOIN DocumentFields_0002 ON Review.BatchDocument.DocId = DocumentFields_0002.DocId ON TagGroupsDetail.TagId = DocumentTags.TagId INNER JOIN Tags ON TagGroupsDetail.TagId = Tags.TagId AND DocumentTags.TagId = Tags.TagId INNER JOIN Review.Batch ON Review.BatchDocument.BatchId = Review.Batch.BatchId AND Review.BatchDocument.BatchId = Review.Batch.BatchId INNER JOIN Review.ReviewPass ON Review.Batch.ReviewPassId = Review.ReviewPass.ReviewPassId AND Review.Batch.ReviewPassId = Review.ReviewPass.ReviewPassId AND Review.Batch.ReviewPassId = Review.ReviewPass.ReviewPassId AND Review.Batch.ReviewPassId = Review.ReviewPass.ReviewPassId AND Review.Batch.ReviewPassId = Review.ReviewPass.ReviewPassId WHERE (Review.BatchDocument.ReviewStatusId = '0') AND (TagGroups.TagGroupName = N'Doc Review Designation') AND (Tags.TagName IS NOT NULL) AND (Review.ReviewPass.ReviewPassName = N'%s')"% batchSetName) |
| 294 |
|
toBeUpdatedCount = cursor.rowcount |
| 295 |
|
cnxn.commit() |
| 319 |
|
## Now create the pipedelim file |
| 320 |
|
outputFile = open(pipeFile,'w') |
| 321 |
|
#outputFile.write(foo.encode('utf8')) |
| 322 |
< |
outputFile.write(u'UserName|ParentFolderId|SearchId|SearchName|CreatedDate|ModifiedDate|SearchItems\n'.encode('utf8')) |
| 322 |
> |
outputFile.write('UserName|ParentFolderId|SearchId|SearchName|CreatedDate|ModifiedDate|SearchItems\n') |
| 323 |
|
|
| 324 |
|
cursor.execute('SELECT ActivityTracking.DIM_User.UserName,SavedSearch.Searches.ParentFolderId,SavedSearch.Searches.SearchId, SavedSearch.Searches.SearchName, SavedSearch.Searches.CreatedDate,SavedSearch.Searches.ModifiedDate, SavedSearch.Searches.SearchItems FROM ActivityTracking.DIM_User INNER JOIN SavedSearch.Searches ON ActivityTracking.DIM_User.UserKey = SavedSearch.Searches.CreatedByKey AND ActivityTracking.DIM_User.UserKey = SavedSearch.Searches.ModifiedByKey AND ActivityTracking.DIM_User.UserKey = SavedSearch.Searches.CreatedByKey AND ActivityTracking.DIM_User.UserKey = SavedSearch.Searches.ModifiedByKey') |
| 325 |
|
|
| 326 |
|
for row in cursor: |
| 327 |
|
#outputFile.write("%s|%s|%s|%s|%s|%s|%s\n"% (getattr(row,'UserName'),folderMatrix[getattr(row,'ParentFolderId')][0],getattr(row,'SearchId'),getattr(row,'SearchName'),getattr(row,'CreatedDate'),getattr(row,'ModifiedDate',getattr(row,'SearchItems'))) |
| 328 |
< |
outputFile.write(u"{0}|{1}|{2}|{3}|{4}|{5}|{6}\n".format(getattr(row,u'UserName'),folderMatrix[getattr(row,u'ParentFolderId')][0],getattr(row,u'SearchId'),getattr(row,u'SearchName'),getattr(row,u'CreatedDate'),getattr(row,u'ModifiedDate'),getattr(row,u'SearchItems')).encode('utf8')) |
| 328 |
> |
outputFile.write("{0}|{1}|{2}|{3}|{4}|{5}|{6}\n".format(getattr(row,'UserName'),folderMatrix[getattr(row,'ParentFolderId')][0],getattr(row,'SearchId'),getattr(row,'SearchName'),getattr(row,'CreatedDate'),getattr(row,'ModifiedDate'),getattr(row,'SearchItems')).encode('utf8')) |
| 329 |
|
|
| 330 |
|
outputFile.close() |
| 331 |
|
cnxn.close() |
| 343 |
|
cursor = cnxn.cursor() |
| 344 |
|
|
| 345 |
|
## with the db selected, now execute the sp_updatestats part |
| 346 |
< |
print "\nPerforming SP_Updatestats..." |
| 346 |
> |
print("\nPerforming SP_Updatestats...") |
| 347 |
|
cursor.execute("sp_updatestats") |
| 348 |
|
cnxn.close() |
| 349 |
< |
print "SP_UpdateStats finished." |
| 349 |
> |
print("SP_UpdateStats finished.") |
| 350 |
|
|
| 351 |
|
#cnxn = pyodbc.connect("Driver={ODBC Driver 17 for SQL Server};""Server=XIPRORVW01\XIPRORVW01;""Database=evdxAdmin;""Trusted_Connection=yes;") |
| 352 |
|
cnxn = self._GetNewCnxn('evdxAdmin') |
| 354 |
|
|
| 355 |
|
|
| 356 |
|
## with the eevdxadmin db selected, now run the ap_rebuildindexes part |
| 357 |
< |
print "\nPerforming ap_rebuildIndexes..." |
| 357 |
> |
print("\nPerforming ap_rebuildIndexes...") |
| 358 |
|
cursor.execute("exec ap_rebuildindexes @databasename = '%s'"%internalCaseName) |
| 359 |
< |
print "rebuildIndexes finished" |
| 359 |
> |
print("rebuildIndexes finished") |
| 360 |
|
|
| 361 |
|
#cnxn = pyodbc.connect("Driver={ODBC Driver 17 for SQL Server};""Server=XIPRORVW01\XIPRORVW01;""Database=%s;""Trusted_Connection=yes;"%internalCaseName) |
| 362 |
|
cnxn = self._GetNewCnxn(internalCaseName) |
| 363 |
|
cursor = cnxn.cursor() |
| 364 |
|
|
| 365 |
|
## Now read and run the rest by way of a blob. |
| 366 |
< |
print "\nNow performing full defrag..." |
| 366 |
> |
print("\nNow performing full defrag...") |
| 367 |
|
self._ProcessSQLBlob('defragBlob',cursor) |
| 368 |
|
|
| 369 |
|
cnxn.close() |
| 370 |
< |
print "Full Defrag Finished." |
| 370 |
> |
print("Full Defrag Finished.") |
| 371 |
|
|
| 372 |
|
|