diff --git a/release/scripts/3ds_export.py b/release/scripts/3ds_export.py index f6dc47474e7..15079130939 100644 --- a/release/scripts/3ds_export.py +++ b/release/scripts/3ds_export.py @@ -406,7 +406,7 @@ class _3ds_chunk(object): ###################################################### # EXPORT ###################################################### -''' + def get_material_images(material): # blender utility func. images = [] @@ -417,7 +417,7 @@ def get_material_images(material): if image: images.append(image) # maye want to include info like diffuse, spec here. return images -''' + def make_material_subchunk(id, color): '''Make a material subchunk. @@ -432,7 +432,7 @@ def make_material_subchunk(id, color): # mat_sub.add_subchunk(col2) return mat_sub -''' + def make_material_texture_chunk(id, images): """Make Material Map texture chunk """ @@ -448,8 +448,8 @@ def make_material_texture_chunk(id, images): add_image(image) return mat_sub -''' -def make_material_chunk(material, image): + +def make_material_chunk(material, image, PREF_TEXTURES): '''Make a material chunk out of a blender material.''' material_chunk = _3ds_chunk(MATERIAL) name = _3ds_chunk(MATNAME) @@ -471,13 +471,13 @@ def make_material_chunk(material, image): material_chunk.add_subchunk(make_material_subchunk(MATSPECULAR, (1,1,1) )) # CANT READ IN MAX!!!! 
SEEMS LIKE THE FILE IS VALID FROM 3DSDUMP :/ - ''' - images = get_material_images(material) # can be None - if image: images.append(image) + if PREF_TEXTURES: + images = get_material_images(material) # can be None + if image: images.append(image) + + if images: + material_chunk.add_subchunk(make_material_texture_chunk(MATMAP, images)) - if images: - material_chunk.add_subchunk(make_material_texture_chunk(MATMAP, images)) - ''' return material_chunk class tri_wrapper(object): @@ -872,6 +872,9 @@ def save_3ds(filename): if not BPyMessages.Warning_SaveOver(filename): return + PREF_TEXTURES = Blender.Draw.PupMenu('Texture (Breaks some importers)%t| YES %x1 | NO %x0') + if PREF_TEXTURES ==-1: return + time1= Blender.sys.time() Blender.Window.WaitCursor(1) scn= Blender.Scene.GetCurrent() @@ -949,7 +952,7 @@ def save_3ds(filename): # Make material chunks for all materials used in the meshes: for mat_and_image in materialDict.itervalues(): - object_info.add_subchunk(make_material_chunk(*mat_and_image)) + object_info.add_subchunk(make_material_chunk(mat_and_image[0], mat_and_image[1], PREF_TEXTURES)) # Give all objects a unique ID and build a dictionary from object name to object id: """ diff --git a/release/scripts/3ds_import.py b/release/scripts/3ds_import.py index f3580868e20..56525d56611 100644 --- a/release/scripts/3ds_import.py +++ b/release/scripts/3ds_import.py @@ -308,7 +308,7 @@ def add_texture_to_material(image, texture, material, mapto): material.setTexture(free_tex_slots[0],texture,Texture.TexCo.UV,map) -def process_next_chunk(file, previous_chunk, importedObjects): +def process_next_chunk(file, previous_chunk, importedObjects, IMAGE_SEARCH): #print previous_chunk.bytes_read, 'BYTES READ' contextObName= None contextLamp= [None, None] # object, Data @@ -441,7 +441,7 @@ def process_next_chunk(file, previous_chunk, importedObjects): elif (new_chunk.ID==OBJECTINFO): #print 'elif (new_chunk.ID==OBJECTINFO):' # print 'found an OBJECTINFO chunk' - 
process_next_chunk(file, new_chunk, importedObjects) + process_next_chunk(file, new_chunk, importedObjects, IMAGE_SEARCH) #keep track of how much we read in the main chunk new_chunk.bytes_read+=temp_chunk.bytes_read @@ -523,7 +523,8 @@ def process_next_chunk(file, previous_chunk, importedObjects): if (temp_chunk.ID==MAT_MAP_FILENAME): texture_name=read_string(file) - img= TEXTURE_DICT[contextMaterial.name]= BPyImage.comprehensiveImageLoad(texture_name, FILENAME) + #img= TEXTURE_DICT[contextMaterial.name]= BPyImage.comprehensiveImageLoad(texture_name, FILENAME) + img= TEXTURE_DICT[contextMaterial.name]= BPyImage.comprehensiveImageLoad(texture_name, FILENAME, PLACE_HOLDER= False, RECURSIVE= IMAGE_SEARCH) new_chunk.bytes_read += (len(texture_name)+1) #plus one for the null character that gets removed else: @@ -545,7 +546,8 @@ def process_next_chunk(file, previous_chunk, importedObjects): if (temp_chunk.ID==MAT_MAP_FILENAME): texture_name= read_string(file) - img= BPyImage.comprehensiveImageLoad(texture_name, FILENAME) + #img= BPyImage.comprehensiveImageLoad(texture_name, FILENAME) + img= BPyImage.comprehensiveImageLoad(texture_name, FILENAME, PLACE_HOLDER= False, RECURSIVE= IMAGE_SEARCH) new_chunk.bytes_read+= (len(texture_name)+1) #plus one for the null character that gets removed else: skip_to_end(file, temp_chunk) @@ -566,7 +568,8 @@ def process_next_chunk(file, previous_chunk, importedObjects): if (temp_chunk.ID==MAT_MAP_FILENAME): texture_name= read_string(file) - img= BPyImage.comprehensiveImageLoad(texture_name, FILENAME) + #img= BPyImage.comprehensiveImageLoad(texture_name, FILENAME) + img= BPyImage.comprehensiveImageLoad(texture_name, FILENAME, PLACE_HOLDER= False, RECURSIVE= IMAGE_SEARCH) new_chunk.bytes_read += (len(texture_name)+1) #plus one for the null character that gets removed else: skip_to_end(file, temp_chunk) @@ -586,7 +589,8 @@ def process_next_chunk(file, previous_chunk, importedObjects): if (temp_chunk.ID==MAT_MAP_FILENAME): texture_name= 
read_string(file) - img= BPyImage.comprehensiveImageLoad(texture_name, FILENAME) + #img= BPyImage.comprehensiveImageLoad(texture_name, FILENAME) + img= BPyImage.comprehensiveImageLoad(texture_name, FILENAME, PLACE_HOLDER= False, RECURSIVE= IMAGE_SEARCH) new_chunk.bytes_read += (len(texture_name)+1) #plus one for the null character that gets removed else: skip_to_end(file, temp_chunk) @@ -729,7 +733,8 @@ def process_next_chunk(file, previous_chunk, importedObjects): try: TEXTURE_DICT[contextMaterial.name] except: - img= TEXTURE_DICT[contextMaterial.name]= BPyImage.comprehensiveImageLoad(texture_name, FILENAME) + #img= TEXTURE_DICT[contextMaterial.name]= BPyImage.comprehensiveImageLoad(texture_name, FILENAME) + img= TEXTURE_DICT[contextMaterial.name]= BPyImage.comprehensiveImageLoad(texture_name, FILENAME, PLACE_HOLDER= False, RECURSIVE= IMAGE_SEARCH) new_chunk.bytes_read+= len(texture_name)+1 #plus one for the null character that gets removed @@ -776,12 +781,14 @@ def load_3ds(filename, PREF_UI= True): return - IMPORT_AS_INSTANCE= Blender.Draw.Create(0) + # IMPORT_AS_INSTANCE= Blender.Draw.Create(0) IMPORT_CONSTRAIN_BOUNDS= Blender.Draw.Create(10.0) + IMAGE_SEARCH= Blender.Draw.Create(1) # Get USER Options pup_block= [\ ('Size Constraint:', IMPORT_CONSTRAIN_BOUNDS, 0.0, 1000.0, 'Scale the model by 10 until it reacehs the size constraint. 
Zero Disables.'),\ + ('Image Search', IMAGE_SEARCH, 'Search subdirs for any associated images (Warning, may be slow)'),\ #('Group Instance', IMPORT_AS_INSTANCE, 'Import objects into a new scene and group, creating an instance in the current scene.'),\ ] @@ -792,19 +799,22 @@ def load_3ds(filename, PREF_UI= True): Blender.Window.WaitCursor(1) IMPORT_CONSTRAIN_BOUNDS= IMPORT_CONSTRAIN_BOUNDS.val - IMPORT_AS_INSTANCE= IMPORT_AS_INSTANCE.val + # IMPORT_AS_INSTANCE= IMPORT_AS_INSTANCE.val + IMAGE_SEARCH = IMAGE_SEARCH.val if IMPORT_CONSTRAIN_BOUNDS: BOUNDS_3DS[:]= [1<<30, 1<<30, 1<<30, -1<<30, -1<<30, -1<<30] else: BOUNDS_3DS[:]= [] + ##IMAGE_SEARCH + scn= Scene.GetCurrent() SCN_OBJECTS = scn.objects SCN_OBJECTS.selected = [] # de select all importedObjects= [] # Fill this list with objects - process_next_chunk(file, current_chunk, importedObjects) + process_next_chunk(file, current_chunk, importedObjects, IMAGE_SEARCH) # Link the objects into this scene. diff --git a/release/scripts/export_obj.py b/release/scripts/export_obj.py index 400a8559085..92d0f44bdd9 100644 --- a/release/scripts/export_obj.py +++ b/release/scripts/export_obj.py @@ -340,7 +340,13 @@ EXPORT_GROUP_BY_OB=False, EXPORT_GROUP_BY_MAT=False, EXPORT_MORPH_TARGET=False) contextSmooth = None # Will either be true or false, set bad to force initialization switch.
if EXPORT_BLEN_OBS or EXPORT_GROUP_BY_OB: - obnamestring = '%s_%s' % (fixName(ob.name), fixName(ob.getData(1))) + name1 = ob.name + name2 = ob.getData(1) + if name1 == name2: + obnamestring = fixName(name1) + else: + obnamestring = '%s_%s' % (fixName(name1), fixName(name2)) + if EXPORT_BLEN_OBS: file.write('o %s\n' % obnamestring) # Write Object name else: # if EXPORT_GROUP_BY_OB: diff --git a/release/scripts/weightpaint_clean.py b/release/scripts/weightpaint_clean.py index 038c5e18c1a..d6cddba5fa2 100644 --- a/release/scripts/weightpaint_clean.py +++ b/release/scripts/weightpaint_clean.py @@ -40,7 +40,6 @@ It removes very low weighted verts from the current group with a weight option. from Blender import Scene, Draw, Object import BPyMesh -SMALL_NUM= 0.000001 def weightClean(me, PREF_THRESH, PREF_KEEP_SINGLE, PREF_OTHER_GROUPS): groupNames, vWeightDict= BPyMesh.meshWeight2Dict(me) diff --git a/release/scripts/weightpaint_grow_shrink.py b/release/scripts/weightpaint_grow_shrink.py index cfc31a2f7e6..5527f59e5ae 100644 --- a/release/scripts/weightpaint_grow_shrink.py +++ b/release/scripts/weightpaint_grow_shrink.py @@ -3,7 +3,7 @@ Name: 'Grow/Shrink Weight...' Blender: 241 Group: 'WeightPaint' -Tooltip: 'Removed verts from groups below a weight limit.' +Tooltip: 'Grow/Shrink active vertex group.' 
""" __author__ = ["Campbell Barton"] @@ -40,7 +40,6 @@ It grows/shrinks the bounds of the weight painted area from Blender import Scene, Draw, Window import BPyMesh -SMALL_NUM= 0.000001 def actWeightNormalize(me, PREF_MODE, PREF_MAX_DIST, PREF_STRENGTH, PREF_ITERATIONS): Window.WaitCursor(1) groupNames, vWeightDict= BPyMesh.meshWeight2Dict(me) @@ -102,9 +101,9 @@ def actWeightNormalize(me, PREF_MODE, PREF_MAX_DIST, PREF_STRENGTH, PREF_ITERATI def main(): scn= Scene.GetCurrent() - ob= scn.getActiveObject() + ob= scn.objects.active - if not ob or ob.getType() != 'Mesh': + if not ob or ob.type != 'Mesh': Draw.PupMenu('Error, no active mesh object, aborting.') return @@ -125,12 +124,7 @@ def main(): if not Draw.PupBlock('Grow/Shrink...', pup_block): return - PREF_MAXDIST= PREF_MAXDIST.val - PREF_STRENGTH= PREF_STRENGTH.val - PREF_MODE= PREF_MODE.val - PREF_ITERATIONS= PREF_ITERATIONS.val - - actWeightNormalize(me, PREF_MODE, PREF_MAXDIST, PREF_STRENGTH, PREF_ITERATIONS) + actWeightNormalize(me, PREF_MODE.val, PREF_MAXDIST.val, PREF_STRENGTH.val, PREF_ITERATIONS.val) if __name__=='__main__': diff --git a/source/blender/python/api2_2x/doc/Render.py b/source/blender/python/api2_2x/doc/Render.py index 1ab8afc4556..adadf54c488 100644 --- a/source/blender/python/api2_2x/doc/Render.py +++ b/source/blender/python/api2_2x/doc/Render.py @@ -230,9 +230,6 @@ class RenderData: @ivar gameFrameColor: RGB color triplet for bars. Values are clamped in the range [0.0,1.0]. @type gameFrameColor: list of RGB 3 floats - @ivar saveBuffers: Save tiles for all renderlayers to disk saving memory. - Also see B{SAVE_BUFFERS} in L{SceModes} constant dict. - @type saveBuffers: boolean @ivar sizeY: Image height (in pixels). Values are clamped to the range [4,10000]. @type sizeY: int